xref: /openbmc/linux/drivers/virtio/virtio_ring.c (revision cf94db21905333e610e479688add629397a4b384)
/* Virtio ring implementation.
 *
 *  Copyright 2007 Rusty Russell IBM Corporation
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/dma-mapping.h>
#include <xen/xen.h>

#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		BUG();						\
	} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)						\
	do {							\
		if ((_vq)->in_use)				\
			panic("%s:in_use = %i\n",		\
			      (_vq)->vq.name, (_vq)->in_use);	\
		(_vq)->in_use = __LINE__;			\
	} while (0)
#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
#define LAST_ADD_TIME_UPDATE(_vq)				\
	do {							\
		ktime_t now = ktime_get();			\
								\
		/* No kick or get, with .1 second between?  Warn. */ \
		if ((_vq)->last_add_time_valid)			\
			WARN_ON(ktime_to_ms(ktime_sub(now,	\
				(_vq)->last_add_time)) > 100);	\
		(_vq)->last_add_time = now;			\
		(_vq)->last_add_time_valid = true;		\
	} while (0)
#define LAST_ADD_TIME_CHECK(_vq)				\
	do {							\
		if ((_vq)->last_add_time_valid) {		\
			WARN_ON(ktime_to_ms(ktime_sub(ktime_get(), \
				      (_vq)->last_add_time)) > 100); \
		}						\
	} while (0)
#define LAST_ADD_TIME_INVALID(_vq)				\
	((_vq)->last_add_time_valid = false)
#else
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&_vq->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		(_vq)->broken = true;				\
	} while (0)
#define START_USE(vq)
#define END_USE(vq)
#define LAST_ADD_TIME_UPDATE(vq)
#define LAST_ADD_TIME_CHECK(vq)
#define LAST_ADD_TIME_INVALID(vq)
#endif
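
/*
 * Note: START_USE() records __LINE__ in ->in_use, so the reentrancy
 * panic above reports which line of this file first entered the
 * virtqueue, not merely that a conflict happened.
 */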

struct vring_desc_state_split {
	void *data;			/* Data for callback. */
	struct vring_desc *indir_desc;	/* Indirect descriptor, if any. */
};

struct vring_desc_state_packed {
	void *data;			/* Data for callback. */
	struct vring_packed_desc *indir_desc; /* Indirect descriptor, if any. */
	u16 num;			/* Descriptor list length. */
	u16 next;			/* The next desc state in a list. */
	u16 last;			/* The last desc state in a list. */
};

struct vring_desc_extra_packed {
	dma_addr_t addr;		/* Buffer DMA addr. */
	u32 len;			/* Buffer length. */
	u16 flags;			/* Descriptor flags. */
};

struct vring_virtqueue {
	struct virtqueue vq;

	/* Is this a packed ring? */
	bool packed_ring;

	/* Is DMA API used? */
	bool use_dma_api;

	/* Can we use weak barriers? */
	bool weak_barriers;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Host supports indirect buffers */
	bool indirect;

	/* Host publishes avail event idx */
	bool event;

	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen. */
	u16 last_used_idx;

	union {
		/* Available for split ring */
		struct {
			/* Actual memory layout for this queue. */
			struct vring vring;

			/* Last written value to avail->flags */
			u16 avail_flags_shadow;

			/*
			 * Last written value to avail->idx in
			 * guest byte order.
			 */
			u16 avail_idx_shadow;

			/* Per-descriptor state. */
			struct vring_desc_state_split *desc_state;

			/* DMA address and size information */
			dma_addr_t queue_dma_addr;
			size_t queue_size_in_bytes;
		} split;

		/* Available for packed ring */
		struct {
			/* Actual memory layout for this queue. */
			struct {
				unsigned int num;
				struct vring_packed_desc *desc;
				struct vring_packed_desc_event *driver;
				struct vring_packed_desc_event *device;
			} vring;

			/* Driver ring wrap counter. */
			bool avail_wrap_counter;

			/* Device ring wrap counter. */
			bool used_wrap_counter;

			/* AVAIL/USED flag bits to use when making descriptors available. */
			u16 avail_used_flags;

			/* Index of the next avail descriptor. */
			u16 next_avail_idx;

			/*
			 * Last written value to driver->flags in
			 * guest byte order.
			 */
			u16 event_flags_shadow;

			/* Per-descriptor state. */
			struct vring_desc_state_packed *desc_state;
			struct vring_desc_extra_packed *desc_extra;

			/* DMA address and size information */
			dma_addr_t ring_dma_addr;
			dma_addr_t driver_event_dma_addr;
			dma_addr_t device_event_dma_addr;
			size_t ring_size_in_bytes;
			size_t event_size_in_bytes;
		} packed;
	};

	/* How to notify other side. FIXME: commonalize hcalls! */
	bool (*notify)(struct virtqueue *vq);

	/* DMA, allocation, and size information */
	bool we_own_ring;

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;

	/* Figure out if their kicks are too delayed. */
	bool last_add_time_valid;
	ktime_t last_add_time;
#endif
};


/*
 * Helpers.
 */

#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)

static inline bool virtqueue_use_indirect(struct virtqueue *_vq,
					  unsigned int total_sg)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	/*
	 * If the host supports indirect descriptor tables, and we have multiple
	 * buffers, then go indirect. FIXME: tune this threshold
	 */
	return (vq->indirect && total_sg > 1 && vq->vq.num_free);
}
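
/*
 * For example, a three-fragment request (total_sg == 3) on a host that
 * offers VIRTIO_RING_F_INDIRECT_DESC consumes a single ring slot
 * pointing at a three-entry indirect table, instead of three chained
 * ring slots.
 */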

/*
 * Modern virtio devices have feature bits to specify whether they need a
 * quirk and bypass the IOMMU. If not there, just use the DMA API.
 *
 * If there, the interaction between virtio and DMA API is messy.
 *
 * On most systems with virtio, physical addresses match bus addresses,
 * and it doesn't particularly matter whether we use the DMA API.
 *
 * On some systems, including Xen and any system with a physical device
 * that speaks virtio behind a physical IOMMU, we must use the DMA API
 * for virtio DMA to work at all.
 *
 * On other systems, including SPARC and PPC64, virtio-pci devices are
 * enumerated as though they are behind an IOMMU, but the virtio host
 * ignores the IOMMU, so we must either pretend that the IOMMU isn't
 * there or somehow map everything as the identity.
 *
 * For the time being, we preserve historic behavior and bypass the DMA
 * API.
 *
 * TODO: install a per-device DMA ops structure that does the right thing
 * taking into account all the above quirks, and use the DMA API
 * unconditionally on data path.
 */

static bool vring_use_dma_api(struct virtio_device *vdev)
{
	if (!virtio_has_iommu_quirk(vdev))
		return true;

	/* Otherwise, we are left to guess. */
	/*
	 * In theory, it's possible to have a buggy QEMU-supplied
	 * emulated Q35 IOMMU and Xen enabled at the same time.  On
	 * such a configuration, virtio has never worked and will
	 * not work without an even larger kludge.  Instead, enable
	 * the DMA API if we're a Xen guest, which at least allows
	 * all of the sensible Xen configurations to work correctly.
	 */
	if (xen_domain())
		return true;

	return false;
}

size_t virtio_max_dma_size(struct virtio_device *vdev)
{
	size_t max_segment_size = SIZE_MAX;

	if (vring_use_dma_api(vdev))
		max_segment_size = dma_max_mapping_size(&vdev->dev);

	return max_segment_size;
}
EXPORT_SYMBOL_GPL(virtio_max_dma_size);
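
/*
 * Illustrative use (a sketch, not code from this file): a block driver
 * can cap its segment size so that no single mapping exceeds what the
 * DMA layer (e.g. swiotlb bouncing) can handle:
 *
 *	blk_queue_max_segment_size(q, virtio_max_dma_size(vdev));
 */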

static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
			      dma_addr_t *dma_handle, gfp_t flag)
{
	if (vring_use_dma_api(vdev)) {
		return dma_alloc_coherent(vdev->dev.parent, size,
					  dma_handle, flag);
	} else {
		void *queue = alloc_pages_exact(PAGE_ALIGN(size), flag);

		if (queue) {
			phys_addr_t phys_addr = virt_to_phys(queue);
			*dma_handle = (dma_addr_t)phys_addr;

			/*
			 * Sanity check: make sure we didn't truncate
			 * the address.  The only arches I can find that
			 * have 64-bit phys_addr_t but 32-bit dma_addr_t
			 * are certain non-highmem MIPS and x86
			 * configurations, but these configurations
			 * should never allocate physical pages above 32
			 * bits, so this is fine.  Just in case, throw a
			 * warning and abort if we end up with an
			 * unrepresentable address.
			 */
			if (WARN_ON_ONCE(*dma_handle != phys_addr)) {
				free_pages_exact(queue, PAGE_ALIGN(size));
				return NULL;
			}
		}
		return queue;
	}
}

static void vring_free_queue(struct virtio_device *vdev, size_t size,
			     void *queue, dma_addr_t dma_handle)
{
	if (vring_use_dma_api(vdev))
		dma_free_coherent(vdev->dev.parent, size, queue, dma_handle);
	else
		free_pages_exact(queue, PAGE_ALIGN(size));
}

/*
 * The DMA ops on various arches are rather gnarly right now, and
 * making all of the arch DMA ops work on the vring device itself
 * is a mess.  For now, we use the parent device for DMA ops.
 */
static inline struct device *vring_dma_dev(const struct vring_virtqueue *vq)
{
	return vq->vq.vdev->dev.parent;
}

/* Map one sg entry. */
static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
				   struct scatterlist *sg,
				   enum dma_data_direction direction)
{
	if (!vq->use_dma_api)
		return (dma_addr_t)sg_phys(sg);

	/*
	 * We can't use dma_map_sg, because we don't use scatterlists in
	 * the way it expects (we don't guarantee that the scatterlist
	 * will exist for the lifetime of the mapping).
	 */
	return dma_map_page(vring_dma_dev(vq),
			    sg_page(sg), sg->offset, sg->length,
			    direction);
}

static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
				   void *cpu_addr, size_t size,
				   enum dma_data_direction direction)
{
	if (!vq->use_dma_api)
		return (dma_addr_t)virt_to_phys(cpu_addr);

	return dma_map_single(vring_dma_dev(vq),
			      cpu_addr, size, direction);
}

static int vring_mapping_error(const struct vring_virtqueue *vq,
			       dma_addr_t addr)
{
	if (!vq->use_dma_api)
		return 0;

	return dma_mapping_error(vring_dma_dev(vq), addr);
}


/*
 * Split ring specific functions - *_split().
 */

static void vring_unmap_one_split(const struct vring_virtqueue *vq,
				  struct vring_desc *desc)
{
	u16 flags;

	if (!vq->use_dma_api)
		return;

	flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);

	if (flags & VRING_DESC_F_INDIRECT) {
		dma_unmap_single(vring_dma_dev(vq),
				 virtio64_to_cpu(vq->vq.vdev, desc->addr),
				 virtio32_to_cpu(vq->vq.vdev, desc->len),
				 (flags & VRING_DESC_F_WRITE) ?
				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
	} else {
		dma_unmap_page(vring_dma_dev(vq),
			       virtio64_to_cpu(vq->vq.vdev, desc->addr),
			       virtio32_to_cpu(vq->vq.vdev, desc->len),
			       (flags & VRING_DESC_F_WRITE) ?
			       DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}
}

static struct vring_desc *alloc_indirect_split(struct virtqueue *_vq,
					       unsigned int total_sg,
					       gfp_t gfp)
{
	struct vring_desc *desc;
	unsigned int i;

	/*
	 * We require lowmem mappings for the descriptors because
	 * otherwise virt_to_phys will give us bogus addresses in the
	 * virtqueue.
	 */
	gfp &= ~__GFP_HIGHMEM;

	desc = kmalloc_array(total_sg, sizeof(struct vring_desc), gfp);
	if (!desc)
		return NULL;

	for (i = 0; i < total_sg; i++)
		desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1);
	return desc;
}

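/*
 * Illustrative caller sketch: a driver queuing one driver-to-device sg
 * followed by one device-to-driver sg through virtqueue_add_sgs() ends
 * up here with total_sg == 2, out_sgs == 1 and in_sgs == 1 (req/resp
 * below are hypothetical):
 *
 *	struct scatterlist hdr, status;
 *	struct scatterlist *sgs[] = { &hdr, &status };
 *
 *	sg_init_one(&hdr, req, sizeof(*req));
 *	sg_init_one(&status, resp, sizeof(*resp));
 *	err = virtqueue_add_sgs(vq, sgs, 1, 1, req, GFP_ATOMIC);
 */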
static inline int virtqueue_add_split(struct virtqueue *_vq,
				      struct scatterlist *sgs[],
				      unsigned int total_sg,
				      unsigned int out_sgs,
				      unsigned int in_sgs,
				      void *data,
				      void *ctx,
				      gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	struct scatterlist *sg;
	struct vring_desc *desc;
	unsigned int i, n, avail, descs_used, uninitialized_var(prev), err_idx;
	int head;
	bool indirect;

	START_USE(vq);

	BUG_ON(data == NULL);
	BUG_ON(ctx && vq->indirect);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return -EIO;
	}

	LAST_ADD_TIME_UPDATE(vq);

	BUG_ON(total_sg == 0);

	head = vq->free_head;

	if (virtqueue_use_indirect(_vq, total_sg))
		desc = alloc_indirect_split(_vq, total_sg, gfp);
	else {
		desc = NULL;
		WARN_ON_ONCE(total_sg > vq->split.vring.num && !vq->indirect);
	}

	if (desc) {
		/* Use a single buffer which doesn't continue */
		indirect = true;
		/* Set up rest to use this indirect table. */
		i = 0;
		descs_used = 1;
	} else {
		indirect = false;
		desc = vq->split.vring.desc;
		i = head;
		descs_used = total_sg;
	}

	if (vq->vq.num_free < descs_used) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 descs_used, vq->vq.num_free);
		/* FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer.  Presumably the
		 * host should service the ring ASAP. */
		if (out_sgs)
			vq->notify(&vq->vq);
		if (indirect)
			kfree(desc);
		END_USE(vq);
		return -ENOSPC;
	}

	for (n = 0; n < out_sgs; n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT);
			desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
			prev = i;
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		}
	}
	for (; n < (out_sgs + in_sgs); n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT | VRING_DESC_F_WRITE);
			desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
			prev = i;
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		}
	}
	/* Last one doesn't continue. */
	desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);

	if (indirect) {
		/* Now that the indirect table is filled in, map it. */
		dma_addr_t addr = vring_map_single(
			vq, desc, total_sg * sizeof(struct vring_desc),
			DMA_TO_DEVICE);
		if (vring_mapping_error(vq, addr))
			goto unmap_release;

		vq->split.vring.desc[head].flags = cpu_to_virtio16(_vq->vdev,
				VRING_DESC_F_INDIRECT);
		vq->split.vring.desc[head].addr = cpu_to_virtio64(_vq->vdev,
				addr);

		vq->split.vring.desc[head].len = cpu_to_virtio32(_vq->vdev,
				total_sg * sizeof(struct vring_desc));
	}

	/* We're using some buffers from the free list. */
	vq->vq.num_free -= descs_used;

	/* Update free pointer */
	if (indirect)
		vq->free_head = virtio16_to_cpu(_vq->vdev,
					vq->split.vring.desc[head].next);
	else
		vq->free_head = i;

	/* Store token and indirect buffer state. */
	vq->split.desc_state[head].data = data;
	if (indirect)
		vq->split.desc_state[head].indir_desc = desc;
	else
		vq->split.desc_state[head].indir_desc = ctx;

	/* Put entry in available array (but don't update avail->idx until they
	 * do sync). */
	avail = vq->split.avail_idx_shadow & (vq->split.vring.num - 1);
	vq->split.vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);

	/* Descriptors and available array need to be set before we expose the
	 * new available array entries. */
	virtio_wmb(vq->weak_barriers);
	vq->split.avail_idx_shadow++;
	vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
						vq->split.avail_idx_shadow);
	vq->num_added++;

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	/* This is very unlikely, but theoretically possible.  Kick
	 * just in case. */
	if (unlikely(vq->num_added == (1 << 16) - 1))
		virtqueue_kick(_vq);

	return 0;

unmap_release:
	err_idx = i;
	i = head;

	for (n = 0; n < total_sg; n++) {
		if (i == err_idx)
			break;
		vring_unmap_one_split(vq, &desc[i]);
		i = virtio16_to_cpu(_vq->vdev, vq->split.vring.desc[i].next);
	}

	if (indirect)
		kfree(desc);

	END_USE(vq);
	return -EIO;
}

static bool virtqueue_kick_prepare_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 new, old;
	bool needs_kick;

	START_USE(vq);
	/* We need to expose available array entries before checking avail
	 * event. */
	virtio_mb(vq->weak_barriers);

	old = vq->split.avail_idx_shadow - vq->num_added;
	new = vq->split.avail_idx_shadow;
	vq->num_added = 0;

	LAST_ADD_TIME_CHECK(vq);
	LAST_ADD_TIME_INVALID(vq);

	if (vq->event) {
		needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev,
					vring_avail_event(&vq->split.vring)),
					      new, old);
	} else {
		needs_kick = !(vq->split.vring.used->flags &
					cpu_to_virtio16(_vq->vdev,
						VRING_USED_F_NO_NOTIFY));
	}
	END_USE(vq);
	return needs_kick;
}
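
/*
 * Note: vring_need_event(event_idx, new, old) from
 * include/uapi/linux/virtio_ring.h expands to
 * (u16)(new - event_idx - 1) < (u16)(new - old), i.e. it is true iff
 * the device's requested event index falls within the batch of entries
 * just added.  For example, with old == 10, new == 13 and
 * vring_avail_event() == 11, the device asked to be notified once
 * entry 11 became available, which this batch covered, so we kick.
 */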

static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
			     void **ctx)
{
	unsigned int i, j;
	__virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);

	/* Clear data ptr. */
	vq->split.desc_state[head].data = NULL;

	/* Put back on free list: unmap first-level descriptors and find end */
	i = head;

	while (vq->split.vring.desc[i].flags & nextflag) {
		vring_unmap_one_split(vq, &vq->split.vring.desc[i]);
		i = virtio16_to_cpu(vq->vq.vdev, vq->split.vring.desc[i].next);
		vq->vq.num_free++;
	}

	vring_unmap_one_split(vq, &vq->split.vring.desc[i]);
	vq->split.vring.desc[i].next = cpu_to_virtio16(vq->vq.vdev,
						vq->free_head);
	vq->free_head = head;

	/* Plus final descriptor */
	vq->vq.num_free++;

	if (vq->indirect) {
		struct vring_desc *indir_desc =
				vq->split.desc_state[head].indir_desc;
		u32 len;

		/* Free the indirect table, if any, now that it's unmapped. */
		if (!indir_desc)
			return;

		len = virtio32_to_cpu(vq->vq.vdev,
				vq->split.vring.desc[head].len);

		BUG_ON(!(vq->split.vring.desc[head].flags &
			 cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_INDIRECT)));
		BUG_ON(len == 0 || len % sizeof(struct vring_desc));

		for (j = 0; j < len / sizeof(struct vring_desc); j++)
			vring_unmap_one_split(vq, &indir_desc[j]);

		kfree(indir_desc);
		vq->split.desc_state[head].indir_desc = NULL;
	} else if (ctx) {
		*ctx = vq->split.desc_state[head].indir_desc;
	}
}

static inline bool more_used_split(const struct vring_virtqueue *vq)
{
	return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev,
			vq->split.vring.used->idx);
}

static void *virtqueue_get_buf_ctx_split(struct virtqueue *_vq,
					 unsigned int *len,
					 void **ctx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	void *ret;
	unsigned int i;
	u16 last_used;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used_split(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* Only get used array entries after they have been exposed by host. */
	virtio_rmb(vq->weak_barriers);

	last_used = (vq->last_used_idx & (vq->split.vring.num - 1));
	i = virtio32_to_cpu(_vq->vdev,
			vq->split.vring.used->ring[last_used].id);
	*len = virtio32_to_cpu(_vq->vdev,
			vq->split.vring.used->ring[last_used].len);

	if (unlikely(i >= vq->split.vring.num)) {
		BAD_RING(vq, "id %u out of range\n", i);
		return NULL;
	}
	if (unlikely(!vq->split.desc_state[i].data)) {
		BAD_RING(vq, "id %u is not a head!\n", i);
		return NULL;
	}

	/* detach_buf_split clears data, so grab it now. */
	ret = vq->split.desc_state[i].data;
	detach_buf_split(vq, i, ctx);
	vq->last_used_idx++;
	/* If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call. */
	if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT))
		virtio_store_mb(vq->weak_barriers,
				&vring_used_event(&vq->split.vring),
				cpu_to_virtio16(_vq->vdev, vq->last_used_idx));

	LAST_ADD_TIME_INVALID(vq);

	END_USE(vq);
	return ret;
}

static void virtqueue_disable_cb_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
		vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->split.vring.avail->flags =
				cpu_to_virtio16(_vq->vdev,
						vq->split.avail_flags_shadow);
	}
}

static unsigned virtqueue_enable_cb_prepare_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 last_used_idx;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->split.vring.avail->flags =
				cpu_to_virtio16(_vq->vdev,
						vq->split.avail_flags_shadow);
	}
	vring_used_event(&vq->split.vring) = cpu_to_virtio16(_vq->vdev,
			last_used_idx = vq->last_used_idx);
	END_USE(vq);
	return last_used_idx;
}

static bool virtqueue_poll_split(struct virtqueue *_vq, unsigned last_used_idx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev,
			vq->split.vring.used->idx);
}

static bool virtqueue_enable_cb_delayed_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 bufs;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always update the event index to keep code simple. */
	if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->split.vring.avail->flags =
				cpu_to_virtio16(_vq->vdev,
						vq->split.avail_flags_shadow);
	}
	/* TODO: tune this threshold */
	bufs = (u16)(vq->split.avail_idx_shadow - vq->last_used_idx) * 3 / 4;

	virtio_store_mb(vq->weak_barriers,
			&vring_used_event(&vq->split.vring),
			cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs));

	if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->split.vring.used->idx)
					- vq->last_used_idx) > bufs)) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}
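
/*
 * Worked example of the 3/4 heuristic above: with
 * avail_idx_shadow == 100 and last_used_idx == 20 there are 80 buffers
 * outstanding, so bufs == 60 and the used event is set to index 80;
 * the device will interrupt only once three quarters of the
 * outstanding buffers have been consumed (unless the check below
 * detects that we raced with it).
 */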

static void *virtqueue_detach_unused_buf_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i;
	void *buf;

	START_USE(vq);

	for (i = 0; i < vq->split.vring.num; i++) {
		if (!vq->split.desc_state[i].data)
			continue;
		/* detach_buf_split clears data, so grab it now. */
		buf = vq->split.desc_state[i].data;
		detach_buf_split(vq, i, NULL);
		vq->split.avail_idx_shadow--;
		vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
				vq->split.avail_idx_shadow);
		END_USE(vq);
		return buf;
	}
	/* That should have freed everything. */
	BUG_ON(vq->vq.num_free != vq->split.vring.num);

	END_USE(vq);
	return NULL;
}

static struct virtqueue *vring_create_virtqueue_split(
	unsigned int index,
	unsigned int num,
	unsigned int vring_align,
	struct virtio_device *vdev,
	bool weak_barriers,
	bool may_reduce_num,
	bool context,
	bool (*notify)(struct virtqueue *),
	void (*callback)(struct virtqueue *),
	const char *name)
{
	struct virtqueue *vq;
	void *queue = NULL;
	dma_addr_t dma_addr;
	size_t queue_size_in_bytes;
	struct vring vring;

	/* We assume num is a power of 2. */
	if (num & (num - 1)) {
		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
		return NULL;
	}

	/* TODO: allocate each queue chunk individually */
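	/*
	 * Keep halving the ring until the allocation succeeds or the
	 * ring fits in one page: e.g. on a 4K-page system a 256-entry
	 * split ring needs roughly 10K of contiguous memory, so under
	 * memory pressure we may fall back to 128, 64, ... entries.
	 */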
	for (; num && vring_size(num, vring_align) > PAGE_SIZE; num /= 2) {
		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
					  &dma_addr,
					  GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
		if (queue)
			break;
		if (!may_reduce_num)
			return NULL;
	}

	if (!num)
		return NULL;

	if (!queue) {
		/* Try to get a single page. You are my only hope! */
		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
					  &dma_addr, GFP_KERNEL|__GFP_ZERO);
	}
	if (!queue)
		return NULL;

	queue_size_in_bytes = vring_size(num, vring_align);
	vring_init(&vring, num, queue, vring_align);

	vq = __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
				   notify, callback, name);
	if (!vq) {
		vring_free_queue(vdev, queue_size_in_bytes, queue,
				 dma_addr);
		return NULL;
	}

	to_vvq(vq)->split.queue_dma_addr = dma_addr;
	to_vvq(vq)->split.queue_size_in_bytes = queue_size_in_bytes;
	to_vvq(vq)->we_own_ring = true;

	return vq;
}
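
/*
 * Callers normally reach this through vring_create_virtqueue() (later
 * in this file), which selects the packed or split variant based on
 * the VIRTIO_F_RING_PACKED feature bit.
 */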


/*
 * Packed ring specific functions - *_packed().
 */

static void vring_unmap_state_packed(const struct vring_virtqueue *vq,
				     struct vring_desc_extra_packed *state)
{
	u16 flags;

	if (!vq->use_dma_api)
		return;

	flags = state->flags;

	if (flags & VRING_DESC_F_INDIRECT) {
		dma_unmap_single(vring_dma_dev(vq),
				 state->addr, state->len,
				 (flags & VRING_DESC_F_WRITE) ?
				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
	} else {
		dma_unmap_page(vring_dma_dev(vq),
			       state->addr, state->len,
			       (flags & VRING_DESC_F_WRITE) ?
			       DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}
}

static void vring_unmap_desc_packed(const struct vring_virtqueue *vq,
				   struct vring_packed_desc *desc)
{
	u16 flags;

	if (!vq->use_dma_api)
		return;

	flags = le16_to_cpu(desc->flags);

	if (flags & VRING_DESC_F_INDIRECT) {
		dma_unmap_single(vring_dma_dev(vq),
				 le64_to_cpu(desc->addr),
				 le32_to_cpu(desc->len),
				 (flags & VRING_DESC_F_WRITE) ?
				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
	} else {
		dma_unmap_page(vring_dma_dev(vq),
			       le64_to_cpu(desc->addr),
			       le32_to_cpu(desc->len),
			       (flags & VRING_DESC_F_WRITE) ?
			       DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}
}

static struct vring_packed_desc *alloc_indirect_packed(unsigned int total_sg,
						       gfp_t gfp)
{
	struct vring_packed_desc *desc;

	/*
	 * We require lowmem mappings for the descriptors because
	 * otherwise virt_to_phys will give us bogus addresses in the
	 * virtqueue.
	 */
	gfp &= ~__GFP_HIGHMEM;

	desc = kmalloc_array(total_sg, sizeof(struct vring_packed_desc), gfp);

	return desc;
}

static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
				       struct scatterlist *sgs[],
				       unsigned int total_sg,
				       unsigned int out_sgs,
				       unsigned int in_sgs,
				       void *data,
				       gfp_t gfp)
{
	struct vring_packed_desc *desc;
	struct scatterlist *sg;
	unsigned int i, n, err_idx;
	u16 head, id;
	dma_addr_t addr;

	head = vq->packed.next_avail_idx;
	desc = alloc_indirect_packed(total_sg, gfp);
	if (!desc)
		return -ENOMEM;

	if (unlikely(vq->vq.num_free < 1)) {
		pr_debug("Can't add buf len 1 - avail = 0\n");
		kfree(desc);
		END_USE(vq);
		return -ENOSPC;
	}

	i = 0;
	id = vq->free_head;
	BUG_ON(id == vq->packed.vring.num);

	for (n = 0; n < out_sgs + in_sgs; n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			addr = vring_map_one_sg(vq, sg, n < out_sgs ?
					DMA_TO_DEVICE : DMA_FROM_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			desc[i].flags = cpu_to_le16(n < out_sgs ?
						0 : VRING_DESC_F_WRITE);
			desc[i].addr = cpu_to_le64(addr);
			desc[i].len = cpu_to_le32(sg->length);
			i++;
		}
	}

	/* Now that the indirect table is filled in, map it. */
	addr = vring_map_single(vq, desc,
			total_sg * sizeof(struct vring_packed_desc),
			DMA_TO_DEVICE);
	if (vring_mapping_error(vq, addr))
		goto unmap_release;

	vq->packed.vring.desc[head].addr = cpu_to_le64(addr);
	vq->packed.vring.desc[head].len = cpu_to_le32(total_sg *
				sizeof(struct vring_packed_desc));
	vq->packed.vring.desc[head].id = cpu_to_le16(id);

	if (vq->use_dma_api) {
		vq->packed.desc_extra[id].addr = addr;
		vq->packed.desc_extra[id].len = total_sg *
				sizeof(struct vring_packed_desc);
		vq->packed.desc_extra[id].flags = VRING_DESC_F_INDIRECT |
						  vq->packed.avail_used_flags;
	}

	/*
	 * A driver MUST NOT make the first descriptor in the list
	 * available before all subsequent descriptors comprising
	 * the list are made available.
	 */
	virtio_wmb(vq->weak_barriers);
	vq->packed.vring.desc[head].flags = cpu_to_le16(VRING_DESC_F_INDIRECT |
						vq->packed.avail_used_flags);

	/* We're using some buffers from the free list. */
	vq->vq.num_free -= 1;

	/* Update free pointer */
	n = head + 1;
	if (n >= vq->packed.vring.num) {
		n = 0;
		vq->packed.avail_wrap_counter ^= 1;
		vq->packed.avail_used_flags ^=
				1 << VRING_PACKED_DESC_F_AVAIL |
				1 << VRING_PACKED_DESC_F_USED;
	}
	vq->packed.next_avail_idx = n;
	vq->free_head = vq->packed.desc_state[id].next;

	/* Store token and indirect buffer state. */
	vq->packed.desc_state[id].num = 1;
	vq->packed.desc_state[id].data = data;
	vq->packed.desc_state[id].indir_desc = desc;
	vq->packed.desc_state[id].last = id;

	vq->num_added += 1;

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	return 0;

unmap_release:
	err_idx = i;

	for (i = 0; i < err_idx; i++)
		vring_unmap_desc_packed(vq, &desc[i]);

	kfree(desc);

	END_USE(vq);
	return -EIO;
}

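/*
 * In a packed ring, descriptor ownership is tracked by the AVAIL and
 * USED flag bits rather than by separate avail/used rings: the driver
 * makes a descriptor available by setting AVAIL to its wrap counter
 * and USED to the inverse, and the device marks it used by making the
 * two bits equal again.  On the first pass through the ring, available
 * descriptors carry AVAIL=1/USED=0; after the ring wraps (see the
 * avail_used_flags XOR above) they carry AVAIL=0/USED=1.
 */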
static inline int virtqueue_add_packed(struct virtqueue *_vq,
				       struct scatterlist *sgs[],
				       unsigned int total_sg,
				       unsigned int out_sgs,
				       unsigned int in_sgs,
				       void *data,
				       void *ctx,
				       gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	struct vring_packed_desc *desc;
	struct scatterlist *sg;
	unsigned int i, n, c, descs_used, err_idx;
	__le16 uninitialized_var(head_flags), flags;
	u16 head, id, uninitialized_var(prev), curr, avail_used_flags;

	START_USE(vq);

	BUG_ON(data == NULL);
	BUG_ON(ctx && vq->indirect);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return -EIO;
	}

	LAST_ADD_TIME_UPDATE(vq);

	BUG_ON(total_sg == 0);

	if (virtqueue_use_indirect(_vq, total_sg))
		return virtqueue_add_indirect_packed(vq, sgs, total_sg,
				out_sgs, in_sgs, data, gfp);

	head = vq->packed.next_avail_idx;
	avail_used_flags = vq->packed.avail_used_flags;

	WARN_ON_ONCE(total_sg > vq->packed.vring.num && !vq->indirect);

	desc = vq->packed.vring.desc;
	i = head;
	descs_used = total_sg;

	if (unlikely(vq->vq.num_free < descs_used)) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 descs_used, vq->vq.num_free);
		END_USE(vq);
		return -ENOSPC;
	}

	id = vq->free_head;
	BUG_ON(id == vq->packed.vring.num);

	curr = id;
	c = 0;
	for (n = 0; n < out_sgs + in_sgs; n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr = vring_map_one_sg(vq, sg, n < out_sgs ?
					DMA_TO_DEVICE : DMA_FROM_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			flags = cpu_to_le16(vq->packed.avail_used_flags |
				    (++c == total_sg ? 0 : VRING_DESC_F_NEXT) |
				    (n < out_sgs ? 0 : VRING_DESC_F_WRITE));
			if (i == head)
				head_flags = flags;
			else
				desc[i].flags = flags;

			desc[i].addr = cpu_to_le64(addr);
			desc[i].len = cpu_to_le32(sg->length);
			desc[i].id = cpu_to_le16(id);

			if (unlikely(vq->use_dma_api)) {
				vq->packed.desc_extra[curr].addr = addr;
				vq->packed.desc_extra[curr].len = sg->length;
				vq->packed.desc_extra[curr].flags =
					le16_to_cpu(flags);
			}
			prev = curr;
11801ce9e605STiwei Bie 			curr = vq->packed.desc_state[curr].next;
11811ce9e605STiwei Bie 
11821ce9e605STiwei Bie 			if (unlikely(++i >= vq->packed.vring.num)) {
11831ce9e605STiwei Bie 				i = 0;
11841ce9e605STiwei Bie 				vq->packed.avail_used_flags ^=
11851ce9e605STiwei Bie 					1 << VRING_PACKED_DESC_F_AVAIL |
11861ce9e605STiwei Bie 					1 << VRING_PACKED_DESC_F_USED;
11871ce9e605STiwei Bie 			}
11881ce9e605STiwei Bie 		}
11891ce9e605STiwei Bie 	}
11901ce9e605STiwei Bie 
11911ce9e605STiwei Bie 	if (i < head)
11921ce9e605STiwei Bie 		vq->packed.avail_wrap_counter ^= 1;
11931ce9e605STiwei Bie 
11941ce9e605STiwei Bie 	/* We're using some buffers from the free list. */
11951ce9e605STiwei Bie 	vq->vq.num_free -= descs_used;
11961ce9e605STiwei Bie 
11971ce9e605STiwei Bie 	/* Update free pointer */
11981ce9e605STiwei Bie 	vq->packed.next_avail_idx = i;
11991ce9e605STiwei Bie 	vq->free_head = curr;
12001ce9e605STiwei Bie 
12011ce9e605STiwei Bie 	/* Store token. */
12021ce9e605STiwei Bie 	vq->packed.desc_state[id].num = descs_used;
12031ce9e605STiwei Bie 	vq->packed.desc_state[id].data = data;
12041ce9e605STiwei Bie 	vq->packed.desc_state[id].indir_desc = ctx;
12051ce9e605STiwei Bie 	vq->packed.desc_state[id].last = prev;
12061ce9e605STiwei Bie 
12071ce9e605STiwei Bie 	/*
12081ce9e605STiwei Bie 	 * A driver MUST NOT make the first descriptor in the list
12091ce9e605STiwei Bie 	 * available before all subsequent descriptors comprising
12101ce9e605STiwei Bie 	 * the list are made available.
12111ce9e605STiwei Bie 	 */
12121ce9e605STiwei Bie 	virtio_wmb(vq->weak_barriers);
12131ce9e605STiwei Bie 	vq->packed.vring.desc[head].flags = head_flags;
12141ce9e605STiwei Bie 	vq->num_added += descs_used;
12151ce9e605STiwei Bie 
12161ce9e605STiwei Bie 	pr_debug("Added buffer head %i to %p\n", head, vq);
12171ce9e605STiwei Bie 	END_USE(vq);
12181ce9e605STiwei Bie 
12191ce9e605STiwei Bie 	return 0;
12201ce9e605STiwei Bie 
12211ce9e605STiwei Bie unmap_release:
12221ce9e605STiwei Bie 	err_idx = i;
12231ce9e605STiwei Bie 	i = head;
12241ce9e605STiwei Bie 
12251ce9e605STiwei Bie 	vq->packed.avail_used_flags = avail_used_flags;
12261ce9e605STiwei Bie 
12271ce9e605STiwei Bie 	for (n = 0; n < total_sg; n++) {
12281ce9e605STiwei Bie 		if (i == err_idx)
12291ce9e605STiwei Bie 			break;
12301ce9e605STiwei Bie 		vring_unmap_desc_packed(vq, &desc[i]);
12311ce9e605STiwei Bie 		i++;
12321ce9e605STiwei Bie 		if (i >= vq->packed.vring.num)
12331ce9e605STiwei Bie 			i = 0;
12341ce9e605STiwei Bie 	}
12351ce9e605STiwei Bie 
12361ce9e605STiwei Bie 	END_USE(vq);
12371ce9e605STiwei Bie 	return -EIO;
12381ce9e605STiwei Bie }
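
/*
 * Editor's illustrative sketch: the AVAIL/USED bits written into each
 * descriptor track the driver's wrap counter.  A hypothetical helper
 * computing the bits that vq->packed.avail_used_flags carries:
 *
 *	static u16 packed_avail_flags(bool avail_wrap_counter)
 *	{
 *		return (avail_wrap_counter << VRING_PACKED_DESC_F_AVAIL) |
 *		       (!avail_wrap_counter << VRING_PACKED_DESC_F_USED);
 *	}
 *
 * With the counter at 1 (its initial value), AVAIL=1/USED=0 marks a
 * slot available; after next_avail_idx wraps, both bits invert.
 */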
12391ce9e605STiwei Bie 
12401ce9e605STiwei Bie static bool virtqueue_kick_prepare_packed(struct virtqueue *_vq)
12411ce9e605STiwei Bie {
12421ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
1243f51f9826STiwei Bie 	u16 new, old, off_wrap, flags, wrap_counter, event_idx;
12441ce9e605STiwei Bie 	bool needs_kick;
12451ce9e605STiwei Bie 	union {
12461ce9e605STiwei Bie 		struct {
12471ce9e605STiwei Bie 			__le16 off_wrap;
12481ce9e605STiwei Bie 			__le16 flags;
12491ce9e605STiwei Bie 		};
12501ce9e605STiwei Bie 		u32 u32;
12511ce9e605STiwei Bie 	} snapshot;
12521ce9e605STiwei Bie 
12531ce9e605STiwei Bie 	START_USE(vq);
12541ce9e605STiwei Bie 
12551ce9e605STiwei Bie 	/*
12561ce9e605STiwei Bie 	 * We need to expose the new flags value before checking notification
12571ce9e605STiwei Bie 	 * suppressions.
12581ce9e605STiwei Bie 	 */
12591ce9e605STiwei Bie 	virtio_mb(vq->weak_barriers);
12601ce9e605STiwei Bie 
1261f51f9826STiwei Bie 	old = vq->packed.next_avail_idx - vq->num_added;
1262f51f9826STiwei Bie 	new = vq->packed.next_avail_idx;
12631ce9e605STiwei Bie 	vq->num_added = 0;
12641ce9e605STiwei Bie 
12651ce9e605STiwei Bie 	snapshot.u32 = *(u32 *)vq->packed.vring.device;
12661ce9e605STiwei Bie 	flags = le16_to_cpu(snapshot.flags);
12671ce9e605STiwei Bie 
12681ce9e605STiwei Bie 	LAST_ADD_TIME_CHECK(vq);
12691ce9e605STiwei Bie 	LAST_ADD_TIME_INVALID(vq);
12701ce9e605STiwei Bie 
1271f51f9826STiwei Bie 	if (flags != VRING_PACKED_EVENT_FLAG_DESC) {
12721ce9e605STiwei Bie 		needs_kick = (flags != VRING_PACKED_EVENT_FLAG_DISABLE);
1273f51f9826STiwei Bie 		goto out;
1274f51f9826STiwei Bie 	}
1275f51f9826STiwei Bie 
1276f51f9826STiwei Bie 	off_wrap = le16_to_cpu(snapshot.off_wrap);
1277f51f9826STiwei Bie 
1278f51f9826STiwei Bie 	wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR;
1279f51f9826STiwei Bie 	event_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);
1280f51f9826STiwei Bie 	if (wrap_counter != vq->packed.avail_wrap_counter)
1281f51f9826STiwei Bie 		event_idx -= vq->packed.vring.num;
1282f51f9826STiwei Bie 
1283f51f9826STiwei Bie 	needs_kick = vring_need_event(event_idx, new, old);
1284f51f9826STiwei Bie out:
12851ce9e605STiwei Bie 	END_USE(vq);
12861ce9e605STiwei Bie 	return needs_kick;
12871ce9e605STiwei Bie }
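
/*
 * Worked example (editor's note): vring_need_event(event_idx, new, old)
 * asks whether the device's event index fell inside the window of
 * descriptors added since the last kick, using free-running u16 math:
 *
 *	(u16)(new - event_idx - 1) < (u16)(new - old)
 *
 * e.g. old = 10, new = 14, event_idx = 12: (14 - 12 - 1) = 1 < 4, so a
 * kick is needed; with event_idx = 20 the left side wraps to a huge
 * unsigned value and no kick is sent.
 */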
12881ce9e605STiwei Bie 
12891ce9e605STiwei Bie static void detach_buf_packed(struct vring_virtqueue *vq,
12901ce9e605STiwei Bie 			      unsigned int id, void **ctx)
12911ce9e605STiwei Bie {
12921ce9e605STiwei Bie 	struct vring_desc_state_packed *state = NULL;
12931ce9e605STiwei Bie 	struct vring_packed_desc *desc;
12941ce9e605STiwei Bie 	unsigned int i, curr;
12951ce9e605STiwei Bie 
12961ce9e605STiwei Bie 	state = &vq->packed.desc_state[id];
12971ce9e605STiwei Bie 
12981ce9e605STiwei Bie 	/* Clear data ptr. */
12991ce9e605STiwei Bie 	state->data = NULL;
13001ce9e605STiwei Bie 
13011ce9e605STiwei Bie 	vq->packed.desc_state[state->last].next = vq->free_head;
13021ce9e605STiwei Bie 	vq->free_head = id;
13031ce9e605STiwei Bie 	vq->vq.num_free += state->num;
13041ce9e605STiwei Bie 
13051ce9e605STiwei Bie 	if (unlikely(vq->use_dma_api)) {
13061ce9e605STiwei Bie 		curr = id;
13071ce9e605STiwei Bie 		for (i = 0; i < state->num; i++) {
13081ce9e605STiwei Bie 			vring_unmap_state_packed(vq,
13091ce9e605STiwei Bie 				&vq->packed.desc_extra[curr]);
13101ce9e605STiwei Bie 			curr = vq->packed.desc_state[curr].next;
13111ce9e605STiwei Bie 		}
13121ce9e605STiwei Bie 	}
13131ce9e605STiwei Bie 
13141ce9e605STiwei Bie 	if (vq->indirect) {
13151ce9e605STiwei Bie 		u32 len;
13161ce9e605STiwei Bie 
13171ce9e605STiwei Bie 		/* Free the indirect table, if any, now that it's unmapped. */
13181ce9e605STiwei Bie 		desc = state->indir_desc;
13191ce9e605STiwei Bie 		if (!desc)
13201ce9e605STiwei Bie 			return;
13211ce9e605STiwei Bie 
13221ce9e605STiwei Bie 		if (vq->use_dma_api) {
13231ce9e605STiwei Bie 			len = vq->packed.desc_extra[id].len;
13241ce9e605STiwei Bie 			for (i = 0; i < len / sizeof(struct vring_packed_desc);
13251ce9e605STiwei Bie 					i++)
13261ce9e605STiwei Bie 				vring_unmap_desc_packed(vq, &desc[i]);
13271ce9e605STiwei Bie 		}
13281ce9e605STiwei Bie 		kfree(desc);
13291ce9e605STiwei Bie 		state->indir_desc = NULL;
13301ce9e605STiwei Bie 	} else if (ctx) {
13311ce9e605STiwei Bie 		*ctx = state->indir_desc;
13321ce9e605STiwei Bie 	}
13331ce9e605STiwei Bie }
13341ce9e605STiwei Bie 
13351ce9e605STiwei Bie static inline bool is_used_desc_packed(const struct vring_virtqueue *vq,
13361ce9e605STiwei Bie 				       u16 idx, bool used_wrap_counter)
13371ce9e605STiwei Bie {
13381ce9e605STiwei Bie 	bool avail, used;
13391ce9e605STiwei Bie 	u16 flags;
13401ce9e605STiwei Bie 
13411ce9e605STiwei Bie 	flags = le16_to_cpu(vq->packed.vring.desc[idx].flags);
13421ce9e605STiwei Bie 	avail = !!(flags & (1 << VRING_PACKED_DESC_F_AVAIL));
13431ce9e605STiwei Bie 	used = !!(flags & (1 << VRING_PACKED_DESC_F_USED));
13441ce9e605STiwei Bie 
13451ce9e605STiwei Bie 	return avail == used && used == used_wrap_counter;
13461ce9e605STiwei Bie }
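
/*
 * Worked example (editor's note): with used_wrap_counter == 1 a
 * descriptor is used once the device has set both bits
 * (AVAIL == USED == 1); after the device's used index wraps,
 * AVAIL == USED == 0 together with used_wrap_counter == 0 means the
 * same thing.
 */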
13471ce9e605STiwei Bie 
13481ce9e605STiwei Bie static inline bool more_used_packed(const struct vring_virtqueue *vq)
13491ce9e605STiwei Bie {
13501ce9e605STiwei Bie 	return is_used_desc_packed(vq, vq->last_used_idx,
13511ce9e605STiwei Bie 			vq->packed.used_wrap_counter);
13521ce9e605STiwei Bie }
13531ce9e605STiwei Bie 
13541ce9e605STiwei Bie static void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq,
13551ce9e605STiwei Bie 					  unsigned int *len,
13561ce9e605STiwei Bie 					  void **ctx)
13571ce9e605STiwei Bie {
13581ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
13591ce9e605STiwei Bie 	u16 last_used, id;
13601ce9e605STiwei Bie 	void *ret;
13611ce9e605STiwei Bie 
13621ce9e605STiwei Bie 	START_USE(vq);
13631ce9e605STiwei Bie 
13641ce9e605STiwei Bie 	if (unlikely(vq->broken)) {
13651ce9e605STiwei Bie 		END_USE(vq);
13661ce9e605STiwei Bie 		return NULL;
13671ce9e605STiwei Bie 	}
13681ce9e605STiwei Bie 
13691ce9e605STiwei Bie 	if (!more_used_packed(vq)) {
13701ce9e605STiwei Bie 		pr_debug("No more buffers in queue\n");
13711ce9e605STiwei Bie 		END_USE(vq);
13721ce9e605STiwei Bie 		return NULL;
13731ce9e605STiwei Bie 	}
13741ce9e605STiwei Bie 
13751ce9e605STiwei Bie 	/* Only get used elements after they have been exposed by host. */
13761ce9e605STiwei Bie 	virtio_rmb(vq->weak_barriers);
13771ce9e605STiwei Bie 
13781ce9e605STiwei Bie 	last_used = vq->last_used_idx;
13791ce9e605STiwei Bie 	id = le16_to_cpu(vq->packed.vring.desc[last_used].id);
13801ce9e605STiwei Bie 	*len = le32_to_cpu(vq->packed.vring.desc[last_used].len);
13811ce9e605STiwei Bie 
13821ce9e605STiwei Bie 	if (unlikely(id >= vq->packed.vring.num)) {
13831ce9e605STiwei Bie 		BAD_RING(vq, "id %u out of range\n", id);
13841ce9e605STiwei Bie 		return NULL;
13851ce9e605STiwei Bie 	}
13861ce9e605STiwei Bie 	if (unlikely(!vq->packed.desc_state[id].data)) {
13871ce9e605STiwei Bie 		BAD_RING(vq, "id %u is not a head!\n", id);
13881ce9e605STiwei Bie 		return NULL;
13891ce9e605STiwei Bie 	}
13901ce9e605STiwei Bie 
13911ce9e605STiwei Bie 	/* detach_buf_packed clears data, so grab it now. */
13921ce9e605STiwei Bie 	ret = vq->packed.desc_state[id].data;
13931ce9e605STiwei Bie 	detach_buf_packed(vq, id, ctx);
13941ce9e605STiwei Bie 
13951ce9e605STiwei Bie 	vq->last_used_idx += vq->packed.desc_state[id].num;
13961ce9e605STiwei Bie 	if (unlikely(vq->last_used_idx >= vq->packed.vring.num)) {
13971ce9e605STiwei Bie 		vq->last_used_idx -= vq->packed.vring.num;
13981ce9e605STiwei Bie 		vq->packed.used_wrap_counter ^= 1;
13991ce9e605STiwei Bie 	}
14001ce9e605STiwei Bie 
1401f51f9826STiwei Bie 	/*
1402f51f9826STiwei Bie 	 * If we expect an interrupt for the next entry, tell host
1403f51f9826STiwei Bie 	 * by writing event index and flush out the write before
1404f51f9826STiwei Bie 	 * the read in the next get_buf call.
1405f51f9826STiwei Bie 	 */
1406f51f9826STiwei Bie 	if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DESC)
1407f51f9826STiwei Bie 		virtio_store_mb(vq->weak_barriers,
1408f51f9826STiwei Bie 				&vq->packed.vring.driver->off_wrap,
1409f51f9826STiwei Bie 				cpu_to_le16(vq->last_used_idx |
1410f51f9826STiwei Bie 					(vq->packed.used_wrap_counter <<
1411f51f9826STiwei Bie 					 VRING_PACKED_EVENT_F_WRAP_CTR)));
1412f51f9826STiwei Bie 
14131ce9e605STiwei Bie 	LAST_ADD_TIME_INVALID(vq);
14141ce9e605STiwei Bie 
14151ce9e605STiwei Bie 	END_USE(vq);
14161ce9e605STiwei Bie 	return ret;
14171ce9e605STiwei Bie }
14181ce9e605STiwei Bie 
14191ce9e605STiwei Bie static void virtqueue_disable_cb_packed(struct virtqueue *_vq)
14201ce9e605STiwei Bie {
14211ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
14221ce9e605STiwei Bie 
14231ce9e605STiwei Bie 	if (vq->packed.event_flags_shadow != VRING_PACKED_EVENT_FLAG_DISABLE) {
14241ce9e605STiwei Bie 		vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
14251ce9e605STiwei Bie 		vq->packed.vring.driver->flags =
14261ce9e605STiwei Bie 			cpu_to_le16(vq->packed.event_flags_shadow);
14271ce9e605STiwei Bie 	}
14281ce9e605STiwei Bie }
14291ce9e605STiwei Bie 
14301ce9e605STiwei Bie static unsigned virtqueue_enable_cb_prepare_packed(struct virtqueue *_vq)
14311ce9e605STiwei Bie {
14321ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
14331ce9e605STiwei Bie 
14341ce9e605STiwei Bie 	START_USE(vq);
14351ce9e605STiwei Bie 
14361ce9e605STiwei Bie 	/*
14371ce9e605STiwei Bie 	 * We optimistically turn back on interrupts, then check if there was
14381ce9e605STiwei Bie 	 * more to do.
14391ce9e605STiwei Bie 	 */
14401ce9e605STiwei Bie 
1441f51f9826STiwei Bie 	if (vq->event) {
1442f51f9826STiwei Bie 		vq->packed.vring.driver->off_wrap =
1443f51f9826STiwei Bie 			cpu_to_le16(vq->last_used_idx |
1444f51f9826STiwei Bie 				(vq->packed.used_wrap_counter <<
1445f51f9826STiwei Bie 				 VRING_PACKED_EVENT_F_WRAP_CTR));
1446f51f9826STiwei Bie 		/*
1447f51f9826STiwei Bie 		 * We need to update event offset and event wrap
1448f51f9826STiwei Bie 		 * counter first before updating event flags.
1449f51f9826STiwei Bie 		 */
1450f51f9826STiwei Bie 		virtio_wmb(vq->weak_barriers);
1451f51f9826STiwei Bie 	}
1452f51f9826STiwei Bie 
14531ce9e605STiwei Bie 	if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) {
1454f51f9826STiwei Bie 		vq->packed.event_flags_shadow = vq->event ?
1455f51f9826STiwei Bie 				VRING_PACKED_EVENT_FLAG_DESC :
1456f51f9826STiwei Bie 				VRING_PACKED_EVENT_FLAG_ENABLE;
14571ce9e605STiwei Bie 		vq->packed.vring.driver->flags =
14581ce9e605STiwei Bie 				cpu_to_le16(vq->packed.event_flags_shadow);
14591ce9e605STiwei Bie 	}
14601ce9e605STiwei Bie 
14611ce9e605STiwei Bie 	END_USE(vq);
14621ce9e605STiwei Bie 	return vq->last_used_idx | ((u16)vq->packed.used_wrap_counter <<
14631ce9e605STiwei Bie 			VRING_PACKED_EVENT_F_WRAP_CTR);
14641ce9e605STiwei Bie }
14651ce9e605STiwei Bie 
14661ce9e605STiwei Bie static bool virtqueue_poll_packed(struct virtqueue *_vq, u16 off_wrap)
14671ce9e605STiwei Bie {
14681ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
14691ce9e605STiwei Bie 	bool wrap_counter;
14701ce9e605STiwei Bie 	u16 used_idx;
14711ce9e605STiwei Bie 
14721ce9e605STiwei Bie 	wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR;
14731ce9e605STiwei Bie 	used_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);
14741ce9e605STiwei Bie 
14751ce9e605STiwei Bie 	return is_used_desc_packed(vq, used_idx, wrap_counter);
14761ce9e605STiwei Bie }
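
/*
 * Editor's note: the opaque value consumed here (and produced by
 * virtqueue_enable_cb_prepare_packed()) packs the wrap counter into bit
 * VRING_PACKED_EVENT_F_WRAP_CTR (15) above a 15-bit used index, e.g.:
 *
 *	off_wrap = 0x8005  ->  wrap_counter = 1, used_idx = 5
 *	off_wrap = 0x0005  ->  wrap_counter = 0, used_idx = 5
 */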
14771ce9e605STiwei Bie 
14781ce9e605STiwei Bie static bool virtqueue_enable_cb_delayed_packed(struct virtqueue *_vq)
14791ce9e605STiwei Bie {
14801ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
14811ce9e605STiwei Bie 	u16 used_idx, wrap_counter;
1482f51f9826STiwei Bie 	u16 bufs;
14831ce9e605STiwei Bie 
14841ce9e605STiwei Bie 	START_USE(vq);
14851ce9e605STiwei Bie 
14861ce9e605STiwei Bie 	/*
14871ce9e605STiwei Bie 	 * We optimistically turn back on interrupts, then check if there was
14881ce9e605STiwei Bie 	 * more to do.
14891ce9e605STiwei Bie 	 */
14901ce9e605STiwei Bie 
1491f51f9826STiwei Bie 	if (vq->event) {
1492f51f9826STiwei Bie 		/* TODO: tune this threshold */
1493f51f9826STiwei Bie 		bufs = (vq->packed.vring.num - vq->vq.num_free) * 3 / 4;
14941ce9e605STiwei Bie 		wrap_counter = vq->packed.used_wrap_counter;
14951ce9e605STiwei Bie 
1496f51f9826STiwei Bie 		used_idx = vq->last_used_idx + bufs;
1497f51f9826STiwei Bie 		if (used_idx >= vq->packed.vring.num) {
1498f51f9826STiwei Bie 			used_idx -= vq->packed.vring.num;
1499f51f9826STiwei Bie 			wrap_counter ^= 1;
1500f51f9826STiwei Bie 		}
1501f51f9826STiwei Bie 
1502f51f9826STiwei Bie 		vq->packed.vring.driver->off_wrap = cpu_to_le16(used_idx |
1503f51f9826STiwei Bie 			(wrap_counter << VRING_PACKED_EVENT_F_WRAP_CTR));
1504f51f9826STiwei Bie 
1505f51f9826STiwei Bie 		/*
1506f51f9826STiwei Bie 		 * We need to update event offset and event wrap
1507f51f9826STiwei Bie 		 * counter first before updating event flags.
1508f51f9826STiwei Bie 		 */
1509f51f9826STiwei Bie 		virtio_wmb(vq->weak_barriers);
1510f51f9826STiwei Bie 	} else {
1511f51f9826STiwei Bie 		used_idx = vq->last_used_idx;
1512f51f9826STiwei Bie 		wrap_counter = vq->packed.used_wrap_counter;
1513f51f9826STiwei Bie 	}
1514f51f9826STiwei Bie 
15151ce9e605STiwei Bie 	if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) {
1516f51f9826STiwei Bie 		vq->packed.event_flags_shadow = vq->event ?
1517f51f9826STiwei Bie 				VRING_PACKED_EVENT_FLAG_DESC :
1518f51f9826STiwei Bie 				VRING_PACKED_EVENT_FLAG_ENABLE;
15191ce9e605STiwei Bie 		vq->packed.vring.driver->flags =
15201ce9e605STiwei Bie 				cpu_to_le16(vq->packed.event_flags_shadow);
15211ce9e605STiwei Bie 	}
15221ce9e605STiwei Bie 
15231ce9e605STiwei Bie 	/*
15241ce9e605STiwei Bie 	 * We need to update event suppression structure first
15251ce9e605STiwei Bie 	 * before re-checking for more used buffers.
15261ce9e605STiwei Bie 	 */
15271ce9e605STiwei Bie 	virtio_mb(vq->weak_barriers);
15281ce9e605STiwei Bie 
15291ce9e605STiwei Bie 	if (is_used_desc_packed(vq, used_idx, wrap_counter)) {
15301ce9e605STiwei Bie 		END_USE(vq);
15311ce9e605STiwei Bie 		return false;
15321ce9e605STiwei Bie 	}
15331ce9e605STiwei Bie 
15341ce9e605STiwei Bie 	END_USE(vq);
15351ce9e605STiwei Bie 	return true;
15361ce9e605STiwei Bie }
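
/*
 * Worked example (editor's note): with a 256-entry ring and 56 free
 * slots, 200 buffers are outstanding, so bufs = 200 * 3 / 4 = 150 and
 * the event index is placed 150 entries ahead of last_used_idx (modulo
 * the ring size, flipping the wrap counter on overflow).
 */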
15371ce9e605STiwei Bie 
15381ce9e605STiwei Bie static void *virtqueue_detach_unused_buf_packed(struct virtqueue *_vq)
15391ce9e605STiwei Bie {
15401ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
15411ce9e605STiwei Bie 	unsigned int i;
15421ce9e605STiwei Bie 	void *buf;
15431ce9e605STiwei Bie 
15441ce9e605STiwei Bie 	START_USE(vq);
15451ce9e605STiwei Bie 
15461ce9e605STiwei Bie 	for (i = 0; i < vq->packed.vring.num; i++) {
15471ce9e605STiwei Bie 		if (!vq->packed.desc_state[i].data)
15481ce9e605STiwei Bie 			continue;
15491ce9e605STiwei Bie 		/* detach_buf clears data, so grab it now. */
15501ce9e605STiwei Bie 		buf = vq->packed.desc_state[i].data;
15511ce9e605STiwei Bie 		detach_buf_packed(vq, i, NULL);
15521ce9e605STiwei Bie 		END_USE(vq);
15531ce9e605STiwei Bie 		return buf;
15541ce9e605STiwei Bie 	}
15551ce9e605STiwei Bie 	/* That should have freed everything. */
15561ce9e605STiwei Bie 	BUG_ON(vq->vq.num_free != vq->packed.vring.num);
15571ce9e605STiwei Bie 
15581ce9e605STiwei Bie 	END_USE(vq);
15591ce9e605STiwei Bie 	return NULL;
15601ce9e605STiwei Bie }
15611ce9e605STiwei Bie 
15621ce9e605STiwei Bie static struct virtqueue *vring_create_virtqueue_packed(
15631ce9e605STiwei Bie 	unsigned int index,
15641ce9e605STiwei Bie 	unsigned int num,
15651ce9e605STiwei Bie 	unsigned int vring_align,
15661ce9e605STiwei Bie 	struct virtio_device *vdev,
15671ce9e605STiwei Bie 	bool weak_barriers,
15681ce9e605STiwei Bie 	bool may_reduce_num,
15691ce9e605STiwei Bie 	bool context,
15701ce9e605STiwei Bie 	bool (*notify)(struct virtqueue *),
15711ce9e605STiwei Bie 	void (*callback)(struct virtqueue *),
15721ce9e605STiwei Bie 	const char *name)
15731ce9e605STiwei Bie {
15741ce9e605STiwei Bie 	struct vring_virtqueue *vq;
15751ce9e605STiwei Bie 	struct vring_packed_desc *ring;
15761ce9e605STiwei Bie 	struct vring_packed_desc_event *driver, *device;
15771ce9e605STiwei Bie 	dma_addr_t ring_dma_addr, driver_event_dma_addr, device_event_dma_addr;
15781ce9e605STiwei Bie 	size_t ring_size_in_bytes, event_size_in_bytes;
15791ce9e605STiwei Bie 	unsigned int i;
15801ce9e605STiwei Bie 
15811ce9e605STiwei Bie 	ring_size_in_bytes = num * sizeof(struct vring_packed_desc);
15821ce9e605STiwei Bie 
15831ce9e605STiwei Bie 	ring = vring_alloc_queue(vdev, ring_size_in_bytes,
15841ce9e605STiwei Bie 				 &ring_dma_addr,
15851ce9e605STiwei Bie 				 GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
15861ce9e605STiwei Bie 	if (!ring)
15871ce9e605STiwei Bie 		goto err_ring;
15881ce9e605STiwei Bie 
15891ce9e605STiwei Bie 	event_size_in_bytes = sizeof(struct vring_packed_desc_event);
15901ce9e605STiwei Bie 
15911ce9e605STiwei Bie 	driver = vring_alloc_queue(vdev, event_size_in_bytes,
15921ce9e605STiwei Bie 				   &driver_event_dma_addr,
15931ce9e605STiwei Bie 				   GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
15941ce9e605STiwei Bie 	if (!driver)
15951ce9e605STiwei Bie 		goto err_driver;
15961ce9e605STiwei Bie 
15971ce9e605STiwei Bie 	device = vring_alloc_queue(vdev, event_size_in_bytes,
15981ce9e605STiwei Bie 				   &device_event_dma_addr,
15991ce9e605STiwei Bie 				   GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
16001ce9e605STiwei Bie 	if (!device)
16011ce9e605STiwei Bie 		goto err_device;
16021ce9e605STiwei Bie 
16031ce9e605STiwei Bie 	vq = kmalloc(sizeof(*vq), GFP_KERNEL);
16041ce9e605STiwei Bie 	if (!vq)
16051ce9e605STiwei Bie 		goto err_vq;
16061ce9e605STiwei Bie 
16071ce9e605STiwei Bie 	vq->vq.callback = callback;
16081ce9e605STiwei Bie 	vq->vq.vdev = vdev;
16091ce9e605STiwei Bie 	vq->vq.name = name;
16101ce9e605STiwei Bie 	vq->vq.num_free = num;
16111ce9e605STiwei Bie 	vq->vq.index = index;
16121ce9e605STiwei Bie 	vq->we_own_ring = true;
16131ce9e605STiwei Bie 	vq->notify = notify;
16141ce9e605STiwei Bie 	vq->weak_barriers = weak_barriers;
16151ce9e605STiwei Bie 	vq->broken = false;
16161ce9e605STiwei Bie 	vq->last_used_idx = 0;
16171ce9e605STiwei Bie 	vq->num_added = 0;
16181ce9e605STiwei Bie 	vq->packed_ring = true;
16191ce9e605STiwei Bie 	vq->use_dma_api = vring_use_dma_api(vdev);
16201ce9e605STiwei Bie 	list_add_tail(&vq->vq.list, &vdev->vqs);
16211ce9e605STiwei Bie #ifdef DEBUG
16221ce9e605STiwei Bie 	vq->in_use = false;
16231ce9e605STiwei Bie 	vq->last_add_time_valid = false;
16241ce9e605STiwei Bie #endif
16251ce9e605STiwei Bie 
16261ce9e605STiwei Bie 	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
16271ce9e605STiwei Bie 		!context;
16281ce9e605STiwei Bie 	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
16291ce9e605STiwei Bie 
163045383fb0STiwei Bie 	if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
163145383fb0STiwei Bie 		vq->weak_barriers = false;
163245383fb0STiwei Bie 
16331ce9e605STiwei Bie 	vq->packed.ring_dma_addr = ring_dma_addr;
16341ce9e605STiwei Bie 	vq->packed.driver_event_dma_addr = driver_event_dma_addr;
16351ce9e605STiwei Bie 	vq->packed.device_event_dma_addr = device_event_dma_addr;
16361ce9e605STiwei Bie 
16371ce9e605STiwei Bie 	vq->packed.ring_size_in_bytes = ring_size_in_bytes;
16381ce9e605STiwei Bie 	vq->packed.event_size_in_bytes = event_size_in_bytes;
16391ce9e605STiwei Bie 
16401ce9e605STiwei Bie 	vq->packed.vring.num = num;
16411ce9e605STiwei Bie 	vq->packed.vring.desc = ring;
16421ce9e605STiwei Bie 	vq->packed.vring.driver = driver;
16431ce9e605STiwei Bie 	vq->packed.vring.device = device;
16441ce9e605STiwei Bie 
16451ce9e605STiwei Bie 	vq->packed.next_avail_idx = 0;
16461ce9e605STiwei Bie 	vq->packed.avail_wrap_counter = 1;
16471ce9e605STiwei Bie 	vq->packed.used_wrap_counter = 1;
16481ce9e605STiwei Bie 	vq->packed.event_flags_shadow = 0;
16491ce9e605STiwei Bie 	vq->packed.avail_used_flags = 1 << VRING_PACKED_DESC_F_AVAIL;
16501ce9e605STiwei Bie 
16511ce9e605STiwei Bie 	vq->packed.desc_state = kmalloc_array(num,
16521ce9e605STiwei Bie 			sizeof(struct vring_desc_state_packed),
16531ce9e605STiwei Bie 			GFP_KERNEL);
16541ce9e605STiwei Bie 	if (!vq->packed.desc_state)
16551ce9e605STiwei Bie 		goto err_desc_state;
16561ce9e605STiwei Bie 
16571ce9e605STiwei Bie 	memset(vq->packed.desc_state, 0,
16581ce9e605STiwei Bie 		num * sizeof(struct vring_desc_state_packed));
16591ce9e605STiwei Bie 
16601ce9e605STiwei Bie 	/* Put everything in free lists. */
16611ce9e605STiwei Bie 	vq->free_head = 0;
16621ce9e605STiwei Bie 	for (i = 0; i < num-1; i++)
16631ce9e605STiwei Bie 		vq->packed.desc_state[i].next = i + 1;
16641ce9e605STiwei Bie 
16651ce9e605STiwei Bie 	vq->packed.desc_extra = kmalloc_array(num,
16661ce9e605STiwei Bie 			sizeof(struct vring_desc_extra_packed),
16671ce9e605STiwei Bie 			GFP_KERNEL);
16681ce9e605STiwei Bie 	if (!vq->packed.desc_extra)
16691ce9e605STiwei Bie 		goto err_desc_extra;
16701ce9e605STiwei Bie 
16711ce9e605STiwei Bie 	memset(vq->packed.desc_extra, 0,
16721ce9e605STiwei Bie 		num * sizeof(struct vring_desc_extra_packed));
16731ce9e605STiwei Bie 
16741ce9e605STiwei Bie 	/* No callback?  Tell other side not to bother us. */
16751ce9e605STiwei Bie 	if (!callback) {
16761ce9e605STiwei Bie 		vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
16771ce9e605STiwei Bie 		vq->packed.vring.driver->flags =
16781ce9e605STiwei Bie 			cpu_to_le16(vq->packed.event_flags_shadow);
16791ce9e605STiwei Bie 	}
16801ce9e605STiwei Bie 
16811ce9e605STiwei Bie 	return &vq->vq;
16821ce9e605STiwei Bie 
16831ce9e605STiwei Bie err_desc_extra:
16841ce9e605STiwei Bie 	kfree(vq->packed.desc_state);
16851ce9e605STiwei Bie err_desc_state:
16861ce9e605STiwei Bie 	kfree(vq);
16871ce9e605STiwei Bie err_vq:
16881ce9e605STiwei Bie 	vring_free_queue(vdev, event_size_in_bytes, device, device_event_dma_addr);
16891ce9e605STiwei Bie err_device:
16901ce9e605STiwei Bie 	vring_free_queue(vdev, event_size_in_bytes, driver, driver_event_dma_addr);
16911ce9e605STiwei Bie err_driver:
16921ce9e605STiwei Bie 	vring_free_queue(vdev, ring_size_in_bytes, ring, ring_dma_addr);
16931ce9e605STiwei Bie err_ring:
16941ce9e605STiwei Bie 	return NULL;
16951ce9e605STiwei Bie }
16961ce9e605STiwei Bie 
16971ce9e605STiwei Bie 
16981ce9e605STiwei Bie /*
1699e6f633e5STiwei Bie  * Generic functions and exported symbols.
1700e6f633e5STiwei Bie  */
1701e6f633e5STiwei Bie 
1702e6f633e5STiwei Bie static inline int virtqueue_add(struct virtqueue *_vq,
1703e6f633e5STiwei Bie 				struct scatterlist *sgs[],
1704e6f633e5STiwei Bie 				unsigned int total_sg,
1705e6f633e5STiwei Bie 				unsigned int out_sgs,
1706e6f633e5STiwei Bie 				unsigned int in_sgs,
1707e6f633e5STiwei Bie 				void *data,
1708e6f633e5STiwei Bie 				void *ctx,
1709e6f633e5STiwei Bie 				gfp_t gfp)
1710e6f633e5STiwei Bie {
17111ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
17121ce9e605STiwei Bie 
17131ce9e605STiwei Bie 	return vq->packed_ring ? virtqueue_add_packed(_vq, sgs, total_sg,
17141ce9e605STiwei Bie 					out_sgs, in_sgs, data, ctx, gfp) :
17151ce9e605STiwei Bie 				 virtqueue_add_split(_vq, sgs, total_sg,
1716e6f633e5STiwei Bie 					out_sgs, in_sgs, data, ctx, gfp);
1717e6f633e5STiwei Bie }
1718e6f633e5STiwei Bie 
1719e6f633e5STiwei Bie /**
1720e6f633e5STiwei Bie  * virtqueue_add_sgs - expose buffers to other end
1721e6f633e5STiwei Bie  * @vq: the struct virtqueue we're talking about.
1722e6f633e5STiwei Bie  * @sgs: array of terminated scatterlists.
1723e6f633e5STiwei Bie  * @out_sgs: the number of scatterlists readable by other side
1724e6f633e5STiwei Bie  * @in_sgs: the number of scatterlists which are writable (after readable ones)
1725e6f633e5STiwei Bie  * @data: the token identifying the buffer.
1726e6f633e5STiwei Bie  * @gfp: how to do memory allocations (if necessary).
1727e6f633e5STiwei Bie  *
1728e6f633e5STiwei Bie  * Caller must ensure we don't call this with other virtqueue operations
1729e6f633e5STiwei Bie  * at the same time (except where noted).
1730e6f633e5STiwei Bie  *
1731e6f633e5STiwei Bie  * Returns zero or a negative error (i.e. ENOSPC, ENOMEM, EIO).
1732e6f633e5STiwei Bie  */
1733e6f633e5STiwei Bie int virtqueue_add_sgs(struct virtqueue *_vq,
1734e6f633e5STiwei Bie 		      struct scatterlist *sgs[],
1735e6f633e5STiwei Bie 		      unsigned int out_sgs,
1736e6f633e5STiwei Bie 		      unsigned int in_sgs,
1737e6f633e5STiwei Bie 		      void *data,
1738e6f633e5STiwei Bie 		      gfp_t gfp)
1739e6f633e5STiwei Bie {
1740e6f633e5STiwei Bie 	unsigned int i, total_sg = 0;
1741e6f633e5STiwei Bie 
1742e6f633e5STiwei Bie 	/* Count them first. */
1743e6f633e5STiwei Bie 	for (i = 0; i < out_sgs + in_sgs; i++) {
1744e6f633e5STiwei Bie 		struct scatterlist *sg;
1745e6f633e5STiwei Bie 
1746e6f633e5STiwei Bie 		for (sg = sgs[i]; sg; sg = sg_next(sg))
1747e6f633e5STiwei Bie 			total_sg++;
1748e6f633e5STiwei Bie 	}
1749e6f633e5STiwei Bie 	return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs,
1750e6f633e5STiwei Bie 			     data, NULL, gfp);
1751e6f633e5STiwei Bie }
1752e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
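
/*
 * Example (editor's illustrative sketch): queueing a request with one
 * readable and one writable segment.  "req", "resp" and "vq" are
 * hypothetical driver state:
 *
 *	struct scatterlist hdr, status, *sgs[2];
 *
 *	sg_init_one(&hdr, req, sizeof(*req));
 *	sg_init_one(&status, resp, sizeof(*resp));
 *	sgs[0] = &hdr;
 *	sgs[1] = &status;
 *	if (virtqueue_add_sgs(vq, sgs, 1, 1, req, GFP_ATOMIC) == 0)
 *		virtqueue_kick(vq);
 */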
1753e6f633e5STiwei Bie 
1754e6f633e5STiwei Bie /**
1755e6f633e5STiwei Bie  * virtqueue_add_outbuf - expose output buffers to other end
1756e6f633e5STiwei Bie  * @vq: the struct virtqueue we're talking about.
1757e6f633e5STiwei Bie  * @sg: scatterlist (must be well-formed and terminated!)
1758e6f633e5STiwei Bie  * @num: the number of entries in @sg readable by other side
1759e6f633e5STiwei Bie  * @data: the token identifying the buffer.
1760e6f633e5STiwei Bie  * @gfp: how to do memory allocations (if necessary).
1761e6f633e5STiwei Bie  *
1762e6f633e5STiwei Bie  * Caller must ensure we don't call this with other virtqueue operations
1763e6f633e5STiwei Bie  * at the same time (except where noted).
1764e6f633e5STiwei Bie  *
1765e6f633e5STiwei Bie  * Returns zero or a negative error (i.e. ENOSPC, ENOMEM, EIO).
1766e6f633e5STiwei Bie  */
1767e6f633e5STiwei Bie int virtqueue_add_outbuf(struct virtqueue *vq,
1768e6f633e5STiwei Bie 			 struct scatterlist *sg, unsigned int num,
1769e6f633e5STiwei Bie 			 void *data,
1770e6f633e5STiwei Bie 			 gfp_t gfp)
1771e6f633e5STiwei Bie {
1772e6f633e5STiwei Bie 	return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, gfp);
1773e6f633e5STiwei Bie }
1774e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);
1775e6f633e5STiwei Bie 
1776e6f633e5STiwei Bie /**
1777e6f633e5STiwei Bie  * virtqueue_add_inbuf - expose input buffers to other end
1778e6f633e5STiwei Bie  * @vq: the struct virtqueue we're talking about.
1779e6f633e5STiwei Bie  * @sg: scatterlist (must be well-formed and terminated!)
1780e6f633e5STiwei Bie  * @num: the number of entries in @sg writable by other side
1781e6f633e5STiwei Bie  * @data: the token identifying the buffer.
1782e6f633e5STiwei Bie  * @gfp: how to do memory allocations (if necessary).
1783e6f633e5STiwei Bie  *
1784e6f633e5STiwei Bie  * Caller must ensure we don't call this with other virtqueue operations
1785e6f633e5STiwei Bie  * at the same time (except where noted).
1786e6f633e5STiwei Bie  *
1787e6f633e5STiwei Bie  * Returns zero or a negative error (i.e. ENOSPC, ENOMEM, EIO).
1788e6f633e5STiwei Bie  */
1789e6f633e5STiwei Bie int virtqueue_add_inbuf(struct virtqueue *vq,
1790e6f633e5STiwei Bie 			struct scatterlist *sg, unsigned int num,
1791e6f633e5STiwei Bie 			void *data,
1792e6f633e5STiwei Bie 			gfp_t gfp)
1793e6f633e5STiwei Bie {
1794e6f633e5STiwei Bie 	return virtqueue_add(vq, &sg, num, 0, 1, data, NULL, gfp);
1795e6f633e5STiwei Bie }
1796e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);
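
/*
 * Example (editor's sketch): posting a receive buffer, as a network
 * driver would for its rx queue ("buf", "buf_len" and "rx_vq" are
 * hypothetical):
 *
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, buf, buf_len);
 *	err = virtqueue_add_inbuf(rx_vq, &sg, 1, buf, GFP_KERNEL);
 */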
1797e6f633e5STiwei Bie 
1798e6f633e5STiwei Bie /**
1799e6f633e5STiwei Bie  * virtqueue_add_inbuf_ctx - expose input buffers to other end
1800e6f633e5STiwei Bie  * @vq: the struct virtqueue we're talking about.
1801e6f633e5STiwei Bie  * @sg: scatterlist (must be well-formed and terminated!)
1802e6f633e5STiwei Bie  * @num: the number of entries in @sg writable by other side
1803e6f633e5STiwei Bie  * @data: the token identifying the buffer.
1804e6f633e5STiwei Bie  * @ctx: extra context for the token
1805e6f633e5STiwei Bie  * @gfp: how to do memory allocations (if necessary).
1806e6f633e5STiwei Bie  *
1807e6f633e5STiwei Bie  * Caller must ensure we don't call this with other virtqueue operations
1808e6f633e5STiwei Bie  * at the same time (except where noted).
1809e6f633e5STiwei Bie  *
1810e6f633e5STiwei Bie  * Returns zero or a negative error (i.e. ENOSPC, ENOMEM, EIO).
1811e6f633e5STiwei Bie  */
1812e6f633e5STiwei Bie int virtqueue_add_inbuf_ctx(struct virtqueue *vq,
1813e6f633e5STiwei Bie 			struct scatterlist *sg, unsigned int num,
1814e6f633e5STiwei Bie 			void *data,
1815e6f633e5STiwei Bie 			void *ctx,
1816e6f633e5STiwei Bie 			gfp_t gfp)
1817e6f633e5STiwei Bie {
1818e6f633e5STiwei Bie 	return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, gfp);
1819e6f633e5STiwei Bie }
1820e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_ctx);
1821e6f633e5STiwei Bie 
1822e6f633e5STiwei Bie /**
1823e6f633e5STiwei Bie  * virtqueue_kick_prepare - first half of split virtqueue_kick call.
1824e6f633e5STiwei Bie  * @vq: the struct virtqueue
1825e6f633e5STiwei Bie  *
1826e6f633e5STiwei Bie  * Instead of virtqueue_kick(), you can do:
1827e6f633e5STiwei Bie  *	if (virtqueue_kick_prepare(vq))
1828e6f633e5STiwei Bie  *		virtqueue_notify(vq);
1829e6f633e5STiwei Bie  *
1830e6f633e5STiwei Bie  * This is sometimes useful because virtqueue_kick_prepare() needs
1831e6f633e5STiwei Bie  * to be serialized, but the actual virtqueue_notify() call does not.
1832e6f633e5STiwei Bie  */
1833e6f633e5STiwei Bie bool virtqueue_kick_prepare(struct virtqueue *_vq)
1834e6f633e5STiwei Bie {
18351ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
18361ce9e605STiwei Bie 
18371ce9e605STiwei Bie 	return vq->packed_ring ? virtqueue_kick_prepare_packed(_vq) :
18381ce9e605STiwei Bie 				 virtqueue_kick_prepare_split(_vq);
1839e6f633e5STiwei Bie }
1840e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);
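
/*
 * Example (editor's sketch): the prepare/notify split lets a driver
 * drop its lock before the (potentially expensive) device notification.
 * "priv", "sgs", "out", "in" and "req" are hypothetical driver state:
 *
 *	spin_lock_irqsave(&priv->lock, flags);
 *	err = virtqueue_add_sgs(vq, sgs, out, in, req, GFP_ATOMIC);
 *	kick = virtqueue_kick_prepare(vq);
 *	spin_unlock_irqrestore(&priv->lock, flags);
 *	if (kick)
 *		virtqueue_notify(vq);
 */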
1841e6f633e5STiwei Bie 
1842e6f633e5STiwei Bie /**
1843e6f633e5STiwei Bie  * virtqueue_notify - second half of split virtqueue_kick call.
1844e6f633e5STiwei Bie  * @vq: the struct virtqueue
1845e6f633e5STiwei Bie  *
1846e6f633e5STiwei Bie  * This does not need to be serialized.
1847e6f633e5STiwei Bie  *
1848e6f633e5STiwei Bie  * Returns false if host notify failed or queue is broken, otherwise true.
1849e6f633e5STiwei Bie  */
1850e6f633e5STiwei Bie bool virtqueue_notify(struct virtqueue *_vq)
1851e6f633e5STiwei Bie {
1852e6f633e5STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
1853e6f633e5STiwei Bie 
1854e6f633e5STiwei Bie 	if (unlikely(vq->broken))
1855e6f633e5STiwei Bie 		return false;
1856e6f633e5STiwei Bie 
1857e6f633e5STiwei Bie 	/* Prod other side to tell it about changes. */
1858e6f633e5STiwei Bie 	if (!vq->notify(_vq)) {
1859e6f633e5STiwei Bie 		vq->broken = true;
1860e6f633e5STiwei Bie 		return false;
1861e6f633e5STiwei Bie 	}
1862e6f633e5STiwei Bie 	return true;
1863e6f633e5STiwei Bie }
1864e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_notify);
1865e6f633e5STiwei Bie 
1866e6f633e5STiwei Bie /**
1867e6f633e5STiwei Bie  * virtqueue_kick - update after add_buf
1868e6f633e5STiwei Bie  * @vq: the struct virtqueue
1869e6f633e5STiwei Bie  *
1870e6f633e5STiwei Bie  * After one or more virtqueue_add_* calls, invoke this to kick
1871e6f633e5STiwei Bie  * the other side.
1872e6f633e5STiwei Bie  *
1873e6f633e5STiwei Bie  * Caller must ensure we don't call this with other virtqueue
1874e6f633e5STiwei Bie  * operations at the same time (except where noted).
1875e6f633e5STiwei Bie  *
1876e6f633e5STiwei Bie  * Returns false if kick failed, otherwise true.
1877e6f633e5STiwei Bie  */
1878e6f633e5STiwei Bie bool virtqueue_kick(struct virtqueue *vq)
1879e6f633e5STiwei Bie {
1880e6f633e5STiwei Bie 	if (virtqueue_kick_prepare(vq))
1881e6f633e5STiwei Bie 		return virtqueue_notify(vq);
1882e6f633e5STiwei Bie 	return true;
1883e6f633e5STiwei Bie }
1884e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_kick);
1885e6f633e5STiwei Bie 
1886e6f633e5STiwei Bie /**
1887e6f633e5STiwei Bie  * virtqueue_get_buf - get the next used buffer
1888e6f633e5STiwei Bie  * @vq: the struct virtqueue we're talking about.
1889e6f633e5STiwei Bie  * @len: the length written into the buffer
 * @ctx: extra context for the token (as stored by virtqueue_add_inbuf_ctx)
1890e6f633e5STiwei Bie  *
1891e6f633e5STiwei Bie  * If the device wrote data into the buffer, @len will be set to the
1892e6f633e5STiwei Bie  * amount written.  This means you don't need to clear the buffer
1893e6f633e5STiwei Bie  * beforehand to ensure there's no data leakage in the case of short
1894e6f633e5STiwei Bie  * writes.
1895e6f633e5STiwei Bie  *
1896e6f633e5STiwei Bie  * Caller must ensure we don't call this with other virtqueue
1897e6f633e5STiwei Bie  * operations at the same time (except where noted).
1898e6f633e5STiwei Bie  *
1899e6f633e5STiwei Bie  * Returns NULL if there are no used buffers, or the "data" token
1900e6f633e5STiwei Bie  * handed to virtqueue_add_*().
1901e6f633e5STiwei Bie  */
1902e6f633e5STiwei Bie void *virtqueue_get_buf_ctx(struct virtqueue *_vq, unsigned int *len,
1903e6f633e5STiwei Bie 			    void **ctx)
1904e6f633e5STiwei Bie {
19051ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
19061ce9e605STiwei Bie 
19071ce9e605STiwei Bie 	return vq->packed_ring ? virtqueue_get_buf_ctx_packed(_vq, len, ctx) :
19081ce9e605STiwei Bie 				 virtqueue_get_buf_ctx_split(_vq, len, ctx);
1909e6f633e5STiwei Bie }
1910e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_get_buf_ctx);
1911e6f633e5STiwei Bie 
1912e6f633e5STiwei Bie void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
1913e6f633e5STiwei Bie {
1914e6f633e5STiwei Bie 	return virtqueue_get_buf_ctx(_vq, len, NULL);
1915e6f633e5STiwei Bie }
1916e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_get_buf);
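
/*
 * Example (editor's sketch): draining completions from a virtqueue
 * callback, where complete_request() is a hypothetical driver helper:
 *
 *	unsigned int len;
 *	void *buf;
 *
 *	while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
 *		complete_request(buf, len);
 */
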
1917e6f633e5STiwei Bie /**
1918e6f633e5STiwei Bie  * virtqueue_disable_cb - disable callbacks
1919e6f633e5STiwei Bie  * @vq: the struct virtqueue we're talking about.
1920e6f633e5STiwei Bie  *
1921e6f633e5STiwei Bie  * Note that this is not necessarily synchronous, hence unreliable and only
1922e6f633e5STiwei Bie  * useful as an optimization.
1923e6f633e5STiwei Bie  *
1924e6f633e5STiwei Bie  * Unlike other operations, this need not be serialized.
1925e6f633e5STiwei Bie  */
1926e6f633e5STiwei Bie void virtqueue_disable_cb(struct virtqueue *_vq)
1927e6f633e5STiwei Bie {
19281ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
19291ce9e605STiwei Bie 
19301ce9e605STiwei Bie 	if (vq->packed_ring)
19311ce9e605STiwei Bie 		virtqueue_disable_cb_packed(_vq);
19321ce9e605STiwei Bie 	else
1933e6f633e5STiwei Bie 		virtqueue_disable_cb_split(_vq);
1934e6f633e5STiwei Bie }
1935e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
1936e6f633e5STiwei Bie 
1937e6f633e5STiwei Bie /**
1938e6f633e5STiwei Bie  * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
1939e6f633e5STiwei Bie  * @vq: the struct virtqueue we're talking about.
1940e6f633e5STiwei Bie  *
1941e6f633e5STiwei Bie  * This re-enables callbacks; it returns current queue state
1942e6f633e5STiwei Bie  * in an opaque unsigned value. This value should be later tested by
1943e6f633e5STiwei Bie  * virtqueue_poll, to detect a possible race between the driver checking for
1944e6f633e5STiwei Bie  * more work, and enabling callbacks.
1945e6f633e5STiwei Bie  *
1946e6f633e5STiwei Bie  * Caller must ensure we don't call this with other virtqueue
1947e6f633e5STiwei Bie  * operations at the same time (except where noted).
1948e6f633e5STiwei Bie  */
1949e6f633e5STiwei Bie unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
1950e6f633e5STiwei Bie {
19511ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
19521ce9e605STiwei Bie 
19531ce9e605STiwei Bie 	return vq->packed_ring ? virtqueue_enable_cb_prepare_packed(_vq) :
19541ce9e605STiwei Bie 				 virtqueue_enable_cb_prepare_split(_vq);
1955e6f633e5STiwei Bie }
1956e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);
1957e6f633e5STiwei Bie 
1958e6f633e5STiwei Bie /**
1959e6f633e5STiwei Bie  * virtqueue_poll - query pending used buffers
1960e6f633e5STiwei Bie  * @vq: the struct virtqueue we're talking about.
1961e6f633e5STiwei Bie  * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
1962e6f633e5STiwei Bie  *
1963e6f633e5STiwei Bie  * Returns "true" if there are pending used buffers in the queue.
1964e6f633e5STiwei Bie  *
1965e6f633e5STiwei Bie  * This does not need to be serialized.
1966e6f633e5STiwei Bie  */
1967e6f633e5STiwei Bie bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
1968e6f633e5STiwei Bie {
1969e6f633e5STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
1970e6f633e5STiwei Bie 
1971e6f633e5STiwei Bie 	virtio_mb(vq->weak_barriers);
19721ce9e605STiwei Bie 	return vq->packed_ring ? virtqueue_poll_packed(_vq, last_used_idx) :
19731ce9e605STiwei Bie 				 virtqueue_poll_split(_vq, last_used_idx);
1974e6f633e5STiwei Bie }
1975e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_poll);
1976e6f633e5STiwei Bie 
1977e6f633e5STiwei Bie /**
1978e6f633e5STiwei Bie  * virtqueue_enable_cb - restart callbacks after disable_cb.
1979e6f633e5STiwei Bie  * @vq: the struct virtqueue we're talking about.
1980e6f633e5STiwei Bie  *
1981e6f633e5STiwei Bie  * This re-enables callbacks; it returns "false" if there are pending
1982e6f633e5STiwei Bie  * buffers in the queue, to detect a possible race between the driver
1983e6f633e5STiwei Bie  * checking for more work, and enabling callbacks.
1984e6f633e5STiwei Bie  *
1985e6f633e5STiwei Bie  * Caller must ensure we don't call this with other virtqueue
1986e6f633e5STiwei Bie  * operations at the same time (except where noted).
1987e6f633e5STiwei Bie  */
1988e6f633e5STiwei Bie bool virtqueue_enable_cb(struct virtqueue *_vq)
1989e6f633e5STiwei Bie {
1990e6f633e5STiwei Bie 	unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);
1991e6f633e5STiwei Bie 
1992e6f633e5STiwei Bie 	return !virtqueue_poll(_vq, last_used_idx);
1993e6f633e5STiwei Bie }
1994e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
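
/*
 * Example (editor's sketch): the canonical race-free pattern pairs
 * virtqueue_disable_cb() with this function, so a buffer that arrives
 * between the final get_buf and re-enabling callbacks is not missed.
 * process() is a hypothetical driver helper:
 *
 *	do {
 *		virtqueue_disable_cb(vq);
 *		while ((buf = virtqueue_get_buf(vq, &len)))
 *			process(buf, len);
 *	} while (!virtqueue_enable_cb(vq));
 */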
1995e6f633e5STiwei Bie 
1996e6f633e5STiwei Bie /**
1997e6f633e5STiwei Bie  * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
1998e6f633e5STiwei Bie  * @vq: the struct virtqueue we're talking about.
1999e6f633e5STiwei Bie  *
2000e6f633e5STiwei Bie  * This re-enables callbacks but hints to the other side to delay
2001e6f633e5STiwei Bie  * interrupts until most of the available buffers have been processed;
2002e6f633e5STiwei Bie  * it returns "false" if there are many pending buffers in the queue,
2003e6f633e5STiwei Bie  * to detect a possible race between the driver checking for more work,
2004e6f633e5STiwei Bie  * and enabling callbacks.
2005e6f633e5STiwei Bie  *
2006e6f633e5STiwei Bie  * Caller must ensure we don't call this with other virtqueue
2007e6f633e5STiwei Bie  * operations at the same time (except where noted).
2008e6f633e5STiwei Bie  */
2009e6f633e5STiwei Bie bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
2010e6f633e5STiwei Bie {
20111ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
20121ce9e605STiwei Bie 
20131ce9e605STiwei Bie 	return vq->packed_ring ? virtqueue_enable_cb_delayed_packed(_vq) :
20141ce9e605STiwei Bie 				 virtqueue_enable_cb_delayed_split(_vq);
2015e6f633e5STiwei Bie }
2016e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
2017e6f633e5STiwei Bie 
2018138fd251STiwei Bie /**
2019138fd251STiwei Bie  * virtqueue_detach_unused_buf - detach first unused buffer
2020138fd251STiwei Bie  * @vq: the struct virtqueue we're talking about.
2021138fd251STiwei Bie  *
2022138fd251STiwei Bie  * Returns NULL or the "data" token handed to virtqueue_add_*().
2023138fd251STiwei Bie  * This is not valid on an active queue; it is useful only for device
2024138fd251STiwei Bie  * shutdown.
2025138fd251STiwei Bie  */
2026138fd251STiwei Bie void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
2027138fd251STiwei Bie {
20281ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
20291ce9e605STiwei Bie 
20301ce9e605STiwei Bie 	return vq->packed_ring ? virtqueue_detach_unused_buf_packed(_vq) :
20311ce9e605STiwei Bie 				 virtqueue_detach_unused_buf_split(_vq);
2032138fd251STiwei Bie }
20337c5e9ed0SMichael S. Tsirkin EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
2034c021eac4SShirley Ma 
2035138fd251STiwei Bie static inline bool more_used(const struct vring_virtqueue *vq)
2036138fd251STiwei Bie {
20371ce9e605STiwei Bie 	return vq->packed_ring ? more_used_packed(vq) : more_used_split(vq);
2038138fd251STiwei Bie }
2039138fd251STiwei Bie 
20400a8a69ddSRusty Russell irqreturn_t vring_interrupt(int irq, void *_vq)
20410a8a69ddSRusty Russell {
20420a8a69ddSRusty Russell 	struct vring_virtqueue *vq = to_vvq(_vq);
20430a8a69ddSRusty Russell 
20440a8a69ddSRusty Russell 	if (!more_used(vq)) {
20450a8a69ddSRusty Russell 		pr_debug("virtqueue interrupt with no work for %p\n", vq);
20460a8a69ddSRusty Russell 		return IRQ_NONE;
20470a8a69ddSRusty Russell 	}
20480a8a69ddSRusty Russell 
20490a8a69ddSRusty Russell 	if (unlikely(vq->broken))
20500a8a69ddSRusty Russell 		return IRQ_HANDLED;
20510a8a69ddSRusty Russell 
20520a8a69ddSRusty Russell 	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
205318445c4dSRusty Russell 	if (vq->vq.callback)
205418445c4dSRusty Russell 		vq->vq.callback(&vq->vq);
20550a8a69ddSRusty Russell 
20560a8a69ddSRusty Russell 	return IRQ_HANDLED;
20570a8a69ddSRusty Russell }
2058c6fd4701SRusty Russell EXPORT_SYMBOL_GPL(vring_interrupt);
20590a8a69ddSRusty Russell 
20601ce9e605STiwei Bie /* Only available for split ring */
20612a2d1382SAndy Lutomirski struct virtqueue *__vring_new_virtqueue(unsigned int index,
20622a2d1382SAndy Lutomirski 					struct vring vring,
20630a8a69ddSRusty Russell 					struct virtio_device *vdev,
20647b21e34fSRusty Russell 					bool weak_barriers,
2065f94682ddSMichael S. Tsirkin 					bool context,
206646f9c2b9SHeinz Graalfs 					bool (*notify)(struct virtqueue *),
20679499f5e7SRusty Russell 					void (*callback)(struct virtqueue *),
20689499f5e7SRusty Russell 					const char *name)
20690a8a69ddSRusty Russell {
20700a8a69ddSRusty Russell 	unsigned int i;
20712a2d1382SAndy Lutomirski 	struct vring_virtqueue *vq;
20720a8a69ddSRusty Russell 
20731ce9e605STiwei Bie 	if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
20741ce9e605STiwei Bie 		return NULL;
20751ce9e605STiwei Bie 
2076cbeedb72STiwei Bie 	vq = kmalloc(sizeof(*vq), GFP_KERNEL);
20770a8a69ddSRusty Russell 	if (!vq)
20780a8a69ddSRusty Russell 		return NULL;
20790a8a69ddSRusty Russell 
20801ce9e605STiwei Bie 	vq->packed_ring = false;
20810a8a69ddSRusty Russell 	vq->vq.callback = callback;
20820a8a69ddSRusty Russell 	vq->vq.vdev = vdev;
20839499f5e7SRusty Russell 	vq->vq.name = name;
20842a2d1382SAndy Lutomirski 	vq->vq.num_free = vring.num;
208506ca287dSRusty Russell 	vq->vq.index = index;
20862a2d1382SAndy Lutomirski 	vq->we_own_ring = false;
20870a8a69ddSRusty Russell 	vq->notify = notify;
20887b21e34fSRusty Russell 	vq->weak_barriers = weak_barriers;
20890a8a69ddSRusty Russell 	vq->broken = false;
20900a8a69ddSRusty Russell 	vq->last_used_idx = 0;
20910a8a69ddSRusty Russell 	vq->num_added = 0;
2092fb3fba6bSTiwei Bie 	vq->use_dma_api = vring_use_dma_api(vdev);
20939499f5e7SRusty Russell 	list_add_tail(&vq->vq.list, &vdev->vqs);
20940a8a69ddSRusty Russell #ifdef DEBUG
20950a8a69ddSRusty Russell 	vq->in_use = false;
2096e93300b1SRusty Russell 	vq->last_add_time_valid = false;
20970a8a69ddSRusty Russell #endif
20980a8a69ddSRusty Russell 
20995a08b04fSMichael S. Tsirkin 	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
21005a08b04fSMichael S. Tsirkin 		!context;
2101a5c262c5SMichael S. Tsirkin 	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
21029fa29b9dSMark McLoughlin 
210345383fb0STiwei Bie 	if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
210445383fb0STiwei Bie 		vq->weak_barriers = false;
210545383fb0STiwei Bie 
2106d79dca75STiwei Bie 	vq->split.queue_dma_addr = 0;
2107d79dca75STiwei Bie 	vq->split.queue_size_in_bytes = 0;
2108d79dca75STiwei Bie 
2109e593bf97STiwei Bie 	vq->split.vring = vring;
2110e593bf97STiwei Bie 	vq->split.avail_flags_shadow = 0;
2111e593bf97STiwei Bie 	vq->split.avail_idx_shadow = 0;
2112e593bf97STiwei Bie 
21130a8a69ddSRusty Russell 	/* No callback?  Tell other side not to bother us. */
2114f277ec42SVenkatesh Srinivas 	if (!callback) {
2115e593bf97STiwei Bie 		vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
21160ea1e4a6SLadi Prosek 		if (!vq->event)
2117e593bf97STiwei Bie 			vq->split.vring.avail->flags = cpu_to_virtio16(vdev,
2118e593bf97STiwei Bie 					vq->split.avail_flags_shadow);
2119f277ec42SVenkatesh Srinivas 	}
21200a8a69ddSRusty Russell 
2121cbeedb72STiwei Bie 	vq->split.desc_state = kmalloc_array(vring.num,
2122cbeedb72STiwei Bie 			sizeof(struct vring_desc_state_split), GFP_KERNEL);
2123cbeedb72STiwei Bie 	if (!vq->split.desc_state) {
2124cbeedb72STiwei Bie 		kfree(vq);
2125cbeedb72STiwei Bie 		return NULL;
2126cbeedb72STiwei Bie 	}
2127cbeedb72STiwei Bie 
21280a8a69ddSRusty Russell 	/* Put everything in free lists. */
21290a8a69ddSRusty Russell 	vq->free_head = 0;
21302a2d1382SAndy Lutomirski 	for (i = 0; i < vring.num-1; i++)
2131e593bf97STiwei Bie 		vq->split.vring.desc[i].next = cpu_to_virtio16(vdev, i + 1);
2132cbeedb72STiwei Bie 	memset(vq->split.desc_state, 0, vring.num *
2133cbeedb72STiwei Bie 			sizeof(struct vring_desc_state_split));
21340a8a69ddSRusty Russell 
21350a8a69ddSRusty Russell 	return &vq->vq;
21360a8a69ddSRusty Russell }
21372a2d1382SAndy Lutomirski EXPORT_SYMBOL_GPL(__vring_new_virtqueue);
21382a2d1382SAndy Lutomirski 
21392a2d1382SAndy Lutomirski struct virtqueue *vring_create_virtqueue(
21402a2d1382SAndy Lutomirski 	unsigned int index,
21412a2d1382SAndy Lutomirski 	unsigned int num,
21422a2d1382SAndy Lutomirski 	unsigned int vring_align,
21432a2d1382SAndy Lutomirski 	struct virtio_device *vdev,
21442a2d1382SAndy Lutomirski 	bool weak_barriers,
21452a2d1382SAndy Lutomirski 	bool may_reduce_num,
2146f94682ddSMichael S. Tsirkin 	bool context,
21472a2d1382SAndy Lutomirski 	bool (*notify)(struct virtqueue *),
21482a2d1382SAndy Lutomirski 	void (*callback)(struct virtqueue *),
21492a2d1382SAndy Lutomirski 	const char *name)
21502a2d1382SAndy Lutomirski {
21521ce9e605STiwei Bie 	if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
21531ce9e605STiwei Bie 		return vring_create_virtqueue_packed(index, num, vring_align,
21541ce9e605STiwei Bie 				vdev, weak_barriers, may_reduce_num,
21551ce9e605STiwei Bie 				context, notify, callback, name);
21561ce9e605STiwei Bie 
2157d79dca75STiwei Bie 	return vring_create_virtqueue_split(index, num, vring_align,
2158d79dca75STiwei Bie 			vdev, weak_barriers, may_reduce_num,
2159d79dca75STiwei Bie 			context, notify, callback, name);
21602a2d1382SAndy Lutomirski }
21612a2d1382SAndy Lutomirski EXPORT_SYMBOL_GPL(vring_create_virtqueue);
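
/*
 * Example (editor's illustrative sketch): a transport allocating a
 * 256-entry queue; my_notify and my_callback are hypothetical:
 *
 *	vq = vring_create_virtqueue(0, 256, SMP_CACHE_BYTES, vdev,
 *				    true, true, false,
 *				    my_notify, my_callback, "requests");
 *
 * where the three bools are weak_barriers, may_reduce_num and context.
 */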
21622a2d1382SAndy Lutomirski 
21631ce9e605STiwei Bie /* Only available for split ring */
21642a2d1382SAndy Lutomirski struct virtqueue *vring_new_virtqueue(unsigned int index,
21652a2d1382SAndy Lutomirski 				      unsigned int num,
21662a2d1382SAndy Lutomirski 				      unsigned int vring_align,
21672a2d1382SAndy Lutomirski 				      struct virtio_device *vdev,
21682a2d1382SAndy Lutomirski 				      bool weak_barriers,
2169f94682ddSMichael S. Tsirkin 				      bool context,
21702a2d1382SAndy Lutomirski 				      void *pages,
21712a2d1382SAndy Lutomirski 				      bool (*notify)(struct virtqueue *vq),
21722a2d1382SAndy Lutomirski 				      void (*callback)(struct virtqueue *vq),
21732a2d1382SAndy Lutomirski 				      const char *name)
21742a2d1382SAndy Lutomirski {
21752a2d1382SAndy Lutomirski 	struct vring vring;
21761ce9e605STiwei Bie 
21771ce9e605STiwei Bie 	if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
21781ce9e605STiwei Bie 		return NULL;
21791ce9e605STiwei Bie 
21802a2d1382SAndy Lutomirski 	vring_init(&vring, num, pages, vring_align);
2181f94682ddSMichael S. Tsirkin 	return __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
21822a2d1382SAndy Lutomirski 				     notify, callback, name);
21832a2d1382SAndy Lutomirski }
2184c6fd4701SRusty Russell EXPORT_SYMBOL_GPL(vring_new_virtqueue);
21850a8a69ddSRusty Russell 
21862a2d1382SAndy Lutomirski void vring_del_virtqueue(struct virtqueue *_vq)
21870a8a69ddSRusty Russell {
21882a2d1382SAndy Lutomirski 	struct vring_virtqueue *vq = to_vvq(_vq);
21892a2d1382SAndy Lutomirski 
21902a2d1382SAndy Lutomirski 	if (vq->we_own_ring) {
21911ce9e605STiwei Bie 		if (vq->packed_ring) {
21921ce9e605STiwei Bie 			vring_free_queue(vq->vq.vdev,
21931ce9e605STiwei Bie 					 vq->packed.ring_size_in_bytes,
21941ce9e605STiwei Bie 					 vq->packed.vring.desc,
21951ce9e605STiwei Bie 					 vq->packed.ring_dma_addr);
21961ce9e605STiwei Bie 
21971ce9e605STiwei Bie 			vring_free_queue(vq->vq.vdev,
21981ce9e605STiwei Bie 					 vq->packed.event_size_in_bytes,
21991ce9e605STiwei Bie 					 vq->packed.vring.driver,
22001ce9e605STiwei Bie 					 vq->packed.driver_event_dma_addr);
22011ce9e605STiwei Bie 
22021ce9e605STiwei Bie 			vring_free_queue(vq->vq.vdev,
22031ce9e605STiwei Bie 					 vq->packed.event_size_in_bytes,
22041ce9e605STiwei Bie 					 vq->packed.vring.device,
22051ce9e605STiwei Bie 					 vq->packed.device_event_dma_addr);
22061ce9e605STiwei Bie 
22071ce9e605STiwei Bie 			kfree(vq->packed.desc_state);
22081ce9e605STiwei Bie 			kfree(vq->packed.desc_extra);
22091ce9e605STiwei Bie 		} else {
2210d79dca75STiwei Bie 			vring_free_queue(vq->vq.vdev,
2211d79dca75STiwei Bie 					 vq->split.queue_size_in_bytes,
2212d79dca75STiwei Bie 					 vq->split.vring.desc,
2213d79dca75STiwei Bie 					 vq->split.queue_dma_addr);
22141ce9e605STiwei Bie 
2215cbeedb72STiwei Bie 			kfree(vq->split.desc_state);
22162a2d1382SAndy Lutomirski 		}
22171ce9e605STiwei Bie 	}
22182a2d1382SAndy Lutomirski 	list_del(&_vq->list);
22192a2d1382SAndy Lutomirski 	kfree(vq);
22200a8a69ddSRusty Russell }
2221c6fd4701SRusty Russell EXPORT_SYMBOL_GPL(vring_del_virtqueue);
22220a8a69ddSRusty Russell 
2223e34f8725SRusty Russell /* Manipulates transport-specific feature bits. */
2224e34f8725SRusty Russell void vring_transport_features(struct virtio_device *vdev)
2225e34f8725SRusty Russell {
2226e34f8725SRusty Russell 	unsigned int i;
2227e34f8725SRusty Russell 
2228e34f8725SRusty Russell 	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
2229e34f8725SRusty Russell 		switch (i) {
22309fa29b9dSMark McLoughlin 		case VIRTIO_RING_F_INDIRECT_DESC:
22319fa29b9dSMark McLoughlin 			break;
2232a5c262c5SMichael S. Tsirkin 		case VIRTIO_RING_F_EVENT_IDX:
2233a5c262c5SMichael S. Tsirkin 			break;
2234747ae34aSMichael S. Tsirkin 		case VIRTIO_F_VERSION_1:
2235747ae34aSMichael S. Tsirkin 			break;
22361a937693SMichael S. Tsirkin 		case VIRTIO_F_IOMMU_PLATFORM:
22371a937693SMichael S. Tsirkin 			break;
2238f959a128STiwei Bie 		case VIRTIO_F_RING_PACKED:
2239f959a128STiwei Bie 			break;
224045383fb0STiwei Bie 		case VIRTIO_F_ORDER_PLATFORM:
224145383fb0STiwei Bie 			break;
2242e34f8725SRusty Russell 		default:
2243e34f8725SRusty Russell 			/* We don't understand this bit. */
2244e16e12beSMichael S. Tsirkin 			__virtio_clear_bit(vdev, i);
2245e34f8725SRusty Russell 		}
2246e34f8725SRusty Russell 	}
2247e34f8725SRusty Russell }
2248e34f8725SRusty Russell EXPORT_SYMBOL_GPL(vring_transport_features);
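
/*
 * Illustrative sketch: a transport's finalize_features hook usually
 * lets the ring code veto feature bits first, then applies its own
 * policy.  my_finalize_features() is hypothetical.
 */
static int my_finalize_features(struct virtio_device *vdev)
{
	/* Let virtio_ring clear transport bits it does not understand. */
	vring_transport_features(vdev);

	/* A modern-only transport might then insist on VERSION_1. */
	if (!__virtio_test_bit(vdev, VIRTIO_F_VERSION_1))
		return -EINVAL;

	return 0;
}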
2249e34f8725SRusty Russell 
22505dfc1762SRusty Russell /**
22515dfc1762SRusty Russell  * virtqueue_get_vring_size - return the size of the virtqueue's vring
22525dfc1762SRusty Russell  * @vq: the struct virtqueue containing the vring of interest.
22535dfc1762SRusty Russell  *
22545dfc1762SRusty Russell  * Returns the size of the vring.  This is mainly used for boasting to
22555dfc1762SRusty Russell  * userspace.  Unlike other operations, this need not be serialized.
22565dfc1762SRusty Russell  */
22578f9f4668SRick Jones unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
22588f9f4668SRick Jones {
22608f9f4668SRick Jones 	struct vring_virtqueue *vq = to_vvq(_vq);
22618f9f4668SRick Jones 
22621ce9e605STiwei Bie 	return vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num;
22638f9f4668SRick Jones }
22648f9f4668SRick Jones EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);
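
/*
 * Illustrative sketch: a driver sizing a per-descriptor cookie array
 * from the ring size, which is fixed for the virtqueue's lifetime.
 * my_alloc_cookies() is hypothetical.
 */
static void **my_alloc_cookies(struct virtqueue *vq)
{
	/* No serialization needed; see the comment above. */
	unsigned int num = virtqueue_get_vring_size(vq);

	return kcalloc(num, sizeof(void *), GFP_KERNEL);
}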
22658f9f4668SRick Jones 
2266b3b32c94SHeinz Graalfs bool virtqueue_is_broken(struct virtqueue *_vq)
2267b3b32c94SHeinz Graalfs {
2268b3b32c94SHeinz Graalfs 	struct vring_virtqueue *vq = to_vvq(_vq);
2269b3b32c94SHeinz Graalfs 
2270b3b32c94SHeinz Graalfs 	return vq->broken;
2271b3b32c94SHeinz Graalfs }
2272b3b32c94SHeinz Graalfs EXPORT_SYMBOL_GPL(virtqueue_is_broken);
2273b3b32c94SHeinz Graalfs 
2274e2dcdfe9SRusty Russell /*
2275e2dcdfe9SRusty Russell  * This should prevent the device from being used, allowing drivers to
2276e2dcdfe9SRusty Russell  * recover.  You may need to grab appropriate locks to flush.
2277e2dcdfe9SRusty Russell  */
2278e2dcdfe9SRusty Russell void virtio_break_device(struct virtio_device *dev)
2279e2dcdfe9SRusty Russell {
2280e2dcdfe9SRusty Russell 	struct virtqueue *_vq;
2281e2dcdfe9SRusty Russell 
2282e2dcdfe9SRusty Russell 	list_for_each_entry(_vq, &dev->vqs, list) {
2283e2dcdfe9SRusty Russell 		struct vring_virtqueue *vq = to_vvq(_vq);
2284e2dcdfe9SRusty Russell 		vq->broken = true;
2285e2dcdfe9SRusty Russell 	}
2286e2dcdfe9SRusty Russell }
2287e2dcdfe9SRusty Russell EXPORT_SYMBOL_GPL(virtio_break_device);
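
/*
 * Illustrative sketch: a surprise-removal path marks every queue
 * broken, and polling code bails out once it sees that.  Both
 * functions here are hypothetical.
 */
static void my_surprise_remove(struct virtio_device *vdev)
{
	virtio_break_device(vdev);	/* every vq->broken becomes true */
	/* ... then flush or stop any remaining users ... */
}

static bool my_poll_one(struct virtqueue *vq)
{
	unsigned int len;

	if (virtqueue_is_broken(vq))
		return false;		/* device is gone; stop polling */

	return virtqueue_get_buf(vq, &len) != NULL;
}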
2288e2dcdfe9SRusty Russell 
22892a2d1382SAndy Lutomirski dma_addr_t virtqueue_get_desc_addr(struct virtqueue *_vq)
229089062652SCornelia Huck {
229189062652SCornelia Huck 	struct vring_virtqueue *vq = to_vvq(_vq);
229289062652SCornelia Huck 
22932a2d1382SAndy Lutomirski 	BUG_ON(!vq->we_own_ring);
229489062652SCornelia Huck 
22951ce9e605STiwei Bie 	if (vq->packed_ring)
22961ce9e605STiwei Bie 		return vq->packed.ring_dma_addr;
22971ce9e605STiwei Bie 
2298d79dca75STiwei Bie 	return vq->split.queue_dma_addr;
22992a2d1382SAndy Lutomirski }
23002a2d1382SAndy Lutomirski EXPORT_SYMBOL_GPL(virtqueue_get_desc_addr);
23012a2d1382SAndy Lutomirski 
23022a2d1382SAndy Lutomirski dma_addr_t virtqueue_get_avail_addr(struct virtqueue *_vq)
230389062652SCornelia Huck {
230489062652SCornelia Huck 	struct vring_virtqueue *vq = to_vvq(_vq);
230589062652SCornelia Huck 
23062a2d1382SAndy Lutomirski 	BUG_ON(!vq->we_own_ring);
23072a2d1382SAndy Lutomirski 
23081ce9e605STiwei Bie 	if (vq->packed_ring)
23091ce9e605STiwei Bie 		return vq->packed.driver_event_dma_addr;
23101ce9e605STiwei Bie 
2311d79dca75STiwei Bie 	return vq->split.queue_dma_addr +
2312e593bf97STiwei Bie 		((char *)vq->split.vring.avail - (char *)vq->split.vring.desc);
231389062652SCornelia Huck }
23142a2d1382SAndy Lutomirski EXPORT_SYMBOL_GPL(virtqueue_get_avail_addr);
23152a2d1382SAndy Lutomirski 
23162a2d1382SAndy Lutomirski dma_addr_t virtqueue_get_used_addr(struct virtqueue *_vq)
23172a2d1382SAndy Lutomirski {
23182a2d1382SAndy Lutomirski 	struct vring_virtqueue *vq = to_vvq(_vq);
23192a2d1382SAndy Lutomirski 
23202a2d1382SAndy Lutomirski 	BUG_ON(!vq->we_own_ring);
23212a2d1382SAndy Lutomirski 
23221ce9e605STiwei Bie 	if (vq->packed_ring)
23231ce9e605STiwei Bie 		return vq->packed.device_event_dma_addr;
23241ce9e605STiwei Bie 
2325d79dca75STiwei Bie 	return vq->split.queue_dma_addr +
2326e593bf97STiwei Bie 		((char *)vq->split.vring.used - (char *)vq->split.vring.desc);
23272a2d1382SAndy Lutomirski }
23282a2d1382SAndy Lutomirski EXPORT_SYMBOL_GPL(virtqueue_get_used_addr);
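
/*
 * Illustrative sketch: a transport handing the three ring addresses
 * to its device.  The register offsets and 64-bit writes here are
 * hypothetical; real transports (PCI, MMIO, CCW) each define their
 * own layout.  Note that for packed rings the "avail"/"used" getters
 * return the driver and device event suppression areas instead.
 */
static void my_program_queue(struct virtqueue *vq, void __iomem *base)
{
	/* Only valid for rings the core allocated (we_own_ring). */
	writeq((u64)virtqueue_get_desc_addr(vq),  base + 0x00);
	writeq((u64)virtqueue_get_avail_addr(vq), base + 0x08);
	writeq((u64)virtqueue_get_used_addr(vq),  base + 0x10);
}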
23292a2d1382SAndy Lutomirski 
23301ce9e605STiwei Bie /* Only available for split ring: packed rings have no struct vring to expose. */
23312a2d1382SAndy Lutomirski const struct vring *virtqueue_get_vring(struct virtqueue *vq)
23322a2d1382SAndy Lutomirski {
2333e593bf97STiwei Bie 	return &to_vvq(vq)->split.vring;
23342a2d1382SAndy Lutomirski }
23352a2d1382SAndy Lutomirski EXPORT_SYMBOL_GPL(virtqueue_get_vring);
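
/*
 * Illustrative sketch: peeking at the split ring layout, e.g. from
 * debugfs.  Per the comment above, this is only legal for split
 * rings.  my_read_avail_idx() is hypothetical.
 */
static u16 my_read_avail_idx(struct virtqueue *vq)
{
	const struct vring *vr = virtqueue_get_vring(vq);

	return virtio16_to_cpu(vq->vdev, vr->avail->idx);
}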
233689062652SCornelia Huck 
2337c6fd4701SRusty Russell MODULE_LICENSE("GPL");
2338