// SPDX-License-Identifier: GPL-2.0-or-later
/* Virtio ring implementation.
 *
 *  Copyright 2007 Rusty Russell IBM Corporation
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/dma-mapping.h>
#include <xen/xen.h>

#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		BUG();						\
	} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)						\
	do {							\
		if ((_vq)->in_use)				\
			panic("%s:in_use = %i\n",		\
			      (_vq)->vq.name, (_vq)->in_use);	\
		(_vq)->in_use = __LINE__;			\
	} while (0)
#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
#define LAST_ADD_TIME_UPDATE(_vq)				\
	do {							\
		ktime_t now = ktime_get();			\
								\
		/* No kick or get, with .1 second between?  Warn. */ \
		if ((_vq)->last_add_time_valid)			\
			WARN_ON(ktime_to_ms(ktime_sub(now,	\
				(_vq)->last_add_time)) > 100);	\
		(_vq)->last_add_time = now;			\
		(_vq)->last_add_time_valid = true;		\
	} while (0)
#define LAST_ADD_TIME_CHECK(_vq)				\
	do {							\
		if ((_vq)->last_add_time_valid) {		\
			WARN_ON(ktime_to_ms(ktime_sub(ktime_get(), \
				      (_vq)->last_add_time)) > 100); \
		}						\
	} while (0)
#define LAST_ADD_TIME_INVALID(_vq)				\
	((_vq)->last_add_time_valid = false)
#else
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&_vq->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		(_vq)->broken = true;				\
	} while (0)
#define START_USE(vq)
#define END_USE(vq)
#define LAST_ADD_TIME_UPDATE(vq)
#define LAST_ADD_TIME_CHECK(vq)
#define LAST_ADD_TIME_INVALID(vq)
#endif

struct vring_desc_state_split {
	void *data;			/* Data for callback. */
	struct vring_desc *indir_desc;	/* Indirect descriptor, if any. */
};

struct vring_desc_state_packed {
	void *data;			/* Data for callback. */
	struct vring_packed_desc *indir_desc; /* Indirect descriptor, if any. */
	u16 num;			/* Descriptor list length. */
	u16 last;			/* The last desc state in a list. */
};

struct vring_desc_extra {
	dma_addr_t addr;		/* Buffer DMA addr. */
	u32 len;			/* Buffer length. */
	u16 flags;			/* Descriptor flags. */
	u16 next;			/* The next desc state in a list. */
};

struct vring_virtqueue {
	struct virtqueue vq;

	/* Is this a packed ring? */
	bool packed_ring;

	/* Is DMA API used? */
	bool use_dma_api;

	/* Can we use weak barriers? */
	bool weak_barriers;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Host supports indirect buffers */
	bool indirect;

	/* Host publishes avail event idx */
	bool event;

	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen. */
	u16 last_used_idx;

	/* Hint for event idx: already triggered no need to disable. */
	bool event_triggered;

	union {
		/* Available for split ring */
		struct {
			/* Actual memory layout for this queue. */
			struct vring vring;

			/* Last written value to avail->flags */
			u16 avail_flags_shadow;

			/*
			 * Last written value to avail->idx in
			 * guest byte order.
			 */
			u16 avail_idx_shadow;

			/* Per-descriptor state. */
			struct vring_desc_state_split *desc_state;
			struct vring_desc_extra *desc_extra;

			/* DMA address and size information */
			dma_addr_t queue_dma_addr;
			size_t queue_size_in_bytes;
		} split;

		/* Available for packed ring */
		struct {
			/* Actual memory layout for this queue. */
			struct {
				unsigned int num;
				struct vring_packed_desc *desc;
				struct vring_packed_desc_event *driver;
				struct vring_packed_desc_event *device;
			} vring;

			/* Driver ring wrap counter. */
			bool avail_wrap_counter;

			/* Device ring wrap counter. */
			bool used_wrap_counter;

			/* Avail used flags. */
			u16 avail_used_flags;

			/* Index of the next avail descriptor. */
			u16 next_avail_idx;

			/*
			 * Last written value to driver->flags in
			 * guest byte order.
			 */
			u16 event_flags_shadow;

			/* Per-descriptor state. */
			struct vring_desc_state_packed *desc_state;
			struct vring_desc_extra *desc_extra;

			/* DMA address and size information */
			dma_addr_t ring_dma_addr;
			dma_addr_t driver_event_dma_addr;
			dma_addr_t device_event_dma_addr;
			size_t ring_size_in_bytes;
			size_t event_size_in_bytes;
		} packed;
	};

	/* How to notify other side. FIXME: commonalize hcalls! */
	bool (*notify)(struct virtqueue *vq);

	/* DMA, allocation, and size information */
	bool we_own_ring;

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;

	/* Figure out if their kicks are too delayed. */
	bool last_add_time_valid;
	ktime_t last_add_time;
#endif
};


/*
 * Helpers.
 */

#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)

static inline bool virtqueue_use_indirect(struct virtqueue *_vq,
					  unsigned int total_sg)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	/*
	 * If the host supports indirect descriptor tables, and we have multiple
	 * buffers, then go indirect. FIXME: tune this threshold
	 */
	return (vq->indirect && total_sg > 1 && vq->vq.num_free);
}

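/*
 * Worked example (editorial illustration, not kernel code): with
 * VIRTIO_RING_F_INDIRECT_DESC negotiated, a request carrying three
 * scatterlist elements consumes a single ring slot that points at a
 * three-entry indirect table, instead of chaining three descriptors in
 * the ring itself, so a nearly full ring can still accept it.
 */
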
/*
 * Modern virtio devices have feature bits to specify whether they need a
 * quirk and bypass the IOMMU. If not there, just use the DMA API.
 *
 * If there, the interaction between virtio and DMA API is messy.
 *
 * On most systems with virtio, physical addresses match bus addresses,
 * and it doesn't particularly matter whether we use the DMA API.
 *
 * On some systems, including Xen and any system with a physical device
 * that speaks virtio behind a physical IOMMU, we must use the DMA API
 * for virtio DMA to work at all.
 *
 * On other systems, including SPARC and PPC64, virtio-pci devices are
 * enumerated as though they are behind an IOMMU, but the virtio host
 * ignores the IOMMU, so we must either pretend that the IOMMU isn't
 * there or somehow map everything as the identity.
 *
 * For the time being, we preserve historic behavior and bypass the DMA
 * API.
 *
 * TODO: install a per-device DMA ops structure that does the right thing
 * taking into account all the above quirks, and use the DMA API
 * unconditionally on data path.
 */

static bool vring_use_dma_api(struct virtio_device *vdev)
{
	if (!virtio_has_dma_quirk(vdev))
		return true;

	/* Otherwise, we are left to guess. */
	/*
	 * In theory, it's possible to have a buggy QEMU-supplied
	 * emulated Q35 IOMMU and Xen enabled at the same time.  On
	 * such a configuration, virtio has never worked and will
	 * not work without an even larger kludge.  Instead, enable
	 * the DMA API if we're a Xen guest, which at least allows
	 * all of the sensible Xen configurations to work correctly.
	 */
	if (xen_domain())
		return true;

	return false;
}

size_t virtio_max_dma_size(struct virtio_device *vdev)
{
	size_t max_segment_size = SIZE_MAX;

	if (vring_use_dma_api(vdev))
		max_segment_size = dma_max_mapping_size(&vdev->dev);

	return max_segment_size;
}
EXPORT_SYMBOL_GPL(virtio_max_dma_size);
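
/*
 * Caller-side sketch (editorial illustration; virtio_max_dma_size() is
 * the helper above, the surrounding code is an assumption): a block
 * driver can use this to cap its segment size so no single element
 * exceeds what the DMA layer can map:
 *
 *	u32 max_size = virtio_max_dma_size(vdev);
 *
 *	blk_queue_max_segment_size(q, max_size);
 */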

static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
			      dma_addr_t *dma_handle, gfp_t flag)
{
	if (vring_use_dma_api(vdev)) {
		return dma_alloc_coherent(vdev->dev.parent, size,
					  dma_handle, flag);
	} else {
		void *queue = alloc_pages_exact(PAGE_ALIGN(size), flag);

		if (queue) {
			phys_addr_t phys_addr = virt_to_phys(queue);
			*dma_handle = (dma_addr_t)phys_addr;

			/*
			 * Sanity check: make sure we didn't truncate
			 * the address.  The only arches I can find that
			 * have 64-bit phys_addr_t but 32-bit dma_addr_t
			 * are certain non-highmem MIPS and x86
			 * configurations, but these configurations
			 * should never allocate physical pages above 32
			 * bits, so this is fine.  Just in case, throw a
			 * warning and abort if we end up with an
			 * unrepresentable address.
			 */
			if (WARN_ON_ONCE(*dma_handle != phys_addr)) {
				free_pages_exact(queue, PAGE_ALIGN(size));
				return NULL;
			}
		}
		return queue;
	}
}

static void vring_free_queue(struct virtio_device *vdev, size_t size,
			     void *queue, dma_addr_t dma_handle)
{
	if (vring_use_dma_api(vdev))
		dma_free_coherent(vdev->dev.parent, size, queue, dma_handle);
	else
		free_pages_exact(queue, PAGE_ALIGN(size));
}

/*
 * The DMA ops on various arches are rather gnarly right now, and
 * making all of the arch DMA ops work on the vring device itself
 * is a mess.  For now, we use the parent device for DMA ops.
 */
static inline struct device *vring_dma_dev(const struct vring_virtqueue *vq)
{
	return vq->vq.vdev->dev.parent;
}

/* Map one sg entry. */
static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
				   struct scatterlist *sg,
				   enum dma_data_direction direction)
{
	if (!vq->use_dma_api)
		return (dma_addr_t)sg_phys(sg);

	/*
	 * We can't use dma_map_sg, because we don't use scatterlists in
	 * the way it expects (we don't guarantee that the scatterlist
	 * will exist for the lifetime of the mapping).
	 */
	return dma_map_page(vring_dma_dev(vq),
			    sg_page(sg), sg->offset, sg->length,
			    direction);
}

static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
				   void *cpu_addr, size_t size,
				   enum dma_data_direction direction)
{
	if (!vq->use_dma_api)
		return (dma_addr_t)virt_to_phys(cpu_addr);

	return dma_map_single(vring_dma_dev(vq),
			      cpu_addr, size, direction);
}

static int vring_mapping_error(const struct vring_virtqueue *vq,
			       dma_addr_t addr)
{
	if (!vq->use_dma_api)
		return 0;

	return dma_mapping_error(vring_dma_dev(vq), addr);
}


/*
 * Split ring specific functions - *_split().
 */

static void vring_unmap_one_split_indirect(const struct vring_virtqueue *vq,
					   struct vring_desc *desc)
{
	u16 flags;

	if (!vq->use_dma_api)
		return;

	flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);

	if (flags & VRING_DESC_F_INDIRECT) {
		dma_unmap_single(vring_dma_dev(vq),
				 virtio64_to_cpu(vq->vq.vdev, desc->addr),
				 virtio32_to_cpu(vq->vq.vdev, desc->len),
				 (flags & VRING_DESC_F_WRITE) ?
				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
	} else {
		dma_unmap_page(vring_dma_dev(vq),
			       virtio64_to_cpu(vq->vq.vdev, desc->addr),
			       virtio32_to_cpu(vq->vq.vdev, desc->len),
			       (flags & VRING_DESC_F_WRITE) ?
			       DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}
}

static unsigned int vring_unmap_one_split(const struct vring_virtqueue *vq,
					  unsigned int i)
{
	struct vring_desc_extra *extra = vq->split.desc_extra;
	u16 flags;

	if (!vq->use_dma_api)
		goto out;

	flags = extra[i].flags;

	if (flags & VRING_DESC_F_INDIRECT) {
		dma_unmap_single(vring_dma_dev(vq),
				 extra[i].addr,
				 extra[i].len,
				 (flags & VRING_DESC_F_WRITE) ?
				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
	} else {
		dma_unmap_page(vring_dma_dev(vq),
			       extra[i].addr,
			       extra[i].len,
			       (flags & VRING_DESC_F_WRITE) ?
			       DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}

out:
	return extra[i].next;
}

static struct vring_desc *alloc_indirect_split(struct virtqueue *_vq,
					       unsigned int total_sg,
					       gfp_t gfp)
{
	struct vring_desc *desc;
	unsigned int i;

	/*
	 * We require lowmem mappings for the descriptors because
	 * otherwise virt_to_phys will give us bogus addresses in the
	 * virtqueue.
	 */
	gfp &= ~__GFP_HIGHMEM;

	desc = kmalloc_array(total_sg, sizeof(struct vring_desc), gfp);
	if (!desc)
		return NULL;

	for (i = 0; i < total_sg; i++)
		desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1);
	return desc;
}

static inline unsigned int virtqueue_add_desc_split(struct virtqueue *vq,
						    struct vring_desc *desc,
						    unsigned int i,
						    dma_addr_t addr,
						    unsigned int len,
						    u16 flags,
						    bool indirect)
{
	struct vring_virtqueue *vring = to_vvq(vq);
	struct vring_desc_extra *extra = vring->split.desc_extra;
	u16 next;

	desc[i].flags = cpu_to_virtio16(vq->vdev, flags);
	desc[i].addr = cpu_to_virtio64(vq->vdev, addr);
	desc[i].len = cpu_to_virtio32(vq->vdev, len);

	if (!indirect) {
		next = extra[i].next;
		desc[i].next = cpu_to_virtio16(vq->vdev, next);

		extra[i].addr = addr;
		extra[i].len = len;
		extra[i].flags = flags;
	} else
		next = virtio16_to_cpu(vq->vdev, desc[i].next);

	return next;
}

static inline int virtqueue_add_split(struct virtqueue *_vq,
				      struct scatterlist *sgs[],
				      unsigned int total_sg,
				      unsigned int out_sgs,
				      unsigned int in_sgs,
				      void *data,
				      void *ctx,
				      gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	struct scatterlist *sg;
	struct vring_desc *desc;
	unsigned int i, n, avail, descs_used, prev, err_idx;
	int head;
	bool indirect;

	START_USE(vq);

	BUG_ON(data == NULL);
	BUG_ON(ctx && vq->indirect);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return -EIO;
	}

	LAST_ADD_TIME_UPDATE(vq);

	BUG_ON(total_sg == 0);

	head = vq->free_head;

	if (virtqueue_use_indirect(_vq, total_sg))
		desc = alloc_indirect_split(_vq, total_sg, gfp);
	else {
		desc = NULL;
		WARN_ON_ONCE(total_sg > vq->split.vring.num && !vq->indirect);
	}

	if (desc) {
		/* Use a single buffer which doesn't continue */
		indirect = true;
		/* Set up rest to use this indirect table. */
		i = 0;
		descs_used = 1;
	} else {
		indirect = false;
		desc = vq->split.vring.desc;
		i = head;
		descs_used = total_sg;
	}

	if (vq->vq.num_free < descs_used) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 descs_used, vq->vq.num_free);
		/* FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer.  Presumably the
		 * host should service the ring ASAP. */
		if (out_sgs)
			vq->notify(&vq->vq);
		if (indirect)
			kfree(desc);
		END_USE(vq);
		return -ENOSPC;
	}

	for (n = 0; n < out_sgs; n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			prev = i;
			/* Note that we trust the indirect descriptor
			 * table since it uses streaming DMA mappings.
			 */
			i = virtqueue_add_desc_split(_vq, desc, i, addr, sg->length,
						     VRING_DESC_F_NEXT,
						     indirect);
		}
	}
	for (; n < (out_sgs + in_sgs); n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			prev = i;
			/* Note that we trust the indirect descriptor
			 * table since it uses streaming DMA mappings.
			 */
			i = virtqueue_add_desc_split(_vq, desc, i, addr,
						     sg->length,
						     VRING_DESC_F_NEXT |
						     VRING_DESC_F_WRITE,
						     indirect);
		}
	}
	/* Last one doesn't continue. */
	desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);
	if (!indirect && vq->use_dma_api)
		vq->split.desc_extra[prev & (vq->split.vring.num - 1)].flags =
			~VRING_DESC_F_NEXT;

	if (indirect) {
		/* Now that the indirect table is filled in, map it. */
		dma_addr_t addr = vring_map_single(
			vq, desc, total_sg * sizeof(struct vring_desc),
			DMA_TO_DEVICE);
		if (vring_mapping_error(vq, addr))
			goto unmap_release;

		virtqueue_add_desc_split(_vq, vq->split.vring.desc,
					 head, addr,
					 total_sg * sizeof(struct vring_desc),
					 VRING_DESC_F_INDIRECT,
					 false);
	}

	/* We're using some buffers from the free list. */
	vq->vq.num_free -= descs_used;

	/* Update free pointer */
	if (indirect)
		vq->free_head = vq->split.desc_extra[head].next;
	else
		vq->free_head = i;

	/* Store token and indirect buffer state. */
	vq->split.desc_state[head].data = data;
	if (indirect)
		vq->split.desc_state[head].indir_desc = desc;
	else
		vq->split.desc_state[head].indir_desc = ctx;

	/* Put entry in available array (but don't update avail->idx until they
	 * do sync). */
	avail = vq->split.avail_idx_shadow & (vq->split.vring.num - 1);
	vq->split.vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);

	/* Descriptors and available array need to be set before we expose the
	 * new available array entries. */
	virtio_wmb(vq->weak_barriers);
	vq->split.avail_idx_shadow++;
	vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
						vq->split.avail_idx_shadow);
	vq->num_added++;

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	/* This is very unlikely, but theoretically possible.  Kick
	 * just in case. */
	if (unlikely(vq->num_added == (1 << 16) - 1))
		virtqueue_kick(_vq);

	return 0;

unmap_release:
	err_idx = i;

	if (indirect)
		i = 0;
	else
		i = head;

	for (n = 0; n < total_sg; n++) {
		if (i == err_idx)
			break;
		if (indirect) {
			vring_unmap_one_split_indirect(vq, &desc[i]);
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		} else
			i = vring_unmap_one_split(vq, i);
	}

	if (indirect)
		kfree(desc);

	END_USE(vq);
	return -ENOMEM;
}
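
/*
 * Caller-side sketch (editorial illustration; virtqueue_add_sgs() and
 * sg_init_one() are real APIs, "struct my_req" and its fields are
 * assumptions): drivers reach virtqueue_add_split() through
 * virtqueue_add_sgs() and friends, roughly like:
 *
 *	struct scatterlist hdr, status, *sgs[2];
 *
 *	sg_init_one(&hdr, &req->hdr, sizeof(req->hdr));
 *	sg_init_one(&status, &req->status, sizeof(req->status));
 *	sgs[0] = &hdr;		(one out_sg: driver to device)
 *	sgs[1] = &status;	(one in_sg: device to driver)
 *	err = virtqueue_add_sgs(vq, sgs, 1, 1, req, GFP_ATOMIC);
 */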

static bool virtqueue_kick_prepare_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 new, old;
	bool needs_kick;

	START_USE(vq);
	/* We need to expose available array entries before checking avail
	 * event. */
	virtio_mb(vq->weak_barriers);

	old = vq->split.avail_idx_shadow - vq->num_added;
	new = vq->split.avail_idx_shadow;
	vq->num_added = 0;

	LAST_ADD_TIME_CHECK(vq);
	LAST_ADD_TIME_INVALID(vq);

	if (vq->event) {
		needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev,
					vring_avail_event(&vq->split.vring)),
					      new, old);
	} else {
		needs_kick = !(vq->split.vring.used->flags &
					cpu_to_virtio16(_vq->vdev,
						VRING_USED_F_NO_NOTIFY));
	}
	END_USE(vq);
	return needs_kick;
}
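
/*
 * Caller pattern (editorial sketch; both helpers are real exported APIs
 * that dispatch to the split/packed variants): the prepare step is
 * separated from the notify so a driver can batch several additions
 * under its own lock and pay for the (possibly expensive) device
 * notification only once:
 *
 *	if (virtqueue_kick_prepare(vq))
 *		virtqueue_notify(vq);
 */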

static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
			     void **ctx)
{
	unsigned int i, j;
	__virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);

	/* Clear data ptr. */
	vq->split.desc_state[head].data = NULL;

	/* Put back on free list: unmap first-level descriptors and find end */
	i = head;

	while (vq->split.vring.desc[i].flags & nextflag) {
		vring_unmap_one_split(vq, i);
		i = vq->split.desc_extra[i].next;
		vq->vq.num_free++;
	}

	vring_unmap_one_split(vq, i);
	vq->split.desc_extra[i].next = vq->free_head;
	vq->free_head = head;

	/* Plus final descriptor */
	vq->vq.num_free++;

	if (vq->indirect) {
		struct vring_desc *indir_desc =
				vq->split.desc_state[head].indir_desc;
		u32 len;

		/* Free the indirect table, if any, now that it's unmapped. */
		if (!indir_desc)
			return;

		len = vq->split.desc_extra[head].len;

		BUG_ON(!(vq->split.desc_extra[head].flags &
				VRING_DESC_F_INDIRECT));
		BUG_ON(len == 0 || len % sizeof(struct vring_desc));

		for (j = 0; j < len / sizeof(struct vring_desc); j++)
			vring_unmap_one_split_indirect(vq, &indir_desc[j]);

		kfree(indir_desc);
		vq->split.desc_state[head].indir_desc = NULL;
	} else if (ctx) {
		*ctx = vq->split.desc_state[head].indir_desc;
	}
}

static inline bool more_used_split(const struct vring_virtqueue *vq)
{
	return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev,
			vq->split.vring.used->idx);
}

static void *virtqueue_get_buf_ctx_split(struct virtqueue *_vq,
					 unsigned int *len,
					 void **ctx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	void *ret;
	unsigned int i;
	u16 last_used;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used_split(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* Only get used array entries after they have been exposed by host. */
	virtio_rmb(vq->weak_barriers);

	last_used = (vq->last_used_idx & (vq->split.vring.num - 1));
	i = virtio32_to_cpu(_vq->vdev,
			vq->split.vring.used->ring[last_used].id);
	*len = virtio32_to_cpu(_vq->vdev,
			vq->split.vring.used->ring[last_used].len);

	if (unlikely(i >= vq->split.vring.num)) {
		BAD_RING(vq, "id %u out of range\n", i);
		return NULL;
	}
	if (unlikely(!vq->split.desc_state[i].data)) {
		BAD_RING(vq, "id %u is not a head!\n", i);
		return NULL;
	}

	/* detach_buf_split clears data, so grab it now. */
	ret = vq->split.desc_state[i].data;
	detach_buf_split(vq, i, ctx);
	vq->last_used_idx++;
	/* If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call. */
	if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT))
		virtio_store_mb(vq->weak_barriers,
				&vring_used_event(&vq->split.vring),
				cpu_to_virtio16(_vq->vdev, vq->last_used_idx));

	LAST_ADD_TIME_INVALID(vq);

	END_USE(vq);
	return ret;
}
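
/*
 * Consumer sketch (editorial illustration; virtqueue_get_buf() is the
 * real wrapper around this function, handle_completion() is an assumed
 * driver routine): a driver's virtqueue callback typically drains
 * completions in a loop until the used ring is empty:
 *
 *	unsigned int len;
 *	void *buf;
 *
 *	while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
 *		handle_completion(buf, len);
 */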

static void virtqueue_disable_cb_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
		vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
		if (vq->event)
			/* TODO: this is a hack. Figure out a cleaner value to write. */
			vring_used_event(&vq->split.vring) = 0x0;
		else
			vq->split.vring.avail->flags =
				cpu_to_virtio16(_vq->vdev,
						vq->split.avail_flags_shadow);
	}
}

static unsigned virtqueue_enable_cb_prepare_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 last_used_idx;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->split.vring.avail->flags =
				cpu_to_virtio16(_vq->vdev,
						vq->split.avail_flags_shadow);
	}
	vring_used_event(&vq->split.vring) = cpu_to_virtio16(_vq->vdev,
			last_used_idx = vq->last_used_idx);
	END_USE(vq);
	return last_used_idx;
}

static bool virtqueue_poll_split(struct virtqueue *_vq, unsigned last_used_idx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev,
			vq->split.vring.used->idx);
}

static bool virtqueue_enable_cb_delayed_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 bufs;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_USED_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always update the event index to keep code simple. */
	if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->split.vring.avail->flags =
				cpu_to_virtio16(_vq->vdev,
						vq->split.avail_flags_shadow);
	}
	/* TODO: tune this threshold */
	bufs = (u16)(vq->split.avail_idx_shadow - vq->last_used_idx) * 3 / 4;

	virtio_store_mb(vq->weak_barriers,
			&vring_used_event(&vq->split.vring),
			cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs));

	if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->split.vring.used->idx)
					- vq->last_used_idx) > bufs)) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}
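
/*
 * Interrupt-handling sketch (editorial illustration; the helpers are
 * real exported APIs): virtqueue_enable_cb() returns false if buffers
 * arrived while callbacks were off, so the canonical consumer re-checks
 * to close the race window:
 *
 *	virtqueue_disable_cb(vq);
 * again:
 *	while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
 *		handle_completion(buf, len);
 *	if (!virtqueue_enable_cb(vq)) {
 *		virtqueue_disable_cb(vq);
 *		goto again;
 *	}
 */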

static void *virtqueue_detach_unused_buf_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i;
	void *buf;

	START_USE(vq);

	for (i = 0; i < vq->split.vring.num; i++) {
		if (!vq->split.desc_state[i].data)
			continue;
		/* detach_buf_split clears data, so grab it now. */
		buf = vq->split.desc_state[i].data;
		detach_buf_split(vq, i, NULL);
		vq->split.avail_idx_shadow--;
		vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
				vq->split.avail_idx_shadow);
		END_USE(vq);
		return buf;
	}
	/* That should have freed everything. */
	BUG_ON(vq->vq.num_free != vq->split.vring.num);

	END_USE(vq);
	return NULL;
}
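
/*
 * Teardown sketch (editorial illustration; virtqueue_detach_unused_buf()
 * is the real wrapper, the free routine is driver-specific): on device
 * removal, after the device has been reset, a driver reclaims buffers
 * it queued but the device never consumed:
 *
 *	while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
 *		kfree(buf);
 */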

static struct virtqueue *vring_create_virtqueue_split(
	unsigned int index,
	unsigned int num,
	unsigned int vring_align,
	struct virtio_device *vdev,
	bool weak_barriers,
	bool may_reduce_num,
	bool context,
	bool (*notify)(struct virtqueue *),
	void (*callback)(struct virtqueue *),
	const char *name)
{
	struct virtqueue *vq;
	void *queue = NULL;
	dma_addr_t dma_addr;
	size_t queue_size_in_bytes;
	struct vring vring;

	/* We assume num is a power of 2. */
	if (num & (num - 1)) {
		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
		return NULL;
	}

	/* TODO: allocate each queue chunk individually */
	for (; num && vring_size(num, vring_align) > PAGE_SIZE; num /= 2) {
		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
					  &dma_addr,
					  GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
		if (queue)
			break;
		if (!may_reduce_num)
			return NULL;
	}

	if (!num)
		return NULL;

	if (!queue) {
		/* Try to get a single page. You are my only hope! */
		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
					  &dma_addr, GFP_KERNEL|__GFP_ZERO);
	}
	if (!queue)
		return NULL;

	queue_size_in_bytes = vring_size(num, vring_align);
	vring_init(&vring, num, queue, vring_align);

	vq = __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
				   notify, callback, name);
	if (!vq) {
		vring_free_queue(vdev, queue_size_in_bytes, queue,
				 dma_addr);
		return NULL;
	}

	to_vvq(vq)->split.queue_dma_addr = dma_addr;
	to_vvq(vq)->split.queue_size_in_bytes = queue_size_in_bytes;
	to_vvq(vq)->we_own_ring = true;

	return vq;
}


/*
 * Packed ring specific functions - *_packed().
 */

static void vring_unmap_state_packed(const struct vring_virtqueue *vq,
				     struct vring_desc_extra *state)
{
	u16 flags;

	if (!vq->use_dma_api)
		return;

	flags = state->flags;

	if (flags & VRING_DESC_F_INDIRECT) {
		dma_unmap_single(vring_dma_dev(vq),
				 state->addr, state->len,
				 (flags & VRING_DESC_F_WRITE) ?
				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
	} else {
		dma_unmap_page(vring_dma_dev(vq),
			       state->addr, state->len,
			       (flags & VRING_DESC_F_WRITE) ?
			       DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}
}

static void vring_unmap_desc_packed(const struct vring_virtqueue *vq,
				   struct vring_packed_desc *desc)
{
	u16 flags;

	if (!vq->use_dma_api)
		return;

	flags = le16_to_cpu(desc->flags);

	if (flags & VRING_DESC_F_INDIRECT) {
		dma_unmap_single(vring_dma_dev(vq),
				 le64_to_cpu(desc->addr),
				 le32_to_cpu(desc->len),
				 (flags & VRING_DESC_F_WRITE) ?
				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
	} else {
		dma_unmap_page(vring_dma_dev(vq),
			       le64_to_cpu(desc->addr),
			       le32_to_cpu(desc->len),
			       (flags & VRING_DESC_F_WRITE) ?
			       DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}
}

static struct vring_packed_desc *alloc_indirect_packed(unsigned int total_sg,
						       gfp_t gfp)
{
	struct vring_packed_desc *desc;

	/*
	 * We require lowmem mappings for the descriptors because
	 * otherwise virt_to_phys will give us bogus addresses in the
	 * virtqueue.
	 */
	gfp &= ~__GFP_HIGHMEM;

	desc = kmalloc_array(total_sg, sizeof(struct vring_packed_desc), gfp);

	return desc;
}

10511ce9e605STiwei Bie static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
10521ce9e605STiwei Bie 				       struct scatterlist *sgs[],
10531ce9e605STiwei Bie 				       unsigned int total_sg,
10541ce9e605STiwei Bie 				       unsigned int out_sgs,
10551ce9e605STiwei Bie 				       unsigned int in_sgs,
10561ce9e605STiwei Bie 				       void *data,
10571ce9e605STiwei Bie 				       gfp_t gfp)
10581ce9e605STiwei Bie {
10591ce9e605STiwei Bie 	struct vring_packed_desc *desc;
10601ce9e605STiwei Bie 	struct scatterlist *sg;
10611ce9e605STiwei Bie 	unsigned int i, n, err_idx;
10621ce9e605STiwei Bie 	u16 head, id;
10631ce9e605STiwei Bie 	dma_addr_t addr;
10641ce9e605STiwei Bie 
10651ce9e605STiwei Bie 	head = vq->packed.next_avail_idx;
10661ce9e605STiwei Bie 	desc = alloc_indirect_packed(total_sg, gfp);
10671ce9e605STiwei Bie 
10681ce9e605STiwei Bie 	if (unlikely(vq->vq.num_free < 1)) {
10691ce9e605STiwei Bie 		pr_debug("Can't add buf len 1 - avail = 0\n");
1070df0bfe75SYueHaibing 		kfree(desc);
10711ce9e605STiwei Bie 		END_USE(vq);
10721ce9e605STiwei Bie 		return -ENOSPC;
10731ce9e605STiwei Bie 	}
10741ce9e605STiwei Bie 
10751ce9e605STiwei Bie 	i = 0;
10761ce9e605STiwei Bie 	id = vq->free_head;
10771ce9e605STiwei Bie 	BUG_ON(id == vq->packed.vring.num);
10781ce9e605STiwei Bie 
10791ce9e605STiwei Bie 	for (n = 0; n < out_sgs + in_sgs; n++) {
10801ce9e605STiwei Bie 		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
10811ce9e605STiwei Bie 			addr = vring_map_one_sg(vq, sg, n < out_sgs ?
10821ce9e605STiwei Bie 					DMA_TO_DEVICE : DMA_FROM_DEVICE);
10831ce9e605STiwei Bie 			if (vring_mapping_error(vq, addr))
10841ce9e605STiwei Bie 				goto unmap_release;
10851ce9e605STiwei Bie 
10861ce9e605STiwei Bie 			desc[i].flags = cpu_to_le16(n < out_sgs ?
10871ce9e605STiwei Bie 						0 : VRING_DESC_F_WRITE);
10881ce9e605STiwei Bie 			desc[i].addr = cpu_to_le64(addr);
10891ce9e605STiwei Bie 			desc[i].len = cpu_to_le32(sg->length);
10901ce9e605STiwei Bie 			i++;
10911ce9e605STiwei Bie 		}
10921ce9e605STiwei Bie 	}
10931ce9e605STiwei Bie 
10941ce9e605STiwei Bie 	/* Now that the indirect table is filled in, map it. */
10951ce9e605STiwei Bie 	addr = vring_map_single(vq, desc,
10961ce9e605STiwei Bie 			total_sg * sizeof(struct vring_packed_desc),
10971ce9e605STiwei Bie 			DMA_TO_DEVICE);
10981ce9e605STiwei Bie 	if (vring_mapping_error(vq, addr))
10991ce9e605STiwei Bie 		goto unmap_release;
11001ce9e605STiwei Bie 
11011ce9e605STiwei Bie 	vq->packed.vring.desc[head].addr = cpu_to_le64(addr);
11021ce9e605STiwei Bie 	vq->packed.vring.desc[head].len = cpu_to_le32(total_sg *
11031ce9e605STiwei Bie 				sizeof(struct vring_packed_desc));
11041ce9e605STiwei Bie 	vq->packed.vring.desc[head].id = cpu_to_le16(id);
11051ce9e605STiwei Bie 
11061ce9e605STiwei Bie 	if (vq->use_dma_api) {
11071ce9e605STiwei Bie 		vq->packed.desc_extra[id].addr = addr;
11081ce9e605STiwei Bie 		vq->packed.desc_extra[id].len = total_sg *
11091ce9e605STiwei Bie 				sizeof(struct vring_packed_desc);
11101ce9e605STiwei Bie 		vq->packed.desc_extra[id].flags = VRING_DESC_F_INDIRECT |
11111ce9e605STiwei Bie 						  vq->packed.avail_used_flags;
11121ce9e605STiwei Bie 	}
11131ce9e605STiwei Bie 
11141ce9e605STiwei Bie 	/*
11151ce9e605STiwei Bie 	 * A driver MUST NOT make the first descriptor in the list
11161ce9e605STiwei Bie 	 * available before all subsequent descriptors comprising
11171ce9e605STiwei Bie 	 * the list are made available.
11181ce9e605STiwei Bie 	 */
11191ce9e605STiwei Bie 	virtio_wmb(vq->weak_barriers);
11201ce9e605STiwei Bie 	vq->packed.vring.desc[head].flags = cpu_to_le16(VRING_DESC_F_INDIRECT |
11211ce9e605STiwei Bie 						vq->packed.avail_used_flags);
11221ce9e605STiwei Bie 
11231ce9e605STiwei Bie 	/* We're using some buffers from the free list. */
11241ce9e605STiwei Bie 	vq->vq.num_free -= 1;
11251ce9e605STiwei Bie 
11261ce9e605STiwei Bie 	/* Update free pointer */
11271ce9e605STiwei Bie 	n = head + 1;
11281ce9e605STiwei Bie 	if (n >= vq->packed.vring.num) {
11291ce9e605STiwei Bie 		n = 0;
11301ce9e605STiwei Bie 		vq->packed.avail_wrap_counter ^= 1;
11311ce9e605STiwei Bie 		vq->packed.avail_used_flags ^=
11321ce9e605STiwei Bie 				1 << VRING_PACKED_DESC_F_AVAIL |
11331ce9e605STiwei Bie 				1 << VRING_PACKED_DESC_F_USED;
11341ce9e605STiwei Bie 	}
11351ce9e605STiwei Bie 	vq->packed.next_avail_idx = n;
1136aeef9b47SJason Wang 	vq->free_head = vq->packed.desc_extra[id].next;
11371ce9e605STiwei Bie 
11381ce9e605STiwei Bie 	/* Store token and indirect buffer state. */
11391ce9e605STiwei Bie 	vq->packed.desc_state[id].num = 1;
11401ce9e605STiwei Bie 	vq->packed.desc_state[id].data = data;
11411ce9e605STiwei Bie 	vq->packed.desc_state[id].indir_desc = desc;
11421ce9e605STiwei Bie 	vq->packed.desc_state[id].last = id;
11431ce9e605STiwei Bie 
11441ce9e605STiwei Bie 	vq->num_added += 1;
11451ce9e605STiwei Bie 
11461ce9e605STiwei Bie 	pr_debug("Added buffer head %i to %p\n", head, vq);
11471ce9e605STiwei Bie 	END_USE(vq);
11481ce9e605STiwei Bie 
11491ce9e605STiwei Bie 	return 0;
11501ce9e605STiwei Bie 
11511ce9e605STiwei Bie unmap_release:
11521ce9e605STiwei Bie 	err_idx = i;
11531ce9e605STiwei Bie 
11541ce9e605STiwei Bie 	for (i = 0; i < err_idx; i++)
11551ce9e605STiwei Bie 		vring_unmap_desc_packed(vq, &desc[i]);
11561ce9e605STiwei Bie 
11571ce9e605STiwei Bie 	kfree(desc);
11581ce9e605STiwei Bie 
11591ce9e605STiwei Bie 	END_USE(vq);
1160f7728002SHalil Pasic 	return -ENOMEM;
11611ce9e605STiwei Bie }
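
/*
 * Illustration only (hypothetical helper, not used by this file): the
 * indirect path above consumes a single ring slot whose flags carry
 * VRING_DESC_F_INDIRECT and whose addr points at a driver-allocated
 * table of total_sg packed descriptors.  Entries inside that table
 * carry no avail/used wrap bits, only VRING_DESC_F_WRITE on
 * device-writable buffers:
 */
static void __maybe_unused
example_fill_indirect_entry(struct vring_packed_desc *table, unsigned int i,
			    dma_addr_t addr, u32 len, bool device_writes)
{
	table[i].addr = cpu_to_le64(addr);
	table[i].len = cpu_to_le32(len);
	table[i].flags = cpu_to_le16(device_writes ? VRING_DESC_F_WRITE : 0);
}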
11621ce9e605STiwei Bie 
11631ce9e605STiwei Bie static inline int virtqueue_add_packed(struct virtqueue *_vq,
11641ce9e605STiwei Bie 				       struct scatterlist *sgs[],
11651ce9e605STiwei Bie 				       unsigned int total_sg,
11661ce9e605STiwei Bie 				       unsigned int out_sgs,
11671ce9e605STiwei Bie 				       unsigned int in_sgs,
11681ce9e605STiwei Bie 				       void *data,
11691ce9e605STiwei Bie 				       void *ctx,
11701ce9e605STiwei Bie 				       gfp_t gfp)
11711ce9e605STiwei Bie {
11721ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
11731ce9e605STiwei Bie 	struct vring_packed_desc *desc;
11741ce9e605STiwei Bie 	struct scatterlist *sg;
11751ce9e605STiwei Bie 	unsigned int i, n, c, descs_used, err_idx;
11763f649ab7SKees Cook 	__le16 head_flags, flags;
11773f649ab7SKees Cook 	u16 head, id, prev, curr, avail_used_flags;
11781ce9e605STiwei Bie 
11791ce9e605STiwei Bie 	START_USE(vq);
11801ce9e605STiwei Bie 
11811ce9e605STiwei Bie 	BUG_ON(data == NULL);
11821ce9e605STiwei Bie 	BUG_ON(ctx && vq->indirect);
11831ce9e605STiwei Bie 
11841ce9e605STiwei Bie 	if (unlikely(vq->broken)) {
11851ce9e605STiwei Bie 		END_USE(vq);
11861ce9e605STiwei Bie 		return -EIO;
11871ce9e605STiwei Bie 	}
11881ce9e605STiwei Bie 
11891ce9e605STiwei Bie 	LAST_ADD_TIME_UPDATE(vq);
11901ce9e605STiwei Bie 
11911ce9e605STiwei Bie 	BUG_ON(total_sg == 0);
11921ce9e605STiwei Bie 
11931ce9e605STiwei Bie 	if (virtqueue_use_indirect(_vq, total_sg))
11941ce9e605STiwei Bie 		return virtqueue_add_indirect_packed(vq, sgs, total_sg,
11951ce9e605STiwei Bie 				out_sgs, in_sgs, data, gfp);
11961ce9e605STiwei Bie 
11971ce9e605STiwei Bie 	head = vq->packed.next_avail_idx;
11981ce9e605STiwei Bie 	avail_used_flags = vq->packed.avail_used_flags;
11991ce9e605STiwei Bie 
12001ce9e605STiwei Bie 	WARN_ON_ONCE(total_sg > vq->packed.vring.num && !vq->indirect);
12011ce9e605STiwei Bie 
12021ce9e605STiwei Bie 	desc = vq->packed.vring.desc;
12031ce9e605STiwei Bie 	i = head;
12041ce9e605STiwei Bie 	descs_used = total_sg;
12051ce9e605STiwei Bie 
12061ce9e605STiwei Bie 	if (unlikely(vq->vq.num_free < descs_used)) {
12071ce9e605STiwei Bie 		pr_debug("Can't add buf len %i - avail = %i\n",
12081ce9e605STiwei Bie 			 descs_used, vq->vq.num_free);
12091ce9e605STiwei Bie 		END_USE(vq);
12101ce9e605STiwei Bie 		return -ENOSPC;
12111ce9e605STiwei Bie 	}
12121ce9e605STiwei Bie 
12131ce9e605STiwei Bie 	id = vq->free_head;
12141ce9e605STiwei Bie 	BUG_ON(id == vq->packed.vring.num);
12151ce9e605STiwei Bie 
12161ce9e605STiwei Bie 	curr = id;
12171ce9e605STiwei Bie 	c = 0;
12181ce9e605STiwei Bie 	for (n = 0; n < out_sgs + in_sgs; n++) {
12191ce9e605STiwei Bie 		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
12201ce9e605STiwei Bie 			dma_addr_t addr = vring_map_one_sg(vq, sg, n < out_sgs ?
12211ce9e605STiwei Bie 					DMA_TO_DEVICE : DMA_FROM_DEVICE);
12221ce9e605STiwei Bie 			if (vring_mapping_error(vq, addr))
12231ce9e605STiwei Bie 				goto unmap_release;
12241ce9e605STiwei Bie 
12251ce9e605STiwei Bie 			flags = cpu_to_le16(vq->packed.avail_used_flags |
12261ce9e605STiwei Bie 				    (++c == total_sg ? 0 : VRING_DESC_F_NEXT) |
12271ce9e605STiwei Bie 				    (n < out_sgs ? 0 : VRING_DESC_F_WRITE));
12281ce9e605STiwei Bie 			if (i == head)
12291ce9e605STiwei Bie 				head_flags = flags;
12301ce9e605STiwei Bie 			else
12311ce9e605STiwei Bie 				desc[i].flags = flags;
12321ce9e605STiwei Bie 
12331ce9e605STiwei Bie 			desc[i].addr = cpu_to_le64(addr);
12341ce9e605STiwei Bie 			desc[i].len = cpu_to_le32(sg->length);
12351ce9e605STiwei Bie 			desc[i].id = cpu_to_le16(id);
12361ce9e605STiwei Bie 
12371ce9e605STiwei Bie 			if (unlikely(vq->use_dma_api)) {
12381ce9e605STiwei Bie 				vq->packed.desc_extra[curr].addr = addr;
12391ce9e605STiwei Bie 				vq->packed.desc_extra[curr].len = sg->length;
12401ce9e605STiwei Bie 				vq->packed.desc_extra[curr].flags =
12411ce9e605STiwei Bie 					le16_to_cpu(flags);
12421ce9e605STiwei Bie 			}
12431ce9e605STiwei Bie 			prev = curr;
1244aeef9b47SJason Wang 			curr = vq->packed.desc_extra[curr].next;
12451ce9e605STiwei Bie 
12461ce9e605STiwei Bie 			if (unlikely(++i >= vq->packed.vring.num)) {
12471ce9e605STiwei Bie 				i = 0;
12481ce9e605STiwei Bie 				vq->packed.avail_used_flags ^=
12491ce9e605STiwei Bie 					1 << VRING_PACKED_DESC_F_AVAIL |
12501ce9e605STiwei Bie 					1 << VRING_PACKED_DESC_F_USED;
12511ce9e605STiwei Bie 			}
12521ce9e605STiwei Bie 		}
12531ce9e605STiwei Bie 	}
12541ce9e605STiwei Bie 
12551ce9e605STiwei Bie 	if (i < head)
12561ce9e605STiwei Bie 		vq->packed.avail_wrap_counter ^= 1;
12571ce9e605STiwei Bie 
12581ce9e605STiwei Bie 	/* We're using some buffers from the free list. */
12591ce9e605STiwei Bie 	vq->vq.num_free -= descs_used;
12601ce9e605STiwei Bie 
12611ce9e605STiwei Bie 	/* Update free pointer */
12621ce9e605STiwei Bie 	vq->packed.next_avail_idx = i;
12631ce9e605STiwei Bie 	vq->free_head = curr;
12641ce9e605STiwei Bie 
12651ce9e605STiwei Bie 	/* Store token. */
12661ce9e605STiwei Bie 	vq->packed.desc_state[id].num = descs_used;
12671ce9e605STiwei Bie 	vq->packed.desc_state[id].data = data;
12681ce9e605STiwei Bie 	vq->packed.desc_state[id].indir_desc = ctx;
12691ce9e605STiwei Bie 	vq->packed.desc_state[id].last = prev;
12701ce9e605STiwei Bie 
12711ce9e605STiwei Bie 	/*
12721ce9e605STiwei Bie 	 * A driver MUST NOT make the first descriptor in the list
12731ce9e605STiwei Bie 	 * available before all subsequent descriptors comprising
12741ce9e605STiwei Bie 	 * the list are made available.
12751ce9e605STiwei Bie 	 */
12761ce9e605STiwei Bie 	virtio_wmb(vq->weak_barriers);
12771ce9e605STiwei Bie 	vq->packed.vring.desc[head].flags = head_flags;
12781ce9e605STiwei Bie 	vq->num_added += descs_used;
12791ce9e605STiwei Bie 
12801ce9e605STiwei Bie 	pr_debug("Added buffer head %i to %p\n", head, vq);
12811ce9e605STiwei Bie 	END_USE(vq);
12821ce9e605STiwei Bie 
12831ce9e605STiwei Bie 	return 0;
12841ce9e605STiwei Bie 
12851ce9e605STiwei Bie unmap_release:
12861ce9e605STiwei Bie 	err_idx = i;
12871ce9e605STiwei Bie 	i = head;
128844593865SJason Wang 	curr = vq->free_head;
12891ce9e605STiwei Bie 
12901ce9e605STiwei Bie 	vq->packed.avail_used_flags = avail_used_flags;
12911ce9e605STiwei Bie 
12921ce9e605STiwei Bie 	for (n = 0; n < total_sg; n++) {
12931ce9e605STiwei Bie 		if (i == err_idx)
12941ce9e605STiwei Bie 			break;
129544593865SJason Wang 		vring_unmap_state_packed(vq,
129644593865SJason Wang 					 &vq->packed.desc_extra[curr]);
129744593865SJason Wang 		curr = vq->packed.desc_extra[curr].next;
12981ce9e605STiwei Bie 		i++;
12991ce9e605STiwei Bie 		if (i >= vq->packed.vring.num)
13001ce9e605STiwei Bie 			i = 0;
13011ce9e605STiwei Bie 	}
13021ce9e605STiwei Bie 
13031ce9e605STiwei Bie 	END_USE(vq);
13041ce9e605STiwei Bie 	return -EIO;
13051ce9e605STiwei Bie }
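
/*
 * Illustration only: how the AVAIL/USED bits encode wrap state in the
 * in-line path above.  A descriptor is made available by setting its
 * AVAIL bit equal to the driver's avail wrap counter and its USED bit
 * to the inverse; the avail_used_flags XOR on wrap-around maintains
 * exactly this encoding.
 */
static u16 __maybe_unused example_avail_flags(bool avail_wrap_counter)
{
	u16 avail = avail_wrap_counter;
	u16 used = !avail_wrap_counter;

	/* Wrap counter 1 yields 0x0080; wrap counter 0 yields 0x8000. */
	return (avail << VRING_PACKED_DESC_F_AVAIL) |
	       (used << VRING_PACKED_DESC_F_USED);
}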
13061ce9e605STiwei Bie 
13071ce9e605STiwei Bie static bool virtqueue_kick_prepare_packed(struct virtqueue *_vq)
13081ce9e605STiwei Bie {
13091ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
1310f51f9826STiwei Bie 	u16 new, old, off_wrap, flags, wrap_counter, event_idx;
13111ce9e605STiwei Bie 	bool needs_kick;
13121ce9e605STiwei Bie 	union {
13131ce9e605STiwei Bie 		struct {
13141ce9e605STiwei Bie 			__le16 off_wrap;
13151ce9e605STiwei Bie 			__le16 flags;
13161ce9e605STiwei Bie 		};
13171ce9e605STiwei Bie 		u32 u32;
13181ce9e605STiwei Bie 	} snapshot;
13191ce9e605STiwei Bie 
13201ce9e605STiwei Bie 	START_USE(vq);
13211ce9e605STiwei Bie 
13221ce9e605STiwei Bie 	/*
13231ce9e605STiwei Bie 	 * We need to expose the new flags value before checking notification
13241ce9e605STiwei Bie 	 * suppressions.
13251ce9e605STiwei Bie 	 */
13261ce9e605STiwei Bie 	virtio_mb(vq->weak_barriers);
13271ce9e605STiwei Bie 
1328f51f9826STiwei Bie 	old = vq->packed.next_avail_idx - vq->num_added;
1329f51f9826STiwei Bie 	new = vq->packed.next_avail_idx;
13301ce9e605STiwei Bie 	vq->num_added = 0;
13311ce9e605STiwei Bie 
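	/*
	 * Read off_wrap and flags from the device area in a single
	 * 32-bit access so both halves belong to the same snapshot.
	 */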
13321ce9e605STiwei Bie 	snapshot.u32 = *(u32 *)vq->packed.vring.device;
13331ce9e605STiwei Bie 	flags = le16_to_cpu(snapshot.flags);
13341ce9e605STiwei Bie 
13351ce9e605STiwei Bie 	LAST_ADD_TIME_CHECK(vq);
13361ce9e605STiwei Bie 	LAST_ADD_TIME_INVALID(vq);
13371ce9e605STiwei Bie 
1338f51f9826STiwei Bie 	if (flags != VRING_PACKED_EVENT_FLAG_DESC) {
13391ce9e605STiwei Bie 		needs_kick = (flags != VRING_PACKED_EVENT_FLAG_DISABLE);
1340f51f9826STiwei Bie 		goto out;
1341f51f9826STiwei Bie 	}
1342f51f9826STiwei Bie 
1343f51f9826STiwei Bie 	off_wrap = le16_to_cpu(snapshot.off_wrap);
1344f51f9826STiwei Bie 
1345f51f9826STiwei Bie 	wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR;
1346f51f9826STiwei Bie 	event_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);
1347f51f9826STiwei Bie 	if (wrap_counter != vq->packed.avail_wrap_counter)
1348f51f9826STiwei Bie 		event_idx -= vq->packed.vring.num;
1349f51f9826STiwei Bie 
1350f51f9826STiwei Bie 	needs_kick = vring_need_event(event_idx, new, old);
1351f51f9826STiwei Bie out:
13521ce9e605STiwei Bie 	END_USE(vq);
13531ce9e605STiwei Bie 	return needs_kick;
13541ce9e605STiwei Bie }
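
/*
 * Illustration only, with hypothetical numbers: take an 8-entry ring
 * where the driver just added buffers to slots 6, 7, 0 and 1, so
 * new = next_avail_idx = 2, old = new - num_added = -2 (u16
 * arithmetic), and the avail wrap counter has flipped to 0.  If the
 * device published off_wrap = 0x8007 (wrap counter 1, event index 7),
 * the counters differ, the event index is normalized to 7 - 8 = -1,
 * and vring_need_event(-1, 2, -2) is true: slot 7 of the previous
 * wrap was among those just made available, so kick.
 */
static bool __maybe_unused example_packed_need_kick(u16 off_wrap,
						    bool avail_wrap_counter,
						    u16 ring_num,
						    u16 new, u16 old)
{
	u16 wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR;
	u16 event_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);

	if (wrap_counter != avail_wrap_counter)
		event_idx -= ring_num;

	return vring_need_event(event_idx, new, old);
}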
13551ce9e605STiwei Bie 
13561ce9e605STiwei Bie static void detach_buf_packed(struct vring_virtqueue *vq,
13571ce9e605STiwei Bie 			      unsigned int id, void **ctx)
13581ce9e605STiwei Bie {
13591ce9e605STiwei Bie 	struct vring_desc_state_packed *state = NULL;
13601ce9e605STiwei Bie 	struct vring_packed_desc *desc;
13611ce9e605STiwei Bie 	unsigned int i, curr;
13621ce9e605STiwei Bie 
13631ce9e605STiwei Bie 	state = &vq->packed.desc_state[id];
13641ce9e605STiwei Bie 
13651ce9e605STiwei Bie 	/* Clear data ptr. */
13661ce9e605STiwei Bie 	state->data = NULL;
13671ce9e605STiwei Bie 
1368aeef9b47SJason Wang 	vq->packed.desc_extra[state->last].next = vq->free_head;
13691ce9e605STiwei Bie 	vq->free_head = id;
13701ce9e605STiwei Bie 	vq->vq.num_free += state->num;
13711ce9e605STiwei Bie 
13721ce9e605STiwei Bie 	if (unlikely(vq->use_dma_api)) {
13731ce9e605STiwei Bie 		curr = id;
13741ce9e605STiwei Bie 		for (i = 0; i < state->num; i++) {
13751ce9e605STiwei Bie 			vring_unmap_state_packed(vq,
13761ce9e605STiwei Bie 				&vq->packed.desc_extra[curr]);
1377aeef9b47SJason Wang 			curr = vq->packed.desc_extra[curr].next;
13781ce9e605STiwei Bie 		}
13791ce9e605STiwei Bie 	}
13801ce9e605STiwei Bie 
13811ce9e605STiwei Bie 	if (vq->indirect) {
13821ce9e605STiwei Bie 		u32 len;
13831ce9e605STiwei Bie 
13841ce9e605STiwei Bie 		/* Free the indirect table, if any, now that it's unmapped. */
13851ce9e605STiwei Bie 		desc = state->indir_desc;
13861ce9e605STiwei Bie 		if (!desc)
13871ce9e605STiwei Bie 			return;
13881ce9e605STiwei Bie 
13891ce9e605STiwei Bie 		if (vq->use_dma_api) {
13901ce9e605STiwei Bie 			len = vq->packed.desc_extra[id].len;
13911ce9e605STiwei Bie 			for (i = 0; i < len / sizeof(struct vring_packed_desc);
13921ce9e605STiwei Bie 					i++)
13931ce9e605STiwei Bie 				vring_unmap_desc_packed(vq, &desc[i]);
13941ce9e605STiwei Bie 		}
13951ce9e605STiwei Bie 		kfree(desc);
13961ce9e605STiwei Bie 		state->indir_desc = NULL;
13971ce9e605STiwei Bie 	} else if (ctx) {
13981ce9e605STiwei Bie 		*ctx = state->indir_desc;
13991ce9e605STiwei Bie 	}
14001ce9e605STiwei Bie }
14011ce9e605STiwei Bie 
14021ce9e605STiwei Bie static inline bool is_used_desc_packed(const struct vring_virtqueue *vq,
14031ce9e605STiwei Bie 				       u16 idx, bool used_wrap_counter)
14041ce9e605STiwei Bie {
14051ce9e605STiwei Bie 	bool avail, used;
14061ce9e605STiwei Bie 	u16 flags;
14071ce9e605STiwei Bie 
14081ce9e605STiwei Bie 	flags = le16_to_cpu(vq->packed.vring.desc[idx].flags);
14091ce9e605STiwei Bie 	avail = !!(flags & (1 << VRING_PACKED_DESC_F_AVAIL));
14101ce9e605STiwei Bie 	used = !!(flags & (1 << VRING_PACKED_DESC_F_USED));
14111ce9e605STiwei Bie 
14121ce9e605STiwei Bie 	return avail == used && used == used_wrap_counter;
14131ce9e605STiwei Bie }
14141ce9e605STiwei Bie 
14151ce9e605STiwei Bie static inline bool more_used_packed(const struct vring_virtqueue *vq)
14161ce9e605STiwei Bie {
14171ce9e605STiwei Bie 	return is_used_desc_packed(vq, vq->last_used_idx,
14181ce9e605STiwei Bie 			vq->packed.used_wrap_counter);
14191ce9e605STiwei Bie }
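
/*
 * Illustration only: the flags a device writes back when marking a
 * descriptor used set both AVAIL and USED to its current used wrap
 * counter, which is what the avail == used == used_wrap_counter test
 * above recognizes.
 */
static u16 __maybe_unused example_used_flags(bool used_wrap_counter)
{
	u16 bit = used_wrap_counter;

	/* Counter 1 yields 0x8080 (both bits set); counter 0 yields 0. */
	return (bit << VRING_PACKED_DESC_F_AVAIL) |
	       (bit << VRING_PACKED_DESC_F_USED);
}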
14201ce9e605STiwei Bie 
14211ce9e605STiwei Bie static void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq,
14221ce9e605STiwei Bie 					  unsigned int *len,
14231ce9e605STiwei Bie 					  void **ctx)
14241ce9e605STiwei Bie {
14251ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
14261ce9e605STiwei Bie 	u16 last_used, id;
14271ce9e605STiwei Bie 	void *ret;
14281ce9e605STiwei Bie 
14291ce9e605STiwei Bie 	START_USE(vq);
14301ce9e605STiwei Bie 
14311ce9e605STiwei Bie 	if (unlikely(vq->broken)) {
14321ce9e605STiwei Bie 		END_USE(vq);
14331ce9e605STiwei Bie 		return NULL;
14341ce9e605STiwei Bie 	}
14351ce9e605STiwei Bie 
14361ce9e605STiwei Bie 	if (!more_used_packed(vq)) {
14371ce9e605STiwei Bie 		pr_debug("No more buffers in queue\n");
14381ce9e605STiwei Bie 		END_USE(vq);
14391ce9e605STiwei Bie 		return NULL;
14401ce9e605STiwei Bie 	}
14411ce9e605STiwei Bie 
14421ce9e605STiwei Bie 	/* Only get used elements after they have been exposed by host. */
14431ce9e605STiwei Bie 	virtio_rmb(vq->weak_barriers);
14441ce9e605STiwei Bie 
14451ce9e605STiwei Bie 	last_used = vq->last_used_idx;
14461ce9e605STiwei Bie 	id = le16_to_cpu(vq->packed.vring.desc[last_used].id);
14471ce9e605STiwei Bie 	*len = le32_to_cpu(vq->packed.vring.desc[last_used].len);
14481ce9e605STiwei Bie 
14491ce9e605STiwei Bie 	if (unlikely(id >= vq->packed.vring.num)) {
14501ce9e605STiwei Bie 		BAD_RING(vq, "id %u out of range\n", id);
14511ce9e605STiwei Bie 		return NULL;
14521ce9e605STiwei Bie 	}
14531ce9e605STiwei Bie 	if (unlikely(!vq->packed.desc_state[id].data)) {
14541ce9e605STiwei Bie 		BAD_RING(vq, "id %u is not a head!\n", id);
14551ce9e605STiwei Bie 		return NULL;
14561ce9e605STiwei Bie 	}
14571ce9e605STiwei Bie 
14581ce9e605STiwei Bie 	/* detach_buf_packed clears data, so grab it now. */
14591ce9e605STiwei Bie 	ret = vq->packed.desc_state[id].data;
14601ce9e605STiwei Bie 	detach_buf_packed(vq, id, ctx);
14611ce9e605STiwei Bie 
14621ce9e605STiwei Bie 	vq->last_used_idx += vq->packed.desc_state[id].num;
14631ce9e605STiwei Bie 	if (unlikely(vq->last_used_idx >= vq->packed.vring.num)) {
14641ce9e605STiwei Bie 		vq->last_used_idx -= vq->packed.vring.num;
14651ce9e605STiwei Bie 		vq->packed.used_wrap_counter ^= 1;
14661ce9e605STiwei Bie 	}
14671ce9e605STiwei Bie 
1468f51f9826STiwei Bie 	/*
1469f51f9826STiwei Bie 	 * If we expect an interrupt for the next entry, tell host
1470f51f9826STiwei Bie 	 * by writing event index and flush out the write before
1471f51f9826STiwei Bie 	 * the read in the next get_buf call.
1472f51f9826STiwei Bie 	 */
1473f51f9826STiwei Bie 	if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DESC)
1474f51f9826STiwei Bie 		virtio_store_mb(vq->weak_barriers,
1475f51f9826STiwei Bie 				&vq->packed.vring.driver->off_wrap,
1476f51f9826STiwei Bie 				cpu_to_le16(vq->last_used_idx |
1477f51f9826STiwei Bie 					(vq->packed.used_wrap_counter <<
1478f51f9826STiwei Bie 					 VRING_PACKED_EVENT_F_WRAP_CTR)));
1479f51f9826STiwei Bie 
14801ce9e605STiwei Bie 	LAST_ADD_TIME_INVALID(vq);
14811ce9e605STiwei Bie 
14821ce9e605STiwei Bie 	END_USE(vq);
14831ce9e605STiwei Bie 	return ret;
14841ce9e605STiwei Bie }
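
/*
 * Illustration only: the event index published to the device above
 * packs the used wrap counter into bit 15 (VRING_PACKED_EVENT_F_WRAP_CTR)
 * of off_wrap; e.g. last_used_idx = 3 with used_wrap_counter = 1 is
 * stored as 0x8003.
 */
static __le16 __maybe_unused example_pack_used_event(u16 last_used_idx,
						     bool used_wrap_counter)
{
	return cpu_to_le16(last_used_idx |
			   ((u16)used_wrap_counter <<
			    VRING_PACKED_EVENT_F_WRAP_CTR));
}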
14851ce9e605STiwei Bie 
14861ce9e605STiwei Bie static void virtqueue_disable_cb_packed(struct virtqueue *_vq)
14871ce9e605STiwei Bie {
14881ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
14891ce9e605STiwei Bie 
14901ce9e605STiwei Bie 	if (vq->packed.event_flags_shadow != VRING_PACKED_EVENT_FLAG_DISABLE) {
14911ce9e605STiwei Bie 		vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
14921ce9e605STiwei Bie 		vq->packed.vring.driver->flags =
14931ce9e605STiwei Bie 			cpu_to_le16(vq->packed.event_flags_shadow);
14941ce9e605STiwei Bie 	}
14951ce9e605STiwei Bie }
14961ce9e605STiwei Bie 
14971ce9e605STiwei Bie static unsigned virtqueue_enable_cb_prepare_packed(struct virtqueue *_vq)
14981ce9e605STiwei Bie {
14991ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
15001ce9e605STiwei Bie 
15011ce9e605STiwei Bie 	START_USE(vq);
15021ce9e605STiwei Bie 
15031ce9e605STiwei Bie 	/*
15041ce9e605STiwei Bie 	 * We optimistically turn back on interrupts, then check if there was
15051ce9e605STiwei Bie 	 * more to do.
15061ce9e605STiwei Bie 	 */
15071ce9e605STiwei Bie 
1508f51f9826STiwei Bie 	if (vq->event) {
1509f51f9826STiwei Bie 		vq->packed.vring.driver->off_wrap =
1510f51f9826STiwei Bie 			cpu_to_le16(vq->last_used_idx |
1511f51f9826STiwei Bie 				(vq->packed.used_wrap_counter <<
1512f51f9826STiwei Bie 				 VRING_PACKED_EVENT_F_WRAP_CTR));
1513f51f9826STiwei Bie 		/*
1514f51f9826STiwei Bie 		 * We need to update event offset and event wrap
1515f51f9826STiwei Bie 		 * counter first before updating event flags.
1516f51f9826STiwei Bie 		 */
1517f51f9826STiwei Bie 		virtio_wmb(vq->weak_barriers);
1518f51f9826STiwei Bie 	}
1519f51f9826STiwei Bie 
15201ce9e605STiwei Bie 	if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) {
1521f51f9826STiwei Bie 		vq->packed.event_flags_shadow = vq->event ?
1522f51f9826STiwei Bie 				VRING_PACKED_EVENT_FLAG_DESC :
1523f51f9826STiwei Bie 				VRING_PACKED_EVENT_FLAG_ENABLE;
15241ce9e605STiwei Bie 		vq->packed.vring.driver->flags =
15251ce9e605STiwei Bie 				cpu_to_le16(vq->packed.event_flags_shadow);
15261ce9e605STiwei Bie 	}
15271ce9e605STiwei Bie 
15281ce9e605STiwei Bie 	END_USE(vq);
15291ce9e605STiwei Bie 	return vq->last_used_idx | ((u16)vq->packed.used_wrap_counter <<
15301ce9e605STiwei Bie 			VRING_PACKED_EVENT_F_WRAP_CTR);
15311ce9e605STiwei Bie }
15321ce9e605STiwei Bie 
15331ce9e605STiwei Bie static bool virtqueue_poll_packed(struct virtqueue *_vq, u16 off_wrap)
15341ce9e605STiwei Bie {
15351ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
15361ce9e605STiwei Bie 	bool wrap_counter;
15371ce9e605STiwei Bie 	u16 used_idx;
15381ce9e605STiwei Bie 
15391ce9e605STiwei Bie 	wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR;
15401ce9e605STiwei Bie 	used_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);
15411ce9e605STiwei Bie 
15421ce9e605STiwei Bie 	return is_used_desc_packed(vq, used_idx, wrap_counter);
15431ce9e605STiwei Bie }
15441ce9e605STiwei Bie 
15451ce9e605STiwei Bie static bool virtqueue_enable_cb_delayed_packed(struct virtqueue *_vq)
15461ce9e605STiwei Bie {
15471ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
15481ce9e605STiwei Bie 	u16 used_idx, wrap_counter;
1549f51f9826STiwei Bie 	u16 bufs;
15501ce9e605STiwei Bie 
15511ce9e605STiwei Bie 	START_USE(vq);
15521ce9e605STiwei Bie 
15531ce9e605STiwei Bie 	/*
15541ce9e605STiwei Bie 	 * We optimistically turn back on interrupts, then check if there was
15551ce9e605STiwei Bie 	 * more to do.
15561ce9e605STiwei Bie 	 */
15571ce9e605STiwei Bie 
1558f51f9826STiwei Bie 	if (vq->event) {
1559f51f9826STiwei Bie 		/* TODO: tune this threshold */
1560f51f9826STiwei Bie 		bufs = (vq->packed.vring.num - vq->vq.num_free) * 3 / 4;
15611ce9e605STiwei Bie 		wrap_counter = vq->packed.used_wrap_counter;
15621ce9e605STiwei Bie 
1563f51f9826STiwei Bie 		used_idx = vq->last_used_idx + bufs;
1564f51f9826STiwei Bie 		if (used_idx >= vq->packed.vring.num) {
1565f51f9826STiwei Bie 			used_idx -= vq->packed.vring.num;
1566f51f9826STiwei Bie 			wrap_counter ^= 1;
1567f51f9826STiwei Bie 		}
1568f51f9826STiwei Bie 
1569f51f9826STiwei Bie 		vq->packed.vring.driver->off_wrap = cpu_to_le16(used_idx |
1570f51f9826STiwei Bie 			(wrap_counter << VRING_PACKED_EVENT_F_WRAP_CTR));
1571f51f9826STiwei Bie 
1572f51f9826STiwei Bie 		/*
1573f51f9826STiwei Bie 		 * We need to update event offset and event wrap
1574f51f9826STiwei Bie 		 * counter first before updating event flags.
1575f51f9826STiwei Bie 		 */
1576f51f9826STiwei Bie 		virtio_wmb(vq->weak_barriers);
1577f51f9826STiwei Bie 	}
1578f51f9826STiwei Bie 
15791ce9e605STiwei Bie 	if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) {
1580f51f9826STiwei Bie 		vq->packed.event_flags_shadow = vq->event ?
1581f51f9826STiwei Bie 				VRING_PACKED_EVENT_FLAG_DESC :
1582f51f9826STiwei Bie 				VRING_PACKED_EVENT_FLAG_ENABLE;
15831ce9e605STiwei Bie 		vq->packed.vring.driver->flags =
15841ce9e605STiwei Bie 				cpu_to_le16(vq->packed.event_flags_shadow);
15851ce9e605STiwei Bie 	}
15861ce9e605STiwei Bie 
15871ce9e605STiwei Bie 	/*
15881ce9e605STiwei Bie 	 * We need to update event suppression structure first
15891ce9e605STiwei Bie 	 * before re-checking for more used buffers.
15901ce9e605STiwei Bie 	 */
15911ce9e605STiwei Bie 	virtio_mb(vq->weak_barriers);
15921ce9e605STiwei Bie 
159340ce7919SMarvin Liu 	if (is_used_desc_packed(vq,
159440ce7919SMarvin Liu 				vq->last_used_idx,
159540ce7919SMarvin Liu 				vq->packed.used_wrap_counter)) {
15961ce9e605STiwei Bie 		END_USE(vq);
15971ce9e605STiwei Bie 		return false;
15981ce9e605STiwei Bie 	}
15991ce9e605STiwei Bie 
16001ce9e605STiwei Bie 	END_USE(vq);
16011ce9e605STiwei Bie 	return true;
16021ce9e605STiwei Bie }
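
/*
 * Illustration only: the delayed path above asks for an interrupt only
 * after roughly 3/4 of the outstanding buffers complete.  With a
 * hypothetical 256-entry ring and 64 slots free, 192 buffers are in
 * flight, so the event index lands 192 * 3 / 4 = 144 entries past
 * last_used_idx (wrapping, and flipping the advertised wrap counter,
 * if that runs off the end of the ring).
 */
static u16 __maybe_unused example_delayed_used_event(u16 ring_num, u16 num_free,
						     u16 last_used_idx)
{
	u16 bufs = (ring_num - num_free) * 3 / 4;

	return (last_used_idx + bufs) % ring_num;
}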
16031ce9e605STiwei Bie 
16041ce9e605STiwei Bie static void *virtqueue_detach_unused_buf_packed(struct virtqueue *_vq)
16051ce9e605STiwei Bie {
16061ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
16071ce9e605STiwei Bie 	unsigned int i;
16081ce9e605STiwei Bie 	void *buf;
16091ce9e605STiwei Bie 
16101ce9e605STiwei Bie 	START_USE(vq);
16111ce9e605STiwei Bie 
16121ce9e605STiwei Bie 	for (i = 0; i < vq->packed.vring.num; i++) {
16131ce9e605STiwei Bie 		if (!vq->packed.desc_state[i].data)
16141ce9e605STiwei Bie 			continue;
16151ce9e605STiwei Bie 		/* detach_buf clears data, so grab it now. */
16161ce9e605STiwei Bie 		buf = vq->packed.desc_state[i].data;
16171ce9e605STiwei Bie 		detach_buf_packed(vq, i, NULL);
16181ce9e605STiwei Bie 		END_USE(vq);
16191ce9e605STiwei Bie 		return buf;
16201ce9e605STiwei Bie 	}
16211ce9e605STiwei Bie 	/* That should have freed everything. */
16221ce9e605STiwei Bie 	BUG_ON(vq->vq.num_free != vq->packed.vring.num);
16231ce9e605STiwei Bie 
16241ce9e605STiwei Bie 	END_USE(vq);
16251ce9e605STiwei Bie 	return NULL;
16261ce9e605STiwei Bie }
16271ce9e605STiwei Bie 
16285a222421SJason Wang static struct vring_desc_extra *vring_alloc_desc_extra(struct vring_virtqueue *vq,
16295a222421SJason Wang 						       unsigned int num)
16305a222421SJason Wang {
16315a222421SJason Wang 	struct vring_desc_extra *desc_extra;
16325a222421SJason Wang 	unsigned int i;
16335a222421SJason Wang 
16345a222421SJason Wang 	desc_extra = kmalloc_array(num, sizeof(struct vring_desc_extra),
16355a222421SJason Wang 				   GFP_KERNEL);
16365a222421SJason Wang 	if (!desc_extra)
16375a222421SJason Wang 		return NULL;
16385a222421SJason Wang 
16395a222421SJason Wang 	memset(desc_extra, 0, num * sizeof(struct vring_desc_extra));
16405a222421SJason Wang 
16415a222421SJason Wang 	for (i = 0; i < num - 1; i++)
16425a222421SJason Wang 		desc_extra[i].next = i + 1;
16435a222421SJason Wang 
16445a222421SJason Wang 	return desc_extra;
16455a222421SJason Wang }
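
/*
 * Illustration only: for num = 4 the table above forms the chain
 * 0 -> 1 -> 2 -> 3, the final entry's next staying 0 from the memset;
 * that tail link is never followed, since num_free runs out first.
 * A hypothetical walker:
 */
static u16 __maybe_unused example_walk_free_chain(struct vring_desc_extra *extra,
						  u16 head, unsigned int steps)
{
	while (steps--)
		head = extra[head].next;

	return head;
}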
16465a222421SJason Wang 
16471ce9e605STiwei Bie static struct virtqueue *vring_create_virtqueue_packed(
16481ce9e605STiwei Bie 	unsigned int index,
16491ce9e605STiwei Bie 	unsigned int num,
16501ce9e605STiwei Bie 	unsigned int vring_align,
16511ce9e605STiwei Bie 	struct virtio_device *vdev,
16521ce9e605STiwei Bie 	bool weak_barriers,
16531ce9e605STiwei Bie 	bool may_reduce_num,
16541ce9e605STiwei Bie 	bool context,
16551ce9e605STiwei Bie 	bool (*notify)(struct virtqueue *),
16561ce9e605STiwei Bie 	void (*callback)(struct virtqueue *),
16571ce9e605STiwei Bie 	const char *name)
16581ce9e605STiwei Bie {
16591ce9e605STiwei Bie 	struct vring_virtqueue *vq;
16601ce9e605STiwei Bie 	struct vring_packed_desc *ring;
16611ce9e605STiwei Bie 	struct vring_packed_desc_event *driver, *device;
16621ce9e605STiwei Bie 	dma_addr_t ring_dma_addr, driver_event_dma_addr, device_event_dma_addr;
16631ce9e605STiwei Bie 	size_t ring_size_in_bytes, event_size_in_bytes;
16641ce9e605STiwei Bie 
16651ce9e605STiwei Bie 	ring_size_in_bytes = num * sizeof(struct vring_packed_desc);
16661ce9e605STiwei Bie 
16671ce9e605STiwei Bie 	ring = vring_alloc_queue(vdev, ring_size_in_bytes,
16681ce9e605STiwei Bie 				 &ring_dma_addr,
16691ce9e605STiwei Bie 				 GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
16701ce9e605STiwei Bie 	if (!ring)
16711ce9e605STiwei Bie 		goto err_ring;
16721ce9e605STiwei Bie 
16731ce9e605STiwei Bie 	event_size_in_bytes = sizeof(struct vring_packed_desc_event);
16741ce9e605STiwei Bie 
16751ce9e605STiwei Bie 	driver = vring_alloc_queue(vdev, event_size_in_bytes,
16761ce9e605STiwei Bie 				   &driver_event_dma_addr,
16771ce9e605STiwei Bie 				   GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
16781ce9e605STiwei Bie 	if (!driver)
16791ce9e605STiwei Bie 		goto err_driver;
16801ce9e605STiwei Bie 
16811ce9e605STiwei Bie 	device = vring_alloc_queue(vdev, event_size_in_bytes,
16821ce9e605STiwei Bie 				   &device_event_dma_addr,
16831ce9e605STiwei Bie 				   GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
16841ce9e605STiwei Bie 	if (!device)
16851ce9e605STiwei Bie 		goto err_device;
16861ce9e605STiwei Bie 
16871ce9e605STiwei Bie 	vq = kmalloc(sizeof(*vq), GFP_KERNEL);
16881ce9e605STiwei Bie 	if (!vq)
16891ce9e605STiwei Bie 		goto err_vq;
16901ce9e605STiwei Bie 
16911ce9e605STiwei Bie 	vq->vq.callback = callback;
16921ce9e605STiwei Bie 	vq->vq.vdev = vdev;
16931ce9e605STiwei Bie 	vq->vq.name = name;
16941ce9e605STiwei Bie 	vq->vq.num_free = num;
16951ce9e605STiwei Bie 	vq->vq.index = index;
16961ce9e605STiwei Bie 	vq->we_own_ring = true;
16971ce9e605STiwei Bie 	vq->notify = notify;
16981ce9e605STiwei Bie 	vq->weak_barriers = weak_barriers;
16991ce9e605STiwei Bie 	vq->broken = false;
17001ce9e605STiwei Bie 	vq->last_used_idx = 0;
17018d622d21SMichael S. Tsirkin 	vq->event_triggered = false;
17021ce9e605STiwei Bie 	vq->num_added = 0;
17031ce9e605STiwei Bie 	vq->packed_ring = true;
17041ce9e605STiwei Bie 	vq->use_dma_api = vring_use_dma_api(vdev);
17051ce9e605STiwei Bie #ifdef DEBUG
17061ce9e605STiwei Bie 	vq->in_use = false;
17071ce9e605STiwei Bie 	vq->last_add_time_valid = false;
17081ce9e605STiwei Bie #endif
17091ce9e605STiwei Bie 
17101ce9e605STiwei Bie 	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
17111ce9e605STiwei Bie 		!context;
17121ce9e605STiwei Bie 	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
17131ce9e605STiwei Bie 
171445383fb0STiwei Bie 	if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
171545383fb0STiwei Bie 		vq->weak_barriers = false;
171645383fb0STiwei Bie 
17171ce9e605STiwei Bie 	vq->packed.ring_dma_addr = ring_dma_addr;
17181ce9e605STiwei Bie 	vq->packed.driver_event_dma_addr = driver_event_dma_addr;
17191ce9e605STiwei Bie 	vq->packed.device_event_dma_addr = device_event_dma_addr;
17201ce9e605STiwei Bie 
17211ce9e605STiwei Bie 	vq->packed.ring_size_in_bytes = ring_size_in_bytes;
17221ce9e605STiwei Bie 	vq->packed.event_size_in_bytes = event_size_in_bytes;
17231ce9e605STiwei Bie 
17241ce9e605STiwei Bie 	vq->packed.vring.num = num;
17251ce9e605STiwei Bie 	vq->packed.vring.desc = ring;
17261ce9e605STiwei Bie 	vq->packed.vring.driver = driver;
17271ce9e605STiwei Bie 	vq->packed.vring.device = device;
17281ce9e605STiwei Bie 
17291ce9e605STiwei Bie 	vq->packed.next_avail_idx = 0;
17301ce9e605STiwei Bie 	vq->packed.avail_wrap_counter = 1;
17311ce9e605STiwei Bie 	vq->packed.used_wrap_counter = 1;
17321ce9e605STiwei Bie 	vq->packed.event_flags_shadow = 0;
17331ce9e605STiwei Bie 	vq->packed.avail_used_flags = 1 << VRING_PACKED_DESC_F_AVAIL;
17341ce9e605STiwei Bie 
17351ce9e605STiwei Bie 	vq->packed.desc_state = kmalloc_array(num,
17361ce9e605STiwei Bie 			sizeof(struct vring_desc_state_packed),
17371ce9e605STiwei Bie 			GFP_KERNEL);
17381ce9e605STiwei Bie 	if (!vq->packed.desc_state)
17391ce9e605STiwei Bie 		goto err_desc_state;
17401ce9e605STiwei Bie 
17411ce9e605STiwei Bie 	memset(vq->packed.desc_state, 0,
17421ce9e605STiwei Bie 		num * sizeof(struct vring_desc_state_packed));
17431ce9e605STiwei Bie 
17441ce9e605STiwei Bie 	/* Put everything in free lists. */
17451ce9e605STiwei Bie 	vq->free_head = 0;
17461ce9e605STiwei Bie 
17475a222421SJason Wang 	vq->packed.desc_extra = vring_alloc_desc_extra(vq, num);
17481ce9e605STiwei Bie 	if (!vq->packed.desc_extra)
17491ce9e605STiwei Bie 		goto err_desc_extra;
17501ce9e605STiwei Bie 
17511ce9e605STiwei Bie 	/* No callback?  Tell other side not to bother us. */
17521ce9e605STiwei Bie 	if (!callback) {
17531ce9e605STiwei Bie 		vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
17541ce9e605STiwei Bie 		vq->packed.vring.driver->flags =
17551ce9e605STiwei Bie 			cpu_to_le16(vq->packed.event_flags_shadow);
17561ce9e605STiwei Bie 	}
17571ce9e605STiwei Bie 
1758e152d8afSDan Carpenter 	list_add_tail(&vq->vq.list, &vdev->vqs);
17591ce9e605STiwei Bie 	return &vq->vq;
17601ce9e605STiwei Bie 
17611ce9e605STiwei Bie err_desc_extra:
17621ce9e605STiwei Bie 	kfree(vq->packed.desc_state);
17631ce9e605STiwei Bie err_desc_state:
17641ce9e605STiwei Bie 	kfree(vq);
17651ce9e605STiwei Bie err_vq:
1766ae93d8eaSDan Carpenter 	vring_free_queue(vdev, event_size_in_bytes, device, device_event_dma_addr);
17671ce9e605STiwei Bie err_device:
1768ae93d8eaSDan Carpenter 	vring_free_queue(vdev, event_size_in_bytes, driver, driver_event_dma_addr);
17691ce9e605STiwei Bie err_driver:
17701ce9e605STiwei Bie 	vring_free_queue(vdev, ring_size_in_bytes, ring, ring_dma_addr);
17711ce9e605STiwei Bie err_ring:
17721ce9e605STiwei Bie 	return NULL;
17731ce9e605STiwei Bie }
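
/*
 * Illustration only: the three DMA-coherent allocations above, for a
 * hypothetical 256-entry ring, come to 256 * 16 = 4096 bytes of
 * descriptors plus two 4-byte vring_packed_desc_event structures (the
 * driver and device event suppression areas).
 */
static size_t __maybe_unused example_packed_ring_bytes(unsigned int num)
{
	return num * sizeof(struct vring_packed_desc) +
	       2 * sizeof(struct vring_packed_desc_event);
}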
17741ce9e605STiwei Bie 
17751ce9e605STiwei Bie 
17761ce9e605STiwei Bie /*
1777e6f633e5STiwei Bie  * Generic functions and exported symbols.
1778e6f633e5STiwei Bie  */
1779e6f633e5STiwei Bie 
1780e6f633e5STiwei Bie static inline int virtqueue_add(struct virtqueue *_vq,
1781e6f633e5STiwei Bie 				struct scatterlist *sgs[],
1782e6f633e5STiwei Bie 				unsigned int total_sg,
1783e6f633e5STiwei Bie 				unsigned int out_sgs,
1784e6f633e5STiwei Bie 				unsigned int in_sgs,
1785e6f633e5STiwei Bie 				void *data,
1786e6f633e5STiwei Bie 				void *ctx,
1787e6f633e5STiwei Bie 				gfp_t gfp)
1788e6f633e5STiwei Bie {
17891ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
17901ce9e605STiwei Bie 
17911ce9e605STiwei Bie 	return vq->packed_ring ? virtqueue_add_packed(_vq, sgs, total_sg,
17921ce9e605STiwei Bie 					out_sgs, in_sgs, data, ctx, gfp) :
17931ce9e605STiwei Bie 				 virtqueue_add_split(_vq, sgs, total_sg,
1794e6f633e5STiwei Bie 					out_sgs, in_sgs, data, ctx, gfp);
1795e6f633e5STiwei Bie }
1796e6f633e5STiwei Bie 
1797e6f633e5STiwei Bie /**
1798e6f633e5STiwei Bie  * virtqueue_add_sgs - expose buffers to other end
1799a5581206SJiang Biao  * @_vq: the struct virtqueue we're talking about.
1800e6f633e5STiwei Bie  * @sgs: array of terminated scatterlists.
1801a5581206SJiang Biao  * @out_sgs: the number of scatterlists readable by other side
1802a5581206SJiang Biao  * @in_sgs: the number of scatterlists which are writable (after readable ones)
1803e6f633e5STiwei Bie  * @data: the token identifying the buffer.
1804e6f633e5STiwei Bie  * @gfp: how to do memory allocations (if necessary).
1805e6f633e5STiwei Bie  *
1806e6f633e5STiwei Bie  * Caller must ensure we don't call this with other virtqueue operations
1807e6f633e5STiwei Bie  * at the same time (except where noted).
1808e6f633e5STiwei Bie  *
1809e6f633e5STiwei Bie  * Returns zero or a negative error (i.e. ENOSPC, ENOMEM, EIO).
1810e6f633e5STiwei Bie  */
1811e6f633e5STiwei Bie int virtqueue_add_sgs(struct virtqueue *_vq,
1812e6f633e5STiwei Bie 		      struct scatterlist *sgs[],
1813e6f633e5STiwei Bie 		      unsigned int out_sgs,
1814e6f633e5STiwei Bie 		      unsigned int in_sgs,
1815e6f633e5STiwei Bie 		      void *data,
1816e6f633e5STiwei Bie 		      gfp_t gfp)
1817e6f633e5STiwei Bie {
1818e6f633e5STiwei Bie 	unsigned int i, total_sg = 0;
1819e6f633e5STiwei Bie 
1820e6f633e5STiwei Bie 	/* Count them first. */
1821e6f633e5STiwei Bie 	for (i = 0; i < out_sgs + in_sgs; i++) {
1822e6f633e5STiwei Bie 		struct scatterlist *sg;
1823e6f633e5STiwei Bie 
1824e6f633e5STiwei Bie 		for (sg = sgs[i]; sg; sg = sg_next(sg))
1825e6f633e5STiwei Bie 			total_sg++;
1826e6f633e5STiwei Bie 	}
1827e6f633e5STiwei Bie 	return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs,
1828e6f633e5STiwei Bie 			     data, NULL, gfp);
1829e6f633e5STiwei Bie }
1830e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
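
/*
 * Illustration only, a hypothetical caller: queue one device-readable
 * header followed by one device-writable response buffer.  Readable
 * scatterlists always precede writable ones in @sgs.
 */
static int __maybe_unused example_queue_request(struct virtqueue *vq,
						void *hdr, unsigned int hdr_len,
						void *resp, unsigned int resp_len,
						void *token)
{
	struct scatterlist hdr_sg, resp_sg;
	struct scatterlist *sgs[] = { &hdr_sg, &resp_sg };

	sg_init_one(&hdr_sg, hdr, hdr_len);
	sg_init_one(&resp_sg, resp, resp_len);

	return virtqueue_add_sgs(vq, sgs, 1, 1, token, GFP_ATOMIC);
}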
1831e6f633e5STiwei Bie 
1832e6f633e5STiwei Bie /**
1833e6f633e5STiwei Bie  * virtqueue_add_outbuf - expose output buffers to other end
1834e6f633e5STiwei Bie  * @vq: the struct virtqueue we're talking about.
1835e6f633e5STiwei Bie  * @sg: scatterlist (must be well-formed and terminated!)
1836e6f633e5STiwei Bie  * @num: the number of entries in @sg readable by other side
1837e6f633e5STiwei Bie  * @data: the token identifying the buffer.
1838e6f633e5STiwei Bie  * @gfp: how to do memory allocations (if necessary).
1839e6f633e5STiwei Bie  *
1840e6f633e5STiwei Bie  * Caller must ensure we don't call this with other virtqueue operations
1841e6f633e5STiwei Bie  * at the same time (except where noted).
1842e6f633e5STiwei Bie  *
1843e6f633e5STiwei Bie  * Returns zero or a negative error (i.e. ENOSPC, ENOMEM, EIO).
1844e6f633e5STiwei Bie  */
1845e6f633e5STiwei Bie int virtqueue_add_outbuf(struct virtqueue *vq,
1846e6f633e5STiwei Bie 			 struct scatterlist *sg, unsigned int num,
1847e6f633e5STiwei Bie 			 void *data,
1848e6f633e5STiwei Bie 			 gfp_t gfp)
1849e6f633e5STiwei Bie {
1850e6f633e5STiwei Bie 	return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, gfp);
1851e6f633e5STiwei Bie }
1852e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);
1853e6f633e5STiwei Bie 
1854e6f633e5STiwei Bie /**
1855e6f633e5STiwei Bie  * virtqueue_add_inbuf - expose input buffers to other end
1856e6f633e5STiwei Bie  * @vq: the struct virtqueue we're talking about.
1857e6f633e5STiwei Bie  * @sg: scatterlist (must be well-formed and terminated!)
1858e6f633e5STiwei Bie  * @num: the number of entries in @sg writable by other side
1859e6f633e5STiwei Bie  * @data: the token identifying the buffer.
1860e6f633e5STiwei Bie  * @gfp: how to do memory allocations (if necessary).
1861e6f633e5STiwei Bie  *
1862e6f633e5STiwei Bie  * Caller must ensure we don't call this with other virtqueue operations
1863e6f633e5STiwei Bie  * at the same time (except where noted).
1864e6f633e5STiwei Bie  *
1865e6f633e5STiwei Bie  * Returns zero or a negative error (i.e. ENOSPC, ENOMEM, EIO).
1866e6f633e5STiwei Bie  */
1867e6f633e5STiwei Bie int virtqueue_add_inbuf(struct virtqueue *vq,
1868e6f633e5STiwei Bie 			struct scatterlist *sg, unsigned int num,
1869e6f633e5STiwei Bie 			void *data,
1870e6f633e5STiwei Bie 			gfp_t gfp)
1871e6f633e5STiwei Bie {
1872e6f633e5STiwei Bie 	return virtqueue_add(vq, &sg, num, 0, 1, data, NULL, gfp);
1873e6f633e5STiwei Bie }
1874e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);
1875e6f633e5STiwei Bie 
1876e6f633e5STiwei Bie /**
1877e6f633e5STiwei Bie  * virtqueue_add_inbuf_ctx - expose input buffers to other end
1878e6f633e5STiwei Bie  * @vq: the struct virtqueue we're talking about.
1879e6f633e5STiwei Bie  * @sg: scatterlist (must be well-formed and terminated!)
1880e6f633e5STiwei Bie  * @num: the number of entries in @sg writable by other side
1881e6f633e5STiwei Bie  * @data: the token identifying the buffer.
1882e6f633e5STiwei Bie  * @ctx: extra context for the token
1883e6f633e5STiwei Bie  * @gfp: how to do memory allocations (if necessary).
1884e6f633e5STiwei Bie  *
1885e6f633e5STiwei Bie  * Caller must ensure we don't call this with other virtqueue operations
1886e6f633e5STiwei Bie  * at the same time (except where noted).
1887e6f633e5STiwei Bie  *
1888e6f633e5STiwei Bie  * Returns zero or a negative error (i.e. ENOSPC, ENOMEM, EIO).
1889e6f633e5STiwei Bie  */
1890e6f633e5STiwei Bie int virtqueue_add_inbuf_ctx(struct virtqueue *vq,
1891e6f633e5STiwei Bie 			struct scatterlist *sg, unsigned int num,
1892e6f633e5STiwei Bie 			void *data,
1893e6f633e5STiwei Bie 			void *ctx,
1894e6f633e5STiwei Bie 			gfp_t gfp)
1895e6f633e5STiwei Bie {
1896e6f633e5STiwei Bie 	return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, gfp);
1897e6f633e5STiwei Bie }
1898e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_ctx);
1899e6f633e5STiwei Bie 
1900e6f633e5STiwei Bie /**
1901e6f633e5STiwei Bie  * virtqueue_kick_prepare - first half of split virtqueue_kick call.
1902a5581206SJiang Biao  * @_vq: the struct virtqueue
1903e6f633e5STiwei Bie  *
1904e6f633e5STiwei Bie  * Instead of virtqueue_kick(), you can do:
1905e6f633e5STiwei Bie  *	if (virtqueue_kick_prepare(vq))
1906e6f633e5STiwei Bie  *		virtqueue_notify(vq);
1907e6f633e5STiwei Bie  *
1908e6f633e5STiwei Bie  * This is sometimes useful because the virtqueue_kick_prepare() needs
1909e6f633e5STiwei Bie  * to be serialized, but the actual virtqueue_notify() call does not.
1910e6f633e5STiwei Bie  */
1911e6f633e5STiwei Bie bool virtqueue_kick_prepare(struct virtqueue *_vq)
1912e6f633e5STiwei Bie {
19131ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
19141ce9e605STiwei Bie 
19151ce9e605STiwei Bie 	return vq->packed_ring ? virtqueue_kick_prepare_packed(_vq) :
19161ce9e605STiwei Bie 				 virtqueue_kick_prepare_split(_vq);
1917e6f633e5STiwei Bie }
1918e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);
1919e6f633e5STiwei Bie 
1920e6f633e5STiwei Bie /**
1921e6f633e5STiwei Bie  * virtqueue_notify - second half of split virtqueue_kick call.
1922a5581206SJiang Biao  * @_vq: the struct virtqueue
1923e6f633e5STiwei Bie  *
1924e6f633e5STiwei Bie  * This does not need to be serialized.
1925e6f633e5STiwei Bie  *
1926e6f633e5STiwei Bie  * Returns false if host notify failed or queue is broken, otherwise true.
1927e6f633e5STiwei Bie  */
1928e6f633e5STiwei Bie bool virtqueue_notify(struct virtqueue *_vq)
1929e6f633e5STiwei Bie {
1930e6f633e5STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
1931e6f633e5STiwei Bie 
1932e6f633e5STiwei Bie 	if (unlikely(vq->broken))
1933e6f633e5STiwei Bie 		return false;
1934e6f633e5STiwei Bie 
1935e6f633e5STiwei Bie 	/* Prod other side to tell it about changes. */
1936e6f633e5STiwei Bie 	if (!vq->notify(_vq)) {
1937e6f633e5STiwei Bie 		vq->broken = true;
1938e6f633e5STiwei Bie 		return false;
1939e6f633e5STiwei Bie 	}
1940e6f633e5STiwei Bie 	return true;
1941e6f633e5STiwei Bie }
1942e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_notify);
1943e6f633e5STiwei Bie 
1944e6f633e5STiwei Bie /**
1945e6f633e5STiwei Bie  * virtqueue_kick - update after add_buf
1946e6f633e5STiwei Bie  * @vq: the struct virtqueue
1947e6f633e5STiwei Bie  *
1948e6f633e5STiwei Bie  * After one or more virtqueue_add_* calls, invoke this to kick
1949e6f633e5STiwei Bie  * the other side.
1950e6f633e5STiwei Bie  *
1951e6f633e5STiwei Bie  * Caller must ensure we don't call this with other virtqueue
1952e6f633e5STiwei Bie  * operations at the same time (except where noted).
1953e6f633e5STiwei Bie  *
1954e6f633e5STiwei Bie  * Returns false if kick failed, otherwise true.
1955e6f633e5STiwei Bie  */
1956e6f633e5STiwei Bie bool virtqueue_kick(struct virtqueue *vq)
1957e6f633e5STiwei Bie {
1958e6f633e5STiwei Bie 	if (virtqueue_kick_prepare(vq))
1959e6f633e5STiwei Bie 		return virtqueue_notify(vq);
1960e6f633e5STiwei Bie 	return true;
1961e6f633e5STiwei Bie }
1962e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_kick);
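
/*
 * Illustration only, a hypothetical caller: the two-half form lets a
 * driver serialize just the prepare step and issue the potentially
 * expensive notification after dropping its lock.
 */
static void __maybe_unused example_kick_outside_lock(struct virtqueue *vq,
						     spinlock_t *lock)
{
	bool needs_kick;

	spin_lock(lock);
	needs_kick = virtqueue_kick_prepare(vq);
	spin_unlock(lock);

	if (needs_kick)
		virtqueue_notify(vq);
}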
1963e6f633e5STiwei Bie 
1964e6f633e5STiwei Bie /**
196531c11db6SYang Li  * virtqueue_get_buf_ctx - get the next used buffer
1966a5581206SJiang Biao  * @_vq: the struct virtqueue we're talking about.
1967e6f633e5STiwei Bie  * @len: the length written into the buffer
1968a5581206SJiang Biao  * @ctx: extra context for the token
1969e6f633e5STiwei Bie  *
1970e6f633e5STiwei Bie  * If the device wrote data into the buffer, @len will be set to the
1971e6f633e5STiwei Bie  * amount written.  This means you don't need to clear the buffer
1972e6f633e5STiwei Bie  * beforehand to ensure there's no data leakage in the case of short
1973e6f633e5STiwei Bie  * writes.
1974e6f633e5STiwei Bie  *
1975e6f633e5STiwei Bie  * Caller must ensure we don't call this with other virtqueue
1976e6f633e5STiwei Bie  * operations at the same time (except where noted).
1977e6f633e5STiwei Bie  *
1978e6f633e5STiwei Bie  * Returns NULL if there are no used buffers, or the "data" token
1979e6f633e5STiwei Bie  * handed to virtqueue_add_*().
1980e6f633e5STiwei Bie  */
1981e6f633e5STiwei Bie void *virtqueue_get_buf_ctx(struct virtqueue *_vq, unsigned int *len,
1982e6f633e5STiwei Bie 			    void **ctx)
1983e6f633e5STiwei Bie {
19841ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
19851ce9e605STiwei Bie 
19861ce9e605STiwei Bie 	return vq->packed_ring ? virtqueue_get_buf_ctx_packed(_vq, len, ctx) :
19871ce9e605STiwei Bie 				 virtqueue_get_buf_ctx_split(_vq, len, ctx);
1988e6f633e5STiwei Bie }
1989e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_get_buf_ctx);
1990e6f633e5STiwei Bie 
1991e6f633e5STiwei Bie void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
1992e6f633e5STiwei Bie {
1993e6f633e5STiwei Bie 	return virtqueue_get_buf_ctx(_vq, len, NULL);
1994e6f633e5STiwei Bie }
1995e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_get_buf);
1996e6f633e5STiwei Bie /**
1997e6f633e5STiwei Bie  * virtqueue_disable_cb - disable callbacks
1998a5581206SJiang Biao  * @_vq: the struct virtqueue we're talking about.
1999e6f633e5STiwei Bie  *
2000e6f633e5STiwei Bie  * Note that this is not necessarily synchronous, hence unreliable and only
2001e6f633e5STiwei Bie  * useful as an optimization.
2002e6f633e5STiwei Bie  *
2003e6f633e5STiwei Bie  * Unlike other operations, this need not be serialized.
2004e6f633e5STiwei Bie  */
2005e6f633e5STiwei Bie void virtqueue_disable_cb(struct virtqueue *_vq)
2006e6f633e5STiwei Bie {
20071ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
20081ce9e605STiwei Bie 
20098d622d21SMichael S. Tsirkin 	/* If device triggered an event already it won't trigger one again:
20108d622d21SMichael S. Tsirkin 	 * no need to disable.
20118d622d21SMichael S. Tsirkin 	 */
20128d622d21SMichael S. Tsirkin 	if (vq->event_triggered)
20138d622d21SMichael S. Tsirkin 		return;
20148d622d21SMichael S. Tsirkin 
20151ce9e605STiwei Bie 	if (vq->packed_ring)
20161ce9e605STiwei Bie 		virtqueue_disable_cb_packed(_vq);
20171ce9e605STiwei Bie 	else
2018e6f633e5STiwei Bie 		virtqueue_disable_cb_split(_vq);
2019e6f633e5STiwei Bie }
2020e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
2021e6f633e5STiwei Bie 
2022e6f633e5STiwei Bie /**
2023e6f633e5STiwei Bie  * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
2024a5581206SJiang Biao  * @_vq: the struct virtqueue we're talking about.
2025e6f633e5STiwei Bie  *
2026e6f633e5STiwei Bie  * This re-enables callbacks; it returns current queue state
2027e6f633e5STiwei Bie  * in an opaque unsigned value. This value should be later tested by
2028e6f633e5STiwei Bie  * virtqueue_poll, to detect a possible race between the driver checking for
2029e6f633e5STiwei Bie  * more work, and enabling callbacks.
2030e6f633e5STiwei Bie  *
2031e6f633e5STiwei Bie  * Caller must ensure we don't call this with other virtqueue
2032e6f633e5STiwei Bie  * operations at the same time (except where noted).
2033e6f633e5STiwei Bie  */
2034e6f633e5STiwei Bie unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
2035e6f633e5STiwei Bie {
20361ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
20371ce9e605STiwei Bie 
20388d622d21SMichael S. Tsirkin 	if (vq->event_triggered)
20398d622d21SMichael S. Tsirkin 		vq->event_triggered = false;
20408d622d21SMichael S. Tsirkin 
20411ce9e605STiwei Bie 	return vq->packed_ring ? virtqueue_enable_cb_prepare_packed(_vq) :
20421ce9e605STiwei Bie 				 virtqueue_enable_cb_prepare_split(_vq);
2043e6f633e5STiwei Bie }
2044e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);
2045e6f633e5STiwei Bie 
2046e6f633e5STiwei Bie /**
2047e6f633e5STiwei Bie  * virtqueue_poll - query pending used buffers
2048a5581206SJiang Biao  * @_vq: the struct virtqueue we're talking about.
2049e6f633e5STiwei Bie  * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
2050e6f633e5STiwei Bie  *
2051e6f633e5STiwei Bie  * Returns "true" if there are pending used buffers in the queue.
2052e6f633e5STiwei Bie  *
2053e6f633e5STiwei Bie  * This does not need to be serialized.
2054e6f633e5STiwei Bie  */
2055e6f633e5STiwei Bie bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
2056e6f633e5STiwei Bie {
2057e6f633e5STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
2058e6f633e5STiwei Bie 
2059481a0d74SMao Wenan 	if (unlikely(vq->broken))
2060481a0d74SMao Wenan 		return false;
2061481a0d74SMao Wenan 
2062e6f633e5STiwei Bie 	virtio_mb(vq->weak_barriers);
20631ce9e605STiwei Bie 	return vq->packed_ring ? virtqueue_poll_packed(_vq, last_used_idx) :
20641ce9e605STiwei Bie 				 virtqueue_poll_split(_vq, last_used_idx);
2065e6f633e5STiwei Bie }
2066e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_poll);
2067e6f633e5STiwei Bie 
2068e6f633e5STiwei Bie /**
2069e6f633e5STiwei Bie  * virtqueue_enable_cb - restart callbacks after disable_cb.
2070a5581206SJiang Biao  * @_vq: the struct virtqueue we're talking about.
2071e6f633e5STiwei Bie  *
2072e6f633e5STiwei Bie  * This re-enables callbacks; it returns "false" if there are pending
2073e6f633e5STiwei Bie  * buffers in the queue, to detect a possible race between the driver
2074e6f633e5STiwei Bie  * checking for more work, and enabling callbacks.
2075e6f633e5STiwei Bie  *
2076e6f633e5STiwei Bie  * Caller must ensure we don't call this with other virtqueue
2077e6f633e5STiwei Bie  * operations at the same time (except where noted).
2078e6f633e5STiwei Bie  */
2079e6f633e5STiwei Bie bool virtqueue_enable_cb(struct virtqueue *_vq)
2080e6f633e5STiwei Bie {
2081e6f633e5STiwei Bie 	unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);
2082e6f633e5STiwei Bie 
2083e6f633e5STiwei Bie 	return !virtqueue_poll(_vq, last_used_idx);
2084e6f633e5STiwei Bie }
2085e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
2086e6f633e5STiwei Bie 
2087e6f633e5STiwei Bie /**
2088e6f633e5STiwei Bie  * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
2089a5581206SJiang Biao  * @_vq: the struct virtqueue we're talking about.
2090e6f633e5STiwei Bie  *
2091e6f633e5STiwei Bie  * This re-enables callbacks but hints to the other side to delay
2092e6f633e5STiwei Bie  * interrupts until most of the available buffers have been processed;
2093e6f633e5STiwei Bie  * it returns "false" if there are many pending buffers in the queue,
2094e6f633e5STiwei Bie  * to detect a possible race between the driver checking for more work,
2095e6f633e5STiwei Bie  * and enabling callbacks.
2096e6f633e5STiwei Bie  *
2097e6f633e5STiwei Bie  * Caller must ensure we don't call this with other virtqueue
2098e6f633e5STiwei Bie  * operations at the same time (except where noted).
2099e6f633e5STiwei Bie  */
2100e6f633e5STiwei Bie bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
2101e6f633e5STiwei Bie {
21021ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
21031ce9e605STiwei Bie 
21048d622d21SMichael S. Tsirkin 	if (vq->event_triggered)
21058d622d21SMichael S. Tsirkin 		vq->event_triggered = false;
21068d622d21SMichael S. Tsirkin 
21071ce9e605STiwei Bie 	return vq->packed_ring ? virtqueue_enable_cb_delayed_packed(_vq) :
21081ce9e605STiwei Bie 				 virtqueue_enable_cb_delayed_split(_vq);
2109e6f633e5STiwei Bie }
2110e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
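
/*
 * Illustration only, a hypothetical consumer loop (the pattern used by
 * NAPI-style drivers): keep callbacks off while draining, re-enable,
 * and drain again if re-enabling raced with a new completion.
 */
static void __maybe_unused example_drain_virtqueue(struct virtqueue *vq)
{
	unsigned int len;
	void *buf;

	for (;;) {
		virtqueue_disable_cb(vq);
		while ((buf = virtqueue_get_buf(vq, &len)))
			; /* process buf; the device wrote len bytes */
		if (virtqueue_enable_cb(vq))
			break;	/* no race: callbacks are re-armed */
	}
}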
2111e6f633e5STiwei Bie 
2112138fd251STiwei Bie /**
2113138fd251STiwei Bie  * virtqueue_detach_unused_buf - detach first unused buffer
2114a5581206SJiang Biao  * @_vq: the struct virtqueue we're talking about.
2115138fd251STiwei Bie  *
2116138fd251STiwei Bie  * Returns NULL or the "data" token handed to virtqueue_add_*().
2117138fd251STiwei Bie  * This is not valid on an active queue; it is useful only for device
2118138fd251STiwei Bie  * shutdown.
2119138fd251STiwei Bie  */
2120138fd251STiwei Bie void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
2121138fd251STiwei Bie {
21221ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
21231ce9e605STiwei Bie 
21241ce9e605STiwei Bie 	return vq->packed_ring ? virtqueue_detach_unused_buf_packed(_vq) :
21251ce9e605STiwei Bie 				 virtqueue_detach_unused_buf_split(_vq);
2126138fd251STiwei Bie }
21277c5e9ed0SMichael S. Tsirkin EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
2128c021eac4SShirley Ma 
2129138fd251STiwei Bie static inline bool more_used(const struct vring_virtqueue *vq)
2130138fd251STiwei Bie {
21311ce9e605STiwei Bie 	return vq->packed_ring ? more_used_packed(vq) : more_used_split(vq);
2132138fd251STiwei Bie }
2133138fd251STiwei Bie 
21340a8a69ddSRusty Russell irqreturn_t vring_interrupt(int irq, void *_vq)
21350a8a69ddSRusty Russell {
21360a8a69ddSRusty Russell 	struct vring_virtqueue *vq = to_vvq(_vq);
21370a8a69ddSRusty Russell 
21380a8a69ddSRusty Russell 	if (!more_used(vq)) {
21390a8a69ddSRusty Russell 		pr_debug("virtqueue interrupt with no work for %p\n", vq);
21400a8a69ddSRusty Russell 		return IRQ_NONE;
21410a8a69ddSRusty Russell 	}
21420a8a69ddSRusty Russell 
21430a8a69ddSRusty Russell 	if (unlikely(vq->broken))
21440a8a69ddSRusty Russell 		return IRQ_HANDLED;
21450a8a69ddSRusty Russell 
21468d622d21SMichael S. Tsirkin 	/* Just a hint for performance: so it's ok that this can be racy! */
21478d622d21SMichael S. Tsirkin 	if (vq->event)
21488d622d21SMichael S. Tsirkin 		vq->event_triggered = true;
21498d622d21SMichael S. Tsirkin 
21500a8a69ddSRusty Russell 	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
215118445c4dSRusty Russell 	if (vq->vq.callback)
215218445c4dSRusty Russell 		vq->vq.callback(&vq->vq);
21530a8a69ddSRusty Russell 
21540a8a69ddSRusty Russell 	return IRQ_HANDLED;
21550a8a69ddSRusty Russell }
2156c6fd4701SRusty Russell EXPORT_SYMBOL_GPL(vring_interrupt);
21570a8a69ddSRusty Russell 
21581ce9e605STiwei Bie /* Only available for split ring */
21592a2d1382SAndy Lutomirski struct virtqueue *__vring_new_virtqueue(unsigned int index,
21602a2d1382SAndy Lutomirski 					struct vring vring,
21610a8a69ddSRusty Russell 					struct virtio_device *vdev,
21627b21e34fSRusty Russell 					bool weak_barriers,
2163f94682ddSMichael S. Tsirkin 					bool context,
216446f9c2b9SHeinz Graalfs 					bool (*notify)(struct virtqueue *),
21659499f5e7SRusty Russell 					void (*callback)(struct virtqueue *),
21669499f5e7SRusty Russell 					const char *name)
21670a8a69ddSRusty Russell {
21682a2d1382SAndy Lutomirski 	struct vring_virtqueue *vq;
21690a8a69ddSRusty Russell 
21701ce9e605STiwei Bie 	if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
21711ce9e605STiwei Bie 		return NULL;
21721ce9e605STiwei Bie 
2173cbeedb72STiwei Bie 	vq = kmalloc(sizeof(*vq), GFP_KERNEL);
21740a8a69ddSRusty Russell 	if (!vq)
21750a8a69ddSRusty Russell 		return NULL;
21760a8a69ddSRusty Russell 
21771ce9e605STiwei Bie 	vq->packed_ring = false;
21780a8a69ddSRusty Russell 	vq->vq.callback = callback;
21790a8a69ddSRusty Russell 	vq->vq.vdev = vdev;
21809499f5e7SRusty Russell 	vq->vq.name = name;
21812a2d1382SAndy Lutomirski 	vq->vq.num_free = vring.num;
218206ca287dSRusty Russell 	vq->vq.index = index;
21832a2d1382SAndy Lutomirski 	vq->we_own_ring = false;
21840a8a69ddSRusty Russell 	vq->notify = notify;
21857b21e34fSRusty Russell 	vq->weak_barriers = weak_barriers;
21860a8a69ddSRusty Russell 	vq->broken = false;
21870a8a69ddSRusty Russell 	vq->last_used_idx = 0;
21888d622d21SMichael S. Tsirkin 	vq->event_triggered = false;
21890a8a69ddSRusty Russell 	vq->num_added = 0;
2190fb3fba6bSTiwei Bie 	vq->use_dma_api = vring_use_dma_api(vdev);
21910a8a69ddSRusty Russell #ifdef DEBUG
21920a8a69ddSRusty Russell 	vq->in_use = false;
2193e93300b1SRusty Russell 	vq->last_add_time_valid = false;
21940a8a69ddSRusty Russell #endif
21950a8a69ddSRusty Russell 
21965a08b04fSMichael S. Tsirkin 	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
21975a08b04fSMichael S. Tsirkin 		!context;
2198a5c262c5SMichael S. Tsirkin 	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
21999fa29b9dSMark McLoughlin 
220045383fb0STiwei Bie 	if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
220145383fb0STiwei Bie 		vq->weak_barriers = false;
220245383fb0STiwei Bie 
2203d79dca75STiwei Bie 	vq->split.queue_dma_addr = 0;
2204d79dca75STiwei Bie 	vq->split.queue_size_in_bytes = 0;
2205d79dca75STiwei Bie 
2206e593bf97STiwei Bie 	vq->split.vring = vring;
2207e593bf97STiwei Bie 	vq->split.avail_flags_shadow = 0;
2208e593bf97STiwei Bie 	vq->split.avail_idx_shadow = 0;
2209e593bf97STiwei Bie 
22100a8a69ddSRusty Russell 	/* No callback?  Tell other side not to bother us. */
2211f277ec42SVenkatesh Srinivas 	if (!callback) {
2212e593bf97STiwei Bie 		vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
22130ea1e4a6SLadi Prosek 		if (!vq->event)
2214e593bf97STiwei Bie 			vq->split.vring.avail->flags = cpu_to_virtio16(vdev,
2215e593bf97STiwei Bie 					vq->split.avail_flags_shadow);
2216f277ec42SVenkatesh Srinivas 	}
22170a8a69ddSRusty Russell 
2218cbeedb72STiwei Bie 	vq->split.desc_state = kmalloc_array(vring.num,
2219cbeedb72STiwei Bie 			sizeof(struct vring_desc_state_split), GFP_KERNEL);
22205bc72234SJason Wang 	if (!vq->split.desc_state)
22215bc72234SJason Wang 		goto err_state;
2222cbeedb72STiwei Bie 
222372b5e895SJason Wang 	vq->split.desc_extra = vring_alloc_desc_extra(vq, vring.num);
222472b5e895SJason Wang 	if (!vq->split.desc_extra)
222572b5e895SJason Wang 		goto err_extra;
222672b5e895SJason Wang 
22270a8a69ddSRusty Russell 	/* Put everything in free lists. */
22280a8a69ddSRusty Russell 	vq->free_head = 0;
2229cbeedb72STiwei Bie 	memset(vq->split.desc_state, 0, vring.num *
2230cbeedb72STiwei Bie 			sizeof(struct vring_desc_state_split));
22310a8a69ddSRusty Russell 
2232e152d8afSDan Carpenter 	list_add_tail(&vq->vq.list, &vdev->vqs);
22330a8a69ddSRusty Russell 	return &vq->vq;
22345bc72234SJason Wang 
223572b5e895SJason Wang err_extra:
223672b5e895SJason Wang 	kfree(vq->split.desc_state);
22375bc72234SJason Wang err_state:
22385bc72234SJason Wang 	kfree(vq);
22395bc72234SJason Wang 	return NULL;
22400a8a69ddSRusty Russell }
22412a2d1382SAndy Lutomirski EXPORT_SYMBOL_GPL(__vring_new_virtqueue);
22422a2d1382SAndy Lutomirski 
22432a2d1382SAndy Lutomirski struct virtqueue *vring_create_virtqueue(
22442a2d1382SAndy Lutomirski 	unsigned int index,
22452a2d1382SAndy Lutomirski 	unsigned int num,
22462a2d1382SAndy Lutomirski 	unsigned int vring_align,
22472a2d1382SAndy Lutomirski 	struct virtio_device *vdev,
22482a2d1382SAndy Lutomirski 	bool weak_barriers,
22492a2d1382SAndy Lutomirski 	bool may_reduce_num,
2250f94682ddSMichael S. Tsirkin 	bool context,
22512a2d1382SAndy Lutomirski 	bool (*notify)(struct virtqueue *),
22522a2d1382SAndy Lutomirski 	void (*callback)(struct virtqueue *),
22532a2d1382SAndy Lutomirski 	const char *name)
22542a2d1382SAndy Lutomirski {
22561ce9e605STiwei Bie 	if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
22571ce9e605STiwei Bie 		return vring_create_virtqueue_packed(index, num, vring_align,
22581ce9e605STiwei Bie 				vdev, weak_barriers, may_reduce_num,
22591ce9e605STiwei Bie 				context, notify, callback, name);
22601ce9e605STiwei Bie 
2261d79dca75STiwei Bie 	return vring_create_virtqueue_split(index, num, vring_align,
2262d79dca75STiwei Bie 			vdev, weak_barriers, may_reduce_num,
2263d79dca75STiwei Bie 			context, notify, callback, name);
22642a2d1382SAndy Lutomirski }
22652a2d1382SAndy Lutomirski EXPORT_SYMBOL_GPL(vring_create_virtqueue);
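
/*
 * A minimal usage sketch, not part of this file: how a transport might
 * create a queue with the helper above.  All example_* names are
 * illustrative assumptions, not kernel symbols.
 */
static bool example_notify(struct virtqueue *vq)
{
	/* Kick the (hypothetical) device; returning false breaks the vq. */
	return true;
}

static void example_callback(struct virtqueue *vq)
{
	/* Invoked from the transport's interrupt path. */
}

static struct virtqueue *example_create_vq(struct virtio_device *vdev)
{
	/*
	 * 128 entries, page-aligned ring, weak barriers allowed, ring may
	 * shrink on allocation failure, no per-buffer context.
	 */
	return vring_create_virtqueue(0, 128, PAGE_SIZE, vdev,
				      true, true, false,
				      example_notify, example_callback,
				      "example-vq");
}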
22662a2d1382SAndy Lutomirski 
22671ce9e605STiwei Bie /* Only available for split ring */
22682a2d1382SAndy Lutomirski struct virtqueue *vring_new_virtqueue(unsigned int index,
22692a2d1382SAndy Lutomirski 				      unsigned int num,
22702a2d1382SAndy Lutomirski 				      unsigned int vring_align,
22712a2d1382SAndy Lutomirski 				      struct virtio_device *vdev,
22722a2d1382SAndy Lutomirski 				      bool weak_barriers,
2273f94682ddSMichael S. Tsirkin 				      bool context,
22742a2d1382SAndy Lutomirski 				      void *pages,
22752a2d1382SAndy Lutomirski 				      bool (*notify)(struct virtqueue *vq),
22762a2d1382SAndy Lutomirski 				      void (*callback)(struct virtqueue *vq),
22772a2d1382SAndy Lutomirski 				      const char *name)
22782a2d1382SAndy Lutomirski {
22792a2d1382SAndy Lutomirski 	struct vring vring;
22801ce9e605STiwei Bie 
22811ce9e605STiwei Bie 	if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
22821ce9e605STiwei Bie 		return NULL;
22831ce9e605STiwei Bie 
22842a2d1382SAndy Lutomirski 	vring_init(&vring, num, pages, vring_align);
2285f94682ddSMichael S. Tsirkin 	return __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
22862a2d1382SAndy Lutomirski 				     notify, callback, name);
22872a2d1382SAndy Lutomirski }
2288c6fd4701SRusty Russell EXPORT_SYMBOL_GPL(vring_new_virtqueue);
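
/*
 * Legacy-path sketch (illustrative): the caller pre-allocates
 * vring_size(num, vring_align) bytes of zeroed, physically contiguous
 * memory and passes it as @pages; reuses the example_* helpers above.
 */
static struct virtqueue *example_new_vq_legacy(struct virtio_device *vdev,
					       void *pages)
{
	return vring_new_virtqueue(0, 256, PAGE_SIZE, vdev, true, false,
				   pages, example_notify, example_callback,
				   "example-legacy-vq");
}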
22890a8a69ddSRusty Russell 
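/**
 * vring_del_virtqueue - tear down a virtqueue created by this file
 * @_vq: the virtqueue to destroy
 *
 * Unlinks the queue from its device's list, frees the ring memory if it
 * was allocated here (we_own_ring), and frees the per-descriptor state.
 */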
22902a2d1382SAndy Lutomirski void vring_del_virtqueue(struct virtqueue *_vq)
22910a8a69ddSRusty Russell {
22922a2d1382SAndy Lutomirski 	struct vring_virtqueue *vq = to_vvq(_vq);
22932a2d1382SAndy Lutomirski 
2294*249f2554SParav Pandit 	list_del(&_vq->list);
2295*249f2554SParav Pandit 
22962a2d1382SAndy Lutomirski 	if (vq->we_own_ring) {
22971ce9e605STiwei Bie 		if (vq->packed_ring) {
22981ce9e605STiwei Bie 			vring_free_queue(vq->vq.vdev,
22991ce9e605STiwei Bie 					 vq->packed.ring_size_in_bytes,
23001ce9e605STiwei Bie 					 vq->packed.vring.desc,
23011ce9e605STiwei Bie 					 vq->packed.ring_dma_addr);
23021ce9e605STiwei Bie 
23031ce9e605STiwei Bie 			vring_free_queue(vq->vq.vdev,
23041ce9e605STiwei Bie 					 vq->packed.event_size_in_bytes,
23051ce9e605STiwei Bie 					 vq->packed.vring.driver,
23061ce9e605STiwei Bie 					 vq->packed.driver_event_dma_addr);
23071ce9e605STiwei Bie 
23081ce9e605STiwei Bie 			vring_free_queue(vq->vq.vdev,
23091ce9e605STiwei Bie 					 vq->packed.event_size_in_bytes,
23101ce9e605STiwei Bie 					 vq->packed.vring.device,
23111ce9e605STiwei Bie 					 vq->packed.device_event_dma_addr);
23121ce9e605STiwei Bie 
23131ce9e605STiwei Bie 			kfree(vq->packed.desc_state);
23141ce9e605STiwei Bie 			kfree(vq->packed.desc_extra);
23151ce9e605STiwei Bie 		} else {
2316d79dca75STiwei Bie 			vring_free_queue(vq->vq.vdev,
2317d79dca75STiwei Bie 					 vq->split.queue_size_in_bytes,
2318d79dca75STiwei Bie 					 vq->split.vring.desc,
2319d79dca75STiwei Bie 					 vq->split.queue_dma_addr);
2320f13f09a1SSuman Anna 		}
2321f13f09a1SSuman Anna 	}
232272b5e895SJason Wang 	if (!vq->packed_ring) {
2323cbeedb72STiwei Bie 		kfree(vq->split.desc_state);
232472b5e895SJason Wang 		kfree(vq->split.desc_extra);
232572b5e895SJason Wang 	}
23262a2d1382SAndy Lutomirski 	kfree(vq);
23270a8a69ddSRusty Russell }
2328c6fd4701SRusty Russell EXPORT_SYMBOL_GPL(vring_del_virtqueue);
23290a8a69ddSRusty Russell 
2330e34f8725SRusty Russell /* Manipulates transport-specific feature bits. */
2331e34f8725SRusty Russell void vring_transport_features(struct virtio_device *vdev)
2332e34f8725SRusty Russell {
2333e34f8725SRusty Russell 	unsigned int i;
2334e34f8725SRusty Russell 
2335e34f8725SRusty Russell 	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
2336e34f8725SRusty Russell 		switch (i) {
23379fa29b9dSMark McLoughlin 		case VIRTIO_RING_F_INDIRECT_DESC:
23389fa29b9dSMark McLoughlin 			break;
2339a5c262c5SMichael S. Tsirkin 		case VIRTIO_RING_F_EVENT_IDX:
2340a5c262c5SMichael S. Tsirkin 			break;
2341747ae34aSMichael S. Tsirkin 		case VIRTIO_F_VERSION_1:
2342747ae34aSMichael S. Tsirkin 			break;
2343321bd212SMichael S. Tsirkin 		case VIRTIO_F_ACCESS_PLATFORM:
23441a937693SMichael S. Tsirkin 			break;
2345f959a128STiwei Bie 		case VIRTIO_F_RING_PACKED:
2346f959a128STiwei Bie 			break;
234745383fb0STiwei Bie 		case VIRTIO_F_ORDER_PLATFORM:
234845383fb0STiwei Bie 			break;
2349e34f8725SRusty Russell 		default:
2350e34f8725SRusty Russell 			/* We don't understand this bit. */
2351e16e12beSMichael S. Tsirkin 			__virtio_clear_bit(vdev, i);
2352e34f8725SRusty Russell 		}
2353e34f8725SRusty Russell 	}
2354e34f8725SRusty Russell }
2355e34f8725SRusty Russell EXPORT_SYMBOL_GPL(vring_transport_features);
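
/*
 * Sketch of typical use from a transport's .finalize_features hook;
 * example_finalize_features is an illustrative name, not a kernel symbol.
 */
static int example_finalize_features(struct virtio_device *vdev)
{
	/* Clear any transport feature bits the ring code cannot honour. */
	vring_transport_features(vdev);
	return 0;
}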
2356e34f8725SRusty Russell 
23575dfc1762SRusty Russell /**
23585dfc1762SRusty Russell  * virtqueue_get_vring_size - return the size of the virtqueue's vring
2359a5581206SJiang Biao  * @_vq: the struct virtqueue containing the vring of interest.
23605dfc1762SRusty Russell  *
23615dfc1762SRusty Russell  * Returns the size of the vring.  This is mainly used for boasting to
23625dfc1762SRusty Russell  * userspace.  Unlike other operations, this need not be serialized.
23635dfc1762SRusty Russell  */
23648f9f4668SRick Jones unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
23658f9f4668SRick Jones {
23678f9f4668SRick Jones 	struct vring_virtqueue *vq = to_vvq(_vq);
23688f9f4668SRick Jones 
23691ce9e605STiwei Bie 	return vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num;
23708f9f4668SRick Jones }
23718f9f4668SRick Jones EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);
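
/*
 * Sketch: drivers commonly size per-request bookkeeping to the ring.
 * example_alloc_per_desc is an illustrative name, not a kernel symbol.
 */
static void *example_alloc_per_desc(struct virtqueue *vq)
{
	/* One tracking slot per ring entry. */
	return kcalloc(virtqueue_get_vring_size(vq), sizeof(void *),
		       GFP_KERNEL);
}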
23728f9f4668SRick Jones 
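/**
 * virtqueue_is_broken - query whether a virtqueue has been marked broken
 * @_vq: the virtqueue of interest
 *
 * Returns true once the core has flagged the queue as unusable, e.g. via
 * virtio_break_device() below; safe to poll without locking.
 */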
2373b3b32c94SHeinz Graalfs bool virtqueue_is_broken(struct virtqueue *_vq)
2374b3b32c94SHeinz Graalfs {
2375b3b32c94SHeinz Graalfs 	struct vring_virtqueue *vq = to_vvq(_vq);
2376b3b32c94SHeinz Graalfs 
237760f07798SParav Pandit 	return READ_ONCE(vq->broken);
2378b3b32c94SHeinz Graalfs }
2379b3b32c94SHeinz Graalfs EXPORT_SYMBOL_GPL(virtqueue_is_broken);
2380b3b32c94SHeinz Graalfs 
2381e2dcdfe9SRusty Russell /*
2382e2dcdfe9SRusty Russell  * This should prevent the device from being used, allowing drivers to
2383e2dcdfe9SRusty Russell  * recover.  You may need to grab appropriate locks to flush in-flight requests.
2384e2dcdfe9SRusty Russell  */
2385e2dcdfe9SRusty Russell void virtio_break_device(struct virtio_device *dev)
2386e2dcdfe9SRusty Russell {
2387e2dcdfe9SRusty Russell 	struct virtqueue *_vq;
2388e2dcdfe9SRusty Russell 
2389e2dcdfe9SRusty Russell 	list_for_each_entry(_vq, &dev->vqs, list) {
2390e2dcdfe9SRusty Russell 		struct vring_virtqueue *vq = to_vvq(_vq);
239160f07798SParav Pandit 
239260f07798SParav Pandit 		/* Pairs with READ_ONCE() in virtqueue_is_broken(). */
239360f07798SParav Pandit 		WRITE_ONCE(vq->broken, true);
2394e2dcdfe9SRusty Russell 	}
2395e2dcdfe9SRusty Russell }
2396e2dcdfe9SRusty Russell EXPORT_SYMBOL_GPL(virtio_break_device);
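
/*
 * Sketch of an I/O path that bails out once the device has been broken;
 * example_submit is illustrative, virtqueue_add_outbuf() is the real API.
 */
static int example_submit(struct virtqueue *vq, struct scatterlist *sg,
			  void *data)
{
	if (virtqueue_is_broken(vq))
		return -EIO;
	return virtqueue_add_outbuf(vq, sg, 1, data, GFP_ATOMIC);
}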
2397e2dcdfe9SRusty Russell 
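/*
 * The next three helpers report the DMA addresses of the descriptor,
 * driver (avail) and device (used) areas; they are only valid for rings
 * allocated by this file (we_own_ring).
 */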
23982a2d1382SAndy Lutomirski dma_addr_t virtqueue_get_desc_addr(struct virtqueue *_vq)
239989062652SCornelia Huck {
240089062652SCornelia Huck 	struct vring_virtqueue *vq = to_vvq(_vq);
240189062652SCornelia Huck 
24022a2d1382SAndy Lutomirski 	BUG_ON(!vq->we_own_ring);
240389062652SCornelia Huck 
24041ce9e605STiwei Bie 	if (vq->packed_ring)
24051ce9e605STiwei Bie 		return vq->packed.ring_dma_addr;
24061ce9e605STiwei Bie 
2407d79dca75STiwei Bie 	return vq->split.queue_dma_addr;
24082a2d1382SAndy Lutomirski }
24092a2d1382SAndy Lutomirski EXPORT_SYMBOL_GPL(virtqueue_get_desc_addr);
24102a2d1382SAndy Lutomirski 
24112a2d1382SAndy Lutomirski dma_addr_t virtqueue_get_avail_addr(struct virtqueue *_vq)
241289062652SCornelia Huck {
241389062652SCornelia Huck 	struct vring_virtqueue *vq = to_vvq(_vq);
241489062652SCornelia Huck 
24152a2d1382SAndy Lutomirski 	BUG_ON(!vq->we_own_ring);
24162a2d1382SAndy Lutomirski 
24171ce9e605STiwei Bie 	if (vq->packed_ring)
24181ce9e605STiwei Bie 		return vq->packed.driver_event_dma_addr;
24191ce9e605STiwei Bie 
2420d79dca75STiwei Bie 	return vq->split.queue_dma_addr +
2421e593bf97STiwei Bie 		((char *)vq->split.vring.avail - (char *)vq->split.vring.desc);
242289062652SCornelia Huck }
24232a2d1382SAndy Lutomirski EXPORT_SYMBOL_GPL(virtqueue_get_avail_addr);
24242a2d1382SAndy Lutomirski 
24252a2d1382SAndy Lutomirski dma_addr_t virtqueue_get_used_addr(struct virtqueue *_vq)
24262a2d1382SAndy Lutomirski {
24272a2d1382SAndy Lutomirski 	struct vring_virtqueue *vq = to_vvq(_vq);
24282a2d1382SAndy Lutomirski 
24292a2d1382SAndy Lutomirski 	BUG_ON(!vq->we_own_ring);
24302a2d1382SAndy Lutomirski 
24311ce9e605STiwei Bie 	if (vq->packed_ring)
24321ce9e605STiwei Bie 		return vq->packed.device_event_dma_addr;
24331ce9e605STiwei Bie 
2434d79dca75STiwei Bie 	return vq->split.queue_dma_addr +
2435e593bf97STiwei Bie 		((char *)vq->split.vring.used - (char *)vq->split.vring.desc);
24362a2d1382SAndy Lutomirski }
24372a2d1382SAndy Lutomirski EXPORT_SYMBOL_GPL(virtqueue_get_used_addr);
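
/*
 * Sketch of a transport programming those addresses into a virtio-mmio
 * style device.  The register offsets come from <linux/virtio_mmio.h>;
 * example_program_ring and @base are illustrative assumptions.
 */
static void example_program_ring(struct virtqueue *vq, void __iomem *base)
{
	u64 addr = virtqueue_get_desc_addr(vq);

	writel((u32)addr, base + VIRTIO_MMIO_QUEUE_DESC_LOW);
	writel((u32)(addr >> 32), base + VIRTIO_MMIO_QUEUE_DESC_HIGH);

	addr = virtqueue_get_avail_addr(vq);
	writel((u32)addr, base + VIRTIO_MMIO_QUEUE_AVAIL_LOW);
	writel((u32)(addr >> 32), base + VIRTIO_MMIO_QUEUE_AVAIL_HIGH);

	addr = virtqueue_get_used_addr(vq);
	writel((u32)addr, base + VIRTIO_MMIO_QUEUE_USED_LOW);
	writel((u32)(addr >> 32), base + VIRTIO_MMIO_QUEUE_USED_HIGH);
}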
24382a2d1382SAndy Lutomirski 
24391ce9e605STiwei Bie /* Only available for split ring */
24402a2d1382SAndy Lutomirski const struct vring *virtqueue_get_vring(struct virtqueue *vq)
24412a2d1382SAndy Lutomirski {
2442e593bf97STiwei Bie 	return &to_vvq(vq)->split.vring;
24432a2d1382SAndy Lutomirski }
24442a2d1382SAndy Lutomirski EXPORT_SYMBOL_GPL(virtqueue_get_vring);
244589062652SCornelia Huck 
2446c6fd4701SRusty Russell MODULE_LICENSE("GPL");
2447