xref: /openbmc/linux/drivers/virtio/virtio_ring.c (revision cbeedb72b97ad826e31e68e0717b763e2db0806d)
/* Virtio ring implementation.
 *
 *  Copyright 2007 Rusty Russell IBM Corporation
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/dma-mapping.h>
#include <xen/xen.h>

#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		BUG();						\
	} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)						\
	do {							\
		if ((_vq)->in_use)				\
			panic("%s:in_use = %i\n",		\
			      (_vq)->vq.name, (_vq)->in_use);	\
		(_vq)->in_use = __LINE__;			\
	} while (0)
#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while (0)
#define LAST_ADD_TIME_UPDATE(_vq)				\
	do {							\
		ktime_t now = ktime_get();			\
								\
		/* No kick or get, with .1 second between?  Warn. */ \
		if ((_vq)->last_add_time_valid)			\
			WARN_ON(ktime_to_ms(ktime_sub(now,	\
				(_vq)->last_add_time)) > 100);	\
		(_vq)->last_add_time = now;			\
		(_vq)->last_add_time_valid = true;		\
	} while (0)
#define LAST_ADD_TIME_CHECK(_vq)				\
	do {							\
		if ((_vq)->last_add_time_valid) {		\
			WARN_ON(ktime_to_ms(ktime_sub(ktime_get(), \
				      (_vq)->last_add_time)) > 100); \
		}						\
	} while (0)
#define LAST_ADD_TIME_INVALID(_vq)				\
	((_vq)->last_add_time_valid = false)
#else
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		(_vq)->broken = true;				\
	} while (0)
#define START_USE(vq)
#define END_USE(vq)
#define LAST_ADD_TIME_UPDATE(vq)
#define LAST_ADD_TIME_CHECK(vq)
#define LAST_ADD_TIME_INVALID(vq)
#endif

struct vring_desc_state_split {
	void *data;			/* Data for callback. */
	struct vring_desc *indir_desc;	/* Indirect descriptor, if any. */
};

struct vring_virtqueue {
	struct virtqueue vq;

	/* Can we use weak barriers? */
	bool weak_barriers;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Host supports indirect buffers */
	bool indirect;

	/* Host publishes avail event idx */
	bool event;

	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen. */
	u16 last_used_idx;

	struct {
		/* Actual memory layout for this queue */
		struct vring vring;

		/* Last written value to avail->flags */
		u16 avail_flags_shadow;

		/* Last written value to avail->idx in guest byte order */
		u16 avail_idx_shadow;

		/* Per-descriptor state. */
		struct vring_desc_state_split *desc_state;
	} split;

	/* How to notify other side. FIXME: commonalize hcalls! */
	bool (*notify)(struct virtqueue *vq);

	/* DMA, allocation, and size information */
	bool we_own_ring;
	size_t queue_size_in_bytes;
	dma_addr_t queue_dma_addr;

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;

	/* Figure out if their kicks are too delayed. */
	bool last_add_time_valid;
	ktime_t last_add_time;
#endif
};


/*
 * Helpers.
 */

#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)

static inline bool virtqueue_use_indirect(struct virtqueue *_vq,
					  unsigned int total_sg)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	/*
	 * If the host supports indirect descriptor tables, and we have multiple
	 * buffers, then go indirect. FIXME: tune this threshold
	 */
	return (vq->indirect && total_sg > 1 && vq->vq.num_free);
}

/*
 * Modern virtio devices have feature bits to specify whether they need a
 * quirk and bypass the IOMMU. If not there, just use the DMA API.
 *
 * If there, the interaction between virtio and DMA API is messy.
 *
 * On most systems with virtio, physical addresses match bus addresses,
 * and it doesn't particularly matter whether we use the DMA API.
 *
 * On some systems, including Xen and any system with a physical device
 * that speaks virtio behind a physical IOMMU, we must use the DMA API
 * for virtio DMA to work at all.
 *
 * On other systems, including SPARC and PPC64, virtio-pci devices are
 * enumerated as though they are behind an IOMMU, but the virtio host
 * ignores the IOMMU, so we must either pretend that the IOMMU isn't
 * there or somehow map everything as the identity.
 *
 * For the time being, we preserve historic behavior and bypass the DMA
 * API.
 *
 * TODO: install a per-device DMA ops structure that does the right thing
 * taking into account all the above quirks, and use the DMA API
 * unconditionally on the data path.
 */

static bool vring_use_dma_api(struct virtio_device *vdev)
{
	if (!virtio_has_iommu_quirk(vdev))
		return true;

	/* Otherwise, we are left to guess. */
	/*
	 * In theory, it's possible to have a buggy QEMU-supplied
	 * emulated Q35 IOMMU and Xen enabled at the same time.  On
	 * such a configuration, virtio has never worked and will
	 * not work without an even larger kludge.  Instead, enable
	 * the DMA API if we're a Xen guest, which at least allows
	 * all of the sensible Xen configurations to work correctly.
	 */
	if (xen_domain())
		return true;

	return false;
}
205d26c96c8SAndy Lutomirski 
206780bc790SAndy Lutomirski /*
207780bc790SAndy Lutomirski  * The DMA ops on various arches are rather gnarly right now, and
208780bc790SAndy Lutomirski  * making all of the arch DMA ops work on the vring device itself
209780bc790SAndy Lutomirski  * is a mess.  For now, we use the parent device for DMA ops.
210780bc790SAndy Lutomirski  */
21175bfa81bSMichael S. Tsirkin static inline struct device *vring_dma_dev(const struct vring_virtqueue *vq)
212780bc790SAndy Lutomirski {
213780bc790SAndy Lutomirski 	return vq->vq.vdev->dev.parent;
214780bc790SAndy Lutomirski }
215780bc790SAndy Lutomirski 
216780bc790SAndy Lutomirski /* Map one sg entry. */
217780bc790SAndy Lutomirski static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
218780bc790SAndy Lutomirski 				   struct scatterlist *sg,
219780bc790SAndy Lutomirski 				   enum dma_data_direction direction)
220780bc790SAndy Lutomirski {
221780bc790SAndy Lutomirski 	if (!vring_use_dma_api(vq->vq.vdev))
222780bc790SAndy Lutomirski 		return (dma_addr_t)sg_phys(sg);
223780bc790SAndy Lutomirski 
224780bc790SAndy Lutomirski 	/*
225780bc790SAndy Lutomirski 	 * We can't use dma_map_sg, because we don't use scatterlists in
226780bc790SAndy Lutomirski 	 * the way it expects (we don't guarantee that the scatterlist
227780bc790SAndy Lutomirski 	 * will exist for the lifetime of the mapping).
228780bc790SAndy Lutomirski 	 */
229780bc790SAndy Lutomirski 	return dma_map_page(vring_dma_dev(vq),
230780bc790SAndy Lutomirski 			    sg_page(sg), sg->offset, sg->length,
231780bc790SAndy Lutomirski 			    direction);
232780bc790SAndy Lutomirski }
233780bc790SAndy Lutomirski 
234780bc790SAndy Lutomirski static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
235780bc790SAndy Lutomirski 				   void *cpu_addr, size_t size,
236780bc790SAndy Lutomirski 				   enum dma_data_direction direction)
237780bc790SAndy Lutomirski {
238780bc790SAndy Lutomirski 	if (!vring_use_dma_api(vq->vq.vdev))
239780bc790SAndy Lutomirski 		return (dma_addr_t)virt_to_phys(cpu_addr);
240780bc790SAndy Lutomirski 
241780bc790SAndy Lutomirski 	return dma_map_single(vring_dma_dev(vq),
242780bc790SAndy Lutomirski 			      cpu_addr, size, direction);
243780bc790SAndy Lutomirski }
244780bc790SAndy Lutomirski 
245e6f633e5STiwei Bie static int vring_mapping_error(const struct vring_virtqueue *vq,
246e6f633e5STiwei Bie 			       dma_addr_t addr)
247e6f633e5STiwei Bie {
248e6f633e5STiwei Bie 	if (!vring_use_dma_api(vq->vq.vdev))
249e6f633e5STiwei Bie 		return 0;
250e6f633e5STiwei Bie 
251e6f633e5STiwei Bie 	return dma_mapping_error(vring_dma_dev(vq), addr);
252e6f633e5STiwei Bie }
253e6f633e5STiwei Bie 
254e6f633e5STiwei Bie 
255e6f633e5STiwei Bie /*
256e6f633e5STiwei Bie  * Split ring specific functions - *_split().
257e6f633e5STiwei Bie  */
258e6f633e5STiwei Bie 
259138fd251STiwei Bie static void vring_unmap_one_split(const struct vring_virtqueue *vq,
260780bc790SAndy Lutomirski 				  struct vring_desc *desc)
261780bc790SAndy Lutomirski {
262780bc790SAndy Lutomirski 	u16 flags;
263780bc790SAndy Lutomirski 
264780bc790SAndy Lutomirski 	if (!vring_use_dma_api(vq->vq.vdev))
265780bc790SAndy Lutomirski 		return;
266780bc790SAndy Lutomirski 
267780bc790SAndy Lutomirski 	flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);
268780bc790SAndy Lutomirski 
269780bc790SAndy Lutomirski 	if (flags & VRING_DESC_F_INDIRECT) {
270780bc790SAndy Lutomirski 		dma_unmap_single(vring_dma_dev(vq),
271780bc790SAndy Lutomirski 				 virtio64_to_cpu(vq->vq.vdev, desc->addr),
272780bc790SAndy Lutomirski 				 virtio32_to_cpu(vq->vq.vdev, desc->len),
273780bc790SAndy Lutomirski 				 (flags & VRING_DESC_F_WRITE) ?
274780bc790SAndy Lutomirski 				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
275780bc790SAndy Lutomirski 	} else {
276780bc790SAndy Lutomirski 		dma_unmap_page(vring_dma_dev(vq),
277780bc790SAndy Lutomirski 			       virtio64_to_cpu(vq->vq.vdev, desc->addr),
278780bc790SAndy Lutomirski 			       virtio32_to_cpu(vq->vq.vdev, desc->len),
279780bc790SAndy Lutomirski 			       (flags & VRING_DESC_F_WRITE) ?
280780bc790SAndy Lutomirski 			       DMA_FROM_DEVICE : DMA_TO_DEVICE);
281780bc790SAndy Lutomirski 	}
282780bc790SAndy Lutomirski }
283780bc790SAndy Lutomirski 
284138fd251STiwei Bie static struct vring_desc *alloc_indirect_split(struct virtqueue *_vq,
285138fd251STiwei Bie 					       unsigned int total_sg,
286138fd251STiwei Bie 					       gfp_t gfp)
2879fa29b9dSMark McLoughlin {
2889fa29b9dSMark McLoughlin 	struct vring_desc *desc;
289b25bd251SRusty Russell 	unsigned int i;
2909fa29b9dSMark McLoughlin 
291b92b1b89SWill Deacon 	/*
292b92b1b89SWill Deacon 	 * We require lowmem mappings for the descriptors because
293b92b1b89SWill Deacon 	 * otherwise virt_to_phys will give us bogus addresses in the
294b92b1b89SWill Deacon 	 * virtqueue.
295b92b1b89SWill Deacon 	 */
29682107539SMichal Hocko 	gfp &= ~__GFP_HIGHMEM;
297b92b1b89SWill Deacon 
2986da2ec56SKees Cook 	desc = kmalloc_array(total_sg, sizeof(struct vring_desc), gfp);
2999fa29b9dSMark McLoughlin 	if (!desc)
300b25bd251SRusty Russell 		return NULL;
3019fa29b9dSMark McLoughlin 
302b25bd251SRusty Russell 	for (i = 0; i < total_sg; i++)
30300e6f3d9SMichael S. Tsirkin 		desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1);
304b25bd251SRusty Russell 	return desc;
3059fa29b9dSMark McLoughlin }
3069fa29b9dSMark McLoughlin 
307138fd251STiwei Bie static inline int virtqueue_add_split(struct virtqueue *_vq,
30813816c76SRusty Russell 				      struct scatterlist *sgs[],
309eeebf9b1SRusty Russell 				      unsigned int total_sg,
31013816c76SRusty Russell 				      unsigned int out_sgs,
31113816c76SRusty Russell 				      unsigned int in_sgs,
312bbd603efSMichael S. Tsirkin 				      void *data,
3135a08b04fSMichael S. Tsirkin 				      void *ctx,
314bbd603efSMichael S. Tsirkin 				      gfp_t gfp)
3150a8a69ddSRusty Russell {
3160a8a69ddSRusty Russell 	struct vring_virtqueue *vq = to_vvq(_vq);
31713816c76SRusty Russell 	struct scatterlist *sg;
318b25bd251SRusty Russell 	struct vring_desc *desc;
319780bc790SAndy Lutomirski 	unsigned int i, n, avail, descs_used, uninitialized_var(prev), err_idx;
3201fe9b6feSMichael S. Tsirkin 	int head;
321b25bd251SRusty Russell 	bool indirect;
3220a8a69ddSRusty Russell 
3239fa29b9dSMark McLoughlin 	START_USE(vq);
3249fa29b9dSMark McLoughlin 
3250a8a69ddSRusty Russell 	BUG_ON(data == NULL);
3265a08b04fSMichael S. Tsirkin 	BUG_ON(ctx && vq->indirect);
3279fa29b9dSMark McLoughlin 
32870670444SRusty Russell 	if (unlikely(vq->broken)) {
32970670444SRusty Russell 		END_USE(vq);
33070670444SRusty Russell 		return -EIO;
33170670444SRusty Russell 	}
33270670444SRusty Russell 
3334d6a105eSTiwei Bie 	LAST_ADD_TIME_UPDATE(vq);
334e93300b1SRusty Russell 
33513816c76SRusty Russell 	BUG_ON(total_sg == 0);
3360a8a69ddSRusty Russell 
337b25bd251SRusty Russell 	head = vq->free_head;
338b25bd251SRusty Russell 
3392f18c2d1STiwei Bie 	if (virtqueue_use_indirect(_vq, total_sg))
340138fd251STiwei Bie 		desc = alloc_indirect_split(_vq, total_sg, gfp);
34144ed8089SRichard W.M. Jones 	else {
342b25bd251SRusty Russell 		desc = NULL;
343e593bf97STiwei Bie 		WARN_ON_ONCE(total_sg > vq->split.vring.num && !vq->indirect);
34444ed8089SRichard W.M. Jones 	}
345b25bd251SRusty Russell 
346b25bd251SRusty Russell 	if (desc) {
347b25bd251SRusty Russell 		/* Use a single buffer which doesn't continue */
348780bc790SAndy Lutomirski 		indirect = true;
349b25bd251SRusty Russell 		/* Set up rest to use this indirect table. */
350b25bd251SRusty Russell 		i = 0;
351b25bd251SRusty Russell 		descs_used = 1;
352b25bd251SRusty Russell 	} else {
353780bc790SAndy Lutomirski 		indirect = false;
354e593bf97STiwei Bie 		desc = vq->split.vring.desc;
355b25bd251SRusty Russell 		i = head;
356b25bd251SRusty Russell 		descs_used = total_sg;
357b25bd251SRusty Russell 	}
358b25bd251SRusty Russell 
359b25bd251SRusty Russell 	if (vq->vq.num_free < descs_used) {
3600a8a69ddSRusty Russell 		pr_debug("Can't add buf len %i - avail = %i\n",
361b25bd251SRusty Russell 			 descs_used, vq->vq.num_free);
36244653eaeSRusty Russell 		/* FIXME: for historical reasons, we force a notify here if
36344653eaeSRusty Russell 		 * there are outgoing parts to the buffer.  Presumably the
36444653eaeSRusty Russell 		 * host should service the ring ASAP. */
36513816c76SRusty Russell 		if (out_sgs)
366426e3e0aSRusty Russell 			vq->notify(&vq->vq);
36758625edfSWei Yongjun 		if (indirect)
36858625edfSWei Yongjun 			kfree(desc);
3690a8a69ddSRusty Russell 		END_USE(vq);
3700a8a69ddSRusty Russell 		return -ENOSPC;
3710a8a69ddSRusty Russell 	}
3720a8a69ddSRusty Russell 
37313816c76SRusty Russell 	for (n = 0; n < out_sgs; n++) {
374eeebf9b1SRusty Russell 		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
375780bc790SAndy Lutomirski 			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);
376780bc790SAndy Lutomirski 			if (vring_mapping_error(vq, addr))
377780bc790SAndy Lutomirski 				goto unmap_release;
378780bc790SAndy Lutomirski 
37900e6f3d9SMichael S. Tsirkin 			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT);
380780bc790SAndy Lutomirski 			desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
38100e6f3d9SMichael S. Tsirkin 			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
3820a8a69ddSRusty Russell 			prev = i;
38300e6f3d9SMichael S. Tsirkin 			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
3840a8a69ddSRusty Russell 		}
38513816c76SRusty Russell 	}
38613816c76SRusty Russell 	for (; n < (out_sgs + in_sgs); n++) {
387eeebf9b1SRusty Russell 		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
388780bc790SAndy Lutomirski 			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE);
389780bc790SAndy Lutomirski 			if (vring_mapping_error(vq, addr))
390780bc790SAndy Lutomirski 				goto unmap_release;
391780bc790SAndy Lutomirski 
39200e6f3d9SMichael S. Tsirkin 			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT | VRING_DESC_F_WRITE);
393780bc790SAndy Lutomirski 			desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
39400e6f3d9SMichael S. Tsirkin 			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
3950a8a69ddSRusty Russell 			prev = i;
39600e6f3d9SMichael S. Tsirkin 			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
39713816c76SRusty Russell 		}
3980a8a69ddSRusty Russell 	}
3990a8a69ddSRusty Russell 	/* Last one doesn't continue. */
40000e6f3d9SMichael S. Tsirkin 	desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);
4010a8a69ddSRusty Russell 
402780bc790SAndy Lutomirski 	if (indirect) {
403780bc790SAndy Lutomirski 		/* Now that the indirect table is filled in, map it. */
404780bc790SAndy Lutomirski 		dma_addr_t addr = vring_map_single(
405780bc790SAndy Lutomirski 			vq, desc, total_sg * sizeof(struct vring_desc),
406780bc790SAndy Lutomirski 			DMA_TO_DEVICE);
407780bc790SAndy Lutomirski 		if (vring_mapping_error(vq, addr))
408780bc790SAndy Lutomirski 			goto unmap_release;
409780bc790SAndy Lutomirski 
410e593bf97STiwei Bie 		vq->split.vring.desc[head].flags = cpu_to_virtio16(_vq->vdev,
411e593bf97STiwei Bie 				VRING_DESC_F_INDIRECT);
412e593bf97STiwei Bie 		vq->split.vring.desc[head].addr = cpu_to_virtio64(_vq->vdev,
413e593bf97STiwei Bie 				addr);
414780bc790SAndy Lutomirski 
415e593bf97STiwei Bie 		vq->split.vring.desc[head].len = cpu_to_virtio32(_vq->vdev,
416e593bf97STiwei Bie 				total_sg * sizeof(struct vring_desc));
417780bc790SAndy Lutomirski 	}
418780bc790SAndy Lutomirski 
419780bc790SAndy Lutomirski 	/* We're using some buffers from the free list. */
420780bc790SAndy Lutomirski 	vq->vq.num_free -= descs_used;
421780bc790SAndy Lutomirski 
4220a8a69ddSRusty Russell 	/* Update free pointer */
423b25bd251SRusty Russell 	if (indirect)
424e593bf97STiwei Bie 		vq->free_head = virtio16_to_cpu(_vq->vdev,
425e593bf97STiwei Bie 					vq->split.vring.desc[head].next);
426b25bd251SRusty Russell 	else
4270a8a69ddSRusty Russell 		vq->free_head = i;
4280a8a69ddSRusty Russell 
429780bc790SAndy Lutomirski 	/* Store token and indirect buffer state. */
430*cbeedb72STiwei Bie 	vq->split.desc_state[head].data = data;
431780bc790SAndy Lutomirski 	if (indirect)
432*cbeedb72STiwei Bie 		vq->split.desc_state[head].indir_desc = desc;
43387646a34SJason Wang 	else
434*cbeedb72STiwei Bie 		vq->split.desc_state[head].indir_desc = ctx;
4350a8a69ddSRusty Russell 
4360a8a69ddSRusty Russell 	/* Put entry in available array (but don't update avail->idx until they
4373b720b8cSRusty Russell 	 * do sync). */
438e593bf97STiwei Bie 	avail = vq->split.avail_idx_shadow & (vq->split.vring.num - 1);
439e593bf97STiwei Bie 	vq->split.vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);
4400a8a69ddSRusty Russell 
441ee7cd898SRusty Russell 	/* Descriptors and available array need to be set before we expose the
442ee7cd898SRusty Russell 	 * new available array entries. */
443a9a0fef7SRusty Russell 	virtio_wmb(vq->weak_barriers);
444e593bf97STiwei Bie 	vq->split.avail_idx_shadow++;
445e593bf97STiwei Bie 	vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
446e593bf97STiwei Bie 						vq->split.avail_idx_shadow);
447ee7cd898SRusty Russell 	vq->num_added++;
448ee7cd898SRusty Russell 
4495e05bf58STetsuo Handa 	pr_debug("Added buffer head %i to %p\n", head, vq);
4505e05bf58STetsuo Handa 	END_USE(vq);
4515e05bf58STetsuo Handa 
452ee7cd898SRusty Russell 	/* This is very unlikely, but theoretically possible.  Kick
453ee7cd898SRusty Russell 	 * just in case. */
454ee7cd898SRusty Russell 	if (unlikely(vq->num_added == (1 << 16) - 1))
455ee7cd898SRusty Russell 		virtqueue_kick(_vq);
456ee7cd898SRusty Russell 
45798e8c6bcSRusty Russell 	return 0;
458780bc790SAndy Lutomirski 
459780bc790SAndy Lutomirski unmap_release:
460780bc790SAndy Lutomirski 	err_idx = i;
461780bc790SAndy Lutomirski 	i = head;
462780bc790SAndy Lutomirski 
463780bc790SAndy Lutomirski 	for (n = 0; n < total_sg; n++) {
464780bc790SAndy Lutomirski 		if (i == err_idx)
465780bc790SAndy Lutomirski 			break;
466138fd251STiwei Bie 		vring_unmap_one_split(vq, &desc[i]);
467e593bf97STiwei Bie 		i = virtio16_to_cpu(_vq->vdev, vq->split.vring.desc[i].next);
468780bc790SAndy Lutomirski 	}
469780bc790SAndy Lutomirski 
470780bc790SAndy Lutomirski 	if (indirect)
471780bc790SAndy Lutomirski 		kfree(desc);
472780bc790SAndy Lutomirski 
4733cc36f6eSMichael S. Tsirkin 	END_USE(vq);
474780bc790SAndy Lutomirski 	return -EIO;
4750a8a69ddSRusty Russell }
47613816c76SRusty Russell 
477138fd251STiwei Bie static bool virtqueue_kick_prepare_split(struct virtqueue *_vq)
4780a8a69ddSRusty Russell {
4790a8a69ddSRusty Russell 	struct vring_virtqueue *vq = to_vvq(_vq);
480a5c262c5SMichael S. Tsirkin 	u16 new, old;
48141f0377fSRusty Russell 	bool needs_kick;
48241f0377fSRusty Russell 
4830a8a69ddSRusty Russell 	START_USE(vq);
484a72caae2SJason Wang 	/* We need to expose available array entries before checking avail
485a72caae2SJason Wang 	 * event. */
486a9a0fef7SRusty Russell 	virtio_mb(vq->weak_barriers);
4870a8a69ddSRusty Russell 
488e593bf97STiwei Bie 	old = vq->split.avail_idx_shadow - vq->num_added;
489e593bf97STiwei Bie 	new = vq->split.avail_idx_shadow;
4900a8a69ddSRusty Russell 	vq->num_added = 0;
4910a8a69ddSRusty Russell 
4924d6a105eSTiwei Bie 	LAST_ADD_TIME_CHECK(vq);
4934d6a105eSTiwei Bie 	LAST_ADD_TIME_INVALID(vq);
494e93300b1SRusty Russell 
49541f0377fSRusty Russell 	if (vq->event) {
496e593bf97STiwei Bie 		needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev,
497e593bf97STiwei Bie 					vring_avail_event(&vq->split.vring)),
49841f0377fSRusty Russell 					      new, old);
49941f0377fSRusty Russell 	} else {
500e593bf97STiwei Bie 		needs_kick = !(vq->split.vring.used->flags &
501e593bf97STiwei Bie 					cpu_to_virtio16(_vq->vdev,
502e593bf97STiwei Bie 						VRING_USED_F_NO_NOTIFY));
50341f0377fSRusty Russell 	}
5040a8a69ddSRusty Russell 	END_USE(vq);
50541f0377fSRusty Russell 	return needs_kick;
50641f0377fSRusty Russell }
507138fd251STiwei Bie 
508138fd251STiwei Bie static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
5095a08b04fSMichael S. Tsirkin 			     void **ctx)
5100a8a69ddSRusty Russell {
511780bc790SAndy Lutomirski 	unsigned int i, j;
512c60923cbSGonglei 	__virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);
5130a8a69ddSRusty Russell 
5140a8a69ddSRusty Russell 	/* Clear data ptr. */
515*cbeedb72STiwei Bie 	vq->split.desc_state[head].data = NULL;
5160a8a69ddSRusty Russell 
517780bc790SAndy Lutomirski 	/* Put back on free list: unmap first-level descriptors and find end */
5180a8a69ddSRusty Russell 	i = head;
5199fa29b9dSMark McLoughlin 
520e593bf97STiwei Bie 	while (vq->split.vring.desc[i].flags & nextflag) {
521e593bf97STiwei Bie 		vring_unmap_one_split(vq, &vq->split.vring.desc[i]);
522e593bf97STiwei Bie 		i = virtio16_to_cpu(vq->vq.vdev, vq->split.vring.desc[i].next);
52306ca287dSRusty Russell 		vq->vq.num_free++;
5240a8a69ddSRusty Russell 	}
5250a8a69ddSRusty Russell 
526e593bf97STiwei Bie 	vring_unmap_one_split(vq, &vq->split.vring.desc[i]);
527e593bf97STiwei Bie 	vq->split.vring.desc[i].next = cpu_to_virtio16(vq->vq.vdev,
528e593bf97STiwei Bie 						vq->free_head);
5290a8a69ddSRusty Russell 	vq->free_head = head;
530780bc790SAndy Lutomirski 
5310a8a69ddSRusty Russell 	/* Plus final descriptor */
53206ca287dSRusty Russell 	vq->vq.num_free++;
533780bc790SAndy Lutomirski 
5345a08b04fSMichael S. Tsirkin 	if (vq->indirect) {
535*cbeedb72STiwei Bie 		struct vring_desc *indir_desc =
536*cbeedb72STiwei Bie 				vq->split.desc_state[head].indir_desc;
5375a08b04fSMichael S. Tsirkin 		u32 len;
5385a08b04fSMichael S. Tsirkin 
5395a08b04fSMichael S. Tsirkin 		/* Free the indirect table, if any, now that it's unmapped. */
5405a08b04fSMichael S. Tsirkin 		if (!indir_desc)
5415a08b04fSMichael S. Tsirkin 			return;
5425a08b04fSMichael S. Tsirkin 
543e593bf97STiwei Bie 		len = virtio32_to_cpu(vq->vq.vdev,
544e593bf97STiwei Bie 				vq->split.vring.desc[head].len);
545780bc790SAndy Lutomirski 
546e593bf97STiwei Bie 		BUG_ON(!(vq->split.vring.desc[head].flags &
547780bc790SAndy Lutomirski 			 cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_INDIRECT)));
548780bc790SAndy Lutomirski 		BUG_ON(len == 0 || len % sizeof(struct vring_desc));
549780bc790SAndy Lutomirski 
550780bc790SAndy Lutomirski 		for (j = 0; j < len / sizeof(struct vring_desc); j++)
551138fd251STiwei Bie 			vring_unmap_one_split(vq, &indir_desc[j]);
552780bc790SAndy Lutomirski 
5535a08b04fSMichael S. Tsirkin 		kfree(indir_desc);
554*cbeedb72STiwei Bie 		vq->split.desc_state[head].indir_desc = NULL;
5555a08b04fSMichael S. Tsirkin 	} else if (ctx) {
556*cbeedb72STiwei Bie 		*ctx = vq->split.desc_state[head].indir_desc;
557780bc790SAndy Lutomirski 	}
5580a8a69ddSRusty Russell }
5590a8a69ddSRusty Russell 
560138fd251STiwei Bie static inline bool more_used_split(const struct vring_virtqueue *vq)
5610a8a69ddSRusty Russell {
562e593bf97STiwei Bie 	return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev,
563e593bf97STiwei Bie 			vq->split.vring.used->idx);
5640a8a69ddSRusty Russell }
5650a8a69ddSRusty Russell 
566138fd251STiwei Bie static void *virtqueue_get_buf_ctx_split(struct virtqueue *_vq,
567138fd251STiwei Bie 					 unsigned int *len,
5685a08b04fSMichael S. Tsirkin 					 void **ctx)
5690a8a69ddSRusty Russell {
5700a8a69ddSRusty Russell 	struct vring_virtqueue *vq = to_vvq(_vq);
5710a8a69ddSRusty Russell 	void *ret;
5720a8a69ddSRusty Russell 	unsigned int i;
5733b720b8cSRusty Russell 	u16 last_used;
5740a8a69ddSRusty Russell 
5750a8a69ddSRusty Russell 	START_USE(vq);
5760a8a69ddSRusty Russell 
5775ef82752SRusty Russell 	if (unlikely(vq->broken)) {
5785ef82752SRusty Russell 		END_USE(vq);
5795ef82752SRusty Russell 		return NULL;
5805ef82752SRusty Russell 	}
5815ef82752SRusty Russell 
582138fd251STiwei Bie 	if (!more_used_split(vq)) {
5830a8a69ddSRusty Russell 		pr_debug("No more buffers in queue\n");
5840a8a69ddSRusty Russell 		END_USE(vq);
5850a8a69ddSRusty Russell 		return NULL;
5860a8a69ddSRusty Russell 	}
5870a8a69ddSRusty Russell 
5882d61ba95SMichael S. Tsirkin 	/* Only get used array entries after they have been exposed by host. */
589a9a0fef7SRusty Russell 	virtio_rmb(vq->weak_barriers);
5902d61ba95SMichael S. Tsirkin 
591e593bf97STiwei Bie 	last_used = (vq->last_used_idx & (vq->split.vring.num - 1));
592e593bf97STiwei Bie 	i = virtio32_to_cpu(_vq->vdev,
593e593bf97STiwei Bie 			vq->split.vring.used->ring[last_used].id);
594e593bf97STiwei Bie 	*len = virtio32_to_cpu(_vq->vdev,
595e593bf97STiwei Bie 			vq->split.vring.used->ring[last_used].len);
5960a8a69ddSRusty Russell 
597e593bf97STiwei Bie 	if (unlikely(i >= vq->split.vring.num)) {
5980a8a69ddSRusty Russell 		BAD_RING(vq, "id %u out of range\n", i);
5990a8a69ddSRusty Russell 		return NULL;
6000a8a69ddSRusty Russell 	}
601*cbeedb72STiwei Bie 	if (unlikely(!vq->split.desc_state[i].data)) {
6020a8a69ddSRusty Russell 		BAD_RING(vq, "id %u is not a head!\n", i);
6030a8a69ddSRusty Russell 		return NULL;
6040a8a69ddSRusty Russell 	}
6050a8a69ddSRusty Russell 
606138fd251STiwei Bie 	/* detach_buf_split clears data, so grab it now. */
607*cbeedb72STiwei Bie 	ret = vq->split.desc_state[i].data;
608138fd251STiwei Bie 	detach_buf_split(vq, i, ctx);
6090a8a69ddSRusty Russell 	vq->last_used_idx++;
610a5c262c5SMichael S. Tsirkin 	/* If we expect an interrupt for the next entry, tell host
611a5c262c5SMichael S. Tsirkin 	 * by writing event index and flush out the write before
612a5c262c5SMichael S. Tsirkin 	 * the read in the next get_buf call. */
613e593bf97STiwei Bie 	if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT))
614788e5b3aSMichael S. Tsirkin 		virtio_store_mb(vq->weak_barriers,
615e593bf97STiwei Bie 				&vring_used_event(&vq->split.vring),
616788e5b3aSMichael S. Tsirkin 				cpu_to_virtio16(_vq->vdev, vq->last_used_idx));
617a5c262c5SMichael S. Tsirkin 
6184d6a105eSTiwei Bie 	LAST_ADD_TIME_INVALID(vq);
619e93300b1SRusty Russell 
6200a8a69ddSRusty Russell 	END_USE(vq);
6210a8a69ddSRusty Russell 	return ret;
6220a8a69ddSRusty Russell }
623138fd251STiwei Bie 
624138fd251STiwei Bie static void virtqueue_disable_cb_split(struct virtqueue *_vq)
625138fd251STiwei Bie {
626138fd251STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
627138fd251STiwei Bie 
628e593bf97STiwei Bie 	if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
629e593bf97STiwei Bie 		vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
630138fd251STiwei Bie 		if (!vq->event)
631e593bf97STiwei Bie 			vq->split.vring.avail->flags =
632e593bf97STiwei Bie 				cpu_to_virtio16(_vq->vdev,
633e593bf97STiwei Bie 						vq->split.avail_flags_shadow);
634138fd251STiwei Bie 	}
635138fd251STiwei Bie }
636138fd251STiwei Bie 
637138fd251STiwei Bie static unsigned virtqueue_enable_cb_prepare_split(struct virtqueue *_vq)
638cc229884SMichael S. Tsirkin {
639cc229884SMichael S. Tsirkin 	struct vring_virtqueue *vq = to_vvq(_vq);
640cc229884SMichael S. Tsirkin 	u16 last_used_idx;
641cc229884SMichael S. Tsirkin 
642cc229884SMichael S. Tsirkin 	START_USE(vq);
643cc229884SMichael S. Tsirkin 
644cc229884SMichael S. Tsirkin 	/* We optimistically turn back on interrupts, then check if there was
645cc229884SMichael S. Tsirkin 	 * more to do. */
646cc229884SMichael S. Tsirkin 	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
647cc229884SMichael S. Tsirkin 	 * either clear the flags bit or point the event index at the next
648cc229884SMichael S. Tsirkin 	 * entry. Always do both to keep code simple. */
649e593bf97STiwei Bie 	if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
650e593bf97STiwei Bie 		vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
6510ea1e4a6SLadi Prosek 		if (!vq->event)
652e593bf97STiwei Bie 			vq->split.vring.avail->flags =
653e593bf97STiwei Bie 				cpu_to_virtio16(_vq->vdev,
654e593bf97STiwei Bie 						vq->split.avail_flags_shadow);
655f277ec42SVenkatesh Srinivas 	}
656e593bf97STiwei Bie 	vring_used_event(&vq->split.vring) = cpu_to_virtio16(_vq->vdev,
657e593bf97STiwei Bie 			last_used_idx = vq->last_used_idx);
658cc229884SMichael S. Tsirkin 	END_USE(vq);
659cc229884SMichael S. Tsirkin 	return last_used_idx;
660cc229884SMichael S. Tsirkin }
661138fd251STiwei Bie 
662138fd251STiwei Bie static bool virtqueue_poll_split(struct virtqueue *_vq, unsigned last_used_idx)
663138fd251STiwei Bie {
664138fd251STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
665138fd251STiwei Bie 
666138fd251STiwei Bie 	return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev,
667e593bf97STiwei Bie 			vq->split.vring.used->idx);
668138fd251STiwei Bie }
669138fd251STiwei Bie 
670138fd251STiwei Bie static bool virtqueue_enable_cb_delayed_split(struct virtqueue *_vq)
6717ab358c2SMichael S. Tsirkin {
6727ab358c2SMichael S. Tsirkin 	struct vring_virtqueue *vq = to_vvq(_vq);
6737ab358c2SMichael S. Tsirkin 	u16 bufs;
6747ab358c2SMichael S. Tsirkin 
6757ab358c2SMichael S. Tsirkin 	START_USE(vq);
6767ab358c2SMichael S. Tsirkin 
6777ab358c2SMichael S. Tsirkin 	/* We optimistically turn back on interrupts, then check if there was
6787ab358c2SMichael S. Tsirkin 	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always update the event index to keep code simple. */
	if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->split.vring.avail->flags =
				cpu_to_virtio16(_vq->vdev,
						vq->split.avail_flags_shadow);
	}
	/* TODO: tune this threshold */
	bufs = (u16)(vq->split.avail_idx_shadow - vq->last_used_idx) * 3 / 4;

	virtio_store_mb(vq->weak_barriers,
			&vring_used_event(&vq->split.vring),
			cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs));

	if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->split.vring.used->idx)
					- vq->last_used_idx) > bufs)) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}

static void *virtqueue_detach_unused_buf_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i;
	void *buf;

	START_USE(vq);

	for (i = 0; i < vq->split.vring.num; i++) {
		if (!vq->split.desc_state[i].data)
			continue;
		/* detach_buf_split clears data, so grab it now. */
		buf = vq->split.desc_state[i].data;
		detach_buf_split(vq, i, NULL);
		vq->split.avail_idx_shadow--;
		vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
				vq->split.avail_idx_shadow);
		END_USE(vq);
		return buf;
	}
	/* That should have freed everything. */
	BUG_ON(vq->vq.num_free != vq->split.vring.num);

	END_USE(vq);
	return NULL;
}


/*
 * Generic functions and exported symbols.
 */

static inline int virtqueue_add(struct virtqueue *_vq,
				struct scatterlist *sgs[],
				unsigned int total_sg,
				unsigned int out_sgs,
				unsigned int in_sgs,
				void *data,
				void *ctx,
				gfp_t gfp)
{
	return virtqueue_add_split(_vq, sgs, total_sg,
				   out_sgs, in_sgs, data, ctx, gfp);
}

/**
 * virtqueue_add_sgs - expose buffers to other end
 * @_vq: the struct virtqueue we're talking about.
 * @sgs: array of terminated scatterlists.
 * @out_sgs: the number of scatterlists readable by other side
 * @in_sgs: the number of scatterlists which are writable (after readable ones)
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (i.e. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_sgs(struct virtqueue *_vq,
		      struct scatterlist *sgs[],
		      unsigned int out_sgs,
		      unsigned int in_sgs,
		      void *data,
		      gfp_t gfp)
{
	unsigned int i, total_sg = 0;

	/* Count them first. */
	for (i = 0; i < out_sgs + in_sgs; i++) {
		struct scatterlist *sg;

		for (sg = sgs[i]; sg; sg = sg_next(sg))
			total_sg++;
	}
	return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs,
			     data, NULL, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
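
/*
 * Illustrative sketch (not part of this file): a driver submitting one
 * device-readable and one device-writable buffer in a single call, for a
 * hypothetical request structure with "hdr" and "status" fields:
 *
 *	struct scatterlist hdr, status;
 *	struct scatterlist *sgs[] = { &hdr, &status };
 *
 *	sg_init_one(&hdr, &req->hdr, sizeof(req->hdr));
 *	sg_init_one(&status, &req->status, sizeof(req->status));
 *	if (!virtqueue_add_sgs(vq, sgs, 1, 1, req, GFP_ATOMIC))
 *		virtqueue_kick(vq);
 *
 * The token (req here) is returned later by virtqueue_get_buf().
 */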

/**
 * virtqueue_add_outbuf - expose output buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg readable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (i.e. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_outbuf(struct virtqueue *vq,
			 struct scatterlist *sg, unsigned int num,
			 void *data,
			 gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);

/**
 * virtqueue_add_inbuf - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (i.e. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_inbuf(struct virtqueue *vq,
			struct scatterlist *sg, unsigned int num,
			void *data,
			gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 0, 1, data, NULL, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);

/**
 * virtqueue_add_inbuf_ctx - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @ctx: extra context for the token
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (i.e. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_inbuf_ctx(struct virtqueue *vq,
			struct scatterlist *sg, unsigned int num,
			void *data,
			void *ctx,
			gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_ctx);
853e6f633e5STiwei Bie 
854e6f633e5STiwei Bie /**
855e6f633e5STiwei Bie  * virtqueue_kick_prepare - first half of split virtqueue_kick call.
856e6f633e5STiwei Bie  * @vq: the struct virtqueue
857e6f633e5STiwei Bie  *
858e6f633e5STiwei Bie  * Instead of virtqueue_kick(), you can do:
859e6f633e5STiwei Bie  *	if (virtqueue_kick_prepare(vq))
860e6f633e5STiwei Bie  *		virtqueue_notify(vq);
861e6f633e5STiwei Bie  *
862e6f633e5STiwei Bie  * This is sometimes useful because the virtqueue_kick_prepare() needs
863e6f633e5STiwei Bie  * to be serialized, but the actual virtqueue_notify() call does not.
864e6f633e5STiwei Bie  */
865e6f633e5STiwei Bie bool virtqueue_kick_prepare(struct virtqueue *_vq)
866e6f633e5STiwei Bie {
867e6f633e5STiwei Bie 	return virtqueue_kick_prepare_split(_vq);
868e6f633e5STiwei Bie }
869e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);
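
/*
 * Illustrative sketch (not part of this file): the prepare/notify split
 * lets a driver drop its own (hypothetical) queue lock before the
 * potentially slow exit to the host:
 *
 *	bool kick;
 *
 *	spin_lock_irqsave(&my_lock, flags);
 *	err = virtqueue_add_outbuf(vq, &sg, 1, token, GFP_ATOMIC);
 *	kick = virtqueue_kick_prepare(vq);
 *	spin_unlock_irqrestore(&my_lock, flags);
 *	if (kick)
 *		virtqueue_notify(vq);
 */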

/**
 * virtqueue_notify - second half of split virtqueue_kick call.
 * @_vq: the struct virtqueue
 *
 * This does not need to be serialized.
 *
 * Returns false if host notify failed or queue is broken, otherwise true.
 */
bool virtqueue_notify(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (unlikely(vq->broken))
		return false;

	/* Prod other side to tell it about changes. */
	if (!vq->notify(_vq)) {
		vq->broken = true;
		return false;
	}
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_notify);

/**
 * virtqueue_kick - update after add_buf
 * @vq: the struct virtqueue
 *
 * After one or more virtqueue_add_* calls, invoke this to kick
 * the other side.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns false if kick failed, otherwise true.
 */
bool virtqueue_kick(struct virtqueue *vq)
{
	if (virtqueue_kick_prepare(vq))
		return virtqueue_notify(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_kick);

/**
 * virtqueue_get_buf_ctx - get the next used buffer
 * @_vq: the struct virtqueue we're talking about.
 * @len: the length written into the buffer
 * @ctx: extra context for the token
 *
 * If the device wrote data into the buffer, @len will be set to the
 * amount written.  This means you don't need to clear the buffer
 * beforehand to ensure there's no data leakage in the case of short
 * writes.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns NULL if there are no used buffers, or the "data" token
 * handed to virtqueue_add_*().
 */
void *virtqueue_get_buf_ctx(struct virtqueue *_vq, unsigned int *len,
			    void **ctx)
{
	return virtqueue_get_buf_ctx_split(_vq, len, ctx);
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf_ctx);

void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
	return virtqueue_get_buf_ctx(_vq, len, NULL);
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf);
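
/*
 * Illustrative sketch (not part of this file): a typical completion loop
 * in a driver's virtqueue callback, draining all used buffers
 * (complete_request() is a hypothetical helper):
 *
 *	unsigned int len;
 *	void *token;
 *
 *	while ((token = virtqueue_get_buf(vq, &len)) != NULL)
 *		complete_request(token, len);
 */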

/**
 * virtqueue_disable_cb - disable callbacks
 * @_vq: the struct virtqueue we're talking about.
 *
 * Note that this is not necessarily synchronous, hence unreliable and only
 * useful as an optimization.
 *
 * Unlike other operations, this need not be serialized.
 */
void virtqueue_disable_cb(struct virtqueue *_vq)
{
	virtqueue_disable_cb_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);

/**
 * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns current queue state
 * in an opaque unsigned value. This value should be later tested by
 * virtqueue_poll, to detect a possible race between the driver checking for
 * more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
{
	return virtqueue_enable_cb_prepare_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);

/**
 * virtqueue_poll - query pending used buffers
 * @_vq: the struct virtqueue we're talking about.
 * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
 *
 * Returns "true" if there are pending used buffers in the queue.
 *
 * This does not need to be serialized.
 */
bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	virtio_mb(vq->weak_barriers);
	return virtqueue_poll_split(_vq, last_used_idx);
}
EXPORT_SYMBOL_GPL(virtqueue_poll);
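
/*
 * Illustrative sketch (not part of this file): the race-free re-enable
 * pattern these two calls support, as used by NAPI-style drivers
 * (process_used() is a hypothetical helper):
 *
 *	unsigned opaque;
 *
 *	virtqueue_disable_cb(vq);
 *	process_used(vq);
 *	opaque = virtqueue_enable_cb_prepare(vq);
 *	if (virtqueue_poll(vq, opaque)) {
 *		virtqueue_disable_cb(vq);
 *		... more buffers arrived in the window; process again ...
 *	}
 */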

/**
 * virtqueue_enable_cb - restart callbacks after disable_cb.
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns "false" if there are pending
 * buffers in the queue, to detect a possible race between the driver
 * checking for more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb(struct virtqueue *_vq)
{
	unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);

	return !virtqueue_poll(_vq, last_used_idx);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);

/**
 * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks but hints to the other side to delay
 * interrupts until most of the available buffers have been processed;
 * it returns "false" if there are many pending buffers in the queue,
 * to detect a possible race between the driver checking for more work,
 * and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
{
	return virtqueue_enable_cb_delayed_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);

/**
 * virtqueue_detach_unused_buf - detach first unused buffer
 * @_vq: the struct virtqueue we're talking about.
 *
 * Returns NULL or the "data" token handed to virtqueue_add_*().
 * This is not valid on an active queue; it is useful only for device
 * shutdown.
 */
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
	return virtqueue_detach_unused_buf_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
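
/*
 * Illustrative sketch (not part of this file): reclaiming outstanding
 * tokens during device shutdown, after the device has been reset
 * (free_request() is a hypothetical helper):
 *
 *	void *buf;
 *
 *	while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
 *		free_request(buf);
 */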

static inline bool more_used(const struct vring_virtqueue *vq)
{
	return more_used_split(vq);
}

irqreturn_t vring_interrupt(int irq, void *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!more_used(vq)) {
		pr_debug("virtqueue interrupt with no work for %p\n", vq);
		return IRQ_NONE;
	}

	if (unlikely(vq->broken))
		return IRQ_HANDLED;

	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
	if (vq->vq.callback)
		vq->vq.callback(&vq->vq);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);
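
/*
 * Illustrative sketch (not part of this file): a transport wiring this
 * handler directly to a per-virtqueue interrupt vector, passing the
 * virtqueue as the dev_id cookie (irq and name are assumptions here):
 *
 *	err = request_irq(irq, vring_interrupt, 0, name, vq);
 */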

struct virtqueue *__vring_new_virtqueue(unsigned int index,
					struct vring vring,
					struct virtio_device *vdev,
					bool weak_barriers,
					bool context,
					bool (*notify)(struct virtqueue *),
					void (*callback)(struct virtqueue *),
					const char *name)
{
	unsigned int i;
	struct vring_virtqueue *vq;

	vq = kmalloc(sizeof(*vq), GFP_KERNEL);
	if (!vq)
		return NULL;

	vq->vq.callback = callback;
	vq->vq.vdev = vdev;
	vq->vq.name = name;
	vq->vq.num_free = vring.num;
	vq->vq.index = index;
	vq->we_own_ring = false;
	vq->queue_dma_addr = 0;
	vq->queue_size_in_bytes = 0;
	vq->notify = notify;
	vq->weak_barriers = weak_barriers;
	vq->broken = false;
	vq->last_used_idx = 0;
	vq->num_added = 0;
	list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
	vq->in_use = false;
	vq->last_add_time_valid = false;
#endif

	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
		!context;
	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

	vq->split.vring = vring;
	vq->split.avail_flags_shadow = 0;
	vq->split.avail_idx_shadow = 0;

	/* No callback?  Tell other side not to bother us. */
	if (!callback) {
		vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->split.vring.avail->flags = cpu_to_virtio16(vdev,
					vq->split.avail_flags_shadow);
	}

1123*cbeedb72STiwei Bie 	vq->split.desc_state = kmalloc_array(vring.num,
1124*cbeedb72STiwei Bie 			sizeof(struct vring_desc_state_split), GFP_KERNEL);
1125*cbeedb72STiwei Bie 	if (!vq->split.desc_state) {
1126*cbeedb72STiwei Bie 		kfree(vq);
1127*cbeedb72STiwei Bie 		return NULL;
1128*cbeedb72STiwei Bie 	}
1129*cbeedb72STiwei Bie 
11300a8a69ddSRusty Russell 	/* Put everything in free lists. */
11310a8a69ddSRusty Russell 	vq->free_head = 0;
11322a2d1382SAndy Lutomirski 	for (i = 0; i < vring.num - 1; i++)
1133e593bf97STiwei Bie 		vq->split.vring.desc[i].next = cpu_to_virtio16(vdev, i + 1);
1134*cbeedb72STiwei Bie 	memset(vq->split.desc_state, 0, vring.num *
1135*cbeedb72STiwei Bie 			sizeof(struct vring_desc_state_split));
11360a8a69ddSRusty Russell 
11370a8a69ddSRusty Russell 	return &vq->vq;
11380a8a69ddSRusty Russell }
11392a2d1382SAndy Lutomirski EXPORT_SYMBOL_GPL(__vring_new_virtqueue);
11402a2d1382SAndy Lutomirski 
11412a2d1382SAndy Lutomirski static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
11422a2d1382SAndy Lutomirski 			      dma_addr_t *dma_handle, gfp_t flag)
11432a2d1382SAndy Lutomirski {
11442a2d1382SAndy Lutomirski 	if (vring_use_dma_api(vdev)) {
11452a2d1382SAndy Lutomirski 		return dma_alloc_coherent(vdev->dev.parent, size,
11462a2d1382SAndy Lutomirski 					  dma_handle, flag);
11472a2d1382SAndy Lutomirski 	} else {
11482a2d1382SAndy Lutomirski 		void *queue = alloc_pages_exact(PAGE_ALIGN(size), flag);
11492a2d1382SAndy Lutomirski 		if (queue) {
11502a2d1382SAndy Lutomirski 			phys_addr_t phys_addr = virt_to_phys(queue);
11512a2d1382SAndy Lutomirski 			*dma_handle = (dma_addr_t)phys_addr;
11522a2d1382SAndy Lutomirski 
11532a2d1382SAndy Lutomirski 			/*
11542a2d1382SAndy Lutomirski 			 * Sanity check: make sure we didn't truncate
11552a2d1382SAndy Lutomirski 			 * the address.  The only arches I can find that
11562a2d1382SAndy Lutomirski 			 * have 64-bit phys_addr_t but 32-bit dma_addr_t
11572a2d1382SAndy Lutomirski 			 * are certain non-highmem MIPS and x86
11582a2d1382SAndy Lutomirski 			 * configurations, but these configurations
11592a2d1382SAndy Lutomirski 			 * should never allocate physical pages above 32
11602a2d1382SAndy Lutomirski 			 * bits, so this is fine.  Just in case, throw a
11612a2d1382SAndy Lutomirski 			 * warning and abort if we end up with an
11622a2d1382SAndy Lutomirski 			 * unrepresentable address.
11632a2d1382SAndy Lutomirski 			 */
11642a2d1382SAndy Lutomirski 			if (WARN_ON_ONCE(*dma_handle != phys_addr)) {
11652a2d1382SAndy Lutomirski 				free_pages_exact(queue, PAGE_ALIGN(size));
11662a2d1382SAndy Lutomirski 				return NULL;
11672a2d1382SAndy Lutomirski 			}
11682a2d1382SAndy Lutomirski 		}
11692a2d1382SAndy Lutomirski 		return queue;
11702a2d1382SAndy Lutomirski 	}
11712a2d1382SAndy Lutomirski }
11722a2d1382SAndy Lutomirski 
11732a2d1382SAndy Lutomirski static void vring_free_queue(struct virtio_device *vdev, size_t size,
11742a2d1382SAndy Lutomirski 			     void *queue, dma_addr_t dma_handle)
11752a2d1382SAndy Lutomirski {
11762a2d1382SAndy Lutomirski 	if (vring_use_dma_api(vdev)) {
11772a2d1382SAndy Lutomirski 		dma_free_coherent(vdev->dev.parent, size, queue, dma_handle);
11782a2d1382SAndy Lutomirski 	} else {
11792a2d1382SAndy Lutomirski 		free_pages_exact(queue, PAGE_ALIGN(size));
11802a2d1382SAndy Lutomirski 	}
11812a2d1382SAndy Lutomirski }
11822a2d1382SAndy Lutomirski 
11832a2d1382SAndy Lutomirski struct virtqueue *vring_create_virtqueue(
11842a2d1382SAndy Lutomirski 	unsigned int index,
11852a2d1382SAndy Lutomirski 	unsigned int num,
11862a2d1382SAndy Lutomirski 	unsigned int vring_align,
11872a2d1382SAndy Lutomirski 	struct virtio_device *vdev,
11882a2d1382SAndy Lutomirski 	bool weak_barriers,
11892a2d1382SAndy Lutomirski 	bool may_reduce_num,
1190f94682ddSMichael S. Tsirkin 	bool context,
11912a2d1382SAndy Lutomirski 	bool (*notify)(struct virtqueue *),
11922a2d1382SAndy Lutomirski 	void (*callback)(struct virtqueue *),
11932a2d1382SAndy Lutomirski 	const char *name)
11942a2d1382SAndy Lutomirski {
11952a2d1382SAndy Lutomirski 	struct virtqueue *vq;
1196e00f7bd2SDan Carpenter 	void *queue = NULL;
11972a2d1382SAndy Lutomirski 	dma_addr_t dma_addr;
11982a2d1382SAndy Lutomirski 	size_t queue_size_in_bytes;
11992a2d1382SAndy Lutomirski 	struct vring vring;
12002a2d1382SAndy Lutomirski 
12012a2d1382SAndy Lutomirski 	/* We assume num is a power of 2. */
12022a2d1382SAndy Lutomirski 	if (num & (num - 1)) {
12032a2d1382SAndy Lutomirski 		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
12042a2d1382SAndy Lutomirski 		return NULL;
12052a2d1382SAndy Lutomirski 	}
12062a2d1382SAndy Lutomirski 
12072a2d1382SAndy Lutomirski 	/* TODO: allocate each queue chunk individually */
12082a2d1382SAndy Lutomirski 	for (; num && vring_size(num, vring_align) > PAGE_SIZE; num /= 2) {
12092a2d1382SAndy Lutomirski 		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
12102a2d1382SAndy Lutomirski 					  &dma_addr,
12112a2d1382SAndy Lutomirski 					  GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
12122a2d1382SAndy Lutomirski 		if (queue)
12132a2d1382SAndy Lutomirski 			break;
12142a2d1382SAndy Lutomirski 	}
12152a2d1382SAndy Lutomirski 
12162a2d1382SAndy Lutomirski 	if (!num)
12172a2d1382SAndy Lutomirski 		return NULL;
12182a2d1382SAndy Lutomirski 
12192a2d1382SAndy Lutomirski 	if (!queue) {
12202a2d1382SAndy Lutomirski 		/* Try to get a single page. You are my only hope! */
12212a2d1382SAndy Lutomirski 		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
12222a2d1382SAndy Lutomirski 					  &dma_addr, GFP_KERNEL|__GFP_ZERO);
12232a2d1382SAndy Lutomirski 	}
12242a2d1382SAndy Lutomirski 	if (!queue)
12252a2d1382SAndy Lutomirski 		return NULL;
12262a2d1382SAndy Lutomirski 
12272a2d1382SAndy Lutomirski 	queue_size_in_bytes = vring_size(num, vring_align);
12282a2d1382SAndy Lutomirski 	vring_init(&vring, num, queue, vring_align);
12292a2d1382SAndy Lutomirski 
1230f94682ddSMichael S. Tsirkin 	vq = __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
12312a2d1382SAndy Lutomirski 				   notify, callback, name);
12322a2d1382SAndy Lutomirski 	if (!vq) {
12332a2d1382SAndy Lutomirski 		vring_free_queue(vdev, queue_size_in_bytes, queue,
12342a2d1382SAndy Lutomirski 				 dma_addr);
12352a2d1382SAndy Lutomirski 		return NULL;
12362a2d1382SAndy Lutomirski 	}
12372a2d1382SAndy Lutomirski 
12382a2d1382SAndy Lutomirski 	to_vvq(vq)->queue_dma_addr = dma_addr;
12392a2d1382SAndy Lutomirski 	to_vvq(vq)->queue_size_in_bytes = queue_size_in_bytes;
12402a2d1382SAndy Lutomirski 	to_vvq(vq)->we_own_ring = true;
12412a2d1382SAndy Lutomirski 
12422a2d1382SAndy Lutomirski 	return vq;
12432a2d1382SAndy Lutomirski }
12442a2d1382SAndy Lutomirski EXPORT_SYMBOL_GPL(vring_create_virtqueue);
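
/*
 * Usage sketch (illustrative only): a transport that lets the ring code
 * allocate the queue memory might create a queue like this.  The
 * alignment, flag values, callbacks and name are all hypothetical.
 * (Arguments: index, size, align, vdev, weak_barriers, may_reduce_num,
 * per-buffer context, notify, callback, name.)
 *
 *	vq = vring_create_virtqueue(index, num, SMP_CACHE_BYTES, vdev,
 *				    true, true, false,
 *				    my_notify, my_callback, "my-vq");
 *	if (!vq)
 *		return -ENOMEM;
 */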
12452a2d1382SAndy Lutomirski 
12462a2d1382SAndy Lutomirski struct virtqueue *vring_new_virtqueue(unsigned int index,
12472a2d1382SAndy Lutomirski 				      unsigned int num,
12482a2d1382SAndy Lutomirski 				      unsigned int vring_align,
12492a2d1382SAndy Lutomirski 				      struct virtio_device *vdev,
12502a2d1382SAndy Lutomirski 				      bool weak_barriers,
1251f94682ddSMichael S. Tsirkin 				      bool context,
12522a2d1382SAndy Lutomirski 				      void *pages,
12532a2d1382SAndy Lutomirski 				      bool (*notify)(struct virtqueue *vq),
12542a2d1382SAndy Lutomirski 				      void (*callback)(struct virtqueue *vq),
12552a2d1382SAndy Lutomirski 				      const char *name)
12562a2d1382SAndy Lutomirski {
12572a2d1382SAndy Lutomirski 	struct vring vring;
12582a2d1382SAndy Lutomirski 	vring_init(&vring, num, pages, vring_align);
1259f94682ddSMichael S. Tsirkin 	return __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
12602a2d1382SAndy Lutomirski 				     notify, callback, name);
12612a2d1382SAndy Lutomirski }
1262c6fd4701SRusty Russell EXPORT_SYMBOL_GPL(vring_new_virtqueue);
12630a8a69ddSRusty Russell 
12642a2d1382SAndy Lutomirski void vring_del_virtqueue(struct virtqueue *_vq)
12650a8a69ddSRusty Russell {
12662a2d1382SAndy Lutomirski 	struct vring_virtqueue *vq = to_vvq(_vq);
12672a2d1382SAndy Lutomirski 
12682a2d1382SAndy Lutomirski 	if (vq->we_own_ring) {
12692a2d1382SAndy Lutomirski 		vring_free_queue(vq->vq.vdev, vq->queue_size_in_bytes,
1270e593bf97STiwei Bie 				 vq->split.vring.desc, vq->queue_dma_addr);
1271*cbeedb72STiwei Bie 		kfree(vq->split.desc_state);
12722a2d1382SAndy Lutomirski 	}
12732a2d1382SAndy Lutomirski 	list_del(&_vq->list);
12742a2d1382SAndy Lutomirski 	kfree(vq);
12750a8a69ddSRusty Russell }
1276c6fd4701SRusty Russell EXPORT_SYMBOL_GPL(vring_del_virtqueue);
12770a8a69ddSRusty Russell 
1278e34f8725SRusty Russell /* Manipulates transport-specific feature bits. */
1279e34f8725SRusty Russell void vring_transport_features(struct virtio_device *vdev)
1280e34f8725SRusty Russell {
1281e34f8725SRusty Russell 	unsigned int i;
1282e34f8725SRusty Russell 
1283e34f8725SRusty Russell 	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
1284e34f8725SRusty Russell 		switch (i) {
12859fa29b9dSMark McLoughlin 		case VIRTIO_RING_F_INDIRECT_DESC:
12869fa29b9dSMark McLoughlin 			break;
1287a5c262c5SMichael S. Tsirkin 		case VIRTIO_RING_F_EVENT_IDX:
1288a5c262c5SMichael S. Tsirkin 			break;
1289747ae34aSMichael S. Tsirkin 		case VIRTIO_F_VERSION_1:
1290747ae34aSMichael S. Tsirkin 			break;
12911a937693SMichael S. Tsirkin 		case VIRTIO_F_IOMMU_PLATFORM:
12921a937693SMichael S. Tsirkin 			break;
1293e34f8725SRusty Russell 		default:
1294e34f8725SRusty Russell 			/* We don't understand this bit. */
1295e16e12beSMichael S. Tsirkin 			__virtio_clear_bit(vdev, i);
1296e34f8725SRusty Russell 		}
1297e34f8725SRusty Russell 	}
1298e34f8725SRusty Russell }
1299e34f8725SRusty Russell EXPORT_SYMBOL_GPL(vring_transport_features);
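
/*
 * Usage sketch (illustrative only): a transport's finalize_features
 * hook calls this before acking features, so transport bits it does not
 * understand are never offered to drivers.  The hook name is
 * hypothetical.
 *
 *	static int my_finalize_features(struct virtio_device *vdev)
 *	{
 *		vring_transport_features(vdev);
 *		... write vdev->features back to the device ...
 *		return 0;
 *	}
 */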
1300e34f8725SRusty Russell 
13015dfc1762SRusty Russell /**
13025dfc1762SRusty Russell  * virtqueue_get_vring_size - return the size of the virtqueue's vring
13035dfc1762SRusty Russell  * @vq: the struct virtqueue containing the vring of interest.
13045dfc1762SRusty Russell  *
13055dfc1762SRusty Russell  * Returns the size of the vring.  This is mainly used for boasting to
13065dfc1762SRusty Russell  * userspace.  Unlike other operations, this need not be serialized.
13075dfc1762SRusty Russell  */
13088f9f4668SRick Jones unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
13098f9f4668SRick Jones {
13118f9f4668SRick Jones 	struct vring_virtqueue *vq = to_vvq(_vq);
13128f9f4668SRick Jones 
1313e593bf97STiwei Bie 	return vq->split.vring.num;
13148f9f4668SRick Jones }
13158f9f4668SRick Jones EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);
13168f9f4668SRick Jones 
1317b3b32c94SHeinz Graalfs bool virtqueue_is_broken(struct virtqueue *_vq)
1318b3b32c94SHeinz Graalfs {
1319b3b32c94SHeinz Graalfs 	struct vring_virtqueue *vq = to_vvq(_vq);
1320b3b32c94SHeinz Graalfs 
1321b3b32c94SHeinz Graalfs 	return vq->broken;
1322b3b32c94SHeinz Graalfs }
1323b3b32c94SHeinz Graalfs EXPORT_SYMBOL_GPL(virtqueue_is_broken);
1324b3b32c94SHeinz Graalfs 
1325e2dcdfe9SRusty Russell /*
1326e2dcdfe9SRusty Russell  * This should prevent the device from being used, allowing drivers to
1327e2dcdfe9SRusty Russell  * recover.  You may need to grab appropriate locks to flush.
1328e2dcdfe9SRusty Russell  */
1329e2dcdfe9SRusty Russell void virtio_break_device(struct virtio_device *dev)
1330e2dcdfe9SRusty Russell {
1331e2dcdfe9SRusty Russell 	struct virtqueue *_vq;
1332e2dcdfe9SRusty Russell 
1333e2dcdfe9SRusty Russell 	list_for_each_entry(_vq, &dev->vqs, list) {
1334e2dcdfe9SRusty Russell 		struct vring_virtqueue *vq = to_vvq(_vq);
1335e2dcdfe9SRusty Russell 		vq->broken = true;
1336e2dcdfe9SRusty Russell 	}
1337e2dcdfe9SRusty Russell }
1338e2dcdfe9SRusty Russell EXPORT_SYMBOL_GPL(virtio_break_device);
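
/*
 * Usage sketch (illustrative only): on surprise removal a transport can
 * break the device so in-flight users see the queues as dead instead of
 * waiting forever.  The hook below is hypothetical.
 *
 *	static void my_surprise_remove(struct my_transport *tp)
 *	{
 *		virtio_break_device(&tp->vdev);
 *		... flush/complete outstanding requests under locks ...
 *	}
 */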
1339e2dcdfe9SRusty Russell 
13402a2d1382SAndy Lutomirski dma_addr_t virtqueue_get_desc_addr(struct virtqueue *_vq)
134189062652SCornelia Huck {
134289062652SCornelia Huck 	struct vring_virtqueue *vq = to_vvq(_vq);
134389062652SCornelia Huck 
13442a2d1382SAndy Lutomirski 	BUG_ON(!vq->we_own_ring);
134589062652SCornelia Huck 
13462a2d1382SAndy Lutomirski 	return vq->queue_dma_addr;
13472a2d1382SAndy Lutomirski }
13482a2d1382SAndy Lutomirski EXPORT_SYMBOL_GPL(virtqueue_get_desc_addr);
13492a2d1382SAndy Lutomirski 
13502a2d1382SAndy Lutomirski dma_addr_t virtqueue_get_avail_addr(struct virtqueue *_vq)
135189062652SCornelia Huck {
135289062652SCornelia Huck 	struct vring_virtqueue *vq = to_vvq(_vq);
135389062652SCornelia Huck 
13542a2d1382SAndy Lutomirski 	BUG_ON(!vq->we_own_ring);
13552a2d1382SAndy Lutomirski 
13562a2d1382SAndy Lutomirski 	return vq->queue_dma_addr +
1357e593bf97STiwei Bie 		((char *)vq->split.vring.avail - (char *)vq->split.vring.desc);
135889062652SCornelia Huck }
13592a2d1382SAndy Lutomirski EXPORT_SYMBOL_GPL(virtqueue_get_avail_addr);
13602a2d1382SAndy Lutomirski 
13612a2d1382SAndy Lutomirski dma_addr_t virtqueue_get_used_addr(struct virtqueue *_vq)
13622a2d1382SAndy Lutomirski {
13632a2d1382SAndy Lutomirski 	struct vring_virtqueue *vq = to_vvq(_vq);
13642a2d1382SAndy Lutomirski 
13652a2d1382SAndy Lutomirski 	BUG_ON(!vq->we_own_ring);
13662a2d1382SAndy Lutomirski 
13672a2d1382SAndy Lutomirski 	return vq->queue_dma_addr +
1368e593bf97STiwei Bie 		((char *)vq->split.vring.used - (char *)vq->split.vring.desc);
13692a2d1382SAndy Lutomirski }
13702a2d1382SAndy Lutomirski EXPORT_SYMBOL_GPL(virtqueue_get_used_addr);
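
/*
 * Usage sketch (illustrative only): transports that own the ring (see
 * we_own_ring) use these accessors to program the device with the
 * locations of the three vring areas, e.g. in modern virtio-mmio style
 * (register offsets from <linux/virtio_mmio.h>; error handling elided):
 *
 *	u64 addr = virtqueue_get_desc_addr(vq);
 *
 *	writel((u32)addr, base + VIRTIO_MMIO_QUEUE_DESC_LOW);
 *	writel((u32)(addr >> 32), base + VIRTIO_MMIO_QUEUE_DESC_HIGH);
 *	(and likewise with virtqueue_get_avail_addr() /
 *	 virtqueue_get_used_addr() for the driver and device areas)
 */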
13712a2d1382SAndy Lutomirski 
13722a2d1382SAndy Lutomirski const struct vring *virtqueue_get_vring(struct virtqueue *vq)
13732a2d1382SAndy Lutomirski {
1374e593bf97STiwei Bie 	return &to_vvq(vq)->split.vring;
13752a2d1382SAndy Lutomirski }
13762a2d1382SAndy Lutomirski EXPORT_SYMBOL_GPL(virtqueue_get_vring);
137789062652SCornelia Huck 
1378c6fd4701SRusty Russell MODULE_LICENSE("GPL");
1379