xref: /openbmc/linux/drivers/virtio/virtio_ring.c (revision 58625edf9e2515ed41dac2a24fa8004030a87b87)
/* Virtio ring implementation.
 *
 *  Copyright 2007 Rusty Russell IBM Corporation
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/kmemleak.h>
#include <linux/dma-mapping.h>
#include <xen/xen.h>

#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		BUG();						\
	} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)						\
	do {							\
		if ((_vq)->in_use)				\
			panic("%s:in_use = %i\n",		\
			      (_vq)->vq.name, (_vq)->in_use);	\
		(_vq)->in_use = __LINE__;			\
	} while (0)
#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while (0)
#else
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		(_vq)->broken = true;				\
	} while (0)
#define START_USE(vq)
#define END_USE(vq)
#endif

struct vring_desc_state {
	void *data;			/* Data for callback. */
	struct vring_desc *indir_desc;	/* Indirect descriptor, if any. */
};

struct vring_virtqueue {
	struct virtqueue vq;

	/* Actual memory layout for this queue */
	struct vring vring;

	/* Can we use weak barriers? */
	bool weak_barriers;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Host supports indirect buffers */
	bool indirect;

	/* Host publishes avail event idx */
	bool event;

	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen. */
	u16 last_used_idx;

	/* Last written value to avail->flags */
	u16 avail_flags_shadow;

	/* Last written value to avail->idx in guest byte order */
	u16 avail_idx_shadow;

	/* How to notify other side. FIXME: commonalize hcalls! */
	bool (*notify)(struct virtqueue *vq);

	/* DMA, allocation, and size information */
	bool we_own_ring;
	size_t queue_size_in_bytes;
	dma_addr_t queue_dma_addr;

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;

	/* Figure out if their kicks are too delayed. */
	bool last_add_time_valid;
	ktime_t last_add_time;
#endif

	/* Per-descriptor state. */
	struct vring_desc_state desc_state[];
};

#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)

/*
 * Modern virtio devices have feature bits to specify whether they need a
 * quirk to bypass the IOMMU. If the quirk is absent, just use the DMA API.
 *
 * If the quirk is present, the interaction between virtio and the DMA API
 * is messy.
 *
 * On most systems with virtio, physical addresses match bus addresses,
 * and it doesn't particularly matter whether we use the DMA API.
 *
 * On some systems, including Xen and any system with a physical device
 * that speaks virtio behind a physical IOMMU, we must use the DMA API
 * for virtio DMA to work at all.
 *
 * On other systems, including SPARC and PPC64, virtio-pci devices are
 * enumerated as though they are behind an IOMMU, but the virtio host
 * ignores the IOMMU, so we must either pretend that the IOMMU isn't
 * there or somehow map everything as the identity.
 *
 * For the time being, we preserve historic behavior and bypass the DMA
 * API.
 *
 * TODO: install a per-device DMA ops structure that does the right thing
 * taking into account all the above quirks, and use the DMA API
 * unconditionally on the data path.
 */

static bool vring_use_dma_api(struct virtio_device *vdev)
{
	if (!virtio_has_iommu_quirk(vdev))
		return true;

	/* Otherwise, we are left to guess. */
	/*
	 * In theory, it's possible to have a buggy QEMU-supplied
	 * emulated Q35 IOMMU and Xen enabled at the same time.  On
	 * such a configuration, virtio has never worked and will
	 * not work without an even larger kludge.  Instead, enable
	 * the DMA API if we're a Xen guest, which at least allows
	 * all of the sensible Xen configurations to work correctly.
	 */
	if (xen_domain())
		return true;

	return false;
}

/*
 * The DMA ops on various arches are rather gnarly right now, and
 * making all of the arch DMA ops work on the vring device itself
 * is a mess.  For now, we use the parent device for DMA ops.
 */
static struct device *vring_dma_dev(const struct vring_virtqueue *vq)
{
	return vq->vq.vdev->dev.parent;
}

/* Map one sg entry. */
static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
				   struct scatterlist *sg,
				   enum dma_data_direction direction)
{
	if (!vring_use_dma_api(vq->vq.vdev))
		return (dma_addr_t)sg_phys(sg);

	/*
	 * We can't use dma_map_sg, because we don't use scatterlists in
	 * the way it expects (we don't guarantee that the scatterlist
	 * will exist for the lifetime of the mapping).
	 */
	return dma_map_page(vring_dma_dev(vq),
			    sg_page(sg), sg->offset, sg->length,
			    direction);
}

static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
				   void *cpu_addr, size_t size,
				   enum dma_data_direction direction)
{
	if (!vring_use_dma_api(vq->vq.vdev))
		return (dma_addr_t)virt_to_phys(cpu_addr);

	return dma_map_single(vring_dma_dev(vq),
			      cpu_addr, size, direction);
}

static void vring_unmap_one(const struct vring_virtqueue *vq,
			    struct vring_desc *desc)
{
	u16 flags;

	if (!vring_use_dma_api(vq->vq.vdev))
		return;

	flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);

	if (flags & VRING_DESC_F_INDIRECT) {
		dma_unmap_single(vring_dma_dev(vq),
				 virtio64_to_cpu(vq->vq.vdev, desc->addr),
				 virtio32_to_cpu(vq->vq.vdev, desc->len),
				 (flags & VRING_DESC_F_WRITE) ?
				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
	} else {
		dma_unmap_page(vring_dma_dev(vq),
			       virtio64_to_cpu(vq->vq.vdev, desc->addr),
			       virtio32_to_cpu(vq->vq.vdev, desc->len),
			       (flags & VRING_DESC_F_WRITE) ?
			       DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}
}

static int vring_mapping_error(const struct vring_virtqueue *vq,
			       dma_addr_t addr)
{
	if (!vring_use_dma_api(vq->vq.vdev))
		return 0;

	return dma_mapping_error(vring_dma_dev(vq), addr);
}

static struct vring_desc *alloc_indirect(struct virtqueue *_vq,
					 unsigned int total_sg, gfp_t gfp)
{
	struct vring_desc *desc;
	unsigned int i;

	/*
	 * We require lowmem mappings for the descriptors because
	 * otherwise virt_to_phys will give us bogus addresses in the
	 * virtqueue.
	 */
	gfp &= ~__GFP_HIGHMEM;

	desc = kmalloc(total_sg * sizeof(struct vring_desc), gfp);
	if (!desc)
		return NULL;

	for (i = 0; i < total_sg; i++)
		desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1);
	return desc;
}

static inline int virtqueue_add(struct virtqueue *_vq,
				struct scatterlist *sgs[],
				unsigned int total_sg,
				unsigned int out_sgs,
				unsigned int in_sgs,
				void *data,
				gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	struct scatterlist *sg;
	struct vring_desc *desc;
	unsigned int i, n, avail, descs_used, uninitialized_var(prev), err_idx;
	int head;
	bool indirect;

	START_USE(vq);

	BUG_ON(data == NULL);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return -EIO;
	}

#ifdef DEBUG
	{
		ktime_t now = ktime_get();

		/* No kick or get, with .1 second between?  Warn. */
		if (vq->last_add_time_valid)
			WARN_ON(ktime_to_ms(ktime_sub(now, vq->last_add_time))
					    > 100);
		vq->last_add_time = now;
		vq->last_add_time_valid = true;
	}
#endif

	BUG_ON(total_sg > vq->vring.num);
	BUG_ON(total_sg == 0);

	head = vq->free_head;

	/* If the host supports indirect descriptor tables, and we have multiple
	 * buffers, then go indirect. FIXME: tune this threshold */
	if (vq->indirect && total_sg > 1 && vq->vq.num_free)
		desc = alloc_indirect(_vq, total_sg, gfp);
	else
		desc = NULL;

	if (desc) {
		/* Use a single buffer which doesn't continue */
		indirect = true;
		/* Set up rest to use this indirect table. */
		i = 0;
		descs_used = 1;
	} else {
		indirect = false;
		desc = vq->vring.desc;
		i = head;
		descs_used = total_sg;
	}

	if (vq->vq.num_free < descs_used) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 descs_used, vq->vq.num_free);
		/* FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer.  Presumably the
		 * host should service the ring ASAP. */
		if (out_sgs)
			vq->notify(&vq->vq);
		if (indirect)
			kfree(desc);
		END_USE(vq);
		return -ENOSPC;
	}

	for (n = 0; n < out_sgs; n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT);
			desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
			prev = i;
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		}
	}
	for (; n < (out_sgs + in_sgs); n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT | VRING_DESC_F_WRITE);
			desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
			prev = i;
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		}
	}
	/* Last one doesn't continue. */
	desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);

	if (indirect) {
		/* Now that the indirect table is filled in, map it. */
		dma_addr_t addr = vring_map_single(
			vq, desc, total_sg * sizeof(struct vring_desc),
			DMA_TO_DEVICE);
		if (vring_mapping_error(vq, addr))
			goto unmap_release;

		vq->vring.desc[head].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_INDIRECT);
		vq->vring.desc[head].addr = cpu_to_virtio64(_vq->vdev, addr);

		vq->vring.desc[head].len = cpu_to_virtio32(_vq->vdev, total_sg * sizeof(struct vring_desc));
	}

	/* We're using some buffers from the free list. */
	vq->vq.num_free -= descs_used;

	/* Update free pointer */
	if (indirect)
		vq->free_head = virtio16_to_cpu(_vq->vdev, vq->vring.desc[head].next);
	else
		vq->free_head = i;

	/* Store token and indirect buffer state. */
	vq->desc_state[head].data = data;
	if (indirect)
		vq->desc_state[head].indir_desc = desc;

	/* Put entry in available array (but don't update avail->idx until they
	 * do sync). */
	avail = vq->avail_idx_shadow & (vq->vring.num - 1);
	vq->vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);

	/* Descriptors and available array need to be set before we expose the
	 * new available array entries. */
	virtio_wmb(vq->weak_barriers);
	vq->avail_idx_shadow++;
	vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow);
	vq->num_added++;

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	/* This is very unlikely, but theoretically possible.  Kick
	 * just in case. */
	if (unlikely(vq->num_added == (1 << 16) - 1))
		virtqueue_kick(_vq);

	return 0;

unmap_release:
	err_idx = i;
	i = head;

	for (n = 0; n < total_sg; n++) {
		if (i == err_idx)
			break;
		vring_unmap_one(vq, &desc[i]);
		i = vq->vring.desc[i].next;
	}

	vq->vq.num_free += total_sg;

	if (indirect)
		kfree(desc);

	return -EIO;
}

/**
 * virtqueue_add_sgs - expose buffers to other end
 * @_vq: the struct virtqueue we're talking about.
 * @sgs: array of terminated scatterlists.
 * @out_sgs: the number of scatterlists readable by other side
 * @in_sgs: the number of scatterlists which are writable (after readable ones)
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (e.g. -ENOSPC, -ENOMEM, -EIO).
 */
int virtqueue_add_sgs(struct virtqueue *_vq,
		      struct scatterlist *sgs[],
		      unsigned int out_sgs,
		      unsigned int in_sgs,
		      void *data,
		      gfp_t gfp)
{
	unsigned int i, total_sg = 0;

	/* Count them first. */
	for (i = 0; i < out_sgs + in_sgs; i++) {
		struct scatterlist *sg;
		for (sg = sgs[i]; sg; sg = sg_next(sg))
			total_sg++;
	}
	return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
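
/*
 * Illustrative sketch (not part of this file): a driver queuing a
 * request with a device-readable header followed by a device-writable
 * status byte, in the style of virtio-blk.  The req structure, its
 * fields and the error handling are assumptions made up for the
 * example.
 *
 *	struct scatterlist hdr, status, *sgs[2];
 *
 *	sg_init_one(&hdr, &req->hdr, sizeof(req->hdr));
 *	sg_init_one(&status, &req->status, sizeof(req->status));
 *	sgs[0] = &hdr;		// out_sgs = 1: device reads this
 *	sgs[1] = &status;	// in_sgs = 1: device writes this
 *
 *	if (virtqueue_add_sgs(vq, sgs, 1, 1, req, GFP_ATOMIC))
 *		return -ENOSPC;	// ring full; retry after a used buffer
 *	virtqueue_kick(vq);
 */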

/**
 * virtqueue_add_outbuf - expose output buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg readable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (e.g. -ENOSPC, -ENOMEM, -EIO).
 */
int virtqueue_add_outbuf(struct virtqueue *vq,
			 struct scatterlist *sg, unsigned int num,
			 void *data,
			 gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 1, 0, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);

/**
 * virtqueue_add_inbuf - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (e.g. -ENOSPC, -ENOMEM, -EIO).
 */
int virtqueue_add_inbuf(struct virtqueue *vq,
			struct scatterlist *sg, unsigned int num,
			void *data,
			gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 0, 1, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);
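
/*
 * Illustrative sketch (not part of this file): posting a receive
 * buffer for the device to fill, roughly as virtio-net refills its rx
 * ring.  The buf pointer and buf_len are assumptions made up for the
 * example; virtqueue_add_outbuf() is used the same way for
 * device-readable data.
 *
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, buf, buf_len);
 *	if (virtqueue_add_inbuf(vq, &sg, 1, buf, GFP_ATOMIC))
 *		break;		// ring full; stop refilling for now
 *	virtqueue_kick(vq);
 */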

/**
 * virtqueue_kick_prepare - first half of split virtqueue_kick call.
 * @_vq: the struct virtqueue
 *
 * Instead of virtqueue_kick(), you can do:
 *	if (virtqueue_kick_prepare(vq))
 *		virtqueue_notify(vq);
 *
 * This is sometimes useful because virtqueue_kick_prepare() needs
 * to be serialized, but the actual virtqueue_notify() call does not.
 */
bool virtqueue_kick_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 new, old;
	bool needs_kick;

	START_USE(vq);
	/* We need to expose available array entries before checking avail
	 * event. */
	virtio_mb(vq->weak_barriers);

	old = vq->avail_idx_shadow - vq->num_added;
	new = vq->avail_idx_shadow;
	vq->num_added = 0;

#ifdef DEBUG
	if (vq->last_add_time_valid) {
		WARN_ON(ktime_to_ms(ktime_sub(ktime_get(),
					      vq->last_add_time)) > 100);
	}
	vq->last_add_time_valid = false;
#endif

	if (vq->event) {
		needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev, vring_avail_event(&vq->vring)),
					      new, old);
	} else {
		needs_kick = !(vq->vring.used->flags & cpu_to_virtio16(_vq->vdev, VRING_USED_F_NO_NOTIFY));
	}
	END_USE(vq);
	return needs_kick;
}
EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);
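
/*
 * Illustrative sketch (not part of this file): the point of the split
 * kick is that the prepare step must run under the same lock as the
 * virtqueue_add_*() call, while the (possibly slow) notify exit to the
 * host need not.  The tx_lock name is an assumption made up for the
 * example.
 *
 *	bool kick;
 *
 *	spin_lock(&tx_lock);
 *	virtqueue_add_outbuf(vq, &sg, 1, data, GFP_ATOMIC);
 *	kick = virtqueue_kick_prepare(vq);
 *	spin_unlock(&tx_lock);
 *
 *	if (kick)
 *		virtqueue_notify(vq);	// runs unlocked
 */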

/**
 * virtqueue_notify - second half of split virtqueue_kick call.
 * @_vq: the struct virtqueue
 *
 * This does not need to be serialized.
 *
 * Returns false if host notify failed or queue is broken, otherwise true.
 */
bool virtqueue_notify(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (unlikely(vq->broken))
		return false;

	/* Prod other side to tell it about changes. */
	if (!vq->notify(_vq)) {
		vq->broken = true;
		return false;
	}
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_notify);

/**
 * virtqueue_kick - update after add_buf
 * @vq: the struct virtqueue
 *
 * After one or more virtqueue_add_* calls, invoke this to kick
 * the other side.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns false if kick failed, otherwise true.
 */
bool virtqueue_kick(struct virtqueue *vq)
{
	if (virtqueue_kick_prepare(vq))
		return virtqueue_notify(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_kick);

static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
{
	unsigned int i, j;
	u16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);

	/* Clear data ptr. */
	vq->desc_state[head].data = NULL;

	/* Put back on free list: unmap first-level descriptors and find end */
	i = head;

	while (vq->vring.desc[i].flags & nextflag) {
		vring_unmap_one(vq, &vq->vring.desc[i]);
		i = virtio16_to_cpu(vq->vq.vdev, vq->vring.desc[i].next);
		vq->vq.num_free++;
	}

	vring_unmap_one(vq, &vq->vring.desc[i]);
	vq->vring.desc[i].next = cpu_to_virtio16(vq->vq.vdev, vq->free_head);
	vq->free_head = head;

	/* Plus final descriptor */
	vq->vq.num_free++;

	/* Free the indirect table, if any, now that it's unmapped. */
	if (vq->desc_state[head].indir_desc) {
		struct vring_desc *indir_desc = vq->desc_state[head].indir_desc;
		u32 len = virtio32_to_cpu(vq->vq.vdev, vq->vring.desc[head].len);

		BUG_ON(!(vq->vring.desc[head].flags &
			 cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_INDIRECT)));
		BUG_ON(len == 0 || len % sizeof(struct vring_desc));

		for (j = 0; j < len / sizeof(struct vring_desc); j++)
			vring_unmap_one(vq, &indir_desc[j]);

		kfree(vq->desc_state[head].indir_desc);
		vq->desc_state[head].indir_desc = NULL;
	}
}

static inline bool more_used(const struct vring_virtqueue *vq)
{
	return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev, vq->vring.used->idx);
}
6450a8a69ddSRusty Russell 
6465dfc1762SRusty Russell /**
6475dfc1762SRusty Russell  * virtqueue_get_buf - get the next used buffer
6485dfc1762SRusty Russell  * @vq: the struct virtqueue we're talking about.
6495dfc1762SRusty Russell  * @len: the length written into the buffer
6505dfc1762SRusty Russell  *
6515dfc1762SRusty Russell  * If the driver wrote data into the buffer, @len will be set to the
6525dfc1762SRusty Russell  * amount written.  This means you don't need to clear the buffer
6535dfc1762SRusty Russell  * beforehand to ensure there's no data leakage in the case of short
6545dfc1762SRusty Russell  * writes.
6555dfc1762SRusty Russell  *
6565dfc1762SRusty Russell  * Caller must ensure we don't call this with other virtqueue
6575dfc1762SRusty Russell  * operations at the same time (except where noted).
6585dfc1762SRusty Russell  *
6595dfc1762SRusty Russell  * Returns NULL if there are no used buffers, or the "data" token
660b3087e48SRusty Russell  * handed to virtqueue_add_*().
6615dfc1762SRusty Russell  */
void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	void *ret;
	unsigned int i;
	u16 last_used;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* Only get used array entries after they have been exposed by host. */
	virtio_rmb(vq->weak_barriers);

	last_used = (vq->last_used_idx & (vq->vring.num - 1));
	i = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].id);
	*len = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].len);

	if (unlikely(i >= vq->vring.num)) {
		BAD_RING(vq, "id %u out of range\n", i);
		return NULL;
	}
	if (unlikely(!vq->desc_state[i].data)) {
		BAD_RING(vq, "id %u is not a head!\n", i);
		return NULL;
	}

	/* detach_buf clears data, so grab it now. */
	ret = vq->desc_state[i].data;
	detach_buf(vq, i);
	vq->last_used_idx++;
	/* If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call. */
	if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT))
		virtio_store_mb(vq->weak_barriers,
				&vring_used_event(&vq->vring),
				cpu_to_virtio16(_vq->vdev, vq->last_used_idx));

#ifdef DEBUG
	vq->last_add_time_valid = false;
#endif

	END_USE(vq);
	return ret;
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf);
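
/*
 * Illustrative sketch (not part of this file): the usual way a driver
 * drains a virtqueue from its callback, re-enabling callbacks without
 * losing the race against a buffer that arrives in between.  The
 * process_buf() helper is an assumption made up for the example.
 *
 *	unsigned int len;
 *	void *buf;
 *
 *	do {
 *		virtqueue_disable_cb(vq);
 *		while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
 *			process_buf(buf, len);
 *	} while (!virtqueue_enable_cb(vq));
 */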

/**
 * virtqueue_disable_cb - disable callbacks
 * @_vq: the struct virtqueue we're talking about.
 *
 * Note that this is not necessarily synchronous, hence unreliable and only
 * useful as an optimization.
 *
 * Unlike other operations, this need not be serialized.
 */
void virtqueue_disable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
		vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
		vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
	}
}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);

/**
 * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns current queue state
 * in an opaque unsigned value. This value should be later tested by
 * virtqueue_poll, to detect a possible race between the driver checking for
 * more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 last_used_idx;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
	}
	vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, last_used_idx = vq->last_used_idx);
	END_USE(vq);
	return last_used_idx;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);

/**
 * virtqueue_poll - query pending used buffers
 * @_vq: the struct virtqueue we're talking about.
 * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
 *
 * Returns "true" if there are pending used buffers in the queue.
 *
 * This does not need to be serialized.
 */
bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	virtio_mb(vq->weak_barriers);
	return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev, vq->vring.used->idx);
}
EXPORT_SYMBOL_GPL(virtqueue_poll);
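
/*
 * Illustrative sketch (not part of this file): the prepare/poll pair
 * lets a driver re-enable callbacks under its lock, then detect the
 * race in a context where only unserialized calls are allowed, e.g. a
 * NAPI-style poll loop.  The vq_lock name and the poll_again label are
 * assumptions made up for the example.
 *
 *	unsigned opaque;
 *
 *	spin_lock(&vq_lock);
 *	opaque = virtqueue_enable_cb_prepare(vq);
 *	spin_unlock(&vq_lock);
 *
 *	if (virtqueue_poll(vq, opaque)) {
 *		// raced: buffers arrived after the check, keep going
 *		virtqueue_disable_cb(vq);
 *		goto poll_again;
 *	}
 */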

/**
 * virtqueue_enable_cb - restart callbacks after disable_cb.
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns "false" if there are pending
 * buffers in the queue, to detect a possible race between the driver
 * checking for more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb(struct virtqueue *_vq)
{
	unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);
	return !virtqueue_poll(_vq, last_used_idx);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);

/**
 * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks but hints to the other side to delay
 * interrupts until most of the available buffers have been processed;
 * it returns "false" if there are many pending buffers in the queue,
 * to detect a possible race between the driver checking for more work,
 * and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 bufs;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
	}
	/* TODO: tune this threshold */
	bufs = (u16)(vq->avail_idx_shadow - vq->last_used_idx) * 3 / 4;

	virtio_store_mb(vq->weak_barriers,
			&vring_used_event(&vq->vring),
			cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs));

	if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->vring.used->idx) - vq->last_used_idx) > bufs)) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
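
/*
 * Illustrative sketch (not part of this file): a transmit-completion
 * path can use the delayed variant to coalesce interrupts, asking for
 * a callback only after roughly 3/4 of the in-flight buffers are used,
 * in the style of virtio-net.  free_old_buf() is an assumption made up
 * for the example.
 *
 *	unsigned int len;
 *	void *buf;
 *
 *	while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
 *		free_old_buf(buf);
 *	if (!virtqueue_enable_cb_delayed(vq))
 *		;	// many completions already pending: reclaim again
 */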

/**
 * virtqueue_detach_unused_buf - detach first unused buffer
 * @_vq: the struct virtqueue we're talking about.
 *
 * Returns NULL or the "data" token handed to virtqueue_add_*().
 * This is not valid on an active queue; it is useful only for device
 * shutdown.
 */
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i;
	void *buf;

	START_USE(vq);

	for (i = 0; i < vq->vring.num; i++) {
		if (!vq->desc_state[i].data)
			continue;
		/* detach_buf clears data, so grab it now. */
		buf = vq->desc_state[i].data;
		detach_buf(vq, i);
		vq->avail_idx_shadow--;
		vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow);
		END_USE(vq);
		return buf;
	}
	/* That should have freed everything. */
	BUG_ON(vq->vq.num_free != vq->vring.num);

	END_USE(vq);
	return NULL;
}
EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
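
/*
 * Illustrative sketch (not part of this file): on device removal a
 * driver reclaims the buffers still sitting in the ring after the
 * device has been reset.  free_buf() is an assumption made up for the
 * example.
 *
 *	void *buf;
 *
 *	while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
 *		free_buf(buf);
 */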

irqreturn_t vring_interrupt(int irq, void *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!more_used(vq)) {
		pr_debug("virtqueue interrupt with no work for %p\n", vq);
		return IRQ_NONE;
	}

	if (unlikely(vq->broken))
		return IRQ_HANDLED;

	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
	if (vq->vq.callback)
		vq->vq.callback(&vq->vq);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);

struct virtqueue *__vring_new_virtqueue(unsigned int index,
					struct vring vring,
					struct virtio_device *vdev,
					bool weak_barriers,
					bool (*notify)(struct virtqueue *),
					void (*callback)(struct virtqueue *),
					const char *name)
{
	unsigned int i;
	struct vring_virtqueue *vq;

	vq = kmalloc(sizeof(*vq) + vring.num * sizeof(struct vring_desc_state),
		     GFP_KERNEL);
	if (!vq)
		return NULL;

	vq->vring = vring;
	vq->vq.callback = callback;
	vq->vq.vdev = vdev;
	vq->vq.name = name;
	vq->vq.num_free = vring.num;
	vq->vq.index = index;
	vq->we_own_ring = false;
	vq->queue_dma_addr = 0;
	vq->queue_size_in_bytes = 0;
	vq->notify = notify;
	vq->weak_barriers = weak_barriers;
	vq->broken = false;
	vq->last_used_idx = 0;
	vq->avail_flags_shadow = 0;
	vq->avail_idx_shadow = 0;
	vq->num_added = 0;
	list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
	vq->in_use = false;
	vq->last_add_time_valid = false;
#endif

	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC);
	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

	/* No callback?  Tell other side not to bother us. */
	if (!callback) {
		vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
		vq->vring.avail->flags = cpu_to_virtio16(vdev, vq->avail_flags_shadow);
	}

	/* Put everything in free lists. */
	vq->free_head = 0;
	for (i = 0; i < vring.num-1; i++)
		vq->vring.desc[i].next = cpu_to_virtio16(vdev, i + 1);
	memset(vq->desc_state, 0, vring.num * sizeof(struct vring_desc_state));

	return &vq->vq;
}
EXPORT_SYMBOL_GPL(__vring_new_virtqueue);

static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
			      dma_addr_t *dma_handle, gfp_t flag)
{
	if (vring_use_dma_api(vdev)) {
		return dma_alloc_coherent(vdev->dev.parent, size,
					  dma_handle, flag);
	} else {
		void *queue = alloc_pages_exact(PAGE_ALIGN(size), flag);
		if (queue) {
			phys_addr_t phys_addr = virt_to_phys(queue);
			*dma_handle = (dma_addr_t)phys_addr;

9812a2d1382SAndy Lutomirski 			 * Sanity check: make sure we dind't truncate
9822a2d1382SAndy Lutomirski 			 * the address.  The only arches I can find that
9832a2d1382SAndy Lutomirski 			 * have 64-bit phys_addr_t but 32-bit dma_addr_t
9842a2d1382SAndy Lutomirski 			 * are certain non-highmem MIPS and x86
9852a2d1382SAndy Lutomirski 			 * configurations, but these configurations
9862a2d1382SAndy Lutomirski 			 * should never allocate physical pages above 32
9872a2d1382SAndy Lutomirski 			 * bits, so this is fine.  Just in case, throw a
9882a2d1382SAndy Lutomirski 			 * warning and abort if we end up with an
9892a2d1382SAndy Lutomirski 			 * unrepresentable address.
9902a2d1382SAndy Lutomirski 			 */
9912a2d1382SAndy Lutomirski 			if (WARN_ON_ONCE(*dma_handle != phys_addr)) {
9922a2d1382SAndy Lutomirski 				free_pages_exact(queue, PAGE_ALIGN(size));
9932a2d1382SAndy Lutomirski 				return NULL;
9942a2d1382SAndy Lutomirski 			}
9952a2d1382SAndy Lutomirski 		}
9962a2d1382SAndy Lutomirski 		return queue;
9972a2d1382SAndy Lutomirski 	}
9982a2d1382SAndy Lutomirski }
9992a2d1382SAndy Lutomirski 
10002a2d1382SAndy Lutomirski static void vring_free_queue(struct virtio_device *vdev, size_t size,
10012a2d1382SAndy Lutomirski 			     void *queue, dma_addr_t dma_handle)
10022a2d1382SAndy Lutomirski {
10032a2d1382SAndy Lutomirski 	if (vring_use_dma_api(vdev)) {
10042a2d1382SAndy Lutomirski 		dma_free_coherent(vdev->dev.parent, size, queue, dma_handle);
10052a2d1382SAndy Lutomirski 	} else {
10062a2d1382SAndy Lutomirski 		free_pages_exact(queue, PAGE_ALIGN(size));
10072a2d1382SAndy Lutomirski 	}
10082a2d1382SAndy Lutomirski }
10092a2d1382SAndy Lutomirski 
10102a2d1382SAndy Lutomirski struct virtqueue *vring_create_virtqueue(
10112a2d1382SAndy Lutomirski 	unsigned int index,
10122a2d1382SAndy Lutomirski 	unsigned int num,
10132a2d1382SAndy Lutomirski 	unsigned int vring_align,
10142a2d1382SAndy Lutomirski 	struct virtio_device *vdev,
10152a2d1382SAndy Lutomirski 	bool weak_barriers,
10162a2d1382SAndy Lutomirski 	bool may_reduce_num,
10172a2d1382SAndy Lutomirski 	bool (*notify)(struct virtqueue *),
10182a2d1382SAndy Lutomirski 	void (*callback)(struct virtqueue *),
10192a2d1382SAndy Lutomirski 	const char *name)
10202a2d1382SAndy Lutomirski {
10212a2d1382SAndy Lutomirski 	struct virtqueue *vq;
1022e00f7bd2SDan Carpenter 	void *queue = NULL;
10232a2d1382SAndy Lutomirski 	dma_addr_t dma_addr;
10242a2d1382SAndy Lutomirski 	size_t queue_size_in_bytes;
10252a2d1382SAndy Lutomirski 	struct vring vring;
10262a2d1382SAndy Lutomirski 
10272a2d1382SAndy Lutomirski 	/* We assume num is a power of 2. */
10282a2d1382SAndy Lutomirski 	if (num & (num - 1)) {
10292a2d1382SAndy Lutomirski 		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
10302a2d1382SAndy Lutomirski 		return NULL;
10312a2d1382SAndy Lutomirski 	}
10322a2d1382SAndy Lutomirski 
10332a2d1382SAndy Lutomirski 	/* TODO: allocate each queue chunk individually */
10342a2d1382SAndy Lutomirski 	for (; num && vring_size(num, vring_align) > PAGE_SIZE; num /= 2) {
10352a2d1382SAndy Lutomirski 		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
10362a2d1382SAndy Lutomirski 					  &dma_addr,
10372a2d1382SAndy Lutomirski 					  GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
10382a2d1382SAndy Lutomirski 		if (queue)
10392a2d1382SAndy Lutomirski 			break;
10402a2d1382SAndy Lutomirski 	}
10412a2d1382SAndy Lutomirski 
10422a2d1382SAndy Lutomirski 	if (!num)
10432a2d1382SAndy Lutomirski 		return NULL;
10442a2d1382SAndy Lutomirski 
10452a2d1382SAndy Lutomirski 	if (!queue) {
10462a2d1382SAndy Lutomirski 		/* Try to get a single page. You are my only hope! */
10472a2d1382SAndy Lutomirski 		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
10482a2d1382SAndy Lutomirski 					  &dma_addr, GFP_KERNEL|__GFP_ZERO);
10492a2d1382SAndy Lutomirski 	}
10502a2d1382SAndy Lutomirski 	if (!queue)
10512a2d1382SAndy Lutomirski 		return NULL;
10522a2d1382SAndy Lutomirski 
10532a2d1382SAndy Lutomirski 	queue_size_in_bytes = vring_size(num, vring_align);
10542a2d1382SAndy Lutomirski 	vring_init(&vring, num, queue, vring_align);
10552a2d1382SAndy Lutomirski 
10562a2d1382SAndy Lutomirski 	vq = __vring_new_virtqueue(index, vring, vdev, weak_barriers,
10572a2d1382SAndy Lutomirski 				   notify, callback, name);
10582a2d1382SAndy Lutomirski 	if (!vq) {
10592a2d1382SAndy Lutomirski 		vring_free_queue(vdev, queue_size_in_bytes, queue,
10602a2d1382SAndy Lutomirski 				 dma_addr);
10612a2d1382SAndy Lutomirski 		return NULL;
10622a2d1382SAndy Lutomirski 	}
10632a2d1382SAndy Lutomirski 
10642a2d1382SAndy Lutomirski 	to_vvq(vq)->queue_dma_addr = dma_addr;
10652a2d1382SAndy Lutomirski 	to_vvq(vq)->queue_size_in_bytes = queue_size_in_bytes;
10662a2d1382SAndy Lutomirski 	to_vvq(vq)->we_own_ring = true;
10672a2d1382SAndy Lutomirski 
10682a2d1382SAndy Lutomirski 	return vq;
10692a2d1382SAndy Lutomirski }
10702a2d1382SAndy Lutomirski EXPORT_SYMBOL_GPL(vring_create_virtqueue);
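
/*
 * Illustrative sketch, not part of this file: a transport might create a
 * ring like this, where mydev_notify() and mydev_callback() are hypothetical
 * handlers supplied by the caller:
 *
 *	struct virtqueue *vq;
 *
 *	vq = vring_create_virtqueue(0, 256, PAGE_SIZE, vdev,
 *				    true, true,
 *				    mydev_notify, mydev_callback, "requests");
 *	if (!vq)
 *		return -ENOMEM;
 *
 * On success the ring memory is owned by the core (we_own_ring is set) and
 * is released by vring_del_virtqueue().
 */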
10712a2d1382SAndy Lutomirski 
10722a2d1382SAndy Lutomirski struct virtqueue *vring_new_virtqueue(unsigned int index,
10732a2d1382SAndy Lutomirski 				      unsigned int num,
10742a2d1382SAndy Lutomirski 				      unsigned int vring_align,
10752a2d1382SAndy Lutomirski 				      struct virtio_device *vdev,
10762a2d1382SAndy Lutomirski 				      bool weak_barriers,
10772a2d1382SAndy Lutomirski 				      void *pages,
10782a2d1382SAndy Lutomirski 				      bool (*notify)(struct virtqueue *vq),
10792a2d1382SAndy Lutomirski 				      void (*callback)(struct virtqueue *vq),
10802a2d1382SAndy Lutomirski 				      const char *name)
10812a2d1382SAndy Lutomirski {
10822a2d1382SAndy Lutomirski 	struct vring vring;
10832a2d1382SAndy Lutomirski 	vring_init(&vring, num, pages, vring_align);
10842a2d1382SAndy Lutomirski 	return __vring_new_virtqueue(index, vring, vdev, weak_barriers,
10852a2d1382SAndy Lutomirski 				     notify, callback, name);
10862a2d1382SAndy Lutomirski }
1087c6fd4701SRusty Russell EXPORT_SYMBOL_GPL(vring_new_virtqueue);
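
/*
 * Illustrative sketch (assumed caller code): the legacy entry point above
 * expects memory the caller already allocated, zeroed and suitably aligned,
 * e.g.:
 *
 *	void *pages = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
 *				get_order(vring_size(num, vring_align)));
 *	struct virtqueue *vq = vring_new_virtqueue(0, num, vring_align, vdev,
 *				true, pages, notify, callback, "legacy");
 *
 * Here we_own_ring stays false, so the caller must free the pages itself
 * after vring_del_virtqueue().
 */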
10880a8a69ddSRusty Russell 
10892a2d1382SAndy Lutomirski void vring_del_virtqueue(struct virtqueue *_vq)
10900a8a69ddSRusty Russell {
10912a2d1382SAndy Lutomirski 	struct vring_virtqueue *vq = to_vvq(_vq);
10922a2d1382SAndy Lutomirski 
10932a2d1382SAndy Lutomirski 	if (vq->we_own_ring) {
10942a2d1382SAndy Lutomirski 		vring_free_queue(vq->vq.vdev, vq->queue_size_in_bytes,
10952a2d1382SAndy Lutomirski 				 vq->vring.desc, vq->queue_dma_addr);
10962a2d1382SAndy Lutomirski 	}
10972a2d1382SAndy Lutomirski 	list_del(&_vq->list);
10982a2d1382SAndy Lutomirski 	kfree(vq);
10990a8a69ddSRusty Russell }
1100c6fd4701SRusty Russell EXPORT_SYMBOL_GPL(vring_del_virtqueue);
11010a8a69ddSRusty Russell 
1102e34f8725SRusty Russell /* Manipulates transport-specific feature bits. */
1103e34f8725SRusty Russell void vring_transport_features(struct virtio_device *vdev)
1104e34f8725SRusty Russell {
1105e34f8725SRusty Russell 	unsigned int i;
1106e34f8725SRusty Russell 
1107e34f8725SRusty Russell 	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
1108e34f8725SRusty Russell 		switch (i) {
11099fa29b9dSMark McLoughlin 		case VIRTIO_RING_F_INDIRECT_DESC:
11109fa29b9dSMark McLoughlin 			break;
1111a5c262c5SMichael S. Tsirkin 		case VIRTIO_RING_F_EVENT_IDX:
1112a5c262c5SMichael S. Tsirkin 			break;
1113747ae34aSMichael S. Tsirkin 		case VIRTIO_F_VERSION_1:
1114747ae34aSMichael S. Tsirkin 			break;
11151a937693SMichael S. Tsirkin 		case VIRTIO_F_IOMMU_PLATFORM:
11161a937693SMichael S. Tsirkin 			break;
1117e34f8725SRusty Russell 		default:
1118e34f8725SRusty Russell 			/* We don't understand this bit. */
1119e16e12beSMichael S. Tsirkin 			__virtio_clear_bit(vdev, i);
1120e34f8725SRusty Russell 		}
1121e34f8725SRusty Russell 	}
1122e34f8725SRusty Russell }
1123e34f8725SRusty Russell EXPORT_SYMBOL_GPL(vring_transport_features);
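
/*
 * Illustrative sketch (hypothetical transport code): this is typically
 * called from a transport's finalize_features hook, before the features
 * are acked:
 *
 *	static int my_finalize_features(struct virtio_device *vdev)
 *	{
 *		// Drop any transport feature bits the ring code doesn't know.
 *		vring_transport_features(vdev);
 *		return 0;
 *	}
 */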
1124e34f8725SRusty Russell 
11255dfc1762SRusty Russell /**
11265dfc1762SRusty Russell  * virtqueue_get_vring_size - return the size of the virtqueue's vring
11275dfc1762SRusty Russell  * @vq: the struct virtqueue containing the vring of interest.
11285dfc1762SRusty Russell  *
11295dfc1762SRusty Russell  * Returns the size of the vring.  This is mainly used for boasting to
11305dfc1762SRusty Russell  * userspace.  Unlike other operations, this need not be serialized.
11315dfc1762SRusty Russell  */
11328f9f4668SRick Jones unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
11338f9f4668SRick Jones {
11358f9f4668SRick Jones 	struct vring_virtqueue *vq = to_vvq(_vq);
11368f9f4668SRick Jones 
11378f9f4668SRick Jones 	return vq->vring.num;
11388f9f4668SRick Jones }
11398f9f4668SRick Jones EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);
11408f9f4668SRick Jones 
1141b3b32c94SHeinz Graalfs bool virtqueue_is_broken(struct virtqueue *_vq)
1142b3b32c94SHeinz Graalfs {
1143b3b32c94SHeinz Graalfs 	struct vring_virtqueue *vq = to_vvq(_vq);
1144b3b32c94SHeinz Graalfs 
1145b3b32c94SHeinz Graalfs 	return vq->broken;
1146b3b32c94SHeinz Graalfs }
1147b3b32c94SHeinz Graalfs EXPORT_SYMBOL_GPL(virtqueue_is_broken);
1148b3b32c94SHeinz Graalfs 
1149e2dcdfe9SRusty Russell /*
1150e2dcdfe9SRusty Russell  * This should prevent the device from being used, allowing drivers to
1151e2dcdfe9SRusty Russell  * recover.  You may need to grab appropriate locks to flush.
1152e2dcdfe9SRusty Russell  */
1153e2dcdfe9SRusty Russell void virtio_break_device(struct virtio_device *dev)
1154e2dcdfe9SRusty Russell {
1155e2dcdfe9SRusty Russell 	struct virtqueue *_vq;
1156e2dcdfe9SRusty Russell 
1157e2dcdfe9SRusty Russell 	list_for_each_entry(_vq, &dev->vqs, list) {
1158e2dcdfe9SRusty Russell 		struct vring_virtqueue *vq = to_vvq(_vq);
1159e2dcdfe9SRusty Russell 		vq->broken = true;
1160e2dcdfe9SRusty Russell 	}
1161e2dcdfe9SRusty Russell }
1162e2dcdfe9SRusty Russell EXPORT_SYMBOL_GPL(virtio_break_device);
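
/*
 * Illustrative sketch (hypothetical error path): a driver that detects an
 * unrecoverable device failure can fence off all of its queues at once:
 *
 *	if (fatal_error) {
 *		virtio_break_device(vdev);
 *		// Further virtqueue_add() calls now fail with -EIO.
 *	}
 */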
1163e2dcdfe9SRusty Russell 
11642a2d1382SAndy Lutomirski dma_addr_t virtqueue_get_desc_addr(struct virtqueue *_vq)
116589062652SCornelia Huck {
116689062652SCornelia Huck 	struct vring_virtqueue *vq = to_vvq(_vq);
116789062652SCornelia Huck 
11682a2d1382SAndy Lutomirski 	BUG_ON(!vq->we_own_ring);
116989062652SCornelia Huck 
11702a2d1382SAndy Lutomirski 	return vq->queue_dma_addr;
11712a2d1382SAndy Lutomirski }
11722a2d1382SAndy Lutomirski EXPORT_SYMBOL_GPL(virtqueue_get_desc_addr);
11732a2d1382SAndy Lutomirski 
11742a2d1382SAndy Lutomirski dma_addr_t virtqueue_get_avail_addr(struct virtqueue *_vq)
117589062652SCornelia Huck {
117689062652SCornelia Huck 	struct vring_virtqueue *vq = to_vvq(_vq);
117789062652SCornelia Huck 
11782a2d1382SAndy Lutomirski 	BUG_ON(!vq->we_own_ring);
11792a2d1382SAndy Lutomirski 
11802a2d1382SAndy Lutomirski 	return vq->queue_dma_addr +
11812a2d1382SAndy Lutomirski 		((char *)vq->vring.avail - (char *)vq->vring.desc);
118289062652SCornelia Huck }
11832a2d1382SAndy Lutomirski EXPORT_SYMBOL_GPL(virtqueue_get_avail_addr);
11842a2d1382SAndy Lutomirski 
11852a2d1382SAndy Lutomirski dma_addr_t virtqueue_get_used_addr(struct virtqueue *_vq)
11862a2d1382SAndy Lutomirski {
11872a2d1382SAndy Lutomirski 	struct vring_virtqueue *vq = to_vvq(_vq);
11882a2d1382SAndy Lutomirski 
11892a2d1382SAndy Lutomirski 	BUG_ON(!vq->we_own_ring);
11902a2d1382SAndy Lutomirski 
11912a2d1382SAndy Lutomirski 	return vq->queue_dma_addr +
11922a2d1382SAndy Lutomirski 		((char *)vq->vring.used - (char *)vq->vring.desc);
11932a2d1382SAndy Lutomirski }
11942a2d1382SAndy Lutomirski EXPORT_SYMBOL_GPL(virtqueue_get_used_addr);
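
/*
 * Illustrative sketch: a transport that created the ring with
 * vring_create_virtqueue() can hand the three ring addresses to the device.
 * The my_write_reg() helper and MY_QUEUE_* register offsets below are
 * hypothetical:
 *
 *	u64 desc  = virtqueue_get_desc_addr(vq);
 *	u64 avail = virtqueue_get_avail_addr(vq);
 *	u64 used  = virtqueue_get_used_addr(vq);
 *
 *	my_write_reg(dev, MY_QUEUE_DESC,  desc);
 *	my_write_reg(dev, MY_QUEUE_AVAIL, avail);
 *	my_write_reg(dev, MY_QUEUE_USED,  used);
 */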
11952a2d1382SAndy Lutomirski 
11962a2d1382SAndy Lutomirski const struct vring *virtqueue_get_vring(struct virtqueue *vq)
11972a2d1382SAndy Lutomirski {
11982a2d1382SAndy Lutomirski 	return &to_vvq(vq)->vring;
11992a2d1382SAndy Lutomirski }
12002a2d1382SAndy Lutomirski EXPORT_SYMBOL_GPL(virtqueue_get_vring);
120189062652SCornelia Huck 
1202c6fd4701SRusty Russell MODULE_LICENSE("GPL");
1203