xref: /openbmc/linux/drivers/virtio/virtio_ring.c (revision 44ed8089e991a60d614abe0ee4b9057a28b364e4)
/* Virtio ring implementation.
 *
 *  Copyright 2007 Rusty Russell IBM Corporation
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/kmemleak.h>
#include <linux/dma-mapping.h>
#include <xen/xen.h>

#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		BUG();						\
	} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)						\
	do {							\
		if ((_vq)->in_use)				\
			panic("%s:in_use = %i\n",		\
			      (_vq)->vq.name, (_vq)->in_use);	\
		(_vq)->in_use = __LINE__;			\
	} while (0)
#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
#else
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&_vq->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		(_vq)->broken = true;				\
	} while (0)
#define START_USE(vq)
#define END_USE(vq)
#endif

struct vring_desc_state {
	void *data;			/* Data for callback. */
	struct vring_desc *indir_desc;	/* Indirect descriptor, if any. */
};

struct vring_virtqueue {
	struct virtqueue vq;

	/* Actual memory layout for this queue */
	struct vring vring;

	/* Can we use weak barriers? */
	bool weak_barriers;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Host supports indirect buffers */
	bool indirect;

	/* Host publishes avail event idx */
	bool event;

	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen. */
	u16 last_used_idx;

	/* Last written value to avail->flags */
	u16 avail_flags_shadow;

	/* Last written value to avail->idx in guest byte order */
	u16 avail_idx_shadow;

	/* How to notify other side. FIXME: commonalize hcalls! */
	bool (*notify)(struct virtqueue *vq);

	/* DMA, allocation, and size information */
	bool we_own_ring;
	size_t queue_size_in_bytes;
	dma_addr_t queue_dma_addr;

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;

	/* Figure out if their kicks are too delayed. */
	bool last_add_time_valid;
	ktime_t last_add_time;
#endif

	/* Per-descriptor state. */
	struct vring_desc_state desc_state[];
};

#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)

/*
 * Modern virtio devices have feature bits to specify whether they need a
 * quirk and bypass the IOMMU. If not there, just use the DMA API.
 *
 * If there, the interaction between virtio and DMA API is messy.
 *
 * On most systems with virtio, physical addresses match bus addresses,
 * and it doesn't particularly matter whether we use the DMA API.
 *
 * On some systems, including Xen and any system with a physical device
 * that speaks virtio behind a physical IOMMU, we must use the DMA API
 * for virtio DMA to work at all.
 *
 * On other systems, including SPARC and PPC64, virtio-pci devices are
 * enumerated as though they are behind an IOMMU, but the virtio host
 * ignores the IOMMU, so we must either pretend that the IOMMU isn't
 * there or somehow map everything as the identity.
 *
 * For the time being, we preserve historic behavior and bypass the DMA
 * API.
 *
 * TODO: install a per-device DMA ops structure that does the right thing
 * taking into account all the above quirks, and use the DMA API
 * unconditionally on data path.
 */

static bool vring_use_dma_api(struct virtio_device *vdev)
{
	if (!virtio_has_iommu_quirk(vdev))
		return true;

	/* Otherwise, we are left to guess. */
	/*
	 * In theory, it's possible to have a buggy QEMU-supplied
	 * emulated Q35 IOMMU and Xen enabled at the same time.  On
	 * such a configuration, virtio has never worked and will
	 * not work without an even larger kludge.  Instead, enable
	 * the DMA API if we're a Xen guest, which at least allows
	 * all of the sensible Xen configurations to work correctly.
	 */
	if (xen_domain())
		return true;

	return false;
}

/*
 * The DMA ops on various arches are rather gnarly right now, and
 * making all of the arch DMA ops work on the vring device itself
 * is a mess.  For now, we use the parent device for DMA ops.
 */
static inline struct device *vring_dma_dev(const struct vring_virtqueue *vq)
{
	return vq->vq.vdev->dev.parent;
}

/* Map one sg entry. */
static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
				   struct scatterlist *sg,
				   enum dma_data_direction direction)
{
	if (!vring_use_dma_api(vq->vq.vdev))
		return (dma_addr_t)sg_phys(sg);

	/*
	 * We can't use dma_map_sg, because we don't use scatterlists in
	 * the way it expects (we don't guarantee that the scatterlist
	 * will exist for the lifetime of the mapping).
	 */
	return dma_map_page(vring_dma_dev(vq),
			    sg_page(sg), sg->offset, sg->length,
			    direction);
}

static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
				   void *cpu_addr, size_t size,
				   enum dma_data_direction direction)
{
	if (!vring_use_dma_api(vq->vq.vdev))
		return (dma_addr_t)virt_to_phys(cpu_addr);

	return dma_map_single(vring_dma_dev(vq),
			      cpu_addr, size, direction);
}

static void vring_unmap_one(const struct vring_virtqueue *vq,
			    struct vring_desc *desc)
{
	u16 flags;

	if (!vring_use_dma_api(vq->vq.vdev))
		return;

	flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);

	if (flags & VRING_DESC_F_INDIRECT) {
		dma_unmap_single(vring_dma_dev(vq),
				 virtio64_to_cpu(vq->vq.vdev, desc->addr),
				 virtio32_to_cpu(vq->vq.vdev, desc->len),
				 (flags & VRING_DESC_F_WRITE) ?
				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
	} else {
		dma_unmap_page(vring_dma_dev(vq),
			       virtio64_to_cpu(vq->vq.vdev, desc->addr),
			       virtio32_to_cpu(vq->vq.vdev, desc->len),
			       (flags & VRING_DESC_F_WRITE) ?
			       DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}
}

static int vring_mapping_error(const struct vring_virtqueue *vq,
			       dma_addr_t addr)
{
	if (!vring_use_dma_api(vq->vq.vdev))
		return 0;

	return dma_mapping_error(vring_dma_dev(vq), addr);
}
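
/*
 * Illustrative sketch (not called anywhere): the helpers above are used
 * by virtqueue_add() below in a map / check / unwind pattern.  Each sg
 * is mapped, each returned address is checked, and on failure every
 * descriptor filled so far is walked back with vring_unmap_one():
 *
 *	dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);
 *	if (vring_mapping_error(vq, addr))
 *		goto unmap_release;
 */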

static struct vring_desc *alloc_indirect(struct virtqueue *_vq,
					 unsigned int total_sg, gfp_t gfp)
{
	struct vring_desc *desc;
	unsigned int i;

	/*
	 * We require lowmem mappings for the descriptors because
	 * otherwise virt_to_phys will give us bogus addresses in the
	 * virtqueue.
	 */
	gfp &= ~__GFP_HIGHMEM;

	desc = kmalloc(total_sg * sizeof(struct vring_desc), gfp);
	if (!desc)
		return NULL;

	for (i = 0; i < total_sg; i++)
		desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1);
	return desc;
}

static inline int virtqueue_add(struct virtqueue *_vq,
				struct scatterlist *sgs[],
				unsigned int total_sg,
				unsigned int out_sgs,
				unsigned int in_sgs,
				void *data,
				void *ctx,
				gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	struct scatterlist *sg;
	struct vring_desc *desc;
	unsigned int i, n, avail, descs_used, uninitialized_var(prev), err_idx;
	int head;
	bool indirect;

	START_USE(vq);

	BUG_ON(data == NULL);
	BUG_ON(ctx && vq->indirect);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return -EIO;
	}

#ifdef DEBUG
	{
		ktime_t now = ktime_get();

		/* No kick or get, with .1 second between?  Warn. */
		if (vq->last_add_time_valid)
			WARN_ON(ktime_to_ms(ktime_sub(now, vq->last_add_time))
					    > 100);
		vq->last_add_time = now;
		vq->last_add_time_valid = true;
	}
#endif

	BUG_ON(total_sg == 0);

	head = vq->free_head;

	/* If the host supports indirect descriptor tables, and we have multiple
	 * buffers, then go indirect. FIXME: tune this threshold */
	if (vq->indirect && total_sg > 1 && vq->vq.num_free)
		desc = alloc_indirect(_vq, total_sg, gfp);
	else {
		desc = NULL;
		WARN_ON_ONCE(total_sg > vq->vring.num && !vq->indirect);
	}

	if (desc) {
		/* Use a single buffer which doesn't continue */
		indirect = true;
		/* Set up rest to use this indirect table. */
		i = 0;
		descs_used = 1;
	} else {
		indirect = false;
		desc = vq->vring.desc;
		i = head;
		descs_used = total_sg;
	}

	if (vq->vq.num_free < descs_used) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 descs_used, vq->vq.num_free);
		/* FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer.  Presumably the
		 * host should service the ring ASAP. */
		if (out_sgs)
			vq->notify(&vq->vq);
		if (indirect)
			kfree(desc);
		END_USE(vq);
		return -ENOSPC;
	}

	for (n = 0; n < out_sgs; n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT);
			desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
			prev = i;
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		}
	}
	for (; n < (out_sgs + in_sgs); n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT | VRING_DESC_F_WRITE);
			desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
			prev = i;
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		}
	}
	/* Last one doesn't continue. */
	desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);

	if (indirect) {
		/* Now that the indirect table is filled in, map it. */
		dma_addr_t addr = vring_map_single(
			vq, desc, total_sg * sizeof(struct vring_desc),
			DMA_TO_DEVICE);
		if (vring_mapping_error(vq, addr))
			goto unmap_release;

		vq->vring.desc[head].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_INDIRECT);
		vq->vring.desc[head].addr = cpu_to_virtio64(_vq->vdev, addr);

		vq->vring.desc[head].len = cpu_to_virtio32(_vq->vdev, total_sg * sizeof(struct vring_desc));
	}

	/* We're using some buffers from the free list. */
	vq->vq.num_free -= descs_used;

	/* Update free pointer */
	if (indirect)
		vq->free_head = virtio16_to_cpu(_vq->vdev, vq->vring.desc[head].next);
	else
		vq->free_head = i;

	/* Store token and indirect buffer state. */
	vq->desc_state[head].data = data;
	if (indirect)
		vq->desc_state[head].indir_desc = desc;
	if (ctx)
		vq->desc_state[head].indir_desc = ctx;

	/* Put entry in available array (but don't update avail->idx until they
	 * do sync). */
	avail = vq->avail_idx_shadow & (vq->vring.num - 1);
	vq->vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);

	/* Descriptors and available array need to be set before we expose the
	 * new available array entries. */
	virtio_wmb(vq->weak_barriers);
	vq->avail_idx_shadow++;
	vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow);
	vq->num_added++;

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	/* This is very unlikely, but theoretically possible.  Kick
	 * just in case. */
	if (unlikely(vq->num_added == (1 << 16) - 1))
		virtqueue_kick(_vq);

	return 0;

unmap_release:
	err_idx = i;
	i = head;

	for (n = 0; n < total_sg; n++) {
		if (i == err_idx)
			break;
		vring_unmap_one(vq, &desc[i]);
		i = virtio16_to_cpu(_vq->vdev, vq->vring.desc[i].next);
	}

	vq->vq.num_free += total_sg;

	if (indirect)
		kfree(desc);

	END_USE(vq);
	return -EIO;
}

/**
 * virtqueue_add_sgs - expose buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sgs: array of terminated scatterlists.
 * @out_sgs: the number of scatterlists readable by other side
 * @in_sgs: the number of scatterlists which are writable (after readable ones)
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_sgs(struct virtqueue *_vq,
		      struct scatterlist *sgs[],
		      unsigned int out_sgs,
		      unsigned int in_sgs,
		      void *data,
		      gfp_t gfp)
{
	unsigned int i, total_sg = 0;

	/* Count them first. */
	for (i = 0; i < out_sgs + in_sgs; i++) {
		struct scatterlist *sg;
		for (sg = sgs[i]; sg; sg = sg_next(sg))
			total_sg++;
	}
	return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs,
			     data, NULL, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
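
/*
 * Usage sketch (illustrative only; "req", its fields and "my_vq" are
 * hypothetical): a driver typically wraps each part of a request in a
 * scatterlist, listing the device-readable parts before the
 * device-writable ones, then kicks:
 *
 *	struct scatterlist hdr, status;
 *	struct scatterlist *sgs[] = { &hdr, &status };
 *
 *	sg_init_one(&hdr, &req->hdr, sizeof(req->hdr));
 *	sg_init_one(&status, &req->status, sizeof(req->status));
 *
 *	if (virtqueue_add_sgs(my_vq, sgs, 1, 1, req, GFP_ATOMIC) == 0)
 *		virtqueue_kick(my_vq);
 */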

/**
 * virtqueue_add_outbuf - expose output buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg readable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_outbuf(struct virtqueue *vq,
			 struct scatterlist *sg, unsigned int num,
			 void *data,
			 gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);

/**
 * virtqueue_add_inbuf - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_inbuf(struct virtqueue *vq,
			struct scatterlist *sg, unsigned int num,
			void *data,
			gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 0, 1, data, NULL, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);
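
/*
 * Usage sketch (illustrative only; "buf", "len" and "my_vq" are
 * hypothetical): posting an empty, device-writable receive buffer,
 * e.g. for incoming packets:
 *
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, buf, len);
 *	err = virtqueue_add_inbuf(my_vq, &sg, 1, buf, GFP_KERNEL);
 */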

/**
 * virtqueue_add_inbuf_ctx - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @ctx: extra context for the token
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_inbuf_ctx(struct virtqueue *vq,
			struct scatterlist *sg, unsigned int num,
			void *data,
			void *ctx,
			gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_ctx);

/**
 * virtqueue_kick_prepare - first half of split virtqueue_kick call.
 * @vq: the struct virtqueue
 *
 * Instead of virtqueue_kick(), you can do:
 *	if (virtqueue_kick_prepare(vq))
 *		virtqueue_notify(vq);
 *
 * This is sometimes useful because the virtqueue_kick_prepare() needs
 * to be serialized, but the actual virtqueue_notify() call does not.
 */
bool virtqueue_kick_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 new, old;
	bool needs_kick;

	START_USE(vq);
	/* We need to expose available array entries before checking avail
	 * event. */
	virtio_mb(vq->weak_barriers);

	old = vq->avail_idx_shadow - vq->num_added;
	new = vq->avail_idx_shadow;
	vq->num_added = 0;

#ifdef DEBUG
	if (vq->last_add_time_valid) {
		WARN_ON(ktime_to_ms(ktime_sub(ktime_get(),
					      vq->last_add_time)) > 100);
	}
	vq->last_add_time_valid = false;
#endif

	if (vq->event) {
		needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev, vring_avail_event(&vq->vring)),
					      new, old);
	} else {
		needs_kick = !(vq->vring.used->flags & cpu_to_virtio16(_vq->vdev, VRING_USED_F_NO_NOTIFY));
	}
	END_USE(vq);
	return needs_kick;
}
EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);

/**
 * virtqueue_notify - second half of split virtqueue_kick call.
 * @vq: the struct virtqueue
 *
 * This does not need to be serialized.
 *
 * Returns false if host notify failed or queue is broken, otherwise true.
 */
bool virtqueue_notify(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (unlikely(vq->broken))
		return false;

	/* Prod other side to tell it about changes. */
	if (!vq->notify(_vq)) {
		vq->broken = true;
		return false;
	}
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_notify);

/**
 * virtqueue_kick - update after add_buf
 * @vq: the struct virtqueue
 *
 * After one or more virtqueue_add_* calls, invoke this to kick
 * the other side.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns false if kick failed, otherwise true.
 */
bool virtqueue_kick(struct virtqueue *vq)
{
	if (virtqueue_kick_prepare(vq))
		return virtqueue_notify(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_kick);

static void detach_buf(struct vring_virtqueue *vq, unsigned int head,
		       void **ctx)
{
	unsigned int i, j;
	__virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);

	/* Clear data ptr. */
	vq->desc_state[head].data = NULL;

	/* Put back on free list: unmap first-level descriptors and find end */
	i = head;

	while (vq->vring.desc[i].flags & nextflag) {
		vring_unmap_one(vq, &vq->vring.desc[i]);
		i = virtio16_to_cpu(vq->vq.vdev, vq->vring.desc[i].next);
		vq->vq.num_free++;
	}

	vring_unmap_one(vq, &vq->vring.desc[i]);
	vq->vring.desc[i].next = cpu_to_virtio16(vq->vq.vdev, vq->free_head);
	vq->free_head = head;

	/* Plus final descriptor */
	vq->vq.num_free++;

	if (vq->indirect) {
		struct vring_desc *indir_desc = vq->desc_state[head].indir_desc;
		u32 len;

		/* Free the indirect table, if any, now that it's unmapped. */
		if (!indir_desc)
			return;

		len = virtio32_to_cpu(vq->vq.vdev, vq->vring.desc[head].len);

		BUG_ON(!(vq->vring.desc[head].flags &
			 cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_INDIRECT)));
		BUG_ON(len == 0 || len % sizeof(struct vring_desc));

		for (j = 0; j < len / sizeof(struct vring_desc); j++)
			vring_unmap_one(vq, &indir_desc[j]);

		kfree(indir_desc);
		vq->desc_state[head].indir_desc = NULL;
	} else if (ctx) {
		*ctx = vq->desc_state[head].indir_desc;
	}
}

static inline bool more_used(const struct vring_virtqueue *vq)
{
	return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev, vq->vring.used->idx);
}

/**
 * virtqueue_get_buf - get the next used buffer
 * @vq: the struct virtqueue we're talking about.
 * @len: the length written into the buffer
 *
 * If the device wrote data into the buffer, @len will be set to the
 * amount written.  This means you don't need to clear the buffer
 * beforehand to ensure there's no data leakage in the case of short
 * writes.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns NULL if there are no used buffers, or the "data" token
 * handed to virtqueue_add_*().
 */
void *virtqueue_get_buf_ctx(struct virtqueue *_vq, unsigned int *len,
			    void **ctx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	void *ret;
	unsigned int i;
	u16 last_used;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* Only get used array entries after they have been exposed by host. */
	virtio_rmb(vq->weak_barriers);

	last_used = (vq->last_used_idx & (vq->vring.num - 1));
	i = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].id);
	*len = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].len);

	if (unlikely(i >= vq->vring.num)) {
		BAD_RING(vq, "id %u out of range\n", i);
		return NULL;
	}
	if (unlikely(!vq->desc_state[i].data)) {
		BAD_RING(vq, "id %u is not a head!\n", i);
		return NULL;
	}

	/* detach_buf clears data, so grab it now. */
	ret = vq->desc_state[i].data;
	detach_buf(vq, i, ctx);
	vq->last_used_idx++;
	/* If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call. */
	if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT))
		virtio_store_mb(vq->weak_barriers,
				&vring_used_event(&vq->vring),
				cpu_to_virtio16(_vq->vdev, vq->last_used_idx));

#ifdef DEBUG
	vq->last_add_time_valid = false;
#endif

	END_USE(vq);
	return ret;
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf_ctx);

void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
	return virtqueue_get_buf_ctx(_vq, len, NULL);
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf);
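
/*
 * Typical completion path (illustrative sketch; process_completion()
 * stands in for a hypothetical driver routine): drain every used
 * buffer the device has returned, e.g. from the virtqueue callback:
 *
 *	unsigned int len;
 *	void *buf;
 *
 *	while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
 *		process_completion(buf, len);
 */
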
/**
 * virtqueue_disable_cb - disable callbacks
 * @vq: the struct virtqueue we're talking about.
 *
 * Note that this is not necessarily synchronous, hence unreliable and only
 * useful as an optimization.
 *
 * Unlike other operations, this need not be serialized.
 */
void virtqueue_disable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
		vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
	}
}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);

/**
 * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns current queue state
 * in an opaque unsigned value. This value should be later tested by
 * virtqueue_poll, to detect a possible race between the driver checking for
 * more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 last_used_idx;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
	}
	vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, last_used_idx = vq->last_used_idx);
	END_USE(vq);
	return last_used_idx;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);

/**
 * virtqueue_poll - query pending used buffers
 * @vq: the struct virtqueue we're talking about.
 * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
 *
 * Returns "true" if there are pending used buffers in the queue.
 *
 * This does not need to be serialized.
 */
bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	virtio_mb(vq->weak_barriers);
	return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev, vq->vring.used->idx);
}
EXPORT_SYMBOL_GPL(virtqueue_poll);
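
/*
 * How the prepare/poll pair is meant to be used (illustrative sketch of
 * the usual pattern; the buffer-processing step is left out):
 *
 *	unsigned opaque;
 *
 *	virtqueue_disable_cb(vq);
 *	... process used buffers ...
 *	opaque = virtqueue_enable_cb_prepare(vq);
 *	if (virtqueue_poll(vq, opaque))
 *		virtqueue_disable_cb(vq);	(more work arrived: go again)
 */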

/**
 * virtqueue_enable_cb - restart callbacks after disable_cb.
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns "false" if there are pending
 * buffers in the queue, to detect a possible race between the driver
 * checking for more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb(struct virtqueue *_vq)
{
	unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);
	return !virtqueue_poll(_vq, last_used_idx);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);

/**
 * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks but hints to the other side to delay
 * interrupts until most of the available buffers have been processed;
 * it returns "false" if there are many pending buffers in the queue,
 * to detect a possible race between the driver checking for more work,
 * and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 bufs;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always update the event index to keep code simple. */
	if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
	}
	/* TODO: tune this threshold */
	bufs = (u16)(vq->avail_idx_shadow - vq->last_used_idx) * 3 / 4;

	virtio_store_mb(vq->weak_barriers,
			&vring_used_event(&vq->vring),
			cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs));

	if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->vring.used->idx) - vq->last_used_idx) > bufs)) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);

/**
 * virtqueue_detach_unused_buf - detach first unused buffer
 * @vq: the struct virtqueue we're talking about.
 *
 * Returns NULL or the "data" token handed to virtqueue_add_*().
 * This is not valid on an active queue; it is useful only for device
 * shutdown.
 */
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i;
	void *buf;

	START_USE(vq);

	for (i = 0; i < vq->vring.num; i++) {
		if (!vq->desc_state[i].data)
			continue;
		/* detach_buf clears data, so grab it now. */
		buf = vq->desc_state[i].data;
		detach_buf(vq, i, NULL);
		vq->avail_idx_shadow--;
		vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow);
		END_USE(vq);
		return buf;
	}
	/* That should have freed everything. */
	BUG_ON(vq->vq.num_free != vq->vring.num);

	END_USE(vq);
	return NULL;
}
EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);

irqreturn_t vring_interrupt(int irq, void *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!more_used(vq)) {
		pr_debug("virtqueue interrupt with no work for %p\n", vq);
		return IRQ_NONE;
	}

	if (unlikely(vq->broken))
		return IRQ_HANDLED;

	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
	if (vq->vq.callback)
		vq->vq.callback(&vq->vq);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);
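
/*
 * Transports hand this to the IRQ layer as the per-virtqueue handler.
 * An illustrative sketch ("irq" and "name" are whatever the transport
 * uses; the flags depend on how the line is shared):
 *
 *	err = request_irq(irq, vring_interrupt, IRQF_SHARED, name, vq);
 */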
9580a8a69ddSRusty Russell 
9592a2d1382SAndy Lutomirski struct virtqueue *__vring_new_virtqueue(unsigned int index,
9602a2d1382SAndy Lutomirski 					struct vring vring,
9610a8a69ddSRusty Russell 					struct virtio_device *vdev,
9627b21e34fSRusty Russell 					bool weak_barriers,
963f94682ddSMichael S. Tsirkin 					bool context,
96446f9c2b9SHeinz Graalfs 					bool (*notify)(struct virtqueue *),
9659499f5e7SRusty Russell 					void (*callback)(struct virtqueue *),
9669499f5e7SRusty Russell 					const char *name)
9670a8a69ddSRusty Russell {
9680a8a69ddSRusty Russell 	unsigned int i;
9692a2d1382SAndy Lutomirski 	struct vring_virtqueue *vq;
9700a8a69ddSRusty Russell 
9712a2d1382SAndy Lutomirski 	vq = kmalloc(sizeof(*vq) + vring.num * sizeof(struct vring_desc_state),
972780bc790SAndy Lutomirski 		     GFP_KERNEL);
9730a8a69ddSRusty Russell 	if (!vq)
9740a8a69ddSRusty Russell 		return NULL;
9750a8a69ddSRusty Russell 
9762a2d1382SAndy Lutomirski 	vq->vring = vring;
9770a8a69ddSRusty Russell 	vq->vq.callback = callback;
9780a8a69ddSRusty Russell 	vq->vq.vdev = vdev;
9799499f5e7SRusty Russell 	vq->vq.name = name;
9802a2d1382SAndy Lutomirski 	vq->vq.num_free = vring.num;
98106ca287dSRusty Russell 	vq->vq.index = index;
9822a2d1382SAndy Lutomirski 	vq->we_own_ring = false;
9832a2d1382SAndy Lutomirski 	vq->queue_dma_addr = 0;
9842a2d1382SAndy Lutomirski 	vq->queue_size_in_bytes = 0;
9850a8a69ddSRusty Russell 	vq->notify = notify;
9867b21e34fSRusty Russell 	vq->weak_barriers = weak_barriers;
9870a8a69ddSRusty Russell 	vq->broken = false;
9880a8a69ddSRusty Russell 	vq->last_used_idx = 0;
989f277ec42SVenkatesh Srinivas 	vq->avail_flags_shadow = 0;
990f277ec42SVenkatesh Srinivas 	vq->avail_idx_shadow = 0;
9910a8a69ddSRusty Russell 	vq->num_added = 0;
9929499f5e7SRusty Russell 	list_add_tail(&vq->vq.list, &vdev->vqs);
9930a8a69ddSRusty Russell #ifdef DEBUG
9940a8a69ddSRusty Russell 	vq->in_use = false;
995e93300b1SRusty Russell 	vq->last_add_time_valid = false;
9960a8a69ddSRusty Russell #endif
9970a8a69ddSRusty Russell 
9985a08b04fSMichael S. Tsirkin 	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
9995a08b04fSMichael S. Tsirkin 		!context;
1000a5c262c5SMichael S. Tsirkin 	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
10019fa29b9dSMark McLoughlin 
10020a8a69ddSRusty Russell 	/* No callback?  Tell other side not to bother us. */
1003f277ec42SVenkatesh Srinivas 	if (!callback) {
1004f277ec42SVenkatesh Srinivas 		vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
10050ea1e4a6SLadi Prosek 		if (!vq->event)
1006f277ec42SVenkatesh Srinivas 			vq->vring.avail->flags = cpu_to_virtio16(vdev, vq->avail_flags_shadow);
1007f277ec42SVenkatesh Srinivas 	}
10080a8a69ddSRusty Russell 
10090a8a69ddSRusty Russell 	/* Put everything in free lists. */
10100a8a69ddSRusty Russell 	vq->free_head = 0;
10112a2d1382SAndy Lutomirski 	for (i = 0; i < vring.num-1; i++)
101200e6f3d9SMichael S. Tsirkin 		vq->vring.desc[i].next = cpu_to_virtio16(vdev, i + 1);
10132a2d1382SAndy Lutomirski 	memset(vq->desc_state, 0, vring.num * sizeof(struct vring_desc_state));
10140a8a69ddSRusty Russell 
10150a8a69ddSRusty Russell 	return &vq->vq;
10160a8a69ddSRusty Russell }
10172a2d1382SAndy Lutomirski EXPORT_SYMBOL_GPL(__vring_new_virtqueue);
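
/*
 * Editor's example (sketch, not in the original source): a transport that
 * allocates ring memory itself lays the ring out with vring_init() and
 * then wraps it here.  we_own_ring stays false, so the caller also frees
 * the memory.  All "example_" names are hypothetical.
 */
static struct virtqueue *__maybe_unused
example_wrap_preallocated_ring(struct virtio_device *vdev, void *ring_mem,
			       unsigned int num,
			       bool (*notify)(struct virtqueue *),
			       void (*callback)(struct virtqueue *))
{
	struct vring vring;

	/* ring_mem must hold vring_size(num, PAGE_SIZE) zeroed bytes. */
	vring_init(&vring, num, ring_mem, PAGE_SIZE);

	return __vring_new_virtqueue(0, vring, vdev,
				     true,  /* weak_barriers */
				     false, /* context */
				     notify, callback, "example-vq");
}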
10182a2d1382SAndy Lutomirski 
10192a2d1382SAndy Lutomirski static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
10202a2d1382SAndy Lutomirski 			      dma_addr_t *dma_handle, gfp_t flag)
10212a2d1382SAndy Lutomirski {
10222a2d1382SAndy Lutomirski 	if (vring_use_dma_api(vdev)) {
10232a2d1382SAndy Lutomirski 		return dma_alloc_coherent(vdev->dev.parent, size,
10242a2d1382SAndy Lutomirski 					  dma_handle, flag);
10252a2d1382SAndy Lutomirski 	} else {
10262a2d1382SAndy Lutomirski 		void *queue = alloc_pages_exact(PAGE_ALIGN(size), flag);
10272a2d1382SAndy Lutomirski 		if (queue) {
10282a2d1382SAndy Lutomirski 			phys_addr_t phys_addr = virt_to_phys(queue);
10292a2d1382SAndy Lutomirski 			*dma_handle = (dma_addr_t)phys_addr;
10302a2d1382SAndy Lutomirski 
10312a2d1382SAndy Lutomirski 			/*
10332a2d1382SAndy Lutomirski 			 * Sanity check: make sure we didn't truncate
10332a2d1382SAndy Lutomirski 			 * the address.  The only arches I can find that
10342a2d1382SAndy Lutomirski 			 * have 64-bit phys_addr_t but 32-bit dma_addr_t
10352a2d1382SAndy Lutomirski 			 * are certain non-highmem MIPS and x86
10362a2d1382SAndy Lutomirski 			 * configurations, but these configurations
10372a2d1382SAndy Lutomirski 			 * should never allocate physical pages above 32
10382a2d1382SAndy Lutomirski 			 * bits, so this is fine.  Just in case, throw a
10392a2d1382SAndy Lutomirski 			 * warning and abort if we end up with an
10402a2d1382SAndy Lutomirski 			 * unrepresentable address.
10412a2d1382SAndy Lutomirski 			 */
10422a2d1382SAndy Lutomirski 			if (WARN_ON_ONCE(*dma_handle != phys_addr)) {
10432a2d1382SAndy Lutomirski 				free_pages_exact(queue, PAGE_ALIGN(size));
10442a2d1382SAndy Lutomirski 				return NULL;
10452a2d1382SAndy Lutomirski 			}
10462a2d1382SAndy Lutomirski 		}
10472a2d1382SAndy Lutomirski 		return queue;
10482a2d1382SAndy Lutomirski 	}
10492a2d1382SAndy Lutomirski }
10502a2d1382SAndy Lutomirski 
10512a2d1382SAndy Lutomirski static void vring_free_queue(struct virtio_device *vdev, size_t size,
10522a2d1382SAndy Lutomirski 			     void *queue, dma_addr_t dma_handle)
10532a2d1382SAndy Lutomirski {
10542a2d1382SAndy Lutomirski 	if (vring_use_dma_api(vdev)) {
10552a2d1382SAndy Lutomirski 		dma_free_coherent(vdev->dev.parent, size, queue, dma_handle);
10562a2d1382SAndy Lutomirski 	} else {
10572a2d1382SAndy Lutomirski 		free_pages_exact(queue, PAGE_ALIGN(size));
10582a2d1382SAndy Lutomirski 	}
10592a2d1382SAndy Lutomirski }
10602a2d1382SAndy Lutomirski 
10612a2d1382SAndy Lutomirski struct virtqueue *vring_create_virtqueue(
10622a2d1382SAndy Lutomirski 	unsigned int index,
10632a2d1382SAndy Lutomirski 	unsigned int num,
10642a2d1382SAndy Lutomirski 	unsigned int vring_align,
10652a2d1382SAndy Lutomirski 	struct virtio_device *vdev,
10662a2d1382SAndy Lutomirski 	bool weak_barriers,
10672a2d1382SAndy Lutomirski 	bool may_reduce_num,
1068f94682ddSMichael S. Tsirkin 	bool context,
10692a2d1382SAndy Lutomirski 	bool (*notify)(struct virtqueue *),
10702a2d1382SAndy Lutomirski 	void (*callback)(struct virtqueue *),
10712a2d1382SAndy Lutomirski 	const char *name)
10722a2d1382SAndy Lutomirski {
10732a2d1382SAndy Lutomirski 	struct virtqueue *vq;
1074e00f7bd2SDan Carpenter 	void *queue = NULL;
10752a2d1382SAndy Lutomirski 	dma_addr_t dma_addr;
10762a2d1382SAndy Lutomirski 	size_t queue_size_in_bytes;
10772a2d1382SAndy Lutomirski 	struct vring vring;
10782a2d1382SAndy Lutomirski 
10792a2d1382SAndy Lutomirski 	/* We assume num is a power of 2. */
10802a2d1382SAndy Lutomirski 	if (num & (num - 1)) {
10812a2d1382SAndy Lutomirski 		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
10822a2d1382SAndy Lutomirski 		return NULL;
10832a2d1382SAndy Lutomirski 	}
10842a2d1382SAndy Lutomirski 
10852a2d1382SAndy Lutomirski 	/* TODO: allocate each queue chunk individually */
10862a2d1382SAndy Lutomirski 	for (; num && vring_size(num, vring_align) > PAGE_SIZE; num /= 2) {
10872a2d1382SAndy Lutomirski 		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
10882a2d1382SAndy Lutomirski 					  &dma_addr,
10892a2d1382SAndy Lutomirski 					  GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
10902a2d1382SAndy Lutomirski 		if (queue)
10912a2d1382SAndy Lutomirski 			break;
10922a2d1382SAndy Lutomirski 	}
10932a2d1382SAndy Lutomirski 
10942a2d1382SAndy Lutomirski 	if (!num)
10952a2d1382SAndy Lutomirski 		return NULL;
10962a2d1382SAndy Lutomirski 
10972a2d1382SAndy Lutomirski 	if (!queue) {
10982a2d1382SAndy Lutomirski 		/* Try to get a single page. You are my only hope! */
10992a2d1382SAndy Lutomirski 		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
11002a2d1382SAndy Lutomirski 					  &dma_addr, GFP_KERNEL|__GFP_ZERO);
11012a2d1382SAndy Lutomirski 	}
11022a2d1382SAndy Lutomirski 	if (!queue)
11032a2d1382SAndy Lutomirski 		return NULL;
11042a2d1382SAndy Lutomirski 
11052a2d1382SAndy Lutomirski 	queue_size_in_bytes = vring_size(num, vring_align);
11062a2d1382SAndy Lutomirski 	vring_init(&vring, num, queue, vring_align);
11072a2d1382SAndy Lutomirski 
1108f94682ddSMichael S. Tsirkin 	vq = __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
11092a2d1382SAndy Lutomirski 				   notify, callback, name);
11102a2d1382SAndy Lutomirski 	if (!vq) {
11112a2d1382SAndy Lutomirski 		vring_free_queue(vdev, queue_size_in_bytes, queue,
11122a2d1382SAndy Lutomirski 				 dma_addr);
11132a2d1382SAndy Lutomirski 		return NULL;
11142a2d1382SAndy Lutomirski 	}
11152a2d1382SAndy Lutomirski 
11162a2d1382SAndy Lutomirski 	to_vvq(vq)->queue_dma_addr = dma_addr;
11172a2d1382SAndy Lutomirski 	to_vvq(vq)->queue_size_in_bytes = queue_size_in_bytes;
11182a2d1382SAndy Lutomirski 	to_vvq(vq)->we_own_ring = true;
11192a2d1382SAndy Lutomirski 
11202a2d1382SAndy Lutomirski 	return vq;
11212a2d1382SAndy Lutomirski }
11222a2d1382SAndy Lutomirski EXPORT_SYMBOL_GPL(vring_create_virtqueue);
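
/*
 * Editor's example (sketch, not in the original source): a typical
 * transport setup path asks for a 256-entry, page-aligned ring and lets
 * the core shrink it if the allocation fails.  Names are hypothetical.
 */
static struct virtqueue *__maybe_unused
example_create_vq(struct virtio_device *vdev, unsigned int index,
		  bool (*notify)(struct virtqueue *),
		  void (*callback)(struct virtqueue *))
{
	return vring_create_virtqueue(index, 256, PAGE_SIZE, vdev,
				      true,  /* weak_barriers */
				      true,  /* may_reduce_num */
				      false, /* context */
				      notify, callback, "example-vq");
}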
11232a2d1382SAndy Lutomirski 
11242a2d1382SAndy Lutomirski struct virtqueue *vring_new_virtqueue(unsigned int index,
11252a2d1382SAndy Lutomirski 				      unsigned int num,
11262a2d1382SAndy Lutomirski 				      unsigned int vring_align,
11272a2d1382SAndy Lutomirski 				      struct virtio_device *vdev,
11282a2d1382SAndy Lutomirski 				      bool weak_barriers,
1129f94682ddSMichael S. Tsirkin 				      bool context,
11302a2d1382SAndy Lutomirski 				      void *pages,
11312a2d1382SAndy Lutomirski 				      bool (*notify)(struct virtqueue *vq),
11322a2d1382SAndy Lutomirski 				      void (*callback)(struct virtqueue *vq),
11332a2d1382SAndy Lutomirski 				      const char *name)
11342a2d1382SAndy Lutomirski {
11352a2d1382SAndy Lutomirski 	struct vring vring;
11362a2d1382SAndy Lutomirski 	vring_init(&vring, num, pages, vring_align);
1137f94682ddSMichael S. Tsirkin 	return __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
11382a2d1382SAndy Lutomirski 				     notify, callback, name);
11392a2d1382SAndy Lutomirski }
1140c6fd4701SRusty Russell EXPORT_SYMBOL_GPL(vring_new_virtqueue);
11410a8a69ddSRusty Russell 
11422a2d1382SAndy Lutomirski void vring_del_virtqueue(struct virtqueue *_vq)
11430a8a69ddSRusty Russell {
11442a2d1382SAndy Lutomirski 	struct vring_virtqueue *vq = to_vvq(_vq);
11452a2d1382SAndy Lutomirski 
11462a2d1382SAndy Lutomirski 	if (vq->we_own_ring) {
11472a2d1382SAndy Lutomirski 		vring_free_queue(vq->vq.vdev, vq->queue_size_in_bytes,
11482a2d1382SAndy Lutomirski 				 vq->vring.desc, vq->queue_dma_addr);
11492a2d1382SAndy Lutomirski 	}
11502a2d1382SAndy Lutomirski 	list_del(&_vq->list);
11512a2d1382SAndy Lutomirski 	kfree(vq);
11520a8a69ddSRusty Russell }
1153c6fd4701SRusty Russell EXPORT_SYMBOL_GPL(vring_del_virtqueue);
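
/*
 * Editor's example (sketch, not in the original source): teardown
 * ownership rules.  A ring from vring_create_virtqueue() is freed by
 * vring_del_virtqueue() itself (we_own_ring is true); with the legacy
 * vring_new_virtqueue() the caller must free its own pages afterwards.
 */
static void __maybe_unused example_teardown(struct virtqueue *vq,
					    void *pages, size_t size)
{
	vring_del_virtqueue(vq);	/* frees the ring only if it owns it */
	if (pages)			/* legacy case: caller-owned memory */
		free_pages_exact(pages, PAGE_ALIGN(size));
}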
11540a8a69ddSRusty Russell 
1155e34f8725SRusty Russell /* Manipulates transport-specific feature bits. */
1156e34f8725SRusty Russell void vring_transport_features(struct virtio_device *vdev)
1157e34f8725SRusty Russell {
1158e34f8725SRusty Russell 	unsigned int i;
1159e34f8725SRusty Russell 
1160e34f8725SRusty Russell 	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
1161e34f8725SRusty Russell 		switch (i) {
11629fa29b9dSMark McLoughlin 		case VIRTIO_RING_F_INDIRECT_DESC:
11639fa29b9dSMark McLoughlin 			break;
1164a5c262c5SMichael S. Tsirkin 		case VIRTIO_RING_F_EVENT_IDX:
1165a5c262c5SMichael S. Tsirkin 			break;
1166747ae34aSMichael S. Tsirkin 		case VIRTIO_F_VERSION_1:
1167747ae34aSMichael S. Tsirkin 			break;
11681a937693SMichael S. Tsirkin 		case VIRTIO_F_IOMMU_PLATFORM:
11691a937693SMichael S. Tsirkin 			break;
1170e34f8725SRusty Russell 		default:
1171e34f8725SRusty Russell 			/* We don't understand this bit. */
1172e16e12beSMichael S. Tsirkin 			__virtio_clear_bit(vdev, i);
1173e34f8725SRusty Russell 		}
1174e34f8725SRusty Russell 	}
1175e34f8725SRusty Russell }
1176e34f8725SRusty Russell EXPORT_SYMBOL_GPL(vring_transport_features);
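
/*
 * Editor's example (sketch, not in the original source): transports call
 * this from their finalize_features hook so that only the transport
 * feature bits the ring code understands survive negotiation.
 */
static int __maybe_unused example_finalize_features(struct virtio_device *vdev)
{
	/* Clears transport bits other than INDIRECT_DESC, EVENT_IDX,
	 * VERSION_1 and IOMMU_PLATFORM. */
	vring_transport_features(vdev);
	return 0;
}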
1177e34f8725SRusty Russell 
11785dfc1762SRusty Russell /**
11795dfc1762SRusty Russell  * virtqueue_get_vring_size - return the size of the virtqueue's vring
11805dfc1762SRusty Russell  * @vq: the struct virtqueue containing the vring of interest.
11815dfc1762SRusty Russell  *
11825dfc1762SRusty Russell  * Returns the size of the vring.  This is mainly used for boasting to
11835dfc1762SRusty Russell  * userspace.  Unlike other operations, this need not be serialized.
11845dfc1762SRusty Russell  */
11858f9f4668SRick Jones unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
11868f9f4668SRick Jones {
11888f9f4668SRick Jones 	struct vring_virtqueue *vq = to_vvq(_vq);
11898f9f4668SRick Jones 
11908f9f4668SRick Jones 	return vq->vring.num;
11918f9f4668SRick Jones }
11928f9f4668SRick Jones EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);
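
/*
 * Editor's example (sketch, not in the original source): drivers commonly
 * size per-descriptor bookkeeping from the negotiated ring size.
 */
static void *__maybe_unused example_alloc_per_desc_state(struct virtqueue *vq,
							 size_t elem_size)
{
	return kcalloc(virtqueue_get_vring_size(vq), elem_size, GFP_KERNEL);
}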
11938f9f4668SRick Jones 
1194b3b32c94SHeinz Graalfs bool virtqueue_is_broken(struct virtqueue *_vq)
1195b3b32c94SHeinz Graalfs {
1196b3b32c94SHeinz Graalfs 	struct vring_virtqueue *vq = to_vvq(_vq);
1197b3b32c94SHeinz Graalfs 
1198b3b32c94SHeinz Graalfs 	return vq->broken;
1199b3b32c94SHeinz Graalfs }
1200b3b32c94SHeinz Graalfs EXPORT_SYMBOL_GPL(virtqueue_is_broken);
1201b3b32c94SHeinz Graalfs 
1202e2dcdfe9SRusty Russell /*
1203e2dcdfe9SRusty Russell  * This should prevent the device from being used, allowing drivers to
1204e2dcdfe9SRusty Russell  * recover.  You may need to grab appropriate locks to flush.
1205e2dcdfe9SRusty Russell  */
1206e2dcdfe9SRusty Russell void virtio_break_device(struct virtio_device *dev)
1207e2dcdfe9SRusty Russell {
1208e2dcdfe9SRusty Russell 	struct virtqueue *_vq;
1209e2dcdfe9SRusty Russell 
1210e2dcdfe9SRusty Russell 	list_for_each_entry(_vq, &dev->vqs, list) {
1211e2dcdfe9SRusty Russell 		struct vring_virtqueue *vq = to_vvq(_vq);
1212e2dcdfe9SRusty Russell 		vq->broken = true;
1213e2dcdfe9SRusty Russell 	}
1214e2dcdfe9SRusty Russell }
1215e2dcdfe9SRusty Russell EXPORT_SYMBOL_GPL(virtio_break_device);
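
/*
 * Editor's example (sketch, not in the original source): on surprise
 * removal a transport can break the whole device, after which per-queue
 * users observe the state via virtqueue_is_broken() and bail out.
 */
static bool __maybe_unused
example_handle_surprise_removal(struct virtio_device *vdev,
				struct virtqueue *vq)
{
	virtio_break_device(vdev);	/* marks every vq on the device */
	return virtqueue_is_broken(vq);	/* now true for this vq too */
}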
1216e2dcdfe9SRusty Russell 
12172a2d1382SAndy Lutomirski dma_addr_t virtqueue_get_desc_addr(struct virtqueue *_vq)
121889062652SCornelia Huck {
121989062652SCornelia Huck 	struct vring_virtqueue *vq = to_vvq(_vq);
122089062652SCornelia Huck 
12212a2d1382SAndy Lutomirski 	BUG_ON(!vq->we_own_ring);
122289062652SCornelia Huck 
12232a2d1382SAndy Lutomirski 	return vq->queue_dma_addr;
12242a2d1382SAndy Lutomirski }
12252a2d1382SAndy Lutomirski EXPORT_SYMBOL_GPL(virtqueue_get_desc_addr);
12262a2d1382SAndy Lutomirski 
12272a2d1382SAndy Lutomirski dma_addr_t virtqueue_get_avail_addr(struct virtqueue *_vq)
122889062652SCornelia Huck {
122989062652SCornelia Huck 	struct vring_virtqueue *vq = to_vvq(_vq);
123089062652SCornelia Huck 
12312a2d1382SAndy Lutomirski 	BUG_ON(!vq->we_own_ring);
12322a2d1382SAndy Lutomirski 
12332a2d1382SAndy Lutomirski 	return vq->queue_dma_addr +
12342a2d1382SAndy Lutomirski 		((char *)vq->vring.avail - (char *)vq->vring.desc);
123589062652SCornelia Huck }
12362a2d1382SAndy Lutomirski EXPORT_SYMBOL_GPL(virtqueue_get_avail_addr);
12372a2d1382SAndy Lutomirski 
12382a2d1382SAndy Lutomirski dma_addr_t virtqueue_get_used_addr(struct virtqueue *_vq)
12392a2d1382SAndy Lutomirski {
12402a2d1382SAndy Lutomirski 	struct vring_virtqueue *vq = to_vvq(_vq);
12412a2d1382SAndy Lutomirski 
12422a2d1382SAndy Lutomirski 	BUG_ON(!vq->we_own_ring);
12432a2d1382SAndy Lutomirski 
12442a2d1382SAndy Lutomirski 	return vq->queue_dma_addr +
12452a2d1382SAndy Lutomirski 		((char *)vq->vring.used - (char *)vq->vring.desc);
12462a2d1382SAndy Lutomirski }
12472a2d1382SAndy Lutomirski EXPORT_SYMBOL_GPL(virtqueue_get_used_addr);
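
/*
 * Editor's example (sketch, not in the original source): a transport
 * programs the three ring areas into device registers.  The register
 * offsets below are hypothetical placeholders, not a real device layout.
 * Assumes <linux/io.h>.
 */
static void __maybe_unused example_program_ring(struct virtqueue *vq,
						void __iomem *base)
{
	u64 desc  = virtqueue_get_desc_addr(vq);
	u64 avail = virtqueue_get_avail_addr(vq);
	u64 used  = virtqueue_get_used_addr(vq);

	writel(lower_32_bits(desc),  base + 0x00);
	writel(upper_32_bits(desc),  base + 0x04);
	writel(lower_32_bits(avail), base + 0x08);
	writel(upper_32_bits(avail), base + 0x0c);
	writel(lower_32_bits(used),  base + 0x10);
	writel(upper_32_bits(used),  base + 0x14);
}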
12482a2d1382SAndy Lutomirski 
12492a2d1382SAndy Lutomirski const struct vring *virtqueue_get_vring(struct virtqueue *vq)
12502a2d1382SAndy Lutomirski {
12512a2d1382SAndy Lutomirski 	return &to_vvq(vq)->vring;
12522a2d1382SAndy Lutomirski }
12532a2d1382SAndy Lutomirski EXPORT_SYMBOL_GPL(virtqueue_get_vring);
125489062652SCornelia Huck 
1255c6fd4701SRusty Russell MODULE_LICENSE("GPL");
1256