/* Virtio ring implementation.
 *
 *  Copyright 2007 Rusty Russell IBM Corporation
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/kmemleak.h>
#include <linux/dma-mapping.h>

#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		BUG();						\
	} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)						\
	do {							\
		if ((_vq)->in_use)				\
			panic("%s:in_use = %i\n",		\
			      (_vq)->vq.name, (_vq)->in_use);	\
		(_vq)->in_use = __LINE__;			\
	} while (0)
#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
#else
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&_vq->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		(_vq)->broken = true;				\
	} while (0)
#define START_USE(vq)
#define END_USE(vq)
#endif

struct vring_desc_state {
	void *data;			/* Data for callback. */
	struct vring_desc *indir_desc;	/* Indirect descriptor, if any. */
};

struct vring_virtqueue {
	struct virtqueue vq;

	/* Actual memory layout for this queue */
	struct vring vring;

	/* Can we use weak barriers? */
	bool weak_barriers;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Host supports indirect buffers */
	bool indirect;

	/* Host publishes avail event idx */
	bool event;

	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen. */
	u16 last_used_idx;

	/* Last written value to avail->flags */
	u16 avail_flags_shadow;

	/* Last written value to avail->idx in guest byte order */
	u16 avail_idx_shadow;

	/* How to notify other side. FIXME: commonalize hcalls! */
	bool (*notify)(struct virtqueue *vq);

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;

	/* Figure out if their kicks are too delayed. */
	bool last_add_time_valid;
	ktime_t last_add_time;
#endif

	/* Per-descriptor state. */
	struct vring_desc_state desc_state[];
};

#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)

/*
 * The interaction between virtio and a possible IOMMU is a mess.
 *
 * On most systems with virtio, physical addresses match bus addresses,
 * and it doesn't particularly matter whether we use the DMA API.
 *
 * On some systems, including Xen and any system with a physical device
 * that speaks virtio behind a physical IOMMU, we must use the DMA API
 * for virtio DMA to work at all.
 *
 * On other systems, including SPARC and PPC64, virtio-pci devices are
 * enumerated as though they are behind an IOMMU, but the virtio host
 * ignores the IOMMU, so we must either pretend that the IOMMU isn't
 * there or somehow map everything as the identity.
 *
 * For the time being, we preserve historic behavior and bypass the DMA
 * API.
 */

static bool vring_use_dma_api(struct virtio_device *vdev)
{
	return false;
}

/*
 * The DMA ops on various arches are rather gnarly right now, and
 * making all of the arch DMA ops work on the vring device itself
 * is a mess.  For now, we use the parent device for DMA ops.
 */
static struct device *vring_dma_dev(const struct vring_virtqueue *vq)
{
	return vq->vq.vdev->dev.parent;
}

/* Map one sg entry. */
static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
				   struct scatterlist *sg,
				   enum dma_data_direction direction)
{
	if (!vring_use_dma_api(vq->vq.vdev))
		return (dma_addr_t)sg_phys(sg);

	/*
	 * We can't use dma_map_sg, because we don't use scatterlists in
	 * the way it expects (we don't guarantee that the scatterlist
	 * will exist for the lifetime of the mapping).
	 */
	return dma_map_page(vring_dma_dev(vq),
			    sg_page(sg), sg->offset, sg->length,
			    direction);
}

static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
				   void *cpu_addr, size_t size,
				   enum dma_data_direction direction)
{
	if (!vring_use_dma_api(vq->vq.vdev))
		return (dma_addr_t)virt_to_phys(cpu_addr);

	return dma_map_single(vring_dma_dev(vq),
			      cpu_addr, size, direction);
}

static void vring_unmap_one(const struct vring_virtqueue *vq,
			    struct vring_desc *desc)
{
	u16 flags;

	if (!vring_use_dma_api(vq->vq.vdev))
		return;

	flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);

	if (flags & VRING_DESC_F_INDIRECT) {
		dma_unmap_single(vring_dma_dev(vq),
				 virtio64_to_cpu(vq->vq.vdev, desc->addr),
				 virtio32_to_cpu(vq->vq.vdev, desc->len),
				 (flags & VRING_DESC_F_WRITE) ?
				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
	} else {
		dma_unmap_page(vring_dma_dev(vq),
			       virtio64_to_cpu(vq->vq.vdev, desc->addr),
			       virtio32_to_cpu(vq->vq.vdev, desc->len),
			       (flags & VRING_DESC_F_WRITE) ?
			       DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}
}

static int vring_mapping_error(const struct vring_virtqueue *vq,
			       dma_addr_t addr)
{
	if (!vring_use_dma_api(vq->vq.vdev))
		return 0;

	return dma_mapping_error(vring_dma_dev(vq), addr);
}

static struct vring_desc *alloc_indirect(struct virtqueue *_vq,
					 unsigned int total_sg, gfp_t gfp)
{
	struct vring_desc *desc;
	unsigned int i;

	/*
	 * We require lowmem mappings for the descriptors because
	 * otherwise virt_to_phys will give us bogus addresses in the
	 * virtqueue.
	 */
	gfp &= ~__GFP_HIGHMEM;

	desc = kmalloc(total_sg * sizeof(struct vring_desc), gfp);
	if (!desc)
		return NULL;

	for (i = 0; i < total_sg; i++)
		desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1);
	return desc;
}

static inline int virtqueue_add(struct virtqueue *_vq,
				struct scatterlist *sgs[],
				unsigned int total_sg,
				unsigned int out_sgs,
				unsigned int in_sgs,
				void *data,
				gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	struct scatterlist *sg;
	struct vring_desc *desc;
	unsigned int i, n, avail, descs_used, uninitialized_var(prev), err_idx;
	int head;
	bool indirect;

	START_USE(vq);

	BUG_ON(data == NULL);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return -EIO;
	}

#ifdef DEBUG
	{
		ktime_t now = ktime_get();

		/* No kick or get, with .1 second between?  Warn. */
		if (vq->last_add_time_valid)
			WARN_ON(ktime_to_ms(ktime_sub(now, vq->last_add_time))
					    > 100);
		vq->last_add_time = now;
		vq->last_add_time_valid = true;
	}
#endif

	BUG_ON(total_sg > vq->vring.num);
	BUG_ON(total_sg == 0);

	head = vq->free_head;

	/* If the host supports indirect descriptor tables, and we have multiple
	 * buffers, then go indirect. FIXME: tune this threshold */
	if (vq->indirect && total_sg > 1 && vq->vq.num_free)
		desc = alloc_indirect(_vq, total_sg, gfp);
	else
		desc = NULL;

	if (desc) {
		/* Use a single buffer which doesn't continue */
		indirect = true;
		/* Set up rest to use this indirect table. */
		i = 0;
		descs_used = 1;
	} else {
		indirect = false;
		desc = vq->vring.desc;
		i = head;
		descs_used = total_sg;
	}

	if (vq->vq.num_free < descs_used) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 descs_used, vq->vq.num_free);
		/* FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer.  Presumably the
		 * host should service the ring ASAP. */
		if (out_sgs)
			vq->notify(&vq->vq);
		END_USE(vq);
		return -ENOSPC;
	}

	for (n = 0; n < out_sgs; n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT);
			desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
			prev = i;
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		}
	}
	for (; n < (out_sgs + in_sgs); n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT | VRING_DESC_F_WRITE);
			desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
			prev = i;
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		}
	}
	/* Last one doesn't continue. */
	desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);

	if (indirect) {
		/* Now that the indirect table is filled in, map it. */
		dma_addr_t addr = vring_map_single(
			vq, desc, total_sg * sizeof(struct vring_desc),
			DMA_TO_DEVICE);
		if (vring_mapping_error(vq, addr))
			goto unmap_release;

		vq->vring.desc[head].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_INDIRECT);
		vq->vring.desc[head].addr = cpu_to_virtio64(_vq->vdev, addr);

		vq->vring.desc[head].len = cpu_to_virtio32(_vq->vdev, total_sg * sizeof(struct vring_desc));
	}

	/* We're using some buffers from the free list. */
	vq->vq.num_free -= descs_used;

	/* Update free pointer */
	if (indirect)
		vq->free_head = virtio16_to_cpu(_vq->vdev, vq->vring.desc[head].next);
	else
		vq->free_head = i;

	/* Store token and indirect buffer state. */
	vq->desc_state[head].data = data;
	if (indirect)
		vq->desc_state[head].indir_desc = desc;

	/* Put entry in available array (but don't update avail->idx until they
	 * do sync). */
	avail = vq->avail_idx_shadow & (vq->vring.num - 1);
	vq->vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);

	/* Descriptors and available array need to be set before we expose the
	 * new available array entries. */
	virtio_wmb(vq->weak_barriers);
	vq->avail_idx_shadow++;
	vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow);
	vq->num_added++;

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	/* This is very unlikely, but theoretically possible.  Kick
	 * just in case. */
	if (unlikely(vq->num_added == (1 << 16) - 1))
		virtqueue_kick(_vq);

	return 0;

unmap_release:
	err_idx = i;
	i = head;

	for (n = 0; n < total_sg; n++) {
		if (i == err_idx)
			break;
		vring_unmap_one(vq, &desc[i]);
		i = virtio16_to_cpu(_vq->vdev, vq->vring.desc[i].next);
	}

	vq->vq.num_free += total_sg;

	if (indirect)
		kfree(desc);

	return -EIO;
}

/**
 * virtqueue_add_sgs - expose buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sgs: array of terminated scatterlists.
 * @out_sgs: the number of scatterlists readable by other side
 * @in_sgs: the number of scatterlists which are writable (after readable ones)
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (i.e. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_sgs(struct virtqueue *_vq,
		      struct scatterlist *sgs[],
		      unsigned int out_sgs,
		      unsigned int in_sgs,
		      void *data,
		      gfp_t gfp)
{
	unsigned int i, total_sg = 0;

	/* Count them first. */
	for (i = 0; i < out_sgs + in_sgs; i++) {
		struct scatterlist *sg;
		for (sg = sgs[i]; sg; sg = sg_next(sg))
			total_sg++;
	}
	return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
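
/*
 * Usage sketch for virtqueue_add_sgs() (illustrative only; nothing in this
 * file calls it, and the function and argument names are hypothetical).
 * A driver queues a header the device reads and a status byte the device
 * writes, using one readable and one writable scatterlist.
 */
static int __maybe_unused example_add_request(struct virtqueue *vq,
					      void *hdr, size_t hdr_len,
					      u8 *status, void *token)
{
	struct scatterlist hdr_sg, status_sg;
	struct scatterlist *sgs[2];

	sg_init_one(&hdr_sg, hdr, hdr_len);
	sg_init_one(&status_sg, status, sizeof(*status));
	sgs[0] = &hdr_sg;	/* readable by the device */
	sgs[1] = &status_sg;	/* writable by the device */

	/* One readable list, then one writable list, then the token. */
	return virtqueue_add_sgs(vq, sgs, 1, 1, token, GFP_ATOMIC);
}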

/**
 * virtqueue_add_outbuf - expose output buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg readable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (i.e. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_outbuf(struct virtqueue *vq,
			 struct scatterlist *sg, unsigned int num,
			 void *data,
			 gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 1, 0, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);

/**
 * virtqueue_add_inbuf - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (i.e. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_inbuf(struct virtqueue *vq,
			struct scatterlist *sg, unsigned int num,
			void *data,
			gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 0, 1, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);
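
/*
 * Sketch of a receive path built on virtqueue_add_inbuf() (illustrative;
 * hypothetical helper name).  The buffer pointer doubles as the token so
 * the completion path can recover it from virtqueue_get_buf().
 */
static int __maybe_unused example_post_rx_buffer(struct virtqueue *vq,
						 void *buf, unsigned int len)
{
	struct scatterlist sg;

	sg_init_one(&sg, buf, len);
	return virtqueue_add_inbuf(vq, &sg, 1, buf, GFP_ATOMIC);
}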

/**
 * virtqueue_kick_prepare - first half of split virtqueue_kick call.
 * @vq: the struct virtqueue
 *
 * Instead of virtqueue_kick(), you can do:
 *	if (virtqueue_kick_prepare(vq))
 *		virtqueue_notify(vq);
 *
 * This is sometimes useful because the virtqueue_kick_prepare() needs
 * to be serialized, but the actual virtqueue_notify() call does not.
 */
bool virtqueue_kick_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 new, old;
	bool needs_kick;

	START_USE(vq);
	/* We need to expose available array entries before checking avail
	 * event. */
	virtio_mb(vq->weak_barriers);

	old = vq->avail_idx_shadow - vq->num_added;
	new = vq->avail_idx_shadow;
	vq->num_added = 0;

#ifdef DEBUG
	if (vq->last_add_time_valid) {
		WARN_ON(ktime_to_ms(ktime_sub(ktime_get(),
					      vq->last_add_time)) > 100);
	}
	vq->last_add_time_valid = false;
#endif

	if (vq->event) {
		needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev, vring_avail_event(&vq->vring)),
					      new, old);
	} else {
		needs_kick = !(vq->vring.used->flags & cpu_to_virtio16(_vq->vdev, VRING_USED_F_NO_NOTIFY));
	}
	END_USE(vq);
	return needs_kick;
}
EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);
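
/*
 * Sketch of the split kick (illustrative; the driver lock is hypothetical):
 * the serialized prepare step runs under the lock, while the potentially
 * expensive notify (e.g. a VM exit) happens after the lock is dropped.
 */
static void __maybe_unused example_kick_outside_lock(struct virtqueue *vq,
						     spinlock_t *lock)
{
	bool needs_kick;

	spin_lock(lock);
	needs_kick = virtqueue_kick_prepare(vq);
	spin_unlock(lock);

	if (needs_kick)
		virtqueue_notify(vq);
}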

/**
 * virtqueue_notify - second half of split virtqueue_kick call.
 * @vq: the struct virtqueue
 *
 * This does not need to be serialized.
 *
 * Returns false if host notify failed or queue is broken, otherwise true.
 */
bool virtqueue_notify(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (unlikely(vq->broken))
		return false;

	/* Prod other side to tell it about changes. */
	if (!vq->notify(_vq)) {
		vq->broken = true;
		return false;
	}
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_notify);

/**
 * virtqueue_kick - update after add_buf
 * @vq: the struct virtqueue
 *
 * After one or more virtqueue_add_* calls, invoke this to kick
 * the other side.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns false if kick failed, otherwise true.
 */
bool virtqueue_kick(struct virtqueue *vq)
{
	if (virtqueue_kick_prepare(vq))
		return virtqueue_notify(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_kick);

static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
{
	unsigned int i, j;
	u16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);

	/* Clear data ptr. */
	vq->desc_state[head].data = NULL;

	/* Put back on free list: unmap first-level descriptors and find end */
	i = head;

	while (vq->vring.desc[i].flags & nextflag) {
		vring_unmap_one(vq, &vq->vring.desc[i]);
		i = virtio16_to_cpu(vq->vq.vdev, vq->vring.desc[i].next);
		vq->vq.num_free++;
	}

	vring_unmap_one(vq, &vq->vring.desc[i]);
	vq->vring.desc[i].next = cpu_to_virtio16(vq->vq.vdev, vq->free_head);
	vq->free_head = head;

	/* Plus final descriptor */
	vq->vq.num_free++;

	/* Free the indirect table, if any, now that it's unmapped. */
	if (vq->desc_state[head].indir_desc) {
		struct vring_desc *indir_desc = vq->desc_state[head].indir_desc;
		u32 len = virtio32_to_cpu(vq->vq.vdev, vq->vring.desc[head].len);

		BUG_ON(!(vq->vring.desc[head].flags &
			 cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_INDIRECT)));
		BUG_ON(len == 0 || len % sizeof(struct vring_desc));

		for (j = 0; j < len / sizeof(struct vring_desc); j++)
			vring_unmap_one(vq, &indir_desc[j]);

		kfree(vq->desc_state[head].indir_desc);
		vq->desc_state[head].indir_desc = NULL;
	}
}

static inline bool more_used(const struct vring_virtqueue *vq)
{
	return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev, vq->vring.used->idx);
}

/**
 * virtqueue_get_buf - get the next used buffer
 * @vq: the struct virtqueue we're talking about.
 * @len: the length written into the buffer
 *
 * If the device wrote data into the buffer, @len will be set to the
 * amount written.  This means you don't need to clear the buffer
 * beforehand to ensure there's no data leakage in the case of short
 * writes.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns NULL if there are no used buffers, or the "data" token
 * handed to virtqueue_add_*().
 */
void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	void *ret;
	unsigned int i;
	u16 last_used;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* Only get used array entries after they have been exposed by host. */
	virtio_rmb(vq->weak_barriers);

	last_used = (vq->last_used_idx & (vq->vring.num - 1));
	i = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].id);
	*len = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].len);

	if (unlikely(i >= vq->vring.num)) {
		BAD_RING(vq, "id %u out of range\n", i);
		return NULL;
	}
	if (unlikely(!vq->desc_state[i].data)) {
		BAD_RING(vq, "id %u is not a head!\n", i);
		return NULL;
	}

	/* detach_buf clears data, so grab it now. */
	ret = vq->desc_state[i].data;
	detach_buf(vq, i);
	vq->last_used_idx++;
	/* If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call. */
	if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT))
		virtio_store_mb(vq->weak_barriers,
				&vring_used_event(&vq->vring),
				cpu_to_virtio16(_vq->vdev, vq->last_used_idx));

#ifdef DEBUG
	vq->last_add_time_valid = false;
#endif

	END_USE(vq);
	return ret;
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf);
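
/*
 * Sketch of a completion handler draining used buffers (illustrative only).
 * Tokens come back in the order the device consumed them, which need not
 * match the order they were added.
 */
static void __maybe_unused example_drain_used(struct virtqueue *vq)
{
	unsigned int len;
	void *token;

	while ((token = virtqueue_get_buf(vq, &len)) != NULL)
		/* len is how many bytes the device wrote into the buffer. */
		pr_debug("completed token %p, %u bytes written\n", token, len);
}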
6880a8a69ddSRusty Russell 
6895dfc1762SRusty Russell /**
6905dfc1762SRusty Russell  * virtqueue_disable_cb - disable callbacks
6915dfc1762SRusty Russell  * @vq: the struct virtqueue we're talking about.
6925dfc1762SRusty Russell  *
6935dfc1762SRusty Russell  * Note that this is not necessarily synchronous, hence unreliable and only
6945dfc1762SRusty Russell  * useful as an optimization.
6955dfc1762SRusty Russell  *
6965dfc1762SRusty Russell  * Unlike other operations, this need not be serialized.
6975dfc1762SRusty Russell  */
6987c5e9ed0SMichael S. Tsirkin void virtqueue_disable_cb(struct virtqueue *_vq)
69918445c4dSRusty Russell {
70018445c4dSRusty Russell 	struct vring_virtqueue *vq = to_vvq(_vq);
70118445c4dSRusty Russell 
702f277ec42SVenkatesh Srinivas 	if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
703f277ec42SVenkatesh Srinivas 		vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
704f277ec42SVenkatesh Srinivas 		vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
705f277ec42SVenkatesh Srinivas 	}
706f277ec42SVenkatesh Srinivas 
70718445c4dSRusty Russell }
7087c5e9ed0SMichael S. Tsirkin EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
70918445c4dSRusty Russell 
7105dfc1762SRusty Russell /**
711cc229884SMichael S. Tsirkin  * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
712cc229884SMichael S. Tsirkin  * @vq: the struct virtqueue we're talking about.
713cc229884SMichael S. Tsirkin  *
714cc229884SMichael S. Tsirkin  * This re-enables callbacks; it returns current queue state
715cc229884SMichael S. Tsirkin  * in an opaque unsigned value. This value should be later tested by
716cc229884SMichael S. Tsirkin  * virtqueue_poll, to detect a possible race between the driver checking for
717cc229884SMichael S. Tsirkin  * more work, and enabling callbacks.
718cc229884SMichael S. Tsirkin  *
719cc229884SMichael S. Tsirkin  * Caller must ensure we don't call this with other virtqueue
720cc229884SMichael S. Tsirkin  * operations at the same time (except where noted).
721cc229884SMichael S. Tsirkin  */
722cc229884SMichael S. Tsirkin unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
723cc229884SMichael S. Tsirkin {
724cc229884SMichael S. Tsirkin 	struct vring_virtqueue *vq = to_vvq(_vq);
725cc229884SMichael S. Tsirkin 	u16 last_used_idx;
726cc229884SMichael S. Tsirkin 
727cc229884SMichael S. Tsirkin 	START_USE(vq);
728cc229884SMichael S. Tsirkin 
729cc229884SMichael S. Tsirkin 	/* We optimistically turn back on interrupts, then check if there was
730cc229884SMichael S. Tsirkin 	 * more to do. */
731cc229884SMichael S. Tsirkin 	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
732cc229884SMichael S. Tsirkin 	 * either clear the flags bit or point the event index at the next
733cc229884SMichael S. Tsirkin 	 * entry. Always do both to keep code simple. */
734f277ec42SVenkatesh Srinivas 	if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
735f277ec42SVenkatesh Srinivas 		vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
736f277ec42SVenkatesh Srinivas 		vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
737f277ec42SVenkatesh Srinivas 	}
73800e6f3d9SMichael S. Tsirkin 	vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, last_used_idx = vq->last_used_idx);
739cc229884SMichael S. Tsirkin 	END_USE(vq);
740cc229884SMichael S. Tsirkin 	return last_used_idx;
741cc229884SMichael S. Tsirkin }
742cc229884SMichael S. Tsirkin EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);
743cc229884SMichael S. Tsirkin 
744cc229884SMichael S. Tsirkin /**
745cc229884SMichael S. Tsirkin  * virtqueue_poll - query pending used buffers
746cc229884SMichael S. Tsirkin  * @vq: the struct virtqueue we're talking about.
747cc229884SMichael S. Tsirkin  * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
748cc229884SMichael S. Tsirkin  *
749cc229884SMichael S. Tsirkin  * Returns "true" if there are pending used buffers in the queue.
750cc229884SMichael S. Tsirkin  *
751cc229884SMichael S. Tsirkin  * This does not need to be serialized.
752cc229884SMichael S. Tsirkin  */
753cc229884SMichael S. Tsirkin bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
754cc229884SMichael S. Tsirkin {
755cc229884SMichael S. Tsirkin 	struct vring_virtqueue *vq = to_vvq(_vq);
756cc229884SMichael S. Tsirkin 
757cc229884SMichael S. Tsirkin 	virtio_mb(vq->weak_barriers);
75800e6f3d9SMichael S. Tsirkin 	return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev, vq->vring.used->idx);
759cc229884SMichael S. Tsirkin }
760cc229884SMichael S. Tsirkin EXPORT_SYMBOL_GPL(virtqueue_poll);
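
/*
 * Sketch of the race-free re-enable pattern built from the two halves above
 * (illustrative only).  virtqueue_enable_cb() below does the same thing in
 * one call; splitting it lets a NAPI-style driver drop its lock between the
 * prepare and the poll.
 */
static bool __maybe_unused example_reenable_callbacks(struct virtqueue *vq)
{
	unsigned opaque = virtqueue_enable_cb_prepare(vq);

	if (virtqueue_poll(vq, opaque)) {
		/* A buffer arrived in the window: suppress again, keep polling. */
		virtqueue_disable_cb(vq);
		return false;
	}
	return true;	/* Callbacks armed and no work pending. */
}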

/**
 * virtqueue_enable_cb - restart callbacks after disable_cb.
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns "false" if there are pending
 * buffers in the queue, to detect a possible race between the driver
 * checking for more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb(struct virtqueue *_vq)
{
	unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);
	return !virtqueue_poll(_vq, last_used_idx);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);

/**
 * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks but hints to the other side to delay
 * interrupts until most of the available buffers have been processed;
 * it returns "false" if there are many pending buffers in the queue,
 * to detect a possible race between the driver checking for more work,
 * and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 bufs;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
	}
	/* TODO: tune this threshold */
	bufs = (u16)(vq->avail_idx_shadow - vq->last_used_idx) * 3 / 4;

	virtio_store_mb(vq->weak_barriers,
			&vring_used_event(&vq->vring),
			cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs));

	if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->vring.used->idx) - vq->last_used_idx) > bufs)) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
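
/*
 * Sketch of using the delayed variant on a TX queue (illustrative): ask for
 * an interrupt only after ~3/4 of the in-flight buffers are used instead of
 * one per buffer, falling back to immediate processing if many completions
 * are already pending (example_drain_used() is the sketch defined earlier).
 */
static void __maybe_unused example_arm_tx_interrupt(struct virtqueue *vq)
{
	if (!virtqueue_enable_cb_delayed(vq))
		example_drain_used(vq);
}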
8257ab358c2SMichael S. Tsirkin 
8265dfc1762SRusty Russell /**
8275dfc1762SRusty Russell  * virtqueue_detach_unused_buf - detach first unused buffer
8285dfc1762SRusty Russell  * @vq: the struct virtqueue we're talking about.
8295dfc1762SRusty Russell  *
830b3087e48SRusty Russell  * Returns NULL or the "data" token handed to virtqueue_add_*().
8315dfc1762SRusty Russell  * This is not valid on an active queue; it is useful only for device
8325dfc1762SRusty Russell  * shutdown.
8335dfc1762SRusty Russell  */
8347c5e9ed0SMichael S. Tsirkin void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
835c021eac4SShirley Ma {
836c021eac4SShirley Ma 	struct vring_virtqueue *vq = to_vvq(_vq);
837c021eac4SShirley Ma 	unsigned int i;
838c021eac4SShirley Ma 	void *buf;
839c021eac4SShirley Ma 
840c021eac4SShirley Ma 	START_USE(vq);
841c021eac4SShirley Ma 
842c021eac4SShirley Ma 	for (i = 0; i < vq->vring.num; i++) {
843*780bc790SAndy Lutomirski 		if (!vq->desc_state[i].data)
844c021eac4SShirley Ma 			continue;
845c021eac4SShirley Ma 		/* detach_buf clears data, so grab it now. */
846*780bc790SAndy Lutomirski 		buf = vq->desc_state[i].data;
847c021eac4SShirley Ma 		detach_buf(vq, i);
848f277ec42SVenkatesh Srinivas 		vq->avail_idx_shadow--;
849f277ec42SVenkatesh Srinivas 		vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow);
850c021eac4SShirley Ma 		END_USE(vq);
851c021eac4SShirley Ma 		return buf;
852c021eac4SShirley Ma 	}
853c021eac4SShirley Ma 	/* That should have freed everything. */
85406ca287dSRusty Russell 	BUG_ON(vq->vq.num_free != vq->vring.num);
855c021eac4SShirley Ma 
856c021eac4SShirley Ma 	END_USE(vq);
857c021eac4SShirley Ma 	return NULL;
858c021eac4SShirley Ma }
8597c5e9ed0SMichael S. Tsirkin EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
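
/*
 * Shutdown sketch (illustrative): once the device is stopped, reclaim any
 * buffers it never consumed.  This assumes the tokens are kmalloc'ed
 * buffers, as in the RX sketch above.
 */
static void __maybe_unused example_free_unused(struct virtqueue *vq)
{
	void *buf;

	while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
		kfree(buf);
}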

irqreturn_t vring_interrupt(int irq, void *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!more_used(vq)) {
		pr_debug("virtqueue interrupt with no work for %p\n", vq);
		return IRQ_NONE;
	}

	if (unlikely(vq->broken))
		return IRQ_HANDLED;

	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
	if (vq->vq.callback)
		vq->vq.callback(&vq->vq);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);

struct virtqueue *vring_new_virtqueue(unsigned int index,
				      unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      bool weak_barriers,
				      void *pages,
				      bool (*notify)(struct virtqueue *),
				      void (*callback)(struct virtqueue *),
				      const char *name)
{
	struct vring_virtqueue *vq;
	unsigned int i;

	/* We assume num is a power of 2. */
	if (num & (num - 1)) {
		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
		return NULL;
	}

	vq = kmalloc(sizeof(*vq) + num * sizeof(struct vring_desc_state),
		     GFP_KERNEL);
	if (!vq)
		return NULL;

	vring_init(&vq->vring, num, pages, vring_align);
	vq->vq.callback = callback;
	vq->vq.vdev = vdev;
	vq->vq.name = name;
	vq->vq.num_free = num;
	vq->vq.index = index;
	vq->notify = notify;
	vq->weak_barriers = weak_barriers;
	vq->broken = false;
	vq->last_used_idx = 0;
	vq->avail_flags_shadow = 0;
	vq->avail_idx_shadow = 0;
	vq->num_added = 0;
	list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
	vq->in_use = false;
	vq->last_add_time_valid = false;
#endif

	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC);
	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

	/* No callback?  Tell other side not to bother us. */
	if (!callback) {
		vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
		vq->vring.avail->flags = cpu_to_virtio16(vdev, vq->avail_flags_shadow);
	}

	/* Put everything in free lists. */
	vq->free_head = 0;
	for (i = 0; i < num-1; i++)
		vq->vring.desc[i].next = cpu_to_virtio16(vdev, i + 1);
	memset(vq->desc_state, 0, num * sizeof(struct vring_desc_state));

	return &vq->vq;
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);
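
/*
 * Creation sketch (illustrative; normally done by transport code such as
 * virtio_pci or virtio_mmio, and the alignment choice here is an
 * assumption).  The ring needs physically contiguous, zeroed memory;
 * vring_size() gives the byte count for num entries at a given alignment.
 */
static struct virtqueue * __maybe_unused
example_create_vq(struct virtio_device *vdev, unsigned int num,
		  bool (*notify)(struct virtqueue *),
		  void (*callback)(struct virtqueue *))
{
	size_t size = vring_size(num, PAGE_SIZE);
	void *pages = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					       get_order(size));
	struct virtqueue *vq;

	if (!pages)
		return NULL;

	vq = vring_new_virtqueue(0, num, PAGE_SIZE, vdev, true, pages,
				 notify, callback, "example");
	if (!vq)
		free_pages((unsigned long)pages, get_order(size));
	return vq;
}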

void vring_del_virtqueue(struct virtqueue *vq)
{
	list_del(&vq->list);
	kfree(to_vvq(vq));
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);

/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
	unsigned int i;

	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
		switch (i) {
		case VIRTIO_RING_F_INDIRECT_DESC:
			break;
		case VIRTIO_RING_F_EVENT_IDX:
			break;
		case VIRTIO_F_VERSION_1:
			break;
		default:
			/* We don't understand this bit. */
			__virtio_clear_bit(vdev, i);
		}
	}
}
EXPORT_SYMBOL_GPL(vring_transport_features);

/**
 * virtqueue_get_vring_size - return the size of the virtqueue's vring
 * @vq: the struct virtqueue containing the vring of interest.
 *
 * Returns the size of the vring.  This is mainly used for boasting to
 * userspace.  Unlike other operations, this need not be serialized.
 */
unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->vring.num;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);

bool virtqueue_is_broken(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->broken;
}
EXPORT_SYMBOL_GPL(virtqueue_is_broken);

/*
 * This should prevent the device from being used, allowing drivers to
 * recover.  You may need to grab appropriate locks to flush.
 */
void virtio_break_device(struct virtio_device *dev)
{
	struct virtqueue *_vq;

	list_for_each_entry(_vq, &dev->vqs, list) {
		struct vring_virtqueue *vq = to_vvq(_vq);
		vq->broken = true;
	}
}
EXPORT_SYMBOL_GPL(virtio_break_device);

void *virtqueue_get_avail(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->vring.avail;
}
EXPORT_SYMBOL_GPL(virtqueue_get_avail);

void *virtqueue_get_used(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->vring.used;
}
EXPORT_SYMBOL_GPL(virtqueue_get_used);

MODULE_LICENSE("GPL");