xref: /openbmc/linux/drivers/virtio/virtio_ring.c (revision 9b9c2cd4)
/* Virtio ring implementation.
 *
 *  Copyright 2007 Rusty Russell IBM Corporation
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/kmemleak.h>

#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		BUG();						\
	} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)						\
	do {							\
		if ((_vq)->in_use)				\
			panic("%s:in_use = %i\n",		\
			      (_vq)->vq.name, (_vq)->in_use);	\
		(_vq)->in_use = __LINE__;			\
	} while (0)
#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while (0)
#else
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&_vq->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		(_vq)->broken = true;				\
	} while (0)
#define START_USE(vq)
#define END_USE(vq)
#endif

struct vring_virtqueue {
	struct virtqueue vq;

	/* Actual memory layout for this queue */
	struct vring vring;

	/* Can we use weak barriers? */
	bool weak_barriers;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Host supports indirect buffers */
	bool indirect;

	/* Host publishes avail event idx */
	bool event;

	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen. */
	u16 last_used_idx;

	/* Last written value to avail->flags */
	u16 avail_flags_shadow;

	/* Last written value to avail->idx in guest byte order */
	u16 avail_idx_shadow;

	/* How to notify other side. FIXME: commonalize hcalls! */
	bool (*notify)(struct virtqueue *vq);

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;

	/* Figure out if their kicks are too delayed. */
	bool last_add_time_valid;
	ktime_t last_add_time;
#endif

	/* Tokens for callbacks. */
	void *data[];
};

#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)

static struct vring_desc *alloc_indirect(struct virtqueue *_vq,
					 unsigned int total_sg, gfp_t gfp)
{
	struct vring_desc *desc;
	unsigned int i;

	/*
	 * We require lowmem mappings for the descriptors because
	 * otherwise virt_to_phys will give us bogus addresses in the
	 * virtqueue.
	 */
	gfp &= ~__GFP_HIGHMEM;

	desc = kmalloc(total_sg * sizeof(struct vring_desc), gfp);
	if (!desc)
		return NULL;

	for (i = 0; i < total_sg; i++)
		desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1);
	return desc;
}

static inline int virtqueue_add(struct virtqueue *_vq,
				struct scatterlist *sgs[],
				unsigned int total_sg,
				unsigned int out_sgs,
				unsigned int in_sgs,
				void *data,
				gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	struct scatterlist *sg;
	struct vring_desc *desc;
	unsigned int i, n, avail, descs_used, uninitialized_var(prev);
	int head;
	bool indirect;

	START_USE(vq);

	BUG_ON(data == NULL);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return -EIO;
	}

#ifdef DEBUG
	{
		ktime_t now = ktime_get();

		/* No kick or get, with .1 second between?  Warn. */
		if (vq->last_add_time_valid)
			WARN_ON(ktime_to_ms(ktime_sub(now, vq->last_add_time))
					    > 100);
		vq->last_add_time = now;
		vq->last_add_time_valid = true;
	}
#endif

	BUG_ON(total_sg > vq->vring.num);
	BUG_ON(total_sg == 0);

	head = vq->free_head;

	/* If the host supports indirect descriptor tables, and we have multiple
	 * buffers, then go indirect. FIXME: tune this threshold */
	if (vq->indirect && total_sg > 1 && vq->vq.num_free)
		desc = alloc_indirect(_vq, total_sg, gfp);
	else
		desc = NULL;

	if (desc) {
		/* Use a single buffer which doesn't continue */
		vq->vring.desc[head].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_INDIRECT);
		vq->vring.desc[head].addr = cpu_to_virtio64(_vq->vdev, virt_to_phys(desc));
		/* avoid kmemleak false positive (hidden by virt_to_phys) */
		kmemleak_ignore(desc);
		vq->vring.desc[head].len = cpu_to_virtio32(_vq->vdev, total_sg * sizeof(struct vring_desc));

		/* Set up rest to use this indirect table. */
		i = 0;
		descs_used = 1;
		indirect = true;
	} else {
		desc = vq->vring.desc;
		i = head;
		descs_used = total_sg;
		indirect = false;
	}

	if (vq->vq.num_free < descs_used) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 descs_used, vq->vq.num_free);
		/* FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer.  Presumably the
		 * host should service the ring ASAP. */
		if (out_sgs)
			vq->notify(&vq->vq);
		END_USE(vq);
		return -ENOSPC;
	}

	/* We're about to use some buffers from the free list. */
	vq->vq.num_free -= descs_used;

	for (n = 0; n < out_sgs; n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT);
			desc[i].addr = cpu_to_virtio64(_vq->vdev, sg_phys(sg));
			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
			prev = i;
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		}
	}
	for (; n < (out_sgs + in_sgs); n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT | VRING_DESC_F_WRITE);
			desc[i].addr = cpu_to_virtio64(_vq->vdev, sg_phys(sg));
			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
			prev = i;
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		}
	}
	/* Last one doesn't continue. */
	desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);

	/* Update free pointer */
	if (indirect)
		vq->free_head = virtio16_to_cpu(_vq->vdev, vq->vring.desc[head].next);
	else
		vq->free_head = i;

	/* Set token. */
	vq->data[head] = data;

	/* Put entry in available array (but don't update avail->idx until they
	 * do sync). */
	avail = vq->avail_idx_shadow & (vq->vring.num - 1);
	vq->vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);

	/* Descriptors and available array need to be set before we expose the
	 * new available array entries. */
	virtio_wmb(vq->weak_barriers);
	vq->avail_idx_shadow++;
	vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow);
	vq->num_added++;

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	/* This is very unlikely, but theoretically possible.  Kick
	 * just in case. */
	if (unlikely(vq->num_added == (1 << 16) - 1))
		virtqueue_kick(_vq);

	return 0;
}

/**
 * virtqueue_add_sgs - expose buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sgs: array of terminated scatterlists.
 * @out_sgs: the number of scatterlists readable by other side
 * @in_sgs: the number of scatterlists which are writable (after readable ones)
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (e.g. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_sgs(struct virtqueue *_vq,
		      struct scatterlist *sgs[],
		      unsigned int out_sgs,
		      unsigned int in_sgs,
		      void *data,
		      gfp_t gfp)
{
	unsigned int i, total_sg = 0;

	/* Count them first. */
	for (i = 0; i < out_sgs + in_sgs; i++) {
		struct scatterlist *sg;
		for (sg = sgs[i]; sg; sg = sg_next(sg))
			total_sg++;
	}
	return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
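
/*
 * A minimal usage sketch for virtqueue_add_sgs() (hypothetical driver code,
 * not part of this file): one scatterlist readable by the device followed by
 * one writable scatterlist, roughly the shape of a virtio-blk request.  The
 * names req, hdr and status are illustrative assumptions only.
 *
 *	struct scatterlist hdr, status, *sgs[2];
 *
 *	sg_init_one(&hdr, &req->hdr, sizeof(req->hdr));
 *	sg_init_one(&status, &req->status, sizeof(req->status));
 *	sgs[0] = &hdr;		// readable by the device
 *	sgs[1] = &status;	// writable by the device
 *
 *	err = virtqueue_add_sgs(vq, sgs, 1, 1, req, GFP_ATOMIC);
 *	if (!err)
 *		virtqueue_kick(vq);
 */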

/**
 * virtqueue_add_outbuf - expose output buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg readable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (e.g. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_outbuf(struct virtqueue *vq,
			 struct scatterlist *sg, unsigned int num,
			 void *data,
			 gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 1, 0, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);
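
/*
 * A minimal usage sketch for virtqueue_add_outbuf() (hypothetical driver
 * code): expose a single device-readable buffer "buf" of "len" bytes and
 * kick the device.  An -ENOSPC return simply means the ring is full.
 *
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, buf, len);
 *	if (virtqueue_add_outbuf(vq, &sg, 1, buf, GFP_KERNEL) == 0)
 *		virtqueue_kick(vq);
 */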

/**
 * virtqueue_add_inbuf - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (e.g. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_inbuf(struct virtqueue *vq,
			struct scatterlist *sg, unsigned int num,
			void *data,
			gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 0, 1, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);
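
/*
 * A minimal usage sketch for virtqueue_add_inbuf() (hypothetical driver
 * code): post an empty receive buffer for the device to fill; the buffer
 * pointer doubles as the token later returned by virtqueue_get_buf().
 * rx_buf and RX_BUF_SIZE are assumptions of this example.
 *
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, rx_buf, RX_BUF_SIZE);
 *	virtqueue_add_inbuf(vq, &sg, 1, rx_buf, GFP_ATOMIC);
 *	virtqueue_kick(vq);
 */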

/**
 * virtqueue_kick_prepare - first half of split virtqueue_kick call.
 * @vq: the struct virtqueue
 *
 * Instead of virtqueue_kick(), you can do:
 *	if (virtqueue_kick_prepare(vq))
 *		virtqueue_notify(vq);
 *
 * This is sometimes useful because virtqueue_kick_prepare() needs
 * to be serialized, but the actual virtqueue_notify() call does not.
 */
bool virtqueue_kick_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 new, old;
	bool needs_kick;

	START_USE(vq);
	/* We need to expose available array entries before checking avail
	 * event. */
	virtio_mb(vq->weak_barriers);

	old = vq->avail_idx_shadow - vq->num_added;
	new = vq->avail_idx_shadow;
	vq->num_added = 0;

#ifdef DEBUG
	if (vq->last_add_time_valid) {
		WARN_ON(ktime_to_ms(ktime_sub(ktime_get(),
					      vq->last_add_time)) > 100);
	}
	vq->last_add_time_valid = false;
#endif

	if (vq->event) {
		needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev, vring_avail_event(&vq->vring)),
					      new, old);
	} else {
		needs_kick = !(vq->vring.used->flags & cpu_to_virtio16(_vq->vdev, VRING_USED_F_NO_NOTIFY));
	}
	END_USE(vq);
	return needs_kick;
}
EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);
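
/*
 * A sketch of the split kick pattern (hypothetical driver code): the
 * serialized prepare step runs under the driver's own lock, while the
 * potentially slow notification (e.g. an MMIO write or hypercall) happens
 * after the lock is dropped.  priv->lock, sg, buf and flags are assumptions
 * of this example.
 *
 *	spin_lock_irqsave(&priv->lock, flags);
 *	virtqueue_add_outbuf(vq, &sg, 1, buf, GFP_ATOMIC);
 *	kick = virtqueue_kick_prepare(vq);
 *	spin_unlock_irqrestore(&priv->lock, flags);
 *	if (kick)
 *		virtqueue_notify(vq);
 */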

/**
 * virtqueue_notify - second half of split virtqueue_kick call.
 * @vq: the struct virtqueue
 *
 * This does not need to be serialized.
 *
 * Returns false if host notify failed or queue is broken, otherwise true.
 */
bool virtqueue_notify(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (unlikely(vq->broken))
		return false;

	/* Prod other side to tell it about changes. */
	if (!vq->notify(_vq)) {
		vq->broken = true;
		return false;
	}
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_notify);

/**
 * virtqueue_kick - update after add_buf
 * @vq: the struct virtqueue
 *
 * After one or more virtqueue_add_* calls, invoke this to kick
 * the other side.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns false if kick failed, otherwise true.
 */
bool virtqueue_kick(struct virtqueue *vq)
{
	if (virtqueue_kick_prepare(vq))
		return virtqueue_notify(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_kick);

static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
{
	unsigned int i;

	/* Clear data ptr. */
	vq->data[head] = NULL;

	/* Put back on free list: find end */
	i = head;

	/* Free the indirect table */
	if (vq->vring.desc[i].flags & cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_INDIRECT))
		kfree(phys_to_virt(virtio64_to_cpu(vq->vq.vdev, vq->vring.desc[i].addr)));

	while (vq->vring.desc[i].flags & cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT)) {
		i = virtio16_to_cpu(vq->vq.vdev, vq->vring.desc[i].next);
		vq->vq.num_free++;
	}

	vq->vring.desc[i].next = cpu_to_virtio16(vq->vq.vdev, vq->free_head);
	vq->free_head = head;
	/* Plus final descriptor */
	vq->vq.num_free++;
}

static inline bool more_used(const struct vring_virtqueue *vq)
{
	return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev, vq->vring.used->idx);
}

/**
 * virtqueue_get_buf - get the next used buffer
 * @vq: the struct virtqueue we're talking about.
 * @len: the length written into the buffer
 *
 * If the device wrote data into the buffer, @len will be set to the
 * amount written.  This means you don't need to clear the buffer
 * beforehand to ensure there's no data leakage in the case of short
 * writes.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns NULL if there are no used buffers, or the "data" token
 * handed to virtqueue_add_*().
 */
void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	void *ret;
	unsigned int i;
	u16 last_used;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* Only get used array entries after they have been exposed by host. */
	virtio_rmb(vq->weak_barriers);

	last_used = (vq->last_used_idx & (vq->vring.num - 1));
	i = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].id);
	*len = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].len);

	if (unlikely(i >= vq->vring.num)) {
		BAD_RING(vq, "id %u out of range\n", i);
		return NULL;
	}
	if (unlikely(!vq->data[i])) {
		BAD_RING(vq, "id %u is not a head!\n", i);
		return NULL;
	}

	/* detach_buf clears data, so grab it now. */
	ret = vq->data[i];
	detach_buf(vq, i);
	vq->last_used_idx++;
	/* If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call. */
	if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
		vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, vq->last_used_idx);
		virtio_mb(vq->weak_barriers);
	}

#ifdef DEBUG
	vq->last_add_time_valid = false;
#endif

	END_USE(vq);
	return ret;
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf);
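
/*
 * A minimal completion sketch (hypothetical driver code): drain all used
 * buffers from the virtqueue callback, using the token handed to
 * virtqueue_add_*() to find the original request.  complete_request() is an
 * assumed driver-specific helper.
 *
 *	static void my_vq_callback(struct virtqueue *vq)
 *	{
 *		unsigned int len;
 *		void *token;
 *
 *		while ((token = virtqueue_get_buf(vq, &len)) != NULL)
 *			complete_request(token, len);
 *	}
 */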

/**
 * virtqueue_disable_cb - disable callbacks
 * @vq: the struct virtqueue we're talking about.
 *
 * Note that this is not necessarily synchronous, hence unreliable and only
 * useful as an optimization.
 *
 * Unlike other operations, this need not be serialized.
 */
void virtqueue_disable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
		vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
		vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
	}
}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);

/**
 * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns current queue state
 * in an opaque unsigned value. This value should be later tested by
 * virtqueue_poll, to detect a possible race between the driver checking for
 * more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 last_used_idx;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
	}
	vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, last_used_idx = vq->last_used_idx);
	END_USE(vq);
	return last_used_idx;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);

/**
 * virtqueue_poll - query pending used buffers
 * @vq: the struct virtqueue we're talking about.
 * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
 *
 * Returns "true" if there are pending used buffers in the queue.
 *
 * This does not need to be serialized.
 */
bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	virtio_mb(vq->weak_barriers);
	return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev, vq->vring.used->idx);
}
EXPORT_SYMBOL_GPL(virtqueue_poll);

/**
 * virtqueue_enable_cb - restart callbacks after disable_cb.
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns "false" if there are pending
 * buffers in the queue, to detect a possible race between the driver
 * checking for more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb(struct virtqueue *_vq)
{
	unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);
	return !virtqueue_poll(_vq, last_used_idx);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
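
/*
 * A sketch of the race-free re-enable pattern (hypothetical driver code):
 * keep draining while callbacks are off, and only stop once
 * virtqueue_enable_cb() confirms nothing new arrived in the meantime.
 * complete_request(), token and len are assumptions of this example.
 *
 *	do {
 *		virtqueue_disable_cb(vq);
 *		while ((token = virtqueue_get_buf(vq, &len)) != NULL)
 *			complete_request(token, len);
 *	} while (!virtqueue_enable_cb(vq));
 */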

/**
 * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks but hints to the other side to delay
 * interrupts until most of the available buffers have been processed;
 * it returns "false" if there are many pending buffers in the queue,
 * to detect a possible race between the driver checking for more work,
 * and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 bufs;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
	}
	/* TODO: tune this threshold */
	bufs = (u16)(vq->avail_idx_shadow - vq->last_used_idx) * 3 / 4;
	vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs);
	virtio_mb(vq->weak_barriers);
	if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->vring.used->idx) - vq->last_used_idx) > bufs)) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
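
/*
 * A sketch of delayed callback re-enabling (hypothetical driver code), as
 * used on transmit-style queues where one interrupt for a batch of
 * completions is enough.  free_sent_buffers() is an assumed helper that
 * drains completions with virtqueue_get_buf().
 *
 *	free_sent_buffers(vq);
 *	if (!virtqueue_enable_cb_delayed(vq))
 *		free_sent_buffers(vq);	// more completed while re-enabling
 */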

/**
 * virtqueue_detach_unused_buf - detach first unused buffer
 * @vq: the struct virtqueue we're talking about.
 *
 * Returns NULL or the "data" token handed to virtqueue_add_*().
 * This is not valid on an active queue; it is useful only for device
 * shutdown.
 */
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i;
	void *buf;

	START_USE(vq);

	for (i = 0; i < vq->vring.num; i++) {
		if (!vq->data[i])
			continue;
		/* detach_buf clears data, so grab it now. */
		buf = vq->data[i];
		detach_buf(vq, i);
		vq->avail_idx_shadow--;
		vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow);
		END_USE(vq);
		return buf;
	}
	/* That should have freed everything. */
	BUG_ON(vq->vq.num_free != vq->vring.num);

	END_USE(vq);
	return NULL;
}
EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
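
/*
 * A shutdown sketch (hypothetical driver code): after the device has been
 * reset so the queue is no longer active, reclaim any buffers that were
 * added but never used by the device.  This assumes the tokens were
 * kmalloc'ed buffers owned by the driver.
 *
 *	void *buf;
 *
 *	while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
 *		kfree(buf);
 */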

irqreturn_t vring_interrupt(int irq, void *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!more_used(vq)) {
		pr_debug("virtqueue interrupt with no work for %p\n", vq);
		return IRQ_NONE;
	}

	if (unlikely(vq->broken))
		return IRQ_HANDLED;

	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
	if (vq->vq.callback)
		vq->vq.callback(&vq->vq);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);

struct virtqueue *vring_new_virtqueue(unsigned int index,
				      unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      bool weak_barriers,
				      void *pages,
				      bool (*notify)(struct virtqueue *),
				      void (*callback)(struct virtqueue *),
				      const char *name)
{
	struct vring_virtqueue *vq;
	unsigned int i;

	/* We assume num is a power of 2. */
	if (num & (num - 1)) {
		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
		return NULL;
	}

	vq = kmalloc(sizeof(*vq) + sizeof(void *)*num, GFP_KERNEL);
	if (!vq)
		return NULL;

	vring_init(&vq->vring, num, pages, vring_align);
	vq->vq.callback = callback;
	vq->vq.vdev = vdev;
	vq->vq.name = name;
	vq->vq.num_free = num;
	vq->vq.index = index;
	vq->notify = notify;
	vq->weak_barriers = weak_barriers;
	vq->broken = false;
	vq->last_used_idx = 0;
	vq->avail_flags_shadow = 0;
	vq->avail_idx_shadow = 0;
	vq->num_added = 0;
	list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
	vq->in_use = false;
	vq->last_add_time_valid = false;
#endif

	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC);
	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

	/* No callback?  Tell other side not to bother us. */
	if (!callback) {
		vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
		vq->vring.avail->flags = cpu_to_virtio16(vdev, vq->avail_flags_shadow);
	}

	/* Put everything in free lists. */
	vq->free_head = 0;
	for (i = 0; i < num-1; i++) {
		vq->vring.desc[i].next = cpu_to_virtio16(vdev, i + 1);
		vq->data[i] = NULL;
	}
	vq->data[i] = NULL;

	return &vq->vq;
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);
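
/*
 * A transport-side sketch (hypothetical, loosely modelled on legacy
 * virtio-pci usage): allocate page-aligned ring memory, then wrap it in a
 * virtqueue.  The "queue" buffer, vp_notify() callback and PAGE_SIZE
 * alignment are assumptions of this example, not requirements of this file.
 *
 *	queue = alloc_pages_exact(vring_size(num, PAGE_SIZE),
 *				  GFP_KERNEL | __GFP_ZERO);
 *	vq = vring_new_virtqueue(index, num, PAGE_SIZE, vdev,
 *				 true, queue, vp_notify, callback, name);
 *	if (!vq)
 *		free_pages_exact(queue, vring_size(num, PAGE_SIZE));
 */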

void vring_del_virtqueue(struct virtqueue *vq)
{
	list_del(&vq->list);
	kfree(to_vvq(vq));
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);

/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
	unsigned int i;

	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
		switch (i) {
		case VIRTIO_RING_F_INDIRECT_DESC:
			break;
		case VIRTIO_RING_F_EVENT_IDX:
			break;
		case VIRTIO_F_VERSION_1:
			break;
		default:
			/* We don't understand this bit. */
			__virtio_clear_bit(vdev, i);
		}
	}
}
EXPORT_SYMBOL_GPL(vring_transport_features);

/**
 * virtqueue_get_vring_size - return the size of the virtqueue's vring
 * @vq: the struct virtqueue containing the vring of interest.
 *
 * Returns the size of the vring.  This is mainly used for boasting to
 * userspace.  Unlike other operations, this need not be serialized.
 */
unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->vring.num;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);

bool virtqueue_is_broken(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->broken;
}
EXPORT_SYMBOL_GPL(virtqueue_is_broken);

/*
 * This should prevent the device from being used, allowing drivers to
 * recover.  You may need to grab appropriate locks to flush.
 */
void virtio_break_device(struct virtio_device *dev)
{
	struct virtqueue *_vq;

	list_for_each_entry(_vq, &dev->vqs, list) {
		struct vring_virtqueue *vq = to_vvq(_vq);
		vq->broken = true;
	}
}
EXPORT_SYMBOL_GPL(virtio_break_device);

void *virtqueue_get_avail(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->vring.avail;
}
EXPORT_SYMBOL_GPL(virtqueue_get_avail);

void *virtqueue_get_used(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->vring.used;
}
EXPORT_SYMBOL_GPL(virtqueue_get_used);

MODULE_LICENSE("GPL");