xref: /openbmc/linux/drivers/virtio/virtio_ring.c (revision a1e58bbd)
1 /* Virtio ring implementation.
2  *
3  *  Copyright 2007 Rusty Russell IBM Corporation
4  *
5  *  This program is free software; you can redistribute it and/or modify
6  *  it under the terms of the GNU General Public License as published by
7  *  the Free Software Foundation; either version 2 of the License, or
8  *  (at your option) any later version.
9  *
10  *  This program is distributed in the hope that it will be useful,
11  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
12  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  *  GNU General Public License for more details.
14  *
15  *  You should have received a copy of the GNU General Public License
16  *  along with this program; if not, write to the Free Software
17  *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
18  */
19 #include <linux/virtio.h>
20 #include <linux/virtio_ring.h>
21 #include <linux/device.h>
22 
#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(vq, fmt...)			\
	do { dev_err(&vq->vq.vdev->dev, fmt); BUG(); } while(0)
/* Callers are supposed to serialize access; record __LINE__ of the entry
 * point so a nested use panics with a useful location. */
#define START_USE(vq) \
	do { if ((vq)->in_use) panic("in_use = %i\n", (vq)->in_use); (vq)->in_use = __LINE__; mb(); } while(0)
/* Mark the queue no longer in use; BUG if it was not marked in use. */
#define END_USE(vq) \
	do { BUG_ON(!(vq)->in_use); (vq)->in_use = 0; mb(); } while(0)
#else
/* Production build: log the error and wedge the ring instead of crashing. */
#define BAD_RING(vq, fmt...)			\
	do { dev_err(&vq->vq.vdev->dev, fmt); (vq)->broken = true; } while(0)
#define START_USE(vq)
#define END_USE(vq)
#endif
37 
/* Per-queue state wrapping the generic struct virtqueue with the
 * ring layout and free-descriptor bookkeeping for this implementation. */
struct vring_virtqueue
{
	struct virtqueue vq;

	/* Actual memory layout for this queue */
	struct vring vring;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Number of free buffers */
	unsigned int num_free;
	/* Head of free buffer list (index into vring.desc, chained via .next). */
	unsigned int free_head;
	/* Number we've added since last sync (published by vring_kick). */
	unsigned int num_added;

	/* Last used index we've seen. */
	u16 last_used_idx;

	/* How to notify other side. FIXME: commonalize hcalls! */
	void (*notify)(struct virtqueue *vq);

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;
#endif

	/* Tokens for callbacks, one slot per descriptor (flexible array). */
	void *data[];
};

/* Recover our private struct from the embedded struct virtqueue. */
#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)
71 
/* Add a buffer to the queue: the first 'out' scatterlist entries are
 * device-readable, the following 'in' entries are device-writable.
 * 'data' is the caller's token, returned later by vring_get_buf().
 * Returns 0 on success, -ENOSPC if not enough free descriptors.
 * The new entry is not visible to the other side until vring_kick(). */
static int vring_add_buf(struct virtqueue *_vq,
			 struct scatterlist sg[],
			 unsigned int out,
			 unsigned int in,
			 void *data)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i, avail, head, uninitialized_var(prev);

	BUG_ON(data == NULL);
	BUG_ON(out + in > vq->vring.num);
	BUG_ON(out + in == 0);

	START_USE(vq);

	if (vq->num_free < out + in) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 out + in, vq->num_free);
		/* We notify *even if* VRING_USED_F_NO_NOTIFY is set here. */
		vq->notify(&vq->vq);
		END_USE(vq);
		return -ENOSPC;
	}

	/* We're about to use some buffers from the free list. */
	vq->num_free -= out + in;

	/* Thread the buffer through the free descriptor chain: readable
	 * entries first, then writable ones (marked VRING_DESC_F_WRITE). */
	head = vq->free_head;
	for (i = vq->free_head; out; i = vq->vring.desc[i].next, out--) {
		vq->vring.desc[i].flags = VRING_DESC_F_NEXT;
		vq->vring.desc[i].addr = sg_phys(sg);
		vq->vring.desc[i].len = sg->length;
		prev = i;
		sg++;
	}
	for (; in; i = vq->vring.desc[i].next, in--) {
		vq->vring.desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
		vq->vring.desc[i].addr = sg_phys(sg);
		vq->vring.desc[i].len = sg->length;
		prev = i;
		sg++;
	}
	/* Last one doesn't continue. */
	vq->vring.desc[prev].flags &= ~VRING_DESC_F_NEXT;

	/* Update free pointer (chain was consumed from free_head up to i). */
	vq->free_head = i;

	/* Set token. */
	vq->data[head] = data;

	/* Put entry in available array (but don't update avail->idx until they
	 * do sync).  FIXME: avoid modulus here? */
	avail = (vq->vring.avail->idx + vq->num_added++) % vq->vring.num;
	vq->vring.avail->ring[avail] = head;

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);
	return 0;
}
132 
133 static void vring_kick(struct virtqueue *_vq)
134 {
135 	struct vring_virtqueue *vq = to_vvq(_vq);
136 	START_USE(vq);
137 	/* Descriptors and available array need to be set before we expose the
138 	 * new available array entries. */
139 	wmb();
140 
141 	vq->vring.avail->idx += vq->num_added;
142 	vq->num_added = 0;
143 
144 	/* Need to update avail index before checking if we should notify */
145 	mb();
146 
147 	if (!(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY))
148 		/* Prod other side to tell it about changes. */
149 		vq->notify(&vq->vq);
150 
151 	END_USE(vq);
152 }
153 
154 static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
155 {
156 	unsigned int i;
157 
158 	/* Clear data ptr. */
159 	vq->data[head] = NULL;
160 
161 	/* Put back on free list: find end */
162 	i = head;
163 	while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) {
164 		i = vq->vring.desc[i].next;
165 		vq->num_free++;
166 	}
167 
168 	vq->vring.desc[i].next = vq->free_head;
169 	vq->free_head = head;
170 	/* Plus final descriptor */
171 	vq->num_free++;
172 }
173 
174 static inline bool more_used(const struct vring_virtqueue *vq)
175 {
176 	return vq->last_used_idx != vq->vring.used->idx;
177 }
178 
179 static void *vring_get_buf(struct virtqueue *_vq, unsigned int *len)
180 {
181 	struct vring_virtqueue *vq = to_vvq(_vq);
182 	void *ret;
183 	unsigned int i;
184 
185 	START_USE(vq);
186 
187 	if (!more_used(vq)) {
188 		pr_debug("No more buffers in queue\n");
189 		END_USE(vq);
190 		return NULL;
191 	}
192 
193 	i = vq->vring.used->ring[vq->last_used_idx%vq->vring.num].id;
194 	*len = vq->vring.used->ring[vq->last_used_idx%vq->vring.num].len;
195 
196 	if (unlikely(i >= vq->vring.num)) {
197 		BAD_RING(vq, "id %u out of range\n", i);
198 		return NULL;
199 	}
200 	if (unlikely(!vq->data[i])) {
201 		BAD_RING(vq, "id %u is not a head!\n", i);
202 		return NULL;
203 	}
204 
205 	/* detach_buf clears data, so grab it now. */
206 	ret = vq->data[i];
207 	detach_buf(vq, i);
208 	vq->last_used_idx++;
209 	END_USE(vq);
210 	return ret;
211 }
212 
213 static void vring_disable_cb(struct virtqueue *_vq)
214 {
215 	struct vring_virtqueue *vq = to_vvq(_vq);
216 
217 	vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
218 }
219 
220 static bool vring_enable_cb(struct virtqueue *_vq)
221 {
222 	struct vring_virtqueue *vq = to_vvq(_vq);
223 
224 	START_USE(vq);
225 	BUG_ON(!(vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT));
226 
227 	/* We optimistically turn back on interrupts, then check if there was
228 	 * more to do. */
229 	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
230 	mb();
231 	if (unlikely(more_used(vq))) {
232 		END_USE(vq);
233 		return false;
234 	}
235 
236 	END_USE(vq);
237 	return true;
238 }
239 
/* Interrupt handler: dispatch to the virtqueue's callback if there is
 * work pending and callbacks are enabled.  Returns IRQ_NONE only when
 * nothing was pending (shared-IRQ friendly). */
irqreturn_t vring_interrupt(int irq, void *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!more_used(vq)) {
		pr_debug("virtqueue interrupt with no work for %p\n", vq);
		return IRQ_NONE;
	}

	/* A wedged ring still claims the interrupt, but runs no callback. */
	if (unlikely(vq->broken))
		return IRQ_HANDLED;

	/* Other side may have missed us turning off the interrupt,
	 * but we should preserve disable semantic for virtio users. */
	if (unlikely(vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) {
		pr_debug("virtqueue interrupt after disable for %p\n", vq);
		return IRQ_HANDLED;
	}

	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
	if (vq->vq.callback)
		vq->vq.callback(&vq->vq);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);
266 
/* Operations table handed to drivers via vq->vq_ops. */
static struct virtqueue_ops vring_vq_ops = {
	.add_buf = vring_add_buf,
	.get_buf = vring_get_buf,
	.kick = vring_kick,
	.disable_cb = vring_disable_cb,
	.enable_cb = vring_enable_cb,
};
274 
275 struct virtqueue *vring_new_virtqueue(unsigned int num,
276 				      struct virtio_device *vdev,
277 				      void *pages,
278 				      void (*notify)(struct virtqueue *),
279 				      void (*callback)(struct virtqueue *))
280 {
281 	struct vring_virtqueue *vq;
282 	unsigned int i;
283 
284 	/* We assume num is a power of 2. */
285 	if (num & (num - 1)) {
286 		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
287 		return NULL;
288 	}
289 
290 	vq = kmalloc(sizeof(*vq) + sizeof(void *)*num, GFP_KERNEL);
291 	if (!vq)
292 		return NULL;
293 
294 	vring_init(&vq->vring, num, pages, PAGE_SIZE);
295 	vq->vq.callback = callback;
296 	vq->vq.vdev = vdev;
297 	vq->vq.vq_ops = &vring_vq_ops;
298 	vq->notify = notify;
299 	vq->broken = false;
300 	vq->last_used_idx = 0;
301 	vq->num_added = 0;
302 #ifdef DEBUG
303 	vq->in_use = false;
304 #endif
305 
306 	/* No callback?  Tell other side not to bother us. */
307 	if (!callback)
308 		vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
309 
310 	/* Put everything in free lists. */
311 	vq->num_free = num;
312 	vq->free_head = 0;
313 	for (i = 0; i < num-1; i++)
314 		vq->vring.desc[i].next = i+1;
315 
316 	return &vq->vq;
317 }
318 EXPORT_SYMBOL_GPL(vring_new_virtqueue);
319 
/* Destroy a queue made by vring_new_virtqueue().  The token array is
 * allocated inline with the struct, so a single kfree releases it all;
 * the ring pages themselves belong to the caller. */
void vring_del_virtqueue(struct virtqueue *vq)
{
	struct vring_virtqueue *vvq = to_vvq(vq);

	kfree(vvq);
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);
325 
326 MODULE_LICENSE("GPL");
327