/* Virtio ring implementation.
 *
 *  Copyright 2007 Rusty Russell IBM Corporation
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>

#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		BUG();						\
	} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)						\
	do {							\
		if ((_vq)->in_use)				\
			panic("%s:in_use = %i\n",		\
			      (_vq)->vq.name, (_vq)->in_use);	\
		(_vq)->in_use = __LINE__;			\
		mb();						\
	} while (0)
#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; mb(); } while (0)
#else
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		(_vq)->broken = true;				\
	} while (0)
#define START_USE(vq)
#define END_USE(vq)
#endif
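
/*
 * Illustrative sketch (not part of this file): the locking contract the
 * debug macros above assume.  Callers serialize all operations on a given
 * virtqueue themselves, so START_USE/END_USE only ever observe
 * single-threaded use:
 *
 *	spin_lock_irqsave(&priv->vq_lock, flags);
 *	err = vq->vq_ops->add_buf(vq, sg, out, in, token);
 *	vq->vq_ops->kick(vq);
 *	spin_unlock_irqrestore(&priv->vq_lock, flags);
 *
 * Here priv->vq_lock is a hypothetical driver-private lock; this file never
 * takes a lock of its own.
 */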

struct vring_virtqueue {
	struct virtqueue vq;

	/* Actual memory layout for this queue */
	struct vring vring;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Host supports indirect buffers */
	bool indirect;

	/* Number of free buffers */
	unsigned int num_free;
	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen. */
	u16 last_used_idx;

	/* How to notify other side. FIXME: commonalize hcalls! */
	void (*notify)(struct virtqueue *vq);

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;
#endif

	/* Tokens for callbacks. */
	void *data[];
};

#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)

/* Set up an indirect table of descriptors and add it to the queue. */
static int vring_add_indirect(struct vring_virtqueue *vq,
			      struct scatterlist sg[],
			      unsigned int out,
			      unsigned int in)
{
	struct vring_desc *desc;
	unsigned head;
	int i;

	desc = kmalloc((out + in) * sizeof(struct vring_desc), GFP_ATOMIC);
	if (!desc)
		return vq->vring.num;

	/* Transfer entries from the sg list into the indirect page */
	for (i = 0; i < out; i++) {
		desc[i].flags = VRING_DESC_F_NEXT;
		desc[i].addr = sg_phys(sg);
		desc[i].len = sg->length;
		desc[i].next = i+1;
		sg++;
	}
	for (; i < (out + in); i++) {
		desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
		desc[i].addr = sg_phys(sg);
		desc[i].len = sg->length;
		desc[i].next = i+1;
		sg++;
	}

	/* Last one doesn't continue. */
	desc[i-1].flags &= ~VRING_DESC_F_NEXT;
	desc[i-1].next = 0;

	/* We're about to use a buffer */
	vq->num_free--;

	/* Use a single buffer which doesn't continue */
	head = vq->free_head;
	vq->vring.desc[head].flags = VRING_DESC_F_INDIRECT;
	vq->vring.desc[head].addr = virt_to_phys(desc);
	vq->vring.desc[head].len = i * sizeof(struct vring_desc);

	/* Update free pointer */
	vq->free_head = vq->vring.desc[head].next;

	return head;
}
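
/*
 * Worked example (illustrative, values made up): with out = 2 and in = 1,
 * vring_add_indirect() builds a three-entry table in the kmalloc'd buffer:
 *
 *	desc[0]: addr of first out sg,  flags = NEXT,  next = 1
 *	desc[1]: addr of second out sg, flags = NEXT,  next = 2
 *	desc[2]: addr of the in sg,     flags = WRITE, next = 0
 *
 * and consumes exactly one ring descriptor, which points at the table with
 * flags = INDIRECT and len = 3 * sizeof(struct vring_desc).  The whole
 * chain therefore costs one slot of vq->num_free instead of three.
 */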

static int vring_add_buf(struct virtqueue *_vq,
			 struct scatterlist sg[],
			 unsigned int out,
			 unsigned int in,
			 void *data)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i, avail, head, uninitialized_var(prev);

	START_USE(vq);

	BUG_ON(data == NULL);

	/* If the host supports indirect descriptor tables, and we have multiple
	 * buffers, then go indirect. FIXME: tune this threshold */
	if (vq->indirect && (out + in) > 1 && vq->num_free) {
		head = vring_add_indirect(vq, sg, out, in);
		if (head != vq->vring.num)
			goto add_head;
	}

	BUG_ON(out + in > vq->vring.num);
	BUG_ON(out + in == 0);

	if (vq->num_free < out + in) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 out + in, vq->num_free);
		/* FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer.  Presumably the
		 * host should service the ring ASAP. */
		if (out)
			vq->notify(&vq->vq);
		END_USE(vq);
		return -ENOSPC;
	}

	/* We're about to use some buffers from the free list. */
	vq->num_free -= out + in;

	head = vq->free_head;
	for (i = vq->free_head; out; i = vq->vring.desc[i].next, out--) {
		vq->vring.desc[i].flags = VRING_DESC_F_NEXT;
		vq->vring.desc[i].addr = sg_phys(sg);
		vq->vring.desc[i].len = sg->length;
		prev = i;
		sg++;
	}
	for (; in; i = vq->vring.desc[i].next, in--) {
		vq->vring.desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
		vq->vring.desc[i].addr = sg_phys(sg);
		vq->vring.desc[i].len = sg->length;
		prev = i;
		sg++;
	}
	/* Last one doesn't continue. */
	vq->vring.desc[prev].flags &= ~VRING_DESC_F_NEXT;

	/* Update free pointer */
	vq->free_head = i;

add_head:
	/* Set token. */
	vq->data[head] = data;

	/* Put entry in available array (but don't update avail->idx until they
	 * do sync).  FIXME: avoid modulus here? */
	avail = (vq->vring.avail->idx + vq->num_added++) % vq->vring.num;
	vq->vring.avail->ring[avail] = head;

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	/* If we're indirect, we can fit many (assuming not OOM). */
	if (vq->indirect)
		return vq->num_free ? vq->vring.num : 0;
	return vq->num_free;
}
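
/*
 * Illustrative caller (not part of this file): how a driver typically feeds
 * this function through the vq_ops table.  The example_hdr/example_buf names
 * are made up for the sketch; only the sg conventions (readable entries
 * first, then writable ones) come from the code above.
 *
 *	struct scatterlist sg[2];
 *
 *	sg_init_table(sg, 2);
 *	sg_set_buf(&sg[0], example_hdr, sizeof(*example_hdr));
 *	sg_set_buf(&sg[1], example_buf, PAGE_SIZE);
 *	if (vq->vq_ops->add_buf(vq, sg, 1, 1, example_hdr) < 0)
 *		... ring is full, retry after a used buffer comes back ...
 *	vq->vq_ops->kick(vq);
 *
 * add_buf() only publishes entries in the available array; the host is not
 * told about them until the kick below updates avail->idx.
 */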

static void vring_kick(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	START_USE(vq);
	/* Descriptors and available array need to be set before we expose the
	 * new available array entries. */
	wmb();

	vq->vring.avail->idx += vq->num_added;
	vq->num_added = 0;

	/* Need to update avail index before checking if we should notify */
	mb();

	if (!(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY))
		/* Prod other side to tell it about changes. */
		vq->notify(&vq->vq);

	END_USE(vq);
}
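
/*
 * Batching sketch (illustrative): because add_buf() only accumulates
 * num_added and kick() publishes them all at once, a driver can amortize a
 * possibly expensive notify across several buffers:
 *
 *	for (i = 0; i < n; i++)
 *		vq->vq_ops->add_buf(vq, sgs[i], 1, 0, tokens[i]);
 *	vq->vq_ops->kick(vq);
 *
 * A host that is already processing the ring can also suppress the notify
 * entirely by setting VRING_USED_F_NO_NOTIFY, which the check above honours.
 */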

static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
{
	unsigned int i;

	/* Clear data ptr. */
	vq->data[head] = NULL;

	/* Put back on free list: find end */
	i = head;

	/* Free the indirect table */
	if (vq->vring.desc[i].flags & VRING_DESC_F_INDIRECT)
		kfree(phys_to_virt(vq->vring.desc[i].addr));

	while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) {
		i = vq->vring.desc[i].next;
		vq->num_free++;
	}

	vq->vring.desc[i].next = vq->free_head;
	vq->free_head = head;
	/* Plus final descriptor */
	vq->num_free++;
}
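
/*
 * Free-list worked example (illustrative): suppose free_head is 7 and the
 * chain being returned is 2 -> 3 -> 4.  The walk above stops at descriptor
 * 4 (no NEXT flag), sets desc[4].next = 7, and makes free_head = 2, so the
 * free list now reads 2 -> 3 -> 4 -> 7 -> ... and num_free grew by three.
 * The list is threaded through the descriptors themselves; no extra memory
 * is needed to track free slots.
 */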

static inline bool more_used(const struct vring_virtqueue *vq)
{
	return vq->last_used_idx != vq->vring.used->idx;
}

static void *vring_get_buf(struct virtqueue *_vq, unsigned int *len)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	void *ret;
	unsigned int i;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* Only get used array entries after they have been exposed by host. */
	rmb();

	i = vq->vring.used->ring[vq->last_used_idx % vq->vring.num].id;
	*len = vq->vring.used->ring[vq->last_used_idx % vq->vring.num].len;

	if (unlikely(i >= vq->vring.num)) {
		BAD_RING(vq, "id %u out of range\n", i);
		return NULL;
	}
	if (unlikely(!vq->data[i])) {
		BAD_RING(vq, "id %u is not a head!\n", i);
		return NULL;
	}

	/* detach_buf clears data, so grab it now. */
	ret = vq->data[i];
	detach_buf(vq, i);
	vq->last_used_idx++;
	END_USE(vq);
	return ret;
}

static void vring_disable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}

static bool vring_enable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
	mb();
	if (unlikely(more_used(vq))) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}
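
/*
 * Illustrative consumer loop (not part of this file): the intended way to
 * use disable_cb/enable_cb without losing a completion.  A driver's
 * callback typically disables further callbacks, drains the used ring, and
 * only stops once enable_cb() returns true, i.e. no buffer raced in while
 * interrupts were being re-enabled:
 *
 *	static void example_callback(struct virtqueue *vq)
 *	{
 *		void *token;
 *		unsigned int len;
 *
 *		do {
 *			vq->vq_ops->disable_cb(vq);
 *			while ((token = vq->vq_ops->get_buf(vq, &len)))
 *				example_complete(token, len);
 *		} while (!vq->vq_ops->enable_cb(vq));
 *	}
 *
 * example_complete() is a hypothetical driver routine; the loop shape is
 * what matters.
 */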

irqreturn_t vring_interrupt(int irq, void *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!more_used(vq)) {
		pr_debug("virtqueue interrupt with no work for %p\n", vq);
		return IRQ_NONE;
	}

	if (unlikely(vq->broken))
		return IRQ_HANDLED;

	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
	if (vq->vq.callback)
		vq->vq.callback(&vq->vq);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);

static struct virtqueue_ops vring_vq_ops = {
	.add_buf = vring_add_buf,
	.get_buf = vring_get_buf,
	.kick = vring_kick,
	.disable_cb = vring_disable_cb,
	.enable_cb = vring_enable_cb,
};

struct virtqueue *vring_new_virtqueue(unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      void *pages,
				      void (*notify)(struct virtqueue *),
				      void (*callback)(struct virtqueue *),
				      const char *name)
{
	struct vring_virtqueue *vq;
	unsigned int i;

	/* We assume num is a power of 2. */
	if (num & (num - 1)) {
		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
		return NULL;
	}

	vq = kmalloc(sizeof(*vq) + sizeof(void *)*num, GFP_KERNEL);
	if (!vq)
		return NULL;

	vring_init(&vq->vring, num, pages, vring_align);
	vq->vq.callback = callback;
	vq->vq.vdev = vdev;
	vq->vq.vq_ops = &vring_vq_ops;
	vq->vq.name = name;
	vq->notify = notify;
	vq->broken = false;
	vq->last_used_idx = 0;
	vq->num_added = 0;
	list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
	vq->in_use = false;
#endif

	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC);

	/* No callback?  Tell other side not to bother us. */
	if (!callback)
		vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;

	/* Put everything in free lists. */
	vq->num_free = num;
	vq->free_head = 0;
	for (i = 0; i < num-1; i++)
		vq->vring.desc[i].next = i+1;

	return &vq->vq;
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);
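
/*
 * Transport-side sketch (illustrative): how a bus like virtio_pci might
 * create a virtqueue with this function.  The 4096 alignment and the
 * queue-size query are stand-ins for whatever the real transport
 * negotiates; pages must be contiguous, zeroed memory of at least
 * vring_size(num, align) bytes, and the power-of-2 check above rejects
 * other sizes.
 *
 *	num = example_read_queue_size(vdev);
 *	pages = alloc_pages_exact(vring_size(num, 4096),
 *				  GFP_KERNEL | __GFP_ZERO);
 *	if (!pages)
 *		return ERR_PTR(-ENOMEM);
 *	vq = vring_new_virtqueue(num, 4096, vdev, pages,
 *				 example_notify, callback, name);
 *	if (!vq)
 *		free_pages_exact(pages, vring_size(num, 4096));
 *
 * example_read_queue_size() and example_notify() are hypothetical transport
 * hooks.  Zeroed pages matter: vring_init() points the avail and used rings
 * into them, and both idx fields must start out at 0.
 */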

void vring_del_virtqueue(struct virtqueue *vq)
{
	list_del(&vq->list);
	kfree(to_vvq(vq));
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);

/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
	unsigned int i;

	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
		switch (i) {
		case VIRTIO_RING_F_INDIRECT_DESC:
			break;
		default:
			/* We don't understand this bit. */
			clear_bit(i, vdev->features);
		}
	}
}
EXPORT_SYMBOL_GPL(vring_transport_features);
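
/*
 * Illustrative use (not part of this file): a transport calls this from its
 * finalize_features hook so that only ring features it understands survive
 * negotiation, then applies any transport-specific handling of its own:
 *
 *	static void example_finalize_features(struct virtio_device *vdev)
 *	{
 *		vring_transport_features(vdev);
 *		... write vdev->features back to the device ...
 *	}
 *
 * example_finalize_features() is a hypothetical config-ops method; of the
 * transport bits, only VIRTIO_RING_F_INDIRECT_DESC is kept here.
 */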

MODULE_LICENSE("GPL");