xref: /openbmc/linux/drivers/virtio/virtio_ring.c (revision 5f5bac82)
/* Virtio ring implementation.
 *
 *  Copyright 2007 Rusty Russell IBM Corporation
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>		/* kmalloc()/kfree() for indirect tables */

#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		BUG();						\
	} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)						\
	do {							\
		if ((_vq)->in_use)				\
			panic("%s:in_use = %i\n",		\
			      (_vq)->vq.name, (_vq)->in_use);	\
		(_vq)->in_use = __LINE__;			\
		mb();						\
	} while (0)
#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; mb(); } while (0)
#else
/* In production, mark the ring broken instead of crashing. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		(_vq)->broken = true;				\
	} while (0)
#define START_USE(vq)
#define END_USE(vq)
#endif

struct vring_virtqueue {
	struct virtqueue vq;

	/* Actual memory layout for this queue */
	struct vring vring;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Host supports indirect buffers */
	bool indirect;

	/* Number of free buffers */
	unsigned int num_free;
	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen. */
	u16 last_used_idx;

	/* How to notify other side. FIXME: commonalize hcalls! */
	void (*notify)(struct virtqueue *vq);

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;
#endif

	/* Tokens for callbacks. */
	void *data[];
};

#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)

/* Set up an indirect table of descriptors and add it to the queue. */
static int vring_add_indirect(struct vring_virtqueue *vq,
			      struct scatterlist sg[],
			      unsigned int out,
			      unsigned int in)
{
	struct vring_desc *desc;
	unsigned head;
	int i;

	desc = kmalloc((out + in) * sizeof(struct vring_desc), GFP_ATOMIC);
	if (!desc)
		return vq->vring.num;	/* "full" sentinel: caller falls back
					 * to direct descriptors. */

	/* Transfer entries from the sg list into the indirect page */
	for (i = 0; i < out; i++) {
		desc[i].flags = VRING_DESC_F_NEXT;
		desc[i].addr = sg_phys(sg);
		desc[i].len = sg->length;
		desc[i].next = i+1;
		sg++;
	}
	for (; i < (out + in); i++) {
		desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
		desc[i].addr = sg_phys(sg);
		desc[i].len = sg->length;
		desc[i].next = i+1;
		sg++;
	}

	/* Last one doesn't continue. */
	desc[i-1].flags &= ~VRING_DESC_F_NEXT;
	desc[i-1].next = 0;

	/* We're about to use a buffer */
	vq->num_free--;

	/* Use a single buffer which doesn't continue */
	head = vq->free_head;
	vq->vring.desc[head].flags = VRING_DESC_F_INDIRECT;
	vq->vring.desc[head].addr = virt_to_phys(desc);
	vq->vring.desc[head].len = i * sizeof(struct vring_desc);

	/* Update free pointer */
	vq->free_head = vq->vring.desc[head].next;

	return head;
}

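/*
 * Illustrative sketch (not code from the driver API): with out = 2 and
 * in = 1, the indirect table built above looks like
 *
 *	desc[0]: addr=sg_phys(sg0), len=sg0->length, flags=NEXT,  next=1
 *	desc[1]: addr=sg_phys(sg1), len=sg1->length, flags=NEXT,  next=2
 *	desc[2]: addr=sg_phys(sg2), len=sg2->length, flags=WRITE, next=0
 *
 * and the ring itself consumes only one descriptor, flagged INDIRECT,
 * whose addr points at this table.  That is why num_free is decremented
 * by just one on this path.
 */
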
static int vring_add_buf(struct virtqueue *_vq,
			 struct scatterlist sg[],
			 unsigned int out,
			 unsigned int in,
			 void *data)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i, avail, head, uninitialized_var(prev);

	START_USE(vq);

	BUG_ON(data == NULL);

	/* If the host supports indirect descriptor tables, and we have multiple
	 * buffers, then go indirect. FIXME: tune this threshold */
	if (vq->indirect && (out + in) > 1 && vq->num_free) {
		head = vring_add_indirect(vq, sg, out, in);
		if (head != vq->vring.num)
			goto add_head;
	}

	BUG_ON(out + in > vq->vring.num);
	BUG_ON(out + in == 0);

	if (vq->num_free < out + in) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 out + in, vq->num_free);
		/* FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer.  Presumably the
		 * host should service the ring ASAP. */
		if (out)
			vq->notify(&vq->vq);
		END_USE(vq);
		return -ENOSPC;
	}

	/* We're about to use some buffers from the free list. */
	vq->num_free -= out + in;

	head = vq->free_head;
	for (i = vq->free_head; out; i = vq->vring.desc[i].next, out--) {
		vq->vring.desc[i].flags = VRING_DESC_F_NEXT;
		vq->vring.desc[i].addr = sg_phys(sg);
		vq->vring.desc[i].len = sg->length;
		prev = i;
		sg++;
	}
	for (; in; i = vq->vring.desc[i].next, in--) {
		vq->vring.desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
		vq->vring.desc[i].addr = sg_phys(sg);
		vq->vring.desc[i].len = sg->length;
		prev = i;
		sg++;
	}
	/* Last one doesn't continue. */
	vq->vring.desc[prev].flags &= ~VRING_DESC_F_NEXT;

	/* Update free pointer */
	vq->free_head = i;

add_head:
	/* Set token. */
	vq->data[head] = data;

	/* Put entry in available array (but don't update avail->idx until they
	 * do sync).  FIXME: avoid modulus here? */
	avail = (vq->vring.avail->idx + vq->num_added++) % vq->vring.num;
	vq->vring.avail->ring[avail] = head;

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);
	return 0;
}

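/*
 * Usage sketch (hypothetical driver code; names and error handling are
 * illustrative only):
 *
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, buf, len);
 *	if (vq->vq_ops->add_buf(vq, &sg, 1, 0, buf) == 0)
 *		vq->vq_ops->kick(vq);
 *
 * add_buf() only places the entry in the available array; kick(), below,
 * is what publishes the new avail->idx to the host and, if the host has
 * not suppressed it, issues the transport notify.
 */
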
static void vring_kick(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	START_USE(vq);
	/* Descriptors and available array need to be set before we expose the
	 * new available array entries. */
	wmb();

	vq->vring.avail->idx += vq->num_added;
	vq->num_added = 0;

	/* Need to update avail index before checking if we should notify */
	mb();

	if (!(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY))
		/* Prod other side to tell it about changes. */
		vq->notify(&vq->vq);

	END_USE(vq);
}

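/* Return a chain of descriptors (and free any indirect table it points
 * at) to the free list.  Note that an indirect head has no NEXT flag, so
 * the loop below credits it as a single ring descriptor, matching the
 * single num_free-- taken in vring_add_indirect(). */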
static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
{
	unsigned int i;

	/* Clear data ptr. */
	vq->data[head] = NULL;

	/* Put back on free list: find end */
	i = head;

	/* Free the indirect table */
	if (vq->vring.desc[i].flags & VRING_DESC_F_INDIRECT)
		kfree(phys_to_virt(vq->vring.desc[i].addr));

	while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) {
		i = vq->vring.desc[i].next;
		vq->num_free++;
	}

	vq->vring.desc[i].next = vq->free_head;
	vq->free_head = head;
	/* Plus final descriptor */
	vq->num_free++;
}

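/* Both indices are free-running u16 counters, so inequality is the right
 * test; wrap-around at 65536 is harmless. */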
static inline bool more_used(const struct vring_virtqueue *vq)
{
	return vq->last_used_idx != vq->vring.used->idx;
}

static void *vring_get_buf(struct virtqueue *_vq, unsigned int *len)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	void *ret;
	unsigned int i;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* Only read used array entries after they have been exposed by host. */
	rmb();

	i = vq->vring.used->ring[vq->last_used_idx%vq->vring.num].id;
	*len = vq->vring.used->ring[vq->last_used_idx%vq->vring.num].len;

	if (unlikely(i >= vq->vring.num)) {
		BAD_RING(vq, "id %u out of range\n", i);
		return NULL;
	}
	if (unlikely(!vq->data[i])) {
		BAD_RING(vq, "id %u is not a head!\n", i);
		return NULL;
	}

	/* detach_buf clears data, so grab it now. */
	ret = vq->data[i];
	detach_buf(vq, i);
	vq->last_used_idx++;
	END_USE(vq);
	return ret;
}

static void vring_disable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}

static bool vring_enable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
	mb();
	if (unlikely(more_used(vq))) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}

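/*
 * Typical callback pattern (sketch): disable further callbacks, drain
 * with get_buf() until it returns NULL, then re-enable.  If enable_cb()
 * returns false, a buffer arrived during the race window, so the driver
 * should drain again before going back to sleep.
 */
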
irqreturn_t vring_interrupt(int irq, void *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!more_used(vq)) {
		pr_debug("virtqueue interrupt with no work for %p\n", vq);
		return IRQ_NONE;
	}

	if (unlikely(vq->broken))
		return IRQ_HANDLED;

	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
	if (vq->vq.callback)
		vq->vq.callback(&vq->vq);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);

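/*
 * Transports hook vring_interrupt() up as their interrupt handler; a
 * minimal sketch (assumptions: a shared interrupt line, as virtio_pci
 * uses; "irq" and "vq" are the transport's own variables):
 *
 *	err = request_irq(irq, vring_interrupt, IRQF_SHARED,
 *			  dev_name(&vdev->dev), vq);
 *
 * The void *_vq argument must be the struct virtqueue returned by
 * vring_new_virtqueue() below.
 */
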
static struct virtqueue_ops vring_vq_ops = {
	.add_buf = vring_add_buf,
	.get_buf = vring_get_buf,
	.kick = vring_kick,
	.disable_cb = vring_disable_cb,
	.enable_cb = vring_enable_cb,
};

struct virtqueue *vring_new_virtqueue(unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      void *pages,
				      void (*notify)(struct virtqueue *),
				      void (*callback)(struct virtqueue *),
				      const char *name)
{
	struct vring_virtqueue *vq;
	unsigned int i;

	/* We assume num is a power of 2. */
	if (num & (num - 1)) {
		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
		return NULL;
	}

	vq = kmalloc(sizeof(*vq) + sizeof(void *)*num, GFP_KERNEL);
	if (!vq)
		return NULL;

	vring_init(&vq->vring, num, pages, vring_align);
	vq->vq.callback = callback;
	vq->vq.vdev = vdev;
	vq->vq.vq_ops = &vring_vq_ops;
	vq->vq.name = name;
	vq->notify = notify;
	vq->broken = false;
	vq->last_used_idx = 0;
	vq->num_added = 0;
	list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
	vq->in_use = false;
#endif

	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC);

	/* No callback?  Tell other side not to bother us. */
	if (!callback)
		vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;

	/* Put everything in free lists.  desc[num-1].next is left unset:
	 * chains are never walked past num_free entries, so it is never
	 * followed. */
	vq->num_free = num;
	vq->free_head = 0;
	for (i = 0; i < num-1; i++)
		vq->vring.desc[i].next = i+1;

	return &vq->vq;
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);

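/*
 * Example transport-side setup (hypothetical; the allocator and the
 * my_notify/my_callback names are illustrative, loosely following what
 * virtio_pci does):
 *
 *	unsigned long size = vring_size(num, PAGE_SIZE);
 *	void *pages = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
 *					       get_order(size));
 *	struct virtqueue *vq = vring_new_virtqueue(num, PAGE_SIZE, vdev,
 *						   pages, my_notify,
 *						   my_callback, "requests");
 *
 * Note that the ring memory remains owned by the transport:
 * vring_del_virtqueue() below frees only the bookkeeping structure,
 * not the pages.
 */
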
void vring_del_virtqueue(struct virtqueue *vq)
{
	list_del(&vq->list);
	kfree(to_vvq(vq));
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);

/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
	unsigned int i;

	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
		switch (i) {
		case VIRTIO_RING_F_INDIRECT_DESC:
			break;
		default:
			/* We don't understand this bit. */
			clear_bit(i, vdev->features);
		}
	}
}
EXPORT_SYMBOL_GPL(vring_transport_features);

MODULE_LICENSE("GPL");