/* Virtio ring implementation.
 *
 * Copyright 2007 Rusty Russell IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301 USA
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>

#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		BUG();						\
	} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)						\
	do {							\
		if ((_vq)->in_use)				\
			panic("%s:in_use = %i\n",		\
			      (_vq)->vq.name, (_vq)->in_use);	\
		(_vq)->in_use = __LINE__;			\
		mb();						\
	} while (0)
#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; mb(); } while(0)
#else
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&_vq->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		(_vq)->broken = true;				\
	} while (0)
#define START_USE(vq)
#define END_USE(vq)
#endif

struct vring_virtqueue
{
	struct virtqueue vq;

	/* Actual memory layout for this queue */
	struct vring vring;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Host supports indirect buffers */
	bool indirect;

	/* Number of free buffers */
	unsigned int num_free;
	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen. */
	u16 last_used_idx;

	/* How to notify other side. FIXME: commonalize hcalls! */
	void (*notify)(struct virtqueue *vq);

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;
#endif

	/* Tokens for callbacks. */
	void *data[];
};

#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)

/* Set up an indirect table of descriptors and add it to the queue. */
static int vring_add_indirect(struct vring_virtqueue *vq,
			      struct scatterlist sg[],
			      unsigned int out,
			      unsigned int in)
{
	struct vring_desc *desc;
	unsigned head;
	int i;

	desc = kmalloc((out + in) * sizeof(struct vring_desc), GFP_ATOMIC);
	if (!desc)
		return vq->vring.num;

	/* Transfer entries from the sg list into the indirect page */
	for (i = 0; i < out; i++) {
		desc[i].flags = VRING_DESC_F_NEXT;
		desc[i].addr = sg_phys(sg);
		desc[i].len = sg->length;
		desc[i].next = i+1;
		sg++;
	}
	for (; i < (out + in); i++) {
		desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
		desc[i].addr = sg_phys(sg);
		desc[i].len = sg->length;
		desc[i].next = i+1;
		sg++;
	}

	/* Last one doesn't continue. */
	desc[i-1].flags &= ~VRING_DESC_F_NEXT;
	desc[i-1].next = 0;

	/* We're about to use a buffer */
	vq->num_free--;

	/* Use a single buffer which doesn't continue */
	head = vq->free_head;
	vq->vring.desc[head].flags = VRING_DESC_F_INDIRECT;
	vq->vring.desc[head].addr = virt_to_phys(desc);
	vq->vring.desc[head].len = i * sizeof(struct vring_desc);

	/* Update free pointer */
	vq->free_head = vq->vring.desc[head].next;

	return head;
}

static int vring_add_buf(struct virtqueue *_vq,
			 struct scatterlist sg[],
			 unsigned int out,
			 unsigned int in,
			 void *data)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i, avail, head, uninitialized_var(prev);

	START_USE(vq);

	BUG_ON(data == NULL);

	/* If the host supports indirect descriptor tables, and we have multiple
	 * buffers, then go indirect. FIXME: tune this threshold */
	if (vq->indirect && (out + in) > 1 && vq->num_free) {
		head = vring_add_indirect(vq, sg, out, in);
		if (head != vq->vring.num)
			goto add_head;
	}

	BUG_ON(out + in > vq->vring.num);
	BUG_ON(out + in == 0);

	if (vq->num_free < out + in) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 out + in, vq->num_free);
		/* FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer.  Presumably the
		 * host should service the ring ASAP. */
		if (out)
			vq->notify(&vq->vq);
		END_USE(vq);
		return -ENOSPC;
	}

	/* We're about to use some buffers from the free list. */
	vq->num_free -= out + in;

	head = vq->free_head;
	for (i = vq->free_head; out; i = vq->vring.desc[i].next, out--) {
		vq->vring.desc[i].flags = VRING_DESC_F_NEXT;
		vq->vring.desc[i].addr = sg_phys(sg);
		vq->vring.desc[i].len = sg->length;
		prev = i;
		sg++;
	}
	for (; in; i = vq->vring.desc[i].next, in--) {
		vq->vring.desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
		vq->vring.desc[i].addr = sg_phys(sg);
		vq->vring.desc[i].len = sg->length;
		prev = i;
		sg++;
	}
	/* Last one doesn't continue. */
	vq->vring.desc[prev].flags &= ~VRING_DESC_F_NEXT;

	/* Update free pointer */
	vq->free_head = i;

add_head:
	/* Set token. */
	vq->data[head] = data;

	/* Put entry in available array (but don't update avail->idx until they
	 * do sync).  FIXME: avoid modulus here? */
	avail = (vq->vring.avail->idx + vq->num_added++) % vq->vring.num;
	vq->vring.avail->ring[avail] = head;

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	/* If we're indirect, we can fit many (assuming not OOM). */
	if (vq->indirect)
		return vq->num_free ? vq->vring.num : 0;
	return vq->num_free;
}

static void vring_kick(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	START_USE(vq);
	/* Descriptors and available array need to be set before we expose the
	 * new available array entries. */
	wmb();

	vq->vring.avail->idx += vq->num_added;
	vq->num_added = 0;

	/* Need to update avail index before checking if we should notify */
	mb();

	if (!(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY))
		/* Prod other side to tell it about changes. */
		vq->notify(&vq->vq);

	END_USE(vq);
}

static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
{
	unsigned int i;

	/* Clear data ptr. */
	vq->data[head] = NULL;

	/* Put back on free list: find end */
	i = head;

	/* Free the indirect table */
	if (vq->vring.desc[i].flags & VRING_DESC_F_INDIRECT)
		kfree(phys_to_virt(vq->vring.desc[i].addr));

	while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) {
		i = vq->vring.desc[i].next;
		vq->num_free++;
	}

	vq->vring.desc[i].next = vq->free_head;
	vq->free_head = head;
	/* Plus final descriptor */
	vq->num_free++;
}

static inline bool more_used(const struct vring_virtqueue *vq)
{
	return vq->last_used_idx != vq->vring.used->idx;
}

static void *vring_get_buf(struct virtqueue *_vq, unsigned int *len)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	void *ret;
	unsigned int i;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* Only get used array entries after they have been exposed by host. */
	rmb();

	i = vq->vring.used->ring[vq->last_used_idx%vq->vring.num].id;
	*len = vq->vring.used->ring[vq->last_used_idx%vq->vring.num].len;

	if (unlikely(i >= vq->vring.num)) {
		BAD_RING(vq, "id %u out of range\n", i);
		return NULL;
	}
	if (unlikely(!vq->data[i])) {
		BAD_RING(vq, "id %u is not a head!\n", i);
		return NULL;
	}

	/* detach_buf clears data, so grab it now. */
	ret = vq->data[i];
	detach_buf(vq, i);
	vq->last_used_idx++;
	END_USE(vq);
	return ret;
}

static void vring_disable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}

static bool vring_enable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
	mb();
	if (unlikely(more_used(vq))) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}

irqreturn_t vring_interrupt(int irq, void *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!more_used(vq)) {
		pr_debug("virtqueue interrupt with no work for %p\n", vq);
		return IRQ_NONE;
	}

	if (unlikely(vq->broken))
		return IRQ_HANDLED;

	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
	if (vq->vq.callback)
		vq->vq.callback(&vq->vq);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);

static struct virtqueue_ops vring_vq_ops = {
	.add_buf = vring_add_buf,
	.get_buf = vring_get_buf,
	.kick = vring_kick,
	.disable_cb = vring_disable_cb,
	.enable_cb = vring_enable_cb,
};

struct virtqueue *vring_new_virtqueue(unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      void *pages,
				      void (*notify)(struct virtqueue *),
				      void (*callback)(struct virtqueue *),
				      const char *name)
{
	struct vring_virtqueue *vq;
	unsigned int i;

	/* We assume num is a power of 2. */
	if (num & (num - 1)) {
		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
		return NULL;
	}

	vq = kmalloc(sizeof(*vq) + sizeof(void *)*num, GFP_KERNEL);
	if (!vq)
		return NULL;

	vring_init(&vq->vring, num, pages, vring_align);
	vq->vq.callback = callback;
	vq->vq.vdev = vdev;
	vq->vq.vq_ops = &vring_vq_ops;
	vq->vq.name = name;
	vq->notify = notify;
	vq->broken = false;
	vq->last_used_idx = 0;
	vq->num_added = 0;
	list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
	vq->in_use = false;
#endif

	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC);

	/* No callback?  Tell other side not to bother us. */
	if (!callback)
		vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;

	/* Put everything in free lists. */
	vq->num_free = num;
	vq->free_head = 0;
	for (i = 0; i < num-1; i++)
		vq->vring.desc[i].next = i+1;

	return &vq->vq;
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);

void vring_del_virtqueue(struct virtqueue *vq)
{
	list_del(&vq->list);
	kfree(to_vvq(vq));
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);

/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
	unsigned int i;

	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
		switch (i) {
		case VIRTIO_RING_F_INDIRECT_DESC:
			break;
		default:
			/* We don't understand this bit. */
			clear_bit(i, vdev->features);
		}
	}
}
EXPORT_SYMBOL_GPL(vring_transport_features);

MODULE_LICENSE("GPL");
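
/*
 * Illustrative sketch only (not part of this driver): roughly how a virtio
 * driver is expected to drive a ring through the virtqueue_ops above, here
 * posting one device-writable buffer and reclaiming it after the host fills
 * it.  The buffer "buf", its size "buf_len" and the final consume step are
 * hypothetical placeholders.
 *
 *	struct scatterlist sg[1];
 *	unsigned int len;
 *
 *	sg_init_one(sg, buf, buf_len);
 *	if (vq->vq_ops->add_buf(vq, sg, 0, 1, buf) < 0)
 *		;		(ring is full, try again after get_buf)
 *	vq->vq_ops->kick(vq);	(expose avail->idx and notify the host)
 *
 *	Later, typically from the virtqueue callback:
 *	while ((buf = vq->vq_ops->get_buf(vq, &len)) != NULL)
 *		;		(consume the len bytes the host wrote into buf)
 */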