/* Virtio ring implementation.
 *
 *  Copyright 2007 Rusty Russell IBM Corporation
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301 USA
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/kmemleak.h>	/* for kmemleak_ignore() below */

#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		BUG();						\
	} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)						\
	do {							\
		if ((_vq)->in_use)				\
			panic("%s:in_use = %i\n",		\
			      (_vq)->vq.name, (_vq)->in_use);	\
		(_vq)->in_use = __LINE__;			\
	} while (0)
#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
#else
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		(_vq)->broken = true;				\
	} while (0)
#define START_USE(vq)
#define END_USE(vq)
#endif
struct vring_virtqueue
{
	struct virtqueue vq;

	/* Actual memory layout for this queue */
	struct vring vring;

	/* Can we use weak barriers? */
	bool weak_barriers;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Host supports indirect buffers */
	bool indirect;

	/* Host publishes avail event idx */
	bool event;

	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen. */
	u16 last_used_idx;

	/* How to notify other side. FIXME: commonalize hcalls! */
	bool (*notify)(struct virtqueue *vq);

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;

	/* Figure out if their kicks are too delayed. */
	bool last_add_time_valid;
	ktime_t last_add_time;
#endif

	/* Tokens for callbacks. */
	void *data[];
};

#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)

static inline struct scatterlist *sg_next_chained(struct scatterlist *sg,
						  unsigned int *count)
{
	return sg_next(sg);
}

static inline struct scatterlist *sg_next_arr(struct scatterlist *sg,
					      unsigned int *count)
{
	if (--(*count) == 0)
		return NULL;
	return sg + 1;
}
/* Set up an indirect table of descriptors and add it to the queue. */
static inline int vring_add_indirect(struct vring_virtqueue *vq,
				     struct scatterlist *sgs[],
				     struct scatterlist *(*next)
				       (struct scatterlist *, unsigned int *),
				     unsigned int total_sg,
				     unsigned int total_out,
				     unsigned int total_in,
				     unsigned int out_sgs,
				     unsigned int in_sgs,
				     gfp_t gfp)
{
	struct vring_desc *desc;
	unsigned head;
	struct scatterlist *sg;
	int i, n;

	/*
	 * We require lowmem mappings for the descriptors because
	 * otherwise virt_to_phys will give us bogus addresses in the
	 * virtqueue.
	 */
	gfp &= ~(__GFP_HIGHMEM | __GFP_HIGH);

	desc = kmalloc(total_sg * sizeof(struct vring_desc), gfp);
	if (!desc)
		return -ENOMEM;

	/* Transfer entries from the sg lists into the indirect page */
	i = 0;
	for (n = 0; n < out_sgs; n++) {
		for (sg = sgs[n]; sg; sg = next(sg, &total_out)) {
			desc[i].flags = VRING_DESC_F_NEXT;
			desc[i].addr = sg_phys(sg);
			desc[i].len = sg->length;
			desc[i].next = i+1;
			i++;
		}
	}
	for (; n < (out_sgs + in_sgs); n++) {
		for (sg = sgs[n]; sg; sg = next(sg, &total_in)) {
			desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
			desc[i].addr = sg_phys(sg);
			desc[i].len = sg->length;
			desc[i].next = i+1;
			i++;
		}
	}
	BUG_ON(i != total_sg);

	/* Last one doesn't continue. */
	desc[i-1].flags &= ~VRING_DESC_F_NEXT;
	desc[i-1].next = 0;

	/* We're about to use a buffer */
	vq->vq.num_free--;

	/* Use a single buffer which doesn't continue */
	head = vq->free_head;
	vq->vring.desc[head].flags = VRING_DESC_F_INDIRECT;
	vq->vring.desc[head].addr = virt_to_phys(desc);
	/* kmemleak gives a false positive, as it's hidden by virt_to_phys */
	kmemleak_ignore(desc);
	vq->vring.desc[head].len = i * sizeof(struct vring_desc);

	/* Update free pointer */
	vq->free_head = vq->vring.desc[head].next;

	return head;
}
static inline int virtqueue_add(struct virtqueue *_vq,
				struct scatterlist *sgs[],
				struct scatterlist *(*next)
				  (struct scatterlist *, unsigned int *),
				unsigned int total_out,
				unsigned int total_in,
				unsigned int out_sgs,
				unsigned int in_sgs,
				void *data,
				gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	struct scatterlist *sg;
	unsigned int i, n, avail, uninitialized_var(prev), total_sg;
	int head;

	START_USE(vq);

	BUG_ON(data == NULL);

#ifdef DEBUG
	{
		ktime_t now = ktime_get();

		/* No kick or get, with .1 second between?  Warn. */
		if (vq->last_add_time_valid)
			WARN_ON(ktime_to_ms(ktime_sub(now, vq->last_add_time))
				> 100);
		vq->last_add_time = now;
		vq->last_add_time_valid = true;
	}
#endif

	total_sg = total_in + total_out;

	/* If the host supports indirect descriptor tables, and we have multiple
	 * buffers, then go indirect. FIXME: tune this threshold */
	if (vq->indirect && total_sg > 1 && vq->vq.num_free) {
		head = vring_add_indirect(vq, sgs, next, total_sg, total_out,
					  total_in,
					  out_sgs, in_sgs, gfp);
		if (likely(head >= 0))
			goto add_head;
	}

	BUG_ON(total_sg > vq->vring.num);
	BUG_ON(total_sg == 0);

	if (vq->vq.num_free < total_sg) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 total_sg, vq->vq.num_free);
		/* FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer.  Presumably the
		 * host should service the ring ASAP. */
		if (out_sgs)
			vq->notify(&vq->vq);
		END_USE(vq);
		return -ENOSPC;
	}

	/* We're about to use some buffers from the free list. */
	vq->vq.num_free -= total_sg;

	head = i = vq->free_head;
	for (n = 0; n < out_sgs; n++) {
		for (sg = sgs[n]; sg; sg = next(sg, &total_out)) {
			vq->vring.desc[i].flags = VRING_DESC_F_NEXT;
			vq->vring.desc[i].addr = sg_phys(sg);
			vq->vring.desc[i].len = sg->length;
			prev = i;
			i = vq->vring.desc[i].next;
		}
	}
	for (; n < (out_sgs + in_sgs); n++) {
		for (sg = sgs[n]; sg; sg = next(sg, &total_in)) {
			vq->vring.desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
			vq->vring.desc[i].addr = sg_phys(sg);
			vq->vring.desc[i].len = sg->length;
			prev = i;
			i = vq->vring.desc[i].next;
		}
	}
	/* Last one doesn't continue. */
	vq->vring.desc[prev].flags &= ~VRING_DESC_F_NEXT;

	/* Update free pointer */
	vq->free_head = i;

add_head:
	/* Set token. */
	vq->data[head] = data;

	/* Put entry in available array (but don't update avail->idx until they
	 * do sync). */
	avail = (vq->vring.avail->idx & (vq->vring.num-1));
	vq->vring.avail->ring[avail] = head;

	/* Descriptors and available array need to be set before we expose the
	 * new available array entries. */
	virtio_wmb(vq->weak_barriers);
	vq->vring.avail->idx++;
	vq->num_added++;

	/* This is very unlikely, but theoretically possible.  Kick
	 * just in case. */
	if (unlikely(vq->num_added == (1 << 16) - 1))
		virtqueue_kick(_vq);

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	return 0;
}

/**
 * virtqueue_add_sgs - expose buffers to other end
 * @_vq: the struct virtqueue we're talking about.
 * @sgs: array of terminated scatterlists.
 * @out_sgs: the number of scatterlists readable by other side
 * @in_sgs: the number of scatterlists which are writable (after readable ones)
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (e.g. -ENOSPC, -ENOMEM).
 */
int virtqueue_add_sgs(struct virtqueue *_vq,
		      struct scatterlist *sgs[],
		      unsigned int out_sgs,
		      unsigned int in_sgs,
		      void *data,
		      gfp_t gfp)
{
	unsigned int i, total_out, total_in;

	/* Count them first. */
	for (i = total_out = total_in = 0; i < out_sgs; i++) {
		struct scatterlist *sg;
		for (sg = sgs[i]; sg; sg = sg_next(sg))
			total_out++;
	}
	for (; i < out_sgs + in_sgs; i++) {
		struct scatterlist *sg;
		for (sg = sgs[i]; sg; sg = sg_next(sg))
			total_in++;
	}
	return virtqueue_add(_vq, sgs, sg_next_chained,
			     total_out, total_in, out_sgs, in_sgs, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
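
/*
 * Usage sketch for virtqueue_add_sgs() (illustrative only, not part of the
 * original file; the request structure, field names and queue pointer below
 * are hypothetical).  A request with a device-readable header, a
 * device-readable payload and a device-writable status byte is the typical
 * virtio-blk-style shape: readable scatterlists first, writable ones after.
 */
#if 0
static int my_queue_request(struct virtqueue *vq, struct my_req *req)
{
	struct scatterlist hdr, payload, status;
	struct scatterlist *sgs[3];

	sg_init_one(&hdr, &req->hdr, sizeof(req->hdr));
	sg_init_one(&payload, req->buf, req->len);
	sg_init_one(&status, &req->status, sizeof(req->status));

	sgs[0] = &hdr;		/* readable by the device */
	sgs[1] = &payload;	/* readable by the device */
	sgs[2] = &status;	/* writable by the device */

	/* Two readable scatterlists followed by one writable one; "req" is
	 * the token virtqueue_get_buf() will hand back on completion. */
	return virtqueue_add_sgs(vq, sgs, 2, 1, req, GFP_ATOMIC);
}
#endif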

/**
 * virtqueue_add_outbuf - expose output buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: array of scatterlist entries (need not be terminated!)
 * @num: the number of entries in @sg readable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (e.g. -ENOSPC, -ENOMEM).
 */
int virtqueue_add_outbuf(struct virtqueue *vq,
			 struct scatterlist sg[], unsigned int num,
			 void *data,
			 gfp_t gfp)
{
	return virtqueue_add(vq, &sg, sg_next_arr, num, 0, 1, 0, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);

/**
 * virtqueue_add_inbuf - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: array of scatterlist entries (need not be terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (e.g. -ENOSPC, -ENOMEM).
 */
int virtqueue_add_inbuf(struct virtqueue *vq,
			struct scatterlist sg[], unsigned int num,
			void *data,
			gfp_t gfp)
{
	return virtqueue_add(vq, &sg, sg_next_arr, 0, num, 0, 1, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);
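
/*
 * Usage sketch for the single-direction helpers (illustrative only; the
 * buffer, queue and helper names are hypothetical).  A receive path
 * typically pre-posts device-writable buffers with virtqueue_add_inbuf(),
 * while a transmit path posts device-readable buffers with
 * virtqueue_add_outbuf() and then kicks the device.
 */
#if 0
static int my_post_rx_buffer(struct virtqueue *rx_vq, void *buf, size_t len)
{
	struct scatterlist sg;

	sg_init_one(&sg, buf, len);
	/* One writable entry; "buf" comes back from virtqueue_get_buf(). */
	return virtqueue_add_inbuf(rx_vq, &sg, 1, buf, GFP_KERNEL);
}

static int my_send(struct virtqueue *tx_vq, void *buf, size_t len)
{
	struct scatterlist sg;
	int err;

	sg_init_one(&sg, buf, len);
	err = virtqueue_add_outbuf(tx_vq, &sg, 1, buf, GFP_ATOMIC);
	if (err)
		return err;	/* typically -ENOSPC when the ring is full */

	virtqueue_kick(tx_vq);
	return 0;
}
#endif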

/**
 * virtqueue_kick_prepare - first half of split virtqueue_kick call.
 * @_vq: the struct virtqueue
 *
 * Instead of virtqueue_kick(), you can do:
 *	if (virtqueue_kick_prepare(vq))
 *		virtqueue_notify(vq);
 *
 * This is sometimes useful because the virtqueue_kick_prepare() needs
 * to be serialized, but the actual virtqueue_notify() call does not.
 */
bool virtqueue_kick_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 new, old;
	bool needs_kick;

	START_USE(vq);
	/* We need to expose available array entries before checking avail
	 * event. */
	virtio_mb(vq->weak_barriers);

	old = vq->vring.avail->idx - vq->num_added;
	new = vq->vring.avail->idx;
	vq->num_added = 0;

#ifdef DEBUG
	if (vq->last_add_time_valid) {
		WARN_ON(ktime_to_ms(ktime_sub(ktime_get(),
					      vq->last_add_time)) > 100);
	}
	vq->last_add_time_valid = false;
#endif

	if (vq->event) {
		needs_kick = vring_need_event(vring_avail_event(&vq->vring),
					      new, old);
	} else {
		needs_kick = !(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY);
	}
	END_USE(vq);
	return needs_kick;
}
EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);

/**
 * virtqueue_notify - second half of split virtqueue_kick call.
 * @_vq: the struct virtqueue
 *
 * This does not need to be serialized.
 */
void virtqueue_notify(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	/* Prod other side to tell it about changes. */
	vq->notify(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_notify);
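
/*
 * Usage sketch for the split kick (illustrative only; the device structure
 * and lock are hypothetical).  virtqueue_kick_prepare() must be serialized
 * with the add/get operations, but virtqueue_notify() can be expensive
 * (e.g. an exit to the hypervisor), so it may be issued after dropping the
 * lock that protects the queue.
 */
#if 0
static void my_kick(struct my_dev *dev)
{
	bool notify;

	spin_lock_irq(&dev->vq_lock);
	notify = virtqueue_kick_prepare(dev->vq);
	spin_unlock_irq(&dev->vq_lock);

	if (notify)
		virtqueue_notify(dev->vq);
}
#endif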

/**
 * virtqueue_kick - update after add_buf
 * @vq: the struct virtqueue
 *
 * After one or more virtqueue_add_* calls, invoke this to kick
 * the other side.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
void virtqueue_kick(struct virtqueue *vq)
{
	if (virtqueue_kick_prepare(vq))
		virtqueue_notify(vq);
}
EXPORT_SYMBOL_GPL(virtqueue_kick);

static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
{
	unsigned int i;

	/* Clear data ptr. */
	vq->data[head] = NULL;

	/* Put back on free list: find end */
	i = head;

	/* Free the indirect table */
	if (vq->vring.desc[i].flags & VRING_DESC_F_INDIRECT)
		kfree(phys_to_virt(vq->vring.desc[i].addr));

	while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) {
		i = vq->vring.desc[i].next;
		vq->vq.num_free++;
	}

	vq->vring.desc[i].next = vq->free_head;
	vq->free_head = head;
	/* Plus final descriptor */
	vq->vq.num_free++;
}

static inline bool more_used(const struct vring_virtqueue *vq)
{
	return vq->last_used_idx != vq->vring.used->idx;
}

/**
 * virtqueue_get_buf - get the next used buffer
 * @_vq: the struct virtqueue we're talking about.
 * @len: the length written into the buffer
 *
 * If the device wrote data into the buffer, @len will be set to the
 * amount written.  This means you don't need to clear the buffer
 * beforehand to ensure there's no data leakage in the case of short
 * writes.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns NULL if there are no used buffers, or the "data" token
 * handed to virtqueue_add_*().
 */
void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	void *ret;
	unsigned int i;
	u16 last_used;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* Only get used array entries after they have been exposed by host. */
	virtio_rmb(vq->weak_barriers);

	last_used = (vq->last_used_idx & (vq->vring.num - 1));
	i = vq->vring.used->ring[last_used].id;
	*len = vq->vring.used->ring[last_used].len;

	if (unlikely(i >= vq->vring.num)) {
		BAD_RING(vq, "id %u out of range\n", i);
		return NULL;
	}
	if (unlikely(!vq->data[i])) {
		BAD_RING(vq, "id %u is not a head!\n", i);
		return NULL;
	}

	/* detach_buf clears data, so grab it now. */
	ret = vq->data[i];
	detach_buf(vq, i);
	vq->last_used_idx++;
	/* If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call. */
	if (!(vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) {
		vring_used_event(&vq->vring) = vq->last_used_idx;
		virtio_mb(vq->weak_barriers);
	}

#ifdef DEBUG
	vq->last_add_time_valid = false;
#endif

	END_USE(vq);
	return ret;
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf);
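
/*
 * Usage sketch for reaping used buffers (illustrative only; the completion
 * helper is hypothetical).  The token returned is whatever was passed as
 * @data to virtqueue_add_*(), and @len reports how much the other side
 * wrote into the writable part of that buffer.
 */
#if 0
static void my_reap_used(struct virtqueue *vq)
{
	unsigned int len;
	void *token;

	while ((token = virtqueue_get_buf(vq, &len)) != NULL)
		my_complete_request(token, len);
}
#endif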

/**
 * virtqueue_disable_cb - disable callbacks
 * @_vq: the struct virtqueue we're talking about.
 *
 * Note that this is not necessarily synchronous, hence unreliable and only
 * useful as an optimization.
 *
 * Unlike other operations, this need not be serialized.
 */
void virtqueue_disable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);

/**
 * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns current queue state
 * in an opaque unsigned value.  This value should be later tested by
 * virtqueue_poll, to detect a possible race between the driver checking for
 * more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 last_used_idx;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry.  Always do both to keep code simple. */
	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
	vring_used_event(&vq->vring) = last_used_idx = vq->last_used_idx;
	END_USE(vq);
	return last_used_idx;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);

/**
 * virtqueue_poll - query pending used buffers
 * @_vq: the struct virtqueue we're talking about.
 * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
 *
 * Returns "true" if there are pending used buffers in the queue.
 *
 * This does not need to be serialized.
 */
bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	virtio_mb(vq->weak_barriers);
	return (u16)last_used_idx != vq->vring.used->idx;
}
EXPORT_SYMBOL_GPL(virtqueue_poll);
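
/*
 * Usage sketch for the race-free callback re-enable (illustrative only; the
 * drain helper is hypothetical).  This is the NAPI-style pattern: disable
 * callbacks, drain the queue, then re-enable and re-check with
 * virtqueue_poll() so a buffer that arrived in the window is not missed.
 */
#if 0
static void my_poll(struct virtqueue *vq)
{
	unsigned int opaque;

again:
	virtqueue_disable_cb(vq);
	my_drain_used_buffers(vq);

	opaque = virtqueue_enable_cb_prepare(vq);
	if (virtqueue_poll(vq, opaque))
		goto again;	/* more arrived while re-enabling */
}
#endif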

/**
 * virtqueue_enable_cb - restart callbacks after disable_cb.
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns "false" if there are pending
 * buffers in the queue, to detect a possible race between the driver
 * checking for more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb(struct virtqueue *_vq)
{
	unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);
	return !virtqueue_poll(_vq, last_used_idx);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);

/**
 * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks but hints to the other side to delay
 * interrupts until most of the available buffers have been processed;
 * it returns "false" if there are many pending buffers in the queue,
 * to detect a possible race between the driver checking for more work,
 * and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 bufs;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry.  Always do both to keep code simple. */
	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
	/* TODO: tune this threshold */
	bufs = (u16)(vq->vring.avail->idx - vq->last_used_idx) * 3 / 4;
	vring_used_event(&vq->vring) = vq->last_used_idx + bufs;
	virtio_mb(vq->weak_barriers);
	if (unlikely((u16)(vq->vring.used->idx - vq->last_used_idx) > bufs)) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
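
/*
 * Usage sketch for virtqueue_enable_cb_delayed() (illustrative only;
 * my_reap_used() is the hypothetical helper from the sketch above).  A
 * transmit path that only needs an interrupt once the device has consumed
 * most of the outstanding buffers can use the delayed variant to cut the
 * interrupt rate.
 */
#if 0
static void my_tx_cleanup_later(struct virtqueue *tx_vq)
{
	/* Ask for an interrupt only after ~3/4 of the pending buffers are
	 * used; if many are already used, the re-enable reports false and
	 * we reap immediately instead of waiting. */
	if (!virtqueue_enable_cb_delayed(tx_vq))
		my_reap_used(tx_vq);
}
#endif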

/**
 * virtqueue_detach_unused_buf - detach first unused buffer
 * @_vq: the struct virtqueue we're talking about.
 *
 * Returns NULL or the "data" token handed to virtqueue_add_*().
 * This is not valid on an active queue; it is useful only for device
 * shutdown.
 */
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i;
	void *buf;

	START_USE(vq);

	for (i = 0; i < vq->vring.num; i++) {
		if (!vq->data[i])
			continue;
		/* detach_buf clears data, so grab it now. */
		buf = vq->data[i];
		detach_buf(vq, i);
		vq->vring.avail->idx--;
		END_USE(vq);
		return buf;
	}
	/* That should have freed everything. */
	BUG_ON(vq->vq.num_free != vq->vring.num);

	END_USE(vq);
	return NULL;
}
EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);

irqreturn_t vring_interrupt(int irq, void *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!more_used(vq)) {
		pr_debug("virtqueue interrupt with no work for %p\n", vq);
		return IRQ_NONE;
	}

	if (unlikely(vq->broken))
		return IRQ_HANDLED;

	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
	if (vq->vq.callback)
		vq->vq.callback(&vq->vq);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);
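
/*
 * Usage sketch (illustrative only; the IRQ number and name are hypothetical
 * and <linux/interrupt.h> is assumed): a transport with a dedicated
 * interrupt per virtqueue can register vring_interrupt() directly as the
 * handler, much as the PCI transport's per-vq MSI-X path does.
 */
#if 0
static int my_setup_vq_irq(unsigned int irq, struct virtqueue *vq)
{
	/* vring_interrupt() checks more_used() and runs vq's callback. */
	return request_irq(irq, vring_interrupt, 0, "my-virtqueue", vq);
}
#endif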

struct virtqueue *vring_new_virtqueue(unsigned int index,
				      unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      bool weak_barriers,
				      void *pages,
				      bool (*notify)(struct virtqueue *),
				      void (*callback)(struct virtqueue *),
				      const char *name)
{
	struct vring_virtqueue *vq;
	unsigned int i;

	/* We assume num is a power of 2. */
	if (num & (num - 1)) {
		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
		return NULL;
	}

	vq = kmalloc(sizeof(*vq) + sizeof(void *)*num, GFP_KERNEL);
	if (!vq)
		return NULL;

	vring_init(&vq->vring, num, pages, vring_align);
	vq->vq.callback = callback;
	vq->vq.vdev = vdev;
	vq->vq.name = name;
	vq->vq.num_free = num;
	vq->vq.index = index;
	vq->notify = notify;
	vq->weak_barriers = weak_barriers;
	vq->broken = false;
	vq->last_used_idx = 0;
	vq->num_added = 0;
	list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
	vq->in_use = false;
	vq->last_add_time_valid = false;
#endif

	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC);
	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

	/* No callback?  Tell other side not to bother us. */
	if (!callback)
		vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;

	/* Put everything in free lists. */
	vq->free_head = 0;
	for (i = 0; i < num-1; i++) {
		vq->vring.desc[i].next = i+1;
		vq->data[i] = NULL;
	}
	vq->data[i] = NULL;

	return &vq->vq;
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);

void vring_del_virtqueue(struct virtqueue *vq)
{
	list_del(&vq->list);
	kfree(to_vvq(vq));
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);
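
/*
 * Usage sketch for vring_new_virtqueue() (illustrative only; the queue size,
 * doorbell register, base pointer and notify callback are hypothetical,
 * loosely modelled on what a memory-mapped transport does).  The transport
 * owns the ring pages; this file only builds the struct virtqueue on top.
 */
#if 0
static bool my_notify(struct virtqueue *vq)
{
	/* Tell the device which queue has new buffers, e.g. via a doorbell
	 * write; returning false would report a failed notification. */
	writel(vq->index, my_dev_base + MY_QUEUE_NOTIFY);
	return true;
}

static struct virtqueue *my_setup_vq(struct virtio_device *vdev, int index,
				     void (*callback)(struct virtqueue *),
				     const char *name)
{
	unsigned int num = 256;		/* must be a power of 2 */
	void *queue;

	queue = alloc_pages_exact(PAGE_ALIGN(vring_size(num, PAGE_SIZE)),
				  GFP_KERNEL | __GFP_ZERO);
	if (!queue)
		return NULL;

	return vring_new_virtqueue(index, num, PAGE_SIZE, vdev,
				   true /* weak barriers */, queue,
				   my_notify, callback, name);
}
#endif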

/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
	unsigned int i;

	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
		switch (i) {
		case VIRTIO_RING_F_INDIRECT_DESC:
			break;
		case VIRTIO_RING_F_EVENT_IDX:
			break;
		default:
			/* We don't understand this bit. */
			clear_bit(i, vdev->features);
		}
	}
}
EXPORT_SYMBOL_GPL(vring_transport_features);

/**
 * virtqueue_get_vring_size - return the size of the virtqueue's vring
 * @_vq: the struct virtqueue containing the vring of interest.
 *
 * Returns the size of the vring.  This is mainly used for boasting to
 * userspace.  Unlike other operations, this need not be serialized.
 */
unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->vring.num;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);

MODULE_LICENSE("GPL");