/* Virtio ring implementation.
 *
 *  Copyright 2007 Rusty Russell IBM Corporation
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301 USA
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/kmemleak.h>

#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		BUG();						\
	} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)						\
	do {							\
		if ((_vq)->in_use)				\
			panic("%s:in_use = %i\n",		\
			      (_vq)->vq.name, (_vq)->in_use);	\
		(_vq)->in_use = __LINE__;			\
	} while (0)
#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while (0)
#else
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&_vq->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		(_vq)->broken = true;				\
	} while (0)
#define START_USE(vq)
#define END_USE(vq)
#endif

struct vring_virtqueue {
	struct virtqueue vq;

	/* Actual memory layout for this queue */
	struct vring vring;

	/* Can we use weak barriers? */
	bool weak_barriers;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Host supports indirect buffers */
	bool indirect;

	/* Host publishes avail event idx */
	bool event;

	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen. */
	u16 last_used_idx;

	/* Last written value to avail->flags */
	u16 avail_flags_shadow;

	/* Last written value to avail->idx in guest byte order */
	u16 avail_idx_shadow;

	/* How to notify other side. FIXME: commonalize hcalls! */
	bool (*notify)(struct virtqueue *vq);

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;

	/* Figure out if their kicks are too delayed. */
	bool last_add_time_valid;
	ktime_t last_add_time;
#endif

	/* Tokens for callbacks. */
	void *data[];
};

#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)

/*
 * The interaction between virtio and a possible IOMMU is a mess.
 *
 * On most systems with virtio, physical addresses match bus addresses,
 * and it doesn't particularly matter whether we use the DMA API.
 *
 * On some systems, including Xen and any system with a physical device
 * that speaks virtio behind a physical IOMMU, we must use the DMA API
 * for virtio DMA to work at all.
 *
 * On other systems, including SPARC and PPC64, virtio-pci devices are
 * enumerated as though they are behind an IOMMU, but the virtio host
 * ignores the IOMMU, so we must either pretend that the IOMMU isn't
 * there or somehow map everything as the identity.
 *
 * For the time being, we preserve historic behavior and bypass the DMA
 * API.
 */

static bool vring_use_dma_api(struct virtio_device *vdev)
{
	return false;
}
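
/*
 * Illustrative sketch only (not used anywhere in this file): if
 * vring_use_dma_api() ever returned true, mapping a scatterlist entry
 * would go through the DMA API rather than sg_phys(), roughly like the
 * hypothetical helper below.  The helper name and the choice of
 * vdev->dev.parent as the DMA device are assumptions, not part of the
 * current code.
 *
 *	static dma_addr_t vring_map_one_sg(struct vring_virtqueue *vq,
 *					   struct scatterlist *sg,
 *					   enum dma_data_direction dir)
 *	{
 *		if (!vring_use_dma_api(vq->vq.vdev))
 *			return (dma_addr_t)sg_phys(sg);
 *		return dma_map_page(vq->vq.vdev->dev.parent, sg_page(sg),
 *				    sg->offset, sg->length, dir);
 *	}
 */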
static struct vring_desc *alloc_indirect(struct virtqueue *_vq,
					 unsigned int total_sg, gfp_t gfp)
{
	struct vring_desc *desc;
	unsigned int i;

	/*
	 * We require lowmem mappings for the descriptors because
	 * otherwise virt_to_phys will give us bogus addresses in the
	 * virtqueue.
	 */
	gfp &= ~__GFP_HIGHMEM;

	desc = kmalloc(total_sg * sizeof(struct vring_desc), gfp);
	if (!desc)
		return NULL;

	for (i = 0; i < total_sg; i++)
		desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1);
	return desc;
}

static inline int virtqueue_add(struct virtqueue *_vq,
				struct scatterlist *sgs[],
				unsigned int total_sg,
				unsigned int out_sgs,
				unsigned int in_sgs,
				void *data,
				gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	struct scatterlist *sg;
	struct vring_desc *desc;
	unsigned int i, n, avail, descs_used, uninitialized_var(prev);
	int head;
	bool indirect;

	START_USE(vq);

	BUG_ON(data == NULL);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return -EIO;
	}

#ifdef DEBUG
	{
		ktime_t now = ktime_get();

		/* No kick or get, with .1 second between?  Warn. */
		if (vq->last_add_time_valid)
			WARN_ON(ktime_to_ms(ktime_sub(now, vq->last_add_time))
					    > 100);
		vq->last_add_time = now;
		vq->last_add_time_valid = true;
	}
#endif

	BUG_ON(total_sg > vq->vring.num);
	BUG_ON(total_sg == 0);

	head = vq->free_head;

	/* If the host supports indirect descriptor tables, and we have multiple
	 * buffers, then go indirect. FIXME: tune this threshold */
	if (vq->indirect && total_sg > 1 && vq->vq.num_free)
		desc = alloc_indirect(_vq, total_sg, gfp);
	else
		desc = NULL;

	if (desc) {
		/* Use a single buffer which doesn't continue */
		vq->vring.desc[head].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_INDIRECT);
		vq->vring.desc[head].addr = cpu_to_virtio64(_vq->vdev, virt_to_phys(desc));
		/* avoid kmemleak false positive (hidden by virt_to_phys) */
		kmemleak_ignore(desc);
		vq->vring.desc[head].len = cpu_to_virtio32(_vq->vdev, total_sg * sizeof(struct vring_desc));

		/* Set up rest to use this indirect table. */
		i = 0;
		descs_used = 1;
		indirect = true;
	} else {
		desc = vq->vring.desc;
		i = head;
		descs_used = total_sg;
		indirect = false;
	}

	if (vq->vq.num_free < descs_used) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 descs_used, vq->vq.num_free);
		/* FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer.  Presumably the
		 * host should service the ring ASAP. */
		if (out_sgs)
			vq->notify(&vq->vq);
		END_USE(vq);
		return -ENOSPC;
	}

	/* We're about to use some buffers from the free list. */
	vq->vq.num_free -= descs_used;

	for (n = 0; n < out_sgs; n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT);
			desc[i].addr = cpu_to_virtio64(_vq->vdev, sg_phys(sg));
			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
			prev = i;
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		}
	}
	for (; n < (out_sgs + in_sgs); n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT | VRING_DESC_F_WRITE);
			desc[i].addr = cpu_to_virtio64(_vq->vdev, sg_phys(sg));
			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
			prev = i;
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		}
	}
	/* Last one doesn't continue. */
	desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);

	/* Update free pointer */
	if (indirect)
		vq->free_head = virtio16_to_cpu(_vq->vdev, vq->vring.desc[head].next);
	else
		vq->free_head = i;

	/* Set token. */
	vq->data[head] = data;

	/* Put entry in available array (but don't update avail->idx until they
	 * do sync). */
	avail = vq->avail_idx_shadow & (vq->vring.num - 1);
	vq->vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);

	/* Descriptors and available array need to be set before we expose the
	 * new available array entries. */
	virtio_wmb(vq->weak_barriers);
	vq->avail_idx_shadow++;
	vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow);
	vq->num_added++;

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	/* This is very unlikely, but theoretically possible.  Kick
	 * just in case. */
	if (unlikely(vq->num_added == (1 << 16) - 1))
		virtqueue_kick(_vq);

	return 0;
}

/**
 * virtqueue_add_sgs - expose buffers to other end
 * @_vq: the struct virtqueue we're talking about.
 * @sgs: array of terminated scatterlists.
 * @out_sgs: the number of scatterlists readable by other side
 * @in_sgs: the number of scatterlists which are writable (after readable ones)
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (i.e. -ENOSPC, -ENOMEM, -EIO).
 */
int virtqueue_add_sgs(struct virtqueue *_vq,
		      struct scatterlist *sgs[],
		      unsigned int out_sgs,
		      unsigned int in_sgs,
		      void *data,
		      gfp_t gfp)
{
	unsigned int i, total_sg = 0;

	/* Count them first. */
	for (i = 0; i < out_sgs + in_sgs; i++) {
		struct scatterlist *sg;
		for (sg = sgs[i]; sg; sg = sg_next(sg))
			total_sg++;
	}
	return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
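
/*
 * Usage sketch (hypothetical driver code; "req", "resp" and the error
 * handling are illustrative only): queue one device-readable request
 * followed by one device-writable response, then kick:
 *
 *	struct scatterlist hdr, status;
 *	struct scatterlist *sgs[] = { &hdr, &status };
 *
 *	sg_init_one(&hdr, req, sizeof(*req));
 *	sg_init_one(&status, resp, sizeof(*resp));
 *	err = virtqueue_add_sgs(vq, sgs, 1, 1, req, GFP_ATOMIC);
 *	if (!err)
 *		virtqueue_kick(vq);
 */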
/**
 * virtqueue_add_outbuf - expose output buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg readable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (i.e. -ENOSPC, -ENOMEM, -EIO).
 */
int virtqueue_add_outbuf(struct virtqueue *vq,
			 struct scatterlist *sg, unsigned int num,
			 void *data,
			 gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 1, 0, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);

/**
 * virtqueue_add_inbuf - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (i.e. -ENOSPC, -ENOMEM, -EIO).
 */
int virtqueue_add_inbuf(struct virtqueue *vq,
			struct scatterlist *sg, unsigned int num,
			void *data,
			gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 0, 1, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);
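
/*
 * Usage sketch (hypothetical receive-side refill; the buffer size and
 * allocation policy are illustrative only): keep adding device-writable
 * buffers until the ring is full, then kick once for the whole batch:
 *
 *	struct scatterlist sg;
 *	void *buf;
 *
 *	while (vq->num_free) {
 *		buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
 *		if (!buf)
 *			break;
 *		sg_init_one(&sg, buf, PAGE_SIZE);
 *		if (virtqueue_add_inbuf(vq, &sg, 1, buf, GFP_KERNEL)) {
 *			kfree(buf);
 *			break;
 *		}
 *	}
 *	virtqueue_kick(vq);
 */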
/**
 * virtqueue_kick_prepare - first half of split virtqueue_kick call.
 * @_vq: the struct virtqueue
 *
 * Instead of virtqueue_kick(), you can do:
 *	if (virtqueue_kick_prepare(vq))
 *		virtqueue_notify(vq);
 *
 * This is sometimes useful because the virtqueue_kick_prepare() needs
 * to be serialized, but the actual virtqueue_notify() call does not.
 */
bool virtqueue_kick_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 new, old;
	bool needs_kick;

	START_USE(vq);
	/* We need to expose available array entries before checking avail
	 * event. */
	virtio_mb(vq->weak_barriers);

	old = vq->avail_idx_shadow - vq->num_added;
	new = vq->avail_idx_shadow;
	vq->num_added = 0;

#ifdef DEBUG
	if (vq->last_add_time_valid) {
		WARN_ON(ktime_to_ms(ktime_sub(ktime_get(),
					      vq->last_add_time)) > 100);
	}
	vq->last_add_time_valid = false;
#endif

	if (vq->event) {
		needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev, vring_avail_event(&vq->vring)),
					      new, old);
	} else {
		needs_kick = !(vq->vring.used->flags & cpu_to_virtio16(_vq->vdev, VRING_USED_F_NO_NOTIFY));
	}
	END_USE(vq);
	return needs_kick;
}
EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);

/**
 * virtqueue_notify - second half of split virtqueue_kick call.
 * @_vq: the struct virtqueue
 *
 * This does not need to be serialized.
 *
 * Returns false if host notify failed or queue is broken, otherwise true.
 */
bool virtqueue_notify(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (unlikely(vq->broken))
		return false;

	/* Prod other side to tell it about changes. */
	if (!vq->notify(_vq)) {
		vq->broken = true;
		return false;
	}
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_notify);

/**
 * virtqueue_kick - update after add_buf
 * @vq: the struct virtqueue
 *
 * After one or more virtqueue_add_* calls, invoke this to kick
 * the other side.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns false if kick failed, otherwise true.
 */
bool virtqueue_kick(struct virtqueue *vq)
{
	if (virtqueue_kick_prepare(vq))
		return virtqueue_notify(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_kick);
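
/*
 * Usage sketch of the split kick (hypothetical caller; "lock" stands
 * for whatever already serializes this virtqueue): the prepare step
 * needs the same serialization as virtqueue_add_*, but the potentially
 * expensive exit to the host does not, so it can run unlocked:
 *
 *	spin_lock_irqsave(&lock, flags);
 *	err = virtqueue_add_outbuf(vq, &sg, 1, data, GFP_ATOMIC);
 *	kick = virtqueue_kick_prepare(vq);
 *	spin_unlock_irqrestore(&lock, flags);
 *	if (kick)
 *		virtqueue_notify(vq);
 */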
static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
{
	unsigned int i;

	/* Clear data ptr. */
	vq->data[head] = NULL;

	/* Put back on free list: find end */
	i = head;

	/* Free the indirect table */
	if (vq->vring.desc[i].flags & cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_INDIRECT))
		kfree(phys_to_virt(virtio64_to_cpu(vq->vq.vdev, vq->vring.desc[i].addr)));

	while (vq->vring.desc[i].flags & cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT)) {
		i = virtio16_to_cpu(vq->vq.vdev, vq->vring.desc[i].next);
		vq->vq.num_free++;
	}

	vq->vring.desc[i].next = cpu_to_virtio16(vq->vq.vdev, vq->free_head);
	vq->free_head = head;
	/* Plus final descriptor */
	vq->vq.num_free++;
}

static inline bool more_used(const struct vring_virtqueue *vq)
{
	return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev, vq->vring.used->idx);
}

/**
 * virtqueue_get_buf - get the next used buffer
 * @_vq: the struct virtqueue we're talking about.
 * @len: the length written into the buffer
 *
 * If the device wrote data into the buffer, @len will be set to the
 * amount written.  This means you don't need to clear the buffer
 * beforehand to ensure there's no data leakage in the case of short
 * writes.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns NULL if there are no used buffers, or the "data" token
 * handed to virtqueue_add_*().
 */
void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	void *ret;
	unsigned int i;
	u16 last_used;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}
	/* Only get used array entries after they have been exposed by host. */
	virtio_rmb(vq->weak_barriers);

	last_used = (vq->last_used_idx & (vq->vring.num - 1));
	i = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].id);
	*len = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].len);

	if (unlikely(i >= vq->vring.num)) {
		BAD_RING(vq, "id %u out of range\n", i);
		return NULL;
	}
	if (unlikely(!vq->data[i])) {
		BAD_RING(vq, "id %u is not a head!\n", i);
		return NULL;
	}

	/* detach_buf clears data, so grab it now. */
	ret = vq->data[i];
	detach_buf(vq, i);
	vq->last_used_idx++;
	/* If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call. */
	if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT))
		virtio_store_mb(vq->weak_barriers,
				&vring_used_event(&vq->vring),
				cpu_to_virtio16(_vq->vdev, vq->last_used_idx));

#ifdef DEBUG
	vq->last_add_time_valid = false;
#endif

	END_USE(vq);
	return ret;
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf);

/**
 * virtqueue_disable_cb - disable callbacks
 * @_vq: the struct virtqueue we're talking about.
 *
 * Note that this is not necessarily synchronous, hence unreliable and only
 * useful as an optimization.
 *
 * Unlike other operations, this need not be serialized.
 */
void virtqueue_disable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
		vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
		vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
	}
}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
/**
 * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns current queue state
 * in an opaque unsigned value.  This value should be later tested by
 * virtqueue_poll, to detect a possible race between the driver checking for
 * more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 last_used_idx;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry.  Always do both to keep code simple. */
	if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
	}
	vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, last_used_idx = vq->last_used_idx);
	END_USE(vq);
	return last_used_idx;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);

/**
 * virtqueue_poll - query pending used buffers
 * @_vq: the struct virtqueue we're talking about.
 * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
 *
 * Returns "true" if there are pending used buffers in the queue.
 *
 * This does not need to be serialized.
 */
bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	virtio_mb(vq->weak_barriers);
	return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev, vq->vring.used->idx);
}
EXPORT_SYMBOL_GPL(virtqueue_poll);
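
/*
 * Usage sketch of the prepare/poll pair (hypothetical poll loop, e.g.
 * NAPI-style; "opaque" is just a local variable): the opaque value from
 * virtqueue_enable_cb_prepare() is handed back to virtqueue_poll() to
 * close the race between deciding there is no more work and interrupts
 * actually being re-enabled.  If virtqueue_poll() returns true, more
 * buffers arrived in the window, so disable callbacks and keep polling:
 *
 *	opaque = virtqueue_enable_cb_prepare(vq);
 *	if (virtqueue_poll(vq, opaque))
 *		virtqueue_disable_cb(vq);
 */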
/**
 * virtqueue_enable_cb - restart callbacks after disable_cb.
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns "false" if there are pending
 * buffers in the queue, to detect a possible race between the driver
 * checking for more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb(struct virtqueue *_vq)
{
	unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);
	return !virtqueue_poll(_vq, last_used_idx);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
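
/*
 * Usage sketch of a callback drain loop (hypothetical driver;
 * process() is made up): the return value of virtqueue_enable_cb()
 * catches buffers that were used while callbacks were disabled:
 *
 *	do {
 *		virtqueue_disable_cb(vq);
 *		while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
 *			process(buf, len);
 *	} while (!virtqueue_enable_cb(vq));
 */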
/**
 * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks but hints to the other side to delay
 * interrupts until most of the available buffers have been processed;
 * it returns "false" if there are many pending buffers in the queue,
 * to detect a possible race between the driver checking for more work,
 * and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 bufs;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry.  Always do both to keep code simple. */
	if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
	}
	/* TODO: tune this threshold */
	bufs = (u16)(vq->avail_idx_shadow - vq->last_used_idx) * 3 / 4;

	virtio_store_mb(vq->weak_barriers,
			&vring_used_event(&vq->vring),
			cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs));

	if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->vring.used->idx) - vq->last_used_idx) > bufs)) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);

/**
 * virtqueue_detach_unused_buf - detach first unused buffer
 * @_vq: the struct virtqueue we're talking about.
 *
 * Returns NULL or the "data" token handed to virtqueue_add_*().
 * This is not valid on an active queue; it is useful only for device
 * shutdown.
 */
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i;
	void *buf;

	START_USE(vq);

	for (i = 0; i < vq->vring.num; i++) {
		if (!vq->data[i])
			continue;
		/* detach_buf clears data, so grab it now. */
		buf = vq->data[i];
		detach_buf(vq, i);
		vq->avail_idx_shadow--;
		vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow);
		END_USE(vq);
		return buf;
	}
	/* That should have freed everything. */
	BUG_ON(vq->vq.num_free != vq->vring.num);

	END_USE(vq);
	return NULL;
}
EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
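
/*
 * Usage sketch (hypothetical removal path; free_buf() is made up):
 * after the device has been reset, so the other side can no longer use
 * the queue, reclaim buffers it never consumed:
 *
 *	while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
 *		free_buf(buf);
 */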
irqreturn_t vring_interrupt(int irq, void *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!more_used(vq)) {
		pr_debug("virtqueue interrupt with no work for %p\n", vq);
		return IRQ_NONE;
	}

	if (unlikely(vq->broken))
		return IRQ_HANDLED;

	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
	if (vq->vq.callback)
		vq->vq.callback(&vq->vq);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);

struct virtqueue *vring_new_virtqueue(unsigned int index,
				      unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      bool weak_barriers,
				      void *pages,
				      bool (*notify)(struct virtqueue *),
				      void (*callback)(struct virtqueue *),
				      const char *name)
{
	struct vring_virtqueue *vq;
	unsigned int i;

	/* We assume num is a power of 2. */
	if (num & (num - 1)) {
		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
		return NULL;
	}

	vq = kmalloc(sizeof(*vq) + sizeof(void *)*num, GFP_KERNEL);
	if (!vq)
		return NULL;

	vring_init(&vq->vring, num, pages, vring_align);
	vq->vq.callback = callback;
	vq->vq.vdev = vdev;
	vq->vq.name = name;
	vq->vq.num_free = num;
	vq->vq.index = index;
	vq->notify = notify;
	vq->weak_barriers = weak_barriers;
	vq->broken = false;
	vq->last_used_idx = 0;
	vq->avail_flags_shadow = 0;
	vq->avail_idx_shadow = 0;
	vq->num_added = 0;
	list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
	vq->in_use = false;
	vq->last_add_time_valid = false;
#endif

	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC);
	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

	/* No callback?  Tell other side not to bother us. */
	if (!callback) {
		vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
		vq->vring.avail->flags = cpu_to_virtio16(vdev, vq->avail_flags_shadow);
	}

	/* Put everything in free lists. */
	vq->free_head = 0;
	for (i = 0; i < num-1; i++) {
		vq->vring.desc[i].next = cpu_to_virtio16(vdev, i + 1);
		vq->data[i] = NULL;
	}
	vq->data[i] = NULL;

	return &vq->vq;
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);

void vring_del_virtqueue(struct virtqueue *vq)
{
	list_del(&vq->list);
	kfree(to_vvq(vq));
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);

/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
	unsigned int i;

	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
		switch (i) {
		case VIRTIO_RING_F_INDIRECT_DESC:
			break;
		case VIRTIO_RING_F_EVENT_IDX:
			break;
		case VIRTIO_F_VERSION_1:
			break;
		default:
			/* We don't understand this bit. */
			__virtio_clear_bit(vdev, i);
		}
	}
}
EXPORT_SYMBOL_GPL(vring_transport_features);

/**
 * virtqueue_get_vring_size - return the size of the virtqueue's vring
 * @_vq: the struct virtqueue containing the vring of interest.
 *
 * Returns the size of the vring.  This is mainly used for boasting to
 * userspace.  Unlike other operations, this need not be serialized.
 */
unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->vring.num;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);

bool virtqueue_is_broken(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->broken;
}
EXPORT_SYMBOL_GPL(virtqueue_is_broken);
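
/*
 * Usage sketch (hypothetical synchronous wait): a driver busy-waiting
 * for a completion can use virtqueue_is_broken() to avoid spinning
 * forever on a dead device:
 *
 *	while (!virtqueue_get_buf(vq, &len) && !virtqueue_is_broken(vq))
 *		cpu_relax();
 */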
/*
 * This should prevent the device from being used, allowing drivers to
 * recover.  You may need to grab appropriate locks to flush.
 */
void virtio_break_device(struct virtio_device *dev)
{
	struct virtqueue *_vq;

	list_for_each_entry(_vq, &dev->vqs, list) {
		struct vring_virtqueue *vq = to_vvq(_vq);
		vq->broken = true;
	}
}
EXPORT_SYMBOL_GPL(virtio_break_device);

void *virtqueue_get_avail(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->vring.avail;
}
EXPORT_SYMBOL_GPL(virtqueue_get_avail);

void *virtqueue_get_used(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->vring.used;
}
EXPORT_SYMBOL_GPL(virtqueue_get_used);

MODULE_LICENSE("GPL");