/* Virtio ring implementation.
 *
 *  Copyright 2007 Rusty Russell IBM Corporation
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301 USA
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/kmemleak.h>
#include <linux/dma-mapping.h>
#include <xen/xen.h>

#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		BUG();						\
	} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)						\
	do {							\
		if ((_vq)->in_use)				\
			panic("%s:in_use = %i\n",		\
			      (_vq)->vq.name, (_vq)->in_use);	\
		(_vq)->in_use = __LINE__;			\
	} while (0)
#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
#else
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&_vq->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		(_vq)->broken = true;				\
	} while (0)
#define START_USE(vq)
#define END_USE(vq)
#endif

struct vring_desc_state {
	void *data;			/* Data for callback. */
	struct vring_desc *indir_desc;	/* Indirect descriptor, if any. */
};

struct vring_virtqueue {
	struct virtqueue vq;

	/* Actual memory layout for this queue */
	struct vring vring;

	/* Can we use weak barriers? */
	bool weak_barriers;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Host supports indirect buffers */
	bool indirect;

	/* Host publishes avail event idx */
	bool event;

	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen. */
	u16 last_used_idx;

	/* Last written value to avail->flags */
	u16 avail_flags_shadow;

	/* Last written value to avail->idx in guest byte order */
	u16 avail_idx_shadow;

	/* How to notify other side. FIXME: commonalize hcalls! */
	bool (*notify)(struct virtqueue *vq);

	/* DMA, allocation, and size information */
	bool we_own_ring;
	size_t queue_size_in_bytes;
	dma_addr_t queue_dma_addr;

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;

	/* Figure out if their kicks are too delayed. */
	bool last_add_time_valid;
	ktime_t last_add_time;
#endif

	/* Per-descriptor state. */
	struct vring_desc_state desc_state[];
};

#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)
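
/*
 * Illustrative sketch (not part of this file): because struct virtqueue
 * is embedded inside struct vring_virtqueue, to_vvq() recovers the
 * private state from the public handle that drivers pass around.  The
 * function name below is hypothetical:
 *
 *	static unsigned int example_ring_num(struct virtqueue *_vq)
 *	{
 *		struct vring_virtqueue *vq = to_vvq(_vq);
 *
 *		return vq->vring.num;
 *	}
 */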

/*
 * The interaction between virtio and a possible IOMMU is a mess.
 *
 * On most systems with virtio, physical addresses match bus addresses,
 * and it doesn't particularly matter whether we use the DMA API.
 *
 * On some systems, including Xen and any system with a physical device
 * that speaks virtio behind a physical IOMMU, we must use the DMA API
 * for virtio DMA to work at all.
 *
 * On other systems, including SPARC and PPC64, virtio-pci devices are
 * enumerated as though they are behind an IOMMU, but the virtio host
 * ignores the IOMMU, so we must either pretend that the IOMMU isn't
 * there or somehow map everything as the identity.
 *
 * For the time being, we preserve historic behavior and bypass the DMA
 * API.
 */

static bool vring_use_dma_api(struct virtio_device *vdev)
{
	/*
	 * In theory, it's possible to have a buggy QEMU-supplied
	 * emulated Q35 IOMMU and Xen enabled at the same time.  On
	 * such a configuration, virtio has never worked and will
	 * not work without an even larger kludge.  Instead, enable
	 * the DMA API if we're a Xen guest, which at least allows
	 * all of the sensible Xen configurations to work correctly.
	 */
	if (xen_domain())
		return true;

	return false;
}

/*
 * The DMA ops on various arches are rather gnarly right now, and
 * making all of the arch DMA ops work on the vring device itself
 * is a mess.  For now, we use the parent device for DMA ops.
 */
static struct device *vring_dma_dev(const struct vring_virtqueue *vq)
{
	return vq->vq.vdev->dev.parent;
}

/* Map one sg entry. */
static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
				   struct scatterlist *sg,
				   enum dma_data_direction direction)
{
	if (!vring_use_dma_api(vq->vq.vdev))
		return (dma_addr_t)sg_phys(sg);

	/*
	 * We can't use dma_map_sg, because we don't use scatterlists in
	 * the way it expects (we don't guarantee that the scatterlist
	 * will exist for the lifetime of the mapping).
	 */
	return dma_map_page(vring_dma_dev(vq),
			    sg_page(sg), sg->offset, sg->length,
			    direction);
}

static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
				   void *cpu_addr, size_t size,
				   enum dma_data_direction direction)
{
	if (!vring_use_dma_api(vq->vq.vdev))
		return (dma_addr_t)virt_to_phys(cpu_addr);

	return dma_map_single(vring_dma_dev(vq),
			      cpu_addr, size, direction);
}

static void vring_unmap_one(const struct vring_virtqueue *vq,
			    struct vring_desc *desc)
{
	u16 flags;

	if (!vring_use_dma_api(vq->vq.vdev))
		return;

	flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);

	if (flags & VRING_DESC_F_INDIRECT) {
		dma_unmap_single(vring_dma_dev(vq),
				 virtio64_to_cpu(vq->vq.vdev, desc->addr),
				 virtio32_to_cpu(vq->vq.vdev, desc->len),
				 (flags & VRING_DESC_F_WRITE) ?
				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
	} else {
		dma_unmap_page(vring_dma_dev(vq),
			       virtio64_to_cpu(vq->vq.vdev, desc->addr),
			       virtio32_to_cpu(vq->vq.vdev, desc->len),
			       (flags & VRING_DESC_F_WRITE) ?
			       DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}
}

static int vring_mapping_error(const struct vring_virtqueue *vq,
			       dma_addr_t addr)
{
	if (!vring_use_dma_api(vq->vq.vdev))
		return 0;

	return dma_mapping_error(vring_dma_dev(vq), addr);
}
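
/*
 * Illustrative sketch (not from this file): the helpers above are
 * always used in a map / check / unwind-on-error pattern, e.g. when
 * filling a descriptor from a scatterlist entry:
 *
 *	dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);
 *
 *	if (vring_mapping_error(vq, addr))
 *		goto unmap_release;	(unwind with vring_unmap_one())
 *
 * When vring_use_dma_api() is false, the "mapping" is just the
 * physical address and the error check is a no-op, so the pattern
 * costs nothing on the historic bypass path.
 */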

static struct vring_desc *alloc_indirect(struct virtqueue *_vq,
					 unsigned int total_sg, gfp_t gfp)
{
	struct vring_desc *desc;
	unsigned int i;

	/*
	 * We require lowmem mappings for the descriptors because
	 * otherwise virt_to_phys will give us bogus addresses in the
	 * virtqueue.
	 */
	gfp &= ~__GFP_HIGHMEM;

	desc = kmalloc(total_sg * sizeof(struct vring_desc), gfp);
	if (!desc)
		return NULL;

	for (i = 0; i < total_sg; i++)
		desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1);
	return desc;
}
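
/*
 * Illustrative sketch (not from this file): for total_sg == 3 the table
 * returned above comes back pre-linked, and virtqueue_add() then fills
 * in addr/len/flags for each entry:
 *
 *	desc[0].next = 1;
 *	desc[1].next = 2;
 *	desc[2].next = 3;	(never followed: the last entry has its
 *				 VRING_DESC_F_NEXT flag cleared)
 *
 * The whole table is later published to the device through a single
 * ring descriptor carrying VRING_DESC_F_INDIRECT.
 */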

static inline int virtqueue_add(struct virtqueue *_vq,
				struct scatterlist *sgs[],
				unsigned int total_sg,
				unsigned int out_sgs,
				unsigned int in_sgs,
				void *data,
				gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	struct scatterlist *sg;
	struct vring_desc *desc;
	unsigned int i, n, avail, descs_used, uninitialized_var(prev), err_idx;
	int head;
	bool indirect;

	START_USE(vq);

	BUG_ON(data == NULL);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return -EIO;
	}

#ifdef DEBUG
	{
		ktime_t now = ktime_get();

		/* No kick or get, with .1 second between?  Warn. */
		if (vq->last_add_time_valid)
			WARN_ON(ktime_to_ms(ktime_sub(now, vq->last_add_time))
					    > 100);
		vq->last_add_time = now;
		vq->last_add_time_valid = true;
	}
#endif

	BUG_ON(total_sg > vq->vring.num);
	BUG_ON(total_sg == 0);

	head = vq->free_head;

	/* If the host supports indirect descriptor tables, and we have multiple
	 * buffers, then go indirect. FIXME: tune this threshold */
	if (vq->indirect && total_sg > 1 && vq->vq.num_free)
		desc = alloc_indirect(_vq, total_sg, gfp);
	else
		desc = NULL;

	if (desc) {
		/* Use a single buffer which doesn't continue */
		indirect = true;
		/* Set up rest to use this indirect table. */
		i = 0;
		descs_used = 1;
	} else {
		indirect = false;
		desc = vq->vring.desc;
		i = head;
		descs_used = total_sg;
	}

	if (vq->vq.num_free < descs_used) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 descs_used, vq->vq.num_free);
		/* FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer.  Presumably the
		 * host should service the ring ASAP. */
		if (out_sgs)
			vq->notify(&vq->vq);
		END_USE(vq);
		return -ENOSPC;
	}

	for (n = 0; n < out_sgs; n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT);
			desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
			prev = i;
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		}
	}
	for (; n < (out_sgs + in_sgs); n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT | VRING_DESC_F_WRITE);
			desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
			prev = i;
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		}
	}
	/* Last one doesn't continue. */
	desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);

	if (indirect) {
		/* Now that the indirect table is filled in, map it. */
		dma_addr_t addr = vring_map_single(
			vq, desc, total_sg * sizeof(struct vring_desc),
			DMA_TO_DEVICE);
		if (vring_mapping_error(vq, addr))
			goto unmap_release;

		vq->vring.desc[head].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_INDIRECT);
		vq->vring.desc[head].addr = cpu_to_virtio64(_vq->vdev, addr);

		vq->vring.desc[head].len = cpu_to_virtio32(_vq->vdev, total_sg * sizeof(struct vring_desc));
	}

	/* We're using some buffers from the free list. */
	vq->vq.num_free -= descs_used;

	/* Update free pointer */
	if (indirect)
		vq->free_head = virtio16_to_cpu(_vq->vdev, vq->vring.desc[head].next);
	else
		vq->free_head = i;

	/* Store token and indirect buffer state. */
	vq->desc_state[head].data = data;
	if (indirect)
		vq->desc_state[head].indir_desc = desc;

	/* Put entry in available array (but don't update avail->idx until they
	 * do sync). */
	avail = vq->avail_idx_shadow & (vq->vring.num - 1);
	vq->vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);

	/* Descriptors and available array need to be set before we expose the
	 * new available array entries. */
	virtio_wmb(vq->weak_barriers);
	vq->avail_idx_shadow++;
	vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow);
	vq->num_added++;

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	/* This is very unlikely, but theoretically possible.  Kick
	 * just in case. */
	if (unlikely(vq->num_added == (1 << 16) - 1))
		virtqueue_kick(_vq);

	return 0;

unmap_release:
	err_idx = i;
	i = head;

	for (n = 0; n < total_sg; n++) {
		if (i == err_idx)
			break;
		vring_unmap_one(vq, &desc[i]);
		i = vq->vring.desc[i].next;
	}

	vq->vq.num_free += total_sg;

	if (indirect)
		kfree(desc);

	return -EIO;
}
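
/*
 * Worked example (sketch, not from this file): adding one buffer with
 * two readable and one writable sg entries on an empty ring, without
 * indirect descriptors, consumes descriptors 0..2 as a chain and then
 * publishes the head:
 *
 *	desc[0] = { .addr = A0, .len = L0, .flags = NEXT,  .next = 1 };
 *	desc[1] = { .addr = A1, .len = L1, .flags = NEXT,  .next = 2 };
 *	desc[2] = { .addr = A2, .len = L2, .flags = WRITE           };
 *	avail->ring[old_idx & (num - 1)] = 0;
 *	virtio_wmb(...);		(descriptors visible first)
 *	avail->idx = old_idx + 1;
 *
 * Field values are shown in CPU byte order for readability; the real
 * code stores them via cpu_to_virtio{16,32,64}().
 */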

/**
 * virtqueue_add_sgs - expose buffers to other end
 * @_vq: the struct virtqueue we're talking about.
 * @sgs: array of terminated scatterlists.
 * @out_sgs: the number of scatterlists readable by other side
 * @in_sgs: the number of scatterlists which are writable (after readable ones)
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (i.e. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_sgs(struct virtqueue *_vq,
		      struct scatterlist *sgs[],
		      unsigned int out_sgs,
		      unsigned int in_sgs,
		      void *data,
		      gfp_t gfp)
{
	unsigned int i, total_sg = 0;

	/* Count them first. */
	for (i = 0; i < out_sgs + in_sgs; i++) {
		struct scatterlist *sg;
		for (sg = sgs[i]; sg; sg = sg_next(sg))
			total_sg++;
	}
	return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
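
/*
 * Usage sketch (hypothetical driver code, not part of this file): a
 * virtio-blk-style request with a readable header followed by a
 * writable data buffer and a writable status byte.  The req fields
 * and names are made up:
 *
 *	struct scatterlist hdr, data, status;
 *	struct scatterlist *sgs[] = { &hdr, &data, &status };
 *
 *	sg_init_one(&hdr, &req->hdr, sizeof(req->hdr));
 *	sg_init_one(&data, req->buf, req->len);
 *	sg_init_one(&status, &req->status, sizeof(req->status));
 *
 *	err = virtqueue_add_sgs(vq, sgs, 1, 2, req, GFP_ATOMIC);
 *	if (!err)
 *		virtqueue_kick(vq);
 */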

/**
 * virtqueue_add_outbuf - expose output buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg readable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (i.e. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_outbuf(struct virtqueue *vq,
			 struct scatterlist *sg, unsigned int num,
			 void *data,
			 gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 1, 0, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);

/**
 * virtqueue_add_inbuf - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (i.e. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_inbuf(struct virtqueue *vq,
			struct scatterlist *sg, unsigned int num,
			void *data,
			gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 0, 1, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);
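
/*
 * Usage sketch (hypothetical, not part of this file): a receive path
 * keeps the queue topped up with writable buffers for the device:
 *
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, buf, len);
 *	if (virtqueue_add_inbuf(vq, &sg, 1, buf, GFP_KERNEL) == 0)
 *		virtqueue_kick(vq);
 *
 * A return of -ENOSPC simply means the ring is full for now; the
 * driver retries after harvesting used buffers.
 */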

/**
 * virtqueue_kick_prepare - first half of split virtqueue_kick call.
 * @_vq: the struct virtqueue
 *
 * Instead of virtqueue_kick(), you can do:
 *	if (virtqueue_kick_prepare(vq))
 *		virtqueue_notify(vq);
 *
 * This is sometimes useful because the virtqueue_kick_prepare() needs
 * to be serialized, but the actual virtqueue_notify() call does not.
 */
bool virtqueue_kick_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 new, old;
	bool needs_kick;

	START_USE(vq);
	/* We need to expose available array entries before checking avail
	 * event. */
	virtio_mb(vq->weak_barriers);

	old = vq->avail_idx_shadow - vq->num_added;
	new = vq->avail_idx_shadow;
	vq->num_added = 0;

#ifdef DEBUG
	if (vq->last_add_time_valid) {
		WARN_ON(ktime_to_ms(ktime_sub(ktime_get(),
					      vq->last_add_time)) > 100);
	}
	vq->last_add_time_valid = false;
#endif

	if (vq->event) {
		needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev, vring_avail_event(&vq->vring)),
					      new, old);
	} else {
		needs_kick = !(vq->vring.used->flags & cpu_to_virtio16(_vq->vdev, VRING_USED_F_NO_NOTIFY));
	}
	END_USE(vq);
	return needs_kick;
}
EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);

/**
 * virtqueue_notify - second half of split virtqueue_kick call.
 * @_vq: the struct virtqueue
 *
 * This does not need to be serialized.
 *
 * Returns false if host notify failed or queue is broken, otherwise true.
 */
bool virtqueue_notify(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (unlikely(vq->broken))
		return false;

	/* Prod other side to tell it about changes. */
	if (!vq->notify(_vq)) {
		vq->broken = true;
		return false;
	}
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_notify);
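
/*
 * Usage sketch (hypothetical, not part of this file): the split kick
 * lets a driver drop its lock before the possibly expensive notify:
 *
 *	bool kick;
 *
 *	spin_lock_irqsave(&lock, flags);
 *	err = virtqueue_add_outbuf(vq, &sg, 1, token, GFP_ATOMIC);
 *	kick = virtqueue_kick_prepare(vq);
 *	spin_unlock_irqrestore(&lock, flags);
 *
 *	if (kick)
 *		virtqueue_notify(vq);
 */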

/**
 * virtqueue_kick - update after add_buf
 * @vq: the struct virtqueue
 *
 * After one or more virtqueue_add_* calls, invoke this to kick
 * the other side.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns false if kick failed, otherwise true.
 */
bool virtqueue_kick(struct virtqueue *vq)
{
	if (virtqueue_kick_prepare(vq))
		return virtqueue_notify(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_kick);

static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
{
	unsigned int i, j;
	u16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);

	/* Clear data ptr. */
	vq->desc_state[head].data = NULL;

	/* Put back on free list: unmap first-level descriptors and find end */
	i = head;

	while (vq->vring.desc[i].flags & nextflag) {
		vring_unmap_one(vq, &vq->vring.desc[i]);
		i = virtio16_to_cpu(vq->vq.vdev, vq->vring.desc[i].next);
		vq->vq.num_free++;
	}

	vring_unmap_one(vq, &vq->vring.desc[i]);
	vq->vring.desc[i].next = cpu_to_virtio16(vq->vq.vdev, vq->free_head);
	vq->free_head = head;

	/* Plus final descriptor */
	vq->vq.num_free++;
	/* Free the indirect table, if any, now that it's unmapped. */
	if (vq->desc_state[head].indir_desc) {
		struct vring_desc *indir_desc = vq->desc_state[head].indir_desc;
		u32 len = virtio32_to_cpu(vq->vq.vdev, vq->vring.desc[head].len);

		BUG_ON(!(vq->vring.desc[head].flags &
			 cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_INDIRECT)));
		BUG_ON(len == 0 || len % sizeof(struct vring_desc));

		for (j = 0; j < len / sizeof(struct vring_desc); j++)
			vring_unmap_one(vq, &indir_desc[j]);

		kfree(vq->desc_state[head].indir_desc);
		vq->desc_state[head].indir_desc = NULL;
	}
}

static inline bool more_used(const struct vring_virtqueue *vq)
{
	return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev, vq->vring.used->idx);
}

/**
 * virtqueue_get_buf - get the next used buffer
 * @_vq: the struct virtqueue we're talking about.
 * @len: the length written into the buffer
 *
 * If the device wrote data into the buffer, @len will be set to the
 * amount written.  This means you don't need to clear the buffer
 * beforehand to ensure there's no data leakage in the case of short
 * writes.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns NULL if there are no used buffers, or the "data" token
 * handed to virtqueue_add_*().
 */
void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	void *ret;
	unsigned int i;
	u16 last_used;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* Only get used array entries after they have been exposed by host. */
	virtio_rmb(vq->weak_barriers);

	last_used = (vq->last_used_idx & (vq->vring.num - 1));
	i = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].id);
	*len = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].len);

	if (unlikely(i >= vq->vring.num)) {
		BAD_RING(vq, "id %u out of range\n", i);
		return NULL;
	}
	if (unlikely(!vq->desc_state[i].data)) {
		BAD_RING(vq, "id %u is not a head!\n", i);
		return NULL;
	}

	/* detach_buf clears data, so grab it now. */
	ret = vq->desc_state[i].data;
	detach_buf(vq, i);
	vq->last_used_idx++;
	/* If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call. */
	if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT))
		virtio_store_mb(vq->weak_barriers,
				&vring_used_event(&vq->vring),
				cpu_to_virtio16(_vq->vdev, vq->last_used_idx));

#ifdef DEBUG
	vq->last_add_time_valid = false;
#endif

	END_USE(vq);
	return ret;
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf);
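
/*
 * Usage sketch (hypothetical, not part of this file): a completion
 * path drains all used buffers in one pass:
 *
 *	unsigned int len;
 *	void *token;
 *
 *	while ((token = virtqueue_get_buf(vq, &len)) != NULL)
 *		complete_request(token, len);	(made-up helper)
 */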

/**
 * virtqueue_disable_cb - disable callbacks
 * @_vq: the struct virtqueue we're talking about.
 *
 * Note that this is not necessarily synchronous, hence unreliable and only
 * useful as an optimization.
 *
 * Unlike other operations, this need not be serialized.
 */
void virtqueue_disable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
		vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
		vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
	}
}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);

/**
 * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns current queue state
 * in an opaque unsigned value. This value should be later tested by
 * virtqueue_poll, to detect a possible race between the driver checking for
 * more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 last_used_idx;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
	}
	vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, last_used_idx = vq->last_used_idx);
	END_USE(vq);
	return last_used_idx;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);

/**
 * virtqueue_poll - query pending used buffers
 * @_vq: the struct virtqueue we're talking about.
 * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
 *
 * Returns "true" if there are pending used buffers in the queue.
 *
 * This does not need to be serialized.
 */
bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	virtio_mb(vq->weak_barriers);
	return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev, vq->vring.used->idx);
}
EXPORT_SYMBOL_GPL(virtqueue_poll);
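
/*
 * Usage sketch (hypothetical, not part of this file): the prepare/poll
 * split lets a driver re-enable callbacks and then close the race
 * without serializing the final check:
 *
 *	unsigned state = virtqueue_enable_cb_prepare(vq);
 *
 *	if (virtqueue_poll(vq, state)) {
 *		virtqueue_disable_cb(vq);
 *		goto process_more;	(buffers arrived meanwhile)
 *	}
 */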

/**
 * virtqueue_enable_cb - restart callbacks after disable_cb.
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns "false" if there are pending
 * buffers in the queue, to detect a possible race between the driver
 * checking for more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb(struct virtqueue *_vq)
{
	unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);
	return !virtqueue_poll(_vq, last_used_idx);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);

/**
 * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks but hints to the other side to delay
 * interrupts until most of the available buffers have been processed;
 * it returns "false" if there are many pending buffers in the queue,
 * to detect a possible race between the driver checking for more work,
 * and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 bufs;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
	}
	/* TODO: tune this threshold */
	bufs = (u16)(vq->avail_idx_shadow - vq->last_used_idx) * 3 / 4;

	virtio_store_mb(vq->weak_barriers,
			&vring_used_event(&vq->vring),
			cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs));

	if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->vring.used->idx) - vq->last_used_idx) > bufs)) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
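
/*
 * Usage sketch (hypothetical, not part of this file): the canonical
 * interrupt-mitigation loop built from the primitives above:
 *
 *	virtqueue_disable_cb(vq);
 *	do {
 *		while ((token = virtqueue_get_buf(vq, &len)))
 *			process(token, len);	(made-up helper)
 *	} while (!virtqueue_enable_cb(vq));
 */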

/**
 * virtqueue_detach_unused_buf - detach first unused buffer
 * @_vq: the struct virtqueue we're talking about.
 *
 * Returns NULL or the "data" token handed to virtqueue_add_*().
 * This is not valid on an active queue; it is useful only for device
 * shutdown.
 */
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i;
	void *buf;

	START_USE(vq);

	for (i = 0; i < vq->vring.num; i++) {
		if (!vq->desc_state[i].data)
			continue;
		/* detach_buf clears data, so grab it now. */
		buf = vq->desc_state[i].data;
		detach_buf(vq, i);
		vq->avail_idx_shadow--;
		vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow);
		END_USE(vq);
		return buf;
	}
	/* That should have freed everything. */
	BUG_ON(vq->vq.num_free != vq->vring.num);

	END_USE(vq);
	return NULL;
}
EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);

irqreturn_t vring_interrupt(int irq, void *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!more_used(vq)) {
		pr_debug("virtqueue interrupt with no work for %p\n", vq);
		return IRQ_NONE;
	}

	if (unlikely(vq->broken))
		return IRQ_HANDLED;

	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
	if (vq->vq.callback)
		vq->vq.callback(&vq->vq);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);
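
/*
 * Usage sketch (hypothetical, not part of this file): transports wire
 * vring_interrupt() up as (or call it from) their interrupt handler,
 * for example:
 *
 *	err = request_irq(irq, vring_interrupt, IRQF_SHARED,
 *			  dev_name(&vdev->dev), vq);
 *
 * The IRQF_SHARED flag and the name argument are illustrative only.
 */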

struct virtqueue *__vring_new_virtqueue(unsigned int index,
					struct vring vring,
					struct virtio_device *vdev,
					bool weak_barriers,
					bool (*notify)(struct virtqueue *),
					void (*callback)(struct virtqueue *),
					const char *name)
{
	unsigned int i;
	struct vring_virtqueue *vq;

	vq = kmalloc(sizeof(*vq) + vring.num * sizeof(struct vring_desc_state),
		     GFP_KERNEL);
	if (!vq)
		return NULL;

	vq->vring = vring;
	vq->vq.callback = callback;
	vq->vq.vdev = vdev;
	vq->vq.name = name;
	vq->vq.num_free = vring.num;
	vq->vq.index = index;
	vq->we_own_ring = false;
	vq->queue_dma_addr = 0;
	vq->queue_size_in_bytes = 0;
	vq->notify = notify;
	vq->weak_barriers = weak_barriers;
	vq->broken = false;
	vq->last_used_idx = 0;
	vq->avail_flags_shadow = 0;
	vq->avail_idx_shadow = 0;
	vq->num_added = 0;
	list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
	vq->in_use = false;
	vq->last_add_time_valid = false;
#endif

	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC);
	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

	/* No callback?  Tell other side not to bother us. */
	if (!callback) {
		vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
		vq->vring.avail->flags = cpu_to_virtio16(vdev, vq->avail_flags_shadow);
	}

	/* Put everything in free lists. */
	vq->free_head = 0;
	for (i = 0; i < vring.num-1; i++)
		vq->vring.desc[i].next = cpu_to_virtio16(vdev, i + 1);
	memset(vq->desc_state, 0, vring.num * sizeof(struct vring_desc_state));

	return &vq->vq;
}
EXPORT_SYMBOL_GPL(__vring_new_virtqueue);

static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
			       dma_addr_t *dma_handle, gfp_t flag)
{
	if (vring_use_dma_api(vdev)) {
		return dma_alloc_coherent(vdev->dev.parent, size,
					  dma_handle, flag);
	} else {
		void *queue = alloc_pages_exact(PAGE_ALIGN(size), flag);
		if (queue) {
			phys_addr_t phys_addr = virt_to_phys(queue);
			*dma_handle = (dma_addr_t)phys_addr;

			/*
			 * Sanity check: make sure we didn't truncate
			 * the address.  The only arches I can find that
			 * have 64-bit phys_addr_t but 32-bit dma_addr_t
			 * are certain non-highmem MIPS and x86
			 * configurations, but these configurations
			 * should never allocate physical pages above 32
			 * bits, so this is fine.  Just in case, throw a
			 * warning and abort if we end up with an
			 * unrepresentable address.
			 */
			if (WARN_ON_ONCE(*dma_handle != phys_addr)) {
				free_pages_exact(queue, PAGE_ALIGN(size));
				return NULL;
			}
		}
		return queue;
	}
}

static void vring_free_queue(struct virtio_device *vdev, size_t size,
			     void *queue, dma_addr_t dma_handle)
{
	if (vring_use_dma_api(vdev)) {
		dma_free_coherent(vdev->dev.parent, size, queue, dma_handle);
	} else {
		free_pages_exact(queue, PAGE_ALIGN(size));
	}
}

struct virtqueue *vring_create_virtqueue(
	unsigned int index,
	unsigned int num,
	unsigned int vring_align,
	struct virtio_device *vdev,
	bool weak_barriers,
	bool may_reduce_num,
	bool (*notify)(struct virtqueue *),
	void (*callback)(struct virtqueue *),
	const char *name)
{
	struct virtqueue *vq;
	void *queue;
	dma_addr_t dma_addr;
	size_t queue_size_in_bytes;
	struct vring vring;

	/* We assume num is a power of 2. */
	if (num & (num - 1)) {
		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
		return NULL;
	}

	/* TODO: allocate each queue chunk individually */
	for (; num && vring_size(num, vring_align) > PAGE_SIZE; num /= 2) {
		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
					  &dma_addr,
					  GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
		if (queue)
			break;
		if (!may_reduce_num)
			return NULL;
	}

	if (!num)
		return NULL;

	if (!queue) {
		/* Try to get a single page. You are my only hope! */
		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
					  &dma_addr, GFP_KERNEL|__GFP_ZERO);
	}
	if (!queue)
		return NULL;

	queue_size_in_bytes = vring_size(num, vring_align);
	vring_init(&vring, num, queue, vring_align);

	vq = __vring_new_virtqueue(index, vring, vdev, weak_barriers,
				   notify, callback, name);
	if (!vq) {
		vring_free_queue(vdev, queue_size_in_bytes, queue,
				 dma_addr);
		return NULL;
	}

	to_vvq(vq)->queue_dma_addr = dma_addr;
	to_vvq(vq)->queue_size_in_bytes = queue_size_in_bytes;
	to_vvq(vq)->we_own_ring = true;

	return vq;
}
EXPORT_SYMBOL_GPL(vring_create_virtqueue);

struct virtqueue *vring_new_virtqueue(unsigned int index,
				      unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      bool weak_barriers,
				      void *pages,
				      bool (*notify)(struct virtqueue *vq),
				      void (*callback)(struct virtqueue *vq),
				      const char *name)
{
	struct vring vring;
	vring_init(&vring, num, pages, vring_align);
	return __vring_new_virtqueue(index, vring, vdev, weak_barriers,
				     notify, callback, name);
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);

void vring_del_virtqueue(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (vq->we_own_ring) {
		vring_free_queue(vq->vq.vdev, vq->queue_size_in_bytes,
				 vq->vring.desc, vq->queue_dma_addr);
	}
	list_del(&_vq->list);
	kfree(vq);
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);
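
/*
 * Usage sketch (hypothetical, not part of this file): a transport
 * typically creates a ring like this, with its own notify callback:
 *
 *	vq = vring_create_virtqueue(index, 256, PAGE_SIZE, vdev,
 *				    true, true, my_notify, my_callback,
 *				    "requests");
 *
 * my_notify/my_callback and the size/alignment values shown are made
 * up; real transports read the queue size and alignment requirements
 * from the device.
 */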
11105dfc1762SRusty Russell /**
11115dfc1762SRusty Russell  * virtqueue_get_vring_size - return the size of the virtqueue's vring
11125dfc1762SRusty Russell  * @_vq: the struct virtqueue containing the vring of interest.
11135dfc1762SRusty Russell  *
11145dfc1762SRusty Russell  * Returns the size of the vring.  This is mainly used for boasting to
11155dfc1762SRusty Russell  * userspace.  Unlike other operations, this need not be serialized.
11165dfc1762SRusty Russell  */
11178f9f4668SRick Jones unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
11188f9f4668SRick Jones {
11198f9f4668SRick Jones 
11208f9f4668SRick Jones 	struct vring_virtqueue *vq = to_vvq(_vq);
11218f9f4668SRick Jones 
11228f9f4668SRick Jones 	return vq->vring.num;
11238f9f4668SRick Jones }
11248f9f4668SRick Jones EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);
11258f9f4668SRick Jones 
1126b3b32c94SHeinz Graalfs bool virtqueue_is_broken(struct virtqueue *_vq)
1127b3b32c94SHeinz Graalfs {
1128b3b32c94SHeinz Graalfs 	struct vring_virtqueue *vq = to_vvq(_vq);
1129b3b32c94SHeinz Graalfs 
1130b3b32c94SHeinz Graalfs 	return vq->broken;
1131b3b32c94SHeinz Graalfs }
1132b3b32c94SHeinz Graalfs EXPORT_SYMBOL_GPL(virtqueue_is_broken);
1133b3b32c94SHeinz Graalfs 
1134e2dcdfe9SRusty Russell /*
1135e2dcdfe9SRusty Russell  * This should prevent the device from being used, allowing drivers to
1136e2dcdfe9SRusty Russell  * recover.  You may need to grab appropriate locks to flush.
1137e2dcdfe9SRusty Russell  */
1138e2dcdfe9SRusty Russell void virtio_break_device(struct virtio_device *dev)
1139e2dcdfe9SRusty Russell {
1140e2dcdfe9SRusty Russell 	struct virtqueue *_vq;
1141e2dcdfe9SRusty Russell 
1142e2dcdfe9SRusty Russell 	list_for_each_entry(_vq, &dev->vqs, list) {
1143e2dcdfe9SRusty Russell 		struct vring_virtqueue *vq = to_vvq(_vq);
1144e2dcdfe9SRusty Russell 		vq->broken = true;
1145e2dcdfe9SRusty Russell 	}
1146e2dcdfe9SRusty Russell }
1147e2dcdfe9SRusty Russell EXPORT_SYMBOL_GPL(virtio_break_device);
1148e2dcdfe9SRusty Russell 
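/*
 * Editor's illustrative sketch (not part of the original file): how the
 * two calls above combine.  A driver can check virtqueue_is_broken() to
 * stop servicing a ring once it has been marked broken, and a transport
 * calls virtio_break_device() on a fatal error (e.g. surprise removal).
 * virtqueue_get_buf() is the real API for reaping used buffers; the
 * surrounding loop is only a sketch.
 *
 *	while (!virtqueue_is_broken(vq)) {
 *		unsigned int len;
 *		void *buf = virtqueue_get_buf(vq, &len);
 *		if (!buf)
 *			break;
 *		// ...process buf...
 *	}
 *
 *	// transport, on unrecoverable error:
 *	virtio_break_device(vdev);
 */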
11492a2d1382SAndy Lutomirski dma_addr_t virtqueue_get_desc_addr(struct virtqueue *_vq)
115089062652SCornelia Huck {
115189062652SCornelia Huck 	struct vring_virtqueue *vq = to_vvq(_vq);
115289062652SCornelia Huck 
11532a2d1382SAndy Lutomirski 	BUG_ON(!vq->we_own_ring);
115489062652SCornelia Huck 
11552a2d1382SAndy Lutomirski 	return vq->queue_dma_addr;
11562a2d1382SAndy Lutomirski }
11572a2d1382SAndy Lutomirski EXPORT_SYMBOL_GPL(virtqueue_get_desc_addr);
11582a2d1382SAndy Lutomirski 
11592a2d1382SAndy Lutomirski dma_addr_t virtqueue_get_avail_addr(struct virtqueue *_vq)
116089062652SCornelia Huck {
116189062652SCornelia Huck 	struct vring_virtqueue *vq = to_vvq(_vq);
116289062652SCornelia Huck 
11632a2d1382SAndy Lutomirski 	BUG_ON(!vq->we_own_ring);
11642a2d1382SAndy Lutomirski 
11652a2d1382SAndy Lutomirski 	return vq->queue_dma_addr +
11662a2d1382SAndy Lutomirski 		((char *)vq->vring.avail - (char *)vq->vring.desc);
116789062652SCornelia Huck }
11682a2d1382SAndy Lutomirski EXPORT_SYMBOL_GPL(virtqueue_get_avail_addr);
11692a2d1382SAndy Lutomirski 
11702a2d1382SAndy Lutomirski dma_addr_t virtqueue_get_used_addr(struct virtqueue *_vq)
11712a2d1382SAndy Lutomirski {
11722a2d1382SAndy Lutomirski 	struct vring_virtqueue *vq = to_vvq(_vq);
11732a2d1382SAndy Lutomirski 
11742a2d1382SAndy Lutomirski 	BUG_ON(!vq->we_own_ring);
11752a2d1382SAndy Lutomirski 
11762a2d1382SAndy Lutomirski 	return vq->queue_dma_addr +
11772a2d1382SAndy Lutomirski 		((char *)vq->vring.used - (char *)vq->vring.desc);
11782a2d1382SAndy Lutomirski }
11792a2d1382SAndy Lutomirski EXPORT_SYMBOL_GPL(virtqueue_get_used_addr);
11802a2d1382SAndy Lutomirski 
11812a2d1382SAndy Lutomirski const struct vring *virtqueue_get_vring(struct virtqueue *vq)
11822a2d1382SAndy Lutomirski {
11832a2d1382SAndy Lutomirski 	return &to_vvq(vq)->vring;
11842a2d1382SAndy Lutomirski }
11852a2d1382SAndy Lutomirski EXPORT_SYMBOL_GPL(virtqueue_get_vring);
118689062652SCornelia Huck 
1187c6fd4701SRusty Russell MODULE_LICENSE("GPL");
1188
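/*
 * Editor's illustrative sketch (not part of the original file): a
 * modern transport hands the three ring areas to the device using the
 * DMA addresses returned by the getters above; they are only valid for
 * rings created with vring_create_virtqueue() (we_own_ring).  The
 * my_write_queue_addr() helper and MY_QUEUE_* selectors are
 * hypothetical register-programming stand-ins.
 *
 *	dma_addr_t desc  = virtqueue_get_desc_addr(vq);
 *	dma_addr_t avail = virtqueue_get_avail_addr(vq);
 *	dma_addr_t used  = virtqueue_get_used_addr(vq);
 *
 *	my_write_queue_addr(dev, MY_QUEUE_DESC,  desc);
 *	my_write_queue_addr(dev, MY_QUEUE_AVAIL, avail);
 *	my_write_queue_addr(dev, MY_QUEUE_USED,  used);
 */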