/* Virtio ring implementation.
 *
 *  Copyright 2007 Rusty Russell IBM Corporation
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301 USA
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/dma-mapping.h>
#include <xen/xen.h>

#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		BUG();						\
	} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)						\
	do {							\
		if ((_vq)->in_use)				\
			panic("%s:in_use = %i\n",		\
			      (_vq)->vq.name, (_vq)->in_use);	\
		(_vq)->in_use = __LINE__;			\
	} while (0)
#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
#else
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&_vq->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		(_vq)->broken = true;				\
	} while (0)
#define START_USE(vq)
#define END_USE(vq)
#endif

struct vring_desc_state {
	void *data;			/* Data for callback. */
	struct vring_desc *indir_desc;	/* Indirect descriptor, if any. */
};

struct vring_virtqueue {
	struct virtqueue vq;

	/* Can we use weak barriers? */
	bool weak_barriers;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Host supports indirect buffers */
	bool indirect;

	/* Host publishes avail event idx */
	bool event;

	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen. */
	u16 last_used_idx;

	struct {
		/* Actual memory layout for this queue */
		struct vring vring;

		/* Last written value to avail->flags */
		u16 avail_flags_shadow;

		/* Last written value to avail->idx in guest byte order */
		u16 avail_idx_shadow;
	} split;

	/* How to notify other side. FIXME: commonalize hcalls! */
	bool (*notify)(struct virtqueue *vq);

	/* DMA, allocation, and size information */
	bool we_own_ring;
	size_t queue_size_in_bytes;
	dma_addr_t queue_dma_addr;

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;

	/* Figure out if their kicks are too delayed. */
	bool last_add_time_valid;
	ktime_t last_add_time;
#endif

	/* Per-descriptor state. */
	struct vring_desc_state desc_state[];
};


/*
 * Helpers.
 */

#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)

/*
 * Modern virtio devices have feature bits to specify whether they need a
 * quirk and bypass the IOMMU. If not there, just use the DMA API.
 *
 * If there, the interaction between virtio and DMA API is messy.
 *
 * On most systems with virtio, physical addresses match bus addresses,
 * and it doesn't particularly matter whether we use the DMA API.
 *
 * On some systems, including Xen and any system with a physical device
 * that speaks virtio behind a physical IOMMU, we must use the DMA API
 * for virtio DMA to work at all.
 *
 * On other systems, including SPARC and PPC64, virtio-pci devices are
 * enumerated as though they are behind an IOMMU, but the virtio host
 * ignores the IOMMU, so we must either pretend that the IOMMU isn't
 * there or somehow map everything as the identity.
 *
 * For the time being, we preserve historic behavior and bypass the DMA
 * API.
 *
 * TODO: install a per-device DMA ops structure that does the right thing
 * taking into account all the above quirks, and use the DMA API
 * unconditionally on data path.
 */

static bool vring_use_dma_api(struct virtio_device *vdev)
{
	if (!virtio_has_iommu_quirk(vdev))
		return true;

	/* Otherwise, we are left to guess. */
	/*
	 * In theory, it's possible to have a buggy QEMU-supplied
	 * emulated Q35 IOMMU and Xen enabled at the same time.  On
	 * such a configuration, virtio has never worked and will
	 * not work without an even larger kludge.  Instead, enable
	 * the DMA API if we're a Xen guest, which at least allows
	 * all of the sensible Xen configurations to work correctly.
	 */
	if (xen_domain())
		return true;

	return false;
}

/*
 * The DMA ops on various arches are rather gnarly right now, and
 * making all of the arch DMA ops work on the vring device itself
 * is a mess.  For now, we use the parent device for DMA ops.
 */
static inline struct device *vring_dma_dev(const struct vring_virtqueue *vq)
{
	return vq->vq.vdev->dev.parent;
}
/* Map one sg entry. */
static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
				   struct scatterlist *sg,
				   enum dma_data_direction direction)
{
	if (!vring_use_dma_api(vq->vq.vdev))
		return (dma_addr_t)sg_phys(sg);

	/*
	 * We can't use dma_map_sg, because we don't use scatterlists in
	 * the way it expects (we don't guarantee that the scatterlist
	 * will exist for the lifetime of the mapping).
	 */
	return dma_map_page(vring_dma_dev(vq),
			    sg_page(sg), sg->offset, sg->length,
			    direction);
}

static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
				   void *cpu_addr, size_t size,
				   enum dma_data_direction direction)
{
	if (!vring_use_dma_api(vq->vq.vdev))
		return (dma_addr_t)virt_to_phys(cpu_addr);

	return dma_map_single(vring_dma_dev(vq),
			      cpu_addr, size, direction);
}

static int vring_mapping_error(const struct vring_virtqueue *vq,
			       dma_addr_t addr)
{
	if (!vring_use_dma_api(vq->vq.vdev))
		return 0;

	return dma_mapping_error(vring_dma_dev(vq), addr);
}


/*
 * Split ring specific functions - *_split().
 */

static void vring_unmap_one_split(const struct vring_virtqueue *vq,
				  struct vring_desc *desc)
{
	u16 flags;

	if (!vring_use_dma_api(vq->vq.vdev))
		return;

	flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);

	if (flags & VRING_DESC_F_INDIRECT) {
		dma_unmap_single(vring_dma_dev(vq),
				 virtio64_to_cpu(vq->vq.vdev, desc->addr),
				 virtio32_to_cpu(vq->vq.vdev, desc->len),
				 (flags & VRING_DESC_F_WRITE) ?
				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
	} else {
		dma_unmap_page(vring_dma_dev(vq),
			       virtio64_to_cpu(vq->vq.vdev, desc->addr),
			       virtio32_to_cpu(vq->vq.vdev, desc->len),
			       (flags & VRING_DESC_F_WRITE) ?
			       DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}
}
static struct vring_desc *alloc_indirect_split(struct virtqueue *_vq,
					       unsigned int total_sg,
					       gfp_t gfp)
{
	struct vring_desc *desc;
	unsigned int i;

	/*
	 * We require lowmem mappings for the descriptors because
	 * otherwise virt_to_phys will give us bogus addresses in the
	 * virtqueue.
	 */
	gfp &= ~__GFP_HIGHMEM;

	desc = kmalloc_array(total_sg, sizeof(struct vring_desc), gfp);
	if (!desc)
		return NULL;

	for (i = 0; i < total_sg; i++)
		desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1);
	return desc;
}
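/*
 * Illustrative sketch (not upstream code): for total_sg == 3,
 * alloc_indirect_split() returns a table whose next fields are
 * pre-chained in order, i.e.:
 *
 *	desc[0].next = 1, desc[1].next = 2, desc[2].next = 3
 *
 * virtqueue_add_split() below then fills in addr/len/flags along the
 * chain and clears VRING_DESC_F_NEXT on the last entry it actually
 * uses, so the out-of-range final .next value is never followed.
 */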
static inline int virtqueue_add_split(struct virtqueue *_vq,
				      struct scatterlist *sgs[],
				      unsigned int total_sg,
				      unsigned int out_sgs,
				      unsigned int in_sgs,
				      void *data,
				      void *ctx,
				      gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	struct scatterlist *sg;
	struct vring_desc *desc;
	unsigned int i, n, avail, descs_used, uninitialized_var(prev), err_idx;
	int head;
	bool indirect;

	START_USE(vq);

	BUG_ON(data == NULL);
	BUG_ON(ctx && vq->indirect);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return -EIO;
	}

#ifdef DEBUG
	{
		ktime_t now = ktime_get();

		/* No kick or get, with .1 second between?  Warn. */
		if (vq->last_add_time_valid)
			WARN_ON(ktime_to_ms(ktime_sub(now, vq->last_add_time))
					    > 100);
		vq->last_add_time = now;
		vq->last_add_time_valid = true;
	}
#endif

	BUG_ON(total_sg == 0);

	head = vq->free_head;

	/* If the host supports indirect descriptor tables, and we have multiple
	 * buffers, then go indirect. FIXME: tune this threshold */
	if (vq->indirect && total_sg > 1 && vq->vq.num_free)
		desc = alloc_indirect_split(_vq, total_sg, gfp);
	else {
		desc = NULL;
		WARN_ON_ONCE(total_sg > vq->split.vring.num && !vq->indirect);
	}

	if (desc) {
		/* Use a single buffer which doesn't continue */
		indirect = true;
		/* Set up rest to use this indirect table. */
		i = 0;
		descs_used = 1;
	} else {
		indirect = false;
		desc = vq->split.vring.desc;
		i = head;
		descs_used = total_sg;
	}

	if (vq->vq.num_free < descs_used) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 descs_used, vq->vq.num_free);
		/* FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer.  Presumably the
		 * host should service the ring ASAP. */
		if (out_sgs)
			vq->notify(&vq->vq);
		if (indirect)
			kfree(desc);
		END_USE(vq);
		return -ENOSPC;
	}

	for (n = 0; n < out_sgs; n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT);
			desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
			prev = i;
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		}
	}
	for (; n < (out_sgs + in_sgs); n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT | VRING_DESC_F_WRITE);
			desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
			prev = i;
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		}
	}
	/* Last one doesn't continue. */
	desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);
	if (indirect) {
		/* Now that the indirect table is filled in, map it. */
		dma_addr_t addr = vring_map_single(
			vq, desc, total_sg * sizeof(struct vring_desc),
			DMA_TO_DEVICE);
		if (vring_mapping_error(vq, addr))
			goto unmap_release;

		vq->split.vring.desc[head].flags = cpu_to_virtio16(_vq->vdev,
				VRING_DESC_F_INDIRECT);
		vq->split.vring.desc[head].addr = cpu_to_virtio64(_vq->vdev,
				addr);

		vq->split.vring.desc[head].len = cpu_to_virtio32(_vq->vdev,
				total_sg * sizeof(struct vring_desc));
	}

	/* We're using some buffers from the free list. */
	vq->vq.num_free -= descs_used;

	/* Update free pointer */
	if (indirect)
		vq->free_head = virtio16_to_cpu(_vq->vdev,
				vq->split.vring.desc[head].next);
	else
		vq->free_head = i;

	/* Store token and indirect buffer state. */
	vq->desc_state[head].data = data;
	if (indirect)
		vq->desc_state[head].indir_desc = desc;
	else
		vq->desc_state[head].indir_desc = ctx;

	/* Put entry in available array (but don't update avail->idx until they
	 * do sync). */
	avail = vq->split.avail_idx_shadow & (vq->split.vring.num - 1);
	vq->split.vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);
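	/*
	 * Illustrative example of the wrap arithmetic above: with a
	 * 256-entry ring, avail_idx_shadow == 260 publishes into slot
	 * 260 & 255 == 4.  The shadow index itself keeps counting up
	 * mod 2^16 and is only reduced to a ring slot at use time,
	 * which is what lets the event-index scheme below compare
	 * free-running indices.
	 */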
	/* Descriptors and available array need to be set before we expose the
	 * new available array entries. */
	virtio_wmb(vq->weak_barriers);
	vq->split.avail_idx_shadow++;
	vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
				vq->split.avail_idx_shadow);
	vq->num_added++;

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	/* This is very unlikely, but theoretically possible.  Kick
	 * just in case. */
	if (unlikely(vq->num_added == (1 << 16) - 1))
		virtqueue_kick(_vq);

	return 0;

unmap_release:
	err_idx = i;
	i = head;

	for (n = 0; n < total_sg; n++) {
		if (i == err_idx)
			break;
		vring_unmap_one_split(vq, &desc[i]);
		i = virtio16_to_cpu(_vq->vdev, vq->split.vring.desc[i].next);
	}

	if (indirect)
		kfree(desc);

	END_USE(vq);
	return -EIO;
}

static bool virtqueue_kick_prepare_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 new, old;
	bool needs_kick;

	START_USE(vq);
	/* We need to expose available array entries before checking avail
	 * event. */
	virtio_mb(vq->weak_barriers);

	old = vq->split.avail_idx_shadow - vq->num_added;
	new = vq->split.avail_idx_shadow;
	vq->num_added = 0;

#ifdef DEBUG
	if (vq->last_add_time_valid) {
		WARN_ON(ktime_to_ms(ktime_sub(ktime_get(),
					      vq->last_add_time)) > 100);
	}
	vq->last_add_time_valid = false;
#endif

	if (vq->event) {
		needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev,
					vring_avail_event(&vq->split.vring)),
					      new, old);
	} else {
		needs_kick = !(vq->split.vring.used->flags &
					cpu_to_virtio16(_vq->vdev,
						VRING_USED_F_NO_NOTIFY));
	}
	END_USE(vq);
	return needs_kick;
}
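/*
 * Worked example of the event-idx test above (illustrative numbers):
 * vring_need_event(event_idx, new, old) computes
 * (u16)(new - event_idx - 1) < (u16)(new - old).  With old == 10,
 * new == 14 and the device asking to be notified at event_idx == 12,
 * (u16)(14 - 12 - 1) == 1 < (u16)(14 - 10) == 4, so we kick.  Had the
 * device set event_idx == 20, the left-hand side would wrap to 65529
 * and no kick would be needed yet.
 */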
static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
			     void **ctx)
{
	unsigned int i, j;
	__virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);

	/* Clear data ptr. */
	vq->desc_state[head].data = NULL;

	/* Put back on free list: unmap first-level descriptors and find end */
	i = head;

	while (vq->split.vring.desc[i].flags & nextflag) {
		vring_unmap_one_split(vq, &vq->split.vring.desc[i]);
		i = virtio16_to_cpu(vq->vq.vdev, vq->split.vring.desc[i].next);
		vq->vq.num_free++;
	}

	vring_unmap_one_split(vq, &vq->split.vring.desc[i]);
	vq->split.vring.desc[i].next = cpu_to_virtio16(vq->vq.vdev,
						vq->free_head);
	vq->free_head = head;

	/* Plus final descriptor */
	vq->vq.num_free++;

	if (vq->indirect) {
		struct vring_desc *indir_desc = vq->desc_state[head].indir_desc;
		u32 len;

		/* Free the indirect table, if any, now that it's unmapped. */
		if (!indir_desc)
			return;

		len = virtio32_to_cpu(vq->vq.vdev,
				vq->split.vring.desc[head].len);

		BUG_ON(!(vq->split.vring.desc[head].flags &
			 cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_INDIRECT)));
		BUG_ON(len == 0 || len % sizeof(struct vring_desc));

		for (j = 0; j < len / sizeof(struct vring_desc); j++)
			vring_unmap_one_split(vq, &indir_desc[j]);

		kfree(indir_desc);
		vq->desc_state[head].indir_desc = NULL;
	} else if (ctx) {
		*ctx = vq->desc_state[head].indir_desc;
	}
}

static inline bool more_used_split(const struct vring_virtqueue *vq)
{
	return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev,
			vq->split.vring.used->idx);
}
static void *virtqueue_get_buf_ctx_split(struct virtqueue *_vq,
					 unsigned int *len,
					 void **ctx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	void *ret;
	unsigned int i;
	u16 last_used;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used_split(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* Only get used array entries after they have been exposed by host. */
	virtio_rmb(vq->weak_barriers);

	last_used = (vq->last_used_idx & (vq->split.vring.num - 1));
	i = virtio32_to_cpu(_vq->vdev,
			vq->split.vring.used->ring[last_used].id);
	*len = virtio32_to_cpu(_vq->vdev,
			vq->split.vring.used->ring[last_used].len);

	if (unlikely(i >= vq->split.vring.num)) {
		BAD_RING(vq, "id %u out of range\n", i);
		return NULL;
	}
	if (unlikely(!vq->desc_state[i].data)) {
		BAD_RING(vq, "id %u is not a head!\n", i);
		return NULL;
	}

	/* detach_buf_split clears data, so grab it now. */
	ret = vq->desc_state[i].data;
	detach_buf_split(vq, i, ctx);
	vq->last_used_idx++;
	/* If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call. */
	if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT))
		virtio_store_mb(vq->weak_barriers,
				&vring_used_event(&vq->split.vring),
				cpu_to_virtio16(_vq->vdev, vq->last_used_idx));
#ifdef DEBUG
	vq->last_add_time_valid = false;
#endif

	END_USE(vq);
	return ret;
}

static void virtqueue_disable_cb_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
		vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->split.vring.avail->flags =
				cpu_to_virtio16(_vq->vdev,
						vq->split.avail_flags_shadow);
	}
}

static unsigned virtqueue_enable_cb_prepare_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 last_used_idx;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->split.vring.avail->flags =
				cpu_to_virtio16(_vq->vdev,
						vq->split.avail_flags_shadow);
	}
	vring_used_event(&vq->split.vring) = cpu_to_virtio16(_vq->vdev,
			last_used_idx = vq->last_used_idx);
	END_USE(vq);
	return last_used_idx;
}

static bool virtqueue_poll_split(struct virtqueue *_vq, unsigned last_used_idx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev,
			vq->split.vring.used->idx);
}
static bool virtqueue_enable_cb_delayed_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 bufs;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always update the event index to keep code simple. */
	if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->split.vring.avail->flags =
				cpu_to_virtio16(_vq->vdev,
						vq->split.avail_flags_shadow);
	}
	/* TODO: tune this threshold */
	bufs = (u16)(vq->split.avail_idx_shadow - vq->last_used_idx) * 3 / 4;

	virtio_store_mb(vq->weak_barriers,
			&vring_used_event(&vq->split.vring),
			cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs));

	if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->split.vring.used->idx)
					- vq->last_used_idx) > bufs)) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}

static void *virtqueue_detach_unused_buf_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i;
	void *buf;

	START_USE(vq);

	for (i = 0; i < vq->split.vring.num; i++) {
		if (!vq->desc_state[i].data)
			continue;
		/* detach_buf_split clears data, so grab it now. */
		buf = vq->desc_state[i].data;
		detach_buf_split(vq, i, NULL);
		vq->split.avail_idx_shadow--;
		vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
				vq->split.avail_idx_shadow);
		END_USE(vq);
		return buf;
	}
	/* That should have freed everything. */
	BUG_ON(vq->vq.num_free != vq->split.vring.num);

	END_USE(vq);
	return NULL;
}


/*
 * Generic functions and exported symbols.
 */

static inline int virtqueue_add(struct virtqueue *_vq,
				struct scatterlist *sgs[],
				unsigned int total_sg,
				unsigned int out_sgs,
				unsigned int in_sgs,
				void *data,
				void *ctx,
				gfp_t gfp)
{
	return virtqueue_add_split(_vq, sgs, total_sg,
				   out_sgs, in_sgs, data, ctx, gfp);
}
/**
 * virtqueue_add_sgs - expose buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sgs: array of terminated scatterlists.
 * @out_sgs: the number of scatterlists readable by other side
 * @in_sgs: the number of scatterlists which are writable (after readable ones)
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_sgs(struct virtqueue *_vq,
		      struct scatterlist *sgs[],
		      unsigned int out_sgs,
		      unsigned int in_sgs,
		      void *data,
		      gfp_t gfp)
{
	unsigned int i, total_sg = 0;

	/* Count them first. */
	for (i = 0; i < out_sgs + in_sgs; i++) {
		struct scatterlist *sg;

		for (sg = sgs[i]; sg; sg = sg_next(sg))
			total_sg++;
	}
	return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs,
			     data, NULL, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
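/*
 * Usage sketch for virtqueue_add_sgs() (hypothetical driver code, not
 * upstream): one device-readable header followed by one device-writable
 * status byte.  Readable scatterlists must come first in @sgs.
 *
 *	struct scatterlist hdr, status;
 *	struct scatterlist *sgs[] = { &hdr, &status };
 *
 *	sg_init_one(&hdr, &req->hdr, sizeof(req->hdr));
 *	sg_init_one(&status, &req->status, sizeof(req->status));
 *	err = virtqueue_add_sgs(vq, sgs, 1, 1, req, GFP_ATOMIC);
 *
 * Here "req" is a hypothetical per-request structure used as the token.
 */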
/**
 * virtqueue_add_outbuf - expose output buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg readable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_outbuf(struct virtqueue *vq,
			 struct scatterlist *sg, unsigned int num,
			 void *data,
			 gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);

/**
 * virtqueue_add_inbuf - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_inbuf(struct virtqueue *vq,
			struct scatterlist *sg, unsigned int num,
			void *data,
			gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 0, 1, data, NULL, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);

/**
 * virtqueue_add_inbuf_ctx - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @ctx: extra context for the token
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_inbuf_ctx(struct virtqueue *vq,
			    struct scatterlist *sg, unsigned int num,
			    void *data,
			    void *ctx,
			    gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_ctx);

/**
 * virtqueue_kick_prepare - first half of split virtqueue_kick call.
 * @vq: the struct virtqueue
 *
 * Instead of virtqueue_kick(), you can do:
 *	if (virtqueue_kick_prepare(vq))
 *		virtqueue_notify(vq);
 *
 * This is sometimes useful because the virtqueue_kick_prepare() needs
 * to be serialized, but the actual virtqueue_notify() call does not.
 */
bool virtqueue_kick_prepare(struct virtqueue *_vq)
{
	return virtqueue_kick_prepare_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);

/**
 * virtqueue_notify - second half of split virtqueue_kick call.
 * @vq: the struct virtqueue
 *
 * This does not need to be serialized.
 *
 * Returns false if host notify failed or queue is broken, otherwise true.
 */
bool virtqueue_notify(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (unlikely(vq->broken))
		return false;

	/* Prod other side to tell it about changes. */
	if (!vq->notify(_vq)) {
		vq->broken = true;
		return false;
	}
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_notify);

/**
 * virtqueue_kick - update after add_buf
 * @vq: the struct virtqueue
 *
 * After one or more virtqueue_add_* calls, invoke this to kick
 * the other side.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns false if kick failed, otherwise true.
 */
bool virtqueue_kick(struct virtqueue *vq)
{
	if (virtqueue_kick_prepare(vq))
		return virtqueue_notify(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_kick);
/**
 * virtqueue_get_buf - get the next used buffer
 * @vq: the struct virtqueue we're talking about.
 * @len: the length written into the buffer
 *
 * If the device wrote data into the buffer, @len will be set to the
 * amount written.  This means you don't need to clear the buffer
 * beforehand to ensure there's no data leakage in the case of short
 * writes.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns NULL if there are no used buffers, or the "data" token
 * handed to virtqueue_add_*().
 */
void *virtqueue_get_buf_ctx(struct virtqueue *_vq, unsigned int *len,
			    void **ctx)
{
	return virtqueue_get_buf_ctx_split(_vq, len, ctx);
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf_ctx);

void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
	return virtqueue_get_buf_ctx(_vq, len, NULL);
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf);
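/*
 * Typical completion loop built on virtqueue_get_buf() (illustrative
 * sketch, not upstream code), usually run from the virtqueue callback:
 *
 *	unsigned int len;
 *	void *token;
 *
 *	while ((token = virtqueue_get_buf(vq, &len)) != NULL)
 *		complete_request(token, len);
 *
 * where complete_request() stands in for whatever driver-specific
 * handling turns the token back into a finished request.
 */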
/**
 * virtqueue_disable_cb - disable callbacks
 * @vq: the struct virtqueue we're talking about.
 *
 * Note that this is not necessarily synchronous, hence unreliable and only
 * useful as an optimization.
 *
 * Unlike other operations, this need not be serialized.
 */
void virtqueue_disable_cb(struct virtqueue *_vq)
{
	virtqueue_disable_cb_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);

/**
 * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns current queue state
 * in an opaque unsigned value.  This value should be later tested by
 * virtqueue_poll, to detect a possible race between the driver checking for
 * more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
{
	return virtqueue_enable_cb_prepare_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);

/**
 * virtqueue_poll - query pending used buffers
 * @vq: the struct virtqueue we're talking about.
 * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
 *
 * Returns "true" if there are pending used buffers in the queue.
 *
 * This does not need to be serialized.
 */
bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	virtio_mb(vq->weak_barriers);
	return virtqueue_poll_split(_vq, last_used_idx);
}
EXPORT_SYMBOL_GPL(virtqueue_poll);

/**
 * virtqueue_enable_cb - restart callbacks after disable_cb.
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns "false" if there are pending
 * buffers in the queue, to detect a possible race between the driver
 * checking for more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb(struct virtqueue *_vq)
{
	unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);

	return !virtqueue_poll(_vq, last_used_idx);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
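/*
 * The classic way to close the race that virtqueue_enable_cb()
 * detects (illustrative sketch; process() is hypothetical):
 *
 *	do {
 *		virtqueue_disable_cb(vq);
 *		while ((token = virtqueue_get_buf(vq, &len)))
 *			process(token, len);
 *	} while (!virtqueue_enable_cb(vq));
 *
 * If buffers arrive between the final get_buf and enable_cb, the
 * "false" return sends the loop around again instead of losing them.
 */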
/**
 * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks but hints to the other side to delay
 * interrupts until most of the available buffers have been processed;
 * it returns "false" if there are many pending buffers in the queue,
 * to detect a possible race between the driver checking for more work,
 * and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
{
	return virtqueue_enable_cb_delayed_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);

/**
 * virtqueue_detach_unused_buf - detach first unused buffer
 * @vq: the struct virtqueue we're talking about.
 *
 * Returns NULL or the "data" token handed to virtqueue_add_*().
 * This is not valid on an active queue; it is useful only for device
 * shutdown.
 */
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
	return virtqueue_detach_unused_buf_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);

static inline bool more_used(const struct vring_virtqueue *vq)
{
	return more_used_split(vq);
}

irqreturn_t vring_interrupt(int irq, void *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!more_used(vq)) {
		pr_debug("virtqueue interrupt with no work for %p\n", vq);
		return IRQ_NONE;
	}

	if (unlikely(vq->broken))
		return IRQ_HANDLED;

	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
	if (vq->vq.callback)
		vq->vq.callback(&vq->vq);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);
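/*
 * Transports wire vring_interrupt() up as the per-virtqueue interrupt
 * handler; a sketch modelled on virtio-pci's per-vq MSI-X vectors
 * (details elided, variable names hypothetical):
 *
 *	err = request_irq(irq, vring_interrupt, 0, name, vq);
 *
 * vring_interrupt() then dispatches to the callback installed via
 * __vring_new_virtqueue()/vring_create_virtqueue() below.
 */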
struct virtqueue *__vring_new_virtqueue(unsigned int index,
					struct vring vring,
					struct virtio_device *vdev,
					bool weak_barriers,
					bool context,
					bool (*notify)(struct virtqueue *),
					void (*callback)(struct virtqueue *),
					const char *name)
{
	unsigned int i;
	struct vring_virtqueue *vq;

	vq = kmalloc(sizeof(*vq) + vring.num * sizeof(struct vring_desc_state),
		     GFP_KERNEL);
	if (!vq)
		return NULL;

	vq->vq.callback = callback;
	vq->vq.vdev = vdev;
	vq->vq.name = name;
	vq->vq.num_free = vring.num;
	vq->vq.index = index;
	vq->we_own_ring = false;
	vq->queue_dma_addr = 0;
	vq->queue_size_in_bytes = 0;
	vq->notify = notify;
	vq->weak_barriers = weak_barriers;
	vq->broken = false;
	vq->last_used_idx = 0;
	vq->num_added = 0;
	list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
	vq->in_use = false;
	vq->last_add_time_valid = false;
#endif

	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
		!context;
	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

	vq->split.vring = vring;
	vq->split.avail_flags_shadow = 0;
	vq->split.avail_idx_shadow = 0;

	/* No callback?  Tell other side not to bother us. */
	if (!callback) {
		vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->split.vring.avail->flags = cpu_to_virtio16(vdev,
					vq->split.avail_flags_shadow);
	}

	/* Put everything in free lists. */
	vq->free_head = 0;
	for (i = 0; i < vring.num-1; i++)
		vq->split.vring.desc[i].next = cpu_to_virtio16(vdev, i + 1);
	memset(vq->desc_state, 0, vring.num * sizeof(struct vring_desc_state));

	return &vq->vq;
}
EXPORT_SYMBOL_GPL(__vring_new_virtqueue);
static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
			       dma_addr_t *dma_handle, gfp_t flag)
{
	if (vring_use_dma_api(vdev)) {
		return dma_alloc_coherent(vdev->dev.parent, size,
					  dma_handle, flag);
	} else {
		void *queue = alloc_pages_exact(PAGE_ALIGN(size), flag);

		if (queue) {
			phys_addr_t phys_addr = virt_to_phys(queue);
			*dma_handle = (dma_addr_t)phys_addr;

			/*
			 * Sanity check: make sure we didn't truncate
			 * the address.  The only arches I can find that
			 * have 64-bit phys_addr_t but 32-bit dma_addr_t
			 * are certain non-highmem MIPS and x86
			 * configurations, but these configurations
			 * should never allocate physical pages above 32
			 * bits, so this is fine.  Just in case, throw a
			 * warning and abort if we end up with an
			 * unrepresentable address.
			 */
			if (WARN_ON_ONCE(*dma_handle != phys_addr)) {
				free_pages_exact(queue, PAGE_ALIGN(size));
				return NULL;
			}
		}
		return queue;
	}
}

static void vring_free_queue(struct virtio_device *vdev, size_t size,
			     void *queue, dma_addr_t dma_handle)
{
	if (vring_use_dma_api(vdev)) {
		dma_free_coherent(vdev->dev.parent, size, queue, dma_handle);
	} else {
		free_pages_exact(queue, PAGE_ALIGN(size));
	}
}
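/*
 * Editorial note -- a worked vring_size() example, using the split-ring
 * layout from include/uapi/linux/virtio_ring.h:
 *
 *	num = 256, vring_align = 4096
 *	descriptor table:  16 * 256           = 4096 bytes
 *	avail ring:        2 * (3 + 256)      =  518 bytes
 *	both, rounded up to vring_align       = 8192 bytes
 *	used ring:         3 * 2 + 8 * 256    = 2054 bytes
 *	vring_size(256, 4096)                 = 10246 bytes
 *
 * vring_alloc_queue() above then rounds this up to whole pages before
 * allocating.
 */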
struct virtqueue *vring_create_virtqueue(
	unsigned int index,
	unsigned int num,
	unsigned int vring_align,
	struct virtio_device *vdev,
	bool weak_barriers,
	bool may_reduce_num,
	bool context,
	bool (*notify)(struct virtqueue *),
	void (*callback)(struct virtqueue *),
	const char *name)
{
	struct virtqueue *vq;
	void *queue = NULL;
	dma_addr_t dma_addr;
	size_t queue_size_in_bytes;
	struct vring vring;

	/* We assume num is a power of 2. */
	if (num & (num - 1)) {
		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
		return NULL;
	}

	/* TODO: allocate each queue chunk individually */
	for (; num && vring_size(num, vring_align) > PAGE_SIZE; num /= 2) {
		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
					  &dma_addr,
					  GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
		if (queue)
			break;
		/* The caller forbade shrinking the ring; give up instead. */
		if (!may_reduce_num)
			return NULL;
	}

	if (!num)
		return NULL;

	if (!queue) {
		/* Try to get a single page. You are my only hope! */
		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
					  &dma_addr, GFP_KERNEL|__GFP_ZERO);
	}
	if (!queue)
		return NULL;

	queue_size_in_bytes = vring_size(num, vring_align);
	vring_init(&vring, num, queue, vring_align);

	vq = __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
				   notify, callback, name);
	if (!vq) {
		vring_free_queue(vdev, queue_size_in_bytes, queue, dma_addr);
		return NULL;
	}

	to_vvq(vq)->queue_dma_addr = dma_addr;
	to_vvq(vq)->queue_size_in_bytes = queue_size_in_bytes;
	to_vvq(vq)->we_own_ring = true;

	return vq;
}
EXPORT_SYMBOL_GPL(vring_create_virtqueue);
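/*
 * Editorial sketch (not part of this file): how a transport might use
 * the allocator above.  my_notify(), my_done() and the parameter
 * choices are hypothetical.
 */
#if 0
static bool my_notify(struct virtqueue *vq)
{
	/* e.g. kick the device by writing vq->index to a doorbell */
	return true;
}

static void my_done(struct virtqueue *vq)
{
	/* drain completions with virtqueue_get_buf() */
}

static struct virtqueue *my_setup_vq(struct virtio_device *vdev)
{
	return vring_create_virtqueue(0 /* index */, 256 /* num */,
				      PAGE_SIZE /* vring_align */, vdev,
				      true  /* weak_barriers */,
				      true  /* may_reduce_num */,
				      false /* context */,
				      my_notify, my_done, "my-vq");
}
#endif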
struct virtqueue *vring_new_virtqueue(unsigned int index,
				      unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      bool weak_barriers,
				      bool context,
				      void *pages,
				      bool (*notify)(struct virtqueue *vq),
				      void (*callback)(struct virtqueue *vq),
				      const char *name)
{
	struct vring vring;

	vring_init(&vring, num, pages, vring_align);
	return __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
				     notify, callback, name);
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);

void vring_del_virtqueue(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (vq->we_own_ring) {
		vring_free_queue(vq->vq.vdev, vq->queue_size_in_bytes,
				 vq->split.vring.desc, vq->queue_dma_addr);
	}
	list_del(&_vq->list);
	kfree(vq);
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);

/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
	unsigned int i;

	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
		switch (i) {
		case VIRTIO_RING_F_INDIRECT_DESC:
			break;
		case VIRTIO_RING_F_EVENT_IDX:
			break;
		case VIRTIO_F_VERSION_1:
			break;
		case VIRTIO_F_IOMMU_PLATFORM:
			break;
		default:
			/* We don't understand this bit. */
			__virtio_clear_bit(vdev, i);
		}
	}
}
EXPORT_SYMBOL_GPL(vring_transport_features);
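/*
 * Editorial sketch (not part of this file): transports call
 * vring_transport_features() from their finalize_features hook so the
 * ring code can veto bits it doesn't implement; my_finalize_features()
 * is a hypothetical stand-in for hooks like virtio_pci's or
 * virtio_mmio's.
 */
#if 0
static int my_finalize_features(struct virtio_device *vdev)
{
	/* Give virtio_ring a chance to accept ring-level features. */
	vring_transport_features(vdev);

	/* ...then write the negotiated bits back to the device. */
	return 0;
}
#endif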
/**
 * virtqueue_get_vring_size - return the size of the virtqueue's vring
 * @vq: the struct virtqueue containing the vring of interest.
 *
 * Returns the size of the vring.  This is mainly used for boasting to
 * userspace.  Unlike other operations, this need not be serialized.
 */
unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->split.vring.num;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);

bool virtqueue_is_broken(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->broken;
}
EXPORT_SYMBOL_GPL(virtqueue_is_broken);

/*
 * This should prevent the device from being used, allowing drivers to
 * recover.  You may need to grab appropriate locks to flush.
 */
void virtio_break_device(struct virtio_device *dev)
{
	struct virtqueue *_vq;

	list_for_each_entry(_vq, &dev->vqs, list) {
		struct vring_virtqueue *vq = to_vvq(_vq);

		vq->broken = true;
	}
}
EXPORT_SYMBOL_GPL(virtio_break_device);

dma_addr_t virtqueue_get_desc_addr(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	BUG_ON(!vq->we_own_ring);

	return vq->queue_dma_addr;
}
EXPORT_SYMBOL_GPL(virtqueue_get_desc_addr);

dma_addr_t virtqueue_get_avail_addr(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	BUG_ON(!vq->we_own_ring);

	return vq->queue_dma_addr +
		((char *)vq->split.vring.avail - (char *)vq->split.vring.desc);
}
EXPORT_SYMBOL_GPL(virtqueue_get_avail_addr);

dma_addr_t virtqueue_get_used_addr(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	BUG_ON(!vq->we_own_ring);

	return vq->queue_dma_addr +
		((char *)vq->split.vring.used - (char *)vq->split.vring.desc);
}
EXPORT_SYMBOL_GPL(virtqueue_get_used_addr);

const struct vring *virtqueue_get_vring(struct virtqueue *vq)
{
	return &to_vvq(vq)->split.vring;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring);
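/*
 * Editorial sketch (not part of this file): when the ring code owns the
 * allocation (we_own_ring), a transport can publish the three ring
 * addresses to the device.  my_writeq() and the MY_REG_* registers are
 * hypothetical placeholders for real doorbell/config registers.
 */
#if 0
static void my_program_ring(struct virtqueue *vq)
{
	my_writeq(virtqueue_get_desc_addr(vq),  MY_REG_DESC_ADDR);
	my_writeq(virtqueue_get_avail_addr(vq), MY_REG_AVAIL_ADDR);
	my_writeq(virtqueue_get_used_addr(vq),  MY_REG_USED_ADDR);
}
#endif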
MODULE_LICENSE("GPL");