/* Virtio ring implementation.
 *
 *  Copyright 2007 Rusty Russell IBM Corporation
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301 USA
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/dma-mapping.h>
#include <xen/xen.h>

#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		BUG();						\
	} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)						\
	do {							\
		if ((_vq)->in_use)				\
			panic("%s:in_use = %i\n",		\
			      (_vq)->vq.name, (_vq)->in_use);	\
		(_vq)->in_use = __LINE__;			\
	} while (0)
#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
#define LAST_ADD_TIME_UPDATE(_vq)				\
	do {							\
		ktime_t now = ktime_get();			\
								\
		/* No kick or get, with .1 second between?  Warn. */ \
		if ((_vq)->last_add_time_valid)			\
			WARN_ON(ktime_to_ms(ktime_sub(now,	\
				(_vq)->last_add_time)) > 100);	\
		(_vq)->last_add_time = now;			\
		(_vq)->last_add_time_valid = true;		\
	} while (0)
#define LAST_ADD_TIME_CHECK(_vq)				\
	do {							\
		if ((_vq)->last_add_time_valid) {		\
			WARN_ON(ktime_to_ms(ktime_sub(ktime_get(), \
				(_vq)->last_add_time)) > 100);	\
		}						\
	} while (0)
#define LAST_ADD_TIME_INVALID(_vq)				\
	((_vq)->last_add_time_valid = false)
#else
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&_vq->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		(_vq)->broken = true;				\
	} while (0)
#define START_USE(vq)
#define END_USE(vq)
#define LAST_ADD_TIME_UPDATE(vq)
#define LAST_ADD_TIME_CHECK(vq)
#define LAST_ADD_TIME_INVALID(vq)
#endif

struct vring_desc_state {
	void *data;			/* Data for callback. */
	struct vring_desc *indir_desc;	/* Indirect descriptor, if any. */
};

struct vring_virtqueue {
	struct virtqueue vq;

	/* Can we use weak barriers? */
	bool weak_barriers;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Host supports indirect buffers */
	bool indirect;

	/* Host publishes avail event idx */
	bool event;

	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen. */
	u16 last_used_idx;

	struct {
		/* Actual memory layout for this queue */
		struct vring vring;

		/* Last written value to avail->flags */
		u16 avail_flags_shadow;

		/* Last written value to avail->idx in guest byte order */
		u16 avail_idx_shadow;
	} split;

	/* How to notify other side. FIXME: commonalize hcalls! */
	bool (*notify)(struct virtqueue *vq);

	/* DMA, allocation, and size information */
	bool we_own_ring;
	size_t queue_size_in_bytes;
	dma_addr_t queue_dma_addr;

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;

	/* Figure out if their kicks are too delayed. */
	bool last_add_time_valid;
	ktime_t last_add_time;
#endif

	/* Per-descriptor state. */
	struct vring_desc_state desc_state[];
};


/*
 * Helpers.
 */

#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)

static inline bool virtqueue_use_indirect(struct virtqueue *_vq,
					  unsigned int total_sg)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	/*
	 * If the host supports indirect descriptor tables, and we have
	 * multiple buffers, then go indirect. FIXME: tune this threshold
	 */
	return (vq->indirect && total_sg > 1 && vq->vq.num_free);
}

/*
 * Modern virtio devices have feature bits to specify whether they need a
 * quirk and bypass the IOMMU. If not there, just use the DMA API.
 *
 * If there, the interaction between virtio and DMA API is messy.
 *
 * On most systems with virtio, physical addresses match bus addresses,
 * and it doesn't particularly matter whether we use the DMA API.
 *
 * On some systems, including Xen and any system with a physical device
 * that speaks virtio behind a physical IOMMU, we must use the DMA API
 * for virtio DMA to work at all.
 *
 * On other systems, including SPARC and PPC64, virtio-pci devices are
 * enumerated as though they are behind an IOMMU, but the virtio host
 * ignores the IOMMU, so we must either pretend that the IOMMU isn't
 * there or somehow map everything as the identity.
 *
 * For the time being, we preserve historic behavior and bypass the DMA
 * API.
 *
 * TODO: install a per-device DMA ops structure that does the right thing
 * taking into account all the above quirks, and use the DMA API
 * unconditionally on the data path.
 */
static bool vring_use_dma_api(struct virtio_device *vdev)
{
	if (!virtio_has_iommu_quirk(vdev))
		return true;

	/* Otherwise, we are left to guess. */
	/*
	 * In theory, it's possible to have a buggy QEMU-supplied
	 * emulated Q35 IOMMU and Xen enabled at the same time.  On
	 * such a configuration, virtio has never worked and will
	 * not work without an even larger kludge.  Instead, enable
	 * the DMA API if we're a Xen guest, which at least allows
	 * all of the sensible Xen configurations to work correctly.
	 */
	if (xen_domain())
		return true;

	return false;
}

/*
 * The DMA ops on various arches are rather gnarly right now, and
 * making all of the arch DMA ops work on the vring device itself
 * is a mess.  For now, we use the parent device for DMA ops.
 */
static inline struct device *vring_dma_dev(const struct vring_virtqueue *vq)
{
	return vq->vq.vdev->dev.parent;
}

/* Map one sg entry. */
static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
				   struct scatterlist *sg,
				   enum dma_data_direction direction)
{
	if (!vring_use_dma_api(vq->vq.vdev))
		return (dma_addr_t)sg_phys(sg);

	/*
	 * We can't use dma_map_sg, because we don't use scatterlists in
	 * the way it expects (we don't guarantee that the scatterlist
	 * will exist for the lifetime of the mapping).
	 */
	return dma_map_page(vring_dma_dev(vq),
			    sg_page(sg), sg->offset, sg->length,
			    direction);
}

static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
				   void *cpu_addr, size_t size,
				   enum dma_data_direction direction)
{
	if (!vring_use_dma_api(vq->vq.vdev))
		return (dma_addr_t)virt_to_phys(cpu_addr);

	return dma_map_single(vring_dma_dev(vq),
			      cpu_addr, size, direction);
}

static int vring_mapping_error(const struct vring_virtqueue *vq,
			       dma_addr_t addr)
{
	if (!vring_use_dma_api(vq->vq.vdev))
		return 0;

	return dma_mapping_error(vring_dma_dev(vq), addr);
}


/*
 * Split ring specific functions - *_split().
 */

static void vring_unmap_one_split(const struct vring_virtqueue *vq,
				  struct vring_desc *desc)
{
	u16 flags;

	if (!vring_use_dma_api(vq->vq.vdev))
		return;

	flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);

	if (flags & VRING_DESC_F_INDIRECT) {
		dma_unmap_single(vring_dma_dev(vq),
				 virtio64_to_cpu(vq->vq.vdev, desc->addr),
				 virtio32_to_cpu(vq->vq.vdev, desc->len),
				 (flags & VRING_DESC_F_WRITE) ?
				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
	} else {
		dma_unmap_page(vring_dma_dev(vq),
			       virtio64_to_cpu(vq->vq.vdev, desc->addr),
			       virtio32_to_cpu(vq->vq.vdev, desc->len),
			       (flags & VRING_DESC_F_WRITE) ?
			       DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}
}

static struct vring_desc *alloc_indirect_split(struct virtqueue *_vq,
					       unsigned int total_sg,
					       gfp_t gfp)
{
	struct vring_desc *desc;
	unsigned int i;

	/*
	 * We require lowmem mappings for the descriptors because
	 * otherwise virt_to_phys will give us bogus addresses in the
	 * virtqueue.
	 */
295b92b1b89SWill Deacon */ 29682107539SMichal Hocko gfp &= ~__GFP_HIGHMEM; 297b92b1b89SWill Deacon 2986da2ec56SKees Cook desc = kmalloc_array(total_sg, sizeof(struct vring_desc), gfp); 2999fa29b9dSMark McLoughlin if (!desc) 300b25bd251SRusty Russell return NULL; 3019fa29b9dSMark McLoughlin 302b25bd251SRusty Russell for (i = 0; i < total_sg; i++) 30300e6f3d9SMichael S. Tsirkin desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1); 304b25bd251SRusty Russell return desc; 3059fa29b9dSMark McLoughlin } 3069fa29b9dSMark McLoughlin 307138fd251STiwei Bie static inline int virtqueue_add_split(struct virtqueue *_vq, 30813816c76SRusty Russell struct scatterlist *sgs[], 309eeebf9b1SRusty Russell unsigned int total_sg, 31013816c76SRusty Russell unsigned int out_sgs, 31113816c76SRusty Russell unsigned int in_sgs, 312bbd603efSMichael S. Tsirkin void *data, 3135a08b04fSMichael S. Tsirkin void *ctx, 314bbd603efSMichael S. Tsirkin gfp_t gfp) 3150a8a69ddSRusty Russell { 3160a8a69ddSRusty Russell struct vring_virtqueue *vq = to_vvq(_vq); 31713816c76SRusty Russell struct scatterlist *sg; 318b25bd251SRusty Russell struct vring_desc *desc; 319780bc790SAndy Lutomirski unsigned int i, n, avail, descs_used, uninitialized_var(prev), err_idx; 3201fe9b6feSMichael S. Tsirkin int head; 321b25bd251SRusty Russell bool indirect; 3220a8a69ddSRusty Russell 3239fa29b9dSMark McLoughlin START_USE(vq); 3249fa29b9dSMark McLoughlin 3250a8a69ddSRusty Russell BUG_ON(data == NULL); 3265a08b04fSMichael S. Tsirkin BUG_ON(ctx && vq->indirect); 3279fa29b9dSMark McLoughlin 32870670444SRusty Russell if (unlikely(vq->broken)) { 32970670444SRusty Russell END_USE(vq); 33070670444SRusty Russell return -EIO; 33170670444SRusty Russell } 33270670444SRusty Russell 3334d6a105eSTiwei Bie LAST_ADD_TIME_UPDATE(vq); 334e93300b1SRusty Russell 33513816c76SRusty Russell BUG_ON(total_sg == 0); 3360a8a69ddSRusty Russell 337b25bd251SRusty Russell head = vq->free_head; 338b25bd251SRusty Russell 339*2f18c2d1STiwei Bie if (virtqueue_use_indirect(_vq, total_sg)) 340138fd251STiwei Bie desc = alloc_indirect_split(_vq, total_sg, gfp); 34144ed8089SRichard W.M. Jones else { 342b25bd251SRusty Russell desc = NULL; 343e593bf97STiwei Bie WARN_ON_ONCE(total_sg > vq->split.vring.num && !vq->indirect); 34444ed8089SRichard W.M. Jones } 345b25bd251SRusty Russell 346b25bd251SRusty Russell if (desc) { 347b25bd251SRusty Russell /* Use a single buffer which doesn't continue */ 348780bc790SAndy Lutomirski indirect = true; 349b25bd251SRusty Russell /* Set up rest to use this indirect table. */ 350b25bd251SRusty Russell i = 0; 351b25bd251SRusty Russell descs_used = 1; 352b25bd251SRusty Russell } else { 353780bc790SAndy Lutomirski indirect = false; 354e593bf97STiwei Bie desc = vq->split.vring.desc; 355b25bd251SRusty Russell i = head; 356b25bd251SRusty Russell descs_used = total_sg; 357b25bd251SRusty Russell } 358b25bd251SRusty Russell 359b25bd251SRusty Russell if (vq->vq.num_free < descs_used) { 3600a8a69ddSRusty Russell pr_debug("Can't add buf len %i - avail = %i\n", 361b25bd251SRusty Russell descs_used, vq->vq.num_free); 36244653eaeSRusty Russell /* FIXME: for historical reasons, we force a notify here if 36344653eaeSRusty Russell * there are outgoing parts to the buffer. Presumably the 36444653eaeSRusty Russell * host should service the ring ASAP. 
		if (out_sgs)
			vq->notify(&vq->vq);
		if (indirect)
			kfree(desc);
		END_USE(vq);
		return -ENOSPC;
	}

	for (n = 0; n < out_sgs; n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT);
			desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
			prev = i;
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		}
	}
	for (; n < (out_sgs + in_sgs); n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT | VRING_DESC_F_WRITE);
			desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
			prev = i;
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		}
	}
	/* Last one doesn't continue. */
	desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);
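
	/*
	 * Illustrative sketch (not part of the original code): with
	 * out_sgs = 2 and in_sgs = 1, one entry per scatterlist, the
	 * loops above build a three-descriptor chain such as
	 *
	 *   desc[head]: addr = out0, flags = NEXT
	 *   desc[a]:    addr = out1, flags = NEXT
	 *   desc[b]:    addr = in0,  flags = WRITE
	 *
	 * where a and b follow the free-list links set up at init time,
	 * and the final NEXT bit has just been cleared above.
	 */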

	if (indirect) {
		/* Now that the indirect table is filled in, map it. */
		dma_addr_t addr = vring_map_single(
			vq, desc, total_sg * sizeof(struct vring_desc),
			DMA_TO_DEVICE);
		if (vring_mapping_error(vq, addr))
			goto unmap_release;

		vq->split.vring.desc[head].flags = cpu_to_virtio16(_vq->vdev,
				VRING_DESC_F_INDIRECT);
		vq->split.vring.desc[head].addr = cpu_to_virtio64(_vq->vdev,
				addr);

		vq->split.vring.desc[head].len = cpu_to_virtio32(_vq->vdev,
				total_sg * sizeof(struct vring_desc));
	}

	/* We're using some buffers from the free list. */
	vq->vq.num_free -= descs_used;

	/* Update free pointer */
	if (indirect)
		vq->free_head = virtio16_to_cpu(_vq->vdev,
					vq->split.vring.desc[head].next);
	else
		vq->free_head = i;

	/* Store token and indirect buffer state. */
	vq->desc_state[head].data = data;
	if (indirect)
		vq->desc_state[head].indir_desc = desc;
	else
		vq->desc_state[head].indir_desc = ctx;

	/* Put entry in available array (but don't update avail->idx until they
	 * do sync). */
	avail = vq->split.avail_idx_shadow & (vq->split.vring.num - 1);
	vq->split.vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);

	/* Descriptors and available array need to be set before we expose the
	 * new available array entries. */
	virtio_wmb(vq->weak_barriers);
	vq->split.avail_idx_shadow++;
	vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
						vq->split.avail_idx_shadow);
	vq->num_added++;

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	/* This is very unlikely, but theoretically possible.  Kick
	 * just in case. */
	if (unlikely(vq->num_added == (1 << 16) - 1))
		virtqueue_kick(_vq);

	return 0;

unmap_release:
	err_idx = i;
	i = head;

	for (n = 0; n < total_sg; n++) {
		if (i == err_idx)
			break;
		vring_unmap_one_split(vq, &desc[i]);
		i = virtio16_to_cpu(_vq->vdev, vq->split.vring.desc[i].next);
	}

	if (indirect)
		kfree(desc);

	END_USE(vq);
	return -EIO;
}

static bool virtqueue_kick_prepare_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 new, old;
	bool needs_kick;

	START_USE(vq);
	/* We need to expose available array entries before checking avail
	 * event. */
	virtio_mb(vq->weak_barriers);

	old = vq->split.avail_idx_shadow - vq->num_added;
	new = vq->split.avail_idx_shadow;
	vq->num_added = 0;

	LAST_ADD_TIME_CHECK(vq);
	LAST_ADD_TIME_INVALID(vq);

	if (vq->event) {
		needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev,
					vring_avail_event(&vq->split.vring)),
					      new, old);
	} else {
		needs_kick = !(vq->split.vring.used->flags &
					cpu_to_virtio16(_vq->vdev,
						VRING_USED_F_NO_NOTIFY));
	}
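
	/*
	 * Worked example (illustrative only): vring_need_event() in
	 * include/uapi/linux/virtio_ring.h computes
	 *   (u16)(new - event_idx - 1) < (u16)(new - old).
	 * With old = 2, new = 5 and the device's avail event at 3, the
	 * driver has just crossed the event index (2 < 3 <= 5, modulo
	 * 2^16), so (u16)(5 - 3 - 1) = 1 < (u16)(5 - 2) = 3 and we kick;
	 * with the event at 7 the test fails and the kick is skipped.
	 */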
	END_USE(vq);
	return needs_kick;
}

static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
			     void **ctx)
{
	unsigned int i, j;
	__virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);

	/* Clear data ptr. */
	vq->desc_state[head].data = NULL;

	/* Put back on free list: unmap first-level descriptors and find end */
	i = head;

	while (vq->split.vring.desc[i].flags & nextflag) {
		vring_unmap_one_split(vq, &vq->split.vring.desc[i]);
		i = virtio16_to_cpu(vq->vq.vdev, vq->split.vring.desc[i].next);
		vq->vq.num_free++;
	}

	vring_unmap_one_split(vq, &vq->split.vring.desc[i]);
	vq->split.vring.desc[i].next = cpu_to_virtio16(vq->vq.vdev,
						       vq->free_head);
	vq->free_head = head;

	/* Plus final descriptor */
	vq->vq.num_free++;

	if (vq->indirect) {
		struct vring_desc *indir_desc = vq->desc_state[head].indir_desc;
		u32 len;

		/* Free the indirect table, if any, now that it's unmapped. */
		if (!indir_desc)
			return;

		len = virtio32_to_cpu(vq->vq.vdev,
				      vq->split.vring.desc[head].len);

		BUG_ON(!(vq->split.vring.desc[head].flags &
			 cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_INDIRECT)));
		BUG_ON(len == 0 || len % sizeof(struct vring_desc));

		for (j = 0; j < len / sizeof(struct vring_desc); j++)
			vring_unmap_one_split(vq, &indir_desc[j]);

		kfree(indir_desc);
		vq->desc_state[head].indir_desc = NULL;
	} else if (ctx) {
		*ctx = vq->desc_state[head].indir_desc;
	}
}

static inline bool more_used_split(const struct vring_virtqueue *vq)
{
	return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev,
			vq->split.vring.used->idx);
}

static void *virtqueue_get_buf_ctx_split(struct virtqueue *_vq,
					 unsigned int *len,
					 void **ctx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	void *ret;
	unsigned int i;
	u16 last_used;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used_split(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* Only get used array entries after they have been exposed by host. */
	virtio_rmb(vq->weak_barriers);

	last_used = (vq->last_used_idx & (vq->split.vring.num - 1));
	i = virtio32_to_cpu(_vq->vdev,
			vq->split.vring.used->ring[last_used].id);
	*len = virtio32_to_cpu(_vq->vdev,
			vq->split.vring.used->ring[last_used].len);

	if (unlikely(i >= vq->split.vring.num)) {
		BAD_RING(vq, "id %u out of range\n", i);
		return NULL;
	}
	if (unlikely(!vq->desc_state[i].data)) {
		BAD_RING(vq, "id %u is not a head!\n", i);
		return NULL;
	}

	/* detach_buf_split clears data, so grab it now. */
	ret = vq->desc_state[i].data;
	detach_buf_split(vq, i, ctx);
	vq->last_used_idx++;
	/* If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call. */
	if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT))
		virtio_store_mb(vq->weak_barriers,
				&vring_used_event(&vq->split.vring),
				cpu_to_virtio16(_vq->vdev, vq->last_used_idx));

	LAST_ADD_TIME_INVALID(vq);

	END_USE(vq);
	return ret;
}

static void virtqueue_disable_cb_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
		vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->split.vring.avail->flags =
				cpu_to_virtio16(_vq->vdev,
						vq->split.avail_flags_shadow);
	}
}

static unsigned virtqueue_enable_cb_prepare_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 last_used_idx;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->split.vring.avail->flags =
				cpu_to_virtio16(_vq->vdev,
						vq->split.avail_flags_shadow);
	}
	vring_used_event(&vq->split.vring) = cpu_to_virtio16(_vq->vdev,
			last_used_idx = vq->last_used_idx);
	END_USE(vq);
	return last_used_idx;
}

static bool virtqueue_poll_split(struct virtqueue *_vq, unsigned last_used_idx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev,
			vq->split.vring.used->idx);
}

static bool virtqueue_enable_cb_delayed_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 bufs;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always update the event index to keep code simple. */
	if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->split.vring.avail->flags =
				cpu_to_virtio16(_vq->vdev,
						vq->split.avail_flags_shadow);
	}
	/* TODO: tune this threshold */
	bufs = (u16)(vq->split.avail_idx_shadow - vq->last_used_idx) * 3 / 4;

	virtio_store_mb(vq->weak_barriers,
			&vring_used_event(&vq->split.vring),
			cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs));

	if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->split.vring.used->idx)
					- vq->last_used_idx) > bufs)) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}

static void *virtqueue_detach_unused_buf_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i;
	void *buf;

	START_USE(vq);

	for (i = 0; i < vq->split.vring.num; i++) {
		if (!vq->desc_state[i].data)
			continue;
		/* detach_buf_split clears data, so grab it now. */
		buf = vq->desc_state[i].data;
		detach_buf_split(vq, i, NULL);
		vq->split.avail_idx_shadow--;
		vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
				vq->split.avail_idx_shadow);
		END_USE(vq);
		return buf;
	}
	/* That should have freed everything. */
	BUG_ON(vq->vq.num_free != vq->split.vring.num);

	END_USE(vq);
	return NULL;
}


/*
 * Generic functions and exported symbols.
 */

static inline int virtqueue_add(struct virtqueue *_vq,
				struct scatterlist *sgs[],
				unsigned int total_sg,
				unsigned int out_sgs,
				unsigned int in_sgs,
				void *data,
				void *ctx,
				gfp_t gfp)
{
	return virtqueue_add_split(_vq, sgs, total_sg,
				   out_sgs, in_sgs, data, ctx, gfp);
}

/**
 * virtqueue_add_sgs - expose buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sgs: array of terminated scatterlists.
 * @out_sgs: the number of scatterlists readable by other side
 * @in_sgs: the number of scatterlists which are writable (after readable ones)
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (e.g. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_sgs(struct virtqueue *_vq,
		      struct scatterlist *sgs[],
		      unsigned int out_sgs,
		      unsigned int in_sgs,
		      void *data,
		      gfp_t gfp)
{
	unsigned int i, total_sg = 0;

	/* Count them first. */
	for (i = 0; i < out_sgs + in_sgs; i++) {
		struct scatterlist *sg;

		for (sg = sgs[i]; sg; sg = sg_next(sg))
			total_sg++;
	}
	return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs,
			     data, NULL, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_sgs);

/**
 * virtqueue_add_outbuf - expose output buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg readable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (e.g. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_outbuf(struct virtqueue *vq,
			 struct scatterlist *sg, unsigned int num,
			 void *data,
			 gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);
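
/*
 * Usage sketch (not part of this file): a driver queueing a request with
 * one device-readable field and one device-writable status byte, in the
 * style of virtio_blk.  "struct example_req" and the caller's context are
 * assumptions made up for illustration.
 */
#if 0
struct example_req {
	u32 type;	/* read by the device */
	u8 status;	/* written by the device */
};

static int example_queue_req(struct virtqueue *vq, struct example_req *req)
{
	struct scatterlist hdr, status;
	struct scatterlist *sgs[2] = { &hdr, &status };

	sg_init_one(&hdr, &req->type, sizeof(req->type));
	sg_init_one(&status, &req->status, sizeof(req->status));

	/* 1 readable sg followed by 1 writable sg; req is the token. */
	return virtqueue_add_sgs(vq, sgs, 1, 1, req, GFP_ATOMIC);
}
#endif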

/**
 * virtqueue_add_inbuf - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (e.g. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_inbuf(struct virtqueue *vq,
			struct scatterlist *sg, unsigned int num,
			void *data,
			gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 0, 1, data, NULL, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);

/**
 * virtqueue_add_inbuf_ctx - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @ctx: extra context for the token
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (e.g. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_inbuf_ctx(struct virtqueue *vq,
			    struct scatterlist *sg, unsigned int num,
			    void *data,
			    void *ctx,
			    gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_ctx);

/**
 * virtqueue_kick_prepare - first half of split virtqueue_kick call.
 * @vq: the struct virtqueue
 *
 * Instead of virtqueue_kick(), you can do:
 *	if (virtqueue_kick_prepare(vq))
 *		virtqueue_notify(vq);
 *
 * This is sometimes useful because the virtqueue_kick_prepare() needs
 * to be serialized, but the actual virtqueue_notify() call does not.
 */
bool virtqueue_kick_prepare(struct virtqueue *_vq)
{
	return virtqueue_kick_prepare_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);
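
/*
 * Usage sketch (assumptions: a hypothetical driver with a "vq_lock"
 * spinlock serializing its virtqueue operations).  The prepare step runs
 * under the lock, while the expensive notify (often a VM exit) does not:
 */
#if 0
static void example_add_and_kick(struct virtqueue *vq, spinlock_t *vq_lock,
				 struct scatterlist *sg, void *token)
{
	bool kick;

	spin_lock(vq_lock);
	if (virtqueue_add_outbuf(vq, sg, 1, token, GFP_ATOMIC) < 0) {
		spin_unlock(vq_lock);
		return;		/* ring full; a real driver would requeue */
	}
	kick = virtqueue_kick_prepare(vq);
	spin_unlock(vq_lock);

	if (kick)
		virtqueue_notify(vq);
}
#endif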

/**
 * virtqueue_notify - second half of split virtqueue_kick call.
 * @vq: the struct virtqueue
 *
 * This does not need to be serialized.
 *
 * Returns false if host notify failed or queue is broken, otherwise true.
 */
bool virtqueue_notify(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (unlikely(vq->broken))
		return false;

	/* Prod other side to tell it about changes. */
	if (!vq->notify(_vq)) {
		vq->broken = true;
		return false;
	}
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_notify);

/**
 * virtqueue_kick - update after add_buf
 * @vq: the struct virtqueue
 *
 * After one or more virtqueue_add_* calls, invoke this to kick
 * the other side.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns false if kick failed, otherwise true.
 */
bool virtqueue_kick(struct virtqueue *vq)
{
	if (virtqueue_kick_prepare(vq))
		return virtqueue_notify(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_kick);

/**
 * virtqueue_get_buf - get the next used buffer
 * @vq: the struct virtqueue we're talking about.
 * @len: the length written into the buffer
 *
 * If the device wrote data into the buffer, @len will be set to the
 * amount written.  This means you don't need to clear the buffer
 * beforehand to ensure there's no data leakage in the case of short
 * writes.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns NULL if there are no used buffers, or the "data" token
 * handed to virtqueue_add_*().
 */
void *virtqueue_get_buf_ctx(struct virtqueue *_vq, unsigned int *len,
			    void **ctx)
{
	return virtqueue_get_buf_ctx_split(_vq, len, ctx);
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf_ctx);

void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
	return virtqueue_get_buf_ctx(_vq, len, NULL);
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf);
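
/*
 * Usage sketch (illustrative): reclaiming completion tokens after the
 * device has consumed buffers.  A real driver normally does this from its
 * virtqueue callback; example_complete() is an assumed helper.
 */
#if 0
static void example_drain_used(struct virtqueue *vq)
{
	unsigned int len;
	void *token;

	/* len is the number of bytes the device wrote into the buffer. */
	while ((token = virtqueue_get_buf(vq, &len)) != NULL)
		example_complete(token, len);
}
#endif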

/**
 * virtqueue_disable_cb - disable callbacks
 * @vq: the struct virtqueue we're talking about.
 *
 * Note that this is not necessarily synchronous, hence unreliable and only
 * useful as an optimization.
 *
 * Unlike other operations, this need not be serialized.
 */
void virtqueue_disable_cb(struct virtqueue *_vq)
{
	virtqueue_disable_cb_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);

/**
 * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns current queue state
 * in an opaque unsigned value. This value should be later tested by
 * virtqueue_poll, to detect a possible race between the driver checking for
 * more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
{
	return virtqueue_enable_cb_prepare_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);

/**
 * virtqueue_poll - query pending used buffers
 * @vq: the struct virtqueue we're talking about.
 * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
 *
 * Returns "true" if there are pending used buffers in the queue.
 *
 * This does not need to be serialized.
 */
bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	virtio_mb(vq->weak_barriers);
	return virtqueue_poll_split(_vq, last_used_idx);
}
EXPORT_SYMBOL_GPL(virtqueue_poll);

/**
 * virtqueue_enable_cb - restart callbacks after disable_cb.
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns "false" if there are pending
 * buffers in the queue, to detect a possible race between the driver
 * checking for more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb(struct virtqueue *_vq)
{
	unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);

	return !virtqueue_poll(_vq, last_used_idx);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
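
/*
 * Usage sketch (illustrative): the canonical callback pattern.  Callbacks
 * are disabled while draining, then re-enabled; if virtqueue_enable_cb()
 * reports more pending buffers (the race it exists to catch), drain
 * again.  example_consume() is an assumed helper.
 */
#if 0
static void example_vq_callback(struct virtqueue *vq)
{
	unsigned int len;
	void *token;

	do {
		virtqueue_disable_cb(vq);
		while ((token = virtqueue_get_buf(vq, &len)) != NULL)
			example_consume(token, len);
	} while (!virtqueue_enable_cb(vq));
}
#endif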

/**
 * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks but hints to the other side to delay
 * interrupts until most of the available buffers have been processed;
 * it returns "false" if there are many pending buffers in the queue,
 * to detect a possible race between the driver checking for more work,
 * and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
{
	return virtqueue_enable_cb_delayed_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);

/**
 * virtqueue_detach_unused_buf - detach first unused buffer
 * @vq: the struct virtqueue we're talking about.
 *
 * Returns NULL or the "data" token handed to virtqueue_add_*().
 * This is not valid on an active queue; it is useful only for device
 * shutdown.
 */
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
	return virtqueue_detach_unused_buf_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);

static inline bool more_used(const struct vring_virtqueue *vq)
{
	return more_used_split(vq);
}

irqreturn_t vring_interrupt(int irq, void *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!more_used(vq)) {
		pr_debug("virtqueue interrupt with no work for %p\n", vq);
		return IRQ_NONE;
	}

	if (unlikely(vq->broken))
		return IRQ_HANDLED;

	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
	if (vq->vq.callback)
		vq->vq.callback(&vq->vq);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);
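
/*
 * Usage sketch (illustrative): transports with per-queue interrupt
 * vectors can point the vector's handler straight at vring_interrupt(),
 * passing the struct virtqueue as dev_id.  The irq number, flags and
 * name here are assumptions for the example.
 */
#if 0
static int example_request_vq_irq(unsigned int irq, struct virtqueue *vq)
{
	return request_irq(irq, vring_interrupt, 0, "example-vq", vq);
}
#endif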

struct virtqueue *__vring_new_virtqueue(unsigned int index,
					struct vring vring,
					struct virtio_device *vdev,
					bool weak_barriers,
					bool context,
					bool (*notify)(struct virtqueue *),
					void (*callback)(struct virtqueue *),
					const char *name)
{
	unsigned int i;
	struct vring_virtqueue *vq;

	vq = kmalloc(sizeof(*vq) + vring.num * sizeof(struct vring_desc_state),
		     GFP_KERNEL);
	if (!vq)
		return NULL;

	vq->vq.callback = callback;
	vq->vq.vdev = vdev;
	vq->vq.name = name;
	vq->vq.num_free = vring.num;
	vq->vq.index = index;
	vq->we_own_ring = false;
	vq->queue_dma_addr = 0;
	vq->queue_size_in_bytes = 0;
	vq->notify = notify;
	vq->weak_barriers = weak_barriers;
	vq->broken = false;
	vq->last_used_idx = 0;
	vq->num_added = 0;
	list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
	vq->in_use = false;
	vq->last_add_time_valid = false;
#endif

	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
		!context;
	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

	vq->split.vring = vring;
	vq->split.avail_flags_shadow = 0;
	vq->split.avail_idx_shadow = 0;

	/* No callback?  Tell other side not to bother us. */
	if (!callback) {
		vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->split.vring.avail->flags = cpu_to_virtio16(vdev,
					vq->split.avail_flags_shadow);
	}

	/* Put everything in free lists. */
11332a2d1382SAndy Lutomirski static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
11342a2d1382SAndy Lutomirski 			       dma_addr_t *dma_handle, gfp_t flag)
11352a2d1382SAndy Lutomirski {
11362a2d1382SAndy Lutomirski 	if (vring_use_dma_api(vdev)) {
11372a2d1382SAndy Lutomirski 		return dma_alloc_coherent(vdev->dev.parent, size,
11382a2d1382SAndy Lutomirski 					  dma_handle, flag);
11392a2d1382SAndy Lutomirski 	} else {
11402a2d1382SAndy Lutomirski 		void *queue = alloc_pages_exact(PAGE_ALIGN(size), flag);
11412a2d1382SAndy Lutomirski 		if (queue) {
11422a2d1382SAndy Lutomirski 			phys_addr_t phys_addr = virt_to_phys(queue);
11432a2d1382SAndy Lutomirski 			*dma_handle = (dma_addr_t)phys_addr;
11442a2d1382SAndy Lutomirski 
11452a2d1382SAndy Lutomirski 			/*
11462a2d1382SAndy Lutomirski 			 * Sanity check: make sure we didn't truncate
11472a2d1382SAndy Lutomirski 			 * the address.  The only arches I can find that
11482a2d1382SAndy Lutomirski 			 * have 64-bit phys_addr_t but 32-bit dma_addr_t
11492a2d1382SAndy Lutomirski 			 * are certain non-highmem MIPS and x86
11502a2d1382SAndy Lutomirski 			 * configurations, but these configurations
11512a2d1382SAndy Lutomirski 			 * should never allocate physical pages above 32
11522a2d1382SAndy Lutomirski 			 * bits, so this is fine.  Just in case, throw a
11532a2d1382SAndy Lutomirski 			 * warning and abort if we end up with an
11542a2d1382SAndy Lutomirski 			 * unrepresentable address.
11552a2d1382SAndy Lutomirski 			 */
11562a2d1382SAndy Lutomirski 			if (WARN_ON_ONCE(*dma_handle != phys_addr)) {
11572a2d1382SAndy Lutomirski 				free_pages_exact(queue, PAGE_ALIGN(size));
11582a2d1382SAndy Lutomirski 				return NULL;
11592a2d1382SAndy Lutomirski 			}
11602a2d1382SAndy Lutomirski 		}
11612a2d1382SAndy Lutomirski 		return queue;
11622a2d1382SAndy Lutomirski 	}
11632a2d1382SAndy Lutomirski }
11642a2d1382SAndy Lutomirski 
11652a2d1382SAndy Lutomirski static void vring_free_queue(struct virtio_device *vdev, size_t size,
11662a2d1382SAndy Lutomirski 			     void *queue, dma_addr_t dma_handle)
11672a2d1382SAndy Lutomirski {
11682a2d1382SAndy Lutomirski 	if (vring_use_dma_api(vdev)) {
11692a2d1382SAndy Lutomirski 		dma_free_coherent(vdev->dev.parent, size, queue, dma_handle);
11702a2d1382SAndy Lutomirski 	} else {
11712a2d1382SAndy Lutomirski 		free_pages_exact(queue, PAGE_ALIGN(size));
11722a2d1382SAndy Lutomirski 	}
11732a2d1382SAndy Lutomirski }
11742a2d1382SAndy Lutomirski 
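/*
 * Worked example (numbers follow vring_size() in the uapi virtio_ring
 * header): the buffer vring_alloc_queue() must cover for a split ring of
 * num entries is 16*num bytes of descriptors plus 6 + 2*num bytes of
 * avail ring, rounded up to the alignment, plus 6 + 8*num bytes of used
 * ring.  For num = 256, align = 4096: (4096 + 518) rounds up to 8192,
 * plus 2054 bytes of used ring, i.e. 10246 bytes.  The helper name is
 * hypothetical.
 */
static size_t example_split_ring_bytes(void)
{
	return vring_size(256, 4096);	/* 10246 */
}
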
11752a2d1382SAndy Lutomirski struct virtqueue *vring_create_virtqueue(
11762a2d1382SAndy Lutomirski 	unsigned int index,
11772a2d1382SAndy Lutomirski 	unsigned int num,
11782a2d1382SAndy Lutomirski 	unsigned int vring_align,
11792a2d1382SAndy Lutomirski 	struct virtio_device *vdev,
11802a2d1382SAndy Lutomirski 	bool weak_barriers,
11812a2d1382SAndy Lutomirski 	bool may_reduce_num,
1182f94682ddSMichael S. Tsirkin 	bool context,
11832a2d1382SAndy Lutomirski 	bool (*notify)(struct virtqueue *),
11842a2d1382SAndy Lutomirski 	void (*callback)(struct virtqueue *),
11852a2d1382SAndy Lutomirski 	const char *name)
11862a2d1382SAndy Lutomirski {
11872a2d1382SAndy Lutomirski 	struct virtqueue *vq;
1188e00f7bd2SDan Carpenter 	void *queue = NULL;
11892a2d1382SAndy Lutomirski 	dma_addr_t dma_addr;
11902a2d1382SAndy Lutomirski 	size_t queue_size_in_bytes;
11912a2d1382SAndy Lutomirski 	struct vring vring;
11922a2d1382SAndy Lutomirski 
11932a2d1382SAndy Lutomirski 	/* We assume num is a power of 2. */
11942a2d1382SAndy Lutomirski 	if (num & (num - 1)) {
11952a2d1382SAndy Lutomirski 		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
11962a2d1382SAndy Lutomirski 		return NULL;
11972a2d1382SAndy Lutomirski 	}
11982a2d1382SAndy Lutomirski 
11992a2d1382SAndy Lutomirski 	/* TODO: allocate each queue chunk individually */
12002a2d1382SAndy Lutomirski 	for (; num && vring_size(num, vring_align) > PAGE_SIZE; num /= 2) {
12012a2d1382SAndy Lutomirski 		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
12022a2d1382SAndy Lutomirski 					  &dma_addr,
12032a2d1382SAndy Lutomirski 					  GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
12042a2d1382SAndy Lutomirski 		if (queue)
12052a2d1382SAndy Lutomirski 			break;
12062a2d1382SAndy Lutomirski 	}
12072a2d1382SAndy Lutomirski 
12082a2d1382SAndy Lutomirski 	if (!num)
12092a2d1382SAndy Lutomirski 		return NULL;
12102a2d1382SAndy Lutomirski 
12112a2d1382SAndy Lutomirski 	if (!queue) {
12122a2d1382SAndy Lutomirski 		/* Try to get a single page. You are my only hope! */
12132a2d1382SAndy Lutomirski 		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
12142a2d1382SAndy Lutomirski 					  &dma_addr, GFP_KERNEL|__GFP_ZERO);
12152a2d1382SAndy Lutomirski 	}
12162a2d1382SAndy Lutomirski 	if (!queue)
12172a2d1382SAndy Lutomirski 		return NULL;
12182a2d1382SAndy Lutomirski 
12192a2d1382SAndy Lutomirski 	queue_size_in_bytes = vring_size(num, vring_align);
12202a2d1382SAndy Lutomirski 	vring_init(&vring, num, queue, vring_align);
12212a2d1382SAndy Lutomirski 
1222f94682ddSMichael S. Tsirkin 	vq = __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
12232a2d1382SAndy Lutomirski 				   notify, callback, name);
12242a2d1382SAndy Lutomirski 	if (!vq) {
12252a2d1382SAndy Lutomirski 		vring_free_queue(vdev, queue_size_in_bytes, queue,
12262a2d1382SAndy Lutomirski 				 dma_addr);
12272a2d1382SAndy Lutomirski 		return NULL;
12282a2d1382SAndy Lutomirski 	}
12292a2d1382SAndy Lutomirski 
12302a2d1382SAndy Lutomirski 	to_vvq(vq)->queue_dma_addr = dma_addr;
12312a2d1382SAndy Lutomirski 	to_vvq(vq)->queue_size_in_bytes = queue_size_in_bytes;
12322a2d1382SAndy Lutomirski 	to_vvq(vq)->we_own_ring = true;
12332a2d1382SAndy Lutomirski 
12342a2d1382SAndy Lutomirski 	return vq;
12352a2d1382SAndy Lutomirski }
12362a2d1382SAndy Lutomirski EXPORT_SYMBOL_GPL(vring_create_virtqueue);
12372a2d1382SAndy Lutomirski 
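/*
 * Illustrative sketch: how a transport might obtain a queue from
 * vring_create_virtqueue().  The 128-entry size and the helper name are
 * hypothetical; with may_reduce_num set, the ring can come back smaller
 * than requested when memory is tight, so callers should check
 * virtqueue_get_vring_size() afterwards.
 */
static struct virtqueue *example_create_vq(struct virtio_device *vdev,
					   bool (*notify)(struct virtqueue *),
					   void (*callback)(struct virtqueue *))
{
	return vring_create_virtqueue(0, 128, PAGE_SIZE, vdev,
				      true,  /* weak_barriers */
				      true,  /* may_reduce_num */
				      false, /* context */
				      notify, callback, "example-vq");
}
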
12382a2d1382SAndy Lutomirski struct virtqueue *vring_new_virtqueue(unsigned int index,
12392a2d1382SAndy Lutomirski 				      unsigned int num,
12402a2d1382SAndy Lutomirski 				      unsigned int vring_align,
12412a2d1382SAndy Lutomirski 				      struct virtio_device *vdev,
12422a2d1382SAndy Lutomirski 				      bool weak_barriers,
1243f94682ddSMichael S. Tsirkin 			      bool context,
12442a2d1382SAndy Lutomirski 				      void *pages,
12452a2d1382SAndy Lutomirski 				      bool (*notify)(struct virtqueue *vq),
12462a2d1382SAndy Lutomirski 				      void (*callback)(struct virtqueue *vq),
12472a2d1382SAndy Lutomirski 				      const char *name)
12482a2d1382SAndy Lutomirski {
12492a2d1382SAndy Lutomirski 	struct vring vring;
12502a2d1382SAndy Lutomirski 	vring_init(&vring, num, pages, vring_align);
1251f94682ddSMichael S. Tsirkin 	return __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
12522a2d1382SAndy Lutomirski 				     notify, callback, name);
12532a2d1382SAndy Lutomirski }
1254c6fd4701SRusty Russell EXPORT_SYMBOL_GPL(vring_new_virtqueue);
12550a8a69ddSRusty Russell 
12562a2d1382SAndy Lutomirski void vring_del_virtqueue(struct virtqueue *_vq)
12570a8a69ddSRusty Russell {
12582a2d1382SAndy Lutomirski 	struct vring_virtqueue *vq = to_vvq(_vq);
12592a2d1382SAndy Lutomirski 
12602a2d1382SAndy Lutomirski 	if (vq->we_own_ring) {
12612a2d1382SAndy Lutomirski 		vring_free_queue(vq->vq.vdev, vq->queue_size_in_bytes,
1262e593bf97STiwei Bie 				 vq->split.vring.desc, vq->queue_dma_addr);
12632a2d1382SAndy Lutomirski 	}
12642a2d1382SAndy Lutomirski 	list_del(&_vq->list);
12652a2d1382SAndy Lutomirski 	kfree(vq);
12660a8a69ddSRusty Russell }
1267c6fd4701SRusty Russell EXPORT_SYMBOL_GPL(vring_del_virtqueue);
12680a8a69ddSRusty Russell 
1269e34f8725SRusty Russell /* Manipulates transport-specific feature bits. */
1270e34f8725SRusty Russell void vring_transport_features(struct virtio_device *vdev)
1271e34f8725SRusty Russell {
1272e34f8725SRusty Russell 	unsigned int i;
1273e34f8725SRusty Russell 
1274e34f8725SRusty Russell 	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
1275e34f8725SRusty Russell 		switch (i) {
12769fa29b9dSMark McLoughlin 		case VIRTIO_RING_F_INDIRECT_DESC:
12779fa29b9dSMark McLoughlin 			break;
1278a5c262c5SMichael S. Tsirkin 		case VIRTIO_RING_F_EVENT_IDX:
1279a5c262c5SMichael S. Tsirkin 			break;
1280747ae34aSMichael S. Tsirkin 		case VIRTIO_F_VERSION_1:
1281747ae34aSMichael S. Tsirkin 			break;
12821a937693SMichael S. Tsirkin 		case VIRTIO_F_IOMMU_PLATFORM:
12831a937693SMichael S. Tsirkin 			break;
1284e34f8725SRusty Russell 		default:
1285e34f8725SRusty Russell 			/* We don't understand this bit. */
1286e16e12beSMichael S. Tsirkin 			__virtio_clear_bit(vdev, i);
1287e34f8725SRusty Russell 		}
1288e34f8725SRusty Russell 	}
1289e34f8725SRusty Russell }
1290e34f8725SRusty Russell EXPORT_SYMBOL_GPL(vring_transport_features);
1291e34f8725SRusty Russell 
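/*
 * Illustrative sketch: transports typically call
 * vring_transport_features() from their virtio_config_ops
 * .finalize_features hook, so that transport feature bits the ring code
 * does not understand are cleared before the device is used.  The
 * function name is hypothetical; error handling is elided.
 */
static int example_finalize_features(struct virtio_device *vdev)
{
	/* Give the ring code a chance to veto unknown transport features. */
	vring_transport_features(vdev);
	return 0;
}
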
12925dfc1762SRusty Russell /**
12935dfc1762SRusty Russell  * virtqueue_get_vring_size - return the size of the virtqueue's vring
12945dfc1762SRusty Russell  * @vq: the struct virtqueue containing the vring of interest.
12955dfc1762SRusty Russell  *
12965dfc1762SRusty Russell  * Returns the size of the vring.  This is mainly used for boasting to
12975dfc1762SRusty Russell  * userspace.  Unlike other operations, this need not be serialized.
12985dfc1762SRusty Russell  */
12998f9f4668SRick Jones unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
13008f9f4668SRick Jones {
13018f9f4668SRick Jones 
13028f9f4668SRick Jones 	struct vring_virtqueue *vq = to_vvq(_vq);
13038f9f4668SRick Jones 
1304e593bf97STiwei Bie 	return vq->split.vring.num;
13058f9f4668SRick Jones }
13068f9f4668SRick Jones EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);
13078f9f4668SRick Jones 
1308b3b32c94SHeinz Graalfs bool virtqueue_is_broken(struct virtqueue *_vq)
1309b3b32c94SHeinz Graalfs {
1310b3b32c94SHeinz Graalfs 	struct vring_virtqueue *vq = to_vvq(_vq);
1311b3b32c94SHeinz Graalfs 
1312b3b32c94SHeinz Graalfs 	return vq->broken;
1313b3b32c94SHeinz Graalfs }
1314b3b32c94SHeinz Graalfs EXPORT_SYMBOL_GPL(virtqueue_is_broken);
1315b3b32c94SHeinz Graalfs 
1316e2dcdfe9SRusty Russell /*
1317e2dcdfe9SRusty Russell  * This should prevent the device from being used, allowing drivers to
1318e2dcdfe9SRusty Russell  * recover.  You may need to grab appropriate locks to flush.
1319e2dcdfe9SRusty Russell  */
1320e2dcdfe9SRusty Russell void virtio_break_device(struct virtio_device *dev)
1321e2dcdfe9SRusty Russell {
1322e2dcdfe9SRusty Russell 	struct virtqueue *_vq;
1323e2dcdfe9SRusty Russell 
1324e2dcdfe9SRusty Russell 	list_for_each_entry(_vq, &dev->vqs, list) {
1325e2dcdfe9SRusty Russell 		struct vring_virtqueue *vq = to_vvq(_vq);
1326e2dcdfe9SRusty Russell 		vq->broken = true;
1327e2dcdfe9SRusty Russell 	}
1328e2dcdfe9SRusty Russell }
1329e2dcdfe9SRusty Russell EXPORT_SYMBOL_GPL(virtio_break_device);
1330e2dcdfe9SRusty Russell 
13312a2d1382SAndy Lutomirski dma_addr_t virtqueue_get_desc_addr(struct virtqueue *_vq)
133289062652SCornelia Huck {
133389062652SCornelia Huck 	struct vring_virtqueue *vq = to_vvq(_vq);
133489062652SCornelia Huck 
13352a2d1382SAndy Lutomirski 	BUG_ON(!vq->we_own_ring);
133689062652SCornelia Huck 
13372a2d1382SAndy Lutomirski 	return vq->queue_dma_addr;
13382a2d1382SAndy Lutomirski }
13392a2d1382SAndy Lutomirski EXPORT_SYMBOL_GPL(virtqueue_get_desc_addr);
13402a2d1382SAndy Lutomirski 
13412a2d1382SAndy Lutomirski dma_addr_t virtqueue_get_avail_addr(struct virtqueue *_vq)
134289062652SCornelia Huck {
134389062652SCornelia Huck 	struct vring_virtqueue *vq = to_vvq(_vq);
134489062652SCornelia Huck 
13452a2d1382SAndy Lutomirski 	BUG_ON(!vq->we_own_ring);
13462a2d1382SAndy Lutomirski 
13472a2d1382SAndy Lutomirski 	return vq->queue_dma_addr +
1348e593bf97STiwei Bie 		((char *)vq->split.vring.avail - (char *)vq->split.vring.desc);
134989062652SCornelia Huck }
13502a2d1382SAndy Lutomirski EXPORT_SYMBOL_GPL(virtqueue_get_avail_addr);
13512a2d1382SAndy Lutomirski 
13522a2d1382SAndy Lutomirski dma_addr_t virtqueue_get_used_addr(struct virtqueue *_vq)
13532a2d1382SAndy Lutomirski {
13542a2d1382SAndy Lutomirski 	struct vring_virtqueue *vq = to_vvq(_vq);
13552a2d1382SAndy Lutomirski 
13562a2d1382SAndy Lutomirski 	BUG_ON(!vq->we_own_ring);
13572a2d1382SAndy Lutomirski 
13582a2d1382SAndy Lutomirski 	return vq->queue_dma_addr +
1359e593bf97STiwei Bie 		((char *)vq->split.vring.used - (char *)vq->split.vring.desc);
13602a2d1382SAndy Lutomirski }
13612a2d1382SAndy Lutomirski EXPORT_SYMBOL_GPL(virtqueue_get_used_addr);
13622a2d1382SAndy Lutomirski 
13632a2d1382SAndy Lutomirski const struct vring *virtqueue_get_vring(struct virtqueue *vq)
13642a2d1382SAndy Lutomirski {
1365e593bf97STiwei Bie 	return &to_vvq(vq)->split.vring;
13662a2d1382SAndy Lutomirski }
13672a2d1382SAndy Lutomirski EXPORT_SYMBOL_GPL(virtqueue_get_vring);
136889062652SCornelia Huck 
1369c6fd4701SRusty Russell MODULE_LICENSE("GPL");
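
/*
 * Illustrative sketch: a transport that created the ring with
 * vring_create_virtqueue() (so we_own_ring is true) can program the
 * device with the ring's bus addresses via the getters above.  Assumes
 * <linux/io.h>; the register offsets and the helper name are
 * hypothetical, as real transports define their own register layout.
 */
#define EXAMPLE_QUEUE_DESC_LOW	0x80
#define EXAMPLE_QUEUE_DESC_HIGH	0x84

static void example_program_desc_addr(void __iomem *base,
				      struct virtqueue *vq)
{
	u64 addr = virtqueue_get_desc_addr(vq);

	writel(lower_32_bits(addr), base + EXAMPLE_QUEUE_DESC_LOW);
	writel(upper_32_bits(addr), base + EXAMPLE_QUEUE_DESC_HIGH);
}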