1fd534e9bSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-or-later 20a8a69ddSRusty Russell /* Virtio ring implementation. 30a8a69ddSRusty Russell * 40a8a69ddSRusty Russell * Copyright 2007 Rusty Russell IBM Corporation 50a8a69ddSRusty Russell */ 60a8a69ddSRusty Russell #include <linux/virtio.h> 70a8a69ddSRusty Russell #include <linux/virtio_ring.h> 8e34f8725SRusty Russell #include <linux/virtio_config.h> 90a8a69ddSRusty Russell #include <linux/device.h> 105a0e3ad6STejun Heo #include <linux/slab.h> 11b5a2c4f1SPaul Gortmaker #include <linux/module.h> 12e93300b1SRusty Russell #include <linux/hrtimer.h> 13780bc790SAndy Lutomirski #include <linux/dma-mapping.h> 1488938359SAlexander Potapenko #include <linux/kmsan.h> 15f8ce7263SMichael S. Tsirkin #include <linux/spinlock.h> 1678fe3987SAndy Lutomirski #include <xen/xen.h> 170a8a69ddSRusty Russell 180a8a69ddSRusty Russell #ifdef DEBUG 190a8a69ddSRusty Russell /* For development, we want to crash whenever the ring is screwed. */ 209499f5e7SRusty Russell #define BAD_RING(_vq, fmt, args...) \ 219499f5e7SRusty Russell do { \ 229499f5e7SRusty Russell dev_err(&(_vq)->vq.vdev->dev, \ 239499f5e7SRusty Russell "%s:"fmt, (_vq)->vq.name, ##args); \ 249499f5e7SRusty Russell BUG(); \ 259499f5e7SRusty Russell } while (0) 26c5f841f1SRusty Russell /* Caller is supposed to guarantee no reentry. */ 273a35ce7dSRoel Kluin #define START_USE(_vq) \ 28c5f841f1SRusty Russell do { \ 29c5f841f1SRusty Russell if ((_vq)->in_use) \ 309499f5e7SRusty Russell panic("%s:in_use = %i\n", \ 319499f5e7SRusty Russell (_vq)->vq.name, (_vq)->in_use); \ 32c5f841f1SRusty Russell (_vq)->in_use = __LINE__; \ 33c5f841f1SRusty Russell } while (0) 343a35ce7dSRoel Kluin #define END_USE(_vq) \ 3597a545abSRusty Russell do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0) 364d6a105eSTiwei Bie #define LAST_ADD_TIME_UPDATE(_vq) \ 374d6a105eSTiwei Bie do { \ 384d6a105eSTiwei Bie ktime_t now = ktime_get(); \ 394d6a105eSTiwei Bie \ 404d6a105eSTiwei Bie /* No kick or get, with .1 second between? Warn. */ \ 414d6a105eSTiwei Bie if ((_vq)->last_add_time_valid) \ 424d6a105eSTiwei Bie WARN_ON(ktime_to_ms(ktime_sub(now, \ 434d6a105eSTiwei Bie (_vq)->last_add_time)) > 100); \ 444d6a105eSTiwei Bie (_vq)->last_add_time = now; \ 454d6a105eSTiwei Bie (_vq)->last_add_time_valid = true; \ 464d6a105eSTiwei Bie } while (0) 474d6a105eSTiwei Bie #define LAST_ADD_TIME_CHECK(_vq) \ 484d6a105eSTiwei Bie do { \ 494d6a105eSTiwei Bie if ((_vq)->last_add_time_valid) { \ 504d6a105eSTiwei Bie WARN_ON(ktime_to_ms(ktime_sub(ktime_get(), \ 514d6a105eSTiwei Bie (_vq)->last_add_time)) > 100); \ 524d6a105eSTiwei Bie } \ 534d6a105eSTiwei Bie } while (0) 544d6a105eSTiwei Bie #define LAST_ADD_TIME_INVALID(_vq) \ 554d6a105eSTiwei Bie ((_vq)->last_add_time_valid = false) 560a8a69ddSRusty Russell #else 579499f5e7SRusty Russell #define BAD_RING(_vq, fmt, args...) \ 589499f5e7SRusty Russell do { \ 599499f5e7SRusty Russell dev_err(&_vq->vq.vdev->dev, \ 609499f5e7SRusty Russell "%s:"fmt, (_vq)->vq.name, ##args); \ 619499f5e7SRusty Russell (_vq)->broken = true; \ 629499f5e7SRusty Russell } while (0) 630a8a69ddSRusty Russell #define START_USE(vq) 640a8a69ddSRusty Russell #define END_USE(vq) 654d6a105eSTiwei Bie #define LAST_ADD_TIME_UPDATE(vq) 664d6a105eSTiwei Bie #define LAST_ADD_TIME_CHECK(vq) 674d6a105eSTiwei Bie #define LAST_ADD_TIME_INVALID(vq) 680a8a69ddSRusty Russell #endif 690a8a69ddSRusty Russell 70cbeedb72STiwei Bie struct vring_desc_state_split { 71780bc790SAndy Lutomirski void *data; /* Data for callback. */ 72780bc790SAndy Lutomirski struct vring_desc *indir_desc; /* Indirect descriptor, if any. */ 73780bc790SAndy Lutomirski }; 74780bc790SAndy Lutomirski 751ce9e605STiwei Bie struct vring_desc_state_packed { 761ce9e605STiwei Bie void *data; /* Data for callback. */ 771ce9e605STiwei Bie struct vring_packed_desc *indir_desc; /* Indirect descriptor, if any. */ 781ce9e605STiwei Bie u16 num; /* Descriptor list length. */ 791ce9e605STiwei Bie u16 last; /* The last desc state in a list. */ 801ce9e605STiwei Bie }; 811ce9e605STiwei Bie 821f28750fSJason Wang struct vring_desc_extra { 83ef5c366fSJason Wang dma_addr_t addr; /* Descriptor DMA addr. */ 84ef5c366fSJason Wang u32 len; /* Descriptor length. */ 851ce9e605STiwei Bie u16 flags; /* Descriptor flags. */ 86aeef9b47SJason Wang u16 next; /* The next desc state in a list. */ 871ce9e605STiwei Bie }; 881ce9e605STiwei Bie 89d76136e4SXuan Zhuo struct vring_virtqueue_split { 90d76136e4SXuan Zhuo /* Actual memory layout for this queue. */ 91d76136e4SXuan Zhuo struct vring vring; 92d76136e4SXuan Zhuo 93d76136e4SXuan Zhuo /* Last written value to avail->flags */ 94d76136e4SXuan Zhuo u16 avail_flags_shadow; 95d76136e4SXuan Zhuo 96d76136e4SXuan Zhuo /* 97d76136e4SXuan Zhuo * Last written value to avail->idx in 98d76136e4SXuan Zhuo * guest byte order. 99d76136e4SXuan Zhuo */ 100d76136e4SXuan Zhuo u16 avail_idx_shadow; 101d76136e4SXuan Zhuo 102d76136e4SXuan Zhuo /* Per-descriptor state. */ 103d76136e4SXuan Zhuo struct vring_desc_state_split *desc_state; 104d76136e4SXuan Zhuo struct vring_desc_extra *desc_extra; 105d76136e4SXuan Zhuo 106d76136e4SXuan Zhuo /* DMA address and size information */ 107d76136e4SXuan Zhuo dma_addr_t queue_dma_addr; 108d76136e4SXuan Zhuo size_t queue_size_in_bytes; 109af36b16fSXuan Zhuo 110af36b16fSXuan Zhuo /* 111af36b16fSXuan Zhuo * The parameters for creating vrings are reserved for creating new 112af36b16fSXuan Zhuo * vring. 113af36b16fSXuan Zhuo */ 114af36b16fSXuan Zhuo u32 vring_align; 115af36b16fSXuan Zhuo bool may_reduce_num; 116d76136e4SXuan Zhuo }; 117d76136e4SXuan Zhuo 118d76136e4SXuan Zhuo struct vring_virtqueue_packed { 119d76136e4SXuan Zhuo /* Actual memory layout for this queue. */ 120d76136e4SXuan Zhuo struct { 121d76136e4SXuan Zhuo unsigned int num; 122d76136e4SXuan Zhuo struct vring_packed_desc *desc; 123d76136e4SXuan Zhuo struct vring_packed_desc_event *driver; 124d76136e4SXuan Zhuo struct vring_packed_desc_event *device; 125d76136e4SXuan Zhuo } vring; 126d76136e4SXuan Zhuo 127d76136e4SXuan Zhuo /* Driver ring wrap counter. */ 128d76136e4SXuan Zhuo bool avail_wrap_counter; 129d76136e4SXuan Zhuo 130d76136e4SXuan Zhuo /* Avail used flags. */ 131d76136e4SXuan Zhuo u16 avail_used_flags; 132d76136e4SXuan Zhuo 133d76136e4SXuan Zhuo /* Index of the next avail descriptor. */ 134d76136e4SXuan Zhuo u16 next_avail_idx; 135d76136e4SXuan Zhuo 136d76136e4SXuan Zhuo /* 137d76136e4SXuan Zhuo * Last written value to driver->flags in 138d76136e4SXuan Zhuo * guest byte order. 139d76136e4SXuan Zhuo */ 140d76136e4SXuan Zhuo u16 event_flags_shadow; 141d76136e4SXuan Zhuo 142d76136e4SXuan Zhuo /* Per-descriptor state. */ 143d76136e4SXuan Zhuo struct vring_desc_state_packed *desc_state; 144d76136e4SXuan Zhuo struct vring_desc_extra *desc_extra; 145d76136e4SXuan Zhuo 146d76136e4SXuan Zhuo /* DMA address and size information */ 147d76136e4SXuan Zhuo dma_addr_t ring_dma_addr; 148d76136e4SXuan Zhuo dma_addr_t driver_event_dma_addr; 149d76136e4SXuan Zhuo dma_addr_t device_event_dma_addr; 150d76136e4SXuan Zhuo size_t ring_size_in_bytes; 151d76136e4SXuan Zhuo size_t event_size_in_bytes; 152d76136e4SXuan Zhuo }; 153d76136e4SXuan Zhuo 15443b4f721SMichael S. Tsirkin struct vring_virtqueue { 1550a8a69ddSRusty Russell struct virtqueue vq; 1560a8a69ddSRusty Russell 1571ce9e605STiwei Bie /* Is this a packed ring? */ 1581ce9e605STiwei Bie bool packed_ring; 1591ce9e605STiwei Bie 160fb3fba6bSTiwei Bie /* Is DMA API used? */ 161fb3fba6bSTiwei Bie bool use_dma_api; 162fb3fba6bSTiwei Bie 1637b21e34fSRusty Russell /* Can we use weak barriers? */ 1647b21e34fSRusty Russell bool weak_barriers; 1657b21e34fSRusty Russell 1660a8a69ddSRusty Russell /* Other side has made a mess, don't try any more. */ 1670a8a69ddSRusty Russell bool broken; 1680a8a69ddSRusty Russell 1699fa29b9dSMark McLoughlin /* Host supports indirect buffers */ 1709fa29b9dSMark McLoughlin bool indirect; 1719fa29b9dSMark McLoughlin 172a5c262c5SMichael S. Tsirkin /* Host publishes avail event idx */ 173a5c262c5SMichael S. Tsirkin bool event; 174a5c262c5SMichael S. Tsirkin 1750a8a69ddSRusty Russell /* Head of free buffer list. */ 1760a8a69ddSRusty Russell unsigned int free_head; 1770a8a69ddSRusty Russell /* Number we've added since last sync. */ 1780a8a69ddSRusty Russell unsigned int num_added; 1790a8a69ddSRusty Russell 180a7722890Shuangjie.albert /* Last used index we've seen. 181a7722890Shuangjie.albert * for split ring, it just contains last used index 182a7722890Shuangjie.albert * for packed ring: 183a7722890Shuangjie.albert * bits up to VRING_PACKED_EVENT_F_WRAP_CTR include the last used index. 184a7722890Shuangjie.albert * bits from VRING_PACKED_EVENT_F_WRAP_CTR include the used wrap counter. 185a7722890Shuangjie.albert */ 1861bc4953eSAnthony Liguori u16 last_used_idx; 1870a8a69ddSRusty Russell 1888d622d21SMichael S. Tsirkin /* Hint for event idx: already triggered no need to disable. */ 1898d622d21SMichael S. Tsirkin bool event_triggered; 1908d622d21SMichael S. Tsirkin 1911ce9e605STiwei Bie union { 1921ce9e605STiwei Bie /* Available for split ring */ 193d76136e4SXuan Zhuo struct vring_virtqueue_split split; 194f277ec42SVenkatesh Srinivas 1951ce9e605STiwei Bie /* Available for packed ring */ 196d76136e4SXuan Zhuo struct vring_virtqueue_packed packed; 1971ce9e605STiwei Bie }; 1981ce9e605STiwei Bie 1990a8a69ddSRusty Russell /* How to notify other side. FIXME: commonalize hcalls! */ 20046f9c2b9SHeinz Graalfs bool (*notify)(struct virtqueue *vq); 2010a8a69ddSRusty Russell 2022a2d1382SAndy Lutomirski /* DMA, allocation, and size information */ 2032a2d1382SAndy Lutomirski bool we_own_ring; 2042a2d1382SAndy Lutomirski 2052713ea3cSJason Wang /* Device used for doing DMA */ 2062713ea3cSJason Wang struct device *dma_dev; 2072713ea3cSJason Wang 2080a8a69ddSRusty Russell #ifdef DEBUG 2090a8a69ddSRusty Russell /* They're supposed to lock for us. */ 2100a8a69ddSRusty Russell unsigned int in_use; 211e93300b1SRusty Russell 212e93300b1SRusty Russell /* Figure out if their kicks are too delayed. */ 213e93300b1SRusty Russell bool last_add_time_valid; 214e93300b1SRusty Russell ktime_t last_add_time; 2150a8a69ddSRusty Russell #endif 2160a8a69ddSRusty Russell }; 2170a8a69ddSRusty Russell 21807d9629dSXuan Zhuo static struct virtqueue *__vring_new_virtqueue(unsigned int index, 219cd4c812aSXuan Zhuo struct vring_virtqueue_split *vring_split, 22007d9629dSXuan Zhuo struct virtio_device *vdev, 22107d9629dSXuan Zhuo bool weak_barriers, 22207d9629dSXuan Zhuo bool context, 22307d9629dSXuan Zhuo bool (*notify)(struct virtqueue *), 22407d9629dSXuan Zhuo void (*callback)(struct virtqueue *), 2252713ea3cSJason Wang const char *name, 2262713ea3cSJason Wang struct device *dma_dev); 227a2b36c8dSXuan Zhuo static struct vring_desc_extra *vring_alloc_desc_extra(unsigned int num); 2286fea20e5SXuan Zhuo static void vring_free(struct virtqueue *_vq); 229e6f633e5STiwei Bie 230e6f633e5STiwei Bie /* 231e6f633e5STiwei Bie * Helpers. 232e6f633e5STiwei Bie */ 233e6f633e5STiwei Bie 2344b6ec919SFeng Liu #define to_vvq(_vq) container_of_const(_vq, struct vring_virtqueue, vq) 2350a8a69ddSRusty Russell 2364b6ec919SFeng Liu static bool virtqueue_use_indirect(const struct vring_virtqueue *vq, 2372f18c2d1STiwei Bie unsigned int total_sg) 2382f18c2d1STiwei Bie { 2392f18c2d1STiwei Bie /* 2402f18c2d1STiwei Bie * If the host supports indirect descriptor tables, and we have multiple 2412f18c2d1STiwei Bie * buffers, then go indirect. FIXME: tune this threshold 2422f18c2d1STiwei Bie */ 2432f18c2d1STiwei Bie return (vq->indirect && total_sg > 1 && vq->vq.num_free); 2442f18c2d1STiwei Bie } 2452f18c2d1STiwei Bie 246d26c96c8SAndy Lutomirski /* 2471a937693SMichael S. Tsirkin * Modern virtio devices have feature bits to specify whether they need a 2481a937693SMichael S. Tsirkin * quirk and bypass the IOMMU. If not there, just use the DMA API. 2491a937693SMichael S. Tsirkin * 2501a937693SMichael S. Tsirkin * If there, the interaction between virtio and DMA API is messy. 251d26c96c8SAndy Lutomirski * 252d26c96c8SAndy Lutomirski * On most systems with virtio, physical addresses match bus addresses, 253d26c96c8SAndy Lutomirski * and it doesn't particularly matter whether we use the DMA API. 254d26c96c8SAndy Lutomirski * 255d26c96c8SAndy Lutomirski * On some systems, including Xen and any system with a physical device 256d26c96c8SAndy Lutomirski * that speaks virtio behind a physical IOMMU, we must use the DMA API 257d26c96c8SAndy Lutomirski * for virtio DMA to work at all. 258d26c96c8SAndy Lutomirski * 259d26c96c8SAndy Lutomirski * On other systems, including SPARC and PPC64, virtio-pci devices are 260d26c96c8SAndy Lutomirski * enumerated as though they are behind an IOMMU, but the virtio host 261d26c96c8SAndy Lutomirski * ignores the IOMMU, so we must either pretend that the IOMMU isn't 262d26c96c8SAndy Lutomirski * there or somehow map everything as the identity. 263d26c96c8SAndy Lutomirski * 264d26c96c8SAndy Lutomirski * For the time being, we preserve historic behavior and bypass the DMA 265d26c96c8SAndy Lutomirski * API. 2661a937693SMichael S. Tsirkin * 2671a937693SMichael S. Tsirkin * TODO: install a per-device DMA ops structure that does the right thing 2681a937693SMichael S. Tsirkin * taking into account all the above quirks, and use the DMA API 2691a937693SMichael S. Tsirkin * unconditionally on data path. 270d26c96c8SAndy Lutomirski */ 271d26c96c8SAndy Lutomirski 2724b6ec919SFeng Liu static bool vring_use_dma_api(const struct virtio_device *vdev) 273d26c96c8SAndy Lutomirski { 27424b6842aSMichael S. Tsirkin if (!virtio_has_dma_quirk(vdev)) 2751a937693SMichael S. Tsirkin return true; 2761a937693SMichael S. Tsirkin 2771a937693SMichael S. Tsirkin /* Otherwise, we are left to guess. */ 27878fe3987SAndy Lutomirski /* 27978fe3987SAndy Lutomirski * In theory, it's possible to have a buggy QEMU-supposed 28078fe3987SAndy Lutomirski * emulated Q35 IOMMU and Xen enabled at the same time. On 28178fe3987SAndy Lutomirski * such a configuration, virtio has never worked and will 28278fe3987SAndy Lutomirski * not work without an even larger kludge. Instead, enable 28378fe3987SAndy Lutomirski * the DMA API if we're a Xen guest, which at least allows 28478fe3987SAndy Lutomirski * all of the sensible Xen configurations to work correctly. 28578fe3987SAndy Lutomirski */ 28678fe3987SAndy Lutomirski if (xen_domain()) 28778fe3987SAndy Lutomirski return true; 28878fe3987SAndy Lutomirski 289d26c96c8SAndy Lutomirski return false; 290d26c96c8SAndy Lutomirski } 291d26c96c8SAndy Lutomirski 2924b6ec919SFeng Liu size_t virtio_max_dma_size(const struct virtio_device *vdev) 293e6d6dd6cSJoerg Roedel { 294e6d6dd6cSJoerg Roedel size_t max_segment_size = SIZE_MAX; 295e6d6dd6cSJoerg Roedel 296e6d6dd6cSJoerg Roedel if (vring_use_dma_api(vdev)) 297817fc978SWill Deacon max_segment_size = dma_max_mapping_size(vdev->dev.parent); 298e6d6dd6cSJoerg Roedel 299e6d6dd6cSJoerg Roedel return max_segment_size; 300e6d6dd6cSJoerg Roedel } 301e6d6dd6cSJoerg Roedel EXPORT_SYMBOL_GPL(virtio_max_dma_size); 302e6d6dd6cSJoerg Roedel 303d79dca75STiwei Bie static void *vring_alloc_queue(struct virtio_device *vdev, size_t size, 3042713ea3cSJason Wang dma_addr_t *dma_handle, gfp_t flag, 3052713ea3cSJason Wang struct device *dma_dev) 306d79dca75STiwei Bie { 307d79dca75STiwei Bie if (vring_use_dma_api(vdev)) { 3082713ea3cSJason Wang return dma_alloc_coherent(dma_dev, size, 309d79dca75STiwei Bie dma_handle, flag); 310d79dca75STiwei Bie } else { 311d79dca75STiwei Bie void *queue = alloc_pages_exact(PAGE_ALIGN(size), flag); 312d79dca75STiwei Bie 313d79dca75STiwei Bie if (queue) { 314d79dca75STiwei Bie phys_addr_t phys_addr = virt_to_phys(queue); 315d79dca75STiwei Bie *dma_handle = (dma_addr_t)phys_addr; 316d79dca75STiwei Bie 317d79dca75STiwei Bie /* 318d79dca75STiwei Bie * Sanity check: make sure we dind't truncate 319d79dca75STiwei Bie * the address. The only arches I can find that 320d79dca75STiwei Bie * have 64-bit phys_addr_t but 32-bit dma_addr_t 321d79dca75STiwei Bie * are certain non-highmem MIPS and x86 322d79dca75STiwei Bie * configurations, but these configurations 323d79dca75STiwei Bie * should never allocate physical pages above 32 324d79dca75STiwei Bie * bits, so this is fine. Just in case, throw a 325d79dca75STiwei Bie * warning and abort if we end up with an 326d79dca75STiwei Bie * unrepresentable address. 327d79dca75STiwei Bie */ 328d79dca75STiwei Bie if (WARN_ON_ONCE(*dma_handle != phys_addr)) { 329d79dca75STiwei Bie free_pages_exact(queue, PAGE_ALIGN(size)); 330d79dca75STiwei Bie return NULL; 331d79dca75STiwei Bie } 332d79dca75STiwei Bie } 333d79dca75STiwei Bie return queue; 334d79dca75STiwei Bie } 335d79dca75STiwei Bie } 336d79dca75STiwei Bie 337d79dca75STiwei Bie static void vring_free_queue(struct virtio_device *vdev, size_t size, 3382713ea3cSJason Wang void *queue, dma_addr_t dma_handle, 3392713ea3cSJason Wang struct device *dma_dev) 340d79dca75STiwei Bie { 341d79dca75STiwei Bie if (vring_use_dma_api(vdev)) 3422713ea3cSJason Wang dma_free_coherent(dma_dev, size, queue, dma_handle); 343d79dca75STiwei Bie else 344d79dca75STiwei Bie free_pages_exact(queue, PAGE_ALIGN(size)); 345d79dca75STiwei Bie } 346d79dca75STiwei Bie 347780bc790SAndy Lutomirski /* 348780bc790SAndy Lutomirski * The DMA ops on various arches are rather gnarly right now, and 349780bc790SAndy Lutomirski * making all of the arch DMA ops work on the vring device itself 3502713ea3cSJason Wang * is a mess. 351780bc790SAndy Lutomirski */ 3521adbd6b2SFeng Liu static struct device *vring_dma_dev(const struct vring_virtqueue *vq) 353780bc790SAndy Lutomirski { 3542713ea3cSJason Wang return vq->dma_dev; 355780bc790SAndy Lutomirski } 356780bc790SAndy Lutomirski 357780bc790SAndy Lutomirski /* Map one sg entry. */ 358*0e27fa6dSXuan Zhuo static int vring_map_one_sg(const struct vring_virtqueue *vq, struct scatterlist *sg, 359*0e27fa6dSXuan Zhuo enum dma_data_direction direction, dma_addr_t *addr) 360780bc790SAndy Lutomirski { 36188938359SAlexander Potapenko if (!vq->use_dma_api) { 36288938359SAlexander Potapenko /* 36388938359SAlexander Potapenko * If DMA is not used, KMSAN doesn't know that the scatterlist 36488938359SAlexander Potapenko * is initialized by the hardware. Explicitly check/unpoison it 36588938359SAlexander Potapenko * depending on the direction. 36688938359SAlexander Potapenko */ 36788938359SAlexander Potapenko kmsan_handle_dma(sg_page(sg), sg->offset, sg->length, direction); 368*0e27fa6dSXuan Zhuo *addr = (dma_addr_t)sg_phys(sg); 369*0e27fa6dSXuan Zhuo return 0; 37088938359SAlexander Potapenko } 371780bc790SAndy Lutomirski 372780bc790SAndy Lutomirski /* 373780bc790SAndy Lutomirski * We can't use dma_map_sg, because we don't use scatterlists in 374780bc790SAndy Lutomirski * the way it expects (we don't guarantee that the scatterlist 375780bc790SAndy Lutomirski * will exist for the lifetime of the mapping). 376780bc790SAndy Lutomirski */ 377*0e27fa6dSXuan Zhuo *addr = dma_map_page(vring_dma_dev(vq), 378780bc790SAndy Lutomirski sg_page(sg), sg->offset, sg->length, 379780bc790SAndy Lutomirski direction); 380*0e27fa6dSXuan Zhuo 381*0e27fa6dSXuan Zhuo if (dma_mapping_error(vring_dma_dev(vq), *addr)) 382*0e27fa6dSXuan Zhuo return -ENOMEM; 383*0e27fa6dSXuan Zhuo 384*0e27fa6dSXuan Zhuo return 0; 385780bc790SAndy Lutomirski } 386780bc790SAndy Lutomirski 387780bc790SAndy Lutomirski static dma_addr_t vring_map_single(const struct vring_virtqueue *vq, 388780bc790SAndy Lutomirski void *cpu_addr, size_t size, 389780bc790SAndy Lutomirski enum dma_data_direction direction) 390780bc790SAndy Lutomirski { 391fb3fba6bSTiwei Bie if (!vq->use_dma_api) 392780bc790SAndy Lutomirski return (dma_addr_t)virt_to_phys(cpu_addr); 393780bc790SAndy Lutomirski 394780bc790SAndy Lutomirski return dma_map_single(vring_dma_dev(vq), 395780bc790SAndy Lutomirski cpu_addr, size, direction); 396780bc790SAndy Lutomirski } 397780bc790SAndy Lutomirski 398e6f633e5STiwei Bie static int vring_mapping_error(const struct vring_virtqueue *vq, 399e6f633e5STiwei Bie dma_addr_t addr) 400e6f633e5STiwei Bie { 401fb3fba6bSTiwei Bie if (!vq->use_dma_api) 402e6f633e5STiwei Bie return 0; 403e6f633e5STiwei Bie 404e6f633e5STiwei Bie return dma_mapping_error(vring_dma_dev(vq), addr); 405e6f633e5STiwei Bie } 406e6f633e5STiwei Bie 4073a897128SXuan Zhuo static void virtqueue_init(struct vring_virtqueue *vq, u32 num) 4083a897128SXuan Zhuo { 4093a897128SXuan Zhuo vq->vq.num_free = num; 4103a897128SXuan Zhuo 4113a897128SXuan Zhuo if (vq->packed_ring) 4123a897128SXuan Zhuo vq->last_used_idx = 0 | (1 << VRING_PACKED_EVENT_F_WRAP_CTR); 4133a897128SXuan Zhuo else 4143a897128SXuan Zhuo vq->last_used_idx = 0; 4153a897128SXuan Zhuo 4163a897128SXuan Zhuo vq->event_triggered = false; 4173a897128SXuan Zhuo vq->num_added = 0; 4183a897128SXuan Zhuo 4193a897128SXuan Zhuo #ifdef DEBUG 4203a897128SXuan Zhuo vq->in_use = false; 4213a897128SXuan Zhuo vq->last_add_time_valid = false; 4223a897128SXuan Zhuo #endif 4233a897128SXuan Zhuo } 4243a897128SXuan Zhuo 425e6f633e5STiwei Bie 426e6f633e5STiwei Bie /* 427e6f633e5STiwei Bie * Split ring specific functions - *_split(). 428e6f633e5STiwei Bie */ 429e6f633e5STiwei Bie 43072b5e895SJason Wang static void vring_unmap_one_split_indirect(const struct vring_virtqueue *vq, 4314b6ec919SFeng Liu const struct vring_desc *desc) 432780bc790SAndy Lutomirski { 433780bc790SAndy Lutomirski u16 flags; 434780bc790SAndy Lutomirski 435fb3fba6bSTiwei Bie if (!vq->use_dma_api) 436780bc790SAndy Lutomirski return; 437780bc790SAndy Lutomirski 438780bc790SAndy Lutomirski flags = virtio16_to_cpu(vq->vq.vdev, desc->flags); 439780bc790SAndy Lutomirski 440780bc790SAndy Lutomirski dma_unmap_page(vring_dma_dev(vq), 441780bc790SAndy Lutomirski virtio64_to_cpu(vq->vq.vdev, desc->addr), 442780bc790SAndy Lutomirski virtio32_to_cpu(vq->vq.vdev, desc->len), 443780bc790SAndy Lutomirski (flags & VRING_DESC_F_WRITE) ? 444780bc790SAndy Lutomirski DMA_FROM_DEVICE : DMA_TO_DEVICE); 445780bc790SAndy Lutomirski } 446780bc790SAndy Lutomirski 44772b5e895SJason Wang static unsigned int vring_unmap_one_split(const struct vring_virtqueue *vq, 44872b5e895SJason Wang unsigned int i) 44972b5e895SJason Wang { 45072b5e895SJason Wang struct vring_desc_extra *extra = vq->split.desc_extra; 45172b5e895SJason Wang u16 flags; 45272b5e895SJason Wang 45372b5e895SJason Wang if (!vq->use_dma_api) 45472b5e895SJason Wang goto out; 45572b5e895SJason Wang 45672b5e895SJason Wang flags = extra[i].flags; 45772b5e895SJason Wang 45872b5e895SJason Wang if (flags & VRING_DESC_F_INDIRECT) { 45972b5e895SJason Wang dma_unmap_single(vring_dma_dev(vq), 46072b5e895SJason Wang extra[i].addr, 46172b5e895SJason Wang extra[i].len, 46272b5e895SJason Wang (flags & VRING_DESC_F_WRITE) ? 46372b5e895SJason Wang DMA_FROM_DEVICE : DMA_TO_DEVICE); 46472b5e895SJason Wang } else { 46572b5e895SJason Wang dma_unmap_page(vring_dma_dev(vq), 46672b5e895SJason Wang extra[i].addr, 46772b5e895SJason Wang extra[i].len, 46872b5e895SJason Wang (flags & VRING_DESC_F_WRITE) ? 46972b5e895SJason Wang DMA_FROM_DEVICE : DMA_TO_DEVICE); 47072b5e895SJason Wang } 47172b5e895SJason Wang 47272b5e895SJason Wang out: 47372b5e895SJason Wang return extra[i].next; 47472b5e895SJason Wang } 47572b5e895SJason Wang 476138fd251STiwei Bie static struct vring_desc *alloc_indirect_split(struct virtqueue *_vq, 477138fd251STiwei Bie unsigned int total_sg, 478138fd251STiwei Bie gfp_t gfp) 4799fa29b9dSMark McLoughlin { 4809fa29b9dSMark McLoughlin struct vring_desc *desc; 481b25bd251SRusty Russell unsigned int i; 4829fa29b9dSMark McLoughlin 483b92b1b89SWill Deacon /* 484b92b1b89SWill Deacon * We require lowmem mappings for the descriptors because 485b92b1b89SWill Deacon * otherwise virt_to_phys will give us bogus addresses in the 486b92b1b89SWill Deacon * virtqueue. 487b92b1b89SWill Deacon */ 48882107539SMichal Hocko gfp &= ~__GFP_HIGHMEM; 489b92b1b89SWill Deacon 4906da2ec56SKees Cook desc = kmalloc_array(total_sg, sizeof(struct vring_desc), gfp); 4919fa29b9dSMark McLoughlin if (!desc) 492b25bd251SRusty Russell return NULL; 4939fa29b9dSMark McLoughlin 494b25bd251SRusty Russell for (i = 0; i < total_sg; i++) 49500e6f3d9SMichael S. Tsirkin desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1); 496b25bd251SRusty Russell return desc; 4979fa29b9dSMark McLoughlin } 4989fa29b9dSMark McLoughlin 499fe4c3862SJason Wang static inline unsigned int virtqueue_add_desc_split(struct virtqueue *vq, 500fe4c3862SJason Wang struct vring_desc *desc, 501fe4c3862SJason Wang unsigned int i, 502fe4c3862SJason Wang dma_addr_t addr, 503fe4c3862SJason Wang unsigned int len, 50472b5e895SJason Wang u16 flags, 50572b5e895SJason Wang bool indirect) 506fe4c3862SJason Wang { 50772b5e895SJason Wang struct vring_virtqueue *vring = to_vvq(vq); 50872b5e895SJason Wang struct vring_desc_extra *extra = vring->split.desc_extra; 50972b5e895SJason Wang u16 next; 51072b5e895SJason Wang 511fe4c3862SJason Wang desc[i].flags = cpu_to_virtio16(vq->vdev, flags); 512fe4c3862SJason Wang desc[i].addr = cpu_to_virtio64(vq->vdev, addr); 513fe4c3862SJason Wang desc[i].len = cpu_to_virtio32(vq->vdev, len); 514fe4c3862SJason Wang 51572b5e895SJason Wang if (!indirect) { 51672b5e895SJason Wang next = extra[i].next; 51772b5e895SJason Wang desc[i].next = cpu_to_virtio16(vq->vdev, next); 51872b5e895SJason Wang 51972b5e895SJason Wang extra[i].addr = addr; 52072b5e895SJason Wang extra[i].len = len; 52172b5e895SJason Wang extra[i].flags = flags; 52272b5e895SJason Wang } else 52372b5e895SJason Wang next = virtio16_to_cpu(vq->vdev, desc[i].next); 52472b5e895SJason Wang 52572b5e895SJason Wang return next; 526fe4c3862SJason Wang } 527fe4c3862SJason Wang 528138fd251STiwei Bie static inline int virtqueue_add_split(struct virtqueue *_vq, 52913816c76SRusty Russell struct scatterlist *sgs[], 530eeebf9b1SRusty Russell unsigned int total_sg, 53113816c76SRusty Russell unsigned int out_sgs, 53213816c76SRusty Russell unsigned int in_sgs, 533bbd603efSMichael S. Tsirkin void *data, 5345a08b04fSMichael S. Tsirkin void *ctx, 535bbd603efSMichael S. Tsirkin gfp_t gfp) 5360a8a69ddSRusty Russell { 5370a8a69ddSRusty Russell struct vring_virtqueue *vq = to_vvq(_vq); 53813816c76SRusty Russell struct scatterlist *sg; 539b25bd251SRusty Russell struct vring_desc *desc; 5403f649ab7SKees Cook unsigned int i, n, avail, descs_used, prev, err_idx; 5411fe9b6feSMichael S. Tsirkin int head; 542b25bd251SRusty Russell bool indirect; 5430a8a69ddSRusty Russell 5449fa29b9dSMark McLoughlin START_USE(vq); 5459fa29b9dSMark McLoughlin 5460a8a69ddSRusty Russell BUG_ON(data == NULL); 5475a08b04fSMichael S. Tsirkin BUG_ON(ctx && vq->indirect); 5489fa29b9dSMark McLoughlin 54970670444SRusty Russell if (unlikely(vq->broken)) { 55070670444SRusty Russell END_USE(vq); 55170670444SRusty Russell return -EIO; 55270670444SRusty Russell } 55370670444SRusty Russell 5544d6a105eSTiwei Bie LAST_ADD_TIME_UPDATE(vq); 555e93300b1SRusty Russell 55613816c76SRusty Russell BUG_ON(total_sg == 0); 5570a8a69ddSRusty Russell 558b25bd251SRusty Russell head = vq->free_head; 559b25bd251SRusty Russell 56035c51e09SXianting Tian if (virtqueue_use_indirect(vq, total_sg)) 561138fd251STiwei Bie desc = alloc_indirect_split(_vq, total_sg, gfp); 56244ed8089SRichard W.M. Jones else { 563b25bd251SRusty Russell desc = NULL; 564e593bf97STiwei Bie WARN_ON_ONCE(total_sg > vq->split.vring.num && !vq->indirect); 56544ed8089SRichard W.M. Jones } 566b25bd251SRusty Russell 567b25bd251SRusty Russell if (desc) { 568b25bd251SRusty Russell /* Use a single buffer which doesn't continue */ 569780bc790SAndy Lutomirski indirect = true; 570b25bd251SRusty Russell /* Set up rest to use this indirect table. */ 571b25bd251SRusty Russell i = 0; 572b25bd251SRusty Russell descs_used = 1; 573b25bd251SRusty Russell } else { 574780bc790SAndy Lutomirski indirect = false; 575e593bf97STiwei Bie desc = vq->split.vring.desc; 576b25bd251SRusty Russell i = head; 577b25bd251SRusty Russell descs_used = total_sg; 578b25bd251SRusty Russell } 579b25bd251SRusty Russell 580b4b4ff73SXianting Tian if (unlikely(vq->vq.num_free < descs_used)) { 5810a8a69ddSRusty Russell pr_debug("Can't add buf len %i - avail = %i\n", 582b25bd251SRusty Russell descs_used, vq->vq.num_free); 58344653eaeSRusty Russell /* FIXME: for historical reasons, we force a notify here if 58444653eaeSRusty Russell * there are outgoing parts to the buffer. Presumably the 58544653eaeSRusty Russell * host should service the ring ASAP. */ 58613816c76SRusty Russell if (out_sgs) 587426e3e0aSRusty Russell vq->notify(&vq->vq); 58858625edfSWei Yongjun if (indirect) 58958625edfSWei Yongjun kfree(desc); 5900a8a69ddSRusty Russell END_USE(vq); 5910a8a69ddSRusty Russell return -ENOSPC; 5920a8a69ddSRusty Russell } 5930a8a69ddSRusty Russell 59413816c76SRusty Russell for (n = 0; n < out_sgs; n++) { 595eeebf9b1SRusty Russell for (sg = sgs[n]; sg; sg = sg_next(sg)) { 596*0e27fa6dSXuan Zhuo dma_addr_t addr; 597*0e27fa6dSXuan Zhuo 598*0e27fa6dSXuan Zhuo if (vring_map_one_sg(vq, sg, DMA_TO_DEVICE, &addr)) 599780bc790SAndy Lutomirski goto unmap_release; 600780bc790SAndy Lutomirski 6010a8a69ddSRusty Russell prev = i; 60272b5e895SJason Wang /* Note that we trust indirect descriptor 60372b5e895SJason Wang * table since it use stream DMA mapping. 60472b5e895SJason Wang */ 605fe4c3862SJason Wang i = virtqueue_add_desc_split(_vq, desc, i, addr, sg->length, 60672b5e895SJason Wang VRING_DESC_F_NEXT, 60772b5e895SJason Wang indirect); 6080a8a69ddSRusty Russell } 60913816c76SRusty Russell } 61013816c76SRusty Russell for (; n < (out_sgs + in_sgs); n++) { 611eeebf9b1SRusty Russell for (sg = sgs[n]; sg; sg = sg_next(sg)) { 612*0e27fa6dSXuan Zhuo dma_addr_t addr; 613*0e27fa6dSXuan Zhuo 614*0e27fa6dSXuan Zhuo if (vring_map_one_sg(vq, sg, DMA_FROM_DEVICE, &addr)) 615780bc790SAndy Lutomirski goto unmap_release; 616780bc790SAndy Lutomirski 6170a8a69ddSRusty Russell prev = i; 61872b5e895SJason Wang /* Note that we trust indirect descriptor 61972b5e895SJason Wang * table since it use stream DMA mapping. 62072b5e895SJason Wang */ 621fe4c3862SJason Wang i = virtqueue_add_desc_split(_vq, desc, i, addr, 622fe4c3862SJason Wang sg->length, 623fe4c3862SJason Wang VRING_DESC_F_NEXT | 62472b5e895SJason Wang VRING_DESC_F_WRITE, 62572b5e895SJason Wang indirect); 62613816c76SRusty Russell } 6270a8a69ddSRusty Russell } 6280a8a69ddSRusty Russell /* Last one doesn't continue. */ 62900e6f3d9SMichael S. Tsirkin desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT); 63072b5e895SJason Wang if (!indirect && vq->use_dma_api) 631890d3356SVincent Whitchurch vq->split.desc_extra[prev & (vq->split.vring.num - 1)].flags &= 63272b5e895SJason Wang ~VRING_DESC_F_NEXT; 6330a8a69ddSRusty Russell 634780bc790SAndy Lutomirski if (indirect) { 635780bc790SAndy Lutomirski /* Now that the indirect table is filled in, map it. */ 636780bc790SAndy Lutomirski dma_addr_t addr = vring_map_single( 637780bc790SAndy Lutomirski vq, desc, total_sg * sizeof(struct vring_desc), 638780bc790SAndy Lutomirski DMA_TO_DEVICE); 639780bc790SAndy Lutomirski if (vring_mapping_error(vq, addr)) 640780bc790SAndy Lutomirski goto unmap_release; 641780bc790SAndy Lutomirski 642fe4c3862SJason Wang virtqueue_add_desc_split(_vq, vq->split.vring.desc, 643fe4c3862SJason Wang head, addr, 644fe4c3862SJason Wang total_sg * sizeof(struct vring_desc), 64572b5e895SJason Wang VRING_DESC_F_INDIRECT, 64672b5e895SJason Wang false); 647780bc790SAndy Lutomirski } 648780bc790SAndy Lutomirski 649780bc790SAndy Lutomirski /* We're using some buffers from the free list. */ 650780bc790SAndy Lutomirski vq->vq.num_free -= descs_used; 651780bc790SAndy Lutomirski 6520a8a69ddSRusty Russell /* Update free pointer */ 653b25bd251SRusty Russell if (indirect) 65472b5e895SJason Wang vq->free_head = vq->split.desc_extra[head].next; 655b25bd251SRusty Russell else 6560a8a69ddSRusty Russell vq->free_head = i; 6570a8a69ddSRusty Russell 658780bc790SAndy Lutomirski /* Store token and indirect buffer state. */ 659cbeedb72STiwei Bie vq->split.desc_state[head].data = data; 660780bc790SAndy Lutomirski if (indirect) 661cbeedb72STiwei Bie vq->split.desc_state[head].indir_desc = desc; 66287646a34SJason Wang else 663cbeedb72STiwei Bie vq->split.desc_state[head].indir_desc = ctx; 6640a8a69ddSRusty Russell 6650a8a69ddSRusty Russell /* Put entry in available array (but don't update avail->idx until they 6663b720b8cSRusty Russell * do sync). */ 667e593bf97STiwei Bie avail = vq->split.avail_idx_shadow & (vq->split.vring.num - 1); 668e593bf97STiwei Bie vq->split.vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head); 6690a8a69ddSRusty Russell 670ee7cd898SRusty Russell /* Descriptors and available array need to be set before we expose the 671ee7cd898SRusty Russell * new available array entries. */ 672a9a0fef7SRusty Russell virtio_wmb(vq->weak_barriers); 673e593bf97STiwei Bie vq->split.avail_idx_shadow++; 674e593bf97STiwei Bie vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev, 675e593bf97STiwei Bie vq->split.avail_idx_shadow); 676ee7cd898SRusty Russell vq->num_added++; 677ee7cd898SRusty Russell 6785e05bf58STetsuo Handa pr_debug("Added buffer head %i to %p\n", head, vq); 6795e05bf58STetsuo Handa END_USE(vq); 6805e05bf58STetsuo Handa 681ee7cd898SRusty Russell /* This is very unlikely, but theoretically possible. Kick 682ee7cd898SRusty Russell * just in case. */ 683ee7cd898SRusty Russell if (unlikely(vq->num_added == (1 << 16) - 1)) 684ee7cd898SRusty Russell virtqueue_kick(_vq); 685ee7cd898SRusty Russell 68698e8c6bcSRusty Russell return 0; 687780bc790SAndy Lutomirski 688780bc790SAndy Lutomirski unmap_release: 689780bc790SAndy Lutomirski err_idx = i; 690cf8f1696SMatthias Lange 691cf8f1696SMatthias Lange if (indirect) 692cf8f1696SMatthias Lange i = 0; 693cf8f1696SMatthias Lange else 694780bc790SAndy Lutomirski i = head; 695780bc790SAndy Lutomirski 696780bc790SAndy Lutomirski for (n = 0; n < total_sg; n++) { 697780bc790SAndy Lutomirski if (i == err_idx) 698780bc790SAndy Lutomirski break; 69972b5e895SJason Wang if (indirect) { 70072b5e895SJason Wang vring_unmap_one_split_indirect(vq, &desc[i]); 701cf8f1696SMatthias Lange i = virtio16_to_cpu(_vq->vdev, desc[i].next); 70272b5e895SJason Wang } else 70372b5e895SJason Wang i = vring_unmap_one_split(vq, i); 704780bc790SAndy Lutomirski } 705780bc790SAndy Lutomirski 706780bc790SAndy Lutomirski if (indirect) 707780bc790SAndy Lutomirski kfree(desc); 708780bc790SAndy Lutomirski 7093cc36f6eSMichael S. Tsirkin END_USE(vq); 710f7728002SHalil Pasic return -ENOMEM; 7110a8a69ddSRusty Russell } 71213816c76SRusty Russell 713138fd251STiwei Bie static bool virtqueue_kick_prepare_split(struct virtqueue *_vq) 7140a8a69ddSRusty Russell { 7150a8a69ddSRusty Russell struct vring_virtqueue *vq = to_vvq(_vq); 716a5c262c5SMichael S. Tsirkin u16 new, old; 71741f0377fSRusty Russell bool needs_kick; 71841f0377fSRusty Russell 7190a8a69ddSRusty Russell START_USE(vq); 720a72caae2SJason Wang /* We need to expose available array entries before checking avail 721a72caae2SJason Wang * event. */ 722a9a0fef7SRusty Russell virtio_mb(vq->weak_barriers); 7230a8a69ddSRusty Russell 724e593bf97STiwei Bie old = vq->split.avail_idx_shadow - vq->num_added; 725e593bf97STiwei Bie new = vq->split.avail_idx_shadow; 7260a8a69ddSRusty Russell vq->num_added = 0; 7270a8a69ddSRusty Russell 7284d6a105eSTiwei Bie LAST_ADD_TIME_CHECK(vq); 7294d6a105eSTiwei Bie LAST_ADD_TIME_INVALID(vq); 730e93300b1SRusty Russell 73141f0377fSRusty Russell if (vq->event) { 732e593bf97STiwei Bie needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev, 733e593bf97STiwei Bie vring_avail_event(&vq->split.vring)), 73441f0377fSRusty Russell new, old); 73541f0377fSRusty Russell } else { 736e593bf97STiwei Bie needs_kick = !(vq->split.vring.used->flags & 737e593bf97STiwei Bie cpu_to_virtio16(_vq->vdev, 738e593bf97STiwei Bie VRING_USED_F_NO_NOTIFY)); 73941f0377fSRusty Russell } 7400a8a69ddSRusty Russell END_USE(vq); 74141f0377fSRusty Russell return needs_kick; 74241f0377fSRusty Russell } 743138fd251STiwei Bie 744138fd251STiwei Bie static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head, 7455a08b04fSMichael S. Tsirkin void **ctx) 7460a8a69ddSRusty Russell { 747780bc790SAndy Lutomirski unsigned int i, j; 748c60923cbSGonglei __virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT); 7490a8a69ddSRusty Russell 7500a8a69ddSRusty Russell /* Clear data ptr. */ 751cbeedb72STiwei Bie vq->split.desc_state[head].data = NULL; 7520a8a69ddSRusty Russell 753780bc790SAndy Lutomirski /* Put back on free list: unmap first-level descriptors and find end */ 7540a8a69ddSRusty Russell i = head; 7559fa29b9dSMark McLoughlin 756e593bf97STiwei Bie while (vq->split.vring.desc[i].flags & nextflag) { 75772b5e895SJason Wang vring_unmap_one_split(vq, i); 75872b5e895SJason Wang i = vq->split.desc_extra[i].next; 75906ca287dSRusty Russell vq->vq.num_free++; 7600a8a69ddSRusty Russell } 7610a8a69ddSRusty Russell 76272b5e895SJason Wang vring_unmap_one_split(vq, i); 76372b5e895SJason Wang vq->split.desc_extra[i].next = vq->free_head; 7640a8a69ddSRusty Russell vq->free_head = head; 765780bc790SAndy Lutomirski 7660a8a69ddSRusty Russell /* Plus final descriptor */ 76706ca287dSRusty Russell vq->vq.num_free++; 768780bc790SAndy Lutomirski 7695a08b04fSMichael S. Tsirkin if (vq->indirect) { 770cbeedb72STiwei Bie struct vring_desc *indir_desc = 771cbeedb72STiwei Bie vq->split.desc_state[head].indir_desc; 7725a08b04fSMichael S. Tsirkin u32 len; 7735a08b04fSMichael S. Tsirkin 7745a08b04fSMichael S. Tsirkin /* Free the indirect table, if any, now that it's unmapped. */ 7755a08b04fSMichael S. Tsirkin if (!indir_desc) 7765a08b04fSMichael S. Tsirkin return; 7775a08b04fSMichael S. Tsirkin 77872b5e895SJason Wang len = vq->split.desc_extra[head].len; 779780bc790SAndy Lutomirski 78072b5e895SJason Wang BUG_ON(!(vq->split.desc_extra[head].flags & 78172b5e895SJason Wang VRING_DESC_F_INDIRECT)); 782780bc790SAndy Lutomirski BUG_ON(len == 0 || len % sizeof(struct vring_desc)); 783780bc790SAndy Lutomirski 784610c708bSXuan Zhuo if (vq->use_dma_api) { 785780bc790SAndy Lutomirski for (j = 0; j < len / sizeof(struct vring_desc); j++) 78672b5e895SJason Wang vring_unmap_one_split_indirect(vq, &indir_desc[j]); 787610c708bSXuan Zhuo } 788780bc790SAndy Lutomirski 7895a08b04fSMichael S. Tsirkin kfree(indir_desc); 790cbeedb72STiwei Bie vq->split.desc_state[head].indir_desc = NULL; 7915a08b04fSMichael S. Tsirkin } else if (ctx) { 792cbeedb72STiwei Bie *ctx = vq->split.desc_state[head].indir_desc; 793780bc790SAndy Lutomirski } 7940a8a69ddSRusty Russell } 7950a8a69ddSRusty Russell 7961adbd6b2SFeng Liu static bool more_used_split(const struct vring_virtqueue *vq) 7970a8a69ddSRusty Russell { 798e593bf97STiwei Bie return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev, 799e593bf97STiwei Bie vq->split.vring.used->idx); 8000a8a69ddSRusty Russell } 8010a8a69ddSRusty Russell 802138fd251STiwei Bie static void *virtqueue_get_buf_ctx_split(struct virtqueue *_vq, 803138fd251STiwei Bie unsigned int *len, 8045a08b04fSMichael S. Tsirkin void **ctx) 8050a8a69ddSRusty Russell { 8060a8a69ddSRusty Russell struct vring_virtqueue *vq = to_vvq(_vq); 8070a8a69ddSRusty Russell void *ret; 8080a8a69ddSRusty Russell unsigned int i; 8093b720b8cSRusty Russell u16 last_used; 8100a8a69ddSRusty Russell 8110a8a69ddSRusty Russell START_USE(vq); 8120a8a69ddSRusty Russell 8135ef82752SRusty Russell if (unlikely(vq->broken)) { 8145ef82752SRusty Russell END_USE(vq); 8155ef82752SRusty Russell return NULL; 8165ef82752SRusty Russell } 8175ef82752SRusty Russell 818138fd251STiwei Bie if (!more_used_split(vq)) { 8190a8a69ddSRusty Russell pr_debug("No more buffers in queue\n"); 8200a8a69ddSRusty Russell END_USE(vq); 8210a8a69ddSRusty Russell return NULL; 8220a8a69ddSRusty Russell } 8230a8a69ddSRusty Russell 8242d61ba95SMichael S. Tsirkin /* Only get used array entries after they have been exposed by host. */ 825a9a0fef7SRusty Russell virtio_rmb(vq->weak_barriers); 8262d61ba95SMichael S. Tsirkin 827e593bf97STiwei Bie last_used = (vq->last_used_idx & (vq->split.vring.num - 1)); 828e593bf97STiwei Bie i = virtio32_to_cpu(_vq->vdev, 829e593bf97STiwei Bie vq->split.vring.used->ring[last_used].id); 830e593bf97STiwei Bie *len = virtio32_to_cpu(_vq->vdev, 831e593bf97STiwei Bie vq->split.vring.used->ring[last_used].len); 8320a8a69ddSRusty Russell 833e593bf97STiwei Bie if (unlikely(i >= vq->split.vring.num)) { 8340a8a69ddSRusty Russell BAD_RING(vq, "id %u out of range\n", i); 8350a8a69ddSRusty Russell return NULL; 8360a8a69ddSRusty Russell } 837cbeedb72STiwei Bie if (unlikely(!vq->split.desc_state[i].data)) { 8380a8a69ddSRusty Russell BAD_RING(vq, "id %u is not a head!\n", i); 8390a8a69ddSRusty Russell return NULL; 8400a8a69ddSRusty Russell } 8410a8a69ddSRusty Russell 842138fd251STiwei Bie /* detach_buf_split clears data, so grab it now. */ 843cbeedb72STiwei Bie ret = vq->split.desc_state[i].data; 844138fd251STiwei Bie detach_buf_split(vq, i, ctx); 8450a8a69ddSRusty Russell vq->last_used_idx++; 846a5c262c5SMichael S. Tsirkin /* If we expect an interrupt for the next entry, tell host 847a5c262c5SMichael S. Tsirkin * by writing event index and flush out the write before 848a5c262c5SMichael S. Tsirkin * the read in the next get_buf call. */ 849e593bf97STiwei Bie if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) 850788e5b3aSMichael S. Tsirkin virtio_store_mb(vq->weak_barriers, 851e593bf97STiwei Bie &vring_used_event(&vq->split.vring), 852788e5b3aSMichael S. Tsirkin cpu_to_virtio16(_vq->vdev, vq->last_used_idx)); 853a5c262c5SMichael S. Tsirkin 8544d6a105eSTiwei Bie LAST_ADD_TIME_INVALID(vq); 855e93300b1SRusty Russell 8560a8a69ddSRusty Russell END_USE(vq); 8570a8a69ddSRusty Russell return ret; 8580a8a69ddSRusty Russell } 859138fd251STiwei Bie 860138fd251STiwei Bie static void virtqueue_disable_cb_split(struct virtqueue *_vq) 861138fd251STiwei Bie { 862138fd251STiwei Bie struct vring_virtqueue *vq = to_vvq(_vq); 863138fd251STiwei Bie 864e593bf97STiwei Bie if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) { 865e593bf97STiwei Bie vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT; 8666c0b057cSAlbert Huang 8676c0b057cSAlbert Huang /* 8686c0b057cSAlbert Huang * If device triggered an event already it won't trigger one again: 8696c0b057cSAlbert Huang * no need to disable. 8706c0b057cSAlbert Huang */ 8716c0b057cSAlbert Huang if (vq->event_triggered) 8726c0b057cSAlbert Huang return; 8736c0b057cSAlbert Huang 8748d622d21SMichael S. Tsirkin if (vq->event) 8758d622d21SMichael S. Tsirkin /* TODO: this is a hack. Figure out a cleaner value to write. */ 8768d622d21SMichael S. Tsirkin vring_used_event(&vq->split.vring) = 0x0; 8778d622d21SMichael S. Tsirkin else 878e593bf97STiwei Bie vq->split.vring.avail->flags = 879e593bf97STiwei Bie cpu_to_virtio16(_vq->vdev, 880e593bf97STiwei Bie vq->split.avail_flags_shadow); 881138fd251STiwei Bie } 882138fd251STiwei Bie } 883138fd251STiwei Bie 88431532340SSolomon Tan static unsigned int virtqueue_enable_cb_prepare_split(struct virtqueue *_vq) 885cc229884SMichael S. Tsirkin { 886cc229884SMichael S. Tsirkin struct vring_virtqueue *vq = to_vvq(_vq); 887cc229884SMichael S. Tsirkin u16 last_used_idx; 888cc229884SMichael S. Tsirkin 889cc229884SMichael S. Tsirkin START_USE(vq); 890cc229884SMichael S. Tsirkin 891cc229884SMichael S. Tsirkin /* We optimistically turn back on interrupts, then check if there was 892cc229884SMichael S. Tsirkin * more to do. */ 893cc229884SMichael S. Tsirkin /* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to 894cc229884SMichael S. Tsirkin * either clear the flags bit or point the event index at the next 895cc229884SMichael S. Tsirkin * entry. Always do both to keep code simple. */ 896e593bf97STiwei Bie if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) { 897e593bf97STiwei Bie vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT; 8980ea1e4a6SLadi Prosek if (!vq->event) 899e593bf97STiwei Bie vq->split.vring.avail->flags = 900e593bf97STiwei Bie cpu_to_virtio16(_vq->vdev, 901e593bf97STiwei Bie vq->split.avail_flags_shadow); 902f277ec42SVenkatesh Srinivas } 903e593bf97STiwei Bie vring_used_event(&vq->split.vring) = cpu_to_virtio16(_vq->vdev, 904e593bf97STiwei Bie last_used_idx = vq->last_used_idx); 905cc229884SMichael S. Tsirkin END_USE(vq); 906cc229884SMichael S. Tsirkin return last_used_idx; 907cc229884SMichael S. Tsirkin } 908138fd251STiwei Bie 90931532340SSolomon Tan static bool virtqueue_poll_split(struct virtqueue *_vq, unsigned int last_used_idx) 910138fd251STiwei Bie { 911138fd251STiwei Bie struct vring_virtqueue *vq = to_vvq(_vq); 912138fd251STiwei Bie 913138fd251STiwei Bie return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev, 914e593bf97STiwei Bie vq->split.vring.used->idx); 915138fd251STiwei Bie } 916138fd251STiwei Bie 917138fd251STiwei Bie static bool virtqueue_enable_cb_delayed_split(struct virtqueue *_vq) 9187ab358c2SMichael S. Tsirkin { 9197ab358c2SMichael S. Tsirkin struct vring_virtqueue *vq = to_vvq(_vq); 9207ab358c2SMichael S. Tsirkin u16 bufs; 9217ab358c2SMichael S. Tsirkin 9227ab358c2SMichael S. Tsirkin START_USE(vq); 9237ab358c2SMichael S. Tsirkin 9247ab358c2SMichael S. Tsirkin /* We optimistically turn back on interrupts, then check if there was 9257ab358c2SMichael S. Tsirkin * more to do. */ 9267ab358c2SMichael S. Tsirkin /* Depending on the VIRTIO_RING_F_USED_EVENT_IDX feature, we need to 9277ab358c2SMichael S. Tsirkin * either clear the flags bit or point the event index at the next 9280ea1e4a6SLadi Prosek * entry. Always update the event index to keep code simple. */ 929e593bf97STiwei Bie if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) { 930e593bf97STiwei Bie vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT; 9310ea1e4a6SLadi Prosek if (!vq->event) 932e593bf97STiwei Bie vq->split.vring.avail->flags = 933e593bf97STiwei Bie cpu_to_virtio16(_vq->vdev, 934e593bf97STiwei Bie vq->split.avail_flags_shadow); 935f277ec42SVenkatesh Srinivas } 9367ab358c2SMichael S. Tsirkin /* TODO: tune this threshold */ 937e593bf97STiwei Bie bufs = (u16)(vq->split.avail_idx_shadow - vq->last_used_idx) * 3 / 4; 938788e5b3aSMichael S. Tsirkin 939788e5b3aSMichael S. Tsirkin virtio_store_mb(vq->weak_barriers, 940e593bf97STiwei Bie &vring_used_event(&vq->split.vring), 941788e5b3aSMichael S. Tsirkin cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs)); 942788e5b3aSMichael S. Tsirkin 943e593bf97STiwei Bie if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->split.vring.used->idx) 944e593bf97STiwei Bie - vq->last_used_idx) > bufs)) { 9457ab358c2SMichael S. Tsirkin END_USE(vq); 9467ab358c2SMichael S. Tsirkin return false; 9477ab358c2SMichael S. Tsirkin } 9487ab358c2SMichael S. Tsirkin 9497ab358c2SMichael S. Tsirkin END_USE(vq); 9507ab358c2SMichael S. Tsirkin return true; 9517ab358c2SMichael S. Tsirkin } 9527ab358c2SMichael S. Tsirkin 953138fd251STiwei Bie static void *virtqueue_detach_unused_buf_split(struct virtqueue *_vq) 954c021eac4SShirley Ma { 955c021eac4SShirley Ma struct vring_virtqueue *vq = to_vvq(_vq); 956c021eac4SShirley Ma unsigned int i; 957c021eac4SShirley Ma void *buf; 958c021eac4SShirley Ma 959c021eac4SShirley Ma START_USE(vq); 960c021eac4SShirley Ma 961e593bf97STiwei Bie for (i = 0; i < vq->split.vring.num; i++) { 962cbeedb72STiwei Bie if (!vq->split.desc_state[i].data) 963c021eac4SShirley Ma continue; 964138fd251STiwei Bie /* detach_buf_split clears data, so grab it now. */ 965cbeedb72STiwei Bie buf = vq->split.desc_state[i].data; 966138fd251STiwei Bie detach_buf_split(vq, i, NULL); 967e593bf97STiwei Bie vq->split.avail_idx_shadow--; 968e593bf97STiwei Bie vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev, 969e593bf97STiwei Bie vq->split.avail_idx_shadow); 970c021eac4SShirley Ma END_USE(vq); 971c021eac4SShirley Ma return buf; 972c021eac4SShirley Ma } 973c021eac4SShirley Ma /* That should have freed everything. */ 974e593bf97STiwei Bie BUG_ON(vq->vq.num_free != vq->split.vring.num); 975c021eac4SShirley Ma 976c021eac4SShirley Ma END_USE(vq); 977c021eac4SShirley Ma return NULL; 978c021eac4SShirley Ma } 979138fd251STiwei Bie 980198fa7beSXuan Zhuo static void virtqueue_vring_init_split(struct vring_virtqueue_split *vring_split, 981198fa7beSXuan Zhuo struct vring_virtqueue *vq) 982198fa7beSXuan Zhuo { 983198fa7beSXuan Zhuo struct virtio_device *vdev; 984198fa7beSXuan Zhuo 985198fa7beSXuan Zhuo vdev = vq->vq.vdev; 986198fa7beSXuan Zhuo 987198fa7beSXuan Zhuo vring_split->avail_flags_shadow = 0; 988198fa7beSXuan Zhuo vring_split->avail_idx_shadow = 0; 989198fa7beSXuan Zhuo 990198fa7beSXuan Zhuo /* No callback? Tell other side not to bother us. */ 991198fa7beSXuan Zhuo if (!vq->vq.callback) { 992198fa7beSXuan Zhuo vring_split->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT; 993198fa7beSXuan Zhuo if (!vq->event) 994198fa7beSXuan Zhuo vring_split->vring.avail->flags = cpu_to_virtio16(vdev, 995198fa7beSXuan Zhuo vring_split->avail_flags_shadow); 996198fa7beSXuan Zhuo } 997198fa7beSXuan Zhuo } 998198fa7beSXuan Zhuo 999e5175b41SXuan Zhuo static void virtqueue_reinit_split(struct vring_virtqueue *vq) 1000e5175b41SXuan Zhuo { 1001e5175b41SXuan Zhuo int num; 1002e5175b41SXuan Zhuo 1003e5175b41SXuan Zhuo num = vq->split.vring.num; 1004e5175b41SXuan Zhuo 1005e5175b41SXuan Zhuo vq->split.vring.avail->flags = 0; 1006e5175b41SXuan Zhuo vq->split.vring.avail->idx = 0; 1007e5175b41SXuan Zhuo 1008e5175b41SXuan Zhuo /* reset avail event */ 1009e5175b41SXuan Zhuo vq->split.vring.avail->ring[num] = 0; 1010e5175b41SXuan Zhuo 1011e5175b41SXuan Zhuo vq->split.vring.used->flags = 0; 1012e5175b41SXuan Zhuo vq->split.vring.used->idx = 0; 1013e5175b41SXuan Zhuo 1014e5175b41SXuan Zhuo /* reset used event */ 1015e5175b41SXuan Zhuo *(__virtio16 *)&(vq->split.vring.used->ring[num]) = 0; 1016e5175b41SXuan Zhuo 1017e5175b41SXuan Zhuo virtqueue_init(vq, num); 1018e5175b41SXuan Zhuo 1019e5175b41SXuan Zhuo virtqueue_vring_init_split(&vq->split, vq); 1020e5175b41SXuan Zhuo } 1021e5175b41SXuan Zhuo 1022e1d6a423SXuan Zhuo static void virtqueue_vring_attach_split(struct vring_virtqueue *vq, 1023e1d6a423SXuan Zhuo struct vring_virtqueue_split *vring_split) 1024e1d6a423SXuan Zhuo { 1025e1d6a423SXuan Zhuo vq->split = *vring_split; 1026e1d6a423SXuan Zhuo 1027e1d6a423SXuan Zhuo /* Put everything in free lists. */ 1028e1d6a423SXuan Zhuo vq->free_head = 0; 1029e1d6a423SXuan Zhuo } 1030e1d6a423SXuan Zhuo 1031a2b36c8dSXuan Zhuo static int vring_alloc_state_extra_split(struct vring_virtqueue_split *vring_split) 1032a2b36c8dSXuan Zhuo { 1033a2b36c8dSXuan Zhuo struct vring_desc_state_split *state; 1034a2b36c8dSXuan Zhuo struct vring_desc_extra *extra; 1035a2b36c8dSXuan Zhuo u32 num = vring_split->vring.num; 1036a2b36c8dSXuan Zhuo 1037a2b36c8dSXuan Zhuo state = kmalloc_array(num, sizeof(struct vring_desc_state_split), GFP_KERNEL); 1038a2b36c8dSXuan Zhuo if (!state) 1039a2b36c8dSXuan Zhuo goto err_state; 1040a2b36c8dSXuan Zhuo 1041a2b36c8dSXuan Zhuo extra = vring_alloc_desc_extra(num); 1042a2b36c8dSXuan Zhuo if (!extra) 1043a2b36c8dSXuan Zhuo goto err_extra; 1044a2b36c8dSXuan Zhuo 1045a2b36c8dSXuan Zhuo memset(state, 0, num * sizeof(struct vring_desc_state_split)); 1046a2b36c8dSXuan Zhuo 1047a2b36c8dSXuan Zhuo vring_split->desc_state = state; 1048a2b36c8dSXuan Zhuo vring_split->desc_extra = extra; 1049a2b36c8dSXuan Zhuo return 0; 1050a2b36c8dSXuan Zhuo 1051a2b36c8dSXuan Zhuo err_extra: 1052a2b36c8dSXuan Zhuo kfree(state); 1053a2b36c8dSXuan Zhuo err_state: 1054a2b36c8dSXuan Zhuo return -ENOMEM; 1055a2b36c8dSXuan Zhuo } 1056a2b36c8dSXuan Zhuo 105789f05d94SXuan Zhuo static void vring_free_split(struct vring_virtqueue_split *vring_split, 10582713ea3cSJason Wang struct virtio_device *vdev, struct device *dma_dev) 105989f05d94SXuan Zhuo { 106089f05d94SXuan Zhuo vring_free_queue(vdev, vring_split->queue_size_in_bytes, 106189f05d94SXuan Zhuo vring_split->vring.desc, 10622713ea3cSJason Wang vring_split->queue_dma_addr, 10632713ea3cSJason Wang dma_dev); 106489f05d94SXuan Zhuo 106589f05d94SXuan Zhuo kfree(vring_split->desc_state); 106689f05d94SXuan Zhuo kfree(vring_split->desc_extra); 106789f05d94SXuan Zhuo } 106889f05d94SXuan Zhuo 1069c2d87fe6SXuan Zhuo static int vring_alloc_queue_split(struct vring_virtqueue_split *vring_split, 1070c2d87fe6SXuan Zhuo struct virtio_device *vdev, 1071c2d87fe6SXuan Zhuo u32 num, 1072c2d87fe6SXuan Zhuo unsigned int vring_align, 10732713ea3cSJason Wang bool may_reduce_num, 10742713ea3cSJason Wang struct device *dma_dev) 1075c2d87fe6SXuan Zhuo { 1076c2d87fe6SXuan Zhuo void *queue = NULL; 1077c2d87fe6SXuan Zhuo dma_addr_t dma_addr; 1078c2d87fe6SXuan Zhuo 1079c2d87fe6SXuan Zhuo /* We assume num is a power of 2. */ 1080b9d978a8SShaoqin Huang if (!is_power_of_2(num)) { 1081c2d87fe6SXuan Zhuo dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num); 1082c2d87fe6SXuan Zhuo return -EINVAL; 1083c2d87fe6SXuan Zhuo } 1084c2d87fe6SXuan Zhuo 1085c2d87fe6SXuan Zhuo /* TODO: allocate each queue chunk individually */ 1086c2d87fe6SXuan Zhuo for (; num && vring_size(num, vring_align) > PAGE_SIZE; num /= 2) { 1087c2d87fe6SXuan Zhuo queue = vring_alloc_queue(vdev, vring_size(num, vring_align), 1088c2d87fe6SXuan Zhuo &dma_addr, 10892713ea3cSJason Wang GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, 10902713ea3cSJason Wang dma_dev); 1091c2d87fe6SXuan Zhuo if (queue) 1092c2d87fe6SXuan Zhuo break; 1093c2d87fe6SXuan Zhuo if (!may_reduce_num) 1094c2d87fe6SXuan Zhuo return -ENOMEM; 1095c2d87fe6SXuan Zhuo } 1096c2d87fe6SXuan Zhuo 1097c2d87fe6SXuan Zhuo if (!num) 1098c2d87fe6SXuan Zhuo return -ENOMEM; 1099c2d87fe6SXuan Zhuo 1100c2d87fe6SXuan Zhuo if (!queue) { 1101c2d87fe6SXuan Zhuo /* Try to get a single page. You are my only hope! */ 1102c2d87fe6SXuan Zhuo queue = vring_alloc_queue(vdev, vring_size(num, vring_align), 11032713ea3cSJason Wang &dma_addr, GFP_KERNEL | __GFP_ZERO, 11042713ea3cSJason Wang dma_dev); 1105c2d87fe6SXuan Zhuo } 1106c2d87fe6SXuan Zhuo if (!queue) 1107c2d87fe6SXuan Zhuo return -ENOMEM; 1108c2d87fe6SXuan Zhuo 1109c2d87fe6SXuan Zhuo vring_init(&vring_split->vring, num, queue, vring_align); 1110c2d87fe6SXuan Zhuo 1111c2d87fe6SXuan Zhuo vring_split->queue_dma_addr = dma_addr; 1112c2d87fe6SXuan Zhuo vring_split->queue_size_in_bytes = vring_size(num, vring_align); 1113c2d87fe6SXuan Zhuo 1114af36b16fSXuan Zhuo vring_split->vring_align = vring_align; 1115af36b16fSXuan Zhuo vring_split->may_reduce_num = may_reduce_num; 1116af36b16fSXuan Zhuo 1117c2d87fe6SXuan Zhuo return 0; 1118c2d87fe6SXuan Zhuo } 1119c2d87fe6SXuan Zhuo 1120d79dca75STiwei Bie static struct virtqueue *vring_create_virtqueue_split( 1121d79dca75STiwei Bie unsigned int index, 1122d79dca75STiwei Bie unsigned int num, 1123d79dca75STiwei Bie unsigned int vring_align, 1124d79dca75STiwei Bie struct virtio_device *vdev, 1125d79dca75STiwei Bie bool weak_barriers, 1126d79dca75STiwei Bie bool may_reduce_num, 1127d79dca75STiwei Bie bool context, 1128d79dca75STiwei Bie bool (*notify)(struct virtqueue *), 1129d79dca75STiwei Bie void (*callback)(struct virtqueue *), 11302713ea3cSJason Wang const char *name, 11312713ea3cSJason Wang struct device *dma_dev) 1132d79dca75STiwei Bie { 1133cd4c812aSXuan Zhuo struct vring_virtqueue_split vring_split = {}; 1134d79dca75STiwei Bie struct virtqueue *vq; 1135c2d87fe6SXuan Zhuo int err; 1136d79dca75STiwei Bie 1137c2d87fe6SXuan Zhuo err = vring_alloc_queue_split(&vring_split, vdev, num, vring_align, 11382713ea3cSJason Wang may_reduce_num, dma_dev); 1139c2d87fe6SXuan Zhuo if (err) 1140d79dca75STiwei Bie return NULL; 1141d79dca75STiwei Bie 1142cd4c812aSXuan Zhuo vq = __vring_new_virtqueue(index, &vring_split, vdev, weak_barriers, 11432713ea3cSJason Wang context, notify, callback, name, dma_dev); 1144d79dca75STiwei Bie if (!vq) { 11452713ea3cSJason Wang vring_free_split(&vring_split, vdev, dma_dev); 1146d79dca75STiwei Bie return NULL; 1147d79dca75STiwei Bie } 1148d79dca75STiwei Bie 1149d79dca75STiwei Bie to_vvq(vq)->we_own_ring = true; 1150d79dca75STiwei Bie 1151d79dca75STiwei Bie return vq; 1152d79dca75STiwei Bie } 1153d79dca75STiwei Bie 11546fea20e5SXuan Zhuo static int virtqueue_resize_split(struct virtqueue *_vq, u32 num) 11556fea20e5SXuan Zhuo { 11566fea20e5SXuan Zhuo struct vring_virtqueue_split vring_split = {}; 11576fea20e5SXuan Zhuo struct vring_virtqueue *vq = to_vvq(_vq); 11586fea20e5SXuan Zhuo struct virtio_device *vdev = _vq->vdev; 11596fea20e5SXuan Zhuo int err; 11606fea20e5SXuan Zhuo 11616fea20e5SXuan Zhuo err = vring_alloc_queue_split(&vring_split, vdev, num, 11626fea20e5SXuan Zhuo vq->split.vring_align, 11632713ea3cSJason Wang vq->split.may_reduce_num, 11642713ea3cSJason Wang vring_dma_dev(vq)); 11656fea20e5SXuan Zhuo if (err) 11666fea20e5SXuan Zhuo goto err; 11676fea20e5SXuan Zhuo 11686fea20e5SXuan Zhuo err = vring_alloc_state_extra_split(&vring_split); 11696fea20e5SXuan Zhuo if (err) 11706fea20e5SXuan Zhuo goto err_state_extra; 11716fea20e5SXuan Zhuo 11726fea20e5SXuan Zhuo vring_free(&vq->vq); 11736fea20e5SXuan Zhuo 11746fea20e5SXuan Zhuo virtqueue_vring_init_split(&vring_split, vq); 11756fea20e5SXuan Zhuo 11766fea20e5SXuan Zhuo virtqueue_init(vq, vring_split.vring.num); 11776fea20e5SXuan Zhuo virtqueue_vring_attach_split(vq, &vring_split); 11786fea20e5SXuan Zhuo 11796fea20e5SXuan Zhuo return 0; 11806fea20e5SXuan Zhuo 11816fea20e5SXuan Zhuo err_state_extra: 11822713ea3cSJason Wang vring_free_split(&vring_split, vdev, vring_dma_dev(vq)); 11836fea20e5SXuan Zhuo err: 11846fea20e5SXuan Zhuo virtqueue_reinit_split(vq); 11856fea20e5SXuan Zhuo return -ENOMEM; 11866fea20e5SXuan Zhuo } 11876fea20e5SXuan Zhuo 1188e6f633e5STiwei Bie 1189e6f633e5STiwei Bie /* 11901ce9e605STiwei Bie * Packed ring specific functions - *_packed(). 11911ce9e605STiwei Bie */ 11921adbd6b2SFeng Liu static bool packed_used_wrap_counter(u16 last_used_idx) 1193a7722890Shuangjie.albert { 1194a7722890Shuangjie.albert return !!(last_used_idx & (1 << VRING_PACKED_EVENT_F_WRAP_CTR)); 1195a7722890Shuangjie.albert } 1196a7722890Shuangjie.albert 11971adbd6b2SFeng Liu static u16 packed_last_used(u16 last_used_idx) 1198a7722890Shuangjie.albert { 1199a7722890Shuangjie.albert return last_used_idx & ~(-(1 << VRING_PACKED_EVENT_F_WRAP_CTR)); 1200a7722890Shuangjie.albert } 12011ce9e605STiwei Bie 1202d80dc15bSXuan Zhuo static void vring_unmap_extra_packed(const struct vring_virtqueue *vq, 12034b6ec919SFeng Liu const struct vring_desc_extra *extra) 12041ce9e605STiwei Bie { 12051ce9e605STiwei Bie u16 flags; 12061ce9e605STiwei Bie 12071ce9e605STiwei Bie if (!vq->use_dma_api) 12081ce9e605STiwei Bie return; 12091ce9e605STiwei Bie 1210d80dc15bSXuan Zhuo flags = extra->flags; 12111ce9e605STiwei Bie 12121ce9e605STiwei Bie if (flags & VRING_DESC_F_INDIRECT) { 12131ce9e605STiwei Bie dma_unmap_single(vring_dma_dev(vq), 1214d80dc15bSXuan Zhuo extra->addr, extra->len, 12151ce9e605STiwei Bie (flags & VRING_DESC_F_WRITE) ? 12161ce9e605STiwei Bie DMA_FROM_DEVICE : DMA_TO_DEVICE); 12171ce9e605STiwei Bie } else { 12181ce9e605STiwei Bie dma_unmap_page(vring_dma_dev(vq), 1219d80dc15bSXuan Zhuo extra->addr, extra->len, 12201ce9e605STiwei Bie (flags & VRING_DESC_F_WRITE) ? 12211ce9e605STiwei Bie DMA_FROM_DEVICE : DMA_TO_DEVICE); 12221ce9e605STiwei Bie } 12231ce9e605STiwei Bie } 12241ce9e605STiwei Bie 12251ce9e605STiwei Bie static void vring_unmap_desc_packed(const struct vring_virtqueue *vq, 12264b6ec919SFeng Liu const struct vring_packed_desc *desc) 12271ce9e605STiwei Bie { 12281ce9e605STiwei Bie u16 flags; 12291ce9e605STiwei Bie 12301ce9e605STiwei Bie if (!vq->use_dma_api) 12311ce9e605STiwei Bie return; 12321ce9e605STiwei Bie 12331ce9e605STiwei Bie flags = le16_to_cpu(desc->flags); 12341ce9e605STiwei Bie 12351ce9e605STiwei Bie dma_unmap_page(vring_dma_dev(vq), 12361ce9e605STiwei Bie le64_to_cpu(desc->addr), 12371ce9e605STiwei Bie le32_to_cpu(desc->len), 12381ce9e605STiwei Bie (flags & VRING_DESC_F_WRITE) ? 12391ce9e605STiwei Bie DMA_FROM_DEVICE : DMA_TO_DEVICE); 12401ce9e605STiwei Bie } 12411ce9e605STiwei Bie 12421ce9e605STiwei Bie static struct vring_packed_desc *alloc_indirect_packed(unsigned int total_sg, 12431ce9e605STiwei Bie gfp_t gfp) 12441ce9e605STiwei Bie { 12451ce9e605STiwei Bie struct vring_packed_desc *desc; 12461ce9e605STiwei Bie 12471ce9e605STiwei Bie /* 12481ce9e605STiwei Bie * We require lowmem mappings for the descriptors because 12491ce9e605STiwei Bie * otherwise virt_to_phys will give us bogus addresses in the 12501ce9e605STiwei Bie * virtqueue. 12511ce9e605STiwei Bie */ 12521ce9e605STiwei Bie gfp &= ~__GFP_HIGHMEM; 12531ce9e605STiwei Bie 12541ce9e605STiwei Bie desc = kmalloc_array(total_sg, sizeof(struct vring_packed_desc), gfp); 12551ce9e605STiwei Bie 12561ce9e605STiwei Bie return desc; 12571ce9e605STiwei Bie } 12581ce9e605STiwei Bie 12591ce9e605STiwei Bie static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq, 12601ce9e605STiwei Bie struct scatterlist *sgs[], 12611ce9e605STiwei Bie unsigned int total_sg, 12621ce9e605STiwei Bie unsigned int out_sgs, 12631ce9e605STiwei Bie unsigned int in_sgs, 12641ce9e605STiwei Bie void *data, 12651ce9e605STiwei Bie gfp_t gfp) 12661ce9e605STiwei Bie { 12671ce9e605STiwei Bie struct vring_packed_desc *desc; 12681ce9e605STiwei Bie struct scatterlist *sg; 12691ce9e605STiwei Bie unsigned int i, n, err_idx; 12701ce9e605STiwei Bie u16 head, id; 12711ce9e605STiwei Bie dma_addr_t addr; 12721ce9e605STiwei Bie 12731ce9e605STiwei Bie head = vq->packed.next_avail_idx; 12741ce9e605STiwei Bie desc = alloc_indirect_packed(total_sg, gfp); 1275fc6d70f4SXuan Zhuo if (!desc) 1276fc6d70f4SXuan Zhuo return -ENOMEM; 12771ce9e605STiwei Bie 12781ce9e605STiwei Bie if (unlikely(vq->vq.num_free < 1)) { 12791ce9e605STiwei Bie pr_debug("Can't add buf len 1 - avail = 0\n"); 1280df0bfe75SYueHaibing kfree(desc); 12811ce9e605STiwei Bie END_USE(vq); 12821ce9e605STiwei Bie return -ENOSPC; 12831ce9e605STiwei Bie } 12841ce9e605STiwei Bie 12851ce9e605STiwei Bie i = 0; 12861ce9e605STiwei Bie id = vq->free_head; 12871ce9e605STiwei Bie BUG_ON(id == vq->packed.vring.num); 12881ce9e605STiwei Bie 12891ce9e605STiwei Bie for (n = 0; n < out_sgs + in_sgs; n++) { 12901ce9e605STiwei Bie for (sg = sgs[n]; sg; sg = sg_next(sg)) { 1291*0e27fa6dSXuan Zhuo if (vring_map_one_sg(vq, sg, n < out_sgs ? 1292*0e27fa6dSXuan Zhuo DMA_TO_DEVICE : DMA_FROM_DEVICE, &addr)) 12931ce9e605STiwei Bie goto unmap_release; 12941ce9e605STiwei Bie 12951ce9e605STiwei Bie desc[i].flags = cpu_to_le16(n < out_sgs ? 12961ce9e605STiwei Bie 0 : VRING_DESC_F_WRITE); 12971ce9e605STiwei Bie desc[i].addr = cpu_to_le64(addr); 12981ce9e605STiwei Bie desc[i].len = cpu_to_le32(sg->length); 12991ce9e605STiwei Bie i++; 13001ce9e605STiwei Bie } 13011ce9e605STiwei Bie } 13021ce9e605STiwei Bie 13031ce9e605STiwei Bie /* Now that the indirect table is filled in, map it. */ 13041ce9e605STiwei Bie addr = vring_map_single(vq, desc, 13051ce9e605STiwei Bie total_sg * sizeof(struct vring_packed_desc), 13061ce9e605STiwei Bie DMA_TO_DEVICE); 13071ce9e605STiwei Bie if (vring_mapping_error(vq, addr)) 13081ce9e605STiwei Bie goto unmap_release; 13091ce9e605STiwei Bie 13101ce9e605STiwei Bie vq->packed.vring.desc[head].addr = cpu_to_le64(addr); 13111ce9e605STiwei Bie vq->packed.vring.desc[head].len = cpu_to_le32(total_sg * 13121ce9e605STiwei Bie sizeof(struct vring_packed_desc)); 13131ce9e605STiwei Bie vq->packed.vring.desc[head].id = cpu_to_le16(id); 13141ce9e605STiwei Bie 13151ce9e605STiwei Bie if (vq->use_dma_api) { 13161ce9e605STiwei Bie vq->packed.desc_extra[id].addr = addr; 13171ce9e605STiwei Bie vq->packed.desc_extra[id].len = total_sg * 13181ce9e605STiwei Bie sizeof(struct vring_packed_desc); 13191ce9e605STiwei Bie vq->packed.desc_extra[id].flags = VRING_DESC_F_INDIRECT | 13201ce9e605STiwei Bie vq->packed.avail_used_flags; 13211ce9e605STiwei Bie } 13221ce9e605STiwei Bie 13231ce9e605STiwei Bie /* 13241ce9e605STiwei Bie * A driver MUST NOT make the first descriptor in the list 13251ce9e605STiwei Bie * available before all subsequent descriptors comprising 13261ce9e605STiwei Bie * the list are made available. 13271ce9e605STiwei Bie */ 13281ce9e605STiwei Bie virtio_wmb(vq->weak_barriers); 13291ce9e605STiwei Bie vq->packed.vring.desc[head].flags = cpu_to_le16(VRING_DESC_F_INDIRECT | 13301ce9e605STiwei Bie vq->packed.avail_used_flags); 13311ce9e605STiwei Bie 13321ce9e605STiwei Bie /* We're using some buffers from the free list. */ 13331ce9e605STiwei Bie vq->vq.num_free -= 1; 13341ce9e605STiwei Bie 13351ce9e605STiwei Bie /* Update free pointer */ 13361ce9e605STiwei Bie n = head + 1; 13371ce9e605STiwei Bie if (n >= vq->packed.vring.num) { 13381ce9e605STiwei Bie n = 0; 13391ce9e605STiwei Bie vq->packed.avail_wrap_counter ^= 1; 13401ce9e605STiwei Bie vq->packed.avail_used_flags ^= 13411ce9e605STiwei Bie 1 << VRING_PACKED_DESC_F_AVAIL | 13421ce9e605STiwei Bie 1 << VRING_PACKED_DESC_F_USED; 13431ce9e605STiwei Bie } 13441ce9e605STiwei Bie vq->packed.next_avail_idx = n; 1345aeef9b47SJason Wang vq->free_head = vq->packed.desc_extra[id].next; 13461ce9e605STiwei Bie 13471ce9e605STiwei Bie /* Store token and indirect buffer state. */ 13481ce9e605STiwei Bie vq->packed.desc_state[id].num = 1; 13491ce9e605STiwei Bie vq->packed.desc_state[id].data = data; 13501ce9e605STiwei Bie vq->packed.desc_state[id].indir_desc = desc; 13511ce9e605STiwei Bie vq->packed.desc_state[id].last = id; 13521ce9e605STiwei Bie 13531ce9e605STiwei Bie vq->num_added += 1; 13541ce9e605STiwei Bie 13551ce9e605STiwei Bie pr_debug("Added buffer head %i to %p\n", head, vq); 13561ce9e605STiwei Bie END_USE(vq); 13571ce9e605STiwei Bie 13581ce9e605STiwei Bie return 0; 13591ce9e605STiwei Bie 13601ce9e605STiwei Bie unmap_release: 13611ce9e605STiwei Bie err_idx = i; 13621ce9e605STiwei Bie 13631ce9e605STiwei Bie for (i = 0; i < err_idx; i++) 13641ce9e605STiwei Bie vring_unmap_desc_packed(vq, &desc[i]); 13651ce9e605STiwei Bie 13661ce9e605STiwei Bie kfree(desc); 13671ce9e605STiwei Bie 13681ce9e605STiwei Bie END_USE(vq); 1369f7728002SHalil Pasic return -ENOMEM; 13701ce9e605STiwei Bie } 13711ce9e605STiwei Bie 13721ce9e605STiwei Bie static inline int virtqueue_add_packed(struct virtqueue *_vq, 13731ce9e605STiwei Bie struct scatterlist *sgs[], 13741ce9e605STiwei Bie unsigned int total_sg, 13751ce9e605STiwei Bie unsigned int out_sgs, 13761ce9e605STiwei Bie unsigned int in_sgs, 13771ce9e605STiwei Bie void *data, 13781ce9e605STiwei Bie void *ctx, 13791ce9e605STiwei Bie gfp_t gfp) 13801ce9e605STiwei Bie { 13811ce9e605STiwei Bie struct vring_virtqueue *vq = to_vvq(_vq); 13821ce9e605STiwei Bie struct vring_packed_desc *desc; 13831ce9e605STiwei Bie struct scatterlist *sg; 13841ce9e605STiwei Bie unsigned int i, n, c, descs_used, err_idx; 13853f649ab7SKees Cook __le16 head_flags, flags; 13863f649ab7SKees Cook u16 head, id, prev, curr, avail_used_flags; 1387fc6d70f4SXuan Zhuo int err; 13881ce9e605STiwei Bie 13891ce9e605STiwei Bie START_USE(vq); 13901ce9e605STiwei Bie 13911ce9e605STiwei Bie BUG_ON(data == NULL); 13921ce9e605STiwei Bie BUG_ON(ctx && vq->indirect); 13931ce9e605STiwei Bie 13941ce9e605STiwei Bie if (unlikely(vq->broken)) { 13951ce9e605STiwei Bie END_USE(vq); 13961ce9e605STiwei Bie return -EIO; 13971ce9e605STiwei Bie } 13981ce9e605STiwei Bie 13991ce9e605STiwei Bie LAST_ADD_TIME_UPDATE(vq); 14001ce9e605STiwei Bie 14011ce9e605STiwei Bie BUG_ON(total_sg == 0); 14021ce9e605STiwei Bie 140335c51e09SXianting Tian if (virtqueue_use_indirect(vq, total_sg)) { 1404fc6d70f4SXuan Zhuo err = virtqueue_add_indirect_packed(vq, sgs, total_sg, out_sgs, 1405fc6d70f4SXuan Zhuo in_sgs, data, gfp); 14061861ba62SMichael S. Tsirkin if (err != -ENOMEM) { 14071861ba62SMichael S. Tsirkin END_USE(vq); 1408fc6d70f4SXuan Zhuo return err; 14091861ba62SMichael S. Tsirkin } 1410fc6d70f4SXuan Zhuo 1411fc6d70f4SXuan Zhuo /* fall back on direct */ 1412fc6d70f4SXuan Zhuo } 14131ce9e605STiwei Bie 14141ce9e605STiwei Bie head = vq->packed.next_avail_idx; 14151ce9e605STiwei Bie avail_used_flags = vq->packed.avail_used_flags; 14161ce9e605STiwei Bie 14171ce9e605STiwei Bie WARN_ON_ONCE(total_sg > vq->packed.vring.num && !vq->indirect); 14181ce9e605STiwei Bie 14191ce9e605STiwei Bie desc = vq->packed.vring.desc; 14201ce9e605STiwei Bie i = head; 14211ce9e605STiwei Bie descs_used = total_sg; 14221ce9e605STiwei Bie 14231ce9e605STiwei Bie if (unlikely(vq->vq.num_free < descs_used)) { 14241ce9e605STiwei Bie pr_debug("Can't add buf len %i - avail = %i\n", 14251ce9e605STiwei Bie descs_used, vq->vq.num_free); 14261ce9e605STiwei Bie END_USE(vq); 14271ce9e605STiwei Bie return -ENOSPC; 14281ce9e605STiwei Bie } 14291ce9e605STiwei Bie 14301ce9e605STiwei Bie id = vq->free_head; 14311ce9e605STiwei Bie BUG_ON(id == vq->packed.vring.num); 14321ce9e605STiwei Bie 14331ce9e605STiwei Bie curr = id; 14341ce9e605STiwei Bie c = 0; 14351ce9e605STiwei Bie for (n = 0; n < out_sgs + in_sgs; n++) { 14361ce9e605STiwei Bie for (sg = sgs[n]; sg; sg = sg_next(sg)) { 1437*0e27fa6dSXuan Zhuo dma_addr_t addr; 1438*0e27fa6dSXuan Zhuo 1439*0e27fa6dSXuan Zhuo if (vring_map_one_sg(vq, sg, n < out_sgs ? 1440*0e27fa6dSXuan Zhuo DMA_TO_DEVICE : DMA_FROM_DEVICE, &addr)) 14411ce9e605STiwei Bie goto unmap_release; 14421ce9e605STiwei Bie 14431ce9e605STiwei Bie flags = cpu_to_le16(vq->packed.avail_used_flags | 14441ce9e605STiwei Bie (++c == total_sg ? 0 : VRING_DESC_F_NEXT) | 14451ce9e605STiwei Bie (n < out_sgs ? 0 : VRING_DESC_F_WRITE)); 14461ce9e605STiwei Bie if (i == head) 14471ce9e605STiwei Bie head_flags = flags; 14481ce9e605STiwei Bie else 14491ce9e605STiwei Bie desc[i].flags = flags; 14501ce9e605STiwei Bie 14511ce9e605STiwei Bie desc[i].addr = cpu_to_le64(addr); 14521ce9e605STiwei Bie desc[i].len = cpu_to_le32(sg->length); 14531ce9e605STiwei Bie desc[i].id = cpu_to_le16(id); 14541ce9e605STiwei Bie 14551ce9e605STiwei Bie if (unlikely(vq->use_dma_api)) { 14561ce9e605STiwei Bie vq->packed.desc_extra[curr].addr = addr; 14571ce9e605STiwei Bie vq->packed.desc_extra[curr].len = sg->length; 14581ce9e605STiwei Bie vq->packed.desc_extra[curr].flags = 14591ce9e605STiwei Bie le16_to_cpu(flags); 14601ce9e605STiwei Bie } 14611ce9e605STiwei Bie prev = curr; 1462aeef9b47SJason Wang curr = vq->packed.desc_extra[curr].next; 14631ce9e605STiwei Bie 14641ce9e605STiwei Bie if ((unlikely(++i >= vq->packed.vring.num))) { 14651ce9e605STiwei Bie i = 0; 14661ce9e605STiwei Bie vq->packed.avail_used_flags ^= 14671ce9e605STiwei Bie 1 << VRING_PACKED_DESC_F_AVAIL | 14681ce9e605STiwei Bie 1 << VRING_PACKED_DESC_F_USED; 14691ce9e605STiwei Bie } 14701ce9e605STiwei Bie } 14711ce9e605STiwei Bie } 14721ce9e605STiwei Bie 14731ce9e605STiwei Bie if (i < head) 14741ce9e605STiwei Bie vq->packed.avail_wrap_counter ^= 1; 14751ce9e605STiwei Bie 14761ce9e605STiwei Bie /* We're using some buffers from the free list. */ 14771ce9e605STiwei Bie vq->vq.num_free -= descs_used; 14781ce9e605STiwei Bie 14791ce9e605STiwei Bie /* Update free pointer */ 14801ce9e605STiwei Bie vq->packed.next_avail_idx = i; 14811ce9e605STiwei Bie vq->free_head = curr; 14821ce9e605STiwei Bie 14831ce9e605STiwei Bie /* Store token. */ 14841ce9e605STiwei Bie vq->packed.desc_state[id].num = descs_used; 14851ce9e605STiwei Bie vq->packed.desc_state[id].data = data; 14861ce9e605STiwei Bie vq->packed.desc_state[id].indir_desc = ctx; 14871ce9e605STiwei Bie vq->packed.desc_state[id].last = prev; 14881ce9e605STiwei Bie 14891ce9e605STiwei Bie /* 14901ce9e605STiwei Bie * A driver MUST NOT make the first descriptor in the list 14911ce9e605STiwei Bie * available before all subsequent descriptors comprising 14921ce9e605STiwei Bie * the list are made available. 14931ce9e605STiwei Bie */ 14941ce9e605STiwei Bie virtio_wmb(vq->weak_barriers); 14951ce9e605STiwei Bie vq->packed.vring.desc[head].flags = head_flags; 14961ce9e605STiwei Bie vq->num_added += descs_used; 14971ce9e605STiwei Bie 14981ce9e605STiwei Bie pr_debug("Added buffer head %i to %p\n", head, vq); 14991ce9e605STiwei Bie END_USE(vq); 15001ce9e605STiwei Bie 15011ce9e605STiwei Bie return 0; 15021ce9e605STiwei Bie 15031ce9e605STiwei Bie unmap_release: 15041ce9e605STiwei Bie err_idx = i; 15051ce9e605STiwei Bie i = head; 150644593865SJason Wang curr = vq->free_head; 15071ce9e605STiwei Bie 15081ce9e605STiwei Bie vq->packed.avail_used_flags = avail_used_flags; 15091ce9e605STiwei Bie 15101ce9e605STiwei Bie for (n = 0; n < total_sg; n++) { 15111ce9e605STiwei Bie if (i == err_idx) 15121ce9e605STiwei Bie break; 1513d80dc15bSXuan Zhuo vring_unmap_extra_packed(vq, &vq->packed.desc_extra[curr]); 151444593865SJason Wang curr = vq->packed.desc_extra[curr].next; 15151ce9e605STiwei Bie i++; 15161ce9e605STiwei Bie if (i >= vq->packed.vring.num) 15171ce9e605STiwei Bie i = 0; 15181ce9e605STiwei Bie } 15191ce9e605STiwei Bie 15201ce9e605STiwei Bie END_USE(vq); 15211ce9e605STiwei Bie return -EIO; 15221ce9e605STiwei Bie } 15231ce9e605STiwei Bie 15241ce9e605STiwei Bie static bool virtqueue_kick_prepare_packed(struct virtqueue *_vq) 15251ce9e605STiwei Bie { 15261ce9e605STiwei Bie struct vring_virtqueue *vq = to_vvq(_vq); 1527f51f9826STiwei Bie u16 new, old, off_wrap, flags, wrap_counter, event_idx; 15281ce9e605STiwei Bie bool needs_kick; 15291ce9e605STiwei Bie union { 15301ce9e605STiwei Bie struct { 15311ce9e605STiwei Bie __le16 off_wrap; 15321ce9e605STiwei Bie __le16 flags; 15331ce9e605STiwei Bie }; 15341ce9e605STiwei Bie u32 u32; 15351ce9e605STiwei Bie } snapshot; 15361ce9e605STiwei Bie 15371ce9e605STiwei Bie START_USE(vq); 15381ce9e605STiwei Bie 15391ce9e605STiwei Bie /* 15401ce9e605STiwei Bie * We need to expose the new flags value before checking notification 15411ce9e605STiwei Bie * suppressions. 15421ce9e605STiwei Bie */ 15431ce9e605STiwei Bie virtio_mb(vq->weak_barriers); 15441ce9e605STiwei Bie 1545f51f9826STiwei Bie old = vq->packed.next_avail_idx - vq->num_added; 1546f51f9826STiwei Bie new = vq->packed.next_avail_idx; 15471ce9e605STiwei Bie vq->num_added = 0; 15481ce9e605STiwei Bie 15491ce9e605STiwei Bie snapshot.u32 = *(u32 *)vq->packed.vring.device; 15501ce9e605STiwei Bie flags = le16_to_cpu(snapshot.flags); 15511ce9e605STiwei Bie 15521ce9e605STiwei Bie LAST_ADD_TIME_CHECK(vq); 15531ce9e605STiwei Bie LAST_ADD_TIME_INVALID(vq); 15541ce9e605STiwei Bie 1555f51f9826STiwei Bie if (flags != VRING_PACKED_EVENT_FLAG_DESC) { 15561ce9e605STiwei Bie needs_kick = (flags != VRING_PACKED_EVENT_FLAG_DISABLE); 1557f51f9826STiwei Bie goto out; 1558f51f9826STiwei Bie } 1559f51f9826STiwei Bie 1560f51f9826STiwei Bie off_wrap = le16_to_cpu(snapshot.off_wrap); 1561f51f9826STiwei Bie 1562f51f9826STiwei Bie wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR; 1563f51f9826STiwei Bie event_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR); 1564f51f9826STiwei Bie if (wrap_counter != vq->packed.avail_wrap_counter) 1565f51f9826STiwei Bie event_idx -= vq->packed.vring.num; 1566f51f9826STiwei Bie 1567f51f9826STiwei Bie needs_kick = vring_need_event(event_idx, new, old); 1568f51f9826STiwei Bie out: 15691ce9e605STiwei Bie END_USE(vq); 15701ce9e605STiwei Bie return needs_kick; 15711ce9e605STiwei Bie } 15721ce9e605STiwei Bie 15731ce9e605STiwei Bie static void detach_buf_packed(struct vring_virtqueue *vq, 15741ce9e605STiwei Bie unsigned int id, void **ctx) 15751ce9e605STiwei Bie { 15761ce9e605STiwei Bie struct vring_desc_state_packed *state = NULL; 15771ce9e605STiwei Bie struct vring_packed_desc *desc; 15781ce9e605STiwei Bie unsigned int i, curr; 15791ce9e605STiwei Bie 15801ce9e605STiwei Bie state = &vq->packed.desc_state[id]; 15811ce9e605STiwei Bie 15821ce9e605STiwei Bie /* Clear data ptr. */ 15831ce9e605STiwei Bie state->data = NULL; 15841ce9e605STiwei Bie 1585aeef9b47SJason Wang vq->packed.desc_extra[state->last].next = vq->free_head; 15861ce9e605STiwei Bie vq->free_head = id; 15871ce9e605STiwei Bie vq->vq.num_free += state->num; 15881ce9e605STiwei Bie 15891ce9e605STiwei Bie if (unlikely(vq->use_dma_api)) { 15901ce9e605STiwei Bie curr = id; 15911ce9e605STiwei Bie for (i = 0; i < state->num; i++) { 1592d80dc15bSXuan Zhuo vring_unmap_extra_packed(vq, 15931ce9e605STiwei Bie &vq->packed.desc_extra[curr]); 1594aeef9b47SJason Wang curr = vq->packed.desc_extra[curr].next; 15951ce9e605STiwei Bie } 15961ce9e605STiwei Bie } 15971ce9e605STiwei Bie 15981ce9e605STiwei Bie if (vq->indirect) { 15991ce9e605STiwei Bie u32 len; 16001ce9e605STiwei Bie 16011ce9e605STiwei Bie /* Free the indirect table, if any, now that it's unmapped. */ 16021ce9e605STiwei Bie desc = state->indir_desc; 16031ce9e605STiwei Bie if (!desc) 16041ce9e605STiwei Bie return; 16051ce9e605STiwei Bie 16061ce9e605STiwei Bie if (vq->use_dma_api) { 16071ce9e605STiwei Bie len = vq->packed.desc_extra[id].len; 16081ce9e605STiwei Bie for (i = 0; i < len / sizeof(struct vring_packed_desc); 16091ce9e605STiwei Bie i++) 16101ce9e605STiwei Bie vring_unmap_desc_packed(vq, &desc[i]); 16111ce9e605STiwei Bie } 16121ce9e605STiwei Bie kfree(desc); 16131ce9e605STiwei Bie state->indir_desc = NULL; 16141ce9e605STiwei Bie } else if (ctx) { 16151ce9e605STiwei Bie *ctx = state->indir_desc; 16161ce9e605STiwei Bie } 16171ce9e605STiwei Bie } 16181ce9e605STiwei Bie 16191ce9e605STiwei Bie static inline bool is_used_desc_packed(const struct vring_virtqueue *vq, 16201ce9e605STiwei Bie u16 idx, bool used_wrap_counter) 16211ce9e605STiwei Bie { 16221ce9e605STiwei Bie bool avail, used; 16231ce9e605STiwei Bie u16 flags; 16241ce9e605STiwei Bie 16251ce9e605STiwei Bie flags = le16_to_cpu(vq->packed.vring.desc[idx].flags); 16261ce9e605STiwei Bie avail = !!(flags & (1 << VRING_PACKED_DESC_F_AVAIL)); 16271ce9e605STiwei Bie used = !!(flags & (1 << VRING_PACKED_DESC_F_USED)); 16281ce9e605STiwei Bie 16291ce9e605STiwei Bie return avail == used && used == used_wrap_counter; 16301ce9e605STiwei Bie } 16311ce9e605STiwei Bie 16321adbd6b2SFeng Liu static bool more_used_packed(const struct vring_virtqueue *vq) 16331ce9e605STiwei Bie { 1634a7722890Shuangjie.albert u16 last_used; 1635a7722890Shuangjie.albert u16 last_used_idx; 1636a7722890Shuangjie.albert bool used_wrap_counter; 1637a7722890Shuangjie.albert 1638a7722890Shuangjie.albert last_used_idx = READ_ONCE(vq->last_used_idx); 1639a7722890Shuangjie.albert last_used = packed_last_used(last_used_idx); 1640a7722890Shuangjie.albert used_wrap_counter = packed_used_wrap_counter(last_used_idx); 1641a7722890Shuangjie.albert return is_used_desc_packed(vq, last_used, used_wrap_counter); 16421ce9e605STiwei Bie } 16431ce9e605STiwei Bie 16441ce9e605STiwei Bie static void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq, 16451ce9e605STiwei Bie unsigned int *len, 16461ce9e605STiwei Bie void **ctx) 16471ce9e605STiwei Bie { 16481ce9e605STiwei Bie struct vring_virtqueue *vq = to_vvq(_vq); 1649a7722890Shuangjie.albert u16 last_used, id, last_used_idx; 1650a7722890Shuangjie.albert bool used_wrap_counter; 16511ce9e605STiwei Bie void *ret; 16521ce9e605STiwei Bie 16531ce9e605STiwei Bie START_USE(vq); 16541ce9e605STiwei Bie 16551ce9e605STiwei Bie if (unlikely(vq->broken)) { 16561ce9e605STiwei Bie END_USE(vq); 16571ce9e605STiwei Bie return NULL; 16581ce9e605STiwei Bie } 16591ce9e605STiwei Bie 16601ce9e605STiwei Bie if (!more_used_packed(vq)) { 16611ce9e605STiwei Bie pr_debug("No more buffers in queue\n"); 16621ce9e605STiwei Bie END_USE(vq); 16631ce9e605STiwei Bie return NULL; 16641ce9e605STiwei Bie } 16651ce9e605STiwei Bie 16661ce9e605STiwei Bie /* Only get used elements after they have been exposed by host. */ 16671ce9e605STiwei Bie virtio_rmb(vq->weak_barriers); 16681ce9e605STiwei Bie 1669a7722890Shuangjie.albert last_used_idx = READ_ONCE(vq->last_used_idx); 1670a7722890Shuangjie.albert used_wrap_counter = packed_used_wrap_counter(last_used_idx); 1671a7722890Shuangjie.albert last_used = packed_last_used(last_used_idx); 16721ce9e605STiwei Bie id = le16_to_cpu(vq->packed.vring.desc[last_used].id); 16731ce9e605STiwei Bie *len = le32_to_cpu(vq->packed.vring.desc[last_used].len); 16741ce9e605STiwei Bie 16751ce9e605STiwei Bie if (unlikely(id >= vq->packed.vring.num)) { 16761ce9e605STiwei Bie BAD_RING(vq, "id %u out of range\n", id); 16771ce9e605STiwei Bie return NULL; 16781ce9e605STiwei Bie } 16791ce9e605STiwei Bie if (unlikely(!vq->packed.desc_state[id].data)) { 16801ce9e605STiwei Bie BAD_RING(vq, "id %u is not a head!\n", id); 16811ce9e605STiwei Bie return NULL; 16821ce9e605STiwei Bie } 16831ce9e605STiwei Bie 16841ce9e605STiwei Bie /* detach_buf_packed clears data, so grab it now. */ 16851ce9e605STiwei Bie ret = vq->packed.desc_state[id].data; 16861ce9e605STiwei Bie detach_buf_packed(vq, id, ctx); 16871ce9e605STiwei Bie 1688a7722890Shuangjie.albert last_used += vq->packed.desc_state[id].num; 1689a7722890Shuangjie.albert if (unlikely(last_used >= vq->packed.vring.num)) { 1690a7722890Shuangjie.albert last_used -= vq->packed.vring.num; 1691a7722890Shuangjie.albert used_wrap_counter ^= 1; 16921ce9e605STiwei Bie } 16931ce9e605STiwei Bie 1694a7722890Shuangjie.albert last_used = (last_used | (used_wrap_counter << VRING_PACKED_EVENT_F_WRAP_CTR)); 1695a7722890Shuangjie.albert WRITE_ONCE(vq->last_used_idx, last_used); 1696a7722890Shuangjie.albert 1697f51f9826STiwei Bie /* 1698f51f9826STiwei Bie * If we expect an interrupt for the next entry, tell host 1699f51f9826STiwei Bie * by writing event index and flush out the write before 1700f51f9826STiwei Bie * the read in the next get_buf call. 1701f51f9826STiwei Bie */ 1702f51f9826STiwei Bie if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DESC) 1703f51f9826STiwei Bie virtio_store_mb(vq->weak_barriers, 1704f51f9826STiwei Bie &vq->packed.vring.driver->off_wrap, 1705a7722890Shuangjie.albert cpu_to_le16(vq->last_used_idx)); 1706f51f9826STiwei Bie 17071ce9e605STiwei Bie LAST_ADD_TIME_INVALID(vq); 17081ce9e605STiwei Bie 17091ce9e605STiwei Bie END_USE(vq); 17101ce9e605STiwei Bie return ret; 17111ce9e605STiwei Bie } 17121ce9e605STiwei Bie 17131ce9e605STiwei Bie static void virtqueue_disable_cb_packed(struct virtqueue *_vq) 17141ce9e605STiwei Bie { 17151ce9e605STiwei Bie struct vring_virtqueue *vq = to_vvq(_vq); 17161ce9e605STiwei Bie 17171ce9e605STiwei Bie if (vq->packed.event_flags_shadow != VRING_PACKED_EVENT_FLAG_DISABLE) { 17181ce9e605STiwei Bie vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE; 17196c0b057cSAlbert Huang 17206c0b057cSAlbert Huang /* 17216c0b057cSAlbert Huang * If device triggered an event already it won't trigger one again: 17226c0b057cSAlbert Huang * no need to disable. 17236c0b057cSAlbert Huang */ 17246c0b057cSAlbert Huang if (vq->event_triggered) 17256c0b057cSAlbert Huang return; 17266c0b057cSAlbert Huang 17271ce9e605STiwei Bie vq->packed.vring.driver->flags = 17281ce9e605STiwei Bie cpu_to_le16(vq->packed.event_flags_shadow); 17291ce9e605STiwei Bie } 17301ce9e605STiwei Bie } 17311ce9e605STiwei Bie 173231532340SSolomon Tan static unsigned int virtqueue_enable_cb_prepare_packed(struct virtqueue *_vq) 17331ce9e605STiwei Bie { 17341ce9e605STiwei Bie struct vring_virtqueue *vq = to_vvq(_vq); 17351ce9e605STiwei Bie 17361ce9e605STiwei Bie START_USE(vq); 17371ce9e605STiwei Bie 17381ce9e605STiwei Bie /* 17391ce9e605STiwei Bie * We optimistically turn back on interrupts, then check if there was 17401ce9e605STiwei Bie * more to do. 17411ce9e605STiwei Bie */ 17421ce9e605STiwei Bie 1743f51f9826STiwei Bie if (vq->event) { 1744f51f9826STiwei Bie vq->packed.vring.driver->off_wrap = 1745a7722890Shuangjie.albert cpu_to_le16(vq->last_used_idx); 1746f51f9826STiwei Bie /* 1747f51f9826STiwei Bie * We need to update event offset and event wrap 1748f51f9826STiwei Bie * counter first before updating event flags. 1749f51f9826STiwei Bie */ 1750f51f9826STiwei Bie virtio_wmb(vq->weak_barriers); 1751f51f9826STiwei Bie } 1752f51f9826STiwei Bie 17531ce9e605STiwei Bie if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) { 1754f51f9826STiwei Bie vq->packed.event_flags_shadow = vq->event ? 1755f51f9826STiwei Bie VRING_PACKED_EVENT_FLAG_DESC : 1756f51f9826STiwei Bie VRING_PACKED_EVENT_FLAG_ENABLE; 17571ce9e605STiwei Bie vq->packed.vring.driver->flags = 17581ce9e605STiwei Bie cpu_to_le16(vq->packed.event_flags_shadow); 17591ce9e605STiwei Bie } 17601ce9e605STiwei Bie 17611ce9e605STiwei Bie END_USE(vq); 1762a7722890Shuangjie.albert return vq->last_used_idx; 17631ce9e605STiwei Bie } 17641ce9e605STiwei Bie 17651ce9e605STiwei Bie static bool virtqueue_poll_packed(struct virtqueue *_vq, u16 off_wrap) 17661ce9e605STiwei Bie { 17671ce9e605STiwei Bie struct vring_virtqueue *vq = to_vvq(_vq); 17681ce9e605STiwei Bie bool wrap_counter; 17691ce9e605STiwei Bie u16 used_idx; 17701ce9e605STiwei Bie 17711ce9e605STiwei Bie wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR; 17721ce9e605STiwei Bie used_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR); 17731ce9e605STiwei Bie 17741ce9e605STiwei Bie return is_used_desc_packed(vq, used_idx, wrap_counter); 17751ce9e605STiwei Bie } 17761ce9e605STiwei Bie 17771ce9e605STiwei Bie static bool virtqueue_enable_cb_delayed_packed(struct virtqueue *_vq) 17781ce9e605STiwei Bie { 17791ce9e605STiwei Bie struct vring_virtqueue *vq = to_vvq(_vq); 1780a7722890Shuangjie.albert u16 used_idx, wrap_counter, last_used_idx; 1781f51f9826STiwei Bie u16 bufs; 17821ce9e605STiwei Bie 17831ce9e605STiwei Bie START_USE(vq); 17841ce9e605STiwei Bie 17851ce9e605STiwei Bie /* 17861ce9e605STiwei Bie * We optimistically turn back on interrupts, then check if there was 17871ce9e605STiwei Bie * more to do. 17881ce9e605STiwei Bie */ 17891ce9e605STiwei Bie 1790f51f9826STiwei Bie if (vq->event) { 1791f51f9826STiwei Bie /* TODO: tune this threshold */ 1792f51f9826STiwei Bie bufs = (vq->packed.vring.num - vq->vq.num_free) * 3 / 4; 1793a7722890Shuangjie.albert last_used_idx = READ_ONCE(vq->last_used_idx); 1794a7722890Shuangjie.albert wrap_counter = packed_used_wrap_counter(last_used_idx); 17951ce9e605STiwei Bie 1796a7722890Shuangjie.albert used_idx = packed_last_used(last_used_idx) + bufs; 1797f51f9826STiwei Bie if (used_idx >= vq->packed.vring.num) { 1798f51f9826STiwei Bie used_idx -= vq->packed.vring.num; 1799f51f9826STiwei Bie wrap_counter ^= 1; 1800f51f9826STiwei Bie } 1801f51f9826STiwei Bie 1802f51f9826STiwei Bie vq->packed.vring.driver->off_wrap = cpu_to_le16(used_idx | 1803f51f9826STiwei Bie (wrap_counter << VRING_PACKED_EVENT_F_WRAP_CTR)); 1804f51f9826STiwei Bie 1805f51f9826STiwei Bie /* 1806f51f9826STiwei Bie * We need to update event offset and event wrap 1807f51f9826STiwei Bie * counter first before updating event flags. 1808f51f9826STiwei Bie */ 1809f51f9826STiwei Bie virtio_wmb(vq->weak_barriers); 1810f51f9826STiwei Bie } 1811f51f9826STiwei Bie 18121ce9e605STiwei Bie if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) { 1813f51f9826STiwei Bie vq->packed.event_flags_shadow = vq->event ? 1814f51f9826STiwei Bie VRING_PACKED_EVENT_FLAG_DESC : 1815f51f9826STiwei Bie VRING_PACKED_EVENT_FLAG_ENABLE; 18161ce9e605STiwei Bie vq->packed.vring.driver->flags = 18171ce9e605STiwei Bie cpu_to_le16(vq->packed.event_flags_shadow); 18181ce9e605STiwei Bie } 18191ce9e605STiwei Bie 18201ce9e605STiwei Bie /* 18211ce9e605STiwei Bie * We need to update event suppression structure first 18221ce9e605STiwei Bie * before re-checking for more used buffers. 18231ce9e605STiwei Bie */ 18241ce9e605STiwei Bie virtio_mb(vq->weak_barriers); 18251ce9e605STiwei Bie 1826a7722890Shuangjie.albert last_used_idx = READ_ONCE(vq->last_used_idx); 1827a7722890Shuangjie.albert wrap_counter = packed_used_wrap_counter(last_used_idx); 1828a7722890Shuangjie.albert used_idx = packed_last_used(last_used_idx); 1829a7722890Shuangjie.albert if (is_used_desc_packed(vq, used_idx, wrap_counter)) { 18301ce9e605STiwei Bie END_USE(vq); 18311ce9e605STiwei Bie return false; 18321ce9e605STiwei Bie } 18331ce9e605STiwei Bie 18341ce9e605STiwei Bie END_USE(vq); 18351ce9e605STiwei Bie return true; 18361ce9e605STiwei Bie } 18371ce9e605STiwei Bie 18381ce9e605STiwei Bie static void *virtqueue_detach_unused_buf_packed(struct virtqueue *_vq) 18391ce9e605STiwei Bie { 18401ce9e605STiwei Bie struct vring_virtqueue *vq = to_vvq(_vq); 18411ce9e605STiwei Bie unsigned int i; 18421ce9e605STiwei Bie void *buf; 18431ce9e605STiwei Bie 18441ce9e605STiwei Bie START_USE(vq); 18451ce9e605STiwei Bie 18461ce9e605STiwei Bie for (i = 0; i < vq->packed.vring.num; i++) { 18471ce9e605STiwei Bie if (!vq->packed.desc_state[i].data) 18481ce9e605STiwei Bie continue; 18491ce9e605STiwei Bie /* detach_buf clears data, so grab it now. */ 18501ce9e605STiwei Bie buf = vq->packed.desc_state[i].data; 18511ce9e605STiwei Bie detach_buf_packed(vq, i, NULL); 18521ce9e605STiwei Bie END_USE(vq); 18531ce9e605STiwei Bie return buf; 18541ce9e605STiwei Bie } 18551ce9e605STiwei Bie /* That should have freed everything. */ 18561ce9e605STiwei Bie BUG_ON(vq->vq.num_free != vq->packed.vring.num); 18571ce9e605STiwei Bie 18581ce9e605STiwei Bie END_USE(vq); 18591ce9e605STiwei Bie return NULL; 18601ce9e605STiwei Bie } 18611ce9e605STiwei Bie 186296ef18a2SXuan Zhuo static struct vring_desc_extra *vring_alloc_desc_extra(unsigned int num) 18635a222421SJason Wang { 18645a222421SJason Wang struct vring_desc_extra *desc_extra; 18655a222421SJason Wang unsigned int i; 18665a222421SJason Wang 18675a222421SJason Wang desc_extra = kmalloc_array(num, sizeof(struct vring_desc_extra), 18685a222421SJason Wang GFP_KERNEL); 18695a222421SJason Wang if (!desc_extra) 18705a222421SJason Wang return NULL; 18715a222421SJason Wang 18725a222421SJason Wang memset(desc_extra, 0, num * sizeof(struct vring_desc_extra)); 18735a222421SJason Wang 18745a222421SJason Wang for (i = 0; i < num - 1; i++) 18755a222421SJason Wang desc_extra[i].next = i + 1; 18765a222421SJason Wang 18775a222421SJason Wang return desc_extra; 18785a222421SJason Wang } 18795a222421SJason Wang 18806356f8bbSXuan Zhuo static void vring_free_packed(struct vring_virtqueue_packed *vring_packed, 18812713ea3cSJason Wang struct virtio_device *vdev, 18822713ea3cSJason Wang struct device *dma_dev) 18836356f8bbSXuan Zhuo { 18846356f8bbSXuan Zhuo if (vring_packed->vring.desc) 18856356f8bbSXuan Zhuo vring_free_queue(vdev, vring_packed->ring_size_in_bytes, 18866356f8bbSXuan Zhuo vring_packed->vring.desc, 18872713ea3cSJason Wang vring_packed->ring_dma_addr, 18882713ea3cSJason Wang dma_dev); 18896356f8bbSXuan Zhuo 18906356f8bbSXuan Zhuo if (vring_packed->vring.driver) 18916356f8bbSXuan Zhuo vring_free_queue(vdev, vring_packed->event_size_in_bytes, 18926356f8bbSXuan Zhuo vring_packed->vring.driver, 18932713ea3cSJason Wang vring_packed->driver_event_dma_addr, 18942713ea3cSJason Wang dma_dev); 18956356f8bbSXuan Zhuo 18966356f8bbSXuan Zhuo if (vring_packed->vring.device) 18976356f8bbSXuan Zhuo vring_free_queue(vdev, vring_packed->event_size_in_bytes, 18986356f8bbSXuan Zhuo vring_packed->vring.device, 18992713ea3cSJason Wang vring_packed->device_event_dma_addr, 19002713ea3cSJason Wang dma_dev); 19016356f8bbSXuan Zhuo 19026356f8bbSXuan Zhuo kfree(vring_packed->desc_state); 19036356f8bbSXuan Zhuo kfree(vring_packed->desc_extra); 19046356f8bbSXuan Zhuo } 19056356f8bbSXuan Zhuo 19066b60b9c0SXuan Zhuo static int vring_alloc_queue_packed(struct vring_virtqueue_packed *vring_packed, 19076b60b9c0SXuan Zhuo struct virtio_device *vdev, 19082713ea3cSJason Wang u32 num, struct device *dma_dev) 19096b60b9c0SXuan Zhuo { 19106b60b9c0SXuan Zhuo struct vring_packed_desc *ring; 19116b60b9c0SXuan Zhuo struct vring_packed_desc_event *driver, *device; 19126b60b9c0SXuan Zhuo dma_addr_t ring_dma_addr, driver_event_dma_addr, device_event_dma_addr; 19136b60b9c0SXuan Zhuo size_t ring_size_in_bytes, event_size_in_bytes; 19146b60b9c0SXuan Zhuo 19156b60b9c0SXuan Zhuo ring_size_in_bytes = num * sizeof(struct vring_packed_desc); 19166b60b9c0SXuan Zhuo 19176b60b9c0SXuan Zhuo ring = vring_alloc_queue(vdev, ring_size_in_bytes, 19186b60b9c0SXuan Zhuo &ring_dma_addr, 19192713ea3cSJason Wang GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, 19202713ea3cSJason Wang dma_dev); 19216b60b9c0SXuan Zhuo if (!ring) 19226b60b9c0SXuan Zhuo goto err; 19236b60b9c0SXuan Zhuo 19246b60b9c0SXuan Zhuo vring_packed->vring.desc = ring; 19256b60b9c0SXuan Zhuo vring_packed->ring_dma_addr = ring_dma_addr; 19266b60b9c0SXuan Zhuo vring_packed->ring_size_in_bytes = ring_size_in_bytes; 19276b60b9c0SXuan Zhuo 19286b60b9c0SXuan Zhuo event_size_in_bytes = sizeof(struct vring_packed_desc_event); 19296b60b9c0SXuan Zhuo 19306b60b9c0SXuan Zhuo driver = vring_alloc_queue(vdev, event_size_in_bytes, 19316b60b9c0SXuan Zhuo &driver_event_dma_addr, 19322713ea3cSJason Wang GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, 19332713ea3cSJason Wang dma_dev); 19346b60b9c0SXuan Zhuo if (!driver) 19356b60b9c0SXuan Zhuo goto err; 19366b60b9c0SXuan Zhuo 19376b60b9c0SXuan Zhuo vring_packed->vring.driver = driver; 19386b60b9c0SXuan Zhuo vring_packed->event_size_in_bytes = event_size_in_bytes; 19396b60b9c0SXuan Zhuo vring_packed->driver_event_dma_addr = driver_event_dma_addr; 19406b60b9c0SXuan Zhuo 19416b60b9c0SXuan Zhuo device = vring_alloc_queue(vdev, event_size_in_bytes, 19426b60b9c0SXuan Zhuo &device_event_dma_addr, 19432713ea3cSJason Wang GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, 19442713ea3cSJason Wang dma_dev); 19456b60b9c0SXuan Zhuo if (!device) 19466b60b9c0SXuan Zhuo goto err; 19476b60b9c0SXuan Zhuo 19486b60b9c0SXuan Zhuo vring_packed->vring.device = device; 19496b60b9c0SXuan Zhuo vring_packed->device_event_dma_addr = device_event_dma_addr; 19506b60b9c0SXuan Zhuo 19516b60b9c0SXuan Zhuo vring_packed->vring.num = num; 19526b60b9c0SXuan Zhuo 19536b60b9c0SXuan Zhuo return 0; 19546b60b9c0SXuan Zhuo 19556b60b9c0SXuan Zhuo err: 19562713ea3cSJason Wang vring_free_packed(vring_packed, vdev, dma_dev); 19576b60b9c0SXuan Zhuo return -ENOMEM; 19586b60b9c0SXuan Zhuo } 19596b60b9c0SXuan Zhuo 1960ef3167cfSXuan Zhuo static int vring_alloc_state_extra_packed(struct vring_virtqueue_packed *vring_packed) 1961ef3167cfSXuan Zhuo { 1962ef3167cfSXuan Zhuo struct vring_desc_state_packed *state; 1963ef3167cfSXuan Zhuo struct vring_desc_extra *extra; 1964ef3167cfSXuan Zhuo u32 num = vring_packed->vring.num; 1965ef3167cfSXuan Zhuo 1966ef3167cfSXuan Zhuo state = kmalloc_array(num, sizeof(struct vring_desc_state_packed), GFP_KERNEL); 1967ef3167cfSXuan Zhuo if (!state) 1968ef3167cfSXuan Zhuo goto err_desc_state; 1969ef3167cfSXuan Zhuo 1970ef3167cfSXuan Zhuo memset(state, 0, num * sizeof(struct vring_desc_state_packed)); 1971ef3167cfSXuan Zhuo 1972ef3167cfSXuan Zhuo extra = vring_alloc_desc_extra(num); 1973ef3167cfSXuan Zhuo if (!extra) 1974ef3167cfSXuan Zhuo goto err_desc_extra; 1975ef3167cfSXuan Zhuo 1976ef3167cfSXuan Zhuo vring_packed->desc_state = state; 1977ef3167cfSXuan Zhuo vring_packed->desc_extra = extra; 1978ef3167cfSXuan Zhuo 1979ef3167cfSXuan Zhuo return 0; 1980ef3167cfSXuan Zhuo 1981ef3167cfSXuan Zhuo err_desc_extra: 1982ef3167cfSXuan Zhuo kfree(state); 1983ef3167cfSXuan Zhuo err_desc_state: 1984ef3167cfSXuan Zhuo return -ENOMEM; 1985ef3167cfSXuan Zhuo } 1986ef3167cfSXuan Zhuo 19871a107c87SXuan Zhuo static void virtqueue_vring_init_packed(struct vring_virtqueue_packed *vring_packed, 19881a107c87SXuan Zhuo bool callback) 19891a107c87SXuan Zhuo { 19901a107c87SXuan Zhuo vring_packed->next_avail_idx = 0; 19911a107c87SXuan Zhuo vring_packed->avail_wrap_counter = 1; 19921a107c87SXuan Zhuo vring_packed->event_flags_shadow = 0; 19931a107c87SXuan Zhuo vring_packed->avail_used_flags = 1 << VRING_PACKED_DESC_F_AVAIL; 19941a107c87SXuan Zhuo 19951a107c87SXuan Zhuo /* No callback? Tell other side not to bother us. */ 19961a107c87SXuan Zhuo if (!callback) { 19971a107c87SXuan Zhuo vring_packed->event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE; 19981a107c87SXuan Zhuo vring_packed->vring.driver->flags = 19991a107c87SXuan Zhuo cpu_to_le16(vring_packed->event_flags_shadow); 20001a107c87SXuan Zhuo } 20011a107c87SXuan Zhuo } 20021a107c87SXuan Zhuo 200351d649f1SXuan Zhuo static void virtqueue_vring_attach_packed(struct vring_virtqueue *vq, 200451d649f1SXuan Zhuo struct vring_virtqueue_packed *vring_packed) 200551d649f1SXuan Zhuo { 200651d649f1SXuan Zhuo vq->packed = *vring_packed; 200751d649f1SXuan Zhuo 200851d649f1SXuan Zhuo /* Put everything in free lists. */ 200951d649f1SXuan Zhuo vq->free_head = 0; 201051d649f1SXuan Zhuo } 201151d649f1SXuan Zhuo 201256775e14SXuan Zhuo static void virtqueue_reinit_packed(struct vring_virtqueue *vq) 201356775e14SXuan Zhuo { 201456775e14SXuan Zhuo memset(vq->packed.vring.device, 0, vq->packed.event_size_in_bytes); 201556775e14SXuan Zhuo memset(vq->packed.vring.driver, 0, vq->packed.event_size_in_bytes); 201656775e14SXuan Zhuo 201756775e14SXuan Zhuo /* we need to reset the desc.flags. For more, see is_used_desc_packed() */ 201856775e14SXuan Zhuo memset(vq->packed.vring.desc, 0, vq->packed.ring_size_in_bytes); 201956775e14SXuan Zhuo 202056775e14SXuan Zhuo virtqueue_init(vq, vq->packed.vring.num); 202156775e14SXuan Zhuo virtqueue_vring_init_packed(&vq->packed, !!vq->vq.callback); 202256775e14SXuan Zhuo } 202356775e14SXuan Zhuo 20241ce9e605STiwei Bie static struct virtqueue *vring_create_virtqueue_packed( 20251ce9e605STiwei Bie unsigned int index, 20261ce9e605STiwei Bie unsigned int num, 20271ce9e605STiwei Bie unsigned int vring_align, 20281ce9e605STiwei Bie struct virtio_device *vdev, 20291ce9e605STiwei Bie bool weak_barriers, 20301ce9e605STiwei Bie bool may_reduce_num, 20311ce9e605STiwei Bie bool context, 20321ce9e605STiwei Bie bool (*notify)(struct virtqueue *), 20331ce9e605STiwei Bie void (*callback)(struct virtqueue *), 20342713ea3cSJason Wang const char *name, 20352713ea3cSJason Wang struct device *dma_dev) 20361ce9e605STiwei Bie { 20376b60b9c0SXuan Zhuo struct vring_virtqueue_packed vring_packed = {}; 20381ce9e605STiwei Bie struct vring_virtqueue *vq; 2039ef3167cfSXuan Zhuo int err; 20401ce9e605STiwei Bie 20412713ea3cSJason Wang if (vring_alloc_queue_packed(&vring_packed, vdev, num, dma_dev)) 20421ce9e605STiwei Bie goto err_ring; 20431ce9e605STiwei Bie 20441ce9e605STiwei Bie vq = kmalloc(sizeof(*vq), GFP_KERNEL); 20451ce9e605STiwei Bie if (!vq) 20461ce9e605STiwei Bie goto err_vq; 20471ce9e605STiwei Bie 20481ce9e605STiwei Bie vq->vq.callback = callback; 20491ce9e605STiwei Bie vq->vq.vdev = vdev; 20501ce9e605STiwei Bie vq->vq.name = name; 20511ce9e605STiwei Bie vq->vq.index = index; 20524913e854SXuan Zhuo vq->vq.reset = false; 20531ce9e605STiwei Bie vq->we_own_ring = true; 20541ce9e605STiwei Bie vq->notify = notify; 20551ce9e605STiwei Bie vq->weak_barriers = weak_barriers; 2056c346dae4SJason Wang #ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION 20578b4ec69dSJason Wang vq->broken = true; 2058c346dae4SJason Wang #else 2059c346dae4SJason Wang vq->broken = false; 2060c346dae4SJason Wang #endif 20611ce9e605STiwei Bie vq->packed_ring = true; 20622713ea3cSJason Wang vq->dma_dev = dma_dev; 20631ce9e605STiwei Bie vq->use_dma_api = vring_use_dma_api(vdev); 20641ce9e605STiwei Bie 20651ce9e605STiwei Bie vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) && 20661ce9e605STiwei Bie !context; 20671ce9e605STiwei Bie vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX); 20681ce9e605STiwei Bie 206945383fb0STiwei Bie if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM)) 207045383fb0STiwei Bie vq->weak_barriers = false; 207145383fb0STiwei Bie 2072ef3167cfSXuan Zhuo err = vring_alloc_state_extra_packed(&vring_packed); 2073ef3167cfSXuan Zhuo if (err) 2074ef3167cfSXuan Zhuo goto err_state_extra; 20751ce9e605STiwei Bie 20761a107c87SXuan Zhuo virtqueue_vring_init_packed(&vring_packed, !!callback); 20771ce9e605STiwei Bie 20783a897128SXuan Zhuo virtqueue_init(vq, num); 207951d649f1SXuan Zhuo virtqueue_vring_attach_packed(vq, &vring_packed); 20803a897128SXuan Zhuo 20810e566c8fSParav Pandit spin_lock(&vdev->vqs_list_lock); 2082e152d8afSDan Carpenter list_add_tail(&vq->vq.list, &vdev->vqs); 20830e566c8fSParav Pandit spin_unlock(&vdev->vqs_list_lock); 20841ce9e605STiwei Bie return &vq->vq; 20851ce9e605STiwei Bie 2086ef3167cfSXuan Zhuo err_state_extra: 20871ce9e605STiwei Bie kfree(vq); 20881ce9e605STiwei Bie err_vq: 20892713ea3cSJason Wang vring_free_packed(&vring_packed, vdev, dma_dev); 20901ce9e605STiwei Bie err_ring: 20911ce9e605STiwei Bie return NULL; 20921ce9e605STiwei Bie } 20931ce9e605STiwei Bie 2094947f9fcfSXuan Zhuo static int virtqueue_resize_packed(struct virtqueue *_vq, u32 num) 2095947f9fcfSXuan Zhuo { 2096947f9fcfSXuan Zhuo struct vring_virtqueue_packed vring_packed = {}; 2097947f9fcfSXuan Zhuo struct vring_virtqueue *vq = to_vvq(_vq); 2098947f9fcfSXuan Zhuo struct virtio_device *vdev = _vq->vdev; 2099947f9fcfSXuan Zhuo int err; 2100947f9fcfSXuan Zhuo 21012713ea3cSJason Wang if (vring_alloc_queue_packed(&vring_packed, vdev, num, vring_dma_dev(vq))) 2102947f9fcfSXuan Zhuo goto err_ring; 2103947f9fcfSXuan Zhuo 2104947f9fcfSXuan Zhuo err = vring_alloc_state_extra_packed(&vring_packed); 2105947f9fcfSXuan Zhuo if (err) 2106947f9fcfSXuan Zhuo goto err_state_extra; 2107947f9fcfSXuan Zhuo 2108947f9fcfSXuan Zhuo vring_free(&vq->vq); 2109947f9fcfSXuan Zhuo 2110947f9fcfSXuan Zhuo virtqueue_vring_init_packed(&vring_packed, !!vq->vq.callback); 2111947f9fcfSXuan Zhuo 2112947f9fcfSXuan Zhuo virtqueue_init(vq, vring_packed.vring.num); 2113947f9fcfSXuan Zhuo virtqueue_vring_attach_packed(vq, &vring_packed); 2114947f9fcfSXuan Zhuo 2115947f9fcfSXuan Zhuo return 0; 2116947f9fcfSXuan Zhuo 2117947f9fcfSXuan Zhuo err_state_extra: 21182713ea3cSJason Wang vring_free_packed(&vring_packed, vdev, vring_dma_dev(vq)); 2119947f9fcfSXuan Zhuo err_ring: 2120947f9fcfSXuan Zhuo virtqueue_reinit_packed(vq); 2121947f9fcfSXuan Zhuo return -ENOMEM; 2122947f9fcfSXuan Zhuo } 2123947f9fcfSXuan Zhuo 21241ce9e605STiwei Bie 21251ce9e605STiwei Bie /* 2126e6f633e5STiwei Bie * Generic functions and exported symbols. 2127e6f633e5STiwei Bie */ 2128e6f633e5STiwei Bie 2129e6f633e5STiwei Bie static inline int virtqueue_add(struct virtqueue *_vq, 2130e6f633e5STiwei Bie struct scatterlist *sgs[], 2131e6f633e5STiwei Bie unsigned int total_sg, 2132e6f633e5STiwei Bie unsigned int out_sgs, 2133e6f633e5STiwei Bie unsigned int in_sgs, 2134e6f633e5STiwei Bie void *data, 2135e6f633e5STiwei Bie void *ctx, 2136e6f633e5STiwei Bie gfp_t gfp) 2137e6f633e5STiwei Bie { 21381ce9e605STiwei Bie struct vring_virtqueue *vq = to_vvq(_vq); 21391ce9e605STiwei Bie 21401ce9e605STiwei Bie return vq->packed_ring ? virtqueue_add_packed(_vq, sgs, total_sg, 21411ce9e605STiwei Bie out_sgs, in_sgs, data, ctx, gfp) : 21421ce9e605STiwei Bie virtqueue_add_split(_vq, sgs, total_sg, 2143e6f633e5STiwei Bie out_sgs, in_sgs, data, ctx, gfp); 2144e6f633e5STiwei Bie } 2145e6f633e5STiwei Bie 2146e6f633e5STiwei Bie /** 2147e6f633e5STiwei Bie * virtqueue_add_sgs - expose buffers to other end 2148a5581206SJiang Biao * @_vq: the struct virtqueue we're talking about. 2149e6f633e5STiwei Bie * @sgs: array of terminated scatterlists. 2150a5581206SJiang Biao * @out_sgs: the number of scatterlists readable by other side 2151a5581206SJiang Biao * @in_sgs: the number of scatterlists which are writable (after readable ones) 2152e6f633e5STiwei Bie * @data: the token identifying the buffer. 2153e6f633e5STiwei Bie * @gfp: how to do memory allocations (if necessary). 2154e6f633e5STiwei Bie * 2155e6f633e5STiwei Bie * Caller must ensure we don't call this with other virtqueue operations 2156e6f633e5STiwei Bie * at the same time (except where noted). 2157e6f633e5STiwei Bie * 2158e6f633e5STiwei Bie * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO). 2159e6f633e5STiwei Bie */ 2160e6f633e5STiwei Bie int virtqueue_add_sgs(struct virtqueue *_vq, 2161e6f633e5STiwei Bie struct scatterlist *sgs[], 2162e6f633e5STiwei Bie unsigned int out_sgs, 2163e6f633e5STiwei Bie unsigned int in_sgs, 2164e6f633e5STiwei Bie void *data, 2165e6f633e5STiwei Bie gfp_t gfp) 2166e6f633e5STiwei Bie { 2167e6f633e5STiwei Bie unsigned int i, total_sg = 0; 2168e6f633e5STiwei Bie 2169e6f633e5STiwei Bie /* Count them first. */ 2170e6f633e5STiwei Bie for (i = 0; i < out_sgs + in_sgs; i++) { 2171e6f633e5STiwei Bie struct scatterlist *sg; 2172e6f633e5STiwei Bie 2173e6f633e5STiwei Bie for (sg = sgs[i]; sg; sg = sg_next(sg)) 2174e6f633e5STiwei Bie total_sg++; 2175e6f633e5STiwei Bie } 2176e6f633e5STiwei Bie return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs, 2177e6f633e5STiwei Bie data, NULL, gfp); 2178e6f633e5STiwei Bie } 2179e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_add_sgs); 2180e6f633e5STiwei Bie 2181e6f633e5STiwei Bie /** 2182e6f633e5STiwei Bie * virtqueue_add_outbuf - expose output buffers to other end 2183e6f633e5STiwei Bie * @vq: the struct virtqueue we're talking about. 2184e6f633e5STiwei Bie * @sg: scatterlist (must be well-formed and terminated!) 2185e6f633e5STiwei Bie * @num: the number of entries in @sg readable by other side 2186e6f633e5STiwei Bie * @data: the token identifying the buffer. 2187e6f633e5STiwei Bie * @gfp: how to do memory allocations (if necessary). 2188e6f633e5STiwei Bie * 2189e6f633e5STiwei Bie * Caller must ensure we don't call this with other virtqueue operations 2190e6f633e5STiwei Bie * at the same time (except where noted). 2191e6f633e5STiwei Bie * 2192e6f633e5STiwei Bie * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO). 2193e6f633e5STiwei Bie */ 2194e6f633e5STiwei Bie int virtqueue_add_outbuf(struct virtqueue *vq, 2195e6f633e5STiwei Bie struct scatterlist *sg, unsigned int num, 2196e6f633e5STiwei Bie void *data, 2197e6f633e5STiwei Bie gfp_t gfp) 2198e6f633e5STiwei Bie { 2199e6f633e5STiwei Bie return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, gfp); 2200e6f633e5STiwei Bie } 2201e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_add_outbuf); 2202e6f633e5STiwei Bie 2203e6f633e5STiwei Bie /** 2204e6f633e5STiwei Bie * virtqueue_add_inbuf - expose input buffers to other end 2205e6f633e5STiwei Bie * @vq: the struct virtqueue we're talking about. 2206e6f633e5STiwei Bie * @sg: scatterlist (must be well-formed and terminated!) 2207e6f633e5STiwei Bie * @num: the number of entries in @sg writable by other side 2208e6f633e5STiwei Bie * @data: the token identifying the buffer. 2209e6f633e5STiwei Bie * @gfp: how to do memory allocations (if necessary). 2210e6f633e5STiwei Bie * 2211e6f633e5STiwei Bie * Caller must ensure we don't call this with other virtqueue operations 2212e6f633e5STiwei Bie * at the same time (except where noted). 2213e6f633e5STiwei Bie * 2214e6f633e5STiwei Bie * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO). 2215e6f633e5STiwei Bie */ 2216e6f633e5STiwei Bie int virtqueue_add_inbuf(struct virtqueue *vq, 2217e6f633e5STiwei Bie struct scatterlist *sg, unsigned int num, 2218e6f633e5STiwei Bie void *data, 2219e6f633e5STiwei Bie gfp_t gfp) 2220e6f633e5STiwei Bie { 2221e6f633e5STiwei Bie return virtqueue_add(vq, &sg, num, 0, 1, data, NULL, gfp); 2222e6f633e5STiwei Bie } 2223e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_add_inbuf); 2224e6f633e5STiwei Bie 2225e6f633e5STiwei Bie /** 2226e6f633e5STiwei Bie * virtqueue_add_inbuf_ctx - expose input buffers to other end 2227e6f633e5STiwei Bie * @vq: the struct virtqueue we're talking about. 2228e6f633e5STiwei Bie * @sg: scatterlist (must be well-formed and terminated!) 2229e6f633e5STiwei Bie * @num: the number of entries in @sg writable by other side 2230e6f633e5STiwei Bie * @data: the token identifying the buffer. 2231e6f633e5STiwei Bie * @ctx: extra context for the token 2232e6f633e5STiwei Bie * @gfp: how to do memory allocations (if necessary). 2233e6f633e5STiwei Bie * 2234e6f633e5STiwei Bie * Caller must ensure we don't call this with other virtqueue operations 2235e6f633e5STiwei Bie * at the same time (except where noted). 2236e6f633e5STiwei Bie * 2237e6f633e5STiwei Bie * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO). 2238e6f633e5STiwei Bie */ 2239e6f633e5STiwei Bie int virtqueue_add_inbuf_ctx(struct virtqueue *vq, 2240e6f633e5STiwei Bie struct scatterlist *sg, unsigned int num, 2241e6f633e5STiwei Bie void *data, 2242e6f633e5STiwei Bie void *ctx, 2243e6f633e5STiwei Bie gfp_t gfp) 2244e6f633e5STiwei Bie { 2245e6f633e5STiwei Bie return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, gfp); 2246e6f633e5STiwei Bie } 2247e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_ctx); 2248e6f633e5STiwei Bie 2249e6f633e5STiwei Bie /** 2250e6f633e5STiwei Bie * virtqueue_kick_prepare - first half of split virtqueue_kick call. 2251a5581206SJiang Biao * @_vq: the struct virtqueue 2252e6f633e5STiwei Bie * 2253e6f633e5STiwei Bie * Instead of virtqueue_kick(), you can do: 2254e6f633e5STiwei Bie * if (virtqueue_kick_prepare(vq)) 2255e6f633e5STiwei Bie * virtqueue_notify(vq); 2256e6f633e5STiwei Bie * 2257e6f633e5STiwei Bie * This is sometimes useful because the virtqueue_kick_prepare() needs 2258e6f633e5STiwei Bie * to be serialized, but the actual virtqueue_notify() call does not. 2259e6f633e5STiwei Bie */ 2260e6f633e5STiwei Bie bool virtqueue_kick_prepare(struct virtqueue *_vq) 2261e6f633e5STiwei Bie { 22621ce9e605STiwei Bie struct vring_virtqueue *vq = to_vvq(_vq); 22631ce9e605STiwei Bie 22641ce9e605STiwei Bie return vq->packed_ring ? virtqueue_kick_prepare_packed(_vq) : 22651ce9e605STiwei Bie virtqueue_kick_prepare_split(_vq); 2266e6f633e5STiwei Bie } 2267e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_kick_prepare); 2268e6f633e5STiwei Bie 2269e6f633e5STiwei Bie /** 2270e6f633e5STiwei Bie * virtqueue_notify - second half of split virtqueue_kick call. 2271a5581206SJiang Biao * @_vq: the struct virtqueue 2272e6f633e5STiwei Bie * 2273e6f633e5STiwei Bie * This does not need to be serialized. 2274e6f633e5STiwei Bie * 2275e6f633e5STiwei Bie * Returns false if host notify failed or queue is broken, otherwise true. 2276e6f633e5STiwei Bie */ 2277e6f633e5STiwei Bie bool virtqueue_notify(struct virtqueue *_vq) 2278e6f633e5STiwei Bie { 2279e6f633e5STiwei Bie struct vring_virtqueue *vq = to_vvq(_vq); 2280e6f633e5STiwei Bie 2281e6f633e5STiwei Bie if (unlikely(vq->broken)) 2282e6f633e5STiwei Bie return false; 2283e6f633e5STiwei Bie 2284e6f633e5STiwei Bie /* Prod other side to tell it about changes. */ 2285e6f633e5STiwei Bie if (!vq->notify(_vq)) { 2286e6f633e5STiwei Bie vq->broken = true; 2287e6f633e5STiwei Bie return false; 2288e6f633e5STiwei Bie } 2289e6f633e5STiwei Bie return true; 2290e6f633e5STiwei Bie } 2291e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_notify); 2292e6f633e5STiwei Bie 2293e6f633e5STiwei Bie /** 2294e6f633e5STiwei Bie * virtqueue_kick - update after add_buf 2295e6f633e5STiwei Bie * @vq: the struct virtqueue 2296e6f633e5STiwei Bie * 2297e6f633e5STiwei Bie * After one or more virtqueue_add_* calls, invoke this to kick 2298e6f633e5STiwei Bie * the other side. 2299e6f633e5STiwei Bie * 2300e6f633e5STiwei Bie * Caller must ensure we don't call this with other virtqueue 2301e6f633e5STiwei Bie * operations at the same time (except where noted). 2302e6f633e5STiwei Bie * 2303e6f633e5STiwei Bie * Returns false if kick failed, otherwise true. 2304e6f633e5STiwei Bie */ 2305e6f633e5STiwei Bie bool virtqueue_kick(struct virtqueue *vq) 2306e6f633e5STiwei Bie { 2307e6f633e5STiwei Bie if (virtqueue_kick_prepare(vq)) 2308e6f633e5STiwei Bie return virtqueue_notify(vq); 2309e6f633e5STiwei Bie return true; 2310e6f633e5STiwei Bie } 2311e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_kick); 2312e6f633e5STiwei Bie 2313e6f633e5STiwei Bie /** 231431c11db6SYang Li * virtqueue_get_buf_ctx - get the next used buffer 2315a5581206SJiang Biao * @_vq: the struct virtqueue we're talking about. 2316e6f633e5STiwei Bie * @len: the length written into the buffer 2317a5581206SJiang Biao * @ctx: extra context for the token 2318e6f633e5STiwei Bie * 2319e6f633e5STiwei Bie * If the device wrote data into the buffer, @len will be set to the 2320e6f633e5STiwei Bie * amount written. This means you don't need to clear the buffer 2321e6f633e5STiwei Bie * beforehand to ensure there's no data leakage in the case of short 2322e6f633e5STiwei Bie * writes. 2323e6f633e5STiwei Bie * 2324e6f633e5STiwei Bie * Caller must ensure we don't call this with other virtqueue 2325e6f633e5STiwei Bie * operations at the same time (except where noted). 2326e6f633e5STiwei Bie * 2327e6f633e5STiwei Bie * Returns NULL if there are no used buffers, or the "data" token 2328e6f633e5STiwei Bie * handed to virtqueue_add_*(). 2329e6f633e5STiwei Bie */ 2330e6f633e5STiwei Bie void *virtqueue_get_buf_ctx(struct virtqueue *_vq, unsigned int *len, 2331e6f633e5STiwei Bie void **ctx) 2332e6f633e5STiwei Bie { 23331ce9e605STiwei Bie struct vring_virtqueue *vq = to_vvq(_vq); 23341ce9e605STiwei Bie 23351ce9e605STiwei Bie return vq->packed_ring ? virtqueue_get_buf_ctx_packed(_vq, len, ctx) : 23361ce9e605STiwei Bie virtqueue_get_buf_ctx_split(_vq, len, ctx); 2337e6f633e5STiwei Bie } 2338e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_get_buf_ctx); 2339e6f633e5STiwei Bie 2340e6f633e5STiwei Bie void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len) 2341e6f633e5STiwei Bie { 2342e6f633e5STiwei Bie return virtqueue_get_buf_ctx(_vq, len, NULL); 2343e6f633e5STiwei Bie } 2344e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_get_buf); 2345e6f633e5STiwei Bie /** 2346e6f633e5STiwei Bie * virtqueue_disable_cb - disable callbacks 2347a5581206SJiang Biao * @_vq: the struct virtqueue we're talking about. 2348e6f633e5STiwei Bie * 2349e6f633e5STiwei Bie * Note that this is not necessarily synchronous, hence unreliable and only 2350e6f633e5STiwei Bie * useful as an optimization. 2351e6f633e5STiwei Bie * 2352e6f633e5STiwei Bie * Unlike other operations, this need not be serialized. 2353e6f633e5STiwei Bie */ 2354e6f633e5STiwei Bie void virtqueue_disable_cb(struct virtqueue *_vq) 2355e6f633e5STiwei Bie { 23561ce9e605STiwei Bie struct vring_virtqueue *vq = to_vvq(_vq); 23571ce9e605STiwei Bie 23581ce9e605STiwei Bie if (vq->packed_ring) 23591ce9e605STiwei Bie virtqueue_disable_cb_packed(_vq); 23601ce9e605STiwei Bie else 2361e6f633e5STiwei Bie virtqueue_disable_cb_split(_vq); 2362e6f633e5STiwei Bie } 2363e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_disable_cb); 2364e6f633e5STiwei Bie 2365e6f633e5STiwei Bie /** 2366e6f633e5STiwei Bie * virtqueue_enable_cb_prepare - restart callbacks after disable_cb 2367a5581206SJiang Biao * @_vq: the struct virtqueue we're talking about. 2368e6f633e5STiwei Bie * 2369e6f633e5STiwei Bie * This re-enables callbacks; it returns current queue state 2370e6f633e5STiwei Bie * in an opaque unsigned value. This value should be later tested by 2371e6f633e5STiwei Bie * virtqueue_poll, to detect a possible race between the driver checking for 2372e6f633e5STiwei Bie * more work, and enabling callbacks. 2373e6f633e5STiwei Bie * 2374e6f633e5STiwei Bie * Caller must ensure we don't call this with other virtqueue 2375e6f633e5STiwei Bie * operations at the same time (except where noted). 2376e6f633e5STiwei Bie */ 237731532340SSolomon Tan unsigned int virtqueue_enable_cb_prepare(struct virtqueue *_vq) 2378e6f633e5STiwei Bie { 23791ce9e605STiwei Bie struct vring_virtqueue *vq = to_vvq(_vq); 23801ce9e605STiwei Bie 23818d622d21SMichael S. Tsirkin if (vq->event_triggered) 23828d622d21SMichael S. Tsirkin vq->event_triggered = false; 23838d622d21SMichael S. Tsirkin 23841ce9e605STiwei Bie return vq->packed_ring ? virtqueue_enable_cb_prepare_packed(_vq) : 23851ce9e605STiwei Bie virtqueue_enable_cb_prepare_split(_vq); 2386e6f633e5STiwei Bie } 2387e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare); 2388e6f633e5STiwei Bie 2389e6f633e5STiwei Bie /** 2390e6f633e5STiwei Bie * virtqueue_poll - query pending used buffers 2391a5581206SJiang Biao * @_vq: the struct virtqueue we're talking about. 2392e6f633e5STiwei Bie * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare). 2393e6f633e5STiwei Bie * 2394e6f633e5STiwei Bie * Returns "true" if there are pending used buffers in the queue. 2395e6f633e5STiwei Bie * 2396e6f633e5STiwei Bie * This does not need to be serialized. 2397e6f633e5STiwei Bie */ 239831532340SSolomon Tan bool virtqueue_poll(struct virtqueue *_vq, unsigned int last_used_idx) 2399e6f633e5STiwei Bie { 2400e6f633e5STiwei Bie struct vring_virtqueue *vq = to_vvq(_vq); 2401e6f633e5STiwei Bie 2402481a0d74SMao Wenan if (unlikely(vq->broken)) 2403481a0d74SMao Wenan return false; 2404481a0d74SMao Wenan 2405e6f633e5STiwei Bie virtio_mb(vq->weak_barriers); 24061ce9e605STiwei Bie return vq->packed_ring ? virtqueue_poll_packed(_vq, last_used_idx) : 24071ce9e605STiwei Bie virtqueue_poll_split(_vq, last_used_idx); 2408e6f633e5STiwei Bie } 2409e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_poll); 2410e6f633e5STiwei Bie 2411e6f633e5STiwei Bie /** 2412e6f633e5STiwei Bie * virtqueue_enable_cb - restart callbacks after disable_cb. 2413a5581206SJiang Biao * @_vq: the struct virtqueue we're talking about. 2414e6f633e5STiwei Bie * 2415e6f633e5STiwei Bie * This re-enables callbacks; it returns "false" if there are pending 2416e6f633e5STiwei Bie * buffers in the queue, to detect a possible race between the driver 2417e6f633e5STiwei Bie * checking for more work, and enabling callbacks. 2418e6f633e5STiwei Bie * 2419e6f633e5STiwei Bie * Caller must ensure we don't call this with other virtqueue 2420e6f633e5STiwei Bie * operations at the same time (except where noted). 2421e6f633e5STiwei Bie */ 2422e6f633e5STiwei Bie bool virtqueue_enable_cb(struct virtqueue *_vq) 2423e6f633e5STiwei Bie { 242431532340SSolomon Tan unsigned int last_used_idx = virtqueue_enable_cb_prepare(_vq); 2425e6f633e5STiwei Bie 2426e6f633e5STiwei Bie return !virtqueue_poll(_vq, last_used_idx); 2427e6f633e5STiwei Bie } 2428e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_enable_cb); 2429e6f633e5STiwei Bie 2430e6f633e5STiwei Bie /** 2431e6f633e5STiwei Bie * virtqueue_enable_cb_delayed - restart callbacks after disable_cb. 2432a5581206SJiang Biao * @_vq: the struct virtqueue we're talking about. 2433e6f633e5STiwei Bie * 2434e6f633e5STiwei Bie * This re-enables callbacks but hints to the other side to delay 2435e6f633e5STiwei Bie * interrupts until most of the available buffers have been processed; 2436e6f633e5STiwei Bie * it returns "false" if there are many pending buffers in the queue, 2437e6f633e5STiwei Bie * to detect a possible race between the driver checking for more work, 2438e6f633e5STiwei Bie * and enabling callbacks. 2439e6f633e5STiwei Bie * 2440e6f633e5STiwei Bie * Caller must ensure we don't call this with other virtqueue 2441e6f633e5STiwei Bie * operations at the same time (except where noted). 2442e6f633e5STiwei Bie */ 2443e6f633e5STiwei Bie bool virtqueue_enable_cb_delayed(struct virtqueue *_vq) 2444e6f633e5STiwei Bie { 24451ce9e605STiwei Bie struct vring_virtqueue *vq = to_vvq(_vq); 24461ce9e605STiwei Bie 24478d622d21SMichael S. Tsirkin if (vq->event_triggered) 24488d622d21SMichael S. Tsirkin vq->event_triggered = false; 24498d622d21SMichael S. Tsirkin 24501ce9e605STiwei Bie return vq->packed_ring ? virtqueue_enable_cb_delayed_packed(_vq) : 24511ce9e605STiwei Bie virtqueue_enable_cb_delayed_split(_vq); 2452e6f633e5STiwei Bie } 2453e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed); 2454e6f633e5STiwei Bie 2455138fd251STiwei Bie /** 2456138fd251STiwei Bie * virtqueue_detach_unused_buf - detach first unused buffer 2457a5581206SJiang Biao * @_vq: the struct virtqueue we're talking about. 2458138fd251STiwei Bie * 2459138fd251STiwei Bie * Returns NULL or the "data" token handed to virtqueue_add_*(). 2460a62eecb3SXuan Zhuo * This is not valid on an active queue; it is useful for device 2461a62eecb3SXuan Zhuo * shutdown or the reset queue. 2462138fd251STiwei Bie */ 2463138fd251STiwei Bie void *virtqueue_detach_unused_buf(struct virtqueue *_vq) 2464138fd251STiwei Bie { 24651ce9e605STiwei Bie struct vring_virtqueue *vq = to_vvq(_vq); 24661ce9e605STiwei Bie 24671ce9e605STiwei Bie return vq->packed_ring ? virtqueue_detach_unused_buf_packed(_vq) : 24681ce9e605STiwei Bie virtqueue_detach_unused_buf_split(_vq); 2469138fd251STiwei Bie } 24707c5e9ed0SMichael S. Tsirkin EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf); 2471c021eac4SShirley Ma 2472138fd251STiwei Bie static inline bool more_used(const struct vring_virtqueue *vq) 2473138fd251STiwei Bie { 24741ce9e605STiwei Bie return vq->packed_ring ? more_used_packed(vq) : more_used_split(vq); 2475138fd251STiwei Bie } 2476138fd251STiwei Bie 24775c669c4aSRicardo Cañuelo /** 24785c669c4aSRicardo Cañuelo * vring_interrupt - notify a virtqueue on an interrupt 24795c669c4aSRicardo Cañuelo * @irq: the IRQ number (ignored) 24805c669c4aSRicardo Cañuelo * @_vq: the struct virtqueue to notify 24815c669c4aSRicardo Cañuelo * 24825c669c4aSRicardo Cañuelo * Calls the callback function of @_vq to process the virtqueue 24835c669c4aSRicardo Cañuelo * notification. 24845c669c4aSRicardo Cañuelo */ 24850a8a69ddSRusty Russell irqreturn_t vring_interrupt(int irq, void *_vq) 24860a8a69ddSRusty Russell { 24870a8a69ddSRusty Russell struct vring_virtqueue *vq = to_vvq(_vq); 24880a8a69ddSRusty Russell 24890a8a69ddSRusty Russell if (!more_used(vq)) { 24900a8a69ddSRusty Russell pr_debug("virtqueue interrupt with no work for %p\n", vq); 24910a8a69ddSRusty Russell return IRQ_NONE; 24920a8a69ddSRusty Russell } 24930a8a69ddSRusty Russell 24948b4ec69dSJason Wang if (unlikely(vq->broken)) { 2495c346dae4SJason Wang #ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION 24968b4ec69dSJason Wang dev_warn_once(&vq->vq.vdev->dev, 24978b4ec69dSJason Wang "virtio vring IRQ raised before DRIVER_OK"); 24988b4ec69dSJason Wang return IRQ_NONE; 2499c346dae4SJason Wang #else 2500c346dae4SJason Wang return IRQ_HANDLED; 2501c346dae4SJason Wang #endif 25028b4ec69dSJason Wang } 25030a8a69ddSRusty Russell 25048d622d21SMichael S. Tsirkin /* Just a hint for performance: so it's ok that this can be racy! */ 25058d622d21SMichael S. Tsirkin if (vq->event) 25068d622d21SMichael S. Tsirkin vq->event_triggered = true; 25078d622d21SMichael S. Tsirkin 25080a8a69ddSRusty Russell pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback); 250918445c4dSRusty Russell if (vq->vq.callback) 251018445c4dSRusty Russell vq->vq.callback(&vq->vq); 25110a8a69ddSRusty Russell 25120a8a69ddSRusty Russell return IRQ_HANDLED; 25130a8a69ddSRusty Russell } 2514c6fd4701SRusty Russell EXPORT_SYMBOL_GPL(vring_interrupt); 25150a8a69ddSRusty Russell 25161ce9e605STiwei Bie /* Only available for split ring */ 251707d9629dSXuan Zhuo static struct virtqueue *__vring_new_virtqueue(unsigned int index, 2518cd4c812aSXuan Zhuo struct vring_virtqueue_split *vring_split, 25190a8a69ddSRusty Russell struct virtio_device *vdev, 25207b21e34fSRusty Russell bool weak_barriers, 2521f94682ddSMichael S. Tsirkin bool context, 252246f9c2b9SHeinz Graalfs bool (*notify)(struct virtqueue *), 25239499f5e7SRusty Russell void (*callback)(struct virtqueue *), 25242713ea3cSJason Wang const char *name, 25252713ea3cSJason Wang struct device *dma_dev) 25260a8a69ddSRusty Russell { 25272a2d1382SAndy Lutomirski struct vring_virtqueue *vq; 2528a2b36c8dSXuan Zhuo int err; 25290a8a69ddSRusty Russell 25301ce9e605STiwei Bie if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED)) 25311ce9e605STiwei Bie return NULL; 25321ce9e605STiwei Bie 2533cbeedb72STiwei Bie vq = kmalloc(sizeof(*vq), GFP_KERNEL); 25340a8a69ddSRusty Russell if (!vq) 25350a8a69ddSRusty Russell return NULL; 25360a8a69ddSRusty Russell 25371ce9e605STiwei Bie vq->packed_ring = false; 25380a8a69ddSRusty Russell vq->vq.callback = callback; 25390a8a69ddSRusty Russell vq->vq.vdev = vdev; 25409499f5e7SRusty Russell vq->vq.name = name; 254106ca287dSRusty Russell vq->vq.index = index; 25424913e854SXuan Zhuo vq->vq.reset = false; 25432a2d1382SAndy Lutomirski vq->we_own_ring = false; 25440a8a69ddSRusty Russell vq->notify = notify; 25457b21e34fSRusty Russell vq->weak_barriers = weak_barriers; 2546c346dae4SJason Wang #ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION 25478b4ec69dSJason Wang vq->broken = true; 2548c346dae4SJason Wang #else 2549c346dae4SJason Wang vq->broken = false; 2550c346dae4SJason Wang #endif 25512713ea3cSJason Wang vq->dma_dev = dma_dev; 2552fb3fba6bSTiwei Bie vq->use_dma_api = vring_use_dma_api(vdev); 25530a8a69ddSRusty Russell 25545a08b04fSMichael S. Tsirkin vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) && 25555a08b04fSMichael S. Tsirkin !context; 2556a5c262c5SMichael S. Tsirkin vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX); 25579fa29b9dSMark McLoughlin 255845383fb0STiwei Bie if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM)) 255945383fb0STiwei Bie vq->weak_barriers = false; 256045383fb0STiwei Bie 2561a2b36c8dSXuan Zhuo err = vring_alloc_state_extra_split(vring_split); 2562a2b36c8dSXuan Zhuo if (err) { 2563a2b36c8dSXuan Zhuo kfree(vq); 2564a2b36c8dSXuan Zhuo return NULL; 2565a2b36c8dSXuan Zhuo } 256672b5e895SJason Wang 2567198fa7beSXuan Zhuo virtqueue_vring_init_split(vring_split, vq); 2568198fa7beSXuan Zhuo 2569cd4c812aSXuan Zhuo virtqueue_init(vq, vring_split->vring.num); 2570e1d6a423SXuan Zhuo virtqueue_vring_attach_split(vq, vring_split); 25713a897128SXuan Zhuo 25720e566c8fSParav Pandit spin_lock(&vdev->vqs_list_lock); 2573e152d8afSDan Carpenter list_add_tail(&vq->vq.list, &vdev->vqs); 25740e566c8fSParav Pandit spin_unlock(&vdev->vqs_list_lock); 25750a8a69ddSRusty Russell return &vq->vq; 25760a8a69ddSRusty Russell } 25772a2d1382SAndy Lutomirski 25782a2d1382SAndy Lutomirski struct virtqueue *vring_create_virtqueue( 25792a2d1382SAndy Lutomirski unsigned int index, 25802a2d1382SAndy Lutomirski unsigned int num, 25812a2d1382SAndy Lutomirski unsigned int vring_align, 25822a2d1382SAndy Lutomirski struct virtio_device *vdev, 25832a2d1382SAndy Lutomirski bool weak_barriers, 25842a2d1382SAndy Lutomirski bool may_reduce_num, 2585f94682ddSMichael S. Tsirkin bool context, 25862a2d1382SAndy Lutomirski bool (*notify)(struct virtqueue *), 25872a2d1382SAndy Lutomirski void (*callback)(struct virtqueue *), 25882a2d1382SAndy Lutomirski const char *name) 25892a2d1382SAndy Lutomirski { 25901ce9e605STiwei Bie 25911ce9e605STiwei Bie if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED)) 25921ce9e605STiwei Bie return vring_create_virtqueue_packed(index, num, vring_align, 25931ce9e605STiwei Bie vdev, weak_barriers, may_reduce_num, 25942713ea3cSJason Wang context, notify, callback, name, vdev->dev.parent); 25951ce9e605STiwei Bie 2596d79dca75STiwei Bie return vring_create_virtqueue_split(index, num, vring_align, 2597d79dca75STiwei Bie vdev, weak_barriers, may_reduce_num, 25982713ea3cSJason Wang context, notify, callback, name, vdev->dev.parent); 25992a2d1382SAndy Lutomirski } 26002a2d1382SAndy Lutomirski EXPORT_SYMBOL_GPL(vring_create_virtqueue); 26012a2d1382SAndy Lutomirski 26022713ea3cSJason Wang struct virtqueue *vring_create_virtqueue_dma( 26032713ea3cSJason Wang unsigned int index, 26042713ea3cSJason Wang unsigned int num, 26052713ea3cSJason Wang unsigned int vring_align, 26062713ea3cSJason Wang struct virtio_device *vdev, 26072713ea3cSJason Wang bool weak_barriers, 26082713ea3cSJason Wang bool may_reduce_num, 26092713ea3cSJason Wang bool context, 26102713ea3cSJason Wang bool (*notify)(struct virtqueue *), 26112713ea3cSJason Wang void (*callback)(struct virtqueue *), 26122713ea3cSJason Wang const char *name, 26132713ea3cSJason Wang struct device *dma_dev) 26142713ea3cSJason Wang { 26152713ea3cSJason Wang 26162713ea3cSJason Wang if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED)) 26172713ea3cSJason Wang return vring_create_virtqueue_packed(index, num, vring_align, 26182713ea3cSJason Wang vdev, weak_barriers, may_reduce_num, 26192713ea3cSJason Wang context, notify, callback, name, dma_dev); 26202713ea3cSJason Wang 26212713ea3cSJason Wang return vring_create_virtqueue_split(index, num, vring_align, 26222713ea3cSJason Wang vdev, weak_barriers, may_reduce_num, 26232713ea3cSJason Wang context, notify, callback, name, dma_dev); 26242713ea3cSJason Wang } 26252713ea3cSJason Wang EXPORT_SYMBOL_GPL(vring_create_virtqueue_dma); 26262713ea3cSJason Wang 2627c790e8e1SXuan Zhuo /** 2628c790e8e1SXuan Zhuo * virtqueue_resize - resize the vring of vq 2629c790e8e1SXuan Zhuo * @_vq: the struct virtqueue we're talking about. 2630c790e8e1SXuan Zhuo * @num: new ring num 2631c790e8e1SXuan Zhuo * @recycle: callback for recycle the useless buffer 2632c790e8e1SXuan Zhuo * 2633c790e8e1SXuan Zhuo * When it is really necessary to create a new vring, it will set the current vq 2634c790e8e1SXuan Zhuo * into the reset state. Then call the passed callback to recycle the buffer 2635c790e8e1SXuan Zhuo * that is no longer used. Only after the new vring is successfully created, the 2636c790e8e1SXuan Zhuo * old vring will be released. 2637c790e8e1SXuan Zhuo * 2638c790e8e1SXuan Zhuo * Caller must ensure we don't call this with other virtqueue operations 2639c790e8e1SXuan Zhuo * at the same time (except where noted). 2640c790e8e1SXuan Zhuo * 2641c790e8e1SXuan Zhuo * Returns zero or a negative error. 2642c790e8e1SXuan Zhuo * 0: success. 2643c790e8e1SXuan Zhuo * -ENOMEM: Failed to allocate a new ring, fall back to the original ring size. 2644c790e8e1SXuan Zhuo * vq can still work normally 2645c790e8e1SXuan Zhuo * -EBUSY: Failed to sync with device, vq may not work properly 2646c790e8e1SXuan Zhuo * -ENOENT: Transport or device not supported 2647c790e8e1SXuan Zhuo * -E2BIG/-EINVAL: num error 2648c790e8e1SXuan Zhuo * -EPERM: Operation not permitted 2649c790e8e1SXuan Zhuo * 2650c790e8e1SXuan Zhuo */ 2651c790e8e1SXuan Zhuo int virtqueue_resize(struct virtqueue *_vq, u32 num, 2652c790e8e1SXuan Zhuo void (*recycle)(struct virtqueue *vq, void *buf)) 2653c790e8e1SXuan Zhuo { 2654c790e8e1SXuan Zhuo struct vring_virtqueue *vq = to_vvq(_vq); 2655c790e8e1SXuan Zhuo struct virtio_device *vdev = vq->vq.vdev; 2656c790e8e1SXuan Zhuo void *buf; 2657c790e8e1SXuan Zhuo int err; 2658c790e8e1SXuan Zhuo 2659c790e8e1SXuan Zhuo if (!vq->we_own_ring) 2660c790e8e1SXuan Zhuo return -EPERM; 2661c790e8e1SXuan Zhuo 2662c790e8e1SXuan Zhuo if (num > vq->vq.num_max) 2663c790e8e1SXuan Zhuo return -E2BIG; 2664c790e8e1SXuan Zhuo 2665c790e8e1SXuan Zhuo if (!num) 2666c790e8e1SXuan Zhuo return -EINVAL; 2667c790e8e1SXuan Zhuo 2668c790e8e1SXuan Zhuo if ((vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num) == num) 2669c790e8e1SXuan Zhuo return 0; 2670c790e8e1SXuan Zhuo 2671c790e8e1SXuan Zhuo if (!vdev->config->disable_vq_and_reset) 2672c790e8e1SXuan Zhuo return -ENOENT; 2673c790e8e1SXuan Zhuo 2674c790e8e1SXuan Zhuo if (!vdev->config->enable_vq_after_reset) 2675c790e8e1SXuan Zhuo return -ENOENT; 2676c790e8e1SXuan Zhuo 2677c790e8e1SXuan Zhuo err = vdev->config->disable_vq_and_reset(_vq); 2678c790e8e1SXuan Zhuo if (err) 2679c790e8e1SXuan Zhuo return err; 2680c790e8e1SXuan Zhuo 2681c790e8e1SXuan Zhuo while ((buf = virtqueue_detach_unused_buf(_vq)) != NULL) 2682c790e8e1SXuan Zhuo recycle(_vq, buf); 2683c790e8e1SXuan Zhuo 2684c790e8e1SXuan Zhuo if (vq->packed_ring) 2685c790e8e1SXuan Zhuo err = virtqueue_resize_packed(_vq, num); 2686c790e8e1SXuan Zhuo else 2687c790e8e1SXuan Zhuo err = virtqueue_resize_split(_vq, num); 2688c790e8e1SXuan Zhuo 2689c790e8e1SXuan Zhuo if (vdev->config->enable_vq_after_reset(_vq)) 2690c790e8e1SXuan Zhuo return -EBUSY; 2691c790e8e1SXuan Zhuo 2692c790e8e1SXuan Zhuo return err; 2693c790e8e1SXuan Zhuo } 2694c790e8e1SXuan Zhuo EXPORT_SYMBOL_GPL(virtqueue_resize); 2695c790e8e1SXuan Zhuo 26961ce9e605STiwei Bie /* Only available for split ring */ 26972a2d1382SAndy Lutomirski struct virtqueue *vring_new_virtqueue(unsigned int index, 26982a2d1382SAndy Lutomirski unsigned int num, 26992a2d1382SAndy Lutomirski unsigned int vring_align, 27002a2d1382SAndy Lutomirski struct virtio_device *vdev, 27012a2d1382SAndy Lutomirski bool weak_barriers, 2702f94682ddSMichael S. Tsirkin bool context, 27032a2d1382SAndy Lutomirski void *pages, 27042a2d1382SAndy Lutomirski bool (*notify)(struct virtqueue *vq), 27052a2d1382SAndy Lutomirski void (*callback)(struct virtqueue *vq), 27062a2d1382SAndy Lutomirski const char *name) 27072a2d1382SAndy Lutomirski { 2708cd4c812aSXuan Zhuo struct vring_virtqueue_split vring_split = {}; 27091ce9e605STiwei Bie 27101ce9e605STiwei Bie if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED)) 27111ce9e605STiwei Bie return NULL; 27121ce9e605STiwei Bie 2713cd4c812aSXuan Zhuo vring_init(&vring_split.vring, num, pages, vring_align); 2714cd4c812aSXuan Zhuo return __vring_new_virtqueue(index, &vring_split, vdev, weak_barriers, 27152713ea3cSJason Wang context, notify, callback, name, 27162713ea3cSJason Wang vdev->dev.parent); 27172a2d1382SAndy Lutomirski } 2718c6fd4701SRusty Russell EXPORT_SYMBOL_GPL(vring_new_virtqueue); 27190a8a69ddSRusty Russell 27203ea19e32SXuan Zhuo static void vring_free(struct virtqueue *_vq) 27210a8a69ddSRusty Russell { 27222a2d1382SAndy Lutomirski struct vring_virtqueue *vq = to_vvq(_vq); 27232a2d1382SAndy Lutomirski 27242a2d1382SAndy Lutomirski if (vq->we_own_ring) { 27251ce9e605STiwei Bie if (vq->packed_ring) { 27261ce9e605STiwei Bie vring_free_queue(vq->vq.vdev, 27271ce9e605STiwei Bie vq->packed.ring_size_in_bytes, 27281ce9e605STiwei Bie vq->packed.vring.desc, 27292713ea3cSJason Wang vq->packed.ring_dma_addr, 27302713ea3cSJason Wang vring_dma_dev(vq)); 27311ce9e605STiwei Bie 27321ce9e605STiwei Bie vring_free_queue(vq->vq.vdev, 27331ce9e605STiwei Bie vq->packed.event_size_in_bytes, 27341ce9e605STiwei Bie vq->packed.vring.driver, 27352713ea3cSJason Wang vq->packed.driver_event_dma_addr, 27362713ea3cSJason Wang vring_dma_dev(vq)); 27371ce9e605STiwei Bie 27381ce9e605STiwei Bie vring_free_queue(vq->vq.vdev, 27391ce9e605STiwei Bie vq->packed.event_size_in_bytes, 27401ce9e605STiwei Bie vq->packed.vring.device, 27412713ea3cSJason Wang vq->packed.device_event_dma_addr, 27422713ea3cSJason Wang vring_dma_dev(vq)); 27431ce9e605STiwei Bie 27441ce9e605STiwei Bie kfree(vq->packed.desc_state); 27451ce9e605STiwei Bie kfree(vq->packed.desc_extra); 27461ce9e605STiwei Bie } else { 2747d79dca75STiwei Bie vring_free_queue(vq->vq.vdev, 2748d79dca75STiwei Bie vq->split.queue_size_in_bytes, 2749d79dca75STiwei Bie vq->split.vring.desc, 27502713ea3cSJason Wang vq->split.queue_dma_addr, 27512713ea3cSJason Wang vring_dma_dev(vq)); 2752f13f09a1SSuman Anna } 2753f13f09a1SSuman Anna } 275472b5e895SJason Wang if (!vq->packed_ring) { 2755cbeedb72STiwei Bie kfree(vq->split.desc_state); 275672b5e895SJason Wang kfree(vq->split.desc_extra); 275772b5e895SJason Wang } 27583ea19e32SXuan Zhuo } 27593ea19e32SXuan Zhuo 27603ea19e32SXuan Zhuo void vring_del_virtqueue(struct virtqueue *_vq) 27613ea19e32SXuan Zhuo { 27623ea19e32SXuan Zhuo struct vring_virtqueue *vq = to_vvq(_vq); 27633ea19e32SXuan Zhuo 27643ea19e32SXuan Zhuo spin_lock(&vq->vq.vdev->vqs_list_lock); 27653ea19e32SXuan Zhuo list_del(&_vq->list); 27663ea19e32SXuan Zhuo spin_unlock(&vq->vq.vdev->vqs_list_lock); 27673ea19e32SXuan Zhuo 27683ea19e32SXuan Zhuo vring_free(_vq); 27693ea19e32SXuan Zhuo 27702a2d1382SAndy Lutomirski kfree(vq); 27710a8a69ddSRusty Russell } 2772c6fd4701SRusty Russell EXPORT_SYMBOL_GPL(vring_del_virtqueue); 27730a8a69ddSRusty Russell 2774af8ececdSViktor Prutyanov u32 vring_notification_data(struct virtqueue *_vq) 2775af8ececdSViktor Prutyanov { 2776af8ececdSViktor Prutyanov struct vring_virtqueue *vq = to_vvq(_vq); 2777af8ececdSViktor Prutyanov u16 next; 2778af8ececdSViktor Prutyanov 2779af8ececdSViktor Prutyanov if (vq->packed_ring) 2780af8ececdSViktor Prutyanov next = (vq->packed.next_avail_idx & 2781af8ececdSViktor Prutyanov ~(-(1 << VRING_PACKED_EVENT_F_WRAP_CTR))) | 2782af8ececdSViktor Prutyanov vq->packed.avail_wrap_counter << 2783af8ececdSViktor Prutyanov VRING_PACKED_EVENT_F_WRAP_CTR; 2784af8ececdSViktor Prutyanov else 2785af8ececdSViktor Prutyanov next = vq->split.avail_idx_shadow; 2786af8ececdSViktor Prutyanov 2787af8ececdSViktor Prutyanov return next << 16 | _vq->index; 2788af8ececdSViktor Prutyanov } 2789af8ececdSViktor Prutyanov EXPORT_SYMBOL_GPL(vring_notification_data); 2790af8ececdSViktor Prutyanov 2791e34f8725SRusty Russell /* Manipulates transport-specific feature bits. */ 2792e34f8725SRusty Russell void vring_transport_features(struct virtio_device *vdev) 2793e34f8725SRusty Russell { 2794e34f8725SRusty Russell unsigned int i; 2795e34f8725SRusty Russell 2796e34f8725SRusty Russell for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) { 2797e34f8725SRusty Russell switch (i) { 27989fa29b9dSMark McLoughlin case VIRTIO_RING_F_INDIRECT_DESC: 27999fa29b9dSMark McLoughlin break; 2800a5c262c5SMichael S. Tsirkin case VIRTIO_RING_F_EVENT_IDX: 2801a5c262c5SMichael S. Tsirkin break; 2802747ae34aSMichael S. Tsirkin case VIRTIO_F_VERSION_1: 2803747ae34aSMichael S. Tsirkin break; 2804321bd212SMichael S. Tsirkin case VIRTIO_F_ACCESS_PLATFORM: 28051a937693SMichael S. Tsirkin break; 2806f959a128STiwei Bie case VIRTIO_F_RING_PACKED: 2807f959a128STiwei Bie break; 280845383fb0STiwei Bie case VIRTIO_F_ORDER_PLATFORM: 280945383fb0STiwei Bie break; 2810af8ececdSViktor Prutyanov case VIRTIO_F_NOTIFICATION_DATA: 2811af8ececdSViktor Prutyanov break; 2812e34f8725SRusty Russell default: 2813e34f8725SRusty Russell /* We don't understand this bit. */ 2814e16e12beSMichael S. Tsirkin __virtio_clear_bit(vdev, i); 2815e34f8725SRusty Russell } 2816e34f8725SRusty Russell } 2817e34f8725SRusty Russell } 2818e34f8725SRusty Russell EXPORT_SYMBOL_GPL(vring_transport_features); 2819e34f8725SRusty Russell 28205dfc1762SRusty Russell /** 28215dfc1762SRusty Russell * virtqueue_get_vring_size - return the size of the virtqueue's vring 2822a5581206SJiang Biao * @_vq: the struct virtqueue containing the vring of interest. 28235dfc1762SRusty Russell * 28245dfc1762SRusty Russell * Returns the size of the vring. This is mainly used for boasting to 28255dfc1762SRusty Russell * userspace. Unlike other operations, this need not be serialized. 28265dfc1762SRusty Russell */ 28274b6ec919SFeng Liu unsigned int virtqueue_get_vring_size(const struct virtqueue *_vq) 28288f9f4668SRick Jones { 28298f9f4668SRick Jones 28304b6ec919SFeng Liu const struct vring_virtqueue *vq = to_vvq(_vq); 28318f9f4668SRick Jones 28321ce9e605STiwei Bie return vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num; 28338f9f4668SRick Jones } 28348f9f4668SRick Jones EXPORT_SYMBOL_GPL(virtqueue_get_vring_size); 28358f9f4668SRick Jones 283632510631SXuan Zhuo /* 283732510631SXuan Zhuo * This function should only be called by the core, not directly by the driver. 283832510631SXuan Zhuo */ 283932510631SXuan Zhuo void __virtqueue_break(struct virtqueue *_vq) 284032510631SXuan Zhuo { 284132510631SXuan Zhuo struct vring_virtqueue *vq = to_vvq(_vq); 284232510631SXuan Zhuo 284332510631SXuan Zhuo /* Pairs with READ_ONCE() in virtqueue_is_broken(). */ 284432510631SXuan Zhuo WRITE_ONCE(vq->broken, true); 284532510631SXuan Zhuo } 284632510631SXuan Zhuo EXPORT_SYMBOL_GPL(__virtqueue_break); 284732510631SXuan Zhuo 284832510631SXuan Zhuo /* 284932510631SXuan Zhuo * This function should only be called by the core, not directly by the driver. 285032510631SXuan Zhuo */ 285132510631SXuan Zhuo void __virtqueue_unbreak(struct virtqueue *_vq) 285232510631SXuan Zhuo { 285332510631SXuan Zhuo struct vring_virtqueue *vq = to_vvq(_vq); 285432510631SXuan Zhuo 285532510631SXuan Zhuo /* Pairs with READ_ONCE() in virtqueue_is_broken(). */ 285632510631SXuan Zhuo WRITE_ONCE(vq->broken, false); 285732510631SXuan Zhuo } 285832510631SXuan Zhuo EXPORT_SYMBOL_GPL(__virtqueue_unbreak); 285932510631SXuan Zhuo 28604b6ec919SFeng Liu bool virtqueue_is_broken(const struct virtqueue *_vq) 2861b3b32c94SHeinz Graalfs { 28624b6ec919SFeng Liu const struct vring_virtqueue *vq = to_vvq(_vq); 2863b3b32c94SHeinz Graalfs 286460f07798SParav Pandit return READ_ONCE(vq->broken); 2865b3b32c94SHeinz Graalfs } 2866b3b32c94SHeinz Graalfs EXPORT_SYMBOL_GPL(virtqueue_is_broken); 2867b3b32c94SHeinz Graalfs 2868e2dcdfe9SRusty Russell /* 2869e2dcdfe9SRusty Russell * This should prevent the device from being used, allowing drivers to 2870e2dcdfe9SRusty Russell * recover. You may need to grab appropriate locks to flush. 2871e2dcdfe9SRusty Russell */ 2872e2dcdfe9SRusty Russell void virtio_break_device(struct virtio_device *dev) 2873e2dcdfe9SRusty Russell { 2874e2dcdfe9SRusty Russell struct virtqueue *_vq; 2875e2dcdfe9SRusty Russell 28760e566c8fSParav Pandit spin_lock(&dev->vqs_list_lock); 2877e2dcdfe9SRusty Russell list_for_each_entry(_vq, &dev->vqs, list) { 2878e2dcdfe9SRusty Russell struct vring_virtqueue *vq = to_vvq(_vq); 287960f07798SParav Pandit 288060f07798SParav Pandit /* Pairs with READ_ONCE() in virtqueue_is_broken(). */ 288160f07798SParav Pandit WRITE_ONCE(vq->broken, true); 2882e2dcdfe9SRusty Russell } 28830e566c8fSParav Pandit spin_unlock(&dev->vqs_list_lock); 2884e2dcdfe9SRusty Russell } 2885e2dcdfe9SRusty Russell EXPORT_SYMBOL_GPL(virtio_break_device); 2886e2dcdfe9SRusty Russell 2887be83f04dSJason Wang /* 2888be83f04dSJason Wang * This should allow the device to be used by the driver. You may 2889be83f04dSJason Wang * need to grab appropriate locks to flush the write to 2890be83f04dSJason Wang * vq->broken. This should only be used in some specific case e.g 2891be83f04dSJason Wang * (probing and restoring). This function should only be called by the 2892be83f04dSJason Wang * core, not directly by the driver. 2893be83f04dSJason Wang */ 2894be83f04dSJason Wang void __virtio_unbreak_device(struct virtio_device *dev) 2895be83f04dSJason Wang { 2896be83f04dSJason Wang struct virtqueue *_vq; 2897be83f04dSJason Wang 2898be83f04dSJason Wang spin_lock(&dev->vqs_list_lock); 2899be83f04dSJason Wang list_for_each_entry(_vq, &dev->vqs, list) { 2900be83f04dSJason Wang struct vring_virtqueue *vq = to_vvq(_vq); 2901be83f04dSJason Wang 2902be83f04dSJason Wang /* Pairs with READ_ONCE() in virtqueue_is_broken(). */ 2903be83f04dSJason Wang WRITE_ONCE(vq->broken, false); 2904be83f04dSJason Wang } 2905be83f04dSJason Wang spin_unlock(&dev->vqs_list_lock); 2906be83f04dSJason Wang } 2907be83f04dSJason Wang EXPORT_SYMBOL_GPL(__virtio_unbreak_device); 2908be83f04dSJason Wang 29094b6ec919SFeng Liu dma_addr_t virtqueue_get_desc_addr(const struct virtqueue *_vq) 291089062652SCornelia Huck { 29114b6ec919SFeng Liu const struct vring_virtqueue *vq = to_vvq(_vq); 291289062652SCornelia Huck 29132a2d1382SAndy Lutomirski BUG_ON(!vq->we_own_ring); 291489062652SCornelia Huck 29151ce9e605STiwei Bie if (vq->packed_ring) 29161ce9e605STiwei Bie return vq->packed.ring_dma_addr; 29171ce9e605STiwei Bie 2918d79dca75STiwei Bie return vq->split.queue_dma_addr; 29192a2d1382SAndy Lutomirski } 29202a2d1382SAndy Lutomirski EXPORT_SYMBOL_GPL(virtqueue_get_desc_addr); 29212a2d1382SAndy Lutomirski 29224b6ec919SFeng Liu dma_addr_t virtqueue_get_avail_addr(const struct virtqueue *_vq) 292389062652SCornelia Huck { 29244b6ec919SFeng Liu const struct vring_virtqueue *vq = to_vvq(_vq); 292589062652SCornelia Huck 29262a2d1382SAndy Lutomirski BUG_ON(!vq->we_own_ring); 29272a2d1382SAndy Lutomirski 29281ce9e605STiwei Bie if (vq->packed_ring) 29291ce9e605STiwei Bie return vq->packed.driver_event_dma_addr; 29301ce9e605STiwei Bie 2931d79dca75STiwei Bie return vq->split.queue_dma_addr + 2932e593bf97STiwei Bie ((char *)vq->split.vring.avail - (char *)vq->split.vring.desc); 293389062652SCornelia Huck } 29342a2d1382SAndy Lutomirski EXPORT_SYMBOL_GPL(virtqueue_get_avail_addr); 29352a2d1382SAndy Lutomirski 29364b6ec919SFeng Liu dma_addr_t virtqueue_get_used_addr(const struct virtqueue *_vq) 29372a2d1382SAndy Lutomirski { 29384b6ec919SFeng Liu const struct vring_virtqueue *vq = to_vvq(_vq); 29392a2d1382SAndy Lutomirski 29402a2d1382SAndy Lutomirski BUG_ON(!vq->we_own_ring); 29412a2d1382SAndy Lutomirski 29421ce9e605STiwei Bie if (vq->packed_ring) 29431ce9e605STiwei Bie return vq->packed.device_event_dma_addr; 29441ce9e605STiwei Bie 2945d79dca75STiwei Bie return vq->split.queue_dma_addr + 2946e593bf97STiwei Bie ((char *)vq->split.vring.used - (char *)vq->split.vring.desc); 29472a2d1382SAndy Lutomirski } 29482a2d1382SAndy Lutomirski EXPORT_SYMBOL_GPL(virtqueue_get_used_addr); 29492a2d1382SAndy Lutomirski 29501ce9e605STiwei Bie /* Only available for split ring */ 29514b6ec919SFeng Liu const struct vring *virtqueue_get_vring(const struct virtqueue *vq) 29522a2d1382SAndy Lutomirski { 2953e593bf97STiwei Bie return &to_vvq(vq)->split.vring; 29542a2d1382SAndy Lutomirski } 29552a2d1382SAndy Lutomirski EXPORT_SYMBOL_GPL(virtqueue_get_vring); 295689062652SCornelia Huck 2957c6fd4701SRusty Russell MODULE_LICENSE("GPL"); 2958