// SPDX-License-Identifier: GPL-2.0-or-later
/* Virtio ring implementation.
 *
 *  Copyright 2007 Rusty Russell IBM Corporation
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/dma-mapping.h>
#include <linux/kmsan.h>
#include <linux/spinlock.h>
#include <xen/xen.h>

#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		BUG();						\
	} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)						\
	do {							\
		if ((_vq)->in_use)				\
			panic("%s:in_use = %i\n",		\
			      (_vq)->vq.name, (_vq)->in_use);	\
		(_vq)->in_use = __LINE__;			\
	} while (0)
#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
#define LAST_ADD_TIME_UPDATE(_vq)				\
	do {							\
		ktime_t now = ktime_get();			\
								\
		/* No kick or get, with .1 second between?  Warn. */ \
		if ((_vq)->last_add_time_valid)			\
			WARN_ON(ktime_to_ms(ktime_sub(now,	\
				(_vq)->last_add_time)) > 100);	\
		(_vq)->last_add_time = now;			\
		(_vq)->last_add_time_valid = true;		\
	} while (0)
#define LAST_ADD_TIME_CHECK(_vq)				\
	do {							\
		if ((_vq)->last_add_time_valid) {		\
			WARN_ON(ktime_to_ms(ktime_sub(ktime_get(), \
				(_vq)->last_add_time)) > 100);	\
		}						\
	} while (0)
#define LAST_ADD_TIME_INVALID(_vq)				\
	((_vq)->last_add_time_valid = false)
#else
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&_vq->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		(_vq)->broken = true;				\
	} while (0)
#define START_USE(vq)
#define END_USE(vq)
#define LAST_ADD_TIME_UPDATE(vq)
#define LAST_ADD_TIME_CHECK(vq)
#define LAST_ADD_TIME_INVALID(vq)
#endif

struct vring_desc_state_split {
	void *data;			/* Data for callback. */
	struct vring_desc *indir_desc;	/* Indirect descriptor, if any. */
};

struct vring_desc_state_packed {
	void *data;			/* Data for callback. */
	struct vring_packed_desc *indir_desc; /* Indirect descriptor, if any. */
	u16 num;			/* Descriptor list length. */
	u16 last;			/* The last desc state in a list. */
};

struct vring_desc_extra {
	dma_addr_t addr;		/* Descriptor DMA addr. */
	u32 len;			/* Descriptor length. */
	u16 flags;			/* Descriptor flags. */
	u16 next;			/* The next desc state in a list. */
};

struct vring_virtqueue_split {
	/* Actual memory layout for this queue. */
	struct vring vring;

	/* Last written value to avail->flags */
	u16 avail_flags_shadow;

	/*
	 * Last written value to avail->idx in
	 * guest byte order.
	 */
	u16 avail_idx_shadow;

	/* Per-descriptor state. */
	struct vring_desc_state_split *desc_state;
	struct vring_desc_extra *desc_extra;

	/* DMA address and size information */
	dma_addr_t queue_dma_addr;
	size_t queue_size_in_bytes;

	/*
	 * The vring creation parameters, kept so that a new vring can be
	 * created with the same parameters (e.g. when resizing).
	 */
	u32 vring_align;
	bool may_reduce_num;
};

struct vring_virtqueue_packed {
	/* Actual memory layout for this queue. */
	struct {
		unsigned int num;
		struct vring_packed_desc *desc;
		struct vring_packed_desc_event *driver;
		struct vring_packed_desc_event *device;
	} vring;

	/* Driver ring wrap counter. */
	bool avail_wrap_counter;

	/* Avail used flags. */
	u16 avail_used_flags;

	/* Index of the next avail descriptor. */
	u16 next_avail_idx;

	/*
	 * Last written value to driver->flags in
	 * guest byte order.
	 */
	u16 event_flags_shadow;

	/* Per-descriptor state. */
	struct vring_desc_state_packed *desc_state;
	struct vring_desc_extra *desc_extra;

	/* DMA address and size information */
	dma_addr_t ring_dma_addr;
	dma_addr_t driver_event_dma_addr;
	dma_addr_t device_event_dma_addr;
	size_t ring_size_in_bytes;
	size_t event_size_in_bytes;
};

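/*
 * Bookkeeping note: for both layouts, desc_state[] holds the driver's
 * side of a descriptor (the opaque token and any indirect table),
 * while desc_extra[] shadows the device-visible addr/len/flags and
 * threads the free list through desc_extra[i].next, starting at
 * vq->free_head (see below).
 */
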
struct vring_virtqueue {
	struct virtqueue vq;

	/* Is this a packed ring? */
	bool packed_ring;

	/* Is DMA API used? */
	bool use_dma_api;

	/* Can we use weak barriers? */
	bool weak_barriers;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Host supports indirect buffers */
	bool indirect;

	/* Host publishes avail event idx */
	bool event;

	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen.
	 * For the split ring, it just contains the last used index.
	 * For the packed ring:
	 * bits up to VRING_PACKED_EVENT_F_WRAP_CTR contain the last used index;
	 * bits from VRING_PACKED_EVENT_F_WRAP_CTR contain the used wrap counter.
	 */
	u16 last_used_idx;

	/* Hint for event idx: already triggered no need to disable. */
	bool event_triggered;

	union {
		/* Available for split ring */
		struct vring_virtqueue_split split;

		/* Available for packed ring */
		struct vring_virtqueue_packed packed;
	};

	/* How to notify other side. FIXME: commonalize hcalls! */
	bool (*notify)(struct virtqueue *vq);

	/* DMA, allocation, and size information */
	bool we_own_ring;

	/* Device used for doing DMA */
	struct device *dma_dev;

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;

	/* Figure out if their kicks are too delayed. */
	bool last_add_time_valid;
	ktime_t last_add_time;
#endif
};

static struct virtqueue *__vring_new_virtqueue(unsigned int index,
					       struct vring_virtqueue_split *vring_split,
					       struct virtio_device *vdev,
					       bool weak_barriers,
					       bool context,
					       bool (*notify)(struct virtqueue *),
					       void (*callback)(struct virtqueue *),
					       const char *name,
					       struct device *dma_dev);
static struct vring_desc_extra *vring_alloc_desc_extra(unsigned int num);
static void vring_free(struct virtqueue *_vq);

/*
 * Helpers.
 */

#define to_vvq(_vq) container_of_const(_vq, struct vring_virtqueue, vq)

static bool virtqueue_use_indirect(const struct vring_virtqueue *vq,
				   unsigned int total_sg)
{
	/*
	 * If the host supports indirect descriptor tables, and we have multiple
	 * buffers, then go indirect. FIXME: tune this threshold
	 */
	return (vq->indirect && total_sg > 1 && vq->vq.num_free);
}

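/*
 * Cost example for the threshold above: with total_sg = 4, the
 * indirect path consumes one descriptor in the ring proper (pointing
 * at a 4-entry table allocated on the fly), while the direct path
 * would consume 4 of vq->vq.num_free.
 */
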
/*
 * Modern virtio devices have feature bits to specify whether they need a
 * quirk and bypass the IOMMU. If not there, just use the DMA API.
 *
 * If there, the interaction between virtio and DMA API is messy.
 *
 * On most systems with virtio, physical addresses match bus addresses,
 * and it doesn't particularly matter whether we use the DMA API.
 *
 * On some systems, including Xen and any system with a physical device
 * that speaks virtio behind a physical IOMMU, we must use the DMA API
 * for virtio DMA to work at all.
 *
 * On other systems, including SPARC and PPC64, virtio-pci devices are
 * enumerated as though they are behind an IOMMU, but the virtio host
 * ignores the IOMMU, so we must either pretend that the IOMMU isn't
 * there or somehow map everything as the identity.
 *
 * For the time being, we preserve historic behavior and bypass the DMA
 * API.
 *
 * TODO: install a per-device DMA ops structure that does the right thing
 * taking into account all the above quirks, and use the DMA API
 * unconditionally on data path.
 */

static bool vring_use_dma_api(const struct virtio_device *vdev)
{
	if (!virtio_has_dma_quirk(vdev))
		return true;

	/* Otherwise, we are left to guess. */
	/*
	 * In theory, it's possible to have a buggy QEMU-supplied
	 * emulated Q35 IOMMU and Xen enabled at the same time.  On
	 * such a configuration, virtio has never worked and will
	 * not work without an even larger kludge.  Instead, enable
	 * the DMA API if we're a Xen guest, which at least allows
	 * all of the sensible Xen configurations to work correctly.
	 */
	if (xen_domain())
		return true;

	return false;
}

size_t virtio_max_dma_size(const struct virtio_device *vdev)
{
	size_t max_segment_size = SIZE_MAX;

	if (vring_use_dma_api(vdev))
		max_segment_size = dma_max_mapping_size(vdev->dev.parent);

	return max_segment_size;
}
EXPORT_SYMBOL_GPL(virtio_max_dma_size);

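/*
 * Illustrative sketch (not part of this file): a driver would cap its
 * segment size with this helper before configuring its upper layer;
 * the destination of max_seg below is an assumption, e.g.:
 *
 *	u32 max_seg = min_t(size_t, virtio_max_dma_size(vdev), U32_MAX);
 *	...feed max_seg into the driver's queue limits...
 */
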
static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
			       dma_addr_t *dma_handle, gfp_t flag,
			       struct device *dma_dev)
{
	if (vring_use_dma_api(vdev)) {
		return dma_alloc_coherent(dma_dev, size,
					  dma_handle, flag);
	} else {
		void *queue = alloc_pages_exact(PAGE_ALIGN(size), flag);

		if (queue) {
			phys_addr_t phys_addr = virt_to_phys(queue);
			*dma_handle = (dma_addr_t)phys_addr;

			/*
			 * Sanity check: make sure we didn't truncate
			 * the address.  The only arches I can find that
			 * have 64-bit phys_addr_t but 32-bit dma_addr_t
			 * are certain non-highmem MIPS and x86
			 * configurations, but these configurations
			 * should never allocate physical pages above 32
			 * bits, so this is fine.  Just in case, throw a
			 * warning and abort if we end up with an
			 * unrepresentable address.
			 */
			if (WARN_ON_ONCE(*dma_handle != phys_addr)) {
				free_pages_exact(queue, PAGE_ALIGN(size));
				return NULL;
			}
		}
		return queue;
	}
}

static void vring_free_queue(struct virtio_device *vdev, size_t size,
			     void *queue, dma_addr_t dma_handle,
			     struct device *dma_dev)
{
	if (vring_use_dma_api(vdev))
		dma_free_coherent(dma_dev, size, queue, dma_handle);
	else
		free_pages_exact(queue, PAGE_ALIGN(size));
}

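/*
 * Worked example of the truncation check in vring_alloc_queue(): on a
 * hypothetical config with 64-bit phys_addr_t and 32-bit dma_addr_t, a
 * page at physical 0x100000000 would read back as 0x0 through the
 * dma_addr_t cast, so the WARN_ON_ONCE fires and the page is freed.
 */
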
/*
 * The DMA ops on various arches are rather gnarly right now, and
 * making all of the arch DMA ops work on the vring device itself
 * is a mess.
 */
static struct device *vring_dma_dev(const struct vring_virtqueue *vq)
{
	return vq->dma_dev;
}

/* Map one sg entry. */
static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
				   struct scatterlist *sg,
				   enum dma_data_direction direction)
{
	if (!vq->use_dma_api) {
		/*
		 * If DMA is not used, KMSAN doesn't know that the scatterlist
		 * is initialized by the hardware. Explicitly check/unpoison it
		 * depending on the direction.
		 */
		kmsan_handle_dma(sg_page(sg), sg->offset, sg->length, direction);
		return (dma_addr_t)sg_phys(sg);
	}

	/*
	 * We can't use dma_map_sg, because we don't use scatterlists in
	 * the way it expects (we don't guarantee that the scatterlist
	 * will exist for the lifetime of the mapping).
	 */
	return dma_map_page(vring_dma_dev(vq),
			    sg_page(sg), sg->offset, sg->length,
			    direction);
}

static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
				   void *cpu_addr, size_t size,
				   enum dma_data_direction direction)
{
	if (!vq->use_dma_api)
		return (dma_addr_t)virt_to_phys(cpu_addr);

	return dma_map_single(vring_dma_dev(vq),
			      cpu_addr, size, direction);
}

static int vring_mapping_error(const struct vring_virtqueue *vq,
			       dma_addr_t addr)
{
	if (!vq->use_dma_api)
		return 0;

	return dma_mapping_error(vring_dma_dev(vq), addr);
}

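/*
 * The helpers above are used in a map/check/unwind pattern; this is a
 * minimal sketch of what the add paths below do:
 *
 *	dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);
 *
 *	if (vring_mapping_error(vq, addr))
 *		goto unmap_release;	// undo mappings made so far
 */
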
static void virtqueue_init(struct vring_virtqueue *vq, u32 num)
{
	vq->vq.num_free = num;

	if (vq->packed_ring)
		vq->last_used_idx = 0 | (1 << VRING_PACKED_EVENT_F_WRAP_CTR);
	else
		vq->last_used_idx = 0;

	vq->event_triggered = false;
	vq->num_added = 0;

#ifdef DEBUG
	vq->in_use = false;
	vq->last_add_time_valid = false;
#endif
}


/*
 * Split ring specific functions - *_split().
 */

static void vring_unmap_one_split_indirect(const struct vring_virtqueue *vq,
					   const struct vring_desc *desc)
{
	u16 flags;

	if (!vq->use_dma_api)
		return;

	flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);

	dma_unmap_page(vring_dma_dev(vq),
		       virtio64_to_cpu(vq->vq.vdev, desc->addr),
		       virtio32_to_cpu(vq->vq.vdev, desc->len),
		       (flags & VRING_DESC_F_WRITE) ?
		       DMA_FROM_DEVICE : DMA_TO_DEVICE);
}

static unsigned int vring_unmap_one_split(const struct vring_virtqueue *vq,
					  unsigned int i)
{
	struct vring_desc_extra *extra = vq->split.desc_extra;
	u16 flags;

	if (!vq->use_dma_api)
		goto out;

	flags = extra[i].flags;

	if (flags & VRING_DESC_F_INDIRECT) {
		dma_unmap_single(vring_dma_dev(vq),
				 extra[i].addr,
				 extra[i].len,
				 (flags & VRING_DESC_F_WRITE) ?
				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
	} else {
		dma_unmap_page(vring_dma_dev(vq),
			       extra[i].addr,
			       extra[i].len,
			       (flags & VRING_DESC_F_WRITE) ?
			       DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}

out:
	return extra[i].next;
}

static struct vring_desc *alloc_indirect_split(struct virtqueue *_vq,
					       unsigned int total_sg,
					       gfp_t gfp)
{
	struct vring_desc *desc;
	unsigned int i;

	/*
	 * We require lowmem mappings for the descriptors because
	 * otherwise virt_to_phys will give us bogus addresses in the
	 * virtqueue.
	 */
	gfp &= ~__GFP_HIGHMEM;

	desc = kmalloc_array(total_sg, sizeof(struct vring_desc), gfp);
	if (!desc)
		return NULL;

	for (i = 0; i < total_sg; i++)
		desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1);
	return desc;
}

static inline unsigned int virtqueue_add_desc_split(struct virtqueue *vq,
						    struct vring_desc *desc,
						    unsigned int i,
						    dma_addr_t addr,
						    unsigned int len,
						    u16 flags,
						    bool indirect)
{
	struct vring_virtqueue *vring = to_vvq(vq);
	struct vring_desc_extra *extra = vring->split.desc_extra;
	u16 next;

	desc[i].flags = cpu_to_virtio16(vq->vdev, flags);
	desc[i].addr = cpu_to_virtio64(vq->vdev, addr);
	desc[i].len = cpu_to_virtio32(vq->vdev, len);

	if (!indirect) {
		next = extra[i].next;
		desc[i].next = cpu_to_virtio16(vq->vdev, next);

		extra[i].addr = addr;
		extra[i].len = len;
		extra[i].flags = flags;
	} else
		next = virtio16_to_cpu(vq->vdev, desc[i].next);

	return next;
}

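/*
 * Example: alloc_indirect_split(_vq, 3, gfp) returns a table whose
 * entries are pre-chained 0 -> 1 -> 2 via desc[i].next, so
 * virtqueue_add_split() below only fills in addr/len/flags as it
 * walks the table.
 */
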
static inline int virtqueue_add_split(struct virtqueue *_vq,
				      struct scatterlist *sgs[],
				      unsigned int total_sg,
				      unsigned int out_sgs,
				      unsigned int in_sgs,
				      void *data,
				      void *ctx,
				      gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	struct scatterlist *sg;
	struct vring_desc *desc;
	unsigned int i, n, avail, descs_used, prev, err_idx;
	int head;
	bool indirect;

	START_USE(vq);

	BUG_ON(data == NULL);
	BUG_ON(ctx && vq->indirect);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return -EIO;
	}

	LAST_ADD_TIME_UPDATE(vq);

	BUG_ON(total_sg == 0);

	head = vq->free_head;

	if (virtqueue_use_indirect(vq, total_sg))
		desc = alloc_indirect_split(_vq, total_sg, gfp);
	else {
		desc = NULL;
		WARN_ON_ONCE(total_sg > vq->split.vring.num && !vq->indirect);
	}

	if (desc) {
		/* Use a single buffer which doesn't continue */
		indirect = true;
		/* Set up rest to use this indirect table. */
		i = 0;
		descs_used = 1;
	} else {
		indirect = false;
		desc = vq->split.vring.desc;
		i = head;
		descs_used = total_sg;
	}

	if (unlikely(vq->vq.num_free < descs_used)) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 descs_used, vq->vq.num_free);
		/* FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer.  Presumably the
		 * host should service the ring ASAP. */
		if (out_sgs)
			vq->notify(&vq->vq);
		if (indirect)
			kfree(desc);
		END_USE(vq);
		return -ENOSPC;
	}

	for (n = 0; n < out_sgs; n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			prev = i;
			/* Note that we trust the indirect descriptor
			 * table since it uses streaming DMA mapping.
			 */
			i = virtqueue_add_desc_split(_vq, desc, i, addr, sg->length,
						     VRING_DESC_F_NEXT,
						     indirect);
		}
	}
	for (; n < (out_sgs + in_sgs); n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			prev = i;
			/* Note that we trust the indirect descriptor
			 * table since it uses streaming DMA mapping.
			 */
			i = virtqueue_add_desc_split(_vq, desc, i, addr,
						     sg->length,
						     VRING_DESC_F_NEXT |
						     VRING_DESC_F_WRITE,
						     indirect);
		}
	}
	/* Last one doesn't continue. */
	desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);
	if (!indirect && vq->use_dma_api)
		vq->split.desc_extra[prev & (vq->split.vring.num - 1)].flags &=
			~VRING_DESC_F_NEXT;

	if (indirect) {
		/* Now that the indirect table is filled in, map it. */
		dma_addr_t addr = vring_map_single(
			vq, desc, total_sg * sizeof(struct vring_desc),
			DMA_TO_DEVICE);
		if (vring_mapping_error(vq, addr))
			goto unmap_release;

		virtqueue_add_desc_split(_vq, vq->split.vring.desc,
					 head, addr,
					 total_sg * sizeof(struct vring_desc),
					 VRING_DESC_F_INDIRECT,
					 false);
	}

	/* We're using some buffers from the free list. */
	vq->vq.num_free -= descs_used;

	/* Update free pointer */
	if (indirect)
		vq->free_head = vq->split.desc_extra[head].next;
	else
		vq->free_head = i;

	/* Store token and indirect buffer state. */
	vq->split.desc_state[head].data = data;
	if (indirect)
		vq->split.desc_state[head].indir_desc = desc;
	else
		vq->split.desc_state[head].indir_desc = ctx;

	/* Put entry in available array (but don't update avail->idx until they
	 * do sync). */
	avail = vq->split.avail_idx_shadow & (vq->split.vring.num - 1);
	vq->split.vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);

	/* Descriptors and available array need to be set before we expose the
	 * new available array entries. */
	virtio_wmb(vq->weak_barriers);
	vq->split.avail_idx_shadow++;
	vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
						     vq->split.avail_idx_shadow);
	vq->num_added++;

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	/* This is very unlikely, but theoretically possible.  Kick
	 * just in case. */
	if (unlikely(vq->num_added == (1 << 16) - 1))
		virtqueue_kick(_vq);

	return 0;

unmap_release:
	err_idx = i;

	if (indirect)
		i = 0;
	else
		i = head;

	for (n = 0; n < total_sg; n++) {
		if (i == err_idx)
			break;
		if (indirect) {
			vring_unmap_one_split_indirect(vq, &desc[i]);
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		} else
			i = vring_unmap_one_split(vq, i);
	}

	if (indirect)
		kfree(desc);

	END_USE(vq);
	return -ENOMEM;
}

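/*
 * Driver-side sketch (illustrative only, kept out of the build): how a
 * request reaches virtqueue_add_split() above through the public API.
 * example_submit() and its request/response layout are hypothetical.
 */
#if 0
static int example_submit(struct virtqueue *vq, void *req, void *resp,
			  unsigned int req_len, unsigned int resp_len)
{
	struct scatterlist out, in, *sgs[2];
	int err;

	sg_init_one(&out, req, req_len);	/* device reads this */
	sg_init_one(&in, resp, resp_len);	/* device writes this */
	sgs[0] = &out;
	sgs[1] = &in;

	/* One out sg-list, one in sg-list; req doubles as the token. */
	err = virtqueue_add_sgs(vq, sgs, 1, 1, req, GFP_ATOMIC);
	if (err)
		return err;

	if (virtqueue_kick_prepare(vq))
		virtqueue_notify(vq);
	return 0;
}
#endif
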
static bool virtqueue_kick_prepare_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 new, old;
	bool needs_kick;

	START_USE(vq);
	/* We need to expose available array entries before checking avail
	 * event. */
	virtio_mb(vq->weak_barriers);

	old = vq->split.avail_idx_shadow - vq->num_added;
	new = vq->split.avail_idx_shadow;
	vq->num_added = 0;

	LAST_ADD_TIME_CHECK(vq);
	LAST_ADD_TIME_INVALID(vq);

	if (vq->event) {
		needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev,
					vring_avail_event(&vq->split.vring)),
					      new, old);
	} else {
		needs_kick = !(vq->split.vring.used->flags &
					cpu_to_virtio16(_vq->vdev,
						VRING_USED_F_NO_NOTIFY));
	}
	END_USE(vq);
	return needs_kick;
}

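/*
 * Worked example for the event-idx branch above: if the device set
 * avail_event = 3 and we moved avail->idx from old = 2 to new = 5, the
 * index the device asked about was crossed, and indeed vring_need_event()
 * computes (u16)(5 - 3 - 1) = 1 < (u16)(5 - 2) = 3, so we kick.  The
 * unsigned 16-bit arithmetic keeps this correct across index wraparound.
 */
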
static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
			     void **ctx)
{
	unsigned int i, j;
	__virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);

	/* Clear data ptr. */
	vq->split.desc_state[head].data = NULL;

	/* Put back on free list: unmap first-level descriptors and find end */
	i = head;

	while (vq->split.vring.desc[i].flags & nextflag) {
		vring_unmap_one_split(vq, i);
		i = vq->split.desc_extra[i].next;
		vq->vq.num_free++;
	}

	vring_unmap_one_split(vq, i);
	vq->split.desc_extra[i].next = vq->free_head;
	vq->free_head = head;

	/* Plus final descriptor */
	vq->vq.num_free++;

	if (vq->indirect) {
		struct vring_desc *indir_desc =
				vq->split.desc_state[head].indir_desc;
		u32 len;

		/* Free the indirect table, if any, now that it's unmapped. */
		if (!indir_desc)
			return;

		len = vq->split.desc_extra[head].len;

		BUG_ON(!(vq->split.desc_extra[head].flags &
				VRING_DESC_F_INDIRECT));
		BUG_ON(len == 0 || len % sizeof(struct vring_desc));

		for (j = 0; j < len / sizeof(struct vring_desc); j++)
			vring_unmap_one_split_indirect(vq, &indir_desc[j]);

		kfree(indir_desc);
		vq->split.desc_state[head].indir_desc = NULL;
	} else if (ctx) {
		*ctx = vq->split.desc_state[head].indir_desc;
	}
}

static bool more_used_split(const struct vring_virtqueue *vq)
{
	return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev,
			vq->split.vring.used->idx);
}

static void *virtqueue_get_buf_ctx_split(struct virtqueue *_vq,
					 unsigned int *len,
					 void **ctx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	void *ret;
	unsigned int i;
	u16 last_used;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used_split(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* Only get used array entries after they have been exposed by host. */
	virtio_rmb(vq->weak_barriers);

	last_used = (vq->last_used_idx & (vq->split.vring.num - 1));
	i = virtio32_to_cpu(_vq->vdev,
			vq->split.vring.used->ring[last_used].id);
	*len = virtio32_to_cpu(_vq->vdev,
			vq->split.vring.used->ring[last_used].len);

	if (unlikely(i >= vq->split.vring.num)) {
		BAD_RING(vq, "id %u out of range\n", i);
		return NULL;
	}
	if (unlikely(!vq->split.desc_state[i].data)) {
		BAD_RING(vq, "id %u is not a head!\n", i);
		return NULL;
	}

	/* detach_buf_split clears data, so grab it now. */
	ret = vq->split.desc_state[i].data;
	detach_buf_split(vq, i, ctx);
	vq->last_used_idx++;
	/* If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call. */
	if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT))
		virtio_store_mb(vq->weak_barriers,
				&vring_used_event(&vq->split.vring),
				cpu_to_virtio16(_vq->vdev, vq->last_used_idx));

	LAST_ADD_TIME_INVALID(vq);

	END_USE(vq);
	return ret;
}

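/*
 * Driver-side sketch (illustrative): completions are drained through
 * the public wrapper of the function above, typically from the vq
 * callback; handle_completion() is a hypothetical helper:
 *
 *	unsigned int len;
 *	void *buf;
 *
 *	while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
 *		handle_completion(buf, len);
 */
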
static void virtqueue_disable_cb_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
		vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
		if (vq->event)
			/* TODO: this is a hack. Figure out a cleaner value to write. */
			vring_used_event(&vq->split.vring) = 0x0;
		else
			vq->split.vring.avail->flags =
				cpu_to_virtio16(_vq->vdev,
						vq->split.avail_flags_shadow);
	}
}

static unsigned int virtqueue_enable_cb_prepare_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 last_used_idx;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->split.vring.avail->flags =
				cpu_to_virtio16(_vq->vdev,
						vq->split.avail_flags_shadow);
	}
	vring_used_event(&vq->split.vring) = cpu_to_virtio16(_vq->vdev,
			last_used_idx = vq->last_used_idx);
	END_USE(vq);
	return last_used_idx;
}

static bool virtqueue_poll_split(struct virtqueue *_vq, unsigned int last_used_idx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev,
			vq->split.vring.used->idx);
}

static bool virtqueue_enable_cb_delayed_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 bufs;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_USED_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always update the event index to keep code simple. */
	if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->split.vring.avail->flags =
				cpu_to_virtio16(_vq->vdev,
						vq->split.avail_flags_shadow);
	}
	/* TODO: tune this threshold */
	bufs = (u16)(vq->split.avail_idx_shadow - vq->last_used_idx) * 3 / 4;

	virtio_store_mb(vq->weak_barriers,
			&vring_used_event(&vq->split.vring),
			cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs));

	if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->split.vring.used->idx)
					- vq->last_used_idx) > bufs)) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}

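/*
 * The prepare/poll pair above enables the race-free re-enable idiom
 * drivers use (sketch):
 *
 *	opaque = virtqueue_enable_cb_prepare(vq);
 *	if (virtqueue_poll(vq, opaque)) {
 *		virtqueue_disable_cb(vq);
 *		// more buffers arrived in the window; keep processing
 *	}
 */
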
static void *virtqueue_detach_unused_buf_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i;
	void *buf;

	START_USE(vq);

	for (i = 0; i < vq->split.vring.num; i++) {
		if (!vq->split.desc_state[i].data)
			continue;
		/* detach_buf_split clears data, so grab it now. */
		buf = vq->split.desc_state[i].data;
		detach_buf_split(vq, i, NULL);
		vq->split.avail_idx_shadow--;
		vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
				vq->split.avail_idx_shadow);
		END_USE(vq);
		return buf;
	}
	/* That should have freed everything. */
	BUG_ON(vq->vq.num_free != vq->split.vring.num);

	END_USE(vq);
	return NULL;
}

static void virtqueue_vring_init_split(struct vring_virtqueue_split *vring_split,
				       struct vring_virtqueue *vq)
{
	struct virtio_device *vdev;

	vdev = vq->vq.vdev;

	vring_split->avail_flags_shadow = 0;
	vring_split->avail_idx_shadow = 0;

	/* No callback?  Tell other side not to bother us. */
	if (!vq->vq.callback) {
		vring_split->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vring_split->vring.avail->flags = cpu_to_virtio16(vdev,
					vring_split->avail_flags_shadow);
	}
}

static void virtqueue_reinit_split(struct vring_virtqueue *vq)
{
	int num;

	num = vq->split.vring.num;

	vq->split.vring.avail->flags = 0;
	vq->split.vring.avail->idx = 0;

	/* reset avail event */
	vq->split.vring.avail->ring[num] = 0;

	vq->split.vring.used->flags = 0;
	vq->split.vring.used->idx = 0;

	/* reset used event */
	*(__virtio16 *)&(vq->split.vring.used->ring[num]) = 0;

	virtqueue_init(vq, num);

	virtqueue_vring_init_split(&vq->split, vq);
}

static void virtqueue_vring_attach_split(struct vring_virtqueue *vq,
					 struct vring_virtqueue_split *vring_split)
{
	vq->split = *vring_split;

	/* Put everything in free lists. */
	vq->free_head = 0;
}

static int vring_alloc_state_extra_split(struct vring_virtqueue_split *vring_split)
{
	struct vring_desc_state_split *state;
	struct vring_desc_extra *extra;
	u32 num = vring_split->vring.num;

	state = kmalloc_array(num, sizeof(struct vring_desc_state_split), GFP_KERNEL);
	if (!state)
		goto err_state;

	extra = vring_alloc_desc_extra(num);
	if (!extra)
		goto err_extra;

	memset(state, 0, num * sizeof(struct vring_desc_state_split));

	vring_split->desc_state = state;
	vring_split->desc_extra = extra;
	return 0;

err_extra:
	kfree(state);
err_state:
	return -ENOMEM;
}

static void vring_free_split(struct vring_virtqueue_split *vring_split,
			     struct virtio_device *vdev, struct device *dma_dev)
{
	vring_free_queue(vdev, vring_split->queue_size_in_bytes,
			 vring_split->vring.desc,
			 vring_split->queue_dma_addr,
			 dma_dev);

	kfree(vring_split->desc_state);
	kfree(vring_split->desc_extra);
}

static int vring_alloc_queue_split(struct vring_virtqueue_split *vring_split,
				   struct virtio_device *vdev,
				   u32 num,
				   unsigned int vring_align,
				   bool may_reduce_num,
				   struct device *dma_dev)
{
	void *queue = NULL;
	dma_addr_t dma_addr;

	/* We assume num is a power of 2. */
	if (!is_power_of_2(num)) {
		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
		return -EINVAL;
	}

	/* TODO: allocate each queue chunk individually */
	for (; num && vring_size(num, vring_align) > PAGE_SIZE; num /= 2) {
		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
					  &dma_addr,
					  GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
					  dma_dev);
		if (queue)
			break;
		if (!may_reduce_num)
			return -ENOMEM;
	}

	if (!num)
		return -ENOMEM;

	if (!queue) {
		/* Try to get a single page. You are my only hope! */
		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
					  &dma_addr, GFP_KERNEL | __GFP_ZERO,
					  dma_dev);
	}
	if (!queue)
		return -ENOMEM;

	vring_init(&vring_split->vring, num, queue, vring_align);

	vring_split->queue_dma_addr = dma_addr;
	vring_split->queue_size_in_bytes = vring_size(num, vring_align);

	vring_split->vring_align = vring_align;
	vring_split->may_reduce_num = may_reduce_num;

	return 0;
}

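/*
 * Example: with num = 1024 and vring_align = PAGE_SIZE, a split ring
 * needs far more than one page (the descriptor table alone is 16 KiB),
 * so the loop above tries a large contiguous allocation and, when that
 * fails and may_reduce_num allows it, retries with 512, 256, ... until
 * an allocation succeeds or num hits zero.
 */
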
static struct virtqueue *vring_create_virtqueue_split(
	unsigned int index,
	unsigned int num,
	unsigned int vring_align,
	struct virtio_device *vdev,
	bool weak_barriers,
	bool may_reduce_num,
	bool context,
	bool (*notify)(struct virtqueue *),
	void (*callback)(struct virtqueue *),
	const char *name,
	struct device *dma_dev)
{
	struct vring_virtqueue_split vring_split = {};
	struct virtqueue *vq;
	int err;

	err = vring_alloc_queue_split(&vring_split, vdev, num, vring_align,
				      may_reduce_num, dma_dev);
	if (err)
		return NULL;

	vq = __vring_new_virtqueue(index, &vring_split, vdev, weak_barriers,
				   context, notify, callback, name, dma_dev);
	if (!vq) {
		vring_free_split(&vring_split, vdev, dma_dev);
		return NULL;
	}

	to_vvq(vq)->we_own_ring = true;

	return vq;
}

static int virtqueue_resize_split(struct virtqueue *_vq, u32 num)
{
	struct vring_virtqueue_split vring_split = {};
	struct vring_virtqueue *vq = to_vvq(_vq);
	struct virtio_device *vdev = _vq->vdev;
	int err;

	err = vring_alloc_queue_split(&vring_split, vdev, num,
				      vq->split.vring_align,
				      vq->split.may_reduce_num,
				      vring_dma_dev(vq));
	if (err)
		goto err;

	err = vring_alloc_state_extra_split(&vring_split);
	if (err)
		goto err_state_extra;

	vring_free(&vq->vq);

	virtqueue_vring_init_split(&vring_split, vq);

	virtqueue_init(vq, vring_split.vring.num);
	virtqueue_vring_attach_split(vq, &vring_split);

	return 0;

err_state_extra:
	vring_free_split(&vring_split, vdev, vring_dma_dev(vq));
err:
	virtqueue_reinit_split(vq);
	return -ENOMEM;
}


/*
 * Packed ring specific functions - *_packed().
 */
static bool packed_used_wrap_counter(u16 last_used_idx)
{
	return !!(last_used_idx & (1 << VRING_PACKED_EVENT_F_WRAP_CTR));
}

static u16 packed_last_used(u16 last_used_idx)
{
	return last_used_idx & ~(-(1 << VRING_PACKED_EVENT_F_WRAP_CTR));
}

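/*
 * Decode example for the helpers above: with
 * VRING_PACKED_EVENT_F_WRAP_CTR == 15, a last_used_idx of 0x8005
 * splits into used wrap counter 1 (bit 15) and last used index 5
 * (the low 15 bits).
 */
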
12341ce9e605STiwei Bie */ 12351ce9e605STiwei Bie gfp &= ~__GFP_HIGHMEM; 12361ce9e605STiwei Bie 12371ce9e605STiwei Bie desc = kmalloc_array(total_sg, sizeof(struct vring_packed_desc), gfp); 12381ce9e605STiwei Bie 12391ce9e605STiwei Bie return desc; 12401ce9e605STiwei Bie } 12411ce9e605STiwei Bie 12421ce9e605STiwei Bie static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq, 12431ce9e605STiwei Bie struct scatterlist *sgs[], 12441ce9e605STiwei Bie unsigned int total_sg, 12451ce9e605STiwei Bie unsigned int out_sgs, 12461ce9e605STiwei Bie unsigned int in_sgs, 12471ce9e605STiwei Bie void *data, 12481ce9e605STiwei Bie gfp_t gfp) 12491ce9e605STiwei Bie { 12501ce9e605STiwei Bie struct vring_packed_desc *desc; 12511ce9e605STiwei Bie struct scatterlist *sg; 12521ce9e605STiwei Bie unsigned int i, n, err_idx; 12531ce9e605STiwei Bie u16 head, id; 12541ce9e605STiwei Bie dma_addr_t addr; 12551ce9e605STiwei Bie 12561ce9e605STiwei Bie head = vq->packed.next_avail_idx; 12571ce9e605STiwei Bie desc = alloc_indirect_packed(total_sg, gfp); 1258fc6d70f4SXuan Zhuo if (!desc) 1259fc6d70f4SXuan Zhuo return -ENOMEM; 12601ce9e605STiwei Bie 12611ce9e605STiwei Bie if (unlikely(vq->vq.num_free < 1)) { 12621ce9e605STiwei Bie pr_debug("Can't add buf len 1 - avail = 0\n"); 1263df0bfe75SYueHaibing kfree(desc); 12641ce9e605STiwei Bie END_USE(vq); 12651ce9e605STiwei Bie return -ENOSPC; 12661ce9e605STiwei Bie } 12671ce9e605STiwei Bie 12681ce9e605STiwei Bie i = 0; 12691ce9e605STiwei Bie id = vq->free_head; 12701ce9e605STiwei Bie BUG_ON(id == vq->packed.vring.num); 12711ce9e605STiwei Bie 12721ce9e605STiwei Bie for (n = 0; n < out_sgs + in_sgs; n++) { 12731ce9e605STiwei Bie for (sg = sgs[n]; sg; sg = sg_next(sg)) { 12741ce9e605STiwei Bie addr = vring_map_one_sg(vq, sg, n < out_sgs ? 12751ce9e605STiwei Bie DMA_TO_DEVICE : DMA_FROM_DEVICE); 12761ce9e605STiwei Bie if (vring_mapping_error(vq, addr)) 12771ce9e605STiwei Bie goto unmap_release; 12781ce9e605STiwei Bie 12791ce9e605STiwei Bie desc[i].flags = cpu_to_le16(n < out_sgs ? 12801ce9e605STiwei Bie 0 : VRING_DESC_F_WRITE); 12811ce9e605STiwei Bie desc[i].addr = cpu_to_le64(addr); 12821ce9e605STiwei Bie desc[i].len = cpu_to_le32(sg->length); 12831ce9e605STiwei Bie i++; 12841ce9e605STiwei Bie } 12851ce9e605STiwei Bie } 12861ce9e605STiwei Bie 12871ce9e605STiwei Bie /* Now that the indirect table is filled in, map it. 
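 * One streaming DMA mapping covers the whole table (total_sg
 * entries), so the buffer consumes a single ring slot and a single
 * desc_extra entry regardless of how many scatterlist elements it
 * spans.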
*/ 12881ce9e605STiwei Bie addr = vring_map_single(vq, desc, 12891ce9e605STiwei Bie total_sg * sizeof(struct vring_packed_desc), 12901ce9e605STiwei Bie DMA_TO_DEVICE); 12911ce9e605STiwei Bie if (vring_mapping_error(vq, addr)) 12921ce9e605STiwei Bie goto unmap_release; 12931ce9e605STiwei Bie 12941ce9e605STiwei Bie vq->packed.vring.desc[head].addr = cpu_to_le64(addr); 12951ce9e605STiwei Bie vq->packed.vring.desc[head].len = cpu_to_le32(total_sg * 12961ce9e605STiwei Bie sizeof(struct vring_packed_desc)); 12971ce9e605STiwei Bie vq->packed.vring.desc[head].id = cpu_to_le16(id); 12981ce9e605STiwei Bie 12991ce9e605STiwei Bie if (vq->use_dma_api) { 13001ce9e605STiwei Bie vq->packed.desc_extra[id].addr = addr; 13011ce9e605STiwei Bie vq->packed.desc_extra[id].len = total_sg * 13021ce9e605STiwei Bie sizeof(struct vring_packed_desc); 13031ce9e605STiwei Bie vq->packed.desc_extra[id].flags = VRING_DESC_F_INDIRECT | 13041ce9e605STiwei Bie vq->packed.avail_used_flags; 13051ce9e605STiwei Bie } 13061ce9e605STiwei Bie 13071ce9e605STiwei Bie /* 13081ce9e605STiwei Bie * A driver MUST NOT make the first descriptor in the list 13091ce9e605STiwei Bie * available before all subsequent descriptors comprising 13101ce9e605STiwei Bie * the list are made available. 13111ce9e605STiwei Bie */ 13121ce9e605STiwei Bie virtio_wmb(vq->weak_barriers); 13131ce9e605STiwei Bie vq->packed.vring.desc[head].flags = cpu_to_le16(VRING_DESC_F_INDIRECT | 13141ce9e605STiwei Bie vq->packed.avail_used_flags); 13151ce9e605STiwei Bie 13161ce9e605STiwei Bie /* We're using some buffers from the free list. */ 13171ce9e605STiwei Bie vq->vq.num_free -= 1; 13181ce9e605STiwei Bie 13191ce9e605STiwei Bie /* Update free pointer */ 13201ce9e605STiwei Bie n = head + 1; 13211ce9e605STiwei Bie if (n >= vq->packed.vring.num) { 13221ce9e605STiwei Bie n = 0; 13231ce9e605STiwei Bie vq->packed.avail_wrap_counter ^= 1; 13241ce9e605STiwei Bie vq->packed.avail_used_flags ^= 13251ce9e605STiwei Bie 1 << VRING_PACKED_DESC_F_AVAIL | 13261ce9e605STiwei Bie 1 << VRING_PACKED_DESC_F_USED; 13271ce9e605STiwei Bie } 13281ce9e605STiwei Bie vq->packed.next_avail_idx = n; 1329aeef9b47SJason Wang vq->free_head = vq->packed.desc_extra[id].next; 13301ce9e605STiwei Bie 13311ce9e605STiwei Bie /* Store token and indirect buffer state. 
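 * num is 1 because the indirect buffer occupies exactly one
 * descriptor in the ring proper; detach_buf_packed() relies on this
 * when returning the slot to the free list.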
*/ 13321ce9e605STiwei Bie vq->packed.desc_state[id].num = 1; 13331ce9e605STiwei Bie vq->packed.desc_state[id].data = data; 13341ce9e605STiwei Bie vq->packed.desc_state[id].indir_desc = desc; 13351ce9e605STiwei Bie vq->packed.desc_state[id].last = id; 13361ce9e605STiwei Bie 13371ce9e605STiwei Bie vq->num_added += 1; 13381ce9e605STiwei Bie 13391ce9e605STiwei Bie pr_debug("Added buffer head %i to %p\n", head, vq); 13401ce9e605STiwei Bie END_USE(vq); 13411ce9e605STiwei Bie 13421ce9e605STiwei Bie return 0; 13431ce9e605STiwei Bie 13441ce9e605STiwei Bie unmap_release: 13451ce9e605STiwei Bie err_idx = i; 13461ce9e605STiwei Bie 13471ce9e605STiwei Bie for (i = 0; i < err_idx; i++) 13481ce9e605STiwei Bie vring_unmap_desc_packed(vq, &desc[i]); 13491ce9e605STiwei Bie 13501ce9e605STiwei Bie kfree(desc); 13511ce9e605STiwei Bie 13521ce9e605STiwei Bie END_USE(vq); 1353f7728002SHalil Pasic return -ENOMEM; 13541ce9e605STiwei Bie } 13551ce9e605STiwei Bie 13561ce9e605STiwei Bie static inline int virtqueue_add_packed(struct virtqueue *_vq, 13571ce9e605STiwei Bie struct scatterlist *sgs[], 13581ce9e605STiwei Bie unsigned int total_sg, 13591ce9e605STiwei Bie unsigned int out_sgs, 13601ce9e605STiwei Bie unsigned int in_sgs, 13611ce9e605STiwei Bie void *data, 13621ce9e605STiwei Bie void *ctx, 13631ce9e605STiwei Bie gfp_t gfp) 13641ce9e605STiwei Bie { 13651ce9e605STiwei Bie struct vring_virtqueue *vq = to_vvq(_vq); 13661ce9e605STiwei Bie struct vring_packed_desc *desc; 13671ce9e605STiwei Bie struct scatterlist *sg; 13681ce9e605STiwei Bie unsigned int i, n, c, descs_used, err_idx; 13693f649ab7SKees Cook __le16 head_flags, flags; 13703f649ab7SKees Cook u16 head, id, prev, curr, avail_used_flags; 1371fc6d70f4SXuan Zhuo int err; 13721ce9e605STiwei Bie 13731ce9e605STiwei Bie START_USE(vq); 13741ce9e605STiwei Bie 13751ce9e605STiwei Bie BUG_ON(data == NULL); 13761ce9e605STiwei Bie BUG_ON(ctx && vq->indirect); 13771ce9e605STiwei Bie 13781ce9e605STiwei Bie if (unlikely(vq->broken)) { 13791ce9e605STiwei Bie END_USE(vq); 13801ce9e605STiwei Bie return -EIO; 13811ce9e605STiwei Bie } 13821ce9e605STiwei Bie 13831ce9e605STiwei Bie LAST_ADD_TIME_UPDATE(vq); 13841ce9e605STiwei Bie 13851ce9e605STiwei Bie BUG_ON(total_sg == 0); 13861ce9e605STiwei Bie 138735c51e09SXianting Tian if (virtqueue_use_indirect(vq, total_sg)) { 1388fc6d70f4SXuan Zhuo err = virtqueue_add_indirect_packed(vq, sgs, total_sg, out_sgs, 1389fc6d70f4SXuan Zhuo in_sgs, data, gfp); 13901861ba62SMichael S. Tsirkin if (err != -ENOMEM) { 13911861ba62SMichael S. Tsirkin END_USE(vq); 1392fc6d70f4SXuan Zhuo return err; 13931861ba62SMichael S. 
Tsirkin } 1394fc6d70f4SXuan Zhuo 1395fc6d70f4SXuan Zhuo /* fall back on direct */ 1396fc6d70f4SXuan Zhuo } 13971ce9e605STiwei Bie 13981ce9e605STiwei Bie head = vq->packed.next_avail_idx; 13991ce9e605STiwei Bie avail_used_flags = vq->packed.avail_used_flags; 14001ce9e605STiwei Bie 14011ce9e605STiwei Bie WARN_ON_ONCE(total_sg > vq->packed.vring.num && !vq->indirect); 14021ce9e605STiwei Bie 14031ce9e605STiwei Bie desc = vq->packed.vring.desc; 14041ce9e605STiwei Bie i = head; 14051ce9e605STiwei Bie descs_used = total_sg; 14061ce9e605STiwei Bie 14071ce9e605STiwei Bie if (unlikely(vq->vq.num_free < descs_used)) { 14081ce9e605STiwei Bie pr_debug("Can't add buf len %i - avail = %i\n", 14091ce9e605STiwei Bie descs_used, vq->vq.num_free); 14101ce9e605STiwei Bie END_USE(vq); 14111ce9e605STiwei Bie return -ENOSPC; 14121ce9e605STiwei Bie } 14131ce9e605STiwei Bie 14141ce9e605STiwei Bie id = vq->free_head; 14151ce9e605STiwei Bie BUG_ON(id == vq->packed.vring.num); 14161ce9e605STiwei Bie 14171ce9e605STiwei Bie curr = id; 14181ce9e605STiwei Bie c = 0; 14191ce9e605STiwei Bie for (n = 0; n < out_sgs + in_sgs; n++) { 14201ce9e605STiwei Bie for (sg = sgs[n]; sg; sg = sg_next(sg)) { 14211ce9e605STiwei Bie dma_addr_t addr = vring_map_one_sg(vq, sg, n < out_sgs ? 14221ce9e605STiwei Bie DMA_TO_DEVICE : DMA_FROM_DEVICE); 14231ce9e605STiwei Bie if (vring_mapping_error(vq, addr)) 14241ce9e605STiwei Bie goto unmap_release; 14251ce9e605STiwei Bie 14261ce9e605STiwei Bie flags = cpu_to_le16(vq->packed.avail_used_flags | 14271ce9e605STiwei Bie (++c == total_sg ? 0 : VRING_DESC_F_NEXT) | 14281ce9e605STiwei Bie (n < out_sgs ? 0 : VRING_DESC_F_WRITE)); 14291ce9e605STiwei Bie if (i == head) 14301ce9e605STiwei Bie head_flags = flags; 14311ce9e605STiwei Bie else 14321ce9e605STiwei Bie desc[i].flags = flags; 14331ce9e605STiwei Bie 14341ce9e605STiwei Bie desc[i].addr = cpu_to_le64(addr); 14351ce9e605STiwei Bie desc[i].len = cpu_to_le32(sg->length); 14361ce9e605STiwei Bie desc[i].id = cpu_to_le16(id); 14371ce9e605STiwei Bie 14381ce9e605STiwei Bie if (unlikely(vq->use_dma_api)) { 14391ce9e605STiwei Bie vq->packed.desc_extra[curr].addr = addr; 14401ce9e605STiwei Bie vq->packed.desc_extra[curr].len = sg->length; 14411ce9e605STiwei Bie vq->packed.desc_extra[curr].flags = 14421ce9e605STiwei Bie le16_to_cpu(flags); 14431ce9e605STiwei Bie } 14441ce9e605STiwei Bie prev = curr; 1445aeef9b47SJason Wang curr = vq->packed.desc_extra[curr].next; 14461ce9e605STiwei Bie 14471ce9e605STiwei Bie if ((unlikely(++i >= vq->packed.vring.num))) { 14481ce9e605STiwei Bie i = 0; 14491ce9e605STiwei Bie vq->packed.avail_used_flags ^= 14501ce9e605STiwei Bie 1 << VRING_PACKED_DESC_F_AVAIL | 14511ce9e605STiwei Bie 1 << VRING_PACKED_DESC_F_USED; 14521ce9e605STiwei Bie } 14531ce9e605STiwei Bie } 14541ce9e605STiwei Bie } 14551ce9e605STiwei Bie 14561ce9e605STiwei Bie if (i < head) 14571ce9e605STiwei Bie vq->packed.avail_wrap_counter ^= 1; 14581ce9e605STiwei Bie 14591ce9e605STiwei Bie /* We're using some buffers from the free list. */ 14601ce9e605STiwei Bie vq->vq.num_free -= descs_used; 14611ce9e605STiwei Bie 14621ce9e605STiwei Bie /* Update free pointer */ 14631ce9e605STiwei Bie vq->packed.next_avail_idx = i; 14641ce9e605STiwei Bie vq->free_head = curr; 14651ce9e605STiwei Bie 14661ce9e605STiwei Bie /* Store token. 
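 * State is keyed by the buffer id rather than the ring position: the
 * device echoes this id back in the used descriptor, which is how
 * virtqueue_get_buf_ctx_packed() finds the token again even after the
 * descriptors have wrapped around the ring.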
*/ 14671ce9e605STiwei Bie vq->packed.desc_state[id].num = descs_used; 14681ce9e605STiwei Bie vq->packed.desc_state[id].data = data; 14691ce9e605STiwei Bie vq->packed.desc_state[id].indir_desc = ctx; 14701ce9e605STiwei Bie vq->packed.desc_state[id].last = prev; 14711ce9e605STiwei Bie 14721ce9e605STiwei Bie /* 14731ce9e605STiwei Bie * A driver MUST NOT make the first descriptor in the list 14741ce9e605STiwei Bie * available before all subsequent descriptors comprising 14751ce9e605STiwei Bie * the list are made available. 14761ce9e605STiwei Bie */ 14771ce9e605STiwei Bie virtio_wmb(vq->weak_barriers); 14781ce9e605STiwei Bie vq->packed.vring.desc[head].flags = head_flags; 14791ce9e605STiwei Bie vq->num_added += descs_used; 14801ce9e605STiwei Bie 14811ce9e605STiwei Bie pr_debug("Added buffer head %i to %p\n", head, vq); 14821ce9e605STiwei Bie END_USE(vq); 14831ce9e605STiwei Bie 14841ce9e605STiwei Bie return 0; 14851ce9e605STiwei Bie 14861ce9e605STiwei Bie unmap_release: 14871ce9e605STiwei Bie err_idx = i; 14881ce9e605STiwei Bie i = head; 148944593865SJason Wang curr = vq->free_head; 14901ce9e605STiwei Bie 14911ce9e605STiwei Bie vq->packed.avail_used_flags = avail_used_flags; 14921ce9e605STiwei Bie 14931ce9e605STiwei Bie for (n = 0; n < total_sg; n++) { 14941ce9e605STiwei Bie if (i == err_idx) 14951ce9e605STiwei Bie break; 1496d80dc15bSXuan Zhuo vring_unmap_extra_packed(vq, &vq->packed.desc_extra[curr]); 149744593865SJason Wang curr = vq->packed.desc_extra[curr].next; 14981ce9e605STiwei Bie i++; 14991ce9e605STiwei Bie if (i >= vq->packed.vring.num) 15001ce9e605STiwei Bie i = 0; 15011ce9e605STiwei Bie } 15021ce9e605STiwei Bie 15031ce9e605STiwei Bie END_USE(vq); 15041ce9e605STiwei Bie return -EIO; 15051ce9e605STiwei Bie } 15061ce9e605STiwei Bie 15071ce9e605STiwei Bie static bool virtqueue_kick_prepare_packed(struct virtqueue *_vq) 15081ce9e605STiwei Bie { 15091ce9e605STiwei Bie struct vring_virtqueue *vq = to_vvq(_vq); 1510f51f9826STiwei Bie u16 new, old, off_wrap, flags, wrap_counter, event_idx; 15111ce9e605STiwei Bie bool needs_kick; 15121ce9e605STiwei Bie union { 15131ce9e605STiwei Bie struct { 15141ce9e605STiwei Bie __le16 off_wrap; 15151ce9e605STiwei Bie __le16 flags; 15161ce9e605STiwei Bie }; 15171ce9e605STiwei Bie u32 u32; 15181ce9e605STiwei Bie } snapshot; 15191ce9e605STiwei Bie 15201ce9e605STiwei Bie START_USE(vq); 15211ce9e605STiwei Bie 15221ce9e605STiwei Bie /* 15231ce9e605STiwei Bie * We need to expose the new flags value before checking notification 15241ce9e605STiwei Bie * suppressions. 
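 *
 * Sketch of the event-idx path handled below: with old being
 * next_avail_idx before this batch and new its value afterwards, the
 * device asked to be kicked once the descriptor at event_idx (read
 * from the snapshot of the device event area) is made available;
 * vring_need_event() returns true exactly when event_idx lies in the
 * half-open window [old, new).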
15251ce9e605STiwei Bie */ 15261ce9e605STiwei Bie virtio_mb(vq->weak_barriers); 15271ce9e605STiwei Bie 1528f51f9826STiwei Bie old = vq->packed.next_avail_idx - vq->num_added; 1529f51f9826STiwei Bie new = vq->packed.next_avail_idx; 15301ce9e605STiwei Bie vq->num_added = 0; 15311ce9e605STiwei Bie 15321ce9e605STiwei Bie snapshot.u32 = *(u32 *)vq->packed.vring.device; 15331ce9e605STiwei Bie flags = le16_to_cpu(snapshot.flags); 15341ce9e605STiwei Bie 15351ce9e605STiwei Bie LAST_ADD_TIME_CHECK(vq); 15361ce9e605STiwei Bie LAST_ADD_TIME_INVALID(vq); 15371ce9e605STiwei Bie 1538f51f9826STiwei Bie if (flags != VRING_PACKED_EVENT_FLAG_DESC) { 15391ce9e605STiwei Bie needs_kick = (flags != VRING_PACKED_EVENT_FLAG_DISABLE); 1540f51f9826STiwei Bie goto out; 1541f51f9826STiwei Bie } 1542f51f9826STiwei Bie 1543f51f9826STiwei Bie off_wrap = le16_to_cpu(snapshot.off_wrap); 1544f51f9826STiwei Bie 1545f51f9826STiwei Bie wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR; 1546f51f9826STiwei Bie event_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR); 1547f51f9826STiwei Bie if (wrap_counter != vq->packed.avail_wrap_counter) 1548f51f9826STiwei Bie event_idx -= vq->packed.vring.num; 1549f51f9826STiwei Bie 1550f51f9826STiwei Bie needs_kick = vring_need_event(event_idx, new, old); 1551f51f9826STiwei Bie out: 15521ce9e605STiwei Bie END_USE(vq); 15531ce9e605STiwei Bie return needs_kick; 15541ce9e605STiwei Bie } 15551ce9e605STiwei Bie 15561ce9e605STiwei Bie static void detach_buf_packed(struct vring_virtqueue *vq, 15571ce9e605STiwei Bie unsigned int id, void **ctx) 15581ce9e605STiwei Bie { 15591ce9e605STiwei Bie struct vring_desc_state_packed *state = NULL; 15601ce9e605STiwei Bie struct vring_packed_desc *desc; 15611ce9e605STiwei Bie unsigned int i, curr; 15621ce9e605STiwei Bie 15631ce9e605STiwei Bie state = &vq->packed.desc_state[id]; 15641ce9e605STiwei Bie 15651ce9e605STiwei Bie /* Clear data ptr. */ 15661ce9e605STiwei Bie state->data = NULL; 15671ce9e605STiwei Bie 1568aeef9b47SJason Wang vq->packed.desc_extra[state->last].next = vq->free_head; 15691ce9e605STiwei Bie vq->free_head = id; 15701ce9e605STiwei Bie vq->vq.num_free += state->num; 15711ce9e605STiwei Bie 15721ce9e605STiwei Bie if (unlikely(vq->use_dma_api)) { 15731ce9e605STiwei Bie curr = id; 15741ce9e605STiwei Bie for (i = 0; i < state->num; i++) { 1575d80dc15bSXuan Zhuo vring_unmap_extra_packed(vq, 15761ce9e605STiwei Bie &vq->packed.desc_extra[curr]); 1577aeef9b47SJason Wang curr = vq->packed.desc_extra[curr].next; 15781ce9e605STiwei Bie } 15791ce9e605STiwei Bie } 15801ce9e605STiwei Bie 15811ce9e605STiwei Bie if (vq->indirect) { 15821ce9e605STiwei Bie u32 len; 15831ce9e605STiwei Bie 15841ce9e605STiwei Bie /* Free the indirect table, if any, now that it's unmapped. 
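 * When the DMA API is in use, desc_extra[id].len preserved the mapped
 * size of the table, so len / sizeof(struct vring_packed_desc)
 * recovers the number of entries to unmap before freeing it.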
*/ 15851ce9e605STiwei Bie desc = state->indir_desc; 15861ce9e605STiwei Bie if (!desc) 15871ce9e605STiwei Bie return; 15881ce9e605STiwei Bie 15891ce9e605STiwei Bie if (vq->use_dma_api) { 15901ce9e605STiwei Bie len = vq->packed.desc_extra[id].len; 15911ce9e605STiwei Bie for (i = 0; i < len / sizeof(struct vring_packed_desc); 15921ce9e605STiwei Bie i++) 15931ce9e605STiwei Bie vring_unmap_desc_packed(vq, &desc[i]); 15941ce9e605STiwei Bie } 15951ce9e605STiwei Bie kfree(desc); 15961ce9e605STiwei Bie state->indir_desc = NULL; 15971ce9e605STiwei Bie } else if (ctx) { 15981ce9e605STiwei Bie *ctx = state->indir_desc; 15991ce9e605STiwei Bie } 16001ce9e605STiwei Bie } 16011ce9e605STiwei Bie 16021ce9e605STiwei Bie static inline bool is_used_desc_packed(const struct vring_virtqueue *vq, 16031ce9e605STiwei Bie u16 idx, bool used_wrap_counter) 16041ce9e605STiwei Bie { 16051ce9e605STiwei Bie bool avail, used; 16061ce9e605STiwei Bie u16 flags; 16071ce9e605STiwei Bie 16081ce9e605STiwei Bie flags = le16_to_cpu(vq->packed.vring.desc[idx].flags); 16091ce9e605STiwei Bie avail = !!(flags & (1 << VRING_PACKED_DESC_F_AVAIL)); 16101ce9e605STiwei Bie used = !!(flags & (1 << VRING_PACKED_DESC_F_USED)); 16111ce9e605STiwei Bie 16121ce9e605STiwei Bie return avail == used && used == used_wrap_counter; 16131ce9e605STiwei Bie } 16141ce9e605STiwei Bie 16151adbd6b2SFeng Liu static bool more_used_packed(const struct vring_virtqueue *vq) 16161ce9e605STiwei Bie { 1617a7722890Shuangjie.albert u16 last_used; 1618a7722890Shuangjie.albert u16 last_used_idx; 1619a7722890Shuangjie.albert bool used_wrap_counter; 1620a7722890Shuangjie.albert 1621a7722890Shuangjie.albert last_used_idx = READ_ONCE(vq->last_used_idx); 1622a7722890Shuangjie.albert last_used = packed_last_used(last_used_idx); 1623a7722890Shuangjie.albert used_wrap_counter = packed_used_wrap_counter(last_used_idx); 1624a7722890Shuangjie.albert return is_used_desc_packed(vq, last_used, used_wrap_counter); 16251ce9e605STiwei Bie } 16261ce9e605STiwei Bie 16271ce9e605STiwei Bie static void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq, 16281ce9e605STiwei Bie unsigned int *len, 16291ce9e605STiwei Bie void **ctx) 16301ce9e605STiwei Bie { 16311ce9e605STiwei Bie struct vring_virtqueue *vq = to_vvq(_vq); 1632a7722890Shuangjie.albert u16 last_used, id, last_used_idx; 1633a7722890Shuangjie.albert bool used_wrap_counter; 16341ce9e605STiwei Bie void *ret; 16351ce9e605STiwei Bie 16361ce9e605STiwei Bie START_USE(vq); 16371ce9e605STiwei Bie 16381ce9e605STiwei Bie if (unlikely(vq->broken)) { 16391ce9e605STiwei Bie END_USE(vq); 16401ce9e605STiwei Bie return NULL; 16411ce9e605STiwei Bie } 16421ce9e605STiwei Bie 16431ce9e605STiwei Bie if (!more_used_packed(vq)) { 16441ce9e605STiwei Bie pr_debug("No more buffers in queue\n"); 16451ce9e605STiwei Bie END_USE(vq); 16461ce9e605STiwei Bie return NULL; 16471ce9e605STiwei Bie } 16481ce9e605STiwei Bie 16491ce9e605STiwei Bie /* Only get used elements after they have been exposed by host. 
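 * The virtio_rmb() below pairs with the device's ordering of writes:
 * the device fills in id/len before flipping the descriptor flags, so
 * once more_used_packed() has observed the flags we must not read
 * id/len speculatively ahead of that observation.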
*/ 16501ce9e605STiwei Bie virtio_rmb(vq->weak_barriers); 16511ce9e605STiwei Bie 1652a7722890Shuangjie.albert last_used_idx = READ_ONCE(vq->last_used_idx); 1653a7722890Shuangjie.albert used_wrap_counter = packed_used_wrap_counter(last_used_idx); 1654a7722890Shuangjie.albert last_used = packed_last_used(last_used_idx); 16551ce9e605STiwei Bie id = le16_to_cpu(vq->packed.vring.desc[last_used].id); 16561ce9e605STiwei Bie *len = le32_to_cpu(vq->packed.vring.desc[last_used].len); 16571ce9e605STiwei Bie 16581ce9e605STiwei Bie if (unlikely(id >= vq->packed.vring.num)) { 16591ce9e605STiwei Bie BAD_RING(vq, "id %u out of range\n", id); 16601ce9e605STiwei Bie return NULL; 16611ce9e605STiwei Bie } 16621ce9e605STiwei Bie if (unlikely(!vq->packed.desc_state[id].data)) { 16631ce9e605STiwei Bie BAD_RING(vq, "id %u is not a head!\n", id); 16641ce9e605STiwei Bie return NULL; 16651ce9e605STiwei Bie } 16661ce9e605STiwei Bie 16671ce9e605STiwei Bie /* detach_buf_packed clears data, so grab it now. */ 16681ce9e605STiwei Bie ret = vq->packed.desc_state[id].data; 16691ce9e605STiwei Bie detach_buf_packed(vq, id, ctx); 16701ce9e605STiwei Bie 1671a7722890Shuangjie.albert last_used += vq->packed.desc_state[id].num; 1672a7722890Shuangjie.albert if (unlikely(last_used >= vq->packed.vring.num)) { 1673a7722890Shuangjie.albert last_used -= vq->packed.vring.num; 1674a7722890Shuangjie.albert used_wrap_counter ^= 1; 16751ce9e605STiwei Bie } 16761ce9e605STiwei Bie 1677a7722890Shuangjie.albert last_used = (last_used | (used_wrap_counter << VRING_PACKED_EVENT_F_WRAP_CTR)); 1678a7722890Shuangjie.albert WRITE_ONCE(vq->last_used_idx, last_used); 1679a7722890Shuangjie.albert 1680f51f9826STiwei Bie /* 1681f51f9826STiwei Bie * If we expect an interrupt for the next entry, tell host 1682f51f9826STiwei Bie * by writing event index and flush out the write before 1683f51f9826STiwei Bie * the read in the next get_buf call. 1684f51f9826STiwei Bie */ 1685f51f9826STiwei Bie if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DESC) 1686f51f9826STiwei Bie virtio_store_mb(vq->weak_barriers, 1687f51f9826STiwei Bie &vq->packed.vring.driver->off_wrap, 1688a7722890Shuangjie.albert cpu_to_le16(vq->last_used_idx)); 1689f51f9826STiwei Bie 16901ce9e605STiwei Bie LAST_ADD_TIME_INVALID(vq); 16911ce9e605STiwei Bie 16921ce9e605STiwei Bie END_USE(vq); 16931ce9e605STiwei Bie return ret; 16941ce9e605STiwei Bie } 16951ce9e605STiwei Bie 16961ce9e605STiwei Bie static void virtqueue_disable_cb_packed(struct virtqueue *_vq) 16971ce9e605STiwei Bie { 16981ce9e605STiwei Bie struct vring_virtqueue *vq = to_vvq(_vq); 16991ce9e605STiwei Bie 17001ce9e605STiwei Bie if (vq->packed.event_flags_shadow != VRING_PACKED_EVENT_FLAG_DISABLE) { 17011ce9e605STiwei Bie vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE; 17021ce9e605STiwei Bie vq->packed.vring.driver->flags = 17031ce9e605STiwei Bie cpu_to_le16(vq->packed.event_flags_shadow); 17041ce9e605STiwei Bie } 17051ce9e605STiwei Bie } 17061ce9e605STiwei Bie 170731532340SSolomon Tan static unsigned int virtqueue_enable_cb_prepare_packed(struct virtqueue *_vq) 17081ce9e605STiwei Bie { 17091ce9e605STiwei Bie struct vring_virtqueue *vq = to_vvq(_vq); 17101ce9e605STiwei Bie 17111ce9e605STiwei Bie START_USE(vq); 17121ce9e605STiwei Bie 17131ce9e605STiwei Bie /* 17141ce9e605STiwei Bie * We optimistically turn back on interrupts, then check if there was 17151ce9e605STiwei Bie * more to do. 
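 *
 * The opaque value returned at the end is last_used_idx, which packs
 * the used index and the used wrap counter into one 16-bit value; the
 * caller hands it to virtqueue_poll() to close the race window
 * between re-enabling callbacks and checking for more work.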
17161ce9e605STiwei Bie */ 17171ce9e605STiwei Bie 1718f51f9826STiwei Bie if (vq->event) { 1719f51f9826STiwei Bie vq->packed.vring.driver->off_wrap = 1720a7722890Shuangjie.albert cpu_to_le16(vq->last_used_idx); 1721f51f9826STiwei Bie /* 1722f51f9826STiwei Bie * We need to update event offset and event wrap 1723f51f9826STiwei Bie * counter first before updating event flags. 1724f51f9826STiwei Bie */ 1725f51f9826STiwei Bie virtio_wmb(vq->weak_barriers); 1726f51f9826STiwei Bie } 1727f51f9826STiwei Bie 17281ce9e605STiwei Bie if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) { 1729f51f9826STiwei Bie vq->packed.event_flags_shadow = vq->event ? 1730f51f9826STiwei Bie VRING_PACKED_EVENT_FLAG_DESC : 1731f51f9826STiwei Bie VRING_PACKED_EVENT_FLAG_ENABLE; 17321ce9e605STiwei Bie vq->packed.vring.driver->flags = 17331ce9e605STiwei Bie cpu_to_le16(vq->packed.event_flags_shadow); 17341ce9e605STiwei Bie } 17351ce9e605STiwei Bie 17361ce9e605STiwei Bie END_USE(vq); 1737a7722890Shuangjie.albert return vq->last_used_idx; 17381ce9e605STiwei Bie } 17391ce9e605STiwei Bie 17401ce9e605STiwei Bie static bool virtqueue_poll_packed(struct virtqueue *_vq, u16 off_wrap) 17411ce9e605STiwei Bie { 17421ce9e605STiwei Bie struct vring_virtqueue *vq = to_vvq(_vq); 17431ce9e605STiwei Bie bool wrap_counter; 17441ce9e605STiwei Bie u16 used_idx; 17451ce9e605STiwei Bie 17461ce9e605STiwei Bie wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR; 17471ce9e605STiwei Bie used_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR); 17481ce9e605STiwei Bie 17491ce9e605STiwei Bie return is_used_desc_packed(vq, used_idx, wrap_counter); 17501ce9e605STiwei Bie } 17511ce9e605STiwei Bie 17521ce9e605STiwei Bie static bool virtqueue_enable_cb_delayed_packed(struct virtqueue *_vq) 17531ce9e605STiwei Bie { 17541ce9e605STiwei Bie struct vring_virtqueue *vq = to_vvq(_vq); 1755a7722890Shuangjie.albert u16 used_idx, wrap_counter, last_used_idx; 1756f51f9826STiwei Bie u16 bufs; 17571ce9e605STiwei Bie 17581ce9e605STiwei Bie START_USE(vq); 17591ce9e605STiwei Bie 17601ce9e605STiwei Bie /* 17611ce9e605STiwei Bie * We optimistically turn back on interrupts, then check if there was 17621ce9e605STiwei Bie * more to do. 17631ce9e605STiwei Bie */ 17641ce9e605STiwei Bie 1765f51f9826STiwei Bie if (vq->event) { 1766f51f9826STiwei Bie /* TODO: tune this threshold */ 1767f51f9826STiwei Bie bufs = (vq->packed.vring.num - vq->vq.num_free) * 3 / 4; 1768a7722890Shuangjie.albert last_used_idx = READ_ONCE(vq->last_used_idx); 1769a7722890Shuangjie.albert wrap_counter = packed_used_wrap_counter(last_used_idx); 17701ce9e605STiwei Bie 1771a7722890Shuangjie.albert used_idx = packed_last_used(last_used_idx) + bufs; 1772f51f9826STiwei Bie if (used_idx >= vq->packed.vring.num) { 1773f51f9826STiwei Bie used_idx -= vq->packed.vring.num; 1774f51f9826STiwei Bie wrap_counter ^= 1; 1775f51f9826STiwei Bie } 1776f51f9826STiwei Bie 1777f51f9826STiwei Bie vq->packed.vring.driver->off_wrap = cpu_to_le16(used_idx | 1778f51f9826STiwei Bie (wrap_counter << VRING_PACKED_EVENT_F_WRAP_CTR)); 1779f51f9826STiwei Bie 1780f51f9826STiwei Bie /* 1781f51f9826STiwei Bie * We need to update event offset and event wrap 1782f51f9826STiwei Bie * counter first before updating event flags. 1783f51f9826STiwei Bie */ 1784f51f9826STiwei Bie virtio_wmb(vq->weak_barriers); 1785f51f9826STiwei Bie } 1786f51f9826STiwei Bie 17871ce9e605STiwei Bie if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) { 1788f51f9826STiwei Bie vq->packed.event_flags_shadow = vq->event ? 
1789f51f9826STiwei Bie VRING_PACKED_EVENT_FLAG_DESC : 1790f51f9826STiwei Bie VRING_PACKED_EVENT_FLAG_ENABLE; 17911ce9e605STiwei Bie vq->packed.vring.driver->flags = 17921ce9e605STiwei Bie cpu_to_le16(vq->packed.event_flags_shadow); 17931ce9e605STiwei Bie } 17941ce9e605STiwei Bie 17951ce9e605STiwei Bie /* 17961ce9e605STiwei Bie * We need to update event suppression structure first 17971ce9e605STiwei Bie * before re-checking for more used buffers. 17981ce9e605STiwei Bie */ 17991ce9e605STiwei Bie virtio_mb(vq->weak_barriers); 18001ce9e605STiwei Bie 1801a7722890Shuangjie.albert last_used_idx = READ_ONCE(vq->last_used_idx); 1802a7722890Shuangjie.albert wrap_counter = packed_used_wrap_counter(last_used_idx); 1803a7722890Shuangjie.albert used_idx = packed_last_used(last_used_idx); 1804a7722890Shuangjie.albert if (is_used_desc_packed(vq, used_idx, wrap_counter)) { 18051ce9e605STiwei Bie END_USE(vq); 18061ce9e605STiwei Bie return false; 18071ce9e605STiwei Bie } 18081ce9e605STiwei Bie 18091ce9e605STiwei Bie END_USE(vq); 18101ce9e605STiwei Bie return true; 18111ce9e605STiwei Bie } 18121ce9e605STiwei Bie 18131ce9e605STiwei Bie static void *virtqueue_detach_unused_buf_packed(struct virtqueue *_vq) 18141ce9e605STiwei Bie { 18151ce9e605STiwei Bie struct vring_virtqueue *vq = to_vvq(_vq); 18161ce9e605STiwei Bie unsigned int i; 18171ce9e605STiwei Bie void *buf; 18181ce9e605STiwei Bie 18191ce9e605STiwei Bie START_USE(vq); 18201ce9e605STiwei Bie 18211ce9e605STiwei Bie for (i = 0; i < vq->packed.vring.num; i++) { 18221ce9e605STiwei Bie if (!vq->packed.desc_state[i].data) 18231ce9e605STiwei Bie continue; 18241ce9e605STiwei Bie /* detach_buf clears data, so grab it now. */ 18251ce9e605STiwei Bie buf = vq->packed.desc_state[i].data; 18261ce9e605STiwei Bie detach_buf_packed(vq, i, NULL); 18271ce9e605STiwei Bie END_USE(vq); 18281ce9e605STiwei Bie return buf; 18291ce9e605STiwei Bie } 18301ce9e605STiwei Bie /* That should have freed everything. 
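 * At this point every token has either been handed back above or was
 * never set, so the free count must again match the full ring size.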
*/ 18311ce9e605STiwei Bie BUG_ON(vq->vq.num_free != vq->packed.vring.num); 18321ce9e605STiwei Bie 18331ce9e605STiwei Bie END_USE(vq); 18341ce9e605STiwei Bie return NULL; 18351ce9e605STiwei Bie } 18361ce9e605STiwei Bie 183796ef18a2SXuan Zhuo static struct vring_desc_extra *vring_alloc_desc_extra(unsigned int num) 18385a222421SJason Wang { 18395a222421SJason Wang struct vring_desc_extra *desc_extra; 18405a222421SJason Wang unsigned int i; 18415a222421SJason Wang 18425a222421SJason Wang desc_extra = kmalloc_array(num, sizeof(struct vring_desc_extra), 18435a222421SJason Wang GFP_KERNEL); 18445a222421SJason Wang if (!desc_extra) 18455a222421SJason Wang return NULL; 18465a222421SJason Wang 18475a222421SJason Wang memset(desc_extra, 0, num * sizeof(struct vring_desc_extra)); 18485a222421SJason Wang 18495a222421SJason Wang for (i = 0; i < num - 1; i++) 18505a222421SJason Wang desc_extra[i].next = i + 1; 18515a222421SJason Wang 18525a222421SJason Wang return desc_extra; 18535a222421SJason Wang } 18545a222421SJason Wang 18556356f8bbSXuan Zhuo static void vring_free_packed(struct vring_virtqueue_packed *vring_packed, 18562713ea3cSJason Wang struct virtio_device *vdev, 18572713ea3cSJason Wang struct device *dma_dev) 18586356f8bbSXuan Zhuo { 18596356f8bbSXuan Zhuo if (vring_packed->vring.desc) 18606356f8bbSXuan Zhuo vring_free_queue(vdev, vring_packed->ring_size_in_bytes, 18616356f8bbSXuan Zhuo vring_packed->vring.desc, 18622713ea3cSJason Wang vring_packed->ring_dma_addr, 18632713ea3cSJason Wang dma_dev); 18646356f8bbSXuan Zhuo 18656356f8bbSXuan Zhuo if (vring_packed->vring.driver) 18666356f8bbSXuan Zhuo vring_free_queue(vdev, vring_packed->event_size_in_bytes, 18676356f8bbSXuan Zhuo vring_packed->vring.driver, 18682713ea3cSJason Wang vring_packed->driver_event_dma_addr, 18692713ea3cSJason Wang dma_dev); 18706356f8bbSXuan Zhuo 18716356f8bbSXuan Zhuo if (vring_packed->vring.device) 18726356f8bbSXuan Zhuo vring_free_queue(vdev, vring_packed->event_size_in_bytes, 18736356f8bbSXuan Zhuo vring_packed->vring.device, 18742713ea3cSJason Wang vring_packed->device_event_dma_addr, 18752713ea3cSJason Wang dma_dev); 18766356f8bbSXuan Zhuo 18776356f8bbSXuan Zhuo kfree(vring_packed->desc_state); 18786356f8bbSXuan Zhuo kfree(vring_packed->desc_extra); 18796356f8bbSXuan Zhuo } 18806356f8bbSXuan Zhuo 18816b60b9c0SXuan Zhuo static int vring_alloc_queue_packed(struct vring_virtqueue_packed *vring_packed, 18826b60b9c0SXuan Zhuo struct virtio_device *vdev, 18832713ea3cSJason Wang u32 num, struct device *dma_dev) 18846b60b9c0SXuan Zhuo { 18856b60b9c0SXuan Zhuo struct vring_packed_desc *ring; 18866b60b9c0SXuan Zhuo struct vring_packed_desc_event *driver, *device; 18876b60b9c0SXuan Zhuo dma_addr_t ring_dma_addr, driver_event_dma_addr, device_event_dma_addr; 18886b60b9c0SXuan Zhuo size_t ring_size_in_bytes, event_size_in_bytes; 18896b60b9c0SXuan Zhuo 18906b60b9c0SXuan Zhuo ring_size_in_bytes = num * sizeof(struct vring_packed_desc); 18916b60b9c0SXuan Zhuo 18926b60b9c0SXuan Zhuo ring = vring_alloc_queue(vdev, ring_size_in_bytes, 18936b60b9c0SXuan Zhuo &ring_dma_addr, 18942713ea3cSJason Wang GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, 18952713ea3cSJason Wang dma_dev); 18966b60b9c0SXuan Zhuo if (!ring) 18976b60b9c0SXuan Zhuo goto err; 18986b60b9c0SXuan Zhuo 18996b60b9c0SXuan Zhuo vring_packed->vring.desc = ring; 19006b60b9c0SXuan Zhuo vring_packed->ring_dma_addr = ring_dma_addr; 19016b60b9c0SXuan Zhuo vring_packed->ring_size_in_bytes = ring_size_in_bytes; 19026b60b9c0SXuan Zhuo 19036b60b9c0SXuan Zhuo event_size_in_bytes = 
sizeof(struct vring_packed_desc_event); 19046b60b9c0SXuan Zhuo 19056b60b9c0SXuan Zhuo driver = vring_alloc_queue(vdev, event_size_in_bytes, 19066b60b9c0SXuan Zhuo &driver_event_dma_addr, 19072713ea3cSJason Wang GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, 19082713ea3cSJason Wang dma_dev); 19096b60b9c0SXuan Zhuo if (!driver) 19106b60b9c0SXuan Zhuo goto err; 19116b60b9c0SXuan Zhuo 19126b60b9c0SXuan Zhuo vring_packed->vring.driver = driver; 19136b60b9c0SXuan Zhuo vring_packed->event_size_in_bytes = event_size_in_bytes; 19146b60b9c0SXuan Zhuo vring_packed->driver_event_dma_addr = driver_event_dma_addr; 19156b60b9c0SXuan Zhuo 19166b60b9c0SXuan Zhuo device = vring_alloc_queue(vdev, event_size_in_bytes, 19176b60b9c0SXuan Zhuo &device_event_dma_addr, 19182713ea3cSJason Wang GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, 19192713ea3cSJason Wang dma_dev); 19206b60b9c0SXuan Zhuo if (!device) 19216b60b9c0SXuan Zhuo goto err; 19226b60b9c0SXuan Zhuo 19236b60b9c0SXuan Zhuo vring_packed->vring.device = device; 19246b60b9c0SXuan Zhuo vring_packed->device_event_dma_addr = device_event_dma_addr; 19256b60b9c0SXuan Zhuo 19266b60b9c0SXuan Zhuo vring_packed->vring.num = num; 19276b60b9c0SXuan Zhuo 19286b60b9c0SXuan Zhuo return 0; 19296b60b9c0SXuan Zhuo 19306b60b9c0SXuan Zhuo err: 19312713ea3cSJason Wang vring_free_packed(vring_packed, vdev, dma_dev); 19326b60b9c0SXuan Zhuo return -ENOMEM; 19336b60b9c0SXuan Zhuo } 19346b60b9c0SXuan Zhuo 1935ef3167cfSXuan Zhuo static int vring_alloc_state_extra_packed(struct vring_virtqueue_packed *vring_packed) 1936ef3167cfSXuan Zhuo { 1937ef3167cfSXuan Zhuo struct vring_desc_state_packed *state; 1938ef3167cfSXuan Zhuo struct vring_desc_extra *extra; 1939ef3167cfSXuan Zhuo u32 num = vring_packed->vring.num; 1940ef3167cfSXuan Zhuo 1941ef3167cfSXuan Zhuo state = kmalloc_array(num, sizeof(struct vring_desc_state_packed), GFP_KERNEL); 1942ef3167cfSXuan Zhuo if (!state) 1943ef3167cfSXuan Zhuo goto err_desc_state; 1944ef3167cfSXuan Zhuo 1945ef3167cfSXuan Zhuo memset(state, 0, num * sizeof(struct vring_desc_state_packed)); 1946ef3167cfSXuan Zhuo 1947ef3167cfSXuan Zhuo extra = vring_alloc_desc_extra(num); 1948ef3167cfSXuan Zhuo if (!extra) 1949ef3167cfSXuan Zhuo goto err_desc_extra; 1950ef3167cfSXuan Zhuo 1951ef3167cfSXuan Zhuo vring_packed->desc_state = state; 1952ef3167cfSXuan Zhuo vring_packed->desc_extra = extra; 1953ef3167cfSXuan Zhuo 1954ef3167cfSXuan Zhuo return 0; 1955ef3167cfSXuan Zhuo 1956ef3167cfSXuan Zhuo err_desc_extra: 1957ef3167cfSXuan Zhuo kfree(state); 1958ef3167cfSXuan Zhuo err_desc_state: 1959ef3167cfSXuan Zhuo return -ENOMEM; 1960ef3167cfSXuan Zhuo } 1961ef3167cfSXuan Zhuo 19621a107c87SXuan Zhuo static void virtqueue_vring_init_packed(struct vring_virtqueue_packed *vring_packed, 19631a107c87SXuan Zhuo bool callback) 19641a107c87SXuan Zhuo { 19651a107c87SXuan Zhuo vring_packed->next_avail_idx = 0; 19661a107c87SXuan Zhuo vring_packed->avail_wrap_counter = 1; 19671a107c87SXuan Zhuo vring_packed->event_flags_shadow = 0; 19681a107c87SXuan Zhuo vring_packed->avail_used_flags = 1 << VRING_PACKED_DESC_F_AVAIL; 19691a107c87SXuan Zhuo 19701a107c87SXuan Zhuo /* No callback? Tell other side not to bother us. 
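 * Writing VRING_PACKED_EVENT_FLAG_DISABLE into driver->flags tells
 * the device to suppress interrupts for this queue from the start;
 * the event_flags_shadow copy lets the later enable/disable paths
 * skip the write when the value is already current.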
*/ 19711a107c87SXuan Zhuo if (!callback) { 19721a107c87SXuan Zhuo vring_packed->event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE; 19731a107c87SXuan Zhuo vring_packed->vring.driver->flags = 19741a107c87SXuan Zhuo cpu_to_le16(vring_packed->event_flags_shadow); 19751a107c87SXuan Zhuo } 19761a107c87SXuan Zhuo } 19771a107c87SXuan Zhuo 197851d649f1SXuan Zhuo static void virtqueue_vring_attach_packed(struct vring_virtqueue *vq, 197951d649f1SXuan Zhuo struct vring_virtqueue_packed *vring_packed) 198051d649f1SXuan Zhuo { 198151d649f1SXuan Zhuo vq->packed = *vring_packed; 198251d649f1SXuan Zhuo 198351d649f1SXuan Zhuo /* Put everything in free lists. */ 198451d649f1SXuan Zhuo vq->free_head = 0; 198551d649f1SXuan Zhuo } 198651d649f1SXuan Zhuo 198756775e14SXuan Zhuo static void virtqueue_reinit_packed(struct vring_virtqueue *vq) 198856775e14SXuan Zhuo { 198956775e14SXuan Zhuo memset(vq->packed.vring.device, 0, vq->packed.event_size_in_bytes); 199056775e14SXuan Zhuo memset(vq->packed.vring.driver, 0, vq->packed.event_size_in_bytes); 199156775e14SXuan Zhuo 199256775e14SXuan Zhuo /* we need to reset the desc.flags. For more, see is_used_desc_packed() */ 199356775e14SXuan Zhuo memset(vq->packed.vring.desc, 0, vq->packed.ring_size_in_bytes); 199456775e14SXuan Zhuo 199556775e14SXuan Zhuo virtqueue_init(vq, vq->packed.vring.num); 199656775e14SXuan Zhuo virtqueue_vring_init_packed(&vq->packed, !!vq->vq.callback); 199756775e14SXuan Zhuo } 199856775e14SXuan Zhuo 19991ce9e605STiwei Bie static struct virtqueue *vring_create_virtqueue_packed( 20001ce9e605STiwei Bie unsigned int index, 20011ce9e605STiwei Bie unsigned int num, 20021ce9e605STiwei Bie unsigned int vring_align, 20031ce9e605STiwei Bie struct virtio_device *vdev, 20041ce9e605STiwei Bie bool weak_barriers, 20051ce9e605STiwei Bie bool may_reduce_num, 20061ce9e605STiwei Bie bool context, 20071ce9e605STiwei Bie bool (*notify)(struct virtqueue *), 20081ce9e605STiwei Bie void (*callback)(struct virtqueue *), 20092713ea3cSJason Wang const char *name, 20102713ea3cSJason Wang struct device *dma_dev) 20111ce9e605STiwei Bie { 20126b60b9c0SXuan Zhuo struct vring_virtqueue_packed vring_packed = {}; 20131ce9e605STiwei Bie struct vring_virtqueue *vq; 2014ef3167cfSXuan Zhuo int err; 20151ce9e605STiwei Bie 20162713ea3cSJason Wang if (vring_alloc_queue_packed(&vring_packed, vdev, num, dma_dev)) 20171ce9e605STiwei Bie goto err_ring; 20181ce9e605STiwei Bie 20191ce9e605STiwei Bie vq = kmalloc(sizeof(*vq), GFP_KERNEL); 20201ce9e605STiwei Bie if (!vq) 20211ce9e605STiwei Bie goto err_vq; 20221ce9e605STiwei Bie 20231ce9e605STiwei Bie vq->vq.callback = callback; 20241ce9e605STiwei Bie vq->vq.vdev = vdev; 20251ce9e605STiwei Bie vq->vq.name = name; 20261ce9e605STiwei Bie vq->vq.index = index; 20274913e854SXuan Zhuo vq->vq.reset = false; 20281ce9e605STiwei Bie vq->we_own_ring = true; 20291ce9e605STiwei Bie vq->notify = notify; 20301ce9e605STiwei Bie vq->weak_barriers = weak_barriers; 2031c346dae4SJason Wang #ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION 20328b4ec69dSJason Wang vq->broken = true; 2033c346dae4SJason Wang #else 2034c346dae4SJason Wang vq->broken = false; 2035c346dae4SJason Wang #endif 20361ce9e605STiwei Bie vq->packed_ring = true; 20372713ea3cSJason Wang vq->dma_dev = dma_dev; 20381ce9e605STiwei Bie vq->use_dma_api = vring_use_dma_api(vdev); 20391ce9e605STiwei Bie 20401ce9e605STiwei Bie vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) && 20411ce9e605STiwei Bie !context; 20421ce9e605STiwei Bie vq->event = virtio_has_feature(vdev, 
VIRTIO_RING_F_EVENT_IDX); 20431ce9e605STiwei Bie 204445383fb0STiwei Bie if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM)) 204545383fb0STiwei Bie vq->weak_barriers = false; 204645383fb0STiwei Bie 2047ef3167cfSXuan Zhuo err = vring_alloc_state_extra_packed(&vring_packed); 2048ef3167cfSXuan Zhuo if (err) 2049ef3167cfSXuan Zhuo goto err_state_extra; 20501ce9e605STiwei Bie 20511a107c87SXuan Zhuo virtqueue_vring_init_packed(&vring_packed, !!callback); 20521ce9e605STiwei Bie 20533a897128SXuan Zhuo virtqueue_init(vq, num); 205451d649f1SXuan Zhuo virtqueue_vring_attach_packed(vq, &vring_packed); 20553a897128SXuan Zhuo 20560e566c8fSParav Pandit spin_lock(&vdev->vqs_list_lock); 2057e152d8afSDan Carpenter list_add_tail(&vq->vq.list, &vdev->vqs); 20580e566c8fSParav Pandit spin_unlock(&vdev->vqs_list_lock); 20591ce9e605STiwei Bie return &vq->vq; 20601ce9e605STiwei Bie 2061ef3167cfSXuan Zhuo err_state_extra: 20621ce9e605STiwei Bie kfree(vq); 20631ce9e605STiwei Bie err_vq: 20642713ea3cSJason Wang vring_free_packed(&vring_packed, vdev, dma_dev); 20651ce9e605STiwei Bie err_ring: 20661ce9e605STiwei Bie return NULL; 20671ce9e605STiwei Bie } 20681ce9e605STiwei Bie 2069947f9fcfSXuan Zhuo static int virtqueue_resize_packed(struct virtqueue *_vq, u32 num) 2070947f9fcfSXuan Zhuo { 2071947f9fcfSXuan Zhuo struct vring_virtqueue_packed vring_packed = {}; 2072947f9fcfSXuan Zhuo struct vring_virtqueue *vq = to_vvq(_vq); 2073947f9fcfSXuan Zhuo struct virtio_device *vdev = _vq->vdev; 2074947f9fcfSXuan Zhuo int err; 2075947f9fcfSXuan Zhuo 20762713ea3cSJason Wang if (vring_alloc_queue_packed(&vring_packed, vdev, num, vring_dma_dev(vq))) 2077947f9fcfSXuan Zhuo goto err_ring; 2078947f9fcfSXuan Zhuo 2079947f9fcfSXuan Zhuo err = vring_alloc_state_extra_packed(&vring_packed); 2080947f9fcfSXuan Zhuo if (err) 2081947f9fcfSXuan Zhuo goto err_state_extra; 2082947f9fcfSXuan Zhuo 2083947f9fcfSXuan Zhuo vring_free(&vq->vq); 2084947f9fcfSXuan Zhuo 2085947f9fcfSXuan Zhuo virtqueue_vring_init_packed(&vring_packed, !!vq->vq.callback); 2086947f9fcfSXuan Zhuo 2087947f9fcfSXuan Zhuo virtqueue_init(vq, vring_packed.vring.num); 2088947f9fcfSXuan Zhuo virtqueue_vring_attach_packed(vq, &vring_packed); 2089947f9fcfSXuan Zhuo 2090947f9fcfSXuan Zhuo return 0; 2091947f9fcfSXuan Zhuo 2092947f9fcfSXuan Zhuo err_state_extra: 20932713ea3cSJason Wang vring_free_packed(&vring_packed, vdev, vring_dma_dev(vq)); 2094947f9fcfSXuan Zhuo err_ring: 2095947f9fcfSXuan Zhuo virtqueue_reinit_packed(vq); 2096947f9fcfSXuan Zhuo return -ENOMEM; 2097947f9fcfSXuan Zhuo } 2098947f9fcfSXuan Zhuo 20991ce9e605STiwei Bie 21001ce9e605STiwei Bie /* 2101e6f633e5STiwei Bie * Generic functions and exported symbols. 2102e6f633e5STiwei Bie */ 2103e6f633e5STiwei Bie 2104e6f633e5STiwei Bie static inline int virtqueue_add(struct virtqueue *_vq, 2105e6f633e5STiwei Bie struct scatterlist *sgs[], 2106e6f633e5STiwei Bie unsigned int total_sg, 2107e6f633e5STiwei Bie unsigned int out_sgs, 2108e6f633e5STiwei Bie unsigned int in_sgs, 2109e6f633e5STiwei Bie void *data, 2110e6f633e5STiwei Bie void *ctx, 2111e6f633e5STiwei Bie gfp_t gfp) 2112e6f633e5STiwei Bie { 21131ce9e605STiwei Bie struct vring_virtqueue *vq = to_vvq(_vq); 21141ce9e605STiwei Bie 21151ce9e605STiwei Bie return vq->packed_ring ? 
virtqueue_add_packed(_vq, sgs, total_sg, 21161ce9e605STiwei Bie out_sgs, in_sgs, data, ctx, gfp) : 21171ce9e605STiwei Bie virtqueue_add_split(_vq, sgs, total_sg, 2118e6f633e5STiwei Bie out_sgs, in_sgs, data, ctx, gfp); 2119e6f633e5STiwei Bie } 2120e6f633e5STiwei Bie 2121e6f633e5STiwei Bie /** 2122e6f633e5STiwei Bie * virtqueue_add_sgs - expose buffers to other end 2123a5581206SJiang Biao * @_vq: the struct virtqueue we're talking about. 2124e6f633e5STiwei Bie * @sgs: array of terminated scatterlists. 2125a5581206SJiang Biao * @out_sgs: the number of scatterlists readable by other side 2126a5581206SJiang Biao * @in_sgs: the number of scatterlists which are writable (after readable ones) 2127e6f633e5STiwei Bie * @data: the token identifying the buffer. 2128e6f633e5STiwei Bie * @gfp: how to do memory allocations (if necessary). 2129e6f633e5STiwei Bie * 2130e6f633e5STiwei Bie * Caller must ensure we don't call this with other virtqueue operations 2131e6f633e5STiwei Bie * at the same time (except where noted). 2132e6f633e5STiwei Bie * 2133e6f633e5STiwei Bie * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO). 2134e6f633e5STiwei Bie */ 2135e6f633e5STiwei Bie int virtqueue_add_sgs(struct virtqueue *_vq, 2136e6f633e5STiwei Bie struct scatterlist *sgs[], 2137e6f633e5STiwei Bie unsigned int out_sgs, 2138e6f633e5STiwei Bie unsigned int in_sgs, 2139e6f633e5STiwei Bie void *data, 2140e6f633e5STiwei Bie gfp_t gfp) 2141e6f633e5STiwei Bie { 2142e6f633e5STiwei Bie unsigned int i, total_sg = 0; 2143e6f633e5STiwei Bie 2144e6f633e5STiwei Bie /* Count them first. */ 2145e6f633e5STiwei Bie for (i = 0; i < out_sgs + in_sgs; i++) { 2146e6f633e5STiwei Bie struct scatterlist *sg; 2147e6f633e5STiwei Bie 2148e6f633e5STiwei Bie for (sg = sgs[i]; sg; sg = sg_next(sg)) 2149e6f633e5STiwei Bie total_sg++; 2150e6f633e5STiwei Bie } 2151e6f633e5STiwei Bie return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs, 2152e6f633e5STiwei Bie data, NULL, gfp); 2153e6f633e5STiwei Bie } 2154e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_add_sgs); 2155e6f633e5STiwei Bie 2156e6f633e5STiwei Bie /** 2157e6f633e5STiwei Bie * virtqueue_add_outbuf - expose output buffers to other end 2158e6f633e5STiwei Bie * @vq: the struct virtqueue we're talking about. 2159e6f633e5STiwei Bie * @sg: scatterlist (must be well-formed and terminated!) 2160e6f633e5STiwei Bie * @num: the number of entries in @sg readable by other side 2161e6f633e5STiwei Bie * @data: the token identifying the buffer. 2162e6f633e5STiwei Bie * @gfp: how to do memory allocations (if necessary). 2163e6f633e5STiwei Bie * 2164e6f633e5STiwei Bie * Caller must ensure we don't call this with other virtqueue operations 2165e6f633e5STiwei Bie * at the same time (except where noted). 2166e6f633e5STiwei Bie * 2167e6f633e5STiwei Bie * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO). 2168e6f633e5STiwei Bie */ 2169e6f633e5STiwei Bie int virtqueue_add_outbuf(struct virtqueue *vq, 2170e6f633e5STiwei Bie struct scatterlist *sg, unsigned int num, 2171e6f633e5STiwei Bie void *data, 2172e6f633e5STiwei Bie gfp_t gfp) 2173e6f633e5STiwei Bie { 2174e6f633e5STiwei Bie return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, gfp); 2175e6f633e5STiwei Bie } 2176e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_add_outbuf); 2177e6f633e5STiwei Bie 2178e6f633e5STiwei Bie /** 2179e6f633e5STiwei Bie * virtqueue_add_inbuf - expose input buffers to other end 2180e6f633e5STiwei Bie * @vq: the struct virtqueue we're talking about. 
2181e6f633e5STiwei Bie * @sg: scatterlist (must be well-formed and terminated!) 2182e6f633e5STiwei Bie * @num: the number of entries in @sg writable by other side 2183e6f633e5STiwei Bie * @data: the token identifying the buffer. 2184e6f633e5STiwei Bie * @gfp: how to do memory allocations (if necessary). 2185e6f633e5STiwei Bie * 2186e6f633e5STiwei Bie * Caller must ensure we don't call this with other virtqueue operations 2187e6f633e5STiwei Bie * at the same time (except where noted). 2188e6f633e5STiwei Bie * 2189e6f633e5STiwei Bie * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO). 2190e6f633e5STiwei Bie */ 2191e6f633e5STiwei Bie int virtqueue_add_inbuf(struct virtqueue *vq, 2192e6f633e5STiwei Bie struct scatterlist *sg, unsigned int num, 2193e6f633e5STiwei Bie void *data, 2194e6f633e5STiwei Bie gfp_t gfp) 2195e6f633e5STiwei Bie { 2196e6f633e5STiwei Bie return virtqueue_add(vq, &sg, num, 0, 1, data, NULL, gfp); 2197e6f633e5STiwei Bie } 2198e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_add_inbuf); 2199e6f633e5STiwei Bie 2200e6f633e5STiwei Bie /** 2201e6f633e5STiwei Bie * virtqueue_add_inbuf_ctx - expose input buffers to other end 2202e6f633e5STiwei Bie * @vq: the struct virtqueue we're talking about. 2203e6f633e5STiwei Bie * @sg: scatterlist (must be well-formed and terminated!) 2204e6f633e5STiwei Bie * @num: the number of entries in @sg writable by other side 2205e6f633e5STiwei Bie * @data: the token identifying the buffer. 2206e6f633e5STiwei Bie * @ctx: extra context for the token 2207e6f633e5STiwei Bie * @gfp: how to do memory allocations (if necessary). 2208e6f633e5STiwei Bie * 2209e6f633e5STiwei Bie * Caller must ensure we don't call this with other virtqueue operations 2210e6f633e5STiwei Bie * at the same time (except where noted). 2211e6f633e5STiwei Bie * 2212e6f633e5STiwei Bie * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO). 2213e6f633e5STiwei Bie */ 2214e6f633e5STiwei Bie int virtqueue_add_inbuf_ctx(struct virtqueue *vq, 2215e6f633e5STiwei Bie struct scatterlist *sg, unsigned int num, 2216e6f633e5STiwei Bie void *data, 2217e6f633e5STiwei Bie void *ctx, 2218e6f633e5STiwei Bie gfp_t gfp) 2219e6f633e5STiwei Bie { 2220e6f633e5STiwei Bie return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, gfp); 2221e6f633e5STiwei Bie } 2222e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_ctx); 2223e6f633e5STiwei Bie 2224e6f633e5STiwei Bie /** 2225e6f633e5STiwei Bie * virtqueue_kick_prepare - first half of split virtqueue_kick call. 2226a5581206SJiang Biao * @_vq: the struct virtqueue 2227e6f633e5STiwei Bie * 2228e6f633e5STiwei Bie * Instead of virtqueue_kick(), you can do: 2229e6f633e5STiwei Bie * if (virtqueue_kick_prepare(vq)) 2230e6f633e5STiwei Bie * virtqueue_notify(vq); 2231e6f633e5STiwei Bie * 2232e6f633e5STiwei Bie * This is sometimes useful because the virtqueue_kick_prepare() needs 2233e6f633e5STiwei Bie * to be serialized, but the actual virtqueue_notify() call does not. 2234e6f633e5STiwei Bie */ 2235e6f633e5STiwei Bie bool virtqueue_kick_prepare(struct virtqueue *_vq) 2236e6f633e5STiwei Bie { 22371ce9e605STiwei Bie struct vring_virtqueue *vq = to_vvq(_vq); 22381ce9e605STiwei Bie 22391ce9e605STiwei Bie return vq->packed_ring ? virtqueue_kick_prepare_packed(_vq) : 22401ce9e605STiwei Bie virtqueue_kick_prepare_split(_vq); 2241e6f633e5STiwei Bie } 2242e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_kick_prepare); 2243e6f633e5STiwei Bie 2244e6f633e5STiwei Bie /** 2245e6f633e5STiwei Bie * virtqueue_notify - second half of split virtqueue_kick call. 
2246a5581206SJiang Biao * @_vq: the struct virtqueue 2247e6f633e5STiwei Bie * 2248e6f633e5STiwei Bie * This does not need to be serialized. 2249e6f633e5STiwei Bie * 2250e6f633e5STiwei Bie * Returns false if host notify failed or queue is broken, otherwise true. 2251e6f633e5STiwei Bie */ 2252e6f633e5STiwei Bie bool virtqueue_notify(struct virtqueue *_vq) 2253e6f633e5STiwei Bie { 2254e6f633e5STiwei Bie struct vring_virtqueue *vq = to_vvq(_vq); 2255e6f633e5STiwei Bie 2256e6f633e5STiwei Bie if (unlikely(vq->broken)) 2257e6f633e5STiwei Bie return false; 2258e6f633e5STiwei Bie 2259e6f633e5STiwei Bie /* Prod other side to tell it about changes. */ 2260e6f633e5STiwei Bie if (!vq->notify(_vq)) { 2261e6f633e5STiwei Bie vq->broken = true; 2262e6f633e5STiwei Bie return false; 2263e6f633e5STiwei Bie } 2264e6f633e5STiwei Bie return true; 2265e6f633e5STiwei Bie } 2266e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_notify); 2267e6f633e5STiwei Bie 2268e6f633e5STiwei Bie /** 2269e6f633e5STiwei Bie * virtqueue_kick - update after add_buf 2270e6f633e5STiwei Bie * @vq: the struct virtqueue 2271e6f633e5STiwei Bie * 2272e6f633e5STiwei Bie * After one or more virtqueue_add_* calls, invoke this to kick 2273e6f633e5STiwei Bie * the other side. 2274e6f633e5STiwei Bie * 2275e6f633e5STiwei Bie * Caller must ensure we don't call this with other virtqueue 2276e6f633e5STiwei Bie * operations at the same time (except where noted). 2277e6f633e5STiwei Bie * 2278e6f633e5STiwei Bie * Returns false if kick failed, otherwise true. 2279e6f633e5STiwei Bie */ 2280e6f633e5STiwei Bie bool virtqueue_kick(struct virtqueue *vq) 2281e6f633e5STiwei Bie { 2282e6f633e5STiwei Bie if (virtqueue_kick_prepare(vq)) 2283e6f633e5STiwei Bie return virtqueue_notify(vq); 2284e6f633e5STiwei Bie return true; 2285e6f633e5STiwei Bie } 2286e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_kick); 2287e6f633e5STiwei Bie 2288e6f633e5STiwei Bie /** 228931c11db6SYang Li * virtqueue_get_buf_ctx - get the next used buffer 2290a5581206SJiang Biao * @_vq: the struct virtqueue we're talking about. 2291e6f633e5STiwei Bie * @len: the length written into the buffer 2292a5581206SJiang Biao * @ctx: extra context for the token 2293e6f633e5STiwei Bie * 2294e6f633e5STiwei Bie * If the device wrote data into the buffer, @len will be set to the 2295e6f633e5STiwei Bie * amount written. This means you don't need to clear the buffer 2296e6f633e5STiwei Bie * beforehand to ensure there's no data leakage in the case of short 2297e6f633e5STiwei Bie * writes. 2298e6f633e5STiwei Bie * 2299e6f633e5STiwei Bie * Caller must ensure we don't call this with other virtqueue 2300e6f633e5STiwei Bie * operations at the same time (except where noted). 2301e6f633e5STiwei Bie * 2302e6f633e5STiwei Bie * Returns NULL if there are no used buffers, or the "data" token 2303e6f633e5STiwei Bie * handed to virtqueue_add_*(). 2304e6f633e5STiwei Bie */ 2305e6f633e5STiwei Bie void *virtqueue_get_buf_ctx(struct virtqueue *_vq, unsigned int *len, 2306e6f633e5STiwei Bie void **ctx) 2307e6f633e5STiwei Bie { 23081ce9e605STiwei Bie struct vring_virtqueue *vq = to_vvq(_vq); 23091ce9e605STiwei Bie 23101ce9e605STiwei Bie return vq->packed_ring ? 
virtqueue_get_buf_ctx_packed(_vq, len, ctx) : 23111ce9e605STiwei Bie virtqueue_get_buf_ctx_split(_vq, len, ctx); 2312e6f633e5STiwei Bie } 2313e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_get_buf_ctx); 2314e6f633e5STiwei Bie 2315e6f633e5STiwei Bie void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len) 2316e6f633e5STiwei Bie { 2317e6f633e5STiwei Bie return virtqueue_get_buf_ctx(_vq, len, NULL); 2318e6f633e5STiwei Bie } 2319e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_get_buf); 2320e6f633e5STiwei Bie /** 2321e6f633e5STiwei Bie * virtqueue_disable_cb - disable callbacks 2322a5581206SJiang Biao * @_vq: the struct virtqueue we're talking about. 2323e6f633e5STiwei Bie * 2324e6f633e5STiwei Bie * Note that this is not necessarily synchronous, hence unreliable and only 2325e6f633e5STiwei Bie * useful as an optimization. 2326e6f633e5STiwei Bie * 2327e6f633e5STiwei Bie * Unlike other operations, this need not be serialized. 2328e6f633e5STiwei Bie */ 2329e6f633e5STiwei Bie void virtqueue_disable_cb(struct virtqueue *_vq) 2330e6f633e5STiwei Bie { 23311ce9e605STiwei Bie struct vring_virtqueue *vq = to_vvq(_vq); 23321ce9e605STiwei Bie 23338d622d21SMichael S. Tsirkin /* If device triggered an event already it won't trigger one again: 23348d622d21SMichael S. Tsirkin * no need to disable. 23358d622d21SMichael S. Tsirkin */ 23368d622d21SMichael S. Tsirkin if (vq->event_triggered) 23378d622d21SMichael S. Tsirkin return; 23388d622d21SMichael S. Tsirkin 23391ce9e605STiwei Bie if (vq->packed_ring) 23401ce9e605STiwei Bie virtqueue_disable_cb_packed(_vq); 23411ce9e605STiwei Bie else 2342e6f633e5STiwei Bie virtqueue_disable_cb_split(_vq); 2343e6f633e5STiwei Bie } 2344e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_disable_cb); 2345e6f633e5STiwei Bie 2346e6f633e5STiwei Bie /** 2347e6f633e5STiwei Bie * virtqueue_enable_cb_prepare - restart callbacks after disable_cb 2348a5581206SJiang Biao * @_vq: the struct virtqueue we're talking about. 2349e6f633e5STiwei Bie * 2350e6f633e5STiwei Bie * This re-enables callbacks; it returns current queue state 2351e6f633e5STiwei Bie * in an opaque unsigned value. This value should be later tested by 2352e6f633e5STiwei Bie * virtqueue_poll, to detect a possible race between the driver checking for 2353e6f633e5STiwei Bie * more work, and enabling callbacks. 2354e6f633e5STiwei Bie * 2355e6f633e5STiwei Bie * Caller must ensure we don't call this with other virtqueue 2356e6f633e5STiwei Bie * operations at the same time (except where noted). 2357e6f633e5STiwei Bie */ 235831532340SSolomon Tan unsigned int virtqueue_enable_cb_prepare(struct virtqueue *_vq) 2359e6f633e5STiwei Bie { 23601ce9e605STiwei Bie struct vring_virtqueue *vq = to_vvq(_vq); 23611ce9e605STiwei Bie 23628d622d21SMichael S. Tsirkin if (vq->event_triggered) 23638d622d21SMichael S. Tsirkin vq->event_triggered = false; 23648d622d21SMichael S. Tsirkin 23651ce9e605STiwei Bie return vq->packed_ring ? virtqueue_enable_cb_prepare_packed(_vq) : 23661ce9e605STiwei Bie virtqueue_enable_cb_prepare_split(_vq); 2367e6f633e5STiwei Bie } 2368e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare); 2369e6f633e5STiwei Bie 2370e6f633e5STiwei Bie /** 2371e6f633e5STiwei Bie * virtqueue_poll - query pending used buffers 2372a5581206SJiang Biao * @_vq: the struct virtqueue we're talking about. 2373e6f633e5STiwei Bie * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare). 2374e6f633e5STiwei Bie * 2375e6f633e5STiwei Bie * Returns "true" if there are pending used buffers in the queue. 
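 *
 * Sketch of the intended pattern (this mirrors what virtqueue_enable_cb()
 * below does; handle_more_work() is a hypothetical driver helper):
 *
 *	unsigned int opaque = virtqueue_enable_cb_prepare(vq);
 *
 *	if (virtqueue_poll(vq, opaque))
 *		handle_more_work(vq);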
2376e6f633e5STiwei Bie * 2377e6f633e5STiwei Bie * This does not need to be serialized. 2378e6f633e5STiwei Bie */ 237931532340SSolomon Tan bool virtqueue_poll(struct virtqueue *_vq, unsigned int last_used_idx) 2380e6f633e5STiwei Bie { 2381e6f633e5STiwei Bie struct vring_virtqueue *vq = to_vvq(_vq); 2382e6f633e5STiwei Bie 2383481a0d74SMao Wenan if (unlikely(vq->broken)) 2384481a0d74SMao Wenan return false; 2385481a0d74SMao Wenan 2386e6f633e5STiwei Bie virtio_mb(vq->weak_barriers); 23871ce9e605STiwei Bie return vq->packed_ring ? virtqueue_poll_packed(_vq, last_used_idx) : 23881ce9e605STiwei Bie virtqueue_poll_split(_vq, last_used_idx); 2389e6f633e5STiwei Bie } 2390e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_poll); 2391e6f633e5STiwei Bie 2392e6f633e5STiwei Bie /** 2393e6f633e5STiwei Bie * virtqueue_enable_cb - restart callbacks after disable_cb. 2394a5581206SJiang Biao * @_vq: the struct virtqueue we're talking about. 2395e6f633e5STiwei Bie * 2396e6f633e5STiwei Bie * This re-enables callbacks; it returns "false" if there are pending 2397e6f633e5STiwei Bie * buffers in the queue, to detect a possible race between the driver 2398e6f633e5STiwei Bie * checking for more work, and enabling callbacks. 2399e6f633e5STiwei Bie * 2400e6f633e5STiwei Bie * Caller must ensure we don't call this with other virtqueue 2401e6f633e5STiwei Bie * operations at the same time (except where noted). 2402e6f633e5STiwei Bie */ 2403e6f633e5STiwei Bie bool virtqueue_enable_cb(struct virtqueue *_vq) 2404e6f633e5STiwei Bie { 240531532340SSolomon Tan unsigned int last_used_idx = virtqueue_enable_cb_prepare(_vq); 2406e6f633e5STiwei Bie 2407e6f633e5STiwei Bie return !virtqueue_poll(_vq, last_used_idx); 2408e6f633e5STiwei Bie } 2409e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_enable_cb); 2410e6f633e5STiwei Bie 2411e6f633e5STiwei Bie /** 2412e6f633e5STiwei Bie * virtqueue_enable_cb_delayed - restart callbacks after disable_cb. 2413a5581206SJiang Biao * @_vq: the struct virtqueue we're talking about. 2414e6f633e5STiwei Bie * 2415e6f633e5STiwei Bie * This re-enables callbacks but hints to the other side to delay 2416e6f633e5STiwei Bie * interrupts until most of the available buffers have been processed; 2417e6f633e5STiwei Bie * it returns "false" if there are many pending buffers in the queue, 2418e6f633e5STiwei Bie * to detect a possible race between the driver checking for more work, 2419e6f633e5STiwei Bie * and enabling callbacks. 2420e6f633e5STiwei Bie * 2421e6f633e5STiwei Bie * Caller must ensure we don't call this with other virtqueue 2422e6f633e5STiwei Bie * operations at the same time (except where noted). 2423e6f633e5STiwei Bie */ 2424e6f633e5STiwei Bie bool virtqueue_enable_cb_delayed(struct virtqueue *_vq) 2425e6f633e5STiwei Bie { 24261ce9e605STiwei Bie struct vring_virtqueue *vq = to_vvq(_vq); 24271ce9e605STiwei Bie 24288d622d21SMichael S. Tsirkin if (vq->event_triggered) 24298d622d21SMichael S. Tsirkin vq->event_triggered = false; 24308d622d21SMichael S. Tsirkin 24311ce9e605STiwei Bie return vq->packed_ring ? virtqueue_enable_cb_delayed_packed(_vq) : 24321ce9e605STiwei Bie virtqueue_enable_cb_delayed_split(_vq); 2433e6f633e5STiwei Bie } 2434e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed); 2435e6f633e5STiwei Bie 2436138fd251STiwei Bie /** 2437138fd251STiwei Bie * virtqueue_detach_unused_buf - detach first unused buffer 2438a5581206SJiang Biao * @_vq: the struct virtqueue we're talking about. 
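 *
 * Typical teardown loop in a driver (a sketch; free_driver_buf() is a
 * hypothetical helper, how each token is released is driver-specific):
 *
 *	void *buf;
 *
 *	while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
 *		free_driver_buf(buf);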
2392e6f633e5STiwei Bie /**
2393e6f633e5STiwei Bie  * virtqueue_enable_cb - restart callbacks after disable_cb.
2394a5581206SJiang Biao  * @_vq: the struct virtqueue we're talking about.
2395e6f633e5STiwei Bie  *
2396e6f633e5STiwei Bie  * This re-enables callbacks; it returns "false" if there are pending
2397e6f633e5STiwei Bie  * buffers in the queue, to detect a possible race between the driver
2398e6f633e5STiwei Bie  * checking for more work, and enabling callbacks.
2399e6f633e5STiwei Bie  *
2400e6f633e5STiwei Bie  * Caller must ensure we don't call this with other virtqueue
2401e6f633e5STiwei Bie  * operations at the same time (except where noted).
2402e6f633e5STiwei Bie  */
2403e6f633e5STiwei Bie bool virtqueue_enable_cb(struct virtqueue *_vq)
2404e6f633e5STiwei Bie {
240531532340SSolomon Tan 	unsigned int last_used_idx = virtqueue_enable_cb_prepare(_vq);
2406e6f633e5STiwei Bie
2407e6f633e5STiwei Bie 	return !virtqueue_poll(_vq, last_used_idx);
2408e6f633e5STiwei Bie }
2409e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
2410e6f633e5STiwei Bie
2411e6f633e5STiwei Bie /**
2412e6f633e5STiwei Bie  * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
2413a5581206SJiang Biao  * @_vq: the struct virtqueue we're talking about.
2414e6f633e5STiwei Bie  *
2415e6f633e5STiwei Bie  * This re-enables callbacks but hints to the other side to delay
2416e6f633e5STiwei Bie  * interrupts until most of the available buffers have been processed;
2417e6f633e5STiwei Bie  * it returns "false" if there are many pending buffers in the queue,
2418e6f633e5STiwei Bie  * to detect a possible race between the driver checking for more work,
2419e6f633e5STiwei Bie  * and enabling callbacks.
2420e6f633e5STiwei Bie  *
2421e6f633e5STiwei Bie  * Caller must ensure we don't call this with other virtqueue
2422e6f633e5STiwei Bie  * operations at the same time (except where noted).
2423e6f633e5STiwei Bie  */
2424e6f633e5STiwei Bie bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
2425e6f633e5STiwei Bie {
24261ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
24271ce9e605STiwei Bie
24288d622d21SMichael S. Tsirkin 	if (vq->event_triggered)
24298d622d21SMichael S. Tsirkin 		vq->event_triggered = false;
24308d622d21SMichael S. Tsirkin
24311ce9e605STiwei Bie 	return vq->packed_ring ? virtqueue_enable_cb_delayed_packed(_vq) :
24321ce9e605STiwei Bie 				 virtqueue_enable_cb_delayed_split(_vq);
2433e6f633e5STiwei Bie }
2434e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
2435e6f633e5STiwei Bie
2436138fd251STiwei Bie /**
2437138fd251STiwei Bie  * virtqueue_detach_unused_buf - detach first unused buffer
2438a5581206SJiang Biao  * @_vq: the struct virtqueue we're talking about.
2439138fd251STiwei Bie  *
2440138fd251STiwei Bie  * Returns NULL or the "data" token handed to virtqueue_add_*().
2441a62eecb3SXuan Zhuo  * This is not valid on an active queue; it is useful for device
2442a62eecb3SXuan Zhuo  * shutdown or for resetting a queue.
2443138fd251STiwei Bie  */
2444138fd251STiwei Bie void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
2445138fd251STiwei Bie {
24461ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
24471ce9e605STiwei Bie
24481ce9e605STiwei Bie 	return vq->packed_ring ? virtqueue_detach_unused_buf_packed(_vq) :
24491ce9e605STiwei Bie 				 virtqueue_detach_unused_buf_split(_vq);
2450138fd251STiwei Bie }
24517c5e9ed0SMichael S. Tsirkin EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
2452c021eac4SShirley Ma
2453138fd251STiwei Bie static inline bool more_used(const struct vring_virtqueue *vq)
2454138fd251STiwei Bie {
24551ce9e605STiwei Bie 	return vq->packed_ring ? more_used_packed(vq) : more_used_split(vq);
2456138fd251STiwei Bie }
2457138fd251STiwei Bie
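/*
 * Editor's illustrative sketch, not part of the original file: using
 * virtqueue_enable_cb_delayed() to batch completion interrupts, roughly as
 * network drivers do for transmit rings. The name is hypothetical and the
 * buffers are assumed kmalloc'd.
 */
static void __maybe_unused example_tx_cleanup(struct virtqueue *vq)
{
	unsigned int len;
	void *buf;

	do {
		virtqueue_disable_cb(vq);
		while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
			kfree(buf);
	/* "false" means many buffers are still pending: drain once more. */
	} while (!virtqueue_enable_cb_delayed(vq));
}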
24585c669c4aSRicardo Cañuelo /**
24595c669c4aSRicardo Cañuelo  * vring_interrupt - notify a virtqueue on an interrupt
24605c669c4aSRicardo Cañuelo  * @irq: the IRQ number (ignored)
24615c669c4aSRicardo Cañuelo  * @_vq: the struct virtqueue to notify
24625c669c4aSRicardo Cañuelo  *
24635c669c4aSRicardo Cañuelo  * Calls the callback function of @_vq to process the virtqueue
24645c669c4aSRicardo Cañuelo  * notification.
24655c669c4aSRicardo Cañuelo  */
24660a8a69ddSRusty Russell irqreturn_t vring_interrupt(int irq, void *_vq)
24670a8a69ddSRusty Russell {
24680a8a69ddSRusty Russell 	struct vring_virtqueue *vq = to_vvq(_vq);
24690a8a69ddSRusty Russell
24700a8a69ddSRusty Russell 	if (!more_used(vq)) {
24710a8a69ddSRusty Russell 		pr_debug("virtqueue interrupt with no work for %p\n", vq);
24720a8a69ddSRusty Russell 		return IRQ_NONE;
24730a8a69ddSRusty Russell 	}
24740a8a69ddSRusty Russell
24758b4ec69dSJason Wang 	if (unlikely(vq->broken)) {
2476c346dae4SJason Wang #ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
24778b4ec69dSJason Wang 		dev_warn_once(&vq->vq.vdev->dev,
24788b4ec69dSJason Wang 			      "virtio vring IRQ raised before DRIVER_OK");
24798b4ec69dSJason Wang 		return IRQ_NONE;
2480c346dae4SJason Wang #else
2481c346dae4SJason Wang 		return IRQ_HANDLED;
2482c346dae4SJason Wang #endif
24838b4ec69dSJason Wang 	}
24840a8a69ddSRusty Russell
24858d622d21SMichael S. Tsirkin 	/* Just a hint for performance: so it's ok that this can be racy! */
24868d622d21SMichael S. Tsirkin 	if (vq->event)
24878d622d21SMichael S. Tsirkin 		vq->event_triggered = true;
24888d622d21SMichael S. Tsirkin
24890a8a69ddSRusty Russell 	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
249018445c4dSRusty Russell 	if (vq->vq.callback)
249118445c4dSRusty Russell 		vq->vq.callback(&vq->vq);
24920a8a69ddSRusty Russell
24930a8a69ddSRusty Russell 	return IRQ_HANDLED;
24940a8a69ddSRusty Russell }
2495c6fd4701SRusty Russell EXPORT_SYMBOL_GPL(vring_interrupt);
24960a8a69ddSRusty Russell
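/*
 * Editor's illustrative sketch, not part of the original file: a transport
 * typically forwards its per-virtqueue interrupt straight to
 * vring_interrupt(). The wrapper below is hypothetical.
 */
static irqreturn_t __maybe_unused example_vq_isr(int irq, void *opaque)
{
	struct virtqueue *vq = opaque;

	/* IRQ_NONE is returned when the ring has no used buffers. */
	return vring_interrupt(irq, vq);
}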
24971ce9e605STiwei Bie /* Only available for split ring */
249807d9629dSXuan Zhuo static struct virtqueue *__vring_new_virtqueue(unsigned int index,
2499cd4c812aSXuan Zhuo 					       struct vring_virtqueue_split *vring_split,
25000a8a69ddSRusty Russell 					       struct virtio_device *vdev,
25017b21e34fSRusty Russell 					       bool weak_barriers,
2502f94682ddSMichael S. Tsirkin 					       bool context,
250346f9c2b9SHeinz Graalfs 					       bool (*notify)(struct virtqueue *),
25049499f5e7SRusty Russell 					       void (*callback)(struct virtqueue *),
25052713ea3cSJason Wang 					       const char *name,
25062713ea3cSJason Wang 					       struct device *dma_dev)
25070a8a69ddSRusty Russell {
25082a2d1382SAndy Lutomirski 	struct vring_virtqueue *vq;
2509a2b36c8dSXuan Zhuo 	int err;
25100a8a69ddSRusty Russell
25111ce9e605STiwei Bie 	if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
25121ce9e605STiwei Bie 		return NULL;
25131ce9e605STiwei Bie
2514cbeedb72STiwei Bie 	vq = kmalloc(sizeof(*vq), GFP_KERNEL);
25150a8a69ddSRusty Russell 	if (!vq)
25160a8a69ddSRusty Russell 		return NULL;
25170a8a69ddSRusty Russell
25181ce9e605STiwei Bie 	vq->packed_ring = false;
25190a8a69ddSRusty Russell 	vq->vq.callback = callback;
25200a8a69ddSRusty Russell 	vq->vq.vdev = vdev;
25219499f5e7SRusty Russell 	vq->vq.name = name;
252206ca287dSRusty Russell 	vq->vq.index = index;
25234913e854SXuan Zhuo 	vq->vq.reset = false;
25242a2d1382SAndy Lutomirski 	vq->we_own_ring = false;
25250a8a69ddSRusty Russell 	vq->notify = notify;
25267b21e34fSRusty Russell 	vq->weak_barriers = weak_barriers;
2527c346dae4SJason Wang #ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
25288b4ec69dSJason Wang 	vq->broken = true;
2529c346dae4SJason Wang #else
2530c346dae4SJason Wang 	vq->broken = false;
2531c346dae4SJason Wang #endif
25322713ea3cSJason Wang 	vq->dma_dev = dma_dev;
2533fb3fba6bSTiwei Bie 	vq->use_dma_api = vring_use_dma_api(vdev);
25340a8a69ddSRusty Russell
25355a08b04fSMichael S. Tsirkin 	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
25365a08b04fSMichael S. Tsirkin 		!context;
2537a5c262c5SMichael S. Tsirkin 	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
25389fa29b9dSMark McLoughlin
253945383fb0STiwei Bie 	if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
254045383fb0STiwei Bie 		vq->weak_barriers = false;
254145383fb0STiwei Bie
2542a2b36c8dSXuan Zhuo 	err = vring_alloc_state_extra_split(vring_split);
2543a2b36c8dSXuan Zhuo 	if (err) {
2544a2b36c8dSXuan Zhuo 		kfree(vq);
2545a2b36c8dSXuan Zhuo 		return NULL;
2546a2b36c8dSXuan Zhuo 	}
254772b5e895SJason Wang
2548198fa7beSXuan Zhuo 	virtqueue_vring_init_split(vring_split, vq);
2549198fa7beSXuan Zhuo
2550cd4c812aSXuan Zhuo 	virtqueue_init(vq, vring_split->vring.num);
2551e1d6a423SXuan Zhuo 	virtqueue_vring_attach_split(vq, vring_split);
25523a897128SXuan Zhuo
25530e566c8fSParav Pandit 	spin_lock(&vdev->vqs_list_lock);
2554e152d8afSDan Carpenter 	list_add_tail(&vq->vq.list, &vdev->vqs);
25550e566c8fSParav Pandit 	spin_unlock(&vdev->vqs_list_lock);
25560a8a69ddSRusty Russell 	return &vq->vq;
25570a8a69ddSRusty Russell }
25582a2d1382SAndy Lutomirski
25592a2d1382SAndy Lutomirski struct virtqueue *vring_create_virtqueue(
25602a2d1382SAndy Lutomirski 	unsigned int index,
25612a2d1382SAndy Lutomirski 	unsigned int num,
25622a2d1382SAndy Lutomirski 	unsigned int vring_align,
25632a2d1382SAndy Lutomirski 	struct virtio_device *vdev,
25642a2d1382SAndy Lutomirski 	bool weak_barriers,
25652a2d1382SAndy Lutomirski 	bool may_reduce_num,
2566f94682ddSMichael S. Tsirkin 	bool context,
25672a2d1382SAndy Lutomirski 	bool (*notify)(struct virtqueue *),
25682a2d1382SAndy Lutomirski 	void (*callback)(struct virtqueue *),
25692a2d1382SAndy Lutomirski 	const char *name)
25702a2d1382SAndy Lutomirski {
25711ce9e605STiwei Bie
25721ce9e605STiwei Bie 	if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
25731ce9e605STiwei Bie 		return vring_create_virtqueue_packed(index, num, vring_align,
25741ce9e605STiwei Bie 				vdev, weak_barriers, may_reduce_num,
25752713ea3cSJason Wang 				context, notify, callback, name, vdev->dev.parent);
25761ce9e605STiwei Bie
2577d79dca75STiwei Bie 	return vring_create_virtqueue_split(index, num, vring_align,
2578d79dca75STiwei Bie 			vdev, weak_barriers, may_reduce_num,
25792713ea3cSJason Wang 			context, notify, callback, name, vdev->dev.parent);
25802a2d1382SAndy Lutomirski }
25812a2d1382SAndy Lutomirski EXPORT_SYMBOL_GPL(vring_create_virtqueue);
25822a2d1382SAndy Lutomirski
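/*
 * Editor's illustrative sketch, not part of the original file: how a
 * transport might allocate queue 0. example_notify() and the numbers
 * (128 entries, page alignment) are assumptions, not requirements.
 */
static bool example_notify(struct virtqueue *vq)
{
	/* A real transport would kick the device here, e.g. an MMIO write. */
	return true;
}

static struct virtqueue * __maybe_unused
example_create_vq(struct virtio_device *vdev, void (*cb)(struct virtqueue *))
{
	return vring_create_virtqueue(0, 128, PAGE_SIZE, vdev,
				      true,  /* weak_barriers */
				      true,  /* may_reduce_num */
				      false, /* context */
				      example_notify, cb, "example-vq");
}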
25832713ea3cSJason Wang struct virtqueue *vring_create_virtqueue_dma(
25842713ea3cSJason Wang 	unsigned int index,
25852713ea3cSJason Wang 	unsigned int num,
25862713ea3cSJason Wang 	unsigned int vring_align,
25872713ea3cSJason Wang 	struct virtio_device *vdev,
25882713ea3cSJason Wang 	bool weak_barriers,
25892713ea3cSJason Wang 	bool may_reduce_num,
25902713ea3cSJason Wang 	bool context,
25912713ea3cSJason Wang 	bool (*notify)(struct virtqueue *),
25922713ea3cSJason Wang 	void (*callback)(struct virtqueue *),
25932713ea3cSJason Wang 	const char *name,
25942713ea3cSJason Wang 	struct device *dma_dev)
25952713ea3cSJason Wang {
25962713ea3cSJason Wang
25972713ea3cSJason Wang 	if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
25982713ea3cSJason Wang 		return vring_create_virtqueue_packed(index, num, vring_align,
25992713ea3cSJason Wang 				vdev, weak_barriers, may_reduce_num,
26002713ea3cSJason Wang 				context, notify, callback, name, dma_dev);
26012713ea3cSJason Wang
26022713ea3cSJason Wang 	return vring_create_virtqueue_split(index, num, vring_align,
26032713ea3cSJason Wang 			vdev, weak_barriers, may_reduce_num,
26042713ea3cSJason Wang 			context, notify, callback, name, dma_dev);
26052713ea3cSJason Wang }
26062713ea3cSJason Wang EXPORT_SYMBOL_GPL(vring_create_virtqueue_dma);
26072713ea3cSJason Wang
2608c790e8e1SXuan Zhuo /**
2609c790e8e1SXuan Zhuo  * virtqueue_resize - resize the vring of vq
2610c790e8e1SXuan Zhuo  * @_vq: the struct virtqueue we're talking about.
2611c790e8e1SXuan Zhuo  * @num: new number of ring entries
2612c790e8e1SXuan Zhuo  * @recycle: callback to recycle buffers that are no longer used
2613c790e8e1SXuan Zhuo  *
2614c790e8e1SXuan Zhuo  * If a new vring really needs to be created, this first puts the current vq
2615c790e8e1SXuan Zhuo  * into the reset state, then calls the passed callback to recycle each buffer
2616c790e8e1SXuan Zhuo  * that is no longer used. Only after the new vring has been successfully
2617c790e8e1SXuan Zhuo  * created is the old vring released.
2618c790e8e1SXuan Zhuo  *
2619c790e8e1SXuan Zhuo  * Caller must ensure we don't call this with other virtqueue operations
2620c790e8e1SXuan Zhuo  * at the same time (except where noted).
2621c790e8e1SXuan Zhuo  *
2622c790e8e1SXuan Zhuo  * Returns zero or a negative error.
2623c790e8e1SXuan Zhuo  * 0: success.
2624c790e8e1SXuan Zhuo  * -ENOMEM: Failed to allocate a new ring; fall back to the original ring size.
2625c790e8e1SXuan Zhuo  *          The vq can still work normally.
2626c790e8e1SXuan Zhuo  * -EBUSY: Failed to sync with the device; the vq may not work properly.
2627c790e8e1SXuan Zhuo  * -ENOENT: Transport or device does not support resizing.
2628c790e8e1SXuan Zhuo  * -E2BIG/-EINVAL: @num is out of range or invalid.
2629c790e8e1SXuan Zhuo  * -EPERM: Operation not permitted.
2630c790e8e1SXuan Zhuo  *
2631c790e8e1SXuan Zhuo  */
2632c790e8e1SXuan Zhuo int virtqueue_resize(struct virtqueue *_vq, u32 num,
2633c790e8e1SXuan Zhuo 		     void (*recycle)(struct virtqueue *vq, void *buf))
2634c790e8e1SXuan Zhuo {
2635c790e8e1SXuan Zhuo 	struct vring_virtqueue *vq = to_vvq(_vq);
2636c790e8e1SXuan Zhuo 	struct virtio_device *vdev = vq->vq.vdev;
2637c790e8e1SXuan Zhuo 	void *buf;
2638c790e8e1SXuan Zhuo 	int err;
2639c790e8e1SXuan Zhuo
2640c790e8e1SXuan Zhuo 	if (!vq->we_own_ring)
2641c790e8e1SXuan Zhuo 		return -EPERM;
2642c790e8e1SXuan Zhuo
2643c790e8e1SXuan Zhuo 	if (num > vq->vq.num_max)
2644c790e8e1SXuan Zhuo 		return -E2BIG;
2645c790e8e1SXuan Zhuo
2646c790e8e1SXuan Zhuo 	if (!num)
2647c790e8e1SXuan Zhuo 		return -EINVAL;
2648c790e8e1SXuan Zhuo
2649c790e8e1SXuan Zhuo 	if ((vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num) == num)
2650c790e8e1SXuan Zhuo 		return 0;
2651c790e8e1SXuan Zhuo
2652c790e8e1SXuan Zhuo 	if (!vdev->config->disable_vq_and_reset)
2653c790e8e1SXuan Zhuo 		return -ENOENT;
2654c790e8e1SXuan Zhuo
2655c790e8e1SXuan Zhuo 	if (!vdev->config->enable_vq_after_reset)
2656c790e8e1SXuan Zhuo 		return -ENOENT;
2657c790e8e1SXuan Zhuo
2658c790e8e1SXuan Zhuo 	err = vdev->config->disable_vq_and_reset(_vq);
2659c790e8e1SXuan Zhuo 	if (err)
2660c790e8e1SXuan Zhuo 		return err;
2661c790e8e1SXuan Zhuo
2662c790e8e1SXuan Zhuo 	while ((buf = virtqueue_detach_unused_buf(_vq)) != NULL)
2663c790e8e1SXuan Zhuo 		recycle(_vq, buf);
2664c790e8e1SXuan Zhuo
2665c790e8e1SXuan Zhuo 	if (vq->packed_ring)
2666c790e8e1SXuan Zhuo 		err = virtqueue_resize_packed(_vq, num);
2667c790e8e1SXuan Zhuo 	else
2668c790e8e1SXuan Zhuo 		err = virtqueue_resize_split(_vq, num);
2669c790e8e1SXuan Zhuo
2670c790e8e1SXuan Zhuo 	if (vdev->config->enable_vq_after_reset(_vq))
2671c790e8e1SXuan Zhuo 		return -EBUSY;
2672c790e8e1SXuan Zhuo
2673c790e8e1SXuan Zhuo 	return err;
2674c790e8e1SXuan Zhuo }
2675c790e8e1SXuan Zhuo EXPORT_SYMBOL_GPL(virtqueue_resize);
2676c790e8e1SXuan Zhuo
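/*
 * Editor's illustrative sketch, not part of the original file: resizing a
 * ring at runtime. The recycle callback sees every buffer that was queued
 * but never used; buffers are assumed kmalloc'd and the wrapper name is
 * hypothetical.
 */
static void example_recycle(struct virtqueue *vq, void *buf)
{
	kfree(buf);
}

static int __maybe_unused example_resize_vq(struct virtqueue *vq, u32 num)
{
	/* On -ENOMEM the old ring is kept and the vq keeps working. */
	return virtqueue_resize(vq, num, example_recycle);
}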
26771ce9e605STiwei Bie /* Only available for split ring */
26782a2d1382SAndy Lutomirski struct virtqueue *vring_new_virtqueue(unsigned int index,
26792a2d1382SAndy Lutomirski 				      unsigned int num,
26802a2d1382SAndy Lutomirski 				      unsigned int vring_align,
26812a2d1382SAndy Lutomirski 				      struct virtio_device *vdev,
26822a2d1382SAndy Lutomirski 				      bool weak_barriers,
2683f94682ddSMichael S. Tsirkin 				      bool context,
26842a2d1382SAndy Lutomirski 				      void *pages,
26852a2d1382SAndy Lutomirski 				      bool (*notify)(struct virtqueue *vq),
26862a2d1382SAndy Lutomirski 				      void (*callback)(struct virtqueue *vq),
26872a2d1382SAndy Lutomirski 				      const char *name)
26882a2d1382SAndy Lutomirski {
2689cd4c812aSXuan Zhuo 	struct vring_virtqueue_split vring_split = {};
26901ce9e605STiwei Bie
26911ce9e605STiwei Bie 	if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
26921ce9e605STiwei Bie 		return NULL;
26931ce9e605STiwei Bie
2694cd4c812aSXuan Zhuo 	vring_init(&vring_split.vring, num, pages, vring_align);
2695cd4c812aSXuan Zhuo 	return __vring_new_virtqueue(index, &vring_split, vdev, weak_barriers,
26962713ea3cSJason Wang 				     context, notify, callback, name,
26972713ea3cSJason Wang 				     vdev->dev.parent);
26982a2d1382SAndy Lutomirski }
2699c6fd4701SRusty Russell EXPORT_SYMBOL_GPL(vring_new_virtqueue);
27000a8a69ddSRusty Russell
27013ea19e32SXuan Zhuo static void vring_free(struct virtqueue *_vq)
27020a8a69ddSRusty Russell {
27032a2d1382SAndy Lutomirski 	struct vring_virtqueue *vq = to_vvq(_vq);
27042a2d1382SAndy Lutomirski
27052a2d1382SAndy Lutomirski 	if (vq->we_own_ring) {
27061ce9e605STiwei Bie 		if (vq->packed_ring) {
27071ce9e605STiwei Bie 			vring_free_queue(vq->vq.vdev,
27081ce9e605STiwei Bie 					 vq->packed.ring_size_in_bytes,
27091ce9e605STiwei Bie 					 vq->packed.vring.desc,
27102713ea3cSJason Wang 					 vq->packed.ring_dma_addr,
27112713ea3cSJason Wang 					 vring_dma_dev(vq));
27121ce9e605STiwei Bie
27131ce9e605STiwei Bie 			vring_free_queue(vq->vq.vdev,
27141ce9e605STiwei Bie 					 vq->packed.event_size_in_bytes,
27151ce9e605STiwei Bie 					 vq->packed.vring.driver,
27162713ea3cSJason Wang 					 vq->packed.driver_event_dma_addr,
27172713ea3cSJason Wang 					 vring_dma_dev(vq));
27181ce9e605STiwei Bie
27191ce9e605STiwei Bie 			vring_free_queue(vq->vq.vdev,
27201ce9e605STiwei Bie 					 vq->packed.event_size_in_bytes,
27211ce9e605STiwei Bie 					 vq->packed.vring.device,
27222713ea3cSJason Wang 					 vq->packed.device_event_dma_addr,
27232713ea3cSJason Wang 					 vring_dma_dev(vq));
27241ce9e605STiwei Bie
27251ce9e605STiwei Bie 			kfree(vq->packed.desc_state);
27261ce9e605STiwei Bie 			kfree(vq->packed.desc_extra);
27271ce9e605STiwei Bie 		} else {
2728d79dca75STiwei Bie 			vring_free_queue(vq->vq.vdev,
2729d79dca75STiwei Bie 					 vq->split.queue_size_in_bytes,
2730d79dca75STiwei Bie 					 vq->split.vring.desc,
27312713ea3cSJason Wang 					 vq->split.queue_dma_addr,
27322713ea3cSJason Wang 					 vring_dma_dev(vq));
2733f13f09a1SSuman Anna 		}
2734f13f09a1SSuman Anna 	}
273572b5e895SJason Wang 	if (!vq->packed_ring) {
2736cbeedb72STiwei Bie 		kfree(vq->split.desc_state);
273772b5e895SJason Wang 		kfree(vq->split.desc_extra);
273872b5e895SJason Wang 	}
27393ea19e32SXuan Zhuo }
27403ea19e32SXuan Zhuo
27413ea19e32SXuan Zhuo void vring_del_virtqueue(struct virtqueue *_vq)
27423ea19e32SXuan Zhuo {
27433ea19e32SXuan Zhuo 	struct vring_virtqueue *vq = to_vvq(_vq);
27443ea19e32SXuan Zhuo
27453ea19e32SXuan Zhuo 	spin_lock(&vq->vq.vdev->vqs_list_lock);
27463ea19e32SXuan Zhuo 	list_del(&_vq->list);
27473ea19e32SXuan Zhuo 	spin_unlock(&vq->vq.vdev->vqs_list_lock);
27483ea19e32SXuan Zhuo
27493ea19e32SXuan Zhuo 	vring_free(_vq);
27503ea19e32SXuan Zhuo
27512a2d1382SAndy Lutomirski 	kfree(vq);
27520a8a69ddSRusty Russell }
2753c6fd4701SRusty Russell EXPORT_SYMBOL_GPL(vring_del_virtqueue);
27540a8a69ddSRusty Russell
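/*
 * Editor's illustrative sketch, not part of the original file: the teardown
 * order the functions above imply. The device must already be reset (queue
 * inactive) before this runs; buffers are assumed kmalloc'd.
 */
static void __maybe_unused example_teardown_vq(struct virtqueue *vq)
{
	void *buf;

	/* Reclaim every queued-but-unused "data" token first... */
	while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
		kfree(buf);

	/* ...then unlink the vq and free the ring if this file owns it. */
	vring_del_virtqueue(vq);
}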
2755e34f8725SRusty Russell /* Manipulates transport-specific feature bits. */
2756e34f8725SRusty Russell void vring_transport_features(struct virtio_device *vdev)
2757e34f8725SRusty Russell {
2758e34f8725SRusty Russell 	unsigned int i;
2759e34f8725SRusty Russell
2760e34f8725SRusty Russell 	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
2761e34f8725SRusty Russell 		switch (i) {
27629fa29b9dSMark McLoughlin 		case VIRTIO_RING_F_INDIRECT_DESC:
27639fa29b9dSMark McLoughlin 			break;
2764a5c262c5SMichael S. Tsirkin 		case VIRTIO_RING_F_EVENT_IDX:
2765a5c262c5SMichael S. Tsirkin 			break;
2766747ae34aSMichael S. Tsirkin 		case VIRTIO_F_VERSION_1:
2767747ae34aSMichael S. Tsirkin 			break;
2768321bd212SMichael S. Tsirkin 		case VIRTIO_F_ACCESS_PLATFORM:
27691a937693SMichael S. Tsirkin 			break;
2770f959a128STiwei Bie 		case VIRTIO_F_RING_PACKED:
2771f959a128STiwei Bie 			break;
277245383fb0STiwei Bie 		case VIRTIO_F_ORDER_PLATFORM:
277345383fb0STiwei Bie 			break;
2774e34f8725SRusty Russell 		default:
2775e34f8725SRusty Russell 			/* We don't understand this bit. */
2776e16e12beSMichael S. Tsirkin 			__virtio_clear_bit(vdev, i);
2777e34f8725SRusty Russell 		}
2778e34f8725SRusty Russell 	}
2779e34f8725SRusty Russell }
2780e34f8725SRusty Russell EXPORT_SYMBOL_GPL(vring_transport_features);
2781e34f8725SRusty Russell
27825dfc1762SRusty Russell /**
27835dfc1762SRusty Russell  * virtqueue_get_vring_size - return the size of the virtqueue's vring
2784a5581206SJiang Biao  * @_vq: the struct virtqueue containing the vring of interest.
27855dfc1762SRusty Russell  *
27865dfc1762SRusty Russell  * Returns the size of the vring. This is mainly used for boasting to
27875dfc1762SRusty Russell  * userspace. Unlike other operations, this need not be serialized.
27885dfc1762SRusty Russell  */
2789*4b6ec919SFeng Liu unsigned int virtqueue_get_vring_size(const struct virtqueue *_vq)
27908f9f4668SRick Jones {
27918f9f4668SRick Jones
2792*4b6ec919SFeng Liu 	const struct vring_virtqueue *vq = to_vvq(_vq);
27938f9f4668SRick Jones
27941ce9e605STiwei Bie 	return vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num;
27958f9f4668SRick Jones }
27968f9f4668SRick Jones EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);
27978f9f4668SRick Jones
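/*
 * Editor's illustrative sketch, not part of the original file: sizing a
 * per-entry state table from the ring size, which works for both split and
 * packed rings. The helper name is hypothetical.
 */
static void * __maybe_unused example_alloc_per_entry_state(struct virtqueue *vq,
							   size_t elem_size)
{
	return kcalloc(virtqueue_get_vring_size(vq), elem_size, GFP_KERNEL);
}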
279832510631SXuan Zhuo /*
279932510631SXuan Zhuo  * This function should only be called by the core, not directly by the driver.
280032510631SXuan Zhuo  */
280132510631SXuan Zhuo void __virtqueue_break(struct virtqueue *_vq)
280232510631SXuan Zhuo {
280332510631SXuan Zhuo 	struct vring_virtqueue *vq = to_vvq(_vq);
280432510631SXuan Zhuo
280532510631SXuan Zhuo 	/* Pairs with READ_ONCE() in virtqueue_is_broken(). */
280632510631SXuan Zhuo 	WRITE_ONCE(vq->broken, true);
280732510631SXuan Zhuo }
280832510631SXuan Zhuo EXPORT_SYMBOL_GPL(__virtqueue_break);
280932510631SXuan Zhuo
281032510631SXuan Zhuo /*
281132510631SXuan Zhuo  * This function should only be called by the core, not directly by the driver.
281232510631SXuan Zhuo  */
281332510631SXuan Zhuo void __virtqueue_unbreak(struct virtqueue *_vq)
281432510631SXuan Zhuo {
281532510631SXuan Zhuo 	struct vring_virtqueue *vq = to_vvq(_vq);
281632510631SXuan Zhuo
281732510631SXuan Zhuo 	/* Pairs with READ_ONCE() in virtqueue_is_broken(). */
281832510631SXuan Zhuo 	WRITE_ONCE(vq->broken, false);
281932510631SXuan Zhuo }
282032510631SXuan Zhuo EXPORT_SYMBOL_GPL(__virtqueue_unbreak);
282132510631SXuan Zhuo
2822*4b6ec919SFeng Liu bool virtqueue_is_broken(const struct virtqueue *_vq)
2823b3b32c94SHeinz Graalfs {
2824*4b6ec919SFeng Liu 	const struct vring_virtqueue *vq = to_vvq(_vq);
2825b3b32c94SHeinz Graalfs
282660f07798SParav Pandit 	return READ_ONCE(vq->broken);
2827b3b32c94SHeinz Graalfs }
2828b3b32c94SHeinz Graalfs EXPORT_SYMBOL_GPL(virtqueue_is_broken);
2829b3b32c94SHeinz Graalfs
2830e2dcdfe9SRusty Russell /*
2831e2dcdfe9SRusty Russell  * This should prevent the device from being used, allowing drivers to
2832e2dcdfe9SRusty Russell  * recover. You may need to grab appropriate locks to flush.
2833e2dcdfe9SRusty Russell  */
2834e2dcdfe9SRusty Russell void virtio_break_device(struct virtio_device *dev)
2835e2dcdfe9SRusty Russell {
2836e2dcdfe9SRusty Russell 	struct virtqueue *_vq;
2837e2dcdfe9SRusty Russell
28380e566c8fSParav Pandit 	spin_lock(&dev->vqs_list_lock);
2839e2dcdfe9SRusty Russell 	list_for_each_entry(_vq, &dev->vqs, list) {
2840e2dcdfe9SRusty Russell 		struct vring_virtqueue *vq = to_vvq(_vq);
284160f07798SParav Pandit
284260f07798SParav Pandit 		/* Pairs with READ_ONCE() in virtqueue_is_broken(). */
284360f07798SParav Pandit 		WRITE_ONCE(vq->broken, true);
2844e2dcdfe9SRusty Russell 	}
28450e566c8fSParav Pandit 	spin_unlock(&dev->vqs_list_lock);
2846e2dcdfe9SRusty Russell }
2847e2dcdfe9SRusty Russell EXPORT_SYMBOL_GPL(virtio_break_device);
2848e2dcdfe9SRusty Russell
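/*
 * Editor's illustrative sketch, not part of the original file: a driver
 * noticing a fatal inconsistency, marking the whole device broken, and
 * checking for that state elsewhere. The function is hypothetical.
 */
static bool __maybe_unused example_handle_fatal_error(struct virtqueue *vq)
{
	if (virtqueue_is_broken(vq))
		return false;	/* Already broken: nothing more to do. */

	/* Stop all queues of the owning device so the driver can recover. */
	virtio_break_device(vq->vdev);
	return true;
}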
2849be83f04dSJason Wang /*
2850be83f04dSJason Wang  * This should allow the device to be used by the driver. You may
2851be83f04dSJason Wang  * need to grab appropriate locks to flush the write to
2852be83f04dSJason Wang  * vq->broken. This should only be used in specific cases, e.g.
2853be83f04dSJason Wang  * probing and restoring. This function should only be called by the
2854be83f04dSJason Wang  * core, not directly by the driver.
2855be83f04dSJason Wang  */
2856be83f04dSJason Wang void __virtio_unbreak_device(struct virtio_device *dev)
2857be83f04dSJason Wang {
2858be83f04dSJason Wang 	struct virtqueue *_vq;
2859be83f04dSJason Wang
2860be83f04dSJason Wang 	spin_lock(&dev->vqs_list_lock);
2861be83f04dSJason Wang 	list_for_each_entry(_vq, &dev->vqs, list) {
2862be83f04dSJason Wang 		struct vring_virtqueue *vq = to_vvq(_vq);
2863be83f04dSJason Wang
2864be83f04dSJason Wang 		/* Pairs with READ_ONCE() in virtqueue_is_broken(). */
2865be83f04dSJason Wang 		WRITE_ONCE(vq->broken, false);
2866be83f04dSJason Wang 	}
2867be83f04dSJason Wang 	spin_unlock(&dev->vqs_list_lock);
2868be83f04dSJason Wang }
2869be83f04dSJason Wang EXPORT_SYMBOL_GPL(__virtio_unbreak_device);
2870be83f04dSJason Wang
2871*4b6ec919SFeng Liu dma_addr_t virtqueue_get_desc_addr(const struct virtqueue *_vq)
287289062652SCornelia Huck {
2873*4b6ec919SFeng Liu 	const struct vring_virtqueue *vq = to_vvq(_vq);
287489062652SCornelia Huck
28752a2d1382SAndy Lutomirski 	BUG_ON(!vq->we_own_ring);
287689062652SCornelia Huck
28771ce9e605STiwei Bie 	if (vq->packed_ring)
28781ce9e605STiwei Bie 		return vq->packed.ring_dma_addr;
28791ce9e605STiwei Bie
2880d79dca75STiwei Bie 	return vq->split.queue_dma_addr;
28812a2d1382SAndy Lutomirski }
28822a2d1382SAndy Lutomirski EXPORT_SYMBOL_GPL(virtqueue_get_desc_addr);
28832a2d1382SAndy Lutomirski
2884*4b6ec919SFeng Liu dma_addr_t virtqueue_get_avail_addr(const struct virtqueue *_vq)
288589062652SCornelia Huck {
2886*4b6ec919SFeng Liu 	const struct vring_virtqueue *vq = to_vvq(_vq);
288789062652SCornelia Huck
28882a2d1382SAndy Lutomirski 	BUG_ON(!vq->we_own_ring);
28892a2d1382SAndy Lutomirski
28901ce9e605STiwei Bie 	if (vq->packed_ring)
28911ce9e605STiwei Bie 		return vq->packed.driver_event_dma_addr;
28921ce9e605STiwei Bie
2893d79dca75STiwei Bie 	return vq->split.queue_dma_addr +
2894e593bf97STiwei Bie 		((char *)vq->split.vring.avail - (char *)vq->split.vring.desc);
289589062652SCornelia Huck }
28962a2d1382SAndy Lutomirski EXPORT_SYMBOL_GPL(virtqueue_get_avail_addr);
28972a2d1382SAndy Lutomirski
2898*4b6ec919SFeng Liu dma_addr_t virtqueue_get_used_addr(const struct virtqueue *_vq)
28992a2d1382SAndy Lutomirski {
2900*4b6ec919SFeng Liu 	const struct vring_virtqueue *vq = to_vvq(_vq);
29012a2d1382SAndy Lutomirski
29022a2d1382SAndy Lutomirski 	BUG_ON(!vq->we_own_ring);
29032a2d1382SAndy Lutomirski
29041ce9e605STiwei Bie 	if (vq->packed_ring)
29051ce9e605STiwei Bie 		return vq->packed.device_event_dma_addr;
29061ce9e605STiwei Bie
2907d79dca75STiwei Bie 	return vq->split.queue_dma_addr +
2908e593bf97STiwei Bie 		((char *)vq->split.vring.used - (char *)vq->split.vring.desc);
29092a2d1382SAndy Lutomirski }
29102a2d1382SAndy Lutomirski EXPORT_SYMBOL_GPL(virtqueue_get_used_addr);
29112a2d1382SAndy Lutomirski
29121ce9e605STiwei Bie /* Only available for split ring */
2913*4b6ec919SFeng Liu const struct vring *virtqueue_get_vring(const struct virtqueue *vq)
29142a2d1382SAndy Lutomirski {
2915e593bf97STiwei Bie 	return &to_vvq(vq)->split.vring;
29162a2d1382SAndy Lutomirski }
29172a2d1382SAndy Lutomirski EXPORT_SYMBOL_GPL(virtqueue_get_vring);
291889062652SCornelia Huck
2919c6fd4701SRusty Russell MODULE_LICENSE("GPL");
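/*
 * Editor's illustrative sketch, appended after the original end of file: a
 * transport reading back the ring's DMA addresses, e.g. to program them into
 * device registers. Only valid for rings allocated by this file
 * (vq->we_own_ring); otherwise the helpers above BUG().
 */
static void __maybe_unused example_read_ring_addrs(struct virtqueue *vq,
						   dma_addr_t *desc,
						   dma_addr_t *avail,
						   dma_addr_t *used)
{
	*desc = virtqueue_get_desc_addr(vq);
	*avail = virtqueue_get_avail_addr(vq);
	*used = virtqueue_get_used_addr(vq);
}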