11fcf0512SMichael S. Tsirkin /* 21fcf0512SMichael S. Tsirkin * Virtio PCI driver - modern (virtio 1.0) device support 31fcf0512SMichael S. Tsirkin * 41fcf0512SMichael S. Tsirkin * This module allows virtio devices to be used over a virtual PCI device. 51fcf0512SMichael S. Tsirkin * This can be used with QEMU based VMMs like KVM or Xen. 61fcf0512SMichael S. Tsirkin * 71fcf0512SMichael S. Tsirkin * Copyright IBM Corp. 2007 81fcf0512SMichael S. Tsirkin * Copyright Red Hat, Inc. 2014 91fcf0512SMichael S. Tsirkin * 101fcf0512SMichael S. Tsirkin * Authors: 111fcf0512SMichael S. Tsirkin * Anthony Liguori <aliguori@us.ibm.com> 121fcf0512SMichael S. Tsirkin * Rusty Russell <rusty@rustcorp.com.au> 131fcf0512SMichael S. Tsirkin * Michael S. Tsirkin <mst@redhat.com> 141fcf0512SMichael S. Tsirkin * 151fcf0512SMichael S. Tsirkin * This work is licensed under the terms of the GNU GPL, version 2 or later. 161fcf0512SMichael S. Tsirkin * See the COPYING file in the top-level directory. 171fcf0512SMichael S. Tsirkin * 181fcf0512SMichael S. Tsirkin */ 191fcf0512SMichael S. Tsirkin 201fcf0512SMichael S. Tsirkin #define VIRTIO_PCI_NO_LEGACY 211fcf0512SMichael S. Tsirkin #include "virtio_pci_common.h" 221fcf0512SMichael S. Tsirkin 23c5d4c2c9SMichael S. Tsirkin /* 24c5d4c2c9SMichael S. Tsirkin * Type-safe wrappers for io accesses. 25c5d4c2c9SMichael S. Tsirkin * Use these to enforce at compile time the following spec requirement: 26c5d4c2c9SMichael S. Tsirkin * 27c5d4c2c9SMichael S. Tsirkin * The driver MUST access each field using the “natural” access 28c5d4c2c9SMichael S. Tsirkin * method, i.e. 32-bit accesses for 32-bit fields, 16-bit accesses 29c5d4c2c9SMichael S. Tsirkin * for 16-bit fields and 8-bit accesses for 8-bit fields. 30c5d4c2c9SMichael S. Tsirkin */ 31c5d4c2c9SMichael S. Tsirkin static inline u8 vp_ioread8(u8 __iomem *addr) 32c5d4c2c9SMichael S. Tsirkin { 33c5d4c2c9SMichael S. Tsirkin return ioread8(addr); 34c5d4c2c9SMichael S. Tsirkin } 35c5d4c2c9SMichael S. Tsirkin static inline u16 vp_ioread16 (u16 __iomem *addr) 36c5d4c2c9SMichael S. Tsirkin { 37c5d4c2c9SMichael S. Tsirkin return ioread16(addr); 38c5d4c2c9SMichael S. Tsirkin } 39c5d4c2c9SMichael S. Tsirkin 40c5d4c2c9SMichael S. Tsirkin static inline u32 vp_ioread32(u32 __iomem *addr) 41c5d4c2c9SMichael S. Tsirkin { 42c5d4c2c9SMichael S. Tsirkin return ioread32(addr); 43c5d4c2c9SMichael S. Tsirkin } 44c5d4c2c9SMichael S. Tsirkin 45c5d4c2c9SMichael S. Tsirkin static inline void vp_iowrite8(u8 value, u8 __iomem *addr) 46c5d4c2c9SMichael S. Tsirkin { 47c5d4c2c9SMichael S. Tsirkin iowrite8(value, addr); 48c5d4c2c9SMichael S. Tsirkin } 49c5d4c2c9SMichael S. Tsirkin 50c5d4c2c9SMichael S. Tsirkin static inline void vp_iowrite16(u16 value, u16 __iomem *addr) 51c5d4c2c9SMichael S. Tsirkin { 52c5d4c2c9SMichael S. Tsirkin iowrite16(value, addr); 53c5d4c2c9SMichael S. Tsirkin } 54c5d4c2c9SMichael S. Tsirkin 55c5d4c2c9SMichael S. Tsirkin static inline void vp_iowrite32(u32 value, u32 __iomem *addr) 56c5d4c2c9SMichael S. Tsirkin { 57c5d4c2c9SMichael S. Tsirkin iowrite32(value, addr); 58c5d4c2c9SMichael S. Tsirkin } 59c5d4c2c9SMichael S. Tsirkin 60a8557d32SMichael S. Tsirkin static void vp_iowrite64_twopart(u64 val, 61a8557d32SMichael S. Tsirkin __le32 __iomem *lo, __le32 __iomem *hi) 62a8557d32SMichael S. Tsirkin { 63a8557d32SMichael S. Tsirkin vp_iowrite32((u32)val, lo); 64a8557d32SMichael S. Tsirkin vp_iowrite32(val >> 32, hi); 65a8557d32SMichael S. Tsirkin } 66a8557d32SMichael S. Tsirkin 671fcf0512SMichael S. Tsirkin static void __iomem *map_capability(struct pci_dev *dev, int off, 681fcf0512SMichael S. Tsirkin size_t minlen, 691fcf0512SMichael S. Tsirkin u32 align, 701fcf0512SMichael S. Tsirkin u32 start, u32 size, 711fcf0512SMichael S. Tsirkin size_t *len) 721fcf0512SMichael S. Tsirkin { 731fcf0512SMichael S. Tsirkin u8 bar; 741fcf0512SMichael S. Tsirkin u32 offset, length; 751fcf0512SMichael S. Tsirkin void __iomem *p; 761fcf0512SMichael S. Tsirkin 771fcf0512SMichael S. Tsirkin pci_read_config_byte(dev, off + offsetof(struct virtio_pci_cap, 781fcf0512SMichael S. Tsirkin bar), 791fcf0512SMichael S. Tsirkin &bar); 801fcf0512SMichael S. Tsirkin pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, offset), 811fcf0512SMichael S. Tsirkin &offset); 821fcf0512SMichael S. Tsirkin pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, length), 831fcf0512SMichael S. Tsirkin &length); 841fcf0512SMichael S. Tsirkin 851fcf0512SMichael S. Tsirkin if (length <= start) { 861fcf0512SMichael S. Tsirkin dev_err(&dev->dev, 871fcf0512SMichael S. Tsirkin "virtio_pci: bad capability len %u (>%u expected)\n", 881fcf0512SMichael S. Tsirkin length, start); 891fcf0512SMichael S. Tsirkin return NULL; 901fcf0512SMichael S. Tsirkin } 911fcf0512SMichael S. Tsirkin 921fcf0512SMichael S. Tsirkin if (length - start < minlen) { 931fcf0512SMichael S. Tsirkin dev_err(&dev->dev, 941fcf0512SMichael S. Tsirkin "virtio_pci: bad capability len %u (>=%zu expected)\n", 951fcf0512SMichael S. Tsirkin length, minlen); 961fcf0512SMichael S. Tsirkin return NULL; 971fcf0512SMichael S. Tsirkin } 981fcf0512SMichael S. Tsirkin 991fcf0512SMichael S. Tsirkin length -= start; 1001fcf0512SMichael S. Tsirkin 1011fcf0512SMichael S. Tsirkin if (start + offset < offset) { 1021fcf0512SMichael S. Tsirkin dev_err(&dev->dev, 1031fcf0512SMichael S. Tsirkin "virtio_pci: map wrap-around %u+%u\n", 1041fcf0512SMichael S. Tsirkin start, offset); 1051fcf0512SMichael S. Tsirkin return NULL; 1061fcf0512SMichael S. Tsirkin } 1071fcf0512SMichael S. Tsirkin 1081fcf0512SMichael S. Tsirkin offset += start; 1091fcf0512SMichael S. Tsirkin 1101fcf0512SMichael S. Tsirkin if (offset & (align - 1)) { 1111fcf0512SMichael S. Tsirkin dev_err(&dev->dev, 1121fcf0512SMichael S. Tsirkin "virtio_pci: offset %u not aligned to %u\n", 1131fcf0512SMichael S. Tsirkin offset, align); 1141fcf0512SMichael S. Tsirkin return NULL; 1151fcf0512SMichael S. Tsirkin } 1161fcf0512SMichael S. Tsirkin 1171fcf0512SMichael S. Tsirkin if (length > size) 1181fcf0512SMichael S. Tsirkin length = size; 1191fcf0512SMichael S. Tsirkin 1201fcf0512SMichael S. Tsirkin if (len) 1211fcf0512SMichael S. Tsirkin *len = length; 1221fcf0512SMichael S. Tsirkin 1231fcf0512SMichael S. Tsirkin if (minlen + offset < minlen || 1241fcf0512SMichael S. Tsirkin minlen + offset > pci_resource_len(dev, bar)) { 1251fcf0512SMichael S. Tsirkin dev_err(&dev->dev, 1261fcf0512SMichael S. Tsirkin "virtio_pci: map virtio %zu@%u " 1271fcf0512SMichael S. Tsirkin "out of range on bar %i length %lu\n", 1281fcf0512SMichael S. Tsirkin minlen, offset, 1291fcf0512SMichael S. Tsirkin bar, (unsigned long)pci_resource_len(dev, bar)); 1301fcf0512SMichael S. Tsirkin return NULL; 1311fcf0512SMichael S. Tsirkin } 1321fcf0512SMichael S. Tsirkin 1331fcf0512SMichael S. Tsirkin p = pci_iomap_range(dev, bar, offset, length); 1341fcf0512SMichael S. Tsirkin if (!p) 1351fcf0512SMichael S. Tsirkin dev_err(&dev->dev, 1361fcf0512SMichael S. Tsirkin "virtio_pci: unable to map virtio %u@%u on bar %i\n", 1371fcf0512SMichael S. Tsirkin length, offset, bar); 1381fcf0512SMichael S. Tsirkin return p; 1391fcf0512SMichael S. Tsirkin } 1401fcf0512SMichael S. Tsirkin 1411fcf0512SMichael S. Tsirkin /* virtio config->get_features() implementation */ 1421fcf0512SMichael S. Tsirkin static u64 vp_get_features(struct virtio_device *vdev) 1431fcf0512SMichael S. Tsirkin { 1441fcf0512SMichael S. Tsirkin struct virtio_pci_device *vp_dev = to_vp_device(vdev); 1451fcf0512SMichael S. Tsirkin u64 features; 1461fcf0512SMichael S. Tsirkin 147a8557d32SMichael S. Tsirkin vp_iowrite32(0, &vp_dev->common->device_feature_select); 148a8557d32SMichael S. Tsirkin features = vp_ioread32(&vp_dev->common->device_feature); 149a8557d32SMichael S. Tsirkin vp_iowrite32(1, &vp_dev->common->device_feature_select); 150a8557d32SMichael S. Tsirkin features |= ((u64)vp_ioread32(&vp_dev->common->device_feature) << 32); 1511fcf0512SMichael S. Tsirkin 1521fcf0512SMichael S. Tsirkin return features; 1531fcf0512SMichael S. Tsirkin } 1541fcf0512SMichael S. Tsirkin 1551fcf0512SMichael S. Tsirkin /* virtio config->finalize_features() implementation */ 1561fcf0512SMichael S. Tsirkin static int vp_finalize_features(struct virtio_device *vdev) 1571fcf0512SMichael S. Tsirkin { 1581fcf0512SMichael S. Tsirkin struct virtio_pci_device *vp_dev = to_vp_device(vdev); 1591fcf0512SMichael S. Tsirkin 1601fcf0512SMichael S. Tsirkin /* Give virtio_ring a chance to accept features. */ 1611fcf0512SMichael S. Tsirkin vring_transport_features(vdev); 1621fcf0512SMichael S. Tsirkin 1631fcf0512SMichael S. Tsirkin if (!__virtio_test_bit(vdev, VIRTIO_F_VERSION_1)) { 1641fcf0512SMichael S. Tsirkin dev_err(&vdev->dev, "virtio: device uses modern interface " 1651fcf0512SMichael S. Tsirkin "but does not have VIRTIO_F_VERSION_1\n"); 1661fcf0512SMichael S. Tsirkin return -EINVAL; 1671fcf0512SMichael S. Tsirkin } 1681fcf0512SMichael S. Tsirkin 169a8557d32SMichael S. Tsirkin vp_iowrite32(0, &vp_dev->common->guest_feature_select); 170a8557d32SMichael S. Tsirkin vp_iowrite32((u32)vdev->features, &vp_dev->common->guest_feature); 171a8557d32SMichael S. Tsirkin vp_iowrite32(1, &vp_dev->common->guest_feature_select); 172a8557d32SMichael S. Tsirkin vp_iowrite32(vdev->features >> 32, &vp_dev->common->guest_feature); 1731fcf0512SMichael S. Tsirkin 1741fcf0512SMichael S. Tsirkin return 0; 1751fcf0512SMichael S. Tsirkin } 1761fcf0512SMichael S. Tsirkin 1771fcf0512SMichael S. Tsirkin /* virtio config->get() implementation */ 1781fcf0512SMichael S. Tsirkin static void vp_get(struct virtio_device *vdev, unsigned offset, 1791fcf0512SMichael S. Tsirkin void *buf, unsigned len) 1801fcf0512SMichael S. Tsirkin { 1811fcf0512SMichael S. Tsirkin struct virtio_pci_device *vp_dev = to_vp_device(vdev); 1821fcf0512SMichael S. Tsirkin u8 b; 1831fcf0512SMichael S. Tsirkin __le16 w; 1841fcf0512SMichael S. Tsirkin __le32 l; 1851fcf0512SMichael S. Tsirkin 1861fcf0512SMichael S. Tsirkin BUG_ON(offset + len > vp_dev->device_len); 1871fcf0512SMichael S. Tsirkin 1881fcf0512SMichael S. Tsirkin switch (len) { 1891fcf0512SMichael S. Tsirkin case 1: 1901fcf0512SMichael S. Tsirkin b = ioread8(vp_dev->device + offset); 1911fcf0512SMichael S. Tsirkin memcpy(buf, &b, sizeof b); 1921fcf0512SMichael S. Tsirkin break; 1931fcf0512SMichael S. Tsirkin case 2: 1941fcf0512SMichael S. Tsirkin w = cpu_to_le16(ioread16(vp_dev->device + offset)); 1951fcf0512SMichael S. Tsirkin memcpy(buf, &w, sizeof w); 1961fcf0512SMichael S. Tsirkin break; 1971fcf0512SMichael S. Tsirkin case 4: 1981fcf0512SMichael S. Tsirkin l = cpu_to_le32(ioread32(vp_dev->device + offset)); 1991fcf0512SMichael S. Tsirkin memcpy(buf, &l, sizeof l); 2001fcf0512SMichael S. Tsirkin break; 2011fcf0512SMichael S. Tsirkin case 8: 2021fcf0512SMichael S. Tsirkin l = cpu_to_le32(ioread32(vp_dev->device + offset)); 2031fcf0512SMichael S. Tsirkin memcpy(buf, &l, sizeof l); 2041fcf0512SMichael S. Tsirkin l = cpu_to_le32(ioread32(vp_dev->device + offset + sizeof l)); 2051fcf0512SMichael S. Tsirkin memcpy(buf + sizeof l, &l, sizeof l); 2061fcf0512SMichael S. Tsirkin break; 2071fcf0512SMichael S. Tsirkin default: 2081fcf0512SMichael S. Tsirkin BUG(); 2091fcf0512SMichael S. Tsirkin } 2101fcf0512SMichael S. Tsirkin } 2111fcf0512SMichael S. Tsirkin 2121fcf0512SMichael S. Tsirkin /* the config->set() implementation. it's symmetric to the config->get() 2131fcf0512SMichael S. Tsirkin * implementation */ 2141fcf0512SMichael S. Tsirkin static void vp_set(struct virtio_device *vdev, unsigned offset, 2151fcf0512SMichael S. Tsirkin const void *buf, unsigned len) 2161fcf0512SMichael S. Tsirkin { 2171fcf0512SMichael S. Tsirkin struct virtio_pci_device *vp_dev = to_vp_device(vdev); 2181fcf0512SMichael S. Tsirkin u8 b; 2191fcf0512SMichael S. Tsirkin __le16 w; 2201fcf0512SMichael S. Tsirkin __le32 l; 2211fcf0512SMichael S. Tsirkin 2221fcf0512SMichael S. Tsirkin BUG_ON(offset + len > vp_dev->device_len); 2231fcf0512SMichael S. Tsirkin 2241fcf0512SMichael S. Tsirkin switch (len) { 2251fcf0512SMichael S. Tsirkin case 1: 2261fcf0512SMichael S. Tsirkin memcpy(&b, buf, sizeof b); 2271fcf0512SMichael S. Tsirkin iowrite8(b, vp_dev->device + offset); 2281fcf0512SMichael S. Tsirkin break; 2291fcf0512SMichael S. Tsirkin case 2: 2301fcf0512SMichael S. Tsirkin memcpy(&w, buf, sizeof w); 2311fcf0512SMichael S. Tsirkin iowrite16(le16_to_cpu(w), vp_dev->device + offset); 2321fcf0512SMichael S. Tsirkin break; 2331fcf0512SMichael S. Tsirkin case 4: 2341fcf0512SMichael S. Tsirkin memcpy(&l, buf, sizeof l); 2351fcf0512SMichael S. Tsirkin iowrite32(le32_to_cpu(l), vp_dev->device + offset); 2361fcf0512SMichael S. Tsirkin break; 2371fcf0512SMichael S. Tsirkin case 8: 2381fcf0512SMichael S. Tsirkin memcpy(&l, buf, sizeof l); 2391fcf0512SMichael S. Tsirkin iowrite32(le32_to_cpu(l), vp_dev->device + offset); 2401fcf0512SMichael S. Tsirkin memcpy(&l, buf + sizeof l, sizeof l); 2411fcf0512SMichael S. Tsirkin iowrite32(le32_to_cpu(l), vp_dev->device + offset + sizeof l); 2421fcf0512SMichael S. Tsirkin break; 2431fcf0512SMichael S. Tsirkin default: 2441fcf0512SMichael S. Tsirkin BUG(); 2451fcf0512SMichael S. Tsirkin } 2461fcf0512SMichael S. Tsirkin } 2471fcf0512SMichael S. Tsirkin 2481fcf0512SMichael S. Tsirkin static u32 vp_generation(struct virtio_device *vdev) 2491fcf0512SMichael S. Tsirkin { 2501fcf0512SMichael S. Tsirkin struct virtio_pci_device *vp_dev = to_vp_device(vdev); 251a8557d32SMichael S. Tsirkin return vp_ioread8(&vp_dev->common->config_generation); 2521fcf0512SMichael S. Tsirkin } 2531fcf0512SMichael S. Tsirkin 2541fcf0512SMichael S. Tsirkin /* config->{get,set}_status() implementations */ 2551fcf0512SMichael S. Tsirkin static u8 vp_get_status(struct virtio_device *vdev) 2561fcf0512SMichael S. Tsirkin { 2571fcf0512SMichael S. Tsirkin struct virtio_pci_device *vp_dev = to_vp_device(vdev); 258a8557d32SMichael S. Tsirkin return vp_ioread8(&vp_dev->common->device_status); 2591fcf0512SMichael S. Tsirkin } 2601fcf0512SMichael S. Tsirkin 2611fcf0512SMichael S. Tsirkin static void vp_set_status(struct virtio_device *vdev, u8 status) 2621fcf0512SMichael S. Tsirkin { 2631fcf0512SMichael S. Tsirkin struct virtio_pci_device *vp_dev = to_vp_device(vdev); 2641fcf0512SMichael S. Tsirkin /* We should never be setting status to 0. */ 2651fcf0512SMichael S. Tsirkin BUG_ON(status == 0); 266a8557d32SMichael S. Tsirkin vp_iowrite8(status, &vp_dev->common->device_status); 2671fcf0512SMichael S. Tsirkin } 2681fcf0512SMichael S. Tsirkin 2691fcf0512SMichael S. Tsirkin static void vp_reset(struct virtio_device *vdev) 2701fcf0512SMichael S. Tsirkin { 2711fcf0512SMichael S. Tsirkin struct virtio_pci_device *vp_dev = to_vp_device(vdev); 2721fcf0512SMichael S. Tsirkin /* 0 status means a reset. */ 273a8557d32SMichael S. Tsirkin vp_iowrite8(0, &vp_dev->common->device_status); 2741fcf0512SMichael S. Tsirkin /* Flush out the status write, and flush in device writes, 2751fcf0512SMichael S. Tsirkin * including MSI-X interrupts, if any. */ 276a8557d32SMichael S. Tsirkin vp_ioread8(&vp_dev->common->device_status); 2771fcf0512SMichael S. Tsirkin /* Flush pending VQ/configuration callbacks. */ 2781fcf0512SMichael S. Tsirkin vp_synchronize_vectors(vdev); 2791fcf0512SMichael S. Tsirkin } 2801fcf0512SMichael S. Tsirkin 2811fcf0512SMichael S. Tsirkin static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector) 2821fcf0512SMichael S. Tsirkin { 2831fcf0512SMichael S. Tsirkin /* Setup the vector used for configuration events */ 284a8557d32SMichael S. Tsirkin vp_iowrite16(vector, &vp_dev->common->msix_config); 2851fcf0512SMichael S. Tsirkin /* Verify we had enough resources to assign the vector */ 2861fcf0512SMichael S. Tsirkin /* Will also flush the write out to device */ 287a8557d32SMichael S. Tsirkin return vp_ioread16(&vp_dev->common->msix_config); 2881fcf0512SMichael S. Tsirkin } 2891fcf0512SMichael S. Tsirkin 2901fcf0512SMichael S. Tsirkin static size_t vring_pci_size(u16 num) 2911fcf0512SMichael S. Tsirkin { 2921fcf0512SMichael S. Tsirkin /* We only need a cacheline separation. */ 2931fcf0512SMichael S. Tsirkin return PAGE_ALIGN(vring_size(num, SMP_CACHE_BYTES)); 2941fcf0512SMichael S. Tsirkin } 2951fcf0512SMichael S. Tsirkin 2961fcf0512SMichael S. Tsirkin static void *alloc_virtqueue_pages(int *num) 2971fcf0512SMichael S. Tsirkin { 2981fcf0512SMichael S. Tsirkin void *pages; 2991fcf0512SMichael S. Tsirkin 3001fcf0512SMichael S. Tsirkin /* TODO: allocate each queue chunk individually */ 3011fcf0512SMichael S. Tsirkin for (; *num && vring_pci_size(*num) > PAGE_SIZE; *num /= 2) { 3021fcf0512SMichael S. Tsirkin pages = alloc_pages_exact(vring_pci_size(*num), 3031fcf0512SMichael S. Tsirkin GFP_KERNEL|__GFP_ZERO|__GFP_NOWARN); 3041fcf0512SMichael S. Tsirkin if (pages) 3051fcf0512SMichael S. Tsirkin return pages; 3061fcf0512SMichael S. Tsirkin } 3071fcf0512SMichael S. Tsirkin 3081fcf0512SMichael S. Tsirkin if (!*num) 3091fcf0512SMichael S. Tsirkin return NULL; 3101fcf0512SMichael S. Tsirkin 3111fcf0512SMichael S. Tsirkin /* Try to get a single page. You are my only hope! */ 3121fcf0512SMichael S. Tsirkin return alloc_pages_exact(vring_pci_size(*num), GFP_KERNEL|__GFP_ZERO); 3131fcf0512SMichael S. Tsirkin } 3141fcf0512SMichael S. Tsirkin 3151fcf0512SMichael S. Tsirkin static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev, 3161fcf0512SMichael S. Tsirkin struct virtio_pci_vq_info *info, 3171fcf0512SMichael S. Tsirkin unsigned index, 3181fcf0512SMichael S. Tsirkin void (*callback)(struct virtqueue *vq), 3191fcf0512SMichael S. Tsirkin const char *name, 3201fcf0512SMichael S. Tsirkin u16 msix_vec) 3211fcf0512SMichael S. Tsirkin { 3221fcf0512SMichael S. Tsirkin struct virtio_pci_common_cfg __iomem *cfg = vp_dev->common; 3231fcf0512SMichael S. Tsirkin struct virtqueue *vq; 3241fcf0512SMichael S. Tsirkin u16 num, off; 3251fcf0512SMichael S. Tsirkin int err; 3261fcf0512SMichael S. Tsirkin 327a8557d32SMichael S. Tsirkin if (index >= vp_ioread16(&cfg->num_queues)) 3281fcf0512SMichael S. Tsirkin return ERR_PTR(-ENOENT); 3291fcf0512SMichael S. Tsirkin 3301fcf0512SMichael S. Tsirkin /* Select the queue we're interested in */ 331a8557d32SMichael S. Tsirkin vp_iowrite16(index, &cfg->queue_select); 3321fcf0512SMichael S. Tsirkin 3331fcf0512SMichael S. Tsirkin /* Check if queue is either not available or already active. */ 334a8557d32SMichael S. Tsirkin num = vp_ioread16(&cfg->queue_size); 335a8557d32SMichael S. Tsirkin if (!num || vp_ioread16(&cfg->queue_enable)) 3361fcf0512SMichael S. Tsirkin return ERR_PTR(-ENOENT); 3371fcf0512SMichael S. Tsirkin 3381fcf0512SMichael S. Tsirkin if (num & (num - 1)) { 3391fcf0512SMichael S. Tsirkin dev_warn(&vp_dev->pci_dev->dev, "bad queue size %u", num); 3401fcf0512SMichael S. Tsirkin return ERR_PTR(-EINVAL); 3411fcf0512SMichael S. Tsirkin } 3421fcf0512SMichael S. Tsirkin 3431fcf0512SMichael S. Tsirkin /* get offset of notification word for this vq */ 344a8557d32SMichael S. Tsirkin off = vp_ioread16(&cfg->queue_notify_off); 3451fcf0512SMichael S. Tsirkin 3461fcf0512SMichael S. Tsirkin info->num = num; 3471fcf0512SMichael S. Tsirkin info->msix_vector = msix_vec; 3481fcf0512SMichael S. Tsirkin 3491fcf0512SMichael S. Tsirkin info->queue = alloc_virtqueue_pages(&info->num); 3501fcf0512SMichael S. Tsirkin if (info->queue == NULL) 3511fcf0512SMichael S. Tsirkin return ERR_PTR(-ENOMEM); 3521fcf0512SMichael S. Tsirkin 3531fcf0512SMichael S. Tsirkin /* create the vring */ 3541fcf0512SMichael S. Tsirkin vq = vring_new_virtqueue(index, info->num, 3551fcf0512SMichael S. Tsirkin SMP_CACHE_BYTES, &vp_dev->vdev, 3561fcf0512SMichael S. Tsirkin true, info->queue, vp_notify, callback, name); 3571fcf0512SMichael S. Tsirkin if (!vq) { 3581fcf0512SMichael S. Tsirkin err = -ENOMEM; 3591fcf0512SMichael S. Tsirkin goto err_new_queue; 3601fcf0512SMichael S. Tsirkin } 3611fcf0512SMichael S. Tsirkin 3621fcf0512SMichael S. Tsirkin /* activate the queue */ 363a8557d32SMichael S. Tsirkin vp_iowrite16(num, &cfg->queue_size); 364a8557d32SMichael S. Tsirkin vp_iowrite64_twopart(virt_to_phys(info->queue), 3651fcf0512SMichael S. Tsirkin &cfg->queue_desc_lo, &cfg->queue_desc_hi); 366a8557d32SMichael S. Tsirkin vp_iowrite64_twopart(virt_to_phys(virtqueue_get_avail(vq)), 3671fcf0512SMichael S. Tsirkin &cfg->queue_avail_lo, &cfg->queue_avail_hi); 368a8557d32SMichael S. Tsirkin vp_iowrite64_twopart(virt_to_phys(virtqueue_get_used(vq)), 3691fcf0512SMichael S. Tsirkin &cfg->queue_used_lo, &cfg->queue_used_hi); 3701fcf0512SMichael S. Tsirkin 3713909213cSMichael S. Tsirkin if (vp_dev->notify_base) { 3723909213cSMichael S. Tsirkin /* offset should not wrap */ 3733909213cSMichael S. Tsirkin if ((u64)off * vp_dev->notify_offset_multiplier + 2 3743909213cSMichael S. Tsirkin > vp_dev->notify_len) { 3753909213cSMichael S. Tsirkin dev_warn(&vp_dev->pci_dev->dev, 3763909213cSMichael S. Tsirkin "bad notification offset %u (x %u) " 3773909213cSMichael S. Tsirkin "for queue %u > %zd", 3783909213cSMichael S. Tsirkin off, vp_dev->notify_offset_multiplier, 3793909213cSMichael S. Tsirkin index, vp_dev->notify_len); 3803909213cSMichael S. Tsirkin err = -EINVAL; 3813909213cSMichael S. Tsirkin goto err_map_notify; 3823909213cSMichael S. Tsirkin } 3833909213cSMichael S. Tsirkin vq->priv = (void __force *)vp_dev->notify_base + 3843909213cSMichael S. Tsirkin off * vp_dev->notify_offset_multiplier; 3853909213cSMichael S. Tsirkin } else { 3861fcf0512SMichael S. Tsirkin vq->priv = (void __force *)map_capability(vp_dev->pci_dev, 3871fcf0512SMichael S. Tsirkin vp_dev->notify_map_cap, 2, 2, 3881fcf0512SMichael S. Tsirkin off * vp_dev->notify_offset_multiplier, 2, 3891fcf0512SMichael S. Tsirkin NULL); 3903909213cSMichael S. Tsirkin } 3911fcf0512SMichael S. Tsirkin 3921fcf0512SMichael S. Tsirkin if (!vq->priv) { 3931fcf0512SMichael S. Tsirkin err = -ENOMEM; 3941fcf0512SMichael S. Tsirkin goto err_map_notify; 3951fcf0512SMichael S. Tsirkin } 3961fcf0512SMichael S. Tsirkin 3971fcf0512SMichael S. Tsirkin if (msix_vec != VIRTIO_MSI_NO_VECTOR) { 398a8557d32SMichael S. Tsirkin vp_iowrite16(msix_vec, &cfg->queue_msix_vector); 399a8557d32SMichael S. Tsirkin msix_vec = vp_ioread16(&cfg->queue_msix_vector); 4001fcf0512SMichael S. Tsirkin if (msix_vec == VIRTIO_MSI_NO_VECTOR) { 4011fcf0512SMichael S. Tsirkin err = -EBUSY; 4021fcf0512SMichael S. Tsirkin goto err_assign_vector; 4031fcf0512SMichael S. Tsirkin } 4041fcf0512SMichael S. Tsirkin } 4051fcf0512SMichael S. Tsirkin 4061fcf0512SMichael S. Tsirkin return vq; 4071fcf0512SMichael S. Tsirkin 4081fcf0512SMichael S. Tsirkin err_assign_vector: 4093909213cSMichael S. Tsirkin if (!vp_dev->notify_base) 4101fcf0512SMichael S. Tsirkin pci_iounmap(vp_dev->pci_dev, (void __iomem __force *)vq->priv); 4111fcf0512SMichael S. Tsirkin err_map_notify: 4121fcf0512SMichael S. Tsirkin vring_del_virtqueue(vq); 4131fcf0512SMichael S. Tsirkin err_new_queue: 4141fcf0512SMichael S. Tsirkin free_pages_exact(info->queue, vring_pci_size(info->num)); 4151fcf0512SMichael S. Tsirkin return ERR_PTR(err); 4161fcf0512SMichael S. Tsirkin } 4171fcf0512SMichael S. Tsirkin 4181fcf0512SMichael S. Tsirkin static int vp_modern_find_vqs(struct virtio_device *vdev, unsigned nvqs, 4191fcf0512SMichael S. Tsirkin struct virtqueue *vqs[], 4201fcf0512SMichael S. Tsirkin vq_callback_t *callbacks[], 4211fcf0512SMichael S. Tsirkin const char *names[]) 4221fcf0512SMichael S. Tsirkin { 4231fcf0512SMichael S. Tsirkin struct virtio_pci_device *vp_dev = to_vp_device(vdev); 4241fcf0512SMichael S. Tsirkin struct virtqueue *vq; 4251fcf0512SMichael S. Tsirkin int rc = vp_find_vqs(vdev, nvqs, vqs, callbacks, names); 4261fcf0512SMichael S. Tsirkin 4271fcf0512SMichael S. Tsirkin if (rc) 4281fcf0512SMichael S. Tsirkin return rc; 4291fcf0512SMichael S. Tsirkin 4301fcf0512SMichael S. Tsirkin /* Select and activate all queues. Has to be done last: once we do 4311fcf0512SMichael S. Tsirkin * this, there's no way to go back except reset. 4321fcf0512SMichael S. Tsirkin */ 4331fcf0512SMichael S. Tsirkin list_for_each_entry(vq, &vdev->vqs, list) { 434a8557d32SMichael S. Tsirkin vp_iowrite16(vq->index, &vp_dev->common->queue_select); 435a8557d32SMichael S. Tsirkin vp_iowrite16(1, &vp_dev->common->queue_enable); 4361fcf0512SMichael S. Tsirkin } 4371fcf0512SMichael S. Tsirkin 4381fcf0512SMichael S. Tsirkin return 0; 4391fcf0512SMichael S. Tsirkin } 4401fcf0512SMichael S. Tsirkin 4411fcf0512SMichael S. Tsirkin static void del_vq(struct virtio_pci_vq_info *info) 4421fcf0512SMichael S. Tsirkin { 4431fcf0512SMichael S. Tsirkin struct virtqueue *vq = info->vq; 4441fcf0512SMichael S. Tsirkin struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); 4451fcf0512SMichael S. Tsirkin 446a8557d32SMichael S. Tsirkin vp_iowrite16(vq->index, &vp_dev->common->queue_select); 4471fcf0512SMichael S. Tsirkin 4481fcf0512SMichael S. Tsirkin if (vp_dev->msix_enabled) { 449a8557d32SMichael S. Tsirkin vp_iowrite16(VIRTIO_MSI_NO_VECTOR, 4501fcf0512SMichael S. Tsirkin &vp_dev->common->queue_msix_vector); 4511fcf0512SMichael S. Tsirkin /* Flush the write out to device */ 452a8557d32SMichael S. Tsirkin vp_ioread16(&vp_dev->common->queue_msix_vector); 4531fcf0512SMichael S. Tsirkin } 4541fcf0512SMichael S. Tsirkin 4553909213cSMichael S. Tsirkin if (!vp_dev->notify_base) 4561fcf0512SMichael S. Tsirkin pci_iounmap(vp_dev->pci_dev, (void __force __iomem *)vq->priv); 4571fcf0512SMichael S. Tsirkin 4581fcf0512SMichael S. Tsirkin vring_del_virtqueue(vq); 4591fcf0512SMichael S. Tsirkin 4601fcf0512SMichael S. Tsirkin free_pages_exact(info->queue, vring_pci_size(info->num)); 4611fcf0512SMichael S. Tsirkin } 4621fcf0512SMichael S. Tsirkin 463d3f5f065SMichael S. Tsirkin static const struct virtio_config_ops virtio_pci_config_nodev_ops = { 464d3f5f065SMichael S. Tsirkin .get = NULL, 465d3f5f065SMichael S. Tsirkin .set = NULL, 466d3f5f065SMichael S. Tsirkin .generation = vp_generation, 467d3f5f065SMichael S. Tsirkin .get_status = vp_get_status, 468d3f5f065SMichael S. Tsirkin .set_status = vp_set_status, 469d3f5f065SMichael S. Tsirkin .reset = vp_reset, 470d3f5f065SMichael S. Tsirkin .find_vqs = vp_modern_find_vqs, 471d3f5f065SMichael S. Tsirkin .del_vqs = vp_del_vqs, 472d3f5f065SMichael S. Tsirkin .get_features = vp_get_features, 473d3f5f065SMichael S. Tsirkin .finalize_features = vp_finalize_features, 474d3f5f065SMichael S. Tsirkin .bus_name = vp_bus_name, 475d3f5f065SMichael S. Tsirkin .set_vq_affinity = vp_set_vq_affinity, 476d3f5f065SMichael S. Tsirkin }; 477d3f5f065SMichael S. Tsirkin 4781fcf0512SMichael S. Tsirkin static const struct virtio_config_ops virtio_pci_config_ops = { 4791fcf0512SMichael S. Tsirkin .get = vp_get, 4801fcf0512SMichael S. Tsirkin .set = vp_set, 4811fcf0512SMichael S. Tsirkin .generation = vp_generation, 4821fcf0512SMichael S. Tsirkin .get_status = vp_get_status, 4831fcf0512SMichael S. Tsirkin .set_status = vp_set_status, 4841fcf0512SMichael S. Tsirkin .reset = vp_reset, 4851fcf0512SMichael S. Tsirkin .find_vqs = vp_modern_find_vqs, 4861fcf0512SMichael S. Tsirkin .del_vqs = vp_del_vqs, 4871fcf0512SMichael S. Tsirkin .get_features = vp_get_features, 4881fcf0512SMichael S. Tsirkin .finalize_features = vp_finalize_features, 4891fcf0512SMichael S. Tsirkin .bus_name = vp_bus_name, 4901fcf0512SMichael S. Tsirkin .set_vq_affinity = vp_set_vq_affinity, 4911fcf0512SMichael S. Tsirkin }; 4921fcf0512SMichael S. Tsirkin 4931fcf0512SMichael S. Tsirkin /** 4941fcf0512SMichael S. Tsirkin * virtio_pci_find_capability - walk capabilities to find device info. 4951fcf0512SMichael S. Tsirkin * @dev: the pci device 4961fcf0512SMichael S. Tsirkin * @cfg_type: the VIRTIO_PCI_CAP_* value we seek 4971fcf0512SMichael S. Tsirkin * @ioresource_types: IORESOURCE_MEM and/or IORESOURCE_IO. 4981fcf0512SMichael S. Tsirkin * 4991fcf0512SMichael S. Tsirkin * Returns offset of the capability, or 0. 5001fcf0512SMichael S. Tsirkin */ 5011fcf0512SMichael S. Tsirkin static inline int virtio_pci_find_capability(struct pci_dev *dev, u8 cfg_type, 5021fcf0512SMichael S. Tsirkin u32 ioresource_types) 5031fcf0512SMichael S. Tsirkin { 5041fcf0512SMichael S. Tsirkin int pos; 5051fcf0512SMichael S. Tsirkin 5061fcf0512SMichael S. Tsirkin for (pos = pci_find_capability(dev, PCI_CAP_ID_VNDR); 5071fcf0512SMichael S. Tsirkin pos > 0; 5081fcf0512SMichael S. Tsirkin pos = pci_find_next_capability(dev, pos, PCI_CAP_ID_VNDR)) { 5091fcf0512SMichael S. Tsirkin u8 type, bar; 5101fcf0512SMichael S. Tsirkin pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap, 5111fcf0512SMichael S. Tsirkin cfg_type), 5121fcf0512SMichael S. Tsirkin &type); 5131fcf0512SMichael S. Tsirkin pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap, 5141fcf0512SMichael S. Tsirkin bar), 5151fcf0512SMichael S. Tsirkin &bar); 5161fcf0512SMichael S. Tsirkin 5171fcf0512SMichael S. Tsirkin /* Ignore structures with reserved BAR values */ 5181fcf0512SMichael S. Tsirkin if (bar > 0x5) 5191fcf0512SMichael S. Tsirkin continue; 5201fcf0512SMichael S. Tsirkin 5211fcf0512SMichael S. Tsirkin if (type == cfg_type) { 5221fcf0512SMichael S. Tsirkin if (pci_resource_len(dev, bar) && 5231fcf0512SMichael S. Tsirkin pci_resource_flags(dev, bar) & ioresource_types) 5241fcf0512SMichael S. Tsirkin return pos; 5251fcf0512SMichael S. Tsirkin } 5261fcf0512SMichael S. Tsirkin } 5271fcf0512SMichael S. Tsirkin return 0; 5281fcf0512SMichael S. Tsirkin } 5291fcf0512SMichael S. Tsirkin 53089461c4aSRusty Russell /* This is part of the ABI. Don't screw with it. */ 5311fcf0512SMichael S. Tsirkin static inline void check_offsets(void) 5321fcf0512SMichael S. Tsirkin { 53389461c4aSRusty Russell /* Note: disk space was harmed in compilation of this function. */ 53489461c4aSRusty Russell BUILD_BUG_ON(VIRTIO_PCI_CAP_VNDR != 53589461c4aSRusty Russell offsetof(struct virtio_pci_cap, cap_vndr)); 53689461c4aSRusty Russell BUILD_BUG_ON(VIRTIO_PCI_CAP_NEXT != 53789461c4aSRusty Russell offsetof(struct virtio_pci_cap, cap_next)); 53889461c4aSRusty Russell BUILD_BUG_ON(VIRTIO_PCI_CAP_LEN != 53989461c4aSRusty Russell offsetof(struct virtio_pci_cap, cap_len)); 54089461c4aSRusty Russell BUILD_BUG_ON(VIRTIO_PCI_CAP_CFG_TYPE != 54189461c4aSRusty Russell offsetof(struct virtio_pci_cap, cfg_type)); 54289461c4aSRusty Russell BUILD_BUG_ON(VIRTIO_PCI_CAP_BAR != 54389461c4aSRusty Russell offsetof(struct virtio_pci_cap, bar)); 54489461c4aSRusty Russell BUILD_BUG_ON(VIRTIO_PCI_CAP_OFFSET != 54589461c4aSRusty Russell offsetof(struct virtio_pci_cap, offset)); 54689461c4aSRusty Russell BUILD_BUG_ON(VIRTIO_PCI_CAP_LENGTH != 54789461c4aSRusty Russell offsetof(struct virtio_pci_cap, length)); 54889461c4aSRusty Russell BUILD_BUG_ON(VIRTIO_PCI_NOTIFY_CAP_MULT != 54989461c4aSRusty Russell offsetof(struct virtio_pci_notify_cap, 55089461c4aSRusty Russell notify_off_multiplier)); 55189461c4aSRusty Russell BUILD_BUG_ON(VIRTIO_PCI_COMMON_DFSELECT != 55289461c4aSRusty Russell offsetof(struct virtio_pci_common_cfg, 55389461c4aSRusty Russell device_feature_select)); 55489461c4aSRusty Russell BUILD_BUG_ON(VIRTIO_PCI_COMMON_DF != 55589461c4aSRusty Russell offsetof(struct virtio_pci_common_cfg, device_feature)); 55689461c4aSRusty Russell BUILD_BUG_ON(VIRTIO_PCI_COMMON_GFSELECT != 55789461c4aSRusty Russell offsetof(struct virtio_pci_common_cfg, 55889461c4aSRusty Russell guest_feature_select)); 55989461c4aSRusty Russell BUILD_BUG_ON(VIRTIO_PCI_COMMON_GF != 56089461c4aSRusty Russell offsetof(struct virtio_pci_common_cfg, guest_feature)); 56189461c4aSRusty Russell BUILD_BUG_ON(VIRTIO_PCI_COMMON_MSIX != 56289461c4aSRusty Russell offsetof(struct virtio_pci_common_cfg, msix_config)); 56389461c4aSRusty Russell BUILD_BUG_ON(VIRTIO_PCI_COMMON_NUMQ != 56489461c4aSRusty Russell offsetof(struct virtio_pci_common_cfg, num_queues)); 56589461c4aSRusty Russell BUILD_BUG_ON(VIRTIO_PCI_COMMON_STATUS != 56689461c4aSRusty Russell offsetof(struct virtio_pci_common_cfg, device_status)); 56789461c4aSRusty Russell BUILD_BUG_ON(VIRTIO_PCI_COMMON_CFGGENERATION != 56889461c4aSRusty Russell offsetof(struct virtio_pci_common_cfg, config_generation)); 56989461c4aSRusty Russell BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_SELECT != 57089461c4aSRusty Russell offsetof(struct virtio_pci_common_cfg, queue_select)); 57189461c4aSRusty Russell BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_SIZE != 57289461c4aSRusty Russell offsetof(struct virtio_pci_common_cfg, queue_size)); 57389461c4aSRusty Russell BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_MSIX != 57489461c4aSRusty Russell offsetof(struct virtio_pci_common_cfg, queue_msix_vector)); 57589461c4aSRusty Russell BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_ENABLE != 57689461c4aSRusty Russell offsetof(struct virtio_pci_common_cfg, queue_enable)); 57789461c4aSRusty Russell BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_NOFF != 57889461c4aSRusty Russell offsetof(struct virtio_pci_common_cfg, queue_notify_off)); 57989461c4aSRusty Russell BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_DESCLO != 58089461c4aSRusty Russell offsetof(struct virtio_pci_common_cfg, queue_desc_lo)); 58189461c4aSRusty Russell BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_DESCHI != 58289461c4aSRusty Russell offsetof(struct virtio_pci_common_cfg, queue_desc_hi)); 58389461c4aSRusty Russell BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_AVAILLO != 58489461c4aSRusty Russell offsetof(struct virtio_pci_common_cfg, queue_avail_lo)); 58589461c4aSRusty Russell BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_AVAILHI != 58689461c4aSRusty Russell offsetof(struct virtio_pci_common_cfg, queue_avail_hi)); 58789461c4aSRusty Russell BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_USEDLO != 58889461c4aSRusty Russell offsetof(struct virtio_pci_common_cfg, queue_used_lo)); 58989461c4aSRusty Russell BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_USEDHI != 59089461c4aSRusty Russell offsetof(struct virtio_pci_common_cfg, queue_used_hi)); 5911fcf0512SMichael S. Tsirkin } 5921fcf0512SMichael S. Tsirkin 5931fcf0512SMichael S. Tsirkin /* the PCI probing function */ 5941fcf0512SMichael S. Tsirkin int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev) 5951fcf0512SMichael S. Tsirkin { 5961fcf0512SMichael S. Tsirkin struct pci_dev *pci_dev = vp_dev->pci_dev; 5971fcf0512SMichael S. Tsirkin int err, common, isr, notify, device; 5981fcf0512SMichael S. Tsirkin u32 notify_length; 5993909213cSMichael S. Tsirkin u32 notify_offset; 6001fcf0512SMichael S. Tsirkin 6011fcf0512SMichael S. Tsirkin check_offsets(); 6021fcf0512SMichael S. Tsirkin 6031fcf0512SMichael S. Tsirkin /* We only own devices >= 0x1000 and <= 0x107f: leave the rest. */ 6041fcf0512SMichael S. Tsirkin if (pci_dev->device < 0x1000 || pci_dev->device > 0x107f) 6051fcf0512SMichael S. Tsirkin return -ENODEV; 6061fcf0512SMichael S. Tsirkin 6071fcf0512SMichael S. Tsirkin if (pci_dev->device < 0x1040) { 6081fcf0512SMichael S. Tsirkin /* Transitional devices: use the PCI subsystem device id as 6091fcf0512SMichael S. Tsirkin * virtio device id, same as legacy driver always did. 6101fcf0512SMichael S. Tsirkin */ 6111fcf0512SMichael S. Tsirkin vp_dev->vdev.id.device = pci_dev->subsystem_device; 6121fcf0512SMichael S. Tsirkin } else { 6131fcf0512SMichael S. Tsirkin /* Modern devices: simply use PCI device id, but start from 0x1040. */ 6141fcf0512SMichael S. Tsirkin vp_dev->vdev.id.device = pci_dev->device - 0x1040; 6151fcf0512SMichael S. Tsirkin } 6161fcf0512SMichael S. Tsirkin vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor; 6171fcf0512SMichael S. Tsirkin 6181fcf0512SMichael S. Tsirkin if (virtio_device_is_legacy_only(vp_dev->vdev.id)) 6191fcf0512SMichael S. Tsirkin return -ENODEV; 6201fcf0512SMichael S. Tsirkin 6211fcf0512SMichael S. Tsirkin /* check for a common config: if not, use legacy mode (bar 0). */ 6221fcf0512SMichael S. Tsirkin common = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_COMMON_CFG, 6231fcf0512SMichael S. Tsirkin IORESOURCE_IO | IORESOURCE_MEM); 6241fcf0512SMichael S. Tsirkin if (!common) { 6251fcf0512SMichael S. Tsirkin dev_info(&pci_dev->dev, 6261fcf0512SMichael S. Tsirkin "virtio_pci: leaving for legacy driver\n"); 6271fcf0512SMichael S. Tsirkin return -ENODEV; 6281fcf0512SMichael S. Tsirkin } 6291fcf0512SMichael S. Tsirkin 6301fcf0512SMichael S. Tsirkin /* If common is there, these should be too... */ 6311fcf0512SMichael S. Tsirkin isr = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_ISR_CFG, 6321fcf0512SMichael S. Tsirkin IORESOURCE_IO | IORESOURCE_MEM); 6331fcf0512SMichael S. Tsirkin notify = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_NOTIFY_CFG, 6341fcf0512SMichael S. Tsirkin IORESOURCE_IO | IORESOURCE_MEM); 6351fcf0512SMichael S. Tsirkin if (!isr || !notify) { 6361fcf0512SMichael S. Tsirkin dev_err(&pci_dev->dev, 6371fcf0512SMichael S. Tsirkin "virtio_pci: missing capabilities %i/%i/%i\n", 6381fcf0512SMichael S. Tsirkin common, isr, notify); 6391fcf0512SMichael S. Tsirkin return -EINVAL; 6401fcf0512SMichael S. Tsirkin } 6411fcf0512SMichael S. Tsirkin 6421fcf0512SMichael S. Tsirkin /* Device capability is only mandatory for devices that have 6431fcf0512SMichael S. Tsirkin * device-specific configuration. 6441fcf0512SMichael S. Tsirkin */ 6451fcf0512SMichael S. Tsirkin device = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_DEVICE_CFG, 6461fcf0512SMichael S. Tsirkin IORESOURCE_IO | IORESOURCE_MEM); 6471fcf0512SMichael S. Tsirkin 6481fcf0512SMichael S. Tsirkin err = -EINVAL; 6491fcf0512SMichael S. Tsirkin vp_dev->common = map_capability(pci_dev, common, 6501fcf0512SMichael S. Tsirkin sizeof(struct virtio_pci_common_cfg), 4, 6511fcf0512SMichael S. Tsirkin 0, sizeof(struct virtio_pci_common_cfg), 6521fcf0512SMichael S. Tsirkin NULL); 6531fcf0512SMichael S. Tsirkin if (!vp_dev->common) 6541fcf0512SMichael S. Tsirkin goto err_map_common; 6551fcf0512SMichael S. Tsirkin vp_dev->isr = map_capability(pci_dev, isr, sizeof(u8), 1, 6561fcf0512SMichael S. Tsirkin 0, 1, 6571fcf0512SMichael S. Tsirkin NULL); 6581fcf0512SMichael S. Tsirkin if (!vp_dev->isr) 6591fcf0512SMichael S. Tsirkin goto err_map_isr; 6601fcf0512SMichael S. Tsirkin 6611fcf0512SMichael S. Tsirkin /* Read notify_off_multiplier from config space. */ 6621fcf0512SMichael S. Tsirkin pci_read_config_dword(pci_dev, 6631fcf0512SMichael S. Tsirkin notify + offsetof(struct virtio_pci_notify_cap, 6641fcf0512SMichael S. Tsirkin notify_off_multiplier), 6651fcf0512SMichael S. Tsirkin &vp_dev->notify_offset_multiplier); 6663909213cSMichael S. Tsirkin /* Read notify length and offset from config space. */ 6671fcf0512SMichael S. Tsirkin pci_read_config_dword(pci_dev, 6681fcf0512SMichael S. Tsirkin notify + offsetof(struct virtio_pci_notify_cap, 6691fcf0512SMichael S. Tsirkin cap.length), 6701fcf0512SMichael S. Tsirkin ¬ify_length); 6711fcf0512SMichael S. Tsirkin 6723909213cSMichael S. Tsirkin pci_read_config_dword(pci_dev, 6733909213cSMichael S. Tsirkin notify + offsetof(struct virtio_pci_notify_cap, 6743909213cSMichael S. Tsirkin cap.length), 6753909213cSMichael S. Tsirkin ¬ify_offset); 6763909213cSMichael S. Tsirkin 6773909213cSMichael S. Tsirkin /* We don't know how many VQs we'll map, ahead of the time. 6783909213cSMichael S. Tsirkin * If notify length is small, map it all now. 6793909213cSMichael S. Tsirkin * Otherwise, map each VQ individually later. 6803909213cSMichael S. Tsirkin */ 6813909213cSMichael S. Tsirkin if ((u64)notify_length + (notify_offset % PAGE_SIZE) <= PAGE_SIZE) { 6823909213cSMichael S. Tsirkin vp_dev->notify_base = map_capability(pci_dev, notify, 2, 2, 6833909213cSMichael S. Tsirkin 0, notify_length, 6843909213cSMichael S. Tsirkin &vp_dev->notify_len); 6853909213cSMichael S. Tsirkin if (!vp_dev->notify_base) 6863909213cSMichael S. Tsirkin goto err_map_notify; 6873909213cSMichael S. Tsirkin } else { 6881fcf0512SMichael S. Tsirkin vp_dev->notify_map_cap = notify; 6893909213cSMichael S. Tsirkin } 6901fcf0512SMichael S. Tsirkin 6911fcf0512SMichael S. Tsirkin /* Again, we don't know how much we should map, but PAGE_SIZE 6921fcf0512SMichael S. Tsirkin * is more than enough for all existing devices. 6931fcf0512SMichael S. Tsirkin */ 6941fcf0512SMichael S. Tsirkin if (device) { 6951fcf0512SMichael S. Tsirkin vp_dev->device = map_capability(pci_dev, device, 0, 4, 6961fcf0512SMichael S. Tsirkin 0, PAGE_SIZE, 6971fcf0512SMichael S. Tsirkin &vp_dev->device_len); 6981fcf0512SMichael S. Tsirkin if (!vp_dev->device) 6991fcf0512SMichael S. Tsirkin goto err_map_device; 7001fcf0512SMichael S. Tsirkin 7011fcf0512SMichael S. Tsirkin vp_dev->vdev.config = &virtio_pci_config_ops; 702d3f5f065SMichael S. Tsirkin } else { 703d3f5f065SMichael S. Tsirkin vp_dev->vdev.config = &virtio_pci_config_nodev_ops; 704d3f5f065SMichael S. Tsirkin } 7051fcf0512SMichael S. Tsirkin 7061fcf0512SMichael S. Tsirkin vp_dev->config_vector = vp_config_vector; 7071fcf0512SMichael S. Tsirkin vp_dev->setup_vq = setup_vq; 7081fcf0512SMichael S. Tsirkin vp_dev->del_vq = del_vq; 7091fcf0512SMichael S. Tsirkin 7101fcf0512SMichael S. Tsirkin return 0; 7111fcf0512SMichael S. Tsirkin 7121fcf0512SMichael S. Tsirkin err_map_device: 7133909213cSMichael S. Tsirkin if (vp_dev->notify_base) 7143909213cSMichael S. Tsirkin pci_iounmap(pci_dev, vp_dev->notify_base); 7153909213cSMichael S. Tsirkin err_map_notify: 7161fcf0512SMichael S. Tsirkin pci_iounmap(pci_dev, vp_dev->isr); 7171fcf0512SMichael S. Tsirkin err_map_isr: 7181fcf0512SMichael S. Tsirkin pci_iounmap(pci_dev, vp_dev->common); 7191fcf0512SMichael S. Tsirkin err_map_common: 7201fcf0512SMichael S. Tsirkin return err; 7211fcf0512SMichael S. Tsirkin } 7221fcf0512SMichael S. Tsirkin 7231fcf0512SMichael S. Tsirkin void virtio_pci_modern_remove(struct virtio_pci_device *vp_dev) 7241fcf0512SMichael S. Tsirkin { 7251fcf0512SMichael S. Tsirkin struct pci_dev *pci_dev = vp_dev->pci_dev; 7261fcf0512SMichael S. Tsirkin 7271fcf0512SMichael S. Tsirkin if (vp_dev->device) 7281fcf0512SMichael S. Tsirkin pci_iounmap(pci_dev, vp_dev->device); 7293909213cSMichael S. Tsirkin if (vp_dev->notify_base) 7303909213cSMichael S. Tsirkin pci_iounmap(pci_dev, vp_dev->notify_base); 7311fcf0512SMichael S. Tsirkin pci_iounmap(pci_dev, vp_dev->isr); 7321fcf0512SMichael S. Tsirkin pci_iounmap(pci_dev, vp_dev->common); 7331fcf0512SMichael S. Tsirkin } 734