11fcf0512SMichael S. Tsirkin /* 21fcf0512SMichael S. Tsirkin * Virtio PCI driver - modern (virtio 1.0) device support 31fcf0512SMichael S. Tsirkin * 41fcf0512SMichael S. Tsirkin * This module allows virtio devices to be used over a virtual PCI device. 51fcf0512SMichael S. Tsirkin * This can be used with QEMU based VMMs like KVM or Xen. 61fcf0512SMichael S. Tsirkin * 71fcf0512SMichael S. Tsirkin * Copyright IBM Corp. 2007 81fcf0512SMichael S. Tsirkin * Copyright Red Hat, Inc. 2014 91fcf0512SMichael S. Tsirkin * 101fcf0512SMichael S. Tsirkin * Authors: 111fcf0512SMichael S. Tsirkin * Anthony Liguori <aliguori@us.ibm.com> 121fcf0512SMichael S. Tsirkin * Rusty Russell <rusty@rustcorp.com.au> 131fcf0512SMichael S. Tsirkin * Michael S. Tsirkin <mst@redhat.com> 141fcf0512SMichael S. Tsirkin * 151fcf0512SMichael S. Tsirkin * This work is licensed under the terms of the GNU GPL, version 2 or later. 161fcf0512SMichael S. Tsirkin * See the COPYING file in the top-level directory. 171fcf0512SMichael S. Tsirkin * 181fcf0512SMichael S. Tsirkin */ 191fcf0512SMichael S. Tsirkin 2005dbcb43SMichael S. Tsirkin #include <linux/delay.h> 211fcf0512SMichael S. Tsirkin #define VIRTIO_PCI_NO_LEGACY 221fcf0512SMichael S. Tsirkin #include "virtio_pci_common.h" 231fcf0512SMichael S. Tsirkin 24c5d4c2c9SMichael S. Tsirkin /* 25c5d4c2c9SMichael S. Tsirkin * Type-safe wrappers for io accesses. 26c5d4c2c9SMichael S. Tsirkin * Use these to enforce at compile time the following spec requirement: 27c5d4c2c9SMichael S. Tsirkin * 28c5d4c2c9SMichael S. Tsirkin * The driver MUST access each field using the “natural” access 29c5d4c2c9SMichael S. Tsirkin * method, i.e. 32-bit accesses for 32-bit fields, 16-bit accesses 30c5d4c2c9SMichael S. Tsirkin * for 16-bit fields and 8-bit accesses for 8-bit fields. 31c5d4c2c9SMichael S. Tsirkin */ 32c5d4c2c9SMichael S. Tsirkin static inline u8 vp_ioread8(u8 __iomem *addr) 33c5d4c2c9SMichael S. Tsirkin { 34c5d4c2c9SMichael S. 
Tsirkin return ioread8(addr); 35c5d4c2c9SMichael S. Tsirkin } 3661bd405fSGonglei static inline u16 vp_ioread16 (__le16 __iomem *addr) 37c5d4c2c9SMichael S. Tsirkin { 38c5d4c2c9SMichael S. Tsirkin return ioread16(addr); 39c5d4c2c9SMichael S. Tsirkin } 40c5d4c2c9SMichael S. Tsirkin 4161bd405fSGonglei static inline u32 vp_ioread32(__le32 __iomem *addr) 42c5d4c2c9SMichael S. Tsirkin { 43c5d4c2c9SMichael S. Tsirkin return ioread32(addr); 44c5d4c2c9SMichael S. Tsirkin } 45c5d4c2c9SMichael S. Tsirkin 46c5d4c2c9SMichael S. Tsirkin static inline void vp_iowrite8(u8 value, u8 __iomem *addr) 47c5d4c2c9SMichael S. Tsirkin { 48c5d4c2c9SMichael S. Tsirkin iowrite8(value, addr); 49c5d4c2c9SMichael S. Tsirkin } 50c5d4c2c9SMichael S. Tsirkin 5161bd405fSGonglei static inline void vp_iowrite16(u16 value, __le16 __iomem *addr) 52c5d4c2c9SMichael S. Tsirkin { 53c5d4c2c9SMichael S. Tsirkin iowrite16(value, addr); 54c5d4c2c9SMichael S. Tsirkin } 55c5d4c2c9SMichael S. Tsirkin 5661bd405fSGonglei static inline void vp_iowrite32(u32 value, __le32 __iomem *addr) 57c5d4c2c9SMichael S. Tsirkin { 58c5d4c2c9SMichael S. Tsirkin iowrite32(value, addr); 59c5d4c2c9SMichael S. Tsirkin } 60c5d4c2c9SMichael S. Tsirkin 61a8557d32SMichael S. Tsirkin static void vp_iowrite64_twopart(u64 val, 62a8557d32SMichael S. Tsirkin __le32 __iomem *lo, __le32 __iomem *hi) 63a8557d32SMichael S. Tsirkin { 64a8557d32SMichael S. Tsirkin vp_iowrite32((u32)val, lo); 65a8557d32SMichael S. Tsirkin vp_iowrite32(val >> 32, hi); 66a8557d32SMichael S. Tsirkin } 67a8557d32SMichael S. Tsirkin 681fcf0512SMichael S. Tsirkin static void __iomem *map_capability(struct pci_dev *dev, int off, 691fcf0512SMichael S. Tsirkin size_t minlen, 701fcf0512SMichael S. Tsirkin u32 align, 711fcf0512SMichael S. Tsirkin u32 start, u32 size, 721fcf0512SMichael S. Tsirkin size_t *len) 731fcf0512SMichael S. Tsirkin { 741fcf0512SMichael S. Tsirkin u8 bar; 751fcf0512SMichael S. Tsirkin u32 offset, length; 761fcf0512SMichael S. 
Tsirkin void __iomem *p; 771fcf0512SMichael S. Tsirkin 781fcf0512SMichael S. Tsirkin pci_read_config_byte(dev, off + offsetof(struct virtio_pci_cap, 791fcf0512SMichael S. Tsirkin bar), 801fcf0512SMichael S. Tsirkin &bar); 811fcf0512SMichael S. Tsirkin pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, offset), 821fcf0512SMichael S. Tsirkin &offset); 831fcf0512SMichael S. Tsirkin pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, length), 841fcf0512SMichael S. Tsirkin &length); 851fcf0512SMichael S. Tsirkin 861fcf0512SMichael S. Tsirkin if (length <= start) { 871fcf0512SMichael S. Tsirkin dev_err(&dev->dev, 881fcf0512SMichael S. Tsirkin "virtio_pci: bad capability len %u (>%u expected)\n", 891fcf0512SMichael S. Tsirkin length, start); 901fcf0512SMichael S. Tsirkin return NULL; 911fcf0512SMichael S. Tsirkin } 921fcf0512SMichael S. Tsirkin 931fcf0512SMichael S. Tsirkin if (length - start < minlen) { 941fcf0512SMichael S. Tsirkin dev_err(&dev->dev, 951fcf0512SMichael S. Tsirkin "virtio_pci: bad capability len %u (>=%zu expected)\n", 961fcf0512SMichael S. Tsirkin length, minlen); 971fcf0512SMichael S. Tsirkin return NULL; 981fcf0512SMichael S. Tsirkin } 991fcf0512SMichael S. Tsirkin 1001fcf0512SMichael S. Tsirkin length -= start; 1011fcf0512SMichael S. Tsirkin 1021fcf0512SMichael S. Tsirkin if (start + offset < offset) { 1031fcf0512SMichael S. Tsirkin dev_err(&dev->dev, 1041fcf0512SMichael S. Tsirkin "virtio_pci: map wrap-around %u+%u\n", 1051fcf0512SMichael S. Tsirkin start, offset); 1061fcf0512SMichael S. Tsirkin return NULL; 1071fcf0512SMichael S. Tsirkin } 1081fcf0512SMichael S. Tsirkin 1091fcf0512SMichael S. Tsirkin offset += start; 1101fcf0512SMichael S. Tsirkin 1111fcf0512SMichael S. Tsirkin if (offset & (align - 1)) { 1121fcf0512SMichael S. Tsirkin dev_err(&dev->dev, 1131fcf0512SMichael S. Tsirkin "virtio_pci: offset %u not aligned to %u\n", 1141fcf0512SMichael S. Tsirkin offset, align); 1151fcf0512SMichael S. 
Tsirkin return NULL; 1161fcf0512SMichael S. Tsirkin } 1171fcf0512SMichael S. Tsirkin 1181fcf0512SMichael S. Tsirkin if (length > size) 1191fcf0512SMichael S. Tsirkin length = size; 1201fcf0512SMichael S. Tsirkin 1211fcf0512SMichael S. Tsirkin if (len) 1221fcf0512SMichael S. Tsirkin *len = length; 1231fcf0512SMichael S. Tsirkin 1241fcf0512SMichael S. Tsirkin if (minlen + offset < minlen || 1251fcf0512SMichael S. Tsirkin minlen + offset > pci_resource_len(dev, bar)) { 1261fcf0512SMichael S. Tsirkin dev_err(&dev->dev, 1271fcf0512SMichael S. Tsirkin "virtio_pci: map virtio %zu@%u " 1281fcf0512SMichael S. Tsirkin "out of range on bar %i length %lu\n", 1291fcf0512SMichael S. Tsirkin minlen, offset, 1301fcf0512SMichael S. Tsirkin bar, (unsigned long)pci_resource_len(dev, bar)); 1311fcf0512SMichael S. Tsirkin return NULL; 1321fcf0512SMichael S. Tsirkin } 1331fcf0512SMichael S. Tsirkin 1341fcf0512SMichael S. Tsirkin p = pci_iomap_range(dev, bar, offset, length); 1351fcf0512SMichael S. Tsirkin if (!p) 1361fcf0512SMichael S. Tsirkin dev_err(&dev->dev, 1371fcf0512SMichael S. Tsirkin "virtio_pci: unable to map virtio %u@%u on bar %i\n", 1381fcf0512SMichael S. Tsirkin length, offset, bar); 1391fcf0512SMichael S. Tsirkin return p; 1401fcf0512SMichael S. Tsirkin } 1411fcf0512SMichael S. Tsirkin 1421fcf0512SMichael S. Tsirkin /* virtio config->get_features() implementation */ 1431fcf0512SMichael S. Tsirkin static u64 vp_get_features(struct virtio_device *vdev) 1441fcf0512SMichael S. Tsirkin { 1451fcf0512SMichael S. Tsirkin struct virtio_pci_device *vp_dev = to_vp_device(vdev); 1461fcf0512SMichael S. Tsirkin u64 features; 1471fcf0512SMichael S. Tsirkin 148a8557d32SMichael S. Tsirkin vp_iowrite32(0, &vp_dev->common->device_feature_select); 149a8557d32SMichael S. Tsirkin features = vp_ioread32(&vp_dev->common->device_feature); 150a8557d32SMichael S. Tsirkin vp_iowrite32(1, &vp_dev->common->device_feature_select); 151a8557d32SMichael S. 
Tsirkin features |= ((u64)vp_ioread32(&vp_dev->common->device_feature) << 32); 1521fcf0512SMichael S. Tsirkin 1531fcf0512SMichael S. Tsirkin return features; 1541fcf0512SMichael S. Tsirkin } 1551fcf0512SMichael S. Tsirkin 1561fcf0512SMichael S. Tsirkin /* virtio config->finalize_features() implementation */ 1571fcf0512SMichael S. Tsirkin static int vp_finalize_features(struct virtio_device *vdev) 1581fcf0512SMichael S. Tsirkin { 1591fcf0512SMichael S. Tsirkin struct virtio_pci_device *vp_dev = to_vp_device(vdev); 1601fcf0512SMichael S. Tsirkin 1611fcf0512SMichael S. Tsirkin /* Give virtio_ring a chance to accept features. */ 1621fcf0512SMichael S. Tsirkin vring_transport_features(vdev); 1631fcf0512SMichael S. Tsirkin 1641fcf0512SMichael S. Tsirkin if (!__virtio_test_bit(vdev, VIRTIO_F_VERSION_1)) { 1651fcf0512SMichael S. Tsirkin dev_err(&vdev->dev, "virtio: device uses modern interface " 1661fcf0512SMichael S. Tsirkin "but does not have VIRTIO_F_VERSION_1\n"); 1671fcf0512SMichael S. Tsirkin return -EINVAL; 1681fcf0512SMichael S. Tsirkin } 1691fcf0512SMichael S. Tsirkin 170a8557d32SMichael S. Tsirkin vp_iowrite32(0, &vp_dev->common->guest_feature_select); 171a8557d32SMichael S. Tsirkin vp_iowrite32((u32)vdev->features, &vp_dev->common->guest_feature); 172a8557d32SMichael S. Tsirkin vp_iowrite32(1, &vp_dev->common->guest_feature_select); 173a8557d32SMichael S. Tsirkin vp_iowrite32(vdev->features >> 32, &vp_dev->common->guest_feature); 1741fcf0512SMichael S. Tsirkin 1751fcf0512SMichael S. Tsirkin return 0; 1761fcf0512SMichael S. Tsirkin } 1771fcf0512SMichael S. Tsirkin 1781fcf0512SMichael S. Tsirkin /* virtio config->get() implementation */ 1791fcf0512SMichael S. Tsirkin static void vp_get(struct virtio_device *vdev, unsigned offset, 1801fcf0512SMichael S. Tsirkin void *buf, unsigned len) 1811fcf0512SMichael S. Tsirkin { 1821fcf0512SMichael S. Tsirkin struct virtio_pci_device *vp_dev = to_vp_device(vdev); 1831fcf0512SMichael S. Tsirkin u8 b; 1841fcf0512SMichael S. 
Tsirkin __le16 w; 1851fcf0512SMichael S. Tsirkin __le32 l; 1861fcf0512SMichael S. Tsirkin 1871fcf0512SMichael S. Tsirkin BUG_ON(offset + len > vp_dev->device_len); 1881fcf0512SMichael S. Tsirkin 1891fcf0512SMichael S. Tsirkin switch (len) { 1901fcf0512SMichael S. Tsirkin case 1: 1911fcf0512SMichael S. Tsirkin b = ioread8(vp_dev->device + offset); 1921fcf0512SMichael S. Tsirkin memcpy(buf, &b, sizeof b); 1931fcf0512SMichael S. Tsirkin break; 1941fcf0512SMichael S. Tsirkin case 2: 1951fcf0512SMichael S. Tsirkin w = cpu_to_le16(ioread16(vp_dev->device + offset)); 1961fcf0512SMichael S. Tsirkin memcpy(buf, &w, sizeof w); 1971fcf0512SMichael S. Tsirkin break; 1981fcf0512SMichael S. Tsirkin case 4: 1991fcf0512SMichael S. Tsirkin l = cpu_to_le32(ioread32(vp_dev->device + offset)); 2001fcf0512SMichael S. Tsirkin memcpy(buf, &l, sizeof l); 2011fcf0512SMichael S. Tsirkin break; 2021fcf0512SMichael S. Tsirkin case 8: 2031fcf0512SMichael S. Tsirkin l = cpu_to_le32(ioread32(vp_dev->device + offset)); 2041fcf0512SMichael S. Tsirkin memcpy(buf, &l, sizeof l); 2051fcf0512SMichael S. Tsirkin l = cpu_to_le32(ioread32(vp_dev->device + offset + sizeof l)); 2061fcf0512SMichael S. Tsirkin memcpy(buf + sizeof l, &l, sizeof l); 2071fcf0512SMichael S. Tsirkin break; 2081fcf0512SMichael S. Tsirkin default: 2091fcf0512SMichael S. Tsirkin BUG(); 2101fcf0512SMichael S. Tsirkin } 2111fcf0512SMichael S. Tsirkin } 2121fcf0512SMichael S. Tsirkin 2131fcf0512SMichael S. Tsirkin /* the config->set() implementation. it's symmetric to the config->get() 2141fcf0512SMichael S. Tsirkin * implementation */ 2151fcf0512SMichael S. Tsirkin static void vp_set(struct virtio_device *vdev, unsigned offset, 2161fcf0512SMichael S. Tsirkin const void *buf, unsigned len) 2171fcf0512SMichael S. Tsirkin { 2181fcf0512SMichael S. Tsirkin struct virtio_pci_device *vp_dev = to_vp_device(vdev); 2191fcf0512SMichael S. Tsirkin u8 b; 2201fcf0512SMichael S. Tsirkin __le16 w; 2211fcf0512SMichael S. 
Tsirkin __le32 l; 2221fcf0512SMichael S. Tsirkin 2231fcf0512SMichael S. Tsirkin BUG_ON(offset + len > vp_dev->device_len); 2241fcf0512SMichael S. Tsirkin 2251fcf0512SMichael S. Tsirkin switch (len) { 2261fcf0512SMichael S. Tsirkin case 1: 2271fcf0512SMichael S. Tsirkin memcpy(&b, buf, sizeof b); 2281fcf0512SMichael S. Tsirkin iowrite8(b, vp_dev->device + offset); 2291fcf0512SMichael S. Tsirkin break; 2301fcf0512SMichael S. Tsirkin case 2: 2311fcf0512SMichael S. Tsirkin memcpy(&w, buf, sizeof w); 2321fcf0512SMichael S. Tsirkin iowrite16(le16_to_cpu(w), vp_dev->device + offset); 2331fcf0512SMichael S. Tsirkin break; 2341fcf0512SMichael S. Tsirkin case 4: 2351fcf0512SMichael S. Tsirkin memcpy(&l, buf, sizeof l); 2361fcf0512SMichael S. Tsirkin iowrite32(le32_to_cpu(l), vp_dev->device + offset); 2371fcf0512SMichael S. Tsirkin break; 2381fcf0512SMichael S. Tsirkin case 8: 2391fcf0512SMichael S. Tsirkin memcpy(&l, buf, sizeof l); 2401fcf0512SMichael S. Tsirkin iowrite32(le32_to_cpu(l), vp_dev->device + offset); 2411fcf0512SMichael S. Tsirkin memcpy(&l, buf + sizeof l, sizeof l); 2421fcf0512SMichael S. Tsirkin iowrite32(le32_to_cpu(l), vp_dev->device + offset + sizeof l); 2431fcf0512SMichael S. Tsirkin break; 2441fcf0512SMichael S. Tsirkin default: 2451fcf0512SMichael S. Tsirkin BUG(); 2461fcf0512SMichael S. Tsirkin } 2471fcf0512SMichael S. Tsirkin } 2481fcf0512SMichael S. Tsirkin 2491fcf0512SMichael S. Tsirkin static u32 vp_generation(struct virtio_device *vdev) 2501fcf0512SMichael S. Tsirkin { 2511fcf0512SMichael S. Tsirkin struct virtio_pci_device *vp_dev = to_vp_device(vdev); 252a8557d32SMichael S. Tsirkin return vp_ioread8(&vp_dev->common->config_generation); 2531fcf0512SMichael S. Tsirkin } 2541fcf0512SMichael S. Tsirkin 2551fcf0512SMichael S. Tsirkin /* config->{get,set}_status() implementations */ 2561fcf0512SMichael S. Tsirkin static u8 vp_get_status(struct virtio_device *vdev) 2571fcf0512SMichael S. Tsirkin { 2581fcf0512SMichael S. 
Tsirkin struct virtio_pci_device *vp_dev = to_vp_device(vdev); 259a8557d32SMichael S. Tsirkin return vp_ioread8(&vp_dev->common->device_status); 2601fcf0512SMichael S. Tsirkin } 2611fcf0512SMichael S. Tsirkin 2621fcf0512SMichael S. Tsirkin static void vp_set_status(struct virtio_device *vdev, u8 status) 2631fcf0512SMichael S. Tsirkin { 2641fcf0512SMichael S. Tsirkin struct virtio_pci_device *vp_dev = to_vp_device(vdev); 2651fcf0512SMichael S. Tsirkin /* We should never be setting status to 0. */ 2661fcf0512SMichael S. Tsirkin BUG_ON(status == 0); 267a8557d32SMichael S. Tsirkin vp_iowrite8(status, &vp_dev->common->device_status); 2681fcf0512SMichael S. Tsirkin } 2691fcf0512SMichael S. Tsirkin 2701fcf0512SMichael S. Tsirkin static void vp_reset(struct virtio_device *vdev) 2711fcf0512SMichael S. Tsirkin { 2721fcf0512SMichael S. Tsirkin struct virtio_pci_device *vp_dev = to_vp_device(vdev); 2731fcf0512SMichael S. Tsirkin /* 0 status means a reset. */ 274a8557d32SMichael S. Tsirkin vp_iowrite8(0, &vp_dev->common->device_status); 27505dbcb43SMichael S. Tsirkin /* After writing 0 to device_status, the driver MUST wait for a read of 27605dbcb43SMichael S. Tsirkin * device_status to return 0 before reinitializing the device. 27705dbcb43SMichael S. Tsirkin * This will flush out the status write, and flush in device writes, 27805dbcb43SMichael S. Tsirkin * including MSI-X interrupts, if any. 27905dbcb43SMichael S. Tsirkin */ 28005dbcb43SMichael S. Tsirkin while (vp_ioread8(&vp_dev->common->device_status)) 28105dbcb43SMichael S. Tsirkin msleep(1); 2821fcf0512SMichael S. Tsirkin /* Flush pending VQ/configuration callbacks. */ 2831fcf0512SMichael S. Tsirkin vp_synchronize_vectors(vdev); 2841fcf0512SMichael S. Tsirkin } 2851fcf0512SMichael S. Tsirkin 2861fcf0512SMichael S. Tsirkin static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector) 2871fcf0512SMichael S. Tsirkin { 2881fcf0512SMichael S. 
Tsirkin /* Setup the vector used for configuration events */ 289a8557d32SMichael S. Tsirkin vp_iowrite16(vector, &vp_dev->common->msix_config); 2901fcf0512SMichael S. Tsirkin /* Verify we had enough resources to assign the vector */ 2911fcf0512SMichael S. Tsirkin /* Will also flush the write out to device */ 292a8557d32SMichael S. Tsirkin return vp_ioread16(&vp_dev->common->msix_config); 2931fcf0512SMichael S. Tsirkin } 2941fcf0512SMichael S. Tsirkin 2951fcf0512SMichael S. Tsirkin static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev, 2960a9b3f47SMichael S. Tsirkin struct virtio_pci_vq_info *info, 2971fcf0512SMichael S. Tsirkin unsigned index, 2981fcf0512SMichael S. Tsirkin void (*callback)(struct virtqueue *vq), 2991fcf0512SMichael S. Tsirkin const char *name, 3001fcf0512SMichael S. Tsirkin u16 msix_vec) 3011fcf0512SMichael S. Tsirkin { 3021fcf0512SMichael S. Tsirkin struct virtio_pci_common_cfg __iomem *cfg = vp_dev->common; 3031fcf0512SMichael S. Tsirkin struct virtqueue *vq; 3041fcf0512SMichael S. Tsirkin u16 num, off; 3051fcf0512SMichael S. Tsirkin int err; 3061fcf0512SMichael S. Tsirkin 307a8557d32SMichael S. Tsirkin if (index >= vp_ioread16(&cfg->num_queues)) 3081fcf0512SMichael S. Tsirkin return ERR_PTR(-ENOENT); 3091fcf0512SMichael S. Tsirkin 3101fcf0512SMichael S. Tsirkin /* Select the queue we're interested in */ 311a8557d32SMichael S. Tsirkin vp_iowrite16(index, &cfg->queue_select); 3121fcf0512SMichael S. Tsirkin 3131fcf0512SMichael S. Tsirkin /* Check if queue is either not available or already active. */ 314a8557d32SMichael S. Tsirkin num = vp_ioread16(&cfg->queue_size); 315a8557d32SMichael S. Tsirkin if (!num || vp_ioread16(&cfg->queue_enable)) 3161fcf0512SMichael S. Tsirkin return ERR_PTR(-ENOENT); 3171fcf0512SMichael S. Tsirkin 3181fcf0512SMichael S. Tsirkin if (num & (num - 1)) { 3191fcf0512SMichael S. Tsirkin dev_warn(&vp_dev->pci_dev->dev, "bad queue size %u", num); 3201fcf0512SMichael S. 
Tsirkin return ERR_PTR(-EINVAL); 3211fcf0512SMichael S. Tsirkin } 3221fcf0512SMichael S. Tsirkin 3231fcf0512SMichael S. Tsirkin /* get offset of notification word for this vq */ 324a8557d32SMichael S. Tsirkin off = vp_ioread16(&cfg->queue_notify_off); 3251fcf0512SMichael S. Tsirkin 3260a9b3f47SMichael S. Tsirkin info->msix_vector = msix_vec; 3270a9b3f47SMichael S. Tsirkin 3287a5589b2SAndy Lutomirski /* create the vring */ 3297a5589b2SAndy Lutomirski vq = vring_create_virtqueue(index, num, 3307a5589b2SAndy Lutomirski SMP_CACHE_BYTES, &vp_dev->vdev, 3317a5589b2SAndy Lutomirski true, true, vp_notify, callback, name); 3327a5589b2SAndy Lutomirski if (!vq) 3331fcf0512SMichael S. Tsirkin return ERR_PTR(-ENOMEM); 3341fcf0512SMichael S. Tsirkin 3351fcf0512SMichael S. Tsirkin /* activate the queue */ 3367a5589b2SAndy Lutomirski vp_iowrite16(virtqueue_get_vring_size(vq), &cfg->queue_size); 3377a5589b2SAndy Lutomirski vp_iowrite64_twopart(virtqueue_get_desc_addr(vq), 3381fcf0512SMichael S. Tsirkin &cfg->queue_desc_lo, &cfg->queue_desc_hi); 3397a5589b2SAndy Lutomirski vp_iowrite64_twopart(virtqueue_get_avail_addr(vq), 3401fcf0512SMichael S. Tsirkin &cfg->queue_avail_lo, &cfg->queue_avail_hi); 3417a5589b2SAndy Lutomirski vp_iowrite64_twopart(virtqueue_get_used_addr(vq), 3421fcf0512SMichael S. Tsirkin &cfg->queue_used_lo, &cfg->queue_used_hi); 3431fcf0512SMichael S. Tsirkin 3443909213cSMichael S. Tsirkin if (vp_dev->notify_base) { 3453909213cSMichael S. Tsirkin /* offset should not wrap */ 3463909213cSMichael S. Tsirkin if ((u64)off * vp_dev->notify_offset_multiplier + 2 3473909213cSMichael S. Tsirkin > vp_dev->notify_len) { 3483909213cSMichael S. Tsirkin dev_warn(&vp_dev->pci_dev->dev, 3493909213cSMichael S. Tsirkin "bad notification offset %u (x %u) " 3503909213cSMichael S. Tsirkin "for queue %u > %zd", 3513909213cSMichael S. Tsirkin off, vp_dev->notify_offset_multiplier, 3523909213cSMichael S. Tsirkin index, vp_dev->notify_len); 3533909213cSMichael S. 
Tsirkin err = -EINVAL; 3543909213cSMichael S. Tsirkin goto err_map_notify; 3553909213cSMichael S. Tsirkin } 3563909213cSMichael S. Tsirkin vq->priv = (void __force *)vp_dev->notify_base + 3573909213cSMichael S. Tsirkin off * vp_dev->notify_offset_multiplier; 3583909213cSMichael S. Tsirkin } else { 3591fcf0512SMichael S. Tsirkin vq->priv = (void __force *)map_capability(vp_dev->pci_dev, 3601fcf0512SMichael S. Tsirkin vp_dev->notify_map_cap, 2, 2, 3611fcf0512SMichael S. Tsirkin off * vp_dev->notify_offset_multiplier, 2, 3621fcf0512SMichael S. Tsirkin NULL); 3633909213cSMichael S. Tsirkin } 3641fcf0512SMichael S. Tsirkin 3651fcf0512SMichael S. Tsirkin if (!vq->priv) { 3661fcf0512SMichael S. Tsirkin err = -ENOMEM; 3671fcf0512SMichael S. Tsirkin goto err_map_notify; 3681fcf0512SMichael S. Tsirkin } 3691fcf0512SMichael S. Tsirkin 3701fcf0512SMichael S. Tsirkin if (msix_vec != VIRTIO_MSI_NO_VECTOR) { 371a8557d32SMichael S. Tsirkin vp_iowrite16(msix_vec, &cfg->queue_msix_vector); 372a8557d32SMichael S. Tsirkin msix_vec = vp_ioread16(&cfg->queue_msix_vector); 3731fcf0512SMichael S. Tsirkin if (msix_vec == VIRTIO_MSI_NO_VECTOR) { 3741fcf0512SMichael S. Tsirkin err = -EBUSY; 3751fcf0512SMichael S. Tsirkin goto err_assign_vector; 3761fcf0512SMichael S. Tsirkin } 3771fcf0512SMichael S. Tsirkin } 3781fcf0512SMichael S. Tsirkin 3791fcf0512SMichael S. Tsirkin return vq; 3801fcf0512SMichael S. Tsirkin 3811fcf0512SMichael S. Tsirkin err_assign_vector: 3823909213cSMichael S. Tsirkin if (!vp_dev->notify_base) 3831fcf0512SMichael S. Tsirkin pci_iounmap(vp_dev->pci_dev, (void __iomem __force *)vq->priv); 3841fcf0512SMichael S. Tsirkin err_map_notify: 3851fcf0512SMichael S. Tsirkin vring_del_virtqueue(vq); 3861fcf0512SMichael S. Tsirkin return ERR_PTR(err); 3871fcf0512SMichael S. Tsirkin } 3881fcf0512SMichael S. Tsirkin 3891fcf0512SMichael S. 
Tsirkin static int vp_modern_find_vqs(struct virtio_device *vdev, unsigned nvqs, 390fb5e31d9SChristoph Hellwig struct virtqueue *vqs[], vq_callback_t *callbacks[], 391fb5e31d9SChristoph Hellwig const char * const names[], struct irq_affinity *desc) 3921fcf0512SMichael S. Tsirkin { 3931fcf0512SMichael S. Tsirkin struct virtio_pci_device *vp_dev = to_vp_device(vdev); 3941fcf0512SMichael S. Tsirkin struct virtqueue *vq; 395fb5e31d9SChristoph Hellwig int rc = vp_find_vqs(vdev, nvqs, vqs, callbacks, names, desc); 3961fcf0512SMichael S. Tsirkin 3971fcf0512SMichael S. Tsirkin if (rc) 3981fcf0512SMichael S. Tsirkin return rc; 3991fcf0512SMichael S. Tsirkin 4001fcf0512SMichael S. Tsirkin /* Select and activate all queues. Has to be done last: once we do 4011fcf0512SMichael S. Tsirkin * this, there's no way to go back except reset. 4021fcf0512SMichael S. Tsirkin */ 4031fcf0512SMichael S. Tsirkin list_for_each_entry(vq, &vdev->vqs, list) { 404a8557d32SMichael S. Tsirkin vp_iowrite16(vq->index, &vp_dev->common->queue_select); 405a8557d32SMichael S. Tsirkin vp_iowrite16(1, &vp_dev->common->queue_enable); 4061fcf0512SMichael S. Tsirkin } 4071fcf0512SMichael S. Tsirkin 4081fcf0512SMichael S. Tsirkin return 0; 4091fcf0512SMichael S. Tsirkin } 4101fcf0512SMichael S. Tsirkin 4110a9b3f47SMichael S. Tsirkin static void del_vq(struct virtio_pci_vq_info *info) 4121fcf0512SMichael S. Tsirkin { 4130a9b3f47SMichael S. Tsirkin struct virtqueue *vq = info->vq; 4141fcf0512SMichael S. Tsirkin struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); 4151fcf0512SMichael S. Tsirkin 416a8557d32SMichael S. Tsirkin vp_iowrite16(vq->index, &vp_dev->common->queue_select); 4171fcf0512SMichael S. Tsirkin 4182008c154SMichael S. Tsirkin if (vp_dev->msix_enabled) { 419a8557d32SMichael S. Tsirkin vp_iowrite16(VIRTIO_MSI_NO_VECTOR, 4201fcf0512SMichael S. Tsirkin &vp_dev->common->queue_msix_vector); 4211fcf0512SMichael S. Tsirkin /* Flush the write out to device */ 422a8557d32SMichael S. 
Tsirkin vp_ioread16(&vp_dev->common->queue_msix_vector); 4231fcf0512SMichael S. Tsirkin } 4241fcf0512SMichael S. Tsirkin 4253909213cSMichael S. Tsirkin if (!vp_dev->notify_base) 4261fcf0512SMichael S. Tsirkin pci_iounmap(vp_dev->pci_dev, (void __force __iomem *)vq->priv); 4271fcf0512SMichael S. Tsirkin 4281fcf0512SMichael S. Tsirkin vring_del_virtqueue(vq); 4291fcf0512SMichael S. Tsirkin } 4301fcf0512SMichael S. Tsirkin 431d3f5f065SMichael S. Tsirkin static const struct virtio_config_ops virtio_pci_config_nodev_ops = { 432d3f5f065SMichael S. Tsirkin .get = NULL, 433d3f5f065SMichael S. Tsirkin .set = NULL, 434d3f5f065SMichael S. Tsirkin .generation = vp_generation, 435d3f5f065SMichael S. Tsirkin .get_status = vp_get_status, 436d3f5f065SMichael S. Tsirkin .set_status = vp_set_status, 437d3f5f065SMichael S. Tsirkin .reset = vp_reset, 438d3f5f065SMichael S. Tsirkin .find_vqs = vp_modern_find_vqs, 439d3f5f065SMichael S. Tsirkin .del_vqs = vp_del_vqs, 440d3f5f065SMichael S. Tsirkin .get_features = vp_get_features, 441d3f5f065SMichael S. Tsirkin .finalize_features = vp_finalize_features, 442d3f5f065SMichael S. Tsirkin .bus_name = vp_bus_name, 443d3f5f065SMichael S. Tsirkin .set_vq_affinity = vp_set_vq_affinity, 444bbaba479SChristoph Hellwig .get_vq_affinity = vp_get_vq_affinity, 445d3f5f065SMichael S. Tsirkin }; 446d3f5f065SMichael S. Tsirkin 4471fcf0512SMichael S. Tsirkin static const struct virtio_config_ops virtio_pci_config_ops = { 4481fcf0512SMichael S. Tsirkin .get = vp_get, 4491fcf0512SMichael S. Tsirkin .set = vp_set, 4501fcf0512SMichael S. Tsirkin .generation = vp_generation, 4511fcf0512SMichael S. Tsirkin .get_status = vp_get_status, 4521fcf0512SMichael S. Tsirkin .set_status = vp_set_status, 4531fcf0512SMichael S. Tsirkin .reset = vp_reset, 4541fcf0512SMichael S. Tsirkin .find_vqs = vp_modern_find_vqs, 4551fcf0512SMichael S. Tsirkin .del_vqs = vp_del_vqs, 4561fcf0512SMichael S. Tsirkin .get_features = vp_get_features, 4571fcf0512SMichael S. 
Tsirkin .finalize_features = vp_finalize_features, 4581fcf0512SMichael S. Tsirkin .bus_name = vp_bus_name, 4591fcf0512SMichael S. Tsirkin .set_vq_affinity = vp_set_vq_affinity, 460bbaba479SChristoph Hellwig .get_vq_affinity = vp_get_vq_affinity, 4611fcf0512SMichael S. Tsirkin }; 4621fcf0512SMichael S. Tsirkin 4631fcf0512SMichael S. Tsirkin /** 4641fcf0512SMichael S. Tsirkin * virtio_pci_find_capability - walk capabilities to find device info. 4651fcf0512SMichael S. Tsirkin * @dev: the pci device 4661fcf0512SMichael S. Tsirkin * @cfg_type: the VIRTIO_PCI_CAP_* value we seek 4671fcf0512SMichael S. Tsirkin * @ioresource_types: IORESOURCE_MEM and/or IORESOURCE_IO. 4681fcf0512SMichael S. Tsirkin * 4691fcf0512SMichael S. Tsirkin * Returns offset of the capability, or 0. 4701fcf0512SMichael S. Tsirkin */ 4711fcf0512SMichael S. Tsirkin static inline int virtio_pci_find_capability(struct pci_dev *dev, u8 cfg_type, 47259a5b0f7SGerd Hoffmann u32 ioresource_types, int *bars) 4731fcf0512SMichael S. Tsirkin { 4741fcf0512SMichael S. Tsirkin int pos; 4751fcf0512SMichael S. Tsirkin 4761fcf0512SMichael S. Tsirkin for (pos = pci_find_capability(dev, PCI_CAP_ID_VNDR); 4771fcf0512SMichael S. Tsirkin pos > 0; 4781fcf0512SMichael S. Tsirkin pos = pci_find_next_capability(dev, pos, PCI_CAP_ID_VNDR)) { 4791fcf0512SMichael S. Tsirkin u8 type, bar; 4801fcf0512SMichael S. Tsirkin pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap, 4811fcf0512SMichael S. Tsirkin cfg_type), 4821fcf0512SMichael S. Tsirkin &type); 4831fcf0512SMichael S. Tsirkin pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap, 4841fcf0512SMichael S. Tsirkin bar), 4851fcf0512SMichael S. Tsirkin &bar); 4861fcf0512SMichael S. Tsirkin 4871fcf0512SMichael S. Tsirkin /* Ignore structures with reserved BAR values */ 4881fcf0512SMichael S. Tsirkin if (bar > 0x5) 4891fcf0512SMichael S. Tsirkin continue; 4901fcf0512SMichael S. Tsirkin 4911fcf0512SMichael S. Tsirkin if (type == cfg_type) { 4921fcf0512SMichael S. 
Tsirkin if (pci_resource_len(dev, bar) && 49359a5b0f7SGerd Hoffmann pci_resource_flags(dev, bar) & ioresource_types) { 49459a5b0f7SGerd Hoffmann *bars |= (1 << bar); 4951fcf0512SMichael S. Tsirkin return pos; 4961fcf0512SMichael S. Tsirkin } 4971fcf0512SMichael S. Tsirkin } 49859a5b0f7SGerd Hoffmann } 4991fcf0512SMichael S. Tsirkin return 0; 5001fcf0512SMichael S. Tsirkin } 5011fcf0512SMichael S. Tsirkin 50289461c4aSRusty Russell /* This is part of the ABI. Don't screw with it. */ 5031fcf0512SMichael S. Tsirkin static inline void check_offsets(void) 5041fcf0512SMichael S. Tsirkin { 50589461c4aSRusty Russell /* Note: disk space was harmed in compilation of this function. */ 50689461c4aSRusty Russell BUILD_BUG_ON(VIRTIO_PCI_CAP_VNDR != 50789461c4aSRusty Russell offsetof(struct virtio_pci_cap, cap_vndr)); 50889461c4aSRusty Russell BUILD_BUG_ON(VIRTIO_PCI_CAP_NEXT != 50989461c4aSRusty Russell offsetof(struct virtio_pci_cap, cap_next)); 51089461c4aSRusty Russell BUILD_BUG_ON(VIRTIO_PCI_CAP_LEN != 51189461c4aSRusty Russell offsetof(struct virtio_pci_cap, cap_len)); 51289461c4aSRusty Russell BUILD_BUG_ON(VIRTIO_PCI_CAP_CFG_TYPE != 51389461c4aSRusty Russell offsetof(struct virtio_pci_cap, cfg_type)); 51489461c4aSRusty Russell BUILD_BUG_ON(VIRTIO_PCI_CAP_BAR != 51589461c4aSRusty Russell offsetof(struct virtio_pci_cap, bar)); 51689461c4aSRusty Russell BUILD_BUG_ON(VIRTIO_PCI_CAP_OFFSET != 51789461c4aSRusty Russell offsetof(struct virtio_pci_cap, offset)); 51889461c4aSRusty Russell BUILD_BUG_ON(VIRTIO_PCI_CAP_LENGTH != 51989461c4aSRusty Russell offsetof(struct virtio_pci_cap, length)); 52089461c4aSRusty Russell BUILD_BUG_ON(VIRTIO_PCI_NOTIFY_CAP_MULT != 52189461c4aSRusty Russell offsetof(struct virtio_pci_notify_cap, 52289461c4aSRusty Russell notify_off_multiplier)); 52389461c4aSRusty Russell BUILD_BUG_ON(VIRTIO_PCI_COMMON_DFSELECT != 52489461c4aSRusty Russell offsetof(struct virtio_pci_common_cfg, 52589461c4aSRusty Russell device_feature_select)); 52689461c4aSRusty Russell 
	/*
	 * (continuation of check_offsets())
	 * Each BUILD_BUG_ON() below is a compile-time assertion that a
	 * VIRTIO_PCI_COMMON_* register-offset constant matches the offset
	 * of the corresponding field in struct virtio_pci_common_cfg, so
	 * the two independent descriptions of the common-config layout
	 * cannot silently drift apart.
	 */
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_DF !=
		     offsetof(struct virtio_pci_common_cfg, device_feature));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_GFSELECT !=
		     offsetof(struct virtio_pci_common_cfg,
			      guest_feature_select));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_GF !=
		     offsetof(struct virtio_pci_common_cfg, guest_feature));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_MSIX !=
		     offsetof(struct virtio_pci_common_cfg, msix_config));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_NUMQ !=
		     offsetof(struct virtio_pci_common_cfg, num_queues));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_STATUS !=
		     offsetof(struct virtio_pci_common_cfg, device_status));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_CFGGENERATION !=
		     offsetof(struct virtio_pci_common_cfg, config_generation));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_SELECT !=
		     offsetof(struct virtio_pci_common_cfg, queue_select));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_SIZE !=
		     offsetof(struct virtio_pci_common_cfg, queue_size));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_MSIX !=
		     offsetof(struct virtio_pci_common_cfg, queue_msix_vector));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_ENABLE !=
		     offsetof(struct virtio_pci_common_cfg, queue_enable));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_NOFF !=
		     offsetof(struct virtio_pci_common_cfg, queue_notify_off));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_DESCLO !=
		     offsetof(struct virtio_pci_common_cfg, queue_desc_lo));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_DESCHI !=
		     offsetof(struct virtio_pci_common_cfg, queue_desc_hi));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_AVAILLO !=
		     offsetof(struct virtio_pci_common_cfg, queue_avail_lo));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_AVAILHI !=
		     offsetof(struct virtio_pci_common_cfg, queue_avail_hi));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_USEDLO !=
		     offsetof(struct virtio_pci_common_cfg, queue_used_lo));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_USEDHI !=
		     offsetof(struct virtio_pci_common_cfg, queue_used_hi));
}

/*
 * the PCI probing function
 *
 * Detects whether @vp_dev is a virtio 1.0 ("modern") PCI device, locates
 * its vendor-specific capabilities, maps the common/ISR/notify/device
 * config regions, and installs the modern config ops on the vdev.
 *
 * Returns 0 on success, -ENODEV when the device should be left to the
 * legacy driver (wrong id range, or no common-cfg capability), or a
 * negative errno on setup failure.  On failure all mappings made here
 * are torn down via the goto-unwind labels at the bottom.
 */
int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev)
{
	struct pci_dev *pci_dev = vp_dev->pci_dev;
	/* capability offsets in PCI config space; 0 means "not found" */
	int err, common, isr, notify, device;
	u32 notify_length;
	u32 notify_offset;

	check_offsets();

	/* We only own devices >= 0x1000 and <= 0x107f: leave the rest. */
	if (pci_dev->device < 0x1000 || pci_dev->device > 0x107f)
		return -ENODEV;

	if (pci_dev->device < 0x1040) {
		/* Transitional devices: use the PCI subsystem device id as
		 * virtio device id, same as legacy driver always did.
		 */
		vp_dev->vdev.id.device = pci_dev->subsystem_device;
	} else {
		/* Modern devices: simply use PCI device id, but start from 0x1040. */
		vp_dev->vdev.id.device = pci_dev->device - 0x1040;
	}
	vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor;

	/* check for a common config: if not, use legacy mode (bar 0). */
	common = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_COMMON_CFG,
					    IORESOURCE_IO | IORESOURCE_MEM,
					    &vp_dev->modern_bars);
	if (!common) {
		dev_info(&pci_dev->dev,
			 "virtio_pci: leaving for legacy driver\n");
		return -ENODEV;
	}

	/* If common is there, these should be too... */
	isr = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_ISR_CFG,
					 IORESOURCE_IO | IORESOURCE_MEM,
					 &vp_dev->modern_bars);
	notify = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_NOTIFY_CFG,
					    IORESOURCE_IO | IORESOURCE_MEM,
					    &vp_dev->modern_bars);
	if (!isr || !notify) {
		dev_err(&pci_dev->dev,
			"virtio_pci: missing capabilities %i/%i/%i\n",
			common, isr, notify);
		return -EINVAL;
	}

	/* Prefer a 64-bit DMA mask, fall back to 32-bit; a failure of both
	 * is only warned about, not fatal — the device may still work.
	 */
	err = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(64));
	if (err)
		err = dma_set_mask_and_coherent(&pci_dev->dev,
						DMA_BIT_MASK(32));
	if (err)
		dev_warn(&pci_dev->dev, "Failed to enable 64-bit or 32-bit DMA. Trying to continue, but this might not work.\n");

	/* Device capability is only mandatory for devices that have
	 * device-specific configuration.
	 */
	device = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_DEVICE_CFG,
					    IORESOURCE_IO | IORESOURCE_MEM,
					    &vp_dev->modern_bars);

	/* Claim only the BARs that the capabilities above actually use. */
	err = pci_request_selected_regions(pci_dev, vp_dev->modern_bars,
					   "virtio-pci-modern");
	if (err)
		return err;

	err = -EINVAL;
	vp_dev->common = map_capability(pci_dev, common,
				       sizeof(struct virtio_pci_common_cfg), 4,
				       0, sizeof(struct virtio_pci_common_cfg),
				       NULL);
	if (!vp_dev->common)
		goto err_map_common;
	vp_dev->isr = map_capability(pci_dev, isr, sizeof(u8), 1,
				     0, 1,
				     NULL);
	if (!vp_dev->isr)
		goto err_map_isr;

	/* Read notify_off_multiplier from config space. */
	pci_read_config_dword(pci_dev,
			      notify + offsetof(struct virtio_pci_notify_cap,
						notify_off_multiplier),
			      &vp_dev->notify_offset_multiplier);
	/* Read notify length and offset from config space. */
	pci_read_config_dword(pci_dev,
			      notify + offsetof(struct virtio_pci_notify_cap,
						cap.length),
			      &notify_length);

	pci_read_config_dword(pci_dev,
			      notify + offsetof(struct virtio_pci_notify_cap,
						cap.offset),
			      &notify_offset);

	/* We don't know how many VQs we'll map, ahead of the time.
	 * If notify length is small, map it all now.
	 * Otherwise, map each VQ individually later.
	 */
	if ((u64)notify_length + (notify_offset % PAGE_SIZE) <= PAGE_SIZE) {
		vp_dev->notify_base = map_capability(pci_dev, notify, 2, 2,
						     0, notify_length,
						     &vp_dev->notify_len);
		if (!vp_dev->notify_base)
			goto err_map_notify;
	} else {
		/* Deferred mapping: remember the capability offset only. */
		vp_dev->notify_map_cap = notify;
	}

	/* Again, we don't know how much we should map, but PAGE_SIZE
	 * is more than enough for all existing devices.
	 */
	if (device) {
		vp_dev->device = map_capability(pci_dev, device, 0, 4,
						0, PAGE_SIZE,
						&vp_dev->device_len);
		if (!vp_dev->device)
			goto err_map_device;

		vp_dev->vdev.config = &virtio_pci_config_ops;
	} else {
		/* No device-specific config: use ops without config access. */
		vp_dev->vdev.config = &virtio_pci_config_nodev_ops;
	}

	vp_dev->config_vector = vp_config_vector;
	vp_dev->setup_vq = setup_vq;
	vp_dev->del_vq = del_vq;

	return 0;

	/* Unwind in reverse order of the mappings above. */
err_map_device:
	if (vp_dev->notify_base)
		pci_iounmap(pci_dev, vp_dev->notify_base);
err_map_notify:
	pci_iounmap(pci_dev, vp_dev->isr);
err_map_isr:
	pci_iounmap(pci_dev, vp_dev->common);
err_map_common:
	return err;
}

/*
 * Tear down everything virtio_pci_modern_probe() set up: unmap the
 * optional device and notify_base regions (only if they were mapped),
 * then the mandatory ISR and common regions, and finally release the
 * BARs claimed via pci_request_selected_regions().
 */
void virtio_pci_modern_remove(struct virtio_pci_device *vp_dev)
{
	struct pci_dev *pci_dev = vp_dev->pci_dev;

	if (vp_dev->device)
		pci_iounmap(pci_dev, vp_dev->device);
	if (vp_dev->notify_base)
		pci_iounmap(pci_dev, vp_dev->notify_base);
	pci_iounmap(pci_dev, vp_dev->isr);
	pci_iounmap(pci_dev, vp_dev->common);
	pci_release_selected_regions(pci_dev, vp_dev->modern_bars);
}