// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Virtio PCI driver - modern (virtio 1.0) device support
 *
 * This module allows virtio devices to be used over a virtual PCI device.
 * This can be used with QEMU based VMMs like KVM or Xen.
 *
 * Copyright IBM Corp. 2007
 * Copyright Red Hat, Inc. 2014
 *
 * Authors:
 *  Anthony Liguori <aliguori@us.ibm.com>
 *  Rusty Russell <rusty@rustcorp.com.au>
 *  Michael S. Tsirkin <mst@redhat.com>
 */

#include <linux/delay.h>
#define VIRTIO_PCI_NO_LEGACY
#define VIRTIO_RING_NO_LEGACY
#include "virtio_pci_common.h"

static u64 vp_get_features(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	return vp_modern_get_features(&vp_dev->mdev);
}

static void vp_transport_features(struct virtio_device *vdev, u64 features)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct pci_dev *pci_dev = vp_dev->pci_dev;

	if ((features & BIT_ULL(VIRTIO_F_SR_IOV)) &&
			pci_find_ext_capability(pci_dev, PCI_EXT_CAP_ID_SRIOV))
		__virtio_set_bit(vdev, VIRTIO_F_SR_IOV);
}

/* virtio config->finalize_features() implementation */
static int vp_finalize_features(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	u64 features = vdev->features;

	/* Give virtio_ring a chance to accept features. */
	vring_transport_features(vdev);

	/* Give virtio_pci a chance to accept features. */
	vp_transport_features(vdev, features);

	if (!__virtio_test_bit(vdev, VIRTIO_F_VERSION_1)) {
		dev_err(&vdev->dev, "virtio: device uses modern interface "
			"but does not have VIRTIO_F_VERSION_1\n");
		return -EINVAL;
	}

	vp_modern_set_features(&vp_dev->mdev, vdev->features);

	return 0;
}

/* virtio config->get() implementation */
static void vp_get(struct virtio_device *vdev, unsigned int offset,
		   void *buf, unsigned int len)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
	void __iomem *device = mdev->device;
	u8 b;
	__le16 w;
	__le32 l;

	BUG_ON(offset + len > mdev->device_len);

	switch (len) {
	case 1:
		b = ioread8(device + offset);
		memcpy(buf, &b, sizeof b);
		break;
	case 2:
		w = cpu_to_le16(ioread16(device + offset));
		memcpy(buf, &w, sizeof w);
		break;
	case 4:
		l = cpu_to_le32(ioread32(device + offset));
		memcpy(buf, &l, sizeof l);
		break;
	case 8:
		l = cpu_to_le32(ioread32(device + offset));
		memcpy(buf, &l, sizeof l);
		l = cpu_to_le32(ioread32(device + offset + sizeof l));
		memcpy(buf + sizeof l, &l, sizeof l);
		break;
	default:
		BUG();
	}
}

/* the config->set() implementation.  it's symmetric to the config->get()
 * implementation */
static void vp_set(struct virtio_device *vdev, unsigned int offset,
		   const void *buf, unsigned int len)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
	void __iomem *device = mdev->device;
	u8 b;
	__le16 w;
	__le32 l;

	BUG_ON(offset + len > mdev->device_len);

	switch (len) {
	case 1:
		memcpy(&b, buf, sizeof b);
		iowrite8(b, device + offset);
		break;
	case 2:
		memcpy(&w, buf, sizeof w);
		iowrite16(le16_to_cpu(w), device + offset);
		break;
	case 4:
		memcpy(&l, buf, sizeof l);
		iowrite32(le32_to_cpu(l), device + offset);
		break;
	case 8:
		memcpy(&l, buf, sizeof l);
		iowrite32(le32_to_cpu(l), device + offset);
		memcpy(&l, buf + sizeof l, sizeof l);
		iowrite32(le32_to_cpu(l), device + offset + sizeof l);
		break;
	default:
		BUG();
	}
}

static u32 vp_generation(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	return vp_modern_generation(&vp_dev->mdev);
}

/* config->{get,set}_status() implementations */
static u8 vp_get_status(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	return vp_modern_get_status(&vp_dev->mdev);
}

static void vp_set_status(struct virtio_device *vdev, u8 status)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	/* We should never be setting status to 0. */
	BUG_ON(status == 0);
	vp_modern_set_status(&vp_dev->mdev, status);
}

static void vp_reset(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtio_pci_modern_device *mdev = &vp_dev->mdev;

	/* 0 status means a reset. */
	vp_modern_set_status(mdev, 0);
	/* After writing 0 to device_status, the driver MUST wait for a read of
	 * device_status to return 0 before reinitializing the device.
	 * This will flush out the status write, and flush in device writes,
	 * including MSI-X interrupts, if any.
	 */
	while (vp_modern_get_status(mdev))
		msleep(1);
	/* Flush pending VQ/configuration callbacks. */
	vp_synchronize_vectors(vdev);
}

static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
{
	return vp_modern_config_vector(&vp_dev->mdev, vector);
}

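/*
 * Allocate a vring for queue @index and program it into the device:
 * queue size, descriptor/avail/used addresses, the notify area mapping
 * and (when MSI-X is in use) the queue's interrupt vector.  The queue is
 * not enabled here; vp_modern_find_vqs() enables all queues in one pass
 * once every vring has been set up.
 */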
static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
				  struct virtio_pci_vq_info *info,
				  unsigned int index,
				  void (*callback)(struct virtqueue *vq),
				  const char *name,
				  bool ctx,
				  u16 msix_vec)
{
	struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
	struct virtqueue *vq;
	u16 num;
	int err;

	if (index >= vp_modern_get_num_queues(mdev))
		return ERR_PTR(-ENOENT);

	/* Check if queue is either not available or already active. */
	num = vp_modern_get_queue_size(mdev, index);
	if (!num || vp_modern_get_queue_enable(mdev, index))
		return ERR_PTR(-ENOENT);

	if (num & (num - 1)) {
		dev_warn(&vp_dev->pci_dev->dev, "bad queue size %u", num);
		return ERR_PTR(-EINVAL);
	}

	info->msix_vector = msix_vec;

	/* create the vring */
	vq = vring_create_virtqueue(index, num,
				    SMP_CACHE_BYTES, &vp_dev->vdev,
				    true, true, ctx,
				    vp_notify, callback, name);
	if (!vq)
		return ERR_PTR(-ENOMEM);

	/* activate the queue */
	vp_modern_set_queue_size(mdev, index, virtqueue_get_vring_size(vq));
	vp_modern_queue_address(mdev, index, virtqueue_get_desc_addr(vq),
				virtqueue_get_avail_addr(vq),
				virtqueue_get_used_addr(vq));

	vq->priv = (void __force *)vp_modern_map_vq_notify(mdev, index, NULL);
	if (!vq->priv) {
		err = -ENOMEM;
		goto err_map_notify;
	}

	if (msix_vec != VIRTIO_MSI_NO_VECTOR) {
		msix_vec = vp_modern_queue_vector(mdev, index, msix_vec);
		if (msix_vec == VIRTIO_MSI_NO_VECTOR) {
			err = -EBUSY;
			goto err_assign_vector;
		}
	}

	return vq;

err_assign_vector:
	if (!mdev->notify_base)
		pci_iounmap(mdev->pci_dev, (void __iomem __force *)vq->priv);
err_map_notify:
	vring_del_virtqueue(vq);
	return ERR_PTR(err);
}

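/*
 * find_vqs() for the modern transport: the common vp_find_vqs() creates
 * and programs each vring via setup_vq() above, after which every queue
 * is enabled in a final pass below, since once a queue is enabled there
 * is no way back short of a device reset.
 */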
static int vp_modern_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
			      struct virtqueue *vqs[],
			      vq_callback_t *callbacks[],
			      const char * const names[], const bool *ctx,
			      struct irq_affinity *desc)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtqueue *vq;
	int rc = vp_find_vqs(vdev, nvqs, vqs, callbacks, names, ctx, desc);

	if (rc)
		return rc;

	/* Select and activate all queues. Has to be done last: once we do
	 * this, there's no way to go back except reset.
	 */
	list_for_each_entry(vq, &vdev->vqs, list)
		vp_modern_set_queue_enable(&vp_dev->mdev, vq->index, true);

	return 0;
}

static void del_vq(struct virtio_pci_vq_info *info)
{
	struct virtqueue *vq = info->vq;
	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
	struct virtio_pci_modern_device *mdev = &vp_dev->mdev;

	if (vp_dev->msix_enabled)
		vp_modern_queue_vector(mdev, vq->index,
				       VIRTIO_MSI_NO_VECTOR);

	if (!mdev->notify_base)
		pci_iounmap(mdev->pci_dev, (void __force __iomem *)vq->priv);

	vring_del_virtqueue(vq);
}

/*
 * Scan the device's vendor-specific PCI capability list for a
 * VIRTIO_PCI_CAP_SHARED_MEMORY_CFG entry with the requested id and
 * return its BAR and 64-bit offset/length; returns 0 if none is found.
 */
static int virtio_pci_find_shm_cap(struct pci_dev *dev, u8 required_id,
				   u8 *bar, u64 *offset, u64 *len)
{
	int pos;

	for (pos = pci_find_capability(dev, PCI_CAP_ID_VNDR); pos > 0;
	     pos = pci_find_next_capability(dev, pos, PCI_CAP_ID_VNDR)) {
		u8 type, cap_len, id, res_bar;
		u32 tmp32;
		u64 res_offset, res_length;

		pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
							 cfg_type), &type);
		if (type != VIRTIO_PCI_CAP_SHARED_MEMORY_CFG)
			continue;

		pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
							 cap_len), &cap_len);
		if (cap_len != sizeof(struct virtio_pci_cap64)) {
			dev_err(&dev->dev, "%s: shm cap with bad size offset:"
				" %d size: %d\n", __func__, pos, cap_len);
			continue;
		}

		pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
							 id), &id);
		if (id != required_id)
			continue;

		pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
							 bar), &res_bar);
		if (res_bar >= PCI_STD_NUM_BARS)
			continue;

		/* Type and ID match, and the BAR value isn't reserved.
		 * Looks good.
		 */

		/* Read the lower 32bit of length and offset */
		pci_read_config_dword(dev, pos + offsetof(struct virtio_pci_cap,
							  offset), &tmp32);
		res_offset = tmp32;
		pci_read_config_dword(dev, pos + offsetof(struct virtio_pci_cap,
							  length), &tmp32);
		res_length = tmp32;

		/* and now the top half */
		pci_read_config_dword(dev,
				      pos + offsetof(struct virtio_pci_cap64,
						     offset_hi), &tmp32);
		res_offset |= ((u64)tmp32) << 32;
		pci_read_config_dword(dev,
				      pos + offsetof(struct virtio_pci_cap64,
						     length_hi), &tmp32);
		res_length |= ((u64)tmp32) << 32;

		*bar = res_bar;
		*offset = res_offset;
		*len = res_length;

		return pos;
	}
	return 0;
}

static bool vp_get_shm_region(struct virtio_device *vdev,
			      struct virtio_shm_region *region, u8 id)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct pci_dev *pci_dev = vp_dev->pci_dev;
	u8 bar;
	u64 offset, len;
	phys_addr_t phys_addr;
	size_t bar_len;

	if (!virtio_pci_find_shm_cap(pci_dev, id, &bar, &offset, &len))
		return false;

	phys_addr = pci_resource_start(pci_dev, bar);
	bar_len = pci_resource_len(pci_dev, bar);

	if ((offset + len) < offset) {
		dev_err(&pci_dev->dev, "%s: cap offset+len overflow detected\n",
			__func__);
		return false;
	}

	if (offset + len > bar_len) {
		dev_err(&pci_dev->dev, "%s: bar shorter than cap offset+len\n",
			__func__);
		return false;
	}

	region->len = len;
	region->addr = (u64) phys_addr + offset;

	return true;
}

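/*
 * Two config ops tables: virtio_pci_modern_probe() installs
 * virtio_pci_config_nodev_ops when the device exposes no device-specific
 * config window (mdev->device is NULL), so .get/.set stay NULL there;
 * otherwise virtio_pci_config_ops is used.
 */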
static const struct virtio_config_ops virtio_pci_config_nodev_ops = {
	.get = NULL,
	.set = NULL,
	.generation = vp_generation,
	.get_status = vp_get_status,
	.set_status = vp_set_status,
	.reset = vp_reset,
	.find_vqs = vp_modern_find_vqs,
	.del_vqs = vp_del_vqs,
	.synchronize_cbs = vp_synchronize_vectors,
	.get_features = vp_get_features,
	.finalize_features = vp_finalize_features,
	.bus_name = vp_bus_name,
	.set_vq_affinity = vp_set_vq_affinity,
	.get_vq_affinity = vp_get_vq_affinity,
	.get_shm_region = vp_get_shm_region,
};

static const struct virtio_config_ops virtio_pci_config_ops = {
	.get = vp_get,
	.set = vp_set,
	.generation = vp_generation,
	.get_status = vp_get_status,
	.set_status = vp_set_status,
	.reset = vp_reset,
	.find_vqs = vp_modern_find_vqs,
	.del_vqs = vp_del_vqs,
	.synchronize_cbs = vp_synchronize_vectors,
	.get_features = vp_get_features,
	.finalize_features = vp_finalize_features,
	.bus_name = vp_bus_name,
	.set_vq_affinity = vp_set_vq_affinity,
	.get_vq_affinity = vp_get_vq_affinity,
	.get_shm_region = vp_get_shm_region,
};

/* the PCI probing function */
int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev)
{
	struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
	struct pci_dev *pci_dev = vp_dev->pci_dev;
	int err;

	mdev->pci_dev = pci_dev;

	err = vp_modern_probe(mdev);
	if (err)
		return err;

	if (mdev->device)
		vp_dev->vdev.config = &virtio_pci_config_ops;
	else
		vp_dev->vdev.config = &virtio_pci_config_nodev_ops;

	vp_dev->config_vector = vp_config_vector;
	vp_dev->setup_vq = setup_vq;
	vp_dev->del_vq = del_vq;
	vp_dev->isr = mdev->isr;
	vp_dev->vdev.id = mdev->id;

	return 0;
}

void virtio_pci_modern_remove(struct virtio_pci_device *vp_dev)
{
	struct virtio_pci_modern_device *mdev = &vp_dev->mdev;

	vp_modern_remove(mdev);
}