/*
 * Virtio PCI driver - modern (virtio 1.0) device support
 *
 * This module allows virtio devices to be used over a virtual PCI device.
 * This can be used with QEMU based VMMs like KVM or Xen.
 *
 * Copyright IBM Corp. 2007
 * Copyright Red Hat, Inc. 2014
 *
 * Authors:
 *  Anthony Liguori  <aliguori@us.ibm.com>
 *  Rusty Russell <rusty@rustcorp.com.au>
 *  Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include <linux/delay.h>
#define VIRTIO_PCI_NO_LEGACY
#include "virtio_pci_common.h"

/*
 * Type-safe wrappers for io accesses.
 * Use these to enforce at compile time the following spec requirement:
 *
 * The driver MUST access each field using the "natural" access
 * method, i.e. 32-bit accesses for 32-bit fields, 16-bit accesses
 * for 16-bit fields and 8-bit accesses for 8-bit fields.
 */
static inline u8 vp_ioread8(u8 __iomem *addr)
{
	return ioread8(addr);
}

static inline u16 vp_ioread16(__le16 __iomem *addr)
{
	return ioread16(addr);
}

static inline u32 vp_ioread32(__le32 __iomem *addr)
{
	return ioread32(addr);
}

static inline void vp_iowrite8(u8 value, u8 __iomem *addr)
{
	iowrite8(value, addr);
}

static inline void vp_iowrite16(u16 value, __le16 __iomem *addr)
{
	iowrite16(value, addr);
}

static inline void vp_iowrite32(u32 value, __le32 __iomem *addr)
{
	iowrite32(value, addr);
}

static void vp_iowrite64_twopart(u64 val,
				 __le32 __iomem *lo, __le32 __iomem *hi)
{
	vp_iowrite32((u32)val, lo);
	vp_iowrite32(val >> 32, hi);
}

static void __iomem *map_capability(struct pci_dev *dev, int off,
				    size_t minlen,
				    u32 align,
				    u32 start, u32 size,
				    size_t *len)
{
	u8 bar;
	u32 offset, length;
	void __iomem *p;

	pci_read_config_byte(dev, off + offsetof(struct virtio_pci_cap,
						 bar),
			     &bar);
	pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, offset),
			      &offset);
	pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, length),
			      &length);

	if (length <= start) {
		dev_err(&dev->dev,
			"virtio_pci: bad capability len %u (>%u expected)\n",
			length, start);
		return NULL;
	}

	if (length - start < minlen) {
		dev_err(&dev->dev,
			"virtio_pci: bad capability len %u (>=%zu expected)\n",
			length, minlen);
		return NULL;
	}

	length -= start;

	if (start + offset < offset) {
		dev_err(&dev->dev,
			"virtio_pci: map wrap-around %u+%u\n",
			start, offset);
		return NULL;
	}

	offset += start;

	if (offset & (align - 1)) {
		dev_err(&dev->dev,
			"virtio_pci: offset %u not aligned to %u\n",
			offset, align);
		return NULL;
	}

	if (length > size)
		length = size;

	if (len)
		*len = length;

	if (minlen + offset < minlen ||
	    minlen + offset > pci_resource_len(dev, bar)) {
		dev_err(&dev->dev,
			"virtio_pci: map virtio %zu@%u "
			"out of range on bar %i length %lu\n",
			minlen, offset,
			bar, (unsigned long)pci_resource_len(dev, bar));
		return NULL;
	}

	p = pci_iomap_range(dev, bar, offset, length);
	if (!p)
		dev_err(&dev->dev,
			"virtio_pci: unable to map virtio %u@%u on bar %i\n",
			length, offset, bar);
	return p;
}
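/*
 * Feature bits live behind a window register pair: writing 0 or 1 to
 * device_feature_select picks the low or high 32-bit word, which is then
 * read back from device_feature. guest_feature_select/guest_feature work
 * the same way for the bits the driver accepts.
 */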
/* virtio config->get_features() implementation */
static u64 vp_get_features(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	u64 features;

	vp_iowrite32(0, &vp_dev->common->device_feature_select);
	features = vp_ioread32(&vp_dev->common->device_feature);
	vp_iowrite32(1, &vp_dev->common->device_feature_select);
	features |= ((u64)vp_ioread32(&vp_dev->common->device_feature) << 32);

	return features;
}

static void vp_transport_features(struct virtio_device *vdev, u64 features)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct pci_dev *pci_dev = vp_dev->pci_dev;

	if ((features & BIT_ULL(VIRTIO_F_SR_IOV)) &&
			pci_find_ext_capability(pci_dev, PCI_EXT_CAP_ID_SRIOV))
		__virtio_set_bit(vdev, VIRTIO_F_SR_IOV);
}

/* virtio config->finalize_features() implementation */
static int vp_finalize_features(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	u64 features = vdev->features;

	/* Give virtio_ring a chance to accept features. */
	vring_transport_features(vdev);

	/* Give virtio_pci a chance to accept features. */
	vp_transport_features(vdev, features);

	if (!__virtio_test_bit(vdev, VIRTIO_F_VERSION_1)) {
		dev_err(&vdev->dev, "virtio: device uses modern interface "
			"but does not have VIRTIO_F_VERSION_1\n");
		return -EINVAL;
	}

	vp_iowrite32(0, &vp_dev->common->guest_feature_select);
	vp_iowrite32((u32)vdev->features, &vp_dev->common->guest_feature);
	vp_iowrite32(1, &vp_dev->common->guest_feature_select);
	vp_iowrite32(vdev->features >> 32, &vp_dev->common->guest_feature);

	return 0;
}

/* virtio config->get() implementation */
static void vp_get(struct virtio_device *vdev, unsigned offset,
		   void *buf, unsigned len)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	u8 b;
	__le16 w;
	__le32 l;

	BUG_ON(offset + len > vp_dev->device_len);

	switch (len) {
	case 1:
		b = ioread8(vp_dev->device + offset);
		memcpy(buf, &b, sizeof b);
		break;
	case 2:
		w = cpu_to_le16(ioread16(vp_dev->device + offset));
		memcpy(buf, &w, sizeof w);
		break;
	case 4:
		l = cpu_to_le32(ioread32(vp_dev->device + offset));
		memcpy(buf, &l, sizeof l);
		break;
	case 8:
		l = cpu_to_le32(ioread32(vp_dev->device + offset));
		memcpy(buf, &l, sizeof l);
		l = cpu_to_le32(ioread32(vp_dev->device + offset + sizeof l));
		memcpy(buf + sizeof l, &l, sizeof l);
		break;
	default:
		BUG();
	}
}
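/*
 * Note: virtio 1.0 device configuration space is little-endian regardless of
 * host byte order, so vp_get()/vp_set() convert through __le16/__le32 and
 * split 64-bit fields into two 32-bit accesses, low half first.
 */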
/* the config->set() implementation.  it's symmetric to the config->get()
 * implementation */
static void vp_set(struct virtio_device *vdev, unsigned offset,
		   const void *buf, unsigned len)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	u8 b;
	__le16 w;
	__le32 l;

	BUG_ON(offset + len > vp_dev->device_len);

	switch (len) {
	case 1:
		memcpy(&b, buf, sizeof b);
		iowrite8(b, vp_dev->device + offset);
		break;
	case 2:
		memcpy(&w, buf, sizeof w);
		iowrite16(le16_to_cpu(w), vp_dev->device + offset);
		break;
	case 4:
		memcpy(&l, buf, sizeof l);
		iowrite32(le32_to_cpu(l), vp_dev->device + offset);
		break;
	case 8:
		memcpy(&l, buf, sizeof l);
		iowrite32(le32_to_cpu(l), vp_dev->device + offset);
		memcpy(&l, buf + sizeof l, sizeof l);
		iowrite32(le32_to_cpu(l), vp_dev->device + offset + sizeof l);
		break;
	default:
		BUG();
	}
}

static u32 vp_generation(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	return vp_ioread8(&vp_dev->common->config_generation);
}

/* config->{get,set}_status() implementations */
static u8 vp_get_status(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	return vp_ioread8(&vp_dev->common->device_status);
}

static void vp_set_status(struct virtio_device *vdev, u8 status)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	/* We should never be setting status to 0. */
	BUG_ON(status == 0);
	vp_iowrite8(status, &vp_dev->common->device_status);
}

static void vp_reset(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	/* 0 status means a reset. */
	vp_iowrite8(0, &vp_dev->common->device_status);
	/* After writing 0 to device_status, the driver MUST wait for a read of
	 * device_status to return 0 before reinitializing the device.
	 * This will flush out the status write, and flush in device writes,
	 * including MSI-X interrupts, if any.
	 */
	while (vp_ioread8(&vp_dev->common->device_status))
		msleep(1);
	/* Flush pending VQ/configuration callbacks. */
	vp_synchronize_vectors(vdev);
}

static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
{
	/* Setup the vector used for configuration events */
	vp_iowrite16(vector, &vp_dev->common->msix_config);
	/* Verify we had enough resources to assign the vector */
	/* Will also flush the write out to device */
	return vp_ioread16(&vp_dev->common->msix_config);
}
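/*
 * Queue setup: select the queue, allocate the vring, program its guest
 * physical addresses into the common config, locate the notification
 * doorbell for vp_notify(), and optionally bind an MSI-X vector. The queue
 * itself is only enabled later, from vp_modern_find_vqs().
 */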
static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
				  struct virtio_pci_vq_info *info,
				  unsigned index,
				  void (*callback)(struct virtqueue *vq),
				  const char *name,
				  bool ctx,
				  u16 msix_vec)
{
	struct virtio_pci_common_cfg __iomem *cfg = vp_dev->common;
	struct virtqueue *vq;
	u16 num, off;
	int err;

	if (index >= vp_ioread16(&cfg->num_queues))
		return ERR_PTR(-ENOENT);

	/* Select the queue we're interested in */
	vp_iowrite16(index, &cfg->queue_select);

	/* Check if queue is either not available or already active. */
	num = vp_ioread16(&cfg->queue_size);
	if (!num || vp_ioread16(&cfg->queue_enable))
		return ERR_PTR(-ENOENT);

	if (num & (num - 1)) {
		dev_warn(&vp_dev->pci_dev->dev, "bad queue size %u", num);
		return ERR_PTR(-EINVAL);
	}

	/* get offset of notification word for this vq */
	off = vp_ioread16(&cfg->queue_notify_off);

	info->msix_vector = msix_vec;

	/* create the vring */
	vq = vring_create_virtqueue(index, num,
				    SMP_CACHE_BYTES, &vp_dev->vdev,
				    true, true, ctx,
				    vp_notify, callback, name);
	if (!vq)
		return ERR_PTR(-ENOMEM);

	/* activate the queue */
	vp_iowrite16(virtqueue_get_vring_size(vq), &cfg->queue_size);
	vp_iowrite64_twopart(virtqueue_get_desc_addr(vq),
			     &cfg->queue_desc_lo, &cfg->queue_desc_hi);
	vp_iowrite64_twopart(virtqueue_get_avail_addr(vq),
			     &cfg->queue_avail_lo, &cfg->queue_avail_hi);
	vp_iowrite64_twopart(virtqueue_get_used_addr(vq),
			     &cfg->queue_used_lo, &cfg->queue_used_hi);

	if (vp_dev->notify_base) {
		/* offset should not wrap */
		if ((u64)off * vp_dev->notify_offset_multiplier + 2
		    > vp_dev->notify_len) {
			dev_warn(&vp_dev->pci_dev->dev,
				 "bad notification offset %u (x %u) "
				 "for queue %u > %zd",
				 off, vp_dev->notify_offset_multiplier,
				 index, vp_dev->notify_len);
			err = -EINVAL;
			goto err_map_notify;
		}
		vq->priv = (void __force *)vp_dev->notify_base +
			off * vp_dev->notify_offset_multiplier;
	} else {
		vq->priv = (void __force *)map_capability(vp_dev->pci_dev,
					  vp_dev->notify_map_cap, 2, 2,
					  off * vp_dev->notify_offset_multiplier, 2,
					  NULL);
	}

	if (!vq->priv) {
		err = -ENOMEM;
		goto err_map_notify;
	}

	if (msix_vec != VIRTIO_MSI_NO_VECTOR) {
		vp_iowrite16(msix_vec, &cfg->queue_msix_vector);
		msix_vec = vp_ioread16(&cfg->queue_msix_vector);
		if (msix_vec == VIRTIO_MSI_NO_VECTOR) {
			err = -EBUSY;
			goto err_assign_vector;
		}
	}

	return vq;

err_assign_vector:
	if (!vp_dev->notify_base)
		pci_iounmap(vp_dev->pci_dev, (void __iomem __force *)vq->priv);
err_map_notify:
	vring_del_virtqueue(vq);
	return ERR_PTR(err);
}
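/*
 * For reference, the notification address programmed into vq->priv above is
 *
 *	BAR base + cap.offset + queue_notify_off * notify_off_multiplier
 *
 * i.e. a 2-byte doorbell inside the notify capability region. When the whole
 * region fits in one page it was premapped at probe time (notify_base);
 * otherwise setup_vq() maps each doorbell individually.
 */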
static int vp_modern_find_vqs(struct virtio_device *vdev, unsigned nvqs,
			      struct virtqueue *vqs[],
			      vq_callback_t *callbacks[],
			      const char * const names[], const bool *ctx,
			      struct irq_affinity *desc)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtqueue *vq;
	int rc = vp_find_vqs(vdev, nvqs, vqs, callbacks, names, ctx, desc);

	if (rc)
		return rc;

	/* Select and activate all queues. Has to be done last: once we do
	 * this, there's no way to go back except reset.
	 */
	list_for_each_entry(vq, &vdev->vqs, list) {
		vp_iowrite16(vq->index, &vp_dev->common->queue_select);
		vp_iowrite16(1, &vp_dev->common->queue_enable);
	}

	return 0;
}

static void del_vq(struct virtio_pci_vq_info *info)
{
	struct virtqueue *vq = info->vq;
	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);

	vp_iowrite16(vq->index, &vp_dev->common->queue_select);

	if (vp_dev->msix_enabled) {
		vp_iowrite16(VIRTIO_MSI_NO_VECTOR,
			     &vp_dev->common->queue_msix_vector);
		/* Flush the write out to device */
		vp_ioread16(&vp_dev->common->queue_msix_vector);
	}

	if (!vp_dev->notify_base)
		pci_iounmap(vp_dev->pci_dev, (void __force __iomem *)vq->priv);

	vring_del_virtqueue(vq);
}

static const struct virtio_config_ops virtio_pci_config_nodev_ops = {
	.get		= NULL,
	.set		= NULL,
	.generation	= vp_generation,
	.get_status	= vp_get_status,
	.set_status	= vp_set_status,
	.reset		= vp_reset,
	.find_vqs	= vp_modern_find_vqs,
	.del_vqs	= vp_del_vqs,
	.get_features	= vp_get_features,
	.finalize_features = vp_finalize_features,
	.bus_name	= vp_bus_name,
	.set_vq_affinity = vp_set_vq_affinity,
	.get_vq_affinity = vp_get_vq_affinity,
};

static const struct virtio_config_ops virtio_pci_config_ops = {
	.get		= vp_get,
	.set		= vp_set,
	.generation	= vp_generation,
	.get_status	= vp_get_status,
	.set_status	= vp_set_status,
	.reset		= vp_reset,
	.find_vqs	= vp_modern_find_vqs,
	.del_vqs	= vp_del_vqs,
	.get_features	= vp_get_features,
	.finalize_features = vp_finalize_features,
	.bus_name	= vp_bus_name,
	.set_vq_affinity = vp_set_vq_affinity,
	.get_vq_affinity = vp_get_vq_affinity,
};

/**
 * virtio_pci_find_capability - walk capabilities to find device info.
 * @dev: the pci device
 * @cfg_type: the VIRTIO_PCI_CAP_* value we seek
 * @ioresource_types: IORESOURCE_MEM and/or IORESOURCE_IO.
 * @bars: the bitmask of BARs found to back matching capabilities
 *
 * Returns offset of the capability, or 0.
 */
static inline int virtio_pci_find_capability(struct pci_dev *dev, u8 cfg_type,
					     u32 ioresource_types, int *bars)
{
	int pos;

	for (pos = pci_find_capability(dev, PCI_CAP_ID_VNDR);
	     pos > 0;
	     pos = pci_find_next_capability(dev, pos, PCI_CAP_ID_VNDR)) {
		u8 type, bar;
		pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
							 cfg_type),
				     &type);
		pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
							 bar),
				     &bar);

		/* Ignore structures with reserved BAR values */
		if (bar > 0x5)
			continue;

		if (type == cfg_type) {
			if (pci_resource_len(dev, bar) &&
			    pci_resource_flags(dev, bar) & ioresource_types) {
				*bars |= (1 << bar);
				return pos;
			}
		}
	}
	return 0;
}
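/*
 * Each successful lookup also ORs the capability's BAR into *bars, so the
 * probe routine can pass the accumulated mask to
 * pci_request_selected_regions() and claim only the BARs it actually uses.
 */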
/* This is part of the ABI. Don't screw with it. */
static inline void check_offsets(void)
{
	/* Note: disk space was harmed in compilation of this function. */
	BUILD_BUG_ON(VIRTIO_PCI_CAP_VNDR !=
		     offsetof(struct virtio_pci_cap, cap_vndr));
	BUILD_BUG_ON(VIRTIO_PCI_CAP_NEXT !=
		     offsetof(struct virtio_pci_cap, cap_next));
	BUILD_BUG_ON(VIRTIO_PCI_CAP_LEN !=
		     offsetof(struct virtio_pci_cap, cap_len));
	BUILD_BUG_ON(VIRTIO_PCI_CAP_CFG_TYPE !=
		     offsetof(struct virtio_pci_cap, cfg_type));
	BUILD_BUG_ON(VIRTIO_PCI_CAP_BAR !=
		     offsetof(struct virtio_pci_cap, bar));
	BUILD_BUG_ON(VIRTIO_PCI_CAP_OFFSET !=
		     offsetof(struct virtio_pci_cap, offset));
	BUILD_BUG_ON(VIRTIO_PCI_CAP_LENGTH !=
		     offsetof(struct virtio_pci_cap, length));
	BUILD_BUG_ON(VIRTIO_PCI_NOTIFY_CAP_MULT !=
		     offsetof(struct virtio_pci_notify_cap,
			      notify_off_multiplier));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_DFSELECT !=
		     offsetof(struct virtio_pci_common_cfg,
			      device_feature_select));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_DF !=
		     offsetof(struct virtio_pci_common_cfg, device_feature));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_GFSELECT !=
		     offsetof(struct virtio_pci_common_cfg,
			      guest_feature_select));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_GF !=
		     offsetof(struct virtio_pci_common_cfg, guest_feature));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_MSIX !=
		     offsetof(struct virtio_pci_common_cfg, msix_config));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_NUMQ !=
		     offsetof(struct virtio_pci_common_cfg, num_queues));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_STATUS !=
		     offsetof(struct virtio_pci_common_cfg, device_status));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_CFGGENERATION !=
		     offsetof(struct virtio_pci_common_cfg, config_generation));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_SELECT !=
		     offsetof(struct virtio_pci_common_cfg, queue_select));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_SIZE !=
		     offsetof(struct virtio_pci_common_cfg, queue_size));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_MSIX !=
		     offsetof(struct virtio_pci_common_cfg, queue_msix_vector));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_ENABLE !=
		     offsetof(struct virtio_pci_common_cfg, queue_enable));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_NOFF !=
		     offsetof(struct virtio_pci_common_cfg, queue_notify_off));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_DESCLO !=
		     offsetof(struct virtio_pci_common_cfg, queue_desc_lo));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_DESCHI !=
		     offsetof(struct virtio_pci_common_cfg, queue_desc_hi));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_AVAILLO !=
		     offsetof(struct virtio_pci_common_cfg, queue_avail_lo));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_AVAILHI !=
		     offsetof(struct virtio_pci_common_cfg, queue_avail_hi));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_USEDLO !=
		     offsetof(struct virtio_pci_common_cfg, queue_used_lo));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_USEDHI !=
		     offsetof(struct virtio_pci_common_cfg, queue_used_hi));
}
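/*
 * Probe overview: check the device ID range, locate the common, ISR, notify
 * and (optional) device-specific capabilities, map them, then hand the
 * resulting config ops and vq setup/teardown hooks to the common virtio-pci
 * core.
 */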
/* the PCI probing function */
int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev)
{
	struct pci_dev *pci_dev = vp_dev->pci_dev;
	int err, common, isr, notify, device;
	u32 notify_length;
	u32 notify_offset;

	check_offsets();

	/* We only own devices >= 0x1000 and <= 0x107f: leave the rest. */
	if (pci_dev->device < 0x1000 || pci_dev->device > 0x107f)
		return -ENODEV;

	if (pci_dev->device < 0x1040) {
		/* Transitional devices: use the PCI subsystem device id as
		 * virtio device id, same as legacy driver always did.
		 */
		vp_dev->vdev.id.device = pci_dev->subsystem_device;
	} else {
		/* Modern devices: simply use PCI device id, but start from 0x1040. */
		vp_dev->vdev.id.device = pci_dev->device - 0x1040;
	}
	vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor;

	/* check for a common config: if not, use legacy mode (bar 0). */
	common = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_COMMON_CFG,
					    IORESOURCE_IO | IORESOURCE_MEM,
					    &vp_dev->modern_bars);
	if (!common) {
		dev_info(&pci_dev->dev,
			 "virtio_pci: leaving for legacy driver\n");
		return -ENODEV;
	}

	/* If common is there, these should be too... */
	isr = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_ISR_CFG,
					 IORESOURCE_IO | IORESOURCE_MEM,
					 &vp_dev->modern_bars);
	notify = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_NOTIFY_CFG,
					    IORESOURCE_IO | IORESOURCE_MEM,
					    &vp_dev->modern_bars);
	if (!isr || !notify) {
		dev_err(&pci_dev->dev,
			"virtio_pci: missing capabilities %i/%i/%i\n",
			common, isr, notify);
		return -EINVAL;
	}

	err = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(64));
	if (err)
		err = dma_set_mask_and_coherent(&pci_dev->dev,
						DMA_BIT_MASK(32));
	if (err)
		dev_warn(&pci_dev->dev, "Failed to enable 64-bit or 32-bit DMA. Trying to continue, but this might not work.\n");

	/* Device capability is only mandatory for devices that have
	 * device-specific configuration.
	 */
	device = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_DEVICE_CFG,
					    IORESOURCE_IO | IORESOURCE_MEM,
					    &vp_dev->modern_bars);

	err = pci_request_selected_regions(pci_dev, vp_dev->modern_bars,
					   "virtio-pci-modern");
	if (err)
		return err;

	err = -EINVAL;
	vp_dev->common = map_capability(pci_dev, common,
					sizeof(struct virtio_pci_common_cfg), 4,
					0, sizeof(struct virtio_pci_common_cfg),
					NULL);
	if (!vp_dev->common)
		goto err_map_common;
	vp_dev->isr = map_capability(pci_dev, isr, sizeof(u8), 1,
				     0, 1,
				     NULL);
	if (!vp_dev->isr)
		goto err_map_isr;

	/* Read notify_off_multiplier from config space. */
	pci_read_config_dword(pci_dev,
			      notify + offsetof(struct virtio_pci_notify_cap,
						notify_off_multiplier),
			      &vp_dev->notify_offset_multiplier);
	/* Read notify length and offset from config space. */
	pci_read_config_dword(pci_dev,
			      notify + offsetof(struct virtio_pci_notify_cap,
						cap.length),
			      &notify_length);

	pci_read_config_dword(pci_dev,
			      notify + offsetof(struct virtio_pci_notify_cap,
						cap.offset),
			      &notify_offset);

	/* We don't know how many VQs we'll map ahead of time.
	 * If notify length is small, map it all now.
	 * Otherwise, map each VQ individually later.
	 */
	if ((u64)notify_length + (notify_offset % PAGE_SIZE) <= PAGE_SIZE) {
		vp_dev->notify_base = map_capability(pci_dev, notify, 2, 2,
						     0, notify_length,
						     &vp_dev->notify_len);
		if (!vp_dev->notify_base)
			goto err_map_notify;
	} else {
		vp_dev->notify_map_cap = notify;
	}

	/* Again, we don't know how much we should map, but PAGE_SIZE
	 * is more than enough for all existing devices.
	 */
	if (device) {
		vp_dev->device = map_capability(pci_dev, device, 0, 4,
						0, PAGE_SIZE,
						&vp_dev->device_len);
		if (!vp_dev->device)
			goto err_map_device;

		vp_dev->vdev.config = &virtio_pci_config_ops;
	} else {
		vp_dev->vdev.config = &virtio_pci_config_nodev_ops;
	}

	vp_dev->config_vector = vp_config_vector;
	vp_dev->setup_vq = setup_vq;
	vp_dev->del_vq = del_vq;

	return 0;

err_map_device:
	if (vp_dev->notify_base)
		pci_iounmap(pci_dev, vp_dev->notify_base);
err_map_notify:
	pci_iounmap(pci_dev, vp_dev->isr);
err_map_isr:
	pci_iounmap(pci_dev, vp_dev->common);
err_map_common:
	return err;
}
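/* Teardown mirrors probe: unmap whatever was mapped, in reverse order, then
 * release the claimed BARs.
 */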
void virtio_pci_modern_remove(struct virtio_pci_device *vp_dev)
{
	struct pci_dev *pci_dev = vp_dev->pci_dev;

	if (vp_dev->device)
		pci_iounmap(pci_dev, vp_dev->device);
	if (vp_dev->notify_base)
		pci_iounmap(pci_dev, vp_dev->notify_base);
	pci_iounmap(pci_dev, vp_dev->isr);
	pci_iounmap(pci_dev, vp_dev->common);
	pci_release_selected_regions(pci_dev, vp_dev->modern_bars);
}