// SPDX-License-Identifier: GPL-2.0
/*
 * Volume Management Device driver
 * Copyright (c) 2015, Intel Corporation.
 */

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/pci.h>
#include <linux/srcu.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>

#include <asm/irqdomain.h>
#include <asm/device.h>
#include <asm/msi.h>
#include <asm/msidef.h>

#define VMD_CFGBAR	0
#define VMD_MEMBAR1	2
#define VMD_MEMBAR2	4

#define PCI_REG_VMCAP		0x40
#define BUS_RESTRICT_CAP(vmcap)	(vmcap & 0x1)
#define PCI_REG_VMCONFIG	0x44
#define BUS_RESTRICT_CFG(vmcfg)	((vmcfg >> 8) & 0x3)
#define PCI_REG_VMLOCK		0x70
#define MB2_SHADOW_EN(vmlock)	(vmlock & 0x2)

enum vmd_features {
	/*
	 * Device may contain registers which hint the physical location of the
	 * membars, in order to allow proper address translation during
	 * resource assignment to enable guest virtualization
	 */
	VMD_FEAT_HAS_MEMBAR_SHADOW	= (1 << 0),

	/*
	 * Device may provide root port configuration information which limits
	 * bus numbering
	 */
	VMD_FEAT_HAS_BUS_RESTRICTIONS	= (1 << 1),
};

/*
 * Lock for manipulating VMD IRQ lists.
 */
static DEFINE_RAW_SPINLOCK(list_lock);

/**
 * struct vmd_irq - private data to map driver IRQ to the VMD shared vector
 * @node: list item for parent traversal.
 * @irq: back pointer to parent.
 * @enabled: true if driver enabled IRQ
 * @virq: the virtual IRQ value provided to the requesting driver.
 *
 * Every MSI/MSI-X IRQ requested for a device in a VMD domain will be mapped to
 * a VMD IRQ using this structure.
 */
struct vmd_irq {
	struct list_head	node;
	struct vmd_irq_list	*irq;
	bool			enabled;
	unsigned int		virq;
};

/**
 * struct vmd_irq_list - list of driver requested IRQs mapping to a VMD vector
 * @irq_list: the list of IRQs the VMD one demuxes to.
 * @srcu: SRCU struct for local synchronization.
 * @count: number of child IRQs assigned to this vector; used to track
 *	   sharing.
 */
struct vmd_irq_list {
	struct list_head	irq_list;
	struct srcu_struct	srcu;
	unsigned int		count;
};

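/*
 * struct vmd_dev - per-VMD-endpoint state
 * @dev: the VMD endpoint's own PCI device
 * @cfg_lock: serializes child config space accesses
 * @cfgbar: mapping of the CFGBAR that contains child config space
 * @msix_count: number of MSI-X vectors allocated for the endpoint
 * @irqs: one vmd_irq_list per allocated MSI-X vector
 * @sysdata: sysdata for the child PCI domain
 * @resources: child bus number range plus the two memory windows
 * @irq_domain: MSI IRQ domain used by devices in the VMD domain
 * @bus: root bus of the VMD-owned PCI domain
 */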
struct vmd_dev {
	struct pci_dev		*dev;

	spinlock_t		cfg_lock;
	char __iomem		*cfgbar;

	int			msix_count;
	struct vmd_irq_list	*irqs;

	struct pci_sysdata	sysdata;
	struct resource		resources[3];
	struct irq_domain	*irq_domain;
	struct pci_bus		*bus;

#ifdef CONFIG_X86_DEV_DMA_OPS
	struct dma_map_ops	dma_ops;
	struct dma_domain	dma_domain;
#endif
};

static inline struct vmd_dev *vmd_from_bus(struct pci_bus *bus)
{
	return container_of(bus->sysdata, struct vmd_dev, sysdata);
}

static inline unsigned int index_from_irqs(struct vmd_dev *vmd,
					   struct vmd_irq_list *irqs)
{
	return irqs - vmd->irqs;
}

/*
 * Drivers managing a device in a VMD domain allocate their own IRQs as before,
 * but the MSI entry for the hardware it's driving will be programmed with a
 * destination ID for the VMD MSI-X table. The VMD muxes interrupts in its
 * domain into one of its own, and the VMD driver de-muxes these for the
 * handlers sharing that VMD IRQ. The vmd irq_domain provides the operations
 * and irq_chip to set this up.
 */
static void vmd_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct vmd_irq *vmdirq = data->chip_data;
	struct vmd_irq_list *irq = vmdirq->irq;
	struct vmd_dev *vmd = irq_data_get_irq_handler_data(data);

	msg->address_hi = MSI_ADDR_BASE_HI;
	msg->address_lo = MSI_ADDR_BASE_LO |
			  MSI_ADDR_DEST_ID(index_from_irqs(vmd, irq));
	msg->data = 0;
}

/*
 * We rely on MSI_FLAG_USE_DEF_CHIP_OPS to set the IRQ mask/unmask ops.
 */
static void vmd_irq_enable(struct irq_data *data)
{
	struct vmd_irq *vmdirq = data->chip_data;
	unsigned long flags;

	raw_spin_lock_irqsave(&list_lock, flags);
	WARN_ON(vmdirq->enabled);
	list_add_tail_rcu(&vmdirq->node, &vmdirq->irq->irq_list);
	vmdirq->enabled = true;
	raw_spin_unlock_irqrestore(&list_lock, flags);

	data->chip->irq_unmask(data);
}

static void vmd_irq_disable(struct irq_data *data)
{
	struct vmd_irq *vmdirq = data->chip_data;
	unsigned long flags;

	data->chip->irq_mask(data);

	raw_spin_lock_irqsave(&list_lock, flags);
	if (vmdirq->enabled) {
		list_del_rcu(&vmdirq->node);
		vmdirq->enabled = false;
	}
	raw_spin_unlock_irqrestore(&list_lock, flags);
}

/*
 * XXX: Stubbed until we develop acceptable way to not create conflicts with
 * other devices sharing the same vector.
 */
static int vmd_irq_set_affinity(struct irq_data *data,
				const struct cpumask *dest, bool force)
{
	return -EINVAL;
}

static struct irq_chip vmd_msi_controller = {
	.name			= "VMD-MSI",
	.irq_enable		= vmd_irq_enable,
	.irq_disable		= vmd_irq_disable,
	.irq_compose_msi_msg	= vmd_compose_msi_msg,
	.irq_set_affinity	= vmd_irq_set_affinity,
};

static irq_hw_number_t vmd_get_hwirq(struct msi_domain_info *info,
				     msi_alloc_info_t *arg)
{
	return 0;
}

/*
 * XXX: We can be even smarter selecting the best IRQ once we solve the
 * affinity problem.
 */
static struct vmd_irq_list *vmd_next_irq(struct vmd_dev *vmd, struct msi_desc *desc)
{
	int i, best = 1;
	unsigned long flags;

	if (vmd->msix_count == 1)
		return &vmd->irqs[0];

	/*
	 * White list for fast-interrupt handlers. All others will share the
	 * "slow" interrupt vector.
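	 * Only NVMe devices (PCI_CLASS_STORAGE_EXPRESS) are spread across
	 * vectors 1..N; every other class lands on vector 0.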
	 */
	switch (msi_desc_to_pci_dev(desc)->class) {
	case PCI_CLASS_STORAGE_EXPRESS:
		break;
	default:
		return &vmd->irqs[0];
	}

	raw_spin_lock_irqsave(&list_lock, flags);
	for (i = 1; i < vmd->msix_count; i++)
		if (vmd->irqs[i].count < vmd->irqs[best].count)
			best = i;
	vmd->irqs[best].count++;
	raw_spin_unlock_irqrestore(&list_lock, flags);

	return &vmd->irqs[best];
}

static int vmd_msi_init(struct irq_domain *domain, struct msi_domain_info *info,
			unsigned int virq, irq_hw_number_t hwirq,
			msi_alloc_info_t *arg)
{
	struct msi_desc *desc = arg->desc;
	struct vmd_dev *vmd = vmd_from_bus(msi_desc_to_pci_dev(desc)->bus);
	struct vmd_irq *vmdirq = kzalloc(sizeof(*vmdirq), GFP_KERNEL);
	unsigned int index, vector;

	if (!vmdirq)
		return -ENOMEM;

	INIT_LIST_HEAD(&vmdirq->node);
	vmdirq->irq = vmd_next_irq(vmd, desc);
	vmdirq->virq = virq;
	index = index_from_irqs(vmd, vmdirq->irq);
	vector = pci_irq_vector(vmd->dev, index);

	irq_domain_set_info(domain, virq, vector, info->chip, vmdirq,
			    handle_untracked_irq, vmd, NULL);
	return 0;
}

static void vmd_msi_free(struct irq_domain *domain,
			 struct msi_domain_info *info, unsigned int virq)
{
	struct vmd_irq *vmdirq = irq_get_chip_data(virq);
	unsigned long flags;

	synchronize_srcu(&vmdirq->irq->srcu);

	/* XXX: Potential optimization to rebalance */
	raw_spin_lock_irqsave(&list_lock, flags);
	vmdirq->irq->count--;
	raw_spin_unlock_irqrestore(&list_lock, flags);

	kfree(vmdirq);
}

static int vmd_msi_prepare(struct irq_domain *domain, struct device *dev,
			   int nvec, msi_alloc_info_t *arg)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct vmd_dev *vmd = vmd_from_bus(pdev->bus);

	if (nvec > vmd->msix_count)
		return vmd->msix_count;

	memset(arg, 0, sizeof(*arg));
	return 0;
}

static void vmd_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc)
{
	arg->desc = desc;
}

static struct msi_domain_ops vmd_msi_domain_ops = {
	.get_hwirq	= vmd_get_hwirq,
	.msi_init	= vmd_msi_init,
	.msi_free	= vmd_msi_free,
	.msi_prepare	= vmd_msi_prepare,
	.set_desc	= vmd_set_desc,
};

static struct msi_domain_info vmd_msi_domain_info = {
	.flags		= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
			  MSI_FLAG_PCI_MSIX,
	.ops		= &vmd_msi_domain_ops,
	.chip		= &vmd_msi_controller,
};

#ifdef CONFIG_X86_DEV_DMA_OPS
/*
 * VMD replaces the requester ID with its own. DMA mappings for devices in a
 * VMD domain need to be mapped for the VMD, not the device requiring
 * the mapping.
 */
static struct device *to_vmd_dev(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct vmd_dev *vmd = vmd_from_bus(pdev->bus);

	return &vmd->dev->dev;
}

static void *vmd_alloc(struct device *dev, size_t size, dma_addr_t *addr,
		       gfp_t flag, unsigned long attrs)
{
	return dma_alloc_attrs(to_vmd_dev(dev), size, addr, flag, attrs);
}

static void vmd_free(struct device *dev, size_t size, void *vaddr,
		     dma_addr_t addr, unsigned long attrs)
{
	return dma_free_attrs(to_vmd_dev(dev), size, vaddr, addr, attrs);
}

static int vmd_mmap(struct device *dev, struct vm_area_struct *vma,
		    void *cpu_addr, dma_addr_t addr, size_t size,
		    unsigned long attrs)
{
	return dma_mmap_attrs(to_vmd_dev(dev), vma, cpu_addr, addr, size,
			      attrs);
}

static int vmd_get_sgtable(struct device *dev, struct sg_table *sgt,
			   void *cpu_addr, dma_addr_t addr, size_t size,
			   unsigned long attrs)
{
	return dma_get_sgtable_attrs(to_vmd_dev(dev), sgt, cpu_addr, addr, size,
				     attrs);
}

static dma_addr_t vmd_map_page(struct device *dev, struct page *page,
			       unsigned long offset, size_t size,
			       enum dma_data_direction dir,
			       unsigned long attrs)
{
	return dma_map_page_attrs(to_vmd_dev(dev), page, offset, size, dir,
				  attrs);
}

static void vmd_unmap_page(struct device *dev, dma_addr_t addr, size_t size,
			   enum dma_data_direction dir, unsigned long attrs)
{
	dma_unmap_page_attrs(to_vmd_dev(dev), addr, size, dir, attrs);
}

static int vmd_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		      enum dma_data_direction dir, unsigned long attrs)
{
	return dma_map_sg_attrs(to_vmd_dev(dev), sg, nents, dir, attrs);
}

static void vmd_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
			 enum dma_data_direction dir, unsigned long attrs)
{
	dma_unmap_sg_attrs(to_vmd_dev(dev), sg, nents, dir, attrs);
}

static void vmd_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
				    size_t size, enum dma_data_direction dir)
{
	dma_sync_single_for_cpu(to_vmd_dev(dev), addr, size, dir);
}

static void vmd_sync_single_for_device(struct device *dev, dma_addr_t addr,
				       size_t size, enum dma_data_direction dir)
{
	dma_sync_single_for_device(to_vmd_dev(dev), addr, size, dir);
}

static void vmd_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction dir)
{
	dma_sync_sg_for_cpu(to_vmd_dev(dev), sg, nents, dir);
}

static void vmd_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir)
{
	dma_sync_sg_for_device(to_vmd_dev(dev), sg, nents, dir);
}

static int vmd_dma_supported(struct device *dev, u64 mask)
{
	return dma_supported(to_vmd_dev(dev), mask);
}

static u64 vmd_get_required_mask(struct device *dev)
{
	return dma_get_required_mask(to_vmd_dev(dev));
}

static void vmd_teardown_dma_ops(struct vmd_dev *vmd)
{
	struct dma_domain *domain = &vmd->dma_domain;

	if (get_dma_ops(&vmd->dev->dev))
		del_dma_domain(domain);
}

#define ASSIGN_VMD_DMA_OPS(source, dest, fn)	\
	do {					\
		if (source->fn)			\
			dest->fn = vmd_##fn;	\
	} while (0)

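/*
 * Build this domain's dma_map_ops: copy each callback the VMD endpoint's
 * own DMA ops provide, substituting the vmd_* wrappers above, and register
 * the result for the new PCI domain.
 */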
static void vmd_setup_dma_ops(struct vmd_dev *vmd)
{
	const struct dma_map_ops *source = get_dma_ops(&vmd->dev->dev);
	struct dma_map_ops *dest = &vmd->dma_ops;
	struct dma_domain *domain = &vmd->dma_domain;

	domain->domain_nr = vmd->sysdata.domain;
	domain->dma_ops = dest;

	if (!source)
		return;
	ASSIGN_VMD_DMA_OPS(source, dest, alloc);
	ASSIGN_VMD_DMA_OPS(source, dest, free);
	ASSIGN_VMD_DMA_OPS(source, dest, mmap);
	ASSIGN_VMD_DMA_OPS(source, dest, get_sgtable);
	ASSIGN_VMD_DMA_OPS(source, dest, map_page);
	ASSIGN_VMD_DMA_OPS(source, dest, unmap_page);
	ASSIGN_VMD_DMA_OPS(source, dest, map_sg);
	ASSIGN_VMD_DMA_OPS(source, dest, unmap_sg);
	ASSIGN_VMD_DMA_OPS(source, dest, sync_single_for_cpu);
	ASSIGN_VMD_DMA_OPS(source, dest, sync_single_for_device);
	ASSIGN_VMD_DMA_OPS(source, dest, sync_sg_for_cpu);
	ASSIGN_VMD_DMA_OPS(source, dest, sync_sg_for_device);
	ASSIGN_VMD_DMA_OPS(source, dest, dma_supported);
	ASSIGN_VMD_DMA_OPS(source, dest, get_required_mask);
	add_dma_domain(domain);
}
#undef ASSIGN_VMD_DMA_OPS
#else
static void vmd_teardown_dma_ops(struct vmd_dev *vmd) {}
static void vmd_setup_dma_ops(struct vmd_dev *vmd) {}
#endif

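/*
 * CFGBAR holds the child devices' config space, laid out ECAM-style:
 * 1MB per bus and 4KB per function, i.e. (bus << 20) + (devfn << 12) + reg.
 */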
static char __iomem *vmd_cfg_addr(struct vmd_dev *vmd, struct pci_bus *bus,
				  unsigned int devfn, int reg, int len)
{
	char __iomem *addr = vmd->cfgbar +
			     (bus->number << 20) + (devfn << 12) + reg;

	if ((addr - vmd->cfgbar) + len >=
	    resource_size(&vmd->dev->resource[VMD_CFGBAR]))
		return NULL;

	return addr;
}

/*
 * CPU may deadlock if config space is not serialized on some versions of this
 * hardware, so all config space access is done under a spinlock.
 */
static int vmd_pci_read(struct pci_bus *bus, unsigned int devfn, int reg,
			int len, u32 *value)
{
	struct vmd_dev *vmd = vmd_from_bus(bus);
	char __iomem *addr = vmd_cfg_addr(vmd, bus, devfn, reg, len);
	unsigned long flags;
	int ret = 0;

	if (!addr)
		return -EFAULT;

	spin_lock_irqsave(&vmd->cfg_lock, flags);
	switch (len) {
	case 1:
		*value = readb(addr);
		break;
	case 2:
		*value = readw(addr);
		break;
	case 4:
		*value = readl(addr);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&vmd->cfg_lock, flags);
	return ret;
}

/*
 * VMD h/w converts non-posted config writes to posted memory writes. The
 * read-back in this function forces the completion so it returns only after
 * the config space was written, as expected.
 */
static int vmd_pci_write(struct pci_bus *bus, unsigned int devfn, int reg,
			 int len, u32 value)
{
	struct vmd_dev *vmd = vmd_from_bus(bus);
	char __iomem *addr = vmd_cfg_addr(vmd, bus, devfn, reg, len);
	unsigned long flags;
	int ret = 0;

	if (!addr)
		return -EFAULT;

	spin_lock_irqsave(&vmd->cfg_lock, flags);
	switch (len) {
	case 1:
		writeb(value, addr);
		readb(addr);
		break;
	case 2:
		writew(value, addr);
		readw(addr);
		break;
	case 4:
		writel(value, addr);
		readl(addr);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&vmd->cfg_lock, flags);
	return ret;
}

static struct pci_ops vmd_ops = {
	.read		= vmd_pci_read,
	.write		= vmd_pci_write,
};

static void vmd_attach_resources(struct vmd_dev *vmd)
{
	vmd->dev->resource[VMD_MEMBAR1].child = &vmd->resources[1];
	vmd->dev->resource[VMD_MEMBAR2].child = &vmd->resources[2];
}

static void vmd_detach_resources(struct vmd_dev *vmd)
{
	vmd->dev->resource[VMD_MEMBAR1].child = NULL;
	vmd->dev->resource[VMD_MEMBAR2].child = NULL;
}

/*
 * VMD domains start at 0x10000 to not clash with ACPI _SEG domains.
 * Per ACPI r6.0, sec 6.5.6, _SEG returns an integer, of which the lower
 * 16 bits are the PCI Segment Group (domain) number. Other bits are
 * currently reserved.
 */
static int vmd_find_free_domain(void)
{
	int domain = 0xffff;
	struct pci_bus *bus = NULL;

	while ((bus = pci_find_next_bus(bus)) != NULL)
		domain = max_t(int, domain, pci_domain_nr(bus));
	return domain + 1;
}

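/*
 * Set up the VMD-owned PCI domain: translate the VMD endpoint's BARs into
 * the child domain's bus number range and memory windows, create the MSI
 * IRQ domain, then create, scan and add the root bus behind CFGBAR.
 */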
static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
{
	struct pci_sysdata *sd = &vmd->sysdata;
	struct fwnode_handle *fn;
	struct resource *res;
	u32 upper_bits;
	unsigned long flags;
	LIST_HEAD(resources);
	resource_size_t offset[2] = {0};
	resource_size_t membar2_offset = 0x2000, busn_start = 0;
	struct pci_bus *child;

	/*
	 * Shadow registers may exist in certain VMD device ids which allow
	 * guests to correctly assign host physical addresses to the root ports
	 * and child devices. These registers will either return the host value
	 * or 0, depending on an enable bit in the VMD device.
	 */
	if (features & VMD_FEAT_HAS_MEMBAR_SHADOW) {
		u32 vmlock;
		int ret;

		membar2_offset = 0x2018;
		ret = pci_read_config_dword(vmd->dev, PCI_REG_VMLOCK, &vmlock);
		if (ret || vmlock == ~0)
			return -ENODEV;

		if (MB2_SHADOW_EN(vmlock)) {
			void __iomem *membar2;

			membar2 = pci_iomap(vmd->dev, VMD_MEMBAR2, 0);
			if (!membar2)
				return -ENOMEM;
			offset[0] = vmd->dev->resource[VMD_MEMBAR1].start -
					readq(membar2 + 0x2008);
			offset[1] = vmd->dev->resource[VMD_MEMBAR2].start -
					readq(membar2 + 0x2010);
			pci_iounmap(vmd->dev, membar2);
		}
	}

	/*
	 * Certain VMD devices may have a root port configuration option which
	 * limits the bus range to between 0-127 or 128-255
	 */
	if (features & VMD_FEAT_HAS_BUS_RESTRICTIONS) {
		u32 vmcap, vmconfig;

		pci_read_config_dword(vmd->dev, PCI_REG_VMCAP, &vmcap);
		pci_read_config_dword(vmd->dev, PCI_REG_VMCONFIG, &vmconfig);
		if (BUS_RESTRICT_CAP(vmcap) &&
		    (BUS_RESTRICT_CFG(vmconfig) == 0x1))
			busn_start = 128;
	}

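	/*
	 * resources[0] is the bus number range decoded by CFGBAR: one bus
	 * per 1MB of CFGBAR space, starting at busn_start.
	 */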
	res = &vmd->dev->resource[VMD_CFGBAR];
	vmd->resources[0] = (struct resource) {
		.name  = "VMD CFGBAR",
		.start = busn_start,
		.end   = busn_start + (resource_size(res) >> 20) - 1,
		.flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED,
	};

	/*
	 * If the window is below 4GB, clear IORESOURCE_MEM_64 so we can
	 * put 32-bit resources in the window.
	 *
	 * There's no hardware reason why a 64-bit window *couldn't*
	 * contain a 32-bit resource, but pbus_size_mem() computes the
	 * bridge window size assuming a 64-bit window will contain no
	 * 32-bit resources. __pci_assign_resource() enforces that
	 * artificial restriction to make sure everything will fit.
	 *
	 * The only way we could use a 64-bit non-prefetchable MEMBAR is
	 * if its address is <4GB so that we can convert it to a 32-bit
	 * resource. To be visible to the host OS, all VMD endpoints must
	 * be initially configured by platform BIOS, which includes setting
	 * up these resources. We can assume the device is configured
	 * according to the platform needs.
	 */
	res = &vmd->dev->resource[VMD_MEMBAR1];
	upper_bits = upper_32_bits(res->end);
	flags = res->flags & ~IORESOURCE_SIZEALIGN;
	if (!upper_bits)
		flags &= ~IORESOURCE_MEM_64;
	vmd->resources[1] = (struct resource) {
		.name  = "VMD MEMBAR1",
		.start = res->start,
		.end   = res->end,
		.flags = flags,
		.parent = res,
	};

	res = &vmd->dev->resource[VMD_MEMBAR2];
	upper_bits = upper_32_bits(res->end);
	flags = res->flags & ~IORESOURCE_SIZEALIGN;
	if (!upper_bits)
		flags &= ~IORESOURCE_MEM_64;
	vmd->resources[2] = (struct resource) {
		.name  = "VMD MEMBAR2",
		.start = res->start + membar2_offset,
		.end   = res->end,
		.flags = flags,
		.parent = res,
	};

	sd->vmd_domain = true;
	sd->domain = vmd_find_free_domain();
	if (sd->domain < 0)
		return sd->domain;

	sd->node = pcibus_to_node(vmd->dev->bus);

	fn = irq_domain_alloc_named_id_fwnode("VMD-MSI", vmd->sysdata.domain);
	if (!fn)
		return -ENODEV;

	vmd->irq_domain = pci_msi_create_irq_domain(fn, &vmd_msi_domain_info,
						    x86_vector_domain);
	irq_domain_free_fwnode(fn);
	if (!vmd->irq_domain)
		return -ENODEV;

	pci_add_resource(&resources, &vmd->resources[0]);
	pci_add_resource_offset(&resources, &vmd->resources[1], offset[0]);
	pci_add_resource_offset(&resources, &vmd->resources[2], offset[1]);

	vmd->bus = pci_create_root_bus(&vmd->dev->dev, busn_start, &vmd_ops,
				       sd, &resources);
	if (!vmd->bus) {
		pci_free_resource_list(&resources);
		irq_domain_remove(vmd->irq_domain);
		return -ENODEV;
	}

	vmd_attach_resources(vmd);
	vmd_setup_dma_ops(vmd);
	dev_set_msi_domain(&vmd->bus->dev, vmd->irq_domain);

	pci_scan_child_bus(vmd->bus);
	pci_assign_unassigned_bus_resources(vmd->bus);

	/*
	 * VMD root buses are virtual and don't return true on pci_is_pcie()
	 * and will fail pcie_bus_configure_settings() early. It can instead be
	 * run on each of the real root ports.
	 */
	list_for_each_entry(child, &vmd->bus->children, node)
		pcie_bus_configure_settings(child);

	pci_bus_add_devices(vmd->bus);

	WARN(sysfs_create_link(&vmd->dev->dev.kobj, &vmd->bus->dev.kobj,
			       "domain"), "Can't create symlink to domain\n");
	return 0;
}

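/*
 * Handler for one VMD MSI-X vector: demultiplex to every child IRQ
 * currently mapped to this vector.
 */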
static irqreturn_t vmd_irq(int irq, void *data)
{
	struct vmd_irq_list *irqs = data;
	struct vmd_irq *vmdirq;
	int idx;

	idx = srcu_read_lock(&irqs->srcu);
	list_for_each_entry_rcu(vmdirq, &irqs->irq_list, node)
		generic_handle_irq(vmdirq->virq);
	srcu_read_unlock(&irqs->srcu, idx);

	return IRQ_HANDLED;
}

static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	struct vmd_dev *vmd;
	int i, err;

	if (resource_size(&dev->resource[VMD_CFGBAR]) < (1 << 20))
		return -ENOMEM;

	vmd = devm_kzalloc(&dev->dev, sizeof(*vmd), GFP_KERNEL);
	if (!vmd)
		return -ENOMEM;

	vmd->dev = dev;
	err = pcim_enable_device(dev);
	if (err < 0)
		return err;

	vmd->cfgbar = pcim_iomap(dev, VMD_CFGBAR, 0);
	if (!vmd->cfgbar)
		return -ENOMEM;

	pci_set_master(dev);
	if (dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(64)) &&
	    dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32)))
		return -ENODEV;

	vmd->msix_count = pci_msix_vec_count(dev);
	if (vmd->msix_count < 0)
		return -ENODEV;

	vmd->msix_count = pci_alloc_irq_vectors(dev, 1, vmd->msix_count,
						PCI_IRQ_MSIX);
	if (vmd->msix_count < 0)
		return vmd->msix_count;

	vmd->irqs = devm_kcalloc(&dev->dev, vmd->msix_count, sizeof(*vmd->irqs),
				 GFP_KERNEL);
	if (!vmd->irqs)
		return -ENOMEM;

	for (i = 0; i < vmd->msix_count; i++) {
		err = init_srcu_struct(&vmd->irqs[i].srcu);
		if (err)
			return err;

		INIT_LIST_HEAD(&vmd->irqs[i].irq_list);
		err = devm_request_irq(&dev->dev, pci_irq_vector(dev, i),
				       vmd_irq, IRQF_NO_THREAD,
				       "vmd", &vmd->irqs[i]);
		if (err)
			return err;
	}

	spin_lock_init(&vmd->cfg_lock);
	pci_set_drvdata(dev, vmd);
	err = vmd_enable_domain(vmd, (unsigned long) id->driver_data);
	if (err)
		return err;

	dev_info(&vmd->dev->dev, "Bound to PCI domain %04x\n",
		 vmd->sysdata.domain);
	return 0;
}

static void vmd_cleanup_srcu(struct vmd_dev *vmd)
{
	int i;

	for (i = 0; i < vmd->msix_count; i++)
		cleanup_srcu_struct(&vmd->irqs[i].srcu);
}

static void vmd_remove(struct pci_dev *dev)
{
	struct vmd_dev *vmd = pci_get_drvdata(dev);

	sysfs_remove_link(&vmd->dev->dev.kobj, "domain");
	pci_stop_root_bus(vmd->bus);
	pci_remove_root_bus(vmd->bus);
	vmd_cleanup_srcu(vmd);
	vmd_teardown_dma_ops(vmd);
	vmd_detach_resources(vmd);
	irq_domain_remove(vmd->irq_domain);
}

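/*
 * The per-vector IRQ handlers are freed across suspend and re-requested on
 * resume, around saving and restoring the VMD endpoint's config space.
 */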
#ifdef CONFIG_PM_SLEEP
static int vmd_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct vmd_dev *vmd = pci_get_drvdata(pdev);
	int i;

	for (i = 0; i < vmd->msix_count; i++)
		devm_free_irq(dev, pci_irq_vector(pdev, i), &vmd->irqs[i]);

	pci_save_state(pdev);
	return 0;
}

static int vmd_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct vmd_dev *vmd = pci_get_drvdata(pdev);
	int err, i;

	for (i = 0; i < vmd->msix_count; i++) {
		err = devm_request_irq(dev, pci_irq_vector(pdev, i),
				       vmd_irq, IRQF_NO_THREAD,
				       "vmd", &vmd->irqs[i]);
		if (err)
			return err;
	}

	pci_restore_state(pdev);
	return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(vmd_dev_pm_ops, vmd_suspend, vmd_resume);

static const struct pci_device_id vmd_ids[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_201D),},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_28C0),
		.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW |
				VMD_FEAT_HAS_BUS_RESTRICTIONS,},
	{0,}
};
MODULE_DEVICE_TABLE(pci, vmd_ids);

static struct pci_driver vmd_drv = {
	.name		= "vmd",
	.id_table	= vmd_ids,
	.probe		= vmd_probe,
	.remove		= vmd_remove,
	.driver		= {
		.pm	= &vmd_dev_pm_ops,
	},
};
module_pci_driver(vmd_drv);

MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
MODULE_VERSION("0.6");