// SPDX-License-Identifier: GPL-2.0
/*
 * (C) Copyright 2002-2004 Greg Kroah-Hartman <greg@kroah.com>
 * (C) Copyright 2002-2004 IBM Corp.
 * (C) Copyright 2003 Matthew Wilcox
 * (C) Copyright 2003 Hewlett-Packard
 * (C) Copyright 2004 Jon Smirl <jonsmirl@yahoo.com>
 * (C) Copyright 2004 Silicon Graphics, Inc. Jesse Barnes <jbarnes@sgi.com>
 *
 * File attributes for PCI devices
 *
 * Modeled after usb's driverfs.c
 */


#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/stat.h>
#include <linux/export.h>
#include <linux/topology.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/capability.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>
#include <linux/pm_runtime.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/aperture.h>
#include "pci.h"

static int sysfs_initialized;	/* = 0 */

/* show configuration fields */
#define pci_config_attr(field, format_string)				\
static ssize_t								\
field##_show(struct device *dev, struct device_attribute *attr, char *buf)	\
{									\
	struct pci_dev *pdev;						\
									\
	pdev = to_pci_dev(dev);						\
	return sysfs_emit(buf, format_string, pdev->field);		\
}									\
static DEVICE_ATTR_RO(field)

pci_config_attr(vendor, "0x%04x\n");
pci_config_attr(device, "0x%04x\n");
pci_config_attr(subsystem_vendor, "0x%04x\n");
pci_config_attr(subsystem_device, "0x%04x\n");
pci_config_attr(revision, "0x%02x\n");
pci_config_attr(class, "0x%06x\n");
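
/*
 * Example (illustrative only): each field above becomes a read-only sysfs
 * file, e.g. reading /sys/bus/pci/devices/<domain:bus:dev.fn>/vendor
 * prints something like "0x8086\n".
 */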

static ssize_t irq_show(struct device *dev,
			struct device_attribute *attr,
			char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

#ifdef CONFIG_PCI_MSI
	/*
	 * For MSI, show the first MSI IRQ; for all other cases including
	 * MSI-X, show the legacy INTx IRQ.
	 */
	if (pdev->msi_enabled)
		return sysfs_emit(buf, "%u\n", pci_irq_vector(pdev, 0));
#endif

	return sysfs_emit(buf, "%u\n", pdev->irq);
}
static DEVICE_ATTR_RO(irq);

static ssize_t broken_parity_status_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	return sysfs_emit(buf, "%u\n", pdev->broken_parity_status);
}

static ssize_t broken_parity_status_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	unsigned long val;

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	pdev->broken_parity_status = !!val;

	return count;
}
static DEVICE_ATTR_RW(broken_parity_status);

static ssize_t pci_dev_show_local_cpu(struct device *dev, bool list,
				      struct device_attribute *attr, char *buf)
{
	const struct cpumask *mask;

#ifdef CONFIG_NUMA
	if (dev_to_node(dev) == NUMA_NO_NODE)
		mask = cpu_online_mask;
	else
		mask = cpumask_of_node(dev_to_node(dev));
#else
	mask = cpumask_of_pcibus(to_pci_dev(dev)->bus);
#endif
	return cpumap_print_to_pagebuf(list, buf, mask);
}

static ssize_t local_cpus_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	return pci_dev_show_local_cpu(dev, false, attr, buf);
}
static DEVICE_ATTR_RO(local_cpus);

static ssize_t local_cpulist_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	return pci_dev_show_local_cpu(dev, true, attr, buf);
}
static DEVICE_ATTR_RO(local_cpulist);

/*
 * PCI Bus Class Devices
 */
static ssize_t cpuaffinity_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	const struct cpumask *cpumask = cpumask_of_pcibus(to_pci_bus(dev));

	return cpumap_print_to_pagebuf(false, buf, cpumask);
}
static DEVICE_ATTR_RO(cpuaffinity);

static ssize_t cpulistaffinity_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	const struct cpumask *cpumask = cpumask_of_pcibus(to_pci_bus(dev));

	return cpumap_print_to_pagebuf(true, buf, cpumask);
}
static DEVICE_ATTR_RO(cpulistaffinity);

static ssize_t power_state_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return sysfs_emit(buf, "%s\n", pci_power_name(pdev->current_state));
}
static DEVICE_ATTR_RO(power_state);
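
/*
 * Each line of the "resource" file produced below has the form
 * "<start> <end> <flags>", one line per resource, with every field
 * printed as a 16-digit hex value.
 */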

/* show resources */
static ssize_t resource_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	int i;
	int max;
	resource_size_t start, end;
	size_t len = 0;

	if (pci_dev->subordinate)
		max = DEVICE_COUNT_RESOURCE;
	else
		max = PCI_BRIDGE_RESOURCES;

	for (i = 0; i < max; i++) {
		struct resource *res = &pci_dev->resource[i];
		pci_resource_to_user(pci_dev, i, res, &start, &end);
		len += sysfs_emit_at(buf, len, "0x%016llx 0x%016llx 0x%016llx\n",
				     (unsigned long long)start,
				     (unsigned long long)end,
				     (unsigned long long)res->flags);
	}
	return len;
}
static DEVICE_ATTR_RO(resource);

static ssize_t max_link_speed_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return sysfs_emit(buf, "%s\n",
			  pci_speed_string(pcie_get_speed_cap(pdev)));
}
static DEVICE_ATTR_RO(max_link_speed);

static ssize_t max_link_width_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return sysfs_emit(buf, "%u\n", pcie_get_width_cap(pdev));
}
static DEVICE_ATTR_RO(max_link_width);

static ssize_t current_link_speed_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	u16 linkstat;
	int err;
	enum pci_bus_speed speed;

	err = pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &linkstat);
	if (err)
		return -EINVAL;

	speed = pcie_link_speed[linkstat & PCI_EXP_LNKSTA_CLS];

	return sysfs_emit(buf, "%s\n", pci_speed_string(speed));
}
static DEVICE_ATTR_RO(current_link_speed);

static ssize_t current_link_width_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	u16 linkstat;
	int err;

	err = pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &linkstat);
	if (err)
		return -EINVAL;

	return sysfs_emit(buf, "%u\n",
		(linkstat & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT);
}
static DEVICE_ATTR_RO(current_link_width);

static ssize_t secondary_bus_number_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	u8 sec_bus;
	int err;

	err = pci_read_config_byte(pci_dev, PCI_SECONDARY_BUS, &sec_bus);
	if (err)
		return -EINVAL;

	return sysfs_emit(buf, "%u\n", sec_bus);
}
static DEVICE_ATTR_RO(secondary_bus_number);

static ssize_t subordinate_bus_number_show(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	u8 sub_bus;
	int err;

	err = pci_read_config_byte(pci_dev, PCI_SUBORDINATE_BUS, &sub_bus);
	if (err)
		return -EINVAL;

	return sysfs_emit(buf, "%u\n", sub_bus);
}
static DEVICE_ATTR_RO(subordinate_bus_number);

static ssize_t ari_enabled_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);

	return sysfs_emit(buf, "%u\n", pci_ari_enabled(pci_dev->bus));
}
static DEVICE_ATTR_RO(ari_enabled);

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);

	return sysfs_emit(buf, "pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02X\n",
			  pci_dev->vendor, pci_dev->device,
			  pci_dev->subsystem_vendor, pci_dev->subsystem_device,
			  (u8)(pci_dev->class >> 16), (u8)(pci_dev->class >> 8),
			  (u8)(pci_dev->class));
}
static DEVICE_ATTR_RO(modalias);
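
/*
 * Illustrative use of the "enable" attribute implemented below: writing a
 * non-zero value (e.g. "echo 1 > enable") calls pci_enable_device() and
 * writing "0" disables the device; both require CAP_SYS_ADMIN and fail
 * with -EBUSY while a driver is bound.  Reading returns the enable count.
 */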

static ssize_t enable_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	unsigned long val;
	ssize_t result = 0;

	/* this can crash the machine when done on the "wrong" device */
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	device_lock(dev);
	if (dev->driver)
		result = -EBUSY;
	else if (val)
		result = pci_enable_device(pdev);
	else if (pci_is_enabled(pdev))
		pci_disable_device(pdev);
	else
		result = -EIO;
	device_unlock(dev);

	return result < 0 ? result : count;
}

static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct pci_dev *pdev;

	pdev = to_pci_dev(dev);
	return sysfs_emit(buf, "%u\n", atomic_read(&pdev->enable_cnt));
}
static DEVICE_ATTR_RW(enable);

#ifdef CONFIG_NUMA
static ssize_t numa_node_store(struct device *dev,
			       struct device_attribute *attr, const char *buf,
			       size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int node;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (kstrtoint(buf, 0, &node) < 0)
		return -EINVAL;

	if ((node < 0 && node != NUMA_NO_NODE) || node >= MAX_NUMNODES)
		return -EINVAL;

	if (node != NUMA_NO_NODE && !node_online(node))
		return -EINVAL;

	add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
	pci_alert(pdev, FW_BUG "Overriding NUMA node to %d. Contact your vendor for updates.",
		  node);

	dev->numa_node = node;
	return count;
}

static ssize_t numa_node_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	return sysfs_emit(buf, "%d\n", dev->numa_node);
}
static DEVICE_ATTR_RW(numa_node);
#endif

static ssize_t dma_mask_bits_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return sysfs_emit(buf, "%d\n", fls64(pdev->dma_mask));
}
static DEVICE_ATTR_RO(dma_mask_bits);

static ssize_t consistent_dma_mask_bits_show(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	return sysfs_emit(buf, "%d\n", fls64(dev->coherent_dma_mask));
}
static DEVICE_ATTR_RO(consistent_dma_mask_bits);

static ssize_t msi_bus_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_bus *subordinate = pdev->subordinate;

	return sysfs_emit(buf, "%u\n", subordinate ?
			  !(subordinate->bus_flags & PCI_BUS_FLAGS_NO_MSI)
			  : !pdev->no_msi);
}

static ssize_t msi_bus_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_bus *subordinate = pdev->subordinate;
	unsigned long val;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	/*
	 * "no_msi" and "bus_flags" only affect what happens when a driver
	 * requests MSI or MSI-X.  They don't affect any drivers that have
	 * already requested MSI or MSI-X.
	 */
	if (!subordinate) {
		pdev->no_msi = !val;
		pci_info(pdev, "MSI/MSI-X %s for future drivers\n",
			 val ? "allowed" : "disallowed");
		return count;
	}

	if (val)
		subordinate->bus_flags &= ~PCI_BUS_FLAGS_NO_MSI;
	else
		subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;

	dev_info(&subordinate->dev, "MSI/MSI-X %s for future drivers of devices on this bus\n",
		 val ? "allowed" : "disallowed");
	return count;
}
static DEVICE_ATTR_RW(msi_bus);
"allowed" : "disallowed"); 427 return count; 428 } 429 static DEVICE_ATTR_RW(msi_bus); 430 431 static ssize_t rescan_store(const struct bus_type *bus, const char *buf, size_t count) 432 { 433 unsigned long val; 434 struct pci_bus *b = NULL; 435 436 if (kstrtoul(buf, 0, &val) < 0) 437 return -EINVAL; 438 439 if (val) { 440 pci_lock_rescan_remove(); 441 while ((b = pci_find_next_bus(b)) != NULL) 442 pci_rescan_bus(b); 443 pci_unlock_rescan_remove(); 444 } 445 return count; 446 } 447 static BUS_ATTR_WO(rescan); 448 449 static struct attribute *pci_bus_attrs[] = { 450 &bus_attr_rescan.attr, 451 NULL, 452 }; 453 454 static const struct attribute_group pci_bus_group = { 455 .attrs = pci_bus_attrs, 456 }; 457 458 const struct attribute_group *pci_bus_groups[] = { 459 &pci_bus_group, 460 NULL, 461 }; 462 463 static ssize_t dev_rescan_store(struct device *dev, 464 struct device_attribute *attr, const char *buf, 465 size_t count) 466 { 467 unsigned long val; 468 struct pci_dev *pdev = to_pci_dev(dev); 469 470 if (kstrtoul(buf, 0, &val) < 0) 471 return -EINVAL; 472 473 if (val) { 474 pci_lock_rescan_remove(); 475 pci_rescan_bus(pdev->bus); 476 pci_unlock_rescan_remove(); 477 } 478 return count; 479 } 480 static struct device_attribute dev_attr_dev_rescan = __ATTR(rescan, 0200, NULL, 481 dev_rescan_store); 482 483 static ssize_t remove_store(struct device *dev, struct device_attribute *attr, 484 const char *buf, size_t count) 485 { 486 unsigned long val; 487 488 if (kstrtoul(buf, 0, &val) < 0) 489 return -EINVAL; 490 491 if (val && device_remove_file_self(dev, attr)) 492 pci_stop_and_remove_bus_device_locked(to_pci_dev(dev)); 493 return count; 494 } 495 static DEVICE_ATTR_IGNORE_LOCKDEP(remove, 0220, NULL, 496 remove_store); 497 498 static ssize_t bus_rescan_store(struct device *dev, 499 struct device_attribute *attr, 500 const char *buf, size_t count) 501 { 502 unsigned long val; 503 struct pci_bus *bus = to_pci_bus(dev); 504 505 if (kstrtoul(buf, 0, &val) < 0) 506 return -EINVAL; 507 508 if (val) { 509 pci_lock_rescan_remove(); 510 if (!pci_is_root_bus(bus) && list_empty(&bus->devices)) 511 pci_rescan_bus_bridge_resize(bus->self); 512 else 513 pci_rescan_bus(bus); 514 pci_unlock_rescan_remove(); 515 } 516 return count; 517 } 518 static struct device_attribute dev_attr_bus_rescan = __ATTR(rescan, 0200, NULL, 519 bus_rescan_store); 520 521 #if defined(CONFIG_PM) && defined(CONFIG_ACPI) 522 static ssize_t d3cold_allowed_store(struct device *dev, 523 struct device_attribute *attr, 524 const char *buf, size_t count) 525 { 526 struct pci_dev *pdev = to_pci_dev(dev); 527 unsigned long val; 528 529 if (kstrtoul(buf, 0, &val) < 0) 530 return -EINVAL; 531 532 pdev->d3cold_allowed = !!val; 533 if (pdev->d3cold_allowed) 534 pci_d3cold_enable(pdev); 535 else 536 pci_d3cold_disable(pdev); 537 538 pm_runtime_resume(dev); 539 540 return count; 541 } 542 543 static ssize_t d3cold_allowed_show(struct device *dev, 544 struct device_attribute *attr, char *buf) 545 { 546 struct pci_dev *pdev = to_pci_dev(dev); 547 return sysfs_emit(buf, "%u\n", pdev->d3cold_allowed); 548 } 549 static DEVICE_ATTR_RW(d3cold_allowed); 550 #endif 551 552 #ifdef CONFIG_OF 553 static ssize_t devspec_show(struct device *dev, 554 struct device_attribute *attr, char *buf) 555 { 556 struct pci_dev *pdev = to_pci_dev(dev); 557 struct device_node *np = pci_device_to_OF_node(pdev); 558 559 if (np == NULL) 560 return 0; 561 return sysfs_emit(buf, "%pOF\n", np); 562 } 563 static DEVICE_ATTR_RO(devspec); 564 #endif 565 566 static ssize_t 

#if defined(CONFIG_PM) && defined(CONFIG_ACPI)
static ssize_t d3cold_allowed_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	unsigned long val;

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	pdev->d3cold_allowed = !!val;
	if (pdev->d3cold_allowed)
		pci_d3cold_enable(pdev);
	else
		pci_d3cold_disable(pdev);

	pm_runtime_resume(dev);

	return count;
}

static ssize_t d3cold_allowed_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	return sysfs_emit(buf, "%u\n", pdev->d3cold_allowed);
}
static DEVICE_ATTR_RW(d3cold_allowed);
#endif

#ifdef CONFIG_OF
static ssize_t devspec_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct device_node *np = pci_device_to_OF_node(pdev);

	if (np == NULL)
		return 0;
	return sysfs_emit(buf, "%pOF\n", np);
}
static DEVICE_ATTR_RO(devspec);
#endif
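
/*
 * Illustrative use of "driver_override" (implemented below): writing a
 * driver name, e.g. "echo vfio-pci > driver_override", restricts matching
 * to that driver on a subsequent bind; writing an empty string clears the
 * override again (see driver_set_override()).
 */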

static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int ret;

	ret = driver_set_override(dev, &pdev->driver_override, buf, count);
	if (ret)
		return ret;

	return count;
}

static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	ssize_t len;

	device_lock(dev);
	len = sysfs_emit(buf, "%s\n", pdev->driver_override);
	device_unlock(dev);
	return len;
}
static DEVICE_ATTR_RW(driver_override);

static struct attribute *pci_dev_attrs[] = {
	&dev_attr_power_state.attr,
	&dev_attr_resource.attr,
	&dev_attr_vendor.attr,
	&dev_attr_device.attr,
	&dev_attr_subsystem_vendor.attr,
	&dev_attr_subsystem_device.attr,
	&dev_attr_revision.attr,
	&dev_attr_class.attr,
	&dev_attr_irq.attr,
	&dev_attr_local_cpus.attr,
	&dev_attr_local_cpulist.attr,
	&dev_attr_modalias.attr,
#ifdef CONFIG_NUMA
	&dev_attr_numa_node.attr,
#endif
	&dev_attr_dma_mask_bits.attr,
	&dev_attr_consistent_dma_mask_bits.attr,
	&dev_attr_enable.attr,
	&dev_attr_broken_parity_status.attr,
	&dev_attr_msi_bus.attr,
#if defined(CONFIG_PM) && defined(CONFIG_ACPI)
	&dev_attr_d3cold_allowed.attr,
#endif
#ifdef CONFIG_OF
	&dev_attr_devspec.attr,
#endif
	&dev_attr_driver_override.attr,
	&dev_attr_ari_enabled.attr,
	NULL,
};

static struct attribute *pci_bridge_attrs[] = {
	&dev_attr_subordinate_bus_number.attr,
	&dev_attr_secondary_bus_number.attr,
	NULL,
};

static struct attribute *pcie_dev_attrs[] = {
	&dev_attr_current_link_speed.attr,
	&dev_attr_current_link_width.attr,
	&dev_attr_max_link_width.attr,
	&dev_attr_max_link_speed.attr,
	NULL,
};

static struct attribute *pcibus_attrs[] = {
	&dev_attr_bus_rescan.attr,
	&dev_attr_cpuaffinity.attr,
	&dev_attr_cpulistaffinity.attr,
	NULL,
};

static const struct attribute_group pcibus_group = {
	.attrs = pcibus_attrs,
};

const struct attribute_group *pcibus_groups[] = {
	&pcibus_group,
	NULL,
};

static ssize_t boot_vga_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_dev *vga_dev = vga_default_device();

	if (vga_dev)
		return sysfs_emit(buf, "%u\n", (pdev == vga_dev));

	return sysfs_emit(buf, "%u\n",
			  !!(pdev->resource[PCI_ROM_RESOURCE].flags &
			     IORESOURCE_ROM_SHADOW));
}
static DEVICE_ATTR_RO(boot_vga);
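
/*
 * The "config" read/write helpers below split each request into byte,
 * word and dword accesses so that every pci_user_*_config_*() call is
 * naturally aligned within config space.
 */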

static ssize_t pci_read_config(struct file *filp, struct kobject *kobj,
			       struct bin_attribute *bin_attr, char *buf,
			       loff_t off, size_t count)
{
	struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj));
	unsigned int size = 64;
	loff_t init_off = off;
	u8 *data = (u8 *) buf;

	/* Several chips lock up trying to read undefined config space */
	if (file_ns_capable(filp, &init_user_ns, CAP_SYS_ADMIN))
		size = dev->cfg_size;
	else if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
		size = 128;

	if (off > size)
		return 0;
	if (off + count > size) {
		size -= off;
		count = size;
	} else {
		size = count;
	}

	pci_config_pm_runtime_get(dev);

	if ((off & 1) && size) {
		u8 val;
		pci_user_read_config_byte(dev, off, &val);
		data[off - init_off] = val;
		off++;
		size--;
	}

	if ((off & 3) && size > 2) {
		u16 val;
		pci_user_read_config_word(dev, off, &val);
		data[off - init_off] = val & 0xff;
		data[off - init_off + 1] = (val >> 8) & 0xff;
		off += 2;
		size -= 2;
	}

	while (size > 3) {
		u32 val;
		pci_user_read_config_dword(dev, off, &val);
		data[off - init_off] = val & 0xff;
		data[off - init_off + 1] = (val >> 8) & 0xff;
		data[off - init_off + 2] = (val >> 16) & 0xff;
		data[off - init_off + 3] = (val >> 24) & 0xff;
		off += 4;
		size -= 4;
		cond_resched();
	}

	if (size >= 2) {
		u16 val;
		pci_user_read_config_word(dev, off, &val);
		data[off - init_off] = val & 0xff;
		data[off - init_off + 1] = (val >> 8) & 0xff;
		off += 2;
		size -= 2;
	}

	if (size > 0) {
		u8 val;
		pci_user_read_config_byte(dev, off, &val);
		data[off - init_off] = val;
	}

	pci_config_pm_runtime_put(dev);

	return count;
}

static ssize_t pci_write_config(struct file *filp, struct kobject *kobj,
				struct bin_attribute *bin_attr, char *buf,
				loff_t off, size_t count)
{
	struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj));
	unsigned int size = count;
	loff_t init_off = off;
	u8 *data = (u8 *) buf;
	int ret;

	ret = security_locked_down(LOCKDOWN_PCI_ACCESS);
	if (ret)
		return ret;

	if (resource_is_exclusive(&dev->driver_exclusive_resource, off,
				  count)) {
		pci_warn_once(dev, "%s: Unexpected write to kernel-exclusive config offset %llx",
			      current->comm, off);
		add_taint(TAINT_USER, LOCKDEP_STILL_OK);
	}

	if (off > dev->cfg_size)
		return 0;
	if (off + count > dev->cfg_size) {
		size = dev->cfg_size - off;
		count = size;
	}

	pci_config_pm_runtime_get(dev);

	if ((off & 1) && size) {
		pci_user_write_config_byte(dev, off, data[off - init_off]);
		off++;
		size--;
	}

	if ((off & 3) && size > 2) {
		u16 val = data[off - init_off];
		val |= (u16) data[off - init_off + 1] << 8;
		pci_user_write_config_word(dev, off, val);
		off += 2;
		size -= 2;
	}

	while (size > 3) {
		u32 val = data[off - init_off];
		val |= (u32) data[off - init_off + 1] << 8;
		val |= (u32) data[off - init_off + 2] << 16;
		val |= (u32) data[off - init_off + 3] << 24;
		pci_user_write_config_dword(dev, off, val);
		off += 4;
		size -= 4;
	}

	if (size >= 2) {
		u16 val = data[off - init_off];
		val |= (u16) data[off - init_off + 1] << 8;
		pci_user_write_config_word(dev, off, val);
		off += 2;
		size -= 2;
	}

	if (size)
		pci_user_write_config_byte(dev, off, data[off - init_off]);

	pci_config_pm_runtime_put(dev);

	return count;
}
static BIN_ATTR(config, 0644, pci_read_config, pci_write_config, 0);

static struct bin_attribute *pci_dev_config_attrs[] = {
	&bin_attr_config,
	NULL,
};

static umode_t pci_dev_config_attr_is_visible(struct kobject *kobj,
					      struct bin_attribute *a, int n)
{
	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));

	a->size = PCI_CFG_SPACE_SIZE;
	if (pdev->cfg_size > PCI_CFG_SPACE_SIZE)
		a->size = PCI_CFG_SPACE_EXP_SIZE;

	return a->attr.mode;
}

static const struct attribute_group pci_dev_config_attr_group = {
	.bin_attrs = pci_dev_config_attrs,
	.is_bin_visible = pci_dev_config_attr_is_visible,
};

#ifdef HAVE_PCI_LEGACY
/**
 * pci_read_legacy_io - read byte(s) from legacy I/O port space
 * @filp: open sysfs file
 * @kobj: kobject corresponding to file to read from
 * @bin_attr: struct bin_attribute for this file
 * @buf: buffer to store results
 * @off: offset into legacy I/O port space
 * @count: number of bytes to read
 *
 * Reads 1, 2, or 4 bytes from legacy I/O port space using an arch specific
 * callback routine (pci_legacy_read).
 */
static ssize_t pci_read_legacy_io(struct file *filp, struct kobject *kobj,
				  struct bin_attribute *bin_attr, char *buf,
				  loff_t off, size_t count)
{
	struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj));

	/* Only support 1, 2 or 4 byte accesses */
	if (count != 1 && count != 2 && count != 4)
		return -EINVAL;

	return pci_legacy_read(bus, off, (u32 *)buf, count);
}

/**
 * pci_write_legacy_io - write byte(s) to legacy I/O port space
 * @filp: open sysfs file
 * @kobj: kobject corresponding to file to write to
 * @bin_attr: struct bin_attribute for this file
 * @buf: buffer containing value to be written
 * @off: offset into legacy I/O port space
 * @count: number of bytes to write
 *
 * Writes 1, 2, or 4 bytes to legacy I/O port space using an arch specific
 * callback routine (pci_legacy_write).
 */
static ssize_t pci_write_legacy_io(struct file *filp, struct kobject *kobj,
				   struct bin_attribute *bin_attr, char *buf,
				   loff_t off, size_t count)
{
	struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj));

	/* Only support 1, 2 or 4 byte accesses */
	if (count != 1 && count != 2 && count != 4)
		return -EINVAL;

	return pci_legacy_write(bus, off, *(u32 *)buf, count);
}

/**
 * pci_mmap_legacy_mem - map legacy PCI memory into user memory space
 * @filp: open sysfs file
 * @kobj: kobject corresponding to device to be mapped
 * @attr: struct bin_attribute for this file
 * @vma: struct vm_area_struct passed to mmap
 *
 * Uses an arch specific callback, pci_mmap_legacy_mem_page_range, to mmap
 * legacy memory space (first meg of bus space) into application virtual
 * memory space.
 */
static int pci_mmap_legacy_mem(struct file *filp, struct kobject *kobj,
			       struct bin_attribute *attr,
			       struct vm_area_struct *vma)
{
	struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj));

	return pci_mmap_legacy_page_range(bus, vma, pci_mmap_mem);
}

/**
 * pci_mmap_legacy_io - map legacy PCI IO into user memory space
 * @filp: open sysfs file
 * @kobj: kobject corresponding to device to be mapped
 * @attr: struct bin_attribute for this file
 * @vma: struct vm_area_struct passed to mmap
 *
 * Uses an arch specific callback, pci_mmap_legacy_io_page_range, to mmap
 * legacy IO space (first meg of bus space) into application virtual
 * memory space. Returns -ENOSYS if the operation isn't supported
 */
static int pci_mmap_legacy_io(struct file *filp, struct kobject *kobj,
			      struct bin_attribute *attr,
			      struct vm_area_struct *vma)
{
	struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj));

	return pci_mmap_legacy_page_range(bus, vma, pci_mmap_io);
}

/**
 * pci_adjust_legacy_attr - adjustment of legacy file attributes
 * @b: bus to create files under
 * @mmap_type: I/O port or memory
 *
 * Stub implementation. Can be overridden by arch if necessary.
 */
void __weak pci_adjust_legacy_attr(struct pci_bus *b,
				   enum pci_mmap_state mmap_type)
{
}

/**
 * pci_create_legacy_files - create legacy I/O port and memory files
 * @b: bus to create files under
 *
 * Some platforms allow access to legacy I/O port and ISA memory space on
 * a per-bus basis.  This routine creates the files and ties them into
 * their associated read, write and mmap files from pci-sysfs.c
 *
 * On error unwind, but don't propagate the error to the caller
 * as it is ok to set up the PCI bus without these files.
 */
void pci_create_legacy_files(struct pci_bus *b)
{
	int error;

	if (!sysfs_initialized)
		return;

	b->legacy_io = kcalloc(2, sizeof(struct bin_attribute),
			       GFP_ATOMIC);
	if (!b->legacy_io)
		goto kzalloc_err;

	sysfs_bin_attr_init(b->legacy_io);
	b->legacy_io->attr.name = "legacy_io";
	b->legacy_io->size = 0xffff;
	b->legacy_io->attr.mode = 0600;
	b->legacy_io->read = pci_read_legacy_io;
	b->legacy_io->write = pci_write_legacy_io;
	b->legacy_io->mmap = pci_mmap_legacy_io;
	b->legacy_io->f_mapping = iomem_get_mapping;
	pci_adjust_legacy_attr(b, pci_mmap_io);
	error = device_create_bin_file(&b->dev, b->legacy_io);
	if (error)
		goto legacy_io_err;

	/* Allocated above after the legacy_io struct */
	b->legacy_mem = b->legacy_io + 1;
	sysfs_bin_attr_init(b->legacy_mem);
	b->legacy_mem->attr.name = "legacy_mem";
	b->legacy_mem->size = 1024*1024;
	b->legacy_mem->attr.mode = 0600;
	b->legacy_mem->mmap = pci_mmap_legacy_mem;
	b->legacy_mem->f_mapping = iomem_get_mapping;
	pci_adjust_legacy_attr(b, pci_mmap_mem);
	error = device_create_bin_file(&b->dev, b->legacy_mem);
	if (error)
		goto legacy_mem_err;

	return;

legacy_mem_err:
	device_remove_bin_file(&b->dev, b->legacy_io);
legacy_io_err:
	kfree(b->legacy_io);
	b->legacy_io = NULL;
kzalloc_err:
	dev_warn(&b->dev, "could not create legacy I/O port and ISA memory resources in sysfs\n");
}

void pci_remove_legacy_files(struct pci_bus *b)
{
	if (b->legacy_io) {
		device_remove_bin_file(&b->dev, b->legacy_io);
		device_remove_bin_file(&b->dev, b->legacy_mem);
		kfree(b->legacy_io); /* both are allocated here */
	}
}
#endif /* HAVE_PCI_LEGACY */

#if defined(HAVE_PCI_MMAP) || defined(ARCH_GENERIC_PCI_MMAP_RESOURCE)
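
/*
 * pci_mmap_fits() returns 1 when the pages requested in @vma lie entirely
 * within BAR @resno, and 0 otherwise.  For the procfs mmap API the
 * user-visible offset is first translated with pci_resource_to_user().
 */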

int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma,
		  enum pci_mmap_api mmap_api)
{
	unsigned long nr, start, size;
	resource_size_t pci_start = 0, pci_end;

	if (pci_resource_len(pdev, resno) == 0)
		return 0;
	nr = vma_pages(vma);
	start = vma->vm_pgoff;
	size = ((pci_resource_len(pdev, resno) - 1) >> PAGE_SHIFT) + 1;
	if (mmap_api == PCI_MMAP_PROCFS) {
		pci_resource_to_user(pdev, resno, &pdev->resource[resno],
				     &pci_start, &pci_end);
		pci_start >>= PAGE_SHIFT;
	}
	if (start >= pci_start && start < pci_start + size &&
	    start + nr <= pci_start + size)
		return 1;
	return 0;
}

/**
 * pci_mmap_resource - map a PCI resource into user memory space
 * @kobj: kobject for mapping
 * @attr: struct bin_attribute for the file being mapped
 * @vma: struct vm_area_struct passed into the mmap
 * @write_combine: 1 for write_combine mapping
 *
 * Use the regular PCI mapping routines to map a PCI resource into userspace.
 */
static int pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr,
			     struct vm_area_struct *vma, int write_combine)
{
	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
	int bar = (unsigned long)attr->private;
	enum pci_mmap_state mmap_type;
	struct resource *res = &pdev->resource[bar];
	int ret;

	ret = security_locked_down(LOCKDOWN_PCI_ACCESS);
	if (ret)
		return ret;

	if (res->flags & IORESOURCE_MEM && iomem_is_exclusive(res->start))
		return -EINVAL;

	if (!pci_mmap_fits(pdev, bar, vma, PCI_MMAP_SYSFS))
		return -EINVAL;

	mmap_type = res->flags & IORESOURCE_MEM ? pci_mmap_mem : pci_mmap_io;

	return pci_mmap_resource_range(pdev, bar, vma, mmap_type, write_combine);
}

static int pci_mmap_resource_uc(struct file *filp, struct kobject *kobj,
				struct bin_attribute *attr,
				struct vm_area_struct *vma)
{
	return pci_mmap_resource(kobj, attr, vma, 0);
}

static int pci_mmap_resource_wc(struct file *filp, struct kobject *kobj,
				struct bin_attribute *attr,
				struct vm_area_struct *vma)
{
	return pci_mmap_resource(kobj, attr, vma, 1);
}
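
/*
 * Port I/O BARs are exposed for read()/write() and, where the architecture
 * supports it, mmap().  Only single 1-, 2- or 4-byte accesses are
 * supported; the file offset is added to the BAR's port base and the
 * access is issued with in*()/out*().
 */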

static ssize_t pci_resource_io(struct file *filp, struct kobject *kobj,
			       struct bin_attribute *attr, char *buf,
			       loff_t off, size_t count, bool write)
{
	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
	int bar = (unsigned long)attr->private;
	unsigned long port = off;

	port += pci_resource_start(pdev, bar);

	if (port > pci_resource_end(pdev, bar))
		return 0;

	if (port + count - 1 > pci_resource_end(pdev, bar))
		return -EINVAL;

	switch (count) {
	case 1:
		if (write)
			outb(*(u8 *)buf, port);
		else
			*(u8 *)buf = inb(port);
		return 1;
	case 2:
		if (write)
			outw(*(u16 *)buf, port);
		else
			*(u16 *)buf = inw(port);
		return 2;
	case 4:
		if (write)
			outl(*(u32 *)buf, port);
		else
			*(u32 *)buf = inl(port);
		return 4;
	}
	return -EINVAL;
}

static ssize_t pci_read_resource_io(struct file *filp, struct kobject *kobj,
				    struct bin_attribute *attr, char *buf,
				    loff_t off, size_t count)
{
	return pci_resource_io(filp, kobj, attr, buf, off, count, false);
}

static ssize_t pci_write_resource_io(struct file *filp, struct kobject *kobj,
				     struct bin_attribute *attr, char *buf,
				     loff_t off, size_t count)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_PCI_ACCESS);
	if (ret)
		return ret;

	return pci_resource_io(filp, kobj, attr, buf, off, count, true);
}

/**
 * pci_remove_resource_files - cleanup resource files
 * @pdev: dev to cleanup
 *
 * If we created resource files for @pdev, remove them from sysfs and
 * free their resources.
 */
static void pci_remove_resource_files(struct pci_dev *pdev)
{
	int i;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		struct bin_attribute *res_attr;

		res_attr = pdev->res_attr[i];
		if (res_attr) {
			sysfs_remove_bin_file(&pdev->dev.kobj, res_attr);
			kfree(res_attr);
		}

		res_attr = pdev->res_attr_wc[i];
		if (res_attr) {
			sysfs_remove_bin_file(&pdev->dev.kobj, res_attr);
			kfree(res_attr);
		}
	}
}

static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
{
	/* allocate attribute structure, piggyback attribute name */
	int name_len = write_combine ? 13 : 10;
	struct bin_attribute *res_attr;
	char *res_attr_name;
	int retval;

	res_attr = kzalloc(sizeof(*res_attr) + name_len, GFP_ATOMIC);
	if (!res_attr)
		return -ENOMEM;

	res_attr_name = (char *)(res_attr + 1);

	sysfs_bin_attr_init(res_attr);
	if (write_combine) {
		sprintf(res_attr_name, "resource%d_wc", num);
		res_attr->mmap = pci_mmap_resource_wc;
	} else {
		sprintf(res_attr_name, "resource%d", num);
		if (pci_resource_flags(pdev, num) & IORESOURCE_IO) {
			res_attr->read = pci_read_resource_io;
			res_attr->write = pci_write_resource_io;
			if (arch_can_pci_mmap_io())
				res_attr->mmap = pci_mmap_resource_uc;
		} else {
			res_attr->mmap = pci_mmap_resource_uc;
		}
	}
	if (res_attr->mmap)
		res_attr->f_mapping = iomem_get_mapping;
	res_attr->attr.name = res_attr_name;
	res_attr->attr.mode = 0600;
	res_attr->size = pci_resource_len(pdev, num);
	res_attr->private = (void *)(unsigned long)num;
	retval = sysfs_create_bin_file(&pdev->dev.kobj, res_attr);
	if (retval) {
		kfree(res_attr);
		return retval;
	}

	if (write_combine)
		pdev->res_attr_wc[num] = res_attr;
	else
		pdev->res_attr[num] = res_attr;

	return 0;
}

/**
 * pci_create_resource_files - create resource files in sysfs for @dev
 * @pdev: dev in question
 *
 * Walk the resources in @pdev creating files for each resource available.
 */
static int pci_create_resource_files(struct pci_dev *pdev)
{
	int i;
	int retval;

	/* Expose the PCI resources from this device as files */
	for (i = 0; i < PCI_STD_NUM_BARS; i++) {

		/* skip empty resources */
		if (!pci_resource_len(pdev, i))
			continue;

		retval = pci_create_attr(pdev, i, 0);
		/* for prefetchable resources, create a WC mappable file */
		if (!retval && arch_can_pci_mmap_wc() &&
		    pdev->resource[i].flags & IORESOURCE_PREFETCH)
			retval = pci_create_attr(pdev, i, 1);
		if (retval) {
			pci_remove_resource_files(pdev);
			return retval;
		}
	}
	return 0;
}
#else /* !(defined(HAVE_PCI_MMAP) || defined(ARCH_GENERIC_PCI_MMAP_RESOURCE)) */
int __weak pci_create_resource_files(struct pci_dev *dev) { return 0; }
void __weak pci_remove_resource_files(struct pci_dev *dev) { return; }
#endif
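
/*
 * Illustrative use of the "rom" attribute implemented below: the ROM can
 * only be read after enabling it by writing a non-zero string, e.g.
 *   echo 1 > rom; cat rom > image.rom; echo 0 > rom
 */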

/**
 * pci_write_rom - used to enable access to the PCI ROM display
 * @filp: sysfs file
 * @kobj: kernel object handle
 * @bin_attr: struct bin_attribute for this file
 * @buf: user input
 * @off: file offset
 * @count: number of bytes in input
 *
 * writing anything except 0 enables it
 */
static ssize_t pci_write_rom(struct file *filp, struct kobject *kobj,
			     struct bin_attribute *bin_attr, char *buf,
			     loff_t off, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));

	if ((off == 0) && (*buf == '0') && (count == 2))
		pdev->rom_attr_enabled = 0;
	else
		pdev->rom_attr_enabled = 1;

	return count;
}

/**
 * pci_read_rom - read a PCI ROM
 * @filp: sysfs file
 * @kobj: kernel object handle
 * @bin_attr: struct bin_attribute for this file
 * @buf: where to put the data we read from the ROM
 * @off: file offset
 * @count: number of bytes to read
 *
 * Put @count bytes starting at @off into @buf from the ROM in the PCI
 * device corresponding to @kobj.
 */
static ssize_t pci_read_rom(struct file *filp, struct kobject *kobj,
			    struct bin_attribute *bin_attr, char *buf,
			    loff_t off, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
	void __iomem *rom;
	size_t size;

	if (!pdev->rom_attr_enabled)
		return -EINVAL;

	rom = pci_map_rom(pdev, &size);	/* size starts out as PCI window size */
	if (!rom || !size)
		return -EIO;

	if (off >= size)
		count = 0;
	else {
		if (off + count > size)
			count = size - off;

		memcpy_fromio(buf, rom + off, count);
	}
	pci_unmap_rom(pdev, rom);

	return count;
}
static BIN_ATTR(rom, 0600, pci_read_rom, pci_write_rom, 0);

static struct bin_attribute *pci_dev_rom_attrs[] = {
	&bin_attr_rom,
	NULL,
};

static umode_t pci_dev_rom_attr_is_visible(struct kobject *kobj,
					   struct bin_attribute *a, int n)
{
	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
	size_t rom_size;

	/* If the device has a ROM, try to expose it in sysfs. */
	rom_size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
	if (!rom_size)
		return 0;

	a->size = rom_size;

	return a->attr.mode;
}

static const struct attribute_group pci_dev_rom_attr_group = {
	.bin_attrs = pci_dev_rom_attrs,
	.is_bin_visible = pci_dev_rom_attr_is_visible,
};
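
/*
 * Illustrative use of the "reset" attribute implemented below: only the
 * string "1" is accepted, e.g. "echo 1 > reset" triggers
 * pci_reset_function() on the device.
 */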

static ssize_t reset_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	unsigned long val;
	ssize_t result;

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	if (val != 1)
		return -EINVAL;

	pm_runtime_get_sync(dev);
	result = pci_reset_function(pdev);
	pm_runtime_put(dev);
	if (result < 0)
		return result;

	return count;
}
static DEVICE_ATTR_WO(reset);

static struct attribute *pci_dev_reset_attrs[] = {
	&dev_attr_reset.attr,
	NULL,
};

static umode_t pci_dev_reset_attr_is_visible(struct kobject *kobj,
					     struct attribute *a, int n)
{
	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));

	if (!pci_reset_supported(pdev))
		return 0;

	return a->mode;
}

static const struct attribute_group pci_dev_reset_attr_group = {
	.attrs = pci_dev_reset_attrs,
	.is_visible = pci_dev_reset_attr_is_visible,
};

#define pci_dev_resource_resize_attr(n)					\
static ssize_t resource##n##_resize_show(struct device *dev,		\
					 struct device_attribute *attr,	\
					 char * buf)			\
{									\
	struct pci_dev *pdev = to_pci_dev(dev);				\
	ssize_t ret;							\
									\
	pci_config_pm_runtime_get(pdev);				\
									\
	ret = sysfs_emit(buf, "%016llx\n",				\
			 (u64)pci_rebar_get_possible_sizes(pdev, n));	\
									\
	pci_config_pm_runtime_put(pdev);				\
									\
	return ret;							\
}									\
									\
static ssize_t resource##n##_resize_store(struct device *dev,		\
					  struct device_attribute *attr,\
					  const char *buf, size_t count)\
{									\
	struct pci_dev *pdev = to_pci_dev(dev);				\
	unsigned long size, flags;					\
	int ret, i;							\
	u16 cmd;							\
									\
	if (kstrtoul(buf, 0, &size) < 0)				\
		return -EINVAL;						\
									\
	device_lock(dev);						\
	if (dev->driver) {						\
		ret = -EBUSY;						\
		goto unlock;						\
	}								\
									\
	pci_config_pm_runtime_get(pdev);				\
									\
	if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA) {		\
		ret = aperture_remove_conflicting_pci_devices(pdev,	\
						"resourceN_resize");	\
		if (ret)						\
			goto pm_put;					\
	}								\
									\
	pci_read_config_word(pdev, PCI_COMMAND, &cmd);			\
	pci_write_config_word(pdev, PCI_COMMAND,			\
			      cmd & ~PCI_COMMAND_MEMORY);		\
									\
	flags = pci_resource_flags(pdev, n);				\
									\
	pci_remove_resource_files(pdev);				\
									\
	for (i = 0; i < PCI_STD_NUM_BARS; i++) {			\
		if (pci_resource_len(pdev, i) &&			\
		    pci_resource_flags(pdev, i) == flags)		\
			pci_release_resource(pdev, i);			\
	}								\
									\
	ret = pci_resize_resource(pdev, n, size);			\
									\
	pci_assign_unassigned_bus_resources(pdev->bus);			\
									\
	if (pci_create_resource_files(pdev))				\
		pci_warn(pdev, "Failed to recreate resource files after BAR resizing\n");\
									\
	pci_write_config_word(pdev, PCI_COMMAND, cmd);			\
pm_put:									\
	pci_config_pm_runtime_put(pdev);				\
unlock:									\
	device_unlock(dev);						\
									\
	return ret ? ret : count;					\
}									\
static DEVICE_ATTR_RW(resource##n##_resize)

pci_dev_resource_resize_attr(0);
pci_dev_resource_resize_attr(1);
pci_dev_resource_resize_attr(2);
pci_dev_resource_resize_attr(3);
pci_dev_resource_resize_attr(4);
pci_dev_resource_resize_attr(5);

static struct attribute *resource_resize_attrs[] = {
	&dev_attr_resource0_resize.attr,
	&dev_attr_resource1_resize.attr,
	&dev_attr_resource2_resize.attr,
	&dev_attr_resource3_resize.attr,
	&dev_attr_resource4_resize.attr,
	&dev_attr_resource5_resize.attr,
	NULL,
};

static umode_t resource_resize_is_visible(struct kobject *kobj,
					  struct attribute *a, int n)
{
	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));

	return pci_rebar_get_current_size(pdev, n) < 0 ? 0 : a->mode;
}

static const struct attribute_group pci_dev_resource_resize_group = {
	.attrs = resource_resize_attrs,
	.is_visible = resource_resize_is_visible,
};

int __must_check pci_create_sysfs_dev_files(struct pci_dev *pdev)
{
	if (!sysfs_initialized)
		return -EACCES;

	return pci_create_resource_files(pdev);
}

/**
 * pci_remove_sysfs_dev_files - cleanup PCI specific sysfs files
 * @pdev: device whose entries we should free
 *
 * Cleanup when @pdev is removed from sysfs.
 */
void pci_remove_sysfs_dev_files(struct pci_dev *pdev)
{
	if (!sysfs_initialized)
		return;

	pci_remove_resource_files(pdev);
}

static int __init pci_sysfs_init(void)
{
	struct pci_dev *pdev = NULL;
	struct pci_bus *pbus = NULL;
	int retval;

	sysfs_initialized = 1;
	for_each_pci_dev(pdev) {
		retval = pci_create_sysfs_dev_files(pdev);
		if (retval) {
			pci_dev_put(pdev);
			return retval;
		}
	}

	while ((pbus = pci_find_next_bus(pbus)))
		pci_create_legacy_files(pbus);

	return 0;
}
late_initcall(pci_sysfs_init);

static struct attribute *pci_dev_dev_attrs[] = {
	&dev_attr_boot_vga.attr,
	NULL,
};

static umode_t pci_dev_attrs_are_visible(struct kobject *kobj,
					 struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct pci_dev *pdev = to_pci_dev(dev);

	if (a == &dev_attr_boot_vga.attr)
		if ((pdev->class >> 8) != PCI_CLASS_DISPLAY_VGA)
			return 0;

	return a->mode;
}

static struct attribute *pci_dev_hp_attrs[] = {
	&dev_attr_remove.attr,
	&dev_attr_dev_rescan.attr,
	NULL,
};

static umode_t pci_dev_hp_attrs_are_visible(struct kobject *kobj,
					    struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct pci_dev *pdev = to_pci_dev(dev);

	if (pdev->is_virtfn)
		return 0;

	return a->mode;
}

static umode_t pci_bridge_attrs_are_visible(struct kobject *kobj,
					    struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct pci_dev *pdev = to_pci_dev(dev);

	if (pci_is_bridge(pdev))
		return a->mode;

	return 0;
}

static umode_t pcie_dev_attrs_are_visible(struct kobject *kobj,
					  struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct pci_dev *pdev = to_pci_dev(dev);

	if (pci_is_pcie(pdev))
		return a->mode;

	return 0;
}

static const struct attribute_group pci_dev_group = {
	.attrs = pci_dev_attrs,
};

const struct attribute_group *pci_dev_groups[] = {
	&pci_dev_group,
	&pci_dev_config_attr_group,
	&pci_dev_rom_attr_group,
	&pci_dev_reset_attr_group,
	&pci_dev_reset_method_attr_group,
	&pci_dev_vpd_attr_group,
#ifdef CONFIG_DMI
	&pci_dev_smbios_attr_group,
#endif
#ifdef CONFIG_ACPI
	&pci_dev_acpi_attr_group,
#endif
	&pci_dev_resource_resize_group,
	NULL,
};

static const struct attribute_group pci_dev_hp_attr_group = {
	.attrs = pci_dev_hp_attrs,
	.is_visible = pci_dev_hp_attrs_are_visible,
};

static const struct attribute_group pci_dev_attr_group = {
	.attrs = pci_dev_dev_attrs,
	.is_visible = pci_dev_attrs_are_visible,
};

static const struct attribute_group pci_bridge_attr_group = {
	.attrs = pci_bridge_attrs,
	.is_visible = pci_bridge_attrs_are_visible,
};

static const struct attribute_group pcie_dev_attr_group = {
	.attrs = pcie_dev_attrs,
	.is_visible = pcie_dev_attrs_are_visible,
};

static const struct attribute_group *pci_dev_attr_groups[] = {
	&pci_dev_attr_group,
	&pci_dev_hp_attr_group,
#ifdef CONFIG_PCI_IOV
	&sriov_pf_dev_attr_group,
	&sriov_vf_dev_attr_group,
#endif
	&pci_bridge_attr_group,
	&pcie_dev_attr_group,
#ifdef CONFIG_PCIEAER
	&aer_stats_attr_group,
#endif
#ifdef CONFIG_PCIEASPM
	&aspm_ctrl_attr_group,
#endif
	NULL,
};

const struct device_type pci_dev_type = {
	.groups = pci_dev_attr_groups,
};