// SPDX-License-Identifier: GPL-2.0
/*
 * (C) Copyright 2002-2004 Greg Kroah-Hartman <greg@kroah.com>
 * (C) Copyright 2002-2004 IBM Corp.
 * (C) Copyright 2003 Matthew Wilcox
 * (C) Copyright 2003 Hewlett-Packard
 * (C) Copyright 2004 Jon Smirl <jonsmirl@yahoo.com>
 * (C) Copyright 2004 Silicon Graphics, Inc. Jesse Barnes <jbarnes@sgi.com>
 *
 * File attributes for PCI devices
 *
 * Modeled after usb's driverfs.c
 */


#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/stat.h>
#include <linux/export.h>
#include <linux/topology.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/capability.h>
#include <linux/security.h>
#include <linux/pci-aspm.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include "pci.h"

static int sysfs_initialized;	/* = 0 */

/* show configuration fields */
#define pci_config_attr(field, format_string)				\
static ssize_t								\
field##_show(struct device *dev, struct device_attribute *attr, char *buf)	\
{									\
	struct pci_dev *pdev;						\
									\
	pdev = to_pci_dev(dev);						\
	return sprintf(buf, format_string, pdev->field);		\
}									\
static DEVICE_ATTR_RO(field)

pci_config_attr(vendor, "0x%04x\n");
pci_config_attr(device, "0x%04x\n");
pci_config_attr(subsystem_vendor, "0x%04x\n");
pci_config_attr(subsystem_device, "0x%04x\n");
pci_config_attr(revision, "0x%02x\n");
pci_config_attr(class, "0x%06x\n");
pci_config_attr(irq, "%u\n");

static ssize_t broken_parity_status_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	return sprintf(buf, "%u\n", pdev->broken_parity_status);
}

static ssize_t broken_parity_status_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	unsigned long val;

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	pdev->broken_parity_status = !!val;

	return count;
}
static DEVICE_ATTR_RW(broken_parity_status);

static ssize_t pci_dev_show_local_cpu(struct device *dev, bool list,
				      struct device_attribute *attr, char *buf)
{
	const struct cpumask *mask;

#ifdef CONFIG_NUMA
	mask = (dev_to_node(dev) == -1) ? cpu_online_mask :
					  cpumask_of_node(dev_to_node(dev));
#else
	mask = cpumask_of_pcibus(to_pci_dev(dev)->bus);
#endif
	return cpumap_print_to_pagebuf(list, buf, mask);
}

static ssize_t local_cpus_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	return pci_dev_show_local_cpu(dev, false, attr, buf);
}
static DEVICE_ATTR_RO(local_cpus);

static ssize_t local_cpulist_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	return pci_dev_show_local_cpu(dev, true, attr, buf);
}
static DEVICE_ATTR_RO(local_cpulist);

/*
 * PCI Bus Class Devices
 */
static ssize_t cpuaffinity_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	const struct cpumask *cpumask = cpumask_of_pcibus(to_pci_bus(dev));

	return cpumap_print_to_pagebuf(false, buf, cpumask);
}
static DEVICE_ATTR_RO(cpuaffinity);

static ssize_t cpulistaffinity_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	const struct cpumask *cpumask = cpumask_of_pcibus(to_pci_bus(dev));

	return cpumap_print_to_pagebuf(true, buf, cpumask);
}
static DEVICE_ATTR_RO(cpulistaffinity);

/* show resources */
static ssize_t resource_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	char *str = buf;
	int i;
	int max;
	resource_size_t start, end;

	if (pci_dev->subordinate)
		max = DEVICE_COUNT_RESOURCE;
	else
		max = PCI_BRIDGE_RESOURCES;

	for (i = 0; i < max; i++) {
		struct resource *res = &pci_dev->resource[i];
		pci_resource_to_user(pci_dev, i, res, &start, &end);
		str += sprintf(str, "0x%016llx 0x%016llx 0x%016llx\n",
			       (unsigned long long)start,
			       (unsigned long long)end,
			       (unsigned long long)res->flags);
	}
	return (str - buf);
}
static DEVICE_ATTR_RO(resource);

static ssize_t max_link_speed_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return sprintf(buf, "%s\n", PCIE_SPEED2STR(pcie_get_speed_cap(pdev)));
}
static DEVICE_ATTR_RO(max_link_speed);

static ssize_t max_link_width_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return sprintf(buf, "%u\n", pcie_get_width_cap(pdev));
}
static DEVICE_ATTR_RO(max_link_width);

static ssize_t current_link_speed_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	u16 linkstat;
	int err;
	const char *speed;

	err = pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &linkstat);
	if (err)
		return -EINVAL;

	switch (linkstat & PCI_EXP_LNKSTA_CLS) {
	case PCI_EXP_LNKSTA_CLS_16_0GB:
		speed = "16 GT/s";
		break;
	case PCI_EXP_LNKSTA_CLS_8_0GB:
		speed = "8 GT/s";
		break;
	case PCI_EXP_LNKSTA_CLS_5_0GB:
		speed = "5 GT/s";
		break;
	case PCI_EXP_LNKSTA_CLS_2_5GB:
		speed = "2.5 GT/s";
		break;
	default:
		speed = "Unknown speed";
	}

	return sprintf(buf, "%s\n", speed);
}
static DEVICE_ATTR_RO(current_link_speed);

static ssize_t current_link_width_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	u16 linkstat;
	int err;

	err = pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &linkstat);
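	/*
	 * linkstat now holds the Link Status register; the negotiated width
	 * lives in the PCI_EXP_LNKSTA_NLW field and, shifted down by
	 * PCI_EXP_LNKSTA_NLW_SHIFT, gives the current width in lanes
	 * (x1, x2, ...).
	 */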
	if (err)
		return -EINVAL;

	return sprintf(buf, "%u\n",
		(linkstat & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT);
}
static DEVICE_ATTR_RO(current_link_width);

static ssize_t secondary_bus_number_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	u8 sec_bus;
	int err;

	err = pci_read_config_byte(pci_dev, PCI_SECONDARY_BUS, &sec_bus);
	if (err)
		return -EINVAL;

	return sprintf(buf, "%u\n", sec_bus);
}
static DEVICE_ATTR_RO(secondary_bus_number);

static ssize_t subordinate_bus_number_show(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	u8 sub_bus;
	int err;

	err = pci_read_config_byte(pci_dev, PCI_SUBORDINATE_BUS, &sub_bus);
	if (err)
		return -EINVAL;

	return sprintf(buf, "%u\n", sub_bus);
}
static DEVICE_ATTR_RO(subordinate_bus_number);

static ssize_t ari_enabled_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);

	return sprintf(buf, "%u\n", pci_ari_enabled(pci_dev->bus));
}
static DEVICE_ATTR_RO(ari_enabled);

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);

	return sprintf(buf, "pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02X\n",
		       pci_dev->vendor, pci_dev->device,
		       pci_dev->subsystem_vendor, pci_dev->subsystem_device,
		       (u8)(pci_dev->class >> 16), (u8)(pci_dev->class >> 8),
		       (u8)(pci_dev->class));
}
static DEVICE_ATTR_RO(modalias);

static ssize_t enable_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	unsigned long val;
	ssize_t result = kstrtoul(buf, 0, &val);

	if (result < 0)
		return result;

	/* this can crash the machine when done on the "wrong" device */
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	device_lock(dev);
	if (dev->driver)
		result = -EBUSY;
	else if (val)
		result = pci_enable_device(pdev);
	else if (pci_is_enabled(pdev))
		pci_disable_device(pdev);
	else
		result = -EIO;
	device_unlock(dev);

	return result < 0 ? result : count;
}

static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct pci_dev *pdev;

	pdev = to_pci_dev(dev);
	return sprintf(buf, "%u\n", atomic_read(&pdev->enable_cnt));
}
static DEVICE_ATTR_RW(enable);

#ifdef CONFIG_NUMA
static ssize_t numa_node_store(struct device *dev,
			       struct device_attribute *attr, const char *buf,
			       size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int node, ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = kstrtoint(buf, 0, &node);
	if (ret)
		return ret;

	if ((node < 0 && node != NUMA_NO_NODE) || node >= MAX_NUMNODES)
		return -EINVAL;

	if (node != NUMA_NO_NODE && !node_online(node))
		return -EINVAL;

	add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
	pci_alert(pdev, FW_BUG "Overriding NUMA node to %d.  Contact your vendor for updates.",
		  node);

	dev->numa_node = node;
	return count;
}

static ssize_t numa_node_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	return sprintf(buf, "%d\n", dev->numa_node);
}
static DEVICE_ATTR_RW(numa_node);
#endif

static ssize_t dma_mask_bits_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return sprintf(buf, "%d\n", fls64(pdev->dma_mask));
}
static DEVICE_ATTR_RO(dma_mask_bits);

static ssize_t consistent_dma_mask_bits_show(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	return sprintf(buf, "%d\n", fls64(dev->coherent_dma_mask));
}
static DEVICE_ATTR_RO(consistent_dma_mask_bits);

static ssize_t msi_bus_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_bus *subordinate = pdev->subordinate;

	return sprintf(buf, "%u\n", subordinate ?
		       !(subordinate->bus_flags & PCI_BUS_FLAGS_NO_MSI)
		       : !pdev->no_msi);
}

static ssize_t msi_bus_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_bus *subordinate = pdev->subordinate;
	unsigned long val;

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/*
	 * "no_msi" and "bus_flags" only affect what happens when a driver
	 * requests MSI or MSI-X.  They don't affect any drivers that have
	 * already requested MSI or MSI-X.
	 */
	if (!subordinate) {
		pdev->no_msi = !val;
		pci_info(pdev, "MSI/MSI-X %s for future drivers\n",
			 val ? "allowed" : "disallowed");
		return count;
	}

	if (val)
		subordinate->bus_flags &= ~PCI_BUS_FLAGS_NO_MSI;
	else
		subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;

	dev_info(&subordinate->dev, "MSI/MSI-X %s for future drivers of devices on this bus\n",
		 val ? "allowed" : "disallowed");
	return count;
}
static DEVICE_ATTR_RW(msi_bus);

static ssize_t bus_rescan_store(struct bus_type *bus, const char *buf,
				size_t count)
{
	unsigned long val;
	struct pci_bus *b = NULL;

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	if (val) {
		pci_lock_rescan_remove();
		while ((b = pci_find_next_bus(b)) != NULL)
			pci_rescan_bus(b);
		pci_unlock_rescan_remove();
	}
	return count;
}
static BUS_ATTR(rescan, (S_IWUSR|S_IWGRP), NULL, bus_rescan_store);

static struct attribute *pci_bus_attrs[] = {
	&bus_attr_rescan.attr,
	NULL,
};

static const struct attribute_group pci_bus_group = {
	.attrs = pci_bus_attrs,
};

const struct attribute_group *pci_bus_groups[] = {
	&pci_bus_group,
	NULL,
};

static ssize_t dev_rescan_store(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	unsigned long val;
	struct pci_dev *pdev = to_pci_dev(dev);

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	if (val) {
		pci_lock_rescan_remove();
		pci_rescan_bus(pdev->bus);
		pci_unlock_rescan_remove();
	}
	return count;
}
static struct device_attribute dev_rescan_attr = __ATTR(rescan,
							(S_IWUSR|S_IWGRP),
							NULL, dev_rescan_store);

static ssize_t remove_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	unsigned long val;

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	if (val && device_remove_file_self(dev, attr))
		pci_stop_and_remove_bus_device_locked(to_pci_dev(dev));
	return count;
}
static struct device_attribute dev_remove_attr = __ATTR(remove,
							(S_IWUSR|S_IWGRP),
							NULL, remove_store);

static ssize_t dev_bus_rescan_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{
	unsigned long val;
	struct pci_bus *bus = to_pci_bus(dev);

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	if (val) {
		pci_lock_rescan_remove();
		if (!pci_is_root_bus(bus) && list_empty(&bus->devices))
			pci_rescan_bus_bridge_resize(bus->self);
		else
			pci_rescan_bus(bus);
		pci_unlock_rescan_remove();
	}
	return count;
}
static DEVICE_ATTR(rescan, (S_IWUSR|S_IWGRP), NULL, dev_bus_rescan_store);

#if defined(CONFIG_PM) && defined(CONFIG_ACPI)
static ssize_t d3cold_allowed_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	unsigned long val;

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	pdev->d3cold_allowed = !!val;
	if (pdev->d3cold_allowed)
		pci_d3cold_enable(pdev);
	else
		pci_d3cold_disable(pdev);

	pm_runtime_resume(dev);

	return count;
}

static ssize_t d3cold_allowed_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	return sprintf(buf, "%u\n", pdev->d3cold_allowed);
}
static DEVICE_ATTR_RW(d3cold_allowed);
#endif

#ifdef CONFIG_OF
static ssize_t devspec_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct device_node *np = pci_device_to_OF_node(pdev);

	if (np == NULL)
		return 0;
	return sprintf(buf, "%pOF", np);
}
static DEVICE_ATTR_RO(devspec);
#endif

#ifdef CONFIG_PCI_IOV
static ssize_t sriov_totalvfs_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return sprintf(buf, "%u\n", pci_sriov_get_totalvfs(pdev));
}


static ssize_t sriov_numvfs_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return sprintf(buf, "%u\n", pdev->sriov->num_VFs);
}

/*
 * num_vfs > 0; number of VFs to enable
 * num_vfs = 0; disable all VFs
 *
 * Note: SRIOV spec doesn't allow partial VF
 *       disable, so it's all or none.
 */
static ssize_t sriov_numvfs_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int ret;
	u16 num_vfs;

	ret = kstrtou16(buf, 0, &num_vfs);
	if (ret < 0)
		return ret;

	if (num_vfs > pci_sriov_get_totalvfs(pdev))
		return -ERANGE;

	device_lock(&pdev->dev);

	if (num_vfs == pdev->sriov->num_VFs)
		goto exit;

	/* is PF driver loaded w/callback */
	if (!pdev->driver || !pdev->driver->sriov_configure) {
		pci_info(pdev, "Driver doesn't support SRIOV configuration via sysfs\n");
		ret = -ENOENT;
		goto exit;
	}

	if (num_vfs == 0) {
		/* disable VFs */
		ret = pdev->driver->sriov_configure(pdev, 0);
		goto exit;
	}

	/* enable VFs */
	if (pdev->sriov->num_VFs) {
		pci_warn(pdev, "%d VFs already enabled. Disable before enabling %d VFs\n",
			 pdev->sriov->num_VFs, num_vfs);
		ret = -EBUSY;
		goto exit;
	}

	ret = pdev->driver->sriov_configure(pdev, num_vfs);
	if (ret < 0)
		goto exit;

	if (ret != num_vfs)
		pci_warn(pdev, "%d VFs requested; only %d enabled\n",
			 num_vfs, ret);

exit:
	device_unlock(&pdev->dev);

	if (ret < 0)
		return ret;

	return count;
}

static ssize_t sriov_offset_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return sprintf(buf, "%u\n", pdev->sriov->offset);
}

static ssize_t sriov_stride_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return sprintf(buf, "%u\n", pdev->sriov->stride);
}

static ssize_t sriov_vf_device_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return sprintf(buf, "%x\n", pdev->sriov->vf_device);
}

static ssize_t sriov_drivers_autoprobe_show(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return sprintf(buf, "%u\n", pdev->sriov->drivers_autoprobe);
}

static ssize_t sriov_drivers_autoprobe_store(struct device *dev,
					     struct device_attribute *attr,
					     const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	bool drivers_autoprobe;

	if (kstrtobool(buf, &drivers_autoprobe) < 0)
		return -EINVAL;

	pdev->sriov->drivers_autoprobe = drivers_autoprobe;

	return count;
}

static struct device_attribute sriov_totalvfs_attr = __ATTR_RO(sriov_totalvfs);
static struct device_attribute sriov_numvfs_attr =
		__ATTR(sriov_numvfs, (S_IRUGO|S_IWUSR|S_IWGRP),
		       sriov_numvfs_show, sriov_numvfs_store);
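
/*
 * Illustrative usage of the sriov_numvfs attribute defined above (the bus
 * address 0000:01:00.0 is only an example): for a PF whose driver
 * implements ->sriov_configure(), VFs are enabled and disabled from
 * userspace with:
 *
 *   cat /sys/bus/pci/devices/0000:01:00.0/sriov_totalvfs
 *   echo 4 > /sys/bus/pci/devices/0000:01:00.0/sriov_numvfs
 *   echo 0 > /sys/bus/pci/devices/0000:01:00.0/sriov_numvfs
 *
 * Partial disable is not possible; writing a different nonzero count while
 * VFs are already enabled is rejected with -EBUSY.
 */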
static struct device_attribute sriov_offset_attr = __ATTR_RO(sriov_offset);
static struct device_attribute sriov_stride_attr = __ATTR_RO(sriov_stride);
static struct device_attribute sriov_vf_device_attr = __ATTR_RO(sriov_vf_device);
static struct device_attribute sriov_drivers_autoprobe_attr =
		__ATTR(sriov_drivers_autoprobe, (S_IRUGO|S_IWUSR|S_IWGRP),
		       sriov_drivers_autoprobe_show, sriov_drivers_autoprobe_store);
#endif /* CONFIG_PCI_IOV */

static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	char *driver_override, *old, *cp;

	/* We need to keep extra room for a newline */
	if (count >= (PAGE_SIZE - 1))
		return -EINVAL;

	driver_override = kstrndup(buf, count, GFP_KERNEL);
	if (!driver_override)
		return -ENOMEM;

	cp = strchr(driver_override, '\n');
	if (cp)
		*cp = '\0';

	device_lock(dev);
	old = pdev->driver_override;
	if (strlen(driver_override)) {
		pdev->driver_override = driver_override;
	} else {
		kfree(driver_override);
		pdev->driver_override = NULL;
	}
	device_unlock(dev);

	kfree(old);

	return count;
}

static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	ssize_t len;

	device_lock(dev);
	len = snprintf(buf, PAGE_SIZE, "%s\n", pdev->driver_override);
	device_unlock(dev);
	return len;
}
static DEVICE_ATTR_RW(driver_override);

static struct attribute *pci_dev_attrs[] = {
	&dev_attr_resource.attr,
	&dev_attr_vendor.attr,
	&dev_attr_device.attr,
	&dev_attr_subsystem_vendor.attr,
	&dev_attr_subsystem_device.attr,
	&dev_attr_revision.attr,
	&dev_attr_class.attr,
	&dev_attr_irq.attr,
	&dev_attr_local_cpus.attr,
	&dev_attr_local_cpulist.attr,
	&dev_attr_modalias.attr,
#ifdef CONFIG_NUMA
	&dev_attr_numa_node.attr,
#endif
	&dev_attr_dma_mask_bits.attr,
	&dev_attr_consistent_dma_mask_bits.attr,
	&dev_attr_enable.attr,
	&dev_attr_broken_parity_status.attr,
	&dev_attr_msi_bus.attr,
#if defined(CONFIG_PM) && defined(CONFIG_ACPI)
	&dev_attr_d3cold_allowed.attr,
#endif
#ifdef CONFIG_OF
	&dev_attr_devspec.attr,
#endif
	&dev_attr_driver_override.attr,
	&dev_attr_ari_enabled.attr,
	NULL,
};

static struct attribute *pci_bridge_attrs[] = {
	&dev_attr_subordinate_bus_number.attr,
	&dev_attr_secondary_bus_number.attr,
	NULL,
};

static struct attribute *pcie_dev_attrs[] = {
	&dev_attr_current_link_speed.attr,
	&dev_attr_current_link_width.attr,
	&dev_attr_max_link_width.attr,
	&dev_attr_max_link_speed.attr,
	NULL,
};

static struct attribute *pcibus_attrs[] = {
	&dev_attr_rescan.attr,
	&dev_attr_cpuaffinity.attr,
	&dev_attr_cpulistaffinity.attr,
	NULL,
};

static const struct attribute_group pcibus_group = {
	.attrs = pcibus_attrs,
};

const struct attribute_group *pcibus_groups[] = {
	&pcibus_group,
	NULL,
};

static ssize_t boot_vga_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_dev *vga_dev = vga_default_device();

	if (vga_dev)
		return sprintf(buf, "%u\n", (pdev == vga_dev));

	return sprintf(buf, "%u\n",
		!!(pdev->resource[PCI_ROM_RESOURCE].flags &
		   IORESOURCE_ROM_SHADOW));
}
static struct device_attribute vga_attr = __ATTR_RO(boot_vga);

static ssize_t pci_read_config(struct file *filp, struct kobject *kobj,
			       struct bin_attribute *bin_attr, char *buf,
			       loff_t off, size_t count)
{
	struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj));
	unsigned int size = 64;
	loff_t init_off = off;
	u8 *data = (u8 *) buf;

	/* Several chips lock up trying to read undefined config space */
	if (file_ns_capable(filp, &init_user_ns, CAP_SYS_ADMIN))
		size = dev->cfg_size;
	else if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
		size = 128;

	if (off > size)
		return 0;
	if (off + count > size) {
		size -= off;
		count = size;
	} else {
		size = count;
	}

	pci_config_pm_runtime_get(dev);

	if ((off & 1) && size) {
		u8 val;
		pci_user_read_config_byte(dev, off, &val);
		data[off - init_off] = val;
		off++;
		size--;
	}

	if ((off & 3) && size > 2) {
		u16 val;
		pci_user_read_config_word(dev, off, &val);
		data[off - init_off] = val & 0xff;
		data[off - init_off + 1] = (val >> 8) & 0xff;
		off += 2;
		size -= 2;
	}

	while (size > 3) {
		u32 val;
		pci_user_read_config_dword(dev, off, &val);
		data[off - init_off] = val & 0xff;
		data[off - init_off + 1] = (val >> 8) & 0xff;
		data[off - init_off + 2] = (val >> 16) & 0xff;
		data[off - init_off + 3] = (val >> 24) & 0xff;
		off += 4;
		size -= 4;
	}

	if (size >= 2) {
		u16 val;
		pci_user_read_config_word(dev, off, &val);
		data[off - init_off] = val & 0xff;
		data[off - init_off + 1] = (val >> 8) & 0xff;
		off += 2;
		size -= 2;
	}

	if (size > 0) {
		u8 val;
		pci_user_read_config_byte(dev, off, &val);
		data[off - init_off] = val;
		off++;
		--size;
	}

	pci_config_pm_runtime_put(dev);

	return count;
}

static ssize_t pci_write_config(struct file *filp, struct kobject *kobj,
				struct bin_attribute *bin_attr, char *buf,
				loff_t off, size_t count)
{
	struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj));
	unsigned int size = count;
	loff_t init_off = off;
	u8 *data = (u8 *) buf;

	if (off > dev->cfg_size)
		return 0;
	if (off + count > dev->cfg_size) {
		size = dev->cfg_size - off;
		count = size;
	}

	pci_config_pm_runtime_get(dev);

	if ((off & 1) && size) {
		pci_user_write_config_byte(dev, off, data[off - init_off]);
		off++;
		size--;
	}

	if ((off & 3) && size > 2) {
		u16 val = data[off - init_off];
		val |= (u16) data[off - init_off + 1] << 8;
		pci_user_write_config_word(dev, off, val);
		off += 2;
		size -= 2;
	}

	while (size > 3) {
		u32 val = data[off - init_off];
		val |= (u32) data[off - init_off + 1] << 8;
		val |= (u32) data[off - init_off + 2] << 16;
		val |= (u32) data[off - init_off + 3] << 24;
		pci_user_write_config_dword(dev, off, val);
		off += 4;
		size -= 4;
	}

	if (size >= 2) {
		u16 val = data[off - init_off];
		val |= (u16) data[off - init_off + 1] << 8;
		pci_user_write_config_word(dev, off, val);
		off += 2;
		size -= 2;
	}

	if (size) {
		pci_user_write_config_byte(dev, off, data[off - init_off]);
		off++;
		--size;
	}

	pci_config_pm_runtime_put(dev);

	return count;
}

#ifdef HAVE_PCI_LEGACY
/**
 * pci_read_legacy_io - read byte(s) from legacy I/O port space
 * @filp: open sysfs file
 * @kobj: kobject corresponding to file to read from
 * @bin_attr: struct bin_attribute for this file
 * @buf: buffer to store results
 * @off: offset into legacy I/O port space
 * @count: number of bytes to read
 *
 * Reads 1, 2, or 4 bytes from legacy I/O port space using an arch specific
 * callback routine (pci_legacy_read).
 */
static ssize_t pci_read_legacy_io(struct file *filp, struct kobject *kobj,
				  struct bin_attribute *bin_attr, char *buf,
				  loff_t off, size_t count)
{
	struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj));

	/* Only support 1, 2 or 4 byte accesses */
	if (count != 1 && count != 2 && count != 4)
		return -EINVAL;

	return pci_legacy_read(bus, off, (u32 *)buf, count);
}

/**
 * pci_write_legacy_io - write byte(s) to legacy I/O port space
 * @filp: open sysfs file
 * @kobj: kobject corresponding to file to write to
 * @bin_attr: struct bin_attribute for this file
 * @buf: buffer containing value to be written
 * @off: offset into legacy I/O port space
 * @count: number of bytes to write
 *
 * Writes 1, 2, or 4 bytes to legacy I/O port space using an arch specific
 * callback routine (pci_legacy_write).
 */
static ssize_t pci_write_legacy_io(struct file *filp, struct kobject *kobj,
				   struct bin_attribute *bin_attr, char *buf,
				   loff_t off, size_t count)
{
	struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj));

	/* Only support 1, 2 or 4 byte accesses */
	if (count != 1 && count != 2 && count != 4)
		return -EINVAL;

	return pci_legacy_write(bus, off, *(u32 *)buf, count);
}

/**
 * pci_mmap_legacy_mem - map legacy PCI memory into user memory space
 * @filp: open sysfs file
 * @kobj: kobject corresponding to device to be mapped
 * @attr: struct bin_attribute for this file
 * @vma: struct vm_area_struct passed to mmap
 *
 * Uses an arch specific callback, pci_mmap_legacy_mem_page_range, to mmap
 * legacy memory space (first meg of bus space) into application virtual
 * memory space.
 */
static int pci_mmap_legacy_mem(struct file *filp, struct kobject *kobj,
			       struct bin_attribute *attr,
			       struct vm_area_struct *vma)
{
	struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj));

	return pci_mmap_legacy_page_range(bus, vma, pci_mmap_mem);
}

/**
 * pci_mmap_legacy_io - map legacy PCI IO into user memory space
 * @filp: open sysfs file
 * @kobj: kobject corresponding to device to be mapped
 * @attr: struct bin_attribute for this file
 * @vma: struct vm_area_struct passed to mmap
 *
 * Uses an arch specific callback, pci_mmap_legacy_io_page_range, to mmap
 * legacy IO space (first meg of bus space) into application virtual
 * memory space.  Returns -ENOSYS if the operation isn't supported.
 */
static int pci_mmap_legacy_io(struct file *filp, struct kobject *kobj,
			      struct bin_attribute *attr,
			      struct vm_area_struct *vma)
{
	struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj));

	return pci_mmap_legacy_page_range(bus, vma, pci_mmap_io);
}

/**
 * pci_adjust_legacy_attr - adjustment of legacy file attributes
 * @b: bus to create files under
 * @mmap_type: I/O port or memory
 *
 * Stub implementation. Can be overridden by arch if necessary.
 */
void __weak pci_adjust_legacy_attr(struct pci_bus *b,
				   enum pci_mmap_state mmap_type)
{
}

/**
 * pci_create_legacy_files - create legacy I/O port and memory files
 * @b: bus to create files under
 *
 * Some platforms allow access to legacy I/O port and ISA memory space on
 * a per-bus basis.  This routine creates the files and ties them into
 * their associated read, write and mmap files from pci-sysfs.c
 *
 * On error unwind, but don't propagate the error to the caller
 * as it is ok to set up the PCI bus without these files.
 */
void pci_create_legacy_files(struct pci_bus *b)
{
	int error;

	b->legacy_io = kcalloc(2, sizeof(struct bin_attribute),
			       GFP_ATOMIC);
	if (!b->legacy_io)
		goto kzalloc_err;

	sysfs_bin_attr_init(b->legacy_io);
	b->legacy_io->attr.name = "legacy_io";
	b->legacy_io->size = 0xffff;
	b->legacy_io->attr.mode = S_IRUSR | S_IWUSR;
	b->legacy_io->read = pci_read_legacy_io;
	b->legacy_io->write = pci_write_legacy_io;
	b->legacy_io->mmap = pci_mmap_legacy_io;
	pci_adjust_legacy_attr(b, pci_mmap_io);
	error = device_create_bin_file(&b->dev, b->legacy_io);
	if (error)
		goto legacy_io_err;

	/* Allocated above after the legacy_io struct */
	b->legacy_mem = b->legacy_io + 1;
	sysfs_bin_attr_init(b->legacy_mem);
	b->legacy_mem->attr.name = "legacy_mem";
	b->legacy_mem->size = 1024*1024;
	b->legacy_mem->attr.mode = S_IRUSR | S_IWUSR;
	b->legacy_mem->mmap = pci_mmap_legacy_mem;
	pci_adjust_legacy_attr(b, pci_mmap_mem);
	error = device_create_bin_file(&b->dev, b->legacy_mem);
	if (error)
		goto legacy_mem_err;

	return;

legacy_mem_err:
	device_remove_bin_file(&b->dev, b->legacy_io);
legacy_io_err:
	kfree(b->legacy_io);
	b->legacy_io = NULL;
kzalloc_err:
	printk(KERN_WARNING "pci: warning: could not create legacy I/O port and ISA memory resources to sysfs\n");
	return;
}

void pci_remove_legacy_files(struct pci_bus *b)
{
	if (b->legacy_io) {
		device_remove_bin_file(&b->dev, b->legacy_io);
		device_remove_bin_file(&b->dev, b->legacy_mem);
		kfree(b->legacy_io); /* both are allocated here */
	}
}
#endif /* HAVE_PCI_LEGACY */

#if defined(HAVE_PCI_MMAP) || defined(ARCH_GENERIC_PCI_MMAP_RESOURCE)

int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma,
		  enum pci_mmap_api mmap_api)
{
	unsigned long nr, start, size;
	resource_size_t pci_start = 0, pci_end;

	if (pci_resource_len(pdev, resno) == 0)
		return 0;
	nr = vma_pages(vma);
	start = vma->vm_pgoff;
	size = ((pci_resource_len(pdev, resno) - 1) >> PAGE_SHIFT) + 1;
	if (mmap_api == PCI_MMAP_PROCFS) {
		pci_resource_to_user(pdev, resno, &pdev->resource[resno],
				     &pci_start, &pci_end);
		pci_start >>= PAGE_SHIFT;
	}
	if (start >= pci_start && start < pci_start + size &&
			start + nr <= pci_start + size)
		return 1;
	return 0;
}

/**
 * pci_mmap_resource - map a PCI resource into user memory space
 * @kobj: kobject for mapping
 * @attr: struct bin_attribute for the file being mapped
 * @vma: struct vm_area_struct passed into the mmap
 * @write_combine: 1 for write_combine mapping
 *
 * Use the regular PCI mapping routines to map a PCI resource into userspace.
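 *
 * The requested range is validated with pci_mmap_fits() before the mapping
 * is attempted; for the sysfs interface, @vma->vm_pgoff is interpreted
 * relative to the start of the BAR rather than as a user-visible address.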
 */
static int pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr,
			     struct vm_area_struct *vma, int write_combine)
{
	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
	int bar = (unsigned long)attr->private;
	enum pci_mmap_state mmap_type;
	struct resource *res = &pdev->resource[bar];

	if (res->flags & IORESOURCE_MEM && iomem_is_exclusive(res->start))
		return -EINVAL;

	if (!pci_mmap_fits(pdev, bar, vma, PCI_MMAP_SYSFS))
		return -EINVAL;

	mmap_type = res->flags & IORESOURCE_MEM ? pci_mmap_mem : pci_mmap_io;

	return pci_mmap_resource_range(pdev, bar, vma, mmap_type, write_combine);
}

static int pci_mmap_resource_uc(struct file *filp, struct kobject *kobj,
				struct bin_attribute *attr,
				struct vm_area_struct *vma)
{
	return pci_mmap_resource(kobj, attr, vma, 0);
}

static int pci_mmap_resource_wc(struct file *filp, struct kobject *kobj,
				struct bin_attribute *attr,
				struct vm_area_struct *vma)
{
	return pci_mmap_resource(kobj, attr, vma, 1);
}

static ssize_t pci_resource_io(struct file *filp, struct kobject *kobj,
			       struct bin_attribute *attr, char *buf,
			       loff_t off, size_t count, bool write)
{
	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
	int bar = (unsigned long)attr->private;
	unsigned long port = off;

	port += pci_resource_start(pdev, bar);

	if (port > pci_resource_end(pdev, bar))
		return 0;

	if (port + count - 1 > pci_resource_end(pdev, bar))
		return -EINVAL;

	switch (count) {
	case 1:
		if (write)
			outb(*(u8 *)buf, port);
		else
			*(u8 *)buf = inb(port);
		return 1;
	case 2:
		if (write)
			outw(*(u16 *)buf, port);
		else
			*(u16 *)buf = inw(port);
		return 2;
	case 4:
		if (write)
			outl(*(u32 *)buf, port);
		else
			*(u32 *)buf = inl(port);
		return 4;
	}
	return -EINVAL;
}

static ssize_t pci_read_resource_io(struct file *filp, struct kobject *kobj,
				    struct bin_attribute *attr, char *buf,
				    loff_t off, size_t count)
{
	return pci_resource_io(filp, kobj, attr, buf, off, count, false);
}

static ssize_t pci_write_resource_io(struct file *filp, struct kobject *kobj,
				     struct bin_attribute *attr, char *buf,
				     loff_t off, size_t count)
{
	return pci_resource_io(filp, kobj, attr, buf, off, count, true);
}

/**
 * pci_remove_resource_files - cleanup resource files
 * @pdev: dev to cleanup
 *
 * If we created resource files for @pdev, remove them from sysfs and
 * free their resources.
 */
static void pci_remove_resource_files(struct pci_dev *pdev)
{
	int i;

	for (i = 0; i < PCI_ROM_RESOURCE; i++) {
		struct bin_attribute *res_attr;

		res_attr = pdev->res_attr[i];
		if (res_attr) {
			sysfs_remove_bin_file(&pdev->dev.kobj, res_attr);
			kfree(res_attr);
		}

		res_attr = pdev->res_attr_wc[i];
		if (res_attr) {
			sysfs_remove_bin_file(&pdev->dev.kobj, res_attr);
			kfree(res_attr);
		}
	}
}

static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
{
	/* allocate attribute structure, piggyback attribute name */
	int name_len = write_combine ? 13 : 10;
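	/*
	 * 13 and 10 are the buffer sizes needed to format "resource%d_wc"
	 * and "resource%d" respectively with a single-digit BAR number plus
	 * the terminating NUL.
	 */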
	struct bin_attribute *res_attr;
	char *res_attr_name;
	int retval;

	res_attr = kzalloc(sizeof(*res_attr) + name_len, GFP_ATOMIC);
	if (!res_attr)
		return -ENOMEM;

	res_attr_name = (char *)(res_attr + 1);

	sysfs_bin_attr_init(res_attr);
	if (write_combine) {
		pdev->res_attr_wc[num] = res_attr;
		sprintf(res_attr_name, "resource%d_wc", num);
		res_attr->mmap = pci_mmap_resource_wc;
	} else {
		pdev->res_attr[num] = res_attr;
		sprintf(res_attr_name, "resource%d", num);
		if (pci_resource_flags(pdev, num) & IORESOURCE_IO) {
			res_attr->read = pci_read_resource_io;
			res_attr->write = pci_write_resource_io;
			if (arch_can_pci_mmap_io())
				res_attr->mmap = pci_mmap_resource_uc;
		} else {
			res_attr->mmap = pci_mmap_resource_uc;
		}
	}
	res_attr->attr.name = res_attr_name;
	res_attr->attr.mode = S_IRUSR | S_IWUSR;
	res_attr->size = pci_resource_len(pdev, num);
	res_attr->private = (void *)(unsigned long)num;
	retval = sysfs_create_bin_file(&pdev->dev.kobj, res_attr);
	if (retval)
		kfree(res_attr);

	return retval;
}

/**
 * pci_create_resource_files - create resource files in sysfs for @dev
 * @pdev: dev in question
 *
 * Walk the resources in @pdev creating files for each resource available.
 */
static int pci_create_resource_files(struct pci_dev *pdev)
{
	int i;
	int retval;

	/* Expose the PCI resources from this device as files */
	for (i = 0; i < PCI_ROM_RESOURCE; i++) {

		/* skip empty resources */
		if (!pci_resource_len(pdev, i))
			continue;

		retval = pci_create_attr(pdev, i, 0);
		/* for prefetchable resources, create a WC mappable file */
		if (!retval && arch_can_pci_mmap_wc() &&
		    pdev->resource[i].flags & IORESOURCE_PREFETCH)
			retval = pci_create_attr(pdev, i, 1);
		if (retval) {
			pci_remove_resource_files(pdev);
			return retval;
		}
	}
	return 0;
}
#else /* !HAVE_PCI_MMAP */
int __weak pci_create_resource_files(struct pci_dev *dev) { return 0; }
void __weak pci_remove_resource_files(struct pci_dev *dev) { return; }
#endif /* HAVE_PCI_MMAP */

/**
 * pci_write_rom - used to enable access to the PCI ROM display
 * @filp: sysfs file
 * @kobj: kernel object handle
 * @bin_attr: struct bin_attribute for this file
 * @buf: user input
 * @off: file offset
 * @count: number of bytes in input
 *
 * writing anything except 0 enables it
 */
static ssize_t pci_write_rom(struct file *filp, struct kobject *kobj,
			     struct bin_attribute *bin_attr, char *buf,
			     loff_t off, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));

	if ((off == 0) && (*buf == '0') && (count == 2))
		pdev->rom_attr_enabled = 0;
	else
		pdev->rom_attr_enabled = 1;

	return count;
}

/**
 * pci_read_rom - read a PCI ROM
 * @filp: sysfs file
 * @kobj: kernel object handle
 * @bin_attr: struct bin_attribute for this file
 * @buf: where to put the data we read from the ROM
 * @off: file offset
 * @count: number of bytes to read
 *
 * Put @count bytes starting at @off into @buf from the ROM in the PCI
 * device corresponding to @kobj.
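 *
 * Reads fail with -EINVAL until the ROM is enabled by writing a nonzero
 * value to the same sysfs file (see pci_write_rom() above).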
 */
static ssize_t pci_read_rom(struct file *filp, struct kobject *kobj,
			    struct bin_attribute *bin_attr, char *buf,
			    loff_t off, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
	void __iomem *rom;
	size_t size;

	if (!pdev->rom_attr_enabled)
		return -EINVAL;

	rom = pci_map_rom(pdev, &size); /* size starts out as PCI window size */
	if (!rom || !size)
		return -EIO;

	if (off >= size)
		count = 0;
	else {
		if (off + count > size)
			count = size - off;

		memcpy_fromio(buf, rom + off, count);
	}
	pci_unmap_rom(pdev, rom);

	return count;
}

static const struct bin_attribute pci_config_attr = {
	.attr = {
		.name = "config",
		.mode = S_IRUGO | S_IWUSR,
	},
	.size = PCI_CFG_SPACE_SIZE,
	.read = pci_read_config,
	.write = pci_write_config,
};

static const struct bin_attribute pcie_config_attr = {
	.attr = {
		.name = "config",
		.mode = S_IRUGO | S_IWUSR,
	},
	.size = PCI_CFG_SPACE_EXP_SIZE,
	.read = pci_read_config,
	.write = pci_write_config,
};

static ssize_t reset_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	unsigned long val;
	ssize_t result = kstrtoul(buf, 0, &val);

	if (result < 0)
		return result;

	if (val != 1)
		return -EINVAL;

	result = pci_reset_function(pdev);
	if (result < 0)
		return result;

	return count;
}

static struct device_attribute reset_attr = __ATTR(reset, 0200, NULL, reset_store);

static int pci_create_capabilities_sysfs(struct pci_dev *dev)
{
	int retval;

	pcie_vpd_create_sysfs_dev_files(dev);
	pcie_aspm_create_sysfs_dev_files(dev);

	if (dev->reset_fn) {
		retval = device_create_file(&dev->dev, &reset_attr);
		if (retval)
			goto error;
	}
	return 0;

error:
	pcie_aspm_remove_sysfs_dev_files(dev);
	pcie_vpd_remove_sysfs_dev_files(dev);
	return retval;
}

int __must_check pci_create_sysfs_dev_files(struct pci_dev *pdev)
{
	int retval;
	int rom_size;
	struct bin_attribute *attr;

	if (!sysfs_initialized)
		return -EACCES;

	if (pdev->cfg_size > PCI_CFG_SPACE_SIZE)
		retval = sysfs_create_bin_file(&pdev->dev.kobj, &pcie_config_attr);
	else
		retval = sysfs_create_bin_file(&pdev->dev.kobj, &pci_config_attr);
	if (retval)
		goto err;

	retval = pci_create_resource_files(pdev);
	if (retval)
		goto err_config_file;

	/* If the device has a ROM, try to expose it in sysfs. */
	rom_size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
	if (rom_size) {
		attr = kzalloc(sizeof(*attr), GFP_ATOMIC);
		if (!attr) {
			retval = -ENOMEM;
			goto err_resource_files;
		}
		sysfs_bin_attr_init(attr);
		attr->size = rom_size;
		attr->attr.name = "rom";
		attr->attr.mode = S_IRUSR | S_IWUSR;
		attr->read = pci_read_rom;
		attr->write = pci_write_rom;
		retval = sysfs_create_bin_file(&pdev->dev.kobj, attr);
		if (retval) {
			kfree(attr);
			goto err_resource_files;
		}
		pdev->rom_attr = attr;
	}

	/* add sysfs entries for various capabilities */
	retval = pci_create_capabilities_sysfs(pdev);
	if (retval)
		goto err_rom_file;

	pci_create_firmware_label_files(pdev);

	return 0;

err_rom_file:
	if (pdev->rom_attr) {
		sysfs_remove_bin_file(&pdev->dev.kobj, pdev->rom_attr);
		kfree(pdev->rom_attr);
		pdev->rom_attr = NULL;
	}
err_resource_files:
	pci_remove_resource_files(pdev);
err_config_file:
	if (pdev->cfg_size > PCI_CFG_SPACE_SIZE)
		sysfs_remove_bin_file(&pdev->dev.kobj, &pcie_config_attr);
	else
		sysfs_remove_bin_file(&pdev->dev.kobj, &pci_config_attr);
err:
	return retval;
}

static void pci_remove_capabilities_sysfs(struct pci_dev *dev)
{
	pcie_vpd_remove_sysfs_dev_files(dev);
	pcie_aspm_remove_sysfs_dev_files(dev);
	if (dev->reset_fn) {
		device_remove_file(&dev->dev, &reset_attr);
		dev->reset_fn = 0;
	}
}

/**
 * pci_remove_sysfs_dev_files - cleanup PCI specific sysfs files
 * @pdev: device whose entries we should free
 *
 * Cleanup when @pdev is removed from sysfs.
 */
void pci_remove_sysfs_dev_files(struct pci_dev *pdev)
{
	if (!sysfs_initialized)
		return;

	pci_remove_capabilities_sysfs(pdev);

	if (pdev->cfg_size > PCI_CFG_SPACE_SIZE)
		sysfs_remove_bin_file(&pdev->dev.kobj, &pcie_config_attr);
	else
		sysfs_remove_bin_file(&pdev->dev.kobj, &pci_config_attr);

	pci_remove_resource_files(pdev);

	if (pdev->rom_attr) {
		sysfs_remove_bin_file(&pdev->dev.kobj, pdev->rom_attr);
		kfree(pdev->rom_attr);
		pdev->rom_attr = NULL;
	}

	pci_remove_firmware_label_files(pdev);
}

static int __init pci_sysfs_init(void)
{
	struct pci_dev *pdev = NULL;
	int retval;

	sysfs_initialized = 1;
	for_each_pci_dev(pdev) {
		retval = pci_create_sysfs_dev_files(pdev);
		if (retval) {
			pci_dev_put(pdev);
			return retval;
		}
	}

	return 0;
}
late_initcall(pci_sysfs_init);

static struct attribute *pci_dev_dev_attrs[] = {
	&vga_attr.attr,
	NULL,
};

static umode_t pci_dev_attrs_are_visible(struct kobject *kobj,
					 struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct pci_dev *pdev = to_pci_dev(dev);

	if (a == &vga_attr.attr)
		if ((pdev->class >> 8) != PCI_CLASS_DISPLAY_VGA)
			return 0;

	return a->mode;
}

static struct attribute *pci_dev_hp_attrs[] = {
	&dev_remove_attr.attr,
	&dev_rescan_attr.attr,
	NULL,
};

static umode_t pci_dev_hp_attrs_are_visible(struct kobject *kobj,
					    struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct pci_dev *pdev = to_pci_dev(dev);

	if (pdev->is_virtfn)
		return 0;

	return a->mode;
}

static umode_t pci_bridge_attrs_are_visible(struct kobject *kobj,
					    struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct pci_dev *pdev = to_pci_dev(dev);

	if (pci_is_bridge(pdev))
		return a->mode;

	return 0;
}

static umode_t pcie_dev_attrs_are_visible(struct kobject *kobj,
					  struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct pci_dev *pdev = to_pci_dev(dev);

	if (pci_is_pcie(pdev))
		return a->mode;

	return 0;
}

static const struct attribute_group pci_dev_group = {
	.attrs = pci_dev_attrs,
};

const struct attribute_group *pci_dev_groups[] = {
	&pci_dev_group,
	NULL,
};

static const struct attribute_group pci_bridge_group = {
	.attrs = pci_bridge_attrs,
};

const struct attribute_group *pci_bridge_groups[] = {
	&pci_bridge_group,
	NULL,
};

static const struct attribute_group pcie_dev_group = {
	.attrs = pcie_dev_attrs,
};

const struct attribute_group *pcie_dev_groups[] = {
	&pcie_dev_group,
	NULL,
};

static const struct attribute_group pci_dev_hp_attr_group = {
	.attrs = pci_dev_hp_attrs,
	.is_visible = pci_dev_hp_attrs_are_visible,
};

#ifdef CONFIG_PCI_IOV
static struct attribute *sriov_dev_attrs[] = {
	&sriov_totalvfs_attr.attr,
	&sriov_numvfs_attr.attr,
	&sriov_offset_attr.attr,
	&sriov_stride_attr.attr,
	&sriov_vf_device_attr.attr,
	&sriov_drivers_autoprobe_attr.attr,
	NULL,
};

static umode_t sriov_attrs_are_visible(struct kobject *kobj,
				       struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);

	if (!dev_is_pf(dev))
		return 0;

	return a->mode;
}

static const struct attribute_group sriov_dev_attr_group = {
	.attrs = sriov_dev_attrs,
	.is_visible = sriov_attrs_are_visible,
};
#endif /* CONFIG_PCI_IOV */

static const struct attribute_group pci_dev_attr_group = {
	.attrs = pci_dev_dev_attrs,
	.is_visible = pci_dev_attrs_are_visible,
};

static const struct attribute_group pci_bridge_attr_group = {
	.attrs = pci_bridge_attrs,
	.is_visible = pci_bridge_attrs_are_visible,
};

static const struct attribute_group pcie_dev_attr_group = {
	.attrs = pcie_dev_attrs,
	.is_visible = pcie_dev_attrs_are_visible,
};

static const struct attribute_group *pci_dev_attr_groups[] = {
	&pci_dev_attr_group,
	&pci_dev_hp_attr_group,
#ifdef CONFIG_PCI_IOV
	&sriov_dev_attr_group,
#endif
	&pci_bridge_attr_group,
	&pcie_dev_attr_group,
	NULL,
};

const struct device_type pci_dev_type = {
	.groups = pci_dev_attr_groups,
};
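
/*
 * Userspace view (illustrative only; the 0000:00:1f.2 address below is an
 * arbitrary example): the "config" attribute created by
 * pci_create_sysfs_dev_files() behaves like a flat file laid out as PCI
 * configuration space, so its first four bytes are the vendor and device
 * IDs also exported by the "vendor" and "device" attributes:
 *
 *   hexdump -C /sys/bus/pci/devices/0000:00:1f.2/config | head -n 4
 *
 * Unprivileged readers see only the first 64 bytes (128 for CardBus);
 * CAP_SYS_ADMIN is required to read the full, possibly extended, space,
 * as implemented in pci_read_config().
 */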