// SPDX-License-Identifier: GPL-2.0
/*
 * (C) Copyright 2002-2004 Greg Kroah-Hartman <greg@kroah.com>
 * (C) Copyright 2002-2004 IBM Corp.
 * (C) Copyright 2003 Matthew Wilcox
 * (C) Copyright 2003 Hewlett-Packard
 * (C) Copyright 2004 Jon Smirl <jonsmirl@yahoo.com>
 * (C) Copyright 2004 Silicon Graphics, Inc. Jesse Barnes <jbarnes@sgi.com>
 *
 * File attributes for PCI devices
 *
 * Modeled after usb's driverfs.c
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/stat.h>
#include <linux/export.h>
#include <linux/topology.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/capability.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include "pci.h"

static int sysfs_initialized;	/* = 0 */

/* show configuration fields */
#define pci_config_attr(field, format_string)				\
static ssize_t								\
field##_show(struct device *dev, struct device_attribute *attr, char *buf)	\
{									\
	struct pci_dev *pdev;						\
									\
	pdev = to_pci_dev(dev);						\
	return sprintf(buf, format_string, pdev->field);		\
}									\
static DEVICE_ATTR_RO(field)

pci_config_attr(vendor, "0x%04x\n");
pci_config_attr(device, "0x%04x\n");
pci_config_attr(subsystem_vendor, "0x%04x\n");
pci_config_attr(subsystem_device, "0x%04x\n");
pci_config_attr(revision, "0x%02x\n");
pci_config_attr(class, "0x%06x\n");
pci_config_attr(irq, "%u\n");

static ssize_t broken_parity_status_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	return sprintf(buf, "%u\n", pdev->broken_parity_status);
}

static ssize_t broken_parity_status_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	unsigned long val;

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	pdev->broken_parity_status = !!val;

	return count;
}
static DEVICE_ATTR_RW(broken_parity_status);

static ssize_t pci_dev_show_local_cpu(struct device *dev, bool list,
				      struct device_attribute *attr, char *buf)
{
	const struct cpumask *mask;

#ifdef CONFIG_NUMA
	mask = (dev_to_node(dev) == -1) ? cpu_online_mask :
					  cpumask_of_node(dev_to_node(dev));
#else
	mask = cpumask_of_pcibus(to_pci_dev(dev)->bus);
#endif
	return cpumap_print_to_pagebuf(list, buf, mask);
}

static ssize_t local_cpus_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	return pci_dev_show_local_cpu(dev, false, attr, buf);
}
static DEVICE_ATTR_RO(local_cpus);

static ssize_t local_cpulist_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	return pci_dev_show_local_cpu(dev, true, attr, buf);
}
static DEVICE_ATTR_RO(local_cpulist);

/*
 * PCI Bus Class Devices
 */
static ssize_t cpuaffinity_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	const struct cpumask *cpumask = cpumask_of_pcibus(to_pci_bus(dev));

	return cpumap_print_to_pagebuf(false, buf, cpumask);
}
static DEVICE_ATTR_RO(cpuaffinity);

static ssize_t cpulistaffinity_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	const struct cpumask *cpumask = cpumask_of_pcibus(to_pci_bus(dev));

	return cpumap_print_to_pagebuf(true, buf, cpumask);
}
static DEVICE_ATTR_RO(cpulistaffinity);

/* show resources */
static ssize_t resource_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	char *str = buf;
	int i;
	int max;
	resource_size_t start, end;

	if (pci_dev->subordinate)
		max = DEVICE_COUNT_RESOURCE;
	else
		max = PCI_BRIDGE_RESOURCES;

	for (i = 0; i < max; i++) {
		struct resource *res = &pci_dev->resource[i];
		pci_resource_to_user(pci_dev, i, res, &start, &end);
		str += sprintf(str, "0x%016llx 0x%016llx 0x%016llx\n",
			       (unsigned long long)start,
			       (unsigned long long)end,
			       (unsigned long long)res->flags);
	}
	return (str - buf);
}
static DEVICE_ATTR_RO(resource);

static ssize_t max_link_speed_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return sprintf(buf, "%s\n", PCIE_SPEED2STR(pcie_get_speed_cap(pdev)));
}
static DEVICE_ATTR_RO(max_link_speed);

static ssize_t max_link_width_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return sprintf(buf, "%u\n", pcie_get_width_cap(pdev));
}
static DEVICE_ATTR_RO(max_link_width);

static ssize_t current_link_speed_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	u16 linkstat;
	int err;
	const char *speed;

	err = pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &linkstat);
	if (err)
		return -EINVAL;

	switch (linkstat & PCI_EXP_LNKSTA_CLS) {
	case PCI_EXP_LNKSTA_CLS_16_0GB:
		speed = "16 GT/s";
		break;
	case PCI_EXP_LNKSTA_CLS_8_0GB:
		speed = "8 GT/s";
		break;
	case PCI_EXP_LNKSTA_CLS_5_0GB:
		speed = "5 GT/s";
		break;
	case PCI_EXP_LNKSTA_CLS_2_5GB:
		speed = "2.5 GT/s";
		break;
	default:
		speed = "Unknown speed";
	}

	return sprintf(buf, "%s\n", speed);
}
static DEVICE_ATTR_RO(current_link_speed);

static ssize_t current_link_width_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	u16 linkstat;
	int err;

	err = pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &linkstat);
	if (err)
		return -EINVAL;

	return sprintf(buf, "%u\n",
		(linkstat & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT);
}
static DEVICE_ATTR_RO(current_link_width);

static ssize_t secondary_bus_number_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	u8 sec_bus;
	int err;

	err = pci_read_config_byte(pci_dev, PCI_SECONDARY_BUS, &sec_bus);
	if (err)
		return -EINVAL;

	return sprintf(buf, "%u\n", sec_bus);
}
static DEVICE_ATTR_RO(secondary_bus_number);

static ssize_t subordinate_bus_number_show(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	u8 sub_bus;
	int err;

	err = pci_read_config_byte(pci_dev, PCI_SUBORDINATE_BUS, &sub_bus);
	if (err)
		return -EINVAL;

	return sprintf(buf, "%u\n", sub_bus);
}
static DEVICE_ATTR_RO(subordinate_bus_number);

static ssize_t ari_enabled_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);

	return sprintf(buf, "%u\n", pci_ari_enabled(pci_dev->bus));
}
static DEVICE_ATTR_RO(ari_enabled);

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);

	return sprintf(buf, "pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02X\n",
		       pci_dev->vendor, pci_dev->device,
		       pci_dev->subsystem_vendor, pci_dev->subsystem_device,
		       (u8)(pci_dev->class >> 16), (u8)(pci_dev->class >> 8),
		       (u8)(pci_dev->class));
}
static DEVICE_ATTR_RO(modalias);

static ssize_t enable_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	unsigned long val;
	ssize_t result = kstrtoul(buf, 0, &val);

	if (result < 0)
		return result;

	/* this can crash the machine when done on the "wrong" device */
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	device_lock(dev);
	if (dev->driver)
		result = -EBUSY;
	else if (val)
		result = pci_enable_device(pdev);
	else if (pci_is_enabled(pdev))
		pci_disable_device(pdev);
	else
		result = -EIO;
	device_unlock(dev);

	return result < 0 ? result : count;
}

static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct pci_dev *pdev;

	pdev = to_pci_dev(dev);
	return sprintf(buf, "%u\n", atomic_read(&pdev->enable_cnt));
}
static DEVICE_ATTR_RW(enable);

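/*
 * Illustrative use of the "enable" attribute above (the device address is
 * an example only): writing a non-zero value enables the device via
 * pci_enable_device(), writing zero disables it, and reading returns the
 * current enable count.
 *
 *	# echo 1 > /sys/bus/pci/devices/0000:00:1f.2/enable
 */
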
#ifdef CONFIG_NUMA
static ssize_t numa_node_store(struct device *dev,
			       struct device_attribute *attr, const char *buf,
			       size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int node, ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = kstrtoint(buf, 0, &node);
	if (ret)
		return ret;

	if ((node < 0 && node != NUMA_NO_NODE) || node >= MAX_NUMNODES)
		return -EINVAL;

	if (node != NUMA_NO_NODE && !node_online(node))
		return -EINVAL;

	add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
	pci_alert(pdev, FW_BUG "Overriding NUMA node to %d. Contact your vendor for updates.",
		  node);

	dev->numa_node = node;
	return count;
}

static ssize_t numa_node_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	return sprintf(buf, "%d\n", dev->numa_node);
}
static DEVICE_ATTR_RW(numa_node);
#endif

static ssize_t dma_mask_bits_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return sprintf(buf, "%d\n", fls64(pdev->dma_mask));
}
static DEVICE_ATTR_RO(dma_mask_bits);

static ssize_t consistent_dma_mask_bits_show(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	return sprintf(buf, "%d\n", fls64(dev->coherent_dma_mask));
}
static DEVICE_ATTR_RO(consistent_dma_mask_bits);

static ssize_t msi_bus_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_bus *subordinate = pdev->subordinate;

	return sprintf(buf, "%u\n", subordinate ?
		       !(subordinate->bus_flags & PCI_BUS_FLAGS_NO_MSI)
			: !pdev->no_msi);
}

static ssize_t msi_bus_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_bus *subordinate = pdev->subordinate;
	unsigned long val;

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/*
	 * "no_msi" and "bus_flags" only affect what happens when a driver
	 * requests MSI or MSI-X.  They don't affect any drivers that have
	 * already requested MSI or MSI-X.
	 */
	if (!subordinate) {
		pdev->no_msi = !val;
		pci_info(pdev, "MSI/MSI-X %s for future drivers\n",
			 val ? "allowed" : "disallowed");
		return count;
	}

	if (val)
		subordinate->bus_flags &= ~PCI_BUS_FLAGS_NO_MSI;
	else
		subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;

	dev_info(&subordinate->dev, "MSI/MSI-X %s for future drivers of devices on this bus\n",
		 val ? "allowed" : "disallowed");
	return count;
}
static DEVICE_ATTR_RW(msi_bus);

static ssize_t bus_rescan_store(struct bus_type *bus, const char *buf,
				size_t count)
{
	unsigned long val;
	struct pci_bus *b = NULL;

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	if (val) {
		pci_lock_rescan_remove();
		while ((b = pci_find_next_bus(b)) != NULL)
			pci_rescan_bus(b);
		pci_unlock_rescan_remove();
	}
	return count;
}
static BUS_ATTR(rescan, (S_IWUSR|S_IWGRP), NULL, bus_rescan_store);

static struct attribute *pci_bus_attrs[] = {
	&bus_attr_rescan.attr,
	NULL,
};

static const struct attribute_group pci_bus_group = {
	.attrs = pci_bus_attrs,
};

const struct attribute_group *pci_bus_groups[] = {
	&pci_bus_group,
	NULL,
};

static ssize_t dev_rescan_store(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	unsigned long val;
	struct pci_dev *pdev = to_pci_dev(dev);

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	if (val) {
		pci_lock_rescan_remove();
		pci_rescan_bus(pdev->bus);
		pci_unlock_rescan_remove();
	}
	return count;
}
static struct device_attribute dev_rescan_attr = __ATTR(rescan,
							(S_IWUSR|S_IWGRP),
							NULL, dev_rescan_store);

static ssize_t remove_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	unsigned long val;

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	if (val && device_remove_file_self(dev, attr))
		pci_stop_and_remove_bus_device_locked(to_pci_dev(dev));
	return count;
}
static struct device_attribute dev_remove_attr = __ATTR(remove,
							(S_IWUSR|S_IWGRP),
							NULL, remove_store);

static ssize_t dev_bus_rescan_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{
	unsigned long val;
	struct pci_bus *bus = to_pci_bus(dev);

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	if (val) {
		pci_lock_rescan_remove();
		if (!pci_is_root_bus(bus) && list_empty(&bus->devices))
			pci_rescan_bus_bridge_resize(bus->self);
		else
			pci_rescan_bus(bus);
		pci_unlock_rescan_remove();
	}
	return count;
}
static DEVICE_ATTR(rescan, (S_IWUSR|S_IWGRP), NULL, dev_bus_rescan_store);

#if defined(CONFIG_PM) && defined(CONFIG_ACPI)
static ssize_t d3cold_allowed_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	unsigned long val;

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	pdev->d3cold_allowed = !!val;
	if (pdev->d3cold_allowed)
		pci_d3cold_enable(pdev);
	else
		pci_d3cold_disable(pdev);

	pm_runtime_resume(dev);

	return count;
}

static ssize_t d3cold_allowed_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	return sprintf(buf, "%u\n", pdev->d3cold_allowed);
}
static DEVICE_ATTR_RW(d3cold_allowed);
#endif

#ifdef CONFIG_OF
static ssize_t devspec_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct device_node *np = pci_device_to_OF_node(pdev);

	if (np == NULL)
		return 0;
	return sprintf(buf, "%pOF", np);
}
static DEVICE_ATTR_RO(devspec);
#endif

#ifdef CONFIG_PCI_IOV
static ssize_t sriov_totalvfs_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return sprintf(buf, "%u\n", pci_sriov_get_totalvfs(pdev));
}


static ssize_t sriov_numvfs_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return sprintf(buf, "%u\n", pdev->sriov->num_VFs);
}

/*
 * num_vfs > 0; number of VFs to enable
 * num_vfs = 0; disable all VFs
 *
 * Note: SRIOV spec doesn't allow partial VF
 *       disable, so it's all or none.
 */
static ssize_t sriov_numvfs_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int ret;
	u16 num_vfs;

	ret = kstrtou16(buf, 0, &num_vfs);
	if (ret < 0)
		return ret;

	if (num_vfs > pci_sriov_get_totalvfs(pdev))
		return -ERANGE;

	device_lock(&pdev->dev);

	if (num_vfs == pdev->sriov->num_VFs)
		goto exit;

	/* is PF driver loaded w/callback */
	if (!pdev->driver || !pdev->driver->sriov_configure) {
		pci_info(pdev, "Driver doesn't support SRIOV configuration via sysfs\n");
		ret = -ENOENT;
		goto exit;
	}

	if (num_vfs == 0) {
		/* disable VFs */
		ret = pdev->driver->sriov_configure(pdev, 0);
		goto exit;
	}

	/* enable VFs */
	if (pdev->sriov->num_VFs) {
		pci_warn(pdev, "%d VFs already enabled. Disable before enabling %d VFs\n",
			 pdev->sriov->num_VFs, num_vfs);
		ret = -EBUSY;
		goto exit;
	}

	ret = pdev->driver->sriov_configure(pdev, num_vfs);
	if (ret < 0)
		goto exit;

	if (ret != num_vfs)
		pci_warn(pdev, "%d VFs requested; only %d enabled\n",
			 num_vfs, ret);

exit:
	device_unlock(&pdev->dev);

	if (ret < 0)
		return ret;

	return count;
}

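/*
 * Illustrative sriov_numvfs usage (the device address is an example only):
 * enabling four VFs on a PF whose driver implements ->sriov_configure(),
 * then disabling them again:
 *
 *	# echo 4 > /sys/bus/pci/devices/0000:03:00.0/sriov_numvfs
 *	# echo 0 > /sys/bus/pci/devices/0000:03:00.0/sriov_numvfs
 */
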
static ssize_t sriov_offset_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return sprintf(buf, "%u\n", pdev->sriov->offset);
}

static ssize_t sriov_stride_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return sprintf(buf, "%u\n", pdev->sriov->stride);
}

static ssize_t sriov_vf_device_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return sprintf(buf, "%x\n", pdev->sriov->vf_device);
}

static ssize_t sriov_drivers_autoprobe_show(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return sprintf(buf, "%u\n", pdev->sriov->drivers_autoprobe);
}

static ssize_t sriov_drivers_autoprobe_store(struct device *dev,
					     struct device_attribute *attr,
					     const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	bool drivers_autoprobe;

	if (kstrtobool(buf, &drivers_autoprobe) < 0)
		return -EINVAL;

	pdev->sriov->drivers_autoprobe = drivers_autoprobe;

	return count;
}

static struct device_attribute sriov_totalvfs_attr = __ATTR_RO(sriov_totalvfs);
static struct device_attribute sriov_numvfs_attr =
		__ATTR(sriov_numvfs, (S_IRUGO|S_IWUSR|S_IWGRP),
		       sriov_numvfs_show, sriov_numvfs_store);
static struct device_attribute sriov_offset_attr = __ATTR_RO(sriov_offset);
static struct device_attribute sriov_stride_attr = __ATTR_RO(sriov_stride);
static struct device_attribute sriov_vf_device_attr = __ATTR_RO(sriov_vf_device);
static struct device_attribute sriov_drivers_autoprobe_attr =
		__ATTR(sriov_drivers_autoprobe, (S_IRUGO|S_IWUSR|S_IWGRP),
		       sriov_drivers_autoprobe_show, sriov_drivers_autoprobe_store);
#endif /* CONFIG_PCI_IOV */

static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	char *driver_override, *old, *cp;

	/* We need to keep extra room for a newline */
	if (count >= (PAGE_SIZE - 1))
		return -EINVAL;

	driver_override = kstrndup(buf, count, GFP_KERNEL);
	if (!driver_override)
		return -ENOMEM;

	cp = strchr(driver_override, '\n');
	if (cp)
		*cp = '\0';

	device_lock(dev);
	old = pdev->driver_override;
	if (strlen(driver_override)) {
		pdev->driver_override = driver_override;
	} else {
		kfree(driver_override);
		pdev->driver_override = NULL;
	}
	device_unlock(dev);

	kfree(old);

	return count;
}

static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	ssize_t len;

	device_lock(dev);
	len = snprintf(buf, PAGE_SIZE, "%s\n", pdev->driver_override);
	device_unlock(dev);
	return len;
}
static DEVICE_ATTR_RW(driver_override);

static struct attribute *pci_dev_attrs[] = {
	&dev_attr_resource.attr,
	&dev_attr_vendor.attr,
	&dev_attr_device.attr,
	&dev_attr_subsystem_vendor.attr,
	&dev_attr_subsystem_device.attr,
	&dev_attr_revision.attr,
	&dev_attr_class.attr,
	&dev_attr_irq.attr,
	&dev_attr_local_cpus.attr,
	&dev_attr_local_cpulist.attr,
	&dev_attr_modalias.attr,
#ifdef CONFIG_NUMA
	&dev_attr_numa_node.attr,
#endif
	&dev_attr_dma_mask_bits.attr,
	&dev_attr_consistent_dma_mask_bits.attr,
	&dev_attr_enable.attr,
	&dev_attr_broken_parity_status.attr,
	&dev_attr_msi_bus.attr,
#if defined(CONFIG_PM) && defined(CONFIG_ACPI)
	&dev_attr_d3cold_allowed.attr,
#endif
#ifdef CONFIG_OF
	&dev_attr_devspec.attr,
#endif
	&dev_attr_driver_override.attr,
	&dev_attr_ari_enabled.attr,
	NULL,
};

static struct attribute *pci_bridge_attrs[] = {
	&dev_attr_subordinate_bus_number.attr,
	&dev_attr_secondary_bus_number.attr,
	NULL,
};

static struct attribute *pcie_dev_attrs[] = {
	&dev_attr_current_link_speed.attr,
	&dev_attr_current_link_width.attr,
	&dev_attr_max_link_width.attr,
	&dev_attr_max_link_speed.attr,
	NULL,
};

static struct attribute *pcibus_attrs[] = {
	&dev_attr_rescan.attr,
	&dev_attr_cpuaffinity.attr,
	&dev_attr_cpulistaffinity.attr,
	NULL,
};

static const struct attribute_group pcibus_group = {
	.attrs = pcibus_attrs,
};

const struct attribute_group *pcibus_groups[] = {
	&pcibus_group,
	NULL,
};

static ssize_t boot_vga_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_dev *vga_dev = vga_default_device();

	if (vga_dev)
		return sprintf(buf, "%u\n", (pdev == vga_dev));

	return sprintf(buf, "%u\n",
		!!(pdev->resource[PCI_ROM_RESOURCE].flags &
		   IORESOURCE_ROM_SHADOW));
}
static struct device_attribute vga_attr = __ATTR_RO(boot_vga);

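/*
 * The "config" read/write helpers below copy the requested range using the
 * widest naturally aligned accesses available: a leading byte and/or word
 * to reach dword alignment, dword accesses for the bulk of the transfer,
 * then a trailing word and/or byte for any remainder.
 */
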
static ssize_t pci_read_config(struct file *filp, struct kobject *kobj,
			       struct bin_attribute *bin_attr, char *buf,
			       loff_t off, size_t count)
{
	struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj));
	unsigned int size = 64;
	loff_t init_off = off;
	u8 *data = (u8 *) buf;

	/* Several chips lock up trying to read undefined config space */
	if (file_ns_capable(filp, &init_user_ns, CAP_SYS_ADMIN))
		size = dev->cfg_size;
	else if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
		size = 128;

	if (off > size)
		return 0;
	if (off + count > size) {
		size -= off;
		count = size;
	} else {
		size = count;
	}

	pci_config_pm_runtime_get(dev);

	if ((off & 1) && size) {
		u8 val;
		pci_user_read_config_byte(dev, off, &val);
		data[off - init_off] = val;
		off++;
		size--;
	}

	if ((off & 3) && size > 2) {
		u16 val;
		pci_user_read_config_word(dev, off, &val);
		data[off - init_off] = val & 0xff;
		data[off - init_off + 1] = (val >> 8) & 0xff;
		off += 2;
		size -= 2;
	}

	while (size > 3) {
		u32 val;
		pci_user_read_config_dword(dev, off, &val);
		data[off - init_off] = val & 0xff;
		data[off - init_off + 1] = (val >> 8) & 0xff;
		data[off - init_off + 2] = (val >> 16) & 0xff;
		data[off - init_off + 3] = (val >> 24) & 0xff;
		off += 4;
		size -= 4;
	}

	if (size >= 2) {
		u16 val;
		pci_user_read_config_word(dev, off, &val);
		data[off - init_off] = val & 0xff;
		data[off - init_off + 1] = (val >> 8) & 0xff;
		off += 2;
		size -= 2;
	}

	if (size > 0) {
		u8 val;
		pci_user_read_config_byte(dev, off, &val);
		data[off - init_off] = val;
		off++;
		--size;
	}

	pci_config_pm_runtime_put(dev);

	return count;
}

static ssize_t pci_write_config(struct file *filp, struct kobject *kobj,
				struct bin_attribute *bin_attr, char *buf,
				loff_t off, size_t count)
{
	struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj));
	unsigned int size = count;
	loff_t init_off = off;
	u8 *data = (u8 *) buf;

	if (off > dev->cfg_size)
		return 0;
	if (off + count > dev->cfg_size) {
		size = dev->cfg_size - off;
		count = size;
	}

	pci_config_pm_runtime_get(dev);

	if ((off & 1) && size) {
		pci_user_write_config_byte(dev, off, data[off - init_off]);
		off++;
		size--;
	}

	if ((off & 3) && size > 2) {
		u16 val = data[off - init_off];
		val |= (u16) data[off - init_off + 1] << 8;
		pci_user_write_config_word(dev, off, val);
		off += 2;
		size -= 2;
	}

	while (size > 3) {
		u32 val = data[off - init_off];
		val |= (u32) data[off - init_off + 1] << 8;
		val |= (u32) data[off - init_off + 2] << 16;
		val |= (u32) data[off - init_off + 3] << 24;
		pci_user_write_config_dword(dev, off, val);
		off += 4;
		size -= 4;
	}

	if (size >= 2) {
		u16 val = data[off - init_off];
		val |= (u16) data[off - init_off + 1] << 8;
		pci_user_write_config_word(dev, off, val);
		off += 2;
		size -= 2;
	}

	if (size) {
		pci_user_write_config_byte(dev, off, data[off - init_off]);
		off++;
		--size;
	}

	pci_config_pm_runtime_put(dev);

	return count;
}

#ifdef HAVE_PCI_LEGACY
/**
 * pci_read_legacy_io - read byte(s) from legacy I/O port space
 * @filp: open sysfs file
 * @kobj: kobject corresponding to file to read from
 * @bin_attr: struct bin_attribute for this file
 * @buf: buffer to store results
 * @off: offset into legacy I/O port space
 * @count: number of bytes to read
 *
 * Reads 1, 2, or 4 bytes from legacy I/O port space using an arch specific
 * callback routine (pci_legacy_read).
 */
static ssize_t pci_read_legacy_io(struct file *filp, struct kobject *kobj,
				  struct bin_attribute *bin_attr, char *buf,
				  loff_t off, size_t count)
{
	struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj));

	/* Only support 1, 2 or 4 byte accesses */
	if (count != 1 && count != 2 && count != 4)
		return -EINVAL;

	return pci_legacy_read(bus, off, (u32 *)buf, count);
}

/**
 * pci_write_legacy_io - write byte(s) to legacy I/O port space
 * @filp: open sysfs file
 * @kobj: kobject corresponding to the file to write to
 * @bin_attr: struct bin_attribute for this file
 * @buf: buffer containing value to be written
 * @off: offset into legacy I/O port space
 * @count: number of bytes to write
 *
 * Writes 1, 2, or 4 bytes to legacy I/O port space using an arch specific
 * callback routine (pci_legacy_write).
 */
static ssize_t pci_write_legacy_io(struct file *filp, struct kobject *kobj,
				   struct bin_attribute *bin_attr, char *buf,
				   loff_t off, size_t count)
{
	struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj));

	/* Only support 1, 2 or 4 byte accesses */
	if (count != 1 && count != 2 && count != 4)
		return -EINVAL;

	return pci_legacy_write(bus, off, *(u32 *)buf, count);
}

/**
 * pci_mmap_legacy_mem - map legacy PCI memory into user memory space
 * @filp: open sysfs file
 * @kobj: kobject corresponding to device to be mapped
 * @attr: struct bin_attribute for this file
 * @vma: struct vm_area_struct passed to mmap
 *
 * Uses an arch specific callback, pci_mmap_legacy_mem_page_range, to mmap
 * legacy memory space (first meg of bus space) into application virtual
 * memory space.
 */
static int pci_mmap_legacy_mem(struct file *filp, struct kobject *kobj,
			       struct bin_attribute *attr,
			       struct vm_area_struct *vma)
{
	struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj));

	return pci_mmap_legacy_page_range(bus, vma, pci_mmap_mem);
}

/**
 * pci_mmap_legacy_io - map legacy PCI IO into user memory space
 * @filp: open sysfs file
 * @kobj: kobject corresponding to device to be mapped
 * @attr: struct bin_attribute for this file
 * @vma: struct vm_area_struct passed to mmap
 *
 * Uses an arch specific callback, pci_mmap_legacy_io_page_range, to mmap
 * legacy IO space (first meg of bus space) into application virtual
 * memory space.  Returns -ENOSYS if the operation isn't supported.
 */
static int pci_mmap_legacy_io(struct file *filp, struct kobject *kobj,
			      struct bin_attribute *attr,
			      struct vm_area_struct *vma)
{
	struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj));

	return pci_mmap_legacy_page_range(bus, vma, pci_mmap_io);
}

/**
 * pci_adjust_legacy_attr - adjustment of legacy file attributes
 * @b: bus to create files under
 * @mmap_type: I/O port or memory
 *
 * Stub implementation.  Can be overridden by arch if necessary.
 */
void __weak pci_adjust_legacy_attr(struct pci_bus *b,
				   enum pci_mmap_state mmap_type)
{
}

/**
 * pci_create_legacy_files - create legacy I/O port and memory files
 * @b: bus to create files under
 *
 * Some platforms allow access to legacy I/O port and ISA memory space on
 * a per-bus basis.  This routine creates the files and ties them into
 * their associated read, write and mmap files from pci-sysfs.c
 *
 * On error unwind, but don't propagate the error to the caller
 * as it is ok to set up the PCI bus without these files.
 */
void pci_create_legacy_files(struct pci_bus *b)
{
	int error;

	b->legacy_io = kcalloc(2, sizeof(struct bin_attribute),
			       GFP_ATOMIC);
	if (!b->legacy_io)
		goto kzalloc_err;

	sysfs_bin_attr_init(b->legacy_io);
	b->legacy_io->attr.name = "legacy_io";
	b->legacy_io->size = 0xffff;
	b->legacy_io->attr.mode = S_IRUSR | S_IWUSR;
	b->legacy_io->read = pci_read_legacy_io;
	b->legacy_io->write = pci_write_legacy_io;
	b->legacy_io->mmap = pci_mmap_legacy_io;
	pci_adjust_legacy_attr(b, pci_mmap_io);
	error = device_create_bin_file(&b->dev, b->legacy_io);
	if (error)
		goto legacy_io_err;

	/* Allocated above after the legacy_io struct */
	b->legacy_mem = b->legacy_io + 1;
	sysfs_bin_attr_init(b->legacy_mem);
	b->legacy_mem->attr.name = "legacy_mem";
	b->legacy_mem->size = 1024*1024;
	b->legacy_mem->attr.mode = S_IRUSR | S_IWUSR;
	b->legacy_mem->mmap = pci_mmap_legacy_mem;
	pci_adjust_legacy_attr(b, pci_mmap_mem);
	error = device_create_bin_file(&b->dev, b->legacy_mem);
	if (error)
		goto legacy_mem_err;

	return;

legacy_mem_err:
	device_remove_bin_file(&b->dev, b->legacy_io);
legacy_io_err:
	kfree(b->legacy_io);
	b->legacy_io = NULL;
kzalloc_err:
	printk(KERN_WARNING "pci: warning: could not create legacy I/O port and ISA memory resources to sysfs\n");
	return;
}

void pci_remove_legacy_files(struct pci_bus *b)
{
	if (b->legacy_io) {
		device_remove_bin_file(&b->dev, b->legacy_io);
		device_remove_bin_file(&b->dev, b->legacy_mem);
		kfree(b->legacy_io); /* both are allocated here */
	}
}
#endif /* HAVE_PCI_LEGACY */

#if defined(HAVE_PCI_MMAP) || defined(ARCH_GENERIC_PCI_MMAP_RESOURCE)

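/*
 * Check that the requested mapping lies entirely within BAR @resno.  For
 * sysfs mmaps, vma->vm_pgoff is a page offset into the BAR; for procfs
 * mmaps it is relative to the user-visible address reported by
 * pci_resource_to_user(), hence the pci_start adjustment below.
 */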
int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma,
		  enum pci_mmap_api mmap_api)
{
	unsigned long nr, start, size;
	resource_size_t pci_start = 0, pci_end;

	if (pci_resource_len(pdev, resno) == 0)
		return 0;
	nr = vma_pages(vma);
	start = vma->vm_pgoff;
	size = ((pci_resource_len(pdev, resno) - 1) >> PAGE_SHIFT) + 1;
	if (mmap_api == PCI_MMAP_PROCFS) {
		pci_resource_to_user(pdev, resno, &pdev->resource[resno],
				     &pci_start, &pci_end);
		pci_start >>= PAGE_SHIFT;
	}
	if (start >= pci_start && start < pci_start + size &&
	    start + nr <= pci_start + size)
		return 1;
	return 0;
}

/**
 * pci_mmap_resource - map a PCI resource into user memory space
 * @kobj: kobject for mapping
 * @attr: struct bin_attribute for the file being mapped
 * @vma: struct vm_area_struct passed into the mmap
 * @write_combine: 1 for write_combine mapping
 *
 * Use the regular PCI mapping routines to map a PCI resource into userspace.
 */
static int pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr,
			     struct vm_area_struct *vma, int write_combine)
{
	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
	int bar = (unsigned long)attr->private;
	enum pci_mmap_state mmap_type;
	struct resource *res = &pdev->resource[bar];

	if (res->flags & IORESOURCE_MEM && iomem_is_exclusive(res->start))
		return -EINVAL;

	if (!pci_mmap_fits(pdev, bar, vma, PCI_MMAP_SYSFS))
		return -EINVAL;

	mmap_type = res->flags & IORESOURCE_MEM ? pci_mmap_mem : pci_mmap_io;

	return pci_mmap_resource_range(pdev, bar, vma, mmap_type, write_combine);
}

static int pci_mmap_resource_uc(struct file *filp, struct kobject *kobj,
				struct bin_attribute *attr,
				struct vm_area_struct *vma)
{
	return pci_mmap_resource(kobj, attr, vma, 0);
}

static int pci_mmap_resource_wc(struct file *filp, struct kobject *kobj,
				struct bin_attribute *attr,
				struct vm_area_struct *vma)
{
	return pci_mmap_resource(kobj, attr, vma, 1);
}

static ssize_t pci_resource_io(struct file *filp, struct kobject *kobj,
			       struct bin_attribute *attr, char *buf,
			       loff_t off, size_t count, bool write)
{
	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
	int bar = (unsigned long)attr->private;
	unsigned long port = off;

	port += pci_resource_start(pdev, bar);

	if (port > pci_resource_end(pdev, bar))
		return 0;

	if (port + count - 1 > pci_resource_end(pdev, bar))
		return -EINVAL;

	switch (count) {
	case 1:
		if (write)
			outb(*(u8 *)buf, port);
		else
			*(u8 *)buf = inb(port);
		return 1;
	case 2:
		if (write)
			outw(*(u16 *)buf, port);
		else
			*(u16 *)buf = inw(port);
		return 2;
	case 4:
		if (write)
			outl(*(u32 *)buf, port);
		else
			*(u32 *)buf = inl(port);
		return 4;
	}
	return -EINVAL;
}

static ssize_t pci_read_resource_io(struct file *filp, struct kobject *kobj,
				    struct bin_attribute *attr, char *buf,
				    loff_t off, size_t count)
{
	return pci_resource_io(filp, kobj, attr, buf, off, count, false);
}

static ssize_t pci_write_resource_io(struct file *filp, struct kobject *kobj,
				     struct bin_attribute *attr, char *buf,
				     loff_t off, size_t count)
{
	return pci_resource_io(filp, kobj, attr, buf, off, count, true);
}

/**
 * pci_remove_resource_files - cleanup resource files
 * @pdev: dev to cleanup
 *
 * If we created resource files for @pdev, remove them from sysfs and
 * free their resources.
 */
static void pci_remove_resource_files(struct pci_dev *pdev)
{
	int i;

	for (i = 0; i < PCI_ROM_RESOURCE; i++) {
		struct bin_attribute *res_attr;

		res_attr = pdev->res_attr[i];
		if (res_attr) {
			sysfs_remove_bin_file(&pdev->dev.kobj, res_attr);
			kfree(res_attr);
		}

		res_attr = pdev->res_attr_wc[i];
		if (res_attr) {
			sysfs_remove_bin_file(&pdev->dev.kobj, res_attr);
			kfree(res_attr);
		}
	}
}

static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
{
	/* allocate attribute structure, piggyback attribute name */
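	/*
	 * Sized for the single-digit BAR numbers used by
	 * pci_create_resource_files() below: "resource%d" needs at most 10
	 * bytes including the NUL, "resource%d_wc" at most 13.
	 */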
	int name_len = write_combine ? 13 : 10;
	struct bin_attribute *res_attr;
	char *res_attr_name;
	int retval;

	res_attr = kzalloc(sizeof(*res_attr) + name_len, GFP_ATOMIC);
	if (!res_attr)
		return -ENOMEM;

	res_attr_name = (char *)(res_attr + 1);

	sysfs_bin_attr_init(res_attr);
	if (write_combine) {
		pdev->res_attr_wc[num] = res_attr;
		sprintf(res_attr_name, "resource%d_wc", num);
		res_attr->mmap = pci_mmap_resource_wc;
	} else {
		pdev->res_attr[num] = res_attr;
		sprintf(res_attr_name, "resource%d", num);
		if (pci_resource_flags(pdev, num) & IORESOURCE_IO) {
			res_attr->read = pci_read_resource_io;
			res_attr->write = pci_write_resource_io;
			if (arch_can_pci_mmap_io())
				res_attr->mmap = pci_mmap_resource_uc;
		} else {
			res_attr->mmap = pci_mmap_resource_uc;
		}
	}
	res_attr->attr.name = res_attr_name;
	res_attr->attr.mode = S_IRUSR | S_IWUSR;
	res_attr->size = pci_resource_len(pdev, num);
	res_attr->private = (void *)(unsigned long)num;
	retval = sysfs_create_bin_file(&pdev->dev.kobj, res_attr);
	if (retval)
		kfree(res_attr);

	return retval;
}

/**
 * pci_create_resource_files - create resource files in sysfs for @pdev
 * @pdev: dev in question
 *
 * Walk the resources in @pdev creating files for each resource available.
 */
static int pci_create_resource_files(struct pci_dev *pdev)
{
	int i;
	int retval;

	/* Expose the PCI resources from this device as files */
	for (i = 0; i < PCI_ROM_RESOURCE; i++) {

		/* skip empty resources */
		if (!pci_resource_len(pdev, i))
			continue;

		retval = pci_create_attr(pdev, i, 0);
		/* for prefetchable resources, create a WC mappable file */
		if (!retval && arch_can_pci_mmap_wc() &&
		    pdev->resource[i].flags & IORESOURCE_PREFETCH)
			retval = pci_create_attr(pdev, i, 1);
		if (retval) {
			pci_remove_resource_files(pdev);
			return retval;
		}
	}
	return 0;
}
#else /* !HAVE_PCI_MMAP */
int __weak pci_create_resource_files(struct pci_dev *dev) { return 0; }
void __weak pci_remove_resource_files(struct pci_dev *dev) { return; }
#endif /* HAVE_PCI_MMAP */

/**
 * pci_write_rom - used to enable access to the PCI ROM display
 * @filp: sysfs file
 * @kobj: kernel object handle
 * @bin_attr: struct bin_attribute for this file
 * @buf: user input
 * @off: file offset
 * @count: number of bytes in input
 *
 * writing anything except 0 enables it
 */
static ssize_t pci_write_rom(struct file *filp, struct kobject *kobj,
			     struct bin_attribute *bin_attr, char *buf,
			     loff_t off, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));

	if ((off == 0) && (*buf == '0') && (count == 2))
		pdev->rom_attr_enabled = 0;
	else
		pdev->rom_attr_enabled = 1;

	return count;
}

/**
 * pci_read_rom - read a PCI ROM
 * @filp: sysfs file
 * @kobj: kernel object handle
 * @bin_attr: struct bin_attribute for this file
 * @buf: where to put the data we read from the ROM
 * @off: file offset
 * @count: number of bytes to read
 *
 * Put @count bytes starting at @off into @buf from the ROM in the PCI
 * device corresponding to @kobj.
 */
static ssize_t pci_read_rom(struct file *filp, struct kobject *kobj,
			    struct bin_attribute *bin_attr, char *buf,
			    loff_t off, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
	void __iomem *rom;
	size_t size;

	if (!pdev->rom_attr_enabled)
		return -EINVAL;

	rom = pci_map_rom(pdev, &size);	/* size starts out as PCI window size */
	if (!rom || !size)
		return -EIO;

	if (off >= size)
		count = 0;
	else {
		if (off + count > size)
			count = size - off;

		memcpy_fromio(buf, rom + off, count);
	}
	pci_unmap_rom(pdev, rom);

	return count;
}

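/*
 * Illustrative use of the "rom" attribute created below (the device address
 * is an example): write anything but "0" to enable it, then read the image:
 *
 *	# echo 1 > /sys/bus/pci/devices/0000:01:00.0/rom
 *	# cat /sys/bus/pci/devices/0000:01:00.0/rom > image.rom
 */
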
static const struct bin_attribute pci_config_attr = {
	.attr =	{
		.name = "config",
		.mode = S_IRUGO | S_IWUSR,
	},
	.size = PCI_CFG_SPACE_SIZE,
	.read = pci_read_config,
	.write = pci_write_config,
};

static const struct bin_attribute pcie_config_attr = {
	.attr =	{
		.name = "config",
		.mode = S_IRUGO | S_IWUSR,
	},
	.size = PCI_CFG_SPACE_EXP_SIZE,
	.read = pci_read_config,
	.write = pci_write_config,
};

static ssize_t reset_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	unsigned long val;
	ssize_t result = kstrtoul(buf, 0, &val);

	if (result < 0)
		return result;

	if (val != 1)
		return -EINVAL;

	pm_runtime_get_sync(dev);
	result = pci_reset_function(pdev);
	pm_runtime_put(dev);
	if (result < 0)
		return result;

	return count;
}

static struct device_attribute reset_attr = __ATTR(reset, 0200, NULL, reset_store);

static int pci_create_capabilities_sysfs(struct pci_dev *dev)
{
	int retval;

	pcie_vpd_create_sysfs_dev_files(dev);
	pcie_aspm_create_sysfs_dev_files(dev);

	if (dev->reset_fn) {
		retval = device_create_file(&dev->dev, &reset_attr);
		if (retval)
			goto error;
	}
	return 0;

error:
	pcie_aspm_remove_sysfs_dev_files(dev);
	pcie_vpd_remove_sysfs_dev_files(dev);
	return retval;
}

int __must_check pci_create_sysfs_dev_files(struct pci_dev *pdev)
{
	int retval;
	int rom_size;
	struct bin_attribute *attr;

	if (!sysfs_initialized)
		return -EACCES;

	if (pdev->cfg_size > PCI_CFG_SPACE_SIZE)
		retval = sysfs_create_bin_file(&pdev->dev.kobj, &pcie_config_attr);
	else
		retval = sysfs_create_bin_file(&pdev->dev.kobj, &pci_config_attr);
	if (retval)
		goto err;

	retval = pci_create_resource_files(pdev);
	if (retval)
		goto err_config_file;

	/* If the device has a ROM, try to expose it in sysfs. */
	rom_size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
	if (rom_size) {
		attr = kzalloc(sizeof(*attr), GFP_ATOMIC);
		if (!attr) {
			retval = -ENOMEM;
			goto err_resource_files;
		}
		sysfs_bin_attr_init(attr);
		attr->size = rom_size;
		attr->attr.name = "rom";
		attr->attr.mode = S_IRUSR | S_IWUSR;
		attr->read = pci_read_rom;
		attr->write = pci_write_rom;
		retval = sysfs_create_bin_file(&pdev->dev.kobj, attr);
		if (retval) {
			kfree(attr);
			goto err_resource_files;
		}
		pdev->rom_attr = attr;
	}

	/* add sysfs entries for various capabilities */
	retval = pci_create_capabilities_sysfs(pdev);
	if (retval)
		goto err_rom_file;

	pci_create_firmware_label_files(pdev);

	return 0;

err_rom_file:
	if (pdev->rom_attr) {
		sysfs_remove_bin_file(&pdev->dev.kobj, pdev->rom_attr);
		kfree(pdev->rom_attr);
		pdev->rom_attr = NULL;
	}
err_resource_files:
	pci_remove_resource_files(pdev);
err_config_file:
	if (pdev->cfg_size > PCI_CFG_SPACE_SIZE)
		sysfs_remove_bin_file(&pdev->dev.kobj, &pcie_config_attr);
	else
		sysfs_remove_bin_file(&pdev->dev.kobj, &pci_config_attr);
err:
	return retval;
}

static void pci_remove_capabilities_sysfs(struct pci_dev *dev)
{
	pcie_vpd_remove_sysfs_dev_files(dev);
	pcie_aspm_remove_sysfs_dev_files(dev);
	if (dev->reset_fn) {
		device_remove_file(&dev->dev, &reset_attr);
		dev->reset_fn = 0;
	}
}

/**
 * pci_remove_sysfs_dev_files - cleanup PCI specific sysfs files
 * @pdev: device whose entries we should free
 *
 * Cleanup when @pdev is removed from sysfs.
 */
void pci_remove_sysfs_dev_files(struct pci_dev *pdev)
{
	if (!sysfs_initialized)
		return;

	pci_remove_capabilities_sysfs(pdev);

	if (pdev->cfg_size > PCI_CFG_SPACE_SIZE)
		sysfs_remove_bin_file(&pdev->dev.kobj, &pcie_config_attr);
	else
		sysfs_remove_bin_file(&pdev->dev.kobj, &pci_config_attr);

	pci_remove_resource_files(pdev);

	if (pdev->rom_attr) {
		sysfs_remove_bin_file(&pdev->dev.kobj, pdev->rom_attr);
		kfree(pdev->rom_attr);
		pdev->rom_attr = NULL;
	}

	pci_remove_firmware_label_files(pdev);
}

static int __init pci_sysfs_init(void)
{
	struct pci_dev *pdev = NULL;
	int retval;

	sysfs_initialized = 1;
	for_each_pci_dev(pdev) {
		retval = pci_create_sysfs_dev_files(pdev);
		if (retval) {
			pci_dev_put(pdev);
			return retval;
		}
	}

	return 0;
}
late_initcall(pci_sysfs_init);

static struct attribute *pci_dev_dev_attrs[] = {
	&vga_attr.attr,
	NULL,
};

static umode_t pci_dev_attrs_are_visible(struct kobject *kobj,
					 struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct pci_dev *pdev = to_pci_dev(dev);

	if (a == &vga_attr.attr)
		if ((pdev->class >> 8) != PCI_CLASS_DISPLAY_VGA)
			return 0;

	return a->mode;
}

static struct attribute *pci_dev_hp_attrs[] = {
	&dev_remove_attr.attr,
	&dev_rescan_attr.attr,
	NULL,
};

static umode_t pci_dev_hp_attrs_are_visible(struct kobject *kobj,
					    struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct pci_dev *pdev = to_pci_dev(dev);

	if (pdev->is_virtfn)
		return 0;

	return a->mode;
}

static umode_t pci_bridge_attrs_are_visible(struct kobject *kobj,
					    struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct pci_dev *pdev = to_pci_dev(dev);

	if (pci_is_bridge(pdev))
		return a->mode;

	return 0;
}

static umode_t pcie_dev_attrs_are_visible(struct kobject *kobj,
					  struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct pci_dev *pdev = to_pci_dev(dev);

	if (pci_is_pcie(pdev))
		return a->mode;

	return 0;
}

static const struct attribute_group pci_dev_group = {
	.attrs = pci_dev_attrs,
};

const struct attribute_group *pci_dev_groups[] = {
	&pci_dev_group,
	NULL,
};

static const struct attribute_group pci_bridge_group = {
	.attrs = pci_bridge_attrs,
};

const struct attribute_group *pci_bridge_groups[] = {
	&pci_bridge_group,
	NULL,
};

static const struct attribute_group pcie_dev_group = {
	.attrs = pcie_dev_attrs,
};

const struct attribute_group *pcie_dev_groups[] = {
	&pcie_dev_group,
	NULL,
};

static const struct attribute_group pci_dev_hp_attr_group = {
	.attrs = pci_dev_hp_attrs,
	.is_visible = pci_dev_hp_attrs_are_visible,
};

#ifdef CONFIG_PCI_IOV
static struct attribute *sriov_dev_attrs[] = {
	&sriov_totalvfs_attr.attr,
	&sriov_numvfs_attr.attr,
	&sriov_offset_attr.attr,
	&sriov_stride_attr.attr,
	&sriov_vf_device_attr.attr,
	&sriov_drivers_autoprobe_attr.attr,
	NULL,
};

static umode_t sriov_attrs_are_visible(struct kobject *kobj,
				       struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);

	if (!dev_is_pf(dev))
		return 0;

	return a->mode;
}

static const struct attribute_group sriov_dev_attr_group = {
	.attrs = sriov_dev_attrs,
	.is_visible = sriov_attrs_are_visible,
};
#endif /* CONFIG_PCI_IOV */

static const struct attribute_group pci_dev_attr_group = {
	.attrs = pci_dev_dev_attrs,
	.is_visible = pci_dev_attrs_are_visible,
};

static const struct attribute_group pci_bridge_attr_group = {
	.attrs = pci_bridge_attrs,
	.is_visible = pci_bridge_attrs_are_visible,
};

static const struct attribute_group pcie_dev_attr_group = {
	.attrs = pcie_dev_attrs,
	.is_visible = pcie_dev_attrs_are_visible,
};

static const struct attribute_group *pci_dev_attr_groups[] = {
	&pci_dev_attr_group,
	&pci_dev_hp_attr_group,
#ifdef CONFIG_PCI_IOV
	&sriov_dev_attr_group,
#endif
	&pci_bridge_attr_group,
	&pcie_dev_attr_group,
#ifdef CONFIG_PCIEAER
	&aer_stats_attr_group,
#endif
	NULL,
};

const struct device_type pci_dev_type = {
	.groups = pci_dev_attr_groups,
};