/*
 * drivers/pci/pci-sysfs.c
 *
 * (C) Copyright 2002-2004 Greg Kroah-Hartman <greg@kroah.com>
 * (C) Copyright 2002-2004 IBM Corp.
 * (C) Copyright 2003 Matthew Wilcox
 * (C) Copyright 2003 Hewlett-Packard
 * (C) Copyright 2004 Jon Smirl <jonsmirl@yahoo.com>
 * (C) Copyright 2004 Silicon Graphics, Inc. Jesse Barnes <jbarnes@sgi.com>
 *
 * File attributes for PCI devices
 *
 * Modeled after usb's driverfs.c
 *
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/stat.h>
#include <linux/export.h>
#include <linux/topology.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/capability.h>
#include <linux/security.h>
#include <linux/pci-aspm.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>
#include <linux/pm_runtime.h>
#include "pci.h"

static int sysfs_initialized;	/* = 0 */

/* show configuration fields */
#define pci_config_attr(field, format_string)                               \
static ssize_t                                                              \
field##_show(struct device *dev, struct device_attribute *attr, char *buf)  \
{                                                                           \
        struct pci_dev *pdev;                                               \
                                                                            \
        pdev = to_pci_dev(dev);                                             \
        return sprintf(buf, format_string, pdev->field);                    \
}

pci_config_attr(vendor, "0x%04x\n");
pci_config_attr(device, "0x%04x\n");
pci_config_attr(subsystem_vendor, "0x%04x\n");
pci_config_attr(subsystem_device, "0x%04x\n");
pci_config_attr(class, "0x%06x\n");
pci_config_attr(irq, "%u\n");

static ssize_t broken_parity_status_show(struct device *dev,
                                         struct device_attribute *attr,
                                         char *buf)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        return sprintf(buf, "%u\n", pdev->broken_parity_status);
}

static ssize_t broken_parity_status_store(struct device *dev,
                                          struct device_attribute *attr,
                                          const char *buf, size_t count)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        unsigned long val;

        if (kstrtoul(buf, 0, &val) < 0)
                return -EINVAL;

        pdev->broken_parity_status = !!val;

        return count;
}

static ssize_t local_cpus_show(struct device *dev,
                               struct device_attribute *attr, char *buf)
{
        const struct cpumask *mask;
        int len;

#ifdef CONFIG_NUMA
        mask = (dev_to_node(dev) == -1) ? cpu_online_mask :
                                          cpumask_of_node(dev_to_node(dev));
#else
        mask = cpumask_of_pcibus(to_pci_dev(dev)->bus);
#endif
        len = cpumask_scnprintf(buf, PAGE_SIZE-2, mask);
        buf[len++] = '\n';
        buf[len] = '\0';
        return len;
}

static ssize_t local_cpulist_show(struct device *dev,
                                  struct device_attribute *attr, char *buf)
{
        const struct cpumask *mask;
        int len;

#ifdef CONFIG_NUMA
        mask = (dev_to_node(dev) == -1) ? cpu_online_mask :
                                          cpumask_of_node(dev_to_node(dev));
#else
        mask = cpumask_of_pcibus(to_pci_dev(dev)->bus);
#endif
        len = cpulist_scnprintf(buf, PAGE_SIZE-2, mask);
        buf[len++] = '\n';
        buf[len] = '\0';
        return len;
}

/*
 * PCI Bus Class Devices
 */
static ssize_t pci_bus_show_cpuaffinity(struct device *dev,
                                        int type,
                                        struct device_attribute *attr,
                                        char *buf)
{
        int ret;
        const struct cpumask *cpumask;

        cpumask = cpumask_of_pcibus(to_pci_bus(dev));
        ret = type ?
                cpulist_scnprintf(buf, PAGE_SIZE-2, cpumask) :
                cpumask_scnprintf(buf, PAGE_SIZE-2, cpumask);
        buf[ret++] = '\n';
        buf[ret] = '\0';
        return ret;
}

static inline ssize_t pci_bus_show_cpumaskaffinity(struct device *dev,
                                                   struct device_attribute *attr,
                                                   char *buf)
{
        return pci_bus_show_cpuaffinity(dev, 0, attr, buf);
}

static inline ssize_t pci_bus_show_cpulistaffinity(struct device *dev,
                                                   struct device_attribute *attr,
                                                   char *buf)
{
        return pci_bus_show_cpuaffinity(dev, 1, attr, buf);
}

/* show resources */
static ssize_t
resource_show(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct pci_dev *pci_dev = to_pci_dev(dev);
        char *str = buf;
        int i;
        int max;
        resource_size_t start, end;

        if (pci_dev->subordinate)
                max = DEVICE_COUNT_RESOURCE;
        else
                max = PCI_BRIDGE_RESOURCES;

        for (i = 0; i < max; i++) {
                struct resource *res = &pci_dev->resource[i];
                pci_resource_to_user(pci_dev, i, res, &start, &end);
                str += sprintf(str, "0x%016llx 0x%016llx 0x%016llx\n",
                               (unsigned long long)start,
                               (unsigned long long)end,
                               (unsigned long long)res->flags);
        }
        return (str - buf);
}

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct pci_dev *pci_dev = to_pci_dev(dev);

        return sprintf(buf, "pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02x\n",
                       pci_dev->vendor, pci_dev->device,
                       pci_dev->subsystem_vendor, pci_dev->subsystem_device,
                       (u8)(pci_dev->class >> 16), (u8)(pci_dev->class >> 8),
                       (u8)(pci_dev->class));
}

static ssize_t is_enabled_store(struct device *dev,
                                struct device_attribute *attr, const char *buf,
                                size_t count)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        unsigned long val;
        ssize_t result = kstrtoul(buf, 0, &val);

        if (result < 0)
                return result;

        /* this can crash the machine when done on the "wrong" device */
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        if (!val) {
                if (pci_is_enabled(pdev))
                        pci_disable_device(pdev);
                else
                        result = -EIO;
        } else
                result = pci_enable_device(pdev);

        return result < 0 ? result : count;
}

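/* "enable" reads back the device's enable refcount, not just a 0/1 flag. */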
static ssize_t is_enabled_show(struct device *dev,
                               struct device_attribute *attr, char *buf)
{
        struct pci_dev *pdev;

        pdev = to_pci_dev(dev);
        return sprintf(buf, "%u\n", atomic_read(&pdev->enable_cnt));
}

#ifdef CONFIG_NUMA
static ssize_t
numa_node_show(struct device *dev, struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", dev->numa_node);
}
#endif

static ssize_t
dma_mask_bits_show(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct pci_dev *pdev = to_pci_dev(dev);

        return sprintf(buf, "%d\n", fls64(pdev->dma_mask));
}

static ssize_t
consistent_dma_mask_bits_show(struct device *dev, struct device_attribute *attr,
                              char *buf)
{
        return sprintf(buf, "%d\n", fls64(dev->coherent_dma_mask));
}

static ssize_t
msi_bus_show(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct pci_dev *pdev = to_pci_dev(dev);

        if (!pdev->subordinate)
                return 0;

        return sprintf(buf, "%u\n",
                       !(pdev->subordinate->bus_flags & PCI_BUS_FLAGS_NO_MSI));
}

static ssize_t
msi_bus_store(struct device *dev, struct device_attribute *attr,
              const char *buf, size_t count)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        unsigned long val;

        if (kstrtoul(buf, 0, &val) < 0)
                return -EINVAL;

        /* bad things may happen if the no_msi flag is changed
         * while some drivers are loaded */
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        /* Maybe pci devices without subordinate busses shouldn't even have this
         * attribute in the first place? */
        if (!pdev->subordinate)
                return count;

        /* Is the flag going to change, or keep the value it already had? */
        if (!(pdev->subordinate->bus_flags & PCI_BUS_FLAGS_NO_MSI) ^
            !!val) {
                pdev->subordinate->bus_flags ^= PCI_BUS_FLAGS_NO_MSI;

                dev_warn(&pdev->dev, "forced subordinate bus to%s support MSI, bad things could happen\n",
                         val ? "" : " not");
        }

        return count;
}

static DEFINE_MUTEX(pci_remove_rescan_mutex);
static ssize_t bus_rescan_store(struct bus_type *bus, const char *buf,
                                size_t count)
{
        unsigned long val;
        struct pci_bus *b = NULL;

        if (kstrtoul(buf, 0, &val) < 0)
                return -EINVAL;

        if (val) {
                mutex_lock(&pci_remove_rescan_mutex);
                while ((b = pci_find_next_bus(b)) != NULL)
                        pci_rescan_bus(b);
                mutex_unlock(&pci_remove_rescan_mutex);
        }
        return count;
}

struct bus_attribute pci_bus_attrs[] = {
        __ATTR(rescan, (S_IWUSR|S_IWGRP), NULL, bus_rescan_store),
        __ATTR_NULL
};

static ssize_t
dev_rescan_store(struct device *dev, struct device_attribute *attr,
                 const char *buf, size_t count)
{
        unsigned long val;
        struct pci_dev *pdev = to_pci_dev(dev);

        if (kstrtoul(buf, 0, &val) < 0)
                return -EINVAL;

        if (val) {
                mutex_lock(&pci_remove_rescan_mutex);
                pci_rescan_bus(pdev->bus);
                mutex_unlock(&pci_remove_rescan_mutex);
        }
        return count;
}
struct device_attribute dev_rescan_attr = __ATTR(rescan, (S_IWUSR|S_IWGRP),
                                                 NULL, dev_rescan_store);

static void remove_callback(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);

        mutex_lock(&pci_remove_rescan_mutex);
        pci_stop_and_remove_bus_device(pdev);
        mutex_unlock(&pci_remove_rescan_mutex);
}

static ssize_t
remove_store(struct device *dev, struct device_attribute *dummy,
             const char *buf, size_t count)
{
        int ret = 0;
        unsigned long val;

        if (kstrtoul(buf, 0, &val) < 0)
                return -EINVAL;

        /* An attribute cannot be unregistered by one of its own methods,
         * so we have to use this roundabout approach.
         */
        if (val)
                ret = device_schedule_callback(dev, remove_callback);
        if (ret)
                count = ret;
        return count;
}
struct device_attribute dev_remove_attr = __ATTR(remove, (S_IWUSR|S_IWGRP),
                                                 NULL, remove_store);

static ssize_t
dev_bus_rescan_store(struct device *dev, struct device_attribute *attr,
                     const char *buf, size_t count)
{
        unsigned long val;
        struct pci_bus *bus = to_pci_bus(dev);

        if (kstrtoul(buf, 0, &val) < 0)
                return -EINVAL;

        if (val) {
                mutex_lock(&pci_remove_rescan_mutex);
                if (!pci_is_root_bus(bus) && list_empty(&bus->devices))
                        pci_rescan_bus_bridge_resize(bus->self);
                else
                        pci_rescan_bus(bus);
                mutex_unlock(&pci_remove_rescan_mutex);
        }
        return count;
}

#if defined(CONFIG_PM_RUNTIME) && defined(CONFIG_ACPI)
static ssize_t d3cold_allowed_store(struct device *dev,
                                    struct device_attribute *attr,
                                    const char *buf, size_t count)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        unsigned long val;

        if (kstrtoul(buf, 0, &val) < 0)
                return -EINVAL;

        pdev->d3cold_allowed = !!val;
        pm_runtime_resume(dev);

        return count;
}

static ssize_t d3cold_allowed_show(struct device *dev,
                                   struct device_attribute *attr, char *buf)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        return sprintf(buf, "%u\n", pdev->d3cold_allowed);
}
#endif

#ifdef CONFIG_PCI_IOV
static ssize_t sriov_totalvfs_show(struct device *dev,
                                   struct device_attribute *attr,
                                   char *buf)
{
        struct pci_dev *pdev = to_pci_dev(dev);

        return sprintf(buf, "%u\n", pci_sriov_get_totalvfs(pdev));
}

static ssize_t sriov_numvfs_show(struct device *dev,
                                 struct device_attribute *attr,
                                 char *buf)
{
        struct pci_dev *pdev = to_pci_dev(dev);

        return sprintf(buf, "%u\n", pdev->sriov->num_VFs);
}

/*
 * num_vfs > 0; number of VFs to enable
 * num_vfs = 0; disable all VFs
 *
 * Note: SRIOV spec doesn't allow partial VF
 *       disable, so it's all or none.
 */
static ssize_t sriov_numvfs_store(struct device *dev,
                                  struct device_attribute *attr,
                                  const char *buf, size_t count)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        int ret;
        u16 num_vfs;

        ret = kstrtou16(buf, 0, &num_vfs);
        if (ret < 0)
                return ret;

        if (num_vfs > pci_sriov_get_totalvfs(pdev))
                return -ERANGE;

        if (num_vfs == pdev->sriov->num_VFs)
                return count;		/* no change */

        /* is PF driver loaded w/callback */
        if (!pdev->driver || !pdev->driver->sriov_configure) {
                dev_info(&pdev->dev, "Driver doesn't support SRIOV configuration via sysfs\n");
                return -ENOSYS;
        }

        if (num_vfs == 0) {
                /* disable VFs */
                ret = pdev->driver->sriov_configure(pdev, 0);
                if (ret < 0)
                        return ret;
                return count;
        }

        /* enable VFs */
        if (pdev->sriov->num_VFs) {
                dev_warn(&pdev->dev, "%d VFs already enabled. Disable before enabling %d VFs\n",
                         pdev->sriov->num_VFs, num_vfs);
                return -EBUSY;
        }

        ret = pdev->driver->sriov_configure(pdev, num_vfs);
        if (ret < 0)
                return ret;

        if (ret != num_vfs)
                dev_warn(&pdev->dev, "%d VFs requested; only %d enabled\n",
                         num_vfs, ret);

        return count;
}

static struct device_attribute sriov_totalvfs_attr = __ATTR_RO(sriov_totalvfs);
static struct device_attribute sriov_numvfs_attr =
                __ATTR(sriov_numvfs, (S_IRUGO|S_IWUSR|S_IWGRP),
                       sriov_numvfs_show, sriov_numvfs_store);
#endif /* CONFIG_PCI_IOV */

struct device_attribute pci_dev_attrs[] = {
        __ATTR_RO(resource),
        __ATTR_RO(vendor),
        __ATTR_RO(device),
        __ATTR_RO(subsystem_vendor),
        __ATTR_RO(subsystem_device),
        __ATTR_RO(class),
        __ATTR_RO(irq),
        __ATTR_RO(local_cpus),
        __ATTR_RO(local_cpulist),
        __ATTR_RO(modalias),
#ifdef CONFIG_NUMA
        __ATTR_RO(numa_node),
#endif
        __ATTR_RO(dma_mask_bits),
        __ATTR_RO(consistent_dma_mask_bits),
        __ATTR(enable, 0600, is_enabled_show, is_enabled_store),
        __ATTR(broken_parity_status, (S_IRUGO|S_IWUSR),
               broken_parity_status_show, broken_parity_status_store),
        __ATTR(msi_bus, 0644, msi_bus_show, msi_bus_store),
#if defined(CONFIG_PM_RUNTIME) && defined(CONFIG_ACPI)
        __ATTR(d3cold_allowed, 0644, d3cold_allowed_show, d3cold_allowed_store),
#endif
        __ATTR_NULL,
};

struct device_attribute pcibus_dev_attrs[] = {
        __ATTR(rescan, (S_IWUSR|S_IWGRP), NULL, dev_bus_rescan_store),
        __ATTR(cpuaffinity, S_IRUGO, pci_bus_show_cpumaskaffinity, NULL),
        __ATTR(cpulistaffinity, S_IRUGO, pci_bus_show_cpulistaffinity, NULL),
        __ATTR_NULL,
};

static ssize_t
boot_vga_show(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct pci_dev *vga_dev = vga_default_device();

        if (vga_dev)
                return sprintf(buf, "%u\n", (pdev == vga_dev));

        return sprintf(buf, "%u\n",
                       !!(pdev->resource[PCI_ROM_RESOURCE].flags &
                          IORESOURCE_ROM_SHADOW));
}
struct device_attribute vga_attr = __ATTR_RO(boot_vga);

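/*
 * Reads of the "config" file are limited to the first 64 bytes (128 for
 * CardBus bridges) unless the caller has CAP_SYS_ADMIN, since some devices
 * lock up when undefined config space is read.  The request is split into
 * byte/word/dword chunks so each config access is naturally aligned.
 */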
static ssize_t
pci_read_config(struct file *filp, struct kobject *kobj,
                struct bin_attribute *bin_attr,
                char *buf, loff_t off, size_t count)
{
        struct pci_dev *dev = to_pci_dev(container_of(kobj, struct device, kobj));
        unsigned int size = 64;
        loff_t init_off = off;
        u8 *data = (u8 *)buf;

        /* Several chips lock up trying to read undefined config space */
        if (security_capable(filp->f_cred, &init_user_ns, CAP_SYS_ADMIN) == 0) {
                size = dev->cfg_size;
        } else if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) {
                size = 128;
        }

        if (off > size)
                return 0;
        if (off + count > size) {
                size -= off;
                count = size;
        } else {
                size = count;
        }

        pci_config_pm_runtime_get(dev);

        if ((off & 1) && size) {
                u8 val;
                pci_user_read_config_byte(dev, off, &val);
                data[off - init_off] = val;
                off++;
                size--;
        }

        if ((off & 3) && size > 2) {
                u16 val;
                pci_user_read_config_word(dev, off, &val);
                data[off - init_off] = val & 0xff;
                data[off - init_off + 1] = (val >> 8) & 0xff;
                off += 2;
                size -= 2;
        }

        while (size > 3) {
                u32 val;
                pci_user_read_config_dword(dev, off, &val);
                data[off - init_off] = val & 0xff;
                data[off - init_off + 1] = (val >> 8) & 0xff;
                data[off - init_off + 2] = (val >> 16) & 0xff;
                data[off - init_off + 3] = (val >> 24) & 0xff;
                off += 4;
                size -= 4;
        }

        if (size >= 2) {
                u16 val;
                pci_user_read_config_word(dev, off, &val);
                data[off - init_off] = val & 0xff;
                data[off - init_off + 1] = (val >> 8) & 0xff;
                off += 2;
                size -= 2;
        }

        if (size > 0) {
                u8 val;
                pci_user_read_config_byte(dev, off, &val);
                data[off - init_off] = val;
                off++;
                --size;
        }

        pci_config_pm_runtime_put(dev);

        return count;
}

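/*
 * Writes mirror pci_read_config(): the request is clamped to the device's
 * config space size and split into naturally aligned byte/word/dword
 * config accesses.
 */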
static ssize_t
pci_write_config(struct file *filp, struct kobject *kobj,
                 struct bin_attribute *bin_attr,
                 char *buf, loff_t off, size_t count)
{
        struct pci_dev *dev = to_pci_dev(container_of(kobj, struct device, kobj));
        unsigned int size = count;
        loff_t init_off = off;
        u8 *data = (u8 *)buf;

        if (off > dev->cfg_size)
                return 0;
        if (off + count > dev->cfg_size) {
                size = dev->cfg_size - off;
                count = size;
        }

        pci_config_pm_runtime_get(dev);

        if ((off & 1) && size) {
                pci_user_write_config_byte(dev, off, data[off - init_off]);
                off++;
                size--;
        }

        if ((off & 3) && size > 2) {
                u16 val = data[off - init_off];
                val |= (u16) data[off - init_off + 1] << 8;
                pci_user_write_config_word(dev, off, val);
                off += 2;
                size -= 2;
        }

        while (size > 3) {
                u32 val = data[off - init_off];
                val |= (u32) data[off - init_off + 1] << 8;
                val |= (u32) data[off - init_off + 2] << 16;
                val |= (u32) data[off - init_off + 3] << 24;
                pci_user_write_config_dword(dev, off, val);
                off += 4;
                size -= 4;
        }

        if (size >= 2) {
                u16 val = data[off - init_off];
                val |= (u16) data[off - init_off + 1] << 8;
                pci_user_write_config_word(dev, off, val);
                off += 2;
                size -= 2;
        }

        if (size) {
                pci_user_write_config_byte(dev, off, data[off - init_off]);
                off++;
                --size;
        }

        pci_config_pm_runtime_put(dev);

        return count;
}

static ssize_t
read_vpd_attr(struct file *filp, struct kobject *kobj,
              struct bin_attribute *bin_attr,
              char *buf, loff_t off, size_t count)
{
        struct pci_dev *dev =
                to_pci_dev(container_of(kobj, struct device, kobj));

        if (off > bin_attr->size)
                count = 0;
        else if (count > bin_attr->size - off)
                count = bin_attr->size - off;

        return pci_read_vpd(dev, off, count, buf);
}

static ssize_t
write_vpd_attr(struct file *filp, struct kobject *kobj,
               struct bin_attribute *bin_attr,
               char *buf, loff_t off, size_t count)
{
        struct pci_dev *dev =
                to_pci_dev(container_of(kobj, struct device, kobj));

        if (off > bin_attr->size)
                count = 0;
        else if (count > bin_attr->size - off)
                count = bin_attr->size - off;

        return pci_write_vpd(dev, off, count, buf);
}

#ifdef HAVE_PCI_LEGACY
/**
 * pci_read_legacy_io - read byte(s) from legacy I/O port space
 * @filp: open sysfs file
 * @kobj: kobject corresponding to file to read from
 * @bin_attr: struct bin_attribute for this file
 * @buf: buffer to store results
 * @off: offset into legacy I/O port space
 * @count: number of bytes to read
 *
 * Reads 1, 2, or 4 bytes from legacy I/O port space using an arch specific
 * callback routine (pci_legacy_read).
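 *
 * Returns -EINVAL for access sizes other than 1, 2 or 4 bytes.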
 */
static ssize_t
pci_read_legacy_io(struct file *filp, struct kobject *kobj,
                   struct bin_attribute *bin_attr,
                   char *buf, loff_t off, size_t count)
{
        struct pci_bus *bus = to_pci_bus(container_of(kobj,
                                                      struct device,
                                                      kobj));

        /* Only support 1, 2 or 4 byte accesses */
        if (count != 1 && count != 2 && count != 4)
                return -EINVAL;

        return pci_legacy_read(bus, off, (u32 *)buf, count);
}

/**
 * pci_write_legacy_io - write byte(s) to legacy I/O port space
 * @filp: open sysfs file
 * @kobj: kobject corresponding to file to read from
 * @bin_attr: struct bin_attribute for this file
 * @buf: buffer containing value to be written
 * @off: offset into legacy I/O port space
 * @count: number of bytes to write
 *
 * Writes 1, 2, or 4 bytes to legacy I/O port space using an arch specific
 * callback routine (pci_legacy_write).
 */
static ssize_t
pci_write_legacy_io(struct file *filp, struct kobject *kobj,
                    struct bin_attribute *bin_attr,
                    char *buf, loff_t off, size_t count)
{
        struct pci_bus *bus = to_pci_bus(container_of(kobj,
                                                      struct device,
                                                      kobj));
        /* Only support 1, 2 or 4 byte accesses */
        if (count != 1 && count != 2 && count != 4)
                return -EINVAL;

        return pci_legacy_write(bus, off, *(u32 *)buf, count);
}

/**
 * pci_mmap_legacy_mem - map legacy PCI memory into user memory space
 * @filp: open sysfs file
 * @kobj: kobject corresponding to device to be mapped
 * @attr: struct bin_attribute for this file
 * @vma: struct vm_area_struct passed to mmap
 *
 * Uses an arch specific callback, pci_mmap_legacy_mem_page_range, to mmap
 * legacy memory space (first meg of bus space) into application virtual
 * memory space.
 */
static int
pci_mmap_legacy_mem(struct file *filp, struct kobject *kobj,
                    struct bin_attribute *attr,
                    struct vm_area_struct *vma)
{
        struct pci_bus *bus = to_pci_bus(container_of(kobj,
                                                      struct device,
                                                      kobj));

        return pci_mmap_legacy_page_range(bus, vma, pci_mmap_mem);
}

/**
 * pci_mmap_legacy_io - map legacy PCI IO into user memory space
 * @filp: open sysfs file
 * @kobj: kobject corresponding to device to be mapped
 * @attr: struct bin_attribute for this file
 * @vma: struct vm_area_struct passed to mmap
 *
 * Uses an arch specific callback, pci_mmap_legacy_io_page_range, to mmap
 * legacy IO space (first meg of bus space) into application virtual
 * memory space.  Returns -ENOSYS if the operation isn't supported.
 */
static int
pci_mmap_legacy_io(struct file *filp, struct kobject *kobj,
                   struct bin_attribute *attr,
                   struct vm_area_struct *vma)
{
        struct pci_bus *bus = to_pci_bus(container_of(kobj,
                                                      struct device,
                                                      kobj));

        return pci_mmap_legacy_page_range(bus, vma, pci_mmap_io);
}

/**
 * pci_adjust_legacy_attr - adjustment of legacy file attributes
 * @b: bus to create files under
 * @mmap_type: I/O port or memory
 *
 * Stub implementation. Can be overridden by arch if necessary.
 */
void __weak
pci_adjust_legacy_attr(struct pci_bus *b, enum pci_mmap_state mmap_type)
{
        return;
}

/**
 * pci_create_legacy_files - create legacy I/O port and memory files
 * @b: bus to create files under
 *
 * Some platforms allow access to legacy I/O port and ISA memory space on
 * a per-bus basis.  This routine creates the files and ties them into
 * their associated read, write and mmap files from pci-sysfs.c
 *
 * On error unwind, but don't propagate the error to the caller
 * as it is ok to set up the PCI bus without these files.
 */
void pci_create_legacy_files(struct pci_bus *b)
{
        int error;

        b->legacy_io = kzalloc(sizeof(struct bin_attribute) * 2,
                               GFP_ATOMIC);
        if (!b->legacy_io)
                goto kzalloc_err;

        sysfs_bin_attr_init(b->legacy_io);
        b->legacy_io->attr.name = "legacy_io";
        b->legacy_io->size = 0xffff;
        b->legacy_io->attr.mode = S_IRUSR | S_IWUSR;
        b->legacy_io->read = pci_read_legacy_io;
        b->legacy_io->write = pci_write_legacy_io;
        b->legacy_io->mmap = pci_mmap_legacy_io;
        pci_adjust_legacy_attr(b, pci_mmap_io);
        error = device_create_bin_file(&b->dev, b->legacy_io);
        if (error)
                goto legacy_io_err;

        /* Allocated above after the legacy_io struct */
        b->legacy_mem = b->legacy_io + 1;
        sysfs_bin_attr_init(b->legacy_mem);
        b->legacy_mem->attr.name = "legacy_mem";
        b->legacy_mem->size = 1024*1024;
        b->legacy_mem->attr.mode = S_IRUSR | S_IWUSR;
        b->legacy_mem->mmap = pci_mmap_legacy_mem;
        pci_adjust_legacy_attr(b, pci_mmap_mem);
        error = device_create_bin_file(&b->dev, b->legacy_mem);
        if (error)
                goto legacy_mem_err;

        return;

legacy_mem_err:
        device_remove_bin_file(&b->dev, b->legacy_io);
legacy_io_err:
        kfree(b->legacy_io);
        b->legacy_io = NULL;
kzalloc_err:
        printk(KERN_WARNING "pci: warning: could not create legacy I/O port and ISA memory resources to sysfs\n");
        return;
}

void pci_remove_legacy_files(struct pci_bus *b)
{
        if (b->legacy_io) {
                device_remove_bin_file(&b->dev, b->legacy_io);
                device_remove_bin_file(&b->dev, b->legacy_mem);
                kfree(b->legacy_io); /* both are allocated here */
        }
}
#endif /* HAVE_PCI_LEGACY */

#ifdef HAVE_PCI_MMAP

int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma,
                  enum pci_mmap_api mmap_api)
{
        unsigned long nr, start, size, pci_start;

        if (pci_resource_len(pdev, resno) == 0)
                return 0;
        nr = vma_pages(vma);
        start = vma->vm_pgoff;
        size = ((pci_resource_len(pdev, resno) - 1) >> PAGE_SHIFT) + 1;
        pci_start = (mmap_api == PCI_MMAP_PROCFS) ?
                        pci_resource_start(pdev, resno) >> PAGE_SHIFT : 0;
        if (start >= pci_start && start < pci_start + size &&
            start + nr <= pci_start + size)
                return 1;
        return 0;
}

/**
 * pci_mmap_resource - map a PCI resource into user memory space
 * @kobj: kobject for mapping
 * @attr: struct bin_attribute for the file being mapped
 * @vma: struct vm_area_struct passed into the mmap
 * @write_combine: 1 for write_combine mapping
 *
 * Use the regular PCI mapping routines to map a PCI resource into userspace.
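 *
 * Returns -ENODEV if @attr does not match one of the device's standard BARs,
 * -EINVAL if the requested range does not fit the resource (or the memory is
 * marked exclusive), and the result of pci_mmap_page_range() otherwise.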
 */
static int
pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr,
                  struct vm_area_struct *vma, int write_combine)
{
        struct pci_dev *pdev = to_pci_dev(container_of(kobj,
                                                       struct device, kobj));
        struct resource *res = attr->private;
        enum pci_mmap_state mmap_type;
        resource_size_t start, end;
        int i;

        for (i = 0; i < PCI_ROM_RESOURCE; i++)
                if (res == &pdev->resource[i])
                        break;
        if (i >= PCI_ROM_RESOURCE)
                return -ENODEV;

        if (!pci_mmap_fits(pdev, i, vma, PCI_MMAP_SYSFS)) {
                WARN(1, "process \"%s\" tried to map 0x%08lx bytes at page 0x%08lx on %s BAR %d (start 0x%16Lx, size 0x%16Lx)\n",
                     current->comm, vma->vm_end-vma->vm_start, vma->vm_pgoff,
                     pci_name(pdev), i,
                     (u64)pci_resource_start(pdev, i),
                     (u64)pci_resource_len(pdev, i));
                return -EINVAL;
        }

        /* pci_mmap_page_range() expects the same kind of entry as coming
         * from /proc/bus/pci/ which is a "user visible" value. If this is
         * different from the resource itself, arch will do necessary fixup.
         */
        pci_resource_to_user(pdev, i, res, &start, &end);
        vma->vm_pgoff += start >> PAGE_SHIFT;
        mmap_type = res->flags & IORESOURCE_MEM ? pci_mmap_mem : pci_mmap_io;

        if (res->flags & IORESOURCE_MEM && iomem_is_exclusive(start))
                return -EINVAL;

        return pci_mmap_page_range(pdev, vma, mmap_type, write_combine);
}

static int
pci_mmap_resource_uc(struct file *filp, struct kobject *kobj,
                     struct bin_attribute *attr,
                     struct vm_area_struct *vma)
{
        return pci_mmap_resource(kobj, attr, vma, 0);
}

static int
pci_mmap_resource_wc(struct file *filp, struct kobject *kobj,
                     struct bin_attribute *attr,
                     struct vm_area_struct *vma)
{
        return pci_mmap_resource(kobj, attr, vma, 1);
}

static ssize_t
pci_resource_io(struct file *filp, struct kobject *kobj,
                struct bin_attribute *attr, char *buf,
                loff_t off, size_t count, bool write)
{
        struct pci_dev *pdev = to_pci_dev(container_of(kobj,
                                                       struct device, kobj));
        struct resource *res = attr->private;
        unsigned long port = off;
        int i;

        for (i = 0; i < PCI_ROM_RESOURCE; i++)
                if (res == &pdev->resource[i])
                        break;
        if (i >= PCI_ROM_RESOURCE)
                return -ENODEV;

        port += pci_resource_start(pdev, i);

        if (port > pci_resource_end(pdev, i))
                return 0;

        if (port + count - 1 > pci_resource_end(pdev, i))
                return -EINVAL;

        switch (count) {
        case 1:
                if (write)
                        outb(*(u8 *)buf, port);
                else
                        *(u8 *)buf = inb(port);
                return 1;
        case 2:
                if (write)
                        outw(*(u16 *)buf, port);
                else
                        *(u16 *)buf = inw(port);
                return 2;
        case 4:
                if (write)
                        outl(*(u32 *)buf, port);
                else
                        *(u32 *)buf = inl(port);
                return 4;
        }
        return -EINVAL;
}

static ssize_t
pci_read_resource_io(struct file *filp, struct kobject *kobj,
                     struct bin_attribute *attr, char *buf,
                     loff_t off, size_t count)
{
        return pci_resource_io(filp, kobj, attr, buf, off, count, false);
}

static ssize_t
pci_write_resource_io(struct file *filp, struct kobject *kobj,
                      struct bin_attribute *attr, char *buf,
                      loff_t off, size_t count)
{
        return pci_resource_io(filp, kobj, attr, buf, off, count, true);
}

/**
 * pci_remove_resource_files - cleanup resource files
 * @pdev: dev to cleanup
 *
 * If we created resource files for @pdev, remove them from sysfs and
 * free their resources.
 */
static void
pci_remove_resource_files(struct pci_dev *pdev)
{
        int i;

        for (i = 0; i < PCI_ROM_RESOURCE; i++) {
                struct bin_attribute *res_attr;

                res_attr = pdev->res_attr[i];
                if (res_attr) {
                        sysfs_remove_bin_file(&pdev->dev.kobj, res_attr);
                        kfree(res_attr);
                }

                res_attr = pdev->res_attr_wc[i];
                if (res_attr) {
                        sysfs_remove_bin_file(&pdev->dev.kobj, res_attr);
                        kfree(res_attr);
                }
        }
}

static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
{
        /* allocate attribute structure, piggyback attribute name */
        int name_len = write_combine ? 13 : 10;
        struct bin_attribute *res_attr;
        int retval;

        res_attr = kzalloc(sizeof(*res_attr) + name_len, GFP_ATOMIC);
        if (res_attr) {
                char *res_attr_name = (char *)(res_attr + 1);

                sysfs_bin_attr_init(res_attr);
                if (write_combine) {
                        pdev->res_attr_wc[num] = res_attr;
                        sprintf(res_attr_name, "resource%d_wc", num);
                        res_attr->mmap = pci_mmap_resource_wc;
                } else {
                        pdev->res_attr[num] = res_attr;
                        sprintf(res_attr_name, "resource%d", num);
                        res_attr->mmap = pci_mmap_resource_uc;
                }
                if (pci_resource_flags(pdev, num) & IORESOURCE_IO) {
                        res_attr->read = pci_read_resource_io;
                        res_attr->write = pci_write_resource_io;
                }
                res_attr->attr.name = res_attr_name;
                res_attr->attr.mode = S_IRUSR | S_IWUSR;
                res_attr->size = pci_resource_len(pdev, num);
                res_attr->private = &pdev->resource[num];
                retval = sysfs_create_bin_file(&pdev->dev.kobj, res_attr);
        } else
                retval = -ENOMEM;

        return retval;
}

/**
 * pci_create_resource_files - create resource files in sysfs for @dev
 * @pdev: dev in question
 *
 * Walk the resources in @pdev creating files for each resource available.
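 *
 * Returns 0 on success; if creating any attribute fails, the files created
 * so far are removed and the error is returned.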
 */
static int pci_create_resource_files(struct pci_dev *pdev)
{
        int i;
        int retval;

        /* Expose the PCI resources from this device as files */
        for (i = 0; i < PCI_ROM_RESOURCE; i++) {

                /* skip empty resources */
                if (!pci_resource_len(pdev, i))
                        continue;

                retval = pci_create_attr(pdev, i, 0);
                /* for prefetchable resources, create a WC mappable file */
                if (!retval && pdev->resource[i].flags & IORESOURCE_PREFETCH)
                        retval = pci_create_attr(pdev, i, 1);

                if (retval) {
                        pci_remove_resource_files(pdev);
                        return retval;
                }
        }
        return 0;
}
#else /* !HAVE_PCI_MMAP */
int __weak pci_create_resource_files(struct pci_dev *dev) { return 0; }
void __weak pci_remove_resource_files(struct pci_dev *dev) { return; }
#endif /* HAVE_PCI_MMAP */

/**
 * pci_write_rom - used to enable access to the PCI ROM display
 * @filp: sysfs file
 * @kobj: kernel object handle
 * @bin_attr: struct bin_attribute for this file
 * @buf: user input
 * @off: file offset
 * @count: number of bytes in input
 *
 * writing anything except 0 enables it
 */
static ssize_t
pci_write_rom(struct file *filp, struct kobject *kobj,
              struct bin_attribute *bin_attr,
              char *buf, loff_t off, size_t count)
{
        struct pci_dev *pdev = to_pci_dev(container_of(kobj, struct device, kobj));

        if ((off == 0) && (*buf == '0') && (count == 2))
                pdev->rom_attr_enabled = 0;
        else
                pdev->rom_attr_enabled = 1;

        return count;
}

/**
 * pci_read_rom - read a PCI ROM
 * @filp: sysfs file
 * @kobj: kernel object handle
 * @bin_attr: struct bin_attribute for this file
 * @buf: where to put the data we read from the ROM
 * @off: file offset
 * @count: number of bytes to read
 *
 * Put @count bytes starting at @off into @buf from the ROM in the PCI
 * device corresponding to @kobj.
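 *
 * The ROM must first be enabled by writing to the attribute (anything
 * other than 0; see pci_write_rom()); reads return -EINVAL otherwise.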
 */
static ssize_t
pci_read_rom(struct file *filp, struct kobject *kobj,
             struct bin_attribute *bin_attr,
             char *buf, loff_t off, size_t count)
{
        struct pci_dev *pdev = to_pci_dev(container_of(kobj, struct device, kobj));
        void __iomem *rom;
        size_t size;

        if (!pdev->rom_attr_enabled)
                return -EINVAL;

        rom = pci_map_rom(pdev, &size);	/* size starts out as PCI window size */
        if (!rom || !size)
                return -EIO;

        if (off >= size)
                count = 0;
        else {
                if (off + count > size)
                        count = size - off;

                memcpy_fromio(buf, rom + off, count);
        }
        pci_unmap_rom(pdev, rom);

        return count;
}

static struct bin_attribute pci_config_attr = {
        .attr = {
                .name = "config",
                .mode = S_IRUGO | S_IWUSR,
        },
        .size = PCI_CFG_SPACE_SIZE,
        .read = pci_read_config,
        .write = pci_write_config,
};

static struct bin_attribute pcie_config_attr = {
        .attr = {
                .name = "config",
                .mode = S_IRUGO | S_IWUSR,
        },
        .size = PCI_CFG_SPACE_EXP_SIZE,
        .read = pci_read_config,
        .write = pci_write_config,
};

int __weak pcibios_add_platform_entries(struct pci_dev *dev)
{
        return 0;
}

static ssize_t reset_store(struct device *dev,
                           struct device_attribute *attr, const char *buf,
                           size_t count)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        unsigned long val;
        ssize_t result = kstrtoul(buf, 0, &val);

        if (result < 0)
                return result;

        if (val != 1)
                return -EINVAL;

        result = pci_reset_function(pdev);
        if (result < 0)
                return result;

        return count;
}

static struct device_attribute reset_attr = __ATTR(reset, 0200, NULL, reset_store);

static int pci_create_capabilities_sysfs(struct pci_dev *dev)
{
        int retval;
        struct bin_attribute *attr;

        /*
         * If the device has VPD, try to expose it in sysfs.
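         * The binary attribute is sized from dev->vpd->len and wired up to
         * the read_vpd_attr()/write_vpd_attr() helpers above.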
         */
        if (dev->vpd) {
                attr = kzalloc(sizeof(*attr), GFP_ATOMIC);
                if (!attr)
                        return -ENOMEM;

                sysfs_bin_attr_init(attr);
                attr->size = dev->vpd->len;
                attr->attr.name = "vpd";
                attr->attr.mode = S_IRUSR | S_IWUSR;
                attr->read = read_vpd_attr;
                attr->write = write_vpd_attr;
                retval = sysfs_create_bin_file(&dev->dev.kobj, attr);
                if (retval) {
                        kfree(attr);
                        return retval;
                }
                dev->vpd->attr = attr;
        }

        /* Active State Power Management */
        pcie_aspm_create_sysfs_dev_files(dev);

        if (!pci_probe_reset_function(dev)) {
                retval = device_create_file(&dev->dev, &reset_attr);
                if (retval)
                        goto error;
                dev->reset_fn = 1;
        }
        return 0;

error:
        pcie_aspm_remove_sysfs_dev_files(dev);
        if (dev->vpd && dev->vpd->attr) {
                sysfs_remove_bin_file(&dev->dev.kobj, dev->vpd->attr);
                kfree(dev->vpd->attr);
        }

        return retval;
}

int __must_check pci_create_sysfs_dev_files(struct pci_dev *pdev)
{
        int retval;
        int rom_size = 0;
        struct bin_attribute *attr;

        if (!sysfs_initialized)
                return -EACCES;

        if (pdev->cfg_size < PCI_CFG_SPACE_EXP_SIZE)
                retval = sysfs_create_bin_file(&pdev->dev.kobj, &pci_config_attr);
        else
                retval = sysfs_create_bin_file(&pdev->dev.kobj, &pcie_config_attr);
        if (retval)
                goto err;

        retval = pci_create_resource_files(pdev);
        if (retval)
                goto err_config_file;

        if (pci_resource_len(pdev, PCI_ROM_RESOURCE))
                rom_size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
        else if (pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW)
                rom_size = 0x20000;

        /*
         * If the device has a ROM, try to expose it in sysfs.
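         * Reads go through pci_read_rom(), which returns -EINVAL until the
         * ROM is explicitly enabled by writing to the attribute.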
         */
        if (rom_size) {
                attr = kzalloc(sizeof(*attr), GFP_ATOMIC);
                if (!attr) {
                        retval = -ENOMEM;
                        goto err_resource_files;
                }
                sysfs_bin_attr_init(attr);
                attr->size = rom_size;
                attr->attr.name = "rom";
                attr->attr.mode = S_IRUSR | S_IWUSR;
                attr->read = pci_read_rom;
                attr->write = pci_write_rom;
                retval = sysfs_create_bin_file(&pdev->dev.kobj, attr);
                if (retval) {
                        kfree(attr);
                        goto err_resource_files;
                }
                pdev->rom_attr = attr;
        }

        /* add platform-specific attributes */
        retval = pcibios_add_platform_entries(pdev);
        if (retval)
                goto err_rom_file;

        /* add sysfs entries for various capabilities */
        retval = pci_create_capabilities_sysfs(pdev);
        if (retval)
                goto err_rom_file;

        pci_create_firmware_label_files(pdev);

        return 0;

err_rom_file:
        if (rom_size) {
                sysfs_remove_bin_file(&pdev->dev.kobj, pdev->rom_attr);
                kfree(pdev->rom_attr);
                pdev->rom_attr = NULL;
        }
err_resource_files:
        pci_remove_resource_files(pdev);
err_config_file:
        if (pdev->cfg_size < PCI_CFG_SPACE_EXP_SIZE)
                sysfs_remove_bin_file(&pdev->dev.kobj, &pci_config_attr);
        else
                sysfs_remove_bin_file(&pdev->dev.kobj, &pcie_config_attr);
err:
        return retval;
}

static void pci_remove_capabilities_sysfs(struct pci_dev *dev)
{
        if (dev->vpd && dev->vpd->attr) {
                sysfs_remove_bin_file(&dev->dev.kobj, dev->vpd->attr);
                kfree(dev->vpd->attr);
        }

        pcie_aspm_remove_sysfs_dev_files(dev);
        if (dev->reset_fn) {
                device_remove_file(&dev->dev, &reset_attr);
                dev->reset_fn = 0;
        }
}

/**
 * pci_remove_sysfs_dev_files - cleanup PCI specific sysfs files
 * @pdev: device whose entries we should free
 *
 * Cleanup when @pdev is removed from sysfs.
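 *
 * Undoes pci_create_sysfs_dev_files(): capability files, the config,
 * resource and ROM files, and any firmware label files are removed.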
 */
void pci_remove_sysfs_dev_files(struct pci_dev *pdev)
{
        int rom_size = 0;

        if (!sysfs_initialized)
                return;

        pci_remove_capabilities_sysfs(pdev);

        if (pdev->cfg_size < PCI_CFG_SPACE_EXP_SIZE)
                sysfs_remove_bin_file(&pdev->dev.kobj, &pci_config_attr);
        else
                sysfs_remove_bin_file(&pdev->dev.kobj, &pcie_config_attr);

        pci_remove_resource_files(pdev);

        if (pci_resource_len(pdev, PCI_ROM_RESOURCE))
                rom_size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
        else if (pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW)
                rom_size = 0x20000;

        if (rom_size && pdev->rom_attr) {
                sysfs_remove_bin_file(&pdev->dev.kobj, pdev->rom_attr);
                kfree(pdev->rom_attr);
        }

        pci_remove_firmware_label_files(pdev);
}

static int __init pci_sysfs_init(void)
{
        struct pci_dev *pdev = NULL;
        int retval;

        sysfs_initialized = 1;
        for_each_pci_dev(pdev) {
                retval = pci_create_sysfs_dev_files(pdev);
                if (retval) {
                        pci_dev_put(pdev);
                        return retval;
                }
        }

        return 0;
}

late_initcall(pci_sysfs_init);

static struct attribute *pci_dev_dev_attrs[] = {
        &vga_attr.attr,
        NULL,
};

static umode_t pci_dev_attrs_are_visible(struct kobject *kobj,
                                         struct attribute *a, int n)
{
        struct device *dev = container_of(kobj, struct device, kobj);
        struct pci_dev *pdev = to_pci_dev(dev);

        if (a == &vga_attr.attr)
                if ((pdev->class >> 8) != PCI_CLASS_DISPLAY_VGA)
                        return 0;

        return a->mode;
}

static struct attribute *pci_dev_hp_attrs[] = {
        &dev_remove_attr.attr,
        &dev_rescan_attr.attr,
        NULL,
};

static umode_t pci_dev_hp_attrs_are_visible(struct kobject *kobj,
                                            struct attribute *a, int n)
{
        struct device *dev = container_of(kobj, struct device, kobj);
        struct pci_dev *pdev = to_pci_dev(dev);

        if (pdev->is_virtfn)
                return 0;

        return a->mode;
}

static struct attribute_group pci_dev_hp_attr_group = {
        .attrs = pci_dev_hp_attrs,
        .is_visible = pci_dev_hp_attrs_are_visible,
};

#ifdef CONFIG_PCI_IOV
static struct attribute *sriov_dev_attrs[] = {
        &sriov_totalvfs_attr.attr,
        &sriov_numvfs_attr.attr,
        NULL,
};

static umode_t sriov_attrs_are_visible(struct kobject *kobj,
                                       struct attribute *a, int n)
{
        struct device *dev = container_of(kobj, struct device, kobj);

        if (!dev_is_pf(dev))
                return 0;

        return a->mode;
}

static struct attribute_group sriov_dev_attr_group = {
        .attrs = sriov_dev_attrs,
        .is_visible = sriov_attrs_are_visible,
};
#endif /* CONFIG_PCI_IOV */

static struct attribute_group pci_dev_attr_group = {
        .attrs = pci_dev_dev_attrs,
        .is_visible = pci_dev_attrs_are_visible,
};

static const struct attribute_group *pci_dev_attr_groups[] = {
        &pci_dev_attr_group,
        &pci_dev_hp_attr_group,
#ifdef CONFIG_PCI_IOV
        &sriov_dev_attr_group,
#endif
        NULL,
};

struct device_type pci_dev_type = {
        .groups = pci_dev_attr_groups,
};