1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * (C) Copyright 2002-2004 Greg Kroah-Hartman <greg@kroah.com>
4 * (C) Copyright 2002-2004 IBM Corp.
5 * (C) Copyright 2003 Matthew Wilcox
6 * (C) Copyright 2003 Hewlett-Packard
7 * (C) Copyright 2004 Jon Smirl <jonsmirl@yahoo.com>
8 * (C) Copyright 2004 Silicon Graphics, Inc. Jesse Barnes <jbarnes@sgi.com>
9 *
10 * File attributes for PCI devices
11 *
12 * Modeled after usb's driverfs.c
13 */
14
15 #include <linux/bitfield.h>
16 #include <linux/kernel.h>
17 #include <linux/sched.h>
18 #include <linux/pci.h>
19 #include <linux/stat.h>
20 #include <linux/export.h>
21 #include <linux/topology.h>
22 #include <linux/mm.h>
23 #include <linux/fs.h>
24 #include <linux/capability.h>
25 #include <linux/security.h>
26 #include <linux/slab.h>
27 #include <linux/vgaarb.h>
28 #include <linux/pm_runtime.h>
29 #include <linux/msi.h>
30 #include <linux/of.h>
31 #include <linux/aperture.h>
32 #include "pci.h"
33
34 static int sysfs_initialized; /* = 0 */
35
36 /* show configuration fields */
37 #define pci_config_attr(field, format_string) \
38 static ssize_t \
39 field##_show(struct device *dev, struct device_attribute *attr, char *buf) \
40 { \
41 struct pci_dev *pdev; \
42 \
43 pdev = to_pci_dev(dev); \
44 return sysfs_emit(buf, format_string, pdev->field); \
45 } \
46 static DEVICE_ATTR_RO(field)
47
48 pci_config_attr(vendor, "0x%04x\n");
49 pci_config_attr(device, "0x%04x\n");
50 pci_config_attr(subsystem_vendor, "0x%04x\n");
51 pci_config_attr(subsystem_device, "0x%04x\n");
52 pci_config_attr(revision, "0x%02x\n");
53 pci_config_attr(class, "0x%06x\n");
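/*
 * Illustrative userspace view of the attributes generated above (the device
 * address and values below are hypothetical examples):
 *
 *   $ cat /sys/bus/pci/devices/0000:00:1f.2/vendor
 *   0x8086
 *   $ cat /sys/bus/pci/devices/0000:00:1f.2/class
 *   0x010601
 */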
54
55 static ssize_t irq_show(struct device *dev,
56 struct device_attribute *attr,
57 char *buf)
58 {
59 struct pci_dev *pdev = to_pci_dev(dev);
60
61 #ifdef CONFIG_PCI_MSI
62 /*
63 * For MSI, show the first MSI IRQ; for all other cases including
64 * MSI-X, show the legacy INTx IRQ.
65 */
66 if (pdev->msi_enabled)
67 return sysfs_emit(buf, "%u\n", pci_irq_vector(pdev, 0));
68 #endif
69
70 return sysfs_emit(buf, "%u\n", pdev->irq);
71 }
72 static DEVICE_ATTR_RO(irq);
73
74 static ssize_t broken_parity_status_show(struct device *dev,
75 struct device_attribute *attr,
76 char *buf)
77 {
78 struct pci_dev *pdev = to_pci_dev(dev);
79 return sysfs_emit(buf, "%u\n", pdev->broken_parity_status);
80 }
81
82 static ssize_t broken_parity_status_store(struct device *dev,
83 struct device_attribute *attr,
84 const char *buf, size_t count)
85 {
86 struct pci_dev *pdev = to_pci_dev(dev);
87 unsigned long val;
88
89 if (kstrtoul(buf, 0, &val) < 0)
90 return -EINVAL;
91
92 pdev->broken_parity_status = !!val;
93
94 return count;
95 }
96 static DEVICE_ATTR_RW(broken_parity_status);
97
98 static ssize_t pci_dev_show_local_cpu(struct device *dev, bool list,
99 struct device_attribute *attr, char *buf)
100 {
101 const struct cpumask *mask;
102
103 #ifdef CONFIG_NUMA
104 if (dev_to_node(dev) == NUMA_NO_NODE)
105 mask = cpu_online_mask;
106 else
107 mask = cpumask_of_node(dev_to_node(dev));
108 #else
109 mask = cpumask_of_pcibus(to_pci_dev(dev)->bus);
110 #endif
111 return cpumap_print_to_pagebuf(list, buf, mask);
112 }
113
114 static ssize_t local_cpus_show(struct device *dev,
115 struct device_attribute *attr, char *buf)
116 {
117 return pci_dev_show_local_cpu(dev, false, attr, buf);
118 }
119 static DEVICE_ATTR_RO(local_cpus);
120
121 static ssize_t local_cpulist_show(struct device *dev,
122 struct device_attribute *attr, char *buf)
123 {
124 return pci_dev_show_local_cpu(dev, true, attr, buf);
125 }
126 static DEVICE_ATTR_RO(local_cpulist);
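/*
 * Sketch of the two output formats (values illustrative): local_cpus emits a
 * hex CPU mask such as "ff", while local_cpulist emits the same set as a
 * range list such as "0-7".
 */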
127
128 /*
129 * PCI Bus Class Devices
130 */
131 static ssize_t cpuaffinity_show(struct device *dev,
132 struct device_attribute *attr, char *buf)
133 {
134 const struct cpumask *cpumask = cpumask_of_pcibus(to_pci_bus(dev));
135
136 return cpumap_print_to_pagebuf(false, buf, cpumask);
137 }
138 static DEVICE_ATTR_RO(cpuaffinity);
139
140 static ssize_t cpulistaffinity_show(struct device *dev,
141 struct device_attribute *attr, char *buf)
142 {
143 const struct cpumask *cpumask = cpumask_of_pcibus(to_pci_bus(dev));
144
145 return cpumap_print_to_pagebuf(true, buf, cpumask);
146 }
147 static DEVICE_ATTR_RO(cpulistaffinity);
148
149 static ssize_t power_state_show(struct device *dev,
150 struct device_attribute *attr, char *buf)
151 {
152 struct pci_dev *pdev = to_pci_dev(dev);
153
154 return sysfs_emit(buf, "%s\n", pci_power_name(pdev->current_state));
155 }
156 static DEVICE_ATTR_RO(power_state);
157
158 /* show resources */
159 static ssize_t resource_show(struct device *dev, struct device_attribute *attr,
160 char *buf)
161 {
162 struct pci_dev *pci_dev = to_pci_dev(dev);
163 int i;
164 int max;
165 resource_size_t start, end;
166 size_t len = 0;
167
168 if (pci_dev->subordinate)
169 max = DEVICE_COUNT_RESOURCE;
170 else
171 max = PCI_BRIDGE_RESOURCES;
172
173 for (i = 0; i < max; i++) {
174 struct resource *res = &pci_dev->resource[i];
175 pci_resource_to_user(pci_dev, i, res, &start, &end);
176 len += sysfs_emit_at(buf, len, "0x%016llx 0x%016llx 0x%016llx\n",
177 (unsigned long long)start,
178 (unsigned long long)end,
179 (unsigned long long)res->flags);
180 }
181 return len;
182 }
183 static DEVICE_ATTR_RO(resource);
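/*
 * Each line of "resource" carries start, end and flags of one resource as
 * 64-bit hex values, e.g. (illustrative values):
 *
 *   0x00000000fe000000 0x00000000fe01ffff 0x0000000000040200
 */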
184
185 static ssize_t max_link_speed_show(struct device *dev,
186 struct device_attribute *attr, char *buf)
187 {
188 struct pci_dev *pdev = to_pci_dev(dev);
189
190 return sysfs_emit(buf, "%s\n",
191 pci_speed_string(pcie_get_speed_cap(pdev)));
192 }
193 static DEVICE_ATTR_RO(max_link_speed);
194
195 static ssize_t max_link_width_show(struct device *dev,
196 struct device_attribute *attr, char *buf)
197 {
198 struct pci_dev *pdev = to_pci_dev(dev);
199
200 return sysfs_emit(buf, "%u\n", pcie_get_width_cap(pdev));
201 }
202 static DEVICE_ATTR_RO(max_link_width);
203
204 static ssize_t current_link_speed_show(struct device *dev,
205 struct device_attribute *attr, char *buf)
206 {
207 struct pci_dev *pci_dev = to_pci_dev(dev);
208 u16 linkstat;
209 int err;
210 enum pci_bus_speed speed;
211
212 err = pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &linkstat);
213 if (err)
214 return -EINVAL;
215
216 speed = pcie_link_speed[linkstat & PCI_EXP_LNKSTA_CLS];
217
218 return sysfs_emit(buf, "%s\n", pci_speed_string(speed));
219 }
220 static DEVICE_ATTR_RO(current_link_speed);
221
222 static ssize_t current_link_width_show(struct device *dev,
223 struct device_attribute *attr, char *buf)
224 {
225 struct pci_dev *pci_dev = to_pci_dev(dev);
226 u16 linkstat;
227 int err;
228
229 err = pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &linkstat);
230 if (err)
231 return -EINVAL;
232
233 return sysfs_emit(buf, "%u\n", FIELD_GET(PCI_EXP_LNKSTA_NLW, linkstat));
234 }
235 static DEVICE_ATTR_RO(current_link_width);
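/*
 * Together with max_link_speed/max_link_width above, these describe the PCIe
 * link, e.g. (illustrative) "8.0 GT/s PCIe" and "4"; reads return -EINVAL if
 * the Link Status register cannot be read.
 */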
236
237 static ssize_t secondary_bus_number_show(struct device *dev,
238 struct device_attribute *attr,
239 char *buf)
240 {
241 struct pci_dev *pci_dev = to_pci_dev(dev);
242 u8 sec_bus;
243 int err;
244
245 err = pci_read_config_byte(pci_dev, PCI_SECONDARY_BUS, &sec_bus);
246 if (err)
247 return -EINVAL;
248
249 return sysfs_emit(buf, "%u\n", sec_bus);
250 }
251 static DEVICE_ATTR_RO(secondary_bus_number);
252
253 static ssize_t subordinate_bus_number_show(struct device *dev,
254 struct device_attribute *attr,
255 char *buf)
256 {
257 struct pci_dev *pci_dev = to_pci_dev(dev);
258 u8 sub_bus;
259 int err;
260
261 err = pci_read_config_byte(pci_dev, PCI_SUBORDINATE_BUS, &sub_bus);
262 if (err)
263 return -EINVAL;
264
265 return sysfs_emit(buf, "%u\n", sub_bus);
266 }
267 static DEVICE_ATTR_RO(subordinate_bus_number);
268
269 static ssize_t ari_enabled_show(struct device *dev,
270 struct device_attribute *attr,
271 char *buf)
272 {
273 struct pci_dev *pci_dev = to_pci_dev(dev);
274
275 return sysfs_emit(buf, "%u\n", pci_ari_enabled(pci_dev->bus));
276 }
277 static DEVICE_ATTR_RO(ari_enabled);
278
279 static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
280 char *buf)
281 {
282 struct pci_dev *pci_dev = to_pci_dev(dev);
283
284 return sysfs_emit(buf, "pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02X\n",
285 pci_dev->vendor, pci_dev->device,
286 pci_dev->subsystem_vendor, pci_dev->subsystem_device,
287 (u8)(pci_dev->class >> 16), (u8)(pci_dev->class >> 8),
288 (u8)(pci_dev->class));
289 }
290 static DEVICE_ATTR_RO(modalias);
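/*
 * The emitted string follows the pci: modalias format used for module
 * matching, e.g. (hypothetical IDs):
 *
 *   pci:v00008086d00001533sv00008086sd00000000bc02sc00i00
 */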
291
292 static ssize_t enable_store(struct device *dev, struct device_attribute *attr,
293 const char *buf, size_t count)
294 {
295 struct pci_dev *pdev = to_pci_dev(dev);
296 unsigned long val;
297 ssize_t result = 0;
298
299 /* this can crash the machine when done on the "wrong" device */
300 if (!capable(CAP_SYS_ADMIN))
301 return -EPERM;
302
303 if (kstrtoul(buf, 0, &val) < 0)
304 return -EINVAL;
305
306 device_lock(dev);
307 if (dev->driver)
308 result = -EBUSY;
309 else if (val)
310 result = pci_enable_device(pdev);
311 else if (pci_is_enabled(pdev))
312 pci_disable_device(pdev);
313 else
314 result = -EIO;
315 device_unlock(dev);
316
317 return result < 0 ? result : count;
318 }
319
320 static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
321 char *buf)
322 {
323 struct pci_dev *pdev;
324
325 pdev = to_pci_dev(dev);
326 return sysfs_emit(buf, "%u\n", atomic_read(&pdev->enable_cnt));
327 }
328 static DEVICE_ATTR_RW(enable);
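/*
 * Reading "enable" returns the current enable count; writing requires
 * CAP_SYS_ADMIN and no bound driver, e.g. (path hypothetical):
 *
 *   # echo 1 > /sys/bus/pci/devices/0000:00:1f.2/enable
 */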
329
330 #ifdef CONFIG_NUMA
331 static ssize_t numa_node_store(struct device *dev,
332 struct device_attribute *attr, const char *buf,
333 size_t count)
334 {
335 struct pci_dev *pdev = to_pci_dev(dev);
336 int node;
337
338 if (!capable(CAP_SYS_ADMIN))
339 return -EPERM;
340
341 if (kstrtoint(buf, 0, &node) < 0)
342 return -EINVAL;
343
344 if ((node < 0 && node != NUMA_NO_NODE) || node >= MAX_NUMNODES)
345 return -EINVAL;
346
347 if (node != NUMA_NO_NODE && !node_online(node))
348 return -EINVAL;
349
350 add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
351 pci_alert(pdev, FW_BUG "Overriding NUMA node to %d. Contact your vendor for updates.",
352 node);
353
354 dev->numa_node = node;
355 return count;
356 }
357
358 static ssize_t numa_node_show(struct device *dev, struct device_attribute *attr,
359 char *buf)
360 {
361 return sysfs_emit(buf, "%d\n", dev->numa_node);
362 }
363 static DEVICE_ATTR_RW(numa_node);
364 #endif
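/*
 * Overriding numa_node is a firmware-bug workaround: it needs CAP_SYS_ADMIN,
 * taints the kernel and logs a FW_BUG alert, e.g. (address hypothetical):
 *
 *   # echo 0 > /sys/bus/pci/devices/0000:17:00.0/numa_node
 */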
365
366 static ssize_t dma_mask_bits_show(struct device *dev,
367 struct device_attribute *attr, char *buf)
368 {
369 struct pci_dev *pdev = to_pci_dev(dev);
370
371 return sysfs_emit(buf, "%d\n", fls64(pdev->dma_mask));
372 }
373 static DEVICE_ATTR_RO(dma_mask_bits);
374
375 static ssize_t consistent_dma_mask_bits_show(struct device *dev,
376 struct device_attribute *attr,
377 char *buf)
378 {
379 return sysfs_emit(buf, "%d\n", fls64(dev->coherent_dma_mask));
380 }
381 static DEVICE_ATTR_RO(consistent_dma_mask_bits);
382
383 static ssize_t msi_bus_show(struct device *dev, struct device_attribute *attr,
384 char *buf)
385 {
386 struct pci_dev *pdev = to_pci_dev(dev);
387 struct pci_bus *subordinate = pdev->subordinate;
388
389 return sysfs_emit(buf, "%u\n", subordinate ?
390 !(subordinate->bus_flags & PCI_BUS_FLAGS_NO_MSI)
391 : !pdev->no_msi);
392 }
393
394 static ssize_t msi_bus_store(struct device *dev, struct device_attribute *attr,
395 const char *buf, size_t count)
396 {
397 struct pci_dev *pdev = to_pci_dev(dev);
398 struct pci_bus *subordinate = pdev->subordinate;
399 unsigned long val;
400
401 if (!capable(CAP_SYS_ADMIN))
402 return -EPERM;
403
404 if (kstrtoul(buf, 0, &val) < 0)
405 return -EINVAL;
406
407 /*
408 * "no_msi" and "bus_flags" only affect what happens when a driver
409 * requests MSI or MSI-X. They don't affect any drivers that have
410 * already requested MSI or MSI-X.
411 */
412 if (!subordinate) {
413 pdev->no_msi = !val;
414 pci_info(pdev, "MSI/MSI-X %s for future drivers\n",
415 val ? "allowed" : "disallowed");
416 return count;
417 }
418
419 if (val)
420 subordinate->bus_flags &= ~PCI_BUS_FLAGS_NO_MSI;
421 else
422 subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;
423
424 dev_info(&subordinate->dev, "MSI/MSI-X %s for future drivers of devices on this bus\n",
425 val ? "allowed" : "disallowed");
426 return count;
427 }
428 static DEVICE_ATTR_RW(msi_bus);
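/*
 * On an endpoint "msi_bus" toggles pdev->no_msi; on a bridge it sets or
 * clears PCI_BUS_FLAGS_NO_MSI on the subordinate bus. Either way, only
 * drivers that request MSI/MSI-X afterwards are affected.
 */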
429
430 static ssize_t rescan_store(const struct bus_type *bus, const char *buf, size_t count)
431 {
432 unsigned long val;
433 struct pci_bus *b = NULL;
434
435 if (kstrtoul(buf, 0, &val) < 0)
436 return -EINVAL;
437
438 if (val) {
439 pci_lock_rescan_remove();
440 while ((b = pci_find_next_bus(b)) != NULL)
441 pci_rescan_bus(b);
442 pci_unlock_rescan_remove();
443 }
444 return count;
445 }
446 static BUS_ATTR_WO(rescan);
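/*
 * Writing a non-zero value rescans every root bus, e.g.:
 *
 *   # echo 1 > /sys/bus/pci/rescan
 */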
447
448 static struct attribute *pci_bus_attrs[] = {
449 &bus_attr_rescan.attr,
450 NULL,
451 };
452
453 static const struct attribute_group pci_bus_group = {
454 .attrs = pci_bus_attrs,
455 };
456
457 const struct attribute_group *pci_bus_groups[] = {
458 &pci_bus_group,
459 NULL,
460 };
461
462 static ssize_t dev_rescan_store(struct device *dev,
463 struct device_attribute *attr, const char *buf,
464 size_t count)
465 {
466 unsigned long val;
467 struct pci_dev *pdev = to_pci_dev(dev);
468
469 if (kstrtoul(buf, 0, &val) < 0)
470 return -EINVAL;
471
472 if (val) {
473 pci_lock_rescan_remove();
474 pci_rescan_bus(pdev->bus);
475 pci_unlock_rescan_remove();
476 }
477 return count;
478 }
479 static struct device_attribute dev_attr_dev_rescan = __ATTR(rescan, 0200, NULL,
480 dev_rescan_store);
481
482 static ssize_t remove_store(struct device *dev, struct device_attribute *attr,
483 const char *buf, size_t count)
484 {
485 unsigned long val;
486
487 if (kstrtoul(buf, 0, &val) < 0)
488 return -EINVAL;
489
490 if (val && device_remove_file_self(dev, attr))
491 pci_stop_and_remove_bus_device_locked(to_pci_dev(dev));
492 return count;
493 }
494 static DEVICE_ATTR_IGNORE_LOCKDEP(remove, 0220, NULL,
495 remove_store);
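/*
 * Writing 1 removes the attribute file itself and then stops and removes the
 * device, e.g. (address hypothetical):
 *
 *   # echo 1 > /sys/bus/pci/devices/0000:03:00.0/remove
 */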
496
497 static ssize_t bus_rescan_store(struct device *dev,
498 struct device_attribute *attr,
499 const char *buf, size_t count)
500 {
501 unsigned long val;
502 struct pci_bus *bus = to_pci_bus(dev);
503
504 if (kstrtoul(buf, 0, &val) < 0)
505 return -EINVAL;
506
507 if (val) {
508 pci_lock_rescan_remove();
509 if (!pci_is_root_bus(bus) && list_empty(&bus->devices))
510 pci_rescan_bus_bridge_resize(bus->self);
511 else
512 pci_rescan_bus(bus);
513 pci_unlock_rescan_remove();
514 }
515 return count;
516 }
517 static struct device_attribute dev_attr_bus_rescan = __ATTR(rescan, 0200, NULL,
518 bus_rescan_store);
519
520 static ssize_t reset_subordinate_store(struct device *dev,
521 struct device_attribute *attr,
522 const char *buf, size_t count)
523 {
524 struct pci_dev *pdev = to_pci_dev(dev);
525 struct pci_bus *bus = pdev->subordinate;
526 unsigned long val;
527
528 if (!capable(CAP_SYS_ADMIN))
529 return -EPERM;
530
531 if (kstrtoul(buf, 0, &val) < 0)
532 return -EINVAL;
533
534 if (val) {
535 int ret = __pci_reset_bus(bus);
536
537 if (ret)
538 return ret;
539 }
540
541 return count;
542 }
543 static DEVICE_ATTR_WO(reset_subordinate);
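/*
 * Writing 1 to a bridge's "reset_subordinate" resets the bus below it via
 * __pci_reset_bus(); requires CAP_SYS_ADMIN.
 */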
544
545 #if defined(CONFIG_PM) && defined(CONFIG_ACPI)
546 static ssize_t d3cold_allowed_store(struct device *dev,
547 struct device_attribute *attr,
548 const char *buf, size_t count)
549 {
550 struct pci_dev *pdev = to_pci_dev(dev);
551 unsigned long val;
552
553 if (kstrtoul(buf, 0, &val) < 0)
554 return -EINVAL;
555
556 pdev->d3cold_allowed = !!val;
557 pci_bridge_d3_update(pdev);
558
559 pm_runtime_resume(dev);
560
561 return count;
562 }
563
564 static ssize_t d3cold_allowed_show(struct device *dev,
565 struct device_attribute *attr, char *buf)
566 {
567 struct pci_dev *pdev = to_pci_dev(dev);
568 return sysfs_emit(buf, "%u\n", pdev->d3cold_allowed);
569 }
570 static DEVICE_ATTR_RW(d3cold_allowed);
571 #endif
572
573 #ifdef CONFIG_OF
574 static ssize_t devspec_show(struct device *dev,
575 struct device_attribute *attr, char *buf)
576 {
577 struct pci_dev *pdev = to_pci_dev(dev);
578 struct device_node *np = pci_device_to_OF_node(pdev);
579
580 if (np == NULL)
581 return 0;
582 return sysfs_emit(buf, "%pOF\n", np);
583 }
584 static DEVICE_ATTR_RO(devspec);
585 #endif
586
587 static ssize_t driver_override_store(struct device *dev,
588 struct device_attribute *attr,
589 const char *buf, size_t count)
590 {
591 struct pci_dev *pdev = to_pci_dev(dev);
592 int ret;
593
594 ret = driver_set_override(dev, &pdev->driver_override, buf, count);
595 if (ret)
596 return ret;
597
598 return count;
599 }
600
601 static ssize_t driver_override_show(struct device *dev,
602 struct device_attribute *attr, char *buf)
603 {
604 struct pci_dev *pdev = to_pci_dev(dev);
605 ssize_t len;
606
607 device_lock(dev);
608 len = sysfs_emit(buf, "%s\n", pdev->driver_override);
609 device_unlock(dev);
610 return len;
611 }
612 static DEVICE_ATTR_RW(driver_override);
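/*
 * Typical usage (driver name and address are examples): steer an unbound
 * device to a specific driver on the next probe:
 *
 *   # echo vfio-pci > /sys/bus/pci/devices/0000:03:00.0/driver_override
 *   # echo 0000:03:00.0 > /sys/bus/pci/drivers_probe
 */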
613
614 static struct attribute *pci_dev_attrs[] = {
615 &dev_attr_power_state.attr,
616 &dev_attr_resource.attr,
617 &dev_attr_vendor.attr,
618 &dev_attr_device.attr,
619 &dev_attr_subsystem_vendor.attr,
620 &dev_attr_subsystem_device.attr,
621 &dev_attr_revision.attr,
622 &dev_attr_class.attr,
623 &dev_attr_irq.attr,
624 &dev_attr_local_cpus.attr,
625 &dev_attr_local_cpulist.attr,
626 &dev_attr_modalias.attr,
627 #ifdef CONFIG_NUMA
628 &dev_attr_numa_node.attr,
629 #endif
630 &dev_attr_dma_mask_bits.attr,
631 &dev_attr_consistent_dma_mask_bits.attr,
632 &dev_attr_enable.attr,
633 &dev_attr_broken_parity_status.attr,
634 &dev_attr_msi_bus.attr,
635 #if defined(CONFIG_PM) && defined(CONFIG_ACPI)
636 &dev_attr_d3cold_allowed.attr,
637 #endif
638 #ifdef CONFIG_OF
639 &dev_attr_devspec.attr,
640 #endif
641 &dev_attr_driver_override.attr,
642 &dev_attr_ari_enabled.attr,
643 NULL,
644 };
645
646 static struct attribute *pci_bridge_attrs[] = {
647 &dev_attr_subordinate_bus_number.attr,
648 &dev_attr_secondary_bus_number.attr,
649 &dev_attr_reset_subordinate.attr,
650 NULL,
651 };
652
653 static struct attribute *pcie_dev_attrs[] = {
654 &dev_attr_current_link_speed.attr,
655 &dev_attr_current_link_width.attr,
656 &dev_attr_max_link_width.attr,
657 &dev_attr_max_link_speed.attr,
658 NULL,
659 };
660
661 static struct attribute *pcibus_attrs[] = {
662 &dev_attr_bus_rescan.attr,
663 &dev_attr_cpuaffinity.attr,
664 &dev_attr_cpulistaffinity.attr,
665 NULL,
666 };
667
668 static const struct attribute_group pcibus_group = {
669 .attrs = pcibus_attrs,
670 };
671
672 const struct attribute_group *pcibus_groups[] = {
673 &pcibus_group,
674 NULL,
675 };
676
677 static ssize_t boot_vga_show(struct device *dev, struct device_attribute *attr,
678 char *buf)
679 {
680 struct pci_dev *pdev = to_pci_dev(dev);
681 struct pci_dev *vga_dev = vga_default_device();
682
683 if (vga_dev)
684 return sysfs_emit(buf, "%u\n", (pdev == vga_dev));
685
686 return sysfs_emit(buf, "%u\n",
687 !!(pdev->resource[PCI_ROM_RESOURCE].flags &
688 IORESOURCE_ROM_SHADOW));
689 }
690 static DEVICE_ATTR_RO(boot_vga);
691
692 static ssize_t pci_read_config(struct file *filp, struct kobject *kobj,
693 struct bin_attribute *bin_attr, char *buf,
694 loff_t off, size_t count)
695 {
696 struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj));
697 unsigned int size = 64;
698 loff_t init_off = off;
699 u8 *data = (u8 *) buf;
700
701 /* Several chips lock up trying to read undefined config space */
702 if (file_ns_capable(filp, &init_user_ns, CAP_SYS_ADMIN))
703 size = dev->cfg_size;
704 else if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
705 size = 128;
706
707 if (off > size)
708 return 0;
709 if (off + count > size) {
710 size -= off;
711 count = size;
712 } else {
713 size = count;
714 }
715
716 pci_config_pm_runtime_get(dev);
717
718 if ((off & 1) && size) {
719 u8 val;
720 pci_user_read_config_byte(dev, off, &val);
721 data[off - init_off] = val;
722 off++;
723 size--;
724 }
725
726 if ((off & 3) && size > 2) {
727 u16 val;
728 pci_user_read_config_word(dev, off, &val);
729 data[off - init_off] = val & 0xff;
730 data[off - init_off + 1] = (val >> 8) & 0xff;
731 off += 2;
732 size -= 2;
733 }
734
735 while (size > 3) {
736 u32 val;
737 pci_user_read_config_dword(dev, off, &val);
738 data[off - init_off] = val & 0xff;
739 data[off - init_off + 1] = (val >> 8) & 0xff;
740 data[off - init_off + 2] = (val >> 16) & 0xff;
741 data[off - init_off + 3] = (val >> 24) & 0xff;
742 off += 4;
743 size -= 4;
744 cond_resched();
745 }
746
747 if (size >= 2) {
748 u16 val;
749 pci_user_read_config_word(dev, off, &val);
750 data[off - init_off] = val & 0xff;
751 data[off - init_off + 1] = (val >> 8) & 0xff;
752 off += 2;
753 size -= 2;
754 }
755
756 if (size > 0) {
757 u8 val;
758 pci_user_read_config_byte(dev, off, &val);
759 data[off - init_off] = val;
760 }
761
762 pci_config_pm_runtime_put(dev);
763
764 return count;
765 }
766
767 static ssize_t pci_write_config(struct file *filp, struct kobject *kobj,
768 struct bin_attribute *bin_attr, char *buf,
769 loff_t off, size_t count)
770 {
771 struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj));
772 unsigned int size = count;
773 loff_t init_off = off;
774 u8 *data = (u8 *) buf;
775 int ret;
776
777 ret = security_locked_down(LOCKDOWN_PCI_ACCESS);
778 if (ret)
779 return ret;
780
781 if (resource_is_exclusive(&dev->driver_exclusive_resource, off,
782 count)) {
783 pci_warn_once(dev, "%s: Unexpected write to kernel-exclusive config offset %llx",
784 current->comm, off);
785 add_taint(TAINT_USER, LOCKDEP_STILL_OK);
786 }
787
788 if (off > dev->cfg_size)
789 return 0;
790 if (off + count > dev->cfg_size) {
791 size = dev->cfg_size - off;
792 count = size;
793 }
794
795 pci_config_pm_runtime_get(dev);
796
797 if ((off & 1) && size) {
798 pci_user_write_config_byte(dev, off, data[off - init_off]);
799 off++;
800 size--;
801 }
802
803 if ((off & 3) && size > 2) {
804 u16 val = data[off - init_off];
805 val |= (u16) data[off - init_off + 1] << 8;
806 pci_user_write_config_word(dev, off, val);
807 off += 2;
808 size -= 2;
809 }
810
811 while (size > 3) {
812 u32 val = data[off - init_off];
813 val |= (u32) data[off - init_off + 1] << 8;
814 val |= (u32) data[off - init_off + 2] << 16;
815 val |= (u32) data[off - init_off + 3] << 24;
816 pci_user_write_config_dword(dev, off, val);
817 off += 4;
818 size -= 4;
819 }
820
821 if (size >= 2) {
822 u16 val = data[off - init_off];
823 val |= (u16) data[off - init_off + 1] << 8;
824 pci_user_write_config_word(dev, off, val);
825 off += 2;
826 size -= 2;
827 }
828
829 if (size)
830 pci_user_write_config_byte(dev, off, data[off - init_off]);
831
832 pci_config_pm_runtime_put(dev);
833
834 return count;
835 }
836 static BIN_ATTR(config, 0644, pci_read_config, pci_write_config, 0);
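/*
 * "config" is a binary view of configuration space; a quick way to inspect
 * it from userspace (path hypothetical):
 *
 *   $ hexdump -C /sys/bus/pci/devices/0000:00:1f.2/config | head
 */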
837
838 static struct bin_attribute *pci_dev_config_attrs[] = {
839 &bin_attr_config,
840 NULL,
841 };
842
843 static umode_t pci_dev_config_attr_is_visible(struct kobject *kobj,
844 struct bin_attribute *a, int n)
845 {
846 struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
847
848 a->size = PCI_CFG_SPACE_SIZE;
849 if (pdev->cfg_size > PCI_CFG_SPACE_SIZE)
850 a->size = PCI_CFG_SPACE_EXP_SIZE;
851
852 return a->attr.mode;
853 }
854
855 static const struct attribute_group pci_dev_config_attr_group = {
856 .bin_attrs = pci_dev_config_attrs,
857 .is_bin_visible = pci_dev_config_attr_is_visible,
858 };
859
860 #ifdef HAVE_PCI_LEGACY
861 /**
862 * pci_read_legacy_io - read byte(s) from legacy I/O port space
863 * @filp: open sysfs file
864 * @kobj: kobject corresponding to file to read from
865 * @bin_attr: struct bin_attribute for this file
866 * @buf: buffer to store results
867 * @off: offset into legacy I/O port space
868 * @count: number of bytes to read
869 *
870 * Reads 1, 2, or 4 bytes from legacy I/O port space using an arch specific
871 * callback routine (pci_legacy_read).
872 */
873 static ssize_t pci_read_legacy_io(struct file *filp, struct kobject *kobj,
874 struct bin_attribute *bin_attr, char *buf,
875 loff_t off, size_t count)
876 {
877 struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj));
878
879 /* Only support 1, 2 or 4 byte accesses */
880 if (count != 1 && count != 2 && count != 4)
881 return -EINVAL;
882
883 return pci_legacy_read(bus, off, (u32 *)buf, count);
884 }
885
886 /**
887 * pci_write_legacy_io - write byte(s) to legacy I/O port space
888 * @filp: open sysfs file
889 * @kobj: kobject corresponding to file to write to
890 * @bin_attr: struct bin_attribute for this file
891 * @buf: buffer containing value to be written
892 * @off: offset into legacy I/O port space
893 * @count: number of bytes to write
894 *
895 * Writes 1, 2, or 4 bytes from legacy I/O port space using an arch specific
896 * callback routine (pci_legacy_write).
897 */
898 static ssize_t pci_write_legacy_io(struct file *filp, struct kobject *kobj,
899 struct bin_attribute *bin_attr, char *buf,
900 loff_t off, size_t count)
901 {
902 struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj));
903
904 /* Only support 1, 2 or 4 byte accesses */
905 if (count != 1 && count != 2 && count != 4)
906 return -EINVAL;
907
908 return pci_legacy_write(bus, off, *(u32 *)buf, count);
909 }
910
911 /**
912 * pci_mmap_legacy_mem - map legacy PCI memory into user memory space
913 * @filp: open sysfs file
914 * @kobj: kobject corresponding to device to be mapped
915 * @attr: struct bin_attribute for this file
916 * @vma: struct vm_area_struct passed to mmap
917 *
918 * Uses an arch specific callback, pci_mmap_legacy_mem_page_range, to mmap
919 * legacy memory space (first meg of bus space) into application virtual
920 * memory space.
921 */
922 static int pci_mmap_legacy_mem(struct file *filp, struct kobject *kobj,
923 struct bin_attribute *attr,
924 struct vm_area_struct *vma)
925 {
926 struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj));
927
928 return pci_mmap_legacy_page_range(bus, vma, pci_mmap_mem);
929 }
930
931 /**
932 * pci_mmap_legacy_io - map legacy PCI IO into user memory space
933 * @filp: open sysfs file
934 * @kobj: kobject corresponding to device to be mapped
935 * @attr: struct bin_attribute for this file
936 * @vma: struct vm_area_struct passed to mmap
937 *
938 * Uses an arch specific callback, pci_mmap_legacy_io_page_range, to mmap
939 * legacy IO space (first meg of bus space) into application virtual
940 * memory space. Returns -ENOSYS if the operation isn't supported
941 */
942 static int pci_mmap_legacy_io(struct file *filp, struct kobject *kobj,
943 struct bin_attribute *attr,
944 struct vm_area_struct *vma)
945 {
946 struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj));
947
948 return pci_mmap_legacy_page_range(bus, vma, pci_mmap_io);
949 }
950
951 /**
952 * pci_adjust_legacy_attr - adjustment of legacy file attributes
953 * @b: bus to create files under
954 * @mmap_type: I/O port or memory
955 *
956 * Stub implementation. Can be overridden by arch if necessary.
957 */
958 void __weak pci_adjust_legacy_attr(struct pci_bus *b,
959 enum pci_mmap_state mmap_type)
960 {
961 }
962
963 /**
964 * pci_create_legacy_files - create legacy I/O port and memory files
965 * @b: bus to create files under
966 *
967 * Some platforms allow access to legacy I/O port and ISA memory space on
968 * a per-bus basis. This routine creates the files and ties them into
969 * their associated read, write and mmap files from pci-sysfs.c
970 *
971 * On error unwind, but don't propagate the error to the caller
972 * as it is ok to set up the PCI bus without these files.
973 */
974 void pci_create_legacy_files(struct pci_bus *b)
975 {
976 int error;
977
978 if (!sysfs_initialized)
979 return;
980
981 b->legacy_io = kcalloc(2, sizeof(struct bin_attribute),
982 GFP_ATOMIC);
983 if (!b->legacy_io)
984 goto kzalloc_err;
985
986 sysfs_bin_attr_init(b->legacy_io);
987 b->legacy_io->attr.name = "legacy_io";
988 b->legacy_io->size = 0xffff;
989 b->legacy_io->attr.mode = 0600;
990 b->legacy_io->read = pci_read_legacy_io;
991 b->legacy_io->write = pci_write_legacy_io;
992 b->legacy_io->mmap = pci_mmap_legacy_io;
993 b->legacy_io->f_mapping = iomem_get_mapping;
994 pci_adjust_legacy_attr(b, pci_mmap_io);
995 error = device_create_bin_file(&b->dev, b->legacy_io);
996 if (error)
997 goto legacy_io_err;
998
999 /* Allocated above after the legacy_io struct */
1000 b->legacy_mem = b->legacy_io + 1;
1001 sysfs_bin_attr_init(b->legacy_mem);
1002 b->legacy_mem->attr.name = "legacy_mem";
1003 b->legacy_mem->size = 1024*1024;
1004 b->legacy_mem->attr.mode = 0600;
1005 b->legacy_mem->mmap = pci_mmap_legacy_mem;
1006 b->legacy_mem->f_mapping = iomem_get_mapping;
1007 pci_adjust_legacy_attr(b, pci_mmap_mem);
1008 error = device_create_bin_file(&b->dev, b->legacy_mem);
1009 if (error)
1010 goto legacy_mem_err;
1011
1012 return;
1013
1014 legacy_mem_err:
1015 device_remove_bin_file(&b->dev, b->legacy_io);
1016 legacy_io_err:
1017 kfree(b->legacy_io);
1018 b->legacy_io = NULL;
1019 kzalloc_err:
1020 dev_warn(&b->dev, "could not create legacy I/O port and ISA memory resources in sysfs\n");
1021 }
1022
1023 void pci_remove_legacy_files(struct pci_bus *b)
1024 {
1025 if (b->legacy_io) {
1026 device_remove_bin_file(&b->dev, b->legacy_io);
1027 device_remove_bin_file(&b->dev, b->legacy_mem);
1028 kfree(b->legacy_io); /* both are allocated here */
1029 }
1030 }
1031 #endif /* HAVE_PCI_LEGACY */
1032
1033 #if defined(HAVE_PCI_MMAP) || defined(ARCH_GENERIC_PCI_MMAP_RESOURCE)
1034
1035 int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma,
1036 enum pci_mmap_api mmap_api)
1037 {
1038 unsigned long nr, start, size;
1039 resource_size_t pci_start = 0, pci_end;
1040
1041 if (pci_resource_len(pdev, resno) == 0)
1042 return 0;
1043 nr = vma_pages(vma);
1044 start = vma->vm_pgoff;
1045 size = ((pci_resource_len(pdev, resno) - 1) >> PAGE_SHIFT) + 1;
1046 if (mmap_api == PCI_MMAP_PROCFS) {
1047 pci_resource_to_user(pdev, resno, &pdev->resource[resno],
1048 &pci_start, &pci_end);
1049 pci_start >>= PAGE_SHIFT;
1050 }
1051 if (start >= pci_start && start < pci_start + size &&
1052 start + nr <= pci_start + size)
1053 return 1;
1054 return 0;
1055 }
1056
1057 /**
1058 * pci_mmap_resource - map a PCI resource into user memory space
1059 * @kobj: kobject for mapping
1060 * @attr: struct bin_attribute for the file being mapped
1061 * @vma: struct vm_area_struct passed into the mmap
1062 * @write_combine: 1 for write_combine mapping
1063 *
1064 * Use the regular PCI mapping routines to map a PCI resource into userspace.
1065 */
1066 static int pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr,
1067 struct vm_area_struct *vma, int write_combine)
1068 {
1069 struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
1070 int bar = (unsigned long)attr->private;
1071 enum pci_mmap_state mmap_type;
1072 struct resource *res = &pdev->resource[bar];
1073 int ret;
1074
1075 ret = security_locked_down(LOCKDOWN_PCI_ACCESS);
1076 if (ret)
1077 return ret;
1078
1079 if (res->flags & IORESOURCE_MEM && iomem_is_exclusive(res->start))
1080 return -EINVAL;
1081
1082 if (!pci_mmap_fits(pdev, bar, vma, PCI_MMAP_SYSFS))
1083 return -EINVAL;
1084
1085 mmap_type = res->flags & IORESOURCE_MEM ? pci_mmap_mem : pci_mmap_io;
1086
1087 return pci_mmap_resource_range(pdev, bar, vma, mmap_type, write_combine);
1088 }
1089
1090 static int pci_mmap_resource_uc(struct file *filp, struct kobject *kobj,
1091 struct bin_attribute *attr,
1092 struct vm_area_struct *vma)
1093 {
1094 return pci_mmap_resource(kobj, attr, vma, 0);
1095 }
1096
1097 static int pci_mmap_resource_wc(struct file *filp, struct kobject *kobj,
1098 struct bin_attribute *attr,
1099 struct vm_area_struct *vma)
1100 {
1101 return pci_mmap_resource(kobj, attr, vma, 1);
1102 }
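/*
 * These handlers back the per-BAR "resourceN" and "resourceN_wc" files;
 * userspace can mmap() them for direct BAR access, subject to the lockdown
 * and exclusive-resource checks above.
 */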
1103
1104 static ssize_t pci_resource_io(struct file *filp, struct kobject *kobj,
1105 struct bin_attribute *attr, char *buf,
1106 loff_t off, size_t count, bool write)
1107 {
1108 #ifdef CONFIG_HAS_IOPORT
1109 struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
1110 int bar = (unsigned long)attr->private;
1111 unsigned long port = off;
1112
1113 port += pci_resource_start(pdev, bar);
1114
1115 if (port > pci_resource_end(pdev, bar))
1116 return 0;
1117
1118 if (port + count - 1 > pci_resource_end(pdev, bar))
1119 return -EINVAL;
1120
1121 switch (count) {
1122 case 1:
1123 if (write)
1124 outb(*(u8 *)buf, port);
1125 else
1126 *(u8 *)buf = inb(port);
1127 return 1;
1128 case 2:
1129 if (write)
1130 outw(*(u16 *)buf, port);
1131 else
1132 *(u16 *)buf = inw(port);
1133 return 2;
1134 case 4:
1135 if (write)
1136 outl(*(u32 *)buf, port);
1137 else
1138 *(u32 *)buf = inl(port);
1139 return 4;
1140 }
1141 return -EINVAL;
1142 #else
1143 return -ENXIO;
1144 #endif
1145 }
1146
1147 static ssize_t pci_read_resource_io(struct file *filp, struct kobject *kobj,
1148 struct bin_attribute *attr, char *buf,
1149 loff_t off, size_t count)
1150 {
1151 return pci_resource_io(filp, kobj, attr, buf, off, count, false);
1152 }
1153
1154 static ssize_t pci_write_resource_io(struct file *filp, struct kobject *kobj,
1155 struct bin_attribute *attr, char *buf,
1156 loff_t off, size_t count)
1157 {
1158 int ret;
1159
1160 ret = security_locked_down(LOCKDOWN_PCI_ACCESS);
1161 if (ret)
1162 return ret;
1163
1164 return pci_resource_io(filp, kobj, attr, buf, off, count, true);
1165 }
1166
1167 /**
1168 * pci_remove_resource_files - cleanup resource files
1169 * @pdev: dev to cleanup
1170 *
1171 * If we created resource files for @pdev, remove them from sysfs and
1172 * free their resources.
1173 */
1174 static void pci_remove_resource_files(struct pci_dev *pdev)
1175 {
1176 int i;
1177
1178 for (i = 0; i < PCI_STD_NUM_BARS; i++) {
1179 struct bin_attribute *res_attr;
1180
1181 res_attr = pdev->res_attr[i];
1182 if (res_attr) {
1183 sysfs_remove_bin_file(&pdev->dev.kobj, res_attr);
1184 kfree(res_attr);
1185 }
1186
1187 res_attr = pdev->res_attr_wc[i];
1188 if (res_attr) {
1189 sysfs_remove_bin_file(&pdev->dev.kobj, res_attr);
1190 kfree(res_attr);
1191 }
1192 }
1193 }
1194
1195 static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
1196 {
1197 /* allocate attribute structure, piggyback attribute name */
1198 int name_len = write_combine ? 13 : 10;
1199 struct bin_attribute *res_attr;
1200 char *res_attr_name;
1201 int retval;
1202
1203 res_attr = kzalloc(sizeof(*res_attr) + name_len, GFP_ATOMIC);
1204 if (!res_attr)
1205 return -ENOMEM;
1206
1207 res_attr_name = (char *)(res_attr + 1);
1208
1209 sysfs_bin_attr_init(res_attr);
1210 if (write_combine) {
1211 sprintf(res_attr_name, "resource%d_wc", num);
1212 res_attr->mmap = pci_mmap_resource_wc;
1213 } else {
1214 sprintf(res_attr_name, "resource%d", num);
1215 if (pci_resource_flags(pdev, num) & IORESOURCE_IO) {
1216 res_attr->read = pci_read_resource_io;
1217 res_attr->write = pci_write_resource_io;
1218 if (arch_can_pci_mmap_io())
1219 res_attr->mmap = pci_mmap_resource_uc;
1220 } else {
1221 res_attr->mmap = pci_mmap_resource_uc;
1222 }
1223 }
1224 if (res_attr->mmap)
1225 res_attr->f_mapping = iomem_get_mapping;
1226 res_attr->attr.name = res_attr_name;
1227 res_attr->attr.mode = 0600;
1228 res_attr->size = pci_resource_len(pdev, num);
1229 res_attr->private = (void *)(unsigned long)num;
1230 retval = sysfs_create_bin_file(&pdev->dev.kobj, res_attr);
1231 if (retval) {
1232 kfree(res_attr);
1233 return retval;
1234 }
1235
1236 if (write_combine)
1237 pdev->res_attr_wc[num] = res_attr;
1238 else
1239 pdev->res_attr[num] = res_attr;
1240
1241 return 0;
1242 }
1243
1244 /**
1245 * pci_create_resource_files - create resource files in sysfs for @dev
1246 * @pdev: dev in question
1247 *
1248 * Walk the resources in @pdev creating files for each resource available.
1249 */
1250 static int pci_create_resource_files(struct pci_dev *pdev)
1251 {
1252 int i;
1253 int retval;
1254
1255 /* Expose the PCI resources from this device as files */
1256 for (i = 0; i < PCI_STD_NUM_BARS; i++) {
1257
1258 /* skip empty resources */
1259 if (!pci_resource_len(pdev, i))
1260 continue;
1261
1262 retval = pci_create_attr(pdev, i, 0);
1263 /* for prefetchable resources, create a WC mappable file */
1264 if (!retval && arch_can_pci_mmap_wc() &&
1265 pdev->resource[i].flags & IORESOURCE_PREFETCH)
1266 retval = pci_create_attr(pdev, i, 1);
1267 if (retval) {
1268 pci_remove_resource_files(pdev);
1269 return retval;
1270 }
1271 }
1272 return 0;
1273 }
1274 #else /* !(defined(HAVE_PCI_MMAP) || defined(ARCH_GENERIC_PCI_MMAP_RESOURCE)) */
1275 int __weak pci_create_resource_files(struct pci_dev *dev) { return 0; }
1276 void __weak pci_remove_resource_files(struct pci_dev *dev) { return; }
1277 #endif
1278
1279 /**
1280 * pci_write_rom - used to enable access to the PCI ROM display
1281 * @filp: sysfs file
1282 * @kobj: kernel object handle
1283 * @bin_attr: struct bin_attribute for this file
1284 * @buf: user input
1285 * @off: file offset
1286 * @count: number of bytes in input
1287 *
1288 * writing anything except 0 enables it
1289 */
1290 static ssize_t pci_write_rom(struct file *filp, struct kobject *kobj,
1291 struct bin_attribute *bin_attr, char *buf,
1292 loff_t off, size_t count)
1293 {
1294 struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
1295
1296 if ((off == 0) && (*buf == '0') && (count == 2))
1297 pdev->rom_attr_enabled = 0;
1298 else
1299 pdev->rom_attr_enabled = 1;
1300
1301 return count;
1302 }
1303
1304 /**
1305 * pci_read_rom - read a PCI ROM
1306 * @filp: sysfs file
1307 * @kobj: kernel object handle
1308 * @bin_attr: struct bin_attribute for this file
1309 * @buf: where to put the data we read from the ROM
1310 * @off: file offset
1311 * @count: number of bytes to read
1312 *
1313 * Put @count bytes starting at @off into @buf from the ROM in the PCI
1314 * device corresponding to @kobj.
1315 */
1316 static ssize_t pci_read_rom(struct file *filp, struct kobject *kobj,
1317 struct bin_attribute *bin_attr, char *buf,
1318 loff_t off, size_t count)
1319 {
1320 struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
1321 void __iomem *rom;
1322 size_t size;
1323
1324 if (!pdev->rom_attr_enabled)
1325 return -EINVAL;
1326
1327 rom = pci_map_rom(pdev, &size); /* size starts out as PCI window size */
1328 if (!rom || !size)
1329 return -EIO;
1330
1331 if (off >= size)
1332 count = 0;
1333 else {
1334 if (off + count > size)
1335 count = size - off;
1336
1337 memcpy_fromio(buf, rom + off, count);
1338 }
1339 pci_unmap_rom(pdev, rom);
1340
1341 return count;
1342 }
1343 static BIN_ATTR(rom, 0600, pci_read_rom, pci_write_rom, 0);
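/*
 * The ROM must be enabled before it can be read, e.g. (path hypothetical):
 *
 *   # echo 1 > /sys/bus/pci/devices/0000:01:00.0/rom
 *   # cat /sys/bus/pci/devices/0000:01:00.0/rom > image.rom
 */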
1344
1345 static struct bin_attribute *pci_dev_rom_attrs[] = {
1346 &bin_attr_rom,
1347 NULL,
1348 };
1349
1350 static umode_t pci_dev_rom_attr_is_visible(struct kobject *kobj,
1351 struct bin_attribute *a, int n)
1352 {
1353 struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
1354 size_t rom_size;
1355
1356 /* If the device has a ROM, try to expose it in sysfs. */
1357 rom_size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
1358 if (!rom_size)
1359 return 0;
1360
1361 a->size = rom_size;
1362
1363 return a->attr.mode;
1364 }
1365
1366 static const struct attribute_group pci_dev_rom_attr_group = {
1367 .bin_attrs = pci_dev_rom_attrs,
1368 .is_bin_visible = pci_dev_rom_attr_is_visible,
1369 };
1370
1371 static ssize_t reset_store(struct device *dev, struct device_attribute *attr,
1372 const char *buf, size_t count)
1373 {
1374 struct pci_dev *pdev = to_pci_dev(dev);
1375 unsigned long val;
1376 ssize_t result;
1377
1378 if (kstrtoul(buf, 0, &val) < 0)
1379 return -EINVAL;
1380
1381 if (val != 1)
1382 return -EINVAL;
1383
1384 pm_runtime_get_sync(dev);
1385 result = pci_reset_function(pdev);
1386 pm_runtime_put(dev);
1387 if (result < 0)
1388 return result;
1389
1390 return count;
1391 }
1392 static DEVICE_ATTR_WO(reset);
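/*
 * Only the value 1 is accepted; it triggers pci_reset_function() with the
 * device runtime-resumed, e.g. (address hypothetical):
 *
 *   # echo 1 > /sys/bus/pci/devices/0000:03:00.0/reset
 */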
1393
1394 static struct attribute *pci_dev_reset_attrs[] = {
1395 &dev_attr_reset.attr,
1396 NULL,
1397 };
1398
1399 static umode_t pci_dev_reset_attr_is_visible(struct kobject *kobj,
1400 struct attribute *a, int n)
1401 {
1402 struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
1403
1404 if (!pci_reset_supported(pdev))
1405 return 0;
1406
1407 return a->mode;
1408 }
1409
1410 static const struct attribute_group pci_dev_reset_attr_group = {
1411 .attrs = pci_dev_reset_attrs,
1412 .is_visible = pci_dev_reset_attr_is_visible,
1413 };
1414
1415 #define pci_dev_resource_resize_attr(n) \
1416 static ssize_t resource##n##_resize_show(struct device *dev, \
1417 struct device_attribute *attr, \
1418 char * buf) \
1419 { \
1420 struct pci_dev *pdev = to_pci_dev(dev); \
1421 ssize_t ret; \
1422 \
1423 pci_config_pm_runtime_get(pdev); \
1424 \
1425 ret = sysfs_emit(buf, "%016llx\n", \
1426 (u64)pci_rebar_get_possible_sizes(pdev, n)); \
1427 \
1428 pci_config_pm_runtime_put(pdev); \
1429 \
1430 return ret; \
1431 } \
1432 \
1433 static ssize_t resource##n##_resize_store(struct device *dev, \
1434 struct device_attribute *attr,\
1435 const char *buf, size_t count)\
1436 { \
1437 struct pci_dev *pdev = to_pci_dev(dev); \
1438 unsigned long size, flags; \
1439 int ret, i; \
1440 u16 cmd; \
1441 \
1442 if (kstrtoul(buf, 0, &size) < 0) \
1443 return -EINVAL; \
1444 \
1445 device_lock(dev); \
1446 if (dev->driver) { \
1447 ret = -EBUSY; \
1448 goto unlock; \
1449 } \
1450 \
1451 pci_config_pm_runtime_get(pdev); \
1452 \
1453 if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA) { \
1454 ret = aperture_remove_conflicting_pci_devices(pdev, \
1455 "resourceN_resize"); \
1456 if (ret) \
1457 goto pm_put; \
1458 } \
1459 \
1460 pci_read_config_word(pdev, PCI_COMMAND, &cmd); \
1461 pci_write_config_word(pdev, PCI_COMMAND, \
1462 cmd & ~PCI_COMMAND_MEMORY); \
1463 \
1464 flags = pci_resource_flags(pdev, n); \
1465 \
1466 pci_remove_resource_files(pdev); \
1467 \
1468 for (i = 0; i < PCI_STD_NUM_BARS; i++) { \
1469 if (pci_resource_len(pdev, i) && \
1470 pci_resource_flags(pdev, i) == flags) \
1471 pci_release_resource(pdev, i); \
1472 } \
1473 \
1474 ret = pci_resize_resource(pdev, n, size); \
1475 \
1476 pci_assign_unassigned_bus_resources(pdev->bus); \
1477 \
1478 if (pci_create_resource_files(pdev)) \
1479 pci_warn(pdev, "Failed to recreate resource files after BAR resizing\n");\
1480 \
1481 pci_write_config_word(pdev, PCI_COMMAND, cmd); \
1482 pm_put: \
1483 pci_config_pm_runtime_put(pdev); \
1484 unlock: \
1485 device_unlock(dev); \
1486 \
1487 return ret ? ret : count; \
1488 } \
1489 static DEVICE_ATTR_RW(resource##n##_resize)
1490
1491 pci_dev_resource_resize_attr(0);
1492 pci_dev_resource_resize_attr(1);
1493 pci_dev_resource_resize_attr(2);
1494 pci_dev_resource_resize_attr(3);
1495 pci_dev_resource_resize_attr(4);
1496 pci_dev_resource_resize_attr(5);
1497
1498 static struct attribute *resource_resize_attrs[] = {
1499 &dev_attr_resource0_resize.attr,
1500 &dev_attr_resource1_resize.attr,
1501 &dev_attr_resource2_resize.attr,
1502 &dev_attr_resource3_resize.attr,
1503 &dev_attr_resource4_resize.attr,
1504 &dev_attr_resource5_resize.attr,
1505 NULL,
1506 };
1507
1508 static umode_t resource_resize_is_visible(struct kobject *kobj,
1509 struct attribute *a, int n)
1510 {
1511 struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
1512
1513 return pci_rebar_get_current_size(pdev, n) < 0 ? 0 : a->mode;
1514 }
1515
1516 static const struct attribute_group pci_dev_resource_resize_group = {
1517 .attrs = resource_resize_attrs,
1518 .is_visible = resource_resize_is_visible,
1519 };
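/*
 * resourceN_resize reads back a 64-bit hex bitmask of the sizes the device
 * supports for that BAR via Resizable BAR; writing selects a new size through
 * pci_resize_resource() and is refused while a driver is bound. The files are
 * hidden when the BAR is not resizable.
 */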
1520
1521 int __must_check pci_create_sysfs_dev_files(struct pci_dev *pdev)
1522 {
1523 if (!sysfs_initialized)
1524 return -EACCES;
1525
1526 return pci_create_resource_files(pdev);
1527 }
1528
1529 /**
1530 * pci_remove_sysfs_dev_files - cleanup PCI specific sysfs files
1531 * @pdev: device whose entries we should free
1532 *
1533 * Cleanup when @pdev is removed from sysfs.
1534 */
1535 void pci_remove_sysfs_dev_files(struct pci_dev *pdev)
1536 {
1537 if (!sysfs_initialized)
1538 return;
1539
1540 pci_remove_resource_files(pdev);
1541 }
1542
1543 static int __init pci_sysfs_init(void)
1544 {
1545 struct pci_dev *pdev = NULL;
1546 struct pci_bus *pbus = NULL;
1547 int retval;
1548
1549 sysfs_initialized = 1;
1550 for_each_pci_dev(pdev) {
1551 retval = pci_create_sysfs_dev_files(pdev);
1552 if (retval) {
1553 pci_dev_put(pdev);
1554 return retval;
1555 }
1556 }
1557
1558 while ((pbus = pci_find_next_bus(pbus)))
1559 pci_create_legacy_files(pbus);
1560
1561 return 0;
1562 }
1563 late_initcall(pci_sysfs_init);
1564
1565 static struct attribute *pci_dev_dev_attrs[] = {
1566 &dev_attr_boot_vga.attr,
1567 NULL,
1568 };
1569
1570 static umode_t pci_dev_attrs_are_visible(struct kobject *kobj,
1571 struct attribute *a, int n)
1572 {
1573 struct device *dev = kobj_to_dev(kobj);
1574 struct pci_dev *pdev = to_pci_dev(dev);
1575
1576 if (a == &dev_attr_boot_vga.attr)
1577 if ((pdev->class >> 8) != PCI_CLASS_DISPLAY_VGA)
1578 return 0;
1579
1580 return a->mode;
1581 }
1582
1583 static struct attribute *pci_dev_hp_attrs[] = {
1584 &dev_attr_remove.attr,
1585 &dev_attr_dev_rescan.attr,
1586 NULL,
1587 };
1588
1589 static umode_t pci_dev_hp_attrs_are_visible(struct kobject *kobj,
1590 struct attribute *a, int n)
1591 {
1592 struct device *dev = kobj_to_dev(kobj);
1593 struct pci_dev *pdev = to_pci_dev(dev);
1594
1595 if (pdev->is_virtfn)
1596 return 0;
1597
1598 return a->mode;
1599 }
1600
1601 static umode_t pci_bridge_attrs_are_visible(struct kobject *kobj,
1602 struct attribute *a, int n)
1603 {
1604 struct device *dev = kobj_to_dev(kobj);
1605 struct pci_dev *pdev = to_pci_dev(dev);
1606
1607 if (pci_is_bridge(pdev))
1608 return a->mode;
1609
1610 return 0;
1611 }
1612
1613 static umode_t pcie_dev_attrs_are_visible(struct kobject *kobj,
1614 struct attribute *a, int n)
1615 {
1616 struct device *dev = kobj_to_dev(kobj);
1617 struct pci_dev *pdev = to_pci_dev(dev);
1618
1619 if (pci_is_pcie(pdev))
1620 return a->mode;
1621
1622 return 0;
1623 }
1624
1625 static const struct attribute_group pci_dev_group = {
1626 .attrs = pci_dev_attrs,
1627 };
1628
1629 const struct attribute_group *pci_dev_groups[] = {
1630 &pci_dev_group,
1631 &pci_dev_config_attr_group,
1632 &pci_dev_rom_attr_group,
1633 &pci_dev_reset_attr_group,
1634 &pci_dev_reset_method_attr_group,
1635 &pci_dev_vpd_attr_group,
1636 #ifdef CONFIG_DMI
1637 &pci_dev_smbios_attr_group,
1638 #endif
1639 #ifdef CONFIG_ACPI
1640 &pci_dev_acpi_attr_group,
1641 #endif
1642 &pci_dev_resource_resize_group,
1643 NULL,
1644 };
1645
1646 static const struct attribute_group pci_dev_hp_attr_group = {
1647 .attrs = pci_dev_hp_attrs,
1648 .is_visible = pci_dev_hp_attrs_are_visible,
1649 };
1650
1651 static const struct attribute_group pci_dev_attr_group = {
1652 .attrs = pci_dev_dev_attrs,
1653 .is_visible = pci_dev_attrs_are_visible,
1654 };
1655
1656 static const struct attribute_group pci_bridge_attr_group = {
1657 .attrs = pci_bridge_attrs,
1658 .is_visible = pci_bridge_attrs_are_visible,
1659 };
1660
1661 static const struct attribute_group pcie_dev_attr_group = {
1662 .attrs = pcie_dev_attrs,
1663 .is_visible = pcie_dev_attrs_are_visible,
1664 };
1665
1666 static const struct attribute_group *pci_dev_attr_groups[] = {
1667 &pci_dev_attr_group,
1668 &pci_dev_hp_attr_group,
1669 #ifdef CONFIG_PCI_IOV
1670 &sriov_pf_dev_attr_group,
1671 &sriov_vf_dev_attr_group,
1672 #endif
1673 &pci_bridge_attr_group,
1674 &pcie_dev_attr_group,
1675 #ifdef CONFIG_PCIEAER
1676 &aer_stats_attr_group,
1677 #endif
1678 #ifdef CONFIG_PCIEASPM
1679 &aspm_ctrl_attr_group,
1680 #endif
1681 NULL,
1682 };
1683
1684 const struct device_type pci_dev_type = {
1685 .groups = pci_dev_attr_groups,
1686 };
1687