1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  *	pci.h
4  *
5  *	PCI defines and function prototypes
6  *	Copyright 1994, Drew Eckhardt
7  *	Copyright 1997--1999 Martin Mares <mj@ucw.cz>
8  *
9  *	PCI Express ASPM defines and function prototypes
10  *	Copyright (c) 2007 Intel Corp.
11  *		Zhang Yanmin (yanmin.zhang@intel.com)
12  *		Shaohua Li (shaohua.li@intel.com)
13  *
14  *	For more information, please consult the following manuals (look at
15  *	http://www.pcisig.com/ for how to get them):
16  *
17  *	PCI BIOS Specification
18  *	PCI Local Bus Specification
19  *	PCI to PCI Bridge Specification
20  *	PCI Express Specification
21  *	PCI System Design Guide
22  */
23 #ifndef LINUX_PCI_H
24 #define LINUX_PCI_H
25 
26 #include <linux/args.h>
27 #include <linux/mod_devicetable.h>
28 
29 #include <linux/types.h>
30 #include <linux/init.h>
31 #include <linux/ioport.h>
32 #include <linux/list.h>
33 #include <linux/compiler.h>
34 #include <linux/errno.h>
35 #include <linux/kobject.h>
36 #include <linux/atomic.h>
37 #include <linux/device.h>
38 #include <linux/interrupt.h>
39 #include <linux/io.h>
40 #include <linux/resource_ext.h>
41 #include <linux/msi_api.h>
42 #include <uapi/linux/pci.h>
43 
44 #include <linux/pci_ids.h>
45 
46 #define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY  | \
47 			       PCI_STATUS_SIG_SYSTEM_ERROR | \
48 			       PCI_STATUS_REC_MASTER_ABORT | \
49 			       PCI_STATUS_REC_TARGET_ABORT | \
50 			       PCI_STATUS_SIG_TARGET_ABORT | \
51 			       PCI_STATUS_PARITY)
52 
53 /* Number of reset methods used in pci_reset_fn_methods array in pci.c */
54 #define PCI_NUM_RESET_METHODS 7
55 
56 #define PCI_RESET_PROBE		true
57 #define PCI_RESET_DO_RESET	false
58 
59 /*
60  * The PCI interface treats multi-function devices as independent
61  * devices.  The slot/function address of each device is encoded
62  * in a single byte as follows:
63  *
64  *	7:3 = slot
65  *	2:0 = function
66  *
67  * PCI_DEVFN(), PCI_SLOT(), and PCI_FUNC() are defined in uapi/linux/pci.h.
68  * In the interest of not exposing interfaces to user-space unnecessarily,
69  * the following kernel-only defines are being added here.
70  */
71 #define PCI_DEVID(bus, devfn)	((((u16)(bus)) << 8) | (devfn))
72 /* Return bus number from a PCI devid built as (((u16)bus_number) << 8) | devfn */
73 #define PCI_BUS_NUM(x) (((x) >> 8) & 0xff)
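/*
 * Example (illustrative sketch, not part of the original header): composing
 * and decomposing a devid.  "pdev" is a hypothetical struct pci_dev pointer;
 * PCI_SLOT() and PCI_FUNC() come from uapi/linux/pci.h.
 *
 *	u16 devid = PCI_DEVID(pdev->bus->number, pdev->devfn);
 *	u8  bus   = PCI_BUS_NUM(devid);		// upper byte
 *	u8  slot  = PCI_SLOT(devid & 0xff);	// bits 7:3 of devfn
 *	u8  func  = PCI_FUNC(devid & 0xff);	// bits 2:0 of devfn
 */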
74 
75 /* pci_slot represents a physical slot */
76 struct pci_slot {
77 	struct pci_bus		*bus;		/* Bus this slot is on */
78 	struct list_head	list;		/* Node in list of slots */
79 	struct hotplug_slot	*hotplug;	/* Hotplug info (move here) */
80 	unsigned char		number;		/* PCI_SLOT(pci_dev->devfn) */
81 	struct kobject		kobj;
82 };
83 
84 static inline const char *pci_slot_name(const struct pci_slot *slot)
85 {
86 	return kobject_name(&slot->kobj);
87 }
88 
89 /* File state for mmap()s on /proc/bus/pci/X/Y */
90 enum pci_mmap_state {
91 	pci_mmap_io,
92 	pci_mmap_mem
93 };
94 
95 /* For PCI devices, the region numbers are assigned this way: */
96 enum {
97 	/* #0-5: standard PCI resources */
98 	PCI_STD_RESOURCES,
99 	PCI_STD_RESOURCE_END = PCI_STD_RESOURCES + PCI_STD_NUM_BARS - 1,
100 
101 	/* #6: expansion ROM resource */
102 	PCI_ROM_RESOURCE,
103 
104 	/* Device-specific resources */
105 #ifdef CONFIG_PCI_IOV
106 	PCI_IOV_RESOURCES,
107 	PCI_IOV_RESOURCE_END = PCI_IOV_RESOURCES + PCI_SRIOV_NUM_BARS - 1,
108 #endif
109 
110 /* PCI-to-PCI (P2P) bridge windows */
111 #define PCI_BRIDGE_IO_WINDOW		(PCI_BRIDGE_RESOURCES + 0)
112 #define PCI_BRIDGE_MEM_WINDOW		(PCI_BRIDGE_RESOURCES + 1)
113 #define PCI_BRIDGE_PREF_MEM_WINDOW	(PCI_BRIDGE_RESOURCES + 2)
114 
115 /* CardBus bridge windows */
116 #define PCI_CB_BRIDGE_IO_0_WINDOW	(PCI_BRIDGE_RESOURCES + 0)
117 #define PCI_CB_BRIDGE_IO_1_WINDOW	(PCI_BRIDGE_RESOURCES + 1)
118 #define PCI_CB_BRIDGE_MEM_0_WINDOW	(PCI_BRIDGE_RESOURCES + 2)
119 #define PCI_CB_BRIDGE_MEM_1_WINDOW	(PCI_BRIDGE_RESOURCES + 3)
120 
121 /* Total number of bridge resources for P2P and CardBus */
122 #define PCI_BRIDGE_RESOURCE_NUM 4
123 
124 	/* Resources assigned to buses behind the bridge */
125 	PCI_BRIDGE_RESOURCES,
126 	PCI_BRIDGE_RESOURCE_END = PCI_BRIDGE_RESOURCES +
127 				  PCI_BRIDGE_RESOURCE_NUM - 1,
128 
129 	/* Total resources associated with a PCI device */
130 	PCI_NUM_RESOURCES,
131 
132 	/* Preserve this for compatibility */
133 	DEVICE_COUNT_RESOURCE = PCI_NUM_RESOURCES,
134 };
135 
136 /**
137  * enum pci_interrupt_pin - PCI INTx interrupt values
138  * @PCI_INTERRUPT_UNKNOWN: Unknown or unassigned interrupt
139  * @PCI_INTERRUPT_INTA: PCI INTA pin
140  * @PCI_INTERRUPT_INTB: PCI INTB pin
141  * @PCI_INTERRUPT_INTC: PCI INTC pin
142  * @PCI_INTERRUPT_INTD: PCI INTD pin
143  *
144  * Corresponds to values for legacy PCI INTx interrupts, as can be found in the
145  * PCI_INTERRUPT_PIN register.
146  */
147 enum pci_interrupt_pin {
148 	PCI_INTERRUPT_UNKNOWN,
149 	PCI_INTERRUPT_INTA,
150 	PCI_INTERRUPT_INTB,
151 	PCI_INTERRUPT_INTC,
152 	PCI_INTERRUPT_INTD,
153 };
154 
155 /* The number of legacy PCI INTx interrupts */
156 #define PCI_NUM_INTX	4
157 
158 /*
159  * Reading from a device that doesn't respond typically returns ~0.  A
160  * successful read from a device may also return ~0, so you need additional
161  * information to reliably identify errors.
162  */
163 #define PCI_ERROR_RESPONSE		(~0ULL)
164 #define PCI_SET_ERROR_RESPONSE(val)	(*(val) = ((typeof(*(val))) PCI_ERROR_RESPONSE))
165 #define PCI_POSSIBLE_ERROR(val)		((val) == ((typeof(val)) PCI_ERROR_RESPONSE))
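/*
 * Example (sketch): using PCI_POSSIBLE_ERROR() after a config read.  "pdev"
 * and the error handling are placeholders.
 *
 *	u32 id;
 *
 *	pci_read_config_dword(pdev, PCI_VENDOR_ID, &id);
 *	if (PCI_POSSIBLE_ERROR(id))
 *		return -ENODEV;		// device may be gone; needs more checks
 */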
166 
167 /*
168  * pci_power_t values must match the bits in the Capabilities PME_Support
169  * and Control/Status PowerState fields in the Power Management capability.
170  */
171 typedef int __bitwise pci_power_t;
172 
173 #define PCI_D0		((pci_power_t __force) 0)
174 #define PCI_D1		((pci_power_t __force) 1)
175 #define PCI_D2		((pci_power_t __force) 2)
176 #define PCI_D3hot	((pci_power_t __force) 3)
177 #define PCI_D3cold	((pci_power_t __force) 4)
178 #define PCI_UNKNOWN	((pci_power_t __force) 5)
179 #define PCI_POWER_ERROR	((pci_power_t __force) -1)
180 
181 /* Remember to update this when the list above changes! */
182 extern const char *pci_power_names[];
183 
184 static inline const char *pci_power_name(pci_power_t state)
185 {
186 	return pci_power_names[1 + (__force int) state];
187 }
188 
189 /**
190  * typedef pci_channel_state_t
191  *
192  * The pci_channel state describes connectivity between the CPU and
193  * the PCI device.  If some PCI bus between here and the PCI device
194  * has crashed or locked up, this info is reflected here.
195  */
196 typedef unsigned int __bitwise pci_channel_state_t;
197 
198 enum {
199 	/* I/O channel is in normal state */
200 	pci_channel_io_normal = (__force pci_channel_state_t) 1,
201 
202 	/* I/O to channel is blocked */
203 	pci_channel_io_frozen = (__force pci_channel_state_t) 2,
204 
205 	/* PCI card is dead */
206 	pci_channel_io_perm_failure = (__force pci_channel_state_t) 3,
207 };
208 
209 typedef unsigned int __bitwise pcie_reset_state_t;
210 
211 enum pcie_reset_state {
212 	/* Reset is NOT asserted (Use to deassert reset) */
213 	pcie_deassert_reset = (__force pcie_reset_state_t) 1,
214 
215 	/* Use #PERST to reset PCIe device */
216 	pcie_warm_reset = (__force pcie_reset_state_t) 2,
217 
218 	/* Use PCIe Hot Reset to reset device */
219 	pcie_hot_reset = (__force pcie_reset_state_t) 3
220 };
221 
222 typedef unsigned short __bitwise pci_dev_flags_t;
223 enum pci_dev_flags {
224 	/* INTX_DISABLE in PCI_COMMAND register disables MSI too */
225 	PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG = (__force pci_dev_flags_t) (1 << 0),
226 	/* Device configuration is irrevocably lost if disabled into D3 */
227 	PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) (1 << 1),
228 	/* Provide indication device is assigned by a Virtual Machine Manager */
229 	PCI_DEV_FLAGS_ASSIGNED = (__force pci_dev_flags_t) (1 << 2),
230 	/* Flag for quirk use to store if quirk-specific ACS is enabled */
231 	PCI_DEV_FLAGS_ACS_ENABLED_QUIRK = (__force pci_dev_flags_t) (1 << 3),
232 	/* Use a PCIe-to-PCI bridge alias even if !pci_is_pcie */
233 	PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS = (__force pci_dev_flags_t) (1 << 5),
234 	/* Do not use bus resets for device */
235 	PCI_DEV_FLAGS_NO_BUS_RESET = (__force pci_dev_flags_t) (1 << 6),
236 	/* Do not use PM reset even if device advertises NoSoftRst- */
237 	PCI_DEV_FLAGS_NO_PM_RESET = (__force pci_dev_flags_t) (1 << 7),
238 	/* Get VPD from function 0 VPD */
239 	PCI_DEV_FLAGS_VPD_REF_F0 = (__force pci_dev_flags_t) (1 << 8),
240 	/* A non-root bridge where translation occurs, stop alias search here */
241 	PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT = (__force pci_dev_flags_t) (1 << 9),
242 	/* Do not use FLR even if device advertises PCI_AF_CAP */
243 	PCI_DEV_FLAGS_NO_FLR_RESET = (__force pci_dev_flags_t) (1 << 10),
244 	/* Don't use Relaxed Ordering for TLPs directed at this device */
245 	PCI_DEV_FLAGS_NO_RELAXED_ORDERING = (__force pci_dev_flags_t) (1 << 11),
246 	/* Device does honor MSI masking despite saying otherwise */
247 	PCI_DEV_FLAGS_HAS_MSI_MASKING = (__force pci_dev_flags_t) (1 << 12),
248 };
249 
250 enum pci_irq_reroute_variant {
251 	INTEL_IRQ_REROUTE_VARIANT = 1,
252 	MAX_IRQ_REROUTE_VARIANTS = 3
253 };
254 
255 typedef unsigned short __bitwise pci_bus_flags_t;
256 enum pci_bus_flags {
257 	PCI_BUS_FLAGS_NO_MSI	= (__force pci_bus_flags_t) 1,
258 	PCI_BUS_FLAGS_NO_MMRBC	= (__force pci_bus_flags_t) 2,
259 	PCI_BUS_FLAGS_NO_AERSID	= (__force pci_bus_flags_t) 4,
260 	PCI_BUS_FLAGS_NO_EXTCFG	= (__force pci_bus_flags_t) 8,
261 };
262 
263 /* Values from Link Status register, PCIe r3.1, sec 7.8.8 */
264 enum pcie_link_width {
265 	PCIE_LNK_WIDTH_RESRV	= 0x00,
266 	PCIE_LNK_X1		= 0x01,
267 	PCIE_LNK_X2		= 0x02,
268 	PCIE_LNK_X4		= 0x04,
269 	PCIE_LNK_X8		= 0x08,
270 	PCIE_LNK_X12		= 0x0c,
271 	PCIE_LNK_X16		= 0x10,
272 	PCIE_LNK_X32		= 0x20,
273 	PCIE_LNK_WIDTH_UNKNOWN	= 0xff,
274 };
275 
276 /* See matching string table in pci_speed_string() */
277 enum pci_bus_speed {
278 	PCI_SPEED_33MHz			= 0x00,
279 	PCI_SPEED_66MHz			= 0x01,
280 	PCI_SPEED_66MHz_PCIX		= 0x02,
281 	PCI_SPEED_100MHz_PCIX		= 0x03,
282 	PCI_SPEED_133MHz_PCIX		= 0x04,
283 	PCI_SPEED_66MHz_PCIX_ECC	= 0x05,
284 	PCI_SPEED_100MHz_PCIX_ECC	= 0x06,
285 	PCI_SPEED_133MHz_PCIX_ECC	= 0x07,
286 	PCI_SPEED_66MHz_PCIX_266	= 0x09,
287 	PCI_SPEED_100MHz_PCIX_266	= 0x0a,
288 	PCI_SPEED_133MHz_PCIX_266	= 0x0b,
289 	AGP_UNKNOWN			= 0x0c,
290 	AGP_1X				= 0x0d,
291 	AGP_2X				= 0x0e,
292 	AGP_4X				= 0x0f,
293 	AGP_8X				= 0x10,
294 	PCI_SPEED_66MHz_PCIX_533	= 0x11,
295 	PCI_SPEED_100MHz_PCIX_533	= 0x12,
296 	PCI_SPEED_133MHz_PCIX_533	= 0x13,
297 	PCIE_SPEED_2_5GT		= 0x14,
298 	PCIE_SPEED_5_0GT		= 0x15,
299 	PCIE_SPEED_8_0GT		= 0x16,
300 	PCIE_SPEED_16_0GT		= 0x17,
301 	PCIE_SPEED_32_0GT		= 0x18,
302 	PCIE_SPEED_64_0GT		= 0x19,
303 	PCI_SPEED_UNKNOWN		= 0xff,
304 };
305 
306 enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev);
307 enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev);
308 
309 struct pci_vpd {
310 	struct mutex	lock;
311 	unsigned int	len;
312 	u8		cap;
313 };
314 
315 struct irq_affinity;
316 struct pcie_link_state;
317 struct pci_sriov;
318 struct pci_p2pdma;
319 struct rcec_ea;
320 
321 /* The pci_dev structure describes PCI devices */
322 struct pci_dev {
323 	struct list_head bus_list;	/* Node in per-bus list */
324 	struct pci_bus	*bus;		/* Bus this device is on */
325 	struct pci_bus	*subordinate;	/* Bus this device bridges to */
326 
327 	void		*sysdata;	/* Hook for sys-specific extension */
328 	struct proc_dir_entry *procent;	/* Device entry in /proc/bus/pci */
329 	struct pci_slot	*slot;		/* Physical slot this device is in */
330 
331 	unsigned int	devfn;		/* Encoded device & function index */
332 	unsigned short	vendor;
333 	unsigned short	device;
334 	unsigned short	subsystem_vendor;
335 	unsigned short	subsystem_device;
336 	unsigned int	class;		/* 3 bytes: (base,sub,prog-if) */
337 	u8		revision;	/* PCI revision, low byte of class word */
338 	u8		hdr_type;	/* PCI header type (`multi' flag masked out) */
339 #ifdef CONFIG_PCIEAER
340 	u16		aer_cap;	/* AER capability offset */
341 	struct aer_stats *aer_stats;	/* AER stats for this device */
342 #endif
343 #ifdef CONFIG_PCIEPORTBUS
344 	struct rcec_ea	*rcec_ea;	/* RCEC cached endpoint association */
345 	struct pci_dev  *rcec;          /* Associated RCEC device */
346 #endif
347 	u32		devcap;		/* PCIe Device Capabilities */
348 	u8		pcie_cap;	/* PCIe capability offset */
349 	u8		msi_cap;	/* MSI capability offset */
350 	u8		msix_cap;	/* MSI-X capability offset */
351 	u8		pcie_mpss:3;	/* PCIe Max Payload Size Supported */
352 	u8		rom_base_reg;	/* Config register controlling ROM */
353 	u8		pin;		/* Interrupt pin this device uses */
354 	u16		pcie_flags_reg;	/* Cached PCIe Capabilities Register */
355 	unsigned long	*dma_alias_mask;/* Mask of enabled devfn aliases */
356 
357 	struct pci_driver *driver;	/* Driver bound to this device */
358 	u64		dma_mask;	/* Mask of the bits of bus address this
359 					   device implements.  Normally this is
360 					   0xffffffff.  You only need to change
361 					   this if your device has broken DMA
362 					   or supports 64-bit transfers.  */
363 
364 	struct device_dma_parameters dma_parms;
365 
366 	pci_power_t	current_state;	/* Current operating state. In ACPI,
367 					   this is D0-D3, D0 being fully
368 					   functional, and D3 being off. */
369 	u8		pm_cap;		/* PM capability offset */
370 	unsigned int	imm_ready:1;	/* Supports Immediate Readiness */
371 	unsigned int	pme_support:5;	/* Bitmask of states from which PME#
372 					   can be generated */
373 	unsigned int	pme_poll:1;	/* Poll device's PME status bit */
374 	unsigned int	d1_support:1;	/* Low power state D1 is supported */
375 	unsigned int	d2_support:1;	/* Low power state D2 is supported */
376 	unsigned int	no_d1d2:1;	/* D1 and D2 are forbidden */
377 	unsigned int	no_d3cold:1;	/* D3cold is forbidden */
378 	unsigned int	bridge_d3:1;	/* Allow D3 for bridge */
379 	unsigned int	d3cold_allowed:1;	/* D3cold is allowed by user */
380 	unsigned int	mmio_always_on:1;	/* Disallow turning off io/mem
381 						   decoding during BAR sizing */
382 	unsigned int	wakeup_prepared:1;
383 	unsigned int	skip_bus_pm:1;	/* Internal: Skip bus-level PM */
384 	unsigned int	ignore_hotplug:1;	/* Ignore hotplug events */
385 	unsigned int	hotplug_user_indicators:1; /* SlotCtl indicators
386 						      controlled exclusively by
387 						      user sysfs */
388 	unsigned int	clear_retrain_link:1;	/* Need to clear Retrain Link
389 						   bit manually */
390 	unsigned int	d3hot_delay;	/* D3hot->D0 transition time in ms */
391 	unsigned int	d3cold_delay;	/* D3cold->D0 transition time in ms */
392 
393 #ifdef CONFIG_PCIEASPM
394 	struct pcie_link_state	*link_state;	/* ASPM link state */
395 	u16		l1ss;		/* L1SS Capability pointer */
396 	unsigned int	ltr_path:1;	/* Latency Tolerance Reporting
397 					   supported from root to here */
398 #endif
399 	unsigned int	pasid_no_tlp:1;		/* PASID works without TLP Prefix */
400 	unsigned int	eetlp_prefix_path:1;	/* End-to-End TLP Prefix */
401 
402 	pci_channel_state_t error_state;	/* Current connectivity state */
403 	struct device	dev;			/* Generic device interface */
404 
405 	int		cfg_size;		/* Size of config space */
406 
407 	/*
408 	 * Instead of touching interrupt line and base address registers
409 	 * directly, use the values stored here. They might be different!
410 	 */
411 	unsigned int	irq;
412 	struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */
413 	struct resource driver_exclusive_resource;	 /* driver exclusive resource ranges */
414 
415 	bool		match_driver;		/* Skip attaching driver */
416 
417 	unsigned int	transparent:1;		/* Subtractive decode bridge */
418 	unsigned int	io_window:1;		/* Bridge has I/O window */
419 	unsigned int	pref_window:1;		/* Bridge has pref mem window */
420 	unsigned int	pref_64_window:1;	/* Pref mem window is 64-bit */
421 	unsigned int	multifunction:1;	/* Multi-function device */
422 
423 	unsigned int	is_busmaster:1;		/* Is busmaster */
424 	unsigned int	no_msi:1;		/* May not use MSI */
425 	unsigned int	no_64bit_msi:1;		/* May only use 32-bit MSIs */
426 	unsigned int	block_cfg_access:1;	/* Config space access blocked */
427 	unsigned int	broken_parity_status:1;	/* Generates false positive parity */
428 	unsigned int	irq_reroute_variant:2;	/* Needs IRQ rerouting variant */
429 	unsigned int	msi_enabled:1;
430 	unsigned int	msix_enabled:1;
431 	unsigned int	ari_enabled:1;		/* ARI forwarding */
432 	unsigned int	ats_enabled:1;		/* Address Translation Svc */
433 	unsigned int	pasid_enabled:1;	/* Process Address Space ID */
434 	unsigned int	pri_enabled:1;		/* Page Request Interface */
435 	unsigned int	is_managed:1;		/* Managed via devres */
436 	unsigned int	is_msi_managed:1;	/* MSI release via devres installed */
437 	unsigned int	needs_freset:1;		/* Requires fundamental reset */
438 	unsigned int	state_saved:1;
439 	unsigned int	is_physfn:1;
440 	unsigned int	is_virtfn:1;
441 	unsigned int	is_hotplug_bridge:1;
442 	unsigned int	shpc_managed:1;		/* SHPC owned by shpchp */
443 	unsigned int	is_thunderbolt:1;	/* Thunderbolt controller */
444 	/*
445 	 * Devices marked being untrusted are the ones that can potentially
446 	 * execute DMA attacks and similar. They are typically connected
447 	 * through external ports such as Thunderbolt but not limited to
448 	 * that. When an IOMMU is enabled they should be getting full
449 	 * mappings to make sure they cannot access arbitrary memory.
450 	 */
451 	unsigned int	untrusted:1;
452 	/*
453 	 * Info from the platform, e.g., ACPI or device tree, may mark a
454 	 * device as "external-facing".  An external-facing device is
455 	 * itself internal but devices downstream from it are external.
456 	 */
457 	unsigned int	external_facing:1;
458 	unsigned int	broken_intx_masking:1;	/* INTx masking can't be used */
459 	unsigned int	io_window_1k:1;		/* Intel bridge 1K I/O windows */
460 	unsigned int	irq_managed:1;
461 	unsigned int	non_compliant_bars:1;	/* Broken BARs; ignore them */
462 	unsigned int	is_probed:1;		/* Device probing in progress */
463 	unsigned int	link_active_reporting:1;/* Device capable of reporting link active */
464 	unsigned int	no_vf_scan:1;		/* Don't scan for VFs after IOV enablement */
465 	unsigned int	no_command_memory:1;	/* No PCI_COMMAND_MEMORY */
466 	unsigned int	rom_bar_overlap:1;	/* ROM BAR disable broken */
467 	unsigned int	rom_attr_enabled:1;	/* Display of ROM attribute enabled? */
468 	pci_dev_flags_t dev_flags;
469 	atomic_t	enable_cnt;	/* pci_enable_device has been called */
470 
471 	spinlock_t	pcie_cap_lock;		/* Protects RMW ops in capability accessors */
472 	u32		saved_config_space[16]; /* Config space saved at suspend time */
473 	struct hlist_head saved_cap_space;
474 	struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */
475 	struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */
476 
477 #ifdef CONFIG_HOTPLUG_PCI_PCIE
478 	unsigned int	broken_cmd_compl:1;	/* No compl for some cmds */
479 #endif
480 #ifdef CONFIG_PCIE_PTM
481 	u16		ptm_cap;		/* PTM Capability */
482 	unsigned int	ptm_root:1;
483 	unsigned int	ptm_enabled:1;
484 	u8		ptm_granularity;
485 #endif
486 #ifdef CONFIG_PCI_MSI
487 	void __iomem	*msix_base;
488 	raw_spinlock_t	msi_lock;
489 #endif
490 	struct pci_vpd	vpd;
491 #ifdef CONFIG_PCIE_DPC
492 	u16		dpc_cap;
493 	unsigned int	dpc_rp_extensions:1;
494 	u8		dpc_rp_log_size;
495 #endif
496 #ifdef CONFIG_PCI_ATS
497 	union {
498 		struct pci_sriov	*sriov;		/* PF: SR-IOV info */
499 		struct pci_dev		*physfn;	/* VF: related PF */
500 	};
501 	u16		ats_cap;	/* ATS Capability offset */
502 	u8		ats_stu;	/* ATS Smallest Translation Unit */
503 #endif
504 #ifdef CONFIG_PCI_PRI
505 	u16		pri_cap;	/* PRI Capability offset */
506 	u32		pri_reqs_alloc; /* Number of PRI requests allocated */
507 	unsigned int	pasid_required:1; /* PRG Response PASID Required */
508 #endif
509 #ifdef CONFIG_PCI_PASID
510 	u16		pasid_cap;	/* PASID Capability offset */
511 	u16		pasid_features;
512 #endif
513 #ifdef CONFIG_PCI_P2PDMA
514 	struct pci_p2pdma __rcu *p2pdma;
515 #endif
516 #ifdef CONFIG_PCI_DOE
517 	struct xarray	doe_mbs;	/* Data Object Exchange mailboxes */
518 #endif
519 	u16		acs_cap;	/* ACS Capability offset */
520 	phys_addr_t	rom;		/* Physical address if not from BAR */
521 	size_t		romlen;		/* Length if not from BAR */
522 	/*
523 	 * Driver name to force a match.  Do not set directly, because core
524 	 * frees it.  Use driver_set_override() to set or clear it.
525 	 */
526 	const char	*driver_override;
527 
528 	unsigned long	priv_flags;	/* Private flags for the PCI driver */
529 
530 	/* These methods index pci_reset_fn_methods[] */
531 	u8 reset_methods[PCI_NUM_RESET_METHODS]; /* In priority order */
532 };
533 
534 static inline struct pci_dev *pci_physfn(struct pci_dev *dev)
535 {
536 #ifdef CONFIG_PCI_IOV
537 	if (dev->is_virtfn)
538 		dev = dev->physfn;
539 #endif
540 	return dev;
541 }
542 
543 struct pci_dev *pci_alloc_dev(struct pci_bus *bus);
544 
545 #define	to_pci_dev(n) container_of(n, struct pci_dev, dev)
546 #define for_each_pci_dev(d) while ((d = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, d)) != NULL)
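/*
 * Example (sketch): walking every PCI device in the system.  pci_get_device()
 * takes a reference on each device it returns and drops the reference on the
 * previous one, so breaking out of the loop early leaves one reference held.
 *
 *	struct pci_dev *pdev = NULL;
 *
 *	for_each_pci_dev(pdev)
 *		pr_info("%04x:%04x\n", pdev->vendor, pdev->device);
 */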
547 
548 static inline int pci_channel_offline(struct pci_dev *pdev)
549 {
550 	return (pdev->error_state != pci_channel_io_normal);
551 }
552 
553 /*
554  * Currently in ACPI spec, for each PCI host bridge, PCI Segment
555  * Group number is limited to a 16-bit value, therefore (int)-1 is
556  * not a valid PCI domain number, and can be used as a sentinel
557  * value indicating ->domain_nr is not set by the driver (and
558  * CONFIG_PCI_DOMAINS_GENERIC=y archs will set it with
559  * pci_bus_find_domain_nr()).
560  */
561 #define PCI_DOMAIN_NR_NOT_SET (-1)
562 
563 struct pci_host_bridge {
564 	struct device	dev;
565 	struct pci_bus	*bus;		/* Root bus */
566 	struct pci_ops	*ops;
567 	struct pci_ops	*child_ops;
568 	void		*sysdata;
569 	int		busnr;
570 	int		domain_nr;
571 	struct list_head windows;	/* resource_entry */
572 	struct list_head dma_ranges;	/* dma ranges resource list */
573 	u8 (*swizzle_irq)(struct pci_dev *, u8 *); /* Platform IRQ swizzler */
574 	int (*map_irq)(const struct pci_dev *, u8, u8);
575 	void (*release_fn)(struct pci_host_bridge *);
576 	void		*release_data;
577 	unsigned int	ignore_reset_delay:1;	/* For entire hierarchy */
578 	unsigned int	no_ext_tags:1;		/* No Extended Tags */
579 	unsigned int	no_inc_mrrs:1;		/* No Increase MRRS */
580 	unsigned int	native_aer:1;		/* OS may use PCIe AER */
581 	unsigned int	native_pcie_hotplug:1;	/* OS may use PCIe hotplug */
582 	unsigned int	native_shpc_hotplug:1;	/* OS may use SHPC hotplug */
583 	unsigned int	native_pme:1;		/* OS may use PCIe PME */
584 	unsigned int	native_ltr:1;		/* OS may use PCIe LTR */
585 	unsigned int	native_dpc:1;		/* OS may use PCIe DPC */
586 	unsigned int	native_cxl_error:1;	/* OS may use CXL RAS/Events */
587 	unsigned int	preserve_config:1;	/* Preserve FW resource setup */
588 	unsigned int	size_windows:1;		/* Enable root bus sizing */
589 	unsigned int	msi_domain:1;		/* Bridge wants MSI domain */
590 
591 	/* Resource alignment requirements */
592 	resource_size_t (*align_resource)(struct pci_dev *dev,
593 			const struct resource *res,
594 			resource_size_t start,
595 			resource_size_t size,
596 			resource_size_t align);
597 	unsigned long	private[] ____cacheline_aligned;
598 };
599 
600 #define	to_pci_host_bridge(n) container_of(n, struct pci_host_bridge, dev)
601 
602 static inline void *pci_host_bridge_priv(struct pci_host_bridge *bridge)
603 {
604 	return (void *)bridge->private;
605 }
606 
607 static inline struct pci_host_bridge *pci_host_bridge_from_priv(void *priv)
608 {
609 	return container_of(priv, struct pci_host_bridge, private);
610 }
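/*
 * Example (sketch): a host controller driver embedding its private state in
 * the bridge allocation.  "struct my_pcie" and "dev" are hypothetical.
 *
 *	struct pci_host_bridge *bridge;
 *	struct my_pcie *my;
 *
 *	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*my));
 *	if (!bridge)
 *		return -ENOMEM;
 *	my = pci_host_bridge_priv(bridge);
 */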
611 
612 struct pci_host_bridge *pci_alloc_host_bridge(size_t priv);
613 struct pci_host_bridge *devm_pci_alloc_host_bridge(struct device *dev,
614 						   size_t priv);
615 void pci_free_host_bridge(struct pci_host_bridge *bridge);
616 struct pci_host_bridge *pci_find_host_bridge(struct pci_bus *bus);
617 
618 void pci_set_host_bridge_release(struct pci_host_bridge *bridge,
619 				 void (*release_fn)(struct pci_host_bridge *),
620 				 void *release_data);
621 
622 int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge);
623 
624 /*
625  * The first PCI_BRIDGE_RESOURCE_NUM PCI bus resources (those that correspond
626  * to P2P or CardBus bridge windows) go in a table.  Additional ones (for
627  * buses below host bridges or subtractive decode bridges) go in the list.
628  * Use pci_bus_for_each_resource() to iterate through all the resources.
629  */
630 
631 /*
632  * PCI_SUBTRACTIVE_DECODE means the bridge forwards the window implicitly
633  * and there's no way to program the bridge with the details of the window.
634  * This does not apply to ACPI _CRS windows, even with the _DEC subtractive-
635  * decode bit set, because they are explicit and can be programmed with _SRS.
636  */
637 #define PCI_SUBTRACTIVE_DECODE	0x1
638 
639 struct pci_bus_resource {
640 	struct list_head	list;
641 	struct resource		*res;
642 	unsigned int		flags;
643 };
644 
645 #define PCI_REGION_FLAG_MASK	0x0fU	/* These bits of resource flags tell us the PCI region flags */
646 
647 struct pci_bus {
648 	struct list_head node;		/* Node in list of buses */
649 	struct pci_bus	*parent;	/* Parent bus this bridge is on */
650 	struct list_head children;	/* List of child buses */
651 	struct list_head devices;	/* List of devices on this bus */
652 	struct pci_dev	*self;		/* Bridge device as seen by parent */
653 	struct list_head slots;		/* List of slots on this bus;
654 					   protected by pci_slot_mutex */
655 	struct resource *resource[PCI_BRIDGE_RESOURCE_NUM];
656 	struct list_head resources;	/* Address space routed to this bus */
657 	struct resource busn_res;	/* Bus numbers routed to this bus */
658 
659 	struct pci_ops	*ops;		/* Configuration access functions */
660 	void		*sysdata;	/* Hook for sys-specific extension */
661 	struct proc_dir_entry *procdir;	/* Directory entry in /proc/bus/pci */
662 
663 	unsigned char	number;		/* Bus number */
664 	unsigned char	primary;	/* Number of primary bridge */
665 	unsigned char	max_bus_speed;	/* enum pci_bus_speed */
666 	unsigned char	cur_bus_speed;	/* enum pci_bus_speed */
667 #ifdef CONFIG_PCI_DOMAINS_GENERIC
668 	int		domain_nr;
669 #endif
670 
671 	char		name[48];
672 
673 	unsigned short	bridge_ctl;	/* Manage NO_ISA/FBB/et al behaviors */
674 	pci_bus_flags_t bus_flags;	/* Inherited by child buses */
675 	struct device		*bridge;
676 	struct device		dev;
677 	struct bin_attribute	*legacy_io;	/* Legacy I/O for this bus */
678 	struct bin_attribute	*legacy_mem;	/* Legacy mem */
679 	unsigned int		is_added:1;
680 	unsigned int		unsafe_warn:1;	/* warned about RW1C config write */
681 };
682 
683 #define to_pci_bus(n)	container_of(n, struct pci_bus, dev)
684 
685 static inline u16 pci_dev_id(struct pci_dev *dev)
686 {
687 	return PCI_DEVID(dev->bus->number, dev->devfn);
688 }
689 
690 /*
691  * Returns true if the PCI bus is root (behind host-PCI bridge),
692  * false otherwise
693  *
694  * Some code assumes that "bus->self == NULL" means that bus is a root bus.
695  * This is incorrect because "virtual" buses added for SR-IOV (via
696  * virtfn_add_bus()) have "bus->self == NULL" but are not root buses.
697  */
698 static inline bool pci_is_root_bus(struct pci_bus *pbus)
699 {
700 	return !(pbus->parent);
701 }
702 
703 /**
704  * pci_is_bridge - check if the PCI device is a bridge
705  * @dev: PCI device
706  *
707  * Return true if the PCI device is a bridge, whether or not it has a
708  * subordinate bus.
709  */
710 static inline bool pci_is_bridge(struct pci_dev *dev)
711 {
712 	return dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
713 		dev->hdr_type == PCI_HEADER_TYPE_CARDBUS;
714 }
715 
716 #define for_each_pci_bridge(dev, bus)				\
717 	list_for_each_entry(dev, &bus->devices, bus_list)	\
718 		if (!pci_is_bridge(dev)) {} else
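/*
 * Example (sketch): visiting only the bridges on a bus.  "bus" is a
 * hypothetical struct pci_bus pointer already held by the caller.
 *
 *	struct pci_dev *dev;
 *
 *	for_each_pci_bridge(dev, bus)
 *		pci_info(dev, "bridge to bus %02x\n",
 *			 dev->subordinate ? dev->subordinate->number : 0);
 */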
719 
720 static inline struct pci_dev *pci_upstream_bridge(struct pci_dev *dev)
721 {
722 	dev = pci_physfn(dev);
723 	if (pci_is_root_bus(dev->bus))
724 		return NULL;
725 
726 	return dev->bus->self;
727 }
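/*
 * Example (sketch): walking from a device up toward the host bridge using
 * only pci_upstream_bridge(), which returns NULL once the root bus is reached.
 *
 *	struct pci_dev *bridge = pdev;
 *
 *	while (pci_upstream_bridge(bridge))
 *		bridge = pci_upstream_bridge(bridge);
 *	// "bridge" is now the device closest to the host bridge
 */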
728 
729 #ifdef CONFIG_PCI_MSI
730 static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev)
731 {
732 	return pci_dev->msi_enabled || pci_dev->msix_enabled;
733 }
734 #else
735 static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev) { return false; }
736 #endif
737 
738 /* Error values that may be returned by PCI functions */
739 #define PCIBIOS_SUCCESSFUL		0x00
740 #define PCIBIOS_FUNC_NOT_SUPPORTED	0x81
741 #define PCIBIOS_BAD_VENDOR_ID		0x83
742 #define PCIBIOS_DEVICE_NOT_FOUND	0x86
743 #define PCIBIOS_BAD_REGISTER_NUMBER	0x87
744 #define PCIBIOS_SET_FAILED		0x88
745 #define PCIBIOS_BUFFER_TOO_SMALL	0x89
746 
747 /* Translate above to generic errno for passing back through non-PCI code */
748 static inline int pcibios_err_to_errno(int err)
749 {
750 	if (err <= PCIBIOS_SUCCESSFUL)
751 		return err; /* Assume already errno */
752 
753 	switch (err) {
754 	case PCIBIOS_FUNC_NOT_SUPPORTED:
755 		return -ENOENT;
756 	case PCIBIOS_BAD_VENDOR_ID:
757 		return -ENOTTY;
758 	case PCIBIOS_DEVICE_NOT_FOUND:
759 		return -ENODEV;
760 	case PCIBIOS_BAD_REGISTER_NUMBER:
761 		return -EFAULT;
762 	case PCIBIOS_SET_FAILED:
763 		return -EIO;
764 	case PCIBIOS_BUFFER_TOO_SMALL:
765 		return -ENOSPC;
766 	}
767 
768 	return -ERANGE;
769 }
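/*
 * Example (sketch): converting a PCIBIOS_* return value into a normal errno
 * before handing it back to generic code.
 *
 *	u16 cmd;
 *	int ret;
 *
 *	ret = pci_read_config_word(pdev, PCI_COMMAND, &cmd);
 *	if (ret)
 *		return pcibios_err_to_errno(ret);
 */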
770 
771 /* Low-level architecture-dependent routines */
772 
773 struct pci_ops {
774 	int (*add_bus)(struct pci_bus *bus);
775 	void (*remove_bus)(struct pci_bus *bus);
776 	void __iomem *(*map_bus)(struct pci_bus *bus, unsigned int devfn, int where);
777 	int (*read)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val);
778 	int (*write)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val);
779 };
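/*
 * Example (sketch): a minimal host controller pci_ops built on the generic
 * accessors declared later in this header.  "my_ecam_map_bus" is a
 * hypothetical driver callback returning a mapped config-space address.
 *
 *	static struct pci_ops my_pci_ops = {
 *		.map_bus = my_ecam_map_bus,
 *		.read    = pci_generic_config_read,
 *		.write   = pci_generic_config_write,
 *	};
 */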
780 
781 /*
782  * ACPI needs to be able to access PCI config space before we've done a
783  * PCI bus scan and created pci_bus structures.
784  */
785 int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
786 		 int reg, int len, u32 *val);
787 int raw_pci_write(unsigned int domain, unsigned int bus, unsigned int devfn,
788 		  int reg, int len, u32 val);
789 
790 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
791 typedef u64 pci_bus_addr_t;
792 #else
793 typedef u32 pci_bus_addr_t;
794 #endif
795 
796 struct pci_bus_region {
797 	pci_bus_addr_t	start;
798 	pci_bus_addr_t	end;
799 };
800 
801 struct pci_dynids {
802 	spinlock_t		lock;	/* Protects list, index */
803 	struct list_head	list;	/* For IDs added at runtime */
804 };
805 
806 
807 /*
808  * PCI Error Recovery System (PCI-ERS).  If a PCI device driver provides
809  * a set of callbacks in struct pci_error_handlers, that device driver
810  * will be notified of PCI bus errors, and will be driven to recovery
811  * when an error occurs.
812  */
813 
814 typedef unsigned int __bitwise pci_ers_result_t;
815 
816 enum pci_ers_result {
817 	/* No result/none/not supported in device driver */
818 	PCI_ERS_RESULT_NONE = (__force pci_ers_result_t) 1,
819 
820 	/* Device driver can recover without slot reset */
821 	PCI_ERS_RESULT_CAN_RECOVER = (__force pci_ers_result_t) 2,
822 
823 	/* Device driver wants slot to be reset */
824 	PCI_ERS_RESULT_NEED_RESET = (__force pci_ers_result_t) 3,
825 
826 	/* Device has completely failed, is unrecoverable */
827 	PCI_ERS_RESULT_DISCONNECT = (__force pci_ers_result_t) 4,
828 
829 	/* Device driver is fully recovered and operational */
830 	PCI_ERS_RESULT_RECOVERED = (__force pci_ers_result_t) 5,
831 
832 	/* No AER capabilities registered for the driver */
833 	PCI_ERS_RESULT_NO_AER_DRIVER = (__force pci_ers_result_t) 6,
834 };
835 
836 /* PCI bus error event callbacks */
837 struct pci_error_handlers {
838 	/* PCI bus error detected on this device */
839 	pci_ers_result_t (*error_detected)(struct pci_dev *dev,
840 					   pci_channel_state_t error);
841 
842 	/* MMIO has been re-enabled, but not DMA */
843 	pci_ers_result_t (*mmio_enabled)(struct pci_dev *dev);
844 
845 	/* PCI slot has been reset */
846 	pci_ers_result_t (*slot_reset)(struct pci_dev *dev);
847 
848 	/* PCI function reset prepare or completed */
849 	void (*reset_prepare)(struct pci_dev *dev);
850 	void (*reset_done)(struct pci_dev *dev);
851 
852 	/* Device driver may resume normal operations */
853 	void (*resume)(struct pci_dev *dev);
854 
855 	/* Allow device driver to record more details of a correctable error */
856 	void (*cor_error_detected)(struct pci_dev *dev);
857 };
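/*
 * Example (sketch): the shape of a driver's error handler table.  The "my_*"
 * callbacks are hypothetical; see Documentation/PCI/pci-error-recovery.rst.
 *
 *	static const struct pci_error_handlers my_err_handler = {
 *		.error_detected	= my_error_detected,
 *		.slot_reset	= my_slot_reset,
 *		.resume		= my_resume,
 *	};
 *
 * A driver publishes this table through the err_handler member of its
 * struct pci_driver (below).
 */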
858 
859 
860 struct module;
861 
862 /**
863  * struct pci_driver - PCI driver structure
864  * @node:	List of driver structures.
865  * @name:	Driver name.
866  * @id_table:	Pointer to table of device IDs the driver is
867  *		interested in.  Most drivers should export this
868  *		table using MODULE_DEVICE_TABLE(pci,...).
869  * @probe:	This probing function gets called (during execution
870  *		of pci_register_driver() for already existing
871  *		devices or later if a new device gets inserted) for
872  *		all PCI devices which match the ID table and are not
873  *		"owned" by the other drivers yet. This function gets
874  *		passed a "struct pci_dev \*" for each device whose
875  *		entry in the ID table matches the device. The probe
876  *		function returns zero when the driver chooses to
877  *		take "ownership" of the device or an error code
878  *		(negative number) otherwise.
879  *		The probe function always gets called from process
880  *		context, so it can sleep.
881  * @remove:	The remove() function gets called whenever a device
882  *		being handled by this driver is removed (either during
883  *		deregistration of the driver or when it's manually
884  *		pulled out of a hot-pluggable slot).
885  *		The remove function always gets called from process
886  *		context, so it can sleep.
887  * @suspend:	Put device into low power state.
888  * @resume:	Wake device from low power state.
889  *		(Please see Documentation/power/pci.rst for descriptions
890  *		of PCI Power Management and the related functions.)
891  * @shutdown:	Hook into reboot_notifier_list (kernel/sys.c).
892  *		Intended to stop any idling DMA operations.
893  *		Useful for enabling wake-on-lan (NIC) or changing
894  *		the power state of a device before reboot.
895  *		e.g. drivers/net/e100.c.
896  * @sriov_configure: Optional driver callback to allow configuration of
897  *		number of VFs to enable via sysfs "sriov_numvfs" file.
898  * @sriov_set_msix_vec_count: PF Driver callback to change number of MSI-X
899  *              vectors on a VF. Triggered via sysfs "sriov_vf_msix_count".
900  *              This will change MSI-X Table Size in the VF Message Control
901  *              registers.
902  * @sriov_get_vf_total_msix: PF driver callback to get the total number of
903  *              MSI-X vectors available for distribution to the VFs.
904  * @err_handler: See Documentation/PCI/pci-error-recovery.rst
905  * @groups:	Sysfs attribute groups.
906  * @dev_groups: Attributes attached to the device that will be
907  *              created once it is bound to the driver.
908  * @driver:	Driver model structure.
909  * @dynids:	List of dynamically added device IDs.
910  * @driver_managed_dma: Device driver doesn't use kernel DMA API for DMA.
911  *		For most device drivers, no need to care about this flag
912  *		as long as all DMAs are handled through the kernel DMA API.
913  *		For some special ones, for example VFIO drivers, they know
914  *		how to manage the DMA themselves and set this flag so that
915  *		the IOMMU layer will allow them to setup and manage their
916  *		own I/O address space.
917  */
918 struct pci_driver {
919 	struct list_head	node;
920 	const char		*name;
921 	const struct pci_device_id *id_table;	/* Must be non-NULL for probe to be called */
922 	int  (*probe)(struct pci_dev *dev, const struct pci_device_id *id);	/* New device inserted */
923 	void (*remove)(struct pci_dev *dev);	/* Device removed (NULL if not a hot-plug capable driver) */
924 	int  (*suspend)(struct pci_dev *dev, pm_message_t state);	/* Device suspended */
925 	int  (*resume)(struct pci_dev *dev);	/* Device woken up */
926 	void (*shutdown)(struct pci_dev *dev);
927 	int  (*sriov_configure)(struct pci_dev *dev, int num_vfs); /* On PF */
928 	int  (*sriov_set_msix_vec_count)(struct pci_dev *vf, int msix_vec_count); /* On PF */
929 	u32  (*sriov_get_vf_total_msix)(struct pci_dev *pf);
930 	const struct pci_error_handlers *err_handler;
931 	const struct attribute_group **groups;
932 	const struct attribute_group **dev_groups;
933 	struct device_driver	driver;
934 	struct pci_dynids	dynids;
935 	bool driver_managed_dma;
936 };
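/*
 * Example (sketch): a skeletal driver built around this structure.  All
 * "my_*" identifiers and the 0x1234/0x5678 IDs are hypothetical;
 * module_pci_driver() is the registration helper defined later in this
 * header.
 *
 *	static const struct pci_device_id my_ids[] = {
 *		{ PCI_DEVICE(0x1234, 0x5678) },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(pci, my_ids);
 *
 *	static struct pci_driver my_driver = {
 *		.name		= "my_driver",
 *		.id_table	= my_ids,
 *		.probe		= my_probe,
 *		.remove		= my_remove,
 *	};
 *	module_pci_driver(my_driver);
 */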
937 
938 static inline struct pci_driver *to_pci_driver(struct device_driver *drv)
939 {
940 	return drv ? container_of(drv, struct pci_driver, driver) : NULL;
941 }
942 
943 /**
944  * PCI_DEVICE - macro used to describe a specific PCI device
945  * @vend: the 16 bit PCI Vendor ID
946  * @dev: the 16 bit PCI Device ID
947  *
948  * This macro is used to create a struct pci_device_id that matches a
949  * specific device.  The subvendor and subdevice fields will be set to
950  * PCI_ANY_ID.
951  */
952 #define PCI_DEVICE(vend,dev) \
953 	.vendor = (vend), .device = (dev), \
954 	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
955 
956 /**
957  * PCI_DEVICE_DRIVER_OVERRIDE - macro used to describe a PCI device with
958  *                              override_only flags.
959  * @vend: the 16 bit PCI Vendor ID
960  * @dev: the 16 bit PCI Device ID
961  * @driver_override: the 32 bit PCI Device override_only
962  *
963  * This macro is used to create a struct pci_device_id that matches only a
964  * driver_override device. The subvendor and subdevice fields will be set to
965  * PCI_ANY_ID.
966  */
967 #define PCI_DEVICE_DRIVER_OVERRIDE(vend, dev, driver_override) \
968 	.vendor = (vend), .device = (dev), .subvendor = PCI_ANY_ID, \
969 	.subdevice = PCI_ANY_ID, .override_only = (driver_override)
970 
971 /**
972  * PCI_DRIVER_OVERRIDE_DEVICE_VFIO - macro used to describe a VFIO
973  *                                   "driver_override" PCI device.
974  * @vend: the 16 bit PCI Vendor ID
975  * @dev: the 16 bit PCI Device ID
976  *
977  * This macro is used to create a struct pci_device_id that matches a
978  * specific device. The subvendor and subdevice fields will be set to
979  * PCI_ANY_ID and the driver_override will be set to
980  * PCI_ID_F_VFIO_DRIVER_OVERRIDE.
981  */
982 #define PCI_DRIVER_OVERRIDE_DEVICE_VFIO(vend, dev) \
983 	PCI_DEVICE_DRIVER_OVERRIDE(vend, dev, PCI_ID_F_VFIO_DRIVER_OVERRIDE)
984 
985 /**
986  * PCI_DEVICE_SUB - macro used to describe a specific PCI device with subsystem
987  * @vend: the 16 bit PCI Vendor ID
988  * @dev: the 16 bit PCI Device ID
989  * @subvend: the 16 bit PCI Subvendor ID
990  * @subdev: the 16 bit PCI Subdevice ID
991  *
992  * This macro is used to create a struct pci_device_id that matches a
993  * specific device with subsystem information.
994  */
995 #define PCI_DEVICE_SUB(vend, dev, subvend, subdev) \
996 	.vendor = (vend), .device = (dev), \
997 	.subvendor = (subvend), .subdevice = (subdev)
998 
999 /**
1000  * PCI_DEVICE_CLASS - macro used to describe a specific PCI device class
1001  * @dev_class: the class, subclass, prog-if triple for this device
1002  * @dev_class_mask: the class mask for this device
1003  *
1004  * This macro is used to create a struct pci_device_id that matches a
1005  * specific PCI class.  The vendor, device, subvendor, and subdevice
1006  * fields will be set to PCI_ANY_ID.
1007  */
1008 #define PCI_DEVICE_CLASS(dev_class,dev_class_mask) \
1009 	.class = (dev_class), .class_mask = (dev_class_mask), \
1010 	.vendor = PCI_ANY_ID, .device = PCI_ANY_ID, \
1011 	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
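/*
 * Example: matching every NVMe-class device regardless of vendor, as done by
 * class-based drivers.
 *
 *	{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
 */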
1012 
1013 /**
1014  * PCI_VDEVICE - macro used to describe a specific PCI device in short form
1015  * @vend: the vendor name
1016  * @dev: the 16 bit PCI Device ID
1017  *
1018  * This macro is used to create a struct pci_device_id that matches a
1019  * specific PCI device.  The subvendor, and subdevice fields will be set
1020  * to PCI_ANY_ID. The macro allows the next field to follow as the device
1021  * private data.
1022  */
1023 #define PCI_VDEVICE(vend, dev) \
1024 	.vendor = PCI_VENDOR_ID_##vend, .device = (dev), \
1025 	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0
1026 
1027 /**
1028  * PCI_DEVICE_DATA - macro used to describe a specific PCI device in very short form
1029  * @vend: the vendor name (without PCI_VENDOR_ID_ prefix)
1030  * @dev: the device name (without PCI_DEVICE_ID_<vend>_ prefix)
1031  * @data: the driver data to be filled
1032  *
1033  * This macro is used to create a struct pci_device_id that matches a
1034  * specific PCI device.  The subvendor, and subdevice fields will be set
1035  * to PCI_ANY_ID.
1036  */
1037 #define PCI_DEVICE_DATA(vend, dev, data) \
1038 	.vendor = PCI_VENDOR_ID_##vend, .device = PCI_DEVICE_ID_##vend##_##dev, \
1039 	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0, \
1040 	.driver_data = (kernel_ulong_t)(data)
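/*
 * Example (sketch): attaching driver data to an ID table entry and reading it
 * back in probe.  MYVENDOR/MYDEV stand for existing PCI_VENDOR_ID_* and
 * PCI_DEVICE_ID_* suffixes; "my_cfg" is hypothetical.
 *
 *	{ PCI_DEVICE_DATA(MYVENDOR, MYDEV, &my_cfg) },
 *
 *	// in probe():
 *	const struct my_cfg *cfg = (const struct my_cfg *)id->driver_data;
 */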
1041 
1042 enum {
1043 	PCI_REASSIGN_ALL_RSRC	= 0x00000001,	/* Ignore firmware setup */
1044 	PCI_REASSIGN_ALL_BUS	= 0x00000002,	/* Reassign all bus numbers */
1045 	PCI_PROBE_ONLY		= 0x00000004,	/* Use existing setup */
1046 	PCI_CAN_SKIP_ISA_ALIGN	= 0x00000008,	/* Don't do ISA alignment */
1047 	PCI_ENABLE_PROC_DOMAINS	= 0x00000010,	/* Enable domains in /proc */
1048 	PCI_COMPAT_DOMAIN_0	= 0x00000020,	/* ... except domain 0 */
1049 	PCI_SCAN_ALL_PCIE_DEVS	= 0x00000040,	/* Scan all, not just dev 0 */
1050 };
1051 
1052 #define PCI_IRQ_LEGACY		(1 << 0) /* Allow legacy interrupts */
1053 #define PCI_IRQ_MSI		(1 << 1) /* Allow MSI interrupts */
1054 #define PCI_IRQ_MSIX		(1 << 2) /* Allow MSI-X interrupts */
1055 #define PCI_IRQ_AFFINITY	(1 << 3) /* Auto-assign affinity */
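/*
 * Example (sketch): requesting MSI-X first and falling back to MSI or legacy
 * INTx.  pci_alloc_irq_vectors() is declared later in this header; the vector
 * counts are placeholders.
 *
 *	nvec = pci_alloc_irq_vectors(pdev, 1, 8,
 *				     PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY);
 *	if (nvec < 0)
 *		return nvec;
 */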
1056 
1057 /* These external functions are only available when PCI support is enabled */
1058 #ifdef CONFIG_PCI
1059 
1060 extern unsigned int pci_flags;
1061 
1062 static inline void pci_set_flags(int flags) { pci_flags = flags; }
1063 static inline void pci_add_flags(int flags) { pci_flags |= flags; }
1064 static inline void pci_clear_flags(int flags) { pci_flags &= ~flags; }
1065 static inline int pci_has_flag(int flag) { return pci_flags & flag; }
1066 
1067 void pcie_bus_configure_settings(struct pci_bus *bus);
1068 
1069 enum pcie_bus_config_types {
1070 	PCIE_BUS_TUNE_OFF,	/* Don't touch MPS at all */
1071 	PCIE_BUS_DEFAULT,	/* Ensure MPS matches upstream bridge */
1072 	PCIE_BUS_SAFE,		/* Use largest MPS that boot-time devices support */
1073 	PCIE_BUS_PERFORMANCE,	/* Use MPS and MRRS for best performance */
1074 	PCIE_BUS_PEER2PEER,	/* Set MPS = 128 for all devices */
1075 };
1076 
1077 extern enum pcie_bus_config_types pcie_bus_config;
1078 
1079 extern struct bus_type pci_bus_type;
1080 
1081 /* Do NOT directly access this variable unless you are arch-specific PCI
1082  * code or PCI core code. */
1083 extern struct list_head pci_root_buses;	/* List of all known PCI buses */
1084 /* Some device drivers need to know if PCI is initialized */
1085 int no_pci_devices(void);
1086 
1087 void pcibios_resource_survey_bus(struct pci_bus *bus);
1088 void pcibios_bus_add_device(struct pci_dev *pdev);
1089 void pcibios_add_bus(struct pci_bus *bus);
1090 void pcibios_remove_bus(struct pci_bus *bus);
1091 void pcibios_fixup_bus(struct pci_bus *);
1092 int __must_check pcibios_enable_device(struct pci_dev *, int mask);
1093 /* Architecture-specific versions may override this (weak) */
1094 char *pcibios_setup(char *str);
1095 
1096 /* Used only when drivers/pci/setup.c is used */
1097 resource_size_t pcibios_align_resource(void *, const struct resource *,
1098 				resource_size_t,
1099 				resource_size_t);
1100 
1101 /* Weak but can be overridden by arch */
1102 void pci_fixup_cardbus(struct pci_bus *);
1103 
1104 /* Generic PCI functions used internally */
1105 
1106 void pcibios_resource_to_bus(struct pci_bus *bus, struct pci_bus_region *region,
1107 			     struct resource *res);
1108 void pcibios_bus_to_resource(struct pci_bus *bus, struct resource *res,
1109 			     struct pci_bus_region *region);
1110 void pcibios_scan_specific_bus(int busn);
1111 struct pci_bus *pci_find_bus(int domain, int busnr);
1112 void pci_bus_add_devices(const struct pci_bus *bus);
1113 struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops, void *sysdata);
1114 struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
1115 				    struct pci_ops *ops, void *sysdata,
1116 				    struct list_head *resources);
1117 int pci_host_probe(struct pci_host_bridge *bridge);
1118 int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int busmax);
1119 int pci_bus_update_busn_res_end(struct pci_bus *b, int busmax);
1120 void pci_bus_release_busn_res(struct pci_bus *b);
1121 struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
1122 				  struct pci_ops *ops, void *sysdata,
1123 				  struct list_head *resources);
1124 int pci_scan_root_bus_bridge(struct pci_host_bridge *bridge);
1125 struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
1126 				int busnr);
1127 struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr,
1128 				 const char *name,
1129 				 struct hotplug_slot *hotplug);
1130 void pci_destroy_slot(struct pci_slot *slot);
1131 #ifdef CONFIG_SYSFS
1132 void pci_dev_assign_slot(struct pci_dev *dev);
1133 #else
1134 static inline void pci_dev_assign_slot(struct pci_dev *dev) { }
1135 #endif
1136 int pci_scan_slot(struct pci_bus *bus, int devfn);
1137 struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn);
1138 void pci_device_add(struct pci_dev *dev, struct pci_bus *bus);
1139 unsigned int pci_scan_child_bus(struct pci_bus *bus);
1140 void pci_bus_add_device(struct pci_dev *dev);
1141 void pci_read_bridge_bases(struct pci_bus *child);
1142 struct resource *pci_find_parent_resource(const struct pci_dev *dev,
1143 					  struct resource *res);
1144 u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin);
1145 int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge);
1146 u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp);
1147 struct pci_dev *pci_dev_get(struct pci_dev *dev);
1148 void pci_dev_put(struct pci_dev *dev);
1149 DEFINE_FREE(pci_dev_put, struct pci_dev *, if (_T) pci_dev_put(_T))
1150 void pci_remove_bus(struct pci_bus *b);
1151 void pci_stop_and_remove_bus_device(struct pci_dev *dev);
1152 void pci_stop_and_remove_bus_device_locked(struct pci_dev *dev);
1153 void pci_stop_root_bus(struct pci_bus *bus);
1154 void pci_remove_root_bus(struct pci_bus *bus);
1155 void pci_setup_cardbus(struct pci_bus *bus);
1156 void pcibios_setup_bridge(struct pci_bus *bus, unsigned long type);
1157 void pci_sort_breadthfirst(void);
1158 #define dev_is_pci(d) ((d)->bus == &pci_bus_type)
1159 #define dev_is_pf(d) ((dev_is_pci(d) ? to_pci_dev(d)->is_physfn : false))
1160 
1161 /* Generic PCI functions exported to card drivers */
1162 
1163 u8 pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap);
1164 u8 pci_find_capability(struct pci_dev *dev, int cap);
1165 u8 pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap);
1166 u8 pci_find_ht_capability(struct pci_dev *dev, int ht_cap);
1167 u8 pci_find_next_ht_capability(struct pci_dev *dev, u8 pos, int ht_cap);
1168 u16 pci_find_ext_capability(struct pci_dev *dev, int cap);
1169 u16 pci_find_next_ext_capability(struct pci_dev *dev, u16 pos, int cap);
1170 struct pci_bus *pci_find_next_bus(const struct pci_bus *from);
1171 u16 pci_find_vsec_capability(struct pci_dev *dev, u16 vendor, int cap);
1172 u16 pci_find_dvsec_capability(struct pci_dev *dev, u16 vendor, u16 dvsec);
1173 
1174 u64 pci_get_dsn(struct pci_dev *dev);
1175 
1176 struct pci_dev *pci_get_device(unsigned int vendor, unsigned int device,
1177 			       struct pci_dev *from);
1178 struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device,
1179 			       unsigned int ss_vendor, unsigned int ss_device,
1180 			       struct pci_dev *from);
1181 struct pci_dev *pci_get_slot(struct pci_bus *bus, unsigned int devfn);
1182 struct pci_dev *pci_get_domain_bus_and_slot(int domain, unsigned int bus,
1183 					    unsigned int devfn);
1184 struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from);
1185 struct pci_dev *pci_get_base_class(unsigned int class, struct pci_dev *from);
1186 
1187 int pci_dev_present(const struct pci_device_id *ids);
1188 
1189 int pci_bus_read_config_byte(struct pci_bus *bus, unsigned int devfn,
1190 			     int where, u8 *val);
1191 int pci_bus_read_config_word(struct pci_bus *bus, unsigned int devfn,
1192 			     int where, u16 *val);
1193 int pci_bus_read_config_dword(struct pci_bus *bus, unsigned int devfn,
1194 			      int where, u32 *val);
1195 int pci_bus_write_config_byte(struct pci_bus *bus, unsigned int devfn,
1196 			      int where, u8 val);
1197 int pci_bus_write_config_word(struct pci_bus *bus, unsigned int devfn,
1198 			      int where, u16 val);
1199 int pci_bus_write_config_dword(struct pci_bus *bus, unsigned int devfn,
1200 			       int where, u32 val);
1201 
1202 int pci_generic_config_read(struct pci_bus *bus, unsigned int devfn,
1203 			    int where, int size, u32 *val);
1204 int pci_generic_config_write(struct pci_bus *bus, unsigned int devfn,
1205 			    int where, int size, u32 val);
1206 int pci_generic_config_read32(struct pci_bus *bus, unsigned int devfn,
1207 			      int where, int size, u32 *val);
1208 int pci_generic_config_write32(struct pci_bus *bus, unsigned int devfn,
1209 			       int where, int size, u32 val);
1210 
1211 struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops);
1212 
1213 int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val);
1214 int pci_read_config_word(const struct pci_dev *dev, int where, u16 *val);
1215 int pci_read_config_dword(const struct pci_dev *dev, int where, u32 *val);
1216 int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val);
1217 int pci_write_config_word(const struct pci_dev *dev, int where, u16 val);
1218 int pci_write_config_dword(const struct pci_dev *dev, int where, u32 val);
1219 
1220 int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val);
1221 int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val);
1222 int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val);
1223 int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val);
1224 int pcie_capability_clear_and_set_word_unlocked(struct pci_dev *dev, int pos,
1225 						u16 clear, u16 set);
1226 int pcie_capability_clear_and_set_word_locked(struct pci_dev *dev, int pos,
1227 					      u16 clear, u16 set);
1228 int pcie_capability_clear_and_set_dword(struct pci_dev *dev, int pos,
1229 					u32 clear, u32 set);
1230 
1231 /**
1232  * pcie_capability_clear_and_set_word - RMW accessor for PCI Express Capability Registers
1233  * @dev:	PCI device structure of the PCI Express device
1234  * @pos:	PCI Express Capability Register
1235  * @clear:	Clear bitmask
1236  * @set:	Set bitmask
1237  *
1238  * Perform a Read-Modify-Write (RMW) operation using @clear and @set
1239  * bitmasks on PCI Express Capability Register at @pos. Certain PCI Express
1240  * Capability Registers are accessed concurrently in RMW fashion, hence
1241  * require locking which is handled transparently to the caller.
1242  */
1243 static inline int pcie_capability_clear_and_set_word(struct pci_dev *dev,
1244 						     int pos,
1245 						     u16 clear, u16 set)
1246 {
1247 	switch (pos) {
1248 	case PCI_EXP_LNKCTL:
1249 	case PCI_EXP_RTCTL:
1250 		return pcie_capability_clear_and_set_word_locked(dev, pos,
1251 								 clear, set);
1252 	default:
1253 		return pcie_capability_clear_and_set_word_unlocked(dev, pos,
1254 								   clear, set);
1255 	}
1256 }
1257 
1258 static inline int pcie_capability_set_word(struct pci_dev *dev, int pos,
1259 					   u16 set)
1260 {
1261 	return pcie_capability_clear_and_set_word(dev, pos, 0, set);
1262 }
1263 
1264 static inline int pcie_capability_set_dword(struct pci_dev *dev, int pos,
1265 					    u32 set)
1266 {
1267 	return pcie_capability_clear_and_set_dword(dev, pos, 0, set);
1268 }
1269 
1270 static inline int pcie_capability_clear_word(struct pci_dev *dev, int pos,
1271 					     u16 clear)
1272 {
1273 	return pcie_capability_clear_and_set_word(dev, pos, clear, 0);
1274 }
1275 
1276 static inline int pcie_capability_clear_dword(struct pci_dev *dev, int pos,
1277 					      u32 clear)
1278 {
1279 	return pcie_capability_clear_and_set_dword(dev, pos, clear, 0);
1280 }
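/*
 * Example (sketch): using the RMW helpers above on Device Control, e.g. to
 * enable Extended Tags and clear Relaxed Ordering.  Locking, where required,
 * is handled for the caller.
 *
 *	pcie_capability_set_word(pdev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_EXT_TAG);
 *	pcie_capability_clear_word(pdev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
 */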
1281 
1282 /* User-space driven config access */
1283 int pci_user_read_config_byte(struct pci_dev *dev, int where, u8 *val);
1284 int pci_user_read_config_word(struct pci_dev *dev, int where, u16 *val);
1285 int pci_user_read_config_dword(struct pci_dev *dev, int where, u32 *val);
1286 int pci_user_write_config_byte(struct pci_dev *dev, int where, u8 val);
1287 int pci_user_write_config_word(struct pci_dev *dev, int where, u16 val);
1288 int pci_user_write_config_dword(struct pci_dev *dev, int where, u32 val);
1289 
1290 int __must_check pci_enable_device(struct pci_dev *dev);
1291 int __must_check pci_enable_device_io(struct pci_dev *dev);
1292 int __must_check pci_enable_device_mem(struct pci_dev *dev);
1293 int __must_check pci_reenable_device(struct pci_dev *);
1294 int __must_check pcim_enable_device(struct pci_dev *pdev);
1295 void pcim_pin_device(struct pci_dev *pdev);
1296 
1297 static inline bool pci_intx_mask_supported(struct pci_dev *pdev)
1298 {
1299 	/*
1300 	 * INTx masking is supported if PCI_COMMAND_INTX_DISABLE is
1301 	 * writable and no quirk has marked the feature broken.
1302 	 */
1303 	return !pdev->broken_intx_masking;
1304 }
1305 
1306 static inline int pci_is_enabled(struct pci_dev *pdev)
1307 {
1308 	return (atomic_read(&pdev->enable_cnt) > 0);
1309 }
1310 
1311 static inline int pci_is_managed(struct pci_dev *pdev)
1312 {
1313 	return pdev->is_managed;
1314 }
1315 
1316 void pci_disable_device(struct pci_dev *dev);
1317 
1318 extern unsigned int pcibios_max_latency;
1319 void pci_set_master(struct pci_dev *dev);
1320 void pci_clear_master(struct pci_dev *dev);
1321 
1322 int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state);
1323 int pci_set_cacheline_size(struct pci_dev *dev);
1324 int __must_check pci_set_mwi(struct pci_dev *dev);
1325 int __must_check pcim_set_mwi(struct pci_dev *dev);
1326 int pci_try_set_mwi(struct pci_dev *dev);
1327 void pci_clear_mwi(struct pci_dev *dev);
1328 void pci_disable_parity(struct pci_dev *dev);
1329 void pci_intx(struct pci_dev *dev, int enable);
1330 bool pci_check_and_mask_intx(struct pci_dev *dev);
1331 bool pci_check_and_unmask_intx(struct pci_dev *dev);
1332 int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask);
1333 int pci_wait_for_pending_transaction(struct pci_dev *dev);
1334 int pcix_get_max_mmrbc(struct pci_dev *dev);
1335 int pcix_get_mmrbc(struct pci_dev *dev);
1336 int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc);
1337 int pcie_get_readrq(struct pci_dev *dev);
1338 int pcie_set_readrq(struct pci_dev *dev, int rq);
1339 int pcie_get_mps(struct pci_dev *dev);
1340 int pcie_set_mps(struct pci_dev *dev, int mps);
1341 u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
1342 			     enum pci_bus_speed *speed,
1343 			     enum pcie_link_width *width);
1344 void pcie_print_link_status(struct pci_dev *dev);
1345 int pcie_reset_flr(struct pci_dev *dev, bool probe);
1346 int pcie_flr(struct pci_dev *dev);
1347 int __pci_reset_function_locked(struct pci_dev *dev);
1348 int pci_reset_function(struct pci_dev *dev);
1349 int pci_reset_function_locked(struct pci_dev *dev);
1350 int pci_try_reset_function(struct pci_dev *dev);
1351 int pci_probe_reset_slot(struct pci_slot *slot);
1352 int pci_probe_reset_bus(struct pci_bus *bus);
1353 int pci_reset_bus(struct pci_dev *dev);
1354 void pci_reset_secondary_bus(struct pci_dev *dev);
1355 void pcibios_reset_secondary_bus(struct pci_dev *dev);
1356 void pci_update_resource(struct pci_dev *dev, int resno);
1357 int __must_check pci_assign_resource(struct pci_dev *dev, int i);
1358 int __must_check pci_reassign_resource(struct pci_dev *dev, int i, resource_size_t add_size, resource_size_t align);
1359 void pci_release_resource(struct pci_dev *dev, int resno);
pci_rebar_bytes_to_size(u64 bytes)1360 static inline int pci_rebar_bytes_to_size(u64 bytes)
1361 {
1362 	bytes = roundup_pow_of_two(bytes);
1363 
1364 	/* Return BAR size as defined in the resizable BAR specification */
1365 	return max(ilog2(bytes), 20) - 20;
1366 }
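
/*
 * Worked example (editorial note): the resizable BAR capability encodes a
 * BAR size as log2(bytes) - 20, with a 1 MB minimum.  pci_rebar_bytes_to_size()
 * therefore maps 1 MB -> 0, 2 MB -> 1 and 256 MB -> 8, and a request for
 * 3 MB is first rounded up to 4 MB and yields 2.
 */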
1367 
1368 u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar);
1369 int __must_check pci_resize_resource(struct pci_dev *dev, int i, int size);
1370 int pci_select_bars(struct pci_dev *dev, unsigned long flags);
1371 bool pci_device_is_present(struct pci_dev *pdev);
1372 void pci_ignore_hotplug(struct pci_dev *dev);
1373 struct pci_dev *pci_real_dma_dev(struct pci_dev *dev);
1374 int pci_status_get_and_clear_errors(struct pci_dev *pdev);
1375 
1376 int __printf(6, 7) pci_request_irq(struct pci_dev *dev, unsigned int nr,
1377 		irq_handler_t handler, irq_handler_t thread_fn, void *dev_id,
1378 		const char *fmt, ...);
1379 void pci_free_irq(struct pci_dev *dev, unsigned int nr, void *dev_id);
1380 
1381 /* ROM control related routines */
1382 int pci_enable_rom(struct pci_dev *pdev);
1383 void pci_disable_rom(struct pci_dev *pdev);
1384 void __iomem __must_check *pci_map_rom(struct pci_dev *pdev, size_t *size);
1385 void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom);
1386 
1387 /* Power management related routines */
1388 int pci_save_state(struct pci_dev *dev);
1389 void pci_restore_state(struct pci_dev *dev);
1390 struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev);
1391 int pci_load_saved_state(struct pci_dev *dev,
1392 			 struct pci_saved_state *state);
1393 int pci_load_and_free_saved_state(struct pci_dev *dev,
1394 				  struct pci_saved_state **state);
1395 int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state);
1396 int pci_set_power_state(struct pci_dev *dev, pci_power_t state);
1397 int pci_set_power_state_locked(struct pci_dev *dev, pci_power_t state);
1398 pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state);
1399 bool pci_pme_capable(struct pci_dev *dev, pci_power_t state);
1400 void pci_pme_active(struct pci_dev *dev, bool enable);
1401 int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable);
1402 int pci_wake_from_d3(struct pci_dev *dev, bool enable);
1403 int pci_prepare_to_sleep(struct pci_dev *dev);
1404 int pci_back_from_sleep(struct pci_dev *dev);
1405 bool pci_dev_run_wake(struct pci_dev *dev);
1406 void pci_d3cold_enable(struct pci_dev *dev);
1407 void pci_d3cold_disable(struct pci_dev *dev);
1408 bool pcie_relaxed_ordering_enabled(struct pci_dev *dev);
1409 void pci_resume_bus(struct pci_bus *bus);
1410 void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state);
1411 
1412 /* For use by arch with custom probe code */
1413 void set_pcie_port_type(struct pci_dev *pdev);
1414 void set_pcie_hotplug_bridge(struct pci_dev *pdev);
1415 
1416 /* Functions for PCI Hotplug drivers to use */
1417 unsigned int pci_rescan_bus_bridge_resize(struct pci_dev *bridge);
1418 unsigned int pci_rescan_bus(struct pci_bus *bus);
1419 void pci_lock_rescan_remove(void);
1420 void pci_unlock_rescan_remove(void);
1421 
1422 /* Vital Product Data routines */
1423 ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
1424 ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
1425 ssize_t pci_read_vpd_any(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
1426 ssize_t pci_write_vpd_any(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
1427 
1428 /* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */
1429 resource_size_t pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx);
1430 void pci_bus_assign_resources(const struct pci_bus *bus);
1431 void pci_bus_claim_resources(struct pci_bus *bus);
1432 void pci_bus_size_bridges(struct pci_bus *bus);
1433 int pci_claim_resource(struct pci_dev *, int);
1434 int pci_claim_bridge_resource(struct pci_dev *bridge, int i);
1435 void pci_assign_unassigned_resources(void);
1436 void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge);
1437 void pci_assign_unassigned_bus_resources(struct pci_bus *bus);
1438 void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus);
1439 int pci_reassign_bridge_resources(struct pci_dev *bridge, unsigned long type);
1440 int pci_enable_resources(struct pci_dev *, int mask);
1441 void pci_assign_irq(struct pci_dev *dev);
1442 struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res);
1443 #define HAVE_PCI_REQ_REGIONS	2
1444 int __must_check pci_request_regions(struct pci_dev *, const char *);
1445 int __must_check pci_request_regions_exclusive(struct pci_dev *, const char *);
1446 void pci_release_regions(struct pci_dev *);
1447 int __must_check pci_request_region(struct pci_dev *, int, const char *);
1448 void pci_release_region(struct pci_dev *, int);
1449 int pci_request_selected_regions(struct pci_dev *, int, const char *);
1450 int pci_request_selected_regions_exclusive(struct pci_dev *, int, const char *);
1451 void pci_release_selected_regions(struct pci_dev *, int);
1452 
1453 static inline __must_check struct resource *
pci_request_config_region_exclusive(struct pci_dev * pdev,unsigned int offset,unsigned int len,const char * name)1454 pci_request_config_region_exclusive(struct pci_dev *pdev, unsigned int offset,
1455 				    unsigned int len, const char *name)
1456 {
1457 	return __request_region(&pdev->driver_exclusive_resource, offset, len,
1458 				name, IORESOURCE_EXCLUSIVE);
1459 }
1460 
pci_release_config_region(struct pci_dev * pdev,unsigned int offset,unsigned int len)1461 static inline void pci_release_config_region(struct pci_dev *pdev,
1462 					     unsigned int offset,
1463 					     unsigned int len)
1464 {
1465 	__release_region(&pdev->driver_exclusive_resource, offset, len);
1466 }
1467 
1468 /* drivers/pci/bus.c */
1469 void pci_add_resource(struct list_head *resources, struct resource *res);
1470 void pci_add_resource_offset(struct list_head *resources, struct resource *res,
1471 			     resource_size_t offset);
1472 void pci_free_resource_list(struct list_head *resources);
1473 void pci_bus_add_resource(struct pci_bus *bus, struct resource *res,
1474 			  unsigned int flags);
1475 struct resource *pci_bus_resource_n(const struct pci_bus *bus, int n);
1476 void pci_bus_remove_resources(struct pci_bus *bus);
1477 void pci_bus_remove_resource(struct pci_bus *bus, struct resource *res);
1478 int devm_request_pci_bus_resources(struct device *dev,
1479 				   struct list_head *resources);
1480 
1481 /* Temporary until new and working PCI SBR API in place */
1482 int pci_bridge_secondary_bus_reset(struct pci_dev *dev);
1483 
1484 #define __pci_bus_for_each_res0(bus, res, ...)				\
1485 	for (unsigned int __b = 0;					\
1486 	     (res = pci_bus_resource_n(bus, __b)) || __b < PCI_BRIDGE_RESOURCE_NUM; \
1487 	     __b++)
1488 
1489 #define __pci_bus_for_each_res1(bus, res, __b)				\
1490 	for (__b = 0;							\
1491 	     (res = pci_bus_resource_n(bus, __b)) || __b < PCI_BRIDGE_RESOURCE_NUM; \
1492 	     __b++)
1493 
1494 /**
1495  * pci_bus_for_each_resource - iterate over PCI bus resources
1496  * @bus: the PCI bus
1497  * @res: pointer to the current resource
1498  * @...: optional index of the current resource
1499  *
1500  * Iterate over PCI bus resources. First walk the PCI bus resource
1501  * array, which holds at most %PCI_BRIDGE_RESOURCE_NUM entries, and then
1502  * continue with the separate list of additional resources, if it is not
1503  * empty. That is why a logical OR is used in the loop condition.
1504  *
1505  * Possible usage:
1506  *
1507  *	struct pci_bus *bus = ...;
1508  *	struct resource *res;
1509  *	unsigned int i;
1510  *
1511  * 	// With optional index
1512  * 	pci_bus_for_each_resource(bus, res, i)
1513  * 		pr_info("PCI bus resource[%u]: %pR\n", i, res);
1514  *
1515  * 	// Without index
1516  * 	pci_bus_for_each_resource(bus, res)
1517  * 		_do_something_(res);
1518  */
1519 #define pci_bus_for_each_resource(bus, res, ...)			\
1520 	CONCATENATE(__pci_bus_for_each_res, COUNT_ARGS(__VA_ARGS__))	\
1521 		    (bus, res, __VA_ARGS__)
1522 
1523 int __must_check pci_bus_alloc_resource(struct pci_bus *bus,
1524 			struct resource *res, resource_size_t size,
1525 			resource_size_t align, resource_size_t min,
1526 			unsigned long type_mask,
1527 			resource_size_t (*alignf)(void *,
1528 						  const struct resource *,
1529 						  resource_size_t,
1530 						  resource_size_t),
1531 			void *alignf_data);
1532 
1533 
1534 int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr,
1535 			resource_size_t size);
1536 unsigned long pci_address_to_pio(phys_addr_t addr);
1537 phys_addr_t pci_pio_to_address(unsigned long pio);
1538 int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr);
1539 int devm_pci_remap_iospace(struct device *dev, const struct resource *res,
1540 			   phys_addr_t phys_addr);
1541 void pci_unmap_iospace(struct resource *res);
1542 void __iomem *devm_pci_remap_cfgspace(struct device *dev,
1543 				      resource_size_t offset,
1544 				      resource_size_t size);
1545 void __iomem *devm_pci_remap_cfg_resource(struct device *dev,
1546 					  struct resource *res);
1547 
pci_bus_address(struct pci_dev * pdev,int bar)1548 static inline pci_bus_addr_t pci_bus_address(struct pci_dev *pdev, int bar)
1549 {
1550 	struct pci_bus_region region;
1551 
1552 	pcibios_resource_to_bus(pdev->bus, &region, &pdev->resource[bar]);
1553 	return region.start;
1554 }
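
/*
 * Example (editorial sketch): a driver whose device must be told the bus
 * address of one of its own BARs (e.g. so a peer can target it) might use:
 *
 *	pci_bus_addr_t bar2_bus = pci_bus_address(pdev, 2);
 *
 * The BAR index 2 is only illustrative; any valid resource index works.
 */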
1555 
1556 /* Proper probing supporting hot-pluggable devices */
1557 int __must_check __pci_register_driver(struct pci_driver *, struct module *,
1558 				       const char *mod_name);
1559 
1560 /* pci_register_driver() must be a macro so KBUILD_MODNAME can be expanded */
1561 #define pci_register_driver(driver)		\
1562 	__pci_register_driver(driver, THIS_MODULE, KBUILD_MODNAME)
1563 
1564 void pci_unregister_driver(struct pci_driver *dev);
1565 
1566 /**
1567  * module_pci_driver() - Helper macro for registering a PCI driver
1568  * @__pci_driver: pci_driver struct
1569  *
1570  * Helper macro for PCI drivers which do not do anything special in module
1571  * init/exit. This eliminates a lot of boilerplate. Each module may only
1572  * use this macro once, and calling it replaces module_init() and module_exit()
1573  */
1574 #define module_pci_driver(__pci_driver) \
1575 	module_driver(__pci_driver, pci_register_driver, pci_unregister_driver)
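
/*
 * Minimal usage sketch (editorial addition): the "foo" names and the ID
 * values below are hypothetical placeholders.
 *
 *	static const struct pci_device_id foo_ids[] = {
 *		{ PCI_DEVICE(0x1234, 0x5678) },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(pci, foo_ids);
 *
 *	static struct pci_driver foo_driver = {
 *		.name		= "foo",
 *		.id_table	= foo_ids,
 *		.probe		= foo_probe,
 *		.remove		= foo_remove,
 *	};
 *	module_pci_driver(foo_driver);
 */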
1576 
1577 /**
1578  * builtin_pci_driver() - Helper macro for registering a PCI driver
1579  * @__pci_driver: pci_driver struct
1580  *
1581  * Helper macro for PCI drivers which do not do anything special in their
1582  * init code. This eliminates a lot of boilerplate. Each driver may only
1583  * use this macro once, and calling it replaces device_initcall(...)
1584  */
1585 #define builtin_pci_driver(__pci_driver) \
1586 	builtin_driver(__pci_driver, pci_register_driver)
1587 
1588 struct pci_driver *pci_dev_driver(const struct pci_dev *dev);
1589 int pci_add_dynid(struct pci_driver *drv,
1590 		  unsigned int vendor, unsigned int device,
1591 		  unsigned int subvendor, unsigned int subdevice,
1592 		  unsigned int class, unsigned int class_mask,
1593 		  unsigned long driver_data);
1594 const struct pci_device_id *pci_match_id(const struct pci_device_id *ids,
1595 					 struct pci_dev *dev);
1596 int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
1597 		    int pass);
1598 
1599 void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
1600 		  void *userdata);
1601 void pci_walk_bus_locked(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
1602 			 void *userdata);
1603 int pci_cfg_space_size(struct pci_dev *dev);
1604 unsigned char pci_bus_max_busnr(struct pci_bus *bus);
1605 void pci_setup_bridge(struct pci_bus *bus);
1606 resource_size_t pcibios_window_alignment(struct pci_bus *bus,
1607 					 unsigned long type);
1608 
1609 #define PCI_VGA_STATE_CHANGE_BRIDGE (1 << 0)
1610 #define PCI_VGA_STATE_CHANGE_DECODES (1 << 1)
1611 
1612 int pci_set_vga_state(struct pci_dev *pdev, bool decode,
1613 		      unsigned int command_bits, u32 flags);
1614 
1615 /*
1616  * Virtual interrupts allow more interrupts to be allocated than the
1617  * device actually supports. They are not programmed into the device's
1618  * MSI-X table and must be handled by the driver through some other
1619  * means.
1620  */
1621 #define PCI_IRQ_VIRTUAL		(1 << 4)
1622 
1623 #define PCI_IRQ_ALL_TYPES \
1624 	(PCI_IRQ_LEGACY | PCI_IRQ_MSI | PCI_IRQ_MSIX)
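
/*
 * Typical usage sketch (editorial addition): let the core pick the best
 * available interrupt type, then look up the Linux IRQ number of vector 0.
 * The names used here are illustrative only.
 *
 *	int nvec, err;
 *
 *	nvec = pci_alloc_irq_vectors(pdev, 1, 8, PCI_IRQ_ALL_TYPES);
 *	if (nvec < 0)
 *		return nvec;
 *	err = request_irq(pci_irq_vector(pdev, 0), foo_handler, 0, "foo", foo);
 */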
1625 
1626 #include <linux/dmapool.h>
1627 
1628 struct msix_entry {
1629 	u32	vector;	/* Kernel writes the allocated Linux IRQ here */
1630 	u16	entry;	/* Driver specifies which MSI-X table entry to use */
1631 };
1632 
1633 struct msi_domain_template;
1634 
1635 #ifdef CONFIG_PCI_MSI
1636 int pci_msi_vec_count(struct pci_dev *dev);
1637 void pci_disable_msi(struct pci_dev *dev);
1638 int pci_msix_vec_count(struct pci_dev *dev);
1639 void pci_disable_msix(struct pci_dev *dev);
1640 void pci_restore_msi_state(struct pci_dev *dev);
1641 int pci_msi_enabled(void);
1642 int pci_enable_msi(struct pci_dev *dev);
1643 int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
1644 			  int minvec, int maxvec);
pci_enable_msix_exact(struct pci_dev * dev,struct msix_entry * entries,int nvec)1645 static inline int pci_enable_msix_exact(struct pci_dev *dev,
1646 					struct msix_entry *entries, int nvec)
1647 {
1648 	int rc = pci_enable_msix_range(dev, entries, nvec, nvec);
1649 	if (rc < 0)
1650 		return rc;
1651 	return 0;
1652 }
1653 int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
1654 			  unsigned int max_vecs, unsigned int flags);
1655 int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
1656 				   unsigned int max_vecs, unsigned int flags,
1657 				   struct irq_affinity *affd);
1658 
1659 bool pci_msix_can_alloc_dyn(struct pci_dev *dev);
1660 struct msi_map pci_msix_alloc_irq_at(struct pci_dev *dev, unsigned int index,
1661 				     const struct irq_affinity_desc *affdesc);
1662 void pci_msix_free_irq(struct pci_dev *pdev, struct msi_map map);
1663 
1664 void pci_free_irq_vectors(struct pci_dev *dev);
1665 int pci_irq_vector(struct pci_dev *dev, unsigned int nr);
1666 const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev, int vec);
1667 bool pci_create_ims_domain(struct pci_dev *pdev, const struct msi_domain_template *template,
1668 			   unsigned int hwsize, void *data);
1669 struct msi_map pci_ims_alloc_irq(struct pci_dev *pdev, union msi_instance_cookie *icookie,
1670 				 const struct irq_affinity_desc *affdesc);
1671 void pci_ims_free_irq(struct pci_dev *pdev, struct msi_map map);
1672 
1673 #else
pci_msi_vec_count(struct pci_dev * dev)1674 static inline int pci_msi_vec_count(struct pci_dev *dev) { return -ENOSYS; }
pci_disable_msi(struct pci_dev * dev)1675 static inline void pci_disable_msi(struct pci_dev *dev) { }
pci_msix_vec_count(struct pci_dev * dev)1676 static inline int pci_msix_vec_count(struct pci_dev *dev) { return -ENOSYS; }
pci_disable_msix(struct pci_dev * dev)1677 static inline void pci_disable_msix(struct pci_dev *dev) { }
pci_restore_msi_state(struct pci_dev * dev)1678 static inline void pci_restore_msi_state(struct pci_dev *dev) { }
pci_msi_enabled(void)1679 static inline int pci_msi_enabled(void) { return 0; }
pci_enable_msi(struct pci_dev * dev)1680 static inline int pci_enable_msi(struct pci_dev *dev)
1681 { return -ENOSYS; }
pci_enable_msix_range(struct pci_dev * dev,struct msix_entry * entries,int minvec,int maxvec)1682 static inline int pci_enable_msix_range(struct pci_dev *dev,
1683 			struct msix_entry *entries, int minvec, int maxvec)
1684 { return -ENOSYS; }
pci_enable_msix_exact(struct pci_dev * dev,struct msix_entry * entries,int nvec)1685 static inline int pci_enable_msix_exact(struct pci_dev *dev,
1686 			struct msix_entry *entries, int nvec)
1687 { return -ENOSYS; }
1688 
1689 static inline int
pci_alloc_irq_vectors_affinity(struct pci_dev * dev,unsigned int min_vecs,unsigned int max_vecs,unsigned int flags,struct irq_affinity * aff_desc)1690 pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
1691 			       unsigned int max_vecs, unsigned int flags,
1692 			       struct irq_affinity *aff_desc)
1693 {
1694 	if ((flags & PCI_IRQ_LEGACY) && min_vecs == 1 && dev->irq)
1695 		return 1;
1696 	return -ENOSPC;
1697 }
1698 static inline int
pci_alloc_irq_vectors(struct pci_dev * dev,unsigned int min_vecs,unsigned int max_vecs,unsigned int flags)1699 pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
1700 		      unsigned int max_vecs, unsigned int flags)
1701 {
1702 	return pci_alloc_irq_vectors_affinity(dev, min_vecs, max_vecs,
1703 					      flags, NULL);
1704 }
1705 
pci_msix_can_alloc_dyn(struct pci_dev * dev)1706 static inline bool pci_msix_can_alloc_dyn(struct pci_dev *dev)
1707 { return false; }
pci_msix_alloc_irq_at(struct pci_dev * dev,unsigned int index,const struct irq_affinity_desc * affdesc)1708 static inline struct msi_map pci_msix_alloc_irq_at(struct pci_dev *dev, unsigned int index,
1709 						   const struct irq_affinity_desc *affdesc)
1710 {
1711 	struct msi_map map = { .index = -ENOSYS, };
1712 
1713 	return map;
1714 }
1715 
pci_msix_free_irq(struct pci_dev * pdev,struct msi_map map)1716 static inline void pci_msix_free_irq(struct pci_dev *pdev, struct msi_map map)
1717 {
1718 }
1719 
pci_free_irq_vectors(struct pci_dev * dev)1720 static inline void pci_free_irq_vectors(struct pci_dev *dev)
1721 {
1722 }
1723 
pci_irq_vector(struct pci_dev * dev,unsigned int nr)1724 static inline int pci_irq_vector(struct pci_dev *dev, unsigned int nr)
1725 {
1726 	if (WARN_ON_ONCE(nr > 0))
1727 		return -EINVAL;
1728 	return dev->irq;
1729 }
pci_irq_get_affinity(struct pci_dev * pdev,int vec)1730 static inline const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev,
1731 		int vec)
1732 {
1733 	return cpu_possible_mask;
1734 }
1735 
pci_create_ims_domain(struct pci_dev * pdev,const struct msi_domain_template * template,unsigned int hwsize,void * data)1736 static inline bool pci_create_ims_domain(struct pci_dev *pdev,
1737 					 const struct msi_domain_template *template,
1738 					 unsigned int hwsize, void *data)
1739 { return false; }
1740 
pci_ims_alloc_irq(struct pci_dev * pdev,union msi_instance_cookie * icookie,const struct irq_affinity_desc * affdesc)1741 static inline struct msi_map pci_ims_alloc_irq(struct pci_dev *pdev,
1742 					       union msi_instance_cookie *icookie,
1743 					       const struct irq_affinity_desc *affdesc)
1744 {
1745 	struct msi_map map = { .index = -ENOSYS, };
1746 
1747 	return map;
1748 }
1749 
pci_ims_free_irq(struct pci_dev * pdev,struct msi_map map)1750 static inline void pci_ims_free_irq(struct pci_dev *pdev, struct msi_map map)
1751 {
1752 }
1753 
1754 #endif
1755 
1756 /**
1757  * pci_irqd_intx_xlate() - Translate PCI INTx value to an IRQ domain hwirq
1758  * @d: the INTx IRQ domain
1759  * @node: the DT node for the device whose interrupt we're translating
1760  * @intspec: the interrupt specifier data from the DT
1761  * @intsize: the number of entries in @intspec
1762  * @out_hwirq: pointer at which to write the hwirq number
1763  * @out_type: pointer at which to write the interrupt type
1764  *
1765  * Translate a PCI INTx interrupt number from device tree in the range 1-4, as
1766  * stored in the standard PCI_INTERRUPT_PIN register, to a value in the range
1767  * 0-3 suitable for use in a 4 entry IRQ domain. That is, subtract one from the
1768  * INTx value to obtain the hwirq number.
1769  *
1770  * Returns 0 on success, or -EINVAL if the interrupt specifier is out of range.
1771  */
pci_irqd_intx_xlate(struct irq_domain * d,struct device_node * node,const u32 * intspec,unsigned int intsize,unsigned long * out_hwirq,unsigned int * out_type)1772 static inline int pci_irqd_intx_xlate(struct irq_domain *d,
1773 				      struct device_node *node,
1774 				      const u32 *intspec,
1775 				      unsigned int intsize,
1776 				      unsigned long *out_hwirq,
1777 				      unsigned int *out_type)
1778 {
1779 	const u32 intx = intspec[0];
1780 
1781 	if (intx < PCI_INTERRUPT_INTA || intx > PCI_INTERRUPT_INTD)
1782 		return -EINVAL;
1783 
1784 	*out_hwirq = intx - PCI_INTERRUPT_INTA;
1785 	return 0;
1786 }
1787 
1788 #ifdef CONFIG_PCIEPORTBUS
1789 extern bool pcie_ports_disabled;
1790 extern bool pcie_ports_native;
1791 #else
1792 #define pcie_ports_disabled	true
1793 #define pcie_ports_native	false
1794 #endif
1795 
1796 #define PCIE_LINK_STATE_L0S		BIT(0)
1797 #define PCIE_LINK_STATE_L1		BIT(1)
1798 #define PCIE_LINK_STATE_CLKPM		BIT(2)
1799 #define PCIE_LINK_STATE_L1_1		BIT(3)
1800 #define PCIE_LINK_STATE_L1_2		BIT(4)
1801 #define PCIE_LINK_STATE_L1_1_PCIPM	BIT(5)
1802 #define PCIE_LINK_STATE_L1_2_PCIPM	BIT(6)
1803 #define PCIE_LINK_STATE_ALL		(PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |\
1804 					 PCIE_LINK_STATE_CLKPM | PCIE_LINK_STATE_L1_1 |\
1805 					 PCIE_LINK_STATE_L1_2 | PCIE_LINK_STATE_L1_1_PCIPM |\
1806 					 PCIE_LINK_STATE_L1_2_PCIPM)
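
/*
 * Example (editorial sketch): a driver whose device cannot tolerate the
 * exit latency of ASPM L1 might disable that state on its link with:
 *
 *	pci_disable_link_state(pdev, PCIE_LINK_STATE_L1);
 *
 * When CONFIG_PCIEASPM is not set, the stub below simply returns 0.
 */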
1807 
1808 #ifdef CONFIG_PCIEASPM
1809 int pci_disable_link_state(struct pci_dev *pdev, int state);
1810 int pci_disable_link_state_locked(struct pci_dev *pdev, int state);
1811 int pci_enable_link_state(struct pci_dev *pdev, int state);
1812 int pci_enable_link_state_locked(struct pci_dev *pdev, int state);
1813 void pcie_no_aspm(void);
1814 bool pcie_aspm_support_enabled(void);
1815 bool pcie_aspm_enabled(struct pci_dev *pdev);
1816 #else
pci_disable_link_state(struct pci_dev * pdev,int state)1817 static inline int pci_disable_link_state(struct pci_dev *pdev, int state)
1818 { return 0; }
pci_disable_link_state_locked(struct pci_dev * pdev,int state)1819 static inline int pci_disable_link_state_locked(struct pci_dev *pdev, int state)
1820 { return 0; }
pci_enable_link_state(struct pci_dev * pdev,int state)1821 static inline int pci_enable_link_state(struct pci_dev *pdev, int state)
1822 { return 0; }
pci_enable_link_state_locked(struct pci_dev * pdev,int state)1823 static inline int pci_enable_link_state_locked(struct pci_dev *pdev, int state)
1824 { return 0; }
pcie_no_aspm(void)1825 static inline void pcie_no_aspm(void) { }
pcie_aspm_support_enabled(void)1826 static inline bool pcie_aspm_support_enabled(void) { return false; }
pcie_aspm_enabled(struct pci_dev * pdev)1827 static inline bool pcie_aspm_enabled(struct pci_dev *pdev) { return false; }
1828 #endif
1829 
1830 #ifdef CONFIG_PCIEAER
1831 bool pci_aer_available(void);
1832 #else
pci_aer_available(void)1833 static inline bool pci_aer_available(void) { return false; }
1834 #endif
1835 
1836 bool pci_ats_disabled(void);
1837 
1838 #ifdef CONFIG_PCIE_PTM
1839 int pci_enable_ptm(struct pci_dev *dev, u8 *granularity);
1840 void pci_disable_ptm(struct pci_dev *dev);
1841 bool pcie_ptm_enabled(struct pci_dev *dev);
1842 #else
pci_enable_ptm(struct pci_dev * dev,u8 * granularity)1843 static inline int pci_enable_ptm(struct pci_dev *dev, u8 *granularity)
1844 { return -EINVAL; }
pci_disable_ptm(struct pci_dev * dev)1845 static inline void pci_disable_ptm(struct pci_dev *dev) { }
pcie_ptm_enabled(struct pci_dev * dev)1846 static inline bool pcie_ptm_enabled(struct pci_dev *dev)
1847 { return false; }
1848 #endif
1849 
1850 void pci_cfg_access_lock(struct pci_dev *dev);
1851 bool pci_cfg_access_trylock(struct pci_dev *dev);
1852 void pci_cfg_access_unlock(struct pci_dev *dev);
1853 
1854 void pci_dev_lock(struct pci_dev *dev);
1855 int pci_dev_trylock(struct pci_dev *dev);
1856 void pci_dev_unlock(struct pci_dev *dev);
1857 DEFINE_GUARD(pci_dev, struct pci_dev *, pci_dev_lock(_T), pci_dev_unlock(_T))
1858 
1859 /*
1860  * PCI domain support.  Sometimes called a PCI segment (e.g., by ACPI),
1861  * a PCI domain is defined to be a set of PCI buses which share
1862  * configuration space.
1863  */
1864 #ifdef CONFIG_PCI_DOMAINS
1865 extern int pci_domains_supported;
1866 #else
1867 enum { pci_domains_supported = 0 };
1868 static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
1869 static inline int pci_proc_domain(struct pci_bus *bus) { return 0; }
1870 #endif /* CONFIG_PCI_DOMAINS */
1871 
1872 /*
1873  * Generic implementation for PCI domain support. If your
1874  * architecture does not need custom management of PCI
1875  * domains, then this implementation will be used.
1876  */
1877 #ifdef CONFIG_PCI_DOMAINS_GENERIC
pci_domain_nr(struct pci_bus * bus)1878 static inline int pci_domain_nr(struct pci_bus *bus)
1879 {
1880 	return bus->domain_nr;
1881 }
1882 #ifdef CONFIG_ACPI
1883 int acpi_pci_bus_find_domain_nr(struct pci_bus *bus);
1884 #else
acpi_pci_bus_find_domain_nr(struct pci_bus * bus)1885 static inline int acpi_pci_bus_find_domain_nr(struct pci_bus *bus)
1886 { return 0; }
1887 #endif
1888 int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent);
1889 void pci_bus_release_domain_nr(struct pci_bus *bus, struct device *parent);
1890 #endif
1891 
1892 /* Some architectures require additional setup to direct VGA traffic */
1893 typedef int (*arch_set_vga_state_t)(struct pci_dev *pdev, bool decode,
1894 				    unsigned int command_bits, u32 flags);
1895 void pci_register_set_vga_state(arch_set_vga_state_t func);
1896 
1897 static inline int
pci_request_io_regions(struct pci_dev * pdev,const char * name)1898 pci_request_io_regions(struct pci_dev *pdev, const char *name)
1899 {
1900 	return pci_request_selected_regions(pdev,
1901 			    pci_select_bars(pdev, IORESOURCE_IO), name);
1902 }
1903 
1904 static inline void
pci_release_io_regions(struct pci_dev * pdev)1905 pci_release_io_regions(struct pci_dev *pdev)
1906 {
1907 	return pci_release_selected_regions(pdev,
1908 			    pci_select_bars(pdev, IORESOURCE_IO));
1909 }
1910 
1911 static inline int
pci_request_mem_regions(struct pci_dev * pdev,const char * name)1912 pci_request_mem_regions(struct pci_dev *pdev, const char *name)
1913 {
1914 	return pci_request_selected_regions(pdev,
1915 			    pci_select_bars(pdev, IORESOURCE_MEM), name);
1916 }
1917 
1918 static inline void
pci_release_mem_regions(struct pci_dev * pdev)1919 pci_release_mem_regions(struct pci_dev *pdev)
1920 {
1921 	return pci_release_selected_regions(pdev,
1922 			    pci_select_bars(pdev, IORESOURCE_MEM));
1923 }
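
/*
 * Probe-time sketch (editorial addition): a common managed pattern is to
 * enable the device, claim its MEM BARs and map BAR 0.  "foo" and the
 * local variables are hypothetical.
 *
 *	void __iomem *regs;
 *	int err;
 *
 *	err = pcim_enable_device(pdev);
 *	if (err)
 *		return err;
 *
 *	err = pci_request_mem_regions(pdev, "foo");
 *	if (err)
 *		return err;
 *
 *	regs = pcim_iomap(pdev, 0, 0);
 *	if (!regs)
 *		return -ENOMEM;
 */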
1924 
1925 #else /* CONFIG_PCI is not enabled */
1926 
pci_set_flags(int flags)1927 static inline void pci_set_flags(int flags) { }
pci_add_flags(int flags)1928 static inline void pci_add_flags(int flags) { }
pci_clear_flags(int flags)1929 static inline void pci_clear_flags(int flags) { }
pci_has_flag(int flag)1930 static inline int pci_has_flag(int flag) { return 0; }
1931 
1932 /*
1933  * If the system does not have PCI, clearly these return errors.  Define
1934  * these as simple inline functions to avoid hair in drivers.
1935  */
1936 #define _PCI_NOP(o, s, t) \
1937 	static inline int pci_##o##_config_##s(struct pci_dev *dev, \
1938 						int where, t val) \
1939 		{ return PCIBIOS_FUNC_NOT_SUPPORTED; }
1940 
1941 #define _PCI_NOP_ALL(o, x)	_PCI_NOP(o, byte, u8 x) \
1942 				_PCI_NOP(o, word, u16 x) \
1943 				_PCI_NOP(o, dword, u32 x)
1944 _PCI_NOP_ALL(read, *)
1945 _PCI_NOP_ALL(write,)
1946 
pci_get_device(unsigned int vendor,unsigned int device,struct pci_dev * from)1947 static inline struct pci_dev *pci_get_device(unsigned int vendor,
1948 					     unsigned int device,
1949 					     struct pci_dev *from)
1950 { return NULL; }
1951 
pci_get_subsys(unsigned int vendor,unsigned int device,unsigned int ss_vendor,unsigned int ss_device,struct pci_dev * from)1952 static inline struct pci_dev *pci_get_subsys(unsigned int vendor,
1953 					     unsigned int device,
1954 					     unsigned int ss_vendor,
1955 					     unsigned int ss_device,
1956 					     struct pci_dev *from)
1957 { return NULL; }
1958 
pci_get_class(unsigned int class,struct pci_dev * from)1959 static inline struct pci_dev *pci_get_class(unsigned int class,
1960 					    struct pci_dev *from)
1961 { return NULL; }
1962 
pci_get_base_class(unsigned int class,struct pci_dev * from)1963 static inline struct pci_dev *pci_get_base_class(unsigned int class,
1964 						 struct pci_dev *from)
1965 { return NULL; }
1966 
pci_dev_present(const struct pci_device_id * ids)1967 static inline int pci_dev_present(const struct pci_device_id *ids)
1968 { return 0; }
1969 
1970 #define no_pci_devices()	(1)
1971 #define pci_dev_put(dev)	do { } while (0)
1972 
pci_set_master(struct pci_dev * dev)1973 static inline void pci_set_master(struct pci_dev *dev) { }
pci_clear_master(struct pci_dev * dev)1974 static inline void pci_clear_master(struct pci_dev *dev) { }
pci_enable_device(struct pci_dev * dev)1975 static inline int pci_enable_device(struct pci_dev *dev) { return -EIO; }
pci_disable_device(struct pci_dev * dev)1976 static inline void pci_disable_device(struct pci_dev *dev) { }
pcim_enable_device(struct pci_dev * pdev)1977 static inline int pcim_enable_device(struct pci_dev *pdev) { return -EIO; }
pci_assign_resource(struct pci_dev * dev,int i)1978 static inline int pci_assign_resource(struct pci_dev *dev, int i)
1979 { return -EBUSY; }
__pci_register_driver(struct pci_driver * drv,struct module * owner,const char * mod_name)1980 static inline int __must_check __pci_register_driver(struct pci_driver *drv,
1981 						     struct module *owner,
1982 						     const char *mod_name)
1983 { return 0; }
pci_register_driver(struct pci_driver * drv)1984 static inline int pci_register_driver(struct pci_driver *drv)
1985 { return 0; }
pci_unregister_driver(struct pci_driver * drv)1986 static inline void pci_unregister_driver(struct pci_driver *drv) { }
pci_find_capability(struct pci_dev * dev,int cap)1987 static inline u8 pci_find_capability(struct pci_dev *dev, int cap)
1988 { return 0; }
pci_find_next_capability(struct pci_dev * dev,u8 post,int cap)1989 static inline int pci_find_next_capability(struct pci_dev *dev, u8 post,
1990 					   int cap)
1991 { return 0; }
pci_find_ext_capability(struct pci_dev * dev,int cap)1992 static inline int pci_find_ext_capability(struct pci_dev *dev, int cap)
1993 { return 0; }
1994 
pci_get_dsn(struct pci_dev * dev)1995 static inline u64 pci_get_dsn(struct pci_dev *dev)
1996 { return 0; }
1997 
1998 /* Power management related routines */
pci_save_state(struct pci_dev * dev)1999 static inline int pci_save_state(struct pci_dev *dev) { return 0; }
pci_restore_state(struct pci_dev * dev)2000 static inline void pci_restore_state(struct pci_dev *dev) { }
pci_set_power_state(struct pci_dev * dev,pci_power_t state)2001 static inline int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
2002 { return 0; }
pci_set_power_state_locked(struct pci_dev * dev,pci_power_t state)2003 static inline int pci_set_power_state_locked(struct pci_dev *dev, pci_power_t state)
2004 { return 0; }
pci_wake_from_d3(struct pci_dev * dev,bool enable)2005 static inline int pci_wake_from_d3(struct pci_dev *dev, bool enable)
2006 { return 0; }
pci_choose_state(struct pci_dev * dev,pm_message_t state)2007 static inline pci_power_t pci_choose_state(struct pci_dev *dev,
2008 					   pm_message_t state)
2009 { return PCI_D0; }
pci_enable_wake(struct pci_dev * dev,pci_power_t state,int enable)2010 static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state,
2011 				  int enable)
2012 { return 0; }
2013 
pci_find_resource(struct pci_dev * dev,struct resource * res)2014 static inline struct resource *pci_find_resource(struct pci_dev *dev,
2015 						 struct resource *res)
2016 { return NULL; }
pci_request_regions(struct pci_dev * dev,const char * res_name)2017 static inline int pci_request_regions(struct pci_dev *dev, const char *res_name)
2018 { return -EIO; }
pci_release_regions(struct pci_dev * dev)2019 static inline void pci_release_regions(struct pci_dev *dev) { }
2020 
pci_register_io_range(struct fwnode_handle * fwnode,phys_addr_t addr,resource_size_t size)2021 static inline int pci_register_io_range(struct fwnode_handle *fwnode,
2022 					phys_addr_t addr, resource_size_t size)
2023 { return -EINVAL; }
2024 
pci_address_to_pio(phys_addr_t addr)2025 static inline unsigned long pci_address_to_pio(phys_addr_t addr) { return -1; }
2026 
pci_find_next_bus(const struct pci_bus * from)2027 static inline struct pci_bus *pci_find_next_bus(const struct pci_bus *from)
2028 { return NULL; }
pci_get_slot(struct pci_bus * bus,unsigned int devfn)2029 static inline struct pci_dev *pci_get_slot(struct pci_bus *bus,
2030 						unsigned int devfn)
2031 { return NULL; }
pci_get_domain_bus_and_slot(int domain,unsigned int bus,unsigned int devfn)2032 static inline struct pci_dev *pci_get_domain_bus_and_slot(int domain,
2033 					unsigned int bus, unsigned int devfn)
2034 { return NULL; }
2035 
pci_domain_nr(struct pci_bus * bus)2036 static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
pci_dev_get(struct pci_dev * dev)2037 static inline struct pci_dev *pci_dev_get(struct pci_dev *dev) { return NULL; }
2038 
2039 #define dev_is_pci(d) (false)
2040 #define dev_is_pf(d) (false)
pci_acs_enabled(struct pci_dev * pdev,u16 acs_flags)2041 static inline bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
2042 { return false; }
pci_irqd_intx_xlate(struct irq_domain * d,struct device_node * node,const u32 * intspec,unsigned int intsize,unsigned long * out_hwirq,unsigned int * out_type)2043 static inline int pci_irqd_intx_xlate(struct irq_domain *d,
2044 				      struct device_node *node,
2045 				      const u32 *intspec,
2046 				      unsigned int intsize,
2047 				      unsigned long *out_hwirq,
2048 				      unsigned int *out_type)
2049 { return -EINVAL; }
2050 
pci_match_id(const struct pci_device_id * ids,struct pci_dev * dev)2051 static inline const struct pci_device_id *pci_match_id(const struct pci_device_id *ids,
2052 							 struct pci_dev *dev)
2053 { return NULL; }
pci_ats_disabled(void)2054 static inline bool pci_ats_disabled(void) { return true; }
2055 
pci_irq_vector(struct pci_dev * dev,unsigned int nr)2056 static inline int pci_irq_vector(struct pci_dev *dev, unsigned int nr)
2057 {
2058 	return -EINVAL;
2059 }
2060 
2061 static inline int
pci_alloc_irq_vectors_affinity(struct pci_dev * dev,unsigned int min_vecs,unsigned int max_vecs,unsigned int flags,struct irq_affinity * aff_desc)2062 pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
2063 			       unsigned int max_vecs, unsigned int flags,
2064 			       struct irq_affinity *aff_desc)
2065 {
2066 	return -ENOSPC;
2067 }
2068 static inline int
pci_alloc_irq_vectors(struct pci_dev * dev,unsigned int min_vecs,unsigned int max_vecs,unsigned int flags)2069 pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
2070 		      unsigned int max_vecs, unsigned int flags)
2071 {
2072 	return -ENOSPC;
2073 }
2074 #endif /* CONFIG_PCI */
2075 
2076 /* Include architecture-dependent settings and functions */
2077 
2078 #include <asm/pci.h>
2079 
2080 /*
2081  * pci_mmap_resource_range() maps a specific BAR, and vm->vm_pgoff
2082  * is expected to be an offset within that region.
2084  */
2085 int pci_mmap_resource_range(struct pci_dev *dev, int bar,
2086 			    struct vm_area_struct *vma,
2087 			    enum pci_mmap_state mmap_state, int write_combine);
2088 
2089 #ifndef arch_can_pci_mmap_wc
2090 #define arch_can_pci_mmap_wc()		0
2091 #endif
2092 
2093 #ifndef arch_can_pci_mmap_io
2094 #define arch_can_pci_mmap_io()		0
2095 #define pci_iobar_pfn(pdev, bar, vma) (-EINVAL)
2096 #else
2097 int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma);
2098 #endif
2099 
2100 #ifndef pci_root_bus_fwnode
2101 #define pci_root_bus_fwnode(bus)	NULL
2102 #endif
2103 
2104 /*
2105  * These helpers provide future and backwards compatibility
2106  * for accessing popular PCI BAR info
2107  */
2108 #define pci_resource_n(dev, bar)	(&(dev)->resource[(bar)])
2109 #define pci_resource_start(dev, bar)	(pci_resource_n(dev, bar)->start)
2110 #define pci_resource_end(dev, bar)	(pci_resource_n(dev, bar)->end)
2111 #define pci_resource_flags(dev, bar)	(pci_resource_n(dev, bar)->flags)
2112 #define pci_resource_len(dev,bar)					\
2113 	(pci_resource_end((dev), (bar)) ? 				\
2114 	 resource_size(pci_resource_n((dev), (bar))) : 0)
2115 
2116 #define __pci_dev_for_each_res0(dev, res, ...)				  \
2117 	for (unsigned int __b = 0;					  \
2118 	     __b < PCI_NUM_RESOURCES && (res = pci_resource_n(dev, __b)); \
2119 	     __b++)
2120 
2121 #define __pci_dev_for_each_res1(dev, res, __b)				  \
2122 	for (__b = 0;							  \
2123 	     __b < PCI_NUM_RESOURCES && (res = pci_resource_n(dev, __b)); \
2124 	     __b++)
2125 
2126 #define pci_dev_for_each_resource(dev, res, ...)			\
2127 	CONCATENATE(__pci_dev_for_each_res, COUNT_ARGS(__VA_ARGS__)) 	\
2128 		    (dev, res, __VA_ARGS__)
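
/*
 * Usage sketch (editorial addition), mirroring pci_bus_for_each_resource()
 * above; pdev is a hypothetical device pointer.
 *
 *	struct resource *res;
 *	unsigned int i;
 *
 *	pci_dev_for_each_resource(pdev, res, i)
 *		if (res->flags & IORESOURCE_MEM)
 *			pr_info("BAR %u: %pR\n", i, res);
 */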
2129 
2130 /*
2131  * Similar to the helpers above, these manipulate per-pci_dev
2132  * driver-specific data.  They are really just thin wrappers around
2133  * the corresponding generic struct device functions.
2134  */
pci_get_drvdata(struct pci_dev * pdev)2135 static inline void *pci_get_drvdata(struct pci_dev *pdev)
2136 {
2137 	return dev_get_drvdata(&pdev->dev);
2138 }
2139 
pci_set_drvdata(struct pci_dev * pdev,void * data)2140 static inline void pci_set_drvdata(struct pci_dev *pdev, void *data)
2141 {
2142 	dev_set_drvdata(&pdev->dev, data);
2143 }
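
/*
 * Example (editorial sketch): probe() typically allocates a private state
 * structure and stashes it for later callbacks; "struct foo" is a
 * hypothetical driver type.
 *
 *	struct foo *priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
 *
 *	if (!priv)
 *		return -ENOMEM;
 *	pci_set_drvdata(pdev, priv);
 *
 * and later, e.g. in remove() or a PM callback:
 *
 *	struct foo *priv = pci_get_drvdata(pdev);
 */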
2144 
pci_name(const struct pci_dev * pdev)2145 static inline const char *pci_name(const struct pci_dev *pdev)
2146 {
2147 	return dev_name(&pdev->dev);
2148 }
2149 
2150 void pci_resource_to_user(const struct pci_dev *dev, int bar,
2151 			  const struct resource *rsrc,
2152 			  resource_size_t *start, resource_size_t *end);
2153 
2154 /*
2155  * The world is not perfect and supplies us with broken PCI devices.
2156  * At least some of these bugs need a workaround, so both
2157  * generic (drivers/pci/quirks.c) and per-architecture code can define
2158  * fixup hooks to be called for particular buggy devices.
2159  */
2160 
2161 struct pci_fixup {
2162 	u16 vendor;			/* Or PCI_ANY_ID */
2163 	u16 device;			/* Or PCI_ANY_ID */
2164 	u32 class;			/* Or PCI_ANY_ID */
2165 	unsigned int class_shift;	/* should be 0, 8, or 16 */
2166 #ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
2167 	int hook_offset;
2168 #else
2169 	void (*hook)(struct pci_dev *dev);
2170 #endif
2171 };
2172 
2173 enum pci_fixup_pass {
2174 	pci_fixup_early,	/* Before probing BARs */
2175 	pci_fixup_header,	/* After reading configuration header */
2176 	pci_fixup_final,	/* Final phase of device fixups */
2177 	pci_fixup_enable,	/* pci_enable_device() time */
2178 	pci_fixup_resume,	/* pci_device_resume() */
2179 	pci_fixup_suspend,	/* pci_device_suspend() */
2180 	pci_fixup_resume_early, /* pci_device_resume_early() */
2181 	pci_fixup_suspend_late,	/* pci_device_suspend_late() */
2182 };
2183 
2184 #ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
2185 #define ___DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class,	\
2186 				    class_shift, hook)			\
2187 	__ADDRESSABLE(hook)						\
2188 	asm(".section "	#sec ", \"a\"				\n"	\
2189 	    ".balign	16					\n"	\
2190 	    ".short "	#vendor ", " #device "			\n"	\
2191 	    ".long "	#class ", " #class_shift "		\n"	\
2192 	    ".long "	#hook " - .				\n"	\
2193 	    ".previous						\n");
2194 
2195 /*
2196  * Clang's LTO may rename static functions in C, but has no way to
2197  * handle such renamings when referenced from inline asm. To work
2198  * around this, create global C stubs for these cases.
2199  */
2200 #ifdef CONFIG_LTO_CLANG
2201 #define __DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class,	\
2202 				  class_shift, hook, stub)		\
2203 	void stub(struct pci_dev *dev);					\
2204 	void stub(struct pci_dev *dev)					\
2205 	{ 								\
2206 		hook(dev); 						\
2207 	}								\
2208 	___DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class,	\
2209 				  class_shift, stub)
2210 #else
2211 #define __DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class,	\
2212 				  class_shift, hook, stub)		\
2213 	___DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class,	\
2214 				  class_shift, hook)
2215 #endif
2216 
2217 #define DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class,	\
2218 				  class_shift, hook)			\
2219 	__DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class,	\
2220 				  class_shift, hook, __UNIQUE_ID(hook))
2221 #else
2222 /* Anonymous variables would be nice... */
2223 #define DECLARE_PCI_FIXUP_SECTION(section, name, vendor, device, class,	\
2224 				  class_shift, hook)			\
2225 	static const struct pci_fixup __PASTE(__pci_fixup_##name,__LINE__) __used	\
2226 	__attribute__((__section__(#section), aligned((sizeof(void *)))))    \
2227 		= { vendor, device, class, class_shift, hook };
2228 #endif
2229 
2230 #define DECLARE_PCI_FIXUP_CLASS_EARLY(vendor, device, class,		\
2231 					 class_shift, hook)		\
2232 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early,			\
2233 		hook, vendor, device, class, class_shift, hook)
2234 #define DECLARE_PCI_FIXUP_CLASS_HEADER(vendor, device, class,		\
2235 					 class_shift, hook)		\
2236 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header,			\
2237 		hook, vendor, device, class, class_shift, hook)
2238 #define DECLARE_PCI_FIXUP_CLASS_FINAL(vendor, device, class,		\
2239 					 class_shift, hook)		\
2240 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final,			\
2241 		hook, vendor, device, class, class_shift, hook)
2242 #define DECLARE_PCI_FIXUP_CLASS_ENABLE(vendor, device, class,		\
2243 					 class_shift, hook)		\
2244 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable,			\
2245 		hook, vendor, device, class, class_shift, hook)
2246 #define DECLARE_PCI_FIXUP_CLASS_RESUME(vendor, device, class,		\
2247 					 class_shift, hook)		\
2248 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume,			\
2249 		resume##hook, vendor, device, class, class_shift, hook)
2250 #define DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(vendor, device, class,	\
2251 					 class_shift, hook)		\
2252 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early,		\
2253 		resume_early##hook, vendor, device, class, class_shift, hook)
2254 #define DECLARE_PCI_FIXUP_CLASS_SUSPEND(vendor, device, class,		\
2255 					 class_shift, hook)		\
2256 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend,			\
2257 		suspend##hook, vendor, device, class, class_shift, hook)
2258 #define DECLARE_PCI_FIXUP_CLASS_SUSPEND_LATE(vendor, device, class,	\
2259 					 class_shift, hook)		\
2260 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend_late,		\
2261 		suspend_late##hook, vendor, device, class, class_shift, hook)
2262 
2263 #define DECLARE_PCI_FIXUP_EARLY(vendor, device, hook)			\
2264 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early,			\
2265 		hook, vendor, device, PCI_ANY_ID, 0, hook)
2266 #define DECLARE_PCI_FIXUP_HEADER(vendor, device, hook)			\
2267 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header,			\
2268 		hook, vendor, device, PCI_ANY_ID, 0, hook)
2269 #define DECLARE_PCI_FIXUP_FINAL(vendor, device, hook)			\
2270 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final,			\
2271 		hook, vendor, device, PCI_ANY_ID, 0, hook)
2272 #define DECLARE_PCI_FIXUP_ENABLE(vendor, device, hook)			\
2273 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable,			\
2274 		hook, vendor, device, PCI_ANY_ID, 0, hook)
2275 #define DECLARE_PCI_FIXUP_RESUME(vendor, device, hook)			\
2276 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume,			\
2277 		resume##hook, vendor, device, PCI_ANY_ID, 0, hook)
2278 #define DECLARE_PCI_FIXUP_RESUME_EARLY(vendor, device, hook)		\
2279 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early,		\
2280 		resume_early##hook, vendor, device, PCI_ANY_ID, 0, hook)
2281 #define DECLARE_PCI_FIXUP_SUSPEND(vendor, device, hook)			\
2282 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend,			\
2283 		suspend##hook, vendor, device, PCI_ANY_ID, 0, hook)
2284 #define DECLARE_PCI_FIXUP_SUSPEND_LATE(vendor, device, hook)		\
2285 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend_late,		\
2286 		suspend_late##hook, vendor, device, PCI_ANY_ID, 0, hook)
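
/*
 * Example (editorial sketch): a final-phase quirk for a single device.
 * The vendor/device values and the hook body are placeholders only.
 *
 *	static void quirk_foo(struct pci_dev *dev)
 *	{
 *		pci_info(dev, "applying foo workaround\n");
 *	}
 *	DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1234, quirk_foo);
 */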
2287 
2288 #ifdef CONFIG_PCI_QUIRKS
2289 void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev);
2290 #else
pci_fixup_device(enum pci_fixup_pass pass,struct pci_dev * dev)2291 static inline void pci_fixup_device(enum pci_fixup_pass pass,
2292 				    struct pci_dev *dev) { }
2293 #endif
2294 
2295 void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen);
2296 void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr);
2297 void __iomem * const *pcim_iomap_table(struct pci_dev *pdev);
2298 int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name);
2299 int pcim_iomap_regions_request_all(struct pci_dev *pdev, int mask,
2300 				   const char *name);
2301 void pcim_iounmap_regions(struct pci_dev *pdev, int mask);
2302 
2303 extern int pci_pci_problems;
2304 #define PCIPCI_FAIL		1	/* No PCI-to-PCI DMA */
2305 #define PCIPCI_TRITON		2
2306 #define PCIPCI_NATOMA		4
2307 #define PCIPCI_VIAETBF		8
2308 #define PCIPCI_VSFX		16
2309 #define PCIPCI_ALIMAGIK		32	/* Need low latency setting */
2310 #define PCIAGP_FAIL		64	/* No PCI to AGP DMA */
2311 
2312 extern unsigned long pci_cardbus_io_size;
2313 extern unsigned long pci_cardbus_mem_size;
2314 extern u8 pci_dfl_cache_line_size;
2315 extern u8 pci_cache_line_size;
2316 
2317 /* Architecture-specific versions may override these (weak) */
2318 void pcibios_disable_device(struct pci_dev *dev);
2319 void pcibios_set_master(struct pci_dev *dev);
2320 int pcibios_set_pcie_reset_state(struct pci_dev *dev,
2321 				 enum pcie_reset_state state);
2322 int pcibios_device_add(struct pci_dev *dev);
2323 void pcibios_release_device(struct pci_dev *dev);
2324 #ifdef CONFIG_PCI
2325 void pcibios_penalize_isa_irq(int irq, int active);
2326 #else
pcibios_penalize_isa_irq(int irq,int active)2327 static inline void pcibios_penalize_isa_irq(int irq, int active) {}
2328 #endif
2329 int pcibios_alloc_irq(struct pci_dev *dev);
2330 void pcibios_free_irq(struct pci_dev *dev);
2331 resource_size_t pcibios_default_alignment(void);
2332 
2333 #if !defined(HAVE_PCI_MMAP) && !defined(ARCH_GENERIC_PCI_MMAP_RESOURCE)
2334 extern int pci_create_resource_files(struct pci_dev *dev);
2335 extern void pci_remove_resource_files(struct pci_dev *dev);
2336 #endif
2337 
2338 #if defined(CONFIG_PCI_MMCONFIG) || defined(CONFIG_ACPI_MCFG)
2339 void __init pci_mmcfg_early_init(void);
2340 void __init pci_mmcfg_late_init(void);
2341 #else
pci_mmcfg_early_init(void)2342 static inline void pci_mmcfg_early_init(void) { }
pci_mmcfg_late_init(void)2343 static inline void pci_mmcfg_late_init(void) { }
2344 #endif
2345 
2346 int pci_ext_cfg_avail(void);
2347 
2348 void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar);
2349 void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar);
2350 
2351 #ifdef CONFIG_PCI_IOV
2352 int pci_iov_virtfn_bus(struct pci_dev *dev, int id);
2353 int pci_iov_virtfn_devfn(struct pci_dev *dev, int id);
2354 int pci_iov_vf_id(struct pci_dev *dev);
2355 void *pci_iov_get_pf_drvdata(struct pci_dev *dev, struct pci_driver *pf_driver);
2356 int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn);
2357 void pci_disable_sriov(struct pci_dev *dev);
2358 
2359 int pci_iov_sysfs_link(struct pci_dev *dev, struct pci_dev *virtfn, int id);
2360 int pci_iov_add_virtfn(struct pci_dev *dev, int id);
2361 void pci_iov_remove_virtfn(struct pci_dev *dev, int id);
2362 int pci_num_vf(struct pci_dev *dev);
2363 int pci_vfs_assigned(struct pci_dev *dev);
2364 int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs);
2365 int pci_sriov_get_totalvfs(struct pci_dev *dev);
2366 int pci_sriov_configure_simple(struct pci_dev *dev, int nr_virtfn);
2367 resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno);
2368 void pci_vf_drivers_autoprobe(struct pci_dev *dev, bool probe);
2369 
2370 /* Arch may override these (weak) */
2371 int pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs);
2372 int pcibios_sriov_disable(struct pci_dev *pdev);
2373 resource_size_t pcibios_iov_resource_alignment(struct pci_dev *dev, int resno);
2374 #else
pci_iov_virtfn_bus(struct pci_dev * dev,int id)2375 static inline int pci_iov_virtfn_bus(struct pci_dev *dev, int id)
2376 {
2377 	return -ENOSYS;
2378 }
pci_iov_virtfn_devfn(struct pci_dev * dev,int id)2379 static inline int pci_iov_virtfn_devfn(struct pci_dev *dev, int id)
2380 {
2381 	return -ENOSYS;
2382 }
2383 
pci_iov_vf_id(struct pci_dev * dev)2384 static inline int pci_iov_vf_id(struct pci_dev *dev)
2385 {
2386 	return -ENOSYS;
2387 }
2388 
pci_iov_get_pf_drvdata(struct pci_dev * dev,struct pci_driver * pf_driver)2389 static inline void *pci_iov_get_pf_drvdata(struct pci_dev *dev,
2390 					   struct pci_driver *pf_driver)
2391 {
2392 	return ERR_PTR(-EINVAL);
2393 }
2394 
pci_enable_sriov(struct pci_dev * dev,int nr_virtfn)2395 static inline int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
2396 { return -ENODEV; }
2397 
pci_iov_sysfs_link(struct pci_dev * dev,struct pci_dev * virtfn,int id)2398 static inline int pci_iov_sysfs_link(struct pci_dev *dev,
2399 				     struct pci_dev *virtfn, int id)
2400 {
2401 	return -ENODEV;
2402 }
pci_iov_add_virtfn(struct pci_dev * dev,int id)2403 static inline int pci_iov_add_virtfn(struct pci_dev *dev, int id)
2404 {
2405 	return -ENOSYS;
2406 }
pci_iov_remove_virtfn(struct pci_dev * dev,int id)2407 static inline void pci_iov_remove_virtfn(struct pci_dev *dev,
2408 					 int id) { }
pci_disable_sriov(struct pci_dev * dev)2409 static inline void pci_disable_sriov(struct pci_dev *dev) { }
pci_num_vf(struct pci_dev * dev)2410 static inline int pci_num_vf(struct pci_dev *dev) { return 0; }
pci_vfs_assigned(struct pci_dev * dev)2411 static inline int pci_vfs_assigned(struct pci_dev *dev)
2412 { return 0; }
pci_sriov_set_totalvfs(struct pci_dev * dev,u16 numvfs)2413 static inline int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs)
2414 { return 0; }
pci_sriov_get_totalvfs(struct pci_dev * dev)2415 static inline int pci_sriov_get_totalvfs(struct pci_dev *dev)
2416 { return 0; }
2417 #define pci_sriov_configure_simple	NULL
pci_iov_resource_size(struct pci_dev * dev,int resno)2418 static inline resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno)
2419 { return 0; }
pci_vf_drivers_autoprobe(struct pci_dev * dev,bool probe)2420 static inline void pci_vf_drivers_autoprobe(struct pci_dev *dev, bool probe) { }
2421 #endif
2422 
2423 #if defined(CONFIG_HOTPLUG_PCI) || defined(CONFIG_HOTPLUG_PCI_MODULE)
2424 void pci_hp_create_module_link(struct pci_slot *pci_slot);
2425 void pci_hp_remove_module_link(struct pci_slot *pci_slot);
2426 #endif
2427 
2428 /**
2429  * pci_pcie_cap - get the saved PCIe capability offset
2430  * @dev: PCI device
2431  *
2432  * The PCIe capability offset is calculated at PCI device initialization
2433  * time and saved in the device structure. This function returns the
2434  * saved offset. Using it instead of pci_find_capability() avoids an
2435  * unnecessary search of PCI configuration space. If you need to
2436  * recompute the PCIe capability offset from the raw device for some
2437  * reason, use pci_find_capability() instead.
2438  */
pci_pcie_cap(struct pci_dev * dev)2439 static inline int pci_pcie_cap(struct pci_dev *dev)
2440 {
2441 	return dev->pcie_cap;
2442 }
2443 
2444 /**
2445  * pci_is_pcie - check if the PCI device is PCI Express capable
2446  * @dev: PCI device
2447  *
2448  * Returns: true if the PCI device is PCI Express capable, false otherwise.
2449  */
pci_is_pcie(struct pci_dev * dev)2450 static inline bool pci_is_pcie(struct pci_dev *dev)
2451 {
2452 	return pci_pcie_cap(dev);
2453 }
2454 
2455 /**
2456  * pcie_caps_reg - get the PCIe Capabilities Register
2457  * @dev: PCI device
2458  */
pcie_caps_reg(const struct pci_dev * dev)2459 static inline u16 pcie_caps_reg(const struct pci_dev *dev)
2460 {
2461 	return dev->pcie_flags_reg;
2462 }
2463 
2464 /**
2465  * pci_pcie_type - get the PCIe device/port type
2466  * @dev: PCI device
2467  */
pci_pcie_type(const struct pci_dev * dev)2468 static inline int pci_pcie_type(const struct pci_dev *dev)
2469 {
2470 	return (pcie_caps_reg(dev) & PCI_EXP_FLAGS_TYPE) >> 4;
2471 }
2472 
2473 /**
2474  * pcie_find_root_port - Get the PCIe root port device
2475  * @dev: PCI device
2476  *
2477  * Traverse up the parent chain and return the PCIe Root Port PCI Device
2478  * for a given PCI/PCIe Device.
2479  */
pcie_find_root_port(struct pci_dev * dev)2480 static inline struct pci_dev *pcie_find_root_port(struct pci_dev *dev)
2481 {
2482 	while (dev) {
2483 		if (pci_is_pcie(dev) &&
2484 		    pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
2485 			return dev;
2486 		dev = pci_upstream_bridge(dev);
2487 	}
2488 
2489 	return NULL;
2490 }
2491 
pci_dev_is_disconnected(const struct pci_dev * dev)2492 static inline bool pci_dev_is_disconnected(const struct pci_dev *dev)
2493 {
2494 	/*
2495 	 * error_state is set in pci_dev_set_io_state() using xchg/cmpxchg()
2496 	 * and read without holding a common lock. READ_ONCE() ensures the
2497 	 * compiler cannot cache the value (e.g. inside the loop in pci_dev_wait()).
2498 	 */
2499 	return READ_ONCE(dev->error_state) == pci_channel_io_perm_failure;
2500 }
2501 
2502 void pci_request_acs(void);
2503 bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags);
2504 bool pci_acs_path_enabled(struct pci_dev *start,
2505 			  struct pci_dev *end, u16 acs_flags);
2506 int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask);
2507 
2508 #define PCI_VPD_LRDT			0x80	/* Large Resource Data Type */
2509 #define PCI_VPD_LRDT_ID(x)		((x) | PCI_VPD_LRDT)
2510 
2511 /* Large Resource Data Type Tag Item Names */
2512 #define PCI_VPD_LTIN_ID_STRING		0x02	/* Identifier String */
2513 #define PCI_VPD_LTIN_RO_DATA		0x10	/* Read-Only Data */
2514 #define PCI_VPD_LTIN_RW_DATA		0x11	/* Read-Write Data */
2515 
2516 #define PCI_VPD_LRDT_ID_STRING		PCI_VPD_LRDT_ID(PCI_VPD_LTIN_ID_STRING)
2517 #define PCI_VPD_LRDT_RO_DATA		PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RO_DATA)
2518 #define PCI_VPD_LRDT_RW_DATA		PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RW_DATA)
2519 
2520 #define PCI_VPD_RO_KEYWORD_PARTNO	"PN"
2521 #define PCI_VPD_RO_KEYWORD_SERIALNO	"SN"
2522 #define PCI_VPD_RO_KEYWORD_MFR_ID	"MN"
2523 #define PCI_VPD_RO_KEYWORD_VENDOR0	"V0"
2524 #define PCI_VPD_RO_KEYWORD_CHKSUM	"RV"
2525 
2526 /**
2527  * pci_vpd_alloc - Allocate buffer and read VPD into it
2528  * @dev: PCI device
2529  * @size: pointer to field where VPD length is returned
2530  *
2531  * Returns a pointer to the allocated buffer or an ERR_PTR() on failure.
2532  */
2533 void *pci_vpd_alloc(struct pci_dev *dev, unsigned int *size);
2534 
2535 /**
2536  * pci_vpd_find_id_string - Locate id string in VPD
2537  * @buf: Pointer to buffered VPD data
2538  * @len: The length of the buffer area in which to search
2539  * @size: Pointer to field where length of id string is returned
2540  *
2541  * Returns the index of the id string or -ENOENT if not found.
2542  */
2543 int pci_vpd_find_id_string(const u8 *buf, unsigned int len, unsigned int *size);
2544 
2545 /**
2546  * pci_vpd_find_ro_info_keyword - Locate info field keyword in VPD RO section
2547  * @buf: Pointer to buffered VPD data
2548  * @len: The length of the buffer area in which to search
2549  * @kw: The keyword to search for
2550  * @size: Pointer to field where length of found keyword data is returned
2551  *
2552  * Returns the index of the information field keyword data or -ENOENT if
2553  * not found.
2554  */
2555 int pci_vpd_find_ro_info_keyword(const void *buf, unsigned int len,
2556 				 const char *kw, unsigned int *size);
2557 
2558 /**
2559  * pci_vpd_check_csum - Check VPD checksum
2560  * @buf: Pointer to buffered VPD data
2561  * @len: VPD size
2562  *
2563  * Returns 1 if the VPD has no checksum, otherwise 0 or a negative errno
2564  */
2565 int pci_vpd_check_csum(const void *buf, unsigned int len);
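
/*
 * Illustrative sketch (not part of the original header): reading the serial
 * number keyword from a device's VPD with the helpers above (error handling
 * abbreviated):
 *
 *	unsigned int vpd_len, sn_len;
 *	void *vpd = pci_vpd_alloc(pdev, &vpd_len);
 *	int sn_off;
 *
 *	if (IS_ERR(vpd))
 *		return PTR_ERR(vpd);
 *	sn_off = pci_vpd_find_ro_info_keyword(vpd, vpd_len,
 *					      PCI_VPD_RO_KEYWORD_SERIALNO,
 *					      &sn_len);
 *	if (sn_off >= 0)
 *		dev_info(&pdev->dev, "VPD SN: %.*s\n",
 *			 (int)sn_len, (char *)vpd + sn_off);
 *	kfree(vpd);
 */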
2566 
2567 /* PCI <-> OF binding helpers */
2568 #ifdef CONFIG_OF
2569 struct device_node;
2570 struct irq_domain;
2571 struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus);
2572 bool pci_host_of_has_msi_map(struct device *dev);
2573 
2574 /* Arch may override this (weak) */
2575 struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus);
2576 
2577 #else	/* CONFIG_OF */
2578 static inline struct irq_domain *
2579 pci_host_bridge_of_msi_domain(struct pci_bus *bus) { return NULL; }
2580 static inline bool pci_host_of_has_msi_map(struct device *dev) { return false; }
2581 #endif  /* CONFIG_OF */
2582 
2583 static inline struct device_node *
2584 pci_device_to_OF_node(const struct pci_dev *pdev)
2585 {
2586 	return pdev ? pdev->dev.of_node : NULL;
2587 }
2588 
2589 static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus)
2590 {
2591 	return bus ? bus->dev.of_node : NULL;
2592 }
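
/*
 * Illustrative sketch (not part of the original header): with CONFIG_OF, a
 * driver can reach its device tree node (if any) to read extra properties;
 * the property name below is purely hypothetical:
 *
 *	struct device_node *np = pci_device_to_OF_node(pdev);
 *	u32 val;
 *
 *	if (np && !of_property_read_u32(np, "vendor,example-prop", &val))
 *		dev_info(&pdev->dev, "example-prop = %u\n", val);
 */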
2593 
2594 #ifdef CONFIG_ACPI
2595 struct irq_domain *pci_host_bridge_acpi_msi_domain(struct pci_bus *bus);
2596 
2597 void
2598 pci_msi_register_fwnode_provider(struct fwnode_handle *(*fn)(struct device *));
2599 bool pci_pr3_present(struct pci_dev *pdev);
2600 #else
2601 static inline struct irq_domain *
2602 pci_host_bridge_acpi_msi_domain(struct pci_bus *bus) { return NULL; }
2603 static inline bool pci_pr3_present(struct pci_dev *pdev) { return false; }
2604 #endif
2605 
2606 #ifdef CONFIG_EEH
2607 static inline struct eeh_dev *pci_dev_to_eeh_dev(struct pci_dev *pdev)
2608 {
2609 	return pdev->dev.archdata.edev;
2610 }
2611 #endif
2612 
2613 void pci_add_dma_alias(struct pci_dev *dev, u8 devfn_from, unsigned nr_devfns);
2614 bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2);
2615 int pci_for_each_dma_alias(struct pci_dev *pdev,
2616 			   int (*fn)(struct pci_dev *pdev,
2617 				     u16 alias, void *data), void *data);
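
/*
 * Illustrative sketch (not part of the original header): an IOMMU-style
 * driver could collect every requester ID a device may use; the callback
 * and table helpers below are hypothetical names:
 *
 *	static int example_add_alias(struct pci_dev *pdev, u16 alias, void *data)
 *	{
 *		struct example_table *tbl = data;
 *
 *		return example_table_add(tbl, alias);
 *	}
 *
 *	pci_for_each_dma_alias(pdev, example_add_alias, tbl);
 *
 * A non-zero return value from the callback stops the iteration.
 */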
2618 
2619 /* Helper functions to manage the PCI_DEV_FLAGS_ASSIGNED device flag */
2620 static inline void pci_set_dev_assigned(struct pci_dev *pdev)
2621 {
2622 	pdev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED;
2623 }
2624 static inline void pci_clear_dev_assigned(struct pci_dev *pdev)
2625 {
2626 	pdev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED;
2627 }
2628 static inline bool pci_is_dev_assigned(struct pci_dev *pdev)
2629 {
2630 	return (pdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED) == PCI_DEV_FLAGS_ASSIGNED;
2631 }
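
/*
 * Illustrative sketch (not part of the original header): a device assignment
 * framework (VFIO-style) could mark a device while it is handed to a guest
 * so that other code can detect the ownership:
 *
 *	pci_set_dev_assigned(pdev);
 *	... device is controlled by userspace or a guest ...
 *	pci_clear_dev_assigned(pdev);
 *
 * and, elsewhere:
 *
 *	if (pci_is_dev_assigned(pdev))
 *		return -EBUSY;
 */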
2632 
2633 /**
2634  * pci_ari_enabled - query ARI forwarding status
2635  * @bus: the PCI bus
2636  *
2637  * Returns true if ARI forwarding is enabled.
2638  */
2639 static inline bool pci_ari_enabled(struct pci_bus *bus)
2640 {
2641 	return bus->self && bus->self->ari_enabled;
2642 }
2643 
2644 /**
2645  * pci_is_thunderbolt_attached - whether device is on a Thunderbolt daisy chain
2646  * @pdev: PCI device to check
2647  *
2648  * Walk upwards from @pdev and check for each encountered bridge if it's part
2649  * of a Thunderbolt controller.  Reaching the host bridge means @pdev is not
2650  * Thunderbolt-attached (it is typically soldered to the mainboard instead).
2651  */
2652 static inline bool pci_is_thunderbolt_attached(struct pci_dev *pdev)
2653 {
2654 	struct pci_dev *parent = pdev;
2655 
2656 	if (pdev->is_thunderbolt)
2657 		return true;
2658 
2659 	while ((parent = pci_upstream_bridge(parent)))
2660 		if (parent->is_thunderbolt)
2661 			return true;
2662 
2663 	return false;
2664 }
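
/*
 * Illustrative sketch (not part of the original header): a driver might use
 * this check to treat externally attached (and therefore removable) devices
 * differently from devices on the mainboard:
 *
 *	if (pci_is_thunderbolt_attached(pdev))
 *		dev_info(&pdev->dev, "device sits behind a Thunderbolt chain\n");
 */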
2665 
2666 #if defined(CONFIG_PCIEPORTBUS) || defined(CONFIG_EEH)
2667 void pci_uevent_ers(struct pci_dev *pdev, enum pci_ers_result err_type);
2668 #endif
2669 
2670 #include <linux/dma-mapping.h>
2671 
2672 #define pci_printk(level, pdev, fmt, arg...) \
2673 	dev_printk(level, &(pdev)->dev, fmt, ##arg)
2674 
2675 #define pci_emerg(pdev, fmt, arg...)	dev_emerg(&(pdev)->dev, fmt, ##arg)
2676 #define pci_alert(pdev, fmt, arg...)	dev_alert(&(pdev)->dev, fmt, ##arg)
2677 #define pci_crit(pdev, fmt, arg...)	dev_crit(&(pdev)->dev, fmt, ##arg)
2678 #define pci_err(pdev, fmt, arg...)	dev_err(&(pdev)->dev, fmt, ##arg)
2679 #define pci_warn(pdev, fmt, arg...)	dev_warn(&(pdev)->dev, fmt, ##arg)
2680 #define pci_warn_once(pdev, fmt, arg...) dev_warn_once(&(pdev)->dev, fmt, ##arg)
2681 #define pci_notice(pdev, fmt, arg...)	dev_notice(&(pdev)->dev, fmt, ##arg)
2682 #define pci_info(pdev, fmt, arg...)	dev_info(&(pdev)->dev, fmt, ##arg)
2683 #define pci_dbg(pdev, fmt, arg...)	dev_dbg(&(pdev)->dev, fmt, ##arg)
2684 
2685 #define pci_notice_ratelimited(pdev, fmt, arg...) \
2686 	dev_notice_ratelimited(&(pdev)->dev, fmt, ##arg)
2687 
2688 #define pci_info_ratelimited(pdev, fmt, arg...) \
2689 	dev_info_ratelimited(&(pdev)->dev, fmt, ##arg)
2690 
2691 #define pci_WARN(pdev, condition, fmt, arg...) \
2692 	WARN(condition, "%s %s: " fmt, \
2693 	     dev_driver_string(&(pdev)->dev), pci_name(pdev), ##arg)
2694 
2695 #define pci_WARN_ONCE(pdev, condition, fmt, arg...) \
2696 	WARN_ONCE(condition, "%s %s: " fmt, \
2697 		  dev_driver_string(&(pdev)->dev), pci_name(pdev), ##arg)
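
/*
 * Illustrative sketch (not part of the original header): the pci_*() logging
 * wrappers prefix each message with the driver and device name, so probe
 * code can simply do:
 *
 *	pci_info(pdev, "enabling device\n");
 *	if (err)
 *		pci_err(pdev, "probe failed: %d\n", err);
 *	pci_WARN_ONCE(pdev, !bar_len, "BAR0 missing\n");
 */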
2698 
2699 #endif /* LINUX_PCI_H */
2700