/* SPDX-License-Identifier: GPL-2.0 */

#include <linux/pci.h>
#include <linux/msi.h>

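/*
 * The MSI-X Table Size field of the Message Control word encodes the
 * number of table entries minus one, hence the "+ 1".
 */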
#define msix_table_size(flags)	((flags & PCI_MSIX_FLAGS_QSIZE) + 1)

int pci_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
void pci_msi_teardown_msi_irqs(struct pci_dev *dev);

/* Mask/unmask helpers */
void pci_msi_update_mask(struct msi_desc *desc, u32 clear, u32 set);

static inline void pci_msi_mask(struct msi_desc *desc, u32 mask)
{
	pci_msi_update_mask(desc, 0, mask);
}

static inline void pci_msi_unmask(struct msi_desc *desc, u32 mask)
{
	pci_msi_update_mask(desc, mask, 0);
}

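/*
 * Address of one MSI-X table entry; each entry is PCI_MSIX_ENTRY_SIZE
 * (16) bytes: message address low/high, message data, vector control.
 */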
static inline void __iomem *pci_msix_desc_addr(struct msi_desc *desc)
{
	return desc->pci.mask_base + desc->msi_index * PCI_MSIX_ENTRY_SIZE;
}

/*
 * This internal function does not flush PCI writes to the device.  All
 * users must ensure that they read from the device before either assuming
 * that the device state is up to date, or returning out of this file.
 * It does not affect the msi_desc::msix_ctrl cache either. Use with care!
 */
static inline void pci_msix_write_vector_ctrl(struct msi_desc *desc, u32 ctrl)
{
	void __iomem *desc_addr = pci_msix_desc_addr(desc);

	if (desc->pci.msi_attrib.can_mask)
		writel(ctrl, desc_addr + PCI_MSIX_ENTRY_VECTOR_CTRL);
}
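
/*
 * Example (illustrative only): a caller that needs the written vector
 * control state to be visible to the device must flush the posted write
 * with a read, as pci_msix_mask() below does:
 *
 *	pci_msix_write_vector_ctrl(desc, desc->pci.msix_ctrl);
 *	readl(desc->pci.mask_base);	// flush the posted write
 */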

static inline void pci_msix_mask(struct msi_desc *desc)
{
	desc->pci.msix_ctrl |= PCI_MSIX_ENTRY_CTRL_MASKBIT;
	pci_msix_write_vector_ctrl(desc, desc->pci.msix_ctrl);
	/* Flush write to device */
	readl(desc->pci.mask_base);
}

static inline void pci_msix_unmask(struct msi_desc *desc)
{
	desc->pci.msix_ctrl &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT;
	pci_msix_write_vector_ctrl(desc, desc->pci.msix_ctrl);
}

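/* Dispatch masking to the MSI or MSI-X specific helper based on descriptor type */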
static inline void __pci_msi_mask_desc(struct msi_desc *desc, u32 mask)
{
	if (desc->pci.msi_attrib.is_msix)
		pci_msix_mask(desc);
	else
		pci_msi_mask(desc, mask);
}

static inline void __pci_msi_unmask_desc(struct msi_desc *desc, u32 mask)
{
	if (desc->pci.msi_attrib.is_msix)
		pci_msix_unmask(desc);
	else
		pci_msi_unmask(desc, mask);
}

/*
 * PCI 2.3 does not specify mask bits for each MSI interrupt.  Attempting to
 * mask all MSI interrupts by clearing the MSI enable bit does not work
 * reliably as devices without an INTx disable bit will then generate a
 * level IRQ which will never be cleared.
 */
static inline __attribute_const__ u32 msi_multi_mask(struct msi_desc *desc)
{
	/* Don't shift by >= width of type */
	if (desc->pci.msi_attrib.multi_cap >= 5)
		return 0xffffffff;
	return (1 << (1 << desc->pci.msi_attrib.multi_cap)) - 1;
}
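
/*
 * Worked example: multi_cap == 2 encodes 1 << 2 == 4 implemented vectors,
 * so the function returns (1 << 4) - 1 == 0xf, one mask bit per vector.
 * multi_cap == 5 would mean 32 vectors, where the inner shift would be
 * undefined for a 32-bit type, hence the explicit 0xffffffff.
 */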

void msix_prepare_msi_desc(struct pci_dev *dev, struct msi_desc *desc);

/* Subsystem variables */
extern int pci_msi_enable;

/* MSI internal functions invoked from the public APIs */
void pci_msi_shutdown(struct pci_dev *dev);
void pci_msix_shutdown(struct pci_dev *dev);
void pci_free_msi_irqs(struct pci_dev *dev);
int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec, struct irq_affinity *affd);
int __pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, int minvec,
			    int maxvec, struct irq_affinity *affd, int flags);
void __pci_restore_msi_state(struct pci_dev *dev);
void __pci_restore_msix_state(struct pci_dev *dev);

/* irq_domain related functionality */

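/*
 * Whether the absence of a per-device MSI irqdomain may be tolerated by
 * falling back to the legacy/arch setup paths (ALLOW_LEGACY) or not
 * (DENY_LEGACY).
 */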
enum support_mode {
	ALLOW_LEGACY,
	DENY_LEGACY,
};

bool pci_msi_domain_supports(struct pci_dev *dev, unsigned int feature_mask, enum support_mode mode);
bool pci_setup_msi_device_domain(struct pci_dev *pdev);
bool pci_setup_msix_device_domain(struct pci_dev *pdev, unsigned int hwsize);

/* Legacy (!IRQDOMAIN) fallbacks */

#ifdef CONFIG_PCI_MSI_ARCH_FALLBACKS
int pci_msi_legacy_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
void pci_msi_legacy_teardown_msi_irqs(struct pci_dev *dev);
#else
static inline int pci_msi_legacy_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	WARN_ON_ONCE(1);
	return -ENODEV;
}

static inline void pci_msi_legacy_teardown_msi_irqs(struct pci_dev *dev)
{
	WARN_ON_ONCE(1);
}
#endif