xref: /openbmc/linux/drivers/pci/msi/msi.h (revision 7ae9fb1b7ecbb5d85d07857943f677fd1a559b18)
/* SPDX-License-Identifier: GPL-2.0 */

#include <linux/pci.h>
#include <linux/msi.h>

#define msix_table_size(flags)	((flags & PCI_MSIX_FLAGS_QSIZE) + 1)
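/*
 * Illustrative example: the QSIZE field of the MSI-X Message Control word
 * encodes the table size as N - 1, so a QSIZE value of 0x3f yields
 * msix_table_size() == 64 vectors.
 */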

int pci_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
void pci_msi_teardown_msi_irqs(struct pci_dev *dev);

/* Mask/unmask helpers */
void pci_msi_update_mask(struct msi_desc *desc, u32 clear, u32 set);

static inline void pci_msi_mask(struct msi_desc *desc, u32 mask)
{
	pci_msi_update_mask(desc, 0, mask);
}

static inline void pci_msi_unmask(struct msi_desc *desc, u32 mask)
{
	pci_msi_update_mask(desc, mask, 0);
}

static inline void __iomem *pci_msix_desc_addr(struct msi_desc *desc)
{
	return desc->pci.mask_base + desc->msi_index * PCI_MSIX_ENTRY_SIZE;
}
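/*
 * Illustrative note: each MSI-X table entry is PCI_MSIX_ENTRY_SIZE (16)
 * bytes, so e.g. the entry for msi_index 2 starts 32 bytes into the
 * table mapped at desc->pci.mask_base.
 */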

/*
 * This internal function does not flush PCI writes to the device.  All
 * users must ensure that they read from the device before either assuming
 * that the device state is up to date, or returning out of this file.
 * It does not affect the msi_desc::msix_ctrl cache either. Use with care!
 */
static inline void pci_msix_write_vector_ctrl(struct msi_desc *desc, u32 ctrl)
{
	void __iomem *desc_addr = pci_msix_desc_addr(desc);

	if (desc->pci.msi_attrib.can_mask)
		writel(ctrl, desc_addr + PCI_MSIX_ENTRY_VECTOR_CTRL);
}
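/*
 * Illustrative caller pattern: the write above is posted, so a caller that
 * must observe up-to-date device state follows it with a read from the
 * device, as pci_msix_mask() below does:
 *
 *	pci_msix_write_vector_ctrl(desc, ctrl);
 *	readl(desc->pci.mask_base);	// flush the posted write
 */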

static inline void pci_msix_mask(struct msi_desc *desc)
{
	desc->pci.msix_ctrl |= PCI_MSIX_ENTRY_CTRL_MASKBIT;
	pci_msix_write_vector_ctrl(desc, desc->pci.msix_ctrl);
	/* Flush write to device */
	readl(desc->pci.mask_base);
}

static inline void pci_msix_unmask(struct msi_desc *desc)
{
	desc->pci.msix_ctrl &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT;
	pci_msix_write_vector_ctrl(desc, desc->pci.msix_ctrl);
}

static inline void __pci_msi_mask_desc(struct msi_desc *desc, u32 mask)
{
	if (desc->pci.msi_attrib.is_msix)
		pci_msix_mask(desc);
	else
		pci_msi_mask(desc, mask);
}

static inline void __pci_msi_unmask_desc(struct msi_desc *desc, u32 mask)
{
	if (desc->pci.msi_attrib.is_msix)
		pci_msix_unmask(desc);
	else
		pci_msi_unmask(desc, mask);
}

/*
 * PCI 2.3 does not specify mask bits for each MSI interrupt.  Attempting to
 * mask all MSI interrupts by clearing the MSI enable bit does not work
 * reliably as devices without an INTx disable bit will then generate a
 * level IRQ which will never be cleared.
 */
static inline __attribute_const__ u32 msi_multi_mask(struct msi_desc *desc)
{
	/* Don't shift by >= width of type */
	if (desc->pci.msi_attrib.multi_cap >= 5)
		return 0xffffffff;
	return (1 << (1 << desc->pci.msi_attrib.multi_cap)) - 1;
}
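/*
 * Illustrative example: msi_attrib.multi_cap holds the log2-encoded
 * Multiple Message Capable field, so multi_cap == 3 means 2^3 = 8 vectors
 * and msi_multi_mask() returns (1 << 8) - 1 == 0xff, one mask bit per
 * vector.
 */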

void msix_prepare_msi_desc(struct pci_dev *dev, struct msi_desc *desc);

/* Subsystem variables */
extern int pci_msi_enable;

/* MSI internal functions invoked from the public APIs */
void pci_msi_shutdown(struct pci_dev *dev);
void pci_msix_shutdown(struct pci_dev *dev);
void pci_free_msi_irqs(struct pci_dev *dev);
int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec, struct irq_affinity *affd);
int __pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, int minvec,
			    int maxvec, struct irq_affinity *affd, int flags);
void __pci_restore_msi_state(struct pci_dev *dev);
void __pci_restore_msix_state(struct pci_dev *dev);

/* irq_domain related functionality */

enum support_mode {
	ALLOW_LEGACY,
	DENY_LEGACY,
};

bool pci_msi_domain_supports(struct pci_dev *dev, unsigned int feature_mask, enum support_mode mode);
bool pci_setup_msi_device_domain(struct pci_dev *pdev);
bool pci_setup_msix_device_domain(struct pci_dev *pdev, unsigned int hwsize);
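/*
 * Illustrative use (a sketch, not a definitive contract): callers pass an
 * MSI_FLAG_* mask, e.g.
 *
 *	pci_msi_domain_supports(dev, MSI_FLAG_PCI_MSIX, ALLOW_LEGACY);
 *
 * where ALLOW_LEGACY treats a device without an MSI irq domain as
 * supported so the arch fallbacks below can still be used, while
 * DENY_LEGACY does not.
 */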

/* Legacy (!IRQDOMAIN) fallbacks */

#ifdef CONFIG_PCI_MSI_ARCH_FALLBACKS
int pci_msi_legacy_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
void pci_msi_legacy_teardown_msi_irqs(struct pci_dev *dev);
#else
static inline int pci_msi_legacy_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	WARN_ON_ONCE(1);
	return -ENODEV;
}

static inline void pci_msi_legacy_teardown_msi_irqs(struct pci_dev *dev)
{
	WARN_ON_ONCE(1);
}
#endif