xref: /openbmc/linux/include/linux/msi.h (revision 31e67366)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef LINUX_MSI_H
3 #define LINUX_MSI_H
4 
5 #include <linux/kobject.h>
6 #include <linux/list.h>
7 #include <asm/msi.h>
8 
9 /* Dummy shadow structures if an architecture does not define them */
#ifndef arch_msi_msg_addr_lo
/* Generic fallback: plain 32-bit view of the MSI address low word */
typedef struct arch_msi_msg_addr_lo {
	u32	address_lo;
} __attribute__ ((packed)) arch_msi_msg_addr_lo_t;
#endif
15 
#ifndef arch_msi_msg_addr_hi
/* Generic fallback: plain 32-bit view of the MSI address high word */
typedef struct arch_msi_msg_addr_hi {
	u32	address_hi;
} __attribute__ ((packed)) arch_msi_msg_addr_hi_t;
#endif
21 
#ifndef arch_msi_msg_data
/* Generic fallback: plain 32-bit view of the MSI data word */
typedef struct arch_msi_msg_data {
	u32	data;
} __attribute__ ((packed)) arch_msi_msg_data_t;
#endif
27 
/**
 * struct msi_msg - Representation of a MSI message
 * @address_lo:		Low 32 bits of msi message address
 * @arch_addr_lo:	Architecture specific shadow of @address_lo
 * @address_hi:		High 32 bits of msi message address
 *			(only used when device supports it)
 * @arch_addr_hi:	Architecture specific shadow of @address_hi
 * @data:		MSI message data (usually 16 bits)
 * @arch_data:		Architecture specific shadow of @data
 *
 * Each raw 32-bit word is unioned with its architecture specific shadow
 * type so arch code can overlay its own bit layout on the same storage.
 */
struct msi_msg {
	union {
		u32			address_lo;
		arch_msi_msg_addr_lo_t	arch_addr_lo;
	};
	union {
		u32			address_hi;
		arch_msi_msg_addr_hi_t	arch_addr_hi;
	};
	union {
		u32			data;
		arch_msi_msg_data_t	arch_data;
	};
};
52 
/* NOTE(review): defined in the PCI/MSI core; when non-zero, MSI[-X] mask
 * register accesses are skipped (set by hypervisor setups) — confirm against
 * drivers/pci/msi.c before relying on this. */
extern int pci_msi_ignore_mask;
/* Helper functions */
struct irq_data;
struct msi_desc;
struct pci_dev;
struct platform_msi_priv_data;
/* Copy the cached MSI message of @entry into @msg */
void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
#ifdef CONFIG_GENERIC_MSI_IRQ
/* Copy the cached MSI message of the descriptor behind @irq into @msg */
void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg);
#else
/* Stub: without generic MSI support there is no cached message to read */
static inline void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg)
{
}
#endif
67 
/* Callback type invoked to write an MSI message for a descriptor */
typedef void (*irq_write_msi_msg_t)(struct msi_desc *desc,
				    struct msi_msg *msg);
70 
/**
 * struct platform_msi_desc - Platform device specific msi descriptor data
 * @msi_priv_data:	Pointer to platform private data
 * @msi_index:		The index of the MSI descriptor for multi MSI
 */
struct platform_msi_desc {
	struct platform_msi_priv_data	*msi_priv_data;
	u16				msi_index;
};
80 
/**
 * struct fsl_mc_msi_desc - FSL-MC device specific msi descriptor data
 * @msi_index:		The index of the MSI descriptor
 */
struct fsl_mc_msi_desc {
	u16				msi_index;
};
88 
/**
 * struct ti_sci_inta_msi_desc - TISCI based INTA specific msi descriptor data
 * @dev_index: TISCI device index
 */
struct ti_sci_inta_msi_desc {
	u16	dev_index;
};
96 
/**
 * struct msi_desc - Descriptor structure for MSI based interrupts
 * @list:	List head for management
 * @irq:	The base interrupt number
 * @nvec_used:	The number of vectors used
 * @dev:	Pointer to the device which uses this descriptor
 * @msg:	The last set MSI message cached for reuse
 * @affinity:	Optional pointer to a cpu affinity mask for this descriptor
 * @iommu_cookie: Optional IOMMU token, only present when
 *		CONFIG_IRQ_MSI_IOMMU is enabled
 *
 * @write_msi_msg:	Callback that may be called when the MSI message
 *			address or data changes
 * @write_msi_msg_data:	Data parameter for the callback.
 *
 * @masked:	[PCI MSI/X] Mask bits
 * @is_msix:	[PCI MSI/X] True if MSI-X
 * @multiple:	[PCI MSI/X] log2 num of messages allocated
 * @multi_cap:	[PCI MSI/X] log2 num of messages supported
 * @maskbit:	[PCI MSI/X] Mask-Pending bit supported?
 * @is_64:	[PCI MSI/X] Address size: 0=32bit 1=64bit
 * @is_virtual:	[PCI MSI/X] Virtual entry flag (NOTE(review): exact
 *		semantics are defined by the PCI/MSI core, not visible
 *		here — presumably an entry without hardware backing)
 * @entry_nr:	[PCI MSI/X] Entry which is described by this descriptor
 * @default_irq:[PCI MSI/X] The default pre-assigned non-MSI irq
 * @mask_pos:	[PCI MSI]   Mask register position
 * @mask_base:	[PCI MSI-X] Mask register base address
 * @platform:	[platform]  Platform device specific msi descriptor data
 * @fsl_mc:	[fsl-mc]    FSL MC device specific msi descriptor data
 * @inta:	[INTA]	    TISCI based INTA specific msi descriptor data
 */
struct msi_desc {
	/* Shared device/bus type independent data */
	struct list_head		list;
	unsigned int			irq;
	unsigned int			nvec_used;
	struct device			*dev;
	struct msi_msg			msg;
	struct irq_affinity_desc	*affinity;
#ifdef CONFIG_IRQ_MSI_IOMMU
	const void			*iommu_cookie;
#endif

	void (*write_msi_msg)(struct msi_desc *entry, void *data);
	void *write_msi_msg_data;

	union {
		/* PCI MSI/X specific data */
		struct {
			u32 masked;
			struct {
				u8	is_msix		: 1;
				u8	multiple	: 3;
				u8	multi_cap	: 3;
				u8	maskbit		: 1;
				u8	is_64		: 1;
				u8	is_virtual	: 1;
				u16	entry_nr;
				unsigned default_irq;
			} msi_attrib;
			union {
				u8	mask_pos;
				void __iomem *mask_base;
			};
		};

		/*
		 * Non PCI variants add their data structure here. New
		 * entries need to use a named structure. We want
		 * proper name spaces for this. The PCI part is
		 * anonymous for now as it would require an immediate
		 * tree wide cleanup.
		 */
		struct platform_msi_desc platform;
		struct fsl_mc_msi_desc fsl_mc;
		struct ti_sci_inta_msi_desc inta;
	};
};
171 
/* Helpers to hide struct msi_desc implementation details */
#define msi_desc_to_dev(desc)		((desc)->dev)
#define dev_to_msi_list(dev)		(&(dev)->msi_list)
/* First descriptor on a device's MSI list; list must not be empty */
#define first_msi_entry(dev)		\
	list_first_entry(dev_to_msi_list((dev)), struct msi_desc, list)
#define for_each_msi_entry(desc, dev)	\
	list_for_each_entry((desc), dev_to_msi_list((dev)), list)
#define for_each_msi_entry_safe(desc, tmp, dev)	\
	list_for_each_entry_safe((desc), (tmp), dev_to_msi_list((dev)), list)
/*
 * Iterate __irq over every allocated irq number of every descriptor
 * with a valid (non-zero) base irq.  Note the bare "if" in the
 * expansion: never attach an "else" to an invocation of this macro.
 */
#define for_each_msi_vector(desc, __irq, dev)				\
	for_each_msi_entry((desc), (dev))				\
		if ((desc)->irq)					\
			for (__irq = (desc)->irq;			\
			     __irq < ((desc)->irq + (desc)->nvec_used);	\
			     __irq++)
187 
#ifdef CONFIG_IRQ_MSI_IOMMU
/* Retrieve the IOMMU cookie stored in the descriptor */
static inline const void *msi_desc_get_iommu_cookie(struct msi_desc *desc)
{
	return desc->iommu_cookie;
}

/* Store an IOMMU cookie in the descriptor */
static inline void msi_desc_set_iommu_cookie(struct msi_desc *desc,
					     const void *iommu_cookie)
{
	desc->iommu_cookie = iommu_cookie;
}
#else
/* Stub: the cookie field does not exist without CONFIG_IRQ_MSI_IOMMU */
static inline const void *msi_desc_get_iommu_cookie(struct msi_desc *desc)
{
	return NULL;
}

/* Stub: no-op without CONFIG_IRQ_MSI_IOMMU */
static inline void msi_desc_set_iommu_cookie(struct msi_desc *desc,
					     const void *iommu_cookie)
{
}
#endif
210 
#ifdef CONFIG_PCI_MSI
#define first_pci_msi_entry(pdev)	first_msi_entry(&(pdev)->dev)
#define for_each_pci_msi_entry(desc, pdev)	\
	for_each_msi_entry((desc), &(pdev)->dev)

struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc);
void *msi_desc_to_pci_sysdata(struct msi_desc *desc);
void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg);
#else /* CONFIG_PCI_MSI */
/* Stub: no PCI MSI support, there is no sysdata to return */
static inline void *msi_desc_to_pci_sysdata(struct msi_desc *desc)
{
	return NULL;
}
/* Stub: writing an MSI message is a no-op without PCI MSI support */
static inline void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg)
{
}
#endif /* CONFIG_PCI_MSI */
228 
/* Descriptor allocation and low level PCI MSI message/mask accessors */
struct msi_desc *alloc_msi_entry(struct device *dev, int nvec,
				 const struct irq_affinity_desc *affinity);
void free_msi_entry(struct msi_desc *entry);
void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg);

u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag);
u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag);
void pci_msi_mask_irq(struct irq_data *data);
void pci_msi_unmask_irq(struct irq_data *data);
239 
/*
 * The arch hooks to set up msi irqs. Default functions are implemented
 * as weak symbols so that they /can/ be overridden by architecture specific
 * code if needed. These hooks must be enabled by the architecture or by
 * drivers which depend on them via msi_controller based MSI handling.
 *
 * If CONFIG_PCI_MSI_ARCH_FALLBACKS is not selected they are replaced by
 * stubs with warnings.
 */
#ifdef CONFIG_PCI_MSI_ARCH_FALLBACKS
int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc);
void arch_teardown_msi_irq(unsigned int irq);
int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
void arch_teardown_msi_irqs(struct pci_dev *dev);
void default_teardown_msi_irqs(struct pci_dev *dev);
#else
/* Reaching this stub without CONFIG_PCI_MSI_ARCH_FALLBACKS is a bug */
static inline int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	WARN_ON_ONCE(1);
	return -ENODEV;
}

/* Reaching this stub without CONFIG_PCI_MSI_ARCH_FALLBACKS is a bug */
static inline void arch_teardown_msi_irqs(struct pci_dev *dev)
{
	WARN_ON_ONCE(1);
}
#endif
267 
/*
 * The restore hooks are still available as they are useful even
 * for fully irq domain based setups. Courtesy to XEN/X86.
 */
void arch_restore_msi_irqs(struct pci_dev *dev);
void default_restore_msi_irqs(struct pci_dev *dev);

/*
 * Per-bus MSI controller callbacks (pre irq-domain style interface;
 * see the arch hook comment above regarding msi_controller based
 * MSI handling).
 */
struct msi_controller {
	struct module *owner;		/* owning module */
	struct device *dev;		/* underlying device */
	struct device_node *of_node;	/* associated devicetree node */
	struct list_head list;		/* registration list linkage */

	/* Allocate and map the irq for a single MSI descriptor */
	int (*setup_irq)(struct msi_controller *chip, struct pci_dev *dev,
			 struct msi_desc *desc);
	/* Optional batched variant of setup_irq */
	int (*setup_irqs)(struct msi_controller *chip, struct pci_dev *dev,
			  int nvec, int type);
	/* Release a previously set up irq */
	void (*teardown_irq)(struct msi_controller *chip, unsigned int irq);
};
287 
288 #ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
289 
290 #include <linux/irqhandler.h>
291 
292 struct irq_domain;
293 struct irq_domain_ops;
294 struct irq_chip;
295 struct device_node;
296 struct fwnode_handle;
297 struct msi_domain_info;
298 
/**
 * struct msi_domain_ops - MSI interrupt domain callbacks
 * @get_hwirq:		Retrieve the resulting hw irq number
 * @msi_init:		Domain specific init function for MSI interrupts
 * @msi_free:		Domain specific function to free a MSI interrupts
 * @msi_check:		Callback for verification of the domain/info/dev data
 * @msi_prepare:	Prepare the allocation of the interrupts in the domain
 * @msi_finish:		Optional callback to finalize the allocation
 * @set_desc:		Set the msi descriptor for an interrupt
 * @handle_error:	Optional error handler if the allocation fails
 * @domain_alloc_irqs:	Optional function to override the default allocation
 *			function.
 * @domain_free_irqs:	Optional function to override the default free
 *			function.
 *
 * @get_hwirq, @msi_init and @msi_free are callbacks used by
 * msi_create_irq_domain() and related interfaces
 *
 * @msi_check, @msi_prepare, @msi_finish, @set_desc and @handle_error
 * are callbacks used by msi_domain_alloc_irqs() and related
 * interfaces which are based on msi_desc.
 *
 * @domain_alloc_irqs, @domain_free_irqs can be used to override the
 * default allocation/free functions (__msi_domain_alloc/free_irqs). This
 * is initially for a wrapper around XEN's separate MSI universe which can't
 * be wrapped into the regular irq domains concepts by mere mortals.  This
 * allows to universally use msi_domain_alloc/free_irqs without having to
 * special case XEN all over the place.
 *
 * Contrary to other operations @domain_alloc_irqs and @domain_free_irqs
 * are set to the default implementation if NULL and even when
 * MSI_FLAG_USE_DEF_DOM_OPS is not set to avoid breaking existing users and
 * because these callbacks are obviously mandatory.
 *
 * This is NOT meant to be abused, but it can be useful to build wrappers
 * for specialized MSI irq domains which need extra work before and after
 * calling __msi_domain_alloc_irqs()/__msi_domain_free_irqs().
 */
struct msi_domain_ops {
	irq_hw_number_t	(*get_hwirq)(struct msi_domain_info *info,
				     msi_alloc_info_t *arg);
	int		(*msi_init)(struct irq_domain *domain,
				    struct msi_domain_info *info,
				    unsigned int virq, irq_hw_number_t hwirq,
				    msi_alloc_info_t *arg);
	void		(*msi_free)(struct irq_domain *domain,
				    struct msi_domain_info *info,
				    unsigned int virq);
	int		(*msi_check)(struct irq_domain *domain,
				     struct msi_domain_info *info,
				     struct device *dev);
	int		(*msi_prepare)(struct irq_domain *domain,
				       struct device *dev, int nvec,
				       msi_alloc_info_t *arg);
	void		(*msi_finish)(msi_alloc_info_t *arg, int retval);
	void		(*set_desc)(msi_alloc_info_t *arg,
				    struct msi_desc *desc);
	int		(*handle_error)(struct irq_domain *domain,
					struct msi_desc *desc, int error);
	int		(*domain_alloc_irqs)(struct irq_domain *domain,
					     struct device *dev, int nvec);
	void		(*domain_free_irqs)(struct irq_domain *domain,
					    struct device *dev);
};
363 
/**
 * struct msi_domain_info - MSI interrupt domain data
 * @flags:		Flags to describe features and capabilities
 * @ops:		The callback data structure
 * @chip:		Optional: associated interrupt chip
 * @chip_data:		Optional: associated interrupt chip data
 * @handler:		Optional: associated interrupt flow handler
 * @handler_data:	Optional: associated interrupt flow handler data
 * @handler_name:	Optional: associated interrupt flow handler name
 * @data:		Optional: domain specific data
 */
struct msi_domain_info {
	u32			flags;
	struct msi_domain_ops	*ops;
	struct irq_chip		*chip;
	void			*chip_data;
	irq_flow_handler_t	handler;
	void			*handler_data;
	const char		*handler_name;
	void			*data;
};
385 
/* Flags for msi_domain_info */
enum {
	/*
	 * Init non implemented ops callbacks with default MSI domain
	 * callbacks.
	 */
	MSI_FLAG_USE_DEF_DOM_OPS	= (1 << 0),
	/*
	 * Init non implemented chip callbacks with default MSI chip
	 * callbacks.
	 */
	MSI_FLAG_USE_DEF_CHIP_OPS	= (1 << 1),
	/* Support multiple PCI MSI interrupts */
	MSI_FLAG_MULTI_PCI_MSI		= (1 << 2),
	/* Support PCI MSI-X interrupts */
	MSI_FLAG_PCI_MSIX		= (1 << 3),
	/* Needs early activate, required for PCI */
	MSI_FLAG_ACTIVATE_EARLY		= (1 << 4),
	/*
	 * Must reactivate when irq is started even when
	 * MSI_FLAG_ACTIVATE_EARLY has been set.
	 */
	MSI_FLAG_MUST_REACTIVATE	= (1 << 5),
	/* Is level-triggered capable, using two messages */
	MSI_FLAG_LEVEL_CAPABLE		= (1 << 6),
};
412 
/* Generic MSI irq domain management */
int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask,
			    bool force);

struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode,
					 struct msi_domain_info *info,
					 struct irq_domain *parent);
int __msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
			    int nvec);
int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
			  int nvec);
void __msi_domain_free_irqs(struct irq_domain *domain, struct device *dev);
void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev);
struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain);

/* Platform (non-PCI) MSI domain interfaces */
struct irq_domain *platform_msi_create_irq_domain(struct fwnode_handle *fwnode,
						  struct msi_domain_info *info,
						  struct irq_domain *parent);
int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec,
				   irq_write_msi_msg_t write_msi_msg);
void platform_msi_domain_free_irqs(struct device *dev);

/* When an MSI domain is used as an intermediate domain */
int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev,
			    int nvec, msi_alloc_info_t *args);
int msi_domain_populate_irqs(struct irq_domain *domain, struct device *dev,
			     int virq, int nvec, msi_alloc_info_t *args);
struct irq_domain *
__platform_msi_create_device_domain(struct device *dev,
				    unsigned int nvec,
				    bool is_tree,
				    irq_write_msi_msg_t write_msi_msg,
				    const struct irq_domain_ops *ops,
				    void *host_data);

/* Convenience wrappers selecting the is_tree variant of the device domain */
#define platform_msi_create_device_domain(dev, nvec, write, ops, data)	\
	__platform_msi_create_device_domain(dev, nvec, false, write, ops, data)
#define platform_msi_create_device_tree_domain(dev, nvec, write, ops, data) \
	__platform_msi_create_device_domain(dev, nvec, true, write, ops, data)

int platform_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
			      unsigned int nr_irqs);
void platform_msi_domain_free(struct irq_domain *domain, unsigned int virq,
			      unsigned int nvec);
void *platform_msi_get_host_data(struct irq_domain *domain);
457 #endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */
458 
#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
void pci_msi_domain_write_msg(struct irq_data *irq_data, struct msi_msg *msg);
struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode,
					     struct msi_domain_info *info,
					     struct irq_domain *parent);
int pci_msi_domain_check_cap(struct irq_domain *domain,
			     struct msi_domain_info *info, struct device *dev);
u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev);
struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev);
bool pci_dev_has_special_msi_domain(struct pci_dev *pdev);
#else
/* Stub: without PCI MSI irq domains no device domain can exist */
static inline struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev)
{
	return NULL;
}
#endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */
475 
476 #endif /* LINUX_MSI_H */
477