xref: /openbmc/linux/kernel/irq/msi.c (revision 98043704)
152a65ff5SThomas Gleixner // SPDX-License-Identifier: GPL-2.0
2f3cf8bb0SJiang Liu /*
3f3cf8bb0SJiang Liu  * Copyright (C) 2014 Intel Corp.
4f3cf8bb0SJiang Liu  * Author: Jiang Liu <jiang.liu@linux.intel.com>
5f3cf8bb0SJiang Liu  *
6f3cf8bb0SJiang Liu  * This file is licensed under GPLv2.
7f3cf8bb0SJiang Liu  *
8a359f757SIngo Molnar  * This file contains common code to support Message Signaled Interrupts for
9f3cf8bb0SJiang Liu  * PCI compatible and non PCI compatible devices.
10f3cf8bb0SJiang Liu  */
11aeeb5965SJiang Liu #include <linux/types.h>
12aeeb5965SJiang Liu #include <linux/device.h>
13f3cf8bb0SJiang Liu #include <linux/irq.h>
14f3cf8bb0SJiang Liu #include <linux/irqdomain.h>
15f3cf8bb0SJiang Liu #include <linux/msi.h>
164e201566SMarc Zyngier #include <linux/slab.h>
173ba1f050SThomas Gleixner #include <linux/sysfs.h>
182f170814SBarry Song #include <linux/pci.h>
19d9109698SJiang Liu 
2007557ccbSThomas Gleixner #include "internals.h"
2107557ccbSThomas Gleixner 
2294ff94cfSThomas Gleixner /* Invalid Xarray index which is outside of any searchable range */
2394ff94cfSThomas Gleixner #define MSI_XA_MAX_INDEX	(ULONG_MAX - 1)
2494ff94cfSThomas Gleixner /* The maximum domain size */
2594ff94cfSThomas Gleixner #define MSI_XA_DOMAIN_SIZE	(MSI_MAX_INDEX + 1)
2694ff94cfSThomas Gleixner 
27bf5e758fSThomas Gleixner static inline int msi_sysfs_create_group(struct device *dev);
28cc9a246dSThomas Gleixner 
2994ff94cfSThomas Gleixner 
3028f4b041SThomas Gleixner /**
31cc9a246dSThomas Gleixner  * msi_alloc_desc - Allocate an initialized msi_desc
3228f4b041SThomas Gleixner  * @dev:	Pointer to the device for which this is allocated
3328f4b041SThomas Gleixner  * @nvec:	The number of vectors used in this entry
3428f4b041SThomas Gleixner  * @affinity:	Optional pointer to an affinity mask array size of @nvec
3528f4b041SThomas Gleixner  *
363b35e7e6SRandy Dunlap  * If @affinity is not %NULL then an affinity array[@nvec] is allocated
37bec04037SDou Liyang  * and the affinity masks and flags from @affinity are copied.
383b35e7e6SRandy Dunlap  *
393b35e7e6SRandy Dunlap  * Return: pointer to allocated &msi_desc on success or %NULL on failure
4028f4b041SThomas Gleixner  */
41cc9a246dSThomas Gleixner static struct msi_desc *msi_alloc_desc(struct device *dev, int nvec,
42bec04037SDou Liyang 					const struct irq_affinity_desc *affinity)
43aa48b6f7SJiang Liu {
44cc9a246dSThomas Gleixner 	struct msi_desc *desc = kzalloc(sizeof(*desc), GFP_KERNEL);
4528f4b041SThomas Gleixner 
46aa48b6f7SJiang Liu 	if (!desc)
47aa48b6f7SJiang Liu 		return NULL;
48aa48b6f7SJiang Liu 
49aa48b6f7SJiang Liu 	desc->dev = dev;
5028f4b041SThomas Gleixner 	desc->nvec_used = nvec;
5128f4b041SThomas Gleixner 	if (affinity) {
52cc9a246dSThomas Gleixner 		desc->affinity = kmemdup(affinity, nvec * sizeof(*desc->affinity), GFP_KERNEL);
5328f4b041SThomas Gleixner 		if (!desc->affinity) {
5428f4b041SThomas Gleixner 			kfree(desc);
5528f4b041SThomas Gleixner 			return NULL;
5628f4b041SThomas Gleixner 		}
5728f4b041SThomas Gleixner 	}
58aa48b6f7SJiang Liu 	return desc;
59aa48b6f7SJiang Liu }
60aa48b6f7SJiang Liu 
/* Free a descriptor along with its optional affinity array */
static void msi_free_desc(struct msi_desc *desc)
{
	kfree(desc->affinity);
	kfree(desc);
}
66aa48b6f7SJiang Liu 
67cd6cf065SThomas Gleixner static int msi_insert_desc(struct msi_device_data *md, struct msi_desc *desc, unsigned int index)
68cd6cf065SThomas Gleixner {
69f1139f90SThomas Gleixner 	struct xarray *xa = &md->__domains[MSI_DEFAULT_DOMAIN].store;
70cd6cf065SThomas Gleixner 	int ret;
71cd6cf065SThomas Gleixner 
72cd6cf065SThomas Gleixner 	desc->msi_index = index;
73f1139f90SThomas Gleixner 	ret = xa_insert(xa, index, desc, GFP_KERNEL);
74cd6cf065SThomas Gleixner 	if (ret)
75cd6cf065SThomas Gleixner 		msi_free_desc(desc);
76cd6cf065SThomas Gleixner 	return ret;
77cd6cf065SThomas Gleixner }
78cd6cf065SThomas Gleixner 
7960290525SThomas Gleixner /**
8060290525SThomas Gleixner  * msi_add_msi_desc - Allocate and initialize a MSI descriptor
8160290525SThomas Gleixner  * @dev:	Pointer to the device for which the descriptor is allocated
8260290525SThomas Gleixner  * @init_desc:	Pointer to an MSI descriptor to initialize the new descriptor
8360290525SThomas Gleixner  *
8460290525SThomas Gleixner  * Return: 0 on success or an appropriate failure code.
8560290525SThomas Gleixner  */
8660290525SThomas Gleixner int msi_add_msi_desc(struct device *dev, struct msi_desc *init_desc)
8760290525SThomas Gleixner {
8860290525SThomas Gleixner 	struct msi_desc *desc;
8960290525SThomas Gleixner 
9060290525SThomas Gleixner 	lockdep_assert_held(&dev->msi.data->mutex);
9160290525SThomas Gleixner 
92cc9a246dSThomas Gleixner 	desc = msi_alloc_desc(dev, init_desc->nvec_used, init_desc->affinity);
9360290525SThomas Gleixner 	if (!desc)
9460290525SThomas Gleixner 		return -ENOMEM;
9560290525SThomas Gleixner 
96cd6cf065SThomas Gleixner 	/* Copy type specific data to the new descriptor. */
9760290525SThomas Gleixner 	desc->pci = init_desc->pci;
98cd6cf065SThomas Gleixner 	return msi_insert_desc(dev->msi.data, desc, init_desc->msi_index);
9960290525SThomas Gleixner }
10060290525SThomas Gleixner 
10160290525SThomas Gleixner /**
10260290525SThomas Gleixner  * msi_add_simple_msi_descs - Allocate and initialize MSI descriptors
10360290525SThomas Gleixner  * @dev:	Pointer to the device for which the descriptors are allocated
10460290525SThomas Gleixner  * @index:	Index for the first MSI descriptor
10560290525SThomas Gleixner  * @ndesc:	Number of descriptors to allocate
10660290525SThomas Gleixner  *
10760290525SThomas Gleixner  * Return: 0 on success or an appropriate failure code.
10860290525SThomas Gleixner  */
10960290525SThomas Gleixner static int msi_add_simple_msi_descs(struct device *dev, unsigned int index, unsigned int ndesc)
11060290525SThomas Gleixner {
111cd6cf065SThomas Gleixner 	unsigned int idx, last = index + ndesc - 1;
112cd6cf065SThomas Gleixner 	struct msi_desc *desc;
113cd6cf065SThomas Gleixner 	int ret;
11460290525SThomas Gleixner 
11560290525SThomas Gleixner 	lockdep_assert_held(&dev->msi.data->mutex);
11660290525SThomas Gleixner 
117cd6cf065SThomas Gleixner 	for (idx = index; idx <= last; idx++) {
118cc9a246dSThomas Gleixner 		desc = msi_alloc_desc(dev, 1, NULL);
11960290525SThomas Gleixner 		if (!desc)
120cd6cf065SThomas Gleixner 			goto fail_mem;
121cd6cf065SThomas Gleixner 		ret = msi_insert_desc(dev->msi.data, desc, idx);
122cd6cf065SThomas Gleixner 		if (ret)
12360290525SThomas Gleixner 			goto fail;
12460290525SThomas Gleixner 	}
12560290525SThomas Gleixner 	return 0;
12660290525SThomas Gleixner 
127cd6cf065SThomas Gleixner fail_mem:
128cd6cf065SThomas Gleixner 	ret = -ENOMEM;
12960290525SThomas Gleixner fail:
1302f2940d1SThomas Gleixner 	msi_free_msi_descs_range(dev, index, last);
131cd6cf065SThomas Gleixner 	return ret;
13260290525SThomas Gleixner }
133cd6cf065SThomas Gleixner 
134cd6cf065SThomas Gleixner static bool msi_desc_match(struct msi_desc *desc, enum msi_desc_filter filter)
135cd6cf065SThomas Gleixner {
136cd6cf065SThomas Gleixner 	switch (filter) {
137cd6cf065SThomas Gleixner 	case MSI_DESC_ALL:
138cd6cf065SThomas Gleixner 		return true;
139cd6cf065SThomas Gleixner 	case MSI_DESC_NOTASSOCIATED:
140cd6cf065SThomas Gleixner 		return !desc->irq;
141cd6cf065SThomas Gleixner 	case MSI_DESC_ASSOCIATED:
142cd6cf065SThomas Gleixner 		return !!desc->irq;
143cd6cf065SThomas Gleixner 	}
144cd6cf065SThomas Gleixner 	WARN_ON_ONCE(1);
145cd6cf065SThomas Gleixner 	return false;
14660290525SThomas Gleixner }
14760290525SThomas Gleixner 
148645474e2SThomas Gleixner /**
149645474e2SThomas Gleixner  * msi_free_msi_descs_range - Free MSI descriptors of a device
150645474e2SThomas Gleixner  * @dev:		Device to free the descriptors
151645474e2SThomas Gleixner  * @first_index:	Index to start freeing from
152645474e2SThomas Gleixner  * @last_index:		Last index to be freed
153645474e2SThomas Gleixner  */
1542f2940d1SThomas Gleixner void msi_free_msi_descs_range(struct device *dev, unsigned int first_index,
1552f2940d1SThomas Gleixner 			      unsigned int last_index)
156645474e2SThomas Gleixner {
157f1139f90SThomas Gleixner 	struct xarray *xa = &dev->msi.data->__domains[MSI_DEFAULT_DOMAIN].store;
158645474e2SThomas Gleixner 	struct msi_desc *desc;
159cd6cf065SThomas Gleixner 	unsigned long idx;
160645474e2SThomas Gleixner 
161645474e2SThomas Gleixner 	lockdep_assert_held(&dev->msi.data->mutex);
162645474e2SThomas Gleixner 
163cd6cf065SThomas Gleixner 	xa_for_each_range(xa, idx, desc, first_index, last_index) {
164cd6cf065SThomas Gleixner 		xa_erase(xa, idx);
1652f2940d1SThomas Gleixner 
1662f2940d1SThomas Gleixner 		/* Leak the descriptor when it is still referenced */
1672f2940d1SThomas Gleixner 		if (WARN_ON_ONCE(msi_desc_match(desc, MSI_DESC_ASSOCIATED)))
1682f2940d1SThomas Gleixner 			continue;
169cc9a246dSThomas Gleixner 		msi_free_desc(desc);
170645474e2SThomas Gleixner 	}
171645474e2SThomas Gleixner }
172645474e2SThomas Gleixner 
/* Copy the MSI message cached in @entry into @msg */
void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{
	*msg = entry->msg;
}
17738b6a1cfSJiang Liu 
/*
 * get_cached_msi_msg - Copy the cached MSI message of interrupt @irq into @msg
 *
 * NOTE(review): irq_get_msi_desc() returning NULL would be dereferenced in
 * __get_cached_msi_msg(); presumably callers only hand in valid MSI
 * interrupts - confirm against call sites.
 */
void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg)
{
	struct msi_desc *entry = irq_get_msi_desc(irq);

	__get_cached_msi_msg(entry, msg);
}
EXPORT_SYMBOL_GPL(get_cached_msi_msg);
18538b6a1cfSJiang Liu 
186013bd8e5SThomas Gleixner static void msi_device_data_release(struct device *dev, void *res)
187013bd8e5SThomas Gleixner {
188125282cdSThomas Gleixner 	struct msi_device_data *md = res;
189f1139f90SThomas Gleixner 	int i;
190125282cdSThomas Gleixner 
191f1139f90SThomas Gleixner 	for (i = 0; i < MSI_MAX_DEVICE_IRQDOMAINS; i++) {
192f1139f90SThomas Gleixner 		WARN_ON_ONCE(!xa_empty(&md->__domains[i].store));
193f1139f90SThomas Gleixner 		xa_destroy(&md->__domains[i].store);
194f1139f90SThomas Gleixner 	}
195013bd8e5SThomas Gleixner 	dev->msi.data = NULL;
196013bd8e5SThomas Gleixner }
197013bd8e5SThomas Gleixner 
/**
 * msi_setup_device_data - Setup MSI device data
 * @dev:	Device for which MSI device data should be set up
 *
 * Return: 0 on success, appropriate error code otherwise
 *
 * This can be called more than once for @dev. If the MSI device data is
 * already allocated the call succeeds. The allocated memory is
 * automatically released when the device is destroyed.
 */
int msi_setup_device_data(struct device *dev)
{
	struct msi_device_data *md;
	int ret, i;

	/* Repeated invocations for an initialized device just succeed */
	if (dev->msi.data)
		return 0;

	md = devres_alloc(msi_device_data_release, sizeof(*md), GFP_KERNEL);
	if (!md)
		return -ENOMEM;

	/* The sysfs group is devres managed too, so only @md needs freeing here */
	ret = msi_sysfs_create_group(dev);
	if (ret) {
		devres_free(md);
		return ret;
	}

	for (i = 0; i < MSI_MAX_DEVICE_IRQDOMAINS; i++)
		xa_init(&md->__domains[i].store);

	/*
	 * If @dev::msi::domain is set and is a global MSI domain, copy the
	 * pointer into the domain array so all code can operate on domain
	 * ids. The NULL pointer check is required to keep the legacy
	 * architecture specific PCI/MSI support working.
	 */
	if (dev->msi.domain && !irq_domain_is_msi_parent(dev->msi.domain))
		md->__domains[MSI_DEFAULT_DOMAIN].domain = dev->msi.domain;

	mutex_init(&md->mutex);
	dev->msi.data = md;
	devres_add(dev, md);
	return 0;
}
243013bd8e5SThomas Gleixner 
/**
 * msi_lock_descs - Lock the MSI descriptor storage of a device
 * @dev:	Device to operate on
 *
 * Must be paired with msi_unlock_descs().
 */
void msi_lock_descs(struct device *dev)
{
	mutex_lock(&dev->msi.data->mutex);
}
EXPORT_SYMBOL_GPL(msi_lock_descs);
253b5f687f9SThomas Gleixner 
/**
 * msi_unlock_descs - Unlock the MSI descriptor storage of a device
 * @dev:	Device to operate on
 *
 * Counterpart to msi_lock_descs().
 */
void msi_unlock_descs(struct device *dev)
{
	/* Invalidate the index which was cached by the iterator */
	dev->msi.data->__iter_idx = MSI_XA_MAX_INDEX;
	mutex_unlock(&dev->msi.data->mutex);
}
EXPORT_SYMBOL_GPL(msi_unlock_descs);
265b5f687f9SThomas Gleixner 
26694ff94cfSThomas Gleixner static struct msi_desc *msi_find_desc(struct msi_device_data *md, unsigned int domid,
26794ff94cfSThomas Gleixner 				      enum msi_desc_filter filter)
2681046f71dSThomas Gleixner {
26994ff94cfSThomas Gleixner 	struct xarray *xa = &md->__domains[domid].store;
2701046f71dSThomas Gleixner 	struct msi_desc *desc;
2711046f71dSThomas Gleixner 
272f1139f90SThomas Gleixner 	xa_for_each_start(xa, md->__iter_idx, desc, md->__iter_idx) {
2731046f71dSThomas Gleixner 		if (msi_desc_match(desc, filter))
2741046f71dSThomas Gleixner 			return desc;
2751046f71dSThomas Gleixner 	}
27694ff94cfSThomas Gleixner 	md->__iter_idx = MSI_XA_MAX_INDEX;
2771046f71dSThomas Gleixner 	return NULL;
2781046f71dSThomas Gleixner }
2791046f71dSThomas Gleixner 
/**
 * msi_domain_first_desc - Get the first MSI descriptor of an irqdomain associated to a device
 * @dev:	Device to operate on
 * @domid:	The id of the interrupt domain which should be walked.
 * @filter:	Descriptor state filter
 *
 * Must be called with the MSI descriptor mutex held, i.e. msi_lock_descs()
 * must be invoked before the call.
 *
 * Return: Pointer to the first MSI descriptor matching the search
 *	   criteria, NULL if none found.
 */
struct msi_desc *msi_domain_first_desc(struct device *dev, unsigned int domid,
				       enum msi_desc_filter filter)
{
	struct msi_device_data *md = dev->msi.data;

	if (WARN_ON_ONCE(!md || domid >= MSI_MAX_DEVICE_IRQDOMAINS))
		return NULL;

	lockdep_assert_held(&md->mutex);

	/* Restart the iteration at the lowest possible index */
	md->__iter_idx = 0;
	return msi_find_desc(md, domid, filter);
}
EXPORT_SYMBOL_GPL(msi_domain_first_desc);
3061046f71dSThomas Gleixner 
/**
 * msi_next_desc - Get the next MSI descriptor of a device
 * @dev:	Device to operate on
 * @domid:	The id of the interrupt domain which should be walked.
 * @filter:	Descriptor state filter
 *
 * The first invocation of msi_next_desc() has to be preceded by a
 * successful invocation of msi_domain_first_desc(). Consecutive invocations
 * are only valid if the previous one was successful. All these operations
 * have to be done within the same MSI mutex held region.
 *
 * Return: Pointer to the next MSI descriptor matching the search
 *	   criteria, NULL if none found.
 */
struct msi_desc *msi_next_desc(struct device *dev, unsigned int domid,
			       enum msi_desc_filter filter)
{
	struct msi_device_data *md = dev->msi.data;

	if (WARN_ON_ONCE(!md || domid >= MSI_MAX_DEVICE_IRQDOMAINS))
		return NULL;

	lockdep_assert_held(&md->mutex);

	/* Iteration already ran off the end of the valid index space? */
	if (md->__iter_idx >= (unsigned long)MSI_MAX_INDEX)
		return NULL;

	md->__iter_idx++;
	return msi_find_desc(md, domid, filter);
}
EXPORT_SYMBOL_GPL(msi_next_desc);
3381046f71dSThomas Gleixner 
/**
 * msi_domain_get_virq - Lookup the Linux interrupt number for a MSI index on a interrupt domain
 * @dev:	Device to operate on
 * @domid:	Domain ID of the interrupt domain associated to the device
 * @index:	MSI interrupt index to look for (0-based)
 *
 * Return: The Linux interrupt number on success (> 0), 0 if not found
 */
unsigned int msi_domain_get_virq(struct device *dev, unsigned int domid, unsigned int index)
{
	struct msi_desc *desc;
	unsigned int ret = 0;
	bool pcimsi = false;
	struct xarray *xa;

	if (!dev->msi.data)
		return 0;

	if (WARN_ON_ONCE(index > MSI_MAX_INDEX || domid >= MSI_MAX_DEVICE_IRQDOMAINS))
		return 0;

	/* This check is only valid for the PCI default MSI domain */
	if (dev_is_pci(dev) && domid == MSI_DEFAULT_DOMAIN)
		pcimsi = to_pci_dev(dev)->msi_enabled;

	msi_lock_descs(dev);
	xa = &dev->msi.data->__domains[domid].store;
	/* For PCI-MSI all interrupts share the single descriptor at index 0 */
	desc = xa_load(xa, pcimsi ? 0 : index);
	if (desc && desc->irq) {
		/*
		 * PCI-MSI has only one descriptor for multiple interrupts.
		 * PCI-MSIX and platform MSI use a descriptor per
		 * interrupt.
		 */
		if (pcimsi) {
			/* The interrupt numbers are consecutive from desc->irq */
			if (index < desc->nvec_used)
				ret = desc->irq + index;
		} else {
			ret = desc->irq;
		}
	}

	msi_unlock_descs(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(msi_domain_get_virq);
385cf15f43aSThomas Gleixner 
3861197528aSThomas Gleixner #ifdef CONFIG_SYSFS
/* Initially empty; per interrupt attributes are added/removed dynamically */
static struct attribute *msi_dev_attrs[] = {
	NULL
};

/* The "msi_irqs" sysfs directory below the device */
static const struct attribute_group msi_irqs_group = {
	.name	= "msi_irqs",
	.attrs	= msi_dev_attrs,
};

/* Add the msi_irqs group; lifetime is devres managed */
static inline int msi_sysfs_create_group(struct device *dev)
{
	return devm_device_add_group(dev, &msi_irqs_group);
}
400bf5e758fSThomas Gleixner 
4012f170814SBarry Song static ssize_t msi_mode_show(struct device *dev, struct device_attribute *attr,
4022f170814SBarry Song 			     char *buf)
4032f170814SBarry Song {
4046ef7f771SThomas Gleixner 	/* MSI vs. MSIX is per device not per interrupt */
4056ef7f771SThomas Gleixner 	bool is_msix = dev_is_pci(dev) ? to_pci_dev(dev)->msix_enabled : false;
4062f170814SBarry Song 
4072f170814SBarry Song 	return sysfs_emit(buf, "%s\n", is_msix ? "msix" : "msi");
4082f170814SBarry Song }
4092f170814SBarry Song 
/*
 * Remove the sysfs attributes of @desc from the msi_irqs group and free
 * them. Safe to call on a descriptor which was never (fully) populated:
 * entries whose show() is NULL were not added to the group (see
 * msi_sysfs_populate_desc()) and are only freed.
 */
static void msi_sysfs_remove_desc(struct device *dev, struct msi_desc *desc)
{
	struct device_attribute *attrs = desc->sysfs_attrs;
	int i;

	if (!attrs)
		return;

	desc->sysfs_attrs = NULL;
	for (i = 0; i < desc->nvec_used; i++) {
		if (attrs[i].show)
			sysfs_remove_file_from_group(&dev->kobj, &attrs[i].attr, msi_irqs_group.name);
		kfree(attrs[i].attr.name);
	}
	kfree(attrs);
}
4262f170814SBarry Song 
/*
 * Create one sysfs attribute per Linux interrupt of @desc in the msi_irqs
 * group. On failure all attributes created so far are torn down again.
 *
 * Return: 0 on success or a negative error code.
 */
static int msi_sysfs_populate_desc(struct device *dev, struct msi_desc *desc)
{
	struct device_attribute *attrs;
	int ret, i;

	attrs = kcalloc(desc->nvec_used, sizeof(*attrs), GFP_KERNEL);
	if (!attrs)
		return -ENOMEM;

	desc->sysfs_attrs = attrs;
	for (i = 0; i < desc->nvec_used; i++) {
		sysfs_attr_init(&attrs[i].attr);
		/* The attribute name is the Linux interrupt number */
		attrs[i].attr.name = kasprintf(GFP_KERNEL, "%d", desc->irq + i);
		if (!attrs[i].attr.name) {
			ret = -ENOMEM;
			goto fail;
		}

		attrs[i].attr.mode = 0444;
		attrs[i].show = msi_mode_show;

		ret = sysfs_add_file_to_group(&dev->kobj, &attrs[i].attr, msi_irqs_group.name);
		if (ret) {
			/* Clear show() so the cleanup skips the group removal */
			attrs[i].show = NULL;
			goto fail;
		}
	}
	return 0;

fail:
	msi_sysfs_remove_desc(dev, desc);
	return ret;
}
460bf5e758fSThomas Gleixner 
461bf5e758fSThomas Gleixner #ifdef CONFIG_PCI_MSI_ARCH_FALLBACKS
4622f170814SBarry Song /**
463bf6e054eSThomas Gleixner  * msi_device_populate_sysfs - Populate msi_irqs sysfs entries for a device
464bf6e054eSThomas Gleixner  * @dev:	The device (PCI, platform etc) which will get sysfs entries
465bf6e054eSThomas Gleixner  */
466bf6e054eSThomas Gleixner int msi_device_populate_sysfs(struct device *dev)
467bf6e054eSThomas Gleixner {
468bf5e758fSThomas Gleixner 	struct msi_desc *desc;
469bf5e758fSThomas Gleixner 	int ret;
470bf6e054eSThomas Gleixner 
471bf5e758fSThomas Gleixner 	msi_for_each_desc(desc, dev, MSI_DESC_ASSOCIATED) {
472bf5e758fSThomas Gleixner 		if (desc->sysfs_attrs)
473bf5e758fSThomas Gleixner 			continue;
474bf5e758fSThomas Gleixner 		ret = msi_sysfs_populate_desc(dev, desc);
475bf5e758fSThomas Gleixner 		if (ret)
476bf5e758fSThomas Gleixner 			return ret;
477bf5e758fSThomas Gleixner 	}
478bf6e054eSThomas Gleixner 	return 0;
479bf6e054eSThomas Gleixner }
480bf6e054eSThomas Gleixner 
481bf6e054eSThomas Gleixner /**
48224cff375SThomas Gleixner  * msi_device_destroy_sysfs - Destroy msi_irqs sysfs entries for a device
48324cff375SThomas Gleixner  * @dev:		The device (PCI, platform etc) for which to remove
48424cff375SThomas Gleixner  *			sysfs entries
4852f170814SBarry Song  */
48624cff375SThomas Gleixner void msi_device_destroy_sysfs(struct device *dev)
4872f170814SBarry Song {
488bf5e758fSThomas Gleixner 	struct msi_desc *desc;
4892f170814SBarry Song 
490bf5e758fSThomas Gleixner 	msi_for_each_desc(desc, dev, MSI_DESC_ALL)
491bf5e758fSThomas Gleixner 		msi_sysfs_remove_desc(dev, desc);
4922f170814SBarry Song }
493bf5e758fSThomas Gleixner #endif /* CONFIG_PCI_MSI_ARCH_FALLBACK */
494bf5e758fSThomas Gleixner #else /* CONFIG_SYSFS */
/* Stubs for the !CONFIG_SYSFS case */
static inline int msi_sysfs_create_group(struct device *dev) { return 0; }
static inline int msi_sysfs_populate_desc(struct device *dev, struct msi_desc *desc) { return 0; }
static inline void msi_sysfs_remove_desc(struct device *dev, struct msi_desc *desc) { }
498bf5e758fSThomas Gleixner #endif /* !CONFIG_SYSFS */
4992f170814SBarry Song 
500762687ceSThomas Gleixner static int __msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev, int nvec);
501057c97a1SThomas Gleixner static void __msi_domain_free_irqs(struct irq_domain *domain, struct device *dev);
502762687ceSThomas Gleixner 
/* Hand @msg to the irq chip callback which writes it to the hardware */
static inline void irq_chip_write_msi_msg(struct irq_data *data,
					  struct msi_msg *msg)
{
	data->chip->irq_write_msi_msg(data, msg);
}
50874faaf7aSThomas Gleixner 
/* Sanity check the second (level triggered) message of a two message pair */
static void msi_check_level(struct irq_domain *domain, struct msi_msg *msg)
{
	struct msi_domain_info *info = domain->host_data;

	/*
	 * If the MSI provider has messed with the second message and
	 * not advertized that it is level-capable, signal the breakage.
	 * A non-zero msg[1] is only legitimate when both the domain info
	 * and the irq chip declare level MSI support.
	 */
	WARN_ON(!((info->flags & MSI_FLAG_LEVEL_CAPABLE) &&
		  (info->chip->flags & IRQCHIP_SUPPORTS_LEVEL_MSI)) &&
		(msg[1].address_lo || msg[1].address_hi || msg[1].data));
}
5210be8153cSMarc Zyngier 
/**
 * msi_domain_set_affinity - Generic affinity setter function for MSI domains
 * @irq_data:	The irq data associated to the interrupt
 * @mask:	The affinity mask to set
 * @force:	Flag to enforce setting (disable online checks)
 *
 * Intended to be used by MSI interrupt controllers which are
 * implemented with hierarchical domains.
 *
 * Return: IRQ_SET_MASK_* result code
 */
int msi_domain_set_affinity(struct irq_data *irq_data,
			    const struct cpumask *mask, bool force)
{
	struct irq_data *parent = irq_data->parent_data;
	struct msi_msg msg[2] = { [1] = { }, };
	int ret;

	ret = parent->chip->irq_set_affinity(parent, mask, force);
	if (ret >= 0 && ret != IRQ_SET_MASK_OK_DONE) {
		/* The target changed: recompose and rewrite the MSI message */
		BUG_ON(irq_chip_compose_msi_msg(irq_data, msg));
		msi_check_level(irq_data->domain, msg);
		irq_chip_write_msi_msg(irq_data, msg);
	}

	return ret;
}
549f3cf8bb0SJiang Liu 
/* Activation: compose the MSI message and write it to the hardware */
static int msi_domain_activate(struct irq_domain *domain,
			       struct irq_data *irq_data, bool early)
{
	struct msi_msg msg[2] = { [1] = { }, };

	BUG_ON(irq_chip_compose_msi_msg(irq_data, msg));
	msi_check_level(irq_data->domain, msg);
	irq_chip_write_msi_msg(irq_data, msg);
	return 0;
}
560f3cf8bb0SJiang Liu 
561f3cf8bb0SJiang Liu static void msi_domain_deactivate(struct irq_domain *domain,
562f3cf8bb0SJiang Liu 				  struct irq_data *irq_data)
563f3cf8bb0SJiang Liu {
5640be8153cSMarc Zyngier 	struct msi_msg msg[2];
565f3cf8bb0SJiang Liu 
5660be8153cSMarc Zyngier 	memset(msg, 0, sizeof(msg));
5670be8153cSMarc Zyngier 	irq_chip_write_msi_msg(irq_data, msg);
568f3cf8bb0SJiang Liu }
569f3cf8bb0SJiang Liu 
570f3cf8bb0SJiang Liu static int msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
571f3cf8bb0SJiang Liu 			    unsigned int nr_irqs, void *arg)
572f3cf8bb0SJiang Liu {
573f3cf8bb0SJiang Liu 	struct msi_domain_info *info = domain->host_data;
574f3cf8bb0SJiang Liu 	struct msi_domain_ops *ops = info->ops;
575f3cf8bb0SJiang Liu 	irq_hw_number_t hwirq = ops->get_hwirq(info, arg);
576f3cf8bb0SJiang Liu 	int i, ret;
577f3cf8bb0SJiang Liu 
578f3cf8bb0SJiang Liu 	if (irq_find_mapping(domain, hwirq) > 0)
579f3cf8bb0SJiang Liu 		return -EEXIST;
580f3cf8bb0SJiang Liu 
581bf6f869fSLiu Jiang 	if (domain->parent) {
582f3cf8bb0SJiang Liu 		ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
583f3cf8bb0SJiang Liu 		if (ret < 0)
584f3cf8bb0SJiang Liu 			return ret;
585bf6f869fSLiu Jiang 	}
586f3cf8bb0SJiang Liu 
587f3cf8bb0SJiang Liu 	for (i = 0; i < nr_irqs; i++) {
588f3cf8bb0SJiang Liu 		ret = ops->msi_init(domain, info, virq + i, hwirq + i, arg);
589f3cf8bb0SJiang Liu 		if (ret < 0) {
590f3cf8bb0SJiang Liu 			if (ops->msi_free) {
591f3cf8bb0SJiang Liu 				for (i--; i > 0; i--)
592f3cf8bb0SJiang Liu 					ops->msi_free(domain, info, virq + i);
593f3cf8bb0SJiang Liu 			}
594f3cf8bb0SJiang Liu 			irq_domain_free_irqs_top(domain, virq, nr_irqs);
595f3cf8bb0SJiang Liu 			return ret;
596f3cf8bb0SJiang Liu 		}
597f3cf8bb0SJiang Liu 	}
598f3cf8bb0SJiang Liu 
599f3cf8bb0SJiang Liu 	return 0;
600f3cf8bb0SJiang Liu }
601f3cf8bb0SJiang Liu 
602f3cf8bb0SJiang Liu static void msi_domain_free(struct irq_domain *domain, unsigned int virq,
603f3cf8bb0SJiang Liu 			    unsigned int nr_irqs)
604f3cf8bb0SJiang Liu {
605f3cf8bb0SJiang Liu 	struct msi_domain_info *info = domain->host_data;
606f3cf8bb0SJiang Liu 	int i;
607f3cf8bb0SJiang Liu 
608f3cf8bb0SJiang Liu 	if (info->ops->msi_free) {
609f3cf8bb0SJiang Liu 		for (i = 0; i < nr_irqs; i++)
610f3cf8bb0SJiang Liu 			info->ops->msi_free(domain, info, virq + i);
611f3cf8bb0SJiang Liu 	}
612f3cf8bb0SJiang Liu 	irq_domain_free_irqs_top(domain, virq, nr_irqs);
613f3cf8bb0SJiang Liu }
614f3cf8bb0SJiang Liu 
/* irq_domain callbacks shared by all MSI irq domains */
static const struct irq_domain_ops msi_domain_ops = {
	.alloc		= msi_domain_alloc,
	.free		= msi_domain_free,
	.activate	= msi_domain_activate,
	.deactivate	= msi_domain_deactivate,
};
621f3cf8bb0SJiang Liu 
/* Default .get_hwirq: the hardware interrupt number comes straight from @arg */
static irq_hw_number_t msi_domain_ops_get_hwirq(struct msi_domain_info *info,
						msi_alloc_info_t *arg)
{
	return arg->hwirq;
}
627aeeb5965SJiang Liu 
/* Default .msi_prepare: no per-device preparation required, just clear @arg */
static int msi_domain_ops_prepare(struct irq_domain *domain, struct device *dev,
				  int nvec, msi_alloc_info_t *arg)
{
	memset(arg, 0, sizeof(*arg));
	return 0;
}
634aeeb5965SJiang Liu 
/* Default .set_desc: stash the descriptor pointer into the allocation info */
static void msi_domain_ops_set_desc(msi_alloc_info_t *arg,
				    struct msi_desc *desc)
{
	arg->desc = desc;
}
640aeeb5965SJiang Liu 
/*
 * Default .msi_init: associate hwirq, chip and chip_data with the Linux
 * interrupt number and install the optional flow handler.
 */
static int msi_domain_ops_init(struct irq_domain *domain,
			       struct msi_domain_info *info,
			       unsigned int virq, irq_hw_number_t hwirq,
			       msi_alloc_info_t *arg)
{
	irq_domain_set_hwirq_and_chip(domain, virq, hwirq, info->chip,
				      info->chip_data);
	if (info->handler && info->handler_name) {
		__irq_set_handler(virq, info->handler, 0, info->handler_name);
		/* Handler data is optional even when a handler is installed */
		if (info->handler_data)
			irq_set_handler_data(virq, info->handler_data);
	}
	return 0;
}
655aeeb5965SJiang Liu 
/* Fallback operations used to fill in callbacks a domain left NULL */
static struct msi_domain_ops msi_domain_ops_default = {
	.get_hwirq		= msi_domain_ops_get_hwirq,
	.msi_init		= msi_domain_ops_init,
	.msi_prepare		= msi_domain_ops_prepare,
	.set_desc		= msi_domain_ops_set_desc,
	.domain_alloc_irqs	= __msi_domain_alloc_irqs,
	.domain_free_irqs	= __msi_domain_free_irqs,
};
664aeeb5965SJiang Liu 
/*
 * Fill in default callbacks for any ops the domain left NULL.
 *
 * The domain_alloc_irqs/domain_free_irqs defaults are installed
 * unconditionally; the remaining defaults are applied only when the
 * domain opted in via MSI_FLAG_USE_DEF_DOM_OPS.
 */
static void msi_domain_update_dom_ops(struct msi_domain_info *info)
{
	struct msi_domain_ops *ops = info->ops;

	/* No ops provided at all: use the complete default set */
	if (ops == NULL) {
		info->ops = &msi_domain_ops_default;
		return;
	}

	if (ops->domain_alloc_irqs == NULL)
		ops->domain_alloc_irqs = msi_domain_ops_default.domain_alloc_irqs;
	if (ops->domain_free_irqs == NULL)
		ops->domain_free_irqs = msi_domain_ops_default.domain_free_irqs;

	if (!(info->flags & MSI_FLAG_USE_DEF_DOM_OPS))
		return;

	if (ops->get_hwirq == NULL)
		ops->get_hwirq = msi_domain_ops_default.get_hwirq;
	if (ops->msi_init == NULL)
		ops->msi_init = msi_domain_ops_default.msi_init;
	if (ops->msi_prepare == NULL)
		ops->msi_prepare = msi_domain_ops_default.msi_prepare;
	if (ops->set_desc == NULL)
		ops->set_desc = msi_domain_ops_default.set_desc;
}
691aeeb5965SJiang Liu 
/*
 * Sanity-check the irq chip and default its affinity setter.
 * irq_mask/irq_unmask are mandatory for MSI chips.
 */
static void msi_domain_update_chip_ops(struct msi_domain_info *info)
{
	struct irq_chip *chip = info->chip;

	BUG_ON(!chip || !chip->irq_mask || !chip->irq_unmask);
	if (!chip->irq_set_affinity)
		chip->irq_set_affinity = msi_domain_set_affinity;
}
700aeeb5965SJiang Liu 
701f3cf8bb0SJiang Liu /**
7023b35e7e6SRandy Dunlap  * msi_create_irq_domain - Create an MSI interrupt domain
703be5436c8SMarc Zyngier  * @fwnode:	Optional fwnode of the interrupt controller
704f3cf8bb0SJiang Liu  * @info:	MSI domain info
705f3cf8bb0SJiang Liu  * @parent:	Parent irq domain
7063b35e7e6SRandy Dunlap  *
7073b35e7e6SRandy Dunlap  * Return: pointer to the created &struct irq_domain or %NULL on failure
708f3cf8bb0SJiang Liu  */
struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode,
					 struct msi_domain_info *info,
					 struct irq_domain *parent)
{
	struct irq_domain *domain;

	/* Fill in default dom/chip ops before handing @info to the core */
	msi_domain_update_dom_ops(info);
	if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
		msi_domain_update_chip_ops(info);

	domain = irq_domain_create_hierarchy(parent, IRQ_DOMAIN_FLAG_MSI, 0,
					     fwnode, &msi_domain_ops, info);

	if (domain) {
		/* Borrow the chip name if the core did not set one */
		if (!domain->name && info->chip)
			domain->name = info->chip->name;
		irq_domain_update_bus_token(domain, info->bus_token);
	}

	return domain;
}
730f3cf8bb0SJiang Liu 
731b2eba39bSMarc Zyngier int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev,
732b2eba39bSMarc Zyngier 			    int nvec, msi_alloc_info_t *arg)
733b2eba39bSMarc Zyngier {
734b2eba39bSMarc Zyngier 	struct msi_domain_info *info = domain->host_data;
735b2eba39bSMarc Zyngier 	struct msi_domain_ops *ops = info->ops;
736b2eba39bSMarc Zyngier 
7372569f62cSThomas Gleixner 	return ops->msi_prepare(domain, dev, nvec, arg);
738b2eba39bSMarc Zyngier }
739b2eba39bSMarc Zyngier 
/*
 * Populate the interrupt range [virq_base, virq_base + nvec) from @domain,
 * creating a simple MSI descriptor for each interrupt and wiring it up.
 *
 * On failure the already allocated interrupts and all descriptors of the
 * range are torn down again before returning the error.
 */
int msi_domain_populate_irqs(struct irq_domain *domain, struct device *dev,
			     int virq_base, int nvec, msi_alloc_info_t *arg)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;
	struct msi_desc *desc;
	struct xarray *xa;
	int ret, virq;

	msi_lock_descs(dev);
	ret = msi_add_simple_msi_descs(dev, virq_base, nvec);
	if (ret)
		goto unlock;

	/* Descriptors of the default domain are indexed by Linux irq number */
	xa = &dev->msi.data->__domains[MSI_DEFAULT_DOMAIN].store;

	for (virq = virq_base; virq < virq_base + nvec; virq++) {
		desc = xa_load(xa, virq);
		desc->irq = virq;

		ops->set_desc(arg, desc);
		ret = irq_domain_alloc_irqs_hierarchy(domain, virq, 1, arg);
		if (ret)
			goto fail;

		irq_set_msi_desc(virq, desc);
	}
	msi_unlock_descs(dev);
	return 0;

fail:
	/* Unwind only the interrupts which were actually allocated */
	for (--virq; virq >= virq_base; virq--)
		irq_domain_free_irqs_common(domain, virq, 1);
	msi_free_msi_descs_range(dev, virq_base, virq_base + nvec - 1);
unlock:
	msi_unlock_descs(dev);
	return ret;
}
7782145ac93SMarc Zyngier 
779bc976233SThomas Gleixner /*
780bc976233SThomas Gleixner  * Carefully check whether the device can use reservation mode. If
781bc976233SThomas Gleixner  * reservation mode is enabled then the early activation will assign a
782bc976233SThomas Gleixner  * dummy vector to the device. If the PCI/MSI device does not support
783bc976233SThomas Gleixner  * masking of the entry then this can result in spurious interrupts when
784bc976233SThomas Gleixner  * the device driver is not absolutely careful. But even then a malfunction
785bc976233SThomas Gleixner  * of the hardware could result in a spurious interrupt on the dummy vector
786bc976233SThomas Gleixner  * and render the device unusable. If the entry can be masked then the core
787bc976233SThomas Gleixner  * logic will prevent the spurious interrupt and reservation mode can be
788bc976233SThomas Gleixner  * used. For now reservation mode is restricted to PCI/MSI.
789bc976233SThomas Gleixner  */
static bool msi_check_reservation_mode(struct irq_domain *domain,
				       struct msi_domain_info *info,
				       struct device *dev)
{
	struct msi_desc *desc;

	/* Reservation mode is restricted to PCI/MSI style bus tokens */
	switch(domain->bus_token) {
	case DOMAIN_BUS_PCI_MSI:
	case DOMAIN_BUS_VMD_MSI:
		break;
	default:
		return false;
	}

	if (!(info->flags & MSI_FLAG_MUST_REACTIVATE))
		return false;

	/* Global override which disables MSI[X] entry masking */
	if (IS_ENABLED(CONFIG_PCI_MSI) && pci_msi_ignore_mask)
		return false;

	/*
	 * Checking the first MSI descriptor is sufficient. MSIX supports
	 * masking and MSI does so when the can_mask attribute is set.
	 */
	desc = msi_first_desc(dev, MSI_DESC_ALL);
	return desc->pci.msi_attrib.is_msix || desc->pci.msi_attrib.can_mask;
}
817da5dd9e8SThomas Gleixner 
/*
 * Convert a failed vector allocation into the value which is handed back
 * to the caller: -ENOSPC, a retry hint (1), or the partial success count.
 */
static int msi_handle_pci_fail(struct irq_domain *domain, struct msi_desc *desc,
			       int allocated)
{
	/* Only PCI/MSI style domains get the retry/partial-success treatment */
	switch(domain->bus_token) {
	case DOMAIN_BUS_PCI_MSI:
	case DOMAIN_BUS_VMD_MSI:
		if (IS_ENABLED(CONFIG_PCI_MSI))
			break;
		fallthrough;
	default:
		return -ENOSPC;
	}

	/* Let a failed PCI multi MSI allocation retry */
	if (desc->nvec_used > 1)
		return 1;

	/* If there was a successful allocation let the caller know */
	return allocated ? allocated : -ENOSPC;
}
83889033762SThomas Gleixner 
/* Flags controlling the post-allocation setup of a single interrupt */
#define VIRQ_CAN_RESERVE	0x01
#define VIRQ_ACTIVATE		0x02
#define VIRQ_NOMASK_QUIRK	0x04

/*
 * Perform the post-allocation setup (reservation mode handling and
 * optional activation) for one Linux interrupt number.
 *
 * Returns 0 on success or the negative error code of the activation.
 */
static int msi_init_virq(struct irq_domain *domain, int virq, unsigned int vflags)
{
	struct irq_data *irqd = irq_domain_get_irq_data(domain, virq);
	int ret;

	if (!(vflags & VIRQ_CAN_RESERVE)) {
		irqd_clr_can_reserve(irqd);
		if (vflags & VIRQ_NOMASK_QUIRK)
			irqd_set_msi_nomask_quirk(irqd);

		/*
		 * If the interrupt is managed but no CPU is available to
		 * service it, shut it down until better times. Note that
		 * we only do this on the !RESERVE path as x86 (the only
		 * architecture using this flag) deals with this in a
		 * different way by using a catch-all vector.
		 */
		if ((vflags & VIRQ_ACTIVATE) &&
		    irqd_affinity_is_managed(irqd) &&
		    !cpumask_intersects(irq_data_get_affinity_mask(irqd),
					cpu_online_mask)) {
			    irqd_set_managed_shutdown(irqd);
			    return 0;
		    }
	}

	if (!(vflags & VIRQ_ACTIVATE))
		return 0;

	ret = irq_domain_activate_irq(irqd, vflags & VIRQ_CAN_RESERVE);
	if (ret)
		return ret;
	/*
	 * If the interrupt uses reservation mode, clear the activated bit
	 * so request_irq() will assign the final vector.
	 */
	if (vflags & VIRQ_CAN_RESERVE)
		irqd_clr_activated(irqd);
	return 0;
}
883ef8dd015SThomas Gleixner 
/*
 * Core allocation: walk all descriptors of @dev which do not yet have an
 * interrupt associated, allocate the backing Linux interrupts and run the
 * per-interrupt setup.
 *
 * On failure the partially allocated state is left in place; the caller
 * (msi_domain_alloc_irqs_descs_locked()) cleans up via
 * msi_domain_free_irqs_descs_locked().
 */
static int __msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
				   int nvec)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;
	msi_alloc_info_t arg = { };
	unsigned int vflags = 0;
	struct msi_desc *desc;
	int allocated = 0;
	int i, ret, virq;

	ret = msi_domain_prepare_irqs(domain, dev, nvec, &arg);
	if (ret)
		return ret;

	/*
	 * This flag is set by the PCI layer as we need to activate
	 * the MSI entries before the PCI layer enables MSI in the
	 * card. Otherwise the card latches a random msi message.
	 */
	if (info->flags & MSI_FLAG_ACTIVATE_EARLY)
		vflags |= VIRQ_ACTIVATE;

	/*
	 * Interrupt can use a reserved vector and will not occupy
	 * a real device vector until the interrupt is requested.
	 */
	if (msi_check_reservation_mode(domain, info, dev)) {
		vflags |= VIRQ_CAN_RESERVE;
		/*
		 * MSI affinity setting requires a special quirk (X86) when
		 * reservation mode is active.
		 */
		if (info->flags & MSI_FLAG_NOMASK_QUIRK)
			vflags |= VIRQ_NOMASK_QUIRK;
	}

	msi_for_each_desc(desc, dev, MSI_DESC_NOTASSOCIATED) {
		ops->set_desc(&arg, desc);

		virq = __irq_domain_alloc_irqs(domain, -1, desc->nvec_used,
					       dev_to_node(dev), &arg, false,
					       desc->affinity);
		if (virq < 0)
			return msi_handle_pci_fail(domain, desc, allocated);

		for (i = 0; i < desc->nvec_used; i++) {
			irq_set_msi_desc_off(virq, i, desc);
			irq_debugfs_copy_devname(virq + i, dev);
			ret = msi_init_virq(domain, virq + i, vflags);
			if (ret)
				return ret;
		}
		if (info->flags & MSI_FLAG_DEV_SYSFS) {
			ret = msi_sysfs_populate_desc(dev, desc);
			if (ret)
				return ret;
		}
		allocated++;
	}
	return 0;
}
9460f62d941SThomas Gleixner 
947645474e2SThomas Gleixner static int msi_domain_add_simple_msi_descs(struct msi_domain_info *info,
948645474e2SThomas Gleixner 					   struct device *dev,
949645474e2SThomas Gleixner 					   unsigned int num_descs)
950645474e2SThomas Gleixner {
951645474e2SThomas Gleixner 	if (!(info->flags & MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS))
952645474e2SThomas Gleixner 		return 0;
953645474e2SThomas Gleixner 
954645474e2SThomas Gleixner 	return msi_add_simple_msi_descs(dev, 0, num_descs);
955645474e2SThomas Gleixner }
956645474e2SThomas Gleixner 
9570f62d941SThomas Gleixner /**
9580f62d941SThomas Gleixner  * msi_domain_alloc_irqs_descs_locked - Allocate interrupts from a MSI interrupt domain
9590f62d941SThomas Gleixner  * @domain:	The domain to allocate from
9600f62d941SThomas Gleixner  * @dev:	Pointer to device struct of the device for which the interrupts
9610f62d941SThomas Gleixner  *		are allocated
9620f62d941SThomas Gleixner  * @nvec:	The number of interrupts to allocate
9630f62d941SThomas Gleixner  *
9640f62d941SThomas Gleixner  * Must be invoked from within a msi_lock_descs() / msi_unlock_descs()
9650f62d941SThomas Gleixner  * pair. Use this for MSI irqdomains which implement their own vector
9660f62d941SThomas Gleixner  * allocation/free.
9670f62d941SThomas Gleixner  *
9680f62d941SThomas Gleixner  * Return: %0 on success or an error code.
9690f62d941SThomas Gleixner  */
int msi_domain_alloc_irqs_descs_locked(struct irq_domain *domain, struct device *dev,
				       int nvec)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;
	int ret;

	lockdep_assert_held(&dev->msi.data->mutex);

	/* MSI parent domains cannot be allocated from directly */
	if (WARN_ON_ONCE(irq_domain_is_msi_parent(domain))) {
		ret = -EINVAL;
		goto free;
	}

	/* Frees allocated descriptors in case of failure. */
	ret = msi_domain_add_simple_msi_descs(info, dev, nvec);
	if (ret)
		goto free;

	ret = ops->domain_alloc_irqs(domain, dev, nvec);
	if (!ret)
		return 0;
free:
	msi_domain_free_irqs_descs_locked(domain, dev);
	return ret;
}
996d9109698SJiang Liu 
997d9109698SJiang Liu /**
99843e9e705SThomas Gleixner  * msi_domain_alloc_irqs - Allocate interrupts from a MSI interrupt domain
99943e9e705SThomas Gleixner  * @domain:	The domain to allocate from
1000d9109698SJiang Liu  * @dev:	Pointer to device struct of the device for which the interrupts
100143e9e705SThomas Gleixner  *		are allocated
100243e9e705SThomas Gleixner  * @nvec:	The number of interrupts to allocate
100343e9e705SThomas Gleixner  *
10043b35e7e6SRandy Dunlap  * Return: %0 on success or an error code.
1005d9109698SJiang Liu  */
int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev, int nvec)
{
	int ret;

	/* Take the descriptor mutex around the locked variant */
	msi_lock_descs(dev);
	ret = msi_domain_alloc_irqs_descs_locked(domain, dev, nvec);
	msi_unlock_descs(dev);
	return ret;
}
101543e9e705SThomas Gleixner 
/*
 * Core free: deactivate and release the Linux interrupts of all MSI
 * descriptors of @dev which have an interrupt associated.
 */
static void __msi_domain_free_irqs(struct irq_domain *domain, struct device *dev)
{
	struct msi_domain_info *info = domain->host_data;
	struct irq_data *irqd;
	struct msi_desc *desc;
	int i;

	/* Only handle MSI entries which have an interrupt associated */
	msi_for_each_desc(desc, dev, MSI_DESC_ASSOCIATED) {
		/* Make sure all interrupts are deactivated */
		for (i = 0; i < desc->nvec_used; i++) {
			irqd = irq_domain_get_irq_data(domain, desc->irq + i);
			if (irqd && irqd_is_activated(irqd))
				irq_domain_deactivate_irq(irqd);
		}

		irq_domain_free_irqs(desc->irq, desc->nvec_used);
		if (info->flags & MSI_FLAG_DEV_SYSFS)
			msi_sysfs_remove_desc(dev, desc);
		/* Mark the descriptor as no longer associated */
		desc->irq = 0;
	}
}
1038d9109698SJiang Liu 
1039645474e2SThomas Gleixner static void msi_domain_free_msi_descs(struct msi_domain_info *info,
1040645474e2SThomas Gleixner 				      struct device *dev)
1041645474e2SThomas Gleixner {
1042645474e2SThomas Gleixner 	if (info->flags & MSI_FLAG_FREE_MSI_DESCS)
1043645474e2SThomas Gleixner 		msi_free_msi_descs(dev);
1044645474e2SThomas Gleixner }
1045645474e2SThomas Gleixner 
1046d9109698SJiang Liu /**
10470f62d941SThomas Gleixner  * msi_domain_free_irqs_descs_locked - Free interrupts from a MSI interrupt @domain associated to @dev
10480f62d941SThomas Gleixner  * @domain:	The domain to managing the interrupts
10490f62d941SThomas Gleixner  * @dev:	Pointer to device struct of the device for which the interrupts
10500f62d941SThomas Gleixner  *		are free
10510f62d941SThomas Gleixner  *
10520f62d941SThomas Gleixner  * Must be invoked from within a msi_lock_descs() / msi_unlock_descs()
10530f62d941SThomas Gleixner  * pair. Use this for MSI irqdomains which implement their own vector
10540f62d941SThomas Gleixner  * allocation.
10550f62d941SThomas Gleixner  */
void msi_domain_free_irqs_descs_locked(struct irq_domain *domain, struct device *dev)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;

	lockdep_assert_held(&dev->msi.data->mutex);

	/* MSI parent domains have nothing of their own to free here */
	if (WARN_ON_ONCE(irq_domain_is_msi_parent(domain)))
		return;

	ops->domain_free_irqs(domain, dev);
	/* Optional hook invoked after all interrupts have been freed */
	if (ops->msi_post_free)
		ops->msi_post_free(domain, dev);
	msi_domain_free_msi_descs(info, dev);
}
10710f62d941SThomas Gleixner 
10720f62d941SThomas Gleixner /**
10733b35e7e6SRandy Dunlap  * msi_domain_free_irqs - Free interrupts from a MSI interrupt @domain associated to @dev
107443e9e705SThomas Gleixner  * @domain:	The domain to managing the interrupts
107543e9e705SThomas Gleixner  * @dev:	Pointer to device struct of the device for which the interrupts
107643e9e705SThomas Gleixner  *		are free
107743e9e705SThomas Gleixner  */
void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev)
{
	/* Take the descriptor mutex around the locked variant */
	msi_lock_descs(dev);
	msi_domain_free_irqs_descs_locked(domain, dev);
	msi_unlock_descs(dev);
}
108443e9e705SThomas Gleixner 
108543e9e705SThomas Gleixner /**
1086f3cf8bb0SJiang Liu  * msi_get_domain_info - Get the MSI interrupt domain info for @domain
1087f3cf8bb0SJiang Liu  * @domain:	The interrupt domain to retrieve data from
1088f3cf8bb0SJiang Liu  *
10893b35e7e6SRandy Dunlap  * Return: the pointer to the msi_domain_info stored in @domain->host_data.
1090f3cf8bb0SJiang Liu  */
1091f3cf8bb0SJiang Liu struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain)
1092f3cf8bb0SJiang Liu {
1093f3cf8bb0SJiang Liu 	return (struct msi_domain_info *)domain->host_data;
1094f3cf8bb0SJiang Liu }
1095