xref: /openbmc/linux/kernel/irq/msi.c (revision 64258eaa)
152a65ff5SThomas Gleixner // SPDX-License-Identifier: GPL-2.0
2f3cf8bb0SJiang Liu /*
3f3cf8bb0SJiang Liu  * Copyright (C) 2014 Intel Corp.
4f3cf8bb0SJiang Liu  * Author: Jiang Liu <jiang.liu@linux.intel.com>
5f3cf8bb0SJiang Liu  *
6f3cf8bb0SJiang Liu  * This file is licensed under GPLv2.
7f3cf8bb0SJiang Liu  *
8a359f757SIngo Molnar  * This file contains common code to support Message Signaled Interrupts for
9f3cf8bb0SJiang Liu  * PCI compatible and non PCI compatible devices.
10f3cf8bb0SJiang Liu  */
11aeeb5965SJiang Liu #include <linux/types.h>
12aeeb5965SJiang Liu #include <linux/device.h>
13f3cf8bb0SJiang Liu #include <linux/irq.h>
14f3cf8bb0SJiang Liu #include <linux/irqdomain.h>
15f3cf8bb0SJiang Liu #include <linux/msi.h>
164e201566SMarc Zyngier #include <linux/slab.h>
173ba1f050SThomas Gleixner #include <linux/sysfs.h>
182f170814SBarry Song #include <linux/pci.h>
19d9109698SJiang Liu 
2007557ccbSThomas Gleixner #include "internals.h"
2107557ccbSThomas Gleixner 
22bf5e758fSThomas Gleixner static inline int msi_sysfs_create_group(struct device *dev);
23cc9a246dSThomas Gleixner 
2428f4b041SThomas Gleixner /**
25cc9a246dSThomas Gleixner  * msi_alloc_desc - Allocate an initialized msi_desc
2628f4b041SThomas Gleixner  * @dev:	Pointer to the device for which this is allocated
2728f4b041SThomas Gleixner  * @nvec:	The number of vectors used in this entry
2828f4b041SThomas Gleixner  * @affinity:	Optional pointer to an affinity mask array size of @nvec
2928f4b041SThomas Gleixner  *
303b35e7e6SRandy Dunlap  * If @affinity is not %NULL then an affinity array[@nvec] is allocated
31bec04037SDou Liyang  * and the affinity masks and flags from @affinity are copied.
323b35e7e6SThomas Gleixner  *
333b35e7e6SRandy Dunlap  * Return: pointer to allocated &msi_desc on success or %NULL on failure
3428f4b041SThomas Gleixner  */
35cc9a246dSThomas Gleixner static struct msi_desc *msi_alloc_desc(struct device *dev, int nvec,
36bec04037SDou Liyang 					const struct irq_affinity_desc *affinity)
37aa48b6f7SJiang Liu {
38cc9a246dSThomas Gleixner 	struct msi_desc *desc = kzalloc(sizeof(*desc), GFP_KERNEL);
3928f4b041SThomas Gleixner 
40aa48b6f7SJiang Liu 	if (!desc)
41aa48b6f7SJiang Liu 		return NULL;
42aa48b6f7SJiang Liu 
43aa48b6f7SJiang Liu 	desc->dev = dev;
4428f4b041SThomas Gleixner 	desc->nvec_used = nvec;
4528f4b041SThomas Gleixner 	if (affinity) {
	/* Private copy of the caller's array; released in msi_free_desc() */
46cc9a246dSThomas Gleixner 		desc->affinity = kmemdup(affinity, nvec * sizeof(*desc->affinity), GFP_KERNEL);
4728f4b041SThomas Gleixner 		if (!desc->affinity) {
4828f4b041SThomas Gleixner 			kfree(desc);
4928f4b041SThomas Gleixner 			return NULL;
5028f4b041SThomas Gleixner 		}
5128f4b041SThomas Gleixner 	}
52aa48b6f7SJiang Liu 	return desc;
53aa48b6f7SJiang Liu }
54aa48b6f7SJiang Liu 
/* Counterpart to msi_alloc_desc(): release the affinity copy and the descriptor */
55cc9a246dSThomas Gleixner static void msi_free_desc(struct msi_desc *desc)
56aa48b6f7SJiang Liu {
57cc9a246dSThomas Gleixner 	kfree(desc->affinity);
58cc9a246dSThomas Gleixner 	kfree(desc);
59aa48b6f7SJiang Liu }
60aa48b6f7SJiang Liu 
/*
 * Insert @desc at @index into the default irqdomain's descriptor store.
 * On insertion failure the descriptor is freed here, so callers must not
 * touch @desc after a non-zero return.
 */
61cd6cf065SThomas Gleixner static int msi_insert_desc(struct msi_device_data *md, struct msi_desc *desc, unsigned int index)
62cd6cf065SThomas Gleixner {
63f1139f90SThomas Gleixner 	struct xarray *xa = &md->__domains[MSI_DEFAULT_DOMAIN].store;
64cd6cf065SThomas Gleixner 	int ret;
65cd6cf065SThomas Gleixner 
66cd6cf065SThomas Gleixner 	desc->msi_index = index;
67f1139f90SThomas Gleixner 	ret = xa_insert(xa, index, desc, GFP_KERNEL);
68cd6cf065SThomas Gleixner 	if (ret)
69cd6cf065SThomas Gleixner 		msi_free_desc(desc);
70cd6cf065SThomas Gleixner 	return ret;
71cd6cf065SThomas Gleixner }
72cd6cf065SThomas Gleixner 
7360290525SThomas Gleixner /**
7460290525SThomas Gleixner  * msi_add_msi_desc - Allocate and initialize a MSI descriptor
7560290525SThomas Gleixner  * @dev:	Pointer to the device for which the descriptor is allocated
7660290525SThomas Gleixner  * @init_desc:	Pointer to an MSI descriptor to initialize the new descriptor
7760290525SThomas Gleixner  *
 * Must be called with the MSI descriptor mutex held (lockdep asserted),
 * i.e. inside a msi_lock_descs() / msi_unlock_descs() section.
 *
7860290525SThomas Gleixner  * Return: 0 on success or an appropriate failure code.
7960290525SThomas Gleixner  */
8060290525SThomas Gleixner int msi_add_msi_desc(struct device *dev, struct msi_desc *init_desc)
8160290525SThomas Gleixner {
8260290525SThomas Gleixner 	struct msi_desc *desc;
8360290525SThomas Gleixner 
8460290525SThomas Gleixner 	lockdep_assert_held(&dev->msi.data->mutex);
8560290525SThomas Gleixner 
86cc9a246dSThomas Gleixner 	desc = msi_alloc_desc(dev, init_desc->nvec_used, init_desc->affinity);
8760290525SThomas Gleixner 	if (!desc)
8860290525SThomas Gleixner 		return -ENOMEM;
8960290525SThomas Gleixner 
90cd6cf065SThomas Gleixner 	/* Copy type specific data to the new descriptor. */
9160290525SThomas Gleixner 	desc->pci = init_desc->pci;
	/* msi_insert_desc() frees @desc on failure */
92cd6cf065SThomas Gleixner 	return msi_insert_desc(dev->msi.data, desc, init_desc->msi_index);
9360290525SThomas Gleixner }
9460290525SThomas Gleixner 
9560290525SThomas Gleixner /**
9660290525SThomas Gleixner  * msi_add_simple_msi_descs - Allocate and initialize MSI descriptors
9760290525SThomas Gleixner  * @dev:	Pointer to the device for which the descriptors are allocated
9860290525SThomas Gleixner  * @index:	Index for the first MSI descriptor
9960290525SThomas Gleixner  * @ndesc:	Number of descriptors to allocate
10060290525SThomas Gleixner  *
 * Must be called with the MSI descriptor mutex held (lockdep asserted).
 *
10160290525SThomas Gleixner  * Return: 0 on success or an appropriate failure code.
10260290525SThomas Gleixner  */
10360290525SThomas Gleixner static int msi_add_simple_msi_descs(struct device *dev, unsigned int index, unsigned int ndesc)
10460290525SThomas Gleixner {
105cd6cf065SThomas Gleixner 	unsigned int idx, last = index + ndesc - 1;
106cd6cf065SThomas Gleixner 	struct msi_desc *desc;
107cd6cf065SThomas Gleixner 	int ret;
10860290525SThomas Gleixner 
10960290525SThomas Gleixner 	lockdep_assert_held(&dev->msi.data->mutex);
11060290525SThomas Gleixner 
111cd6cf065SThomas Gleixner 	for (idx = index; idx <= last; idx++) {
112cc9a246dSThomas Gleixner 		desc = msi_alloc_desc(dev, 1, NULL);
11360290525SThomas Gleixner 		if (!desc)
114cd6cf065SThomas Gleixner 			goto fail_mem;
115cd6cf065SThomas Gleixner 		ret = msi_insert_desc(dev->msi.data, desc, idx);
116cd6cf065SThomas Gleixner 		if (ret)
11760290525SThomas Gleixner 			goto fail;
11860290525SThomas Gleixner 	}
11960290525SThomas Gleixner 	return 0;
12060290525SThomas Gleixner 
121cd6cf065SThomas Gleixner fail_mem:
122cd6cf065SThomas Gleixner 	ret = -ENOMEM;
12360290525SThomas Gleixner fail:
	/*
	 * Unwind over the full [index, last] range: indices which were
	 * never populated are simply skipped by the xarray iteration in
	 * msi_free_msi_descs_range().
	 */
1242f2940d1SThomas Gleixner 	msi_free_msi_descs_range(dev, index, last);
125cd6cf065SThomas Gleixner 	return ret;
12660290525SThomas Gleixner }
127cd6cf065SThomas Gleixner 
/*
 * Check whether @desc matches the iterator/free @filter. "Associated" means
 * a Linux interrupt number has been assigned (desc->irq != 0).
 */
128cd6cf065SThomas Gleixner static bool msi_desc_match(struct msi_desc *desc, enum msi_desc_filter filter)
129cd6cf065SThomas Gleixner {
130cd6cf065SThomas Gleixner 	switch (filter) {
131cd6cf065SThomas Gleixner 	case MSI_DESC_ALL:
132cd6cf065SThomas Gleixner 		return true;
133cd6cf065SThomas Gleixner 	case MSI_DESC_NOTASSOCIATED:
134cd6cf065SThomas Gleixner 		return !desc->irq;
135cd6cf065SThomas Gleixner 	case MSI_DESC_ASSOCIATED:
136cd6cf065SThomas Gleixner 		return !!desc->irq;
137cd6cf065SThomas Gleixner 	}
	/* Unknown filter value: warn once and match nothing */
138cd6cf065SThomas Gleixner 	WARN_ON_ONCE(1);
139cd6cf065SThomas Gleixner 	return false;
14060290525SThomas Gleixner }
14160290525SThomas Gleixner 
142645474e2SThomas Gleixner /**
143645474e2SThomas Gleixner  * msi_free_msi_descs_range - Free MSI descriptors of a device
144645474e2SThomas Gleixner  * @dev:		Device to free the descriptors
145645474e2SThomas Gleixner  * @first_index:	Index to start freeing from
146645474e2SThomas Gleixner  * @last_index:		Last index to be freed
 *
 * Must be called with the MSI descriptor mutex held (lockdep asserted).
 * Descriptors which are still associated with a Linux interrupt are
 * intentionally leaked (with a one-time warning) instead of freed.
147645474e2SThomas Gleixner  */
1482f2940d1SThomas Gleixner void msi_free_msi_descs_range(struct device *dev, unsigned int first_index,
1492f2940d1SThomas Gleixner 			      unsigned int last_index)
150645474e2SThomas Gleixner {
151f1139f90SThomas Gleixner 	struct xarray *xa = &dev->msi.data->__domains[MSI_DEFAULT_DOMAIN].store;
152645474e2SThomas Gleixner 	struct msi_desc *desc;
153cd6cf065SThomas Gleixner 	unsigned long idx;
154645474e2SThomas Gleixner 
155645474e2SThomas Gleixner 	lockdep_assert_held(&dev->msi.data->mutex);
156645474e2SThomas Gleixner 
157cd6cf065SThomas Gleixner 	xa_for_each_range(xa, idx, desc, first_index, last_index) {
158cd6cf065SThomas Gleixner 		xa_erase(xa, idx);
1592f2940d1SThomas Gleixner 
1602f2940d1SThomas Gleixner 		/* Leak the descriptor when it is still referenced */
1612f2940d1SThomas Gleixner 		if (WARN_ON_ONCE(msi_desc_match(desc, MSI_DESC_ASSOCIATED)))
1622f2940d1SThomas Gleixner 			continue;
163cc9a246dSThomas Gleixner 		msi_free_desc(desc);
164645474e2SThomas Gleixner 	}
165645474e2SThomas Gleixner }
166645474e2SThomas Gleixner 
/* Copy the message last written for @entry into @msg (no hardware access) */
16738b6a1cfSJiang Liu void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
16838b6a1cfSJiang Liu {
16938b6a1cfSJiang Liu 	*msg = entry->msg;
17038b6a1cfSJiang Liu }
17138b6a1cfSJiang Liu 
/*
 * Lookup variant of __get_cached_msi_msg() keyed by Linux interrupt number.
 *
 * NOTE(review): irq_get_msi_desc() can return NULL and the result is
 * dereferenced unchecked — presumably callers only pass irqs which are
 * known to carry an MSI descriptor; confirm against callers.
 */
17238b6a1cfSJiang Liu void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg)
17338b6a1cfSJiang Liu {
17438b6a1cfSJiang Liu 	struct msi_desc *entry = irq_get_msi_desc(irq);
17538b6a1cfSJiang Liu 
17638b6a1cfSJiang Liu 	__get_cached_msi_msg(entry, msg);
17738b6a1cfSJiang Liu }
17838b6a1cfSJiang Liu EXPORT_SYMBOL_GPL(get_cached_msi_msg);
17938b6a1cfSJiang Liu 
/*
 * devres release callback for the MSI device data allocated in
 * msi_setup_device_data(). Warns once if any per-domain descriptor store
 * is not empty at teardown, then destroys the stores.
 */
180013bd8e5SThomas Gleixner static void msi_device_data_release(struct device *dev, void *res)
181013bd8e5SThomas Gleixner {
182125282cdSThomas Gleixner 	struct msi_device_data *md = res;
183f1139f90SThomas Gleixner 	int i;
184125282cdSThomas Gleixner 
185f1139f90SThomas Gleixner 	for (i = 0; i < MSI_MAX_DEVICE_IRQDOMAINS; i++) {
186f1139f90SThomas Gleixner 		WARN_ON_ONCE(!xa_empty(&md->__domains[i].store));
187f1139f90SThomas Gleixner 		xa_destroy(&md->__domains[i].store);
188f1139f90SThomas Gleixner 	}
189013bd8e5SThomas Gleixner 	dev->msi.data = NULL;
190013bd8e5SThomas Gleixner }
191013bd8e5SThomas Gleixner 
192013bd8e5SThomas Gleixner /**
193013bd8e5SThomas Gleixner  * msi_setup_device_data - Setup MSI device data
194013bd8e5SThomas Gleixner  * @dev:	Device for which MSI device data should be set up
195013bd8e5SThomas Gleixner  *
196013bd8e5SThomas Gleixner  * Return: 0 on success, appropriate error code otherwise
197013bd8e5SThomas Gleixner  *
198013bd8e5SThomas Gleixner  * This can be called more than once for @dev. If the MSI device data is
199013bd8e5SThomas Gleixner  * already allocated the call succeeds. The allocated memory is
200013bd8e5SThomas Gleixner  * automatically released when the device is destroyed.
201013bd8e5SThomas Gleixner  */
202013bd8e5SThomas Gleixner int msi_setup_device_data(struct device *dev)
203013bd8e5SThomas Gleixner {
204013bd8e5SThomas Gleixner 	struct msi_device_data *md;
205f1139f90SThomas Gleixner 	int ret, i;
206013bd8e5SThomas Gleixner 
207013bd8e5SThomas Gleixner 	if (dev->msi.data)
208013bd8e5SThomas Gleixner 		return 0;
209013bd8e5SThomas Gleixner 
210013bd8e5SThomas Gleixner 	md = devres_alloc(msi_device_data_release, sizeof(*md), GFP_KERNEL);
211013bd8e5SThomas Gleixner 	if (!md)
212013bd8e5SThomas Gleixner 		return -ENOMEM;
213013bd8e5SThomas Gleixner 
	/* Create the sysfs group first; @md is not published yet on failure */
214bf5e758fSThomas Gleixner 	ret = msi_sysfs_create_group(dev);
215bf5e758fSThomas Gleixner 	if (ret) {
216bf5e758fSThomas Gleixner 		devres_free(md);
217bf5e758fSThomas Gleixner 		return ret;
218bf5e758fSThomas Gleixner 	}
219bf5e758fSThomas Gleixner 
220f1139f90SThomas Gleixner 	for (i = 0; i < MSI_MAX_DEVICE_IRQDOMAINS; i++)
221f1139f90SThomas Gleixner 		xa_init(&md->__domains[i].store);
222f1139f90SThomas Gleixner 
223*64258eaaSThomas Gleixner 	/*
224*64258eaaSThomas Gleixner 	 * If @dev::msi::domain is set and is a global MSI domain, copy the
225*64258eaaSThomas Gleixner 	 * pointer into the domain array so all code can operate on domain
226*64258eaaSThomas Gleixner 	 * ids. The NULL pointer check is required to keep the legacy
227*64258eaaSThomas Gleixner 	 * architecture specific PCI/MSI support working.
228*64258eaaSThomas Gleixner 	 */
229*64258eaaSThomas Gleixner 	if (dev->msi.domain && !irq_domain_is_msi_parent(dev->msi.domain))
230*64258eaaSThomas Gleixner 		md->__domains[MSI_DEFAULT_DOMAIN].domain = dev->msi.domain;
231*64258eaaSThomas Gleixner 
232b5f687f9SThomas Gleixner 	mutex_init(&md->mutex);
233013bd8e5SThomas Gleixner 	dev->msi.data = md;
234013bd8e5SThomas Gleixner 	devres_add(dev, md);
235013bd8e5SThomas Gleixner 	return 0;
236013bd8e5SThomas Gleixner }
237013bd8e5SThomas Gleixner 
238cf15f43aSThomas Gleixner /**
239b5f687f9SThomas Gleixner  * msi_lock_descs - Lock the MSI descriptor storage of a device
240b5f687f9SThomas Gleixner  * @dev:	Device to operate on
 *
 * Serializes access to the per device descriptor stores and the iterator
 * state (lockdep-asserted by the descriptor accessors above).
241b5f687f9SThomas Gleixner  */
242b5f687f9SThomas Gleixner void msi_lock_descs(struct device *dev)
243b5f687f9SThomas Gleixner {
244b5f687f9SThomas Gleixner 	mutex_lock(&dev->msi.data->mutex);
245b5f687f9SThomas Gleixner }
246b5f687f9SThomas Gleixner EXPORT_SYMBOL_GPL(msi_lock_descs);
247b5f687f9SThomas Gleixner 
248b5f687f9SThomas Gleixner /**
249b5f687f9SThomas Gleixner  * msi_unlock_descs - Unlock the MSI descriptor storage of a device
250b5f687f9SThomas Gleixner  * @dev:	Device to operate on
251b5f687f9SThomas Gleixner  */
252b5f687f9SThomas Gleixner void msi_unlock_descs(struct device *dev)
253b5f687f9SThomas Gleixner {
254f1139f90SThomas Gleixner 	/* Invalidate the index which was cached by the iterator */
255cd6cf065SThomas Gleixner 	dev->msi.data->__iter_idx = MSI_MAX_INDEX;
256b5f687f9SThomas Gleixner 	mutex_unlock(&dev->msi.data->mutex);
257b5f687f9SThomas Gleixner }
258b5f687f9SThomas Gleixner EXPORT_SYMBOL_GPL(msi_unlock_descs);
259b5f687f9SThomas Gleixner 
/*
 * Scan the default domain store starting at md->__iter_idx for the next
 * descriptor matching @filter. The xarray iteration leaves __iter_idx at
 * the matching index; on exhaustion it is parked at MSI_MAX_INDEX so
 * msi_next_desc() terminates.
 */
260cd6cf065SThomas Gleixner static struct msi_desc *msi_find_desc(struct msi_device_data *md, enum msi_desc_filter filter)
2611046f71dSThomas Gleixner {
262f1139f90SThomas Gleixner 	struct xarray *xa = &md->__domains[MSI_DEFAULT_DOMAIN].store;
2631046f71dSThomas Gleixner 	struct msi_desc *desc;
2641046f71dSThomas Gleixner 
265f1139f90SThomas Gleixner 	xa_for_each_start(xa, md->__iter_idx, desc, md->__iter_idx) {
2661046f71dSThomas Gleixner 		if (msi_desc_match(desc, filter))
2671046f71dSThomas Gleixner 			return desc;
2681046f71dSThomas Gleixner 	}
269cd6cf065SThomas Gleixner 	md->__iter_idx = MSI_MAX_INDEX;
2701046f71dSThomas Gleixner 	return NULL;
2711046f71dSThomas Gleixner }
2721046f71dSThomas Gleixner 
2731046f71dSThomas Gleixner /**
2741046f71dSThomas Gleixner  * msi_first_desc - Get the first MSI descriptor of a device
2751046f71dSThomas Gleixner  * @dev:	Device to operate on
2761046f71dSThomas Gleixner  * @filter:	Descriptor state filter
2771046f71dSThomas Gleixner  *
2781046f71dSThomas Gleixner  * Must be called with the MSI descriptor mutex held, i.e. msi_lock_descs()
2791046f71dSThomas Gleixner  * must be invoked before the call.
2801046f71dSThomas Gleixner  *
2811046f71dSThomas Gleixner  * Return: Pointer to the first MSI descriptor matching the search
2821046f71dSThomas Gleixner  *	   criteria, NULL if none found.
2831046f71dSThomas Gleixner  */
2841046f71dSThomas Gleixner struct msi_desc *msi_first_desc(struct device *dev, enum msi_desc_filter filter)
2851046f71dSThomas Gleixner {
286cd6cf065SThomas Gleixner 	struct msi_device_data *md = dev->msi.data;
2871046f71dSThomas Gleixner 
288cd6cf065SThomas Gleixner 	if (WARN_ON_ONCE(!md))
2891046f71dSThomas Gleixner 		return NULL;
2901046f71dSThomas Gleixner 
291cd6cf065SThomas Gleixner 	lockdep_assert_held(&md->mutex);
2921046f71dSThomas Gleixner 
	/* Restart the cached iterator position at index 0 */
293cd6cf065SThomas Gleixner 	md->__iter_idx = 0;
294cd6cf065SThomas Gleixner 	return msi_find_desc(md, filter);
2951046f71dSThomas Gleixner }
2961046f71dSThomas Gleixner EXPORT_SYMBOL_GPL(msi_first_desc);
2971046f71dSThomas Gleixner 
2981046f71dSThomas Gleixner /**
2991046f71dSThomas Gleixner  * msi_next_desc - Get the next MSI descriptor of a device
3001046f71dSThomas Gleixner  * @dev:	Device to operate on
301fdd53404SThomas Gleixner  * @filter:	Descriptor state filter
3021046f71dSThomas Gleixner  *
3031046f71dSThomas Gleixner  * The first invocation of msi_next_desc() has to be preceded by a
304cd6cf065SThomas Gleixner  * successful invocation of msi_first_desc(). Consecutive invocations are
3051046f71dSThomas Gleixner  * only valid if the previous one was successful. All these operations have
3061046f71dSThomas Gleixner  * to be done within the same MSI mutex held region.
3071046f71dSThomas Gleixner  *
3081046f71dSThomas Gleixner  * Return: Pointer to the next MSI descriptor matching the search
3091046f71dSThomas Gleixner  *	   criteria, NULL if none found.
3101046f71dSThomas Gleixner  */
3111046f71dSThomas Gleixner struct msi_desc *msi_next_desc(struct device *dev, enum msi_desc_filter filter)
3121046f71dSThomas Gleixner {
313cd6cf065SThomas Gleixner 	struct msi_device_data *md = dev->msi.data;
3141046f71dSThomas Gleixner 
315cd6cf065SThomas Gleixner 	if (WARN_ON_ONCE(!md))
3161046f71dSThomas Gleixner 		return NULL;
3171046f71dSThomas Gleixner 
318cd6cf065SThomas Gleixner 	lockdep_assert_held(&md->mutex);
3191046f71dSThomas Gleixner 
	/* Iteration already exhausted (or msi_unlock_descs() invalidated it) */
320cd6cf065SThomas Gleixner 	if (md->__iter_idx >= (unsigned long)MSI_MAX_INDEX)
3211046f71dSThomas Gleixner 		return NULL;
3221046f71dSThomas Gleixner 
323cd6cf065SThomas Gleixner 	md->__iter_idx++;
324cd6cf065SThomas Gleixner 	return msi_find_desc(md, filter);
3251046f71dSThomas Gleixner }
3261046f71dSThomas Gleixner EXPORT_SYMBOL_GPL(msi_next_desc);
3271046f71dSThomas Gleixner 
328b5f687f9SThomas Gleixner /**
329cf15f43aSThomas Gleixner  * msi_get_virq - Return Linux interrupt number of a MSI interrupt
330cf15f43aSThomas Gleixner  * @dev:	Device to operate on
331cf15f43aSThomas Gleixner  * @index:	MSI interrupt index to look for (0-based)
332cf15f43aSThomas Gleixner  *
333cf15f43aSThomas Gleixner  * Return: The Linux interrupt number on success (> 0), 0 if not found
334cf15f43aSThomas Gleixner  */
335cf15f43aSThomas Gleixner unsigned int msi_get_virq(struct device *dev, unsigned int index)
336cf15f43aSThomas Gleixner {
337cf15f43aSThomas Gleixner 	struct msi_desc *desc;
338495c66acSThomas Gleixner 	unsigned int ret = 0;
339f1139f90SThomas Gleixner 	struct xarray *xa;
340cf15f43aSThomas Gleixner 	bool pcimsi;
341cf15f43aSThomas Gleixner 
342cf15f43aSThomas Gleixner 	if (!dev->msi.data)
343cf15f43aSThomas Gleixner 		return 0;
344cf15f43aSThomas Gleixner 
345cf15f43aSThomas Gleixner 	pcimsi = dev_is_pci(dev) ? to_pci_dev(dev)->msi_enabled : false;
346cf15f43aSThomas Gleixner 
347495c66acSThomas Gleixner 	msi_lock_descs(dev);
	/* The lookup is always done in the default irqdomain's store */
348f1139f90SThomas Gleixner 	xa = &dev->msi.data->__domains[MSI_DEFAULT_DOMAIN].store;
349f1139f90SThomas Gleixner 	desc = xa_load(xa, pcimsi ? 0 : index);
350cd6cf065SThomas Gleixner 	if (desc && desc->irq) {
351cf15f43aSThomas Gleixner 		/*
352cd6cf065SThomas Gleixner 		 * PCI-MSI has only one descriptor for multiple interrupts.
353cf15f43aSThomas Gleixner 		 * PCI-MSIX and platform MSI use a descriptor per
354cf15f43aSThomas Gleixner 		 * interrupt.
355cf15f43aSThomas Gleixner 		 */
356cd6cf065SThomas Gleixner 		if (pcimsi) {
357cd6cf065SThomas Gleixner 			if (index < desc->nvec_used)
358cd6cf065SThomas Gleixner 				ret = desc->irq + index;
359cd6cf065SThomas Gleixner 		} else {
360495c66acSThomas Gleixner 			ret = desc->irq;
361cf15f43aSThomas Gleixner 		}
362495c66acSThomas Gleixner 	}
363495c66acSThomas Gleixner 	msi_unlock_descs(dev);
364495c66acSThomas Gleixner 	return ret;
365cf15f43aSThomas Gleixner }
366cf15f43aSThomas Gleixner EXPORT_SYMBOL_GPL(msi_get_virq);
367cf15f43aSThomas Gleixner 
3681197528aSThomas Gleixner #ifdef CONFIG_SYSFS
/*
 * The "msi_irqs" group starts out empty; per interrupt attribute files are
 * added and removed dynamically by msi_sysfs_populate_desc() and
 * msi_sysfs_remove_desc() below.
 */
369bf5e758fSThomas Gleixner static struct attribute *msi_dev_attrs[] = {
370bf5e758fSThomas Gleixner 	NULL
371bf5e758fSThomas Gleixner };
372bf5e758fSThomas Gleixner 
373bf5e758fSThomas Gleixner static const struct attribute_group msi_irqs_group = {
374bf5e758fSThomas Gleixner 	.name	= "msi_irqs",
375bf5e758fSThomas Gleixner 	.attrs	= msi_dev_attrs,
376bf5e758fSThomas Gleixner };
377bf5e758fSThomas Gleixner 
/* devres-managed: the group is removed automatically on device teardown */
378bf5e758fSThomas Gleixner static inline int msi_sysfs_create_group(struct device *dev)
379bf5e758fSThomas Gleixner {
380bf5e758fSThomas Gleixner 	return devm_device_add_group(dev, &msi_irqs_group);
381bf5e758fSThomas Gleixner }
382bf5e758fSThomas Gleixner 
/* ->show() handler of each per-interrupt file: reports "msi" or "msix" */
3832f170814SBarry Song static ssize_t msi_mode_show(struct device *dev, struct device_attribute *attr,
3842f170814SBarry Song 			     char *buf)
3852f170814SBarry Song {
3866ef7f771SThomas Gleixner 	/* MSI vs. MSIX is per device not per interrupt */
3876ef7f771SThomas Gleixner 	bool is_msix = dev_is_pci(dev) ? to_pci_dev(dev)->msix_enabled : false;
3882f170814SBarry Song 
3892f170814SBarry Song 	return sysfs_emit(buf, "%s\n", is_msix ? "msix" : "msi");
3902f170814SBarry Song }
3912f170814SBarry Song 
/*
 * Remove the per-interrupt sysfs files of @desc and free the attribute
 * array allocated by msi_sysfs_populate_desc(). Safe to call on a
 * descriptor which was never (or only partially) populated.
 */
392bf5e758fSThomas Gleixner static void msi_sysfs_remove_desc(struct device *dev, struct msi_desc *desc)
3932f170814SBarry Song {
394bf5e758fSThomas Gleixner 	struct device_attribute *attrs = desc->sysfs_attrs;
3952f170814SBarry Song 	int i;
3962f170814SBarry Song 
397bf5e758fSThomas Gleixner 	if (!attrs)
398bf5e758fSThomas Gleixner 		return;
3992f170814SBarry Song 
400bf5e758fSThomas Gleixner 	desc->sysfs_attrs = NULL;
401bf5e758fSThomas Gleixner 	for (i = 0; i < desc->nvec_used; i++) {
		/* ->show is only set once the file was added to the group */
402bf5e758fSThomas Gleixner 		if (attrs[i].show)
403bf5e758fSThomas Gleixner 			sysfs_remove_file_from_group(&dev->kobj, &attrs[i].attr, msi_irqs_group.name);
404bf5e758fSThomas Gleixner 		kfree(attrs[i].attr.name);
4052f170814SBarry Song 	}
406bf5e758fSThomas Gleixner 	kfree(attrs);
4072f170814SBarry Song }
4082f170814SBarry Song 
/*
 * Create one read-only sysfs file per vector of @desc, named after the
 * Linux interrupt number, inside the device's "msi_irqs" group. On any
 * failure everything created so far is torn down again.
 */
409bf5e758fSThomas Gleixner static int msi_sysfs_populate_desc(struct device *dev, struct msi_desc *desc)
410bf5e758fSThomas Gleixner {
411bf5e758fSThomas Gleixner 	struct device_attribute *attrs;
412bf5e758fSThomas Gleixner 	int ret, i;
4132f170814SBarry Song 
414bf5e758fSThomas Gleixner 	attrs = kcalloc(desc->nvec_used, sizeof(*attrs), GFP_KERNEL);
415bf5e758fSThomas Gleixner 	if (!attrs)
416bf5e758fSThomas Gleixner 		return -ENOMEM;
4172f170814SBarry Song 
418bf5e758fSThomas Gleixner 	desc->sysfs_attrs = attrs;
419bf5e758fSThomas Gleixner 	for (i = 0; i < desc->nvec_used; i++) {
420bf5e758fSThomas Gleixner 		sysfs_attr_init(&attrs[i].attr);
		/* File name is the Linux irq number of this vector */
421bf5e758fSThomas Gleixner 		attrs[i].attr.name = kasprintf(GFP_KERNEL, "%d", desc->irq + i);
422bf5e758fSThomas Gleixner 		if (!attrs[i].attr.name) {
423bf5e758fSThomas Gleixner 			ret = -ENOMEM;
424bf5e758fSThomas Gleixner 			goto fail;
4252f170814SBarry Song 		}
4262f170814SBarry Song 
427bf5e758fSThomas Gleixner 		attrs[i].attr.mode = 0444;
428bf5e758fSThomas Gleixner 		attrs[i].show = msi_mode_show;
429bf5e758fSThomas Gleixner 
430bf5e758fSThomas Gleixner 		ret = sysfs_add_file_to_group(&dev->kobj, &attrs[i].attr, msi_irqs_group.name);
431bf5e758fSThomas Gleixner 		if (ret) {
			/* Clear ->show so the cleanup skips the failed file */
432bf5e758fSThomas Gleixner 			attrs[i].show = NULL;
433bf5e758fSThomas Gleixner 			goto fail;
434bf5e758fSThomas Gleixner 		}
435bf5e758fSThomas Gleixner 	}
436bf5e758fSThomas Gleixner 	return 0;
437bf5e758fSThomas Gleixner 
438bf5e758fSThomas Gleixner fail:
439bf5e758fSThomas Gleixner 	msi_sysfs_remove_desc(dev, desc);
440bf5e758fSThomas Gleixner 	return ret;
441bf5e758fSThomas Gleixner }
442bf5e758fSThomas Gleixner 
443bf5e758fSThomas Gleixner #ifdef CONFIG_PCI_MSI_ARCH_FALLBACKS
4442f170814SBarry Song /**
445bf6e054eSThomas Gleixner  * msi_device_populate_sysfs - Populate msi_irqs sysfs entries for a device
446bf6e054eSThomas Gleixner  * @dev:	The device (PCI, platform etc) which will get sysfs entries
 *
 * Idempotent: descriptors which already carry sysfs attributes are skipped.
 *
 * NOTE(review): on error, entries created for earlier descriptors are left
 * in place — presumably the caller invokes msi_device_destroy_sysfs() on
 * failure; confirm against callers.
447bf6e054eSThomas Gleixner  */
448bf6e054eSThomas Gleixner int msi_device_populate_sysfs(struct device *dev)
449bf6e054eSThomas Gleixner {
450bf5e758fSThomas Gleixner 	struct msi_desc *desc;
451bf5e758fSThomas Gleixner 	int ret;
452bf6e054eSThomas Gleixner 
453bf5e758fSThomas Gleixner 	msi_for_each_desc(desc, dev, MSI_DESC_ASSOCIATED) {
454bf5e758fSThomas Gleixner 		if (desc->sysfs_attrs)
455bf5e758fSThomas Gleixner 			continue;
456bf5e758fSThomas Gleixner 		ret = msi_sysfs_populate_desc(dev, desc);
457bf5e758fSThomas Gleixner 		if (ret)
458bf5e758fSThomas Gleixner 			return ret;
459bf5e758fSThomas Gleixner 	}
460bf6e054eSThomas Gleixner 	return 0;
461bf6e054eSThomas Gleixner }
462bf6e054eSThomas Gleixner 
463bf6e054eSThomas Gleixner /**
46424cff375SThomas Gleixner  * msi_device_destroy_sysfs - Destroy msi_irqs sysfs entries for a device
46524cff375SThomas Gleixner  * @dev:		The device (PCI, platform etc) for which to remove
46624cff375SThomas Gleixner  *			sysfs entries
4672f170814SBarry Song  */
46824cff375SThomas Gleixner void msi_device_destroy_sysfs(struct device *dev)
4692f170814SBarry Song {
470bf5e758fSThomas Gleixner 	struct msi_desc *desc;
4712f170814SBarry Song 
472bf5e758fSThomas Gleixner 	msi_for_each_desc(desc, dev, MSI_DESC_ALL)
473bf5e758fSThomas Gleixner 		msi_sysfs_remove_desc(dev, desc);
4742f170814SBarry Song }
475bf5e758fSThomas Gleixner #endif /* CONFIG_PCI_MSI_ARCH_FALLBACKS */
476bf5e758fSThomas Gleixner #else /* CONFIG_SYSFS */
/* No-op stubs for builds without sysfs support */
477bf5e758fSThomas Gleixner static inline int msi_sysfs_create_group(struct device *dev) { return 0; }
478bf5e758fSThomas Gleixner static inline int msi_sysfs_populate_desc(struct device *dev, struct msi_desc *desc) { return 0; }
479bf5e758fSThomas Gleixner static inline void msi_sysfs_remove_desc(struct device *dev, struct msi_desc *desc) { }
480bf5e758fSThomas Gleixner #endif /* !CONFIG_SYSFS */
4812f170814SBarry Song 
/* Forward declarations for the default msi_domain_ops callbacks below */
482762687ceSThomas Gleixner static int __msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev, int nvec);
483057c97a1SThomas Gleixner static void __msi_domain_free_irqs(struct irq_domain *domain, struct device *dev);
484762687ceSThomas Gleixner 
/* Hand @msg to the irq chip which actually writes the MSI message */
48574faaf7aSThomas Gleixner static inline void irq_chip_write_msi_msg(struct irq_data *data,
48674faaf7aSThomas Gleixner 					  struct msi_msg *msg)
48774faaf7aSThomas Gleixner {
48874faaf7aSThomas Gleixner 	data->chip->irq_write_msi_msg(data, msg);
48974faaf7aSThomas Gleixner }
49074faaf7aSThomas Gleixner 
/*
 * Sanity check for level-triggered MSI: @msg is a two-entry array and the
 * second entry must stay all-zero unless both the domain and the irq chip
 * advertise level capability.
 */
4910be8153cSMarc Zyngier static void msi_check_level(struct irq_domain *domain, struct msi_msg *msg)
4920be8153cSMarc Zyngier {
4930be8153cSMarc Zyngier 	struct msi_domain_info *info = domain->host_data;
4940be8153cSMarc Zyngier 
4950be8153cSMarc Zyngier 	/*
4960be8153cSMarc Zyngier 	 * If the MSI provider has messed with the second message and
4970be8153cSMarc Zyngier 	 * not advertized that it is level-capable, signal the breakage.
4980be8153cSMarc Zyngier 	 */
4990be8153cSMarc Zyngier 	WARN_ON(!((info->flags & MSI_FLAG_LEVEL_CAPABLE) &&
5000be8153cSMarc Zyngier 		  (info->chip->flags & IRQCHIP_SUPPORTS_LEVEL_MSI)) &&
5010be8153cSMarc Zyngier 		(msg[1].address_lo || msg[1].address_hi || msg[1].data));
5020be8153cSMarc Zyngier }
5030be8153cSMarc Zyngier 
504f3cf8bb0SJiang Liu /**
505f3cf8bb0SJiang Liu  * msi_domain_set_affinity - Generic affinity setter function for MSI domains
506f3cf8bb0SJiang Liu  * @irq_data:	The irq data associated to the interrupt
507f3cf8bb0SJiang Liu  * @mask:	The affinity mask to set
508f3cf8bb0SJiang Liu  * @force:	Flag to enforce setting (disable online checks)
509f3cf8bb0SJiang Liu  *
510f3cf8bb0SJiang Liu  * Intended to be used by MSI interrupt controllers which are
511f3cf8bb0SJiang Liu  * implemented with hierarchical domains.
5123b35e7e6SRandy Dunlap  *
5133b35e7e6SRandy Dunlap  * Return: IRQ_SET_MASK_* result code
514f3cf8bb0SJiang Liu  */
515f3cf8bb0SJiang Liu int msi_domain_set_affinity(struct irq_data *irq_data,
516f3cf8bb0SJiang Liu 			    const struct cpumask *mask, bool force)
517f3cf8bb0SJiang Liu {
518f3cf8bb0SJiang Liu 	struct irq_data *parent = irq_data->parent_data;
	/* Two entries: the second one is only used by level-triggered MSI */
5190be8153cSMarc Zyngier 	struct msi_msg msg[2] = { [1] = { }, };
520f3cf8bb0SJiang Liu 	int ret;
521f3cf8bb0SJiang Liu 
522f3cf8bb0SJiang Liu 	ret = parent->chip->irq_set_affinity(parent, mask, force);
	/* Rewrite the message unless the parent already wrote it (OK_DONE) */
523f3cf8bb0SJiang Liu 	if (ret >= 0 && ret != IRQ_SET_MASK_OK_DONE) {
5240be8153cSMarc Zyngier 		BUG_ON(irq_chip_compose_msi_msg(irq_data, msg));
5250be8153cSMarc Zyngier 		msi_check_level(irq_data->domain, msg);
5260be8153cSMarc Zyngier 		irq_chip_write_msi_msg(irq_data, msg);
527f3cf8bb0SJiang Liu 	}
528f3cf8bb0SJiang Liu 
529f3cf8bb0SJiang Liu 	return ret;
530f3cf8bb0SJiang Liu }
531f3cf8bb0SJiang Liu 
/* irq_domain ->activate(): compose the MSI message and program the chip */
53172491643SThomas Gleixner static int msi_domain_activate(struct irq_domain *domain,
53272491643SThomas Gleixner 			       struct irq_data *irq_data, bool early)
533f3cf8bb0SJiang Liu {
5340be8153cSMarc Zyngier 	struct msi_msg msg[2] = { [1] = { }, };
535f3cf8bb0SJiang Liu 
5360be8153cSMarc Zyngier 	BUG_ON(irq_chip_compose_msi_msg(irq_data, msg));
5370be8153cSMarc Zyngier 	msi_check_level(irq_data->domain, msg);
5380be8153cSMarc Zyngier 	irq_chip_write_msi_msg(irq_data, msg);
53972491643SThomas Gleixner 	return 0;
540f3cf8bb0SJiang Liu }
541f3cf8bb0SJiang Liu 
/* irq_domain ->deactivate(): program an all-zero message into the chip */
542f3cf8bb0SJiang Liu static void msi_domain_deactivate(struct irq_domain *domain,
543f3cf8bb0SJiang Liu 				  struct irq_data *irq_data)
544f3cf8bb0SJiang Liu {
5450be8153cSMarc Zyngier 	struct msi_msg msg[2];
546f3cf8bb0SJiang Liu 
5470be8153cSMarc Zyngier 	memset(msg, 0, sizeof(msg));
5480be8153cSMarc Zyngier 	irq_chip_write_msi_msg(irq_data, msg);
549f3cf8bb0SJiang Liu }
551f3cf8bb0SJiang Liu 
/*
 * irq_domain ->alloc(): allocate @nr_irqs in the parent domain (if any)
 * and run the per-interrupt msi_init() callback for each of them,
 * unwinding everything on failure.
 */
552f3cf8bb0SJiang Liu static int msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
553f3cf8bb0SJiang Liu 			    unsigned int nr_irqs, void *arg)
554f3cf8bb0SJiang Liu {
555f3cf8bb0SJiang Liu 	struct msi_domain_info *info = domain->host_data;
556f3cf8bb0SJiang Liu 	struct msi_domain_ops *ops = info->ops;
557f3cf8bb0SJiang Liu 	irq_hw_number_t hwirq = ops->get_hwirq(info, arg);
558f3cf8bb0SJiang Liu 	int i, ret;
559f3cf8bb0SJiang Liu 
560f3cf8bb0SJiang Liu 	if (irq_find_mapping(domain, hwirq) > 0)
561f3cf8bb0SJiang Liu 		return -EEXIST;
562f3cf8bb0SJiang Liu 
563bf6f869fSLiu Jiang 	if (domain->parent) {
564f3cf8bb0SJiang Liu 		ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
565f3cf8bb0SJiang Liu 		if (ret < 0)
566f3cf8bb0SJiang Liu 			return ret;
567bf6f869fSLiu Jiang 	}
568f3cf8bb0SJiang Liu 
569f3cf8bb0SJiang Liu 	for (i = 0; i < nr_irqs; i++) {
570f3cf8bb0SJiang Liu 		ret = ops->msi_init(domain, info, virq + i, hwirq + i, arg);
571f3cf8bb0SJiang Liu 		if (ret < 0) {
572f3cf8bb0SJiang Liu 			if (ops->msi_free) {
				/*
				 * NOTE(review): this unwind runs from i-1 down
				 * to 1, so ops->msi_free() is never invoked
				 * for virq + 0 — looks like an off-by-one
				 * (i >= 0 would cover it); confirm intended.
				 */
573f3cf8bb0SJiang Liu 				for (i--; i > 0; i--)
574f3cf8bb0SJiang Liu 					ops->msi_free(domain, info, virq + i);
575f3cf8bb0SJiang Liu 			}
576f3cf8bb0SJiang Liu 			irq_domain_free_irqs_top(domain, virq, nr_irqs);
577f3cf8bb0SJiang Liu 			return ret;
578f3cf8bb0SJiang Liu 		}
579f3cf8bb0SJiang Liu 	}
580f3cf8bb0SJiang Liu 
581f3cf8bb0SJiang Liu 	return 0;
582f3cf8bb0SJiang Liu }
583f3cf8bb0SJiang Liu 
/* irq_domain ->free(): per-interrupt msi_free() callback, then parent free */
584f3cf8bb0SJiang Liu static void msi_domain_free(struct irq_domain *domain, unsigned int virq,
585f3cf8bb0SJiang Liu 			    unsigned int nr_irqs)
586f3cf8bb0SJiang Liu {
587f3cf8bb0SJiang Liu 	struct msi_domain_info *info = domain->host_data;
588f3cf8bb0SJiang Liu 	int i;
589f3cf8bb0SJiang Liu 
590f3cf8bb0SJiang Liu 	if (info->ops->msi_free) {
591f3cf8bb0SJiang Liu 		for (i = 0; i < nr_irqs; i++)
592f3cf8bb0SJiang Liu 			info->ops->msi_free(domain, info, virq + i);
593f3cf8bb0SJiang Liu 	}
594f3cf8bb0SJiang Liu 	irq_domain_free_irqs_top(domain, virq, nr_irqs);
595f3cf8bb0SJiang Liu }
596f3cf8bb0SJiang Liu 
/* irq_domain_ops vector shared by all MSI irq domains in this file */
59701364028SKrzysztof Kozlowski static const struct irq_domain_ops msi_domain_ops = {
598f3cf8bb0SJiang Liu 	.alloc		= msi_domain_alloc,
599f3cf8bb0SJiang Liu 	.free		= msi_domain_free,
600f3cf8bb0SJiang Liu 	.activate	= msi_domain_activate,
601f3cf8bb0SJiang Liu 	.deactivate	= msi_domain_deactivate,
602f3cf8bb0SJiang Liu };
603f3cf8bb0SJiang Liu 
604aeeb5965SJiang Liu static irq_hw_number_t msi_domain_ops_get_hwirq(struct msi_domain_info *info,
605aeeb5965SJiang Liu 						msi_alloc_info_t *arg)
606aeeb5965SJiang Liu {
607aeeb5965SJiang Liu 	return arg->hwirq;
608aeeb5965SJiang Liu }
609aeeb5965SJiang Liu 
/*
 * Default .msi_prepare callback: nothing to set up, just hand back a
 * zero initialized allocation info structure.
 */
610aeeb5965SJiang Liu static int msi_domain_ops_prepare(struct irq_domain *domain, struct device *dev,
611aeeb5965SJiang Liu 				  int nvec, msi_alloc_info_t *arg)
612aeeb5965SJiang Liu {
613aeeb5965SJiang Liu 	memset(arg, 0, sizeof(*arg));
614aeeb5965SJiang Liu 	return 0;
615aeeb5965SJiang Liu }
616aeeb5965SJiang Liu 
/* Default .set_desc callback: record the MSI descriptor in @arg */
617aeeb5965SJiang Liu static void msi_domain_ops_set_desc(msi_alloc_info_t *arg,
618aeeb5965SJiang Liu 				    struct msi_desc *desc)
619aeeb5965SJiang Liu {
620aeeb5965SJiang Liu 	arg->desc = desc;
621aeeb5965SJiang Liu }
622aeeb5965SJiang Liu 
/*
 * Default .msi_init callback: associate @hwirq and the irq chip from
 * @info with @virq and install the optional flow handler and handler
 * data supplied by the domain info.
 */
623aeeb5965SJiang Liu static int msi_domain_ops_init(struct irq_domain *domain,
624aeeb5965SJiang Liu 			       struct msi_domain_info *info,
625aeeb5965SJiang Liu 			       unsigned int virq, irq_hw_number_t hwirq,
626aeeb5965SJiang Liu 			       msi_alloc_info_t *arg)
627aeeb5965SJiang Liu {
628aeeb5965SJiang Liu 	irq_domain_set_hwirq_and_chip(domain, virq, hwirq, info->chip,
629aeeb5965SJiang Liu 				      info->chip_data);
630aeeb5965SJiang Liu 	if (info->handler && info->handler_name) {
631aeeb5965SJiang Liu 		__irq_set_handler(virq, info->handler, 0, info->handler_name);
632aeeb5965SJiang Liu 		if (info->handler_data)
633aeeb5965SJiang Liu 			irq_set_handler_data(virq, info->handler_data);
634aeeb5965SJiang Liu 	}
635aeeb5965SJiang Liu 	return 0;
636aeeb5965SJiang Liu }
637aeeb5965SJiang Liu 
/* Fallback ops used to fill in callbacks which a domain did not provide */
638aeeb5965SJiang Liu static struct msi_domain_ops msi_domain_ops_default = {
639aeeb5965SJiang Liu 	.get_hwirq		= msi_domain_ops_get_hwirq,
640aeeb5965SJiang Liu 	.msi_init		= msi_domain_ops_init,
641aeeb5965SJiang Liu 	.msi_prepare		= msi_domain_ops_prepare,
642aeeb5965SJiang Liu 	.set_desc		= msi_domain_ops_set_desc,
64343e9e705SThomas Gleixner 	.domain_alloc_irqs	= __msi_domain_alloc_irqs,
64443e9e705SThomas Gleixner 	.domain_free_irqs	= __msi_domain_free_irqs,
645aeeb5965SJiang Liu };
646aeeb5965SJiang Liu 
/*
 * Fill in missing msi_domain_ops callbacks:
 * - If no ops were supplied at all, use msi_domain_ops_default wholesale.
 * - domain_alloc_irqs/domain_free_irqs are always backed by the defaults.
 * - The remaining callbacks are only defaulted when the domain opted in
 *   via MSI_FLAG_USE_DEF_DOM_OPS.
 */
647aeeb5965SJiang Liu static void msi_domain_update_dom_ops(struct msi_domain_info *info)
648aeeb5965SJiang Liu {
649aeeb5965SJiang Liu 	struct msi_domain_ops *ops = info->ops;
650aeeb5965SJiang Liu 
651aeeb5965SJiang Liu 	if (ops == NULL) {
652aeeb5965SJiang Liu 		info->ops = &msi_domain_ops_default;
653aeeb5965SJiang Liu 		return;
654aeeb5965SJiang Liu 	}
655aeeb5965SJiang Liu 
65643e9e705SThomas Gleixner 	if (ops->domain_alloc_irqs == NULL)
65743e9e705SThomas Gleixner 		ops->domain_alloc_irqs = msi_domain_ops_default.domain_alloc_irqs;
65843e9e705SThomas Gleixner 	if (ops->domain_free_irqs == NULL)
65943e9e705SThomas Gleixner 		ops->domain_free_irqs = msi_domain_ops_default.domain_free_irqs;
66043e9e705SThomas Gleixner 
66143e9e705SThomas Gleixner 	if (!(info->flags & MSI_FLAG_USE_DEF_DOM_OPS))
66243e9e705SThomas Gleixner 		return;
66343e9e705SThomas Gleixner 
664aeeb5965SJiang Liu 	if (ops->get_hwirq == NULL)
665aeeb5965SJiang Liu 		ops->get_hwirq = msi_domain_ops_default.get_hwirq;
666aeeb5965SJiang Liu 	if (ops->msi_init == NULL)
667aeeb5965SJiang Liu 		ops->msi_init = msi_domain_ops_default.msi_init;
668aeeb5965SJiang Liu 	if (ops->msi_prepare == NULL)
669aeeb5965SJiang Liu 		ops->msi_prepare = msi_domain_ops_default.msi_prepare;
670aeeb5965SJiang Liu 	if (ops->set_desc == NULL)
671aeeb5965SJiang Liu 		ops->set_desc = msi_domain_ops_default.set_desc;
672aeeb5965SJiang Liu }
673aeeb5965SJiang Liu 
/*
 * A chip with mask/unmask callbacks is mandatory; only the affinity
 * setter may be defaulted to msi_domain_set_affinity().
 */
674aeeb5965SJiang Liu static void msi_domain_update_chip_ops(struct msi_domain_info *info)
675aeeb5965SJiang Liu {
676aeeb5965SJiang Liu 	struct irq_chip *chip = info->chip;
677aeeb5965SJiang Liu 
6780701c53eSMarc Zyngier 	BUG_ON(!chip || !chip->irq_mask || !chip->irq_unmask);
679aeeb5965SJiang Liu 	if (!chip->irq_set_affinity)
680aeeb5965SJiang Liu 		chip->irq_set_affinity = msi_domain_set_affinity;
681aeeb5965SJiang Liu }
682aeeb5965SJiang Liu 
683f3cf8bb0SJiang Liu /**
6843b35e7e6SRandy Dunlap  * msi_create_irq_domain - Create an MSI interrupt domain
685be5436c8SMarc Zyngier  * @fwnode:	Optional fwnode of the interrupt controller
686f3cf8bb0SJiang Liu  * @info:	MSI domain info
687f3cf8bb0SJiang Liu  * @parent:	Parent irq domain
6883b35e7e6SRandy Dunlap  *
6893b35e7e6SRandy Dunlap  * Return: pointer to the created &struct irq_domain or %NULL on failure
690f3cf8bb0SJiang Liu  */
691be5436c8SMarc Zyngier struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode,
692f3cf8bb0SJiang Liu 					 struct msi_domain_info *info,
693f3cf8bb0SJiang Liu 					 struct irq_domain *parent)
694f3cf8bb0SJiang Liu {
695a97b852bSMarc Zyngier 	struct irq_domain *domain;
696a97b852bSMarc Zyngier 
697aeeb5965SJiang Liu 	msi_domain_update_dom_ops(info);
698aeeb5965SJiang Liu 	if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
699aeeb5965SJiang Liu 		msi_domain_update_chip_ops(info);
700f3cf8bb0SJiang Liu 
701a97b852bSMarc Zyngier 	domain = irq_domain_create_hierarchy(parent, IRQ_DOMAIN_FLAG_MSI, 0,
70288156f00SEric Auger 					     fwnode, &msi_domain_ops, info);
7030165308aSThomas Gleixner 
70422db089aSAhmed S. Darwish 	if (domain) {
		/* Fall back to the irq chip name if the domain was not named */
70522db089aSAhmed S. Darwish 		if (!domain->name && info->chip)
706a97b852bSMarc Zyngier 			domain->name = info->chip->name;
70722db089aSAhmed S. Darwish 		irq_domain_update_bus_token(domain, info->bus_token);
70822db089aSAhmed S. Darwish 	}
709a97b852bSMarc Zyngier 
710a97b852bSMarc Zyngier 	return domain;
711f3cf8bb0SJiang Liu }
712f3cf8bb0SJiang Liu 
/*
 * Let the domain prepare for an allocation of @nvec interrupts for @dev
 * by invoking the domain's msi_prepare() callback on @arg.
 */
713b2eba39bSMarc Zyngier int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev,
714b2eba39bSMarc Zyngier 			    int nvec, msi_alloc_info_t *arg)
715b2eba39bSMarc Zyngier {
716b2eba39bSMarc Zyngier 	struct msi_domain_info *info = domain->host_data;
717b2eba39bSMarc Zyngier 	struct msi_domain_ops *ops = info->ops;
718b2eba39bSMarc Zyngier 
7192569f62cSThomas Gleixner 	return ops->msi_prepare(domain, dev, nvec, arg);
720b2eba39bSMarc Zyngier }
721b2eba39bSMarc Zyngier 
/*
 * Connect the Linux interrupt numbers [@virq_base, @virq_base + @nvec)
 * with freshly added MSI descriptors of @dev and allocate the matching
 * resources in @domain. On failure the already set up interrupts and the
 * descriptors are torn down again before returning the error code.
 */
7222145ac93SMarc Zyngier int msi_domain_populate_irqs(struct irq_domain *domain, struct device *dev,
723a80713feSThomas Gleixner 			     int virq_base, int nvec, msi_alloc_info_t *arg)
7242145ac93SMarc Zyngier {
7252145ac93SMarc Zyngier 	struct msi_domain_info *info = domain->host_data;
7262145ac93SMarc Zyngier 	struct msi_domain_ops *ops = info->ops;
7272145ac93SMarc Zyngier 	struct msi_desc *desc;
728f1139f90SThomas Gleixner 	struct xarray *xa;
729a80713feSThomas Gleixner 	int ret, virq;
7302145ac93SMarc Zyngier 
731a80713feSThomas Gleixner 	msi_lock_descs(dev);
732cd6cf065SThomas Gleixner 	ret = msi_add_simple_msi_descs(dev, virq_base, nvec);
733cd6cf065SThomas Gleixner 	if (ret)
734cd6cf065SThomas Gleixner 		goto unlock;
7352145ac93SMarc Zyngier 
736f1139f90SThomas Gleixner 	xa = &dev->msi.data->__domains[MSI_DEFAULT_DOMAIN].store;
737f1139f90SThomas Gleixner 
738cd6cf065SThomas Gleixner 	for (virq = virq_base; virq < virq_base + nvec; virq++) {
739f1139f90SThomas Gleixner 		desc = xa_load(xa, virq);
740a80713feSThomas Gleixner 		desc->irq = virq;
7412145ac93SMarc Zyngier 
7422145ac93SMarc Zyngier 		ops->set_desc(arg, desc);
743a80713feSThomas Gleixner 		ret = irq_domain_alloc_irqs_hierarchy(domain, virq, 1, arg);
7442145ac93SMarc Zyngier 		if (ret)
745a80713feSThomas Gleixner 			goto fail;
7462145ac93SMarc Zyngier 
747a80713feSThomas Gleixner 		irq_set_msi_desc(virq, desc);
7482145ac93SMarc Zyngier 	}
749a80713feSThomas Gleixner 	msi_unlock_descs(dev);
750a80713feSThomas Gleixner 	return 0;
7512145ac93SMarc Zyngier 
752a80713feSThomas Gleixner fail:
753a80713feSThomas Gleixner 	for (--virq; virq >= virq_base; virq--)
754a80713feSThomas Gleixner 		irq_domain_free_irqs_common(domain, virq, 1);
7552f2940d1SThomas Gleixner 	msi_free_msi_descs_range(dev, virq_base, virq_base + nvec - 1);
756cd6cf065SThomas Gleixner unlock:
757a80713feSThomas Gleixner 	msi_unlock_descs(dev);
7582145ac93SMarc Zyngier 	return ret;
7592145ac93SMarc Zyngier }
7602145ac93SMarc Zyngier 
761bc976233SThomas Gleixner /*
762bc976233SThomas Gleixner  * Carefully check whether the device can use reservation mode. If
763bc976233SThomas Gleixner  * reservation mode is enabled then the early activation will assign a
764bc976233SThomas Gleixner  * dummy vector to the device. If the PCI/MSI device does not support
765bc976233SThomas Gleixner  * masking of the entry then this can result in spurious interrupts when
766bc976233SThomas Gleixner  * the device driver is not absolutely careful. But even then a malfunction
767bc976233SThomas Gleixner  * of the hardware could result in a spurious interrupt on the dummy vector
768bc976233SThomas Gleixner  * and render the device unusable. If the entry can be masked then the core
769bc976233SThomas Gleixner  * logic will prevent the spurious interrupt and reservation mode can be
770bc976233SThomas Gleixner  * used. For now reservation mode is restricted to PCI/MSI.
771bc976233SThomas Gleixner  */
772bc976233SThomas Gleixner static bool msi_check_reservation_mode(struct irq_domain *domain,
773bc976233SThomas Gleixner 				       struct msi_domain_info *info,
774bc976233SThomas Gleixner 				       struct device *dev)
775da5dd9e8SThomas Gleixner {
776bc976233SThomas Gleixner 	struct msi_desc *desc;
777bc976233SThomas Gleixner 
778c6c9e283SThomas Gleixner 	switch(domain->bus_token) {
779c6c9e283SThomas Gleixner 	case DOMAIN_BUS_PCI_MSI:
780c6c9e283SThomas Gleixner 	case DOMAIN_BUS_VMD_MSI:
781c6c9e283SThomas Gleixner 		break;
782c6c9e283SThomas Gleixner 	default:
783bc976233SThomas Gleixner 		return false;
784c6c9e283SThomas Gleixner 	}
785bc976233SThomas Gleixner 
786da5dd9e8SThomas Gleixner 	if (!(info->flags & MSI_FLAG_MUST_REACTIVATE))
787da5dd9e8SThomas Gleixner 		return false;
788bc976233SThomas Gleixner 
	/* Without functional mask bits reservation mode cannot be used */
789bc976233SThomas Gleixner 	if (IS_ENABLED(CONFIG_PCI_MSI) && pci_msi_ignore_mask)
790bc976233SThomas Gleixner 		return false;
791bc976233SThomas Gleixner 
792bc976233SThomas Gleixner 	/*
793bc976233SThomas Gleixner 	 * Checking the first MSI descriptor is sufficient. MSIX supports
7949c8e9c96SThomas Gleixner 	 * masking and MSI does so when the can_mask attribute is set.
795bc976233SThomas Gleixner 	 */
796495c66acSThomas Gleixner 	desc = msi_first_desc(dev, MSI_DESC_ALL);
797e58f2259SThomas Gleixner 	return desc->pci.msi_attrib.is_msix || desc->pci.msi_attrib.can_mask;
798da5dd9e8SThomas Gleixner }
799da5dd9e8SThomas Gleixner 
/*
 * Convert an allocation failure into the appropriate return value:
 * non-PCI domains simply report -ENOSPC, a failed PCI multi-MSI
 * allocation may be retried with fewer vectors, and a partial success
 * (@allocated > 0) is reported to the caller.
 */
80089033762SThomas Gleixner static int msi_handle_pci_fail(struct irq_domain *domain, struct msi_desc *desc,
80189033762SThomas Gleixner 			       int allocated)
80289033762SThomas Gleixner {
80389033762SThomas Gleixner 	switch(domain->bus_token) {
80489033762SThomas Gleixner 	case DOMAIN_BUS_PCI_MSI:
80589033762SThomas Gleixner 	case DOMAIN_BUS_VMD_MSI:
80689033762SThomas Gleixner 		if (IS_ENABLED(CONFIG_PCI_MSI))
80789033762SThomas Gleixner 			break;
80889033762SThomas Gleixner 		fallthrough;
80989033762SThomas Gleixner 	default:
81089033762SThomas Gleixner 		return -ENOSPC;
81189033762SThomas Gleixner 	}
81289033762SThomas Gleixner 
81389033762SThomas Gleixner 	/* Let a failed PCI multi MSI allocation retry */
81489033762SThomas Gleixner 	if (desc->nvec_used > 1)
81589033762SThomas Gleixner 		return 1;
81689033762SThomas Gleixner 
81789033762SThomas Gleixner 	/* If there was a successful allocation let the caller know */
81889033762SThomas Gleixner 	return allocated ? allocated : -ENOSPC;
81989033762SThomas Gleixner }
82089033762SThomas Gleixner 
/* Flags telling msi_init_virq() how to set up a newly allocated interrupt */
821ef8dd015SThomas Gleixner #define VIRQ_CAN_RESERVE	0x01
822ef8dd015SThomas Gleixner #define VIRQ_ACTIVATE		0x02
823ef8dd015SThomas Gleixner #define VIRQ_NOMASK_QUIRK	0x04
824ef8dd015SThomas Gleixner 
/*
 * Apply the VIRQ_* flags to a freshly allocated interrupt and, when
 * VIRQ_ACTIVATE is set, activate it. Returns 0 on success or the error
 * code of the failed activation.
 */
825ef8dd015SThomas Gleixner static int msi_init_virq(struct irq_domain *domain, int virq, unsigned int vflags)
826ef8dd015SThomas Gleixner {
827ef8dd015SThomas Gleixner 	struct irq_data *irqd = irq_domain_get_irq_data(domain, virq);
828ef8dd015SThomas Gleixner 	int ret;
829ef8dd015SThomas Gleixner 
830ef8dd015SThomas Gleixner 	if (!(vflags & VIRQ_CAN_RESERVE)) {
831ef8dd015SThomas Gleixner 		irqd_clr_can_reserve(irqd);
832ef8dd015SThomas Gleixner 		if (vflags & VIRQ_NOMASK_QUIRK)
833ef8dd015SThomas Gleixner 			irqd_set_msi_nomask_quirk(irqd);
834d802057cSMarc Zyngier 
835d802057cSMarc Zyngier 		/*
836d802057cSMarc Zyngier 		 * If the interrupt is managed but no CPU is available to
837d802057cSMarc Zyngier 		 * service it, shut it down until better times. Note that
838d802057cSMarc Zyngier 		 * we only do this on the !RESERVE path as x86 (the only
839d802057cSMarc Zyngier 		 * architecture using this flag) deals with this in a
840d802057cSMarc Zyngier 		 * different way by using a catch-all vector.
841d802057cSMarc Zyngier 		 */
842d802057cSMarc Zyngier 		if ((vflags & VIRQ_ACTIVATE) &&
843d802057cSMarc Zyngier 		    irqd_affinity_is_managed(irqd) &&
844d802057cSMarc Zyngier 		    !cpumask_intersects(irq_data_get_affinity_mask(irqd),
845d802057cSMarc Zyngier 					cpu_online_mask)) {
846d802057cSMarc Zyngier 			    irqd_set_managed_shutdown(irqd);
847d802057cSMarc Zyngier 			    return 0;
848d802057cSMarc Zyngier 		    }
849ef8dd015SThomas Gleixner 	}
850ef8dd015SThomas Gleixner 
851ef8dd015SThomas Gleixner 	if (!(vflags & VIRQ_ACTIVATE))
852ef8dd015SThomas Gleixner 		return 0;
853ef8dd015SThomas Gleixner 
854ef8dd015SThomas Gleixner 	ret = irq_domain_activate_irq(irqd, vflags & VIRQ_CAN_RESERVE);
855ef8dd015SThomas Gleixner 	if (ret)
856ef8dd015SThomas Gleixner 		return ret;
857ef8dd015SThomas Gleixner 	/*
858ef8dd015SThomas Gleixner 	 * If the interrupt uses reservation mode, clear the activated bit
859ef8dd015SThomas Gleixner 	 * so request_irq() will assign the final vector.
860ef8dd015SThomas Gleixner 	 */
861ef8dd015SThomas Gleixner 	if (vflags & VIRQ_CAN_RESERVE)
862ef8dd015SThomas Gleixner 		irqd_clr_activated(irqd);
863ef8dd015SThomas Gleixner 	return 0;
864ef8dd015SThomas Gleixner }
865ef8dd015SThomas Gleixner 
/*
 * Default interrupt allocation: walk all not yet associated descriptors
 * of @dev, allocate the interrupts in @domain and initialize them
 * according to the computed VIRQ_* flags. On failure the partially
 * allocated interrupts are left in place; the caller is responsible for
 * cleaning them up (see msi_domain_alloc_irqs_descs_locked()).
 */
866762687ceSThomas Gleixner static int __msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
867d9109698SJiang Liu 				   int nvec)
868d9109698SJiang Liu {
869d9109698SJiang Liu 	struct msi_domain_info *info = domain->host_data;
870d9109698SJiang Liu 	struct msi_domain_ops *ops = info->ops;
87106fde695SZenghui Yu 	msi_alloc_info_t arg = { };
872ef8dd015SThomas Gleixner 	unsigned int vflags = 0;
873ef8dd015SThomas Gleixner 	struct msi_desc *desc;
87489033762SThomas Gleixner 	int allocated = 0;
875b6140914SThomas Gleixner 	int i, ret, virq;
876d9109698SJiang Liu 
877b2eba39bSMarc Zyngier 	ret = msi_domain_prepare_irqs(domain, dev, nvec, &arg);
878d9109698SJiang Liu 	if (ret)
879d9109698SJiang Liu 		return ret;
880d9109698SJiang Liu 
881ef8dd015SThomas Gleixner 	/*
882ef8dd015SThomas Gleixner 	 * This flag is set by the PCI layer as we need to activate
883ef8dd015SThomas Gleixner 	 * the MSI entries before the PCI layer enables MSI in the
884ef8dd015SThomas Gleixner 	 * card. Otherwise the card latches a random msi message.
885ef8dd015SThomas Gleixner 	 */
886ef8dd015SThomas Gleixner 	if (info->flags & MSI_FLAG_ACTIVATE_EARLY)
887ef8dd015SThomas Gleixner 		vflags |= VIRQ_ACTIVATE;
888ef8dd015SThomas Gleixner 
889ef8dd015SThomas Gleixner 	/*
890ef8dd015SThomas Gleixner 	 * Interrupt can use a reserved vector and will not occupy
891ef8dd015SThomas Gleixner 	 * a real device vector until the interrupt is requested.
892ef8dd015SThomas Gleixner 	 */
893ef8dd015SThomas Gleixner 	if (msi_check_reservation_mode(domain, info, dev)) {
894ef8dd015SThomas Gleixner 		vflags |= VIRQ_CAN_RESERVE;
895ef8dd015SThomas Gleixner 		/*
896ef8dd015SThomas Gleixner 		 * MSI affinity setting requires a special quirk (X86) when
897ef8dd015SThomas Gleixner 		 * reservation mode is active.
898ef8dd015SThomas Gleixner 		 */
8993dad5f9aSThomas Gleixner 		if (info->flags & MSI_FLAG_NOMASK_QUIRK)
900ef8dd015SThomas Gleixner 			vflags |= VIRQ_NOMASK_QUIRK;
901ef8dd015SThomas Gleixner 	}
902ef8dd015SThomas Gleixner 
903ef8dd015SThomas Gleixner 	msi_for_each_desc(desc, dev, MSI_DESC_NOTASSOCIATED) {
904d9109698SJiang Liu 		ops->set_desc(&arg, desc);
905d9109698SJiang Liu 
906b6140914SThomas Gleixner 		virq = __irq_domain_alloc_irqs(domain, -1, desc->nvec_used,
90706ee6d57SThomas Gleixner 					       dev_to_node(dev), &arg, false,
9080972fa57SThomas Gleixner 					       desc->affinity);
9090f62d941SThomas Gleixner 		if (virq < 0)
9100f62d941SThomas Gleixner 			return msi_handle_pci_fail(domain, desc, allocated);
911d9109698SJiang Liu 
91207557ccbSThomas Gleixner 		for (i = 0; i < desc->nvec_used; i++) {
913d9109698SJiang Liu 			irq_set_msi_desc_off(virq, i, desc);
91407557ccbSThomas Gleixner 			irq_debugfs_copy_devname(virq + i, dev);
915ef8dd015SThomas Gleixner 			ret = msi_init_virq(domain, virq + i, vflags);
916bb9b428aSThomas Gleixner 			if (ret)
9170f62d941SThomas Gleixner 				return ret;
91874a5257aSThomas Gleixner 		}
919bf5e758fSThomas Gleixner 		if (info->flags & MSI_FLAG_DEV_SYSFS) {
920bf5e758fSThomas Gleixner 			ret = msi_sysfs_populate_desc(dev, desc);
921bf5e758fSThomas Gleixner 			if (ret)
922bf5e758fSThomas Gleixner 				return ret;
923bf5e758fSThomas Gleixner 		}
924ef8dd015SThomas Gleixner 		allocated++;
925d9109698SJiang Liu 	}
926d9109698SJiang Liu 	return 0;
9270f62d941SThomas Gleixner }
9280f62d941SThomas Gleixner 
929645474e2SThomas Gleixner static int msi_domain_add_simple_msi_descs(struct msi_domain_info *info,
930645474e2SThomas Gleixner 					   struct device *dev,
931645474e2SThomas Gleixner 					   unsigned int num_descs)
932645474e2SThomas Gleixner {
933645474e2SThomas Gleixner 	if (!(info->flags & MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS))
934645474e2SThomas Gleixner 		return 0;
935645474e2SThomas Gleixner 
936645474e2SThomas Gleixner 	return msi_add_simple_msi_descs(dev, 0, num_descs);
937645474e2SThomas Gleixner }
938645474e2SThomas Gleixner 
9390f62d941SThomas Gleixner /**
9400f62d941SThomas Gleixner  * msi_domain_alloc_irqs_descs_locked - Allocate interrupts from a MSI interrupt domain
9410f62d941SThomas Gleixner  * @domain:	The domain to allocate from
9420f62d941SThomas Gleixner  * @dev:	Pointer to device struct of the device for which the interrupts
9430f62d941SThomas Gleixner  *		are allocated
9440f62d941SThomas Gleixner  * @nvec:	The number of interrupts to allocate
9450f62d941SThomas Gleixner  *
9460f62d941SThomas Gleixner  * Must be invoked from within a msi_lock_descs() / msi_unlock_descs()
9470f62d941SThomas Gleixner  * pair. Use this for MSI irqdomains which implement their own vector
9480f62d941SThomas Gleixner  * allocation/free.
9490f62d941SThomas Gleixner  *
9500f62d941SThomas Gleixner  * Return: %0 on success or an error code.
9510f62d941SThomas Gleixner  */
9520f62d941SThomas Gleixner int msi_domain_alloc_irqs_descs_locked(struct irq_domain *domain, struct device *dev,
9530f62d941SThomas Gleixner 				       int nvec)
9540f62d941SThomas Gleixner {
9550f62d941SThomas Gleixner 	struct msi_domain_info *info = domain->host_data;
9560f62d941SThomas Gleixner 	struct msi_domain_ops *ops = info->ops;
9570f62d941SThomas Gleixner 	int ret;
9580f62d941SThomas Gleixner 
9590f62d941SThomas Gleixner 	lockdep_assert_held(&dev->msi.data->mutex);
9600f62d941SThomas Gleixner 
	/* MSI parent domains are not valid allocation targets */
9613e86a3a3SThomas Gleixner 	if (WARN_ON_ONCE(irq_domain_is_msi_parent(domain))) {
9623e86a3a3SThomas Gleixner 		ret = -EINVAL;
9633e86a3a3SThomas Gleixner 		goto free;
9643e86a3a3SThomas Gleixner 	}
9653e86a3a3SThomas Gleixner 
9663e86a3a3SThomas Gleixner 	/* Frees allocated descriptors in case of failure. */
967645474e2SThomas Gleixner 	ret = msi_domain_add_simple_msi_descs(info, dev, nvec);
968645474e2SThomas Gleixner 	if (ret)
9693e86a3a3SThomas Gleixner 		goto free;
970645474e2SThomas Gleixner 
9710f62d941SThomas Gleixner 	ret = ops->domain_alloc_irqs(domain, dev, nvec);
9723e86a3a3SThomas Gleixner 	if (!ret)
9733e86a3a3SThomas Gleixner 		return 0;
9743e86a3a3SThomas Gleixner free:
9750f62d941SThomas Gleixner 	msi_domain_free_irqs_descs_locked(domain, dev);
976bb9b428aSThomas Gleixner 	return ret;
977d9109698SJiang Liu }
978d9109698SJiang Liu 
979d9109698SJiang Liu /**
98043e9e705SThomas Gleixner  * msi_domain_alloc_irqs - Allocate interrupts from a MSI interrupt domain
98143e9e705SThomas Gleixner  * @domain:	The domain to allocate from
982d9109698SJiang Liu  * @dev:	Pointer to device struct of the device for which the interrupts
98343e9e705SThomas Gleixner  *		are allocated
98443e9e705SThomas Gleixner  * @nvec:	The number of interrupts to allocate
98543e9e705SThomas Gleixner  *
9863b35e7e6SRandy Dunlap  * Return: %0 on success or an error code.
987d9109698SJiang Liu  */
9880f62d941SThomas Gleixner int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev, int nvec)
98943e9e705SThomas Gleixner {
990bf6e054eSThomas Gleixner 	int ret;
99143e9e705SThomas Gleixner 
	/* Hold the MSI descriptor mutex of @dev over the allocation */
9920f62d941SThomas Gleixner 	msi_lock_descs(dev);
9930f62d941SThomas Gleixner 	ret = msi_domain_alloc_irqs_descs_locked(domain, dev, nvec);
9940f62d941SThomas Gleixner 	msi_unlock_descs(dev);
995bf6e054eSThomas Gleixner 	return ret;
99643e9e705SThomas Gleixner }
99743e9e705SThomas Gleixner 
/*
 * Default interrupt free: deactivate and free all interrupts which are
 * associated to a MSI descriptor of @dev and remove the optional sysfs
 * representation.
 */
998057c97a1SThomas Gleixner static void __msi_domain_free_irqs(struct irq_domain *domain, struct device *dev)
999d9109698SJiang Liu {
1000bf5e758fSThomas Gleixner 	struct msi_domain_info *info = domain->host_data;
1001ef8dd015SThomas Gleixner 	struct irq_data *irqd;
1002d9109698SJiang Liu 	struct msi_desc *desc;
1003dbbc9357SBixuan Cui 	int i;
1004dbbc9357SBixuan Cui 
1005ef8dd015SThomas Gleixner 	/* Only handle MSI entries which have an interrupt associated */
1006ef8dd015SThomas Gleixner 	msi_for_each_desc(desc, dev, MSI_DESC_ASSOCIATED) {
1007ef8dd015SThomas Gleixner 		/* Make sure all interrupts are deactivated */
1008ef8dd015SThomas Gleixner 		for (i = 0; i < desc->nvec_used; i++) {
1009ef8dd015SThomas Gleixner 			irqd = irq_domain_get_irq_data(domain, desc->irq + i);
1010ef8dd015SThomas Gleixner 			if (irqd && irqd_is_activated(irqd))
1011ef8dd015SThomas Gleixner 				irq_domain_deactivate_irq(irqd);
1012dbbc9357SBixuan Cui 		}
1013d9109698SJiang Liu 
1014d9109698SJiang Liu 		irq_domain_free_irqs(desc->irq, desc->nvec_used);
1015bf5e758fSThomas Gleixner 		if (info->flags & MSI_FLAG_DEV_SYSFS)
1016bf5e758fSThomas Gleixner 			msi_sysfs_remove_desc(dev, desc);
1017d9109698SJiang Liu 		desc->irq = 0;
1018d9109698SJiang Liu 	}
1019d9109698SJiang Liu }
1020d9109698SJiang Liu 
1021645474e2SThomas Gleixner static void msi_domain_free_msi_descs(struct msi_domain_info *info,
1022645474e2SThomas Gleixner 				      struct device *dev)
1023645474e2SThomas Gleixner {
1024645474e2SThomas Gleixner 	if (info->flags & MSI_FLAG_FREE_MSI_DESCS)
1025645474e2SThomas Gleixner 		msi_free_msi_descs(dev);
1026645474e2SThomas Gleixner }
1027645474e2SThomas Gleixner 
1028d9109698SJiang Liu /**
10290f62d941SThomas Gleixner  * msi_domain_free_irqs_descs_locked - Free interrupts from a MSI interrupt @domain associated to @dev
10300f62d941SThomas Gleixner  * @domain:	The domain to managing the interrupts
10310f62d941SThomas Gleixner  * @dev:	Pointer to device struct of the device for which the interrupts
10320f62d941SThomas Gleixner  *		are free
10330f62d941SThomas Gleixner  *
10340f62d941SThomas Gleixner  * Must be invoked from within a msi_lock_descs() / msi_unlock_descs()
10350f62d941SThomas Gleixner  * pair. Use this for MSI irqdomains which implement their own vector
10360f62d941SThomas Gleixner  * allocation.
10370f62d941SThomas Gleixner  */
10380f62d941SThomas Gleixner void msi_domain_free_irqs_descs_locked(struct irq_domain *domain, struct device *dev)
10390f62d941SThomas Gleixner {
10400f62d941SThomas Gleixner 	struct msi_domain_info *info = domain->host_data;
10410f62d941SThomas Gleixner 	struct msi_domain_ops *ops = info->ops;
10420f62d941SThomas Gleixner 
10430f62d941SThomas Gleixner 	lockdep_assert_held(&dev->msi.data->mutex);
10440f62d941SThomas Gleixner 
10453e86a3a3SThomas Gleixner 	if (WARN_ON_ONCE(irq_domain_is_msi_parent(domain)))
10463e86a3a3SThomas Gleixner 		return;
10473e86a3a3SThomas Gleixner 
10480f62d941SThomas Gleixner 	ops->domain_free_irqs(domain, dev);
	/* Optional hook for final cleanup after the interrupts are gone */
1049f6d3486aSThomas Gleixner 	if (ops->msi_post_free)
1050f6d3486aSThomas Gleixner 		ops->msi_post_free(domain, dev);
1051645474e2SThomas Gleixner 	msi_domain_free_msi_descs(info, dev);
10520f62d941SThomas Gleixner }
10530f62d941SThomas Gleixner 
10540f62d941SThomas Gleixner /**
10553b35e7e6SRandy Dunlap  * msi_domain_free_irqs - Free interrupts from a MSI interrupt @domain associated to @dev
105643e9e705SThomas Gleixner  * @domain:	The domain to managing the interrupts
105743e9e705SThomas Gleixner  * @dev:	Pointer to device struct of the device for which the interrupts
105843e9e705SThomas Gleixner  *		are free
105943e9e705SThomas Gleixner  */
106043e9e705SThomas Gleixner void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev)
106143e9e705SThomas Gleixner {
	/* Hold the MSI descriptor mutex of @dev over the whole teardown */
10620f62d941SThomas Gleixner 	msi_lock_descs(dev);
10630f62d941SThomas Gleixner 	msi_domain_free_irqs_descs_locked(domain, dev);
10640f62d941SThomas Gleixner 	msi_unlock_descs(dev);
106543e9e705SThomas Gleixner }
106643e9e705SThomas Gleixner 
106743e9e705SThomas Gleixner /**
1068f3cf8bb0SJiang Liu  * msi_get_domain_info - Get the MSI interrupt domain info for @domain
1069f3cf8bb0SJiang Liu  * @domain:	The interrupt domain to retrieve data from
1070f3cf8bb0SJiang Liu  *
10713b35e7e6SRandy Dunlap  * Return: the pointer to the msi_domain_info stored in @domain->host_data.
1072f3cf8bb0SJiang Liu  */
1073f3cf8bb0SJiang Liu struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain)
1074f3cf8bb0SJiang Liu {
1075f3cf8bb0SJiang Liu 	return (struct msi_domain_info *)domain->host_data;
1076f3cf8bb0SJiang Liu }
1077