xref: /openbmc/linux/kernel/irq/msi.c (revision d802057c)
152a65ff5SThomas Gleixner // SPDX-License-Identifier: GPL-2.0
2f3cf8bb0SJiang Liu /*
3f3cf8bb0SJiang Liu  * Copyright (C) 2014 Intel Corp.
4f3cf8bb0SJiang Liu  * Author: Jiang Liu <jiang.liu@linux.intel.com>
5f3cf8bb0SJiang Liu  *
6f3cf8bb0SJiang Liu  * This file is licensed under GPLv2.
7f3cf8bb0SJiang Liu  *
8a359f757SIngo Molnar  * This file contains common code to support Message Signaled Interrupts for
9f3cf8bb0SJiang Liu  * PCI compatible and non PCI compatible devices.
10f3cf8bb0SJiang Liu  */
11aeeb5965SJiang Liu #include <linux/types.h>
12aeeb5965SJiang Liu #include <linux/device.h>
13f3cf8bb0SJiang Liu #include <linux/irq.h>
14f3cf8bb0SJiang Liu #include <linux/irqdomain.h>
15f3cf8bb0SJiang Liu #include <linux/msi.h>
164e201566SMarc Zyngier #include <linux/slab.h>
173ba1f050SThomas Gleixner #include <linux/sysfs.h>
182f170814SBarry Song #include <linux/pci.h>
19d9109698SJiang Liu 
2007557ccbSThomas Gleixner #include "internals.h"
2107557ccbSThomas Gleixner 
22bf5e758fSThomas Gleixner static inline int msi_sysfs_create_group(struct device *dev);
23cc9a246dSThomas Gleixner 
2428f4b041SThomas Gleixner /**
25cc9a246dSThomas Gleixner  * msi_alloc_desc - Allocate an initialized msi_desc
2628f4b041SThomas Gleixner  * @dev:	Pointer to the device for which this is allocated
2728f4b041SThomas Gleixner  * @nvec:	The number of vectors used in this entry
2828f4b041SThomas Gleixner  * @affinity:	Optional pointer to an affinity mask array size of @nvec
2928f4b041SThomas Gleixner  *
303b35e7e6SRandy Dunlap  * If @affinity is not %NULL then an affinity array[@nvec] is allocated
31bec04037SDou Liyang  * and the affinity masks and flags from @affinity are copied.
323b35e7e6SRandy Dunlap  *
333b35e7e6SRandy Dunlap  * Return: pointer to allocated &msi_desc on success or %NULL on failure
3428f4b041SThomas Gleixner  */
35cc9a246dSThomas Gleixner static struct msi_desc *msi_alloc_desc(struct device *dev, int nvec,
36bec04037SDou Liyang 					const struct irq_affinity_desc *affinity)
37aa48b6f7SJiang Liu {
38cc9a246dSThomas Gleixner 	struct msi_desc *desc = kzalloc(sizeof(*desc), GFP_KERNEL);
3928f4b041SThomas Gleixner 
40aa48b6f7SJiang Liu 	if (!desc)
41aa48b6f7SJiang Liu 		return NULL;
42aa48b6f7SJiang Liu 
43aa48b6f7SJiang Liu 	desc->dev = dev;
4428f4b041SThomas Gleixner 	desc->nvec_used = nvec;
4528f4b041SThomas Gleixner 	if (affinity) {
46cc9a246dSThomas Gleixner 		desc->affinity = kmemdup(affinity, nvec * sizeof(*desc->affinity), GFP_KERNEL);
4728f4b041SThomas Gleixner 		if (!desc->affinity) {
4828f4b041SThomas Gleixner 			kfree(desc);
4928f4b041SThomas Gleixner 			return NULL;
5028f4b041SThomas Gleixner 		}
5128f4b041SThomas Gleixner 	}
52aa48b6f7SJiang Liu 	return desc;
53aa48b6f7SJiang Liu }
54aa48b6f7SJiang Liu 
/* Free a descriptor allocated by msi_alloc_desc(), including the optional affinity array. */
static void msi_free_desc(struct msi_desc *desc)
{
	kfree(desc->affinity);
	kfree(desc);
}
60aa48b6f7SJiang Liu 
61cd6cf065SThomas Gleixner static int msi_insert_desc(struct msi_device_data *md, struct msi_desc *desc, unsigned int index)
62cd6cf065SThomas Gleixner {
63cd6cf065SThomas Gleixner 	int ret;
64cd6cf065SThomas Gleixner 
65cd6cf065SThomas Gleixner 	desc->msi_index = index;
66cd6cf065SThomas Gleixner 	ret = xa_insert(&md->__store, index, desc, GFP_KERNEL);
67cd6cf065SThomas Gleixner 	if (ret)
68cd6cf065SThomas Gleixner 		msi_free_desc(desc);
69cd6cf065SThomas Gleixner 	return ret;
70cd6cf065SThomas Gleixner }
71cd6cf065SThomas Gleixner 
/**
 * msi_add_msi_desc - Allocate and initialize a MSI descriptor
 * @dev:	Pointer to the device for which the descriptor is allocated
 * @init_desc:	Pointer to an MSI descriptor to initialize the new descriptor
 *
 * Must be called with the MSI descriptor mutex held (see msi_lock_descs()).
 *
 * Return: 0 on success or an appropriate failure code.
 */
int msi_add_msi_desc(struct device *dev, struct msi_desc *init_desc)
{
	struct msi_desc *desc;

	lockdep_assert_held(&dev->msi.data->mutex);

	desc = msi_alloc_desc(dev, init_desc->nvec_used, init_desc->affinity);
	if (!desc)
		return -ENOMEM;

	/* Copy type specific data to the new descriptor. */
	desc->pci = init_desc->pci;
	/* msi_insert_desc() frees @desc on failure. */
	return msi_insert_desc(dev->msi.data, desc, init_desc->msi_index);
}
9360290525SThomas Gleixner 
/**
 * msi_add_simple_msi_descs - Allocate and initialize MSI descriptors
 * @dev:	Pointer to the device for which the descriptors are allocated
 * @index:	Index for the first MSI descriptor
 * @ndesc:	Number of descriptors to allocate
 *
 * Allocates @ndesc descriptors with a single vector each and inserts them
 * at consecutive indices starting at @index. On failure, all descriptors
 * inserted by this call (still unassociated with an irq) are freed again.
 *
 * Must be called with the MSI descriptor mutex held.
 *
 * Return: 0 on success or an appropriate failure code.
 */
static int msi_add_simple_msi_descs(struct device *dev, unsigned int index, unsigned int ndesc)
{
	unsigned int idx, last = index + ndesc - 1;
	struct msi_desc *desc;
	int ret;

	lockdep_assert_held(&dev->msi.data->mutex);

	for (idx = index; idx <= last; idx++) {
		desc = msi_alloc_desc(dev, 1, NULL);
		if (!desc)
			goto fail_mem;
		/* Insertion failure frees @desc; only unwind prior indices. */
		ret = msi_insert_desc(dev->msi.data, desc, idx);
		if (ret)
			goto fail;
	}
	return 0;

fail_mem:
	ret = -ENOMEM;
fail:
	/* MSI_DESC_NOTASSOCIATED limits the cleanup to freshly added entries. */
	msi_free_msi_descs_range(dev, MSI_DESC_NOTASSOCIATED, index, last);
	return ret;
}
126cd6cf065SThomas Gleixner 
127cd6cf065SThomas Gleixner static bool msi_desc_match(struct msi_desc *desc, enum msi_desc_filter filter)
128cd6cf065SThomas Gleixner {
129cd6cf065SThomas Gleixner 	switch (filter) {
130cd6cf065SThomas Gleixner 	case MSI_DESC_ALL:
131cd6cf065SThomas Gleixner 		return true;
132cd6cf065SThomas Gleixner 	case MSI_DESC_NOTASSOCIATED:
133cd6cf065SThomas Gleixner 		return !desc->irq;
134cd6cf065SThomas Gleixner 	case MSI_DESC_ASSOCIATED:
135cd6cf065SThomas Gleixner 		return !!desc->irq;
136cd6cf065SThomas Gleixner 	}
137cd6cf065SThomas Gleixner 	WARN_ON_ONCE(1);
138cd6cf065SThomas Gleixner 	return false;
13960290525SThomas Gleixner }
14060290525SThomas Gleixner 
/**
 * msi_free_msi_descs_range - Free MSI descriptors of a device
 * @dev:		Device to free the descriptors
 * @filter:		Descriptor state filter
 * @first_index:	Index to start freeing from
 * @last_index:		Last index to be freed
 *
 * Only descriptors in [@first_index, @last_index] which match @filter are
 * removed from the store and freed.
 */
void msi_free_msi_descs_range(struct device *dev, enum msi_desc_filter filter,
			      unsigned int first_index, unsigned int last_index)
{
	struct xarray *xa = &dev->msi.data->__store;
	struct msi_desc *desc;
	unsigned long idx;

	lockdep_assert_held(&dev->msi.data->mutex);

	xa_for_each_range(xa, idx, desc, first_index, last_index) {
		if (msi_desc_match(desc, filter)) {
			/* Erase before free so the store never holds a stale pointer */
			xa_erase(xa, idx);
			msi_free_desc(desc);
		}
	}
}
164645474e2SThomas Gleixner 
/* Copy the message cached in @entry into @msg. */
void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{
	*msg = entry->msg;
}
16938b6a1cfSJiang Liu 
/*
 * Copy the cached MSI message of interrupt @irq into @msg.
 * NOTE(review): irq_get_msi_desc() result is dereferenced unchecked;
 * presumably callers guarantee @irq is a valid MSI interrupt.
 */
void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg)
{
	struct msi_desc *entry = irq_get_msi_desc(irq);

	__get_cached_msi_msg(entry, msg);
}
EXPORT_SYMBOL_GPL(get_cached_msi_msg);
17738b6a1cfSJiang Liu 
/*
 * devres release callback for the per-device MSI data. All descriptors
 * must have been freed before the device is destroyed, hence the warning
 * when the store is not empty.
 */
static void msi_device_data_release(struct device *dev, void *res)
{
	struct msi_device_data *md = res;

	WARN_ON_ONCE(!xa_empty(&md->__store));
	xa_destroy(&md->__store);
	dev->msi.data = NULL;
}
186013bd8e5SThomas Gleixner 
/**
 * msi_setup_device_data - Setup MSI device data
 * @dev:	Device for which MSI device data should be set up
 *
 * Return: 0 on success, appropriate error code otherwise
 *
 * This can be called more than once for @dev. If the MSI device data is
 * already allocated the call succeeds. The allocated memory is
 * automatically released when the device is destroyed.
 */
int msi_setup_device_data(struct device *dev)
{
	struct msi_device_data *md;
	int ret;

	/* Already set up? Idempotent success. */
	if (dev->msi.data)
		return 0;

	md = devres_alloc(msi_device_data_release, sizeof(*md), GFP_KERNEL);
	if (!md)
		return -ENOMEM;

	/* The sysfs group is devres managed as well; only the raw md needs undoing. */
	ret = msi_sysfs_create_group(dev);
	if (ret) {
		devres_free(md);
		return ret;
	}

	xa_init(&md->__store);
	mutex_init(&md->mutex);
	dev->msi.data = md;
	devres_add(dev, md);
	return 0;
}
221013bd8e5SThomas Gleixner 
/**
 * msi_lock_descs - Lock the MSI descriptor storage of a device
 * @dev:	Device to operate on
 */
void msi_lock_descs(struct device *dev)
{
	mutex_lock(&dev->msi.data->mutex);
}
EXPORT_SYMBOL_GPL(msi_lock_descs);
231b5f687f9SThomas Gleixner 
/**
 * msi_unlock_descs - Unlock the MSI descriptor storage of a device
 * @dev:	Device to operate on
 */
void msi_unlock_descs(struct device *dev)
{
	/* Invalidate the index which was cached by the iterator */
	dev->msi.data->__iter_idx = MSI_MAX_INDEX;
	mutex_unlock(&dev->msi.data->mutex);
}
EXPORT_SYMBOL_GPL(msi_unlock_descs);
243b5f687f9SThomas Gleixner 
/*
 * Walk the store from the cached iterator index and return the first
 * descriptor matching @filter. The iterator index is left pointing at the
 * returned entry, or set to MSI_MAX_INDEX when the walk is exhausted.
 */
static struct msi_desc *msi_find_desc(struct msi_device_data *md, enum msi_desc_filter filter)
{
	struct msi_desc *desc;

	xa_for_each_start(&md->__store, md->__iter_idx, desc, md->__iter_idx) {
		if (msi_desc_match(desc, filter))
			return desc;
	}
	md->__iter_idx = MSI_MAX_INDEX;
	return NULL;
}
2551046f71dSThomas Gleixner 
/**
 * msi_first_desc - Get the first MSI descriptor of a device
 * @dev:	Device to operate on
 * @filter:	Descriptor state filter
 *
 * Must be called with the MSI descriptor mutex held, i.e. msi_lock_descs()
 * must be invoked before the call.
 *
 * Return: Pointer to the first MSI descriptor matching the search
 *	   criteria, NULL if none found.
 */
struct msi_desc *msi_first_desc(struct device *dev, enum msi_desc_filter filter)
{
	struct msi_device_data *md = dev->msi.data;

	if (WARN_ON_ONCE(!md))
		return NULL;

	lockdep_assert_held(&md->mutex);

	/* Restart the iterator from index 0 */
	md->__iter_idx = 0;
	return msi_find_desc(md, filter);
}
EXPORT_SYMBOL_GPL(msi_first_desc);
2801046f71dSThomas Gleixner 
/**
 * msi_next_desc - Get the next MSI descriptor of a device
 * @dev:	Device to operate on
 * @filter:	Descriptor state filter
 *
 * The first invocation of msi_next_desc() has to be preceded by a
 * successful invocation of msi_first_desc(). Consecutive invocations are
 * only valid if the previous one was successful. All these operations have
 * to be done within the same MSI mutex held region.
 *
 * Return: Pointer to the next MSI descriptor matching the search
 *	   criteria, NULL if none found.
 */
struct msi_desc *msi_next_desc(struct device *dev, enum msi_desc_filter filter)
{
	struct msi_device_data *md = dev->msi.data;

	if (WARN_ON_ONCE(!md))
		return NULL;

	lockdep_assert_held(&md->mutex);

	/* Iterator already exhausted (or msi_first_desc() never succeeded) */
	if (md->__iter_idx >= (unsigned long)MSI_MAX_INDEX)
		return NULL;

	md->__iter_idx++;
	return msi_find_desc(md, filter);
}
EXPORT_SYMBOL_GPL(msi_next_desc);
3091046f71dSThomas Gleixner 
/**
 * msi_get_virq - Return Linux interrupt number of a MSI interrupt
 * @dev:	Device to operate on
 * @index:	MSI interrupt index to look for (0-based)
 *
 * Return: The Linux interrupt number on success (> 0), 0 if not found
 */
unsigned int msi_get_virq(struct device *dev, unsigned int index)
{
	struct msi_desc *desc;
	unsigned int ret = 0;
	bool pcimsi;

	if (!dev->msi.data)
		return 0;

	pcimsi = dev_is_pci(dev) ? to_pci_dev(dev)->msi_enabled : false;

	msi_lock_descs(dev);
	/* For PCI-MSI all vectors hang off the descriptor at index 0 */
	desc = xa_load(&dev->msi.data->__store, pcimsi ? 0 : index);
	if (desc && desc->irq) {
		/*
		 * PCI-MSI has only one descriptor for multiple interrupts.
		 * PCI-MSIX and platform MSI use a descriptor per
		 * interrupt.
		 */
		if (pcimsi) {
			if (index < desc->nvec_used)
				ret = desc->irq + index;
		} else {
			ret = desc->irq;
		}
	}
	msi_unlock_descs(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(msi_get_virq);
347cf15f43aSThomas Gleixner 
3481197528aSThomas Gleixner #ifdef CONFIG_SYSFS
/* Initially empty; per-interrupt attribute files are added dynamically. */
static struct attribute *msi_dev_attrs[] = {
	NULL
};

/* The "msi_irqs" sysfs group below the device directory */
static const struct attribute_group msi_irqs_group = {
	.name	= "msi_irqs",
	.attrs	= msi_dev_attrs,
};
357bf5e758fSThomas Gleixner 
/* Create the devres-managed "msi_irqs" sysfs group for @dev. */
static inline int msi_sysfs_create_group(struct device *dev)
{
	return devm_device_add_group(dev, &msi_irqs_group);
}
362bf5e758fSThomas Gleixner 
/* Show callback for the per-interrupt sysfs files: emits "msi" or "msix". */
static ssize_t msi_mode_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	/* MSI vs. MSIX is per device not per interrupt */
	bool is_msix = dev_is_pci(dev) ? to_pci_dev(dev)->msix_enabled : false;

	return sysfs_emit(buf, "%s\n", is_msix ? "msix" : "msi");
}
3712f170814SBarry Song 
/*
 * Remove and free the sysfs attributes of @desc. Safe to call when no
 * attributes were ever populated (desc->sysfs_attrs == NULL) and also on
 * a partially populated array: entries without a ->show callback were
 * never added to the group (see msi_sysfs_populate_desc()).
 */
static void msi_sysfs_remove_desc(struct device *dev, struct msi_desc *desc)
{
	struct device_attribute *attrs = desc->sysfs_attrs;
	int i;

	if (!attrs)
		return;

	desc->sysfs_attrs = NULL;
	for (i = 0; i < desc->nvec_used; i++) {
		if (attrs[i].show)
			sysfs_remove_file_from_group(&dev->kobj, &attrs[i].attr, msi_irqs_group.name);
		kfree(attrs[i].attr.name);
	}
	kfree(attrs);
}
3882f170814SBarry Song 
/*
 * Create one sysfs file per vector of @desc in the "msi_irqs" group. The
 * file name is the Linux interrupt number. On failure everything created
 * so far is torn down again.
 *
 * Return: 0 on success, -ENOMEM or a sysfs error code otherwise.
 */
static int msi_sysfs_populate_desc(struct device *dev, struct msi_desc *desc)
{
	struct device_attribute *attrs;
	int ret, i;

	attrs = kcalloc(desc->nvec_used, sizeof(*attrs), GFP_KERNEL);
	if (!attrs)
		return -ENOMEM;

	desc->sysfs_attrs = attrs;
	for (i = 0; i < desc->nvec_used; i++) {
		sysfs_attr_init(&attrs[i].attr);
		/* File name is the Linux irq number of this vector */
		attrs[i].attr.name = kasprintf(GFP_KERNEL, "%d", desc->irq + i);
		if (!attrs[i].attr.name) {
			ret = -ENOMEM;
			goto fail;
		}

		attrs[i].attr.mode = 0444;
		attrs[i].show = msi_mode_show;

		ret = sysfs_add_file_to_group(&dev->kobj, &attrs[i].attr, msi_irqs_group.name);
		if (ret) {
			/* Clear ->show so cleanup knows this file was not added */
			attrs[i].show = NULL;
			goto fail;
		}
	}
	return 0;

fail:
	msi_sysfs_remove_desc(dev, desc);
	return ret;
}
422bf5e758fSThomas Gleixner 
423bf5e758fSThomas Gleixner #ifdef CONFIG_PCI_MSI_ARCH_FALLBACKS
/**
 * msi_device_populate_sysfs - Populate msi_irqs sysfs entries for a device
 * @dev:	The device (PCI, platform etc) which will get sysfs entries
 *
 * Return: 0 on success, error code from msi_sysfs_populate_desc() otherwise.
 * On failure, descriptors populated so far keep their sysfs entries;
 * presumably the caller invokes msi_device_destroy_sysfs() - verify.
 */
int msi_device_populate_sysfs(struct device *dev)
{
	struct msi_desc *desc;
	int ret;

	msi_for_each_desc(desc, dev, MSI_DESC_ASSOCIATED) {
		/* Skip descriptors which are already populated */
		if (desc->sysfs_attrs)
			continue;
		ret = msi_sysfs_populate_desc(dev, desc);
		if (ret)
			return ret;
	}
	return 0;
}
442bf6e054eSThomas Gleixner 
/**
 * msi_device_destroy_sysfs - Destroy msi_irqs sysfs entries for a device
 * @dev:		The device (PCI, platform etc) for which to remove
 *			sysfs entries
 */
void msi_device_destroy_sysfs(struct device *dev)
{
	struct msi_desc *desc;

	msi_for_each_desc(desc, dev, MSI_DESC_ALL)
		msi_sysfs_remove_desc(dev, desc);
}
#endif /* CONFIG_PCI_MSI_ARCH_FALLBACKS */
456bf5e758fSThomas Gleixner #else /* CONFIG_SYSFS */
457bf5e758fSThomas Gleixner static inline int msi_sysfs_create_group(struct device *dev) { return 0; }
458bf5e758fSThomas Gleixner static inline int msi_sysfs_populate_desc(struct device *dev, struct msi_desc *desc) { return 0; }
459bf5e758fSThomas Gleixner static inline void msi_sysfs_remove_desc(struct device *dev, struct msi_desc *desc) { }
460bf5e758fSThomas Gleixner #endif /* !CONFIG_SYSFS */
4612f170814SBarry Song 
462f3cf8bb0SJiang Liu #ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
/* Forward @msg to the irq chip's write callback. */
static inline void irq_chip_write_msi_msg(struct irq_data *data,
					  struct msi_msg *msg)
{
	data->chip->irq_write_msi_msg(data, msg);
}
46874faaf7aSThomas Gleixner 
/*
 * Sanity check the second (level-triggered) message of a two-message MSI.
 */
static void msi_check_level(struct irq_domain *domain, struct msi_msg *msg)
{
	struct msi_domain_info *info = domain->host_data;

	/*
	 * If the MSI provider has messed with the second message and
	 * not advertised that it is level-capable, signal the breakage.
	 */
	WARN_ON(!((info->flags & MSI_FLAG_LEVEL_CAPABLE) &&
		  (info->chip->flags & IRQCHIP_SUPPORTS_LEVEL_MSI)) &&
		(msg[1].address_lo || msg[1].address_hi || msg[1].data));
}
4810be8153cSMarc Zyngier 
/**
 * msi_domain_set_affinity - Generic affinity setter function for MSI domains
 * @irq_data:	The irq data associated to the interrupt
 * @mask:	The affinity mask to set
 * @force:	Flag to enforce setting (disable online checks)
 *
 * Intended to be used by MSI interrupt controllers which are
 * implemented with hierarchical domains.
 *
 * Return: IRQ_SET_MASK_* result code
 */
int msi_domain_set_affinity(struct irq_data *irq_data,
			    const struct cpumask *mask, bool force)
{
	struct irq_data *parent = irq_data->parent_data;
	struct msi_msg msg[2] = { [1] = { }, };
	int ret;

	ret = parent->chip->irq_set_affinity(parent, mask, force);
	if (ret >= 0 && ret != IRQ_SET_MASK_OK_DONE) {
		/* Parent changed the target; recompose and rewrite the message */
		BUG_ON(irq_chip_compose_msi_msg(irq_data, msg));
		msi_check_level(irq_data->domain, msg);
		irq_chip_write_msi_msg(irq_data, msg);
	}

	return ret;
}
509f3cf8bb0SJiang Liu 
/* irq_domain ->activate(): compose and write the message for @irq_data. */
static int msi_domain_activate(struct irq_domain *domain,
			       struct irq_data *irq_data, bool early)
{
	struct msi_msg msg[2] = { [1] = { }, };

	BUG_ON(irq_chip_compose_msi_msg(irq_data, msg));
	msi_check_level(irq_data->domain, msg);
	irq_chip_write_msi_msg(irq_data, msg);
	return 0;
}
520f3cf8bb0SJiang Liu 
/* irq_domain ->deactivate(): write an all-zero message to quiesce the interrupt. */
static void msi_domain_deactivate(struct irq_domain *domain,
				  struct irq_data *irq_data)
{
	struct msi_msg msg[2];

	memset(msg, 0, sizeof(msg));
	irq_chip_write_msi_msg(irq_data, msg);
}
529f3cf8bb0SJiang Liu 
530f3cf8bb0SJiang Liu static int msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
531f3cf8bb0SJiang Liu 			    unsigned int nr_irqs, void *arg)
532f3cf8bb0SJiang Liu {
533f3cf8bb0SJiang Liu 	struct msi_domain_info *info = domain->host_data;
534f3cf8bb0SJiang Liu 	struct msi_domain_ops *ops = info->ops;
535f3cf8bb0SJiang Liu 	irq_hw_number_t hwirq = ops->get_hwirq(info, arg);
536f3cf8bb0SJiang Liu 	int i, ret;
537f3cf8bb0SJiang Liu 
538f3cf8bb0SJiang Liu 	if (irq_find_mapping(domain, hwirq) > 0)
539f3cf8bb0SJiang Liu 		return -EEXIST;
540f3cf8bb0SJiang Liu 
541bf6f869fSLiu Jiang 	if (domain->parent) {
542f3cf8bb0SJiang Liu 		ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
543f3cf8bb0SJiang Liu 		if (ret < 0)
544f3cf8bb0SJiang Liu 			return ret;
545bf6f869fSLiu Jiang 	}
546f3cf8bb0SJiang Liu 
547f3cf8bb0SJiang Liu 	for (i = 0; i < nr_irqs; i++) {
548f3cf8bb0SJiang Liu 		ret = ops->msi_init(domain, info, virq + i, hwirq + i, arg);
549f3cf8bb0SJiang Liu 		if (ret < 0) {
550f3cf8bb0SJiang Liu 			if (ops->msi_free) {
551f3cf8bb0SJiang Liu 				for (i--; i > 0; i--)
552f3cf8bb0SJiang Liu 					ops->msi_free(domain, info, virq + i);
553f3cf8bb0SJiang Liu 			}
554f3cf8bb0SJiang Liu 			irq_domain_free_irqs_top(domain, virq, nr_irqs);
555f3cf8bb0SJiang Liu 			return ret;
556f3cf8bb0SJiang Liu 		}
557f3cf8bb0SJiang Liu 	}
558f3cf8bb0SJiang Liu 
559f3cf8bb0SJiang Liu 	return 0;
560f3cf8bb0SJiang Liu }
561f3cf8bb0SJiang Liu 
/*
 * irq_domain ->free(): run the optional per-interrupt msi_free() callback
 * for each interrupt, then release the irqs in this and the parent domains.
 */
static void msi_domain_free(struct irq_domain *domain, unsigned int virq,
			    unsigned int nr_irqs)
{
	struct msi_domain_info *info = domain->host_data;
	int i;

	if (info->ops->msi_free) {
		for (i = 0; i < nr_irqs; i++)
			info->ops->msi_free(domain, info, virq + i);
	}
	irq_domain_free_irqs_top(domain, virq, nr_irqs);
}
574f3cf8bb0SJiang Liu 
/* Generic irq_domain callbacks shared by all MSI irq domains */
static const struct irq_domain_ops msi_domain_ops = {
	.alloc		= msi_domain_alloc,
	.free		= msi_domain_free,
	.activate	= msi_domain_activate,
	.deactivate	= msi_domain_deactivate,
};
581f3cf8bb0SJiang Liu 
/* Default ->get_hwirq(): the hwirq is taken directly from the alloc info. */
static irq_hw_number_t msi_domain_ops_get_hwirq(struct msi_domain_info *info,
						msi_alloc_info_t *arg)
{
	return arg->hwirq;
}
587aeeb5965SJiang Liu 
/* Default ->msi_prepare(): just zero the allocation info. */
static int msi_domain_ops_prepare(struct irq_domain *domain, struct device *dev,
				  int nvec, msi_alloc_info_t *arg)
{
	memset(arg, 0, sizeof(*arg));
	return 0;
}
594aeeb5965SJiang Liu 
/* Default ->set_desc(): store the descriptor pointer in the alloc info. */
static void msi_domain_ops_set_desc(msi_alloc_info_t *arg,
				    struct msi_desc *desc)
{
	arg->desc = desc;
}
600aeeb5965SJiang Liu 
/*
 * Default ->msi_init(): associate the hwirq with the domain's irq chip and
 * optionally install the flow handler and handler data from @info.
 */
static int msi_domain_ops_init(struct irq_domain *domain,
			       struct msi_domain_info *info,
			       unsigned int virq, irq_hw_number_t hwirq,
			       msi_alloc_info_t *arg)
{
	irq_domain_set_hwirq_and_chip(domain, virq, hwirq, info->chip,
				      info->chip_data);
	if (info->handler && info->handler_name) {
		__irq_set_handler(virq, info->handler, 0, info->handler_name);
		if (info->handler_data)
			irq_set_handler_data(virq, info->handler_data);
	}
	return 0;
}
615aeeb5965SJiang Liu 
/* Default ->msi_check(): no restrictions, always succeeds. */
static int msi_domain_ops_check(struct irq_domain *domain,
				struct msi_domain_info *info,
				struct device *dev)
{
	return 0;
}
622aeeb5965SJiang Liu 
/* Fallback implementations used to fill callbacks left NULL by the caller. */
623aeeb5965SJiang Liu static struct msi_domain_ops msi_domain_ops_default = {
624aeeb5965SJiang Liu 	.get_hwirq		= msi_domain_ops_get_hwirq,
625aeeb5965SJiang Liu 	.msi_init		= msi_domain_ops_init,
626aeeb5965SJiang Liu 	.msi_check		= msi_domain_ops_check,
627aeeb5965SJiang Liu 	.msi_prepare		= msi_domain_ops_prepare,
628aeeb5965SJiang Liu 	.set_desc		= msi_domain_ops_set_desc,
62943e9e705SThomas Gleixner 	.domain_alloc_irqs	= __msi_domain_alloc_irqs,
63043e9e705SThomas Gleixner 	.domain_free_irqs	= __msi_domain_free_irqs,
631aeeb5965SJiang Liu };
632aeeb5965SJiang Liu 
/*
 * Fill in defaults for any msi_domain_ops callbacks the caller left NULL.
 *
 * A completely absent ops pointer selects the full default set.
 * domain_alloc_irqs/domain_free_irqs are always defaulted; the remaining
 * per-interrupt callbacks are only defaulted when the domain opted in
 * via MSI_FLAG_USE_DEF_DOM_OPS.
 */
633aeeb5965SJiang Liu static void msi_domain_update_dom_ops(struct msi_domain_info *info)
634aeeb5965SJiang Liu {
635aeeb5965SJiang Liu 	struct msi_domain_ops *ops = info->ops;
636aeeb5965SJiang Liu 
637aeeb5965SJiang Liu 	if (ops == NULL) {
638aeeb5965SJiang Liu 		info->ops = &msi_domain_ops_default;
639aeeb5965SJiang Liu 		return;
640aeeb5965SJiang Liu 	}
641aeeb5965SJiang Liu 
64243e9e705SThomas Gleixner 	if (ops->domain_alloc_irqs == NULL)
64343e9e705SThomas Gleixner 		ops->domain_alloc_irqs = msi_domain_ops_default.domain_alloc_irqs;
64443e9e705SThomas Gleixner 	if (ops->domain_free_irqs == NULL)
64543e9e705SThomas Gleixner 		ops->domain_free_irqs = msi_domain_ops_default.domain_free_irqs;
64643e9e705SThomas Gleixner 
64743e9e705SThomas Gleixner 	if (!(info->flags & MSI_FLAG_USE_DEF_DOM_OPS))
64843e9e705SThomas Gleixner 		return;
64943e9e705SThomas Gleixner 
650aeeb5965SJiang Liu 	if (ops->get_hwirq == NULL)
651aeeb5965SJiang Liu 		ops->get_hwirq = msi_domain_ops_default.get_hwirq;
652aeeb5965SJiang Liu 	if (ops->msi_init == NULL)
653aeeb5965SJiang Liu 		ops->msi_init = msi_domain_ops_default.msi_init;
654aeeb5965SJiang Liu 	if (ops->msi_check == NULL)
655aeeb5965SJiang Liu 		ops->msi_check = msi_domain_ops_default.msi_check;
656aeeb5965SJiang Liu 	if (ops->msi_prepare == NULL)
657aeeb5965SJiang Liu 		ops->msi_prepare = msi_domain_ops_default.msi_prepare;
658aeeb5965SJiang Liu 	if (ops->set_desc == NULL)
659aeeb5965SJiang Liu 		ops->set_desc = msi_domain_ops_default.set_desc;
660aeeb5965SJiang Liu }
661aeeb5965SJiang Liu 
/*
 * Sanity-check the irq_chip (mask/unmask are mandatory) and default its
 * irq_set_affinity callback to msi_domain_set_affinity() when unset.
 */
662aeeb5965SJiang Liu static void msi_domain_update_chip_ops(struct msi_domain_info *info)
663aeeb5965SJiang Liu {
664aeeb5965SJiang Liu 	struct irq_chip *chip = info->chip;
665aeeb5965SJiang Liu 
6660701c53eSMarc Zyngier 	BUG_ON(!chip || !chip->irq_mask || !chip->irq_unmask);
667aeeb5965SJiang Liu 	if (!chip->irq_set_affinity)
668aeeb5965SJiang Liu 		chip->irq_set_affinity = msi_domain_set_affinity;
669aeeb5965SJiang Liu }
670aeeb5965SJiang Liu 
671f3cf8bb0SJiang Liu /**
6723b35e7e6SRandy Dunlap  * msi_create_irq_domain - Create an MSI interrupt domain
673be5436c8SMarc Zyngier  * @fwnode:	Optional fwnode of the interrupt controller
674f3cf8bb0SJiang Liu  * @info:	MSI domain info
675f3cf8bb0SJiang Liu  * @parent:	Parent irq domain
6763b35e7e6SRandy Dunlap  *
6773b35e7e6SRandy Dunlap  * Return: pointer to the created &struct irq_domain or %NULL on failure
678f3cf8bb0SJiang Liu  */
679be5436c8SMarc Zyngier struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode,
680f3cf8bb0SJiang Liu 					 struct msi_domain_info *info,
681f3cf8bb0SJiang Liu 					 struct irq_domain *parent)
682f3cf8bb0SJiang Liu {
683a97b852bSMarc Zyngier 	struct irq_domain *domain;
684a97b852bSMarc Zyngier 
	/* Fill in default domain ops and (optionally) chip ops first */
685aeeb5965SJiang Liu 	msi_domain_update_dom_ops(info);
686aeeb5965SJiang Liu 	if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
687aeeb5965SJiang Liu 		msi_domain_update_chip_ops(info);
688f3cf8bb0SJiang Liu 
689a97b852bSMarc Zyngier 	domain = irq_domain_create_hierarchy(parent, IRQ_DOMAIN_FLAG_MSI, 0,
69088156f00SEric Auger 					     fwnode, &msi_domain_ops, info);
6910165308aSThomas Gleixner 
	/* Borrow the irq_chip name when the new domain has none of its own */
6920165308aSThomas Gleixner 	if (domain && !domain->name && info->chip)
693a97b852bSMarc Zyngier 	domain->name = info->chip->name;
694a97b852bSMarc Zyngier 
695a97b852bSMarc Zyngier 	return domain;
696f3cf8bb0SJiang Liu }
697f3cf8bb0SJiang Liu 
/*
 * Run the domain's msi_check() and, on success, msi_prepare() callbacks
 * for a pending allocation of @nvec vectors on @dev, filling @arg.
 * Returns 0 or the first negative error code from either callback.
 */
698b2eba39bSMarc Zyngier int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev,
699b2eba39bSMarc Zyngier 			    int nvec, msi_alloc_info_t *arg)
700b2eba39bSMarc Zyngier {
701b2eba39bSMarc Zyngier 	struct msi_domain_info *info = domain->host_data;
702b2eba39bSMarc Zyngier 	struct msi_domain_ops *ops = info->ops;
703b2eba39bSMarc Zyngier 	int ret;
704b2eba39bSMarc Zyngier 
705b2eba39bSMarc Zyngier 	ret = ops->msi_check(domain, info, dev);
706b2eba39bSMarc Zyngier 	if (ret == 0)
707b2eba39bSMarc Zyngier 		ret = ops->msi_prepare(domain, dev, nvec, arg);
708b2eba39bSMarc Zyngier 
709b2eba39bSMarc Zyngier 	return ret;
710b2eba39bSMarc Zyngier }
711b2eba39bSMarc Zyngier 
/*
 * Populate descriptors for the pre-allocated linear interrupt range
 * [@virq_base .. @virq_base + @nvec - 1] on @dev and connect each virq
 * to @domain. On failure the already populated part of the range is
 * torn down again. Takes the device's MSI descriptor lock itself.
 * Returns 0 on success or a negative error code.
 */
7122145ac93SMarc Zyngier int msi_domain_populate_irqs(struct irq_domain *domain, struct device *dev,
713a80713feSThomas Gleixner 			     int virq_base, int nvec, msi_alloc_info_t *arg)
7142145ac93SMarc Zyngier {
7152145ac93SMarc Zyngier 	struct msi_domain_info *info = domain->host_data;
7162145ac93SMarc Zyngier 	struct msi_domain_ops *ops = info->ops;
7172145ac93SMarc Zyngier 	struct msi_desc *desc;
718a80713feSThomas Gleixner 	int ret, virq;
7192145ac93SMarc Zyngier 
720a80713feSThomas Gleixner 	msi_lock_descs(dev);
721cd6cf065SThomas Gleixner 	ret = msi_add_simple_msi_descs(dev, virq_base, nvec);
722cd6cf065SThomas Gleixner 	if (ret)
723cd6cf065SThomas Gleixner 		goto unlock;
7242145ac93SMarc Zyngier 
725cd6cf065SThomas Gleixner 	for (virq = virq_base; virq < virq_base + nvec; virq++) {
726cd6cf065SThomas Gleixner 		desc = xa_load(&dev->msi.data->__store, virq);
727a80713feSThomas Gleixner 		desc->irq = virq;
7282145ac93SMarc Zyngier 
7292145ac93SMarc Zyngier 		ops->set_desc(arg, desc);
730a80713feSThomas Gleixner 		ret = irq_domain_alloc_irqs_hierarchy(domain, virq, 1, arg);
7312145ac93SMarc Zyngier 		if (ret)
732a80713feSThomas Gleixner 			goto fail;
7332145ac93SMarc Zyngier 
734a80713feSThomas Gleixner 		irq_set_msi_desc(virq, desc);
7352145ac93SMarc Zyngier 	}
736a80713feSThomas Gleixner 	msi_unlock_descs(dev);
737a80713feSThomas Gleixner 	return 0;
7382145ac93SMarc Zyngier 
739a80713feSThomas Gleixner fail:
	/* Unwind the hierarchy allocations done so far, then drop all descriptors */
740a80713feSThomas Gleixner 	for (--virq; virq >= virq_base; virq--)
741a80713feSThomas Gleixner 		irq_domain_free_irqs_common(domain, virq, 1);
742a80713feSThomas Gleixner 	msi_free_msi_descs_range(dev, MSI_DESC_ALL, virq_base, virq_base + nvec - 1);
743cd6cf065SThomas Gleixner unlock:
744a80713feSThomas Gleixner 	msi_unlock_descs(dev);
7452145ac93SMarc Zyngier 	return ret;
7462145ac93SMarc Zyngier }
7472145ac93SMarc Zyngier 
748bc976233SThomas Gleixner /*
749bc976233SThomas Gleixner  * Carefully check whether the device can use reservation mode. If
750bc976233SThomas Gleixner  * reservation mode is enabled then the early activation will assign a
751bc976233SThomas Gleixner  * dummy vector to the device. If the PCI/MSI device does not support
752bc976233SThomas Gleixner  * masking of the entry then this can result in spurious interrupts when
753bc976233SThomas Gleixner  * the device driver is not absolutely careful. But even then a malfunction
754bc976233SThomas Gleixner  * of the hardware could result in a spurious interrupt on the dummy vector
755bc976233SThomas Gleixner  * and render the device unusable. If the entry can be masked then the core
756bc976233SThomas Gleixner  * logic will prevent the spurious interrupt and reservation mode can be
757bc976233SThomas Gleixner  * used. For now reservation mode is restricted to PCI/MSI.
758bc976233SThomas Gleixner  */
759bc976233SThomas Gleixner static bool msi_check_reservation_mode(struct irq_domain *domain,
760bc976233SThomas Gleixner 				       struct msi_domain_info *info,
761bc976233SThomas Gleixner 				       struct device *dev)
762da5dd9e8SThomas Gleixner {
763bc976233SThomas Gleixner 	struct msi_desc *desc;
764bc976233SThomas Gleixner 
	/* Only PCI/MSI and VMD domains are eligible for reservation mode */
765c6c9e283SThomas Gleixner 	switch(domain->bus_token) {
766c6c9e283SThomas Gleixner 	case DOMAIN_BUS_PCI_MSI:
767c6c9e283SThomas Gleixner 	case DOMAIN_BUS_VMD_MSI:
768c6c9e283SThomas Gleixner 		break;
769c6c9e283SThomas Gleixner 	default:
770bc976233SThomas Gleixner 		return false;
771c6c9e283SThomas Gleixner 	}
772bc976233SThomas Gleixner 
773da5dd9e8SThomas Gleixner 	if (!(info->flags & MSI_FLAG_MUST_REACTIVATE))
774da5dd9e8SThomas Gleixner 		return false;
775bc976233SThomas Gleixner 
776bc976233SThomas Gleixner 	if (IS_ENABLED(CONFIG_PCI_MSI) && pci_msi_ignore_mask)
777bc976233SThomas Gleixner 		return false;
778bc976233SThomas Gleixner 
779bc976233SThomas Gleixner 	/*
780bc976233SThomas Gleixner 	 * Checking the first MSI descriptor is sufficient. MSIX supports
7819c8e9c96SThomas Gleixner 	 * masking and MSI does so when the can_mask attribute is set.
782bc976233SThomas Gleixner 	 */
783495c66acSThomas Gleixner 	desc = msi_first_desc(dev, MSI_DESC_ALL);
784e58f2259SThomas Gleixner 	return desc->pci.msi_attrib.is_msix || desc->pci.msi_attrib.can_mask;
785da5dd9e8SThomas Gleixner }
786da5dd9e8SThomas Gleixner 
/*
 * Translate a failed hierarchy allocation into the return value expected
 * by the PCI/MSI callers: 1 requests a retry with fewer vectors for
 * multi-MSI, a positive count reports a prior partial success, and
 * -ENOSPC signals a hard failure (also for all non-PCI domains).
 */
78789033762SThomas Gleixner static int msi_handle_pci_fail(struct irq_domain *domain, struct msi_desc *desc,
78889033762SThomas Gleixner 			       int allocated)
78989033762SThomas Gleixner {
79089033762SThomas Gleixner 	switch(domain->bus_token) {
79189033762SThomas Gleixner 	case DOMAIN_BUS_PCI_MSI:
79289033762SThomas Gleixner 	case DOMAIN_BUS_VMD_MSI:
79389033762SThomas Gleixner 		if (IS_ENABLED(CONFIG_PCI_MSI))
79489033762SThomas Gleixner 			break;
79589033762SThomas Gleixner 		fallthrough;
79689033762SThomas Gleixner 	default:
79789033762SThomas Gleixner 		return -ENOSPC;
79889033762SThomas Gleixner 	}
79989033762SThomas Gleixner 
80089033762SThomas Gleixner 	/* Let a failed PCI multi MSI allocation retry */
80189033762SThomas Gleixner 	if (desc->nvec_used > 1)
80289033762SThomas Gleixner 		return 1;
80389033762SThomas Gleixner 
80489033762SThomas Gleixner 	/* If there was a successful allocation let the caller know */
80589033762SThomas Gleixner 	return allocated ? allocated : -ENOSPC;
80689033762SThomas Gleixner }
80789033762SThomas Gleixner 
/* Per-interrupt initialization flags for msi_init_virq() */
808ef8dd015SThomas Gleixner #define VIRQ_CAN_RESERVE	0x01
809ef8dd015SThomas Gleixner #define VIRQ_ACTIVATE		0x02
810ef8dd015SThomas Gleixner #define VIRQ_NOMASK_QUIRK	0x04
811ef8dd015SThomas Gleixner 
/*
 * Apply reservation-mode / early-activation policy (@vflags, VIRQ_*) to a
 * freshly allocated @virq and activate it when requested.
 * Returns 0 on success or the error from irq_domain_activate_irq().
 */
812ef8dd015SThomas Gleixner static int msi_init_virq(struct irq_domain *domain, int virq, unsigned int vflags)
813ef8dd015SThomas Gleixner {
814ef8dd015SThomas Gleixner 	struct irq_data *irqd = irq_domain_get_irq_data(domain, virq);
815ef8dd015SThomas Gleixner 	int ret;
816ef8dd015SThomas Gleixner 
817ef8dd015SThomas Gleixner 	if (!(vflags & VIRQ_CAN_RESERVE)) {
818ef8dd015SThomas Gleixner 		irqd_clr_can_reserve(irqd);
819ef8dd015SThomas Gleixner 		if (vflags & VIRQ_NOMASK_QUIRK)
820ef8dd015SThomas Gleixner 			irqd_set_msi_nomask_quirk(irqd);
821*d802057cSMarc Zyngier 
822*d802057cSMarc Zyngier 		/*
823*d802057cSMarc Zyngier 		 * If the interrupt is managed but no CPU is available to
824*d802057cSMarc Zyngier 		 * service it, shut it down until better times. Note that
825*d802057cSMarc Zyngier 		 * we only do this on the !RESERVE path as x86 (the only
826*d802057cSMarc Zyngier 		 * architecture using this flag) deals with this in a
827*d802057cSMarc Zyngier 		 * different way by using a catch-all vector.
828*d802057cSMarc Zyngier 		 */
829*d802057cSMarc Zyngier 		if ((vflags & VIRQ_ACTIVATE) &&
830*d802057cSMarc Zyngier 		    irqd_affinity_is_managed(irqd) &&
831*d802057cSMarc Zyngier 		    !cpumask_intersects(irq_data_get_affinity_mask(irqd),
832*d802057cSMarc Zyngier 					cpu_online_mask)) {
833*d802057cSMarc Zyngier 			    irqd_set_managed_shutdown(irqd);
834*d802057cSMarc Zyngier 			    return 0;
835*d802057cSMarc Zyngier 		    }
836ef8dd015SThomas Gleixner 	}
837ef8dd015SThomas Gleixner 
838ef8dd015SThomas Gleixner 	if (!(vflags & VIRQ_ACTIVATE))
839ef8dd015SThomas Gleixner 		return 0;
840ef8dd015SThomas Gleixner 
841ef8dd015SThomas Gleixner 	ret = irq_domain_activate_irq(irqd, vflags & VIRQ_CAN_RESERVE);
842ef8dd015SThomas Gleixner 	if (ret)
843ef8dd015SThomas Gleixner 		return ret;
844ef8dd015SThomas Gleixner 	/*
845ef8dd015SThomas Gleixner 	 * If the interrupt uses reservation mode, clear the activated bit
846ef8dd015SThomas Gleixner 	 * so request_irq() will assign the final vector.
847ef8dd015SThomas Gleixner 	 */
848ef8dd015SThomas Gleixner 	if (vflags & VIRQ_CAN_RESERVE)
849ef8dd015SThomas Gleixner 		irqd_clr_activated(irqd);
850ef8dd015SThomas Gleixner 	return 0;
851ef8dd015SThomas Gleixner }
852ef8dd015SThomas Gleixner 
/*
 * Default ops->domain_alloc_irqs: allocate and initialize @nvec interrupts
 * on @dev for all descriptors that do not yet have an interrupt associated.
 * On any failure a partial allocation is left in place for the caller
 * (msi_domain_alloc_irqs_descs_locked()) to clean up.
 */
85343e9e705SThomas Gleixner int __msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
854d9109698SJiang Liu 			    int nvec)
855d9109698SJiang Liu {
856d9109698SJiang Liu 	struct msi_domain_info *info = domain->host_data;
857d9109698SJiang Liu 	struct msi_domain_ops *ops = info->ops;
85806fde695SZenghui Yu 	msi_alloc_info_t arg = { };
859ef8dd015SThomas Gleixner 	unsigned int vflags = 0;
860ef8dd015SThomas Gleixner 	struct msi_desc *desc;
86189033762SThomas Gleixner 	int allocated = 0;
862b6140914SThomas Gleixner 	int i, ret, virq;
863d9109698SJiang Liu 
864b2eba39bSMarc Zyngier 	ret = msi_domain_prepare_irqs(domain, dev, nvec, &arg);
865d9109698SJiang Liu 	if (ret)
866d9109698SJiang Liu 		return ret;
867d9109698SJiang Liu 
868ef8dd015SThomas Gleixner 	/*
869ef8dd015SThomas Gleixner 	 * This flag is set by the PCI layer as we need to activate
870ef8dd015SThomas Gleixner 	 * the MSI entries before the PCI layer enables MSI in the
871ef8dd015SThomas Gleixner 	 * card. Otherwise the card latches a random msi message.
872ef8dd015SThomas Gleixner 	 */
873ef8dd015SThomas Gleixner 	if (info->flags & MSI_FLAG_ACTIVATE_EARLY)
874ef8dd015SThomas Gleixner 		vflags |= VIRQ_ACTIVATE;
875ef8dd015SThomas Gleixner 
876ef8dd015SThomas Gleixner 	/*
877ef8dd015SThomas Gleixner 	 * Interrupt can use a reserved vector and will not occupy
878ef8dd015SThomas Gleixner 	 * a real device vector until the interrupt is requested.
879ef8dd015SThomas Gleixner 	 */
880ef8dd015SThomas Gleixner 	if (msi_check_reservation_mode(domain, info, dev)) {
881ef8dd015SThomas Gleixner 		vflags |= VIRQ_CAN_RESERVE;
882ef8dd015SThomas Gleixner 		/*
883ef8dd015SThomas Gleixner 		 * MSI affinity setting requires a special quirk (X86) when
884ef8dd015SThomas Gleixner 		 * reservation mode is active.
885ef8dd015SThomas Gleixner 		 */
886ef8dd015SThomas Gleixner 		if (domain->flags & IRQ_DOMAIN_MSI_NOMASK_QUIRK)
887ef8dd015SThomas Gleixner 			vflags |= VIRQ_NOMASK_QUIRK;
888ef8dd015SThomas Gleixner 	}
889ef8dd015SThomas Gleixner 
890ef8dd015SThomas Gleixner 	msi_for_each_desc(desc, dev, MSI_DESC_NOTASSOCIATED) {
891d9109698SJiang Liu 		ops->set_desc(&arg, desc);
892d9109698SJiang Liu 
893b6140914SThomas Gleixner 		virq = __irq_domain_alloc_irqs(domain, -1, desc->nvec_used,
89406ee6d57SThomas Gleixner 					       dev_to_node(dev), &arg, false,
8950972fa57SThomas Gleixner 					       desc->affinity);
8960f62d941SThomas Gleixner 		if (virq < 0)
8970f62d941SThomas Gleixner 			return msi_handle_pci_fail(domain, desc, allocated);
898d9109698SJiang Liu 
89907557ccbSThomas Gleixner 		for (i = 0; i < desc->nvec_used; i++) {
900d9109698SJiang Liu 			irq_set_msi_desc_off(virq, i, desc);
90107557ccbSThomas Gleixner 			irq_debugfs_copy_devname(virq + i, dev);
902ef8dd015SThomas Gleixner 			ret = msi_init_virq(domain, virq + i, vflags);
903bb9b428aSThomas Gleixner 			if (ret)
9040f62d941SThomas Gleixner 				return ret;
90574a5257aSThomas Gleixner 		}
906bf5e758fSThomas Gleixner 		if (info->flags & MSI_FLAG_DEV_SYSFS) {
907bf5e758fSThomas Gleixner 			ret = msi_sysfs_populate_desc(dev, desc);
908bf5e758fSThomas Gleixner 			if (ret)
909bf5e758fSThomas Gleixner 				return ret;
910bf5e758fSThomas Gleixner 		}
911ef8dd015SThomas Gleixner 		allocated++;
912d9109698SJiang Liu 	}
913d9109698SJiang Liu 	return 0;
9140f62d941SThomas Gleixner }
9150f62d941SThomas Gleixner 
/*
 * Pre-allocate simple MSI descriptors 0..@num_descs-1 for @dev, but only
 * when the domain opted in via MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS.
 */
916645474e2SThomas Gleixner static int msi_domain_add_simple_msi_descs(struct msi_domain_info *info,
917645474e2SThomas Gleixner 					   struct device *dev,
918645474e2SThomas Gleixner 					   unsigned int num_descs)
919645474e2SThomas Gleixner {
920645474e2SThomas Gleixner 	if (!(info->flags & MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS))
921645474e2SThomas Gleixner 		return 0;
922645474e2SThomas Gleixner 
923645474e2SThomas Gleixner 	return msi_add_simple_msi_descs(dev, 0, num_descs);
924645474e2SThomas Gleixner }
925645474e2SThomas Gleixner 
9260f62d941SThomas Gleixner /**
9270f62d941SThomas Gleixner  * msi_domain_alloc_irqs_descs_locked - Allocate interrupts from a MSI interrupt domain
9280f62d941SThomas Gleixner  * @domain:	The domain to allocate from
9290f62d941SThomas Gleixner  * @dev:	Pointer to device struct of the device for which the interrupts
9300f62d941SThomas Gleixner  *		are allocated
9310f62d941SThomas Gleixner  * @nvec:	The number of interrupts to allocate
9320f62d941SThomas Gleixner  *
9330f62d941SThomas Gleixner  * Must be invoked from within a msi_lock_descs() / msi_unlock_descs()
9340f62d941SThomas Gleixner  * pair. Use this for MSI irqdomains which implement their own vector
9350f62d941SThomas Gleixner  * allocation/free.
9360f62d941SThomas Gleixner  *
9370f62d941SThomas Gleixner  * Return: %0 on success or an error code.
9380f62d941SThomas Gleixner  */
9390f62d941SThomas Gleixner int msi_domain_alloc_irqs_descs_locked(struct irq_domain *domain, struct device *dev,
9400f62d941SThomas Gleixner 				       int nvec)
9410f62d941SThomas Gleixner {
9420f62d941SThomas Gleixner 	struct msi_domain_info *info = domain->host_data;
9430f62d941SThomas Gleixner 	struct msi_domain_ops *ops = info->ops;
9440f62d941SThomas Gleixner 	int ret;
9450f62d941SThomas Gleixner 
9460f62d941SThomas Gleixner 	lockdep_assert_held(&dev->msi.data->mutex);
9470f62d941SThomas Gleixner 
	/* Optionally pre-allocate simple descriptors, then delegate to the domain */
948645474e2SThomas Gleixner 	ret = msi_domain_add_simple_msi_descs(info, dev, nvec);
949645474e2SThomas Gleixner 	if (ret)
950645474e2SThomas Gleixner 		return ret;
951645474e2SThomas Gleixner 
	/* On failure, free any partial allocation the domain op left behind */
9520f62d941SThomas Gleixner 	ret = ops->domain_alloc_irqs(domain, dev, nvec);
9530f62d941SThomas Gleixner 	if (ret)
9540f62d941SThomas Gleixner 		msi_domain_free_irqs_descs_locked(domain, dev);
955bb9b428aSThomas Gleixner 	return ret;
956d9109698SJiang Liu }
957d9109698SJiang Liu 
958d9109698SJiang Liu /**
95943e9e705SThomas Gleixner  * msi_domain_alloc_irqs - Allocate interrupts from a MSI interrupt domain
96043e9e705SThomas Gleixner  * @domain:	The domain to allocate from
961d9109698SJiang Liu  * @dev:	Pointer to device struct of the device for which the interrupts
96243e9e705SThomas Gleixner  *		are allocated
96343e9e705SThomas Gleixner  * @nvec:	The number of interrupts to allocate
96443e9e705SThomas Gleixner  *
9653b35e7e6SRandy Dunlap  * Return: %0 on success or an error code.
966d9109698SJiang Liu  */
9670f62d941SThomas Gleixner int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev, int nvec)
96843e9e705SThomas Gleixner {
969bf6e054eSThomas Gleixner 	int ret;
97043e9e705SThomas Gleixner 
	/* Convenience wrapper: take the descriptor lock around the locked variant */
9710f62d941SThomas Gleixner 	msi_lock_descs(dev);
9720f62d941SThomas Gleixner 	ret = msi_domain_alloc_irqs_descs_locked(domain, dev, nvec);
9730f62d941SThomas Gleixner 	msi_unlock_descs(dev);
974bf6e054eSThomas Gleixner 	return ret;
97543e9e705SThomas Gleixner }
97643e9e705SThomas Gleixner 
/*
 * Default ops->domain_free_irqs: deactivate and free every interrupt of
 * @dev that is associated with @domain, remove the optional sysfs
 * representation and reset the descriptors' irq linkage.
 */
97743e9e705SThomas Gleixner void __msi_domain_free_irqs(struct irq_domain *domain, struct device *dev)
978d9109698SJiang Liu {
979bf5e758fSThomas Gleixner 	struct msi_domain_info *info = domain->host_data;
980ef8dd015SThomas Gleixner 	struct irq_data *irqd;
981d9109698SJiang Liu 	struct msi_desc *desc;
982dbbc9357SBixuan Cui 	int i;
983dbbc9357SBixuan Cui 
984ef8dd015SThomas Gleixner 	/* Only handle MSI entries which have an interrupt associated */
985ef8dd015SThomas Gleixner 	msi_for_each_desc(desc, dev, MSI_DESC_ASSOCIATED) {
986ef8dd015SThomas Gleixner 		/* Make sure all interrupts are deactivated */
987ef8dd015SThomas Gleixner 		for (i = 0; i < desc->nvec_used; i++) {
988ef8dd015SThomas Gleixner 			irqd = irq_domain_get_irq_data(domain, desc->irq + i);
989ef8dd015SThomas Gleixner 			if (irqd && irqd_is_activated(irqd))
990ef8dd015SThomas Gleixner 				irq_domain_deactivate_irq(irqd);
991dbbc9357SBixuan Cui 		}
992d9109698SJiang Liu 
993d9109698SJiang Liu 		irq_domain_free_irqs(desc->irq, desc->nvec_used);
994bf5e758fSThomas Gleixner 		if (info->flags & MSI_FLAG_DEV_SYSFS)
995bf5e758fSThomas Gleixner 			msi_sysfs_remove_desc(dev, desc);
996d9109698SJiang Liu 		desc->irq = 0;
997d9109698SJiang Liu 	}
998d9109698SJiang Liu }
999d9109698SJiang Liu 
/* Free all MSI descriptors of @dev when the domain opted in via MSI_FLAG_FREE_MSI_DESCS. */
1000645474e2SThomas Gleixner static void msi_domain_free_msi_descs(struct msi_domain_info *info,
1001645474e2SThomas Gleixner 				      struct device *dev)
1002645474e2SThomas Gleixner {
1003645474e2SThomas Gleixner 	if (info->flags & MSI_FLAG_FREE_MSI_DESCS)
1004645474e2SThomas Gleixner 		msi_free_msi_descs(dev);
1005645474e2SThomas Gleixner }
1006645474e2SThomas Gleixner 
1007d9109698SJiang Liu /**
10080f62d941SThomas Gleixner  * msi_domain_free_irqs_descs_locked - Free interrupts from a MSI interrupt @domain associated to @dev
10090f62d941SThomas Gleixner  * @domain:	The domain to managing the interrupts
10100f62d941SThomas Gleixner  * @dev:	Pointer to device struct of the device for which the interrupts
10110f62d941SThomas Gleixner  *		are free
10120f62d941SThomas Gleixner  *
10130f62d941SThomas Gleixner  * Must be invoked from within a msi_lock_descs() / msi_unlock_descs()
10140f62d941SThomas Gleixner  * pair. Use this for MSI irqdomains which implement their own vector
10150f62d941SThomas Gleixner  * allocation.
10160f62d941SThomas Gleixner  */
10170f62d941SThomas Gleixner void msi_domain_free_irqs_descs_locked(struct irq_domain *domain, struct device *dev)
10180f62d941SThomas Gleixner {
10190f62d941SThomas Gleixner 	struct msi_domain_info *info = domain->host_data;
10200f62d941SThomas Gleixner 	struct msi_domain_ops *ops = info->ops;
10210f62d941SThomas Gleixner 
10220f62d941SThomas Gleixner 	lockdep_assert_held(&dev->msi.data->mutex);
10230f62d941SThomas Gleixner 
	/* Free the interrupts first, then (optionally) the descriptors */
10240f62d941SThomas Gleixner 	ops->domain_free_irqs(domain, dev);
1025645474e2SThomas Gleixner 	msi_domain_free_msi_descs(info, dev);
10260f62d941SThomas Gleixner }
10270f62d941SThomas Gleixner 
10280f62d941SThomas Gleixner /**
10293b35e7e6SRandy Dunlap  * msi_domain_free_irqs - Free interrupts from a MSI interrupt @domain associated to @dev
103043e9e705SThomas Gleixner  * @domain:	The domain to managing the interrupts
103143e9e705SThomas Gleixner  * @dev:	Pointer to device struct of the device for which the interrupts
103243e9e705SThomas Gleixner  *		are free
103343e9e705SThomas Gleixner  */
103443e9e705SThomas Gleixner void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev)
103543e9e705SThomas Gleixner {
	/* Convenience wrapper: take the descriptor lock around the locked variant */
10360f62d941SThomas Gleixner 	msi_lock_descs(dev);
10370f62d941SThomas Gleixner 	msi_domain_free_irqs_descs_locked(domain, dev);
10380f62d941SThomas Gleixner 	msi_unlock_descs(dev);
103943e9e705SThomas Gleixner }
104043e9e705SThomas Gleixner 
104143e9e705SThomas Gleixner /**
1042f3cf8bb0SJiang Liu  * msi_get_domain_info - Get the MSI interrupt domain info for @domain
1043f3cf8bb0SJiang Liu  * @domain:	The interrupt domain to retrieve data from
1044f3cf8bb0SJiang Liu  *
10453b35e7e6SRandy Dunlap  * Return: the pointer to the msi_domain_info stored in @domain->host_data.
1046f3cf8bb0SJiang Liu  */
1047f3cf8bb0SJiang Liu struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain)
1048f3cf8bb0SJiang Liu {
	/* host_data is void *; the cast only documents the stored type */
1049f3cf8bb0SJiang Liu 	return (struct msi_domain_info *)domain->host_data;
1050f3cf8bb0SJiang Liu }
1051f3cf8bb0SJiang Liu 
1052f3cf8bb0SJiang Liu #endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */
1053