xref: /openbmc/linux/kernel/irq/msi.c (revision 2569f62c)
152a65ff5SThomas Gleixner // SPDX-License-Identifier: GPL-2.0
2f3cf8bb0SJiang Liu /*
3f3cf8bb0SJiang Liu  * Copyright (C) 2014 Intel Corp.
4f3cf8bb0SJiang Liu  * Author: Jiang Liu <jiang.liu@linux.intel.com>
5f3cf8bb0SJiang Liu  *
6f3cf8bb0SJiang Liu  * This file is licensed under GPLv2.
7f3cf8bb0SJiang Liu  *
8a359f757SIngo Molnar  * This file contains common code to support Message Signaled Interrupts for
9f3cf8bb0SJiang Liu  * PCI compatible and non PCI compatible devices.
10f3cf8bb0SJiang Liu  */
11aeeb5965SJiang Liu #include <linux/types.h>
12aeeb5965SJiang Liu #include <linux/device.h>
13f3cf8bb0SJiang Liu #include <linux/irq.h>
14f3cf8bb0SJiang Liu #include <linux/irqdomain.h>
15f3cf8bb0SJiang Liu #include <linux/msi.h>
164e201566SMarc Zyngier #include <linux/slab.h>
173ba1f050SThomas Gleixner #include <linux/sysfs.h>
182f170814SBarry Song #include <linux/pci.h>
19d9109698SJiang Liu 
2007557ccbSThomas Gleixner #include "internals.h"
2107557ccbSThomas Gleixner 
22bf5e758fSThomas Gleixner static inline int msi_sysfs_create_group(struct device *dev);
23cc9a246dSThomas Gleixner 
2428f4b041SThomas Gleixner /**
25cc9a246dSThomas Gleixner  * msi_alloc_desc - Allocate an initialized msi_desc
2628f4b041SThomas Gleixner  * @dev:	Pointer to the device for which this is allocated
2728f4b041SThomas Gleixner  * @nvec:	The number of vectors used in this entry
2828f4b041SThomas Gleixner  * @affinity:	Optional pointer to an affinity mask array size of @nvec
2928f4b041SThomas Gleixner  *
303b35e7e6SRandy Dunlap  * If @affinity is not %NULL then an affinity array[@nvec] is allocated
31bec04037SDou Liyang  * and the affinity masks and flags from @affinity are copied.
323b35e7e6SRandy Dunlap  *
333b35e7e6SRandy Dunlap  * Return: pointer to allocated &msi_desc on success or %NULL on failure
3428f4b041SThomas Gleixner  */
35cc9a246dSThomas Gleixner static struct msi_desc *msi_alloc_desc(struct device *dev, int nvec,
36bec04037SDou Liyang 					const struct irq_affinity_desc *affinity)
37aa48b6f7SJiang Liu {
38cc9a246dSThomas Gleixner 	struct msi_desc *desc = kzalloc(sizeof(*desc), GFP_KERNEL);
3928f4b041SThomas Gleixner 
40aa48b6f7SJiang Liu 	if (!desc)
41aa48b6f7SJiang Liu 		return NULL;
42aa48b6f7SJiang Liu 
43aa48b6f7SJiang Liu 	desc->dev = dev;
4428f4b041SThomas Gleixner 	desc->nvec_used = nvec;
4528f4b041SThomas Gleixner 	if (affinity) {
46cc9a246dSThomas Gleixner 		desc->affinity = kmemdup(affinity, nvec * sizeof(*desc->affinity), GFP_KERNEL);
4728f4b041SThomas Gleixner 		if (!desc->affinity) {
4828f4b041SThomas Gleixner 			kfree(desc);
4928f4b041SThomas Gleixner 			return NULL;
5028f4b041SThomas Gleixner 		}
5128f4b041SThomas Gleixner 	}
52aa48b6f7SJiang Liu 	return desc;
53aa48b6f7SJiang Liu }
54aa48b6f7SJiang Liu 
/* Free a descriptor allocated by msi_alloc_desc(), including its affinity array */
static void msi_free_desc(struct msi_desc *desc)
{
	kfree(desc->affinity);
	kfree(desc);
}
60aa48b6f7SJiang Liu 
61cd6cf065SThomas Gleixner static int msi_insert_desc(struct msi_device_data *md, struct msi_desc *desc, unsigned int index)
62cd6cf065SThomas Gleixner {
63cd6cf065SThomas Gleixner 	int ret;
64cd6cf065SThomas Gleixner 
65cd6cf065SThomas Gleixner 	desc->msi_index = index;
66cd6cf065SThomas Gleixner 	ret = xa_insert(&md->__store, index, desc, GFP_KERNEL);
67cd6cf065SThomas Gleixner 	if (ret)
68cd6cf065SThomas Gleixner 		msi_free_desc(desc);
69cd6cf065SThomas Gleixner 	return ret;
70cd6cf065SThomas Gleixner }
71cd6cf065SThomas Gleixner 
7260290525SThomas Gleixner /**
7360290525SThomas Gleixner  * msi_add_msi_desc - Allocate and initialize a MSI descriptor
7460290525SThomas Gleixner  * @dev:	Pointer to the device for which the descriptor is allocated
7560290525SThomas Gleixner  * @init_desc:	Pointer to an MSI descriptor to initialize the new descriptor
7660290525SThomas Gleixner  *
7760290525SThomas Gleixner  * Return: 0 on success or an appropriate failure code.
7860290525SThomas Gleixner  */
int msi_add_msi_desc(struct device *dev, struct msi_desc *init_desc)
{
	struct msi_desc *desc;

	lockdep_assert_held(&dev->msi.data->mutex);

	desc = msi_alloc_desc(dev, init_desc->nvec_used, init_desc->affinity);
	if (!desc)
		return -ENOMEM;

	/* Copy type specific data to the new descriptor. */
	desc->pci = init_desc->pci;
	/* msi_insert_desc() frees @desc on failure, so no cleanup needed here */
	return msi_insert_desc(dev->msi.data, desc, init_desc->msi_index);
}
9360290525SThomas Gleixner 
9460290525SThomas Gleixner /**
9560290525SThomas Gleixner  * msi_add_simple_msi_descs - Allocate and initialize MSI descriptors
9660290525SThomas Gleixner  * @dev:	Pointer to the device for which the descriptors are allocated
9760290525SThomas Gleixner  * @index:	Index for the first MSI descriptor
9860290525SThomas Gleixner  * @ndesc:	Number of descriptors to allocate
9960290525SThomas Gleixner  *
10060290525SThomas Gleixner  * Return: 0 on success or an appropriate failure code.
10160290525SThomas Gleixner  */
10260290525SThomas Gleixner static int msi_add_simple_msi_descs(struct device *dev, unsigned int index, unsigned int ndesc)
10360290525SThomas Gleixner {
104cd6cf065SThomas Gleixner 	unsigned int idx, last = index + ndesc - 1;
105cd6cf065SThomas Gleixner 	struct msi_desc *desc;
106cd6cf065SThomas Gleixner 	int ret;
10760290525SThomas Gleixner 
10860290525SThomas Gleixner 	lockdep_assert_held(&dev->msi.data->mutex);
10960290525SThomas Gleixner 
110cd6cf065SThomas Gleixner 	for (idx = index; idx <= last; idx++) {
111cc9a246dSThomas Gleixner 		desc = msi_alloc_desc(dev, 1, NULL);
11260290525SThomas Gleixner 		if (!desc)
113cd6cf065SThomas Gleixner 			goto fail_mem;
114cd6cf065SThomas Gleixner 		ret = msi_insert_desc(dev->msi.data, desc, idx);
115cd6cf065SThomas Gleixner 		if (ret)
11660290525SThomas Gleixner 			goto fail;
11760290525SThomas Gleixner 	}
11860290525SThomas Gleixner 	return 0;
11960290525SThomas Gleixner 
120cd6cf065SThomas Gleixner fail_mem:
121cd6cf065SThomas Gleixner 	ret = -ENOMEM;
12260290525SThomas Gleixner fail:
1232f2940d1SThomas Gleixner 	msi_free_msi_descs_range(dev, index, last);
124cd6cf065SThomas Gleixner 	return ret;
12560290525SThomas Gleixner }
126cd6cf065SThomas Gleixner 
127cd6cf065SThomas Gleixner static bool msi_desc_match(struct msi_desc *desc, enum msi_desc_filter filter)
128cd6cf065SThomas Gleixner {
129cd6cf065SThomas Gleixner 	switch (filter) {
130cd6cf065SThomas Gleixner 	case MSI_DESC_ALL:
131cd6cf065SThomas Gleixner 		return true;
132cd6cf065SThomas Gleixner 	case MSI_DESC_NOTASSOCIATED:
133cd6cf065SThomas Gleixner 		return !desc->irq;
134cd6cf065SThomas Gleixner 	case MSI_DESC_ASSOCIATED:
135cd6cf065SThomas Gleixner 		return !!desc->irq;
136cd6cf065SThomas Gleixner 	}
137cd6cf065SThomas Gleixner 	WARN_ON_ONCE(1);
138cd6cf065SThomas Gleixner 	return false;
13960290525SThomas Gleixner }
14060290525SThomas Gleixner 
141645474e2SThomas Gleixner /**
142645474e2SThomas Gleixner  * msi_free_msi_descs_range - Free MSI descriptors of a device
143645474e2SThomas Gleixner  * @dev:		Device to free the descriptors
144645474e2SThomas Gleixner  * @first_index:	Index to start freeing from
145645474e2SThomas Gleixner  * @last_index:		Last index to be freed
146645474e2SThomas Gleixner  */
void msi_free_msi_descs_range(struct device *dev, unsigned int first_index,
			      unsigned int last_index)
{
	struct xarray *xa = &dev->msi.data->__store;
	struct msi_desc *desc;
	unsigned long idx;

	lockdep_assert_held(&dev->msi.data->mutex);

	xa_for_each_range(xa, idx, desc, first_index, last_index) {
		/* Remove the entry from the store before freeing it */
		xa_erase(xa, idx);

		/* Leak the descriptor when it is still referenced */
		if (WARN_ON_ONCE(msi_desc_match(desc, MSI_DESC_ASSOCIATED)))
			continue;
		msi_free_desc(desc);
	}
}
165645474e2SThomas Gleixner 
/* Copy the cached MSI message of @entry into @msg */
void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{
	*msg = entry->msg;
}
17038b6a1cfSJiang Liu 
/* Lookup the MSI descriptor of @irq and copy its cached message into @msg */
void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg)
{
	struct msi_desc *entry = irq_get_msi_desc(irq);

	__get_cached_msi_msg(entry, msg);
}
EXPORT_SYMBOL_GPL(get_cached_msi_msg);
17838b6a1cfSJiang Liu 
/* devres release callback: invoked when the device goes away */
static void msi_device_data_release(struct device *dev, void *res)
{
	struct msi_device_data *md = res;

	/* All descriptors should have been freed before device teardown */
	WARN_ON_ONCE(!xa_empty(&md->__store));
	xa_destroy(&md->__store);
	dev->msi.data = NULL;
}
187013bd8e5SThomas Gleixner 
188013bd8e5SThomas Gleixner /**
189013bd8e5SThomas Gleixner  * msi_setup_device_data - Setup MSI device data
190013bd8e5SThomas Gleixner  * @dev:	Device for which MSI device data should be set up
191013bd8e5SThomas Gleixner  *
192013bd8e5SThomas Gleixner  * Return: 0 on success, appropriate error code otherwise
193013bd8e5SThomas Gleixner  *
194013bd8e5SThomas Gleixner  * This can be called more than once for @dev. If the MSI device data is
195013bd8e5SThomas Gleixner  * already allocated the call succeeds. The allocated memory is
196013bd8e5SThomas Gleixner  * automatically released when the device is destroyed.
197013bd8e5SThomas Gleixner  */
198013bd8e5SThomas Gleixner int msi_setup_device_data(struct device *dev)
199013bd8e5SThomas Gleixner {
200013bd8e5SThomas Gleixner 	struct msi_device_data *md;
201bf5e758fSThomas Gleixner 	int ret;
202013bd8e5SThomas Gleixner 
203013bd8e5SThomas Gleixner 	if (dev->msi.data)
204013bd8e5SThomas Gleixner 		return 0;
205013bd8e5SThomas Gleixner 
206013bd8e5SThomas Gleixner 	md = devres_alloc(msi_device_data_release, sizeof(*md), GFP_KERNEL);
207013bd8e5SThomas Gleixner 	if (!md)
208013bd8e5SThomas Gleixner 		return -ENOMEM;
209013bd8e5SThomas Gleixner 
210bf5e758fSThomas Gleixner 	ret = msi_sysfs_create_group(dev);
211bf5e758fSThomas Gleixner 	if (ret) {
212bf5e758fSThomas Gleixner 		devres_free(md);
213bf5e758fSThomas Gleixner 		return ret;
214bf5e758fSThomas Gleixner 	}
215bf5e758fSThomas Gleixner 
216cd6cf065SThomas Gleixner 	xa_init(&md->__store);
217b5f687f9SThomas Gleixner 	mutex_init(&md->mutex);
218013bd8e5SThomas Gleixner 	dev->msi.data = md;
219013bd8e5SThomas Gleixner 	devres_add(dev, md);
220013bd8e5SThomas Gleixner 	return 0;
221013bd8e5SThomas Gleixner }
222013bd8e5SThomas Gleixner 
223cf15f43aSThomas Gleixner /**
224b5f687f9SThomas Gleixner  * msi_lock_descs - Lock the MSI descriptor storage of a device
225b5f687f9SThomas Gleixner  * @dev:	Device to operate on
226b5f687f9SThomas Gleixner  */
void msi_lock_descs(struct device *dev)
{
	/* Serializes all modifications of the per device MSI descriptor store */
	mutex_lock(&dev->msi.data->mutex);
}
EXPORT_SYMBOL_GPL(msi_lock_descs);
232b5f687f9SThomas Gleixner 
233b5f687f9SThomas Gleixner /**
234b5f687f9SThomas Gleixner  * msi_unlock_descs - Unlock the MSI descriptor storage of a device
235b5f687f9SThomas Gleixner  * @dev:	Device to operate on
236b5f687f9SThomas Gleixner  */
void msi_unlock_descs(struct device *dev)
{
	/* Invalidate the index which was cached by the iterator */
	dev->msi.data->__iter_idx = MSI_MAX_INDEX;
	mutex_unlock(&dev->msi.data->mutex);
}
EXPORT_SYMBOL_GPL(msi_unlock_descs);
244b5f687f9SThomas Gleixner 
/*
 * Find the next descriptor matching @filter, starting at the iterator
 * index which msi_first_desc()/msi_next_desc() set up in @md->__iter_idx.
 */
static struct msi_desc *msi_find_desc(struct msi_device_data *md, enum msi_desc_filter filter)
{
	struct msi_desc *desc;

	xa_for_each_start(&md->__store, md->__iter_idx, desc, md->__iter_idx) {
		if (msi_desc_match(desc, filter))
			return desc;
	}
	/* Nothing found: invalidate the cached iterator index */
	md->__iter_idx = MSI_MAX_INDEX;
	return NULL;
}
2561046f71dSThomas Gleixner 
2571046f71dSThomas Gleixner /**
2581046f71dSThomas Gleixner  * msi_first_desc - Get the first MSI descriptor of a device
2591046f71dSThomas Gleixner  * @dev:	Device to operate on
2601046f71dSThomas Gleixner  * @filter:	Descriptor state filter
2611046f71dSThomas Gleixner  *
2621046f71dSThomas Gleixner  * Must be called with the MSI descriptor mutex held, i.e. msi_lock_descs()
2631046f71dSThomas Gleixner  * must be invoked before the call.
2641046f71dSThomas Gleixner  *
2651046f71dSThomas Gleixner  * Return: Pointer to the first MSI descriptor matching the search
2661046f71dSThomas Gleixner  *	   criteria, NULL if none found.
2671046f71dSThomas Gleixner  */
2681046f71dSThomas Gleixner struct msi_desc *msi_first_desc(struct device *dev, enum msi_desc_filter filter)
2691046f71dSThomas Gleixner {
270cd6cf065SThomas Gleixner 	struct msi_device_data *md = dev->msi.data;
2711046f71dSThomas Gleixner 
272cd6cf065SThomas Gleixner 	if (WARN_ON_ONCE(!md))
2731046f71dSThomas Gleixner 		return NULL;
2741046f71dSThomas Gleixner 
275cd6cf065SThomas Gleixner 	lockdep_assert_held(&md->mutex);
2761046f71dSThomas Gleixner 
277cd6cf065SThomas Gleixner 	md->__iter_idx = 0;
278cd6cf065SThomas Gleixner 	return msi_find_desc(md, filter);
2791046f71dSThomas Gleixner }
2801046f71dSThomas Gleixner EXPORT_SYMBOL_GPL(msi_first_desc);
2811046f71dSThomas Gleixner 
2821046f71dSThomas Gleixner /**
2831046f71dSThomas Gleixner  * msi_next_desc - Get the next MSI descriptor of a device
2841046f71dSThomas Gleixner  * @dev:	Device to operate on
285fdd53404SThomas Gleixner  * @filter:	Descriptor state filter
2861046f71dSThomas Gleixner  *
2871046f71dSThomas Gleixner  * The first invocation of msi_next_desc() has to be preceeded by a
288cd6cf065SThomas Gleixner  * successful invocation of __msi_first_desc(). Consecutive invocations are
2891046f71dSThomas Gleixner  * only valid if the previous one was successful. All these operations have
2901046f71dSThomas Gleixner  * to be done within the same MSI mutex held region.
2911046f71dSThomas Gleixner  *
2921046f71dSThomas Gleixner  * Return: Pointer to the next MSI descriptor matching the search
2931046f71dSThomas Gleixner  *	   criteria, NULL if none found.
2941046f71dSThomas Gleixner  */
2951046f71dSThomas Gleixner struct msi_desc *msi_next_desc(struct device *dev, enum msi_desc_filter filter)
2961046f71dSThomas Gleixner {
297cd6cf065SThomas Gleixner 	struct msi_device_data *md = dev->msi.data;
2981046f71dSThomas Gleixner 
299cd6cf065SThomas Gleixner 	if (WARN_ON_ONCE(!md))
3001046f71dSThomas Gleixner 		return NULL;
3011046f71dSThomas Gleixner 
302cd6cf065SThomas Gleixner 	lockdep_assert_held(&md->mutex);
3031046f71dSThomas Gleixner 
304cd6cf065SThomas Gleixner 	if (md->__iter_idx >= (unsigned long)MSI_MAX_INDEX)
3051046f71dSThomas Gleixner 		return NULL;
3061046f71dSThomas Gleixner 
307cd6cf065SThomas Gleixner 	md->__iter_idx++;
308cd6cf065SThomas Gleixner 	return msi_find_desc(md, filter);
3091046f71dSThomas Gleixner }
3101046f71dSThomas Gleixner EXPORT_SYMBOL_GPL(msi_next_desc);
3111046f71dSThomas Gleixner 
312b5f687f9SThomas Gleixner /**
313cf15f43aSThomas Gleixner  * msi_get_virq - Return Linux interrupt number of a MSI interrupt
314cf15f43aSThomas Gleixner  * @dev:	Device to operate on
315cf15f43aSThomas Gleixner  * @index:	MSI interrupt index to look for (0-based)
316cf15f43aSThomas Gleixner  *
317cf15f43aSThomas Gleixner  * Return: The Linux interrupt number on success (> 0), 0 if not found
318cf15f43aSThomas Gleixner  */
319cf15f43aSThomas Gleixner unsigned int msi_get_virq(struct device *dev, unsigned int index)
320cf15f43aSThomas Gleixner {
321cf15f43aSThomas Gleixner 	struct msi_desc *desc;
322495c66acSThomas Gleixner 	unsigned int ret = 0;
323cf15f43aSThomas Gleixner 	bool pcimsi;
324cf15f43aSThomas Gleixner 
325cf15f43aSThomas Gleixner 	if (!dev->msi.data)
326cf15f43aSThomas Gleixner 		return 0;
327cf15f43aSThomas Gleixner 
328cf15f43aSThomas Gleixner 	pcimsi = dev_is_pci(dev) ? to_pci_dev(dev)->msi_enabled : false;
329cf15f43aSThomas Gleixner 
330495c66acSThomas Gleixner 	msi_lock_descs(dev);
331cd6cf065SThomas Gleixner 	desc = xa_load(&dev->msi.data->__store, pcimsi ? 0 : index);
332cd6cf065SThomas Gleixner 	if (desc && desc->irq) {
333cf15f43aSThomas Gleixner 		/*
334cd6cf065SThomas Gleixner 		 * PCI-MSI has only one descriptor for multiple interrupts.
335cf15f43aSThomas Gleixner 		 * PCI-MSIX and platform MSI use a descriptor per
336cf15f43aSThomas Gleixner 		 * interrupt.
337cf15f43aSThomas Gleixner 		 */
338cd6cf065SThomas Gleixner 		if (pcimsi) {
339cd6cf065SThomas Gleixner 			if (index < desc->nvec_used)
340cd6cf065SThomas Gleixner 				ret = desc->irq + index;
341cd6cf065SThomas Gleixner 		} else {
342495c66acSThomas Gleixner 			ret = desc->irq;
343cf15f43aSThomas Gleixner 		}
344495c66acSThomas Gleixner 	}
345495c66acSThomas Gleixner 	msi_unlock_descs(dev);
346495c66acSThomas Gleixner 	return ret;
347cf15f43aSThomas Gleixner }
348cf15f43aSThomas Gleixner EXPORT_SYMBOL_GPL(msi_get_virq);
349cf15f43aSThomas Gleixner 
3501197528aSThomas Gleixner #ifdef CONFIG_SYSFS
/* The group starts out empty; per interrupt attributes are added dynamically */
static struct attribute *msi_dev_attrs[] = {
	NULL
};

static const struct attribute_group msi_irqs_group = {
	.name	= "msi_irqs",
	.attrs	= msi_dev_attrs,
};
359bf5e758fSThomas Gleixner 
static inline int msi_sysfs_create_group(struct device *dev)
{
	/* devm managed: the group is removed automatically on device teardown */
	return devm_device_add_group(dev, &msi_irqs_group);
}
364bf5e758fSThomas Gleixner 
3652f170814SBarry Song static ssize_t msi_mode_show(struct device *dev, struct device_attribute *attr,
3662f170814SBarry Song 			     char *buf)
3672f170814SBarry Song {
3686ef7f771SThomas Gleixner 	/* MSI vs. MSIX is per device not per interrupt */
3696ef7f771SThomas Gleixner 	bool is_msix = dev_is_pci(dev) ? to_pci_dev(dev)->msix_enabled : false;
3702f170814SBarry Song 
3712f170814SBarry Song 	return sysfs_emit(buf, "%s\n", is_msix ? "msix" : "msi");
3722f170814SBarry Song }
3732f170814SBarry Song 
/* Remove and free the sysfs attributes of @desc, if any were populated */
static void msi_sysfs_remove_desc(struct device *dev, struct msi_desc *desc)
{
	struct device_attribute *attrs = desc->sysfs_attrs;
	int i;

	if (!attrs)
		return;

	/* Disconnect the attributes from the descriptor before removal */
	desc->sysfs_attrs = NULL;
	for (i = 0; i < desc->nvec_used; i++) {
		/* ->show is only set when the file was actually added to the group */
		if (attrs[i].show)
			sysfs_remove_file_from_group(&dev->kobj, &attrs[i].attr, msi_irqs_group.name);
		kfree(attrs[i].attr.name);
	}
	kfree(attrs);
}
3902f170814SBarry Song 
/* Create one sysfs attribute per Linux interrupt number of @desc */
static int msi_sysfs_populate_desc(struct device *dev, struct msi_desc *desc)
{
	struct device_attribute *attrs;
	int ret, i;

	attrs = kcalloc(desc->nvec_used, sizeof(*attrs), GFP_KERNEL);
	if (!attrs)
		return -ENOMEM;

	desc->sysfs_attrs = attrs;
	for (i = 0; i < desc->nvec_used; i++) {
		sysfs_attr_init(&attrs[i].attr);
		/* The attribute file is named after the interrupt number */
		attrs[i].attr.name = kasprintf(GFP_KERNEL, "%d", desc->irq + i);
		if (!attrs[i].attr.name) {
			ret = -ENOMEM;
			goto fail;
		}

		attrs[i].attr.mode = 0444;
		attrs[i].show = msi_mode_show;

		ret = sysfs_add_file_to_group(&dev->kobj, &attrs[i].attr, msi_irqs_group.name);
		if (ret) {
			/* Clearing ->show tells cleanup this file was never added */
			attrs[i].show = NULL;
			goto fail;
		}
	}
	return 0;

fail:
	msi_sysfs_remove_desc(dev, desc);
	return ret;
}
424bf5e758fSThomas Gleixner 
425bf5e758fSThomas Gleixner #ifdef CONFIG_PCI_MSI_ARCH_FALLBACKS
4262f170814SBarry Song /**
427bf6e054eSThomas Gleixner  * msi_device_populate_sysfs - Populate msi_irqs sysfs entries for a device
428bf6e054eSThomas Gleixner  * @dev:	The device (PCI, platform etc) which will get sysfs entries
429bf6e054eSThomas Gleixner  */
int msi_device_populate_sysfs(struct device *dev)
{
	struct msi_desc *desc;
	int ret;

	/* Only descriptors with an associated Linux interrupt get attributes */
	msi_for_each_desc(desc, dev, MSI_DESC_ASSOCIATED) {
		/* Skip descriptors which are already populated */
		if (desc->sysfs_attrs)
			continue;
		ret = msi_sysfs_populate_desc(dev, desc);
		if (ret)
			return ret;
	}
	return 0;
}
444bf6e054eSThomas Gleixner 
445bf6e054eSThomas Gleixner /**
44624cff375SThomas Gleixner  * msi_device_destroy_sysfs - Destroy msi_irqs sysfs entries for a device
44724cff375SThomas Gleixner  * @dev:		The device (PCI, platform etc) for which to remove
44824cff375SThomas Gleixner  *			sysfs entries
4492f170814SBarry Song  */
void msi_device_destroy_sysfs(struct device *dev)
{
	struct msi_desc *desc;

	/* msi_sysfs_remove_desc() copes with unpopulated descriptors */
	msi_for_each_desc(desc, dev, MSI_DESC_ALL)
		msi_sysfs_remove_desc(dev, desc);
}
#endif /* CONFIG_PCI_MSI_ARCH_FALLBACKS */
458bf5e758fSThomas Gleixner #else /* CONFIG_SYSFS */
/* Stubs when sysfs support is not configured */
static inline int msi_sysfs_create_group(struct device *dev) { return 0; }
static inline int msi_sysfs_populate_desc(struct device *dev, struct msi_desc *desc) { return 0; }
static inline void msi_sysfs_remove_desc(struct device *dev, struct msi_desc *desc) { }
462bf5e758fSThomas Gleixner #endif /* !CONFIG_SYSFS */
4632f170814SBarry Song 
464762687ceSThomas Gleixner static int __msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev, int nvec);
465057c97a1SThomas Gleixner static void __msi_domain_free_irqs(struct irq_domain *domain, struct device *dev);
466762687ceSThomas Gleixner 
/* Convenience wrapper around the irq chip's write_msi_msg() callback */
static inline void irq_chip_write_msi_msg(struct irq_data *data,
					  struct msi_msg *msg)
{
	data->chip->irq_write_msi_msg(data, msg);
}
47274faaf7aSThomas Gleixner 
static void msi_check_level(struct irq_domain *domain, struct msi_msg *msg)
{
	struct msi_domain_info *info = domain->host_data;

	/*
	 * If the MSI provider has messed with the second message (msg[1])
	 * and not advertised that it is level-capable, signal the breakage.
	 */
	WARN_ON(!((info->flags & MSI_FLAG_LEVEL_CAPABLE) &&
		  (info->chip->flags & IRQCHIP_SUPPORTS_LEVEL_MSI)) &&
		(msg[1].address_lo || msg[1].address_hi || msg[1].data));
}
4850be8153cSMarc Zyngier 
486f3cf8bb0SJiang Liu /**
487f3cf8bb0SJiang Liu  * msi_domain_set_affinity - Generic affinity setter function for MSI domains
488f3cf8bb0SJiang Liu  * @irq_data:	The irq data associated to the interrupt
489f3cf8bb0SJiang Liu  * @mask:	The affinity mask to set
490f3cf8bb0SJiang Liu  * @force:	Flag to enforce setting (disable online checks)
491f3cf8bb0SJiang Liu  *
492f3cf8bb0SJiang Liu  * Intended to be used by MSI interrupt controllers which are
493f3cf8bb0SJiang Liu  * implemented with hierarchical domains.
4943b35e7e6SRandy Dunlap  *
4953b35e7e6SRandy Dunlap  * Return: IRQ_SET_MASK_* result code
496f3cf8bb0SJiang Liu  */
int msi_domain_set_affinity(struct irq_data *irq_data,
			    const struct cpumask *mask, bool force)
{
	struct irq_data *parent = irq_data->parent_data;
	struct msi_msg msg[2] = { [1] = { }, };
	int ret;

	ret = parent->chip->irq_set_affinity(parent, mask, force);
	if (ret >= 0 && ret != IRQ_SET_MASK_OK_DONE) {
		/* The parent changed the target: rewrite the MSI message */
		BUG_ON(irq_chip_compose_msi_msg(irq_data, msg));
		msi_check_level(irq_data->domain, msg);
		irq_chip_write_msi_msg(irq_data, msg);
	}

	return ret;
}
513f3cf8bb0SJiang Liu 
/* irq_domain ->activate(): compose and write the message to start delivery */
static int msi_domain_activate(struct irq_domain *domain,
			       struct irq_data *irq_data, bool early)
{
	struct msi_msg msg[2] = { [1] = { }, };

	BUG_ON(irq_chip_compose_msi_msg(irq_data, msg));
	msi_check_level(irq_data->domain, msg);
	irq_chip_write_msi_msg(irq_data, msg);
	return 0;
}
524f3cf8bb0SJiang Liu 
525f3cf8bb0SJiang Liu static void msi_domain_deactivate(struct irq_domain *domain,
526f3cf8bb0SJiang Liu 				  struct irq_data *irq_data)
527f3cf8bb0SJiang Liu {
5280be8153cSMarc Zyngier 	struct msi_msg msg[2];
529f3cf8bb0SJiang Liu 
5300be8153cSMarc Zyngier 	memset(msg, 0, sizeof(msg));
5310be8153cSMarc Zyngier 	irq_chip_write_msi_msg(irq_data, msg);
532f3cf8bb0SJiang Liu }
533f3cf8bb0SJiang Liu 
534f3cf8bb0SJiang Liu static int msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
535f3cf8bb0SJiang Liu 			    unsigned int nr_irqs, void *arg)
536f3cf8bb0SJiang Liu {
537f3cf8bb0SJiang Liu 	struct msi_domain_info *info = domain->host_data;
538f3cf8bb0SJiang Liu 	struct msi_domain_ops *ops = info->ops;
539f3cf8bb0SJiang Liu 	irq_hw_number_t hwirq = ops->get_hwirq(info, arg);
540f3cf8bb0SJiang Liu 	int i, ret;
541f3cf8bb0SJiang Liu 
542f3cf8bb0SJiang Liu 	if (irq_find_mapping(domain, hwirq) > 0)
543f3cf8bb0SJiang Liu 		return -EEXIST;
544f3cf8bb0SJiang Liu 
545bf6f869fSLiu Jiang 	if (domain->parent) {
546f3cf8bb0SJiang Liu 		ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
547f3cf8bb0SJiang Liu 		if (ret < 0)
548f3cf8bb0SJiang Liu 			return ret;
549bf6f869fSLiu Jiang 	}
550f3cf8bb0SJiang Liu 
551f3cf8bb0SJiang Liu 	for (i = 0; i < nr_irqs; i++) {
552f3cf8bb0SJiang Liu 		ret = ops->msi_init(domain, info, virq + i, hwirq + i, arg);
553f3cf8bb0SJiang Liu 		if (ret < 0) {
554f3cf8bb0SJiang Liu 			if (ops->msi_free) {
555f3cf8bb0SJiang Liu 				for (i--; i > 0; i--)
556f3cf8bb0SJiang Liu 					ops->msi_free(domain, info, virq + i);
557f3cf8bb0SJiang Liu 			}
558f3cf8bb0SJiang Liu 			irq_domain_free_irqs_top(domain, virq, nr_irqs);
559f3cf8bb0SJiang Liu 			return ret;
560f3cf8bb0SJiang Liu 		}
561f3cf8bb0SJiang Liu 	}
562f3cf8bb0SJiang Liu 
563f3cf8bb0SJiang Liu 	return 0;
564f3cf8bb0SJiang Liu }
565f3cf8bb0SJiang Liu 
566f3cf8bb0SJiang Liu static void msi_domain_free(struct irq_domain *domain, unsigned int virq,
567f3cf8bb0SJiang Liu 			    unsigned int nr_irqs)
568f3cf8bb0SJiang Liu {
569f3cf8bb0SJiang Liu 	struct msi_domain_info *info = domain->host_data;
570f3cf8bb0SJiang Liu 	int i;
571f3cf8bb0SJiang Liu 
572f3cf8bb0SJiang Liu 	if (info->ops->msi_free) {
573f3cf8bb0SJiang Liu 		for (i = 0; i < nr_irqs; i++)
574f3cf8bb0SJiang Liu 			info->ops->msi_free(domain, info, virq + i);
575f3cf8bb0SJiang Liu 	}
576f3cf8bb0SJiang Liu 	irq_domain_free_irqs_top(domain, virq, nr_irqs);
577f3cf8bb0SJiang Liu }
578f3cf8bb0SJiang Liu 
/* Generic irq_domain callbacks for MSI domains */
static const struct irq_domain_ops msi_domain_ops = {
	.alloc		= msi_domain_alloc,
	.free		= msi_domain_free,
	.activate	= msi_domain_activate,
	.deactivate	= msi_domain_deactivate,
};
585f3cf8bb0SJiang Liu 
/* Default ->get_hwirq(): the hardware interrupt number comes from @arg */
static irq_hw_number_t msi_domain_ops_get_hwirq(struct msi_domain_info *info,
						msi_alloc_info_t *arg)
{
	return arg->hwirq;
}
591aeeb5965SJiang Liu 
/* Default ->msi_prepare(): start from zeroed allocation info */
static int msi_domain_ops_prepare(struct irq_domain *domain, struct device *dev,
				  int nvec, msi_alloc_info_t *arg)
{
	memset(arg, 0, sizeof(*arg));
	return 0;
}
598aeeb5965SJiang Liu 
/* Default ->set_desc(): store the descriptor in the allocation info */
static void msi_domain_ops_set_desc(msi_alloc_info_t *arg,
				    struct msi_desc *desc)
{
	arg->desc = desc;
}
604aeeb5965SJiang Liu 
/* Default ->msi_init(): wire up chip, chip data and the optional flow handler */
static int msi_domain_ops_init(struct irq_domain *domain,
			       struct msi_domain_info *info,
			       unsigned int virq, irq_hw_number_t hwirq,
			       msi_alloc_info_t *arg)
{
	irq_domain_set_hwirq_and_chip(domain, virq, hwirq, info->chip,
				      info->chip_data);
	/* Handler and handler data are optional and only set together */
	if (info->handler && info->handler_name) {
		__irq_set_handler(virq, info->handler, 0, info->handler_name);
		if (info->handler_data)
			irq_set_handler_data(virq, info->handler_data);
	}
	return 0;
}
619aeeb5965SJiang Liu 
/*
 * Default MSI domain operations. msi_domain_update_dom_ops() copies entries
 * from this table into a domain's ops when the domain left them NULL.
 */
static struct msi_domain_ops msi_domain_ops_default = {
	.get_hwirq		= msi_domain_ops_get_hwirq,
	.msi_init		= msi_domain_ops_init,
	.msi_prepare		= msi_domain_ops_prepare,
	.set_desc		= msi_domain_ops_set_desc,
	.domain_alloc_irqs	= __msi_domain_alloc_irqs,
	.domain_free_irqs	= __msi_domain_free_irqs,
};
628aeeb5965SJiang Liu 
/*
 * Fill in defaults for callbacks the domain did not provide.
 *
 * The domain_alloc_irqs/domain_free_irqs callbacks are unconditionally
 * defaulted because they are required for operation. The remaining
 * callbacks are only defaulted when the domain opted in via
 * MSI_FLAG_USE_DEF_DOM_OPS.
 */
static void msi_domain_update_dom_ops(struct msi_domain_info *info)
{
	struct msi_domain_ops *ops = info->ops;

	/* No ops supplied at all: use the default table wholesale */
	if (ops == NULL) {
		info->ops = &msi_domain_ops_default;
		return;
	}

	if (ops->domain_alloc_irqs == NULL)
		ops->domain_alloc_irqs = msi_domain_ops_default.domain_alloc_irqs;
	if (ops->domain_free_irqs == NULL)
		ops->domain_free_irqs = msi_domain_ops_default.domain_free_irqs;

	if (!(info->flags & MSI_FLAG_USE_DEF_DOM_OPS))
		return;

	if (ops->get_hwirq == NULL)
		ops->get_hwirq = msi_domain_ops_default.get_hwirq;
	if (ops->msi_init == NULL)
		ops->msi_init = msi_domain_ops_default.msi_init;
	if (ops->msi_prepare == NULL)
		ops->msi_prepare = msi_domain_ops_default.msi_prepare;
	if (ops->set_desc == NULL)
		ops->set_desc = msi_domain_ops_default.set_desc;
}
655aeeb5965SJiang Liu 
/*
 * Sanity check the irq chip of a domain which requested default chip ops
 * and default its irq_set_affinity callback. A chip with working mask and
 * unmask callbacks is mandatory.
 */
static void msi_domain_update_chip_ops(struct msi_domain_info *info)
{
	struct irq_chip *chip = info->chip;

	BUG_ON(!chip || !chip->irq_mask || !chip->irq_unmask);
	if (!chip->irq_set_affinity)
		chip->irq_set_affinity = msi_domain_set_affinity;
}
664aeeb5965SJiang Liu 
665f3cf8bb0SJiang Liu /**
6663b35e7e6SRandy Dunlap  * msi_create_irq_domain - Create an MSI interrupt domain
667be5436c8SMarc Zyngier  * @fwnode:	Optional fwnode of the interrupt controller
668f3cf8bb0SJiang Liu  * @info:	MSI domain info
669f3cf8bb0SJiang Liu  * @parent:	Parent irq domain
6703b35e7e6SRandy Dunlap  *
6713b35e7e6SRandy Dunlap  * Return: pointer to the created &struct irq_domain or %NULL on failure
672f3cf8bb0SJiang Liu  */
673be5436c8SMarc Zyngier struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode,
674f3cf8bb0SJiang Liu 					 struct msi_domain_info *info,
675f3cf8bb0SJiang Liu 					 struct irq_domain *parent)
676f3cf8bb0SJiang Liu {
677a97b852bSMarc Zyngier 	struct irq_domain *domain;
678a97b852bSMarc Zyngier 
679aeeb5965SJiang Liu 	msi_domain_update_dom_ops(info);
680aeeb5965SJiang Liu 	if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
681aeeb5965SJiang Liu 		msi_domain_update_chip_ops(info);
682f3cf8bb0SJiang Liu 
683a97b852bSMarc Zyngier 	domain = irq_domain_create_hierarchy(parent, IRQ_DOMAIN_FLAG_MSI, 0,
68488156f00SEric Auger 					     fwnode, &msi_domain_ops, info);
6850165308aSThomas Gleixner 
68622db089aSAhmed S. Darwish 	if (domain) {
68722db089aSAhmed S. Darwish 		if (!domain->name && info->chip)
688a97b852bSMarc Zyngier 			domain->name = info->chip->name;
68922db089aSAhmed S. Darwish 		irq_domain_update_bus_token(domain, info->bus_token);
69022db089aSAhmed S. Darwish 	}
691a97b852bSMarc Zyngier 
692a97b852bSMarc Zyngier 	return domain;
693f3cf8bb0SJiang Liu }
694f3cf8bb0SJiang Liu 
/**
 * msi_domain_prepare_irqs - Prepare the allocation of interrupts from an MSI domain
 * @domain:	The MSI irq domain
 * @dev:	Device for which the interrupts are going to be allocated
 * @nvec:	Number of vectors to allocate
 * @arg:	Allocation info which is initialized by the ->msi_prepare() callback
 *
 * Return: %0 on success, otherwise the error code returned by ->msi_prepare().
 */
int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev,
			    int nvec, msi_alloc_info_t *arg)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;

	return ops->msi_prepare(domain, dev, nvec, arg);
}
703b2eba39bSMarc Zyngier 
/**
 * msi_domain_populate_irqs - Populate a preallocated Linux interrupt range
 * @domain:	The MSI irq domain
 * @dev:	Device for which the interrupts are populated
 * @virq_base:	First Linux interrupt number of the range
 * @nvec:	Number of interrupts in the range
 * @arg:	Allocation info for the ->set_desc() callback
 *
 * Allocates MSI descriptors for the range, allocates the interrupts in the
 * domain hierarchy and associates descriptor and interrupt. On failure the
 * already allocated interrupts and all descriptors of the range are torn
 * down again.
 *
 * Return: %0 on success, an error code otherwise.
 */
int msi_domain_populate_irqs(struct irq_domain *domain, struct device *dev,
			     int virq_base, int nvec, msi_alloc_info_t *arg)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;
	struct msi_desc *desc;
	int ret, virq;

	msi_lock_descs(dev);
	/* Insert a descriptor for each Linux interrupt number of the range */
	ret = msi_add_simple_msi_descs(dev, virq_base, nvec);
	if (ret)
		goto unlock;

	for (virq = virq_base; virq < virq_base + nvec; virq++) {
		/* The descriptor was inserted above, so the lookup succeeds */
		desc = xa_load(&dev->msi.data->__store, virq);
		desc->irq = virq;

		ops->set_desc(arg, desc);
		ret = irq_domain_alloc_irqs_hierarchy(domain, virq, 1, arg);
		if (ret)
			goto fail;

		irq_set_msi_desc(virq, desc);
	}
	msi_unlock_descs(dev);
	return 0;

fail:
	/* Unwind the interrupts allocated in the previous iterations */
	for (--virq; virq >= virq_base; virq--)
		irq_domain_free_irqs_common(domain, virq, 1);
	msi_free_msi_descs_range(dev, virq_base, virq_base + nvec - 1);
unlock:
	msi_unlock_descs(dev);
	return ret;
}
7392145ac93SMarc Zyngier 
/*
 * Carefully check whether the device can use reservation mode. If
 * reservation mode is enabled then the early activation will assign a
 * dummy vector to the device. If the PCI/MSI device does not support
 * masking of the entry then this can result in spurious interrupts when
 * the device driver is not absolutely careful. But even then a malfunction
 * of the hardware could result in a spurious interrupt on the dummy vector
 * and render the device unusable. If the entry can be masked then the core
 * logic will prevent the spurious interrupt and reservation mode can be
 * used. For now reservation mode is restricted to PCI/MSI.
 */
static bool msi_check_reservation_mode(struct irq_domain *domain,
				       struct msi_domain_info *info,
				       struct device *dev)
{
	struct msi_desc *desc;

	/* Reservation mode is restricted to PCI/MSI (including VMD) */
	switch(domain->bus_token) {
	case DOMAIN_BUS_PCI_MSI:
	case DOMAIN_BUS_VMD_MSI:
		break;
	default:
		return false;
	}

	if (!(info->flags & MSI_FLAG_MUST_REACTIVATE))
		return false;

	/* The global "ignore mask" quirk defeats the masking requirement */
	if (IS_ENABLED(CONFIG_PCI_MSI) && pci_msi_ignore_mask)
		return false;

	/*
	 * Checking the first MSI descriptor is sufficient. MSIX supports
	 * masking and MSI does so when the can_mask attribute is set.
	 */
	desc = msi_first_desc(dev, MSI_DESC_ALL);
	return desc->pci.msi_attrib.is_msix || desc->pci.msi_attrib.can_mask;
}
778da5dd9e8SThomas Gleixner 
77989033762SThomas Gleixner static int msi_handle_pci_fail(struct irq_domain *domain, struct msi_desc *desc,
78089033762SThomas Gleixner 			       int allocated)
78189033762SThomas Gleixner {
78289033762SThomas Gleixner 	switch(domain->bus_token) {
78389033762SThomas Gleixner 	case DOMAIN_BUS_PCI_MSI:
78489033762SThomas Gleixner 	case DOMAIN_BUS_VMD_MSI:
78589033762SThomas Gleixner 		if (IS_ENABLED(CONFIG_PCI_MSI))
78689033762SThomas Gleixner 			break;
78789033762SThomas Gleixner 		fallthrough;
78889033762SThomas Gleixner 	default:
78989033762SThomas Gleixner 		return -ENOSPC;
79089033762SThomas Gleixner 	}
79189033762SThomas Gleixner 
79289033762SThomas Gleixner 	/* Let a failed PCI multi MSI allocation retry */
79389033762SThomas Gleixner 	if (desc->nvec_used > 1)
79489033762SThomas Gleixner 		return 1;
79589033762SThomas Gleixner 
79689033762SThomas Gleixner 	/* If there was a successful allocation let the caller know */
79789033762SThomas Gleixner 	return allocated ? allocated : -ENOSPC;
79889033762SThomas Gleixner }
79989033762SThomas Gleixner 
/* Flags for msi_init_virq() */
#define VIRQ_CAN_RESERVE	0x01	/* Interrupt may use reservation mode */
#define VIRQ_ACTIVATE		0x02	/* Activate the interrupt early */
#define VIRQ_NOMASK_QUIRK	0x04	/* MSI affinity quirk for reservation mode (x86) */
803ef8dd015SThomas Gleixner 
/*
 * Initialize a freshly allocated Linux interrupt number according to the
 * VIRQ_* flags: clear reservation mode and apply the nomask quirk when
 * reservation mode is not usable, shut down unservicable managed
 * interrupts, and optionally activate the interrupt early.
 */
static int msi_init_virq(struct irq_domain *domain, int virq, unsigned int vflags)
{
	struct irq_data *irqd = irq_domain_get_irq_data(domain, virq);
	int ret;

	if (!(vflags & VIRQ_CAN_RESERVE)) {
		irqd_clr_can_reserve(irqd);
		if (vflags & VIRQ_NOMASK_QUIRK)
			irqd_set_msi_nomask_quirk(irqd);

		/*
		 * If the interrupt is managed but no CPU is available to
		 * service it, shut it down until better times. Note that
		 * we only do this on the !RESERVE path as x86 (the only
		 * architecture using this flag) deals with this in a
		 * different way by using a catch-all vector.
		 */
		if ((vflags & VIRQ_ACTIVATE) &&
		    irqd_affinity_is_managed(irqd) &&
		    !cpumask_intersects(irq_data_get_affinity_mask(irqd),
					cpu_online_mask)) {
			    irqd_set_managed_shutdown(irqd);
			    return 0;
		    }
	}

	if (!(vflags & VIRQ_ACTIVATE))
		return 0;

	ret = irq_domain_activate_irq(irqd, vflags & VIRQ_CAN_RESERVE);
	if (ret)
		return ret;
	/*
	 * If the interrupt uses reservation mode, clear the activated bit
	 * so request_irq() will assign the final vector.
	 */
	if (vflags & VIRQ_CAN_RESERVE)
		irqd_clr_activated(irqd);
	return 0;
}
844ef8dd015SThomas Gleixner 
/*
 * Default ->domain_alloc_irqs(): allocate Linux interrupts for all MSI
 * descriptors of @dev which do not have an interrupt associated yet.
 *
 * On failure this returns without cleaning up already allocated
 * interrupts; the caller is expected to invoke the domain free path (see
 * msi_domain_alloc_irqs_descs_locked()).
 */
static int __msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
				   int nvec)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;
	msi_alloc_info_t arg = { };
	unsigned int vflags = 0;
	struct msi_desc *desc;
	int allocated = 0;
	int i, ret, virq;

	ret = msi_domain_prepare_irqs(domain, dev, nvec, &arg);
	if (ret)
		return ret;

	/*
	 * This flag is set by the PCI layer as we need to activate
	 * the MSI entries before the PCI layer enables MSI in the
	 * card. Otherwise the card latches a random msi message.
	 */
	if (info->flags & MSI_FLAG_ACTIVATE_EARLY)
		vflags |= VIRQ_ACTIVATE;

	/*
	 * Interrupt can use a reserved vector and will not occupy
	 * a real device vector until the interrupt is requested.
	 */
	if (msi_check_reservation_mode(domain, info, dev)) {
		vflags |= VIRQ_CAN_RESERVE;
		/*
		 * MSI affinity setting requires a special quirk (X86) when
		 * reservation mode is active.
		 */
		if (domain->flags & IRQ_DOMAIN_MSI_NOMASK_QUIRK)
			vflags |= VIRQ_NOMASK_QUIRK;
	}

	msi_for_each_desc(desc, dev, MSI_DESC_NOTASSOCIATED) {
		ops->set_desc(&arg, desc);

		virq = __irq_domain_alloc_irqs(domain, -1, desc->nvec_used,
					       dev_to_node(dev), &arg, false,
					       desc->affinity);
		if (virq < 0)
			return msi_handle_pci_fail(domain, desc, allocated);

		/* Associate descriptor and initialize each allocated virq */
		for (i = 0; i < desc->nvec_used; i++) {
			irq_set_msi_desc_off(virq, i, desc);
			irq_debugfs_copy_devname(virq + i, dev);
			ret = msi_init_virq(domain, virq + i, vflags);
			if (ret)
				return ret;
		}
		if (info->flags & MSI_FLAG_DEV_SYSFS) {
			ret = msi_sysfs_populate_desc(dev, desc);
			if (ret)
				return ret;
		}
		allocated++;
	}
	return 0;
}
9070f62d941SThomas Gleixner 
908645474e2SThomas Gleixner static int msi_domain_add_simple_msi_descs(struct msi_domain_info *info,
909645474e2SThomas Gleixner 					   struct device *dev,
910645474e2SThomas Gleixner 					   unsigned int num_descs)
911645474e2SThomas Gleixner {
912645474e2SThomas Gleixner 	if (!(info->flags & MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS))
913645474e2SThomas Gleixner 		return 0;
914645474e2SThomas Gleixner 
915645474e2SThomas Gleixner 	return msi_add_simple_msi_descs(dev, 0, num_descs);
916645474e2SThomas Gleixner }
917645474e2SThomas Gleixner 
/**
 * msi_domain_alloc_irqs_descs_locked - Allocate interrupts from a MSI interrupt domain
 * @domain:	The domain to allocate from
 * @dev:	Pointer to device struct of the device for which the interrupts
 *		are allocated
 * @nvec:	The number of interrupts to allocate
 *
 * Must be invoked from within a msi_lock_descs() / msi_unlock_descs()
 * pair. Use this for MSI irqdomains which implement their own vector
 * allocation/free.
 *
 * Return: %0 on success or an error code.
 */
int msi_domain_alloc_irqs_descs_locked(struct irq_domain *domain, struct device *dev,
				       int nvec)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;
	int ret;

	lockdep_assert_held(&dev->msi.data->mutex);

	ret = msi_domain_add_simple_msi_descs(info, dev, nvec);
	if (ret)
		return ret;

	ret = ops->domain_alloc_irqs(domain, dev, nvec);
	/* On failure tear down any partially allocated interrupts/descriptors */
	if (ret)
		msi_domain_free_irqs_descs_locked(domain, dev);
	return ret;
}
949d9109698SJiang Liu 
/**
 * msi_domain_alloc_irqs - Allocate interrupts from a MSI interrupt domain
 * @domain:	The domain to allocate from
 * @dev:	Pointer to device struct of the device for which the interrupts
 *		are allocated
 * @nvec:	The number of interrupts to allocate
 *
 * Return: %0 on success or an error code.
 */
int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev, int nvec)
{
	int ret;

	/* Locked wrapper around the descriptor-mutex-protected allocation */
	msi_lock_descs(dev);
	ret = msi_domain_alloc_irqs_descs_locked(domain, dev, nvec);
	msi_unlock_descs(dev);
	return ret;
}
96843e9e705SThomas Gleixner 
/*
 * Default ->domain_free_irqs(): deactivate and free the Linux interrupts of
 * all MSI descriptors of @dev which have an interrupt associated, and clear
 * the association.
 */
static void __msi_domain_free_irqs(struct irq_domain *domain, struct device *dev)
{
	struct msi_domain_info *info = domain->host_data;
	struct irq_data *irqd;
	struct msi_desc *desc;
	int i;

	/* Only handle MSI entries which have an interrupt associated */
	msi_for_each_desc(desc, dev, MSI_DESC_ASSOCIATED) {
		/* Make sure all interrupts are deactivated */
		for (i = 0; i < desc->nvec_used; i++) {
			irqd = irq_domain_get_irq_data(domain, desc->irq + i);
			if (irqd && irqd_is_activated(irqd))
				irq_domain_deactivate_irq(irqd);
		}

		irq_domain_free_irqs(desc->irq, desc->nvec_used);
		if (info->flags & MSI_FLAG_DEV_SYSFS)
			msi_sysfs_remove_desc(dev, desc);
		/* Mark the descriptor as no longer associated */
		desc->irq = 0;
	}
}
991d9109698SJiang Liu 
992645474e2SThomas Gleixner static void msi_domain_free_msi_descs(struct msi_domain_info *info,
993645474e2SThomas Gleixner 				      struct device *dev)
994645474e2SThomas Gleixner {
995645474e2SThomas Gleixner 	if (info->flags & MSI_FLAG_FREE_MSI_DESCS)
996645474e2SThomas Gleixner 		msi_free_msi_descs(dev);
997645474e2SThomas Gleixner }
998645474e2SThomas Gleixner 
/**
 * msi_domain_free_irqs_descs_locked - Free interrupts from a MSI interrupt @domain associated to @dev
 * @domain:	The domain to managing the interrupts
 * @dev:	Pointer to device struct of the device for which the interrupts
 *		are free
 *
 * Must be invoked from within a msi_lock_descs() / msi_unlock_descs()
 * pair. Use this for MSI irqdomains which implement their own vector
 * allocation.
 */
void msi_domain_free_irqs_descs_locked(struct irq_domain *domain, struct device *dev)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;

	lockdep_assert_held(&dev->msi.data->mutex);

	ops->domain_free_irqs(domain, dev);
	/* Optional hook invoked after the interrupts have been freed */
	if (ops->msi_post_free)
		ops->msi_post_free(domain, dev);
	msi_domain_free_msi_descs(info, dev);
}
10210f62d941SThomas Gleixner 
/**
 * msi_domain_free_irqs - Free interrupts from a MSI interrupt @domain associated to @dev
 * @domain:	The domain to managing the interrupts
 * @dev:	Pointer to device struct of the device for which the interrupts
 *		are free
 */
void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev)
{
	/* Locked wrapper around the descriptor-mutex-protected free path */
	msi_lock_descs(dev);
	msi_domain_free_irqs_descs_locked(domain, dev);
	msi_unlock_descs(dev);
}
103443e9e705SThomas Gleixner 
103543e9e705SThomas Gleixner /**
1036f3cf8bb0SJiang Liu  * msi_get_domain_info - Get the MSI interrupt domain info for @domain
1037f3cf8bb0SJiang Liu  * @domain:	The interrupt domain to retrieve data from
1038f3cf8bb0SJiang Liu  *
10393b35e7e6SRandy Dunlap  * Return: the pointer to the msi_domain_info stored in @domain->host_data.
1040f3cf8bb0SJiang Liu  */
1041f3cf8bb0SJiang Liu struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain)
1042f3cf8bb0SJiang Liu {
1043f3cf8bb0SJiang Liu 	return (struct msi_domain_info *)domain->host_data;
1044f3cf8bb0SJiang Liu }
1045