xref: /openbmc/linux/kernel/irq/msi.c (revision a80c0ace)
152a65ff5SThomas Gleixner // SPDX-License-Identifier: GPL-2.0
2f3cf8bb0SJiang Liu /*
3f3cf8bb0SJiang Liu  * Copyright (C) 2014 Intel Corp.
4f3cf8bb0SJiang Liu  * Author: Jiang Liu <jiang.liu@linux.intel.com>
5f3cf8bb0SJiang Liu  *
6f3cf8bb0SJiang Liu  * This file is licensed under GPLv2.
7f3cf8bb0SJiang Liu  *
8a359f757SIngo Molnar  * This file contains common code to support Message Signaled Interrupts for
9f3cf8bb0SJiang Liu  * PCI compatible and non PCI compatible devices.
10f3cf8bb0SJiang Liu  */
11aeeb5965SJiang Liu #include <linux/types.h>
12aeeb5965SJiang Liu #include <linux/device.h>
13f3cf8bb0SJiang Liu #include <linux/irq.h>
14f3cf8bb0SJiang Liu #include <linux/irqdomain.h>
15f3cf8bb0SJiang Liu #include <linux/msi.h>
164e201566SMarc Zyngier #include <linux/slab.h>
173ba1f050SThomas Gleixner #include <linux/sysfs.h>
182f170814SBarry Song #include <linux/pci.h>
19d9109698SJiang Liu 
2007557ccbSThomas Gleixner #include "internals.h"
2107557ccbSThomas Gleixner 
22377712c5SThomas Gleixner /**
23377712c5SThomas Gleixner  * struct msi_ctrl - MSI internal management control structure
24377712c5SThomas Gleixner  * @domid:	ID of the domain on which management operations should be done
25377712c5SThomas Gleixner  * @first:	First (hardware) slot index to operate on
26377712c5SThomas Gleixner  * @last:	Last (hardware) slot index to operate on
27f2480e7dSThomas Gleixner  * @nirqs:	The number of Linux interrupts to allocate. Can be larger
28f2480e7dSThomas Gleixner  *		than the range due to PCI/multi-MSI.
29377712c5SThomas Gleixner  */
30377712c5SThomas Gleixner struct msi_ctrl {
31377712c5SThomas Gleixner 	unsigned int			domid;
32377712c5SThomas Gleixner 	unsigned int			first;
33377712c5SThomas Gleixner 	unsigned int			last;
34f2480e7dSThomas Gleixner 	unsigned int			nirqs;
35377712c5SThomas Gleixner };
36377712c5SThomas Gleixner 
3794ff94cfSThomas Gleixner /* Invalid Xarray index which is outside of any searchable range */
3894ff94cfSThomas Gleixner #define MSI_XA_MAX_INDEX	(ULONG_MAX - 1)
3994ff94cfSThomas Gleixner /* The maximum domain size */
4094ff94cfSThomas Gleixner #define MSI_XA_DOMAIN_SIZE	(MSI_MAX_INDEX + 1)
4194ff94cfSThomas Gleixner 
42f2480e7dSThomas Gleixner static void msi_domain_free_locked(struct device *dev, struct msi_ctrl *ctrl);
43bf5e758fSThomas Gleixner static inline int msi_sysfs_create_group(struct device *dev);
44cc9a246dSThomas Gleixner 
4594ff94cfSThomas Gleixner 
4628f4b041SThomas Gleixner /**
47cc9a246dSThomas Gleixner  * msi_alloc_desc - Allocate an initialized msi_desc
4828f4b041SThomas Gleixner  * @dev:	Pointer to the device for which this is allocated
4928f4b041SThomas Gleixner  * @nvec:	The number of vectors used in this entry
5028f4b041SThomas Gleixner  * @affinity:	Optional pointer to an affinity mask array size of @nvec
5128f4b041SThomas Gleixner  *
523b35e7e6SRandy Dunlap  * If @affinity is not %NULL then an affinity array[@nvec] is allocated
53bec04037SDou Liyang  * and the affinity masks and flags from @affinity are copied.
543b35e7e6SRandy Dunlap  *
553b35e7e6SRandy Dunlap  * Return: pointer to allocated &msi_desc on success or %NULL on failure
5628f4b041SThomas Gleixner  */
57cc9a246dSThomas Gleixner static struct msi_desc *msi_alloc_desc(struct device *dev, int nvec,
58bec04037SDou Liyang 				       const struct irq_affinity_desc *affinity)
59aa48b6f7SJiang Liu {
60cc9a246dSThomas Gleixner 	struct msi_desc *desc = kzalloc(sizeof(*desc), GFP_KERNEL);
6128f4b041SThomas Gleixner 
62aa48b6f7SJiang Liu 	if (!desc)
63aa48b6f7SJiang Liu 		return NULL;
64aa48b6f7SJiang Liu 
65aa48b6f7SJiang Liu 	desc->dev = dev;
6628f4b041SThomas Gleixner 	desc->nvec_used = nvec;
6728f4b041SThomas Gleixner 	if (affinity) {
68cc9a246dSThomas Gleixner 		desc->affinity = kmemdup(affinity, nvec * sizeof(*desc->affinity), GFP_KERNEL);
6928f4b041SThomas Gleixner 		if (!desc->affinity) {
7028f4b041SThomas Gleixner 			kfree(desc);
7128f4b041SThomas Gleixner 			return NULL;
7228f4b041SThomas Gleixner 		}
7328f4b041SThomas Gleixner 	}
74aa48b6f7SJiang Liu 	return desc;
75aa48b6f7SJiang Liu }
76aa48b6f7SJiang Liu 
77cc9a246dSThomas Gleixner static void msi_free_desc(struct msi_desc *desc)
78aa48b6f7SJiang Liu {
79cc9a246dSThomas Gleixner 	kfree(desc->affinity);
80cc9a246dSThomas Gleixner 	kfree(desc);
81aa48b6f7SJiang Liu }
82aa48b6f7SJiang Liu 
83fc8ab388SThomas Gleixner static int msi_insert_desc(struct msi_device_data *md, struct msi_desc *desc,
84fc8ab388SThomas Gleixner 			   unsigned int domid, unsigned int index)
85cd6cf065SThomas Gleixner {
86fc8ab388SThomas Gleixner 	struct xarray *xa = &md->__domains[domid].store;
87cd6cf065SThomas Gleixner 	int ret;
88cd6cf065SThomas Gleixner 
89cd6cf065SThomas Gleixner 	desc->msi_index = index;
90f1139f90SThomas Gleixner 	ret = xa_insert(xa, index, desc, GFP_KERNEL);
91cd6cf065SThomas Gleixner 	if (ret)
92cd6cf065SThomas Gleixner 		msi_free_desc(desc);
93cd6cf065SThomas Gleixner 	return ret;
94cd6cf065SThomas Gleixner }
95cd6cf065SThomas Gleixner 
9660290525SThomas Gleixner /**
97fc8ab388SThomas Gleixner  * msi_domain_insert_msi_desc - Allocate and initialize a MSI descriptor and
981c893963SThomas Gleixner  *				insert it at @init_desc->msi_index
991c893963SThomas Gleixner  *
10060290525SThomas Gleixner  * @dev:	Pointer to the device for which the descriptor is allocated
101fc8ab388SThomas Gleixner  * @domid:	The id of the interrupt domain to which the desriptor is added
10260290525SThomas Gleixner  * @init_desc:	Pointer to an MSI descriptor to initialize the new descriptor
10360290525SThomas Gleixner  *
10460290525SThomas Gleixner  * Return: 0 on success or an appropriate failure code.
10560290525SThomas Gleixner  */
106fc8ab388SThomas Gleixner int msi_domain_insert_msi_desc(struct device *dev, unsigned int domid,
107fc8ab388SThomas Gleixner 			       struct msi_desc *init_desc)
10860290525SThomas Gleixner {
10960290525SThomas Gleixner 	struct msi_desc *desc;
11060290525SThomas Gleixner 
11160290525SThomas Gleixner 	lockdep_assert_held(&dev->msi.data->mutex);
11260290525SThomas Gleixner 
113cc9a246dSThomas Gleixner 	desc = msi_alloc_desc(dev, init_desc->nvec_used, init_desc->affinity);
11460290525SThomas Gleixner 	if (!desc)
11560290525SThomas Gleixner 		return -ENOMEM;
11660290525SThomas Gleixner 
117cd6cf065SThomas Gleixner 	/* Copy type specific data to the new descriptor. */
11860290525SThomas Gleixner 	desc->pci = init_desc->pci;
119fc8ab388SThomas Gleixner 
120fc8ab388SThomas Gleixner 	return msi_insert_desc(dev->msi.data, desc, domid, init_desc->msi_index);
12160290525SThomas Gleixner }
12260290525SThomas Gleixner 
123cd6cf065SThomas Gleixner static bool msi_desc_match(struct msi_desc *desc, enum msi_desc_filter filter)
124cd6cf065SThomas Gleixner {
125cd6cf065SThomas Gleixner 	switch (filter) {
126cd6cf065SThomas Gleixner 	case MSI_DESC_ALL:
127cd6cf065SThomas Gleixner 		return true;
128cd6cf065SThomas Gleixner 	case MSI_DESC_NOTASSOCIATED:
129cd6cf065SThomas Gleixner 		return !desc->irq;
130cd6cf065SThomas Gleixner 	case MSI_DESC_ASSOCIATED:
131cd6cf065SThomas Gleixner 		return !!desc->irq;
132cd6cf065SThomas Gleixner 	}
133cd6cf065SThomas Gleixner 	WARN_ON_ONCE(1);
134cd6cf065SThomas Gleixner 	return false;
13560290525SThomas Gleixner }
13660290525SThomas Gleixner 
137377712c5SThomas Gleixner static bool msi_ctrl_valid(struct device *dev, struct msi_ctrl *ctrl)
138645474e2SThomas Gleixner {
139377712c5SThomas Gleixner 	if (WARN_ON_ONCE(ctrl->domid >= MSI_MAX_DEVICE_IRQDOMAINS ||
14040742716SThomas Gleixner 			 !dev->msi.data->__domains[ctrl->domid].domain ||
141377712c5SThomas Gleixner 			 ctrl->first > ctrl->last ||
142377712c5SThomas Gleixner 			 ctrl->first > MSI_MAX_INDEX ||
143377712c5SThomas Gleixner 			 ctrl->last > MSI_MAX_INDEX))
144377712c5SThomas Gleixner 		return false;
145377712c5SThomas Gleixner 	return true;
146377712c5SThomas Gleixner }
147377712c5SThomas Gleixner 
148377712c5SThomas Gleixner static void msi_domain_free_descs(struct device *dev, struct msi_ctrl *ctrl)
149377712c5SThomas Gleixner {
150645474e2SThomas Gleixner 	struct msi_desc *desc;
151377712c5SThomas Gleixner 	struct xarray *xa;
152cd6cf065SThomas Gleixner 	unsigned long idx;
153645474e2SThomas Gleixner 
154645474e2SThomas Gleixner 	lockdep_assert_held(&dev->msi.data->mutex);
155645474e2SThomas Gleixner 
156377712c5SThomas Gleixner 	if (!msi_ctrl_valid(dev, ctrl))
157377712c5SThomas Gleixner 		return;
158377712c5SThomas Gleixner 
159377712c5SThomas Gleixner 	xa = &dev->msi.data->__domains[ctrl->domid].store;
160377712c5SThomas Gleixner 	xa_for_each_range(xa, idx, desc, ctrl->first, ctrl->last) {
161cd6cf065SThomas Gleixner 		xa_erase(xa, idx);
1622f2940d1SThomas Gleixner 
1632f2940d1SThomas Gleixner 		/* Leak the descriptor when it is still referenced */
1642f2940d1SThomas Gleixner 		if (WARN_ON_ONCE(msi_desc_match(desc, MSI_DESC_ASSOCIATED)))
1652f2940d1SThomas Gleixner 			continue;
166cc9a246dSThomas Gleixner 		msi_free_desc(desc);
167645474e2SThomas Gleixner 	}
168645474e2SThomas Gleixner }
169645474e2SThomas Gleixner 
170377712c5SThomas Gleixner /**
171377712c5SThomas Gleixner  * msi_domain_free_msi_descs_range - Free a range of MSI descriptors of a device in an irqdomain
172377712c5SThomas Gleixner  * @dev:	Device for which to free the descriptors
173377712c5SThomas Gleixner  * @domid:	Id of the domain to operate on
174377712c5SThomas Gleixner  * @first:	Index to start freeing from (inclusive)
175377712c5SThomas Gleixner  * @last:	Last index to be freed (inclusive)
176377712c5SThomas Gleixner  */
177377712c5SThomas Gleixner void msi_domain_free_msi_descs_range(struct device *dev, unsigned int domid,
178377712c5SThomas Gleixner 				     unsigned int first, unsigned int last)
179377712c5SThomas Gleixner {
180377712c5SThomas Gleixner 	struct msi_ctrl ctrl = {
181377712c5SThomas Gleixner 		.domid	= domid,
182377712c5SThomas Gleixner 		.first	= first,
183377712c5SThomas Gleixner 		.last	= last,
184377712c5SThomas Gleixner 	};
185377712c5SThomas Gleixner 
186377712c5SThomas Gleixner 	msi_domain_free_descs(dev, &ctrl);
187377712c5SThomas Gleixner }
188377712c5SThomas Gleixner 
18940742716SThomas Gleixner /**
19040742716SThomas Gleixner  * msi_domain_add_simple_msi_descs - Allocate and initialize MSI descriptors
19140742716SThomas Gleixner  * @dev:	Pointer to the device for which the descriptors are allocated
19240742716SThomas Gleixner  * @ctrl:	Allocation control struct
19340742716SThomas Gleixner  *
19440742716SThomas Gleixner  * Return: 0 on success or an appropriate failure code.
19540742716SThomas Gleixner  */
19640742716SThomas Gleixner static int msi_domain_add_simple_msi_descs(struct device *dev, struct msi_ctrl *ctrl)
19740742716SThomas Gleixner {
19840742716SThomas Gleixner 	struct msi_desc *desc;
19940742716SThomas Gleixner 	unsigned int idx;
20040742716SThomas Gleixner 	int ret;
20140742716SThomas Gleixner 
20240742716SThomas Gleixner 	lockdep_assert_held(&dev->msi.data->mutex);
20340742716SThomas Gleixner 
20440742716SThomas Gleixner 	if (!msi_ctrl_valid(dev, ctrl))
20540742716SThomas Gleixner 		return -EINVAL;
20640742716SThomas Gleixner 
20740742716SThomas Gleixner 	for (idx = ctrl->first; idx <= ctrl->last; idx++) {
20840742716SThomas Gleixner 		desc = msi_alloc_desc(dev, 1, NULL);
20940742716SThomas Gleixner 		if (!desc)
21040742716SThomas Gleixner 			goto fail_mem;
21140742716SThomas Gleixner 		ret = msi_insert_desc(dev->msi.data, desc, ctrl->domid, idx);
21240742716SThomas Gleixner 		if (ret)
21340742716SThomas Gleixner 			goto fail;
21440742716SThomas Gleixner 	}
21540742716SThomas Gleixner 	return 0;
21640742716SThomas Gleixner 
21740742716SThomas Gleixner fail_mem:
21840742716SThomas Gleixner 	ret = -ENOMEM;
21940742716SThomas Gleixner fail:
22040742716SThomas Gleixner 	msi_domain_free_descs(dev, ctrl);
22140742716SThomas Gleixner 	return ret;
22240742716SThomas Gleixner }
22340742716SThomas Gleixner 
22438b6a1cfSJiang Liu void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
22538b6a1cfSJiang Liu {
22638b6a1cfSJiang Liu 	*msg = entry->msg;
22738b6a1cfSJiang Liu }
22838b6a1cfSJiang Liu 
22938b6a1cfSJiang Liu void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg)
23038b6a1cfSJiang Liu {
23138b6a1cfSJiang Liu 	struct msi_desc *entry = irq_get_msi_desc(irq);
23238b6a1cfSJiang Liu 
23338b6a1cfSJiang Liu 	__get_cached_msi_msg(entry, msg);
23438b6a1cfSJiang Liu }
23538b6a1cfSJiang Liu EXPORT_SYMBOL_GPL(get_cached_msi_msg);
23638b6a1cfSJiang Liu 
237013bd8e5SThomas Gleixner static void msi_device_data_release(struct device *dev, void *res)
238013bd8e5SThomas Gleixner {
239125282cdSThomas Gleixner 	struct msi_device_data *md = res;
240f1139f90SThomas Gleixner 	int i;
241125282cdSThomas Gleixner 
242f1139f90SThomas Gleixner 	for (i = 0; i < MSI_MAX_DEVICE_IRQDOMAINS; i++) {
243f1139f90SThomas Gleixner 		WARN_ON_ONCE(!xa_empty(&md->__domains[i].store));
244f1139f90SThomas Gleixner 		xa_destroy(&md->__domains[i].store);
245f1139f90SThomas Gleixner 	}
246013bd8e5SThomas Gleixner 	dev->msi.data = NULL;
247013bd8e5SThomas Gleixner }
248013bd8e5SThomas Gleixner 
249013bd8e5SThomas Gleixner /**
250013bd8e5SThomas Gleixner  * msi_setup_device_data - Setup MSI device data
251013bd8e5SThomas Gleixner  * @dev:	Device for which MSI device data should be set up
252013bd8e5SThomas Gleixner  *
253013bd8e5SThomas Gleixner  * Return: 0 on success, appropriate error code otherwise
254013bd8e5SThomas Gleixner  *
255013bd8e5SThomas Gleixner  * This can be called more than once for @dev. If the MSI device data is
256013bd8e5SThomas Gleixner  * already allocated the call succeeds. The allocated memory is
257013bd8e5SThomas Gleixner  * automatically released when the device is destroyed.
258013bd8e5SThomas Gleixner  */
259013bd8e5SThomas Gleixner int msi_setup_device_data(struct device *dev)
260013bd8e5SThomas Gleixner {
261013bd8e5SThomas Gleixner 	struct msi_device_data *md;
262f1139f90SThomas Gleixner 	int ret, i;
263013bd8e5SThomas Gleixner 
264013bd8e5SThomas Gleixner 	if (dev->msi.data)
265013bd8e5SThomas Gleixner 		return 0;
266013bd8e5SThomas Gleixner 
267013bd8e5SThomas Gleixner 	md = devres_alloc(msi_device_data_release, sizeof(*md), GFP_KERNEL);
268013bd8e5SThomas Gleixner 	if (!md)
269013bd8e5SThomas Gleixner 		return -ENOMEM;
270013bd8e5SThomas Gleixner 
271bf5e758fSThomas Gleixner 	ret = msi_sysfs_create_group(dev);
272bf5e758fSThomas Gleixner 	if (ret) {
273bf5e758fSThomas Gleixner 		devres_free(md);
274bf5e758fSThomas Gleixner 		return ret;
275bf5e758fSThomas Gleixner 	}
276bf5e758fSThomas Gleixner 
277f1139f90SThomas Gleixner 	for (i = 0; i < MSI_MAX_DEVICE_IRQDOMAINS; i++)
278f1139f90SThomas Gleixner 		xa_init(&md->__domains[i].store);
279f1139f90SThomas Gleixner 
28064258eaaSThomas Gleixner 	/*
28164258eaaSThomas Gleixner 	 * If @dev::msi::domain is set and is a global MSI domain, copy the
28264258eaaSThomas Gleixner 	 * pointer into the domain array so all code can operate on domain
28364258eaaSThomas Gleixner 	 * ids. The NULL pointer check is required to keep the legacy
28464258eaaSThomas Gleixner 	 * architecture specific PCI/MSI support working.
28564258eaaSThomas Gleixner 	 */
28664258eaaSThomas Gleixner 	if (dev->msi.domain && !irq_domain_is_msi_parent(dev->msi.domain))
28764258eaaSThomas Gleixner 		md->__domains[MSI_DEFAULT_DOMAIN].domain = dev->msi.domain;
28864258eaaSThomas Gleixner 
289b5f687f9SThomas Gleixner 	mutex_init(&md->mutex);
290013bd8e5SThomas Gleixner 	dev->msi.data = md;
291013bd8e5SThomas Gleixner 	devres_add(dev, md);
292013bd8e5SThomas Gleixner 	return 0;
293013bd8e5SThomas Gleixner }
294013bd8e5SThomas Gleixner 
295cf15f43aSThomas Gleixner /**
296b5f687f9SThomas Gleixner  * msi_lock_descs - Lock the MSI descriptor storage of a device
297b5f687f9SThomas Gleixner  * @dev:	Device to operate on
298b5f687f9SThomas Gleixner  */
299b5f687f9SThomas Gleixner void msi_lock_descs(struct device *dev)
300b5f687f9SThomas Gleixner {
301b5f687f9SThomas Gleixner 	mutex_lock(&dev->msi.data->mutex);
302b5f687f9SThomas Gleixner }
303b5f687f9SThomas Gleixner EXPORT_SYMBOL_GPL(msi_lock_descs);
304b5f687f9SThomas Gleixner 
305b5f687f9SThomas Gleixner /**
306b5f687f9SThomas Gleixner  * msi_unlock_descs - Unlock the MSI descriptor storage of a device
307b5f687f9SThomas Gleixner  * @dev:	Device to operate on
308b5f687f9SThomas Gleixner  */
309b5f687f9SThomas Gleixner void msi_unlock_descs(struct device *dev)
310b5f687f9SThomas Gleixner {
311f1139f90SThomas Gleixner 	/* Invalidate the index which was cached by the iterator */
31294ff94cfSThomas Gleixner 	dev->msi.data->__iter_idx = MSI_XA_MAX_INDEX;
313b5f687f9SThomas Gleixner 	mutex_unlock(&dev->msi.data->mutex);
314b5f687f9SThomas Gleixner }
315b5f687f9SThomas Gleixner EXPORT_SYMBOL_GPL(msi_unlock_descs);
316b5f687f9SThomas Gleixner 
31794ff94cfSThomas Gleixner static struct msi_desc *msi_find_desc(struct msi_device_data *md, unsigned int domid,
31894ff94cfSThomas Gleixner 				      enum msi_desc_filter filter)
3191046f71dSThomas Gleixner {
32094ff94cfSThomas Gleixner 	struct xarray *xa = &md->__domains[domid].store;
3211046f71dSThomas Gleixner 	struct msi_desc *desc;
3221046f71dSThomas Gleixner 
323f1139f90SThomas Gleixner 	xa_for_each_start(xa, md->__iter_idx, desc, md->__iter_idx) {
3241046f71dSThomas Gleixner 		if (msi_desc_match(desc, filter))
3251046f71dSThomas Gleixner 			return desc;
3261046f71dSThomas Gleixner 	}
32794ff94cfSThomas Gleixner 	md->__iter_idx = MSI_XA_MAX_INDEX;
3281046f71dSThomas Gleixner 	return NULL;
3291046f71dSThomas Gleixner }
3301046f71dSThomas Gleixner 
3311046f71dSThomas Gleixner /**
33294ff94cfSThomas Gleixner  * msi_domain_first_desc - Get the first MSI descriptor of an irqdomain associated to a device
3331046f71dSThomas Gleixner  * @dev:	Device to operate on
33494ff94cfSThomas Gleixner  * @domid:	The id of the interrupt domain which should be walked.
3351046f71dSThomas Gleixner  * @filter:	Descriptor state filter
3361046f71dSThomas Gleixner  *
3371046f71dSThomas Gleixner  * Must be called with the MSI descriptor mutex held, i.e. msi_lock_descs()
3381046f71dSThomas Gleixner  * must be invoked before the call.
3391046f71dSThomas Gleixner  *
3401046f71dSThomas Gleixner  * Return: Pointer to the first MSI descriptor matching the search
3411046f71dSThomas Gleixner  *	   criteria, NULL if none found.
3421046f71dSThomas Gleixner  */
34394ff94cfSThomas Gleixner struct msi_desc *msi_domain_first_desc(struct device *dev, unsigned int domid,
34494ff94cfSThomas Gleixner 				       enum msi_desc_filter filter)
3451046f71dSThomas Gleixner {
346cd6cf065SThomas Gleixner 	struct msi_device_data *md = dev->msi.data;
3471046f71dSThomas Gleixner 
34894ff94cfSThomas Gleixner 	if (WARN_ON_ONCE(!md || domid >= MSI_MAX_DEVICE_IRQDOMAINS))
3491046f71dSThomas Gleixner 		return NULL;
3501046f71dSThomas Gleixner 
351cd6cf065SThomas Gleixner 	lockdep_assert_held(&md->mutex);
3521046f71dSThomas Gleixner 
353cd6cf065SThomas Gleixner 	md->__iter_idx = 0;
35494ff94cfSThomas Gleixner 	return msi_find_desc(md, domid, filter);
3551046f71dSThomas Gleixner }
35694ff94cfSThomas Gleixner EXPORT_SYMBOL_GPL(msi_domain_first_desc);
3571046f71dSThomas Gleixner 
3581046f71dSThomas Gleixner /**
3591046f71dSThomas Gleixner  * msi_next_desc - Get the next MSI descriptor of a device
3601046f71dSThomas Gleixner  * @dev:	Device to operate on
36194ff94cfSThomas Gleixner  * @domid:	The id of the interrupt domain which should be walked.
362fdd53404SThomas Gleixner  * @filter:	Descriptor state filter
3631046f71dSThomas Gleixner  *
3641046f71dSThomas Gleixner  * The first invocation of msi_next_desc() has to be preceeded by a
365cd6cf065SThomas Gleixner  * successful invocation of __msi_first_desc(). Consecutive invocations are
3661046f71dSThomas Gleixner  * only valid if the previous one was successful. All these operations have
3671046f71dSThomas Gleixner  * to be done within the same MSI mutex held region.
3681046f71dSThomas Gleixner  *
3691046f71dSThomas Gleixner  * Return: Pointer to the next MSI descriptor matching the search
3701046f71dSThomas Gleixner  *	   criteria, NULL if none found.
3711046f71dSThomas Gleixner  */
37294ff94cfSThomas Gleixner struct msi_desc *msi_next_desc(struct device *dev, unsigned int domid,
37394ff94cfSThomas Gleixner 			       enum msi_desc_filter filter)
3741046f71dSThomas Gleixner {
375cd6cf065SThomas Gleixner 	struct msi_device_data *md = dev->msi.data;
3761046f71dSThomas Gleixner 
37794ff94cfSThomas Gleixner 	if (WARN_ON_ONCE(!md || domid >= MSI_MAX_DEVICE_IRQDOMAINS))
3781046f71dSThomas Gleixner 		return NULL;
3791046f71dSThomas Gleixner 
380cd6cf065SThomas Gleixner 	lockdep_assert_held(&md->mutex);
3811046f71dSThomas Gleixner 
382cd6cf065SThomas Gleixner 	if (md->__iter_idx >= (unsigned long)MSI_MAX_INDEX)
3831046f71dSThomas Gleixner 		return NULL;
3841046f71dSThomas Gleixner 
385cd6cf065SThomas Gleixner 	md->__iter_idx++;
38694ff94cfSThomas Gleixner 	return msi_find_desc(md, domid, filter);
3871046f71dSThomas Gleixner }
3881046f71dSThomas Gleixner EXPORT_SYMBOL_GPL(msi_next_desc);
3891046f71dSThomas Gleixner 
390b5f687f9SThomas Gleixner /**
39198043704SAhmed S. Darwish  * msi_domain_get_virq - Lookup the Linux interrupt number for a MSI index on a interrupt domain
392cf15f43aSThomas Gleixner  * @dev:	Device to operate on
39398043704SAhmed S. Darwish  * @domid:	Domain ID of the interrupt domain associated to the device
394cf15f43aSThomas Gleixner  * @index:	MSI interrupt index to look for (0-based)
395cf15f43aSThomas Gleixner  *
396cf15f43aSThomas Gleixner  * Return: The Linux interrupt number on success (> 0), 0 if not found
397cf15f43aSThomas Gleixner  */
39898043704SAhmed S. Darwish unsigned int msi_domain_get_virq(struct device *dev, unsigned int domid, unsigned int index)
399cf15f43aSThomas Gleixner {
400cf15f43aSThomas Gleixner 	struct msi_desc *desc;
401495c66acSThomas Gleixner 	unsigned int ret = 0;
40298043704SAhmed S. Darwish 	bool pcimsi = false;
403f1139f90SThomas Gleixner 	struct xarray *xa;
404cf15f43aSThomas Gleixner 
405cf15f43aSThomas Gleixner 	if (!dev->msi.data)
406cf15f43aSThomas Gleixner 		return 0;
407cf15f43aSThomas Gleixner 
40898043704SAhmed S. Darwish 	if (WARN_ON_ONCE(index > MSI_MAX_INDEX || domid >= MSI_MAX_DEVICE_IRQDOMAINS))
40998043704SAhmed S. Darwish 		return 0;
41098043704SAhmed S. Darwish 
41198043704SAhmed S. Darwish 	/* This check is only valid for the PCI default MSI domain */
41298043704SAhmed S. Darwish 	if (dev_is_pci(dev) && domid == MSI_DEFAULT_DOMAIN)
41398043704SAhmed S. Darwish 		pcimsi = to_pci_dev(dev)->msi_enabled;
414cf15f43aSThomas Gleixner 
415495c66acSThomas Gleixner 	msi_lock_descs(dev);
41698043704SAhmed S. Darwish 	xa = &dev->msi.data->__domains[domid].store;
417f1139f90SThomas Gleixner 	desc = xa_load(xa, pcimsi ? 0 : index);
418cd6cf065SThomas Gleixner 	if (desc && desc->irq) {
419cf15f43aSThomas Gleixner 		/*
420cd6cf065SThomas Gleixner 		 * PCI-MSI has only one descriptor for multiple interrupts.
421cf15f43aSThomas Gleixner 		 * PCI-MSIX and platform MSI use a descriptor per
422cf15f43aSThomas Gleixner 		 * interrupt.
423cf15f43aSThomas Gleixner 		 */
424cd6cf065SThomas Gleixner 		if (pcimsi) {
425cd6cf065SThomas Gleixner 			if (index < desc->nvec_used)
426cd6cf065SThomas Gleixner 				ret = desc->irq + index;
427cd6cf065SThomas Gleixner 		} else {
428495c66acSThomas Gleixner 			ret = desc->irq;
429cf15f43aSThomas Gleixner 		}
430495c66acSThomas Gleixner 	}
43198043704SAhmed S. Darwish 
432495c66acSThomas Gleixner 	msi_unlock_descs(dev);
433495c66acSThomas Gleixner 	return ret;
434cf15f43aSThomas Gleixner }
43598043704SAhmed S. Darwish EXPORT_SYMBOL_GPL(msi_domain_get_virq);
436cf15f43aSThomas Gleixner 
4371197528aSThomas Gleixner #ifdef CONFIG_SYSFS
438bf5e758fSThomas Gleixner static struct attribute *msi_dev_attrs[] = {
439bf5e758fSThomas Gleixner 	NULL
440bf5e758fSThomas Gleixner };
441bf5e758fSThomas Gleixner 
442bf5e758fSThomas Gleixner static const struct attribute_group msi_irqs_group = {
443bf5e758fSThomas Gleixner 	.name	= "msi_irqs",
444bf5e758fSThomas Gleixner 	.attrs	= msi_dev_attrs,
445bf5e758fSThomas Gleixner };
446bf5e758fSThomas Gleixner 
447bf5e758fSThomas Gleixner static inline int msi_sysfs_create_group(struct device *dev)
448bf5e758fSThomas Gleixner {
449bf5e758fSThomas Gleixner 	return devm_device_add_group(dev, &msi_irqs_group);
450bf5e758fSThomas Gleixner }
451bf5e758fSThomas Gleixner 
4522f170814SBarry Song static ssize_t msi_mode_show(struct device *dev, struct device_attribute *attr,
4532f170814SBarry Song 			     char *buf)
4542f170814SBarry Song {
4556ef7f771SThomas Gleixner 	/* MSI vs. MSIX is per device not per interrupt */
4566ef7f771SThomas Gleixner 	bool is_msix = dev_is_pci(dev) ? to_pci_dev(dev)->msix_enabled : false;
4572f170814SBarry Song 
4582f170814SBarry Song 	return sysfs_emit(buf, "%s\n", is_msix ? "msix" : "msi");
4592f170814SBarry Song }
4602f170814SBarry Song 
461bf5e758fSThomas Gleixner static void msi_sysfs_remove_desc(struct device *dev, struct msi_desc *desc)
4622f170814SBarry Song {
463bf5e758fSThomas Gleixner 	struct device_attribute *attrs = desc->sysfs_attrs;
4642f170814SBarry Song 	int i;
4652f170814SBarry Song 
466bf5e758fSThomas Gleixner 	if (!attrs)
467bf5e758fSThomas Gleixner 		return;
4682f170814SBarry Song 
469bf5e758fSThomas Gleixner 	desc->sysfs_attrs = NULL;
470bf5e758fSThomas Gleixner 	for (i = 0; i < desc->nvec_used; i++) {
471bf5e758fSThomas Gleixner 		if (attrs[i].show)
472bf5e758fSThomas Gleixner 			sysfs_remove_file_from_group(&dev->kobj, &attrs[i].attr, msi_irqs_group.name);
473bf5e758fSThomas Gleixner 		kfree(attrs[i].attr.name);
4742f170814SBarry Song 	}
475bf5e758fSThomas Gleixner 	kfree(attrs);
4762f170814SBarry Song }
4772f170814SBarry Song 
478bf5e758fSThomas Gleixner static int msi_sysfs_populate_desc(struct device *dev, struct msi_desc *desc)
479bf5e758fSThomas Gleixner {
480bf5e758fSThomas Gleixner 	struct device_attribute *attrs;
481bf5e758fSThomas Gleixner 	int ret, i;
4822f170814SBarry Song 
483bf5e758fSThomas Gleixner 	attrs = kcalloc(desc->nvec_used, sizeof(*attrs), GFP_KERNEL);
484bf5e758fSThomas Gleixner 	if (!attrs)
485bf5e758fSThomas Gleixner 		return -ENOMEM;
4862f170814SBarry Song 
487bf5e758fSThomas Gleixner 	desc->sysfs_attrs = attrs;
488bf5e758fSThomas Gleixner 	for (i = 0; i < desc->nvec_used; i++) {
489bf5e758fSThomas Gleixner 		sysfs_attr_init(&attrs[i].attr);
490bf5e758fSThomas Gleixner 		attrs[i].attr.name = kasprintf(GFP_KERNEL, "%d", desc->irq + i);
491bf5e758fSThomas Gleixner 		if (!attrs[i].attr.name) {
492bf5e758fSThomas Gleixner 			ret = -ENOMEM;
493bf5e758fSThomas Gleixner 			goto fail;
4942f170814SBarry Song 		}
4952f170814SBarry Song 
496bf5e758fSThomas Gleixner 		attrs[i].attr.mode = 0444;
497bf5e758fSThomas Gleixner 		attrs[i].show = msi_mode_show;
498bf5e758fSThomas Gleixner 
499bf5e758fSThomas Gleixner 		ret = sysfs_add_file_to_group(&dev->kobj, &attrs[i].attr, msi_irqs_group.name);
500bf5e758fSThomas Gleixner 		if (ret) {
501bf5e758fSThomas Gleixner 			attrs[i].show = NULL;
502bf5e758fSThomas Gleixner 			goto fail;
503bf5e758fSThomas Gleixner 		}
504bf5e758fSThomas Gleixner 	}
505bf5e758fSThomas Gleixner 	return 0;
506bf5e758fSThomas Gleixner 
507bf5e758fSThomas Gleixner fail:
508bf5e758fSThomas Gleixner 	msi_sysfs_remove_desc(dev, desc);
509bf5e758fSThomas Gleixner 	return ret;
510bf5e758fSThomas Gleixner }
511bf5e758fSThomas Gleixner 
512bf5e758fSThomas Gleixner #ifdef CONFIG_PCI_MSI_ARCH_FALLBACKS
5132f170814SBarry Song /**
514bf6e054eSThomas Gleixner  * msi_device_populate_sysfs - Populate msi_irqs sysfs entries for a device
515bf6e054eSThomas Gleixner  * @dev:	The device (PCI, platform etc) which will get sysfs entries
516bf6e054eSThomas Gleixner  */
517bf6e054eSThomas Gleixner int msi_device_populate_sysfs(struct device *dev)
518bf6e054eSThomas Gleixner {
519bf5e758fSThomas Gleixner 	struct msi_desc *desc;
520bf5e758fSThomas Gleixner 	int ret;
521bf6e054eSThomas Gleixner 
522bf5e758fSThomas Gleixner 	msi_for_each_desc(desc, dev, MSI_DESC_ASSOCIATED) {
523bf5e758fSThomas Gleixner 		if (desc->sysfs_attrs)
524bf5e758fSThomas Gleixner 			continue;
525bf5e758fSThomas Gleixner 		ret = msi_sysfs_populate_desc(dev, desc);
526bf5e758fSThomas Gleixner 		if (ret)
527bf5e758fSThomas Gleixner 			return ret;
528bf5e758fSThomas Gleixner 	}
529bf6e054eSThomas Gleixner 	return 0;
530bf6e054eSThomas Gleixner }
531bf6e054eSThomas Gleixner 
532bf6e054eSThomas Gleixner /**
53324cff375SThomas Gleixner  * msi_device_destroy_sysfs - Destroy msi_irqs sysfs entries for a device
53424cff375SThomas Gleixner  * @dev:		The device (PCI, platform etc) for which to remove
53524cff375SThomas Gleixner  *			sysfs entries
5362f170814SBarry Song  */
53724cff375SThomas Gleixner void msi_device_destroy_sysfs(struct device *dev)
5382f170814SBarry Song {
539bf5e758fSThomas Gleixner 	struct msi_desc *desc;
5402f170814SBarry Song 
541bf5e758fSThomas Gleixner 	msi_for_each_desc(desc, dev, MSI_DESC_ALL)
542bf5e758fSThomas Gleixner 		msi_sysfs_remove_desc(dev, desc);
5432f170814SBarry Song }
544bf5e758fSThomas Gleixner #endif /* CONFIG_PCI_MSI_ARCH_FALLBACK */
545bf5e758fSThomas Gleixner #else /* CONFIG_SYSFS */
546bf5e758fSThomas Gleixner static inline int msi_sysfs_create_group(struct device *dev) { return 0; }
547bf5e758fSThomas Gleixner static inline int msi_sysfs_populate_desc(struct device *dev, struct msi_desc *desc) { return 0; }
548bf5e758fSThomas Gleixner static inline void msi_sysfs_remove_desc(struct device *dev, struct msi_desc *desc) { }
549bf5e758fSThomas Gleixner #endif /* !CONFIG_SYSFS */
5502f170814SBarry Song 
5514cd5f440SThomas Gleixner static struct irq_domain *msi_get_device_domain(struct device *dev, unsigned int domid)
5524cd5f440SThomas Gleixner {
5534cd5f440SThomas Gleixner 	struct irq_domain *domain;
5544cd5f440SThomas Gleixner 
5554cd5f440SThomas Gleixner 	lockdep_assert_held(&dev->msi.data->mutex);
5564cd5f440SThomas Gleixner 
5574cd5f440SThomas Gleixner 	if (WARN_ON_ONCE(domid >= MSI_MAX_DEVICE_IRQDOMAINS))
5584cd5f440SThomas Gleixner 		return NULL;
5594cd5f440SThomas Gleixner 
5604cd5f440SThomas Gleixner 	domain = dev->msi.data->__domains[domid].domain;
5614cd5f440SThomas Gleixner 	if (!domain)
5624cd5f440SThomas Gleixner 		return NULL;
5634cd5f440SThomas Gleixner 
5644cd5f440SThomas Gleixner 	if (WARN_ON_ONCE(irq_domain_is_msi_parent(domain)))
5654cd5f440SThomas Gleixner 		return NULL;
5664cd5f440SThomas Gleixner 
5674cd5f440SThomas Gleixner 	return domain;
5684cd5f440SThomas Gleixner }
569762687ceSThomas Gleixner 
57074faaf7aSThomas Gleixner static inline void irq_chip_write_msi_msg(struct irq_data *data,
57174faaf7aSThomas Gleixner 					  struct msi_msg *msg)
57274faaf7aSThomas Gleixner {
57374faaf7aSThomas Gleixner 	data->chip->irq_write_msi_msg(data, msg);
57474faaf7aSThomas Gleixner }
57574faaf7aSThomas Gleixner 
5760be8153cSMarc Zyngier static void msi_check_level(struct irq_domain *domain, struct msi_msg *msg)
5770be8153cSMarc Zyngier {
5780be8153cSMarc Zyngier 	struct msi_domain_info *info = domain->host_data;
5790be8153cSMarc Zyngier 
5800be8153cSMarc Zyngier 	/*
5810be8153cSMarc Zyngier 	 * If the MSI provider has messed with the second message and
5820be8153cSMarc Zyngier 	 * not advertized that it is level-capable, signal the breakage.
5830be8153cSMarc Zyngier 	 */
5840be8153cSMarc Zyngier 	WARN_ON(!((info->flags & MSI_FLAG_LEVEL_CAPABLE) &&
5850be8153cSMarc Zyngier 		  (info->chip->flags & IRQCHIP_SUPPORTS_LEVEL_MSI)) &&
5860be8153cSMarc Zyngier 		(msg[1].address_lo || msg[1].address_hi || msg[1].data));
5870be8153cSMarc Zyngier }
5880be8153cSMarc Zyngier 
589f3cf8bb0SJiang Liu /**
590f3cf8bb0SJiang Liu  * msi_domain_set_affinity - Generic affinity setter function for MSI domains
591f3cf8bb0SJiang Liu  * @irq_data:	The irq data associated to the interrupt
592f3cf8bb0SJiang Liu  * @mask:	The affinity mask to set
593f3cf8bb0SJiang Liu  * @force:	Flag to enforce setting (disable online checks)
594f3cf8bb0SJiang Liu  *
595f3cf8bb0SJiang Liu  * Intended to be used by MSI interrupt controllers which are
596f3cf8bb0SJiang Liu  * implemented with hierarchical domains.
5973b35e7e6SRandy Dunlap  *
5983b35e7e6SRandy Dunlap  * Return: IRQ_SET_MASK_* result code
599f3cf8bb0SJiang Liu  */
600f3cf8bb0SJiang Liu int msi_domain_set_affinity(struct irq_data *irq_data,
601f3cf8bb0SJiang Liu 			    const struct cpumask *mask, bool force)
602f3cf8bb0SJiang Liu {
603f3cf8bb0SJiang Liu 	struct irq_data *parent = irq_data->parent_data;
6040be8153cSMarc Zyngier 	struct msi_msg msg[2] = { [1] = { }, };
605f3cf8bb0SJiang Liu 	int ret;
606f3cf8bb0SJiang Liu 
607f3cf8bb0SJiang Liu 	ret = parent->chip->irq_set_affinity(parent, mask, force);
608f3cf8bb0SJiang Liu 	if (ret >= 0 && ret != IRQ_SET_MASK_OK_DONE) {
6090be8153cSMarc Zyngier 		BUG_ON(irq_chip_compose_msi_msg(irq_data, msg));
6100be8153cSMarc Zyngier 		msi_check_level(irq_data->domain, msg);
6110be8153cSMarc Zyngier 		irq_chip_write_msi_msg(irq_data, msg);
612f3cf8bb0SJiang Liu 	}
613f3cf8bb0SJiang Liu 
614f3cf8bb0SJiang Liu 	return ret;
615f3cf8bb0SJiang Liu }
616f3cf8bb0SJiang Liu 
61772491643SThomas Gleixner static int msi_domain_activate(struct irq_domain *domain,
61872491643SThomas Gleixner 			       struct irq_data *irq_data, bool early)
619f3cf8bb0SJiang Liu {
6200be8153cSMarc Zyngier 	struct msi_msg msg[2] = { [1] = { }, };
621f3cf8bb0SJiang Liu 
6220be8153cSMarc Zyngier 	BUG_ON(irq_chip_compose_msi_msg(irq_data, msg));
6230be8153cSMarc Zyngier 	msi_check_level(irq_data->domain, msg);
6240be8153cSMarc Zyngier 	irq_chip_write_msi_msg(irq_data, msg);
62572491643SThomas Gleixner 	return 0;
626f3cf8bb0SJiang Liu }
627f3cf8bb0SJiang Liu 
628f3cf8bb0SJiang Liu static void msi_domain_deactivate(struct irq_domain *domain,
629f3cf8bb0SJiang Liu 				  struct irq_data *irq_data)
630f3cf8bb0SJiang Liu {
6310be8153cSMarc Zyngier 	struct msi_msg msg[2];
632f3cf8bb0SJiang Liu 
6330be8153cSMarc Zyngier 	memset(msg, 0, sizeof(msg));
6340be8153cSMarc Zyngier 	irq_chip_write_msi_msg(irq_data, msg);
635f3cf8bb0SJiang Liu }
636f3cf8bb0SJiang Liu 
637f3cf8bb0SJiang Liu static int msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
638f3cf8bb0SJiang Liu 			    unsigned int nr_irqs, void *arg)
639f3cf8bb0SJiang Liu {
640f3cf8bb0SJiang Liu 	struct msi_domain_info *info = domain->host_data;
641f3cf8bb0SJiang Liu 	struct msi_domain_ops *ops = info->ops;
642f3cf8bb0SJiang Liu 	irq_hw_number_t hwirq = ops->get_hwirq(info, arg);
643f3cf8bb0SJiang Liu 	int i, ret;
644f3cf8bb0SJiang Liu 
645f3cf8bb0SJiang Liu 	if (irq_find_mapping(domain, hwirq) > 0)
646f3cf8bb0SJiang Liu 		return -EEXIST;
647f3cf8bb0SJiang Liu 
648bf6f869fSLiu Jiang 	if (domain->parent) {
649f3cf8bb0SJiang Liu 		ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
650f3cf8bb0SJiang Liu 		if (ret < 0)
651f3cf8bb0SJiang Liu 			return ret;
652bf6f869fSLiu Jiang 	}
653f3cf8bb0SJiang Liu 
654f3cf8bb0SJiang Liu 	for (i = 0; i < nr_irqs; i++) {
655f3cf8bb0SJiang Liu 		ret = ops->msi_init(domain, info, virq + i, hwirq + i, arg);
656f3cf8bb0SJiang Liu 		if (ret < 0) {
657f3cf8bb0SJiang Liu 			if (ops->msi_free) {
658f3cf8bb0SJiang Liu 				for (i--; i > 0; i--)
659f3cf8bb0SJiang Liu 					ops->msi_free(domain, info, virq + i);
660f3cf8bb0SJiang Liu 			}
661f3cf8bb0SJiang Liu 			irq_domain_free_irqs_top(domain, virq, nr_irqs);
662f3cf8bb0SJiang Liu 			return ret;
663f3cf8bb0SJiang Liu 		}
664f3cf8bb0SJiang Liu 	}
665f3cf8bb0SJiang Liu 
666f3cf8bb0SJiang Liu 	return 0;
667f3cf8bb0SJiang Liu }
668f3cf8bb0SJiang Liu 
669f3cf8bb0SJiang Liu static void msi_domain_free(struct irq_domain *domain, unsigned int virq,
670f3cf8bb0SJiang Liu 			    unsigned int nr_irqs)
671f3cf8bb0SJiang Liu {
672f3cf8bb0SJiang Liu 	struct msi_domain_info *info = domain->host_data;
673f3cf8bb0SJiang Liu 	int i;
674f3cf8bb0SJiang Liu 
675f3cf8bb0SJiang Liu 	if (info->ops->msi_free) {
676f3cf8bb0SJiang Liu 		for (i = 0; i < nr_irqs; i++)
677f3cf8bb0SJiang Liu 			info->ops->msi_free(domain, info, virq + i);
678f3cf8bb0SJiang Liu 	}
679f3cf8bb0SJiang Liu 	irq_domain_free_irqs_top(domain, virq, nr_irqs);
680f3cf8bb0SJiang Liu }
681f3cf8bb0SJiang Liu 
68201364028SKrzysztof Kozlowski static const struct irq_domain_ops msi_domain_ops = {
683f3cf8bb0SJiang Liu 	.alloc		= msi_domain_alloc,
684f3cf8bb0SJiang Liu 	.free		= msi_domain_free,
685f3cf8bb0SJiang Liu 	.activate	= msi_domain_activate,
686f3cf8bb0SJiang Liu 	.deactivate	= msi_domain_deactivate,
687f3cf8bb0SJiang Liu };
688f3cf8bb0SJiang Liu 
689aeeb5965SJiang Liu static irq_hw_number_t msi_domain_ops_get_hwirq(struct msi_domain_info *info,
690aeeb5965SJiang Liu 						msi_alloc_info_t *arg)
691aeeb5965SJiang Liu {
692aeeb5965SJiang Liu 	return arg->hwirq;
693aeeb5965SJiang Liu }
694aeeb5965SJiang Liu 
695aeeb5965SJiang Liu static int msi_domain_ops_prepare(struct irq_domain *domain, struct device *dev,
696aeeb5965SJiang Liu 				  int nvec, msi_alloc_info_t *arg)
697aeeb5965SJiang Liu {
698aeeb5965SJiang Liu 	memset(arg, 0, sizeof(*arg));
699aeeb5965SJiang Liu 	return 0;
700aeeb5965SJiang Liu }
701aeeb5965SJiang Liu 
702aeeb5965SJiang Liu static void msi_domain_ops_set_desc(msi_alloc_info_t *arg,
703aeeb5965SJiang Liu 				    struct msi_desc *desc)
704aeeb5965SJiang Liu {
705aeeb5965SJiang Liu 	arg->desc = desc;
706aeeb5965SJiang Liu }
707aeeb5965SJiang Liu 
708aeeb5965SJiang Liu static int msi_domain_ops_init(struct irq_domain *domain,
709aeeb5965SJiang Liu 			       struct msi_domain_info *info,
710aeeb5965SJiang Liu 			       unsigned int virq, irq_hw_number_t hwirq,
711aeeb5965SJiang Liu 			       msi_alloc_info_t *arg)
712aeeb5965SJiang Liu {
713aeeb5965SJiang Liu 	irq_domain_set_hwirq_and_chip(domain, virq, hwirq, info->chip,
714aeeb5965SJiang Liu 				      info->chip_data);
715aeeb5965SJiang Liu 	if (info->handler && info->handler_name) {
716aeeb5965SJiang Liu 		__irq_set_handler(virq, info->handler, 0, info->handler_name);
717aeeb5965SJiang Liu 		if (info->handler_data)
718aeeb5965SJiang Liu 			irq_set_handler_data(virq, info->handler_data);
719aeeb5965SJiang Liu 	}
720aeeb5965SJiang Liu 	return 0;
721aeeb5965SJiang Liu }
722aeeb5965SJiang Liu 
723aeeb5965SJiang Liu static struct msi_domain_ops msi_domain_ops_default = {
724aeeb5965SJiang Liu 	.get_hwirq		= msi_domain_ops_get_hwirq,
725aeeb5965SJiang Liu 	.msi_init		= msi_domain_ops_init,
726aeeb5965SJiang Liu 	.msi_prepare		= msi_domain_ops_prepare,
727aeeb5965SJiang Liu 	.set_desc		= msi_domain_ops_set_desc,
728aeeb5965SJiang Liu };
729aeeb5965SJiang Liu 
730aeeb5965SJiang Liu static void msi_domain_update_dom_ops(struct msi_domain_info *info)
731aeeb5965SJiang Liu {
732aeeb5965SJiang Liu 	struct msi_domain_ops *ops = info->ops;
733aeeb5965SJiang Liu 
734aeeb5965SJiang Liu 	if (ops == NULL) {
735aeeb5965SJiang Liu 		info->ops = &msi_domain_ops_default;
736aeeb5965SJiang Liu 		return;
737aeeb5965SJiang Liu 	}
738aeeb5965SJiang Liu 
73943e9e705SThomas Gleixner 	if (!(info->flags & MSI_FLAG_USE_DEF_DOM_OPS))
74043e9e705SThomas Gleixner 		return;
74143e9e705SThomas Gleixner 
742aeeb5965SJiang Liu 	if (ops->get_hwirq == NULL)
743aeeb5965SJiang Liu 		ops->get_hwirq = msi_domain_ops_default.get_hwirq;
744aeeb5965SJiang Liu 	if (ops->msi_init == NULL)
745aeeb5965SJiang Liu 		ops->msi_init = msi_domain_ops_default.msi_init;
746aeeb5965SJiang Liu 	if (ops->msi_prepare == NULL)
747aeeb5965SJiang Liu 		ops->msi_prepare = msi_domain_ops_default.msi_prepare;
748aeeb5965SJiang Liu 	if (ops->set_desc == NULL)
749aeeb5965SJiang Liu 		ops->set_desc = msi_domain_ops_default.set_desc;
750aeeb5965SJiang Liu }
751aeeb5965SJiang Liu 
752aeeb5965SJiang Liu static void msi_domain_update_chip_ops(struct msi_domain_info *info)
753aeeb5965SJiang Liu {
754aeeb5965SJiang Liu 	struct irq_chip *chip = info->chip;
755aeeb5965SJiang Liu 
7560701c53eSMarc Zyngier 	BUG_ON(!chip || !chip->irq_mask || !chip->irq_unmask);
757aeeb5965SJiang Liu 	if (!chip->irq_set_affinity)
758aeeb5965SJiang Liu 		chip->irq_set_affinity = msi_domain_set_affinity;
759aeeb5965SJiang Liu }
760aeeb5965SJiang Liu 
761*a80c0aceSThomas Gleixner static struct irq_domain *__msi_create_irq_domain(struct fwnode_handle *fwnode,
762f3cf8bb0SJiang Liu 						  struct msi_domain_info *info,
763*a80c0aceSThomas Gleixner 						  unsigned int flags,
764f3cf8bb0SJiang Liu 						  struct irq_domain *parent)
765f3cf8bb0SJiang Liu {
766a97b852bSMarc Zyngier 	struct irq_domain *domain;
767a97b852bSMarc Zyngier 
76861bf992fSThomas Gleixner 	if (info->hwsize > MSI_XA_DOMAIN_SIZE)
76961bf992fSThomas Gleixner 		return NULL;
77061bf992fSThomas Gleixner 
77161bf992fSThomas Gleixner 	/*
77261bf992fSThomas Gleixner 	 * Hardware size 0 is valid for backwards compatibility and for
77361bf992fSThomas Gleixner 	 * domains which are not backed by a hardware table. Grant the
77461bf992fSThomas Gleixner 	 * maximum index space.
77561bf992fSThomas Gleixner 	 */
77661bf992fSThomas Gleixner 	if (!info->hwsize)
77761bf992fSThomas Gleixner 		info->hwsize = MSI_XA_DOMAIN_SIZE;
77861bf992fSThomas Gleixner 
779aeeb5965SJiang Liu 	msi_domain_update_dom_ops(info);
780aeeb5965SJiang Liu 	if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
781aeeb5965SJiang Liu 		msi_domain_update_chip_ops(info);
782f3cf8bb0SJiang Liu 
783*a80c0aceSThomas Gleixner 	domain = irq_domain_create_hierarchy(parent, flags | IRQ_DOMAIN_FLAG_MSI, 0,
78488156f00SEric Auger 					     fwnode, &msi_domain_ops, info);
7850165308aSThomas Gleixner 
78622db089aSAhmed S. Darwish 	if (domain) {
78722db089aSAhmed S. Darwish 		if (!domain->name && info->chip)
788a97b852bSMarc Zyngier 			domain->name = info->chip->name;
78922db089aSAhmed S. Darwish 		irq_domain_update_bus_token(domain, info->bus_token);
79022db089aSAhmed S. Darwish 	}
791a97b852bSMarc Zyngier 
792a97b852bSMarc Zyngier 	return domain;
793f3cf8bb0SJiang Liu }
794f3cf8bb0SJiang Liu 
795b78780d9SThomas Gleixner /**
796*a80c0aceSThomas Gleixner  * msi_create_irq_domain - Create an MSI interrupt domain
797*a80c0aceSThomas Gleixner  * @fwnode:	Optional fwnode of the interrupt controller
798*a80c0aceSThomas Gleixner  * @info:	MSI domain info
799*a80c0aceSThomas Gleixner  * @parent:	Parent irq domain
800*a80c0aceSThomas Gleixner  *
801*a80c0aceSThomas Gleixner  * Return: pointer to the created &struct irq_domain or %NULL on failure
802*a80c0aceSThomas Gleixner  */
803*a80c0aceSThomas Gleixner struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode,
804*a80c0aceSThomas Gleixner 					 struct msi_domain_info *info,
805*a80c0aceSThomas Gleixner 					 struct irq_domain *parent)
806*a80c0aceSThomas Gleixner {
807*a80c0aceSThomas Gleixner 	return __msi_create_irq_domain(fwnode, info, 0, parent);
808*a80c0aceSThomas Gleixner }
809*a80c0aceSThomas Gleixner 
810*a80c0aceSThomas Gleixner /**
811b78780d9SThomas Gleixner  * msi_parent_init_dev_msi_info - Delegate initialization of device MSI info down
812b78780d9SThomas Gleixner  *				  in the domain hierarchy
813b78780d9SThomas Gleixner  * @dev:		The device for which the domain should be created
814b78780d9SThomas Gleixner  * @domain:		The domain in the hierarchy this op is being called on
815b78780d9SThomas Gleixner  * @msi_parent_domain:	The IRQ_DOMAIN_FLAG_MSI_PARENT domain for the child to
816b78780d9SThomas Gleixner  *			be created
817b78780d9SThomas Gleixner  * @msi_child_info:	The MSI domain info of the IRQ_DOMAIN_FLAG_MSI_DEVICE
818b78780d9SThomas Gleixner  *			domain to be created
819b78780d9SThomas Gleixner  *
820b78780d9SThomas Gleixner  * Return: true on success, false otherwise
821b78780d9SThomas Gleixner  *
822b78780d9SThomas Gleixner  * This is the most complex problem of per device MSI domains and the
823b78780d9SThomas Gleixner  * underlying interrupt domain hierarchy:
824b78780d9SThomas Gleixner  *
825b78780d9SThomas Gleixner  * The device domain to be initialized requests the broadest feature set
826b78780d9SThomas Gleixner  * possible and the underlying domain hierarchy puts restrictions on it.
827b78780d9SThomas Gleixner  *
828b78780d9SThomas Gleixner  * That's trivial for a simple parent->child relationship, but it gets
829b78780d9SThomas Gleixner  * interesting with an intermediate domain: root->parent->child.  The
830b78780d9SThomas Gleixner  * intermediate 'parent' can expand the capabilities which the 'root'
831b78780d9SThomas Gleixner  * domain is providing. So that creates a classic hen and egg problem:
832b78780d9SThomas Gleixner  * Which entity is doing the restrictions/expansions?
833b78780d9SThomas Gleixner  *
834b78780d9SThomas Gleixner  * One solution is to let the root domain handle the initialization that's
835b78780d9SThomas Gleixner  * why there is the @domain and the @msi_parent_domain pointer.
836b78780d9SThomas Gleixner  */
837b78780d9SThomas Gleixner bool msi_parent_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
838b78780d9SThomas Gleixner 				  struct irq_domain *msi_parent_domain,
839b78780d9SThomas Gleixner 				  struct msi_domain_info *msi_child_info)
840b78780d9SThomas Gleixner {
841b78780d9SThomas Gleixner 	struct irq_domain *parent = domain->parent;
842b78780d9SThomas Gleixner 
843b78780d9SThomas Gleixner 	if (WARN_ON_ONCE(!parent || !parent->msi_parent_ops ||
844b78780d9SThomas Gleixner 			 !parent->msi_parent_ops->init_dev_msi_info))
845b78780d9SThomas Gleixner 		return false;
846b78780d9SThomas Gleixner 
847b78780d9SThomas Gleixner 	return parent->msi_parent_ops->init_dev_msi_info(dev, parent, msi_parent_domain,
848b78780d9SThomas Gleixner 							 msi_child_info);
849b78780d9SThomas Gleixner }
850b78780d9SThomas Gleixner 
851b2eba39bSMarc Zyngier int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev,
852b2eba39bSMarc Zyngier 			    int nvec, msi_alloc_info_t *arg)
853b2eba39bSMarc Zyngier {
854b2eba39bSMarc Zyngier 	struct msi_domain_info *info = domain->host_data;
855b2eba39bSMarc Zyngier 	struct msi_domain_ops *ops = info->ops;
856b2eba39bSMarc Zyngier 
8572569f62cSThomas Gleixner 	return ops->msi_prepare(domain, dev, nvec, arg);
858b2eba39bSMarc Zyngier }
859b2eba39bSMarc Zyngier 
8602145ac93SMarc Zyngier int msi_domain_populate_irqs(struct irq_domain *domain, struct device *dev,
861a80713feSThomas Gleixner 			     int virq_base, int nvec, msi_alloc_info_t *arg)
8622145ac93SMarc Zyngier {
8632145ac93SMarc Zyngier 	struct msi_domain_info *info = domain->host_data;
8642145ac93SMarc Zyngier 	struct msi_domain_ops *ops = info->ops;
86540742716SThomas Gleixner 	struct msi_ctrl ctrl = {
86640742716SThomas Gleixner 		.domid	= MSI_DEFAULT_DOMAIN,
86740742716SThomas Gleixner 		.first  = virq_base,
86840742716SThomas Gleixner 		.last	= virq_base + nvec - 1,
86940742716SThomas Gleixner 	};
8702145ac93SMarc Zyngier 	struct msi_desc *desc;
871f1139f90SThomas Gleixner 	struct xarray *xa;
872a80713feSThomas Gleixner 	int ret, virq;
8732145ac93SMarc Zyngier 
87440742716SThomas Gleixner 	if (!msi_ctrl_valid(dev, &ctrl))
87540742716SThomas Gleixner 		return -EINVAL;
87640742716SThomas Gleixner 
877a80713feSThomas Gleixner 	msi_lock_descs(dev);
87840742716SThomas Gleixner 	ret = msi_domain_add_simple_msi_descs(dev, &ctrl);
879cd6cf065SThomas Gleixner 	if (ret)
880cd6cf065SThomas Gleixner 		goto unlock;
8812145ac93SMarc Zyngier 
88240742716SThomas Gleixner 	xa = &dev->msi.data->__domains[ctrl.domid].store;
883f1139f90SThomas Gleixner 
884cd6cf065SThomas Gleixner 	for (virq = virq_base; virq < virq_base + nvec; virq++) {
885f1139f90SThomas Gleixner 		desc = xa_load(xa, virq);
886a80713feSThomas Gleixner 		desc->irq = virq;
8872145ac93SMarc Zyngier 
8882145ac93SMarc Zyngier 		ops->set_desc(arg, desc);
889a80713feSThomas Gleixner 		ret = irq_domain_alloc_irqs_hierarchy(domain, virq, 1, arg);
8902145ac93SMarc Zyngier 		if (ret)
891a80713feSThomas Gleixner 			goto fail;
8922145ac93SMarc Zyngier 
893a80713feSThomas Gleixner 		irq_set_msi_desc(virq, desc);
8942145ac93SMarc Zyngier 	}
895a80713feSThomas Gleixner 	msi_unlock_descs(dev);
896a80713feSThomas Gleixner 	return 0;
8972145ac93SMarc Zyngier 
898a80713feSThomas Gleixner fail:
899a80713feSThomas Gleixner 	for (--virq; virq >= virq_base; virq--)
900a80713feSThomas Gleixner 		irq_domain_free_irqs_common(domain, virq, 1);
90140742716SThomas Gleixner 	msi_domain_free_descs(dev, &ctrl);
902cd6cf065SThomas Gleixner unlock:
903a80713feSThomas Gleixner 	msi_unlock_descs(dev);
9042145ac93SMarc Zyngier 	return ret;
9052145ac93SMarc Zyngier }
9062145ac93SMarc Zyngier 
907bc976233SThomas Gleixner /*
908bc976233SThomas Gleixner  * Carefully check whether the device can use reservation mode. If
909bc976233SThomas Gleixner  * reservation mode is enabled then the early activation will assign a
910bc976233SThomas Gleixner  * dummy vector to the device. If the PCI/MSI device does not support
911bc976233SThomas Gleixner  * masking of the entry then this can result in spurious interrupts when
912bc976233SThomas Gleixner  * the device driver is not absolutely careful. But even then a malfunction
913bc976233SThomas Gleixner  * of the hardware could result in a spurious interrupt on the dummy vector
914bc976233SThomas Gleixner  * and render the device unusable. If the entry can be masked then the core
915bc976233SThomas Gleixner  * logic will prevent the spurious interrupt and reservation mode can be
916bc976233SThomas Gleixner  * used. For now reservation mode is restricted to PCI/MSI.
917bc976233SThomas Gleixner  */
918bc976233SThomas Gleixner static bool msi_check_reservation_mode(struct irq_domain *domain,
919bc976233SThomas Gleixner 				       struct msi_domain_info *info,
920bc976233SThomas Gleixner 				       struct device *dev)
921da5dd9e8SThomas Gleixner {
922bc976233SThomas Gleixner 	struct msi_desc *desc;
923bc976233SThomas Gleixner 
924c6c9e283SThomas Gleixner 	switch(domain->bus_token) {
925c6c9e283SThomas Gleixner 	case DOMAIN_BUS_PCI_MSI:
926c6c9e283SThomas Gleixner 	case DOMAIN_BUS_VMD_MSI:
927c6c9e283SThomas Gleixner 		break;
928c6c9e283SThomas Gleixner 	default:
929bc976233SThomas Gleixner 		return false;
930c6c9e283SThomas Gleixner 	}
931bc976233SThomas Gleixner 
932da5dd9e8SThomas Gleixner 	if (!(info->flags & MSI_FLAG_MUST_REACTIVATE))
933da5dd9e8SThomas Gleixner 		return false;
934bc976233SThomas Gleixner 
935bc976233SThomas Gleixner 	if (IS_ENABLED(CONFIG_PCI_MSI) && pci_msi_ignore_mask)
936bc976233SThomas Gleixner 		return false;
937bc976233SThomas Gleixner 
938bc976233SThomas Gleixner 	/*
939bc976233SThomas Gleixner 	 * Checking the first MSI descriptor is sufficient. MSIX supports
9409c8e9c96SThomas Gleixner 	 * masking and MSI does so when the can_mask attribute is set.
941bc976233SThomas Gleixner 	 */
942495c66acSThomas Gleixner 	desc = msi_first_desc(dev, MSI_DESC_ALL);
943e58f2259SThomas Gleixner 	return desc->pci.msi_attrib.is_msix || desc->pci.msi_attrib.can_mask;
944da5dd9e8SThomas Gleixner }
945da5dd9e8SThomas Gleixner 
94689033762SThomas Gleixner static int msi_handle_pci_fail(struct irq_domain *domain, struct msi_desc *desc,
94789033762SThomas Gleixner 			       int allocated)
94889033762SThomas Gleixner {
94989033762SThomas Gleixner 	switch(domain->bus_token) {
95089033762SThomas Gleixner 	case DOMAIN_BUS_PCI_MSI:
95189033762SThomas Gleixner 	case DOMAIN_BUS_VMD_MSI:
95289033762SThomas Gleixner 		if (IS_ENABLED(CONFIG_PCI_MSI))
95389033762SThomas Gleixner 			break;
95489033762SThomas Gleixner 		fallthrough;
95589033762SThomas Gleixner 	default:
95689033762SThomas Gleixner 		return -ENOSPC;
95789033762SThomas Gleixner 	}
95889033762SThomas Gleixner 
95989033762SThomas Gleixner 	/* Let a failed PCI multi MSI allocation retry */
96089033762SThomas Gleixner 	if (desc->nvec_used > 1)
96189033762SThomas Gleixner 		return 1;
96289033762SThomas Gleixner 
96389033762SThomas Gleixner 	/* If there was a successful allocation let the caller know */
96489033762SThomas Gleixner 	return allocated ? allocated : -ENOSPC;
96589033762SThomas Gleixner }
96689033762SThomas Gleixner 
967ef8dd015SThomas Gleixner #define VIRQ_CAN_RESERVE	0x01
968ef8dd015SThomas Gleixner #define VIRQ_ACTIVATE		0x02
969ef8dd015SThomas Gleixner #define VIRQ_NOMASK_QUIRK	0x04
970ef8dd015SThomas Gleixner 
971ef8dd015SThomas Gleixner static int msi_init_virq(struct irq_domain *domain, int virq, unsigned int vflags)
972ef8dd015SThomas Gleixner {
973ef8dd015SThomas Gleixner 	struct irq_data *irqd = irq_domain_get_irq_data(domain, virq);
974ef8dd015SThomas Gleixner 	int ret;
975ef8dd015SThomas Gleixner 
976ef8dd015SThomas Gleixner 	if (!(vflags & VIRQ_CAN_RESERVE)) {
977ef8dd015SThomas Gleixner 		irqd_clr_can_reserve(irqd);
978ef8dd015SThomas Gleixner 		if (vflags & VIRQ_NOMASK_QUIRK)
979ef8dd015SThomas Gleixner 			irqd_set_msi_nomask_quirk(irqd);
980d802057cSMarc Zyngier 
981d802057cSMarc Zyngier 		/*
982d802057cSMarc Zyngier 		 * If the interrupt is managed but no CPU is available to
983d802057cSMarc Zyngier 		 * service it, shut it down until better times. Note that
984d802057cSMarc Zyngier 		 * we only do this on the !RESERVE path as x86 (the only
985d802057cSMarc Zyngier 		 * architecture using this flag) deals with this in a
986d802057cSMarc Zyngier 		 * different way by using a catch-all vector.
987d802057cSMarc Zyngier 		 */
988d802057cSMarc Zyngier 		if ((vflags & VIRQ_ACTIVATE) &&
989d802057cSMarc Zyngier 		    irqd_affinity_is_managed(irqd) &&
990d802057cSMarc Zyngier 		    !cpumask_intersects(irq_data_get_affinity_mask(irqd),
991d802057cSMarc Zyngier 					cpu_online_mask)) {
992d802057cSMarc Zyngier 			    irqd_set_managed_shutdown(irqd);
993d802057cSMarc Zyngier 			    return 0;
994d802057cSMarc Zyngier 		    }
995ef8dd015SThomas Gleixner 	}
996ef8dd015SThomas Gleixner 
997ef8dd015SThomas Gleixner 	if (!(vflags & VIRQ_ACTIVATE))
998ef8dd015SThomas Gleixner 		return 0;
999ef8dd015SThomas Gleixner 
1000ef8dd015SThomas Gleixner 	ret = irq_domain_activate_irq(irqd, vflags & VIRQ_CAN_RESERVE);
1001ef8dd015SThomas Gleixner 	if (ret)
1002ef8dd015SThomas Gleixner 		return ret;
1003ef8dd015SThomas Gleixner 	/*
1004ef8dd015SThomas Gleixner 	 * If the interrupt uses reservation mode, clear the activated bit
1005ef8dd015SThomas Gleixner 	 * so request_irq() will assign the final vector.
1006ef8dd015SThomas Gleixner 	 */
1007ef8dd015SThomas Gleixner 	if (vflags & VIRQ_CAN_RESERVE)
1008ef8dd015SThomas Gleixner 		irqd_clr_activated(irqd);
1009ef8dd015SThomas Gleixner 	return 0;
1010ef8dd015SThomas Gleixner }
1011ef8dd015SThomas Gleixner 
1012f2480e7dSThomas Gleixner static int __msi_domain_alloc_irqs(struct device *dev, struct irq_domain *domain,
1013f2480e7dSThomas Gleixner 				   struct msi_ctrl *ctrl)
1014d9109698SJiang Liu {
1015f2480e7dSThomas Gleixner 	struct xarray *xa = &dev->msi.data->__domains[ctrl->domid].store;
1016d9109698SJiang Liu 	struct msi_domain_info *info = domain->host_data;
1017d9109698SJiang Liu 	struct msi_domain_ops *ops = info->ops;
1018f2480e7dSThomas Gleixner 	unsigned int vflags = 0, allocated = 0;
101906fde695SZenghui Yu 	msi_alloc_info_t arg = { };
1020ef8dd015SThomas Gleixner 	struct msi_desc *desc;
1021f2480e7dSThomas Gleixner 	unsigned long idx;
1022b6140914SThomas Gleixner 	int i, ret, virq;
1023d9109698SJiang Liu 
1024f2480e7dSThomas Gleixner 	ret = msi_domain_prepare_irqs(domain, dev, ctrl->nirqs, &arg);
1025d9109698SJiang Liu 	if (ret)
1026d9109698SJiang Liu 		return ret;
1027d9109698SJiang Liu 
1028ef8dd015SThomas Gleixner 	/*
1029ef8dd015SThomas Gleixner 	 * This flag is set by the PCI layer as we need to activate
1030ef8dd015SThomas Gleixner 	 * the MSI entries before the PCI layer enables MSI in the
1031ef8dd015SThomas Gleixner 	 * card. Otherwise the card latches a random msi message.
1032ef8dd015SThomas Gleixner 	 */
1033ef8dd015SThomas Gleixner 	if (info->flags & MSI_FLAG_ACTIVATE_EARLY)
1034ef8dd015SThomas Gleixner 		vflags |= VIRQ_ACTIVATE;
1035ef8dd015SThomas Gleixner 
1036ef8dd015SThomas Gleixner 	/*
1037ef8dd015SThomas Gleixner 	 * Interrupt can use a reserved vector and will not occupy
1038ef8dd015SThomas Gleixner 	 * a real device vector until the interrupt is requested.
1039ef8dd015SThomas Gleixner 	 */
1040ef8dd015SThomas Gleixner 	if (msi_check_reservation_mode(domain, info, dev)) {
1041ef8dd015SThomas Gleixner 		vflags |= VIRQ_CAN_RESERVE;
1042ef8dd015SThomas Gleixner 		/*
1043ef8dd015SThomas Gleixner 		 * MSI affinity setting requires a special quirk (X86) when
1044ef8dd015SThomas Gleixner 		 * reservation mode is active.
1045ef8dd015SThomas Gleixner 		 */
10463dad5f9aSThomas Gleixner 		if (info->flags & MSI_FLAG_NOMASK_QUIRK)
1047ef8dd015SThomas Gleixner 			vflags |= VIRQ_NOMASK_QUIRK;
1048ef8dd015SThomas Gleixner 	}
1049ef8dd015SThomas Gleixner 
1050f2480e7dSThomas Gleixner 	xa_for_each_range(xa, idx, desc, ctrl->first, ctrl->last) {
1051f2480e7dSThomas Gleixner 		if (!msi_desc_match(desc, MSI_DESC_NOTASSOCIATED))
1052f2480e7dSThomas Gleixner 			continue;
1053f2480e7dSThomas Gleixner 
1054f2480e7dSThomas Gleixner 		/* This should return -ECONFUSED... */
1055f2480e7dSThomas Gleixner 		if (WARN_ON_ONCE(allocated >= ctrl->nirqs))
1056f2480e7dSThomas Gleixner 			return -EINVAL;
1057f2480e7dSThomas Gleixner 
1058d9109698SJiang Liu 		ops->set_desc(&arg, desc);
1059d9109698SJiang Liu 
1060b6140914SThomas Gleixner 		virq = __irq_domain_alloc_irqs(domain, -1, desc->nvec_used,
106106ee6d57SThomas Gleixner 					       dev_to_node(dev), &arg, false,
10620972fa57SThomas Gleixner 					       desc->affinity);
10630f62d941SThomas Gleixner 		if (virq < 0)
10640f62d941SThomas Gleixner 			return msi_handle_pci_fail(domain, desc, allocated);
1065d9109698SJiang Liu 
106607557ccbSThomas Gleixner 		for (i = 0; i < desc->nvec_used; i++) {
1067d9109698SJiang Liu 			irq_set_msi_desc_off(virq, i, desc);
106807557ccbSThomas Gleixner 			irq_debugfs_copy_devname(virq + i, dev);
1069ef8dd015SThomas Gleixner 			ret = msi_init_virq(domain, virq + i, vflags);
1070bb9b428aSThomas Gleixner 			if (ret)
10710f62d941SThomas Gleixner 				return ret;
107274a5257aSThomas Gleixner 		}
1073bf5e758fSThomas Gleixner 		if (info->flags & MSI_FLAG_DEV_SYSFS) {
1074bf5e758fSThomas Gleixner 			ret = msi_sysfs_populate_desc(dev, desc);
1075bf5e758fSThomas Gleixner 			if (ret)
1076bf5e758fSThomas Gleixner 				return ret;
1077bf5e758fSThomas Gleixner 		}
1078ef8dd015SThomas Gleixner 		allocated++;
1079d9109698SJiang Liu 	}
1080d9109698SJiang Liu 	return 0;
10810f62d941SThomas Gleixner }
10820f62d941SThomas Gleixner 
108340742716SThomas Gleixner static int msi_domain_alloc_simple_msi_descs(struct device *dev,
108440742716SThomas Gleixner 					     struct msi_domain_info *info,
1085f2480e7dSThomas Gleixner 					     struct msi_ctrl *ctrl)
1086645474e2SThomas Gleixner {
1087645474e2SThomas Gleixner 	if (!(info->flags & MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS))
1088645474e2SThomas Gleixner 		return 0;
1089645474e2SThomas Gleixner 
1090f2480e7dSThomas Gleixner 	return msi_domain_add_simple_msi_descs(dev, ctrl);
1091f2480e7dSThomas Gleixner }
1092f2480e7dSThomas Gleixner 
1093f2480e7dSThomas Gleixner static int __msi_domain_alloc_locked(struct device *dev, struct msi_ctrl *ctrl)
1094f2480e7dSThomas Gleixner {
1095f2480e7dSThomas Gleixner 	struct msi_domain_info *info;
1096f2480e7dSThomas Gleixner 	struct msi_domain_ops *ops;
1097f2480e7dSThomas Gleixner 	struct irq_domain *domain;
1098f2480e7dSThomas Gleixner 	int ret;
1099f2480e7dSThomas Gleixner 
1100f2480e7dSThomas Gleixner 	if (!msi_ctrl_valid(dev, ctrl))
1101f2480e7dSThomas Gleixner 		return -EINVAL;
1102f2480e7dSThomas Gleixner 
1103f2480e7dSThomas Gleixner 	domain = msi_get_device_domain(dev, ctrl->domid);
1104f2480e7dSThomas Gleixner 	if (!domain)
1105f2480e7dSThomas Gleixner 		return -ENODEV;
1106f2480e7dSThomas Gleixner 
1107f2480e7dSThomas Gleixner 	info = domain->host_data;
1108f2480e7dSThomas Gleixner 
1109f2480e7dSThomas Gleixner 	ret = msi_domain_alloc_simple_msi_descs(dev, info, ctrl);
1110f2480e7dSThomas Gleixner 	if (ret)
1111f2480e7dSThomas Gleixner 		return ret;
1112f2480e7dSThomas Gleixner 
1113f2480e7dSThomas Gleixner 	ops = info->ops;
1114f2480e7dSThomas Gleixner 	if (ops->domain_alloc_irqs)
1115f2480e7dSThomas Gleixner 		return ops->domain_alloc_irqs(domain, dev, ctrl->nirqs);
1116f2480e7dSThomas Gleixner 
1117f2480e7dSThomas Gleixner 	return __msi_domain_alloc_irqs(dev, domain, ctrl);
1118f2480e7dSThomas Gleixner }
1119f2480e7dSThomas Gleixner 
1120f2480e7dSThomas Gleixner static int msi_domain_alloc_locked(struct device *dev, struct msi_ctrl *ctrl)
1121f2480e7dSThomas Gleixner {
1122f2480e7dSThomas Gleixner 	int ret = __msi_domain_alloc_locked(dev, ctrl);
1123f2480e7dSThomas Gleixner 
1124f2480e7dSThomas Gleixner 	if (ret)
1125f2480e7dSThomas Gleixner 		msi_domain_free_locked(dev, ctrl);
1126f2480e7dSThomas Gleixner 	return ret;
1127f2480e7dSThomas Gleixner }
1128f2480e7dSThomas Gleixner 
1129f2480e7dSThomas Gleixner /**
1130f2480e7dSThomas Gleixner  * msi_domain_alloc_irqs_range_locked - Allocate interrupts from a MSI interrupt domain
1131f2480e7dSThomas Gleixner  * @dev:	Pointer to device struct of the device for which the interrupts
1132f2480e7dSThomas Gleixner  *		are allocated
1133f2480e7dSThomas Gleixner  * @domid:	Id of the interrupt domain to operate on
1134f2480e7dSThomas Gleixner  * @first:	First index to allocate (inclusive)
1135f2480e7dSThomas Gleixner  * @last:	Last index to allocate (inclusive)
1136f2480e7dSThomas Gleixner  *
1137f2480e7dSThomas Gleixner  * Must be invoked from within a msi_lock_descs() / msi_unlock_descs()
1138f2480e7dSThomas Gleixner  * pair. Use this for MSI irqdomains which implement their own descriptor
1139f2480e7dSThomas Gleixner  * allocation/free.
1140f2480e7dSThomas Gleixner  *
1141f2480e7dSThomas Gleixner  * Return: %0 on success or an error code.
1142f2480e7dSThomas Gleixner  */
1143f2480e7dSThomas Gleixner int msi_domain_alloc_irqs_range_locked(struct device *dev, unsigned int domid,
1144f2480e7dSThomas Gleixner 				       unsigned int first, unsigned int last)
1145f2480e7dSThomas Gleixner {
1146f2480e7dSThomas Gleixner 	struct msi_ctrl ctrl = {
1147f2480e7dSThomas Gleixner 		.domid	= domid,
1148f2480e7dSThomas Gleixner 		.first	= first,
1149f2480e7dSThomas Gleixner 		.last	= last,
1150f2480e7dSThomas Gleixner 		.nirqs	= last + 1 - first,
1151f2480e7dSThomas Gleixner 	};
1152f2480e7dSThomas Gleixner 
1153f2480e7dSThomas Gleixner 	return msi_domain_alloc_locked(dev, &ctrl);
1154f2480e7dSThomas Gleixner }
1155f2480e7dSThomas Gleixner 
1156f2480e7dSThomas Gleixner /**
1157f2480e7dSThomas Gleixner  * msi_domain_alloc_irqs_range - Allocate interrupts from a MSI interrupt domain
1158f2480e7dSThomas Gleixner  * @dev:	Pointer to device struct of the device for which the interrupts
1159f2480e7dSThomas Gleixner  *		are allocated
1160f2480e7dSThomas Gleixner  * @domid:	Id of the interrupt domain to operate on
1161f2480e7dSThomas Gleixner  * @first:	First index to allocate (inclusive)
1162f2480e7dSThomas Gleixner  * @last:	Last index to allocate (inclusive)
1163f2480e7dSThomas Gleixner  *
1164f2480e7dSThomas Gleixner  * Return: %0 on success or an error code.
1165f2480e7dSThomas Gleixner  */
1166f2480e7dSThomas Gleixner int msi_domain_alloc_irqs_range(struct device *dev, unsigned int domid,
1167f2480e7dSThomas Gleixner 				unsigned int first, unsigned int last)
1168f2480e7dSThomas Gleixner {
1169f2480e7dSThomas Gleixner 	int ret;
1170f2480e7dSThomas Gleixner 
1171f2480e7dSThomas Gleixner 	msi_lock_descs(dev);
1172f2480e7dSThomas Gleixner 	ret = msi_domain_alloc_irqs_range_locked(dev, domid, first, last);
1173f2480e7dSThomas Gleixner 	msi_unlock_descs(dev);
1174f2480e7dSThomas Gleixner 	return ret;
1175f2480e7dSThomas Gleixner }
1176f2480e7dSThomas Gleixner 
1177f2480e7dSThomas Gleixner /**
1178f2480e7dSThomas Gleixner  * msi_domain_alloc_irqs_all_locked - Allocate all interrupts from a MSI interrupt domain
1179f2480e7dSThomas Gleixner  *
1180f2480e7dSThomas Gleixner  * @dev:	Pointer to device struct of the device for which the interrupts
1181f2480e7dSThomas Gleixner  *		are allocated
1182f2480e7dSThomas Gleixner  * @domid:	Id of the interrupt domain to operate on
1183f2480e7dSThomas Gleixner  * @nirqs:	The number of interrupts to allocate
1184f2480e7dSThomas Gleixner  *
1185f2480e7dSThomas Gleixner  * This function scans all MSI descriptors of the MSI domain and allocates interrupts
1186f2480e7dSThomas Gleixner  * for all unassigned ones. That function is to be used for MSI domain usage where
1187f2480e7dSThomas Gleixner  * the descriptor allocation is handled at the call site, e.g. PCI/MSI[X].
1188f2480e7dSThomas Gleixner  *
1189f2480e7dSThomas Gleixner  * Return: %0 on success or an error code.
1190f2480e7dSThomas Gleixner  */
1191f2480e7dSThomas Gleixner int msi_domain_alloc_irqs_all_locked(struct device *dev, unsigned int domid, int nirqs)
1192f2480e7dSThomas Gleixner {
1193f2480e7dSThomas Gleixner 	struct msi_ctrl ctrl = {
1194f2480e7dSThomas Gleixner 		.domid	= domid,
1195f2480e7dSThomas Gleixner 		.first	= 0,
1196f2480e7dSThomas Gleixner 		.last	= MSI_MAX_INDEX,
1197f2480e7dSThomas Gleixner 		.nirqs	= nirqs,
1198f2480e7dSThomas Gleixner 	};
1199f2480e7dSThomas Gleixner 
1200f2480e7dSThomas Gleixner 	return msi_domain_alloc_locked(dev, &ctrl);
1201645474e2SThomas Gleixner }
1202645474e2SThomas Gleixner 
12034cd5f440SThomas Gleixner static void __msi_domain_free_irqs(struct device *dev, struct irq_domain *domain,
12044cd5f440SThomas Gleixner 				   struct msi_ctrl *ctrl)
1205d9109698SJiang Liu {
12064cd5f440SThomas Gleixner 	struct xarray *xa = &dev->msi.data->__domains[ctrl->domid].store;
1207bf5e758fSThomas Gleixner 	struct msi_domain_info *info = domain->host_data;
1208ef8dd015SThomas Gleixner 	struct irq_data *irqd;
1209d9109698SJiang Liu 	struct msi_desc *desc;
12104cd5f440SThomas Gleixner 	unsigned long idx;
1211dbbc9357SBixuan Cui 	int i;
1212dbbc9357SBixuan Cui 
12134cd5f440SThomas Gleixner 	xa_for_each_range(xa, idx, desc, ctrl->first, ctrl->last) {
1214ef8dd015SThomas Gleixner 		/* Only handle MSI entries which have an interrupt associated */
12154cd5f440SThomas Gleixner 		if (!msi_desc_match(desc, MSI_DESC_ASSOCIATED))
12164cd5f440SThomas Gleixner 			continue;
12174cd5f440SThomas Gleixner 
1218ef8dd015SThomas Gleixner 		/* Make sure all interrupts are deactivated */
1219ef8dd015SThomas Gleixner 		for (i = 0; i < desc->nvec_used; i++) {
1220ef8dd015SThomas Gleixner 			irqd = irq_domain_get_irq_data(domain, desc->irq + i);
1221ef8dd015SThomas Gleixner 			if (irqd && irqd_is_activated(irqd))
1222ef8dd015SThomas Gleixner 				irq_domain_deactivate_irq(irqd);
1223dbbc9357SBixuan Cui 		}
1224d9109698SJiang Liu 
1225d9109698SJiang Liu 		irq_domain_free_irqs(desc->irq, desc->nvec_used);
1226bf5e758fSThomas Gleixner 		if (info->flags & MSI_FLAG_DEV_SYSFS)
1227bf5e758fSThomas Gleixner 			msi_sysfs_remove_desc(dev, desc);
1228d9109698SJiang Liu 		desc->irq = 0;
1229d9109698SJiang Liu 	}
1230d9109698SJiang Liu }
1231d9109698SJiang Liu 
12324cd5f440SThomas Gleixner static void msi_domain_free_locked(struct device *dev, struct msi_ctrl *ctrl)
1233645474e2SThomas Gleixner {
12344cd5f440SThomas Gleixner 	struct msi_domain_info *info;
12354cd5f440SThomas Gleixner 	struct msi_domain_ops *ops;
12364cd5f440SThomas Gleixner 	struct irq_domain *domain;
12374cd5f440SThomas Gleixner 
12384cd5f440SThomas Gleixner 	if (!msi_ctrl_valid(dev, ctrl))
12394cd5f440SThomas Gleixner 		return;
12404cd5f440SThomas Gleixner 
12414cd5f440SThomas Gleixner 	domain = msi_get_device_domain(dev, ctrl->domid);
12424cd5f440SThomas Gleixner 	if (!domain)
12434cd5f440SThomas Gleixner 		return;
12444cd5f440SThomas Gleixner 
12454cd5f440SThomas Gleixner 	info = domain->host_data;
12464cd5f440SThomas Gleixner 	ops = info->ops;
12474cd5f440SThomas Gleixner 
12484cd5f440SThomas Gleixner 	if (ops->domain_free_irqs)
12494cd5f440SThomas Gleixner 		ops->domain_free_irqs(domain, dev);
12504cd5f440SThomas Gleixner 	else
12514cd5f440SThomas Gleixner 		__msi_domain_free_irqs(dev, domain, ctrl);
12524cd5f440SThomas Gleixner 
12534cd5f440SThomas Gleixner 	if (ops->msi_post_free)
12544cd5f440SThomas Gleixner 		ops->msi_post_free(domain, dev);
12554cd5f440SThomas Gleixner 
1256645474e2SThomas Gleixner 	if (info->flags & MSI_FLAG_FREE_MSI_DESCS)
12574cd5f440SThomas Gleixner 		msi_domain_free_descs(dev, ctrl);
12584cd5f440SThomas Gleixner }
12594cd5f440SThomas Gleixner 
12604cd5f440SThomas Gleixner /**
12614cd5f440SThomas Gleixner  * msi_domain_free_irqs_range_locked - Free a range of interrupts from a MSI interrupt domain
12624cd5f440SThomas Gleixner  *				       associated to @dev with msi_lock held
12634cd5f440SThomas Gleixner  * @dev:	Pointer to device struct of the device for which the interrupts
12644cd5f440SThomas Gleixner  *		are freed
12654cd5f440SThomas Gleixner  * @domid:	Id of the interrupt domain to operate on
12664cd5f440SThomas Gleixner  * @first:	First index to free (inclusive)
12674cd5f440SThomas Gleixner  * @last:	Last index to free (inclusive)
12684cd5f440SThomas Gleixner  */
12694cd5f440SThomas Gleixner void msi_domain_free_irqs_range_locked(struct device *dev, unsigned int domid,
12704cd5f440SThomas Gleixner 				       unsigned int first, unsigned int last)
12714cd5f440SThomas Gleixner {
12724cd5f440SThomas Gleixner 	struct msi_ctrl ctrl = {
12734cd5f440SThomas Gleixner 		.domid	= domid,
12744cd5f440SThomas Gleixner 		.first	= first,
12754cd5f440SThomas Gleixner 		.last	= last,
12764cd5f440SThomas Gleixner 	};
12774cd5f440SThomas Gleixner 	msi_domain_free_locked(dev, &ctrl);
12784cd5f440SThomas Gleixner }
12794cd5f440SThomas Gleixner 
12804cd5f440SThomas Gleixner /**
12814cd5f440SThomas Gleixner  * msi_domain_free_irqs_range - Free a range of interrupts from a MSI interrupt domain
12824cd5f440SThomas Gleixner  *				associated to @dev
12834cd5f440SThomas Gleixner  * @dev:	Pointer to device struct of the device for which the interrupts
12844cd5f440SThomas Gleixner  *		are freed
12854cd5f440SThomas Gleixner  * @domid:	Id of the interrupt domain to operate on
12864cd5f440SThomas Gleixner  * @first:	First index to free (inclusive)
12874cd5f440SThomas Gleixner  * @last:	Last index to free (inclusive)
12884cd5f440SThomas Gleixner  */
12894cd5f440SThomas Gleixner void msi_domain_free_irqs_range(struct device *dev, unsigned int domid,
12904cd5f440SThomas Gleixner 				unsigned int first, unsigned int last)
12914cd5f440SThomas Gleixner {
12924cd5f440SThomas Gleixner 	msi_lock_descs(dev);
12934cd5f440SThomas Gleixner 	msi_domain_free_irqs_range_locked(dev, domid, first, last);
12944cd5f440SThomas Gleixner 	msi_unlock_descs(dev);
12954cd5f440SThomas Gleixner }
12964cd5f440SThomas Gleixner 
12974cd5f440SThomas Gleixner /**
12984cd5f440SThomas Gleixner  * msi_domain_free_irqs_all_locked - Free all interrupts from a MSI interrupt domain
12994cd5f440SThomas Gleixner  *				     associated to a device
13004cd5f440SThomas Gleixner  * @dev:	Pointer to device struct of the device for which the interrupts
13014cd5f440SThomas Gleixner  *		are freed
13024cd5f440SThomas Gleixner  * @domid:	The id of the domain to operate on
13034cd5f440SThomas Gleixner  *
13044cd5f440SThomas Gleixner  * Must be invoked from within a msi_lock_descs() / msi_unlock_descs()
13054cd5f440SThomas Gleixner  * pair. Use this for MSI irqdomains which implement their own vector
13064cd5f440SThomas Gleixner  * allocation.
13074cd5f440SThomas Gleixner  */
13084cd5f440SThomas Gleixner void msi_domain_free_irqs_all_locked(struct device *dev, unsigned int domid)
13094cd5f440SThomas Gleixner {
13104cd5f440SThomas Gleixner 	msi_domain_free_irqs_range_locked(dev, domid, 0, MSI_MAX_INDEX);
13114cd5f440SThomas Gleixner }
13124cd5f440SThomas Gleixner 
13134cd5f440SThomas Gleixner /**
13144cd5f440SThomas Gleixner  * msi_domain_free_irqs_all - Free all interrupts from a MSI interrupt domain
13154cd5f440SThomas Gleixner  *			      associated to a device
13164cd5f440SThomas Gleixner  * @dev:	Pointer to device struct of the device for which the interrupts
13174cd5f440SThomas Gleixner  *		are freed
13184cd5f440SThomas Gleixner  * @domid:	The id of the domain to operate on
13194cd5f440SThomas Gleixner  */
13204cd5f440SThomas Gleixner void msi_domain_free_irqs_all(struct device *dev, unsigned int domid)
13214cd5f440SThomas Gleixner {
13224cd5f440SThomas Gleixner 	msi_lock_descs(dev);
13234cd5f440SThomas Gleixner 	msi_domain_free_irqs_all_locked(dev, domid);
13244cd5f440SThomas Gleixner 	msi_unlock_descs(dev);
1325645474e2SThomas Gleixner }
1326645474e2SThomas Gleixner 
1327d9109698SJiang Liu /**
1328f3cf8bb0SJiang Liu  * msi_get_domain_info - Get the MSI interrupt domain info for @domain
1329f3cf8bb0SJiang Liu  * @domain:	The interrupt domain to retrieve data from
1330f3cf8bb0SJiang Liu  *
13313b35e7e6SRandy Dunlap  * Return: the pointer to the msi_domain_info stored in @domain->host_data.
1332f3cf8bb0SJiang Liu  */
1333f3cf8bb0SJiang Liu struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain)
1334f3cf8bb0SJiang Liu {
1335f3cf8bb0SJiang Liu 	return (struct msi_domain_info *)domain->host_data;
1336f3cf8bb0SJiang Liu }
1337