xref: /openbmc/linux/kernel/irq/msi.c (revision 4c457e8c)
152a65ff5SThomas Gleixner // SPDX-License-Identifier: GPL-2.0
2f3cf8bb0SJiang Liu /*
3f3cf8bb0SJiang Liu  * Copyright (C) 2014 Intel Corp.
4f3cf8bb0SJiang Liu  * Author: Jiang Liu <jiang.liu@linux.intel.com>
5f3cf8bb0SJiang Liu  *
6f3cf8bb0SJiang Liu  * This file is licensed under GPLv2.
7f3cf8bb0SJiang Liu  *
8f3cf8bb0SJiang Liu  * This file contains common code to support Message Signalled Interrupt for
9f3cf8bb0SJiang Liu  * PCI compatible and non PCI compatible devices.
10f3cf8bb0SJiang Liu  */
11aeeb5965SJiang Liu #include <linux/types.h>
12aeeb5965SJiang Liu #include <linux/device.h>
13f3cf8bb0SJiang Liu #include <linux/irq.h>
14f3cf8bb0SJiang Liu #include <linux/irqdomain.h>
15f3cf8bb0SJiang Liu #include <linux/msi.h>
164e201566SMarc Zyngier #include <linux/slab.h>
17d9109698SJiang Liu 
1807557ccbSThomas Gleixner #include "internals.h"
1907557ccbSThomas Gleixner 
2028f4b041SThomas Gleixner /**
2128f4b041SThomas Gleixner  * alloc_msi_entry - Allocate an initialize msi_entry
2228f4b041SThomas Gleixner  * @dev:	Pointer to the device for which this is allocated
2328f4b041SThomas Gleixner  * @nvec:	The number of vectors used in this entry
2428f4b041SThomas Gleixner  * @affinity:	Optional pointer to an affinity mask array size of @nvec
2528f4b041SThomas Gleixner  *
26bec04037SDou Liyang  * If @affinity is not NULL then an affinity array[@nvec] is allocated
27bec04037SDou Liyang  * and the affinity masks and flags from @affinity are copied.
2828f4b041SThomas Gleixner  */
29bec04037SDou Liyang struct msi_desc *alloc_msi_entry(struct device *dev, int nvec,
30bec04037SDou Liyang 				 const struct irq_affinity_desc *affinity)
31aa48b6f7SJiang Liu {
3228f4b041SThomas Gleixner 	struct msi_desc *desc;
3328f4b041SThomas Gleixner 
3428f4b041SThomas Gleixner 	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
35aa48b6f7SJiang Liu 	if (!desc)
36aa48b6f7SJiang Liu 		return NULL;
37aa48b6f7SJiang Liu 
38aa48b6f7SJiang Liu 	INIT_LIST_HEAD(&desc->list);
39aa48b6f7SJiang Liu 	desc->dev = dev;
4028f4b041SThomas Gleixner 	desc->nvec_used = nvec;
4128f4b041SThomas Gleixner 	if (affinity) {
4228f4b041SThomas Gleixner 		desc->affinity = kmemdup(affinity,
4328f4b041SThomas Gleixner 			nvec * sizeof(*desc->affinity), GFP_KERNEL);
4428f4b041SThomas Gleixner 		if (!desc->affinity) {
4528f4b041SThomas Gleixner 			kfree(desc);
4628f4b041SThomas Gleixner 			return NULL;
4728f4b041SThomas Gleixner 		}
4828f4b041SThomas Gleixner 	}
49aa48b6f7SJiang Liu 
50aa48b6f7SJiang Liu 	return desc;
51aa48b6f7SJiang Liu }
52aa48b6f7SJiang Liu 
53aa48b6f7SJiang Liu void free_msi_entry(struct msi_desc *entry)
54aa48b6f7SJiang Liu {
5528f4b041SThomas Gleixner 	kfree(entry->affinity);
56aa48b6f7SJiang Liu 	kfree(entry);
57aa48b6f7SJiang Liu }
58aa48b6f7SJiang Liu 
/* Copy the last composed MSI message cached in @entry into @msg */
void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{
	*msg = entry->msg;
}
6338b6a1cfSJiang Liu 
/*
 * Copy the cached MSI message of the interrupt @irq into @msg.
 * NOTE(review): like the original, this does not check for a missing
 * descriptor — irq_get_msi_desc() returning NULL would oops; callers
 * are expected to pass a valid MSI interrupt.
 */
void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg)
{
	__get_cached_msi_msg(irq_get_msi_desc(irq), msg);
}
EXPORT_SYMBOL_GPL(get_cached_msi_msg);
7138b6a1cfSJiang Liu 
72f3cf8bb0SJiang Liu #ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
/* Hand @msg to the irq chip owning @data for writing to the device */
static inline void irq_chip_write_msi_msg(struct irq_data *data,
					  struct msi_msg *msg)
{
	data->chip->irq_write_msi_msg(data, msg);
}
7874faaf7aSThomas Gleixner 
/*
 * Sanity check a composed message pair. @msg points to a two element
 * array; msg[1] is only allowed to be non-zero when both the domain
 * (MSI_FLAG_LEVEL_CAPABLE) and the irq chip (IRQCHIP_SUPPORTS_LEVEL_MSI)
 * advertise level-triggered MSI support.
 */
static void msi_check_level(struct irq_domain *domain, struct msi_msg *msg)
{
	struct msi_domain_info *info = domain->host_data;

	/*
	 * If the MSI provider has messed with the second message and
	 * not advertized that it is level-capable, signal the breakage.
	 */
	WARN_ON(!((info->flags & MSI_FLAG_LEVEL_CAPABLE) &&
		  (info->chip->flags & IRQCHIP_SUPPORTS_LEVEL_MSI)) &&
		(msg[1].address_lo || msg[1].address_hi || msg[1].data));
}
910be8153cSMarc Zyngier 
92f3cf8bb0SJiang Liu /**
93f3cf8bb0SJiang Liu  * msi_domain_set_affinity - Generic affinity setter function for MSI domains
94f3cf8bb0SJiang Liu  * @irq_data:	The irq data associated to the interrupt
95f3cf8bb0SJiang Liu  * @mask:	The affinity mask to set
96f3cf8bb0SJiang Liu  * @force:	Flag to enforce setting (disable online checks)
97f3cf8bb0SJiang Liu  *
98f3cf8bb0SJiang Liu  * Intended to be used by MSI interrupt controllers which are
99f3cf8bb0SJiang Liu  * implemented with hierarchical domains.
100f3cf8bb0SJiang Liu  */
101f3cf8bb0SJiang Liu int msi_domain_set_affinity(struct irq_data *irq_data,
102f3cf8bb0SJiang Liu 			    const struct cpumask *mask, bool force)
103f3cf8bb0SJiang Liu {
104f3cf8bb0SJiang Liu 	struct irq_data *parent = irq_data->parent_data;
1050be8153cSMarc Zyngier 	struct msi_msg msg[2] = { [1] = { }, };
106f3cf8bb0SJiang Liu 	int ret;
107f3cf8bb0SJiang Liu 
108f3cf8bb0SJiang Liu 	ret = parent->chip->irq_set_affinity(parent, mask, force);
109f3cf8bb0SJiang Liu 	if (ret >= 0 && ret != IRQ_SET_MASK_OK_DONE) {
1100be8153cSMarc Zyngier 		BUG_ON(irq_chip_compose_msi_msg(irq_data, msg));
1110be8153cSMarc Zyngier 		msi_check_level(irq_data->domain, msg);
1120be8153cSMarc Zyngier 		irq_chip_write_msi_msg(irq_data, msg);
113f3cf8bb0SJiang Liu 	}
114f3cf8bb0SJiang Liu 
115f3cf8bb0SJiang Liu 	return ret;
116f3cf8bb0SJiang Liu }
117f3cf8bb0SJiang Liu 
/*
 * Activate an MSI interrupt: compose the message for @irq_data and
 * write it to the device. @early distinguishes the early activation
 * pass; it is unused here. Always succeeds (returns 0).
 */
static int msi_domain_activate(struct irq_domain *domain,
			       struct irq_data *irq_data, bool early)
{
	/* msg[1] must start out zeroed; see msi_check_level() */
	struct msi_msg msg[2] = { [1] = { }, };

	/* A failing composition is a programming error */
	BUG_ON(irq_chip_compose_msi_msg(irq_data, msg));
	msi_check_level(irq_data->domain, msg);
	irq_chip_write_msi_msg(irq_data, msg);
	return 0;
}
128f3cf8bb0SJiang Liu 
129f3cf8bb0SJiang Liu static void msi_domain_deactivate(struct irq_domain *domain,
130f3cf8bb0SJiang Liu 				  struct irq_data *irq_data)
131f3cf8bb0SJiang Liu {
1320be8153cSMarc Zyngier 	struct msi_msg msg[2];
133f3cf8bb0SJiang Liu 
1340be8153cSMarc Zyngier 	memset(msg, 0, sizeof(msg));
1350be8153cSMarc Zyngier 	irq_chip_write_msi_msg(irq_data, msg);
136f3cf8bb0SJiang Liu }
137f3cf8bb0SJiang Liu 
138f3cf8bb0SJiang Liu static int msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
139f3cf8bb0SJiang Liu 			    unsigned int nr_irqs, void *arg)
140f3cf8bb0SJiang Liu {
141f3cf8bb0SJiang Liu 	struct msi_domain_info *info = domain->host_data;
142f3cf8bb0SJiang Liu 	struct msi_domain_ops *ops = info->ops;
143f3cf8bb0SJiang Liu 	irq_hw_number_t hwirq = ops->get_hwirq(info, arg);
144f3cf8bb0SJiang Liu 	int i, ret;
145f3cf8bb0SJiang Liu 
146f3cf8bb0SJiang Liu 	if (irq_find_mapping(domain, hwirq) > 0)
147f3cf8bb0SJiang Liu 		return -EEXIST;
148f3cf8bb0SJiang Liu 
149bf6f869fSLiu Jiang 	if (domain->parent) {
150f3cf8bb0SJiang Liu 		ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
151f3cf8bb0SJiang Liu 		if (ret < 0)
152f3cf8bb0SJiang Liu 			return ret;
153bf6f869fSLiu Jiang 	}
154f3cf8bb0SJiang Liu 
155f3cf8bb0SJiang Liu 	for (i = 0; i < nr_irqs; i++) {
156f3cf8bb0SJiang Liu 		ret = ops->msi_init(domain, info, virq + i, hwirq + i, arg);
157f3cf8bb0SJiang Liu 		if (ret < 0) {
158f3cf8bb0SJiang Liu 			if (ops->msi_free) {
159f3cf8bb0SJiang Liu 				for (i--; i > 0; i--)
160f3cf8bb0SJiang Liu 					ops->msi_free(domain, info, virq + i);
161f3cf8bb0SJiang Liu 			}
162f3cf8bb0SJiang Liu 			irq_domain_free_irqs_top(domain, virq, nr_irqs);
163f3cf8bb0SJiang Liu 			return ret;
164f3cf8bb0SJiang Liu 		}
165f3cf8bb0SJiang Liu 	}
166f3cf8bb0SJiang Liu 
167f3cf8bb0SJiang Liu 	return 0;
168f3cf8bb0SJiang Liu }
169f3cf8bb0SJiang Liu 
170f3cf8bb0SJiang Liu static void msi_domain_free(struct irq_domain *domain, unsigned int virq,
171f3cf8bb0SJiang Liu 			    unsigned int nr_irqs)
172f3cf8bb0SJiang Liu {
173f3cf8bb0SJiang Liu 	struct msi_domain_info *info = domain->host_data;
174f3cf8bb0SJiang Liu 	int i;
175f3cf8bb0SJiang Liu 
176f3cf8bb0SJiang Liu 	if (info->ops->msi_free) {
177f3cf8bb0SJiang Liu 		for (i = 0; i < nr_irqs; i++)
178f3cf8bb0SJiang Liu 			info->ops->msi_free(domain, info, virq + i);
179f3cf8bb0SJiang Liu 	}
180f3cf8bb0SJiang Liu 	irq_domain_free_irqs_top(domain, virq, nr_irqs);
181f3cf8bb0SJiang Liu }
182f3cf8bb0SJiang Liu 
/* Generic irq_domain_ops shared by all MSI irq domains */
static const struct irq_domain_ops msi_domain_ops = {
	.alloc		= msi_domain_alloc,
	.free		= msi_domain_free,
	.activate	= msi_domain_activate,
	.deactivate	= msi_domain_deactivate,
};
189f3cf8bb0SJiang Liu 
/* Default get_hwirq: the hardware irq number is carried in the alloc info */
static irq_hw_number_t msi_domain_ops_get_hwirq(struct msi_domain_info *info,
						msi_alloc_info_t *arg)
{
	return arg->hwirq;
}
195aeeb5965SJiang Liu 
/* Default msi_prepare: just zero the allocation info, nothing to set up */
static int msi_domain_ops_prepare(struct irq_domain *domain, struct device *dev,
				  int nvec, msi_alloc_info_t *arg)
{
	memset(arg, 0, sizeof(*arg));
	return 0;
}
202aeeb5965SJiang Liu 
/* Default set_desc: store the descriptor pointer in the alloc info */
static void msi_domain_ops_set_desc(msi_alloc_info_t *arg,
				    struct msi_desc *desc)
{
	arg->desc = desc;
}
208aeeb5965SJiang Liu 
/*
 * Default msi_init: wire up hwirq, chip and chip_data for @virq and
 * optionally install the flow handler configured in @info.
 */
static int msi_domain_ops_init(struct irq_domain *domain,
			       struct msi_domain_info *info,
			       unsigned int virq, irq_hw_number_t hwirq,
			       msi_alloc_info_t *arg)
{
	irq_domain_set_hwirq_and_chip(domain, virq, hwirq, info->chip,
				      info->chip_data);
	/* handler_data is only installed when a handler is configured too */
	if (info->handler && info->handler_name) {
		__irq_set_handler(virq, info->handler, 0, info->handler_name);
		if (info->handler_data)
			irq_set_handler_data(virq, info->handler_data);
	}
	return 0;
}
223aeeb5965SJiang Liu 
/* Default msi_check: no restrictions, every device is acceptable */
static int msi_domain_ops_check(struct irq_domain *domain,
				struct msi_domain_info *info,
				struct device *dev)
{
	return 0;
}
230aeeb5965SJiang Liu 
/* Fallback implementations used by msi_domain_update_dom_ops() */
static struct msi_domain_ops msi_domain_ops_default = {
	.get_hwirq		= msi_domain_ops_get_hwirq,
	.msi_init		= msi_domain_ops_init,
	.msi_check		= msi_domain_ops_check,
	.msi_prepare		= msi_domain_ops_prepare,
	.set_desc		= msi_domain_ops_set_desc,
	.domain_alloc_irqs	= __msi_domain_alloc_irqs,
	.domain_free_irqs	= __msi_domain_free_irqs,
};
240aeeb5965SJiang Liu 
/*
 * Fill in missing callbacks in info->ops with the defaults.
 *
 * domain_alloc_irqs/domain_free_irqs are always defaulted; the
 * remaining callbacks are only defaulted when the domain opted in
 * via MSI_FLAG_USE_DEF_DOM_OPS.
 */
static void msi_domain_update_dom_ops(struct msi_domain_info *info)
{
	struct msi_domain_ops *ops = info->ops;

	/* No ops supplied at all: use the complete default set */
	if (ops == NULL) {
		info->ops = &msi_domain_ops_default;
		return;
	}

	if (ops->domain_alloc_irqs == NULL)
		ops->domain_alloc_irqs = msi_domain_ops_default.domain_alloc_irqs;
	if (ops->domain_free_irqs == NULL)
		ops->domain_free_irqs = msi_domain_ops_default.domain_free_irqs;

	/* The per-callback defaults below are opt-in */
	if (!(info->flags & MSI_FLAG_USE_DEF_DOM_OPS))
		return;

	if (ops->get_hwirq == NULL)
		ops->get_hwirq = msi_domain_ops_default.get_hwirq;
	if (ops->msi_init == NULL)
		ops->msi_init = msi_domain_ops_default.msi_init;
	if (ops->msi_check == NULL)
		ops->msi_check = msi_domain_ops_default.msi_check;
	if (ops->msi_prepare == NULL)
		ops->msi_prepare = msi_domain_ops_default.msi_prepare;
	if (ops->set_desc == NULL)
		ops->set_desc = msi_domain_ops_default.set_desc;
}
269aeeb5965SJiang Liu 
/*
 * Sanity check the irq chip of an MSI domain and default its affinity
 * setter. A chip without mask/unmask cannot be driven by this code.
 */
static void msi_domain_update_chip_ops(struct msi_domain_info *info)
{
	struct irq_chip *chip = info->chip;

	BUG_ON(!chip || !chip->irq_mask || !chip->irq_unmask);
	if (!chip->irq_set_affinity)
		chip->irq_set_affinity = msi_domain_set_affinity;
}
278aeeb5965SJiang Liu 
/**
 * msi_create_irq_domain - Create a MSI interrupt domain
 * @fwnode:	Optional fwnode of the interrupt controller
 * @info:	MSI domain info
 * @parent:	Parent irq domain
 *
 * Fills in default domain and (optionally) chip callbacks in @info,
 * then creates a hierarchical irq domain on top of @parent.
 * Returns the new domain or NULL on failure.
 */
struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode,
					 struct msi_domain_info *info,
					 struct irq_domain *parent)
{
	struct irq_domain *domain;

	msi_domain_update_dom_ops(info);
	if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
		msi_domain_update_chip_ops(info);

	domain = irq_domain_create_hierarchy(parent, IRQ_DOMAIN_FLAG_MSI, 0,
					     fwnode, &msi_domain_ops, info);

	/* Fall back to the chip name when the domain got none assigned */
	if (domain && !domain->name && info->chip)
		domain->name = info->chip->name;

	return domain;
}
303f3cf8bb0SJiang Liu 
304b2eba39bSMarc Zyngier int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev,
305b2eba39bSMarc Zyngier 			    int nvec, msi_alloc_info_t *arg)
306b2eba39bSMarc Zyngier {
307b2eba39bSMarc Zyngier 	struct msi_domain_info *info = domain->host_data;
308b2eba39bSMarc Zyngier 	struct msi_domain_ops *ops = info->ops;
309b2eba39bSMarc Zyngier 	int ret;
310b2eba39bSMarc Zyngier 
311b2eba39bSMarc Zyngier 	ret = ops->msi_check(domain, info, dev);
312b2eba39bSMarc Zyngier 	if (ret == 0)
313b2eba39bSMarc Zyngier 		ret = ops->msi_prepare(domain, dev, nvec, arg);
314b2eba39bSMarc Zyngier 
315b2eba39bSMarc Zyngier 	return ret;
316b2eba39bSMarc Zyngier }
317b2eba39bSMarc Zyngier 
/*
 * Populate the interrupts in the range [@virq, @virq + @nvec) for the
 * already present MSI descriptors of @dev in @domain. Each matching
 * descriptor must describe exactly one vector (multi-MSI is rejected).
 * On failure all interrupts in the range are torn down again.
 *
 * NOTE(review): the "domain mutex is held" comment below documents a
 * caller obligation — nothing here takes a lock.
 */
int msi_domain_populate_irqs(struct irq_domain *domain, struct device *dev,
			     int virq, int nvec, msi_alloc_info_t *arg)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;
	struct msi_desc *desc;
	int ret = 0;

	for_each_msi_entry(desc, dev) {
		/* Don't even try the multi-MSI brain damage. */
		if (WARN_ON(!desc->irq || desc->nvec_used != 1)) {
			ret = -EINVAL;
			break;
		}

		/* Only handle descriptors inside the requested range */
		if (!(desc->irq >= virq && desc->irq < (virq + nvec)))
			continue;

		ops->set_desc(arg, desc);
		/* Assumes the domain mutex is held! */
		ret = irq_domain_alloc_irqs_hierarchy(domain, desc->irq, 1,
						      arg);
		if (ret)
			break;

		irq_set_msi_desc_off(desc->irq, 0, desc);
	}

	if (ret) {
		/* Mop up the damage */
		for_each_msi_entry(desc, dev) {
			if (!(desc->irq >= virq && desc->irq < (virq + nvec)))
				continue;

			irq_domain_free_irqs_common(domain, desc->irq, 1);
		}
	}

	return ret;
}
3582145ac93SMarc Zyngier 
/*
 * Carefully check whether the device can use reservation mode. If
 * reservation mode is enabled then the early activation will assign a
 * dummy vector to the device. If the PCI/MSI device does not support
 * masking of the entry then this can result in spurious interrupts when
 * the device driver is not absolutely careful. But even then a malfunction
 * of the hardware could result in a spurious interrupt on the dummy vector
 * and render the device unusable. If the entry can be masked then the core
 * logic will prevent the spurious interrupt and reservation mode can be
 * used. For now reservation mode is restricted to PCI/MSI.
 */
static bool msi_check_reservation_mode(struct irq_domain *domain,
				       struct msi_domain_info *info,
				       struct device *dev)
{
	struct msi_desc *desc;

	/* Only PCI-style MSI domains participate in reservation mode */
	switch(domain->bus_token) {
	case DOMAIN_BUS_PCI_MSI:
	case DOMAIN_BUS_VMD_MSI:
		break;
	default:
		return false;
	}

	if (!(info->flags & MSI_FLAG_MUST_REACTIVATE))
		return false;

	/* Global opt-out: entry masking is disabled on the command line */
	if (IS_ENABLED(CONFIG_PCI_MSI) && pci_msi_ignore_mask)
		return false;

	/*
	 * Checking the first MSI descriptor is sufficient. MSIX supports
	 * masking and MSI does so when the maskbit is set.
	 */
	desc = first_msi_entry(dev);
	return desc->msi_attrib.is_msix || desc->msi_attrib.maskbit;
}
397da5dd9e8SThomas Gleixner 
/*
 * Default domain_alloc_irqs implementation: allocate @nvec interrupts
 * for the MSI descriptors of @dev, optionally activate them early, and
 * apply reservation-mode fixups. Returns 0 on success or a negative
 * error code; on activation failure all allocated interrupts are freed.
 */
int __msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
			    int nvec)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;
	struct irq_data *irq_data;
	struct msi_desc *desc;
	msi_alloc_info_t arg = { };
	int i, ret, virq;
	bool can_reserve;

	ret = msi_domain_prepare_irqs(domain, dev, nvec, &arg);
	if (ret)
		return ret;

	for_each_msi_entry(desc, dev) {
		ops->set_desc(&arg, desc);

		virq = __irq_domain_alloc_irqs(domain, -1, desc->nvec_used,
					       dev_to_node(dev), &arg, false,
					       desc->affinity);
		if (virq < 0) {
			/*
			 * NOTE(review): the actual error carried in virq is
			 * discarded and replaced with -ENOSPC unless the
			 * optional handle_error() callback overrides it.
			 */
			ret = -ENOSPC;
			if (ops->handle_error)
				ret = ops->handle_error(domain, desc, ret);
			if (ops->msi_finish)
				ops->msi_finish(&arg, ret);
			return ret;
		}

		/* Attach the descriptor to every allocated vector */
		for (i = 0; i < desc->nvec_used; i++) {
			irq_set_msi_desc_off(virq, i, desc);
			irq_debugfs_copy_devname(virq + i, dev);
		}
	}

	if (ops->msi_finish)
		ops->msi_finish(&arg, 0);

	can_reserve = msi_check_reservation_mode(domain, info, dev);

	/*
	 * This flag is set by the PCI layer as we need to activate
	 * the MSI entries before the PCI layer enables MSI in the
	 * card. Otherwise the card latches a random msi message.
	 */
	if (!(info->flags & MSI_FLAG_ACTIVATE_EARLY))
		goto skip_activate;

	for_each_msi_vector(desc, i, dev) {
		/* Log once per descriptor, at its first vector */
		if (desc->irq == i) {
			virq = desc->irq;
			dev_dbg(dev, "irq [%d-%d] for MSI\n",
				virq, virq + desc->nvec_used - 1);
		}

		irq_data = irq_domain_get_irq_data(domain, i);
		if (!can_reserve) {
			irqd_clr_can_reserve(irq_data);
			if (domain->flags & IRQ_DOMAIN_MSI_NOMASK_QUIRK)
				irqd_set_msi_nomask_quirk(irq_data);
		}
		ret = irq_domain_activate_irq(irq_data, can_reserve);
		if (ret)
			goto cleanup;
	}

skip_activate:
	/*
	 * If these interrupts use reservation mode, clear the activated bit
	 * so request_irq() will assign the final vector.
	 */
	if (can_reserve) {
		for_each_msi_vector(desc, i, dev) {
			irq_data = irq_domain_get_irq_data(domain, i);
			irqd_clr_activated(irq_data);
		}
	}
	return 0;

cleanup:
	/* Deactivate whatever was activated before freeing everything */
	for_each_msi_vector(desc, i, dev) {
		irq_data = irq_domain_get_irq_data(domain, i);
		if (irqd_is_activated(irq_data))
			irq_domain_deactivate_irq(irq_data);
	}
	msi_domain_free_irqs(domain, dev);
	return ret;
}
487d9109698SJiang Liu 
488d9109698SJiang Liu /**
48943e9e705SThomas Gleixner  * msi_domain_alloc_irqs - Allocate interrupts from a MSI interrupt domain
49043e9e705SThomas Gleixner  * @domain:	The domain to allocate from
491d9109698SJiang Liu  * @dev:	Pointer to device struct of the device for which the interrupts
49243e9e705SThomas Gleixner  *		are allocated
49343e9e705SThomas Gleixner  * @nvec:	The number of interrupts to allocate
49443e9e705SThomas Gleixner  *
49543e9e705SThomas Gleixner  * Returns 0 on success or an error code.
496d9109698SJiang Liu  */
49743e9e705SThomas Gleixner int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
49843e9e705SThomas Gleixner 			  int nvec)
49943e9e705SThomas Gleixner {
50043e9e705SThomas Gleixner 	struct msi_domain_info *info = domain->host_data;
50143e9e705SThomas Gleixner 	struct msi_domain_ops *ops = info->ops;
50243e9e705SThomas Gleixner 
50343e9e705SThomas Gleixner 	return ops->domain_alloc_irqs(domain, dev, nvec);
50443e9e705SThomas Gleixner }
50543e9e705SThomas Gleixner 
50643e9e705SThomas Gleixner void __msi_domain_free_irqs(struct irq_domain *domain, struct device *dev)
507d9109698SJiang Liu {
508d9109698SJiang Liu 	struct msi_desc *desc;
509d9109698SJiang Liu 
510d9109698SJiang Liu 	for_each_msi_entry(desc, dev) {
511fe0c52fcSMarc Zyngier 		/*
512fe0c52fcSMarc Zyngier 		 * We might have failed to allocate an MSI early
513fe0c52fcSMarc Zyngier 		 * enough that there is no IRQ associated to this
514fe0c52fcSMarc Zyngier 		 * entry. If that's the case, don't do anything.
515fe0c52fcSMarc Zyngier 		 */
516fe0c52fcSMarc Zyngier 		if (desc->irq) {
517d9109698SJiang Liu 			irq_domain_free_irqs(desc->irq, desc->nvec_used);
518d9109698SJiang Liu 			desc->irq = 0;
519d9109698SJiang Liu 		}
520d9109698SJiang Liu 	}
521fe0c52fcSMarc Zyngier }
522d9109698SJiang Liu 
523d9109698SJiang Liu /**
52443e9e705SThomas Gleixner  * __msi_domain_free_irqs - Free interrupts from a MSI interrupt @domain associated tp @dev
52543e9e705SThomas Gleixner  * @domain:	The domain to managing the interrupts
52643e9e705SThomas Gleixner  * @dev:	Pointer to device struct of the device for which the interrupts
52743e9e705SThomas Gleixner  *		are free
52843e9e705SThomas Gleixner  */
52943e9e705SThomas Gleixner void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev)
53043e9e705SThomas Gleixner {
53143e9e705SThomas Gleixner 	struct msi_domain_info *info = domain->host_data;
53243e9e705SThomas Gleixner 	struct msi_domain_ops *ops = info->ops;
53343e9e705SThomas Gleixner 
53443e9e705SThomas Gleixner 	return ops->domain_free_irqs(domain, dev);
53543e9e705SThomas Gleixner }
53643e9e705SThomas Gleixner 
53743e9e705SThomas Gleixner /**
538f3cf8bb0SJiang Liu  * msi_get_domain_info - Get the MSI interrupt domain info for @domain
539f3cf8bb0SJiang Liu  * @domain:	The interrupt domain to retrieve data from
540f3cf8bb0SJiang Liu  *
541f3cf8bb0SJiang Liu  * Returns the pointer to the msi_domain_info stored in
542f3cf8bb0SJiang Liu  * @domain->host_data.
543f3cf8bb0SJiang Liu  */
544f3cf8bb0SJiang Liu struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain)
545f3cf8bb0SJiang Liu {
546f3cf8bb0SJiang Liu 	return (struct msi_domain_info *)domain->host_data;
547f3cf8bb0SJiang Liu }
548f3cf8bb0SJiang Liu 
549f3cf8bb0SJiang Liu #endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */
550