// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2014 Intel Corp.
 * Author: Jiang Liu <jiang.liu@linux.intel.com>
 *
 * This file is licensed under GPLv2.
 *
 * This file contains common code to support Message Signaled Interrupts (MSI)
 * for PCI compatible and non-PCI compatible devices.
 */
#include <linux/types.h>
#include <linux/device.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/slab.h>

#include "internals.h"

/**
 * alloc_msi_entry - Allocate and initialize an msi_desc entry
 * @dev:	Pointer to the device for which this is allocated
 * @nvec:	The number of vectors used in this entry
 * @affinity:	Optional pointer to an affinity mask array of size @nvec
 *
 * If @affinity is not NULL then an affinity array[@nvec] is allocated
 * and the affinity masks and flags from @affinity are copied.
 */
struct msi_desc *alloc_msi_entry(struct device *dev, int nvec,
				 const struct irq_affinity_desc *affinity)
{
	struct msi_desc *desc;

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return NULL;

	INIT_LIST_HEAD(&desc->list);
	desc->dev = dev;
	desc->nvec_used = nvec;
	if (affinity) {
		desc->affinity = kmemdup(affinity,
			nvec * sizeof(*desc->affinity), GFP_KERNEL);
		if (!desc->affinity) {
			kfree(desc);
			return NULL;
		}
	}

	return desc;
}

void free_msi_entry(struct msi_desc *entry)
{
	kfree(entry->affinity);
	kfree(entry);
}

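/*
 * Example (illustrative sketch, not used in this file): a bus layer that
 * owns the msi_desc list of a device pairs the two helpers above roughly
 * like this. This mirrors how the platform-MSI code uses them; descriptor
 * setup, error handling and locking are omitted:
 *
 *	struct msi_desc *desc;
 *
 *	desc = alloc_msi_entry(dev, 1, NULL);
 *	if (!desc)
 *		return -ENOMEM;
 *	list_add_tail(&desc->list, dev_to_msi_list(dev));
 *
 *	(... use the descriptor ...)
 *
 *	list_del(&desc->list);
 *	free_msi_entry(desc);
 */
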
void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{
	*msg = entry->msg;
}

void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg)
{
	struct msi_desc *entry = irq_get_msi_desc(irq);

	__get_cached_msi_msg(entry, msg);
}
EXPORT_SYMBOL_GPL(get_cached_msi_msg);

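/*
 * Example (illustrative sketch): a driver that must mirror the composed MSI
 * message into its own hardware can retrieve the cached copy for an already
 * allocated Linux interrupt number. The register offsets below are
 * hypothetical placeholders:
 *
 *	struct msi_msg msg;
 *
 *	get_cached_msi_msg(irq, &msg);
 *	writel(msg.address_lo, base + REG_MSI_ADDR_LO);
 *	writel(msg.address_hi, base + REG_MSI_ADDR_HI);
 *	writel(msg.data, base + REG_MSI_DATA);
 */
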
#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
static inline void irq_chip_write_msi_msg(struct irq_data *data,
					  struct msi_msg *msg)
{
	data->chip->irq_write_msi_msg(data, msg);
}

static void msi_check_level(struct irq_domain *domain, struct msi_msg *msg)
{
	struct msi_domain_info *info = domain->host_data;

	/*
	 * If the MSI provider has messed with the second message and
	 * not advertised that it is level-capable, signal the breakage.
	 */
	WARN_ON(!((info->flags & MSI_FLAG_LEVEL_CAPABLE) &&
		  (info->chip->flags & IRQCHIP_SUPPORTS_LEVEL_MSI)) &&
		(msg[1].address_lo || msg[1].address_hi || msg[1].data));
}

/**
 * msi_domain_set_affinity - Generic affinity setter function for MSI domains
 * @irq_data:	The irq data associated with the interrupt
 * @mask:	The affinity mask to set
 * @force:	Flag to enforce setting (disable online checks)
 *
 * Intended to be used by MSI interrupt controllers which are
 * implemented with hierarchical domains.
 */
int msi_domain_set_affinity(struct irq_data *irq_data,
			    const struct cpumask *mask, bool force)
{
	struct irq_data *parent = irq_data->parent_data;
	struct msi_msg msg[2] = { [1] = { }, };
	int ret;

	ret = parent->chip->irq_set_affinity(parent, mask, force);
	if (ret >= 0 && ret != IRQ_SET_MASK_OK_DONE) {
		BUG_ON(irq_chip_compose_msi_msg(irq_data, msg));
		msi_check_level(irq_data->domain, msg);
		irq_chip_write_msi_msg(irq_data, msg);
	}

	return ret;
}

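/*
 * Example (illustrative sketch): a hypothetical interrupt controller driver
 * can plug the generic setter straight into its MSI irq_chip as the
 * ->irq_set_affinity callback. This is also what MSI_FLAG_USE_DEF_CHIP_OPS
 * does automatically below when the callback is left NULL:
 *
 *	static struct irq_chip my_msi_chip = {
 *		.name			= "my-msi",
 *		.irq_mask		= irq_chip_mask_parent,
 *		.irq_unmask		= irq_chip_unmask_parent,
 *		.irq_set_affinity	= msi_domain_set_affinity,
 *		.irq_write_msi_msg	= my_write_msi_msg,
 *	};
 *
 * my_msi_chip and my_write_msi_msg are placeholders for the driver's own
 * chip and message writer.
 */
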
static int msi_domain_activate(struct irq_domain *domain,
			       struct irq_data *irq_data, bool early)
{
	struct msi_msg msg[2] = { [1] = { }, };

	BUG_ON(irq_chip_compose_msi_msg(irq_data, msg));
	msi_check_level(irq_data->domain, msg);
	irq_chip_write_msi_msg(irq_data, msg);
	return 0;
}

static void msi_domain_deactivate(struct irq_domain *domain,
				  struct irq_data *irq_data)
{
	struct msi_msg msg[2];

	memset(msg, 0, sizeof(msg));
	irq_chip_write_msi_msg(irq_data, msg);
}

static int msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
			    unsigned int nr_irqs, void *arg)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;
	irq_hw_number_t hwirq = ops->get_hwirq(info, arg);
	int i, ret;

	if (irq_find_mapping(domain, hwirq) > 0)
		return -EEXIST;

	if (domain->parent) {
		ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
		if (ret < 0)
			return ret;
	}

	for (i = 0; i < nr_irqs; i++) {
		ret = ops->msi_init(domain, info, virq + i, hwirq + i, arg);
		if (ret < 0) {
			if (ops->msi_free) {
				/* Unwind every entry initialized so far */
				for (i--; i >= 0; i--)
					ops->msi_free(domain, info, virq + i);
			}
			irq_domain_free_irqs_top(domain, virq, nr_irqs);
			return ret;
		}
	}

	return 0;
}

static void msi_domain_free(struct irq_domain *domain, unsigned int virq,
			    unsigned int nr_irqs)
{
	struct msi_domain_info *info = domain->host_data;
	int i;

	if (info->ops->msi_free) {
		for (i = 0; i < nr_irqs; i++)
			info->ops->msi_free(domain, info, virq + i);
	}
	irq_domain_free_irqs_top(domain, virq, nr_irqs);
}

static const struct irq_domain_ops msi_domain_ops = {
	.alloc		= msi_domain_alloc,
	.free		= msi_domain_free,
	.activate	= msi_domain_activate,
	.deactivate	= msi_domain_deactivate,
};

#ifdef GENERIC_MSI_DOMAIN_OPS
static irq_hw_number_t msi_domain_ops_get_hwirq(struct msi_domain_info *info,
						msi_alloc_info_t *arg)
{
	return arg->hwirq;
}

static int msi_domain_ops_prepare(struct irq_domain *domain, struct device *dev,
				  int nvec, msi_alloc_info_t *arg)
{
	memset(arg, 0, sizeof(*arg));
	return 0;
}

static void msi_domain_ops_set_desc(msi_alloc_info_t *arg,
				    struct msi_desc *desc)
{
	arg->desc = desc;
}
#else
#define msi_domain_ops_get_hwirq	NULL
#define msi_domain_ops_prepare		NULL
#define msi_domain_ops_set_desc		NULL
#endif /* !GENERIC_MSI_DOMAIN_OPS */

static int msi_domain_ops_init(struct irq_domain *domain,
			       struct msi_domain_info *info,
			       unsigned int virq, irq_hw_number_t hwirq,
			       msi_alloc_info_t *arg)
{
	irq_domain_set_hwirq_and_chip(domain, virq, hwirq, info->chip,
				      info->chip_data);
	if (info->handler && info->handler_name) {
		__irq_set_handler(virq, info->handler, 0, info->handler_name);
		if (info->handler_data)
			irq_set_handler_data(virq, info->handler_data);
	}
	return 0;
}

static int msi_domain_ops_check(struct irq_domain *domain,
				struct msi_domain_info *info,
				struct device *dev)
{
	return 0;
}

static struct msi_domain_ops msi_domain_ops_default = {
	.get_hwirq	= msi_domain_ops_get_hwirq,
	.msi_init	= msi_domain_ops_init,
	.msi_check	= msi_domain_ops_check,
	.msi_prepare	= msi_domain_ops_prepare,
	.set_desc	= msi_domain_ops_set_desc,
};

static void msi_domain_update_dom_ops(struct msi_domain_info *info)
{
	struct msi_domain_ops *ops = info->ops;

	if (ops == NULL) {
		info->ops = &msi_domain_ops_default;
		return;
	}

	if (ops->get_hwirq == NULL)
		ops->get_hwirq = msi_domain_ops_default.get_hwirq;
	if (ops->msi_init == NULL)
		ops->msi_init = msi_domain_ops_default.msi_init;
	if (ops->msi_check == NULL)
		ops->msi_check = msi_domain_ops_default.msi_check;
	if (ops->msi_prepare == NULL)
		ops->msi_prepare = msi_domain_ops_default.msi_prepare;
	if (ops->set_desc == NULL)
		ops->set_desc = msi_domain_ops_default.set_desc;
}

static void msi_domain_update_chip_ops(struct msi_domain_info *info)
{
	struct irq_chip *chip = info->chip;

	BUG_ON(!chip || !chip->irq_mask || !chip->irq_unmask);
	if (!chip->irq_set_affinity)
		chip->irq_set_affinity = msi_domain_set_affinity;
}

/**
 * msi_create_irq_domain - Create a MSI interrupt domain
 * @fwnode:	Optional fwnode of the interrupt controller
 * @info:	MSI domain info
 * @parent:	Parent irq domain
 */
struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode,
					 struct msi_domain_info *info,
					 struct irq_domain *parent)
{
	struct irq_domain *domain;

	if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS)
		msi_domain_update_dom_ops(info);
	if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
		msi_domain_update_chip_ops(info);

	domain = irq_domain_create_hierarchy(parent, IRQ_DOMAIN_FLAG_MSI, 0,
					     fwnode, &msi_domain_ops, info);

	if (domain && !domain->name && info->chip)
		domain->name = info->chip->name;

	return domain;
}

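/*
 * Example (illustrative sketch): a hypothetical MSI controller driver would
 * describe its domain once at probe time and let the MSI_FLAG_USE_DEF_*
 * flags fill in the default domain and chip callbacks:
 *
 *	static struct msi_domain_info my_msi_domain_info = {
 *		.flags	= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS,
 *		.chip	= &my_msi_chip,
 *	};
 *
 *	msi_domain = msi_create_irq_domain(dev_fwnode(dev),
 *					   &my_msi_domain_info, parent_domain);
 *	if (!msi_domain)
 *		return -ENOMEM;
 *
 * my_msi_chip, msi_domain and parent_domain stand in for the driver's own
 * objects; error handling is reduced to the minimum.
 */
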
int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev,
			    int nvec, msi_alloc_info_t *arg)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;
	int ret;

	ret = ops->msi_check(domain, info, dev);
	if (ret == 0)
		ret = ops->msi_prepare(domain, dev, nvec, arg);

	return ret;
}

int msi_domain_populate_irqs(struct irq_domain *domain, struct device *dev,
			     int virq, int nvec, msi_alloc_info_t *arg)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;
	struct msi_desc *desc;
	int ret = 0;

	for_each_msi_entry(desc, dev) {
		/* Don't even try the multi-MSI brain damage. */
		if (WARN_ON(!desc->irq || desc->nvec_used != 1)) {
			ret = -EINVAL;
			break;
		}

		if (!(desc->irq >= virq && desc->irq < (virq + nvec)))
			continue;

		ops->set_desc(arg, desc);
		/* Assumes the domain mutex is held! */
		ret = irq_domain_alloc_irqs_hierarchy(domain, desc->irq, 1,
						      arg);
		if (ret)
			break;

		irq_set_msi_desc_off(desc->irq, 0, desc);
	}

	if (ret) {
		/* Mop up the damage */
		for_each_msi_entry(desc, dev) {
			if (!(desc->irq >= virq && desc->irq < (virq + nvec)))
				continue;

			irq_domain_free_irqs_common(domain, desc->irq, 1);
		}
	}

	return ret;
}

/*
 * Carefully check whether the device can use reservation mode. If
 * reservation mode is enabled then the early activation will assign a
 * dummy vector to the device. If the PCI/MSI device does not support
 * masking of the entry then this can result in spurious interrupts when
 * the device driver is not absolutely careful. But even then a malfunction
 * of the hardware could result in a spurious interrupt on the dummy vector
 * and render the device unusable. If the entry can be masked then the core
 * logic will prevent the spurious interrupt and reservation mode can be
 * used. For now reservation mode is restricted to PCI/MSI.
 */
static bool msi_check_reservation_mode(struct irq_domain *domain,
				       struct msi_domain_info *info,
				       struct device *dev)
{
	struct msi_desc *desc;

	if (domain->bus_token != DOMAIN_BUS_PCI_MSI)
		return false;

	if (!(info->flags & MSI_FLAG_MUST_REACTIVATE))
		return false;

	if (IS_ENABLED(CONFIG_PCI_MSI) && pci_msi_ignore_mask)
		return false;

	/*
	 * Checking the first MSI descriptor is sufficient. MSIX supports
	 * masking and MSI does so when the maskbit is set.
	 */
	desc = first_msi_entry(dev);
	return desc->msi_attrib.is_msix || desc->msi_attrib.maskbit;
}

/**
 * msi_domain_alloc_irqs - Allocate interrupts from a MSI interrupt domain
 * @domain:	The domain to allocate from
 * @dev:	Pointer to device struct of the device for which the interrupts
 *		are allocated
 * @nvec:	The number of interrupts to allocate
 *
 * Returns 0 on success or an error code.
 */
int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
			  int nvec)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;
	struct irq_data *irq_data;
	struct msi_desc *desc;
	msi_alloc_info_t arg;
	int i, ret, virq;
	bool can_reserve;

	ret = msi_domain_prepare_irqs(domain, dev, nvec, &arg);
	if (ret)
		return ret;

	for_each_msi_entry(desc, dev) {
		ops->set_desc(&arg, desc);

		virq = __irq_domain_alloc_irqs(domain, -1, desc->nvec_used,
					       dev_to_node(dev), &arg, false,
					       desc->affinity);
		if (virq < 0) {
			ret = -ENOSPC;
			if (ops->handle_error)
				ret = ops->handle_error(domain, desc, ret);
			if (ops->msi_finish)
				ops->msi_finish(&arg, ret);
			return ret;
		}

		for (i = 0; i < desc->nvec_used; i++) {
			irq_set_msi_desc_off(virq, i, desc);
			irq_debugfs_copy_devname(virq + i, dev);
		}
	}

	if (ops->msi_finish)
		ops->msi_finish(&arg, 0);

	can_reserve = msi_check_reservation_mode(domain, info, dev);

	for_each_msi_entry(desc, dev) {
		virq = desc->irq;
		if (desc->nvec_used == 1)
			dev_dbg(dev, "irq %d for MSI\n", virq);
		else
			dev_dbg(dev, "irq [%d-%d] for MSI\n",
				virq, virq + desc->nvec_used - 1);
		/*
		 * This flag is set by the PCI layer as we need to activate
		 * the MSI entries before the PCI layer enables MSI in the
		 * card. Otherwise the card latches a random msi message.
		 */
		if (!(info->flags & MSI_FLAG_ACTIVATE_EARLY))
			continue;

		irq_data = irq_domain_get_irq_data(domain, desc->irq);
		if (!can_reserve) {
			irqd_clr_can_reserve(irq_data);
			if (domain->flags & IRQ_DOMAIN_MSI_NOMASK_QUIRK)
				irqd_set_msi_nomask_quirk(irq_data);
		}
		ret = irq_domain_activate_irq(irq_data, can_reserve);
		if (ret)
			goto cleanup;
	}

	/*
	 * If these interrupts use reservation mode, clear the activated bit
	 * so request_irq() will assign the final vector.
	 */
	if (can_reserve) {
		for_each_msi_entry(desc, dev) {
			irq_data = irq_domain_get_irq_data(domain, desc->irq);
			irqd_clr_activated(irq_data);
		}
	}
	return 0;

cleanup:
	for_each_msi_entry(desc, dev) {
		struct irq_data *irqd;

		if (desc->irq == virq)
			break;

		irqd = irq_domain_get_irq_data(domain, desc->irq);
		if (irqd_is_activated(irqd))
			irq_domain_deactivate_irq(irqd);
	}
	msi_domain_free_irqs(domain, dev);
	return ret;
}

/**
 * msi_domain_free_irqs - Free interrupts from a MSI interrupt @domain associated to @dev
 * @domain:	The domain managing the interrupts
 * @dev:	Pointer to device struct of the device for which the interrupts
 *		are freed
 */
void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev)
{
	struct msi_desc *desc;

	for_each_msi_entry(desc, dev) {
		/*
		 * We might have failed to allocate an MSI early
		 * enough that there is no IRQ associated to this
		 * entry. If that's the case, don't do anything.
		 */
		if (desc->irq) {
			irq_domain_free_irqs(desc->irq, desc->nvec_used);
			desc->irq = 0;
		}
	}
}

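/*
 * Example (illustrative sketch): a hypothetical non-PCI bus layer that has
 * already populated the msi_desc list of @dev drives the two entry points
 * above roughly like this (descriptor setup and error handling omitted):
 *
 *	ret = msi_domain_alloc_irqs(msi_domain, dev, nvec);
 *	if (ret)
 *		return ret;
 *
 *	(drivers then request_irq() the allocated Linux interrupt numbers)
 *
 *	msi_domain_free_irqs(msi_domain, dev);
 *
 * msi_domain and nvec are placeholders for the caller's domain pointer and
 * vector count. PCI devices do not call these directly; the PCI/MSI layer
 * wraps them via pci_msi_create_irq_domain() and related helpers.
 */
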
/**
 * msi_get_domain_info - Get the MSI interrupt domain info for @domain
 * @domain:	The interrupt domain to retrieve data from
 *
 * Returns the pointer to the msi_domain_info stored in
 * @domain->host_data.
 */
struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain)
{
	return (struct msi_domain_info *)domain->host_data;
}

#endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */