// SPDX-License-Identifier: GPL-2.0-only
/*
 * Support of MSI, HPET and DMAR interrupts.
 *
 * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
 * Moved from arch/x86/kernel/apic/io_apic.c.
 * Jiang Liu <jiang.liu@linux.intel.com>
 *	Convert to hierarchical irqdomain
 */
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/hpet.h>
#include <linux/msi.h>
#include <asm/irqdomain.h>
#include <asm/hpet.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>
#include <asm/irq_remapping.h>
#include <asm/xen/hypervisor.h>

struct irq_domain *x86_pci_msi_default_domain __ro_after_init;

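/*
 * Compose the MSI message for the configuration in @cfg and let the irq
 * chip of @irqd write it to the device.
 */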
static void irq_msi_update_msg(struct irq_data *irqd, struct irq_cfg *cfg)
{
	struct msi_msg msg[2] = { [1] = { }, };

	__irq_msi_compose_msg(cfg, msg, false);
	irq_data_get_irq_chip(irqd)->irq_write_msi_msg(irqd, msg);
}

static int
msi_set_affinity(struct irq_data *irqd, const struct cpumask *mask, bool force)
{
	struct irq_cfg old_cfg, *cfg = irqd_cfg(irqd);
	struct irq_data *parent = irqd->parent_data;
	unsigned int cpu;
	int ret;

	/* Save the current configuration */
	cpu = cpumask_first(irq_data_get_effective_affinity_mask(irqd));
	old_cfg = *cfg;

	/* Allocate a new target vector */
	ret = parent->chip->irq_set_affinity(parent, mask, force);
	if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
		return ret;

	/*
	 * For non-maskable and non-remapped MSI interrupts the migration
	 * to a different destination CPU and a different vector has to be
	 * done carefully to handle the possible stray interrupt which can be
	 * caused by the non-atomic update of the address/data pair.
	 *
	 * Direct update is possible when:
	 * - The MSI is maskable (remapped MSI does not use this code path).
	 *   The reservation mode bit is set in this case.
	 * - The new vector is the same as the old vector
	 * - The old vector is MANAGED_IRQ_SHUTDOWN_VECTOR (interrupt starts up)
	 * - The interrupt is not yet started up
	 * - The new destination CPU is the same as the old destination CPU
	 */
	if (!irqd_can_reserve(irqd) ||
	    cfg->vector == old_cfg.vector ||
	    old_cfg.vector == MANAGED_IRQ_SHUTDOWN_VECTOR ||
	    !irqd_is_started(irqd) ||
	    cfg->dest_apicid == old_cfg.dest_apicid) {
		irq_msi_update_msg(irqd, cfg);
		return ret;
	}

	/*
	 * Paranoia: Validate that the interrupt target is the local
	 * CPU.
	 */
	if (WARN_ON_ONCE(cpu != smp_processor_id())) {
		irq_msi_update_msg(irqd, cfg);
		return ret;
	}

	/*
	 * Redirect the interrupt to the new vector on the current CPU
	 * first. This might cause a spurious interrupt on this vector if
	 * the device raises an interrupt right between this update and the
	 * update to the final destination CPU.
	 *
	 * If the vector is in use then the installed device handler will
	 * denote it as spurious which is no harm as this is a rare event
	 * and interrupt handlers have to cope with spurious interrupts
	 * anyway. If the vector is unused, then it is marked so it won't
	 * trigger the 'No irq handler for vector' warning in
	 * common_interrupt().
	 *
	 * This requires holding the vector lock to prevent concurrent
	 * updates to the affected vector.
	 */
	lock_vector_lock();

	/*
	 * Mark the new target vector on the local CPU if it is currently
	 * unused. Reuse the VECTOR_RETRIGGERED state which is also used in
	 * the CPU hotplug path for a similar purpose. This cannot be
	 * undone here as the current CPU has interrupts disabled and
	 * cannot handle the interrupt before the whole set_affinity()
	 * section is done. In the CPU unplug case, the current CPU is
	 * about to vanish and will not handle any interrupts anymore. The
	 * vector is cleaned up when the CPU comes online again.
	 */
	if (IS_ERR_OR_NULL(this_cpu_read(vector_irq[cfg->vector])))
		this_cpu_write(vector_irq[cfg->vector], VECTOR_RETRIGGERED);

	/* Redirect it to the new vector on the local CPU temporarily */
	old_cfg.vector = cfg->vector;
	irq_msi_update_msg(irqd, &old_cfg);

	/* Now transition it to the target CPU */
	irq_msi_update_msg(irqd, cfg);

	/*
	 * All interrupts after this point are now targeted at the new
	 * vector/CPU.
	 *
	 * Drop vector lock before testing whether the temporary assignment
	 * to the local CPU was hit by an interrupt raised in the device,
	 * because the retrigger function acquires vector lock again.
	 */
	unlock_vector_lock();

	/*
	 * Check whether the transition raced with a device interrupt and
	 * is pending in the local APIC's IRR. It is safe to do this outside
	 * of vector lock as the irq_desc::lock of this interrupt is still
	 * held and interrupts are disabled: The check is not accessing the
	 * underlying vector store. It's just checking the local APIC's
	 * IRR.
	 */
	if (lapic_vector_set_in_irr(cfg->vector))
		irq_data_get_irq_chip(irqd)->irq_retrigger(irqd);

	return ret;
}

/**
 * pci_dev_has_default_msi_parent_domain - Check whether the device has the default
 *					    MSI parent domain associated
 * @dev:	Pointer to the PCI device
 */
bool pci_dev_has_default_msi_parent_domain(struct pci_dev *dev)
{
	struct irq_domain *domain = dev_get_msi_domain(&dev->dev);

	if (!domain)
		domain = dev_get_msi_domain(&dev->bus->dev);
	if (!domain)
		return false;

	return domain == x86_vector_domain;
}

/**
 * x86_msi_prepare - Setup of msi_alloc_info_t for allocations
 * @domain:	The domain for which this setup happens
 * @dev:	The device for which interrupts are allocated
 * @nvec:	The number of vectors to allocate
 * @alloc:	The allocation info structure to initialize
 *
 * This function is to be used for all types of MSI domains above the x86
 * vector domain and any intermediates. It is always invoked from the
 * top level interrupt domain. The domain specific allocation
 * functionality is determined via the @domain's bus token, which allows
 * mapping it to the x86 specific allocation type.
 */
static int x86_msi_prepare(struct irq_domain *domain, struct device *dev,
			   int nvec, msi_alloc_info_t *alloc)
{
	struct msi_domain_info *info = domain->host_data;

	init_irq_alloc_info(alloc, NULL);

	switch (info->bus_token) {
	case DOMAIN_BUS_PCI_DEVICE_MSI:
		alloc->type = X86_IRQ_ALLOC_TYPE_PCI_MSI;
		return 0;
	case DOMAIN_BUS_PCI_DEVICE_MSIX:
	case DOMAIN_BUS_PCI_DEVICE_IMS:
		alloc->type = X86_IRQ_ALLOC_TYPE_PCI_MSIX;
		return 0;
	default:
		return -EINVAL;
	}
}

/**
 * x86_init_dev_msi_info - Domain info setup for MSI domains
 * @dev:		The device for which the domain should be created
 * @domain:		The (root) domain providing this callback
 * @real_parent:	The real parent domain of the domain to be initialized
 * @info:		The domain info for the domain to be initialized
 *
 * This function is to be used for all types of MSI domains above the x86
 * vector domain and any intermediates. The domain specific functionality
 * is determined via the @real_parent.
 */
static bool x86_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
				  struct irq_domain *real_parent, struct msi_domain_info *info)
{
	const struct msi_parent_ops *pops = real_parent->msi_parent_ops;

	/* MSI parent domain specific settings */
	switch (real_parent->bus_token) {
	case DOMAIN_BUS_ANY:
		/* Only the vector domain can have the ANY token */
		if (WARN_ON_ONCE(domain != real_parent))
			return false;
		info->chip->irq_set_affinity = msi_set_affinity;
		break;
	case DOMAIN_BUS_DMAR:
	case DOMAIN_BUS_AMDVI:
		break;
	default:
		WARN_ON_ONCE(1);
		return false;
	}

	/* Is the target supported? */
	switch (info->bus_token) {
	case DOMAIN_BUS_PCI_DEVICE_MSI:
	case DOMAIN_BUS_PCI_DEVICE_MSIX:
		break;
	case DOMAIN_BUS_PCI_DEVICE_IMS:
		if (!(pops->supported_flags & MSI_FLAG_PCI_IMS))
			return false;
		break;
	default:
		WARN_ON_ONCE(1);
		return false;
	}

	/*
	 * Mask out the domain specific MSI feature flags which are not
	 * supported by the real parent.
	 */
	info->flags			&= pops->supported_flags;
	/* Enforce the required flags */
	info->flags			|= X86_VECTOR_MSI_FLAGS_REQUIRED;

	/* This is always invoked from the top level MSI domain! */
	info->ops->msi_prepare		= x86_msi_prepare;

	info->chip->irq_ack		= irq_chip_ack_parent;
	info->chip->irq_retrigger	= irq_chip_retrigger_hierarchy;
	info->chip->flags		|= IRQCHIP_SKIP_SET_WAKE |
					   IRQCHIP_AFFINITY_PRE_STARTUP;

	info->handler			= handle_edge_irq;
	info->handler_name		= "edge";

	return true;
}

static const struct msi_parent_ops x86_vector_msi_parent_ops = {
	.supported_flags	= X86_VECTOR_MSI_FLAGS_SUPPORTED,
	.init_dev_msi_info	= x86_init_dev_msi_info,
};

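/*
 * Mark the x86 vector domain as MSI parent domain and return it so it can
 * be installed as the default PCI/MSI domain. Returns NULL when the local
 * APIC is disabled.
 */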
struct irq_domain * __init native_create_pci_msi_domain(void)
{
	if (apic_is_disabled)
		return NULL;

	x86_vector_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT;
	x86_vector_domain->msi_parent_ops = &x86_vector_msi_parent_ops;
	return x86_vector_domain;
}

void __init x86_create_pci_msi_domain(void)
{
	x86_pci_msi_default_domain = x86_init.irqs.create_pci_msi_domain();
}

/* Keep around for hyperV */
int pci_msi_prepare(struct irq_domain *domain, struct device *dev, int nvec,
		    msi_alloc_info_t *arg)
{
	init_irq_alloc_info(arg, NULL);

	if (to_pci_dev(dev)->msix_enabled)
		arg->type = X86_IRQ_ALLOC_TYPE_PCI_MSIX;
	else
		arg->type = X86_IRQ_ALLOC_TYPE_PCI_MSI;
	return 0;
}
EXPORT_SYMBOL_GPL(pci_msi_prepare);

#ifdef CONFIG_DMAR_TABLE
/*
 * The Intel IOMMU (ab)uses the high bits of the MSI address to contain the
 * high bits of the destination APIC ID. This can't be done in the general
 * case for MSIs as it would be targeting real memory above 4GiB, not the
 * APIC.
 */
static void dmar_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
{
	__irq_msi_compose_msg(irqd_cfg(data), msg, true);
}

static void dmar_msi_write_msg(struct irq_data *data, struct msi_msg *msg)
{
	dmar_msi_write(data->irq, msg);
}

static struct irq_chip dmar_msi_controller = {
	.name			= "DMAR-MSI",
	.irq_unmask		= dmar_msi_unmask,
	.irq_mask		= dmar_msi_mask,
	.irq_ack		= irq_chip_ack_parent,
	.irq_set_affinity	= msi_domain_set_affinity,
	.irq_retrigger		= irq_chip_retrigger_hierarchy,
	.irq_compose_msi_msg	= dmar_msi_compose_msg,
	.irq_write_msi_msg	= dmar_msi_write_msg,
	.flags			= IRQCHIP_SKIP_SET_WAKE |
				  IRQCHIP_AFFINITY_PRE_STARTUP,
};

static int dmar_msi_init(struct irq_domain *domain,
			 struct msi_domain_info *info, unsigned int virq,
			 irq_hw_number_t hwirq, msi_alloc_info_t *arg)
{
	irq_domain_set_info(domain, virq, arg->devid, info->chip, NULL,
			    handle_edge_irq, arg->data, "edge");

	return 0;
}

static struct msi_domain_ops dmar_msi_domain_ops = {
	.msi_init	= dmar_msi_init,
};

static struct msi_domain_info dmar_msi_domain_info = {
	.ops		= &dmar_msi_domain_ops,
	.chip		= &dmar_msi_controller,
	.flags		= MSI_FLAG_USE_DEF_DOM_OPS,
};

static struct irq_domain *dmar_get_irq_domain(void)
{
	static struct irq_domain *dmar_domain;
	static DEFINE_MUTEX(dmar_lock);
	struct fwnode_handle *fn;

	mutex_lock(&dmar_lock);
	if (dmar_domain)
		goto out;

	fn = irq_domain_alloc_named_fwnode("DMAR-MSI");
	if (fn) {
		dmar_domain = msi_create_irq_domain(fn, &dmar_msi_domain_info,
						    x86_vector_domain);
		if (!dmar_domain)
			irq_domain_free_fwnode(fn);
	}
out:
	mutex_unlock(&dmar_lock);
	return dmar_domain;
}

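/*
 * Allocate one interrupt in the DMAR-MSI domain for unit @id on node @node,
 * with @arg stored as the handler data. Returns the Linux irq number on
 * success, a negative value on allocation failure, or -1 when the DMAR-MSI
 * domain is not available.
 */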
int dmar_alloc_hwirq(int id, int node, void *arg)
{
	struct irq_domain *domain = dmar_get_irq_domain();
	struct irq_alloc_info info;

	if (!domain)
		return -1;

	init_irq_alloc_info(&info, NULL);
	info.type	= X86_IRQ_ALLOC_TYPE_DMAR;
	info.devid	= id;
	info.hwirq	= id;
	info.data	= arg;

	return irq_domain_alloc_irqs(domain, 1, node, &info);
}

void dmar_free_hwirq(int irq)
{
	irq_domain_free_irqs(irq, 1);
}
#endif

bool arch_restore_msi_irqs(struct pci_dev *dev)
{
	return xen_initdom_restore_msi(dev);
}