// SPDX-License-Identifier: GPL-2.0-only
/*
 * Support of MSI, HPET and DMAR interrupts.
 *
 * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
 * Moved from arch/x86/kernel/apic/io_apic.c.
 * Jiang Liu <jiang.liu@linux.intel.com>
 *	Convert to hierarchical irqdomain
 */
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/hpet.h>
#include <linux/msi.h>
#include <asm/irqdomain.h>
#include <asm/hpet.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>
#include <asm/irq_remapping.h>

struct irq_domain *x86_pci_msi_default_domain __ro_after_init;

static void irq_msi_update_msg(struct irq_data *irqd, struct irq_cfg *cfg)
{
	struct msi_msg msg[2] = { [1] = { }, };

	__irq_msi_compose_msg(cfg, msg, false);
	irq_data_get_irq_chip(irqd)->irq_write_msi_msg(irqd, msg);
}
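
/*
 * Changing the affinity of a non-remapped MSI means rewriting the
 * address/data pair, which the device cannot observe atomically. The
 * function below therefore migrates such interrupts in stages: allocate
 * the new vector, point the message at the new vector on the current
 * CPU, then at the final destination CPU, and finally retrigger the
 * interrupt if the device fired while the message was in transit.
 */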
static int
msi_set_affinity(struct irq_data *irqd, const struct cpumask *mask, bool force)
{
	struct irq_cfg old_cfg, *cfg = irqd_cfg(irqd);
	struct irq_data *parent = irqd->parent_data;
	unsigned int cpu;
	int ret;

	/* Save the current configuration */
	cpu = cpumask_first(irq_data_get_effective_affinity_mask(irqd));
	old_cfg = *cfg;

	/* Allocate a new target vector */
	ret = parent->chip->irq_set_affinity(parent, mask, force);
	if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
		return ret;

	/*
	 * For non-maskable and non-remapped MSI interrupts the migration
	 * to a different destination CPU and a different vector has to be
	 * done carefully to handle the possible stray interrupt which can
	 * be caused by the non-atomic update of the address/data pair.
	 *
	 * Direct update is possible when:
	 * - The MSI is maskable (remapped MSI does not use this code path).
	 *   The quirk bit is not set in this case.
	 * - The new vector is the same as the old vector
	 * - The old vector is MANAGED_IRQ_SHUTDOWN_VECTOR (interrupt starts up)
	 * - The new destination CPU is the same as the old destination CPU
	 */
	if (!irqd_msi_nomask_quirk(irqd) ||
	    cfg->vector == old_cfg.vector ||
	    old_cfg.vector == MANAGED_IRQ_SHUTDOWN_VECTOR ||
	    cfg->dest_apicid == old_cfg.dest_apicid) {
		irq_msi_update_msg(irqd, cfg);
		return ret;
	}

	/*
	 * Paranoia: Validate that the interrupt target is the local
	 * CPU.
	 */
	if (WARN_ON_ONCE(cpu != smp_processor_id())) {
		irq_msi_update_msg(irqd, cfg);
		return ret;
	}

	/*
	 * Redirect the interrupt to the new vector on the current CPU
	 * first. This might cause a spurious interrupt on this vector if
	 * the device raises an interrupt right between this update and the
	 * update to the final destination CPU.
	 *
	 * If the vector is in use then the installed device handler will
	 * denote it as spurious which is no harm as this is a rare event
	 * and interrupt handlers have to cope with spurious interrupts
	 * anyway. If the vector is unused, then it is marked so it won't
	 * trigger the 'No irq handler for vector' warning in
	 * common_interrupt().
	 *
	 * This requires holding the vector lock to prevent concurrent
	 * updates to the affected vector.
	 */
	lock_vector_lock();

	/*
	 * Mark the new target vector on the local CPU if it is currently
	 * unused. Reuse the VECTOR_RETRIGGERED state which is also used in
	 * the CPU hotplug path for a similar purpose. This cannot be
	 * undone here as the current CPU has interrupts disabled and
	 * cannot handle the interrupt before the whole set_affinity()
	 * section is done. In the CPU unplug case, the current CPU is
	 * about to vanish and will not handle any interrupts anymore. The
	 * vector is cleaned up when the CPU comes online again.
	 */
	if (IS_ERR_OR_NULL(this_cpu_read(vector_irq[cfg->vector])))
		this_cpu_write(vector_irq[cfg->vector], VECTOR_RETRIGGERED);

	/* Redirect it to the new vector on the local CPU temporarily */
	old_cfg.vector = cfg->vector;
	irq_msi_update_msg(irqd, &old_cfg);

	/* Now transition it to the target CPU */
	irq_msi_update_msg(irqd, cfg);

	/*
	 * All interrupts after this point are now targeted at the new
	 * vector/CPU.
	 *
	 * Drop vector lock before testing whether the temporary assignment
	 * to the local CPU was hit by an interrupt raised in the device,
	 * because the retrigger function acquires vector lock again.
	 */
	unlock_vector_lock();

	/*
	 * Check whether the transition raced with a device interrupt and
	 * is pending in the local APIC's IRR. It is safe to do this outside
	 * of vector lock as the irq_desc::lock of this interrupt is still
	 * held and interrupts are disabled: The check is not accessing the
	 * underlying vector store. It's just checking the local APIC's
	 * IRR.
	 */
	if (lapic_vector_set_in_irr(cfg->vector))
		irq_data_get_irq_chip(irqd)->irq_retrigger(irqd);

	return ret;
}

/*
 * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices,
 * which implement the MSI or MSI-X Capability Structure.
 */
static struct irq_chip pci_msi_controller = {
	.name			= "PCI-MSI",
	.irq_unmask		= pci_msi_unmask_irq,
	.irq_mask		= pci_msi_mask_irq,
	.irq_ack		= irq_chip_ack_parent,
	.irq_retrigger		= irq_chip_retrigger_hierarchy,
	.irq_set_affinity	= msi_set_affinity,
	.flags			= IRQCHIP_SKIP_SET_WAKE,
};

int pci_msi_prepare(struct irq_domain *domain, struct device *dev, int nvec,
		    msi_alloc_info_t *arg)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct msi_desc *desc = first_pci_msi_entry(pdev);

	init_irq_alloc_info(arg, NULL);
	if (desc->msi_attrib.is_msix) {
		arg->type = X86_IRQ_ALLOC_TYPE_PCI_MSIX;
	} else {
		arg->type = X86_IRQ_ALLOC_TYPE_PCI_MSI;
		arg->flags |= X86_IRQ_ALLOC_CONTIGUOUS_VECTORS;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_msi_prepare);

static struct msi_domain_ops pci_msi_domain_ops = {
	.msi_prepare	= pci_msi_prepare,
};

static struct msi_domain_info pci_msi_domain_info = {
	.flags		= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
			  MSI_FLAG_PCI_MSIX,
	.ops		= &pci_msi_domain_ops,
	.chip		= &pci_msi_controller,
	.handler	= handle_edge_irq,
	.handler_name	= "edge",
};

struct irq_domain * __init native_create_pci_msi_domain(void)
{
	struct fwnode_handle *fn;
	struct irq_domain *d;

	if (disable_apic)
		return NULL;

	fn = irq_domain_alloc_named_fwnode("PCI-MSI");
	if (!fn)
		return NULL;

	d = pci_msi_create_irq_domain(fn, &pci_msi_domain_info,
				      x86_vector_domain);
	if (!d) {
		irq_domain_free_fwnode(fn);
		pr_warn("Failed to initialize PCI-MSI irqdomain.\n");
	} else {
		d->flags |= IRQ_DOMAIN_MSI_NOMASK_QUIRK;
	}
	return d;
}

void __init x86_create_pci_msi_domain(void)
{
	x86_pci_msi_default_domain = x86_init.irqs.create_pci_msi_domain();
}
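
/*
 * With interrupt remapping the MSI message points at a remapping table
 * entry instead of encoding vector and destination directly, so changing
 * the affinity never rewrites the message in the device. The chip below
 * therefore does not need msi_set_affinity() and its quirk handling;
 * MSI_FLAG_USE_DEF_CHIP_OPS fills in the generic
 * msi_domain_set_affinity() instead.
 */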
#ifdef CONFIG_IRQ_REMAP
static struct irq_chip pci_msi_ir_controller = {
	.name			= "IR-PCI-MSI",
	.irq_unmask		= pci_msi_unmask_irq,
	.irq_mask		= pci_msi_mask_irq,
	.irq_ack		= irq_chip_ack_parent,
	.irq_retrigger		= irq_chip_retrigger_hierarchy,
	.flags			= IRQCHIP_SKIP_SET_WAKE,
};

static struct msi_domain_info pci_msi_ir_domain_info = {
	.flags		= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
			  MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX,
	.ops		= &pci_msi_domain_ops,
	.chip		= &pci_msi_ir_controller,
	.handler	= handle_edge_irq,
	.handler_name	= "edge",
};

struct irq_domain *arch_create_remap_msi_irq_domain(struct irq_domain *parent,
						    const char *name, int id)
{
	struct fwnode_handle *fn;
	struct irq_domain *d;

	fn = irq_domain_alloc_named_id_fwnode(name, id);
	if (!fn)
		return NULL;
	d = pci_msi_create_irq_domain(fn, &pci_msi_ir_domain_info, parent);
	if (!d)
		irq_domain_free_fwnode(fn);
	return d;
}
#endif

#ifdef CONFIG_DMAR_TABLE
/*
 * The Intel IOMMU (ab)uses the high bits of the MSI address to contain the
 * high bits of the destination APIC ID. This can't be done in the general
 * case for MSIs as it would be targeting real memory above 4GiB, not the
 * APIC.
 */
static void dmar_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
{
	__irq_msi_compose_msg(irqd_cfg(data), msg, true);
}

static void dmar_msi_write_msg(struct irq_data *data, struct msi_msg *msg)
{
	dmar_msi_write(data->irq, msg);
}

static struct irq_chip dmar_msi_controller = {
	.name			= "DMAR-MSI",
	.irq_unmask		= dmar_msi_unmask,
	.irq_mask		= dmar_msi_mask,
	.irq_ack		= irq_chip_ack_parent,
	.irq_set_affinity	= msi_domain_set_affinity,
	.irq_retrigger		= irq_chip_retrigger_hierarchy,
	.irq_compose_msi_msg	= dmar_msi_compose_msg,
	.irq_write_msi_msg	= dmar_msi_write_msg,
	.flags			= IRQCHIP_SKIP_SET_WAKE,
};

static int dmar_msi_init(struct irq_domain *domain,
			 struct msi_domain_info *info, unsigned int virq,
			 irq_hw_number_t hwirq, msi_alloc_info_t *arg)
{
	irq_domain_set_info(domain, virq, arg->devid, info->chip, NULL,
			    handle_edge_irq, arg->data, "edge");

	return 0;
}

static struct msi_domain_ops dmar_msi_domain_ops = {
	.msi_init	= dmar_msi_init,
};

static struct msi_domain_info dmar_msi_domain_info = {
	.ops		= &dmar_msi_domain_ops,
	.chip		= &dmar_msi_controller,
	.flags		= MSI_FLAG_USE_DEF_DOM_OPS,
};

static struct irq_domain *dmar_get_irq_domain(void)
{
	static struct irq_domain *dmar_domain;
	static DEFINE_MUTEX(dmar_lock);
	struct fwnode_handle *fn;

	mutex_lock(&dmar_lock);
	if (dmar_domain)
		goto out;

	fn = irq_domain_alloc_named_fwnode("DMAR-MSI");
	if (fn) {
		dmar_domain = msi_create_irq_domain(fn, &dmar_msi_domain_info,
						    x86_vector_domain);
		if (!dmar_domain)
			irq_domain_free_fwnode(fn);
	}
out:
	mutex_unlock(&dmar_lock);
	return dmar_domain;
}
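
/*
 * dmar_alloc_hwirq - allocate a Linux interrupt on the DMAR-MSI domain
 * @id:   DMAR unit sequence id; used as both device id and hw interrupt number
 * @node: NUMA node for the allocation
 * @arg:  opaque pointer, installed as handler data by dmar_msi_init()
 *
 * Returns the allocated Linux interrupt number, or a negative value on
 * failure (-1 when the DMAR-MSI domain is not available).
 *
 * A typical pairing with request_irq() looks roughly like the DMAR fault
 * handling code in drivers/iommu (sketch, not part of this file):
 *
 *	irq = dmar_alloc_hwirq(iommu->seq_id, iommu->node, iommu);
 *	if (irq > 0)
 *		ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD,
 *				  iommu->name, iommu);
 */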
int dmar_alloc_hwirq(int id, int node, void *arg)
{
	struct irq_domain *domain = dmar_get_irq_domain();
	struct irq_alloc_info info;

	if (!domain)
		return -1;

	init_irq_alloc_info(&info, NULL);
	info.type = X86_IRQ_ALLOC_TYPE_DMAR;
	info.devid = id;
	info.hwirq = id;
	info.data = arg;

	return irq_domain_alloc_irqs(domain, 1, node, &info);
}

/* Release an interrupt previously allocated by dmar_alloc_hwirq() */
void dmar_free_hwirq(int irq)
{
	irq_domain_free_irqs(irq, 1);
}
#endif