// SPDX-License-Identifier: GPL-2.0
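
/*
 * Intel VT-d interrupt remapping support: programs the DMAR interrupt
 * remapping tables (IRTEs) so that IOAPIC, HPET and MSI/MSI-X interrupt
 * requests are validated and routed through the IOMMU, and provides the
 * irqdomain plumbing for remapped and posted interrupts.
 */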

#define pr_fmt(fmt)     "DMAR-IR: " fmt

#include <linux/interrupt.h>
#include <linux/dmar.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/hpet.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/intel-iommu.h>
#include <linux/acpi.h>
#include <linux/irqdomain.h>
#include <linux/crash_dump.h>
#include <asm/io_apic.h>
#include <asm/smp.h>
#include <asm/cpu.h>
#include <asm/irq_remapping.h>
#include <asm/pci-direct.h>
#include <asm/msidef.h>

#include "../irq_remapping.h"

enum irq_mode {
	IRQ_REMAPPING,
	IRQ_POSTING,
};

struct ioapic_scope {
	struct intel_iommu *iommu;
	unsigned int id;
	unsigned int bus;	/* PCI bus number */
	unsigned int devfn;	/* PCI devfn number */
};

struct hpet_scope {
	struct intel_iommu *iommu;
	u8 id;
	unsigned int bus;
	unsigned int devfn;
};

struct irq_2_iommu {
	struct intel_iommu *iommu;
	u16 irte_index;
	u16 sub_handle;
	u8  irte_mask;
	enum irq_mode mode;
};

struct intel_ir_data {
	struct irq_2_iommu			irq_2_iommu;
	struct irte				irte_entry;
	union {
		struct msi_msg			msi_entry;
	};
};

#define IR_X2APIC_MODE(mode) (mode ? (1 << 11) : 0)
#define IRTE_DEST(dest) ((eim_mode) ? dest : dest << 8)

static int __read_mostly eim_mode;
static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static struct hpet_scope ir_hpet[MAX_HPET_TBS];

/*
 * Lock ordering:
 * ->dmar_global_lock
 *	->irq_2_ir_lock
 *		->qi->q_lock
 *	->iommu->register_lock
 * Note:
 * intel_irq_remap_ops.{supported,prepare,enable,disable,reenable} are called
 * in a single-threaded environment with interrupts disabled, so there is no
 * need to take the dmar_global_lock.
 */
DEFINE_RAW_SPINLOCK(irq_2_ir_lock);
static const struct irq_domain_ops intel_ir_domain_ops;

static void iommu_disable_irq_remapping(struct intel_iommu *iommu);
static int __init parse_ioapics_under_ir(void);

static bool ir_pre_enabled(struct intel_iommu *iommu)
{
	return (iommu->flags & VTD_FLAG_IRQ_REMAP_PRE_ENABLED);
}

static void clear_ir_pre_enabled(struct intel_iommu *iommu)
{
	iommu->flags &= ~VTD_FLAG_IRQ_REMAP_PRE_ENABLED;
}

static void init_ir_status(struct intel_iommu *iommu)
{
	u32 gsts;

	gsts = readl(iommu->reg + DMAR_GSTS_REG);
	if (gsts & DMA_GSTS_IRES)
		iommu->flags |= VTD_FLAG_IRQ_REMAP_PRE_ENABLED;
}

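/*
 * Allocate a contiguous, power-of-two aligned block of IRTEs for @count
 * interrupts and record the base index in @irq_iommu.  For example, a
 * request for 5 IRTEs is rounded up to 8 entries and encoded as
 * irte_mask = 3 (2^3 slots).  Returns the base index, or -1 on failure.
 */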
static int alloc_irte(struct intel_iommu *iommu,
		      struct irq_2_iommu *irq_iommu, u16 count)
{
	struct ir_table *table = iommu->ir_table;
	unsigned int mask = 0;
	unsigned long flags;
	int index;

	if (!count || !irq_iommu)
		return -1;

	if (count > 1) {
		count = __roundup_pow_of_two(count);
		mask = ilog2(count);
	}

	if (mask > ecap_max_handle_mask(iommu->ecap)) {
		pr_err("Requested mask %x exceeds the max invalidation handle mask value %Lx\n",
		       mask, ecap_max_handle_mask(iommu->ecap));
		return -1;
	}

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
	index = bitmap_find_free_region(table->bitmap,
					INTR_REMAP_TABLE_ENTRIES, mask);
	if (index < 0) {
		pr_warn("IR%d: can't allocate an IRTE\n", iommu->seq_id);
	} else {
		irq_iommu->iommu = iommu;
		irq_iommu->irte_index = index;
		irq_iommu->sub_handle = 0;
		irq_iommu->irte_mask = mask;
		irq_iommu->mode = IRQ_REMAPPING;
	}
	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return index;
}

static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
{
	struct qi_desc desc;

	desc.qw0 = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
		   | QI_IEC_SELECTIVE;
	desc.qw1 = 0;
	desc.qw2 = 0;
	desc.qw3 = 0;

	return qi_submit_sync(iommu, &desc, 1, 0);
}

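/*
 * Write a new 128-bit entry into the remapping table and flush it to the
 * hardware.  Posted-format entries are swapped in atomically with
 * cmpxchg_double() because the PDA field straddles the two 64-bit halves;
 * plain remapped entries can be updated as two 64-bit stores.
 */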
static int modify_irte(struct irq_2_iommu *irq_iommu,
		       struct irte *irte_modified)
{
	struct intel_iommu *iommu;
	unsigned long flags;
	struct irte *irte;
	int rc, index;

	if (!irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

	iommu = irq_iommu->iommu;

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	irte = &iommu->ir_table->base[index];

#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE)
	if ((irte->pst == 1) || (irte_modified->pst == 1)) {
		bool ret;

		ret = cmpxchg_double(&irte->low, &irte->high,
				     irte->low, irte->high,
				     irte_modified->low, irte_modified->high);
		/*
		 * We use cmpxchg16 to atomically update the 128-bit IRTE,
		 * and it cannot be updated by the hardware or other processors
		 * behind us, so the return value of cmpxchg16 should be the
		 * same as the old value.
		 */
		WARN_ON(!ret);
	} else
#endif
	{
		set_64bit(&irte->low, irte_modified->low);
		set_64bit(&irte->high, irte_modified->high);
	}
	__iommu_flush_cache(iommu, irte, sizeof(*irte));

	rc = qi_flush_iec(iommu, index, 0);

	/* Update iommu mode according to the IRTE mode */
	irq_iommu->mode = irte->pst ? IRQ_POSTING : IRQ_REMAPPING;
	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return rc;
}

static struct intel_iommu *map_hpet_to_ir(u8 hpet_id)
{
	int i;

	for (i = 0; i < MAX_HPET_TBS; i++)
		if (ir_hpet[i].id == hpet_id && ir_hpet[i].iommu)
			return ir_hpet[i].iommu;
	return NULL;
}

static struct intel_iommu *map_ioapic_to_ir(int apic)
{
	int i;

	for (i = 0; i < MAX_IO_APICS; i++)
		if (ir_ioapic[i].id == apic && ir_ioapic[i].iommu)
			return ir_ioapic[i].iommu;
	return NULL;
}

static struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
{
	struct dmar_drhd_unit *drhd;

	drhd = dmar_find_matched_drhd_unit(dev);
	if (!drhd)
		return NULL;

	return drhd->iommu;
}

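/*
 * Zero the whole IRTE block owned by @irq_iommu, release it in the
 * allocation bitmap and invalidate the interrupt entry cache.  Only the
 * owner of the block (sub_handle == 0) actually performs the cleanup.
 */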
static int clear_entries(struct irq_2_iommu *irq_iommu)
{
	struct irte *start, *entry, *end;
	struct intel_iommu *iommu;
	int index;

	if (irq_iommu->sub_handle)
		return 0;

	iommu = irq_iommu->iommu;
	index = irq_iommu->irte_index;

	start = iommu->ir_table->base + index;
	end = start + (1 << irq_iommu->irte_mask);

	for (entry = start; entry < end; entry++) {
		set_64bit(&entry->low, 0);
		set_64bit(&entry->high, 0);
	}
	bitmap_release_region(iommu->ir_table->bitmap, index,
			      irq_iommu->irte_mask);

	return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
}

/*
 * source validation type
 */
#define SVT_NO_VERIFY		0x0  /* no verification is required */
#define SVT_VERIFY_SID_SQ	0x1  /* verify using SID and SQ fields */
#define SVT_VERIFY_BUS		0x2  /* verify bus of request-id */

/*
 * source-id qualifier
 */
#define SQ_ALL_16	0x0  /* verify all 16 bits of request-id */
#define SQ_13_IGNORE_1	0x1  /* verify most significant 13 bits, ignore
			      * the third least significant bit
			      */
#define SQ_13_IGNORE_2	0x2  /* verify most significant 13 bits, ignore
			      * the second and third least significant bits
			      */
#define SQ_13_IGNORE_3	0x3  /* verify most significant 13 bits, ignore
			      * the three least significant bits
			      */

/*
 * set SVT, SQ and SID fields of irte to verify
 * source ids of interrupt requests
 */
static void set_irte_sid(struct irte *irte, unsigned int svt,
			 unsigned int sq, unsigned int sid)
{
	if (disable_sourceid_checking)
		svt = SVT_NO_VERIFY;
	irte->svt = svt;
	irte->sq = sq;
	irte->sid = sid;
}

/*
 * Set an IRTE to match only the bus number. Interrupt requests that reference
 * this IRTE must have a requester-id whose bus number lies between start_bus
 * and end_bus, inclusive.
 */
static void set_irte_verify_bus(struct irte *irte, unsigned int start_bus,
				unsigned int end_bus)
{
	set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16,
		     (start_bus << 8) | end_bus);
}

static int set_ioapic_sid(struct irte *irte, int apic)
{
	int i;
	u16 sid = 0;

	if (!irte)
		return -1;

	down_read(&dmar_global_lock);
	for (i = 0; i < MAX_IO_APICS; i++) {
		if (ir_ioapic[i].iommu && ir_ioapic[i].id == apic) {
			sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn;
			break;
		}
	}
	up_read(&dmar_global_lock);

	if (sid == 0) {
		pr_warn("Failed to set source-id of IOAPIC (%d)\n", apic);
		return -1;
	}

	set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, sid);

	return 0;
}

static int set_hpet_sid(struct irte *irte, u8 id)
{
	int i;
	u16 sid = 0;

	if (!irte)
		return -1;

	down_read(&dmar_global_lock);
	for (i = 0; i < MAX_HPET_TBS; i++) {
		if (ir_hpet[i].iommu && ir_hpet[i].id == id) {
			sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn;
			break;
		}
	}
	up_read(&dmar_global_lock);

	if (sid == 0) {
		pr_warn("Failed to set source-id of HPET block (%d)\n", id);
		return -1;
	}

	/*
	 * Should really use SQ_ALL_16. Some platforms are broken.
	 * While we figure out the right quirks for these broken platforms, use
	 * SQ_13_IGNORE_3 for now.
	 */
	set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_13_IGNORE_3, sid);

	return 0;
}

struct set_msi_sid_data {
	struct pci_dev *pdev;
	u16 alias;
	int count;
	int busmatch_count;
};

static int set_msi_sid_cb(struct pci_dev *pdev, u16 alias, void *opaque)
{
	struct set_msi_sid_data *data = opaque;

	if (data->count == 0 || PCI_BUS_NUM(alias) == PCI_BUS_NUM(data->alias))
		data->busmatch_count++;

	data->pdev = pdev;
	data->alias = alias;
	data->count++;

	return 0;
}

static int set_msi_sid(struct irte *irte, struct pci_dev *dev)
{
	struct set_msi_sid_data data;

	if (!irte || !dev)
		return -1;

	data.count = 0;
	data.busmatch_count = 0;
	pci_for_each_dma_alias(dev, set_msi_sid_cb, &data);

	/*
	 * The DMA alias walk provides us with a PCI device and alias.  The
	 * only case where it will return an alias on a different bus than
	 * the device is the case of a PCIe-to-PCI bridge, where the alias is
	 * for the subordinate bus.  In this case we can only verify the bus.
	 *
	 * If there are multiple aliases, all with the same bus number, then
	 * all we can do is verify the bus.  This is typical in NTB hardware
	 * using proxy IDs, where the device generates traffic from multiple
	 * devfn numbers on the same bus.
	 *
	 * If the alias device is on a different bus than our source device
	 * then we have a topology based alias, use it.
	 *
	 * Otherwise, the alias is for a device DMA quirk and we cannot
	 * assume that MSI uses the same requester ID.  Therefore use the
	 * original device.
	 */
	if (PCI_BUS_NUM(data.alias) != data.pdev->bus->number)
		set_irte_verify_bus(irte, PCI_BUS_NUM(data.alias),
				    dev->bus->number);
	else if (data.count >= 2 && data.busmatch_count == data.count)
		set_irte_verify_bus(irte, dev->bus->number, dev->bus->number);
	else if (data.pdev->bus->number != dev->bus->number)
		set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, data.alias);
	else
		set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
			     pci_dev_id(dev));

	return 0;
}

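/*
 * In a kdump kernel, take over the interrupt remapping table left by the
 * previous kernel: map the old table, copy its entries into ours and mark
 * every present entry as allocated in the bitmap, so that in-flight
 * interrupts keep working until the devices are reinitialized.
 */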
static int iommu_load_old_irte(struct intel_iommu *iommu)
{
	struct irte *old_ir_table;
	phys_addr_t irt_phys;
	unsigned int i;
	size_t size;
	u64 irta;

	/* Check whether the old ir-table has the same size as ours */
	irta = dmar_readq(iommu->reg + DMAR_IRTA_REG);
	if ((irta & INTR_REMAP_TABLE_REG_SIZE_MASK)
	     != INTR_REMAP_TABLE_REG_SIZE)
		return -EINVAL;

	irt_phys = irta & VTD_PAGE_MASK;
	size     = INTR_REMAP_TABLE_ENTRIES * sizeof(struct irte);

	/* Map the old IR table */
	old_ir_table = memremap(irt_phys, size, MEMREMAP_WB);
	if (!old_ir_table)
		return -ENOMEM;

	/* Copy data over */
	memcpy(iommu->ir_table->base, old_ir_table, size);

	__iommu_flush_cache(iommu, iommu->ir_table->base, size);

	/*
	 * Now check the table for used entries and mark those as
	 * allocated in the bitmap
	 */
	for (i = 0; i < INTR_REMAP_TABLE_ENTRIES; i++) {
		if (iommu->ir_table->base[i].present)
			bitmap_set(iommu->ir_table->bitmap, i, 1);
	}

	memunmap(old_ir_table);

	return 0;
}

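/*
 * Point the hardware at our remapping table: program the table address,
 * size and x2apic mode into DMAR_IRTA_REG, latch it with the SIRTP
 * command, and then globally invalidate the interrupt entry cache so the
 * hardware picks up the new table.
 */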
static void iommu_set_irq_remapping(struct intel_iommu *iommu, int mode)
{
	unsigned long flags;
	u64 addr;
	u32 sts;

	addr = virt_to_phys((void *)iommu->ir_table->base);

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	dmar_writeq(iommu->reg + DMAR_IRTA_REG,
		    (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);

	/* Set interrupt-remapping table pointer */
	writel(iommu->gcmd | DMA_GCMD_SIRTP, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRTPS), sts);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);

	/*
	 * Global invalidation of interrupt entry cache to make sure the
	 * hardware uses the new irq remapping table.
	 */
	qi_global_iec(iommu);
}

static void iommu_enable_irq_remapping(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	/* Enable interrupt-remapping */
	iommu->gcmd |= DMA_GCMD_IRE;
	iommu->gcmd &= ~DMA_GCMD_CFI;  /* Block compatibility-format MSIs */
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRES), sts);

	/*
	 * With CFI clear in the Global Command register, we should be
	 * protected from dangerous (i.e. compatibility) interrupts
	 * regardless of x2apic status.  Check just to be sure.
	 */
	if (sts & DMA_GSTS_CFIS)
		WARN(1, KERN_WARNING
			"Compatibility-format IRQs enabled despite intr remapping;\n"
			"you are vulnerable to IRQ injection.\n");

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

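/*
 * Allocate the per-IOMMU remapping resources: the IRTE table pages, the
 * allocation bitmap and the irqdomain hierarchy.  If the table was left
 * enabled by a previous (crashed) kernel, either inherit it (kdump) or
 * tear it down before installing ours.
 */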
static int intel_setup_irq_remapping(struct intel_iommu *iommu)
{
	struct ir_table *ir_table;
	struct fwnode_handle *fn;
	unsigned long *bitmap;
	struct page *pages;

	if (iommu->ir_table)
		return 0;

	ir_table = kzalloc(sizeof(struct ir_table), GFP_KERNEL);
	if (!ir_table)
		return -ENOMEM;

	pages = alloc_pages_node(iommu->node, GFP_KERNEL | __GFP_ZERO,
				 INTR_REMAP_PAGE_ORDER);
	if (!pages) {
		pr_err("IR%d: failed to allocate pages of order %d\n",
		       iommu->seq_id, INTR_REMAP_PAGE_ORDER);
		goto out_free_table;
	}

	bitmap = bitmap_zalloc(INTR_REMAP_TABLE_ENTRIES, GFP_ATOMIC);
	if (bitmap == NULL) {
		pr_err("IR%d: failed to allocate bitmap\n", iommu->seq_id);
		goto out_free_pages;
	}

	fn = irq_domain_alloc_named_id_fwnode("INTEL-IR", iommu->seq_id);
	if (!fn)
		goto out_free_bitmap;

	iommu->ir_domain =
		irq_domain_create_hierarchy(arch_get_ir_parent_domain(),
					    0, INTR_REMAP_TABLE_ENTRIES,
					    fn, &intel_ir_domain_ops,
					    iommu);
	if (!iommu->ir_domain) {
		irq_domain_free_fwnode(fn);
		pr_err("IR%d: failed to allocate irqdomain\n", iommu->seq_id);
		goto out_free_bitmap;
	}
	iommu->ir_msi_domain =
		arch_create_remap_msi_irq_domain(iommu->ir_domain,
						 "INTEL-IR-MSI",
						 iommu->seq_id);

	ir_table->base = page_address(pages);
	ir_table->bitmap = bitmap;
	iommu->ir_table = ir_table;

	/*
	 * If queued invalidation is already initialized,
	 * we must not disable it.
	 */
	if (!iommu->qi) {
		/*
		 * Clear previous faults.
		 */
		dmar_fault(-1, iommu);
		dmar_disable_qi(iommu);

		if (dmar_enable_qi(iommu)) {
			pr_err("Failed to enable queued invalidation\n");
			goto out_free_bitmap;
		}
	}

	init_ir_status(iommu);

	if (ir_pre_enabled(iommu)) {
		if (!is_kdump_kernel()) {
			pr_warn("IRQ remapping was enabled on %s but we are not in kdump mode\n",
				iommu->name);
			clear_ir_pre_enabled(iommu);
			iommu_disable_irq_remapping(iommu);
		} else if (iommu_load_old_irte(iommu))
			pr_err("Failed to copy IR table for %s from previous kernel\n",
			       iommu->name);
		else
			pr_info("Copied IR table for %s from previous kernel\n",
				iommu->name);
	}

	iommu_set_irq_remapping(iommu, eim_mode);

	return 0;

out_free_bitmap:
	bitmap_free(bitmap);
out_free_pages:
	__free_pages(pages, INTR_REMAP_PAGE_ORDER);
out_free_table:
	kfree(ir_table);

	iommu->ir_table = NULL;

	return -ENOMEM;
}

static void intel_teardown_irq_remapping(struct intel_iommu *iommu)
{
	struct fwnode_handle *fn;

	if (iommu && iommu->ir_table) {
		if (iommu->ir_msi_domain) {
			fn = iommu->ir_msi_domain->fwnode;

			irq_domain_remove(iommu->ir_msi_domain);
			irq_domain_free_fwnode(fn);
			iommu->ir_msi_domain = NULL;
		}
		if (iommu->ir_domain) {
			fn = iommu->ir_domain->fwnode;

			irq_domain_remove(iommu->ir_domain);
			irq_domain_free_fwnode(fn);
			iommu->ir_domain = NULL;
		}
		free_pages((unsigned long)iommu->ir_table->base,
			   INTR_REMAP_PAGE_ORDER);
		bitmap_free(iommu->ir_table->bitmap);
		kfree(iommu->ir_table);
		iommu->ir_table = NULL;
	}
}

/*
 * Disable Interrupt Remapping.
 */
static void iommu_disable_irq_remapping(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;

	if (!ecap_ir_support(iommu->ecap))
		return;

	/*
	 * Global invalidation of the interrupt entry cache before disabling
	 * interrupt-remapping.
	 */
	qi_global_iec(iommu);

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	sts = readl(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_IRES))
		goto end;

	iommu->gcmd &= ~DMA_GCMD_IRE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, !(sts & DMA_GSTS_IRES), sts);

end:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static int __init dmar_x2apic_optout(void)
{
	struct acpi_table_dmar *dmar;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar || no_x2apic_optout)
		return 0;
	return dmar->flags & DMAR_X2APIC_OPT_OUT;
}

static void __init intel_cleanup_irq_remapping(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	for_each_iommu(iommu, drhd) {
		if (ecap_ir_support(iommu->ecap)) {
			iommu_disable_irq_remapping(iommu);
			intel_teardown_irq_remapping(iommu);
		}
	}

	if (x2apic_supported())
		pr_warn("Failed to enable irq remapping. You are vulnerable to irq-injection attacks.\n");
}

static int __init intel_prepare_irq_remapping(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int eim = 0;

	if (irq_remap_broken) {
		pr_warn("This system BIOS has enabled interrupt remapping\n"
			"on a chipset that contains an erratum making that\n"
			"feature unstable.  To maintain system stability\n"
			"interrupt remapping is being disabled.  Please\n"
			"contact your BIOS vendor for an update\n");
		add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
		return -ENODEV;
	}

	if (dmar_table_init() < 0)
		return -ENODEV;

	if (!dmar_ir_support())
		return -ENODEV;

	if (parse_ioapics_under_ir()) {
		pr_info("Not enabling interrupt remapping\n");
		goto error;
	}

	/* First make sure all IOMMUs support IRQ remapping */
	for_each_iommu(iommu, drhd)
		if (!ecap_ir_support(iommu->ecap))
			goto error;

	/* Detect remapping mode: lapic or x2apic */
	if (x2apic_supported()) {
		eim = !dmar_x2apic_optout();
		if (!eim) {
			pr_info("x2apic is disabled because BIOS sets x2apic opt out bit.\n");
			pr_info("Use 'intremap=no_x2apic_optout' to override the BIOS setting.\n");
		}
	}

	for_each_iommu(iommu, drhd) {
		if (eim && !ecap_eim_support(iommu->ecap)) {
			pr_info("%s does not support EIM\n", iommu->name);
			eim = 0;
		}
	}

	eim_mode = eim;
	if (eim)
		pr_info("Queued invalidation will be enabled to support x2apic and Intr-remapping.\n");

	/* Do the initializations early */
	for_each_iommu(iommu, drhd) {
		if (intel_setup_irq_remapping(iommu)) {
			pr_err("Failed to setup irq remapping for %s\n",
			       iommu->name);
			goto error;
		}
	}

	return 0;

error:
	intel_cleanup_irq_remapping();
	return -ENODEV;
}

/*
 * Set Posted-Interrupts capability.
 */
static inline void set_irq_posting_cap(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	if (!disable_irq_post) {
		/*
		 * If the IRTE is in posted format, the 'pda' field goes
		 * across the 64-bit boundary, so we need to use cmpxchg16b
		 * to update it atomically.  We only expose posted interrupts
		 * when X86_FEATURE_CX16 is supported.  In practice, hardware
		 * platforms supporting PI should have X86_FEATURE_CX16
		 * support; this has been confirmed with Intel hardware
		 * engineers.
		 */
		if (boot_cpu_has(X86_FEATURE_CX16))
			intel_irq_remap_ops.capability |= 1 << IRQ_POSTING_CAP;

		for_each_iommu(iommu, drhd)
			if (!cap_pi_support(iommu->cap)) {
				intel_irq_remap_ops.capability &=
						~(1 << IRQ_POSTING_CAP);
				break;
			}
	}
}

static int __init intel_enable_irq_remapping(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	bool setup = false;

	/*
	 * Set up interrupt remapping for all the DRHDs now.
	 */
	for_each_iommu(iommu, drhd) {
		if (!ir_pre_enabled(iommu))
			iommu_enable_irq_remapping(iommu);
		setup = true;
	}

	if (!setup)
		goto error;

	irq_remapping_enabled = 1;

	set_irq_posting_cap();

	pr_info("Enabled IRQ remapping in %s mode\n", eim_mode ? "x2apic" : "xapic");

	return eim_mode ? IRQ_REMAP_X2APIC_MODE : IRQ_REMAP_XAPIC_MODE;

error:
	intel_cleanup_irq_remapping();
	return -1;
}

static int ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
				   struct intel_iommu *iommu,
				   struct acpi_dmar_hardware_unit *drhd)
{
	struct acpi_dmar_pci_path *path;
	u8 bus;
	int count, free = -1;

	bus = scope->bus;
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (--count > 0) {
		/*
		 * Access PCI config space directly because the PCI
		 * subsystem isn't initialized yet.
		 */
		bus = read_pci_config_byte(bus, path->device, path->function,
					   PCI_SECONDARY_BUS);
		path++;
	}

	for (count = 0; count < MAX_HPET_TBS; count++) {
		if (ir_hpet[count].iommu == iommu &&
		    ir_hpet[count].id == scope->enumeration_id)
			return 0;
		else if (ir_hpet[count].iommu == NULL && free == -1)
			free = count;
	}
	if (free == -1) {
		pr_warn("Exceeded Max HPET blocks\n");
		return -ENOSPC;
	}

	ir_hpet[free].iommu = iommu;
	ir_hpet[free].id    = scope->enumeration_id;
	ir_hpet[free].bus   = bus;
	ir_hpet[free].devfn = PCI_DEVFN(path->device, path->function);
	pr_info("HPET id %d under DRHD base 0x%Lx\n",
		scope->enumeration_id, drhd->address);

	return 0;
}

static int ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
				     struct intel_iommu *iommu,
				     struct acpi_dmar_hardware_unit *drhd)
{
	struct acpi_dmar_pci_path *path;
	u8 bus;
	int count, free = -1;

	bus = scope->bus;
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (--count > 0) {
		/*
		 * Access PCI config space directly because the PCI
		 * subsystem isn't initialized yet.
		 */
		bus = read_pci_config_byte(bus, path->device, path->function,
					   PCI_SECONDARY_BUS);
		path++;
	}

	for (count = 0; count < MAX_IO_APICS; count++) {
		if (ir_ioapic[count].iommu == iommu &&
		    ir_ioapic[count].id == scope->enumeration_id)
			return 0;
		else if (ir_ioapic[count].iommu == NULL && free == -1)
			free = count;
	}
	if (free == -1) {
		pr_warn("Exceeded Max IO APICS\n");
		return -ENOSPC;
	}

	ir_ioapic[free].bus   = bus;
	ir_ioapic[free].devfn = PCI_DEVFN(path->device, path->function);
	ir_ioapic[free].iommu = iommu;
	ir_ioapic[free].id    = scope->enumeration_id;
	pr_info("IOAPIC id %d under DRHD base 0x%Lx IOMMU %d\n",
		scope->enumeration_id, drhd->address, iommu->seq_id);

	return 0;
}

static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header,
				      struct intel_iommu *iommu)
{
	int ret = 0;
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_device_scope *scope;
	void *start, *end;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	start = (void *)(drhd + 1);
	end = ((void *)drhd) + header->length;

	while (start < end && ret == 0) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC)
			ret = ir_parse_one_ioapic_scope(scope, iommu, drhd);
		else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET)
			ret = ir_parse_one_hpet_scope(scope, iommu, drhd);
		start += scope->length;
	}

	return ret;
}

static void ir_remove_ioapic_hpet_scope(struct intel_iommu *iommu)
{
	int i;

	for (i = 0; i < MAX_HPET_TBS; i++)
		if (ir_hpet[i].iommu == iommu)
			ir_hpet[i].iommu = NULL;

	for (i = 0; i < MAX_IO_APICS; i++)
		if (ir_ioapic[i].iommu == iommu)
			ir_ioapic[i].iommu = NULL;
}

/*
 * Find the association between IOAPICs and their interrupt-remapping
 * hardware units.
 */
static int __init parse_ioapics_under_ir(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	bool ir_supported = false;
	int ioapic_idx;

	for_each_iommu(iommu, drhd) {
		int ret;

		if (!ecap_ir_support(iommu->ecap))
			continue;

		ret = ir_parse_ioapic_hpet_scope(drhd->hdr, iommu);
		if (ret)
			return ret;

		ir_supported = true;
	}

	if (!ir_supported)
		return -ENODEV;

	for (ioapic_idx = 0; ioapic_idx < nr_ioapics; ioapic_idx++) {
		int ioapic_id = mpc_ioapic_id(ioapic_idx);

		if (!map_ioapic_to_ir(ioapic_id)) {
			pr_err(FW_BUG "ioapic %d has no mapping iommu, interrupt remapping will be disabled\n",
			       ioapic_id);
			return -1;
		}
	}

	return 0;
}

static int __init ir_dev_scope_init(void)
{
	int ret;

	if (!irq_remapping_enabled)
		return 0;

	down_write(&dmar_global_lock);
	ret = dmar_dev_scope_init();
	up_write(&dmar_global_lock);

	return ret;
}
rootfs_initcall(ir_dev_scope_init);

static void disable_irq_remapping(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;

	/*
	 * Disable interrupt remapping for all the DRHDs now.
	 */
	for_each_iommu(iommu, drhd) {
		if (!ecap_ir_support(iommu->ecap))
			continue;

		iommu_disable_irq_remapping(iommu);
	}

	/*
	 * Clear Posted-Interrupts capability.
	 */
	if (!disable_irq_post)
		intel_irq_remap_ops.capability &= ~(1 << IRQ_POSTING_CAP);
}

static int reenable_irq_remapping(int eim)
{
	struct dmar_drhd_unit *drhd;
	bool setup = false;
	struct intel_iommu *iommu = NULL;

	for_each_iommu(iommu, drhd)
		if (iommu->qi)
			dmar_reenable_qi(iommu);

	/*
	 * Set up interrupt remapping for all the DRHDs now.
	 */
	for_each_iommu(iommu, drhd) {
		if (!ecap_ir_support(iommu->ecap))
			continue;

		/* Set up interrupt remapping for iommu. */
		iommu_set_irq_remapping(iommu, eim);
		iommu_enable_irq_remapping(iommu);
		setup = true;
	}

	if (!setup)
		goto error;

	set_irq_posting_cap();

	return 0;

error:
	/*
	 * Handle error condition gracefully here!
	 */
	return -1;
}

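/*
 * Fill in a remapped-format IRTE for the given vector and destination
 * APIC ID.  The trigger mode in the IRTE is always edge; for IO-APIC pins
 * the real trigger mode lives in the IO-APIC RTE (see the comment in the
 * function body).
 */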
static void prepare_irte(struct irte *irte, int vector, unsigned int dest)
{
	memset(irte, 0, sizeof(*irte));

	irte->present = 1;
	irte->dst_mode = apic->irq_dest_mode;
	/*
	 * The trigger mode in the IRTE will always be edge, and for IO-APIC,
	 * the actual level or edge trigger will be set up in the IO-APIC
	 * RTE. This will help simplify level triggered irq migration.
	 * For more details, see the comments (in io_apic.c) explaining
	 * IO-APIC irq migration in the presence of interrupt-remapping.
	 */
	irte->trigger_mode = 0;
	irte->dlvry_mode = apic->irq_delivery_mode;
	irte->vector = vector;
	irte->dest_id = IRTE_DEST(dest);
	irte->redir_hint = 1;
}

static struct irq_domain *intel_get_ir_irq_domain(struct irq_alloc_info *info)
{
	struct intel_iommu *iommu = NULL;

	if (!info)
		return NULL;

	switch (info->type) {
	case X86_IRQ_ALLOC_TYPE_IOAPIC:
		iommu = map_ioapic_to_ir(info->ioapic_id);
		break;
	case X86_IRQ_ALLOC_TYPE_HPET:
		iommu = map_hpet_to_ir(info->hpet_id);
		break;
	case X86_IRQ_ALLOC_TYPE_MSI:
	case X86_IRQ_ALLOC_TYPE_MSIX:
		iommu = map_dev_to_ir(info->msi_dev);
		break;
	default:
		BUG_ON(1);
		break;
	}

	return iommu ? iommu->ir_domain : NULL;
}

static struct irq_domain *intel_get_irq_domain(struct irq_alloc_info *info)
{
	struct intel_iommu *iommu;

	if (!info)
		return NULL;

	switch (info->type) {
	case X86_IRQ_ALLOC_TYPE_MSI:
	case X86_IRQ_ALLOC_TYPE_MSIX:
		iommu = map_dev_to_ir(info->msi_dev);
		if (iommu)
			return iommu->ir_msi_domain;
		break;
	default:
		break;
	}

	return NULL;
}

struct irq_remap_ops intel_irq_remap_ops = {
	.prepare		= intel_prepare_irq_remapping,
	.enable			= intel_enable_irq_remapping,
	.disable		= disable_irq_remapping,
	.reenable		= reenable_irq_remapping,
	.enable_faulting	= enable_drhd_fault_handling,
	.get_ir_irq_domain	= intel_get_ir_irq_domain,
	.get_irq_domain		= intel_get_irq_domain,
};

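/*
 * Push the current vector and destination from the irq_cfg into the
 * cached IRTE and, if the entry is in remapped mode (or @force is set),
 * write it to the hardware.  In posted mode only the cached remapped
 * entry is refreshed; the hardware IRTE stays in posted format.
 */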
static void intel_ir_reconfigure_irte(struct irq_data *irqd, bool force)
{
	struct intel_ir_data *ir_data = irqd->chip_data;
	struct irte *irte = &ir_data->irte_entry;
	struct irq_cfg *cfg = irqd_cfg(irqd);

	/*
	 * Atomically updates the IRTE with the new destination, vector
	 * and flushes the interrupt entry cache.
	 */
	irte->vector = cfg->vector;
	irte->dest_id = IRTE_DEST(cfg->dest_apicid);

	/* Update the hardware only if the interrupt is in remapped mode. */
	if (force || ir_data->irq_2_iommu.mode == IRQ_REMAPPING)
		modify_irte(&ir_data->irq_2_iommu, irte);
}

/*
 * Migrate the IO-APIC irq in the presence of intr-remapping.
 *
 * For both level and edge triggered, irq migration is a simple atomic
 * update (of vector and cpu destination) of the IRTE and a flush of the
 * hardware cache.
 *
 * For level triggered, we eliminate the io-apic RTE modification (with the
 * updated vector information), by using a virtual vector (io-apic pin number).
 * The real vector that is used for interrupting the cpu will come from
 * the interrupt-remapping table entry.
 *
 * As the migration is a simple atomic update of IRTE, the same mechanism
 * is used to migrate MSI irqs in the presence of interrupt-remapping.
 */
static int
intel_ir_set_affinity(struct irq_data *data, const struct cpumask *mask,
		      bool force)
{
	struct irq_data *parent = data->parent_data;
	struct irq_cfg *cfg = irqd_cfg(data);
	int ret;

	ret = parent->chip->irq_set_affinity(parent, mask, force);
	if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
		return ret;

	intel_ir_reconfigure_irte(data, false);
	/*
	 * After this point, all the interrupts will start arriving
	 * at the new destination. So, time to clean up the previous
	 * vector allocation.
	 */
	send_cleanup_vector(cfg);

	return IRQ_SET_MASK_OK_DONE;
}

static void intel_ir_compose_msi_msg(struct irq_data *irq_data,
				     struct msi_msg *msg)
{
	struct intel_ir_data *ir_data = irq_data->chip_data;

	*msg = ir_data->msi_entry;
}

static int intel_ir_set_vcpu_affinity(struct irq_data *data, void *info)
{
	struct intel_ir_data *ir_data = data->chip_data;
	struct vcpu_data *vcpu_pi_info = info;

	/* stop posting interrupts, back to remapping mode */
	if (!vcpu_pi_info) {
		modify_irte(&ir_data->irq_2_iommu, &ir_data->irte_entry);
	} else {
		struct irte irte_pi;

		/*
		 * We are not caching the posted interrupt entry. We
		 * copy the data from the remapped entry and modify
		 * the fields which are relevant for posted mode. The
		 * cached remapped entry is used for switching back to
		 * remapped mode.
		 */
		memset(&irte_pi, 0, sizeof(irte_pi));
		dmar_copy_shared_irte(&irte_pi, &ir_data->irte_entry);

		/* Update the posted mode fields */
		irte_pi.p_pst = 1;
		irte_pi.p_urgent = 0;
		irte_pi.p_vector = vcpu_pi_info->vector;
		irte_pi.pda_l = (vcpu_pi_info->pi_desc_addr >>
				(32 - PDA_LOW_BIT)) & ~(-1UL << PDA_LOW_BIT);
		irte_pi.pda_h = (vcpu_pi_info->pi_desc_addr >> 32) &
				~(-1UL << PDA_HIGH_BIT);

		modify_irte(&ir_data->irq_2_iommu, &irte_pi);
	}

	return 0;
}

static struct irq_chip intel_ir_chip = {
	.name			= "INTEL-IR",
	.irq_ack		= apic_ack_irq,
	.irq_set_affinity	= intel_ir_set_affinity,
	.irq_compose_msi_msg	= intel_ir_compose_msi_msg,
	.irq_set_vcpu_affinity	= intel_ir_set_vcpu_affinity,
};

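/*
 * Build the IRTE and the matching remappable-format programming for the
 * requester: for IO-APIC pins an RTE whose index fields point back at the
 * IRTE, and for HPET/MSI/MSI-X an MSI message with the SHV bit and the
 * IRTE index encoded in the address.
 */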
static void intel_irq_remapping_prepare_irte(struct intel_ir_data *data,
					     struct irq_cfg *irq_cfg,
					     struct irq_alloc_info *info,
					     int index, int sub_handle)
{
	struct IR_IO_APIC_route_entry *entry;
	struct irte *irte = &data->irte_entry;
	struct msi_msg *msg = &data->msi_entry;

	prepare_irte(irte, irq_cfg->vector, irq_cfg->dest_apicid);
	switch (info->type) {
	case X86_IRQ_ALLOC_TYPE_IOAPIC:
		/* Set source-id of interrupt request */
		set_ioapic_sid(irte, info->ioapic_id);
		apic_printk(APIC_VERBOSE, KERN_DEBUG "IOAPIC[%d]: Set IRTE entry (P:%d FPD:%d Dst_Mode:%d Redir_hint:%d Trig_Mode:%d Dlvry_Mode:%X Avail:%X Vector:%02X Dest:%08X SID:%04X SQ:%X SVT:%X)\n",
			info->ioapic_id, irte->present, irte->fpd,
			irte->dst_mode, irte->redir_hint,
			irte->trigger_mode, irte->dlvry_mode,
			irte->avail, irte->vector, irte->dest_id,
			irte->sid, irte->sq, irte->svt);

		entry = (struct IR_IO_APIC_route_entry *)info->ioapic_entry;
		info->ioapic_entry = NULL;
		memset(entry, 0, sizeof(*entry));
		entry->index2	= (index >> 15) & 0x1;
		entry->zero	= 0;
		entry->format	= 1;
		entry->index	= (index & 0x7fff);
		/*
		 * The IO-APIC RTE will be configured with the virtual vector.
		 * The irq handler will do the explicit EOI to the io-apic.
		 */
		entry->vector	= info->ioapic_pin;
		entry->mask	= 0;			/* enable IRQ */
		entry->trigger	= info->ioapic_trigger;
		entry->polarity	= info->ioapic_polarity;
		if (info->ioapic_trigger)
			entry->mask = 1; /* Mask level triggered irqs. */
		break;

	case X86_IRQ_ALLOC_TYPE_HPET:
	case X86_IRQ_ALLOC_TYPE_MSI:
	case X86_IRQ_ALLOC_TYPE_MSIX:
		if (info->type == X86_IRQ_ALLOC_TYPE_HPET)
			set_hpet_sid(irte, info->hpet_id);
		else
			set_msi_sid(irte, info->msi_dev);

		msg->address_hi = MSI_ADDR_BASE_HI;
		msg->data = sub_handle;
		msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_IR_EXT_INT |
				  MSI_ADDR_IR_SHV |
				  MSI_ADDR_IR_INDEX1(index) |
				  MSI_ADDR_IR_INDEX2(index);
		break;

	default:
		BUG_ON(1);
		break;
	}
}

static void intel_free_irq_resources(struct irq_domain *domain,
				     unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *irq_data;
	struct intel_ir_data *data;
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;
	int i;

	for (i = 0; i < nr_irqs; i++) {
		irq_data = irq_domain_get_irq_data(domain, virq + i);
		if (irq_data && irq_data->chip_data) {
			data = irq_data->chip_data;
			irq_iommu = &data->irq_2_iommu;
			raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
			clear_entries(irq_iommu);
			raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
			irq_domain_reset_irq_data(irq_data);
			kfree(data);
		}
	}
}

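/*
 * Allocate @nr_irqs interrupts below this remapping domain: reserve a
 * block of IRTEs, then attach an intel_ir_data (sharing the same IRTE
 * block via sub_handle) to each descriptor and prepare its IRTE.
 */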
static int intel_irq_remapping_alloc(struct irq_domain *domain,
				     unsigned int virq, unsigned int nr_irqs,
				     void *arg)
{
	struct intel_iommu *iommu = domain->host_data;
	struct irq_alloc_info *info = arg;
	struct intel_ir_data *data, *ird;
	struct irq_data *irq_data;
	struct irq_cfg *irq_cfg;
	int i, ret, index;

	if (!info || !iommu)
		return -EINVAL;
	if (nr_irqs > 1 && info->type != X86_IRQ_ALLOC_TYPE_MSI &&
	    info->type != X86_IRQ_ALLOC_TYPE_MSIX)
		return -EINVAL;

	/*
	 * With IRQ remapping enabled, we don't need contiguous CPU vectors
	 * to support multiple MSI interrupts.
	 */
	if (info->type == X86_IRQ_ALLOC_TYPE_MSI)
		info->flags &= ~X86_IRQ_ALLOC_CONTIGUOUS_VECTORS;

	ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
	if (ret < 0)
		return ret;

	ret = -ENOMEM;
	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		goto out_free_parent;

	down_read(&dmar_global_lock);
	index = alloc_irte(iommu, &data->irq_2_iommu, nr_irqs);
	up_read(&dmar_global_lock);
	if (index < 0) {
		pr_warn("Failed to allocate IRTE\n");
		kfree(data);
		goto out_free_parent;
	}

	for (i = 0; i < nr_irqs; i++) {
		irq_data = irq_domain_get_irq_data(domain, virq + i);
		irq_cfg = irq_data ? irqd_cfg(irq_data) : NULL;
		if (!irq_data || !irq_cfg) {
			ret = -EINVAL;
			goto out_free_data;
		}

		if (i > 0) {
			ird = kzalloc(sizeof(*ird), GFP_KERNEL);
			if (!ird)
				goto out_free_data;
			/* Initialize the common data */
			ird->irq_2_iommu = data->irq_2_iommu;
			ird->irq_2_iommu.sub_handle = i;
		} else {
			ird = data;
		}

		irq_data->hwirq = (index << 16) + i;
		irq_data->chip_data = ird;
		irq_data->chip = &intel_ir_chip;
		intel_irq_remapping_prepare_irte(ird, irq_cfg, info, index, i);
		irq_set_status_flags(virq + i, IRQ_MOVE_PCNTXT);
	}
	return 0;

out_free_data:
	intel_free_irq_resources(domain, virq, i);
out_free_parent:
	irq_domain_free_irqs_common(domain, virq, nr_irqs);
	return ret;
}

static void intel_irq_remapping_free(struct irq_domain *domain,
				     unsigned int virq, unsigned int nr_irqs)
{
	intel_free_irq_resources(domain, virq, nr_irqs);
	irq_domain_free_irqs_common(domain, virq, nr_irqs);
}

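/*
 * Activation writes the prepared IRTE to the hardware; deactivation
 * clears it so the entry no longer matches any interrupt request.
 */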
static int intel_irq_remapping_activate(struct irq_domain *domain,
					struct irq_data *irq_data, bool reserve)
{
	intel_ir_reconfigure_irte(irq_data, true);
	return 0;
}

static void intel_irq_remapping_deactivate(struct irq_domain *domain,
					   struct irq_data *irq_data)
{
	struct intel_ir_data *data = irq_data->chip_data;
	struct irte entry;

	memset(&entry, 0, sizeof(entry));
	modify_irte(&data->irq_2_iommu, &entry);
}

static const struct irq_domain_ops intel_ir_domain_ops = {
	.alloc = intel_irq_remapping_alloc,
	.free = intel_irq_remapping_free,
	.activate = intel_irq_remapping_activate,
	.deactivate = intel_irq_remapping_deactivate,
};

/*
 * Support for Interrupt Remapping Unit hotplug.
 */
static int dmar_ir_add(struct dmar_drhd_unit *dmaru, struct intel_iommu *iommu)
{
	int ret;
	int eim = x2apic_enabled();

	if (eim && !ecap_eim_support(iommu->ecap)) {
		pr_info("DRHD %Lx: EIM not supported by DRHD, ecap %Lx\n",
			iommu->reg_phys, iommu->ecap);
		return -ENODEV;
	}

	if (ir_parse_ioapic_hpet_scope(dmaru->hdr, iommu)) {
		pr_warn("DRHD %Lx: failed to parse managed IOAPIC/HPET\n",
			iommu->reg_phys);
		return -ENODEV;
	}

	/* TODO: check all IOAPICs are covered by IOMMU */

	/* Set up interrupt remapping now. */
	ret = intel_setup_irq_remapping(iommu);
	if (ret) {
		pr_err("Failed to setup irq remapping for %s\n",
		       iommu->name);
		intel_teardown_irq_remapping(iommu);
		ir_remove_ioapic_hpet_scope(iommu);
	} else {
		iommu_enable_irq_remapping(iommu);
	}

	return ret;
}

int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
{
	int ret = 0;
	struct intel_iommu *iommu = dmaru->iommu;

	if (!irq_remapping_enabled)
		return 0;
	if (iommu == NULL)
		return -EINVAL;
	if (!ecap_ir_support(iommu->ecap))
		return 0;
	if (irq_remapping_cap(IRQ_POSTING_CAP) &&
	    !cap_pi_support(iommu->cap))
		return -EBUSY;

	if (insert) {
		if (!iommu->ir_table)
			ret = dmar_ir_add(dmaru, iommu);
	} else {
		if (iommu->ir_table) {
			if (!bitmap_empty(iommu->ir_table->bitmap,
					  INTR_REMAP_TABLE_ENTRIES)) {
				ret = -EBUSY;
			} else {
				iommu_disable_irq_remapping(iommu);
				intel_teardown_irq_remapping(iommu);
				ir_remove_ioapic_hpet_scope(iommu);
			}
		}
	}

	return ret;
}