// SPDX-License-Identifier: GPL-2.0

#define pr_fmt(fmt)     "DMAR-IR: " fmt

#include <linux/interrupt.h>
#include <linux/dmar.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/hpet.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/acpi.h>
#include <linux/irqdomain.h>
#include <linux/crash_dump.h>
#include <asm/io_apic.h>
#include <asm/apic.h>
#include <asm/smp.h>
#include <asm/cpu.h>
#include <asm/irq_remapping.h>
#include <asm/pci-direct.h>

#include "iommu.h"
#include "../irq_remapping.h"
#include "cap_audit.h"

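/*
 * An IRTE is either in remapped format (the IOMMU translates the request
 * into a vector/destination pair) or in posted format (the interrupt is
 * posted into a VM's posted-interrupt descriptor and delivered through a
 * notification vector). The mode is tracked per IRTE so that affinity
 * updates touch the hardware only while the entry is in remapped mode.
 */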
enum irq_mode {
	IRQ_REMAPPING,
	IRQ_POSTING,
};

struct ioapic_scope {
	struct intel_iommu *iommu;
	unsigned int id;
	unsigned int bus;	/* PCI bus number */
	unsigned int devfn;	/* PCI devfn number */
};

struct hpet_scope {
	struct intel_iommu *iommu;
	u8 id;
	unsigned int bus;
	unsigned int devfn;
};

struct irq_2_iommu {
	struct intel_iommu *iommu;
	u16 irte_index;
	u16 sub_handle;
	u8  irte_mask;
	enum irq_mode mode;
};

struct intel_ir_data {
	struct irq_2_iommu			irq_2_iommu;
	struct irte				irte_entry;
	union {
		struct msi_msg			msi_entry;
	};
};

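/*
 * In xAPIC mode the 8-bit destination APIC ID lives in bits 15:8 of the
 * IRTE destination field, so IRTE_DEST() shifts it into place; in x2apic
 * (EIM) mode the 32-bit ID is used unshifted. IR_X2APIC_MODE() sets the
 * extended interrupt mode bit (bit 11, EIME per the VT-d spec) of the
 * DMAR_IRTA_REG value when x2apic remapping is requested, e.g.
 * IR_X2APIC_MODE(1) == 0x800.
 */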
#define IR_X2APIC_MODE(mode) (mode ? (1 << 11) : 0)
#define IRTE_DEST(dest) ((eim_mode) ? dest : dest << 8)

static int __read_mostly eim_mode;
static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static struct hpet_scope ir_hpet[MAX_HPET_TBS];

/*
 * Lock ordering:
 * ->dmar_global_lock
 *	->irq_2_ir_lock
 *		->qi->q_lock
 *	->iommu->register_lock
 * Note:
 * intel_irq_remap_ops.{supported,prepare,enable,disable,reenable} are called
 * in a single-threaded environment with interrupts disabled, so there is no
 * need to take the dmar_global_lock.
 */
DEFINE_RAW_SPINLOCK(irq_2_ir_lock);
static const struct irq_domain_ops intel_ir_domain_ops;

static void iommu_disable_irq_remapping(struct intel_iommu *iommu);
static int __init parse_ioapics_under_ir(void);
static const struct msi_parent_ops dmar_msi_parent_ops, virt_dmar_msi_parent_ops;

static bool ir_pre_enabled(struct intel_iommu *iommu)
{
	return (iommu->flags & VTD_FLAG_IRQ_REMAP_PRE_ENABLED);
}

static void clear_ir_pre_enabled(struct intel_iommu *iommu)
{
	iommu->flags &= ~VTD_FLAG_IRQ_REMAP_PRE_ENABLED;
}

static void init_ir_status(struct intel_iommu *iommu)
{
	u32 gsts;

	gsts = readl(iommu->reg + DMAR_GSTS_REG);
	if (gsts & DMA_GSTS_IRES)
		iommu->flags |= VTD_FLAG_IRQ_REMAP_PRE_ENABLED;
}

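/*
 * Allocate a naturally aligned block of IRTEs for @count interrupts.
 * Multi-MSI needs a power-of-two sized, contiguous block so the device
 * can address individual vectors by subhandle, hence the rounding below:
 * e.g. a request for 3 IRTEs is rounded up to 4 entries with
 * irte_mask == ilog2(4) == 2.
 */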
static int alloc_irte(struct intel_iommu *iommu,
		      struct irq_2_iommu *irq_iommu, u16 count)
{
	struct ir_table *table = iommu->ir_table;
	unsigned int mask = 0;
	unsigned long flags;
	int index;

	if (!count || !irq_iommu)
		return -1;

	if (count > 1) {
		count = __roundup_pow_of_two(count);
		mask = ilog2(count);
	}

	if (mask > ecap_max_handle_mask(iommu->ecap)) {
		pr_err("Requested mask %x exceeds the max invalidation handle mask value %Lx\n",
		       mask, ecap_max_handle_mask(iommu->ecap));
		return -1;
	}

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
	index = bitmap_find_free_region(table->bitmap,
					INTR_REMAP_TABLE_ENTRIES, mask);
	if (index < 0) {
		pr_warn("IR%d: can't allocate an IRTE\n", iommu->seq_id);
	} else {
		irq_iommu->iommu = iommu;
		irq_iommu->irte_index = index;
		irq_iommu->sub_handle = 0;
		irq_iommu->irte_mask = mask;
		irq_iommu->mode = IRQ_REMAPPING;
	}
	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return index;
}

static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
{
	struct qi_desc desc;

	desc.qw0 = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
		   | QI_IEC_SELECTIVE;
	desc.qw1 = 0;
	desc.qw2 = 0;
	desc.qw3 = 0;

	return qi_submit_sync(iommu, &desc, 1, 0);
}

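/*
 * Update an IRTE in place and flush the interrupt entry cache. A posted
 * format entry carries the posted-interrupt descriptor address across the
 * 64-bit boundary of the 128-bit IRTE, so whenever either the old or the
 * new entry is in posted format the whole IRTE must be updated in one
 * shot with a 128-bit cmpxchg; plain remapped entries can be written as
 * two 64-bit halves.
 */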
static int modify_irte(struct irq_2_iommu *irq_iommu,
		       struct irte *irte_modified)
{
	struct intel_iommu *iommu;
	unsigned long flags;
	struct irte *irte;
	int rc, index;

	if (!irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

	iommu = irq_iommu->iommu;

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	irte = &iommu->ir_table->base[index];

	if ((irte->pst == 1) || (irte_modified->pst == 1)) {
		bool ret;

		ret = cmpxchg_double(&irte->low, &irte->high,
				     irte->low, irte->high,
				     irte_modified->low, irte_modified->high);
		/*
		 * We use cmpxchg16 to atomically update the 128-bit IRTE,
		 * and it cannot be updated by the hardware or other processors
		 * behind us, so the return value of cmpxchg16 should be the
		 * same as the old value.
		 */
		WARN_ON(!ret);
	} else {
		WRITE_ONCE(irte->low, irte_modified->low);
		WRITE_ONCE(irte->high, irte_modified->high);
	}
	__iommu_flush_cache(iommu, irte, sizeof(*irte));

	rc = qi_flush_iec(iommu, index, 0);

	/* Update iommu mode according to the IRTE mode */
	irq_iommu->mode = irte->pst ? IRQ_POSTING : IRQ_REMAPPING;
	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return rc;
}

static struct intel_iommu *map_hpet_to_iommu(u8 hpet_id)
{
	int i;

	for (i = 0; i < MAX_HPET_TBS; i++) {
		if (ir_hpet[i].id == hpet_id && ir_hpet[i].iommu)
			return ir_hpet[i].iommu;
	}
	return NULL;
}

static struct intel_iommu *map_ioapic_to_iommu(int apic)
{
	int i;

	for (i = 0; i < MAX_IO_APICS; i++) {
		if (ir_ioapic[i].id == apic && ir_ioapic[i].iommu)
			return ir_ioapic[i].iommu;
	}
	return NULL;
}

static struct irq_domain *map_dev_to_ir(struct pci_dev *dev)
{
	struct dmar_drhd_unit *drhd = dmar_find_matched_drhd_unit(dev);

	return drhd ? drhd->iommu->ir_domain : NULL;
}

static int clear_entries(struct irq_2_iommu *irq_iommu)
{
	struct irte *start, *entry, *end;
	struct intel_iommu *iommu;
	int index;

	if (irq_iommu->sub_handle)
		return 0;

	iommu = irq_iommu->iommu;
	index = irq_iommu->irte_index;

	start = iommu->ir_table->base + index;
	end = start + (1 << irq_iommu->irte_mask);

	for (entry = start; entry < end; entry++) {
		WRITE_ONCE(entry->low, 0);
		WRITE_ONCE(entry->high, 0);
	}
	bitmap_release_region(iommu->ir_table->bitmap, index,
			      irq_iommu->irte_mask);

	return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
}

/*
 * source validation type
 */
#define SVT_NO_VERIFY		0x0  /* no verification is required */
#define SVT_VERIFY_SID_SQ	0x1  /* verify using SID and SQ fields */
#define SVT_VERIFY_BUS		0x2  /* verify bus of request-id */

/*
 * source-id qualifier
 */
#define SQ_ALL_16	0x0  /* verify all 16 bits of request-id */
#define SQ_13_IGNORE_1	0x1  /* verify most significant 13 bits, ignore
			      * the third least significant bit
			      */
#define SQ_13_IGNORE_2	0x2  /* verify most significant 13 bits, ignore
			      * the second and third least significant bits
			      */
#define SQ_13_IGNORE_3	0x3  /* verify most significant 13 bits, ignore
			      * the three least significant bits
			      */
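
/*
 * Example: with SVT_VERIFY_SID_SQ, SQ_13_IGNORE_3 and sid == 0x00f8
 * (bus 0, device 0x1f, function 0), requests from any function of PCI
 * device 00:1f pass verification, because the three function bits of
 * the requester-id are ignored.
 */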

/*
 * set SVT, SQ and SID fields of irte to verify
 * source ids of interrupt requests
 */
static void set_irte_sid(struct irte *irte, unsigned int svt,
			 unsigned int sq, unsigned int sid)
{
	if (disable_sourceid_checking)
		svt = SVT_NO_VERIFY;
	irte->svt = svt;
	irte->sq = sq;
	irte->sid = sid;
}

/*
 * Set an IRTE to match only a bus number range. Interrupt requests that
 * reference this IRTE must have a requester-id whose bus number lies in
 * the inclusive range [start_bus, end_bus].
 */
static void set_irte_verify_bus(struct irte *irte, unsigned int start_bus,
				unsigned int end_bus)
{
	set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16,
		     (start_bus << 8) | end_bus);
}

static int set_ioapic_sid(struct irte *irte, int apic)
{
	int i;
	u16 sid = 0;

	if (!irte)
		return -1;

	down_read(&dmar_global_lock);
	for (i = 0; i < MAX_IO_APICS; i++) {
		if (ir_ioapic[i].iommu && ir_ioapic[i].id == apic) {
			sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn;
			break;
		}
	}
	up_read(&dmar_global_lock);

	if (sid == 0) {
		pr_warn("Failed to set source-id of IOAPIC (%d)\n", apic);
		return -1;
	}

	set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, sid);

	return 0;
}

static int set_hpet_sid(struct irte *irte, u8 id)
{
	int i;
	u16 sid = 0;

	if (!irte)
		return -1;

	down_read(&dmar_global_lock);
	for (i = 0; i < MAX_HPET_TBS; i++) {
		if (ir_hpet[i].iommu && ir_hpet[i].id == id) {
			sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn;
			break;
		}
	}
	up_read(&dmar_global_lock);

	if (sid == 0) {
		pr_warn("Failed to set source-id of HPET block (%d)\n", id);
		return -1;
	}

	/*
	 * Should really use SQ_ALL_16. Some platforms are broken.
	 * While we figure out the right quirks for these broken platforms, use
	 * SQ_13_IGNORE_3 for now.
	 */
	set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_13_IGNORE_3, sid);

	return 0;
}

struct set_msi_sid_data {
	struct pci_dev *pdev;
	u16 alias;
	int count;
	int busmatch_count;
};

static int set_msi_sid_cb(struct pci_dev *pdev, u16 alias, void *opaque)
{
	struct set_msi_sid_data *data = opaque;

	if (data->count == 0 || PCI_BUS_NUM(alias) == PCI_BUS_NUM(data->alias))
		data->busmatch_count++;

	data->pdev = pdev;
	data->alias = alias;
	data->count++;

	return 0;
}

static int set_msi_sid(struct irte *irte, struct pci_dev *dev)
{
	struct set_msi_sid_data data;

	if (!irte || !dev)
		return -1;

	data.count = 0;
	data.busmatch_count = 0;
	pci_for_each_dma_alias(dev, set_msi_sid_cb, &data);

	/*
	 * The DMA alias walk provides us with a PCI device and alias.  The
	 * only case where it will return an alias on a different bus than
	 * the device is the case of a PCIe-to-PCI bridge, where the alias
	 * is for the subordinate bus.  In this case we can only verify the
	 * bus.
	 *
	 * If there are multiple aliases, all with the same bus number, then
	 * all we can do is verify the bus. This is typical in NTB hardware,
	 * which uses proxy IDs where the device will generate traffic from
	 * multiple devfn numbers on the same bus.
	 *
	 * If the alias device is on a different bus than our source device
	 * then we have a topology based alias, use it.
	 *
	 * Otherwise, the alias is for a device DMA quirk and we cannot
	 * assume that MSI uses the same requester ID.  Therefore use the
	 * original device.
	 */
	if (PCI_BUS_NUM(data.alias) != data.pdev->bus->number)
		set_irte_verify_bus(irte, PCI_BUS_NUM(data.alias),
				    dev->bus->number);
	else if (data.count >= 2 && data.busmatch_count == data.count)
		set_irte_verify_bus(irte, dev->bus->number, dev->bus->number);
	else if (data.pdev->bus->number != dev->bus->number)
		set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, data.alias);
	else
		set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
			     pci_dev_id(dev));

	return 0;
}

static int iommu_load_old_irte(struct intel_iommu *iommu)
{
	struct irte *old_ir_table;
	phys_addr_t irt_phys;
	unsigned int i;
	size_t size;
	u64 irta;

	/* Check whether the old ir-table has the same size as ours */
	irta = dmar_readq(iommu->reg + DMAR_IRTA_REG);
	if ((irta & INTR_REMAP_TABLE_REG_SIZE_MASK)
	     != INTR_REMAP_TABLE_REG_SIZE)
		return -EINVAL;

	irt_phys = irta & VTD_PAGE_MASK;
	size     = INTR_REMAP_TABLE_ENTRIES*sizeof(struct irte);

	/* Map the old IR table */
	old_ir_table = memremap(irt_phys, size, MEMREMAP_WB);
	if (!old_ir_table)
		return -ENOMEM;

	/* Copy data over */
	memcpy(iommu->ir_table->base, old_ir_table, size);

	__iommu_flush_cache(iommu, iommu->ir_table->base, size);

	/*
	 * Now check the table for used entries and mark those as
	 * allocated in the bitmap
	 */
	for (i = 0; i < INTR_REMAP_TABLE_ENTRIES; i++) {
		if (iommu->ir_table->base[i].present)
			bitmap_set(iommu->ir_table->bitmap, i, 1);
	}

	memunmap(old_ir_table);

	return 0;
}

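/*
 * Program the interrupt remapping table address register and make the
 * hardware latch it. The low bits of DMAR_IRTA_REG encode the table size
 * as 2^(X+1) entries (INTR_REMAP_TABLE_REG_SIZE), bit 11 selects x2apic
 * (EIM) mode, and the remainder is the 4KB-aligned physical base address
 * of the table. Setting DMA_GCMD_SIRTP tells the hardware to re-read the
 * register.
 */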
static void iommu_set_irq_remapping(struct intel_iommu *iommu, int mode)
{
	unsigned long flags;
	u64 addr;
	u32 sts;

	addr = virt_to_phys((void *)iommu->ir_table->base);

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	dmar_writeq(iommu->reg + DMAR_IRTA_REG,
		    (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);

	/* Set interrupt-remapping table pointer */
	writel(iommu->gcmd | DMA_GCMD_SIRTP, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRTPS), sts);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);

	/*
	 * Global invalidation of interrupt entry cache to make sure the
	 * hardware uses the new irq remapping table.
	 */
	if (!cap_esirtps(iommu->cap))
		qi_global_iec(iommu);
}

static void iommu_enable_irq_remapping(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	/* Enable interrupt-remapping */
	iommu->gcmd |= DMA_GCMD_IRE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRES), sts);

	/* Block compatibility-format MSIs */
	if (sts & DMA_GSTS_CFIS) {
		iommu->gcmd &= ~DMA_GCMD_CFI;
		writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
		IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
			      readl, !(sts & DMA_GSTS_CFIS), sts);
	}

	/*
	 * With CFI clear in the Global Command register, we should be
	 * protected from dangerous (i.e. compatibility) interrupts
	 * regardless of x2apic status.  Check just to be sure.
	 */
	if (sts & DMA_GSTS_CFIS)
		WARN(1, KERN_WARNING
			"Compatibility-format IRQs enabled despite intr remapping;\n"
			"you are vulnerable to IRQ injection.\n");

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static int intel_setup_irq_remapping(struct intel_iommu *iommu)
{
	struct ir_table *ir_table;
	struct fwnode_handle *fn;
	unsigned long *bitmap;
	struct page *pages;

	if (iommu->ir_table)
		return 0;

	ir_table = kzalloc(sizeof(struct ir_table), GFP_KERNEL);
	if (!ir_table)
		return -ENOMEM;

	pages = alloc_pages_node(iommu->node, GFP_KERNEL | __GFP_ZERO,
				 INTR_REMAP_PAGE_ORDER);
	if (!pages) {
		pr_err("IR%d: failed to allocate pages of order %d\n",
		       iommu->seq_id, INTR_REMAP_PAGE_ORDER);
		goto out_free_table;
	}

	bitmap = bitmap_zalloc(INTR_REMAP_TABLE_ENTRIES, GFP_ATOMIC);
	if (bitmap == NULL) {
		pr_err("IR%d: failed to allocate bitmap\n", iommu->seq_id);
		goto out_free_pages;
	}

	fn = irq_domain_alloc_named_id_fwnode("INTEL-IR", iommu->seq_id);
	if (!fn)
		goto out_free_bitmap;

	iommu->ir_domain =
		irq_domain_create_hierarchy(arch_get_ir_parent_domain(),
					    0, INTR_REMAP_TABLE_ENTRIES,
					    fn, &intel_ir_domain_ops,
					    iommu);
	if (!iommu->ir_domain) {
		pr_err("IR%d: failed to allocate irqdomain\n", iommu->seq_id);
		goto out_free_fwnode;
	}

	irq_domain_update_bus_token(iommu->ir_domain, DOMAIN_BUS_DMAR);
	iommu->ir_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT;

	if (cap_caching_mode(iommu->cap))
		iommu->ir_domain->msi_parent_ops = &virt_dmar_msi_parent_ops;
	else
		iommu->ir_domain->msi_parent_ops = &dmar_msi_parent_ops;

	ir_table->base = page_address(pages);
	ir_table->bitmap = bitmap;
	iommu->ir_table = ir_table;

	/*
	 * If queued invalidation is already initialized, we shouldn't
	 * disable it.
	 */
	if (!iommu->qi) {
		/*
		 * Clear previous faults.
		 */
		dmar_fault(-1, iommu);
		dmar_disable_qi(iommu);

		if (dmar_enable_qi(iommu)) {
			pr_err("Failed to enable queued invalidation\n");
			goto out_free_ir_domain;
		}
	}

	init_ir_status(iommu);

	if (ir_pre_enabled(iommu)) {
		if (!is_kdump_kernel()) {
			pr_warn("IRQ remapping was enabled on %s but we are not in kdump mode\n",
				iommu->name);
			clear_ir_pre_enabled(iommu);
			iommu_disable_irq_remapping(iommu);
		} else if (iommu_load_old_irte(iommu))
			pr_err("Failed to copy IR table for %s from previous kernel\n",
			       iommu->name);
		else
			pr_info("Copied IR table for %s from previous kernel\n",
				iommu->name);
	}

	iommu_set_irq_remapping(iommu, eim_mode);

	return 0;

out_free_ir_domain:
	irq_domain_remove(iommu->ir_domain);
	iommu->ir_domain = NULL;
out_free_fwnode:
	irq_domain_free_fwnode(fn);
out_free_bitmap:
	bitmap_free(bitmap);
out_free_pages:
	__free_pages(pages, INTR_REMAP_PAGE_ORDER);
out_free_table:
	kfree(ir_table);

	iommu->ir_table = NULL;

	return -ENOMEM;
}

static void intel_teardown_irq_remapping(struct intel_iommu *iommu)
{
	struct fwnode_handle *fn;

	if (iommu && iommu->ir_table) {
		if (iommu->ir_domain) {
			fn = iommu->ir_domain->fwnode;

			irq_domain_remove(iommu->ir_domain);
			irq_domain_free_fwnode(fn);
			iommu->ir_domain = NULL;
		}
		free_pages((unsigned long)iommu->ir_table->base,
			   INTR_REMAP_PAGE_ORDER);
		bitmap_free(iommu->ir_table->bitmap);
		kfree(iommu->ir_table);
		iommu->ir_table = NULL;
	}
}

/*
 * Disable Interrupt Remapping.
 */
static void iommu_disable_irq_remapping(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;

	if (!ecap_ir_support(iommu->ecap))
		return;

	/*
	 * global invalidation of interrupt entry cache before disabling
	 * interrupt-remapping.
	 */
	if (!cap_esirtps(iommu->cap))
		qi_global_iec(iommu);

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	sts = readl(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_IRES))
		goto end;

	iommu->gcmd &= ~DMA_GCMD_IRE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, !(sts & DMA_GSTS_IRES), sts);

end:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static int __init dmar_x2apic_optout(void)
{
	struct acpi_table_dmar *dmar;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar || no_x2apic_optout)
		return 0;
	return dmar->flags & DMAR_X2APIC_OPT_OUT;
}

static void __init intel_cleanup_irq_remapping(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	for_each_iommu(iommu, drhd) {
		if (ecap_ir_support(iommu->ecap)) {
			iommu_disable_irq_remapping(iommu);
			intel_teardown_irq_remapping(iommu);
		}
	}

	if (x2apic_supported())
		pr_warn("Failed to enable irq remapping. You are vulnerable to irq-injection attacks.\n");
}

static int __init intel_prepare_irq_remapping(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int eim = 0;

	if (irq_remap_broken) {
		pr_warn("This system BIOS has enabled interrupt remapping\n"
			"on a chipset that contains an erratum making that\n"
			"feature unstable.  To maintain system stability\n"
			"interrupt remapping is being disabled.  Please\n"
			"contact your BIOS vendor for an update\n");
		add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
		return -ENODEV;
	}

	if (dmar_table_init() < 0)
		return -ENODEV;

	if (intel_cap_audit(CAP_AUDIT_STATIC_IRQR, NULL))
		return -ENODEV;

	if (!dmar_ir_support())
		return -ENODEV;

	if (parse_ioapics_under_ir()) {
		pr_info("Not enabling interrupt remapping\n");
		goto error;
	}

	/* First make sure all IOMMUs support IRQ remapping */
	for_each_iommu(iommu, drhd)
		if (!ecap_ir_support(iommu->ecap))
			goto error;

	/* Detect remapping mode: lapic or x2apic */
	if (x2apic_supported()) {
		eim = !dmar_x2apic_optout();
		if (!eim) {
			pr_info("x2apic is disabled because BIOS sets x2apic opt out bit.\n");
			pr_info("Use 'intremap=no_x2apic_optout' to override the BIOS setting.\n");
		}
	}

	for_each_iommu(iommu, drhd) {
		if (eim && !ecap_eim_support(iommu->ecap)) {
			pr_info("%s does not support EIM\n", iommu->name);
			eim = 0;
		}
	}

	eim_mode = eim;
	if (eim)
		pr_info("Queued invalidation will be enabled to support x2apic and Intr-remapping.\n");

	/* Do the initializations early */
	for_each_iommu(iommu, drhd) {
		if (intel_setup_irq_remapping(iommu)) {
			pr_err("Failed to setup irq remapping for %s\n",
			       iommu->name);
			goto error;
		}
	}

	return 0;

error:
	intel_cleanup_irq_remapping();
	return -ENODEV;
}

/*
 * Set Posted-Interrupts capability.
 */
static inline void set_irq_posting_cap(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	if (!disable_irq_post) {
		/*
		 * If the IRTE is in posted format, the 'pda' field goes
		 * across the 64-bit boundary, so we need cmpxchg16b to
		 * atomically update it. We only expose posted-interrupt
		 * support when X86_FEATURE_CX16 is available. In practice,
		 * hardware platforms supporting PI should have
		 * X86_FEATURE_CX16 support; this has been confirmed with
		 * Intel hardware engineers.
		 */
		if (boot_cpu_has(X86_FEATURE_CX16))
			intel_irq_remap_ops.capability |= 1 << IRQ_POSTING_CAP;

		for_each_iommu(iommu, drhd)
			if (!cap_pi_support(iommu->cap)) {
				intel_irq_remap_ops.capability &=
						~(1 << IRQ_POSTING_CAP);
				break;
			}
	}
}

static int __init intel_enable_irq_remapping(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	bool setup = false;

	/*
	 * Setup Interrupt-remapping for all the DRHD's now.
	 */
	for_each_iommu(iommu, drhd) {
		if (!ir_pre_enabled(iommu))
			iommu_enable_irq_remapping(iommu);
		setup = true;
	}

	if (!setup)
		goto error;

	irq_remapping_enabled = 1;

	set_irq_posting_cap();

	pr_info("Enabled IRQ remapping in %s mode\n", eim_mode ? "x2apic" : "xapic");

	return eim_mode ? IRQ_REMAP_X2APIC_MODE : IRQ_REMAP_XAPIC_MODE;

error:
	intel_cleanup_irq_remapping();
	return -1;
}

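/*
 * A DMAR device scope entry describes a device as a path of (device,
 * function) hops starting at scope->bus. The parsers below walk that
 * path by reading PCI_SECONDARY_BUS of each intermediate bridge to
 * resolve the final bus number, then record the resulting bus/devfn as
 * the source-id used later for IRTE source validation.
 */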
static int ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
				   struct intel_iommu *iommu,
				   struct acpi_dmar_hardware_unit *drhd)
{
	struct acpi_dmar_pci_path *path;
	u8 bus;
	int count, free = -1;

	bus = scope->bus;
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (--count > 0) {
		/*
		 * Access PCI config space directly; the PCI subsystem
		 * isn't initialized yet.
		 */
		bus = read_pci_config_byte(bus, path->device, path->function,
					   PCI_SECONDARY_BUS);
		path++;
	}

	for (count = 0; count < MAX_HPET_TBS; count++) {
		if (ir_hpet[count].iommu == iommu &&
		    ir_hpet[count].id == scope->enumeration_id)
			return 0;
		else if (ir_hpet[count].iommu == NULL && free == -1)
			free = count;
	}
	if (free == -1) {
		pr_warn("Exceeded Max HPET blocks\n");
		return -ENOSPC;
	}

	ir_hpet[free].iommu = iommu;
	ir_hpet[free].id    = scope->enumeration_id;
	ir_hpet[free].bus   = bus;
	ir_hpet[free].devfn = PCI_DEVFN(path->device, path->function);
	pr_info("HPET id %d under DRHD base 0x%Lx\n",
		scope->enumeration_id, drhd->address);

	return 0;
}

static int ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
				     struct intel_iommu *iommu,
				     struct acpi_dmar_hardware_unit *drhd)
{
	struct acpi_dmar_pci_path *path;
	u8 bus;
	int count, free = -1;

	bus = scope->bus;
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (--count > 0) {
		/*
		 * Access PCI config space directly; the PCI subsystem
		 * isn't initialized yet.
		 */
		bus = read_pci_config_byte(bus, path->device, path->function,
					   PCI_SECONDARY_BUS);
		path++;
	}

	for (count = 0; count < MAX_IO_APICS; count++) {
		if (ir_ioapic[count].iommu == iommu &&
		    ir_ioapic[count].id == scope->enumeration_id)
			return 0;
		else if (ir_ioapic[count].iommu == NULL && free == -1)
			free = count;
	}
	if (free == -1) {
		pr_warn("Exceeded Max IO APICS\n");
		return -ENOSPC;
	}

	ir_ioapic[free].bus   = bus;
	ir_ioapic[free].devfn = PCI_DEVFN(path->device, path->function);
	ir_ioapic[free].iommu = iommu;
	ir_ioapic[free].id    = scope->enumeration_id;
	pr_info("IOAPIC id %d under DRHD base 0x%Lx IOMMU %d\n",
		scope->enumeration_id, drhd->address, iommu->seq_id);

	return 0;
}

static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header,
				      struct intel_iommu *iommu)
{
	int ret = 0;
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_device_scope *scope;
	void *start, *end;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	start = (void *)(drhd + 1);
	end = ((void *)drhd) + header->length;

	while (start < end && ret == 0) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC)
			ret = ir_parse_one_ioapic_scope(scope, iommu, drhd);
		else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET)
			ret = ir_parse_one_hpet_scope(scope, iommu, drhd);
		start += scope->length;
	}

	return ret;
}

static void ir_remove_ioapic_hpet_scope(struct intel_iommu *iommu)
{
	int i;

	for (i = 0; i < MAX_HPET_TBS; i++)
		if (ir_hpet[i].iommu == iommu)
			ir_hpet[i].iommu = NULL;

	for (i = 0; i < MAX_IO_APICS; i++)
		if (ir_ioapic[i].iommu == iommu)
			ir_ioapic[i].iommu = NULL;
}

/*
 * Find the association between IOAPICs and their interrupt-remapping
 * hardware units.
 */
static int __init parse_ioapics_under_ir(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	bool ir_supported = false;
	int ioapic_idx;

	for_each_iommu(iommu, drhd) {
		int ret;

		if (!ecap_ir_support(iommu->ecap))
			continue;

		ret = ir_parse_ioapic_hpet_scope(drhd->hdr, iommu);
		if (ret)
			return ret;

		ir_supported = true;
	}

	if (!ir_supported)
		return -ENODEV;

	for (ioapic_idx = 0; ioapic_idx < nr_ioapics; ioapic_idx++) {
		int ioapic_id = mpc_ioapic_id(ioapic_idx);

		if (!map_ioapic_to_iommu(ioapic_id)) {
			pr_err(FW_BUG "ioapic %d has no mapping iommu, interrupt remapping will be disabled\n",
			       ioapic_id);
			return -1;
		}
	}

	return 0;
}

static int __init ir_dev_scope_init(void)
{
	int ret;

	if (!irq_remapping_enabled)
		return 0;

	down_write(&dmar_global_lock);
	ret = dmar_dev_scope_init();
	up_write(&dmar_global_lock);

	return ret;
}
rootfs_initcall(ir_dev_scope_init);

static void disable_irq_remapping(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;

	/*
	 * Disable Interrupt-remapping for all the DRHD's now.
	 */
	for_each_iommu(iommu, drhd) {
		if (!ecap_ir_support(iommu->ecap))
			continue;

		iommu_disable_irq_remapping(iommu);
	}

	/*
	 * Clear Posted-Interrupts capability.
	 */
	if (!disable_irq_post)
		intel_irq_remap_ops.capability &= ~(1 << IRQ_POSTING_CAP);
}

static int reenable_irq_remapping(int eim)
{
	struct dmar_drhd_unit *drhd;
	bool setup = false;
	struct intel_iommu *iommu = NULL;

	for_each_iommu(iommu, drhd)
		if (iommu->qi)
			dmar_reenable_qi(iommu);

	/*
	 * Setup Interrupt-remapping for all the DRHD's now.
	 */
	for_each_iommu(iommu, drhd) {
		if (!ecap_ir_support(iommu->ecap))
			continue;

		/* Set up interrupt remapping for iommu. */
		iommu_set_irq_remapping(iommu, eim);
		iommu_enable_irq_remapping(iommu);
		setup = true;
	}

	if (!setup)
		goto error;

	set_irq_posting_cap();

	return 0;

error:
	/*
	 * Handle the error condition gracefully here!
	 */
	return -1;
}

/*
 * Store the MSI remapping domain pointer in the device if enabled.
 *
 * This is called from dmar_pci_bus_add_dev() so it works even when DMA
 * remapping is disabled. Only update the pointer if the device is not
 * already handled by a non default PCI/MSI interrupt domain. This protects
 * e.g. VMD devices.
 */
void intel_irq_remap_add_device(struct dmar_pci_notify_info *info)
{
	if (!irq_remapping_enabled || !pci_dev_has_default_msi_parent_domain(info->dev))
		return;

	dev_set_msi_domain(&info->dev->dev, map_dev_to_ir(info->dev));
}

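/*
 * Fill in a remapped format IRTE for the given vector and destination
 * APIC ID. The entry is created present, edge triggered, and with the
 * redirection hint set; the source-id fields are filled in afterwards
 * by the set_*_sid() helpers, depending on the originating device type.
 */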
static void prepare_irte(struct irte *irte, int vector, unsigned int dest)
{
	memset(irte, 0, sizeof(*irte));

	irte->present = 1;
	irte->dst_mode = apic->dest_mode_logical;
	/*
	 * Trigger mode in the IRTE will always be edge, and for IO-APIC, the
	 * actual level or edge trigger will be set up in the IO-APIC
	 * RTE. This will help simplify level triggered irq migration.
	 * For more details, see the comments (in io_apic.c) explaining IO-APIC
	 * irq migration in the presence of interrupt-remapping.
	 */
	irte->trigger_mode = 0;
	irte->dlvry_mode = apic->delivery_mode;
	irte->vector = vector;
	irte->dest_id = IRTE_DEST(dest);
	irte->redir_hint = 1;
}

struct irq_remap_ops intel_irq_remap_ops = {
	.prepare		= intel_prepare_irq_remapping,
	.enable			= intel_enable_irq_remapping,
	.disable		= disable_irq_remapping,
	.reenable		= reenable_irq_remapping,
	.enable_faulting	= enable_drhd_fault_handling,
};

static void intel_ir_reconfigure_irte(struct irq_data *irqd, bool force)
{
	struct intel_ir_data *ir_data = irqd->chip_data;
	struct irte *irte = &ir_data->irte_entry;
	struct irq_cfg *cfg = irqd_cfg(irqd);

	/*
	 * Atomically update the IRTE with the new destination and vector,
	 * and flush the interrupt entry cache.
	 */
	irte->vector = cfg->vector;
	irte->dest_id = IRTE_DEST(cfg->dest_apicid);

	/* Update the hardware only if the interrupt is in remapped mode. */
	if (force || ir_data->irq_2_iommu.mode == IRQ_REMAPPING)
		modify_irte(&ir_data->irq_2_iommu, irte);
}

/*
 * Migrate the IO-APIC irq in the presence of intr-remapping.
 *
 * For both level and edge triggered, irq migration is a simple atomic
 * update (of vector and CPU destination) of the IRTE and a flush of the
 * hardware cache.
 *
 * For level triggered, we eliminate the io-apic RTE modification (with the
 * updated vector information), by using a virtual vector (io-apic pin number).
 * The real vector that is used for interrupting the CPU will come from
 * the interrupt-remapping table entry.
 *
 * As the migration is a simple atomic update of the IRTE, the same mechanism
 * is used to migrate MSI irqs in the presence of interrupt-remapping.
 */
static int
intel_ir_set_affinity(struct irq_data *data, const struct cpumask *mask,
		      bool force)
{
	struct irq_data *parent = data->parent_data;
	struct irq_cfg *cfg = irqd_cfg(data);
	int ret;

	ret = parent->chip->irq_set_affinity(parent, mask, force);
	if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
		return ret;

	intel_ir_reconfigure_irte(data, false);
	/*
	 * After this point, all the interrupts will start arriving
	 * at the new destination. So, time to cleanup the previous
	 * vector allocation.
	 */
	send_cleanup_vector(cfg);

	return IRQ_SET_MASK_OK_DONE;
}

static void intel_ir_compose_msi_msg(struct irq_data *irq_data,
				     struct msi_msg *msg)
{
	struct intel_ir_data *ir_data = irq_data->chip_data;

	*msg = ir_data->msi_entry;
}

static int intel_ir_set_vcpu_affinity(struct irq_data *data, void *info)
{
	struct intel_ir_data *ir_data = data->chip_data;
	struct vcpu_data *vcpu_pi_info = info;

	/* stop posting interrupts, back to remapping mode */
	if (!vcpu_pi_info) {
		modify_irte(&ir_data->irq_2_iommu, &ir_data->irte_entry);
	} else {
		struct irte irte_pi;

		/*
		 * We are not caching the posted interrupt entry. We
		 * copy the data from the remapped entry and modify
		 * the fields which are relevant for posted mode. The
		 * cached remapped entry is used for switching back to
		 * remapped mode.
		 */
		memset(&irte_pi, 0, sizeof(irte_pi));
		dmar_copy_shared_irte(&irte_pi, &ir_data->irte_entry);

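		/*
		 * The 64-byte aligned posted-interrupt descriptor address
		 * is split across the IRTE: with PDA_LOW_BIT == 26, pda_l
		 * holds address bits 31:6 and pda_h holds bits 63:32.
		 */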
		/* Update the posted mode fields */
		irte_pi.p_pst = 1;
		irte_pi.p_urgent = 0;
		irte_pi.p_vector = vcpu_pi_info->vector;
		irte_pi.pda_l = (vcpu_pi_info->pi_desc_addr >>
				(32 - PDA_LOW_BIT)) & ~(-1UL << PDA_LOW_BIT);
		irte_pi.pda_h = (vcpu_pi_info->pi_desc_addr >> 32) &
				~(-1UL << PDA_HIGH_BIT);

		modify_irte(&ir_data->irq_2_iommu, &irte_pi);
	}

	return 0;
}

static struct irq_chip intel_ir_chip = {
	.name			= "INTEL-IR",
	.irq_ack		= apic_ack_irq,
	.irq_set_affinity	= intel_ir_set_affinity,
	.irq_compose_msi_msg	= intel_ir_compose_msi_msg,
	.irq_set_vcpu_affinity	= intel_ir_set_vcpu_affinity,
};

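/*
 * Compose the remappable MSI message a device must use to hit an IRTE:
 * the format bit marks the address as remappable, the 16-bit IRTE handle
 * is split into an address low-part (bits 0-14) plus a separate bit 15,
 * and the subhandle is carried in the data register. For example, index
 * 0x8003 yields dmar_index_0_14 == 3 and dmar_index_15 == 1.
 */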
static void fill_msi_msg(struct msi_msg *msg, u32 index, u32 subhandle)
{
	memset(msg, 0, sizeof(*msg));

	msg->arch_addr_lo.dmar_base_address = X86_MSI_BASE_ADDRESS_LOW;
	msg->arch_addr_lo.dmar_subhandle_valid = true;
	msg->arch_addr_lo.dmar_format = true;
	msg->arch_addr_lo.dmar_index_0_14 = index & 0x7FFF;
	msg->arch_addr_lo.dmar_index_15 = !!(index & 0x8000);

	msg->address_hi = X86_MSI_BASE_ADDRESS_HIGH;

	msg->arch_data.dmar_subhandle = subhandle;
}

static void intel_irq_remapping_prepare_irte(struct intel_ir_data *data,
					     struct irq_cfg *irq_cfg,
					     struct irq_alloc_info *info,
					     int index, int sub_handle)
{
	struct irte *irte = &data->irte_entry;

	prepare_irte(irte, irq_cfg->vector, irq_cfg->dest_apicid);

	switch (info->type) {
	case X86_IRQ_ALLOC_TYPE_IOAPIC:
		/* Set source-id of interrupt request */
		set_ioapic_sid(irte, info->devid);
		apic_printk(APIC_VERBOSE, KERN_DEBUG "IOAPIC[%d]: Set IRTE entry (P:%d FPD:%d Dst_Mode:%d Redir_hint:%d Trig_Mode:%d Dlvry_Mode:%X Avail:%X Vector:%02X Dest:%08X SID:%04X SQ:%X SVT:%X)\n",
			info->devid, irte->present, irte->fpd,
			irte->dst_mode, irte->redir_hint,
			irte->trigger_mode, irte->dlvry_mode,
			irte->avail, irte->vector, irte->dest_id,
			irte->sid, irte->sq, irte->svt);
		sub_handle = info->ioapic.pin;
		break;
	case X86_IRQ_ALLOC_TYPE_HPET:
		set_hpet_sid(irte, info->devid);
		break;
	case X86_IRQ_ALLOC_TYPE_PCI_MSI:
	case X86_IRQ_ALLOC_TYPE_PCI_MSIX:
		set_msi_sid(irte,
			    pci_real_dma_dev(msi_desc_to_pci_dev(info->desc)));
		break;
	default:
		BUG_ON(1);
		break;
	}
	fill_msi_msg(&data->msi_entry, index, sub_handle);
}

static void intel_free_irq_resources(struct irq_domain *domain,
				     unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *irq_data;
	struct intel_ir_data *data;
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;
	int i;

	for (i = 0; i < nr_irqs; i++) {
		irq_data = irq_domain_get_irq_data(domain, virq + i);
		if (irq_data && irq_data->chip_data) {
			data = irq_data->chip_data;
			irq_iommu = &data->irq_2_iommu;
			raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
			clear_entries(irq_iommu);
			raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
			irq_domain_reset_irq_data(irq_data);
			kfree(data);
		}
	}
}

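/*
 * Allocate @nr_irqs interrupts backed by a single contiguous IRTE block.
 * The first descriptor owns the block (sub_handle 0); every further
 * interrupt gets a copy of the irq_2_iommu data with its own sub_handle,
 * so the block is released exactly once, via the sub_handle 0 entry, in
 * clear_entries().
 */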
static int intel_irq_remapping_alloc(struct irq_domain *domain,
				     unsigned int virq, unsigned int nr_irqs,
				     void *arg)
{
	struct intel_iommu *iommu = domain->host_data;
	struct irq_alloc_info *info = arg;
	struct intel_ir_data *data, *ird;
	struct irq_data *irq_data;
	struct irq_cfg *irq_cfg;
	int i, ret, index;

	if (!info || !iommu)
		return -EINVAL;
	if (nr_irqs > 1 && info->type != X86_IRQ_ALLOC_TYPE_PCI_MSI)
		return -EINVAL;

	ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
	if (ret < 0)
		return ret;

	ret = -ENOMEM;
	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		goto out_free_parent;

	down_read(&dmar_global_lock);
	index = alloc_irte(iommu, &data->irq_2_iommu, nr_irqs);
	up_read(&dmar_global_lock);
	if (index < 0) {
		pr_warn("Failed to allocate IRTE\n");
		kfree(data);
		goto out_free_parent;
	}

	for (i = 0; i < nr_irqs; i++) {
		irq_data = irq_domain_get_irq_data(domain, virq + i);
		irq_cfg = irqd_cfg(irq_data);
		if (!irq_data || !irq_cfg) {
			if (!i)
				kfree(data);
			ret = -EINVAL;
			goto out_free_data;
		}

		if (i > 0) {
			ird = kzalloc(sizeof(*ird), GFP_KERNEL);
			if (!ird)
				goto out_free_data;
			/* Initialize the common data */
			ird->irq_2_iommu = data->irq_2_iommu;
			ird->irq_2_iommu.sub_handle = i;
		} else {
			ird = data;
		}

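		/*
		 * Encode the IRTE index in the upper bits of hwirq and the
		 * subhandle in the low 16 bits.
		 */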
		irq_data->hwirq = (index << 16) + i;
		irq_data->chip_data = ird;
		irq_data->chip = &intel_ir_chip;
		intel_irq_remapping_prepare_irte(ird, irq_cfg, info, index, i);
		irq_set_status_flags(virq + i, IRQ_MOVE_PCNTXT);
	}
	return 0;

out_free_data:
	intel_free_irq_resources(domain, virq, i);
out_free_parent:
	irq_domain_free_irqs_common(domain, virq, nr_irqs);
	return ret;
}

static void intel_irq_remapping_free(struct irq_domain *domain,
				     unsigned int virq, unsigned int nr_irqs)
{
	intel_free_irq_resources(domain, virq, nr_irqs);
	irq_domain_free_irqs_common(domain, virq, nr_irqs);
}

static int intel_irq_remapping_activate(struct irq_domain *domain,
					struct irq_data *irq_data, bool reserve)
{
	intel_ir_reconfigure_irte(irq_data, true);
	return 0;
}

static void intel_irq_remapping_deactivate(struct irq_domain *domain,
					   struct irq_data *irq_data)
{
	struct intel_ir_data *data = irq_data->chip_data;
	struct irte entry;

	memset(&entry, 0, sizeof(entry));
	modify_irte(&data->irq_2_iommu, &entry);
}

static int intel_irq_remapping_select(struct irq_domain *d,
				      struct irq_fwspec *fwspec,
				      enum irq_domain_bus_token bus_token)
{
	struct intel_iommu *iommu = NULL;

	if (x86_fwspec_is_ioapic(fwspec))
		iommu = map_ioapic_to_iommu(fwspec->param[0]);
	else if (x86_fwspec_is_hpet(fwspec))
		iommu = map_hpet_to_iommu(fwspec->param[0]);

	return iommu && d == iommu->ir_domain;
}

static const struct irq_domain_ops intel_ir_domain_ops = {
	.select = intel_irq_remapping_select,
	.alloc = intel_irq_remapping_alloc,
	.free = intel_irq_remapping_free,
	.activate = intel_irq_remapping_activate,
	.deactivate = intel_irq_remapping_deactivate,
};

static const struct msi_parent_ops dmar_msi_parent_ops = {
	.supported_flags	= X86_VECTOR_MSI_FLAGS_SUPPORTED |
				  MSI_FLAG_MULTI_PCI_MSI |
				  MSI_FLAG_PCI_IMS,
	.prefix			= "IR-",
	.init_dev_msi_info	= msi_parent_init_dev_msi_info,
};

static const struct msi_parent_ops virt_dmar_msi_parent_ops = {
	.supported_flags	= X86_VECTOR_MSI_FLAGS_SUPPORTED |
				  MSI_FLAG_MULTI_PCI_MSI,
	.prefix			= "vIR-",
	.init_dev_msi_info	= msi_parent_init_dev_msi_info,
};

/*
 * Support of Interrupt Remapping Unit Hotplug
 */
static int dmar_ir_add(struct dmar_drhd_unit *dmaru, struct intel_iommu *iommu)
{
	int ret;
	int eim = x2apic_enabled();

	ret = intel_cap_audit(CAP_AUDIT_HOTPLUG_IRQR, iommu);
	if (ret)
		return ret;

	if (eim && !ecap_eim_support(iommu->ecap)) {
		pr_info("DRHD %Lx: EIM not supported by DRHD, ecap %Lx\n",
			iommu->reg_phys, iommu->ecap);
		return -ENODEV;
	}

	if (ir_parse_ioapic_hpet_scope(dmaru->hdr, iommu)) {
		pr_warn("DRHD %Lx: failed to parse managed IOAPIC/HPET\n",
			iommu->reg_phys);
		return -ENODEV;
	}

	/* TODO: check all IOAPICs are covered by IOMMU */

	/* Setup Interrupt-remapping now. */
	ret = intel_setup_irq_remapping(iommu);
	if (ret) {
		pr_err("Failed to setup irq remapping for %s\n",
		       iommu->name);
		intel_teardown_irq_remapping(iommu);
		ir_remove_ioapic_hpet_scope(iommu);
	} else {
		iommu_enable_irq_remapping(iommu);
	}

	return ret;
}

int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
{
	struct intel_iommu *iommu = dmaru->iommu;
	int ret = 0;

	if (!irq_remapping_enabled)
		return 0;
	if (iommu == NULL)
		return -EINVAL;
	if (!ecap_ir_support(iommu->ecap))
		return 0;
	if (irq_remapping_cap(IRQ_POSTING_CAP) &&
	    !cap_pi_support(iommu->cap))
		return -EBUSY;

	if (insert) {
		if (!iommu->ir_table)
			ret = dmar_ir_add(dmaru, iommu);
	} else {
		if (iommu->ir_table) {
			if (!bitmap_empty(iommu->ir_table->bitmap,
					  INTR_REMAP_TABLE_ENTRIES)) {
				ret = -EBUSY;
			} else {
				iommu_disable_irq_remapping(iommu);
				intel_teardown_irq_remapping(iommu);
				ir_remove_ioapic_hpet_scope(iommu);
			}
		}
	}

	return ret;
}