xref: /openbmc/linux/drivers/iommu/amd/init.c (revision f9e2f0e8)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
4  * Author: Joerg Roedel <jroedel@suse.de>
5  *         Leo Duran <leo.duran@amd.com>
6  */
7 
8 #define pr_fmt(fmt)     "AMD-Vi: " fmt
9 #define dev_fmt(fmt)    pr_fmt(fmt)
10 
11 #include <linux/pci.h>
12 #include <linux/acpi.h>
13 #include <linux/list.h>
14 #include <linux/bitmap.h>
15 #include <linux/slab.h>
16 #include <linux/syscore_ops.h>
17 #include <linux/interrupt.h>
18 #include <linux/msi.h>
19 #include <linux/irq.h>
20 #include <linux/amd-iommu.h>
21 #include <linux/export.h>
22 #include <linux/kmemleak.h>
23 #include <linux/cc_platform.h>
24 #include <linux/iopoll.h>
25 #include <asm/pci-direct.h>
26 #include <asm/iommu.h>
27 #include <asm/apic.h>
28 #include <asm/gart.h>
29 #include <asm/x86_init.h>
30 #include <asm/io_apic.h>
31 #include <asm/irq_remapping.h>
32 #include <asm/set_memory.h>
33 
34 #include <linux/crash_dump.h>
35 
36 #include "amd_iommu.h"
37 #include "../irq_remapping.h"
38 
39 /*
40  * definitions for the ACPI scanning code
41  */
42 #define IVRS_HEADER_LENGTH 48
43 
44 #define ACPI_IVHD_TYPE_MAX_SUPPORTED	0x40
45 #define ACPI_IVMD_TYPE_ALL              0x20
46 #define ACPI_IVMD_TYPE                  0x21
47 #define ACPI_IVMD_TYPE_RANGE            0x22
48 
49 #define IVHD_DEV_ALL                    0x01
50 #define IVHD_DEV_SELECT                 0x02
51 #define IVHD_DEV_SELECT_RANGE_START     0x03
52 #define IVHD_DEV_RANGE_END              0x04
53 #define IVHD_DEV_ALIAS                  0x42
54 #define IVHD_DEV_ALIAS_RANGE            0x43
55 #define IVHD_DEV_EXT_SELECT             0x46
56 #define IVHD_DEV_EXT_SELECT_RANGE       0x47
57 #define IVHD_DEV_SPECIAL		0x48
58 #define IVHD_DEV_ACPI_HID		0xf0
59 
60 #define UID_NOT_PRESENT                 0
61 #define UID_IS_INTEGER                  1
62 #define UID_IS_CHARACTER                2
63 
64 #define IVHD_SPECIAL_IOAPIC		1
65 #define IVHD_SPECIAL_HPET		2
66 
67 #define IVHD_FLAG_HT_TUN_EN_MASK        0x01
68 #define IVHD_FLAG_PASSPW_EN_MASK        0x02
69 #define IVHD_FLAG_RESPASSPW_EN_MASK     0x04
70 #define IVHD_FLAG_ISOC_EN_MASK          0x08
71 
72 #define IVMD_FLAG_EXCL_RANGE            0x08
73 #define IVMD_FLAG_IW                    0x04
74 #define IVMD_FLAG_IR                    0x02
75 #define IVMD_FLAG_UNITY_MAP             0x01
76 
77 #define ACPI_DEVFLAG_INITPASS           0x01
78 #define ACPI_DEVFLAG_EXTINT             0x02
79 #define ACPI_DEVFLAG_NMI                0x04
80 #define ACPI_DEVFLAG_SYSMGT1            0x10
81 #define ACPI_DEVFLAG_SYSMGT2            0x20
82 #define ACPI_DEVFLAG_LINT0              0x40
83 #define ACPI_DEVFLAG_LINT1              0x80
84 #define ACPI_DEVFLAG_ATSDIS             0x10000000
85 
86 #define LOOP_TIMEOUT	2000000
87 
88 #define IVRS_GET_SBDF_ID(seg, bus, dev, fn)	(((seg & 0xffff) << 16) | ((bus & 0xff) << 8) \
89 						 | ((dev & 0x1f) << 3) | (fn & 0x7))
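/*
 * Illustrative sketch (not part of the driver): how a 32-bit SBDF value
 * built by IVRS_GET_SBDF_ID() decomposes back into its parts. The helper
 * below is hypothetical and kept out of the build with #if 0.
 */
#if 0
static inline void example_decode_sbdf(u32 sbdf)
{
	u16 seg = sbdf >> 16;		/* bits 31:16 - PCI segment */
	u8  bus = (sbdf >> 8) & 0xff;	/* bits 15:8  - bus number  */
	u8  dev = (sbdf >> 3) & 0x1f;	/* bits 7:3   - device      */
	u8  fn  = sbdf & 0x7;		/* bits 2:0   - function    */

	pr_debug("SBDF %#x -> %04x:%02x:%02x.%x\n", sbdf, seg, bus, dev, fn);
}
#endif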
90 
91 /*
92  * ACPI table definitions
93  *
94  * These data structures are laid over the table to parse the important values
95  * out of it.
96  */
97 
98 /*
99  * structure describing one IOMMU in the ACPI table. Typically followed by one
100  * or more ivhd_entries.
101  */
102 struct ivhd_header {
103 	u8 type;
104 	u8 flags;
105 	u16 length;
106 	u16 devid;
107 	u16 cap_ptr;
108 	u64 mmio_phys;
109 	u16 pci_seg;
110 	u16 info;
111 	u32 efr_attr;
112 
113 	/* Following only valid on IVHD type 11h and 40h */
114 	u64 efr_reg; /* Exact copy of MMIO_EXT_FEATURES */
115 	u64 efr_reg2;
116 } __attribute__((packed));
117 
118 /*
119  * A device entry describing which devices a specific IOMMU translates and
120  * which requestor ids they use.
121  */
122 struct ivhd_entry {
123 	u8 type;
124 	u16 devid;
125 	u8 flags;
126 	struct_group(ext_hid,
127 		u32 ext;
128 		u32 hidh;
129 	);
130 	u64 cid;
131 	u8 uidf;
132 	u8 uidl;
133 	u8 uid;
134 } __attribute__((packed));
135 
136 /*
137  * An AMD IOMMU memory definition structure. It defines things like exclusion
138  * ranges for devices and regions that should be unity mapped.
139  */
140 struct ivmd_header {
141 	u8 type;
142 	u8 flags;
143 	u16 length;
144 	u16 devid;
145 	u16 aux;
146 	u16 pci_seg;
147 	u8  resv[6];
148 	u64 range_start;
149 	u64 range_length;
150 } __attribute__((packed));
151 
152 bool amd_iommu_dump;
153 bool amd_iommu_irq_remap __read_mostly;
154 
155 enum io_pgtable_fmt amd_iommu_pgtable = AMD_IOMMU_V1;
156 
157 int amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
158 static int amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE;
159 
160 static bool amd_iommu_detected;
161 static bool amd_iommu_disabled __initdata;
162 static bool amd_iommu_force_enable __initdata;
163 static int amd_iommu_target_ivhd_type;
164 
165 /* Global EFR and EFR2 registers */
166 u64 amd_iommu_efr;
167 u64 amd_iommu_efr2;
168 
169 /* Is SNP enabled on the system? */
170 bool amd_iommu_snp_en;
171 EXPORT_SYMBOL(amd_iommu_snp_en);
172 
173 LIST_HEAD(amd_iommu_pci_seg_list);	/* list of all PCI segments */
174 LIST_HEAD(amd_iommu_list);		/* list of all AMD IOMMUs in the
175 					   system */
176 
177 /* Array to assign indices to IOMMUs */
178 struct amd_iommu *amd_iommus[MAX_IOMMUS];
179 
180 /* Number of IOMMUs present in the system */
181 static int amd_iommus_present;
182 
183 /* Do the IOMMUs have a non-present cache? */
184 bool amd_iommu_np_cache __read_mostly;
185 bool amd_iommu_iotlb_sup __read_mostly = true;
186 
187 u32 amd_iommu_max_pasid __read_mostly = ~0;
188 
189 bool amd_iommu_v2_present __read_mostly;
190 static bool amd_iommu_pc_present __read_mostly;
191 bool amdr_ivrs_remap_support __read_mostly;
192 
193 bool amd_iommu_force_isolation __read_mostly;
194 
195 /*
196  * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap
197  * used to track which ones are already in use.
198  */
199 unsigned long *amd_iommu_pd_alloc_bitmap;
200 
201 enum iommu_init_state {
202 	IOMMU_START_STATE,
203 	IOMMU_IVRS_DETECTED,
204 	IOMMU_ACPI_FINISHED,
205 	IOMMU_ENABLED,
206 	IOMMU_PCI_INIT,
207 	IOMMU_INTERRUPTS_EN,
208 	IOMMU_INITIALIZED,
209 	IOMMU_NOT_FOUND,
210 	IOMMU_INIT_ERROR,
211 	IOMMU_CMDLINE_DISABLED,
212 };
213 
214 /* Early ioapic and hpet maps from kernel command line */
215 #define EARLY_MAP_SIZE		4
216 static struct devid_map __initdata early_ioapic_map[EARLY_MAP_SIZE];
217 static struct devid_map __initdata early_hpet_map[EARLY_MAP_SIZE];
218 static struct acpihid_map_entry __initdata early_acpihid_map[EARLY_MAP_SIZE];
219 
220 static int __initdata early_ioapic_map_size;
221 static int __initdata early_hpet_map_size;
222 static int __initdata early_acpihid_map_size;
223 
224 static bool __initdata cmdline_maps;
225 
226 static enum iommu_init_state init_state = IOMMU_START_STATE;
227 
228 static int amd_iommu_enable_interrupts(void);
229 static int __init iommu_go_to_state(enum iommu_init_state state);
230 static void init_device_table_dma(struct amd_iommu_pci_seg *pci_seg);
231 
232 static bool amd_iommu_pre_enabled = true;
233 
234 static u32 amd_iommu_ivinfo __initdata;
235 
236 bool translation_pre_enabled(struct amd_iommu *iommu)
237 {
238 	return (iommu->flags & AMD_IOMMU_FLAG_TRANS_PRE_ENABLED);
239 }
240 
241 static void clear_translation_pre_enabled(struct amd_iommu *iommu)
242 {
243 	iommu->flags &= ~AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
244 }
245 
246 static void init_translation_status(struct amd_iommu *iommu)
247 {
248 	u64 ctrl;
249 
250 	ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
251 	if (ctrl & (1<<CONTROL_IOMMU_EN))
252 		iommu->flags |= AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
253 }
254 
255 static inline unsigned long tbl_size(int entry_size, int last_bdf)
256 {
257 	unsigned shift = PAGE_SHIFT +
258 			 get_order((last_bdf + 1) * entry_size);
259 
260 	return 1UL << shift;
261 }
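/*
 * Worked example, assuming 4 KiB pages and a 32-byte device table entry:
 * for a fully populated segment (last_bdf = 0xffff),
 * (0xffff + 1) * 32 = 2 MiB; get_order(2 MiB) = 9, so tbl_size()
 * returns 1UL << (12 + 9) = 2 MiB.
 */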
262 
263 int amd_iommu_get_num_iommus(void)
264 {
265 	return amd_iommus_present;
266 }
267 
268 /*
269  * Iterate through all the IOMMUs to compute the common EFR/EFR2
270  * masks and warn if any inconsistency is found.
271  */
272 static void get_global_efr(void)
273 {
274 	struct amd_iommu *iommu;
275 
276 	for_each_iommu(iommu) {
277 		u64 tmp = iommu->features;
278 		u64 tmp2 = iommu->features2;
279 
280 		if (list_is_first(&iommu->list, &amd_iommu_list)) {
281 			amd_iommu_efr = tmp;
282 			amd_iommu_efr2 = tmp2;
283 			continue;
284 		}
285 
286 		if (amd_iommu_efr == tmp &&
287 		    amd_iommu_efr2 == tmp2)
288 			continue;
289 
290 		pr_err(FW_BUG
291 		       "Found inconsistent EFR/EFR2 %#llx,%#llx (global %#llx,%#llx) on iommu%d (%04x:%02x:%02x.%01x).\n",
292 		       tmp, tmp2, amd_iommu_efr, amd_iommu_efr2,
293 		       iommu->index, iommu->pci_seg->id,
294 		       PCI_BUS_NUM(iommu->devid), PCI_SLOT(iommu->devid),
295 		       PCI_FUNC(iommu->devid));
296 
297 		amd_iommu_efr &= tmp;
298 		amd_iommu_efr2 &= tmp2;
299 	}
300 
301 	pr_info("Using global IVHD EFR:%#llx, EFR2:%#llx\n", amd_iommu_efr, amd_iommu_efr2);
302 }
303 
304 static bool check_feature_on_all_iommus(u64 mask)
305 {
306 	return !!(amd_iommu_efr & mask);
307 }
308 
309 /*
310  * For IVHD type 0x11/0x40, EFR is also available via IVHD.
311  * Default to IVHD EFR since it is available sooner
312  * (i.e. before PCI init).
313  */
314 static void __init early_iommu_features_init(struct amd_iommu *iommu,
315 					     struct ivhd_header *h)
316 {
317 	if (amd_iommu_ivinfo & IOMMU_IVINFO_EFRSUP) {
318 		iommu->features = h->efr_reg;
319 		iommu->features2 = h->efr_reg2;
320 	}
321 	if (amd_iommu_ivinfo & IOMMU_IVINFO_DMA_REMAP)
322 		amdr_ivrs_remap_support = true;
323 }
324 
325 /* Access to l1 and l2 indexed register spaces */
326 
327 static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address)
328 {
329 	u32 val;
330 
331 	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
332 	pci_read_config_dword(iommu->dev, 0xfc, &val);
333 	return val;
334 }
335 
336 static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val)
337 {
338 	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31));
339 	pci_write_config_dword(iommu->dev, 0xfc, val);
340 	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
341 }
342 
343 static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address)
344 {
345 	u32 val;
346 
347 	pci_write_config_dword(iommu->dev, 0xf0, address);
348 	pci_read_config_dword(iommu->dev, 0xf4, &val);
349 	return val;
350 }
351 
352 static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val)
353 {
354 	pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8));
355 	pci_write_config_dword(iommu->dev, 0xf4, val);
356 }
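/*
 * Usage sketch (hypothetical helper, not compiled): the indirect window
 * at 0xf0/0xf4 is typically used for a read-modify-write, as the
 * erratum workarounds further down in this file do for L2_DEBUG_3
 * (register 0x47):
 */
#if 0
static void example_l2_read_modify_write(struct amd_iommu *iommu)
{
	u32 val = iommu_read_l2(iommu, 0x47);	/* read L2_DEBUG_3 */

	iommu_write_l2(iommu, 0x47, val | BIT(0));	/* set bit 0 */
}
#endif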
357 
358 /****************************************************************************
359  *
360  * AMD IOMMU MMIO register space handling functions
361  *
362  * These functions are used to program the IOMMU device registers in
363  * MMIO space required by this driver.
364  *
365  ****************************************************************************/
366 
367 /*
368  * This function sets the exclusion range in the IOMMU. DMA accesses to the
369  * exclusion range are passed through untranslated.
370  */
371 static void iommu_set_exclusion_range(struct amd_iommu *iommu)
372 {
373 	u64 start = iommu->exclusion_start & PAGE_MASK;
374 	u64 limit = (start + iommu->exclusion_length - 1) & PAGE_MASK;
375 	u64 entry;
376 
377 	if (!iommu->exclusion_start)
378 		return;
379 
380 	entry = start | MMIO_EXCL_ENABLE_MASK;
381 	memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
382 			&entry, sizeof(entry));
383 
384 	entry = limit;
385 	memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
386 			&entry, sizeof(entry));
387 }
388 
389 static void iommu_set_cwwb_range(struct amd_iommu *iommu)
390 {
391 	u64 start = iommu_virt_to_phys((void *)iommu->cmd_sem);
392 	u64 entry = start & PM_ADDR_MASK;
393 
394 	if (!check_feature_on_all_iommus(FEATURE_SNP))
395 		return;
396 
397 	/* Note:
398 	 * Re-purpose Exclusion base/limit registers for Completion wait
399 	 * write-back base/limit.
400 	 */
401 	memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
402 		    &entry, sizeof(entry));
403 
404 	/* Note:
405 	 * Default to 4 Kbytes, which can be specified by setting base
406 	 * address equal to the limit address.
407 	 */
408 	memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
409 		    &entry, sizeof(entry));
410 }
411 
412 /* Programs the physical address of the device table into the IOMMU hardware */
413 static void iommu_set_device_table(struct amd_iommu *iommu)
414 {
415 	u64 entry;
416 	u32 dev_table_size = iommu->pci_seg->dev_table_size;
417 	void *dev_table = (void *)get_dev_table(iommu);
418 
419 	BUG_ON(iommu->mmio_base == NULL);
420 
421 	entry = iommu_virt_to_phys(dev_table);
422 	entry |= (dev_table_size >> 12) - 1;
423 	memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET,
424 			&entry, sizeof(entry));
425 }
426 
427 /* Generic functions to enable/disable certain features of the IOMMU. */
428 static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
429 {
430 	u64 ctrl;
431 
432 	ctrl = readq(iommu->mmio_base +  MMIO_CONTROL_OFFSET);
433 	ctrl |= (1ULL << bit);
434 	writeq(ctrl, iommu->mmio_base +  MMIO_CONTROL_OFFSET);
435 }
436 
437 static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
438 {
439 	u64 ctrl;
440 
441 	ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
442 	ctrl &= ~(1ULL << bit);
443 	writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
444 }
445 
446 static void iommu_set_inv_tlb_timeout(struct amd_iommu *iommu, int timeout)
447 {
448 	u64 ctrl;
449 
450 	ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
451 	ctrl &= ~CTRL_INV_TO_MASK;
452 	ctrl |= (timeout << CONTROL_INV_TIMEOUT) & CTRL_INV_TO_MASK;
453 	writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
454 }
455 
456 /* Function to enable the hardware */
457 static void iommu_enable(struct amd_iommu *iommu)
458 {
459 	iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
460 }
461 
462 static void iommu_disable(struct amd_iommu *iommu)
463 {
464 	if (!iommu->mmio_base)
465 		return;
466 
467 	/* Disable command buffer */
468 	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
469 
470 	/* Disable event logging and event interrupts */
471 	iommu_feature_disable(iommu, CONTROL_EVT_INT_EN);
472 	iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
473 
474 	/* Disable IOMMU GA_LOG */
475 	iommu_feature_disable(iommu, CONTROL_GALOG_EN);
476 	iommu_feature_disable(iommu, CONTROL_GAINT_EN);
477 
478 	/* Disable IOMMU hardware itself */
479 	iommu_feature_disable(iommu, CONTROL_IOMMU_EN);
480 }
481 
482 /*
483  * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in
484  * the system has one.
485  */
486 static u8 __iomem * __init iommu_map_mmio_space(u64 address, u64 end)
487 {
488 	if (!request_mem_region(address, end, "amd_iommu")) {
489 		pr_err("Cannot reserve memory region %llx-%llx for mmio\n",
490 			address, end);
491 		pr_err("This is a BIOS bug. Please contact your hardware vendor\n");
492 		return NULL;
493 	}
494 
495 	return (u8 __iomem *)ioremap(address, end);
496 }
497 
498 static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
499 {
500 	if (iommu->mmio_base)
501 		iounmap(iommu->mmio_base);
502 	release_mem_region(iommu->mmio_phys, iommu->mmio_phys_end);
503 }
504 
505 static inline u32 get_ivhd_header_size(struct ivhd_header *h)
506 {
507 	u32 size = 0;
508 
509 	switch (h->type) {
510 	case 0x10:
511 		size = 24;
512 		break;
513 	case 0x11:
514 	case 0x40:
515 		size = 40;
516 		break;
517 	}
518 	return size;
519 }
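/*
 * Note: the sizes above follow from struct ivhd_header: a type 0x10
 * header ends at efr_attr (1+1+2+2+2+8+2+2+4 = 24 bytes), while types
 * 0x11/0x40 append efr_reg and efr_reg2 (8 bytes each) for 40 bytes.
 */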
520 
521 /****************************************************************************
522  *
523  * The functions below belong to the first pass of AMD IOMMU ACPI table
524  * parsing. In this pass we try to find out the highest device id this
525  * code has to handle. Based on this information the size of the shared data
526  * structures is determined later.
527  *
528  ****************************************************************************/
529 
530 /*
531  * This function calculates the length of a given IVHD entry
532  */
533 static inline int ivhd_entry_length(u8 *ivhd)
534 {
535 	u32 type = ((struct ivhd_entry *)ivhd)->type;
536 
537 	if (type < 0x80) {
538 		return 0x04 << (*ivhd >> 6);
539 	} else if (type == IVHD_DEV_ACPI_HID) {
540 		/* For ACPI_HID, offset 21 is uid len */
541 		return *((u8 *)ivhd + 21) + 22;
542 	}
543 	return 0;
544 }
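/*
 * Worked example: for entry types below 0x80 the length is encoded in
 * bits 7:6 of the type byte, so 0x04 << (type >> 6) yields 4 bytes for
 * IVHD_DEV_SELECT (0x02, bits 7:6 = 00b) and 8 bytes for
 * IVHD_DEV_SPECIAL (0x48, bits 7:6 = 01b).
 */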
545 
546 /*
547  * After reading the highest device id from the IOMMU PCI capability header
548  * this function checks whether the ACPI table defines a higher device id
549  */
550 static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
551 {
552 	u8 *p = (void *)h, *end = (void *)h;
553 	struct ivhd_entry *dev;
554 	int last_devid = -EINVAL;
555 
556 	u32 ivhd_size = get_ivhd_header_size(h);
557 
558 	if (!ivhd_size) {
559 		pr_err("Unsupported IVHD type %#x\n", h->type);
560 		return -EINVAL;
561 	}
562 
563 	p += ivhd_size;
564 	end += h->length;
565 
566 	while (p < end) {
567 		dev = (struct ivhd_entry *)p;
568 		switch (dev->type) {
569 		case IVHD_DEV_ALL:
570 			/* Use maximum BDF value for DEV_ALL */
571 			return 0xffff;
572 		case IVHD_DEV_SELECT:
573 		case IVHD_DEV_RANGE_END:
574 		case IVHD_DEV_ALIAS:
575 		case IVHD_DEV_EXT_SELECT:
576 			/* all the above subfield types refer to device ids */
577 			if (dev->devid > last_devid)
578 				last_devid = dev->devid;
579 			break;
580 		default:
581 			break;
582 		}
583 		p += ivhd_entry_length(p);
584 	}
585 
586 	WARN_ON(p != end);
587 
588 	return last_devid;
589 }
590 
591 static int __init check_ivrs_checksum(struct acpi_table_header *table)
592 {
593 	int i;
594 	u8 checksum = 0, *p = (u8 *)table;
595 
596 	for (i = 0; i < table->length; ++i)
597 		checksum += p[i];
598 	if (checksum != 0) {
599 		/* ACPI table corrupt */
600 		pr_err(FW_BUG "IVRS invalid checksum\n");
601 		return -ENODEV;
602 	}
603 
604 	return 0;
605 }
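/*
 * Note: the ACPI rule verified above is that all table bytes, including
 * the checksum byte itself, sum to 0 modulo 256; a table builder
 * therefore sets checksum = -(sum of all other bytes).
 */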
606 
607 /*
608  * Iterate over all IVHD entries in the ACPI table and find the highest device
609  * id which we need to handle. This is the first of three functions which parse
610  * the ACPI table. So we check the checksum here.
611  */
612 static int __init find_last_devid_acpi(struct acpi_table_header *table, u16 pci_seg)
613 {
614 	u8 *p = (u8 *)table, *end = (u8 *)table;
615 	struct ivhd_header *h;
616 	int last_devid, last_bdf = 0;
617 
618 	p += IVRS_HEADER_LENGTH;
619 
620 	end += table->length;
621 	while (p < end) {
622 		h = (struct ivhd_header *)p;
623 		if (h->pci_seg == pci_seg &&
624 		    h->type == amd_iommu_target_ivhd_type) {
625 			last_devid = find_last_devid_from_ivhd(h);
626 
627 			if (last_devid < 0)
628 				return -EINVAL;
629 			if (last_devid > last_bdf)
630 				last_bdf = last_devid;
631 		}
632 		p += h->length;
633 	}
634 	WARN_ON(p != end);
635 
636 	return last_bdf;
637 }
638 
639 /****************************************************************************
640  *
641  * The following functions belong to the code path which parses the ACPI table
642  * the second time. In this ACPI parsing iteration we allocate IOMMU specific
643  * data structures, initialize the per PCI segment device/alias/rlookup
644  * tables and initialize the hardware.
645  *
646  ****************************************************************************/
647 
648 /* Allocate per PCI segment device table */
649 static inline int __init alloc_dev_table(struct amd_iommu_pci_seg *pci_seg)
650 {
651 	pci_seg->dev_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO | GFP_DMA32,
652 						      get_order(pci_seg->dev_table_size));
653 	if (!pci_seg->dev_table)
654 		return -ENOMEM;
655 
656 	return 0;
657 }
658 
659 static inline void free_dev_table(struct amd_iommu_pci_seg *pci_seg)
660 {
661 	free_pages((unsigned long)pci_seg->dev_table,
662 		    get_order(pci_seg->dev_table_size));
663 	pci_seg->dev_table = NULL;
664 }
665 
666 /* Allocate per PCI segment IOMMU rlookup table. */
667 static inline int __init alloc_rlookup_table(struct amd_iommu_pci_seg *pci_seg)
668 {
669 	pci_seg->rlookup_table = (void *)__get_free_pages(
670 						GFP_KERNEL | __GFP_ZERO,
671 						get_order(pci_seg->rlookup_table_size));
672 	if (pci_seg->rlookup_table == NULL)
673 		return -ENOMEM;
674 
675 	return 0;
676 }
677 
678 static inline void free_rlookup_table(struct amd_iommu_pci_seg *pci_seg)
679 {
680 	free_pages((unsigned long)pci_seg->rlookup_table,
681 		   get_order(pci_seg->rlookup_table_size));
682 	pci_seg->rlookup_table = NULL;
683 }
684 
685 static inline int __init alloc_irq_lookup_table(struct amd_iommu_pci_seg *pci_seg)
686 {
687 	pci_seg->irq_lookup_table = (void *)__get_free_pages(
688 					     GFP_KERNEL | __GFP_ZERO,
689 					     get_order(pci_seg->rlookup_table_size));
690 	kmemleak_alloc(pci_seg->irq_lookup_table,
691 		       pci_seg->rlookup_table_size, 1, GFP_KERNEL);
692 	if (pci_seg->irq_lookup_table == NULL)
693 		return -ENOMEM;
694 
695 	return 0;
696 }
697 
698 static inline void free_irq_lookup_table(struct amd_iommu_pci_seg *pci_seg)
699 {
700 	kmemleak_free(pci_seg->irq_lookup_table);
701 	free_pages((unsigned long)pci_seg->irq_lookup_table,
702 		   get_order(pci_seg->rlookup_table_size));
703 	pci_seg->irq_lookup_table = NULL;
704 }
705 
706 static int __init alloc_alias_table(struct amd_iommu_pci_seg *pci_seg)
707 {
708 	int i;
709 
710 	pci_seg->alias_table = (void *)__get_free_pages(GFP_KERNEL,
711 					get_order(pci_seg->alias_table_size));
712 	if (!pci_seg->alias_table)
713 		return -ENOMEM;
714 
715 	/*
716 	 * let every alias entry point to itself
717 	 */
718 	for (i = 0; i <= pci_seg->last_bdf; ++i)
719 		pci_seg->alias_table[i] = i;
720 
721 	return 0;
722 }
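/*
 * Sketch (hypothetical helper, not part of the driver): after IVHD
 * parsing, resolving the effective request ID is a single table read;
 * entries stay identity-mapped unless an IVHD alias entry overrode them.
 */
#if 0
static u16 example_effective_devid(struct amd_iommu_pci_seg *pci_seg, u16 devid)
{
	return pci_seg->alias_table[devid];
}
#endif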
723 
724 static void __init free_alias_table(struct amd_iommu_pci_seg *pci_seg)
725 {
726 	free_pages((unsigned long)pci_seg->alias_table,
727 		   get_order(pci_seg->alias_table_size));
728 	pci_seg->alias_table = NULL;
729 }
730 
731 /*
732  * Allocates the command buffer. This buffer is per AMD IOMMU. We can
733  * write commands to that buffer later and the IOMMU will execute them
734  * asynchronously
735  */
736 static int __init alloc_command_buffer(struct amd_iommu *iommu)
737 {
738 	iommu->cmd_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
739 						  get_order(CMD_BUFFER_SIZE));
740 
741 	return iommu->cmd_buf ? 0 : -ENOMEM;
742 }
743 
744 /*
745  * This function restarts event logging in case the IOMMU experienced
746  * an event log buffer overflow.
747  */
748 void amd_iommu_restart_event_logging(struct amd_iommu *iommu)
749 {
750 	iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
751 	iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
752 }
753 
754 /*
755  * This function resets the command buffer if the IOMMU stopped fetching
756  * commands from it.
757  */
758 static void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu)
759 {
760 	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
761 
762 	writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
763 	writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
764 	iommu->cmd_buf_head = 0;
765 	iommu->cmd_buf_tail = 0;
766 
767 	iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
768 }
769 
770 /*
771  * This function writes the command buffer address to the hardware and
772  * enables it.
773  */
774 static void iommu_enable_command_buffer(struct amd_iommu *iommu)
775 {
776 	u64 entry;
777 
778 	BUG_ON(iommu->cmd_buf == NULL);
779 
780 	entry = iommu_virt_to_phys(iommu->cmd_buf);
781 	entry |= MMIO_CMD_SIZE_512;
782 
783 	memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
784 		    &entry, sizeof(entry));
785 
786 	amd_iommu_reset_cmd_buffer(iommu);
787 }
788 
789 /*
790  * This function disables the command buffer
791  */
792 static void iommu_disable_command_buffer(struct amd_iommu *iommu)
793 {
794 	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
795 }
796 
797 static void __init free_command_buffer(struct amd_iommu *iommu)
798 {
799 	free_pages((unsigned long)iommu->cmd_buf, get_order(CMD_BUFFER_SIZE));
800 }
801 
802 static void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu,
803 					 gfp_t gfp, size_t size)
804 {
805 	int order = get_order(size);
806 	void *buf = (void *)__get_free_pages(gfp, order);
807 
808 	if (buf &&
809 	    check_feature_on_all_iommus(FEATURE_SNP) &&
810 	    set_memory_4k((unsigned long)buf, (1 << order))) {
811 		free_pages((unsigned long)buf, order);
812 		buf = NULL;
813 	}
814 
815 	return buf;
816 }
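/*
 * Note: when SNP is supported, buffers that the IOMMU writes to (the
 * completion-wait semaphore, event log and PPR log below) are expected
 * to be backed by 4 KiB mappings; set_memory_4k() splits any large-page
 * kernel mapping covering the allocation.
 */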
817 
818 /* allocates the memory where the IOMMU will log its events to */
819 static int __init alloc_event_buffer(struct amd_iommu *iommu)
820 {
821 	iommu->evt_buf = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO,
822 					      EVT_BUFFER_SIZE);
823 
824 	return iommu->evt_buf ? 0 : -ENOMEM;
825 }
826 
827 static void iommu_enable_event_buffer(struct amd_iommu *iommu)
828 {
829 	u64 entry;
830 
831 	BUG_ON(iommu->evt_buf == NULL);
832 
833 	entry = iommu_virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;
834 
835 	memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
836 		    &entry, sizeof(entry));
837 
838 	/* set head and tail to zero manually */
839 	writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
840 	writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);
841 
842 	iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
843 }
844 
845 /*
846  * This function disables the event log buffer
847  */
848 static void iommu_disable_event_buffer(struct amd_iommu *iommu)
849 {
850 	iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
851 }
852 
853 static void __init free_event_buffer(struct amd_iommu *iommu)
854 {
855 	free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
856 }
857 
858 /* allocates the memory where the IOMMU will log its PPR requests to */
859 static int __init alloc_ppr_log(struct amd_iommu *iommu)
860 {
861 	iommu->ppr_log = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO,
862 					      PPR_LOG_SIZE);
863 
864 	return iommu->ppr_log ? 0 : -ENOMEM;
865 }
866 
867 static void iommu_enable_ppr_log(struct amd_iommu *iommu)
868 {
869 	u64 entry;
870 
871 	if (iommu->ppr_log == NULL)
872 		return;
873 
874 	entry = iommu_virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512;
875 
876 	memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET,
877 		    &entry, sizeof(entry));
878 
879 	/* set head and tail to zero manually */
880 	writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
881 	writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
882 
883 	iommu_feature_enable(iommu, CONTROL_PPRLOG_EN);
884 	iommu_feature_enable(iommu, CONTROL_PPR_EN);
885 }
886 
887 static void __init free_ppr_log(struct amd_iommu *iommu)
888 {
889 	free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE));
890 }
891 
892 static void free_ga_log(struct amd_iommu *iommu)
893 {
894 #ifdef CONFIG_IRQ_REMAP
895 	free_pages((unsigned long)iommu->ga_log, get_order(GA_LOG_SIZE));
896 	free_pages((unsigned long)iommu->ga_log_tail, get_order(8));
897 #endif
898 }
899 
900 #ifdef CONFIG_IRQ_REMAP
901 static int iommu_ga_log_enable(struct amd_iommu *iommu)
902 {
903 	u32 status, i;
904 	u64 entry;
905 
906 	if (!iommu->ga_log)
907 		return -EINVAL;
908 
909 	entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512;
910 	memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET,
911 		    &entry, sizeof(entry));
912 	entry = (iommu_virt_to_phys(iommu->ga_log_tail) &
913 		 (BIT_ULL(52)-1)) & ~7ULL;
914 	memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET,
915 		    &entry, sizeof(entry));
916 	writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
917 	writel(0x00, iommu->mmio_base + MMIO_GA_TAIL_OFFSET);
918 
919 
920 	iommu_feature_enable(iommu, CONTROL_GAINT_EN);
921 	iommu_feature_enable(iommu, CONTROL_GALOG_EN);
922 
923 	for (i = 0; i < LOOP_TIMEOUT; ++i) {
924 		status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
925 		if (status & (MMIO_STATUS_GALOG_RUN_MASK))
926 			break;
927 		udelay(10);
928 	}
929 
930 	if (WARN_ON(i >= LOOP_TIMEOUT))
931 		return -EINVAL;
932 
933 	return 0;
934 }
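/*
 * Note: with LOOP_TIMEOUT = 2000000 and udelay(10), the poll above
 * waits for GALOG_RUN for at most 2,000,000 * 10 us = 20 seconds
 * before giving up.
 */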
935 
936 static int iommu_init_ga_log(struct amd_iommu *iommu)
937 {
938 	if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
939 		return 0;
940 
941 	iommu->ga_log = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
942 					get_order(GA_LOG_SIZE));
943 	if (!iommu->ga_log)
944 		goto err_out;
945 
946 	iommu->ga_log_tail = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
947 					get_order(8));
948 	if (!iommu->ga_log_tail)
949 		goto err_out;
950 
951 	return 0;
952 err_out:
953 	free_ga_log(iommu);
954 	return -EINVAL;
955 }
956 #endif /* CONFIG_IRQ_REMAP */
957 
958 static int __init alloc_cwwb_sem(struct amd_iommu *iommu)
959 {
960 	iommu->cmd_sem = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO, 1);
961 
962 	return iommu->cmd_sem ? 0 : -ENOMEM;
963 }
964 
965 static void __init free_cwwb_sem(struct amd_iommu *iommu)
966 {
967 	if (iommu->cmd_sem)
968 		free_page((unsigned long)iommu->cmd_sem);
969 }
970 
971 static void iommu_enable_xt(struct amd_iommu *iommu)
972 {
973 #ifdef CONFIG_IRQ_REMAP
974 	/*
975 	 * XT mode (32-bit APIC destination ID) requires
976 	 * GA mode (128-bit IRTE support) as a prerequisite.
977 	 */
978 	if (AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir) &&
979 	    amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
980 		iommu_feature_enable(iommu, CONTROL_XT_EN);
981 #endif /* CONFIG_IRQ_REMAP */
982 }
983 
984 static void iommu_enable_gt(struct amd_iommu *iommu)
985 {
986 	if (!iommu_feature(iommu, FEATURE_GT))
987 		return;
988 
989 	iommu_feature_enable(iommu, CONTROL_GT_EN);
990 }
991 
992 /* sets a specific bit in the device table entry. */
993 static void __set_dev_entry_bit(struct dev_table_entry *dev_table,
994 				u16 devid, u8 bit)
995 {
996 	int i = (bit >> 6) & 0x03;
997 	int _bit = bit & 0x3f;
998 
999 	dev_table[devid].data[i] |= (1UL << _bit);
1000 }
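/*
 * Worked example: a DTE is 256 bits held in four u64 words, so bit 98
 * lands in data[98 >> 6] = data[1] at bit position (98 & 0x3f) = 34.
 */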
1001 
1002 static void set_dev_entry_bit(struct amd_iommu *iommu, u16 devid, u8 bit)
1003 {
1004 	struct dev_table_entry *dev_table = get_dev_table(iommu);
1005 
1006 	return __set_dev_entry_bit(dev_table, devid, bit);
1007 }
1008 
1009 static int __get_dev_entry_bit(struct dev_table_entry *dev_table,
1010 			       u16 devid, u8 bit)
1011 {
1012 	int i = (bit >> 6) & 0x03;
1013 	int _bit = bit & 0x3f;
1014 
1015 	return (dev_table[devid].data[i] & (1UL << _bit)) >> _bit;
1016 }
1017 
1018 static int get_dev_entry_bit(struct amd_iommu *iommu, u16 devid, u8 bit)
1019 {
1020 	struct dev_table_entry *dev_table = get_dev_table(iommu);
1021 
1022 	return __get_dev_entry_bit(dev_table, devid, bit);
1023 }
1024 
1025 static bool __copy_device_table(struct amd_iommu *iommu)
1026 {
1027 	u64 int_ctl, int_tab_len, entry = 0;
1028 	struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
1029 	struct dev_table_entry *old_devtb = NULL;
1030 	u32 lo, hi, devid, old_devtb_size;
1031 	phys_addr_t old_devtb_phys;
1032 	u16 dom_id, dte_v, irq_v;
1033 	gfp_t gfp_flag;
1034 	u64 tmp;
1035 
1036 	/* Each IOMMU uses a separate device table with the same size */
1037 	lo = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET);
1038 	hi = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET + 4);
1039 	entry = (((u64) hi) << 32) + lo;
1040 
1041 	old_devtb_size = ((entry & ~PAGE_MASK) + 1) << 12;
1042 	if (old_devtb_size != pci_seg->dev_table_size) {
1043 		pr_err("The device table size of IOMMU:%d does not match the expected size!\n",
1044 			iommu->index);
1045 		return false;
1046 	}
1047 
1048 	/*
1049 	 * When SME is enabled in the first kernel, the entry includes the
1050 	 * memory encryption mask (sme_me_mask); we must remove the memory
1051 	 * encryption mask to obtain the true physical address in the kdump kernel.
1052 	 */
1053 	old_devtb_phys = __sme_clr(entry) & PAGE_MASK;
1054 
1055 	if (old_devtb_phys >= 0x100000000ULL) {
1056 		pr_err("The address of old device table is above 4G, not trustworthy!\n");
1057 		return false;
1058 	}
1059 	old_devtb = (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT) && is_kdump_kernel())
1060 		    ? (__force void *)ioremap_encrypted(old_devtb_phys,
1061 							pci_seg->dev_table_size)
1062 		    : memremap(old_devtb_phys, pci_seg->dev_table_size, MEMREMAP_WB);
1063 
1064 	if (!old_devtb)
1065 		return false;
1066 
1067 	gfp_flag = GFP_KERNEL | __GFP_ZERO | GFP_DMA32;
1068 	pci_seg->old_dev_tbl_cpy = (void *)__get_free_pages(gfp_flag,
1069 						    get_order(pci_seg->dev_table_size));
1070 	if (pci_seg->old_dev_tbl_cpy == NULL) {
1071 		pr_err("Failed to allocate memory for copying old device table!\n");
1072 		memunmap(old_devtb);
1073 		return false;
1074 	}
1075 
1076 	for (devid = 0; devid <= pci_seg->last_bdf; ++devid) {
1077 		pci_seg->old_dev_tbl_cpy[devid] = old_devtb[devid];
1078 		dom_id = old_devtb[devid].data[1] & DEV_DOMID_MASK;
1079 		dte_v = old_devtb[devid].data[0] & DTE_FLAG_V;
1080 
1081 		if (dte_v && dom_id) {
1082 			pci_seg->old_dev_tbl_cpy[devid].data[0] = old_devtb[devid].data[0];
1083 			pci_seg->old_dev_tbl_cpy[devid].data[1] = old_devtb[devid].data[1];
1084 			__set_bit(dom_id, amd_iommu_pd_alloc_bitmap);
1085 			/* If gcr3 table existed, mask it out */
1086 			if (old_devtb[devid].data[0] & DTE_FLAG_GV) {
1087 				tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B;
1088 				tmp |= DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C;
1089 				pci_seg->old_dev_tbl_cpy[devid].data[1] &= ~tmp;
1090 				tmp = DTE_GCR3_VAL_A(~0ULL) << DTE_GCR3_SHIFT_A;
1091 				tmp |= DTE_FLAG_GV;
1092 				pci_seg->old_dev_tbl_cpy[devid].data[0] &= ~tmp;
1093 			}
1094 		}
1095 
1096 		irq_v = old_devtb[devid].data[2] & DTE_IRQ_REMAP_ENABLE;
1097 		int_ctl = old_devtb[devid].data[2] & DTE_IRQ_REMAP_INTCTL_MASK;
1098 		int_tab_len = old_devtb[devid].data[2] & DTE_INTTABLEN_MASK;
1099 		if (irq_v && (int_ctl || int_tab_len)) {
1100 			if ((int_ctl != DTE_IRQ_REMAP_INTCTL) ||
1101 			    (int_tab_len != DTE_INTTABLEN)) {
1102 				pr_err("Wrong old irq remapping flag for devid %#x\n", devid);
1103 				memunmap(old_devtb);
1104 				return false;
1105 			}
1106 
1107 			pci_seg->old_dev_tbl_cpy[devid].data[2] = old_devtb[devid].data[2];
1108 		}
1109 	}
1110 	memunmap(old_devtb);
1111 
1112 	return true;
1113 }
1114 
1115 static bool copy_device_table(void)
1116 {
1117 	struct amd_iommu *iommu;
1118 	struct amd_iommu_pci_seg *pci_seg;
1119 
1120 	if (!amd_iommu_pre_enabled)
1121 		return false;
1122 
1123 	pr_warn("Translation is already enabled - trying to copy translation structures\n");
1124 
1125 	/*
1126 	 * All IOMMUs within a PCI segment share a common device table.
1127 	 * Hence copy the device table only once per PCI segment.
1128 	 */
1129 	for_each_pci_segment(pci_seg) {
1130 		for_each_iommu(iommu) {
1131 			if (pci_seg->id != iommu->pci_seg->id)
1132 				continue;
1133 			if (!__copy_device_table(iommu))
1134 				return false;
1135 			break;
1136 		}
1137 	}
1138 
1139 	return true;
1140 }
1141 
1142 void amd_iommu_apply_erratum_63(struct amd_iommu *iommu, u16 devid)
1143 {
1144 	int sysmgt;
1145 
1146 	sysmgt = get_dev_entry_bit(iommu, devid, DEV_ENTRY_SYSMGT1) |
1147 		 (get_dev_entry_bit(iommu, devid, DEV_ENTRY_SYSMGT2) << 1);
1148 
1149 	if (sysmgt == 0x01)
1150 		set_dev_entry_bit(iommu, devid, DEV_ENTRY_IW);
1151 }
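/*
 * Note: sysmgt above is the two-bit SYSMGT field assembled from the
 * SYSMGT1 (bit 0) and SYSMGT2 (bit 1) DTE bits; erratum 63 applies only
 * to the 01b encoding (SYSMGT1 set, SYSMGT2 clear), for which the IW
 * bit must also be set.
 */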
1152 
1153 /*
1154  * This function takes the device specific flags read from the ACPI
1155  * table and sets up the device table entry with that information
1156  */
1157 static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
1158 					   u16 devid, u32 flags, u32 ext_flags)
1159 {
1160 	if (flags & ACPI_DEVFLAG_INITPASS)
1161 		set_dev_entry_bit(iommu, devid, DEV_ENTRY_INIT_PASS);
1162 	if (flags & ACPI_DEVFLAG_EXTINT)
1163 		set_dev_entry_bit(iommu, devid, DEV_ENTRY_EINT_PASS);
1164 	if (flags & ACPI_DEVFLAG_NMI)
1165 		set_dev_entry_bit(iommu, devid, DEV_ENTRY_NMI_PASS);
1166 	if (flags & ACPI_DEVFLAG_SYSMGT1)
1167 		set_dev_entry_bit(iommu, devid, DEV_ENTRY_SYSMGT1);
1168 	if (flags & ACPI_DEVFLAG_SYSMGT2)
1169 		set_dev_entry_bit(iommu, devid, DEV_ENTRY_SYSMGT2);
1170 	if (flags & ACPI_DEVFLAG_LINT0)
1171 		set_dev_entry_bit(iommu, devid, DEV_ENTRY_LINT0_PASS);
1172 	if (flags & ACPI_DEVFLAG_LINT1)
1173 		set_dev_entry_bit(iommu, devid, DEV_ENTRY_LINT1_PASS);
1174 
1175 	amd_iommu_apply_erratum_63(iommu, devid);
1176 
1177 	amd_iommu_set_rlookup_table(iommu, devid);
1178 }
1179 
1180 int __init add_special_device(u8 type, u8 id, u32 *devid, bool cmd_line)
1181 {
1182 	struct devid_map *entry;
1183 	struct list_head *list;
1184 
1185 	if (type == IVHD_SPECIAL_IOAPIC)
1186 		list = &ioapic_map;
1187 	else if (type == IVHD_SPECIAL_HPET)
1188 		list = &hpet_map;
1189 	else
1190 		return -EINVAL;
1191 
1192 	list_for_each_entry(entry, list, list) {
1193 		if (!(entry->id == id && entry->cmd_line))
1194 			continue;
1195 
1196 		pr_info("Command-line override present for %s id %d - ignoring\n",
1197 			type == IVHD_SPECIAL_IOAPIC ? "IOAPIC" : "HPET", id);
1198 
1199 		*devid = entry->devid;
1200 
1201 		return 0;
1202 	}
1203 
1204 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1205 	if (!entry)
1206 		return -ENOMEM;
1207 
1208 	entry->id	= id;
1209 	entry->devid	= *devid;
1210 	entry->cmd_line	= cmd_line;
1211 
1212 	list_add_tail(&entry->list, list);
1213 
1214 	return 0;
1215 }
1216 
1217 static int __init add_acpi_hid_device(u8 *hid, u8 *uid, u32 *devid,
1218 				      bool cmd_line)
1219 {
1220 	struct acpihid_map_entry *entry;
1221 	struct list_head *list = &acpihid_map;
1222 
1223 	list_for_each_entry(entry, list, list) {
1224 		if (strcmp(entry->hid, hid) ||
1225 		    (*uid && *entry->uid && strcmp(entry->uid, uid)) ||
1226 		    !entry->cmd_line)
1227 			continue;
1228 
1229 		pr_info("Command-line override for hid:%s uid:%s\n",
1230 			hid, uid);
1231 		*devid = entry->devid;
1232 		return 0;
1233 	}
1234 
1235 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1236 	if (!entry)
1237 		return -ENOMEM;
1238 
1239 	memcpy(entry->uid, uid, strlen(uid));
1240 	memcpy(entry->hid, hid, strlen(hid));
1241 	entry->devid = *devid;
1242 	entry->cmd_line	= cmd_line;
1243 	entry->root_devid = (entry->devid & (~0x7));
1244 
1245 	pr_info("%s, add hid:%s, uid:%s, rdevid:%d\n",
1246 		entry->cmd_line ? "cmd" : "ivrs",
1247 		entry->hid, entry->uid, entry->root_devid);
1248 
1249 	list_add_tail(&entry->list, list);
1250 	return 0;
1251 }
1252 
1253 static int __init add_early_maps(void)
1254 {
1255 	int i, ret;
1256 
1257 	for (i = 0; i < early_ioapic_map_size; ++i) {
1258 		ret = add_special_device(IVHD_SPECIAL_IOAPIC,
1259 					 early_ioapic_map[i].id,
1260 					 &early_ioapic_map[i].devid,
1261 					 early_ioapic_map[i].cmd_line);
1262 		if (ret)
1263 			return ret;
1264 	}
1265 
1266 	for (i = 0; i < early_hpet_map_size; ++i) {
1267 		ret = add_special_device(IVHD_SPECIAL_HPET,
1268 					 early_hpet_map[i].id,
1269 					 &early_hpet_map[i].devid,
1270 					 early_hpet_map[i].cmd_line);
1271 		if (ret)
1272 			return ret;
1273 	}
1274 
1275 	for (i = 0; i < early_acpihid_map_size; ++i) {
1276 		ret = add_acpi_hid_device(early_acpihid_map[i].hid,
1277 					  early_acpihid_map[i].uid,
1278 					  &early_acpihid_map[i].devid,
1279 					  early_acpihid_map[i].cmd_line);
1280 		if (ret)
1281 			return ret;
1282 	}
1283 
1284 	return 0;
1285 }
1286 
1287 /*
1288  * Takes a pointer to an AMD IOMMU entry in the ACPI table and
1289  * initializes the hardware and our data structures with it.
1290  */
1291 static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
1292 					struct ivhd_header *h)
1293 {
1294 	u8 *p = (u8 *)h;
1295 	u8 *end = p, flags = 0;
1296 	u16 devid = 0, devid_start = 0, devid_to = 0, seg_id;
1297 	u32 dev_i, ext_flags = 0;
1298 	bool alias = false;
1299 	struct ivhd_entry *e;
1300 	struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
1301 	u32 ivhd_size;
1302 	int ret;
1303 
1304 
1305 	ret = add_early_maps();
1306 	if (ret)
1307 		return ret;
1308 
1309 	amd_iommu_apply_ivrs_quirks();
1310 
1311 	/*
1312 	 * First save the recommended feature enable bits from ACPI
1313 	 */
1314 	iommu->acpi_flags = h->flags;
1315 
1316 	/*
1317 	 * Done. Now parse the device entries
1318 	 */
1319 	ivhd_size = get_ivhd_header_size(h);
1320 	if (!ivhd_size) {
1321 		pr_err("Unsupported IVHD type %#x\n", h->type);
1322 		return -EINVAL;
1323 	}
1324 
1325 	p += ivhd_size;
1326 
1327 	end += h->length;
1328 
1329 
1330 	while (p < end) {
1331 		e = (struct ivhd_entry *)p;
1332 		seg_id = pci_seg->id;
1333 
1334 		switch (e->type) {
1335 		case IVHD_DEV_ALL:
1336 
1337 			DUMP_printk("  DEV_ALL\t\t\tflags: %02x\n", e->flags);
1338 
1339 			for (dev_i = 0; dev_i <= pci_seg->last_bdf; ++dev_i)
1340 				set_dev_entry_from_acpi(iommu, dev_i, e->flags, 0);
1341 			break;
1342 		case IVHD_DEV_SELECT:
1343 
1344 			DUMP_printk("  DEV_SELECT\t\t\t devid: %04x:%02x:%02x.%x "
1345 				    "flags: %02x\n",
1346 				    seg_id, PCI_BUS_NUM(e->devid),
1347 				    PCI_SLOT(e->devid),
1348 				    PCI_FUNC(e->devid),
1349 				    e->flags);
1350 
1351 			devid = e->devid;
1352 			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
1353 			break;
1354 		case IVHD_DEV_SELECT_RANGE_START:
1355 
1356 			DUMP_printk("  DEV_SELECT_RANGE_START\t "
1357 				    "devid: %04x:%02x:%02x.%x flags: %02x\n",
1358 				    seg_id, PCI_BUS_NUM(e->devid),
1359 				    PCI_SLOT(e->devid),
1360 				    PCI_FUNC(e->devid),
1361 				    e->flags);
1362 
1363 			devid_start = e->devid;
1364 			flags = e->flags;
1365 			ext_flags = 0;
1366 			alias = false;
1367 			break;
1368 		case IVHD_DEV_ALIAS:
1369 
1370 			DUMP_printk("  DEV_ALIAS\t\t\t devid: %04x:%02x:%02x.%x "
1371 				    "flags: %02x devid_to: %02x:%02x.%x\n",
1372 				    seg_id, PCI_BUS_NUM(e->devid),
1373 				    PCI_SLOT(e->devid),
1374 				    PCI_FUNC(e->devid),
1375 				    e->flags,
1376 				    PCI_BUS_NUM(e->ext >> 8),
1377 				    PCI_SLOT(e->ext >> 8),
1378 				    PCI_FUNC(e->ext >> 8));
1379 
1380 			devid = e->devid;
1381 			devid_to = e->ext >> 8;
1382 			set_dev_entry_from_acpi(iommu, devid   , e->flags, 0);
1383 			set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0);
1384 			pci_seg->alias_table[devid] = devid_to;
1385 			break;
1386 		case IVHD_DEV_ALIAS_RANGE:
1387 
1388 			DUMP_printk("  DEV_ALIAS_RANGE\t\t "
1389 				    "devid: %04x:%02x:%02x.%x flags: %02x "
1390 				    "devid_to: %04x:%02x:%02x.%x\n",
1391 				    seg_id, PCI_BUS_NUM(e->devid),
1392 				    PCI_SLOT(e->devid),
1393 				    PCI_FUNC(e->devid),
1394 				    e->flags,
1395 				    seg_id, PCI_BUS_NUM(e->ext >> 8),
1396 				    PCI_SLOT(e->ext >> 8),
1397 				    PCI_FUNC(e->ext >> 8));
1398 
1399 			devid_start = e->devid;
1400 			flags = e->flags;
1401 			devid_to = e->ext >> 8;
1402 			ext_flags = 0;
1403 			alias = true;
1404 			break;
1405 		case IVHD_DEV_EXT_SELECT:
1406 
1407 			DUMP_printk("  DEV_EXT_SELECT\t\t devid: %04x:%02x:%02x.%x "
1408 				    "flags: %02x ext: %08x\n",
1409 				    seg_id, PCI_BUS_NUM(e->devid),
1410 				    PCI_SLOT(e->devid),
1411 				    PCI_FUNC(e->devid),
1412 				    e->flags, e->ext);
1413 
1414 			devid = e->devid;
1415 			set_dev_entry_from_acpi(iommu, devid, e->flags,
1416 						e->ext);
1417 			break;
1418 		case IVHD_DEV_EXT_SELECT_RANGE:
1419 
1420 			DUMP_printk("  DEV_EXT_SELECT_RANGE\t devid: "
1421 				    "%04x:%02x:%02x.%x flags: %02x ext: %08x\n",
1422 				    seg_id, PCI_BUS_NUM(e->devid),
1423 				    PCI_SLOT(e->devid),
1424 				    PCI_FUNC(e->devid),
1425 				    e->flags, e->ext);
1426 
1427 			devid_start = e->devid;
1428 			flags = e->flags;
1429 			ext_flags = e->ext;
1430 			alias = false;
1431 			break;
1432 		case IVHD_DEV_RANGE_END:
1433 
1434 			DUMP_printk("  DEV_RANGE_END\t\t devid: %04x:%02x:%02x.%x\n",
1435 				    seg_id, PCI_BUS_NUM(e->devid),
1436 				    PCI_SLOT(e->devid),
1437 				    PCI_FUNC(e->devid));
1438 
1439 			devid = e->devid;
1440 			for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
1441 				if (alias) {
1442 					pci_seg->alias_table[dev_i] = devid_to;
1443 					set_dev_entry_from_acpi(iommu,
1444 						devid_to, flags, ext_flags);
1445 				}
1446 				set_dev_entry_from_acpi(iommu, dev_i,
1447 							flags, ext_flags);
1448 			}
1449 			break;
1450 		case IVHD_DEV_SPECIAL: {
1451 			u8 handle, type;
1452 			const char *var;
1453 			u32 devid;
1454 			int ret;
1455 
1456 			handle = e->ext & 0xff;
1457 			devid = PCI_SEG_DEVID_TO_SBDF(seg_id, (e->ext >> 8));
1458 			type   = (e->ext >> 24) & 0xff;
1459 
1460 			if (type == IVHD_SPECIAL_IOAPIC)
1461 				var = "IOAPIC";
1462 			else if (type == IVHD_SPECIAL_HPET)
1463 				var = "HPET";
1464 			else
1465 				var = "UNKNOWN";
1466 
1467 			DUMP_printk("  DEV_SPECIAL(%s[%d])\t\tdevid: %04x:%02x:%02x.%x\n",
1468 				    var, (int)handle,
1469 				    seg_id, PCI_BUS_NUM(devid),
1470 				    PCI_SLOT(devid),
1471 				    PCI_FUNC(devid));
1472 
1473 			ret = add_special_device(type, handle, &devid, false);
1474 			if (ret)
1475 				return ret;
1476 
1477 			/*
1478 			 * add_special_device might update the devid in case a
1479 			 * command-line override is present. So call
1480 			 * set_dev_entry_from_acpi after add_special_device.
1481 			 */
1482 			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
1483 
1484 			break;
1485 		}
1486 		case IVHD_DEV_ACPI_HID: {
1487 			u32 devid;
1488 			u8 hid[ACPIHID_HID_LEN];
1489 			u8 uid[ACPIHID_UID_LEN];
1490 			int ret;
1491 
1492 			if (h->type != 0x40) {
1493 				pr_err(FW_BUG "Invalid IVHD device type %#x\n",
1494 				       e->type);
1495 				break;
1496 			}
1497 
1498 			BUILD_BUG_ON(sizeof(e->ext_hid) != ACPIHID_HID_LEN - 1);
1499 			memcpy(hid, &e->ext_hid, ACPIHID_HID_LEN - 1);
1500 			hid[ACPIHID_HID_LEN - 1] = '\0';
1501 
1502 			if (!(*hid)) {
1503 				pr_err(FW_BUG "Invalid HID.\n");
1504 				break;
1505 			}
1506 
1507 			uid[0] = '\0';
1508 			switch (e->uidf) {
1509 			case UID_NOT_PRESENT:
1510 
1511 				if (e->uidl != 0)
1512 					pr_warn(FW_BUG "Invalid UID length.\n");
1513 
1514 				break;
1515 			case UID_IS_INTEGER:
1516 
1517 				sprintf(uid, "%d", e->uid);
1518 
1519 				break;
1520 			case UID_IS_CHARACTER:
1521 
1522 				memcpy(uid, &e->uid, e->uidl);
1523 				uid[e->uidl] = '\0';
1524 
1525 				break;
1526 			default:
1527 				break;
1528 			}
1529 
1530 			devid = PCI_SEG_DEVID_TO_SBDF(seg_id, e->devid);
1531 			DUMP_printk("  DEV_ACPI_HID(%s[%s])\t\tdevid: %04x:%02x:%02x.%x\n",
1532 				    hid, uid, seg_id,
1533 				    PCI_BUS_NUM(devid),
1534 				    PCI_SLOT(devid),
1535 				    PCI_FUNC(devid));
1536 
1537 			flags = e->flags;
1538 
1539 			ret = add_acpi_hid_device(hid, uid, &devid, false);
1540 			if (ret)
1541 				return ret;
1542 
1543 			/*
1544 			 * add_acpi_hid_device might update the devid in case a
1545 			 * command-line override is present. So call
1546 			 * set_dev_entry_from_acpi after add_acpi_hid_device.
1547 			 */
1548 			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
1549 
1550 			break;
1551 		}
1552 		default:
1553 			break;
1554 		}
1555 
1556 		p += ivhd_entry_length(p);
1557 	}
1558 
1559 	return 0;
1560 }
1561 
1562 /* Allocate PCI segment data structure */
1563 static struct amd_iommu_pci_seg *__init alloc_pci_segment(u16 id,
1564 					  struct acpi_table_header *ivrs_base)
1565 {
1566 	struct amd_iommu_pci_seg *pci_seg;
1567 	int last_bdf;
1568 
1569 	/*
1570 	 * First parse ACPI tables to find the largest Bus/Dev/Func we need to
1571 	 * handle in this PCI segment. Based on this information the shared data
1572 	 * structures for the PCI segments in the system will be allocated.
1573 	 */
1574 	last_bdf = find_last_devid_acpi(ivrs_base, id);
1575 	if (last_bdf < 0)
1576 		return NULL;
1577 
1578 	pci_seg = kzalloc(sizeof(struct amd_iommu_pci_seg), GFP_KERNEL);
1579 	if (pci_seg == NULL)
1580 		return NULL;
1581 
1582 	pci_seg->last_bdf = last_bdf;
1583 	DUMP_printk("PCI segment : 0x%0x, last bdf : 0x%04x\n", id, last_bdf);
1584 	pci_seg->dev_table_size     = tbl_size(DEV_TABLE_ENTRY_SIZE, last_bdf);
1585 	pci_seg->alias_table_size   = tbl_size(ALIAS_TABLE_ENTRY_SIZE, last_bdf);
1586 	pci_seg->rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE, last_bdf);
1587 
1588 	pci_seg->id = id;
1589 	init_llist_head(&pci_seg->dev_data_list);
1590 	INIT_LIST_HEAD(&pci_seg->unity_map);
1591 	list_add_tail(&pci_seg->list, &amd_iommu_pci_seg_list);
1592 
1593 	if (alloc_dev_table(pci_seg))
1594 		return NULL;
1595 	if (alloc_alias_table(pci_seg))
1596 		return NULL;
1597 	if (alloc_rlookup_table(pci_seg))
1598 		return NULL;
1599 
1600 	return pci_seg;
1601 }
1602 
1603 static struct amd_iommu_pci_seg *__init get_pci_segment(u16 id,
1604 					struct acpi_table_header *ivrs_base)
1605 {
1606 	struct amd_iommu_pci_seg *pci_seg;
1607 
1608 	for_each_pci_segment(pci_seg) {
1609 		if (pci_seg->id == id)
1610 			return pci_seg;
1611 	}
1612 
1613 	return alloc_pci_segment(id, ivrs_base);
1614 }
1615 
1616 static void __init free_pci_segments(void)
1617 {
1618 	struct amd_iommu_pci_seg *pci_seg, *next;
1619 
1620 	for_each_pci_segment_safe(pci_seg, next) {
1621 		list_del(&pci_seg->list);
1622 		free_irq_lookup_table(pci_seg);
1623 		free_rlookup_table(pci_seg);
1624 		free_alias_table(pci_seg);
1625 		free_dev_table(pci_seg);
1626 		kfree(pci_seg);
1627 	}
1628 }
1629 
1630 static void __init free_iommu_one(struct amd_iommu *iommu)
1631 {
1632 	free_cwwb_sem(iommu);
1633 	free_command_buffer(iommu);
1634 	free_event_buffer(iommu);
1635 	free_ppr_log(iommu);
1636 	free_ga_log(iommu);
1637 	iommu_unmap_mmio_space(iommu);
1638 }
1639 
1640 static void __init free_iommu_all(void)
1641 {
1642 	struct amd_iommu *iommu, *next;
1643 
1644 	for_each_iommu_safe(iommu, next) {
1645 		list_del(&iommu->list);
1646 		free_iommu_one(iommu);
1647 		kfree(iommu);
1648 	}
1649 }
1650 
1651 /*
1652  * Family15h Model 10h-1fh erratum 746 (IOMMU Logging May Stall Translations)
1653  * Workaround:
1654  *     BIOS should disable L2B miscellaneous clock gating by setting
1655  *     L2_L2B_CK_GATE_CONTROL[CKGateL2BMiscDisable](D0F2xF4_x90[2]) = 1b
1656  */
1657 static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
1658 {
1659 	u32 value;
1660 
1661 	if ((boot_cpu_data.x86 != 0x15) ||
1662 	    (boot_cpu_data.x86_model < 0x10) ||
1663 	    (boot_cpu_data.x86_model > 0x1f))
1664 		return;
1665 
1666 	pci_write_config_dword(iommu->dev, 0xf0, 0x90);
1667 	pci_read_config_dword(iommu->dev, 0xf4, &value);
1668 
1669 	if (value & BIT(2))
1670 		return;
1671 
1672 	/* Select NB indirect register 0x90 and enable writing */
1673 	pci_write_config_dword(iommu->dev, 0xf0, 0x90 | (1 << 8));
1674 
1675 	pci_write_config_dword(iommu->dev, 0xf4, value | 0x4);
1676 	pci_info(iommu->dev, "Applying erratum 746 workaround\n");
1677 
1678 	/* Clear the enable writing bit */
1679 	pci_write_config_dword(iommu->dev, 0xf0, 0x90);
1680 }
1681 
1682 /*
1683  * Family15h Model 30h-3fh (IOMMU Mishandles ATS Write Permission)
1684  * Workaround:
1685  *     BIOS should enable ATS write permission check by setting
1686  *     L2_DEBUG_3[AtsIgnoreIWDis](D0F2xF4_x47[0]) = 1b
1687  */
1688 static void amd_iommu_ats_write_check_workaround(struct amd_iommu *iommu)
1689 {
1690 	u32 value;
1691 
1692 	if ((boot_cpu_data.x86 != 0x15) ||
1693 	    (boot_cpu_data.x86_model < 0x30) ||
1694 	    (boot_cpu_data.x86_model > 0x3f))
1695 		return;
1696 
1697 	/* Test L2_DEBUG_3[AtsIgnoreIWDis] == 1 */
1698 	value = iommu_read_l2(iommu, 0x47);
1699 
1700 	if (value & BIT(0))
1701 		return;
1702 
1703 	/* Set L2_DEBUG_3[AtsIgnoreIWDis] = 1 */
1704 	iommu_write_l2(iommu, 0x47, value | BIT(0));
1705 
1706 	pci_info(iommu->dev, "Applying ATS write check workaround\n");
1707 }
1708 
1709 /*
1710  * This function glues together the initialization of one IOMMU from its
1711  * ACPI table entry and maps the MMIO register space. It does NOT enable
1712  * the IOMMU; that is done afterwards.
1713  */
1714 static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h,
1715 				 struct acpi_table_header *ivrs_base)
1716 {
1717 	struct amd_iommu_pci_seg *pci_seg;
1718 
1719 	pci_seg = get_pci_segment(h->pci_seg, ivrs_base);
1720 	if (pci_seg == NULL)
1721 		return -ENOMEM;
1722 	iommu->pci_seg = pci_seg;
1723 
1724 	raw_spin_lock_init(&iommu->lock);
1725 	iommu->cmd_sem_val = 0;
1726 
1727 	/* Add IOMMU to internal data structures */
1728 	list_add_tail(&iommu->list, &amd_iommu_list);
1729 	iommu->index = amd_iommus_present++;
1730 
1731 	if (unlikely(iommu->index >= MAX_IOMMUS)) {
1732 		WARN(1, "System has more IOMMUs than supported by this driver\n");
1733 		return -ENOSYS;
1734 	}
1735 
1736 	/* Index is fine - add IOMMU to the array */
1737 	amd_iommus[iommu->index] = iommu;
1738 
1739 	/*
1740 	 * Copy data from ACPI table entry to the iommu struct
1741 	 */
1742 	iommu->devid   = h->devid;
1743 	iommu->cap_ptr = h->cap_ptr;
1744 	iommu->mmio_phys = h->mmio_phys;
1745 
1746 	switch (h->type) {
1747 	case 0x10:
1748 		/* Check if IVHD EFR contains proper max banks/counters */
1749 		if ((h->efr_attr != 0) &&
1750 		    ((h->efr_attr & (0xF << 13)) != 0) &&
1751 		    ((h->efr_attr & (0x3F << 17)) != 0))
1752 			iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
1753 		else
1754 			iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
1755 
1756 		/*
1757 		 * Note: GA (128-bit IRTE) mode requires cmpxchg16b support.
1758 		 * GAM also requires GA mode. Therefore, we need to
1759 		 * check cmpxchg16b support before enabling it.
1760 		 */
1761 		if (!boot_cpu_has(X86_FEATURE_CX16) ||
1762 		    ((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0))
1763 			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
1764 		break;
1765 	case 0x11:
1766 	case 0x40:
1767 		if (h->efr_reg & (1 << 9))
1768 			iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
1769 		else
1770 			iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
1771 
1772 		/*
1773 		 * Note: GA (128-bit IRTE) mode requires cmpxchg16b support.
1774 		 * XT and GAM also require GA mode. Therefore, we need to
1775 		 * check cmpxchg16b support before enabling them.
1776 		 */
1777 		if (!boot_cpu_has(X86_FEATURE_CX16) ||
1778 		    ((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0)) {
1779 			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
1780 			break;
1781 		}
1782 
1783 		if (h->efr_reg & BIT(IOMMU_EFR_XTSUP_SHIFT))
1784 			amd_iommu_xt_mode = IRQ_REMAP_X2APIC_MODE;
1785 
1786 		early_iommu_features_init(iommu, h);
1787 
1788 		break;
1789 	default:
1790 		return -EINVAL;
1791 	}
1792 
1793 	iommu->mmio_base = iommu_map_mmio_space(iommu->mmio_phys,
1794 						iommu->mmio_phys_end);
1795 	if (!iommu->mmio_base)
1796 		return -ENOMEM;
1797 
1798 	return init_iommu_from_acpi(iommu, h);
1799 }
1800 
1801 static int __init init_iommu_one_late(struct amd_iommu *iommu)
1802 {
1803 	int ret;
1804 
1805 	if (alloc_cwwb_sem(iommu))
1806 		return -ENOMEM;
1807 
1808 	if (alloc_command_buffer(iommu))
1809 		return -ENOMEM;
1810 
1811 	if (alloc_event_buffer(iommu))
1812 		return -ENOMEM;
1813 
1814 	iommu->int_enabled = false;
1815 
1816 	init_translation_status(iommu);
1817 	if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
1818 		iommu_disable(iommu);
1819 		clear_translation_pre_enabled(iommu);
1820 		pr_warn("Translation was enabled for IOMMU:%d but we are not in kdump mode\n",
1821 			iommu->index);
1822 	}
1823 	if (amd_iommu_pre_enabled)
1824 		amd_iommu_pre_enabled = translation_pre_enabled(iommu);
1825 
1826 	if (amd_iommu_irq_remap) {
1827 		ret = amd_iommu_create_irq_domain(iommu);
1828 		if (ret)
1829 			return ret;
1830 	}
1831 
1832 	/*
1833 	 * Make sure IOMMU is not considered to translate itself. The IVRS
1834 	 * table tells us so, but this is a lie!
1835 	 */
1836 	iommu->pci_seg->rlookup_table[iommu->devid] = NULL;
1837 
1838 	return 0;
1839 }
1840 
1841 /**
1842  * get_highest_supported_ivhd_type - Look up the appropriate IVHD type
1843  * @ivrs: Pointer to the IVRS header
1844  *
1845  * This function searches through all IVHD blocks for the highest supported IVHD type
1846  */
1847 static u8 get_highest_supported_ivhd_type(struct acpi_table_header *ivrs)
1848 {
1849 	u8 *base = (u8 *)ivrs;
1850 	struct ivhd_header *ivhd = (struct ivhd_header *)
1851 					(base + IVRS_HEADER_LENGTH);
1852 	u8 last_type = ivhd->type;
1853 	u16 devid = ivhd->devid;
1854 
1855 	while (((u8 *)ivhd - base < ivrs->length) &&
1856 	       (ivhd->type <= ACPI_IVHD_TYPE_MAX_SUPPORTED)) {
1857 		u8 *p = (u8 *) ivhd;
1858 
1859 		if (ivhd->devid == devid)
1860 			last_type = ivhd->type;
1861 		ivhd = (struct ivhd_header *)(p + ivhd->length);
1862 	}
1863 
1864 	return last_type;
1865 }
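/*
 * Worked example: if the IVRS carries IVHD blocks of types 0x10, 0x11
 * and 0x40 for the same IOMMU devid (the blocks appear in ascending
 * type order, which the loop above relies on), the function returns
 * 0x40, the highest type not exceeding ACPI_IVHD_TYPE_MAX_SUPPORTED.
 */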
1866 
1867 /*
1868  * Iterates over all IOMMU entries in the ACPI table, allocates the
1869  * IOMMU structure and initializes it with init_iommu_one()
1870  */
1871 static int __init init_iommu_all(struct acpi_table_header *table)
1872 {
1873 	u8 *p = (u8 *)table, *end = (u8 *)table;
1874 	struct ivhd_header *h;
1875 	struct amd_iommu *iommu;
1876 	int ret;
1877 
1878 	end += table->length;
1879 	p += IVRS_HEADER_LENGTH;
1880 
1881 	/* Phase 1: Process all IVHD blocks */
1882 	while (p < end) {
1883 		h = (struct ivhd_header *)p;
1884 		if (*p == amd_iommu_target_ivhd_type) {
1885 
1886 			DUMP_printk("device: %04x:%02x:%02x.%01x cap: %04x "
1887 				    "flags: %01x info %04x\n",
1888 				    h->pci_seg, PCI_BUS_NUM(h->devid),
1889 				    PCI_SLOT(h->devid), PCI_FUNC(h->devid),
1890 				    h->cap_ptr, h->flags, h->info);
1891 			DUMP_printk("       mmio-addr: %016llx\n",
1892 				    h->mmio_phys);
1893 
1894 			iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
1895 			if (iommu == NULL)
1896 				return -ENOMEM;
1897 
1898 			ret = init_iommu_one(iommu, h, table);
1899 			if (ret)
1900 				return ret;
1901 		}
1902 		p += h->length;
1903 
1904 	}
1905 	WARN_ON(p != end);
1906 
1907 	/* Phase 2: Early feature support check */
1908 	get_global_efr();
1909 
1910 	/* Phase 3: Enabling IOMMU features */
1911 	for_each_iommu(iommu) {
1912 		ret = init_iommu_one_late(iommu);
1913 		if (ret)
1914 			return ret;
1915 	}
1916 
1917 	return 0;
1918 }
1919 
1920 static void init_iommu_perf_ctr(struct amd_iommu *iommu)
1921 {
1922 	u64 val;
1923 	struct pci_dev *pdev = iommu->dev;
1924 
1925 	if (!iommu_feature(iommu, FEATURE_PC))
1926 		return;
1927 
1928 	amd_iommu_pc_present = true;
1929 
1930 	pci_info(pdev, "IOMMU performance counters supported\n");
1931 
1932 	val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET);
1933 	iommu->max_banks = (u8) ((val >> 12) & 0x3f);
1934 	iommu->max_counters = (u8) ((val >> 7) & 0xf);
1935 
1936 	return;
1937 }
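
/*
 * Sketch (assumption-laden): the shifts above imply a counter-configuration
 * layout with the number of banks in bits 17:12 and the number of counters
 * in bits 10:7. With <linux/bitfield.h> the same decode could be written as
 * below; the PC_* mask names are hypothetical.
 */
#if 0	/* illustrative only */
#include <linux/bitfield.h>

#define PC_NBANKS	GENMASK(17, 12)
#define PC_NCOUNTERS	GENMASK(10, 7)

static void decode_pc_conf(u32 val, u8 *banks, u8 *counters)
{
	*banks    = FIELD_GET(PC_NBANKS, val);
	*counters = FIELD_GET(PC_NCOUNTERS, val);
}
#endif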
1938 
1939 static ssize_t amd_iommu_show_cap(struct device *dev,
1940 				  struct device_attribute *attr,
1941 				  char *buf)
1942 {
1943 	struct amd_iommu *iommu = dev_to_amd_iommu(dev);
1944 	return sprintf(buf, "%x\n", iommu->cap);
1945 }
1946 static DEVICE_ATTR(cap, S_IRUGO, amd_iommu_show_cap, NULL);
1947 
1948 static ssize_t amd_iommu_show_features(struct device *dev,
1949 				       struct device_attribute *attr,
1950 				       char *buf)
1951 {
1952 	struct amd_iommu *iommu = dev_to_amd_iommu(dev);
1953 	return sprintf(buf, "%llx:%llx\n", iommu->features2, iommu->features);
1954 }
1955 static DEVICE_ATTR(features, S_IRUGO, amd_iommu_show_features, NULL);
1956 
1957 static struct attribute *amd_iommu_attrs[] = {
1958 	&dev_attr_cap.attr,
1959 	&dev_attr_features.attr,
1960 	NULL,
1961 };
1962 
1963 static struct attribute_group amd_iommu_group = {
1964 	.name = "amd-iommu",
1965 	.attrs = amd_iommu_attrs,
1966 };
1967 
1968 static const struct attribute_group *amd_iommu_groups[] = {
1969 	&amd_iommu_group,
1970 	NULL,
1971 };
1972 
1973 /*
1974  * Note: IVHD types 0x11 and 0x40 also contain an exact copy
1975  * of the IOMMU Extended Feature Register [MMIO Offset 0030h].
1976  * Default to EFR in IVHD since it is available sooner (i.e. before PCI init).
1977  */
1978 static void __init late_iommu_features_init(struct amd_iommu *iommu)
1979 {
1980 	u64 features, features2;
1981 
1982 	if (!(iommu->cap & (1 << IOMMU_CAP_EFR)))
1983 		return;
1984 
1985 	/* read extended feature bits */
1986 	features = readq(iommu->mmio_base + MMIO_EXT_FEATURES);
1987 	features2 = readq(iommu->mmio_base + MMIO_EXT_FEATURES2);
1988 
1989 	if (!iommu->features) {
1990 		iommu->features = features;
1991 		iommu->features2 = features2;
1992 		return;
1993 	}
1994 
1995 	/*
1996 	 * Sanity check and warn if EFR values from
1997 	 * IVHD and MMIO conflict.
1998 	 */
1999 	if (features != iommu->features ||
2000 	    features2 != iommu->features2) {
2001 		pr_warn(FW_WARN
2002 			"EFR mismatch. Use IVHD EFR (%#llx : %#llx), EFR2 (%#llx : %#llx).\n",
2003 			features, iommu->features,
2004 			features2, iommu->features2);
2005 	}
2006 }
2007 
2008 static int __init iommu_init_pci(struct amd_iommu *iommu)
2009 {
2010 	int cap_ptr = iommu->cap_ptr;
2011 	int ret;
2012 
2013 	iommu->dev = pci_get_domain_bus_and_slot(iommu->pci_seg->id,
2014 						 PCI_BUS_NUM(iommu->devid),
2015 						 iommu->devid & 0xff);
2016 	if (!iommu->dev)
2017 		return -ENODEV;
2018 
2019 	/* Prevent binding other PCI device drivers to IOMMU devices */
2020 	iommu->dev->match_driver = false;
2021 
2022 	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
2023 			      &iommu->cap);
2024 
2025 	if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
2026 		amd_iommu_iotlb_sup = false;
2027 
2028 	late_iommu_features_init(iommu);
2029 
2030 	if (iommu_feature(iommu, FEATURE_GT)) {
2031 		int glxval;
2032 		u32 max_pasid;
2033 		u64 pasmax;
2034 
2035 		pasmax = iommu->features & FEATURE_PASID_MASK;
2036 		pasmax >>= FEATURE_PASID_SHIFT;
2037 		max_pasid  = (1 << (pasmax + 1)) - 1;
2038 
2039 		amd_iommu_max_pasid = min(amd_iommu_max_pasid, max_pasid);
2040 
2041 		BUG_ON(amd_iommu_max_pasid & ~PASID_MASK);
2042 
2043 		glxval   = iommu->features & FEATURE_GLXVAL_MASK;
2044 		glxval >>= FEATURE_GLXVAL_SHIFT;
2045 
2046 		if (amd_iommu_max_glx_val == -1)
2047 			amd_iommu_max_glx_val = glxval;
2048 		else
2049 			amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval);
2050 	}
2051 
2052 	if (iommu_feature(iommu, FEATURE_GT) &&
2053 	    iommu_feature(iommu, FEATURE_PPR)) {
2054 		iommu->is_iommu_v2   = true;
2055 		amd_iommu_v2_present = true;
2056 	}
2057 
2058 	if (iommu_feature(iommu, FEATURE_PPR) && alloc_ppr_log(iommu))
2059 		return -ENOMEM;
2060 
2061 	if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE)) {
2062 		pr_info("Using strict mode due to virtualization\n");
2063 		iommu_set_dma_strict();
2064 		amd_iommu_np_cache = true;
2065 	}
2066 
2067 	init_iommu_perf_ctr(iommu);
2068 
2069 	if (amd_iommu_pgtable == AMD_IOMMU_V2) {
2070 		if (!iommu_feature(iommu, FEATURE_GIOSUP) ||
2071 		    !iommu_feature(iommu, FEATURE_GT)) {
2072 			pr_warn("Cannot enable v2 page table for DMA-API. Falling back to v1.\n");
2073 			amd_iommu_pgtable = AMD_IOMMU_V1;
2074 		} else if (iommu_default_passthrough()) {
2075 			pr_warn("V2 page table doesn't support passthrough mode. Falling back to v1.\n");
2076 			amd_iommu_pgtable = AMD_IOMMU_V1;
2077 		}
2078 	}
2079 
2080 	if (is_rd890_iommu(iommu->dev)) {
2081 		int i, j;
2082 
2083 		iommu->root_pdev =
2084 			pci_get_domain_bus_and_slot(iommu->pci_seg->id,
2085 						    iommu->dev->bus->number,
2086 						    PCI_DEVFN(0, 0));
2087 
2088 		/*
2089 		 * Some rd890 systems may not be fully reconfigured by the
2090 		 * BIOS, so it's necessary for us to store this information so
2091 		 * it can be reprogrammed on resume
2092 		 */
2093 		pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4,
2094 				&iommu->stored_addr_lo);
2095 		pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8,
2096 				&iommu->stored_addr_hi);
2097 
2098 		/* Low bit locks writes to configuration space */
2099 		iommu->stored_addr_lo &= ~1;
2100 
2101 		for (i = 0; i < 6; i++)
2102 			for (j = 0; j < 0x12; j++)
2103 				iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j);
2104 
2105 		for (i = 0; i < 0x83; i++)
2106 			iommu->stored_l2[i] = iommu_read_l2(iommu, i);
2107 	}
2108 
2109 	amd_iommu_erratum_746_workaround(iommu);
2110 	amd_iommu_ats_write_check_workaround(iommu);
2111 
2112 	ret = iommu_device_sysfs_add(&iommu->iommu, &iommu->dev->dev,
2113 			       amd_iommu_groups, "ivhd%d", iommu->index);
2114 	if (ret)
2115 		return ret;
2116 
2117 	iommu_device_register(&iommu->iommu, &amd_iommu_ops, NULL);
2118 
2119 	return pci_enable_device(iommu->dev);
2120 }
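
/*
 * Note (editorial sketch): the PASID computation above treats the
 * FEATURE_PASID field as the supported PASID width minus one, so a field
 * value of e.g. 0xF yields max_pasid = (1 << 16) - 1 = 0xFFFF, i.e.
 * 16-bit PASIDs. A minimal helper expressing the same arithmetic:
 */
#if 0	/* illustrative only */
static u32 pasid_field_to_max_pasid(unsigned int pasmax)
{
	return (1U << (pasmax + 1)) - 1;	/* 0xF -> 0xFFFF */
}
#endif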
2121 
2122 static void print_iommu_info(void)
2123 {
2124 	static const char * const feat_str[] = {
2125 		"PreF", "PPR", "X2APIC", "NX", "GT", "[5]",
2126 		"IA", "GA", "HE", "PC"
2127 	};
2128 	struct amd_iommu *iommu;
2129 
2130 	for_each_iommu(iommu) {
2131 		struct pci_dev *pdev = iommu->dev;
2132 		int i;
2133 
2134 		pci_info(pdev, "Found IOMMU cap 0x%x\n", iommu->cap_ptr);
2135 
2136 		if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
2137 			pr_info("Extended features (%#llx, %#llx):", iommu->features, iommu->features2);
2138 
2139 			for (i = 0; i < ARRAY_SIZE(feat_str); ++i) {
2140 				if (iommu_feature(iommu, (1ULL << i)))
2141 					pr_cont(" %s", feat_str[i]);
2142 			}
2143 
2144 			if (iommu->features & FEATURE_GAM_VAPIC)
2145 				pr_cont(" GA_vAPIC");
2146 
2147 			if (iommu->features & FEATURE_SNP)
2148 				pr_cont(" SNP");
2149 
2150 			pr_cont("\n");
2151 		}
2152 	}
2153 	if (irq_remapping_enabled) {
2154 		pr_info("Interrupt remapping enabled\n");
2155 		if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
2156 			pr_info("X2APIC enabled\n");
2157 	}
2158 	if (amd_iommu_pgtable == AMD_IOMMU_V2)
2159 		pr_info("V2 page table enabled\n");
2160 }
2161 
2162 static int __init amd_iommu_init_pci(void)
2163 {
2164 	struct amd_iommu *iommu;
2165 	struct amd_iommu_pci_seg *pci_seg;
2166 	int ret;
2167 
2168 	for_each_iommu(iommu) {
2169 		ret = iommu_init_pci(iommu);
2170 		if (ret) {
2171 			pr_err("IOMMU%d: Failed to initialize IOMMU Hardware (error=%d)!\n",
2172 			       iommu->index, ret);
2173 			goto out;
2174 		}
2175 		/* Need to setup range after PCI init */
2176 		iommu_set_cwwb_range(iommu);
2177 	}
2178 
2179 	/*
2180 	 * Order is important here to make sure any unity map requirements are
2181 	 * fulfilled. The unity mappings are created and written to the device
2182 	 * table during the amd_iommu_init_api() call.
2183 	 *
2184 	 * After that we call init_device_table_dma() to make sure any
2185 	 * uninitialized DTE will block DMA, and in the end we flush the caches
2186 	 * of all IOMMUs to make sure the changes to the device table are
2187 	 * active.
2188 	 */
2189 	ret = amd_iommu_init_api();
2190 	if (ret) {
2191 		pr_err("IOMMU: Failed to initialize IOMMU-API interface (error=%d)!\n",
2192 		       ret);
2193 		goto out;
2194 	}
2195 
2196 	for_each_pci_segment(pci_seg)
2197 		init_device_table_dma(pci_seg);
2198 
2199 	for_each_iommu(iommu)
2200 		iommu_flush_all_caches(iommu);
2201 
2202 	print_iommu_info();
2203 
2204 out:
2205 	return ret;
2206 }
2207 
2208 /****************************************************************************
2209  *
2210  * The following functions initialize the MSI interrupts for all IOMMUs
2211  * in the system. It's a bit challenging because there could be multiple
2212  * IOMMUs per PCI BDF but we can call pci_enable_msi(x) only once per
2213  * pci_dev.
2214  *
2215  ****************************************************************************/
2216 
2217 static int iommu_setup_msi(struct amd_iommu *iommu)
2218 {
2219 	int r;
2220 
2221 	r = pci_enable_msi(iommu->dev);
2222 	if (r)
2223 		return r;
2224 
2225 	r = request_threaded_irq(iommu->dev->irq,
2226 				 amd_iommu_int_handler,
2227 				 amd_iommu_int_thread,
2228 				 0, "AMD-Vi",
2229 				 iommu);
2230 
2231 	if (r) {
2232 		pci_disable_msi(iommu->dev);
2233 		return r;
2234 	}
2235 
2236 	return 0;
2237 }
2238 
2239 union intcapxt {
2240 	u64	capxt;
2241 	struct {
2242 		u64	reserved_0		:  2,
2243 			dest_mode_logical	:  1,
2244 			reserved_1		:  5,
2245 			destid_0_23		: 24,
2246 			vector			:  8,
2247 			reserved_2		: 16,
2248 			destid_24_31		:  8;
2249 	};
2250 } __attribute__ ((packed));
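
/*
 * Sketch: the bitfields above place the low 24 bits of the destination
 * APIC ID in capxt bits 31:8 and the high 8 bits in bits 63:56, with the
 * vector in bits 39:32. Composing a value for a given CPU could look like
 * the hypothetical helper below, mirroring intcapxt_unmask_irq() further
 * down.
 */
#if 0	/* illustrative only */
static u64 intcapxt_compose(u32 dest_apicid, u8 vector, bool logical)
{
	union intcapxt xt = { .capxt = 0ULL };

	xt.dest_mode_logical = logical;
	xt.vector            = vector;
	xt.destid_0_23       = dest_apicid & GENMASK(23, 0);
	xt.destid_24_31      = dest_apicid >> 24;

	return xt.capxt;
}
#endif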
2251 
2252 
2253 static struct irq_chip intcapxt_controller;
2254 
2255 static int intcapxt_irqdomain_activate(struct irq_domain *domain,
2256 				       struct irq_data *irqd, bool reserve)
2257 {
2258 	return 0;
2259 }
2260 
2261 static void intcapxt_irqdomain_deactivate(struct irq_domain *domain,
2262 					  struct irq_data *irqd)
2263 {
2264 }
2265 
2266 
2267 static int intcapxt_irqdomain_alloc(struct irq_domain *domain, unsigned int virq,
2268 				    unsigned int nr_irqs, void *arg)
2269 {
2270 	struct irq_alloc_info *info = arg;
2271 	int i, ret;
2272 
2273 	if (!info || info->type != X86_IRQ_ALLOC_TYPE_AMDVI)
2274 		return -EINVAL;
2275 
2276 	ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
2277 	if (ret < 0)
2278 		return ret;
2279 
2280 	for (i = virq; i < virq + nr_irqs; i++) {
2281 		struct irq_data *irqd = irq_domain_get_irq_data(domain, i);
2282 
2283 		irqd->chip = &intcapxt_controller;
2284 		irqd->chip_data = info->data;
2285 		__irq_set_handler(i, handle_edge_irq, 0, "edge");
2286 	}
2287 
2288 	return ret;
2289 }
2290 
2291 static void intcapxt_irqdomain_free(struct irq_domain *domain, unsigned int virq,
2292 				    unsigned int nr_irqs)
2293 {
2294 	irq_domain_free_irqs_top(domain, virq, nr_irqs);
2295 }
2296 
2297 
2298 static void intcapxt_unmask_irq(struct irq_data *irqd)
2299 {
2300 	struct amd_iommu *iommu = irqd->chip_data;
2301 	struct irq_cfg *cfg = irqd_cfg(irqd);
2302 	union intcapxt xt;
2303 
2304 	xt.capxt = 0ULL;
2305 	xt.dest_mode_logical = apic->dest_mode_logical;
2306 	xt.vector = cfg->vector;
2307 	xt.destid_0_23 = cfg->dest_apicid & GENMASK(23, 0);
2308 	xt.destid_24_31 = cfg->dest_apicid >> 24;
2309 
2310 	/*
2311 	 * The current IOMMU implementation uses the same IRQ for all
2312 	 * three IOMMU interrupts (event log, PPR log and GA log).
2313 	 */
2314 	writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_EVT_OFFSET);
2315 	writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_PPR_OFFSET);
2316 	writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_GALOG_OFFSET);
2317 }
2318 
2319 static void intcapxt_mask_irq(struct irq_data *irqd)
2320 {
2321 	struct amd_iommu *iommu = irqd->chip_data;
2322 
2323 	writeq(0, iommu->mmio_base + MMIO_INTCAPXT_EVT_OFFSET);
2324 	writeq(0, iommu->mmio_base + MMIO_INTCAPXT_PPR_OFFSET);
2325 	writeq(0, iommu->mmio_base + MMIO_INTCAPXT_GALOG_OFFSET);
2326 }
2327 
2328 
2329 static int intcapxt_set_affinity(struct irq_data *irqd,
2330 				 const struct cpumask *mask, bool force)
2331 {
2332 	struct irq_data *parent = irqd->parent_data;
2333 	int ret;
2334 
2335 	ret = parent->chip->irq_set_affinity(parent, mask, force);
2336 	if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
2337 		return ret;
2338 	return 0;
2339 }
2340 
2341 static int intcapxt_set_wake(struct irq_data *irqd, unsigned int on)
2342 {
2343 	return on ? -EOPNOTSUPP : 0;
2344 }
2345 
2346 static struct irq_chip intcapxt_controller = {
2347 	.name			= "IOMMU-MSI",
2348 	.irq_unmask		= intcapxt_unmask_irq,
2349 	.irq_mask		= intcapxt_mask_irq,
2350 	.irq_ack		= irq_chip_ack_parent,
2351 	.irq_retrigger		= irq_chip_retrigger_hierarchy,
2352 	.irq_set_affinity       = intcapxt_set_affinity,
2353 	.irq_set_wake		= intcapxt_set_wake,
2354 	.flags			= IRQCHIP_MASK_ON_SUSPEND,
2355 };
2356 
2357 static const struct irq_domain_ops intcapxt_domain_ops = {
2358 	.alloc			= intcapxt_irqdomain_alloc,
2359 	.free			= intcapxt_irqdomain_free,
2360 	.activate		= intcapxt_irqdomain_activate,
2361 	.deactivate		= intcapxt_irqdomain_deactivate,
2362 };
2363 
2364 
2365 static struct irq_domain *iommu_irqdomain;
2366 
2367 static struct irq_domain *iommu_get_irqdomain(void)
2368 {
2369 	struct fwnode_handle *fn;
2370 
2371 	/* No need for locking here (yet) as the init is single-threaded */
2372 	if (iommu_irqdomain)
2373 		return iommu_irqdomain;
2374 
2375 	fn = irq_domain_alloc_named_fwnode("AMD-Vi-MSI");
2376 	if (!fn)
2377 		return NULL;
2378 
2379 	iommu_irqdomain = irq_domain_create_hierarchy(x86_vector_domain, 0, 0,
2380 						      fn, &intcapxt_domain_ops,
2381 						      NULL);
2382 	if (!iommu_irqdomain)
2383 		irq_domain_free_fwnode(fn);
2384 
2385 	return iommu_irqdomain;
2386 }
2387 
2388 static int iommu_setup_intcapxt(struct amd_iommu *iommu)
2389 {
2390 	struct irq_domain *domain;
2391 	struct irq_alloc_info info;
2392 	int irq, ret;
2393 
2394 	domain = iommu_get_irqdomain();
2395 	if (!domain)
2396 		return -ENXIO;
2397 
2398 	init_irq_alloc_info(&info, NULL);
2399 	info.type = X86_IRQ_ALLOC_TYPE_AMDVI;
2400 	info.data = iommu;
2401 
2402 	irq = irq_domain_alloc_irqs(domain, 1, NUMA_NO_NODE, &info);
2403 	if (irq < 0) {
2404 		irq_domain_remove(domain);
2405 		return irq;
2406 	}
2407 
2408 	ret = request_threaded_irq(irq, amd_iommu_int_handler,
2409 				   amd_iommu_int_thread, 0, "AMD-Vi", iommu);
2410 	if (ret) {
2411 		irq_domain_free_irqs(irq, 1);
2412 		irq_domain_remove(domain);
2413 		return ret;
2414 	}
2415 
2416 	return 0;
2417 }
2418 
2419 static int iommu_init_irq(struct amd_iommu *iommu)
2420 {
2421 	int ret;
2422 
2423 	if (iommu->int_enabled)
2424 		goto enable_faults;
2425 
2426 	if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
2427 		ret = iommu_setup_intcapxt(iommu);
2428 	else if (iommu->dev->msi_cap)
2429 		ret = iommu_setup_msi(iommu);
2430 	else
2431 		ret = -ENODEV;
2432 
2433 	if (ret)
2434 		return ret;
2435 
2436 	iommu->int_enabled = true;
2437 enable_faults:
2438 
2439 	if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
2440 		iommu_feature_enable(iommu, CONTROL_INTCAPXT_EN);
2441 
2442 	iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);
2443 
2444 	if (iommu->ppr_log != NULL)
2445 		iommu_feature_enable(iommu, CONTROL_PPRINT_EN);
2446 	return 0;
2447 }
2448 
2449 /****************************************************************************
2450  *
2451  * The next functions belong to the third pass of parsing the ACPI
2452  * table. In this last pass the memory mapping requirements are
2453  * gathered (like exclusion and unity mapping ranges).
2454  *
2455  ****************************************************************************/
2456 
2457 static void __init free_unity_maps(void)
2458 {
2459 	struct unity_map_entry *entry, *next;
2460 	struct amd_iommu_pci_seg *p, *pci_seg;
2461 
2462 	for_each_pci_segment_safe(pci_seg, p) {
2463 		list_for_each_entry_safe(entry, next, &pci_seg->unity_map, list) {
2464 			list_del(&entry->list);
2465 			kfree(entry);
2466 		}
2467 	}
2468 }
2469 
2470 /* called for unity map ACPI definition */
2471 static int __init init_unity_map_range(struct ivmd_header *m,
2472 				       struct acpi_table_header *ivrs_base)
2473 {
2474 	struct unity_map_entry *e = NULL;
2475 	struct amd_iommu_pci_seg *pci_seg;
2476 	char *s;
2477 
2478 	pci_seg = get_pci_segment(m->pci_seg, ivrs_base);
2479 	if (pci_seg == NULL)
2480 		return -ENOMEM;
2481 
2482 	e = kzalloc(sizeof(*e), GFP_KERNEL);
2483 	if (e == NULL)
2484 		return -ENOMEM;
2485 
2486 	switch (m->type) {
2487 	default:
2488 		kfree(e);
2489 		return 0;
2490 	case ACPI_IVMD_TYPE:
2491 		s = "IVMD_TYPE\t\t\t";
2492 		e->devid_start = e->devid_end = m->devid;
2493 		break;
2494 	case ACPI_IVMD_TYPE_ALL:
2495 		s = "IVMD_TYPE_ALL\t\t";
2496 		e->devid_start = 0;
2497 		e->devid_end = pci_seg->last_bdf;
2498 		break;
2499 	case ACPI_IVMD_TYPE_RANGE:
2500 		s = "IVMD_TYPE_RANGE\t\t";
2501 		e->devid_start = m->devid;
2502 		e->devid_end = m->aux;
2503 		break;
2504 	}
2505 	e->address_start = PAGE_ALIGN(m->range_start);
2506 	e->address_end = e->address_start + PAGE_ALIGN(m->range_length);
2507 	e->prot = m->flags >> 1;
2508 
2509 	/*
2510 	 * Treat per-device exclusion ranges as r/w unity-mapped regions
2511 	 * since some buggy BIOSes overwrite the exclusion range
2512 	 * (the exclusion_start and exclusion_length members). This
2513 	 * happens when there are multiple exclusion ranges (IVMD entries)
2514 	 * defined in the ACPI table.
2515 	 */
2516 	if (m->flags & IVMD_FLAG_EXCL_RANGE)
2517 		e->prot = (IVMD_FLAG_IW | IVMD_FLAG_IR) >> 1;
2518 
2519 	DUMP_printk("%s devid_start: %04x:%02x:%02x.%x devid_end: "
2520 		    "%04x:%02x:%02x.%x range_start: %016llx range_end: %016llx"
2521 		    " flags: %x\n", s, m->pci_seg,
2522 		    PCI_BUS_NUM(e->devid_start), PCI_SLOT(e->devid_start),
2523 		    PCI_FUNC(e->devid_start), m->pci_seg,
2524 		    PCI_BUS_NUM(e->devid_end),
2525 		    PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end),
2526 		    e->address_start, e->address_end, m->flags);
2527 
2528 	list_add_tail(&e->list, &pci_seg->unity_map);
2529 
2530 	return 0;
2531 }
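
/*
 * Note (editorial sketch): e->prot above is simply the IVMD flags shifted
 * right by one, which drops IVMD_FLAG_UNITY_MAP and leaves IR in bit 0 and
 * IW in bit 1. The decode, including the exclusion-range r/w override:
 */
#if 0	/* illustrative only */
static u8 ivmd_flags_to_prot(u8 flags)
{
	if (flags & IVMD_FLAG_EXCL_RANGE)
		return (IVMD_FLAG_IW | IVMD_FLAG_IR) >> 1;	/* r/w */
	return flags >> 1;
}
#endif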
2532 
2533 /* iterates over all memory definitions we find in the ACPI table */
2534 static int __init init_memory_definitions(struct acpi_table_header *table)
2535 {
2536 	u8 *p = (u8 *)table, *end = (u8 *)table;
2537 	struct ivmd_header *m;
2538 
2539 	end += table->length;
2540 	p += IVRS_HEADER_LENGTH;
2541 
2542 	while (p < end) {
2543 		m = (struct ivmd_header *)p;
2544 		if (m->flags & (IVMD_FLAG_UNITY_MAP | IVMD_FLAG_EXCL_RANGE))
2545 			init_unity_map_range(m, table);
2546 
2547 		p += m->length;
2548 	}
2549 
2550 	return 0;
2551 }
2552 
2553 /*
2554  * Init the device table to not allow DMA access for devices
2555  */
2556 static void init_device_table_dma(struct amd_iommu_pci_seg *pci_seg)
2557 {
2558 	u32 devid;
2559 	struct dev_table_entry *dev_table = pci_seg->dev_table;
2560 
2561 	if (dev_table == NULL)
2562 		return;
2563 
2564 	for (devid = 0; devid <= pci_seg->last_bdf; ++devid) {
2565 		__set_dev_entry_bit(dev_table, devid, DEV_ENTRY_VALID);
2566 		if (!amd_iommu_snp_en)
2567 			__set_dev_entry_bit(dev_table, devid, DEV_ENTRY_TRANSLATION);
2568 	}
2569 }
2570 
2571 static void __init uninit_device_table_dma(struct amd_iommu_pci_seg *pci_seg)
2572 {
2573 	u32 devid;
2574 	struct dev_table_entry *dev_table = pci_seg->dev_table;
2575 
2576 	if (dev_table == NULL)
2577 		return;
2578 
2579 	for (devid = 0; devid <= pci_seg->last_bdf; ++devid) {
2580 		dev_table[devid].data[0] = 0ULL;
2581 		dev_table[devid].data[1] = 0ULL;
2582 	}
2583 }
2584 
2585 static void init_device_table(void)
2586 {
2587 	struct amd_iommu_pci_seg *pci_seg;
2588 	u32 devid;
2589 
2590 	if (!amd_iommu_irq_remap)
2591 		return;
2592 
2593 	for_each_pci_segment(pci_seg) {
2594 		for (devid = 0; devid <= pci_seg->last_bdf; ++devid)
2595 			__set_dev_entry_bit(pci_seg->dev_table,
2596 					    devid, DEV_ENTRY_IRQ_TBL_EN);
2597 	}
2598 }
2599 
2600 static void iommu_init_flags(struct amd_iommu *iommu)
2601 {
2602 	iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ?
2603 		iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
2604 		iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);
2605 
2606 	iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ?
2607 		iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
2608 		iommu_feature_disable(iommu, CONTROL_PASSPW_EN);
2609 
2610 	iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
2611 		iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
2612 		iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);
2613 
2614 	iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ?
2615 		iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
2616 		iommu_feature_disable(iommu, CONTROL_ISOC_EN);
2617 
2618 	/*
2619 	 * make IOMMU memory accesses cache coherent
2620 	 */
2621 	iommu_feature_enable(iommu, CONTROL_COHERENT_EN);
2622 
2623 	/* Set IOTLB invalidation timeout to 1s */
2624 	iommu_set_inv_tlb_timeout(iommu, CTRL_INV_TO_1S);
2625 }
2626 
2627 static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
2628 {
2629 	int i, j;
2630 	u32 ioc_feature_control;
2631 	struct pci_dev *pdev = iommu->root_pdev;
2632 
2633 	/* RD890 BIOSes may not have completely reconfigured the iommu */
2634 	if (!is_rd890_iommu(iommu->dev) || !pdev)
2635 		return;
2636 
2637 	/*
2638 	 * First, we need to ensure that the iommu is enabled. This is
2639 	 * controlled by a register in the northbridge
2640 	 */
2641 
2642 	/* Select Northbridge indirect register 0x75 and enable writing */
2643 	pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7));
2644 	pci_read_config_dword(pdev, 0x64, &ioc_feature_control);
2645 
2646 	/* Enable the iommu */
2647 	if (!(ioc_feature_control & 0x1))
2648 		pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1);
2649 
2650 	/* Restore the iommu BAR */
2651 	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
2652 			       iommu->stored_addr_lo);
2653 	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8,
2654 			       iommu->stored_addr_hi);
2655 
2656 	/* Restore the l1 indirect regs for each of the 6 l1s */
2657 	for (i = 0; i < 6; i++)
2658 		for (j = 0; j < 0x12; j++)
2659 			iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]);
2660 
2661 	/* Restore the l2 indirect regs */
2662 	for (i = 0; i < 0x83; i++)
2663 		iommu_write_l2(iommu, i, iommu->stored_l2[i]);
2664 
2665 	/* Lock PCI setup registers */
2666 	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
2667 			       iommu->stored_addr_lo | 1);
2668 }
2669 
2670 static void iommu_enable_ga(struct amd_iommu *iommu)
2671 {
2672 #ifdef CONFIG_IRQ_REMAP
2673 	switch (amd_iommu_guest_ir) {
2674 	case AMD_IOMMU_GUEST_IR_VAPIC:
2675 	case AMD_IOMMU_GUEST_IR_LEGACY_GA:
2676 		iommu_feature_enable(iommu, CONTROL_GA_EN);
2677 		iommu->irte_ops = &irte_128_ops;
2678 		break;
2679 	default:
2680 		iommu->irte_ops = &irte_32_ops;
2681 		break;
2682 	}
2683 #endif
2684 }
2685 
2686 static void early_enable_iommu(struct amd_iommu *iommu)
2687 {
2688 	iommu_disable(iommu);
2689 	iommu_init_flags(iommu);
2690 	iommu_set_device_table(iommu);
2691 	iommu_enable_command_buffer(iommu);
2692 	iommu_enable_event_buffer(iommu);
2693 	iommu_set_exclusion_range(iommu);
2694 	iommu_enable_ga(iommu);
2695 	iommu_enable_xt(iommu);
2696 	iommu_enable(iommu);
2697 	iommu_flush_all_caches(iommu);
2698 }
2699 
2700 /*
2701  * This function finally enables all IOMMUs found in the system after
2702  * they have been initialized.
2703  *
2704  * Or, if running in a kdump kernel with all IOMMUs pre-enabled, try to
2705  * copy the old content of the device table entries. If that is not the
2706  * case, or the copy fails, just continue as a normal kernel would.
2707  */
2708 static void early_enable_iommus(void)
2709 {
2710 	struct amd_iommu *iommu;
2711 	struct amd_iommu_pci_seg *pci_seg;
2712 
2713 	if (!copy_device_table()) {
2714 		/*
2715 		 * If we get here because copying the device table from the old
2716 		 * kernel failed with all IOMMUs enabled, print an error message
2717 		 * and free the allocated old_dev_tbl_cpy.
2718 		 */
2719 		if (amd_iommu_pre_enabled)
2720 			pr_err("Failed to copy DEV table from previous kernel.\n");
2721 
2722 		for_each_pci_segment(pci_seg) {
2723 			if (pci_seg->old_dev_tbl_cpy != NULL) {
2724 				free_pages((unsigned long)pci_seg->old_dev_tbl_cpy,
2725 						get_order(pci_seg->dev_table_size));
2726 				pci_seg->old_dev_tbl_cpy = NULL;
2727 			}
2728 		}
2729 
2730 		for_each_iommu(iommu) {
2731 			clear_translation_pre_enabled(iommu);
2732 			early_enable_iommu(iommu);
2733 		}
2734 	} else {
2735 		pr_info("Copied DEV table from previous kernel.\n");
2736 
2737 		for_each_pci_segment(pci_seg) {
2738 			free_pages((unsigned long)pci_seg->dev_table,
2739 				   get_order(pci_seg->dev_table_size));
2740 			pci_seg->dev_table = pci_seg->old_dev_tbl_cpy;
2741 		}
2742 
2743 		for_each_iommu(iommu) {
2744 			iommu_disable_command_buffer(iommu);
2745 			iommu_disable_event_buffer(iommu);
2746 			iommu_enable_command_buffer(iommu);
2747 			iommu_enable_event_buffer(iommu);
2748 			iommu_enable_ga(iommu);
2749 			iommu_enable_xt(iommu);
2750 			iommu_set_device_table(iommu);
2751 			iommu_flush_all_caches(iommu);
2752 		}
2753 	}
2754 }
2755 
2756 static void enable_iommus_v2(void)
2757 {
2758 	struct amd_iommu *iommu;
2759 
2760 	for_each_iommu(iommu) {
2761 		iommu_enable_ppr_log(iommu);
2762 		iommu_enable_gt(iommu);
2763 	}
2764 }
2765 
2766 static void enable_iommus_vapic(void)
2767 {
2768 #ifdef CONFIG_IRQ_REMAP
2769 	u32 status, i;
2770 	struct amd_iommu *iommu;
2771 
2772 	for_each_iommu(iommu) {
2773 		/*
2774 		 * Disable GALog if already running. It could have been enabled
2775 		 * in the previous boot before kdump.
2776 		 */
2777 		status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
2778 		if (!(status & MMIO_STATUS_GALOG_RUN_MASK))
2779 			continue;
2780 
2781 		iommu_feature_disable(iommu, CONTROL_GALOG_EN);
2782 		iommu_feature_disable(iommu, CONTROL_GAINT_EN);
2783 
2784 		/*
2785 		 * Need to poll the GALogRun bit until it reads zero before
2786 		 * we can safely set or modify the GA Log registers.
2787 		 */
2788 		for (i = 0; i < LOOP_TIMEOUT; ++i) {
2789 			status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
2790 			if (!(status & MMIO_STATUS_GALOG_RUN_MASK))
2791 				break;
2792 			udelay(10);
2793 		}
2794 
2795 		if (WARN_ON(i >= LOOP_TIMEOUT))
2796 			return;
2797 	}
2798 
2799 	if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
2800 	    !check_feature_on_all_iommus(FEATURE_GAM_VAPIC)) {
2801 		amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
2802 		return;
2803 	}
2804 
2805 	if (amd_iommu_snp_en &&
2806 	    !FEATURE_SNPAVICSUP_GAM(amd_iommu_efr2)) {
2807 		pr_warn("Disabling Virtual APIC due to SNP\n");
2808 		amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
2809 		return;
2810 	}
2811 
2812 	/* Enable GAM and SNPAVIC support */
2813 	for_each_iommu(iommu) {
2814 		if (iommu_init_ga_log(iommu) ||
2815 		    iommu_ga_log_enable(iommu))
2816 			return;
2817 
2818 		iommu_feature_enable(iommu, CONTROL_GAM_EN);
2819 		if (amd_iommu_snp_en)
2820 			iommu_feature_enable(iommu, CONTROL_SNPAVIC_EN);
2821 	}
2822 
2823 	amd_iommu_irq_ops.capability |= (1 << IRQ_POSTING_CAP);
2824 	pr_info("Virtual APIC enabled\n");
2825 #endif
2826 }
2827 
2828 static void enable_iommus(void)
2829 {
2830 	early_enable_iommus();
2831 	enable_iommus_vapic();
2832 	enable_iommus_v2();
2833 }
2834 
2835 static void disable_iommus(void)
2836 {
2837 	struct amd_iommu *iommu;
2838 
2839 	for_each_iommu(iommu)
2840 		iommu_disable(iommu);
2841 
2842 #ifdef CONFIG_IRQ_REMAP
2843 	if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
2844 		amd_iommu_irq_ops.capability &= ~(1 << IRQ_POSTING_CAP);
2845 #endif
2846 }
2847 
2848 /*
2849  * Suspend/Resume support
2850  * disable suspend until real resume implemented
2851  * disable suspend until real resume is implemented
2852 
2853 static void amd_iommu_resume(void)
2854 {
2855 	struct amd_iommu *iommu;
2856 
2857 	for_each_iommu(iommu)
2858 		iommu_apply_resume_quirks(iommu);
2859 
2860 	/* re-load the hardware */
2861 	enable_iommus();
2862 
2863 	amd_iommu_enable_interrupts();
2864 }
2865 
2866 static int amd_iommu_suspend(void)
2867 {
2868 	/* disable IOMMUs to go out of the way for BIOS */
2869 	disable_iommus();
2870 
2871 	return 0;
2872 }
2873 
2874 static struct syscore_ops amd_iommu_syscore_ops = {
2875 	.suspend = amd_iommu_suspend,
2876 	.resume = amd_iommu_resume,
2877 };
2878 
2879 static void __init free_iommu_resources(void)
2880 {
2881 	kmem_cache_destroy(amd_iommu_irq_cache);
2882 	amd_iommu_irq_cache = NULL;
2883 
2884 	free_iommu_all();
2885 	free_pci_segments();
2886 }
2887 
2888 /* SB IOAPIC is always on this device in AMD systems */
2889 #define IOAPIC_SB_DEVID		((0x00 << 8) | PCI_DEVFN(0x14, 0))
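
/*
 * Note (editorial sketch): a 16-bit devid packs bus << 8 | PCI_DEVFN(dev, fn),
 * so the SB IOAPIC devid above is (0x00 << 8) | (0x14 << 3 | 0) = 0x00a0,
 * i.e. device 0000:00:14.0.
 */
#if 0	/* illustrative only */
static_assert(IOAPIC_SB_DEVID == 0x00a0);
#endif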
2890 
2891 static bool __init check_ioapic_information(void)
2892 {
2893 	const char *fw_bug = FW_BUG;
2894 	bool ret, has_sb_ioapic;
2895 	int idx;
2896 
2897 	has_sb_ioapic = false;
2898 	ret           = false;
2899 
2900 	/*
2901 	 * If we have map overrides on the kernel command line the
2902 	 * messages in this function might not describe firmware bugs
2903 	 * anymore - so be careful
2904 	 */
2905 	if (cmdline_maps)
2906 		fw_bug = "";
2907 
2908 	for (idx = 0; idx < nr_ioapics; idx++) {
2909 		int devid, id = mpc_ioapic_id(idx);
2910 
2911 		devid = get_ioapic_devid(id);
2912 		if (devid < 0) {
2913 			pr_err("%s: IOAPIC[%d] not in IVRS table\n",
2914 				fw_bug, id);
2915 			ret = false;
2916 		} else if (devid == IOAPIC_SB_DEVID) {
2917 			has_sb_ioapic = true;
2918 			ret           = true;
2919 		}
2920 	}
2921 
2922 	if (!has_sb_ioapic) {
2923 		/*
2924 		 * We expect the SB IOAPIC to be listed in the IVRS
2925 		 * table. The system timer is connected to the SB IOAPIC
2926 		 * and if we don't have it in the list the system will
2927 		 * panic at boot time.  This situation usually happens
2928 		 * when the BIOS is buggy and provides us the wrong
2929 		 * device id for the IOAPIC in the system.
2930 		 */
2931 		pr_err("%s: No southbridge IOAPIC found\n", fw_bug);
2932 	}
2933 
2934 	if (!ret)
2935 		pr_err("Disabling interrupt remapping\n");
2936 
2937 	return ret;
2938 }
2939 
2940 static void __init free_dma_resources(void)
2941 {
2942 	free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
2943 		   get_order(MAX_DOMAIN_ID/8));
2944 	amd_iommu_pd_alloc_bitmap = NULL;
2945 
2946 	free_unity_maps();
2947 }
2948 
2949 static void __init ivinfo_init(void *ivrs)
2950 {
2951 	amd_iommu_ivinfo = *((u32 *)(ivrs + IOMMU_IVINFO_OFFSET));
2952 }
2953 
2954 /*
2955  * This is the hardware init function for AMD IOMMU in the system.
2956  * This function is called either from amd_iommu_init or from the interrupt
2957  * remapping setup code.
2958  *
2959  * This function basically parses the ACPI table for AMD IOMMU (IVRS)
2960  * four times:
2961  *
2962  *	1 pass) Discover the most comprehensive IVHD type to use.
2963  *
2964  *	2 pass) Find the highest PCI device id the driver has to handle.
2965  *		Based on this information, the sizes of the data
2966  *		structures that need to be allocated are determined.
2967  *
2968  *	3 pass) Initialize the data structures just allocated with the
2969  *		information in the ACPI table about available AMD IOMMUs
2970  *		in the system. It also maps the PCI devices in the
2971  *		system to specific IOMMUs
2972  *
2973  *	4 pass) After the basic data structures are allocated and
2974  *		initialized we update them with information about memory
2975  *		remapping requirements parsed out of the ACPI table in
2976  *		this last pass.
2977  *
2978  * After everything is set up the IOMMUs are enabled and the necessary
2979  * hotplug and suspend notifiers are registered.
2980  */
2981 static int __init early_amd_iommu_init(void)
2982 {
2983 	struct acpi_table_header *ivrs_base;
2984 	int remap_cache_sz, ret;
2985 	acpi_status status;
2986 
2987 	if (!amd_iommu_detected)
2988 		return -ENODEV;
2989 
2990 	status = acpi_get_table("IVRS", 0, &ivrs_base);
2991 	if (status == AE_NOT_FOUND)
2992 		return -ENODEV;
2993 	else if (ACPI_FAILURE(status)) {
2994 		const char *err = acpi_format_exception(status);
2995 		pr_err("IVRS table error: %s\n", err);
2996 		return -EINVAL;
2997 	}
2998 
2999 	/*
3000 	 * Validate checksum here so we don't need to do it when
3001 	 * we actually parse the table
3002 	 */
3003 	ret = check_ivrs_checksum(ivrs_base);
3004 	if (ret)
3005 		goto out;
3006 
3007 	ivinfo_init(ivrs_base);
3008 
3009 	amd_iommu_target_ivhd_type = get_highest_supported_ivhd_type(ivrs_base);
3010 	DUMP_printk("Using IVHD type %#x\n", amd_iommu_target_ivhd_type);
3011 
3012 	/* Device table - directly used by all IOMMUs */
3013 	ret = -ENOMEM;
3014 
3015 	amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(
3016 					    GFP_KERNEL | __GFP_ZERO,
3017 					    get_order(MAX_DOMAIN_ID/8));
3018 	if (amd_iommu_pd_alloc_bitmap == NULL)
3019 		goto out;
3020 
3021 	/*
3022 	 * Never allocate domain 0 because it's used as the non-allocated and
3023 	 * error value placeholder.
3024 	 */
3025 	__set_bit(0, amd_iommu_pd_alloc_bitmap);
3026 
3027 	/*
3028 	 * Now that the data structures are allocated and basically
3029 	 * initialized, start the real ACPI table scan.
3030 	 */
3031 	ret = init_iommu_all(ivrs_base);
3032 	if (ret)
3033 		goto out;
3034 
3035 	/* Disable any previously enabled IOMMUs */
3036 	if (!is_kdump_kernel() || amd_iommu_disabled)
3037 		disable_iommus();
3038 
3039 	if (amd_iommu_irq_remap)
3040 		amd_iommu_irq_remap = check_ioapic_information();
3041 
3042 	if (amd_iommu_irq_remap) {
3043 		struct amd_iommu_pci_seg *pci_seg;
3044 		/*
3045 		 * Interrupt remapping enabled, create kmem_cache for the
3046 		 * remapping tables.
3047 		 */
3048 		ret = -ENOMEM;
3049 		if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
3050 			remap_cache_sz = MAX_IRQS_PER_TABLE * sizeof(u32);
3051 		else
3052 			remap_cache_sz = MAX_IRQS_PER_TABLE * (sizeof(u64) * 2);
3053 		amd_iommu_irq_cache = kmem_cache_create("irq_remap_cache",
3054 							remap_cache_sz,
3055 							DTE_INTTAB_ALIGNMENT,
3056 							0, NULL);
3057 		if (!amd_iommu_irq_cache)
3058 			goto out;
3059 
3060 		for_each_pci_segment(pci_seg) {
3061 			if (alloc_irq_lookup_table(pci_seg))
3062 				goto out;
3063 		}
3064 	}
3065 
3066 	ret = init_memory_definitions(ivrs_base);
3067 	if (ret)
3068 		goto out;
3069 
3070 	/* init the device table */
3071 	init_device_table();
3072 
3073 out:
3074 	/* Don't leak any ACPI memory */
3075 	acpi_put_table(ivrs_base);
3076 
3077 	return ret;
3078 }
3079 
3080 static int amd_iommu_enable_interrupts(void)
3081 {
3082 	struct amd_iommu *iommu;
3083 	int ret = 0;
3084 
3085 	for_each_iommu(iommu) {
3086 		ret = iommu_init_irq(iommu);
3087 		if (ret)
3088 			goto out;
3089 	}
3090 
3091 out:
3092 	return ret;
3093 }
3094 
3095 static bool __init detect_ivrs(void)
3096 {
3097 	struct acpi_table_header *ivrs_base;
3098 	acpi_status status;
3099 	int i;
3100 
3101 	status = acpi_get_table("IVRS", 0, &ivrs_base);
3102 	if (status == AE_NOT_FOUND)
3103 		return false;
3104 	else if (ACPI_FAILURE(status)) {
3105 		const char *err = acpi_format_exception(status);
3106 		pr_err("IVRS table error: %s\n", err);
3107 		return false;
3108 	}
3109 
3110 	acpi_put_table(ivrs_base);
3111 
3112 	if (amd_iommu_force_enable)
3113 		goto out;
3114 
3115 	/* Don't use the IOMMU if there is a Stoney Ridge graphics device */
3116 	for (i = 0; i < 32; i++) {
3117 		u32 pci_id;
3118 
3119 		pci_id = read_pci_config(0, i, 0, 0);
3120 		if ((pci_id & 0xffff) == 0x1002 && (pci_id >> 16) == 0x98e4) {
3121 			pr_info("Disable IOMMU on Stoney Ridge\n");
3122 			return false;
3123 		}
3124 	}
3125 
3126 out:
3127 	/* Make sure ACS will be enabled during PCI probe */
3128 	pci_request_acs();
3129 
3130 	return true;
3131 }
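
/*
 * Note (editorial sketch): dword 0 of PCI config space is
 * (device_id << 16) | vendor_id, so the loop above matches vendor 0x1002
 * (ATI/AMD) with device 0x98e4 (Stoney Ridge graphics):
 */
#if 0	/* illustrative only */
static bool is_stoney_ridge_gfx(u32 pci_id)
{
	return (pci_id & 0xffff) == 0x1002 && (pci_id >> 16) == 0x98e4;
}
#endif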
3132 
3133 /****************************************************************************
3134  *
3135  * AMD IOMMU Initialization State Machine
3136  *
3137  ****************************************************************************/
3138 
3139 static int __init state_next(void)
3140 {
3141 	int ret = 0;
3142 
3143 	switch (init_state) {
3144 	case IOMMU_START_STATE:
3145 		if (!detect_ivrs()) {
3146 			init_state	= IOMMU_NOT_FOUND;
3147 			ret		= -ENODEV;
3148 		} else {
3149 			init_state	= IOMMU_IVRS_DETECTED;
3150 		}
3151 		break;
3152 	case IOMMU_IVRS_DETECTED:
3153 		if (amd_iommu_disabled) {
3154 			init_state = IOMMU_CMDLINE_DISABLED;
3155 			ret = -EINVAL;
3156 		} else {
3157 			ret = early_amd_iommu_init();
3158 			init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED;
3159 		}
3160 		break;
3161 	case IOMMU_ACPI_FINISHED:
3162 		early_enable_iommus();
3163 		x86_platform.iommu_shutdown = disable_iommus;
3164 		init_state = IOMMU_ENABLED;
3165 		break;
3166 	case IOMMU_ENABLED:
3167 		register_syscore_ops(&amd_iommu_syscore_ops);
3168 		ret = amd_iommu_init_pci();
3169 		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT;
3170 		enable_iommus_vapic();
3171 		enable_iommus_v2();
3172 		break;
3173 	case IOMMU_PCI_INIT:
3174 		ret = amd_iommu_enable_interrupts();
3175 		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_INTERRUPTS_EN;
3176 		break;
3177 	case IOMMU_INTERRUPTS_EN:
3178 		init_state = IOMMU_INITIALIZED;
3179 		break;
3180 	case IOMMU_INITIALIZED:
3181 		/* Nothing to do */
3182 		break;
3183 	case IOMMU_NOT_FOUND:
3184 	case IOMMU_INIT_ERROR:
3185 	case IOMMU_CMDLINE_DISABLED:
3186 		/* Error states => do nothing */
3187 		ret = -EINVAL;
3188 		break;
3189 	default:
3190 		/* Unknown state */
3191 		BUG();
3192 	}
3193 
3194 	if (ret) {
3195 		free_dma_resources();
3196 		if (!irq_remapping_enabled) {
3197 			disable_iommus();
3198 			free_iommu_resources();
3199 		} else {
3200 			struct amd_iommu *iommu;
3201 			struct amd_iommu_pci_seg *pci_seg;
3202 
3203 			for_each_pci_segment(pci_seg)
3204 				uninit_device_table_dma(pci_seg);
3205 
3206 			for_each_iommu(iommu)
3207 				iommu_flush_all_caches(iommu);
3208 		}
3209 	}
3210 	return ret;
3211 }
3212 
3213 static int __init iommu_go_to_state(enum iommu_init_state state)
3214 {
3215 	int ret = -EINVAL;
3216 
3217 	while (init_state != state) {
3218 		if (init_state == IOMMU_NOT_FOUND         ||
3219 		    init_state == IOMMU_INIT_ERROR        ||
3220 		    init_state == IOMMU_CMDLINE_DISABLED)
3221 			break;
3222 		ret = state_next();
3223 	}
3224 
3225 	return ret;
3226 }
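
/*
 * Illustrative sketch: the entry points below drive the state machine
 * forward in stages, each advancing init_state no further than its target
 * state. Assuming interrupt remapping is in use, a full boot walks the
 * states roughly like this:
 */
#if 0	/* illustrative only */
static void example_init_flow(void)
{
	iommu_go_to_state(IOMMU_IVRS_DETECTED);	/* amd_iommu_detect()  */
	iommu_go_to_state(IOMMU_ACPI_FINISHED);	/* amd_iommu_prepare() */
	iommu_go_to_state(IOMMU_ENABLED);	/* amd_iommu_enable()  */
	iommu_go_to_state(IOMMU_INITIALIZED);	/* amd_iommu_init()    */
}
#endif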
3227 
3228 #ifdef CONFIG_IRQ_REMAP
3229 int __init amd_iommu_prepare(void)
3230 {
3231 	int ret;
3232 
3233 	amd_iommu_irq_remap = true;
3234 
3235 	ret = iommu_go_to_state(IOMMU_ACPI_FINISHED);
3236 	if (ret) {
3237 		amd_iommu_irq_remap = false;
3238 		return ret;
3239 	}
3240 
3241 	return amd_iommu_irq_remap ? 0 : -ENODEV;
3242 }
3243 
3244 int __init amd_iommu_enable(void)
3245 {
3246 	int ret;
3247 
3248 	ret = iommu_go_to_state(IOMMU_ENABLED);
3249 	if (ret)
3250 		return ret;
3251 
3252 	irq_remapping_enabled = 1;
3253 	return amd_iommu_xt_mode;
3254 }
3255 
3256 void amd_iommu_disable(void)
3257 {
3258 	amd_iommu_suspend();
3259 }
3260 
3261 int amd_iommu_reenable(int mode)
3262 {
3263 	amd_iommu_resume();
3264 
3265 	return 0;
3266 }
3267 
3268 int __init amd_iommu_enable_faulting(void)
3269 {
3270 	/* We enable MSI later when PCI is initialized */
3271 	return 0;
3272 }
3273 #endif
3274 
3275 /*
3276  * This is the core init function for AMD IOMMU hardware in the system.
3277  * This function is called from the generic x86 DMA layer initialization
3278  * code.
3279  */
3280 static int __init amd_iommu_init(void)
3281 {
3282 	struct amd_iommu *iommu;
3283 	int ret;
3284 
3285 	ret = iommu_go_to_state(IOMMU_INITIALIZED);
3286 #ifdef CONFIG_GART_IOMMU
3287 	if (ret && list_empty(&amd_iommu_list)) {
3288 		/*
3289 		 * We failed to initialize the AMD IOMMU - try fallback
3290 		 * to GART if possible.
3291 		 */
3292 		gart_iommu_init();
3293 	}
3294 #endif
3295 
3296 	for_each_iommu(iommu)
3297 		amd_iommu_debugfs_setup(iommu);
3298 
3299 	return ret;
3300 }
3301 
3302 static bool amd_iommu_sme_check(void)
3303 {
3304 	if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT) ||
3305 	    (boot_cpu_data.x86 != 0x17))
3306 		return true;
3307 
3308 	/* For Fam17h, a specific level of support is required */
3309 	if (boot_cpu_data.microcode >= 0x08001205)
3310 		return true;
3311 
3312 	if ((boot_cpu_data.microcode >= 0x08001126) &&
3313 	    (boot_cpu_data.microcode <= 0x080011ff))
3314 		return true;
3315 
3316 	pr_notice("IOMMU not currently supported when SME is active\n");
3317 
3318 	return false;
3319 }
3320 
3321 /****************************************************************************
3322  *
3323  * Early detect code. This code runs at IOMMU detection time in the DMA
3324  * layer. It just looks if there is an IVRS ACPI table to detect AMD
3325  * IOMMUs
3326  *
3327  ****************************************************************************/
3328 int __init amd_iommu_detect(void)
3329 {
3330 	int ret;
3331 
3332 	if (no_iommu || (iommu_detected && !gart_iommu_aperture))
3333 		return -ENODEV;
3334 
3335 	if (!amd_iommu_sme_check())
3336 		return -ENODEV;
3337 
3338 	ret = iommu_go_to_state(IOMMU_IVRS_DETECTED);
3339 	if (ret)
3340 		return ret;
3341 
3342 	amd_iommu_detected = true;
3343 	iommu_detected = 1;
3344 	x86_init.iommu.iommu_init = amd_iommu_init;
3345 
3346 	return 1;
3347 }
3348 
3349 /****************************************************************************
3350  *
3351  * Parsing functions for the AMD IOMMU specific kernel command line
3352  * options.
3353  *
3354  ****************************************************************************/
3355 
3356 static int __init parse_amd_iommu_dump(char *str)
3357 {
3358 	amd_iommu_dump = true;
3359 
3360 	return 1;
3361 }
3362 
3363 static int __init parse_amd_iommu_intr(char *str)
3364 {
3365 	for (; *str; ++str) {
3366 		if (strncmp(str, "legacy", 6) == 0) {
3367 			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
3368 			break;
3369 		}
3370 		if (strncmp(str, "vapic", 5) == 0) {
3371 			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
3372 			break;
3373 		}
3374 	}
3375 	return 1;
3376 }
3377 
3378 static int __init parse_amd_iommu_options(char *str)
3379 {
3380 	if (!str)
3381 		return -EINVAL;
3382 
3383 	while (*str) {
3384 		if (strncmp(str, "fullflush", 9) == 0) {
3385 			pr_warn("amd_iommu=fullflush deprecated; use iommu.strict=1 instead\n");
3386 			iommu_set_dma_strict();
3387 		} else if (strncmp(str, "force_enable", 12) == 0) {
3388 			amd_iommu_force_enable = true;
3389 		} else if (strncmp(str, "off", 3) == 0) {
3390 			amd_iommu_disabled = true;
3391 		} else if (strncmp(str, "force_isolation", 15) == 0) {
3392 			amd_iommu_force_isolation = true;
3393 		} else if (strncmp(str, "pgtbl_v1", 8) == 0) {
3394 			amd_iommu_pgtable = AMD_IOMMU_V1;
3395 		} else if (strncmp(str, "pgtbl_v2", 8) == 0) {
3396 			amd_iommu_pgtable = AMD_IOMMU_V2;
3397 		} else {
3398 			pr_notice("Unknown option - '%s'\n", str);
3399 		}
3400 
3401 		str += strcspn(str, ",");
3402 		while (*str == ',')
3403 			str++;
3404 	}
3405 
3406 	return 1;
3407 }
3408 
3409 static int __init parse_ivrs_ioapic(char *str)
3410 {
3411 	u32 seg = 0, bus, dev, fn;
3412 	int ret, id, i;
3413 	u32 devid;
3414 
3415 	ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);
3416 	if (ret != 4) {
3417 		ret = sscanf(str, "[%d]=%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn);
3418 		if (ret != 5) {
3419 			pr_err("Invalid command line: ivrs_ioapic%s\n", str);
3420 			return 1;
3421 		}
3422 	}
3423 
3424 	if (early_ioapic_map_size == EARLY_MAP_SIZE) {
3425 		pr_err("Early IOAPIC map overflow - ignoring ivrs_ioapic%s\n",
3426 			str);
3427 		return 1;
3428 	}
3429 
3430 	devid = IVRS_GET_SBDF_ID(seg, bus, dev, fn);
3431 
3432 	cmdline_maps			= true;
3433 	i				= early_ioapic_map_size++;
3434 	early_ioapic_map[i].id		= id;
3435 	early_ioapic_map[i].devid	= devid;
3436 	early_ioapic_map[i].cmd_line	= true;
3437 
3438 	return 1;
3439 }
3440 
3441 static int __init parse_ivrs_hpet(char *str)
3442 {
3443 	u32 seg = 0, bus, dev, fn;
3444 	int ret, id, i;
3445 	u32 devid;
3446 
3447 	ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);
3448 	if (ret != 4) {
3449 		ret = sscanf(str, "[%d]=%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn);
3450 		if (ret != 5) {
3451 			pr_err("Invalid command line: ivrs_hpet%s\n", str);
3452 			return 1;
3453 		}
3454 	}
3455 
3456 	if (early_hpet_map_size == EARLY_MAP_SIZE) {
3457 		pr_err("Early HPET map overflow - ignoring ivrs_hpet%s\n",
3458 			str);
3459 		return 1;
3460 	}
3461 
3462 	devid = IVRS_GET_SBDF_ID(seg, bus, dev, fn);
3463 
3464 	cmdline_maps			= true;
3465 	i				= early_hpet_map_size++;
3466 	early_hpet_map[i].id		= id;
3467 	early_hpet_map[i].devid		= devid;
3468 	early_hpet_map[i].cmd_line	= true;
3469 
3470 	return 1;
3471 }
3472 
3473 static int __init parse_ivrs_acpihid(char *str)
3474 {
3475 	u32 seg = 0, bus, dev, fn;
3476 	char *hid, *uid, *p;
3477 	char acpiid[ACPIHID_UID_LEN + ACPIHID_HID_LEN] = {0};
3478 	int ret, i;
3479 
3480 	ret = sscanf(str, "[%x:%x.%x]=%s", &bus, &dev, &fn, acpiid);
3481 	if (ret != 4) {
3482 		ret = sscanf(str, "[%x:%x:%x.%x]=%s", &seg, &bus, &dev, &fn, acpiid);
3483 		if (ret != 5) {
3484 			pr_err("Invalid command line: ivrs_acpihid(%s)\n", str);
3485 			return 1;
3486 		}
3487 	}
3488 
3489 	p = acpiid;
3490 	hid = strsep(&p, ":");
3491 	uid = p;
3492 
3493 	if (!hid || !(*hid) || !uid) {
3494 		pr_err("Invalid command line: hid or uid\n");
3495 		return 1;
3496 	}
3497 
3498 	i = early_acpihid_map_size++;
3499 	memcpy(early_acpihid_map[i].hid, hid, strlen(hid));
3500 	memcpy(early_acpihid_map[i].uid, uid, strlen(uid));
3501 	early_acpihid_map[i].devid = IVRS_GET_SBDF_ID(seg, bus, dev, fn);
3502 	early_acpihid_map[i].cmd_line	= true;
3503 
3504 	return 1;
3505 }
3506 
3507 __setup("amd_iommu_dump",	parse_amd_iommu_dump);
3508 __setup("amd_iommu=",		parse_amd_iommu_options);
3509 __setup("amd_iommu_intr=",	parse_amd_iommu_intr);
3510 __setup("ivrs_ioapic",		parse_ivrs_ioapic);
3511 __setup("ivrs_hpet",		parse_ivrs_hpet);
3512 __setup("ivrs_acpihid",		parse_ivrs_acpihid);
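
/*
 * Example command lines accepted by the parsers above, derived from the
 * sscanf() patterns (the PCI segment is optional and defaults to 0; the
 * IOAPIC/HPET id is decimal, the bus/dev/fn values are hexadecimal):
 *
 *   amd_iommu=force_enable,pgtbl_v2
 *   amd_iommu_intr=vapic
 *   ivrs_ioapic[32]=00:14.0
 *   ivrs_ioapic[32]=0001:00:14.0
 *   ivrs_hpet[0]=00:14.0
 *   ivrs_acpihid[00:14.5]=AMD0020:0
 */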
3513 
3514 bool amd_iommu_v2_supported(void)
3515 {
3516 	/*
3517 	 * Since DTE[Mode]=0 is prohibited on an SNP-enabled system
3518 	 * (i.e. EFR[SNPSup]=1), the IOMMUv2 page table cannot be used
3519 	 * without setting up the IOMMUv1 page table.
3520 	 */
3521 	return amd_iommu_v2_present && !amd_iommu_snp_en;
3522 }
3523 EXPORT_SYMBOL(amd_iommu_v2_supported);
3524 
3525 struct amd_iommu *get_amd_iommu(unsigned int idx)
3526 {
3527 	unsigned int i = 0;
3528 	struct amd_iommu *iommu;
3529 
3530 	for_each_iommu(iommu)
3531 		if (i++ == idx)
3532 			return iommu;
3533 	return NULL;
3534 }
3535 
3536 /****************************************************************************
3537  *
3538  * IOMMU EFR Performance Counter support functionality. This code allows
3539  * access to the IOMMU PC functionality.
3540  *
3541  ****************************************************************************/
3542 
3543 u8 amd_iommu_pc_get_max_banks(unsigned int idx)
3544 {
3545 	struct amd_iommu *iommu = get_amd_iommu(idx);
3546 
3547 	if (iommu)
3548 		return iommu->max_banks;
3549 
3550 	return 0;
3551 }
3552 EXPORT_SYMBOL(amd_iommu_pc_get_max_banks);
3553 
3554 bool amd_iommu_pc_supported(void)
3555 {
3556 	return amd_iommu_pc_present;
3557 }
3558 EXPORT_SYMBOL(amd_iommu_pc_supported);
3559 
3560 u8 amd_iommu_pc_get_max_counters(unsigned int idx)
3561 {
3562 	struct amd_iommu *iommu = get_amd_iommu(idx);
3563 
3564 	if (iommu)
3565 		return iommu->max_counters;
3566 
3567 	return 0;
3568 }
3569 EXPORT_SYMBOL(amd_iommu_pc_get_max_counters);
3570 
3571 static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
3572 				u8 fxn, u64 *value, bool is_write)
3573 {
3574 	u32 offset;
3575 	u32 max_offset_lim;
3576 
3577 	/* Make sure the IOMMU PC resource is available */
3578 	if (!amd_iommu_pc_present)
3579 		return -ENODEV;
3580 
3581 	/* Check for valid iommu and pc register indexing */
3582 	if (WARN_ON(!iommu || (fxn > 0x28) || (fxn & 7)))
3583 		return -ENODEV;
3584 
3585 	offset = (u32)(((0x40 | bank) << 12) | (cntr << 8) | fxn);
3586 
3587 	/* Limit the offset to the hw defined mmio region aperture */
3588 	max_offset_lim = (u32)(((0x40 | iommu->max_banks) << 12) |
3589 				(iommu->max_counters << 8) | 0x28);
3590 	if ((offset < MMIO_CNTR_REG_OFFSET) ||
3591 	    (offset > max_offset_lim))
3592 		return -EINVAL;
3593 
3594 	if (is_write) {
3595 		u64 val = *value & GENMASK_ULL(47, 0);
3596 
3597 		writel((u32)val, iommu->mmio_base + offset);
3598 		writel((val >> 32), iommu->mmio_base + offset + 4);
3599 	} else {
3600 		*value = readl(iommu->mmio_base + offset + 4);
3601 		*value <<= 32;
3602 		*value |= readl(iommu->mmio_base + offset);
3603 		*value &= GENMASK_ULL(47, 0);
3604 	}
3605 
3606 	return 0;
3607 }
3608 
3609 int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
3610 {
3611 	if (!iommu)
3612 		return -EINVAL;
3613 
3614 	return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, false);
3615 }
3616 
3617 int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
3618 {
3619 	if (!iommu)
3620 		return -EINVAL;
3621 
3622 	return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, true);
3623 }
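
/*
 * Usage sketch (illustrative, with an assumed register function): reading
 * a counter through the API above. Register function 0x00 is assumed to be
 * the counter value itself; iommu_pc_get_set_reg() maps (bank, cntr, fxn)
 * to MMIO offset ((0x40 | bank) << 12) | (cntr << 8) | fxn.
 */
#if 0	/* illustrative only */
static int example_read_counter(unsigned int idx, u8 bank, u8 cntr)
{
	struct amd_iommu *iommu = get_amd_iommu(idx);
	u64 value;
	int ret;

	if (!iommu || bank >= amd_iommu_pc_get_max_banks(idx) ||
	    cntr >= amd_iommu_pc_get_max_counters(idx))
		return -EINVAL;

	ret = amd_iommu_pc_get_reg(iommu, bank, cntr, 0x00, &value);
	if (ret)
		return ret;

	pr_info("IOMMU%u PC bank %u counter %u: %llu\n", idx, bank, cntr, value);
	return 0;
}
#endif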
3624 
3625 #ifdef CONFIG_AMD_MEM_ENCRYPT
3626 int amd_iommu_snp_enable(void)
3627 {
3628 	/*
3629 	 * SNP support requires that the IOMMU is enabled and not
3630 	 * configured in passthrough mode.
3631 	 */
3632 	if (no_iommu || iommu_default_passthrough()) {
3633 		pr_err("SNP: IOMMU is disabled or configured in passthrough mode, SNP cannot be supported\n");
3634 		return -EINVAL;
3635 	}
3636 
3637 	/*
3638 	 * Prevent enabling SNP after the IOMMU_ENABLED state because this
3639 	 * affects how the IOMMU driver sets up data structures and
3640 	 * configures the IOMMU hardware.
3641 	 */
3642 	if (init_state > IOMMU_ENABLED) {
3643 		pr_err("SNP: Too late to enable SNP for IOMMU.\n");
3644 		return -EINVAL;
3645 	}
3646 
3647 	amd_iommu_snp_en = check_feature_on_all_iommus(FEATURE_SNP);
3648 	if (!amd_iommu_snp_en)
3649 		return -EINVAL;
3650 
3651 	pr_info("SNP enabled\n");
3652 
3653 	/* Enforce IOMMU v1 pagetable when SNP is enabled. */
3654 	if (amd_iommu_pgtable != AMD_IOMMU_V1) {
3655 		pr_warn("Forcing AMD IOMMU v1 page table due to SNP\n");
3656 		amd_iommu_pgtable = AMD_IOMMU_V1;
3657 	}
3658 
3659 	return 0;
3660 }
3661 #endif
3662