// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 *         Leo Duran <leo.duran@amd.com>
 */

#define pr_fmt(fmt)     "AMD-Vi: " fmt
#define dev_fmt(fmt)    pr_fmt(fmt)

#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/list.h>
#include <linux/bitmap.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <linux/interrupt.h>
#include <linux/msi.h>
#include <linux/amd-iommu.h>
#include <linux/export.h>
#include <linux/kmemleak.h>
#include <linux/mem_encrypt.h>
#include <asm/pci-direct.h>
#include <asm/iommu.h>
#include <asm/apic.h>
#include <asm/msidef.h>
#include <asm/gart.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>
#include <asm/io_apic.h>
#include <asm/irq_remapping.h>

#include <linux/crash_dump.h>

#include "amd_iommu.h"
#include "../irq_remapping.h"

/*
 * definitions for the ACPI scanning code
 */
#define IVRS_HEADER_LENGTH 48

#define ACPI_IVHD_TYPE_MAX_SUPPORTED	0x40
#define ACPI_IVMD_TYPE_ALL              0x20
#define ACPI_IVMD_TYPE                  0x21
#define ACPI_IVMD_TYPE_RANGE            0x22

#define IVHD_DEV_ALL                    0x01
#define IVHD_DEV_SELECT                 0x02
#define IVHD_DEV_SELECT_RANGE_START     0x03
#define IVHD_DEV_RANGE_END              0x04
#define IVHD_DEV_ALIAS                  0x42
#define IVHD_DEV_ALIAS_RANGE            0x43
#define IVHD_DEV_EXT_SELECT             0x46
#define IVHD_DEV_EXT_SELECT_RANGE       0x47
#define IVHD_DEV_SPECIAL		0x48
#define IVHD_DEV_ACPI_HID		0xf0

#define UID_NOT_PRESENT                 0
#define UID_IS_INTEGER                  1
#define UID_IS_CHARACTER                2

#define IVHD_SPECIAL_IOAPIC		1
#define IVHD_SPECIAL_HPET		2

#define IVHD_FLAG_HT_TUN_EN_MASK        0x01
#define IVHD_FLAG_PASSPW_EN_MASK        0x02
#define IVHD_FLAG_RESPASSPW_EN_MASK     0x04
#define IVHD_FLAG_ISOC_EN_MASK          0x08

#define IVMD_FLAG_EXCL_RANGE            0x08
#define IVMD_FLAG_IW                    0x04
#define IVMD_FLAG_IR                    0x02
#define IVMD_FLAG_UNITY_MAP             0x01

#define ACPI_DEVFLAG_INITPASS           0x01
#define ACPI_DEVFLAG_EXTINT             0x02
#define ACPI_DEVFLAG_NMI                0x04
#define ACPI_DEVFLAG_SYSMGT1            0x10
#define ACPI_DEVFLAG_SYSMGT2            0x20
#define ACPI_DEVFLAG_LINT0              0x40
#define ACPI_DEVFLAG_LINT1              0x80
#define ACPI_DEVFLAG_ATSDIS             0x10000000

#define LOOP_TIMEOUT	100000
/*
 * ACPI table definitions
 *
 * These data structures are laid over the table to parse the important values
 * out of it.
 */

extern const struct iommu_ops amd_iommu_ops;

/*
 * structure describing one IOMMU in the ACPI table. Typically followed by one
 * or more ivhd_entries.
 */
struct ivhd_header {
	u8 type;
	u8 flags;
	u16 length;
	u16 devid;
	u16 cap_ptr;
	u64 mmio_phys;
	u16 pci_seg;
	u16 info;
	u32 efr_attr;

	/* Following only valid on IVHD type 11h and 40h */
	u64 efr_reg; /* Exact copy of MMIO_EXT_FEATURES */
	u64 res;
} __attribute__((packed));

/*
 * A device entry describing which devices a specific IOMMU translates and
 * which requestor ids they use.
 */
struct ivhd_entry {
	u8 type;
	u16 devid;
	u8 flags;
	u32 ext;
	u32 hidh;
	u64 cid;
	u8 uidf;
	u8 uidl;
	u8 uid;
} __attribute__((packed));

/*
 * An AMD IOMMU memory definition structure. It defines things like exclusion
 * ranges for devices and regions that should be unity mapped.
 */
struct ivmd_header {
	u8 type;
	u8 flags;
	u16 length;
	u16 devid;
	u16 aux;
	u64 resv;
	u64 range_start;
	u64 range_length;
} __attribute__((packed));

bool amd_iommu_dump;
bool amd_iommu_irq_remap __read_mostly;

int amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
static int amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE;

static bool amd_iommu_detected;
static bool __initdata amd_iommu_disabled;
static int amd_iommu_target_ivhd_type;

u16 amd_iommu_last_bdf;			/* largest PCI device id we have
					   to handle */
LIST_HEAD(amd_iommu_unity_map);		/* a list of required unity mappings
					   we find in ACPI */
bool amd_iommu_unmap_flush;		/* if true, flush on every unmap */

LIST_HEAD(amd_iommu_list);		/* list of all AMD IOMMUs in the
					   system */

/* Array to assign indices to IOMMUs */
struct amd_iommu *amd_iommus[MAX_IOMMUS];

/* Number of IOMMUs present in the system */
static int amd_iommus_present;

/* IOMMUs have a non-present cache? */
bool amd_iommu_np_cache __read_mostly;
bool amd_iommu_iotlb_sup __read_mostly = true;

u32 amd_iommu_max_pasid __read_mostly = ~0;

bool amd_iommu_v2_present __read_mostly;
static bool amd_iommu_pc_present __read_mostly;

bool amd_iommu_force_isolation __read_mostly;

/*
 * Pointer to the device table which is shared by all AMD IOMMUs. It is
 * indexed by the PCI device id or the HT unit id and contains information
 * about the domain the device belongs to as well as the page table root
 * pointer.
 */
struct dev_table_entry *amd_iommu_dev_table;
/*
 * Pointer to a device table to which the contents of the old device table
 * will be copied. It is only used in the kdump kernel.
 */
static struct dev_table_entry *old_dev_tbl_cpy;

/*
 * The alias table is a driver specific data structure which contains the
 * mappings of the PCI device ids to the actual requestor ids on the IOMMU.
 * More than one device can share the same requestor id.
 */
u16 *amd_iommu_alias_table;

/*
 * The rlookup table is used to find the IOMMU which is responsible
 * for a specific device. It is also indexed by the PCI device id.
 */
struct amd_iommu **amd_iommu_rlookup_table;
EXPORT_SYMBOL(amd_iommu_rlookup_table);

/*
 * This table is used to find the irq remapping table for a given device id
 * quickly.
 */
struct irq_remap_table **irq_lookup_table;

/*
 * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap
 * to know which ones are already in use.
 */
unsigned long *amd_iommu_pd_alloc_bitmap;

static u32 dev_table_size;	/* size of the device table */
static u32 alias_table_size;	/* size of the alias table */
static u32 rlookup_table_size;	/* size of the rlookup table */

enum iommu_init_state {
	IOMMU_START_STATE,
	IOMMU_IVRS_DETECTED,
	IOMMU_ACPI_FINISHED,
	IOMMU_ENABLED,
	IOMMU_PCI_INIT,
	IOMMU_INTERRUPTS_EN,
	IOMMU_DMA_OPS,
	IOMMU_INITIALIZED,
	IOMMU_NOT_FOUND,
	IOMMU_INIT_ERROR,
	IOMMU_CMDLINE_DISABLED,
};

/* Early ioapic and hpet maps from kernel command line */
#define EARLY_MAP_SIZE		4
static struct devid_map __initdata early_ioapic_map[EARLY_MAP_SIZE];
static struct devid_map __initdata early_hpet_map[EARLY_MAP_SIZE];
static struct acpihid_map_entry __initdata early_acpihid_map[EARLY_MAP_SIZE];

static int __initdata early_ioapic_map_size;
static int __initdata early_hpet_map_size;
static int __initdata early_acpihid_map_size;

static bool __initdata cmdline_maps;
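
/*
 * The early_*_map arrays above are filled from kernel command line options
 * (parsed later in this file), e.g. ivrs_ioapic[ID]=BB:DD.F and
 * ivrs_hpet[ID]=BB:DD.F, which override the device ids firmware reports in
 * IVRS. (Example syntax taken from the kernel parameter documentation, not
 * from this excerpt.)
 */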

static enum iommu_init_state init_state = IOMMU_START_STATE;

static int amd_iommu_enable_interrupts(void);
static int __init iommu_go_to_state(enum iommu_init_state state);
static void init_device_table_dma(void);

static bool amd_iommu_pre_enabled = true;

bool translation_pre_enabled(struct amd_iommu *iommu)
{
	return (iommu->flags & AMD_IOMMU_FLAG_TRANS_PRE_ENABLED);
}
EXPORT_SYMBOL(translation_pre_enabled);

static void clear_translation_pre_enabled(struct amd_iommu *iommu)
{
	iommu->flags &= ~AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
}

static void init_translation_status(struct amd_iommu *iommu)
{
	u64 ctrl;

	ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	if (ctrl & (1<<CONTROL_IOMMU_EN))
		iommu->flags |= AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
}

static inline void update_last_devid(u16 devid)
{
	if (devid > amd_iommu_last_bdf)
		amd_iommu_last_bdf = devid;
}

static inline unsigned long tbl_size(int entry_size)
{
	unsigned shift = PAGE_SHIFT +
			 get_order(((int)amd_iommu_last_bdf + 1) * entry_size);

	return 1UL << shift;
}
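
/*
 * Worked example (illustrative only): with amd_iommu_last_bdf == 0xffff and
 * a 32-byte device table entry, (0xffff + 1) * 32 is 2 MiB, get_order()
 * yields 9, and tbl_size() returns 1UL << (PAGE_SHIFT + 9) == 2 MiB on a
 * 4 KiB page kernel. Tables are thus rounded up to a power-of-two number of
 * pages.
 */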

int amd_iommu_get_num_iommus(void)
{
	return amd_iommus_present;
}

/* Access to l1 and l2 indexed register spaces */

static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address)
{
	u32 val;

	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
	pci_read_config_dword(iommu->dev, 0xfc, &val);
	return val;
}

static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val)
{
	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31));
	pci_write_config_dword(iommu->dev, 0xfc, val);
	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
}

static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address)
{
	u32 val;

	pci_write_config_dword(iommu->dev, 0xf0, address);
	pci_read_config_dword(iommu->dev, 0xf4, &val);
	return val;
}

static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val)
{
	pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8));
	pci_write_config_dword(iommu->dev, 0xf4, val);
}
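
/*
 * Note on the pattern above (descriptive, based on the code itself): the L1
 * and L2 register banks are reached indirectly through address/data pairs in
 * PCI config space (0xf8/0xfc for L1, 0xf0/0xf4 for L2). A write to the
 * address port selects the indexed register; setting the write-enable bit
 * (bit 31 for L1, bit 8 for L2) makes the following data-port write stick.
 */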

/****************************************************************************
 *
 * AMD IOMMU MMIO register space handling functions
 *
 * These functions are used to program the IOMMU device registers in
 * MMIO space required for that driver.
 *
 ****************************************************************************/

/*
 * This function sets the exclusion range in the IOMMU. DMA accesses to the
 * exclusion range are passed through untranslated.
 */
static void iommu_set_exclusion_range(struct amd_iommu *iommu)
{
	u64 start = iommu->exclusion_start & PAGE_MASK;
	u64 limit = (start + iommu->exclusion_length - 1) & PAGE_MASK;
	u64 entry;

	if (!iommu->exclusion_start)
		return;

	entry = start | MMIO_EXCL_ENABLE_MASK;
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
			&entry, sizeof(entry));

	entry = limit;
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
			&entry, sizeof(entry));
}

/* Programs the physical address of the device table into the IOMMU hardware */
static void iommu_set_device_table(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->mmio_base == NULL);

	entry = iommu_virt_to_phys(amd_iommu_dev_table);
	entry |= (dev_table_size >> 12) - 1;
	memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET,
			&entry, sizeof(entry));
}

/* Generic functions to enable/disable certain features of the IOMMU. */
static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
{
	u64 ctrl;

	ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl |= (1ULL << bit);
	writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
{
	u64 ctrl;

	ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl &= ~(1ULL << bit);
	writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

static void iommu_set_inv_tlb_timeout(struct amd_iommu *iommu, int timeout)
{
	u64 ctrl;

	ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl &= ~CTRL_INV_TO_MASK;
	ctrl |= (timeout << CONTROL_INV_TIMEOUT) & CTRL_INV_TO_MASK;
	writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}
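
/*
 * Encoding note (an assumption based on the AMD IOMMU specification, not on
 * anything in this file): the 3-bit timeout value written above selects an
 * exponential scale (0 = no timeout, 1 = 1ms, up through 6 = 100s), so
 * callers pass the encoded exponent rather than a time in milliseconds.
 */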

/* Function to enable the hardware */
static void iommu_enable(struct amd_iommu *iommu)
{
	iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
}

static void iommu_disable(struct amd_iommu *iommu)
{
	if (!iommu->mmio_base)
		return;

	/* Disable command buffer */
	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

	/* Disable event logging and event interrupts */
	iommu_feature_disable(iommu, CONTROL_EVT_INT_EN);
	iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);

	/* Disable IOMMU GA_LOG */
	iommu_feature_disable(iommu, CONTROL_GALOG_EN);
	iommu_feature_disable(iommu, CONTROL_GAINT_EN);

	/* Disable IOMMU hardware itself */
	iommu_feature_disable(iommu, CONTROL_IOMMU_EN);
}

/*
 * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in
 * the system has one.
 */
static u8 __iomem * __init iommu_map_mmio_space(u64 address, u64 end)
{
	if (!request_mem_region(address, end, "amd_iommu")) {
		pr_err("Can not reserve memory region %llx-%llx for mmio\n",
			address, end);
		pr_err("This is a BIOS bug. Please contact your hardware vendor\n");
		return NULL;
	}

	return (u8 __iomem *)ioremap(address, end);
}

static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
{
	if (iommu->mmio_base)
		iounmap(iommu->mmio_base);
	release_mem_region(iommu->mmio_phys, iommu->mmio_phys_end);
}

static inline u32 get_ivhd_header_size(struct ivhd_header *h)
{
	u32 size = 0;

	switch (h->type) {
	case 0x10:
		size = 24;
		break;
	case 0x11:
	case 0x40:
		size = 40;
		break;
	}
	return size;
}

/****************************************************************************
 *
 * The functions below belong to the first pass of AMD IOMMU ACPI table
 * parsing. In this pass we try to find out the highest device id this
 * code has to handle. Upon this information the size of the shared data
 * structures is determined later.
 *
 ****************************************************************************/

/*
 * This function calculates the length of a given IVHD entry
 */
static inline int ivhd_entry_length(u8 *ivhd)
{
	u32 type = ((struct ivhd_entry *)ivhd)->type;

	if (type < 0x80) {
		return 0x04 << (*ivhd >> 6);
	} else if (type == IVHD_DEV_ACPI_HID) {
		/* For ACPI_HID, offset 21 is uid len */
		return *((u8 *)ivhd + 21) + 22;
	}
	return 0;
}
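
/*
 * Illustrative examples of the length encoding above: bits 7:6 of the type
 * byte select the size, so type 0x02 (DEV_SELECT, top bits 00b) is a 4-byte
 * entry, while type 0x42 (DEV_ALIAS, top bits 01b) is an 8-byte entry;
 * variable-length ACPI_HID entries (0xf0) are handled separately.
 */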

/*
 * After reading the highest device id from the IOMMU PCI capability header
 * this function checks whether there is a higher device id defined in the
 * ACPI table.
 */
static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
{
	u8 *p = (void *)h, *end = (void *)h;
	struct ivhd_entry *dev;

	u32 ivhd_size = get_ivhd_header_size(h);

	if (!ivhd_size) {
		pr_err("Unsupported IVHD type %#x\n", h->type);
		return -EINVAL;
	}

	p += ivhd_size;
	end += h->length;

	while (p < end) {
		dev = (struct ivhd_entry *)p;
		switch (dev->type) {
		case IVHD_DEV_ALL:
			/* Use maximum BDF value for DEV_ALL */
			update_last_devid(0xffff);
			break;
		case IVHD_DEV_SELECT:
		case IVHD_DEV_RANGE_END:
		case IVHD_DEV_ALIAS:
		case IVHD_DEV_EXT_SELECT:
			/* all the above subfield types refer to device ids */
			update_last_devid(dev->devid);
			break;
		default:
			break;
		}
		p += ivhd_entry_length(p);
	}

	WARN_ON(p != end);

	return 0;
}

static int __init check_ivrs_checksum(struct acpi_table_header *table)
{
	int i;
	u8 checksum = 0, *p = (u8 *)table;

	for (i = 0; i < table->length; ++i)
		checksum += p[i];
	if (checksum != 0) {
		/* ACPI table corrupt */
		pr_err(FW_BUG "IVRS invalid checksum\n");
		return -ENODEV;
	}

	return 0;
}

/*
 * Iterate over all IVHD entries in the ACPI table and find the highest device
 * id which we need to handle. This is the first of three functions which
 * parse the ACPI table; the table checksum is verified beforehand in
 * check_ivrs_checksum().
 */
static int __init find_last_devid_acpi(struct acpi_table_header *table)
{
	u8 *p = (u8 *)table, *end = (u8 *)table;
	struct ivhd_header *h;

	p += IVRS_HEADER_LENGTH;

	end += table->length;
	while (p < end) {
		h = (struct ivhd_header *)p;
		if (h->type == amd_iommu_target_ivhd_type) {
			int ret = find_last_devid_from_ivhd(h);

			if (ret)
				return ret;
		}
		p += h->length;
	}
	WARN_ON(p != end);

	return 0;
}

/****************************************************************************
 *
 * The following functions belong to the code path which parses the ACPI table
 * the second time. In this ACPI parsing iteration we allocate IOMMU specific
 * data structures, initialize the device/alias/rlookup table and also
 * basically initialize the hardware.
 *
 ****************************************************************************/

/*
 * Allocates the command buffer. This buffer is per AMD IOMMU. We can
 * write commands to that buffer later and the IOMMU will execute them
 * asynchronously.
 */
static int __init alloc_command_buffer(struct amd_iommu *iommu)
{
	iommu->cmd_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						  get_order(CMD_BUFFER_SIZE));

	return iommu->cmd_buf ? 0 : -ENOMEM;
}

/*
 * This function resets the command buffer if the IOMMU stopped fetching
 * commands from it.
 */
void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu)
{
	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

	writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
	iommu->cmd_buf_head = 0;
	iommu->cmd_buf_tail = 0;

	iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
}
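
/*
 * Head/tail semantics (descriptive note): the command buffer is a ring; the
 * driver enqueues commands at the tail pointer while the hardware fetches
 * from the head, so resetting both to zero above leaves the ring empty.
 */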

/*
 * This function writes the command buffer address to the hardware and
 * enables it.
 */
static void iommu_enable_command_buffer(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->cmd_buf == NULL);

	entry = iommu_virt_to_phys(iommu->cmd_buf);
	entry |= MMIO_CMD_SIZE_512;

	memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
		    &entry, sizeof(entry));

	amd_iommu_reset_cmd_buffer(iommu);
}

/*
 * This function disables the command buffer
 */
static void iommu_disable_command_buffer(struct amd_iommu *iommu)
{
	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
}

static void __init free_command_buffer(struct amd_iommu *iommu)
{
	free_pages((unsigned long)iommu->cmd_buf, get_order(CMD_BUFFER_SIZE));
}

/* allocates the memory where the IOMMU will log its events to */
static int __init alloc_event_buffer(struct amd_iommu *iommu)
{
	iommu->evt_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						  get_order(EVT_BUFFER_SIZE));

	return iommu->evt_buf ? 0 : -ENOMEM;
}

static void iommu_enable_event_buffer(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->evt_buf == NULL);

	entry = iommu_virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;

	memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
		    &entry, sizeof(entry));

	/* set head and tail to zero manually */
	writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);

	iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
}

/*
 * This function disables the event log buffer
 */
static void iommu_disable_event_buffer(struct amd_iommu *iommu)
{
	iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
}

static void __init free_event_buffer(struct amd_iommu *iommu)
{
	free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
}

/* allocates the memory where the IOMMU will log peripheral page requests */
static int __init alloc_ppr_log(struct amd_iommu *iommu)
{
	iommu->ppr_log = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						  get_order(PPR_LOG_SIZE));

	return iommu->ppr_log ? 0 : -ENOMEM;
}

static void iommu_enable_ppr_log(struct amd_iommu *iommu)
{
	u64 entry;

	if (iommu->ppr_log == NULL)
		return;

	entry = iommu_virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512;

	memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET,
		    &entry, sizeof(entry));

	/* set head and tail to zero manually */
	writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);

	iommu_feature_enable(iommu, CONTROL_PPRLOG_EN);
	iommu_feature_enable(iommu, CONTROL_PPR_EN);
}

static void __init free_ppr_log(struct amd_iommu *iommu)
{
	free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE));
}

static void free_ga_log(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
	free_pages((unsigned long)iommu->ga_log, get_order(GA_LOG_SIZE));
	free_pages((unsigned long)iommu->ga_log_tail, get_order(8));
#endif
}

static int iommu_ga_log_enable(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
	u32 status, i;

	if (!iommu->ga_log)
		return -EINVAL;

	status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);

	/* Check if already running */
	if (status & (MMIO_STATUS_GALOG_RUN_MASK))
		return 0;

	iommu_feature_enable(iommu, CONTROL_GAINT_EN);
	iommu_feature_enable(iommu, CONTROL_GALOG_EN);

	for (i = 0; i < LOOP_TIMEOUT; ++i) {
		status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
		if (status & (MMIO_STATUS_GALOG_RUN_MASK))
			break;
	}

	if (i >= LOOP_TIMEOUT)
		return -EINVAL;
#endif /* CONFIG_IRQ_REMAP */
	return 0;
}

#ifdef CONFIG_IRQ_REMAP
static int iommu_init_ga_log(struct amd_iommu *iommu)
{
	u64 entry;

	if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
		return 0;

	iommu->ga_log = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					get_order(GA_LOG_SIZE));
	if (!iommu->ga_log)
		goto err_out;

	iommu->ga_log_tail = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					get_order(8));
	if (!iommu->ga_log_tail)
		goto err_out;

	entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512;
	memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET,
		    &entry, sizeof(entry));
	entry = (iommu_virt_to_phys(iommu->ga_log_tail) &
		 (BIT_ULL(52)-1)) & ~7ULL;
	memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET,
		    &entry, sizeof(entry));
	writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_GA_TAIL_OFFSET);

	return 0;
err_out:
	free_ga_log(iommu);
	return -EINVAL;
}
#endif /* CONFIG_IRQ_REMAP */

static int iommu_init_ga(struct amd_iommu *iommu)
{
	int ret = 0;

#ifdef CONFIG_IRQ_REMAP
	/* Note: We have already checked GASup from the IVRS table.
	 *       Now, we need to make sure that GAMSup is set.
	 */
	if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
	    !iommu_feature(iommu, FEATURE_GAM_VAPIC))
		amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;

	ret = iommu_init_ga_log(iommu);
#endif /* CONFIG_IRQ_REMAP */

	return ret;
}

static void iommu_enable_xt(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
	/*
	 * XT mode (32-bit APIC destination ID) requires
	 * GA mode (128-bit IRTE support) as a prerequisite.
	 */
	if (AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir) &&
	    amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
		iommu_feature_enable(iommu, CONTROL_XT_EN);
#endif /* CONFIG_IRQ_REMAP */
}

static void iommu_enable_gt(struct amd_iommu *iommu)
{
	if (!iommu_feature(iommu, FEATURE_GT))
		return;

	iommu_feature_enable(iommu, CONTROL_GT_EN);
}

/* sets a specific bit in the device table entry. */
static void set_dev_entry_bit(u16 devid, u8 bit)
{
	int i = (bit >> 6) & 0x03;
	int _bit = bit & 0x3f;

	amd_iommu_dev_table[devid].data[i] |= (1UL << _bit);
}
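
/*
 * Worked example (illustrative only): for bit 96 the index math above gives
 * i = (96 >> 6) & 0x03 == 1 and _bit = 96 & 0x3f == 32, i.e. bit 32 of
 * data[1]; the 256-bit device table entry is addressed as four 64-bit words.
 */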

static int get_dev_entry_bit(u16 devid, u8 bit)
{
	int i = (bit >> 6) & 0x03;
	int _bit = bit & 0x3f;

	return (amd_iommu_dev_table[devid].data[i] & (1UL << _bit)) >> _bit;
}

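/*
 * In a kdump kernel the IOMMU may still be translating with the old kernel's
 * device table. copy_device_table() below snapshots that table (including
 * domain ids, DTE valid bits and interrupt-remapping setup) into
 * old_dev_tbl_cpy so translation stays consistent until this kernel takes
 * over. It returns true on success and false if the old table cannot be
 * trusted.
 */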
static bool copy_device_table(void)
{
	u64 int_ctl, int_tab_len, entry = 0, last_entry = 0;
	struct dev_table_entry *old_devtb = NULL;
	u32 lo, hi, devid, old_devtb_size;
	phys_addr_t old_devtb_phys;
	struct amd_iommu *iommu;
	u16 dom_id, dte_v, irq_v;
	gfp_t gfp_flag;
	u64 tmp;

	if (!amd_iommu_pre_enabled)
		return false;

	pr_warn("Translation is already enabled - trying to copy translation structures\n");
	for_each_iommu(iommu) {
		/* All IOMMUs should use the same device table with the same size */
		lo = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET);
		hi = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET + 4);
		entry = (((u64) hi) << 32) + lo;
		if (last_entry && last_entry != entry) {
			pr_err("IOMMU:%d should use the same dev table as others!\n",
				iommu->index);
			return false;
		}
		last_entry = entry;

		old_devtb_size = ((entry & ~PAGE_MASK) + 1) << 12;
		if (old_devtb_size != dev_table_size) {
			pr_err("The device table size of IOMMU:%d is not expected!\n",
				iommu->index);
			return false;
		}
	}

	/*
	 * When SME is enabled in the first kernel, the entry includes the
	 * memory encryption mask (sme_me_mask); we must remove the memory
	 * encryption mask to obtain the true physical address in the kdump
	 * kernel.
	 */
	old_devtb_phys = __sme_clr(entry) & PAGE_MASK;

	if (old_devtb_phys >= 0x100000000ULL) {
		pr_err("The address of old device table is above 4G, not trustworthy!\n");
		return false;
	}
	old_devtb = (sme_active() && is_kdump_kernel())
		    ? (__force void *)ioremap_encrypted(old_devtb_phys,
							dev_table_size)
		    : memremap(old_devtb_phys, dev_table_size, MEMREMAP_WB);

	if (!old_devtb)
		return false;

	gfp_flag = GFP_KERNEL | __GFP_ZERO | GFP_DMA32;
	old_dev_tbl_cpy = (void *)__get_free_pages(gfp_flag,
				get_order(dev_table_size));
	if (old_dev_tbl_cpy == NULL) {
		pr_err("Failed to allocate memory for copying old device table!\n");
		return false;
	}

	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
		old_dev_tbl_cpy[devid] = old_devtb[devid];
		dom_id = old_devtb[devid].data[1] & DEV_DOMID_MASK;
		dte_v = old_devtb[devid].data[0] & DTE_FLAG_V;

		if (dte_v && dom_id) {
			old_dev_tbl_cpy[devid].data[0] = old_devtb[devid].data[0];
			old_dev_tbl_cpy[devid].data[1] = old_devtb[devid].data[1];
			__set_bit(dom_id, amd_iommu_pd_alloc_bitmap);
			/* If gcr3 table existed, mask it out */
			if (old_devtb[devid].data[0] & DTE_FLAG_GV) {
				tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B;
				tmp |= DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C;
				old_dev_tbl_cpy[devid].data[1] &= ~tmp;
				tmp = DTE_GCR3_VAL_A(~0ULL) << DTE_GCR3_SHIFT_A;
				tmp |= DTE_FLAG_GV;
				old_dev_tbl_cpy[devid].data[0] &= ~tmp;
			}
		}

		irq_v = old_devtb[devid].data[2] & DTE_IRQ_REMAP_ENABLE;
		int_ctl = old_devtb[devid].data[2] & DTE_IRQ_REMAP_INTCTL_MASK;
		int_tab_len = old_devtb[devid].data[2] & DTE_IRQ_TABLE_LEN_MASK;
		if (irq_v && (int_ctl || int_tab_len)) {
			if ((int_ctl != DTE_IRQ_REMAP_INTCTL) ||
			    (int_tab_len != DTE_IRQ_TABLE_LEN)) {
				pr_err("Wrong old irq remapping flag: %#x\n", devid);
				return false;
			}

			old_dev_tbl_cpy[devid].data[2] = old_devtb[devid].data[2];
		}
	}
	memunmap(old_devtb);

	return true;
}

void amd_iommu_apply_erratum_63(u16 devid)
{
	int sysmgt;

	sysmgt = get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1) |
		 (get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2) << 1);

	if (sysmgt == 0x01)
		set_dev_entry_bit(devid, DEV_ENTRY_IW);
}

/* Writes the specific IOMMU for a device into the rlookup table */
static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid)
{
	amd_iommu_rlookup_table[devid] = iommu;
}

/*
 * This function takes the device specific flags read from the ACPI
 * table and sets up the device table entry with that information
 */
static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
					   u16 devid, u32 flags, u32 ext_flags)
{
	if (flags & ACPI_DEVFLAG_INITPASS)
		set_dev_entry_bit(devid, DEV_ENTRY_INIT_PASS);
	if (flags & ACPI_DEVFLAG_EXTINT)
		set_dev_entry_bit(devid, DEV_ENTRY_EINT_PASS);
	if (flags & ACPI_DEVFLAG_NMI)
		set_dev_entry_bit(devid, DEV_ENTRY_NMI_PASS);
	if (flags & ACPI_DEVFLAG_SYSMGT1)
		set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1);
	if (flags & ACPI_DEVFLAG_SYSMGT2)
		set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2);
	if (flags & ACPI_DEVFLAG_LINT0)
		set_dev_entry_bit(devid, DEV_ENTRY_LINT0_PASS);
	if (flags & ACPI_DEVFLAG_LINT1)
		set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS);

	amd_iommu_apply_erratum_63(devid);

	set_iommu_for_device(iommu, devid);
}

int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line)
{
	struct devid_map *entry;
	struct list_head *list;

	if (type == IVHD_SPECIAL_IOAPIC)
		list = &ioapic_map;
	else if (type == IVHD_SPECIAL_HPET)
		list = &hpet_map;
	else
		return -EINVAL;

	list_for_each_entry(entry, list, list) {
		if (!(entry->id == id && entry->cmd_line))
			continue;

		pr_info("Command-line override present for %s id %d - ignoring\n",
			type == IVHD_SPECIAL_IOAPIC ? "IOAPIC" : "HPET", id);

		*devid = entry->devid;

		return 0;
	}

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->id	= id;
	entry->devid	= *devid;
	entry->cmd_line	= cmd_line;

	list_add_tail(&entry->list, list);

	return 0;
}
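
/*
 * Precedence note (derived from the lookup above): an entry created from the
 * command line (cmd_line == true) always wins. When IVRS later describes the
 * same IOAPIC/HPET id, the user-supplied devid is kept and the firmware
 * value is ignored.
 */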

static int __init add_acpi_hid_device(u8 *hid, u8 *uid, u16 *devid,
				      bool cmd_line)
{
	struct acpihid_map_entry *entry;
	struct list_head *list = &acpihid_map;

	list_for_each_entry(entry, list, list) {
		if (strcmp(entry->hid, hid) ||
		    (*uid && *entry->uid && strcmp(entry->uid, uid)) ||
		    !entry->cmd_line)
			continue;

		pr_info("Command-line override for hid:%s uid:%s\n",
			hid, uid);
		*devid = entry->devid;
		return 0;
	}

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	memcpy(entry->uid, uid, strlen(uid));
	memcpy(entry->hid, hid, strlen(hid));
	entry->devid = *devid;
	entry->cmd_line	= cmd_line;
	entry->root_devid = (entry->devid & (~0x7));

	pr_info("%s, add hid:%s, uid:%s, rdevid:%d\n",
		entry->cmd_line ? "cmd" : "ivrs",
		entry->hid, entry->uid, entry->root_devid);

	list_add_tail(&entry->list, list);
	return 0;
}

static int __init add_early_maps(void)
{
	int i, ret;

	for (i = 0; i < early_ioapic_map_size; ++i) {
		ret = add_special_device(IVHD_SPECIAL_IOAPIC,
					 early_ioapic_map[i].id,
					 &early_ioapic_map[i].devid,
					 early_ioapic_map[i].cmd_line);
		if (ret)
			return ret;
	}

	for (i = 0; i < early_hpet_map_size; ++i) {
		ret = add_special_device(IVHD_SPECIAL_HPET,
					 early_hpet_map[i].id,
					 &early_hpet_map[i].devid,
					 early_hpet_map[i].cmd_line);
		if (ret)
			return ret;
	}

	for (i = 0; i < early_acpihid_map_size; ++i) {
		ret = add_acpi_hid_device(early_acpihid_map[i].hid,
					  early_acpihid_map[i].uid,
					  &early_acpihid_map[i].devid,
					  early_acpihid_map[i].cmd_line);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * Takes a pointer to an AMD IOMMU entry in the ACPI table and
 * initializes the hardware and our data structures with it.
 */
static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
					struct ivhd_header *h)
{
	u8 *p = (u8 *)h;
	u8 *end = p, flags = 0;
	u16 devid = 0, devid_start = 0, devid_to = 0;
	u32 dev_i, ext_flags = 0;
	bool alias = false;
	struct ivhd_entry *e;
	u32 ivhd_size;
	int ret;

	ret = add_early_maps();
	if (ret)
		return ret;

	amd_iommu_apply_ivrs_quirks();

	/*
	 * First save the recommended feature enable bits from ACPI
	 */
	iommu->acpi_flags = h->flags;

	/*
	 * Done. Now parse the device entries
	 */
	ivhd_size = get_ivhd_header_size(h);
	if (!ivhd_size) {
		pr_err("Unsupported IVHD type %#x\n", h->type);
		return -EINVAL;
	}

	p += ivhd_size;

	end += h->length;

	while (p < end) {
		e = (struct ivhd_entry *)p;
		switch (e->type) {
		case IVHD_DEV_ALL:

			DUMP_printk("  DEV_ALL\t\t\tflags: %02x\n", e->flags);

			for (dev_i = 0; dev_i <= amd_iommu_last_bdf; ++dev_i)
				set_dev_entry_from_acpi(iommu, dev_i, e->flags, 0);
			break;
		case IVHD_DEV_SELECT:

			DUMP_printk("  DEV_SELECT\t\t\t devid: %02x:%02x.%x "
				    "flags: %02x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags);

			devid = e->devid;
			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
			break;
		case IVHD_DEV_SELECT_RANGE_START:

			DUMP_printk("  DEV_SELECT_RANGE_START\t "
				    "devid: %02x:%02x.%x flags: %02x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags);

			devid_start = e->devid;
			flags = e->flags;
			ext_flags = 0;
			alias = false;
			break;
		case IVHD_DEV_ALIAS:

			DUMP_printk("  DEV_ALIAS\t\t\t devid: %02x:%02x.%x "
				    "flags: %02x devid_to: %02x:%02x.%x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags,
				    PCI_BUS_NUM(e->ext >> 8),
				    PCI_SLOT(e->ext >> 8),
				    PCI_FUNC(e->ext >> 8));

			devid = e->devid;
			devid_to = e->ext >> 8;
			set_dev_entry_from_acpi(iommu, devid   , e->flags, 0);
			set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0);
			amd_iommu_alias_table[devid] = devid_to;
			break;
		case IVHD_DEV_ALIAS_RANGE:

			DUMP_printk("  DEV_ALIAS_RANGE\t\t "
				    "devid: %02x:%02x.%x flags: %02x "
				    "devid_to: %02x:%02x.%x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags,
				    PCI_BUS_NUM(e->ext >> 8),
				    PCI_SLOT(e->ext >> 8),
				    PCI_FUNC(e->ext >> 8));

			devid_start = e->devid;
			flags = e->flags;
			devid_to = e->ext >> 8;
			ext_flags = 0;
			alias = true;
			break;
		case IVHD_DEV_EXT_SELECT:

			DUMP_printk("  DEV_EXT_SELECT\t\t devid: %02x:%02x.%x "
				    "flags: %02x ext: %08x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags, e->ext);

			devid = e->devid;
			set_dev_entry_from_acpi(iommu, devid, e->flags,
						e->ext);
			break;
		case IVHD_DEV_EXT_SELECT_RANGE:

			DUMP_printk("  DEV_EXT_SELECT_RANGE\t devid: "
				    "%02x:%02x.%x flags: %02x ext: %08x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags, e->ext);

			devid_start = e->devid;
			flags = e->flags;
			ext_flags = e->ext;
			alias = false;
			break;
		case IVHD_DEV_RANGE_END:

			DUMP_printk("  DEV_RANGE_END\t\t devid: %02x:%02x.%x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid));

			devid = e->devid;
			for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
				if (alias) {
					amd_iommu_alias_table[dev_i] = devid_to;
					set_dev_entry_from_acpi(iommu,
						devid_to, flags, ext_flags);
				}
				set_dev_entry_from_acpi(iommu, dev_i,
							flags, ext_flags);
			}
			break;
		case IVHD_DEV_SPECIAL: {
			u8 handle, type;
			const char *var;
			u16 devid;
			int ret;

			handle = e->ext & 0xff;
			devid  = (e->ext >>  8) & 0xffff;
			type   = (e->ext >> 24) & 0xff;

			if (type == IVHD_SPECIAL_IOAPIC)
				var = "IOAPIC";
			else if (type == IVHD_SPECIAL_HPET)
				var = "HPET";
			else
				var = "UNKNOWN";

			DUMP_printk("  DEV_SPECIAL(%s[%d])\t\tdevid: %02x:%02x.%x\n",
				    var, (int)handle,
				    PCI_BUS_NUM(devid),
				    PCI_SLOT(devid),
				    PCI_FUNC(devid));

			ret = add_special_device(type, handle, &devid, false);
			if (ret)
				return ret;

			/*
			 * add_special_device might update the devid in case a
			 * command-line override is present. So call
			 * set_dev_entry_from_acpi after add_special_device.
			 */
			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);

			break;
		}
		case IVHD_DEV_ACPI_HID: {
			u16 devid;
			u8 hid[ACPIHID_HID_LEN];
			u8 uid[ACPIHID_UID_LEN];
			int ret;

			if (h->type != 0x40) {
				pr_err(FW_BUG "Invalid IVHD device type %#x\n",
				       e->type);
				break;
			}

			memcpy(hid, (u8 *)(&e->ext), ACPIHID_HID_LEN - 1);
			hid[ACPIHID_HID_LEN - 1] = '\0';

			if (!(*hid)) {
				pr_err(FW_BUG "Invalid HID.\n");
				break;
			}

			uid[0] = '\0';
			switch (e->uidf) {
			case UID_NOT_PRESENT:

				if (e->uidl != 0)
					pr_warn(FW_BUG "Invalid UID length.\n");

				break;
			case UID_IS_INTEGER:

				sprintf(uid, "%d", e->uid);

				break;
			case UID_IS_CHARACTER:

				memcpy(uid, &e->uid, e->uidl);
				uid[e->uidl] = '\0';

				break;
			default:
				break;
			}

			devid = e->devid;
			DUMP_printk("  DEV_ACPI_HID(%s[%s])\t\tdevid: %02x:%02x.%x\n",
				    hid, uid,
				    PCI_BUS_NUM(devid),
				    PCI_SLOT(devid),
				    PCI_FUNC(devid));

			flags = e->flags;

			ret = add_acpi_hid_device(hid, uid, &devid, false);
			if (ret)
				return ret;

			/*
			 * add_acpi_hid_device might update the devid in case a
			 * command-line override is present. So call
			 * set_dev_entry_from_acpi after add_acpi_hid_device.
			 */
			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);

			break;
		}
		default:
			break;
		}

		p += ivhd_entry_length(p);
	}

	return 0;
}
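
/*
 * Parser state note (derived from the loop above): the *_RANGE_START and
 * ALIAS/EXT range entries only latch devid_start, flags, ext_flags and the
 * alias target; nothing is written to the device table until the matching
 * DEV_RANGE_END entry arrives and the whole [devid_start, devid] range is
 * applied in one pass.
 */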

static void __init free_iommu_one(struct amd_iommu *iommu)
{
	free_command_buffer(iommu);
	free_event_buffer(iommu);
	free_ppr_log(iommu);
	free_ga_log(iommu);
	iommu_unmap_mmio_space(iommu);
}

static void __init free_iommu_all(void)
{
	struct amd_iommu *iommu, *next;

	for_each_iommu_safe(iommu, next) {
		list_del(&iommu->list);
		free_iommu_one(iommu);
		kfree(iommu);
	}
}

/*
 * Family15h Model 10h-1fh erratum 746 (IOMMU Logging May Stall Translations)
 * Workaround:
 *     BIOS should disable L2B miscellaneous clock gating by setting
 *     L2_L2B_CK_GATE_CONTROL[CKGateL2BMiscDisable](D0F2xF4_x90[2]) = 1b
 */
static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
{
	u32 value;

	if ((boot_cpu_data.x86 != 0x15) ||
	    (boot_cpu_data.x86_model < 0x10) ||
	    (boot_cpu_data.x86_model > 0x1f))
		return;

	pci_write_config_dword(iommu->dev, 0xf0, 0x90);
	pci_read_config_dword(iommu->dev, 0xf4, &value);

	if (value & BIT(2))
		return;

	/* Select NB indirect register 0x90 and enable writing */
	pci_write_config_dword(iommu->dev, 0xf0, 0x90 | (1 << 8));

	pci_write_config_dword(iommu->dev, 0xf4, value | 0x4);
	pci_info(iommu->dev, "Applying erratum 746 workaround\n");

	/* Clear the enable writing bit */
	pci_write_config_dword(iommu->dev, 0xf0, 0x90);
}

/*
 * Family15h Model 30h-3fh (IOMMU Mishandles ATS Write Permission)
 * Workaround:
 *     BIOS should enable ATS write permission check by setting
 *     L2_DEBUG_3[AtsIgnoreIWDis](D0F2xF4_x47[0]) = 1b
 */
static void amd_iommu_ats_write_check_workaround(struct amd_iommu *iommu)
{
	u32 value;

	if ((boot_cpu_data.x86 != 0x15) ||
	    (boot_cpu_data.x86_model < 0x30) ||
	    (boot_cpu_data.x86_model > 0x3f))
		return;

	/* Test L2_DEBUG_3[AtsIgnoreIWDis] == 1 */
	value = iommu_read_l2(iommu, 0x47);

	if (value & BIT(0))
		return;

	/* Set L2_DEBUG_3[AtsIgnoreIWDis] = 1 */
	iommu_write_l2(iommu, 0x47, value | BIT(0));

	pci_info(iommu->dev, "Applying ATS write check workaround\n");
}

/*
 * This function glues the initialization function for one IOMMU
 * together and also allocates the command buffer and programs the
 * hardware. It does NOT enable the IOMMU. This is done afterwards.
 */
static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
{
	int ret;

	raw_spin_lock_init(&iommu->lock);

	/* Add IOMMU to internal data structures */
	list_add_tail(&iommu->list, &amd_iommu_list);
	iommu->index = amd_iommus_present++;

	if (unlikely(iommu->index >= MAX_IOMMUS)) {
		WARN(1, "System has more IOMMUs than supported by this driver\n");
		return -ENOSYS;
	}

	/* Index is fine - add IOMMU to the array */
	amd_iommus[iommu->index] = iommu;

	/*
	 * Copy data from ACPI table entry to the iommu struct
	 */
	iommu->devid   = h->devid;
	iommu->cap_ptr = h->cap_ptr;
	iommu->pci_seg = h->pci_seg;
	iommu->mmio_phys = h->mmio_phys;

	switch (h->type) {
	case 0x10:
		/* Check if IVHD EFR contains proper max banks/counters */
		if ((h->efr_attr != 0) &&
		    ((h->efr_attr & (0xF << 13)) != 0) &&
		    ((h->efr_attr & (0x3F << 17)) != 0))
			iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
		else
			iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;

		/*
		 * Note: GA (128-bit IRTE) mode requires cmpxchg16b support.
		 * GAM also requires GA mode. Therefore, we need to
		 * check cmpxchg16b support before enabling it.
		 */
		if (!boot_cpu_has(X86_FEATURE_CX16) ||
		    ((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0))
			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
		break;
	case 0x11:
	case 0x40:
		if (h->efr_reg & (1 << 9))
			iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
		else
			iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;

		/*
		 * Note: GA (128-bit IRTE) mode requires cmpxchg16b support.
		 * XT and GAM also require GA mode. Therefore, we need to
		 * check cmpxchg16b support before enabling them.
		 */
		if (!boot_cpu_has(X86_FEATURE_CX16) ||
		    ((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0)) {
			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
			break;
		}

		/*
		 * Note: Since iommu_update_intcapxt() leverages
		 * the IOMMU MMIO access to MSI capability block registers
		 * for MSI address lo/hi/data, we need to check both
		 * EFR[XtSup] and EFR[MsiCapMmioSup] for x2APIC support.
		 */
		if ((h->efr_reg & BIT(IOMMU_EFR_XTSUP_SHIFT)) &&
		    (h->efr_reg & BIT(IOMMU_EFR_MSICAPMMIOSUP_SHIFT)))
			amd_iommu_xt_mode = IRQ_REMAP_X2APIC_MODE;
		break;
	default:
		return -EINVAL;
	}

	iommu->mmio_base = iommu_map_mmio_space(iommu->mmio_phys,
						iommu->mmio_phys_end);
	if (!iommu->mmio_base)
		return -ENOMEM;

	if (alloc_command_buffer(iommu))
		return -ENOMEM;

	if (alloc_event_buffer(iommu))
		return -ENOMEM;

	iommu->int_enabled = false;

	init_translation_status(iommu);
	if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
		iommu_disable(iommu);
		clear_translation_pre_enabled(iommu);
		pr_warn("Translation was enabled for IOMMU:%d but we are not in kdump mode\n",
			iommu->index);
	}
	if (amd_iommu_pre_enabled)
		amd_iommu_pre_enabled = translation_pre_enabled(iommu);

	ret = init_iommu_from_acpi(iommu, h);
	if (ret)
		return ret;

	ret = amd_iommu_create_irq_domain(iommu);
	if (ret)
		return ret;

	/*
	 * Make sure IOMMU is not considered to translate itself. The IVRS
	 * table tells us so, but this is a lie!
	 */
	amd_iommu_rlookup_table[iommu->devid] = NULL;

	return 0;
}

/**
 * get_highest_supported_ivhd_type - Look up the appropriate IVHD type
 * @ivrs: Pointer to the IVRS header
 *
 * This function searches through all IVHD blocks describing the first IOMMU
 * and returns the highest supported IVHD type found.
 */
static u8 get_highest_supported_ivhd_type(struct acpi_table_header *ivrs)
{
	u8 *base = (u8 *)ivrs;
	struct ivhd_header *ivhd = (struct ivhd_header *)
					(base + IVRS_HEADER_LENGTH);
	u8 last_type = ivhd->type;
	u16 devid = ivhd->devid;

	while (((u8 *)ivhd - base < ivrs->length) &&
	       (ivhd->type <= ACPI_IVHD_TYPE_MAX_SUPPORTED)) {
		u8 *p = (u8 *) ivhd;

		if (ivhd->devid == devid)
			last_type = ivhd->type;
		ivhd = (struct ivhd_header *)(p + ivhd->length);
	}

	return last_type;
}
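
/*
 * Example (illustrative only): firmware commonly emits a legacy type 0x10
 * IVHD plus a type 0x11 (and possibly 0x40) block for the same IOMMU; since
 * 0x10 < 0x11 < 0x40 <= ACPI_IVHD_TYPE_MAX_SUPPORTED, the loop above settles
 * on the richest format this driver can parse.
 */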
1602 
1603 /*
1604  * Iterates over all IOMMU entries in the ACPI table, allocates the
1605  * IOMMU structure and initializes it with init_iommu_one()
1606  */
1607 static int __init init_iommu_all(struct acpi_table_header *table)
1608 {
1609 	u8 *p = (u8 *)table, *end = (u8 *)table;
1610 	struct ivhd_header *h;
1611 	struct amd_iommu *iommu;
1612 	int ret;
1613 
1614 	end += table->length;
1615 	p += IVRS_HEADER_LENGTH;
1616 
1617 	while (p < end) {
1618 		h = (struct ivhd_header *)p;
1619 		if (*p == amd_iommu_target_ivhd_type) {
1620 
1621 			DUMP_printk("device: %02x:%02x.%01x cap: %04x "
1622 				    "seg: %d flags: %01x info %04x\n",
1623 				    PCI_BUS_NUM(h->devid), PCI_SLOT(h->devid),
1624 				    PCI_FUNC(h->devid), h->cap_ptr,
1625 				    h->pci_seg, h->flags, h->info);
1626 			DUMP_printk("       mmio-addr: %016llx\n",
1627 				    h->mmio_phys);
1628 
1629 			iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
1630 			if (iommu == NULL)
1631 				return -ENOMEM;
1632 
1633 			ret = init_iommu_one(iommu, h);
1634 			if (ret)
1635 				return ret;
1636 		}
1637 		p += h->length;
1638 
1639 	}
1640 	WARN_ON(p != end);
1641 
1642 	return 0;
1643 }
1644 
1645 static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
1646 				u8 fxn, u64 *value, bool is_write);
1647 
1648 static void init_iommu_perf_ctr(struct amd_iommu *iommu)
1649 {
1650 	struct pci_dev *pdev = iommu->dev;
1651 	u64 val = 0xabcd, val2 = 0, save_reg = 0;
1652 
1653 	if (!iommu_feature(iommu, FEATURE_PC))
1654 		return;
1655 
1656 	amd_iommu_pc_present = true;
1657 
1658 	/* save the value to restore, if writable */
1659 	if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, false))
1660 		goto pc_false;
1661 
1662 	/* Check if the performance counters can be written to */
1663 	if ((iommu_pc_get_set_reg(iommu, 0, 0, 0, &val, true)) ||
1664 	    (iommu_pc_get_set_reg(iommu, 0, 0, 0, &val2, false)) ||
1665 	    (val != val2))
1666 		goto pc_false;
1667 
1668 	/* restore */
1669 	if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, true))
1670 		goto pc_false;
1671 
1672 	pci_info(pdev, "IOMMU performance counters supported\n");
1673 
1674 	val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET);
1675 	iommu->max_banks = (u8) ((val >> 12) & 0x3f);
1676 	iommu->max_counters = (u8) ((val >> 7) & 0xf);
1677 
1678 	return;
1679 
1680 pc_false:
1681 	pci_err(pdev, "Unable to read/write to IOMMU perf counter.\n");
1682 	amd_iommu_pc_present = false;
1683 	return;
1684 }
1685 
1686 static ssize_t amd_iommu_show_cap(struct device *dev,
1687 				  struct device_attribute *attr,
1688 				  char *buf)
1689 {
1690 	struct amd_iommu *iommu = dev_to_amd_iommu(dev);
1691 	return sprintf(buf, "%x\n", iommu->cap);
1692 }
1693 static DEVICE_ATTR(cap, S_IRUGO, amd_iommu_show_cap, NULL);
1694 
1695 static ssize_t amd_iommu_show_features(struct device *dev,
1696 				       struct device_attribute *attr,
1697 				       char *buf)
1698 {
1699 	struct amd_iommu *iommu = dev_to_amd_iommu(dev);
1700 	return sprintf(buf, "%llx\n", iommu->features);
1701 }
1702 static DEVICE_ATTR(features, S_IRUGO, amd_iommu_show_features, NULL);
1703 
1704 static struct attribute *amd_iommu_attrs[] = {
1705 	&dev_attr_cap.attr,
1706 	&dev_attr_features.attr,
1707 	NULL,
1708 };
1709 
1710 static struct attribute_group amd_iommu_group = {
1711 	.name = "amd-iommu",
1712 	.attrs = amd_iommu_attrs,
1713 };
1714 
1715 static const struct attribute_group *amd_iommu_groups[] = {
1716 	&amd_iommu_group,
1717 	NULL,
1718 };
1719 
1720 static int __init iommu_init_pci(struct amd_iommu *iommu)
1721 {
1722 	int cap_ptr = iommu->cap_ptr;
1723 	int ret;
1724 
1725 	iommu->dev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(iommu->devid),
1726 						 iommu->devid & 0xff);
1727 	if (!iommu->dev)
1728 		return -ENODEV;
1729 
1730 	/* Prevent binding other PCI device drivers to IOMMU devices */
1731 	iommu->dev->match_driver = false;
1732 
1733 	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
1734 			      &iommu->cap);
1735 
1736 	if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
1737 		amd_iommu_iotlb_sup = false;
1738 
1739 	/* read extended feature bits */
1740 	iommu->features = readq(iommu->mmio_base + MMIO_EXT_FEATURES);
1741 
1742 	if (iommu_feature(iommu, FEATURE_GT)) {
1743 		int glxval;
1744 		u32 max_pasid;
1745 		u64 pasmax;
1746 
1747 		pasmax = iommu->features & FEATURE_PASID_MASK;
1748 		pasmax >>= FEATURE_PASID_SHIFT;
1749 		max_pasid  = (1 << (pasmax + 1)) - 1;
1750 
1751 		amd_iommu_max_pasid = min(amd_iommu_max_pasid, max_pasid);
1752 
1753 		BUG_ON(amd_iommu_max_pasid & ~PASID_MASK);
1754 
1755 		glxval   = iommu->features & FEATURE_GLXVAL_MASK;
1756 		glxval >>= FEATURE_GLXVAL_SHIFT;
1757 
1758 		if (amd_iommu_max_glx_val == -1)
1759 			amd_iommu_max_glx_val = glxval;
1760 		else
1761 			amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval);
1762 	}
1763 
1764 	if (iommu_feature(iommu, FEATURE_GT) &&
1765 	    iommu_feature(iommu, FEATURE_PPR)) {
1766 		iommu->is_iommu_v2   = true;
1767 		amd_iommu_v2_present = true;
1768 	}
1769 
1770 	if (iommu_feature(iommu, FEATURE_PPR) && alloc_ppr_log(iommu))
1771 		return -ENOMEM;
1772 
1773 	ret = iommu_init_ga(iommu);
1774 	if (ret)
1775 		return ret;
1776 
1777 	if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
1778 		amd_iommu_np_cache = true;
1779 
1780 	init_iommu_perf_ctr(iommu);
1781 
1782 	if (is_rd890_iommu(iommu->dev)) {
1783 		int i, j;
1784 
1785 		iommu->root_pdev =
1786 			pci_get_domain_bus_and_slot(0, iommu->dev->bus->number,
1787 						    PCI_DEVFN(0, 0));
1788 
1789 		/*
1790 		 * Some rd890 systems may not be fully reconfigured by the
1791 		 * BIOS, so it's necessary for us to store this information so
1792 		 * it can be reprogrammed on resume
1793 		 */
1794 		pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4,
1795 				&iommu->stored_addr_lo);
1796 		pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8,
1797 				&iommu->stored_addr_hi);
1798 
1799 		/* Low bit locks writes to configuration space */
1800 		iommu->stored_addr_lo &= ~1;
1801 
1802 		for (i = 0; i < 6; i++)
1803 			for (j = 0; j < 0x12; j++)
1804 				iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j);
1805 
1806 		for (i = 0; i < 0x83; i++)
1807 			iommu->stored_l2[i] = iommu_read_l2(iommu, i);
1808 	}
1809 
1810 	amd_iommu_erratum_746_workaround(iommu);
1811 	amd_iommu_ats_write_check_workaround(iommu);
1812 
1813 	iommu_device_sysfs_add(&iommu->iommu, &iommu->dev->dev,
1814 			       amd_iommu_groups, "ivhd%d", iommu->index);
1815 	iommu_device_set_ops(&iommu->iommu, &amd_iommu_ops);
1816 	iommu_device_register(&iommu->iommu);
1817 
1818 	return pci_enable_device(iommu->dev);
1819 }
1820 
1821 static void print_iommu_info(void)
1822 {
1823 	static const char * const feat_str[] = {
1824 		"PreF", "PPR", "X2APIC", "NX", "GT", "[5]",
1825 		"IA", "GA", "HE", "PC"
1826 	};
1827 	struct amd_iommu *iommu;
1828 
1829 	for_each_iommu(iommu) {
1830 		struct pci_dev *pdev = iommu->dev;
1831 		int i;
1832 
1833 		pci_info(pdev, "Found IOMMU cap 0x%hx\n", iommu->cap_ptr);
1834 
1835 		if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
1836 			pci_info(pdev, "Extended features (%#llx):",
1837 				 iommu->features);
1838 			for (i = 0; i < ARRAY_SIZE(feat_str); ++i) {
1839 				if (iommu_feature(iommu, (1ULL << i)))
1840 					pr_cont(" %s", feat_str[i]);
1841 			}
1842 
1843 			if (iommu->features & FEATURE_GAM_VAPIC)
1844 				pr_cont(" GA_vAPIC");
1845 
1846 			pr_cont("\n");
1847 		}
1848 	}
1849 	if (irq_remapping_enabled) {
1850 		pr_info("Interrupt remapping enabled\n");
1851 		if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
1852 			pr_info("Virtual APIC enabled\n");
1853 		if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
1854 			pr_info("X2APIC enabled\n");
1855 	}
1856 }
1857 
1858 static int __init amd_iommu_init_pci(void)
1859 {
1860 	struct amd_iommu *iommu;
1861 	int ret = 0;
1862 
1863 	for_each_iommu(iommu) {
1864 		ret = iommu_init_pci(iommu);
1865 		if (ret)
1866 			break;
1867 	}
1868 
1869 	/*
1870 	 * Order is important here to make sure any unity map requirements are
1871 	 * fulfilled. The unity mappings are created and written to the device
1872 	 * table during the amd_iommu_init_api() call.
1873 	 *
1874 	 * After that we call init_device_table_dma() to make sure any
1875 	 * uninitialized DTE will block DMA, and in the end we flush the caches
1876 	 * of all IOMMUs to make sure the changes to the device table are
1877 	 * active.
1878 	 */
1879 	ret = amd_iommu_init_api();
1880 
1881 	init_device_table_dma();
1882 
1883 	for_each_iommu(iommu)
1884 		iommu_flush_all_caches(iommu);
1885 
1886 	if (!ret)
1887 		print_iommu_info();
1888 
1889 	return ret;
1890 }
1891 
1892 /****************************************************************************
1893  *
1894  * The following functions initialize the MSI interrupts for all IOMMUs
1895  * in the system. It's a bit challenging because there could be multiple
1896  * IOMMUs per PCI BDF, but we can call pci_enable_msi(x) only once per
1897  * pci_dev.
1898  *
1899  ****************************************************************************/
1900 
1901 static int iommu_setup_msi(struct amd_iommu *iommu)
1902 {
1903 	int r;
1904 
1905 	r = pci_enable_msi(iommu->dev);
1906 	if (r)
1907 		return r;
1908 
1909 	r = request_threaded_irq(iommu->dev->irq,
1910 				 amd_iommu_int_handler,
1911 				 amd_iommu_int_thread,
1912 				 0, "AMD-Vi",
1913 				 iommu);
1914 
1915 	if (r) {
1916 		pci_disable_msi(iommu->dev);
1917 		return r;
1918 	}
1919 
1920 	iommu->int_enabled = true;
1921 
1922 	return 0;
1923 }
1924 
1925 #define XT_INT_DEST_MODE(x)	(((x) & 0x1ULL) << 2)
1926 #define XT_INT_DEST_LO(x)	(((x) & 0xFFFFFFULL) << 8)
1927 #define XT_INT_VEC(x)		(((x) & 0xFFULL) << 32)
1928 #define XT_INT_DEST_HI(x)	((((x) >> 24) & 0xFFULL) << 56)
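/*
 * Per the shifts above, the IntCapXT routing value is laid out as:
 *   bit  2     - destination mode (0 = physical, 1 = logical)
 *   bits 31:8  - bits 23:0 of the destination APIC ID
 *   bits 39:32 - interrupt vector
 *   bits 63:56 - bits 31:24 of the destination APIC ID
 */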
1929 
1930 /*
1931  * Set up the IntCapXT registers with interrupt routing information
1932  * based on the PCI MSI capability block registers, accessed via
1933  * MMIO MSI address low/hi and MSI data registers.
1934  */
1935 static void iommu_update_intcapxt(struct amd_iommu *iommu)
1936 {
1937 	u64 val;
1938 	u32 addr_lo = readl(iommu->mmio_base + MMIO_MSI_ADDR_LO_OFFSET);
1939 	u32 addr_hi = readl(iommu->mmio_base + MMIO_MSI_ADDR_HI_OFFSET);
1940 	u32 data    = readl(iommu->mmio_base + MMIO_MSI_DATA_OFFSET);
1941 	bool dm     = (addr_lo >> MSI_ADDR_DEST_MODE_SHIFT) & 0x1;
1942 	u32 dest    = ((addr_lo >> MSI_ADDR_DEST_ID_SHIFT) & 0xFF);
1943 
1944 	if (x2apic_enabled())
1945 		dest |= MSI_ADDR_EXT_DEST_ID(addr_hi);
1946 
1947 	val = XT_INT_VEC(data & 0xFF) |
1948 	      XT_INT_DEST_MODE(dm) |
1949 	      XT_INT_DEST_LO(dest) |
1950 	      XT_INT_DEST_HI(dest);
1951 
1952 	/*
1953 	 * The current IOMMU implementation uses the same IRQ for all
1954 	 * 3 IOMMU interrupts.
1955 	 */
1956 	writeq(val, iommu->mmio_base + MMIO_INTCAPXT_EVT_OFFSET);
1957 	writeq(val, iommu->mmio_base + MMIO_INTCAPXT_PPR_OFFSET);
1958 	writeq(val, iommu->mmio_base + MMIO_INTCAPXT_GALOG_OFFSET);
1959 }
1960 
1961 static void _irq_notifier_notify(struct irq_affinity_notify *notify,
1962 				 const cpumask_t *mask)
1963 {
1964 	struct amd_iommu *iommu;
1965 
1966 	for_each_iommu(iommu) {
1967 		if (iommu->dev->irq == notify->irq) {
1968 			iommu_update_intcapxt(iommu);
1969 			break;
1970 		}
1971 	}
1972 }
1973 
1974 static void _irq_notifier_release(struct kref *ref)
1975 {
1976 }
1977 
1978 static int iommu_init_intcapxt(struct amd_iommu *iommu)
1979 {
1980 	int ret;
1981 	struct irq_affinity_notify *notify = &iommu->intcapxt_notify;
1982 
1983 	/*
1984 	 * IntCapXT requires XTSup=1 and MsiCapMmioSup=1,
1985 	 * which can be inferred from amd_iommu_xt_mode.
1986 	 */
1987 	if (amd_iommu_xt_mode != IRQ_REMAP_X2APIC_MODE)
1988 		return 0;
1989 
1990 	/*
1991 	 * Also set up a notifier to update the IntCapXT registers
1992 	 * whenever the irq affinity is changed from user-space.
1993 	 */
1994 	notify->irq = iommu->dev->irq;
1995 	notify->notify = _irq_notifier_notify;
1996 	notify->release = _irq_notifier_release;
1997 	ret = irq_set_affinity_notifier(iommu->dev->irq, notify);
1998 	if (ret) {
1999 		pr_err("Failed to register irq affinity notifier (devid=%#x, irq %d)\n",
2000 		       iommu->devid, iommu->dev->irq);
2001 		return ret;
2002 	}
2003 
2004 	iommu_update_intcapxt(iommu);
2005 	iommu_feature_enable(iommu, CONTROL_INTCAPXT_EN);
2006 	return ret;
2007 }
2008 
2009 static int iommu_init_msi(struct amd_iommu *iommu)
2010 {
2011 	int ret;
2012 
2013 	if (iommu->int_enabled)
2014 		goto enable_faults;
2015 
2016 	if (iommu->dev->msi_cap)
2017 		ret = iommu_setup_msi(iommu);
2018 	else
2019 		ret = -ENODEV;
2020 
2021 	if (ret)
2022 		return ret;
2023 
2024 enable_faults:
2025 	ret = iommu_init_intcapxt(iommu);
2026 	if (ret)
2027 		return ret;
2028 
2029 	iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);
2030 
2031 	if (iommu->ppr_log != NULL)
2032 		iommu_feature_enable(iommu, CONTROL_PPRINT_EN);
2033 
2034 	iommu_ga_log_enable(iommu);
2035 
2036 	return 0;
2037 }
2038 
2039 /****************************************************************************
2040  *
2041  * The next functions belong to the last pass of parsing the ACPI
2042  * table. In this pass the memory mapping requirements are
2043  * gathered (like exclusion and unity mapping ranges).
2044  *
2045  ****************************************************************************/
2046 
2047 static void __init free_unity_maps(void)
2048 {
2049 	struct unity_map_entry *entry, *next;
2050 
2051 	list_for_each_entry_safe(entry, next, &amd_iommu_unity_map, list) {
2052 		list_del(&entry->list);
2053 		kfree(entry);
2054 	}
2055 }
2056 
2057 /* called for unity map ACPI definition */
2058 static int __init init_unity_map_range(struct ivmd_header *m)
2059 {
2060 	struct unity_map_entry *e = NULL;
2061 	char *s;
2062 
2063 	e = kzalloc(sizeof(*e), GFP_KERNEL);
2064 	if (e == NULL)
2065 		return -ENOMEM;
2066 
2067 	switch (m->type) {
2068 	default:
2069 		kfree(e);
2070 		return 0;
2071 	case ACPI_IVMD_TYPE:
2072 		s = "IVMD_TYPEi\t\t\t";
2073 		e->devid_start = e->devid_end = m->devid;
2074 		break;
2075 	case ACPI_IVMD_TYPE_ALL:
2076 		s = "IVMD_TYPE_ALL\t\t";
2077 		e->devid_start = 0;
2078 		e->devid_end = amd_iommu_last_bdf;
2079 		break;
2080 	case ACPI_IVMD_TYPE_RANGE:
2081 		s = "IVMD_TYPE_RANGE\t\t";
2082 		e->devid_start = m->devid;
2083 		e->devid_end = m->aux;
2084 		break;
2085 	}
2086 	e->address_start = PAGE_ALIGN(m->range_start);
2087 	e->address_end = e->address_start + PAGE_ALIGN(m->range_length);
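	/*
	 * Bit 0 of the IVMD flags is the unity-map bit; shifting it out
	 * leaves the IR and IW permission bits as the low two bits.
	 */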
2088 	e->prot = m->flags >> 1;
2089 
2090 	/*
2091 	 * Treat per-device exclusion ranges as r/w unity-mapped regions
2092 	 * since some buggy BIOSes overwrite the IOMMU's exclusion range
2093 	 * registers (the exclusion_start and exclusion_length members)
2094 	 * when multiple exclusion ranges (IVMD entries) are defined in
2095 	 * the ACPI table.
2096 	 */
2097 	if (m->flags & IVMD_FLAG_EXCL_RANGE)
2098 		e->prot = (IVMD_FLAG_IW | IVMD_FLAG_IR) >> 1;
2099 
2100 	DUMP_printk("%s devid_start: %02x:%02x.%x devid_end: %02x:%02x.%x"
2101 		    " range_start: %016llx range_end: %016llx flags: %x\n", s,
2102 		    PCI_BUS_NUM(e->devid_start), PCI_SLOT(e->devid_start),
2103 		    PCI_FUNC(e->devid_start), PCI_BUS_NUM(e->devid_end),
2104 		    PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end),
2105 		    e->address_start, e->address_end, m->flags);
2106 
2107 	list_add_tail(&e->list, &amd_iommu_unity_map);
2108 
2109 	return 0;
2110 }
2111 
2112 /* iterates over all memory definitions we find in the ACPI table */
2113 static int __init init_memory_definitions(struct acpi_table_header *table)
2114 {
2115 	u8 *p = (u8 *)table, *end = (u8 *)table;
2116 	struct ivmd_header *m;
2117 
2118 	end += table->length;
2119 	p += IVRS_HEADER_LENGTH;
2120 
2121 	while (p < end) {
2122 		m = (struct ivmd_header *)p;
2123 		if (m->flags & (IVMD_FLAG_UNITY_MAP | IVMD_FLAG_EXCL_RANGE))
2124 			init_unity_map_range(m);
2125 
2126 		p += m->length;
2127 	}
2128 
2129 	return 0;
2130 }
2131 
2132 /*
2133  * Init the device table so that by default DMA is blocked for all devices
2134  */
2135 static void init_device_table_dma(void)
2136 {
2137 	u32 devid;
2138 
2139 	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
2140 		set_dev_entry_bit(devid, DEV_ENTRY_VALID);
2141 		set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION);
2142 	}
2143 }
2144 
2145 static void __init uninit_device_table_dma(void)
2146 {
2147 	u32 devid;
2148 
2149 	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
2150 		amd_iommu_dev_table[devid].data[0] = 0ULL;
2151 		amd_iommu_dev_table[devid].data[1] = 0ULL;
2152 	}
2153 }
2154 
2155 static void init_device_table(void)
2156 {
2157 	u32 devid;
2158 
2159 	if (!amd_iommu_irq_remap)
2160 		return;
2161 
2162 	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid)
2163 		set_dev_entry_bit(devid, DEV_ENTRY_IRQ_TBL_EN);
2164 }
2165 
2166 static void iommu_init_flags(struct amd_iommu *iommu)
2167 {
2168 	iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ?
2169 		iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
2170 		iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);
2171 
2172 	iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ?
2173 		iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
2174 		iommu_feature_disable(iommu, CONTROL_PASSPW_EN);
2175 
2176 	iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
2177 		iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
2178 		iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);
2179 
2180 	iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ?
2181 		iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
2182 		iommu_feature_disable(iommu, CONTROL_ISOC_EN);
2183 
2184 	/*
2185 	 * make IOMMU memory accesses cache coherent
2186 	 */
2187 	iommu_feature_enable(iommu, CONTROL_COHERENT_EN);
2188 
2189 	/* Set IOTLB invalidation timeout to 1s */
2190 	iommu_set_inv_tlb_timeout(iommu, CTRL_INV_TO_1S);
2191 }
2192 
2193 static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
2194 {
2195 	int i, j;
2196 	u32 ioc_feature_control;
2197 	struct pci_dev *pdev = iommu->root_pdev;
2198 
2199 	/* RD890 BIOSes may not have completely reconfigured the iommu */
2200 	if (!is_rd890_iommu(iommu->dev) || !pdev)
2201 		return;
2202 
2203 	/*
2204 	 * First, we need to ensure that the iommu is enabled. This is
2205 	 * controlled by a register in the northbridge.
2206 	 */
2207 
2208 	/* Select Northbridge indirect register 0x75 and enable writing */
2209 	pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7));
2210 	pci_read_config_dword(pdev, 0x64, &ioc_feature_control);
2211 
2212 	/* Enable the iommu */
2213 	if (!(ioc_feature_control & 0x1))
2214 		pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1);
2215 
2216 	/* Restore the iommu BAR */
2217 	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
2218 			       iommu->stored_addr_lo);
2219 	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8,
2220 			       iommu->stored_addr_hi);
2221 
2222 	/* Restore the l1 indirect regs for each of the 6 l1s */
2223 	for (i = 0; i < 6; i++)
2224 		for (j = 0; j < 0x12; j++)
2225 			iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]);
2226 
2227 	/* Restore the l2 indirect regs */
2228 	for (i = 0; i < 0x83; i++)
2229 		iommu_write_l2(iommu, i, iommu->stored_l2[i]);
2230 
2231 	/* Lock PCI setup registers */
2232 	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
2233 			       iommu->stored_addr_lo | 1);
2234 }
2235 
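/*
 * Both guest vAPIC (GAM) and legacy GA mode use the 128-bit IRTE
 * format; all other modes fall back to the 32-bit IRTE format.
 */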
2236 static void iommu_enable_ga(struct amd_iommu *iommu)
2237 {
2238 #ifdef CONFIG_IRQ_REMAP
2239 	switch (amd_iommu_guest_ir) {
2240 	case AMD_IOMMU_GUEST_IR_VAPIC:
2241 		iommu_feature_enable(iommu, CONTROL_GAM_EN);
2242 		fallthrough;
2243 	case AMD_IOMMU_GUEST_IR_LEGACY_GA:
2244 		iommu_feature_enable(iommu, CONTROL_GA_EN);
2245 		iommu->irte_ops = &irte_128_ops;
2246 		break;
2247 	default:
2248 		iommu->irte_ops = &irte_32_ops;
2249 		break;
2250 	}
2251 #endif
2252 }
2253 
2254 static void early_enable_iommu(struct amd_iommu *iommu)
2255 {
2256 	iommu_disable(iommu);
2257 	iommu_init_flags(iommu);
2258 	iommu_set_device_table(iommu);
2259 	iommu_enable_command_buffer(iommu);
2260 	iommu_enable_event_buffer(iommu);
2261 	iommu_set_exclusion_range(iommu);
2262 	iommu_enable_ga(iommu);
2263 	iommu_enable_xt(iommu);
2264 	iommu_enable(iommu);
2265 	iommu_flush_all_caches(iommu);
2266 }
2267 
2268 /*
2269  * This function finally enables all IOMMUs found in the system after
2270  * they have been initialized.
2271  *
2272  * In a kdump kernel with all IOMMUs pre-enabled, it instead tries to
2273  * copy the old content of the device table entries. If that is not the
2274  * case, or if the copy fails, it just continues like a normal kernel boot.
2275  */
2276 static void early_enable_iommus(void)
2277 {
2278 	struct amd_iommu *iommu;
2279 
2280 
2281 	if (!copy_device_table()) {
2282 		/*
2283 		 * If we get here because copying the device table from the old
2284 		 * kernel (with all IOMMUs enabled) failed, print an error
2285 		 * message and try to free the allocated old_dev_tbl_cpy.
2286 		 */
2287 		if (amd_iommu_pre_enabled)
2288 			pr_err("Failed to copy DEV table from previous kernel.\n");
2289 		if (old_dev_tbl_cpy != NULL)
2290 			free_pages((unsigned long)old_dev_tbl_cpy,
2291 					get_order(dev_table_size));
2292 
2293 		for_each_iommu(iommu) {
2294 			clear_translation_pre_enabled(iommu);
2295 			early_enable_iommu(iommu);
2296 		}
2297 	} else {
2298 		pr_info("Copied DEV table from previous kernel.\n");
2299 		free_pages((unsigned long)amd_iommu_dev_table,
2300 				get_order(dev_table_size));
2301 		amd_iommu_dev_table = old_dev_tbl_cpy;
2302 		for_each_iommu(iommu) {
2303 			iommu_disable_command_buffer(iommu);
2304 			iommu_disable_event_buffer(iommu);
2305 			iommu_enable_command_buffer(iommu);
2306 			iommu_enable_event_buffer(iommu);
2307 			iommu_enable_ga(iommu);
2308 			iommu_enable_xt(iommu);
2309 			iommu_set_device_table(iommu);
2310 			iommu_flush_all_caches(iommu);
2311 		}
2312 	}
2313 
2314 #ifdef CONFIG_IRQ_REMAP
2315 	if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
2316 		amd_iommu_irq_ops.capability |= (1 << IRQ_POSTING_CAP);
2317 #endif
2318 }
2319 
2320 static void enable_iommus_v2(void)
2321 {
2322 	struct amd_iommu *iommu;
2323 
2324 	for_each_iommu(iommu) {
2325 		iommu_enable_ppr_log(iommu);
2326 		iommu_enable_gt(iommu);
2327 	}
2328 }
2329 
2330 static void enable_iommus(void)
2331 {
2332 	early_enable_iommus();
2333 
2334 	enable_iommus_v2();
2335 }
2336 
2337 static void disable_iommus(void)
2338 {
2339 	struct amd_iommu *iommu;
2340 
2341 	for_each_iommu(iommu)
2342 		iommu_disable(iommu);
2343 
2344 #ifdef CONFIG_IRQ_REMAP
2345 	if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
2346 		amd_iommu_irq_ops.capability &= ~(1 << IRQ_POSTING_CAP);
2347 #endif
2348 }
2349 
2350 /*
2351  * Suspend/Resume support
2352  * (suspend just disables the IOMMUs; they are fully re-enabled on resume)
2353  */
2354 
2355 static void amd_iommu_resume(void)
2356 {
2357 	struct amd_iommu *iommu;
2358 
2359 	for_each_iommu(iommu)
2360 		iommu_apply_resume_quirks(iommu);
2361 
2362 	/* re-load the hardware */
2363 	enable_iommus();
2364 
2365 	amd_iommu_enable_interrupts();
2366 }
2367 
2368 static int amd_iommu_suspend(void)
2369 {
2370 	/* disable IOMMUs to go out of the way for BIOS */
2371 	disable_iommus();
2372 
2373 	return 0;
2374 }
2375 
2376 static struct syscore_ops amd_iommu_syscore_ops = {
2377 	.suspend = amd_iommu_suspend,
2378 	.resume = amd_iommu_resume,
2379 };
2380 
2381 static void __init free_iommu_resources(void)
2382 {
2383 	kmemleak_free(irq_lookup_table);
2384 	free_pages((unsigned long)irq_lookup_table,
2385 		   get_order(rlookup_table_size));
2386 	irq_lookup_table = NULL;
2387 
2388 	kmem_cache_destroy(amd_iommu_irq_cache);
2389 	amd_iommu_irq_cache = NULL;
2390 
2391 	free_pages((unsigned long)amd_iommu_rlookup_table,
2392 		   get_order(rlookup_table_size));
2393 	amd_iommu_rlookup_table = NULL;
2394 
2395 	free_pages((unsigned long)amd_iommu_alias_table,
2396 		   get_order(alias_table_size));
2397 	amd_iommu_alias_table = NULL;
2398 
2399 	free_pages((unsigned long)amd_iommu_dev_table,
2400 		   get_order(dev_table_size));
2401 	amd_iommu_dev_table = NULL;
2402 
2403 	free_iommu_all();
2404 }
2405 
2406 /* SB IOAPIC is always on this device in AMD systems */
2407 #define IOAPIC_SB_DEVID		((0x00 << 8) | PCI_DEVFN(0x14, 0))
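/* The device ID encodes bus[15:8] and devfn[7:0], so this is PCI device 00:14.0 (devid 0xa0). */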
2408 
2409 static bool __init check_ioapic_information(void)
2410 {
2411 	const char *fw_bug = FW_BUG;
2412 	bool ret, has_sb_ioapic;
2413 	int idx;
2414 
2415 	has_sb_ioapic = false;
2416 	ret           = false;
2417 
2418 	/*
2419 	 * If we have map overrides on the kernel command line, the
2420 	 * messages in this function might not describe firmware bugs
2421 	 * anymore - so be careful.
2422 	 */
2423 	if (cmdline_maps)
2424 		fw_bug = "";
2425 
2426 	for (idx = 0; idx < nr_ioapics; idx++) {
2427 		int devid, id = mpc_ioapic_id(idx);
2428 
2429 		devid = get_ioapic_devid(id);
2430 		if (devid < 0) {
2431 			pr_err("%s: IOAPIC[%d] not in IVRS table\n",
2432 				fw_bug, id);
2433 			ret = false;
2434 		} else if (devid == IOAPIC_SB_DEVID) {
2435 			has_sb_ioapic = true;
2436 			ret           = true;
2437 		}
2438 	}
2439 
2440 	if (!has_sb_ioapic) {
2441 		/*
2442 		 * We expect the SB IOAPIC to be listed in the IVRS
2443 		 * table. The system timer is connected to the SB IOAPIC
2444 		 * and if we don't have it in the list the system will
2445 		 * panic at boot time.  This situation usually happens
2446 		 * when the BIOS is buggy and provides us the wrong
2447 		 * device id for the IOAPIC in the system.
2448 		 */
2449 		pr_err("%s: No southbridge IOAPIC found\n", fw_bug);
2450 	}
2451 
2452 	if (!ret)
2453 		pr_err("Disabling interrupt remapping\n");
2454 
2455 	return ret;
2456 }
2457 
2458 static void __init free_dma_resources(void)
2459 {
2460 	free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
2461 		   get_order(MAX_DOMAIN_ID/8));
2462 	amd_iommu_pd_alloc_bitmap = NULL;
2463 
2464 	free_unity_maps();
2465 }
2466 
2467 /*
2468  * This is the hardware init function for AMD IOMMU in the system.
2469  * This function is called either from amd_iommu_init or from the interrupt
2470  * remapping setup code.
2471  *
2472  * This function basically parses the ACPI table for AMD IOMMU (IVRS)
2473  * four times:
2474  *
2475  *	Pass 1) Discover the most comprehensive IVHD type to use.
2476  *
2477  *	Pass 2) Find the highest PCI device id the driver has to handle.
2478  *		Based on this information the sizes of the data structures
2479  *		that need to be allocated are determined.
2480  *
2481  *	Pass 3) Initialize the data structures just allocated with the
2482  *		information in the ACPI table about available AMD IOMMUs
2483  *		in the system. It also maps the PCI devices in the
2484  *		system to specific IOMMUs.
2485  *
2486  *	Pass 4) After the basic data structures are allocated and
2487  *		initialized we update them with information about memory
2488  *		remapping requirements parsed out of the ACPI table in
2489  *		this last pass.
2490  *
2491  * After everything is set up the IOMMUs are enabled and the necessary
2492  * hotplug and suspend notifiers are registered.
2493  */
2494 static int __init early_amd_iommu_init(void)
2495 {
2496 	struct acpi_table_header *ivrs_base;
2497 	acpi_status status;
2498 	int i, remap_cache_sz, ret = 0;
2499 	u32 pci_id;
2500 
2501 	if (!amd_iommu_detected)
2502 		return -ENODEV;
2503 
2504 	status = acpi_get_table("IVRS", 0, &ivrs_base);
2505 	if (status == AE_NOT_FOUND)
2506 		return -ENODEV;
2507 	else if (ACPI_FAILURE(status)) {
2508 		const char *err = acpi_format_exception(status);
2509 		pr_err("IVRS table error: %s\n", err);
2510 		return -EINVAL;
2511 	}
2512 
2513 	/*
2514 	 * Validate checksum here so we don't need to do it when
2515 	 * we actually parse the table
2516 	 */
2517 	ret = check_ivrs_checksum(ivrs_base);
2518 	if (ret)
2519 		goto out;
2520 
2521 	amd_iommu_target_ivhd_type = get_highest_supported_ivhd_type(ivrs_base);
2522 	DUMP_printk("Using IVHD type %#x\n", amd_iommu_target_ivhd_type);
2523 
2524 	/*
2525 	 * First parse ACPI tables to find the largest Bus/Dev/Func
2526 	 * we need to handle. Based on this information the shared data
2527 	 * structures for the IOMMUs in the system will be allocated.
2528 	 */
2529 	ret = find_last_devid_acpi(ivrs_base);
2530 	if (ret)
2531 		goto out;
2532 
2533 	dev_table_size     = tbl_size(DEV_TABLE_ENTRY_SIZE);
2534 	alias_table_size   = tbl_size(ALIAS_TABLE_ENTRY_SIZE);
2535 	rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE);
2536 
2537 	/* Device table - directly used by all IOMMUs */
2538 	ret = -ENOMEM;
2539 	amd_iommu_dev_table = (void *)__get_free_pages(
2540 				      GFP_KERNEL | __GFP_ZERO | GFP_DMA32,
2541 				      get_order(dev_table_size));
2542 	if (amd_iommu_dev_table == NULL)
2543 		goto out;
2544 
2545 	/*
2546 	 * Alias table - map PCI Bus/Dev/Func to Bus/Dev/Func the
2547 	 * IOMMU sees for that device
2548 	 */
2549 	amd_iommu_alias_table = (void *)__get_free_pages(GFP_KERNEL,
2550 			get_order(alias_table_size));
2551 	if (amd_iommu_alias_table == NULL)
2552 		goto out;
2553 
2554 	/* IOMMU rlookup table - find the IOMMU for a specific device */
2555 	amd_iommu_rlookup_table = (void *)__get_free_pages(
2556 			GFP_KERNEL | __GFP_ZERO,
2557 			get_order(rlookup_table_size));
2558 	if (amd_iommu_rlookup_table == NULL)
2559 		goto out;
2560 
2561 	amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(
2562 					    GFP_KERNEL | __GFP_ZERO,
2563 					    get_order(MAX_DOMAIN_ID/8));
2564 	if (amd_iommu_pd_alloc_bitmap == NULL)
2565 		goto out;
2566 
2567 	/*
2568 	 * Let all alias entries point to themselves
2569 	 */
2570 	for (i = 0; i <= amd_iommu_last_bdf; ++i)
2571 		amd_iommu_alias_table[i] = i;
2572 
2573 	/*
2574 	 * Never allocate domain 0 because it's used as the non-allocated and
2575 	 * error value placeholder
2576 	 */
2577 	__set_bit(0, amd_iommu_pd_alloc_bitmap);
2578 
2579 	/*
2580 	 * Now that the data structures are allocated and basically
2581 	 * initialized, start the real ACPI table scan.
2582 	 */
2583 	ret = init_iommu_all(ivrs_base);
2584 	if (ret)
2585 		goto out;
2586 
2587 	/* Disable IOMMU if there's Stoney Ridge graphics */
2588 	for (i = 0; i < 32; i++) {
2589 		pci_id = read_pci_config(0, i, 0, 0);
2590 		if ((pci_id & 0xffff) == 0x1002 && (pci_id >> 16) == 0x98e4) {
2591 			pr_info("Disable IOMMU on Stoney Ridge\n");
2592 			amd_iommu_disabled = true;
2593 			break;
2594 		}
2595 	}
2596 
2597 	/* Disable any previously enabled IOMMUs */
2598 	if (!is_kdump_kernel() || amd_iommu_disabled)
2599 		disable_iommus();
2600 
2601 	if (amd_iommu_irq_remap)
2602 		amd_iommu_irq_remap = check_ioapic_information();
2603 
2604 	if (amd_iommu_irq_remap) {
2605 		/*
2606 		 * Interrupt remapping enabled, create kmem_cache for the
2607 		 * remapping tables.
2608 		 */
2609 		ret = -ENOMEM;
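		/*
		 * GA modes use 128-bit IRTEs (2 * sizeof(u64)); all other
		 * modes use 32-bit IRTEs.
		 */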
2610 		if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
2611 			remap_cache_sz = MAX_IRQS_PER_TABLE * sizeof(u32);
2612 		else
2613 			remap_cache_sz = MAX_IRQS_PER_TABLE * (sizeof(u64) * 2);
2614 		amd_iommu_irq_cache = kmem_cache_create("irq_remap_cache",
2615 							remap_cache_sz,
2616 							IRQ_TABLE_ALIGNMENT,
2617 							0, NULL);
2618 		if (!amd_iommu_irq_cache)
2619 			goto out;
2620 
2621 		irq_lookup_table = (void *)__get_free_pages(
2622 				GFP_KERNEL | __GFP_ZERO,
2623 				get_order(rlookup_table_size));
2624 		kmemleak_alloc(irq_lookup_table, rlookup_table_size,
2625 			       1, GFP_KERNEL);
2626 		if (!irq_lookup_table)
2627 			goto out;
2628 	}
2629 
2630 	ret = init_memory_definitions(ivrs_base);
2631 	if (ret)
2632 		goto out;
2633 
2634 	/* init the device table */
2635 	init_device_table();
2636 
2637 out:
2638 	/* Don't leak any ACPI memory */
2639 	acpi_put_table(ivrs_base);
2640 	ivrs_base = NULL;
2641 
2642 	return ret;
2643 }
2644 
2645 static int amd_iommu_enable_interrupts(void)
2646 {
2647 	struct amd_iommu *iommu;
2648 	int ret = 0;
2649 
2650 	for_each_iommu(iommu) {
2651 		ret = iommu_init_msi(iommu);
2652 		if (ret)
2653 			goto out;
2654 	}
2655 
2656 out:
2657 	return ret;
2658 }
2659 
2660 static bool detect_ivrs(void)
2661 {
2662 	struct acpi_table_header *ivrs_base;
2663 	acpi_status status;
2664 
2665 	status = acpi_get_table("IVRS", 0, &ivrs_base);
2666 	if (status == AE_NOT_FOUND)
2667 		return false;
2668 	else if (ACPI_FAILURE(status)) {
2669 		const char *err = acpi_format_exception(status);
2670 		pr_err("IVRS table error: %s\n", err);
2671 		return false;
2672 	}
2673 
2674 	acpi_put_table(ivrs_base);
2675 
2676 	/* Make sure ACS will be enabled during PCI probe */
2677 	pci_request_acs();
2678 
2679 	return true;
2680 }
2681 
2682 /****************************************************************************
2683  *
2684  * AMD IOMMU Initialization State Machine
2685  *
2686  ****************************************************************************/
2687 
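/*
 * Normal forward progression, as driven by state_next():
 *
 *   IOMMU_START_STATE -> IOMMU_IVRS_DETECTED -> IOMMU_ACPI_FINISHED ->
 *   IOMMU_ENABLED -> IOMMU_PCI_INIT -> IOMMU_INTERRUPTS_EN ->
 *   IOMMU_DMA_OPS -> IOMMU_INITIALIZED
 *
 * Any failure moves the machine into IOMMU_NOT_FOUND, IOMMU_INIT_ERROR
 * or IOMMU_CMDLINE_DISABLED, which are terminal error states.
 */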
2688 static int __init state_next(void)
2689 {
2690 	int ret = 0;
2691 
2692 	switch (init_state) {
2693 	case IOMMU_START_STATE:
2694 		if (!detect_ivrs()) {
2695 			init_state	= IOMMU_NOT_FOUND;
2696 			ret		= -ENODEV;
2697 		} else {
2698 			init_state	= IOMMU_IVRS_DETECTED;
2699 		}
2700 		break;
2701 	case IOMMU_IVRS_DETECTED:
2702 		ret = early_amd_iommu_init();
2703 		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED;
2704 		if (init_state == IOMMU_ACPI_FINISHED && amd_iommu_disabled) {
2705 			pr_info("AMD IOMMU disabled\n");
2706 			init_state = IOMMU_CMDLINE_DISABLED;
2707 			ret = -EINVAL;
2708 		}
2709 		break;
2710 	case IOMMU_ACPI_FINISHED:
2711 		early_enable_iommus();
2712 		x86_platform.iommu_shutdown = disable_iommus;
2713 		init_state = IOMMU_ENABLED;
2714 		break;
2715 	case IOMMU_ENABLED:
2716 		register_syscore_ops(&amd_iommu_syscore_ops);
2717 		ret = amd_iommu_init_pci();
2718 		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT;
2719 		enable_iommus_v2();
2720 		break;
2721 	case IOMMU_PCI_INIT:
2722 		ret = amd_iommu_enable_interrupts();
2723 		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_INTERRUPTS_EN;
2724 		break;
2725 	case IOMMU_INTERRUPTS_EN:
2726 		ret = amd_iommu_init_dma_ops();
2727 		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_DMA_OPS;
2728 		break;
2729 	case IOMMU_DMA_OPS:
2730 		init_state = IOMMU_INITIALIZED;
2731 		break;
2732 	case IOMMU_INITIALIZED:
2733 		/* Nothing to do */
2734 		break;
2735 	case IOMMU_NOT_FOUND:
2736 	case IOMMU_INIT_ERROR:
2737 	case IOMMU_CMDLINE_DISABLED:
2738 		/* Error states => do nothing */
2739 		ret = -EINVAL;
2740 		break;
2741 	default:
2742 		/* Unknown state */
2743 		BUG();
2744 	}
2745 
2746 	if (ret) {
2747 		free_dma_resources();
2748 		if (!irq_remapping_enabled) {
2749 			disable_iommus();
2750 			free_iommu_resources();
2751 		} else {
2752 			struct amd_iommu *iommu;
2753 
2754 			uninit_device_table_dma();
2755 			for_each_iommu(iommu)
2756 				iommu_flush_all_caches(iommu);
2757 		}
2758 	}
2759 	return ret;
2760 }
2761 
2762 static int __init iommu_go_to_state(enum iommu_init_state state)
2763 {
2764 	int ret = -EINVAL;
2765 
2766 	while (init_state != state) {
2767 		if (init_state == IOMMU_NOT_FOUND         ||
2768 		    init_state == IOMMU_INIT_ERROR        ||
2769 		    init_state == IOMMU_CMDLINE_DISABLED)
2770 			break;
2771 		ret = state_next();
2772 	}
2773 
2774 	return ret;
2775 }
2776 
2777 #ifdef CONFIG_IRQ_REMAP
2778 int __init amd_iommu_prepare(void)
2779 {
2780 	int ret;
2781 
2782 	amd_iommu_irq_remap = true;
2783 
2784 	ret = iommu_go_to_state(IOMMU_ACPI_FINISHED);
2785 	if (ret)
2786 		return ret;
2787 	return amd_iommu_irq_remap ? 0 : -ENODEV;
2788 }
2789 
2790 int __init amd_iommu_enable(void)
2791 {
2792 	int ret;
2793 
2794 	ret = iommu_go_to_state(IOMMU_ENABLED);
2795 	if (ret)
2796 		return ret;
2797 
2798 	irq_remapping_enabled = 1;
2799 	return amd_iommu_xt_mode;
2800 }
2801 
2802 void amd_iommu_disable(void)
2803 {
2804 	amd_iommu_suspend();
2805 }
2806 
2807 int amd_iommu_reenable(int mode)
2808 {
2809 	amd_iommu_resume();
2810 
2811 	return 0;
2812 }
2813 
2814 int __init amd_iommu_enable_faulting(void)
2815 {
2816 	/* We enable MSI later when PCI is initialized */
2817 	return 0;
2818 }
2819 #endif
2820 
2821 /*
2822  * This is the core init function for AMD IOMMU hardware in the system.
2823  * This function is called from the generic x86 DMA layer initialization
2824  * code.
2825  */
2826 static int __init amd_iommu_init(void)
2827 {
2828 	struct amd_iommu *iommu;
2829 	int ret;
2830 
2831 	ret = iommu_go_to_state(IOMMU_INITIALIZED);
2832 #ifdef CONFIG_GART_IOMMU
2833 	if (ret && list_empty(&amd_iommu_list)) {
2834 		/*
2835 		 * We failed to initialize the AMD IOMMU - try fallback
2836 		 * to GART if possible.
2837 		 */
2838 		gart_iommu_init();
2839 	}
2840 #endif
2841 
2842 	for_each_iommu(iommu)
2843 		amd_iommu_debugfs_setup(iommu);
2844 
2845 	return ret;
2846 }
2847 
2848 static bool amd_iommu_sme_check(void)
2849 {
2850 	if (!sme_active() || (boot_cpu_data.x86 != 0x17))
2851 		return true;
2852 
2853 	/* For Fam17h, a specific level of support is required */
2854 	if (boot_cpu_data.microcode >= 0x08001205)
2855 		return true;
2856 
2857 	if ((boot_cpu_data.microcode >= 0x08001126) &&
2858 	    (boot_cpu_data.microcode <= 0x080011ff))
2859 		return true;
2860 
2861 	pr_notice("IOMMU not currently supported when SME is active\n");
2862 
2863 	return false;
2864 }
2865 
2866 /****************************************************************************
2867  *
2868  * Early detect code. This code runs at IOMMU detection time in the DMA
2869  * layer. It just checks for the presence of an IVRS ACPI table to
2870  * detect AMD IOMMUs.
2871  *
2872  ****************************************************************************/
2873 int __init amd_iommu_detect(void)
2874 {
2875 	int ret;
2876 
2877 	if (no_iommu || (iommu_detected && !gart_iommu_aperture))
2878 		return -ENODEV;
2879 
2880 	if (!amd_iommu_sme_check())
2881 		return -ENODEV;
2882 
2883 	ret = iommu_go_to_state(IOMMU_IVRS_DETECTED);
2884 	if (ret)
2885 		return ret;
2886 
2887 	amd_iommu_detected = true;
2888 	iommu_detected = 1;
2889 	x86_init.iommu.iommu_init = amd_iommu_init;
2890 
2891 	return 1;
2892 }
2893 
2894 /****************************************************************************
2895  *
2896  * Parsing functions for the AMD IOMMU specific kernel command line
2897  * options.
2898  *
2899  ****************************************************************************/
2900 
2901 static int __init parse_amd_iommu_dump(char *str)
2902 {
2903 	amd_iommu_dump = true;
2904 
2905 	return 1;
2906 }
2907 
2908 static int __init parse_amd_iommu_intr(char *str)
2909 {
2910 	for (; *str; ++str) {
2911 		if (strncmp(str, "legacy", 6) == 0) {
2912 			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
2913 			break;
2914 		}
2915 		if (strncmp(str, "vapic", 5) == 0) {
2916 			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
2917 			break;
2918 		}
2919 	}
2920 	return 1;
2921 }
2922 
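/*
 * Handle "amd_iommu=" options; the scan below accepts any string that
 * contains the keywords, e.g. "amd_iommu=fullflush" or "amd_iommu=off".
 */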
2923 static int __init parse_amd_iommu_options(char *str)
2924 {
2925 	for (; *str; ++str) {
2926 		if (strncmp(str, "fullflush", 9) == 0)
2927 			amd_iommu_unmap_flush = true;
2928 		if (strncmp(str, "off", 3) == 0)
2929 			amd_iommu_disabled = true;
2930 		if (strncmp(str, "force_isolation", 15) == 0)
2931 			amd_iommu_force_isolation = true;
2932 	}
2933 
2934 	return 1;
2935 }
2936 
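/*
 * Parse an ivrs_ioapic override of the form "[<id>]=<bus>:<dev>.<fn>",
 * e.g. "ivrs_ioapic[10]=00:14.0" maps IOAPIC id 10 to PCI device 00:14.0.
 */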
2937 static int __init parse_ivrs_ioapic(char *str)
2938 {
2939 	unsigned int bus, dev, fn;
2940 	int ret, id, i;
2941 	u16 devid;
2942 
2943 	ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);
2944 
2945 	if (ret != 4) {
2946 		pr_err("Invalid command line: ivrs_ioapic%s\n", str);
2947 		return 1;
2948 	}
2949 
2950 	if (early_ioapic_map_size == EARLY_MAP_SIZE) {
2951 		pr_err("Early IOAPIC map overflow - ignoring ivrs_ioapic%s\n",
2952 			str);
2953 		return 1;
2954 	}
2955 
2956 	devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
2957 
2958 	cmdline_maps			= true;
2959 	i				= early_ioapic_map_size++;
2960 	early_ioapic_map[i].id		= id;
2961 	early_ioapic_map[i].devid	= devid;
2962 	early_ioapic_map[i].cmd_line	= true;
2963 
2964 	return 1;
2965 }
2966 
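/*
 * Parse an ivrs_hpet override of the form "[<id>]=<bus>:<dev>.<fn>",
 * e.g. "ivrs_hpet[0]=00:14.0" maps HPET id 0 to PCI device 00:14.0.
 */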
2967 static int __init parse_ivrs_hpet(char *str)
2968 {
2969 	unsigned int bus, dev, fn;
2970 	int ret, id, i;
2971 	u16 devid;
2972 
2973 	ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);
2974 
2975 	if (ret != 4) {
2976 		pr_err("Invalid command line: ivrs_hpet%s\n", str);
2977 		return 1;
2978 	}
2979 
2980 	if (early_hpet_map_size == EARLY_MAP_SIZE) {
2981 		pr_err("Early HPET map overflow - ignoring ivrs_hpet%s\n",
2982 			str);
2983 		return 1;
2984 	}
2985 
2986 	devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
2987 
2988 	cmdline_maps			= true;
2989 	i				= early_hpet_map_size++;
2990 	early_hpet_map[i].id		= id;
2991 	early_hpet_map[i].devid		= devid;
2992 	early_hpet_map[i].cmd_line	= true;
2993 
2994 	return 1;
2995 }
2996 
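/*
 * Parse an ivrs_acpihid override of the form "[<bus>:<dev>.<fn>]=<hid>:<uid>",
 * e.g. "ivrs_acpihid[00:14.5]=AMD0020:0".
 */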
2997 static int __init parse_ivrs_acpihid(char *str)
2998 {
2999 	u32 bus, dev, fn;
3000 	char *hid, *uid, *p;
3001 	char acpiid[ACPIHID_UID_LEN + ACPIHID_HID_LEN] = {0};
3002 	int ret, i;
3003 
3004 	ret = sscanf(str, "[%x:%x.%x]=%s", &bus, &dev, &fn, acpiid);
3005 	if (ret != 4) {
3006 		pr_err("Invalid command line: ivrs_acpihid(%s)\n", str);
3007 		return 1;
3008 	}
3009 
3010 	p = acpiid;
3011 	hid = strsep(&p, ":");
3012 	uid = p;
3013 
3014 	if (!hid || !(*hid) || !uid) {
3015 		pr_err("Invalid command line: hid or uid\n");
3016 		return 1;
3017 	}
3018 
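	/*
	 * Note: unlike the ioapic/hpet parsers above, there is no bounds
	 * check on early_acpihid_map_size or on the hid/uid lengths here;
	 * the strings are assumed to fit the early_acpihid_map entry.
	 */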
3019 	i = early_acpihid_map_size++;
3020 	memcpy(early_acpihid_map[i].hid, hid, strlen(hid));
3021 	memcpy(early_acpihid_map[i].uid, uid, strlen(uid));
3022 	early_acpihid_map[i].devid =
3023 		((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
3024 	early_acpihid_map[i].cmd_line	= true;
3025 
3026 	return 1;
3027 }
3028 
3029 __setup("amd_iommu_dump",	parse_amd_iommu_dump);
3030 __setup("amd_iommu=",		parse_amd_iommu_options);
3031 __setup("amd_iommu_intr=",	parse_amd_iommu_intr);
3032 __setup("ivrs_ioapic",		parse_ivrs_ioapic);
3033 __setup("ivrs_hpet",		parse_ivrs_hpet);
3034 __setup("ivrs_acpihid",		parse_ivrs_acpihid);
3035 
3036 IOMMU_INIT_FINISH(amd_iommu_detect,
3037 		  gart_iommu_hole_init,
3038 		  NULL,
3039 		  NULL);
3040 
3041 bool amd_iommu_v2_supported(void)
3042 {
3043 	return amd_iommu_v2_present;
3044 }
3045 EXPORT_SYMBOL(amd_iommu_v2_supported);
3046 
3047 struct amd_iommu *get_amd_iommu(unsigned int idx)
3048 {
3049 	unsigned int i = 0;
3050 	struct amd_iommu *iommu;
3051 
3052 	for_each_iommu(iommu)
3053 		if (i++ == idx)
3054 			return iommu;
3055 	return NULL;
3056 }
3057 EXPORT_SYMBOL(get_amd_iommu);
3058 
3059 /****************************************************************************
3060  *
3061  * IOMMU EFR Performance Counter support. This code provides access to
3062  * the IOMMU PC functionality.
3063  *
3064  ****************************************************************************/
3065 
3066 u8 amd_iommu_pc_get_max_banks(unsigned int idx)
3067 {
3068 	struct amd_iommu *iommu = get_amd_iommu(idx);
3069 
3070 	if (iommu)
3071 		return iommu->max_banks;
3072 
3073 	return 0;
3074 }
3075 EXPORT_SYMBOL(amd_iommu_pc_get_max_banks);
3076 
3077 bool amd_iommu_pc_supported(void)
3078 {
3079 	return amd_iommu_pc_present;
3080 }
3081 EXPORT_SYMBOL(amd_iommu_pc_supported);
3082 
3083 u8 amd_iommu_pc_get_max_counters(unsigned int idx)
3084 {
3085 	struct amd_iommu *iommu = get_amd_iommu(idx);
3086 
3087 	if (iommu)
3088 		return iommu->max_counters;
3089 
3090 	return 0;
3091 }
3092 EXPORT_SYMBOL(amd_iommu_pc_get_max_counters);
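/*
 * A hypothetical caller (e.g. a perf PMU driver) might use these
 * exports roughly as follows (fxn 0 assumed to be the counter
 * register; see iommu_pc_get_set_reg() below for the valid range):
 *
 *	struct amd_iommu *iommu = get_amd_iommu(0);
 *	u64 val;
 *
 *	if (iommu && amd_iommu_pc_get_max_banks(0) &&
 *	    amd_iommu_pc_get_max_counters(0))
 *		amd_iommu_pc_get_reg(iommu, 0, 0, 0, &val);
 */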
3093 
3094 static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
3095 				u8 fxn, u64 *value, bool is_write)
3096 {
3097 	u32 offset;
3098 	u32 max_offset_lim;
3099 
3100 	/* Make sure the IOMMU PC resource is available */
3101 	if (!amd_iommu_pc_present)
3102 		return -ENODEV;
3103 
3104 	/* Check for valid iommu and pc register indexing */
3105 	if (WARN_ON(!iommu || (fxn > 0x28) || (fxn & 7)))
3106 		return -ENODEV;
3107 
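	/*
	 * Counter register MMIO layout: (0x40 | bank) << 12 places the
	 * banks at offset 0x40000 with one 4K page per bank; bits 11:8
	 * select the counter and bits 7:0 the register function.
	 */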
3108 	offset = (u32)(((0x40 | bank) << 12) | (cntr << 8) | fxn);
3109 
3110 	/* Limit the offset to the hw defined mmio region aperture */
3111 	max_offset_lim = (u32)(((0x40 | iommu->max_banks) << 12) |
3112 				(iommu->max_counters << 8) | 0x28);
3113 	if ((offset < MMIO_CNTR_REG_OFFSET) ||
3114 	    (offset > max_offset_lim))
3115 		return -EINVAL;
3116 
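	/* Performance counter values are 48 bits wide, hence the GENMASK_ULL(47, 0) masking. */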
3117 	if (is_write) {
3118 		u64 val = *value & GENMASK_ULL(47, 0);
3119 
3120 		writel((u32)val, iommu->mmio_base + offset);
3121 		writel((val >> 32), iommu->mmio_base + offset + 4);
3122 	} else {
3123 		*value = readl(iommu->mmio_base + offset + 4);
3124 		*value <<= 32;
3125 		*value |= readl(iommu->mmio_base + offset);
3126 		*value &= GENMASK_ULL(47, 0);
3127 	}
3128 
3129 	return 0;
3130 }
3131 
3132 int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
3133 {
3134 	if (!iommu)
3135 		return -EINVAL;
3136 
3137 	return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, false);
3138 }
3139 EXPORT_SYMBOL(amd_iommu_pc_get_reg);
3140 
3141 int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
3142 {
3143 	if (!iommu)
3144 		return -EINVAL;
3145 
3146 	return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, true);
3147 }
3148 EXPORT_SYMBOL(amd_iommu_pc_set_reg);
3149