xref: /openbmc/linux/drivers/iommu/amd/iommu.c (revision 7a6ee0bb)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
4  * Author: Joerg Roedel <jroedel@suse.de>
5  *         Leo Duran <leo.duran@amd.com>
6  */
7 
8 #define pr_fmt(fmt)     "AMD-Vi: " fmt
9 #define dev_fmt(fmt)    pr_fmt(fmt)
10 
11 #include <linux/ratelimit.h>
12 #include <linux/pci.h>
13 #include <linux/acpi.h>
14 #include <linux/amba/bus.h>
15 #include <linux/platform_device.h>
16 #include <linux/pci-ats.h>
17 #include <linux/bitmap.h>
18 #include <linux/slab.h>
19 #include <linux/debugfs.h>
20 #include <linux/scatterlist.h>
21 #include <linux/dma-map-ops.h>
22 #include <linux/dma-direct.h>
23 #include <linux/dma-iommu.h>
24 #include <linux/iommu-helper.h>
25 #include <linux/delay.h>
26 #include <linux/amd-iommu.h>
27 #include <linux/notifier.h>
28 #include <linux/export.h>
29 #include <linux/irq.h>
30 #include <linux/msi.h>
31 #include <linux/irqdomain.h>
32 #include <linux/percpu.h>
33 #include <linux/io-pgtable.h>
34 #include <linux/cc_platform.h>
35 #include <asm/irq_remapping.h>
36 #include <asm/io_apic.h>
37 #include <asm/apic.h>
38 #include <asm/hw_irq.h>
39 #include <asm/proto.h>
40 #include <asm/iommu.h>
41 #include <asm/gart.h>
42 #include <asm/dma.h>
43 
44 #include "amd_iommu.h"
45 #include "../irq_remapping.h"
46 
47 #define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))
48 
49 #define LOOP_TIMEOUT	100000
50 
51 /* IO virtual address start page frame number */
52 #define IOVA_START_PFN		(1)
53 #define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
54 
55 /* Reserved IOVA ranges */
56 #define MSI_RANGE_START		(0xfee00000)
57 #define MSI_RANGE_END		(0xfeefffff)
58 #define HT_RANGE_START		(0xfd00000000ULL)
59 #define HT_RANGE_END		(0xffffffffffULL)
60 
61 #define DEFAULT_PGTABLE_LEVEL	PAGE_MODE_3_LEVEL
62 
63 static DEFINE_SPINLOCK(pd_bitmap_lock);
64 
65 /* List of all available dev_data structures */
66 static LLIST_HEAD(dev_data_list);
67 
68 LIST_HEAD(ioapic_map);
69 LIST_HEAD(hpet_map);
70 LIST_HEAD(acpihid_map);
71 
72 /*
73  * Domain for untranslated devices - only allocated
74  * if iommu=pt passed on kernel cmd line.
75  */
76 const struct iommu_ops amd_iommu_ops;
77 
78 static ATOMIC_NOTIFIER_HEAD(ppr_notifier);
79 int amd_iommu_max_glx_val = -1;
80 
81 /*
82  * general struct to manage commands sent to an IOMMU
83  */
84 struct iommu_cmd {
85 	u32 data[4];
86 };
87 
88 struct kmem_cache *amd_iommu_irq_cache;
89 
90 static void detach_device(struct device *dev);
91 
92 /****************************************************************************
93  *
94  * Helper functions
95  *
96  ****************************************************************************/
97 
98 static inline u16 get_pci_device_id(struct device *dev)
99 {
100 	struct pci_dev *pdev = to_pci_dev(dev);
101 
102 	return pci_dev_id(pdev);
103 }
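/*
 * For reference: the 16-bit devid used throughout this driver packs the
 * PCI bus and devfn as (bus << 8) | devfn, e.g. device 02:00.1 maps to
 * devid = (0x02 << 8) | PCI_DEVFN(0, 1) = 0x0201.
 */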
104 
105 static inline int get_acpihid_device_id(struct device *dev,
106 					struct acpihid_map_entry **entry)
107 {
108 	struct acpi_device *adev = ACPI_COMPANION(dev);
109 	struct acpihid_map_entry *p;
110 
111 	if (!adev)
112 		return -ENODEV;
113 
114 	list_for_each_entry(p, &acpihid_map, list) {
115 		if (acpi_dev_hid_uid_match(adev, p->hid,
116 					   p->uid[0] ? p->uid : NULL)) {
117 			if (entry)
118 				*entry = p;
119 			return p->devid;
120 		}
121 	}
122 	return -EINVAL;
123 }
124 
125 static inline int get_device_id(struct device *dev)
126 {
127 	int devid;
128 
129 	if (dev_is_pci(dev))
130 		devid = get_pci_device_id(dev);
131 	else
132 		devid = get_acpihid_device_id(dev, NULL);
133 
134 	return devid;
135 }
136 
137 static struct protection_domain *to_pdomain(struct iommu_domain *dom)
138 {
139 	return container_of(dom, struct protection_domain, domain);
140 }
141 
142 static struct iommu_dev_data *alloc_dev_data(u16 devid)
143 {
144 	struct iommu_dev_data *dev_data;
145 
146 	dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
147 	if (!dev_data)
148 		return NULL;
149 
150 	spin_lock_init(&dev_data->lock);
151 	dev_data->devid = devid;
152 	ratelimit_default_init(&dev_data->rs);
153 
154 	llist_add(&dev_data->dev_data_list, &dev_data_list);
155 	return dev_data;
156 }
157 
158 static struct iommu_dev_data *search_dev_data(u16 devid)
159 {
160 	struct iommu_dev_data *dev_data;
161 	struct llist_node *node;
162 
163 	if (llist_empty(&dev_data_list))
164 		return NULL;
165 
166 	node = dev_data_list.first;
167 	llist_for_each_entry(dev_data, node, dev_data_list) {
168 		if (dev_data->devid == devid)
169 			return dev_data;
170 	}
171 
172 	return NULL;
173 }
174 
175 static int clone_alias(struct pci_dev *pdev, u16 alias, void *data)
176 {
177 	u16 devid = pci_dev_id(pdev);
178 
179 	if (devid == alias)
180 		return 0;
181 
182 	amd_iommu_rlookup_table[alias] =
183 		amd_iommu_rlookup_table[devid];
184 	memcpy(amd_iommu_dev_table[alias].data,
185 	       amd_iommu_dev_table[devid].data,
186 	       sizeof(amd_iommu_dev_table[alias].data));
187 
188 	return 0;
189 }
190 
191 static void clone_aliases(struct pci_dev *pdev)
192 {
193 	if (!pdev)
194 		return;
195 
196 	/*
197 	 * The IVRS alias stored in the alias table may not be
198 	 * part of the PCI DMA aliases if its bus differs
199 	 * from the original device.
200 	 */
201 	clone_alias(pdev, amd_iommu_alias_table[pci_dev_id(pdev)], NULL);
202 
203 	pci_for_each_dma_alias(pdev, clone_alias, NULL);
204 }
205 
206 static struct pci_dev *setup_aliases(struct device *dev)
207 {
208 	struct pci_dev *pdev = to_pci_dev(dev);
209 	u16 ivrs_alias;
210 
211 	/* For ACPI HID devices, there are no aliases */
212 	if (!dev_is_pci(dev))
213 		return NULL;
214 
215 	/*
216 	 * Add the IVRS alias to the pci aliases if it is on the same
217 	 * bus. The IVRS table may know about a quirk that we don't.
218 	 */
219 	ivrs_alias = amd_iommu_alias_table[pci_dev_id(pdev)];
220 	if (ivrs_alias != pci_dev_id(pdev) &&
221 	    PCI_BUS_NUM(ivrs_alias) == pdev->bus->number)
222 		pci_add_dma_alias(pdev, ivrs_alias & 0xff, 1);
223 
224 	clone_aliases(pdev);
225 
226 	return pdev;
227 }
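/*
 * Hypothetical example of the above: if the IVRS table reports that device
 * 02:02.0 (devid 0x0210) issues requests as 02:01.0 (devid 0x0208), both
 * IDs are on bus 0x02, so 0x0208 is added as a PCI DMA alias and its DTE
 * is kept in sync by clone_aliases().
 */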
228 
229 static struct iommu_dev_data *find_dev_data(u16 devid)
230 {
231 	struct iommu_dev_data *dev_data;
232 	struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
233 
234 	dev_data = search_dev_data(devid);
235 
236 	if (dev_data == NULL) {
237 		dev_data = alloc_dev_data(devid);
238 		if (!dev_data)
239 			return NULL;
240 
241 		if (translation_pre_enabled(iommu))
242 			dev_data->defer_attach = true;
243 	}
244 
245 	return dev_data;
246 }
247 
248 /*
249  * Find or create an IOMMU group for an acpihid device.
250  */
251 static struct iommu_group *acpihid_device_group(struct device *dev)
252 {
253 	struct acpihid_map_entry *p, *entry = NULL;
254 	int devid;
255 
256 	devid = get_acpihid_device_id(dev, &entry);
257 	if (devid < 0)
258 		return ERR_PTR(devid);
259 
260 	list_for_each_entry(p, &acpihid_map, list) {
261 		if ((devid == p->devid) && p->group)
262 			entry->group = p->group;
263 	}
264 
265 	if (!entry->group)
266 		entry->group = generic_device_group(dev);
267 	else
268 		iommu_group_ref_get(entry->group);
269 
270 	return entry->group;
271 }
272 
273 static bool pci_iommuv2_capable(struct pci_dev *pdev)
274 {
275 	static const int caps[] = {
276 		PCI_EXT_CAP_ID_PRI,
277 		PCI_EXT_CAP_ID_PASID,
278 	};
279 	int i, pos;
280 
281 	if (!pci_ats_supported(pdev))
282 		return false;
283 
284 	for (i = 0; i < ARRAY_SIZE(caps); ++i) {
285 		pos = pci_find_ext_capability(pdev, caps[i]);
286 		if (pos == 0)
287 			return false;
288 	}
289 
290 	return true;
291 }
292 
293 /*
294  * This function checks if the driver got a valid device from the caller to
295  * avoid dereferencing invalid pointers.
296  */
297 static bool check_device(struct device *dev)
298 {
299 	int devid;
300 
301 	if (!dev)
302 		return false;
303 
304 	devid = get_device_id(dev);
305 	if (devid < 0)
306 		return false;
307 
308 	/* Out of our scope? */
309 	if (devid > amd_iommu_last_bdf)
310 		return false;
311 
312 	if (amd_iommu_rlookup_table[devid] == NULL)
313 		return false;
314 
315 	return true;
316 }
317 
318 static int iommu_init_device(struct device *dev)
319 {
320 	struct iommu_dev_data *dev_data;
321 	int devid;
322 
323 	if (dev_iommu_priv_get(dev))
324 		return 0;
325 
326 	devid = get_device_id(dev);
327 	if (devid < 0)
328 		return devid;
329 
330 	dev_data = find_dev_data(devid);
331 	if (!dev_data)
332 		return -ENOMEM;
333 
334 	dev_data->pdev = setup_aliases(dev);
335 
336 	/*
337 	 * By default we use passthrough mode for IOMMUv2 capable devices.
338 	 * But if amd_iommu=force_isolation is set (e.g. to debug DMA to
339 	 * invalid address), we ignore the capability for the device so
340 	 * it'll be forced to go into translation mode.
341 	 */
342 	if ((iommu_default_passthrough() || !amd_iommu_force_isolation) &&
343 	    dev_is_pci(dev) && pci_iommuv2_capable(to_pci_dev(dev))) {
344 		struct amd_iommu *iommu;
345 
346 		iommu = amd_iommu_rlookup_table[dev_data->devid];
347 		dev_data->iommu_v2 = iommu->is_iommu_v2;
348 	}
349 
350 	dev_iommu_priv_set(dev, dev_data);
351 
352 	return 0;
353 }
354 
355 static void iommu_ignore_device(struct device *dev)
356 {
357 	int devid;
358 
359 	devid = get_device_id(dev);
360 	if (devid < 0)
361 		return;
362 
363 	amd_iommu_rlookup_table[devid] = NULL;
364 	memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry));
365 
366 	setup_aliases(dev);
367 }
368 
369 static void amd_iommu_uninit_device(struct device *dev)
370 {
371 	struct iommu_dev_data *dev_data;
372 
373 	dev_data = dev_iommu_priv_get(dev);
374 	if (!dev_data)
375 		return;
376 
377 	if (dev_data->domain)
378 		detach_device(dev);
379 
380 	dev_iommu_priv_set(dev, NULL);
381 
382 	/*
383 	 * We keep dev_data around for unplugged devices and reuse it when the
384 	 * device is re-plugged - not doing so would introduce a ton of races.
385 	 */
386 }
387 
388 /****************************************************************************
389  *
390  * Interrupt handling functions
391  *
392  ****************************************************************************/
393 
394 static void dump_dte_entry(u16 devid)
395 {
396 	int i;
397 
398 	for (i = 0; i < 4; ++i)
399 		pr_err("DTE[%d]: %016llx\n", i,
400 			amd_iommu_dev_table[devid].data[i]);
401 }
402 
403 static void dump_command(unsigned long phys_addr)
404 {
405 	struct iommu_cmd *cmd = iommu_phys_to_virt(phys_addr);
406 	int i;
407 
408 	for (i = 0; i < 4; ++i)
409 		pr_err("CMD[%d]: %08x\n", i, cmd->data[i]);
410 }
411 
412 static void amd_iommu_report_rmp_hw_error(volatile u32 *event)
413 {
414 	struct iommu_dev_data *dev_data = NULL;
415 	int devid, vmg_tag, flags;
416 	struct pci_dev *pdev;
417 	u64 spa;
418 
419 	devid   = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
420 	vmg_tag = (event[1]) & 0xFFFF;
421 	flags   = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
422 	spa     = ((u64)event[3] << 32) | (event[2] & 0xFFFFFFF8);
423 
424 	pdev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(devid),
425 					   devid & 0xff);
426 	if (pdev)
427 		dev_data = dev_iommu_priv_get(&pdev->dev);
428 
429 	if (dev_data) {
430 		if (__ratelimit(&dev_data->rs)) {
431 			pci_err(pdev, "Event logged [RMP_HW_ERROR vmg_tag=0x%04x, spa=0x%llx, flags=0x%04x]\n",
432 				vmg_tag, spa, flags);
433 		}
434 	} else {
435 		pr_err_ratelimited("Event logged [RMP_HW_ERROR device=%02x:%02x.%x, vmg_tag=0x%04x, spa=0x%llx, flags=0x%04x]\n",
436 			PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
437 			vmg_tag, spa, flags);
438 	}
439 
440 	if (pdev)
441 		pci_dev_put(pdev);
442 }
443 
444 static void amd_iommu_report_rmp_fault(volatile u32 *event)
445 {
446 	struct iommu_dev_data *dev_data = NULL;
447 	int devid, flags_rmp, vmg_tag, flags;
448 	struct pci_dev *pdev;
449 	u64 gpa;
450 
451 	devid     = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
452 	flags_rmp = (event[0] >> EVENT_FLAGS_SHIFT) & 0xFF;
453 	vmg_tag   = (event[1]) & 0xFFFF;
454 	flags     = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
455 	gpa       = ((u64)event[3] << 32) | event[2];
456 
457 	pdev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(devid),
458 					   devid & 0xff);
459 	if (pdev)
460 		dev_data = dev_iommu_priv_get(&pdev->dev);
461 
462 	if (dev_data) {
463 		if (__ratelimit(&dev_data->rs)) {
464 			pci_err(pdev, "Event logged [RMP_PAGE_FAULT vmg_tag=0x%04x, gpa=0x%llx, flags_rmp=0x%04x, flags=0x%04x]\n",
465 				vmg_tag, gpa, flags_rmp, flags);
466 		}
467 	} else {
468 		pr_err_ratelimited("Event logged [RMP_PAGE_FAULT device=%02x:%02x.%x, vmg_tag=0x%04x, gpa=0x%llx, flags_rmp=0x%04x, flags=0x%04x]\n",
469 			PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
470 			vmg_tag, gpa, flags_rmp, flags);
471 	}
472 
473 	if (pdev)
474 		pci_dev_put(pdev);
475 }
476 
477 #define IS_IOMMU_MEM_TRANSACTION(flags)		\
478 	(((flags) & EVENT_FLAG_I) == 0)
479 
480 #define IS_WRITE_REQUEST(flags)			\
481 	((flags) & EVENT_FLAG_RW)
482 
483 static void amd_iommu_report_page_fault(u16 devid, u16 domain_id,
484 					u64 address, int flags)
485 {
486 	struct iommu_dev_data *dev_data = NULL;
487 	struct pci_dev *pdev;
488 
489 	pdev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(devid),
490 					   devid & 0xff);
491 	if (pdev)
492 		dev_data = dev_iommu_priv_get(&pdev->dev);
493 
494 	if (dev_data) {
495 		/*
496 		 * If this is a DMA fault (for which the I(nterrupt)
497 		 * bit will be unset), allow report_iommu_fault() to
498 		 * prevent logging it.
499 		 */
500 		if (IS_IOMMU_MEM_TRANSACTION(flags)) {
501 			if (!report_iommu_fault(&dev_data->domain->domain,
502 						&pdev->dev, address,
503 						IS_WRITE_REQUEST(flags) ?
504 							IOMMU_FAULT_WRITE :
505 							IOMMU_FAULT_READ))
506 				goto out;
507 		}
508 
509 		if (__ratelimit(&dev_data->rs)) {
510 			pci_err(pdev, "Event logged [IO_PAGE_FAULT domain=0x%04x address=0x%llx flags=0x%04x]\n",
511 				domain_id, address, flags);
512 		}
513 	} else {
514 		pr_err_ratelimited("Event logged [IO_PAGE_FAULT device=%02x:%02x.%x domain=0x%04x address=0x%llx flags=0x%04x]\n",
515 			PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
516 			domain_id, address, flags);
517 	}
518 
519 out:
520 	if (pdev)
521 		pci_dev_put(pdev);
522 }
523 
524 static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
525 {
526 	struct device *dev = iommu->iommu.dev;
527 	int type, devid, flags, tag;
528 	volatile u32 *event = __evt;
529 	int count = 0;
530 	u64 address;
531 	u32 pasid;
532 
533 retry:
534 	type    = (event[1] >> EVENT_TYPE_SHIFT)  & EVENT_TYPE_MASK;
535 	devid   = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
536 	pasid   = (event[0] & EVENT_DOMID_MASK_HI) |
537 		  (event[1] & EVENT_DOMID_MASK_LO);
538 	flags   = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
539 	address = (u64)(((u64)event[3]) << 32) | event[2];
540 
541 	if (type == 0) {
542 		/* Did we hit the erratum? */
543 		if (++count == LOOP_TIMEOUT) {
544 			pr_err("No event written to event log\n");
545 			return;
546 		}
547 		udelay(1);
548 		goto retry;
549 	}
550 
551 	if (type == EVENT_TYPE_IO_FAULT) {
552 		amd_iommu_report_page_fault(devid, pasid, address, flags);
553 		return;
554 	}
555 
556 	switch (type) {
557 	case EVENT_TYPE_ILL_DEV:
558 		dev_err(dev, "Event logged [ILLEGAL_DEV_TABLE_ENTRY device=%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x]\n",
559 			PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
560 			pasid, address, flags);
561 		dump_dte_entry(devid);
562 		break;
563 	case EVENT_TYPE_DEV_TAB_ERR:
564 		dev_err(dev, "Event logged [DEV_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
565 			"address=0x%llx flags=0x%04x]\n",
566 			PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
567 			address, flags);
568 		break;
569 	case EVENT_TYPE_PAGE_TAB_ERR:
570 		dev_err(dev, "Event logged [PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x pasid=0x%04x address=0x%llx flags=0x%04x]\n",
571 			PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
572 			pasid, address, flags);
573 		break;
574 	case EVENT_TYPE_ILL_CMD:
575 		dev_err(dev, "Event logged [ILLEGAL_COMMAND_ERROR address=0x%llx]\n", address);
576 		dump_command(address);
577 		break;
578 	case EVENT_TYPE_CMD_HARD_ERR:
579 		dev_err(dev, "Event logged [COMMAND_HARDWARE_ERROR address=0x%llx flags=0x%04x]\n",
580 			address, flags);
581 		break;
582 	case EVENT_TYPE_IOTLB_INV_TO:
583 		dev_err(dev, "Event logged [IOTLB_INV_TIMEOUT device=%02x:%02x.%x address=0x%llx]\n",
584 			PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
585 			address);
586 		break;
587 	case EVENT_TYPE_INV_DEV_REQ:
588 		dev_err(dev, "Event logged [INVALID_DEVICE_REQUEST device=%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x]\n",
589 			PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
590 			pasid, address, flags);
591 		break;
592 	case EVENT_TYPE_RMP_FAULT:
593 		amd_iommu_report_rmp_fault(event);
594 		break;
595 	case EVENT_TYPE_RMP_HW_ERR:
596 		amd_iommu_report_rmp_hw_error(event);
597 		break;
598 	case EVENT_TYPE_INV_PPR_REQ:
599 		pasid = PPR_PASID(*((u64 *)__evt));
600 		tag = event[1] & 0x03FF;
601 		dev_err(dev, "Event logged [INVALID_PPR_REQUEST device=%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x tag=0x%03x]\n",
602 			PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
603 			pasid, address, flags, tag);
604 		break;
605 	default:
606 		dev_err(dev, "Event logged [UNKNOWN event[0]=0x%08x event[1]=0x%08x event[2]=0x%08x event[3]=0x%08x\n",
607 			event[0], event[1], event[2], event[3]);
608 	}
609 
610 	memset(__evt, 0, 4 * sizeof(u32));
611 }
612 
613 static void iommu_poll_events(struct amd_iommu *iommu)
614 {
615 	u32 head, tail;
616 
617 	head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
618 	tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);
619 
620 	while (head != tail) {
621 		iommu_print_event(iommu, iommu->evt_buf + head);
622 		head = (head + EVENT_ENTRY_SIZE) % EVT_BUFFER_SIZE;
623 	}
624 
625 	writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
626 }
627 
628 static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw)
629 {
630 	struct amd_iommu_fault fault;
631 
632 	if (PPR_REQ_TYPE(raw[0]) != PPR_REQ_FAULT) {
633 		pr_err_ratelimited("Unknown PPR request received\n");
634 		return;
635 	}
636 
637 	fault.address   = raw[1];
638 	fault.pasid     = PPR_PASID(raw[0]);
639 	fault.device_id = PPR_DEVID(raw[0]);
640 	fault.tag       = PPR_TAG(raw[0]);
641 	fault.flags     = PPR_FLAGS(raw[0]);
642 
643 	atomic_notifier_call_chain(&ppr_notifier, 0, &fault);
644 }
645 
646 static void iommu_poll_ppr_log(struct amd_iommu *iommu)
647 {
648 	u32 head, tail;
649 
650 	if (iommu->ppr_log == NULL)
651 		return;
652 
653 	head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
654 	tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
655 
656 	while (head != tail) {
657 		volatile u64 *raw;
658 		u64 entry[2];
659 		int i;
660 
661 		raw = (u64 *)(iommu->ppr_log + head);
662 
663 		/*
664 		 * Hardware bug: Interrupt may arrive before the entry is
665 		 * written to memory. If this happens we need to wait for the
666 		 * entry to arrive.
667 		 */
668 		for (i = 0; i < LOOP_TIMEOUT; ++i) {
669 			if (PPR_REQ_TYPE(raw[0]) != 0)
670 				break;
671 			udelay(1);
672 		}
673 
674 		/* Avoid memcpy function-call overhead */
675 		entry[0] = raw[0];
676 		entry[1] = raw[1];
677 
678 		/*
679 		 * To detect the hardware bug we need to clear the entry
680 		 * back to zero.
681 		 */
682 		raw[0] = raw[1] = 0UL;
683 
684 		/* Update head pointer of hardware ring-buffer */
685 		head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE;
686 		writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
687 
688 		/* Handle PPR entry */
689 		iommu_handle_ppr_entry(iommu, entry);
690 
691 		/* Refresh ring-buffer information */
692 		head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
693 		tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
694 	}
695 }
696 
697 #ifdef CONFIG_IRQ_REMAP
698 static int (*iommu_ga_log_notifier)(u32);
699 
700 int amd_iommu_register_ga_log_notifier(int (*notifier)(u32))
701 {
702 	iommu_ga_log_notifier = notifier;
703 
704 	return 0;
705 }
706 EXPORT_SYMBOL(amd_iommu_register_ga_log_notifier);
707 
708 static void iommu_poll_ga_log(struct amd_iommu *iommu)
709 {
710 	u32 head, tail, cnt = 0;
711 
712 	if (iommu->ga_log == NULL)
713 		return;
714 
715 	head = readl(iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
716 	tail = readl(iommu->mmio_base + MMIO_GA_TAIL_OFFSET);
717 
718 	while (head != tail) {
719 		volatile u64 *raw;
720 		u64 log_entry;
721 
722 		raw = (u64 *)(iommu->ga_log + head);
723 		cnt++;
724 
725 		/* Avoid memcpy function-call overhead */
726 		log_entry = *raw;
727 
728 		/* Update head pointer of hardware ring-buffer */
729 		head = (head + GA_ENTRY_SIZE) % GA_LOG_SIZE;
730 		writel(head, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
731 
732 		/* Handle GA entry */
733 		switch (GA_REQ_TYPE(log_entry)) {
734 		case GA_GUEST_NR:
735 			if (!iommu_ga_log_notifier)
736 				break;
737 
738 			pr_debug("%s: devid=%#x, ga_tag=%#x\n",
739 				 __func__, GA_DEVID(log_entry),
740 				 GA_TAG(log_entry));
741 
742 			if (iommu_ga_log_notifier(GA_TAG(log_entry)) != 0)
743 				pr_err("GA log notifier failed.\n");
744 			break;
745 		default:
746 			break;
747 		}
748 	}
749 }
750 
751 static void
752 amd_iommu_set_pci_msi_domain(struct device *dev, struct amd_iommu *iommu)
753 {
754 	if (!irq_remapping_enabled || !dev_is_pci(dev) ||
755 	    pci_dev_has_special_msi_domain(to_pci_dev(dev)))
756 		return;
757 
758 	dev_set_msi_domain(dev, iommu->msi_domain);
759 }
760 
761 #else /* !CONFIG_IRQ_REMAP */
762 static inline void
763 amd_iommu_set_pci_msi_domain(struct device *dev, struct amd_iommu *iommu) { }
764 #endif /* CONFIG_IRQ_REMAP */
765 
766 #define AMD_IOMMU_INT_MASK	\
767 	(MMIO_STATUS_EVT_OVERFLOW_INT_MASK | \
768 	 MMIO_STATUS_EVT_INT_MASK | \
769 	 MMIO_STATUS_PPR_INT_MASK | \
770 	 MMIO_STATUS_GALOG_INT_MASK)
771 
772 irqreturn_t amd_iommu_int_thread(int irq, void *data)
773 {
774 	struct amd_iommu *iommu = (struct amd_iommu *) data;
775 	u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
776 
777 	while (status & AMD_IOMMU_INT_MASK) {
778 		/* Enable interrupt sources again */
779 		writel(AMD_IOMMU_INT_MASK,
780 			iommu->mmio_base + MMIO_STATUS_OFFSET);
781 
782 		if (status & MMIO_STATUS_EVT_INT_MASK) {
783 			pr_devel("Processing IOMMU Event Log\n");
784 			iommu_poll_events(iommu);
785 		}
786 
787 		if (status & MMIO_STATUS_PPR_INT_MASK) {
788 			pr_devel("Processing IOMMU PPR Log\n");
789 			iommu_poll_ppr_log(iommu);
790 		}
791 
792 #ifdef CONFIG_IRQ_REMAP
793 		if (status & MMIO_STATUS_GALOG_INT_MASK) {
794 			pr_devel("Processing IOMMU GA Log\n");
795 			iommu_poll_ga_log(iommu);
796 		}
797 #endif
798 
799 		if (status & MMIO_STATUS_EVT_OVERFLOW_INT_MASK) {
800 			pr_info_ratelimited("IOMMU event log overflow\n");
801 			amd_iommu_restart_event_logging(iommu);
802 		}
803 
804 		/*
805 		 * Hardware bug: ERBT1312
806 		 * When re-enabling an interrupt (by writing 1
807 		 * to clear the bit), the hardware might also try to set
808 		 * the interrupt bit in the event status register.
809 		 * In this scenario, the bit will be set and will disable
810 		 * subsequent interrupts.
811 		 *
812 		 * Workaround: The IOMMU driver should read back the
813 		 * status register and check if the interrupt bits are cleared.
814 		 * If not, the driver needs to go through the interrupt handler
815 		 * again and re-clear the bits.
816 		 */
817 		status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
818 	}
819 	return IRQ_HANDLED;
820 }
821 
822 irqreturn_t amd_iommu_int_handler(int irq, void *data)
823 {
824 	return IRQ_WAKE_THREAD;
825 }
826 
827 /****************************************************************************
828  *
829  * IOMMU command queuing functions
830  *
831  ****************************************************************************/
832 
833 static int wait_on_sem(struct amd_iommu *iommu, u64 data)
834 {
835 	int i = 0;
836 
837 	while (*iommu->cmd_sem != data && i < LOOP_TIMEOUT) {
838 		udelay(1);
839 		i += 1;
840 	}
841 
842 	if (i == LOOP_TIMEOUT) {
843 		pr_alert("Completion-Wait loop timed out\n");
844 		return -EIO;
845 	}
846 
847 	return 0;
848 }
849 
850 static void copy_cmd_to_buffer(struct amd_iommu *iommu,
851 			       struct iommu_cmd *cmd)
852 {
853 	u8 *target;
854 	u32 tail;
855 
856 	/* Copy command to buffer */
857 	tail = iommu->cmd_buf_tail;
858 	target = iommu->cmd_buf + tail;
859 	memcpy(target, cmd, sizeof(*cmd));
860 
861 	tail = (tail + sizeof(*cmd)) % CMD_BUFFER_SIZE;
862 	iommu->cmd_buf_tail = tail;
863 
864 	/* Tell the IOMMU about it */
865 	writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
866 }
867 
868 static void build_completion_wait(struct iommu_cmd *cmd,
869 				  struct amd_iommu *iommu,
870 				  u64 data)
871 {
872 	u64 paddr = iommu_virt_to_phys((void *)iommu->cmd_sem);
873 
874 	memset(cmd, 0, sizeof(*cmd));
875 	cmd->data[0] = lower_32_bits(paddr) | CMD_COMPL_WAIT_STORE_MASK;
876 	cmd->data[1] = upper_32_bits(paddr);
877 	cmd->data[2] = data;
878 	CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);
879 }
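/*
 * Note on the completion-wait protocol: with CMD_COMPL_WAIT_STORE_MASK set,
 * the IOMMU writes the 64-bit 'data' value to the cmd_sem address once all
 * previously queued commands have finished, and wait_on_sem() above polls
 * for that store; see iommu_completion_wait() below for the typical usage
 * with data = ++iommu->cmd_sem_val.
 */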
880 
881 static void build_inv_dte(struct iommu_cmd *cmd, u16 devid)
882 {
883 	memset(cmd, 0, sizeof(*cmd));
884 	cmd->data[0] = devid;
885 	CMD_SET_TYPE(cmd, CMD_INV_DEV_ENTRY);
886 }
887 
888 /*
889  * Builds an invalidation address which is suitable for one page or multiple
890  * pages. Sets the size bit (S) as needed if more than one page is flushed.
891  */
892 static inline u64 build_inv_address(u64 address, size_t size)
893 {
894 	u64 pages, end, msb_diff;
895 
896 	pages = iommu_num_pages(address, size, PAGE_SIZE);
897 
898 	if (pages == 1)
899 		return address & PAGE_MASK;
900 
901 	end = address + size - 1;
902 
903 	/*
904 	 * msb_diff holds the index of the most significant bit that
905 	 * flipped between the start and end.
906 	 */
907 	msb_diff = fls64(end ^ address) - 1;
908 
909 	/*
910 	 * Bits 63:52 are sign extended. If for some reason bit 51 is different
911 	 * between the start and the end, invalidate everything.
912 	 */
913 	if (unlikely(msb_diff > 51)) {
914 		address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
915 	} else {
916 		/*
917 		 * The msb-bit must be clear on the address. Just set all the
918 		 * lower bits.
919 		 */
920 		address |= (1ull << msb_diff) - 1;
921 	}
922 
923 	/* Clear bits 11:0 */
924 	address &= PAGE_MASK;
925 
926 	/* Set the size bit - we flush more than one 4kb page */
927 	return address | CMD_INV_IOMMU_PAGES_SIZE_MASK;
928 }
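/*
 * Worked example for build_inv_address(): address = 0x10000 and
 * size = 0x20000 give end = 0x2ffff and msb_diff = fls64(0x3ffff) - 1 = 17.
 * Setting all bits below bit 17 yields 0x1ffff; clearing bits 11:0 leaves
 * 0x1f000. With the size bit set, the run of ones in bits 16:12 tells the
 * IOMMU to invalidate the naturally aligned 256KB region [0x0, 0x3ffff],
 * which covers the requested range.
 */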
929 
930 static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
931 				  size_t size, u16 domid, int pde)
932 {
933 	u64 inv_address = build_inv_address(address, size);
934 
935 	memset(cmd, 0, sizeof(*cmd));
936 	cmd->data[1] |= domid;
937 	cmd->data[2]  = lower_32_bits(inv_address);
938 	cmd->data[3]  = upper_32_bits(inv_address);
939 	CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
940 	if (pde) /* PDE bit - we want to flush everything, not only the PTEs */
941 		cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
942 }
943 
944 static void build_inv_iotlb_pages(struct iommu_cmd *cmd, u16 devid, int qdep,
945 				  u64 address, size_t size)
946 {
947 	u64 inv_address = build_inv_address(address, size);
948 
949 	memset(cmd, 0, sizeof(*cmd));
950 	cmd->data[0]  = devid;
951 	cmd->data[0] |= (qdep & 0xff) << 24;
952 	cmd->data[1]  = devid;
953 	cmd->data[2]  = lower_32_bits(inv_address);
954 	cmd->data[3]  = upper_32_bits(inv_address);
955 	CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES);
956 }
957 
958 static void build_inv_iommu_pasid(struct iommu_cmd *cmd, u16 domid, u32 pasid,
959 				  u64 address, bool size)
960 {
961 	memset(cmd, 0, sizeof(*cmd));
962 
963 	address &= ~(0xfffULL);
964 
965 	cmd->data[0]  = pasid;
966 	cmd->data[1]  = domid;
967 	cmd->data[2]  = lower_32_bits(address);
968 	cmd->data[3]  = upper_32_bits(address);
969 	cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
970 	cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
971 	if (size)
972 		cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
973 	CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
974 }
975 
976 static void build_inv_iotlb_pasid(struct iommu_cmd *cmd, u16 devid, u32 pasid,
977 				  int qdep, u64 address, bool size)
978 {
979 	memset(cmd, 0, sizeof(*cmd));
980 
981 	address &= ~(0xfffULL);
982 
983 	cmd->data[0]  = devid;
984 	cmd->data[0] |= ((pasid >> 8) & 0xff) << 16;
985 	cmd->data[0] |= (qdep  & 0xff) << 24;
986 	cmd->data[1]  = devid;
987 	cmd->data[1] |= (pasid & 0xff) << 16;
988 	cmd->data[2]  = lower_32_bits(address);
989 	cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
990 	cmd->data[3]  = upper_32_bits(address);
991 	if (size)
992 		cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
993 	CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES);
994 }
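/*
 * Worked example for the split PASID encoding above: pasid = 0x1234 puts
 * the high byte 0x12 into bits 23:16 of data[0] and the low byte 0x34 into
 * bits 23:16 of data[1].
 */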
995 
996 static void build_complete_ppr(struct iommu_cmd *cmd, u16 devid, u32 pasid,
997 			       int status, int tag, bool gn)
998 {
999 	memset(cmd, 0, sizeof(*cmd));
1000 
1001 	cmd->data[0]  = devid;
1002 	if (gn) {
1003 		cmd->data[1]  = pasid;
1004 		cmd->data[2]  = CMD_INV_IOMMU_PAGES_GN_MASK;
1005 	}
1006 	cmd->data[3]  = tag & 0x1ff;
1007 	cmd->data[3] |= (status & PPR_STATUS_MASK) << PPR_STATUS_SHIFT;
1008 
1009 	CMD_SET_TYPE(cmd, CMD_COMPLETE_PPR);
1010 }
1011 
1012 static void build_inv_all(struct iommu_cmd *cmd)
1013 {
1014 	memset(cmd, 0, sizeof(*cmd));
1015 	CMD_SET_TYPE(cmd, CMD_INV_ALL);
1016 }
1017 
1018 static void build_inv_irt(struct iommu_cmd *cmd, u16 devid)
1019 {
1020 	memset(cmd, 0, sizeof(*cmd));
1021 	cmd->data[0] = devid;
1022 	CMD_SET_TYPE(cmd, CMD_INV_IRT);
1023 }
1024 
1025 /*
1026  * Writes the command to the IOMMUs command buffer and informs the
1027  * hardware about the new command.
1028  */
1029 static int __iommu_queue_command_sync(struct amd_iommu *iommu,
1030 				      struct iommu_cmd *cmd,
1031 				      bool sync)
1032 {
1033 	unsigned int count = 0;
1034 	u32 left, next_tail;
1035 
1036 	next_tail = (iommu->cmd_buf_tail + sizeof(*cmd)) % CMD_BUFFER_SIZE;
1037 again:
1038 	left      = (iommu->cmd_buf_head - next_tail) % CMD_BUFFER_SIZE;
1039 
1040 	if (left <= 0x20) {
1041 		/* Skip udelay() the first time around */
1042 		if (count++) {
1043 			if (count == LOOP_TIMEOUT) {
1044 				pr_err("Command buffer timeout\n");
1045 				return -EIO;
1046 			}
1047 
1048 			udelay(1);
1049 		}
1050 
1051 		/* Update head and recheck remaining space */
1052 		iommu->cmd_buf_head = readl(iommu->mmio_base +
1053 					    MMIO_CMD_HEAD_OFFSET);
1054 
1055 		goto again;
1056 	}
1057 
1058 	copy_cmd_to_buffer(iommu, cmd);
1059 
1060 	/* Do we need to make sure all commands are processed? */
1061 	iommu->need_sync = sync;
1062 
1063 	return 0;
1064 }
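/*
 * Worked example for the free-space check above (illustrative, assuming
 * CMD_BUFFER_SIZE = 0x2000 and 16-byte commands): head = 0x0 and
 * tail = 0x1fe0 give next_tail = 0x1ff0 and
 * left = (0x0 - 0x1ff0) % 0x2000 = 0x10. Since left <= 0x20, the buffer is
 * treated as full and the head pointer is re-read from MMIO until the
 * hardware has consumed enough commands.
 */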
1065 
1066 static int iommu_queue_command_sync(struct amd_iommu *iommu,
1067 				    struct iommu_cmd *cmd,
1068 				    bool sync)
1069 {
1070 	unsigned long flags;
1071 	int ret;
1072 
1073 	raw_spin_lock_irqsave(&iommu->lock, flags);
1074 	ret = __iommu_queue_command_sync(iommu, cmd, sync);
1075 	raw_spin_unlock_irqrestore(&iommu->lock, flags);
1076 
1077 	return ret;
1078 }
1079 
1080 static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
1081 {
1082 	return iommu_queue_command_sync(iommu, cmd, true);
1083 }
1084 
1085 /*
1086  * This function queues a completion wait command into the command
1087  * buffer of an IOMMU
1088  */
1089 static int iommu_completion_wait(struct amd_iommu *iommu)
1090 {
1091 	struct iommu_cmd cmd;
1092 	unsigned long flags;
1093 	int ret;
1094 	u64 data;
1095 
1096 	if (!iommu->need_sync)
1097 		return 0;
1098 
1099 	raw_spin_lock_irqsave(&iommu->lock, flags);
1100 
1101 	data = ++iommu->cmd_sem_val;
1102 	build_completion_wait(&cmd, iommu, data);
1103 
1104 	ret = __iommu_queue_command_sync(iommu, &cmd, false);
1105 	if (ret)
1106 		goto out_unlock;
1107 
1108 	ret = wait_on_sem(iommu, data);
1109 
1110 out_unlock:
1111 	raw_spin_unlock_irqrestore(&iommu->lock, flags);
1112 
1113 	return ret;
1114 }
1115 
1116 static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid)
1117 {
1118 	struct iommu_cmd cmd;
1119 
1120 	build_inv_dte(&cmd, devid);
1121 
1122 	return iommu_queue_command(iommu, &cmd);
1123 }
1124 
1125 static void amd_iommu_flush_dte_all(struct amd_iommu *iommu)
1126 {
1127 	u32 devid;
1128 
1129 	for (devid = 0; devid <= 0xffff; ++devid)
1130 		iommu_flush_dte(iommu, devid);
1131 
1132 	iommu_completion_wait(iommu);
1133 }
1134 
1135 /*
1136  * This function uses heavy locking and may disable irqs for some time. But
1137  * this is no issue because it is only called during resume.
1138  */
1139 static void amd_iommu_flush_tlb_all(struct amd_iommu *iommu)
1140 {
1141 	u32 dom_id;
1142 
1143 	for (dom_id = 0; dom_id <= 0xffff; ++dom_id) {
1144 		struct iommu_cmd cmd;
1145 		build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
1146 				      dom_id, 1);
1147 		iommu_queue_command(iommu, &cmd);
1148 	}
1149 
1150 	iommu_completion_wait(iommu);
1151 }
1152 
1153 static void amd_iommu_flush_tlb_domid(struct amd_iommu *iommu, u32 dom_id)
1154 {
1155 	struct iommu_cmd cmd;
1156 
1157 	build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
1158 			      dom_id, 1);
1159 	iommu_queue_command(iommu, &cmd);
1160 
1161 	iommu_completion_wait(iommu);
1162 }
1163 
1164 static void amd_iommu_flush_all(struct amd_iommu *iommu)
1165 {
1166 	struct iommu_cmd cmd;
1167 
1168 	build_inv_all(&cmd);
1169 
1170 	iommu_queue_command(iommu, &cmd);
1171 	iommu_completion_wait(iommu);
1172 }
1173 
1174 static void iommu_flush_irt(struct amd_iommu *iommu, u16 devid)
1175 {
1176 	struct iommu_cmd cmd;
1177 
1178 	build_inv_irt(&cmd, devid);
1179 
1180 	iommu_queue_command(iommu, &cmd);
1181 }
1182 
1183 static void amd_iommu_flush_irt_all(struct amd_iommu *iommu)
1184 {
1185 	u32 devid;
1186 
1187 	for (devid = 0; devid < MAX_DEV_TABLE_ENTRIES; devid++)
1188 		iommu_flush_irt(iommu, devid);
1189 
1190 	iommu_completion_wait(iommu);
1191 }
1192 
1193 void iommu_flush_all_caches(struct amd_iommu *iommu)
1194 {
1195 	if (iommu_feature(iommu, FEATURE_IA)) {
1196 		amd_iommu_flush_all(iommu);
1197 	} else {
1198 		amd_iommu_flush_dte_all(iommu);
1199 		amd_iommu_flush_irt_all(iommu);
1200 		amd_iommu_flush_tlb_all(iommu);
1201 	}
1202 }
1203 
1204 /*
1205  * Command send function for flushing on-device TLB
1206  */
1207 static int device_flush_iotlb(struct iommu_dev_data *dev_data,
1208 			      u64 address, size_t size)
1209 {
1210 	struct amd_iommu *iommu;
1211 	struct iommu_cmd cmd;
1212 	int qdep;
1213 
1214 	qdep     = dev_data->ats.qdep;
1215 	iommu    = amd_iommu_rlookup_table[dev_data->devid];
1216 
1217 	build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address, size);
1218 
1219 	return iommu_queue_command(iommu, &cmd);
1220 }
1221 
1222 static int device_flush_dte_alias(struct pci_dev *pdev, u16 alias, void *data)
1223 {
1224 	struct amd_iommu *iommu = data;
1225 
1226 	return iommu_flush_dte(iommu, alias);
1227 }
1228 
1229 /*
1230  * Command send function for invalidating a device table entry
1231  */
1232 static int device_flush_dte(struct iommu_dev_data *dev_data)
1233 {
1234 	struct amd_iommu *iommu;
1235 	u16 alias;
1236 	int ret;
1237 
1238 	iommu = amd_iommu_rlookup_table[dev_data->devid];
1239 
1240 	if (dev_data->pdev)
1241 		ret = pci_for_each_dma_alias(dev_data->pdev,
1242 					     device_flush_dte_alias, iommu);
1243 	else
1244 		ret = iommu_flush_dte(iommu, dev_data->devid);
1245 	if (ret)
1246 		return ret;
1247 
1248 	alias = amd_iommu_alias_table[dev_data->devid];
1249 	if (alias != dev_data->devid) {
1250 		ret = iommu_flush_dte(iommu, alias);
1251 		if (ret)
1252 			return ret;
1253 	}
1254 
1255 	if (dev_data->ats.enabled)
1256 		ret = device_flush_iotlb(dev_data, 0, ~0UL);
1257 
1258 	return ret;
1259 }
1260 
1261 /*
1262  * TLB invalidation function which is called from the mapping functions.
1263  * It invalidates a single PTE if the range to flush is within a single
1264  * page. Otherwise it flushes the whole TLB of the IOMMU.
1265  */
1266 static void __domain_flush_pages(struct protection_domain *domain,
1267 				 u64 address, size_t size, int pde)
1268 {
1269 	struct iommu_dev_data *dev_data;
1270 	struct iommu_cmd cmd;
1271 	int ret = 0, i;
1272 
1273 	build_inv_iommu_pages(&cmd, address, size, domain->id, pde);
1274 
1275 	for (i = 0; i < amd_iommu_get_num_iommus(); ++i) {
1276 		if (!domain->dev_iommu[i])
1277 			continue;
1278 
1279 		/*
1280 		 * Devices of this domain are behind this IOMMU
1281 		 * We need a TLB flush
1282 		 */
1283 		ret |= iommu_queue_command(amd_iommus[i], &cmd);
1284 	}
1285 
1286 	list_for_each_entry(dev_data, &domain->dev_list, list) {
1287 
1288 		if (!dev_data->ats.enabled)
1289 			continue;
1290 
1291 		ret |= device_flush_iotlb(dev_data, address, size);
1292 	}
1293 
1294 	WARN_ON(ret);
1295 }
1296 
1297 static void domain_flush_pages(struct protection_domain *domain,
1298 			       u64 address, size_t size, int pde)
1299 {
1300 	if (likely(!amd_iommu_np_cache)) {
1301 		__domain_flush_pages(domain, address, size, pde);
1302 		return;
1303 	}
1304 
1305 	/*
1306 	 * When NpCache is on, we infer that we run in a VM and use a vIOMMU.
1307 	 * In such setups it is best to avoid flushes of ranges which are not
1308 	 * naturally aligned, since it would lead to flushes of unmodified
1309 	 * PTEs. Such flushes would require the hypervisor to do more work than
1310 	 * necessary. Therefore, perform repeated flushes of aligned ranges
1311 	 * until the range is covered. Each iteration flushes the smaller of
1312 	 * the natural alignment of the address that we flush and the
1313 	 * greatest naturally aligned region that fits in the remaining range.
1314 	 */
1315 	while (size != 0) {
1316 		int addr_alignment = __ffs(address);
1317 		int size_alignment = __fls(size);
1318 		int min_alignment;
1319 		size_t flush_size;
1320 
1321 		/*
1322 		 * size is always non-zero, but address might be zero, causing
1323 		 * addr_alignment to be negative. As the casting of the
1324 		 * argument in __ffs(address) to long might trim the high bits
1325 		 * of the address on x86-32, cast to long when doing the check.
1326 		 */
1327 		if (likely((unsigned long)address != 0))
1328 			min_alignment = min(addr_alignment, size_alignment);
1329 		else
1330 			min_alignment = size_alignment;
1331 
1332 		flush_size = 1ul << min_alignment;
1333 
1334 		__domain_flush_pages(domain, address, flush_size, pde);
1335 		address += flush_size;
1336 		size -= flush_size;
1337 	}
1338 }
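/*
 * Worked example for the loop above: flushing address = 0x3000,
 * size = 0x5000 with NpCache on. Iteration 1: __ffs(0x3000) = 12 and
 * __fls(0x5000) = 14, so min_alignment = 12 and one 0x1000 page at 0x3000
 * is flushed. Iteration 2: address = 0x4000 and size = 0x4000 are both
 * aligned to 2^14, so a single naturally aligned 0x4000 flush finishes the
 * range. Two aligned flushes cover what would otherwise be one unaligned
 * range flush.
 */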
1339 
1340 /* Flush the whole IO/TLB for a given protection domain - including PDE */
1341 void amd_iommu_domain_flush_tlb_pde(struct protection_domain *domain)
1342 {
1343 	domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1);
1344 }
1345 
1346 void amd_iommu_domain_flush_complete(struct protection_domain *domain)
1347 {
1348 	int i;
1349 
1350 	for (i = 0; i < amd_iommu_get_num_iommus(); ++i) {
1351 		if (domain && !domain->dev_iommu[i])
1352 			continue;
1353 
1354 		/*
1355 		 * Devices of this domain are behind this IOMMU
1356 		 * We need to wait for completion of all commands.
1357 		 */
1358 		iommu_completion_wait(amd_iommus[i]);
1359 	}
1360 }
1361 
1362 /* Flush the not present cache if it exists */
1363 static void domain_flush_np_cache(struct protection_domain *domain,
1364 		dma_addr_t iova, size_t size)
1365 {
1366 	if (unlikely(amd_iommu_np_cache)) {
1367 		unsigned long flags;
1368 
1369 		spin_lock_irqsave(&domain->lock, flags);
1370 		domain_flush_pages(domain, iova, size, 1);
1371 		amd_iommu_domain_flush_complete(domain);
1372 		spin_unlock_irqrestore(&domain->lock, flags);
1373 	}
1374 }
1375 
1376 
1377 /*
1378  * This function flushes the DTEs for all devices in domain
1379  */
1380 static void domain_flush_devices(struct protection_domain *domain)
1381 {
1382 	struct iommu_dev_data *dev_data;
1383 
1384 	list_for_each_entry(dev_data, &domain->dev_list, list)
1385 		device_flush_dte(dev_data);
1386 }
1387 
1388 /****************************************************************************
1389  *
1390  * The next functions belong to the domain allocation. A domain is
1391  * allocated for every IOMMU as the default domain. If device isolation
1392  * is enabled, every device gets its own domain. The most important thing
1393  * about domains is the page table mapping the DMA address space they
1394  * contain.
1395  *
1396  ****************************************************************************/
1397 
1398 static u16 domain_id_alloc(void)
1399 {
1400 	int id;
1401 
1402 	spin_lock(&pd_bitmap_lock);
1403 	id = find_first_zero_bit(amd_iommu_pd_alloc_bitmap, MAX_DOMAIN_ID);
1404 	BUG_ON(id == 0);
1405 	if (id > 0 && id < MAX_DOMAIN_ID)
1406 		__set_bit(id, amd_iommu_pd_alloc_bitmap);
1407 	else
1408 		id = 0;
1409 	spin_unlock(&pd_bitmap_lock);
1410 
1411 	return id;
1412 }
1413 
1414 static void domain_id_free(int id)
1415 {
1416 	spin_lock(&pd_bitmap_lock);
1417 	if (id > 0 && id < MAX_DOMAIN_ID)
1418 		__clear_bit(id, amd_iommu_pd_alloc_bitmap);
1419 	spin_unlock(&pd_bitmap_lock);
1420 }
1421 
1422 static void free_gcr3_tbl_level1(u64 *tbl)
1423 {
1424 	u64 *ptr;
1425 	int i;
1426 
1427 	for (i = 0; i < 512; ++i) {
1428 		if (!(tbl[i] & GCR3_VALID))
1429 			continue;
1430 
1431 		ptr = iommu_phys_to_virt(tbl[i] & PAGE_MASK);
1432 
1433 		free_page((unsigned long)ptr);
1434 	}
1435 }
1436 
1437 static void free_gcr3_tbl_level2(u64 *tbl)
1438 {
1439 	u64 *ptr;
1440 	int i;
1441 
1442 	for (i = 0; i < 512; ++i) {
1443 		if (!(tbl[i] & GCR3_VALID))
1444 			continue;
1445 
1446 		ptr = iommu_phys_to_virt(tbl[i] & PAGE_MASK);
1447 
1448 		free_gcr3_tbl_level1(ptr);
1449 	}
1450 }
1451 
1452 static void free_gcr3_table(struct protection_domain *domain)
1453 {
1454 	if (domain->glx == 2)
1455 		free_gcr3_tbl_level2(domain->gcr3_tbl);
1456 	else if (domain->glx == 1)
1457 		free_gcr3_tbl_level1(domain->gcr3_tbl);
1458 	else
1459 		BUG_ON(domain->glx != 0);
1460 
1461 	free_page((unsigned long)domain->gcr3_tbl);
1462 }
1463 
1464 static void set_dte_entry(u16 devid, struct protection_domain *domain,
1465 			  bool ats, bool ppr)
1466 {
1467 	u64 pte_root = 0;
1468 	u64 flags = 0;
1469 	u32 old_domid;
1470 
1471 	if (domain->iop.mode != PAGE_MODE_NONE)
1472 		pte_root = iommu_virt_to_phys(domain->iop.root);
1473 
1474 	pte_root |= (domain->iop.mode & DEV_ENTRY_MODE_MASK)
1475 		    << DEV_ENTRY_MODE_SHIFT;
1476 	pte_root |= DTE_FLAG_IR | DTE_FLAG_IW | DTE_FLAG_V | DTE_FLAG_TV;
1477 
1478 	flags = amd_iommu_dev_table[devid].data[1];
1479 
1480 	if (ats)
1481 		flags |= DTE_FLAG_IOTLB;
1482 
1483 	if (ppr) {
1484 		struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
1485 
1486 		if (iommu_feature(iommu, FEATURE_EPHSUP))
1487 			pte_root |= 1ULL << DEV_ENTRY_PPR;
1488 	}
1489 
1490 	if (domain->flags & PD_IOMMUV2_MASK) {
1491 		u64 gcr3 = iommu_virt_to_phys(domain->gcr3_tbl);
1492 		u64 glx  = domain->glx;
1493 		u64 tmp;
1494 
1495 		pte_root |= DTE_FLAG_GV;
1496 		pte_root |= (glx & DTE_GLX_MASK) << DTE_GLX_SHIFT;
1497 
1498 		/* First mask out possible old values for GCR3 table */
1499 		tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B;
1500 		flags    &= ~tmp;
1501 
1502 		tmp = DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C;
1503 		flags    &= ~tmp;
1504 
1505 		/* Encode GCR3 table into DTE */
1506 		tmp = DTE_GCR3_VAL_A(gcr3) << DTE_GCR3_SHIFT_A;
1507 		pte_root |= tmp;
1508 
1509 		tmp = DTE_GCR3_VAL_B(gcr3) << DTE_GCR3_SHIFT_B;
1510 		flags    |= tmp;
1511 
1512 		tmp = DTE_GCR3_VAL_C(gcr3) << DTE_GCR3_SHIFT_C;
1513 		flags    |= tmp;
1514 	}
1515 
1516 	flags &= ~DEV_DOMID_MASK;
1517 	flags |= domain->id;
1518 
1519 	old_domid = amd_iommu_dev_table[devid].data[1] & DEV_DOMID_MASK;
1520 	amd_iommu_dev_table[devid].data[1]  = flags;
1521 	amd_iommu_dev_table[devid].data[0]  = pte_root;
1522 
1523 	/*
1524 	 * A kdump kernel might be replacing a domain ID that was copied from
1525 	 * the previous kernel--if so, it needs to flush the translation cache
1526 	 * entries for the old domain ID that is being overwritten
1527 	 */
1528 	if (old_domid) {
1529 		struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
1530 
1531 		amd_iommu_flush_tlb_domid(iommu, old_domid);
1532 	}
1533 }
1534 
1535 static void clear_dte_entry(u16 devid)
1536 {
1537 	/* remove entry from the device table seen by the hardware */
1538 	amd_iommu_dev_table[devid].data[0]  = DTE_FLAG_V | DTE_FLAG_TV;
1539 	amd_iommu_dev_table[devid].data[1] &= DTE_FLAG_MASK;
1540 
1541 	amd_iommu_apply_erratum_63(devid);
1542 }
1543 
1544 static void do_attach(struct iommu_dev_data *dev_data,
1545 		      struct protection_domain *domain)
1546 {
1547 	struct amd_iommu *iommu;
1548 	bool ats;
1549 
1550 	iommu = amd_iommu_rlookup_table[dev_data->devid];
1551 	ats   = dev_data->ats.enabled;
1552 
1553 	/* Update data structures */
1554 	dev_data->domain = domain;
1555 	list_add(&dev_data->list, &domain->dev_list);
1556 
1557 	/* Do reference counting */
1558 	domain->dev_iommu[iommu->index] += 1;
1559 	domain->dev_cnt                 += 1;
1560 
1561 	/* Update device table */
1562 	set_dte_entry(dev_data->devid, domain,
1563 		      ats, dev_data->iommu_v2);
1564 	clone_aliases(dev_data->pdev);
1565 
1566 	device_flush_dte(dev_data);
1567 }
1568 
1569 static void do_detach(struct iommu_dev_data *dev_data)
1570 {
1571 	struct protection_domain *domain = dev_data->domain;
1572 	struct amd_iommu *iommu;
1573 
1574 	iommu = amd_iommu_rlookup_table[dev_data->devid];
1575 
1576 	/* Update data structures */
1577 	dev_data->domain = NULL;
1578 	list_del(&dev_data->list);
1579 	clear_dte_entry(dev_data->devid);
1580 	clone_aliases(dev_data->pdev);
1581 
1582 	/* Flush the DTE entry */
1583 	device_flush_dte(dev_data);
1584 
1585 	/* Flush IOTLB */
1586 	amd_iommu_domain_flush_tlb_pde(domain);
1587 
1588 	/* Wait for the flushes to finish */
1589 	amd_iommu_domain_flush_complete(domain);
1590 
1591 	/* decrease reference counters - needs to happen after the flushes */
1592 	domain->dev_iommu[iommu->index] -= 1;
1593 	domain->dev_cnt                 -= 1;
1594 }
1595 
1596 static void pdev_iommuv2_disable(struct pci_dev *pdev)
1597 {
1598 	pci_disable_ats(pdev);
1599 	pci_disable_pri(pdev);
1600 	pci_disable_pasid(pdev);
1601 }
1602 
1603 static int pdev_iommuv2_enable(struct pci_dev *pdev)
1604 {
1605 	int ret;
1606 
1607 	/* Only allow access to user-accessible pages */
1608 	ret = pci_enable_pasid(pdev, 0);
1609 	if (ret)
1610 		goto out_err;
1611 
1612 	/* First reset the PRI state of the device */
1613 	ret = pci_reset_pri(pdev);
1614 	if (ret)
1615 		goto out_err;
1616 
1617 	/* Enable PRI */
1618 	/* FIXME: Hardcode number of outstanding requests for now */
1619 	ret = pci_enable_pri(pdev, 32);
1620 	if (ret)
1621 		goto out_err;
1622 
1623 	ret = pci_enable_ats(pdev, PAGE_SHIFT);
1624 	if (ret)
1625 		goto out_err;
1626 
1627 	return 0;
1628 
1629 out_err:
1630 	pci_disable_pri(pdev);
1631 	pci_disable_pasid(pdev);
1632 
1633 	return ret;
1634 }
1635 
1636 /*
1637  * If a device is not yet associated with a domain, this function makes the
1638  * device visible in the domain
1639  */
1640 static int attach_device(struct device *dev,
1641 			 struct protection_domain *domain)
1642 {
1643 	struct iommu_dev_data *dev_data;
1644 	struct pci_dev *pdev;
1645 	unsigned long flags;
1646 	int ret;
1647 
1648 	spin_lock_irqsave(&domain->lock, flags);
1649 
1650 	dev_data = dev_iommu_priv_get(dev);
1651 
1652 	spin_lock(&dev_data->lock);
1653 
1654 	ret = -EBUSY;
1655 	if (dev_data->domain != NULL)
1656 		goto out;
1657 
1658 	if (!dev_is_pci(dev))
1659 		goto skip_ats_check;
1660 
1661 	pdev = to_pci_dev(dev);
1662 	if (domain->flags & PD_IOMMUV2_MASK) {
1663 		struct iommu_domain *def_domain = iommu_get_dma_domain(dev);
1664 
1665 		ret = -EINVAL;
1666 		if (def_domain->type != IOMMU_DOMAIN_IDENTITY)
1667 			goto out;
1668 
1669 		if (dev_data->iommu_v2) {
1670 			if (pdev_iommuv2_enable(pdev) != 0)
1671 				goto out;
1672 
1673 			dev_data->ats.enabled = true;
1674 			dev_data->ats.qdep    = pci_ats_queue_depth(pdev);
1675 			dev_data->pri_tlp     = pci_prg_resp_pasid_required(pdev);
1676 		}
1677 	} else if (amd_iommu_iotlb_sup &&
1678 		   pci_enable_ats(pdev, PAGE_SHIFT) == 0) {
1679 		dev_data->ats.enabled = true;
1680 		dev_data->ats.qdep    = pci_ats_queue_depth(pdev);
1681 	}
1682 
1683 skip_ats_check:
1684 	ret = 0;
1685 
1686 	do_attach(dev_data, domain);
1687 
1688 	/*
1689 	 * We might boot into a crash-kernel here. The crashed kernel
1690 	 * left the caches in the IOMMU dirty. So we have to flush
1691 	 * here to evict all dirty stuff.
1692 	 */
1693 	amd_iommu_domain_flush_tlb_pde(domain);
1694 
1695 	amd_iommu_domain_flush_complete(domain);
1696 
1697 out:
1698 	spin_unlock(&dev_data->lock);
1699 
1700 	spin_unlock_irqrestore(&domain->lock, flags);
1701 
1702 	return ret;
1703 }
1704 
1705 /*
1706  * Removes a device from a protection domain (takes the required locks itself)
1707  */
1708 static void detach_device(struct device *dev)
1709 {
1710 	struct protection_domain *domain;
1711 	struct iommu_dev_data *dev_data;
1712 	unsigned long flags;
1713 
1714 	dev_data = dev_iommu_priv_get(dev);
1715 	domain   = dev_data->domain;
1716 
1717 	spin_lock_irqsave(&domain->lock, flags);
1718 
1719 	spin_lock(&dev_data->lock);
1720 
1721 	/*
1722 	 * First check if the device is still attached. It might already
1723 	 * be detached from its domain because the generic
1724 	 * iommu_detach_group code detached it and we try again here in
1725 	 * our alias handling.
1726 	 */
1727 	if (WARN_ON(!dev_data->domain))
1728 		goto out;
1729 
1730 	do_detach(dev_data);
1731 
1732 	if (!dev_is_pci(dev))
1733 		goto out;
1734 
1735 	if (domain->flags & PD_IOMMUV2_MASK && dev_data->iommu_v2)
1736 		pdev_iommuv2_disable(to_pci_dev(dev));
1737 	else if (dev_data->ats.enabled)
1738 		pci_disable_ats(to_pci_dev(dev));
1739 
1740 	dev_data->ats.enabled = false;
1741 
1742 out:
1743 	spin_unlock(&dev_data->lock);
1744 
1745 	spin_unlock_irqrestore(&domain->lock, flags);
1746 }
1747 
1748 static struct iommu_device *amd_iommu_probe_device(struct device *dev)
1749 {
1750 	struct iommu_device *iommu_dev;
1751 	struct amd_iommu *iommu;
1752 	int ret, devid;
1753 
1754 	if (!check_device(dev))
1755 		return ERR_PTR(-ENODEV);
1756 
1757 	devid = get_device_id(dev);
1758 	iommu = amd_iommu_rlookup_table[devid];
1759 
1760 	if (dev_iommu_priv_get(dev))
1761 		return &iommu->iommu;
1762 
1763 	ret = iommu_init_device(dev);
1764 	if (ret) {
1765 		if (ret != -ENOTSUPP)
1766 			dev_err(dev, "Failed to initialize - trying to proceed anyway\n");
1767 		iommu_dev = ERR_PTR(ret);
1768 		iommu_ignore_device(dev);
1769 	} else {
1770 		amd_iommu_set_pci_msi_domain(dev, iommu);
1771 		iommu_dev = &iommu->iommu;
1772 	}
1773 
1774 	iommu_completion_wait(iommu);
1775 
1776 	return iommu_dev;
1777 }
1778 
1779 static void amd_iommu_probe_finalize(struct device *dev)
1780 {
1781 	/* Domains are initialized for this device - have a look at what we ended up with */
1782 	set_dma_ops(dev, NULL);
1783 	iommu_setup_dma_ops(dev, 0, U64_MAX);
1784 }
1785 
1786 static void amd_iommu_release_device(struct device *dev)
1787 {
1788 	int devid = get_device_id(dev);
1789 	struct amd_iommu *iommu;
1790 
1791 	if (!check_device(dev))
1792 		return;
1793 
1794 	iommu = amd_iommu_rlookup_table[devid];
1795 
1796 	amd_iommu_uninit_device(dev);
1797 	iommu_completion_wait(iommu);
1798 }
1799 
1800 static struct iommu_group *amd_iommu_device_group(struct device *dev)
1801 {
1802 	if (dev_is_pci(dev))
1803 		return pci_device_group(dev);
1804 
1805 	return acpihid_device_group(dev);
1806 }
1807 
1808 /*****************************************************************************
1809  *
1810  * The next functions belong to the dma_ops mapping/unmapping code.
1811  *
1812  *****************************************************************************/
1813 
1814 static void update_device_table(struct protection_domain *domain)
1815 {
1816 	struct iommu_dev_data *dev_data;
1817 
1818 	list_for_each_entry(dev_data, &domain->dev_list, list) {
1819 		set_dte_entry(dev_data->devid, domain,
1820 			      dev_data->ats.enabled, dev_data->iommu_v2);
1821 		clone_aliases(dev_data->pdev);
1822 	}
1823 }
1824 
1825 void amd_iommu_update_and_flush_device_table(struct protection_domain *domain)
1826 {
1827 	update_device_table(domain);
1828 	domain_flush_devices(domain);
1829 }
1830 
1831 void amd_iommu_domain_update(struct protection_domain *domain)
1832 {
1833 	/* Update device table */
1834 	amd_iommu_update_and_flush_device_table(domain);
1835 
1836 	/* Flush domain TLB(s) and wait for completion */
1837 	amd_iommu_domain_flush_tlb_pde(domain);
1838 	amd_iommu_domain_flush_complete(domain);
1839 }
1840 
1841 static void __init amd_iommu_init_dma_ops(void)
1842 {
1843 	swiotlb = (iommu_default_passthrough() || sme_me_mask) ? 1 : 0;
1844 }
1845 
1846 int __init amd_iommu_init_api(void)
1847 {
1848 	int err;
1849 
1850 	amd_iommu_init_dma_ops();
1851 
1852 	err = bus_set_iommu(&pci_bus_type, &amd_iommu_ops);
1853 	if (err)
1854 		return err;
1855 #ifdef CONFIG_ARM_AMBA
1856 	err = bus_set_iommu(&amba_bustype, &amd_iommu_ops);
1857 	if (err)
1858 		return err;
1859 #endif
1860 	err = bus_set_iommu(&platform_bus_type, &amd_iommu_ops);
1861 	if (err)
1862 		return err;
1863 
1864 	return 0;
1865 }
1866 
1867 /*****************************************************************************
1868  *
1869  * The following functions belong to the exported interface of AMD IOMMU
1870  *
1871  * This interface allows access to lower level functions of the IOMMU
1872  * like protection domain handling and assignment of devices to domains
1873  * which is not possible with the dma_ops interface.
1874  *
1875  *****************************************************************************/
1876 
1877 static void cleanup_domain(struct protection_domain *domain)
1878 {
1879 	struct iommu_dev_data *entry;
1880 	unsigned long flags;
1881 
1882 	spin_lock_irqsave(&domain->lock, flags);
1883 
1884 	while (!list_empty(&domain->dev_list)) {
1885 		entry = list_first_entry(&domain->dev_list,
1886 					 struct iommu_dev_data, list);
1887 		BUG_ON(!entry->domain);
1888 		do_detach(entry);
1889 	}
1890 
1891 	spin_unlock_irqrestore(&domain->lock, flags);
1892 }
1893 
1894 static void protection_domain_free(struct protection_domain *domain)
1895 {
1896 	if (!domain)
1897 		return;
1898 
1899 	if (domain->id)
1900 		domain_id_free(domain->id);
1901 
1902 	if (domain->iop.pgtbl_cfg.tlb)
1903 		free_io_pgtable_ops(&domain->iop.iop.ops);
1904 
1905 	kfree(domain);
1906 }
1907 
1908 static int protection_domain_init_v1(struct protection_domain *domain, int mode)
1909 {
1910 	u64 *pt_root = NULL;
1911 
1912 	BUG_ON(mode < PAGE_MODE_NONE || mode > PAGE_MODE_6_LEVEL);
1913 
1914 	spin_lock_init(&domain->lock);
1915 	domain->id = domain_id_alloc();
1916 	if (!domain->id)
1917 		return -ENOMEM;
1918 	INIT_LIST_HEAD(&domain->dev_list);
1919 
1920 	if (mode != PAGE_MODE_NONE) {
1921 		pt_root = (void *)get_zeroed_page(GFP_KERNEL);
1922 		if (!pt_root)
1923 			return -ENOMEM;
1924 	}
1925 
1926 	amd_iommu_domain_set_pgtable(domain, pt_root, mode);
1927 
1928 	return 0;
1929 }
1930 
1931 static struct protection_domain *protection_domain_alloc(unsigned int type)
1932 {
1933 	struct io_pgtable_ops *pgtbl_ops;
1934 	struct protection_domain *domain;
1935 	int pgtable = amd_iommu_pgtable;
1936 	int mode = DEFAULT_PGTABLE_LEVEL;
1937 	int ret;
1938 
1939 	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
1940 	if (!domain)
1941 		return NULL;
1942 
1943 	/*
1944 	 * Force IOMMU v1 page table when iommu=pt and
1945 	 * when allocating domain for pass-through devices.
1946 	 */
1947 	if (type == IOMMU_DOMAIN_IDENTITY) {
1948 		pgtable = AMD_IOMMU_V1;
1949 		mode = PAGE_MODE_NONE;
1950 	} else if (type == IOMMU_DOMAIN_UNMANAGED) {
1951 		pgtable = AMD_IOMMU_V1;
1952 	}
1953 
1954 	switch (pgtable) {
1955 	case AMD_IOMMU_V1:
1956 		ret = protection_domain_init_v1(domain, mode);
1957 		break;
1958 	default:
1959 		ret = -EINVAL;
1960 	}
1961 
1962 	if (ret)
1963 		goto out_err;
1964 
1965 	pgtbl_ops = alloc_io_pgtable_ops(pgtable, &domain->iop.pgtbl_cfg, domain);
1966 	if (!pgtbl_ops)
1967 		goto out_err;
1968 
1969 	return domain;
1970 out_err:
1971 	kfree(domain);
1972 	return NULL;
1973 }
1974 
1975 static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
1976 {
1977 	struct protection_domain *domain;
1978 
1979 	domain = protection_domain_alloc(type);
1980 	if (!domain)
1981 		return NULL;
1982 
1983 	domain->domain.geometry.aperture_start = 0;
1984 	domain->domain.geometry.aperture_end   = ~0ULL;
1985 	domain->domain.geometry.force_aperture = true;
1986 
1987 	return &domain->domain;
1988 }
1989 
1990 static void amd_iommu_domain_free(struct iommu_domain *dom)
1991 {
1992 	struct protection_domain *domain;
1993 
1994 	if (!dom)
1995 		return;
1996 
1997 	domain = to_pdomain(dom);
1998 
1999 	if (domain->dev_cnt > 0)
2000 		cleanup_domain(domain);
2001 
2002 	BUG_ON(domain->dev_cnt != 0);
2003 
2004 	if (domain->flags & PD_IOMMUV2_MASK)
2005 		free_gcr3_table(domain);
2006 
2007 	protection_domain_free(domain);
2008 }
2009 
2010 static void amd_iommu_detach_device(struct iommu_domain *dom,
2011 				    struct device *dev)
2012 {
2013 	struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
2014 	int devid = get_device_id(dev);
2015 	struct amd_iommu *iommu;
2016 
2017 	if (!check_device(dev))
2018 		return;
2019 
2020 	if (dev_data->domain != NULL)
2021 		detach_device(dev);
2022 
2023 	iommu = amd_iommu_rlookup_table[devid];
2024 	if (!iommu)
2025 		return;
2026 
2027 #ifdef CONFIG_IRQ_REMAP
2028 	if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
2029 	    (dom->type == IOMMU_DOMAIN_UNMANAGED))
2030 		dev_data->use_vapic = 0;
2031 #endif
2032 
2033 	iommu_completion_wait(iommu);
2034 }
2035 
2036 static int amd_iommu_attach_device(struct iommu_domain *dom,
2037 				   struct device *dev)
2038 {
2039 	struct protection_domain *domain = to_pdomain(dom);
2040 	struct iommu_dev_data *dev_data;
2041 	struct amd_iommu *iommu;
2042 	int ret;
2043 
2044 	if (!check_device(dev))
2045 		return -EINVAL;
2046 
2047 	dev_data = dev_iommu_priv_get(dev);
2048 	dev_data->defer_attach = false;
2049 
2050 	iommu = amd_iommu_rlookup_table[dev_data->devid];
2051 	if (!iommu)
2052 		return -EINVAL;
2053 
2054 	if (dev_data->domain)
2055 		detach_device(dev);
2056 
2057 	ret = attach_device(dev, domain);
2058 
2059 #ifdef CONFIG_IRQ_REMAP
2060 	if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)) {
2061 		if (dom->type == IOMMU_DOMAIN_UNMANAGED)
2062 			dev_data->use_vapic = 1;
2063 		else
2064 			dev_data->use_vapic = 0;
2065 	}
2066 #endif
2067 
2068 	iommu_completion_wait(iommu);
2069 
2070 	return ret;
2071 }
2072 
2073 static void amd_iommu_iotlb_sync_map(struct iommu_domain *dom,
2074 				     unsigned long iova, size_t size)
2075 {
2076 	struct protection_domain *domain = to_pdomain(dom);
2077 	struct io_pgtable_ops *ops = &domain->iop.iop.ops;
2078 
2079 	if (ops->map)
2080 		domain_flush_np_cache(domain, iova, size);
2081 }
2082 
2083 static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
2084 			 phys_addr_t paddr, size_t page_size, int iommu_prot,
2085 			 gfp_t gfp)
2086 {
2087 	struct protection_domain *domain = to_pdomain(dom);
2088 	struct io_pgtable_ops *ops = &domain->iop.iop.ops;
2089 	int prot = 0;
2090 	int ret = -EINVAL;
2091 
2092 	if ((amd_iommu_pgtable == AMD_IOMMU_V1) &&
2093 	    (domain->iop.mode == PAGE_MODE_NONE))
2094 		return -EINVAL;
2095 
2096 	if (iommu_prot & IOMMU_READ)
2097 		prot |= IOMMU_PROT_IR;
2098 	if (iommu_prot & IOMMU_WRITE)
2099 		prot |= IOMMU_PROT_IW;
2100 
2101 	if (ops->map)
2102 		ret = ops->map(ops, iova, paddr, page_size, prot, gfp);
2103 
2104 	return ret;
2105 }
2106 
2107 static void amd_iommu_iotlb_gather_add_page(struct iommu_domain *domain,
2108 					    struct iommu_iotlb_gather *gather,
2109 					    unsigned long iova, size_t size)
2110 {
2111 	/*
2112 	 * AMD's IOMMU can flush as many pages as necessary in a single flush.
2113 	 * Unless we run in a virtual machine, which can be inferred from
2114 	 * whether the "non-present cache" is on, it is probably best to prefer
2115 	 * (potentially) too extensive TLB flushing (i.e., more misses) over
2116 	 * multiple TLB flushes (i.e., more flushes). For virtual machines the
2117 	 * hypervisor needs to synchronize the host IOMMU PTEs with those of
2118 	 * the guest, and the trade-off is different: unnecessary TLB flushes
2119 	 * should be avoided.
2120 	 */
2121 	if (amd_iommu_np_cache &&
2122 	    iommu_iotlb_gather_is_disjoint(gather, iova, size))
2123 		iommu_iotlb_sync(domain, gather);
2124 
2125 	iommu_iotlb_gather_add_range(gather, iova, size);
2126 }
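
/*
 * Worked example (illustrative): with amd_iommu_np_cache set, gathering
 * an unmap of 0x1000-0x1fff followed by one of 0x5000-0x5fff makes the
 * second range disjoint from the gather window, so the pending flush is
 * issued first; on bare metal both ranges are simply merged into one
 * larger deferred flush covering 0x1000-0x5fff.
 */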
2127 
2128 static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
2129 			      size_t page_size,
2130 			      struct iommu_iotlb_gather *gather)
2131 {
2132 	struct protection_domain *domain = to_pdomain(dom);
2133 	struct io_pgtable_ops *ops = &domain->iop.iop.ops;
2134 	size_t r;
2135 
2136 	if ((amd_iommu_pgtable == AMD_IOMMU_V1) &&
2137 	    (domain->iop.mode == PAGE_MODE_NONE))
2138 		return 0;
2139 
2140 	r = (ops->unmap) ? ops->unmap(ops, iova, page_size, gather) : 0;
2141 
2142 	amd_iommu_iotlb_gather_add_page(dom, gather, iova, page_size);
2143 
2144 	return r;
2145 }
2146 
2147 static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
2148 					  dma_addr_t iova)
2149 {
2150 	struct protection_domain *domain = to_pdomain(dom);
2151 	struct io_pgtable_ops *ops = &domain->iop.iop.ops;
2152 
2153 	return ops->iova_to_phys(ops, iova);
2154 }
2155 
2156 static bool amd_iommu_capable(enum iommu_cap cap)
2157 {
2158 	switch (cap) {
2159 	case IOMMU_CAP_CACHE_COHERENCY:
2160 		return true;
2161 	case IOMMU_CAP_INTR_REMAP:
2162 		return (irq_remapping_enabled == 1);
2163 	case IOMMU_CAP_NOEXEC:
2164 		return false;
2165 	default:
2166 		break;
2167 	}
2168 
2169 	return false;
2170 }
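
/*
 * Usage sketch (illustrative, assuming the bus-based iommu_capable() API
 * of this kernel generation):
 *
 *	bool safe = iommu_capable(&pci_bus_type, IOMMU_CAP_INTR_REMAP);
 *
 * which reports true only when interrupt remapping was enabled at init.
 */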
2171 
2172 static void amd_iommu_get_resv_regions(struct device *dev,
2173 				       struct list_head *head)
2174 {
2175 	struct iommu_resv_region *region;
2176 	struct unity_map_entry *entry;
2177 	int devid;
2178 
2179 	devid = get_device_id(dev);
2180 	if (devid < 0)
2181 		return;
2182 
2183 	list_for_each_entry(entry, &amd_iommu_unity_map, list) {
2184 		int type, prot = 0;
2185 		size_t length;
2186 
2187 		if (devid < entry->devid_start || devid > entry->devid_end)
2188 			continue;
2189 
2190 		type   = IOMMU_RESV_DIRECT;
2191 		length = entry->address_end - entry->address_start;
2192 		if (entry->prot & IOMMU_PROT_IR)
2193 			prot |= IOMMU_READ;
2194 		if (entry->prot & IOMMU_PROT_IW)
2195 			prot |= IOMMU_WRITE;
2196 		if (entry->prot & IOMMU_UNITY_MAP_FLAG_EXCL_RANGE)
2197 			/* Exclusion range */
2198 			type = IOMMU_RESV_RESERVED;
2199 
2200 		region = iommu_alloc_resv_region(entry->address_start,
2201 						 length, prot, type);
2202 		if (!region) {
2203 			dev_err(dev, "Out of memory allocating dm-regions\n");
2204 			return;
2205 		}
2206 		list_add_tail(&region->list, head);
2207 	}
2208 
2209 	region = iommu_alloc_resv_region(MSI_RANGE_START,
2210 					 MSI_RANGE_END - MSI_RANGE_START + 1,
2211 					 0, IOMMU_RESV_MSI);
2212 	if (!region)
2213 		return;
2214 	list_add_tail(&region->list, head);
2215 
2216 	region = iommu_alloc_resv_region(HT_RANGE_START,
2217 					 HT_RANGE_END - HT_RANGE_START + 1,
2218 					 0, IOMMU_RESV_RESERVED);
2219 	if (!region)
2220 		return;
2221 	list_add_tail(&region->list, head);
2222 }
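
/*
 * Worked example (illustrative): the MSI window reserved above spans
 * 0xfeefffff - 0xfee00000 + 1 = 0x100000 bytes (1 MiB), and the
 * HyperTransport window spans 0xffffffffff - 0xfd00000000 + 1 =
 * 0x300000000 bytes (12 GiB); both are carved out of every domain's
 * usable IOVA space.
 */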
2223 
2224 bool amd_iommu_is_attach_deferred(struct iommu_domain *domain,
2225 				  struct device *dev)
2226 {
2227 	struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
2228 
2229 	return dev_data->defer_attach;
2230 }
2231 EXPORT_SYMBOL_GPL(amd_iommu_is_attach_deferred);
2232 
2233 static void amd_iommu_flush_iotlb_all(struct iommu_domain *domain)
2234 {
2235 	struct protection_domain *dom = to_pdomain(domain);
2236 	unsigned long flags;
2237 
2238 	spin_lock_irqsave(&dom->lock, flags);
2239 	amd_iommu_domain_flush_tlb_pde(dom);
2240 	amd_iommu_domain_flush_complete(dom);
2241 	spin_unlock_irqrestore(&dom->lock, flags);
2242 }
2243 
2244 static void amd_iommu_iotlb_sync(struct iommu_domain *domain,
2245 				 struct iommu_iotlb_gather *gather)
2246 {
2247 	struct protection_domain *dom = to_pdomain(domain);
2248 	unsigned long flags;
2249 
2250 	spin_lock_irqsave(&dom->lock, flags);
2251 	domain_flush_pages(dom, gather->start, gather->end - gather->start, 1);
2252 	amd_iommu_domain_flush_complete(dom);
2253 	spin_unlock_irqrestore(&dom->lock, flags);
2254 }
2255 
2256 static int amd_iommu_def_domain_type(struct device *dev)
2257 {
2258 	struct iommu_dev_data *dev_data;
2259 
2260 	dev_data = dev_iommu_priv_get(dev);
2261 	if (!dev_data)
2262 		return 0;
2263 
2264 	/*
2265 	 * Do not identity map IOMMUv2 capable devices when memory encryption is
2266 	 * active, because some of those devices (AMD GPUs) don't have the
2267 	 * encryption bit in their DMA-mask and require remapping.
2268 	 */
2269 	if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT) && dev_data->iommu_v2)
2270 		return IOMMU_DOMAIN_IDENTITY;
2271 
2272 	return 0;
2273 }
2274 
2275 const struct iommu_ops amd_iommu_ops = {
2276 	.capable = amd_iommu_capable,
2277 	.domain_alloc = amd_iommu_domain_alloc,
2278 	.domain_free  = amd_iommu_domain_free,
2279 	.attach_dev = amd_iommu_attach_device,
2280 	.detach_dev = amd_iommu_detach_device,
2281 	.map = amd_iommu_map,
2282 	.iotlb_sync_map	= amd_iommu_iotlb_sync_map,
2283 	.unmap = amd_iommu_unmap,
2284 	.iova_to_phys = amd_iommu_iova_to_phys,
2285 	.probe_device = amd_iommu_probe_device,
2286 	.release_device = amd_iommu_release_device,
2287 	.probe_finalize = amd_iommu_probe_finalize,
2288 	.device_group = amd_iommu_device_group,
2289 	.get_resv_regions = amd_iommu_get_resv_regions,
2290 	.put_resv_regions = generic_iommu_put_resv_regions,
2291 	.is_attach_deferred = amd_iommu_is_attach_deferred,
2292 	.pgsize_bitmap	= AMD_IOMMU_PGSIZES,
2293 	.flush_iotlb_all = amd_iommu_flush_iotlb_all,
2294 	.iotlb_sync = amd_iommu_iotlb_sync,
2295 	.def_domain_type = amd_iommu_def_domain_type,
2296 };
2297 
2298 /*****************************************************************************
2299  *
2300  * The next functions do a basic initialization of the IOMMU for
2301  * pass-through mode.
2302  *
2303  * In pass-through mode the IOMMU is initialized and enabled but not used for
2304  * DMA-API translation.
2305  *
2306  *****************************************************************************/
2307 
2308 /* IOMMUv2 specific functions */
2309 int amd_iommu_register_ppr_notifier(struct notifier_block *nb)
2310 {
2311 	return atomic_notifier_chain_register(&ppr_notifier, nb);
2312 }
2313 EXPORT_SYMBOL(amd_iommu_register_ppr_notifier);
2314 
2315 int amd_iommu_unregister_ppr_notifier(struct notifier_block *nb)
2316 {
2317 	return atomic_notifier_chain_unregister(&ppr_notifier, nb);
2318 }
2319 EXPORT_SYMBOL(amd_iommu_unregister_ppr_notifier);
2320 
2321 void amd_iommu_domain_direct_map(struct iommu_domain *dom)
2322 {
2323 	struct protection_domain *domain = to_pdomain(dom);
2324 	unsigned long flags;
2325 
2326 	spin_lock_irqsave(&domain->lock, flags);
2327 
2328 	if (domain->iop.pgtbl_cfg.tlb)
2329 		free_io_pgtable_ops(&domain->iop.iop.ops);
2330 
2331 	spin_unlock_irqrestore(&domain->lock, flags);
2332 }
2333 EXPORT_SYMBOL(amd_iommu_domain_direct_map);
2334 
2335 int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids)
2336 {
2337 	struct protection_domain *domain = to_pdomain(dom);
2338 	unsigned long flags;
2339 	int levels, ret;
2340 
2341 	/* Number of GCR3 table levels required */
2342 	for (levels = 0; (pasids - 1) & ~0x1ff; pasids >>= 9)
2343 		levels += 1;
2344 
2345 	if (levels > amd_iommu_max_glx_val)
2346 		return -EINVAL;
2347 
2348 	spin_lock_irqsave(&domain->lock, flags);
2349 
2350 	/*
2351 	 * Spare us the sanity checks of whether devices already in the
2352 	 * domain support IOMMUv2: just require that the domain has no
2353 	 * devices attached when it is switched into IOMMUv2 mode.
2354 	 */
2355 	ret = -EBUSY;
2356 	if (domain->dev_cnt > 0 || domain->flags & PD_IOMMUV2_MASK)
2357 		goto out;
2358 
2359 	ret = -ENOMEM;
2360 	domain->gcr3_tbl = (void *)get_zeroed_page(GFP_ATOMIC);
2361 	if (domain->gcr3_tbl == NULL)
2362 		goto out;
2363 
2364 	domain->glx      = levels;
2365 	domain->flags   |= PD_IOMMUV2_MASK;
2366 
2367 	amd_iommu_domain_update(domain);
2368 
2369 	ret = 0;
2370 
2371 out:
2372 	spin_unlock_irqrestore(&domain->lock, flags);
2373 
2374 	return ret;
2375 }
2376 EXPORT_SYMBOL(amd_iommu_domain_enable_v2);
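
/*
 * Worked example (illustrative): each GCR3 level indexes 9 bits of PASID,
 * so pasids == 1 << 16 runs the loop once (0xffff & ~0x1ff is non-zero,
 * while 127 after the shift fits in 9 bits), giving levels == 1, i.e. a
 * two-level GCR3 table; pasids <= 512 yields levels == 0 and a single
 * table page.
 */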
2377 
2378 static int __flush_pasid(struct protection_domain *domain, u32 pasid,
2379 			 u64 address, bool size)
2380 {
2381 	struct iommu_dev_data *dev_data;
2382 	struct iommu_cmd cmd;
2383 	int i, ret;
2384 
2385 	if (!(domain->flags & PD_IOMMUV2_MASK))
2386 		return -EINVAL;
2387 
2388 	build_inv_iommu_pasid(&cmd, domain->id, pasid, address, size);
2389 
2390 	/*
2391 	 * The IOMMU TLB needs to be flushed before the device TLB to
2392 	 * prevent a device TLB refill from the IOMMU TLB.
2393 	 */
2394 	for (i = 0; i < amd_iommu_get_num_iommus(); ++i) {
2395 		if (domain->dev_iommu[i] == 0)
2396 			continue;
2397 
2398 		ret = iommu_queue_command(amd_iommus[i], &cmd);
2399 		if (ret != 0)
2400 			goto out;
2401 	}
2402 
2403 	/* Wait until IOMMU TLB flushes are complete */
2404 	amd_iommu_domain_flush_complete(domain);
2405 
2406 	/* Now flush device TLBs */
2407 	list_for_each_entry(dev_data, &domain->dev_list, list) {
2408 		struct amd_iommu *iommu;
2409 		int qdep;
2410 
2411 		/*
2412 		 * There might be non-IOMMUv2-capable devices in an IOMMUv2
2413 		 * domain.
2414 		 */
2415 		if (!dev_data->ats.enabled)
2416 			continue;
2417 
2418 		qdep  = dev_data->ats.qdep;
2419 		iommu = amd_iommu_rlookup_table[dev_data->devid];
2420 
2421 		build_inv_iotlb_pasid(&cmd, dev_data->devid, pasid,
2422 				      qdep, address, size);
2423 
2424 		ret = iommu_queue_command(iommu, &cmd);
2425 		if (ret != 0)
2426 			goto out;
2427 	}
2428 
2429 	/* Wait until all device TLBs are flushed */
2430 	amd_iommu_domain_flush_complete(domain);
2431 
2432 	ret = 0;
2433 
2434 out:
2435 
2436 	return ret;
2437 }
2438 
2439 static int __amd_iommu_flush_page(struct protection_domain *domain, u32 pasid,
2440 				  u64 address)
2441 {
2442 	return __flush_pasid(domain, pasid, address, false);
2443 }
2444 
2445 int amd_iommu_flush_page(struct iommu_domain *dom, u32 pasid,
2446 			 u64 address)
2447 {
2448 	struct protection_domain *domain = to_pdomain(dom);
2449 	unsigned long flags;
2450 	int ret;
2451 
2452 	spin_lock_irqsave(&domain->lock, flags);
2453 	ret = __amd_iommu_flush_page(domain, pasid, address);
2454 	spin_unlock_irqrestore(&domain->lock, flags);
2455 
2456 	return ret;
2457 }
2458 EXPORT_SYMBOL(amd_iommu_flush_page);
2459 
2460 static int __amd_iommu_flush_tlb(struct protection_domain *domain, u32 pasid)
2461 {
2462 	return __flush_pasid(domain, pasid, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
2463 			     true);
2464 }
2465 
2466 int amd_iommu_flush_tlb(struct iommu_domain *dom, u32 pasid)
2467 {
2468 	struct protection_domain *domain = to_pdomain(dom);
2469 	unsigned long flags;
2470 	int ret;
2471 
2472 	spin_lock_irqsave(&domain->lock, flags);
2473 	ret = __amd_iommu_flush_tlb(domain, pasid);
2474 	spin_unlock_irqrestore(&domain->lock, flags);
2475 
2476 	return ret;
2477 }
2478 EXPORT_SYMBOL(amd_iommu_flush_tlb);
2479 
2480 static u64 *__get_gcr3_pte(u64 *root, int level, u32 pasid, bool alloc)
2481 {
2482 	int index;
2483 	u64 *pte;
2484 
2485 	while (true) {
2486 
2487 		index = (pasid >> (9 * level)) & 0x1ff;
2488 		pte   = &root[index];
2489 
2490 		if (level == 0)
2491 			break;
2492 
2493 		if (!(*pte & GCR3_VALID)) {
2494 			if (!alloc)
2495 				return NULL;
2496 
2497 			root = (void *)get_zeroed_page(GFP_ATOMIC);
2498 			if (root == NULL)
2499 				return NULL;
2500 
2501 			*pte = iommu_virt_to_phys(root) | GCR3_VALID;
2502 		}
2503 
2504 		root = iommu_phys_to_virt(*pte & PAGE_MASK);
2505 
2506 		level -= 1;
2507 	}
2508 
2509 	return pte;
2510 }
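
/*
 * Worked example (illustrative): for glx == 1 (a two-level table) and
 * pasid 0x12345, the walk reads root[(0x12345 >> 9) & 0x1ff] == root[0x91]
 * at level 1, follows (or, with alloc, creates) the next-level page, and
 * returns a pointer to that page's entry [0x12345 & 0x1ff] == [0x145].
 */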
2511 
2512 static int __set_gcr3(struct protection_domain *domain, u32 pasid,
2513 		      unsigned long cr3)
2514 {
2515 	u64 *pte;
2516 
2517 	if (domain->iop.mode != PAGE_MODE_NONE)
2518 		return -EINVAL;
2519 
2520 	pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, true);
2521 	if (pte == NULL)
2522 		return -ENOMEM;
2523 
2524 	*pte = (cr3 & PAGE_MASK) | GCR3_VALID;
2525 
2526 	return __amd_iommu_flush_tlb(domain, pasid);
2527 }
2528 
2529 static int __clear_gcr3(struct protection_domain *domain, u32 pasid)
2530 {
2531 	u64 *pte;
2532 
2533 	if (domain->iop.mode != PAGE_MODE_NONE)
2534 		return -EINVAL;
2535 
2536 	pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, false);
2537 	if (pte == NULL)
2538 		return 0;
2539 
2540 	*pte = 0;
2541 
2542 	return __amd_iommu_flush_tlb(domain, pasid);
2543 }
2544 
2545 int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, u32 pasid,
2546 			      unsigned long cr3)
2547 {
2548 	struct protection_domain *domain = to_pdomain(dom);
2549 	unsigned long flags;
2550 	int ret;
2551 
2552 	spin_lock_irqsave(&domain->lock, flags);
2553 	ret = __set_gcr3(domain, pasid, cr3);
2554 	spin_unlock_irqrestore(&domain->lock, flags);
2555 
2556 	return ret;
2557 }
2558 EXPORT_SYMBOL(amd_iommu_domain_set_gcr3);
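
/*
 * Usage sketch (illustrative, hypothetical caller): a PASID-capable
 * driver would typically switch the domain into IOMMUv2 mode once and
 * then install one guest CR3 per PASID:
 *
 *	ret = amd_iommu_domain_enable_v2(dom, pasid_limit);
 *	if (!ret)
 *		ret = amd_iommu_domain_set_gcr3(dom, pasid, __pa(mm->pgd));
 */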
2559 
2560 int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, u32 pasid)
2561 {
2562 	struct protection_domain *domain = to_pdomain(dom);
2563 	unsigned long flags;
2564 	int ret;
2565 
2566 	spin_lock_irqsave(&domain->lock, flags);
2567 	ret = __clear_gcr3(domain, pasid);
2568 	spin_unlock_irqrestore(&domain->lock, flags);
2569 
2570 	return ret;
2571 }
2572 EXPORT_SYMBOL(amd_iommu_domain_clear_gcr3);
2573 
2574 int amd_iommu_complete_ppr(struct pci_dev *pdev, u32 pasid,
2575 			   int status, int tag)
2576 {
2577 	struct iommu_dev_data *dev_data;
2578 	struct amd_iommu *iommu;
2579 	struct iommu_cmd cmd;
2580 
2581 	dev_data = dev_iommu_priv_get(&pdev->dev);
2582 	iommu    = amd_iommu_rlookup_table[dev_data->devid];
2583 
2584 	build_complete_ppr(&cmd, dev_data->devid, pasid, status,
2585 			   tag, dev_data->pri_tlp);
2586 
2587 	return iommu_queue_command(iommu, &cmd);
2588 }
2589 EXPORT_SYMBOL(amd_iommu_complete_ppr);
2590 
2591 int amd_iommu_device_info(struct pci_dev *pdev,
2592                           struct amd_iommu_device_info *info)
2593 {
2594 	int max_pasids;
2595 	int pos;
2596 
2597 	if (pdev == NULL || info == NULL)
2598 		return -EINVAL;
2599 
2600 	if (!amd_iommu_v2_supported())
2601 		return -EINVAL;
2602 
2603 	memset(info, 0, sizeof(*info));
2604 
2605 	if (pci_ats_supported(pdev))
2606 		info->flags |= AMD_IOMMU_DEVICE_FLAG_ATS_SUP;
2607 
2608 	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
2609 	if (pos)
2610 		info->flags |= AMD_IOMMU_DEVICE_FLAG_PRI_SUP;
2611 
2612 	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
2613 	if (pos) {
2614 		int features;
2615 
2616 		max_pasids = 1 << (9 * (amd_iommu_max_glx_val + 1));
2617 		max_pasids = min(max_pasids, (1 << 20));
2618 
2619 		info->flags |= AMD_IOMMU_DEVICE_FLAG_PASID_SUP;
2620 		info->max_pasids = min(pci_max_pasids(pdev), max_pasids);
2621 
2622 		features = pci_pasid_features(pdev);
2623 		if (features & PCI_PASID_CAP_EXEC)
2624 			info->flags |= AMD_IOMMU_DEVICE_FLAG_EXEC_SUP;
2625 		if (features & PCI_PASID_CAP_PRIV)
2626 			info->flags |= AMD_IOMMU_DEVICE_FLAG_PRIV_SUP;
2627 	}
2628 
2629 	return 0;
2630 }
2631 EXPORT_SYMBOL(amd_iommu_device_info);
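
/*
 * Worked example (illustrative): with amd_iommu_max_glx_val == 1 the
 * IOMMU side supports 1 << (9 * 2) = 262144 PASIDs, which is below the
 * 1 << 20 architectural cap, so info->max_pasids becomes
 * min(pci_max_pasids(pdev), 262144).
 */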
2632 
2633 #ifdef CONFIG_IRQ_REMAP
2634 
2635 /*****************************************************************************
2636  *
2637  * Interrupt Remapping Implementation
2638  *
2639  *****************************************************************************/
2640 
2641 static struct irq_chip amd_ir_chip;
2642 static DEFINE_SPINLOCK(iommu_table_lock);
2643 
2644 static void set_dte_irq_entry(u16 devid, struct irq_remap_table *table)
2645 {
2646 	u64 dte;
2647 
2648 	dte	= amd_iommu_dev_table[devid].data[2];
2649 	dte	&= ~DTE_IRQ_PHYS_ADDR_MASK;
2650 	dte	|= iommu_virt_to_phys(table->table);
2651 	dte	|= DTE_IRQ_REMAP_INTCTL;
2652 	dte	|= DTE_INTTABLEN;
2653 	dte	|= DTE_IRQ_REMAP_ENABLE;
2654 
2655 	amd_iommu_dev_table[devid].data[2] = dte;
2656 }
2657 
2658 static struct irq_remap_table *get_irq_table(u16 devid)
2659 {
2660 	struct irq_remap_table *table;
2661 
2662 	if (WARN_ONCE(!amd_iommu_rlookup_table[devid],
2663 		      "%s: no iommu for devid %x\n", __func__, devid))
2664 		return NULL;
2665 
2666 	table = irq_lookup_table[devid];
2667 	if (WARN_ONCE(!table, "%s: no table for devid %x\n", __func__, devid))
2668 		return NULL;
2669 
2670 	return table;
2671 }
2672 
2673 static struct irq_remap_table *__alloc_irq_table(void)
2674 {
2675 	struct irq_remap_table *table;
2676 
2677 	table = kzalloc(sizeof(*table), GFP_KERNEL);
2678 	if (!table)
2679 		return NULL;
2680 
2681 	table->table = kmem_cache_alloc(amd_iommu_irq_cache, GFP_KERNEL);
2682 	if (!table->table) {
2683 		kfree(table);
2684 		return NULL;
2685 	}
2686 	raw_spin_lock_init(&table->lock);
2687 
2688 	if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
2689 		memset(table->table, 0,
2690 		       MAX_IRQS_PER_TABLE * sizeof(u32));
2691 	else
2692 		memset(table->table, 0,
2693 		       (MAX_IRQS_PER_TABLE * (sizeof(u64) * 2)));
2694 	return table;
2695 }
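
/*
 * Illustrative note: only the memset() size differs between the two
 * modes because the backing allocation is shared; a legacy entry is a
 * 4-byte union irte, while a GA-mode entry is a 16-byte struct irte_ga
 * (two u64 halves), so the same MAX_IRQS_PER_TABLE entries occupy four
 * times the space.
 */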
2696 
2697 static void set_remap_table_entry(struct amd_iommu *iommu, u16 devid,
2698 				  struct irq_remap_table *table)
2699 {
2700 	irq_lookup_table[devid] = table;
2701 	set_dte_irq_entry(devid, table);
2702 	iommu_flush_dte(iommu, devid);
2703 }
2704 
2705 static int set_remap_table_entry_alias(struct pci_dev *pdev, u16 alias,
2706 				       void *data)
2707 {
2708 	struct irq_remap_table *table = data;
2709 
2710 	irq_lookup_table[alias] = table;
2711 	set_dte_irq_entry(alias, table);
2712 
2713 	iommu_flush_dte(amd_iommu_rlookup_table[alias], alias);
2714 
2715 	return 0;
2716 }
2717 
2718 static struct irq_remap_table *alloc_irq_table(u16 devid, struct pci_dev *pdev)
2719 {
2720 	struct irq_remap_table *table = NULL;
2721 	struct irq_remap_table *new_table = NULL;
2722 	struct amd_iommu *iommu;
2723 	unsigned long flags;
2724 	u16 alias;
2725 
2726 	spin_lock_irqsave(&iommu_table_lock, flags);
2727 
2728 	iommu = amd_iommu_rlookup_table[devid];
2729 	if (!iommu)
2730 		goto out_unlock;
2731 
2732 	table = irq_lookup_table[devid];
2733 	if (table)
2734 		goto out_unlock;
2735 
2736 	alias = amd_iommu_alias_table[devid];
2737 	table = irq_lookup_table[alias];
2738 	if (table) {
2739 		set_remap_table_entry(iommu, devid, table);
2740 		goto out_wait;
2741 	}
2742 	spin_unlock_irqrestore(&iommu_table_lock, flags);
2743 
2744 	/* Nothing there yet, allocate a new IRQ remapping table */
2745 	new_table = __alloc_irq_table();
2746 	if (!new_table)
2747 		return NULL;
2748 
2749 	spin_lock_irqsave(&iommu_table_lock, flags);
2750 
2751 	table = irq_lookup_table[devid];
2752 	if (table)
2753 		goto out_unlock;
2754 
2755 	table = irq_lookup_table[alias];
2756 	if (table) {
2757 		set_remap_table_entry(iommu, devid, table);
2758 		goto out_wait;
2759 	}
2760 
2761 	table = new_table;
2762 	new_table = NULL;
2763 
2764 	if (pdev)
2765 		pci_for_each_dma_alias(pdev, set_remap_table_entry_alias,
2766 				       table);
2767 	else
2768 		set_remap_table_entry(iommu, devid, table);
2769 
2770 	if (devid != alias)
2771 		set_remap_table_entry(iommu, alias, table);
2772 
2773 out_wait:
2774 	iommu_completion_wait(iommu);
2775 
2776 out_unlock:
2777 	spin_unlock_irqrestore(&iommu_table_lock, flags);
2778 
2779 	if (new_table) {
2780 		kmem_cache_free(amd_iommu_irq_cache, new_table->table);
2781 		kfree(new_table);
2782 	}
2783 	return table;
2784 }
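
/*
 * Design note: the GFP_KERNEL allocation in __alloc_irq_table() cannot
 * happen under iommu_table_lock, so the lock is dropped and both devid
 * and its alias are re-checked afterwards; if a racing caller installed
 * a table in the meantime, the spare new_table is freed at the end.
 */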
2785 
2786 static int alloc_irq_index(u16 devid, int count, bool align,
2787 			   struct pci_dev *pdev)
2788 {
2789 	struct irq_remap_table *table;
2790 	int index, c, alignment = 1;
2791 	unsigned long flags;
2792 	struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
2793 
2794 	if (!iommu)
2795 		return -ENODEV;
2796 
2797 	table = alloc_irq_table(devid, pdev);
2798 	if (!table)
2799 		return -ENODEV;
2800 
2801 	if (align)
2802 		alignment = roundup_pow_of_two(count);
2803 
2804 	raw_spin_lock_irqsave(&table->lock, flags);
2805 
2806 	/* Scan table for free entries */
2807 	for (index = ALIGN(table->min_index, alignment), c = 0;
2808 	     index < MAX_IRQS_PER_TABLE;) {
2809 		if (!iommu->irte_ops->is_allocated(table, index)) {
2810 			c += 1;
2811 		} else {
2812 			c     = 0;
2813 			index = ALIGN(index + 1, alignment);
2814 			continue;
2815 		}
2816 
2817 		if (c == count) {
2818 			for (; c != 0; --c)
2819 				iommu->irte_ops->set_allocated(table, index - c + 1);
2820 
2821 			index -= count - 1;
2822 			goto out;
2823 		}
2824 
2825 		index++;
2826 	}
2827 
2828 	index = -ENOSPC;
2829 
2830 out:
2831 	raw_spin_unlock_irqrestore(&table->lock, flags);
2832 
2833 	return index;
2834 }
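
/*
 * Worked example (illustrative): a multi-MSI allocation of count == 8
 * with align == true only considers runs starting at multiples of 8:
 * the scan begins at ALIGN(table->min_index, 8) and, whenever it hits an
 * allocated entry, restarts at ALIGN(index + 1, 8), matching MSI's
 * requirement for a naturally aligned block of vectors.
 */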
2835 
2836 static int modify_irte_ga(u16 devid, int index, struct irte_ga *irte,
2837 			  struct amd_ir_data *data)
2838 {
2839 	bool ret;
2840 	struct irq_remap_table *table;
2841 	struct amd_iommu *iommu;
2842 	unsigned long flags;
2843 	struct irte_ga *entry;
2844 
2845 	iommu = amd_iommu_rlookup_table[devid];
2846 	if (iommu == NULL)
2847 		return -EINVAL;
2848 
2849 	table = get_irq_table(devid);
2850 	if (!table)
2851 		return -ENOMEM;
2852 
2853 	raw_spin_lock_irqsave(&table->lock, flags);
2854 
2855 	entry = (struct irte_ga *)table->table;
2856 	entry = &entry[index];
2857 
2858 	ret = cmpxchg_double(&entry->lo.val, &entry->hi.val,
2859 			     entry->lo.val, entry->hi.val,
2860 			     irte->lo.val, irte->hi.val);
2861 	/*
2862 	 * We use cmpxchg16 to atomically update the 128-bit IRTE. The
2863 	 * entry cannot be updated behind our back by the hardware or by
2864 	 * other processors, so the compare half of the exchange should
2865 	 * always succeed; WARN if it does not.
2866 	 */
2867 	WARN_ON(!ret);
2868 
2869 	if (data)
2870 		data->ref = entry;
2871 
2872 	raw_spin_unlock_irqrestore(&table->lock, flags);
2873 
2874 	iommu_flush_irt(iommu, devid);
2875 	iommu_completion_wait(iommu);
2876 
2877 	return 0;
2878 }
2879 
2880 static int modify_irte(u16 devid, int index, union irte *irte)
2881 {
2882 	struct irq_remap_table *table;
2883 	struct amd_iommu *iommu;
2884 	unsigned long flags;
2885 
2886 	iommu = amd_iommu_rlookup_table[devid];
2887 	if (iommu == NULL)
2888 		return -EINVAL;
2889 
2890 	table = get_irq_table(devid);
2891 	if (!table)
2892 		return -ENOMEM;
2893 
2894 	raw_spin_lock_irqsave(&table->lock, flags);
2895 	table->table[index] = irte->val;
2896 	raw_spin_unlock_irqrestore(&table->lock, flags);
2897 
2898 	iommu_flush_irt(iommu, devid);
2899 	iommu_completion_wait(iommu);
2900 
2901 	return 0;
2902 }
2903 
2904 static void free_irte(u16 devid, int index)
2905 {
2906 	struct irq_remap_table *table;
2907 	struct amd_iommu *iommu;
2908 	unsigned long flags;
2909 
2910 	iommu = amd_iommu_rlookup_table[devid];
2911 	if (iommu == NULL)
2912 		return;
2913 
2914 	table = get_irq_table(devid);
2915 	if (!table)
2916 		return;
2917 
2918 	raw_spin_lock_irqsave(&table->lock, flags);
2919 	iommu->irte_ops->clear_allocated(table, index);
2920 	raw_spin_unlock_irqrestore(&table->lock, flags);
2921 
2922 	iommu_flush_irt(iommu, devid);
2923 	iommu_completion_wait(iommu);
2924 }
2925 
2926 static void irte_prepare(void *entry,
2927 			 u32 delivery_mode, bool dest_mode,
2928 			 u8 vector, u32 dest_apicid, int devid)
2929 {
2930 	union irte *irte = (union irte *) entry;
2931 
2932 	irte->val                = 0;
2933 	irte->fields.vector      = vector;
2934 	irte->fields.int_type    = delivery_mode;
2935 	irte->fields.destination = dest_apicid;
2936 	irte->fields.dm          = dest_mode;
2937 	irte->fields.valid       = 1;
2938 }
2939 
2940 static void irte_ga_prepare(void *entry,
2941 			    u32 delivery_mode, bool dest_mode,
2942 			    u8 vector, u32 dest_apicid, int devid)
2943 {
2944 	struct irte_ga *irte = (struct irte_ga *) entry;
2945 
2946 	irte->lo.val                      = 0;
2947 	irte->hi.val                      = 0;
2948 	irte->lo.fields_remap.int_type    = delivery_mode;
2949 	irte->lo.fields_remap.dm          = dest_mode;
2950 	irte->hi.fields.vector            = vector;
2951 	irte->lo.fields_remap.destination = APICID_TO_IRTE_DEST_LO(dest_apicid);
2952 	irte->hi.fields.destination       = APICID_TO_IRTE_DEST_HI(dest_apicid);
2953 	irte->lo.fields_remap.valid       = 1;
2954 }
2955 
2956 static void irte_activate(void *entry, u16 devid, u16 index)
2957 {
2958 	union irte *irte = (union irte *) entry;
2959 
2960 	irte->fields.valid = 1;
2961 	modify_irte(devid, index, irte);
2962 }
2963 
2964 static void irte_ga_activate(void *entry, u16 devid, u16 index)
2965 {
2966 	struct irte_ga *irte = (struct irte_ga *) entry;
2967 
2968 	irte->lo.fields_remap.valid = 1;
2969 	modify_irte_ga(devid, index, irte, NULL);
2970 }
2971 
2972 static void irte_deactivate(void *entry, u16 devid, u16 index)
2973 {
2974 	union irte *irte = (union irte *) entry;
2975 
2976 	irte->fields.valid = 0;
2977 	modify_irte(devid, index, irte);
2978 }
2979 
2980 static void irte_ga_deactivate(void *entry, u16 devid, u16 index)
2981 {
2982 	struct irte_ga *irte = (struct irte_ga *) entry;
2983 
2984 	irte->lo.fields_remap.valid = 0;
2985 	modify_irte_ga(devid, index, irte, NULL);
2986 }
2987 
2988 static void irte_set_affinity(void *entry, u16 devid, u16 index,
2989 			      u8 vector, u32 dest_apicid)
2990 {
2991 	union irte *irte = (union irte *) entry;
2992 
2993 	irte->fields.vector = vector;
2994 	irte->fields.destination = dest_apicid;
2995 	modify_irte(devid, index, irte);
2996 }
2997 
2998 static void irte_ga_set_affinity(void *entry, u16 devid, u16 index,
2999 				 u8 vector, u32 dest_apicid)
3000 {
3001 	struct irte_ga *irte = (struct irte_ga *) entry;
3002 
3003 	if (!irte->lo.fields_remap.guest_mode) {
3004 		irte->hi.fields.vector = vector;
3005 		irte->lo.fields_remap.destination =
3006 					APICID_TO_IRTE_DEST_LO(dest_apicid);
3007 		irte->hi.fields.destination =
3008 					APICID_TO_IRTE_DEST_HI(dest_apicid);
3009 		modify_irte_ga(devid, index, irte, NULL);
3010 	}
3011 }
3012 
3013 #define IRTE_ALLOCATED (~1U)
3014 static void irte_set_allocated(struct irq_remap_table *table, int index)
3015 {
3016 	table->table[index] = IRTE_ALLOCATED;
3017 }
3018 
3019 static void irte_ga_set_allocated(struct irq_remap_table *table, int index)
3020 {
3021 	struct irte_ga *ptr = (struct irte_ga *)table->table;
3022 	struct irte_ga *irte = &ptr[index];
3023 
3024 	memset(&irte->lo.val, 0, sizeof(u64));
3025 	memset(&irte->hi.val, 0, sizeof(u64));
3026 	irte->hi.fields.vector = 0xff;
3027 }
3028 
3029 static bool irte_is_allocated(struct irq_remap_table *table, int index)
3030 {
3031 	union irte *ptr = (union irte *)table->table;
3032 	union irte *irte = &ptr[index];
3033 
3034 	return irte->val != 0;
3035 }
3036 
3037 static bool irte_ga_is_allocated(struct irq_remap_table *table, int index)
3038 {
3039 	struct irte_ga *ptr = (struct irte_ga *)table->table;
3040 	struct irte_ga *irte = &ptr[index];
3041 
3042 	return irte->hi.fields.vector != 0;
3043 }
3044 
3045 static void irte_clear_allocated(struct irq_remap_table *table, int index)
3046 {
3047 	table->table[index] = 0;
3048 }
3049 
3050 static void irte_ga_clear_allocated(struct irq_remap_table *table, int index)
3051 {
3052 	struct irte_ga *ptr = (struct irte_ga *)table->table;
3053 	struct irte_ga *irte = &ptr[index];
3054 
3055 	memset(&irte->lo.val, 0, sizeof(u64));
3056 	memset(&irte->hi.val, 0, sizeof(u64));
3057 }
3058 
3059 static int get_devid(struct irq_alloc_info *info)
3060 {
3061 	switch (info->type) {
3062 	case X86_IRQ_ALLOC_TYPE_IOAPIC:
3063 		return get_ioapic_devid(info->devid);
3064 	case X86_IRQ_ALLOC_TYPE_HPET:
3065 		return get_hpet_devid(info->devid);
3066 	case X86_IRQ_ALLOC_TYPE_PCI_MSI:
3067 	case X86_IRQ_ALLOC_TYPE_PCI_MSIX:
3068 		return get_device_id(msi_desc_to_dev(info->desc));
3069 	default:
3070 		WARN_ON_ONCE(1);
3071 		return -1;
3072 	}
3073 }
3074 
3075 struct irq_remap_ops amd_iommu_irq_ops = {
3076 	.prepare		= amd_iommu_prepare,
3077 	.enable			= amd_iommu_enable,
3078 	.disable		= amd_iommu_disable,
3079 	.reenable		= amd_iommu_reenable,
3080 	.enable_faulting	= amd_iommu_enable_faulting,
3081 };
3082 
3083 static void fill_msi_msg(struct msi_msg *msg, u32 index)
3084 {
3085 	msg->data = index;
3086 	msg->address_lo = 0;
3087 	msg->arch_addr_lo.base_address = X86_MSI_BASE_ADDRESS_LOW;
3088 	msg->address_hi = X86_MSI_BASE_ADDRESS_HIGH;
3089 }
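
/*
 * Illustrative note: with remapping, the MSI message no longer encodes
 * vector and destination directly. The address still selects the
 * 0xfeexxxxx interrupt window (X86_MSI_BASE_ADDRESS_LOW fills the 31:20
 * base-address field), while msg->data carries only the IRTE index that
 * the IOMMU uses to look up the real vector and destination.
 */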
3090 
3091 static void irq_remapping_prepare_irte(struct amd_ir_data *data,
3092 				       struct irq_cfg *irq_cfg,
3093 				       struct irq_alloc_info *info,
3094 				       int devid, int index, int sub_handle)
3095 {
3096 	struct irq_2_irte *irte_info = &data->irq_2_irte;
3097 	struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
3098 
3099 	if (!iommu)
3100 		return;
3101 
3102 	data->irq_2_irte.devid = devid;
3103 	data->irq_2_irte.index = index + sub_handle;
3104 	iommu->irte_ops->prepare(data->entry, apic->delivery_mode,
3105 				 apic->dest_mode_logical, irq_cfg->vector,
3106 				 irq_cfg->dest_apicid, devid);
3107 
3108 	switch (info->type) {
3109 	case X86_IRQ_ALLOC_TYPE_IOAPIC:
3110 	case X86_IRQ_ALLOC_TYPE_HPET:
3111 	case X86_IRQ_ALLOC_TYPE_PCI_MSI:
3112 	case X86_IRQ_ALLOC_TYPE_PCI_MSIX:
3113 		fill_msi_msg(&data->msi_entry, irte_info->index);
3114 		break;
3115 
3116 	default:
3117 		BUG_ON(1);
3118 		break;
3119 	}
3120 }
3121 
3122 struct amd_irte_ops irte_32_ops = {
3123 	.prepare = irte_prepare,
3124 	.activate = irte_activate,
3125 	.deactivate = irte_deactivate,
3126 	.set_affinity = irte_set_affinity,
3127 	.set_allocated = irte_set_allocated,
3128 	.is_allocated = irte_is_allocated,
3129 	.clear_allocated = irte_clear_allocated,
3130 };
3131 
3132 struct amd_irte_ops irte_128_ops = {
3133 	.prepare = irte_ga_prepare,
3134 	.activate = irte_ga_activate,
3135 	.deactivate = irte_ga_deactivate,
3136 	.set_affinity = irte_ga_set_affinity,
3137 	.set_allocated = irte_ga_set_allocated,
3138 	.is_allocated = irte_ga_is_allocated,
3139 	.clear_allocated = irte_ga_clear_allocated,
3140 };
3141 
3142 static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
3143 			       unsigned int nr_irqs, void *arg)
3144 {
3145 	struct irq_alloc_info *info = arg;
3146 	struct irq_data *irq_data;
3147 	struct amd_ir_data *data = NULL;
3148 	struct irq_cfg *cfg;
3149 	int i, ret, devid;
3150 	int index;
3151 
3152 	if (!info)
3153 		return -EINVAL;
3154 	if (nr_irqs > 1 && info->type != X86_IRQ_ALLOC_TYPE_PCI_MSI &&
3155 	    info->type != X86_IRQ_ALLOC_TYPE_PCI_MSIX)
3156 		return -EINVAL;
3157 
3158 	/*
3159 	 * With IRQ remapping enabled, we don't need contiguous CPU vectors
3160 	 * to support multiple MSI interrupts.
3161 	 */
3162 	if (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI)
3163 		info->flags &= ~X86_IRQ_ALLOC_CONTIGUOUS_VECTORS;
3164 
3165 	devid = get_devid(info);
3166 	if (devid < 0)
3167 		return -EINVAL;
3168 
3169 	ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
3170 	if (ret < 0)
3171 		return ret;
3172 
3173 	if (info->type == X86_IRQ_ALLOC_TYPE_IOAPIC) {
3174 		struct irq_remap_table *table;
3175 		struct amd_iommu *iommu;
3176 
3177 		table = alloc_irq_table(devid, NULL);
3178 		if (table) {
3179 			if (!table->min_index) {
3180 				/*
3181 				 * Keep the first 32 indexes free for IOAPIC
3182 				 * interrupts.
3183 				 */
3184 				table->min_index = 32;
3185 				iommu = amd_iommu_rlookup_table[devid];
3186 				for (i = 0; i < 32; ++i)
3187 					iommu->irte_ops->set_allocated(table, i);
3188 			}
3189 			WARN_ON(table->min_index != 32);
3190 			index = info->ioapic.pin;
3191 		} else {
3192 			index = -ENOMEM;
3193 		}
3194 	} else if (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI ||
3195 		   info->type == X86_IRQ_ALLOC_TYPE_PCI_MSIX) {
3196 		bool align = (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI);
3197 
3198 		index = alloc_irq_index(devid, nr_irqs, align,
3199 					msi_desc_to_pci_dev(info->desc));
3200 	} else {
3201 		index = alloc_irq_index(devid, nr_irqs, false, NULL);
3202 	}
3203 
3204 	if (index < 0) {
3205 		pr_warn("Failed to allocate IRTE\n");
3206 		ret = index;
3207 		goto out_free_parent;
3208 	}
3209 
3210 	for (i = 0; i < nr_irqs; i++) {
3211 		irq_data = irq_domain_get_irq_data(domain, virq + i);
3212 		cfg = irq_data ? irqd_cfg(irq_data) : NULL;
3213 		if (!cfg) {
3214 			ret = -EINVAL;
3215 			goto out_free_data;
3216 		}
3217 
3218 		ret = -ENOMEM;
3219 		data = kzalloc(sizeof(*data), GFP_KERNEL);
3220 		if (!data)
3221 			goto out_free_data;
3222 
3223 		if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
3224 			data->entry = kzalloc(sizeof(union irte), GFP_KERNEL);
3225 		else
3226 			data->entry = kzalloc(sizeof(struct irte_ga),
3227 						     GFP_KERNEL);
3228 		if (!data->entry) {
3229 			kfree(data);
3230 			goto out_free_data;
3231 		}
3232 
3233 		irq_data->hwirq = (devid << 16) + i;
3234 		irq_data->chip_data = data;
3235 		irq_data->chip = &amd_ir_chip;
3236 		irq_remapping_prepare_irte(data, cfg, info, devid, index, i);
3237 		irq_set_status_flags(virq + i, IRQ_MOVE_PCNTXT);
3238 	}
3239 
3240 	return 0;
3241 
3242 out_free_data:
3243 	for (i--; i >= 0; i--) {
3244 		irq_data = irq_domain_get_irq_data(domain, virq + i);
3245 		if (irq_data)
3246 			kfree(irq_data->chip_data);
3247 	}
3248 	for (i = 0; i < nr_irqs; i++)
3249 		free_irte(devid, index + i);
3250 out_free_parent:
3251 	irq_domain_free_irqs_common(domain, virq, nr_irqs);
3252 	return ret;
3253 }
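
/*
 * Worked example (illustrative): the hwirq encoding above places the
 * device id in the upper 16 bits, so devid 0x00a5 with sub-handle 2
 * yields hwirq (0x00a5 << 16) + 2 == 0x00a50002, keeping hwirqs unique
 * per device within the remapping domain.
 */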
3254 
3255 static void irq_remapping_free(struct irq_domain *domain, unsigned int virq,
3256 			       unsigned int nr_irqs)
3257 {
3258 	struct irq_2_irte *irte_info;
3259 	struct irq_data *irq_data;
3260 	struct amd_ir_data *data;
3261 	int i;
3262 
3263 	for (i = 0; i < nr_irqs; i++) {
3264 		irq_data = irq_domain_get_irq_data(domain, virq + i);
3265 		if (irq_data && irq_data->chip_data) {
3266 			data = irq_data->chip_data;
3267 			irte_info = &data->irq_2_irte;
3268 			free_irte(irte_info->devid, irte_info->index);
3269 			kfree(data->entry);
3270 			kfree(data);
3271 		}
3272 	}
3273 	irq_domain_free_irqs_common(domain, virq, nr_irqs);
3274 }
3275 
3276 static void amd_ir_update_irte(struct irq_data *irqd, struct amd_iommu *iommu,
3277 			       struct amd_ir_data *ir_data,
3278 			       struct irq_2_irte *irte_info,
3279 			       struct irq_cfg *cfg);
3280 
3281 static int irq_remapping_activate(struct irq_domain *domain,
3282 				  struct irq_data *irq_data, bool reserve)
3283 {
3284 	struct amd_ir_data *data = irq_data->chip_data;
3285 	struct irq_2_irte *irte_info = &data->irq_2_irte;
3286 	struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid];
3287 	struct irq_cfg *cfg = irqd_cfg(irq_data);
3288 
3289 	if (!iommu)
3290 		return 0;
3291 
3292 	iommu->irte_ops->activate(data->entry, irte_info->devid,
3293 				  irte_info->index);
3294 	amd_ir_update_irte(irq_data, iommu, data, irte_info, cfg);
3295 	return 0;
3296 }
3297 
3298 static void irq_remapping_deactivate(struct irq_domain *domain,
3299 				     struct irq_data *irq_data)
3300 {
3301 	struct amd_ir_data *data = irq_data->chip_data;
3302 	struct irq_2_irte *irte_info = &data->irq_2_irte;
3303 	struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid];
3304 
3305 	if (iommu)
3306 		iommu->irte_ops->deactivate(data->entry, irte_info->devid,
3307 					    irte_info->index);
3308 }
3309 
3310 static int irq_remapping_select(struct irq_domain *d, struct irq_fwspec *fwspec,
3311 				enum irq_domain_bus_token bus_token)
3312 {
3313 	struct amd_iommu *iommu;
3314 	int devid = -1;
3315 
3316 	if (!amd_iommu_irq_remap)
3317 		return 0;
3318 
3319 	if (x86_fwspec_is_ioapic(fwspec))
3320 		devid = get_ioapic_devid(fwspec->param[0]);
3321 	else if (x86_fwspec_is_hpet(fwspec))
3322 		devid = get_hpet_devid(fwspec->param[0]);
3323 
3324 	if (devid < 0)
3325 		return 0;
3326 
3327 	iommu = amd_iommu_rlookup_table[devid];
3328 	return iommu && iommu->ir_domain == d;
3329 }
3330 
3331 static const struct irq_domain_ops amd_ir_domain_ops = {
3332 	.select = irq_remapping_select,
3333 	.alloc = irq_remapping_alloc,
3334 	.free = irq_remapping_free,
3335 	.activate = irq_remapping_activate,
3336 	.deactivate = irq_remapping_deactivate,
3337 };
3338 
3339 int amd_iommu_activate_guest_mode(void *data)
3340 {
3341 	struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
3342 	struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
3343 	u64 valid;
3344 
3345 	if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) ||
3346 	    !entry || entry->lo.fields_vapic.guest_mode)
3347 		return 0;
3348 
3349 	valid = entry->lo.fields_vapic.valid;
3350 
3351 	entry->lo.val = 0;
3352 	entry->hi.val = 0;
3353 
3354 	entry->lo.fields_vapic.valid       = valid;
3355 	entry->lo.fields_vapic.guest_mode  = 1;
3356 	entry->lo.fields_vapic.ga_log_intr = 1;
3357 	entry->hi.fields.ga_root_ptr       = ir_data->ga_root_ptr;
3358 	entry->hi.fields.vector            = ir_data->ga_vector;
3359 	entry->lo.fields_vapic.ga_tag      = ir_data->ga_tag;
3360 
3361 	return modify_irte_ga(ir_data->irq_2_irte.devid,
3362 			      ir_data->irq_2_irte.index, entry, ir_data);
3363 }
3364 EXPORT_SYMBOL(amd_iommu_activate_guest_mode);
3365 
3366 int amd_iommu_deactivate_guest_mode(void *data)
3367 {
3368 	struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
3369 	struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
3370 	struct irq_cfg *cfg = ir_data->cfg;
3371 	u64 valid;
3372 
3373 	if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) ||
3374 	    !entry || !entry->lo.fields_vapic.guest_mode)
3375 		return 0;
3376 
3377 	valid = entry->lo.fields_remap.valid;
3378 
3379 	entry->lo.val = 0;
3380 	entry->hi.val = 0;
3381 
3382 	entry->lo.fields_remap.valid       = valid;
3383 	entry->lo.fields_remap.dm          = apic->dest_mode_logical;
3384 	entry->lo.fields_remap.int_type    = apic->delivery_mode;
3385 	entry->hi.fields.vector            = cfg->vector;
3386 	entry->lo.fields_remap.destination =
3387 				APICID_TO_IRTE_DEST_LO(cfg->dest_apicid);
3388 	entry->hi.fields.destination =
3389 				APICID_TO_IRTE_DEST_HI(cfg->dest_apicid);
3390 
3391 	return modify_irte_ga(ir_data->irq_2_irte.devid,
3392 			      ir_data->irq_2_irte.index, entry, ir_data);
3393 }
3394 EXPORT_SYMBOL(amd_iommu_deactivate_guest_mode);
3395 
3396 static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info)
3397 {
3398 	int ret;
3399 	struct amd_iommu *iommu;
3400 	struct amd_iommu_pi_data *pi_data = vcpu_info;
3401 	struct vcpu_data *vcpu_pi_info = pi_data->vcpu_data;
3402 	struct amd_ir_data *ir_data = data->chip_data;
3403 	struct irq_2_irte *irte_info = &ir_data->irq_2_irte;
3404 	struct iommu_dev_data *dev_data = search_dev_data(irte_info->devid);
3405 
3406 	/* Note:
3407 	 * If this device has never been set up for guest mode,
3408 	 * we should not modify the IRTE.
3409 	 */
3410 	if (!dev_data || !dev_data->use_vapic)
3411 		return 0;
3412 
3413 	ir_data->cfg = irqd_cfg(data);
3414 	pi_data->ir_data = ir_data;
3415 
3416 	/* Note:
3417 	 * SVM tries to set up VAPIC mode, but we are in
3418 	 * legacy mode, so we force legacy mode instead.
3419 	 */
3420 	if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)) {
3421 		pr_debug("%s: Fall back to using intr legacy remap\n",
3422 			 __func__);
3423 		pi_data->is_guest_mode = false;
3424 	}
3425 
3426 	iommu = amd_iommu_rlookup_table[irte_info->devid];
3427 	if (iommu == NULL)
3428 		return -EINVAL;
3429 
3430 	pi_data->prev_ga_tag = ir_data->cached_ga_tag;
3431 	if (pi_data->is_guest_mode) {
3432 		ir_data->ga_root_ptr = (pi_data->base >> 12);
3433 		ir_data->ga_vector = vcpu_pi_info->vector;
3434 		ir_data->ga_tag = pi_data->ga_tag;
3435 		ret = amd_iommu_activate_guest_mode(ir_data);
3436 		if (!ret)
3437 			ir_data->cached_ga_tag = pi_data->ga_tag;
3438 	} else {
3439 		ret = amd_iommu_deactivate_guest_mode(ir_data);
3440 
3441 		/*
3442 		 * This communicates the ga_tag back to the caller
3443 		 * so that it can do all the necessary cleanup.
3444 		 */
3445 		if (!ret)
3446 			ir_data->cached_ga_tag = 0;
3447 	}
3448 
3449 	return ret;
3450 }
3451 
3452 
3453 static void amd_ir_update_irte(struct irq_data *irqd, struct amd_iommu *iommu,
3454 			       struct amd_ir_data *ir_data,
3455 			       struct irq_2_irte *irte_info,
3456 			       struct irq_cfg *cfg)
3457 {
3458 
3459 	/*
3460 	 * Atomically update the IRTE with the new destination and vector,
3461 	 * then flush the interrupt entry cache.
3462 	 */
3463 	iommu->irte_ops->set_affinity(ir_data->entry, irte_info->devid,
3464 				      irte_info->index, cfg->vector,
3465 				      cfg->dest_apicid);
3466 }
3467 
3468 static int amd_ir_set_affinity(struct irq_data *data,
3469 			       const struct cpumask *mask, bool force)
3470 {
3471 	struct amd_ir_data *ir_data = data->chip_data;
3472 	struct irq_2_irte *irte_info = &ir_data->irq_2_irte;
3473 	struct irq_cfg *cfg = irqd_cfg(data);
3474 	struct irq_data *parent = data->parent_data;
3475 	struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid];
3476 	int ret;
3477 
3478 	if (!iommu)
3479 		return -ENODEV;
3480 
3481 	ret = parent->chip->irq_set_affinity(parent, mask, force);
3482 	if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
3483 		return ret;
3484 
3485 	amd_ir_update_irte(data, iommu, ir_data, irte_info, cfg);
3486 	/*
3487 	 * After this point, all the interrupts will start arriving
3488 	 * at the new destination. So, time to clean up the previous
3489 	 * vector allocation.
3490 	 */
3491 	send_cleanup_vector(cfg);
3492 
3493 	return IRQ_SET_MASK_OK_DONE;
3494 }
3495 
3496 static void ir_compose_msi_msg(struct irq_data *irq_data, struct msi_msg *msg)
3497 {
3498 	struct amd_ir_data *ir_data = irq_data->chip_data;
3499 
3500 	*msg = ir_data->msi_entry;
3501 }
3502 
3503 static struct irq_chip amd_ir_chip = {
3504 	.name			= "AMD-IR",
3505 	.irq_ack		= apic_ack_irq,
3506 	.irq_set_affinity	= amd_ir_set_affinity,
3507 	.irq_set_vcpu_affinity	= amd_ir_set_vcpu_affinity,
3508 	.irq_compose_msi_msg	= ir_compose_msi_msg,
3509 };
3510 
3511 int amd_iommu_create_irq_domain(struct amd_iommu *iommu)
3512 {
3513 	struct fwnode_handle *fn;
3514 
3515 	fn = irq_domain_alloc_named_id_fwnode("AMD-IR", iommu->index);
3516 	if (!fn)
3517 		return -ENOMEM;
3518 	iommu->ir_domain = irq_domain_create_tree(fn, &amd_ir_domain_ops, iommu);
3519 	if (!iommu->ir_domain) {
3520 		irq_domain_free_fwnode(fn);
3521 		return -ENOMEM;
3522 	}
3523 
3524 	iommu->ir_domain->parent = arch_get_ir_parent_domain();
3525 	iommu->msi_domain = arch_create_remap_msi_irq_domain(iommu->ir_domain,
3526 							     "AMD-IR-MSI",
3527 							     iommu->index);
3528 	return 0;
3529 }
3530 
3531 int amd_iommu_update_ga(int cpu, bool is_run, void *data)
3532 {
3533 	unsigned long flags;
3534 	struct amd_iommu *iommu;
3535 	struct irq_remap_table *table;
3536 	struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
3537 	int devid = ir_data->irq_2_irte.devid;
3538 	struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
3539 	struct irte_ga *ref = (struct irte_ga *) ir_data->ref;
3540 
3541 	if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) ||
3542 	    !ref || !entry || !entry->lo.fields_vapic.guest_mode)
3543 		return 0;
3544 
3545 	iommu = amd_iommu_rlookup_table[devid];
3546 	if (!iommu)
3547 		return -ENODEV;
3548 
3549 	table = get_irq_table(devid);
3550 	if (!table)
3551 		return -ENODEV;
3552 
3553 	raw_spin_lock_irqsave(&table->lock, flags);
3554 
3555 	if (ref->lo.fields_vapic.guest_mode) {
3556 		if (cpu >= 0) {
3557 			ref->lo.fields_vapic.destination =
3558 						APICID_TO_IRTE_DEST_LO(cpu);
3559 			ref->hi.fields.destination =
3560 						APICID_TO_IRTE_DEST_HI(cpu);
3561 		}
3562 		ref->lo.fields_vapic.is_run = is_run;
3563 		barrier();
3564 	}
3565 
3566 	raw_spin_unlock_irqrestore(&table->lock, flags);
3567 
3568 	iommu_flush_irt(iommu, devid);
3569 	iommu_completion_wait(iommu);
3570 	return 0;
3571 }
3572 EXPORT_SYMBOL(amd_iommu_update_ga);
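
/*
 * Usage sketch (illustrative, hypothetical hypervisor caller): a
 * posted-interrupt aware hypervisor calls this on vCPU scheduling
 * changes so that guest interrupts chase the vCPU:
 *
 *	amd_iommu_update_ga(apicid, true, ir_data);	(vCPU starts running)
 *	amd_iommu_update_ga(-1, false, ir_data);	(vCPU blocks)
 *
 * A negative cpu leaves the cached destination untouched and only
 * updates is_run.
 */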
3573 #endif
3574