// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
 * tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping
 */

#define pr_fmt(fmt) "DMAR: " fmt

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/numa.h>
#include <linux/limits.h>
#include <asm/irq_remapping.h>

#include "iommu.h"
#include "../irq_remapping.h"
#include "perf.h"
#include "trace.h"
#include "perfmon.h"

typedef int (*dmar_res_handler_t)(struct acpi_dmar_header *, void *);
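/*
 * Callback dispatch table for dmar_walk_remapping_entries(): one handler
 * and one argument slot per ACPI DMAR structure type. ignore_unhandled
 * controls whether an entry without a handler is treated as an error, and
 * print_entry requests a one-line summary of each entry as it is parsed.
 */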
struct dmar_res_callback {
	dmar_res_handler_t	cb[ACPI_DMAR_TYPE_RESERVED];
	void			*arg[ACPI_DMAR_TYPE_RESERVED];
	bool			ignore_unhandled;
	bool			print_entry;
};

/*
 * Assumptions:
 * 1) The hotplug framework guarantees that DMAR unit will be hot-added
 *    before IO devices managed by that unit.
 * 2) The hotplug framework guarantees that DMAR unit will be hot-removed
 *    after IO devices managed by that unit.
 * 3) Hotplug events are rare.
 *
 * Locking rules for DMA and interrupt remapping related global data structures:
 * 1) Use dmar_global_lock in process context
 * 2) Use RCU in interrupt context
 */
DECLARE_RWSEM(dmar_global_lock);
LIST_HEAD(dmar_drhd_units);

struct acpi_table_header * __initdata dmar_tbl;
static int dmar_dev_scope_status = 1;
static DEFINE_IDA(dmar_seq_ids);

static int alloc_iommu(struct dmar_drhd_unit *drhd);
static void free_iommu(struct intel_iommu *iommu);

static void dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
	/*
	 * Add the INCLUDE_ALL unit at the tail, so that a scan of the
	 * list will find it at the very end.
	 */
	if (drhd->include_all)
		list_add_tail_rcu(&drhd->list, &dmar_drhd_units);
	else
		list_add_rcu(&drhd->list, &dmar_drhd_units);
}

void *dmar_alloc_dev_scope(void *start, void *end, int *cnt)
{
	struct acpi_dmar_device_scope *scope;

	*cnt = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_NAMESPACE ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			(*cnt)++;
		else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC &&
			 scope->entry_type != ACPI_DMAR_SCOPE_TYPE_HPET) {
			pr_warn("Unsupported device scope\n");
		}
		start += scope->length;
	}
	if (*cnt == 0)
		return NULL;

	return kcalloc(*cnt, sizeof(struct dmar_dev_scope), GFP_KERNEL);
}

void dmar_free_dev_scope(struct dmar_dev_scope **devices, int *cnt)
{
	int i;
	struct device *tmp_dev;

	if (*devices && *cnt) {
		for_each_active_dev_scope(*devices, *cnt, i, tmp_dev)
			put_device(tmp_dev);
		kfree(*devices);
	}

	*devices = NULL;
	*cnt = 0;
}

/* Optimize out kzalloc()/kfree() for normal cases */
static char dmar_pci_notify_info_buf[64];

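/*
 * Build a dmar_pci_notify_info for a PCI bus notifier event. For device
 * addition, the PCI path from the device up to the root bus is recorded
 * so it can later be matched against ACPI device scope entries. Small
 * allocations are served from the static buffer above to avoid
 * kzalloc()/kfree() in the common case.
 */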
static struct dmar_pci_notify_info *
dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
{
	int level = 0;
	size_t size;
	struct pci_dev *tmp;
	struct dmar_pci_notify_info *info;

	/*
	 * Ignore devices that have a domain number higher than what can
	 * be looked up in DMAR, e.g. VMD subdevices with domain 0x10000
	 */
	if (pci_domain_nr(dev->bus) > U16_MAX)
		return NULL;

	/* Only generate path[] for device addition event */
	if (event == BUS_NOTIFY_ADD_DEVICE)
		for (tmp = dev; tmp; tmp = tmp->bus->self)
			level++;

	size = struct_size(info, path, level);
	if (size <= sizeof(dmar_pci_notify_info_buf)) {
		info = (struct dmar_pci_notify_info *)dmar_pci_notify_info_buf;
	} else {
		info = kzalloc(size, GFP_KERNEL);
		if (!info) {
			if (dmar_dev_scope_status == 0)
				dmar_dev_scope_status = -ENOMEM;
			return NULL;
		}
	}

	info->event = event;
	info->dev = dev;
	info->seg = pci_domain_nr(dev->bus);
	info->level = level;
	if (event == BUS_NOTIFY_ADD_DEVICE) {
		for (tmp = dev; tmp; tmp = tmp->bus->self) {
			level--;
			info->path[level].bus = tmp->bus->number;
			info->path[level].device = PCI_SLOT(tmp->devfn);
			info->path[level].function = PCI_FUNC(tmp->devfn);
			if (pci_is_root_bus(tmp->bus))
				info->bus = tmp->bus->number;
		}
	}

	return info;
}

static inline void dmar_free_pci_notify_info(struct dmar_pci_notify_info *info)
{
	if ((void *)info != dmar_pci_notify_info_buf)
		kfree(info);
}

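/*
 * Match the PCI path recorded in @info against a device scope path from
 * the DMAR table. If the direct comparison fails and the scope path has a
 * single entry, fall back to comparing against the last hop only: some
 * BIOSes emit broken RMRR entries of that form, which is worked around
 * here with an FW_BUG notice.
 */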
static bool dmar_match_pci_path(struct dmar_pci_notify_info *info, int bus,
				struct acpi_dmar_pci_path *path, int count)
{
	int i;

	if (info->bus != bus)
		goto fallback;
	if (info->level != count)
		goto fallback;

	for (i = 0; i < count; i++) {
		if (path[i].device != info->path[i].device ||
		    path[i].function != info->path[i].function)
			goto fallback;
	}

	return true;

fallback:

	if (count != 1)
		return false;

	i = info->level - 1;
	if (bus == info->path[i].bus &&
	    path[0].device == info->path[i].device &&
	    path[0].function == info->path[i].function) {
		pr_info(FW_BUG "RMRR entry for device %02x:%02x.%x is broken - applying workaround\n",
			bus, path[0].device, path[0].function);
		return true;
	}

	return false;
}

/* Return: > 0 if match found, 0 if no match found, < 0 if error happens */
int dmar_insert_dev_scope(struct dmar_pci_notify_info *info,
			  void *start, void *end, u16 segment,
			  struct dmar_dev_scope *devices,
			  int devices_cnt)
{
	int i, level;
	struct device *tmp, *dev = &info->dev->dev;
	struct acpi_dmar_device_scope *scope;
	struct acpi_dmar_pci_path *path;

	if (segment != info->seg)
		return 0;

	for (; start < end; start += scope->length) {
		scope = start;
		if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
		    scope->entry_type != ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			continue;

		path = (struct acpi_dmar_pci_path *)(scope + 1);
		level = (scope->length - sizeof(*scope)) / sizeof(*path);
		if (!dmar_match_pci_path(info, scope->bus, path, level))
			continue;

		/*
		 * We expect devices with endpoint scope to have normal PCI
		 * headers, and devices with bridge scope to have bridge PCI
		 * headers. However PCI NTB devices may be listed in the
		 * DMAR table with bridge scope, even though they have a
		 * normal PCI header. NTB devices are identified by class
		 * "BRIDGE_OTHER" (0680h) - we don't declare a scope mismatch
		 * for this special case.
		 */
		if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
		     info->dev->hdr_type != PCI_HEADER_TYPE_NORMAL) ||
		    (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
		     (info->dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
		      info->dev->class >> 16 != PCI_BASE_CLASS_BRIDGE))) {
			pr_warn("Device scope type does not match for %s\n",
				pci_name(info->dev));
			return -EINVAL;
		}

		for_each_dev_scope(devices, devices_cnt, i, tmp)
			if (tmp == NULL) {
				devices[i].bus = info->dev->bus->number;
				devices[i].devfn = info->dev->devfn;
				rcu_assign_pointer(devices[i].dev,
						   get_device(dev));
				return 1;
			}
		if (WARN_ON(i >= devices_cnt))
			return -EINVAL;
	}

	return 0;
}

int dmar_remove_dev_scope(struct dmar_pci_notify_info *info, u16 segment,
			  struct dmar_dev_scope *devices, int count)
{
	int index;
	struct device *tmp;

	if (info->seg != segment)
		return 0;

	for_each_active_dev_scope(devices, count, index, tmp)
		if (tmp == &info->dev->dev) {
			RCU_INIT_POINTER(devices[index].dev, NULL);
			synchronize_rcu();
			put_device(tmp);
			return 1;
		}

	return 0;
}

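/*
 * Hook a newly added PCI device into the device scope lists of the DRHD
 * units (INCLUDE_ALL units are skipped since they match implicitly), then
 * notify the DMA and interrupt remapping code. A failure is latched in
 * dmar_dev_scope_status so that dmar_dev_scope_init() can report it.
 */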
static int dmar_pci_bus_add_dev(struct dmar_pci_notify_info *info)
{
	int ret = 0;
	struct dmar_drhd_unit *dmaru;
	struct acpi_dmar_hardware_unit *drhd;

	for_each_drhd_unit(dmaru) {
		if (dmaru->include_all)
			continue;

		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit, header);
		ret = dmar_insert_dev_scope(info, (void *)(drhd + 1),
				((void *)drhd) + drhd->header.length,
				dmaru->segment,
				dmaru->devices, dmaru->devices_cnt);
		if (ret)
			break;
	}
	if (ret >= 0)
		ret = dmar_iommu_notify_scope_dev(info);
	if (ret < 0 && dmar_dev_scope_status == 0)
		dmar_dev_scope_status = ret;

	if (ret >= 0)
		intel_irq_remap_add_device(info);

	return ret;
}

static void dmar_pci_bus_del_dev(struct dmar_pci_notify_info *info)
{
	struct dmar_drhd_unit *dmaru;

	for_each_drhd_unit(dmaru)
		if (dmar_remove_dev_scope(info, dmaru->segment,
			dmaru->devices, dmaru->devices_cnt))
			break;
	dmar_iommu_notify_scope_dev(info);
}

static inline void vf_inherit_msi_domain(struct pci_dev *pdev)
{
	struct pci_dev *physfn = pci_physfn(pdev);

	dev_set_msi_domain(&pdev->dev, dev_get_msi_domain(&physfn->dev));
}

static int dmar_pci_bus_notifier(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct pci_dev *pdev = to_pci_dev(data);
	struct dmar_pci_notify_info *info;

	/* Only care about add/remove events for physical functions.
	 * For VFs we actually do the lookup based on the corresponding
	 * PF in device_to_iommu() anyway. */
	if (pdev->is_virtfn) {
		/*
		 * Ensure that the VF device inherits the irq domain of the
		 * PF device. Ideally the device would inherit the domain
		 * from the bus, but DMAR can have multiple units per bus
		 * which makes this impossible. The VF 'bus' could inherit
		 * from the PF device, but that's yet another x86'sism to
		 * inflict on everybody else.
		 */
		if (action == BUS_NOTIFY_ADD_DEVICE)
			vf_inherit_msi_domain(pdev);
		return NOTIFY_DONE;
	}

	if (action != BUS_NOTIFY_ADD_DEVICE &&
	    action != BUS_NOTIFY_REMOVED_DEVICE)
		return NOTIFY_DONE;

	info = dmar_alloc_pci_notify_info(pdev, action);
	if (!info)
		return NOTIFY_DONE;

	down_write(&dmar_global_lock);
	if (action == BUS_NOTIFY_ADD_DEVICE)
		dmar_pci_bus_add_dev(info);
	else if (action == BUS_NOTIFY_REMOVED_DEVICE)
		dmar_pci_bus_del_dev(info);
	up_write(&dmar_global_lock);

	dmar_free_pci_notify_info(info);

	return NOTIFY_OK;
}

static struct notifier_block dmar_pci_bus_nb = {
	.notifier_call = dmar_pci_bus_notifier,
	.priority = 1,
};

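/*
 * Look up an already registered DRHD unit by PCI segment number and
 * register base address. Callers must hold dmar_global_lock or be in an
 * RCU read-side critical section, as checked by dmar_rcu_check().
 */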
static struct dmar_drhd_unit *
dmar_find_dmaru(struct acpi_dmar_hardware_unit *drhd)
{
	struct dmar_drhd_unit *dmaru;

	list_for_each_entry_rcu(dmaru, &dmar_drhd_units, list,
				dmar_rcu_check())
		if (dmaru->segment == drhd->segment &&
		    dmaru->reg_base_addr == drhd->address)
			return dmaru;

	return NULL;
}

/*
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int dmar_parse_one_drhd(struct acpi_dmar_header *header, void *arg)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct dmar_drhd_unit *dmaru;
	int ret;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	dmaru = dmar_find_dmaru(drhd);
	if (dmaru)
		goto out;

	dmaru = kzalloc(sizeof(*dmaru) + header->length, GFP_KERNEL);
	if (!dmaru)
		return -ENOMEM;

	/*
	 * If header is allocated from slab by ACPI _DSM method, we need to
	 * copy the content because the memory buffer will be freed on return.
	 */
	dmaru->hdr = (void *)(dmaru + 1);
	memcpy(dmaru->hdr, header, header->length);
	dmaru->reg_base_addr = drhd->address;
	dmaru->segment = drhd->segment;
	/* The size of the register set is 2 ^ N 4 KB pages. */
	dmaru->reg_size = 1UL << (drhd->size + 12);
	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
	dmaru->devices = dmar_alloc_dev_scope((void *)(drhd + 1),
					      ((void *)drhd) + drhd->header.length,
					      &dmaru->devices_cnt);
	if (dmaru->devices_cnt && dmaru->devices == NULL) {
		kfree(dmaru);
		return -ENOMEM;
	}

	ret = alloc_iommu(dmaru);
	if (ret) {
		dmar_free_dev_scope(&dmaru->devices,
				    &dmaru->devices_cnt);
		kfree(dmaru);
		return ret;
	}
	dmar_register_drhd_unit(dmaru);

out:
	if (arg)
		(*(int *)arg)++;

	return 0;
}

static void dmar_free_drhd(struct dmar_drhd_unit *dmaru)
{
	if (dmaru->devices && dmaru->devices_cnt)
		dmar_free_dev_scope(&dmaru->devices, &dmaru->devices_cnt);
	if (dmaru->iommu)
		free_iommu(dmaru->iommu);
	kfree(dmaru);
}

static int __init dmar_parse_one_andd(struct acpi_dmar_header *header,
				      void *arg)
{
	struct acpi_dmar_andd *andd = (void *)header;

	/* Check for NUL termination within the designated length */
	if (strnlen(andd->device_name, header->length - 8) == header->length - 8) {
		pr_warn(FW_BUG
			"Your BIOS is broken; ANDD object name is not NUL-terminated\n"
			"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
			dmi_get_system_info(DMI_BIOS_VENDOR),
			dmi_get_system_info(DMI_BIOS_VERSION),
			dmi_get_system_info(DMI_PRODUCT_VERSION));
		add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
		return -EINVAL;
	}
	pr_info("ANDD device: %x name: %s\n", andd->device_number,
		andd->device_name);

	return 0;
}

#ifdef CONFIG_ACPI_NUMA
static int dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg)
{
	struct acpi_dmar_rhsa *rhsa;
	struct dmar_drhd_unit *drhd;

	rhsa = (struct acpi_dmar_rhsa *)header;
	for_each_drhd_unit(drhd) {
		if (drhd->reg_base_addr == rhsa->base_address) {
			int node = pxm_to_node(rhsa->proximity_domain);

			if (node != NUMA_NO_NODE && !node_online(node))
				node = NUMA_NO_NODE;
			drhd->iommu->node = node;
			return 0;
		}
	}
	pr_warn(FW_BUG
		"Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		rhsa->base_address,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));
	add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);

	return 0;
}
#else
#define dmar_parse_one_rhsa dmar_res_noop
#endif

static void
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_reserved_memory *rmrr;
	struct acpi_dmar_atsr *atsr;
	struct acpi_dmar_rhsa *rhsa;
	struct acpi_dmar_satc *satc;

	switch (header->type) {
	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
		drhd = container_of(header, struct acpi_dmar_hardware_unit,
				    header);
		pr_info("DRHD base: %#016Lx flags: %#x\n",
			(unsigned long long)drhd->address, drhd->flags);
		break;
	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
		rmrr = container_of(header, struct acpi_dmar_reserved_memory,
				    header);
		pr_info("RMRR base: %#016Lx end: %#016Lx\n",
			(unsigned long long)rmrr->base_address,
			(unsigned long long)rmrr->end_address);
		break;
	case ACPI_DMAR_TYPE_ROOT_ATS:
		atsr = container_of(header, struct acpi_dmar_atsr, header);
		pr_info("ATSR flags: %#x\n", atsr->flags);
		break;
	case ACPI_DMAR_TYPE_HARDWARE_AFFINITY:
		rhsa = container_of(header, struct acpi_dmar_rhsa, header);
		pr_info("RHSA base: %#016Lx proximity domain: %#x\n",
			(unsigned long long)rhsa->base_address,
			rhsa->proximity_domain);
		break;
	case ACPI_DMAR_TYPE_NAMESPACE:
		/* We don't print this here because we need to sanity-check
		   it first. So print it in dmar_parse_one_andd() instead. */
		break;
	case ACPI_DMAR_TYPE_SATC:
		satc = container_of(header, struct acpi_dmar_satc, header);
		pr_info("SATC flags: 0x%x\n", satc->flags);
		break;
	}
}

/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
	acpi_status status = AE_OK;

	/* if we could find DMAR table, then there are DMAR devices */
	status = acpi_get_table(ACPI_SIG_DMAR, 0, &dmar_tbl);

	if (ACPI_SUCCESS(status) && !dmar_tbl) {
		pr_warn("Unable to map DMAR\n");
		status = AE_NOT_FOUND;
	}

	return ACPI_SUCCESS(status) ? 0 : -ENOENT;
}

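/*
 * Walk a sequence of DMAR remapping structures, dispatching each entry to
 * the matching handler in @cb. Zero-length entries and entries that would
 * run past the end of the table are rejected to guard against malformed
 * ACPI tables; unknown structure types are skipped for forward
 * compatibility.
 */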
static int dmar_walk_remapping_entries(struct acpi_dmar_header *start,
				       size_t len, struct dmar_res_callback *cb)
{
	struct acpi_dmar_header *iter, *next;
	struct acpi_dmar_header *end = ((void *)start) + len;

	for (iter = start; iter < end; iter = next) {
		next = (void *)iter + iter->length;
		if (iter->length == 0) {
			/* Avoid looping forever on bad ACPI tables */
			pr_debug(FW_BUG "Invalid 0-length structure\n");
			break;
		} else if (next > end) {
			/* Avoid passing table end */
			pr_warn(FW_BUG "Record passes table end\n");
			return -EINVAL;
		}

		if (cb->print_entry)
			dmar_table_print_dmar_entry(iter);

		if (iter->type >= ACPI_DMAR_TYPE_RESERVED) {
			/* continue for forward compatibility */
			pr_debug("Unknown DMAR structure type %d\n",
				 iter->type);
		} else if (cb->cb[iter->type]) {
			int ret;

			ret = cb->cb[iter->type](iter, cb->arg[iter->type]);
			if (ret)
				return ret;
		} else if (!cb->ignore_unhandled) {
			pr_warn("No handler for DMAR structure type %d\n",
				iter->type);
			return -EINVAL;
		}
	}

	return 0;
}

static inline int dmar_walk_dmar_table(struct acpi_table_dmar *dmar,
				       struct dmar_res_callback *cb)
{
	return dmar_walk_remapping_entries((void *)(dmar + 1),
			dmar->header.length - sizeof(*dmar), cb);
}

/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
	struct acpi_table_dmar *dmar;
	int drhd_count = 0;
	int ret;
	struct dmar_res_callback cb = {
		.print_entry = true,
		.ignore_unhandled = true,
		.arg[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &drhd_count,
		.cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_parse_one_drhd,
		.cb[ACPI_DMAR_TYPE_RESERVED_MEMORY] = &dmar_parse_one_rmrr,
		.cb[ACPI_DMAR_TYPE_ROOT_ATS] = &dmar_parse_one_atsr,
		.cb[ACPI_DMAR_TYPE_HARDWARE_AFFINITY] = &dmar_parse_one_rhsa,
		.cb[ACPI_DMAR_TYPE_NAMESPACE] = &dmar_parse_one_andd,
		.cb[ACPI_DMAR_TYPE_SATC] = &dmar_parse_one_satc,
	};

	/*
	 * Do it again; the earlier dmar_tbl mapping could have been done
	 * with the fixed map.
	 */
	dmar_table_detect();

	/*
	 * ACPI tables may not be DMA protected by tboot, so use DMAR copy
	 * SINIT saved in SinitMleData in TXT heap (which is DMA protected)
	 */
	dmar_tbl = tboot_get_dmar_table(dmar_tbl);

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return -ENODEV;

	if (dmar->width < PAGE_SHIFT - 1) {
		pr_warn("Invalid DMAR haw\n");
		return -EINVAL;
	}

	pr_info("Host address width %d\n", dmar->width + 1);
	ret = dmar_walk_dmar_table(dmar, &cb);
	if (ret == 0 && drhd_count == 0)
		pr_warn(FW_BUG "No DRHD structure found in DMAR table\n");

	return ret;
}

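/*
 * Test whether @dev or any of its upstream bridges appears in a DRHD
 * device scope list; this is how non-INCLUDE_ALL units claim devices
 * sitting behind a listed bridge.
 */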
static int dmar_pci_device_match(struct dmar_dev_scope devices[],
				 int cnt, struct pci_dev *dev)
{
	int index;
	struct device *tmp;

	while (dev) {
		for_each_active_dev_scope(devices, cnt, index, tmp)
			if (dev_is_pci(tmp) && dev == to_pci_dev(tmp))
				return 1;

		/* Check our parent */
		dev = dev->bus->self;
	}

	return 0;
}

struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
	struct dmar_drhd_unit *dmaru;
	struct acpi_dmar_hardware_unit *drhd;

	dev = pci_physfn(dev);

	rcu_read_lock();
	for_each_drhd_unit(dmaru) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		if (dmaru->include_all &&
		    drhd->segment == pci_domain_nr(dev->bus))
			goto out;

		if (dmar_pci_device_match(dmaru->devices,
					  dmaru->devices_cnt, dev))
			goto out;
	}
	dmaru = NULL;
out:
	rcu_read_unlock();

	return dmaru;
}

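/*
 * Bind an ACPI namespace device named by an ANDD structure to the DRHD
 * unit whose device scope contains an ACPI_DMAR_SCOPE_TYPE_NAMESPACE
 * entry with the same enumeration ID, taking a reference on the device.
 */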
static void __init dmar_acpi_insert_dev_scope(u8 device_number,
					      struct acpi_device *adev)
{
	struct dmar_drhd_unit *dmaru;
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_device_scope *scope;
	struct device *tmp;
	int i;
	struct acpi_dmar_pci_path *path;

	for_each_drhd_unit(dmaru) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		for (scope = (void *)(drhd + 1);
		     (unsigned long)scope < ((unsigned long)drhd) + drhd->header.length;
		     scope = ((void *)scope) + scope->length) {
			if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_NAMESPACE)
				continue;
			if (scope->enumeration_id != device_number)
				continue;

			path = (void *)(scope + 1);
			pr_info("ACPI device \"%s\" under DMAR at %llx as %02x:%02x.%d\n",
				dev_name(&adev->dev), dmaru->reg_base_addr,
				scope->bus, path->device, path->function);
			for_each_dev_scope(dmaru->devices, dmaru->devices_cnt, i, tmp)
				if (tmp == NULL) {
					dmaru->devices[i].bus = scope->bus;
					dmaru->devices[i].devfn = PCI_DEVFN(path->device,
									    path->function);
					rcu_assign_pointer(dmaru->devices[i].dev,
							   get_device(&adev->dev));
					return;
				}
			BUG_ON(i >= dmaru->devices_cnt);
		}
	}
	pr_warn("No IOMMU scope found for ANDD enumeration ID %d (%s)\n",
		device_number, dev_name(&adev->dev));
}

static int __init dmar_acpi_dev_scope_init(void)
{
	struct acpi_dmar_andd *andd;

	if (dmar_tbl == NULL)
		return -ENODEV;

	for (andd = (void *)dmar_tbl + sizeof(struct acpi_table_dmar);
	     ((unsigned long)andd) < ((unsigned long)dmar_tbl) + dmar_tbl->length;
	     andd = ((void *)andd) + andd->header.length) {
		if (andd->header.type == ACPI_DMAR_TYPE_NAMESPACE) {
			acpi_handle h;
			struct acpi_device *adev;

			if (!ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT,
							  andd->device_name,
							  &h))) {
				pr_err("Failed to find handle for ACPI object %s\n",
				       andd->device_name);
				continue;
			}
			adev = acpi_fetch_acpi_dev(h);
			if (!adev) {
				pr_err("Failed to get device for ACPI object %s\n",
				       andd->device_name);
				continue;
			}
			dmar_acpi_insert_dev_scope(andd->device_number, adev);
		}
	}
	return 0;
}

int __init dmar_dev_scope_init(void)
{
	struct pci_dev *dev = NULL;
	struct dmar_pci_notify_info *info;

	if (dmar_dev_scope_status != 1)
		return dmar_dev_scope_status;

	if (list_empty(&dmar_drhd_units)) {
		dmar_dev_scope_status = -ENODEV;
	} else {
		dmar_dev_scope_status = 0;

		dmar_acpi_dev_scope_init();

		for_each_pci_dev(dev) {
			if (dev->is_virtfn)
				continue;

			info = dmar_alloc_pci_notify_info(dev,
					BUS_NOTIFY_ADD_DEVICE);
			if (!info) {
				pci_dev_put(dev);
				return dmar_dev_scope_status;
			} else {
				dmar_pci_bus_add_dev(info);
				dmar_free_pci_notify_info(info);
			}
		}
	}

	return dmar_dev_scope_status;
}

void __init dmar_register_bus_notifier(void)
{
	bus_register_notifier(&pci_bus_type, &dmar_pci_bus_nb);
}

int __init dmar_table_init(void)
{
	static int dmar_table_initialized;
	int ret;

	if (dmar_table_initialized == 0) {
		ret = parse_dmar_table();
		if (ret < 0) {
			if (ret != -ENODEV)
				pr_info("Parse DMAR table failure.\n");
		} else if (list_empty(&dmar_drhd_units)) {
			pr_info("No DMAR devices found\n");
			ret = -ENODEV;
		}

		if (ret < 0)
			dmar_table_initialized = ret;
		else
			dmar_table_initialized = 1;
	}

	return dmar_table_initialized < 0 ? dmar_table_initialized : 0;
}

static void warn_invalid_dmar(u64 addr, const char *message)
{
	pr_warn_once(FW_BUG
		     "Your BIOS is broken; DMAR reported at address %llx%s!\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     addr, message,
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
	add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
}

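/*
 * Sanity-check one DRHD entry before committing to it: the register base
 * must be non-zero and the capability registers must not read back as all
 * ones. A non-NULL @arg selects ioremap() over early_ioremap(), so the
 * same helper serves both early boot detection and later callers.
 */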
static int __ref
dmar_validate_one_drhd(struct acpi_dmar_header *entry, void *arg)
{
	struct acpi_dmar_hardware_unit *drhd;
	void __iomem *addr;
	u64 cap, ecap;

	drhd = (void *)entry;
	if (!drhd->address) {
		warn_invalid_dmar(0, "");
		return -EINVAL;
	}

	if (arg)
		addr = ioremap(drhd->address, VTD_PAGE_SIZE);
	else
		addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
	if (!addr) {
		pr_warn("Can't validate DRHD address: %llx\n", drhd->address);
		return -EINVAL;
	}

	cap = dmar_readq(addr + DMAR_CAP_REG);
	ecap = dmar_readq(addr + DMAR_ECAP_REG);

	if (arg)
		iounmap(addr);
	else
		early_iounmap(addr, VTD_PAGE_SIZE);

	if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
		warn_invalid_dmar(drhd->address, " returns all ones");
		return -EINVAL;
	}

	return 0;
}

void __init detect_intel_iommu(void)
{
	int ret;
	struct dmar_res_callback validate_drhd_cb = {
		.cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_validate_one_drhd,
		.ignore_unhandled = true,
	};

	down_write(&dmar_global_lock);
	ret = dmar_table_detect();
	if (!ret)
		ret = dmar_walk_dmar_table((struct acpi_table_dmar *)dmar_tbl,
					   &validate_drhd_cb);
	if (!ret && !no_iommu && !iommu_detected &&
	    (!dmar_disabled || dmar_platform_optin())) {
		iommu_detected = 1;
		/* Make sure ACS will be enabled */
		pci_request_acs();
	}

#ifdef CONFIG_X86
	if (!ret) {
		x86_init.iommu.iommu_init = intel_iommu_init;
		x86_platform.iommu_shutdown = intel_iommu_shutdown;
	}
#endif

	if (dmar_tbl) {
		acpi_put_table(dmar_tbl);
		dmar_tbl = NULL;
	}
	up_write(&dmar_global_lock);
}

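/* Undo map_iommu(): drop the register mapping and its memory region. */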
static void unmap_iommu(struct intel_iommu *iommu)
{
	iounmap(iommu->reg);
	release_mem_region(iommu->reg_phys, iommu->reg_size);
}

/**
 * map_iommu: map the iommu's registers
 * @iommu: the iommu to map
 * @drhd: DMA remapping hardware definition structure
 *
 * Memory map the iommu's registers.  Start w/ a single page, and
 * possibly expand if that turns out to be insufficient.
 */
static int map_iommu(struct intel_iommu *iommu, struct dmar_drhd_unit *drhd)
{
	u64 phys_addr = drhd->reg_base_addr;
	int map_size, err = 0;

	iommu->reg_phys = phys_addr;
	iommu->reg_size = drhd->reg_size;

	if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
		pr_err("Can't reserve memory\n");
		err = -EBUSY;
		goto out;
	}

	iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
	if (!iommu->reg) {
		pr_err("Can't map the region\n");
		err = -ENOMEM;
		goto release;
	}

	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

	if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
		err = -EINVAL;
		warn_invalid_dmar(phys_addr, " returns all ones");
		goto unmap;
	}

	/* the registers might be more than one page */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
			 cap_max_fault_reg_offset(iommu->cap));
	map_size = VTD_PAGE_ALIGN(map_size);
	if (map_size > iommu->reg_size) {
		iounmap(iommu->reg);
		release_mem_region(iommu->reg_phys, iommu->reg_size);
		iommu->reg_size = map_size;
		if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
					iommu->name)) {
			pr_err("Can't reserve memory\n");
			err = -EBUSY;
			goto out;
		}
		iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
		if (!iommu->reg) {
			pr_err("Can't map the region\n");
			err = -ENOMEM;
			goto release;
		}
	}

	if (cap_ecmds(iommu->cap)) {
		int i;

		for (i = 0; i < DMA_MAX_NUM_ECMDCAP; i++) {
			iommu->ecmdcap[i] = dmar_readq(iommu->reg + DMAR_ECCAP_REG +
						       i * DMA_ECMD_REG_STEP);
		}
	}

	err = 0;
	goto out;

unmap:
	iounmap(iommu->reg);
release:
	release_mem_region(iommu->reg_phys, iommu->reg_size);
out:
	return err;
}

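/*
 * Allocate and initialize the intel_iommu instance for one DRHD unit:
 * assign a sequence ID, map the registers, compute the supported AGAWs,
 * snapshot the enable bits already set by firmware into gcmd, and (on
 * hotplug) register the unit with the IOMMU core and sysfs. Units without
 * a usable address width are kept for interrupt remapping but marked
 * ignored for DMA translation.
 */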
alloc_iommu(struct dmar_drhd_unit * drhd)1038672cf6dfSJoerg Roedel static int alloc_iommu(struct dmar_drhd_unit *drhd)
1039672cf6dfSJoerg Roedel {
1040672cf6dfSJoerg Roedel struct intel_iommu *iommu;
1041672cf6dfSJoerg Roedel u32 ver, sts;
1042c40aaaacSDavid Woodhouse int agaw = -1;
1043c40aaaacSDavid Woodhouse int msagaw = -1;
1044672cf6dfSJoerg Roedel int err;
1045672cf6dfSJoerg Roedel
1046672cf6dfSJoerg Roedel if (!drhd->reg_base_addr) {
1047672cf6dfSJoerg Roedel warn_invalid_dmar(0, "");
1048672cf6dfSJoerg Roedel return -EINVAL;
1049672cf6dfSJoerg Roedel }
1050672cf6dfSJoerg Roedel
1051672cf6dfSJoerg Roedel iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
1052672cf6dfSJoerg Roedel if (!iommu)
1053672cf6dfSJoerg Roedel return -ENOMEM;
1054672cf6dfSJoerg Roedel
1055913432f2SLu Baolu iommu->seq_id = ida_alloc_range(&dmar_seq_ids, 0,
1056913432f2SLu Baolu DMAR_UNITS_SUPPORTED - 1, GFP_KERNEL);
1057913432f2SLu Baolu if (iommu->seq_id < 0) {
1058672cf6dfSJoerg Roedel pr_err("Failed to allocate seq_id\n");
1059913432f2SLu Baolu err = iommu->seq_id;
1060672cf6dfSJoerg Roedel goto error;
1061672cf6dfSJoerg Roedel }
1062913432f2SLu Baolu sprintf(iommu->name, "dmar%d", iommu->seq_id);
1063672cf6dfSJoerg Roedel
10644db96bfeSKan Liang err = map_iommu(iommu, drhd);
1065672cf6dfSJoerg Roedel if (err) {
1066672cf6dfSJoerg Roedel pr_err("Failed to map %s\n", iommu->name);
1067672cf6dfSJoerg Roedel goto error_free_seq_id;
1068672cf6dfSJoerg Roedel }
1069672cf6dfSJoerg Roedel
1070672cf6dfSJoerg Roedel err = -EINVAL;
1071bfd3c6b9SLu Baolu if (!cap_sagaw(iommu->cap) &&
1072bfd3c6b9SLu Baolu (!ecap_smts(iommu->ecap) || ecap_slts(iommu->ecap))) {
1073c40aaaacSDavid Woodhouse pr_info("%s: No supported address widths. Not attempting DMA translation.\n",
1074c40aaaacSDavid Woodhouse iommu->name);
1075c40aaaacSDavid Woodhouse drhd->ignored = 1;
1076c40aaaacSDavid Woodhouse }
1077c40aaaacSDavid Woodhouse
1078c40aaaacSDavid Woodhouse if (!drhd->ignored) {
1079672cf6dfSJoerg Roedel agaw = iommu_calculate_agaw(iommu);
1080672cf6dfSJoerg Roedel if (agaw < 0) {
1081672cf6dfSJoerg Roedel pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
1082672cf6dfSJoerg Roedel iommu->seq_id);
1083c40aaaacSDavid Woodhouse drhd->ignored = 1;
1084672cf6dfSJoerg Roedel }
1085c40aaaacSDavid Woodhouse }
1086c40aaaacSDavid Woodhouse if (!drhd->ignored) {
1087672cf6dfSJoerg Roedel msagaw = iommu_calculate_max_sagaw(iommu);
1088672cf6dfSJoerg Roedel if (msagaw < 0) {
1089672cf6dfSJoerg Roedel pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
1090672cf6dfSJoerg Roedel iommu->seq_id);
1091c40aaaacSDavid Woodhouse drhd->ignored = 1;
1092c40aaaacSDavid Woodhouse agaw = -1;
1093c40aaaacSDavid Woodhouse }
1094672cf6dfSJoerg Roedel }
1095672cf6dfSJoerg Roedel iommu->agaw = agaw;
1096672cf6dfSJoerg Roedel iommu->msagaw = msagaw;
1097672cf6dfSJoerg Roedel iommu->segment = drhd->segment;
1098672cf6dfSJoerg Roedel
1099672cf6dfSJoerg Roedel iommu->node = NUMA_NO_NODE;
1100672cf6dfSJoerg Roedel
1101672cf6dfSJoerg Roedel ver = readl(iommu->reg + DMAR_VER_REG);
1102672cf6dfSJoerg Roedel pr_info("%s: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
1103672cf6dfSJoerg Roedel iommu->name,
1104672cf6dfSJoerg Roedel (unsigned long long)drhd->reg_base_addr,
1105672cf6dfSJoerg Roedel DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
1106672cf6dfSJoerg Roedel (unsigned long long)iommu->cap,
1107672cf6dfSJoerg Roedel (unsigned long long)iommu->ecap);
1108672cf6dfSJoerg Roedel
1109672cf6dfSJoerg Roedel /* Reflect status in gcmd */
1110672cf6dfSJoerg Roedel sts = readl(iommu->reg + DMAR_GSTS_REG);
1111672cf6dfSJoerg Roedel if (sts & DMA_GSTS_IRES)
1112672cf6dfSJoerg Roedel iommu->gcmd |= DMA_GCMD_IRE;
1113672cf6dfSJoerg Roedel if (sts & DMA_GSTS_TES)
1114672cf6dfSJoerg Roedel iommu->gcmd |= DMA_GCMD_TE;
1115672cf6dfSJoerg Roedel if (sts & DMA_GSTS_QIES)
1116672cf6dfSJoerg Roedel iommu->gcmd |= DMA_GCMD_QIE;
1117672cf6dfSJoerg Roedel
1118a6a5006dSKan Liang if (alloc_iommu_pmu(iommu))
1119a6a5006dSKan Liang pr_debug("Cannot alloc PMU for iommu (seq_id = %d)\n", iommu->seq_id);
1120a6a5006dSKan Liang
1121672cf6dfSJoerg Roedel raw_spin_lock_init(&iommu->register_lock);
1122672cf6dfSJoerg Roedel
1123c40aaaacSDavid Woodhouse /*
11241adf3cc2SLu Baolu * A value of N in the PSS field of the ECAP register indicates that
11251adf3cc2SLu Baolu * the hardware supports a PASID field of N+1 bits.
11261adf3cc2SLu Baolu */
11271adf3cc2SLu Baolu if (pasid_supported(iommu))
11281adf3cc2SLu Baolu iommu->iommu.max_pasids = 2UL << ecap_pss(iommu->ecap);
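	/*
	 * For example (illustrative, added note): PSS = 19, the
	 * architectural maximum, yields max_pasids = 2UL << 19 = 2^20.
	 */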
11291adf3cc2SLu Baolu
11301adf3cc2SLu Baolu /*
1131c40aaaacSDavid Woodhouse * This is only for hotplug; at boot time intel_iommu_enabled won't
1132c40aaaacSDavid Woodhouse * be set yet. When intel_iommu_init() runs, it registers the units
1133c40aaaacSDavid Woodhouse * present at boot time, then sets intel_iommu_enabled.
1134c40aaaacSDavid Woodhouse */
1135c40aaaacSDavid Woodhouse if (intel_iommu_enabled && !drhd->ignored) {
1136672cf6dfSJoerg Roedel err = iommu_device_sysfs_add(&iommu->iommu, NULL,
1137672cf6dfSJoerg Roedel intel_iommu_groups,
1138672cf6dfSJoerg Roedel "%s", iommu->name);
1139672cf6dfSJoerg Roedel if (err)
1140672cf6dfSJoerg Roedel goto err_unmap;
1141672cf6dfSJoerg Roedel
11422d471b20SRobin Murphy err = iommu_device_register(&iommu->iommu, &intel_iommu_ops, NULL);
1143672cf6dfSJoerg Roedel if (err)
11440ee74d5aSRolf Eike Beer goto err_sysfs;
1145d8a7c0cfSKan Liang
1146d8a7c0cfSKan Liang iommu_pmu_register(iommu);
1147672cf6dfSJoerg Roedel }
1148672cf6dfSJoerg Roedel
1149672cf6dfSJoerg Roedel drhd->iommu = iommu;
1150b1012ca8SLu Baolu iommu->drhd = drhd;
1151672cf6dfSJoerg Roedel
1152672cf6dfSJoerg Roedel return 0;
1153672cf6dfSJoerg Roedel
11540ee74d5aSRolf Eike Beer err_sysfs:
11550ee74d5aSRolf Eike Beer iommu_device_sysfs_remove(&iommu->iommu);
1156672cf6dfSJoerg Roedel err_unmap:
1157a6a5006dSKan Liang free_iommu_pmu(iommu);
1158672cf6dfSJoerg Roedel unmap_iommu(iommu);
1159672cf6dfSJoerg Roedel error_free_seq_id:
1160913432f2SLu Baolu ida_free(&dmar_seq_ids, iommu->seq_id);
1161672cf6dfSJoerg Roedel error:
1162672cf6dfSJoerg Roedel kfree(iommu);
1163672cf6dfSJoerg Roedel return err;
1164672cf6dfSJoerg Roedel }
1165672cf6dfSJoerg Roedel
free_iommu(struct intel_iommu * iommu)1166672cf6dfSJoerg Roedel static void free_iommu(struct intel_iommu *iommu)
1167672cf6dfSJoerg Roedel {
11689def3b1aSBartosz Golaszewski if (intel_iommu_enabled && !iommu->drhd->ignored) {
1169d8a7c0cfSKan Liang iommu_pmu_unregister(iommu);
1170672cf6dfSJoerg Roedel iommu_device_unregister(&iommu->iommu);
1171672cf6dfSJoerg Roedel iommu_device_sysfs_remove(&iommu->iommu);
1172672cf6dfSJoerg Roedel }
1173672cf6dfSJoerg Roedel
1174a6a5006dSKan Liang free_iommu_pmu(iommu);
1175a6a5006dSKan Liang
1176672cf6dfSJoerg Roedel if (iommu->irq) {
1177672cf6dfSJoerg Roedel if (iommu->pr_irq) {
1178672cf6dfSJoerg Roedel free_irq(iommu->pr_irq, iommu);
1179672cf6dfSJoerg Roedel dmar_free_hwirq(iommu->pr_irq);
1180672cf6dfSJoerg Roedel iommu->pr_irq = 0;
1181672cf6dfSJoerg Roedel }
1182672cf6dfSJoerg Roedel free_irq(iommu->irq, iommu);
1183672cf6dfSJoerg Roedel dmar_free_hwirq(iommu->irq);
1184672cf6dfSJoerg Roedel iommu->irq = 0;
1185672cf6dfSJoerg Roedel }
1186672cf6dfSJoerg Roedel
1187672cf6dfSJoerg Roedel if (iommu->qi) {
1188672cf6dfSJoerg Roedel free_page((unsigned long)iommu->qi->desc);
1189672cf6dfSJoerg Roedel kfree(iommu->qi->desc_status);
1190672cf6dfSJoerg Roedel kfree(iommu->qi);
1191672cf6dfSJoerg Roedel }
1192672cf6dfSJoerg Roedel
1193672cf6dfSJoerg Roedel if (iommu->reg)
1194672cf6dfSJoerg Roedel unmap_iommu(iommu);
1195672cf6dfSJoerg Roedel
1196913432f2SLu Baolu ida_free(&dmar_seq_ids, iommu->seq_id);
1197672cf6dfSJoerg Roedel kfree(iommu);
1198672cf6dfSJoerg Roedel }
1199672cf6dfSJoerg Roedel
1200672cf6dfSJoerg Roedel /*
1201672cf6dfSJoerg Roedel * Reclaim all the submitted descriptors which have completed their work.
1202672cf6dfSJoerg Roedel */
reclaim_free_desc(struct q_inval * qi)1203672cf6dfSJoerg Roedel static inline void reclaim_free_desc(struct q_inval *qi)
1204672cf6dfSJoerg Roedel {
1205*dfdbc5baSSanjay K Kumar while (qi->desc_status[qi->free_tail] == QI_FREE && qi->free_tail != qi->free_head) {
1206672cf6dfSJoerg Roedel qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
1207672cf6dfSJoerg Roedel qi->free_cnt++;
1208672cf6dfSJoerg Roedel }
1209672cf6dfSJoerg Roedel }
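/*
 * Illustrative walk-through (not from the original source): the queue is a
 * ring of QI_LENGTH slots. free_tail chases free_head, stopping at the first
 * slot that is not QI_FREE; e.g. with free_tail = 254 and the next two slots
 * freed, free_tail wraps around to 0 and free_cnt grows by 2.
 */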
1210672cf6dfSJoerg Roedel
qi_type_string(u8 type)12116ca69e58SLu Baolu static const char *qi_type_string(u8 type)
12126ca69e58SLu Baolu {
12136ca69e58SLu Baolu switch (type) {
12146ca69e58SLu Baolu case QI_CC_TYPE:
12156ca69e58SLu Baolu return "Context-cache Invalidation";
12166ca69e58SLu Baolu case QI_IOTLB_TYPE:
12176ca69e58SLu Baolu return "IOTLB Invalidation";
12186ca69e58SLu Baolu case QI_DIOTLB_TYPE:
12196ca69e58SLu Baolu return "Device-TLB Invalidation";
12206ca69e58SLu Baolu case QI_IEC_TYPE:
12216ca69e58SLu Baolu return "Interrupt Entry Cache Invalidation";
12226ca69e58SLu Baolu case QI_IWD_TYPE:
12236ca69e58SLu Baolu return "Invalidation Wait";
12246ca69e58SLu Baolu case QI_EIOTLB_TYPE:
12256ca69e58SLu Baolu return "PASID-based IOTLB Invalidation";
12266ca69e58SLu Baolu case QI_PC_TYPE:
12276ca69e58SLu Baolu return "PASID-cache Invalidation";
12286ca69e58SLu Baolu case QI_DEIOTLB_TYPE:
12296ca69e58SLu Baolu return "PASID-based Device-TLB Invalidation";
12306ca69e58SLu Baolu case QI_PGRP_RESP_TYPE:
12316ca69e58SLu Baolu return "Page Group Response";
12326ca69e58SLu Baolu default:
12336ca69e58SLu Baolu return "UNKNOWN";
12346ca69e58SLu Baolu }
12356ca69e58SLu Baolu }
12366ca69e58SLu Baolu
qi_dump_fault(struct intel_iommu * iommu,u32 fault)12376ca69e58SLu Baolu static void qi_dump_fault(struct intel_iommu *iommu, u32 fault)
12386ca69e58SLu Baolu {
12396ca69e58SLu Baolu unsigned int head = dmar_readl(iommu->reg + DMAR_IQH_REG);
12406ca69e58SLu Baolu u64 iqe_err = dmar_readq(iommu->reg + DMAR_IQER_REG);
12416ca69e58SLu Baolu struct qi_desc *desc = iommu->qi->desc + head;
12426ca69e58SLu Baolu
12436ca69e58SLu Baolu if (fault & DMA_FSTS_IQE)
12446ca69e58SLu Baolu pr_err("VT-d detected Invalidation Queue Error: Reason %llx",
12456ca69e58SLu Baolu DMAR_IQER_REG_IQEI(iqe_err));
12466ca69e58SLu Baolu if (fault & DMA_FSTS_ITE)
12476ca69e58SLu Baolu pr_err("VT-d detected Invalidation Time-out Error: SID %llx",
12486ca69e58SLu Baolu DMAR_IQER_REG_ITESID(iqe_err));
12496ca69e58SLu Baolu if (fault & DMA_FSTS_ICE)
12506ca69e58SLu Baolu pr_err("VT-d detected Invalidation Completion Error: SID %llx",
12516ca69e58SLu Baolu DMAR_IQER_REG_ICESID(iqe_err));
12526ca69e58SLu Baolu
12536ca69e58SLu Baolu pr_err("QI HEAD: %s qw0 = 0x%llx, qw1 = 0x%llx\n",
12546ca69e58SLu Baolu qi_type_string(desc->qw0 & 0xf),
12556ca69e58SLu Baolu (unsigned long long)desc->qw0,
12566ca69e58SLu Baolu (unsigned long long)desc->qw1);
12576ca69e58SLu Baolu
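	/*
	 * Step back one slot (with wraparound) to also dump the descriptor
	 * that preceded the failing one at the queue head.
	 */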
12586ca69e58SLu Baolu head = ((head >> qi_shift(iommu)) + QI_LENGTH - 1) % QI_LENGTH;
12596ca69e58SLu Baolu head <<= qi_shift(iommu);
12606ca69e58SLu Baolu desc = iommu->qi->desc + head;
12616ca69e58SLu Baolu
12626ca69e58SLu Baolu pr_err("QI PRIOR: %s qw0 = 0x%llx, qw1 = 0x%llx\n",
12636ca69e58SLu Baolu qi_type_string(desc->qw0 & 0xf),
12646ca69e58SLu Baolu (unsigned long long)desc->qw0,
12656ca69e58SLu Baolu (unsigned long long)desc->qw1);
12666ca69e58SLu Baolu }
12676ca69e58SLu Baolu
qi_check_fault(struct intel_iommu * iommu,int index,int wait_index)1268672cf6dfSJoerg Roedel static int qi_check_fault(struct intel_iommu *iommu, int index, int wait_index)
1269672cf6dfSJoerg Roedel {
1270672cf6dfSJoerg Roedel u32 fault;
1271672cf6dfSJoerg Roedel int head, tail;
1272672cf6dfSJoerg Roedel struct q_inval *qi = iommu->qi;
1273672cf6dfSJoerg Roedel int shift = qi_shift(iommu);
1274672cf6dfSJoerg Roedel
1275672cf6dfSJoerg Roedel if (qi->desc_status[wait_index] == QI_ABORT)
1276672cf6dfSJoerg Roedel return -EAGAIN;
1277672cf6dfSJoerg Roedel
1278672cf6dfSJoerg Roedel fault = readl(iommu->reg + DMAR_FSTS_REG);
12796ca69e58SLu Baolu if (fault & (DMA_FSTS_IQE | DMA_FSTS_ITE | DMA_FSTS_ICE))
12806ca69e58SLu Baolu qi_dump_fault(iommu, fault);
1281672cf6dfSJoerg Roedel
1282672cf6dfSJoerg Roedel /*
1283672cf6dfSJoerg Roedel * If IQE happens, the head points to the descriptor associated
1284672cf6dfSJoerg Roedel * with the error. No new descriptors are fetched until the IQE
1285672cf6dfSJoerg Roedel * is cleared.
1286672cf6dfSJoerg Roedel */
1287672cf6dfSJoerg Roedel if (fault & DMA_FSTS_IQE) {
1288672cf6dfSJoerg Roedel head = readl(iommu->reg + DMAR_IQH_REG);
1289672cf6dfSJoerg Roedel if ((head >> shift) == index) {
1290672cf6dfSJoerg Roedel struct qi_desc *desc = qi->desc + head;
1291672cf6dfSJoerg Roedel
1292672cf6dfSJoerg Roedel /*
1293672cf6dfSJoerg Roedel * desc->qw2 and desc->qw3 are either reserved or
1294672cf6dfSJoerg Roedel * used by software as private data. We won't print
1295672cf6dfSJoerg Roedel * out these two qw's for security considerations.
1296672cf6dfSJoerg Roedel */
1297672cf6dfSJoerg Roedel memcpy(desc, qi->desc + (wait_index << shift),
1298672cf6dfSJoerg Roedel 1 << shift);
1299672cf6dfSJoerg Roedel writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
13006ca69e58SLu Baolu pr_info("Invalidation Queue Error (IQE) cleared\n");
1301672cf6dfSJoerg Roedel return -EINVAL;
1302672cf6dfSJoerg Roedel }
1303672cf6dfSJoerg Roedel }
1304672cf6dfSJoerg Roedel
1305672cf6dfSJoerg Roedel /*
1306672cf6dfSJoerg Roedel * If ITE happens, all pending wait_desc commands are aborted.
1307672cf6dfSJoerg Roedel * No new descriptors are fetched until the ITE is cleared.
1308672cf6dfSJoerg Roedel */
1309672cf6dfSJoerg Roedel if (fault & DMA_FSTS_ITE) {
1310672cf6dfSJoerg Roedel head = readl(iommu->reg + DMAR_IQH_REG);
1311672cf6dfSJoerg Roedel head = ((head >> shift) - 1 + QI_LENGTH) % QI_LENGTH;
1312672cf6dfSJoerg Roedel head |= 1;
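		/*
		 * Assumption (added note): wait descriptors sit in the odd
		 * slots, so head is aligned to a wait-descriptor index before
		 * walking back two slots at a time below.
		 */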
1313672cf6dfSJoerg Roedel tail = readl(iommu->reg + DMAR_IQT_REG);
1314672cf6dfSJoerg Roedel tail = ((tail >> shift) - 1 + QI_LENGTH) % QI_LENGTH;
1315672cf6dfSJoerg Roedel
1316672cf6dfSJoerg Roedel writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);
13176ca69e58SLu Baolu pr_info("Invalidation Time-out Error (ITE) cleared\n");
1318672cf6dfSJoerg Roedel
1319672cf6dfSJoerg Roedel do {
1320672cf6dfSJoerg Roedel if (qi->desc_status[head] == QI_IN_USE)
1321672cf6dfSJoerg Roedel qi->desc_status[head] = QI_ABORT;
1322672cf6dfSJoerg Roedel head = (head - 2 + QI_LENGTH) % QI_LENGTH;
1323672cf6dfSJoerg Roedel } while (head != tail);
1324672cf6dfSJoerg Roedel
1325672cf6dfSJoerg Roedel if (qi->desc_status[wait_index] == QI_ABORT)
1326672cf6dfSJoerg Roedel return -EAGAIN;
1327672cf6dfSJoerg Roedel }
1328672cf6dfSJoerg Roedel
13296ca69e58SLu Baolu if (fault & DMA_FSTS_ICE) {
1330672cf6dfSJoerg Roedel writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);
13316ca69e58SLu Baolu pr_info("Invalidation Completion Error (ICE) cleared\n");
13326ca69e58SLu Baolu }
1333672cf6dfSJoerg Roedel
1334672cf6dfSJoerg Roedel return 0;
1335672cf6dfSJoerg Roedel }
1336672cf6dfSJoerg Roedel
1337672cf6dfSJoerg Roedel /*
1338672cf6dfSJoerg Roedel * Function to submit invalidation descriptors of all types to the queued
1339672cf6dfSJoerg Roedel * invalidation interface (QI). Multiple descriptors can be submitted at a
1340672cf6dfSJoerg Roedel * time; a wait descriptor is appended to each submission to ensure that
1341672cf6dfSJoerg Roedel * hardware has completed the invalidation before returning. Wait descriptors
1342672cf6dfSJoerg Roedel * can be part of the submission, but they will not be polled for completion.
1343672cf6dfSJoerg Roedel */
qi_submit_sync(struct intel_iommu * iommu,struct qi_desc * desc,unsigned int count,unsigned long options)1344672cf6dfSJoerg Roedel int qi_submit_sync(struct intel_iommu *iommu, struct qi_desc *desc,
1345672cf6dfSJoerg Roedel unsigned int count, unsigned long options)
1346672cf6dfSJoerg Roedel {
1347672cf6dfSJoerg Roedel struct q_inval *qi = iommu->qi;
134874eb87a0SLu Baolu s64 devtlb_start_ktime = 0;
134974eb87a0SLu Baolu s64 iotlb_start_ktime = 0;
135074eb87a0SLu Baolu s64 iec_start_ktime = 0;
1351672cf6dfSJoerg Roedel struct qi_desc wait_desc;
1352672cf6dfSJoerg Roedel int wait_index, index;
1353672cf6dfSJoerg Roedel unsigned long flags;
1354672cf6dfSJoerg Roedel int offset, shift;
1355672cf6dfSJoerg Roedel int rc, i;
135674eb87a0SLu Baolu u64 type;
1357672cf6dfSJoerg Roedel
1358672cf6dfSJoerg Roedel if (!qi)
1359672cf6dfSJoerg Roedel return 0;
1360672cf6dfSJoerg Roedel
136174eb87a0SLu Baolu type = desc->qw0 & GENMASK_ULL(3, 0);
136274eb87a0SLu Baolu
136374eb87a0SLu Baolu if ((type == QI_IOTLB_TYPE || type == QI_EIOTLB_TYPE) &&
136474eb87a0SLu Baolu dmar_latency_enabled(iommu, DMAR_LATENCY_INV_IOTLB))
136574eb87a0SLu Baolu iotlb_start_ktime = ktime_to_ns(ktime_get());
136674eb87a0SLu Baolu
136774eb87a0SLu Baolu if ((type == QI_DIOTLB_TYPE || type == QI_DEIOTLB_TYPE) &&
136874eb87a0SLu Baolu dmar_latency_enabled(iommu, DMAR_LATENCY_INV_DEVTLB))
136974eb87a0SLu Baolu devtlb_start_ktime = ktime_to_ns(ktime_get());
137074eb87a0SLu Baolu
137174eb87a0SLu Baolu if (type == QI_IEC_TYPE &&
137274eb87a0SLu Baolu dmar_latency_enabled(iommu, DMAR_LATENCY_INV_IEC))
137374eb87a0SLu Baolu iec_start_ktime = ktime_to_ns(ktime_get());
137474eb87a0SLu Baolu
1375672cf6dfSJoerg Roedel restart:
1376672cf6dfSJoerg Roedel rc = 0;
1377672cf6dfSJoerg Roedel
1378672cf6dfSJoerg Roedel raw_spin_lock_irqsave(&qi->q_lock, flags);
1379672cf6dfSJoerg Roedel /*
1380672cf6dfSJoerg Roedel * Check if we have enough empty slots in the queue to submit;
1381672cf6dfSJoerg Roedel * the calculation is based on:
1382672cf6dfSJoerg Roedel * # of desc + 1 wait desc + 1 space between head and tail
1383672cf6dfSJoerg Roedel */
1384672cf6dfSJoerg Roedel while (qi->free_cnt < count + 2) {
1385672cf6dfSJoerg Roedel raw_spin_unlock_irqrestore(&qi->q_lock, flags);
1386672cf6dfSJoerg Roedel cpu_relax();
1387672cf6dfSJoerg Roedel raw_spin_lock_irqsave(&qi->q_lock, flags);
1388672cf6dfSJoerg Roedel }
1389672cf6dfSJoerg Roedel
1390672cf6dfSJoerg Roedel index = qi->free_head;
1391672cf6dfSJoerg Roedel wait_index = (index + count) % QI_LENGTH;
1392672cf6dfSJoerg Roedel shift = qi_shift(iommu);
1393672cf6dfSJoerg Roedel
1394672cf6dfSJoerg Roedel for (i = 0; i < count; i++) {
1395672cf6dfSJoerg Roedel offset = ((index + i) % QI_LENGTH) << shift;
1396672cf6dfSJoerg Roedel memcpy(qi->desc + offset, &desc[i], 1 << shift);
1397672cf6dfSJoerg Roedel qi->desc_status[(index + i) % QI_LENGTH] = QI_IN_USE;
1398f2dd8717SLu Baolu trace_qi_submit(iommu, desc[i].qw0, desc[i].qw1,
1399f2dd8717SLu Baolu desc[i].qw2, desc[i].qw3);
1400672cf6dfSJoerg Roedel }
1401672cf6dfSJoerg Roedel qi->desc_status[wait_index] = QI_IN_USE;
1402672cf6dfSJoerg Roedel
1403672cf6dfSJoerg Roedel wait_desc.qw0 = QI_IWD_STATUS_DATA(QI_DONE) |
1404672cf6dfSJoerg Roedel QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
1405672cf6dfSJoerg Roedel if (options & QI_OPT_WAIT_DRAIN)
1406672cf6dfSJoerg Roedel wait_desc.qw0 |= QI_IWD_PRQ_DRAIN;
1407672cf6dfSJoerg Roedel wait_desc.qw1 = virt_to_phys(&qi->desc_status[wait_index]);
1408672cf6dfSJoerg Roedel wait_desc.qw2 = 0;
1409672cf6dfSJoerg Roedel wait_desc.qw3 = 0;
1410672cf6dfSJoerg Roedel
1411672cf6dfSJoerg Roedel offset = wait_index << shift;
1412672cf6dfSJoerg Roedel memcpy(qi->desc + offset, &wait_desc, 1 << shift);
1413672cf6dfSJoerg Roedel
1414672cf6dfSJoerg Roedel qi->free_head = (qi->free_head + count + 1) % QI_LENGTH;
1415672cf6dfSJoerg Roedel qi->free_cnt -= count + 1;
1416672cf6dfSJoerg Roedel
1417672cf6dfSJoerg Roedel /*
1418672cf6dfSJoerg Roedel * Update the HW tail register to indicate the presence of
1419672cf6dfSJoerg Roedel * new descriptors.
1420672cf6dfSJoerg Roedel */
1421672cf6dfSJoerg Roedel writel(qi->free_head << shift, iommu->reg + DMAR_IQT_REG);
1422672cf6dfSJoerg Roedel
14237cfa7abbSJacob Pan while (READ_ONCE(qi->desc_status[wait_index]) != QI_DONE) {
1424672cf6dfSJoerg Roedel /*
1425672cf6dfSJoerg Roedel * We leave the interrupts disabled to prevent interrupt
1426672cf6dfSJoerg Roedel * context from queueing another cmd while a cmd is already submitted
1427672cf6dfSJoerg Roedel * and waiting for completion on this CPU. This is to avoid
1428672cf6dfSJoerg Roedel * a deadlock where the interrupt context could wait indefinitely
1429672cf6dfSJoerg Roedel * for free slots in the queue.
1430672cf6dfSJoerg Roedel */
1431672cf6dfSJoerg Roedel rc = qi_check_fault(iommu, index, wait_index);
1432672cf6dfSJoerg Roedel if (rc)
1433672cf6dfSJoerg Roedel break;
1434672cf6dfSJoerg Roedel
1435672cf6dfSJoerg Roedel raw_spin_unlock(&qi->q_lock);
1436672cf6dfSJoerg Roedel cpu_relax();
1437672cf6dfSJoerg Roedel raw_spin_lock(&qi->q_lock);
1438672cf6dfSJoerg Roedel }
1439672cf6dfSJoerg Roedel
1440*dfdbc5baSSanjay K Kumar /*
1441*dfdbc5baSSanjay K Kumar * The reclaim code can free descriptors from multiple submissions
1442*dfdbc5baSSanjay K Kumar * starting from the tail of the queue. When count == 0, the
1443*dfdbc5baSSanjay K Kumar * status of the standalone wait descriptor at the tail of the queue
1444*dfdbc5baSSanjay K Kumar * must be set to QI_FREE to allow the reclaim code to proceed.
1445*dfdbc5baSSanjay K Kumar * It is also possible that descriptors from one of the previous
1446*dfdbc5baSSanjay K Kumar * submissions have to be reclaimed by a subsequent submission.
1447*dfdbc5baSSanjay K Kumar */
1448*dfdbc5baSSanjay K Kumar for (i = 0; i <= count; i++)
1449*dfdbc5baSSanjay K Kumar qi->desc_status[(index + i) % QI_LENGTH] = QI_FREE;
1450672cf6dfSJoerg Roedel
1451672cf6dfSJoerg Roedel reclaim_free_desc(qi);
1452672cf6dfSJoerg Roedel raw_spin_unlock_irqrestore(&qi->q_lock, flags);
1453672cf6dfSJoerg Roedel
1454672cf6dfSJoerg Roedel if (rc == -EAGAIN)
1455672cf6dfSJoerg Roedel goto restart;
1456672cf6dfSJoerg Roedel
145774eb87a0SLu Baolu if (iotlb_start_ktime)
145874eb87a0SLu Baolu dmar_latency_update(iommu, DMAR_LATENCY_INV_IOTLB,
145974eb87a0SLu Baolu ktime_to_ns(ktime_get()) - iotlb_start_ktime);
146074eb87a0SLu Baolu
146174eb87a0SLu Baolu if (devtlb_start_ktime)
146274eb87a0SLu Baolu dmar_latency_update(iommu, DMAR_LATENCY_INV_DEVTLB,
146374eb87a0SLu Baolu ktime_to_ns(ktime_get()) - devtlb_start_ktime);
146474eb87a0SLu Baolu
146574eb87a0SLu Baolu if (iec_start_ktime)
146674eb87a0SLu Baolu dmar_latency_update(iommu, DMAR_LATENCY_INV_IEC,
146774eb87a0SLu Baolu ktime_to_ns(ktime_get()) - iec_start_ktime);
146874eb87a0SLu Baolu
1469672cf6dfSJoerg Roedel return rc;
1470672cf6dfSJoerg Roedel }
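/*
 * The helpers below (qi_global_iec(), qi_flush_context(), qi_flush_iotlb(),
 * ...) are canonical single-descriptor users of qi_submit_sync(): each builds
 * one struct qi_desc and submits it with count == 1 and no options.
 */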
1471672cf6dfSJoerg Roedel
1472672cf6dfSJoerg Roedel /*
1473672cf6dfSJoerg Roedel * Flush the global interrupt entry cache.
1474672cf6dfSJoerg Roedel */
qi_global_iec(struct intel_iommu * iommu)1475672cf6dfSJoerg Roedel void qi_global_iec(struct intel_iommu *iommu)
1476672cf6dfSJoerg Roedel {
1477672cf6dfSJoerg Roedel struct qi_desc desc;
1478672cf6dfSJoerg Roedel
1479672cf6dfSJoerg Roedel desc.qw0 = QI_IEC_TYPE;
1480672cf6dfSJoerg Roedel desc.qw1 = 0;
1481672cf6dfSJoerg Roedel desc.qw2 = 0;
1482672cf6dfSJoerg Roedel desc.qw3 = 0;
1483672cf6dfSJoerg Roedel
1484672cf6dfSJoerg Roedel /* should never fail */
1485672cf6dfSJoerg Roedel qi_submit_sync(iommu, &desc, 1, 0);
1486672cf6dfSJoerg Roedel }
1487672cf6dfSJoerg Roedel
qi_flush_context(struct intel_iommu * iommu,u16 did,u16 sid,u8 fm,u64 type)1488672cf6dfSJoerg Roedel void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
1489672cf6dfSJoerg Roedel u64 type)
1490672cf6dfSJoerg Roedel {
1491672cf6dfSJoerg Roedel struct qi_desc desc;
1492672cf6dfSJoerg Roedel
1493672cf6dfSJoerg Roedel desc.qw0 = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
1494672cf6dfSJoerg Roedel | QI_CC_GRAN(type) | QI_CC_TYPE;
1495672cf6dfSJoerg Roedel desc.qw1 = 0;
1496672cf6dfSJoerg Roedel desc.qw2 = 0;
1497672cf6dfSJoerg Roedel desc.qw3 = 0;
1498672cf6dfSJoerg Roedel
1499672cf6dfSJoerg Roedel qi_submit_sync(iommu, &desc, 1, 0);
1500672cf6dfSJoerg Roedel }
1501672cf6dfSJoerg Roedel
qi_flush_iotlb(struct intel_iommu * iommu,u16 did,u64 addr,unsigned int size_order,u64 type)1502672cf6dfSJoerg Roedel void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
1503672cf6dfSJoerg Roedel unsigned int size_order, u64 type)
1504672cf6dfSJoerg Roedel {
1505672cf6dfSJoerg Roedel u8 dw = 0, dr = 0;
1506672cf6dfSJoerg Roedel
1507672cf6dfSJoerg Roedel struct qi_desc desc;
1508672cf6dfSJoerg Roedel int ih = 0;
1509672cf6dfSJoerg Roedel
1510672cf6dfSJoerg Roedel if (cap_write_drain(iommu->cap))
1511672cf6dfSJoerg Roedel dw = 1;
1512672cf6dfSJoerg Roedel
1513672cf6dfSJoerg Roedel if (cap_read_drain(iommu->cap))
1514672cf6dfSJoerg Roedel dr = 1;
1515672cf6dfSJoerg Roedel
1516672cf6dfSJoerg Roedel desc.qw0 = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
1517672cf6dfSJoerg Roedel | QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
1518672cf6dfSJoerg Roedel desc.qw1 = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
1519672cf6dfSJoerg Roedel | QI_IOTLB_AM(size_order);
1520672cf6dfSJoerg Roedel desc.qw2 = 0;
1521672cf6dfSJoerg Roedel desc.qw3 = 0;
1522672cf6dfSJoerg Roedel
1523672cf6dfSJoerg Roedel qi_submit_sync(iommu, &desc, 1, 0);
1524672cf6dfSJoerg Roedel }
1525672cf6dfSJoerg Roedel
qi_flush_dev_iotlb(struct intel_iommu * iommu,u16 sid,u16 pfsid,u16 qdep,u64 addr,unsigned mask)1526672cf6dfSJoerg Roedel void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
1527672cf6dfSJoerg Roedel u16 qdep, u64 addr, unsigned mask)
1528672cf6dfSJoerg Roedel {
1529672cf6dfSJoerg Roedel struct qi_desc desc;
1530672cf6dfSJoerg Roedel
15317ccb5777SLu Baolu /*
15327ccb5777SLu Baolu * VT-d spec, section 4.3:
15337ccb5777SLu Baolu *
15347ccb5777SLu Baolu * Software is recommended to not submit any Device-TLB invalidation
15357ccb5777SLu Baolu * requests while address remapping hardware is disabled.
15367ccb5777SLu Baolu */
15377ccb5777SLu Baolu if (!(iommu->gcmd & DMA_GCMD_TE))
15387ccb5777SLu Baolu return;
15397ccb5777SLu Baolu
1540672cf6dfSJoerg Roedel if (mask) {
1541672cf6dfSJoerg Roedel addr |= (1ULL << (VTD_PAGE_SHIFT + mask - 1)) - 1;
1542672cf6dfSJoerg Roedel desc.qw1 = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
1543672cf6dfSJoerg Roedel } else {
1544672cf6dfSJoerg Roedel desc.qw1 = QI_DEV_IOTLB_ADDR(addr);
}
1545672cf6dfSJoerg Roedel
1546672cf6dfSJoerg Roedel if (qdep >= QI_DEV_IOTLB_MAX_INVS)
1547672cf6dfSJoerg Roedel qdep = 0;
1548672cf6dfSJoerg Roedel
1549672cf6dfSJoerg Roedel desc.qw0 = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
1550672cf6dfSJoerg Roedel QI_DIOTLB_TYPE | QI_DEV_IOTLB_PFSID(pfsid);
1551672cf6dfSJoerg Roedel desc.qw2 = 0;
1552672cf6dfSJoerg Roedel desc.qw3 = 0;
1553672cf6dfSJoerg Roedel
1554672cf6dfSJoerg Roedel qi_submit_sync(iommu, &desc, 1, 0);
1555672cf6dfSJoerg Roedel }
1556672cf6dfSJoerg Roedel
1557672cf6dfSJoerg Roedel /* PASID-based IOTLB invalidation */
qi_flush_piotlb(struct intel_iommu * iommu,u16 did,u32 pasid,u64 addr,unsigned long npages,bool ih)1558672cf6dfSJoerg Roedel void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
1559672cf6dfSJoerg Roedel unsigned long npages, bool ih)
1560672cf6dfSJoerg Roedel {
1561672cf6dfSJoerg Roedel struct qi_desc desc = {.qw2 = 0, .qw3 = 0};
1562672cf6dfSJoerg Roedel
1563672cf6dfSJoerg Roedel /*
1564672cf6dfSJoerg Roedel * npages == -1 means a PASID-selective invalidation; otherwise, a
1565672cf6dfSJoerg Roedel * positive value requests a page-selective-within-PASID invalidation.
1566672cf6dfSJoerg Roedel * 0 is not a valid input.
1567672cf6dfSJoerg Roedel */
1568672cf6dfSJoerg Roedel if (WARN_ON(!npages)) {
1569672cf6dfSJoerg Roedel pr_err("Invalid input npages = %ld\n", npages);
1570672cf6dfSJoerg Roedel return;
1571672cf6dfSJoerg Roedel }
1572672cf6dfSJoerg Roedel
1573672cf6dfSJoerg Roedel if (npages == -1) {
1574672cf6dfSJoerg Roedel desc.qw0 = QI_EIOTLB_PASID(pasid) |
1575672cf6dfSJoerg Roedel QI_EIOTLB_DID(did) |
1576672cf6dfSJoerg Roedel QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
1577672cf6dfSJoerg Roedel QI_EIOTLB_TYPE;
1578672cf6dfSJoerg Roedel desc.qw1 = 0;
1579672cf6dfSJoerg Roedel } else {
1580672cf6dfSJoerg Roedel int mask = ilog2(__roundup_pow_of_two(npages));
1581672cf6dfSJoerg Roedel unsigned long align = (1ULL << (VTD_PAGE_SHIFT + mask));
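		/*
		 * For example (illustrative, added note): npages = 9 gives
		 * mask = ilog2(roundup_pow_of_two(9)) = ilog2(16) = 4 and
		 * align = 2^(VTD_PAGE_SHIFT + 4) = 64KB.
		 */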
1582672cf6dfSJoerg Roedel
15831efd17e7SLu Baolu if (WARN_ON_ONCE(!IS_ALIGNED(addr, align)))
15841efd17e7SLu Baolu addr = ALIGN_DOWN(addr, align);
1585672cf6dfSJoerg Roedel
1586672cf6dfSJoerg Roedel desc.qw0 = QI_EIOTLB_PASID(pasid) |
1587672cf6dfSJoerg Roedel QI_EIOTLB_DID(did) |
1588672cf6dfSJoerg Roedel QI_EIOTLB_GRAN(QI_GRAN_PSI_PASID) |
1589672cf6dfSJoerg Roedel QI_EIOTLB_TYPE;
1590672cf6dfSJoerg Roedel desc.qw1 = QI_EIOTLB_ADDR(addr) |
1591672cf6dfSJoerg Roedel QI_EIOTLB_IH(ih) |
1592672cf6dfSJoerg Roedel QI_EIOTLB_AM(mask);
1593672cf6dfSJoerg Roedel }
1594672cf6dfSJoerg Roedel
1595672cf6dfSJoerg Roedel qi_submit_sync(iommu, &desc, 1, 0);
1596672cf6dfSJoerg Roedel }
1597672cf6dfSJoerg Roedel
1598672cf6dfSJoerg Roedel /* PASID-based device IOTLB Invalidate */
qi_flush_dev_iotlb_pasid(struct intel_iommu * iommu,u16 sid,u16 pfsid,u32 pasid,u16 qdep,u64 addr,unsigned int size_order)1599672cf6dfSJoerg Roedel void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
160078df6c86SJacob Pan u32 pasid, u16 qdep, u64 addr, unsigned int size_order)
1601672cf6dfSJoerg Roedel {
1602672cf6dfSJoerg Roedel unsigned long mask = 1UL << (VTD_PAGE_SHIFT + size_order - 1);
1603672cf6dfSJoerg Roedel struct qi_desc desc = {.qw1 = 0, .qw2 = 0, .qw3 = 0};
1604672cf6dfSJoerg Roedel
16057ccb5777SLu Baolu /*
16067ccb5777SLu Baolu * VT-d spec, section 4.3:
16077ccb5777SLu Baolu *
16087ccb5777SLu Baolu * Software is recommended to not submit any Device-TLB invalidation
16097ccb5777SLu Baolu * requests while address remapping hardware is disabled.
16107ccb5777SLu Baolu */
16117ccb5777SLu Baolu if (!(iommu->gcmd & DMA_GCMD_TE))
16127ccb5777SLu Baolu return;
16137ccb5777SLu Baolu
1614672cf6dfSJoerg Roedel desc.qw0 = QI_DEV_EIOTLB_PASID(pasid) | QI_DEV_EIOTLB_SID(sid) |
1615672cf6dfSJoerg Roedel QI_DEV_EIOTLB_QDEP(qdep) | QI_DEIOTLB_TYPE |
1616672cf6dfSJoerg Roedel QI_DEV_IOTLB_PFSID(pfsid);
1617672cf6dfSJoerg Roedel
1618672cf6dfSJoerg Roedel /*
1619672cf6dfSJoerg Roedel * If the S bit is 0, we only flush a single page. If the S bit is set,
1620672cf6dfSJoerg Roedel * the least significant zero bit indicates the invalidation address
1621672cf6dfSJoerg Roedel * range. VT-d spec 6.5.2.6.
1622672cf6dfSJoerg Roedel * e.g. a zero at address bit 12 indicates 8KB, at bit 13 indicates 16KB.
1623672cf6dfSJoerg Roedel * size_order = 0 means PAGE_SIZE (4KB).
1624672cf6dfSJoerg Roedel * Max Invs Pending (MIP) is set to 0 for now until we have DIT in
1625672cf6dfSJoerg Roedel * ECAP.
1626672cf6dfSJoerg Roedel */
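	/*
	 * Worked example (illustrative, added note): size_order = 2 encodes
	 * a 16KB range; bit 12 of qw1 is set to 1, bit 13 (the least
	 * significant zero bit) is cleared, and the S bit is set.
	 */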
1627494b3688SLu Baolu if (!IS_ALIGNED(addr, VTD_PAGE_SIZE << size_order))
1628288d08e7SLiu Yi L pr_warn_ratelimited("Invalidate non-aligned address %llx, order %d\n",
1629288d08e7SLiu Yi L addr, size_order);
1630288d08e7SLiu Yi L
1631288d08e7SLiu Yi L /* Take page address */
1632288d08e7SLiu Yi L desc.qw1 = QI_DEV_EIOTLB_ADDR(addr);
1633288d08e7SLiu Yi L
1634288d08e7SLiu Yi L if (size_order) {
1635288d08e7SLiu Yi L /*
1636288d08e7SLiu Yi L * Existing 0s in the address below size_order could be taken as
1637288d08e7SLiu Yi L * the least significant zero bit; we must set them to 1s to avoid
1638288d08e7SLiu Yi L * indicating a smaller size than desired.
1639288d08e7SLiu Yi L */
1640288d08e7SLiu Yi L desc.qw1 |= GENMASK_ULL(size_order + VTD_PAGE_SHIFT - 1,
1641288d08e7SLiu Yi L VTD_PAGE_SHIFT);
1642288d08e7SLiu Yi L /* Clear size_order bit to indicate size */
1643288d08e7SLiu Yi L desc.qw1 &= ~mask;
1644288d08e7SLiu Yi L /* Set the S bit to indicate flushing more than 1 page */
1645672cf6dfSJoerg Roedel desc.qw1 |= QI_DEV_EIOTLB_SIZE;
1646288d08e7SLiu Yi L }
1647672cf6dfSJoerg Roedel
1648672cf6dfSJoerg Roedel qi_submit_sync(iommu, &desc, 1, 0);
1649672cf6dfSJoerg Roedel }
1650672cf6dfSJoerg Roedel
qi_flush_pasid_cache(struct intel_iommu * iommu,u16 did,u64 granu,u32 pasid)1651672cf6dfSJoerg Roedel void qi_flush_pasid_cache(struct intel_iommu *iommu, u16 did,
1652c7b6bac9SFenghua Yu u64 granu, u32 pasid)
1653672cf6dfSJoerg Roedel {
1654672cf6dfSJoerg Roedel struct qi_desc desc = {.qw1 = 0, .qw2 = 0, .qw3 = 0};
1655672cf6dfSJoerg Roedel
1656672cf6dfSJoerg Roedel desc.qw0 = QI_PC_PASID(pasid) | QI_PC_DID(did) |
1657672cf6dfSJoerg Roedel QI_PC_GRAN(granu) | QI_PC_TYPE;
1658672cf6dfSJoerg Roedel qi_submit_sync(iommu, &desc, 1, 0);
1659672cf6dfSJoerg Roedel }
1660672cf6dfSJoerg Roedel
1661672cf6dfSJoerg Roedel /*
1662672cf6dfSJoerg Roedel * Disable Queued Invalidation interface.
1663672cf6dfSJoerg Roedel */
dmar_disable_qi(struct intel_iommu * iommu)1664672cf6dfSJoerg Roedel void dmar_disable_qi(struct intel_iommu *iommu)
1665672cf6dfSJoerg Roedel {
1666672cf6dfSJoerg Roedel unsigned long flags;
1667672cf6dfSJoerg Roedel u32 sts;
1668672cf6dfSJoerg Roedel cycles_t start_time = get_cycles();
1669672cf6dfSJoerg Roedel
1670672cf6dfSJoerg Roedel if (!ecap_qis(iommu->ecap))
1671672cf6dfSJoerg Roedel return;
1672672cf6dfSJoerg Roedel
1673672cf6dfSJoerg Roedel raw_spin_lock_irqsave(&iommu->register_lock, flags);
1674672cf6dfSJoerg Roedel
1675672cf6dfSJoerg Roedel sts = readl(iommu->reg + DMAR_GSTS_REG);
1676672cf6dfSJoerg Roedel if (!(sts & DMA_GSTS_QIES))
1677672cf6dfSJoerg Roedel goto end;
1678672cf6dfSJoerg Roedel
1679672cf6dfSJoerg Roedel /*
1680672cf6dfSJoerg Roedel * Give a chance to HW to complete the pending invalidation requests.
1681672cf6dfSJoerg Roedel */
1682672cf6dfSJoerg Roedel while ((readl(iommu->reg + DMAR_IQT_REG) !=
1683672cf6dfSJoerg Roedel readl(iommu->reg + DMAR_IQH_REG)) &&
1684672cf6dfSJoerg Roedel (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
1685672cf6dfSJoerg Roedel cpu_relax();
1686672cf6dfSJoerg Roedel
1687672cf6dfSJoerg Roedel iommu->gcmd &= ~DMA_GCMD_QIE;
1688672cf6dfSJoerg Roedel writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1689672cf6dfSJoerg Roedel
1690672cf6dfSJoerg Roedel IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
1691672cf6dfSJoerg Roedel !(sts & DMA_GSTS_QIES), sts);
1692672cf6dfSJoerg Roedel end:
1693672cf6dfSJoerg Roedel raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1694672cf6dfSJoerg Roedel }
1695672cf6dfSJoerg Roedel
1696672cf6dfSJoerg Roedel /*
1697672cf6dfSJoerg Roedel * Enable queued invalidation.
1698672cf6dfSJoerg Roedel */
__dmar_enable_qi(struct intel_iommu * iommu)1699672cf6dfSJoerg Roedel static void __dmar_enable_qi(struct intel_iommu *iommu)
1700672cf6dfSJoerg Roedel {
1701672cf6dfSJoerg Roedel u32 sts;
1702672cf6dfSJoerg Roedel unsigned long flags;
1703672cf6dfSJoerg Roedel struct q_inval *qi = iommu->qi;
1704672cf6dfSJoerg Roedel u64 val = virt_to_phys(qi->desc);
1705672cf6dfSJoerg Roedel
1706672cf6dfSJoerg Roedel qi->free_head = qi->free_tail = 0;
1707672cf6dfSJoerg Roedel qi->free_cnt = QI_LENGTH;
1708672cf6dfSJoerg Roedel
1709672cf6dfSJoerg Roedel /*
1710672cf6dfSJoerg Roedel * Set DW=1 and QS=1 in IQA_REG when Scalable Mode capability
1711672cf6dfSJoerg Roedel * is present.
1712672cf6dfSJoerg Roedel */
1713672cf6dfSJoerg Roedel if (ecap_smts(iommu->ecap))
1714b31064f8STina Zhang val |= BIT_ULL(11) | BIT_ULL(0);
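	/*
	 * Per the VT-d spec: DW (bit 11) = 1 selects 256-bit descriptors,
	 * and QS = 1 selects a two-page (8KB) queue, matching the two-page
	 * allocation in dmar_enable_qi() below.
	 */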
1715672cf6dfSJoerg Roedel
1716672cf6dfSJoerg Roedel raw_spin_lock_irqsave(&iommu->register_lock, flags);
1717672cf6dfSJoerg Roedel
1718672cf6dfSJoerg Roedel /* write zero to the tail reg */
1719672cf6dfSJoerg Roedel writel(0, iommu->reg + DMAR_IQT_REG);
1720672cf6dfSJoerg Roedel
1721672cf6dfSJoerg Roedel dmar_writeq(iommu->reg + DMAR_IQA_REG, val);
1722672cf6dfSJoerg Roedel
1723672cf6dfSJoerg Roedel iommu->gcmd |= DMA_GCMD_QIE;
1724672cf6dfSJoerg Roedel writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1725672cf6dfSJoerg Roedel
1726672cf6dfSJoerg Roedel /* Make sure hardware complete it */
1727672cf6dfSJoerg Roedel IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
1728672cf6dfSJoerg Roedel
1729672cf6dfSJoerg Roedel raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1730672cf6dfSJoerg Roedel }
1731672cf6dfSJoerg Roedel
1732672cf6dfSJoerg Roedel /*
1733672cf6dfSJoerg Roedel * Enable Queued Invalidation interface. This is a must to support
1734672cf6dfSJoerg Roedel * interrupt-remapping. Also used by DMA-remapping, which replaces
1735672cf6dfSJoerg Roedel * register based IOTLB invalidation.
1736672cf6dfSJoerg Roedel */
dmar_enable_qi(struct intel_iommu * iommu)1737672cf6dfSJoerg Roedel int dmar_enable_qi(struct intel_iommu *iommu)
1738672cf6dfSJoerg Roedel {
1739672cf6dfSJoerg Roedel struct q_inval *qi;
1740672cf6dfSJoerg Roedel struct page *desc_page;
1741672cf6dfSJoerg Roedel
1742672cf6dfSJoerg Roedel if (!ecap_qis(iommu->ecap))
1743672cf6dfSJoerg Roedel return -ENOENT;
1744672cf6dfSJoerg Roedel
1745672cf6dfSJoerg Roedel /*
1746672cf6dfSJoerg Roedel * Queued invalidation is already set up and enabled.
1747672cf6dfSJoerg Roedel */
1748672cf6dfSJoerg Roedel if (iommu->qi)
1749672cf6dfSJoerg Roedel return 0;
1750672cf6dfSJoerg Roedel
1751672cf6dfSJoerg Roedel iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
1752672cf6dfSJoerg Roedel if (!iommu->qi)
1753672cf6dfSJoerg Roedel return -ENOMEM;
1754672cf6dfSJoerg Roedel
1755672cf6dfSJoerg Roedel qi = iommu->qi;
1756672cf6dfSJoerg Roedel
1757672cf6dfSJoerg Roedel /*
1758672cf6dfSJoerg Roedel * Need two pages to accommodate 256 descriptors of 256 bits each
1759672cf6dfSJoerg Roedel * if the remapping hardware supports scalable mode translation.
1760672cf6dfSJoerg Roedel */
1761672cf6dfSJoerg Roedel desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO,
1762672cf6dfSJoerg Roedel !!ecap_smts(iommu->ecap));
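	/*
	 * The allocation order is !!ecap_smts(): order 1 (two pages, 256
	 * 256-bit descriptors) in scalable mode, order 0 (one page, 256
	 * 128-bit descriptors) otherwise.
	 */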
1763672cf6dfSJoerg Roedel if (!desc_page) {
1764672cf6dfSJoerg Roedel kfree(qi);
1765672cf6dfSJoerg Roedel iommu->qi = NULL;
1766672cf6dfSJoerg Roedel return -ENOMEM;
1767672cf6dfSJoerg Roedel }
1768672cf6dfSJoerg Roedel
1769672cf6dfSJoerg Roedel qi->desc = page_address(desc_page);
1770672cf6dfSJoerg Roedel
1771672cf6dfSJoerg Roedel qi->desc_status = kcalloc(QI_LENGTH, sizeof(int), GFP_ATOMIC);
1772672cf6dfSJoerg Roedel if (!qi->desc_status) {
1773672cf6dfSJoerg Roedel free_page((unsigned long) qi->desc);
1774672cf6dfSJoerg Roedel kfree(qi);
1775672cf6dfSJoerg Roedel iommu->qi = NULL;
1776672cf6dfSJoerg Roedel return -ENOMEM;
1777672cf6dfSJoerg Roedel }
1778672cf6dfSJoerg Roedel
1779672cf6dfSJoerg Roedel raw_spin_lock_init(&qi->q_lock);
1780672cf6dfSJoerg Roedel
1781672cf6dfSJoerg Roedel __dmar_enable_qi(iommu);
1782672cf6dfSJoerg Roedel
1783672cf6dfSJoerg Roedel return 0;
1784672cf6dfSJoerg Roedel }
1785672cf6dfSJoerg Roedel
1786672cf6dfSJoerg Roedel /* iommu interrupt handling. Most stuff is MSI-like. */
1787672cf6dfSJoerg Roedel
1788672cf6dfSJoerg Roedel enum faulttype {
1789672cf6dfSJoerg Roedel DMA_REMAP,
1790672cf6dfSJoerg Roedel INTR_REMAP,
1791672cf6dfSJoerg Roedel UNKNOWN,
1792672cf6dfSJoerg Roedel };
1793672cf6dfSJoerg Roedel
1794672cf6dfSJoerg Roedel static const char *dma_remap_fault_reasons[] =
1795672cf6dfSJoerg Roedel {
1796672cf6dfSJoerg Roedel "Software",
1797672cf6dfSJoerg Roedel "Present bit in root entry is clear",
1798672cf6dfSJoerg Roedel "Present bit in context entry is clear",
1799672cf6dfSJoerg Roedel "Invalid context entry",
1800672cf6dfSJoerg Roedel "Access beyond MGAW",
1801672cf6dfSJoerg Roedel "PTE Write access is not set",
1802672cf6dfSJoerg Roedel "PTE Read access is not set",
1803672cf6dfSJoerg Roedel "Next page table ptr is invalid",
1804672cf6dfSJoerg Roedel "Root table address invalid",
1805672cf6dfSJoerg Roedel "Context table ptr is invalid",
1806672cf6dfSJoerg Roedel "non-zero reserved fields in RTP",
1807672cf6dfSJoerg Roedel "non-zero reserved fields in CTP",
1808672cf6dfSJoerg Roedel "non-zero reserved fields in PTE",
1809672cf6dfSJoerg Roedel "PCE for translation request specifies blocking",
1810672cf6dfSJoerg Roedel };
1811672cf6dfSJoerg Roedel
1812672cf6dfSJoerg Roedel static const char * const dma_remap_sm_fault_reasons[] = {
1813672cf6dfSJoerg Roedel "SM: Invalid Root Table Address",
1814672cf6dfSJoerg Roedel "SM: TTM 0 for request with PASID",
1815672cf6dfSJoerg Roedel "SM: TTM 0 for page group request",
1816672cf6dfSJoerg Roedel "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x33-0x37 */
1817672cf6dfSJoerg Roedel "SM: Error attempting to access Root Entry",
1818672cf6dfSJoerg Roedel "SM: Present bit in Root Entry is clear",
1819672cf6dfSJoerg Roedel "SM: Non-zero reserved field set in Root Entry",
1820672cf6dfSJoerg Roedel "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x3B-0x3F */
1821672cf6dfSJoerg Roedel "SM: Error attempting to access Context Entry",
1822672cf6dfSJoerg Roedel "SM: Present bit in Context Entry is clear",
1823672cf6dfSJoerg Roedel "SM: Non-zero reserved field set in the Context Entry",
1824672cf6dfSJoerg Roedel "SM: Invalid Context Entry",
1825672cf6dfSJoerg Roedel "SM: DTE field in Context Entry is clear",
1826672cf6dfSJoerg Roedel "SM: PASID Enable field in Context Entry is clear",
1827672cf6dfSJoerg Roedel "SM: PASID is larger than the max in Context Entry",
1828672cf6dfSJoerg Roedel "SM: PRE field in Context-Entry is clear",
1829672cf6dfSJoerg Roedel "SM: RID_PASID field error in Context-Entry",
1830672cf6dfSJoerg Roedel "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x49-0x4F */
1831672cf6dfSJoerg Roedel "SM: Error attempting to access the PASID Directory Entry",
1832672cf6dfSJoerg Roedel "SM: Present bit in Directory Entry is clear",
1833672cf6dfSJoerg Roedel "SM: Non-zero reserved field set in PASID Directory Entry",
1834672cf6dfSJoerg Roedel "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x53-0x57 */
1835672cf6dfSJoerg Roedel "SM: Error attempting to access PASID Table Entry",
1836672cf6dfSJoerg Roedel "SM: Present bit in PASID Table Entry is clear",
1837672cf6dfSJoerg Roedel "SM: Non-zero reserved field set in PASID Table Entry",
1838672cf6dfSJoerg Roedel "SM: Invalid Scalable-Mode PASID Table Entry",
1839672cf6dfSJoerg Roedel "SM: ERE field is clear in PASID Table Entry",
1840672cf6dfSJoerg Roedel "SM: SRE field is clear in PASID Table Entry",
1841672cf6dfSJoerg Roedel "Unknown", "Unknown",/* 0x5E-0x5F */
1842672cf6dfSJoerg Roedel "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x60-0x67 */
1843672cf6dfSJoerg Roedel "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x68-0x6F */
1844672cf6dfSJoerg Roedel "SM: Error attempting to access first-level paging entry",
1845672cf6dfSJoerg Roedel "SM: Present bit in first-level paging entry is clear",
1846672cf6dfSJoerg Roedel "SM: Non-zero reserved field set in first-level paging entry",
1847672cf6dfSJoerg Roedel "SM: Error attempting to access FL-PML4 entry",
1848672cf6dfSJoerg Roedel "SM: First-level entry address beyond MGAW in Nested translation",
1849672cf6dfSJoerg Roedel "SM: Read permission error in FL-PML4 entry in Nested translation",
1850672cf6dfSJoerg Roedel "SM: Read permission error in first-level paging entry in Nested translation",
1851672cf6dfSJoerg Roedel "SM: Write permission error in first-level paging entry in Nested translation",
1852672cf6dfSJoerg Roedel "SM: Error attempting to access second-level paging entry",
1853672cf6dfSJoerg Roedel "SM: Read/Write permission error in second-level paging entry",
1854672cf6dfSJoerg Roedel "SM: Non-zero reserved field set in second-level paging entry",
1855672cf6dfSJoerg Roedel "SM: Invalid second-level page table pointer",
1856672cf6dfSJoerg Roedel "SM: A/D bit update needed in second-level entry when set up in no snoop",
1857672cf6dfSJoerg Roedel "Unknown", "Unknown", "Unknown", /* 0x7D-0x7F */
1858672cf6dfSJoerg Roedel "SM: Address in first-level translation is not canonical",
1859672cf6dfSJoerg Roedel "SM: U/S set 0 for first-level translation with user privilege",
1860672cf6dfSJoerg Roedel "SM: No execute permission for request with PASID and ER=1",
1861672cf6dfSJoerg Roedel "SM: Address beyond the DMA hardware max",
1862672cf6dfSJoerg Roedel "SM: Second-level entry address beyond the max",
1863672cf6dfSJoerg Roedel "SM: No write permission for Write/AtomicOp request",
1864672cf6dfSJoerg Roedel "SM: No read permission for Read/AtomicOp request",
1865672cf6dfSJoerg Roedel "SM: Invalid address-interrupt address",
1866672cf6dfSJoerg Roedel "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x88-0x8F */
1867672cf6dfSJoerg Roedel "SM: A/D bit update needed in first-level entry when set up in no snoop",
1868672cf6dfSJoerg Roedel };
1869672cf6dfSJoerg Roedel
1870672cf6dfSJoerg Roedel static const char *irq_remap_fault_reasons[] =
1871672cf6dfSJoerg Roedel {
1872672cf6dfSJoerg Roedel "Detected reserved fields in the decoded interrupt-remapped request",
1873672cf6dfSJoerg Roedel "Interrupt index exceeded the interrupt-remapping table size",
1874672cf6dfSJoerg Roedel "Present field in the IRTE entry is clear",
1875672cf6dfSJoerg Roedel "Error accessing interrupt-remapping table pointed by IRTA_REG",
1876672cf6dfSJoerg Roedel "Detected reserved fields in the IRTE entry",
1877672cf6dfSJoerg Roedel "Blocked a compatibility format interrupt request",
1878672cf6dfSJoerg Roedel "Blocked an interrupt request due to source-id verification failure",
1879672cf6dfSJoerg Roedel };
1880672cf6dfSJoerg Roedel
dmar_get_fault_reason(u8 fault_reason,int * fault_type)1881672cf6dfSJoerg Roedel static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
1882672cf6dfSJoerg Roedel {
1883672cf6dfSJoerg Roedel if (fault_reason >= 0x20 && (fault_reason - 0x20 <
1884672cf6dfSJoerg Roedel ARRAY_SIZE(irq_remap_fault_reasons))) {
1885672cf6dfSJoerg Roedel *fault_type = INTR_REMAP;
1886672cf6dfSJoerg Roedel return irq_remap_fault_reasons[fault_reason - 0x20];
1887672cf6dfSJoerg Roedel } else if (fault_reason >= 0x30 && (fault_reason - 0x30 <
1888672cf6dfSJoerg Roedel ARRAY_SIZE(dma_remap_sm_fault_reasons))) {
1889672cf6dfSJoerg Roedel *fault_type = DMA_REMAP;
1890672cf6dfSJoerg Roedel return dma_remap_sm_fault_reasons[fault_reason - 0x30];
1891672cf6dfSJoerg Roedel } else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
1892672cf6dfSJoerg Roedel *fault_type = DMA_REMAP;
1893672cf6dfSJoerg Roedel return dma_remap_fault_reasons[fault_reason];
1894672cf6dfSJoerg Roedel } else {
1895672cf6dfSJoerg Roedel *fault_type = UNKNOWN;
1896672cf6dfSJoerg Roedel return "Unknown";
1897672cf6dfSJoerg Roedel }
1898672cf6dfSJoerg Roedel }
1899672cf6dfSJoerg Roedel
dmar_msi_reg(struct intel_iommu * iommu,int irq)1901672cf6dfSJoerg Roedel static inline int dmar_msi_reg(struct intel_iommu *iommu, int irq)
1902672cf6dfSJoerg Roedel {
1903672cf6dfSJoerg Roedel if (iommu->irq == irq)
1904672cf6dfSJoerg Roedel return DMAR_FECTL_REG;
1905672cf6dfSJoerg Roedel else if (iommu->pr_irq == irq)
1906672cf6dfSJoerg Roedel return DMAR_PECTL_REG;
19074a0d4265SKan Liang else if (iommu->perf_irq == irq)
19084a0d4265SKan Liang return DMAR_PERFINTRCTL_REG;
1909672cf6dfSJoerg Roedel else
1910672cf6dfSJoerg Roedel BUG();
1911672cf6dfSJoerg Roedel }
1912672cf6dfSJoerg Roedel
dmar_msi_unmask(struct irq_data * data)1913672cf6dfSJoerg Roedel void dmar_msi_unmask(struct irq_data *data)
1914672cf6dfSJoerg Roedel {
1915672cf6dfSJoerg Roedel struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
1916672cf6dfSJoerg Roedel int reg = dmar_msi_reg(iommu, data->irq);
1917672cf6dfSJoerg Roedel unsigned long flag;
1918672cf6dfSJoerg Roedel
1919672cf6dfSJoerg Roedel /* unmask it */
1920672cf6dfSJoerg Roedel raw_spin_lock_irqsave(&iommu->register_lock, flag);
1921672cf6dfSJoerg Roedel writel(0, iommu->reg + reg);
1922672cf6dfSJoerg Roedel /* Read a reg to force flush the post write */
1923672cf6dfSJoerg Roedel readl(iommu->reg + reg);
1924672cf6dfSJoerg Roedel raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1925672cf6dfSJoerg Roedel }
1926672cf6dfSJoerg Roedel
dmar_msi_mask(struct irq_data * data)1927672cf6dfSJoerg Roedel void dmar_msi_mask(struct irq_data *data)
1928672cf6dfSJoerg Roedel {
1929672cf6dfSJoerg Roedel struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
1930672cf6dfSJoerg Roedel int reg = dmar_msi_reg(iommu, data->irq);
1931672cf6dfSJoerg Roedel unsigned long flag;
1932672cf6dfSJoerg Roedel
1933672cf6dfSJoerg Roedel /* mask it */
1934672cf6dfSJoerg Roedel raw_spin_lock_irqsave(&iommu->register_lock, flag);
1935672cf6dfSJoerg Roedel writel(DMA_FECTL_IM, iommu->reg + reg);
1936672cf6dfSJoerg Roedel /* Read a reg to force flush the post write */
1937672cf6dfSJoerg Roedel readl(iommu->reg + reg);
1938672cf6dfSJoerg Roedel raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1939672cf6dfSJoerg Roedel }
1940672cf6dfSJoerg Roedel
dmar_msi_write(int irq,struct msi_msg * msg)1941672cf6dfSJoerg Roedel void dmar_msi_write(int irq, struct msi_msg *msg)
1942672cf6dfSJoerg Roedel {
1943672cf6dfSJoerg Roedel struct intel_iommu *iommu = irq_get_handler_data(irq);
1944672cf6dfSJoerg Roedel int reg = dmar_msi_reg(iommu, irq);
1945672cf6dfSJoerg Roedel unsigned long flag;
1946672cf6dfSJoerg Roedel
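	/*
	 * The data register sits at +4 and the address registers at +8/+12
	 * relative to the control register returned by dmar_msi_reg(),
	 * e.g. DMAR_FEDATA_REG/DMAR_FEADDR_REG/DMAR_FEUADDR_REG.
	 */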
1947672cf6dfSJoerg Roedel raw_spin_lock_irqsave(&iommu->register_lock, flag);
1948672cf6dfSJoerg Roedel writel(msg->data, iommu->reg + reg + 4);
1949672cf6dfSJoerg Roedel writel(msg->address_lo, iommu->reg + reg + 8);
1950672cf6dfSJoerg Roedel writel(msg->address_hi, iommu->reg + reg + 12);
1951672cf6dfSJoerg Roedel raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1952672cf6dfSJoerg Roedel }
1953672cf6dfSJoerg Roedel
dmar_msi_read(int irq,struct msi_msg * msg)1954672cf6dfSJoerg Roedel void dmar_msi_read(int irq, struct msi_msg *msg)
1955672cf6dfSJoerg Roedel {
1956672cf6dfSJoerg Roedel struct intel_iommu *iommu = irq_get_handler_data(irq);
1957672cf6dfSJoerg Roedel int reg = dmar_msi_reg(iommu, irq);
1958672cf6dfSJoerg Roedel unsigned long flag;
1959672cf6dfSJoerg Roedel
1960672cf6dfSJoerg Roedel raw_spin_lock_irqsave(&iommu->register_lock, flag);
1961672cf6dfSJoerg Roedel msg->data = readl(iommu->reg + reg + 4);
1962672cf6dfSJoerg Roedel msg->address_lo = readl(iommu->reg + reg + 8);
1963672cf6dfSJoerg Roedel msg->address_hi = readl(iommu->reg + reg + 12);
1964672cf6dfSJoerg Roedel raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1965672cf6dfSJoerg Roedel }
1966672cf6dfSJoerg Roedel
dmar_fault_do_one(struct intel_iommu * iommu,int type,u8 fault_reason,u32 pasid,u16 source_id,unsigned long long addr)1967672cf6dfSJoerg Roedel static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
1968c7b6bac9SFenghua Yu u8 fault_reason, u32 pasid, u16 source_id,
1969672cf6dfSJoerg Roedel unsigned long long addr)
1970672cf6dfSJoerg Roedel {
1971672cf6dfSJoerg Roedel const char *reason;
1972672cf6dfSJoerg Roedel int fault_type;
1973672cf6dfSJoerg Roedel
1974672cf6dfSJoerg Roedel reason = dmar_get_fault_reason(fault_reason, &fault_type);
1975672cf6dfSJoerg Roedel
1976914ff771SKyung Min Park if (fault_type == INTR_REMAP) {
19770b482d0cSBjorn Helgaas pr_err("[INTR-REMAP] Request device [%02x:%02x.%d] fault index 0x%llx [fault reason 0x%02x] %s\n",
1978672cf6dfSJoerg Roedel source_id >> 8, PCI_SLOT(source_id & 0xFF),
1979672cf6dfSJoerg Roedel PCI_FUNC(source_id & 0xFF), addr >> 48,
1980672cf6dfSJoerg Roedel fault_reason, reason);
1981914ff771SKyung Min Park
1982914ff771SKyung Min Park return 0;
1983914ff771SKyung Min Park }
1984914ff771SKyung Min Park
1985fffaed1eSJacob Pan if (pasid == IOMMU_PASID_INVALID)
19860b482d0cSBjorn Helgaas pr_err("[%s NO_PASID] Request device [%02x:%02x.%d] fault addr 0x%llx [fault reason 0x%02x] %s\n",
1987672cf6dfSJoerg Roedel type ? "DMA Read" : "DMA Write",
1988672cf6dfSJoerg Roedel source_id >> 8, PCI_SLOT(source_id & 0xFF),
1989719a1933SLu Baolu PCI_FUNC(source_id & 0xFF), addr,
1990672cf6dfSJoerg Roedel fault_reason, reason);
1991719a1933SLu Baolu else
19920b482d0cSBjorn Helgaas pr_err("[%s PASID 0x%x] Request device [%02x:%02x.%d] fault addr 0x%llx [fault reason 0x%02x] %s\n",
1993719a1933SLu Baolu type ? "DMA Read" : "DMA Write", pasid,
1994719a1933SLu Baolu source_id >> 8, PCI_SLOT(source_id & 0xFF),
1995719a1933SLu Baolu PCI_FUNC(source_id & 0xFF), addr,
1996719a1933SLu Baolu fault_reason, reason);
1997719a1933SLu Baolu
1998914ff771SKyung Min Park dmar_fault_dump_ptes(iommu, source_id, addr, pasid);
1999914ff771SKyung Min Park
2000672cf6dfSJoerg Roedel return 0;
2001672cf6dfSJoerg Roedel }
2002672cf6dfSJoerg Roedel
2003672cf6dfSJoerg Roedel #define PRIMARY_FAULT_REG_LEN (16)
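/*
 * Each fault recording register is 128 bits (16 bytes), hence the stride
 * used when indexing fault records below.
 */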
dmar_fault(int irq,void * dev_id)2004672cf6dfSJoerg Roedel irqreturn_t dmar_fault(int irq, void *dev_id)
2005672cf6dfSJoerg Roedel {
2006672cf6dfSJoerg Roedel struct intel_iommu *iommu = dev_id;
2007672cf6dfSJoerg Roedel int reg, fault_index;
2008672cf6dfSJoerg Roedel u32 fault_status;
2009672cf6dfSJoerg Roedel unsigned long flag;
2010672cf6dfSJoerg Roedel static DEFINE_RATELIMIT_STATE(rs,
2011672cf6dfSJoerg Roedel DEFAULT_RATELIMIT_INTERVAL,
2012672cf6dfSJoerg Roedel DEFAULT_RATELIMIT_BURST);
2013672cf6dfSJoerg Roedel
2014672cf6dfSJoerg Roedel raw_spin_lock_irqsave(&iommu->register_lock, flag);
2015672cf6dfSJoerg Roedel fault_status = readl(iommu->reg + DMAR_FSTS_REG);
2016672cf6dfSJoerg Roedel if (fault_status && __ratelimit(&rs))
2017672cf6dfSJoerg Roedel pr_err("DRHD: handling fault status reg %x\n", fault_status);
2018672cf6dfSJoerg Roedel
2019672cf6dfSJoerg Roedel /* TBD: ignore advanced fault log currently */
2020672cf6dfSJoerg Roedel if (!(fault_status & DMA_FSTS_PPF))
2021672cf6dfSJoerg Roedel goto unlock_exit;
2022672cf6dfSJoerg Roedel
2023672cf6dfSJoerg Roedel fault_index = dma_fsts_fault_record_index(fault_status);
2024672cf6dfSJoerg Roedel reg = cap_fault_reg_offset(iommu->cap);
2025672cf6dfSJoerg Roedel while (1) {
2026672cf6dfSJoerg Roedel /* Disable printing, simply clear the fault when ratelimited */
2027672cf6dfSJoerg Roedel bool ratelimited = !__ratelimit(&rs);
2028672cf6dfSJoerg Roedel u8 fault_reason;
2029672cf6dfSJoerg Roedel u16 source_id;
2030672cf6dfSJoerg Roedel u64 guest_addr;
2031c7b6bac9SFenghua Yu u32 pasid;
2032c7b6bac9SFenghua Yu int type;
2033672cf6dfSJoerg Roedel u32 data;
2034672cf6dfSJoerg Roedel bool pasid_present;
2035672cf6dfSJoerg Roedel
2036672cf6dfSJoerg Roedel /* highest 32 bits */
2037672cf6dfSJoerg Roedel data = readl(iommu->reg + reg +
2038672cf6dfSJoerg Roedel fault_index * PRIMARY_FAULT_REG_LEN + 12);
2039672cf6dfSJoerg Roedel if (!(data & DMA_FRCD_F))
2040672cf6dfSJoerg Roedel break;
2041672cf6dfSJoerg Roedel
2042672cf6dfSJoerg Roedel if (!ratelimited) {
2043672cf6dfSJoerg Roedel fault_reason = dma_frcd_fault_reason(data);
2044672cf6dfSJoerg Roedel type = dma_frcd_type(data);
2045672cf6dfSJoerg Roedel
2046672cf6dfSJoerg Roedel pasid = dma_frcd_pasid_value(data);
2047672cf6dfSJoerg Roedel data = readl(iommu->reg + reg +
2048672cf6dfSJoerg Roedel fault_index * PRIMARY_FAULT_REG_LEN + 8);
2049672cf6dfSJoerg Roedel source_id = dma_frcd_source_id(data);
2050672cf6dfSJoerg Roedel
2051672cf6dfSJoerg Roedel pasid_present = dma_frcd_pasid_present(data);
2052672cf6dfSJoerg Roedel guest_addr = dmar_readq(iommu->reg + reg +
2053672cf6dfSJoerg Roedel fault_index * PRIMARY_FAULT_REG_LEN);
2054672cf6dfSJoerg Roedel guest_addr = dma_frcd_page_addr(guest_addr);
2055672cf6dfSJoerg Roedel }
2056672cf6dfSJoerg Roedel
2057672cf6dfSJoerg Roedel /* clear the fault */
2058672cf6dfSJoerg Roedel writel(DMA_FRCD_F, iommu->reg + reg +
2059672cf6dfSJoerg Roedel fault_index * PRIMARY_FAULT_REG_LEN + 12);
2060672cf6dfSJoerg Roedel
2061672cf6dfSJoerg Roedel raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
2062672cf6dfSJoerg Roedel
2063672cf6dfSJoerg Roedel if (!ratelimited)
2064672cf6dfSJoerg Roedel /* Use IOMMU_PASID_INVALID (-1) if the PASID is not present */
2065672cf6dfSJoerg Roedel dmar_fault_do_one(iommu, type, fault_reason,
2066fffaed1eSJacob Pan pasid_present ? pasid : IOMMU_PASID_INVALID,
2067672cf6dfSJoerg Roedel source_id, guest_addr);
2068672cf6dfSJoerg Roedel
2069672cf6dfSJoerg Roedel fault_index++;
2070672cf6dfSJoerg Roedel if (fault_index >= cap_num_fault_regs(iommu->cap))
2071672cf6dfSJoerg Roedel fault_index = 0;
2072672cf6dfSJoerg Roedel raw_spin_lock_irqsave(&iommu->register_lock, flag);
2073672cf6dfSJoerg Roedel }
2074672cf6dfSJoerg Roedel
2075672cf6dfSJoerg Roedel writel(DMA_FSTS_PFO | DMA_FSTS_PPF | DMA_FSTS_PRO,
2076672cf6dfSJoerg Roedel iommu->reg + DMAR_FSTS_REG);
2077672cf6dfSJoerg Roedel
2078672cf6dfSJoerg Roedel unlock_exit:
2079672cf6dfSJoerg Roedel raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
2080672cf6dfSJoerg Roedel return IRQ_HANDLED;
2081672cf6dfSJoerg Roedel }
2082672cf6dfSJoerg Roedel
dmar_set_interrupt(struct intel_iommu * iommu)2083672cf6dfSJoerg Roedel int dmar_set_interrupt(struct intel_iommu *iommu)
2084672cf6dfSJoerg Roedel {
2085672cf6dfSJoerg Roedel int irq, ret;
2086672cf6dfSJoerg Roedel
2087672cf6dfSJoerg Roedel /*
2088672cf6dfSJoerg Roedel * Check if the fault interrupt is already initialized.
2089672cf6dfSJoerg Roedel */
2090672cf6dfSJoerg Roedel if (iommu->irq)
2091672cf6dfSJoerg Roedel return 0;
2092672cf6dfSJoerg Roedel
2093672cf6dfSJoerg Roedel irq = dmar_alloc_hwirq(iommu->seq_id, iommu->node, iommu);
2094672cf6dfSJoerg Roedel if (irq > 0) {
2095672cf6dfSJoerg Roedel iommu->irq = irq;
2096672cf6dfSJoerg Roedel } else {
2097672cf6dfSJoerg Roedel pr_err("No free IRQ vectors\n");
2098672cf6dfSJoerg Roedel return -EINVAL;
2099672cf6dfSJoerg Roedel }
2100672cf6dfSJoerg Roedel
2101672cf6dfSJoerg Roedel ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
2102672cf6dfSJoerg Roedel if (ret)
2103672cf6dfSJoerg Roedel pr_err("Can't request irq\n");
2104672cf6dfSJoerg Roedel return ret;
2105672cf6dfSJoerg Roedel }
2106672cf6dfSJoerg Roedel
enable_drhd_fault_handling(void)2107672cf6dfSJoerg Roedel int __init enable_drhd_fault_handling(void)
2108672cf6dfSJoerg Roedel {
2109672cf6dfSJoerg Roedel struct dmar_drhd_unit *drhd;
2110672cf6dfSJoerg Roedel struct intel_iommu *iommu;
2111672cf6dfSJoerg Roedel
2112672cf6dfSJoerg Roedel /*
2113672cf6dfSJoerg Roedel * Enable fault control interrupt.
2114672cf6dfSJoerg Roedel */
2115672cf6dfSJoerg Roedel for_each_iommu(iommu, drhd) {
2116672cf6dfSJoerg Roedel u32 fault_status;
2117672cf6dfSJoerg Roedel int ret = dmar_set_interrupt(iommu);
2118672cf6dfSJoerg Roedel
2119672cf6dfSJoerg Roedel if (ret) {
2120672cf6dfSJoerg Roedel pr_err("DRHD %Lx: failed to enable fault interrupt, ret %d\n",
2121672cf6dfSJoerg Roedel (unsigned long long)drhd->reg_base_addr, ret);
2122672cf6dfSJoerg Roedel return -1;
2123672cf6dfSJoerg Roedel }
2124672cf6dfSJoerg Roedel
2125672cf6dfSJoerg Roedel /*
2126672cf6dfSJoerg Roedel * Clear any previous faults.
2127672cf6dfSJoerg Roedel */
2128672cf6dfSJoerg Roedel dmar_fault(iommu->irq, iommu);
2129672cf6dfSJoerg Roedel fault_status = readl(iommu->reg + DMAR_FSTS_REG);
2130672cf6dfSJoerg Roedel writel(fault_status, iommu->reg + DMAR_FSTS_REG);
2131672cf6dfSJoerg Roedel }
2132672cf6dfSJoerg Roedel
2133672cf6dfSJoerg Roedel return 0;
2134672cf6dfSJoerg Roedel }
2135672cf6dfSJoerg Roedel
2136672cf6dfSJoerg Roedel /*
2137672cf6dfSJoerg Roedel * Re-enable Queued Invalidation interface.
2138672cf6dfSJoerg Roedel */
dmar_reenable_qi(struct intel_iommu * iommu)2139672cf6dfSJoerg Roedel int dmar_reenable_qi(struct intel_iommu *iommu)
2140672cf6dfSJoerg Roedel {
2141672cf6dfSJoerg Roedel if (!ecap_qis(iommu->ecap))
2142672cf6dfSJoerg Roedel return -ENOENT;
2143672cf6dfSJoerg Roedel
2144672cf6dfSJoerg Roedel if (!iommu->qi)
2145672cf6dfSJoerg Roedel return -ENOENT;
2146672cf6dfSJoerg Roedel
2147672cf6dfSJoerg Roedel /*
2148672cf6dfSJoerg Roedel * First disable queued invalidation.
2149672cf6dfSJoerg Roedel */
2150672cf6dfSJoerg Roedel dmar_disable_qi(iommu);
2151672cf6dfSJoerg Roedel /*
2152672cf6dfSJoerg Roedel * Then enable queued invalidation again. Since there are no pending
2153672cf6dfSJoerg Roedel * invalidation requests now, it's safe to re-enable queued
2154672cf6dfSJoerg Roedel * invalidation.
2155672cf6dfSJoerg Roedel */
2156672cf6dfSJoerg Roedel __dmar_enable_qi(iommu);
2157672cf6dfSJoerg Roedel
2158672cf6dfSJoerg Roedel return 0;
2159672cf6dfSJoerg Roedel }
2160672cf6dfSJoerg Roedel
2161672cf6dfSJoerg Roedel /*
2162672cf6dfSJoerg Roedel * Check interrupt remapping support in DMAR table description.
2163672cf6dfSJoerg Roedel */
dmar_ir_support(void)2164672cf6dfSJoerg Roedel int __init dmar_ir_support(void)
2165672cf6dfSJoerg Roedel {
2166672cf6dfSJoerg Roedel struct acpi_table_dmar *dmar;
2167672cf6dfSJoerg Roedel dmar = (struct acpi_table_dmar *)dmar_tbl;
2168672cf6dfSJoerg Roedel if (!dmar)
2169672cf6dfSJoerg Roedel return 0;
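	/* Bit 0 of the DMAR table flags is INTR_REMAP support. */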
2170672cf6dfSJoerg Roedel return dmar->flags & 0x1;
2171672cf6dfSJoerg Roedel }
2172672cf6dfSJoerg Roedel
2173672cf6dfSJoerg Roedel /* Check whether DMAR units are in use */
dmar_in_use(void)2174672cf6dfSJoerg Roedel static inline bool dmar_in_use(void)
2175672cf6dfSJoerg Roedel {
2176672cf6dfSJoerg Roedel return irq_remapping_enabled || intel_iommu_enabled;
2177672cf6dfSJoerg Roedel }
2178672cf6dfSJoerg Roedel
static int __init dmar_free_unused_resources(void)
{
	struct dmar_drhd_unit *dmaru, *dmaru_n;

	if (dmar_in_use())
		return 0;

	if (dmar_dev_scope_status != 1 && !list_empty(&dmar_drhd_units))
		bus_unregister_notifier(&pci_bus_type, &dmar_pci_bus_nb);

	down_write(&dmar_global_lock);
	list_for_each_entry_safe(dmaru, dmaru_n, &dmar_drhd_units, list) {
		list_del(&dmaru->list);
		dmar_free_drhd(dmaru);
	}
	up_write(&dmar_global_lock);

	return 0;
}

late_initcall(dmar_free_unused_resources);

/*
 * DMAR Hotplug Support
 * For more details, please refer to Intel(R) Virtualization Technology
 * for Directed I/O Architecture Specification, Rev 2.2, Section 8.8
 * "Remapping Hardware Unit Hot Plug".
 */
static guid_t dmar_hp_guid =
	GUID_INIT(0xD8C1A3A6, 0xBE9B, 0x4C9B,
		  0x91, 0xBF, 0xC3, 0xCB, 0x81, 0xFC, 0x5D, 0xAF);

/*
 * Currently there's only one revision and BIOS will not check the revision id,
 * so use 0 for safety.
 */
#define	DMAR_DSM_REV_ID		0
#define	DMAR_DSM_FUNC_DRHD	1
#define	DMAR_DSM_FUNC_ATSR	2
#define	DMAR_DSM_FUNC_RHSA	3
#define	DMAR_DSM_FUNC_SATC	4

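/*
 * Query whether the firmware's DMAR hotplug _DSM implements function
 * @func. acpi_check_dsm() takes a bitmask of function indices, hence
 * the 1 << func (e.g. DMAR_DSM_FUNC_DRHD maps to bit 1).
 */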
static inline bool dmar_detect_dsm(acpi_handle handle, int func)
{
	return acpi_check_dsm(handle, &dmar_hp_guid, DMAR_DSM_REV_ID, 1 << func);
}

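/*
 * Evaluate DMAR hotplug _DSM function @func and, if it returns a
 * buffer, walk the remapping structures in that buffer, dispatching
 * each entry of the type matching @func to @handler. Returns 0 when
 * the firmware does not implement @func at all.
 */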
static int dmar_walk_dsm_resource(acpi_handle handle, int func,
				  dmar_res_handler_t handler, void *arg)
{
	int ret = -ENODEV;
	union acpi_object *obj;
	struct acpi_dmar_header *start;
	struct dmar_res_callback callback;
	static int res_type[] = {
		[DMAR_DSM_FUNC_DRHD] = ACPI_DMAR_TYPE_HARDWARE_UNIT,
		[DMAR_DSM_FUNC_ATSR] = ACPI_DMAR_TYPE_ROOT_ATS,
		[DMAR_DSM_FUNC_RHSA] = ACPI_DMAR_TYPE_HARDWARE_AFFINITY,
		[DMAR_DSM_FUNC_SATC] = ACPI_DMAR_TYPE_SATC,
	};

	if (!dmar_detect_dsm(handle, func))
		return 0;

	obj = acpi_evaluate_dsm_typed(handle, &dmar_hp_guid, DMAR_DSM_REV_ID,
				      func, NULL, ACPI_TYPE_BUFFER);
	if (!obj)
		return -ENODEV;

	memset(&callback, 0, sizeof(callback));
	callback.cb[res_type[func]] = handler;
	callback.arg[res_type[func]] = arg;
	start = (struct acpi_dmar_header *)obj->buffer.pointer;
	ret = dmar_walk_remapping_entries(start, obj->buffer.length, &callback);

	ACPI_FREE(obj);

	return ret;
}

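/*
 * Bring a hot-added DRHD unit online: enable interrupt remapping on
 * it first, then DMA remapping.
 */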
static int dmar_hp_add_drhd(struct acpi_dmar_header *header, void *arg)
{
	int ret;
	struct dmar_drhd_unit *dmaru;

	dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
	if (!dmaru)
		return -ENODEV;

	ret = dmar_ir_hotplug(dmaru, true);
	if (ret == 0)
		ret = dmar_iommu_hotplug(dmaru, true);

	return ret;
}

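/*
 * Take a DRHD unit offline for hot removal. Removal is refused with
 * -EBUSY while any device in the unit's scope is still present;
 * include_all units are exempt from that check since they carry no
 * explicit device scope.
 */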
static int dmar_hp_remove_drhd(struct acpi_dmar_header *header, void *arg)
{
	int i, ret;
	struct device *dev;
	struct dmar_drhd_unit *dmaru;

	dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
	if (!dmaru)
		return 0;

	/*
	 * All PCI devices managed by this unit should have been destroyed.
	 */
	if (!dmaru->include_all && dmaru->devices && dmaru->devices_cnt) {
		for_each_active_dev_scope(dmaru->devices,
					  dmaru->devices_cnt, i, dev)
			return -EBUSY;
	}

	ret = dmar_ir_hotplug(dmaru, false);
	if (ret == 0)
		ret = dmar_iommu_hotplug(dmaru, false);

	return ret;
}

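/*
 * Final teardown for a removed DRHD unit: unlink it from the global
 * list under RCU, wait for readers to drain, then free it.
 */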
static int dmar_hp_release_drhd(struct acpi_dmar_header *header, void *arg)
{
	struct dmar_drhd_unit *dmaru;

	dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
	if (dmaru) {
		list_del_rcu(&dmaru->list);
		synchronize_rcu();
		dmar_free_drhd(dmaru);
	}

	return 0;
}

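/*
 * Hot-add sequence: validate and parse the DRHD entries reported by
 * the _DSM, pull in the matching RHSA and ATSR entries, then bring the
 * new units online. Each failure point unwinds the steps that already
 * succeeded via the labels below.
 */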
static int dmar_hotplug_insert(acpi_handle handle)
{
	int ret;
	int drhd_count = 0;

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
				     &dmar_validate_one_drhd, (void *)1);
	if (ret)
		goto out;

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
				     &dmar_parse_one_drhd, (void *)&drhd_count);
	if (ret == 0 && drhd_count == 0) {
		pr_warn(FW_BUG "No DRHD structures in buffer returned by _DSM method\n");
		goto out;
	} else if (ret) {
		goto release_drhd;
	}

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_RHSA,
				     &dmar_parse_one_rhsa, NULL);
	if (ret)
		goto release_drhd;

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
				     &dmar_parse_one_atsr, NULL);
	if (ret)
		goto release_atsr;

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
				     &dmar_hp_add_drhd, NULL);
	if (!ret)
		return 0;

	dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
			       &dmar_hp_remove_drhd, NULL);
release_atsr:
	dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
			       &dmar_release_one_atsr, NULL);
release_drhd:
	dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
			       &dmar_hp_release_drhd, NULL);
out:
	return ret;
}

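/*
 * Hot-removal sequence: check that the affected ATSR entries can go
 * away, take the units offline, then release their resources. If
 * taking a unit offline fails, any units already offlined are brought
 * back so the system stays in a consistent state.
 */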
static int dmar_hotplug_remove(acpi_handle handle)
{
	int ret;

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
				     &dmar_check_one_atsr, NULL);
	if (ret)
		return ret;

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
				     &dmar_hp_remove_drhd, NULL);
	if (ret == 0) {
		WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
					       &dmar_release_one_atsr, NULL));
		WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
					       &dmar_hp_release_drhd, NULL));
	} else {
		dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
				       &dmar_hp_add_drhd, NULL);
	}

	return ret;
}

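/*
 * acpi_walk_namespace() callback: stop at the first device whose _DSM
 * implements the DRHD function and report its handle through @retval.
 */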
static acpi_status dmar_get_dsm_handle(acpi_handle handle, u32 lvl,
				       void *context, void **retval)
{
	acpi_handle *phdl = retval;

	if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
		*phdl = handle;
		return AE_CTRL_TERMINATE;
	}

	return AE_OK;
}

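/*
 * Common entry point for DMAR hot add/removal: find the handle that
 * carries the hotplug _DSM (either @handle itself or a descendant),
 * then perform the insert or remove under dmar_global_lock. Returns 0
 * when remapping is not in use or no _DSM-capable handle exists.
 */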
static int dmar_device_hotplug(acpi_handle handle, bool insert)
{
	int ret;
	acpi_handle tmp = NULL;
	acpi_status status;

	if (!dmar_in_use())
		return 0;

	if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
		tmp = handle;
	} else {
		status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle,
					     ACPI_UINT32_MAX,
					     dmar_get_dsm_handle,
					     NULL, NULL, &tmp);
		if (ACPI_FAILURE(status)) {
			pr_warn("Failed to locate _DSM method.\n");
			return -ENXIO;
		}
	}
	if (!tmp)
		return 0;

	down_write(&dmar_global_lock);
	if (insert)
		ret = dmar_hotplug_insert(tmp);
	else
		ret = dmar_hotplug_remove(tmp);
	up_write(&dmar_global_lock);

	return ret;
}

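/*
 * Entry points for code outside this file. Illustrative sketch only
 * (the caller shape below is an assumption, not taken from a real
 * caller): the ACPI hotplug path is expected to notify DMAR around
 * remapping-unit insertion and ejection roughly as:
 *
 *	if (dmar_device_add(handle))
 *		pr_warn("Failed to hot-add DMAR unit\n");
 *	...
 *	if (dmar_device_remove(handle))
 *		pr_warn("Failed to hot-remove DMAR unit\n");
 */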
int dmar_device_add(acpi_handle handle)
{
	return dmar_device_hotplug(handle, true);
}

int dmar_device_remove(acpi_handle handle)
{
	return dmar_device_hotplug(handle, false);
}

/*
 * dmar_platform_optin - Is %DMA_CTRL_PLATFORM_OPT_IN_FLAG set in DMAR table
 *
 * Returns true if the platform has %DMA_CTRL_PLATFORM_OPT_IN_FLAG set in
 * the ACPI DMAR table. This means that the platform boot firmware has made
 * sure no device can issue DMA outside of RMRR regions.
 */
bool dmar_platform_optin(void)
{
	struct acpi_table_dmar *dmar;
	acpi_status status;
	bool ret;

	status = acpi_get_table(ACPI_SIG_DMAR, 0,
				(struct acpi_table_header **)&dmar);
	if (ACPI_FAILURE(status))
		return false;

	ret = !!(dmar->flags & DMAR_PLATFORM_OPT_IN);
	acpi_put_table((struct acpi_table_header *)dmar);

	return ret;
}
EXPORT_SYMBOL_GPL(dmar_platform_optin);