// SPDX-License-Identifier: GPL-2.0
/*
 * intel-pasid.c - PASID idr, table and entry manipulation
 *
 * Copyright (C) 2018 Intel Corporation
 *
 * Author: Lu Baolu <baolu.lu@linux.intel.com>
 */

#define pr_fmt(fmt)	"DMAR: " fmt

#include <linux/bitops.h>
#include <linux/cpufeature.h>
#include <linux/dmar.h>
#include <linux/iommu.h>
#include <linux/memory.h>
#include <linux/pci.h>
#include <linux/pci-ats.h>
#include <linux/spinlock.h>

#include "iommu.h"
#include "pasid.h"

/*
 * Intel IOMMU system wide PASID name space:
 */
u32 intel_pasid_max_id = PASID_MAX;

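/*
 * Editor's note (assumption, not from the original file): the two vcmd_*
 * helpers below use the VT-d virtual command interface. This path is only
 * taken when the IOMMU advertises the virtual command capability, which in
 * practice means a virtual IOMMU presented to a guest; PASIDs are then
 * allocated and freed through the VCMD/VCRSP registers rather than from a
 * purely software-managed name space.
 */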
int vcmd_alloc_pasid(struct intel_iommu *iommu, u32 *pasid)
{
	unsigned long flags;
	u8 status_code;
	int ret = 0;
	u64 res;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	dmar_writeq(iommu->reg + DMAR_VCMD_REG, VCMD_CMD_ALLOC);
	IOMMU_WAIT_OP(iommu, DMAR_VCRSP_REG, dmar_readq,
		      !(res & VCMD_VRSP_IP), res);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);

	status_code = VCMD_VRSP_SC(res);
	switch (status_code) {
	case VCMD_VRSP_SC_SUCCESS:
		*pasid = VCMD_VRSP_RESULT_PASID(res);
		break;
	case VCMD_VRSP_SC_NO_PASID_AVAIL:
		pr_info("IOMMU: %s: No PASID available\n", iommu->name);
		ret = -ENOSPC;
		break;
	default:
		ret = -ENODEV;
		pr_warn("IOMMU: %s: Unexpected error code %d\n",
			iommu->name, status_code);
	}

	return ret;
}

void vcmd_free_pasid(struct intel_iommu *iommu, u32 pasid)
{
	unsigned long flags;
	u8 status_code;
	u64 res;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	dmar_writeq(iommu->reg + DMAR_VCMD_REG,
		    VCMD_CMD_OPERAND(pasid) | VCMD_CMD_FREE);
	IOMMU_WAIT_OP(iommu, DMAR_VCRSP_REG, dmar_readq,
		      !(res & VCMD_VRSP_IP), res);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);

	status_code = VCMD_VRSP_SC(res);
	switch (status_code) {
	case VCMD_VRSP_SC_SUCCESS:
		break;
	case VCMD_VRSP_SC_INVALID_PASID:
		pr_info("IOMMU: %s: Invalid PASID\n", iommu->name);
		break;
	default:
		pr_warn("IOMMU: %s: Unexpected error code %d\n",
			iommu->name, status_code);
	}
}

/*
 * Per device pasid table management:
 */

/*
 * Allocate a pasid table for @dev. It should be called in a
 * single-thread context.
 */
int intel_pasid_alloc_table(struct device *dev)
{
	struct device_domain_info *info;
	struct pasid_table *pasid_table;
	struct page *pages;
	u32 max_pasid = 0;
	int order, size;

	might_sleep();
	info = dev_iommu_priv_get(dev);
	if (WARN_ON(!info || !dev_is_pci(dev)))
		return -ENODEV;
	if (WARN_ON(info->pasid_table))
		return -EEXIST;

	pasid_table = kzalloc(sizeof(*pasid_table), GFP_KERNEL);
	if (!pasid_table)
		return -ENOMEM;

	if (info->pasid_supported)
		max_pasid = min_t(u32, pci_max_pasids(to_pci_dev(dev)),
				  intel_pasid_max_id);

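	/*
	 * The PASID directory has one 8-byte entry per group of
	 * 2^PASID_PDE_SHIFT PASIDs, so its size in bytes is
	 * max_pasid / 2^PASID_PDE_SHIFT * 8, i.e. the shift below.
	 */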
	size = max_pasid >> (PASID_PDE_SHIFT - 3);
	order = size ? get_order(size) : 0;
	pages = alloc_pages_node(info->iommu->node,
				 GFP_KERNEL | __GFP_ZERO, order);
	if (!pages) {
		kfree(pasid_table);
		return -ENOMEM;
	}

	pasid_table->table = page_address(pages);
	pasid_table->order = order;
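	/*
	 * Derive the capacity from the rounded-up allocation: 2^order pages
	 * hold (2^order * PAGE_SIZE) / 8 directory entries, each covering
	 * 2^PASID_PDE_SHIFT PASIDs, which simplifies to the expression
	 * below for PASID_PDE_SHIFT == 6.
	 */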
	pasid_table->max_pasid = 1 << (order + PAGE_SHIFT + 3);
	info->pasid_table = pasid_table;

	if (!ecap_coherent(info->iommu->ecap))
		clflush_cache_range(pasid_table->table, (1 << order) * PAGE_SIZE);

	return 0;
}

void intel_pasid_free_table(struct device *dev)
{
	struct device_domain_info *info;
	struct pasid_table *pasid_table;
	struct pasid_dir_entry *dir;
	struct pasid_entry *table;
	int i, max_pde;

	info = dev_iommu_priv_get(dev);
	if (!info || !dev_is_pci(dev) || !info->pasid_table)
		return;

	pasid_table = info->pasid_table;
	info->pasid_table = NULL;

	/* Free scalable mode PASID directory tables: */
	dir = pasid_table->table;
	max_pde = pasid_table->max_pasid >> PASID_PDE_SHIFT;
	for (i = 0; i < max_pde; i++) {
		table = get_pasid_table_from_pde(&dir[i]);
		free_pgtable_page(table);
	}

	free_pages((unsigned long)pasid_table->table, pasid_table->order);
	kfree(pasid_table);
}

struct pasid_table *intel_pasid_get_table(struct device *dev)
{
	struct device_domain_info *info;

	info = dev_iommu_priv_get(dev);
	if (!info)
		return NULL;

	return info->pasid_table;
}

static int intel_pasid_get_dev_max_id(struct device *dev)
{
	struct device_domain_info *info;

	info = dev_iommu_priv_get(dev);
	if (!info || !info->pasid_table)
		return 0;

	return info->pasid_table->max_pasid;
}

static struct pasid_entry *intel_pasid_get_entry(struct device *dev, u32 pasid)
{
	struct device_domain_info *info;
	struct pasid_table *pasid_table;
	struct pasid_dir_entry *dir;
	struct pasid_entry *entries;
	int dir_index, index;

	pasid_table = intel_pasid_get_table(dev);
	if (WARN_ON(!pasid_table || pasid >= intel_pasid_get_dev_max_id(dev)))
		return NULL;

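	/*
	 * The PASID table is a two-level structure: the upper PASID bits
	 * index a directory whose entries point to leaf tables of PASID
	 * entries indexed by the low PASID_PDE_SHIFT bits.
	 */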
	dir = pasid_table->table;
	info = dev_iommu_priv_get(dev);
	dir_index = pasid >> PASID_PDE_SHIFT;
	index = pasid & PASID_PTE_MASK;

retry:
	entries = get_pasid_table_from_pde(&dir[dir_index]);
	if (!entries) {
		entries = alloc_pgtable_page(info->iommu->node, GFP_ATOMIC);
		if (!entries)
			return NULL;

		/*
		 * The pasid directory table entry won't be freed after
		 * allocation. No worry about the race with free and
		 * clear. However, this entry might be populated by others
		 * while we are preparing it. Use theirs with a retry.
		 */
		if (cmpxchg64(&dir[dir_index].val, 0ULL,
			      (u64)virt_to_phys(entries) | PASID_PTE_PRESENT)) {
			free_pgtable_page(entries);
			goto retry;
		}
		if (!ecap_coherent(info->iommu->ecap)) {
			clflush_cache_range(entries, VTD_PAGE_SIZE);
			clflush_cache_range(&dir[dir_index].val, sizeof(*dir));
		}
	}

	return &entries[index];
}

/*
 * Interfaces for PASID table entry manipulation:
 */
static inline void pasid_clear_entry(struct pasid_entry *pe)
{
	WRITE_ONCE(pe->val[0], 0);
	WRITE_ONCE(pe->val[1], 0);
	WRITE_ONCE(pe->val[2], 0);
	WRITE_ONCE(pe->val[3], 0);
	WRITE_ONCE(pe->val[4], 0);
	WRITE_ONCE(pe->val[5], 0);
	WRITE_ONCE(pe->val[6], 0);
	WRITE_ONCE(pe->val[7], 0);
}

static inline void pasid_clear_entry_with_fpd(struct pasid_entry *pe)
{
	WRITE_ONCE(pe->val[0], PASID_PTE_FPD);
	WRITE_ONCE(pe->val[1], 0);
	WRITE_ONCE(pe->val[2], 0);
	WRITE_ONCE(pe->val[3], 0);
	WRITE_ONCE(pe->val[4], 0);
	WRITE_ONCE(pe->val[5], 0);
	WRITE_ONCE(pe->val[6], 0);
	WRITE_ONCE(pe->val[7], 0);
}

static void
intel_pasid_clear_entry(struct device *dev, u32 pasid, bool fault_ignore)
{
	struct pasid_entry *pe;

	pe = intel_pasid_get_entry(dev, pasid);
	if (WARN_ON(!pe))
		return;

	if (fault_ignore && pasid_pte_is_present(pe))
		pasid_clear_entry_with_fpd(pe);
	else
		pasid_clear_entry(pe);
}

static inline void pasid_set_bits(u64 *ptr, u64 mask, u64 bits)
{
	u64 old;

	old = READ_ONCE(*ptr);
	WRITE_ONCE(*ptr, (old & ~mask) | bits);
}

/*
 * Setup the DID(Domain Identifier) field (Bit 64~79) of scalable mode
 * PASID entry.
 */
static inline void
pasid_set_domain_id(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[1], GENMASK_ULL(15, 0), value);
}

/*
 * Get domain ID value of a scalable mode PASID entry.
 */
static inline u16
pasid_get_domain_id(struct pasid_entry *pe)
{
	return (u16)(READ_ONCE(pe->val[1]) & GENMASK_ULL(15, 0));
}

/*
 * Setup the SLPTPTR(Second Level Page Table Pointer) field (Bit 12~63)
 * of a scalable mode PASID entry.
 */
static inline void
pasid_set_slptr(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[0], VTD_PAGE_MASK, value);
}

/*
 * Setup the AW(Address Width) field (Bit 2~4) of a scalable mode PASID
 * entry.
 */
static inline void
pasid_set_address_width(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[0], GENMASK_ULL(4, 2), value << 2);
}

/*
 * Setup the PGTT(PASID Granular Translation Type) field (Bit 6~8)
 * of a scalable mode PASID entry.
 */
static inline void
pasid_set_translation_type(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[0], GENMASK_ULL(8, 6), value << 6);
}

/*
 * Enable fault processing by clearing the FPD(Fault Processing
 * Disable) field (Bit 1) of a scalable mode PASID entry.
 */
static inline void pasid_set_fault_enable(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[0], 1 << 1, 0);
}

/*
 * Setup the WPE(Write Protect Enable) field (Bit 132) of a
 * scalable mode PASID entry.
 */
static inline void pasid_set_wpe(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[2], 1 << 4, 1 << 4);
}

/*
 * Setup the P(Present) field (Bit 0) of a scalable mode PASID
 * entry.
 */
static inline void pasid_set_present(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[0], 1 << 0, 1);
}

/*
 * Setup Page Walk Snoop bit (Bit 87) of a scalable mode PASID
 * entry.
 */
static inline void pasid_set_page_snoop(struct pasid_entry *pe, bool value)
{
	pasid_set_bits(&pe->val[1], 1 << 23, value << 23);
}

/*
 * Setup No Execute Enable bit (Bit 133) of a scalable mode PASID
 * entry. It is required when XD bit of the first level page table
 * entry is about to be set.
 */
static inline void pasid_set_nxe(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[2], 1 << 5, 1 << 5);
}

/*
 * Setup the Page Snoop (PGSNP) field (Bit 88) of a scalable mode
 * PASID entry.
 */
static inline void
pasid_set_pgsnp(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[1], 1ULL << 24, 1ULL << 24);
}

/*
 * Setup the First Level Page table Pointer field (Bit 140~191)
 * of a scalable mode PASID entry.
 */
static inline void
pasid_set_flptr(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[2], VTD_PAGE_MASK, value);
}

/*
 * Setup the First Level Paging Mode field (Bit 130~131) of a
 * scalable mode PASID entry.
 */
static inline void
pasid_set_flpm(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[2], GENMASK_ULL(3, 2), value << 2);
}

static void
pasid_cache_invalidation_with_pasid(struct intel_iommu *iommu,
				    u16 did, u32 pasid)
{
	struct qi_desc desc;

	desc.qw0 = QI_PC_DID(did) | QI_PC_GRAN(QI_PC_PASID_SEL) |
		QI_PC_PASID(pasid) | QI_PC_TYPE;
	desc.qw1 = 0;
	desc.qw2 = 0;
	desc.qw3 = 0;

	qi_submit_sync(iommu, &desc, 1, 0);
}

static void
devtlb_invalidation_with_pasid(struct intel_iommu *iommu,
			       struct device *dev, u32 pasid)
{
	struct device_domain_info *info;
	u16 sid, qdep, pfsid;

	info = dev_iommu_priv_get(dev);
	if (!info || !info->ats_enabled)
		return;

	if (pci_dev_is_disconnected(to_pci_dev(dev)))
		return;

	sid = info->bus << 8 | info->devfn;
	qdep = info->ats_qdep;
	pfsid = info->pfsid;

	/*
	 * When PASID 0 is used, it indicates RID2PASID(DMA request w/o PASID),
	 * devTLB flush w/o PASID should be used. For non-zero PASID under
	 * SVA usage, device could do DMA with multiple PASIDs. It is more
	 * efficient to flush devTLB specific to the PASID.
	 */
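	/*
	 * An address of 0 with an invalidation mask of 64 - VTD_PAGE_SHIFT
	 * covers the device's entire address range, i.e. a full flush.
	 */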
	if (pasid == IOMMU_NO_PASID)
		qi_flush_dev_iotlb(iommu, sid, pfsid, qdep, 0, 64 - VTD_PAGE_SHIFT);
	else
		qi_flush_dev_iotlb_pasid(iommu, sid, pfsid, pasid, qdep, 0, 64 - VTD_PAGE_SHIFT);
}

void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev,
				 u32 pasid, bool fault_ignore)
{
	struct pasid_entry *pte;
	u16 did, pgtt;

	spin_lock(&iommu->lock);
	pte = intel_pasid_get_entry(dev, pasid);
	if (WARN_ON(!pte) || !pasid_pte_is_present(pte)) {
		spin_unlock(&iommu->lock);
		return;
	}

	did = pasid_get_domain_id(pte);
	pgtt = pasid_pte_get_pgtt(pte);
	intel_pasid_clear_entry(dev, pasid, fault_ignore);
	spin_unlock(&iommu->lock);

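	/*
	 * Flush caches for the torn-down entry: the PASID cache first, then
	 * the IOTLB (PASID-based for first-level or pass-through entries),
	 * and finally the device TLB where needed.
	 */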
	if (!ecap_coherent(iommu->ecap))
		clflush_cache_range(pte, sizeof(*pte));

	pasid_cache_invalidation_with_pasid(iommu, did, pasid);

	if (pgtt == PASID_ENTRY_PGTT_PT || pgtt == PASID_ENTRY_PGTT_FL_ONLY)
		qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
	else
		iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);

	/* Device IOTLB doesn't need to be flushed in caching mode. */
	if (!cap_caching_mode(iommu->cap))
		devtlb_invalidation_with_pasid(iommu, dev, pasid);
}

/*
 * This function flushes cache for a newly setup pasid table entry.
 * Caller of it should not modify the in-use pasid table entries.
 */
static void pasid_flush_caches(struct intel_iommu *iommu,
			       struct pasid_entry *pte,
			       u32 pasid, u16 did)
{
	if (!ecap_coherent(iommu->ecap))
		clflush_cache_range(pte, sizeof(*pte));

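	/*
	 * In caching mode the IOMMU (typically a virtual one) may cache
	 * not-present entries, so the new entry is made visible with
	 * explicit PASID-cache and PASID-based IOTLB invalidations.
	 * Otherwise not-present entries are not cached and flushing the
	 * write buffer, where required, is sufficient.
	 */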
	if (cap_caching_mode(iommu->cap)) {
		pasid_cache_invalidation_with_pasid(iommu, did, pasid);
		qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
	} else {
		iommu_flush_write_buffer(iommu);
	}
}

/*
 * Set up the scalable mode pasid table entry for first only
 * translation type.
 */
int intel_pasid_setup_first_level(struct intel_iommu *iommu,
				  struct device *dev, pgd_t *pgd,
				  u32 pasid, u16 did, int flags)
{
	struct pasid_entry *pte;

	if (!ecap_flts(iommu->ecap)) {
		pr_err("No first level translation support on %s\n",
		       iommu->name);
		return -EINVAL;
	}

	if ((flags & PASID_FLAG_FL5LP) && !cap_fl5lp_support(iommu->cap)) {
		pr_err("No 5-level paging support for first-level on %s\n",
		       iommu->name);
		return -EINVAL;
	}

	spin_lock(&iommu->lock);
	pte = intel_pasid_get_entry(dev, pasid);
	if (!pte) {
		spin_unlock(&iommu->lock);
		return -ENODEV;
	}

	if (pasid_pte_is_present(pte)) {
		spin_unlock(&iommu->lock);
		return -EBUSY;
	}

	pasid_clear_entry(pte);

	/* Setup the first level page table pointer: */
	pasid_set_flptr(pte, (u64)__pa(pgd));

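	/*
	 * First-level paging mode value 1 selects 5-level paging; the
	 * cleared default (0) selects 4-level paging.
	 */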
	if (flags & PASID_FLAG_FL5LP)
		pasid_set_flpm(pte, 1);

	if (flags & PASID_FLAG_PAGE_SNOOP)
		pasid_set_pgsnp(pte);

	pasid_set_domain_id(pte, did);
	pasid_set_address_width(pte, iommu->agaw);
	pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
	pasid_set_nxe(pte);

	/* Setup Present and PASID Granular Translation Type: */
	pasid_set_translation_type(pte, PASID_ENTRY_PGTT_FL_ONLY);
	pasid_set_present(pte);
	spin_unlock(&iommu->lock);

	pasid_flush_caches(iommu, pte, pasid, did);

	return 0;
}

/*
 * Skip top levels of page tables for iommu which has less agaw
 * than default. Unnecessary for PT mode.
 */
static inline int iommu_skip_agaw(struct dmar_domain *domain,
				  struct intel_iommu *iommu,
				  struct dma_pte **pgd)
{
	int agaw;

	for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
		*pgd = phys_to_virt(dma_pte_addr(*pgd));
		if (!dma_pte_present(*pgd))
			return -EINVAL;
	}

	return agaw;
}

/*
 * Set up the scalable mode pasid entry for second only translation type.
 */
int intel_pasid_setup_second_level(struct intel_iommu *iommu,
				   struct dmar_domain *domain,
				   struct device *dev, u32 pasid)
{
	struct pasid_entry *pte;
	struct dma_pte *pgd;
	u64 pgd_val;
	int agaw;
	u16 did;

	/*
	 * If hardware advertises no support for second level
	 * translation, return directly.
	 */
	if (!ecap_slts(iommu->ecap)) {
		pr_err("No second level translation support on %s\n",
		       iommu->name);
		return -EINVAL;
	}

	pgd = domain->pgd;
	agaw = iommu_skip_agaw(domain, iommu, &pgd);
	if (agaw < 0) {
		dev_err(dev, "Invalid domain page table\n");
		return -EINVAL;
	}

	pgd_val = virt_to_phys(pgd);
	did = domain_id_iommu(domain, iommu);

	spin_lock(&iommu->lock);
	pte = intel_pasid_get_entry(dev, pasid);
	if (!pte) {
		spin_unlock(&iommu->lock);
		return -ENODEV;
	}

	if (pasid_pte_is_present(pte)) {
		spin_unlock(&iommu->lock);
		return -EBUSY;
	}

	pasid_clear_entry(pte);
	pasid_set_domain_id(pte, did);
	pasid_set_slptr(pte, pgd_val);
	pasid_set_address_width(pte, agaw);
	pasid_set_translation_type(pte, PASID_ENTRY_PGTT_SL_ONLY);
	pasid_set_fault_enable(pte);
	pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));

	pasid_set_present(pte);
	spin_unlock(&iommu->lock);

	pasid_flush_caches(iommu, pte, pasid, did);

	return 0;
}

/*
 * Set up the scalable mode pasid entry for passthrough translation type.
 */
int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
				   struct dmar_domain *domain,
				   struct device *dev, u32 pasid)
{
	u16 did = FLPT_DEFAULT_DID;
	struct pasid_entry *pte;

	spin_lock(&iommu->lock);
	pte = intel_pasid_get_entry(dev, pasid);
	if (!pte) {
		spin_unlock(&iommu->lock);
		return -ENODEV;
	}

	if (pasid_pte_is_present(pte)) {
		spin_unlock(&iommu->lock);
		return -EBUSY;
	}

	pasid_clear_entry(pte);
	pasid_set_domain_id(pte, did);
	pasid_set_address_width(pte, iommu->agaw);
	pasid_set_translation_type(pte, PASID_ENTRY_PGTT_PT);
	pasid_set_fault_enable(pte);
	pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
	pasid_set_present(pte);
	spin_unlock(&iommu->lock);

	pasid_flush_caches(iommu, pte, pasid, did);

	return 0;
}

/*
 * Set the page snoop control for a pasid entry which has been set up.
 */
void intel_pasid_setup_page_snoop_control(struct intel_iommu *iommu,
					  struct device *dev, u32 pasid)
{
	struct pasid_entry *pte;
	u16 did;

	spin_lock(&iommu->lock);
	pte = intel_pasid_get_entry(dev, pasid);
	if (WARN_ON(!pte || !pasid_pte_is_present(pte))) {
		spin_unlock(&iommu->lock);
		return;
	}

	pasid_set_pgsnp(pte);
	did = pasid_get_domain_id(pte);
	spin_unlock(&iommu->lock);

	if (!ecap_coherent(iommu->ecap))
		clflush_cache_range(pte, sizeof(*pte));

	/*
	 * VT-d spec 3.4 table23 provides guidance on cache invalidation:
	 *
	 * - PASID-selective-within-Domain PASID-cache invalidation
	 * - PASID-selective PASID-based IOTLB invalidation
	 * - If (pasid is RID_PASID)
	 *    - Global Device-TLB invalidation to affected functions
	 *   Else
	 *    - PASID-based Device-TLB invalidation (with S=1 and
	 *      Addr[63:12]=0x7FFFFFFF_FFFFF) to affected functions
	 */
	pasid_cache_invalidation_with_pasid(iommu, did, pasid);
	qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);

	/* Device IOTLB doesn't need to be flushed in caching mode. */
	if (!cap_caching_mode(iommu->cap))
		devtlb_invalidation_with_pasid(iommu, dev, pasid);
}