// SPDX-License-Identifier: GPL-2.0-only
/*
 * CPU-agnostic AMD IO page table v2 allocator.
 *
 * Copyright (C) 2022, 2023 Advanced Micro Devices, Inc.
 * Author: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
 * Author: Vasant Hegde <vasant.hegde@amd.com>
 */

#define pr_fmt(fmt)	"AMD-Vi: " fmt
#define dev_fmt(fmt)	pr_fmt(fmt)

#include <linux/bitops.h>
#include <linux/io-pgtable.h>
#include <linux/kernel.h>

#include <asm/barrier.h>

#include "amd_iommu_types.h"
#include "amd_iommu.h"
#define IOMMU_PAGE_PRESENT	BIT_ULL(0)	/* Is present */
#define IOMMU_PAGE_RW		BIT_ULL(1)	/* Writeable */
#define IOMMU_PAGE_USER		BIT_ULL(2)	/* Userspace addressable */
#define IOMMU_PAGE_PWT		BIT_ULL(3)	/* Page write through */
#define IOMMU_PAGE_PCD		BIT_ULL(4)	/* Page cache disabled */
#define IOMMU_PAGE_ACCESS	BIT_ULL(5)	/* Was accessed (updated by IOMMU) */
#define IOMMU_PAGE_DIRTY	BIT_ULL(6)	/* Was written to (updated by IOMMU) */
#define IOMMU_PAGE_PSE		BIT_ULL(7)	/* Page Size Extensions */
#define IOMMU_PAGE_NX		BIT_ULL(63)	/* No execute */

#define MAX_PTRS_PER_PAGE	512

#define IOMMU_PAGE_SIZE_2M	BIT_ULL(21)
#define IOMMU_PAGE_SIZE_1G	BIT_ULL(30)
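
/*
 * The v2 page table uses the same PTE layout as the x86 CPU, so leaf
 * mappings can be 4KiB, 2MiB or 1GiB; 2M and 1G leaves carry the PSE
 * bit.  Each table level indexes 9 bits of the IOVA (see
 * PM_LEVEL_INDEX()): level 0 covers IOVA bits 20:12, level 1 bits
 * 29:21, and so on.
 */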

static inline int get_pgtable_level(void)
{
	return amd_iommu_gpt_level;
}

static inline bool is_large_pte(u64 pte)
{
	return (pte & IOMMU_PAGE_PSE);
}

static inline u64 set_pgtable_attr(u64 *page)
{
	u64 prot;

	prot = IOMMU_PAGE_PRESENT | IOMMU_PAGE_RW | IOMMU_PAGE_USER;
	prot |= IOMMU_PAGE_ACCESS;

	return (iommu_virt_to_phys(page) | prot);
}

static inline void *get_pgtable_pte(u64 pte)
{
	return iommu_phys_to_virt(pte & PM_ADDR_MASK);
}

static u64 set_pte_attr(u64 paddr, u64 pg_size, int prot)
{
	u64 pte;

	pte = __sme_set(paddr & PM_ADDR_MASK);
	pte |= IOMMU_PAGE_PRESENT | IOMMU_PAGE_USER;
	pte |= IOMMU_PAGE_ACCESS | IOMMU_PAGE_DIRTY;

	if (prot & IOMMU_PROT_IW)
		pte |= IOMMU_PAGE_RW;

	/* Large page */
	if (pg_size == IOMMU_PAGE_SIZE_1G || pg_size == IOMMU_PAGE_SIZE_2M)
		pte |= IOMMU_PAGE_PSE;

	return pte;
}

static inline u64 get_alloc_page_size(u64 size)
{
	if (size >= IOMMU_PAGE_SIZE_1G)
		return IOMMU_PAGE_SIZE_1G;

	if (size >= IOMMU_PAGE_SIZE_2M)
		return IOMMU_PAGE_SIZE_2M;

	return PAGE_SIZE;
}

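/*
 * Return the walk-terminating level for a leaf of @pg_size; used by
 * v2_alloc_pte() to decide how deep to descend before installing the
 * leaf PTE.
 */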
static inline int page_size_to_level(u64 pg_size)
{
	if (pg_size == IOMMU_PAGE_SIZE_1G)
		return PAGE_MODE_3_LEVEL;
	if (pg_size == IOMMU_PAGE_SIZE_2M)
		return PAGE_MODE_2_LEVEL;

	return PAGE_MODE_1_LEVEL;
}

static inline void free_pgtable_page(u64 *pt)
{
	free_page((unsigned long)pt);
}

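/*
 * Recursively free a sub-tree of page table pages.  Large-page leaves
 * are skipped: they map memory directly and do not point to a lower
 * level table.
 */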
static void free_pgtable(u64 *pt, int level)
{
	u64 *p;
	int i;

	for (i = 0; i < MAX_PTRS_PER_PAGE; i++) {
		/* PTE present? */
		if (!IOMMU_PTE_PRESENT(pt[i]))
			continue;

		if (is_large_pte(pt[i]))
			continue;

		/*
		 * Free the next level. No need to look at l1 tables here since
		 * they can only contain leaf PTEs; just free them directly.
		 */
		p = get_pgtable_pte(pt[i]);
		if (level > 2)
			free_pgtable(p, level - 1);
		else
			free_pgtable_page(p);
	}

	free_pgtable_page(pt);
}

/*
 * Allocate any missing page table pages needed to map @iova with a
 * page of @pg_size, and return a pointer to the target PTE.  Any
 * conflicting entry found along the walk is torn down, and *@updated
 * is set so the caller knows an IOTLB flush is needed.
 */
static u64 *v2_alloc_pte(int nid, u64 *pgd, unsigned long iova,
			 unsigned long pg_size, gfp_t gfp, bool *updated)
{
	u64 *pte, *page;
	int level, end_level;

	level = get_pgtable_level() - 1;
	end_level = page_size_to_level(pg_size);
	pte = &pgd[PM_LEVEL_INDEX(level, iova)];
	iova = PAGE_SIZE_ALIGN(iova, PAGE_SIZE);
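
	/*
	 * Walk top-down, installing missing intermediate tables with
	 * cmpxchg64() so that a concurrent walker sees either the old
	 * entry or a fully initialised new table page, never a partial
	 * one.
	 */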
	while (level >= end_level) {
		u64 __pte, __npte;

		__pte = *pte;

		if (IOMMU_PTE_PRESENT(__pte) && is_large_pte(__pte)) {
			/* Unmap large pte */
			cmpxchg64(pte, *pte, 0ULL);
			*updated = true;
			continue;
		}

		if (!IOMMU_PTE_PRESENT(__pte)) {
			page = alloc_pgtable_page(nid, gfp);
			if (!page)
				return NULL;

			__npte = set_pgtable_attr(page);
			/* pte could have been changed somewhere. */
			if (cmpxchg64(pte, __pte, __npte) != __pte)
				free_pgtable_page(page);
			else if (IOMMU_PTE_PRESENT(__pte))
				*updated = true;

			continue;
		}

		level -= 1;
		pte = get_pgtable_pte(__pte);
		pte = &pte[PM_LEVEL_INDEX(level, iova)];
	}

	/* Tear down existing pte entries */
	if (IOMMU_PTE_PRESENT(*pte)) {
		u64 *__pte;

		*updated = true;
		__pte = get_pgtable_pte(*pte);
		cmpxchg64(pte, *pte, 0ULL);
		if (pg_size == IOMMU_PAGE_SIZE_1G)
			free_pgtable(__pte, end_level - 1);
		else if (pg_size == IOMMU_PAGE_SIZE_2M)
			free_pgtable_page(__pte);
	}

	return pte;
}

/*
 * Look up the PTE that maps @iova.  Returns a pointer to the PTE, or
 * NULL if the address is not mapped; on success, *page_size is set to
 * the page size covered by the returned PTE.
 */
static u64 *fetch_pte(struct amd_io_pgtable *pgtable,
		      unsigned long iova, unsigned long *page_size)
{
	u64 *pte;
	int level;

	level = get_pgtable_level() - 1;
	pte = &pgtable->pgd[PM_LEVEL_INDEX(level, iova)];
	/* Default page size is 4K */
	*page_size = PAGE_SIZE;

	while (level) {
		/* Not present */
		if (!IOMMU_PTE_PRESENT(*pte))
			return NULL;

		/* Walk to the next level */
		pte = get_pgtable_pte(*pte);
		pte = &pte[PM_LEVEL_INDEX(level - 1, iova)];

		/* Large page */
		if (is_large_pte(*pte)) {
			if (level == PAGE_MODE_3_LEVEL)
				*page_size = IOMMU_PAGE_SIZE_1G;
			else if (level == PAGE_MODE_2_LEVEL)
				*page_size = IOMMU_PAGE_SIZE_2M;
			else
				return NULL;	/* Wrongly set PSE bit in PTE */

			break;
		}

		level -= 1;
	}

	return pte;
}
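
/*
 * Map @pgcount pages of @pgsize bytes starting at @iova.  The range is
 * carved into chunks of get_alloc_page_size(pgsize); if an existing
 * entry had to be torn down along the way, the stale IOTLB entries are
 * flushed before returning.
 */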
static int iommu_v2_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
			      phys_addr_t paddr, size_t pgsize, size_t pgcount,
			      int prot, gfp_t gfp, size_t *mapped)
{
	struct protection_domain *pdom = io_pgtable_ops_to_domain(ops);
	struct io_pgtable_cfg *cfg = &pdom->iop.iop.cfg;
	u64 *pte;
	unsigned long map_size;
	unsigned long mapped_size = 0;
	unsigned long o_iova = iova;
	size_t size = pgcount << __ffs(pgsize);
	int count = 0;
	int ret = 0;
	bool updated = false;

	if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize) || !pgcount)
		return -EINVAL;

	if (!(prot & IOMMU_PROT_MASK))
		return -EINVAL;
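
	/*
	 * get_alloc_page_size() is constant across the loop, so each
	 * iteration maps one chunk of the largest supported page size
	 * not exceeding @pgsize.
	 */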
	while (mapped_size < size) {
		map_size = get_alloc_page_size(pgsize);
		pte = v2_alloc_pte(pdom->nid, pdom->iop.pgd,
				   iova, map_size, gfp, &updated);
		if (!pte) {
			ret = -EINVAL;
			goto out;
		}

		*pte = set_pte_attr(paddr, map_size, prot);

		count++;
		iova += map_size;
		paddr += map_size;
		mapped_size += map_size;
	}

out:
	if (updated) {
		if (count > 1)
			amd_iommu_flush_tlb(&pdom->domain, 0);
		else
			amd_iommu_flush_page(&pdom->domain, 0, o_iova);
	}

	if (mapped)
		*mapped += mapped_size;

	return ret;
}
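
/*
 * Unmap up to @pgcount pages of @pgsize bytes starting at @iova by
 * clearing the leaf PTEs.  Returns the number of bytes actually
 * unmapped, which may fall short if an unmapped hole is encountered.
 */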
static unsigned long iommu_v2_unmap_pages(struct io_pgtable_ops *ops,
					  unsigned long iova,
					  size_t pgsize, size_t pgcount,
					  struct iommu_iotlb_gather *gather)
{
	struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &pgtable->iop.cfg;
	unsigned long unmap_size;
	unsigned long unmapped = 0;
	size_t size = pgcount << __ffs(pgsize);
	u64 *pte;

	if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize || !pgcount))
		return 0;

	while (unmapped < size) {
		pte = fetch_pte(pgtable, iova, &unmap_size);
		if (!pte)
			return unmapped;

		*pte = 0ULL;

		iova = (iova & ~(unmap_size - 1)) + unmap_size;
		unmapped += unmap_size;
	}

	return unmapped;
}
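
/*
 * Translate @iova to the physical address it maps to, preserving the
 * offset within the (possibly large) page.  Returns 0 if the address
 * is not mapped.
 */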
static phys_addr_t iommu_v2_iova_to_phys(struct io_pgtable_ops *ops, unsigned long iova)
{
	struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
	unsigned long offset_mask, pte_pgsize;
	u64 *pte, __pte;

	pte = fetch_pte(pgtable, iova, &pte_pgsize);
	if (!pte || !IOMMU_PTE_PRESENT(*pte))
		return 0;

	offset_mask = pte_pgsize - 1;
	__pte = __sme_clr(*pte & PM_ADDR_MASK);

	return (__pte & ~offset_mask) | (iova & offset_mask);
}

/*
 * The io-pgtable framework expects a set of flush callbacks, but
 * invalidation for this format is issued directly via
 * amd_iommu_flush_tlb() and amd_iommu_flush_page() in the map path
 * above, so these are no-ops.
 */
static void v2_tlb_flush_all(void *cookie)
{
}

static void v2_tlb_flush_walk(unsigned long iova, size_t size,
			      size_t granule, void *cookie)
{
}

static void v2_tlb_add_page(struct iommu_iotlb_gather *gather,
			    unsigned long iova, size_t granule,
			    void *cookie)
{
}

static const struct iommu_flush_ops v2_flush_ops = {
	.tlb_flush_all	= v2_tlb_flush_all,
	.tlb_flush_walk	= v2_tlb_flush_walk,
	.tlb_add_page	= v2_tlb_add_page,
};

static void v2_free_pgtable(struct io_pgtable *iop)
{
	struct protection_domain *pdom;
	struct amd_io_pgtable *pgtable = container_of(iop, struct amd_io_pgtable, iop);

	pdom = container_of(pgtable, struct protection_domain, iop);
	if (!(pdom->flags & PD_IOMMUV2_MASK))
		return;

	/*
	 * Make changes visible to IOMMUs. No need to clear gcr3 entry
	 * as gcr3 table is already freed.
	 */
	amd_iommu_domain_update(pdom);

	/* Free page table */
	free_pgtable(pgtable->pgd, get_pgtable_level());
}
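
/*
 * Allocate the root table, hook it into the domain's GCR3 table and
 * advertise the format's capabilities (page sizes, input/output
 * address widths) to the io-pgtable core.
 */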
static struct io_pgtable *v2_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct amd_io_pgtable *pgtable = io_pgtable_cfg_to_data(cfg);
	struct protection_domain *pdom = (struct protection_domain *)cookie;
	int ret;
	int ias = IOMMU_IN_ADDR_BIT_SIZE;

	pgtable->pgd = alloc_pgtable_page(pdom->nid, GFP_ATOMIC);
	if (!pgtable->pgd)
		return NULL;

	ret = amd_iommu_domain_set_gcr3(&pdom->domain, 0, iommu_virt_to_phys(pgtable->pgd));
	if (ret)
		goto err_free_pgd;

	if (get_pgtable_level() == PAGE_MODE_5_LEVEL)
		ias = 57;

	pgtable->iop.ops.map_pages    = iommu_v2_map_pages;
	pgtable->iop.ops.unmap_pages  = iommu_v2_unmap_pages;
	pgtable->iop.ops.iova_to_phys = iommu_v2_iova_to_phys;

	cfg->pgsize_bitmap = AMD_IOMMU_PGSIZES_V2;
	cfg->ias	   = ias;
	cfg->oas	   = IOMMU_OUT_ADDR_BIT_SIZE;
	cfg->tlb	   = &v2_flush_ops;

	return &pgtable->iop;

err_free_pgd:
	free_pgtable_page(pgtable->pgd);

	return NULL;
}
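
/*
 * Entry points used by the io-pgtable core to construct and tear down
 * the AMD IOMMU v2 page table format.
 */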
struct io_pgtable_init_fns io_pgtable_amd_iommu_v2_init_fns = {
	.alloc	= v2_alloc_pgtable,
	.free	= v2_free_pgtable,
};