xref: /openbmc/linux/arch/arm64/mm/hugetlbpage.c (revision 4c5a116a)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm64/mm/hugetlbpage.c
 *
 * Copyright (C) 2013 Linaro Ltd.
 *
 * Based on arch/x86/mm/hugetlbpage.c.
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>

/*
 * HugeTLB Support Matrix
 *
 * ---------------------------------------------------
 * | Page Size | CONT PTE |  PMD  | CONT PMD |  PUD  |
 * ---------------------------------------------------
 * |     4K    |   64K    |   2M  |    32M   |   1G  |
 * |    16K    |    2M    |  32M  |     1G   |       |
 * |    64K    |    2M    | 512M  |    16G   |       |
 * ---------------------------------------------------
 */
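/*
 * A CONT PTE or CONT PMD huge page is a naturally aligned run of
 * entries with the contiguous hint bit set, which allows the TLB to
 * cache the whole run as a single translation.
 */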

/*
 * Reserve CMA areas for the largest supported gigantic
 * huge page when requested. Any other smaller gigantic
 * huge pages could still be served from those areas.
 */
#ifdef CONFIG_CMA
void __init arm64_hugetlb_cma_reserve(void)
{
	int order;

#ifdef CONFIG_ARM64_4K_PAGES
	order = PUD_SHIFT - PAGE_SHIFT;
#else
	order = CONT_PMD_SHIFT + PMD_SHIFT - PAGE_SHIFT;
#endif
	/*
	 * HugeTLB CMA reservation is required for gigantic
	 * huge pages which could not be allocated via the
	 * page allocator. Just warn if there is any change
	 * breaking this assumption.
	 */
	WARN_ON(order <= MAX_ORDER);
	hugetlb_cma_reserve(order);
}
#endif /* CONFIG_CMA */

#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
bool arch_hugetlb_migration_supported(struct hstate *h)
{
	size_t pagesize = huge_page_size(h);

	switch (pagesize) {
#ifdef CONFIG_ARM64_4K_PAGES
	case PUD_SIZE:
#endif
	case PMD_SIZE:
	case CONT_PMD_SIZE:
	case CONT_PTE_SIZE:
		return true;
	}
	pr_warn("%s: unrecognized huge page size 0x%lx\n",
			__func__, pagesize);
	return false;
}
#endif

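/*
 * pmd_huge()/pud_huge() report whether an entry is a leaf (block)
 * mapping: non-empty and without the table bit that would mark it as
 * a pointer to a next-level table.
 */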
int pmd_huge(pmd_t pmd)
{
	return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
}

int pud_huge(pud_t pud)
{
#ifndef __PAGETABLE_PMD_FOLDED
	return pud_val(pud) && !(pud_val(pud) & PUD_TABLE_BIT);
#else
	return 0;
#endif
}

/*
 * Select all bits except the pfn
 */
static inline pgprot_t pte_pgprot(pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);

	return __pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^ pte_val(pte));
}

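/*
 * Work out how many contiguous entries make up the huge page that
 * @ptep belongs to: CONT_PMDS entries of PMD_SIZE if @ptep sits at
 * pmd level, otherwise CONT_PTES entries of PAGE_SIZE. The per-entry
 * size is returned through @pgsize.
 */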
static int find_num_contig(struct mm_struct *mm, unsigned long addr,
			   pte_t *ptep, size_t *pgsize)
{
	pgd_t *pgdp = pgd_offset(mm, addr);
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;

	*pgsize = PAGE_SIZE;
	p4dp = p4d_offset(pgdp, addr);
	pudp = pud_offset(p4dp, addr);
	pmdp = pmd_offset(pudp, addr);
	if ((pte_t *)pmdp == ptep) {
		*pgsize = PMD_SIZE;
		return CONT_PMDS;
	}
	return CONT_PTES;
}

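/*
 * Translate a huge page size into the number of page table entries
 * used to map it and the size covered by each entry. Returns 0 for
 * sizes that are not supported huge page sizes.
 */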
static inline int num_contig_ptes(unsigned long size, size_t *pgsize)
{
	int contig_ptes = 0;

	*pgsize = size;

	switch (size) {
#ifdef CONFIG_ARM64_4K_PAGES
	case PUD_SIZE:
#endif
	case PMD_SIZE:
		contig_ptes = 1;
		break;
	case CONT_PMD_SIZE:
		*pgsize = PMD_SIZE;
		contig_ptes = CONT_PMDS;
		break;
	case CONT_PTE_SIZE:
		*pgsize = PAGE_SIZE;
		contig_ptes = CONT_PTES;
		break;
	}

	return contig_ptes;
}

/*
 * Changing some bits of contiguous entries requires us to follow a
 * Break-Before-Make approach, breaking the whole contiguous set
 * before we can change any entries. See ARM DDI 0487A.k_iss10775,
 * "Misprogramming of the Contiguous bit", page D4-1762.
 *
 * This helper performs the break step.
 */
static pte_t get_clear_flush(struct mm_struct *mm,
			     unsigned long addr,
			     pte_t *ptep,
			     unsigned long pgsize,
			     unsigned long ncontig)
{
	pte_t orig_pte = huge_ptep_get(ptep);
	bool valid = pte_valid(orig_pte);
	unsigned long i, saddr = addr;

	for (i = 0; i < ncontig; i++, addr += pgsize, ptep++) {
		pte_t pte = ptep_get_and_clear(mm, addr, ptep);

		/*
		 * If HW_AFDBM is enabled, then the HW could turn on
		 * the dirty or accessed bit for any page in the set,
		 * so check them all.
		 */
		if (pte_dirty(pte))
			orig_pte = pte_mkdirty(orig_pte);

		if (pte_young(pte))
			orig_pte = pte_mkyoung(orig_pte);
	}

	if (valid) {
		struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);
		flush_tlb_range(&vma, saddr, addr);
	}
	return orig_pte;
}

/*
 * Changing some bits of contiguous entries requires us to follow a
 * Break-Before-Make approach, breaking the whole contiguous set
 * before we can change any entries. See ARM DDI 0487A.k_iss10775,
 * "Misprogramming of the Contiguous bit", page D4-1762.
 *
 * This helper performs the break step for use cases where the
 * original pte is not needed.
 */
static void clear_flush(struct mm_struct *mm,
			     unsigned long addr,
			     pte_t *ptep,
			     unsigned long pgsize,
			     unsigned long ncontig)
{
	struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);
	unsigned long i, saddr = addr;

	for (i = 0; i < ncontig; i++, addr += pgsize, ptep++)
		pte_clear(mm, addr, ptep);

	flush_tlb_range(&vma, saddr, addr);
}

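/*
 * Install a huge pte. Non-contiguous sizes (PMD/PUD block mappings)
 * are written directly; contiguous sizes first break and flush the
 * whole set of entries and then rewrite each entry with the
 * contiguous attributes, stepping the pfn by one entry's worth of
 * pages each time.
 */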
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
			    pte_t *ptep, pte_t pte)
{
	size_t pgsize;
	int i;
	int ncontig;
	unsigned long pfn, dpfn;
	pgprot_t hugeprot;

	/*
	 * Code needs to be expanded to handle huge swap and migration
	 * entries. Needed for HUGETLB and MEMORY_FAILURE.
	 */
	WARN_ON(!pte_present(pte));

	if (!pte_cont(pte)) {
		set_pte_at(mm, addr, ptep, pte);
		return;
	}

	ncontig = find_num_contig(mm, addr, ptep, &pgsize);
	pfn = pte_pfn(pte);
	dpfn = pgsize >> PAGE_SHIFT;
	hugeprot = pte_pgprot(pte);

	clear_flush(mm, addr, ptep, pgsize, ncontig);

	for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
		set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot));
}

void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
			  pte_t *ptep, pte_t pte, unsigned long sz)
{
	int i, ncontig;
	size_t pgsize;

	ncontig = num_contig_ptes(sz, &pgsize);

	for (i = 0; i < ncontig; i++, ptep++)
		set_pte(ptep, pte);
}

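/*
 * Allocate (or find) the page table entry used to map a huge page of
 * size @sz at @addr, returning a pointer to the entry at the level
 * that matches the size: the pud for PUD_SIZE, a pmd for PMD_SIZE and
 * CONT_PMD_SIZE, or a pte for CONT_PTE_SIZE.
 */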
pte_t *huge_pte_alloc(struct mm_struct *mm,
		      unsigned long addr, unsigned long sz)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep = NULL;

	pgdp = pgd_offset(mm, addr);
	p4dp = p4d_offset(pgdp, addr);
	pudp = pud_alloc(mm, p4dp, addr);
	if (!pudp)
		return NULL;

	if (sz == PUD_SIZE) {
		ptep = (pte_t *)pudp;
	} else if (sz == (CONT_PTE_SIZE)) {
		pmdp = pmd_alloc(mm, pudp, addr);
		if (!pmdp)
			return NULL;

		WARN_ON(addr & (sz - 1));
		/*
		 * Note that if this code were ever ported to the
		 * 32-bit arm platform then it would cause trouble in
		 * the case where CONFIG_HIGHPTE is set, since there
		 * will be no pte_unmap() to correspond with this
		 * pte_alloc_map().
		 */
		ptep = pte_alloc_map(mm, pmdp, addr);
	} else if (sz == PMD_SIZE) {
		if (IS_ENABLED(CONFIG_ARCH_WANT_HUGE_PMD_SHARE) &&
		    pud_none(READ_ONCE(*pudp)))
			ptep = huge_pmd_share(mm, addr, pudp);
		else
			ptep = (pte_t *)pmd_alloc(mm, pudp, addr);
	} else if (sz == (CONT_PMD_SIZE)) {
		pmdp = pmd_alloc(mm, pudp, addr);
		WARN_ON(addr & (sz - 1));
		return (pte_t *)pmdp;
	}

	return ptep;
}

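/*
 * Look up the page table entry for an existing huge page mapping of
 * size @sz at @addr. Leaf and non-present (swap/migration) entries
 * are returned at the level they are found; NULL means no suitable
 * entry exists.
 */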
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp, pud;
	pmd_t *pmdp, pmd;

	pgdp = pgd_offset(mm, addr);
	if (!pgd_present(READ_ONCE(*pgdp)))
		return NULL;

	p4dp = p4d_offset(pgdp, addr);
	if (!p4d_present(READ_ONCE(*p4dp)))
		return NULL;

	pudp = pud_offset(p4dp, addr);
	pud = READ_ONCE(*pudp);
	if (sz != PUD_SIZE && pud_none(pud))
		return NULL;
	/* hugepage or swap? */
	if (pud_huge(pud) || !pud_present(pud))
		return (pte_t *)pudp;
	/* table; check the next level */

	if (sz == CONT_PMD_SIZE)
		addr &= CONT_PMD_MASK;

	pmdp = pmd_offset(pudp, addr);
	pmd = READ_ONCE(*pmdp);
	if (!(sz == PMD_SIZE || sz == CONT_PMD_SIZE) &&
	    pmd_none(pmd))
		return NULL;
	if (pmd_huge(pmd) || !pmd_present(pmd))
		return (pte_t *)pmdp;

	if (sz == CONT_PTE_SIZE)
		return pte_offset_kernel(pmdp, (addr & CONT_PTE_MASK));

	return NULL;
}

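/*
 * Apply the attributes a huge pte needs for the VMA's huge page size:
 * the contiguous bit for CONT_PTE_SIZE and CONT_PMD_SIZE mappings,
 * nothing extra for PMD/PUD block mappings.
 */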
pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
			 struct page *page, int writable)
{
	size_t pagesize = huge_page_size(hstate_vma(vma));

	if (pagesize == CONT_PTE_SIZE) {
		entry = pte_mkcont(entry);
	} else if (pagesize == CONT_PMD_SIZE) {
		entry = pmd_pte(pmd_mkcont(pte_pmd(entry)));
	} else if (pagesize != PUD_SIZE && pagesize != PMD_SIZE) {
		pr_warn("%s: unrecognized huge page size 0x%lx\n",
			__func__, pagesize);
	}
	return entry;
}

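/*
 * Clear every page table entry backing a huge page of size @sz. The
 * TLB is not flushed here.
 */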
void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
		    pte_t *ptep, unsigned long sz)
{
	int i, ncontig;
	size_t pgsize;

	ncontig = num_contig_ptes(sz, &pgsize);

	for (i = 0; i < ncontig; i++, addr += pgsize, ptep++)
		pte_clear(mm, addr, ptep);
}

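/*
 * Clear a huge pte and return the value it held. For contiguous
 * ranges the whole set is cleared and flushed, and any dirty or young
 * bits set by hardware on individual entries are folded into the
 * returned pte.
 */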
pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
			      unsigned long addr, pte_t *ptep)
{
	int ncontig;
	size_t pgsize;
	pte_t orig_pte = huge_ptep_get(ptep);

	if (!pte_cont(orig_pte))
		return ptep_get_and_clear(mm, addr, ptep);

	ncontig = find_num_contig(mm, addr, ptep, &pgsize);

	return get_clear_flush(mm, addr, ptep, pgsize, ncontig);
}

/*
 * huge_ptep_set_access_flags will update access flags (dirty, accessed)
 * and write permission.
 *
 * For a contiguous huge pte range, write permission only needs to be
 * checked on the first pte in the set. Then, for all the contiguous
 * ptes, we check whether there is a discrepancy in the dirty or young
 * bits.
 */
static int __cont_access_flags_changed(pte_t *ptep, pte_t pte, int ncontig)
{
	int i;

	if (pte_write(pte) != pte_write(huge_ptep_get(ptep)))
		return 1;

	for (i = 0; i < ncontig; i++) {
		pte_t orig_pte = huge_ptep_get(ptep + i);

		if (pte_dirty(pte) != pte_dirty(orig_pte))
			return 1;

		if (pte_young(pte) != pte_young(orig_pte))
			return 1;
	}

	return 0;
}

int huge_ptep_set_access_flags(struct vm_area_struct *vma,
			       unsigned long addr, pte_t *ptep,
			       pte_t pte, int dirty)
{
	int ncontig, i;
	size_t pgsize = 0;
	unsigned long pfn = pte_pfn(pte), dpfn;
	pgprot_t hugeprot;
	pte_t orig_pte;

	if (!pte_cont(pte))
		return ptep_set_access_flags(vma, addr, ptep, pte, dirty);

	ncontig = find_num_contig(vma->vm_mm, addr, ptep, &pgsize);
	dpfn = pgsize >> PAGE_SHIFT;

	if (!__cont_access_flags_changed(ptep, pte, ncontig))
		return 0;

	orig_pte = get_clear_flush(vma->vm_mm, addr, ptep, pgsize, ncontig);

	/* Make sure we don't lose the dirty or young state */
	if (pte_dirty(orig_pte))
		pte = pte_mkdirty(pte);

	if (pte_young(orig_pte))
		pte = pte_mkyoung(pte);

	hugeprot = pte_pgprot(pte);
	for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
		set_pte_at(vma->vm_mm, addr, ptep, pfn_pte(pfn, hugeprot));

	return 1;
}

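/*
 * Write-protect a huge pte. Contiguous ranges are broken and flushed
 * first, then every entry is rewritten read-only so the whole set
 * stays consistent.
 */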
void huge_ptep_set_wrprotect(struct mm_struct *mm,
			     unsigned long addr, pte_t *ptep)
{
	unsigned long pfn, dpfn;
	pgprot_t hugeprot;
	int ncontig, i;
	size_t pgsize;
	pte_t pte;

	if (!pte_cont(READ_ONCE(*ptep))) {
		ptep_set_wrprotect(mm, addr, ptep);
		return;
	}

	ncontig = find_num_contig(mm, addr, ptep, &pgsize);
	dpfn = pgsize >> PAGE_SHIFT;

	pte = get_clear_flush(mm, addr, ptep, pgsize, ncontig);
	pte = pte_wrprotect(pte);

	hugeprot = pte_pgprot(pte);
	pfn = pte_pfn(pte);

	for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
		set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot));
}

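/*
 * Clear a huge pte and flush the TLB for its range. Contiguous ranges
 * go through the break helper so all entries are invalidated together.
 */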
void huge_ptep_clear_flush(struct vm_area_struct *vma,
			   unsigned long addr, pte_t *ptep)
{
	size_t pgsize;
	int ncontig;

	if (!pte_cont(READ_ONCE(*ptep))) {
		ptep_clear_flush(vma, addr, ptep);
		return;
	}

	ncontig = find_num_contig(vma->vm_mm, addr, ptep, &pgsize);
	clear_flush(vma->vm_mm, addr, ptep, pgsize, ncontig);
}

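/*
 * Register the huge page sizes supported for the configured base page
 * size (see the support matrix at the top of this file).
 */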
static int __init hugetlbpage_init(void)
{
#ifdef CONFIG_ARM64_4K_PAGES
	hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
#endif
	hugetlb_add_hstate(CONT_PMD_SHIFT - PAGE_SHIFT);
	hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
	hugetlb_add_hstate(CONT_PTE_SHIFT - PAGE_SHIFT);

	return 0;
}
arch_initcall(hugetlbpage_init);

bool __init arch_hugetlb_valid_size(unsigned long size)
{
	switch (size) {
#ifdef CONFIG_ARM64_4K_PAGES
	case PUD_SIZE:
#endif
	case CONT_PMD_SIZE:
	case PMD_SIZE:
	case CONT_PTE_SIZE:
		return true;
	}

	return false;
}