xref: /openbmc/linux/arch/riscv/mm/pgtable.c (revision 6de298ff)
// SPDX-License-Identifier: GPL-2.0

#include <asm/pgalloc.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/pgtable.h>

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
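/*
 * Huge-mapping helpers used by the generic vmalloc/ioremap code when
 * CONFIG_HAVE_ARCH_HUGE_VMAP is enabled.  A *_set_huge() helper that
 * returns 0 tells the caller that this level cannot be mapped with a
 * single leaf entry, so it falls back to the next smaller mapping size;
 * P4D-level leaf mappings are never created here.
 */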
int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
{
	return 0;
}

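/* Nothing to undo: no P4D-level leaf mappings are ever installed. */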
void p4d_clear_huge(p4d_t *p4d)
{
}

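/* Install a leaf entry at the PUD level, mapping @phys with @prot. */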
int pud_set_huge(pud_t *pud, phys_addr_t phys, pgprot_t prot)
{
	pud_t new_pud = pfn_pud(__phys_to_pfn(phys), prot);

	set_pud(pud, new_pud);
	return 1;
}

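/* Clear @pud if it is a leaf entry; return 1 if something was cleared. */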
int pud_clear_huge(pud_t *pud)
{
	if (!pud_leaf(READ_ONCE(*pud)))
		return 0;
	pud_clear(pud);
	return 1;
}

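/*
 * Free the page-table pages below @pud so that the range can be remapped
 * with a single huge leaf entry: clear the PUD, flush the kernel mapping,
 * then release every PTE table referenced by the old PMD table and finally
 * the PMD table itself.
 */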
int pud_free_pmd_page(pud_t *pud, unsigned long addr)
{
	pmd_t *pmd = pud_pgtable(*pud);
	int i;

	pud_clear(pud);

	flush_tlb_kernel_range(addr, addr + PUD_SIZE);

	for (i = 0; i < PTRS_PER_PMD; i++) {
		if (!pmd_none(pmd[i])) {
			pte_t *pte = (pte_t *)pmd_page_vaddr(pmd[i]);

			pte_free_kernel(NULL, pte);
		}
	}

	pmd_free(NULL, pmd);

	return 1;
}

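/* Install a leaf entry at the PMD level, mapping @phys with @prot. */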
int pmd_set_huge(pmd_t *pmd, phys_addr_t phys, pgprot_t prot)
{
	pmd_t new_pmd = pfn_pmd(__phys_to_pfn(phys), prot);

	set_pmd(pmd, new_pmd);
	return 1;
}

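/* Clear @pmd if it is a leaf entry; return 1 if something was cleared. */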
int pmd_clear_huge(pmd_t *pmd)
{
	if (!pmd_leaf(READ_ONCE(*pmd)))
		return 0;
	pmd_clear(pmd);
	return 1;
}

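/*
 * Free the PTE table below @pmd so that the range can be remapped with a
 * single huge leaf entry: clear the PMD, flush the kernel mapping, then
 * release the old PTE table.
 */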
int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
{
	pte_t *pte = (pte_t *)pmd_page_vaddr(*pmd);

	pmd_clear(pmd);

	flush_tlb_kernel_range(addr, addr + PMD_SIZE);
	pte_free_kernel(NULL, pte);
	return 1;
}

#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
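/*
 * Atomically clear the PMD entry being collapsed and flush the mapping;
 * called from the huge-page collapse path (see collapse_huge_page()).
 */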
pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp)
{
	pmd_t pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(pmd_trans_huge(*pmdp));
	/*
	 * When leaf PTE entries (regular pages) are collapsed into a leaf
	 * PMD entry (huge page), a valid non-leaf PTE is converted into a
	 * valid leaf PTE at the level 1 page table.  Since the sfence.vma
	 * forms that specify an address only apply to leaf PTEs, we need a
	 * global flush here.  collapse_huge_page() assumes these flushes are
	 * eager, so just do the fence here.
	 */
	flush_tlb_mm(vma->vm_mm);
	return pmd;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */