xref: /openbmc/linux/arch/riscv/mm/pgtable.c (revision e0316069)
// SPDX-License-Identifier: GPL-2.0

#include <asm/pgalloc.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/pgtable.h>

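/*
 * Propagate new access/dirty bits into a PTE on a permission fault.
 * Reporting "changed" unconditionally is safe here: as the comment
 * below notes, update_mmu_cache() also covers the spurious-fault case.
 */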
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	if (!pte_same(ptep_get(ptep), entry))
		__set_pte_at(ptep, entry);
	/*
	 * update_mmu_cache will unconditionally execute, handling both
	 * the case that the PTE changed and the spurious fault case.
	 */
	return true;
}

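/*
 * Atomically test and clear the accessed ("young") bit so that a
 * racing hardware update of the PTE is not lost.  Returns nonzero
 * if the PTE was young.
 */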
int ptep_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long address,
			      pte_t *ptep)
{
	if (!pte_young(ptep_get(ptep)))
		return 0;
	return test_and_clear_bit(_PAGE_ACCESSED_OFFSET, &pte_val(*ptep));
}
EXPORT_SYMBOL_GPL(ptep_test_and_clear_young);

#ifdef CONFIG_64BIT
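/*
 * The paging mode (Sv39/Sv48/Sv57) is chosen at boot, so the P4D and
 * PUD levels are folded at runtime: when a level is disabled, the
 * entry one level up is simply reused for the level below.
 */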
pud_t *pud_offset(p4d_t *p4d, unsigned long address)
{
	if (pgtable_l4_enabled)
		return p4d_pgtable(p4dp_get(p4d)) + pud_index(address);

	return (pud_t *)p4d;
}

p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
{
	if (pgtable_l5_enabled)
		return pgd_pgtable(pgdp_get(pgd)) + p4d_index(address);

	return (p4d_t *)pgd;
}
#endif

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
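/* P4D-sized huge vmap mappings are not supported, so report failure. */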
int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
{
	return 0;
}

void p4d_clear_huge(p4d_t *p4d)
{
}

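/* Install a PUD-sized leaf mapping of @phys with protections @prot. */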
int pud_set_huge(pud_t *pud, phys_addr_t phys, pgprot_t prot)
{
	pud_t new_pud = pfn_pud(__phys_to_pfn(phys), prot);

	set_pud(pud, new_pud);
	return 1;
}

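/* Tear down a PUD-sized leaf mapping; returns 0 if @pud is not a leaf. */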
int pud_clear_huge(pud_t *pud)
{
	if (!pud_leaf(pudp_get(pud)))
		return 0;
	pud_clear(pud);
	return 1;
}

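/*
 * Clear @pud, flush the mapped range from the TLB, then free the PMD
 * table (and any PTE tables beneath it) that hung off it.
 */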
int pud_free_pmd_page(pud_t *pud, unsigned long addr)
{
	pmd_t *pmd = pud_pgtable(pudp_get(pud));
	int i;

	pud_clear(pud);

	flush_tlb_kernel_range(addr, addr + PUD_SIZE);

	for (i = 0; i < PTRS_PER_PMD; i++) {
		if (!pmd_none(pmd[i])) {
			pte_t *pte = (pte_t *)pmd_page_vaddr(pmd[i]);

			pte_free_kernel(NULL, pte);
		}
	}

	pmd_free(NULL, pmd);

	return 1;
}

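/* Install a PMD-sized leaf mapping, mirroring pud_set_huge() above. */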
int pmd_set_huge(pmd_t *pmd, phys_addr_t phys, pgprot_t prot)
{
	pmd_t new_pmd = pfn_pmd(__phys_to_pfn(phys), prot);

	set_pmd(pmd, new_pmd);
	return 1;
}

int pmd_clear_huge(pmd_t *pmd)
{
	if (!pmd_leaf(pmdp_get(pmd)))
		return 0;
	pmd_clear(pmd);
	return 1;
}

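/*
 * Free the PTE table under @pmd; the PMD-level counterpart of
 * pud_free_pmd_page() above.
 */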
int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
{
	pte_t *pte = (pte_t *)pmd_page_vaddr(pmdp_get(pmd));

	pmd_clear(pmd);

	flush_tlb_kernel_range(addr, addr + PMD_SIZE);
	pte_free_kernel(NULL, pte);
	return 1;
}

#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
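/*
 * Clear the PMD during THP collapse and flush the whole mm: the
 * comment below explains why an address-ranged sfence.vma is not
 * sufficient here.
 */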
pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp)
{
	pmd_t pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(pmd_trans_huge(pmdp_get(pmdp)));
	/*
	 * When leaf PTE entries (regular pages) are collapsed into a leaf
	 * PMD entry (huge page), a valid non-leaf PTE is converted into a
	 * valid leaf PTE at the level 1 page table.  Since the sfence.vma
	 * forms that specify an address only apply to leaf PTEs, we need a
	 * global flush here.  collapse_huge_page() assumes these flushes are
	 * eager, so just do the fence here.
	 */
	flush_tlb_mm(vma->vm_mm);
	return pmd;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */