// SPDX-License-Identifier: GPL-2.0

#include <asm/pgalloc.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/pgtable.h>

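/*
 * Called by the generic MM code to fix up access/dirty bits on an
 * existing mapping, including on a "spurious" fault where the PTE did
 * not actually change.  Returning true unconditionally makes the caller
 * run update_mmu_cache(), which handles both cases (see the comment in
 * the function body).
 */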
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	if (!pte_same(ptep_get(ptep), entry))
		__set_pte_at(ptep, entry);
	/*
	 * update_mmu_cache will unconditionally execute, handling both
	 * the case that the PTE changed and the spurious fault case.
	 */
	return true;
}

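/*
 * Test and clear the Accessed bit of a PTE.  The pte_young() fast path
 * skips the atomic when the bit is already clear; otherwise
 * test_and_clear_bit() updates the PTE atomically so concurrent updates
 * of other bits in the entry are not lost.
 */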
int ptep_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long address,
			      pte_t *ptep)
{
	if (!pte_young(ptep_get(ptep)))
		return 0;
	return test_and_clear_bit(_PAGE_ACCESSED_OFFSET, &pte_val(*ptep));
}
EXPORT_SYMBOL_GPL(ptep_test_and_clear_young);

#ifdef CONFIG_64BIT
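/*
 * On 64BIT the number of page-table levels (Sv39/Sv48/Sv57) is chosen
 * at boot, so the pud and p4d levels may be folded at runtime.  When a
 * level is folded, its walk helper just hands back the entry it was
 * given and the walk continues one level down.
 */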
pud_t *pud_offset(p4d_t *p4d, unsigned long address)
{
	if (pgtable_l4_enabled)
		return p4d_pgtable(p4dp_get(p4d)) + pud_index(address);

	return (pud_t *)p4d;
}

p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
{
	if (pgtable_l5_enabled)
		return pgd_pgtable(pgdp_get(pgd)) + p4d_index(address);

	return (p4d_t *)pgd;
}
#endif

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
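/*
 * Support for huge-page mappings in the kernel's vmap/ioremap area.
 * The p4d helpers deliberately report "unsupported" (return 0 / no-op)
 * so the generic code falls back to smaller mapping sizes.
 */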
int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
{
	return 0;
}

void p4d_clear_huge(p4d_t *p4d)
{
}

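/*
 * Install a leaf PUD entry mapping @phys with @prot; returning 1
 * signals success to the generic vmap code.
 */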
int pud_set_huge(pud_t *pud, phys_addr_t phys, pgprot_t prot)
{
	pud_t new_pud = pfn_pud(__phys_to_pfn(phys), prot);

	set_pud(pud, new_pud);
	return 1;
}

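/*
 * Clear a leaf (huge) PUD entry.  A non-leaf entry is left in place and
 * 0 is returned, since in that case the lower-level tables still have
 * to be torn down by the caller.
 */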
int pud_clear_huge(pud_t *pud)
{
	if (!pud_leaf(pudp_get(pud)))
		return 0;
	pud_clear(pud);
	return 1;
}

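/*
 * Free the PMD table that @pud points to: detach it first by clearing
 * the PUD entry and flushing the covered range, then release any PTE
 * tables still hanging off its entries, and finally the PMD table
 * itself.
 */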
int pud_free_pmd_page(pud_t *pud, unsigned long addr)
{
	pmd_t *pmd = pud_pgtable(pudp_get(pud));
	int i;

	pud_clear(pud);

	flush_tlb_kernel_range(addr, addr + PUD_SIZE);

	for (i = 0; i < PTRS_PER_PMD; i++) {
		if (!pmd_none(pmd[i])) {
			pte_t *pte = (pte_t *)pmd_page_vaddr(pmd[i]);

			pte_free_kernel(NULL, pte);
		}
	}

	pmd_free(NULL, pmd);

	return 1;
}

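/*
 * PMD-level counterpart of pud_set_huge(): install a PMD-sized leaf
 * mapping of @phys with @prot.
 */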
int pmd_set_huge(pmd_t *pmd, phys_addr_t phys, pgprot_t prot)
{
	pmd_t new_pmd = pfn_pmd(__phys_to_pfn(phys), prot);

	set_pmd(pmd, new_pmd);
	return 1;
}

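/*
 * PMD-level counterpart of pud_clear_huge(): only leaf entries are
 * cleared.
 */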
int pmd_clear_huge(pmd_t *pmd)
{
	if (!pmd_leaf(pmdp_get(pmd)))
		return 0;
	pmd_clear(pmd);
	return 1;
}

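/*
 * PMD-level counterpart of pud_free_pmd_page(); only a single PTE
 * table has to be freed once the PMD entry is detached and flushed.
 */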
int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
{
	pte_t *pte = (pte_t *)pmd_page_vaddr(pmdp_get(pmd));

	pmd_clear(pmd);

	flush_tlb_kernel_range(addr, addr + PMD_SIZE);
	pte_free_kernel(NULL, pte);
	return 1;
}

#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
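/*
 * Used by khugepaged when collapsing a range of regular pages into a
 * huge page: atomically detach the old PMD entry and flush, returning
 * the old value so the caller can free the PTE table.  See the comment
 * below on why the flush must be global.
 */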
pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp)
{
	pmd_t pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(pmd_trans_huge(pmdp_get(pmdp)));
	/*
	 * When leaf PTE entries (regular pages) are collapsed into a leaf
	 * PMD entry (huge page), a valid non-leaf PTE is converted into a
	 * valid leaf PTE at the level 1 page table.  Since the sfence.vma
	 * forms that specify an address only apply to leaf PTEs, we need a
	 * global flush here.  collapse_huge_page() assumes these flushes are
	 * eager, so just do the fence here.
	 */
	flush_tlb_mm(vma->vm_mm);
	return pmd;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */