/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PGALLOC_H
#define _ASM_X86_PGALLOC_H

#include <linux/threads.h>
#include <linux/mm.h>		/* for struct page */
#include <linux/pagemap.h>

/*
 * x86 provides its own pte_alloc_one() and pgd_free(); tell the generic
 * pgalloc header not to emit its default versions before including it.
 */
#define __HAVE_ARCH_PTE_ALLOC_ONE
#define __HAVE_ARCH_PGD_FREE
#include <asm-generic/pgalloc.h>

/* Default PGD-allocation hook: nothing to do, report success. */
static inline int  __paravirt_pgd_alloc(struct mm_struct *mm) { return 0; }

#ifdef CONFIG_PARAVIRT_XXL
#include <asm/paravirt.h>
#else
/*
 * Without CONFIG_PARAVIRT_XXL the paravirt page-table hooks collapse to
 * no-ops, so native builds pay no cost at the call sites below.
 */
#define paravirt_pgd_alloc(mm)	__paravirt_pgd_alloc(mm)
static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd) {}
static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)	{}
static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)	{}
static inline void paravirt_alloc_pmd_clone(unsigned long pfn, unsigned long clonepfn,
					    unsigned long start, unsigned long count) {}
static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)	{}
static inline void paravirt_alloc_p4d(struct mm_struct *mm, unsigned long pfn)	{}
static inline void paravirt_release_pte(unsigned long pfn) {}
static inline void paravirt_release_pmd(unsigned long pfn) {}
static inline void paravirt_release_pud(unsigned long pfn) {}
static inline void paravirt_release_p4d(unsigned long pfn) {}
#endif

/*
 * Flags to use when allocating a user page table page.
 */
extern gfp_t __userpte_alloc_gfp;

#ifdef CONFIG_PAGE_TABLE_ISOLATION
/*
 * Instead of one PGD, we acquire two PGDs.  Being order-1, it is
 * both 8k in size and 8k-aligned.  That lets us just flip bit 12
 * in a pointer to swap between the two 4k halves.
 */
#define PGD_ALLOCATION_ORDER 1
#else
#define PGD_ALLOCATION_ORDER 0
#endif

/*
 * Allocate and free page tables.
 */
extern pgd_t *pgd_alloc(struct mm_struct *);
extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);

extern pgtable_t pte_alloc_one(struct mm_struct *);

extern void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte);

/*
 * Free a PTE page via the mmu_gather batching machinery.  The address
 * argument is required by the generic TLB interface but unused here.
 */
static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte,
				  unsigned long address)
{
	___pte_free_tlb(tlb, pte);
}

/*
 * Install a kernel PTE page into @pmd.  The paravirt hook is notified
 * before the entry becomes visible via set_pmd() — keep that order.
 */
static inline void pmd_populate_kernel(struct mm_struct *mm,
				       pmd_t *pmd, pte_t *pte)
{
	paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
	set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
}

/*
 * As pmd_populate_kernel(), but uses set_pmd_safe(), which BUGs if the
 * entry was already present — for populating tables that must be empty.
 */
static inline void pmd_populate_kernel_safe(struct mm_struct *mm,
					    pmd_t *pmd, pte_t *pte)
{
	paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
	set_pmd_safe(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
}

/*
 * Install a user PTE page (given as a struct page) into @pmd.
 */
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
				struct page *pte)
{
	unsigned long pfn = page_to_pfn(pte);

	paravirt_alloc_pte(mm, pfn);
	set_pmd(pmd, __pmd(((pteval_t)pfn << PAGE_SHIFT) | _PAGE_TABLE));
}

#define pmd_pgtable(pmd) pmd_page(pmd)

#if CONFIG_PGTABLE_LEVELS > 2
extern void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd);

/* Free a PMD page through the mmu_gather; @address is unused on x86. */
static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
				  unsigned long address)
{
	___pmd_free_tlb(tlb, pmd);
}

#ifdef CONFIG_X86_PAE
/* PAE needs special handling; out of line in pgtable.c. */
extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
#else	/* !CONFIG_X86_PAE */
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
	set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
}

/* As pud_populate(), but BUGs (via set_pud_safe) on an already-set entry. */
static inline void pud_populate_safe(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
	set_pud_safe(pud, __pud(_PAGE_TABLE | __pa(pmd)));
}
#endif	/* CONFIG_X86_PAE */

#if CONFIG_PGTABLE_LEVELS > 3
static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
{
	paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
	set_p4d(p4d, __p4d(_PAGE_TABLE | __pa(pud)));
}

/* As p4d_populate(), but BUGs (via set_p4d_safe) on an already-set entry. */
static inline void p4d_populate_safe(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
{
	paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
	set_p4d_safe(p4d, __p4d(_PAGE_TABLE | __pa(pud)));
}

extern void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud);

/* Free a PUD page through the mmu_gather; @address is unused on x86. */
static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
				  unsigned long address)
{
	___pud_free_tlb(tlb, pud);
}

#if CONFIG_PGTABLE_LEVELS > 4
/*
 * With 4-level paging at runtime the PGD and P4D levels are folded, so
 * the PGD entry must not be written — bail out early in that case.
 */
static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d)
{
	if (!pgtable_l5_enabled())
		return;
	paravirt_alloc_p4d(mm, __pa(p4d) >> PAGE_SHIFT);
	set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(p4d)));
}

/* As pgd_populate(), but BUGs (via set_pgd_safe) on an already-set entry. */
static inline void pgd_populate_safe(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d)
{
	if (!pgtable_l5_enabled())
		return;
	paravirt_alloc_p4d(mm, __pa(p4d) >> PAGE_SHIFT);
	set_pgd_safe(pgd, __pgd(_PAGE_TABLE | __pa(p4d)));
}

/*
 * Allocate one zeroed P4D page.  Kernel (init_mm) tables are not charged
 * to a memcg, so strip __GFP_ACCOUNT for them.
 */
static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	gfp_t gfp = GFP_KERNEL_ACCOUNT;

	if (mm == &init_mm)
		gfp &= ~__GFP_ACCOUNT;
	return (p4d_t *)get_zeroed_page(gfp);
}

static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
{
	/* With folded P4D (4-level paging) nothing was allocated. */
	if (!pgtable_l5_enabled())
		return;

	BUG_ON((unsigned long)p4d & (PAGE_SIZE-1));
	free_page((unsigned long)p4d);
}

extern void ___p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d);

/* Free a P4D page through the mmu_gather, only when the level is real. */
static inline void __p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
				  unsigned long address)
{
	if (pgtable_l5_enabled())
		___p4d_free_tlb(tlb, p4d);
}

#endif	/* CONFIG_PGTABLE_LEVELS > 4 */
#endif	/* CONFIG_PGTABLE_LEVELS > 3 */
#endif	/* CONFIG_PGTABLE_LEVELS > 2 */

#endif /* _ASM_X86_PGALLOC_H */