/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_PGALLOC_H
#define _ASM_PGALLOC_H

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/fixmap.h>

#include <asm/cache.h>

#define __HAVE_ARCH_PMD_FREE
#define __HAVE_ARCH_PGD_FREE
#include <asm-generic/pgalloc.h>

/* Allocate the top level pgd (page directory)
 *
 * Here (for 64 bit kernels) we implement a hybrid L2/L3 scheme: we
 * allocate the first pmd adjacent to the pgd.  This means that we can
 * subtract a constant offset to get to it.  The pmd and pgd sizes are
 * arranged so that a single pmd covers 4GB (giving a full 64-bit
 * process access to 8TB), so our lookups are effectively L2 for the
 * first 4GB of the kernel (i.e. for all ILP32 processes and all the
 * kernel for machines with under 4GB of memory). */
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd = (pgd_t *)__get_free_pages(GFP_KERNEL,
					       PGD_ALLOC_ORDER);
	pgd_t *actual_pgd = pgd;

	if (likely(pgd != NULL)) {
		memset(pgd, 0, PAGE_SIZE << PGD_ALLOC_ORDER);
#if CONFIG_PGTABLE_LEVELS == 3
		actual_pgd += PTRS_PER_PGD;
		/* Populate the first pmd with the allocated memory.  We mark
		 * it with PxD_FLAG_ATTACHED as a signal to the system that
		 * this pmd entry may not be cleared. */
		set_pgd(actual_pgd, __pgd((PxD_FLAG_PRESENT |
					   PxD_FLAG_VALID |
					   PxD_FLAG_ATTACHED)
			+ (__u32)(__pa((unsigned long)pgd) >> PxD_VALUE_SHIFT)));
		/* The first pmd entry is also marked with PxD_FLAG_ATTACHED,
		 * as a signal that this pmd may not be freed. */
		set_pgd(pgd, __pgd(PxD_FLAG_ATTACHED));
#endif
		/* pgd_spinlock() derives the lock location from the pgd
		 * pointer, so only initialize the lock once the allocation
		 * has actually succeeded. */
		spin_lock_init(pgd_spinlock(actual_pgd));
	}
	return actual_pgd;
}

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
#if CONFIG_PGTABLE_LEVELS == 3
	/* Step back to the start of the allocation, which begins with
	 * the permanently attached first pmd. */
	pgd -= PTRS_PER_PGD;
#endif
	free_pages((unsigned long)pgd, PGD_ALLOC_ORDER);
}

#if CONFIG_PGTABLE_LEVELS == 3

/* Three-level page table support for pmds */

static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	set_pud(pud, __pud((PxD_FLAG_PRESENT | PxD_FLAG_VALID) +
			   (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT)));
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED) {
		/*
		 * This is the permanent pmd attached to the pgd;
		 * we cannot free it.
		 * Increment the counter to compensate for the decrement
		 * done by generic mm code.
		 */
		mm_inc_nr_pmds(mm);
		return;
	}
	free_pages((unsigned long)pmd, PMD_ORDER);
}

#endif

static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
{
#if CONFIG_PGTABLE_LEVELS == 3
	/* Preserve the gateway marker if this is the beginning of
	 * the permanent pmd. */
	if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
		set_pmd(pmd, __pmd((PxD_FLAG_PRESENT |
				    PxD_FLAG_VALID |
				    PxD_FLAG_ATTACHED)
			+ (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT)));
	else
#endif
		set_pmd(pmd, __pmd((PxD_FLAG_PRESENT | PxD_FLAG_VALID)
			+ (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT)));
}

#define pmd_populate(mm, pmd, pte_page) \
	pmd_populate_kernel(mm, pmd, page_address(pte_page))
#define pmd_pgtable(pmd)	pmd_page(pmd)

#endif
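
/*
 * Illustrative sketch only (not part of this header's interface): the
 * hybrid L2/L3 layout that pgd_alloc() above produces when
 * CONFIG_PGTABLE_LEVELS == 3.  A single physically contiguous block
 * from __get_free_pages(GFP_KERNEL, PGD_ALLOC_ORDER) holds both tables:
 *
 *   base                           base + PTRS_PER_PGD (pgd_t units)
 *   |----- attached first pmd -----|--------- pgd table --------->
 *     (marked PxD_FLAG_ATTACHED;       (actual_pgd, the value
 *      never released by pmd_free())    returned to the caller)
 *
 * Because pgd_alloc() returns base + PTRS_PER_PGD, the attached pmd is
 * always reachable from the returned pgd at a constant negative
 * offset, and pgd_free() must subtract PTRS_PER_PGD again before
 * handing the block back to free_pages().
 */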