/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ALPHA_PGALLOC_H
#define _ALPHA_PGALLOC_H

#include <linux/mm.h>
#include <linux/mmzone.h>

/*
 * Allocate and free page tables. The xxx_kernel() versions are
 * used to allocate a kernel page table - this turns on ASN bits
 * if any.
 */

static inline void
pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t pte)
{
	/* pgtable_t is a page pointer here; store the page's kernel virtual address. */
	pmd_set(pmd, (pte_t *)(page_to_pa(pte) + PAGE_OFFSET));
}
#define pmd_pgtable(pmd) pmd_page(pmd)

static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
{
	pmd_set(pmd, pte);
}

static inline void
pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
{
	pgd_set(pgd, pmd);
}

extern pgd_t *pgd_alloc(struct mm_struct *mm);

static inline void
pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	free_page((unsigned long)pgd);
}

static inline pmd_t *
pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
	pmd_t *ret = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
	return ret;
}

static inline void
pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	free_page((unsigned long)pmd);
}

static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm)
{
	pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
	return pte;
}

static inline void
pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	free_page((unsigned long)pte);
}

static inline pgtable_t
pte_alloc_one(struct mm_struct *mm)
{
	pte_t *pte = pte_alloc_one_kernel(mm);
	struct page *page;

	if (!pte)
		return NULL;
	page = virt_to_page(pte);
	/* Set up the split ptlock and page-table accounting; bail out on failure. */
	if (!pgtable_page_ctor(page)) {
		__free_page(page);
		return NULL;
	}
	return page;
}

static inline void
pte_free(struct mm_struct *mm, pgtable_t page)
{
	/* Tear down what pgtable_page_ctor() set up before freeing the page. */
	pgtable_page_dtor(page);
	__free_page(page);
}

#define check_pgt_cache() do { } while (0)

#endif /* _ALPHA_PGALLOC_H */
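
/*
 * Illustrative sketch only (not part of this header): roughly how the
 * generic mm code is expected to pair these helpers when a user mapping
 * needs a new pte page.  The real call site (with its locking) lives in
 * the generic mm code; the variable names below are made up.
 *
 *	pgtable_t new = pte_alloc_one(mm);
 *	if (!new)
 *		return -ENOMEM;
 *	if (pmd_present(*pmd))
 *		pte_free(mm, new);		// lost the race; drop our page
 *	else
 *		pmd_populate(mm, pmd, new);	// hook the pte page into the pmd
 */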