#ifndef _ALPHA_PGALLOC_H
#define _ALPHA_PGALLOC_H

#include <linux/mm.h>
#include <linux/mmzone.h>

/*
 * Allocate and free page tables. The xxx_kernel() versions are
 * used to allocate a kernel page table - this turns on ASN bits
 * if any.
 */

static inline void
pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t pte)
{
	/*
	 * page_to_pa() gives the physical address of the PTE page;
	 * adding PAGE_OFFSET yields the direct-mapped kernel virtual
	 * address that pmd_set() expects.
	 */
	pmd_set(pmd, (pte_t *)(page_to_pa(pte) + PAGE_OFFSET));
}
#define pmd_pgtable(pmd) pmd_page(pmd)

static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
{
	pmd_set(pmd, pte);
}

static inline void
pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
{
	pgd_set(pgd, pmd);
}

extern pgd_t *pgd_alloc(struct mm_struct *mm);

static inline void
pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	free_page((unsigned long)pgd);
}

static inline pmd_t *
pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
	pmd_t *ret = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
	return ret;
}

static inline void
pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	free_page((unsigned long)pmd);
}

static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
	return pte;
}

static inline void
pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	free_page((unsigned long)pte);
}

static inline pgtable_t
pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	pte_t *pte = pte_alloc_one_kernel(mm, address);
	struct page *page;

	if (!pte)
		return NULL;
	page = virt_to_page(pte);
	/* Constructor sets up the split page-table lock; back out on failure. */
	if (!pgtable_page_ctor(page)) {
		__free_page(page);
		return NULL;
	}
	return page;
}

static inline void
pte_free(struct mm_struct *mm, pgtable_t page)
{
	pgtable_page_dtor(page);
	__free_page(page);
}

/* No page-table cache to trim on alpha. */
#define check_pgt_cache()	do { } while (0)

#endif /* _ALPHA_PGALLOC_H */
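
/*
 * Hypothetical usage sketch (illustrative only, not part of the original
 * header): roughly how a caller could combine pte_alloc_one() and
 * pmd_populate() to hook a freshly allocated user PTE page under a PMD
 * entry.  The function name example_map_pte_page and the simplified
 * locking are assumptions for illustration; the real generic code in
 * mm/memory.c additionally handles accounting and memory barriers.
 */
static inline int example_map_pte_page(struct mm_struct *mm, pmd_t *pmd,
				       unsigned long address)
{
	pgtable_t new = pte_alloc_one(mm, address);

	if (!new)
		return -ENOMEM;

	spin_lock(&mm->page_table_lock);
	if (pmd_none(*pmd)) {
		pmd_populate(mm, pmd, new);	/* install the new PTE page */
		new = NULL;
	}
	spin_unlock(&mm->page_table_lock);

	if (new)
		pte_free(mm, new);		/* lost the race; release the page */
	return 0;
}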