#ifndef _ASM_POWERPC_PGALLOC_64_H
#define _ASM_POWERPC_PGALLOC_64_H
/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/slab.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>

struct vmemmap_backing {
        struct vmemmap_backing *list;
        unsigned long phys;
        unsigned long virt_addr;
};
extern struct vmemmap_backing *vmemmap_list;

/*
 * Functions that deal with pagetables that could be at any level of
 * the table need to be passed an "index_size" so they know how to
 * handle allocation. For PTE pages (which are linked to a struct
 * page for now, and drawn from the main get_free_pages() pool), the
 * allocation size will be (2^index_size * sizeof(pointer)) and
 * allocations are drawn from the kmem_cache in PGT_CACHE(index_size).
 *
 * The maximum index size needs to be big enough to allow any
 * pagetable sizes we need, but small enough to fit in the low bits of
 * any page table pointer. In other words all pagetables, even tiny
 * ones, must be aligned to allow at least enough low 0 bits to
 * contain this value. This value is also used as a mask, so it must
 * be one less than a power of two.
 */
#define MAX_PGTABLE_INDEX_SIZE  0xf

extern struct kmem_cache *pgtable_cache[];
#define PGT_CACHE(shift) pgtable_cache[shift]

static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
        return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
                        pgtable_gfp_flags(mm, GFP_KERNEL));
}

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
        kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
}

#define pgd_populate(MM, PGD, PUD)      pgd_set(PGD, (unsigned long)PUD)

static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
        return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE),
                        pgtable_gfp_flags(mm, GFP_KERNEL));
}

static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
        kmem_cache_free(PGT_CACHE(PUD_INDEX_SIZE), pud);
}

static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
        pud_set(pud, (unsigned long)pmd);
}

static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
                                       pte_t *pte)
{
        pmd_set(pmd, (unsigned long)pte);
}

static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
                                pgtable_t pte_page)
{
        pmd_set(pmd, (unsigned long)page_address(pte_page));
}

#define pmd_pgtable(pmd) pmd_page(pmd)

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
        return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX),
                        pgtable_gfp_flags(mm, GFP_KERNEL));
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
        kmem_cache_free(PGT_CACHE(PMD_CACHE_INDEX), pmd);
}

static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
        return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
}

static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
{
        struct page *page;
        pte_t *pte;

        pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO | __GFP_ACCOUNT);
        if (!pte)
                return NULL;
        page = virt_to_page(pte);
        if (!pgtable_page_ctor(page)) {
                __free_page(page);
                return NULL;
        }
        return page;
}
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
        free_page((unsigned long)pte);
}

static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
{
        pgtable_page_dtor(ptepage);
        __free_page(ptepage);
}

/*
 * shift == 0 means a PTE page from get_free_pages(); anything else was
 * drawn from the kmem_cache in PGT_CACHE(shift).
 */
static inline void pgtable_free(void *table, int shift)
{
        if (!shift) {
                pgtable_page_dtor(virt_to_page(table));
                free_page((unsigned long)table);
        } else {
                BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
                kmem_cache_free(PGT_CACHE(shift), table);
        }
}

#define get_hugepd_cache_index(x)       (x)
#ifdef CONFIG_SMP
static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
{
        unsigned long pgf = (unsigned long)table;

        BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
        /* stash the index size in the low bits; table alignment keeps them free */
        pgf |= shift;
        tlb_remove_table(tlb, (void *)pgf);
}

static inline void __tlb_remove_table(void *_table)
{
        /* recover the pointer and index size packed by pgtable_free_tlb() */
        void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
        unsigned int shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;

        pgtable_free(table, shift);
}

#else
static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
{
        pgtable_free(table, shift);
}
#endif

static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
                                  unsigned long address)
{
        tlb_flush_pgtable(tlb, address);
        pgtable_free_tlb(tlb, page_address(table), 0);
}

#define __pmd_free_tlb(tlb, pmd, addr) \
        pgtable_free_tlb(tlb, pmd, PMD_CACHE_INDEX)
#ifndef CONFIG_PPC_64K_PAGES
#define __pud_free_tlb(tlb, pud, addr) \
        pgtable_free_tlb(tlb, pud, PUD_INDEX_SIZE)
#endif /* CONFIG_PPC_64K_PAGES */

#define check_pgt_cache()       do { } while (0)

#endif /* _ASM_POWERPC_PGALLOC_64_H */
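
/*
 * Illustrative sketch, not from the kernel source: the SMP path above
 * smuggles a table's index size through the otherwise-zero low bits of
 * its pointer (pgtable_free_tlb() packs it, __tlb_remove_table() unpacks
 * it), which is why MAX_PGTABLE_INDEX_SIZE must be one less than a power
 * of two and every table must be at least that aligned. The standalone
 * userspace analogue below shows the round trip; it is guarded out with
 * #if 0 so it never compiles into the header, and DEMO_INDEX_MASK plus
 * the 16-byte alignment are demo assumptions mirroring the mask above.
 */
#if 0
#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#define DEMO_INDEX_MASK 0xf     /* mirrors MAX_PGTABLE_INDEX_SIZE */

int main(void)
{
        /* 16-byte alignment leaves the low four pointer bits zero */
        void *table = aligned_alloc(16, 64);
        unsigned int shift = 9; /* an example index size, <= the mask */

        if (!table)
                return 1;

        /* pack, as pgtable_free_tlb() does */
        uintptr_t pgf = (uintptr_t)table | shift;

        /* unpack, as __tlb_remove_table() does */
        void *unpacked = (void *)(pgf & ~(uintptr_t)DEMO_INDEX_MASK);
        unsigned int got = pgf & DEMO_INDEX_MASK;

        assert(unpacked == table && got == shift);
        free(table);
        return 0;
}
#endif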