#ifndef _ASM_POWERPC_BOOK3S_64_PGALLOC_H
#define _ASM_POWERPC_BOOK3S_64_PGALLOC_H
/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/slab.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>

struct vmemmap_backing {
	struct vmemmap_backing *list;
	unsigned long phys;
	unsigned long virt_addr;
};
extern struct vmemmap_backing *vmemmap_list;

/*
 * Functions that deal with pagetables that could be at any level of
 * the table need to be passed an "index_size" so they know how to
 * handle allocation. For PTE pages (which are linked to a struct
 * page for now, and drawn from the main get_free_pages() pool), the
 * allocation size will be (2^index_size * sizeof(pointer)) and
 * allocations are drawn from the kmem_cache in PGT_CACHE(index_size).
 *
 * The maximum index size needs to be big enough to allow any
 * pagetable sizes we need, but small enough to fit in the low bits of
 * any page table pointer. In other words all pagetables, even tiny
 * ones, must be aligned to allow at least enough low 0 bits to
 * contain this value. This value is also used as a mask, so it must
 * be one less than a power of two.
 */
#define MAX_PGTABLE_INDEX_SIZE	0xf

extern struct kmem_cache *pgtable_cache[];
#define PGT_CACHE(shift) ({				\
			BUG_ON(!(shift));		\
			pgtable_cache[(shift) - 1];	\
		})

extern pte_t *pte_fragment_alloc(struct mm_struct *, unsigned long, int);
extern void pte_fragment_free(unsigned long *, int);
extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift);
#ifdef CONFIG_SMP
extern void __tlb_remove_table(void *_table);
#endif
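
/*
 * A worked example of the index_size convention above (illustrative
 * numbers only, not a statement about any particular table level):
 * a table with index_size n holds 2^n pointers, so PGT_CACHE(n)
 * hands out objects of 2^n * sizeof(void *) bytes; n = 9 would give
 * 512 * 8 = 4096 bytes on 64-bit. The "- 1" in PGT_CACHE() exists
 * because index size 0 is invalid (caught by the BUG_ON), so
 * pgtable_cache[0] holds the cache for index size 1.
 */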

static inline pgd_t *radix__pgd_alloc(struct mm_struct *mm)
{
#ifdef CONFIG_PPC_64K_PAGES
	return (pgd_t *)__get_free_page(pgtable_gfp_flags(mm, PGALLOC_GFP));
#else
	struct page *page;
	/*
	 * The radix PGD is 64K; with a 4K base page size that is an
	 * order-4 allocation, while with 64K pages (above) a single
	 * page suffices.
	 */
	page = alloc_pages(pgtable_gfp_flags(mm, PGALLOC_GFP | __GFP_RETRY_MAYFAIL),
			   4);
	if (!page)
		return NULL;
	return (pgd_t *)page_address(page);
#endif
}

static inline void radix__pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
#ifdef CONFIG_PPC_64K_PAGES
	free_page((unsigned long)pgd);
#else
	free_pages((unsigned long)pgd, 4);
#endif
}

static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd;

	if (radix_enabled())
		return radix__pgd_alloc(mm);

	pgd = kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
			       pgtable_gfp_flags(mm, GFP_KERNEL));
	/* kmem_cache_alloc() can fail; don't memset() a NULL pointer. */
	if (unlikely(!pgd))
		return pgd;
	memset(pgd, 0, PGD_TABLE_SIZE);

	return pgd;
}

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	if (radix_enabled())
		return radix__pgd_free(mm, pgd);
	kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
}

static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
{
	pgd_set(pgd, __pgtable_ptr_val(pud) | PGD_VAL_BITS);
}

static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return kmem_cache_alloc(PGT_CACHE(PUD_CACHE_INDEX),
				pgtable_gfp_flags(mm, GFP_KERNEL));
}

static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
	kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), pud);
}

static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	pud_set(pud, __pgtable_ptr_val(pmd) | PUD_VAL_BITS);
}

static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
				  unsigned long address)
{
	/*
	 * By now all the pud entries should be none entries. So go
	 * ahead and flush the page walk cache.
	 */
	flush_tlb_pgtable(tlb, address);
	pgtable_free_tlb(tlb, pud, PUD_CACHE_INDEX);
}

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX),
				pgtable_gfp_flags(mm, GFP_KERNEL));
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	kmem_cache_free(PGT_CACHE(PMD_CACHE_INDEX), pmd);
}

static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
				  unsigned long address)
{
	/*
	 * By now all the pmd entries should be none entries. So go
	 * ahead and flush the page walk cache.
	 */
	flush_tlb_pgtable(tlb, address);
	pgtable_free_tlb(tlb, pmd, PMD_CACHE_INDEX);
}

static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
				       pte_t *pte)
{
	pmd_set(pmd, __pgtable_ptr_val(pte) | PMD_VAL_BITS);
}

static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
				pgtable_t pte_page)
{
	pmd_set(pmd, __pgtable_ptr_val(pte_page) | PMD_VAL_BITS);
}

static inline pgtable_t pmd_pgtable(pmd_t pmd)
{
	return (pgtable_t)pmd_page_vaddr(pmd);
}

#ifdef CONFIG_PPC_4K_PAGES
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
					  unsigned long address)
{
	return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
}

static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
				      unsigned long address)
{
	struct page *page;
	pte_t *pte;

	pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO | __GFP_ACCOUNT);
	if (!pte)
		return NULL;
	page = virt_to_page(pte);
	if (!pgtable_page_ctor(page)) {
		__free_page(page);
		return NULL;
	}
	return pte;
}
#else /* if CONFIG_PPC_64K_PAGES */

static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
					  unsigned long address)
{
	return (pte_t *)pte_fragment_alloc(mm, address, 1);
}

static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
				      unsigned long address)
{
	return (pgtable_t)pte_fragment_alloc(mm, address, 0);
}
#endif

static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	pte_fragment_free((unsigned long *)pte, 1);
}

static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
{
	pte_fragment_free((unsigned long *)ptepage, 0);
}

static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
				  unsigned long address)
{
	/*
	 * By now all the pte entries should be none entries. So go
	 * ahead and flush the page walk cache.
	 */
	flush_tlb_pgtable(tlb, address);
	pgtable_free_tlb(tlb, table, 0);
}

#define check_pgt_cache()	do { } while (0)

#endif /* _ASM_POWERPC_BOOK3S_64_PGALLOC_H */