#ifndef _ASM_POWERPC_BOOK3S_64_PGALLOC_H
#define _ASM_POWERPC_BOOK3S_64_PGALLOC_H
/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/slab.h>
#include <linux/cpumask.h>
#include <linux/kmemleak.h>
#include <linux/percpu.h>

struct vmemmap_backing {
	struct vmemmap_backing *list;
	unsigned long phys;
	unsigned long virt_addr;
};
extern struct vmemmap_backing *vmemmap_list;

/*
 * Functions that deal with pagetables that could be at any level of
 * the table need to be passed an "index_size" so they know how to
 * handle allocation.  For PTE pages (which are linked to a struct
 * page for now, and drawn from the main get_free_pages() pool), the
 * allocation size will be (2^index_size * sizeof(pointer)) and
 * allocations are drawn from the kmem_cache in PGT_CACHE(index_size).
 *
 * The maximum index size needs to be big enough to allow any
 * pagetable sizes we need, but small enough to fit in the low bits of
 * any page table pointer.  In other words all pagetables, even tiny
 * ones, must be aligned to allow at least enough low 0 bits to
 * contain this value.  This value is also used as a mask, so it must
 * be one less than a power of two.
 */
#define MAX_PGTABLE_INDEX_SIZE	0xf

extern struct kmem_cache *pgtable_cache[];
#define PGT_CACHE(shift) pgtable_cache[shift]
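
/*
 * Illustrative sketch (not part of the interface, helper names are made up):
 * because every page table is aligned to more than MAX_PGTABLE_INDEX_SIZE
 * bytes, an index size can be packed into the low bits of a table pointer
 * and recovered with the same constant used as a mask.  This is roughly the
 * scheme pgtable_free_tlb()/__tlb_remove_table() below rely on, and why
 * MAX_PGTABLE_INDEX_SIZE must be one less than a power of two.
 */
static inline void *pgtable_index_pack(void *table, unsigned int index)
{
	/* index fits in the low bits left zero by the table's alignment */
	return (void *)((unsigned long)table | index);
}

static inline unsigned int pgtable_index_unpack(void *packed, void **table)
{
	/* mask off the low bits to get the table back, keep them as the index */
	*table = (void *)((unsigned long)packed & ~MAX_PGTABLE_INDEX_SIZE);
	return (unsigned long)packed & MAX_PGTABLE_INDEX_SIZE;
}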

/*
 * For pte_fragment_alloc() and pte_fragment_free(), the "int" argument
 * selects kernel (1) or user (0) page table fragments.
 */
extern pte_t *pte_fragment_alloc(struct mm_struct *, int);
extern pmd_t *pmd_fragment_alloc(struct mm_struct *, unsigned long);
extern void pte_fragment_free(unsigned long *, int);
extern void pmd_fragment_free(unsigned long *);
extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift);
#ifdef CONFIG_SMP
extern void __tlb_remove_table(void *_table);
#endif
void pte_frag_destroy(void *pte_frag);

static inline pgd_t *radix__pgd_alloc(struct mm_struct *mm)
{
#ifdef CONFIG_PPC_64K_PAGES
	return (pgd_t *)__get_free_page(pgtable_gfp_flags(mm, PGALLOC_GFP));
#else
	struct page *page;

	/* With 4K pages the radix PGD is 64K, so it needs an order-4 allocation. */
	page = alloc_pages(pgtable_gfp_flags(mm, PGALLOC_GFP | __GFP_RETRY_MAYFAIL),
			   4);
	if (!page)
		return NULL;
	return (pgd_t *) page_address(page);
#endif
}

static inline void radix__pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
#ifdef CONFIG_PPC_64K_PAGES
	free_page((unsigned long)pgd);
#else
	free_pages((unsigned long)pgd, 4);
#endif
}

static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd;

	if (radix_enabled())
		return radix__pgd_alloc(mm);

	pgd = kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
			       pgtable_gfp_flags(mm, GFP_KERNEL));
	/*
	 * Don't scan the PGD for pointers; it contains references to PUDs,
	 * but those references are not full pointers and so can't be
	 * recognised by kmemleak.
	 */
	kmemleak_no_scan(pgd);

	/*
	 * With hugetlb, we don't clear the second half of the page table.
	 * If we share the same slab cache with the pmd or pud level table,
	 * we need to make sure we zero out the full table on alloc.
	 * With 4K page size we don't store the slot information in the
	 * second half, hence we don't need to do this for 4K.
	 */
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_PPC_64K_PAGES) && \
	(H_PGD_INDEX_SIZE == H_PUD_CACHE_INDEX)
	memset(pgd, 0, PGD_TABLE_SIZE);
#endif
	return pgd;
}

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	if (radix_enabled())
		return radix__pgd_free(mm, pgd);
	kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
}

static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
{
	pgd_set(pgd, __pgtable_ptr_val(pud) | PGD_VAL_BITS);
}

static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	pud_t *pud;

	pud = kmem_cache_alloc(PGT_CACHE(PUD_CACHE_INDEX),
			       pgtable_gfp_flags(mm, GFP_KERNEL));
	/*
	 * Tell kmemleak to ignore the PUD; that means don't scan it for
	 * pointers and don't consider it a leak.  PUDs are typically only
	 * referred to by their PGD, but kmemleak is not able to recognise
	 * those as pointers, leading to false leak reports.
	 */
	kmemleak_ignore(pud);

	return pud;
}

static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
	kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), pud);
}

static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	pud_set(pud, __pgtable_ptr_val(pmd) | PUD_VAL_BITS);
}
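
/*
 * Usage sketch (illustration only, not part of this header): the generic mm
 * code pairs each level's alloc_one/populate/free helpers, roughly along
 * these lines (locking simplified, accounting and barriers omitted):
 *
 *	pud_t *new = pud_alloc_one(mm, address);
 *	if (!new)
 *		return -ENOMEM;
 *	spin_lock(&mm->page_table_lock);
 *	if (pgd_present(*pgd))
 *		pud_free(mm, new);		lost the race, drop our table
 *	else
 *		pgd_populate(mm, pgd, new);
 *	spin_unlock(&mm->page_table_lock);
 */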

static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
				  unsigned long address)
{
	/*
	 * By now all the pud entries should be none entries. So go
	 * ahead and flush the page walk cache.
	 */
	flush_tlb_pgtable(tlb, address);
	pgtable_free_tlb(tlb, pud, PUD_INDEX);
}

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return pmd_fragment_alloc(mm, addr);
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	pmd_fragment_free((unsigned long *)pmd);
}

static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
				  unsigned long address)
{
	/*
	 * By now all the pmd entries should be none entries. So go
	 * ahead and flush the page walk cache.
	 */
	flush_tlb_pgtable(tlb, address);
	pgtable_free_tlb(tlb, pmd, PMD_INDEX);
}

static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
				       pte_t *pte)
{
	pmd_set(pmd, __pgtable_ptr_val(pte) | PMD_VAL_BITS);
}

static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
				pgtable_t pte_page)
{
	pmd_set(pmd, __pgtable_ptr_val(pte_page) | PMD_VAL_BITS);
}

static inline pgtable_t pmd_pgtable(pmd_t pmd)
{
	return (pgtable_t)pmd_page_vaddr(pmd);
}

static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
	return (pte_t *)pte_fragment_alloc(mm, 1);
}

static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
{
	return (pgtable_t)pte_fragment_alloc(mm, 0);
}

static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	pte_fragment_free((unsigned long *)pte, 1);
}

static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
{
	pte_fragment_free((unsigned long *)ptepage, 0);
}

static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
				  unsigned long address)
{
	/*
	 * By now all the pte entries should be none entries. So go
	 * ahead and flush the page walk cache.
	 */
	flush_tlb_pgtable(tlb, address);
	pgtable_free_tlb(tlb, table, PTE_INDEX);
}

#define check_pgt_cache()	do { } while (0)

extern atomic_long_t direct_pages_count[MMU_PAGE_COUNT];
static inline void update_page_count(int psize, long count)
{
	if (IS_ENABLED(CONFIG_PROC_FS))
		atomic_long_add(count, &direct_pages_count[psize]);
}

#endif /* _ASM_POWERPC_BOOK3S_64_PGALLOC_H */