#ifndef _ASM_POWERPC_BOOK3S_64_PGALLOC_H
#define _ASM_POWERPC_BOOK3S_64_PGALLOC_H
/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/slab.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>

struct vmemmap_backing {
	struct vmemmap_backing *list;
	unsigned long phys;
	unsigned long virt_addr;
};
extern struct vmemmap_backing *vmemmap_list;
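
/*
 * Illustrative sketch (the real code lives in arch/powerpc/mm/init_64.c):
 * each allocated vmemmap backing page is pushed onto a simple LIFO
 * chain, roughly:
 *
 *	vmem_back->phys = phys;
 *	vmem_back->virt_addr = virt_addr;
 *	vmem_back->list = vmemmap_list;
 *	vmemmap_list = vmem_back;
 *
 * so lookups walk the list starting from the most recently added entry.
 */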

/*
 * Functions that deal with pagetables that could be at any level of
 * the table need to be passed an "index_size" so they know how to
 * handle allocation.  PTE pages are special: they are linked to a
 * struct page for now, and drawn from the main get_free_pages()
 * pool.  For every other level, the allocation size will be
 * (2^index_size * sizeof(pointer)) and allocations are drawn from
 * the kmem_cache in PGT_CACHE(index_size).
 *
 * The maximum index size needs to be big enough to allow any
 * pagetable sizes we need, but small enough to fit in the low bits of
 * any page table pointer.  In other words all pagetables, even tiny
 * ones, must be aligned to allow at least enough low 0 bits to
 * contain this value.  This value is also used as a mask, so it must
 * be one less than a power of two.
 */
#define MAX_PGTABLE_INDEX_SIZE	0xf
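
/*
 * Worked example (illustrative, not used by the code): with 64-bit
 * pointers, index_size 9 means a 2^9 * 8 = 4KB table, and the
 * kmem_cache keeps such tables naturally aligned, so the low bits of
 * a table pointer are guaranteed zero.  pgtable_free_tlb() below
 * exploits that to pack the index size into the pointer itself,
 * along the lines of:
 *
 *	unsigned long pgf = (unsigned long)table;
 *	BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
 *	pgf |= shift;			(pack size into the low bits)
 *
 * and __tlb_remove_table() recovers both halves with the mask:
 *
 *	shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;
 *	table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
 */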

extern struct kmem_cache *pgtable_cache[];
#define PGT_CACHE(shift) ({				\
			BUG_ON(!(shift));		\
			pgtable_cache[(shift) - 1];	\
		})
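
/*
 * Usage sketch: callers pass the index size of the level being
 * allocated, e.g.
 *
 *	pud = kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE), gfp);
 *
 * The BUG_ON(!(shift)) catches index size 0: PTE pages never come
 * from these caches, they come from the page allocator or the
 * fragment code declared below.
 */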

#define PGALLOC_GFP	(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)

extern pte_t *pte_fragment_alloc(struct mm_struct *, unsigned long, int);
extern void pte_fragment_free(unsigned long *, int);
extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift);
#ifdef CONFIG_SMP
extern void __tlb_remove_table(void *_table);
#endif
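
/*
 * On SMP the actual free is deferred: pgtable_free_tlb() queues the
 * table on the mmu_gather batch and __tlb_remove_table() releases it
 * only once no other CPU can still be walking it, via the generic
 * tlb_remove_table() machinery.
 */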

static inline pgd_t *radix__pgd_alloc(struct mm_struct *mm)
{
#ifdef CONFIG_PPC_64K_PAGES
	return (pgd_t *)__get_free_page(pgtable_gfp_flags(mm, PGALLOC_GFP));
#else
	struct page *page;

	page = alloc_pages(pgtable_gfp_flags(mm, PGALLOC_GFP | __GFP_RETRY_MAYFAIL),
			   4);
	if (!page)
		return NULL;
	return (pgd_t *) page_address(page);
#endif
}

static inline void radix__pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
#ifdef CONFIG_PPC_64K_PAGES
	free_page((unsigned long)pgd);
#else
	free_pages((unsigned long)pgd, 4);
#endif
}
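
/*
 * Sizing note (illustrative numbers): with RADIX_PGD_INDEX_SIZE of
 * 13, the radix PGD is 2^13 * 8 bytes = 64KB.  That is exactly one
 * page under CONFIG_PPC_64K_PAGES, but an order-4 (16 x 4KB = 64KB)
 * allocation otherwise, hence the split in the two helpers above.
 */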

static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	if (radix_enabled())
		return radix__pgd_alloc(mm);
	return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
				pgtable_gfp_flags(mm, GFP_KERNEL));
}

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	if (radix_enabled())
		return radix__pgd_free(mm, pgd);
	kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
}

static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
{
	pgd_set(pgd, __pgtable_ptr_val(pud) | PGD_VAL_BITS);
}
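
/*
 * The *_populate() helpers below all follow the same pattern as
 * pgd_populate() above: __pgtable_ptr_val() turns the child table's
 * kernel address into the physical form stored in an entry, and the
 * per-level *_VAL_BITS mark it as a valid next-level pointer, so an
 * entry is always a tagged physical pointer, never a raw one.
 */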

static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE),
				pgtable_gfp_flags(mm, GFP_KERNEL));
}

static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
	kmem_cache_free(PGT_CACHE(PUD_INDEX_SIZE), pud);
}

static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	pud_set(pud, __pgtable_ptr_val(pmd) | PUD_VAL_BITS);
}

static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
				  unsigned long address)
{
	/*
	 * By now all the pud entries should be none entries. So go
	 * ahead and flush the page walk cache.
	 */
	flush_tlb_pgtable(tlb, address);
	pgtable_free_tlb(tlb, pud, PUD_INDEX_SIZE);
}

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX),
				pgtable_gfp_flags(mm, GFP_KERNEL));
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	kmem_cache_free(PGT_CACHE(PMD_CACHE_INDEX), pmd);
}

static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
				  unsigned long address)
{
	/*
	 * By now all the pmd entries should be none entries. So go
	 * ahead and flush the page walk cache.
	 */
	flush_tlb_pgtable(tlb, address);
	pgtable_free_tlb(tlb, pmd, PMD_CACHE_INDEX);
}

static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
				       pte_t *pte)
{
	pmd_set(pmd, __pgtable_ptr_val(pte) | PMD_VAL_BITS);
}

static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
				pgtable_t pte_page)
{
	pmd_set(pmd, __pgtable_ptr_val(pte_page) | PMD_VAL_BITS);
}

static inline pgtable_t pmd_pgtable(pmd_t pmd)
{
	return (pgtable_t)pmd_page_vaddr(pmd);
}

#ifdef CONFIG_PPC_4K_PAGES
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
					  unsigned long address)
{
	return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
}

static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
				      unsigned long address)
{
	struct page *page;
	pte_t *pte;

	pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO | __GFP_ACCOUNT);
	if (!pte)
		return NULL;
	page = virt_to_page(pte);
	if (!pgtable_page_ctor(page)) {
		__free_page(page);
		return NULL;
	}
	return pte;
}
#else /* if CONFIG_PPC_64K_PAGES */

static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
					  unsigned long address)
{
	return (pte_t *)pte_fragment_alloc(mm, address, 1);
}

static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
				      unsigned long address)
{
	return (pgtable_t)pte_fragment_alloc(mm, address, 0);
}
#endif /* CONFIG_PPC_4K_PAGES */
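
/*
 * Background sketch for the 64K-page path (numbers illustrative, see
 * the pte fragment code in arch/powerpc/mm): pte_fragment_alloc()
 * slices one 64K page into PTE_FRAG_NR sub-page fragments (e.g. 16
 * fragments of 4KB) and hands them out individually, refcounting the
 * backing page.  pte_fragment_free() drops a reference, and the page
 * goes back to the allocator only when its last fragment is freed.
 * This is also why pgtable_t is a pte_t * here rather than a
 * struct page *, and why pmd_pgtable() above returns
 * pmd_page_vaddr() cast to pgtable_t.
 */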

static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	pte_fragment_free((unsigned long *)pte, 1);
}

static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
{
	pte_fragment_free((unsigned long *)ptepage, 0);
}

static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
				  unsigned long address)
{
	/*
	 * By now all the pte entries should be none entries. So go
	 * ahead and flush the page walk cache.
	 */
	flush_tlb_pgtable(tlb, address);
	pgtable_free_tlb(tlb, table, 0);
}

#define check_pgt_cache()	do { } while (0)

#endif /* _ASM_POWERPC_BOOK3S_64_PGALLOC_H */