/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_PGALLOC_32_H
#define _ASM_POWERPC_PGALLOC_32_H

#include <linux/threads.h>
#include <linux/slab.h>

/*
 * Functions that deal with pagetables that could be at any level of
 * the table need to be passed an "index_size" so they know how to
 * handle allocation.  For PTE pages (which are linked to a struct
 * page for now, and drawn from the main get_free_pages() pool), the
 * allocation size will be (2^index_size * sizeof(pointer)) and
 * allocations are drawn from the kmem_cache in PGT_CACHE(index_size).
 *
 * The maximum index size needs to be big enough to allow any
 * pagetable sizes we need, but small enough to fit in the low bits of
 * any page table pointer.  In other words, all pagetables, even tiny
 * ones, must be aligned to allow at least enough low 0 bits to
 * contain this value.  This value is also used as a mask, so it must
 * be one less than a power of two.
 */
#define MAX_PGTABLE_INDEX_SIZE	0xf

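/*
 * For example (sizes hypothetical): a table with index_size 3 holds
 * 2^3 pointers, 32 bytes on a 32-bit build, and is carved from
 * PGT_CACHE(3).  Assuming each PGT_CACHE() slab aligns its objects
 * to at least MAX_PGTABLE_INDEX_SIZE + 1 bytes, the low four bits of
 * any table address are zero, so pgtable_free_tlb() below can stash
 * the index_size there and __tlb_remove_table() can mask it back
 * out.
 */
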
extern void __bad_pte(pmd_t *pmd);

extern struct kmem_cache *pgtable_cache[];
#define PGT_CACHE(shift) pgtable_cache[shift]

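/*
 * The PGD is a fixed-size table, so it comes straight from the
 * PGT_CACHE() slab matching PGD_INDEX_SIZE.
 */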
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
			pgtable_gfp_flags(mm, GFP_KERNEL));
}

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
}

/*
 * We don't have any real pmd's, and this code never triggers because
 * the pgd will always be present.
 */
/* #define pmd_alloc_one(mm,address)       ({ BUG(); ((pmd_t *)2); }) */
#define pmd_free(mm, x)			do { } while (0)
#define __pmd_free_tlb(tlb,x,a)		do { } while (0)
/* #define pgd_populate(mm, pmd, pte)      BUG() */

#ifndef CONFIG_BOOKE

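/*
 * Without CONFIG_BOOKE the PMD entry carries the physical address of
 * the PTE page, tagged with the software "present" bit (and "user"
 * for the non-kernel variant below).
 */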
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp,
				       pte_t *pte)
{
	*pmdp = __pmd(__pa(pte) | _PMD_PRESENT);
}

static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pte_page)
{
	*pmdp = __pmd(__pa(pte_page) | _PMD_USER | _PMD_PRESENT);
}

#define pmd_pgtable(pmd) ((pgtable_t)pmd_page_vaddr(pmd))
#else

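/*
 * With CONFIG_BOOKE the PMD entry keeps the kernel virtual address
 * of the PTE page instead; BookE cores reload their TLBs in
 * software, so the table walker wants a directly usable pointer
 * rather than a physical address.
 */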
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp,
				       pte_t *pte)
{
	*pmdp = __pmd((unsigned long)pte | _PMD_PRESENT);
}

static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pte_page)
{
	*pmdp = __pmd((unsigned long)pte_page | _PMD_PRESENT);
}

#define pmd_pgtable(pmd) ((pgtable_t)pmd_page_vaddr(pmd))
#endif

extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm);
extern pgtable_t pte_alloc_one(struct mm_struct *mm);
void pte_frag_destroy(void *pte_frag);
pte_t *pte_fragment_alloc(struct mm_struct *mm, int kernel);
void pte_fragment_free(unsigned long *table, int kernel);

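/*
 * PTE tables come from the pte fragment allocator, which hands out
 * sub-page fragments so several page tables can share one backing
 * page.  The "kernel" argument says whether the fragment backs a
 * kernel (1) or user (0) page table.
 */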
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	pte_fragment_free((unsigned long *)pte, 1);
}

static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
{
	pte_fragment_free((unsigned long *)ptepage, 0);
}

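/*
 * Free a page table of any level: index_size 0 denotes a PTE
 * fragment, any other value selects the matching PGT_CACHE() slab.
 */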
static inline void pgtable_free(void *table, unsigned index_size)
{
	if (!index_size) {
		pte_fragment_free((unsigned long *)table, 0);
	} else {
		BUG_ON(index_size > MAX_PGTABLE_INDEX_SIZE);
		kmem_cache_free(PGT_CACHE(index_size), table);
	}
}

#define check_pgt_cache()	do { } while (0)
#define get_hugepd_cache_index(x)	(x)

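/*
 * On SMP a table page may still be walked by another CPU after it is
 * unhooked, so the free is deferred through tlb_remove_table().  The
 * table's index_size rides in the (guaranteed-zero) low bits of the
 * pointer and is recovered in __tlb_remove_table() once it is safe
 * to free.
 */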
#ifdef CONFIG_SMP
static inline void pgtable_free_tlb(struct mmu_gather *tlb,
				    void *table, int shift)
{
	unsigned long pgf = (unsigned long)table;

	BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
	pgf |= shift;
	tlb_remove_table(tlb, (void *)pgf);
}

static inline void __tlb_remove_table(void *_table)
{
	void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
	unsigned shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;

	pgtable_free(table, shift);
}
#else
static inline void pgtable_free_tlb(struct mmu_gather *tlb,
				    void *table, int shift)
{
	pgtable_free(table, shift);
}
#endif

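/*
 * PTE pages always use index_size 0, hence the 0 shift below;
 * tlb_flush_pgtable() knocks any cached walk state for this table
 * out of the MMU before the page is queued for freeing.
 */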
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
				  unsigned long address)
{
	tlb_flush_pgtable(tlb, address);
	pgtable_free_tlb(tlb, table, 0);
}
#endif /* _ASM_POWERPC_PGALLOC_32_H */