xref: /openbmc/linux/arch/powerpc/include/asm/hugetlb.h (revision 8730046c)
#ifndef _ASM_POWERPC_HUGETLB_H
#define _ASM_POWERPC_HUGETLB_H

#ifdef CONFIG_HUGETLB_PAGE
#include <asm/page.h>
#include <asm-generic/hugetlb.h>

extern struct kmem_cache *hugepte_cache;

#ifdef CONFIG_PPC_BOOK3S_64

#include <asm/book3s/64/hugetlb.h>
/*
 * This encoding should work for other subarchs too, but right now the
 * new format is only used for 64-bit Book3S.
 */
static inline pte_t *hugepd_page(hugepd_t hpd)
{
	BUG_ON(!hugepd_ok(hpd));
	/*
	 * We have only four bits to encode the MMU page size.
	 */
	BUILD_BUG_ON((MMU_PAGE_COUNT - 1) > 0xf);
	return __va(hpd.pd & HUGEPD_ADDR_MASK);
}

static inline unsigned int hugepd_mmu_psize(hugepd_t hpd)
{
	return (hpd.pd & HUGEPD_SHIFT_MASK) >> 2;
}

static inline unsigned int hugepd_shift(hugepd_t hpd)
{
	return mmu_psize_to_shift(hugepd_mmu_psize(hpd));
}
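
/*
 * Worked example (illustrative, not taken from the kernel sources): with
 * the four-bit encoding above, the page-size index lives in bits 2-5 of
 * hpd.pd.  For 16M pages hugepd_mmu_psize() returns MMU_PAGE_16M, and
 * hugepd_shift() maps that through mmu_psize_to_shift() to 24, since
 * 16M == 1UL << 24.
 */
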
static inline void flush_hugetlb_page(struct vm_area_struct *vma,
				      unsigned long vmaddr)
{
	if (radix_enabled())
		return radix__flush_hugetlb_page(vma, vmaddr);
}

static inline void __local_flush_hugetlb_page(struct vm_area_struct *vma,
					      unsigned long vmaddr)
{
	if (radix_enabled())
		return radix__local_flush_hugetlb_page(vma, vmaddr);
}
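
/*
 * Note: there is intentionally no hash-MMU branch above.  On the hash MMU
 * the translation is torn down when the hash PTE is invalidated, so these
 * helpers only need to flush by virtual address when radix is enabled.
 */
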
#else

static inline pte_t *hugepd_page(hugepd_t hpd)
{
	BUG_ON(!hugepd_ok(hpd));
#ifdef CONFIG_PPC_8xx
	return (pte_t *)__va(hpd.pd & ~(_PMD_PAGE_MASK | _PMD_PRESENT_MASK));
#else
	return (pte_t *)((hpd.pd & ~HUGEPD_SHIFT_MASK) | PD_HUGE);
#endif
}

static inline unsigned int hugepd_shift(hugepd_t hpd)
{
#ifdef CONFIG_PPC_8xx
	return ((hpd.pd & _PMD_PAGE_MASK) >> 1) + 17;
#else
	return hpd.pd & HUGEPD_SHIFT_MASK;
#endif
}
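
/*
 * Worked example for the 8xx arithmetic, assuming the usual 8xx PMD
 * page-size field values (_PMD_PAGE_512K == 0x4 and _PMD_PAGE_8M == 0xc;
 * treat these as illustrative assumptions, not definitions from this
 * header):
 *   (0x4 >> 1) + 17 = 19 = log2(512k)
 *   (0xc >> 1) + 17 = 23 = log2(8M)
 * i.e. the PMD page-size field encodes the huge page shift directly.
 */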

#endif /* CONFIG_PPC_BOOK3S_64 */

static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
				    unsigned pdshift)
{
	/*
	 * On FSL BookE we have multiple higher-level table entries that
	 * point to the same hugepte.  They are all identical, so just use
	 * the first one; in that case idx stays 0.
	 */
	unsigned long idx = 0;

	pte_t *dir = hugepd_page(hpd);
#ifndef CONFIG_PPC_FSL_BOOK3E
	idx = (addr & ((1UL << pdshift) - 1)) >> hugepd_shift(hpd);
#endif

	return dir + idx;
}
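
/*
 * Worked example with made-up numbers: for 64k huge pages under a table
 * entry covering 16M (pdshift = 24, hugepd_shift() = 16) and
 * addr = 0x10235000:
 *   idx = (0x10235000 & ((1UL << 24) - 1)) >> 16 = 0x235000 >> 16 = 0x23
 * so hugepte_offset() returns dir + 35.
 */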

pte_t *huge_pte_offset_and_shift(struct mm_struct *mm,
				 unsigned long addr, unsigned *shift);

void flush_dcache_icache_hugepage(struct page *page);

#if defined(CONFIG_PPC_MM_SLICES)
int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
			   unsigned long len);
#else
static inline int is_hugepage_only_range(struct mm_struct *mm,
					 unsigned long addr,
					 unsigned long len)
{
	return 0;
}
#endif
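
/*
 * Why the split: with CONFIG_PPC_MM_SLICES the address space is divided
 * into slices that each carry their own page size, so the out-of-line
 * version has to consult the per-slice page sizes.  Without slices, no
 * range is hugepage-only, hence the constant 0 above.
 */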

void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
			    pte_t pte);
#ifdef CONFIG_PPC_8xx
static inline void flush_hugetlb_page(struct vm_area_struct *vma,
				      unsigned long vmaddr)
{
	flush_tlb_page(vma, vmaddr);
}
#else
void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
#endif

void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
			    unsigned long end, unsigned long floor,
			    unsigned long ceiling);

/*
 * The version of vma_mmu_pagesize() in arch/powerpc/mm/hugetlbpage.c needs
 * to override the version in mm/hugetlb.c.
 */
#define vma_mmu_pagesize vma_mmu_pagesize

/*
 * If the arch doesn't supply something else, assume that regions aligned
 * to the hugepage size are OK without further preparation.
 */
static inline int prepare_hugepage_range(struct file *file,
			unsigned long addr, unsigned long len)
{
	struct hstate *h = hstate_file(file);

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (addr & ~huge_page_mask(h))
		return -EINVAL;
	return 0;
}
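
/*
 * Example with assumed numbers: for a 16M hstate, huge_page_mask(h) is
 * ~(16M - 1), so addr = 0x1000000 and len = 0x2000000 both pass (16M
 * multiples), while len = 0x1234000 fails with -EINVAL because
 * 0x1234000 & 0xffffff is non-zero.
 */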

static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, pte_t pte)
{
	set_pte_at(mm, addr, ptep, pte);
}

static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_PPC64
	/* Clear all PTE bits (clear mask ~0UL, set mask 0), huge = 1. */
	return __pte(pte_update(mm, addr, ptep, ~0UL, 0, 1));
#else
	return __pte(pte_update(ptep, ~0UL, 0));
#endif
}

static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *ptep)
{
	huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
	flush_hugetlb_page(vma, addr);
}

static inline int huge_pte_none(pte_t pte)
{
	return pte_none(pte);
}

static inline pte_t huge_pte_wrprotect(pte_t pte)
{
	return pte_wrprotect(pte);
}

static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
					     unsigned long addr, pte_t *ptep,
					     pte_t pte, int dirty)
{
#ifdef HUGETLB_NEED_PRELOAD
	/*
	 * The "return 1" forces a call of update_mmu_cache, which will write a
	 * TLB entry.  Without this, platforms that don't do a write of the TLB
	 * entry in the TLB miss handler asm will fault ad infinitum.
	 */
	ptep_set_access_flags(vma, addr, ptep, pte, dirty);
	return 1;
#else
	return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
#endif
}

static inline pte_t huge_ptep_get(pte_t *ptep)
{
	return *ptep;
}

static inline void arch_clear_hugepage_flags(struct page *page)
{
}

#else /* ! CONFIG_HUGETLB_PAGE */
static inline void flush_hugetlb_page(struct vm_area_struct *vma,
				      unsigned long vmaddr)
{
}

#define hugepd_shift(x) 0
static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
				    unsigned pdshift)
{
	return NULL;
}
#endif /* CONFIG_HUGETLB_PAGE */

/*
 * FSL Book3E platforms require special gpage handling: the gpages are
 * reserved early in the boot process by memblock instead of via the .dts
 * as on IBM platforms.
 */
#if defined(CONFIG_HUGETLB_PAGE) && (defined(CONFIG_PPC_FSL_BOOK3E) || \
    defined(CONFIG_PPC_8xx))
extern void __init reserve_hugetlb_gpages(void);
#else
static inline void reserve_hugetlb_gpages(void)
{
}
#endif

#endif /* _ASM_POWERPC_HUGETLB_H */