/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_HUGETLB_H
#define _ASM_POWERPC_HUGETLB_H

#ifdef CONFIG_HUGETLB_PAGE
#include <asm/page.h>
#include <asm-generic/hugetlb.h>

extern struct kmem_cache *hugepte_cache;

#ifdef CONFIG_PPC_BOOK3S_64

#include <asm/book3s/64/hugetlb.h>
/*
 * This should work for other subarchs too, but right now we use the
 * new format only for 64-bit Book3S.
 */
static inline pte_t *hugepd_page(hugepd_t hpd)
{
	BUG_ON(!hugepd_ok(hpd));
	/*
	 * We have only four bits to encode the MMU page size.
	 */
	BUILD_BUG_ON((MMU_PAGE_COUNT - 1) > 0xf);
	return __va(hpd_val(hpd) & HUGEPD_ADDR_MASK);
}

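/*
 * Recover the MMU page size index; it is kept in the HUGEPD_SHIFT_MASK
 * field, shifted up by two bits.
 */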
static inline unsigned int hugepd_mmu_psize(hugepd_t hpd)
{
	return (hpd_val(hpd) & HUGEPD_SHIFT_MASK) >> 2;
}

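/* Translate the stored MMU page size index into a page shift. */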
static inline unsigned int hugepd_shift(hugepd_t hpd)
{
	return mmu_psize_to_shift(hugepd_mmu_psize(hpd));
}

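/*
 * Radix needs an explicit TLB flush for hugetlb pages; on hash,
 * invalidation is handled when the PTE itself is updated, so there is
 * nothing to do here.
 */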
static inline void flush_hugetlb_page(struct vm_area_struct *vma,
				      unsigned long vmaddr)
{
	if (radix_enabled())
		return radix__flush_hugetlb_page(vma, vmaddr);
}

#else

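/*
 * Mask off the encoded shift to recover the pointer to the hugepte
 * table; on non-8xx the PD_HUGE bit must also be set in the resulting
 * address.
 */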
static inline pte_t *hugepd_page(hugepd_t hpd)
{
	BUG_ON(!hugepd_ok(hpd));
#ifdef CONFIG_PPC_8xx
	return (pte_t *)__va(hpd_val(hpd) & ~HUGEPD_SHIFT_MASK);
#else
	return (pte_t *)((hpd_val(hpd) & ~HUGEPD_SHIFT_MASK) | PD_HUGE);
#endif
}

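/*
 * On 8xx the shift is derived from the PMD page-size bits (512k or 8M
 * pages); other platforms store the shift directly in the low bits.
 */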
static inline unsigned int hugepd_shift(hugepd_t hpd)
{
#ifdef CONFIG_PPC_8xx
	return ((hpd_val(hpd) & _PMD_PAGE_MASK) >> 1) + 17;
#else
	return hpd_val(hpd) & HUGEPD_SHIFT_MASK;
#endif
}

#endif /* CONFIG_PPC_BOOK3S_64 */

static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
				    unsigned pdshift)
{
	/*
	 * On FSL BookE we have multiple higher-level table entries that
	 * point to the same hugepte.  They are all identical, so just use
	 * the first one (idx = 0).
	 */
	unsigned long idx = 0;

	pte_t *dir = hugepd_page(hpd);
#ifndef CONFIG_PPC_FSL_BOOK3E
	idx = (addr & ((1UL << pdshift) - 1)) >> hugepd_shift(hpd);
#endif

	return dir + idx;
}

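/*
 * Look up the hugepte that maps @addr and report the page shift of the
 * mapping via @shift.
 */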
pte_t *huge_pte_offset_and_shift(struct mm_struct *mm,
				 unsigned long addr, unsigned *shift);

void flush_dcache_icache_hugepage(struct page *page);

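/*
 * With CONFIG_PPC_MM_SLICES the page size is tracked per address-space
 * slice, so whether a range may only contain hugepages depends on the
 * slice map; without slices, no range is restricted to hugepages.
 */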
#if defined(CONFIG_PPC_MM_SLICES)
int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
			   unsigned long len);
#else
static inline int is_hugepage_only_range(struct mm_struct *mm,
					 unsigned long addr,
					 unsigned long len)
{
	return 0;
}
#endif

void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
			    pte_t pte);
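
/*
 * On 8xx the generic per-page TLB flush also covers hugepages; other
 * platforms provide a dedicated out-of-line implementation.
 */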
#ifdef CONFIG_PPC_8xx
static inline void flush_hugetlb_page(struct vm_area_struct *vma,
				      unsigned long vmaddr)
{
	flush_tlb_page(vma, vmaddr);
}
#else
void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
#endif

void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
			    unsigned long end, unsigned long floor,
			    unsigned long ceiling);

/*
 * The version of vma_mmu_pagesize() in arch/powerpc/mm/hugetlbpage.c needs
 * to override the version in mm/hugetlb.c.
 */
#define vma_mmu_pagesize vma_mmu_pagesize

/*
 * If the arch doesn't supply something else, assume that hugepage
 * size aligned regions are ok without further preparation.
 */
static inline int prepare_hugepage_range(struct file *file,
			unsigned long addr, unsigned long len)
{
	struct hstate *h = hstate_file(file);

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (addr & ~huge_page_mask(h))
		return -EINVAL;
	return 0;
}

static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, pte_t pte)
{
	set_pte_at(mm, addr, ptep, pte);
}

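/*
 * Clear the hugepte and return the old value.  pte_update() takes
 * different arguments on 32-bit and 64-bit; the 64-bit variant also
 * takes a 'huge' flag.
 */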
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_PPC64
	return __pte(pte_update(mm, addr, ptep, ~0UL, 0, 1));
#else
	return __pte(pte_update(ptep, ~0UL, 0));
#endif
}

static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *ptep)
{
	huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
	flush_hugetlb_page(vma, addr);
}

static inline int huge_pte_none(pte_t pte)
{
	return pte_none(pte);
}

static inline pte_t huge_pte_wrprotect(pte_t pte)
{
	return pte_wrprotect(pte);
}

static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
					     unsigned long addr, pte_t *ptep,
					     pte_t pte, int dirty)
{
#ifdef HUGETLB_NEED_PRELOAD
	/*
	 * The "return 1" forces a call to update_mmu_cache(), which will
	 * write a TLB entry.  Without this, platforms that don't write the
	 * TLB entry in the TLB miss handler asm will fault ad infinitum.
	 */
	ptep_set_access_flags(vma, addr, ptep, pte, dirty);
	return 1;
#else
	return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
#endif
}

static inline pte_t huge_ptep_get(pte_t *ptep)
{
	return *ptep;
}

static inline void arch_clear_hugepage_flags(struct page *page)
{
}

#else /* ! CONFIG_HUGETLB_PAGE */
static inline void flush_hugetlb_page(struct vm_area_struct *vma,
				      unsigned long vmaddr)
{
}

#define hugepd_shift(x) 0
static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
				    unsigned pdshift)
{
	return NULL;
}
#endif /* CONFIG_HUGETLB_PAGE */

#endif /* _ASM_POWERPC_HUGETLB_H */