xref: /openbmc/linux/arch/powerpc/include/asm/hugetlb.h (revision fb960bd2)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_HUGETLB_H
#define _ASM_POWERPC_HUGETLB_H

#ifdef CONFIG_HUGETLB_PAGE
#include <asm/page.h>
#include <asm-generic/hugetlb.h>

extern struct kmem_cache *hugepte_cache;

#ifdef CONFIG_PPC_BOOK3S_64

#include <asm/book3s/64/hugetlb.h>
/*
 * This should work for other subarchs too, but right now we use the
 * new format only for 64-bit book3s.
 */
static inline pte_t *hugepd_page(hugepd_t hpd)
{
	BUG_ON(!hugepd_ok(hpd));
	/*
	 * We have only four bits to encode the MMU page size.
	 */
	BUILD_BUG_ON((MMU_PAGE_COUNT - 1) > 0xf);
	return __va(hpd_val(hpd) & HUGEPD_ADDR_MASK);
}

static inline unsigned int hugepd_mmu_psize(hugepd_t hpd)
{
	return (hpd_val(hpd) & HUGEPD_SHIFT_MASK) >> 2;
}

static inline unsigned int hugepd_shift(hugepd_t hpd)
{
	return mmu_psize_to_shift(hugepd_mmu_psize(hpd));
}
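
/*
 * Example (illustrative only, not part of the kernel API): how the three
 * helpers above unpack a book3s64 hugepd.  The concrete shift value
 * assumes a 16M MMU page size whose mmu_psize_defs entry yields 24;
 * real values come from the platform's page size table.
 *
 *	pte_t *table = hugepd_page(hpd);	    hugepte table, via __va()
 *	unsigned int psize = hugepd_mmu_psize(hpd); MMU page size index
 *	unsigned int shift = hugepd_shift(hpd);	    e.g. 24 for MMU_PAGE_16M
 */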

static inline void flush_hugetlb_page(struct vm_area_struct *vma,
				      unsigned long vmaddr)
{
	if (radix_enabled())
		return radix__flush_hugetlb_page(vma, vmaddr);
}

#else

static inline pte_t *hugepd_page(hugepd_t hpd)
{
	BUG_ON(!hugepd_ok(hpd));
#ifdef CONFIG_PPC_8xx
	return (pte_t *)__va(hpd_val(hpd) &
			     ~(_PMD_PAGE_MASK | _PMD_PRESENT_MASK));
#else
	return (pte_t *)((hpd_val(hpd) &
			  ~HUGEPD_SHIFT_MASK) | PD_HUGE);
#endif
}

static inline unsigned int hugepd_shift(hugepd_t hpd)
{
#ifdef CONFIG_PPC_8xx
	return ((hpd_val(hpd) & _PMD_PAGE_MASK) >> 1) + 17;
#else
	return hpd_val(hpd) & HUGEPD_SHIFT_MASK;
#endif
}
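
/*
 * Worked example for the 8xx branch above, assuming the pte-8xx.h
 * values _PMD_PAGE_512K = 0x4 and _PMD_PAGE_8M = 0xc (check these
 * against the tree at hand; 8xx supports 512K and 8M huge pages):
 *
 *	512K: (0x4 >> 1) + 17 = 19, and 1UL << 19 == 512K
 *	8M:   (0xc >> 1) + 17 = 23, and 1UL << 23 == 8M
 */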

#endif /* CONFIG_PPC_BOOK3S_64 */

static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
				    unsigned pdshift)
{
	/*
	 * On FSL BookE, we have multiple higher-level table entries that
	 * point to the same hugepte.  Just use the first one since they're
	 * all identical, so in that case idx stays 0.
	 */
	unsigned long idx = 0;

	pte_t *dir = hugepd_page(hpd);
#ifndef CONFIG_PPC_FSL_BOOK3E
	idx = (addr & ((1UL << pdshift) - 1)) >> hugepd_shift(hpd);
#endif

	return dir + idx;
}
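
/*
 * Worked example for the index computation above (hypothetical values):
 * pdshift = 28 (the directory entry spans 256M), hugepd_shift(hpd) = 24
 * (16M huge pages), addr = 0x15000000:
 *
 *	idx = (0x15000000 & ((1UL << 28) - 1)) >> 24
 *	    = 0x5000000 >> 24 = 5
 *
 * i.e. the sixth 16M hugepte in the table.
 */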

pte_t *huge_pte_offset_and_shift(struct mm_struct *mm,
				 unsigned long addr, unsigned *shift);

void flush_dcache_icache_hugepage(struct page *page);

#if defined(CONFIG_PPC_MM_SLICES)
int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
			   unsigned long len);
#else
static inline int is_hugepage_only_range(struct mm_struct *mm,
					 unsigned long addr,
					 unsigned long len)
{
	return 0;
}
#endif

void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
			    pte_t pte);
#ifdef CONFIG_PPC_8xx
static inline void flush_hugetlb_page(struct vm_area_struct *vma,
				      unsigned long vmaddr)
{
	flush_tlb_page(vma, vmaddr);
}
#else
void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
#endif

void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
			    unsigned long end, unsigned long floor,
			    unsigned long ceiling);

/*
 * The version of vma_mmu_pagesize() in arch/powerpc/mm/hugetlbpage.c needs
 * to override the version in mm/hugetlb.c
 */
#define vma_mmu_pagesize vma_mmu_pagesize
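
/*
 * How the override takes effect (a sketch; the guard lives in the
 * generic code): defining the macro to its own name makes an
 * "#ifndef vma_mmu_pagesize" test in mm/hugetlb.c fail, so a generic
 * fallback along these lines is compiled out and the powerpc version
 * is used instead:
 *
 *	#ifndef vma_mmu_pagesize
 *	unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
 *	{
 *		return vma_kernel_pagesize(vma);
 *	}
 *	#endif
 */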

/*
 * If the arch doesn't supply something else, assume that regions
 * aligned to the hugepage size are OK without further preparation.
 */
static inline int prepare_hugepage_range(struct file *file,
			unsigned long addr, unsigned long len)
{
	struct hstate *h = hstate_file(file);

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (addr & ~huge_page_mask(h))
		return -EINVAL;
	return 0;
}
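
/*
 * Worked example (hypothetical 16M hstate, so huge_page_mask(h) is
 * ~0xffffffUL):
 *
 *	len  = 0x2000000 (32M):  0x2000000 & 0xffffff == 0  -> OK
 *	len  = 0x1800000 (24M):  0x1800000 & 0xffffff != 0  -> -EINVAL
 *	addr = 0x10400000:       not 16M-aligned            -> -EINVAL
 */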

static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, pte_t pte)
{
	set_pte_at(mm, addr, ptep, pte);
}

static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_PPC64
	return __pte(pte_update(mm, addr, ptep, ~0UL, 0, 1));
#else
	return __pte(pte_update(ptep, ~0UL, 0));
#endif
}
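
/*
 * In both branches above, pte_update() atomically clears every PTE bit
 * (clear mask ~0UL, set mask 0) and returns the old value.  The 64-bit
 * variant also takes mm/addr and a trailing "huge" argument (the 1) so
 * the update is accounted against the huge page size.
 */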

static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *ptep)
{
	pte_t pte;

	pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
	flush_hugetlb_page(vma, addr);
}

static inline int huge_pte_none(pte_t pte)
{
	return pte_none(pte);
}

static inline pte_t huge_pte_wrprotect(pte_t pte)
{
	return pte_wrprotect(pte);
}

static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
					     unsigned long addr, pte_t *ptep,
					     pte_t pte, int dirty)
{
#ifdef HUGETLB_NEED_PRELOAD
	/*
	 * The "return 1" forces a call of update_mmu_cache, which will write a
	 * TLB entry.  Without this, platforms that don't do a write of the TLB
	 * entry in the TLB miss handler asm will fault ad infinitum.
	 */
	ptep_set_access_flags(vma, addr, ptep, pte, dirty);
	return 1;
#else
	return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
#endif
}
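
/*
 * Caller-side sketch of why returning 1 matters (the generic hugetlb
 * fault path in mm/hugetlb.c follows this pattern):
 *
 *	if (huge_ptep_set_access_flags(vma, address, ptep, entry, write))
 *		update_mmu_cache(vma, address, ptep);
 *
 * so on HUGETLB_NEED_PRELOAD platforms the unconditional 1 guarantees
 * update_mmu_cache() runs and preloads the TLB entry.
 */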

static inline pte_t huge_ptep_get(pte_t *ptep)
{
	return *ptep;
}

static inline void arch_clear_hugepage_flags(struct page *page)
{
}

#else /* ! CONFIG_HUGETLB_PAGE */
static inline void flush_hugetlb_page(struct vm_area_struct *vma,
				      unsigned long vmaddr)
{
}

#define hugepd_shift(x) 0
static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
				    unsigned pdshift)
{
	return NULL;
}
#endif /* CONFIG_HUGETLB_PAGE */

#endif /* _ASM_POWERPC_HUGETLB_H */