/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_HUGETLB_H
#define _ASM_POWERPC_HUGETLB_H

#ifdef CONFIG_HUGETLB_PAGE
#include <asm/page.h>
#include <asm-generic/hugetlb.h>

extern struct kmem_cache *hugepte_cache;

#ifdef CONFIG_PPC_BOOK3S_64

#include <asm/book3s/64/hugetlb.h>
/*
 * This should work for other subarchs too, but right now we use the
 * new format only for 64-bit book3s.
 */
static inline pte_t *hugepd_page(hugepd_t hpd)
{
	BUG_ON(!hugepd_ok(hpd));
	/*
	 * We have only four bits to encode the MMU page size.
	 */
	BUILD_BUG_ON((MMU_PAGE_COUNT - 1) > 0xf);
	return __va(hpd_val(hpd) & HUGEPD_ADDR_MASK);
}

static inline unsigned int hugepd_mmu_psize(hugepd_t hpd)
{
	return (hpd_val(hpd) & HUGEPD_SHIFT_MASK) >> 2;
}

static inline unsigned int hugepd_shift(hugepd_t hpd)
{
	return mmu_psize_to_shift(hugepd_mmu_psize(hpd));
}

static inline void flush_hugetlb_page(struct vm_area_struct *vma,
				      unsigned long vmaddr)
{
	if (radix_enabled())
		return radix__flush_hugetlb_page(vma, vmaddr);
}

static inline void __local_flush_hugetlb_page(struct vm_area_struct *vma,
					      unsigned long vmaddr)
{
	if (radix_enabled())
		return radix__local_flush_hugetlb_page(vma, vmaddr);
}
#else

static inline pte_t *hugepd_page(hugepd_t hpd)
{
	BUG_ON(!hugepd_ok(hpd));
#ifdef CONFIG_PPC_8xx
	return (pte_t *)__va(hpd_val(hpd) &
			     ~(_PMD_PAGE_MASK | _PMD_PRESENT_MASK));
#else
	return (pte_t *)((hpd_val(hpd) &
			  ~HUGEPD_SHIFT_MASK) | PD_HUGE);
#endif
}

static inline unsigned int hugepd_shift(hugepd_t hpd)
{
#ifdef CONFIG_PPC_8xx
	return ((hpd_val(hpd) & _PMD_PAGE_MASK) >> 1) + 17;
#else
	return hpd_val(hpd) & HUGEPD_SHIFT_MASK;
#endif
}

#endif /* CONFIG_PPC_BOOK3S_64 */
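/*
 * Editor's sketch, guarded out with #if 0 and not part of this header;
 * the function name is made up for illustration. It only shows how the
 * accessors above compose: hugepd_page() yields the kernel VA of the
 * hugepte table behind a hugepd entry, hugepd_shift() the log2 of the
 * huge page size it maps, so 1UL << hugepd_shift(hpd) is that size in
 * bytes. hugepte_offset() below is the real lookup helper.
 */
#if 0
static inline unsigned long hugepd_size_example(hugepd_t hpd)
{
	pte_t *table = hugepd_page(hpd);	/* base of the hugepte table */
	unsigned int shift = hugepd_shift(hpd);	/* e.g. 24 for 16M pages */

	(void)table;		/* a real walker indexes this per address */
	return 1UL << shift;	/* huge page size in bytes */
}
#endif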
static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
				    unsigned pdshift)
{
	/*
	 * On FSL BookE, we have multiple higher-level table entries that
	 * point to the same hugepte. Just use the first one since they're
	 * all identical. So for that case, idx=0.
	 */
	unsigned long idx = 0;

	pte_t *dir = hugepd_page(hpd);
#ifndef CONFIG_PPC_FSL_BOOK3E
	idx = (addr & ((1UL << pdshift) - 1)) >> hugepd_shift(hpd);
#endif

	return dir + idx;
}

pte_t *huge_pte_offset_and_shift(struct mm_struct *mm,
				 unsigned long addr, unsigned *shift);

void flush_dcache_icache_hugepage(struct page *page);

#if defined(CONFIG_PPC_MM_SLICES)
int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
			   unsigned long len);
#else
static inline int is_hugepage_only_range(struct mm_struct *mm,
					 unsigned long addr,
					 unsigned long len)
{
	return 0;
}
#endif

void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
			    pte_t pte);

#ifdef CONFIG_PPC_8xx
static inline void flush_hugetlb_page(struct vm_area_struct *vma,
				      unsigned long vmaddr)
{
	flush_tlb_page(vma, vmaddr);
}
#else
void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
#endif

void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
			    unsigned long end, unsigned long floor,
			    unsigned long ceiling);

/*
 * The version of vma_mmu_pagesize() in arch/powerpc/mm/hugetlbpage.c needs
 * to override the version in mm/hugetlb.c.
 */
#define vma_mmu_pagesize vma_mmu_pagesize

/*
 * If the arch doesn't supply something else, assume that hugepage
 * size aligned regions are ok without further preparation.
 */
static inline int prepare_hugepage_range(struct file *file,
					 unsigned long addr, unsigned long len)
{
	struct hstate *h = hstate_file(file);

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (addr & ~huge_page_mask(h))
		return -EINVAL;
	return 0;
}

static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, pte_t pte)
{
	set_pte_at(mm, addr, ptep, pte);
}

static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_PPC64
	return __pte(pte_update(mm, addr, ptep, ~0UL, 0, 1));
#else
	return __pte(pte_update(ptep, ~0UL, 0));
#endif
}

static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *ptep)
{
	pte_t pte;

	pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
	flush_hugetlb_page(vma, addr);
}

static inline int huge_pte_none(pte_t pte)
{
	return pte_none(pte);
}

static inline pte_t huge_pte_wrprotect(pte_t pte)
{
	return pte_wrprotect(pte);
}

static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
					     unsigned long addr, pte_t *ptep,
					     pte_t pte, int dirty)
{
#ifdef HUGETLB_NEED_PRELOAD
	/*
	 * The "return 1" forces a call of update_mmu_cache, which will write
	 * a TLB entry. Without this, platforms that don't do a write of the
	 * TLB entry in the TLB miss handler asm will fault ad infinitum.
	 */
	ptep_set_access_flags(vma, addr, ptep, pte, dirty);
	return 1;
#else
	return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
#endif
}

static inline pte_t huge_ptep_get(pte_t *ptep)
{
	return *ptep;
}

static inline void arch_clear_hugepage_flags(struct page *page)
{
}
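/*
 * Editor's sketch, guarded out with #if 0 and not part of this header:
 * roughly how the generic hugetlb code is expected to drive the hooks
 * above when write-protecting a single huge PTE (the real caller lives
 * in mm/hugetlb.c; the function name here is illustrative only). The
 * entry is cleared, modified, reinstalled, and the stale TLB entry shot
 * down.
 */
#if 0
static inline void huge_pte_wrprotect_one_example(struct vm_area_struct *vma,
						  unsigned long addr,
						  pte_t *ptep)
{
	pte_t pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);

	pte = huge_pte_wrprotect(pte);			/* drop write permission */
	set_huge_pte_at(vma->vm_mm, addr, ptep, pte);	/* reinstall the entry */
	flush_hugetlb_page(vma, addr);			/* invalidate stale TLB entry */
}
#endif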
#else /* ! CONFIG_HUGETLB_PAGE */
static inline void flush_hugetlb_page(struct vm_area_struct *vma,
				      unsigned long vmaddr)
{
}

#define hugepd_shift(x) 0
static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
				    unsigned pdshift)
{
	return NULL;
}
#endif /* CONFIG_HUGETLB_PAGE */

#endif /* _ASM_POWERPC_HUGETLB_H */