/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_HUGETLB_H
#define _ASM_POWERPC_HUGETLB_H

#ifdef CONFIG_HUGETLB_PAGE
#include <asm/page.h>
#include <asm-generic/hugetlb.h>

extern struct kmem_cache *hugepte_cache;

#ifdef CONFIG_PPC_BOOK3S_64

#include <asm/book3s/64/hugetlb.h>
/*
 * This should work for other subarchs too, but right now we use the
 * new format only for 64-bit Book3S.
 */
static inline pte_t *hugepd_page(hugepd_t hpd)
{
	BUG_ON(!hugepd_ok(hpd));
	/*
	 * We have only four bits to encode the MMU page size.
	 */
	BUILD_BUG_ON((MMU_PAGE_COUNT - 1) > 0xf);
	return __va(hpd_val(hpd) & HUGEPD_ADDR_MASK);
}

static inline unsigned int hugepd_mmu_psize(hugepd_t hpd)
{
	return (hpd_val(hpd) & HUGEPD_SHIFT_MASK) >> 2;
}

static inline unsigned int hugepd_shift(hugepd_t hpd)
{
	return mmu_psize_to_shift(hugepd_mmu_psize(hpd));
}

static inline void flush_hugetlb_page(struct vm_area_struct *vma,
				      unsigned long vmaddr)
{
	if (radix_enabled())
		return radix__flush_hugetlb_page(vma, vmaddr);
}

#else

static inline pte_t *hugepd_page(hugepd_t hpd)
{
	BUG_ON(!hugepd_ok(hpd));
#ifdef CONFIG_PPC_8xx
	return (pte_t *)__va(hpd_val(hpd) & ~HUGEPD_SHIFT_MASK);
#else
	return (pte_t *)((hpd_val(hpd) &
			  ~HUGEPD_SHIFT_MASK) | PD_HUGE);
#endif
}

static inline unsigned int hugepd_shift(hugepd_t hpd)
{
#ifdef CONFIG_PPC_8xx
	return ((hpd_val(hpd) & _PMD_PAGE_MASK) >> 1) + 17;
#else
	return hpd_val(hpd) & HUGEPD_SHIFT_MASK;
#endif
}

#endif /* CONFIG_PPC_BOOK3S_64 */

static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
				    unsigned pdshift)
{
	/*
	 * On FSL BookE, we have multiple higher-level table entries that
	 * point to the same hugepte.  Just use the first one since they're
	 * all identical.  So for that case, idx=0.
	 */
	unsigned long idx = 0;

	pte_t *dir = hugepd_page(hpd);
#ifndef CONFIG_PPC_FSL_BOOK3E
	idx = (addr & ((1UL << pdshift) - 1)) >> hugepd_shift(hpd);
#endif

	return dir + idx;
}

pte_t *huge_pte_offset_and_shift(struct mm_struct *mm,
				 unsigned long addr, unsigned *shift);

void flush_dcache_icache_hugepage(struct page *page);

int slice_is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
				 unsigned long len);

static inline int is_hugepage_only_range(struct mm_struct *mm,
					 unsigned long addr,
					 unsigned long len)
{
	if (IS_ENABLED(CONFIG_PPC_MM_SLICES) && !radix_enabled())
		return slice_is_hugepage_only_range(mm, addr, len);
	return 0;
}

void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
			    pte_t pte);

#ifdef CONFIG_PPC_8xx
static inline void flush_hugetlb_page(struct vm_area_struct *vma,
				      unsigned long vmaddr)
{
	flush_tlb_page(vma, vmaddr);
}
#else
void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
#endif

void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
			    unsigned long end, unsigned long floor,
			    unsigned long ceiling);
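/*
 * Illustrative sketch only, not part of the kernel API: how a page
 * table walker that has found a hugepd entry would use the helpers
 * above to locate the PTE mapping @addr.  The function name is
 * hypothetical; __find_linux_pte() does the equivalent once it
 * detects a huge page directory entry during a walk.
 */
static inline pte_t *hugepd_lookup_example(hugepd_t hpd, unsigned long addr,
					   unsigned int pdshift,
					   unsigned int *shift)
{
	if (shift)
		/* Page-size shift of the mapping, e.g. 24 for 16M pages */
		*shift = hugepd_shift(hpd);
	/* Index into the hugepte table that this hugepd points at */
	return hugepte_offset(hpd, addr, pdshift);
}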
/*
 * If the arch doesn't supply something else, assume that hugepage
 * size aligned regions are ok without further preparation.
 */
static inline int prepare_hugepage_range(struct file *file,
					 unsigned long addr, unsigned long len)
{
	struct hstate *h = hstate_file(file);

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (addr & ~huge_page_mask(h))
		return -EINVAL;
	return 0;
}

static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, pte_t pte)
{
	set_pte_at(mm, addr, ptep, pte);
}

static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_PPC64
	return __pte(pte_update(mm, addr, ptep, ~0UL, 0, 1));
#else
	return __pte(pte_update(ptep, ~0UL, 0));
#endif
}

static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *ptep)
{
	huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
	flush_hugetlb_page(vma, addr);
}

static inline int huge_pte_none(pte_t pte)
{
	return pte_none(pte);
}

static inline pte_t huge_pte_wrprotect(pte_t pte)
{
	return pte_wrprotect(pte);
}

static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
					     unsigned long addr, pte_t *ptep,
					     pte_t pte, int dirty)
{
#ifdef HUGETLB_NEED_PRELOAD
	/*
	 * The "return 1" forces a call of update_mmu_cache, which will write
	 * a TLB entry.  Without this, platforms that don't do a write of the
	 * TLB entry in the TLB miss handler asm will fault ad infinitum.
	 */
	ptep_set_access_flags(vma, addr, ptep, pte, dirty);
	return 1;
#else
	return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
#endif
}

static inline pte_t huge_ptep_get(pte_t *ptep)
{
	return *ptep;
}

static inline void arch_clear_hugepage_flags(struct page *page)
{
}

#else /* ! CONFIG_HUGETLB_PAGE */

static inline void flush_hugetlb_page(struct vm_area_struct *vma,
				      unsigned long vmaddr)
{
}

#define hugepd_shift(x) 0
static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
				    unsigned pdshift)
{
	return NULL;
}
#endif /* CONFIG_HUGETLB_PAGE */

#endif /* _ASM_POWERPC_HUGETLB_H */
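/*
 * Illustrative sketch only, compiled out: one way generic hugetlb code
 * could combine the helpers declared above to write-protect a huge
 * mapping.  The function is a hypothetical simplification for
 * exposition, not mm/hugetlb.c code.
 */
#if 0
static void example_wrprotect_huge_pte(struct vm_area_struct *vma,
				       unsigned long addr, pte_t *ptep)
{
	/* Atomically fetch and clear the old PTE... */
	pte_t old = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);

	/*
	 * ...reinstall it read-only, then flush any stale (writable)
	 * translation out of the TLB.
	 */
	set_huge_pte_at(vma->vm_mm, addr, ptep, huge_pte_wrprotect(old));
	flush_hugetlb_page(vma, addr);
}
#endif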