/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_64_HASH_64K_H
#define _ASM_POWERPC_BOOK3S_64_HASH_64K_H

#define H_PTE_INDEX_SIZE	8
#define H_PMD_INDEX_SIZE	10
#define H_PUD_INDEX_SIZE	7
#define H_PGD_INDEX_SIZE	8

/*
 * A 64K-aligned address frees up a few of the lower bits of the RPN for us.
 * We steal those here. For more details, look at pte_pfn()/pfn_pte().
 */
#define H_PAGE_COMBO	_RPAGE_RPN0 /* this is a combo 4k page */
#define H_PAGE_4K_PFN	_RPAGE_RPN1 /* PFN is for a single 4k page */
/*
 * We need to differentiate between explicit huge pages and THP huge
 * pages, since a THP huge page also needs to track real subpage details.
 */
#define H_PAGE_THP_HUGE	H_PAGE_4K_PFN

/*
 * Used to track validity of the subpage group when H_PAGE_COMBO is set.
 * This overloads H_PAGE_F_GIX and H_PAGE_F_SECOND.
 */
#define H_PAGE_COMBO_VALID	(H_PAGE_F_GIX | H_PAGE_F_SECOND)

/* PTE flags to conserve for HPTE identification */
#define _PAGE_HPTEFLAGS (H_PAGE_BUSY | H_PAGE_F_SECOND | \
			 H_PAGE_F_GIX | H_PAGE_HASHPTE | H_PAGE_COMBO)
/*
 * We support 16 fragments per PTE page of 64K size.
 */
#define H_PTE_FRAG_NR	16
/*
 * Each fragment uses a 2K PTE page fragment and another 2K for storing
 * the real_pte_t hash index, i.e. 4K per fragment.
 */
#define H_PTE_FRAG_SIZE_SHIFT	12
#define PTE_FRAG_SIZE	(1UL << H_PTE_FRAG_SIZE_SHIFT)

#ifndef __ASSEMBLY__
#include <asm/errno.h>

/*
 * With 64K pages on a hash table, we have a special PTE format that
 * uses a second "half" of the page table to encode sub-page information,
 * in order to deal with a 64K page made of 4K HW pages. Thus we override
 * the generic accessors and iterators here.
 */
#define __real_pte __real_pte
static inline real_pte_t __real_pte(pte_t pte, pte_t *ptep)
{
	real_pte_t rpte;
	unsigned long *hidxp;

	rpte.pte = pte;
	rpte.hidx = 0;
	if (pte_val(pte) & H_PAGE_COMBO) {
		/*
		 * Make sure we order the hidx load against the H_PAGE_COMBO
		 * check. The store-side ordering is done in __hash_page_4K().
		 */
		smp_rmb();
		hidxp = (unsigned long *)(ptep + PTRS_PER_PTE);
		rpte.hidx = *hidxp;
	}
	return rpte;
}

/* Each subpage's 4-bit hidx field lives at bit offset (index * 4) */
#define HIDX_BITS(x, index)	((x) << ((index) << 2))

static inline unsigned long __rpte_to_hidx(real_pte_t rpte, unsigned long index)
{
	if (pte_val(rpte.pte) & H_PAGE_COMBO)
		return (rpte.hidx >> (index << 2)) & 0xf;
	return (pte_val(rpte.pte) >> H_PAGE_F_GIX_SHIFT) & 0xf;
}

/*
 * Commit the hidx and return the PTE bits that need to be modified. The
 * caller is expected to modify the PTE bits accordingly and commit the
 * PTE to memory.
 */
static inline unsigned long pte_set_hidx(pte_t *ptep, real_pte_t rpte,
					 unsigned int subpg_index,
					 unsigned long hidx)
{
	unsigned long *hidxp = (unsigned long *)(ptep + PTRS_PER_PTE);

	rpte.hidx &= ~HIDX_BITS(0xfUL, subpg_index);
	*hidxp = rpte.hidx | HIDX_BITS(hidx, subpg_index);

	/*
	 * Anyone reading the PTE must ensure the hidx bits are read after
	 * reading the PTE, by using the read-side barrier smp_rmb().
	 * __real_pte() can be used for that.
	 */
	smp_wmb();

	/* No PTE bits to be modified, return 0x0UL */
	return 0x0UL;
}
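/*
 * Worked example (illustrative only, not part of the kernel interface):
 * the second-half word packs sixteen 4-bit hidx fields, subpage N
 * occupying bits [4N .. 4N+3]. Committing hidx 0x5 for subpage index 3
 * with the helpers above effectively does:
 *
 *	hidx_word &= ~HIDX_BITS(0xfUL, 3);	// clear bits 12..15 (0xf << 12)
 *	hidx_word |= HIDX_BITS(0x5UL, 3);	// store 0x5 << 12
 *
 * after which __rpte_to_hidx(rpte, 3) returns (hidx_word >> 12) & 0xf,
 * i.e. 0x5, provided H_PAGE_COMBO is set in the PTE.
 */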
#define __rpte_to_pte(r)	((r).pte)
extern bool __rpte_sub_valid(real_pte_t rpte, unsigned long index);

/*
 * Trick: we set __end to vpn + 64K, which happens to work for a 16M page
 * as well, since we want only one iteration in that case.
 */
#define pte_iterate_hashed_subpages(rpte, psize, vpn, index, shift)	\
	do {								\
		unsigned long __end = vpn + (1UL << (PAGE_SHIFT - VPN_SHIFT)); \
		unsigned __split = (psize == MMU_PAGE_4K ||		\
				    psize == MMU_PAGE_64K_AP);		\
		shift = mmu_psize_defs[psize].shift;			\
		for (index = 0; vpn < __end; index++,			\
			     vpn += (1L << (shift - VPN_SHIFT))) {	\
			if (!__split || __rpte_sub_valid(rpte, index))	\
				do {

#define pte_iterate_hashed_end() } while(0); } } while(0)

#define pte_pagesize_index(mm, addr, pte)	\
	(((pte) & H_PAGE_COMBO) ? MMU_PAGE_4K : MMU_PAGE_64K)

extern int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
			   unsigned long pfn, unsigned long size, pgprot_t);
static inline int hash__remap_4k_pfn(struct vm_area_struct *vma, unsigned long addr,
				     unsigned long pfn, pgprot_t prot)
{
	if (pfn > (PTE_RPN_MASK >> PAGE_SHIFT)) {
		WARN(1, "remap_4k_pfn called with wrong pfn value\n");
		return -EINVAL;
	}
	return remap_pfn_range(vma, addr, pfn, PAGE_SIZE,
			       __pgprot(pgprot_val(prot) | H_PAGE_4K_PFN));
}

#define H_PTE_TABLE_SIZE	PTE_FRAG_SIZE
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define H_PMD_TABLE_SIZE	((sizeof(pmd_t) << PMD_INDEX_SIZE) + \
				 (sizeof(unsigned long) << PMD_INDEX_SIZE))
#else
#define H_PMD_TABLE_SIZE	(sizeof(pmd_t) << PMD_INDEX_SIZE)
#endif
#define H_PUD_TABLE_SIZE	(sizeof(pud_t) << PUD_INDEX_SIZE)
#define H_PGD_TABLE_SIZE	(sizeof(pgd_t) << PGD_INDEX_SIZE)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline char *get_hpte_slot_array(pmd_t *pmdp)
{
	/*
	 * The hpte hidx is stored in the pgtable whose address is in the
	 * second half of the PMD.
	 *
	 * Order this load with the test for pmd_trans_huge in the caller.
	 */
	smp_rmb();
	return *(char **)(pmdp + PTRS_PER_PMD);
}

/*
 * The linux hugepage PMD now includes the pmd entries followed by the
 * address of the stashed pgtable_t. The stashed pgtable_t contains the
 * hpte bits: [ 000 | 1 bit secondary | 3 bit hidx | 1 bit valid ]. We use
 * one byte for each HPTE entry. With a 16MB hugepage and 64K HPTEs we need
 * 256 entries, and with 4K HPTEs we need 4096 entries. Both will fit in a
 * 4K pgtable_t.
 *
 * The top three bits are intentionally left as zero. This memory location
 * is also used as normal page PTE pointers. So if we have any pointers
 * left around while we collapse a hugepage, we need to make sure the
 * _PAGE_PRESENT bit of those is zero when we look at them.
 */
static inline unsigned int hpte_valid(unsigned char *hpte_slot_array, int index)
{
	return hpte_slot_array[index] & 0x1;
}

static inline unsigned int hpte_hash_index(unsigned char *hpte_slot_array,
					   int index)
{
	return hpte_slot_array[index] >> 1;
}

static inline void mark_hpte_slot_valid(unsigned char *hpte_slot_array,
					unsigned int index, unsigned int hidx)
{
	hpte_slot_array[index] = (hidx << 1) | 0x1;
}
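/*
 * Worked example (illustrative only, not part of the kernel interface):
 * mark_hpte_slot_valid(hpte_slot_array, 2, 0xb) stores
 * (0xb << 1) | 0x1 == 0x17 in byte 2, which in the layout above reads
 * secondary = 1, hidx = 0b011, valid = 1. hpte_valid(hpte_slot_array, 2)
 * then returns 1, and hpte_hash_index(hpte_slot_array, 2) returns 0xb,
 * recovering the combined secondary+hidx nibble.
 */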
/*
 * By design, core kernel code never runs pmd_trans_huge() on any
 * hugetlbfs page. The hugetlbfs page table walking and mangling paths
 * are totally separated from the core VM paths, and they're
 * differentiated by VM_HUGETLB being set in vm_flags well before any
 * pmd_trans_huge() could run.
 *
 * pmd_trans_huge() is defined as false at build time if
 * CONFIG_TRANSPARENT_HUGEPAGE=n, to optimize away code blocks at build
 * time in that case.
 *
 * For ppc64 we need to differentiate explicit hugepages from THP,
 * because for THP we also track the subpage details at the pmd level.
 * We don't do that for explicit huge pages.
 */
static inline int hash__pmd_trans_huge(pmd_t pmd)
{
	return !!((pmd_val(pmd) & (_PAGE_PTE | H_PAGE_THP_HUGE)) ==
		  (_PAGE_PTE | H_PAGE_THP_HUGE));
}

static inline int hash__pmd_same(pmd_t pmd_a, pmd_t pmd_b)
{
	return (((pmd_raw(pmd_a) ^ pmd_raw(pmd_b)) & ~cpu_to_be64(_PAGE_HPTEFLAGS)) == 0);
}

static inline pmd_t hash__pmd_mkhuge(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) | (_PAGE_PTE | H_PAGE_THP_HUGE));
}

extern unsigned long hash__pmd_hugepage_update(struct mm_struct *mm,
					       unsigned long addr, pmd_t *pmdp,
					       unsigned long clr, unsigned long set);
extern pmd_t hash__pmdp_collapse_flush(struct vm_area_struct *vma,
				       unsigned long address, pmd_t *pmdp);
extern void hash__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
					     pgtable_t pgtable);
extern pgtable_t hash__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
extern void hash__pmdp_huge_split_prepare(struct vm_area_struct *vma,
					  unsigned long address, pmd_t *pmdp);
extern pmd_t hash__pmdp_huge_get_and_clear(struct mm_struct *mm,
					   unsigned long addr, pmd_t *pmdp);
extern int hash__has_transparent_hugepage(void);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_BOOK3S_64_HASH_64K_H */