/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_64_HASH_64K_H
#define _ASM_POWERPC_BOOK3S_64_HASH_64K_H

#define H_PTE_INDEX_SIZE	8
#define H_PMD_INDEX_SIZE	10
#define H_PUD_INDEX_SIZE	7
#define H_PGD_INDEX_SIZE	8

/*
 * A 64K-aligned address frees up a few of the lower RPN bits for us;
 * we steal them here. For more details look at pte_pfn()/pfn_pte().
 */
#define H_PAGE_COMBO	_RPAGE_RPN0 /* this is a combo 4k page */
#define H_PAGE_4K_PFN	_RPAGE_RPN1 /* PFN is for a single 4k page */
#define H_PAGE_BUSY	_RPAGE_RPN44 /* software: PTE & hash are busy */
#define H_PAGE_HASHPTE	_RPAGE_RPN43 /* PTE has associated HPTE */

/* memory key bits. */
#define H_PTE_PKEY_BIT0	_RPAGE_RSV1
#define H_PTE_PKEY_BIT1	_RPAGE_RSV2
#define H_PTE_PKEY_BIT2	_RPAGE_RSV3
#define H_PTE_PKEY_BIT3	_RPAGE_RSV4
#define H_PTE_PKEY_BIT4	_RPAGE_RSV5

/*
 * We need to differentiate between explicit huge pages and THP huge
 * pages, since a THP huge page also needs to track real subpage details.
 */
#define H_PAGE_THP_HUGE  H_PAGE_4K_PFN

/* PTE flags to conserve for HPTE identification */
#define _PAGE_HPTEFLAGS (H_PAGE_BUSY | H_PAGE_HASHPTE | H_PAGE_COMBO)
/*
 * We support 16 fragments per 64K PTE page.
 */
#define H_PTE_FRAG_NR	16
/*
 * We use a 2K PTE page fragment and another 2K for storing
 * the real_pte_t hash index.
 */
#define H_PTE_FRAG_SIZE_SHIFT  12
#define PTE_FRAG_SIZE (1UL << PTE_FRAG_SIZE_SHIFT)

#ifndef __ASSEMBLY__
#include <asm/errno.h>

/*
 * With 64K pages on the hash table, we have a special PTE format that
 * uses a second "half" of the page table to encode sub-page information,
 * in order to deal with a 64K page made of 4K HW pages. Thus we override
 * the generic accessors and iterators here.
 */
#define __real_pte __real_pte
static inline real_pte_t __real_pte(pte_t pte, pte_t *ptep, int offset)
{
	real_pte_t rpte;
	unsigned long *hidxp;

	rpte.pte = pte;

	/*
	 * Ensure that we do not read the hidx before we read the PTE. The
	 * writer side is expected to finish writing the hidx first, followed
	 * by the PTE, using smp_wmb(); pte_set_hidx() ensures that.
	 */
	smp_rmb();

	hidxp = (unsigned long *)(ptep + offset);
	rpte.hidx = *hidxp;
	return rpte;
}

/*
 * Shift the hidx representation by one, wrapping 0xf back to 0; i.e. hidx 0
 * is represented as 1, 1 as 2, ..., and 0xf as 0. This convention lets us
 * represent the invalid hidx 0xf with a 0x0 bit value. PTEs are zeroed when
 * allocated anyway, so we don't have to zero them again; this saves on the
 * initialization.
 */
#define HIDX_UNSHIFT_BY_ONE(x) ((x + 0xfUL) & 0xfUL) /* shift backward by one */
#define HIDX_SHIFT_BY_ONE(x) ((x + 0x1UL) & 0xfUL) /* shift forward by one */
#define HIDX_BITS(x, index)  (x << (index << 2))
#define BITS_TO_HIDX(x, index)  ((x >> (index << 2)) & 0xfUL)
#define INVALID_RPTE_HIDX  0x0UL

static inline unsigned long __rpte_to_hidx(real_pte_t rpte, unsigned long index)
{
	return HIDX_UNSHIFT_BY_ONE(BITS_TO_HIDX(rpte.hidx, index));
}
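/*
 * Illustrative sketch (not part of this header's interface): how a hash
 * slot value round-trips through the shift-by-one encoding and the packed
 * hidx word, assuming subpage index 2 and hardware slot 0x7:
 *
 *	HIDX_SHIFT_BY_ONE(0x7)		-> 0x8 (nibble to store)
 *	HIDX_BITS(0x8, 2)		-> 0x8 << 8 = 0x800 (packed bits)
 *	BITS_TO_HIDX(0x800, 2)		-> 0x8
 *	HIDX_UNSHIFT_BY_ONE(0x8)	-> 0x7 (slot recovered)
 *
 * A nibble that was never written reads back as 0x0, which unshifts to 0xf,
 * the invalid slot value (hence INVALID_RPTE_HIDX above is 0x0UL).
 */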
/*
 * Commit the hidx and return the PTE bits that need to be modified. The
 * caller is expected to modify the PTE bits accordingly and commit the
 * PTE to memory.
 */
static inline unsigned long pte_set_hidx(pte_t *ptep, real_pte_t rpte,
					 unsigned int subpg_index,
					 unsigned long hidx, int offset)
{
	unsigned long *hidxp = (unsigned long *)(ptep + offset);

	rpte.hidx &= ~HIDX_BITS(0xfUL, subpg_index);
	*hidxp = rpte.hidx | HIDX_BITS(HIDX_SHIFT_BY_ONE(hidx), subpg_index);

	/*
	 * Anyone reading the PTE must ensure the hidx bits are read after
	 * reading the PTE, by using the read-side barrier smp_rmb().
	 * __real_pte() can be used for that.
	 */
	smp_wmb();

	/* No PTE bits to be modified, return 0x0UL */
	return 0x0UL;
}

#define __rpte_to_pte(r)	((r).pte)
extern bool __rpte_sub_valid(real_pte_t rpte, unsigned long index);
/*
 * Trick: we set __end to va + 64k, which happens to work for a 16M page
 * as well, since we want only one iteration.
 */
#define pte_iterate_hashed_subpages(rpte, psize, vpn, index, shift)	\
	do {								\
		unsigned long __end = vpn + (1UL << (PAGE_SHIFT - VPN_SHIFT));	\
		unsigned __split = (psize == MMU_PAGE_4K ||		\
				    psize == MMU_PAGE_64K_AP);		\
		shift = mmu_psize_defs[psize].shift;			\
		for (index = 0; vpn < __end; index++,			\
			     vpn += (1L << (shift - VPN_SHIFT))) {	\
			if (!__split || __rpte_sub_valid(rpte, index))	\
				do {

#define pte_iterate_hashed_end() } while(0); } } while(0)

#define pte_pagesize_index(mm, addr, pte)	\
	(((pte) & H_PAGE_COMBO) ? MMU_PAGE_4K : MMU_PAGE_64K)

extern int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
			   unsigned long pfn, unsigned long size, pgprot_t);
static inline int hash__remap_4k_pfn(struct vm_area_struct *vma, unsigned long addr,
				     unsigned long pfn, pgprot_t prot)
{
	if (pfn > (PTE_RPN_MASK >> PAGE_SHIFT)) {
		WARN(1, "remap_4k_pfn called with wrong pfn value\n");
		return -EINVAL;
	}
	return remap_pfn_range(vma, addr, pfn, PAGE_SIZE,
			       __pgprot(pgprot_val(prot) | H_PAGE_4K_PFN));
}

#define H_PTE_TABLE_SIZE	PTE_FRAG_SIZE
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
#define H_PMD_TABLE_SIZE	((sizeof(pmd_t) << PMD_INDEX_SIZE) + \
				 (sizeof(unsigned long) << PMD_INDEX_SIZE))
#else
#define H_PMD_TABLE_SIZE	(sizeof(pmd_t) << PMD_INDEX_SIZE)
#endif
#ifdef CONFIG_HUGETLB_PAGE
#define H_PUD_TABLE_SIZE	((sizeof(pud_t) << PUD_INDEX_SIZE) + \
				 (sizeof(unsigned long) << PUD_INDEX_SIZE))
#else
#define H_PUD_TABLE_SIZE	(sizeof(pud_t) << PUD_INDEX_SIZE)
#endif
#define H_PGD_TABLE_SIZE	(sizeof(pgd_t) << PGD_INDEX_SIZE)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline char *get_hpte_slot_array(pmd_t *pmdp)
{
	/*
	 * The hpte hash index is stored in the pgtable whose address is in
	 * the second half of the PMD.
	 *
	 * Order this load with the test for pmd_trans_huge in the caller.
	 */
	smp_rmb();
	return *(char **)(pmdp + PTRS_PER_PMD);
}

/*
 * The linux hugepage PMD now includes the pmd entries followed by the address
 * of the stashed pgtable_t. The stashed pgtable_t contains the hpte bits:
 * [ 000 | 1 bit secondary | 3 bit hidx | 1 bit valid ]. We use one byte per
 * HPTE entry. With a 16MB hugepage and 64K HPTEs we need 256 entries, and
 * with 4K HPTEs we need 4096 entries. Both will fit in a 4K pgtable_t.
 *
 * The top three bits are intentionally left as zero. This memory location
 * is also used for normal page PTE pointers, so if we have any such pointers
 * left around while we collapse a hugepage, we need to make sure their
 * _PAGE_PRESENT bit is zero when we look at them.
 */
static inline unsigned int hpte_valid(unsigned char *hpte_slot_array, int index)
{
	return hpte_slot_array[index] & 0x1;
}

static inline unsigned int hpte_hash_index(unsigned char *hpte_slot_array,
					   int index)
{
	return hpte_slot_array[index] >> 1;
}

static inline void mark_hpte_slot_valid(unsigned char *hpte_slot_array,
					unsigned int index, unsigned int hidx)
{
	hpte_slot_array[index] = (hidx << 1) | 0x1;
}
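/*
 * Illustrative sketch (not part of this header's interface): one byte of the
 * hpte_slot_array, assuming a caller records slot value 0x5 for HPTE index i
 * (the exact meaning of the slot value is determined by the hash fault
 * handlers, not by this header):
 *
 *	mark_hpte_slot_valid(array, i, 0x5);	// array[i] = (0x5 << 1) | 1 = 0x0b
 *	hpte_valid(array, i);			// 0x0b & 0x1 -> 1
 *	hpte_hash_index(array, i);		// 0x0b >> 1  -> 0x5
 *
 * A byte that was never marked stays 0x0, so hpte_valid() reports 0 for it.
 */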
/*
 * For core kernel code, by design pmd_trans_huge() is never run on any
 * hugetlbfs page. The hugetlbfs page table walking and mangling paths are
 * totally separated from the core VM paths and they're differentiated by
 * VM_HUGETLB being set on vm_flags well before any pmd_trans_huge() could run.
 *
 * pmd_trans_huge() is defined as false at build time if
 * CONFIG_TRANSPARENT_HUGEPAGE=n, to optimize away code blocks at build
 * time in that case.
 *
 * For ppc64 we need to differentiate explicit hugepages from THP, because
 * for THP we also track the subpage details at the pmd level. We don't do
 * that for explicit huge pages.
 */
static inline int hash__pmd_trans_huge(pmd_t pmd)
{
	return !!((pmd_val(pmd) & (_PAGE_PTE | H_PAGE_THP_HUGE)) ==
		  (_PAGE_PTE | H_PAGE_THP_HUGE));
}

static inline int hash__pmd_same(pmd_t pmd_a, pmd_t pmd_b)
{
	return (((pmd_raw(pmd_a) ^ pmd_raw(pmd_b)) & ~cpu_to_be64(_PAGE_HPTEFLAGS)) == 0);
}

static inline pmd_t hash__pmd_mkhuge(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) | (_PAGE_PTE | H_PAGE_THP_HUGE));
}

extern unsigned long hash__pmd_hugepage_update(struct mm_struct *mm,
					       unsigned long addr, pmd_t *pmdp,
					       unsigned long clr, unsigned long set);
extern pmd_t hash__pmdp_collapse_flush(struct vm_area_struct *vma,
				       unsigned long address, pmd_t *pmdp);
extern void hash__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
					     pgtable_t pgtable);
extern pgtable_t hash__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
extern pmd_t hash__pmdp_huge_get_and_clear(struct mm_struct *mm,
					   unsigned long addr, pmd_t *pmdp);
extern int hash__has_transparent_hugepage(void);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_BOOK3S_64_HASH_64K_H */