#ifndef _ASM_POWERPC_BOOK3S_64_HASH_64K_H
#define _ASM_POWERPC_BOOK3S_64_HASH_64K_H

#define H_PTE_INDEX_SIZE  8
#define H_PMD_INDEX_SIZE  5
#define H_PUD_INDEX_SIZE  5
#define H_PGD_INDEX_SIZE  12

/* With 64k base page size, hugepage PTEs go at the PTE level */
#define MIN_HUGEPTE_SHIFT	PAGE_SHIFT

#define H_PAGE_COMBO	0x00001000 /* this is a combo 4k page */
#define H_PAGE_4K_PFN	0x00002000 /* PFN is for a single 4k page */
/*
 * We need to differentiate between explicit huge pages and THP huge
 * pages, since a THP huge page also needs to track real subpage details.
 */
#define H_PAGE_THP_HUGE  H_PAGE_4K_PFN

/*
 * Used to track subpage group validity if H_PAGE_COMBO is set.
 * This overloads H_PAGE_F_GIX and H_PAGE_F_SECOND.
 */
#define H_PAGE_COMBO_VALID	(H_PAGE_F_GIX | H_PAGE_F_SECOND)

/* PTE flags to conserve for HPTE identification */
#define _PAGE_HPTEFLAGS (H_PAGE_BUSY | H_PAGE_F_SECOND | \
			 H_PAGE_F_GIX | H_PAGE_HASHPTE | H_PAGE_COMBO)
/*
 * We support 16 fragments per 64K PTE page.
 */
#define H_PTE_FRAG_NR	16
/*
 * We use a 2K PTE page fragment and another 2K for storing the
 * real_pte_t hash index, i.e. 4K per fragment in total.
 */
#define H_PTE_FRAG_SIZE_SHIFT  12
#define PTE_FRAG_SIZE	(1UL << H_PTE_FRAG_SIZE_SHIFT)

#ifndef __ASSEMBLY__
#include <asm/errno.h>

/*
 * With 64K pages on the hash table, we have a special PTE format that
 * uses a second "half" of the page table to encode sub-page information,
 * in order to deal with a 64K page made of 4K HW pages. Thus we override
 * the generic accessors and iterators here.
 */
#define __real_pte __real_pte
static inline real_pte_t __real_pte(pte_t pte, pte_t *ptep)
{
	real_pte_t rpte;
	unsigned long *hidxp;

	rpte.pte = pte;
	rpte.hidx = 0;
	if (pte_val(pte) & H_PAGE_COMBO) {
		/*
		 * Make sure we order the hidx load against the H_PAGE_COMBO
		 * check. The store side ordering is done in __hash_page_4K.
		 */
		smp_rmb();
		hidxp = (unsigned long *)(ptep + PTRS_PER_PTE);
		rpte.hidx = *hidxp;
	}
	return rpte;
}

static inline unsigned long __rpte_to_hidx(real_pte_t rpte, unsigned long index)
{
	if ((pte_val(rpte.pte) & H_PAGE_COMBO))
		return (rpte.hidx >> (index << 2)) & 0xf;
	return (pte_val(rpte.pte) >> H_PAGE_F_GIX_SHIFT) & 0xf;
}

#define __rpte_to_pte(r)	((r).pte)
extern bool __rpte_sub_valid(real_pte_t rpte, unsigned long index);
/*
 * Trick: we set __end to va + 64k, which happens to work for a 16M
 * page as well, since in that case we want only one iteration.
 */
#define pte_iterate_hashed_subpages(rpte, psize, vpn, index, shift)	\
	do {								\
		unsigned long __end = vpn + (1UL << (PAGE_SHIFT - VPN_SHIFT)); \
		unsigned __split = (psize == MMU_PAGE_4K ||		\
				    psize == MMU_PAGE_64K_AP);		\
		shift = mmu_psize_defs[psize].shift;			\
		for (index = 0; vpn < __end; index++,			\
			     vpn += (1L << (shift - VPN_SHIFT))) {	\
			if (!__split || __rpte_sub_valid(rpte, index))	\
				do {

#define pte_iterate_hashed_end() } while(0); } } while(0)
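/*
 * Illustrative sketch (not from this header) of how a flush path would
 * walk the hashed subpages of one Linux PTE with the iterator above.
 * invalidate_hpte_slot() is a hypothetical callee standing in for the
 * real HPTE invalidation done by the hash MMU code:
 *
 *	pte_iterate_hashed_subpages(rpte, psize, vpn, index, shift) {
 *		unsigned long hidx = __rpte_to_hidx(rpte, index);
 *		// hidx selects the slot within the primary/secondary
 *		// hash group for this 4K subpage
 *		invalidate_hpte_slot(vpn, hidx, shift);
 *	} pte_iterate_hashed_end();
 */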
#define pte_pagesize_index(mm, addr, pte)	\
	(((pte) & H_PAGE_COMBO) ? MMU_PAGE_4K : MMU_PAGE_64K)

extern int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
			   unsigned long pfn, unsigned long size, pgprot_t);
static inline int hash__remap_4k_pfn(struct vm_area_struct *vma, unsigned long addr,
				     unsigned long pfn, pgprot_t prot)
{
	if (pfn > (PTE_RPN_MASK >> PAGE_SHIFT)) {
		WARN(1, "remap_4k_pfn called with wrong pfn value\n");
		return -EINVAL;
	}
	return remap_pfn_range(vma, addr, pfn, PAGE_SIZE,
			       __pgprot(pgprot_val(prot) | H_PAGE_4K_PFN));
}

#define H_PTE_TABLE_SIZE	PTE_FRAG_SIZE
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define H_PMD_TABLE_SIZE	((sizeof(pmd_t) << PMD_INDEX_SIZE) + \
				 (sizeof(unsigned long) << PMD_INDEX_SIZE))
#else
#define H_PMD_TABLE_SIZE	(sizeof(pmd_t) << PMD_INDEX_SIZE)
#endif
#define H_PUD_TABLE_SIZE	(sizeof(pud_t) << PUD_INDEX_SIZE)
#define H_PGD_TABLE_SIZE	(sizeof(pgd_t) << PGD_INDEX_SIZE)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline char *get_hpte_slot_array(pmd_t *pmdp)
{
	/*
	 * The hpte hidx is stored in the pgtable whose address is in the
	 * second half of the PMD.
	 *
	 * Order this load with the test for pmd_trans_huge in the caller.
	 */
	smp_rmb();
	return *(char **)(pmdp + PTRS_PER_PMD);
}

/*
 * The linux hugepage PMD now includes the pmd entries followed by the
 * address of the stashed pgtable_t. The stashed pgtable_t contains the
 * hpte bits: [ 000 | 1 bit secondary | 3 bit hidx | 1 bit valid ].
 * We use one byte for each HPTE entry. With a 16MB hugepage and 64K
 * HPTEs we need 256 entries, and with 4K HPTEs we need 4096 entries.
 * Both will fit in a 4K pgtable_t.
 *
 * The top three bits are intentionally left as zero. These memory
 * locations are also used as normal page PTE pointers, so if we have
 * any pointers left around while we collapse a hugepage, we need to
 * make sure the _PAGE_PRESENT bit of those is zero when we look at them.
 */
static inline unsigned int hpte_valid(unsigned char *hpte_slot_array, int index)
{
	return hpte_slot_array[index] & 0x1;
}

static inline unsigned int hpte_hash_index(unsigned char *hpte_slot_array,
					   int index)
{
	return hpte_slot_array[index] >> 1;
}

static inline void mark_hpte_slot_valid(unsigned char *hpte_slot_array,
					unsigned int index, unsigned int hidx)
{
	hpte_slot_array[index] = (hidx << 1) | 0x1;
}
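/*
 * Illustrative example (not from this header) of the byte encoding the
 * three helpers above implement. Storing hidx value 0xd (secondary bit
 * set, 3-bit hash index 5) and reading it back:
 *
 *	mark_hpte_slot_valid(slots, idx, 0xd);
 *	// slots[idx] == (0xd << 1) | 1 == 0x1b: [ 000 | 1 | 101 | 1 ]
 *	hpte_valid(slots, idx);		// == 1 (valid bit)
 *	hpte_hash_index(slots, idx);	// == 0xd (secondary + hidx)
 */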
/*
 * For core kernel code, by design pmd_trans_huge() is never run on any
 * hugetlbfs page. The hugetlbfs page table walking and mangling paths
 * are totally separated from the core VM paths, and they're
 * differentiated by VM_HUGETLB being set on vm_flags well before any
 * pmd_trans_huge() could run.
 *
 * pmd_trans_huge() is defined as false at build time if
 * CONFIG_TRANSPARENT_HUGEPAGE=n, to optimize away code blocks at build
 * time in that case.
 *
 * For ppc64 we need to differentiate explicit hugepages from THP,
 * because for THP we also track the subpage details at the pmd level.
 * We don't do that for explicit huge pages.
 */
static inline int hash__pmd_trans_huge(pmd_t pmd)
{
	return !!((pmd_val(pmd) & (_PAGE_PTE | H_PAGE_THP_HUGE)) ==
		  (_PAGE_PTE | H_PAGE_THP_HUGE));
}

static inline int hash__pmd_same(pmd_t pmd_a, pmd_t pmd_b)
{
	return (((pmd_raw(pmd_a) ^ pmd_raw(pmd_b)) & ~cpu_to_be64(_PAGE_HPTEFLAGS)) == 0);
}

static inline pmd_t hash__pmd_mkhuge(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) | (_PAGE_PTE | H_PAGE_THP_HUGE));
}

extern unsigned long hash__pmd_hugepage_update(struct mm_struct *mm,
					       unsigned long addr, pmd_t *pmdp,
					       unsigned long clr, unsigned long set);
extern pmd_t hash__pmdp_collapse_flush(struct vm_area_struct *vma,
				       unsigned long address, pmd_t *pmdp);
extern void hash__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
					     pgtable_t pgtable);
extern pgtable_t hash__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
extern void hash__pmdp_huge_split_prepare(struct vm_area_struct *vma,
					  unsigned long address, pmd_t *pmdp);
extern pmd_t hash__pmdp_huge_get_and_clear(struct mm_struct *mm,
					   unsigned long addr, pmd_t *pmdp);
extern int hash__has_transparent_hugepage(void);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_BOOK3S_64_HASH_64K_H */
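/*
 * Illustrative sketch (not part of this header) of how a walker built
 * on the helpers above would tell the three PMD cases apart, per the
 * VM_HUGETLB comment preceding hash__pmd_trans_huge(). walk_one_pmd()
 * and its callees are hypothetical:
 *
 *	static void walk_one_pmd(struct vm_area_struct *vma, pmd_t pmd)
 *	{
 *		if (vma->vm_flags & VM_HUGETLB)
 *			walk_hugetlb(vma, pmd);	  // explicit huge page path
 *		else if (hash__pmd_trans_huge(pmd))
 *			walk_thp(vma, pmd);	  // THP: subpage array deposited
 *		else
 *			walk_pte_level(vma, pmd); // normal 64K PTE page
 *	}
 */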