/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_64_HASH_H
#define _ASM_POWERPC_BOOK3S_64_HASH_H
#ifdef __KERNEL__

/*
 * Common bits between 4K and 64K pages in a linux-style PTE.
 * Additional bits may be defined in pgtable-hash64-*.h
 */
#define H_PTE_NONE_MASK		_PAGE_HPTEFLAGS

#ifdef CONFIG_PPC_64K_PAGES
#include <asm/book3s/64/hash-64k.h>
#else
#include <asm/book3s/64/hash-4k.h>
#endif

/*
 * Size of EA range mapped by our pagetables.
 */
#define H_PGTABLE_EADDR_SIZE	(H_PTE_INDEX_SIZE + H_PMD_INDEX_SIZE + \
				 H_PUD_INDEX_SIZE + H_PGD_INDEX_SIZE + PAGE_SHIFT)
#define H_PGTABLE_RANGE		(ASM_CONST(1) << H_PGTABLE_EADDR_SIZE)

/*
 * We store the slot details in the second half of the page table.
 * Increase the pud level table so that hugetlb ptes can be stored
 * at pud level.
 */
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_PPC_64K_PAGES)
#define H_PUD_CACHE_INDEX	(H_PUD_INDEX_SIZE + 1)
#else
#define H_PUD_CACHE_INDEX	(H_PUD_INDEX_SIZE)
#endif

/*
 * Define the address range of the kernel non-linear virtual area.
 */
#define H_KERN_VIRT_START	ASM_CONST(0xD000000000000000)
#define H_KERN_VIRT_SIZE	ASM_CONST(0x0000400000000000) /* 64T */

/*
 * The vmalloc space starts at the beginning of that region and
 * occupies 56T of it; the remaining 8T, starting at H_VMALLOC_END,
 * is used for the kernel IO region.
 */
#define H_VMALLOC_START		H_KERN_VIRT_START
#define H_VMALLOC_SIZE		ASM_CONST(0x380000000000) /* 56T */
#define H_VMALLOC_END		(H_VMALLOC_START + H_VMALLOC_SIZE)

#define H_KERN_IO_START		H_VMALLOC_END

/*
 * Region IDs
 */
#define REGION_SHIFT		60UL
#define REGION_MASK		(0xfUL << REGION_SHIFT)
#define REGION_ID(ea)		(((unsigned long)(ea)) >> REGION_SHIFT)

#define VMALLOC_REGION_ID	(REGION_ID(H_VMALLOC_START))
#define KERNEL_REGION_ID	(REGION_ID(PAGE_OFFSET))
#define VMEMMAP_REGION_ID	(0xfUL) /* Server only */
#define USER_REGION_ID		(0UL)

/*
 * Defines the address of the vmemmap area, in its own region on
 * hash table CPUs.
 */
#define H_VMEMMAP_BASE		(VMEMMAP_REGION_ID << REGION_SHIFT)

#ifdef CONFIG_PPC_MM_SLICES
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
#endif /* CONFIG_PPC_MM_SLICES */

/* PTEIDX nibble */
#define _PTEIDX_SECONDARY	0x8
#define _PTEIDX_GROUP_IX	0x7

#define H_PMD_BAD_BITS		(PTE_TABLE_SIZE-1)
#define H_PUD_BAD_BITS		(PMD_TABLE_SIZE-1)

#ifndef __ASSEMBLY__
#define	hash__pmd_bad(pmd)	(pmd_val(pmd) & H_PMD_BAD_BITS)
#define	hash__pud_bad(pud)	(pud_val(pud) & H_PUD_BAD_BITS)
static inline int hash__pgd_bad(pgd_t pgd)
{
	return (pgd_val(pgd) == 0);
}
#ifdef CONFIG_STRICT_KERNEL_RWX
extern void hash__mark_rodata_ro(void);
extern void hash__mark_initmem_nx(void);
#endif

extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
			    pte_t *ptep, unsigned long pte, int huge);
extern unsigned long htab_convert_pte_flags(unsigned long pteflags);

/* Atomic PTE updates */
static inline unsigned long hash__pte_update(struct mm_struct *mm,
					     unsigned long addr,
					     pte_t *ptep, unsigned long clr,
					     unsigned long set,
					     int huge)
{
	__be64 old_be, tmp_be;
	unsigned long old;

	__asm__ __volatile__(
	"1:	ldarx	%0,0,%3		# pte_update\n\
	and.	%1,%0,%6\n\
	bne-	1b \n\
	andc	%1,%0,%4 \n\
	or	%1,%1,%7\n\
	stdcx.	%1,0,%3 \n\
	bne-	1b"
	: "=&r" (old_be), "=&r" (tmp_be), "=m" (*ptep)
	: "r" (ptep), "r" (cpu_to_be64(clr)), "m" (*ptep),
	  "r" (cpu_to_be64(H_PAGE_BUSY)), "r" (cpu_to_be64(set))
	: "cc" );
	/* huge pages use the old page table lock */
	if (!huge)
		assert_pte_locked(mm, addr);

	old = be64_to_cpu(old_be);
	if (old & H_PAGE_HASHPTE)
		hpte_need_flush(mm, addr, ptep, old, huge);

	return old;
}
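/*
 * Illustrative sketch, not part of the upstream header: one typical way
 * a caller drives hash__pte_update(). The helper name below is
 * hypothetical; write-protecting a PTE is simply an atomic clear of
 * _PAGE_WRITE, with hash__pte_update() handling the H_PAGE_BUSY retry
 * loop and any needed hash flush via hpte_need_flush().
 */
static inline void hash__example_wrprotect(struct mm_struct *mm,
					   unsigned long addr, pte_t *ptep)
{
	/* clr = _PAGE_WRITE, set = 0, huge = 0 (regular page size) */
	hash__pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 0);
}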
/*
 * Set the dirty and/or accessed bits atomically in a linux PTE; this
 * function doesn't need to flush the hash entry.
 */
static inline void hash__ptep_set_access_flags(pte_t *ptep, pte_t entry)
{
	__be64 old, tmp, val, mask;

	mask = cpu_to_be64(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_READ | _PAGE_WRITE |
			   _PAGE_EXEC | _PAGE_SOFT_DIRTY);

	val = pte_raw(entry) & mask;

	__asm__ __volatile__(
	"1:	ldarx	%0,0,%4\n\
		and.	%1,%0,%6\n\
		bne-	1b \n\
		or	%0,%3,%0\n\
		stdcx.	%0,0,%4\n\
		bne-	1b"
	:"=&r" (old), "=&r" (tmp), "=m" (*ptep)
	:"r" (val), "r" (ptep), "m" (*ptep), "r" (cpu_to_be64(H_PAGE_BUSY))
	:"cc");
}

static inline int hash__pte_same(pte_t pte_a, pte_t pte_b)
{
	return (((pte_raw(pte_a) ^ pte_raw(pte_b)) & ~cpu_to_be64(_PAGE_HPTEFLAGS)) == 0);
}

static inline int hash__pte_none(pte_t pte)
{
	return (pte_val(pte) & ~H_PTE_NONE_MASK) == 0;
}

unsigned long pte_get_hash_gslot(unsigned long vpn, unsigned long shift,
		int ssize, real_pte_t rpte, unsigned int subpg_index);

/*
 * This low level function performs the actual PTE insertion.
 * Setting the PTE depends on the MMU type and other factors. It's
 * a horrible mess that I'm not going to try to clean up now but
 * I'm keeping it in one place rather than spread around.
 */
static inline void hash__set_pte_at(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep, pte_t pte, int percpu)
{
	/*
	 * Anything else just stores the PTE normally. That covers all 64-bit
	 * cases, and 32-bit non-hash with 32-bit PTEs.
	 */
	*ptep = pte;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
				   pmd_t *pmdp, unsigned long old_pmd);
#else
static inline void hpte_do_hugepage_flush(struct mm_struct *mm,
					  unsigned long addr, pmd_t *pmdp,
					  unsigned long old_pmd)
{
	WARN(1, "%s called with THP disabled\n", __func__);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

extern int hash__map_kernel_page(unsigned long ea, unsigned long pa,
				 unsigned long flags);
extern int __meminit hash__vmemmap_create_mapping(unsigned long start,
						  unsigned long page_size,
						  unsigned long phys);
extern void hash__vmemmap_remove_mapping(unsigned long start,
					 unsigned long page_size);

int hash__create_section_mapping(unsigned long start, unsigned long end, int nid);
int hash__remove_section_mapping(unsigned long start, unsigned long end);

#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_BOOK3S_64_HASH_H */
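/*
 * Worked example, for illustration only (not part of the upstream
 * header): REGION_ID() keeps just the top nibble of an effective
 * address, so under the layout defined above:
 *
 *	REGION_ID(0xD000000000000000UL) == 0xd == VMALLOC_REGION_ID
 *	REGION_ID(0xF000000000000000UL) == 0xf == VMEMMAP_REGION_ID
 *	REGION_ID(0x0000123456789000UL) == 0x0 == USER_REGION_ID
 *
 * The IO region start also follows directly from the vmalloc sizing:
 *
 *	H_KERN_IO_START == H_VMALLOC_END
 *	                == 0xD000000000000000 + 0x0000380000000000 (56T)
 *	                == 0xD000380000000000
 *
 * which leaves 64T - 56T = 8T of H_KERN_VIRT_SIZE for IO mappings.
 */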