#ifndef _ASM_POWERPC_BOOK3S_64_HASH_H
#define _ASM_POWERPC_BOOK3S_64_HASH_H
#ifdef __KERNEL__

/*
 * Common bits between 4K and 64K pages in a linux-style PTE.
 * Additional bits may be defined in pgtable-hash64-*.h
 */
#define H_PTE_NONE_MASK		_PAGE_HPTEFLAGS
#define H_PAGE_F_GIX_SHIFT	56
#define H_PAGE_BUSY		_RPAGE_RSV1 /* software: PTE & hash are busy */
#define H_PAGE_F_SECOND		_RPAGE_RSV2	/* HPTE is in 2ndary HPTEG */
#define H_PAGE_F_GIX		(_RPAGE_RSV3 | _RPAGE_RSV4 | _RPAGE_RPN44)
#define H_PAGE_HASHPTE		_RPAGE_RPN43	/* PTE has associated HPTE */

#ifdef CONFIG_PPC_64K_PAGES
#include <asm/book3s/64/hash-64k.h>
#else
#include <asm/book3s/64/hash-4k.h>
#endif

/*
 * Size of EA range mapped by our pagetables.
 */
#define H_PGTABLE_EADDR_SIZE	(H_PTE_INDEX_SIZE + H_PMD_INDEX_SIZE + \
				 H_PUD_INDEX_SIZE + H_PGD_INDEX_SIZE + PAGE_SHIFT)
#define H_PGTABLE_RANGE		(ASM_CONST(1) << H_PGTABLE_EADDR_SIZE)

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_PPC_64K_PAGES)
/*
 * Only with 64K hash pages do we need the second half of the PMD page
 * table, to store the pointer to the deposited pgtable_t.
 */
#define H_PMD_CACHE_INDEX	(H_PMD_INDEX_SIZE + 1)
#else
#define H_PMD_CACHE_INDEX	H_PMD_INDEX_SIZE
#endif

/*
 * Define the address range of the kernel non-linear virtual area.
 */
#define H_KERN_VIRT_START	ASM_CONST(0xD000000000000000)
#define H_KERN_VIRT_SIZE	ASM_CONST(0x0000100000000000)

/*
 * The vmalloc space starts at the beginning of that region, and
 * occupies half of it on hash CPUs and a quarter of it on Book3E
 * (we keep a quarter for the virtual memmap).
 */
#define H_VMALLOC_START		H_KERN_VIRT_START
#define H_VMALLOC_SIZE		(H_KERN_VIRT_SIZE >> 1)
#define H_VMALLOC_END		(H_VMALLOC_START + H_VMALLOC_SIZE)

/*
 * Region IDs
 */
#define REGION_SHIFT		60UL
#define REGION_MASK		(0xfUL << REGION_SHIFT)
#define REGION_ID(ea)		(((unsigned long)(ea)) >> REGION_SHIFT)

#define VMALLOC_REGION_ID	(REGION_ID(H_VMALLOC_START))
#define KERNEL_REGION_ID	(REGION_ID(PAGE_OFFSET))
#define VMEMMAP_REGION_ID	(0xfUL)	/* Server only */
#define USER_REGION_ID		(0UL)

/*
 * Defines the address of the vmemmap area, in its own region on
 * hash table CPUs.
 */
#define H_VMEMMAP_BASE		(VMEMMAP_REGION_ID << REGION_SHIFT)

#ifdef CONFIG_PPC_MM_SLICES
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
#endif /* CONFIG_PPC_MM_SLICES */

/* PTEIDX nibble */
#define _PTEIDX_SECONDARY	0x8
#define _PTEIDX_GROUP_IX	0x7

#define H_PMD_BAD_BITS		(PTE_TABLE_SIZE-1)
#define H_PUD_BAD_BITS		(PMD_TABLE_SIZE-1)

#ifndef __ASSEMBLY__
#define hash__pmd_bad(pmd)	(pmd_val(pmd) & H_PMD_BAD_BITS)
#define hash__pud_bad(pud)	(pud_val(pud) & H_PUD_BAD_BITS)
static inline int hash__pgd_bad(pgd_t pgd)
{
	return (pgd_val(pgd) == 0);
}

extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
			    pte_t *ptep, unsigned long pte, int huge);
extern unsigned long htab_convert_pte_flags(unsigned long pteflags);
/* Atomic PTE updates */
static inline unsigned long hash__pte_update(struct mm_struct *mm,
					     unsigned long addr,
					     pte_t *ptep, unsigned long clr,
					     unsigned long set,
					     int huge)
{
	__be64 old_be, tmp_be;
	unsigned long old;

	/*
	 * ldarx/stdcx. loop: spin while H_PAGE_BUSY is set, then
	 * atomically clear the 'clr' bits and set the 'set' bits in the
	 * big-endian PTE image, keeping the old value for the caller.
	 */
	__asm__ __volatile__(
	"1:	ldarx	%0,0,%3		# pte_update\n\
	and.	%1,%0,%6\n\
	bne-	1b \n\
	andc	%1,%0,%4 \n\
	or	%1,%1,%7\n\
	stdcx.	%1,0,%3 \n\
	bne-	1b"
	: "=&r" (old_be), "=&r" (tmp_be), "=m" (*ptep)
	: "r" (ptep), "r" (cpu_to_be64(clr)), "m" (*ptep),
	  "r" (cpu_to_be64(H_PAGE_BUSY)), "r" (cpu_to_be64(set))
	: "cc" );
	/* huge pages use the old page table lock */
	if (!huge)
		assert_pte_locked(mm, addr);

	old = be64_to_cpu(old_be);
	if (old & H_PAGE_HASHPTE)
		hpte_need_flush(mm, addr, ptep, old, huge);

	return old;
}

/*
 * Set the dirty and/or accessed bits atomically in a linux PTE; this
 * function doesn't need to flush the hash entry.
 */
static inline void hash__ptep_set_access_flags(pte_t *ptep, pte_t entry)
{
	__be64 old, tmp, val, mask;

	mask = cpu_to_be64(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_READ | _PAGE_WRITE |
			   _PAGE_EXEC | _PAGE_SOFT_DIRTY);

	val = pte_raw(entry) & mask;

	/* Wait until H_PAGE_BUSY is clear, then OR in the new bits. */
	__asm__ __volatile__(
	"1:	ldarx	%0,0,%4\n\
		and.	%1,%0,%6\n\
		bne-	1b \n\
		or	%0,%3,%0\n\
		stdcx.	%0,0,%4\n\
		bne-	1b"
	:"=&r" (old), "=&r" (tmp), "=m" (*ptep)
	:"r" (val), "r" (ptep), "m" (*ptep), "r" (cpu_to_be64(H_PAGE_BUSY))
	:"cc");
}

static inline int hash__pte_same(pte_t pte_a, pte_t pte_b)
{
	return (((pte_raw(pte_a) ^ pte_raw(pte_b)) & ~cpu_to_be64(_PAGE_HPTEFLAGS)) == 0);
}

static inline int hash__pte_none(pte_t pte)
{
	return (pte_val(pte) & ~H_PTE_NONE_MASK) == 0;
}

/*
 * This low-level function performs the actual PTE insertion.
 * Setting the PTE depends on the MMU type and other factors. It's
 * a horrible mess that I'm not going to try to clean up now but
 * I'm keeping it in one place rather than spread around.
 */
static inline void hash__set_pte_at(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep, pte_t pte, int percpu)
{
	/*
	 * Anything else just stores the PTE normally. That covers all 64-bit
	 * cases, and 32-bit non-hash with 32-bit PTEs.
	 */
	*ptep = pte;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
				   pmd_t *pmdp, unsigned long old_pmd);
#else
static inline void hpte_do_hugepage_flush(struct mm_struct *mm,
					  unsigned long addr, pmd_t *pmdp,
					  unsigned long old_pmd)
{
	WARN(1, "%s called with THP disabled\n", __func__);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

extern int hash__map_kernel_page(unsigned long ea, unsigned long pa,
				 unsigned long flags);
extern int __meminit hash__vmemmap_create_mapping(unsigned long start,
						  unsigned long page_size,
						  unsigned long phys);
extern void hash__vmemmap_remove_mapping(unsigned long start,
					 unsigned long page_size);

int hash__create_section_mapping(unsigned long start, unsigned long end);
int hash__remove_section_mapping(unsigned long start, unsigned long end);

#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_BOOK3S_64_HASH_H */
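
/*
 * A worked example of the region arithmetic above, as an illustrative
 * sketch only; every value below follows directly from the definitions
 * in this header, and none of it is additional kernel API:
 *
 *   REGION_ID(ea) extracts the top nibble of an effective address, so
 *
 *     VMALLOC_REGION_ID = REGION_ID(0xD000000000000000) = 0xD
 *     H_VMEMMAP_BASE    = 0xfUL << 60 = 0xF000000000000000
 *     H_VMALLOC_END     = 0xD000000000000000 + (0x0000100000000000 >> 1)
 *                       = 0xD000080000000000
 *
 *   i.e. the vmalloc space takes the bottom half of the kernel
 *   non-linear virtual area at the start of the 0xD region, and the
 *   virtual memmap lives in its own 0xF region.
 */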