#ifndef _ASM_POWERPC_NOHASH_32_PGTABLE_H
#define _ASM_POWERPC_NOHASH_32_PGTABLE_H

#include <asm-generic/pgtable-nopmd.h>

#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/threads.h>
#include <asm/io.h>			/* For sub-arch specific PPC_PIN_SIZE */

extern unsigned long ioremap_bot;

#ifdef CONFIG_44x
extern int icache_44x_need_flush;
#endif

#endif /* __ASSEMBLY__ */

/*
 * The normal case is that PTEs are 32-bits and we have a 1-page
 * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages.  -- paulus
 *
 * For any >32-bit physical address platform, we can use the following
 * two level page table layout where the pgdir is 8KB and the MS 13 bits
 * are an index to the second level table.  The combined pgdir/pmd first
 * level has 2048 entries and the second level has 512 64-bit PTE entries.
 * -Matt
 */
/* PGDIR_SHIFT determines what a top-level page table entry can map */
#define PGDIR_SHIFT	(PAGE_SHIFT + PTE_SHIFT)
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * entries per page directory level: our page-table tree is two-level, so
 * we don't really have any PMD directory.
 */
#ifndef __ASSEMBLY__
#define PTE_TABLE_SIZE	(sizeof(pte_t) << PTE_SHIFT)
#define PGD_TABLE_SIZE	(sizeof(pgd_t) << (32 - PGDIR_SHIFT))
#endif	/* __ASSEMBLY__ */

#define PTRS_PER_PTE	(1 << PTE_SHIFT)
#define PTRS_PER_PMD	1
#define PTRS_PER_PGD	(1 << (32 - PGDIR_SHIFT))

#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_ADDRESS	0UL

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
		(unsigned long long)pte_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/*
 * This is the bottom of the PKMAP area with HIGHMEM or an arbitrary
 * value (for now) on others, from where we can start laying out kernel
 * virtual space that goes below PKMAP and FIXMAP
 */
#ifdef CONFIG_HIGHMEM
#define KVIRT_TOP	PKMAP_BASE
#else
#define KVIRT_TOP	(0xfe000000UL)	/* for now, could be FIXMAP_BASE ? */
#endif

/*
 * ioremap_bot starts at that address.  Early ioremaps move down from there,
 * until mem_init() at which point this becomes the top of the vmalloc
 * and ioremap space
 */
#ifdef CONFIG_NOT_COHERENT_CACHE
#define IOREMAP_TOP	((KVIRT_TOP - CONFIG_CONSISTENT_SIZE) & PAGE_MASK)
#else
#define IOREMAP_TOP	KVIRT_TOP
#endif

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 16MB value just means that there will be a 16MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 *
 * We no longer map larger than phys RAM with the BATs so we don't have
 * to worry about the VMALLOC_OFFSET causing problems.  We do have to worry
 * about clashes between our early calls to ioremap() (growing down from
 * IOREMAP_TOP) and the VM area allocations (growing upwards from
 * VMALLOC_START).  For this reason we have ioremap_bot to check when we
 * actually run into the mappings set up in early boot with the VM system.
 * This really does become a problem for machines with large amounts of
 * RAM.  -- Cort
 */
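/*
 * Worked example (illustrative values only): with PAGE_OFFSET at
 * 0xc0000000 and 256MB of lowmem, high_memory is 0xd0000000.  Then
 *
 *	VMALLOC_START = (0xd0000000 + 0x01000000) & ~0x00ffffff
 *		      = 0xd1000000
 *
 * i.e. the vmalloc area begins 16MB above the end of lowmem, rounded to a
 * 16MB boundary, and grows upwards until it meets ioremap_bot
 * (VMALLOC_END) coming down from IOREMAP_TOP.
 */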
#define VMALLOC_OFFSET (0x1000000) /* 16M */
#ifdef PPC_PIN_SIZE
#define VMALLOC_START (((_ALIGN((long)high_memory, PPC_PIN_SIZE) + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#else
#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#endif
#define VMALLOC_END	ioremap_bot

/*
 * Bits in a linux-style PTE.  These match the bits in the
 * (hardware-defined) PowerPC PTE as closely as possible.
 */

#if defined(CONFIG_40x)
#include <asm/nohash/32/pte-40x.h>
#elif defined(CONFIG_44x)
#include <asm/nohash/32/pte-44x.h>
#elif defined(CONFIG_FSL_BOOKE) && defined(CONFIG_PTE_64BIT)
#include <asm/nohash/pte-book3e.h>
#elif defined(CONFIG_FSL_BOOKE)
#include <asm/nohash/32/pte-fsl-booke.h>
#elif defined(CONFIG_8xx)
#include <asm/nohash/32/pte-8xx.h>
#endif

/* And here we include common definitions */
#include <asm/pte-common.h>

#ifndef __ASSEMBLY__

#define pte_clear(mm, addr, ptep) \
	do { pte_update(ptep, ~_PAGE_HASHPTE, 0); } while (0)

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_bad(pmd)		(pmd_val(pmd) & _PMD_BAD)
#define pmd_present(pmd)	(pmd_val(pmd) & _PMD_PRESENT_MASK)
static inline void pmd_clear(pmd_t *pmdp)
{
	*pmdp = __pmd(0);
}


/*
 * When flushing the tlb entry for a page, we also need to flush the hash
 * table entry.  flush_hash_pages is assembler (for speed) in hashtable.S.
 */
extern int flush_hash_pages(unsigned context, unsigned long va,
			    unsigned long pmdval, int count);

/* Add an HPTE to the hash table */
extern void add_hash_page(unsigned context, unsigned long va,
			  unsigned long pmdval);

/* Flush an entry from the TLB/hash table */
extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
			     unsigned long address);

/*
 * PTE updates.  This function is called whenever an existing
 * valid PTE is updated.  This does -not- include set_pte_at()
 * which nowadays only sets a new PTE.
 *
 * Depending on the type of MMU, we may need to use atomic updates
 * and the PTE may be either 32 or 64 bit wide.  In the latter case,
 * when using atomic updates, only the low part of the PTE is
 * accessed atomically.
 *
 * In addition, on 44x, we also maintain a global flag indicating
 * that an executable user mapping was modified, which is needed
 * to properly flush the virtually tagged instruction cache of
 * those implementations.
 */
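/*
 * Typical use (illustrative sketch): callers pass a mask of bits to clear
 * and a mask of bits to set, and get the previous PTE value back, e.g.
 *
 *	old = pte_update(ptep, _PAGE_ACCESSED, 0);		clear "young"
 *	pte_update(ptep, _PAGE_RW | _PAGE_HWWRITE, _PAGE_RO);	write-protect
 *
 * as done by ptep_test_and_clear_young() and ptep_set_wrprotect() below.
 */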
#ifndef CONFIG_PTE_64BIT
static inline unsigned long pte_update(pte_t *p,
				       unsigned long clr,
				       unsigned long set)
{
#ifdef PTE_ATOMIC_UPDATES
	unsigned long old, tmp;

	__asm__ __volatile__("\
1:	lwarx	%0,0,%3\n\
	andc	%1,%0,%4\n\
	or	%1,%1,%5\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%1,0,%3\n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" (clr), "r" (set), "m" (*p)
	: "cc" );
#else /* PTE_ATOMIC_UPDATES */
	unsigned long old = pte_val(*p);
	*p = __pte((old & ~clr) | set);
#endif /* !PTE_ATOMIC_UPDATES */

#ifdef CONFIG_44x
	if ((old & _PAGE_USER) && (old & _PAGE_EXEC))
		icache_44x_need_flush = 1;
#endif
	return old;
}
#else /* CONFIG_PTE_64BIT */
static inline unsigned long long pte_update(pte_t *p,
					    unsigned long clr,
					    unsigned long set)
{
#ifdef PTE_ATOMIC_UPDATES
	unsigned long long old;
	unsigned long tmp;

	__asm__ __volatile__("\
1:	lwarx	%L0,0,%4\n\
	lwzx	%0,0,%3\n\
	andc	%1,%L0,%5\n\
	or	%1,%1,%6\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%1,0,%4\n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p)
	: "cc" );
#else /* PTE_ATOMIC_UPDATES */
	unsigned long long old = pte_val(*p);
	*p = __pte((old & ~(unsigned long long)clr) | set);
#endif /* !PTE_ATOMIC_UPDATES */

#ifdef CONFIG_44x
	if ((old & _PAGE_USER) && (old & _PAGE_EXEC))
		icache_44x_need_flush = 1;
#endif
	return old;
}
#endif /* CONFIG_PTE_64BIT */

/*
 * 2.6 calls this without flushing the TLB entry; this is wrong
 * for our hash-based implementation, so we fix that up here.
 */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int __ptep_test_and_clear_young(unsigned int context, unsigned long addr, pte_t *ptep)
{
	unsigned long old;
	old = pte_update(ptep, _PAGE_ACCESSED, 0);
#if _PAGE_HASHPTE != 0
	if (old & _PAGE_HASHPTE) {
		unsigned long ptephys = __pa(ptep) & PAGE_MASK;
		flush_hash_pages(context, addr, ptephys, 1);
	}
#endif
	return (old & _PAGE_ACCESSED) != 0;
}
#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
	__ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep)

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep)
{
	pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), _PAGE_RO);
}
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
					   unsigned long addr, pte_t *ptep)
{
	ptep_set_wrprotect(mm, addr, ptep);
}


static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
{
	unsigned long set = pte_val(entry) &
		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
	unsigned long clr = ~pte_val(entry) & _PAGE_RO;

	pte_update(ptep, clr, set);
}

#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)

/*
 * Note that on Book E processors, the pmd contains the kernel virtual
 * (lowmem) address of the pte page.  The physical address is less useful
 * because everything runs with translation enabled (even the TLB miss
 * handler).  On everything else the pmd contains the physical address
 * of the pte page.  -- paulus
 */
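/*
 * Example (illustrative addresses): if a pte page lives at physical
 * 0x01234000 (lowmem virtual 0xc1234000 with PAGE_OFFSET 0xc0000000),
 * then on non-Book E the pmd holds 0x01234000 and pmd_page_vaddr() must
 * apply __va(), whereas on Book E the pmd already holds 0xc1234000 and
 * pmd_page() must apply __pa() before converting to a pfn.
 */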
#ifndef CONFIG_BOOKE
#define pmd_page_vaddr(pmd)	\
	((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd)		\
	pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
#else
#define pmd_page_vaddr(pmd)	\
	((unsigned long) (pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd)		\
	pfn_to_page((__pa(pmd_val(pmd)) >> PAGE_SHIFT))
#endif

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)

/* to find an entry in a page-table-directory */
#define pgd_index(address)	((address) >> PGDIR_SHIFT)
#define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))

/* Find an entry in the third-level page table. */
#define pte_index(address)		\
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, addr)	\
	(pmd_bad(*(dir)) ? NULL : (pte_t *)pmd_page_vaddr(*(dir)) + \
				  pte_index(addr))
#define pte_offset_map(dir, addr)		\
	((pte_t *)kmap_atomic(pmd_page(*(dir))) + pte_index(addr))
#define pte_unmap(pte)		kunmap_atomic(pte)

/*
 * Encode and decode a swap entry.
 * Note that the bits we use in a PTE for representing a swap entry
 * must not include the _PAGE_PRESENT bit or the _PAGE_HASHPTE bit (if used).
 *   -- paulus
 */
#define __swp_type(entry)		((entry).val & 0x1f)
#define __swp_offset(entry)		((entry).val >> 5)
#define __swp_entry(type, offset)	((swp_entry_t) { (type) | ((offset) << 5) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) >> 3 })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val << 3 })

#ifndef CONFIG_PPC_4K_PAGES
void pgtable_cache_init(void);
#else
/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)
#endif

extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep,
		      pmd_t **pmdp);

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_POWERPC_NOHASH_32_PGTABLE_H */