/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 */
#ifndef _ASM_PGTABLE_64_H
#define _ASM_PGTABLE_64_H

#include <linux/compiler.h>
#include <linux/linkage.h>

#include <asm/addrspace.h>
#include <asm/page.h>
#include <asm/cachectl.h>
#include <asm/fixmap.h>

#define __ARCH_USE_5LEVEL_HACK
#if defined(CONFIG_PAGE_SIZE_64KB) && !defined(CONFIG_MIPS_VA_BITS_48)
#include <asm-generic/pgtable-nopmd.h>
#elif !(defined(CONFIG_PAGE_SIZE_4KB) && defined(CONFIG_MIPS_VA_BITS_48))
#include <asm-generic/pgtable-nopud.h>
#endif

/*
 * Each address space has two 4K pages as its page directory, giving 1024
 * (== PTRS_PER_PGD) 8 byte pointers to pmd tables.  Each pmd table is a
 * single 4K page, giving 512 (== PTRS_PER_PMD) 8 byte pointers to page
 * tables.  Each page table is also a single 4K page, giving 512 (==
 * PTRS_PER_PTE) 8 byte ptes.  Each pud entry is initialized to point to
 * invalid_pmd_table, each pmd entry is initialized to point to
 * invalid_pte_table, each pte is initialized to 0.  When memory is low
 * and a pmd table or a page table allocation fails, empty_bad_pmd_table
 * or empty_bad_page_table is returned to higher layer code, so that the
 * failure is recognized later on.  Linux does not seem to handle these
 * failures very well though.  The empty_bad_page_table has invalid pte
 * entries in it, to force page faults.
 *
 * Kernel mappings: kernel mappings are held in swapper_pg_dir.
 * The layout is identical to userspace except it's indexed with the
 * fault address - VMALLOC_START.
 */


/* PGDIR_SHIFT determines what a third-level page table entry can map */
#ifdef __PAGETABLE_PMD_FOLDED
#define PGDIR_SHIFT	(PAGE_SHIFT + PAGE_SHIFT + PTE_ORDER - 3)
#else

/* PMD_SHIFT determines the size of the area a second-level page table can map */
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT + PTE_ORDER - 3))
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))

# ifdef __PAGETABLE_PUD_FOLDED
# define PGDIR_SHIFT	(PMD_SHIFT + (PAGE_SHIFT + PMD_ORDER - 3))
# endif
#endif

#ifndef __PAGETABLE_PUD_FOLDED
#define PUD_SHIFT	(PMD_SHIFT + (PAGE_SHIFT + PMD_ORDER - 3))
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define PGDIR_SHIFT	(PUD_SHIFT + (PAGE_SHIFT + PUD_ORDER - 3))
#endif

#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * For 4kB page size we use a 3 level page tree and an 8kB pgd, which
 * permits us mapping 40 bits of virtual address space.
 *
 * We used to implement 41 bits by having an order 1 pmd level but that seemed
 * rather pointless.
 *
 * For 8kB page size we use a 3 level page tree which permits a total of
 * 8TB of address space.  Alternatively a 33-bit / 8GB organization using
 * two levels would be easy to implement.
 *
 * For 16kB page size we use a 2 level page tree which permits a total of
 * 36 bits of virtual address space.  We could add a third level but it seems
 * like at the moment there's no need for this.
 *
 * For 64kB page size we use a 2 level page table tree for a total of 42 bits
 * of virtual address space.
 */
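/*
 * Worked example, assuming the default 4kB configuration chosen below
 * (PGD_ORDER == 1, PMD_ORDER == 0, PTE_ORDER == 0): PAGE_SHIFT is 12,
 * and a single page of 8-byte entries resolves PAGE_SHIFT - 3 == 9 bits
 * per level.  Thus PMD_SHIFT == 12 + 9 == 21, PGDIR_SHIFT == 21 + 9 ==
 * 30, and the order-1 (8kB) pgd indexes a further 10 bits, giving the
 * 40 bits of virtual address space quoted above.
 */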
#ifdef CONFIG_PAGE_SIZE_4KB
# ifdef CONFIG_MIPS_VA_BITS_48
# define PGD_ORDER		0
# define PUD_ORDER		0
# else
# define PGD_ORDER		1
# define PUD_ORDER		aieeee_attempt_to_allocate_pud
# endif
#define PMD_ORDER		0
#define PTE_ORDER		0
#endif
#ifdef CONFIG_PAGE_SIZE_8KB
#define PGD_ORDER		0
#define PUD_ORDER		aieeee_attempt_to_allocate_pud
#define PMD_ORDER		0
#define PTE_ORDER		0
#endif
#ifdef CONFIG_PAGE_SIZE_16KB
#ifdef CONFIG_MIPS_VA_BITS_48
#define PGD_ORDER		1
#else
#define PGD_ORDER		0
#endif
#define PUD_ORDER		aieeee_attempt_to_allocate_pud
#define PMD_ORDER		0
#define PTE_ORDER		0
#endif
#ifdef CONFIG_PAGE_SIZE_32KB
#define PGD_ORDER		0
#define PUD_ORDER		aieeee_attempt_to_allocate_pud
#define PMD_ORDER		0
#define PTE_ORDER		0
#endif
#ifdef CONFIG_PAGE_SIZE_64KB
#define PGD_ORDER		0
#define PUD_ORDER		aieeee_attempt_to_allocate_pud
#ifdef CONFIG_MIPS_VA_BITS_48
#define PMD_ORDER		0
#else
#define PMD_ORDER		aieeee_attempt_to_allocate_pmd
#endif
#define PTE_ORDER		0
#endif

#define PTRS_PER_PGD	((PAGE_SIZE << PGD_ORDER) / sizeof(pgd_t))
#ifndef __PAGETABLE_PUD_FOLDED
#define PTRS_PER_PUD	((PAGE_SIZE << PUD_ORDER) / sizeof(pud_t))
#endif
#ifndef __PAGETABLE_PMD_FOLDED
#define PTRS_PER_PMD	((PAGE_SIZE << PMD_ORDER) / sizeof(pmd_t))
#endif
#define PTRS_PER_PTE	((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))

#define USER_PTRS_PER_PGD	((TASK_SIZE64 / PGDIR_SIZE)?(TASK_SIZE64 / PGDIR_SIZE):1)
#define FIRST_USER_ADDRESS	0UL

/*
 * TLB refill handlers also map the vmalloc area into xuseg.  Avoid
 * the first couple of pages so NULL pointer dereferences will still
 * reliably trap.
 */
#define VMALLOC_START		(MAP_BASE + (2 * PAGE_SIZE))
#define VMALLOC_END	\
	(MAP_BASE + \
	 min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, \
	     (1UL << cpu_vmbits)) - (1UL << 32))

#if defined(CONFIG_MODULES) && defined(KBUILD_64BIT_SYM32) && \
	VMALLOC_START != CKSSEG
/* Load modules into 32bit-compatible segment. */
#define MODULE_START	CKSSEG
#define MODULE_END	(FIXADDR_START-2*PAGE_SIZE)
#endif

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#endif
#ifndef __PAGETABLE_PUD_FOLDED
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
#endif
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))

extern pte_t invalid_pte_table[PTRS_PER_PTE];
extern pte_t empty_bad_page_table[PTRS_PER_PTE];
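/*
 * Worked example, again for the default 4kB configuration (PGD_ORDER
 * == 1, PMD_ORDER == PTE_ORDER == 0, pud folded so PTRS_PER_PUD == 1):
 * PTRS_PER_PGD == (4096 << 1) / 8 == 1024 and PTRS_PER_PMD ==
 * PTRS_PER_PTE == 4096 / 8 == 512, so the product in VMALLOC_END above
 * spans 1024 * 512 * 512 * 4096 == 2^40 bytes, clamped by min() to the
 * CPU's implemented virtual range (1UL << cpu_vmbits).
 */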
#ifndef __PAGETABLE_PUD_FOLDED
/*
 * For 4-level pagetables we define these ourselves; for 3-level
 * pagetables the definitions are supplied by
 * <asm-generic/pgtable-nopud.h>, and for 2-level by
 * <asm-generic/pgtable-nopmd.h>.
 */
typedef struct { unsigned long pud; } pud_t;
#define pud_val(x)	((x).pud)
#define __pud(x)	((pud_t) { (x) })

extern pud_t invalid_pud_table[PTRS_PER_PUD];

/*
 * Empty pgd entries point to the invalid_pud_table.
 */
static inline int pgd_none(pgd_t pgd)
{
	return pgd_val(pgd) == (unsigned long)invalid_pud_table;
}

static inline int pgd_bad(pgd_t pgd)
{
	if (unlikely(pgd_val(pgd) & ~PAGE_MASK))
		return 1;

	return 0;
}

static inline int pgd_present(pgd_t pgd)
{
	return pgd_val(pgd) != (unsigned long)invalid_pud_table;
}

static inline void pgd_clear(pgd_t *pgdp)
{
	pgd_val(*pgdp) = (unsigned long)invalid_pud_table;
}

#define pud_index(address)	(((address) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))

static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
	return pgd_val(pgd);
}

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
	return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(address);
}

static inline void set_pgd(pgd_t *pgd, pgd_t pgdval)
{
	*pgd = pgdval;
}

#endif

#ifndef __PAGETABLE_PMD_FOLDED
/*
 * For 3-level pagetables we define these ourselves; for 2-level the
 * definitions are supplied by <asm-generic/pgtable-nopmd.h>.
 */
typedef struct { unsigned long pmd; } pmd_t;
#define pmd_val(x)	((x).pmd)
#define __pmd(x)	((pmd_t) { (x) })

extern pmd_t invalid_pmd_table[PTRS_PER_PMD];
#endif

/*
 * Empty pgd/pmd entries point to the invalid_pte_table.
 */
static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == (unsigned long) invalid_pte_table;
}

static inline int pmd_bad(pmd_t pmd)
{
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
	/* pmd_huge(pmd) but inline */
	if (unlikely(pmd_val(pmd) & _PAGE_HUGE))
		return 0;
#endif

	if (unlikely(pmd_val(pmd) & ~PAGE_MASK))
		return 1;

	return 0;
}

static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != (unsigned long) invalid_pte_table;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = ((unsigned long) invalid_pte_table);
}
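/*
 * Illustrative sketch: a software walk of these tables chains the
 * lookup helpers defined in this file (folded levels collapse to
 * pass-throughs supplied by the asm-generic headers), so the same code
 * works for 2-, 3- and 4-level layouts.  The example_walk() helper
 * below is hypothetical, and error checks (pgd_none() etc.) are
 * omitted for brevity:
 *
 *	static inline pte_t *example_walk(struct mm_struct *mm,
 *					  unsigned long addr)
 *	{
 *		pgd_t *pgd = pgd_offset(mm, addr);
 *		pud_t *pud = pud_offset(pgd, addr);
 *		pmd_t *pmd = pmd_offset(pud, addr);
 *
 *		return pte_offset_kernel(pmd, addr);
 *	}
 */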
#ifndef __PAGETABLE_PMD_FOLDED

/*
 * Empty pud entries point to the invalid_pmd_table.
 */
static inline int pud_none(pud_t pud)
{
	return pud_val(pud) == (unsigned long) invalid_pmd_table;
}

static inline int pud_bad(pud_t pud)
{
	return pud_val(pud) & ~PAGE_MASK;
}

static inline int pud_present(pud_t pud)
{
	return pud_val(pud) != (unsigned long) invalid_pmd_table;
}

static inline void pud_clear(pud_t *pudp)
{
	pud_val(*pudp) = ((unsigned long) invalid_pmd_table);
}
#endif

#define pte_page(x)		pfn_to_page(pte_pfn(x))

#ifdef CONFIG_CPU_VR41XX
#define pte_pfn(x)		((unsigned long)((x).pte >> (PAGE_SHIFT + 2)))
#define pfn_pte(pfn, prot)	__pte(((pfn) << (PAGE_SHIFT + 2)) | pgprot_val(prot))
#else
#define pte_pfn(x)		((unsigned long)((x).pte >> _PFN_SHIFT))
#define pfn_pte(pfn, prot)	__pte(((pfn) << _PFN_SHIFT) | pgprot_val(prot))
#define pfn_pmd(pfn, prot)	__pmd(((pfn) << _PFN_SHIFT) | pgprot_val(prot))
#endif

#define __pgd_offset(address)	pgd_index(address)
#define __pud_offset(address)	(((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define __pmd_offset(address)	pmd_index(address)

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)

#define pgd_index(address)	(((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pmd_index(address)	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

/* to find an entry in a page-table-directory */
#define pgd_offset(mm, addr)	((mm)->pgd + pgd_index(addr))

#ifndef __PAGETABLE_PMD_FOLDED
static inline unsigned long pud_page_vaddr(pud_t pud)
{
	return pud_val(pud);
}
#define pud_phys(pud)		virt_to_phys((void *)pud_val(pud))
#define pud_page(pud)		(pfn_to_page(pud_phys(pud) >> PAGE_SHIFT))

/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	return (pmd_t *) pud_page_vaddr(*pud) + pmd_index(address);
}
#endif

/* Find an entry in the third-level page table.. */
#define __pte_offset(address)						\
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset(dir, address)					\
	((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
#define pte_offset_kernel(dir, address)					\
	((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
#define pte_offset_map(dir, address)					\
	((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
#define pte_unmap(pte)		((void)(pte))

/*
 * Initialize a new pgd / pmd table with invalid pointers.
 */
extern void pgd_init(unsigned long page);
extern void pud_init(unsigned long page, unsigned long pagetable);
extern void pmd_init(unsigned long page, unsigned long pagetable);

/*
 * Non-present pages: high 40 bits are offset, next 8 bits type,
 * low 16 bits zero.
 */
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{ pte_t pte; pte_val(pte) = (type << 16) | (offset << 24); return pte; }

#define __swp_type(x)		(((x).val >> 16) & 0xff)
#define __swp_offset(x)		((x).val >> 24)
#define __swp_entry(type, offset) ((swp_entry_t) { pte_val(mk_swap_pte((type), (offset))) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

#endif /* _ASM_PGTABLE_64_H */