/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 */
#ifndef _ASM_PGTABLE_32_H
#define _ASM_PGTABLE_32_H

#include <asm/addrspace.h>
#include <asm/page.h>

#include <linux/linkage.h>
#include <asm/cachectl.h>
#include <asm/fixmap.h>

#include <asm-generic/pgtable-nopmd.h>

extern int temp_tlb_entry;

/*
 * add_temporary_entry() adds a temporary TLB entry.  We use TLB entries
 * starting at the top and working down.  This is for populating the
 * TLB before trap_init() puts the TLB miss handler in place.  It
 * should be used only for entries matching the actual page tables,
 * to prevent inconsistencies.
 */
extern int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
			       unsigned long entryhi, unsigned long pagemask);

/*
 * Basically we have the same two-level page tables as the i386 (the
 * logical three-level Linux page table layout, folded).  Some day,
 * when we have proper page coloring support, we can have a 1% quicker
 * TLB refill handling mechanism; for now it is a bit slower, but it
 * works even with the cache aliasing problem the R4k and above have.
 */

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define PGDIR_SHIFT	(2 * PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2)
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * Entries per page directory level: we use two-level, so
 * we don't really have any PUD/PMD directory physically.
 */
#define __PGD_ORDER	(32 - 3 * PAGE_SHIFT + PGD_T_LOG2 + PTE_T_LOG2)
#define PGD_ORDER	(__PGD_ORDER >= 0 ? __PGD_ORDER : 0)
#define PUD_ORDER	aieeee_attempt_to_allocate_pud
#define PMD_ORDER	1
#define PTE_ORDER	0

#define PTRS_PER_PGD	(USER_PTRS_PER_PGD * 2)
#define PTRS_PER_PTE	((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))

#define USER_PTRS_PER_PGD	(0x80000000UL/PGDIR_SIZE)
#define FIRST_USER_ADDRESS	0UL

#define VMALLOC_START	MAP_BASE

#define PKMAP_BASE	(0xfe000000UL)

#ifdef CONFIG_HIGHMEM
# define VMALLOC_END	(PKMAP_BASE-2*PAGE_SIZE)
#else
# define VMALLOC_END	(FIXADDR_START-2*PAGE_SIZE)
#endif

#ifdef CONFIG_PHYS_ADDR_T_64BIT
#define pte_ERROR(e) \
	printk("%s:%d: bad pte %016Lx.\n", __FILE__, __LINE__, pte_val(e))
#else
#define pte_ERROR(e) \
	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#endif
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

extern void load_pgd(unsigned long pg_dir);

extern pte_t invalid_pte_table[PAGE_SIZE/sizeof(pte_t)];
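/*
 * Worked sizing example (an illustrative sketch only, not part of the
 * header proper): it assumes 4 KiB pages and 32-bit pte_t/pgd_t, i.e.
 * PAGE_SHIFT == 12 and PTE_T_LOG2 == PGD_T_LOG2 == 2, the common
 * non-XPA configuration.  With the definitions above:
 *
 *	PGDIR_SHIFT       = 2 * 12 + 0 - 2 = 22, so one PGD entry maps
 *	                    PGDIR_SIZE = 1UL << 22 = 4 MiB.
 *	PTRS_PER_PTE      = (4096 << 0) / 4 = 1024 PTEs per PTE page.
 *	USER_PTRS_PER_PGD = 0x80000000 / 0x400000 = 512, hence
 *	PTRS_PER_PGD      = 512 * 2 = 1024 and __PGD_ORDER = 0: the
 *	                    whole PGD fits in a single 4 KiB page.
 */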
/*
 * Empty pgd/pmd entries point to the invalid_pte_table.
 */
static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == (unsigned long) invalid_pte_table;
}

#define pmd_bad(pmd)		(pmd_val(pmd) & ~PAGE_MASK)

static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != (unsigned long) invalid_pte_table;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = ((unsigned long) invalid_pte_table);
}

#if defined(CONFIG_XPA)

#define pte_pfn(x)		(((unsigned long)((x).pte_high >> _PFN_SHIFT)) | (unsigned long)((x).pte_low << _PAGE_PRESENT_SHIFT))
static inline pte_t
pfn_pte(unsigned long pfn, pgprot_t prot)
{
	pte_t pte;

	pte.pte_low = (pfn >> _PAGE_PRESENT_SHIFT) |
				(pgprot_val(prot) & ~_PFNX_MASK);
	pte.pte_high = (pfn << _PFN_SHIFT) |
				(pgprot_val(prot) & ~_PFN_MASK);
	return pte;
}

#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)

#define pte_pfn(x)		((unsigned long)((x).pte_high >> 6))

static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
{
	pte_t pte;

	pte.pte_high = (pfn << 6) | (pgprot_val(prot) & 0x3f);
	pte.pte_low = pgprot_val(prot);

	return pte;
}

#else

#ifdef CONFIG_CPU_VR41XX
#define pte_pfn(x)		((unsigned long)((x).pte >> (PAGE_SHIFT + 2)))
#define pfn_pte(pfn, prot)	__pte(((pfn) << (PAGE_SHIFT + 2)) | pgprot_val(prot))
#else
#define pte_pfn(x)		((unsigned long)((x).pte >> _PFN_SHIFT))
#define pfn_pte(pfn, prot)	__pte(((unsigned long long)(pfn) << _PFN_SHIFT) | pgprot_val(prot))
#endif
#endif /* defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) */

#define pte_page(x)		pfn_to_page(pte_pfn(x))

#define __pgd_offset(address)	pgd_index(address)
#define __pud_offset(address)	(((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define __pmd_offset(address)	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)

#define pgd_index(address)	(((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))

/* to find an entry in a page-table-directory */
#define pgd_offset(mm, addr)	((mm)->pgd + pgd_index(addr))

/* Find an entry in the third-level page table. */
#define __pte_offset(address)						\
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset(dir, address)					\
	((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
#define pte_offset_kernel(dir, address)					\
	((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))

#define pte_offset_map(dir, address)					\
	((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
#define pte_unmap(pte) ((void)(pte))
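/*
 * Usage sketch (illustrative only, not compiled as part of this
 * header): walking from the kernel page tables to the PTE for a
 * kernel virtual address with the helpers above.  The pud/pmd steps
 * are the no-op folds supplied by <asm-generic/pgtable-nopmd.h>;
 * "addr" is a hypothetical kernel virtual address.
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *
 *	if (pmd_none(*pmd) || pmd_bad(*pmd))
 *		return NULL;
 *	return pte_offset_kernel(pmd, addr);
 */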
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)

/* Swap entries must have VALID bit cleared. */
#define __swp_type(x)			(((x).val >> 10) & 0x1f)
#define __swp_offset(x)			((x).val >> 15)
#define __swp_entry(type,offset)	((swp_entry_t) { ((type) << 10) | ((offset) << 15) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })

#else

#if defined(CONFIG_XPA)

/* Swap entries must have VALID and GLOBAL bits cleared. */
#define __swp_type(x)			(((x).val >> 4) & 0x1f)
#define __swp_offset(x)			((x).val >> 9)
#define __swp_entry(type,offset)	((swp_entry_t) { ((type) << 4) | ((offset) << 9) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { (pte).pte_high })
#define __swp_entry_to_pte(x)		((pte_t) { 0, (x).val })

#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)

/* Swap entries must have VALID and GLOBAL bits cleared. */
#define __swp_type(x)			(((x).val >> 2) & 0x1f)
#define __swp_offset(x)			((x).val >> 7)
#define __swp_entry(type, offset)	((swp_entry_t) { ((type) << 2) | ((offset) << 7) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { (pte).pte_high })
#define __swp_entry_to_pte(x)		((pte_t) { 0, (x).val })

#else
/*
 * Constraints:
 *	_PAGE_PRESENT at bit 0
 *	_PAGE_MODIFIED at bit 4
 *	_PAGE_GLOBAL at bit 6
 *	_PAGE_VALID at bit 7
 */
#define __swp_type(x)			(((x).val >> 8) & 0x1f)
#define __swp_offset(x)			((x).val >> 13)
#define __swp_entry(type,offset)	((swp_entry_t) { ((type) << 8) | ((offset) << 13) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })

#endif /* defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) */

#endif /* defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) */

#endif /* _ASM_PGTABLE_32_H */
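/*
 * Swap encoding round trip, worked for the generic (non-XPA, 32-bit
 * physical address) layout above.  Illustrative arithmetic only; the
 * type and offset values are arbitrary:
 *
 *	__swp_entry(3, 0x1000).val = (3 << 8) | (0x1000 << 13)
 *	                           = 0x00000300 | 0x02000000
 *	                           = 0x02000300
 *	__swp_type(...)   = (0x02000300 >> 8) & 0x1f = 3
 *	__swp_offset(...) = 0x02000300 >> 13         = 0x1000
 *
 * The type field starts at bit 8 and the offset at bit 13, so bits
 * 0-7 stay zero in a swap PTE; this satisfies the constraints above,
 * which reserve _PAGE_PRESENT, _PAGE_MODIFIED, _PAGE_GLOBAL and
 * _PAGE_VALID at bits 0, 4, 6 and 7.
 */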