/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 */
#ifndef _ASM_PGTABLE_64_H
#define _ASM_PGTABLE_64_H

#include <linux/compiler.h>
#include <linux/linkage.h>

#include <asm/addrspace.h>
#include <asm/page.h>
#include <asm/cachectl.h>
#include <asm/fixmap.h>

#if defined(CONFIG_PAGE_SIZE_64KB) && !defined(CONFIG_MIPS_VA_BITS_48)
#include <asm-generic/pgtable-nopmd.h>
#else
#include <asm-generic/pgtable-nopud.h>
#endif
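
/*
 * With 64kB pages (and no 48-bit virtual addressing) two levels of
 * table already cover the usable address space, so the pmd level is
 * folded away; every other configuration folds the pud instead and
 * uses a three-level tree.
 */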

/*
 * Each address space has two 4K pages as its page directory, giving 1024
 * (== PTRS_PER_PGD) 8 byte pointers to pmd tables. Each pmd table is a
 * single 4K page, giving 512 (== PTRS_PER_PMD) 8 byte pointers to page
 * tables. Each page table is also a single 4K page, giving 512 (==
 * PTRS_PER_PTE) 8 byte ptes. Each pud entry is initialized to point to
 * invalid_pmd_table, each pmd entry is initialized to point to
 * invalid_pte_table, and each pte is initialized to 0. When memory is low
 * and a pmd table or a page table allocation fails, empty_bad_pmd_table
 * and empty_bad_page_table are returned to the higher layer code, so
 * that the failure is recognized later on. Linux does not seem to
 * handle these failures very well, though. The empty_bad_page_table has
 * invalid pte entries in it, to force page faults.
 *
 * Kernel mappings: kernel mappings are held in swapper_pg_dir.
 * The layout is identical to userspace except it's indexed with the
 * fault address - VMALLOC_START.
 */

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#ifdef __PAGETABLE_PMD_FOLDED
#define PGDIR_SHIFT	(PAGE_SHIFT + PAGE_SHIFT + PTE_ORDER - 3)
#else

/* PMD_SHIFT determines the size of the area a second-level page table can map */
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT + PTE_ORDER - 3))
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))

#define PGDIR_SHIFT	(PMD_SHIFT + (PAGE_SHIFT + PMD_ORDER - 3))
#endif
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
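
/*
 * Worked example (illustrative): with 4kB pages PAGE_SHIFT is 12 and
 * PTE_ORDER = PMD_ORDER = 0, so PMD_SHIFT = 12 + (12 - 3) = 21 and
 * PGDIR_SHIFT = 21 + (12 - 3) = 30.  Each pgd entry then maps 2^30
 * bytes, and with PGD_ORDER = 1 (1024 pgd entries) the tree spans
 * 2^40 bytes -- the "40 bits" quoted in the comment below.
 */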

/*
 * For 4kB page size we use a 3 level page tree plus an 8kB pgd, which
 * permits mapping 40 bits of virtual address space.
 *
 * We used to implement 41 bits by having an order 1 pmd level but that seemed
 * rather pointless.
 *
 * For 8kB page size we use a 3 level page tree which permits a total of
 * 8TB of address space.  Alternatively a 33-bit / 8GB organization using
 * two levels would be easy to implement.
 *
 * For 16kB page size we use a 2 level page tree which permits a total of
 * 36 bits of virtual address space.  We could add a third level but at the
 * moment there seems to be no need for it.
 *
 * For 64kB page size we use a 2 level page table tree for a total of 42 bits
 * of virtual address space.
 */
#ifdef CONFIG_PAGE_SIZE_4KB
#define PGD_ORDER		1
#define PUD_ORDER		aieeee_attempt_to_allocate_pud
#define PMD_ORDER		0
#define PTE_ORDER		0
#endif
#ifdef CONFIG_PAGE_SIZE_8KB
#define PGD_ORDER		0
#define PUD_ORDER		aieeee_attempt_to_allocate_pud
#define PMD_ORDER		0
#define PTE_ORDER		0
#endif
#ifdef CONFIG_PAGE_SIZE_16KB
#ifdef CONFIG_MIPS_VA_BITS_48
#define PGD_ORDER		1
#else
#define PGD_ORDER		0
#endif
#define PUD_ORDER		aieeee_attempt_to_allocate_pud
#define PMD_ORDER		0
#define PTE_ORDER		0
#endif
#ifdef CONFIG_PAGE_SIZE_32KB
#define PGD_ORDER		0
#define PUD_ORDER		aieeee_attempt_to_allocate_pud
#define PMD_ORDER		0
#define PTE_ORDER		0
#endif
#ifdef CONFIG_PAGE_SIZE_64KB
#define PGD_ORDER		0
#define PUD_ORDER		aieeee_attempt_to_allocate_pud
#ifdef CONFIG_MIPS_VA_BITS_48
#define PMD_ORDER		0
#else
#define PMD_ORDER		aieeee_attempt_to_allocate_pmd
#endif
#define PTE_ORDER		0
#endif
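
/*
 * Note that PUD_ORDER (and PMD_ORDER in the two-level configurations)
 * deliberately expands to an undeclared identifier: any code that
 * accidentally tries to allocate a folded level then fails to compile
 * instead of misbehaving at run time.
 */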

#define PTRS_PER_PGD	((PAGE_SIZE << PGD_ORDER) / sizeof(pgd_t))
#ifndef __PAGETABLE_PMD_FOLDED
#define PTRS_PER_PMD	((PAGE_SIZE << PMD_ORDER) / sizeof(pmd_t))
#endif
#define PTRS_PER_PTE	((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))
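
/*
 * For instance (illustrative), with 4kB pages and the orders above:
 * PTRS_PER_PGD = (4096 << 1) / 8 = 1024, and PTRS_PER_PMD =
 * PTRS_PER_PTE = 4096 / 8 = 512, matching the layout comment at the
 * top of this file.
 */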

#define USER_PTRS_PER_PGD	((TASK_SIZE64 / PGDIR_SIZE) ? (TASK_SIZE64 / PGDIR_SIZE) : 1)
#define FIRST_USER_ADDRESS	0UL

/*
 * TLB refill handlers also map the vmalloc area into xuseg.  Avoid
 * the first couple of pages so NULL pointer dereferences will still
 * reliably trap.
 */
#define VMALLOC_START		(MAP_BASE + (2 * PAGE_SIZE))
#define VMALLOC_END	\
	(MAP_BASE + \
	 min(PTRS_PER_PGD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, \
	     (1UL << cpu_vmbits)) - (1UL << 32))
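
/*
 * VMALLOC_END is bounded by whichever is smaller: the span the page
 * tables can map or the virtual address bits the CPU implements
 * (cpu_vmbits); the topmost 4GB of that range are then left unused,
 * presumably to keep vmalloc mappings clear of the segment's upper
 * regions.
 */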

#if defined(CONFIG_MODULES) && defined(KBUILD_64BIT_SYM32) && \
	VMALLOC_START != CKSSEG
/* Load modules into 32bit-compatible segment. */
#define MODULE_START	CKSSEG
#define MODULE_END	(FIXADDR_START - 2 * PAGE_SIZE)
#endif
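
/*
 * MODULE_END stops two pages short of FIXADDR_START, leaving a small
 * gap below the fixmapped area.
 */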

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#endif
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))

extern pte_t invalid_pte_table[PTRS_PER_PTE];
extern pte_t empty_bad_page_table[PTRS_PER_PTE];

#ifndef __PAGETABLE_PMD_FOLDED
/*
 * For 3-level pagetables we define these ourselves; for 2-level the
 * definitions are supplied by <asm-generic/pgtable-nopmd.h>.
 */
typedef struct { unsigned long pmd; } pmd_t;
#define pmd_val(x)	((x).pmd)
#define __pmd(x)	((pmd_t) { (x) } )

extern pmd_t invalid_pmd_table[PTRS_PER_PMD];
#endif

/*
 * Empty pgd/pmd entries point to the invalid_pte_table.
 */
static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == (unsigned long) invalid_pte_table;
}

static inline int pmd_bad(pmd_t pmd)
{
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
	/* pmd_huge(pmd) but inline */
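	/*
	 * A huge-page pmd is a leaf entry: its low bits carry page
	 * flags rather than a page-table pointer, so the alignment
	 * check below must not be applied to it.
	 */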
	if (unlikely(pmd_val(pmd) & _PAGE_HUGE))
		return 0;
#endif

	if (unlikely(pmd_val(pmd) & ~PAGE_MASK))
		return 1;

	return 0;
}

static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != (unsigned long) invalid_pte_table;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = ((unsigned long) invalid_pte_table);
}

#ifndef __PAGETABLE_PMD_FOLDED
/*
 * Empty pud entries point to the invalid_pmd_table.
 */
static inline int pud_none(pud_t pud)
{
	return pud_val(pud) == (unsigned long) invalid_pmd_table;
}

static inline int pud_bad(pud_t pud)
{
	return pud_val(pud) & ~PAGE_MASK;
}

static inline int pud_present(pud_t pud)
{
	return pud_val(pud) != (unsigned long) invalid_pmd_table;
}

static inline void pud_clear(pud_t *pudp)
{
	pud_val(*pudp) = ((unsigned long) invalid_pmd_table);
}
#endif

#define pte_page(x)		pfn_to_page(pte_pfn(x))

#ifdef CONFIG_CPU_VR41XX
#define pte_pfn(x)		((unsigned long)((x).pte >> (PAGE_SHIFT + 2)))
#define pfn_pte(pfn, prot)	__pte(((pfn) << (PAGE_SHIFT + 2)) | pgprot_val(prot))
#else
#define pte_pfn(x)		((unsigned long)((x).pte >> _PFN_SHIFT))
#define pfn_pte(pfn, prot)	__pte(((pfn) << _PFN_SHIFT) | pgprot_val(prot))
#define pfn_pmd(pfn, prot)	__pmd(((pfn) << _PFN_SHIFT) | pgprot_val(prot))
#endif

#define __pgd_offset(address)	pgd_index(address)
#define __pud_offset(address)	(((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define __pmd_offset(address)	pmd_index(address)

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)

#define pgd_index(address)	(((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pmd_index(address)	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

/* to find an entry in a page-table-directory */
#define pgd_offset(mm, addr)	((mm)->pgd + pgd_index(addr))

#ifndef __PAGETABLE_PMD_FOLDED
static inline unsigned long pud_page_vaddr(pud_t pud)
{
	return pud_val(pud);
}
#define pud_phys(pud)		virt_to_phys((void *)pud_val(pud))
#define pud_page(pud)		(pfn_to_page(pud_phys(pud) >> PAGE_SHIFT))

/* Find an entry in the second-level page table. */
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	return (pmd_t *) pud_page_vaddr(*pud) + pmd_index(address);
}
#endif

/* Find an entry in the third-level page table. */
#define __pte_offset(address)						\
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset(dir, address)					\
	((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
#define pte_offset_kernel(dir, address)					\
	((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
#define pte_offset_map(dir, address)					\
	((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
#define pte_unmap(pte) ((void)(pte))
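
/*
 * Illustrative walk (not part of the original header; mm and addr are
 * placeholders): resolving an address down to its pte with the helpers
 * above.  With a folded pud, pud_offset() comes from
 * <asm-generic/pgtable-nopud.h>.
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 */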

/*
 * Initialize a new pgd / pmd table with invalid pointers.
 */
extern void pgd_init(unsigned long page);
extern void pmd_init(unsigned long page, unsigned long pagetable);

/*
 * Non-present pages:  high 40 bits are offset, next 8 bits type,
 * low 16 bits zero.
 */
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	pte_t pte;

	pte_val(pte) = (type << 16) | (offset << 24);
	return pte;
}
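
/*
 * Resulting swap entry layout (derived from mk_swap_pte above):
 *
 *	 63                    24 23      16 15             0
 *	+-------------------------+----------+---------------+
 *	|          offset         |   type   |       0       |
 *	+-------------------------+----------+---------------+
 */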

#define __swp_type(x)		(((x).val >> 16) & 0xff)
#define __swp_offset(x)		((x).val >> 24)
#define __swp_entry(type, offset) ((swp_entry_t) { pte_val(mk_swap_pte((type), (offset))) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

#endif /* _ASM_PGTABLE_64_H */