/*
 * Copyright (C) 2011 Tobias Klauser <tklauser@distanz.ch>
 * Copyright (C) 2009 Wind River Systems Inc
 *
 * Based on asm/pgtable-32.h from mips which is:
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#ifndef _ASM_NIOS2_PGTABLE_H
#define _ASM_NIOS2_PGTABLE_H

#include <linux/io.h>
#include <linux/bug.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include <asm/pgtable-bits.h>
#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>

#define FIRST_USER_ADDRESS	0UL

#define VMALLOC_START		CONFIG_NIOS2_KERNEL_MMU_REGION_BASE
#define VMALLOC_END		(CONFIG_NIOS2_KERNEL_REGION_BASE - 1)

struct mm_struct;

/* Helper macro */
#define MKP(x, w, r) __pgprot(_PAGE_PRESENT | _PAGE_CACHED |		\
				((x) ? _PAGE_EXEC : 0) |		\
				((r) ? _PAGE_READ : 0) |		\
				((w) ? _PAGE_WRITE : 0))
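
/*
 * For example, MKP(0, 1, 1) yields a present, cached, readable and
 * writable (but non-executable) protection value.
 */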
/*
 * These are the macros that generic kernel code needs
 * (to populate protection_map[])
 */

/* Remove W bit on private pages for COW support */
#define __P000	MKP(0, 0, 0)
#define __P001	MKP(0, 0, 1)
#define __P010	MKP(0, 0, 0)	/* COW */
#define __P011	MKP(0, 0, 1)	/* COW */
#define __P100	MKP(1, 0, 0)
#define __P101	MKP(1, 0, 1)
#define __P110	MKP(1, 0, 0)	/* COW */
#define __P111	MKP(1, 0, 1)	/* COW */

/* Shared pages can have exact HW mapping */
#define __S000	MKP(0, 0, 0)
#define __S001	MKP(0, 0, 1)
#define __S010	MKP(0, 1, 0)
#define __S011	MKP(0, 1, 1)
#define __S100	MKP(1, 0, 0)
#define __S101	MKP(1, 0, 1)
#define __S110	MKP(1, 1, 0)
#define __S111	MKP(1, 1, 1)
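
/*
 * Generic code indexes protection_map[] with a VMA's VM_READ, VM_WRITE
 * and VM_EXEC flags, matching the __P<x><w><r>/__S<x><w><r> naming
 * above. Private (__P) entries drop the write bit so that the first
 * write to such a page faults and can be handled as copy-on-write.
 */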

/* Used all over the kernel */
#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_CACHED | _PAGE_READ | \
			     _PAGE_WRITE | _PAGE_EXEC | _PAGE_GLOBAL)

#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_CACHED | _PAGE_READ | \
			     _PAGE_WRITE | _PAGE_ACCESSED)

#define PAGE_COPY MKP(0, 0, 1)

#define PGD_ORDER	0
#define PTE_ORDER	0

#define PTRS_PER_PGD	((PAGE_SIZE << PGD_ORDER) / sizeof(pgd_t))
#define PTRS_PER_PTE	((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))

#define USER_PTRS_PER_PGD	\
	(CONFIG_NIOS2_KERNEL_MMU_REGION_BASE / PGDIR_SIZE)

#define PGDIR_SHIFT	22
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
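
/*
 * With the usual 4 KiB pages (and 4-byte table entries) this gives
 * PTRS_PER_PGD == PTRS_PER_PTE == 1024, and each pgd entry maps
 * PGDIR_SIZE == 4 MiB of virtual address space.
 */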

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr)	(virt_to_page(empty_zero_page))

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern pte_t invalid_pte_table[PAGE_SIZE/sizeof(pte_t)];

/*
 * (pmds are folded into puds, so this doesn't actually get called,
 * but the define is needed for a generic inline function.)
 */
static inline void set_pmd(pmd_t *pmdptr, pmd_t pmdval)
{
	pmdptr->pud.pgd.pgd = pmdval.pud.pgd.pgd;
}

/* to find an entry in a page-table-directory */
#define pgd_index(addr)		(((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
#define pgd_offset(mm, addr)	((mm)->pgd + pgd_index(addr))
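
/*
 * pgd_index() simply extracts bits 31..22 of the virtual address;
 * e.g. addr == 0x0040beef falls into pgd entry 1 of the given mm.
 */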

static inline int pte_write(pte_t pte)
	{ return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)
	{ return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)
	{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_special(pte_t pte)	{ return 0; }

#define pgprot_noncached pgprot_noncached

static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot &= ~_PAGE_CACHED;

	return __pgprot(prot);
}
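
/*
 * Defining the macro above signals to generic code that this
 * architecture supplies its own pgprot_noncached(); clearing
 * _PAGE_CACHED yields an uncached mapping, as used e.g. when
 * mmap()ing device memory.
 */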

static inline int pte_none(pte_t pte)
{
	return !(pte_val(pte) & ~(_PAGE_GLOBAL|0xf));
}
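
/*
 * Note that _PAGE_GLOBAL and the low four bits are deliberately
 * ignored here: pte_clear() below stashes low address bits in that
 * nibble, and such entries must still count as "none".
 */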

static inline int pte_present(pte_t pte)
	{ return pte_val(pte) & _PAGE_PRESENT; }

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_WRITE;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_DIRTY;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_ACCESSED;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_DIRTY;
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)	{ return pte; }

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_ACCESSED;
	return pte;
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	const unsigned long mask = _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC;

	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	return pte;
}
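
/*
 * pte_modify() replaces only the access bits and keeps everything else
 * (frame number, dirty/accessed, cacheability); e.g. changing a
 * read-only pte to PAGE_SHARED sets _PAGE_READ | _PAGE_WRITE without
 * touching the pfn.
 */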

static inline int pmd_present(pmd_t pmd)
{
	return (pmd_val(pmd) != (unsigned long) invalid_pte_table)
			&& (pmd_val(pmd) != 0UL);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = (unsigned long) invalid_pte_table;
}

#define pte_pfn(pte)		(pte_val(pte) & 0xfffff)
#define pfn_pte(pfn, prot)	(__pte((pfn) | pgprot_val(prot)))
#define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))
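
/*
 * The page frame number occupies the low 20 bits of a pte, enough for
 * 2**20 frames of 4 KiB, i.e. a 4 GiB physical address space.
 */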

/*
 * Store a linux PTE into the linux page table.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval)
{
	unsigned long paddr = (unsigned long)page_to_virt(pte_page(pteval));

	flush_dcache_range(paddr, paddr + PAGE_SIZE);
	set_pte(ptep, pteval);
}
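
/*
 * The page being mapped is flushed from the data cache before the pte
 * is installed, presumably so the new mapping starts out coherent with
 * memory rather than seeing stale cache lines.
 */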

static inline int pmd_none(pmd_t pmd)
{
	return (pmd_val(pmd) ==
		(unsigned long) invalid_pte_table) || (pmd_val(pmd) == 0UL);
}

#define pmd_bad(pmd)	(pmd_val(pmd) & ~PAGE_MASK)

static inline void pte_clear(struct mm_struct *mm,
				unsigned long addr, pte_t *ptep)
{
	pte_t null;

	pte_val(null) = (addr >> PAGE_SHIFT) & 0xf;

	set_pte_at(mm, addr, ptep, null);
}
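
/*
 * The "null" pte keeps the low four bits of the page number, which
 * pte_none() ignores, so a cleared entry still reads as "none" while
 * remaining distinct for neighbouring addresses.
 */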

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, prot)	(pfn_pte(page_to_pfn(page), prot))

#define pte_unmap(pte)	do { } while (0)

/*
 * Conversion functions for the pmd level: a pmd entry holds the kernel
 * virtual address of its pte table.
 */
#define pmd_phys(pmd)		virt_to_phys((void *)pmd_val(pmd))
#define pmd_page(pmd)		(pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
#define pmd_page_vaddr(pmd)	pmd_val(pmd)

#define pte_offset_map(dir, addr)			\
	((pte_t *) page_address(pmd_page(*(dir))) +	\
	 (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)

/* Get the address of the PTE for a vaddr in a specific directory */
#define pte_offset_kernel(dir, addr)			\
	((pte_t *) pmd_page_vaddr(*(dir)) +		\
	 (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %08lx.\n", \
		__FILE__, __LINE__, pte_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08lx.\n", \
		__FILE__, __LINE__, pgd_val(e))

/*
 * Encode and decode a swap entry (must be !pte_none(pte) &&
 * !pte_present(pte)):
 *
 * 31 30 29 28 27 26 25 24 23 22 21 20 19 18 ...  1  0
 *  0  0  0  0 type.  0  0  0  0  0  0 offset.........
 *
 * This gives us up to 2**2 = 4 swap files and 2**20 * 4K = 4G per swap file.
 *
 * Note that the offset field is always non-zero, thus !pte_none(pte) is
 * always true.
 */
#define __swp_type(swp)		(((swp).val >> 26) & 0x3)
#define __swp_offset(swp)	((swp).val & 0xfffff)
#define __swp_entry(type, off)	((swp_entry_t) { (((type) & 0x3) << 26) \
						 | ((off) & 0xfffff) })
#define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
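
/*
 * Worked example: __swp_entry(1, 2) encodes to (1 << 26) | 2, i.e.
 * 0x04000002; __swp_type() then recovers 1 and __swp_offset() 2.
 */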

#define kern_addr_valid(addr)		(1)

#include <asm-generic/pgtable.h>

#define pgtable_cache_init()		do { } while (0)

extern void __init paging_init(void);
extern void __init mmu_init(void);

extern void update_mmu_cache(struct vm_area_struct *vma,
			     unsigned long address, pte_t *pte);

#endif /* _ASM_NIOS2_PGTABLE_H */