/*
 *  arch/arm/include/asm/pgtable.h
 *
 *  Copyright (C) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_PGTABLE_H
#define _ASMARM_PGTABLE_H

#include <linux/const.h>
#include <asm/proc-fns.h>

#ifndef CONFIG_MMU

#include <asm-generic/4level-fixup.h>
#include <asm/pgtable-nommu.h>

#else

#include <asm-generic/pgtable-nopud.h>
#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>

#include <asm/tlbflush.h>

#ifdef CONFIG_ARM_LPAE
#include <asm/pgtable-3level.h>
#else
#include <asm/pgtable-2level.h>
#endif

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */
#define VMALLOC_OFFSET		(8*1024*1024)
#define VMALLOC_START		(((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#define VMALLOC_END		0xff000000UL
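/*
 * Editor's note (illustrative, not part of the original header): a worked
 * example of the VMALLOC_START rounding above.  Assume, hypothetically,
 * 128MB of RAM so that high_memory ends up at 0xc8000000:
 *
 *   (0xc8000000 + 0x00800000) & ~0x007fffff = 0xc8800000
 *
 * leaving exactly an 8MB hole.  When high_memory is not 8MB aligned the
 * hole shrinks by the misalignment, but never to zero.
 */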

#define LIBRARY_TEXT_START	0x0c000000

#ifndef __ASSEMBLY__
extern void __pte_error(const char *file, int line, pte_t);
extern void __pmd_error(const char *file, int line, pmd_t);
extern void __pgd_error(const char *file, int line, pgd_t);

#define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte)
#define pmd_ERROR(pmd)		__pmd_error(__FILE__, __LINE__, pmd)
#define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd)

/*
 * This is the lowest virtual address at which we permit any user space
 * mapping to be placed.  This is particularly important for CPUs with
 * non-high vectors, whose exception vectors live in the first page of
 * the address space.
 */
#define FIRST_USER_ADDRESS	(PAGE_SIZE * 2)
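/*
 * Editor's note: with the usual 4kB PAGE_SIZE this evaluates to 0x2000,
 * keeping the first two pages (including the low exception vectors at
 * virtual address 0) out of reach of user mappings.
 */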

/*
 * Use TASK_SIZE as the ceiling argument for free_pgtables() and
 * free_pgd_range() to avoid freeing the modules pmd when LPAE is enabled (pmd
 * page shared between user and kernel).
 */
#ifdef CONFIG_ARM_LPAE
#define USER_PGTABLES_CEILING	TASK_SIZE
#endif

/*
 * The pgprot_* and protection_map entries will be fixed up at runtime
 * to include the cacheable and bufferable bits based on memory policy,
 * as well as any architecture dependent bits such as global/ASID and SMP
 * shared mapping bits.
 */
#define _L_PTE_DEFAULT	(L_PTE_PRESENT | L_PTE_YOUNG)

extern pgprot_t		pgprot_user;
extern pgprot_t		pgprot_kernel;
extern pgprot_t		pgprot_hyp_device;
extern pgprot_t		pgprot_s2;
extern pgprot_t		pgprot_s2_device;

#define _MOD_PROT(p, b)	__pgprot(pgprot_val(p) | (b))

#define PAGE_NONE		_MOD_PROT(pgprot_user, L_PTE_XN | L_PTE_RDONLY | L_PTE_NONE)
#define PAGE_SHARED		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_XN)
#define PAGE_SHARED_EXEC	_MOD_PROT(pgprot_user, L_PTE_USER)
#define PAGE_COPY		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define PAGE_COPY_EXEC		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
#define PAGE_READONLY		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define PAGE_READONLY_EXEC	_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
#define PAGE_KERNEL		_MOD_PROT(pgprot_kernel, L_PTE_XN)
#define PAGE_KERNEL_EXEC	pgprot_kernel
#define PAGE_HYP		_MOD_PROT(pgprot_kernel, L_PTE_HYP)
#define PAGE_HYP_DEVICE		_MOD_PROT(pgprot_hyp_device, L_PTE_HYP)
#define PAGE_S2			_MOD_PROT(pgprot_s2, L_PTE_S2_RDONLY)
#define PAGE_S2_DEVICE		_MOD_PROT(pgprot_s2_device, L_PTE_S2_RDWR)
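/*
 * Editor's note: _MOD_PROT() simply ORs extra Linux PTE bits into a base
 * pgprot.  As an illustration, PAGE_KERNEL above expands to:
 *
 *   __pgprot(pgprot_val(pgprot_kernel) | (L_PTE_XN))
 *
 * i.e. the runtime-chosen kernel protection with execute-never added.
 */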

#define __PAGE_NONE		__pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN | L_PTE_NONE)
#define __PAGE_SHARED		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN)
#define __PAGE_SHARED_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER)
#define __PAGE_COPY		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define __PAGE_COPY_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY)
#define __PAGE_READONLY		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define __PAGE_READONLY_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY)

#define __pgprot_modify(prot,mask,bits)		\
	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))

#define pgprot_noncached(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)

#define pgprot_writecombine(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE)

#define pgprot_stronglyordered(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)

#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
#define pgprot_dmacoherent(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE | L_PTE_XN)
#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);
#else
#define pgprot_dmacoherent(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED | L_PTE_XN)
#endif
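/*
 * Editor's note: a minimal usage sketch (hypothetical driver code, not
 * part of this header) showing how the modifiers above are typically
 * applied in an mmap handler before remapping; foo_pfn stands in for a
 * device page frame number:
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 *		return remap_pfn_range(vma, vma->vm_start, foo_pfn,
 *				       vma->vm_end - vma->vm_start,
 *				       vma->vm_page_prot);
 *	}
 */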

#endif /* __ASSEMBLY__ */

/*
 * The table below defines the page protection levels that we insert into our
 * Linux page table version.  These get translated into the best that the
 * architecture can perform.  Note that on most ARM hardware:
 *  1) We cannot do execute protection
 *  2) If we could do execute protection, then read is implied
 *  3) write implies read permissions
 */
#define __P000  __PAGE_NONE
#define __P001  __PAGE_READONLY
#define __P010  __PAGE_COPY
#define __P011  __PAGE_COPY
#define __P100  __PAGE_READONLY_EXEC
#define __P101  __PAGE_READONLY_EXEC
#define __P110  __PAGE_COPY_EXEC
#define __P111  __PAGE_COPY_EXEC

#define __S000  __PAGE_NONE
#define __S001  __PAGE_READONLY
#define __S010  __PAGE_SHARED
#define __S011  __PAGE_SHARED
#define __S100  __PAGE_READONLY_EXEC
#define __S101  __PAGE_READONLY_EXEC
#define __S110  __PAGE_SHARED_EXEC
#define __S111  __PAGE_SHARED_EXEC
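/*
 * Editor's note: in __Pxxx/__Sxxx the three digits select execute, write
 * and read permission in that order; __P* entries are for private
 * (copy-on-write) mappings and __S* for shared ones.  For example,
 * __P010 (private, write-only) maps to __PAGE_COPY: per notes 2) and 3)
 * above, write implies read, and a writable private page must be
 * copy-on-write.
 */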

#ifndef __ASSEMBLY__
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern struct page *empty_zero_page;
#define ZERO_PAGE(vaddr)	(empty_zero_page)


extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/* to find an entry in a page-table-directory */
#define pgd_index(addr)		((addr) >> PGDIR_SHIFT)

#define pgd_offset(mm, addr)	((mm)->pgd + pgd_index(addr))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)
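/*
 * Editor's note: a worked example of pgd_index(), assuming the classic
 * ARM 2-level configuration where PGDIR_SHIFT is 21, for the
 * hypothetical user address 0x7f4d2000:
 *
 *   pgd_index(0x7f4d2000) = 0x7f4d2000 >> 21 = 0x3fa
 *
 * so pgd_offset(mm, 0x7f4d2000) yields &mm->pgd[0x3fa].
 */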

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_present(pmd)	(pmd_val(pmd))

static inline pte_t *pmd_page_vaddr(pmd_t pmd)
{
	/* The (s32) cast sign-extends PAGE_MASK so that, when pmd_val()
	 * is a 64-bit LPAE quantity, the high physical address bits
	 * survive the masking and only the page-offset bits are cleared.
	 */
	return __va(pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK);
}

#define pmd_page(pmd)		pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))

#ifndef CONFIG_HIGHPTE
#define __pte_map(pmd)		pmd_page_vaddr(*(pmd))
#define __pte_unmap(pte)	do { } while (0)
#else
#define __pte_map(pmd)		(pte_t *)kmap_atomic(pmd_page(*(pmd)))
#define __pte_unmap(pte)	kunmap_atomic(pte)
#endif

#define pte_index(addr)		(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

#define pte_offset_kernel(pmd,addr)	(pmd_page_vaddr(*(pmd)) + pte_index(addr))

#define pte_offset_map(pmd,addr)	(__pte_map(pmd) + pte_index(addr))
#define pte_unmap(pte)			__pte_unmap(pte)
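/*
 * Editor's note: a minimal sketch of how the helpers above chain
 * together to reach a PTE for a kernel address kaddr (error checks such
 * as pgd_none() omitted for brevity):
 *
 *	pgd_t *pgd = pgd_offset_k(kaddr);
 *	pud_t *pud = pud_offset(pgd, kaddr);
 *	pmd_t *pmd = pmd_offset(pud, kaddr);
 *	pte_t *pte = pte_offset_kernel(pmd, kaddr);
 */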

#define pte_pfn(pte)		((pte_val(pte) & PHYS_MASK) >> PAGE_SHIFT)
#define pfn_pte(pfn,prot)	__pte(__pfn_to_phys(pfn) | pgprot_val(prot))

#define pte_page(pte)		pfn_to_page(pte_pfn(pte))
#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page), prot)

#define pte_clear(mm,addr,ptep)	set_pte_ext(ptep, __pte(0), 0)

#define pte_none(pte)		(!pte_val(pte))
#define pte_present(pte)	(pte_val(pte) & L_PTE_PRESENT)
#define pte_valid(pte)		(pte_val(pte) & L_PTE_VALID)
#define pte_accessible(mm, pte)	(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte))
#define pte_write(pte)		(!(pte_val(pte) & L_PTE_RDONLY))
#define pte_dirty(pte)		(pte_val(pte) & L_PTE_DIRTY)
#define pte_young(pte)		(pte_val(pte) & L_PTE_YOUNG)
#define pte_exec(pte)		(!(pte_val(pte) & L_PTE_XN))
#define pte_special(pte)	(0)

#define pte_valid_user(pte)	\
	(pte_valid(pte) && (pte_val(pte) & L_PTE_USER) && pte_young(pte))

#if __LINUX_ARM_ARCH__ < 6
static inline void __sync_icache_dcache(pte_t pteval)
{
}
#else
extern void __sync_icache_dcache(pte_t pteval);
#endif

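/*
 * Editor's note: set_pte_at() below is the hook through which generic mm
 * code installs a PTE.  For a valid user mapping it first syncs the
 * I-cache with the D-cache for the page (so stale instructions are not
 * executed from it) and then sets the hardware "not global" bit, making
 * the TLB entry specific to the current ASID.
 */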
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval)
{
	unsigned long ext = 0;

	if (addr < TASK_SIZE && pte_valid_user(pteval)) {
		__sync_icache_dcache(pteval);
		ext |= PTE_EXT_NG;
	}

	set_pte_ext(ptep, pteval, ext);
}

#define PTE_BIT_FUNC(fn,op) \
static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }

PTE_BIT_FUNC(wrprotect, |= L_PTE_RDONLY);
PTE_BIT_FUNC(mkwrite,   &= ~L_PTE_RDONLY);
PTE_BIT_FUNC(mkclean,   &= ~L_PTE_DIRTY);
PTE_BIT_FUNC(mkdirty,   |= L_PTE_DIRTY);
PTE_BIT_FUNC(mkold,     &= ~L_PTE_YOUNG);
PTE_BIT_FUNC(mkyoung,   |= L_PTE_YOUNG);
PTE_BIT_FUNC(mkexec,    &= ~L_PTE_XN);
PTE_BIT_FUNC(mknexec,   |= L_PTE_XN);
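/*
 * Editor's note: each PTE_BIT_FUNC() line above generates a small
 * accessor.  For instance, PTE_BIT_FUNC(wrprotect, |= L_PTE_RDONLY)
 * expands to:
 *
 *	static inline pte_t pte_wrprotect(pte_t pte)
 *	{
 *		pte_val(pte) |= L_PTE_RDONLY;
 *		return pte;
 *	}
 *
 * The pte is modified and returned by value; the caller must still
 * write it back (e.g. via set_pte_at()) for the change to take effect.
 */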

static inline pte_t pte_mkspecial(pte_t pte) { return pte; }

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
		L_PTE_NONE | L_PTE_VALID;
	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	return pte;
}

/*
 * Encode and decode a swap entry.  Swap entries are stored in the Linux
 * page tables as follows:
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   <--------------- offset ----------------------> < type -> 0 0 0
 *
 * This gives us up to 31 swap files and 64GB per swap file.  Note that
 * the offset field is always non-zero.
 */
#define __SWP_TYPE_SHIFT	3
#define __SWP_TYPE_BITS		5
#define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)

#define __swp_type(x)		(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)		((x).val >> __SWP_OFFSET_SHIFT)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })
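/*
 * Editor's note: a worked example of the encoding above, using the
 * hypothetical swap entry (type 2, offset 0x1234):
 *
 *   __swp_entry(2, 0x1234) = (2 << 3) | (0x1234 << 8) = 0x00123410
 *
 * __swp_type() recovers 2 from bits 3..7, __swp_offset() recovers
 * 0x1234 from bits 8 and up, and bits 0..2 stay zero, so the entry can
 * never be mistaken for a present PTE (L_PTE_PRESENT is clear).
 */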

/*
 * It is an error for the kernel to have more swap files than we can
 * encode in the PTEs.  This ensures that we know when MAX_SWAPFILES
 * is increased beyond what we presently support.
 */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

/*
 * Encode and decode a file entry.  File entries are stored in the Linux
 * page tables as follows:
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   <----------------------- offset ------------------------> 1 0 0
 */
#define pte_file(pte)		(pte_val(pte) & L_PTE_FILE)
#define pte_to_pgoff(x)		(pte_val(x) >> 3)
#define pgoff_to_pte(x)		__pte(((x) << 3) | L_PTE_FILE)

#define PTE_FILE_MAX_BITS	29
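/*
 * Editor's note: a worked example of the file-entry encoding, using the
 * hypothetical page offset 0x5000:
 *
 *   pgoff_to_pte(0x5000) = __pte((0x5000 << 3) | L_PTE_FILE)
 *
 * L_PTE_PRESENT (bit 0) stays clear, so the PTE is "not present", while
 * L_PTE_FILE (bit 2) distinguishes it from a swap entry; pte_to_pgoff()
 * undoes the shift.  PTE_FILE_MAX_BITS is 29 because the 3 low bits of
 * the 32-bit PTE are sacrificed for this tagging.
 */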

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
/* FIXME: this is not correct */
#define kern_addr_valid(addr)	(1)

#include <asm-generic/pgtable.h>

/*
 * We provide our own arch_get_unmapped_area to cope with VIPT caches.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#define pgtable_cache_init() do { } while (0)

#endif /* !__ASSEMBLY__ */

#endif /* CONFIG_MMU */

#endif /* _ASMARM_PGTABLE_H */