#ifndef _ASM_POWERPC_PAGE_H
#define _ASM_POWERPC_PAGE_H

/*
 * Copyright (C) 2001,2005 IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <linux/kernel.h>
#else
#include <asm/types.h>
#endif
#include <asm/asm-compat.h>
#include <asm/kdump.h>

/*
 * On regular PPC32 the page size is 4K (but 4K/16K/64K/256K pages are
 * supported on PPC44x). On PPC64 the software page size is either 4K or
 * 64K. When using 64K pages, whether the hardware really implements 64K
 * pages is irrelevant to these definitions.
 */
#if defined(CONFIG_PPC_256K_PAGES)
#define PAGE_SHIFT		18
#elif defined(CONFIG_PPC_64K_PAGES)
#define PAGE_SHIFT		16
#elif defined(CONFIG_PPC_16K_PAGES)
#define PAGE_SHIFT		14
#else
#define PAGE_SHIFT		12
#endif

#define PAGE_SIZE		(ASM_CONST(1) << PAGE_SHIFT)
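
/*
 * Worked example (illustrative only): with CONFIG_PPC_64K_PAGES,
 * PAGE_SHIFT = 16 and PAGE_SIZE = ASM_CONST(1) << 16 = 0x10000 (64K).
 * ASM_CONST() is used rather than a plain "1UL" so the same constant
 * works from both C and assembly, where a "UL" suffix would not parse.
 */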

#ifndef __ASSEMBLY__
#ifdef CONFIG_HUGETLB_PAGE
extern bool hugetlb_disabled;
extern unsigned int HPAGE_SHIFT;
#else
#define HPAGE_SHIFT PAGE_SHIFT
#endif
#define HPAGE_SIZE		((1UL) << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
#define HUGE_MAX_HSTATE		(MMU_PAGE_COUNT-1)
#endif
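
/*
 * Worked example (a sketch, assuming the common 16M hash-MMU huge page
 * with 4K base pages, i.e. HPAGE_SHIFT = 24 and PAGE_SHIFT = 12):
 *   HPAGE_SIZE         = 1UL << 24 = 0x1000000
 *   HPAGE_MASK         = ~(0x1000000 - 1) = 0xffffffffff000000 (64-bit)
 *   HUGETLB_PAGE_ORDER = 24 - 12 = 12
 * With CONFIG_HUGETLB_PAGE set, HPAGE_SHIFT is a runtime variable, so
 * these values are only fixed at boot time.
 */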

/*
 * Subtle: (1 << PAGE_SHIFT) is an int, not an unsigned long. So if we
 * assign PAGE_MASK to a larger type it gets extended the way we want
 * (i.e. with 1s in the high bits)
 */
#define PAGE_MASK      (~((1 << PAGE_SHIFT) - 1))
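
/*
 * Worked example (illustrative only), with PAGE_SHIFT = 12:
 *   (1 << 12) - 1 = 0x00000fff           (int)
 *   ~0x00000fff   = 0xfffff000           (int, i.e. -4096)
 * Assigning that int to a 64-bit type sign-extends it to
 * 0xfffffffffffff000, the mask we want. Had the constant been unsigned
 * (1U << PAGE_SHIFT), the assignment would zero-extend to
 * 0x00000000fffff000 instead, losing the high bits.
 */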

/*
 * KERNELBASE is the virtual address of the start of the kernel. It is
 * often the same as PAGE_OFFSET, but _might not be_.
 *
 * The kdump dump kernel is one example where KERNELBASE != PAGE_OFFSET.
 *
 * PAGE_OFFSET is the virtual address of the start of lowmem.
 *
 * PHYSICAL_START is the physical address of the start of the kernel.
 *
 * MEMORY_START is the physical address of the start of lowmem.
 *
 * KERNELBASE, PAGE_OFFSET, and PHYSICAL_START are all configurable on
 * ppc32 and based on how they are set we determine MEMORY_START.
 *
 * For the linear mapping the following equation should be true:
 * KERNELBASE - PAGE_OFFSET = PHYSICAL_START - MEMORY_START
 *
 * Also, KERNELBASE >= PAGE_OFFSET and PHYSICAL_START >= MEMORY_START
 *
 * There are two ways to determine the virtual address corresponding to
 * a physical one:
 * va = pa + PAGE_OFFSET - MEMORY_START
 * va = pa + KERNELBASE - PHYSICAL_START
 *
 * If you want to know something's offset from the start of the kernel you
 * should subtract KERNELBASE.
 *
 * If you want to test if something's a kernel address, use is_kernel_addr().
 */

#define KERNELBASE      ASM_CONST(CONFIG_KERNEL_START)
#define PAGE_OFFSET	ASM_CONST(CONFIG_PAGE_OFFSET)
#define LOAD_OFFSET	ASM_CONST((CONFIG_KERNEL_START-CONFIG_PHYSICAL_START))
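
/*
 * Worked example (hypothetical values, in the spirit of a kdump kernel):
 * with KERNELBASE = 0xc2000000, PAGE_OFFSET = 0xc0000000,
 * PHYSICAL_START = 0x02000000 and MEMORY_START = 0, the linear-mapping
 * equation above holds (both sides equal 0x02000000), and the two
 * translations agree: pa 0x02100000 maps to
 *   va = 0x02100000 + 0xc0000000 - 0          = 0xc2100000
 *   va = 0x02100000 + 0xc2000000 - 0x02000000 = 0xc2100000
 */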

#if defined(CONFIG_NONSTATIC_KERNEL)
#ifndef __ASSEMBLY__

extern phys_addr_t memstart_addr;
extern phys_addr_t kernstart_addr;

#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_PPC32)
extern long long virt_phys_offset;
#endif

#endif /* __ASSEMBLY__ */
#define PHYSICAL_START	kernstart_addr

#else	/* !CONFIG_NONSTATIC_KERNEL */
#define PHYSICAL_START	ASM_CONST(CONFIG_PHYSICAL_START)
#endif

/* See the description of VIRT_PHYS_OFFSET below */
#if defined(CONFIG_PPC32) && defined(CONFIG_BOOKE)
#ifdef CONFIG_RELOCATABLE
#define VIRT_PHYS_OFFSET virt_phys_offset
#else
#define VIRT_PHYS_OFFSET (KERNELBASE - PHYSICAL_START)
#endif
#endif

#ifdef CONFIG_PPC64
#define MEMORY_START	0UL
#elif defined(CONFIG_NONSTATIC_KERNEL)
#define MEMORY_START	memstart_addr
#else
#define MEMORY_START	(PHYSICAL_START + PAGE_OFFSET - KERNELBASE)
#endif

#ifdef CONFIG_FLATMEM
#define ARCH_PFN_OFFSET		((unsigned long)(MEMORY_START >> PAGE_SHIFT))
#ifndef __ASSEMBLY__
extern unsigned long max_mapnr;
static inline bool pfn_valid(unsigned long pfn)
{
	unsigned long min_pfn = ARCH_PFN_OFFSET;

	return pfn >= min_pfn && pfn < max_mapnr;
}
#endif
#endif
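
/*
 * Worked example (illustrative only): with MEMORY_START = 0x20000000 and
 * 4K pages, ARCH_PFN_OFFSET = 0x20000, so pfn_valid() accepts only pfns
 * in [0x20000, max_mapnr), i.e. frames covered by the flat mem_map[].
 */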

#define virt_to_pfn(kaddr)	(__pa(kaddr) >> PAGE_SHIFT)
#define virt_to_page(kaddr)	pfn_to_page(virt_to_pfn(kaddr))
#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
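
/*
 * Usage sketch (illustrative only): for a linear-map address kaddr,
 *
 *	unsigned long pfn = virt_to_pfn(kaddr);
 *	struct page *pg   = virt_to_page(kaddr);
 *	void *base        = pfn_to_kaddr(pfn);
 *
 * base is kaddr rounded down to a page boundary, since converting to a
 * pfn discards the low PAGE_SHIFT bits.
 */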

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * On hash the vmalloc and other regions alias to the kernel region when passed
 * through __pa(), which virt_to_pfn() uses. That means virt_addr_valid() can
 * return true for some vmalloc addresses, which is incorrect. So explicitly
 * check that the address is in the kernel region.
 */
#define virt_addr_valid(kaddr) (REGION_ID(kaddr) == KERNEL_REGION_ID && \
				pfn_valid(virt_to_pfn(kaddr)))
#else
#define virt_addr_valid(kaddr)	pfn_valid(virt_to_pfn(kaddr))
#endif
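
/*
 * Example of the aliasing (a sketch, assuming the PPC64 __pa() below,
 * which simply masks off the top nibble): __pa(0xd000000000001000) and
 * __pa(0xc000000000001000) both evaluate to 0x1000, so the vmalloc
 * address would pass a bare pfn_valid() check; the REGION_ID test above
 * is what rejects it.
 */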

/*
 * On Book-E parts we need __va to parse the device tree and we can't
 * determine MEMORY_START until then.  However we can determine PHYSICAL_START
 * from information at hand (program counter, TLB lookup).
 *
 * On BookE with RELOCATABLE && PPC32
 *
 *   With RELOCATABLE && PPC32, we support loading the kernel at any physical
 *   address without any restriction on the page alignment.
 *
 *   We find the runtime address of _stext and relocate ourselves based on
 *   the following calculation:
 *
 *   	  virtual_base = ALIGN_DOWN(KERNELBASE, 256M) +
 *   				MODULO(_stext.run, 256M)
 *   and create the following mapping:
 *
 *   	  ALIGN_DOWN(_stext.run, 256M) => ALIGN_DOWN(KERNELBASE, 256M)
 *
 *   When we process relocations, we cannot depend on the
 *   existing equation for the __va()/__pa() translations:
 *
 *   	   __va(x) = (x) - PHYSICAL_START + KERNELBASE
 *
 *   where:
 *   	 PHYSICAL_START = kernstart_addr = physical address of _stext
 *   	 KERNELBASE = compiled virtual address of _stext.
 *
 *   This formula holds true only if the kernel load address is TLB page
 *   aligned.
 *
 *   In our case, we need to also account for the shift in the kernel
 *   virtual address.
 *
 *   E.g.,
 *
 *   Let the kernel be loaded at 64MB and KERNELBASE be 0xc0000000 (same
 *   as PAGE_OFFSET). In this case, we would be mapping 0 to 0xc0000000,
 *   and kernstart_addr = 64M.
 *
 *   Now __va(1MB) = (0x100000) - (0x4000000) + 0xc0000000
 *                 = 0xbc100000, which is wrong.
 *
 *   Rather, it should be 0xc0000000 + 0x100000 = 0xc0100000
 *   according to our mapping.
 *
 *   Hence we use the following formula to get the translations right:
 *
 *   	  __va(x) = (x) - [ PHYSICAL_START - Effective KERNELBASE ]
 *
 *   	  where:
 *   		PHYSICAL_START = dynamic load address (the kernstart_addr
 *   				 variable)
 *   		Effective KERNELBASE = virtual_base
 *   				     = ALIGN_DOWN(KERNELBASE, 256M) +
 *   				       MODULO(PHYSICAL_START, 256M)
 *
 *   To make __va() / __pa() cheaper, we introduce a new variable,
 *   virt_phys_offset, which holds:
 *
 *   	virt_phys_offset = Effective KERNELBASE - PHYSICAL_START
 *   			 = ALIGN_DOWN(KERNELBASE, 256M) -
 *   			   ALIGN_DOWN(PHYSICAL_START, 256M)
 *
 *   Hence:
 *
 *   	__va(x) = x - PHYSICAL_START + Effective KERNELBASE
 *   		= x + virt_phys_offset
 *
 *   		and
 *
 *   	__pa(x) = x + PHYSICAL_START - Effective KERNELBASE
 *   		= x - virt_phys_offset
 *
 *   In the example above, virt_phys_offset = ALIGN_DOWN(0xc0000000, 256M) -
 *   ALIGN_DOWN(0x4000000, 256M) = 0xc0000000 - 0 = 0xc0000000, so
 *   __va(0x100000) = 0x100000 + 0xc0000000 = 0xc0100000, as required.
 *
 * On non-Book-E PPC64 PAGE_OFFSET and MEMORY_START are constants so use
 * the other definitions for __va & __pa.
 */
#if defined(CONFIG_PPC32) && defined(CONFIG_BOOKE)
#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + VIRT_PHYS_OFFSET))
#define __pa(x) ((unsigned long)(x) - VIRT_PHYS_OFFSET)
#else
#ifdef CONFIG_PPC64
/*
 * gcc miscompiles (unsigned long)(&static_var) - PAGE_OFFSET
 * with -mcmodel=medium, so we use & and | instead of - and + on 64-bit.
 */
#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) | PAGE_OFFSET))
#define __pa(x) ((unsigned long)(x) & 0x0fffffffffffffffUL)

#else /* 32-bit, non book E */
#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + PAGE_OFFSET - MEMORY_START))
#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + MEMORY_START)
#endif
#endif
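
/*
 * Worked example for the 64-bit bitwise variant (illustrative only,
 * assuming the usual ppc64 PAGE_OFFSET of 0xc000000000000000): real
 * addresses sit well below bit 60, so OR behaves like + and AND like -:
 *   __va(0x100000) = 0x100000 | 0xc000000000000000
 *                  = 0xc000000000100000
 *   __pa(0xc000000000100000) = 0xc000000000100000 & 0x0fffffffffffffff
 *                            = 0x100000
 */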

/*
 * Unfortunately the PLT is in the BSS in the PPC32 ELF ABI,
 * and needs to be executable.  This means the whole heap ends
 * up being executable.
 */
#define VM_DATA_DEFAULT_FLAGS32 \
	(((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
				 VM_READ | VM_WRITE | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#define VM_DATA_DEFAULT_FLAGS64	(VM_READ | VM_WRITE | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#ifdef __powerpc64__
#include <asm/page_64.h>
#else
#include <asm/page_32.h>
#endif

/* align addr on a size boundary - adjust address up/down if needed */
#define _ALIGN_UP(addr, size)   __ALIGN_KERNEL(addr, size)
#define _ALIGN_DOWN(addr, size)	((addr)&(~((typeof(addr))(size)-1)))

/* align addr on a size boundary - adjust address up if needed */
#define _ALIGN(addr,size)     _ALIGN_UP(addr,size)
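
/*
 * Worked example (illustrative only), aligning to a 4K boundary:
 *   _ALIGN_UP(0x1234, 0x1000)   = 0x2000
 *   _ALIGN_DOWN(0x1234, 0x1000) = 0x1000
 * Already-aligned values are unchanged: _ALIGN_UP(0x2000, 0x1000) = 0x2000.
 */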

/*
 * Don't compare things with KERNELBASE or PAGE_OFFSET to test for
 * "kernelness"; use is_kernel_addr() - it should do what you want.
 */
#ifdef CONFIG_PPC_BOOK3E_64
#define is_kernel_addr(x)	((x) >= 0x8000000000000000ul)
#else
#define is_kernel_addr(x)	((x) >= PAGE_OFFSET)
#endif
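
/*
 * Example (illustrative only): on 64-bit Book3E any address with the top
 * bit set is treated as a kernel address, so
 * is_kernel_addr(0x8000000000000000ul) is true, while a typical user
 * address such as 0x10000000 is not.
 */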

#ifndef CONFIG_PPC_BOOK3S_64
/*
 * Use the top bit of the higher-level page table entries to indicate whether
 * the entries we point to contain hugepages.  This works because we know that
 * the page tables live in kernel space.  If we ever decide to support having
 * page tables at arbitrary addresses, this breaks and will have to change.
 */
#ifdef CONFIG_PPC64
#define PD_HUGE 0x8000000000000000
#else
#define PD_HUGE 0x80000000
#endif

#else	/* CONFIG_PPC_BOOK3S_64 */
/*
 * Book3S 64 stores real addresses in the hugepd entries to
 * avoid overlaps with _PAGE_PRESENT and _PAGE_PTE.
 */
#define HUGEPD_ADDR_MASK	(0x0ffffffffffffffful & ~HUGEPD_SHIFT_MASK)
#endif /* CONFIG_PPC_BOOK3S_64 */

/*
 * In the page table entries that point to a hugepte, some low-order bits
 * encode the huge page size.  This masks those bits.
 */
#define HUGEPD_SHIFT_MASK     0x3f
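
/*
 * Worked value (illustrative only): with HUGEPD_SHIFT_MASK = 0x3f, the
 * Book3S 64 HUGEPD_ADDR_MASK above evaluates to
 *   0x0fffffffffffffff & ~0x3f = 0x0fffffffffffffc0
 * i.e. the entry's real address with the low six size-encoding bits
 * cleared.
 */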

#ifndef __ASSEMBLY__

#ifdef CONFIG_PPC_BOOK3S_64
#include <asm/pgtable-be-types.h>
#else
#include <asm/pgtable-types.h>
#endif

#ifndef CONFIG_HUGETLB_PAGE
#define is_hugepd(pdep)		(0)
#define pgd_huge(pgd)		(0)
#endif /* CONFIG_HUGETLB_PAGE */

struct page;
extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg);
extern void copy_user_page(void *to, void *from, unsigned long vaddr,
		struct page *p);
extern int page_is_ram(unsigned long pfn);
extern int devmem_is_allowed(unsigned long pfn);

#ifdef CONFIG_PPC_SMLPAR
void arch_free_page(struct page *page, int order);
#define HAVE_ARCH_FREE_PAGE
#endif

struct vm_area_struct;
#ifdef CONFIG_PPC_BOOK3S_64
/*
 * For Book3S 64 with 4K and 64K Linux page sizes we want pgtable_t to be
 * a pointer, because the page tables actually store pfns.
 */
typedef pte_t *pgtable_t;
#else
#if defined(CONFIG_PPC_64K_PAGES) && defined(CONFIG_PPC64)
typedef pte_t *pgtable_t;
#else
typedef struct page *pgtable_t;
#endif
#endif

#include <asm-generic/memory_model.h>
#endif /* __ASSEMBLY__ */
#include <asm/slice.h>

#endif /* _ASM_POWERPC_PAGE_H */