xref: /openbmc/linux/arch/powerpc/include/asm/page_64.h (revision ec0c464cdbf38bf6ddabec8bfa595bd421cab203)
1b8b572e1SStephen Rothwell #ifndef _ASM_POWERPC_PAGE_64_H
2b8b572e1SStephen Rothwell #define _ASM_POWERPC_PAGE_64_H
3b8b572e1SStephen Rothwell 
4b8b572e1SStephen Rothwell /*
5b8b572e1SStephen Rothwell  * Copyright (C) 2001 PPC64 Team, IBM Corp
6b8b572e1SStephen Rothwell  *
7b8b572e1SStephen Rothwell  * This program is free software; you can redistribute it and/or
8b8b572e1SStephen Rothwell  * modify it under the terms of the GNU General Public License
9b8b572e1SStephen Rothwell  * as published by the Free Software Foundation; either version
10b8b572e1SStephen Rothwell  * 2 of the License, or (at your option) any later version.
11b8b572e1SStephen Rothwell  */
12b8b572e1SStephen Rothwell 
13*ec0c464cSChristophe Leroy #include <asm/asm-const.h>
14*ec0c464cSChristophe Leroy 
15b8b572e1SStephen Rothwell /*
16b8b572e1SStephen Rothwell  * We always define HW_PAGE_SHIFT to 12 as use of 64K pages remains Linux
17b8b572e1SStephen Rothwell  * specific, every notion of page number shared with the firmware, TCEs,
18b8b572e1SStephen Rothwell  * iommu, etc... still uses a page size of 4K.
19b8b572e1SStephen Rothwell  */
20b8b572e1SStephen Rothwell #define HW_PAGE_SHIFT		12
21b8b572e1SStephen Rothwell #define HW_PAGE_SIZE		(ASM_CONST(1) << HW_PAGE_SHIFT)
22b8b572e1SStephen Rothwell #define HW_PAGE_MASK		(~(HW_PAGE_SIZE-1))
23b8b572e1SStephen Rothwell 
24b8b572e1SStephen Rothwell /*
25b8b572e1SStephen Rothwell  * PAGE_FACTOR is the number of bits factor between PAGE_SHIFT and
26b8b572e1SStephen Rothwell  * HW_PAGE_SHIFT, that is 4K pages.
27b8b572e1SStephen Rothwell  */
28b8b572e1SStephen Rothwell #define PAGE_FACTOR		(PAGE_SHIFT - HW_PAGE_SHIFT)
29b8b572e1SStephen Rothwell 
30b8b572e1SStephen Rothwell /* Segment size; normal 256M segments */
31b8b572e1SStephen Rothwell #define SID_SHIFT		28
32b8b572e1SStephen Rothwell #define SID_MASK		ASM_CONST(0xfffffffff)
33b8b572e1SStephen Rothwell #define ESID_MASK		0xfffffffff0000000UL
34b8b572e1SStephen Rothwell #define GET_ESID(x)		(((x) >> SID_SHIFT) & SID_MASK)
35b8b572e1SStephen Rothwell 
36b8b572e1SStephen Rothwell /* 1T segments */
37b8b572e1SStephen Rothwell #define SID_SHIFT_1T		40
38b8b572e1SStephen Rothwell #define SID_MASK_1T		0xffffffUL
39b8b572e1SStephen Rothwell #define ESID_MASK_1T		0xffffff0000000000UL
40b8b572e1SStephen Rothwell #define GET_ESID_1T(x)		(((x) >> SID_SHIFT_1T) & SID_MASK_1T)
41b8b572e1SStephen Rothwell 
42b8b572e1SStephen Rothwell #ifndef __ASSEMBLY__
43b8b572e1SStephen Rothwell #include <asm/cache.h>
44b8b572e1SStephen Rothwell 
45b8b572e1SStephen Rothwell typedef unsigned long pte_basic_t;
46b8b572e1SStephen Rothwell 
/*
 * clear_page() - zero one page of memory starting at @addr.
 *
 * Clears the page one L1 data-cache block at a time with the "dcbz"
 * (data cache block zero) instruction, eight blocks per loop pass.
 * The iteration count is blocks_per_page / 8, so the page is assumed
 * to span a multiple of eight cache blocks — NOTE(review): presumably
 * guaranteed by how ppc64_caches is populated; confirm at the setup
 * site in arch/powerpc/kernel/setup_64.c.
 */
47e35735b9SAnton Blanchard static inline void clear_page(void *addr)
48b8b572e1SStephen Rothwell {
49e35735b9SAnton Blanchard 	unsigned long iterations;
	/* Precomputed block offsets: 1x, 2x, 4x and 8x the cache block size. */
50e35735b9SAnton Blanchard 	unsigned long onex, twox, fourx, eightx;
51b8b572e1SStephen Rothwell 
	/* Eight dcbz instructions per loop pass, hence the divide by 8. */
52e2827fe5SBenjamin Herrenschmidt 	iterations = ppc64_caches.l1d.blocks_per_page / 8;
53b8b572e1SStephen Rothwell 
54e35735b9SAnton Blanchard 	/*
55e35735b9SAnton Blanchard 	 * Some versions of gcc use multiply instructions to
56e35735b9SAnton Blanchard 	 * calculate the offsets so lets give it a hand to
57e35735b9SAnton Blanchard 	 * do better.
58e35735b9SAnton Blanchard 	 */
59e2827fe5SBenjamin Herrenschmidt 	onex = ppc64_caches.l1d.block_size;
60e35735b9SAnton Blanchard 	twox = onex << 1;
61e35735b9SAnton Blanchard 	fourx = onex << 2;
62e35735b9SAnton Blanchard 	eightx = onex << 3;
63e35735b9SAnton Blanchard 
	/*
	 * Unrolled loop: each pass zeroes eight consecutive cache blocks at
	 * offsets 0, 1x..7x from %0, then advances %0 by eight blocks (%10).
	 * mtctr/bdnz drive the loop, hence the "ctr" clobber; the sums in the
	 * input list (twox+onex = 3x, fourx+onex = 5x, twox+fourx = 6x,
	 * eightx-onex = 7x) supply the remaining offsets.  The offsets use
	 * "b" constraints because dcbz's RA operand cannot be r0.  "memory"
	 * is clobbered since the asm stores to the page contents.
	 */
64e35735b9SAnton Blanchard 	asm volatile(
65b8b572e1SStephen Rothwell 	"mtctr	%1	# clear_page\n\
66e35735b9SAnton Blanchard 	.balign	16\n\
67b8b572e1SStephen Rothwell 1:	dcbz	0,%0\n\
68e35735b9SAnton Blanchard 	dcbz	%3,%0\n\
69e35735b9SAnton Blanchard 	dcbz	%4,%0\n\
70e35735b9SAnton Blanchard 	dcbz	%5,%0\n\
71e35735b9SAnton Blanchard 	dcbz	%6,%0\n\
72e35735b9SAnton Blanchard 	dcbz	%7,%0\n\
73e35735b9SAnton Blanchard 	dcbz	%8,%0\n\
74e35735b9SAnton Blanchard 	dcbz	%9,%0\n\
75e35735b9SAnton Blanchard 	add	%0,%0,%10\n\
76b8b572e1SStephen Rothwell 	bdnz+	1b"
77e35735b9SAnton Blanchard 	: "=&r" (addr)
78e35735b9SAnton Blanchard 	: "r" (iterations), "0" (addr), "b" (onex), "b" (twox),
79e35735b9SAnton Blanchard 		"b" (twox+onex), "b" (fourx), "b" (fourx+onex),
80e35735b9SAnton Blanchard 		"b" (twox+fourx), "b" (eightx-onex), "r" (eightx)
81b8b572e1SStephen Rothwell 	: "ctr", "memory");
82b8b572e1SStephen Rothwell }
83b8b572e1SStephen Rothwell 
84d988f0e3SAnton Blanchard extern void copy_page(void *to, void *from);
85b8b572e1SStephen Rothwell 
86b8b572e1SStephen Rothwell /* Log 2 of page table size */
87b8b572e1SStephen Rothwell extern u64 ppc64_pft_size;
88b8b572e1SStephen Rothwell 
89b8b572e1SStephen Rothwell #endif /* __ASSEMBLY__ */
90b8b572e1SStephen Rothwell 
91b8b572e1SStephen Rothwell #define VM_DATA_DEFAULT_FLAGS \
92cab175f9SDenis Kirjanov 	(is_32bit_task() ? \
93b8b572e1SStephen Rothwell 	 VM_DATA_DEFAULT_FLAGS32 : VM_DATA_DEFAULT_FLAGS64)
94b8b572e1SStephen Rothwell 
95b8b572e1SStephen Rothwell /*
96b8b572e1SStephen Rothwell  * This is the default if a program doesn't have a PT_GNU_STACK
97b8b572e1SStephen Rothwell  * program header entry. The PPC64 ELF ABI has a non executable
9825985edcSLucas De Marchi  * stack by default, so in the absence of a PT_GNU_STACK program header
99b8b572e1SStephen Rothwell  * we turn execute permission off.
100b8b572e1SStephen Rothwell  */
101b8b572e1SStephen Rothwell #define VM_STACK_DEFAULT_FLAGS32	(VM_READ | VM_WRITE | VM_EXEC | \
102b8b572e1SStephen Rothwell 					 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
103b8b572e1SStephen Rothwell 
104b8b572e1SStephen Rothwell #define VM_STACK_DEFAULT_FLAGS64	(VM_READ | VM_WRITE | \
105b8b572e1SStephen Rothwell 					 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
106b8b572e1SStephen Rothwell 
107b8b572e1SStephen Rothwell #define VM_STACK_DEFAULT_FLAGS \
108cab175f9SDenis Kirjanov 	(is_32bit_task() ? \
109b8b572e1SStephen Rothwell 	 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
110b8b572e1SStephen Rothwell 
1115b17e1cdSArnd Bergmann #include <asm-generic/getorder.h>
112b8b572e1SStephen Rothwell 
113b8b572e1SStephen Rothwell #endif /* _ASM_POWERPC_PAGE_64_H */
114