/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_PAGE_64_H
#define _ASM_POWERPC_PAGE_64_H

/*
 * Copyright (C) 2001 PPC64 Team, IBM Corp
 */

#include <asm/asm-const.h>

/*
 * We always define HW_PAGE_SHIFT to 12 because the use of 64K pages
 * remains Linux specific: every notion of page number shared with the
 * firmware, TCEs, iommu, etc... still uses a page size of 4K.
 */
#define HW_PAGE_SHIFT		12
#define HW_PAGE_SIZE		(ASM_CONST(1) << HW_PAGE_SHIFT)
#define HW_PAGE_MASK		(~(HW_PAGE_SIZE-1))

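/*
 * Worked example (illustrative only, not used by the code): with
 * HW_PAGE_SHIFT == 12,
 *
 *	HW_PAGE_SIZE == 0x1000
 *	HW_PAGE_MASK == 0xfffffffffffff000
 *
 * so rounding an address down to its 4K hardware page is simply
 *
 *	hw_page_base = addr & HW_PAGE_MASK;
 */
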
/*
 * PAGE_FACTOR is the number of bits of difference between PAGE_SHIFT
 * and HW_PAGE_SHIFT, i.e. the log2 of the number of 4K hardware pages
 * per Linux page.
 */
#define PAGE_FACTOR		(PAGE_SHIFT - HW_PAGE_SHIFT)

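/*
 * Illustrative only: with 64K Linux pages (PAGE_SHIFT == 16),
 * PAGE_FACTOR == 4, so one Linux page covers 1 << PAGE_FACTOR == 16
 * hardware 4K pages; with 4K Linux pages, PAGE_FACTOR == 0.
 */
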
/* Segment size; normal 256M segments */
#define SID_SHIFT		28
#define SID_MASK		ASM_CONST(0xfffffffff)
#define ESID_MASK		0xfffffffff0000000UL
#define GET_ESID(x)		(((x) >> SID_SHIFT) & SID_MASK)

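/*
 * Worked example (illustrative only): for an effective address
 * ea == 0x1234567890UL,
 *
 *	GET_ESID(ea) == 0x123			(the 256M segment number)
 *	(ea & ESID_MASK) == 0x1230000000	(the segment base address)
 */
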
/* 1T segments */
#define SID_SHIFT_1T		40
#define SID_MASK_1T		0xffffffUL
#define ESID_MASK_1T		0xffffff0000000000UL
#define GET_ESID_1T(x)		(((x) >> SID_SHIFT_1T) & SID_MASK_1T)

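/*
 * Worked example (illustrative only): for an effective address
 * ea == 0x123456789abcdef0UL,
 *
 *	GET_ESID_1T(ea) == 0x123456		(the 1T segment number)
 *	(ea & ESID_MASK_1T) == 0x1234560000000000
 */
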
#ifndef __ASSEMBLY__
#include <asm/cache.h>

typedef unsigned long pte_basic_t;

static inline void clear_page(void *addr)
{
	unsigned long iterations;
	unsigned long onex, twox, fourx, eightx;

	iterations = ppc64_caches.l1d.blocks_per_page / 8;

	/*
	 * Some versions of gcc use multiply instructions to
	 * calculate the offsets, so let's give it a hand to
	 * do better.
	 */
	onex = ppc64_caches.l1d.block_size;
	twox = onex << 1;
	fourx = onex << 2;
	eightx = onex << 3;

	asm volatile(
	"mtctr	%1	# clear_page\n\
	.balign	16\n\
1:	dcbz	0,%0\n\
	dcbz	%3,%0\n\
	dcbz	%4,%0\n\
	dcbz	%5,%0\n\
	dcbz	%6,%0\n\
	dcbz	%7,%0\n\
	dcbz	%8,%0\n\
	dcbz	%9,%0\n\
	add	%0,%0,%10\n\
	bdnz+	1b"
	: "=&r" (addr)
	: "r" (iterations), "0" (addr), "b" (onex), "b" (twox),
		"b" (twox+onex), "b" (fourx), "b" (fourx+onex),
		"b" (twox+fourx), "b" (eightx-onex), "r" (eightx)
	: "ctr", "memory");
}

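/*
 * Illustrative sketch only (not part of this header's API): the inline
 * assembly above is roughly equivalent to the following C, assuming a
 * hypothetical helper cacheline_zero() that zeroes one L1 data-cache
 * block:
 *
 *	for (i = 0; i < ppc64_caches.l1d.blocks_per_page; i++)
 *		cacheline_zero(addr + i * ppc64_caches.l1d.block_size);
 *
 * dcbz zeroes an entire cache block without reading it from memory
 * first, and the loop is unrolled eight blocks per iteration to reduce
 * branch overhead, which is why iterations is blocks_per_page / 8.
 */
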
extern void copy_page(void *to, void *from);

/* Log 2 of page table size */
extern u64 ppc64_pft_size;

#endif /* __ASSEMBLY__ */

#define VM_DATA_DEFAULT_FLAGS \
	(is_32bit_task() ? \
	 VM_DATA_DEFAULT_FLAGS32 : VM_DATA_DEFAULT_FLAGS64)

/*
 * This is the default if a program doesn't have a PT_GNU_STACK
 * program header entry. The PPC64 ELF ABI has a non-executable
 * stack by default, so in the absence of a PT_GNU_STACK program
 * header we turn execute permission off.
 */
#define VM_STACK_DEFAULT_FLAGS32	VM_DATA_FLAGS_EXEC
#define VM_STACK_DEFAULT_FLAGS64	VM_DATA_FLAGS_NON_EXEC

#define VM_STACK_DEFAULT_FLAGS \
	(is_32bit_task() ? \
	 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)

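/*
 * Illustrative only: a 64-bit task therefore gets a stack mapping
 * without VM_EXEC by default, e.g.
 *
 *	vm_flags_t flags = VM_STACK_DEFAULT_FLAGS;	// == VM_DATA_FLAGS_NON_EXEC
 *
 * while a 32-bit task keeps VM_EXEC in its stack flags for
 * compatibility with binaries that lack a PT_GNU_STACK header.
 */
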
#include <asm-generic/getorder.h>

#endif /* _ASM_POWERPC_PAGE_64_H */