/* xref: /openbmc/linux/arch/powerpc/include/asm/page_32.h (revision 3dc4b6fb) */
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _ASM_POWERPC_PAGE_32_H
3 #define _ASM_POWERPC_PAGE_32_H
4 
5 #include <asm/cache.h>
6 
/*
 * Build-time sanity check: if the kernel is configured to load at a
 * non-zero physical address, that address must be a multiple of the
 * configured physical alignment.
 */
#if defined(CONFIG_PHYSICAL_ALIGN) && (CONFIG_PHYSICAL_START != 0)
#if (CONFIG_PHYSICAL_START % CONFIG_PHYSICAL_ALIGN) != 0
#error "CONFIG_PHYSICAL_START must be a multiple of CONFIG_PHYSICAL_ALIGN"
#endif
#endif
12 
/* 32-bit tasks get the 32-bit default VMA flags for data mappings. */
#define VM_DATA_DEFAULT_FLAGS	VM_DATA_DEFAULT_FLAGS32

#ifdef CONFIG_NOT_COHERENT_CACHE
/*
 * On platforms without cache-coherent DMA, DMA buffers must be aligned
 * to a full cache line so that cache maintenance on a buffer cannot
 * corrupt unrelated data sharing the same line.
 */
#define ARCH_DMA_MINALIGN	L1_CACHE_BYTES
#endif
18 
#ifdef CONFIG_PTE_64BIT
/*
 * With 64-bit PTEs the flags live in the second 32-bit word of the
 * entry, hence the 4-byte offset.
 */
#define PTE_FLAGS_OFFSET	4	/* offset of PTE flags, in bytes */
#else
#define PTE_FLAGS_OFFSET	0
#endif

/*
 * PTE_SHIFT is log2 of the number of PTEs held in one page table.
 * Normally a page table occupies a full page; in the listed
 * configurations it only occupies a quarter of one.
 */
#if defined(CONFIG_PPC_256K_PAGES) || \
    (defined(CONFIG_PPC_8xx) && defined(CONFIG_PPC_16K_PAGES))
#define PTE_SHIFT	(PAGE_SHIFT - PTE_T_LOG2 - 2)	/* 1/4 of a page */
#else
#define PTE_SHIFT	(PAGE_SHIFT - PTE_T_LOG2)	/* full page */
#endif
31 
#ifndef __ASSEMBLY__
/*
 * The basic type of a PTE - 64 bits for those CPUs with > 32 bit
 * physical addressing.
 */
#ifdef CONFIG_PTE_64BIT
typedef unsigned long long pte_basic_t;
#else
/* 32-bit physical addressing: a PTE fits in a machine word. */
typedef unsigned long pte_basic_t;
#endif
42 
43 #include <asm/bug.h>
44 
45 /*
46  * Clear page using the dcbz instruction, which doesn't cause any
47  * memory traffic (except to write out any cache lines which get
48  * displaced).  This only works on cacheable memory.
49  */
50 static inline void clear_page(void *addr)
51 {
52 	unsigned int i;
53 
54 	WARN_ON((unsigned long)addr & (L1_CACHE_BYTES - 1));
55 
56 	for (i = 0; i < PAGE_SIZE / L1_CACHE_BYTES; i++, addr += L1_CACHE_BYTES)
57 		dcbz(addr);
58 }
/* Copy one page to another; implementation provided elsewhere. */
extern void copy_page(void *to, void *from);
60 
61 #include <asm-generic/getorder.h>
62 
/*
 * log2 of the entry sizes, computed from the types themselves:
 * for a power-of-two sizeof, __builtin_ffs(sizeof(x)) - 1 == log2(sizeof(x)).
 */
#define PGD_T_LOG2	(__builtin_ffs(sizeof(pgd_t)) - 1)
#define PTE_T_LOG2	(__builtin_ffs(sizeof(pte_t)) - 1)
65 
66 #endif /* __ASSEMBLY__ */
67 
68 #endif /* _ASM_POWERPC_PAGE_32_H */
69