xref: /openbmc/linux/arch/powerpc/include/asm/page_32.h (revision 2612e3bbc0386368a850140a6c9b990cd496a5ec)
1b2441318SGreg Kroah-Hartman /* SPDX-License-Identifier: GPL-2.0 */
2b8b572e1SStephen Rothwell #ifndef _ASM_POWERPC_PAGE_32_H
3b8b572e1SStephen Rothwell #define _ASM_POWERPC_PAGE_32_H
4b8b572e1SStephen Rothwell 
55736f96dSChristophe Leroy #include <asm/cache.h>
65736f96dSChristophe Leroy 
/*
 * Build-time sanity check: when the kernel's physical load address is
 * made configurable, it must honour the configured alignment.  Catch a
 * bad Kconfig combination here rather than at boot.
 */
#if defined(CONFIG_PHYSICAL_ALIGN) && (CONFIG_PHYSICAL_START != 0)
#if (CONFIG_PHYSICAL_START % CONFIG_PHYSICAL_ALIGN) != 0
#error "CONFIG_PHYSICAL_START must be a multiple of CONFIG_PHYSICAL_ALIGN"
#endif
#endif
12b8b572e1SStephen Rothwell 
13b8b572e1SStephen Rothwell #define VM_DATA_DEFAULT_FLAGS	VM_DATA_DEFAULT_FLAGS32
14b8b572e1SStephen Rothwell 
/*
 * PTE_SHIFT is log2 of the number of PTEs held in one page-table page:
 * PAGE_SHIFT - PTE_T_LOG2 gives log2(PAGE_SIZE / sizeof(pte_t)).  The
 * first branch subtracts 2 more, so the PTE table occupies only a
 * quarter of a page on those configurations.
 */
#if defined(CONFIG_PPC_256K_PAGES) || \
    (defined(CONFIG_PPC_8xx) && defined(CONFIG_PPC_16K_PAGES))
#define PTE_SHIFT	(PAGE_SHIFT - PTE_T_LOG2 - 2)	/* 1/4 of a page */
#else
#define PTE_SHIFT	(PAGE_SHIFT - PTE_T_LOG2)	/* full page */
#endif
21ca9153a3SIlya Yanok 
22b8b572e1SStephen Rothwell #ifndef __ASSEMBLY__
23b8b572e1SStephen Rothwell /*
24b8b572e1SStephen Rothwell  * The basic type of a PTE - 64 bits for those CPUs with > 32 bit
254ee7084eSBecky Bruce  * physical addressing.
26b8b572e1SStephen Rothwell  */
#ifdef CONFIG_PTE_64BIT
/* 64-bit PTEs: needed when physical addresses exceed 32 bits. */
typedef unsigned long long pte_basic_t;
#else
/* 32-bit PTEs suffice for 32-bit physical addressing. */
typedef unsigned long pte_basic_t;
#endif
32b8b572e1SStephen Rothwell 
33*7ab0b7cbSChristophe Leroy #include <asm/bug.h>
34*7ab0b7cbSChristophe Leroy 
355736f96dSChristophe Leroy /*
365736f96dSChristophe Leroy  * Clear page using the dcbz instruction, which doesn't cause any
375736f96dSChristophe Leroy  * memory traffic (except to write out any cache lines which get
385736f96dSChristophe Leroy  * displaced).  This only works on cacheable memory.
395736f96dSChristophe Leroy  */
clear_page(void * addr)405736f96dSChristophe Leroy static inline void clear_page(void *addr)
415736f96dSChristophe Leroy {
425736f96dSChristophe Leroy 	unsigned int i;
435736f96dSChristophe Leroy 
44*7ab0b7cbSChristophe Leroy 	WARN_ON((unsigned long)addr & (L1_CACHE_BYTES - 1));
45*7ab0b7cbSChristophe Leroy 
465736f96dSChristophe Leroy 	for (i = 0; i < PAGE_SIZE / L1_CACHE_BYTES; i++, addr += L1_CACHE_BYTES)
475736f96dSChristophe Leroy 		dcbz(addr);
485736f96dSChristophe Leroy }
49b8b572e1SStephen Rothwell extern void copy_page(void *to, void *from);
50b8b572e1SStephen Rothwell 
515b17e1cdSArnd Bergmann #include <asm-generic/getorder.h>
52b8b572e1SStephen Rothwell 
/*
 * log2 of the entry sizes, computed from the position of the lowest
 * set bit: for a power-of-two sizeof, __builtin_ffs(x) - 1 == log2(x).
 */
#define PGD_T_LOG2	(__builtin_ffs(sizeof(pgd_t)) - 1)
#define PTE_T_LOG2	(__builtin_ffs(sizeof(pte_t)) - 1)
55ca9153a3SIlya Yanok 
56b8b572e1SStephen Rothwell #endif /* __ASSEMBLY__ */
57b8b572e1SStephen Rothwell 
58b8b572e1SStephen Rothwell #endif /* _ASM_POWERPC_PAGE_32_H */
59