xref: /openbmc/linux/arch/arm/include/asm/page.h (revision 2612e3bbc0386368a850140a6c9b990cd496a5ec)
1d2912cb1SThomas Gleixner /* SPDX-License-Identifier: GPL-2.0-only */
24baa9922SRussell King /*
34baa9922SRussell King  *  arch/arm/include/asm/page.h
44baa9922SRussell King  *
54baa9922SRussell King  *  Copyright (C) 1995-2003 Russell King
64baa9922SRussell King  */
74baa9922SRussell King #ifndef _ASMARM_PAGE_H
84baa9922SRussell King #define _ASMARM_PAGE_H
94baa9922SRussell King 
104baa9922SRussell King /* PAGE_SHIFT determines the page size */
114baa9922SRussell King #define PAGE_SHIFT		12
/*
 * _AC() drops the UL suffix when this header is pulled into assembly,
 * so PAGE_SIZE is usable from both C and .S files.
 */
12f6430a93SLinus Walleij #define PAGE_SIZE		(_AC(1,UL) << PAGE_SHIFT)
/*
 * Mask selecting the page-frame bits of an address.
 * NOTE(review): uses a plain int expression rather than _AC(); presumably
 * adequate for 32-bit ARM addresses — confirm if used from assembly.
 */
13926edcc7SCyril Chemparathy #define PAGE_MASK		(~((1 << PAGE_SHIFT) - 1))
144baa9922SRussell King 
154baa9922SRussell King #ifndef __ASSEMBLY__
164baa9922SRussell King 
174baa9922SRussell King #ifndef CONFIG_MMU
184baa9922SRussell King 
19a1ce3928SDavid Howells #include <asm/page-nommu.h>
204baa9922SRussell King 
214baa9922SRussell King #else
224baa9922SRussell King 
234baa9922SRussell King #include <asm/glue.h>
244baa9922SRussell King 
254baa9922SRussell King /*
264baa9922SRussell King  *	User Space Model
274baa9922SRussell King  *	================
284baa9922SRussell King  *
294baa9922SRussell King  *	This section selects the correct set of functions for dealing with
304baa9922SRussell King  *	page-based copying and clearing for user space for the particular
314baa9922SRussell King  *	processor(s) we're building for.
324baa9922SRussell King  *
334baa9922SRussell King  *	We have the following to choose from:
344baa9922SRussell King  *	  v4wt		- ARMv4 with writethrough cache, without minicache
354baa9922SRussell King  *	  v4wb		- ARMv4 with writeback cache, without minicache
364baa9922SRussell King  *	  v4_mc		- ARMv4 with minicache
374baa9922SRussell King  *	  xscale	- Xscale
384baa9922SRussell King  *	  xsc3		- XScalev3
394baa9922SRussell King  */
404baa9922SRussell King #undef _USER
414baa9922SRussell King #undef MULTI_USER
424baa9922SRussell King 
/*
 * Every enabled copy implementation below follows the same pattern:
 * the first one selected sets _USER to its name, binding the user-page
 * operations directly at build time; if a second one is also enabled
 * (_USER already defined), MULTI_USER is set instead and dispatch
 * happens at runtime through the cpu_user function-pointer table.
 */
434baa9922SRussell King #ifdef CONFIG_CPU_COPY_V4WT
444baa9922SRussell King # ifdef _USER
454baa9922SRussell King #  define MULTI_USER 1
464baa9922SRussell King # else
474baa9922SRussell King #  define _USER v4wt
484baa9922SRussell King # endif
494baa9922SRussell King #endif
504baa9922SRussell King 
514baa9922SRussell King #ifdef CONFIG_CPU_COPY_V4WB
524baa9922SRussell King # ifdef _USER
534baa9922SRussell King #  define MULTI_USER 1
544baa9922SRussell King # else
554baa9922SRussell King #  define _USER v4wb
564baa9922SRussell King # endif
574baa9922SRussell King #endif
584baa9922SRussell King 
594baa9922SRussell King #ifdef CONFIG_CPU_COPY_FEROCEON
604baa9922SRussell King # ifdef _USER
614baa9922SRussell King #  define MULTI_USER 1
624baa9922SRussell King # else
634baa9922SRussell King #  define _USER feroceon
644baa9922SRussell King # endif
654baa9922SRussell King #endif
664baa9922SRussell King 
6728853ac8SPaulius Zaleckas #ifdef CONFIG_CPU_COPY_FA
6828853ac8SPaulius Zaleckas # ifdef _USER
6928853ac8SPaulius Zaleckas #  define MULTI_USER 1
7028853ac8SPaulius Zaleckas # else
7128853ac8SPaulius Zaleckas #  define _USER fa
7228853ac8SPaulius Zaleckas # endif
7328853ac8SPaulius Zaleckas #endif
7428853ac8SPaulius Zaleckas 
754baa9922SRussell King #ifdef CONFIG_CPU_SA1100
764baa9922SRussell King # ifdef _USER
774baa9922SRussell King #  define MULTI_USER 1
784baa9922SRussell King # else
794baa9922SRussell King #  define _USER v4_mc
804baa9922SRussell King # endif
814baa9922SRussell King #endif
824baa9922SRussell King 
834baa9922SRussell King #ifdef CONFIG_CPU_XSCALE
844baa9922SRussell King # ifdef _USER
854baa9922SRussell King #  define MULTI_USER 1
864baa9922SRussell King # else
874baa9922SRussell King #  define _USER xscale_mc
884baa9922SRussell King # endif
894baa9922SRussell King #endif
904baa9922SRussell King 
914baa9922SRussell King #ifdef CONFIG_CPU_XSC3
924baa9922SRussell King # ifdef _USER
934baa9922SRussell King #  define MULTI_USER 1
944baa9922SRussell King # else
954baa9922SRussell King #  define _USER xsc3_mc
964baa9922SRussell King # endif
974baa9922SRussell King #endif
984baa9922SRussell King 
/* The V6 copy code always uses the runtime-dispatch path. */
994baa9922SRussell King #ifdef CONFIG_CPU_COPY_V6
1004baa9922SRussell King # define MULTI_USER 1
1014baa9922SRussell King #endif
1024baa9922SRussell King 
/* At least one implementation must have been selected by the config. */
1034baa9922SRussell King #if !defined(_USER) && !defined(MULTI_USER)
1044baa9922SRussell King #error Unknown user operations model
1054baa9922SRussell King #endif
1064baa9922SRussell King 
107063b0a42SRussell King struct page;
108f00a75c0SRussell King struct vm_area_struct;
109063b0a42SRussell King 
/*
 * Function-pointer table for the user-page operations, used when more
 * than one implementation is built in (MULTI_USER) so the right one can
 * be chosen at runtime via the 'cpu_user' instance declared below.
 */
1104baa9922SRussell King struct cpu_user_fns {
	/* Zero a (possibly high) page that maps to user address 'vaddr'. */
111303c6443SRussell King 	void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
	/* Copy page 'from' to 'to' for the user mapping at 'vaddr' in 'vma'. */
112063b0a42SRussell King 	void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
113f00a75c0SRussell King 			unsigned long vaddr, struct vm_area_struct *vma);
1144baa9922SRussell King };
1154baa9922SRussell King 
/*
 * Prototypes for every per-CPU implementation pair.  When only one
 * implementation is configured, the __glue(_USER, ...) expansion below
 * binds __cpu_{copy,clear}_user_highpage directly to one of these.
 */
116*34bde7f2SArnd Bergmann void fa_copy_user_highpage(struct page *to, struct page *from,
117*34bde7f2SArnd Bergmann 	unsigned long vaddr, struct vm_area_struct *vma);
118*34bde7f2SArnd Bergmann void fa_clear_user_highpage(struct page *page, unsigned long vaddr);
119*34bde7f2SArnd Bergmann void feroceon_copy_user_highpage(struct page *to, struct page *from,
120*34bde7f2SArnd Bergmann 	unsigned long vaddr, struct vm_area_struct *vma);
121*34bde7f2SArnd Bergmann void feroceon_clear_user_highpage(struct page *page, unsigned long vaddr);
122*34bde7f2SArnd Bergmann void v4_mc_copy_user_highpage(struct page *to, struct page *from,
123*34bde7f2SArnd Bergmann 	unsigned long vaddr, struct vm_area_struct *vma);
124*34bde7f2SArnd Bergmann void v4_mc_clear_user_highpage(struct page *page, unsigned long vaddr);
125*34bde7f2SArnd Bergmann void v4wb_copy_user_highpage(struct page *to, struct page *from,
126*34bde7f2SArnd Bergmann 	unsigned long vaddr, struct vm_area_struct *vma);
127*34bde7f2SArnd Bergmann void v4wb_clear_user_highpage(struct page *page, unsigned long vaddr);
128*34bde7f2SArnd Bergmann void v4wt_copy_user_highpage(struct page *to, struct page *from,
129*34bde7f2SArnd Bergmann 	unsigned long vaddr, struct vm_area_struct *vma);
130*34bde7f2SArnd Bergmann void v4wt_clear_user_highpage(struct page *page, unsigned long vaddr);
131*34bde7f2SArnd Bergmann void xsc3_mc_copy_user_highpage(struct page *to, struct page *from,
132*34bde7f2SArnd Bergmann 	unsigned long vaddr, struct vm_area_struct *vma);
133*34bde7f2SArnd Bergmann void xsc3_mc_clear_user_highpage(struct page *page, unsigned long vaddr);
134*34bde7f2SArnd Bergmann void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
135*34bde7f2SArnd Bergmann 	unsigned long vaddr, struct vm_area_struct *vma);
136*34bde7f2SArnd Bergmann void xscale_mc_clear_user_highpage(struct page *page, unsigned long vaddr);
137*34bde7f2SArnd Bergmann 
1384baa9922SRussell King #ifdef MULTI_USER
/* Several implementations built in: dispatch at runtime through cpu_user. */
1394baa9922SRussell King extern struct cpu_user_fns cpu_user;
1404baa9922SRussell King 
141303c6443SRussell King #define __cpu_clear_user_highpage	cpu_user.cpu_clear_user_highpage
142063b0a42SRussell King #define __cpu_copy_user_highpage	cpu_user.cpu_copy_user_highpage
1434baa9922SRussell King 
1444baa9922SRussell King #else
1454baa9922SRussell King 
/*
 * Single implementation: __glue() (from <asm/glue.h>) pastes _USER onto
 * the operation name, e.g. v4wb_clear_user_highpage, for a direct call.
 */
146303c6443SRussell King #define __cpu_clear_user_highpage	__glue(_USER,_clear_user_highpage)
147063b0a42SRussell King #define __cpu_copy_user_highpage	__glue(_USER,_copy_user_highpage)
1484baa9922SRussell King 
149303c6443SRussell King extern void __cpu_clear_user_highpage(struct page *page, unsigned long vaddr);
150063b0a42SRussell King extern void __cpu_copy_user_highpage(struct page *to, struct page *from,
151f00a75c0SRussell King 			unsigned long vaddr, struct vm_area_struct *vma);
1524baa9922SRussell King #endif
1534baa9922SRussell King 
/* Hooks consumed by the generic highmem code; both route to the
 * implementation selected above. */
154303c6443SRussell King #define clear_user_highpage(page,vaddr)		\
155303c6443SRussell King 	 __cpu_clear_user_highpage(page, vaddr)
156063b0a42SRussell King 
/* Tell the generic code this arch provides its own copy_user_highpage(). */
157063b0a42SRussell King #define __HAVE_ARCH_COPY_USER_HIGHPAGE
158063b0a42SRussell King #define copy_user_highpage(to,from,vaddr,vma)	\
159f00a75c0SRussell King 	__cpu_copy_user_highpage(to, from, vaddr, vma)
1604baa9922SRussell King 
/* Kernel-address page operations: plain memset, plus an optimised
 * assembly copy_page() defined elsewhere. */
16159f0cb0fSRussell King #define clear_page(page)	memset((void *)(page), 0, PAGE_SIZE)
1624baa9922SRussell King extern void copy_page(void *to, const void *from);
1634baa9922SRussell King 
/* NOTE(review): presumably the kuser-helpers page is exported to user
 * space as a gate area — confirm against mm/gate handling. */
164a5463cd3SRussell King #ifdef CONFIG_KUSER_HELPERS
165f9d4861fSWill Deacon #define __HAVE_ARCH_GATE_AREA 1
166a5463cd3SRussell King #endif
167f9d4861fSWill Deacon 
/* Page-table element types: 3 levels under LPAE, classic 2 levels otherwise. */
168dcfdae04SCatalin Marinas #ifdef CONFIG_ARM_LPAE
169dcfdae04SCatalin Marinas #include <asm/pgtable-3level-types.h>
170dcfdae04SCatalin Marinas #else
17117f57211SCatalin Marinas #include <asm/pgtable-2level-types.h>
/* NOTE(review): with vmalloc'd stacks, PMD-level modifications apparently
 * need propagating to other page tables — verify against vmalloc_fault. */
172a1c510d0SArd Biesheuvel #ifdef CONFIG_VMAP_STACK
173a1c510d0SArd Biesheuvel #define ARCH_PAGE_TABLE_SYNC_MASK	PGTBL_PMD_MODIFIED
174a1c510d0SArd Biesheuvel #endif
175d31e23afSArd Biesheuvel #endif
176a1c510d0SArd Biesheuvel 
1774baa9922SRussell King #endif /* CONFIG_MMU */
1784baa9922SRussell King 
/* A page-table page is referenced by its struct page. */
1794baa9922SRussell King typedef struct page *pgtable_t;
1804baa9922SRussell King 
/* Arch-provided pfn_valid() overrides the generic memory-model version. */
1817b7bf499SWill Deacon #ifdef CONFIG_HAVE_ARCH_PFN_VALID
182b7cfda9fSRussell King extern int pfn_valid(unsigned long);
1836069b9ecSMike Rapoport (IBM) #define pfn_valid pfn_valid
184b7cfda9fSRussell King #endif
185b7cfda9fSRussell King 
1864baa9922SRussell King #endif /* !__ASSEMBLY__ */
1874baa9922SRussell King 
188a9ff6961SLinus Walleij #include <asm/memory.h>
189a9ff6961SLinus Walleij 
190c62da0c3SAnshuman Khandual #define VM_DATA_DEFAULT_FLAGS	VM_DATA_FLAGS_TSK_EXEC
1914baa9922SRussell King 
1925b17e1cdSArnd Bergmann #include <asm-generic/getorder.h>
1936069b9ecSMike Rapoport (IBM) #include <asm-generic/memory_model.h>
1944baa9922SRussell King 
1954baa9922SRussell King #endif
196