xref: /openbmc/linux/arch/arm64/include/asm/mmu.h (revision 9e255e2b)
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3  * Copyright (C) 2012 ARM Ltd.
4  */
5 #ifndef __ASM_MMU_H
6 #define __ASM_MMU_H
7 
8 #include <asm/cputype.h>
9 
#define MMCF_AARCH32	0x1	/* mm context flag for AArch32 executables */

/*
 * The hardware ASID lives in TTBR1 bits [63:48] (a 16-bit field, hence
 * the 0xffff mask). USER_ASID_BIT selects the single bit used to
 * distinguish the user ASID; derive both masks from it so the three
 * definitions cannot fall out of sync if the bit position ever moves.
 */
#define USER_ASID_BIT	48
#define USER_ASID_FLAG	(UL(1) << USER_ASID_BIT)
#define TTBR_ASID_MASK	(UL(0xffff) << USER_ASID_BIT)
14 
15 #ifndef __ASSEMBLY__
16 
17 #include <linux/refcount.h>
18 
/*
 * Per-address-space MMU context, embedded in struct mm_struct.
 */
typedef struct {
	atomic64_t	id;		/* ASID in the low 16 bits (see ASID() below);
					 * upper bits presumably carry allocator state —
					 * confirm against the ASID allocator. */
#ifdef CONFIG_COMPAT
	void		*sigpage;	/* NOTE(review): likely the AArch32 signal
					 * trampoline page — verify against compat setup. */
#endif
	refcount_t	pinned;		/* refcount; what pins the context is not
					 * visible here — see the ASID pinning code. */
	void		*vdso;		/* vDSO mapping for this mm */
	unsigned long	flags;		/* MMCF_* flags, e.g. MMCF_AARCH32 */
} mm_context_t;
28 
/*
 * This macro is only used by the TLBI and low-level switch_mm() code,
 * neither of which can race with an ASID change. We therefore don't
 * need to reload the counter using atomic64_read().
 *
 * Only the low 16 bits of context.id are the hardware ASID; the upper
 * bits are masked off here (presumably allocator bookkeeping — see the
 * ASID allocator for what they hold).
 */
#define ASID(mm)	((mm)->context.id.counter & 0xffff)
35 
36 static inline bool arm64_kernel_unmapped_at_el0(void)
37 {
38 	return cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0);
39 }
40 
/*
 * Early memory/MMU bring-up entry points. The implementations live
 * outside this header; only the interfaces are declared here.
 */
extern void arm64_memblock_init(void);
extern void paging_init(void);
extern void bootmem_init(void);
extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
extern void init_mem_pgprot(void);
/* Map [phys, phys + size) at virt in mm with prot; page_mappings_only
 * presumably forbids block (huge) mappings — confirm at the definition. */
extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
			       unsigned long virt, phys_addr_t size,
			       pgprot_t prot, bool page_mappings_only);
extern void *fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot);
extern void mark_linear_text_alias_ro(void);
extern bool kaslr_requires_kpti(void);
52 
/*
 * Designated-initializer fragment for the static init_mm: start with
 * init_pg_dir as the initial page-table root.
 */
#define INIT_MM_CONTEXT(name)	\
	.pgd = init_pg_dir,
55 
56 #endif	/* !__ASSEMBLY__ */
57 #endif
58