/* xref: /openbmc/linux/arch/arm64/include/asm/mmu.h (revision 55fd7e02) */
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3  * Copyright (C) 2012 ARM Ltd.
4  */
5 #ifndef __ASM_MMU_H
6 #define __ASM_MMU_H
7 
8 #include <asm/cputype.h>
9 
#define MMCF_AARCH32	0x1	/* mm context flag for AArch32 executables */
#define USER_ASID_BIT	48
#define USER_ASID_FLAG	(UL(1) << USER_ASID_BIT)
/*
 * 16-bit ASID field in TTBR; shift by USER_ASID_BIT (not a bare 48) so the
 * mask stays in sync with USER_ASID_FLAG if the bit position ever changes.
 */
#define TTBR_ASID_MASK	(UL(0xffff) << USER_ASID_BIT)

#define BP_HARDEN_EL2_SLOTS 4
#define __BP_HARDEN_HYP_VECS_SZ (BP_HARDEN_EL2_SLOTS * SZ_2K)
17 
18 #ifndef __ASSEMBLY__
19 
typedef struct {
	/* ASID for this mm; low 16 bits are read via the ASID() macro below. */
	atomic64_t	id;
#ifdef CONFIG_COMPAT
	/* NOTE(review): presumably the AArch32 signal trampoline page — confirm. */
	void		*sigpage;
#endif
	/* Base of this mm's vDSO mapping — assumed, verify against the mapper. */
	void		*vdso;
	/* Context flags, e.g. MMCF_AARCH32 defined above. */
	unsigned long	flags;
} mm_context_t;
28 
/*
 * This macro is only used by the TLBI and low-level switch_mm() code,
 * neither of which can race with an ASID change. We therefore don't
 * need to reload the counter using atomic64_read().
 *
 * The 0xffff mask extracts the low 16 bits, matching the 16-bit ASID
 * field selected by TTBR_ASID_MASK above.
 */
#define ASID(mm)	((mm)->context.id.counter & 0xffff)
35 
36 static inline bool arm64_kernel_unmapped_at_el0(void)
37 {
38 	return cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0);
39 }
40 
/* Branch-predictor hardening callback, invoked via arm64_apply_bp_hardening(). */
typedef void (*bp_hardening_cb_t)(void);

struct bp_hardening_data {
	/* Slot index — presumably into __bp_harden_hyp_vecs; verify at the writer. */
	int			hyp_vectors_slot;
	/* Hardening routine for this CPU; NULL when none is installed. */
	bp_hardening_cb_t	fn;
};
47 
48 #if (defined(CONFIG_HARDEN_BRANCH_PREDICTOR) ||	\
49      defined(CONFIG_HARDEN_EL2_VECTORS))
50 
51 extern char __bp_harden_hyp_vecs[];
52 extern atomic_t arm64_el2_vector_last_slot;
53 #endif  /* CONFIG_HARDEN_BRANCH_PREDICTOR || CONFIG_HARDEN_EL2_VECTORS */
54 
55 #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
56 DECLARE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
57 
58 static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
59 {
60 	return this_cpu_ptr(&bp_hardening_data);
61 }
62 
63 static inline void arm64_apply_bp_hardening(void)
64 {
65 	struct bp_hardening_data *d;
66 
67 	if (!cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR))
68 		return;
69 
70 	d = arm64_get_bp_hardening_data();
71 	if (d->fn)
72 		d->fn();
73 }
74 #else
/* CONFIG_HARDEN_BRANCH_PREDICTOR disabled: provide inert stubs. */
static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
{
	return NULL;
}

static inline void arm64_apply_bp_hardening(void)	{ }
81 #endif	/* CONFIG_HARDEN_BRANCH_PREDICTOR */
82 
/*
 * Early MMU / memory-map setup entry points. Declarations only —
 * definitions live elsewhere in the arch code.
 */
extern void arm64_memblock_init(void);
extern void paging_init(void);
extern void bootmem_init(void);
extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
extern void init_mem_pgprot(void);
extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
			       unsigned long virt, phys_addr_t size,
			       pgprot_t prot, bool page_mappings_only);
extern void *fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot);
extern void mark_linear_text_alias_ro(void);
extern bool kaslr_requires_kpti(void);

/* Initial mm context: start the init task on init_pg_dir. */
#define INIT_MM_CONTEXT(name)	\
	.pgd = init_pg_dir,
97 
98 #endif	/* !__ASSEMBLY__ */
99 #endif
100