xref: /openbmc/linux/arch/arm64/include/asm/mmu.h (revision 78bb17f7)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_MMU_H
#define __ASM_MMU_H

#include <asm/cputype.h>

#define MMCF_AARCH32	0x1	/* mm context flag for AArch32 executables */
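/*
 * The 16-bit ASID lives in bits [63:48] of TTBR1_EL1. When the kernel is
 * unmapped at EL0 (kpti), ASIDs are handed out in pairs and the user half
 * is distinguished from its kernel counterpart by bit 48 (ASID bit 0), so
 * kernel and user translations stay tagged separately without a TLB flush
 * on every kernel entry/exit.
 */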
#define USER_ASID_BIT	48
#define USER_ASID_FLAG	(UL(1) << USER_ASID_BIT)
#define TTBR_ASID_MASK	(UL(0xffff) << 48)

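/*
 * Branch-predictor hardening at EL2 uses a set of replacement vector
 * slots; each slot is one 2K exception vector table, so the buffer
 * holding them spans BP_HARDEN_EL2_SLOTS * SZ_2K bytes.
 */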
#define BP_HARDEN_EL2_SLOTS 4
#define __BP_HARDEN_HYP_VECS_SZ (BP_HARDEN_EL2_SLOTS * SZ_2K)

#ifndef __ASSEMBLY__

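/*
 * Per-mm context: 'id' holds the ASID together with its allocator
 * generation in the upper bits, 'vdso' is the user base address of the
 * vDSO mapping and 'flags' carries MMCF_* bits such as MMCF_AARCH32.
 */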
typedef struct {
	atomic64_t	id;
	void		*vdso;
	unsigned long	flags;
} mm_context_t;

/*
 * This macro is only used by the TLBI and low-level switch_mm() code,
 * neither of which can race with an ASID change. We therefore don't
 * need to reload the counter using atomic64_read().
 */
#define ASID(mm)	((mm)->context.id.counter & 0xffff)

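/*
 * True when the kernel runs with its mappings hidden from EL0
 * (ARM64_UNMAP_KERNEL_AT_EL0, i.e. kpti).
 */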
static inline bool arm64_kernel_unmapped_at_el0(void)
{
	return cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0);
}

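/*
 * Per-CPU state for the Spectre-v2 branch predictor mitigation:
 * 'hyp_vectors_slot' selects one of the hardened EL2 vector slots above
 * and 'fn' is the callback used to invalidate the branch predictor,
 * typically an SMC/HVC into firmware.
 */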
typedef void (*bp_hardening_cb_t)(void);

struct bp_hardening_data {
	int			hyp_vectors_slot;
	bp_hardening_cb_t	fn;
};

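/*
 * __bp_harden_hyp_vecs is the buffer holding the hardened EL2 vector
 * slots described above; arm64_el2_vector_last_slot tracks the last
 * slot index handed out.
 */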
#if (defined(CONFIG_HARDEN_BRANCH_PREDICTOR) ||	\
     defined(CONFIG_HARDEN_EL2_VECTORS))

extern char __bp_harden_hyp_vecs[];
extern atomic_t arm64_el2_vector_last_slot;
#endif  /* CONFIG_HARDEN_BRANCH_PREDICTOR || CONFIG_HARDEN_EL2_VECTORS */

#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
DECLARE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);

static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
{
	return this_cpu_ptr(&bp_hardening_data);
}

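/*
 * Run the per-CPU branch predictor hardening callback, if the CPU needs
 * one. An illustrative call site (a sketch, not part of this header) is a
 * fault handler taking a user instruction abort on a non-user address:
 *
 *	if (!is_ttbr0_addr(addr))
 *		arm64_apply_bp_hardening();
 */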
static inline void arm64_apply_bp_hardening(void)
{
	struct bp_hardening_data *d;

	if (!cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR))
		return;

	d = arm64_get_bp_hardening_data();
	if (d->fn)
		d->fn();
}
#else
static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
{
	return NULL;
}

static inline void arm64_apply_bp_hardening(void)	{ }
#endif	/* CONFIG_HARDEN_BRANCH_PREDICTOR */

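/* Boot-time MMU and memory layout set-up hooks and related helpers. */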
extern void arm64_memblock_init(void);
extern void paging_init(void);
extern void bootmem_init(void);
extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
extern void init_mem_pgprot(void);
extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
			       unsigned long virt, phys_addr_t size,
			       pgprot_t prot, bool page_mappings_only);
extern void *fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot);
extern void mark_linear_text_alias_ro(void);
extern bool kaslr_requires_kpti(void);

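/*
 * init_mm starts out on the boot-time init_pg_dir page tables;
 * paging_init() later switches init_mm.pgd over to swapper_pg_dir.
 */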
#define INIT_MM_CONTEXT(name)	\
	.pgd = init_pg_dir,

#endif	/* !__ASSEMBLY__ */
#endif