/*
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_MMU_H
#define __ASM_MMU_H

#include <asm/cputype.h>

#define MMCF_AARCH32	0x1	/* mm context flag for AArch32 executables */
#define USER_ASID_BIT	48
#define USER_ASID_FLAG	(UL(1) << USER_ASID_BIT)
#define TTBR_ASID_MASK	(UL(0xffff) << 48)

#define BP_HARDEN_EL2_SLOTS 4

#ifndef __ASSEMBLY__

typedef struct {
	atomic64_t	id;
	void		*vdso;
	unsigned long	flags;
} mm_context_t;

/*
 * This macro is only used by the TLBI code, which cannot race with an
 * ASID change and therefore doesn't need to reload the counter using
 * atomic64_read.
 */
#define ASID(mm)	((mm)->context.id.counter & 0xffff)
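/*
 * Illustrative sketch, not part of this header: the TLB invalidation
 * code in asm/tlbflush.h consumes ASID() roughly as follows, packing
 * the 16-bit ASID into bits [63:48] of the TLBI operand (matching
 * USER_ASID_BIT above):
 *
 *	unsigned long asid = __TLBI_VADDR(0, ASID(mm));
 *
 *	dsb(ishst);
 *	__tlbi(aside1is, asid);
 *	__tlbi_user(aside1is, asid);
 *	dsb(ish);
 *
 * __tlbi_user() repeats the operation with USER_ASID_FLAG set when the
 * kernel is unmapped at EL0, so the userspace copy of the translation
 * is invalidated as well.
 */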
static inline bool arm64_kernel_unmapped_at_el0(void)
{
	return IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0) &&
	       cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0);
}

static inline bool arm64_kernel_use_ng_mappings(void)
{
	bool tx1_bug;

	/* What's a kpti? Use global mappings if we don't know. */
	if (!IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0))
		return false;

	/*
	 * Note: this function is called before the CPU capabilities have
	 * been configured, so our early mappings will be global. If we
	 * later determine that kpti is required, then
	 * kpti_install_ng_mappings() will make them non-global.
	 */
	if (arm64_kernel_unmapped_at_el0())
		return true;

	if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
		return false;

	/*
	 * KASLR is enabled so we're going to be enabling kpti on non-broken
	 * CPUs regardless of their susceptibility to Meltdown. Rather
	 * than force everybody to go through the G -> nG dance later on,
	 * just put down non-global mappings from the beginning.
	 */
	if (!IS_ENABLED(CONFIG_CAVIUM_ERRATUM_27456)) {
		tx1_bug = false;
#ifndef MODULE
	} else if (!static_branch_likely(&arm64_const_caps_ready)) {
		extern const struct midr_range cavium_erratum_27456_cpus[];

		tx1_bug = is_midr_in_range_list(read_cpuid_id(),
						cavium_erratum_27456_cpus);
#endif
	} else {
		tx1_bug = __cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_27456);
	}

	return !tx1_bug && kaslr_offset() > 0;
}

typedef void (*bp_hardening_cb_t)(void);

struct bp_hardening_data {
	int			hyp_vectors_slot;
	bp_hardening_cb_t	fn;
};

#if (defined(CONFIG_HARDEN_BRANCH_PREDICTOR) ||	\
     defined(CONFIG_HARDEN_EL2_VECTORS))
extern char __bp_harden_hyp_vecs_start[], __bp_harden_hyp_vecs_end[];
extern atomic_t arm64_el2_vector_last_slot;
#endif	/* CONFIG_HARDEN_BRANCH_PREDICTOR || CONFIG_HARDEN_EL2_VECTORS */

#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
DECLARE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);

static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
{
	return this_cpu_ptr(&bp_hardening_data);
}

static inline void arm64_apply_bp_hardening(void)
{
	struct bp_hardening_data *d;

	if (!cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR))
		return;

	d = arm64_get_bp_hardening_data();
	if (d->fn)
		d->fn();
}
#else
static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
{
	return NULL;
}

static inline void arm64_apply_bp_hardening(void)	{ }
#endif	/* CONFIG_HARDEN_BRANCH_PREDICTOR */

extern void arm64_memblock_init(void);
extern void paging_init(void);
extern void bootmem_init(void);
extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
extern void init_mem_pgprot(void);
extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
			       unsigned long virt, phys_addr_t size,
			       pgprot_t prot, bool page_mappings_only);
extern void *fixmap_remap_fdt(phys_addr_t dt_phys);
extern void mark_linear_text_alias_ro(void);

#define INIT_MM_CONTEXT(name)	\
	.pgd = init_pg_dir,

#endif	/* !__ASSEMBLY__ */
#endif	/* __ASM_MMU_H */