/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/mm/proc.S
 *
 * Copyright (C) 2001 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 */

#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/pgtable.h>
#include <linux/cfi_types.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/asm_pointer_auth.h>
#include <asm/hwcap.h>
#include <asm/kernel-pgtable.h>
#include <asm/pgtable-hwdef.h>
#include <asm/cpufeature.h>
#include <asm/alternative.h>
#include <asm/smp.h>
#include <asm/sysreg.h>

#ifdef CONFIG_ARM64_64K_PAGES
#define TCR_TG_FLAGS	TCR_TG0_64K | TCR_TG1_64K
#elif defined(CONFIG_ARM64_16K_PAGES)
#define TCR_TG_FLAGS	TCR_TG0_16K | TCR_TG1_16K
#else /* CONFIG_ARM64_4K_PAGES */
#define TCR_TG_FLAGS	TCR_TG0_4K | TCR_TG1_4K
#endif

#ifdef CONFIG_RANDOMIZE_BASE
#define TCR_KASLR_FLAGS	TCR_NFD1
#else
#define TCR_KASLR_FLAGS	0
#endif

#define TCR_SMP_FLAGS	TCR_SHARED

/* PTWs cacheable, inner/outer WBWA */
#define TCR_CACHE_FLAGS	TCR_IRGN_WBWA | TCR_ORGN_WBWA

#ifdef CONFIG_KASAN_SW_TAGS
#define TCR_KASAN_SW_FLAGS	TCR_TBI1 | TCR_TBID1
#else
#define TCR_KASAN_SW_FLAGS	0
#endif

#ifdef CONFIG_KASAN_HW_TAGS
#define TCR_MTE_FLAGS	TCR_TCMA1 | TCR_TBI1 | TCR_TBID1
#elif defined(CONFIG_ARM64_MTE)
/*
 * The mte_zero_clear_page_tags() implementation uses DC GZVA, which relies on
 * TBI being enabled at EL1.
 */
#define TCR_MTE_FLAGS	TCR_TBI1 | TCR_TBID1
#else
#define TCR_MTE_FLAGS	0
#endif

/*
 * Default MAIR_EL1. MT_NORMAL_TAGGED is initially mapped as Normal memory and
 * changed during mte_cpu_setup to Normal Tagged if the system supports MTE.
 */
#define MAIR_EL1_SET							\
	(MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRnE, MT_DEVICE_nGnRnE) |	\
	 MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRE, MT_DEVICE_nGnRE) |	\
	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL_NC, MT_NORMAL_NC) |		\
	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL) |			\
	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL_TAGGED))

#ifdef CONFIG_CPU_PM
/**
 * cpu_do_suspend - save CPU register context
 *
 * x0: virtual address of context pointer
 *
 * This must be kept in sync with struct cpu_suspend_ctx in <asm/suspend.h>.
 */
SYM_FUNC_START(cpu_do_suspend)
	mrs	x2, tpidr_el0
	mrs	x3, tpidrro_el0
	mrs	x4, contextidr_el1
	mrs	x5, osdlr_el1
	mrs	x6, cpacr_el1
	mrs	x7, tcr_el1
	mrs	x8, vbar_el1
	mrs	x9, mdscr_el1
	mrs	x10, oslsr_el1
	mrs	x11, sctlr_el1
	get_this_cpu_offset x12
	mrs	x13, sp_el0
	stp	x2, x3, [x0]
	stp	x4, x5, [x0, #16]
	stp	x6, x7, [x0, #32]
	stp	x8, x9, [x0, #48]
	stp	x10, x11, [x0, #64]
	stp	x12, x13, [x0, #80]
	/*
	 * Save x18 as it may be used as a platform register, e.g. by shadow
	 * call stack.
	 */
	str	x18, [x0, #96]
	ret
SYM_FUNC_END(cpu_do_suspend)
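
/*
 * For illustration only: the stores above correspond to a buffer laid out
 * roughly like the hypothetical C view below. The authoritative definition
 * is struct cpu_suspend_ctx in <asm/suspend.h>; this sketch merely mirrors
 * the stp/str offsets used in cpu_do_suspend and is not a real kernel type.
 *
 *	struct cpu_suspend_ctx_sketch {		// hypothetical name
 *		u64 tpidr_el0;			// [x0, #0]
 *		u64 tpidrro_el0;		// [x0, #8]
 *		u64 contextidr_el1;		// [x0, #16]
 *		u64 osdlr_el1;			// [x0, #24]
 *		u64 cpacr_el1;			// [x0, #32]
 *		u64 tcr_el1;			// [x0, #40]
 *		u64 vbar_el1;			// [x0, #48]
 *		u64 mdscr_el1;			// [x0, #56]
 *		u64 oslsr_el1;			// [x0, #64]
 *		u64 sctlr_el1;			// [x0, #72]
 *		u64 percpu_offset;		// [x0, #80], TPIDR_EL1/EL2
 *		u64 sp_el0;			// [x0, #88]
 *		u64 x18;			// [x0, #96], platform reg/SCS
 *	};
 */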

/**
 * cpu_do_resume - restore CPU register context
 *
 * x0: Address of context pointer
 */
SYM_FUNC_START(cpu_do_resume)
	ldp	x2, x3, [x0]
	ldp	x4, x5, [x0, #16]
	ldp	x6, x8, [x0, #32]
	ldp	x9, x10, [x0, #48]
	ldp	x11, x12, [x0, #64]
	ldp	x13, x14, [x0, #80]
	/*
	 * Restore x18, as it may be used as a platform register, and clear
	 * the buffer to minimize the risk of exposure when used for shadow
	 * call stack.
	 */
	ldr	x18, [x0, #96]
	str	xzr, [x0, #96]
	msr	tpidr_el0, x2
	msr	tpidrro_el0, x3
	msr	contextidr_el1, x4
	msr	cpacr_el1, x6

	/* Don't change t0sz here, mask those bits when restoring */
	mrs	x7, tcr_el1
	bfi	x8, x7, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH

	msr	tcr_el1, x8
	msr	vbar_el1, x9

	/*
	 * __cpu_setup() cleared MDSCR_EL1.MDE and friends, before unmasking
	 * debug exceptions. By restoring MDSCR_EL1 here, we may take a debug
	 * exception. Mask them until local_daif_restore() in cpu_suspend()
	 * resets them.
	 */
	disable_daif
	msr	mdscr_el1, x10

	msr	sctlr_el1, x12
	set_this_cpu_offset x13
	msr	sp_el0, x14
	/*
	 * Restore oslsr_el1 by writing oslar_el1
	 */
	msr	osdlr_el1, x5
	ubfx	x11, x11, #1, #1
	msr	oslar_el1, x11
	reset_pmuserenr_el0 x0			// Disable PMU access from EL0
	reset_amuserenr_el0 x0			// Disable AMU access from EL0

alternative_if ARM64_HAS_RAS_EXTN
	msr_s	SYS_DISR_EL1, xzr
alternative_else_nop_endif

	ptrauth_keys_install_kernel_nosync x14, x1, x2, x3
	isb
	ret
SYM_FUNC_END(cpu_do_resume)
#endif

	.pushsection ".idmap.text", "a"

.macro	__idmap_cpu_set_reserved_ttbr1, tmp1, tmp2
	adrp	\tmp1, reserved_pg_dir
	phys_to_ttbr \tmp2, \tmp1
	offset_ttbr1 \tmp2, \tmp1
	msr	ttbr1_el1, \tmp2
	isb
	tlbi	vmalle1
	dsb	nsh
	isb
.endm

/*
 * void idmap_cpu_replace_ttbr1(phys_addr_t ttbr1)
 *
 * This is the low-level counterpart to cpu_replace_ttbr1, and should not be
 * called by anything else. It can only be executed from a TTBR0 mapping.
 */
SYM_TYPED_FUNC_START(idmap_cpu_replace_ttbr1)
	__idmap_cpu_set_reserved_ttbr1 x1, x3

	offset_ttbr1 x0, x3
	msr	ttbr1_el1, x0
	isb

	ret
SYM_FUNC_END(idmap_cpu_replace_ttbr1)
	.popsection

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0

#define KPTI_NG_PTE_FLAGS	(PTE_ATTRINDX(MT_NORMAL) | SWAPPER_PTE_FLAGS)

	.pushsection ".idmap.text", "a"

	.macro	kpti_mk_tbl_ng, type, num_entries
	add	end_\type\()p, cur_\type\()p, #\num_entries * 8
.Ldo_\type:
	ldr	\type, [cur_\type\()p]		// Load the entry
	tbz	\type, #0, .Lnext_\type		// Skip invalid and
	tbnz	\type, #11, .Lnext_\type	// non-global entries
	orr	\type, \type, #PTE_NG		// Same bit for blocks and pages
	str	\type, [cur_\type\()p]		// Update the entry
	.ifnc	\type, pte
	tbnz	\type, #1, .Lderef_\type
	.endif
.Lnext_\type:
	add	cur_\type\()p, cur_\type\()p, #8
	cmp	cur_\type\()p, end_\type\()p
	b.ne	.Ldo_\type
	.endm

	/*
	 * Dereference the current table entry and map it into the temporary
	 * fixmap slot associated with the current level.
	 */
	.macro	kpti_map_pgtbl, type, level
	str	xzr, [temp_pte, #8 * (\level + 1)]	// break before make
	dsb	nshst
	add	pte, temp_pte, #PAGE_SIZE * (\level + 1)
	lsr	pte, pte, #12
	tlbi	vaae1, pte
	dsb	nsh
	isb

	phys_to_pte pte, cur_\type\()p
	add	cur_\type\()p, temp_pte, #PAGE_SIZE * (\level + 1)
	orr	pte, pte, pte_flags
	str	pte, [temp_pte, #8 * (\level + 1)]
	dsb	nshst
	.endm
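
/*
 * Taken together, kpti_mk_tbl_ng and the .Lderef_* blocks below perform a
 * depth-first walk over swapper_pg_dir, OR-ing PTE_NG into every valid
 * global entry. A rough C equivalent (illustrative pseudocode only; the
 * helper names here are made up for this sketch):
 *
 *	static void set_ng(u64 *table, int nentries, int level)
 *	{
 *		for (int i = 0; i < nentries; i++) {
 *			u64 e = table[i];
 *
 *			if (!(e & BIT(0)) || (e & BIT(11)))
 *				continue;		// invalid or already nG
 *			table[i] = e | PTE_NG;		// same bit, blocks/pages
 *			if (level < 3 && (e & BIT(1)))	// table descriptor
 *				set_ng(map_fixmap(e, level + 1),
 *				       PTRS_PER_PTE, level + 1);
 *		}
 *	}
 *
 * The assembly flattens the recursion into the .Lderef_/.Lnext_ labels and
 * uses kpti_map_pgtbl to expose each next-level table through a per-level
 * temporary fixmap slot while the scratch page tables are live.
 */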

/*
 * void __kpti_install_ng_mappings(int cpu, int num_secondaries, phys_addr_t temp_pgd,
 *				   unsigned long temp_pte_va)
 *
 * Called exactly once from stop_machine context by each CPU found during boot.
 */
	.pushsection ".data", "aw", %progbits
SYM_DATA(__idmap_kpti_flag, .long 1)
	.popsection

SYM_TYPED_FUNC_START(idmap_kpti_install_ng_mappings)
	cpu		.req	w0
	temp_pte	.req	x0
	num_cpus	.req	w1
	pte_flags	.req	x1
	temp_pgd_phys	.req	x2
	swapper_ttb	.req	x3
	flag_ptr	.req	x4
	cur_pgdp	.req	x5
	end_pgdp	.req	x6
	pgd		.req	x7
	cur_pudp	.req	x8
	end_pudp	.req	x9
	cur_pmdp	.req	x11
	end_pmdp	.req	x12
	cur_ptep	.req	x14
	end_ptep	.req	x15
	pte		.req	x16
	valid		.req	x17

	mov	x5, x3				// preserve temp_pte arg
	mrs	swapper_ttb, ttbr1_el1
	adr_l	flag_ptr, __idmap_kpti_flag

	cbnz	cpu, __idmap_kpti_secondary

	/* We're the boot CPU. Wait for the others to catch up */
	sevl
1:	wfe
	ldaxr	w17, [flag_ptr]
	eor	w17, w17, num_cpus
	cbnz	w17, 1b

	/* Switch to the temporary page tables on this CPU only */
	__idmap_cpu_set_reserved_ttbr1 x8, x9
	offset_ttbr1 temp_pgd_phys, x8
	msr	ttbr1_el1, temp_pgd_phys
	isb

	mov	temp_pte, x5
	mov	pte_flags, #KPTI_NG_PTE_FLAGS

	/* Everybody is enjoying the idmap, so we can rewrite swapper. */
	/* PGD */
	adrp		cur_pgdp, swapper_pg_dir
	kpti_map_pgtbl	pgd, 0
	kpti_mk_tbl_ng	pgd, PTRS_PER_PGD

	/* Ensure all the updated entries are visible to secondary CPUs */
	dsb	ishst

	/* We're done: fire up swapper_pg_dir again */
	__idmap_cpu_set_reserved_ttbr1 x8, x9
	msr	ttbr1_el1, swapper_ttb
	isb

	/* Set the flag to zero to indicate that we're all done */
	str	wzr, [flag_ptr]
	ret

.Lderef_pgd:
	/* PUD */
	.if		CONFIG_PGTABLE_LEVELS > 3
	pud		.req	x10
	pte_to_phys	cur_pudp, pgd
	kpti_map_pgtbl	pud, 1
	kpti_mk_tbl_ng	pud, PTRS_PER_PUD
	b		.Lnext_pgd
	.else /* CONFIG_PGTABLE_LEVELS <= 3 */
	pud		.req	pgd
	.set		.Lnext_pud, .Lnext_pgd
	.endif

.Lderef_pud:
	/* PMD */
	.if		CONFIG_PGTABLE_LEVELS > 2
	pmd		.req	x13
	pte_to_phys	cur_pmdp, pud
	kpti_map_pgtbl	pmd, 2
	kpti_mk_tbl_ng	pmd, PTRS_PER_PMD
	b		.Lnext_pud
	.else /* CONFIG_PGTABLE_LEVELS <= 2 */
	pmd		.req	pgd
	.set		.Lnext_pmd, .Lnext_pgd
	.endif

.Lderef_pmd:
	/* PTE */
	pte_to_phys	cur_ptep, pmd
	kpti_map_pgtbl	pte, 3
	kpti_mk_tbl_ng	pte, PTRS_PER_PTE
	b		.Lnext_pmd

	.unreq	cpu
	.unreq	temp_pte
	.unreq	num_cpus
	.unreq	pte_flags
	.unreq	temp_pgd_phys
	.unreq	cur_pgdp
	.unreq	end_pgdp
	.unreq	pgd
	.unreq	cur_pudp
	.unreq	end_pudp
	.unreq	pud
	.unreq	cur_pmdp
	.unreq	end_pmdp
	.unreq	pmd
	.unreq	cur_ptep
	.unreq	end_ptep
	.unreq	pte
	.unreq	valid

	/* Secondary CPUs end up here */
__idmap_kpti_secondary:
	/* Uninstall swapper before surgery begins */
	__idmap_cpu_set_reserved_ttbr1 x16, x17

	/* Increment the flag to let the boot CPU know we're ready */
1:	ldxr	w16, [flag_ptr]
	add	w16, w16, #1
	stxr	w17, w16, [flag_ptr]
	cbnz	w17, 1b

	/* Wait for the boot CPU to finish messing around with swapper */
	sevl
1:	wfe
	ldxr	w16, [flag_ptr]
	cbnz	w16, 1b

	/* All done, act like nothing happened */
	msr	ttbr1_el1, swapper_ttb
	isb
	ret

	.unreq	swapper_ttb
	.unreq	flag_ptr
SYM_FUNC_END(idmap_kpti_install_ng_mappings)
	.popsection
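
/*
 * The rendezvous on __idmap_kpti_flag above behaves roughly like the sketch
 * below (illustrative pseudocode, not the real implementation). The flag
 * starts at 1, counting the boot CPU; each secondary atomically increments
 * it, the boot CPU proceeds once the value matches the CPU count passed in
 * w1, and a store of 0 releases everyone:
 *
 *	if (cpu == 0) {
 *		while (READ_ONCE(flag) != num_cpus)
 *			wfe();
 *		remap_swapper_nonglobal();	// hypothetical helper
 *		WRITE_ONCE(flag, 0);		// release the secondaries
 *	} else {
 *		atomic_inc(&flag);		// parked in the idmap
 *		while (READ_ONCE(flag) != 0)
 *			wfe();
 *	}
 *
 * The real code uses ldaxr/ldxr/stxr loops with wfe/sevl so the waits need
 * no C atomics runtime and remain safe while TTBR1 points at reserved_pg_dir.
 */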
#endif

/*
 * __cpu_setup
 *
 * Initialise the processor for turning the MMU on.
 *
 * Input:
 *	x0 - actual number of VA bits (ignored unless VA_BITS > 48)
 * Output:
 *	Return in x0 the value of the SCTLR_EL1 register.
 */
	.pushsection ".idmap.text", "a"
SYM_FUNC_START(__cpu_setup)
	tlbi	vmalle1				// Invalidate local TLB
	dsb	nsh

	mov	x1, #3 << 20
	msr	cpacr_el1, x1			// Enable FP/ASIMD
	mov	x1, #1 << 12			// Reset mdscr_el1 and disable
	msr	mdscr_el1, x1			// access to the DCC from EL0
	isb					// Unmask debug exceptions now,
	enable_dbg				// since this is per-cpu
	reset_pmuserenr_el0 x1			// Disable PMU access from EL0
	reset_amuserenr_el0 x1			// Disable AMU access from EL0

	/*
	 * Default values for VMSA control registers. These will be adjusted
	 * below depending on detected CPU features.
	 */
	mair	.req	x17
	tcr	.req	x16
	mov_q	mair, MAIR_EL1_SET
	mov_q	tcr, TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
			TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \
			TCR_TBI0 | TCR_A1 | TCR_KASAN_SW_FLAGS | TCR_MTE_FLAGS

	tcr_clear_errata_bits tcr, x9, x5

#ifdef CONFIG_ARM64_VA_BITS_52
	sub		x9, xzr, x0
	add		x9, x9, #64
	tcr_set_t1sz	tcr, x9
#else
	idmap_get_t0sz x9
#endif
	tcr_set_t0sz	tcr, x9

	/*
	 * Set the IPS bits in TCR_EL1.
	 */
	tcr_compute_pa_size tcr, #TCR_IPS_SHIFT, x5, x6
#ifdef CONFIG_ARM64_HW_AFDBM
	/*
	 * Enable hardware update of the Access Flags bit.
	 * Hardware dirty bit management is enabled later,
	 * via capabilities.
	 */
	mrs	x9, ID_AA64MMFR1_EL1
	and	x9, x9, #0xf
	cbz	x9, 1f
	orr	tcr, tcr, #TCR_HA		// hardware Access flag update
1:
#endif	/* CONFIG_ARM64_HW_AFDBM */
	msr	mair_el1, mair
	msr	tcr_el1, tcr
	/*
	 * Prepare SCTLR
	 */
	mov_q	x0, INIT_SCTLR_EL1_MMU_ON
	ret					// return to head.S

	.unreq	mair
	.unreq	tcr
SYM_FUNC_END(__cpu_setup)
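
/*
 * For reference, the CONFIG_ARM64_VA_BITS_52 path in __cpu_setup computes
 * T1SZ as 64 - x0 using a negate-and-add ("sub x9, xzr, x0" yields -x0,
 * then "add x9, x9, #64"). A worked example, assuming x0 carries the VA
 * width detected at boot:
 *
 *	x0 = 52:  t1sz = 64 - 52 = 12	// 52-bit kernel VA space
 *	x0 = 48:  t1sz = 64 - 48 = 16	// hardware without 52-bit support
 *
 * This matches the architectural rule TxSZ = 64 - (number of VA bits) that
 * TCR_TxSZ(VA_BITS) applies at compile time for the non-52-bit case.
 */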