/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/mm/proc.S
 *
 * Copyright (C) 2001 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 */

#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/asm_pointer_auth.h>
#include <asm/hwcap.h>
#include <asm/kernel-pgtable.h>
#include <asm/pgtable-hwdef.h>
#include <asm/cpufeature.h>
#include <asm/alternative.h>
#include <asm/smp.h>
#include <asm/sysreg.h>

#ifdef CONFIG_ARM64_64K_PAGES
#define TCR_TG_FLAGS	TCR_TG0_64K | TCR_TG1_64K
#elif defined(CONFIG_ARM64_16K_PAGES)
#define TCR_TG_FLAGS	TCR_TG0_16K | TCR_TG1_16K
#else /* CONFIG_ARM64_4K_PAGES */
#define TCR_TG_FLAGS	TCR_TG0_4K | TCR_TG1_4K
#endif

#ifdef CONFIG_RANDOMIZE_BASE
#define TCR_KASLR_FLAGS	TCR_NFD1
#else
#define TCR_KASLR_FLAGS	0
#endif

#define TCR_SMP_FLAGS	TCR_SHARED

/* PTWs cacheable, inner/outer WBWA */
#define TCR_CACHE_FLAGS	TCR_IRGN_WBWA | TCR_ORGN_WBWA

#ifdef CONFIG_KASAN_SW_TAGS
#define TCR_KASAN_SW_FLAGS	TCR_TBI1 | TCR_TBID1
#else
#define TCR_KASAN_SW_FLAGS	0
#endif

#ifdef CONFIG_KASAN_HW_TAGS
#define TCR_MTE_FLAGS	TCR_TCMA1 | TCR_TBI1 | TCR_TBID1
#else
/*
 * The mte_zero_clear_page_tags() implementation uses DC GZVA, which relies on
 * TBI being enabled at EL1.
 */
#define TCR_MTE_FLAGS	TCR_TBI1 | TCR_TBID1
#endif

/*
 * Default MAIR_EL1. MT_NORMAL_TAGGED is initially mapped as Normal memory and
 * changed during __cpu_setup to Normal Tagged if the system supports MTE.
 */
#define MAIR_EL1_SET							\
	(MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRnE, MT_DEVICE_nGnRnE) |	\
	 MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRE, MT_DEVICE_nGnRE) |	\
	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL_NC, MT_NORMAL_NC) |		\
	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL) |			\
	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL_TAGGED))

#ifdef CONFIG_CPU_PM
/**
 * cpu_do_suspend - save CPU register context
 *
 * x0: virtual address of context pointer
 *
 * This must be kept in sync with struct cpu_suspend_ctx in <asm/suspend.h>.
 */
SYM_FUNC_START(cpu_do_suspend)
	mrs	x2, tpidr_el0
	mrs	x3, tpidrro_el0
	mrs	x4, contextidr_el1
	mrs	x5, osdlr_el1
	mrs	x6, cpacr_el1
	mrs	x7, tcr_el1
	mrs	x8, vbar_el1
	mrs	x9, mdscr_el1
	mrs	x10, oslsr_el1
	mrs	x11, sctlr_el1
	get_this_cpu_offset x12
	mrs	x13, sp_el0
	stp	x2, x3, [x0]
	stp	x4, x5, [x0, #16]
	stp	x6, x7, [x0, #32]
	stp	x8, x9, [x0, #48]
	stp	x10, x11, [x0, #64]
	stp	x12, x13, [x0, #80]
	/*
	 * Save x18 as it may be used as a platform register, e.g. by shadow
	 * call stack.
	 */
	str	x18, [x0, #96]
	ret
SYM_FUNC_END(cpu_do_suspend)

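/*
 * For reference, the layout saved above (and consumed by cpu_do_resume
 * below) corresponds to struct cpu_suspend_ctx in <asm/suspend.h>, roughly
 * the following C shape (a sketch for illustration only; the header is the
 * authoritative definition):
 *
 *	struct cpu_suspend_ctx {
 *		u64 ctx_regs[NR_CTX_REGS];	// the x2..x13 pairs above, then x18
 *		u64 sp;
 *	} __aligned(16);
 */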

/**
 * cpu_do_resume - restore CPU register context
 *
 * x0: Address of context pointer
 */
	.pushsection ".idmap.text", "awx"
SYM_FUNC_START(cpu_do_resume)
	ldp	x2, x3, [x0]
	ldp	x4, x5, [x0, #16]
	ldp	x6, x8, [x0, #32]
	ldp	x9, x10, [x0, #48]
	ldp	x11, x12, [x0, #64]
	ldp	x13, x14, [x0, #80]
	/*
	 * Restore x18, as it may be used as a platform register, and clear
	 * the buffer to minimize the risk of exposure when used for shadow
	 * call stack.
	 */
	ldr	x18, [x0, #96]
	str	xzr, [x0, #96]
	msr	tpidr_el0, x2
	msr	tpidrro_el0, x3
	msr	contextidr_el1, x4
	msr	cpacr_el1, x6

	/* Don't change t0sz here, mask those bits when restoring */
	mrs	x7, tcr_el1
	bfi	x8, x7, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH

	msr	tcr_el1, x8
	msr	vbar_el1, x9

	/*
	 * __cpu_setup() cleared MDSCR_EL1.MDE and friends, before unmasking
	 * debug exceptions. By restoring MDSCR_EL1 here, we may take a debug
	 * exception. Mask them until local_daif_restore() in cpu_suspend()
	 * resets them.
	 */
	disable_daif
	msr	mdscr_el1, x10

	msr	sctlr_el1, x12
	set_this_cpu_offset x13
	msr	sp_el0, x14
	/*
	 * Restore oslsr_el1 by writing oslar_el1
	 */
	msr	osdlr_el1, x5
	ubfx	x11, x11, #1, #1
	msr	oslar_el1, x11
	reset_pmuserenr_el0 x0			// Disable PMU access from EL0
	reset_amuserenr_el0 x0			// Disable AMU access from EL0

alternative_if ARM64_HAS_RAS_EXTN
	msr_s	SYS_DISR_EL1, xzr
alternative_else_nop_endif

	ptrauth_keys_install_kernel_nosync x14, x1, x2, x3
	isb
	ret
SYM_FUNC_END(cpu_do_resume)
	.popsection
#endif

	.pushsection ".idmap.text", "awx"

.macro	__idmap_cpu_set_reserved_ttbr1, tmp1, tmp2
	adrp	\tmp1, reserved_pg_dir
	phys_to_ttbr \tmp2, \tmp1
	offset_ttbr1 \tmp2, \tmp1
	msr	ttbr1_el1, \tmp2
	isb
	tlbi	vmalle1
	dsb	nsh
	isb
.endm

/*
 * void idmap_cpu_replace_ttbr1(phys_addr_t ttbr1)
 *
 * This is the low-level counterpart to cpu_replace_ttbr1, and should not be
 * called by anything else. It can only be executed from a TTBR0 mapping.
 */
SYM_FUNC_START(idmap_cpu_replace_ttbr1)
	save_and_disable_daif flags=x2

	__idmap_cpu_set_reserved_ttbr1 x1, x3

	offset_ttbr1 x0, x3
	msr	ttbr1_el1, x0
	isb

	restore_daif x2

	ret
SYM_FUNC_END(idmap_cpu_replace_ttbr1)
	.popsection

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0

#define KPTI_NG_PTE_FLAGS	(PTE_ATTRINDX(MT_NORMAL) | SWAPPER_PTE_FLAGS)

	.pushsection ".idmap.text", "awx"

	.macro	kpti_mk_tbl_ng, type, num_entries
	add	end_\type\()p, cur_\type\()p, #\num_entries * 8
.Ldo_\type:
	ldr	\type, [cur_\type\()p]		// Load the entry
	tbz	\type, #0, .Lnext_\type		// Skip invalid and
	tbnz	\type, #11, .Lnext_\type	// non-global entries
	orr	\type, \type, #PTE_NG		// Same bit for blocks and pages
	str	\type, [cur_\type\()p]		// Update the entry
	.ifnc	\type, pte
	tbnz	\type, #1, .Lderef_\type
	.endif
.Lnext_\type:
	add	cur_\type\()p, cur_\type\()p, #8
	cmp	cur_\type\()p, end_\type\()p
	b.ne	.Ldo_\type
	.endm

	/*
	 * Dereference the current table entry and map it into the temporary
	 * fixmap slot associated with the current level.
	 */
	.macro	kpti_map_pgtbl, type, level
	str	xzr, [temp_pte, #8 * (\level + 1)]	// break before make
	dsb	nshst
	add	pte, temp_pte, #PAGE_SIZE * (\level + 1)
	lsr	pte, pte, #12
	tlbi	vaae1, pte
	dsb	nsh
	isb

	phys_to_pte pte, cur_\type\()p
	add	cur_\type\()p, temp_pte, #PAGE_SIZE * (\level + 1)
	orr	pte, pte, pte_flags
	str	pte, [temp_pte, #8 * (\level + 1)]
	dsb	nshst
	.endm

/*
 * void __kpti_install_ng_mappings(int cpu, int num_secondaries, phys_addr_t temp_pgd,
 *				   unsigned long temp_pte_va)
 *
 * Called exactly once from stop_machine context by each CPU found during boot.
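 *
 * The CPUs rendezvous through __idmap_kpti_flag. Roughly (a sketch of the
 * protocol implemented below, for illustration only):
 *
 *   1. Each secondary CPU installs the reserved TTBR1, atomically increments
 *      the flag and then waits for it to drop back to zero.
 *   2. Once every CPU has checked in, the boot CPU switches to the temporary
 *      page tables, walks swapper_pg_dir setting PTE_NG on all valid,
 *      non-global entries, and then reinstalls swapper_pg_dir.
 *   3. The boot CPU stores zero to the flag, releasing the secondaries, which
 *      restore their saved TTBR1 (swapper_ttb) and return.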
 */
	.pushsection	".data", "aw", %progbits
SYM_DATA(__idmap_kpti_flag, .long 1)
	.popsection

SYM_FUNC_START(idmap_kpti_install_ng_mappings)
	cpu		.req	w0
	temp_pte	.req	x0
	num_cpus	.req	w1
	pte_flags	.req	x1
	temp_pgd_phys	.req	x2
	swapper_ttb	.req	x3
	flag_ptr	.req	x4
	cur_pgdp	.req	x5
	end_pgdp	.req	x6
	pgd		.req	x7
	cur_pudp	.req	x8
	end_pudp	.req	x9
	cur_pmdp	.req	x11
	end_pmdp	.req	x12
	cur_ptep	.req	x14
	end_ptep	.req	x15
	pte		.req	x16
	valid		.req	x17

	mov	x5, x3				// preserve temp_pte arg
	mrs	swapper_ttb, ttbr1_el1
	adr_l	flag_ptr, __idmap_kpti_flag

	cbnz	cpu, __idmap_kpti_secondary

	/* We're the boot CPU. Wait for the others to catch up */
	sevl
1:	wfe
	ldaxr	w17, [flag_ptr]
	eor	w17, w17, num_cpus
	cbnz	w17, 1b

	/* Switch to the temporary page tables on this CPU only */
	__idmap_cpu_set_reserved_ttbr1 x8, x9
	offset_ttbr1 temp_pgd_phys, x8
	msr	ttbr1_el1, temp_pgd_phys
	isb

	mov	temp_pte, x5
	mov	pte_flags, #KPTI_NG_PTE_FLAGS

	/* Everybody is enjoying the idmap, so we can rewrite swapper. */
	/* PGD */
	adrp		cur_pgdp, swapper_pg_dir
	kpti_map_pgtbl	pgd, 0
	kpti_mk_tbl_ng	pgd, PTRS_PER_PGD

	/* Ensure all the updated entries are visible to secondary CPUs */
	dsb	ishst

	/* We're done: fire up swapper_pg_dir again */
	__idmap_cpu_set_reserved_ttbr1 x8, x9
	msr	ttbr1_el1, swapper_ttb
	isb

	/* Set the flag to zero to indicate that we're all done */
	str	wzr, [flag_ptr]
	ret

.Lderef_pgd:
	/* PUD */
	.if		CONFIG_PGTABLE_LEVELS > 3
	pud		.req	x10
	pte_to_phys	cur_pudp, pgd
	kpti_map_pgtbl	pud, 1
	kpti_mk_tbl_ng	pud, PTRS_PER_PUD
	b		.Lnext_pgd
	.else		/* CONFIG_PGTABLE_LEVELS <= 3 */
	pud		.req	pgd
	.set		.Lnext_pud, .Lnext_pgd
	.endif

.Lderef_pud:
	/* PMD */
	.if		CONFIG_PGTABLE_LEVELS > 2
	pmd		.req	x13
	pte_to_phys	cur_pmdp, pud
	kpti_map_pgtbl	pmd, 2
	kpti_mk_tbl_ng	pmd, PTRS_PER_PMD
	b		.Lnext_pud
	.else		/* CONFIG_PGTABLE_LEVELS <= 2 */
	pmd		.req	pgd
	.set		.Lnext_pmd, .Lnext_pgd
	.endif

.Lderef_pmd:
	/* PTE */
	pte_to_phys	cur_ptep, pmd
	kpti_map_pgtbl	pte, 3
	kpti_mk_tbl_ng	pte, PTRS_PER_PTE
	b		.Lnext_pmd

	.unreq	cpu
	.unreq	temp_pte
	.unreq	num_cpus
	.unreq	pte_flags
	.unreq	temp_pgd_phys
	.unreq	cur_pgdp
	.unreq	end_pgdp
	.unreq	pgd
	.unreq	cur_pudp
	.unreq	end_pudp
	.unreq	pud
	.unreq	cur_pmdp
	.unreq	end_pmdp
	.unreq	pmd
	.unreq	cur_ptep
	.unreq	end_ptep
	.unreq	pte
	.unreq	valid

	/* Secondary CPUs end up here */
__idmap_kpti_secondary:
	/* Uninstall swapper before surgery begins */
	__idmap_cpu_set_reserved_ttbr1 x16, x17

	/* Increment the flag to let the boot CPU know we're ready */
1:	ldxr	w16, [flag_ptr]
	add	w16, w16, #1
	stxr	w17, w16, [flag_ptr]
	cbnz	w17, 1b

	/* Wait for the boot CPU to finish messing around with swapper */
	sevl
1:	wfe
	ldxr	w16, [flag_ptr]
	cbnz	w16, 1b

	/* All done, act like nothing happened */
	msr	ttbr1_el1, swapper_ttb
	isb
	ret

	.unreq	swapper_ttb
	.unreq	flag_ptr
SYM_FUNC_END(idmap_kpti_install_ng_mappings)
	.popsection
#endif

/*
 *	__cpu_setup
 *
 *	Initialise the processor for turning the MMU on.
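 *
 *	The MMU is still off at this point; the caller writes the value
 *	returned in x0 to SCTLR_EL1 when it actually enables the MMU.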
 *
 * Input:
 *	x0 - actual number of VA bits (ignored unless VA_BITS > 48)
 * Output:
 *	Return in x0 the value of the SCTLR_EL1 register.
 */
	.pushsection ".idmap.text", "awx"
SYM_FUNC_START(__cpu_setup)
	tlbi	vmalle1				// Invalidate local TLB
	dsb	nsh

	mov	x1, #3 << 20
	msr	cpacr_el1, x1			// Enable FP/ASIMD
	mov	x1, #1 << 12			// Reset mdscr_el1 and disable
	msr	mdscr_el1, x1			// access to the DCC from EL0
	isb					// Unmask debug exceptions now,
	enable_dbg				// since this is per-cpu
	reset_pmuserenr_el0 x1			// Disable PMU access from EL0
	reset_amuserenr_el0 x1			// Disable AMU access from EL0

	/*
	 * Default values for VMSA control registers. These will be adjusted
	 * below depending on detected CPU features.
	 */
	mair	.req	x17
	tcr	.req	x16
	mov_q	mair, MAIR_EL1_SET
	mov_q	tcr, TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
			TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \
			TCR_TBI0 | TCR_A1 | TCR_KASAN_SW_FLAGS

#ifdef CONFIG_ARM64_MTE
	/*
	 * Update MAIR_EL1, GCR_EL1 and TFSR*_EL1 if MTE is supported
	 * (ID_AA64PFR1_EL1[11:8] > 1).
	 */
	mrs	x10, ID_AA64PFR1_EL1
	ubfx	x10, x10, #ID_AA64PFR1_MTE_SHIFT, #4
	cmp	x10, #ID_AA64PFR1_MTE
	b.lt	1f

	/* Normal Tagged memory type at the corresponding MAIR index */
	mov	x10, #MAIR_ATTR_NORMAL_TAGGED
	bfi	mair, x10, #(8 * MT_NORMAL_TAGGED), #8

	mov	x10, #KERNEL_GCR_EL1
	msr_s	SYS_GCR_EL1, x10

	/*
	 * If GCR_EL1.RRND=1 is implemented the same way as RRND=0, then
	 * RGSR_EL1.SEED must be non-zero for IRG to produce
	 * pseudorandom numbers. As RGSR_EL1 is UNKNOWN out of reset, we
	 * must initialize it.
	 */
	mrs	x10, CNTVCT_EL0
	ands	x10, x10, #SYS_RGSR_EL1_SEED_MASK
	csinc	x10, x10, xzr, ne
	lsl	x10, x10, #SYS_RGSR_EL1_SEED_SHIFT
	msr_s	SYS_RGSR_EL1, x10

	/* clear any pending tag check faults in TFSR*_EL1 */
	msr_s	SYS_TFSR_EL1, xzr
	msr_s	SYS_TFSRE0_EL1, xzr

	/* set the TCR_EL1 bits */
	mov_q	x10, TCR_MTE_FLAGS
	orr	tcr, tcr, x10
1:
#endif
	tcr_clear_errata_bits tcr, x9, x5

#ifdef CONFIG_ARM64_VA_BITS_52
	sub		x9, xzr, x0
	add		x9, x9, #64
	tcr_set_t1sz	tcr, x9
#else
	idmap_get_t0sz	x9
#endif
	tcr_set_t0sz	tcr, x9

	/*
	 * Set the IPS bits in TCR_EL1.
	 */
	tcr_compute_pa_size tcr, #TCR_IPS_SHIFT, x5, x6
#ifdef CONFIG_ARM64_HW_AFDBM
	/*
	 * Enable hardware update of the Access Flags bit.
	 * Hardware dirty bit management is enabled later,
	 * via capabilities.
	 */
	mrs	x9, ID_AA64MMFR1_EL1
	and	x9, x9, #0xf
	cbz	x9, 1f
	orr	tcr, tcr, #TCR_HA		// hardware Access flag update
1:
#endif	/* CONFIG_ARM64_HW_AFDBM */
	msr	mair_el1, mair
	msr	tcr_el1, tcr
	/*
	 * Prepare SCTLR
	 */
	mov_q	x0, INIT_SCTLR_EL1_MMU_ON
	ret					// return to head.S

	.unreq	mair
	.unreq	tcr
SYM_FUNC_END(__cpu_setup)