1/* SPDX-License-Identifier: GPL-2.0-only */ 2/* 3 * Based on arch/arm/mm/proc.S 4 * 5 * Copyright (C) 2001 Deep Blue Solutions Ltd. 6 * Copyright (C) 2012 ARM Ltd. 7 * Author: Catalin Marinas <catalin.marinas@arm.com> 8 */ 9 10#include <linux/init.h> 11#include <linux/linkage.h> 12#include <asm/assembler.h> 13#include <asm/asm-offsets.h> 14#include <asm/asm_pointer_auth.h> 15#include <asm/hwcap.h> 16#include <asm/pgtable.h> 17#include <asm/pgtable-hwdef.h> 18#include <asm/cpufeature.h> 19#include <asm/alternative.h> 20#include <asm/smp.h> 21 22#ifdef CONFIG_ARM64_64K_PAGES 23#define TCR_TG_FLAGS TCR_TG0_64K | TCR_TG1_64K 24#elif defined(CONFIG_ARM64_16K_PAGES) 25#define TCR_TG_FLAGS TCR_TG0_16K | TCR_TG1_16K 26#else /* CONFIG_ARM64_4K_PAGES */ 27#define TCR_TG_FLAGS TCR_TG0_4K | TCR_TG1_4K 28#endif 29 30#ifdef CONFIG_RANDOMIZE_BASE 31#define TCR_KASLR_FLAGS TCR_NFD1 32#else 33#define TCR_KASLR_FLAGS 0 34#endif 35 36#define TCR_SMP_FLAGS TCR_SHARED 37 38/* PTWs cacheable, inner/outer WBWA */ 39#define TCR_CACHE_FLAGS TCR_IRGN_WBWA | TCR_ORGN_WBWA 40 41#ifdef CONFIG_KASAN_SW_TAGS 42#define TCR_KASAN_FLAGS TCR_TBI1 43#else 44#define TCR_KASAN_FLAGS 0 45#endif 46 47/* Default MAIR_EL1 */ 48#define MAIR_EL1_SET \ 49 (MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRnE, MT_DEVICE_nGnRnE) | \ 50 MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRE, MT_DEVICE_nGnRE) | \ 51 MAIR_ATTRIDX(MAIR_ATTR_DEVICE_GRE, MT_DEVICE_GRE) | \ 52 MAIR_ATTRIDX(MAIR_ATTR_NORMAL_NC, MT_NORMAL_NC) | \ 53 MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL) | \ 54 MAIR_ATTRIDX(MAIR_ATTR_NORMAL_WT, MT_NORMAL_WT)) 55 56#ifdef CONFIG_CPU_PM 57/** 58 * cpu_do_suspend - save CPU registers context 59 * 60 * x0: virtual address of context pointer 61 */ 62SYM_FUNC_START(cpu_do_suspend) 63 mrs x2, tpidr_el0 64 mrs x3, tpidrro_el0 65 mrs x4, contextidr_el1 66 mrs x5, osdlr_el1 67 mrs x6, cpacr_el1 68 mrs x7, tcr_el1 69 mrs x8, vbar_el1 70 mrs x9, mdscr_el1 71 mrs x10, oslsr_el1 72 mrs x11, sctlr_el1 73alternative_if_not ARM64_HAS_VIRT_HOST_EXTN 74 mrs x12, tpidr_el1 75alternative_else 76 mrs x12, tpidr_el2 77alternative_endif 78 mrs x13, sp_el0 79 stp x2, x3, [x0] 80 stp x4, x5, [x0, #16] 81 stp x6, x7, [x0, #32] 82 stp x8, x9, [x0, #48] 83 stp x10, x11, [x0, #64] 84 stp x12, x13, [x0, #80] 85 ret 86SYM_FUNC_END(cpu_do_suspend) 87 88/** 89 * cpu_do_resume - restore CPU register context 90 * 91 * x0: Address of context pointer 92 */ 93 .pushsection ".idmap.text", "awx" 94SYM_FUNC_START(cpu_do_resume) 95 ldp x2, x3, [x0] 96 ldp x4, x5, [x0, #16] 97 ldp x6, x8, [x0, #32] 98 ldp x9, x10, [x0, #48] 99 ldp x11, x12, [x0, #64] 100 ldp x13, x14, [x0, #80] 101 msr tpidr_el0, x2 102 msr tpidrro_el0, x3 103 msr contextidr_el1, x4 104 msr cpacr_el1, x6 105 106 /* Don't change t0sz here, mask those bits when restoring */ 107 mrs x7, tcr_el1 108 bfi x8, x7, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH 109 110 msr tcr_el1, x8 111 msr vbar_el1, x9 112 113 /* 114 * __cpu_setup() cleared MDSCR_EL1.MDE and friends, before unmasking 115 * debug exceptions. By restoring MDSCR_EL1 here, we may take a debug 116 * exception. Mask them until local_daif_restore() in cpu_suspend() 117 * resets them. 118 */ 119 disable_daif 120 msr mdscr_el1, x10 121 122 msr sctlr_el1, x12 123alternative_if_not ARM64_HAS_VIRT_HOST_EXTN 124 msr tpidr_el1, x13 125alternative_else 126 msr tpidr_el2, x13 127alternative_endif 128 msr sp_el0, x14 129 /* 130 * Restore oslsr_el1 by writing oslar_el1 131 */ 132 msr osdlr_el1, x5 133 ubfx x11, x11, #1, #1 134 msr oslar_el1, x11 135 reset_pmuserenr_el0 x0 // Disable PMU access from EL0 136 reset_amuserenr_el0 x0 // Disable AMU access from EL0 137 138alternative_if ARM64_HAS_RAS_EXTN 139 msr_s SYS_DISR_EL1, xzr 140alternative_else_nop_endif 141 142 ptrauth_keys_install_kernel x14, 0, x1, x2, x3 143 isb 144 ret 145SYM_FUNC_END(cpu_do_resume) 146 .popsection 147#endif 148 149 .pushsection ".idmap.text", "awx" 150 151.macro __idmap_cpu_set_reserved_ttbr1, tmp1, tmp2 152 adrp \tmp1, empty_zero_page 153 phys_to_ttbr \tmp2, \tmp1 154 offset_ttbr1 \tmp2, \tmp1 155 msr ttbr1_el1, \tmp2 156 isb 157 tlbi vmalle1 158 dsb nsh 159 isb 160.endm 161 162/* 163 * void idmap_cpu_replace_ttbr1(phys_addr_t ttbr1) 164 * 165 * This is the low-level counterpart to cpu_replace_ttbr1, and should not be 166 * called by anything else. It can only be executed from a TTBR0 mapping. 167 */ 168SYM_FUNC_START(idmap_cpu_replace_ttbr1) 169 save_and_disable_daif flags=x2 170 171 __idmap_cpu_set_reserved_ttbr1 x1, x3 172 173 offset_ttbr1 x0, x3 174 msr ttbr1_el1, x0 175 isb 176 177 restore_daif x2 178 179 ret 180SYM_FUNC_END(idmap_cpu_replace_ttbr1) 181 .popsection 182 183#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 184 .pushsection ".idmap.text", "awx" 185 186 .macro __idmap_kpti_get_pgtable_ent, type 187 dc cvac, cur_\()\type\()p // Ensure any existing dirty 188 dmb sy // lines are written back before 189 ldr \type, [cur_\()\type\()p] // loading the entry 190 tbz \type, #0, skip_\()\type // Skip invalid and 191 tbnz \type, #11, skip_\()\type // non-global entries 192 .endm 193 194 .macro __idmap_kpti_put_pgtable_ent_ng, type 195 orr \type, \type, #PTE_NG // Same bit for blocks and pages 196 str \type, [cur_\()\type\()p] // Update the entry and ensure 197 dmb sy // that it is visible to all 198 dc civac, cur_\()\type\()p // CPUs. 199 .endm 200 201/* 202 * void __kpti_install_ng_mappings(int cpu, int num_cpus, phys_addr_t swapper) 203 * 204 * Called exactly once from stop_machine context by each CPU found during boot. 205 */ 206__idmap_kpti_flag: 207 .long 1 208SYM_FUNC_START(idmap_kpti_install_ng_mappings) 209 cpu .req w0 210 num_cpus .req w1 211 swapper_pa .req x2 212 swapper_ttb .req x3 213 flag_ptr .req x4 214 cur_pgdp .req x5 215 end_pgdp .req x6 216 pgd .req x7 217 cur_pudp .req x8 218 end_pudp .req x9 219 pud .req x10 220 cur_pmdp .req x11 221 end_pmdp .req x12 222 pmd .req x13 223 cur_ptep .req x14 224 end_ptep .req x15 225 pte .req x16 226 227 mrs swapper_ttb, ttbr1_el1 228 restore_ttbr1 swapper_ttb 229 adr flag_ptr, __idmap_kpti_flag 230 231 cbnz cpu, __idmap_kpti_secondary 232 233 /* We're the boot CPU. Wait for the others to catch up */ 234 sevl 2351: wfe 236 ldaxr w17, [flag_ptr] 237 eor w17, w17, num_cpus 238 cbnz w17, 1b 239 240 /* We need to walk swapper, so turn off the MMU. */ 241 pre_disable_mmu_workaround 242 mrs x17, sctlr_el1 243 bic x17, x17, #SCTLR_ELx_M 244 msr sctlr_el1, x17 245 isb 246 247 /* Everybody is enjoying the idmap, so we can rewrite swapper. */ 248 /* PGD */ 249 mov cur_pgdp, swapper_pa 250 add end_pgdp, cur_pgdp, #(PTRS_PER_PGD * 8) 251do_pgd: __idmap_kpti_get_pgtable_ent pgd 252 tbnz pgd, #1, walk_puds 253next_pgd: 254 __idmap_kpti_put_pgtable_ent_ng pgd 255skip_pgd: 256 add cur_pgdp, cur_pgdp, #8 257 cmp cur_pgdp, end_pgdp 258 b.ne do_pgd 259 260 /* Publish the updated tables and nuke all the TLBs */ 261 dsb sy 262 tlbi vmalle1is 263 dsb ish 264 isb 265 266 /* We're done: fire up the MMU again */ 267 mrs x17, sctlr_el1 268 orr x17, x17, #SCTLR_ELx_M 269 msr sctlr_el1, x17 270 isb 271 272 /* 273 * Invalidate the local I-cache so that any instructions fetched 274 * speculatively from the PoC are discarded, since they may have 275 * been dynamically patched at the PoU. 276 */ 277 ic iallu 278 dsb nsh 279 isb 280 281 /* Set the flag to zero to indicate that we're all done */ 282 str wzr, [flag_ptr] 283 ret 284 285 /* PUD */ 286walk_puds: 287 .if CONFIG_PGTABLE_LEVELS > 3 288 pte_to_phys cur_pudp, pgd 289 add end_pudp, cur_pudp, #(PTRS_PER_PUD * 8) 290do_pud: __idmap_kpti_get_pgtable_ent pud 291 tbnz pud, #1, walk_pmds 292next_pud: 293 __idmap_kpti_put_pgtable_ent_ng pud 294skip_pud: 295 add cur_pudp, cur_pudp, 8 296 cmp cur_pudp, end_pudp 297 b.ne do_pud 298 b next_pgd 299 .else /* CONFIG_PGTABLE_LEVELS <= 3 */ 300 mov pud, pgd 301 b walk_pmds 302next_pud: 303 b next_pgd 304 .endif 305 306 /* PMD */ 307walk_pmds: 308 .if CONFIG_PGTABLE_LEVELS > 2 309 pte_to_phys cur_pmdp, pud 310 add end_pmdp, cur_pmdp, #(PTRS_PER_PMD * 8) 311do_pmd: __idmap_kpti_get_pgtable_ent pmd 312 tbnz pmd, #1, walk_ptes 313next_pmd: 314 __idmap_kpti_put_pgtable_ent_ng pmd 315skip_pmd: 316 add cur_pmdp, cur_pmdp, #8 317 cmp cur_pmdp, end_pmdp 318 b.ne do_pmd 319 b next_pud 320 .else /* CONFIG_PGTABLE_LEVELS <= 2 */ 321 mov pmd, pud 322 b walk_ptes 323next_pmd: 324 b next_pud 325 .endif 326 327 /* PTE */ 328walk_ptes: 329 pte_to_phys cur_ptep, pmd 330 add end_ptep, cur_ptep, #(PTRS_PER_PTE * 8) 331do_pte: __idmap_kpti_get_pgtable_ent pte 332 __idmap_kpti_put_pgtable_ent_ng pte 333skip_pte: 334 add cur_ptep, cur_ptep, #8 335 cmp cur_ptep, end_ptep 336 b.ne do_pte 337 b next_pmd 338 339 .unreq cpu 340 .unreq num_cpus 341 .unreq swapper_pa 342 .unreq cur_pgdp 343 .unreq end_pgdp 344 .unreq pgd 345 .unreq cur_pudp 346 .unreq end_pudp 347 .unreq pud 348 .unreq cur_pmdp 349 .unreq end_pmdp 350 .unreq pmd 351 .unreq cur_ptep 352 .unreq end_ptep 353 .unreq pte 354 355 /* Secondary CPUs end up here */ 356__idmap_kpti_secondary: 357 /* Uninstall swapper before surgery begins */ 358 __idmap_cpu_set_reserved_ttbr1 x16, x17 359 360 /* Increment the flag to let the boot CPU we're ready */ 3611: ldxr w16, [flag_ptr] 362 add w16, w16, #1 363 stxr w17, w16, [flag_ptr] 364 cbnz w17, 1b 365 366 /* Wait for the boot CPU to finish messing around with swapper */ 367 sevl 3681: wfe 369 ldxr w16, [flag_ptr] 370 cbnz w16, 1b 371 372 /* All done, act like nothing happened */ 373 offset_ttbr1 swapper_ttb, x16 374 msr ttbr1_el1, swapper_ttb 375 isb 376 ret 377 378 .unreq swapper_ttb 379 .unreq flag_ptr 380SYM_FUNC_END(idmap_kpti_install_ng_mappings) 381 .popsection 382#endif 383 384/* 385 * __cpu_setup 386 * 387 * Initialise the processor for turning the MMU on. 388 * 389 * Input: 390 * x0 with a flag ARM64_CPU_BOOT_PRIMARY/ARM64_CPU_BOOT_SECONDARY/ARM64_CPU_RUNTIME. 391 * Output: 392 * Return in x0 the value of the SCTLR_EL1 register. 393 */ 394 .pushsection ".idmap.text", "awx" 395SYM_FUNC_START(__cpu_setup) 396 tlbi vmalle1 // Invalidate local TLB 397 dsb nsh 398 399 mov x1, #3 << 20 400 msr cpacr_el1, x1 // Enable FP/ASIMD 401 mov x1, #1 << 12 // Reset mdscr_el1 and disable 402 msr mdscr_el1, x1 // access to the DCC from EL0 403 isb // Unmask debug exceptions now, 404 enable_dbg // since this is per-cpu 405 reset_pmuserenr_el0 x1 // Disable PMU access from EL0 406 reset_amuserenr_el0 x1 // Disable AMU access from EL0 407 408 /* 409 * Memory region attributes 410 */ 411 mov_q x5, MAIR_EL1_SET 412 msr mair_el1, x5 413 /* 414 * Set/prepare TCR and TTBR. We use 512GB (39-bit) address range for 415 * both user and kernel. 416 */ 417 mov_q x10, TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \ 418 TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \ 419 TCR_TBI0 | TCR_A1 | TCR_KASAN_FLAGS 420 tcr_clear_errata_bits x10, x9, x5 421 422#ifdef CONFIG_ARM64_VA_BITS_52 423 ldr_l x9, vabits_actual 424 sub x9, xzr, x9 425 add x9, x9, #64 426 tcr_set_t1sz x10, x9 427#else 428 ldr_l x9, idmap_t0sz 429#endif 430 tcr_set_t0sz x10, x9 431 432 /* 433 * Set the IPS bits in TCR_EL1. 434 */ 435 tcr_compute_pa_size x10, #TCR_IPS_SHIFT, x5, x6 436#ifdef CONFIG_ARM64_HW_AFDBM 437 /* 438 * Enable hardware update of the Access Flags bit. 439 * Hardware dirty bit management is enabled later, 440 * via capabilities. 441 */ 442 mrs x9, ID_AA64MMFR1_EL1 443 and x9, x9, #0xf 444 cbz x9, 1f 445 orr x10, x10, #TCR_HA // hardware Access flag update 4461: 447#endif /* CONFIG_ARM64_HW_AFDBM */ 448 msr tcr_el1, x10 449 mov x1, x0 450 /* 451 * Prepare SCTLR 452 */ 453 mov_q x0, SCTLR_EL1_SET 454 455#ifdef CONFIG_ARM64_PTR_AUTH 456 /* No ptrauth setup for run time cpus */ 457 cmp x1, #ARM64_CPU_RUNTIME 458 b.eq 3f 459 460 /* Check if the CPU supports ptrauth */ 461 mrs x2, id_aa64isar1_el1 462 ubfx x2, x2, #ID_AA64ISAR1_APA_SHIFT, #8 463 cbz x2, 3f 464 465 /* 466 * The primary cpu keys are reset here and can be 467 * re-initialised with some proper values later. 468 */ 469 msr_s SYS_APIAKEYLO_EL1, xzr 470 msr_s SYS_APIAKEYHI_EL1, xzr 471 472 /* Just enable ptrauth for primary cpu */ 473 cmp x1, #ARM64_CPU_BOOT_PRIMARY 474 b.eq 2f 475 476 /* if !system_supports_address_auth() then skip enable */ 477alternative_if_not ARM64_HAS_ADDRESS_AUTH 478 b 3f 479alternative_else_nop_endif 480 481 /* Install ptrauth key for secondary cpus */ 482 adr_l x2, secondary_data 483 ldr x3, [x2, #CPU_BOOT_TASK] // get secondary_data.task 484 cbz x3, 2f // check for slow booting cpus 485 ldp x3, x4, [x2, #CPU_BOOT_PTRAUTH_KEY] 486 msr_s SYS_APIAKEYLO_EL1, x3 487 msr_s SYS_APIAKEYHI_EL1, x4 488 4892: /* Enable ptrauth instructions */ 490 ldr x2, =SCTLR_ELx_ENIA | SCTLR_ELx_ENIB | \ 491 SCTLR_ELx_ENDA | SCTLR_ELx_ENDB 492 orr x0, x0, x2 4933: 494#endif 495 ret // return to head.S 496SYM_FUNC_END(__cpu_setup) 497