/*
 * OMAP4 SMP source file. It contains platform specific functions
 * needed for the linux smp kernel.
 *
 * Copyright (C) 2009 Texas Instruments, Inc.
 *
 * Author:
 *      Santosh Shilimkar <santosh.shilimkar@ti.com>
 *
 * Platform file needed for the OMAP4 SMP. This file is based on arm
 * realview smp platform.
 * Copyright (c) 2002 ARM Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/device.h>
#include <linux/smp.h>
#include <linux/io.h>
#include <linux/irqchip/arm-gic.h>

#include <asm/sections.h>
#include <asm/smp_scu.h>
#include <asm/virt.h>

#include "omap-secure.h"
#include "omap-wakeupgen.h"
#include <asm/cputype.h>

#include "soc.h"
#include "iomap.h"
#include "common.h"
#include "clockdomain.h"
#include "pm.h"

#define CPU_MASK			0xff0ffff0
#define CPU_CORTEX_A9			0x410FC090
#define CPU_CORTEX_A15			0x410FC0F0

#define OMAP5_CORE_COUNT		0x2

#define AUX_CORE_BOOT0_GP_RELEASE	0x020
#define AUX_CORE_BOOT0_HS_RELEASE	0x200

struct omap_smp_config {
	unsigned long cpu1_rstctrl_pa;
	void __iomem *cpu1_rstctrl_va;
	void __iomem *scu_base;
	void __iomem *wakeupgen_base;
	void *startup_addr;
};

static struct omap_smp_config cfg;

static const struct omap_smp_config omap443x_cfg __initconst = {
	.cpu1_rstctrl_pa = 0x4824380c,
	.startup_addr = omap4_secondary_startup,
};

static const struct omap_smp_config omap446x_cfg __initconst = {
	.cpu1_rstctrl_pa = 0x4824380c,
	.startup_addr = omap4460_secondary_startup,
};

static const struct omap_smp_config omap5_cfg __initconst = {
	.cpu1_rstctrl_pa = 0x48243810,
	.startup_addr = omap5_secondary_startup,
};

static DEFINE_SPINLOCK(boot_lock);

void __iomem *omap4_get_scu_base(void)
{
	return cfg.scu_base;
}

#ifdef CONFIG_OMAP5_ERRATA_801819
void omap5_erratum_workaround_801819(void)
{
	u32 acr, revidr;
	u32 acr_mask;

	/* REVIDR[3] indicates erratum fix available on silicon */
	asm volatile ("mrc p15, 0, %0, c0, c0, 6" : "=r" (revidr));
	if (revidr & (0x1 << 3))
		return;

	asm volatile ("mrc p15, 0, %0, c1, c0, 1" : "=r" (acr));
	/*
	 * BIT(27) - Disables streaming. All write-allocate lines allocate in
	 * the L1 or L2 cache.
	 * BIT(25) - Disables streaming. All write-allocate lines allocate in
	 * the L1 cache.
	 */
	acr_mask = (0x3 << 25) | (0x3 << 27);
	/* Skip the expensive smc call if the bits are already set */
	if ((acr & acr_mask) == acr_mask)
		return;

	acr |= acr_mask;
	omap_smc1(OMAP5_DRA7_MON_SET_ACR_INDEX, acr);

	pr_debug("%s: ARM erratum workaround 801819 applied on CPU%d\n",
		 __func__, smp_processor_id());
}
#else
static inline void omap5_erratum_workaround_801819(void) { }
#endif

#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
/*
 * Configure ACR and enable ACTLR[0] (Enable invalidates of BTB with
 * ICIALLU) to activate the workaround for the secondary core.
 * NOTE: it is assumed that the primary core's configuration is done
 * by the boot loader (the kernel will detect a misconfiguration and
 * complain if this is not done).
 *
 * On General Purpose (GP) devices, ACR bit settings can only be done
 * by ROM code in "secure world" using the smc call, and there is no
 * option to update the "firmware" on such devices. This also works for
 * High Security (HS) devices, as a backup option in case the
 * "update" is not done in the "security firmware".
 */
static void omap5_secondary_harden_predictor(void)
{
	u32 acr, acr_mask;

	asm volatile ("mrc p15, 0, %0, c1, c0, 1" : "=r" (acr));

	/*
	 * ACTLR[0] (Enable invalidates of BTB with ICIALLU)
	 */
	acr_mask = BIT(0);

	/* Skip the expensive smc call if the bit is already set */
	if ((acr & acr_mask) == acr_mask)
		return;

	acr |= acr_mask;
	omap_smc1(OMAP5_DRA7_MON_SET_ACR_INDEX, acr);

	pr_debug("%s: ARM ACR setup for CVE_2017_5715 applied on CPU%d\n",
		 __func__, smp_processor_id());
}
#else
static inline void omap5_secondary_harden_predictor(void) { }
#endif

static void omap4_secondary_init(unsigned int cpu)
{
	/*
	 * Configure ACTLR and enable NS SMP bit access on CPU1 on HS device.
	 * OMAP44XX EMU/HS devices - CPU0 SMP bit access is enabled in PPA
	 * init and for CPU1, a secure PPA API is provided. CPU0 must be ON
	 * while executing NS_SMP API on CPU1 and PPA version must be 1.4.0+.
	 * OMAP443X GP devices - SMP bit isn't accessible.
	 * OMAP446X GP devices - SMP bit access is enabled on both CPUs.
	 */
	if (soc_is_omap443x() && (omap_type() != OMAP2_DEVICE_TYPE_GP))
		omap_secure_dispatcher(OMAP4_PPA_CPU_ACTRL_SMP_INDEX,
				       4, 0, 0, 0, 0, 0);

	if (soc_is_omap54xx() || soc_is_dra7xx()) {
		/*
		 * Configure the CNTFRQ register for the secondary CPUs,
		 * which indicates the frequency of the CPU local timers.
		 */
		set_cntfreq();
		/* Configure ACR to disable streaming WA for 801819 */
		omap5_erratum_workaround_801819();
		/* Enable ACR to allow for ICIALLU workaround */
		omap5_secondary_harden_predictor();
	}

	/*
	 * Synchronise with the boot thread.
	 */
	spin_lock(&boot_lock);
	spin_unlock(&boot_lock);
}

static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	static struct clockdomain *cpu1_clkdm;
	static bool booted;
	static struct powerdomain *cpu1_pwrdm;

	/*
	 * Set synchronisation state between this boot processor
	 * and the secondary one.
	 */
	spin_lock(&boot_lock);

	/*
	 * Update the AuxCoreBoot0 with boot state for secondary core.
	 * omap4_secondary_startup() routine will hold the secondary core till
	 * the AuxCoreBoot1 register is updated with cpu state.
	 * A barrier is added to ensure that write buffer is drained.
	 */
	if (omap_secure_apis_support())
		omap_modify_auxcoreboot0(AUX_CORE_BOOT0_HS_RELEASE,
					 0xfffffdff);
	else
		writel_relaxed(AUX_CORE_BOOT0_GP_RELEASE,
			       cfg.wakeupgen_base + OMAP_AUX_CORE_BOOT_0);

	if (!cpu1_clkdm && !cpu1_pwrdm) {
		cpu1_clkdm = clkdm_lookup("mpu1_clkdm");
		cpu1_pwrdm = pwrdm_lookup("cpu1_pwrdm");
	}

	/*
	 * The SGIs (Software Generated Interrupts) are not wakeup capable
	 * from low power states. This is a known limitation on OMAP4 and
	 * needs to be worked around by using software forced clockdomain
	 * wake-up. To wake up CPU1, CPU0 forces the CPU1 clockdomain to
	 * software force wakeup. The clockdomain is then put back to
	 * hardware supervised mode.
	 * More details can be found in OMAP4430 TRM - Version J
	 * Section:
	 * 4.3.4.2 Power States of CPU0 and CPU1
	 */
	if (booted && cpu1_pwrdm && cpu1_clkdm) {
		/*
		 * The GIC distributor control register has changed between
		 * Cortex-A9 r1pX and r2pX. The Control Register secure
		 * banked version is now composed of 2 bits:
		 * bit 0 == Secure Enable
		 * bit 1 == Non-Secure Enable
		 * The Non-Secure banked register has not changed.
		 * Because the ROM Code is based on the r1pX GIC, the CPU1
		 * GIC restoration will cause a problem to CPU0 Non-Secure SW.
		 * The workaround must be:
		 * 1) Before doing the CPU1 wakeup, CPU0 must disable
		 *    the GIC distributor
		 * 2) CPU1 must re-enable the GIC distributor on
		 *    its wakeup path.
		 */
		if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD)) {
			local_irq_disable();
			gic_dist_disable();
		}

		/*
		 * Ensure that CPU power state is set to ON to avoid CPU
		 * powerdomain transition on wfi.
		 */
		clkdm_deny_idle_nolock(cpu1_clkdm);
		pwrdm_set_next_pwrst(cpu1_pwrdm, PWRDM_POWER_ON);
		clkdm_allow_idle_nolock(cpu1_clkdm);

		if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD)) {
			while (gic_dist_disabled()) {
				udelay(1);
				cpu_relax();
			}
			gic_timer_retrigger();
			local_irq_enable();
		}
	} else {
		dsb_sev();
		booted = true;
	}

	arch_send_wakeup_ipi_mask(cpumask_of(cpu));

	/*
	 * Now the secondary core is starting up, let it run its
	 * calibrations, then wait for it to finish.
	 */
	spin_unlock(&boot_lock);

	return 0;
}

/*
 * Initialise the CPU possible map early - this describes the CPUs
 * which may be present or become present in the system.
 */
static void __init omap4_smp_init_cpus(void)
{
	unsigned int i = 0, ncores = 1, cpu_id;

	/* Use ARM cpuid check here, as SoC detection will not work so early */
	cpu_id = read_cpuid_id() & CPU_MASK;
	if (cpu_id == CPU_CORTEX_A9) {
		/*
		 * Currently we can't call ioremap here because
		 * SoC detection won't work until after init_early.
		 */
		cfg.scu_base = OMAP2_L4_IO_ADDRESS(scu_a9_get_base());
		BUG_ON(!cfg.scu_base);
		ncores = scu_get_core_count(cfg.scu_base);
	} else if (cpu_id == CPU_CORTEX_A15) {
		ncores = OMAP5_CORE_COUNT;
	}

	/* sanity check */
	if (ncores > nr_cpu_ids) {
		pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
			ncores, nr_cpu_ids);
		ncores = nr_cpu_ids;
	}

	for (i = 0; i < ncores; i++)
		set_cpu_possible(i, true);
}

/*
 * For now, just make sure the start-up address is not within the booting
 * kernel space as that means we just overwrote whatever secondary_startup()
 * code there was.
 */
static bool __init omap4_smp_cpu1_startup_valid(unsigned long addr)
{
	if ((addr >= __pa(PAGE_OFFSET)) && (addr <= __pa(__bss_start)))
		return false;

	return true;
}

/*
 * We may need to reset CPU1 before configuring, otherwise kexec boot can end
 * up trying to use old kernel startup address or suspend-resume will
 * occasionally fail to bring up CPU1 on 4430 if CPU1 fails to enter deeper
 * idle states.
 */
static void __init omap4_smp_maybe_reset_cpu1(struct omap_smp_config *c)
{
	unsigned long cpu1_startup_pa, cpu1_ns_pa_addr;
	bool needs_reset = false;
	u32 released;

	if (omap_secure_apis_support())
		released = omap_read_auxcoreboot0() & AUX_CORE_BOOT0_HS_RELEASE;
	else
		released = readl_relaxed(cfg.wakeupgen_base +
					 OMAP_AUX_CORE_BOOT_0) &
			AUX_CORE_BOOT0_GP_RELEASE;
	if (released) {
		pr_warn("smp: CPU1 not parked?\n");

		return;
	}

	cpu1_startup_pa = readl_relaxed(cfg.wakeupgen_base +
					OMAP_AUX_CORE_BOOT_1);

	/* Did the configured secondary_startup() get overwritten? */
	if (!omap4_smp_cpu1_startup_valid(cpu1_startup_pa))
		needs_reset = true;

	/*
	 * If omap4 or 5 has NS_PA_ADDR configured, CPU1 may be in a
	 * deeper idle state in WFI and will wake to an invalid address.
	 */
	if ((soc_is_omap44xx() || soc_is_omap54xx())) {
		cpu1_ns_pa_addr = omap4_get_cpu1_ns_pa_addr();
		if (!omap4_smp_cpu1_startup_valid(cpu1_ns_pa_addr))
			needs_reset = true;
	} else {
		cpu1_ns_pa_addr = 0;
	}

	if (!needs_reset || !c->cpu1_rstctrl_va)
		return;

	pr_info("smp: CPU1 parked within kernel, needs reset (0x%lx 0x%lx)\n",
		cpu1_startup_pa, cpu1_ns_pa_addr);

	writel_relaxed(1, c->cpu1_rstctrl_va);
	readl_relaxed(c->cpu1_rstctrl_va);
	writel_relaxed(0, c->cpu1_rstctrl_va);
}

static void __init omap4_smp_prepare_cpus(unsigned int max_cpus)
{
	const struct omap_smp_config *c = NULL;

	if (soc_is_omap443x())
		c = &omap443x_cfg;
	else if (soc_is_omap446x())
		c = &omap446x_cfg;
	else if (soc_is_dra74x() || soc_is_omap54xx() || soc_is_dra76x())
		c = &omap5_cfg;

	if (!c) {
		pr_err("%s Unknown SMP SoC?\n", __func__);
		return;
	}

	/* Must preserve cfg.scu_base set earlier */
	cfg.cpu1_rstctrl_pa = c->cpu1_rstctrl_pa;
	cfg.startup_addr = c->startup_addr;
	cfg.wakeupgen_base = omap_get_wakeupgen_base();

	if (soc_is_dra74x() || soc_is_omap54xx() || soc_is_dra76x()) {
		if ((__boot_cpu_mode & MODE_MASK) == HYP_MODE)
			cfg.startup_addr = omap5_secondary_hyp_startup;
		omap5_erratum_workaround_801819();
	}

	cfg.cpu1_rstctrl_va = ioremap(cfg.cpu1_rstctrl_pa, 4);
	if (!cfg.cpu1_rstctrl_va)
		return;

	/*
	 * Initialise the SCU and wake up the secondary core using
	 * wakeup_secondary().
	 */
	if (cfg.scu_base)
		scu_enable(cfg.scu_base);

	omap4_smp_maybe_reset_cpu1(&cfg);

	/*
	 * Write the address of secondary startup routine into the
	 * AuxCoreBoot1 where ROM code will jump and start executing
	 * on secondary core once out of WFE.
	 * A barrier is added to ensure that write buffer is drained.
	 */
	if (omap_secure_apis_support())
		omap_auxcoreboot_addr(__pa_symbol(cfg.startup_addr));
	else
		writel_relaxed(__pa_symbol(cfg.startup_addr),
			       cfg.wakeupgen_base + OMAP_AUX_CORE_BOOT_1);
}

const struct smp_operations omap4_smp_ops __initconst = {
	.smp_init_cpus		= omap4_smp_init_cpus,
	.smp_prepare_cpus	= omap4_smp_prepare_cpus,
	.smp_secondary_init	= omap4_secondary_init,
	.smp_boot_secondary	= omap4_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_die		= omap4_cpu_die,
	.cpu_kill		= omap4_cpu_kill,
#endif
};