// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
//		http://www.samsung.com
//
// Cloned from linux/arch/arm/mach-vexpress/platsmp.c
//
//  Copyright (C) 2002 ARM Ltd.
//  All Rights Reserved

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/smp.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/soc/samsung/exynos-regs-pmu.h>

#include <asm/cacheflush.h>
#include <asm/cp15.h>
#include <asm/smp_plat.h>
#include <asm/smp_scu.h>
#include <asm/firmware.h>

#include <mach/map.h>

#include "common.h"

extern void exynos4_secondary_startup(void);

/* XXX exynos_pen_release is cargo culted code - DO NOT COPY XXX */
volatile int exynos_pen_release = -1;

#ifdef CONFIG_HOTPLUG_CPU
static inline void cpu_leave_lowpower(u32 core_id)
{
	unsigned int v;

	/* Turn the D-cache back on (SCTLR.C) and rejoin coherency (ACTLR.SMP). */
	asm volatile(
	"mrc	p15, 0, %0, c1, c0, 0\n"
	"	orr	%0, %0, %1\n"
	"	mcr	p15, 0, %0, c1, c0, 0\n"
	"	mrc	p15, 0, %0, c1, c0, 1\n"
	"	orr	%0, %0, %2\n"
	"	mcr	p15, 0, %0, c1, c0, 1\n"
	  : "=&r" (v)
	  : "Ir" (CR_C), "Ir" (0x40)
	  : "cc");
}

static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
{
	u32 mpidr = cpu_logical_map(cpu);
	u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	for (;;) {

		/* Turn the CPU off on next WFI instruction. */
		exynos_cpu_power_down(core_id);

		wfi();

		if (exynos_pen_release == core_id) {
			/*
			 * OK, proper wakeup, we're done
			 */
			break;
		}

		/*
		 * Getting here means that we have come out of WFI without
		 * having been woken up - this shouldn't happen.
		 *
		 * Just note it happening - when we're woken, we can report
		 * its occurrence.
		 */
		(*spurious)++;
	}
}
#endif /* CONFIG_HOTPLUG_CPU */

/**
 * exynos_cpu_power_down : power down the specified cpu
 * @cpu : the cpu to power down
 *
 * Power down the specified cpu. The sequence must be finished by a
 * call to cpu_do_idle().
 */
void exynos_cpu_power_down(int cpu)
{
	u32 core_conf;

	if (cpu == 0 && (soc_is_exynos5420() || soc_is_exynos5800())) {
		/*
		 * Bypass power down for CPU0 during suspend. Check for
		 * the SYS_PWR_REG value to decide if we are suspending
		 * the system.
		 */
		int val = pmu_raw_readl(EXYNOS5_ARM_CORE0_SYS_PWR_REG);

		if (!(val & S5P_CORE_LOCAL_PWR_EN))
			return;
	}

	core_conf = pmu_raw_readl(EXYNOS_ARM_CORE_CONFIGURATION(cpu));
	core_conf &= ~S5P_CORE_LOCAL_PWR_EN;
	pmu_raw_writel(core_conf, EXYNOS_ARM_CORE_CONFIGURATION(cpu));
}

/**
 * exynos_cpu_power_up : power up the specified cpu
 * @cpu : the cpu to power up
 *
 * Power up the specified cpu
 */
void exynos_cpu_power_up(int cpu)
{
	u32 core_conf = S5P_CORE_LOCAL_PWR_EN;

	if (soc_is_exynos3250())
		core_conf |= S5P_CORE_AUTOWAKEUP_EN;

	pmu_raw_writel(core_conf,
			EXYNOS_ARM_CORE_CONFIGURATION(cpu));
}

/**
 * exynos_cpu_power_state : returns the power state of the cpu
 * @cpu : the cpu to retrieve the power state from
 */
int exynos_cpu_power_state(int cpu)
{
	return (pmu_raw_readl(EXYNOS_ARM_CORE_STATUS(cpu)) &
			S5P_CORE_LOCAL_PWR_EN);
}

/**
 * exynos_cluster_power_down : power down the specified cluster
 * @cluster : the cluster to power down
 */
void exynos_cluster_power_down(int cluster)
{
	pmu_raw_writel(0, EXYNOS_COMMON_CONFIGURATION(cluster));
}

/**
 * exynos_cluster_power_up : power up the specified cluster
 * @cluster : the cluster to power up
 */
void exynos_cluster_power_up(int cluster)
{
	pmu_raw_writel(S5P_CORE_LOCAL_PWR_EN,
			EXYNOS_COMMON_CONFIGURATION(cluster));
}

/**
 * exynos_cluster_power_state : returns the power state of the cluster
 * @cluster : the cluster to retrieve the power state from
 */
int exynos_cluster_power_state(int cluster)
{
	return (pmu_raw_readl(EXYNOS_COMMON_STATUS(cluster)) &
		S5P_CORE_LOCAL_PWR_EN);
}

/**
 * exynos_scu_enable : enables SCU for Cortex-A9 based systems
 */
void exynos_scu_enable(void)
{
	struct device_node *np;
	static void __iomem *scu_base;

	if (!scu_base) {
		np = of_find_compatible_node(NULL, NULL, "arm,cortex-a9-scu");
		if (np) {
			scu_base = of_iomap(np, 0);
			of_node_put(np);
		} else {
			scu_base = ioremap(scu_a9_get_base(), SZ_4K);
		}
	}
	scu_enable(scu_base);
}

/*
 * Base of the secondary boot-address register: the PMU INFORM5 register on
 * Exynos4210 rev1.1, SYSRAM on all other SoCs.
 */
static void __iomem *cpu_boot_reg_base(void)
{
	if (soc_is_exynos4210() && samsung_rev() == EXYNOS4210_REV_1_1)
		return pmu_base_addr + S5P_INFORM5;
	return sysram_base_addr;
}

/*
 * Boot-address register for the given CPU: Exynos4412 uses one word per
 * core, Exynos5420/5800 use a single register at offset 4.
 */
static inline void __iomem *cpu_boot_reg(int cpu)
{
	void __iomem *boot_reg;

	boot_reg = cpu_boot_reg_base();
	if (!boot_reg)
		return IOMEM_ERR_PTR(-ENODEV);
	if (soc_is_exynos4412())
		boot_reg += 4*cpu;
	else if (soc_is_exynos5420() || soc_is_exynos5800())
		boot_reg += 4;
	return boot_reg;
}

/*
 * Set wake up by local power mode and execute software reset for the given
 * core.
 *
 * Currently this is needed only when booting a secondary CPU on Exynos3250.
 */
void exynos_core_restart(u32 core_id)
{
	unsigned int timeout = 16;
	u32 val;

	if (!of_machine_is_compatible("samsung,exynos3250"))
		return;

	while (timeout && !pmu_raw_readl(S5P_PMU_SPARE2)) {
		timeout--;
		udelay(10);
	}
	if (timeout == 0) {
		pr_err("cpu core %u restart failed\n", core_id);
		return;
	}
	udelay(10);

	val = pmu_raw_readl(EXYNOS_ARM_CORE_STATUS(core_id));
	val |= S5P_CORE_WAKEUP_FROM_LOCAL_CFG;
	pmu_raw_writel(val, EXYNOS_ARM_CORE_STATUS(core_id));

	pmu_raw_writel(EXYNOS_CORE_PO_RESET(core_id), EXYNOS_SWRESET);
}

/*
 * XXX CARGO CULTED CODE - DO NOT COPY XXX
 *
 * Write exynos_pen_release in a way that is guaranteed to be visible to
 * all observers, irrespective of whether they're taking part in coherency
 * or not. This is necessary for the hotplug code to work reliably.
 */
static void exynos_write_pen_release(int val)
{
	exynos_pen_release = val;
	smp_wmb();
	sync_cache_w(&exynos_pen_release);
}

static DEFINE_SPINLOCK(boot_lock);

static void exynos_secondary_init(unsigned int cpu)
{
	/*
	 * let the primary processor know we're out of the
	 * pen, then head off into the C entry point
	 */
	exynos_write_pen_release(-1);

	/*
	 * Synchronise with the boot thread.
	 */
	spin_lock(&boot_lock);
	spin_unlock(&boot_lock);
}

int exynos_set_boot_addr(u32 core_id, unsigned long boot_addr)
{
	int ret;

	/*
	 * Try to set boot address using firmware first
	 * and fall back to boot register if it fails.
	 */
	ret = call_firmware_op(set_cpu_boot_addr, core_id, boot_addr);
	if (ret && ret != -ENOSYS)
		goto fail;
	if (ret == -ENOSYS) {
		void __iomem *boot_reg = cpu_boot_reg(core_id);

		if (IS_ERR(boot_reg)) {
			ret = PTR_ERR(boot_reg);
			goto fail;
		}
		writel_relaxed(boot_addr, boot_reg);
		ret = 0;
	}
fail:
	return ret;
}

int exynos_get_boot_addr(u32 core_id, unsigned long *boot_addr)
{
	int ret;

	/*
	 * Try to get boot address using firmware first
	 * and fall back to boot register if it fails.
	 */
	ret = call_firmware_op(get_cpu_boot_addr, core_id, boot_addr);
	if (ret && ret != -ENOSYS)
		goto fail;
	if (ret == -ENOSYS) {
		void __iomem *boot_reg = cpu_boot_reg(core_id);

		if (IS_ERR(boot_reg)) {
			ret = PTR_ERR(boot_reg);
			goto fail;
		}
		*boot_addr = readl_relaxed(boot_reg);
		ret = 0;
	}
fail:
	return ret;
}

static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	unsigned long timeout;
	u32 mpidr = cpu_logical_map(cpu);
	u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	int ret = -ENOSYS;

	/*
	 * Set synchronisation state between this boot processor
	 * and the secondary one
	 */
	spin_lock(&boot_lock);

	/*
	 * The secondary processor is waiting to be released from
	 * the holding pen - release it, then wait for it to flag
	 * that it has been released by resetting exynos_pen_release.
	 *
	 * Note that "exynos_pen_release" is the hardware CPU core ID, whereas
	 * "cpu" is Linux's internal ID.
	 */
	exynos_write_pen_release(core_id);

	if (!exynos_cpu_power_state(core_id)) {
		exynos_cpu_power_up(core_id);
		timeout = 10;

		/* wait max 10 ms until cpu1 is on */
		while (exynos_cpu_power_state(core_id)
		       != S5P_CORE_LOCAL_PWR_EN) {
			if (timeout == 0)
				break;
			timeout--;
			mdelay(1);
		}

		if (timeout == 0) {
			printk(KERN_ERR "cpu1 power enable failed\n");
			spin_unlock(&boot_lock);
			return -ETIMEDOUT;
		}
	}

	exynos_core_restart(core_id);

	/*
	 * Send the secondary CPU a soft interrupt, thereby causing
	 * the boot monitor to read the system wide flags register,
	 * and branch to the address found there.
	 */

	timeout = jiffies + (1 * HZ);
	while (time_before(jiffies, timeout)) {
		unsigned long boot_addr;

		smp_rmb();

		boot_addr = __pa_symbol(exynos4_secondary_startup);

		ret = exynos_set_boot_addr(core_id, boot_addr);
		if (ret)
			goto fail;

		call_firmware_op(cpu_boot, core_id);

		if (soc_is_exynos3250())
			dsb_sev();
		else
			arch_send_wakeup_ipi_mask(cpumask_of(cpu));

		if (exynos_pen_release == -1)
			break;

		udelay(10);
	}

	if (exynos_pen_release != -1)
		ret = -ETIMEDOUT;

	/*
	 * now the secondary core is starting up, let it run its
	 * calibrations, then wait for it to finish
	 */
fail:
	spin_unlock(&boot_lock);

	return exynos_pen_release != -1 ? ret : 0;
}

/*
 * Platform-specific SMP preparation: map SYSRAM (which holds the CPU boot
 * registers), enable delayed reset assertion and, on Cortex-A9 parts, turn
 * on the SCU.
 */
static void __init exynos_smp_prepare_cpus(unsigned int max_cpus)
{
	exynos_sysram_init();

	exynos_set_delayed_reset_assertion(true);

	if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9)
		exynos_scu_enable();
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * platform-specific code to shut down a CPU
 *
 * Called with IRQs disabled
 */
static void exynos_cpu_die(unsigned int cpu)
{
	int spurious = 0;
	u32 mpidr = cpu_logical_map(cpu);
	u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	v7_exit_coherency_flush(louis);

	platform_do_lowpower(cpu, &spurious);

	/*
	 * bring this CPU back into the world of cache
	 * coherency, and then restore interrupts
	 */
	cpu_leave_lowpower(core_id);

	if (spurious)
		pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious);
}
#endif /* CONFIG_HOTPLUG_CPU */

const struct smp_operations exynos_smp_ops __initconst = {
	.smp_prepare_cpus	= exynos_smp_prepare_cpus,
	.smp_secondary_init	= exynos_secondary_init,
	.smp_boot_secondary	= exynos_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_die		= exynos_cpu_die,
#endif
};
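
/*
 * Usage sketch: on DT-based Exynos machines these operations are hooked up
 * from the machine descriptor via smp_ops(), roughly along the lines of the
 * following (see arch/arm/mach-exynos/exynos.c for the actual wiring):
 *
 *	DT_MACHINE_START(EXYNOS_DT, "Samsung Exynos (Flattened Device Tree)")
 *		.smp		= smp_ops(exynos_smp_ops),
 *		...
 *	MACHINE_END
 */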