/*
 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Cloned from linux/arch/arm/mach-vexpress/platsmp.c
 *
 *  Copyright (C) 2002 ARM Ltd.
 *  All Rights Reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/jiffies.h>
#include <linux/smp.h>
#include <linux/io.h>
#include <linux/of_address.h>

#include <asm/cacheflush.h>
#include <asm/cp15.h>
#include <asm/smp_plat.h>
#include <asm/smp_scu.h>
#include <asm/firmware.h>

#include <mach/map.h>

#include "common.h"
#include "regs-pmu.h"

extern void exynos4_secondary_startup(void);

/*
 * Set or clear the USE_DELAYED_RESET_ASSERTION option, set on Exynos4 SoCs
 * during hot-(un)plugging CPUx.
 *
 * The feature can be cleared safely during first boot of secondary CPU.
 *
 * Exynos4 SoCs require setting USE_DELAYED_RESET_ASSERTION during powering
 * down a CPU so the CPU idle clock down feature could properly detect global
 * idle state when CPUx is off.
 */
static void exynos_set_delayed_reset_assertion(u32 core_id, bool enable)
{
	if (soc_is_exynos4()) {
		unsigned int tmp;

		tmp = pmu_raw_readl(EXYNOS_ARM_CORE_OPTION(core_id));
		if (enable)
			tmp |= S5P_USE_DELAYED_RESET_ASSERTION;
		else
			tmp &= ~(S5P_USE_DELAYED_RESET_ASSERTION);
		pmu_raw_writel(tmp, EXYNOS_ARM_CORE_OPTION(core_id));
	}
}

#ifdef CONFIG_HOTPLUG_CPU
static inline void cpu_leave_lowpower(u32 core_id)
{
	unsigned int v;

	/* Turn the D-cache (SCTLR.C) and SMP coherency (ACTLR bit 6) back on. */
	asm volatile(
	"mrc	p15, 0, %0, c1, c0, 0\n"
	"	orr	%0, %0, %1\n"
	"	mcr	p15, 0, %0, c1, c0, 0\n"
	"	mrc	p15, 0, %0, c1, c0, 1\n"
	"	orr	%0, %0, %2\n"
	"	mcr	p15, 0, %0, c1, c0, 1\n"
	  : "=&r" (v)
	  : "Ir" (CR_C), "Ir" (0x40)
	  : "cc");

	exynos_set_delayed_reset_assertion(core_id, false);
}

static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
{
	u32 mpidr = cpu_logical_map(cpu);
	u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	for (;;) {

		/* Turn the CPU off on next WFI instruction. */
		exynos_cpu_power_down(core_id);

		/*
		 * Exynos4 SoCs require setting
		 * USE_DELAYED_RESET_ASSERTION so the CPU idle
		 * clock down feature could properly detect
		 * global idle state when CPUx is off.
		 */
		exynos_set_delayed_reset_assertion(core_id, true);

		wfi();

		if (pen_release == core_id) {
			/*
			 * OK, proper wakeup, we're done
			 */
			break;
		}

		/*
		 * Getting here, means that we have come out of WFI without
		 * having been woken up - this shouldn't happen
		 *
		 * Just note it happening - when we're woken, we can report
		 * its occurrence.
		 */
		(*spurious)++;
	}
}
#endif /* CONFIG_HOTPLUG_CPU */

/**
 * exynos_cpu_power_down : power down the specified cpu
 * @cpu : the cpu to power down
 *
 * Power down the specified cpu. The sequence must be finished by a
 * call to cpu_do_idle()
 *
 */
void exynos_cpu_power_down(int cpu)
{
	u32 core_conf;

	if (cpu == 0 && (of_machine_is_compatible("samsung,exynos5420") ||
		of_machine_is_compatible("samsung,exynos5800"))) {
		/*
		 * Bypass power down for CPU0 during suspend. Check for
		 * the SYS_PWR_REG value to decide if we are suspending
		 * the system.
		 */
		int val = pmu_raw_readl(EXYNOS5_ARM_CORE0_SYS_PWR_REG);

		if (!(val & S5P_CORE_LOCAL_PWR_EN))
			return;
	}

	core_conf = pmu_raw_readl(EXYNOS_ARM_CORE_CONFIGURATION(cpu));
	core_conf &= ~S5P_CORE_LOCAL_PWR_EN;
	pmu_raw_writel(core_conf, EXYNOS_ARM_CORE_CONFIGURATION(cpu));
}

/**
 * exynos_cpu_power_up : power up the specified cpu
 * @cpu : the cpu to power up
 *
 * Power up the specified cpu
 */
void exynos_cpu_power_up(int cpu)
{
	u32 core_conf = S5P_CORE_LOCAL_PWR_EN;

	if (soc_is_exynos3250())
		core_conf |= S5P_CORE_AUTOWAKEUP_EN;

	pmu_raw_writel(core_conf,
			EXYNOS_ARM_CORE_CONFIGURATION(cpu));
}

/**
 * exynos_cpu_power_state : returns the power state of the cpu
 * @cpu : the cpu to retrieve the power state from
 *
 */
int exynos_cpu_power_state(int cpu)
{
	return (pmu_raw_readl(EXYNOS_ARM_CORE_STATUS(cpu)) &
			S5P_CORE_LOCAL_PWR_EN);
}

/**
 * exynos_cluster_power_down : power down the specified cluster
 * @cluster : the cluster to power down
 */
void exynos_cluster_power_down(int cluster)
{
	pmu_raw_writel(0, EXYNOS_COMMON_CONFIGURATION(cluster));
}

/**
 * exynos_cluster_power_up : power up the specified cluster
 * @cluster : the cluster to power up
 */
void exynos_cluster_power_up(int cluster)
{
	pmu_raw_writel(S5P_CORE_LOCAL_PWR_EN,
			EXYNOS_COMMON_CONFIGURATION(cluster));
}

/**
 * exynos_cluster_power_state : returns the power state of the cluster
 * @cluster : the cluster to retrieve the power state from
 *
 */
int exynos_cluster_power_state(int cluster)
{
	return (pmu_raw_readl(EXYNOS_COMMON_STATUS(cluster)) &
		S5P_CORE_LOCAL_PWR_EN);
}

void __iomem *cpu_boot_reg_base(void)
{
	if (soc_is_exynos4210() && samsung_rev() == EXYNOS4210_REV_1_1)
		return pmu_base_addr + S5P_INFORM5;
	return sysram_base_addr;
}

static inline void __iomem *cpu_boot_reg(int cpu)
{
	void __iomem *boot_reg;

	boot_reg = cpu_boot_reg_base();
	if (!boot_reg)
		return ERR_PTR(-ENODEV);
	if (soc_is_exynos4412())
		boot_reg += 4*cpu;
	else if (soc_is_exynos5420() || soc_is_exynos5800())
		boot_reg += 4;
	return boot_reg;
}

/*
 * Set wake up by local power mode and execute software reset for given core.
 *
 * Currently this is needed only when booting secondary CPU on Exynos3250.
 */
static void exynos_core_restart(u32 core_id)
{
	u32 val;

	if (!of_machine_is_compatible("samsung,exynos3250"))
		return;

	while (!pmu_raw_readl(S5P_PMU_SPARE2))
		udelay(10);
	udelay(10);

	val = pmu_raw_readl(EXYNOS_ARM_CORE_STATUS(core_id));
	val |= S5P_CORE_WAKEUP_FROM_LOCAL_CFG;
	pmu_raw_writel(val, EXYNOS_ARM_CORE_STATUS(core_id));

	pr_info("CPU%u: Software reset\n", core_id);
	pmu_raw_writel(EXYNOS_CORE_PO_RESET(core_id), EXYNOS_SWRESET);
}

/*
 * Write pen_release in a way that is guaranteed to be visible to all
 * observers, irrespective of whether they're taking part in coherency
 * or not. This is necessary for the hotplug code to work reliably.
 */
static void write_pen_release(int val)
{
	pen_release = val;
	smp_wmb();
	sync_cache_w(&pen_release);
}

static void __iomem *scu_base_addr(void)
{
	return (void __iomem *)(S5P_VA_SCU);
}

static DEFINE_SPINLOCK(boot_lock);

static void exynos_secondary_init(unsigned int cpu)
{
	/*
	 * let the primary processor know we're out of the
	 * pen, then head off into the C entry point
	 */
	write_pen_release(-1);

	/*
	 * Synchronise with the boot thread.
	 */
	spin_lock(&boot_lock);
	spin_unlock(&boot_lock);
}

static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	unsigned long timeout;
	u32 mpidr = cpu_logical_map(cpu);
	u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	int ret = -ENOSYS;

	/*
	 * Set synchronisation state between this boot processor
	 * and the secondary one
	 */
	spin_lock(&boot_lock);

	/*
	 * The secondary processor is waiting to be released from
	 * the holding pen - release it, then wait for it to flag
	 * that it has been released by resetting pen_release.
	 *
	 * Note that "pen_release" is the hardware CPU core ID, whereas
	 * "cpu" is Linux's internal ID.
	 */
	write_pen_release(core_id);

	if (!exynos_cpu_power_state(core_id)) {
		exynos_cpu_power_up(core_id);
		timeout = 10;

		/* wait max 10 ms until cpu1 is on */
		while (exynos_cpu_power_state(core_id)
		       != S5P_CORE_LOCAL_PWR_EN) {
			if (timeout-- == 0)
				break;

			mdelay(1);
		}

		if (timeout == 0) {
			printk(KERN_ERR "cpu%u power enable failed\n", core_id);
			spin_unlock(&boot_lock);
			return -ETIMEDOUT;
		}
	}

	exynos_core_restart(core_id);

	/*
	 * Send the secondary CPU a soft interrupt, thereby causing
	 * the boot monitor to read the system wide flags register,
	 * and branch to the address found there.
	 */

	timeout = jiffies + (1 * HZ);
	while (time_before(jiffies, timeout)) {
		unsigned long boot_addr;

		smp_rmb();

		boot_addr = virt_to_phys(exynos4_secondary_startup);

		/*
		 * Try to set boot address using firmware first
		 * and fall back to boot register if it fails.
		 */
		ret = call_firmware_op(set_cpu_boot_addr, core_id, boot_addr);
		if (ret && ret != -ENOSYS)
			goto fail;
		if (ret == -ENOSYS) {
			void __iomem *boot_reg = cpu_boot_reg(core_id);

			if (IS_ERR(boot_reg)) {
				ret = PTR_ERR(boot_reg);
				goto fail;
			}
			__raw_writel(boot_addr, boot_reg);
		}

		call_firmware_op(cpu_boot, core_id);

		if (soc_is_exynos3250())
			dsb_sev();
		else
			arch_send_wakeup_ipi_mask(cpumask_of(cpu));

		if (pen_release == -1)
			break;

		udelay(10);
	}

	/* No harm if this is called during first boot of secondary CPU */
	exynos_set_delayed_reset_assertion(core_id, false);

	/*
	 * now the secondary core is starting up let it run its
	 * calibrations, then wait for it to finish
	 */
fail:
	spin_unlock(&boot_lock);

	return pen_release != -1 ? ret : 0;
}

/*
 * Initialise the CPU possible map early - this describes the CPUs
 * which may be present or become present in the system.
 */

static void __init exynos_smp_init_cpus(void)
{
	void __iomem *scu_base = scu_base_addr();
	unsigned int i, ncores;

	if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9)
		ncores = scu_base ? scu_get_core_count(scu_base) : 1;
	else
		/*
		 * CPU Nodes are passed thru DT and set_cpu_possible
		 * is set by "arm_dt_init_cpu_maps".
		 */
		return;

	/* sanity check */
	if (ncores > nr_cpu_ids) {
		pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
			ncores, nr_cpu_ids);
		ncores = nr_cpu_ids;
	}

	for (i = 0; i < ncores; i++)
		set_cpu_possible(i, true);
}

static void __init exynos_smp_prepare_cpus(unsigned int max_cpus)
{
	int i;

	exynos_sysram_init();

	if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9)
		scu_enable(scu_base_addr());

	/*
	 * Write the address of secondary startup into the
	 * system-wide flags register. The boot monitor waits
	 * until it receives a soft interrupt, and then the
	 * secondary CPU branches to this address.
	 *
	 * Try using firmware operation first and fall back to
	 * boot register if it fails.
	 */
	for (i = 1; i < max_cpus; ++i) {
		unsigned long boot_addr;
		u32 mpidr;
		u32 core_id;
		int ret;

		mpidr = cpu_logical_map(i);
		core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
		boot_addr = virt_to_phys(exynos4_secondary_startup);

		ret = call_firmware_op(set_cpu_boot_addr, core_id, boot_addr);
		if (ret && ret != -ENOSYS)
			break;
		if (ret == -ENOSYS) {
			void __iomem *boot_reg = cpu_boot_reg(core_id);

			if (IS_ERR(boot_reg))
				break;
			__raw_writel(boot_addr, boot_reg);
		}
	}
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * platform-specific code to shutdown a CPU
 *
 * Called with IRQs disabled
 */
static void exynos_cpu_die(unsigned int cpu)
{
	int spurious = 0;
	u32 mpidr = cpu_logical_map(cpu);
	u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	v7_exit_coherency_flush(louis);

	platform_do_lowpower(cpu, &spurious);

	/*
	 * bring this CPU back into the world of cache
	 * coherency, and then restore interrupts
	 */
	cpu_leave_lowpower(core_id);

	if (spurious)
		pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious);
}
#endif /* CONFIG_HOTPLUG_CPU */

struct smp_operations exynos_smp_ops __initdata = {
	.smp_init_cpus		= exynos_smp_init_cpus,
	.smp_prepare_cpus	= exynos_smp_prepare_cpus,
	.smp_secondary_init	= exynos_secondary_init,
	.smp_boot_secondary	= exynos_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_die		= exynos_cpu_die,
#endif
};