/*
 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Cloned from linux/arch/arm/mach-vexpress/platsmp.c
 *
 *  Copyright (C) 2002 ARM Ltd.
 *  All Rights Reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/jiffies.h>
#include <linux/smp.h>
#include <linux/io.h>
#include <linux/of_address.h>

#include <asm/cacheflush.h>
#include <asm/cp15.h>
#include <asm/smp_plat.h>
#include <asm/smp_scu.h>
#include <asm/firmware.h>

#include <mach/map.h>

#include "common.h"
#include "regs-pmu.h"

extern void exynos4_secondary_startup(void);

/*
 * Set or clear the USE_DELAYED_RESET_ASSERTION option, set on Exynos4 SoCs
 * during hot-(un)plugging CPUx.
 *
 * The feature can be cleared safely during first boot of secondary CPU.
 *
 * Exynos4 SoCs require setting USE_DELAYED_RESET_ASSERTION during powering
 * down a CPU so the CPU idle clock down feature could properly detect global
 * idle state when CPUx is off.
 */
static void exynos_set_delayed_reset_assertion(u32 core_id, bool enable)
{
	if (soc_is_exynos4()) {
		unsigned int tmp;

		tmp = pmu_raw_readl(EXYNOS_ARM_CORE_OPTION(core_id));
		if (enable)
			tmp |= S5P_USE_DELAYED_RESET_ASSERTION;
		else
			tmp &= ~(S5P_USE_DELAYED_RESET_ASSERTION);
		pmu_raw_writel(tmp, EXYNOS_ARM_CORE_OPTION(core_id));
	}
}

#ifdef CONFIG_HOTPLUG_CPU
static inline void cpu_leave_lowpower(u32 core_id)
{
	unsigned int v;

	/*
	 * Turn the D-cache (SCTLR.C) and the SMP coherency bit (ACTLR bit 6)
	 * back on; both were cleared by v7_exit_coherency_flush() before the
	 * core entered low power.
	 */
	asm volatile(
	"mrc	p15, 0, %0, c1, c0, 0\n"
	"	orr	%0, %0, %1\n"
	"	mcr	p15, 0, %0, c1, c0, 0\n"
	"	mrc	p15, 0, %0, c1, c0, 1\n"
	"	orr	%0, %0, %2\n"
	"	mcr	p15, 0, %0, c1, c0, 1\n"
	  : "=&r" (v)
	  : "Ir" (CR_C), "Ir" (0x40)
	  : "cc");

	exynos_set_delayed_reset_assertion(core_id, false);
}

static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
{
	u32 mpidr = cpu_logical_map(cpu);
	u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	for (;;) {

		/* Turn the CPU off on next WFI instruction. */
		exynos_cpu_power_down(core_id);

		/*
		 * Exynos4 SoCs require setting
		 * USE_DELAYED_RESET_ASSERTION so the CPU idle
		 * clock down feature could properly detect
		 * global idle state when CPUx is off.
		 */
		exynos_set_delayed_reset_assertion(core_id, true);

		wfi();

		if (pen_release == core_id) {
			/*
			 * OK, proper wakeup, we're done
			 */
			break;
		}

		/*
		 * Getting here, means that we have come out of WFI without
		 * having been woken up - this shouldn't happen
		 *
		 * Just note it happening - when we're woken, we can report
		 * its occurrence.
		 */
		(*spurious)++;
	}
}
#endif /* CONFIG_HOTPLUG_CPU */

/**
 * exynos_cpu_power_down : power down the specified cpu
 * @cpu : the cpu to power down
 *
 * Power down the specified cpu. The sequence must be finished by a
 * call to cpu_do_idle()
 *
 */
void exynos_cpu_power_down(int cpu)
{
	if (cpu == 0 && (soc_is_exynos5420() || soc_is_exynos5800())) {
		/*
		 * Bypass power down for CPU0 during suspend. Check for
		 * the SYS_PWR_REG value to decide if we are suspending
		 * the system.
		 */
		int val = pmu_raw_readl(EXYNOS5_ARM_CORE0_SYS_PWR_REG);

		if (!(val & S5P_CORE_LOCAL_PWR_EN))
			return;
	}
	pmu_raw_writel(0, EXYNOS_ARM_CORE_CONFIGURATION(cpu));
}

/**
 * exynos_cpu_power_up : power up the specified cpu
 * @cpu : the cpu to power up
 *
 * Power up the specified cpu
 */
void exynos_cpu_power_up(int cpu)
{
	pmu_raw_writel(S5P_CORE_LOCAL_PWR_EN,
			EXYNOS_ARM_CORE_CONFIGURATION(cpu));
}

/**
 * exynos_cpu_power_state : returns the power state of the cpu
 * @cpu : the cpu to retrieve the power state from
 *
 */
int exynos_cpu_power_state(int cpu)
{
	return (pmu_raw_readl(EXYNOS_ARM_CORE_STATUS(cpu)) &
			S5P_CORE_LOCAL_PWR_EN);
}
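
/*
 * Usage sketch for the helpers above: callers bring a core up and then poll
 * its power state, roughly
 *
 *	exynos_cpu_power_up(core_id);
 *	while (exynos_cpu_power_state(core_id) != S5P_CORE_LOCAL_PWR_EN)
 *		mdelay(1);
 *
 * which (with a bounded timeout) is the sequence exynos_boot_secondary()
 * below uses when releasing a secondary core.
 */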

/**
 * exynos_cluster_power_down : power down the specified cluster
 * @cluster : the cluster to power down
 */
void exynos_cluster_power_down(int cluster)
{
	pmu_raw_writel(0, EXYNOS_COMMON_CONFIGURATION(cluster));
}

/**
 * exynos_cluster_power_up : power up the specified cluster
 * @cluster : the cluster to power up
 */
void exynos_cluster_power_up(int cluster)
{
	pmu_raw_writel(S5P_CORE_LOCAL_PWR_EN,
			EXYNOS_COMMON_CONFIGURATION(cluster));
}

/**
 * exynos_cluster_power_state : returns the power state of the cluster
 * @cluster : the cluster to retrieve the power state from
 *
 */
int exynos_cluster_power_state(int cluster)
{
	return (pmu_raw_readl(EXYNOS_COMMON_STATUS(cluster)) &
		S5P_CORE_LOCAL_PWR_EN);
}

/*
 * Location of the secondary CPU boot address: the INFORM5 PMU register on
 * Exynos4210 Rev1.1, otherwise a word in sysram (one word per CPU on
 * Exynos4412, offset 4 on Exynos5420/5800).
 */
void __iomem *cpu_boot_reg_base(void)
{
	if (soc_is_exynos4210() && samsung_rev() == EXYNOS4210_REV_1_1)
		return pmu_base_addr + S5P_INFORM5;
	return sysram_base_addr;
}

static inline void __iomem *cpu_boot_reg(int cpu)
{
	void __iomem *boot_reg;

	boot_reg = cpu_boot_reg_base();
	if (!boot_reg)
		return ERR_PTR(-ENODEV);
	if (soc_is_exynos4412())
		boot_reg += 4*cpu;
	else if (soc_is_exynos5420() || soc_is_exynos5800())
		boot_reg += 4;
	return boot_reg;
}

/*
 * Set wake up by local power mode and execute software reset for given core.
 *
 * Currently this is needed only when booting secondary CPU on Exynos3250.
 */
static void exynos_core_restart(u32 core_id)
{
	u32 val;

	if (!of_machine_is_compatible("samsung,exynos3250"))
		return;

	val = pmu_raw_readl(EXYNOS_ARM_CORE_STATUS(core_id));
	val |= S5P_CORE_WAKEUP_FROM_LOCAL_CFG;
	pmu_raw_writel(val, EXYNOS_ARM_CORE_STATUS(core_id));

	pr_info("CPU%u: Software reset\n", core_id);
	pmu_raw_writel(EXYNOS_CORE_PO_RESET(core_id), EXYNOS_SWRESET);
}

/*
 * Write pen_release in a way that is guaranteed to be visible to all
 * observers, irrespective of whether they're taking part in coherency
 * or not.  This is necessary for the hotplug code to work reliably.
 */
static void write_pen_release(int val)
{
	pen_release = val;
	smp_wmb();
	sync_cache_w(&pen_release);
}

static void __iomem *scu_base_addr(void)
{
	return (void __iomem *)(S5P_VA_SCU);
}

static DEFINE_SPINLOCK(boot_lock);

static void exynos_secondary_init(unsigned int cpu)
{
	/*
	 * let the primary processor know we're out of the
	 * pen, then head off into the C entry point
	 */
	write_pen_release(-1);

	/*
	 * Synchronise with the boot thread.
	 */
	spin_lock(&boot_lock);
	spin_unlock(&boot_lock);
}
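
/*
 * Holding-pen handshake used below (a sketch of the flow implemented here):
 * the boot CPU writes the secondary's hardware core ID into pen_release,
 * powers the core on and kicks it via a firmware call or the boot register
 * plus a wakeup IPI.  The secondary starts at exynos4_secondary_startup,
 * waits in its holding pen until pen_release matches its own ID, and then
 * clears pen_release to -1 from exynos_secondary_init() above.  The boot
 * CPU polls for that -1 to decide that the core came up.
 */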

static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	unsigned long timeout;
	u32 mpidr = cpu_logical_map(cpu);
	u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	int ret = -ENOSYS;

	/*
	 * Set synchronisation state between this boot processor
	 * and the secondary one
	 */
	spin_lock(&boot_lock);

	/*
	 * The secondary processor is waiting to be released from
	 * the holding pen - release it, then wait for it to flag
	 * that it has been released by resetting pen_release.
	 *
	 * Note that "pen_release" is the hardware CPU core ID, whereas
	 * "cpu" is Linux's internal ID.
	 */
	write_pen_release(core_id);

	if (!exynos_cpu_power_state(core_id)) {
		exynos_cpu_power_up(core_id);
		timeout = 10;

		/* wait max 10 ms until cpu1 is on */
		while (exynos_cpu_power_state(core_id)
		       != S5P_CORE_LOCAL_PWR_EN) {
			if (timeout-- == 0)
				break;

			mdelay(1);
		}

		if (timeout == 0) {
			printk(KERN_ERR "cpu1 power enable failed\n");
			spin_unlock(&boot_lock);
			return -ETIMEDOUT;
		}
	}

	exynos_core_restart(core_id);

	/*
	 * Send the secondary CPU a soft interrupt, thereby causing
	 * the boot monitor to read the system wide flags register,
	 * and branch to the address found there.
	 */

	timeout = jiffies + (1 * HZ);
	while (time_before(jiffies, timeout)) {
		unsigned long boot_addr;

		smp_rmb();

		boot_addr = virt_to_phys(exynos4_secondary_startup);

		/*
		 * Try to set boot address using firmware first
		 * and fall back to boot register if it fails.
		 */
		ret = call_firmware_op(set_cpu_boot_addr, core_id, boot_addr);
		if (ret && ret != -ENOSYS)
			goto fail;
		if (ret == -ENOSYS) {
			void __iomem *boot_reg = cpu_boot_reg(core_id);

			if (IS_ERR(boot_reg)) {
				ret = PTR_ERR(boot_reg);
				goto fail;
			}
			__raw_writel(boot_addr, boot_reg);
		}

		call_firmware_op(cpu_boot, core_id);

		arch_send_wakeup_ipi_mask(cpumask_of(cpu));

		if (pen_release == -1)
			break;

		udelay(10);
	}

	/* No harm if this is called during first boot of secondary CPU */
	exynos_set_delayed_reset_assertion(core_id, false);

	/*
	 * now the secondary core is starting up let it run its
	 * calibrations, then wait for it to finish
	 */
fail:
	spin_unlock(&boot_lock);

	return pen_release != -1 ? ret : 0;
}

/*
 * Initialise the CPU possible map early - this describes the CPUs
 * which may be present or become present in the system.
 */

static void __init exynos_smp_init_cpus(void)
{
	void __iomem *scu_base = scu_base_addr();
	unsigned int i, ncores;

	if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9)
		ncores = scu_base ? scu_get_core_count(scu_base) : 1;
	else
		/*
		 * CPU Nodes are passed thru DT and set_cpu_possible
		 * is set by "arm_dt_init_cpu_maps".
		 */
		return;

	/* sanity check */
	if (ncores > nr_cpu_ids) {
		pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
			ncores, nr_cpu_ids);
		ncores = nr_cpu_ids;
	}

	for (i = 0; i < ncores; i++)
		set_cpu_possible(i, true);
}

static void __init exynos_smp_prepare_cpus(unsigned int max_cpus)
{
	int i;

	exynos_sysram_init();

	if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9)
		scu_enable(scu_base_addr());

	/*
	 * Write the address of secondary startup into the
	 * system-wide flags register. The boot monitor waits
	 * until it receives a soft interrupt, and then the
	 * secondary CPU branches to this address.
	 *
	 * Try using firmware operation first and fall back to
	 * boot register if it fails.
	 */
	for (i = 1; i < max_cpus; ++i) {
		unsigned long boot_addr;
		u32 mpidr;
		u32 core_id;
		int ret;

		mpidr = cpu_logical_map(i);
		core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
		boot_addr = virt_to_phys(exynos4_secondary_startup);

		ret = call_firmware_op(set_cpu_boot_addr, core_id, boot_addr);
		if (ret && ret != -ENOSYS)
			break;
		if (ret == -ENOSYS) {
			void __iomem *boot_reg = cpu_boot_reg(core_id);

			if (IS_ERR(boot_reg))
				break;
			__raw_writel(boot_addr, boot_reg);
		}
	}
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * platform-specific code to shutdown a CPU
 *
 * Called with IRQs disabled
 */
static void exynos_cpu_die(unsigned int cpu)
{
	int spurious = 0;
	u32 mpidr = cpu_logical_map(cpu);
	u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	v7_exit_coherency_flush(louis);

	platform_do_lowpower(cpu, &spurious);

	/*
	 * bring this CPU back into the world of cache
	 * coherency, and then restore interrupts
	 */
	cpu_leave_lowpower(core_id);

	if (spurious)
		pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious);
}
#endif /* CONFIG_HOTPLUG_CPU */

struct smp_operations exynos_smp_ops __initdata = {
	.smp_init_cpus		= exynos_smp_init_cpus,
	.smp_prepare_cpus	= exynos_smp_prepare_cpus,
	.smp_secondary_init	= exynos_secondary_init,
	.smp_boot_secondary	= exynos_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_die		= exynos_cpu_die,
#endif
};
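
/*
 * Wiring sketch (the actual machine descriptor lives elsewhere in
 * mach-exynos): the DT machine record points its .smp field at this
 * structure, roughly
 *
 *	DT_MACHINE_START(EXYNOS_DT, "SAMSUNG EXYNOS (Flattened Device Tree)")
 *		...
 *		.smp		= smp_ops(exynos_smp_ops),
 *		...
 *	MACHINE_END
 */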