/*
 * SMP support for R-Mobile / SH-Mobile - sh73a0 portion
 *
 * Copyright (C) 2010 Magnus Damm
 * Copyright (C) 2010 Takashi Yoshii
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <mach/common.h>
#include <asm/smp_plat.h>
#include <mach/sh73a0.h>
#include <asm/smp_scu.h>
#include <asm/smp_twd.h>
#include <asm/hardware/gic.h>

/* SoC registers used to wake up, reset and boot the secondary cores */
#define WUPCR		IOMEM(0xe6151010)	/* wake-up control */
#define SRESCR		IOMEM(0xe6151018)	/* software reset control */
#define PSTR		IOMEM(0xe6151040)	/* CPU power status */
#define SBAR		IOMEM(0xe6180020)	/* secondary boot address */
#define APARMBAREA	IOMEM(0xe6f10020)	/* boot address area size */

static void __iomem *scu_base_addr(void)
{
	return (void __iomem *)0xf0000000;
}

static DEFINE_SPINLOCK(scu_lock);
static unsigned long tmp;

#ifdef CONFIG_HAVE_ARM_TWD
static DEFINE_TWD_LOCAL_TIMER(twd_local_timer, 0xf0000600, 29);
void __init sh73a0_register_twd(void)
{
	twd_local_timer_register(&twd_local_timer);
}
#endif

/* Update one CPU's field in the SCU CPU Power Status register (offset 8) */
static void modify_scu_cpu_psr(unsigned long set, unsigned long clr)
{
	void __iomem *scu_base = scu_base_addr();

	spin_lock(&scu_lock);
	tmp = __raw_readl(scu_base + 8);
	tmp &= ~clr;
	tmp |= set;
	spin_unlock(&scu_lock);

	/* write back the updated power status after releasing the lock */
	__raw_writel(tmp, scu_base + 8);
}

static unsigned int __init sh73a0_get_core_count(void)
{
	void __iomem *scu_base = scu_base_addr();

	return scu_get_core_count(scu_base);
}

static void __cpuinit sh73a0_secondary_init(unsigned int cpu)
{
	gic_secondary_init(0);
}

static int __cpuinit sh73a0_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	cpu = cpu_logical_map(cpu);

	/* enable cache coherency */
	modify_scu_cpu_psr(0, 3 << (cpu * 8));

	if (((__raw_readl(PSTR) >> (4 * cpu)) & 3) == 3)
		__raw_writel(1 << cpu, WUPCR);	/* wake up */
	else
		__raw_writel(1 << cpu, SRESCR);	/* reset */

	return 0;
}

static void __init sh73a0_smp_prepare_cpus(unsigned int max_cpus)
{
	int cpu = cpu_logical_map(0);

	scu_enable(scu_base_addr());

	/* Map the reset vector (in headsmp.S) */
	__raw_writel(0, APARMBAREA);	/* 4k */
	__raw_writel(__pa(shmobile_secondary_vector), SBAR);

	/* enable cache coherency on CPU0 */
	modify_scu_cpu_psr(0, 3 << (cpu * 8));
}

static void __init sh73a0_smp_init_cpus(void)
{
	unsigned int ncores = sh73a0_get_core_count();

	shmobile_smp_init_cpus(ncores);
}

static int __maybe_unused sh73a0_cpu_kill(unsigned int cpu)
{
	int k;

	/*
	 * This function runs on a CPU other than the one going offline.
	 * Wait here for the shutdown code in platform_cpu_die() to finish
	 * before asking SoC-specific code to power off the CPU core.
	 */
	for (k = 0; k < 1000; k++) {
		if (shmobile_cpu_is_dead(cpu))
			return 1;

		mdelay(1);
	}

	return 0;
}

struct smp_operations sh73a0_smp_ops __initdata = {
	.smp_init_cpus		= sh73a0_smp_init_cpus,
	.smp_prepare_cpus	= sh73a0_smp_prepare_cpus,
	.smp_secondary_init	= sh73a0_secondary_init,
	.smp_boot_secondary	= sh73a0_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_kill		= sh73a0_cpu_kill,
	.cpu_die		= shmobile_cpu_die,
	.cpu_disable		= shmobile_cpu_disable,
#endif
};
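
/*
 * Usage sketch: an sh73a0 board file is expected to hook these operations
 * into its machine descriptor through the .smp field, roughly as below.
 * The board name and the elided descriptor fields are illustrative
 * assumptions; only the .smp hookup reflects this file's interface.
 *
 *	DT_MACHINE_START(KZM9G_DT, "kzm9g")
 *		.smp	= smp_ops(sh73a0_smp_ops),
 *		...
 *	MACHINE_END
 */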