/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2011 by Kevin Cernekee (cernekee@gmail.com)
 *
 * SMP support for BMIPS
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/reboot.h>
#include <linux/io.h>
#include <linux/compiler.h>
#include <linux/linkage.h>
#include <linux/bug.h>
#include <linux/kernel.h>

#include <asm/time.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/bootinfo.h>
#include <asm/pmon.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/mipsregs.h>
#include <asm/bmips.h>
#include <asm/traps.h>
#include <asm/barrier.h>

/*
 * Number of hardware threads available on this part; defaults to 1 (UP)
 * and is raised during SMP setup.  __maybe_unused because only some of
 * the CONFIG_CPU_BMIPS* variants below actually read it.
 */
static int __maybe_unused max_cpus = 1;
/* these may be configured by the platform code */
int bmips_smp_enabled = 1;
int bmips_cpu_offset;
cpumask_t bmips_booted_mask;

#ifdef CONFIG_SMP

/* initial $sp, $gp - used by arch/mips/kernel/bmips_vec.S */
unsigned long bmips_smp_boot_sp;
unsigned long bmips_smp_boot_gp;

static void bmips_send_ipi_single(int cpu, unsigned int action);
static irqreturn_t bmips_ipi_interrupt(int irq, void *dev_id);

/* SW interrupts 0,1 are used for interprocessor signaling */
#define IPI0_IRQ	(MIPS_CPU_IRQ_BASE + 0)
#define IPI1_IRQ	(MIPS_CPU_IRQ_BASE + 1)

/*
 * Command encodings written to the BRCM "action" register via
 * write_c0_brcm_action().  CPUNUM() folds in the platform-configured
 * bmips_cpu_offset before placing the thread number in the command.
 */
#define CPUNUM(cpu, shift)		(((cpu) + bmips_cpu_offset) << (shift))
#define ACTION_CLR_IPI(cpu, ipi)	(0x2000 | CPUNUM(cpu, 9) | ((ipi) << 8))
#define ACTION_SET_IPI(cpu, ipi)	(0x3000 | CPUNUM(cpu, 9) | ((ipi) << 8))
#define ACTION_BOOT_THREAD(cpu)		(0x08 | CPUNUM(cpu, 0))

/*
 * Probe/configure the CPU complex and build the logical<->physical CPU
 * maps.  Runs once on the boot CPU.
 */
static void __init bmips_smp_setup(void)
{
	int i, cpu = 1, boot_cpu = 0;

#if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380)
	/* arbitration priority */
	clear_c0_brcm_cmt_ctrl(0x30);

	/* NBK and weak order flags */
	set_c0_brcm_config_0(0x30000);

	/* Find out if we are running on TP0 or TP1 */
	boot_cpu = !!(read_c0_brcm_cmt_local() & (1 << 31));

	/*
	 * MIPS interrupts 0,1 (SW INT 0,1) cross over to the other thread
	 * MIPS interrupt 2 (HW INT 0) is the CPU0 L1 controller output
	 * MIPS interrupt 3 (HW INT 1) is the CPU1 L1 controller output
	 *
	 * If booting from TP1, leave the existing CMT interrupt routing
	 * such that TP0 responds to SW1 and TP1 responds to SW0.
	 */
	if (boot_cpu == 0)
		change_c0_brcm_cmt_intr(0xf8018000,
					(0x02 << 27) | (0x03 << 15));
	else
		change_c0_brcm_cmt_intr(0xf8018000, (0x1d << 27));

	/* single core, 2 threads (2 pipelines) */
	max_cpus = 2;
#elif defined(CONFIG_CPU_BMIPS5000)
	/* enable raceless SW interrupts */
	set_c0_brcm_config(0x03 << 22);

	/* route HW interrupt 0 to CPU0, HW interrupt 1 to CPU1 */
	change_c0_brcm_mode(0x1f << 27, 0x02 << 27);

	/* N cores, 2 threads per core */
	max_cpus = (((read_c0_brcm_config() >> 6) & 0x03) + 1) << 1;

	/* clear any pending SW interrupts */
	for (i = 0; i < max_cpus; i++) {
		write_c0_brcm_action(ACTION_CLR_IPI(i, 0));
		write_c0_brcm_action(ACTION_CLR_IPI(i, 1));
	}
#endif

	if (!bmips_smp_enabled)
		max_cpus = 1;

	/* this can be overridden by the BSP */
	if (!board_ebase_setup)
		board_ebase_setup = &bmips_ebase_setup;

	/* the thread we booted from is always logical CPU 0 */
	__cpu_number_map[boot_cpu] = 0;
	__cpu_logical_map[0] = boot_cpu;

	/* remaining hardware threads get logical IDs 1..max_cpus-1 */
	for (i = 0; i < max_cpus; i++) {
		if (i != boot_cpu) {
			__cpu_number_map[i] = cpu;
			__cpu_logical_map[cpu] = i;
			cpu++;
		}
		set_cpu_possible(i, 1);
		set_cpu_present(i, 1);
	}
}

/*
 * IPI IRQ setup - runs on CPU0
 */
static void bmips_prepare_cpus(unsigned int max_cpus)
{
	if (request_irq(IPI0_IRQ, bmips_ipi_interrupt, IRQF_PERCPU,
			"smp_ipi0", NULL))
		panic("Can't request IPI0 interrupt\n");
	if (request_irq(IPI1_IRQ, bmips_ipi_interrupt, IRQF_PERCPU,
			"smp_ipi1", NULL))
		panic("Can't request IPI1 interrupt\n");
}

/*
 * Tell the hardware to boot CPUx - runs on CPU0
 */
static void bmips_boot_secondary(int cpu, struct task_struct *idle)
{
	bmips_smp_boot_sp = __KSTK_TOS(idle);
	bmips_smp_boot_gp = (unsigned long)task_thread_info(idle);
	/* make sure the $sp/$gp stores above are visible before release */
	mb();

	/*
	 * Initial boot sequence for secondary CPU:
	 *   bmips_reset_nmi_vec @ a000_0000 ->
	 *   bmips_smp_entry ->
	 *   plat_wired_tlb_setup (cached function call; optional) ->
	 *   start_secondary (cached jump)
	 *
	 * Warm restart sequence:
	 *   play_dead WAIT loop ->
	 *   bmips_smp_int_vec @ BMIPS_WARM_RESTART_VEC ->
	 *   eret to play_dead ->
	 *   bmips_secondary_reentry ->
	 *   start_secondary
	 */

	pr_info("SMP: Booting CPU%d...\n", cpu);

	/* already cold-booted once? -> warm restart via IPI instead */
	if (cpumask_test_cpu(cpu, &bmips_booted_mask))
		bmips_send_ipi_single(cpu, 0);
	else {
#if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380)
		/* Reset slave TP1 if booting from TP0 */
		if (cpu_logical_map(cpu) == 0)
			set_c0_brcm_cmt_ctrl(0x01);
#elif defined(CONFIG_CPU_BMIPS5000)
		if (cpu & 0x01)
			write_c0_brcm_action(ACTION_BOOT_THREAD(cpu));
		else {
			/*
			 * core N thread 0 was already booted; just
			 * pulse the NMI line
			 */
			bmips_write_zscm_reg(0x210, 0xc0000000);
			udelay(10);
			bmips_write_zscm_reg(0x210, 0x00);
		}
#endif
		cpumask_set_cpu(cpu, &bmips_booted_mask);
	}
}

/*
 * Early setup - runs on secondary CPU after cache probe
 */
static void bmips_init_secondary(void)
{
	/* move NMI vector to kseg0, in case XKS01 is enabled */

#if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380)
	void __iomem *cbr = BMIPS_GET_CBR();
	unsigned long old_vec;

	old_vec = __raw_readl(cbr + BMIPS_RELO_VECTOR_CONTROL_1);
	__raw_writel(old_vec & ~0x20000000, cbr + BMIPS_RELO_VECTOR_CONTROL_1);

	/* ack the SW interrupt that woke this thread */
	clear_c0_cause(smp_processor_id() ? C_SW1 : C_SW0);
#elif defined(CONFIG_CPU_BMIPS5000)
	write_c0_brcm_bootvec(read_c0_brcm_bootvec() &
		(smp_processor_id() & 0x01 ? ~0x20000000 : ~0x2000));

	/* clear any IPI0 left pending for this thread */
	write_c0_brcm_action(ACTION_CLR_IPI(smp_processor_id(), 0));
#endif
}

/*
 * Late setup - runs on secondary CPU before entering the idle loop
 */
static void bmips_smp_finish(void)
{
	pr_info("SMP: CPU%d is running\n", smp_processor_id());

	/* make sure there won't be a timer interrupt for a little while */
	write_c0_compare(read_c0_count() + mips_hpt_frequency / HZ);

	/* hazard barriers bracket the Status update per MIPS CP0 rules */
	irq_enable_hazard();
	set_c0_status(IE_SW0 | IE_SW1 | IE_IRQ1 | IE_IRQ5 | ST0_IE);
	irq_enable_hazard();
}

/*
 * Runs on CPU0 after all CPUs have been booted
 */
static void bmips_cpus_done(void)
{
}

#if defined(CONFIG_CPU_BMIPS5000)

/*
 * BMIPS5000 raceless IPIs
 *
 * Each CPU has two inbound SW IRQs which are independent of all other CPUs.
 * IPI0 is used for SMP_RESCHEDULE_YOURSELF
 * IPI1 is used for SMP_CALL_FUNCTION
 */

static void bmips_send_ipi_single(int cpu, unsigned int action)
{
	/* IPI number: 1 for SMP_CALL_FUNCTION, 0 for everything else */
	write_c0_brcm_action(ACTION_SET_IPI(cpu, action == SMP_CALL_FUNCTION));
}

static irqreturn_t bmips_ipi_interrupt(int irq, void *dev_id)
{
	/* which of the two per-CPU SW IRQs fired identifies the action */
	int action = irq - IPI0_IRQ;

	write_c0_brcm_action(ACTION_CLR_IPI(smp_processor_id(), action));

	if (action == 0)
		scheduler_ipi();
	else
		smp_call_function_interrupt();

	return IRQ_HANDLED;
}

#else

/*
 * BMIPS43xx racey IPIs
 *
 * We use one inbound SW IRQ for each CPU.
 *
 * A spinlock must be held in order to keep CPUx from accidentally clearing
 * an incoming IPI when it writes CP0 CAUSE to raise an IPI on CPUy.  The
 * same spinlock is used to protect the action masks.
 */

static DEFINE_SPINLOCK(ipi_lock);
static DEFINE_PER_CPU(int, ipi_action_mask);

static void bmips_send_ipi_single(int cpu, unsigned int action)
{
	unsigned long flags;

	spin_lock_irqsave(&ipi_lock, flags);
	/* cpu selects SW0/SW1, so this scheme only covers two threads */
	set_c0_cause(cpu ? C_SW1 : C_SW0);
	per_cpu(ipi_action_mask, cpu) |= action;
	irq_enable_hazard();
	spin_unlock_irqrestore(&ipi_lock, flags);
}

static irqreturn_t bmips_ipi_interrupt(int irq, void *dev_id)
{
	unsigned long flags;
	int action, cpu = irq - IPI0_IRQ;

	/* atomically snapshot and clear our action mask, then ack the IRQ */
	spin_lock_irqsave(&ipi_lock, flags);
	action = __get_cpu_var(ipi_action_mask);
	per_cpu(ipi_action_mask, cpu) = 0;
	clear_c0_cause(cpu ? C_SW1 : C_SW0);
	spin_unlock_irqrestore(&ipi_lock, flags);

	if (action & SMP_RESCHEDULE_YOURSELF)
		scheduler_ipi();
	if (action & SMP_CALL_FUNCTION)
		smp_call_function_interrupt();

	return IRQ_HANDLED;
}

#endif /* BMIPS type */

static void bmips_send_ipi_mask(const struct cpumask *mask,
	unsigned int action)
{
	unsigned int i;

	for_each_cpu(i, mask)
		bmips_send_ipi_single(i, action);
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Take this CPU out of service.  Returns -EBUSY for CPU0, which must
 * stay online.
 */
static int bmips_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	if (cpu == 0)
		return -EBUSY;

	pr_info("SMP: CPU%d is offline\n", cpu);

	set_cpu_online(cpu, false);
	cpu_clear(cpu, cpu_callin_map);

	/* 0..~0 range = flush everything local to this CPU */
	local_flush_tlb_all();
	local_flush_icache_range(0, ~0);

	return 0;
}

/* nothing to do on the surviving CPU; the dead CPU parks in play_dead() */
static void bmips_cpu_die(unsigned int cpu)
{
}

void __ref play_dead(void)
{
	idle_task_exit();

	/* flush data cache */
	_dma_cache_wback_inv(0, ~0);

	/*
	 * Wakeup is on SW0 or SW1; disable everything else
	 * Use BEV !IV (BMIPS_WARM_RESTART_VEC) to avoid the regular Linux
	 * IRQ handlers; this clears ST0_IE and returns immediately.
	 */
	clear_c0_cause(CAUSEF_IV | C_SW0 | C_SW1);
	change_c0_status(IE_IRQ5 | IE_IRQ1 | IE_SW0 | IE_SW1 | ST0_IE | ST0_BEV,
		IE_SW0 | IE_SW1 | ST0_IE | ST0_BEV);
	irq_disable_hazard();

	/*
	 * wait for SW interrupt from bmips_boot_secondary(), then jump
	 * back to start_secondary()
	 */
	__asm__ __volatile__(
	"	wait\n"
	"	j	bmips_secondary_reentry\n"
	: : : "memory");
}

#endif /* CONFIG_HOTPLUG_CPU */

struct plat_smp_ops bmips_smp_ops = {
	.smp_setup		= bmips_smp_setup,
	.prepare_cpus		= bmips_prepare_cpus,
	.boot_secondary		= bmips_boot_secondary,
	.smp_finish		= bmips_smp_finish,
	.init_secondary		= bmips_init_secondary,
	.cpus_done		= bmips_cpus_done,
	.send_ipi_single	= bmips_send_ipi_single,
	.send_ipi_mask		= bmips_send_ipi_mask,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_disable		= bmips_cpu_disable,
	.cpu_die		= bmips_cpu_die,
#endif
};

#endif /* CONFIG_SMP */

/***********************************************************************
 * BMIPS vector relocation
 * This is primarily used for SMP boot, but it is applicable to some
 * UP BMIPS systems as well.
 ***********************************************************************/

/*
 * Copy an exception vector [start, end) to its destination address and
 * make it visible to instruction fetch.
 */
static void __cpuinit bmips_wr_vec(unsigned long dst, char *start, char *end)
{
	memcpy((void *)dst, start, end - start);
	dma_cache_wback((unsigned long)start, end - start);
	local_flush_icache_range(dst, dst + (end - start));
	instruction_hazard();
}

static inline void __cpuinit bmips_nmi_handler_setup(void)
{
	bmips_wr_vec(BMIPS_NMI_RESET_VEC, &bmips_reset_nmi_vec,
		&bmips_reset_nmi_vec_end);
	bmips_wr_vec(BMIPS_WARM_RESTART_VEC, &bmips_smp_int_vec,
		&bmips_smp_int_vec_end);
}

/*
 * Relocate the exception base per CPU variant and install the NMI
 * handler setup hook.  Expects the generic code to have left ebase at
 * CKSEG0 (BUG otherwise).
 */
void __cpuinit bmips_ebase_setup(void)
{
	unsigned long new_ebase = ebase;
	void __iomem __maybe_unused *cbr;

	BUG_ON(ebase != CKSEG0);

#if defined(CONFIG_CPU_BMIPS4350)
	/*
	 * BMIPS4350 cannot relocate the normal vectors, but it
	 * can relocate the BEV=1 vectors.  So CPU1 starts up at
	 * the relocated BEV=1, IV=0 general exception vector @
	 * 0xa000_0380.
	 *
	 * set_uncached_handler() is used here because:
	 * - CPU1 will run this from uncached space
	 * - None of the cacheflush functions are set up yet
	 */
	set_uncached_handler(BMIPS_WARM_RESTART_VEC - CKSEG0,
		&bmips_smp_int_vec, 0x80);
	__sync();
	return;
#elif defined(CONFIG_CPU_BMIPS4380)
	/*
	 * 0x8000_0000: reset/NMI (initially in kseg1)
	 * 0x8000_0400: normal vectors
	 */
	new_ebase = 0x80000400;
	cbr = BMIPS_GET_CBR();
	__raw_writel(0x80080800, cbr + BMIPS_RELO_VECTOR_CONTROL_0);
	__raw_writel(0xa0080800, cbr + BMIPS_RELO_VECTOR_CONTROL_1);
#elif defined(CONFIG_CPU_BMIPS5000)
	/*
	 * 0x8000_0000: reset/NMI (initially in kseg1)
	 * 0x8000_1000: normal vectors
	 */
	new_ebase = 0x80001000;
	write_c0_brcm_bootvec(0xa0088008);
	write_c0_ebase(new_ebase);
	if (max_cpus > 2)
		bmips_write_zscm_reg(0xa0, 0xa008a008);
#else
	return;
#endif
	board_nmi_handler_setup = &bmips_nmi_handler_setup;
	ebase = new_ebase;
}

/* weak stub; platforms override to install wired TLB entries on boot */
asmlinkage void __weak plat_wired_tlb_setup(void)
{
	/*
	 * Called when starting/restarting a secondary CPU.
	 * Kernel stacks and other important data might only be accessible
	 * once the wired entries are present.
	 */
}