/*
 * SMP support for ppc.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
 * deal of code from the sparc and intel versions.
 *
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 *
 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/sched/mm.h>
#include <linux/sched/topology.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/profile.h>

#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/irq.h>
#include <asm/hw_irq.h>
#include <asm/kvm_ppc.h>
#include <asm/dbell.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/cputhreads.h>
#include <asm/cputable.h>
#include <asm/mpic.h>
#include <asm/vdso_datapage.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#endif
#include <asm/vdso.h>
#include <asm/debug.h>
#include <asm/kexec.h>
#include <asm/asm-prototypes.h>
#include <asm/cpu_has_feature.h>

#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

#ifdef CONFIG_HOTPLUG_CPU
/* State of each CPU during hotplug phases */
static DEFINE_PER_CPU(int, cpu_state) = { 0 };
#endif

struct thread_info *secondary_ti;

DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);

EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);

/* SMP operations for this machine */
struct smp_ops_t *smp_ops;

/* Can't be static due to PowerMac hackery */
volatile unsigned int cpu_callin_map[NR_CPUS];

int smt_enabled_at_boot = 1;

/*
 * Returns 1 if the specified cpu should be brought up during boot.
 * Used to inhibit booting threads if they've been disabled or
 * limited on the command line.
 */
int smp_generic_cpu_bootable(unsigned int nr)
{
	/* Special case - we inhibit secondary thread startup
	 * during boot if the user requests it.
	 */
	if (system_state == SYSTEM_BOOTING && cpu_has_feature(CPU_FTR_SMT)) {
		if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
			return 0;
		if (smt_enabled_at_boot
		    && cpu_thread_in_core(nr) >= smt_enabled_at_boot)
			return 0;
	}

	return 1;
}
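
/*
 * Worked example: on a machine with four threads per core, booting with
 * "smt-enabled=2" on the kernel command line leaves smt_enabled_at_boot
 * at 2, so the test above returns 0 for threads 2 and 3 of every core
 * and only the first two threads of each core are started at boot.
 */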

#ifdef CONFIG_PPC64
int smp_generic_kick_cpu(int nr)
{
	if (nr < 0 || nr >= nr_cpu_ids)
		return -EINVAL;

	/*
	 * The processor is currently spinning, waiting for the
	 * cpu_start field to become non-zero. After we set cpu_start,
	 * the processor will continue on to secondary_start.
	 */
	if (!paca[nr].cpu_start) {
		paca[nr].cpu_start = 1;
		smp_mb();
		return 0;
	}

#ifdef CONFIG_HOTPLUG_CPU
	/*
	 * Ok, it's not there, so it might be soft-unplugged; let's
	 * try to bring it back.
	 */
	generic_set_cpu_up(nr);
	smp_wmb();
	smp_send_reschedule(nr);
#endif /* CONFIG_HOTPLUG_CPU */

	return 0;
}
#endif /* CONFIG_PPC64 */

static irqreturn_t call_function_action(int irq, void *data)
{
	generic_smp_call_function_interrupt();
	return IRQ_HANDLED;
}

static irqreturn_t reschedule_action(int irq, void *data)
{
	scheduler_ipi();
	return IRQ_HANDLED;
}

static irqreturn_t tick_broadcast_ipi_action(int irq, void *data)
{
	tick_broadcast_ipi_handler();
	return IRQ_HANDLED;
}

#ifdef CONFIG_NMI_IPI
static irqreturn_t nmi_ipi_action(int irq, void *data)
{
	smp_handle_nmi_ipi(get_irq_regs());
	return IRQ_HANDLED;
}
#endif

static irq_handler_t smp_ipi_action[] = {
	[PPC_MSG_CALL_FUNCTION] = call_function_action,
	[PPC_MSG_RESCHEDULE] = reschedule_action,
	[PPC_MSG_TICK_BROADCAST] = tick_broadcast_ipi_action,
#ifdef CONFIG_NMI_IPI
	[PPC_MSG_NMI_IPI] = nmi_ipi_action,
#endif
};

/*
 * The NMI IPI is a fallback and not truly non-maskable. It is simpler
 * than going through the call function infrastructure, and strongly
 * serialized, so it is more appropriate for debugging.
 */
const char *smp_ipi_name[] = {
	[PPC_MSG_CALL_FUNCTION] = "ipi call function",
	[PPC_MSG_RESCHEDULE] = "ipi reschedule",
	[PPC_MSG_TICK_BROADCAST] = "ipi tick-broadcast",
	[PPC_MSG_NMI_IPI] = "nmi ipi",
};

/* optional function to request ipi, for controllers with >= 4 ipis */
int smp_request_message_ipi(int virq, int msg)
{
	int err;

	if (msg < 0 || msg > PPC_MSG_NMI_IPI)
		return -EINVAL;
#ifndef CONFIG_NMI_IPI
	if (msg == PPC_MSG_NMI_IPI)
		return 1;
#endif

	err = request_irq(virq, smp_ipi_action[msg],
			  IRQF_PERCPU | IRQF_NO_THREAD | IRQF_NO_SUSPEND,
			  smp_ipi_name[msg], NULL);
	WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
		virq, smp_ipi_name[msg], err);

	return err;
}
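
/*
 * Example (sketch; "base_virq" is an illustrative platform-provided
 * value, not something defined here): an interrupt controller with one
 * hardware IPI per message could register them all at probe time:
 *
 *	int msg;
 *
 *	for (msg = PPC_MSG_CALL_FUNCTION; msg <= PPC_MSG_NMI_IPI; msg++)
 *		smp_request_message_ipi(base_virq + msg, msg);
 */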

#ifdef CONFIG_PPC_SMP_MUXED_IPI
struct cpu_messages {
	long messages;			/* current messages */
};
static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message);

void smp_muxed_ipi_set_message(int cpu, int msg)
{
	struct cpu_messages *info = &per_cpu(ipi_message, cpu);
	char *message = (char *)&info->messages;

	/*
	 * Order previous accesses before accesses in the IPI handler.
	 */
	smp_mb();
	message[msg] = 1;
}

void smp_muxed_ipi_message_pass(int cpu, int msg)
{
	smp_muxed_ipi_set_message(cpu, msg);

	/*
	 * cause_ipi functions are required to include a full barrier
	 * before doing whatever causes the IPI.
	 */
	smp_ops->cause_ipi(cpu);
}

#ifdef __BIG_ENDIAN__
#define IPI_MESSAGE(A) (1uL << ((BITS_PER_LONG - 8) - 8 * (A)))
#else
#define IPI_MESSAGE(A) (1uL << (8 * (A)))
#endif
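
/*
 * Worked example: smp_muxed_ipi_set_message() stores 1 into byte "msg"
 * of info->messages, and IPI_MESSAGE() names that same byte as a bit in
 * the long. On little-endian, PPC_MSG_RESCHEDULE (== 1) writes 0x01 to
 * byte 1, i.e. sets bit 8 of the word, and
 * IPI_MESSAGE(PPC_MSG_RESCHEDULE) == 1uL << 8 tests exactly that bit.
 * The big-endian variant mirrors the shift so the bit still lands in
 * byte "msg" of the in-memory representation.
 */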

irqreturn_t smp_ipi_demux(void)
{
	mb();	/* order any irq clear */

	return smp_ipi_demux_relaxed();
}

/* sync-free variant. Callers should ensure synchronization */
irqreturn_t smp_ipi_demux_relaxed(void)
{
	struct cpu_messages *info;
	unsigned long all;

	info = this_cpu_ptr(&ipi_message);
	do {
		all = xchg(&info->messages, 0);
#if defined(CONFIG_KVM_XICS) && defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
		/*
		 * Must check for PPC_MSG_RM_HOST_ACTION messages
		 * before PPC_MSG_CALL_FUNCTION messages because when
		 * a VM is destroyed, we call kick_all_cpus_sync()
		 * to ensure that any pending PPC_MSG_RM_HOST_ACTION
		 * messages have completed before we free any VCPUs.
		 */
		if (all & IPI_MESSAGE(PPC_MSG_RM_HOST_ACTION))
			kvmppc_xics_ipi_action();
#endif
		if (all & IPI_MESSAGE(PPC_MSG_CALL_FUNCTION))
			generic_smp_call_function_interrupt();
		if (all & IPI_MESSAGE(PPC_MSG_RESCHEDULE))
			scheduler_ipi();
		if (all & IPI_MESSAGE(PPC_MSG_TICK_BROADCAST))
			tick_broadcast_ipi_handler();
#ifdef CONFIG_NMI_IPI
		if (all & IPI_MESSAGE(PPC_MSG_NMI_IPI))
			nmi_ipi_action(0, NULL);
#endif
	} while (info->messages);

	return IRQ_HANDLED;
}
#endif /* CONFIG_PPC_SMP_MUXED_IPI */

static inline void do_message_pass(int cpu, int msg)
{
	if (smp_ops->message_pass)
		smp_ops->message_pass(cpu, msg);
#ifdef CONFIG_PPC_SMP_MUXED_IPI
	else
		smp_muxed_ipi_message_pass(cpu, msg);
#endif
}

void smp_send_reschedule(int cpu)
{
	if (likely(smp_ops))
		do_message_pass(cpu, PPC_MSG_RESCHEDULE);
}
EXPORT_SYMBOL_GPL(smp_send_reschedule);

void arch_send_call_function_single_ipi(int cpu)
{
	do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}
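
/*
 * Example: a generic cross-call such as
 *
 *	smp_call_function_single(cpu, fn, info, 1);
 *
 * ends up here: the core SMP code queues fn/info for the target and
 * then calls arch_send_call_function_single_ipi(cpu), which raises
 * PPC_MSG_CALL_FUNCTION through do_message_pass(); the target runs fn
 * from generic_smp_call_function_interrupt() in the demux above.
 */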

#ifdef CONFIG_NMI_IPI

/*
 * "NMI IPI" system.
 *
 * NMI IPIs may not be recoverable, so should not be used as ongoing part of
 * a running system. They can be used for crash, debug, halt/reboot, etc.
 *
 * NMI IPIs are globally single threaded. No more than one in progress at
 * any time.
 *
 * The IPI call waits with interrupts disabled until all targets enter the
 * NMI handler, then the call returns.
 *
 * No new NMI can be initiated until targets exit the handler.
 *
 * The IPI call may time out without all targets entering the NMI handler.
 * In that case, there is some logic to recover (and ignore subsequent
 * NMI interrupts that may eventually be raised), but the platform interrupt
 * handler may not be able to distinguish this from other exception causes,
 * which may cause a crash.
 */

static atomic_t __nmi_ipi_lock = ATOMIC_INIT(0);
static struct cpumask nmi_ipi_pending_mask;
static int nmi_ipi_busy_count = 0;
static void (*nmi_ipi_function)(struct pt_regs *) = NULL;

static void nmi_ipi_lock_start(unsigned long *flags)
{
	raw_local_irq_save(*flags);
	hard_irq_disable();
	while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) {
		raw_local_irq_restore(*flags);
		cpu_relax();
		raw_local_irq_save(*flags);
		hard_irq_disable();
	}
}

static void nmi_ipi_lock(void)
{
	while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1)
		cpu_relax();
}

static void nmi_ipi_unlock(void)
{
	smp_mb();
	WARN_ON(atomic_read(&__nmi_ipi_lock) != 1);
	atomic_set(&__nmi_ipi_lock, 0);
}

static void nmi_ipi_unlock_end(unsigned long *flags)
{
	nmi_ipi_unlock();
	raw_local_irq_restore(*flags);
}

/*
 * Platform NMI handler calls this to ack
 */
int smp_handle_nmi_ipi(struct pt_regs *regs)
{
	void (*fn)(struct pt_regs *);
	unsigned long flags;
	int me = raw_smp_processor_id();
	int ret = 0;

	/*
	 * Unexpected NMIs are possible here because the interrupt may not
	 * be able to distinguish NMI IPIs from other types of NMIs, or
	 * because the caller may have timed out.
	 */
	nmi_ipi_lock_start(&flags);
	if (!nmi_ipi_busy_count)
		goto out;
	if (!cpumask_test_cpu(me, &nmi_ipi_pending_mask))
		goto out;

	fn = nmi_ipi_function;
	if (!fn)
		goto out;

	cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
	nmi_ipi_busy_count++;
	nmi_ipi_unlock();

	ret = 1;

	fn(regs);

	nmi_ipi_lock();
	nmi_ipi_busy_count--;
out:
	nmi_ipi_unlock_end(&flags);

	return ret;
}

static void do_smp_send_nmi_ipi(int cpu)
{
	if (smp_ops->cause_nmi_ipi && smp_ops->cause_nmi_ipi(cpu))
		return;

	if (cpu >= 0) {
		do_message_pass(cpu, PPC_MSG_NMI_IPI);
	} else {
		int c;

		for_each_online_cpu(c) {
			if (c == raw_smp_processor_id())
				continue;
			do_message_pass(c, PPC_MSG_NMI_IPI);
		}
	}
}

/*
 * - cpu is the target CPU (must not be this CPU), or NMI_IPI_ALL_OTHERS.
 * - fn is the target callback function.
 * - delay_us > 0 is the delay before giving up waiting for targets to
 *   enter the handler, == 0 specifies indefinite delay.
 */
static int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
{
	unsigned long flags;
	int me = raw_smp_processor_id();
	int ret = 1;

	BUG_ON(cpu == me);
	BUG_ON(cpu < 0 && cpu != NMI_IPI_ALL_OTHERS);

	if (unlikely(!smp_ops))
		return 0;

	/* Take the nmi_ipi_busy count/lock with interrupts hard disabled */
	nmi_ipi_lock_start(&flags);
	while (nmi_ipi_busy_count) {
		nmi_ipi_unlock_end(&flags);
		cpu_relax();
		nmi_ipi_lock_start(&flags);
	}

	nmi_ipi_function = fn;

	if (cpu < 0) {
		/* ALL_OTHERS */
		cpumask_copy(&nmi_ipi_pending_mask, cpu_online_mask);
		cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
	} else {
		/* cpumask starts clear */
		cpumask_set_cpu(cpu, &nmi_ipi_pending_mask);
	}
	nmi_ipi_busy_count++;
	nmi_ipi_unlock();

	do_smp_send_nmi_ipi(cpu);

	while (!cpumask_empty(&nmi_ipi_pending_mask)) {
		udelay(1);
		if (delay_us) {
			delay_us--;
			if (!delay_us)
				break;
		}
	}

	nmi_ipi_lock();
	if (!cpumask_empty(&nmi_ipi_pending_mask)) {
		/* Could not gather all CPUs */
		ret = 0;
		cpumask_clear(&nmi_ipi_pending_mask);
	}
	nmi_ipi_busy_count--;
	nmi_ipi_unlock_end(&flags);

	return ret;
}
#endif /* CONFIG_NMI_IPI */
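
/*
 * Example (sketch; the callback name is illustrative): a debug-style
 * user of the NMI IPI passes a pt_regs handler and a timeout in
 * microseconds, e.g. dumping register state on all other CPUs with a
 * one second timeout:
 *
 *	static void example_dump_regs(struct pt_regs *regs)
 *	{
 *		show_regs(regs);
 *	}
 *
 *	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, example_dump_regs, 1000000);
 *
 * which is the same pattern the debugger and kexec callers below use.
 */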

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		do_message_pass(cpu, PPC_MSG_TICK_BROADCAST);
}
#endif

#ifdef CONFIG_DEBUGGER
void debugger_ipi_callback(struct pt_regs *regs)
{
	debugger_ipi(regs);
}

void smp_send_debugger_break(void)
{
	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, debugger_ipi_callback, 1000000);
}
#endif

#ifdef CONFIG_KEXEC_CORE
void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
{
	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, crash_ipi_callback, 1000000);
}
#endif

static void stop_this_cpu(void *dummy)
{
	/* Remove this CPU */
	set_cpu_online(smp_processor_id(), false);

	local_irq_disable();
	while (1)
		;
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}

struct thread_info *current_set[NR_CPUS];

static void smp_store_cpu_info(int id)
{
	per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
#ifdef CONFIG_PPC_FSL_BOOK3E
	per_cpu(next_tlbcam_idx, id)
		= (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu;

	DBG("smp_prepare_cpus\n");

	/*
	 * setup_cpu may need to be called on the boot cpu. We haven't
	 * spun any cpus up but let's be paranoid.
	 */
	BUG_ON(boot_cpuid != smp_processor_id());

	/* Fixup boot cpu */
	smp_store_cpu_info(boot_cpuid);
	cpu_callin_map[boot_cpuid] = 1;

	for_each_possible_cpu(cpu) {
		zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
		zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
		/*
		 * numa_node_id() works after this.
		 */
		if (cpu_present(cpu)) {
			set_cpu_numa_node(cpu, numa_cpu_lookup_table[cpu]);
			set_cpu_numa_mem(cpu,
				local_memory_node(numa_cpu_lookup_table[cpu]));
		}
	}

	cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
	cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));

	if (smp_ops && smp_ops->probe)
		smp_ops->probe();
}

void smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != boot_cpuid);
#ifdef CONFIG_PPC64
	paca[boot_cpuid].__current = current;
#endif
	set_numa_node(numa_cpu_lookup_table[boot_cpuid]);
	current_set[boot_cpuid] = task_thread_info(current);
}

#ifdef CONFIG_HOTPLUG_CPU

int generic_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	if (cpu == boot_cpuid)
		return -EBUSY;

	set_cpu_online(cpu, false);
#ifdef CONFIG_PPC64
	vdso_data->processorCount--;
#endif
	/* Update affinity of all IRQs previously aimed at this CPU */
	irq_migrate_all_off_this_cpu();

	/*
	 * Depending on the details of the interrupt controller, it's possible
	 * that one of the interrupts we just migrated away from this CPU is
	 * actually already pending on this CPU. If we leave it in that state
	 * the interrupt will never be EOI'ed, and will never fire again. So
	 * temporarily enable interrupts here, to allow any pending interrupt
	 * to be received (and EOI'ed), before we take this CPU offline.
	 */
	local_irq_enable();
	mdelay(1);
	local_irq_disable();

	return 0;
}

void generic_cpu_die(unsigned int cpu)
{
	int i;

	for (i = 0; i < 100; i++) {
		smp_rmb();
		if (is_cpu_dead(cpu))
			return;
		msleep(100);
	}
	printk(KERN_ERR "CPU%d didn't die...\n", cpu);
}

void generic_set_cpu_dead(unsigned int cpu)
{
	per_cpu(cpu_state, cpu) = CPU_DEAD;
}

/*
 * The cpu_state should be set to CPU_UP_PREPARE in kick_cpu(), otherwise
 * the cpu_state is always CPU_DEAD after calling generic_set_cpu_dead(),
 * which makes the delay in generic_cpu_die() not happen.
 */
void generic_set_cpu_up(unsigned int cpu)
{
	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
}

int generic_check_cpu_restart(unsigned int cpu)
{
	return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
}

int is_cpu_dead(unsigned int cpu)
{
	return per_cpu(cpu_state, cpu) == CPU_DEAD;
}
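
/*
 * Example (sketch; the low-power wait is platform specific and
 * illustrative): a platform's offline path typically pairs with the
 * helpers above by marking itself dead and then spinning until it is
 * asked to restart:
 *
 *	generic_set_cpu_dead(cpu);
 *	while (!generic_check_cpu_restart(cpu))
 *		ppc_md.power_save();
 *
 * while the CPU driving the unplug polls is_cpu_dead() via
 * generic_cpu_die() above before tearing the CPU down.
 */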

static bool secondaries_inhibited(void)
{
	return kvm_hv_mode_active();
}

#else /* HOTPLUG_CPU */

#define secondaries_inhibited()		0

#endif

static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle)
{
	struct thread_info *ti = task_thread_info(idle);

#ifdef CONFIG_PPC64
	paca[cpu].__current = idle;
	paca[cpu].kstack = (unsigned long)ti + THREAD_SIZE - STACK_FRAME_OVERHEAD;
#endif
	ti->cpu = cpu;
	secondary_ti = current_set[cpu] = ti;
}

int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int rc, c;

	/*
	 * Don't allow secondary threads to come online if inhibited.
	 */
	if (threads_per_core > 1 && secondaries_inhibited() &&
	    cpu_thread_in_subcore(cpu))
		return -EBUSY;

	if (smp_ops == NULL ||
	    (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
		return -EINVAL;

	cpu_idle_thread_init(cpu, tidle);

	/*
	 * The platform might need to allocate resources prior to bringing
	 * up the CPU.
	 */
	if (smp_ops->prepare_cpu) {
		rc = smp_ops->prepare_cpu(cpu);
		if (rc)
			return rc;
	}

	/* Make sure the callin-map entry is 0 (can be left over from a
	 * CPU hotplug).
	 */
	cpu_callin_map[cpu] = 0;

	/* The information for processor bringup must
	 * be written out to main store before we release
	 * the processor.
	 */
	smp_mb();

	/* wake up cpus */
	DBG("smp: kicking cpu %d\n", cpu);
	rc = smp_ops->kick_cpu(cpu);
	if (rc) {
		pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc);
		return rc;
	}

	/*
	 * Wait to see if the cpu made a callin (is actually up), using a
	 * value found through experimentation: 50000 * 100us, i.e. about
	 * five seconds.
	 * -- Cort
	 */
	if (system_state < SYSTEM_RUNNING)
		for (c = 50000; c && !cpu_callin_map[cpu]; c--)
			udelay(100);
#ifdef CONFIG_HOTPLUG_CPU
	else
		/*
		 * CPUs can take much longer to come up in the
		 * hotplug case. Wait five seconds.
		 */
		for (c = 5000; c && !cpu_callin_map[cpu]; c--)
			msleep(1);
#endif

	if (!cpu_callin_map[cpu]) {
		printk(KERN_ERR "Processor %u is stuck.\n", cpu);
		return -ENOENT;
	}

	DBG("Processor %u found.\n", cpu);

	if (smp_ops->give_timebase)
		smp_ops->give_timebase();

	/* Wait until cpu puts itself in the online & active maps */
	while (!cpu_online(cpu))
		cpu_relax();

	return 0;
}

/* Return the value of the reg property corresponding to the given
 * logical cpu.
 */
int cpu_to_core_id(int cpu)
{
	struct device_node *np;
	const __be32 *reg;
	int id = -1;

	np = of_get_cpu_node(cpu, NULL);
	if (!np)
		goto out;

	reg = of_get_property(np, "reg", NULL);
	if (!reg)
		goto out;

	id = be32_to_cpup(reg);
out:
	of_node_put(np);
	return id;
}
EXPORT_SYMBOL_GPL(cpu_to_core_id);
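
/*
 * Example: for a device tree cpu node such as (values illustrative)
 *
 *	cpu@20 {
 *		device_type = "cpu";
 *		reg = <0x20>;
 *	};
 *
 * cpu_to_core_id() returns 0x20 for the logical cpu that maps to this
 * node, and -1 if the node or its "reg" property cannot be found.
 */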

/* Helper routines for cpu to core mapping */
int cpu_core_index_of_thread(int cpu)
{
	return cpu >> threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_core_index_of_thread);

int cpu_first_thread_of_core(int core)
{
	return core << threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);
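
/*
 * Worked example: with threads_shift == 3 (8 threads per core), logical
 * CPU 19 is thread 3 of core 2: cpu_core_index_of_thread(19) ==
 * 19 >> 3 == 2, and cpu_first_thread_of_core(2) == 2 << 3 == 16.
 */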

static void traverse_siblings_chip_id(int cpu, bool add, int chipid)
{
	const struct cpumask *mask;
	struct device_node *np;
	int i, plen;
	const __be32 *prop;

	mask = add ? cpu_online_mask : cpu_present_mask;
	for_each_cpu(i, mask) {
		np = of_get_cpu_node(i, NULL);
		if (!np)
			continue;
		prop = of_get_property(np, "ibm,chip-id", &plen);
		if (prop && plen == sizeof(int) &&
		    of_read_number(prop, 1) == chipid) {
			if (add) {
				cpumask_set_cpu(cpu, cpu_core_mask(i));
				cpumask_set_cpu(i, cpu_core_mask(cpu));
			} else {
				cpumask_clear_cpu(cpu, cpu_core_mask(i));
				cpumask_clear_cpu(i, cpu_core_mask(cpu));
			}
		}
		of_node_put(np);
	}
}

/* Must be called when no change can occur to cpu_present_mask,
 * i.e. during cpu online or offline.
 */
static struct device_node *cpu_to_l2cache(int cpu)
{
	struct device_node *np;
	struct device_node *cache;

	if (!cpu_present(cpu))
		return NULL;

	np = of_get_cpu_node(cpu, NULL);
	if (np == NULL)
		return NULL;

	cache = of_find_next_cache_node(np);

	of_node_put(np);

	return cache;
}

static void traverse_core_siblings(int cpu, bool add)
{
	struct device_node *l2_cache, *np;
	const struct cpumask *mask;
	int i, chip, plen;
	const __be32 *prop;

	/* First see if we have ibm,chip-id properties in cpu nodes */
	np = of_get_cpu_node(cpu, NULL);
	if (np) {
		chip = -1;
		prop = of_get_property(np, "ibm,chip-id", &plen);
		if (prop && plen == sizeof(int))
			chip = of_read_number(prop, 1);
		of_node_put(np);
		if (chip >= 0) {
			traverse_siblings_chip_id(cpu, add, chip);
			return;
		}
	}

	l2_cache = cpu_to_l2cache(cpu);
	mask = add ? cpu_online_mask : cpu_present_mask;
	for_each_cpu(i, mask) {
		np = cpu_to_l2cache(i);
		if (!np)
			continue;
		if (np == l2_cache) {
			if (add) {
				cpumask_set_cpu(cpu, cpu_core_mask(i));
				cpumask_set_cpu(i, cpu_core_mask(cpu));
			} else {
				cpumask_clear_cpu(cpu, cpu_core_mask(i));
				cpumask_clear_cpu(i, cpu_core_mask(cpu));
			}
		}
		of_node_put(np);
	}
	of_node_put(l2_cache);
}

/* Activate a secondary processor. */
void start_secondary(void *unused)
{
	unsigned int cpu = smp_processor_id();
	int i, base;

	mmgrab(&init_mm);
	current->active_mm = &init_mm;

	smp_store_cpu_info(cpu);
	set_dec(tb_ticks_per_jiffy);
	preempt_disable();
	cpu_callin_map[cpu] = 1;

	if (smp_ops->setup_cpu)
		smp_ops->setup_cpu(cpu);
	if (smp_ops->take_timebase)
		smp_ops->take_timebase();

	secondary_cpu_time_init();

#ifdef CONFIG_PPC64
	if (system_state == SYSTEM_RUNNING)
		vdso_data->processorCount++;

	vdso_getcpu_init();
#endif
	/* Update sibling maps */
	base = cpu_first_thread_sibling(cpu);
	for (i = 0; i < threads_per_core; i++) {
		if (cpu_is_offline(base + i) && (cpu != base + i))
			continue;
		cpumask_set_cpu(cpu, cpu_sibling_mask(base + i));
		cpumask_set_cpu(base + i, cpu_sibling_mask(cpu));

		/* cpu_core_map should be a superset of
		 * cpu_sibling_map even if we don't have cache
		 * information, so update the former here, too.
		 */
		cpumask_set_cpu(cpu, cpu_core_mask(base + i));
		cpumask_set_cpu(base + i, cpu_core_mask(cpu));
	}
	traverse_core_siblings(cpu, true);

	set_numa_node(numa_cpu_lookup_table[cpu]);
	set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));

	smp_wmb();
	notify_cpu_starting(cpu);
	set_cpu_online(cpu, true);

	local_irq_enable();

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);

	BUG();
}

int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

#ifdef CONFIG_SCHED_SMT
/* Flags for CPUs with asymmetric SMT dependency */
static int powerpc_smt_flags(void)
{
	int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;

	if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
		printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
		flags |= SD_ASYM_PACKING;
	}
	return flags;
}
#endif

static struct sched_domain_topology_level powerpc_topology[] = {
#ifdef CONFIG_SCHED_SMT
	{ cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
#endif
	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
	{ NULL, },
};
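
/*
 * Example: on a single-node system with two SMT4 cores, this table
 * yields SMT domains {0-3} and {4-7} (the sibling masks) and a DIE
 * domain {0-7} spanning both cores, so the scheduler prefers to spread
 * runnable tasks across cores before stacking them on sibling threads.
 */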

static __init long smp_setup_cpu_workfn(void *data __always_unused)
{
	smp_ops->setup_cpu(boot_cpuid);
	return 0;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	/*
	 * We want the setup_cpu() here to be called on the boot CPU, but
	 * init might run on any CPU, so make sure it's invoked on the boot
	 * CPU.
	 */
	if (smp_ops && smp_ops->setup_cpu)
		work_on_cpu_safe(boot_cpuid, smp_setup_cpu_workfn, NULL);

	if (smp_ops && smp_ops->bringup_done)
		smp_ops->bringup_done();

	dump_numa_cpu_topology();

	set_sched_topology(powerpc_topology);
}

#ifdef CONFIG_HOTPLUG_CPU
int __cpu_disable(void)
{
	int cpu = smp_processor_id();
	int base, i;
	int err;

	if (!smp_ops->cpu_disable)
		return -ENOSYS;

	err = smp_ops->cpu_disable();
	if (err)
		return err;

	/* Update sibling maps */
	base = cpu_first_thread_sibling(cpu);
	for (i = 0; i < threads_per_core && base + i < nr_cpu_ids; i++) {
		cpumask_clear_cpu(cpu, cpu_sibling_mask(base + i));
		cpumask_clear_cpu(base + i, cpu_sibling_mask(cpu));
		cpumask_clear_cpu(cpu, cpu_core_mask(base + i));
		cpumask_clear_cpu(base + i, cpu_core_mask(cpu));
	}
	traverse_core_siblings(cpu, false);

	return 0;
}

void __cpu_die(unsigned int cpu)
{
	if (smp_ops->cpu_die)
		smp_ops->cpu_die(cpu);
}

void cpu_die(void)
{
	if (ppc_md.cpu_die)
		ppc_md.cpu_die();

	/* If we return, we re-enter start_secondary */
	start_secondary_resume();
}

#endif