xref: /openbmc/linux/arch/powerpc/kernel/smp.c (revision aa74c44b)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * SMP support for ppc.
4  *
5  * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
6  * deal of code from the sparc and intel versions.
7  *
8  * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
9  *
10  * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
11  * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
12  */
13 
14 #undef DEBUG
15 
16 #include <linux/kernel.h>
17 #include <linux/export.h>
18 #include <linux/sched/mm.h>
19 #include <linux/sched/task_stack.h>
20 #include <linux/sched/topology.h>
21 #include <linux/smp.h>
22 #include <linux/interrupt.h>
23 #include <linux/delay.h>
24 #include <linux/init.h>
25 #include <linux/spinlock.h>
26 #include <linux/cache.h>
27 #include <linux/err.h>
28 #include <linux/device.h>
29 #include <linux/cpu.h>
30 #include <linux/notifier.h>
31 #include <linux/topology.h>
32 #include <linux/profile.h>
33 #include <linux/processor.h>
34 #include <linux/random.h>
35 #include <linux/stackprotector.h>
36 #include <linux/pgtable.h>
37 #include <linux/clockchips.h>
38 
39 #include <asm/ptrace.h>
40 #include <linux/atomic.h>
41 #include <asm/irq.h>
42 #include <asm/hw_irq.h>
43 #include <asm/kvm_ppc.h>
44 #include <asm/dbell.h>
45 #include <asm/page.h>
46 #include <asm/prom.h>
47 #include <asm/smp.h>
48 #include <asm/time.h>
49 #include <asm/machdep.h>
50 #include <asm/cputhreads.h>
51 #include <asm/cputable.h>
52 #include <asm/mpic.h>
53 #include <asm/vdso_datapage.h>
54 #ifdef CONFIG_PPC64
55 #include <asm/paca.h>
56 #endif
57 #include <asm/vdso.h>
58 #include <asm/debug.h>
59 #include <asm/kexec.h>
60 #include <asm/asm-prototypes.h>
61 #include <asm/cpu_has_feature.h>
62 #include <asm/ftrace.h>
63 #include <asm/kup.h>
64 #include <asm/fadump.h>
65 
66 #ifdef DEBUG
67 #include <asm/udbg.h>
68 #define DBG(fmt...) udbg_printf(fmt)
69 #else
70 #define DBG(fmt...)
71 #endif
72 
73 #ifdef CONFIG_HOTPLUG_CPU
74 /* State of each CPU during hotplug phases */
75 static DEFINE_PER_CPU(int, cpu_state) = { 0 };
76 #endif
77 
78 struct task_struct *secondary_current;
79 bool has_big_cores;
80 bool coregroup_enabled;
81 bool thread_group_shares_l2;
82 bool thread_group_shares_l3;
83 
84 DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
85 DEFINE_PER_CPU(cpumask_var_t, cpu_smallcore_map);
86 DEFINE_PER_CPU(cpumask_var_t, cpu_l2_cache_map);
87 DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);
88 static DEFINE_PER_CPU(cpumask_var_t, cpu_coregroup_map);
89 
90 EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
91 EXPORT_PER_CPU_SYMBOL(cpu_l2_cache_map);
92 EXPORT_PER_CPU_SYMBOL(cpu_core_map);
93 EXPORT_SYMBOL_GPL(has_big_cores);
94 
95 enum {
96 #ifdef CONFIG_SCHED_SMT
97 	smt_idx,
98 #endif
99 	cache_idx,
100 	mc_idx,
101 	die_idx,
102 };
103 
104 #define MAX_THREAD_LIST_SIZE	8
105 #define THREAD_GROUP_SHARE_L1   1
106 #define THREAD_GROUP_SHARE_L2_L3 2
107 struct thread_groups {
108 	unsigned int property;
109 	unsigned int nr_groups;
110 	unsigned int threads_per_group;
111 	unsigned int thread_list[MAX_THREAD_LIST_SIZE];
112 };
113 
114 /* Maximum number of properties that groups of threads within a core can share */
115 #define MAX_THREAD_GROUP_PROPERTIES 2
116 
117 struct thread_groups_list {
118 	unsigned int nr_properties;
119 	struct thread_groups property_tgs[MAX_THREAD_GROUP_PROPERTIES];
120 };
121 
122 static struct thread_groups_list tgl[NR_CPUS] __initdata;
123 /*
124  * On big-core systems, thread_group_l1_cache_map for each CPU corresponds to
125  * the set of its siblings that share the L1-cache.
126  */
127 DEFINE_PER_CPU(cpumask_var_t, thread_group_l1_cache_map);
128 
129 /*
130  * On some big-core systems, thread_group_l2_cache_map for each CPU
131  * corresponds to the set of its siblings within the core that share the
132  * L2-cache.
133  */
134 DEFINE_PER_CPU(cpumask_var_t, thread_group_l2_cache_map);
135 
136 /*
137  * On P10, thread_group_l3_cache_map for each CPU is equal to the
138  * thread_group_l2_cache_map.
139  */
140 DEFINE_PER_CPU(cpumask_var_t, thread_group_l3_cache_map);
141 
142 /* SMP operations for this machine */
143 struct smp_ops_t *smp_ops;
144 
145 /* Can't be static due to PowerMac hackery */
146 volatile unsigned int cpu_callin_map[NR_CPUS];
147 
148 int smt_enabled_at_boot = 1;
149 
150 /*
151  * Returns 1 if the specified cpu should be brought up during boot.
152  * Used to inhibit booting threads if they've been disabled or
153  * limited on the command line.
154  */
155 int smp_generic_cpu_bootable(unsigned int nr)
156 {
157 	/* Special case - we inhibit secondary thread startup
158 	 * during boot if the user requests it.
159 	 */
160 	if (system_state < SYSTEM_RUNNING && cpu_has_feature(CPU_FTR_SMT)) {
161 		if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
162 			return 0;
163 		if (smt_enabled_at_boot
164 		    && cpu_thread_in_core(nr) >= smt_enabled_at_boot)
165 			return 0;
166 	}
167 
168 	return 1;
169 }
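/*
 * Editorial example (not in the original source): if SMT has been
 * limited to 2 on the command line, smt_enabled_at_boot is 2, so
 * threads 0 and 1 of each core are reported bootable here while
 * threads 2 and up are not; with SMT disabled, smt_enabled_at_boot is
 * 0 and only thread 0 of each core is brought up during boot.
 */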
170 
171 
172 #ifdef CONFIG_PPC64
173 int smp_generic_kick_cpu(int nr)
174 {
175 	if (nr < 0 || nr >= nr_cpu_ids)
176 		return -EINVAL;
177 
178 	/*
179 	 * The processor is currently spinning, waiting for the
180 	 * cpu_start field to become non-zero. After we set cpu_start,
181 	 * the processor will continue on to secondary_start.
182 	 */
183 	if (!paca_ptrs[nr]->cpu_start) {
184 		paca_ptrs[nr]->cpu_start = 1;
185 		smp_mb();
186 		return 0;
187 	}
188 
189 #ifdef CONFIG_HOTPLUG_CPU
190 	/*
191 	 * Ok it's not there, so it might be soft-unplugged, let's
192 	 * try to bring it back
193 	 */
194 	generic_set_cpu_up(nr);
195 	smp_wmb();
196 	smp_send_reschedule(nr);
197 #endif /* CONFIG_HOTPLUG_CPU */
198 
199 	return 0;
200 }
201 #endif /* CONFIG_PPC64 */
202 
203 static irqreturn_t call_function_action(int irq, void *data)
204 {
205 	generic_smp_call_function_interrupt();
206 	return IRQ_HANDLED;
207 }
208 
209 static irqreturn_t reschedule_action(int irq, void *data)
210 {
211 	scheduler_ipi();
212 	return IRQ_HANDLED;
213 }
214 
215 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
216 static irqreturn_t tick_broadcast_ipi_action(int irq, void *data)
217 {
218 	timer_broadcast_interrupt();
219 	return IRQ_HANDLED;
220 }
221 #endif
222 
223 #ifdef CONFIG_NMI_IPI
224 static irqreturn_t nmi_ipi_action(int irq, void *data)
225 {
226 	smp_handle_nmi_ipi(get_irq_regs());
227 	return IRQ_HANDLED;
228 }
229 #endif
230 
231 static irq_handler_t smp_ipi_action[] = {
232 	[PPC_MSG_CALL_FUNCTION] =  call_function_action,
233 	[PPC_MSG_RESCHEDULE] = reschedule_action,
234 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
235 	[PPC_MSG_TICK_BROADCAST] = tick_broadcast_ipi_action,
236 #endif
237 #ifdef CONFIG_NMI_IPI
238 	[PPC_MSG_NMI_IPI] = nmi_ipi_action,
239 #endif
240 };
241 
242 /*
243  * The NMI IPI is a fallback and not truly non-maskable. It is simpler
244  * than going through the call function infrastructure, and strongly
245  * serialized, so it is more appropriate for debugging.
246  */
247 const char *smp_ipi_name[] = {
248 	[PPC_MSG_CALL_FUNCTION] =  "ipi call function",
249 	[PPC_MSG_RESCHEDULE] = "ipi reschedule",
250 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
251 	[PPC_MSG_TICK_BROADCAST] = "ipi tick-broadcast",
252 #endif
253 #ifdef CONFIG_NMI_IPI
254 	[PPC_MSG_NMI_IPI] = "nmi ipi",
255 #endif
256 };
257 
258 /* optional function to request ipi, for controllers with >= 4 ipis */
259 int smp_request_message_ipi(int virq, int msg)
260 {
261 	int err;
262 
263 	if (msg < 0 || msg > PPC_MSG_NMI_IPI)
264 		return -EINVAL;
265 #ifndef CONFIG_NMI_IPI
266 	if (msg == PPC_MSG_NMI_IPI)
267 		return 1;
268 #endif
269 
270 	err = request_irq(virq, smp_ipi_action[msg],
271 			  IRQF_PERCPU | IRQF_NO_THREAD | IRQF_NO_SUSPEND,
272 			  smp_ipi_name[msg], NULL);
273 	WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
274 		virq, smp_ipi_name[msg], err);
275 
276 	return err;
277 }
278 
279 #ifdef CONFIG_PPC_SMP_MUXED_IPI
280 struct cpu_messages {
281 	long messages;			/* current messages */
282 };
283 static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message);
284 
285 void smp_muxed_ipi_set_message(int cpu, int msg)
286 {
287 	struct cpu_messages *info = &per_cpu(ipi_message, cpu);
288 	char *message = (char *)&info->messages;
289 
290 	/*
291 	 * Order previous accesses before accesses in the IPI handler.
292 	 */
293 	smp_mb();
294 	message[msg] = 1;
295 }
296 
297 void smp_muxed_ipi_message_pass(int cpu, int msg)
298 {
299 	smp_muxed_ipi_set_message(cpu, msg);
300 
301 	/*
302 	 * cause_ipi functions are required to include a full barrier
303 	 * before doing whatever causes the IPI.
304 	 */
305 	smp_ops->cause_ipi(cpu);
306 }
307 
308 #ifdef __BIG_ENDIAN__
309 #define IPI_MESSAGE(A) (1uL << ((BITS_PER_LONG - 8) - 8 * (A)))
310 #else
311 #define IPI_MESSAGE(A) (1uL << (8 * (A)))
312 #endif
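/*
 * Editorial note: each message type occupies one byte of the per-cpu
 * 'messages' word.  smp_muxed_ipi_set_message() stores 1 into byte
 * 'msg' of that word, and IPI_MESSAGE(msg) computes the long-sized
 * mask covering that same byte independent of endianness; e.g. with
 * PPC_MSG_RESCHEDULE == 1, byte 1 of the array is bits 48-55 of the
 * long on a 64-bit big-endian kernel, so IPI_MESSAGE(PPC_MSG_RESCHEDULE)
 * is 1uL << 48.
 */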
313 
314 irqreturn_t smp_ipi_demux(void)
315 {
316 	mb();	/* order any irq clear */
317 
318 	return smp_ipi_demux_relaxed();
319 }
320 
321 /* sync-free variant. Callers should ensure synchronization */
322 irqreturn_t smp_ipi_demux_relaxed(void)
323 {
324 	struct cpu_messages *info;
325 	unsigned long all;
326 
327 	info = this_cpu_ptr(&ipi_message);
328 	do {
329 		all = xchg(&info->messages, 0);
330 #if defined(CONFIG_KVM_XICS) && defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
331 		/*
332 		 * Must check for PPC_MSG_RM_HOST_ACTION messages
333 		 * before PPC_MSG_CALL_FUNCTION messages because when
334 		 * a VM is destroyed, we call kick_all_cpus_sync()
335 		 * to ensure that any pending PPC_MSG_RM_HOST_ACTION
336 		 * messages have completed before we free any VCPUs.
337 		 */
338 		if (all & IPI_MESSAGE(PPC_MSG_RM_HOST_ACTION))
339 			kvmppc_xics_ipi_action();
340 #endif
341 		if (all & IPI_MESSAGE(PPC_MSG_CALL_FUNCTION))
342 			generic_smp_call_function_interrupt();
343 		if (all & IPI_MESSAGE(PPC_MSG_RESCHEDULE))
344 			scheduler_ipi();
345 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
346 		if (all & IPI_MESSAGE(PPC_MSG_TICK_BROADCAST))
347 			timer_broadcast_interrupt();
348 #endif
349 #ifdef CONFIG_NMI_IPI
350 		if (all & IPI_MESSAGE(PPC_MSG_NMI_IPI))
351 			nmi_ipi_action(0, NULL);
352 #endif
353 	} while (info->messages);
354 
355 	return IRQ_HANDLED;
356 }
357 #endif /* CONFIG_PPC_SMP_MUXED_IPI */
358 
359 static inline void do_message_pass(int cpu, int msg)
360 {
361 	if (smp_ops->message_pass)
362 		smp_ops->message_pass(cpu, msg);
363 #ifdef CONFIG_PPC_SMP_MUXED_IPI
364 	else
365 		smp_muxed_ipi_message_pass(cpu, msg);
366 #endif
367 }
368 
369 void smp_send_reschedule(int cpu)
370 {
371 	if (likely(smp_ops))
372 		do_message_pass(cpu, PPC_MSG_RESCHEDULE);
373 }
374 EXPORT_SYMBOL_GPL(smp_send_reschedule);
375 
376 void arch_send_call_function_single_ipi(int cpu)
377 {
378 	do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
379 }
380 
381 void arch_send_call_function_ipi_mask(const struct cpumask *mask)
382 {
383 	unsigned int cpu;
384 
385 	for_each_cpu(cpu, mask)
386 		do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
387 }
388 
389 #ifdef CONFIG_NMI_IPI
390 
391 /*
392  * "NMI IPI" system.
393  *
394  * NMI IPIs may not be recoverable, so should not be used as ongoing part of
395  * a running system. They can be used for crash, debug, halt/reboot, etc.
396  *
397  * The IPI call waits with interrupts disabled until all targets enter the
398  * NMI handler, then returns. Subsequent IPIs can be issued before targets
399  * have returned from their handlers, so there is no guarantee about
400  * concurrency or re-entrancy.
401  *
402  * A new NMI can be issued before all targets exit the handler.
403  *
404  * The IPI call may time out without all targets entering the NMI handler.
405  * In that case, there is some logic to recover (and ignore subsequent
406  * NMI interrupts that may eventually be raised), but the platform interrupt
407  * handler may not be able to distinguish this from other exception causes,
408  * which may cause a crash.
409  */
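/*
 * Editorial usage sketch (the handler name is hypothetical; see
 * smp_send_debugger_break() below for an in-tree caller): the caller
 * supplies a function to run on every target in NMI context plus a
 * timeout in microseconds, e.g.
 *
 *	static void my_nmi_dump(struct pt_regs *regs)
 *	{
 *		show_regs(regs);
 *	}
 *
 *	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, my_nmi_dump, 1000000);
 */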
410 
411 static atomic_t __nmi_ipi_lock = ATOMIC_INIT(0);
412 static struct cpumask nmi_ipi_pending_mask;
413 static bool nmi_ipi_busy = false;
414 static void (*nmi_ipi_function)(struct pt_regs *) = NULL;
415 
416 static void nmi_ipi_lock_start(unsigned long *flags)
417 {
418 	raw_local_irq_save(*flags);
419 	hard_irq_disable();
420 	while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) {
421 		raw_local_irq_restore(*flags);
422 		spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0);
423 		raw_local_irq_save(*flags);
424 		hard_irq_disable();
425 	}
426 }
427 
428 static void nmi_ipi_lock(void)
429 {
430 	while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1)
431 		spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0);
432 }
433 
434 static void nmi_ipi_unlock(void)
435 {
436 	smp_mb();
437 	WARN_ON(atomic_read(&__nmi_ipi_lock) != 1);
438 	atomic_set(&__nmi_ipi_lock, 0);
439 }
440 
441 static void nmi_ipi_unlock_end(unsigned long *flags)
442 {
443 	nmi_ipi_unlock();
444 	raw_local_irq_restore(*flags);
445 }
446 
447 /*
448  * Platform NMI handler calls this to ack an NMI IPI.
449  */
450 int smp_handle_nmi_ipi(struct pt_regs *regs)
451 {
452 	void (*fn)(struct pt_regs *) = NULL;
453 	unsigned long flags;
454 	int me = raw_smp_processor_id();
455 	int ret = 0;
456 
457 	/*
458 	 * Unexpected NMIs are possible here because the interrupt may not
459 	 * be able to distinguish NMI IPIs from other types of NMIs, or
460 	 * because the caller may have timed out.
461 	 */
462 	nmi_ipi_lock_start(&flags);
463 	if (cpumask_test_cpu(me, &nmi_ipi_pending_mask)) {
464 		cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
465 		fn = READ_ONCE(nmi_ipi_function);
466 		WARN_ON_ONCE(!fn);
467 		ret = 1;
468 	}
469 	nmi_ipi_unlock_end(&flags);
470 
471 	if (fn)
472 		fn(regs);
473 
474 	return ret;
475 }
476 
477 static void do_smp_send_nmi_ipi(int cpu, bool safe)
478 {
479 	if (!safe && smp_ops->cause_nmi_ipi && smp_ops->cause_nmi_ipi(cpu))
480 		return;
481 
482 	if (cpu >= 0) {
483 		do_message_pass(cpu, PPC_MSG_NMI_IPI);
484 	} else {
485 		int c;
486 
487 		for_each_online_cpu(c) {
488 			if (c == raw_smp_processor_id())
489 				continue;
490 			do_message_pass(c, PPC_MSG_NMI_IPI);
491 		}
492 	}
493 }
494 
495 /*
496  * - cpu is the target CPU (must not be this CPU), or NMI_IPI_ALL_OTHERS.
497  * - fn is the target callback function.
498  * - delay_us > 0 is the delay before giving up waiting for targets to
499  *   begin executing the handler, == 0 specifies indefinite delay.
500  */
501 static int __smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *),
502 				u64 delay_us, bool safe)
503 {
504 	unsigned long flags;
505 	int me = raw_smp_processor_id();
506 	int ret = 1;
507 
508 	BUG_ON(cpu == me);
509 	BUG_ON(cpu < 0 && cpu != NMI_IPI_ALL_OTHERS);
510 
511 	if (unlikely(!smp_ops))
512 		return 0;
513 
514 	nmi_ipi_lock_start(&flags);
515 	while (nmi_ipi_busy) {
516 		nmi_ipi_unlock_end(&flags);
517 		spin_until_cond(!nmi_ipi_busy);
518 		nmi_ipi_lock_start(&flags);
519 	}
520 	nmi_ipi_busy = true;
521 	nmi_ipi_function = fn;
522 
523 	WARN_ON_ONCE(!cpumask_empty(&nmi_ipi_pending_mask));
524 
525 	if (cpu < 0) {
526 		/* ALL_OTHERS */
527 		cpumask_copy(&nmi_ipi_pending_mask, cpu_online_mask);
528 		cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
529 	} else {
530 		cpumask_set_cpu(cpu, &nmi_ipi_pending_mask);
531 	}
532 
533 	nmi_ipi_unlock();
534 
535 	/* Interrupts remain hard disabled */
536 
537 	do_smp_send_nmi_ipi(cpu, safe);
538 
539 	nmi_ipi_lock();
540 	/* nmi_ipi_busy is set here, so unlock/lock is okay */
541 	while (!cpumask_empty(&nmi_ipi_pending_mask)) {
542 		nmi_ipi_unlock();
543 		udelay(1);
544 		nmi_ipi_lock();
545 		if (delay_us) {
546 			delay_us--;
547 			if (!delay_us)
548 				break;
549 		}
550 	}
551 
552 	if (!cpumask_empty(&nmi_ipi_pending_mask)) {
553 		/* Timeout waiting for CPUs to call smp_handle_nmi_ipi */
554 		ret = 0;
555 		cpumask_clear(&nmi_ipi_pending_mask);
556 	}
557 
558 	nmi_ipi_function = NULL;
559 	nmi_ipi_busy = false;
560 
561 	nmi_ipi_unlock_end(&flags);
562 
563 	return ret;
564 }
565 
566 int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
567 {
568 	return __smp_send_nmi_ipi(cpu, fn, delay_us, false);
569 }
570 
571 int smp_send_safe_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
572 {
573 	return __smp_send_nmi_ipi(cpu, fn, delay_us, true);
574 }
575 #endif /* CONFIG_NMI_IPI */
576 
577 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
578 void tick_broadcast(const struct cpumask *mask)
579 {
580 	unsigned int cpu;
581 
582 	for_each_cpu(cpu, mask)
583 		do_message_pass(cpu, PPC_MSG_TICK_BROADCAST);
584 }
585 #endif
586 
587 #ifdef CONFIG_DEBUGGER
588 static void debugger_ipi_callback(struct pt_regs *regs)
589 {
590 	debugger_ipi(regs);
591 }
592 
593 void smp_send_debugger_break(void)
594 {
595 	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, debugger_ipi_callback, 1000000);
596 }
597 #endif
598 
599 #ifdef CONFIG_KEXEC_CORE
600 void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
601 {
602 	int cpu;
603 
604 	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, crash_ipi_callback, 1000000);
605 	if (kdump_in_progress() && crash_wake_offline) {
606 		for_each_present_cpu(cpu) {
607 			if (cpu_online(cpu))
608 				continue;
609 			/*
610 			 * crash_ipi_callback will wait for
611 			 * all cpus, including offline CPUs.
612 			 * We don't care about nmi_ipi_function.
613 			 * Offline cpus will jump straight into
614 			 * crash_ipi_callback, so we can skip the
615 			 * entire NMI dance and waiting for
616 			 * cpus to clear pending mask, etc.
617 			 */
618 			do_smp_send_nmi_ipi(cpu, false);
619 		}
620 	}
621 }
622 #endif
623 
624 #ifdef CONFIG_NMI_IPI
625 static void crash_stop_this_cpu(struct pt_regs *regs)
626 #else
627 static void crash_stop_this_cpu(void *dummy)
628 #endif
629 {
630 	/*
631 	 * Just busy wait here and avoid marking CPU as offline to ensure
632 	 * register data is captured appropriately.
633 	 */
634 	while (1)
635 		cpu_relax();
636 }
637 
638 void crash_smp_send_stop(void)
639 {
640 	static bool stopped = false;
641 
642 	/*
643 	 * In case of fadump, register data for all CPUs is captured by f/w
644 	 * on ibm,os-term rtas call. Skip IPI callbacks to other CPUs before
645 	 * this rtas call to avoid tricky post processing of those CPUs'
646 	 * backtraces.
647 	 */
648 	if (should_fadump_crash())
649 		return;
650 
651 	if (stopped)
652 		return;
653 
654 	stopped = true;
655 
656 #ifdef CONFIG_NMI_IPI
657 	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, crash_stop_this_cpu, 1000000);
658 #else
659 	smp_call_function(crash_stop_this_cpu, NULL, 0);
660 #endif /* CONFIG_NMI_IPI */
661 }
662 
663 #ifdef CONFIG_NMI_IPI
664 static void nmi_stop_this_cpu(struct pt_regs *regs)
665 {
666 	/*
667 	 * IRQs are already hard disabled by smp_handle_nmi_ipi().
668 	 */
669 	set_cpu_online(smp_processor_id(), false);
670 
671 	spin_begin();
672 	while (1)
673 		spin_cpu_relax();
674 }
675 
676 void smp_send_stop(void)
677 {
678 	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, nmi_stop_this_cpu, 1000000);
679 }
680 
681 #else /* CONFIG_NMI_IPI */
682 
683 static void stop_this_cpu(void *dummy)
684 {
685 	hard_irq_disable();
686 
687 	/*
688 	 * Offlining CPUs in stop_this_cpu can result in scheduler warnings
689 	 * (see commit de6e5d38417e), but printk_safe_flush_on_panic() wants
690 	 * to know other CPUs are offline before it breaks locks to flush
691 	 * printk buffers, in case we panic()ed while holding the lock.
692 	 */
693 	set_cpu_online(smp_processor_id(), false);
694 
695 	spin_begin();
696 	while (1)
697 		spin_cpu_relax();
698 }
699 
700 void smp_send_stop(void)
701 {
702 	static bool stopped = false;
703 
704 	/*
705 	 * Prevent waiting on csd lock from a previous smp_send_stop.
706 	 * This is racy, but in general callers try to do the right
707 	 * thing and only fire off one smp_send_stop (e.g., see
708 	 * kernel/panic.c)
709 	 */
710 	if (stopped)
711 		return;
712 
713 	stopped = true;
714 
715 	smp_call_function(stop_this_cpu, NULL, 0);
716 }
717 #endif /* CONFIG_NMI_IPI */
718 
719 struct task_struct *current_set[NR_CPUS];
720 
721 static void smp_store_cpu_info(int id)
722 {
723 	per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
724 #ifdef CONFIG_PPC_FSL_BOOK3E
725 	per_cpu(next_tlbcam_idx, id)
726 		= (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
727 #endif
728 }
729 
730 /*
731  * Relationships between CPUs are maintained in a set of per-cpu cpumasks, so
732  * rather than just passing around the cpumask we pass around a function that
733  * returns that cpumask for the given CPU.
734  */
735 static void set_cpus_related(int i, int j, struct cpumask *(*get_cpumask)(int))
736 {
737 	cpumask_set_cpu(i, get_cpumask(j));
738 	cpumask_set_cpu(j, get_cpumask(i));
739 }
740 
741 #ifdef CONFIG_HOTPLUG_CPU
742 static void set_cpus_unrelated(int i, int j,
743 		struct cpumask *(*get_cpumask)(int))
744 {
745 	cpumask_clear_cpu(i, get_cpumask(j));
746 	cpumask_clear_cpu(j, get_cpumask(i));
747 }
748 #endif
749 
750 /*
751  * Extends set_cpus_related. Instead of setting one CPU at a time in
752  * dstmask, set srcmask in one shot. dstmask should be a superset of srcmask.
753  */
754 static void or_cpumasks_related(int i, int j, struct cpumask *(*srcmask)(int),
755 				struct cpumask *(*dstmask)(int))
756 {
757 	struct cpumask *mask;
758 	int k;
759 
760 	mask = srcmask(j);
761 	for_each_cpu(k, srcmask(i))
762 		cpumask_or(dstmask(k), dstmask(k), mask);
763 
764 	if (i == j)
765 		return;
766 
767 	mask = srcmask(i);
768 	for_each_cpu(k, srcmask(j))
769 		cpumask_or(dstmask(k), dstmask(k), mask);
770 }
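/*
 * Editorial example with hypothetical CPU numbers: if srcmask(2) is
 * {2,3} and srcmask(6) is {6,7}, then or_cpumasks_related(2, 6,
 * srcmask, dstmask) ORs {6,7} into dstmask(2) and dstmask(3), and ORs
 * {2,3} into dstmask(6) and dstmask(7), so every CPU in either source
 * set becomes related to all CPUs of the other set.
 */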
771 
772 /*
773  * parse_thread_groups: Parses the "ibm,thread-groups" device tree
774  *                      property for the CPU device node @dn and stores
775  *                      the parsed output in the thread_groups_list
776  *                      structure @tglp.
777  *
778  * @dn: The device node of the CPU device.
779  * @tglp: Pointer to a thread group list structure into which the parsed
780  *      output of "ibm,thread-groups" is stored.
781  *
782  * ibm,thread-groups[0..N-1] array defines which group of threads in
783  * the CPU-device node can be grouped together based on the property.
784  *
785  * This array can represent thread groupings for multiple properties.
786  *
787  * ibm,thread-groups[i + 0] tells us the property based on which the
788  * threads are being grouped together. If this value is 1, it implies
789  * that the threads in the same group share the L1 and translation caches. If
790  * the value is 2, it implies that the threads in the same group share
791  * the same L2 cache.
792  *
793  * ibm,thread-groups[i+1] tells us how many such thread groups exist for the
794  * property ibm,thread-groups[i]
795  *
796  * ibm,thread-groups[i+2] tells us the number of threads in each such
797  * group.
798  * Suppose k = (ibm,thread-groups[i+1] * ibm,thread-groups[i+2]), then,
799  *
800  * ibm,thread-groups[i+3..i+k+2] is the list of threads identified by
801  * "ibm,ppc-interrupt-server#s" arranged as per their membership in
802  * the grouping.
803  *
804  * Example:
805  * If "ibm,thread-groups" = [1,2,4,8,10,12,14,9,11,13,15,2,2,4,8,10,12,14,9,11,13,15]
806  * This can be decomposed into two consecutive arrays:
807  * a) [1,2,4,8,10,12,14,9,11,13,15]
808  * b) [2,2,4,8,10,12,14,9,11,13,15]
809  *
810  * where in,
811  *
812  * a) provides information about Property "1" being shared by "2" groups,
813  *  each with "4" threads. The "ibm,ppc-interrupt-server#s" of
814  *  the first group is {8,10,12,14} and the
815  *  "ibm,ppc-interrupt-server#s" of the second group is
816  *  {9,11,13,15}. Property "1" indicates that the threads in each
817  *  group share the L1 cache, the translation cache and the
818  *  instruction data flow.
819  *
820  * b) provides information about Property "2" being shared by "2" groups,
821  *  each group with "4" threads. The "ibm,ppc-interrupt-server#s" of
822  *  the first group is {8,10,12,14} and the
823  *  "ibm,ppc-interrupt-server#s" of the second group is
824  *  {9,11,13,15}. Property "2" indicates that the threads in each
825  *  group share the L2-cache.
826  *
827  * Returns 0 on success, -EINVAL if the property does not exist,
828  * -ENODATA if property does not have a value, and -EOVERFLOW if the
829  * property data isn't large enough.
830  */
831 static int parse_thread_groups(struct device_node *dn,
832 			       struct thread_groups_list *tglp)
833 {
834 	unsigned int property_idx = 0;
835 	u32 *thread_group_array;
836 	size_t total_threads;
837 	int ret = 0, count;
838 	u32 *thread_list;
839 	int i = 0;
840 
841 	count = of_property_count_u32_elems(dn, "ibm,thread-groups");
842 	thread_group_array = kcalloc(count, sizeof(u32), GFP_KERNEL);
843 	ret = of_property_read_u32_array(dn, "ibm,thread-groups",
844 					 thread_group_array, count);
845 	if (ret)
846 		goto out_free;
847 
848 	while (i < count && property_idx < MAX_THREAD_GROUP_PROPERTIES) {
849 		int j;
850 		struct thread_groups *tg = &tglp->property_tgs[property_idx++];
851 
852 		tg->property = thread_group_array[i];
853 		tg->nr_groups = thread_group_array[i + 1];
854 		tg->threads_per_group = thread_group_array[i + 2];
855 		total_threads = tg->nr_groups * tg->threads_per_group;
856 
857 		thread_list = &thread_group_array[i + 3];
858 
859 		for (j = 0; j < total_threads; j++)
860 			tg->thread_list[j] = thread_list[j];
861 		i = i + 3 + total_threads;
862 	}
863 
864 	tglp->nr_properties = property_idx;
865 
866 out_free:
867 	kfree(thread_group_array);
868 	return ret;
869 }
870 
871 /*
872  * get_cpu_thread_group_start : Searches the thread group in tg->thread_list
873  *                              that @cpu belongs to.
874  *
875  * @cpu : The logical CPU whose thread group is being searched.
876  * @tg : The thread-group structure of the CPU node which @cpu belongs
877  *       to.
878  *
879  * Returns the index into tg->thread_list that points to the start
880  * of the thread_group that @cpu belongs to.
881  *
882  * Returns -1 if cpu doesn't belong to any of the groups pointed to by
883  * tg->thread_list.
884  */
885 static int get_cpu_thread_group_start(int cpu, struct thread_groups *tg)
886 {
887 	int hw_cpu_id = get_hard_smp_processor_id(cpu);
888 	int i, j;
889 
890 	for (i = 0; i < tg->nr_groups; i++) {
891 		int group_start = i * tg->threads_per_group;
892 
893 		for (j = 0; j < tg->threads_per_group; j++) {
894 			int idx = group_start + j;
895 
896 			if (tg->thread_list[idx] == hw_cpu_id)
897 				return group_start;
898 		}
899 	}
900 
901 	return -1;
902 }
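/*
 * Editorial example, reusing the "ibm,thread-groups" sample documented
 * above parse_thread_groups(): with nr_groups = 2, threads_per_group =
 * 4 and thread_list = {8,10,12,14,9,11,13,15}, a CPU whose hardware id
 * is 11 matches the second group, so this returns 4, the index at
 * which that group's entries start.
 */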
903 
904 static struct thread_groups *__init get_thread_groups(int cpu,
905 						      int group_property,
906 						      int *err)
907 {
908 	struct device_node *dn = of_get_cpu_node(cpu, NULL);
909 	struct thread_groups_list *cpu_tgl = &tgl[cpu];
910 	struct thread_groups *tg = NULL;
911 	int i;
912 	*err = 0;
913 
914 	if (!dn) {
915 		*err = -ENODATA;
916 		return NULL;
917 	}
918 
919 	if (!cpu_tgl->nr_properties) {
920 		*err = parse_thread_groups(dn, cpu_tgl);
921 		if (*err)
922 			goto out;
923 	}
924 
925 	for (i = 0; i < cpu_tgl->nr_properties; i++) {
926 		if (cpu_tgl->property_tgs[i].property == group_property) {
927 			tg = &cpu_tgl->property_tgs[i];
928 			break;
929 		}
930 	}
931 
932 	if (!tg)
933 		*err = -EINVAL;
934 out:
935 	of_node_put(dn);
936 	return tg;
937 }
938 
939 static int __init update_mask_from_threadgroup(cpumask_var_t *mask, struct thread_groups *tg,
940 					       int cpu, int cpu_group_start)
941 {
942 	int first_thread = cpu_first_thread_sibling(cpu);
943 	int i;
944 
945 	zalloc_cpumask_var_node(mask, GFP_KERNEL, cpu_to_node(cpu));
946 
947 	for (i = first_thread; i < first_thread + threads_per_core; i++) {
948 		int i_group_start = get_cpu_thread_group_start(i, tg);
949 
950 		if (unlikely(i_group_start == -1)) {
951 			WARN_ON_ONCE(1);
952 			return -ENODATA;
953 		}
954 
955 		if (i_group_start == cpu_group_start)
956 			cpumask_set_cpu(i, *mask);
957 	}
958 
959 	return 0;
960 }
961 
962 static int __init init_thread_group_cache_map(int cpu, int cache_property)
963 
964 {
965 	int cpu_group_start = -1, err = 0;
966 	struct thread_groups *tg = NULL;
967 	cpumask_var_t *mask = NULL;
968 
969 	if (cache_property != THREAD_GROUP_SHARE_L1 &&
970 	    cache_property != THREAD_GROUP_SHARE_L2_L3)
971 		return -EINVAL;
972 
973 	tg = get_thread_groups(cpu, cache_property, &err);
974 
975 	if (!tg)
976 		return err;
977 
978 	cpu_group_start = get_cpu_thread_group_start(cpu, tg);
979 
980 	if (unlikely(cpu_group_start == -1)) {
981 		WARN_ON_ONCE(1);
982 		return -ENODATA;
983 	}
984 
985 	if (cache_property == THREAD_GROUP_SHARE_L1) {
986 		mask = &per_cpu(thread_group_l1_cache_map, cpu);
987 		update_mask_from_threadgroup(mask, tg, cpu, cpu_group_start);
988 	}
989 	else if (cache_property == THREAD_GROUP_SHARE_L2_L3) {
990 		mask = &per_cpu(thread_group_l2_cache_map, cpu);
991 		update_mask_from_threadgroup(mask, tg, cpu, cpu_group_start);
992 		mask = &per_cpu(thread_group_l3_cache_map, cpu);
993 		update_mask_from_threadgroup(mask, tg, cpu, cpu_group_start);
994 	}
995 
996 
997 	return 0;
998 }
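/*
 * Editorial note: init_big_cores() below invokes this for every
 * possible CPU, first with THREAD_GROUP_SHARE_L1 to populate
 * thread_group_l1_cache_map, then with THREAD_GROUP_SHARE_L2_L3,
 * which fills both the L2 and L3 maps from the same property.
 */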
999 
1000 static bool shared_caches;
1001 
1002 #ifdef CONFIG_SCHED_SMT
1003 /* Return SMT sched-domain flags; enable asymmetric packing on asym-SMT CPUs */
1004 static int powerpc_smt_flags(void)
1005 {
1006 	int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
1007 
1008 	if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
1009 		printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
1010 		flags |= SD_ASYM_PACKING;
1011 	}
1012 	return flags;
1013 }
1014 #endif
1015 
1016 /*
1017  * P9 has a slightly odd architecture where pairs of cores share an L2 cache.
1018  * This topology makes it *much* cheaper to migrate tasks between adjacent cores
1019  * since the migrated task remains cache hot. We want to take advantage of this
1020  * at the scheduler level so an extra topology level is required.
1021  */
1022 static int powerpc_shared_cache_flags(void)
1023 {
1024 	return SD_SHARE_PKG_RESOURCES;
1025 }
1026 
1027 /*
1028  * We can't just pass cpu_l2_cache_mask() directly because it
1029  * returns a non-const pointer and the compiler barfs on that.
1030  */
1031 static const struct cpumask *shared_cache_mask(int cpu)
1032 {
1033 	return per_cpu(cpu_l2_cache_map, cpu);
1034 }
1035 
1036 #ifdef CONFIG_SCHED_SMT
1037 static const struct cpumask *smallcore_smt_mask(int cpu)
1038 {
1039 	return cpu_smallcore_mask(cpu);
1040 }
1041 #endif
1042 
1043 static struct cpumask *cpu_coregroup_mask(int cpu)
1044 {
1045 	return per_cpu(cpu_coregroup_map, cpu);
1046 }
1047 
1048 static bool has_coregroup_support(void)
1049 {
1050 	return coregroup_enabled;
1051 }
1052 
1053 static const struct cpumask *cpu_mc_mask(int cpu)
1054 {
1055 	return cpu_coregroup_mask(cpu);
1056 }
1057 
1058 static struct sched_domain_topology_level powerpc_topology[] = {
1059 #ifdef CONFIG_SCHED_SMT
1060 	{ cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
1061 #endif
1062 	{ shared_cache_mask, powerpc_shared_cache_flags, SD_INIT_NAME(CACHE) },
1063 	{ cpu_mc_mask, SD_INIT_NAME(MC) },
1064 	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
1065 	{ NULL, },
1066 };
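/*
 * Editorial note: the order of the levels above mirrors the
 * smt_idx/cache_idx/mc_idx/die_idx enum near the top of this file.
 * fixup_topology() relies on that ordering when it swaps in the
 * small-core SMT mask and collapses duplicate levels before
 * smp_cpus_done() hands the array to set_sched_topology().
 */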
1067 
1068 static int __init init_big_cores(void)
1069 {
1070 	int cpu;
1071 
1072 	for_each_possible_cpu(cpu) {
1073 		int err = init_thread_group_cache_map(cpu, THREAD_GROUP_SHARE_L1);
1074 
1075 		if (err)
1076 			return err;
1077 
1078 		zalloc_cpumask_var_node(&per_cpu(cpu_smallcore_map, cpu),
1079 					GFP_KERNEL,
1080 					cpu_to_node(cpu));
1081 	}
1082 
1083 	has_big_cores = true;
1084 
1085 	for_each_possible_cpu(cpu) {
1086 		int err = init_thread_group_cache_map(cpu, THREAD_GROUP_SHARE_L2_L3);
1087 
1088 		if (err)
1089 			return err;
1090 	}
1091 
1092 	thread_group_shares_l2 = true;
1093 	thread_group_shares_l3 = true;
1094 	pr_debug("L2/L3 cache only shared by the threads in the small core\n");
1095 
1096 	return 0;
1097 }
1098 
1099 void __init smp_prepare_cpus(unsigned int max_cpus)
1100 {
1101 	unsigned int cpu;
1102 
1103 	DBG("smp_prepare_cpus\n");
1104 
1105 	/*
1106 	 * setup_cpu may need to be called on the boot cpu. We haven't
1107 	 * spun any cpus up yet, but let's be paranoid.
1108 	 */
1109 	BUG_ON(boot_cpuid != smp_processor_id());
1110 
1111 	/* Fixup boot cpu */
1112 	smp_store_cpu_info(boot_cpuid);
1113 	cpu_callin_map[boot_cpuid] = 1;
1114 
1115 	for_each_possible_cpu(cpu) {
1116 		zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
1117 					GFP_KERNEL, cpu_to_node(cpu));
1118 		zalloc_cpumask_var_node(&per_cpu(cpu_l2_cache_map, cpu),
1119 					GFP_KERNEL, cpu_to_node(cpu));
1120 		zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
1121 					GFP_KERNEL, cpu_to_node(cpu));
1122 		if (has_coregroup_support())
1123 			zalloc_cpumask_var_node(&per_cpu(cpu_coregroup_map, cpu),
1124 						GFP_KERNEL, cpu_to_node(cpu));
1125 
1126 #ifdef CONFIG_NUMA
1127 		/*
1128 		 * numa_node_id() works after this.
1129 		 */
1130 		if (cpu_present(cpu)) {
1131 			set_cpu_numa_node(cpu, numa_cpu_lookup_table[cpu]);
1132 			set_cpu_numa_mem(cpu,
1133 				local_memory_node(numa_cpu_lookup_table[cpu]));
1134 		}
1135 #endif
1136 	}
1137 
1138 	/* Init the cpumasks so the boot CPU is related to itself */
1139 	cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
1140 	cpumask_set_cpu(boot_cpuid, cpu_l2_cache_mask(boot_cpuid));
1141 	cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));
1142 
1143 	if (has_coregroup_support())
1144 		cpumask_set_cpu(boot_cpuid, cpu_coregroup_mask(boot_cpuid));
1145 
1146 	init_big_cores();
1147 	if (has_big_cores) {
1148 		cpumask_set_cpu(boot_cpuid,
1149 				cpu_smallcore_mask(boot_cpuid));
1150 	}
1151 
1152 	if (cpu_to_chip_id(boot_cpuid) != -1) {
1153 		int idx = DIV_ROUND_UP(num_possible_cpus(), threads_per_core);
1154 
1155 		/*
1156 		 * All threads of a core belong to the same core, so
1157 		 * chip_id_lookup_table will have one entry per core.
1158 		 * Assumption: if boot_cpuid doesn't have a chip-id, then no
1159 		 * other CPU will have one either.
1160 		 */
1161 		chip_id_lookup_table = kcalloc(idx, sizeof(int), GFP_KERNEL);
1162 		if (chip_id_lookup_table)
1163 			memset(chip_id_lookup_table, -1, sizeof(int) * idx);
1164 	}
1165 
1166 	if (smp_ops && smp_ops->probe)
1167 		smp_ops->probe();
1168 }
1169 
1170 void smp_prepare_boot_cpu(void)
1171 {
1172 	BUG_ON(smp_processor_id() != boot_cpuid);
1173 #ifdef CONFIG_PPC64
1174 	paca_ptrs[boot_cpuid]->__current = current;
1175 #endif
1176 	set_numa_node(numa_cpu_lookup_table[boot_cpuid]);
1177 	current_set[boot_cpuid] = current;
1178 }
1179 
1180 #ifdef CONFIG_HOTPLUG_CPU
1181 
1182 int generic_cpu_disable(void)
1183 {
1184 	unsigned int cpu = smp_processor_id();
1185 
1186 	if (cpu == boot_cpuid)
1187 		return -EBUSY;
1188 
1189 	set_cpu_online(cpu, false);
1190 #ifdef CONFIG_PPC64
1191 	vdso_data->processorCount--;
1192 #endif
1193 	/* Update affinity of all IRQs previously aimed at this CPU */
1194 	irq_migrate_all_off_this_cpu();
1195 
1196 	/*
1197 	 * Depending on the details of the interrupt controller, it's possible
1198 	 * that one of the interrupts we just migrated away from this CPU is
1199 	 * actually already pending on this CPU. If we leave it in that state
1200 	 * the interrupt will never be EOI'ed, and will never fire again. So
1201 	 * temporarily enable interrupts here, to allow any pending interrupt to
1202 	 * be received (and EOI'ed), before we take this CPU offline.
1203 	 */
1204 	local_irq_enable();
1205 	mdelay(1);
1206 	local_irq_disable();
1207 
1208 	return 0;
1209 }
1210 
1211 void generic_cpu_die(unsigned int cpu)
1212 {
1213 	int i;
1214 
1215 	for (i = 0; i < 100; i++) {
1216 		smp_rmb();
1217 		if (is_cpu_dead(cpu))
1218 			return;
1219 		msleep(100);
1220 	}
1221 	printk(KERN_ERR "CPU%d didn't die...\n", cpu);
1222 }
1223 
1224 void generic_set_cpu_dead(unsigned int cpu)
1225 {
1226 	per_cpu(cpu_state, cpu) = CPU_DEAD;
1227 }
1228 
1229 /*
1230  * The cpu_state should be set to CPU_UP_PREPARE in kick_cpu(), otherwise
1231  * the cpu_state is always CPU_DEAD after calling generic_set_cpu_dead(),
1232  * which makes the delay in generic_cpu_die() not happen.
1233  */
1234 void generic_set_cpu_up(unsigned int cpu)
1235 {
1236 	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
1237 }
1238 
1239 int generic_check_cpu_restart(unsigned int cpu)
1240 {
1241 	return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
1242 }
1243 
1244 int is_cpu_dead(unsigned int cpu)
1245 {
1246 	return per_cpu(cpu_state, cpu) == CPU_DEAD;
1247 }
1248 
1249 static bool secondaries_inhibited(void)
1250 {
1251 	return kvm_hv_mode_active();
1252 }
1253 
1254 #else /* HOTPLUG_CPU */
1255 
1256 #define secondaries_inhibited()		0
1257 
1258 #endif
1259 
1260 static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle)
1261 {
1262 #ifdef CONFIG_PPC64
1263 	paca_ptrs[cpu]->__current = idle;
1264 	paca_ptrs[cpu]->kstack = (unsigned long)task_stack_page(idle) +
1265 				 THREAD_SIZE - STACK_FRAME_OVERHEAD;
1266 #endif
1267 	task_thread_info(idle)->cpu = cpu;
1268 	secondary_current = current_set[cpu] = idle;
1269 }
1270 
1271 int __cpu_up(unsigned int cpu, struct task_struct *tidle)
1272 {
1273 	int rc, c;
1274 
1275 	/*
1276 	 * Don't allow secondary threads to come online if inhibited
1277 	 */
1278 	if (threads_per_core > 1 && secondaries_inhibited() &&
1279 	    cpu_thread_in_subcore(cpu))
1280 		return -EBUSY;
1281 
1282 	if (smp_ops == NULL ||
1283 	    (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
1284 		return -EINVAL;
1285 
1286 	cpu_idle_thread_init(cpu, tidle);
1287 
1288 	/*
1289 	 * The platform might need to allocate resources prior to bringing
1290 	 * up the CPU
1291 	 */
1292 	if (smp_ops->prepare_cpu) {
1293 		rc = smp_ops->prepare_cpu(cpu);
1294 		if (rc)
1295 			return rc;
1296 	}
1297 
1298 	/* Make sure callin-map entry is 0 (can be left over from a CPU
1299 	 * hotplug).
1300 	 */
1301 	cpu_callin_map[cpu] = 0;
1302 
1303 	/* The information for processor bringup must
1304 	 * be written out to main store before we release
1305 	 * the processor.
1306 	 */
1307 	smp_mb();
1308 
1309 	/* wake up cpus */
1310 	DBG("smp: kicking cpu %d\n", cpu);
1311 	rc = smp_ops->kick_cpu(cpu);
1312 	if (rc) {
1313 		pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc);
1314 		return rc;
1315 	}
1316 
1317 	/*
1318 	 * wait to see if the cpu made a callin (is actually up).
1319 	 * use this value that I found through experimentation.
1320 	 * -- Cort
1321 	 */
1322 	if (system_state < SYSTEM_RUNNING)
1323 		for (c = 50000; c && !cpu_callin_map[cpu]; c--)
1324 			udelay(100);
1325 #ifdef CONFIG_HOTPLUG_CPU
1326 	else
1327 		/*
1328 		 * CPUs can take much longer to come up in the
1329 		 * hotplug case.  Wait five seconds.
1330 		 */
1331 		for (c = 5000; c && !cpu_callin_map[cpu]; c--)
1332 			msleep(1);
1333 #endif
1334 
1335 	if (!cpu_callin_map[cpu]) {
1336 		printk(KERN_ERR "Processor %u is stuck.\n", cpu);
1337 		return -ENOENT;
1338 	}
1339 
1340 	DBG("Processor %u found.\n", cpu);
1341 
1342 	if (smp_ops->give_timebase)
1343 		smp_ops->give_timebase();
1344 
1345 	/* Wait until cpu puts itself in the online & active maps */
1346 	spin_until_cond(cpu_online(cpu));
1347 
1348 	return 0;
1349 }
1350 
1351 /* Return the value of the reg property corresponding to the given
1352  * logical cpu.
1353  */
1354 int cpu_to_core_id(int cpu)
1355 {
1356 	struct device_node *np;
1357 	int id = -1;
1358 
1359 	np = of_get_cpu_node(cpu, NULL);
1360 	if (!np)
1361 		goto out;
1362 
1363 	id = of_get_cpu_hwid(np, 0);
1364 out:
1365 	of_node_put(np);
1366 	return id;
1367 }
1368 EXPORT_SYMBOL_GPL(cpu_to_core_id);
1369 
1370 /* Helper routines for cpu to core mapping */
1371 int cpu_core_index_of_thread(int cpu)
1372 {
1373 	return cpu >> threads_shift;
1374 }
1375 EXPORT_SYMBOL_GPL(cpu_core_index_of_thread);
1376 
1377 int cpu_first_thread_of_core(int core)
1378 {
1379 	return core << threads_shift;
1380 }
1381 EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);
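/*
 * Editorial example (assuming threads_per_core = 8, i.e. threads_shift
 * = 3): logical CPU 19 is thread 3 of core 2, so
 * cpu_core_index_of_thread(19) returns 2 and
 * cpu_first_thread_of_core(2) returns 16.
 */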
1382 
1383 /* Must be called when no change can occur to cpu_present_mask,
1384  * i.e. during cpu online or offline.
1385  */
1386 static struct device_node *cpu_to_l2cache(int cpu)
1387 {
1388 	struct device_node *np;
1389 	struct device_node *cache;
1390 
1391 	if (!cpu_present(cpu))
1392 		return NULL;
1393 
1394 	np = of_get_cpu_node(cpu, NULL);
1395 	if (np == NULL)
1396 		return NULL;
1397 
1398 	cache = of_find_next_cache_node(np);
1399 
1400 	of_node_put(np);
1401 
1402 	return cache;
1403 }
1404 
1405 static bool update_mask_by_l2(int cpu, cpumask_var_t *mask)
1406 {
1407 	struct cpumask *(*submask_fn)(int) = cpu_sibling_mask;
1408 	struct device_node *l2_cache, *np;
1409 	int i;
1410 
1411 	if (has_big_cores)
1412 		submask_fn = cpu_smallcore_mask;
1413 
1414 	/*
1415 	 * If the threads in a thread-group share L2 cache, then the
1416 	 * L2-mask can be obtained from thread_group_l2_cache_map.
1417 	 */
1418 	if (thread_group_shares_l2) {
1419 		cpumask_set_cpu(cpu, cpu_l2_cache_mask(cpu));
1420 
1421 		for_each_cpu(i, per_cpu(thread_group_l2_cache_map, cpu)) {
1422 			if (cpu_online(i))
1423 				set_cpus_related(i, cpu, cpu_l2_cache_mask);
1424 		}
1425 
1426 		/* Verify that L1-cache siblings are a subset of L2 cache-siblings */
1427 		if (!cpumask_equal(submask_fn(cpu), cpu_l2_cache_mask(cpu)) &&
1428 		    !cpumask_subset(submask_fn(cpu), cpu_l2_cache_mask(cpu))) {
1429 			pr_warn_once("CPU %d : Inconsistent L1 and L2 cache siblings\n",
1430 				     cpu);
1431 		}
1432 
1433 		return true;
1434 	}
1435 
1436 	l2_cache = cpu_to_l2cache(cpu);
1437 	if (!l2_cache || !*mask) {
1438 		/* Assume only core siblings share cache with this CPU */
1439 		for_each_cpu(i, cpu_sibling_mask(cpu))
1440 			set_cpus_related(cpu, i, cpu_l2_cache_mask);
1441 
1442 		return false;
1443 	}
1444 
1445 	cpumask_and(*mask, cpu_online_mask, cpu_cpu_mask(cpu));
1446 
1447 	/* Update l2-cache mask with all the CPUs that are part of submask */
1448 	or_cpumasks_related(cpu, cpu, submask_fn, cpu_l2_cache_mask);
1449 
1450 	/* Skip all CPUs already part of current CPU l2-cache mask */
1451 	cpumask_andnot(*mask, *mask, cpu_l2_cache_mask(cpu));
1452 
1453 	for_each_cpu(i, *mask) {
1454 		/*
1455 		 * When updating the masks, the current CPU has not yet been
1456 		 * marked online, but we still need to update the cache masks.
1457 		 */
1458 		np = cpu_to_l2cache(i);
1459 
1460 		/* Skip all CPUs already part of current CPU l2-cache */
1461 		if (np == l2_cache) {
1462 			or_cpumasks_related(cpu, i, submask_fn, cpu_l2_cache_mask);
1463 			cpumask_andnot(*mask, *mask, submask_fn(i));
1464 		} else {
1465 			cpumask_andnot(*mask, *mask, cpu_l2_cache_mask(i));
1466 		}
1467 
1468 		of_node_put(np);
1469 	}
1470 	of_node_put(l2_cache);
1471 
1472 	return true;
1473 }
1474 
1475 #ifdef CONFIG_HOTPLUG_CPU
1476 static void remove_cpu_from_masks(int cpu)
1477 {
1478 	struct cpumask *(*mask_fn)(int) = cpu_sibling_mask;
1479 	int i;
1480 
1481 	unmap_cpu_from_node(cpu);
1482 
1483 	if (shared_caches)
1484 		mask_fn = cpu_l2_cache_mask;
1485 
1486 	for_each_cpu(i, mask_fn(cpu)) {
1487 		set_cpus_unrelated(cpu, i, cpu_l2_cache_mask);
1488 		set_cpus_unrelated(cpu, i, cpu_sibling_mask);
1489 		if (has_big_cores)
1490 			set_cpus_unrelated(cpu, i, cpu_smallcore_mask);
1491 	}
1492 
1493 	for_each_cpu(i, cpu_core_mask(cpu))
1494 		set_cpus_unrelated(cpu, i, cpu_core_mask);
1495 
1496 	if (has_coregroup_support()) {
1497 		for_each_cpu(i, cpu_coregroup_mask(cpu))
1498 			set_cpus_unrelated(cpu, i, cpu_coregroup_mask);
1499 	}
1500 }
1501 #endif
1502 
1503 static inline void add_cpu_to_smallcore_masks(int cpu)
1504 {
1505 	int i;
1506 
1507 	if (!has_big_cores)
1508 		return;
1509 
1510 	cpumask_set_cpu(cpu, cpu_smallcore_mask(cpu));
1511 
1512 	for_each_cpu(i, per_cpu(thread_group_l1_cache_map, cpu)) {
1513 		if (cpu_online(i))
1514 			set_cpus_related(i, cpu, cpu_smallcore_mask);
1515 	}
1516 }
1517 
1518 static void update_coregroup_mask(int cpu, cpumask_var_t *mask)
1519 {
1520 	struct cpumask *(*submask_fn)(int) = cpu_sibling_mask;
1521 	int coregroup_id = cpu_to_coregroup_id(cpu);
1522 	int i;
1523 
1524 	if (shared_caches)
1525 		submask_fn = cpu_l2_cache_mask;
1526 
1527 	if (!*mask) {
1528 		/* Assume only siblings are part of this CPU's coregroup */
1529 		for_each_cpu(i, submask_fn(cpu))
1530 			set_cpus_related(cpu, i, cpu_coregroup_mask);
1531 
1532 		return;
1533 	}
1534 
1535 	cpumask_and(*mask, cpu_online_mask, cpu_cpu_mask(cpu));
1536 
1537 	/* Update coregroup mask with all the CPUs that are part of submask */
1538 	or_cpumasks_related(cpu, cpu, submask_fn, cpu_coregroup_mask);
1539 
1540 	/* Skip all CPUs already part of coregroup mask */
1541 	cpumask_andnot(*mask, *mask, cpu_coregroup_mask(cpu));
1542 
1543 	for_each_cpu(i, *mask) {
1544 		/* Skip all CPUs not part of this coregroup */
1545 		if (coregroup_id == cpu_to_coregroup_id(i)) {
1546 			or_cpumasks_related(cpu, i, submask_fn, cpu_coregroup_mask);
1547 			cpumask_andnot(*mask, *mask, submask_fn(i));
1548 		} else {
1549 			cpumask_andnot(*mask, *mask, cpu_coregroup_mask(i));
1550 		}
1551 	}
1552 }
1553 
1554 static void add_cpu_to_masks(int cpu)
1555 {
1556 	struct cpumask *(*submask_fn)(int) = cpu_sibling_mask;
1557 	int first_thread = cpu_first_thread_sibling(cpu);
1558 	cpumask_var_t mask;
1559 	int chip_id = -1;
1560 	bool ret;
1561 	int i;
1562 
1563 	/*
1564 	 * This CPU will not be in the online mask yet so we need to manually
1565 	 * add it to its own thread sibling mask.
1566 	 */
1567 	map_cpu_to_node(cpu, cpu_to_node(cpu));
1568 	cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
1569 	cpumask_set_cpu(cpu, cpu_core_mask(cpu));
1570 
1571 	for (i = first_thread; i < first_thread + threads_per_core; i++)
1572 		if (cpu_online(i))
1573 			set_cpus_related(i, cpu, cpu_sibling_mask);
1574 
1575 	add_cpu_to_smallcore_masks(cpu);
1576 
1577 	/* In CPU-hotplug path, hence use GFP_ATOMIC */
1578 	ret = alloc_cpumask_var_node(&mask, GFP_ATOMIC, cpu_to_node(cpu));
1579 	update_mask_by_l2(cpu, &mask);
1580 
1581 	if (has_coregroup_support())
1582 		update_coregroup_mask(cpu, &mask);
1583 
1584 	if (chip_id_lookup_table && ret)
1585 		chip_id = cpu_to_chip_id(cpu);
1586 
1587 	if (shared_caches)
1588 		submask_fn = cpu_l2_cache_mask;
1589 
1590 	/* Update core_mask with all the CPUs that are part of submask */
1591 	or_cpumasks_related(cpu, cpu, submask_fn, cpu_core_mask);
1592 
1593 	/* Skip all CPUs already part of current CPU core mask */
1594 	cpumask_andnot(mask, cpu_online_mask, cpu_core_mask(cpu));
1595 
1596 	/* If chip_id is -1, limit the cpu_core_mask to within the DIE */
1597 	if (chip_id == -1)
1598 		cpumask_and(mask, mask, cpu_cpu_mask(cpu));
1599 
1600 	for_each_cpu(i, mask) {
1601 		if (chip_id == cpu_to_chip_id(i)) {
1602 			or_cpumasks_related(cpu, i, submask_fn, cpu_core_mask);
1603 			cpumask_andnot(mask, mask, submask_fn(i));
1604 		} else {
1605 			cpumask_andnot(mask, mask, cpu_core_mask(i));
1606 		}
1607 	}
1608 
1609 	free_cpumask_var(mask);
1610 }
1611 
1612 /* Activate a secondary processor. */
1613 void start_secondary(void *unused)
1614 {
1615 	unsigned int cpu = raw_smp_processor_id();
1616 
1617 	/* PPC64 calls setup_kup() in early_setup_secondary() */
1618 	if (IS_ENABLED(CONFIG_PPC32))
1619 		setup_kup();
1620 
1621 	mmgrab(&init_mm);
1622 	current->active_mm = &init_mm;
1623 
1624 	smp_store_cpu_info(cpu);
1625 	set_dec(tb_ticks_per_jiffy);
1626 	rcu_cpu_starting(cpu);
1627 	cpu_callin_map[cpu] = 1;
1628 
1629 	if (smp_ops->setup_cpu)
1630 		smp_ops->setup_cpu(cpu);
1631 	if (smp_ops->take_timebase)
1632 		smp_ops->take_timebase();
1633 
1634 	secondary_cpu_time_init();
1635 
1636 #ifdef CONFIG_PPC64
1637 	if (system_state == SYSTEM_RUNNING)
1638 		vdso_data->processorCount++;
1639 
1640 	vdso_getcpu_init();
1641 #endif
1642 	set_numa_node(numa_cpu_lookup_table[cpu]);
1643 	set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));
1644 
1645 	/* Update topology CPU masks */
1646 	add_cpu_to_masks(cpu);
1647 
1648 	/*
1649 	 * Check for any shared caches. Note that this must be done on a
1650 	 * per-core basis because one core in the pair might be disabled.
1651 	 */
1652 	if (!shared_caches) {
1653 		struct cpumask *(*sibling_mask)(int) = cpu_sibling_mask;
1654 		struct cpumask *mask = cpu_l2_cache_mask(cpu);
1655 
1656 		if (has_big_cores)
1657 			sibling_mask = cpu_smallcore_mask;
1658 
1659 		if (cpumask_weight(mask) > cpumask_weight(sibling_mask(cpu)))
1660 			shared_caches = true;
1661 	}
1662 
1663 	smp_wmb();
1664 	notify_cpu_starting(cpu);
1665 	set_cpu_online(cpu, true);
1666 
1667 	boot_init_stack_canary();
1668 
1669 	local_irq_enable();
1670 
1671 	/* We can enable ftrace for secondary cpus now */
1672 	this_cpu_enable_ftrace();
1673 
1674 	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
1675 
1676 	BUG();
1677 }
1678 
1679 #ifdef CONFIG_PROFILING
1680 int setup_profiling_timer(unsigned int multiplier)
1681 {
1682 	return 0;
1683 }
1684 #endif
1685 
1686 static void __init fixup_topology(void)
1687 {
1688 	int i;
1689 
1690 #ifdef CONFIG_SCHED_SMT
1691 	if (has_big_cores) {
1692 		pr_info("Big cores detected but using small core scheduling\n");
1693 		powerpc_topology[smt_idx].mask = smallcore_smt_mask;
1694 	}
1695 #endif
1696 
1697 	if (!has_coregroup_support())
1698 		powerpc_topology[mc_idx].mask = powerpc_topology[cache_idx].mask;
1699 
1700 	/*
1701 	 * Try to consolidate topology levels here instead of
1702 	 * allowing the scheduler to degenerate.
1703 	 * - Don't consolidate if masks are different.
1704 	 * - Don't consolidate if sd_flags exist and are different.
1705 	 */
1706 	for (i = 1; i <= die_idx; i++) {
1707 		if (powerpc_topology[i].mask != powerpc_topology[i - 1].mask)
1708 			continue;
1709 
1710 		if (powerpc_topology[i].sd_flags && powerpc_topology[i - 1].sd_flags &&
1711 				powerpc_topology[i].sd_flags != powerpc_topology[i - 1].sd_flags)
1712 			continue;
1713 
1714 		if (!powerpc_topology[i - 1].sd_flags)
1715 			powerpc_topology[i - 1].sd_flags = powerpc_topology[i].sd_flags;
1716 
1717 		powerpc_topology[i].mask = powerpc_topology[i + 1].mask;
1718 		powerpc_topology[i].sd_flags = powerpc_topology[i + 1].sd_flags;
1719 #ifdef CONFIG_SCHED_DEBUG
1720 		powerpc_topology[i].name = powerpc_topology[i + 1].name;
1721 #endif
1722 	}
1723 }
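/*
 * Editorial example: without coregroup support the MC level is given
 * the CACHE mask above, so CACHE and MC become identical; the loop
 * then shifts the following levels down over the duplicate entry, and
 * the scheduler is handed a shorter topology instead of having to
 * degenerate identical levels itself.
 */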
1724 
1725 void __init smp_cpus_done(unsigned int max_cpus)
1726 {
1727 	/*
1728 	 * We are running pinned to the boot CPU, see rest_init().
1729 	 */
1730 	if (smp_ops && smp_ops->setup_cpu)
1731 		smp_ops->setup_cpu(boot_cpuid);
1732 
1733 	if (smp_ops && smp_ops->bringup_done)
1734 		smp_ops->bringup_done();
1735 
1736 	dump_numa_cpu_topology();
1737 
1738 	fixup_topology();
1739 	set_sched_topology(powerpc_topology);
1740 }
1741 
1742 #ifdef CONFIG_HOTPLUG_CPU
1743 int __cpu_disable(void)
1744 {
1745 	int cpu = smp_processor_id();
1746 	int err;
1747 
1748 	if (!smp_ops->cpu_disable)
1749 		return -ENOSYS;
1750 
1751 	this_cpu_disable_ftrace();
1752 
1753 	err = smp_ops->cpu_disable();
1754 	if (err)
1755 		return err;
1756 
1757 	/* Update sibling maps */
1758 	remove_cpu_from_masks(cpu);
1759 
1760 	return 0;
1761 }
1762 
1763 void __cpu_die(unsigned int cpu)
1764 {
1765 	if (smp_ops->cpu_die)
1766 		smp_ops->cpu_die(cpu);
1767 }
1768 
1769 void arch_cpu_idle_dead(void)
1770 {
1771 	/*
1772 	 * Disable ftrace on the down path. It will be re-enabled by
1773 	 * start_secondary() via start_secondary_resume() below.
1774 	 */
1775 	this_cpu_disable_ftrace();
1776 
1777 	if (smp_ops->cpu_offline_self)
1778 		smp_ops->cpu_offline_self();
1779 
1780 	/* If we return, we re-enter start_secondary */
1781 	start_secondary_resume();
1782 }
1783 
1784 #endif
1785