xref: /openbmc/linux/arch/powerpc/kernel/smp.c (revision 0cabf991)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * SMP support for ppc.
4  *
5  * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
6  * deal of code from the sparc and intel versions.
7  *
8  * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
9  *
10  * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
11  * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
12  */
13 
14 #undef DEBUG
15 
16 #include <linux/kernel.h>
17 #include <linux/export.h>
18 #include <linux/sched/mm.h>
19 #include <linux/sched/task_stack.h>
20 #include <linux/sched/topology.h>
21 #include <linux/smp.h>
22 #include <linux/interrupt.h>
23 #include <linux/delay.h>
24 #include <linux/init.h>
25 #include <linux/spinlock.h>
26 #include <linux/cache.h>
27 #include <linux/err.h>
28 #include <linux/device.h>
29 #include <linux/cpu.h>
30 #include <linux/notifier.h>
31 #include <linux/topology.h>
32 #include <linux/profile.h>
33 #include <linux/processor.h>
34 #include <linux/random.h>
35 #include <linux/stackprotector.h>
36 #include <linux/pgtable.h>
37 
38 #include <asm/ptrace.h>
39 #include <linux/atomic.h>
40 #include <asm/irq.h>
41 #include <asm/hw_irq.h>
42 #include <asm/kvm_ppc.h>
43 #include <asm/dbell.h>
44 #include <asm/page.h>
45 #include <asm/prom.h>
46 #include <asm/smp.h>
47 #include <asm/time.h>
48 #include <asm/machdep.h>
49 #include <asm/cputhreads.h>
50 #include <asm/cputable.h>
51 #include <asm/mpic.h>
52 #include <asm/vdso_datapage.h>
53 #ifdef CONFIG_PPC64
54 #include <asm/paca.h>
55 #endif
56 #include <asm/vdso.h>
57 #include <asm/debug.h>
58 #include <asm/kexec.h>
59 #include <asm/asm-prototypes.h>
60 #include <asm/cpu_has_feature.h>
61 #include <asm/ftrace.h>
62 #include <asm/kup.h>
63 
64 #ifdef DEBUG
65 #include <asm/udbg.h>
66 #define DBG(fmt...) udbg_printf(fmt)
67 #else
68 #define DBG(fmt...)
69 #endif
70 
71 #ifdef CONFIG_HOTPLUG_CPU
72 /* State of each CPU during hotplug phases */
73 static DEFINE_PER_CPU(int, cpu_state) = { 0 };
74 #endif
75 
76 struct task_struct *secondary_current;
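/*
 * Set by init_big_cores() when the ibm,thread-groups property shows each
 * core's threads split into smaller groups that share an L1, i.e. "big"
 * cores made up of independent small cores.
 */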
77 bool has_big_cores;
78 
79 DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
80 DEFINE_PER_CPU(cpumask_var_t, cpu_smallcore_map);
81 DEFINE_PER_CPU(cpumask_var_t, cpu_l2_cache_map);
82 DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);
83 
84 EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
85 EXPORT_PER_CPU_SYMBOL(cpu_l2_cache_map);
86 EXPORT_PER_CPU_SYMBOL(cpu_core_map);
87 EXPORT_SYMBOL_GPL(has_big_cores);
88 
89 #define MAX_THREAD_LIST_SIZE	8
90 #define THREAD_GROUP_SHARE_L1   1
91 struct thread_groups {
92 	unsigned int property;
93 	unsigned int nr_groups;
94 	unsigned int threads_per_group;
95 	unsigned int thread_list[MAX_THREAD_LIST_SIZE];
96 };
97 
98 /*
99  * On big-core systems, cpu_l1_cache_map for each CPU corresponds to
100  * the set of its siblings that share the L1-cache.
101  */
102 DEFINE_PER_CPU(cpumask_var_t, cpu_l1_cache_map);
103 
104 /* SMP operations for this machine */
105 struct smp_ops_t *smp_ops;
106 
107 /* Can't be static due to PowerMac hackery */
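/* Each secondary sets its entry from start_secondary(); __cpu_up() polls it. */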
108 volatile unsigned int cpu_callin_map[NR_CPUS];
109 
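/*
 * Number of threads per core to bring up at boot; 0 means only thread 0 of
 * each core is started (see smp_generic_cpu_bootable() below).
 */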
110 int smt_enabled_at_boot = 1;
111 
112 /*
113  * Returns 1 if the specified cpu should be brought up during boot.
114  * Used to inhibit booting threads if they've been disabled or
115  * limited on the command line
116  */
117 int smp_generic_cpu_bootable(unsigned int nr)
118 {
119 	/* Special case - we inhibit secondary thread startup
120 	 * during boot if the user requests it.
121 	 */
122 	if (system_state < SYSTEM_RUNNING && cpu_has_feature(CPU_FTR_SMT)) {
123 		if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
124 			return 0;
125 		if (smt_enabled_at_boot
126 		    && cpu_thread_in_core(nr) >= smt_enabled_at_boot)
127 			return 0;
128 	}
129 
130 	return 1;
131 }
132 
133 
134 #ifdef CONFIG_PPC64
135 int smp_generic_kick_cpu(int nr)
136 {
137 	if (nr < 0 || nr >= nr_cpu_ids)
138 		return -EINVAL;
139 
140 	/*
141 	 * The processor is currently spinning, waiting for the
142 	 * cpu_start field to become non-zero. After we set cpu_start,
143 	 * the processor will continue on to secondary_start
144 	 */
145 	if (!paca_ptrs[nr]->cpu_start) {
146 		paca_ptrs[nr]->cpu_start = 1;
147 		smp_mb();
148 		return 0;
149 	}
150 
151 #ifdef CONFIG_HOTPLUG_CPU
152 	/*
153 	 * Ok it's not there, so it might be soft-unplugged, let's
154 	 * try to bring it back
155 	 */
156 	generic_set_cpu_up(nr);
157 	smp_wmb();
158 	smp_send_reschedule(nr);
159 #endif /* CONFIG_HOTPLUG_CPU */
160 
161 	return 0;
162 }
163 #endif /* CONFIG_PPC64 */
164 
165 static irqreturn_t call_function_action(int irq, void *data)
166 {
167 	generic_smp_call_function_interrupt();
168 	return IRQ_HANDLED;
169 }
170 
171 static irqreturn_t reschedule_action(int irq, void *data)
172 {
173 	scheduler_ipi();
174 	return IRQ_HANDLED;
175 }
176 
177 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
178 static irqreturn_t tick_broadcast_ipi_action(int irq, void *data)
179 {
180 	timer_broadcast_interrupt();
181 	return IRQ_HANDLED;
182 }
183 #endif
184 
185 #ifdef CONFIG_NMI_IPI
186 static irqreturn_t nmi_ipi_action(int irq, void *data)
187 {
188 	smp_handle_nmi_ipi(get_irq_regs());
189 	return IRQ_HANDLED;
190 }
191 #endif
192 
193 static irq_handler_t smp_ipi_action[] = {
194 	[PPC_MSG_CALL_FUNCTION] =  call_function_action,
195 	[PPC_MSG_RESCHEDULE] = reschedule_action,
196 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
197 	[PPC_MSG_TICK_BROADCAST] = tick_broadcast_ipi_action,
198 #endif
199 #ifdef CONFIG_NMI_IPI
200 	[PPC_MSG_NMI_IPI] = nmi_ipi_action,
201 #endif
202 };
203 
204 /*
205  * The NMI IPI is a fallback and not truly non-maskable. It is simpler
206  * than going through the call function infrastructure, and strongly
207  * serialized, so it is more appropriate for debugging.
208  */
209 const char *smp_ipi_name[] = {
210 	[PPC_MSG_CALL_FUNCTION] =  "ipi call function",
211 	[PPC_MSG_RESCHEDULE] = "ipi reschedule",
212 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
213 	[PPC_MSG_TICK_BROADCAST] = "ipi tick-broadcast",
214 #endif
215 #ifdef CONFIG_NMI_IPI
216 	[PPC_MSG_NMI_IPI] = "nmi ipi",
217 #endif
218 };
219 
220 /* optional function to request ipi, for controllers with >= 4 ipis */
221 int smp_request_message_ipi(int virq, int msg)
222 {
223 	int err;
224 
225 	if (msg < 0 || msg > PPC_MSG_NMI_IPI)
226 		return -EINVAL;
227 #ifndef CONFIG_NMI_IPI
228 	if (msg == PPC_MSG_NMI_IPI)
229 		return 1;
230 #endif
231 
232 	err = request_irq(virq, smp_ipi_action[msg],
233 			  IRQF_PERCPU | IRQF_NO_THREAD | IRQF_NO_SUSPEND,
234 			  smp_ipi_name[msg], NULL);
235 	WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
236 		virq, smp_ipi_name[msg], err);
237 
238 	return err;
239 }
240 
241 #ifdef CONFIG_PPC_SMP_MUXED_IPI
242 struct cpu_messages {
243 	long messages;			/* current messages */
244 };
245 static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message);
246 
247 void smp_muxed_ipi_set_message(int cpu, int msg)
248 {
249 	struct cpu_messages *info = &per_cpu(ipi_message, cpu);
250 	char *message = (char *)&info->messages;
251 
252 	/*
253 	 * Order previous accesses before accesses in the IPI handler.
254 	 */
255 	smp_mb();
256 	message[msg] = 1;
257 }
258 
259 void smp_muxed_ipi_message_pass(int cpu, int msg)
260 {
261 	smp_muxed_ipi_set_message(cpu, msg);
262 
263 	/*
264 	 * cause_ipi functions are required to include a full barrier
265 	 * before doing whatever causes the IPI.
266 	 */
267 	smp_ops->cause_ipi(cpu);
268 }
269 
270 #ifdef __BIG_ENDIAN__
271 #define IPI_MESSAGE(A) (1uL << ((BITS_PER_LONG - 8) - 8 * (A)))
272 #else
273 #define IPI_MESSAGE(A) (1uL << (8 * (A)))
274 #endif
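/*
 * Each message occupies one byte of the 'messages' word: IPI_MESSAGE(msg)
 * is the lowest-order bit of the byte written as message[msg] by
 * smp_muxed_ipi_set_message() above.  For example, with 64-bit longs,
 * msg 0 maps to bit 56 on big-endian and bit 0 on little-endian.
 */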
275 
276 irqreturn_t smp_ipi_demux(void)
277 {
278 	mb();	/* order any irq clear */
279 
280 	return smp_ipi_demux_relaxed();
281 }
282 
283 /* sync-free variant. Callers should ensure synchronization */
284 irqreturn_t smp_ipi_demux_relaxed(void)
285 {
286 	struct cpu_messages *info;
287 	unsigned long all;
288 
289 	info = this_cpu_ptr(&ipi_message);
290 	do {
291 		all = xchg(&info->messages, 0);
292 #if defined(CONFIG_KVM_XICS) && defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
293 		/*
294 		 * Must check for PPC_MSG_RM_HOST_ACTION messages
295 		 * before PPC_MSG_CALL_FUNCTION messages because when
296 		 * a VM is destroyed, we call kick_all_cpus_sync()
297 		 * to ensure that any pending PPC_MSG_RM_HOST_ACTION
298 		 * messages have completed before we free any VCPUs.
299 		 */
300 		if (all & IPI_MESSAGE(PPC_MSG_RM_HOST_ACTION))
301 			kvmppc_xics_ipi_action();
302 #endif
303 		if (all & IPI_MESSAGE(PPC_MSG_CALL_FUNCTION))
304 			generic_smp_call_function_interrupt();
305 		if (all & IPI_MESSAGE(PPC_MSG_RESCHEDULE))
306 			scheduler_ipi();
307 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
308 		if (all & IPI_MESSAGE(PPC_MSG_TICK_BROADCAST))
309 			timer_broadcast_interrupt();
310 #endif
311 #ifdef CONFIG_NMI_IPI
312 		if (all & IPI_MESSAGE(PPC_MSG_NMI_IPI))
313 			nmi_ipi_action(0, NULL);
314 #endif
315 	} while (info->messages);
316 
317 	return IRQ_HANDLED;
318 }
319 #endif /* CONFIG_PPC_SMP_MUXED_IPI */
320 
321 static inline void do_message_pass(int cpu, int msg)
322 {
323 	if (smp_ops->message_pass)
324 		smp_ops->message_pass(cpu, msg);
325 #ifdef CONFIG_PPC_SMP_MUXED_IPI
326 	else
327 		smp_muxed_ipi_message_pass(cpu, msg);
328 #endif
329 }
330 
331 void smp_send_reschedule(int cpu)
332 {
333 	if (likely(smp_ops))
334 		do_message_pass(cpu, PPC_MSG_RESCHEDULE);
335 }
336 EXPORT_SYMBOL_GPL(smp_send_reschedule);
337 
338 void arch_send_call_function_single_ipi(int cpu)
339 {
340 	do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
341 }
342 
343 void arch_send_call_function_ipi_mask(const struct cpumask *mask)
344 {
345 	unsigned int cpu;
346 
347 	for_each_cpu(cpu, mask)
348 		do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
349 }
350 
351 #ifdef CONFIG_NMI_IPI
352 
353 /*
354  * "NMI IPI" system.
355  *
356  * NMI IPIs may not be recoverable, so should not be used as an ongoing part of
357  * a running system. They can be used for crash, debug, halt/reboot, etc.
358  *
359  * The IPI call waits with interrupts disabled until all targets enter the
360  * NMI handler, then returns. Subsequent IPIs can be issued before targets
361  * have returned from their handlers, so there is no guarantee about
362  * concurrency or re-entrancy.
363  *
364  * A new NMI can be issued before all targets exit the handler.
365  *
366  * The IPI call may time out without all targets entering the NMI handler.
367  * In that case, there is some logic to recover (and ignore subsequent
368  * NMI interrupts that may eventually be raised), but the platform interrupt
369  * handler may not be able to distinguish this from other exception causes,
370  * which may cause a crash.
371  */
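/*
 * A minimal usage sketch (my_nmi_handler is a hypothetical callback; see
 * smp_send_debugger_break() below for a real caller), targeting all other
 * CPUs and giving them up to 1s to enter the handler:
 *
 *	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, my_nmi_handler, 1000000);
 */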
372 
373 static atomic_t __nmi_ipi_lock = ATOMIC_INIT(0);
374 static struct cpumask nmi_ipi_pending_mask;
375 static bool nmi_ipi_busy = false;
376 static void (*nmi_ipi_function)(struct pt_regs *) = NULL;
377 
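/*
 * __nmi_ipi_lock is a simple cmpxchg-based spinlock, usable from the NMI
 * path in smp_handle_nmi_ipi().  The _start/_end variants additionally
 * save the irq flags and hard-disable interrupts around the critical
 * section.
 */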
378 static void nmi_ipi_lock_start(unsigned long *flags)
379 {
380 	raw_local_irq_save(*flags);
381 	hard_irq_disable();
382 	while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) {
383 		raw_local_irq_restore(*flags);
384 		spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0);
385 		raw_local_irq_save(*flags);
386 		hard_irq_disable();
387 	}
388 }
389 
390 static void nmi_ipi_lock(void)
391 {
392 	while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1)
393 		spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0);
394 }
395 
396 static void nmi_ipi_unlock(void)
397 {
398 	smp_mb();
399 	WARN_ON(atomic_read(&__nmi_ipi_lock) != 1);
400 	atomic_set(&__nmi_ipi_lock, 0);
401 }
402 
403 static void nmi_ipi_unlock_end(unsigned long *flags)
404 {
405 	nmi_ipi_unlock();
406 	raw_local_irq_restore(*flags);
407 }
408 
409 /*
410  * Platform NMI handler calls this to ack
411  */
412 int smp_handle_nmi_ipi(struct pt_regs *regs)
413 {
414 	void (*fn)(struct pt_regs *) = NULL;
415 	unsigned long flags;
416 	int me = raw_smp_processor_id();
417 	int ret = 0;
418 
419 	/*
420 	 * Unexpected NMIs are possible here because the platform handler may
421 	 * not be able to distinguish NMI IPIs from other types of NMIs, or
422 	 * because the caller may have timed out.
423 	 */
424 	nmi_ipi_lock_start(&flags);
425 	if (cpumask_test_cpu(me, &nmi_ipi_pending_mask)) {
426 		cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
427 		fn = READ_ONCE(nmi_ipi_function);
428 		WARN_ON_ONCE(!fn);
429 		ret = 1;
430 	}
431 	nmi_ipi_unlock_end(&flags);
432 
433 	if (fn)
434 		fn(regs);
435 
436 	return ret;
437 }
438 
439 static void do_smp_send_nmi_ipi(int cpu, bool safe)
440 {
441 	if (!safe && smp_ops->cause_nmi_ipi && smp_ops->cause_nmi_ipi(cpu))
442 		return;
443 
444 	if (cpu >= 0) {
445 		do_message_pass(cpu, PPC_MSG_NMI_IPI);
446 	} else {
447 		int c;
448 
449 		for_each_online_cpu(c) {
450 			if (c == raw_smp_processor_id())
451 				continue;
452 			do_message_pass(c, PPC_MSG_NMI_IPI);
453 		}
454 	}
455 }
456 
457 /*
458  * - cpu is the target CPU (must not be this CPU), or NMI_IPI_ALL_OTHERS.
459  * - fn is the target callback function.
460  * - delay_us > 0 is the delay before giving up waiting for targets to
461  *   begin executing the handler, == 0 specifies indefinite delay.
462  */
463 static int __smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *),
464 				u64 delay_us, bool safe)
465 {
466 	unsigned long flags;
467 	int me = raw_smp_processor_id();
468 	int ret = 1;
469 
470 	BUG_ON(cpu == me);
471 	BUG_ON(cpu < 0 && cpu != NMI_IPI_ALL_OTHERS);
472 
473 	if (unlikely(!smp_ops))
474 		return 0;
475 
476 	nmi_ipi_lock_start(&flags);
477 	while (nmi_ipi_busy) {
478 		nmi_ipi_unlock_end(&flags);
479 		spin_until_cond(!nmi_ipi_busy);
480 		nmi_ipi_lock_start(&flags);
481 	}
482 	nmi_ipi_busy = true;
483 	nmi_ipi_function = fn;
484 
485 	WARN_ON_ONCE(!cpumask_empty(&nmi_ipi_pending_mask));
486 
487 	if (cpu < 0) {
488 		/* ALL_OTHERS */
489 		cpumask_copy(&nmi_ipi_pending_mask, cpu_online_mask);
490 		cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
491 	} else {
492 		cpumask_set_cpu(cpu, &nmi_ipi_pending_mask);
493 	}
494 
495 	nmi_ipi_unlock();
496 
497 	/* Interrupts remain hard disabled */
498 
499 	do_smp_send_nmi_ipi(cpu, safe);
500 
501 	nmi_ipi_lock();
502 	/* nmi_ipi_busy is set here, so unlock/lock is okay */
503 	while (!cpumask_empty(&nmi_ipi_pending_mask)) {
504 		nmi_ipi_unlock();
505 		udelay(1);
506 		nmi_ipi_lock();
507 		if (delay_us) {
508 			delay_us--;
509 			if (!delay_us)
510 				break;
511 		}
512 	}
513 
514 	if (!cpumask_empty(&nmi_ipi_pending_mask)) {
515 		/* Timeout waiting for CPUs to call smp_handle_nmi_ipi */
516 		ret = 0;
517 		cpumask_clear(&nmi_ipi_pending_mask);
518 	}
519 
520 	nmi_ipi_function = NULL;
521 	nmi_ipi_busy = false;
522 
523 	nmi_ipi_unlock_end(&flags);
524 
525 	return ret;
526 }
527 
528 int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
529 {
530 	return __smp_send_nmi_ipi(cpu, fn, delay_us, false);
531 }
532 
533 int smp_send_safe_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
534 {
535 	return __smp_send_nmi_ipi(cpu, fn, delay_us, true);
536 }
537 #endif /* CONFIG_NMI_IPI */
538 
539 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
540 void tick_broadcast(const struct cpumask *mask)
541 {
542 	unsigned int cpu;
543 
544 	for_each_cpu(cpu, mask)
545 		do_message_pass(cpu, PPC_MSG_TICK_BROADCAST);
546 }
547 #endif
548 
549 #ifdef CONFIG_DEBUGGER
550 void debugger_ipi_callback(struct pt_regs *regs)
551 {
552 	debugger_ipi(regs);
553 }
554 
555 void smp_send_debugger_break(void)
556 {
557 	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, debugger_ipi_callback, 1000000);
558 }
559 #endif
560 
561 #ifdef CONFIG_KEXEC_CORE
562 void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
563 {
564 	int cpu;
565 
566 	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, crash_ipi_callback, 1000000);
567 	if (kdump_in_progress() && crash_wake_offline) {
568 		for_each_present_cpu(cpu) {
569 			if (cpu_online(cpu))
570 				continue;
571 			/*
572 			 * crash_ipi_callback will wait for
573 			 * all cpus, including offline CPUs.
574 			 * We don't care about nmi_ipi_function.
575 			 * Offline cpus will jump straight into
576 			 * crash_ipi_callback, we can skip the
577 			 * entire NMI dance and waiting for
578 			 * cpus to clear pending mask, etc.
579 			 */
580 			do_smp_send_nmi_ipi(cpu, false);
581 		}
582 	}
583 }
584 #endif
585 
586 #ifdef CONFIG_NMI_IPI
587 static void nmi_stop_this_cpu(struct pt_regs *regs)
588 {
589 	/*
590 	 * IRQs are already hard disabled by smp_handle_nmi_ipi().
591 	 */
592 	spin_begin();
593 	while (1)
594 		spin_cpu_relax();
595 }
596 
597 void smp_send_stop(void)
598 {
599 	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, nmi_stop_this_cpu, 1000000);
600 }
601 
602 #else /* CONFIG_NMI_IPI */
603 
604 static void stop_this_cpu(void *dummy)
605 {
606 	hard_irq_disable();
607 	spin_begin();
608 	while (1)
609 		spin_cpu_relax();
610 }
611 
612 void smp_send_stop(void)
613 {
614 	static bool stopped = false;
615 
616 	/*
617 	 * Prevent waiting on csd lock from a previous smp_send_stop.
618 	 * This is racy, but in general callers try to do the right
619 	 * thing and only fire off one smp_send_stop (e.g., see
620 	 * kernel/panic.c)
621 	 */
622 	if (stopped)
623 		return;
624 
625 	stopped = true;
626 
627 	smp_call_function(stop_this_cpu, NULL, 0);
628 }
629 #endif /* CONFIG_NMI_IPI */
630 
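/*
 * Idle task of each CPU, recorded by cpu_idle_thread_init() and
 * smp_prepare_boot_cpu().
 */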
631 struct task_struct *current_set[NR_CPUS];
632 
633 static void smp_store_cpu_info(int id)
634 {
635 	per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
636 #ifdef CONFIG_PPC_FSL_BOOK3E
637 	per_cpu(next_tlbcam_idx, id)
638 		= (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
639 #endif
640 }
641 
642 /*
643  * Relationships between CPUs are maintained in a set of per-cpu cpumasks, so
644  * rather than just passing around the cpumask we pass around a function that
645  * returns that cpumask for the given CPU.
646  */
647 static void set_cpus_related(int i, int j, struct cpumask *(*get_cpumask)(int))
648 {
649 	cpumask_set_cpu(i, get_cpumask(j));
650 	cpumask_set_cpu(j, get_cpumask(i));
651 }
652 
653 #ifdef CONFIG_HOTPLUG_CPU
654 static void set_cpus_unrelated(int i, int j,
655 		struct cpumask *(*get_cpumask)(int))
656 {
657 	cpumask_clear_cpu(i, get_cpumask(j));
658 	cpumask_clear_cpu(j, get_cpumask(i));
659 }
660 #endif
661 
662 /*
663  * parse_thread_groups: Parses the "ibm,thread-groups" device tree
664  *                      property for the CPU device node @dn and stores
665  *                      the parsed output in the thread_groups
666  *                      structure @tg if the ibm,thread-groups[0]
667  *                      matches @property.
668  *
669  * @dn: The device node of the CPU device.
670  * @tg: Pointer to a thread group structure into which the parsed
671  *      output of "ibm,thread-groups" is stored.
672  * @property: The property of the thread-group that the caller is
673  *            interested in.
674  *
675  * ibm,thread-groups[0..N-1] array defines which group of threads in
676  * the CPU-device node can be grouped together based on the property.
677  *
678  * ibm,thread-groups[0] tells us the property based on which the
679  * threads are being grouped together. If this value is 1, it implies
680  * that the threads in the same group share L1, translation cache.
681  *
682  * ibm,thread-groups[1] tells us how many such thread groups exist.
683  *
684  * ibm,thread-groups[2] tells us the number of threads in each such
685  * group.
686  *
687  * ibm,thread-groups[3..N-1] is the list of threads identified by
688  * "ibm,ppc-interrupt-server#s" arranged as per their membership in
689  * the grouping.
690  *
691  * Example: If ibm,thread-groups = [1,2,4,5,6,7,8,9,10,11,12] it
692  * implies that there are 2 groups of 4 threads each, where each group
693  * of threads shares the L1 and translation cache.
694  *
695  * The "ibm,ppc-interrupt-server#s" of the first group is {5,6,7,8}
696  * and the "ibm,ppc-interrupt-server#s" of the second group is {9, 10,
697  * 11, 12}.
698  *
699  * Returns 0 on success, -EINVAL if the property does not exist,
700  * -ENODATA if property does not have a value, and -EOVERFLOW if the
701  * property data isn't large enough.
702  */
703 static int parse_thread_groups(struct device_node *dn,
704 			       struct thread_groups *tg,
705 			       unsigned int property)
706 {
707 	int i;
708 	u32 thread_group_array[3 + MAX_THREAD_LIST_SIZE];
709 	u32 *thread_list;
710 	size_t total_threads;
711 	int ret;
712 
713 	ret = of_property_read_u32_array(dn, "ibm,thread-groups",
714 					 thread_group_array, 3);
715 	if (ret)
716 		return ret;
717 
718 	tg->property = thread_group_array[0];
719 	tg->nr_groups = thread_group_array[1];
720 	tg->threads_per_group = thread_group_array[2];
721 	if (tg->property != property ||
722 	    tg->nr_groups < 1 ||
723 	    tg->threads_per_group < 1)
724 		return -ENODATA;
725 
726 	total_threads = tg->nr_groups * tg->threads_per_group;
727 
728 	ret = of_property_read_u32_array(dn, "ibm,thread-groups",
729 					 thread_group_array,
730 					 3 + total_threads);
731 	if (ret)
732 		return ret;
733 
734 	thread_list = &thread_group_array[3];
735 
736 	for (i = 0 ; i < total_threads; i++)
737 		tg->thread_list[i] = thread_list[i];
738 
739 	return 0;
740 }
741 
742 /*
743  * get_cpu_thread_group_start : Searches the thread group in tg->thread_list
744  *                              that @cpu belongs to.
745  *
746  * @cpu : The logical CPU whose thread group is being searched.
747  * @tg : The thread-group structure of the CPU node which @cpu belongs
748  *       to.
749  *
750  * Returns the index to tg->thread_list that points to the start
751  * of the thread_group that @cpu belongs to.
752  *
753  * Returns -1 if cpu doesn't belong to any of the groups pointed to by
754  * tg->thread_list.
755  */
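/*
 * With the example property above (2 groups of 4 threads, thread list
 * {5,6,7,8,9,10,11,12}), a CPU whose hardware id is 10 lies in the second
 * group, so the returned index is 4.
 */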
756 static int get_cpu_thread_group_start(int cpu, struct thread_groups *tg)
757 {
758 	int hw_cpu_id = get_hard_smp_processor_id(cpu);
759 	int i, j;
760 
761 	for (i = 0; i < tg->nr_groups; i++) {
762 		int group_start = i * tg->threads_per_group;
763 
764 		for (j = 0; j < tg->threads_per_group; j++) {
765 			int idx = group_start + j;
766 
767 			if (tg->thread_list[idx] == hw_cpu_id)
768 				return group_start;
769 		}
770 	}
771 
772 	return -1;
773 }
774 
775 static int init_cpu_l1_cache_map(int cpu)
776 
777 {
778 	struct device_node *dn = of_get_cpu_node(cpu, NULL);
779 	struct thread_groups tg = {.property = 0,
780 				   .nr_groups = 0,
781 				   .threads_per_group = 0};
782 	int first_thread = cpu_first_thread_sibling(cpu);
783 	int i, cpu_group_start = -1, err = 0;
784 
785 	if (!dn)
786 		return -ENODATA;
787 
788 	err = parse_thread_groups(dn, &tg, THREAD_GROUP_SHARE_L1);
789 	if (err)
790 		goto out;
791 
792 	zalloc_cpumask_var_node(&per_cpu(cpu_l1_cache_map, cpu),
793 				GFP_KERNEL,
794 				cpu_to_node(cpu));
795 
796 	cpu_group_start = get_cpu_thread_group_start(cpu, &tg);
797 
798 	if (unlikely(cpu_group_start == -1)) {
799 		WARN_ON_ONCE(1);
800 		err = -ENODATA;
801 		goto out;
802 	}
803 
804 	for (i = first_thread; i < first_thread + threads_per_core; i++) {
805 		int i_group_start = get_cpu_thread_group_start(i, &tg);
806 
807 		if (unlikely(i_group_start == -1)) {
808 			WARN_ON_ONCE(1);
809 			err = -ENODATA;
810 			goto out;
811 		}
812 
813 		if (i_group_start == cpu_group_start)
814 			cpumask_set_cpu(i, per_cpu(cpu_l1_cache_map, cpu));
815 	}
816 
817 out:
818 	of_node_put(dn);
819 	return err;
820 }
821 
822 static int init_big_cores(void)
823 {
824 	int cpu;
825 
826 	for_each_possible_cpu(cpu) {
827 		int err = init_cpu_l1_cache_map(cpu);
828 
829 		if (err)
830 			return err;
831 
832 		zalloc_cpumask_var_node(&per_cpu(cpu_smallcore_map, cpu),
833 					GFP_KERNEL,
834 					cpu_to_node(cpu));
835 	}
836 
837 	has_big_cores = true;
838 	return 0;
839 }
840 
841 void __init smp_prepare_cpus(unsigned int max_cpus)
842 {
843 	unsigned int cpu;
844 
845 	DBG("smp_prepare_cpus\n");
846 
847 	/*
848 	 * setup_cpu may need to be called on the boot cpu. We haven't
849 	 * spun any cpus up yet, but let's be paranoid.
850 	 */
851 	BUG_ON(boot_cpuid != smp_processor_id());
852 
853 	/* Fixup boot cpu */
854 	smp_store_cpu_info(boot_cpuid);
855 	cpu_callin_map[boot_cpuid] = 1;
856 
857 	for_each_possible_cpu(cpu) {
858 		zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
859 					GFP_KERNEL, cpu_to_node(cpu));
860 		zalloc_cpumask_var_node(&per_cpu(cpu_l2_cache_map, cpu),
861 					GFP_KERNEL, cpu_to_node(cpu));
862 		zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
863 					GFP_KERNEL, cpu_to_node(cpu));
864 		/*
865 		 * numa_node_id() works after this.
866 		 */
867 		if (cpu_present(cpu)) {
868 			set_cpu_numa_node(cpu, numa_cpu_lookup_table[cpu]);
869 			set_cpu_numa_mem(cpu,
870 				local_memory_node(numa_cpu_lookup_table[cpu]));
871 		}
872 	}
873 
874 	/* Init the cpumasks so the boot CPU is related to itself */
875 	cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
876 	cpumask_set_cpu(boot_cpuid, cpu_l2_cache_mask(boot_cpuid));
877 	cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));
878 
879 	init_big_cores();
880 	if (has_big_cores) {
881 		cpumask_set_cpu(boot_cpuid,
882 				cpu_smallcore_mask(boot_cpuid));
883 	}
884 
885 	if (smp_ops && smp_ops->probe)
886 		smp_ops->probe();
887 }
888 
889 void smp_prepare_boot_cpu(void)
890 {
891 	BUG_ON(smp_processor_id() != boot_cpuid);
892 #ifdef CONFIG_PPC64
893 	paca_ptrs[boot_cpuid]->__current = current;
894 #endif
895 	set_numa_node(numa_cpu_lookup_table[boot_cpuid]);
896 	current_set[boot_cpuid] = current;
897 }
898 
899 #ifdef CONFIG_HOTPLUG_CPU
900 
901 int generic_cpu_disable(void)
902 {
903 	unsigned int cpu = smp_processor_id();
904 
905 	if (cpu == boot_cpuid)
906 		return -EBUSY;
907 
908 	set_cpu_online(cpu, false);
909 #ifdef CONFIG_PPC64
910 	vdso_data->processorCount--;
911 #endif
912 	/* Update affinity of all IRQs previously aimed at this CPU */
913 	irq_migrate_all_off_this_cpu();
914 
915 	/*
916 	 * Depending on the details of the interrupt controller, it's possible
917 	 * that one of the interrupts we just migrated away from this CPU is
918 	 * actually already pending on this CPU. If we leave it in that state
919 	 * the interrupt will never be EOI'ed, and will never fire again. So
920 	 * temporarily enable interrupts here, to allow any pending interrupt to
921 	 * be received (and EOI'ed), before we take this CPU offline.
922 	 */
923 	local_irq_enable();
924 	mdelay(1);
925 	local_irq_disable();
926 
927 	return 0;
928 }
929 
930 void generic_cpu_die(unsigned int cpu)
931 {
932 	int i;
933 
934 	for (i = 0; i < 100; i++) {
935 		smp_rmb();
936 		if (is_cpu_dead(cpu))
937 			return;
938 		msleep(100);
939 	}
940 	printk(KERN_ERR "CPU%d didn't die...\n", cpu);
941 }
942 
943 void generic_set_cpu_dead(unsigned int cpu)
944 {
945 	per_cpu(cpu_state, cpu) = CPU_DEAD;
946 }
947 
948 /*
949  * The cpu_state should be set to CPU_UP_PREPARE in kick_cpu(), otherwise
950  * the cpu_state is always CPU_DEAD after calling generic_set_cpu_dead(),
951  * which would make the wait loop in generic_cpu_die() exit immediately.
952  */
953 void generic_set_cpu_up(unsigned int cpu)
954 {
955 	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
956 }
957 
958 int generic_check_cpu_restart(unsigned int cpu)
959 {
960 	return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
961 }
962 
963 int is_cpu_dead(unsigned int cpu)
964 {
965 	return per_cpu(cpu_state, cpu) == CPU_DEAD;
966 }
967 
968 static bool secondaries_inhibited(void)
969 {
970 	return kvm_hv_mode_active();
971 }
972 
973 #else /* HOTPLUG_CPU */
974 
975 #define secondaries_inhibited()		0
976 
977 #endif
978 
979 static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle)
980 {
981 #ifdef CONFIG_PPC64
982 	paca_ptrs[cpu]->__current = idle;
983 	paca_ptrs[cpu]->kstack = (unsigned long)task_stack_page(idle) +
984 				 THREAD_SIZE - STACK_FRAME_OVERHEAD;
985 #endif
986 	idle->cpu = cpu;
987 	secondary_current = current_set[cpu] = idle;
988 }
989 
990 int __cpu_up(unsigned int cpu, struct task_struct *tidle)
991 {
992 	int rc, c;
993 
994 	/*
995 	 * Don't allow secondary threads to come online if inhibited
996 	 */
997 	if (threads_per_core > 1 && secondaries_inhibited() &&
998 	    cpu_thread_in_subcore(cpu))
999 		return -EBUSY;
1000 
1001 	if (smp_ops == NULL ||
1002 	    (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
1003 		return -EINVAL;
1004 
1005 	cpu_idle_thread_init(cpu, tidle);
1006 
1007 	/*
1008 	 * The platform might need to allocate resources prior to bringing
1009 	 * up the CPU
1010 	 */
1011 	if (smp_ops->prepare_cpu) {
1012 		rc = smp_ops->prepare_cpu(cpu);
1013 		if (rc)
1014 			return rc;
1015 	}
1016 
1017 	/* Make sure callin-map entry is 0 (can be left over from a CPU
1018 	 * hotplug)
1019 	 */
1020 	cpu_callin_map[cpu] = 0;
1021 
1022 	/* The information for processor bringup must
1023 	 * be written out to main store before we release
1024 	 * the processor.
1025 	 */
1026 	smp_mb();
1027 
1028 	/* wake up cpus */
1029 	DBG("smp: kicking cpu %d\n", cpu);
1030 	rc = smp_ops->kick_cpu(cpu);
1031 	if (rc) {
1032 		pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc);
1033 		return rc;
1034 	}
1035 
1036 	/*
1037 	 * wait to see if the cpu made a callin (is actually up).
1038 	 * use this value that I found through experimentation.
1039 	 * -- Cort
1040 	 */
1041 	if (system_state < SYSTEM_RUNNING)
1042 		for (c = 50000; c && !cpu_callin_map[cpu]; c--)
1043 			udelay(100);
1044 #ifdef CONFIG_HOTPLUG_CPU
1045 	else
1046 		/*
1047 		 * CPUs can take much longer to come up in the
1048 		 * hotplug case.  Wait five seconds.
1049 		 */
1050 		for (c = 5000; c && !cpu_callin_map[cpu]; c--)
1051 			msleep(1);
1052 #endif
1053 
1054 	if (!cpu_callin_map[cpu]) {
1055 		printk(KERN_ERR "Processor %u is stuck.\n", cpu);
1056 		return -ENOENT;
1057 	}
1058 
1059 	DBG("Processor %u found.\n", cpu);
1060 
1061 	if (smp_ops->give_timebase)
1062 		smp_ops->give_timebase();
1063 
1064 	/* Wait until cpu puts itself in the online & active maps */
1065 	spin_until_cond(cpu_online(cpu));
1066 
1067 	return 0;
1068 }
1069 
1070 /* Return the value of the reg property corresponding to the given
1071  * logical cpu.
1072  */
1073 int cpu_to_core_id(int cpu)
1074 {
1075 	struct device_node *np;
1076 	const __be32 *reg;
1077 	int id = -1;
1078 
1079 	np = of_get_cpu_node(cpu, NULL);
1080 	if (!np)
1081 		goto out;
1082 
1083 	reg = of_get_property(np, "reg", NULL);
1084 	if (!reg)
1085 		goto out;
1086 
1087 	id = be32_to_cpup(reg);
1088 out:
1089 	of_node_put(np);
1090 	return id;
1091 }
1092 EXPORT_SYMBOL_GPL(cpu_to_core_id);
1093 
1094 /* Helper routines for cpu to core mapping */
1095 int cpu_core_index_of_thread(int cpu)
1096 {
1097 	return cpu >> threads_shift;
1098 }
1099 EXPORT_SYMBOL_GPL(cpu_core_index_of_thread);
1100 
1101 int cpu_first_thread_of_core(int core)
1102 {
1103 	return core << threads_shift;
1104 }
1105 EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);
1106 
1107 /* Must be called when no change can occur to cpu_present_mask,
1108  * i.e. during cpu online or offline.
1109  */
1110 static struct device_node *cpu_to_l2cache(int cpu)
1111 {
1112 	struct device_node *np;
1113 	struct device_node *cache;
1114 
1115 	if (!cpu_present(cpu))
1116 		return NULL;
1117 
1118 	np = of_get_cpu_node(cpu, NULL);
1119 	if (np == NULL)
1120 		return NULL;
1121 
1122 	cache = of_find_next_cache_node(np);
1123 
1124 	of_node_put(np);
1125 
1126 	return cache;
1127 }
1128 
1129 static bool update_mask_by_l2(int cpu, struct cpumask *(*mask_fn)(int))
1130 {
1131 	struct device_node *l2_cache, *np;
1132 	int i;
1133 
1134 	l2_cache = cpu_to_l2cache(cpu);
1135 	if (!l2_cache)
1136 		return false;
1137 
1138 	for_each_cpu(i, cpu_online_mask) {
1139 		/*
1140 		 * When updating the masks, the current CPU has not yet been marked
1141 		 * online, but we still need to update the cache masks.
1142 		 */
1143 		np = cpu_to_l2cache(i);
1144 		if (!np)
1145 			continue;
1146 
1147 		if (np == l2_cache)
1148 			set_cpus_related(cpu, i, mask_fn);
1149 
1150 		of_node_put(np);
1151 	}
1152 	of_node_put(l2_cache);
1153 
1154 	return true;
1155 }
1156 
1157 #ifdef CONFIG_HOTPLUG_CPU
1158 static void remove_cpu_from_masks(int cpu)
1159 {
1160 	int i;
1161 
1162 	/* NB: cpu_core_mask is a superset of the others */
1163 	for_each_cpu(i, cpu_core_mask(cpu)) {
1164 		set_cpus_unrelated(cpu, i, cpu_core_mask);
1165 		set_cpus_unrelated(cpu, i, cpu_l2_cache_mask);
1166 		set_cpus_unrelated(cpu, i, cpu_sibling_mask);
1167 		if (has_big_cores)
1168 			set_cpus_unrelated(cpu, i, cpu_smallcore_mask);
1169 	}
1170 }
1171 #endif
1172 
1173 static inline void add_cpu_to_smallcore_masks(int cpu)
1174 {
1175 	struct cpumask *this_l1_cache_map = per_cpu(cpu_l1_cache_map, cpu);
1176 	int i, first_thread = cpu_first_thread_sibling(cpu);
1177 
1178 	if (!has_big_cores)
1179 		return;
1180 
1181 	cpumask_set_cpu(cpu, cpu_smallcore_mask(cpu));
1182 
1183 	for (i = first_thread; i < first_thread + threads_per_core; i++) {
1184 		if (cpu_online(i) && cpumask_test_cpu(i, this_l1_cache_map))
1185 			set_cpus_related(i, cpu, cpu_smallcore_mask);
1186 	}
1187 }
1188 
1189 int get_physical_package_id(int cpu)
1190 {
1191 	int pkg_id = cpu_to_chip_id(cpu);
1192 
1193 	/*
1194 	 * If the platform is PowerNV or Guest on KVM, ibm,chip-id is
1195 	 * defined. Hence we would return the chip-id as the result of
1196 	 * get_physical_package_id.
1197 	 */
1198 	if (pkg_id == -1 && firmware_has_feature(FW_FEATURE_LPAR) &&
1199 	    IS_ENABLED(CONFIG_PPC_SPLPAR)) {
1200 		struct device_node *np = of_get_cpu_node(cpu, NULL);
1201 		pkg_id = of_node_to_nid(np);
1202 		of_node_put(np);
1203 	}
1204 
1205 	return pkg_id;
1206 }
1207 EXPORT_SYMBOL_GPL(get_physical_package_id);
1208 
1209 static void add_cpu_to_masks(int cpu)
1210 {
1211 	int first_thread = cpu_first_thread_sibling(cpu);
1212 	int pkg_id = get_physical_package_id(cpu);
1213 	int i;
1214 
1215 	/*
1216 	 * This CPU will not be in the online mask yet so we need to manually
1217 	 * add it to its own thread sibling mask.
1218 	 */
1219 	cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
1220 
1221 	for (i = first_thread; i < first_thread + threads_per_core; i++)
1222 		if (cpu_online(i))
1223 			set_cpus_related(i, cpu, cpu_sibling_mask);
1224 
1225 	add_cpu_to_smallcore_masks(cpu);
1226 	/*
1227 	 * Copy the thread sibling mask into the cache sibling mask
1228 	 * and mark any CPUs that share an L2 with this CPU.
1229 	 */
1230 	for_each_cpu(i, cpu_sibling_mask(cpu))
1231 		set_cpus_related(cpu, i, cpu_l2_cache_mask);
1232 	update_mask_by_l2(cpu, cpu_l2_cache_mask);
1233 
1234 	/*
1235 	 * Copy the cache sibling mask into core sibling mask and mark
1236 	 * any CPUs on the same chip as this CPU.
1237 	 */
1238 	for_each_cpu(i, cpu_l2_cache_mask(cpu))
1239 		set_cpus_related(cpu, i, cpu_core_mask);
1240 
1241 	if (pkg_id == -1)
1242 		return;
1243 
1244 	for_each_cpu(i, cpu_online_mask)
1245 		if (get_physical_package_id(i) == pkg_id)
1246 			set_cpus_related(cpu, i, cpu_core_mask);
1247 }
1248 
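/*
 * Set by start_secondary() when a CPU's L2 mask is wider than its sibling
 * mask; smp_cpus_done() then selects the cache-aware power9_topology.
 */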
1249 static bool shared_caches;
1250 
1251 /* Activate a secondary processor. */
1252 void start_secondary(void *unused)
1253 {
1254 	unsigned int cpu = smp_processor_id();
1255 	struct cpumask *(*sibling_mask)(int) = cpu_sibling_mask;
1256 
1257 	mmgrab(&init_mm);
1258 	current->active_mm = &init_mm;
1259 
1260 	smp_store_cpu_info(cpu);
1261 	set_dec(tb_ticks_per_jiffy);
1262 	preempt_disable();
1263 	cpu_callin_map[cpu] = 1;
1264 
1265 	if (smp_ops->setup_cpu)
1266 		smp_ops->setup_cpu(cpu);
1267 	if (smp_ops->take_timebase)
1268 		smp_ops->take_timebase();
1269 
1270 	secondary_cpu_time_init();
1271 
1272 #ifdef CONFIG_PPC64
1273 	if (system_state == SYSTEM_RUNNING)
1274 		vdso_data->processorCount++;
1275 
1276 	vdso_getcpu_init();
1277 #endif
1278 	/* Update topology CPU masks */
1279 	add_cpu_to_masks(cpu);
1280 
1281 	if (has_big_cores)
1282 		sibling_mask = cpu_smallcore_mask;
1283 	/*
1284 	 * Check for any shared caches. Note that this must be done on a
1285 	 * per-core basis because one core in the pair might be disabled.
1286 	 */
1287 	if (!cpumask_equal(cpu_l2_cache_mask(cpu), sibling_mask(cpu)))
1288 		shared_caches = true;
1289 
1290 	set_numa_node(numa_cpu_lookup_table[cpu]);
1291 	set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));
1292 
1293 	smp_wmb();
1294 	notify_cpu_starting(cpu);
1295 	set_cpu_online(cpu, true);
1296 
1297 	boot_init_stack_canary();
1298 
1299 	local_irq_enable();
1300 
1301 	/* We can enable ftrace for secondary cpus now */
1302 	this_cpu_enable_ftrace();
1303 
1304 	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
1305 
1306 	BUG();
1307 }
1308 
1309 int setup_profiling_timer(unsigned int multiplier)
1310 {
1311 	return 0;
1312 }
1313 
1314 #ifdef CONFIG_SCHED_SMT
1315 /* cpumask of CPUs with asymmetric SMT dependency */
1316 static int powerpc_smt_flags(void)
1317 {
1318 	int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
1319 
1320 	if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
1321 		printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
1322 		flags |= SD_ASYM_PACKING;
1323 	}
1324 	return flags;
1325 }
1326 #endif
1327 
1328 static struct sched_domain_topology_level powerpc_topology[] = {
1329 #ifdef CONFIG_SCHED_SMT
1330 	{ cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
1331 #endif
1332 	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
1333 	{ NULL, },
1334 };
1335 
1336 /*
1337  * P9 has a slightly odd architecture where pairs of cores share an L2 cache.
1338  * This topology makes it *much* cheaper to migrate tasks between adjacent cores
1339  * since the migrated task remains cache hot. We want to take advantage of this
1340  * at the scheduler level so an extra topology level is required.
1341  */
1342 static int powerpc_shared_cache_flags(void)
1343 {
1344 	return SD_SHARE_PKG_RESOURCES;
1345 }
1346 
1347 /*
1348  * We can't just pass cpu_l2_cache_mask() directly because it
1349  * returns a non-const pointer and the compiler barfs on that.
1350  */
1351 static const struct cpumask *shared_cache_mask(int cpu)
1352 {
1353 	return cpu_l2_cache_mask(cpu);
1354 }
1355 
1356 #ifdef CONFIG_SCHED_SMT
1357 static const struct cpumask *smallcore_smt_mask(int cpu)
1358 {
1359 	return cpu_smallcore_mask(cpu);
1360 }
1361 #endif
1362 
1363 static struct sched_domain_topology_level power9_topology[] = {
1364 #ifdef CONFIG_SCHED_SMT
1365 	{ cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
1366 #endif
1367 	{ shared_cache_mask, powerpc_shared_cache_flags, SD_INIT_NAME(CACHE) },
1368 	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
1369 	{ NULL, },
1370 };
1371 
1372 void __init smp_cpus_done(unsigned int max_cpus)
1373 {
1374 	/*
1375 	 * We are running pinned to the boot CPU, see rest_init().
1376 	 */
1377 	if (smp_ops && smp_ops->setup_cpu)
1378 		smp_ops->setup_cpu(boot_cpuid);
1379 
1380 	if (smp_ops && smp_ops->bringup_done)
1381 		smp_ops->bringup_done();
1382 
1383 	dump_numa_cpu_topology();
1384 
1385 #ifdef CONFIG_SCHED_SMT
1386 	if (has_big_cores) {
1387 		pr_info("Big cores detected but using small core scheduling\n");
1388 		power9_topology[0].mask = smallcore_smt_mask;
1389 		powerpc_topology[0].mask = smallcore_smt_mask;
1390 	}
1391 #endif
1392 	/*
1393 	 * If any CPU detects that it's sharing a cache with another CPU then
1394 	 * use the deeper topology that is aware of this sharing.
1395 	 */
1396 	if (shared_caches) {
1397 		pr_info("Using shared cache scheduler topology\n");
1398 		set_sched_topology(power9_topology);
1399 	} else {
1400 		pr_info("Using standard scheduler topology\n");
1401 		set_sched_topology(powerpc_topology);
1402 	}
1403 }
1404 
1405 #ifdef CONFIG_HOTPLUG_CPU
1406 int __cpu_disable(void)
1407 {
1408 	int cpu = smp_processor_id();
1409 	int err;
1410 
1411 	if (!smp_ops->cpu_disable)
1412 		return -ENOSYS;
1413 
1414 	this_cpu_disable_ftrace();
1415 
1416 	err = smp_ops->cpu_disable();
1417 	if (err)
1418 		return err;
1419 
1420 	/* Update sibling maps */
1421 	remove_cpu_from_masks(cpu);
1422 
1423 	return 0;
1424 }
1425 
1426 void __cpu_die(unsigned int cpu)
1427 {
1428 	if (smp_ops->cpu_die)
1429 		smp_ops->cpu_die(cpu);
1430 }
1431 
1432 void cpu_die(void)
1433 {
1434 	/*
1435 	 * Disable on the down path. This will be re-enabled by
1436 	 * start_secondary() via start_secondary_resume() below
1437 	 */
1438 	this_cpu_disable_ftrace();
1439 
1440 	if (ppc_md.cpu_die)
1441 		ppc_md.cpu_die();
1442 
1443 	/* If we return, we re-enter start_secondary */
1444 	start_secondary_resume();
1445 }
1446 
1447 #endif
1448