xref: /openbmc/linux/arch/powerpc/kernel/smp.c (revision da2ef666)
1 /*
2  * SMP support for ppc.
3  *
4  * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
5  * deal of code from the sparc and intel versions.
6  *
7  * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
8  *
9  * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
10  * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
11  *
12  *      This program is free software; you can redistribute it and/or
13  *      modify it under the terms of the GNU General Public License
14  *      as published by the Free Software Foundation; either version
15  *      2 of the License, or (at your option) any later version.
16  */
17 
18 #undef DEBUG
19 
20 #include <linux/kernel.h>
21 #include <linux/export.h>
22 #include <linux/sched/mm.h>
23 #include <linux/sched/topology.h>
24 #include <linux/smp.h>
25 #include <linux/interrupt.h>
26 #include <linux/delay.h>
27 #include <linux/init.h>
28 #include <linux/spinlock.h>
29 #include <linux/cache.h>
30 #include <linux/err.h>
31 #include <linux/device.h>
32 #include <linux/cpu.h>
33 #include <linux/notifier.h>
34 #include <linux/topology.h>
35 #include <linux/profile.h>
36 #include <linux/processor.h>
37 
38 #include <asm/ptrace.h>
39 #include <linux/atomic.h>
40 #include <asm/irq.h>
41 #include <asm/hw_irq.h>
42 #include <asm/kvm_ppc.h>
43 #include <asm/dbell.h>
44 #include <asm/page.h>
45 #include <asm/pgtable.h>
46 #include <asm/prom.h>
47 #include <asm/smp.h>
48 #include <asm/time.h>
49 #include <asm/machdep.h>
50 #include <asm/cputhreads.h>
51 #include <asm/cputable.h>
52 #include <asm/mpic.h>
53 #include <asm/vdso_datapage.h>
54 #ifdef CONFIG_PPC64
55 #include <asm/paca.h>
56 #endif
57 #include <asm/vdso.h>
58 #include <asm/debug.h>
59 #include <asm/kexec.h>
60 #include <asm/asm-prototypes.h>
61 #include <asm/cpu_has_feature.h>
62 #include <asm/ftrace.h>
63 
64 #ifdef DEBUG
65 #include <asm/udbg.h>
66 #define DBG(fmt...) udbg_printf(fmt)
67 #else
68 #define DBG(fmt...)
69 #endif
70 
71 #ifdef CONFIG_HOTPLUG_CPU
72 /* State of each CPU during hotplug phases */
73 static DEFINE_PER_CPU(int, cpu_state) = { 0 };
74 #endif
75 
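/* thread_info of the secondary CPU currently being brought up (set in cpu_idle_thread_init()) */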
76 struct thread_info *secondary_ti;
77 
78 DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
79 DEFINE_PER_CPU(cpumask_var_t, cpu_l2_cache_map);
80 DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);
81 
82 EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
83 EXPORT_PER_CPU_SYMBOL(cpu_l2_cache_map);
84 EXPORT_PER_CPU_SYMBOL(cpu_core_map);
85 
86 /* SMP operations for this machine */
87 struct smp_ops_t *smp_ops;
88 
89 /* Can't be static due to PowerMac hackery */
90 volatile unsigned int cpu_callin_map[NR_CPUS];
91 
92 int smt_enabled_at_boot = 1;
93 
94 /*
95  * Returns 1 if the specified cpu should be brought up during boot.
96  * Used to inhibit booting threads if they've been disabled or
97  * limited on the command line
98  */
99 int smp_generic_cpu_bootable(unsigned int nr)
100 {
101 	/* Special case - we inhibit secondary thread startup
102 	 * during boot if the user requests it.
103 	 */
104 	if (system_state < SYSTEM_RUNNING && cpu_has_feature(CPU_FTR_SMT)) {
105 		if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
106 			return 0;
107 		if (smt_enabled_at_boot
108 		    && cpu_thread_in_core(nr) >= smt_enabled_at_boot)
109 			return 0;
110 	}
111 
112 	return 1;
113 }
114 
115 
116 #ifdef CONFIG_PPC64
117 int smp_generic_kick_cpu(int nr)
118 {
119 	if (nr < 0 || nr >= nr_cpu_ids)
120 		return -EINVAL;
121 
122 	/*
123 	 * The processor is currently spinning, waiting for the
124 	 * cpu_start field to become non-zero. After we set cpu_start,
125 	 * the processor will continue on to secondary_start.
126 	 */
127 	if (!paca_ptrs[nr]->cpu_start) {
128 		paca_ptrs[nr]->cpu_start = 1;
129 		smp_mb();
130 		return 0;
131 	}
132 
133 #ifdef CONFIG_HOTPLUG_CPU
134 	/*
135 	 * OK, it's not there, so it might be soft-unplugged; let's
136 	 * try to bring it back.
137 	 */
138 	generic_set_cpu_up(nr);
139 	smp_wmb();
140 	smp_send_reschedule(nr);
141 #endif /* CONFIG_HOTPLUG_CPU */
142 
143 	return 0;
144 }
145 #endif /* CONFIG_PPC64 */
146 
147 static irqreturn_t call_function_action(int irq, void *data)
148 {
149 	generic_smp_call_function_interrupt();
150 	return IRQ_HANDLED;
151 }
152 
153 static irqreturn_t reschedule_action(int irq, void *data)
154 {
155 	scheduler_ipi();
156 	return IRQ_HANDLED;
157 }
158 
159 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
160 static irqreturn_t tick_broadcast_ipi_action(int irq, void *data)
161 {
162 	timer_broadcast_interrupt();
163 	return IRQ_HANDLED;
164 }
165 #endif
166 
167 #ifdef CONFIG_NMI_IPI
168 static irqreturn_t nmi_ipi_action(int irq, void *data)
169 {
170 	smp_handle_nmi_ipi(get_irq_regs());
171 	return IRQ_HANDLED;
172 }
173 #endif
174 
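/*
 * One interrupt handler per IPI message type, indexed by PPC_MSG_*.
 * Used by smp_request_message_ipi() when the interrupt controller
 * provides a separate interrupt per message.
 */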
175 static irq_handler_t smp_ipi_action[] = {
176 	[PPC_MSG_CALL_FUNCTION] =  call_function_action,
177 	[PPC_MSG_RESCHEDULE] = reschedule_action,
178 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
179 	[PPC_MSG_TICK_BROADCAST] = tick_broadcast_ipi_action,
180 #endif
181 #ifdef CONFIG_NMI_IPI
182 	[PPC_MSG_NMI_IPI] = nmi_ipi_action,
183 #endif
184 };
185 
186 /*
187  * The NMI IPI is a fallback and not truly non-maskable. It is simpler
188  * than going through the call function infrastructure, and strongly
189  * serialized, so it is more appropriate for debugging.
190  */
191 const char *smp_ipi_name[] = {
192 	[PPC_MSG_CALL_FUNCTION] =  "ipi call function",
193 	[PPC_MSG_RESCHEDULE] = "ipi reschedule",
194 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
195 	[PPC_MSG_TICK_BROADCAST] = "ipi tick-broadcast",
196 #endif
197 #ifdef CONFIG_NMI_IPI
198 	[PPC_MSG_NMI_IPI] = "nmi ipi",
199 #endif
200 };
201 
202 /* optional function to request ipi, for controllers with >= 4 ipis */
203 int smp_request_message_ipi(int virq, int msg)
204 {
205 	int err;
206 
207 	if (msg < 0 || msg > PPC_MSG_NMI_IPI)
208 		return -EINVAL;
209 #ifndef CONFIG_NMI_IPI
210 	if (msg == PPC_MSG_NMI_IPI)
211 		return 1;
212 #endif
213 
214 	err = request_irq(virq, smp_ipi_action[msg],
215 			  IRQF_PERCPU | IRQF_NO_THREAD | IRQF_NO_SUSPEND,
216 			  smp_ipi_name[msg], NULL);
217 	WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
218 		virq, smp_ipi_name[msg], err);
219 
220 	return err;
221 }
222 
223 #ifdef CONFIG_PPC_SMP_MUXED_IPI
224 struct cpu_messages {
225 	long messages;			/* current messages */
226 };
227 static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message);
228 
229 void smp_muxed_ipi_set_message(int cpu, int msg)
230 {
231 	struct cpu_messages *info = &per_cpu(ipi_message, cpu);
232 	char *message = (char *)&info->messages;
233 
234 	/*
235 	 * Order previous accesses before accesses in the IPI handler.
236 	 */
237 	smp_mb();
238 	message[msg] = 1;
239 }
240 
241 void smp_muxed_ipi_message_pass(int cpu, int msg)
242 {
243 	smp_muxed_ipi_set_message(cpu, msg);
244 
245 	/*
246 	 * cause_ipi functions are required to include a full barrier
247 	 * before doing whatever causes the IPI.
248 	 */
249 	smp_ops->cause_ipi(cpu);
250 }
251 
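/*
 * Each message type occupies one byte of the per-cpu "messages" word
 * (smp_muxed_ipi_set_message() sets that byte with a simple store).
 * IPI_MESSAGE() builds the corresponding mask within the long, taking
 * the byte order into account.
 */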
252 #ifdef __BIG_ENDIAN__
253 #define IPI_MESSAGE(A) (1uL << ((BITS_PER_LONG - 8) - 8 * (A)))
254 #else
255 #define IPI_MESSAGE(A) (1uL << (8 * (A)))
256 #endif
257 
258 irqreturn_t smp_ipi_demux(void)
259 {
260 	mb();	/* order any irq clear */
261 
262 	return smp_ipi_demux_relaxed();
263 }
264 
265 /* sync-free variant. Callers should ensure synchronization */
266 irqreturn_t smp_ipi_demux_relaxed(void)
267 {
268 	struct cpu_messages *info;
269 	unsigned long all;
270 
271 	info = this_cpu_ptr(&ipi_message);
272 	do {
273 		all = xchg(&info->messages, 0);
274 #if defined(CONFIG_KVM_XICS) && defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
275 		/*
276 		 * Must check for PPC_MSG_RM_HOST_ACTION messages
277 		 * before PPC_MSG_CALL_FUNCTION messages because when
278 		 * a VM is destroyed, we call kick_all_cpus_sync()
279 		 * to ensure that any pending PPC_MSG_RM_HOST_ACTION
280 		 * messages have completed before we free any VCPUs.
281 		 */
282 		if (all & IPI_MESSAGE(PPC_MSG_RM_HOST_ACTION))
283 			kvmppc_xics_ipi_action();
284 #endif
285 		if (all & IPI_MESSAGE(PPC_MSG_CALL_FUNCTION))
286 			generic_smp_call_function_interrupt();
287 		if (all & IPI_MESSAGE(PPC_MSG_RESCHEDULE))
288 			scheduler_ipi();
289 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
290 		if (all & IPI_MESSAGE(PPC_MSG_TICK_BROADCAST))
291 			timer_broadcast_interrupt();
292 #endif
293 #ifdef CONFIG_NMI_IPI
294 		if (all & IPI_MESSAGE(PPC_MSG_NMI_IPI))
295 			nmi_ipi_action(0, NULL);
296 #endif
297 	} while (info->messages);
298 
299 	return IRQ_HANDLED;
300 }
301 #endif /* CONFIG_PPC_SMP_MUXED_IPI */
302 
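/*
 * Deliver a single IPI message: use the platform's message_pass hook if
 * one is provided, otherwise fall back to the muxed-IPI path.
 */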
303 static inline void do_message_pass(int cpu, int msg)
304 {
305 	if (smp_ops->message_pass)
306 		smp_ops->message_pass(cpu, msg);
307 #ifdef CONFIG_PPC_SMP_MUXED_IPI
308 	else
309 		smp_muxed_ipi_message_pass(cpu, msg);
310 #endif
311 }
312 
313 void smp_send_reschedule(int cpu)
314 {
315 	if (likely(smp_ops))
316 		do_message_pass(cpu, PPC_MSG_RESCHEDULE);
317 }
318 EXPORT_SYMBOL_GPL(smp_send_reschedule);
319 
320 void arch_send_call_function_single_ipi(int cpu)
321 {
322 	do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
323 }
324 
325 void arch_send_call_function_ipi_mask(const struct cpumask *mask)
326 {
327 	unsigned int cpu;
328 
329 	for_each_cpu(cpu, mask)
330 		do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
331 }
332 
333 #ifdef CONFIG_NMI_IPI
334 
335 /*
336  * "NMI IPI" system.
337  *
338  * NMI IPIs may not be recoverable, so should not be used as ongoing part of
339  * a running system. They can be used for crash, debug, halt/reboot, etc.
340  *
341  * NMI IPIs are globally single threaded. No more than one in progress at
342  * any time.
343  *
344  * The IPI call waits with interrupts disabled until all targets enter the
345  * NMI handler, then the call returns.
346  *
347  * No new NMI can be initiated until targets exit the handler.
348  *
349  * The IPI call may time out without all targets entering the NMI handler.
350  * In that case, there is some logic to recover (and ignore subsequent
351  * NMI interrupts that may eventually be raised), but the platform interrupt
352  * handler may not be able to distinguish this from other exception causes,
353  * which may cause a crash.
354  */
355 
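/*
 * State for the single in-flight NMI IPI: a lock taken with interrupts
 * hard-disabled, the mask of target CPUs that have not yet entered the
 * handler, a busy count, and the function the targets should run.
 */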
356 static atomic_t __nmi_ipi_lock = ATOMIC_INIT(0);
357 static struct cpumask nmi_ipi_pending_mask;
358 static int nmi_ipi_busy_count = 0;
359 static void (*nmi_ipi_function)(struct pt_regs *) = NULL;
360 
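/*
 * Take the NMI IPI lock with interrupts hard-disabled. While spinning
 * for the lock, interrupts are briefly restored so this CPU is not left
 * with IRQs hard-disabled for an unbounded time.
 */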
361 static void nmi_ipi_lock_start(unsigned long *flags)
362 {
363 	raw_local_irq_save(*flags);
364 	hard_irq_disable();
365 	while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) {
366 		raw_local_irq_restore(*flags);
367 		spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0);
368 		raw_local_irq_save(*flags);
369 		hard_irq_disable();
370 	}
371 }
372 
373 static void nmi_ipi_lock(void)
374 {
375 	while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1)
376 		spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0);
377 }
378 
379 static void nmi_ipi_unlock(void)
380 {
381 	smp_mb();
382 	WARN_ON(atomic_read(&__nmi_ipi_lock) != 1);
383 	atomic_set(&__nmi_ipi_lock, 0);
384 }
385 
386 static void nmi_ipi_unlock_end(unsigned long *flags)
387 {
388 	nmi_ipi_unlock();
389 	raw_local_irq_restore(*flags);
390 }
391 
392 /*
393  * Platform NMI handler calls this to ack
394  */
395 int smp_handle_nmi_ipi(struct pt_regs *regs)
396 {
397 	void (*fn)(struct pt_regs *);
398 	unsigned long flags;
399 	int me = raw_smp_processor_id();
400 	int ret = 0;
401 
402 	/*
403 	 * Unexpected NMIs are possible here because the interrupt may not
404 	 * be able to distinguish NMI IPIs from other types of NMIs, or
405 	 * because the caller may have timed out.
406 	 */
407 	nmi_ipi_lock_start(&flags);
408 	if (!nmi_ipi_busy_count)
409 		goto out;
410 	if (!cpumask_test_cpu(me, &nmi_ipi_pending_mask))
411 		goto out;
412 
413 	fn = nmi_ipi_function;
414 	if (!fn)
415 		goto out;
416 
417 	cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
418 	nmi_ipi_busy_count++;
419 	nmi_ipi_unlock();
420 
421 	ret = 1;
422 
423 	fn(regs);
424 
425 	nmi_ipi_lock();
426 	if (nmi_ipi_busy_count > 1) /* Can race with caller time-out */
427 		nmi_ipi_busy_count--;
428 out:
429 	nmi_ipi_unlock_end(&flags);
430 
431 	return ret;
432 }
433 
434 static void do_smp_send_nmi_ipi(int cpu, bool safe)
435 {
436 	if (!safe && smp_ops->cause_nmi_ipi && smp_ops->cause_nmi_ipi(cpu))
437 		return;
438 
439 	if (cpu >= 0) {
440 		do_message_pass(cpu, PPC_MSG_NMI_IPI);
441 	} else {
442 		int c;
443 
444 		for_each_online_cpu(c) {
445 			if (c == raw_smp_processor_id())
446 				continue;
447 			do_message_pass(c, PPC_MSG_NMI_IPI);
448 		}
449 	}
450 }
451 
452 /*
453  * - cpu is the target CPU (must not be this CPU), or NMI_IPI_ALL_OTHERS.
454  * - fn is the target callback function.
455  * - delay_us > 0 is the delay before giving up waiting for targets to
456  *   complete executing the handler, == 0 specifies indefinite delay.
457  */
458 int __smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us, bool safe)
459 {
460 	unsigned long flags;
461 	int me = raw_smp_processor_id();
462 	int ret = 1;
463 
464 	BUG_ON(cpu == me);
465 	BUG_ON(cpu < 0 && cpu != NMI_IPI_ALL_OTHERS);
466 
467 	if (unlikely(!smp_ops))
468 		return 0;
469 
470 	/* Take the nmi_ipi_busy count/lock with interrupts hard disabled */
471 	nmi_ipi_lock_start(&flags);
472 	while (nmi_ipi_busy_count) {
473 		nmi_ipi_unlock_end(&flags);
474 		spin_until_cond(nmi_ipi_busy_count == 0);
475 		nmi_ipi_lock_start(&flags);
476 	}
477 
478 	nmi_ipi_function = fn;
479 
480 	if (cpu < 0) {
481 		/* ALL_OTHERS */
482 		cpumask_copy(&nmi_ipi_pending_mask, cpu_online_mask);
483 		cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
484 	} else {
485 		/* cpumask starts clear */
486 		cpumask_set_cpu(cpu, &nmi_ipi_pending_mask);
487 	}
488 	nmi_ipi_busy_count++;
489 	nmi_ipi_unlock();
490 
491 	do_smp_send_nmi_ipi(cpu, safe);
492 
493 	nmi_ipi_lock();
494 	/* nmi_ipi_busy_count is held here, so unlock/lock is okay */
495 	while (!cpumask_empty(&nmi_ipi_pending_mask)) {
496 		nmi_ipi_unlock();
497 		udelay(1);
498 		nmi_ipi_lock();
499 		if (delay_us) {
500 			delay_us--;
501 			if (!delay_us)
502 				break;
503 		}
504 	}
505 
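	/*
	 * All targets have entered the handler (or we timed out above);
	 * now wait for them to finish running fn, again with a timeout.
	 */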
506 	while (nmi_ipi_busy_count > 1) {
507 		nmi_ipi_unlock();
508 		udelay(1);
509 		nmi_ipi_lock();
510 		if (delay_us) {
511 			delay_us--;
512 			if (!delay_us)
513 				break;
514 		}
515 	}
516 
517 	if (!cpumask_empty(&nmi_ipi_pending_mask)) {
518 		/* Timeout waiting for CPUs to call smp_handle_nmi_ipi */
519 		ret = 0;
520 		cpumask_clear(&nmi_ipi_pending_mask);
521 	}
522 	if (nmi_ipi_busy_count > 1) {
523 		/* Timeout waiting for CPUs to execute fn */
524 		ret = 0;
525 		nmi_ipi_busy_count = 1;
526 	}
527 
528 	nmi_ipi_busy_count--;
529 	nmi_ipi_unlock_end(&flags);
530 
531 	return ret;
532 }
533 
534 int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
535 {
536 	return __smp_send_nmi_ipi(cpu, fn, delay_us, false);
537 }
538 
539 int smp_send_safe_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
540 {
541 	return __smp_send_nmi_ipi(cpu, fn, delay_us, true);
542 }
543 #endif /* CONFIG_NMI_IPI */
544 
545 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
546 void tick_broadcast(const struct cpumask *mask)
547 {
548 	unsigned int cpu;
549 
550 	for_each_cpu(cpu, mask)
551 		do_message_pass(cpu, PPC_MSG_TICK_BROADCAST);
552 }
553 #endif
554 
555 #ifdef CONFIG_DEBUGGER
556 void debugger_ipi_callback(struct pt_regs *regs)
557 {
558 	debugger_ipi(regs);
559 }
560 
561 void smp_send_debugger_break(void)
562 {
563 	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, debugger_ipi_callback, 1000000);
564 }
565 #endif
566 
567 #ifdef CONFIG_KEXEC_CORE
568 void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
569 {
570 	int cpu;
571 
572 	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, crash_ipi_callback, 1000000);
573 	if (kdump_in_progress() && crash_wake_offline) {
574 		for_each_present_cpu(cpu) {
575 			if (cpu_online(cpu))
576 				continue;
577 			/*
578 			 * crash_ipi_callback will wait for
579 			 * all cpus, including offline CPUs.
580 			 * We don't care about nmi_ipi_function.
581 			 * Offline cpus will jump straight into
582 			 * crash_ipi_callback, so we can skip the
583 			 * entire NMI dance and the wait for
584 			 * cpus to clear the pending mask, etc.
585 			 */
586 			do_smp_send_nmi_ipi(cpu, false);
587 		}
588 	}
589 }
590 #endif
591 
592 #ifdef CONFIG_NMI_IPI
593 static void nmi_stop_this_cpu(struct pt_regs *regs)
594 {
595 	/*
596 	 * This is a special case because it never returns, so the NMI IPI
597 	 * handling would never mark it as done, which makes any later
598 	 * smp_send_nmi_ipi() call spin forever. Mark it done now.
599 	 *
600 	 * IRQs are already hard disabled by the smp_handle_nmi_ipi.
601 	 */
602 	nmi_ipi_lock();
603 	if (nmi_ipi_busy_count > 1)
604 		nmi_ipi_busy_count--;
605 	nmi_ipi_unlock();
606 
607 	spin_begin();
608 	while (1)
609 		spin_cpu_relax();
610 }
611 
612 void smp_send_stop(void)
613 {
614 	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, nmi_stop_this_cpu, 1000000);
615 }
616 
617 #else /* CONFIG_NMI_IPI */
618 
619 static void stop_this_cpu(void *dummy)
620 {
621 	hard_irq_disable();
622 	spin_begin();
623 	while (1)
624 		spin_cpu_relax();
625 }
626 
627 void smp_send_stop(void)
628 {
629 	static bool stopped = false;
630 
631 	/*
632 	 * Prevent waiting on csd lock from a previous smp_send_stop.
633 	 * This is racy, but in general callers try to do the right
634 	 * thing and only fire off one smp_send_stop (e.g., see
635 	 * kernel/panic.c)
636 	 */
637 	if (stopped)
638 		return;
639 
640 	stopped = true;
641 
642 	smp_call_function(stop_this_cpu, NULL, 0);
643 }
644 #endif /* CONFIG_NMI_IPI */
645 
646 struct thread_info *current_set[NR_CPUS];
647 
648 static void smp_store_cpu_info(int id)
649 {
650 	per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
651 #ifdef CONFIG_PPC_FSL_BOOK3E
652 	per_cpu(next_tlbcam_idx, id)
653 		= (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
654 #endif
655 }
656 
657 /*
658  * Relationships between CPUs are maintained in a set of per-cpu cpumasks, so
659  * rather than just passing around the cpumask we pass around a function that
660  * returns that cpumask for the given CPU.
661  */
662 static void set_cpus_related(int i, int j, struct cpumask *(*get_cpumask)(int))
663 {
664 	cpumask_set_cpu(i, get_cpumask(j));
665 	cpumask_set_cpu(j, get_cpumask(i));
666 }
667 
668 #ifdef CONFIG_HOTPLUG_CPU
669 static void set_cpus_unrelated(int i, int j,
670 		struct cpumask *(*get_cpumask)(int))
671 {
672 	cpumask_clear_cpu(i, get_cpumask(j));
673 	cpumask_clear_cpu(j, get_cpumask(i));
674 }
675 #endif
676 
677 void __init smp_prepare_cpus(unsigned int max_cpus)
678 {
679 	unsigned int cpu;
680 
681 	DBG("smp_prepare_cpus\n");
682 
683 	/*
684 	 * setup_cpu may need to be called on the boot cpu. We haven't
685 	 * spun any cpus up yet, but let's be paranoid.
686 	 */
687 	BUG_ON(boot_cpuid != smp_processor_id());
688 
689 	/* Fixup boot cpu */
690 	smp_store_cpu_info(boot_cpuid);
691 	cpu_callin_map[boot_cpuid] = 1;
692 
693 	for_each_possible_cpu(cpu) {
694 		zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
695 					GFP_KERNEL, cpu_to_node(cpu));
696 		zalloc_cpumask_var_node(&per_cpu(cpu_l2_cache_map, cpu),
697 					GFP_KERNEL, cpu_to_node(cpu));
698 		zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
699 					GFP_KERNEL, cpu_to_node(cpu));
700 		/*
701 		 * numa_node_id() works after this.
702 		 */
703 		if (cpu_present(cpu)) {
704 			set_cpu_numa_node(cpu, numa_cpu_lookup_table[cpu]);
705 			set_cpu_numa_mem(cpu,
706 				local_memory_node(numa_cpu_lookup_table[cpu]));
707 		}
708 	}
709 
710 	/* Init the cpumasks so the boot CPU is related to itself */
711 	cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
712 	cpumask_set_cpu(boot_cpuid, cpu_l2_cache_mask(boot_cpuid));
713 	cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));
714 
715 	if (smp_ops && smp_ops->probe)
716 		smp_ops->probe();
717 }
718 
719 void smp_prepare_boot_cpu(void)
720 {
721 	BUG_ON(smp_processor_id() != boot_cpuid);
722 #ifdef CONFIG_PPC64
723 	paca_ptrs[boot_cpuid]->__current = current;
724 #endif
725 	set_numa_node(numa_cpu_lookup_table[boot_cpuid]);
726 	current_set[boot_cpuid] = task_thread_info(current);
727 }
728 
729 #ifdef CONFIG_HOTPLUG_CPU
730 
731 int generic_cpu_disable(void)
732 {
733 	unsigned int cpu = smp_processor_id();
734 
735 	if (cpu == boot_cpuid)
736 		return -EBUSY;
737 
738 	set_cpu_online(cpu, false);
739 #ifdef CONFIG_PPC64
740 	vdso_data->processorCount--;
741 #endif
742 	/* Update affinity of all IRQs previously aimed at this CPU */
743 	irq_migrate_all_off_this_cpu();
744 
745 	/*
746 	 * Depending on the details of the interrupt controller, it's possible
747 	 * that one of the interrupts we just migrated away from this CPU is
748 	 * actually already pending on this CPU. If we leave it in that state
749 	 * the interrupt will never be EOI'ed, and will never fire again. So
750 	 * temporarily enable interrupts here, to allow any pending interrupt to
751 	 * be received (and EOI'ed), before we take this CPU offline.
752 	 */
753 	local_irq_enable();
754 	mdelay(1);
755 	local_irq_disable();
756 
757 	return 0;
758 }
759 
760 void generic_cpu_die(unsigned int cpu)
761 {
762 	int i;
763 
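	/* Poll for up to ~10 seconds (100 x 100ms) for the CPU to mark itself dead. */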
764 	for (i = 0; i < 100; i++) {
765 		smp_rmb();
766 		if (is_cpu_dead(cpu))
767 			return;
768 		msleep(100);
769 	}
770 	printk(KERN_ERR "CPU%d didn't die...\n", cpu);
771 }
772 
773 void generic_set_cpu_dead(unsigned int cpu)
774 {
775 	per_cpu(cpu_state, cpu) = CPU_DEAD;
776 }
777 
778 /*
779  * The cpu_state should be set to CPU_UP_PREPARE in kick_cpu(); otherwise
780  * cpu_state remains CPU_DEAD after generic_set_cpu_dead() has been called,
781  * and the wait loop in generic_cpu_die() exits immediately.
782  */
783 void generic_set_cpu_up(unsigned int cpu)
784 {
785 	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
786 }
787 
788 int generic_check_cpu_restart(unsigned int cpu)
789 {
790 	return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
791 }
792 
793 int is_cpu_dead(unsigned int cpu)
794 {
795 	return per_cpu(cpu_state, cpu) == CPU_DEAD;
796 }
797 
798 static bool secondaries_inhibited(void)
799 {
800 	return kvm_hv_mode_active();
801 }
802 
803 #else /* HOTPLUG_CPU */
804 
805 #define secondaries_inhibited()		0
806 
807 #endif
808 
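/*
 * Record the idle task for a CPU that is about to be brought up: point
 * its paca at the idle task (on 64-bit) and publish the idle thread_info
 * via secondary_ti and current_set[].
 */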
809 static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle)
810 {
811 	struct thread_info *ti = task_thread_info(idle);
812 
813 #ifdef CONFIG_PPC64
814 	paca_ptrs[cpu]->__current = idle;
815 	paca_ptrs[cpu]->kstack = (unsigned long)ti + THREAD_SIZE - STACK_FRAME_OVERHEAD;
816 #endif
817 	ti->cpu = cpu;
818 	secondary_ti = current_set[cpu] = ti;
819 }
820 
821 int __cpu_up(unsigned int cpu, struct task_struct *tidle)
822 {
823 	int rc, c;
824 
825 	/*
826 	 * Don't allow secondary threads to come online if inhibited
827 	 */
828 	if (threads_per_core > 1 && secondaries_inhibited() &&
829 	    cpu_thread_in_subcore(cpu))
830 		return -EBUSY;
831 
832 	if (smp_ops == NULL ||
833 	    (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
834 		return -EINVAL;
835 
836 	cpu_idle_thread_init(cpu, tidle);
837 
838 	/*
839 	 * The platform might need to allocate resources prior to bringing
840 	 * up the CPU
841 	 */
842 	if (smp_ops->prepare_cpu) {
843 		rc = smp_ops->prepare_cpu(cpu);
844 		if (rc)
845 			return rc;
846 	}
847 
848 	/* Make sure the callin-map entry is 0 (it can be left over
849 	 * from a previous CPU hotplug).
850 	 */
851 	cpu_callin_map[cpu] = 0;
852 
853 	/* The information for processor bringup must
854 	 * be written out to main store before we release
855 	 * the processor.
856 	 */
857 	smp_mb();
858 
859 	/* wake up cpus */
860 	DBG("smp: kicking cpu %d\n", cpu);
861 	rc = smp_ops->kick_cpu(cpu);
862 	if (rc) {
863 		pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc);
864 		return rc;
865 	}
866 
867 	/*
868 	 * Wait to see if the cpu made a callin (i.e. is actually up).
869 	 * Use this value that I found through experimentation.
870 	 * -- Cort
871 	 */
872 	if (system_state < SYSTEM_RUNNING)
873 		for (c = 50000; c && !cpu_callin_map[cpu]; c--)
874 			udelay(100);
875 #ifdef CONFIG_HOTPLUG_CPU
876 	else
877 		/*
878 		 * CPUs can take much longer to come up in the
879 		 * hotplug case.  Wait five seconds.
880 		 */
881 		for (c = 5000; c && !cpu_callin_map[cpu]; c--)
882 			msleep(1);
883 #endif
884 
885 	if (!cpu_callin_map[cpu]) {
886 		printk(KERN_ERR "Processor %u is stuck.\n", cpu);
887 		return -ENOENT;
888 	}
889 
890 	DBG("Processor %u found.\n", cpu);
891 
892 	if (smp_ops->give_timebase)
893 		smp_ops->give_timebase();
894 
895 	/* Wait until cpu puts itself in the online & active maps */
896 	spin_until_cond(cpu_online(cpu));
897 
898 	return 0;
899 }
900 
901 /* Return the value of the reg property corresponding to the given
902  * logical cpu.
903  */
904 int cpu_to_core_id(int cpu)
905 {
906 	struct device_node *np;
907 	const __be32 *reg;
908 	int id = -1;
909 
910 	np = of_get_cpu_node(cpu, NULL);
911 	if (!np)
912 		goto out;
913 
914 	reg = of_get_property(np, "reg", NULL);
915 	if (!reg)
916 		goto out;
917 
918 	id = be32_to_cpup(reg);
919 out:
920 	of_node_put(np);
921 	return id;
922 }
923 EXPORT_SYMBOL_GPL(cpu_to_core_id);
924 
925 /* Helper routines for cpu to core mapping */
926 int cpu_core_index_of_thread(int cpu)
927 {
928 	return cpu >> threads_shift;
929 }
930 EXPORT_SYMBOL_GPL(cpu_core_index_of_thread);
931 
932 int cpu_first_thread_of_core(int core)
933 {
934 	return core << threads_shift;
935 }
936 EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);
937 
938 /* Must be called when no change can occur to cpu_present_mask,
939  * i.e. during cpu online or offline.
940  */
941 static struct device_node *cpu_to_l2cache(int cpu)
942 {
943 	struct device_node *np;
944 	struct device_node *cache;
945 
946 	if (!cpu_present(cpu))
947 		return NULL;
948 
949 	np = of_get_cpu_node(cpu, NULL);
950 	if (np == NULL)
951 		return NULL;
952 
953 	cache = of_find_next_cache_node(np);
954 
955 	of_node_put(np);
956 
957 	return cache;
958 }
959 
960 static bool update_mask_by_l2(int cpu, struct cpumask *(*mask_fn)(int))
961 {
962 	struct device_node *l2_cache, *np;
963 	int i;
964 
965 	l2_cache = cpu_to_l2cache(cpu);
966 	if (!l2_cache)
967 		return false;
968 
969 	for_each_cpu(i, cpu_online_mask) {
970 		/*
971 		 * When updating the masks the current CPU has not yet been
972 		 * marked online, but we still need to update its cache masks.
973 		 */
974 		np = cpu_to_l2cache(i);
975 		if (!np)
976 			continue;
977 
978 		if (np == l2_cache)
979 			set_cpus_related(cpu, i, mask_fn);
980 
981 		of_node_put(np);
982 	}
983 	of_node_put(l2_cache);
984 
985 	return true;
986 }
987 
988 #ifdef CONFIG_HOTPLUG_CPU
989 static void remove_cpu_from_masks(int cpu)
990 {
991 	int i;
992 
993 	/* NB: cpu_core_mask is a superset of the others */
994 	for_each_cpu(i, cpu_core_mask(cpu)) {
995 		set_cpus_unrelated(cpu, i, cpu_core_mask);
996 		set_cpus_unrelated(cpu, i, cpu_l2_cache_mask);
997 		set_cpus_unrelated(cpu, i, cpu_sibling_mask);
998 	}
999 }
1000 #endif
1001 
1002 static void add_cpu_to_masks(int cpu)
1003 {
1004 	int first_thread = cpu_first_thread_sibling(cpu);
1005 	int chipid = cpu_to_chip_id(cpu);
1006 	int i;
1007 
1008 	/*
1009 	 * This CPU will not be in the online mask yet so we need to manually
1010 	 * add it to its own thread sibling mask.
1011 	 */
1012 	cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
1013 
1014 	for (i = first_thread; i < first_thread + threads_per_core; i++)
1015 		if (cpu_online(i))
1016 			set_cpus_related(i, cpu, cpu_sibling_mask);
1017 
1018 	/*
1019 	 * Copy the thread sibling mask into the cache sibling mask
1020 	 * and mark any CPUs that share an L2 with this CPU.
1021 	 */
1022 	for_each_cpu(i, cpu_sibling_mask(cpu))
1023 		set_cpus_related(cpu, i, cpu_l2_cache_mask);
1024 	update_mask_by_l2(cpu, cpu_l2_cache_mask);
1025 
1026 	/*
1027 	 * Copy the cache sibling mask into core sibling mask and mark
1028 	 * any CPUs on the same chip as this CPU.
1029 	 */
1030 	for_each_cpu(i, cpu_l2_cache_mask(cpu))
1031 		set_cpus_related(cpu, i, cpu_core_mask);
1032 
1033 	if (chipid == -1)
1034 		return;
1035 
1036 	for_each_cpu(i, cpu_online_mask)
1037 		if (cpu_to_chip_id(i) == chipid)
1038 			set_cpus_related(cpu, i, cpu_core_mask);
1039 }
1040 
1041 static bool shared_caches;
1042 
1043 /* Activate a secondary processor. */
1044 void start_secondary(void *unused)
1045 {
1046 	unsigned int cpu = smp_processor_id();
1047 
1048 	mmgrab(&init_mm);
1049 	current->active_mm = &init_mm;
1050 
1051 	smp_store_cpu_info(cpu);
1052 	set_dec(tb_ticks_per_jiffy);
1053 	preempt_disable();
1054 	cpu_callin_map[cpu] = 1;
1055 
1056 	if (smp_ops->setup_cpu)
1057 		smp_ops->setup_cpu(cpu);
1058 	if (smp_ops->take_timebase)
1059 		smp_ops->take_timebase();
1060 
1061 	secondary_cpu_time_init();
1062 
1063 #ifdef CONFIG_PPC64
1064 	if (system_state == SYSTEM_RUNNING)
1065 		vdso_data->processorCount++;
1066 
1067 	vdso_getcpu_init();
1068 #endif
1069 	/* Update topology CPU masks */
1070 	add_cpu_to_masks(cpu);
1071 
1072 	/*
1073 	 * Check for any shared caches. Note that this must be done on a
1074 	 * per-core basis because one core in the pair might be disabled.
1075 	 */
1076 	if (!cpumask_equal(cpu_l2_cache_mask(cpu), cpu_sibling_mask(cpu)))
1077 		shared_caches = true;
1078 
1079 	set_numa_node(numa_cpu_lookup_table[cpu]);
1080 	set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));
1081 
1082 	smp_wmb();
1083 	notify_cpu_starting(cpu);
1084 	set_cpu_online(cpu, true);
1085 
1086 	local_irq_enable();
1087 
1088 	/* We can enable ftrace for secondary cpus now */
1089 	this_cpu_enable_ftrace();
1090 
1091 	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
1092 
1093 	BUG();
1094 }
1095 
1096 int setup_profiling_timer(unsigned int multiplier)
1097 {
1098 	return 0;
1099 }
1100 
1101 #ifdef CONFIG_SCHED_SMT
1102 /* cpumask of CPUs with asymmetric SMT dependency */
1103 static int powerpc_smt_flags(void)
1104 {
1105 	int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
1106 
1107 	if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
1108 		printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
1109 		flags |= SD_ASYM_PACKING;
1110 	}
1111 	return flags;
1112 }
1113 #endif
1114 
1115 static struct sched_domain_topology_level powerpc_topology[] = {
1116 #ifdef CONFIG_SCHED_SMT
1117 	{ cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
1118 #endif
1119 	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
1120 	{ NULL, },
1121 };
1122 
1123 /*
1124  * P9 has a slightly odd architecture where pairs of cores share an L2 cache.
1125  * This topology makes it *much* cheaper to migrate tasks between adjacent cores
1126  * since the migrated task remains cache hot. We want to take advantage of this
1127  * at the scheduler level so an extra topology level is required.
1128  */
1129 static int powerpc_shared_cache_flags(void)
1130 {
1131 	return SD_SHARE_PKG_RESOURCES;
1132 }
1133 
1134 /*
1135  * We can't just pass cpu_l2_cache_mask() directly because it
1136  * returns a non-const pointer and the compiler barfs on that.
1137  */
1138 static const struct cpumask *shared_cache_mask(int cpu)
1139 {
1140 	return cpu_l2_cache_mask(cpu);
1141 }
1142 
1143 static struct sched_domain_topology_level power9_topology[] = {
1144 #ifdef CONFIG_SCHED_SMT
1145 	{ cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
1146 #endif
1147 	{ shared_cache_mask, powerpc_shared_cache_flags, SD_INIT_NAME(CACHE) },
1148 	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
1149 	{ NULL, },
1150 };
1151 
1152 void __init smp_cpus_done(unsigned int max_cpus)
1153 {
1154 	/*
1155 	 * We are running pinned to the boot CPU, see rest_init().
1156 	 */
1157 	if (smp_ops && smp_ops->setup_cpu)
1158 		smp_ops->setup_cpu(boot_cpuid);
1159 
1160 	if (smp_ops && smp_ops->bringup_done)
1161 		smp_ops->bringup_done();
1162 
1163 	/*
1164 	 * On a shared LPAR, associativity needs to be requested.
1165 	 * Hence, get numa topology before dumping cpu topology
1166 	 */
1167 	shared_proc_topology_init();
1168 	dump_numa_cpu_topology();
1169 
1170 	/*
1171 	 * If any CPU detects that it's sharing a cache with another CPU then
1172 	 * use the deeper topology that is aware of this sharing.
1173 	 */
1174 	if (shared_caches) {
1175 		pr_info("Using shared cache scheduler topology\n");
1176 		set_sched_topology(power9_topology);
1177 	} else {
1178 		pr_info("Using standard scheduler topology\n");
1179 		set_sched_topology(powerpc_topology);
1180 	}
1181 }
1182 
1183 #ifdef CONFIG_HOTPLUG_CPU
1184 int __cpu_disable(void)
1185 {
1186 	int cpu = smp_processor_id();
1187 	int err;
1188 
1189 	if (!smp_ops->cpu_disable)
1190 		return -ENOSYS;
1191 
1192 	this_cpu_disable_ftrace();
1193 
1194 	err = smp_ops->cpu_disable();
1195 	if (err)
1196 		return err;
1197 
1198 	/* Update sibling maps */
1199 	remove_cpu_from_masks(cpu);
1200 
1201 	return 0;
1202 }
1203 
1204 void __cpu_die(unsigned int cpu)
1205 {
1206 	if (smp_ops->cpu_die)
1207 		smp_ops->cpu_die(cpu);
1208 }
1209 
1210 void cpu_die(void)
1211 {
1212 	/*
1213 	 * Disable on the down path. This will be re-enabled by
1214 	 * start_secondary() via start_secondary_resume() below
1215 	 */
1216 	this_cpu_disable_ftrace();
1217 
1218 	if (ppc_md.cpu_die)
1219 		ppc_md.cpu_die();
1220 
1221 	/* If we return, we re-enter start_secondary */
1222 	start_secondary_resume();
1223 }
1224 
1225 #endif
1226