/*
 * SMP support for PowerNV machines.
 *
 * Copyright 2011 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/sched/hotplug.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/cpu.h>

#include <asm/irq.h>
#include <asm/smp.h>
#include <asm/paca.h>
#include <asm/machdep.h>
#include <asm/cputable.h>
#include <asm/firmware.h>
#include <asm/vdso_datapage.h>
#include <asm/cputhreads.h>
#include <asm/xics.h>
#include <asm/xive.h>
#include <asm/opal.h>
#include <asm/runlatch.h>
#include <asm/code-patching.h>
#include <asm/dbell.h>
#include <asm/kvm_ppc.h>
#include <asm/ppc-opcode.h>
#include <asm/cpuidle.h>

#include "powernv.h"

#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

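/*
 * Per-CPU interrupt controller setup: XIVE where it is in use, otherwise
 * XICS for every CPU except the boot CPU (which was set up during early
 * boot).
 */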
static void pnv_smp_setup_cpu(int cpu)
{
	if (xive_enabled())
		xive_smp_setup_cpu();
	else if (cpu != boot_cpuid)
		xics_setup_cpu();
}

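/*
 * Release secondary CPU 'nr' into generic_secondary_smp_init(). Threads
 * still held in OPAL are started with opal_start_cpu(); threads already
 * spinning in the kernel (e.g. after kexec) only need the PACA kick.
 */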
static int pnv_smp_kick_cpu(int nr)
{
	unsigned int pcpu;
	unsigned long start_here =
			__pa(ppc_function_entry(generic_secondary_smp_init));
	long rc;
	uint8_t status;

	if (nr < 0 || nr >= nr_cpu_ids)
		return -EINVAL;

	/* Only look up the hardware thread id once nr is known to be valid */
	pcpu = get_hard_smp_processor_id(nr);

	/*
	 * If we already started or OPAL is not supported, we just
	 * kick the CPU via the PACA
	 */
	if (paca[nr].cpu_start || !firmware_has_feature(FW_FEATURE_OPAL))
		goto kick;

	/*
	 * At this point, the CPU can either be spinning on the way in
	 * from kexec or be inside OPAL waiting to be started for the
	 * first time. OPAL v3 allows us to query OPAL to know if it
	 * has the CPUs, so we do that
	 */
	rc = opal_query_cpu_status(pcpu, &status);
	if (rc != OPAL_SUCCESS) {
		pr_warn("OPAL Error %ld querying CPU %d state\n", rc, nr);
		return -ENODEV;
	}

	/*
	 * Already started, just kick it, probably coming from
	 * kexec and spinning
	 */
	if (status == OPAL_THREAD_STARTED)
		goto kick;

	/*
	 * Available/inactive, let's kick it
	 */
	if (status == OPAL_THREAD_INACTIVE) {
		pr_devel("OPAL: Starting CPU %d (HW 0x%x)...\n", nr, pcpu);
		rc = opal_start_cpu(pcpu, start_here);
		if (rc != OPAL_SUCCESS) {
			pr_warn("OPAL Error %ld starting CPU %d\n", rc, nr);
			return -ENODEV;
		}
	} else {
		/*
		 * An unavailable CPU (or any other unknown status)
		 * shouldn't be started. It should also
		 * not be in the possible map but currently it can
		 * happen
		 */
		pr_devel("OPAL: CPU %d (HW 0x%x) is unavailable (status %d)...\n",
			 nr, pcpu, status);
		return -ENODEV;
	}

kick:
	return smp_generic_kick_cpu(nr);
}

#ifdef CONFIG_HOTPLUG_CPU

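/*
 * Remove the calling CPU from the online mask and migrate its interrupts
 * away in preparation for offlining it.
 */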
static int pnv_smp_cpu_disable(void)
{
	int cpu = smp_processor_id();

	/* This is identical to pSeries... might consolidate by
	 * moving migrate_irqs_away to a ppc_md with default to
	 * the generic fixup_irqs. --BenH.
	 */
	set_cpu_online(cpu, false);
	vdso_data->processorCount--;
	if (cpu == boot_cpuid)
		boot_cpuid = cpumask_any(cpu_online_mask);
	if (xive_enabled())
		xive_smp_disable_cpu();
	else
		xics_migrate_irqs_away();
	return 0;
}

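/*
 * Offline idle loop: park the CPU in the deepest available idle state via
 * pnv_cpu_offline() until generic_check_cpu_restart() signals that it
 * should come back online.
 */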
static void pnv_smp_cpu_kill_self(void)
{
	unsigned int cpu;
	unsigned long srr1, wmask;

	/* Standard hot unplug procedure */
	/*
	 * This hard disables local interrupts, ensuring we have no lazy
	 * irqs pending.
	 */
	WARN_ON(irqs_disabled());
	hard_irq_disable();
	WARN_ON(lazy_irq_pending());

	idle_task_exit();
	current->active_mm = NULL; /* for sanity */
	cpu = smp_processor_id();
	DBG("CPU%d offline\n", cpu);
	generic_set_cpu_dead(cpu);
	smp_wmb();

	wmask = SRR1_WAKEMASK;
	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		wmask = SRR1_WAKEMASK_P8;

	/* We don't want to take decrementer interrupts while we are offline,
	 * so clear LPCR:PECE1. We keep PECE2 (and LPCR_PECE_HVEE on P9)
	 * enabled so as to let IPIs in.
	 */
	mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1);

	while (!generic_check_cpu_restart(cpu)) {
		/*
		 * Clear IPI flag, since we don't handle IPIs while
		 * offline, except for those when changing micro-threading
		 * mode, which are handled explicitly below, and those
		 * for coming online, which are handled via
		 * generic_check_cpu_restart() calls.
		 */
		kvmppc_set_host_ipi(cpu, 0);

		srr1 = pnv_cpu_offline(cpu);

		WARN_ON(lazy_irq_pending());

		/*
		 * If the SRR1 value indicates that we woke up due to
		 * an external interrupt, then clear the interrupt.
		 * We clear the interrupt before checking for the
		 * reason, so as to avoid a race where we wake up for
		 * some other reason, find nothing and clear the interrupt
		 * just as some other cpu is sending us an interrupt.
		 * If we returned from power7_nap as a result of
		 * having finished executing in a KVM guest, then srr1
		 * contains 0.
		 */
		if (((srr1 & wmask) == SRR1_WAKEEE) ||
		    ((srr1 & wmask) == SRR1_WAKEHVI)) {
			if (cpu_has_feature(CPU_FTR_ARCH_300)) {
				if (xive_enabled())
					xive_flush_interrupt();
				else
					icp_opal_flush_interrupt();
			} else
				icp_native_flush_interrupt();
		} else if ((srr1 & wmask) == SRR1_WAKEHDBELL) {
			unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
			asm volatile(PPC_MSGCLR(%0) : : "r" (msg));
		}
		smp_mb();

		if (cpu_core_split_required())
			continue;

		if (srr1 && !generic_check_cpu_restart(cpu))
			DBG("CPU%d Unexpected exit while offline srr1=%lx!\n",
					cpu, srr1);
	}

	/* Re-enable decrementer interrupts */
	mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_PECE1);
	DBG("CPU%d coming online...\n", cpu);
}

#endif /* CONFIG_HOTPLUG_CPU */

static int pnv_cpu_bootable(unsigned int nr)
{
	/*
	 * Starting with POWER8, the subcore logic relies on all threads of a
	 * core being booted so that they can participate in split mode
	 * switches. So on those machines we ignore the smt_enabled_at_boot
	 * setting (smt-enabled on the kernel command line).
	 */
	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		return 1;

	return smp_generic_cpu_bootable(nr);
}

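/* Prepare the interrupt controller for a new CPU (only XIVE needs this). */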
static int pnv_smp_prepare_cpu(int cpu)
{
	if (xive_enabled())
		return xive_smp_prepare_cpu(cpu);
	return 0;
}

/* Cause IPI as set up by the interrupt controller (xics or xive) */
static void (*ic_cause_ipi)(int cpu);

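/*
 * Send an IPI, preferring a msgsnd doorbell when the target is a thread
 * on the same core and falling back to the interrupt controller otherwise.
 */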
static void pnv_cause_ipi(int cpu)
{
	if (doorbell_try_core_ipi(cpu))
		return;

	ic_cause_ipi(cpu);
}

static void pnv_p9_dd1_cause_ipi(int cpu)
{
	int this_cpu = get_cpu();

	/*
	 * POWER9 DD1 has a globally addressed msgsnd, but for now we restrict
	 * IPIs to the same core, because it requires additional
	 * synchronization for inter-core doorbells which we do not implement.
	 */
	if (cpumask_test_cpu(cpu, cpu_sibling_mask(this_cpu)))
		doorbell_global_ipi(cpu);
	else
		ic_cause_ipi(cpu);

	put_cpu();
}

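/*
 * Probe the interrupt controller and select how IPIs are sent: global
 * doorbells on POWER9, the DD1 workaround above on early POWER9, and
 * core-local doorbells with an interrupt controller fallback on older
 * CPUs that support msgsnd.
 */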
static void __init pnv_smp_probe(void)
{
	if (xive_enabled())
		xive_smp_probe();
	else
		xics_smp_probe();

	if (cpu_has_feature(CPU_FTR_DBELL)) {
		ic_cause_ipi = smp_ops->cause_ipi;
		WARN_ON(!ic_cause_ipi);

		if (cpu_has_feature(CPU_FTR_ARCH_300)) {
			if (cpu_has_feature(CPU_FTR_POWER9_DD1))
				smp_ops->cause_ipi = pnv_p9_dd1_cause_ipi;
			else
				smp_ops->cause_ipi = doorbell_global_ipi;
		} else {
			smp_ops->cause_ipi = pnv_cause_ipi;
		}
	}
}

static struct smp_ops_t pnv_smp_ops = {
	.message_pass	= NULL, /* Use smp_muxed_ipi_message_pass */
	.cause_ipi	= NULL,	/* Filled at runtime by pnv_smp_probe() */
	.cause_nmi_ipi	= NULL,
	.probe		= pnv_smp_probe,
	.prepare_cpu	= pnv_smp_prepare_cpu,
	.kick_cpu	= pnv_smp_kick_cpu,
	.setup_cpu	= pnv_smp_setup_cpu,
	.cpu_bootable	= pnv_cpu_bootable,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_disable	= pnv_smp_cpu_disable,
	.cpu_die	= generic_cpu_die,
#endif /* CONFIG_HOTPLUG_CPU */
};

/* This is called very early during platform setup_arch */
void __init pnv_smp_init(void)
{
	smp_ops = &pnv_smp_ops;

#ifdef CONFIG_HOTPLUG_CPU
	ppc_md.cpu_die	= pnv_smp_cpu_kill_self;
#endif
}