// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SMP support for PowerNV machines.
 *
 * Copyright 2011 IBM Corp.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/sched/hotplug.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/cpu.h>

#include <asm/irq.h>
#include <asm/smp.h>
#include <asm/paca.h>
#include <asm/machdep.h>
#include <asm/cputable.h>
#include <asm/firmware.h>
#include <asm/vdso_datapage.h>
#include <asm/cputhreads.h>
#include <asm/xics.h>
#include <asm/xive.h>
#include <asm/opal.h>
#include <asm/runlatch.h>
#include <asm/code-patching.h>
#include <asm/dbell.h>
#include <asm/kvm_ppc.h>
#include <asm/ppc-opcode.h>
#include <asm/cpuidle.h>
#include <asm/kexec.h>
#include <asm/reg.h>
#include <asm/powernv.h>

#include "powernv.h"

#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

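/*
 * Per-CPU interrupt setup, run as each CPU comes online: apply the
 * POWER9 HMI workaround below, then initialize this CPU's view of the
 * interrupt controller (XIVE if enabled, XICS otherwise).
 */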
static void pnv_smp_setup_cpu(int cpu)
{
	/*
	 * P9 workaround for CI vector load (see traps.c),
	 * enable the corresponding HMI interrupt
	 */
	if (pvr_version_is(PVR_POWER9))
		mtspr(SPRN_HMEER, mfspr(SPRN_HMEER) | PPC_BIT(17));

	if (xive_enabled())
		xive_smp_setup_cpu();
	else if (cpu != boot_cpuid)
		xics_setup_cpu();
}

static int pnv_smp_kick_cpu(int nr)
{
	unsigned int pcpu;
	unsigned long start_here =
			__pa(ppc_function_entry(generic_secondary_smp_init));
	long rc;
	uint8_t status;

	if (nr < 0 || nr >= nr_cpu_ids)
		return -EINVAL;

	pcpu = get_hard_smp_processor_id(nr);
	/*
	 * If the CPU has already started, or OPAL is not supported,
	 * just kick it via the PACA.
	 */
	if (paca_ptrs[nr]->cpu_start || !firmware_has_feature(FW_FEATURE_OPAL))
		goto kick;

	/*
	 * At this point, the CPU can either be spinning on the way in
	 * from kexec or be inside OPAL waiting to be started for the
	 * first time. OPAL v3 lets us query the CPU state, so do that.
	 */
	rc = opal_query_cpu_status(pcpu, &status);
	if (rc != OPAL_SUCCESS) {
		pr_warn("OPAL Error %ld querying CPU %d state\n", rc, nr);
		return -ENODEV;
	}

	/*
	 * Already started, just kick it; it is probably coming from
	 * kexec and spinning.
	 */
	if (status == OPAL_THREAD_STARTED)
		goto kick;

	/*
	 * Available/inactive, let's kick it.
	 */
	if (status == OPAL_THREAD_INACTIVE) {
		pr_devel("OPAL: Starting CPU %d (HW 0x%x)...\n", nr, pcpu);
		rc = opal_start_cpu(pcpu, start_here);
		if (rc != OPAL_SUCCESS) {
			pr_warn("OPAL Error %ld starting CPU %d\n", rc, nr);
			return -ENODEV;
		}
	} else {
		/*
		 * An unavailable CPU (or any other unknown status)
		 * shouldn't be started. It also shouldn't be in the
		 * possible map, but currently that can happen.
		 */
		pr_devel("OPAL: CPU %d (HW 0x%x) is unavailable (status %d)...\n",
			 nr, pcpu, status);
		return -ENODEV;
	}

kick:
	return smp_generic_kick_cpu(nr);
}

#ifdef CONFIG_HOTPLUG_CPU

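/*
 * Runs on the CPU being offlined, with interrupts disabled: mark it
 * offline, hand boot_cpuid to another online CPU if necessary, and
 * stop taking interrupts (XIVE) or migrate them away (XICS).
 */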
static int pnv_smp_cpu_disable(void)
{
	int cpu = smp_processor_id();

	/* This is identical to pSeries... might consolidate by
	 * moving migrate_irqs_away to a ppc_md with default to
	 * the generic fixup_irqs. --BenH.
	 */
	set_cpu_online(cpu, false);
	vdso_data->processorCount--;
	if (cpu == boot_cpuid)
		boot_cpuid = cpumask_any(cpu_online_mask);
	if (xive_enabled())
		xive_smp_disable_cpu();
	else
		xics_migrate_irqs_away();
	return 0;
}

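/*
 * Acknowledge and discard whatever interrupt is pending on this CPU,
 * using the flush path that matches the hardware: XIVE or OPAL XICS
 * on POWER9 and later (ARCH_300), native XICS otherwise.
 */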
static void pnv_flush_interrupts(void)
{
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		if (xive_enabled())
			xive_flush_interrupt();
		else
			icp_opal_flush_interrupt();
	} else {
		icp_native_flush_interrupt();
	}
}

static void pnv_smp_cpu_kill_self(void)
{
	unsigned long srr1, unexpected_mask, wmask;
	unsigned int cpu;
	u64 lpcr_val;

	/* Standard hot unplug procedure */

	idle_task_exit();
	cpu = smp_processor_id();
	DBG("CPU%d offline\n", cpu);
	generic_set_cpu_dead(cpu);
	smp_wmb();

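	/* ISA 2.07 (POWER8) and later report wake reasons in a wider SRR1 field. */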
	wmask = SRR1_WAKEMASK;
	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		wmask = SRR1_WAKEMASK_P8;

	/*
	 * This turns the irq soft-disabled state we're called with into a
	 * hard-disabled state with pending irq_happened interrupts cleared.
	 *
	 * PACA_IRQ_DEC   - Decrementer should be ignored.
	 * PACA_IRQ_HMI   - Can be ignored, processing is done in real mode.
	 * PACA_IRQ_DBELL, EE, PMI - Unexpected.
	 */
	hard_irq_disable();
	if (generic_check_cpu_restart(cpu))
		goto out;

	unexpected_mask = ~(PACA_IRQ_DEC | PACA_IRQ_HMI | PACA_IRQ_HARD_DIS);
	if (local_paca->irq_happened & unexpected_mask) {
		if (local_paca->irq_happened & PACA_IRQ_EE)
			pnv_flush_interrupts();
		DBG("CPU%d Unexpected exit while offline irq_happened=%lx!\n",
				cpu, local_paca->irq_happened);
	}
	local_paca->irq_happened = PACA_IRQ_HARD_DIS;

	/*
	 * We don't want to take decrementer interrupts while we are
	 * offline, so clear LPCR:PECE1. We keep PECE2 (and
	 * LPCR_PECE_HVEE on P9) enabled so as to let IPIs in.
	 *
	 * If the CPU gets woken up by a special wakeup, ensure that
	 * the SLW engine sets LPCR with the decrementer bit cleared,
	 * else the CPU will come back to the kernel due to a spurious
	 * wakeup.
	 */
	lpcr_val = mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1;
	pnv_program_cpu_hotplug_lpcr(cpu, lpcr_val);

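	/*
	 * Park here in the platform's deepest idle state, waking only to
	 * handle the cases below, until asked to come back online.
	 */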
	while (!generic_check_cpu_restart(cpu)) {
		/*
		 * Clear IPI flag, since we don't handle IPIs while
		 * offline, except for those when changing micro-threading
		 * mode, which are handled explicitly below, and those
		 * for coming online, which are handled via
		 * generic_check_cpu_restart() calls.
		 */
		kvmppc_clear_host_ipi(cpu);

		srr1 = pnv_cpu_offline(cpu);

		WARN_ON_ONCE(!irqs_disabled());
		WARN_ON(lazy_irq_pending());

		/*
		 * If the SRR1 value indicates that we woke up due to
		 * an external interrupt, then clear the interrupt.
		 * We clear the interrupt before checking for the
		 * reason, so as to avoid a race where we wake up for
		 * some other reason, find nothing and clear the interrupt
		 * just as some other cpu is sending us an interrupt.
		 * If we returned from power7_nap as a result of
		 * having finished executing in a KVM guest, then srr1
		 * contains 0.
		 */
		if (((srr1 & wmask) == SRR1_WAKEEE) ||
		    ((srr1 & wmask) == SRR1_WAKEHVI)) {
			pnv_flush_interrupts();
		} else if ((srr1 & wmask) == SRR1_WAKEHDBELL) {
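			/* msgclr discards the pending doorbell exception. */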
			unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
			asm volatile(PPC_MSGCLR(%0) : : "r" (msg));
		} else if ((srr1 & wmask) == SRR1_WAKERESET) {
			irq_set_pending_from_srr1(srr1);
			/* Does not return */
		}

		smp_mb();

		/*
		 * For kdump kernels, we process the IPI and jump to
		 * crash_ipi_callback
		 */
		if (kdump_in_progress()) {
			/*
			 * If we got to this point, we've not used
			 * NMIs, otherwise we would have gone
			 * via the SRR1_WAKERESET path. We are
			 * using regular IPIs for waking up offline
			 * threads.
			 */
			struct pt_regs regs;

			ppc_save_regs(&regs);
			crash_ipi_callback(&regs);
			/* Does not return */
		}

		if (cpu_core_split_required())
			continue;

		if (srr1 && !generic_check_cpu_restart(cpu))
			DBG("CPU%d Unexpected exit while offline srr1=%lx!\n",
					cpu, srr1);
	}

	/*
	 * Re-enable decrementer interrupts in LPCR.
	 *
	 * Further, we want stop states to be woken up by the decrementer
	 * for non-hotplug cases. So program the LPCR via the stop API as
	 * well.
	 */
	lpcr_val = mfspr(SPRN_LPCR) | (u64)LPCR_PECE1;
	pnv_program_cpu_hotplug_lpcr(cpu, lpcr_val);
out:
	DBG("CPU%d coming online...\n", cpu);
}

#endif /* CONFIG_HOTPLUG_CPU */

static int pnv_cpu_bootable(unsigned int nr)
{
	/*
	 * Starting with POWER8, the subcore logic relies on all threads of a
	 * core being booted so that they can participate in split mode
	 * switches. So on those machines we ignore the smt_enabled_at_boot
	 * setting (smt-enabled on the kernel command line).
	 */
	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		return 1;

	return smp_generic_cpu_bootable(nr);
}

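/*
 * XIVE requires per-CPU setup before the CPU can be started; XICS
 * needs nothing here.
 */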
static int pnv_smp_prepare_cpu(int cpu)
{
	if (xive_enabled())
		return xive_smp_prepare_cpu(cpu);
	return 0;
}

/* Cause IPI as set up by the interrupt controller (xics or xive) */
static void (*ic_cause_ipi)(int cpu);

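/*
 * Prefer a doorbell for IPIs to threads on the same core, which avoids
 * a round trip through the interrupt controller; fall back to the
 * controller's IPI for remote cores.
 */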
static void pnv_cause_ipi(int cpu)
{
	if (doorbell_try_core_ipi(cpu))
		return;

	ic_cause_ipi(cpu);
}

static void __init pnv_smp_probe(void)
{
	if (xive_enabled())
		xive_smp_probe();
	else
		xics_smp_probe();

	if (cpu_has_feature(CPU_FTR_DBELL)) {
		ic_cause_ipi = smp_ops->cause_ipi;
		WARN_ON(!ic_cause_ipi);

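		/*
		 * POWER9 (ARCH_300) msgsnd doorbells can target any CPU
		 * in the system, so they replace the controller IPI
		 * entirely; older CPUs can only doorbell threads on the
		 * same core and need the hybrid path above.
		 */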
		if (cpu_has_feature(CPU_FTR_ARCH_300))
			smp_ops->cause_ipi = doorbell_global_ipi;
		else
			smp_ops->cause_ipi = pnv_cause_ipi;
	}
}

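/*
 * NMI IPIs arrive as system reset exceptions; give the generic NMI IPI
 * code a chance to claim them before normal reset handling runs.
 */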
static int pnv_system_reset_exception(struct pt_regs *regs)
{
	if (smp_handle_nmi_ipi(regs))
		return 1;
	return 0;
}

static int pnv_cause_nmi_ipi(int cpu)
{
	int64_t rc;

	if (cpu >= 0) {
		int h = get_hard_smp_processor_id(cpu);

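		/*
		 * Quiescing OPAL around the reset should ensure the
		 * target is not interrupted while inside firmware; the
		 * token checks keep this working on firmware without
		 * OPAL_QUIESCE support.
		 */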
		if (opal_check_token(OPAL_QUIESCE))
			opal_quiesce(QUIESCE_HOLD, h);

		rc = opal_signal_system_reset(h);

		if (opal_check_token(OPAL_QUIESCE))
			opal_quiesce(QUIESCE_RESUME, h);

		if (rc != OPAL_SUCCESS)
			return 0;
		return 1;

	} else if (cpu == NMI_IPI_ALL_OTHERS) {
		bool success = true;
		int c;

		if (opal_check_token(OPAL_QUIESCE))
			opal_quiesce(QUIESCE_HOLD, -1);

		/*
		 * We do not use broadcasts (yet), because it's not clear
		 * exactly what semantics Linux wants or the firmware should
		 * provide.
		 */
		for_each_online_cpu(c) {
			if (c == smp_processor_id())
				continue;

			rc = opal_signal_system_reset(
						get_hard_smp_processor_id(c));
			if (rc != OPAL_SUCCESS)
				success = false;
		}

		if (opal_check_token(OPAL_QUIESCE))
			opal_quiesce(QUIESCE_RESUME, -1);

		if (success)
			return 1;

		/*
		 * Caller will fall back to doorbells, which may pick
		 * up the remainders.
		 */
	}

	return 0;
}

static struct smp_ops_t pnv_smp_ops = {
	.message_pass	= NULL, /* Use smp_muxed_ipi_message_pass */
	.cause_ipi	= NULL,	/* Filled at runtime by pnv_smp_probe() */
	.cause_nmi_ipi	= NULL,
	.probe		= pnv_smp_probe,
	.prepare_cpu	= pnv_smp_prepare_cpu,
	.kick_cpu	= pnv_smp_kick_cpu,
	.setup_cpu	= pnv_smp_setup_cpu,
	.cpu_bootable	= pnv_cpu_bootable,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_disable	= pnv_smp_cpu_disable,
	.cpu_die	= generic_cpu_die,
#endif /* CONFIG_HOTPLUG_CPU */
};

/* This is called very early during platform setup_arch */
void __init pnv_smp_init(void)
{
	if (opal_check_token(OPAL_SIGNAL_SYSTEM_RESET)) {
		ppc_md.system_reset_exception = pnv_system_reset_exception;
		pnv_smp_ops.cause_nmi_ipi = pnv_cause_nmi_ipi;
	}
	smp_ops = &pnv_smp_ops;

#ifdef CONFIG_HOTPLUG_CPU
	ppc_md.cpu_die	= pnv_smp_cpu_kill_self;
#ifdef CONFIG_KEXEC_CORE
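	/*
	 * Offline CPUs are parked in pnv_smp_cpu_kill_self(); kdump must
	 * wake them with an IPI so they can enter crash_ipi_callback().
	 */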
	crash_wake_offline = 1;
#endif
#endif
}