/*
 * SMP support for PowerNV machines.
 *
 * Copyright 2011 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/cpu.h>

#include <asm/irq.h>
#include <asm/smp.h>
#include <asm/paca.h>
#include <asm/machdep.h>
#include <asm/cputable.h>
#include <asm/firmware.h>
#include <asm/rtas.h>
#include <asm/vdso_datapage.h>
#include <asm/cputhreads.h>
#include <asm/xics.h>
#include <asm/opal.h>
#include <asm/runlatch.h>
#include <asm/code-patching.h>
#include <asm/dbell.h>

#include "powernv.h"

#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

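/*
 * Per-CPU SMP setup: initialize the XICS interrupt presentation
 * controller for the calling CPU (the boot CPU is already set up
 * earlier in boot) and, where the hardware supports it, set up
 * doorbell (msgsnd) support so doorbells can be used for intra-core
 * IPIs.
 */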
static void pnv_smp_setup_cpu(int cpu)
{
	if (cpu != boot_cpuid)
		xics_setup_cpu();

#ifdef CONFIG_PPC_DOORBELL
	if (cpu_has_feature(CPU_FTR_DBELL))
		doorbell_setup_this_cpu();
#endif
}

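/*
 * Start a secondary CPU. The target thread may be sitting in OPAL
 * waiting for its first start, or already spinning in the kernel
 * (e.g. after a kexec); depending on its state, either ask OPAL to
 * start it at generic_secondary_smp_init() or just release it by
 * setting cpu_start in its PACA.
 */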
static int pnv_smp_kick_cpu(int nr)
{
	unsigned int pcpu = get_hard_smp_processor_id(nr);
	unsigned long start_here =
			__pa(ppc_function_entry(generic_secondary_smp_init));
	long rc;

	BUG_ON(nr < 0 || nr >= NR_CPUS);

	/*
	 * If the CPU has already started, or OPALv2 is not supported,
	 * just kick it via the PACA.
	 */
	if (paca[nr].cpu_start || !firmware_has_feature(FW_FEATURE_OPALv2))
		goto kick;

	/*
	 * At this point, the CPU can either be spinning on the way in
	 * from kexec or be inside OPAL waiting to be started for the
	 * first time. OPAL v3 lets us query whether OPAL still owns
	 * the CPU, so do that.
	 */
	if (firmware_has_feature(FW_FEATURE_OPALv3)) {
		uint8_t status;

		rc = opal_query_cpu_status(pcpu, &status);
		if (rc != OPAL_SUCCESS) {
			pr_warn("OPAL Error %ld querying CPU %d state\n",
				rc, nr);
			return -ENODEV;
		}

		/*
		 * Already started: it is probably spinning after a
		 * kexec, so just kick it.
		 */
		if (status == OPAL_THREAD_STARTED)
			goto kick;

		/*
		 * Available/inactive: ask OPAL to start it.
		 */
		if (status == OPAL_THREAD_INACTIVE) {
			pr_devel("OPAL: Starting CPU %d (HW 0x%x)...\n",
				 nr, pcpu);
			rc = opal_start_cpu(pcpu, start_here);
			if (rc != OPAL_SUCCESS) {
				pr_warn("OPAL Error %ld starting CPU %d\n",
					rc, nr);
				return -ENODEV;
			}
		} else {
			/*
			 * An unavailable CPU (or any other unknown status)
			 * shouldn't be started. Such a CPU should also not
			 * be in the possible map, but currently that can
			 * happen.
			 */
			pr_devel("OPAL: CPU %d (HW 0x%x) is unavailable"
				 " (status %d)...\n", nr, pcpu, status);
			return -ENODEV;
		}
	} else {
		/*
		 * On OPAL v2 we just kick the CPU and hope for the best;
		 * we must not test the return value of opal_start_cpu()
		 * or we would fail to bring up CPUs after a kexec.
		 */
		opal_start_cpu(pcpu, start_here);
	}
 kick:
	return smp_generic_kick_cpu(nr);
}

#ifdef CONFIG_HOTPLUG_CPU

static int pnv_smp_cpu_disable(void)
{
	int cpu = smp_processor_id();

	/* This is identical to pSeries... might consolidate by
	 * moving migrate_irqs_away to a ppc_md hook, with the generic
	 * fixup_irqs as the default. --BenH.
	 */
	set_cpu_online(cpu, false);
	vdso_data->processorCount--;
	if (cpu == boot_cpuid)
		boot_cpuid = cpumask_any(cpu_online_mask);
	xics_migrate_irqs_away();
	return 0;
}

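/*
 * Called on a CPU that is going offline: park it in the deepest idle
 * state the platform supports until the kernel asks for it back.
 */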
static void pnv_smp_cpu_kill_self(void)
{
	unsigned int cpu;
	unsigned long srr1;
	u32 idle_states;

	/* Standard hot unplug procedure */
	local_irq_disable();
	idle_task_exit();
	current->active_mm = NULL; /* for sanity */
	cpu = smp_processor_id();
	DBG("CPU%d offline\n", cpu);
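	/* Mark ourselves dead; the CPU driving the unplug waits for this
	 * state in generic_cpu_die() before completing the offline.
	 */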
	generic_set_cpu_dead(cpu);
	smp_wmb();

	idle_states = pnv_get_supported_cpuidle_states();
	/* We don't want to take decrementer interrupts while we are offline,
	 * so clear LPCR:PECE1. We keep PECE2 enabled.
	 */
	mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1);
	while (!generic_check_cpu_restart(cpu)) {

		ppc64_runlatch_off();

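		/* Enter the deepest idle state the firmware advertises:
		 * winkle if available, otherwise sleep, otherwise nap.
		 */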
		if (idle_states & OPAL_PM_WINKLE_ENABLED)
			srr1 = power7_winkle();
		else if ((idle_states & OPAL_PM_SLEEP_ENABLED) ||
				(idle_states & OPAL_PM_SLEEP_ENABLED_ER1))
			srr1 = power7_sleep();
		else
			srr1 = power7_nap(1);

		ppc64_runlatch_on();

		/*
		 * If the SRR1 value indicates that we woke up due to
		 * an external interrupt, then clear the interrupt.
		 * We clear the interrupt before checking for the
		 * reason, so as to avoid a race where we wake up for
		 * some other reason, find nothing and clear the interrupt
		 * just as some other cpu is sending us an interrupt.
		 * If we returned from power7_nap as a result of
		 * having finished executing in a KVM guest, then srr1
		 * contains 0.
		 */
		if ((srr1 & SRR1_WAKEMASK) == SRR1_WAKEEE) {
			icp_native_flush_interrupt();
			local_paca->irq_happened &= PACA_IRQ_HARD_DIS;
			smp_mb();
		}

		if (cpu_core_split_required())
			continue;

		if (!generic_check_cpu_restart(cpu))
			DBG("CPU%d Unexpected exit while offline!\n", cpu);
	}
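	/* Restore decrementer wakeups (PECE1) now that we are coming
	 * back online.
	 */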
	mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_PECE1);
	DBG("CPU%d coming online...\n", cpu);
}

#endif /* CONFIG_HOTPLUG_CPU */

static int pnv_cpu_bootable(unsigned int nr)
{
	/*
	 * Starting with POWER8, the subcore logic relies on all threads of a
	 * core being booted so that they can participate in split mode
	 * switches. So on those machines we ignore the smt_enabled_at_boot
	 * setting (smt-enabled on the kernel command line).
	 */
	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		return 1;

	return smp_generic_cpu_bootable(nr);
}

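/*
 * SMP callbacks for PowerNV: XICS-based IPIs and OPAL-assisted
 * secondary CPU startup, plus standard CPU hotplug support when
 * enabled.
 */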
static struct smp_ops_t pnv_smp_ops = {
	.message_pass	= smp_muxed_ipi_message_pass,
	.cause_ipi	= NULL,	/* Filled at runtime by xics_smp_probe() */
	.probe		= xics_smp_probe,
	.kick_cpu	= pnv_smp_kick_cpu,
	.setup_cpu	= pnv_smp_setup_cpu,
	.cpu_bootable	= pnv_cpu_bootable,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_disable	= pnv_smp_cpu_disable,
	.cpu_die	= generic_cpu_die,
#endif /* CONFIG_HOTPLUG_CPU */
};

/* This is called very early during platform setup_arch */
void __init pnv_smp_init(void)
{
	smp_ops = &pnv_smp_ops;

	/* XXX We don't yet have a proper entry point from HAL; for
	 * now we rely on kexec-style entry from BML.
	 */

#ifdef CONFIG_PPC_RTAS
	/* Non-LPAR needs the additional take/give timebase protocol */
	if (rtas_token("freeze-time-base") != RTAS_UNKNOWN_SERVICE) {
		smp_ops->give_timebase = rtas_give_timebase;
		smp_ops->take_timebase = rtas_take_timebase;
	}
#endif /* CONFIG_PPC_RTAS */

#ifdef CONFIG_HOTPLUG_CPU
	ppc_md.cpu_die	= pnv_smp_cpu_kill_self;
#endif
}