// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SMP support for pSeries machines.
 *
 * Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
 *
 * Plus various changes from other IBM teams...
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/pgtable.h>

#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/paca.h>
#include <asm/machdep.h>
#include <asm/cputable.h>
#include <asm/firmware.h>
#include <asm/rtas.h>
#include <asm/vdso_datapage.h>
#include <asm/cputhreads.h>
#include <asm/xics.h>
#include <asm/xive.h>
#include <asm/dbell.h>
#include <asm/plpar_wrappers.h>
#include <asm/code-patching.h>
#include <asm/svm.h>

#include "pseries.h"

/*
 * The primary thread of each non-boot processor was started from the OF
 * client interface by prom_hold_cpus and is spinning on
 * secondary_hold_spinloop.
 */
static cpumask_var_t of_spin_mask;

/* Query where a cpu is now.  Return codes #defined in plpar_wrappers.h */
int smp_query_cpu_stopped(unsigned int pcpu)
{
	int cpu_status, status;
	int qcss_tok = rtas_token("query-cpu-stopped-state");

	if (qcss_tok == RTAS_UNKNOWN_SERVICE) {
		printk_once(KERN_INFO
			"Firmware doesn't support query-cpu-stopped-state\n");
		return QCSS_HARDWARE_ERROR;
	}

	status = rtas_call(qcss_tok, 1, 2, &cpu_status, pcpu);
	if (status != 0) {
		printk(KERN_ERR
		       "RTAS query-cpu-stopped-state failed: %i\n", status);
		return status;
	}

	return cpu_status;
}

/**
 * smp_startup_cpu() - start the given cpu
 * @lcpu: Logical CPU number of the CPU to start.
 *
 * At boot time, there is nothing to do for primary threads which were
 * started from Open Firmware.  For anything else, call RTAS with the
 * appropriate start location.
 *
 * Returns:
 *	0 - failure
 *	1 - success
 */
static inline int smp_startup_cpu(unsigned int lcpu)
{
	int status;
	unsigned long start_here =
			__pa(ppc_function_entry(generic_secondary_smp_init));
	unsigned int pcpu;
	int start_cpu;

	if (cpumask_test_cpu(lcpu, of_spin_mask))
		/* Already started by OF and sitting in spin loop */
		return 1;

	pcpu = get_hard_smp_processor_id(lcpu);

	/* Check to see if the CPU is already out of firmware (kexec case). */
	if (smp_query_cpu_stopped(pcpu) == QCSS_NOT_STOPPED) {
		cpumask_set_cpu(lcpu, of_spin_mask);
		return 1;
	}

	/* Fix up the preempt count: the CPU exited inside an IRQ handler. */
	task_thread_info(paca_ptrs[lcpu]->__current)->preempt_count = 0;

	/*
	 * If the RTAS start-cpu token does not exist then presume the
	 * cpu is already spinning.
	 */
	start_cpu = rtas_token("start-cpu");
	if (start_cpu == RTAS_UNKNOWN_SERVICE)
		return 1;

	status = rtas_call(start_cpu, 3, 1, NULL, pcpu, start_here, pcpu);
	if (status != 0) {
		printk(KERN_ERR "start-cpu failed: %i\n", status);
		return 0;
	}

	return 1;
}
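
/*
 * Illustrative sketch (not part of the original file): the QCSS_* codes
 * returned by smp_query_cpu_stopped() above are typically consumed by a
 * polling loop, e.g. the CPU offline path waiting for a dying CPU to
 * re-enter firmware via rtas stop-self.  The helper name and the retry
 * budget below are hypothetical; only smp_query_cpu_stopped() and the
 * QCSS_* codes come from this file and plpar_wrappers.h.
 */
static void __maybe_unused example_wait_for_cpu_stopped(unsigned int lcpu)
{
	unsigned int pcpu = get_hard_smp_processor_id(lcpu);
	int i, status;

	for (i = 0; i < 25; i++) {
		status = smp_query_cpu_stopped(pcpu);
		if (status == QCSS_STOPPED || status == QCSS_HARDWARE_ERROR)
			break;
		/* Give the target CPU time to reach firmware */
		msleep(1);
	}
}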

static void smp_setup_cpu(int cpu)
{
	if (xive_enabled())
		xive_smp_setup_cpu();
	else if (cpu != boot_cpuid)
		xics_setup_cpu();

	if (firmware_has_feature(FW_FEATURE_SPLPAR))
		vpa_init(cpu);

	cpumask_clear_cpu(cpu, of_spin_mask);
}

static int smp_pSeries_kick_cpu(int nr)
{
	if (nr < 0 || nr >= nr_cpu_ids)
		return -EINVAL;

	if (!smp_startup_cpu(nr))
		return -ENOENT;

	/*
	 * The processor is currently spinning, waiting for the
	 * cpu_start field to become non-zero.  After we set cpu_start,
	 * the processor will continue on to secondary_start.
	 */
	paca_ptrs[nr]->cpu_start = 1;

	return 0;
}

static int pseries_smp_prepare_cpu(int cpu)
{
	if (xive_enabled())
		return xive_smp_prepare_cpu(cpu);
	return 0;
}

/* Cause IPI as set up by the interrupt controller (xics or xive) */
static void (*ic_cause_ipi)(int cpu) __ro_after_init;

/*
 * Use msgsndp doorbells when the target is a sibling, else use the
 * interrupt controller.
 */
static void dbell_or_ic_cause_ipi(int cpu)
{
	if (doorbell_try_core_ipi(cpu))
		return;

	ic_cause_ipi(cpu);
}

static int pseries_cause_nmi_ipi(int cpu)
{
	int hwcpu;

	if (cpu == NMI_IPI_ALL_OTHERS) {
		hwcpu = H_SIGNAL_SYS_RESET_ALL_OTHERS;
	} else {
		if (cpu < 0) {
			WARN_ONCE(true, "incorrect cpu parameter %d", cpu);
			return 0;
		}

		hwcpu = get_hard_smp_processor_id(cpu);
	}

	if (plpar_signal_sys_reset(hwcpu) == H_SUCCESS)
		return 1;

	return 0;
}

static __init void pSeries_smp_probe(void)
{
	if (xive_enabled())
		xive_smp_probe();
	else
		xics_smp_probe();

	/* No doorbell facility, must use the interrupt controller for IPIs */
	if (!cpu_has_feature(CPU_FTR_DBELL))
		return;

	/* Doorbells can only be used for IPIs between SMT siblings */
	if (!cpu_has_feature(CPU_FTR_SMT))
		return;

	if (is_kvm_guest()) {
		/*
		 * KVM emulates doorbells by disabling FSCR[MSGP], so msgsndp
		 * faults to the hypervisor, which then reads the instruction
		 * from guest memory; that tends to be slower than using XIVE.
		 */
		if (xive_enabled())
			return;

		/*
		 * XICS hcalls aren't as fast, so we can use msgsndp (which
		 * also helps exercise KVM's emulation).  However, KVM can't
		 * emulate doorbells for secure guests because it can't read
		 * the instruction out of their memory.
		 */
		if (is_secure_guest())
			return;
	}

	/*
	 * Under PowerVM, FSCR[MSGP] is enabled because guest vCPU siblings
	 * are gang-scheduled on the same physical core, so doorbells are
	 * always faster than the interrupt controller, and they can be used
	 * by secure guests.
	 */

	ic_cause_ipi = smp_ops->cause_ipi;
	smp_ops->cause_ipi = dbell_or_ic_cause_ipi;
}
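
/*
 * Illustrative sketch (not part of the original file): pseries_cause_nmi_ipi()
 * above is not called directly; it is the platform backend behind the generic
 * powerpc NMI IPI layer, reached e.g. by xmon or the hard lockup detector.
 * The two example_* names below are hypothetical; smp_send_nmi_ipi() and
 * NMI_IPI_ALL_OTHERS come from the powerpc SMP core.
 */
static void __maybe_unused example_nmi_handler(struct pt_regs *regs)
{
	/* Runs on each target CPU in system-reset context */
}

static void __maybe_unused example_broadcast_nmi(void)
{
	/*
	 * Ends up in smp_ops->cause_nmi_ipi(), i.e. pseries_cause_nmi_ipi(),
	 * which signals a system reset via plpar_signal_sys_reset().  The
	 * last argument is the delay budget in microseconds.
	 */
	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, example_nmi_handler, 1000000);
}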

static struct smp_ops_t pseries_smp_ops = {
	.message_pass	= NULL,	/* Use smp_muxed_ipi_message_pass */
	.cause_ipi	= NULL,	/* Filled at runtime by pSeries_smp_probe() */
	.cause_nmi_ipi	= pseries_cause_nmi_ipi,
	.probe		= pSeries_smp_probe,
	.prepare_cpu	= pseries_smp_prepare_cpu,
	.kick_cpu	= smp_pSeries_kick_cpu,
	.setup_cpu	= smp_setup_cpu,
	.cpu_bootable	= smp_generic_cpu_bootable,
};

/* This is called very early */
void __init smp_init_pseries(void)
{
	int i;

	pr_debug(" -> smp_init_pSeries()\n");
	smp_ops = &pseries_smp_ops;

	alloc_bootmem_cpumask_var(&of_spin_mask);

	/*
	 * Mark threads which are still spinning in hold loops.
	 *
	 * We know prom_init will not have started them if RTAS supports
	 * query-cpu-stopped-state.
	 */
	if (rtas_token("query-cpu-stopped-state") == RTAS_UNKNOWN_SERVICE) {
		if (cpu_has_feature(CPU_FTR_SMT)) {
			for_each_present_cpu(i) {
				if (cpu_thread_in_core(i) == 0)
					cpumask_set_cpu(i, of_spin_mask);
			}
		} else
			cpumask_copy(of_spin_mask, cpu_present_mask);

		cpumask_clear_cpu(boot_cpuid, of_spin_mask);
	}

	/* Non-LPAR systems need the extra RTAS timebase give/take handshake */
	if (rtas_token("freeze-time-base") != RTAS_UNKNOWN_SERVICE) {
		smp_ops->give_timebase = rtas_give_timebase;
		smp_ops->take_timebase = rtas_take_timebase;
	}

	pr_debug(" <- smp_init_pSeries()\n");
}
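
/*
 * Illustrative sketch (not part of the original file): the other side of the
 * handshake that smp_pSeries_kick_cpu() completes.  A secondary released by
 * RTAS start-cpu enters generic_secondary_smp_init (assembly, head_64.S); in
 * C terms it holds in a loop like the hypothetical function below until the
 * kick sets cpu_start, then continues on to secondary_start as the comment in
 * smp_pSeries_kick_cpu() notes.  This is a paraphrase for illustration only.
 */
static void __maybe_unused example_secondary_spin(int nr)
{
	/* Hold until smp_pSeries_kick_cpu(nr) sets cpu_start to non-zero */
	while (!paca_ptrs[nr]->cpu_start)
		cpu_relax();
	/* ...the real assembly then branches toward secondary_start */
}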