// SPDX-License-Identifier: GPL-2.0-only
/*
 * Architecture specific (PPC64) functions for kexec based crash dumps.
 *
 * Copyright (C) 2005, IBM Corp.
 *
 * Created by: Haren Myneni
 */

#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/reboot.h>
#include <linux/kexec.h>
#include <linux/export.h>
#include <linux/crash_dump.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/types.h>

#include <asm/processor.h>
#include <asm/machdep.h>
#include <asm/kexec.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/setjmp.h>
#include <asm/debug.h>
#include <asm/interrupt.h>

/*
 * The primary CPU waits a while for all secondary CPUs to enter. This is to
 * avoid sending an IPI if the secondary CPUs are entering
 * crash_kexec_secondary on their own (e.g. via a system reset).
 *
 * The secondary timeout has to be longer than the primary. Both timeouts are
 * in milliseconds.
 */
#define PRIMARY_TIMEOUT		500
#define SECONDARY_TIMEOUT	1000

#define IPI_TIMEOUT		10000
#define REAL_MODE_TIMEOUT	10000

static int time_to_dump;
/*
 * crash_wake_offline should be set to 1 by platforms that intend to wake
 * up offline cpus prior to jumping to a kdump kernel. Currently powernv
 * sets it to 1, to avoid problems when an offline CPU wakes up due to
 * something like an HMI (malfunction error), which propagates to all
 * threads.
 */
int crash_wake_offline;

#define CRASH_HANDLER_MAX 3
/* List of shutdown handles */
static crash_shutdown_t crash_shutdown_handles[CRASH_HANDLER_MAX];
static DEFINE_SPINLOCK(crash_handlers_lock);

static unsigned long crash_shutdown_buf[JMP_BUF_LEN];
static int crash_shutdown_cpu = -1;

static int handle_fault(struct pt_regs *regs)
{
	if (crash_shutdown_cpu == smp_processor_id())
		longjmp(crash_shutdown_buf, 1);
	return 0;
}

#ifdef CONFIG_SMP

static atomic_t cpus_in_crash;
void crash_ipi_callback(struct pt_regs *regs)
{
	static cpumask_t cpus_state_saved = CPU_MASK_NONE;

	int cpu = smp_processor_id();

	hard_irq_disable();
	if (!cpumask_test_cpu(cpu, &cpus_state_saved)) {
		crash_save_cpu(regs, cpu);
		cpumask_set_cpu(cpu, &cpus_state_saved);
	}

	atomic_inc(&cpus_in_crash);
	smp_mb__after_atomic();

	/*
	 * Spin until the crashing CPU signals (via time_to_dump) that the
	 * kdump boot is starting. This makes sure all CPUs have stopped
	 * before the dump is taken.
	 */
	while (!time_to_dump)
		cpu_relax();

	if (ppc_md.kexec_cpu_down)
		ppc_md.kexec_cpu_down(1, 1);

#ifdef CONFIG_PPC64
	kexec_smp_wait();
#else
	for (;;);	/* FIXME */
#endif

	/* NOTREACHED */
}
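
/*
 * Overview of the shutdown handshake implemented in this file: the crashing
 * CPU sends a crash IPI and polls cpus_in_crash until every other CPU has
 * checked in. A secondary that enters (via the IPI, or on its own through a
 * system reset) saves its register state exactly once, increments
 * cpus_in_crash, and then parks itself spinning on time_to_dump until the
 * crashing CPU has finished preparing the dump.
 */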

static void crash_kexec_prepare_cpus(int cpu)
{
	unsigned int msecs;
	volatile unsigned int ncpus = num_online_cpus() - 1; /* Excluding the panic cpu */
	volatile int tries = 0;
	int (*old_handler)(struct pt_regs *regs);

	printk(KERN_EMERG "Sending IPI to other CPUs\n");

	if (crash_wake_offline)
		ncpus = num_present_cpus() - 1;

	crash_send_ipi(crash_ipi_callback);
	smp_wmb();

again:
	/*
	 * FIXME: Until we have a way to stop other CPUs reliably, the
	 * crash CPU will send an IPI and wait for other CPUs to respond.
	 */
	msecs = IPI_TIMEOUT;
	while ((atomic_read(&cpus_in_crash) < ncpus) && (--msecs > 0))
		mdelay(1);

	/* Would it be better to replace the trap vector here? */

	if (atomic_read(&cpus_in_crash) >= ncpus) {
		printk(KERN_EMERG "IPI complete\n");
		return;
	}

	printk(KERN_EMERG "ERROR: %d cpu(s) not responding\n",
	       ncpus - atomic_read(&cpus_in_crash));

	/*
	 * If we have a panic timeout set then we can't wait indefinitely
	 * for someone to activate system reset. We also give up on the
	 * second time through if system reset fails to work.
	 */
	if ((panic_timeout > 0) || (tries > 0))
		return;

	/*
	 * A system reset will cause all CPUs to take an 0x100 exception.
	 * The primary CPU returns here via setjmp, and the secondary
	 * CPUs re-execute the crash_kexec_secondary path.
	 */
	old_handler = __debugger;
	__debugger = handle_fault;
	crash_shutdown_cpu = smp_processor_id();

	if (setjmp(crash_shutdown_buf) == 0) {
		printk(KERN_EMERG "Activate system reset (dumprestart) to stop other cpu(s)\n");

		/*
		 * A system reset will force all CPUs to execute the
		 * crash code again. We need to reset cpus_in_crash so we
		 * wait for everyone to do this.
		 */
		atomic_set(&cpus_in_crash, 0);
		smp_mb();

		while (atomic_read(&cpus_in_crash) < ncpus)
			cpu_relax();
	}

	crash_shutdown_cpu = -1;
	__debugger = old_handler;

	tries++;
	goto again;
}

/*
 * This function will be called by secondary cpus.
 */
void crash_kexec_secondary(struct pt_regs *regs)
{
	unsigned long flags;
	int msecs = SECONDARY_TIMEOUT;

	local_irq_save(flags);

	/* Wait for the primary crash CPU to signal its progress */
	while (crashing_cpu < 0) {
		if (--msecs < 0) {
			/* No response, kdump image may not have been loaded */
			local_irq_restore(flags);
			return;
		}

		mdelay(1);
	}

	crash_ipi_callback(regs);
}

#else	/* ! CONFIG_SMP */

static void crash_kexec_prepare_cpus(int cpu)
{
	/*
	 * move the secondaries to us so that we can copy
	 * the new kernel 0-0x100 safely
	 *
	 * do this if kexec in setup.c ?
	 */
#ifdef CONFIG_PPC64
	smp_release_cpus();
#else
	/* FIXME */
#endif
}

void crash_kexec_secondary(struct pt_regs *regs)
{
}
#endif	/* CONFIG_SMP */

/* wait for all the CPUs to hit real mode but timeout if they don't come in */
#if defined(CONFIG_SMP) && defined(CONFIG_PPC64)
static void __maybe_unused crash_kexec_wait_realmode(int cpu)
{
	unsigned int msecs;
	int i;

	msecs = REAL_MODE_TIMEOUT;
	for (i = 0; i < nr_cpu_ids && msecs > 0; i++) {
		if (i == cpu)
			continue;

		while (paca_ptrs[i]->kexec_state < KEXEC_STATE_REAL_MODE) {
			barrier();
			if (!cpu_possible(i) || !cpu_online(i) || (msecs <= 0))
				break;
			msecs--;
			mdelay(1);
		}
	}
	mb();
}
#else
static inline void crash_kexec_wait_realmode(int cpu) {}
#endif	/* CONFIG_SMP && CONFIG_PPC64 */
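
/*
 * Note: crash_kexec_wait_realmode() shares a single REAL_MODE_TIMEOUT
 * millisecond budget across all CPUs (msecs is decremented inside the
 * per-CPU wait loop) rather than waiting up to the full timeout for each
 * CPU individually.
 */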

/*
 * Register a function to be called on shutdown. Only use this if you
 * can't reset your device in the second kernel.
 */
int crash_shutdown_register(crash_shutdown_t handler)
{
	unsigned int i, rc;

	spin_lock(&crash_handlers_lock);
	for (i = 0; i < CRASH_HANDLER_MAX; i++)
		if (!crash_shutdown_handles[i]) {
			/* Insert handler at first empty entry */
			crash_shutdown_handles[i] = handler;
			rc = 0;
			break;
		}

	if (i == CRASH_HANDLER_MAX) {
		printk(KERN_ERR "Crash shutdown handles full, not registered.\n");
		rc = 1;
	}

	spin_unlock(&crash_handlers_lock);
	return rc;
}
EXPORT_SYMBOL(crash_shutdown_register);

int crash_shutdown_unregister(crash_shutdown_t handler)
{
	unsigned int i, rc;

	spin_lock(&crash_handlers_lock);
	for (i = 0; i < CRASH_HANDLER_MAX; i++)
		if (crash_shutdown_handles[i] == handler)
			break;

	if (i == CRASH_HANDLER_MAX) {
		printk(KERN_ERR "Crash shutdown handle not found\n");
		rc = 1;
	} else {
		/* Shift handles down */
		for (; i < (CRASH_HANDLER_MAX - 1); i++)
			crash_shutdown_handles[i] = crash_shutdown_handles[i + 1];
		/*
		 * Reset the last entry to NULL now that it has been shifted
		 * down, so that new handles can be added there.
		 */
		crash_shutdown_handles[i] = NULL;
		rc = 0;
	}

	spin_unlock(&crash_handlers_lock);
	return rc;
}
EXPORT_SYMBOL(crash_shutdown_unregister);

void default_machine_crash_shutdown(struct pt_regs *regs)
{
	unsigned int i;
	int (*old_handler)(struct pt_regs *regs);

	/* Avoid hardlocking with an unresponsive CPU holding logbuf_lock */
	printk_deferred_enter();

	/*
	 * This function is only called after the system
	 * has panicked or is otherwise in a critical state.
	 * The minimum amount of code to allow a kexec'd kernel
	 * to run successfully needs to happen here.
	 *
	 * In practice this means stopping other cpus in
	 * an SMP system.
	 * The kernel is broken so disable interrupts.
	 */
	hard_irq_disable();

	/*
	 * Make a note of the crashing cpu. It will be used in machine_kexec()
	 * so that another IPI is not sent.
	 */
	crashing_cpu = smp_processor_id();

	/*
	 * If we came in via system reset, wait a while for the secondary
	 * CPUs to enter.
	 */
	if (TRAP(regs) == INTERRUPT_SYSTEM_RESET)
		mdelay(PRIMARY_TIMEOUT);

	crash_kexec_prepare_cpus(crashing_cpu);

	crash_save_cpu(regs, crashing_cpu);

	time_to_dump = 1;

	crash_kexec_wait_realmode(crashing_cpu);

	machine_kexec_mask_interrupts();

	/*
	 * Call registered shutdown routines safely. Swap out
	 * __debugger_fault_handler, and replace on exit.
	 */
	old_handler = __debugger_fault_handler;
	__debugger_fault_handler = handle_fault;
	crash_shutdown_cpu = smp_processor_id();
	for (i = 0; i < CRASH_HANDLER_MAX && crash_shutdown_handles[i]; i++) {
		if (setjmp(crash_shutdown_buf) == 0) {
			/*
			 * Insert syncs and delay to ensure
			 * instructions in the dangerous region don't
			 * leak away from this protected region.
			 */
			asm volatile("sync; isync");
			/* dangerous region */
			crash_shutdown_handles[i]();
			asm volatile("sync; isync");
		}
	}
	crash_shutdown_cpu = -1;
	__debugger_fault_handler = old_handler;

	if (ppc_md.kexec_cpu_down)
		ppc_md.kexec_cpu_down(1, 0);
}
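
/*
 * Illustrative sketch (not part of this file): how a driver that cannot
 * reset its device from the second kernel might use the registration API
 * above. example_device_quiesce() is a hypothetical helper standing in for
 * whatever device-specific quiescing is needed; crash_shutdown_register()
 * and crash_shutdown_unregister() are the real interfaces defined in this
 * file. A crash_shutdown_t takes no arguments and returns void, and the
 * handler runs after the kernel has crashed with interrupts hard-disabled,
 * so it must not sleep.
 *
 *	static void example_crash_shutdown(void)
 *	{
 *		example_device_quiesce();	// hypothetical: stop DMA, mask the device
 *	}
 *
 *	// On probe (returns 0 on success, 1 if the handler table is full):
 *	if (crash_shutdown_register(example_crash_shutdown))
 *		pr_warn("example: crash shutdown handler not registered\n");
 *
 *	// On remove:
 *	crash_shutdown_unregister(example_crash_shutdown);
 */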