/*
 * Copyright (C) 2001 Dave Engebretsen IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/* Change Activity:
 * 2001/09/21 : engebret : Created with minimal EPOW and HW exception support.
 * End Change Activity
 */

#include <linux/errno.h>
#include <linux/threads.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/random.h>
#include <linux/sysrq.h>
#include <linux/bitops.h>

#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/cache.h>
#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/machdep.h>
#include <asm/rtas.h>
#include <asm/udbg.h>
#include <asm/firmware.h>

#include "ras.h"

static unsigned char ras_log_buf[RTAS_ERROR_LOG_MAX];
static DEFINE_SPINLOCK(ras_log_buf_lock);

char mce_data_buf[RTAS_ERROR_LOG_MAX];

static int ras_get_sensor_state_token;
static int ras_check_exception_token;

#define EPOW_SENSOR_TOKEN	9
#define EPOW_SENSOR_INDEX	0
#define RAS_VECTOR_OFFSET	0x500

static irqreturn_t ras_epow_interrupt(int irq, void *dev_id,
				      struct pt_regs *regs);
static irqreturn_t ras_error_interrupt(int irq, void *dev_id,
				       struct pt_regs *regs);

/* #define DEBUG */


static void request_ras_irqs(struct device_node *np,
			irqreturn_t (*handler)(int, void *, struct pt_regs *),
			const char *name)
{
	int i, index, count = 0;
	struct of_irq oirq;
	u32 *opicprop;
	unsigned int opicplen;
	unsigned int virqs[16];

	/* Check for the obsolete "open-pic-interrupt" property.  If present,
	 * map those interrupts using the default interrupt host and default
	 * trigger.
	 */
	opicprop = (u32 *)get_property(np, "open-pic-interrupt", &opicplen);
	if (opicprop) {
		opicplen /= sizeof(u32);
		for (i = 0; i < opicplen; i++) {
			if (count > 15)
				break;
			virqs[count] = irq_create_mapping(NULL, *(opicprop++),
							  IRQ_TYPE_NONE);
			if (virqs[count] == NO_IRQ)
				printk(KERN_ERR "Unable to allocate interrupt "
				       "number for %s\n", np->full_name);
			else
				count++;
		}
	}
	/* Else use normal interrupt tree parsing */
	else {
		/* First try to do a proper OF tree parsing */
		for (index = 0; of_irq_map_one(np, index, &oirq) == 0;
		     index++) {
			if (count > 15)
				break;
			virqs[count] = irq_create_of_mapping(oirq.controller,
							     oirq.specifier,
							     oirq.size);
			if (virqs[count] == NO_IRQ)
				printk(KERN_ERR "Unable to allocate interrupt "
				       "number for %s\n", np->full_name);
			else
				count++;
		}
	}

	/* Now request them */
	for (i = 0; i < count; i++) {
		if (request_irq(virqs[i], handler, 0, name, NULL)) {
			printk(KERN_ERR "Unable to request interrupt %d for "
			       "%s\n", virqs[i], np->full_name);
			return;
		}
	}
}

/*
 * Initialize handlers for the set of interrupts caused by hardware errors
 * and power system events.
 */
static int __init init_ras_IRQ(void)
{
	struct device_node *np;

	ras_get_sensor_state_token = rtas_token("get-sensor-state");
	ras_check_exception_token = rtas_token("check-exception");

	/* Internal Errors */
	np = of_find_node_by_path("/event-sources/internal-errors");
	if (np != NULL) {
		request_ras_irqs(np, ras_error_interrupt, "RAS_ERROR");
		of_node_put(np);
	}

	/* EPOW Events */
	np = of_find_node_by_path("/event-sources/epow-events");
	if (np != NULL) {
		request_ras_irqs(np, ras_epow_interrupt, "RAS_EPOW");
		of_node_put(np);
	}

	return 0;
}
__initcall(init_ras_IRQ);

/*
 * Handle power subsystem events (EPOW).
 *
 * Presently we just log that the event has occurred.  This should be fixed
 * to examine the type of power failure and take appropriate action where
 * the time horizon permits something useful to be done.
 */
static irqreturn_t
ras_epow_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	int status = 0xdeadbeef;
	int state = 0;
	int critical;

	status = rtas_call(ras_get_sensor_state_token, 2, 2, &state,
			   EPOW_SENSOR_TOKEN, EPOW_SENSOR_INDEX);

	if (state > 3)
		critical = 1;	/* Time Critical */
	else
		critical = 0;

	spin_lock(&ras_log_buf_lock);

	status = rtas_call(ras_check_exception_token, 6, 1, NULL,
			   RAS_VECTOR_OFFSET,
			   irq_map[irq].hwirq,
			   RTAS_EPOW_WARNING | RTAS_POWERMGM_EVENTS,
			   critical, __pa(&ras_log_buf),
			   rtas_get_error_log_max());

	udbg_printf("EPOW <0x%lx 0x%x 0x%x>\n",
		    *((unsigned long *)&ras_log_buf), status, state);
	printk(KERN_WARNING "EPOW <0x%lx 0x%x 0x%x>\n",
	       *((unsigned long *)&ras_log_buf), status, state);

	/* Format and print the extended information */
	log_error(ras_log_buf, ERR_TYPE_RTAS_LOG, 0);

	spin_unlock(&ras_log_buf_lock);
	return IRQ_HANDLED;
}

/*
 * Handle hardware error interrupts.
 *
 * RTAS check-exception is called to collect data on the exception.  If
 * the error is deemed recoverable, we log a warning and return.
 * For nonrecoverable errors, an error is logged and we stop all processing
 * as quickly as possible in order to prevent propagation of the failure.
 */
static irqreturn_t
ras_error_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct rtas_error_log *rtas_elog;
	int status = 0xdeadbeef;
	int fatal;

	spin_lock(&ras_log_buf_lock);

	status = rtas_call(ras_check_exception_token, 6, 1, NULL,
			   RAS_VECTOR_OFFSET,
			   irq_map[irq].hwirq,
			   RTAS_INTERNAL_ERROR, 1 /* Time Critical */,
			   __pa(&ras_log_buf),
			   rtas_get_error_log_max());

	rtas_elog = (struct rtas_error_log *)ras_log_buf;

	if ((status == 0) && (rtas_elog->severity >= RTAS_SEVERITY_ERROR_SYNC))
		fatal = 1;
	else
		fatal = 0;

	/* Format and print the extended information */
	log_error(ras_log_buf, ERR_TYPE_RTAS_LOG, fatal);

	if (fatal) {
		udbg_printf("Fatal HW Error <0x%lx 0x%x>\n",
			    *((unsigned long *)&ras_log_buf), status);
		printk(KERN_EMERG "Error: Fatal hardware error <0x%lx 0x%x>\n",
		       *((unsigned long *)&ras_log_buf), status);

#ifndef DEBUG
		/* Don't actually power off when debugging so we can test
		 * without actually failing while injecting errors.
		 * Error data will not be logged to syslog.
		 */
		ppc_md.power_off();
#endif
	} else {
		udbg_printf("Recoverable HW Error <0x%lx 0x%x>\n",
			    *((unsigned long *)&ras_log_buf), status);
		printk(KERN_WARNING
		       "Warning: Recoverable hardware error <0x%lx 0x%x>\n",
		       *((unsigned long *)&ras_log_buf), status);
	}

	spin_unlock(&ras_log_buf_lock);
	return IRQ_HANDLED;
}

/* Get the error information for errors coming through the
 * FWNMI vectors.  The pt_regs' r3 will be updated to reflect
 * the actual r3 if possible, and a pointer to the error log entry
 * will be returned if one is found.
 *
 * The mce_data_buf does not have any locks or protection around it;
 * if a second machine check comes in, or a system reset is done
 * before we have logged the error, then we will get corruption in the
 * error log.  This is preferable to holding off on calling
 * ibm,nmi-interlock, which would result in us checkstopping if a
 * second machine check did come in.
 */
static struct rtas_error_log *fwnmi_get_errinfo(struct pt_regs *regs)
{
	unsigned long errdata = regs->gpr[3];
	struct rtas_error_log *errhdr = NULL;
	unsigned long *savep;

	if ((errdata >= 0x7000 && errdata < 0x7fff0) ||
	    (errdata >= rtas.base && errdata < rtas.base + rtas.size - 16)) {
		savep = __va(errdata);
		regs->gpr[3] = savep[0];	/* restore original r3 */
		memset(mce_data_buf, 0, RTAS_ERROR_LOG_MAX);
		memcpy(mce_data_buf, (char *)(savep + 1), RTAS_ERROR_LOG_MAX);
		errhdr = (struct rtas_error_log *)mce_data_buf;
	} else {
		printk("FWNMI: corrupt r3\n");
	}
	return errhdr;
}

/* Call this when done with the data returned by fwnmi_get_errinfo.
 * It will release the saved data area for other CPUs in the
 * partition to receive FWNMI errors.
 */
static void fwnmi_release_errinfo(void)
{
	int ret = rtas_call(rtas_token("ibm,nmi-interlock"), 0, 1, NULL);
	if (ret != 0)
		printk("FWNMI: nmi-interlock failed: %d\n", ret);
}

int pSeries_system_reset_exception(struct pt_regs *regs)
{
	if (fwnmi_active) {
		struct rtas_error_log *errhdr = fwnmi_get_errinfo(regs);
		if (errhdr) {
			/* XXX Should look at FWNMI information */
		}
		fwnmi_release_errinfo();
	}
	return 0; /* need to perform reset */
}

/*
 * See if we can recover from a machine check exception.
 * This is only called on power4 (or above) and only via
 * the Firmware Non-Maskable Interrupts (fwnmi) handler
 * which provides the error analysis for us.
 *
 * Return 1 if corrected (or delivered a signal).
 * Return 0 if there is nothing we can do.
 */
static int recover_mce(struct pt_regs *regs, struct rtas_error_log *err)
{
	int nonfatal = 0;

	if (err->disposition == RTAS_DISP_FULLY_RECOVERED) {
		/* Platform corrected itself */
		nonfatal = 1;
	} else if ((regs->msr & MSR_RI) &&
		   user_mode(regs) &&
		   err->severity == RTAS_SEVERITY_ERROR_SYNC &&
		   err->disposition == RTAS_DISP_NOT_RECOVERED &&
		   err->target == RTAS_TARGET_MEMORY &&
		   err->type == RTAS_TYPE_ECC_UNCORR &&
		   !(current->pid == 0 || current->pid == 1)) {
		/* Kill off a user process with an ECC error */
		printk(KERN_ERR "MCE: uncorrectable ecc error for pid %d\n",
		       current->pid);
		/* XXX something better for ECC error? */
		_exception(SIGBUS, regs, BUS_ADRERR, regs->nip);
		nonfatal = 1;
	}

	log_error((char *)err, ERR_TYPE_RTAS_LOG, !nonfatal);

	return nonfatal;
}

/*
 * Handle a machine check.
 *
 * Note that on Power 4 and beyond, Firmware Non-Maskable Interrupts (fwnmi)
 * should be present.  If so, the handler which called us tells us whether
 * the error was recovered (never true if RI=0).
 *
 * On hardware prior to Power 4, these exceptions were asynchronous, which
 * means we can't tell exactly where they occurred and so we can't recover.
 */
int pSeries_machine_check_exception(struct pt_regs *regs)
{
	struct rtas_error_log *errp;

	if (fwnmi_active) {
		errp = fwnmi_get_errinfo(regs);
		fwnmi_release_errinfo();
		if (errp && recover_mce(regs, errp))
			return 1;
	}

	return 0;
}