// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Machine check exception handling.
 *
 * Copyright 2013 IBM Corporation
 * Author: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
 */

#undef DEBUG
#define pr_fmt(fmt) "mce: " fmt

#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/percpu.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/extable.h>
#include <linux/ftrace.h>
#include <linux/memblock.h>

#include <asm/interrupt.h>
#include <asm/machdep.h>
#include <asm/mce.h>
#include <asm/nmi.h>
#include <asm/asm-prototypes.h>

#include "setup.h"

static void machine_check_process_queued_event(struct irq_work *work);
static void machine_check_ue_irq_work(struct irq_work *work);
static void machine_check_ue_event(struct machine_check_event *evt);
static void machine_process_ue_event(struct work_struct *work);

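/*
 * Event flow in this file: a machine check is logged in real mode by
 * save_mce_event() into a small per-CPU buffer. UE events are copied to a
 * per-CPU UE queue by machine_check_ue_event() and handled in process
 * context via irq_work plus a workqueue; other events are copied to the
 * per-CPU event queue by machine_check_queue_event() and printed from
 * irq_work once interrupts are enabled again.
 */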
static struct irq_work mce_event_process_work = {
	.func = machine_check_process_queued_event,
};

static struct irq_work mce_ue_event_irq_work = {
	.func = machine_check_ue_irq_work,
};

DECLARE_WORK(mce_ue_event_work, machine_process_ue_event);

static BLOCKING_NOTIFIER_HEAD(mce_notifier_list);

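/*
 * Notifiers on mce_notifier_list run from machine_process_ue_event() in
 * process context, once per queued UE event, before any memory_failure()
 * handling.
 */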
int mce_register_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&mce_notifier_list, nb);
}
EXPORT_SYMBOL_GPL(mce_register_notifier);

int mce_unregister_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&mce_notifier_list, nb);
}
EXPORT_SYMBOL_GPL(mce_unregister_notifier);

static void mce_set_error_info(struct machine_check_event *mce,
			       struct mce_error_info *mce_err)
{
	mce->error_type = mce_err->error_type;
	switch (mce_err->error_type) {
	case MCE_ERROR_TYPE_UE:
		mce->u.ue_error.ue_error_type = mce_err->u.ue_error_type;
		break;
	case MCE_ERROR_TYPE_SLB:
		mce->u.slb_error.slb_error_type = mce_err->u.slb_error_type;
		break;
	case MCE_ERROR_TYPE_ERAT:
		mce->u.erat_error.erat_error_type = mce_err->u.erat_error_type;
		break;
	case MCE_ERROR_TYPE_TLB:
		mce->u.tlb_error.tlb_error_type = mce_err->u.tlb_error_type;
		break;
	case MCE_ERROR_TYPE_USER:
		mce->u.user_error.user_error_type = mce_err->u.user_error_type;
		break;
	case MCE_ERROR_TYPE_RA:
		mce->u.ra_error.ra_error_type = mce_err->u.ra_error_type;
		break;
	case MCE_ERROR_TYPE_LINK:
		mce->u.link_error.link_error_type = mce_err->u.link_error_type;
		break;
	case MCE_ERROR_TYPE_UNKNOWN:
	default:
		break;
	}
}

/*
 * Decode and save high-level MCE information into the per-CPU buffer,
 * which is an array of machine_check_event structures.
 */
void save_mce_event(struct pt_regs *regs, long handled,
		    struct mce_error_info *mce_err,
		    uint64_t nip, uint64_t addr, uint64_t phys_addr)
{
	int index = local_paca->mce_info->mce_nest_count++;
	struct machine_check_event *mce;

	mce = &local_paca->mce_info->mce_event[index];
	/*
	 * Return if we don't have enough space to log the MCE event.
	 * mce_nest_count may go beyond MAX_MC_EVT but that's ok,
	 * the check below will stop a buffer overrun.
	 */
	if (index >= MAX_MC_EVT)
		return;

	/* Populate generic machine check info */
	mce->version = MCE_V1;
	mce->srr0 = nip;
	mce->srr1 = regs->msr;
	mce->gpr3 = regs->gpr[3];
	mce->in_use = 1;
	mce->cpu = get_paca()->paca_index;

	/* Mark it recovered if we have handled it and MSR(RI=1). */
	if (handled && (regs->msr & MSR_RI))
		mce->disposition = MCE_DISPOSITION_RECOVERED;
	else
		mce->disposition = MCE_DISPOSITION_NOT_RECOVERED;

	mce->initiator = mce_err->initiator;
	mce->severity = mce_err->severity;
	mce->sync_error = mce_err->sync_error;
	mce->error_class = mce_err->error_class;

	/* Populate the mce error_type and type-specific error_type. */
	mce_set_error_info(mce, mce_err);

	if (!addr)
		return;

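	/* Record the effective address (and, for UE, the physical address) with the event. */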
	if (mce->error_type == MCE_ERROR_TYPE_TLB) {
		mce->u.tlb_error.effective_address_provided = true;
		mce->u.tlb_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_SLB) {
		mce->u.slb_error.effective_address_provided = true;
		mce->u.slb_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_ERAT) {
		mce->u.erat_error.effective_address_provided = true;
		mce->u.erat_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_USER) {
		mce->u.user_error.effective_address_provided = true;
		mce->u.user_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_RA) {
		mce->u.ra_error.effective_address_provided = true;
		mce->u.ra_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_LINK) {
		mce->u.link_error.effective_address_provided = true;
		mce->u.link_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_UE) {
		mce->u.ue_error.effective_address_provided = true;
		mce->u.ue_error.effective_address = addr;
		if (phys_addr != ULONG_MAX) {
			mce->u.ue_error.physical_address_provided = true;
			mce->u.ue_error.physical_address = phys_addr;
			mce->u.ue_error.ignore_event = mce_err->ignore_event;
			machine_check_ue_event(mce);
		}
	}
}

/*
 * get_mce_event:
 *	mce	Pointer to machine_check_event structure to be filled.
 *	release	Flag to indicate whether to free the event slot or not.
 *		0 = do not release the mce event. Caller will invoke
 *		    release_mce_event() once the event has been consumed.
 *		1 = release the slot.
 *
 *	return	1 = success
 *		0 = failure
 *
 * get_mce_event() will be called by platform-specific machine check
 * handler routines and by KVM.
 * When we call get_mce_event(), we are still in interrupt context and
 * preemption will not be scheduled until the ret_from_except() routine
 * is called.
 */
int get_mce_event(struct machine_check_event *mce, bool release)
{
	int index = local_paca->mce_info->mce_nest_count - 1;
	struct machine_check_event *mc_evt;
	int ret = 0;

	/* Sanity check */
	if (index < 0)
		return ret;

	/* Check if we have MCE info to process. */
	if (index < MAX_MC_EVT) {
		mc_evt = &local_paca->mce_info->mce_event[index];
		/* Copy the event structure and release the original */
		if (mce)
			*mce = *mc_evt;
		if (release)
			mc_evt->in_use = 0;
		ret = 1;
	}
	/* Decrement the count to free the slot. */
	if (release)
		local_paca->mce_info->mce_nest_count--;

	return ret;
}
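/*
 * Typical usage, e.g. from a platform machine check handler:
 *
 *	struct machine_check_event evt;
 *
 *	if (get_mce_event(&evt, MCE_EVENT_RELEASE))
 *		... inspect evt ...
 *
 * Passing false for @release keeps the slot allocated; the caller must
 * then call release_mce_event() once the event has been consumed.
 */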

void release_mce_event(void)
{
	get_mce_event(NULL, true);
}

static void machine_check_ue_irq_work(struct irq_work *work)
{
	schedule_work(&mce_ue_event_work);
}

/*
 * Queue up the UE event, which can then be handled later in process context.
 */
static void machine_check_ue_event(struct machine_check_event *evt)
{
	int index;

	index = local_paca->mce_info->mce_ue_count++;
	/* If queue is full, just return for now. */
	if (index >= MAX_MC_EVT) {
		local_paca->mce_info->mce_ue_count--;
		return;
	}
	memcpy(&local_paca->mce_info->mce_ue_event_queue[index],
	       evt, sizeof(*evt));

	/* Queue work to process this event later. */
	irq_work_queue(&mce_ue_event_irq_work);
}

/*
 * Queue up the MCE event, which can then be printed later from irq_work
 * context.
 */
void machine_check_queue_event(void)
{
	int index;
	struct machine_check_event evt;

	if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
		return;

	index = local_paca->mce_info->mce_queue_count++;
	/* If queue is full, just return for now. */
	if (index >= MAX_MC_EVT) {
		local_paca->mce_info->mce_queue_count--;
		return;
	}
	memcpy(&local_paca->mce_info->mce_event_queue[index],
	       &evt, sizeof(evt));

	/* Queue irq work to process this event later. */
	irq_work_queue(&mce_event_process_work);
}

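/*
 * If a UE hit a kernel instruction covered by an exception table entry
 * (e.g. a machine-check-safe copy routine), redirect NIP to the fixup and
 * mark the event so that the deferred UE processing ignores it.
 */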
void mce_common_process_ue(struct pt_regs *regs,
			   struct mce_error_info *mce_err)
{
	const struct exception_table_entry *entry;

	entry = search_kernel_exception_table(regs->nip);
	if (entry) {
		mce_err->ignore_event = true;
		regs->nip = extable_fixup(entry);
	}
}

/*
 * Process pending UE events from the per-CPU UE event queue. This work
 * function runs in process context; it is scheduled from
 * machine_check_ue_irq_work() via irq_work.
 */
static void machine_process_ue_event(struct work_struct *work)
{
	int index;
	struct machine_check_event *evt;

	while (local_paca->mce_info->mce_ue_count > 0) {
		index = local_paca->mce_info->mce_ue_count - 1;
		evt = &local_paca->mce_info->mce_ue_event_queue[index];
		blocking_notifier_call_chain(&mce_notifier_list, 0, evt);
#ifdef CONFIG_MEMORY_FAILURE
		/*
		 * This should probably be queued elsewhere, but oh well.
		 *
		 * Don't report this machine check because the caller has
		 * asked us to ignore the event; it has a fixup handler which
		 * will do the appropriate error handling and reporting.
		 */
		if (evt->error_type == MCE_ERROR_TYPE_UE) {
			if (evt->u.ue_error.ignore_event) {
				local_paca->mce_info->mce_ue_count--;
				continue;
			}

			if (evt->u.ue_error.physical_address_provided) {
				unsigned long pfn;

				pfn = evt->u.ue_error.physical_address >>
					PAGE_SHIFT;
				memory_failure(pfn, 0);
			} else {
				pr_warn("Failed to identify bad address from where the uncorrectable error (UE) was generated\n");
			}
		}
#endif
		local_paca->mce_info->mce_ue_count--;
	}
}

/*
 * Process pending MCE events from the per-CPU MCE event queue. This
 * irq_work callback runs once interrupts are re-enabled after the
 * machine check.
 */
static void machine_check_process_queued_event(struct irq_work *work)
{
	int index;
	struct machine_check_event *evt;

	add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);

	/*
	 * For now just print it to console.
	 * TODO: log this error event to FSP or nvram.
	 */
	while (local_paca->mce_info->mce_queue_count > 0) {
		index = local_paca->mce_info->mce_queue_count - 1;
		evt = &local_paca->mce_info->mce_event_queue[index];

		if (evt->error_type == MCE_ERROR_TYPE_UE &&
		    evt->u.ue_error.ignore_event) {
			local_paca->mce_info->mce_queue_count--;
			continue;
		}
		machine_check_print_event_info(evt, false, false);
		local_paca->mce_info->mce_queue_count--;
	}
}

void machine_check_print_event_info(struct machine_check_event *evt,
				    bool user_mode, bool in_guest)
{
	const char *level, *sevstr, *subtype, *err_type, *initiator;
	uint64_t ea = 0, pa = 0;
	int n = 0;
	char dar_str[50];
	char pa_str[50];
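	/*
	 * Human-readable subtype tables, indexed by the type-specific
	 * *_error_type field of the event; out-of-range values fall back
	 * to "Unknown" below.
	 */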
	static const char *mc_ue_types[] = {
		"Indeterminate",
		"Instruction fetch",
		"Page table walk ifetch",
		"Load/Store",
		"Page table walk Load/Store",
	};
	static const char *mc_slb_types[] = {
		"Indeterminate",
		"Parity",
		"Multihit",
	};
	static const char *mc_erat_types[] = {
		"Indeterminate",
		"Parity",
		"Multihit",
	};
	static const char *mc_tlb_types[] = {
		"Indeterminate",
		"Parity",
		"Multihit",
	};
	static const char *mc_user_types[] = {
		"Indeterminate",
		"tlbie(l) invalid",
		"scv invalid",
	};
	static const char *mc_ra_types[] = {
		"Indeterminate",
		"Instruction fetch (bad)",
		"Instruction fetch (foreign)",
		"Page table walk ifetch (bad)",
		"Page table walk ifetch (foreign)",
		"Load (bad)",
		"Store (bad)",
		"Page table walk Load/Store (bad)",
		"Page table walk Load/Store (foreign)",
		"Load/Store (foreign)",
	};
	static const char *mc_link_types[] = {
		"Indeterminate",
		"Instruction fetch (timeout)",
		"Page table walk ifetch (timeout)",
		"Load (timeout)",
		"Store (timeout)",
		"Page table walk Load/Store (timeout)",
	};
	static const char *mc_error_class[] = {
		"Unknown",
		"Hardware error",
		"Probable Hardware error (some chance of software cause)",
		"Software error",
		"Probable Software error (some chance of hardware cause)",
	};

	/* Print things out */
	if (evt->version != MCE_V1) {
		pr_err("Machine Check Exception, Unknown event version %d !\n",
		       evt->version);
		return;
	}
	switch (evt->severity) {
	case MCE_SEV_NO_ERROR:
		level = KERN_INFO;
		sevstr = "Harmless";
		break;
	case MCE_SEV_WARNING:
		level = KERN_WARNING;
		sevstr = "Warning";
		break;
	case MCE_SEV_SEVERE:
		level = KERN_ERR;
		sevstr = "Severe";
		break;
	case MCE_SEV_FATAL:
	default:
		level = KERN_ERR;
		sevstr = "Fatal";
		break;
	}

	switch (evt->initiator) {
	case MCE_INITIATOR_CPU:
		initiator = "CPU";
		break;
	case MCE_INITIATOR_PCI:
		initiator = "PCI";
		break;
	case MCE_INITIATOR_ISA:
		initiator = "ISA";
		break;
	case MCE_INITIATOR_MEMORY:
		initiator = "Memory";
		break;
	case MCE_INITIATOR_POWERMGM:
		initiator = "Power Management";
		break;
	case MCE_INITIATOR_UNKNOWN:
	default:
		initiator = "Unknown";
		break;
	}

	switch (evt->error_type) {
	case MCE_ERROR_TYPE_UE:
		err_type = "UE";
		subtype = evt->u.ue_error.ue_error_type <
			ARRAY_SIZE(mc_ue_types) ?
			mc_ue_types[evt->u.ue_error.ue_error_type]
			: "Unknown";
		if (evt->u.ue_error.effective_address_provided)
			ea = evt->u.ue_error.effective_address;
		if (evt->u.ue_error.physical_address_provided)
			pa = evt->u.ue_error.physical_address;
		break;
	case MCE_ERROR_TYPE_SLB:
		err_type = "SLB";
		subtype = evt->u.slb_error.slb_error_type <
			ARRAY_SIZE(mc_slb_types) ?
			mc_slb_types[evt->u.slb_error.slb_error_type]
			: "Unknown";
		if (evt->u.slb_error.effective_address_provided)
			ea = evt->u.slb_error.effective_address;
		break;
	case MCE_ERROR_TYPE_ERAT:
		err_type = "ERAT";
		subtype = evt->u.erat_error.erat_error_type <
			ARRAY_SIZE(mc_erat_types) ?
			mc_erat_types[evt->u.erat_error.erat_error_type]
			: "Unknown";
		if (evt->u.erat_error.effective_address_provided)
			ea = evt->u.erat_error.effective_address;
		break;
	case MCE_ERROR_TYPE_TLB:
		err_type = "TLB";
		subtype = evt->u.tlb_error.tlb_error_type <
			ARRAY_SIZE(mc_tlb_types) ?
			mc_tlb_types[evt->u.tlb_error.tlb_error_type]
			: "Unknown";
		if (evt->u.tlb_error.effective_address_provided)
			ea = evt->u.tlb_error.effective_address;
		break;
	case MCE_ERROR_TYPE_USER:
		err_type = "User";
		subtype = evt->u.user_error.user_error_type <
			ARRAY_SIZE(mc_user_types) ?
			mc_user_types[evt->u.user_error.user_error_type]
			: "Unknown";
		if (evt->u.user_error.effective_address_provided)
			ea = evt->u.user_error.effective_address;
		break;
	case MCE_ERROR_TYPE_RA:
		err_type = "Real address";
		subtype = evt->u.ra_error.ra_error_type <
			ARRAY_SIZE(mc_ra_types) ?
			mc_ra_types[evt->u.ra_error.ra_error_type]
			: "Unknown";
		if (evt->u.ra_error.effective_address_provided)
			ea = evt->u.ra_error.effective_address;
		break;
	case MCE_ERROR_TYPE_LINK:
		err_type = "Link";
		subtype = evt->u.link_error.link_error_type <
			ARRAY_SIZE(mc_link_types) ?
			mc_link_types[evt->u.link_error.link_error_type]
			: "Unknown";
		if (evt->u.link_error.effective_address_provided)
			ea = evt->u.link_error.effective_address;
		break;
	case MCE_ERROR_TYPE_DCACHE:
		err_type = "D-Cache";
		subtype = "Unknown";
		break;
	case MCE_ERROR_TYPE_ICACHE:
		err_type = "I-Cache";
		subtype = "Unknown";
		break;
	case MCE_ERROR_TYPE_UNKNOWN:
	default:
		err_type = "Unknown";
		subtype = "";
		break;
	}

	dar_str[0] = pa_str[0] = '\0';
	if (ea && evt->srr0 != ea) {
		/* Load/Store address */
		n = sprintf(dar_str, "DAR: %016llx ", ea);
		if (pa)
			sprintf(dar_str + n, "paddr: %016llx ", pa);
	} else if (pa) {
		sprintf(pa_str, " paddr: %016llx", pa);
	}

	printk("%sMCE: CPU%d: machine check (%s) %s %s %s %s[%s]\n",
		level, evt->cpu, sevstr, in_guest ? "Guest" : "",
		err_type, subtype, dar_str,
		evt->disposition == MCE_DISPOSITION_RECOVERED ?
		"Recovered" : "Not recovered");

	if (in_guest || user_mode) {
		printk("%sMCE: CPU%d: PID: %d Comm: %s %sNIP: [%016llx]%s\n",
			level, evt->cpu, current->pid, current->comm,
			in_guest ? "Guest " : "", evt->srr0, pa_str);
	} else {
		printk("%sMCE: CPU%d: NIP: [%016llx] %pS%s\n",
			level, evt->cpu, evt->srr0, (void *)evt->srr0, pa_str);
	}

	printk("%sMCE: CPU%d: Initiator %s\n", level, evt->cpu, initiator);

	subtype = evt->error_class < ARRAY_SIZE(mc_error_class) ?
		mc_error_class[evt->error_class] : "Unknown";
	printk("%sMCE: CPU%d: %s\n", level, evt->cpu, subtype);

#ifdef CONFIG_PPC_BOOK3S_64
	/* Display faulty slb contents for SLB errors. */
	if (evt->error_type == MCE_ERROR_TYPE_SLB && !in_guest)
		slb_dump_contents(local_paca->mce_faulty_slbs);
#endif
}
EXPORT_SYMBOL_GPL(machine_check_print_event_info);

/*
 * This function is called in real mode. Strictly no printk's please.
 *
 * regs->nip and regs->msr contain SRR0 and SRR1.
 */
DEFINE_INTERRUPT_HANDLER_NMI(machine_check_early)
{
	long handled = 0;

	hv_nmi_check_nonrecoverable(regs);

	/*
	 * See if the platform is capable of handling the machine check.
	 */
	if (ppc_md.machine_check_early)
		handled = ppc_md.machine_check_early(regs);

	return handled;
}

/* Possible meanings for HMER_DEBUG_TRIG bit being set on POWER9 */
static enum {
	DTRIG_UNKNOWN,
	DTRIG_VECTOR_CI,	/* need to emulate vector CI load instr */
	DTRIG_SUSPEND_ESCAPE,	/* need to escape from TM suspend mode */
} hmer_debug_trig_function;

static int init_debug_trig_function(void)
{
	int pvr;
	struct device_node *cpun;
	struct property *prop = NULL;
	const char *str;

	/* First look in the device tree */
	preempt_disable();
	cpun = of_get_cpu_node(smp_processor_id(), NULL);
	if (cpun) {
		of_property_for_each_string(cpun, "ibm,hmi-special-triggers",
					    prop, str) {
			if (strcmp(str, "bit17-vector-ci-load") == 0)
				hmer_debug_trig_function = DTRIG_VECTOR_CI;
			else if (strcmp(str, "bit17-tm-suspend-escape") == 0)
				hmer_debug_trig_function = DTRIG_SUSPEND_ESCAPE;
		}
		of_node_put(cpun);
	}
	preempt_enable();

	/* If we found the property, don't look at PVR */
	if (prop)
		goto out;

	pvr = mfspr(SPRN_PVR);
	/* Check for POWER9 Nimbus (scale-out) */
	if ((PVR_VER(pvr) == PVR_POWER9) && (pvr & 0xe000) == 0) {
		/* DD2.2 and later */
		if ((pvr & 0xfff) >= 0x202)
			hmer_debug_trig_function = DTRIG_SUSPEND_ESCAPE;
		/* DD2.0 and DD2.1 - used for vector CI load emulation */
		else if ((pvr & 0xfff) >= 0x200)
			hmer_debug_trig_function = DTRIG_VECTOR_CI;
	}

 out:
	switch (hmer_debug_trig_function) {
	case DTRIG_VECTOR_CI:
		pr_debug("HMI debug trigger used for vector CI load\n");
		break;
	case DTRIG_SUSPEND_ESCAPE:
		pr_debug("HMI debug trigger used for TM suspend escape\n");
		break;
	default:
		break;
	}
	return 0;
}
__initcall(init_debug_trig_function);

/*
 * Handle HMIs that occur as a result of a debug trigger.
 * Return values:
 * -1 means this is not an HMI cause that we know about
 *  0 means no further handling is required
 *  1 means further handling is required
 */
long hmi_handle_debugtrig(struct pt_regs *regs)
{
	unsigned long hmer = mfspr(SPRN_HMER);
	long ret = 0;

	/* HMER_DEBUG_TRIG bit is used for various workarounds on P9 */
	if (!((hmer & HMER_DEBUG_TRIG)
	      && hmer_debug_trig_function != DTRIG_UNKNOWN))
		return -1;

	hmer &= ~HMER_DEBUG_TRIG;
	/* HMER is a write-AND register */
	mtspr(SPRN_HMER, ~HMER_DEBUG_TRIG);

	switch (hmer_debug_trig_function) {
	case DTRIG_VECTOR_CI:
		/*
		 * To avoid problems with soft-disable, only do the
		 * emulation when we are coming from host user space.
		 */
		if (regs && user_mode(regs))
			ret = local_paca->hmi_p9_special_emu = 1;

		break;

	default:
		break;
	}

	/*
	 * See if any other HMI causes remain to be handled
	 */
	if (hmer & mfspr(SPRN_HMEER))
		return -1;

	return ret;
}

/*
 * Return values:
 *  0 = no further handling is required
 *  1 = further handling is required
 */
DEFINE_INTERRUPT_HANDLER_NMI(hmi_exception_realmode)
{
	int ret;

	local_paca->hmi_irqs++;

	ret = hmi_handle_debugtrig(regs);
	if (ret >= 0)
		return ret;

	wait_for_subcore_guest_exit();

	if (ppc_md.hmi_exception_early)
		ppc_md.hmi_exception_early(regs);

	wait_for_tb_resync();

	return 1;
}

void __init mce_init(void)
{
	struct mce_info *mce_info;
	u64 limit;
	int i;

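	/*
	 * Machine checks are handled in real mode, so the per-CPU event
	 * buffers must lie below both the bolted region and the RMA to be
	 * addressable from the real-mode handlers.
	 */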
	limit = min(ppc64_bolted_size(), ppc64_rma_size);
	for_each_possible_cpu(i) {
		mce_info = memblock_alloc_try_nid(sizeof(*mce_info),
						  __alignof__(*mce_info),
						  MEMBLOCK_LOW_LIMIT,
						  limit, cpu_to_node(i));
		if (!mce_info)
			goto err;
		paca_ptrs[i]->mce_info = mce_info;
	}
	return;
err:
	panic("Failed to allocate memory for MCE event data\n");
}