xref: /openbmc/linux/arch/powerpc/kernel/mce.c (revision 71501859)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Machine check exception handling.
 *
 * Copyright 2013 IBM Corporation
 * Author: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
 */

#undef DEBUG
#define pr_fmt(fmt) "mce: " fmt

#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/percpu.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/extable.h>
#include <linux/ftrace.h>
#include <linux/memblock.h>

#include <asm/interrupt.h>
#include <asm/machdep.h>
#include <asm/mce.h>
#include <asm/nmi.h>
#include <asm/asm-prototypes.h>

#include "setup.h"

static void machine_check_process_queued_event(struct irq_work *work);
static void machine_check_ue_irq_work(struct irq_work *work);
static void machine_check_ue_event(struct machine_check_event *evt);
static void machine_process_ue_event(struct work_struct *work);

static struct irq_work mce_event_process_work = {
	.func = machine_check_process_queued_event,
};

static struct irq_work mce_ue_event_irq_work = {
	.func = machine_check_ue_irq_work,
};

static DECLARE_WORK(mce_ue_event_work, machine_process_ue_event);

static BLOCKING_NOTIFIER_HEAD(mce_notifier_list);

int mce_register_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&mce_notifier_list, nb);
}
EXPORT_SYMBOL_GPL(mce_register_notifier);

int mce_unregister_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&mce_notifier_list, nb);
}
EXPORT_SYMBOL_GPL(mce_unregister_notifier);
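
/*
 * Example (illustrative only, not part of this file): a subsystem that wants
 * to be told about UE events can hook the chain above. The names below are
 * hypothetical; the chain is invoked from machine_process_ue_event() with a
 * struct machine_check_event * as the data argument.
 *
 *	static int my_mce_notify(struct notifier_block *nb,
 *				 unsigned long val, void *data)
 *	{
 *		struct machine_check_event *evt = data;
 *
 *		if (evt->error_type == MCE_ERROR_TYPE_UE)
 *			pr_info("saw UE on cpu %d\n", evt->cpu);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_mce_nb = {
 *		.notifier_call = my_mce_notify,
 *	};
 *
 *	mce_register_notifier(&my_mce_nb);
 */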

static void mce_set_error_info(struct machine_check_event *mce,
			       struct mce_error_info *mce_err)
{
	mce->error_type = mce_err->error_type;
	switch (mce_err->error_type) {
	case MCE_ERROR_TYPE_UE:
		mce->u.ue_error.ue_error_type = mce_err->u.ue_error_type;
		break;
	case MCE_ERROR_TYPE_SLB:
		mce->u.slb_error.slb_error_type = mce_err->u.slb_error_type;
		break;
	case MCE_ERROR_TYPE_ERAT:
		mce->u.erat_error.erat_error_type = mce_err->u.erat_error_type;
		break;
	case MCE_ERROR_TYPE_TLB:
		mce->u.tlb_error.tlb_error_type = mce_err->u.tlb_error_type;
		break;
	case MCE_ERROR_TYPE_USER:
		mce->u.user_error.user_error_type = mce_err->u.user_error_type;
		break;
	case MCE_ERROR_TYPE_RA:
		mce->u.ra_error.ra_error_type = mce_err->u.ra_error_type;
		break;
	case MCE_ERROR_TYPE_LINK:
		mce->u.link_error.link_error_type = mce_err->u.link_error_type;
		break;
	case MCE_ERROR_TYPE_UNKNOWN:
	default:
		break;
	}
}

/*
 * Decode and save high-level MCE information into the per-CPU buffer,
 * which is an array of machine_check_event structures.
 */
void save_mce_event(struct pt_regs *regs, long handled,
		    struct mce_error_info *mce_err,
		    uint64_t nip, uint64_t addr, uint64_t phys_addr)
{
	int index = local_paca->mce_info->mce_nest_count++;
	struct machine_check_event *mce;

	/*
	 * Return if we don't have enough space to log the MCE event.
	 * mce_nest_count may go beyond MAX_MC_EVT, but that's OK: this
	 * check stops a buffer overrun (and avoids even forming an
	 * out-of-bounds pointer into mce_event[]).
	 */
	if (index >= MAX_MC_EVT)
		return;

	mce = &local_paca->mce_info->mce_event[index];

	/* Populate generic machine check info */
	mce->version = MCE_V1;
	mce->srr0 = nip;
	mce->srr1 = regs->msr;
	mce->gpr3 = regs->gpr[3];
	mce->in_use = 1;
	mce->cpu = get_paca()->paca_index;

	/* Mark it recovered if we have handled it and MSR(RI=1). */
	if (handled && (regs->msr & MSR_RI))
		mce->disposition = MCE_DISPOSITION_RECOVERED;
	else
		mce->disposition = MCE_DISPOSITION_NOT_RECOVERED;

	mce->initiator = mce_err->initiator;
	mce->severity = mce_err->severity;
	mce->sync_error = mce_err->sync_error;
	mce->error_class = mce_err->error_class;

	/*
	 * Populate the mce error_type and type-specific error_type.
	 */
	mce_set_error_info(mce, mce_err);
	if (mce->error_type == MCE_ERROR_TYPE_UE)
		mce->u.ue_error.ignore_event = mce_err->ignore_event;

	if (!addr)
		return;

	if (mce->error_type == MCE_ERROR_TYPE_TLB) {
		mce->u.tlb_error.effective_address_provided = true;
		mce->u.tlb_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_SLB) {
		mce->u.slb_error.effective_address_provided = true;
		mce->u.slb_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_ERAT) {
		mce->u.erat_error.effective_address_provided = true;
		mce->u.erat_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_USER) {
		mce->u.user_error.effective_address_provided = true;
		mce->u.user_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_RA) {
		mce->u.ra_error.effective_address_provided = true;
		mce->u.ra_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_LINK) {
		mce->u.link_error.effective_address_provided = true;
		mce->u.link_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_UE) {
		mce->u.ue_error.effective_address_provided = true;
		mce->u.ue_error.effective_address = addr;
		if (phys_addr != ULONG_MAX) {
			mce->u.ue_error.physical_address_provided = true;
			mce->u.ue_error.physical_address = phys_addr;
			machine_check_ue_event(mce);
		}
	}
}
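
/*
 * Example (illustrative sketch, not from this file): a platform machine
 * check handler typically decodes the hardware error state into a
 * struct mce_error_info and then records it here. Every name in the body
 * below except save_mce_event() is a hypothetical placeholder.
 *
 *	static long my_platform_mce_handler(struct pt_regs *regs)
 *	{
 *		struct mce_error_info mce_err = { 0 };
 *		uint64_t addr = 0, phys_addr = ULONG_MAX;
 *		long handled = my_decode_srr1(regs, &mce_err, &addr,
 *					      &phys_addr);
 *
 *		save_mce_event(regs, handled, &mce_err, regs->nip, addr,
 *			       phys_addr);
 *		return handled;
 *	}
 */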

/*
 * get_mce_event:
 *	mce	Pointer to machine_check_event structure to be filled.
 *	release	Flag to indicate whether to free the event slot or not.
 *		0 = do not release the mce event. Caller will invoke
 *		    release_mce_event() once the event has been consumed.
 *		1 = release the slot.
 *
 *	return	1 = success
 *		0 = failure
 *
 * get_mce_event() will be called by platform-specific machine check
 * handler routines and by KVM.
 * When we call get_mce_event(), we are still in interrupt context and
 * preemption will not be scheduled until the ret_from_except() routine
 * is called.
 */
int get_mce_event(struct machine_check_event *mce, bool release)
{
	int index = local_paca->mce_info->mce_nest_count - 1;
	struct machine_check_event *mc_evt;
	int ret = 0;

	/* Sanity check */
	if (index < 0)
		return ret;

	/* Check if we have MCE info to process. */
	if (index < MAX_MC_EVT) {
		mc_evt = &local_paca->mce_info->mce_event[index];
		/* Copy the event structure and release the original */
		if (mce)
			*mce = *mc_evt;
		if (release)
			mc_evt->in_use = 0;
		ret = 1;
	}
	/* Decrement the count to free the slot. */
	if (release)
		local_paca->mce_info->mce_nest_count--;

	return ret;
}

void release_mce_event(void)
{
	get_mce_event(NULL, true);
}
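
/*
 * Example (illustrative only): the peek-then-release pattern a consumer
 * might use. MCE_EVENT_RELEASE and MCE_EVENT_DONTRELEASE are the bool
 * flags used by callers of get_mce_event() (see machine_check_queue_event()
 * below).
 *
 *	struct machine_check_event evt;
 *
 *	if (get_mce_event(&evt, MCE_EVENT_DONTRELEASE)) {
 *		pr_info("mce on cpu %d\n", evt.cpu);
 *		release_mce_event();
 *	}
 */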

static void machine_check_ue_irq_work(struct irq_work *work)
{
	schedule_work(&mce_ue_event_work);
}

/*
 * Queue up the uncorrectable-error (UE) event so it can be handled later.
 */
static void machine_check_ue_event(struct machine_check_event *evt)
{
	int index;

	index = local_paca->mce_info->mce_ue_count++;
	/* If queue is full, just return for now. */
	if (index >= MAX_MC_EVT) {
		local_paca->mce_info->mce_ue_count--;
		return;
	}
	memcpy(&local_paca->mce_info->mce_ue_event_queue[index],
	       evt, sizeof(*evt));

	/* Queue work to process this event later. */
	irq_work_queue(&mce_ue_event_irq_work);
}

/*
 * Queue up the MCE event so it can be handled later.
 */
void machine_check_queue_event(void)
{
	int index;
	struct machine_check_event evt;

	if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
		return;

	index = local_paca->mce_info->mce_queue_count++;
	/* If queue is full, just return for now. */
	if (index >= MAX_MC_EVT) {
		local_paca->mce_info->mce_queue_count--;
		return;
	}
	memcpy(&local_paca->mce_info->mce_event_queue[index],
	       &evt, sizeof(evt));

	/* Queue irq work to process this event later. */
	irq_work_queue(&mce_event_process_work);
}

void mce_common_process_ue(struct pt_regs *regs,
			   struct mce_error_info *mce_err)
{
	const struct exception_table_entry *entry;

	entry = search_kernel_exception_table(regs->nip);
	if (entry) {
		mce_err->ignore_event = true;
		regs->nip = extable_fixup(entry);
	}
}
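
/*
 * Example (illustrative): when a UE lands on a kernel access made through an
 * exception-table-protected accessor, the fixup above redirects NIP so the
 * access fails gracefully instead of killing the kernel, and ignore_event
 * suppresses later reporting. A hypothetical caller pattern:
 *
 *	struct mce_error_info mce_err = { 0 };
 *
 *	mce_common_process_ue(regs, &mce_err);
 *	if (mce_err.ignore_event)
 *		pr_debug("UE fixed up, NIP now %lx\n", regs->nip);
 */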

/*
 * Process pending UE MCE events from the per-CPU UE event queue. This is a
 * work function scheduled via irq_work once the machine check handler has
 * returned, so it runs in process context.
 */
static void machine_process_ue_event(struct work_struct *work)
{
	int index;
	struct machine_check_event *evt;

	while (local_paca->mce_info->mce_ue_count > 0) {
		index = local_paca->mce_info->mce_ue_count - 1;
		evt = &local_paca->mce_info->mce_ue_event_queue[index];
		blocking_notifier_call_chain(&mce_notifier_list, 0, evt);
#ifdef CONFIG_MEMORY_FAILURE
		/*
		 * This should probably be queued elsewhere, but oh well.
		 *
		 * Don't report this machine check when the caller has
		 * asked us to ignore the event: it has a fixup handler
		 * which will do the appropriate error handling and
		 * reporting.
		 */
		if (evt->error_type == MCE_ERROR_TYPE_UE) {
			if (evt->u.ue_error.ignore_event) {
				local_paca->mce_info->mce_ue_count--;
				continue;
			}

			if (evt->u.ue_error.physical_address_provided) {
				unsigned long pfn;

				pfn = evt->u.ue_error.physical_address >>
					PAGE_SHIFT;
				memory_failure(pfn, 0);
			} else
				pr_warn("Failed to identify bad address from where the uncorrectable error (UE) was generated\n");
		}
#endif
		local_paca->mce_info->mce_ue_count--;
	}
}

/*
 * Process pending MCE events from the per-CPU MCE event queue. This runs as
 * irq_work, queued by machine_check_queue_event(), once it is safe to handle
 * the event outside the machine check interrupt itself.
 */
static void machine_check_process_queued_event(struct irq_work *work)
{
	int index;
	struct machine_check_event *evt;

	add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);

	/*
	 * For now just print it to console.
	 * TODO: log this error event to FSP or nvram.
	 */
	while (local_paca->mce_info->mce_queue_count > 0) {
		index = local_paca->mce_info->mce_queue_count - 1;
		evt = &local_paca->mce_info->mce_event_queue[index];

		if (evt->error_type == MCE_ERROR_TYPE_UE &&
		    evt->u.ue_error.ignore_event) {
			local_paca->mce_info->mce_queue_count--;
			continue;
		}
		machine_check_print_event_info(evt, false, false);
		local_paca->mce_info->mce_queue_count--;
	}
}

void machine_check_print_event_info(struct machine_check_event *evt,
				    bool user_mode, bool in_guest)
{
	const char *level, *sevstr, *subtype, *err_type, *initiator;
	uint64_t ea = 0, pa = 0;
	int n = 0;
	char dar_str[50];
	char pa_str[50];
	static const char *mc_ue_types[] = {
		"Indeterminate",
		"Instruction fetch",
		"Page table walk ifetch",
		"Load/Store",
		"Page table walk Load/Store",
	};
	static const char *mc_slb_types[] = {
		"Indeterminate",
		"Parity",
		"Multihit",
	};
	static const char *mc_erat_types[] = {
		"Indeterminate",
		"Parity",
		"Multihit",
	};
	static const char *mc_tlb_types[] = {
		"Indeterminate",
		"Parity",
		"Multihit",
	};
	static const char *mc_user_types[] = {
		"Indeterminate",
		"tlbie(l) invalid",
		"scv invalid",
	};
	static const char *mc_ra_types[] = {
		"Indeterminate",
		"Instruction fetch (bad)",
		"Instruction fetch (foreign)",
		"Page table walk ifetch (bad)",
		"Page table walk ifetch (foreign)",
		"Load (bad)",
		"Store (bad)",
		"Page table walk Load/Store (bad)",
		"Page table walk Load/Store (foreign)",
		"Load/Store (foreign)",
	};
	static const char *mc_link_types[] = {
		"Indeterminate",
		"Instruction fetch (timeout)",
		"Page table walk ifetch (timeout)",
		"Load (timeout)",
		"Store (timeout)",
		"Page table walk Load/Store (timeout)",
	};
	static const char *mc_error_class[] = {
		"Unknown",
		"Hardware error",
		"Probable Hardware error (some chance of software cause)",
		"Software error",
		"Probable Software error (some chance of hardware cause)",
	};

	/* Print things out */
	if (evt->version != MCE_V1) {
		pr_err("Machine Check Exception, Unknown event version %d !\n",
		       evt->version);
		return;
	}
	switch (evt->severity) {
	case MCE_SEV_NO_ERROR:
		level = KERN_INFO;
		sevstr = "Harmless";
		break;
	case MCE_SEV_WARNING:
		level = KERN_WARNING;
		sevstr = "Warning";
		break;
	case MCE_SEV_SEVERE:
		level = KERN_ERR;
		sevstr = "Severe";
		break;
	case MCE_SEV_FATAL:
	default:
		level = KERN_ERR;
		sevstr = "Fatal";
		break;
	}

	switch (evt->initiator) {
	case MCE_INITIATOR_CPU:
		initiator = "CPU";
		break;
	case MCE_INITIATOR_PCI:
		initiator = "PCI";
		break;
	case MCE_INITIATOR_ISA:
		initiator = "ISA";
		break;
	case MCE_INITIATOR_MEMORY:
		initiator = "Memory";
		break;
	case MCE_INITIATOR_POWERMGM:
		initiator = "Power Management";
		break;
	case MCE_INITIATOR_UNKNOWN:
	default:
		initiator = "Unknown";
		break;
	}

	switch (evt->error_type) {
	case MCE_ERROR_TYPE_UE:
		err_type = "UE";
		subtype = evt->u.ue_error.ue_error_type <
			ARRAY_SIZE(mc_ue_types) ?
			mc_ue_types[evt->u.ue_error.ue_error_type]
			: "Unknown";
		if (evt->u.ue_error.effective_address_provided)
			ea = evt->u.ue_error.effective_address;
		if (evt->u.ue_error.physical_address_provided)
			pa = evt->u.ue_error.physical_address;
		break;
	case MCE_ERROR_TYPE_SLB:
		err_type = "SLB";
		subtype = evt->u.slb_error.slb_error_type <
			ARRAY_SIZE(mc_slb_types) ?
			mc_slb_types[evt->u.slb_error.slb_error_type]
			: "Unknown";
		if (evt->u.slb_error.effective_address_provided)
			ea = evt->u.slb_error.effective_address;
		break;
	case MCE_ERROR_TYPE_ERAT:
		err_type = "ERAT";
		subtype = evt->u.erat_error.erat_error_type <
			ARRAY_SIZE(mc_erat_types) ?
			mc_erat_types[evt->u.erat_error.erat_error_type]
			: "Unknown";
		if (evt->u.erat_error.effective_address_provided)
			ea = evt->u.erat_error.effective_address;
		break;
	case MCE_ERROR_TYPE_TLB:
		err_type = "TLB";
		subtype = evt->u.tlb_error.tlb_error_type <
			ARRAY_SIZE(mc_tlb_types) ?
			mc_tlb_types[evt->u.tlb_error.tlb_error_type]
			: "Unknown";
		if (evt->u.tlb_error.effective_address_provided)
			ea = evt->u.tlb_error.effective_address;
		break;
	case MCE_ERROR_TYPE_USER:
		err_type = "User";
		subtype = evt->u.user_error.user_error_type <
			ARRAY_SIZE(mc_user_types) ?
			mc_user_types[evt->u.user_error.user_error_type]
			: "Unknown";
		if (evt->u.user_error.effective_address_provided)
			ea = evt->u.user_error.effective_address;
		break;
	case MCE_ERROR_TYPE_RA:
		err_type = "Real address";
		subtype = evt->u.ra_error.ra_error_type <
			ARRAY_SIZE(mc_ra_types) ?
			mc_ra_types[evt->u.ra_error.ra_error_type]
			: "Unknown";
		if (evt->u.ra_error.effective_address_provided)
			ea = evt->u.ra_error.effective_address;
		break;
	case MCE_ERROR_TYPE_LINK:
		err_type = "Link";
		subtype = evt->u.link_error.link_error_type <
			ARRAY_SIZE(mc_link_types) ?
			mc_link_types[evt->u.link_error.link_error_type]
			: "Unknown";
		if (evt->u.link_error.effective_address_provided)
			ea = evt->u.link_error.effective_address;
		break;
	case MCE_ERROR_TYPE_DCACHE:
		err_type = "D-Cache";
		subtype = "Unknown";
		break;
	case MCE_ERROR_TYPE_ICACHE:
		err_type = "I-Cache";
		subtype = "Unknown";
		break;
	case MCE_ERROR_TYPE_UNKNOWN:
	default:
		err_type = "Unknown";
		subtype = "";
		break;
	}

	dar_str[0] = pa_str[0] = '\0';
	if (ea && evt->srr0 != ea) {
		/* Load/Store address */
		n = sprintf(dar_str, "DAR: %016llx ", ea);
		if (pa)
			sprintf(dar_str + n, "paddr: %016llx ", pa);
	} else if (pa) {
		sprintf(pa_str, " paddr: %016llx", pa);
	}

	printk("%sMCE: CPU%d: machine check (%s) %s %s %s %s[%s]\n",
		level, evt->cpu, sevstr, in_guest ? "Guest" : "",
		err_type, subtype, dar_str,
		evt->disposition == MCE_DISPOSITION_RECOVERED ?
		"Recovered" : "Not recovered");

	if (in_guest || user_mode) {
		printk("%sMCE: CPU%d: PID: %d Comm: %s %sNIP: [%016llx]%s\n",
			level, evt->cpu, current->pid, current->comm,
			in_guest ? "Guest " : "", evt->srr0, pa_str);
	} else {
		printk("%sMCE: CPU%d: NIP: [%016llx] %pS%s\n",
			level, evt->cpu, evt->srr0, (void *)evt->srr0, pa_str);
	}

	printk("%sMCE: CPU%d: Initiator %s\n", level, evt->cpu, initiator);

	subtype = evt->error_class < ARRAY_SIZE(mc_error_class) ?
		mc_error_class[evt->error_class] : "Unknown";
	printk("%sMCE: CPU%d: %s\n", level, evt->cpu, subtype);

#ifdef CONFIG_PPC_BOOK3S_64
	/* Display faulty slb contents for SLB errors. */
	if (evt->error_type == MCE_ERROR_TYPE_SLB && !in_guest)
		slb_dump_contents(local_paca->mce_faulty_slbs);
#endif
}
EXPORT_SYMBOL_GPL(machine_check_print_event_info);
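
/*
 * For reference, the printks above produce console lines of roughly this
 * shape for a recoverable kernel-mode UE (all values made up for
 * illustration):
 *
 *	MCE: CPU0: machine check (Severe)  UE Load/Store DAR: c000000001234560 paddr: 0000000012345000 [Not recovered]
 *	MCE: CPU0: NIP: [c000000000123456] some_function+0x26/0x70
 *	MCE: CPU0: Initiator CPU
 *	MCE: CPU0: Hardware error
 */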

/*
 * This function is called in real mode. Strictly no printk's please.
 *
 * regs->nip and regs->msr contain SRR0 and SRR1.
 */
DEFINE_INTERRUPT_HANDLER_NMI(machine_check_early)
{
	long handled = 0;

	hv_nmi_check_nonrecoverable(regs);

	/*
	 * See if the platform is capable of handling the machine check.
	 */
	if (ppc_md.machine_check_early)
		handled = ppc_md.machine_check_early(regs);

	return handled;
}
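
/*
 * Example (hypothetical sketch): a platform wires itself in through ppc_md,
 * typically in its define_machine() block, so machine_check_early() above
 * finds a handler to call. The platform and handler names are placeholders.
 *
 *	define_machine(my_platform) {
 *		.name			= "my-platform",
 *		...
 *		.machine_check_early	= my_platform_mce_early,
 *	};
 */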

/* Possible meanings for HMER_DEBUG_TRIG bit being set on POWER9 */
static enum {
	DTRIG_UNKNOWN,
	DTRIG_VECTOR_CI,	/* need to emulate vector CI load instr */
	DTRIG_SUSPEND_ESCAPE,	/* need to escape from TM suspend mode */
} hmer_debug_trig_function;

static int init_debug_trig_function(void)
{
	int pvr;
	struct device_node *cpun;
	struct property *prop = NULL;
	const char *str;

	/* First look in the device tree */
	preempt_disable();
	cpun = of_get_cpu_node(smp_processor_id(), NULL);
	if (cpun) {
		of_property_for_each_string(cpun, "ibm,hmi-special-triggers",
					    prop, str) {
			if (strcmp(str, "bit17-vector-ci-load") == 0)
				hmer_debug_trig_function = DTRIG_VECTOR_CI;
			else if (strcmp(str, "bit17-tm-suspend-escape") == 0)
				hmer_debug_trig_function = DTRIG_SUSPEND_ESCAPE;
		}
		of_node_put(cpun);
	}
	preempt_enable();

	/* If we found the property, don't look at PVR */
	if (prop)
		goto out;

	pvr = mfspr(SPRN_PVR);
	/* Check for POWER9 Nimbus (scale-out) */
	if ((PVR_VER(pvr) == PVR_POWER9) && (pvr & 0xe000) == 0) {
		/* DD2.2 and later */
		if ((pvr & 0xfff) >= 0x202)
			hmer_debug_trig_function = DTRIG_SUSPEND_ESCAPE;
		/* DD2.0 and DD2.1 - used for vector CI load emulation */
		else if ((pvr & 0xfff) >= 0x200)
			hmer_debug_trig_function = DTRIG_VECTOR_CI;
	}

 out:
	switch (hmer_debug_trig_function) {
	case DTRIG_VECTOR_CI:
		pr_debug("HMI debug trigger used for vector CI load\n");
		break;
	case DTRIG_SUSPEND_ESCAPE:
		pr_debug("HMI debug trigger used for TM suspend escape\n");
		break;
	default:
		break;
	}
	return 0;
}
__initcall(init_debug_trig_function);
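
/*
 * Worked example of the PVR check above (the PVR value is assumed for
 * illustration): PVR 0x004e1202 has PVR_VER() == 0x004e (PVR_POWER9),
 * (pvr & 0xe000) == 0 (scale-out Nimbus rather than scale-up), and
 * (pvr & 0xfff) == 0x202, i.e. DD2.2, so hmer_debug_trig_function
 * becomes DTRIG_SUSPEND_ESCAPE.
 */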

/*
 * Handle HMIs that occur as a result of a debug trigger.
 * Return values:
 * -1 means this is not a HMI cause that we know about
 *  0 means no further handling is required
 *  1 means further handling is required
 */
long hmi_handle_debugtrig(struct pt_regs *regs)
{
	unsigned long hmer = mfspr(SPRN_HMER);
	long ret = 0;

	/* HMER_DEBUG_TRIG bit is used for various workarounds on P9 */
	if (!((hmer & HMER_DEBUG_TRIG)
	      && hmer_debug_trig_function != DTRIG_UNKNOWN))
		return -1;

	hmer &= ~HMER_DEBUG_TRIG;
	/* HMER is a write-AND register */
	mtspr(SPRN_HMER, ~HMER_DEBUG_TRIG);

	switch (hmer_debug_trig_function) {
	case DTRIG_VECTOR_CI:
		/*
		 * Now to avoid problems with soft-disable we
		 * only do the emulation if we are coming from
		 * host user space
		 */
		if (regs && user_mode(regs))
			ret = local_paca->hmi_p9_special_emu = 1;

		break;

	default:
		break;
	}

	/*
	 * See if any other HMI causes remain to be handled
	 */
	if (hmer & mfspr(SPRN_HMEER))
		return -1;

	return ret;
}
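
/*
 * On the "write-AND" semantics used above: a store to HMER clears exactly
 * the zero bits of the stored value and preserves the rest, so writing
 * ~HMER_DEBUG_TRIG acknowledges only the debug trigger bit. Illustrative
 * values (assuming HMER_DEBUG_TRIG is the only bit in the mask):
 *
 *	HMER before:  DEBUG_TRIG | some_other_cause
 *	value stored: ~HMER_DEBUG_TRIG
 *	HMER after:   some_other_cause	(left pending for later handling)
 */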

/*
 * Return values: 0 or 1 as returned by hmi_handle_debugtrig() when it
 * recognises the cause, otherwise 1 once the platform handler has run.
 */
DEFINE_INTERRUPT_HANDLER_NMI(hmi_exception_realmode)
{
	int ret;

	local_paca->hmi_irqs++;

	ret = hmi_handle_debugtrig(regs);
	if (ret >= 0)
		return ret;

	wait_for_subcore_guest_exit();

	if (ppc_md.hmi_exception_early)
		ppc_md.hmi_exception_early(regs);

	wait_for_tb_resync();

	return 1;
}

void __init mce_init(void)
{
	struct mce_info *mce_info;
	u64 limit;
	int i;

	limit = min(ppc64_bolted_size(), ppc64_rma_size);
	for_each_possible_cpu(i) {
		mce_info = memblock_alloc_try_nid(sizeof(*mce_info),
						  __alignof__(*mce_info),
						  MEMBLOCK_LOW_LIMIT,
						  limit, cpu_to_node(i));
		if (!mce_info)
			goto err;
		paca_ptrs[i]->mce_info = mce_info;
	}
	return;
err:
	panic("Failed to allocate memory for MCE event data\n");
}
751