xref: /openbmc/linux/arch/powerpc/kernel/mce.c (revision b38269ec)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Machine check exception handling.
 *
 * Copyright 2013 IBM Corporation
 * Author: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
 */

#undef DEBUG
#define pr_fmt(fmt) "mce: " fmt

#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/percpu.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/extable.h>
#include <linux/ftrace.h>
#include <linux/memblock.h>
#include <linux/of.h>

#include <asm/interrupt.h>
#include <asm/machdep.h>
#include <asm/mce.h>
#include <asm/nmi.h>
#include <asm/asm-prototypes.h>

#include "setup.h"

static void machine_check_process_queued_event(struct irq_work *work);
static void machine_check_ue_irq_work(struct irq_work *work);
static void machine_check_ue_event(struct machine_check_event *evt);
static void machine_process_ue_event(struct work_struct *work);

static struct irq_work mce_event_process_work = {
	.func = machine_check_process_queued_event,
};

static struct irq_work mce_ue_event_irq_work = {
	.func = machine_check_ue_irq_work,
};

static DECLARE_WORK(mce_ue_event_work, machine_process_ue_event);

static BLOCKING_NOTIFIER_HEAD(mce_notifier_list);

int mce_register_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&mce_notifier_list, nb);
}
EXPORT_SYMBOL_GPL(mce_register_notifier);

int mce_unregister_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&mce_notifier_list, nb);
}
EXPORT_SYMBOL_GPL(mce_unregister_notifier);
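
/*
 * Editorial note, a minimal (hypothetical) consumer sketch: callers can
 * subscribe to the UE events queued below. The callback runs in process
 * context, from the blocking notifier chain invoked in
 * machine_process_ue_event(), with a struct machine_check_event * as data:
 *
 *	static int my_mce_cb(struct notifier_block *nb, unsigned long val,
 *			     void *data)
 *	{
 *		struct machine_check_event *evt = data;
 *
 *		// ... inspect evt ...
 *		return NOTIFY_OK;
 *	}
 *	static struct notifier_block my_mce_nb = { .notifier_call = my_mce_cb };
 *
 *	mce_register_notifier(&my_mce_nb);
 */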

static void mce_set_error_info(struct machine_check_event *mce,
			       struct mce_error_info *mce_err)
{
	mce->error_type = mce_err->error_type;
	switch (mce_err->error_type) {
	case MCE_ERROR_TYPE_UE:
		mce->u.ue_error.ue_error_type = mce_err->u.ue_error_type;
		break;
	case MCE_ERROR_TYPE_SLB:
		mce->u.slb_error.slb_error_type = mce_err->u.slb_error_type;
		break;
	case MCE_ERROR_TYPE_ERAT:
		mce->u.erat_error.erat_error_type = mce_err->u.erat_error_type;
		break;
	case MCE_ERROR_TYPE_TLB:
		mce->u.tlb_error.tlb_error_type = mce_err->u.tlb_error_type;
		break;
	case MCE_ERROR_TYPE_USER:
		mce->u.user_error.user_error_type = mce_err->u.user_error_type;
		break;
	case MCE_ERROR_TYPE_RA:
		mce->u.ra_error.ra_error_type = mce_err->u.ra_error_type;
		break;
	case MCE_ERROR_TYPE_LINK:
		mce->u.link_error.link_error_type = mce_err->u.link_error_type;
		break;
	case MCE_ERROR_TYPE_UNKNOWN:
	default:
		break;
	}
}

/*
 * Decode and save high level MCE information into the per-CPU buffer,
 * which is an array of machine_check_event structures.
 */
void save_mce_event(struct pt_regs *regs, long handled,
		    struct mce_error_info *mce_err,
		    uint64_t nip, uint64_t addr, uint64_t phys_addr)
{
	int index = local_paca->mce_info->mce_nest_count++;
	struct machine_check_event *mce;

	mce = &local_paca->mce_info->mce_event[index];
	/*
	 * Return if we don't have enough space to log the MCE event.
	 * mce_nest_count may go beyond MAX_MC_EVT, but that's OK;
	 * the check below prevents a buffer overrun.
	 */
	if (index >= MAX_MC_EVT)
		return;

	/* Populate generic machine check info */
	mce->version = MCE_V1;
	mce->srr0 = nip;
	mce->srr1 = regs->msr;
	mce->gpr3 = regs->gpr[3];
	mce->in_use = 1;
	mce->cpu = get_paca()->paca_index;

	/* Mark it recovered if we have handled it and MSR[RI] is set. */
	if (handled && (regs->msr & MSR_RI))
		mce->disposition = MCE_DISPOSITION_RECOVERED;
	else
		mce->disposition = MCE_DISPOSITION_NOT_RECOVERED;

	mce->initiator = mce_err->initiator;
	mce->severity = mce_err->severity;
	mce->sync_error = mce_err->sync_error;
	mce->error_class = mce_err->error_class;

	/*
	 * Populate the mce error_type and type-specific error_type.
	 */
	mce_set_error_info(mce, mce_err);
	if (mce->error_type == MCE_ERROR_TYPE_UE)
		mce->u.ue_error.ignore_event = mce_err->ignore_event;

	if (!addr)
		return;

	if (mce->error_type == MCE_ERROR_TYPE_TLB) {
		mce->u.tlb_error.effective_address_provided = true;
		mce->u.tlb_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_SLB) {
		mce->u.slb_error.effective_address_provided = true;
		mce->u.slb_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_ERAT) {
		mce->u.erat_error.effective_address_provided = true;
		mce->u.erat_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_USER) {
		mce->u.user_error.effective_address_provided = true;
		mce->u.user_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_RA) {
		mce->u.ra_error.effective_address_provided = true;
		mce->u.ra_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_LINK) {
		mce->u.link_error.effective_address_provided = true;
		mce->u.link_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_UE) {
		mce->u.ue_error.effective_address_provided = true;
		mce->u.ue_error.effective_address = addr;
		if (phys_addr != ULONG_MAX) {
			mce->u.ue_error.physical_address_provided = true;
			mce->u.ue_error.physical_address = phys_addr;
			machine_check_ue_event(mce);
		}
	}
}
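
/*
 * Note: the per-CPU mce_event[] array behaves as a small stack. Each
 * save_mce_event() pushes (mce_nest_count++) so that machine checks which
 * nest before an earlier event has been consumed get their own slot, and
 * get_mce_event() reads, and optionally pops, the most recent entry.
 */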

/*
 * get_mce_event:
 *	mce	Pointer to machine_check_event structure to be filled.
 *	release	Flag to indicate whether to free the event slot or not.
 *		0 = do not release the mce event. Caller will invoke
 *		    release_mce_event() once the event has been consumed.
 *		1 = release the slot.
 *
 *	return	1 = success
 *		0 = failure
 *
 * get_mce_event() will be called by platform-specific machine check
 * handler routines and by KVM.
 * When we call get_mce_event(), we are still in interrupt context and
 * preemption will not happen until the ret_from_except() routine
 * is called.
 */
int get_mce_event(struct machine_check_event *mce, bool release)
{
	int index = local_paca->mce_info->mce_nest_count - 1;
	struct machine_check_event *mc_evt;
	int ret = 0;

	/* Sanity check */
	if (index < 0)
		return ret;

	/* Check if we have MCE info to process. */
	if (index < MAX_MC_EVT) {
		mc_evt = &local_paca->mce_info->mce_event[index];
		/* Copy the event structure and release the original */
		if (mce)
			*mce = *mc_evt;
		if (release)
			mc_evt->in_use = 0;
		ret = 1;
	}
	/* Decrement the count to free the slot. */
	if (release)
		local_paca->mce_info->mce_nest_count--;

	return ret;
}

void release_mce_event(void)
{
	get_mce_event(NULL, true);
}
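
/*
 * Typical consumption pattern (a sketch; MCE_EVENT_RELEASE and
 * MCE_EVENT_DONTRELEASE are the bool aliases used with this API):
 *
 *	struct machine_check_event evt;
 *
 *	if (get_mce_event(&evt, MCE_EVENT_DONTRELEASE)) {
 *		// ... inspect evt ...
 *		release_mce_event();
 *	}
 */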

static void machine_check_ue_irq_work(struct irq_work *work)
{
	schedule_work(&mce_ue_event_work);
}
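
/*
 * Note on the two-stage deferral above: the machine check is taken in a
 * context where almost nothing is safe, so the handler first raises
 * irq_work (safe from NMI-like context), and the irq_work handler then
 * schedules a workqueue item, because the eventual consumer,
 * memory_failure(), may sleep and so must run in process context.
 */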

/*
 * Queue up the UE event so it can be handled later.
 */
static void machine_check_ue_event(struct machine_check_event *evt)
{
	int index;

	index = local_paca->mce_info->mce_ue_count++;
	/* If queue is full, just return for now. */
	if (index >= MAX_MC_EVT) {
		local_paca->mce_info->mce_ue_count--;
		return;
	}
	memcpy(&local_paca->mce_info->mce_ue_event_queue[index],
	       evt, sizeof(*evt));

	/* Queue irq work to process this event later. */
	irq_work_queue(&mce_ue_event_irq_work);
}

/*
 * Queue up the MCE event so it can be reported and handled later.
 */
void machine_check_queue_event(void)
{
	int index;
	struct machine_check_event evt;

	if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
		return;

	index = local_paca->mce_info->mce_queue_count++;
	/* If queue is full, just return for now. */
	if (index >= MAX_MC_EVT) {
		local_paca->mce_info->mce_queue_count--;
		return;
	}
	memcpy(&local_paca->mce_info->mce_event_queue[index],
	       &evt, sizeof(evt));

	/* Queue irq work to process this event later. */
	irq_work_queue(&mce_event_process_work);
}
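
/*
 * If the faulting access has a kernel exception table entry, redirect the
 * interrupted code to its fixup handler and mark the event so the deferred
 * paths skip reporting it: the fixup path is expected to do its own error
 * handling and reporting.
 */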
void mce_common_process_ue(struct pt_regs *regs,
			   struct mce_error_info *mce_err)
{
	const struct exception_table_entry *entry;

	entry = search_kernel_exception_table(regs->nip);
	if (entry) {
		mce_err->ignore_event = true;
		regs_set_return_ip(regs, extable_fixup(entry));
	}
}

/*
 * Process pending UE events from the per-CPU UE event queue. Runs in
 * process context from the workqueue (mce_ue_event_work).
 */
static void machine_process_ue_event(struct work_struct *work)
{
	int index;
	struct machine_check_event *evt;

	while (local_paca->mce_info->mce_ue_count > 0) {
		index = local_paca->mce_info->mce_ue_count - 1;
		evt = &local_paca->mce_info->mce_ue_event_queue[index];
		blocking_notifier_call_chain(&mce_notifier_list, 0, evt);
#ifdef CONFIG_MEMORY_FAILURE
		/*
		 * This should probably be queued elsewhere, but oh well.
		 *
		 * Don't report this machine check because the caller has
		 * asked us to ignore the event; it has a fixup handler which
		 * will do the appropriate error handling and reporting.
		 */
		if (evt->error_type == MCE_ERROR_TYPE_UE) {
			if (evt->u.ue_error.ignore_event) {
				local_paca->mce_info->mce_ue_count--;
				continue;
			}

			if (evt->u.ue_error.physical_address_provided) {
				unsigned long pfn;

				pfn = evt->u.ue_error.physical_address >>
					PAGE_SHIFT;
				memory_failure(pfn, 0);
			} else
				pr_warn("Failed to identify bad address from where the uncorrectable error (UE) was generated\n");
		}
#endif
		local_paca->mce_info->mce_ue_count--;
	}
}

/*
 * Process pending MCE events from the MCE event queue. Runs from irq_work
 * context (mce_event_process_work), raised by machine_check_queue_event().
 */
static void machine_check_process_queued_event(struct irq_work *work)
{
	int index;
	struct machine_check_event *evt;

	add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);

	/*
	 * For now just print it to console.
	 * TODO: log this error event to FSP or nvram.
	 */
	while (local_paca->mce_info->mce_queue_count > 0) {
		index = local_paca->mce_info->mce_queue_count - 1;
		evt = &local_paca->mce_info->mce_event_queue[index];

		if (evt->error_type == MCE_ERROR_TYPE_UE &&
		    evt->u.ue_error.ignore_event) {
			local_paca->mce_info->mce_queue_count--;
			continue;
		}
		machine_check_print_event_info(evt, false, false);
		local_paca->mce_info->mce_queue_count--;
	}
}

void machine_check_print_event_info(struct machine_check_event *evt,
				    bool user_mode, bool in_guest)
{
	const char *level, *sevstr, *subtype, *err_type, *initiator;
	uint64_t ea = 0, pa = 0;
	int n = 0;
	char dar_str[50];
	char pa_str[50];
	static const char *mc_ue_types[] = {
		"Indeterminate",
		"Instruction fetch",
		"Page table walk ifetch",
		"Load/Store",
		"Page table walk Load/Store",
	};
	static const char *mc_slb_types[] = {
		"Indeterminate",
		"Parity",
		"Multihit",
	};
	static const char *mc_erat_types[] = {
		"Indeterminate",
		"Parity",
		"Multihit",
	};
	static const char *mc_tlb_types[] = {
		"Indeterminate",
		"Parity",
		"Multihit",
	};
	static const char *mc_user_types[] = {
		"Indeterminate",
		"tlbie(l) invalid",
		"scv invalid",
	};
	static const char *mc_ra_types[] = {
		"Indeterminate",
		"Instruction fetch (bad)",
		"Instruction fetch (foreign)",
		"Page table walk ifetch (bad)",
		"Page table walk ifetch (foreign)",
		"Load (bad)",
		"Store (bad)",
		"Page table walk Load/Store (bad)",
		"Page table walk Load/Store (foreign)",
		"Load/Store (foreign)",
	};
	static const char *mc_link_types[] = {
		"Indeterminate",
		"Instruction fetch (timeout)",
		"Page table walk ifetch (timeout)",
		"Load (timeout)",
		"Store (timeout)",
		"Page table walk Load/Store (timeout)",
	};
	static const char *mc_error_class[] = {
		"Unknown",
		"Hardware error",
		"Probable Hardware error (some chance of software cause)",
		"Software error",
		"Probable Software error (some chance of hardware cause)",
	};

	/* Print things out */
	if (evt->version != MCE_V1) {
		pr_err("Machine Check Exception, Unknown event version %d!\n",
		       evt->version);
		return;
	}
	switch (evt->severity) {
	case MCE_SEV_NO_ERROR:
		level = KERN_INFO;
		sevstr = "Harmless";
		break;
	case MCE_SEV_WARNING:
		level = KERN_WARNING;
		sevstr = "Warning";
		break;
	case MCE_SEV_SEVERE:
		level = KERN_ERR;
		sevstr = "Severe";
		break;
	case MCE_SEV_FATAL:
	default:
		level = KERN_ERR;
		sevstr = "Fatal";
		break;
	}

	switch (evt->initiator) {
	case MCE_INITIATOR_CPU:
		initiator = "CPU";
		break;
	case MCE_INITIATOR_PCI:
		initiator = "PCI";
		break;
	case MCE_INITIATOR_ISA:
		initiator = "ISA";
		break;
	case MCE_INITIATOR_MEMORY:
		initiator = "Memory";
		break;
	case MCE_INITIATOR_POWERMGM:
		initiator = "Power Management";
		break;
	case MCE_INITIATOR_UNKNOWN:
	default:
		initiator = "Unknown";
		break;
	}

	switch (evt->error_type) {
	case MCE_ERROR_TYPE_UE:
		err_type = "UE";
		subtype = evt->u.ue_error.ue_error_type <
			ARRAY_SIZE(mc_ue_types) ?
			mc_ue_types[evt->u.ue_error.ue_error_type]
			: "Unknown";
		if (evt->u.ue_error.effective_address_provided)
			ea = evt->u.ue_error.effective_address;
		if (evt->u.ue_error.physical_address_provided)
			pa = evt->u.ue_error.physical_address;
		break;
	case MCE_ERROR_TYPE_SLB:
		err_type = "SLB";
		subtype = evt->u.slb_error.slb_error_type <
			ARRAY_SIZE(mc_slb_types) ?
			mc_slb_types[evt->u.slb_error.slb_error_type]
			: "Unknown";
		if (evt->u.slb_error.effective_address_provided)
			ea = evt->u.slb_error.effective_address;
		break;
	case MCE_ERROR_TYPE_ERAT:
		err_type = "ERAT";
		subtype = evt->u.erat_error.erat_error_type <
			ARRAY_SIZE(mc_erat_types) ?
			mc_erat_types[evt->u.erat_error.erat_error_type]
			: "Unknown";
		if (evt->u.erat_error.effective_address_provided)
			ea = evt->u.erat_error.effective_address;
		break;
	case MCE_ERROR_TYPE_TLB:
		err_type = "TLB";
		subtype = evt->u.tlb_error.tlb_error_type <
			ARRAY_SIZE(mc_tlb_types) ?
			mc_tlb_types[evt->u.tlb_error.tlb_error_type]
			: "Unknown";
		if (evt->u.tlb_error.effective_address_provided)
			ea = evt->u.tlb_error.effective_address;
		break;
	case MCE_ERROR_TYPE_USER:
		err_type = "User";
		subtype = evt->u.user_error.user_error_type <
			ARRAY_SIZE(mc_user_types) ?
			mc_user_types[evt->u.user_error.user_error_type]
			: "Unknown";
		if (evt->u.user_error.effective_address_provided)
			ea = evt->u.user_error.effective_address;
		break;
	case MCE_ERROR_TYPE_RA:
		err_type = "Real address";
		subtype = evt->u.ra_error.ra_error_type <
			ARRAY_SIZE(mc_ra_types) ?
			mc_ra_types[evt->u.ra_error.ra_error_type]
			: "Unknown";
		if (evt->u.ra_error.effective_address_provided)
			ea = evt->u.ra_error.effective_address;
		break;
	case MCE_ERROR_TYPE_LINK:
		err_type = "Link";
		subtype = evt->u.link_error.link_error_type <
			ARRAY_SIZE(mc_link_types) ?
			mc_link_types[evt->u.link_error.link_error_type]
			: "Unknown";
		if (evt->u.link_error.effective_address_provided)
			ea = evt->u.link_error.effective_address;
		break;
	case MCE_ERROR_TYPE_DCACHE:
		err_type = "D-Cache";
		subtype = "Unknown";
		break;
	case MCE_ERROR_TYPE_ICACHE:
		err_type = "I-Cache";
		subtype = "Unknown";
		break;
	case MCE_ERROR_TYPE_UNKNOWN:
	default:
		err_type = "Unknown";
		subtype = "";
		break;
	}

	dar_str[0] = pa_str[0] = '\0';
	if (ea && evt->srr0 != ea) {
		/* Load/Store address */
		n = sprintf(dar_str, "DAR: %016llx ", ea);
		if (pa)
			sprintf(dar_str + n, "paddr: %016llx ", pa);
	} else if (pa) {
		sprintf(pa_str, " paddr: %016llx", pa);
	}

	printk("%sMCE: CPU%d: machine check (%s) %s %s %s %s[%s]\n",
		level, evt->cpu, sevstr, in_guest ? "Guest" : "",
		err_type, subtype, dar_str,
		evt->disposition == MCE_DISPOSITION_RECOVERED ?
		"Recovered" : "Not recovered");

	if (in_guest || user_mode) {
		printk("%sMCE: CPU%d: PID: %d Comm: %s %sNIP: [%016llx]%s\n",
			level, evt->cpu, current->pid, current->comm,
			in_guest ? "Guest " : "", evt->srr0, pa_str);
	} else {
		printk("%sMCE: CPU%d: NIP: [%016llx] %pS%s\n",
			level, evt->cpu, evt->srr0, (void *)evt->srr0, pa_str);
	}

	printk("%sMCE: CPU%d: Initiator %s\n", level, evt->cpu, initiator);

	subtype = evt->error_class < ARRAY_SIZE(mc_error_class) ?
		mc_error_class[evt->error_class] : "Unknown";
	printk("%sMCE: CPU%d: %s\n", level, evt->cpu, subtype);

#ifdef CONFIG_PPC_BOOK3S_64
	/* Display faulty slb contents for SLB errors. */
	if (evt->error_type == MCE_ERROR_TYPE_SLB && !in_guest)
		slb_dump_contents(local_paca->mce_faulty_slbs);
#endif
}
EXPORT_SYMBOL_GPL(machine_check_print_event_info);
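
/*
 * Illustrative (not captured from a real system) console output for a
 * recovered SLB multihit, given the format strings above:
 *
 *	MCE: CPU0: machine check (Warning)  SLB Multihit DAR: c000000012345678 [Recovered]
 *	MCE: CPU0: NIP: [c000000000123456] some_function+0x26/0x80
 *	MCE: CPU0: Initiator CPU
 *	MCE: CPU0: Probable Hardware error (some chance of software cause)
 */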

/*
 * This function is called in real mode. Strictly no printks, please.
 *
 * regs->nip and regs->msr contain srr0 and srr1.
 */
DEFINE_INTERRUPT_HANDLER_NMI(machine_check_early)
{
	long handled = 0;

	hv_nmi_check_nonrecoverable(regs);

	/*
	 * See if platform is capable of handling machine check.
	 */
	if (ppc_md.machine_check_early)
		handled = ppc_md.machine_check_early(regs);

	return handled;
}
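
/*
 * Note: a nonzero return above means the platform hook recovered the event.
 * That "handled" result feeds save_mce_event()'s disposition logic
 * (MCE_DISPOSITION_RECOVERED vs NOT_RECOVERED), assuming the platform hook
 * routes through save_mce_event() as the generic code above expects.
 */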

/* Possible meanings for HMER_DEBUG_TRIG bit being set on POWER9 */
static enum {
	DTRIG_UNKNOWN,
	DTRIG_VECTOR_CI,	/* need to emulate vector CI load instr */
	DTRIG_SUSPEND_ESCAPE,	/* need to escape from TM suspend mode */
} hmer_debug_trig_function;

static int init_debug_trig_function(void)
{
	int pvr;
	struct device_node *cpun;
	struct property *prop = NULL;
	const char *str;

	/* First look in the device tree */
	preempt_disable();
	cpun = of_get_cpu_node(smp_processor_id(), NULL);
	if (cpun) {
		of_property_for_each_string(cpun, "ibm,hmi-special-triggers",
					    prop, str) {
			if (strcmp(str, "bit17-vector-ci-load") == 0)
				hmer_debug_trig_function = DTRIG_VECTOR_CI;
			else if (strcmp(str, "bit17-tm-suspend-escape") == 0)
				hmer_debug_trig_function = DTRIG_SUSPEND_ESCAPE;
		}
		of_node_put(cpun);
	}
	preempt_enable();

	/* If we found the property, don't look at PVR */
	if (prop)
		goto out;

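	/*
	 * PVR note: PVR_VER() selects the processor family; the comparisons
	 * below assume the low 12 bits of the POWER9 PVR encode the chip
	 * revision (e.g. 0x202 = DD2.2).
	 */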
	pvr = mfspr(SPRN_PVR);
	/* Check for POWER9 Nimbus (scale-out) */
	if ((PVR_VER(pvr) == PVR_POWER9) && (pvr & 0xe000) == 0) {
		/* DD2.2 and later */
		if ((pvr & 0xfff) >= 0x202)
			hmer_debug_trig_function = DTRIG_SUSPEND_ESCAPE;
		/* DD2.0 and DD2.1 - used for vector CI load emulation */
		else if ((pvr & 0xfff) >= 0x200)
			hmer_debug_trig_function = DTRIG_VECTOR_CI;
	}

 out:
	switch (hmer_debug_trig_function) {
	case DTRIG_VECTOR_CI:
		pr_debug("HMI debug trigger used for vector CI load\n");
		break;
	case DTRIG_SUSPEND_ESCAPE:
		pr_debug("HMI debug trigger used for TM suspend escape\n");
		break;
	default:
		break;
	}
	return 0;
}
__initcall(init_debug_trig_function);

/*
 * Handle HMIs that occur as a result of a debug trigger.
 * Return values:
 * -1 means this is not an HMI cause that we know about
 *  0 means no further handling is required
 *  1 means further handling is required
 */
long hmi_handle_debugtrig(struct pt_regs *regs)
{
	unsigned long hmer = mfspr(SPRN_HMER);
	long ret = 0;

	/* HMER_DEBUG_TRIG bit is used for various workarounds on P9 */
	if (!((hmer & HMER_DEBUG_TRIG)
	      && hmer_debug_trig_function != DTRIG_UNKNOWN))
		return -1;

	hmer &= ~HMER_DEBUG_TRIG;
	/* HMER is a write-AND register */
	mtspr(SPRN_HMER, ~HMER_DEBUG_TRIG);
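	/*
	 * Write-AND semantics mean the new HMER value is (old & written),
	 * so writing ~HMER_DEBUG_TRIG clears only that bit and leaves
	 * every other bit unchanged.
	 */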

	switch (hmer_debug_trig_function) {
	case DTRIG_VECTOR_CI:
		/*
		 * To avoid problems with soft-disable we only do the
		 * emulation if we are coming from host user space.
		 */
		if (regs && user_mode(regs))
			ret = local_paca->hmi_p9_special_emu = 1;

		break;

	default:
		break;
	}

	/*
	 * See if any other HMI causes remain to be handled.
	 */
	if (hmer & mfspr(SPRN_HMEER))
		return -1;

	return ret;
}

/*
 * Return values:
 *  0 = no further handling is required
 *  1 = further handling is required
 */
DEFINE_INTERRUPT_HANDLER_NMI(hmi_exception_realmode)
{
	int ret;

	local_paca->hmi_irqs++;

	ret = hmi_handle_debugtrig(regs);
	if (ret >= 0)
		return ret;

	wait_for_subcore_guest_exit();

	if (ppc_md.hmi_exception_early)
		ppc_md.hmi_exception_early(regs);

	wait_for_tb_resync();

	return 1;
}

void __init mce_init(void)
{
	struct mce_info *mce_info;
	u64 limit;
	int i;

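	/*
	 * Keep the per-CPU buffers below min(bolted size, RMA size):
	 * the machine check handler runs in real mode (see
	 * machine_check_early() above), so this memory needs to be
	 * accessible without translation.
	 */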
	limit = min(ppc64_bolted_size(), ppc64_rma_size);
	for_each_possible_cpu(i) {
		mce_info = memblock_alloc_try_nid(sizeof(*mce_info),
						  __alignof__(*mce_info),
						  MEMBLOCK_LOW_LIMIT,
						  limit, cpu_to_node(i));
		if (!mce_info)
			goto err;
		paca_ptrs[i]->mce_info = mce_info;
	}
	return;
err:
	panic("Failed to allocate memory for MCE event data\n");
}