xref: /openbmc/linux/arch/powerpc/kernel/mce.c (revision 0cd08b10)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Machine check exception handling.
4  *
5  * Copyright 2013 IBM Corporation
6  * Author: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
7  */
8 
9 #undef DEBUG
10 #define pr_fmt(fmt) "mce: " fmt
11 
12 #include <linux/hardirq.h>
13 #include <linux/types.h>
14 #include <linux/ptrace.h>
15 #include <linux/percpu.h>
16 #include <linux/export.h>
17 #include <linux/irq_work.h>
18 #include <linux/extable.h>
19 #include <linux/ftrace.h>
20 
21 #include <asm/machdep.h>
22 #include <asm/mce.h>
23 #include <asm/nmi.h>
24 
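/*
 * Machine checks can nest (another MCE can be taken while an earlier one
 * is still being processed), so each CPU keeps a small stack of up to
 * MAX_MC_EVT events, indexed by mce_nest_count.
 */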
25 static DEFINE_PER_CPU(int, mce_nest_count);
26 static DEFINE_PER_CPU(struct machine_check_event[MAX_MC_EVT], mce_event);
27 
28 /* Queue for delayed MCE events. */
29 static DEFINE_PER_CPU(int, mce_queue_count);
30 static DEFINE_PER_CPU(struct machine_check_event[MAX_MC_EVT], mce_event_queue);
31 
32 /* Queue for delayed MCE UE events. */
33 static DEFINE_PER_CPU(int, mce_ue_count);
34 static DEFINE_PER_CPU(struct machine_check_event[MAX_MC_EVT],
35 					mce_ue_event_queue);
36 
37 static void machine_check_process_queued_event(struct irq_work *work);
38 static void machine_check_ue_irq_work(struct irq_work *work);
39 static void machine_check_ue_event(struct machine_check_event *evt);
40 static void machine_process_ue_event(struct work_struct *work);
41 
42 static struct irq_work mce_event_process_work = {
43 	.func = machine_check_process_queued_event,
44 };
45 
46 static struct irq_work mce_ue_event_irq_work = {
47 	.func = machine_check_ue_irq_work,
48 };
49 
50 DECLARE_WORK(mce_ue_event_work, machine_process_ue_event);
51 
52 static void mce_set_error_info(struct machine_check_event *mce,
53 			       struct mce_error_info *mce_err)
54 {
55 	mce->error_type = mce_err->error_type;
56 	switch (mce_err->error_type) {
57 	case MCE_ERROR_TYPE_UE:
58 		mce->u.ue_error.ue_error_type = mce_err->u.ue_error_type;
59 		break;
60 	case MCE_ERROR_TYPE_SLB:
61 		mce->u.slb_error.slb_error_type = mce_err->u.slb_error_type;
62 		break;
63 	case MCE_ERROR_TYPE_ERAT:
64 		mce->u.erat_error.erat_error_type = mce_err->u.erat_error_type;
65 		break;
66 	case MCE_ERROR_TYPE_TLB:
67 		mce->u.tlb_error.tlb_error_type = mce_err->u.tlb_error_type;
68 		break;
69 	case MCE_ERROR_TYPE_USER:
70 		mce->u.user_error.user_error_type = mce_err->u.user_error_type;
71 		break;
72 	case MCE_ERROR_TYPE_RA:
73 		mce->u.ra_error.ra_error_type = mce_err->u.ra_error_type;
74 		break;
75 	case MCE_ERROR_TYPE_LINK:
76 		mce->u.link_error.link_error_type = mce_err->u.link_error_type;
77 		break;
78 	case MCE_ERROR_TYPE_UNKNOWN:
79 	default:
80 		break;
81 	}
82 }
83 
84 /*
85  * Decode and save high level MCE information into the per-CPU buffer,
86  * which is an array of machine_check_event structures.
87  */
88 void save_mce_event(struct pt_regs *regs, long handled,
89 		    struct mce_error_info *mce_err,
90 		    uint64_t nip, uint64_t addr, uint64_t phys_addr)
91 {
92 	int index = __this_cpu_inc_return(mce_nest_count) - 1;
93 	struct machine_check_event *mce = this_cpu_ptr(&mce_event[index]);
94 
95 	/*
96 	 * Return if we don't have enough space to log the MCE event.
97 	 * mce_nest_count may go beyond MAX_MC_EVT but that's ok;
98 	 * the check below prevents a buffer overrun.
99 	 */
100 	if (index >= MAX_MC_EVT)
101 		return;
102 
103 	/* Populate generic machine check info */
104 	mce->version = MCE_V1;
105 	mce->srr0 = nip;
106 	mce->srr1 = regs->msr;
107 	mce->gpr3 = regs->gpr[3];
108 	mce->in_use = 1;
109 	mce->cpu = get_paca()->paca_index;
110 
111 	/* Mark it recovered if we have handled it and MSR(RI=1). */
112 	if (handled && (regs->msr & MSR_RI))
113 		mce->disposition = MCE_DISPOSITION_RECOVERED;
114 	else
115 		mce->disposition = MCE_DISPOSITION_NOT_RECOVERED;
116 
117 	mce->initiator = mce_err->initiator;
118 	mce->severity = mce_err->severity;
119 	mce->sync_error = mce_err->sync_error;
120 	mce->error_class = mce_err->error_class;
121 
122 	/*
123 	 * Populate the mce error_type and type-specific error_type.
124 	 */
125 	mce_set_error_info(mce, mce_err);
126 
127 	if (!addr)
128 		return;
129 
130 	if (mce->error_type == MCE_ERROR_TYPE_TLB) {
131 		mce->u.tlb_error.effective_address_provided = true;
132 		mce->u.tlb_error.effective_address = addr;
133 	} else if (mce->error_type == MCE_ERROR_TYPE_SLB) {
134 		mce->u.slb_error.effective_address_provided = true;
135 		mce->u.slb_error.effective_address = addr;
136 	} else if (mce->error_type == MCE_ERROR_TYPE_ERAT) {
137 		mce->u.erat_error.effective_address_provided = true;
138 		mce->u.erat_error.effective_address = addr;
139 	} else if (mce->error_type == MCE_ERROR_TYPE_USER) {
140 		mce->u.user_error.effective_address_provided = true;
141 		mce->u.user_error.effective_address = addr;
142 	} else if (mce->error_type == MCE_ERROR_TYPE_RA) {
143 		mce->u.ra_error.effective_address_provided = true;
144 		mce->u.ra_error.effective_address = addr;
145 	} else if (mce->error_type == MCE_ERROR_TYPE_LINK) {
146 		mce->u.link_error.effective_address_provided = true;
147 		mce->u.link_error.effective_address = addr;
148 	} else if (mce->error_type == MCE_ERROR_TYPE_UE) {
149 		mce->u.ue_error.effective_address_provided = true;
150 		mce->u.ue_error.effective_address = addr;
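		/* phys_addr == ULONG_MAX means no physical address is known */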
151 		if (phys_addr != ULONG_MAX) {
152 			mce->u.ue_error.physical_address_provided = true;
153 			mce->u.ue_error.physical_address = phys_addr;
154 			mce->u.ue_error.ignore_event = mce_err->ignore_event;
155 			machine_check_ue_event(mce);
156 		}
157 	}
158 	return;
159 }
160 
161 /*
162  * get_mce_event:
163  *	mce	Pointer to machine_check_event structure to be filled.
164  *	release Flag to indicate whether to free the event slot or not.
165  *		0 = do not release the mce event. Caller will invoke
166  *		    release_mce_event() once the event has been consumed.
167  *		1 = release the slot.
168  *
169  *	return	1 = success
170  *		0 = failure
171  *
172  * get_mce_event() will be called by the platform-specific machine check
173  * handler routine and by KVM.
174  * When we call get_mce_event(), we are still in interrupt context and
175  * preemption will not be scheduled until the ret_from_except() routine
176  * is called.
177  */
178 int get_mce_event(struct machine_check_event *mce, bool release)
179 {
180 	int index = __this_cpu_read(mce_nest_count) - 1;
181 	struct machine_check_event *mc_evt;
182 	int ret = 0;
183 
184 	/* Sanity check */
185 	if (index < 0)
186 		return ret;
187 
188 	/* Check if we have MCE info to process. */
189 	if (index < MAX_MC_EVT) {
190 		mc_evt = this_cpu_ptr(&mce_event[index]);
191 		/* Copy the event structure and release the original */
192 		if (mce)
193 			*mce = *mc_evt;
194 		if (release)
195 			mc_evt->in_use = 0;
196 		ret = 1;
197 	}
198 	/* Decrement the count to free the slot. */
199 	if (release)
200 		__this_cpu_dec(mce_nest_count);
201 
202 	return ret;
203 }
204 
205 void release_mce_event(void)
206 {
207 	get_mce_event(NULL, true);
208 }
209 
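/*
 * irq_work runs in interrupt context, but handling a UE may need to call
 * memory_failure(), which can sleep, so bounce to a workqueue from here.
 */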
210 static void machine_check_ue_irq_work(struct irq_work *work)
211 {
212 	schedule_work(&mce_ue_event_work);
213 }
214 
215 /*
216  * Queue up the MCE UE event which can then be handled later.
217  */
218 static void machine_check_ue_event(struct machine_check_event *evt)
219 {
220 	int index;
221 
222 	index = __this_cpu_inc_return(mce_ue_count) - 1;
223 	/* If queue is full, just return for now. */
224 	if (index >= MAX_MC_EVT) {
225 		__this_cpu_dec(mce_ue_count);
226 		return;
227 	}
228 	memcpy(this_cpu_ptr(&mce_ue_event_queue[index]), evt, sizeof(*evt));
229 
230 	/* Queue work to process this event later. */
231 	irq_work_queue(&mce_ue_event_irq_work);
232 }
233 
234 /*
235  * Queue up the MCE event so that it can be handled later.
236  */
237 void machine_check_queue_event(void)
238 {
239 	int index;
240 	struct machine_check_event evt;
241 
242 	if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
243 		return;
244 
245 	index = __this_cpu_inc_return(mce_queue_count) - 1;
246 	/* If queue is full, just return for now. */
247 	if (index >= MAX_MC_EVT) {
248 		__this_cpu_dec(mce_queue_count);
249 		return;
250 	}
251 	memcpy(this_cpu_ptr(&mce_event_queue[index]), &evt, sizeof(evt));
252 
253 	/* Queue irq work to process this event later. */
254 	irq_work_queue(&mce_event_process_work);
255 }
256 
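/*
 * If the UE was taken on a kernel instruction that has an exception table
 * fixup, redirect NIP to the fixup and flag the event to be ignored, since
 * the fixup path will do its own error handling and reporting.
 */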
257 void mce_common_process_ue(struct pt_regs *regs,
258 			   struct mce_error_info *mce_err)
259 {
260 	const struct exception_table_entry *entry;
261 
262 	entry = search_kernel_exception_table(regs->nip);
263 	if (entry) {
264 		mce_err->ignore_event = true;
265 		regs->nip = extable_fixup(entry);
266 	}
267 }
268 
269 /*
270  * Process pending UE events from the MCE UE event queue. This function is
271  * run from the workqueue scheduled by machine_check_ue_irq_work().
272  */
273 static void machine_process_ue_event(struct work_struct *work)
274 {
275 	int index;
276 	struct machine_check_event *evt;
277 
278 	while (__this_cpu_read(mce_ue_count) > 0) {
279 		index = __this_cpu_read(mce_ue_count) - 1;
280 		evt = this_cpu_ptr(&mce_ue_event_queue[index]);
281 #ifdef CONFIG_MEMORY_FAILURE
282 		/*
283 		 * This should probably be queued elsewhere, but
284 		 * oh well.
285 		 *
286 		 * Don't report this machine check because the caller has
287 		 * asked us to ignore the event; it has a fixup handler which
288 		 * will do the appropriate error handling and reporting.
289 		 */
290 		if (evt->error_type == MCE_ERROR_TYPE_UE) {
291 			if (evt->u.ue_error.ignore_event) {
292 				__this_cpu_dec(mce_ue_count);
293 				continue;
294 			}
295 
296 			if (evt->u.ue_error.physical_address_provided) {
297 				unsigned long pfn;
298 
299 				pfn = evt->u.ue_error.physical_address >>
300 					PAGE_SHIFT;
301 				memory_failure(pfn, 0);
302 			} else
303 				pr_warn("Failed to identify bad address from "
304 					"where the uncorrectable error (UE) "
305 					"was generated\n");
306 		}
307 #endif
308 		__this_cpu_dec(mce_ue_count);
309 	}
310 }
311 /*
312  * Process pending MCE events from the MCE event queue. This function is
313  * invoked from irq_work context.
314  */
315 static void machine_check_process_queued_event(struct irq_work *work)
316 {
317 	int index;
318 	struct machine_check_event *evt;
319 
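	/* Taint the kernel: lockdep state can no longer be trusted after an MCE. */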
320 	add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
321 
322 	/*
323 	 * For now just print it to the console.
324 	 * TODO: log this error event to FSP or nvram.
325 	 */
326 	while (__this_cpu_read(mce_queue_count) > 0) {
327 		index = __this_cpu_read(mce_queue_count) - 1;
328 		evt = this_cpu_ptr(&mce_event_queue[index]);
329 
330 		if (evt->error_type == MCE_ERROR_TYPE_UE &&
331 		    evt->u.ue_error.ignore_event) {
332 			__this_cpu_dec(mce_queue_count);
333 			continue;
334 		}
335 		machine_check_print_event_info(evt, false, false);
336 		__this_cpu_dec(mce_queue_count);
337 	}
338 }
339 
340 void machine_check_print_event_info(struct machine_check_event *evt,
341 				    bool user_mode, bool in_guest)
342 {
343 	const char *level, *sevstr, *subtype, *err_type, *initiator;
344 	uint64_t ea = 0, pa = 0;
345 	int n = 0;
346 	char dar_str[50];
347 	char pa_str[50];
348 	static const char *mc_ue_types[] = {
349 		"Indeterminate",
350 		"Instruction fetch",
351 		"Page table walk ifetch",
352 		"Load/Store",
353 		"Page table walk Load/Store",
354 	};
355 	static const char *mc_slb_types[] = {
356 		"Indeterminate",
357 		"Parity",
358 		"Multihit",
359 	};
360 	static const char *mc_erat_types[] = {
361 		"Indeterminate",
362 		"Parity",
363 		"Multihit",
364 	};
365 	static const char *mc_tlb_types[] = {
366 		"Indeterminate",
367 		"Parity",
368 		"Multihit",
369 	};
370 	static const char *mc_user_types[] = {
371 		"Indeterminate",
372 		"tlbie(l) invalid",
373 	};
374 	static const char *mc_ra_types[] = {
375 		"Indeterminate",
376 		"Instruction fetch (bad)",
377 		"Instruction fetch (foreign)",
378 		"Page table walk ifetch (bad)",
379 		"Page table walk ifetch (foreign)",
380 		"Load (bad)",
381 		"Store (bad)",
382 		"Page table walk Load/Store (bad)",
383 		"Page table walk Load/Store (foreign)",
384 		"Load/Store (foreign)",
385 	};
386 	static const char *mc_link_types[] = {
387 		"Indeterminate",
388 		"Instruction fetch (timeout)",
389 		"Page table walk ifetch (timeout)",
390 		"Load (timeout)",
391 		"Store (timeout)",
392 		"Page table walk Load/Store (timeout)",
393 	};
394 	static const char *mc_error_class[] = {
395 		"Unknown",
396 		"Hardware error",
397 		"Probable Hardware error (some chance of software cause)",
398 		"Software error",
399 		"Probable Software error (some chance of hardware cause)",
400 	};
401 
402 	/* Print things out */
403 	if (evt->version != MCE_V1) {
404 		pr_err("Machine Check Exception, Unknown event version %d !\n",
405 		       evt->version);
406 		return;
407 	}
408 	switch (evt->severity) {
409 	case MCE_SEV_NO_ERROR:
410 		level = KERN_INFO;
411 		sevstr = "Harmless";
412 		break;
413 	case MCE_SEV_WARNING:
414 		level = KERN_WARNING;
415 		sevstr = "Warning";
416 		break;
417 	case MCE_SEV_SEVERE:
418 		level = KERN_ERR;
419 		sevstr = "Severe";
420 		break;
421 	case MCE_SEV_FATAL:
422 	default:
423 		level = KERN_ERR;
424 		sevstr = "Fatal";
425 		break;
426 	}
427 
428 	switch (evt->initiator) {
429 	case MCE_INITIATOR_CPU:
430 		initiator = "CPU";
431 		break;
432 	case MCE_INITIATOR_PCI:
433 		initiator = "PCI";
434 		break;
435 	case MCE_INITIATOR_ISA:
436 		initiator = "ISA";
437 		break;
438 	case MCE_INITIATOR_MEMORY:
439 		initiator = "Memory";
440 		break;
441 	case MCE_INITIATOR_POWERMGM:
442 		initiator = "Power Management";
443 		break;
444 	case MCE_INITIATOR_UNKNOWN:
445 	default:
446 		initiator = "Unknown";
447 		break;
448 	}
449 
450 	switch (evt->error_type) {
451 	case MCE_ERROR_TYPE_UE:
452 		err_type = "UE";
453 		subtype = evt->u.ue_error.ue_error_type <
454 			ARRAY_SIZE(mc_ue_types) ?
455 			mc_ue_types[evt->u.ue_error.ue_error_type]
456 			: "Unknown";
457 		if (evt->u.ue_error.effective_address_provided)
458 			ea = evt->u.ue_error.effective_address;
459 		if (evt->u.ue_error.physical_address_provided)
460 			pa = evt->u.ue_error.physical_address;
461 		break;
462 	case MCE_ERROR_TYPE_SLB:
463 		err_type = "SLB";
464 		subtype = evt->u.slb_error.slb_error_type <
465 			ARRAY_SIZE(mc_slb_types) ?
466 			mc_slb_types[evt->u.slb_error.slb_error_type]
467 			: "Unknown";
468 		if (evt->u.slb_error.effective_address_provided)
469 			ea = evt->u.slb_error.effective_address;
470 		break;
471 	case MCE_ERROR_TYPE_ERAT:
472 		err_type = "ERAT";
473 		subtype = evt->u.erat_error.erat_error_type <
474 			ARRAY_SIZE(mc_erat_types) ?
475 			mc_erat_types[evt->u.erat_error.erat_error_type]
476 			: "Unknown";
477 		if (evt->u.erat_error.effective_address_provided)
478 			ea = evt->u.erat_error.effective_address;
479 		break;
480 	case MCE_ERROR_TYPE_TLB:
481 		err_type = "TLB";
482 		subtype = evt->u.tlb_error.tlb_error_type <
483 			ARRAY_SIZE(mc_tlb_types) ?
484 			mc_tlb_types[evt->u.tlb_error.tlb_error_type]
485 			: "Unknown";
486 		if (evt->u.tlb_error.effective_address_provided)
487 			ea = evt->u.tlb_error.effective_address;
488 		break;
489 	case MCE_ERROR_TYPE_USER:
490 		err_type = "User";
491 		subtype = evt->u.user_error.user_error_type <
492 			ARRAY_SIZE(mc_user_types) ?
493 			mc_user_types[evt->u.user_error.user_error_type]
494 			: "Unknown";
495 		if (evt->u.user_error.effective_address_provided)
496 			ea = evt->u.user_error.effective_address;
497 		break;
498 	case MCE_ERROR_TYPE_RA:
499 		err_type = "Real address";
500 		subtype = evt->u.ra_error.ra_error_type <
501 			ARRAY_SIZE(mc_ra_types) ?
502 			mc_ra_types[evt->u.ra_error.ra_error_type]
503 			: "Unknown";
504 		if (evt->u.ra_error.effective_address_provided)
505 			ea = evt->u.ra_error.effective_address;
506 		break;
507 	case MCE_ERROR_TYPE_LINK:
508 		err_type = "Link";
509 		subtype = evt->u.link_error.link_error_type <
510 			ARRAY_SIZE(mc_link_types) ?
511 			mc_link_types[evt->u.link_error.link_error_type]
512 			: "Unknown";
513 		if (evt->u.link_error.effective_address_provided)
514 			ea = evt->u.link_error.effective_address;
515 		break;
516 	case MCE_ERROR_TYPE_DCACHE:
517 		err_type = "D-Cache";
518 		subtype = "Unknown";
519 		break;
520 	case MCE_ERROR_TYPE_ICACHE:
521 		err_type = "I-Cache";
522 		subtype = "Unknown";
523 		break;
524 	default:
525 	case MCE_ERROR_TYPE_UNKNOWN:
526 		err_type = "Unknown";
527 		subtype = "";
528 		break;
529 	}
530 
531 	dar_str[0] = pa_str[0] = '\0';
532 	if (ea && evt->srr0 != ea) {
533 		/* Load/Store address */
534 		n = sprintf(dar_str, "DAR: %016llx ", ea);
535 		if (pa)
536 			sprintf(dar_str + n, "paddr: %016llx ", pa);
537 	} else if (pa) {
538 		sprintf(pa_str, " paddr: %016llx", pa);
539 	}
540 
541 	printk("%sMCE: CPU%d: machine check (%s) %s %s %s %s[%s]\n",
542 		level, evt->cpu, sevstr, in_guest ? "Guest" : "Host",
543 		err_type, subtype, dar_str,
544 		evt->disposition == MCE_DISPOSITION_RECOVERED ?
545 		"Recovered" : "Not recovered");
546 
547 	if (in_guest || user_mode) {
548 		printk("%sMCE: CPU%d: PID: %d Comm: %s %sNIP: [%016llx]%s\n",
549 			level, evt->cpu, current->pid, current->comm,
550 			in_guest ? "Guest " : "", evt->srr0, pa_str);
551 	} else {
552 		printk("%sMCE: CPU%d: NIP: [%016llx] %pS%s\n",
553 			level, evt->cpu, evt->srr0, (void *)evt->srr0, pa_str);
554 	}
555 
556 	printk("%sMCE: CPU%d: Initiator %s\n", level, evt->cpu, initiator);
557 
558 	subtype = evt->error_class < ARRAY_SIZE(mc_error_class) ?
559 		mc_error_class[evt->error_class] : "Unknown";
560 	printk("%sMCE: CPU%d: %s\n", level, evt->cpu, subtype);
561 
562 #ifdef CONFIG_PPC_BOOK3S_64
563 	/* Display faulty SLB contents for SLB errors. */
564 	if (evt->error_type == MCE_ERROR_TYPE_SLB)
565 		slb_dump_contents(local_paca->mce_faulty_slbs);
566 #endif
567 }
568 EXPORT_SYMBOL_GPL(machine_check_print_event_info);
569 
570 /*
571  * This function is called in real mode. Strictly no printk's please.
572  *
573  * regs->nip and regs->msr contain srr0 and srr1.
574  */
575 long notrace machine_check_early(struct pt_regs *regs)
576 {
577 	long handled = 0;
578 	bool nested = in_nmi();
579 	u8 ftrace_enabled = this_cpu_get_ftrace_enabled();
580 
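	/*
	 * This may run in real mode with the MMU off, where tracing is not
	 * safe, so disable ftrace for this CPU and restore it on exit.
	 */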
581 	this_cpu_set_ftrace_enabled(0);
582 
583 	if (!nested)
584 		nmi_enter();
585 
586 	hv_nmi_check_nonrecoverable(regs);
587 
588 	/*
589 	 * See if the platform is capable of handling the machine check.
590 	 */
591 	if (ppc_md.machine_check_early)
592 		handled = ppc_md.machine_check_early(regs);
593 
594 	if (!nested)
595 		nmi_exit();
596 
597 	this_cpu_set_ftrace_enabled(ftrace_enabled);
598 
599 	return handled;
600 }
601 
602 /* Possible meanings for HMER_DEBUG_TRIG bit being set on POWER9 */
603 static enum {
604 	DTRIG_UNKNOWN,
605 	DTRIG_VECTOR_CI,	/* need to emulate vector CI load instr */
606 	DTRIG_SUSPEND_ESCAPE,	/* need to escape from TM suspend mode */
607 } hmer_debug_trig_function;
608 
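/*
 * Determine what the HMER_DEBUG_TRIG bit is used for on this CPU, first
 * from the "ibm,hmi-special-triggers" device tree property and, failing
 * that, from the PVR.
 */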
609 static int init_debug_trig_function(void)
610 {
611 	int pvr;
612 	struct device_node *cpun;
613 	struct property *prop = NULL;
614 	const char *str;
615 
616 	/* First look in the device tree */
617 	preempt_disable();
618 	cpun = of_get_cpu_node(smp_processor_id(), NULL);
619 	if (cpun) {
620 		of_property_for_each_string(cpun, "ibm,hmi-special-triggers",
621 					    prop, str) {
622 			if (strcmp(str, "bit17-vector-ci-load") == 0)
623 				hmer_debug_trig_function = DTRIG_VECTOR_CI;
624 			else if (strcmp(str, "bit17-tm-suspend-escape") == 0)
625 				hmer_debug_trig_function = DTRIG_SUSPEND_ESCAPE;
626 		}
627 		of_node_put(cpun);
628 	}
629 	preempt_enable();
630 
631 	/* If we found the property, don't look at PVR */
632 	if (prop)
633 		goto out;
634 
635 	pvr = mfspr(SPRN_PVR);
636 	/* Check for POWER9 Nimbus (scale-out) */
637 	if ((PVR_VER(pvr) == PVR_POWER9) && (pvr & 0xe000) == 0) {
638 		/* DD2.2 and later */
639 		if ((pvr & 0xfff) >= 0x202)
640 			hmer_debug_trig_function = DTRIG_SUSPEND_ESCAPE;
641 		/* DD2.0 and DD2.1 - used for vector CI load emulation */
642 		else if ((pvr & 0xfff) >= 0x200)
643 			hmer_debug_trig_function = DTRIG_VECTOR_CI;
644 	}
645 
646  out:
647 	switch (hmer_debug_trig_function) {
648 	case DTRIG_VECTOR_CI:
649 		pr_debug("HMI debug trigger used for vector CI load\n");
650 		break;
651 	case DTRIG_SUSPEND_ESCAPE:
652 		pr_debug("HMI debug trigger used for TM suspend escape\n");
653 		break;
654 	default:
655 		break;
656 	}
657 	return 0;
658 }
659 __initcall(init_debug_trig_function);
660 
661 /*
662  * Handle HMIs that occur as a result of a debug trigger.
663  * Return values:
664  * -1 means this is not an HMI cause that we know about
665  *  0 means no further handling is required
666  *  1 means further handling is required
667  */
668 long hmi_handle_debugtrig(struct pt_regs *regs)
669 {
670 	unsigned long hmer = mfspr(SPRN_HMER);
671 	long ret = 0;
672 
673 	/* HMER_DEBUG_TRIG bit is used for various workarounds on P9 */
674 	if (!((hmer & HMER_DEBUG_TRIG)
675 	      && hmer_debug_trig_function != DTRIG_UNKNOWN))
676 		return -1;
677 
678 	hmer &= ~HMER_DEBUG_TRIG;
679 	/* HMER is a write-AND register; this clears only HMER_DEBUG_TRIG */
680 	mtspr(SPRN_HMER, ~HMER_DEBUG_TRIG);
681 
682 	switch (hmer_debug_trig_function) {
683 	case DTRIG_VECTOR_CI:
684 		/*
685 		 * To avoid problems with soft-disable, we only do
686 		 * the emulation if we are coming from host user
687 		 * space.
688 		 */
689 		if (regs && user_mode(regs))
690 			ret = local_paca->hmi_p9_special_emu = 1;
691 
692 		break;
693 
694 	default:
695 		break;
696 	}
697 
698 	/*
699 	 * See if any other HMI causes remain to be handled
700 	 */
701 	if (hmer & mfspr(SPRN_HMEER))
702 		return -1;
703 
704 	return ret;
705 }
706 
707 /*
708  * Return values: 0 = no further handling required, 1 = further handling required
709  */
710 long hmi_exception_realmode(struct pt_regs *regs)
711 {
712 	int ret;
713 
714 	__this_cpu_inc(irq_stat.hmi_exceptions);
715 
716 	ret = hmi_handle_debugtrig(regs);
717 	if (ret >= 0)
718 		return ret;
719 
720 	wait_for_subcore_guest_exit();
721 
722 	if (ppc_md.hmi_exception_early)
723 		ppc_md.hmi_exception_early(regs);
724 
725 	wait_for_tb_resync();
726 
727 	return 1;
728 }
729