xref: /openbmc/linux/arch/powerpc/kernel/mce.c (revision 15e3ae36)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Machine check exception handling.
 *
 * Copyright 2013 IBM Corporation
 * Author: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
 */

#undef DEBUG
#define pr_fmt(fmt) "mce: " fmt

#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/percpu.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/extable.h>

#include <asm/machdep.h>
#include <asm/mce.h>
#include <asm/nmi.h>

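/*
 * Per-CPU buffer of in-flight MCE events; mce_nest_count tracks how many
 * entries of the buffer are currently in use (machine checks can nest).
 */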
static DEFINE_PER_CPU(int, mce_nest_count);
static DEFINE_PER_CPU(struct machine_check_event[MAX_MC_EVT], mce_event);

/* Queue for delayed MCE events. */
static DEFINE_PER_CPU(int, mce_queue_count);
static DEFINE_PER_CPU(struct machine_check_event[MAX_MC_EVT], mce_event_queue);

/* Queue for delayed MCE UE events. */
static DEFINE_PER_CPU(int, mce_ue_count);
static DEFINE_PER_CPU(struct machine_check_event[MAX_MC_EVT],
					mce_ue_event_queue);

static void machine_check_process_queued_event(struct irq_work *work);
static void machine_check_ue_irq_work(struct irq_work *work);
static void machine_check_ue_event(struct machine_check_event *evt);
static void machine_process_ue_event(struct work_struct *work);

static struct irq_work mce_event_process_work = {
	.func = machine_check_process_queued_event,
};

static struct irq_work mce_ue_event_irq_work = {
	.func = machine_check_ue_irq_work,
};

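/*
 * Workqueue item used to run UE recovery (e.g. memory_failure()) in
 * process context, after the irq_work above has fired.
 */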
DECLARE_WORK(mce_ue_event_work, machine_process_ue_event);

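/*
 * Copy the error type and the matching type-specific error field from the
 * decoded mce_error_info into the machine_check_event being saved.
 */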
static void mce_set_error_info(struct machine_check_event *mce,
			       struct mce_error_info *mce_err)
{
	mce->error_type = mce_err->error_type;
	switch (mce_err->error_type) {
	case MCE_ERROR_TYPE_UE:
		mce->u.ue_error.ue_error_type = mce_err->u.ue_error_type;
		break;
	case MCE_ERROR_TYPE_SLB:
		mce->u.slb_error.slb_error_type = mce_err->u.slb_error_type;
		break;
	case MCE_ERROR_TYPE_ERAT:
		mce->u.erat_error.erat_error_type = mce_err->u.erat_error_type;
		break;
	case MCE_ERROR_TYPE_TLB:
		mce->u.tlb_error.tlb_error_type = mce_err->u.tlb_error_type;
		break;
	case MCE_ERROR_TYPE_USER:
		mce->u.user_error.user_error_type = mce_err->u.user_error_type;
		break;
	case MCE_ERROR_TYPE_RA:
		mce->u.ra_error.ra_error_type = mce_err->u.ra_error_type;
		break;
	case MCE_ERROR_TYPE_LINK:
		mce->u.link_error.link_error_type = mce_err->u.link_error_type;
		break;
	case MCE_ERROR_TYPE_UNKNOWN:
	default:
		break;
	}
}

/*
 * Decode and save high-level MCE information into the per-CPU buffer,
 * which is an array of machine_check_event structures.
 */
void save_mce_event(struct pt_regs *regs, long handled,
		    struct mce_error_info *mce_err,
		    uint64_t nip, uint64_t addr, uint64_t phys_addr)
{
	int index = __this_cpu_inc_return(mce_nest_count) - 1;
	struct machine_check_event *mce = this_cpu_ptr(&mce_event[index]);

	/*
	 * Return if we don't have enough space to log the MCE event.
	 * mce_nest_count may go beyond MAX_MC_EVT, but that's OK;
	 * the check below prevents a buffer overrun.
	 */
	if (index >= MAX_MC_EVT)
		return;

	/* Populate generic machine check info */
	mce->version = MCE_V1;
	mce->srr0 = nip;
	mce->srr1 = regs->msr;
	mce->gpr3 = regs->gpr[3];
	mce->in_use = 1;
	mce->cpu = get_paca()->paca_index;

	/* Mark it recovered if we have handled it and MSR(RI=1). */
	if (handled && (regs->msr & MSR_RI))
		mce->disposition = MCE_DISPOSITION_RECOVERED;
	else
		mce->disposition = MCE_DISPOSITION_NOT_RECOVERED;

	mce->initiator = mce_err->initiator;
	mce->severity = mce_err->severity;
	mce->sync_error = mce_err->sync_error;
	mce->error_class = mce_err->error_class;

	/*
	 * Populate the mce error_type and type-specific error_type.
	 */
	mce_set_error_info(mce, mce_err);

	if (!addr)
		return;

	if (mce->error_type == MCE_ERROR_TYPE_TLB) {
		mce->u.tlb_error.effective_address_provided = true;
		mce->u.tlb_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_SLB) {
		mce->u.slb_error.effective_address_provided = true;
		mce->u.slb_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_ERAT) {
		mce->u.erat_error.effective_address_provided = true;
		mce->u.erat_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_USER) {
		mce->u.user_error.effective_address_provided = true;
		mce->u.user_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_RA) {
		mce->u.ra_error.effective_address_provided = true;
		mce->u.ra_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_LINK) {
		mce->u.link_error.effective_address_provided = true;
		mce->u.link_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_UE) {
		mce->u.ue_error.effective_address_provided = true;
		mce->u.ue_error.effective_address = addr;
		if (phys_addr != ULONG_MAX) {
			mce->u.ue_error.physical_address_provided = true;
			mce->u.ue_error.physical_address = phys_addr;
			mce->u.ue_error.ignore_event = mce_err->ignore_event;
			machine_check_ue_event(mce);
		}
	}
	return;
}

/*
 * get_mce_event:
 *	mce	Pointer to machine_check_event structure to be filled.
 *	release Flag to indicate whether to free the event slot or not.
 *		0 => do not release the mce event. Caller will invoke
 *		     release_mce_event() once the event has been consumed.
 *		1 => release the slot.
 *
 *	return	1 = success
 *		0 = failure
 *
 * get_mce_event() will be called by the platform-specific machine check
 * handler routine and by KVM.
 * When we call get_mce_event(), we are still in interrupt context and
 * preemption will not be scheduled until the ret_from_except() routine
 * is called.
 */
int get_mce_event(struct machine_check_event *mce, bool release)
{
	int index = __this_cpu_read(mce_nest_count) - 1;
	struct machine_check_event *mc_evt;
	int ret = 0;

	/* Sanity check */
	if (index < 0)
		return ret;

	/* Check if we have MCE info to process. */
	if (index < MAX_MC_EVT) {
		mc_evt = this_cpu_ptr(&mce_event[index]);
		/* Copy the event structure and release the original */
		if (mce)
			*mce = *mc_evt;
		if (release)
			mc_evt->in_use = 0;
		ret = 1;
	}
	/* Decrement the count to free the slot. */
	if (release)
		__this_cpu_dec(mce_nest_count);

	return ret;
}

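/*
 * Release the per-CPU MCE event slot that a caller previously fetched with
 * get_mce_event() without releasing it.
 */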
void release_mce_event(void)
{
	get_mce_event(NULL, true);
}
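
/*
 * Illustrative call sequence for the two helpers above (a sketch only, not
 * code from this file):
 *
 *	struct machine_check_event evt;
 *
 *	if (get_mce_event(&evt, false)) {
 *		... consume evt, e.g. log it ...
 *		release_mce_event();
 *	}
 */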

/*
 * irq_work callback: kick the workqueue so that UE recovery (which may
 * sleep, e.g. in memory_failure()) runs in process context.
 */
static void machine_check_ue_irq_work(struct irq_work *work)
{
	schedule_work(&mce_ue_event_work);
}

/*
 * Queue up the MCE UE event so that it can be handled later, in process
 * context.
 */
static void machine_check_ue_event(struct machine_check_event *evt)
{
	int index;

	index = __this_cpu_inc_return(mce_ue_count) - 1;
	/* If queue is full, just return for now. */
	if (index >= MAX_MC_EVT) {
		__this_cpu_dec(mce_ue_count);
		return;
	}
	memcpy(this_cpu_ptr(&mce_ue_event_queue[index]), evt, sizeof(*evt));

	/* Queue work to process this event later. */
	irq_work_queue(&mce_ue_event_irq_work);
}

/*
 * Queue up the MCE event so that it can be logged and handled later.
 */
void machine_check_queue_event(void)
{
	int index;
	struct machine_check_event evt;

	if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
		return;

	index = __this_cpu_inc_return(mce_queue_count) - 1;
	/* If queue is full, just return for now. */
	if (index >= MAX_MC_EVT) {
		__this_cpu_dec(mce_queue_count);
		return;
	}
	memcpy(this_cpu_ptr(&mce_event_queue[index]), &evt, sizeof(evt));

	/* Queue irq work to process this event later. */
	irq_work_queue(&mce_event_process_work);
}

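/*
 * Common UE handling: if the faulting instruction has an entry in the
 * kernel exception table, mark the event to be ignored and redirect NIP
 * to the fixup handler, which will do its own error handling.
 */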
void mce_common_process_ue(struct pt_regs *regs,
			   struct mce_error_info *mce_err)
{
	const struct exception_table_entry *entry;

	entry = search_kernel_exception_table(regs->nip);
	if (entry) {
		mce_err->ignore_event = true;
		regs->nip = extable_fixup(entry);
	}
}

/*
 * Process pending UE events from the per-CPU UE event queue. This function
 * runs from the workqueue, scheduled by machine_check_ue_irq_work().
 */
static void machine_process_ue_event(struct work_struct *work)
{
	int index;
	struct machine_check_event *evt;

	while (__this_cpu_read(mce_ue_count) > 0) {
		index = __this_cpu_read(mce_ue_count) - 1;
		evt = this_cpu_ptr(&mce_ue_event_queue[index]);
#ifdef CONFIG_MEMORY_FAILURE
		/*
		 * This should probably be queued elsewhere, but oh well.
		 *
		 * Don't report this machine check because the caller has
		 * asked us to ignore the event; it has a fixup handler
		 * which will do the appropriate error handling and
		 * reporting.
		 */
		if (evt->error_type == MCE_ERROR_TYPE_UE) {
			if (evt->u.ue_error.ignore_event) {
				__this_cpu_dec(mce_ue_count);
				continue;
			}

			if (evt->u.ue_error.physical_address_provided) {
				unsigned long pfn;

				pfn = evt->u.ue_error.physical_address >>
					PAGE_SHIFT;
				memory_failure(pfn, 0);
			} else {
				pr_warn("Failed to identify bad address from where the uncorrectable error (UE) was generated\n");
			}
		}
#endif
		__this_cpu_dec(mce_ue_count);
	}
}

/*
 * Process pending MCE events from the per-CPU MCE event queue. This function
 * runs as irq_work, queued from machine_check_queue_event().
 */
static void machine_check_process_queued_event(struct irq_work *work)
{
	int index;
	struct machine_check_event *evt;

	add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);

	/*
	 * For now just print it to console.
	 * TODO: log this error event to FSP or nvram.
	 */
	while (__this_cpu_read(mce_queue_count) > 0) {
		index = __this_cpu_read(mce_queue_count) - 1;
		evt = this_cpu_ptr(&mce_event_queue[index]);

		if (evt->error_type == MCE_ERROR_TYPE_UE &&
		    evt->u.ue_error.ignore_event) {
			__this_cpu_dec(mce_queue_count);
			continue;
		}
		machine_check_print_event_info(evt, false, false);
		__this_cpu_dec(mce_queue_count);
	}
}

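/*
 * Print a human-readable summary of a single MCE event, including the error
 * type/subtype, addresses, severity and initiator, to the console.
 */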
void machine_check_print_event_info(struct machine_check_event *evt,
				    bool user_mode, bool in_guest)
{
	const char *level, *sevstr, *subtype, *err_type, *initiator;
	uint64_t ea = 0, pa = 0;
	int n = 0;
	char dar_str[50];
	char pa_str[50];
	static const char *mc_ue_types[] = {
		"Indeterminate",
		"Instruction fetch",
		"Page table walk ifetch",
		"Load/Store",
		"Page table walk Load/Store",
	};
	static const char *mc_slb_types[] = {
		"Indeterminate",
		"Parity",
		"Multihit",
	};
	static const char *mc_erat_types[] = {
		"Indeterminate",
		"Parity",
		"Multihit",
	};
	static const char *mc_tlb_types[] = {
		"Indeterminate",
		"Parity",
		"Multihit",
	};
	static const char *mc_user_types[] = {
		"Indeterminate",
		"tlbie(l) invalid",
	};
	static const char *mc_ra_types[] = {
		"Indeterminate",
		"Instruction fetch (bad)",
		"Instruction fetch (foreign)",
		"Page table walk ifetch (bad)",
		"Page table walk ifetch (foreign)",
		"Load (bad)",
		"Store (bad)",
		"Page table walk Load/Store (bad)",
		"Page table walk Load/Store (foreign)",
		"Load/Store (foreign)",
	};
	static const char *mc_link_types[] = {
		"Indeterminate",
		"Instruction fetch (timeout)",
		"Page table walk ifetch (timeout)",
		"Load (timeout)",
		"Store (timeout)",
		"Page table walk Load/Store (timeout)",
	};
	static const char *mc_error_class[] = {
		"Unknown",
		"Hardware error",
		"Probable Hardware error (some chance of software cause)",
		"Software error",
		"Probable Software error (some chance of hardware cause)",
	};

	/* Print things out */
	if (evt->version != MCE_V1) {
		pr_err("Machine Check Exception, Unknown event version %d !\n",
		       evt->version);
		return;
	}
	switch (evt->severity) {
	case MCE_SEV_NO_ERROR:
		level = KERN_INFO;
		sevstr = "Harmless";
		break;
	case MCE_SEV_WARNING:
		level = KERN_WARNING;
		sevstr = "Warning";
		break;
	case MCE_SEV_SEVERE:
		level = KERN_ERR;
		sevstr = "Severe";
		break;
	case MCE_SEV_FATAL:
	default:
		level = KERN_ERR;
		sevstr = "Fatal";
		break;
	}

	switch (evt->initiator) {
	case MCE_INITIATOR_CPU:
		initiator = "CPU";
		break;
	case MCE_INITIATOR_PCI:
		initiator = "PCI";
		break;
	case MCE_INITIATOR_ISA:
		initiator = "ISA";
		break;
	case MCE_INITIATOR_MEMORY:
		initiator = "Memory";
		break;
	case MCE_INITIATOR_POWERMGM:
		initiator = "Power Management";
		break;
	case MCE_INITIATOR_UNKNOWN:
	default:
		initiator = "Unknown";
		break;
	}

	switch (evt->error_type) {
	case MCE_ERROR_TYPE_UE:
		err_type = "UE";
		subtype = evt->u.ue_error.ue_error_type <
			ARRAY_SIZE(mc_ue_types) ?
			mc_ue_types[evt->u.ue_error.ue_error_type]
			: "Unknown";
		if (evt->u.ue_error.effective_address_provided)
			ea = evt->u.ue_error.effective_address;
		if (evt->u.ue_error.physical_address_provided)
			pa = evt->u.ue_error.physical_address;
		break;
	case MCE_ERROR_TYPE_SLB:
		err_type = "SLB";
		subtype = evt->u.slb_error.slb_error_type <
			ARRAY_SIZE(mc_slb_types) ?
			mc_slb_types[evt->u.slb_error.slb_error_type]
			: "Unknown";
		if (evt->u.slb_error.effective_address_provided)
			ea = evt->u.slb_error.effective_address;
		break;
	case MCE_ERROR_TYPE_ERAT:
		err_type = "ERAT";
		subtype = evt->u.erat_error.erat_error_type <
			ARRAY_SIZE(mc_erat_types) ?
			mc_erat_types[evt->u.erat_error.erat_error_type]
			: "Unknown";
		if (evt->u.erat_error.effective_address_provided)
			ea = evt->u.erat_error.effective_address;
		break;
	case MCE_ERROR_TYPE_TLB:
		err_type = "TLB";
		subtype = evt->u.tlb_error.tlb_error_type <
			ARRAY_SIZE(mc_tlb_types) ?
			mc_tlb_types[evt->u.tlb_error.tlb_error_type]
			: "Unknown";
		if (evt->u.tlb_error.effective_address_provided)
			ea = evt->u.tlb_error.effective_address;
		break;
	case MCE_ERROR_TYPE_USER:
		err_type = "User";
		subtype = evt->u.user_error.user_error_type <
			ARRAY_SIZE(mc_user_types) ?
			mc_user_types[evt->u.user_error.user_error_type]
			: "Unknown";
		if (evt->u.user_error.effective_address_provided)
			ea = evt->u.user_error.effective_address;
		break;
	case MCE_ERROR_TYPE_RA:
		err_type = "Real address";
		subtype = evt->u.ra_error.ra_error_type <
			ARRAY_SIZE(mc_ra_types) ?
			mc_ra_types[evt->u.ra_error.ra_error_type]
			: "Unknown";
		if (evt->u.ra_error.effective_address_provided)
			ea = evt->u.ra_error.effective_address;
		break;
	case MCE_ERROR_TYPE_LINK:
		err_type = "Link";
		subtype = evt->u.link_error.link_error_type <
			ARRAY_SIZE(mc_link_types) ?
			mc_link_types[evt->u.link_error.link_error_type]
			: "Unknown";
		if (evt->u.link_error.effective_address_provided)
			ea = evt->u.link_error.effective_address;
		break;
	case MCE_ERROR_TYPE_DCACHE:
		err_type = "D-Cache";
		subtype = "Unknown";
		break;
	case MCE_ERROR_TYPE_ICACHE:
		err_type = "I-Cache";
		subtype = "Unknown";
		break;
	default:
	case MCE_ERROR_TYPE_UNKNOWN:
		err_type = "Unknown";
		subtype = "";
		break;
	}

	dar_str[0] = pa_str[0] = '\0';
	if (ea && evt->srr0 != ea) {
		/* Load/Store address */
		n = sprintf(dar_str, "DAR: %016llx ", ea);
		if (pa)
			sprintf(dar_str + n, "paddr: %016llx ", pa);
	} else if (pa) {
		sprintf(pa_str, " paddr: %016llx", pa);
	}

	printk("%sMCE: CPU%d: machine check (%s) %s %s %s %s[%s]\n",
		level, evt->cpu, sevstr, in_guest ? "Guest" : "Host",
		err_type, subtype, dar_str,
		evt->disposition == MCE_DISPOSITION_RECOVERED ?
		"Recovered" : "Not recovered");

	if (in_guest || user_mode) {
		printk("%sMCE: CPU%d: PID: %d Comm: %s %sNIP: [%016llx]%s\n",
			level, evt->cpu, current->pid, current->comm,
			in_guest ? "Guest " : "", evt->srr0, pa_str);
	} else {
		printk("%sMCE: CPU%d: NIP: [%016llx] %pS%s\n",
			level, evt->cpu, evt->srr0, (void *)evt->srr0, pa_str);
	}

	printk("%sMCE: CPU%d: Initiator %s\n", level, evt->cpu, initiator);

	subtype = evt->error_class < ARRAY_SIZE(mc_error_class) ?
		mc_error_class[evt->error_class] : "Unknown";
	printk("%sMCE: CPU%d: %s\n", level, evt->cpu, subtype);

#ifdef CONFIG_PPC_BOOK3S_64
	/* Display faulty slb contents for SLB errors. */
	if (evt->error_type == MCE_ERROR_TYPE_SLB)
		slb_dump_contents(local_paca->mce_faulty_slbs);
#endif
}
EXPORT_SYMBOL_GPL(machine_check_print_event_info);

/*
 * This function is called in real mode. Strictly no printks, please.
 *
 * regs->nip and regs->msr contain SRR0 and SRR1.
 */
long machine_check_early(struct pt_regs *regs)
{
	long handled = 0;

	hv_nmi_check_nonrecoverable(regs);

	/*
	 * See if the platform is capable of handling the machine check.
	 */
	if (ppc_md.machine_check_early)
		handled = ppc_md.machine_check_early(regs);
	return handled;
}

/* Possible meanings for HMER_DEBUG_TRIG bit being set on POWER9 */
static enum {
	DTRIG_UNKNOWN,
	DTRIG_VECTOR_CI,	/* need to emulate vector CI load instr */
	DTRIG_SUSPEND_ESCAPE,	/* need to escape from TM suspend mode */
} hmer_debug_trig_function;

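/*
 * Work out what the HMER debug trigger is used for on this CPU: first from
 * the "ibm,hmi-special-triggers" device-tree property, then by falling back
 * to the PVR for known POWER9 Nimbus revisions.
 */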
static int init_debug_trig_function(void)
{
	int pvr;
	struct device_node *cpun;
	struct property *prop = NULL;
	const char *str;

	/* First look in the device tree */
	preempt_disable();
	cpun = of_get_cpu_node(smp_processor_id(), NULL);
	if (cpun) {
		of_property_for_each_string(cpun, "ibm,hmi-special-triggers",
					    prop, str) {
			if (strcmp(str, "bit17-vector-ci-load") == 0)
				hmer_debug_trig_function = DTRIG_VECTOR_CI;
			else if (strcmp(str, "bit17-tm-suspend-escape") == 0)
				hmer_debug_trig_function = DTRIG_SUSPEND_ESCAPE;
		}
		of_node_put(cpun);
	}
	preempt_enable();

	/* If we found the property, don't look at PVR */
	if (prop)
		goto out;

	pvr = mfspr(SPRN_PVR);
	/* Check for POWER9 Nimbus (scale-out) */
	if ((PVR_VER(pvr) == PVR_POWER9) && (pvr & 0xe000) == 0) {
		/* DD2.2 and later */
		if ((pvr & 0xfff) >= 0x202)
			hmer_debug_trig_function = DTRIG_SUSPEND_ESCAPE;
		/* DD2.0 and DD2.1 - used for vector CI load emulation */
		else if ((pvr & 0xfff) >= 0x200)
			hmer_debug_trig_function = DTRIG_VECTOR_CI;
	}

 out:
	switch (hmer_debug_trig_function) {
	case DTRIG_VECTOR_CI:
		pr_debug("HMI debug trigger used for vector CI load\n");
		break;
	case DTRIG_SUSPEND_ESCAPE:
		pr_debug("HMI debug trigger used for TM suspend escape\n");
		break;
	default:
		break;
	}
	return 0;
}
__initcall(init_debug_trig_function);

/*
 * Handle HMIs that occur as a result of a debug trigger.
 * Return values:
 * -1 means this is not an HMI cause that we know about
 *  0 means no further handling is required
 *  1 means further handling is required
 */
long hmi_handle_debugtrig(struct pt_regs *regs)
{
	unsigned long hmer = mfspr(SPRN_HMER);
	long ret = 0;

	/* HMER_DEBUG_TRIG bit is used for various workarounds on P9 */
	if (!((hmer & HMER_DEBUG_TRIG)
	      && hmer_debug_trig_function != DTRIG_UNKNOWN))
		return -1;

	hmer &= ~HMER_DEBUG_TRIG;
	/* HMER is a write-AND register */
	mtspr(SPRN_HMER, ~HMER_DEBUG_TRIG);

	switch (hmer_debug_trig_function) {
	case DTRIG_VECTOR_CI:
		/*
		 * To avoid problems with soft-disable, we only do the
		 * emulation if we are coming from host user space.
		 */
		if (regs && user_mode(regs))
			ret = local_paca->hmi_p9_special_emu = 1;

		break;

	default:
		break;
	}

	/*
	 * See if any other HMI causes remain to be handled
	 */
	if (hmer & mfspr(SPRN_HMEER))
		return -1;

	return ret;
}

/*
 * Return values:
 * 0 => no further handling required
 * 1 => further handling is required
 */
long hmi_exception_realmode(struct pt_regs *regs)
{
	int ret;

	__this_cpu_inc(irq_stat.hmi_exceptions);

	ret = hmi_handle_debugtrig(regs);
	if (ret >= 0)
		return ret;

	wait_for_subcore_guest_exit();

	if (ppc_md.hmi_exception_early)
		ppc_md.hmi_exception_early(regs);

	wait_for_tb_resync();

	return 1;
}