xref: /openbmc/linux/arch/x86/kernel/cpu/mce/severity.c (revision 249592bf)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * MCE grading rules.
4  * Copyright 2008, 2009 Intel Corporation.
5  *
6  * Author: Andi Kleen
7  */
8 #include <linux/kernel.h>
9 #include <linux/seq_file.h>
10 #include <linux/init.h>
11 #include <linux/debugfs.h>
12 #include <linux/uaccess.h>
13 
14 #include <asm/mce.h>
15 #include <asm/intel-family.h>
16 #include <asm/traps.h>
17 #include <asm/insn.h>
18 #include <asm/insn-eval.h>
19 
20 #include "internal.h"
21 
22 /*
23  * Grade an mce by severity. In general the most severe ones are processed
24  * first. Since there are quite a lot of combinations, test the bits in a
25  * table-driven way. The rules are simply processed in order, first
26  * match wins.
27  *
28  * Note this is only used for machine check exceptions; the corrected
29  * errors use much simpler rules. The exception handler still checks for
30  * corrected errors, but only to leave them alone for the CMCI handler
31  * (except in panic situations).
32  */
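/*
 * For example, a valid (VAL=1) corrected (UC=0) error on a machine
 * without SER support is graded by the "Corrected error"/KEEP entry
 * below before any of the uncorrected-error rules are even consulted.
 */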
33 
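/*
 * These start at 1 so that a zero in the corresponding struct severity
 * field below means "don't care".
 */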
34 enum context { IN_KERNEL = 1, IN_USER = 2, IN_KERNEL_RECOV = 3 };
35 enum ser { SER_REQUIRED = 1, NO_SER = 2 };
36 enum exception { EXCP_CONTEXT = 1, NO_EXCP = 2 };
37 
38 static struct severity {
39 	u64 mask;
40 	u64 result;
41 	unsigned char sev;
42 	unsigned char mcgmask;
43 	unsigned char mcgres;
44 	unsigned char ser;
45 	unsigned char context;
46 	unsigned char excp;
47 	unsigned char covered;
48 	unsigned char cpu_model;
49 	unsigned char cpu_minstepping;
50 	unsigned char bank_lo, bank_hi;
51 	char *msg;
52 } severities[] = {
53 #define MCESEV(s, m, c...) { .sev = MCE_ ## s ## _SEVERITY, .msg = m, ## c }
54 #define BANK_RANGE(l, h) .bank_lo = l, .bank_hi = h
55 #define MODEL_STEPPING(m, s) .cpu_model = m, .cpu_minstepping = s
56 #define  KERNEL		.context = IN_KERNEL
57 #define  USER		.context = IN_USER
58 #define  KERNEL_RECOV	.context = IN_KERNEL_RECOV
59 #define  SER		.ser = SER_REQUIRED
60 #define  NOSER		.ser = NO_SER
61 #define  EXCP		.excp = EXCP_CONTEXT
62 #define  NOEXCP		.excp = NO_EXCP
63 #define  BITCLR(x)	.mask = x, .result = 0
64 #define  BITSET(x)	.mask = x, .result = x
65 #define  MCGMASK(x, y)	.mcgmask = x, .mcgres = y
66 #define  MASK(x, y)	.mask = x, .result = y
67 #define MCI_UC_S (MCI_STATUS_UC|MCI_STATUS_S)
68 #define MCI_UC_AR (MCI_STATUS_UC|MCI_STATUS_AR)
69 #define MCI_UC_SAR (MCI_STATUS_UC|MCI_STATUS_S|MCI_STATUS_AR)
70 #define	MCI_ADDR (MCI_STATUS_ADDRV|MCI_STATUS_MISCV)
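/*
 * The macros above are shorthand for designated initializers; e.g.
 * MCESEV(NO, "Invalid", BITCLR(MCI_STATUS_VAL)) expands to
 * { .sev = MCE_NO_SEVERITY, .msg = "Invalid",
 *   .mask = MCI_STATUS_VAL, .result = 0 }.
 */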
71 
72 	MCESEV(
73 		NO, "Invalid",
74 		BITCLR(MCI_STATUS_VAL)
75 		),
76 	MCESEV(
77 		NO, "Not enabled",
78 		EXCP, BITCLR(MCI_STATUS_EN)
79 		),
80 	MCESEV(
81 		PANIC, "Processor context corrupt",
82 		BITSET(MCI_STATUS_PCC)
83 		),
84 	/* When MCIP is not set something is very confused */
85 	MCESEV(
86 		PANIC, "MCIP not set in MCA handler",
87 		EXCP, MCGMASK(MCG_STATUS_MCIP, 0)
88 		),
89 	/* Neither restart nor error IP -- no chance to recover -> PANIC */
90 	MCESEV(
91 		PANIC, "Neither restart nor error IP",
92 		EXCP, MCGMASK(MCG_STATUS_RIPV|MCG_STATUS_EIPV, 0)
93 		),
94 	MCESEV(
95 		PANIC, "In kernel and no restart IP",
96 		EXCP, KERNEL, MCGMASK(MCG_STATUS_RIPV, 0)
97 		),
98 	MCESEV(
99 		PANIC, "In kernel and no restart IP",
100 		EXCP, KERNEL_RECOV, MCGMASK(MCG_STATUS_RIPV, 0)
101 		),
102 	MCESEV(
103 		KEEP, "Corrected error",
104 		NOSER, BITCLR(MCI_STATUS_UC)
105 		),
106 	/*
107 	 * known AO MCACODs reported via MCE or CMC:
108 	 *
109 	 * SRAO errors can be signaled either via a machine check exception
110 	 * (bit S set) or via CMCI (bit S clear), so we don't need to
111 	 * check bit S for SRAO.
112 	 */
113 	MCESEV(
114 		AO, "Action optional: memory scrubbing error",
115 		SER, MASK(MCI_UC_AR|MCACOD_SCRUBMSK, MCI_STATUS_UC|MCACOD_SCRUB)
116 		),
117 	MCESEV(
118 		AO, "Action optional: last level cache writeback error",
119 		SER, MASK(MCI_UC_AR|MCACOD, MCI_STATUS_UC|MCACOD_L3WB)
120 		),
121 	/*
122 	 * Quirk for Skylake/Cascade Lake. Patrol scrubber may be configured
123 	 * to report uncorrected errors using CMCI with a special signature.
124 	 * UC=0, MSCOD=0x0010, MCACOD=binary(000X 0000 1100 XXXX) reported
125 	 * in one of the memory controller banks.
126 	 * Set severity to "AO" for same action as normal patrol scrub error.
127 	 */
128 	MCESEV(
129 		AO, "Uncorrected Patrol Scrub Error",
130 		SER, MASK(MCI_STATUS_UC|MCI_ADDR|0xffffeff0, MCI_ADDR|0x001000c0),
131 		MODEL_STEPPING(INTEL_FAM6_SKYLAKE_X, 4), BANK_RANGE(13, 18)
132 	),
133 
134 	/* ignore OVER for UCNA */
135 	MCESEV(
136 		UCNA, "Uncorrected no action required",
137 		SER, MASK(MCI_UC_SAR, MCI_STATUS_UC)
138 		),
139 	MCESEV(
140 		PANIC, "Illegal combination (UCNA with AR=1)",
141 		SER,
142 		MASK(MCI_STATUS_OVER|MCI_UC_SAR, MCI_STATUS_UC|MCI_STATUS_AR)
143 		),
144 	MCESEV(
145 		KEEP, "Non signaled machine check",
146 		SER, BITCLR(MCI_STATUS_S)
147 		),
148 
149 	MCESEV(
150 		PANIC, "Action required with lost events",
151 		SER, BITSET(MCI_STATUS_OVER|MCI_UC_SAR)
152 		),
153 
154 	/* known AR MCACODs: */
155 #ifdef	CONFIG_MEMORY_FAILURE
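	/*
	 * Without CONFIG_MEMORY_FAILURE there is no page recovery support,
	 * so the SRAR entries below are compiled out and action-required
	 * errors fall through to the "unknown MCACOD" PANIC rule instead.
	 */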
156 	MCESEV(
157 		KEEP, "Action required but unaffected thread is continuable",
158 		SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR, MCI_UC_SAR|MCI_ADDR),
159 		MCGMASK(MCG_STATUS_RIPV|MCG_STATUS_EIPV, MCG_STATUS_RIPV)
160 		),
161 	MCESEV(
162 		AR, "Action required: data load in error recoverable area of kernel",
163 		SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_DATA),
164 		KERNEL_RECOV
165 		),
166 	MCESEV(
167 		AR, "Action required: data load error in a user process",
168 		SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_DATA),
169 		USER
170 		),
171 	MCESEV(
172 		AR, "Action required: instruction fetch error in a user process",
173 		SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_INSTR),
174 		USER
175 		),
176 	MCESEV(
177 		PANIC, "Data load in unrecoverable area of kernel",
178 		SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_DATA),
179 		KERNEL
180 		),
181 	MCESEV(
182 		PANIC, "Instruction fetch error in kernel",
183 		SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_INSTR),
184 		KERNEL
185 		),
186 #endif
187 	MCESEV(
188 		PANIC, "Action required: unknown MCACOD",
189 		SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR, MCI_UC_SAR)
190 		),
191 
192 	MCESEV(
193 		SOME, "Action optional: unknown MCACOD",
194 		SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR, MCI_UC_S)
195 		),
196 	MCESEV(
197 		SOME, "Action optional with lost events",
198 		SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR, MCI_STATUS_OVER|MCI_UC_S)
199 		),
200 
201 	MCESEV(
202 		PANIC, "Overflowed uncorrected",
203 		BITSET(MCI_STATUS_OVER|MCI_STATUS_UC)
204 		),
205 	MCESEV(
206 		UC, "Uncorrected",
207 		BITSET(MCI_STATUS_UC)
208 		),
209 	MCESEV(
210 		SOME, "No match",
211 		BITSET(0)
212 		)	/* always matches. keep at end */
213 };
214 
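/*
 * An MCE is only considered recoverable here if both the restart IP
 * (RIPV) and the error IP (EIPV) in MCG_STATUS are valid.
 */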
215 #define mc_recoverable(mcg) (((mcg) & (MCG_STATUS_RIPV|MCG_STATUS_EIPV)) == \
216 				(MCG_STATUS_RIPV|MCG_STATUS_EIPV))
217 
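/*
 * Decode the instruction at regs->ip to see whether the machine check
 * hit while copying from user memory. If so, stash the faulting user
 * virtual address in current->mce_vaddr for the recovery code and
 * return true.
 */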
218 static bool is_copy_from_user(struct pt_regs *regs)
219 {
220 	u8 insn_buf[MAX_INSN_SIZE];
221 	unsigned long addr;
222 	struct insn insn;
223 	int ret;
224 
225 	if (copy_from_kernel_nofault(insn_buf, (void *)regs->ip, MAX_INSN_SIZE))
226 		return false;
227 
228 	ret = insn_decode_kernel(&insn, insn_buf);
229 	if (ret < 0)
230 		return false;
231 
232 	switch (insn.opcode.value) {
233 	/* MOV mem,reg */
234 	case 0x8A: case 0x8B:
235 	/* MOVZX mem,reg */
236 	case 0xB60F: case 0xB70F:
237 		addr = (unsigned long)insn_get_addr_ref(&insn, regs);
238 		break;
239 	/* REP MOVS */
240 	case 0xA4: case 0xA5:
241 		addr = regs->si;
242 		break;
243 	default:
244 		return false;
245 	}
246 
247 	if (fault_in_kernel_space(addr))
248 		return false;
249 
250 	current->mce_vaddr = (void __user *)addr;
251 
252 	return true;
253 }
254 
255 /*
256  * If mcgstatus indicated that ip/cs on the stack were
257  * no good, then "m->cs" will be zero and we will have
258  * to assume the worst case (IN_KERNEL) as we actually
259  * have no idea what we were executing when the machine
260  * check hit.
261  * If we do have a good "m->cs" (or a faked one in the
262  * case we were executing in VM86 mode) we can use it to
263  * distinguish an exception taken in user mode from one
264  * taken in the kernel.
265  */
266 static int error_context(struct mce *m, struct pt_regs *regs)
267 {
268 	enum handler_type t;
269 
270 	if ((m->cs & 3) == 3)
271 		return IN_USER;
272 	if (!mc_recoverable(m->mcgstatus))
273 		return IN_KERNEL;
274 
275 	t = ex_get_fault_handler_type(m->ip);
276 	if (t == EX_HANDLER_FAULT) {
277 		m->kflags |= MCE_IN_KERNEL_RECOV;
278 		return IN_KERNEL_RECOV;
279 	}
280 	if (t == EX_HANDLER_UACCESS && regs && is_copy_from_user(regs)) {
281 		m->kflags |= MCE_IN_KERNEL_RECOV;
282 		m->kflags |= MCE_IN_KERNEL_COPYIN;
283 		return IN_KERNEL_RECOV;
284 	}
285 
286 	return IN_KERNEL;
287 }
288 
289 static int mce_severity_amd_smca(struct mce *m, enum context err_ctx)
290 {
291 	u32 addr = MSR_AMD64_SMCA_MCx_CONFIG(m->bank);
292 	u32 low, high;
293 
294 	/*
295 	 * We need to look at the following to determine error severity:
296 	 * - the "succor" CPUID feature bit (recovery from data poisoning), and
297 	 * - the TCC bit (Task Context Corrupt) in MCi_STATUS.
299 	 */
300 	if (!mce_flags.succor)
301 		return MCE_PANIC_SEVERITY;
302 
303 	if (rdmsr_safe(addr, &low, &high))
304 		return MCE_PANIC_SEVERITY;
305 
306 	/* TCC (Task context corrupt). If set and if IN_KERNEL, panic. */
307 	if ((low & MCI_CONFIG_MCAX) &&
308 	    (m->status & MCI_STATUS_TCC) &&
309 	    (err_ctx == IN_KERNEL))
310 		return MCE_PANIC_SEVERITY;
311 
312 	 /* ...otherwise invoke hwpoison handler. */
313 	return MCE_AR_SEVERITY;
314 }
315 
316 /*
317  * See AMD Error Scope Hierarchy table in a newer BKDG. For example
318  * 49125_15h_Models_30h-3Fh_BKDG.pdf, section "RAS Features"
319  */
320 static int mce_severity_amd(struct mce *m, struct pt_regs *regs, int tolerant,
321 			    char **msg, bool is_excp)
322 {
323 	enum context ctx = error_context(m, regs);
324 
325 	/* Processor Context Corrupt, no need to fumble too much, die! */
326 	if (m->status & MCI_STATUS_PCC)
327 		return MCE_PANIC_SEVERITY;
328 
329 	if (m->status & MCI_STATUS_UC) {
330 
331 		if (ctx == IN_KERNEL)
332 			return MCE_PANIC_SEVERITY;
333 
334 		/*
335 		 * On older systems where the overflow_recov flag is not present,
336 		 * simply panic if an error overflow occurs. If the overflow_recov
337 		 * flag is present and set, then software can try to at least kill
338 		 * the affected process to keep the system running.
339 		 */
340 		if (mce_flags.overflow_recov) {
341 			if (mce_flags.smca)
342 				return mce_severity_amd_smca(m, ctx);
343 
344 			/* kill current process */
345 			return MCE_AR_SEVERITY;
346 		} else {
347 			/* at least one error was not logged */
348 			if (m->status & MCI_STATUS_OVER)
349 				return MCE_PANIC_SEVERITY;
350 		}
351 
352 		/*
353 		 * For any other case, return MCE_UC_SEVERITY so that we log the
354 		 * error and exit #MC handler.
355 		 */
356 		return MCE_UC_SEVERITY;
357 	}
358 
359 	/*
360 	 * deferred error: poll handler catches these and adds to mce_ring so
361 	 * memory-failure can take recovery actions.
362 	 */
363 	if (m->status & MCI_STATUS_DEFERRED)
364 		return MCE_DEFERRED_SEVERITY;
365 
366 	/*
367 	 * corrected error: poll handler catches these and passes responsibility
368 	 * of decoding the error to EDAC
369 	 */
370 	return MCE_KEEP_SEVERITY;
371 }
372 
373 static int mce_severity_intel(struct mce *m, struct pt_regs *regs,
374 			      int tolerant, char **msg, bool is_excp)
375 {
376 	enum exception excp = (is_excp ? EXCP_CONTEXT : NO_EXCP);
377 	enum context ctx = error_context(m, regs);
378 	struct severity *s;
379 
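	/*
	 * Walk the table in order; the first entry whose constraints all
	 * match decides the severity. Unset (zero) fields such as mcgmask,
	 * ser, context, excp, cpu_model or bank_lo are treated as "match
	 * anything".
	 */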
380 	for (s = severities;; s++) {
381 		if ((m->status & s->mask) != s->result)
382 			continue;
383 		if ((m->mcgstatus & s->mcgmask) != s->mcgres)
384 			continue;
385 		if (s->ser == SER_REQUIRED && !mca_cfg.ser)
386 			continue;
387 		if (s->ser == NO_SER && mca_cfg.ser)
388 			continue;
389 		if (s->context && ctx != s->context)
390 			continue;
391 		if (s->excp && excp != s->excp)
392 			continue;
393 		if (s->cpu_model && boot_cpu_data.x86_model != s->cpu_model)
394 			continue;
395 		if (s->cpu_minstepping && boot_cpu_data.x86_stepping < s->cpu_minstepping)
396 			continue;
397 		if (s->bank_lo && (m->bank < s->bank_lo || m->bank > s->bank_hi))
398 			continue;
399 		if (msg)
400 			*msg = s->msg;
401 		s->covered = 1;
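		/*
		 * In kernel context, anything graded UC or worse is escalated
		 * to PANIC unless the tolerance level has been raised
		 * (tolerant >= 1).
		 */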
402 		if (s->sev >= MCE_UC_SEVERITY && ctx == IN_KERNEL) {
403 			if (tolerant < 1)
404 				return MCE_PANIC_SEVERITY;
405 		}
406 		return s->sev;
407 	}
408 }
409 
410 /* Default to mce_severity_intel */
411 int (*mce_severity)(struct mce *m, struct pt_regs *regs, int tolerant, char **msg, bool is_excp) =
412 		    mce_severity_intel;
413 
414 void __init mcheck_vendor_init_severity(void)
415 {
416 	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
417 	    boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
418 		mce_severity = mce_severity_amd;
419 }
420 
421 #ifdef CONFIG_DEBUG_FS
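/*
 * debugfs interface: reading "severities-coverage" (typically under
 * /sys/kernel/debug/mce/) lists each rule with a flag showing whether it
 * has matched at least once; writing anything to the file clears all the
 * coverage flags.
 */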
422 static void *s_start(struct seq_file *f, loff_t *pos)
423 {
424 	if (*pos >= ARRAY_SIZE(severities))
425 		return NULL;
426 	return &severities[*pos];
427 }
428 
429 static void *s_next(struct seq_file *f, void *data, loff_t *pos)
430 {
431 	if (++(*pos) >= ARRAY_SIZE(severities))
432 		return NULL;
433 	return &severities[*pos];
434 }
435 
436 static void s_stop(struct seq_file *f, void *data)
437 {
438 }
439 
440 static int s_show(struct seq_file *f, void *data)
441 {
442 	struct severity *ser = data;
443 	seq_printf(f, "%d\t%s\n", ser->covered, ser->msg);
444 	return 0;
445 }
446 
447 static const struct seq_operations severities_seq_ops = {
448 	.start	= s_start,
449 	.next	= s_next,
450 	.stop	= s_stop,
451 	.show	= s_show,
452 };
453 
454 static int severities_coverage_open(struct inode *inode, struct file *file)
455 {
456 	return seq_open(file, &severities_seq_ops);
457 }
458 
459 static ssize_t severities_coverage_write(struct file *file,
460 					 const char __user *ubuf,
461 					 size_t count, loff_t *ppos)
462 {
463 	int i;
464 	for (i = 0; i < ARRAY_SIZE(severities); i++)
465 		severities[i].covered = 0;
466 	return count;
467 }
468 
469 static const struct file_operations severities_coverage_fops = {
470 	.open		= severities_coverage_open,
471 	.release	= seq_release,
472 	.read		= seq_read,
473 	.write		= severities_coverage_write,
474 	.llseek		= seq_lseek,
475 };
476 
477 static int __init severities_debugfs_init(void)
478 {
479 	struct dentry *dmce;
480 
481 	dmce = mce_get_debugfs_dir();
482 
483 	debugfs_create_file("severities-coverage", 0444, dmce, NULL,
484 			    &severities_coverage_fops);
485 	return 0;
486 }
487 late_initcall(severities_debugfs_init);
488 #endif /* CONFIG_DEBUG_FS */
489