xref: /openbmc/linux/drivers/edac/mce_amd.c (revision 29a36d4d)
#include <linux/module.h>
#include <linux/slab.h>

#include "mce_amd.h"

/* Per-family decode callbacks, selected in mce_amd_init(). */
static struct amd_decoder_ops *fam_ops;

/* Mask applied to the MCA extended error code (wider on family 0x15). */
static u8 xec_mask	 = 0xf;
static u8 nb_err_cpumask = 0xf;

/* When false (default), NB GART TLB errors are dropped in amd_filter_mce(). */
static bool report_gart_errors;

/* DRAM ECC decoder registered via amd_register_ecc_decoder(). */
static void (*nb_bus_decoder)(int node_id, struct mce *m);

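/*
 * Toggle reporting of NB GART TLB errors (MC4, extended error code 0x5),
 * which are filtered out by default in amd_filter_mce() below.
 */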
void amd_report_gart_errors(bool v)
{
	report_gart_errors = v;
}
EXPORT_SYMBOL_GPL(amd_report_gart_errors);

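/*
 * Register/unregister a callback for decoding NB DRAM ECC errors; it is
 * invoked from amd_decode_nb_mce() for extended error codes 0x0 and 0x8.
 * Typically set by an EDAC memory controller driver.
 */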
void amd_register_ecc_decoder(void (*f)(int, struct mce *))
{
	nb_bus_decoder = f;
}
EXPORT_SYMBOL_GPL(amd_register_ecc_decoder);

void amd_unregister_ecc_decoder(void (*f)(int, struct mce *))
{
	if (nb_bus_decoder) {
		WARN_ON(nb_bus_decoder != f);

		nb_bus_decoder = NULL;
	}
}
EXPORT_SYMBOL_GPL(amd_unregister_ecc_decoder);

/*
 * string representation for the different MCA reported error types, see F3x48
 * or MSR0000_0411.
 */

/* transaction type */
const char *tt_msgs[] = { "INSN", "DATA", "GEN", "RESV" };
EXPORT_SYMBOL_GPL(tt_msgs);

/* cache level */
const char *ll_msgs[] = { "RESV", "L1", "L2", "L3/GEN" };
EXPORT_SYMBOL_GPL(ll_msgs);

/* memory transaction type */
const char *rrrr_msgs[] = {
	"GEN", "RD", "WR", "DRD", "DWR", "IRD", "PRF", "EV", "SNP"
};
EXPORT_SYMBOL_GPL(rrrr_msgs);

/* participating processor */
const char *pp_msgs[] = { "SRC", "RES", "OBS", "GEN" };
EXPORT_SYMBOL_GPL(pp_msgs);

/* request timeout */
const char *to_msgs[] = { "no timeout", "timed out" };
EXPORT_SYMBOL_GPL(to_msgs);

/* memory or i/o */
const char *ii_msgs[] = { "MEM", "RESV", "IO", "GEN" };
EXPORT_SYMBOL_GPL(ii_msgs);

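/*
 * Northbridge extended error code descriptions, indexed as
 * f10h_nb_mce_desc[xec - offset] in f10h_nb_mce() below.
 */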
static const char *f10h_nb_mce_desc[] = {
	"HT link data error",
	"Protocol error (link, L3, probe filter, etc.)",
	"Parity error in NB-internal arrays",
	"Link Retry due to IO link transmission error",
	"L3 ECC data cache error",
	"ECC error in L3 cache tag",
	"L3 LRU parity bits error",
	"ECC Error in the Probe Filter directory"
};

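/*
 * Family 0x15 instruction cache extended error code descriptions. Indexing
 * in f15h_ic_mce(): xec 0x0-0xa map directly, xec 0xd maps to entry 0xb,
 * and xec 0x10-0x14 map to entries 0xc-0x10 (decoder parity errors).
 */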
static const char * const f15h_ic_mce_desc[] = {
	"UC during a demand linefill from L2",
	"Parity error during data load from IC",
	"Parity error for IC valid bit",
	"Main tag parity error",
	"Parity error in prediction queue",
	"PFB data/address parity error",
	"Parity error in the branch status reg",
	"PFB promotion address error",
	"Tag error during probe/victimization",
	"Parity error for IC probe tag valid bit",
	"PFB non-cacheable bit parity error",
	"PFB valid bit parity error",			/* xec = 0xd */
	"patch RAM",					/* xec = 0x10 */
	"uop queue",
	"insn buffer",
	"predecode buffer",
	"fetch address FIFO"
};

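/*
 * Family 0x15 combined unit (CU) extended error code descriptions. Indexing
 * in amd_decode_cu_mce(): xec 0x4-0xc map to entries 0-8, xec 0x10-0x14 map
 * to entries 9-13.
 */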
static const char * const f15h_cu_mce_desc[] = {
	"Fill ECC error on data fills",			/* xec = 0x4 */
	"Fill parity error on insn fills",
	"Prefetcher request FIFO parity error",
	"PRQ address parity error",
	"PRQ data parity error",
	"WCC Tag ECC error",
	"WCC Data ECC error",
	"WCB Data parity error",
	"VB Data/ECC error",
	"L2 Tag ECC error",				/* xec = 0x10 */
	"Hard L2 Tag ECC error",
	"Multiple hits on L2 tag",
	"XAB parity error",
	"PRB address parity error"
};

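/*
 * Family 0x15 execution unit (FR/EX) error descriptions used by
 * amd_decode_fr_mce(); entries 0x1-0xb get " parity error" appended.
 */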
static const char * const fr_ex_mce_desc[] = {
	"CPU Watchdog timer expire",
	"Wakeup array dest tag",
	"AG payload array",
	"EX payload array",
	"IDRF array",
	"Retire dispatch queue",
	"Mapper checkpoint array",
	"Physical register file EX0 port",
	"Physical register file EX1 port",
	"Physical register file AG0 port",
	"Physical register file AG1 port",
	"Flag register file",
	"DE correctable error could not be corrected"
};

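/*
 * The per-family decode helpers below return true if they recognized and
 * printed the error, false otherwise so the caller can flag corrupted MCE
 * info. They are wired into fam_ops in mce_amd_init().
 */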
static bool f12h_dc_mce(u16 ec, u8 xec)
{
	bool ret = false;

	if (MEM_ERROR(ec)) {
		u8 ll = LL(ec);
		ret = true;

		if (ll == LL_L2)
			pr_cont("during L1 linefill from L2.\n");
		else if (ll == LL_L1)
			pr_cont("Data/Tag %s error.\n", R4_MSG(ec));
		else
			ret = false;
	}
	return ret;
}

static bool f10h_dc_mce(u16 ec, u8 xec)
{
	if (R4(ec) == R4_GEN && LL(ec) == LL_L1) {
		pr_cont("during data scrub.\n");
		return true;
	}
	return f12h_dc_mce(ec, xec);
}

static bool k8_dc_mce(u16 ec, u8 xec)
{
	if (BUS_ERROR(ec)) {
		pr_cont("during system linefill.\n");
		return true;
	}

	return f10h_dc_mce(ec, xec);
}

static bool f14h_dc_mce(u16 ec, u8 xec)
{
	u8 r4	 = R4(ec);
	bool ret = true;

	if (MEM_ERROR(ec)) {

		if (TT(ec) != TT_DATA || LL(ec) != LL_L1)
			return false;

		switch (r4) {
		case R4_DRD:
		case R4_DWR:
			pr_cont("Data/Tag parity error due to %s.\n",
				(r4 == R4_DRD ? "load/hw prf" : "store"));
			break;
		case R4_EVICT:
			pr_cont("Copyback parity error on a tag miss.\n");
			break;
		case R4_SNOOP:
			pr_cont("Tag parity error during snoop.\n");
			break;
		default:
			ret = false;
		}
	} else if (BUS_ERROR(ec)) {

		if ((II(ec) != II_MEM && II(ec) != II_IO) || LL(ec) != LL_LG)
			return false;

		pr_cont("System read data error on a ");

		switch (r4) {
		case R4_RD:
			pr_cont("TLB reload.\n");
			break;
		case R4_DWR:
			pr_cont("store.\n");
			break;
		case R4_DRD:
			pr_cont("load.\n");
			break;
		default:
			ret = false;
		}
	} else {
		ret = false;
	}

	return ret;
}

static bool f15h_dc_mce(u16 ec, u8 xec)
{
	bool ret = true;

	if (MEM_ERROR(ec)) {

		switch (xec) {
		case 0x0:
			pr_cont("Data Array access error.\n");
			break;

		case 0x1:
			pr_cont("UC error during a linefill from L2/NB.\n");
			break;

		case 0x2:
		case 0x11:
			pr_cont("STQ access error.\n");
			break;

		case 0x3:
			pr_cont("SCB access error.\n");
			break;

		case 0x10:
			pr_cont("Tag error.\n");
			break;

		case 0x12:
			pr_cont("LDQ access error.\n");
			break;

		default:
			ret = false;
		}
	} else if (BUS_ERROR(ec)) {

		if (!xec)
			pr_cont("during system linefill.\n");
		else
			pr_cont(" Internal %s condition.\n",
				((xec == 1) ? "livelock" : "deadlock"));
	} else
		ret = false;

	return ret;
}

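/*
 * MC0 (data cache) error decoder. TLB error signatures are common across
 * families and are handled here; everything else is deferred to the
 * family-specific fam_ops->dc_mce() handler.
 */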
static void amd_decode_dc_mce(struct mce *m)
{
	u16 ec = EC(m->status);
	u8 xec = XEC(m->status, xec_mask);

	pr_emerg(HW_ERR "Data Cache Error: ");

	/* TLB error signatures are the same across families */
	if (TLB_ERROR(ec)) {
		if (TT(ec) == TT_DATA) {
			pr_cont("%s TLB %s.\n", LL_MSG(ec),
				((xec == 2) ? "locked miss"
					    : (xec ? "multimatch" : "parity")));
			return;
		}
	} else if (!fam_ops->dc_mce(ec, xec))
		pr_emerg(HW_ERR "Corrupted DC MCE info?\n");
}

static bool k8_ic_mce(u16 ec, u8 xec)
{
	u8 ll	 = LL(ec);
	bool ret = true;

	if (!MEM_ERROR(ec))
		return false;

	if (ll == 0x2)
		pr_cont("during a linefill from L2.\n");
	else if (ll == 0x1) {
		switch (R4(ec)) {
		case R4_IRD:
			pr_cont("Parity error during data load.\n");
			break;

		case R4_EVICT:
			pr_cont("Copyback Parity/Victim error.\n");
			break;

		case R4_SNOOP:
			pr_cont("Tag Snoop error.\n");
			break;

		default:
			ret = false;
			break;
		}
	} else
		ret = false;

	return ret;
}

static bool f14h_ic_mce(u16 ec, u8 xec)
{
	u8 r4    = R4(ec);
	bool ret = true;

	if (MEM_ERROR(ec)) {
		if (TT(ec) != 0 || LL(ec) != 1)
			ret = false;

		if (r4 == R4_IRD)
			pr_cont("Data/tag array parity error for a tag hit.\n");
		else if (r4 == R4_SNOOP)
			pr_cont("Tag error during snoop/victimization.\n");
		else
			ret = false;
	}
	return ret;
}

static bool f15h_ic_mce(u16 ec, u8 xec)
{
	bool ret = true;

	if (!MEM_ERROR(ec))
		return false;

	switch (xec) {
	case 0x0 ... 0xa:
		pr_cont("%s.\n", f15h_ic_mce_desc[xec]);
		break;

	case 0xd:
		pr_cont("%s.\n", f15h_ic_mce_desc[xec-2]);
		break;

	case 0x10 ... 0x14:
		pr_cont("Decoder %s parity error.\n", f15h_ic_mce_desc[xec-4]);
		break;

	default:
		ret = false;
	}
	return ret;
}

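/*
 * MC1 (instruction cache) error decoder. TLB and bus errors are printed
 * directly; the rest is handed to the family-specific fam_ops->ic_mce().
 */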
static void amd_decode_ic_mce(struct mce *m)
{
	u16 ec = EC(m->status);
	u8 xec = XEC(m->status, xec_mask);

	pr_emerg(HW_ERR "Instruction Cache Error: ");

	if (TLB_ERROR(ec))
		pr_cont("%s TLB %s.\n", LL_MSG(ec),
			(xec ? "multimatch" : "parity error"));
	else if (BUS_ERROR(ec)) {
		bool k8 = (boot_cpu_data.x86 == 0xf && (m->status & BIT_64(58)));

		pr_cont("during %s.\n", (k8 ? "system linefill" : "NB data read"));
	} else if (!fam_ops->ic_mce(ec, xec))
		pr_emerg(HW_ERR "Corrupted IC MCE info?\n");
}

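/*
 * MC2 (bus unit) error decoder, used on families other than 0x15 (see the
 * bank 2 dispatch in amd_decode_mce() below).
 */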
static void amd_decode_bu_mce(struct mce *m)
{
	u16 ec = EC(m->status);
	u8 xec = XEC(m->status, xec_mask);

	pr_emerg(HW_ERR "Bus Unit Error");

	if (xec == 0x1)
		pr_cont(" in the write data buffers.\n");
	else if (xec == 0x3)
		pr_cont(" in the victim data buffers.\n");
	else if (xec == 0x2 && MEM_ERROR(ec))
		pr_cont(": %s error in the L2 cache tags.\n", R4_MSG(ec));
	else if (xec == 0x0) {
		if (TLB_ERROR(ec))
			pr_cont(": %s error in a Page Descriptor Cache or "
				"Guest TLB.\n", TT_MSG(ec));
		else if (BUS_ERROR(ec))
			pr_cont(": %s/ECC error in data read from NB: %s.\n",
				R4_MSG(ec), PP_MSG(ec));
		else if (MEM_ERROR(ec)) {
			u8 r4 = R4(ec);

			if (r4 >= 0x7)
				pr_cont(": %s error during data copyback.\n",
					R4_MSG(ec));
			else if (r4 <= 0x1)
				pr_cont(": %s parity/ECC error during data "
					"access from L2.\n", R4_MSG(ec));
			else
				goto wrong_bu_mce;
		} else
			goto wrong_bu_mce;
	} else
		goto wrong_bu_mce;

	return;

wrong_bu_mce:
	pr_emerg(HW_ERR "Corrupted BU MCE info?\n");
}

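/*
 * MC2 (combined unit) error decoder for family 0x15, which replaces the bus
 * unit bank of earlier families.
 */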
static void amd_decode_cu_mce(struct mce *m)
{
	u16 ec = EC(m->status);
	u8 xec = XEC(m->status, xec_mask);

	pr_emerg(HW_ERR "Combined Unit Error: ");

	if (TLB_ERROR(ec)) {
		if (xec == 0x0)
			pr_cont("Data parity TLB read error.\n");
		else if (xec == 0x1)
			pr_cont("Poison data provided for TLB fill.\n");
		else
			goto wrong_cu_mce;
	} else if (BUS_ERROR(ec)) {
		if (xec > 2)
			goto wrong_cu_mce;

		pr_cont("Error during attempted NB data read.\n");
	} else if (MEM_ERROR(ec)) {
		switch (xec) {
		case 0x4 ... 0xc:
			pr_cont("%s.\n", f15h_cu_mce_desc[xec - 0x4]);
			break;

		case 0x10 ... 0x14:
			pr_cont("%s.\n", f15h_cu_mce_desc[xec - 0x7]);
			break;

		default:
			goto wrong_cu_mce;
		}
	}

	return;

wrong_cu_mce:
	pr_emerg(HW_ERR "Corrupted CU MCE info?\n");
}

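/*
 * MC3 (load/store unit) error decoder. Families 0x14 and later have no LS
 * MCA bank, hence the sanity check.
 */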
static void amd_decode_ls_mce(struct mce *m)
{
	u16 ec = EC(m->status);
	u8 xec = XEC(m->status, xec_mask);

	if (boot_cpu_data.x86 >= 0x14) {
		pr_emerg("You shouldn't be seeing an LS MCE on this cpu family,"
			 " please report on LKML.\n");
		return;
	}

	pr_emerg(HW_ERR "Load Store Error");

	if (xec == 0x0) {
		u8 r4 = R4(ec);

		if (!BUS_ERROR(ec) || (r4 != R4_DRD && r4 != R4_DWR))
			goto wrong_ls_mce;

		pr_cont(" during %s.\n", R4_MSG(ec));
	} else
		goto wrong_ls_mce;

	return;

wrong_ls_mce:
	pr_emerg(HW_ERR "Corrupted LS MCE info?\n");
}

static bool k8_nb_mce(u16 ec, u8 xec)
{
	bool ret = true;

	switch (xec) {
	case 0x1:
		pr_cont("CRC error detected on HT link.\n");
		break;

	case 0x5:
		pr_cont("Invalid GART PTE entry during GART table walk.\n");
		break;

	case 0x6:
		pr_cont("Unsupported atomic RMW received from an IO link.\n");
		break;

	case 0x0:
	case 0x8:
		if (boot_cpu_data.x86 == 0x11)
			return false;

		pr_cont("DRAM ECC error detected on the NB.\n");
		break;

	case 0xd:
		pr_cont("Parity error on the DRAM addr/ctl signals.\n");
		break;

	default:
		ret = false;
		break;
	}

	return ret;
}

static bool f10h_nb_mce(u16 ec, u8 xec)
{
	bool ret = true;
	u8 offset = 0;

	if (k8_nb_mce(ec, xec))
		return true;

	switch (xec) {
	case 0xa ... 0xc:
		offset = 10;
		break;

	case 0xe:
		offset = 11;
		break;

	case 0xf:
		if (TLB_ERROR(ec))
			pr_cont("GART Table Walk data error.\n");
		else if (BUS_ERROR(ec))
			pr_cont("DMA Exclusion Vector Table Walk error.\n");
		else
			ret = false;

		goto out;

	case 0x19:
		if (boot_cpu_data.x86 == 0x15)
			pr_cont("Compute Unit Data Error.\n");
		else
			ret = false;

		goto out;

	case 0x1c ... 0x1f:
		offset = 24;
		break;

	default:
		ret = false;

		goto out;
	}

	pr_cont("%s.\n", f10h_nb_mce_desc[xec - offset]);

out:
	return ret;
}

static bool nb_noop_mce(u16 ec, u8 xec)
{
	return false;
}

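/*
 * MC4 (northbridge) error decoder. Prints the family-independent extended
 * error codes directly, defers the rest to fam_ops->nb_mce(), and invokes
 * the registered nb_bus_decoder() callback for DRAM ECC errors (xec 0x0/0x8)
 * so they can be decoded further.
 */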
void amd_decode_nb_mce(struct mce *m)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;
	int node_id = amd_get_nb_id(m->extcpu);
	u16 ec = EC(m->status);
	u8 xec = XEC(m->status, 0x1f);

	pr_emerg(HW_ERR "Northbridge Error (node %d): ", node_id);

	switch (xec) {
	case 0x2:
		pr_cont("Sync error (sync packets on HT link detected).\n");
		return;

	case 0x3:
		pr_cont("HT Master abort.\n");
		return;

	case 0x4:
		pr_cont("HT Target abort.\n");
		return;

	case 0x7:
		pr_cont("NB Watchdog timeout.\n");
		return;

	case 0x9:
		pr_cont("SVM DMA Exclusion Vector error.\n");
		return;

	default:
		break;
	}

	if (!fam_ops->nb_mce(ec, xec))
		goto wrong_nb_mce;

	if (c->x86 == 0xf || c->x86 == 0x10 || c->x86 == 0x15)
		if ((xec == 0x8 || xec == 0x0) && nb_bus_decoder)
			nb_bus_decoder(node_id, m);

	return;

wrong_nb_mce:
	pr_emerg(HW_ERR "Corrupted NB MCE info?\n");
}
EXPORT_SYMBOL_GPL(amd_decode_nb_mce);

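/*
 * MC5 error decoder: reported as an execution unit error on family 0x15 and
 * as a FIROB error on the other supported families; families 0xf and 0x11
 * are rejected outright.
 */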
static void amd_decode_fr_mce(struct mce *m)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;
	u8 xec = XEC(m->status, xec_mask);

	if (c->x86 == 0xf || c->x86 == 0x11)
		goto wrong_fr_mce;

	if (c->x86 != 0x15 && xec != 0x0)
		goto wrong_fr_mce;

	pr_emerg(HW_ERR "%s Error: ",
		 (c->x86 == 0x15 ? "Execution Unit" : "FIROB"));

	if (xec == 0x0 || xec == 0xc)
		pr_cont("%s.\n", fr_ex_mce_desc[xec]);
	else if (xec < 0xd)
		pr_cont("%s parity error.\n", fr_ex_mce_desc[xec]);
	else
		goto wrong_fr_mce;

	return;

wrong_fr_mce:
	pr_emerg(HW_ERR "Corrupted FR MCE info?\n");
}

static void amd_decode_fp_mce(struct mce *m)
{
	u8 xec = XEC(m->status, xec_mask);

	pr_emerg(HW_ERR "Floating Point Unit Error: ");

	switch (xec) {
	case 0x1:
		pr_cont("Free List");
		break;

	case 0x2:
		pr_cont("Physical Register File");
		break;

	case 0x3:
		pr_cont("Retire Queue");
		break;

	case 0x4:
		pr_cont("Scheduler table");
		break;

	case 0x5:
		pr_cont("Status Register File");
		break;

	default:
		goto wrong_fp_mce;
	}

	pr_cont(" parity error.\n");

	return;

wrong_fp_mce:
	pr_emerg(HW_ERR "Corrupted FP MCE info?\n");
}

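/*
 * Print the generic error code fields from the low 16 bits of MCi_STATUS
 * (cache level, transaction type, memory/io, participation and timeout),
 * using the string tables defined at the top of this file.
 */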
static inline void amd_decode_err_code(u16 ec)
{
	pr_emerg(HW_ERR "cache level: %s", LL_MSG(ec));

	if (BUS_ERROR(ec))
		pr_cont(", mem/io: %s", II_MSG(ec));
	else
		pr_cont(", tx: %s", TT_MSG(ec));

	if (MEM_ERROR(ec) || BUS_ERROR(ec)) {
		pr_cont(", mem-tx: %s", R4_MSG(ec));

		if (BUS_ERROR(ec))
			pr_cont(", part-proc: %s (%s)", PP_MSG(ec), TO_MSG(ec));
	}

	pr_cont("\n");
}

/*
 * Filter out unwanted MCE signatures here.
 */
static bool amd_filter_mce(struct mce *m)
{
	u8 xec = (m->status >> 16) & 0x1f;

	/*
	 * NB GART TLB error reporting is disabled by default.
	 */
	if (m->bank == 4 && xec == 0x5 && !report_gart_errors)
		return true;

	return false;
}

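/*
 * Notifier callback hooked into the x86 MCE decoder chain: dump the raw
 * MCi_STATUS bits, dispatch to the per-bank decoder, then print the generic
 * error code fields. Returns NOTIFY_STOP.
 */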
int amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
{
	struct mce *m = (struct mce *)data;
	struct cpuinfo_x86 *c = &boot_cpu_data;
	int ecc;

	if (amd_filter_mce(m))
		return NOTIFY_STOP;

	pr_emerg(HW_ERR "CPU:%d\tMC%d_STATUS[%s|%s|%s|%s|%s",
		m->extcpu, m->bank,
		((m->status & MCI_STATUS_OVER)	? "Over"  : "-"),
		((m->status & MCI_STATUS_UC)	? "UE"	  : "CE"),
		((m->status & MCI_STATUS_MISCV)	? "MiscV" : "-"),
		((m->status & MCI_STATUS_PCC)	? "PCC"	  : "-"),
		((m->status & MCI_STATUS_ADDRV)	? "AddrV" : "-"));

	if (c->x86 == 0x15)
		pr_cont("|%s|%s",
			((m->status & BIT_64(44)) ? "Deferred" : "-"),
			((m->status & BIT_64(43)) ? "Poison"   : "-"));

	/* UECC/CECC: MCi_STATUS bits 46:45, i.e. bits [14:13] of the high dword */
	ecc = (m->status >> 45) & 0x3;
	if (ecc)
		pr_cont("|%sECC", ((ecc == 2) ? "C" : "U"));

	pr_cont("]: 0x%016llx\n", m->status);

	if (m->status & MCI_STATUS_ADDRV)
		pr_emerg(HW_ERR "\tMC%d_ADDR: 0x%016llx\n", m->bank, m->addr);

	switch (m->bank) {
	case 0:
		amd_decode_dc_mce(m);
		break;

	case 1:
		amd_decode_ic_mce(m);
		break;

	case 2:
		if (c->x86 == 0x15)
			amd_decode_cu_mce(m);
		else
			amd_decode_bu_mce(m);
		break;

	case 3:
		amd_decode_ls_mce(m);
		break;

	case 4:
		amd_decode_nb_mce(m);
		break;

	case 5:
		amd_decode_fr_mce(m);
		break;

	case 6:
		amd_decode_fp_mce(m);
		break;

	default:
		break;
	}

	amd_decode_err_code(m->status & 0xffff);

	return NOTIFY_STOP;
}
EXPORT_SYMBOL_GPL(amd_decode_mce);

static struct notifier_block amd_mce_dec_nb = {
	.notifier_call	= amd_decode_mce,
};

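/*
 * Init: bail out on non-AMD or unsupported families, pick the per-family
 * decode callbacks and register on the MCE decoder chain.
 */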
static int __init mce_amd_init(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	if (c->x86_vendor != X86_VENDOR_AMD)
		return 0;

	if ((c->x86 < 0xf || c->x86 > 0x12) &&
	    (c->x86 != 0x14 || c->x86_model > 0xf) &&
	    (c->x86 != 0x15 || c->x86_model > 0xf))
		return 0;

	fam_ops = kzalloc(sizeof(struct amd_decoder_ops), GFP_KERNEL);
	if (!fam_ops)
		return -ENOMEM;

	switch (c->x86) {
	case 0xf:
		fam_ops->dc_mce = k8_dc_mce;
		fam_ops->ic_mce = k8_ic_mce;
		fam_ops->nb_mce = k8_nb_mce;
		break;

	case 0x10:
		fam_ops->dc_mce = f10h_dc_mce;
		fam_ops->ic_mce = k8_ic_mce;
		fam_ops->nb_mce = f10h_nb_mce;
		break;

	case 0x11:
		fam_ops->dc_mce = k8_dc_mce;
		fam_ops->ic_mce = k8_ic_mce;
		fam_ops->nb_mce = f10h_nb_mce;
		break;

	case 0x12:
		fam_ops->dc_mce = f12h_dc_mce;
		fam_ops->ic_mce = k8_ic_mce;
		fam_ops->nb_mce = nb_noop_mce;
		break;

	case 0x14:
		nb_err_cpumask  = 0x3;
		fam_ops->dc_mce = f14h_dc_mce;
		fam_ops->ic_mce = f14h_ic_mce;
		fam_ops->nb_mce = nb_noop_mce;
		break;

	case 0x15:
		xec_mask = 0x1f;
		fam_ops->dc_mce = f15h_dc_mce;
		fam_ops->ic_mce = f15h_ic_mce;
		fam_ops->nb_mce = f10h_nb_mce;
		break;

	default:
		printk(KERN_WARNING "Huh? What family is that: %d?!\n", c->x86);
		kfree(fam_ops);
		return -EINVAL;
	}

	pr_info("MCE: In-kernel MCE decoding enabled.\n");

	atomic_notifier_chain_register(&x86_mce_decoder_chain, &amd_mce_dec_nb);

	return 0;
}
early_initcall(mce_amd_init);

#ifdef MODULE
static void __exit mce_amd_exit(void)
{
	atomic_notifier_chain_unregister(&x86_mce_decoder_chain, &amd_mce_dec_nb);
	kfree(fam_ops);
}

MODULE_DESCRIPTION("AMD MCE decoder");
MODULE_ALIAS("edac-mce-amd");
MODULE_LICENSE("GPL");
module_exit(mce_amd_exit);
#endif