1 #include <linux/module.h> 2 #include <linux/slab.h> 3 4 #include "mce_amd.h" 5 6 static struct amd_decoder_ops *fam_ops; 7 8 static u8 xec_mask = 0xf; 9 static u8 nb_err_cpumask = 0xf; 10 11 static bool report_gart_errors; 12 static void (*nb_bus_decoder)(int node_id, struct mce *m); 13 14 void amd_report_gart_errors(bool v) 15 { 16 report_gart_errors = v; 17 } 18 EXPORT_SYMBOL_GPL(amd_report_gart_errors); 19 20 void amd_register_ecc_decoder(void (*f)(int, struct mce *)) 21 { 22 nb_bus_decoder = f; 23 } 24 EXPORT_SYMBOL_GPL(amd_register_ecc_decoder); 25 26 void amd_unregister_ecc_decoder(void (*f)(int, struct mce *)) 27 { 28 if (nb_bus_decoder) { 29 WARN_ON(nb_bus_decoder != f); 30 31 nb_bus_decoder = NULL; 32 } 33 } 34 EXPORT_SYMBOL_GPL(amd_unregister_ecc_decoder); 35 36 /* 37 * string representation for the different MCA reported error types, see F3x48 38 * or MSR0000_0411. 39 */ 40 41 /* transaction type */ 42 static const char * const tt_msgs[] = { "INSN", "DATA", "GEN", "RESV" }; 43 44 /* cache level */ 45 static const char * const ll_msgs[] = { "RESV", "L1", "L2", "L3/GEN" }; 46 47 /* memory transaction type */ 48 static const char * const rrrr_msgs[] = { 49 "GEN", "RD", "WR", "DRD", "DWR", "IRD", "PRF", "EV", "SNP" 50 }; 51 52 /* participating processor */ 53 const char * const pp_msgs[] = { "SRC", "RES", "OBS", "GEN" }; 54 EXPORT_SYMBOL_GPL(pp_msgs); 55 56 /* request timeout */ 57 static const char * const to_msgs[] = { "no timeout", "timed out" }; 58 59 /* memory or i/o */ 60 static const char * const ii_msgs[] = { "MEM", "RESV", "IO", "GEN" }; 61 62 /* internal error type */ 63 static const char * const uu_msgs[] = { "RESV", "RESV", "HWA", "RESV" }; 64 65 static const char * const f15h_mc1_mce_desc[] = { 66 "UC during a demand linefill from L2", 67 "Parity error during data load from IC", 68 "Parity error for IC valid bit", 69 "Main tag parity error", 70 "Parity error in prediction queue", 71 "PFB data/address parity error", 72 "Parity error in the branch status reg", 73 "PFB promotion address error", 74 "Tag error during probe/victimization", 75 "Parity error for IC probe tag valid bit", 76 "PFB non-cacheable bit parity error", 77 "PFB valid bit parity error", /* xec = 0xd */ 78 "Microcode Patch Buffer", /* xec = 010 */ 79 "uop queue", 80 "insn buffer", 81 "predecode buffer", 82 "fetch address FIFO" 83 }; 84 85 static const char * const f15h_mc2_mce_desc[] = { 86 "Fill ECC error on data fills", /* xec = 0x4 */ 87 "Fill parity error on insn fills", 88 "Prefetcher request FIFO parity error", 89 "PRQ address parity error", 90 "PRQ data parity error", 91 "WCC Tag ECC error", 92 "WCC Data ECC error", 93 "WCB Data parity error", 94 "VB Data ECC or parity error", 95 "L2 Tag ECC error", /* xec = 0x10 */ 96 "Hard L2 Tag ECC error", 97 "Multiple hits on L2 tag", 98 "XAB parity error", 99 "PRB address parity error" 100 }; 101 102 static const char * const mc4_mce_desc[] = { 103 "DRAM ECC error detected on the NB", 104 "CRC error detected on HT link", 105 "Link-defined sync error packets detected on HT link", 106 "HT Master abort", 107 "HT Target abort", 108 "Invalid GART PTE entry during GART table walk", 109 "Unsupported atomic RMW received from an IO link", 110 "Watchdog timeout due to lack of progress", 111 "DRAM ECC error detected on the NB", 112 "SVM DMA Exclusion Vector error", 113 "HT data error detected on link", 114 "Protocol error (link, L3, probe filter)", 115 "NB internal arrays parity error", 116 "DRAM addr/ctl signals parity error", 117 "IO link transmission error", 118 "L3 data cache ECC error", /* xec = 0x1c */ 119 "L3 cache tag error", 120 "L3 LRU parity bits error", 121 "ECC Error in the Probe Filter directory" 122 }; 123 124 static const char * const mc5_mce_desc[] = { 125 "CPU Watchdog timer expire", 126 "Wakeup array dest tag", 127 "AG payload array", 128 "EX payload array", 129 "IDRF array", 130 "Retire dispatch queue", 131 "Mapper checkpoint array", 132 "Physical register file EX0 port", 133 "Physical register file EX1 port", 134 "Physical register file AG0 port", 135 "Physical register file AG1 port", 136 "Flag register file", 137 "DE error occurred", 138 "Retire status queue" 139 }; 140 141 static bool f12h_mc0_mce(u16 ec, u8 xec) 142 { 143 bool ret = false; 144 145 if (MEM_ERROR(ec)) { 146 u8 ll = LL(ec); 147 ret = true; 148 149 if (ll == LL_L2) 150 pr_cont("during L1 linefill from L2.\n"); 151 else if (ll == LL_L1) 152 pr_cont("Data/Tag %s error.\n", R4_MSG(ec)); 153 else 154 ret = false; 155 } 156 return ret; 157 } 158 159 static bool f10h_mc0_mce(u16 ec, u8 xec) 160 { 161 if (R4(ec) == R4_GEN && LL(ec) == LL_L1) { 162 pr_cont("during data scrub.\n"); 163 return true; 164 } 165 return f12h_mc0_mce(ec, xec); 166 } 167 168 static bool k8_mc0_mce(u16 ec, u8 xec) 169 { 170 if (BUS_ERROR(ec)) { 171 pr_cont("during system linefill.\n"); 172 return true; 173 } 174 175 return f10h_mc0_mce(ec, xec); 176 } 177 178 static bool cat_mc0_mce(u16 ec, u8 xec) 179 { 180 u8 r4 = R4(ec); 181 bool ret = true; 182 183 if (MEM_ERROR(ec)) { 184 185 if (TT(ec) != TT_DATA || LL(ec) != LL_L1) 186 return false; 187 188 switch (r4) { 189 case R4_DRD: 190 case R4_DWR: 191 pr_cont("Data/Tag parity error due to %s.\n", 192 (r4 == R4_DRD ? "load/hw prf" : "store")); 193 break; 194 case R4_EVICT: 195 pr_cont("Copyback parity error on a tag miss.\n"); 196 break; 197 case R4_SNOOP: 198 pr_cont("Tag parity error during snoop.\n"); 199 break; 200 default: 201 ret = false; 202 } 203 } else if (BUS_ERROR(ec)) { 204 205 if ((II(ec) != II_MEM && II(ec) != II_IO) || LL(ec) != LL_LG) 206 return false; 207 208 pr_cont("System read data error on a "); 209 210 switch (r4) { 211 case R4_RD: 212 pr_cont("TLB reload.\n"); 213 break; 214 case R4_DWR: 215 pr_cont("store.\n"); 216 break; 217 case R4_DRD: 218 pr_cont("load.\n"); 219 break; 220 default: 221 ret = false; 222 } 223 } else { 224 ret = false; 225 } 226 227 return ret; 228 } 229 230 static bool f15h_mc0_mce(u16 ec, u8 xec) 231 { 232 bool ret = true; 233 234 if (MEM_ERROR(ec)) { 235 236 switch (xec) { 237 case 0x0: 238 pr_cont("Data Array access error.\n"); 239 break; 240 241 case 0x1: 242 pr_cont("UC error during a linefill from L2/NB.\n"); 243 break; 244 245 case 0x2: 246 case 0x11: 247 pr_cont("STQ access error.\n"); 248 break; 249 250 case 0x3: 251 pr_cont("SCB access error.\n"); 252 break; 253 254 case 0x10: 255 pr_cont("Tag error.\n"); 256 break; 257 258 case 0x12: 259 pr_cont("LDQ access error.\n"); 260 break; 261 262 default: 263 ret = false; 264 } 265 } else if (BUS_ERROR(ec)) { 266 267 if (!xec) 268 pr_cont("System Read Data Error.\n"); 269 else 270 pr_cont(" Internal error condition type %d.\n", xec); 271 } else 272 ret = false; 273 274 return ret; 275 } 276 277 static void decode_mc0_mce(struct mce *m) 278 { 279 u16 ec = EC(m->status); 280 u8 xec = XEC(m->status, xec_mask); 281 282 pr_emerg(HW_ERR "MC0 Error: "); 283 284 /* TLB error signatures are the same across families */ 285 if (TLB_ERROR(ec)) { 286 if (TT(ec) == TT_DATA) { 287 pr_cont("%s TLB %s.\n", LL_MSG(ec), 288 ((xec == 2) ? "locked miss" 289 : (xec ? "multimatch" : "parity"))); 290 return; 291 } 292 } else if (fam_ops->mc0_mce(ec, xec)) 293 ; 294 else 295 pr_emerg(HW_ERR "Corrupted MC0 MCE info?\n"); 296 } 297 298 static bool k8_mc1_mce(u16 ec, u8 xec) 299 { 300 u8 ll = LL(ec); 301 bool ret = true; 302 303 if (!MEM_ERROR(ec)) 304 return false; 305 306 if (ll == 0x2) 307 pr_cont("during a linefill from L2.\n"); 308 else if (ll == 0x1) { 309 switch (R4(ec)) { 310 case R4_IRD: 311 pr_cont("Parity error during data load.\n"); 312 break; 313 314 case R4_EVICT: 315 pr_cont("Copyback Parity/Victim error.\n"); 316 break; 317 318 case R4_SNOOP: 319 pr_cont("Tag Snoop error.\n"); 320 break; 321 322 default: 323 ret = false; 324 break; 325 } 326 } else 327 ret = false; 328 329 return ret; 330 } 331 332 static bool cat_mc1_mce(u16 ec, u8 xec) 333 { 334 u8 r4 = R4(ec); 335 bool ret = true; 336 337 if (!MEM_ERROR(ec)) 338 return false; 339 340 if (TT(ec) != TT_INSTR) 341 return false; 342 343 if (r4 == R4_IRD) 344 pr_cont("Data/tag array parity error for a tag hit.\n"); 345 else if (r4 == R4_SNOOP) 346 pr_cont("Tag error during snoop/victimization.\n"); 347 else if (xec == 0x0) 348 pr_cont("Tag parity error from victim castout.\n"); 349 else if (xec == 0x2) 350 pr_cont("Microcode patch RAM parity error.\n"); 351 else 352 ret = false; 353 354 return ret; 355 } 356 357 static bool f15h_mc1_mce(u16 ec, u8 xec) 358 { 359 bool ret = true; 360 361 if (!MEM_ERROR(ec)) 362 return false; 363 364 switch (xec) { 365 case 0x0 ... 0xa: 366 pr_cont("%s.\n", f15h_mc1_mce_desc[xec]); 367 break; 368 369 case 0xd: 370 pr_cont("%s.\n", f15h_mc1_mce_desc[xec-2]); 371 break; 372 373 case 0x10: 374 pr_cont("%s.\n", f15h_mc1_mce_desc[xec-4]); 375 break; 376 377 case 0x11 ... 0x14: 378 pr_cont("Decoder %s parity error.\n", f15h_mc1_mce_desc[xec-4]); 379 break; 380 381 default: 382 ret = false; 383 } 384 return ret; 385 } 386 387 static void decode_mc1_mce(struct mce *m) 388 { 389 u16 ec = EC(m->status); 390 u8 xec = XEC(m->status, xec_mask); 391 392 pr_emerg(HW_ERR "MC1 Error: "); 393 394 if (TLB_ERROR(ec)) 395 pr_cont("%s TLB %s.\n", LL_MSG(ec), 396 (xec ? "multimatch" : "parity error")); 397 else if (BUS_ERROR(ec)) { 398 bool k8 = (boot_cpu_data.x86 == 0xf && (m->status & BIT_64(58))); 399 400 pr_cont("during %s.\n", (k8 ? "system linefill" : "NB data read")); 401 } else if (fam_ops->mc1_mce(ec, xec)) 402 ; 403 else 404 pr_emerg(HW_ERR "Corrupted MC1 MCE info?\n"); 405 } 406 407 static bool k8_mc2_mce(u16 ec, u8 xec) 408 { 409 bool ret = true; 410 411 if (xec == 0x1) 412 pr_cont(" in the write data buffers.\n"); 413 else if (xec == 0x3) 414 pr_cont(" in the victim data buffers.\n"); 415 else if (xec == 0x2 && MEM_ERROR(ec)) 416 pr_cont(": %s error in the L2 cache tags.\n", R4_MSG(ec)); 417 else if (xec == 0x0) { 418 if (TLB_ERROR(ec)) 419 pr_cont(": %s error in a Page Descriptor Cache or " 420 "Guest TLB.\n", TT_MSG(ec)); 421 else if (BUS_ERROR(ec)) 422 pr_cont(": %s/ECC error in data read from NB: %s.\n", 423 R4_MSG(ec), PP_MSG(ec)); 424 else if (MEM_ERROR(ec)) { 425 u8 r4 = R4(ec); 426 427 if (r4 >= 0x7) 428 pr_cont(": %s error during data copyback.\n", 429 R4_MSG(ec)); 430 else if (r4 <= 0x1) 431 pr_cont(": %s parity/ECC error during data " 432 "access from L2.\n", R4_MSG(ec)); 433 else 434 ret = false; 435 } else 436 ret = false; 437 } else 438 ret = false; 439 440 return ret; 441 } 442 443 static bool f15h_mc2_mce(u16 ec, u8 xec) 444 { 445 bool ret = true; 446 447 if (TLB_ERROR(ec)) { 448 if (xec == 0x0) 449 pr_cont("Data parity TLB read error.\n"); 450 else if (xec == 0x1) 451 pr_cont("Poison data provided for TLB fill.\n"); 452 else 453 ret = false; 454 } else if (BUS_ERROR(ec)) { 455 if (xec > 2) 456 ret = false; 457 458 pr_cont("Error during attempted NB data read.\n"); 459 } else if (MEM_ERROR(ec)) { 460 switch (xec) { 461 case 0x4 ... 0xc: 462 pr_cont("%s.\n", f15h_mc2_mce_desc[xec - 0x4]); 463 break; 464 465 case 0x10 ... 0x14: 466 pr_cont("%s.\n", f15h_mc2_mce_desc[xec - 0x7]); 467 break; 468 469 default: 470 ret = false; 471 } 472 } 473 474 return ret; 475 } 476 477 static bool f16h_mc2_mce(u16 ec, u8 xec) 478 { 479 u8 r4 = R4(ec); 480 481 if (!MEM_ERROR(ec)) 482 return false; 483 484 switch (xec) { 485 case 0x04 ... 0x05: 486 pr_cont("%cBUFF parity error.\n", (r4 == R4_RD) ? 'I' : 'O'); 487 break; 488 489 case 0x09 ... 0x0b: 490 case 0x0d ... 0x0f: 491 pr_cont("ECC error in L2 tag (%s).\n", 492 ((r4 == R4_GEN) ? "BankReq" : 493 ((r4 == R4_SNOOP) ? "Prb" : "Fill"))); 494 break; 495 496 case 0x10 ... 0x19: 497 case 0x1b: 498 pr_cont("ECC error in L2 data array (%s).\n", 499 (((r4 == R4_RD) && !(xec & 0x3)) ? "Hit" : 500 ((r4 == R4_GEN) ? "Attr" : 501 ((r4 == R4_EVICT) ? "Vict" : "Fill")))); 502 break; 503 504 case 0x1c ... 0x1d: 505 case 0x1f: 506 pr_cont("Parity error in L2 attribute bits (%s).\n", 507 ((r4 == R4_RD) ? "Hit" : 508 ((r4 == R4_GEN) ? "Attr" : "Fill"))); 509 break; 510 511 default: 512 return false; 513 } 514 515 return true; 516 } 517 518 static void decode_mc2_mce(struct mce *m) 519 { 520 u16 ec = EC(m->status); 521 u8 xec = XEC(m->status, xec_mask); 522 523 pr_emerg(HW_ERR "MC2 Error: "); 524 525 if (!fam_ops->mc2_mce(ec, xec)) 526 pr_cont(HW_ERR "Corrupted MC2 MCE info?\n"); 527 } 528 529 static void decode_mc3_mce(struct mce *m) 530 { 531 u16 ec = EC(m->status); 532 u8 xec = XEC(m->status, xec_mask); 533 534 if (boot_cpu_data.x86 >= 0x14) { 535 pr_emerg("You shouldn't be seeing MC3 MCE on this cpu family," 536 " please report on LKML.\n"); 537 return; 538 } 539 540 pr_emerg(HW_ERR "MC3 Error"); 541 542 if (xec == 0x0) { 543 u8 r4 = R4(ec); 544 545 if (!BUS_ERROR(ec) || (r4 != R4_DRD && r4 != R4_DWR)) 546 goto wrong_mc3_mce; 547 548 pr_cont(" during %s.\n", R4_MSG(ec)); 549 } else 550 goto wrong_mc3_mce; 551 552 return; 553 554 wrong_mc3_mce: 555 pr_emerg(HW_ERR "Corrupted MC3 MCE info?\n"); 556 } 557 558 static void decode_mc4_mce(struct mce *m) 559 { 560 struct cpuinfo_x86 *c = &boot_cpu_data; 561 int node_id = amd_get_nb_id(m->extcpu); 562 u16 ec = EC(m->status); 563 u8 xec = XEC(m->status, 0x1f); 564 u8 offset = 0; 565 566 pr_emerg(HW_ERR "MC4 Error (node %d): ", node_id); 567 568 switch (xec) { 569 case 0x0 ... 0xe: 570 571 /* special handling for DRAM ECCs */ 572 if (xec == 0x0 || xec == 0x8) { 573 /* no ECCs on F11h */ 574 if (c->x86 == 0x11) 575 goto wrong_mc4_mce; 576 577 pr_cont("%s.\n", mc4_mce_desc[xec]); 578 579 if (nb_bus_decoder) 580 nb_bus_decoder(node_id, m); 581 return; 582 } 583 break; 584 585 case 0xf: 586 if (TLB_ERROR(ec)) 587 pr_cont("GART Table Walk data error.\n"); 588 else if (BUS_ERROR(ec)) 589 pr_cont("DMA Exclusion Vector Table Walk error.\n"); 590 else 591 goto wrong_mc4_mce; 592 return; 593 594 case 0x19: 595 if (boot_cpu_data.x86 == 0x15 || boot_cpu_data.x86 == 0x16) 596 pr_cont("Compute Unit Data Error.\n"); 597 else 598 goto wrong_mc4_mce; 599 return; 600 601 case 0x1c ... 0x1f: 602 offset = 13; 603 break; 604 605 default: 606 goto wrong_mc4_mce; 607 } 608 609 pr_cont("%s.\n", mc4_mce_desc[xec - offset]); 610 return; 611 612 wrong_mc4_mce: 613 pr_emerg(HW_ERR "Corrupted MC4 MCE info?\n"); 614 } 615 616 static void decode_mc5_mce(struct mce *m) 617 { 618 struct cpuinfo_x86 *c = &boot_cpu_data; 619 u8 xec = XEC(m->status, xec_mask); 620 621 if (c->x86 == 0xf || c->x86 == 0x11) 622 goto wrong_mc5_mce; 623 624 pr_emerg(HW_ERR "MC5 Error: "); 625 626 if (xec == 0x0 || xec == 0xc) 627 pr_cont("%s.\n", mc5_mce_desc[xec]); 628 else if (xec <= 0xd) 629 pr_cont("%s parity error.\n", mc5_mce_desc[xec]); 630 else 631 goto wrong_mc5_mce; 632 633 return; 634 635 wrong_mc5_mce: 636 pr_emerg(HW_ERR "Corrupted MC5 MCE info?\n"); 637 } 638 639 static void decode_mc6_mce(struct mce *m) 640 { 641 u8 xec = XEC(m->status, xec_mask); 642 643 pr_emerg(HW_ERR "MC6 Error: "); 644 645 switch (xec) { 646 case 0x1: 647 pr_cont("Free List"); 648 break; 649 650 case 0x2: 651 pr_cont("Physical Register File"); 652 break; 653 654 case 0x3: 655 pr_cont("Retire Queue"); 656 break; 657 658 case 0x4: 659 pr_cont("Scheduler table"); 660 break; 661 662 case 0x5: 663 pr_cont("Status Register File"); 664 break; 665 666 default: 667 goto wrong_mc6_mce; 668 break; 669 } 670 671 pr_cont(" parity error.\n"); 672 673 return; 674 675 wrong_mc6_mce: 676 pr_emerg(HW_ERR "Corrupted MC6 MCE info?\n"); 677 } 678 679 static inline void amd_decode_err_code(u16 ec) 680 { 681 if (INT_ERROR(ec)) { 682 pr_emerg(HW_ERR "internal: %s\n", UU_MSG(ec)); 683 return; 684 } 685 686 pr_emerg(HW_ERR "cache level: %s", LL_MSG(ec)); 687 688 if (BUS_ERROR(ec)) 689 pr_cont(", mem/io: %s", II_MSG(ec)); 690 else 691 pr_cont(", tx: %s", TT_MSG(ec)); 692 693 if (MEM_ERROR(ec) || BUS_ERROR(ec)) { 694 pr_cont(", mem-tx: %s", R4_MSG(ec)); 695 696 if (BUS_ERROR(ec)) 697 pr_cont(", part-proc: %s (%s)", PP_MSG(ec), TO_MSG(ec)); 698 } 699 700 pr_cont("\n"); 701 } 702 703 /* 704 * Filter out unwanted MCE signatures here. 705 */ 706 static bool amd_filter_mce(struct mce *m) 707 { 708 u8 xec = (m->status >> 16) & 0x1f; 709 710 /* 711 * NB GART TLB error reporting is disabled by default. 712 */ 713 if (m->bank == 4 && xec == 0x5 && !report_gart_errors) 714 return true; 715 716 return false; 717 } 718 719 static const char *decode_error_status(struct mce *m) 720 { 721 if (m->status & MCI_STATUS_UC) { 722 if (m->status & MCI_STATUS_PCC) 723 return "System Fatal error."; 724 if (m->mcgstatus & MCG_STATUS_RIPV) 725 return "Uncorrected, software restartable error."; 726 return "Uncorrected, software containable error."; 727 } 728 729 if (m->status & MCI_STATUS_DEFERRED) 730 return "Deferred error."; 731 732 return "Corrected error, no action required."; 733 } 734 735 int amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data) 736 { 737 struct mce *m = (struct mce *)data; 738 struct cpuinfo_x86 *c = &cpu_data(m->extcpu); 739 int ecc; 740 741 if (amd_filter_mce(m)) 742 return NOTIFY_STOP; 743 744 switch (m->bank) { 745 case 0: 746 decode_mc0_mce(m); 747 break; 748 749 case 1: 750 decode_mc1_mce(m); 751 break; 752 753 case 2: 754 decode_mc2_mce(m); 755 break; 756 757 case 3: 758 decode_mc3_mce(m); 759 break; 760 761 case 4: 762 decode_mc4_mce(m); 763 break; 764 765 case 5: 766 decode_mc5_mce(m); 767 break; 768 769 case 6: 770 decode_mc6_mce(m); 771 break; 772 773 default: 774 break; 775 } 776 777 pr_emerg(HW_ERR "Error Status: %s\n", decode_error_status(m)); 778 779 pr_emerg(HW_ERR "CPU:%d (%x:%x:%x) MC%d_STATUS[%s|%s|%s|%s|%s", 780 m->extcpu, 781 c->x86, c->x86_model, c->x86_mask, 782 m->bank, 783 ((m->status & MCI_STATUS_OVER) ? "Over" : "-"), 784 ((m->status & MCI_STATUS_UC) ? "UE" : "CE"), 785 ((m->status & MCI_STATUS_MISCV) ? "MiscV" : "-"), 786 ((m->status & MCI_STATUS_PCC) ? "PCC" : "-"), 787 ((m->status & MCI_STATUS_ADDRV) ? "AddrV" : "-")); 788 789 if (c->x86 == 0x15 || c->x86 == 0x16) 790 pr_cont("|%s|%s", 791 ((m->status & MCI_STATUS_DEFERRED) ? "Deferred" : "-"), 792 ((m->status & MCI_STATUS_POISON) ? "Poison" : "-")); 793 794 /* do the two bits[14:13] together */ 795 ecc = (m->status >> 45) & 0x3; 796 if (ecc) 797 pr_cont("|%sECC", ((ecc == 2) ? "C" : "U")); 798 799 pr_cont("]: 0x%016llx\n", m->status); 800 801 if (m->status & MCI_STATUS_ADDRV) 802 pr_emerg(HW_ERR "MC%d_ADDR: 0x%016llx\n", m->bank, m->addr); 803 804 amd_decode_err_code(m->status & 0xffff); 805 806 return NOTIFY_STOP; 807 } 808 EXPORT_SYMBOL_GPL(amd_decode_mce); 809 810 static struct notifier_block amd_mce_dec_nb = { 811 .notifier_call = amd_decode_mce, 812 }; 813 814 static int __init mce_amd_init(void) 815 { 816 struct cpuinfo_x86 *c = &boot_cpu_data; 817 818 if (c->x86_vendor != X86_VENDOR_AMD) 819 return 0; 820 821 if (c->x86 < 0xf || c->x86 > 0x16) 822 return 0; 823 824 fam_ops = kzalloc(sizeof(struct amd_decoder_ops), GFP_KERNEL); 825 if (!fam_ops) 826 return -ENOMEM; 827 828 switch (c->x86) { 829 case 0xf: 830 fam_ops->mc0_mce = k8_mc0_mce; 831 fam_ops->mc1_mce = k8_mc1_mce; 832 fam_ops->mc2_mce = k8_mc2_mce; 833 break; 834 835 case 0x10: 836 fam_ops->mc0_mce = f10h_mc0_mce; 837 fam_ops->mc1_mce = k8_mc1_mce; 838 fam_ops->mc2_mce = k8_mc2_mce; 839 break; 840 841 case 0x11: 842 fam_ops->mc0_mce = k8_mc0_mce; 843 fam_ops->mc1_mce = k8_mc1_mce; 844 fam_ops->mc2_mce = k8_mc2_mce; 845 break; 846 847 case 0x12: 848 fam_ops->mc0_mce = f12h_mc0_mce; 849 fam_ops->mc1_mce = k8_mc1_mce; 850 fam_ops->mc2_mce = k8_mc2_mce; 851 break; 852 853 case 0x14: 854 nb_err_cpumask = 0x3; 855 fam_ops->mc0_mce = cat_mc0_mce; 856 fam_ops->mc1_mce = cat_mc1_mce; 857 fam_ops->mc2_mce = k8_mc2_mce; 858 break; 859 860 case 0x15: 861 xec_mask = 0x1f; 862 fam_ops->mc0_mce = f15h_mc0_mce; 863 fam_ops->mc1_mce = f15h_mc1_mce; 864 fam_ops->mc2_mce = f15h_mc2_mce; 865 break; 866 867 case 0x16: 868 xec_mask = 0x1f; 869 fam_ops->mc0_mce = cat_mc0_mce; 870 fam_ops->mc1_mce = cat_mc1_mce; 871 fam_ops->mc2_mce = f16h_mc2_mce; 872 break; 873 874 default: 875 printk(KERN_WARNING "Huh? What family is it: 0x%x?!\n", c->x86); 876 kfree(fam_ops); 877 return -EINVAL; 878 } 879 880 pr_info("MCE: In-kernel MCE decoding enabled.\n"); 881 882 mce_register_decode_chain(&amd_mce_dec_nb); 883 884 return 0; 885 } 886 early_initcall(mce_amd_init); 887 888 #ifdef MODULE 889 static void __exit mce_amd_exit(void) 890 { 891 mce_unregister_decode_chain(&amd_mce_dec_nb); 892 kfree(fam_ops); 893 } 894 895 MODULE_DESCRIPTION("AMD MCE decoder"); 896 MODULE_ALIAS("edac-mce-amd"); 897 MODULE_LICENSE("GPL"); 898 module_exit(mce_amd_exit); 899 #endif 900