// SPDX-License-Identifier: GPL-2.0-only
/*
 * GHES/EDAC Linux driver
 *
 * Copyright (c) 2013 by Mauro Carvalho Chehab
 *
 * Red Hat Inc. https://www.redhat.com
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <acpi/ghes.h>
#include <linux/edac.h>
#include <linux/dmi.h>
#include "edac_module.h"
#include <ras/ras_event.h>

struct ghes_pvt {
	struct mem_ctl_info *mci;

	/* Buffers for the error handling routine */
	char other_detail[400];
	char msg[80];
};

static refcount_t ghes_refcount = REFCOUNT_INIT(0);

/*
 * Access to ghes_pvt must be protected by ghes_lock. The spinlock
 * also provides the necessary (implicit) memory barrier for the SMP
 * case to make the pointer visible on another CPU.
 */
static struct ghes_pvt *ghes_pvt;

/*
 * This driver's representation of the system hardware, as collected
 * from DMI.
 */
static struct ghes_hw_desc {
	int num_dimms;
	struct dimm_info *dimms;
} ghes_hw;

/* GHES registration mutex */
static DEFINE_MUTEX(ghes_reg_mutex);

/*
 * Sync with other, potentially concurrent callers of
 * ghes_edac_report_mem_error(). We don't know what the
 * "inventive" firmware would do.
 */
static DEFINE_SPINLOCK(ghes_lock);

/* "ghes_edac.force_load=1" skips the platform check */
static bool __read_mostly force_load;
module_param(force_load, bool, 0);

static bool system_scanned;

/* Memory Device - Type 17 of SMBIOS spec */
struct memdev_dmi_entry {
	u8 type;
	u8 length;
	u16 handle;
	u16 phys_mem_array_handle;
	u16 mem_err_info_handle;
	u16 total_width;
	u16 data_width;
	u16 size;
	u8 form_factor;
	u8 device_set;
	u8 device_locator;
	u8 bank_locator;
	u8 memory_type;
	u16 type_detail;
	u16 speed;
	u8 manufacturer;
	u8 serial_number;
	u8 asset_tag;
	u8 part_number;
	u8 attributes;
	u32 extended_size;
	u16 conf_mem_clk_speed;
} __attribute__((__packed__));

static struct dimm_info *find_dimm_by_handle(struct mem_ctl_info *mci, u16 handle)
{
	struct dimm_info *dimm;

	mci_for_each_dimm(mci, dimm) {
		if (dimm->smbios_handle == handle)
			return dimm;
	}

	return NULL;
}

static void dimm_setup_label(struct dimm_info *dimm, u16 handle)
{
	const char *bank = NULL, *device = NULL;

	dmi_memdev_name(handle, &bank, &device);

	/* Both strings must be non-empty */
	if (bank && *bank && device && *device)
		snprintf(dimm->label, sizeof(dimm->label), "%s %s", bank, device);
}

static void assign_dmi_dimm_info(struct dimm_info *dimm, struct memdev_dmi_entry *entry)
{
	u16 rdr_mask = BIT(7) | BIT(13);

	if (entry->size == 0xffff) {
		pr_info("Can't get DIMM%i size\n", dimm->idx);
		dimm->nr_pages = MiB_TO_PAGES(32);	/* Unknown */
	} else if (entry->size == 0x7fff) {
		dimm->nr_pages = MiB_TO_PAGES(entry->extended_size);
	} else {
		if (entry->size & BIT(15))
			dimm->nr_pages = MiB_TO_PAGES((entry->size & 0x7fff) << 10);
		else
			dimm->nr_pages = MiB_TO_PAGES(entry->size);
	}

	switch (entry->memory_type) {
	case 0x12:
		if (entry->type_detail & BIT(13))
			dimm->mtype = MEM_RDDR;
		else
			dimm->mtype = MEM_DDR;
		break;
	case 0x13:
		if (entry->type_detail & BIT(13))
			dimm->mtype = MEM_RDDR2;
		else
			dimm->mtype = MEM_DDR2;
		break;
	case 0x14:
		dimm->mtype = MEM_FB_DDR2;
		break;
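	/*
	 * SMBIOS "Memory Type" values below: 0x18 is DDR3 and 0x1a is DDR4.
	 * In "Type Detail", bit 12 flags non-volatile devices and bit 13
	 * flags registered (buffered) modules.
	 */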
	case 0x18:
		if (entry->type_detail & BIT(12))
			dimm->mtype = MEM_NVDIMM;
		else if (entry->type_detail & BIT(13))
			dimm->mtype = MEM_RDDR3;
		else
			dimm->mtype = MEM_DDR3;
		break;
	case 0x1a:
		if (entry->type_detail & BIT(12))
			dimm->mtype = MEM_NVDIMM;
		else if (entry->type_detail & BIT(13))
			dimm->mtype = MEM_RDDR4;
		else
			dimm->mtype = MEM_DDR4;
		break;
	default:
		if (entry->type_detail & BIT(6))
			dimm->mtype = MEM_RMBS;
		else if ((entry->type_detail & rdr_mask) == rdr_mask)
			dimm->mtype = MEM_RDR;
		else if (entry->type_detail & BIT(7))
			dimm->mtype = MEM_SDR;
		else if (entry->type_detail & BIT(9))
			dimm->mtype = MEM_EDO;
		else
			dimm->mtype = MEM_UNKNOWN;
	}

	/*
	 * Actually, we can only detect whether the memory has extra bits
	 * for checksum or not.
	 */
	if (entry->total_width == entry->data_width)
		dimm->edac_mode = EDAC_NONE;
	else
		dimm->edac_mode = EDAC_SECDED;

	dimm->dtype = DEV_UNKNOWN;
	dimm->grain = 128;		/* Likely the worst case */

	dimm_setup_label(dimm, entry->handle);

	if (dimm->nr_pages) {
		edac_dbg(1, "DIMM%i: %s size = %d MB%s\n",
			 dimm->idx, edac_mem_types[dimm->mtype],
			 PAGES_TO_MiB(dimm->nr_pages),
			 (dimm->edac_mode != EDAC_NONE) ? "(ECC)" : "");
		edac_dbg(2, "\ttype %d, detail 0x%02x, width %d(total %d)\n",
			 entry->memory_type, entry->type_detail,
			 entry->total_width, entry->data_width);
	}

	dimm->smbios_handle = entry->handle;
}

static void enumerate_dimms(const struct dmi_header *dh, void *arg)
{
	struct memdev_dmi_entry *entry = (struct memdev_dmi_entry *)dh;
	struct ghes_hw_desc *hw = (struct ghes_hw_desc *)arg;
	struct dimm_info *d;

	if (dh->type != DMI_ENTRY_MEM_DEVICE)
		return;

	/* Enlarge the array by another 16 entries when needed */
	if (!hw->num_dimms || !(hw->num_dimms % 16)) {
		struct dimm_info *new;

		new = krealloc_array(hw->dimms, hw->num_dimms + 16,
				     sizeof(struct dimm_info), GFP_KERNEL);
		if (!new) {
			WARN_ON_ONCE(1);
			return;
		}

		hw->dimms = new;
	}

	d = &hw->dimms[hw->num_dimms];
	d->idx = hw->num_dimms;

	assign_dmi_dimm_info(d, entry);

	hw->num_dimms++;
}

static void ghes_scan_system(void)
{
	if (system_scanned)
		return;

	dmi_walk(enumerate_dimms, &ghes_hw);

	system_scanned = true;
}

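/*
 * Translate a CPER memory error record, passed in by the GHES code, into
 * an EDAC error report on the single ghes_edac memory controller.
 */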
void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err)
{
	struct edac_raw_error_desc *e;
	struct mem_ctl_info *mci;
	struct ghes_pvt *pvt;
	unsigned long flags;
	char *p;

	/*
	 * We can do the locking below because GHES defers error processing
	 * from NMI to IRQ context. Whenever that changes, we'd at least
	 * know.
	 */
	if (WARN_ON_ONCE(in_nmi()))
		return;

	spin_lock_irqsave(&ghes_lock, flags);

	pvt = ghes_pvt;
	if (!pvt)
		goto unlock;

	mci = pvt->mci;
	e = &mci->error_desc;

	/* Clean the error report buffer */
	memset(e, 0, sizeof(*e));
	e->error_count = 1;
	e->grain = 1;
	e->msg = pvt->msg;
	e->other_detail = pvt->other_detail;
	e->top_layer = -1;
	e->mid_layer = -1;
	e->low_layer = -1;
	*pvt->other_detail = '\0';
	*pvt->msg = '\0';

	switch (sev) {
	case GHES_SEV_CORRECTED:
		e->type = HW_EVENT_ERR_CORRECTED;
		break;
	case GHES_SEV_RECOVERABLE:
		e->type = HW_EVENT_ERR_UNCORRECTED;
		break;
	case GHES_SEV_PANIC:
		e->type = HW_EVENT_ERR_FATAL;
		break;
	default:
	case GHES_SEV_NO:
		e->type = HW_EVENT_ERR_INFO;
	}

	edac_dbg(1, "error validation_bits: 0x%08llx\n",
		 (long long)mem_err->validation_bits);

	/* Error type, mapped on e->msg */
	if (mem_err->validation_bits & CPER_MEM_VALID_ERROR_TYPE) {
		p = pvt->msg;
		switch (mem_err->error_type) {
		case 0:
			p += sprintf(p, "Unknown");
			break;
		case 1:
			p += sprintf(p, "No error");
			break;
		case 2:
			p += sprintf(p, "Single-bit ECC");
			break;
		case 3:
			p += sprintf(p, "Multi-bit ECC");
			break;
		case 4:
			p += sprintf(p, "Single-symbol ChipKill ECC");
			break;
		case 5:
			p += sprintf(p, "Multi-symbol ChipKill ECC");
			break;
		case 6:
			p += sprintf(p, "Master abort");
			break;
		case 7:
			p += sprintf(p, "Target abort");
			break;
		case 8:
			p += sprintf(p, "Parity Error");
			break;
		case 9:
			p += sprintf(p, "Watchdog timeout");
			break;
		case 10:
			p += sprintf(p, "Invalid address");
			break;
		case 11:
			p += sprintf(p, "Mirror Broken");
			break;
		case 12:
			p += sprintf(p, "Memory Sparing");
			break;
		case 13:
			p += sprintf(p, "Scrub corrected error");
			break;
		case 14:
			p += sprintf(p, "Scrub uncorrected error");
			break;
		case 15:
			p += sprintf(p, "Physical Memory Map-out event");
			break;
		default:
			p += sprintf(p, "reserved error (%d)",
				     mem_err->error_type);
		}
	} else {
		strcpy(pvt->msg, "unknown error");
	}

	/* Error address */
	if (mem_err->validation_bits & CPER_MEM_VALID_PA) {
		e->page_frame_number = PHYS_PFN(mem_err->physical_addr);
		e->offset_in_page = offset_in_page(mem_err->physical_addr);
	}

	/* Error grain */
	if (mem_err->validation_bits & CPER_MEM_VALID_PA_MASK)
		e->grain = ~mem_err->physical_addr_mask + 1;

	/* Memory error location, mapped on e->location */
	p = e->location;
	if (mem_err->validation_bits & CPER_MEM_VALID_NODE)
		p += sprintf(p, "node:%d ", mem_err->node);
	if (mem_err->validation_bits & CPER_MEM_VALID_CARD)
		p += sprintf(p, "card:%d ", mem_err->card);
	if (mem_err->validation_bits & CPER_MEM_VALID_MODULE)
		p += sprintf(p, "module:%d ", mem_err->module);
	if (mem_err->validation_bits & CPER_MEM_VALID_RANK_NUMBER)
		p += sprintf(p, "rank:%d ", mem_err->rank);
	if (mem_err->validation_bits & CPER_MEM_VALID_BANK)
		p += sprintf(p, "bank:%d ", mem_err->bank);
	if (mem_err->validation_bits & CPER_MEM_VALID_BANK_GROUP)
		p += sprintf(p, "bank_group:%d ",
			     mem_err->bank >> CPER_MEM_BANK_GROUP_SHIFT);
	if (mem_err->validation_bits & CPER_MEM_VALID_BANK_ADDRESS)
		p += sprintf(p, "bank_address:%d ",
			     mem_err->bank & CPER_MEM_BANK_ADDRESS_MASK);
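	/*
	 * Row numbers wider than 16 bits keep their upper bits in the
	 * "extended" field; cper_get_mem_extension() folds them back in
	 * when CPER_MEM_VALID_ROW_EXT is set.
	 */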
	if (mem_err->validation_bits & (CPER_MEM_VALID_ROW | CPER_MEM_VALID_ROW_EXT)) {
		u32 row = mem_err->row;

		row |= cper_get_mem_extension(mem_err->validation_bits, mem_err->extended);
		p += sprintf(p, "row:%d ", row);
	}
	if (mem_err->validation_bits & CPER_MEM_VALID_COLUMN)
		p += sprintf(p, "col:%d ", mem_err->column);
	if (mem_err->validation_bits & CPER_MEM_VALID_BIT_POSITION)
		p += sprintf(p, "bit_pos:%d ", mem_err->bit_pos);
	if (mem_err->validation_bits & CPER_MEM_VALID_MODULE_HANDLE) {
		const char *bank = NULL, *device = NULL;
		struct dimm_info *dimm;

		dmi_memdev_name(mem_err->mem_dev_handle, &bank, &device);
		if (bank != NULL && device != NULL)
			p += sprintf(p, "DIMM location:%s %s ", bank, device);
		else
			p += sprintf(p, "DIMM DMI handle: 0x%.4x ",
				     mem_err->mem_dev_handle);

		dimm = find_dimm_by_handle(mci, mem_err->mem_dev_handle);
		if (dimm) {
			e->top_layer = dimm->idx;
			strcpy(e->label, dimm->label);
		}
	}
	if (mem_err->validation_bits & CPER_MEM_VALID_CHIP_ID)
		p += sprintf(p, "chipID: %d ",
			     mem_err->extended >> CPER_MEM_CHIP_ID_SHIFT);
	if (p > e->location)
		*(p - 1) = '\0';

	if (!*e->label)
		strcpy(e->label, "unknown memory");

	/* All other fields are mapped on e->other_detail */
	p = pvt->other_detail;
	p += snprintf(p, sizeof(pvt->other_detail),
		      "APEI location: %s ", e->location);
	if (mem_err->validation_bits & CPER_MEM_VALID_ERROR_STATUS) {
		u64 status = mem_err->error_status;

		p += sprintf(p, "status(0x%016llx): ", (long long)status);
		switch ((status >> 8) & 0xff) {
		case 1:
			p += sprintf(p, "Error detected internal to the component ");
			break;
		case 16:
			p += sprintf(p, "Error detected in the bus ");
			break;
		case 4:
			p += sprintf(p, "Storage error in DRAM memory ");
			break;
		case 5:
			p += sprintf(p, "Storage error in TLB ");
			break;
		case 6:
			p += sprintf(p, "Storage error in cache ");
			break;
		case 7:
			p += sprintf(p, "Error in one or more functional units ");
			break;
		case 8:
			p += sprintf(p, "component failed self test ");
			break;
		case 9:
			p += sprintf(p, "Overflow or undervalue of internal queue ");
			break;
		case 17:
			p += sprintf(p, "Virtual address not found on IO-TLB or IO-PDIR ");
			break;
		case 18:
			p += sprintf(p, "Improper access error ");
			break;
		case 19:
			p += sprintf(p, "Access to a memory address which is not mapped to any component ");
			break;
		case 20:
			p += sprintf(p, "Loss of Lockstep ");
			break;
		case 21:
			p += sprintf(p, "Response not associated with a request ");
			break;
		case 22:
			p += sprintf(p, "Bus parity error - must also set the A, C, or D Bits ");
			break;
		case 23:
			p += sprintf(p, "Detection of a PATH_ERROR ");
			break;
		case 25:
			p += sprintf(p, "Bus operation timeout ");
			break;
		case 26:
			p += sprintf(p, "A read was issued to data that has been poisoned ");
			break;
		default:
			p += sprintf(p, "reserved ");
			break;
		}
	}
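	/*
	 * Requestor, responder and target IDs identify the bus agents
	 * involved in the failed transaction, when the firmware reports
	 * them.
	 */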
	if (mem_err->validation_bits & CPER_MEM_VALID_REQUESTOR_ID)
		p += sprintf(p, "requestorID: 0x%016llx ",
			     (long long)mem_err->requestor_id);
	if (mem_err->validation_bits & CPER_MEM_VALID_RESPONDER_ID)
		p += sprintf(p, "responderID: 0x%016llx ",
			     (long long)mem_err->responder_id);
	if (mem_err->validation_bits & CPER_MEM_VALID_TARGET_ID)
		p += sprintf(p, "targetID: 0x%016llx ",
			     (long long)mem_err->target_id);
	if (p > pvt->other_detail)
		*(p - 1) = '\0';

	edac_raw_mc_handle_error(e);

unlock:
	spin_unlock_irqrestore(&ghes_lock, flags);
}

/*
 * Known systems that are safe to enable this module.
 */
static struct acpi_platform_list plat_list[] = {
	{"HPE   ", "Server  ", 0, ACPI_SIG_FADT, all_versions},
	{ } /* End */
};

int ghes_edac_register(struct ghes *ghes, struct device *dev)
{
	bool fake = false;
	struct mem_ctl_info *mci;
	struct ghes_pvt *pvt;
	struct edac_mc_layer layers[1];
	unsigned long flags;
	int idx = -1;
	int rc = 0;

	if (IS_ENABLED(CONFIG_X86)) {
		/* Check if safe to enable on this system */
		idx = acpi_match_platform_list(plat_list);
		if (!force_load && idx < 0)
			return -ENODEV;
	} else {
		force_load = true;
		idx = 0;
	}

	/* finish another registration/unregistration instance first */
	mutex_lock(&ghes_reg_mutex);

	/*
	 * We have only one logical memory controller to which all DIMMs belong.
	 */
	if (refcount_inc_not_zero(&ghes_refcount))
		goto unlock;

	ghes_scan_system();

	/* Check if we've got a bogus BIOS */
	if (!ghes_hw.num_dimms) {
		fake = true;
		ghes_hw.num_dimms = 1;
	}

	layers[0].type = EDAC_MC_LAYER_ALL_MEM;
	layers[0].size = ghes_hw.num_dimms;
	layers[0].is_virt_csrow = true;

	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(struct ghes_pvt));
	if (!mci) {
		pr_info("Can't allocate memory for EDAC data\n");
		rc = -ENOMEM;
		goto unlock;
	}

	pvt = mci->pvt_info;
	pvt->mci = mci;

	mci->pdev = dev;
	mci->mtype_cap = MEM_FLAG_EMPTY;
	mci->edac_ctl_cap = EDAC_FLAG_NONE;
	mci->edac_cap = EDAC_FLAG_NONE;
	mci->mod_name = "ghes_edac.c";
	mci->ctl_name = "ghes_edac";
	mci->dev_name = "ghes";

	if (fake) {
		pr_info("This system has a very crappy BIOS: It doesn't even list the DIMMS.\n");
		pr_info("Its SMBIOS info is wrong. It is doubtful that the error report would\n");
		pr_info("work on such system. Use this driver with caution\n");
	} else if (idx < 0) {
		pr_info("This EDAC driver relies on BIOS to enumerate memory and get error reports.\n");
		pr_info("Unfortunately, not all BIOSes reflect the memory layout correctly.\n");
		pr_info("So, the end result of using this driver varies from vendor to vendor.\n");
		pr_info("If you find incorrect reports, please contact your hardware vendor\n");
		pr_info("to correct its BIOS.\n");
		pr_info("This system has %d DIMM sockets.\n", ghes_hw.num_dimms);
	}

	if (!fake) {
		struct dimm_info *src, *dst;
		int i = 0;

		mci_for_each_dimm(mci, dst) {
			src = &ghes_hw.dimms[i];

			dst->idx = src->idx;
			dst->smbios_handle = src->smbios_handle;
			dst->nr_pages = src->nr_pages;
			dst->mtype = src->mtype;
			dst->edac_mode = src->edac_mode;
			dst->dtype = src->dtype;
			dst->grain = src->grain;

			/*
			 * If no src->label, preserve the default label
			 * assigned by the EDAC core.
			 */
			if (strlen(src->label))
				memcpy(dst->label, src->label, sizeof(src->label));

			i++;
		}

	} else {
		struct dimm_info *dimm = edac_get_dimm(mci, 0, 0, 0);

		dimm->nr_pages = 1;
		dimm->grain = 128;
		dimm->mtype = MEM_UNKNOWN;
		dimm->dtype = DEV_UNKNOWN;
		dimm->edac_mode = EDAC_SECDED;
	}

	rc = edac_mc_add_mc(mci);
	if (rc < 0) {
		pr_info("Can't register with the EDAC core\n");
		edac_mc_free(mci);
		rc = -ENODEV;
		goto unlock;
	}

	spin_lock_irqsave(&ghes_lock, flags);
	ghes_pvt = pvt;
	spin_unlock_irqrestore(&ghes_lock, flags);

	/* only set on success */
	refcount_set(&ghes_refcount, 1);

unlock:

	/* Not needed anymore */
	kfree(ghes_hw.dimms);
	ghes_hw.dimms = NULL;

	mutex_unlock(&ghes_reg_mutex);

	return rc;
}

void ghes_edac_unregister(struct ghes *ghes)
{
	struct mem_ctl_info *mci;
	unsigned long flags;

	if (!force_load)
		return;

	mutex_lock(&ghes_reg_mutex);

	system_scanned = false;
	memset(&ghes_hw, 0, sizeof(struct ghes_hw_desc));

	if (!refcount_dec_and_test(&ghes_refcount))
		goto unlock;

	/*
	 * Wait for the IRQ handler to finish.
	 */
	spin_lock_irqsave(&ghes_lock, flags);
	mci = ghes_pvt ? ghes_pvt->mci : NULL;
	ghes_pvt = NULL;
	spin_unlock_irqrestore(&ghes_lock, flags);

	if (!mci)
		goto unlock;

	mci = edac_mc_del_mc(mci->pdev);
	if (mci)
		edac_mc_free(mci);

unlock:
	mutex_unlock(&ghes_reg_mutex);
}