// SPDX-License-Identifier: GPL-2.0-only
/*
 * GHES/EDAC Linux driver
 *
 * Copyright (c) 2013 by Mauro Carvalho Chehab
 *
 * Red Hat Inc. http://www.redhat.com
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <acpi/ghes.h>
#include <linux/edac.h>
#include <linux/dmi.h>
#include "edac_module.h"
#include <ras/ras_event.h>

struct ghes_edac_pvt {
	struct list_head list;
	struct ghes *ghes;
	struct mem_ctl_info *mci;

	/* Buffers for the error handling routine */
	char other_detail[400];
	char msg[80];
};

static refcount_t ghes_refcount = REFCOUNT_INIT(0);

/*
 * Access to ghes_pvt must be protected by ghes_lock. The spinlock
 * also provides the necessary (implicit) memory barrier for the SMP
 * case to make the pointer visible on another CPU.
 */
static struct ghes_edac_pvt *ghes_pvt;

/* GHES registration mutex */
static DEFINE_MUTEX(ghes_reg_mutex);

/*
 * Sync with other, potentially concurrent callers of
 * ghes_edac_report_mem_error(). We don't know what the
 * "inventive" firmware would do.
 */
static DEFINE_SPINLOCK(ghes_lock);

/* "ghes_edac.force_load=1" skips the platform check */
static bool __read_mostly force_load;
module_param(force_load, bool, 0);

/* Memory Device - Type 17 of SMBIOS spec */
struct memdev_dmi_entry {
	u8 type;
	u8 length;
	u16 handle;
	u16 phys_mem_array_handle;
	u16 mem_err_info_handle;
	u16 total_width;
	u16 data_width;
	u16 size;
	u8 form_factor;
	u8 device_set;
	u8 device_locator;
	u8 bank_locator;
	u8 memory_type;
	u16 type_detail;
	u16 speed;
	u8 manufacturer;
	u8 serial_number;
	u8 asset_tag;
	u8 part_number;
	u8 attributes;
	u32 extended_size;
	u16 conf_mem_clk_speed;
} __attribute__((__packed__));

struct ghes_edac_dimm_fill {
	struct mem_ctl_info *mci;
	unsigned int count;
};

static void ghes_edac_count_dimms(const struct dmi_header *dh, void *arg)
{
	int *num_dimm = arg;

	if (dh->type == DMI_ENTRY_MEM_DEVICE)
		(*num_dimm)++;
}

static int get_dimm_smbios_index(struct mem_ctl_info *mci, u16 handle)
{
	struct dimm_info *dimm;

	mci_for_each_dimm(mci, dimm) {
		if (dimm->smbios_handle == handle)
			return dimm->idx;
	}

	return -1;
}

static void ghes_edac_dmidecode(const struct dmi_header *dh, void *arg)
{
	struct ghes_edac_dimm_fill *dimm_fill = arg;
	struct mem_ctl_info *mci = dimm_fill->mci;

	if (dh->type == DMI_ENTRY_MEM_DEVICE) {
		struct memdev_dmi_entry *entry = (struct memdev_dmi_entry *)dh;
		struct dimm_info *dimm = edac_get_dimm(mci, dimm_fill->count, 0, 0);
		/* SMBIOS "Type Detail": Synchronous (bit 7) | Registered (bit 13) */
		u16 rdr_mask = BIT(7) | BIT(13);

		/*
		 * SMBIOS Type 17 "Size" field: 0xffff means the size is
		 * unknown, 0x7fff means it is given by "Extended Size".
		 */
		if (entry->size == 0xffff) {
			pr_info("Can't get DIMM%i size\n",
				dimm_fill->count);
			dimm->nr_pages = MiB_TO_PAGES(32);	/* Unknown */
		} else if (entry->size == 0x7fff) {
			dimm->nr_pages = MiB_TO_PAGES(entry->extended_size);
		} else {
			if (entry->size & BIT(15))
				dimm->nr_pages = MiB_TO_PAGES((entry->size & 0x7fff) << 10);
			else
				dimm->nr_pages = MiB_TO_PAGES(entry->size);
		}

		switch (entry->memory_type) {
		case 0x12:
			if (entry->type_detail & BIT(13))
				dimm->mtype = MEM_RDDR;
			else
				dimm->mtype = MEM_DDR;
			break;
		case 0x13:
			if (entry->type_detail & BIT(13))
				dimm->mtype = MEM_RDDR2;
			else
				dimm->mtype = MEM_DDR2;
			break;
		case 0x14:
			dimm->mtype = MEM_FB_DDR2;
			break;
		case 0x18:
			if (entry->type_detail & BIT(12))
				dimm->mtype = MEM_NVDIMM;
			else if (entry->type_detail & BIT(13))
				dimm->mtype = MEM_RDDR3;
			else
				dimm->mtype = MEM_DDR3;
			break;
		case 0x1a:
			if (entry->type_detail & BIT(12))
				dimm->mtype = MEM_NVDIMM;
			else if (entry->type_detail & BIT(13))
				dimm->mtype = MEM_RDDR4;
			else
				dimm->mtype = MEM_DDR4;
			break;
		default:
			if (entry->type_detail & BIT(6))
				dimm->mtype = MEM_RMBS;
			else if ((entry->type_detail & rdr_mask) == rdr_mask)
				dimm->mtype = MEM_RDR;
			else if (entry->type_detail & BIT(7))
				dimm->mtype = MEM_SDR;
			else if (entry->type_detail & BIT(9))
				dimm->mtype = MEM_EDO;
			else
				dimm->mtype = MEM_UNKNOWN;
		}

		/*
		 * Actually, all we can detect is whether the memory has
		 * extra bits for checksum or not.
		 */
		if (entry->total_width == entry->data_width)
			dimm->edac_mode = EDAC_NONE;
		else
			dimm->edac_mode = EDAC_SECDED;

		dimm->dtype = DEV_UNKNOWN;
		dimm->grain = 128;		/* Likely, worst case */

		/*
		 * FIXME: It shouldn't be hard to also fill the DIMM labels
		 */

		if (dimm->nr_pages) {
			edac_dbg(1, "DIMM%i: %s size = %d MB%s\n",
				 dimm_fill->count, edac_mem_types[dimm->mtype],
				 PAGES_TO_MiB(dimm->nr_pages),
				 (dimm->edac_mode != EDAC_NONE) ? "(ECC)" : "");
			edac_dbg(2, "\ttype %d, detail 0x%02x, width %d(total %d)\n",
				 entry->memory_type, entry->type_detail,
				 entry->total_width, entry->data_width);
		}

		dimm->smbios_handle = entry->handle;

		dimm_fill->count++;
	}
}

void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err)
{
	enum hw_event_mc_err_type type;
	struct edac_raw_error_desc *e;
	struct mem_ctl_info *mci;
	struct ghes_edac_pvt *pvt;
	unsigned long flags;
	char *p;

	/*
	 * We can do the locking below because GHES defers error processing
	 * from NMI to IRQ context. Whenever that changes, we'd at least
	 * know.
	 */
	if (WARN_ON_ONCE(in_nmi()))
		return;

	spin_lock_irqsave(&ghes_lock, flags);

	pvt = ghes_pvt;
	if (!pvt)
		goto unlock;

	mci = pvt->mci;
	e = &mci->error_desc;

	/* Clean the error report buffer */
	memset(e, 0, sizeof(*e));
	e->error_count = 1;
	e->grain = 1;
	strcpy(e->label, "unknown label");
	e->msg = pvt->msg;
	e->other_detail = pvt->other_detail;
	e->top_layer = -1;
	e->mid_layer = -1;
	e->low_layer = -1;
	*pvt->other_detail = '\0';
	*pvt->msg = '\0';

	switch (sev) {
	case GHES_SEV_CORRECTED:
		type = HW_EVENT_ERR_CORRECTED;
		break;
	case GHES_SEV_RECOVERABLE:
		type = HW_EVENT_ERR_UNCORRECTED;
		break;
	case GHES_SEV_PANIC:
		type = HW_EVENT_ERR_FATAL;
		break;
	default:
	case GHES_SEV_NO:
		type = HW_EVENT_ERR_INFO;
	}

	edac_dbg(1, "error validation_bits: 0x%08llx\n",
		 (long long)mem_err->validation_bits);

	/* Error type, mapped to e->msg */
	if (mem_err->validation_bits & CPER_MEM_VALID_ERROR_TYPE) {
		p = pvt->msg;
		switch (mem_err->error_type) {
		case 0:
			p += sprintf(p, "Unknown");
			break;
		case 1:
			p += sprintf(p, "No error");
			break;
		case 2:
			p += sprintf(p, "Single-bit ECC");
			break;
		case 3:
			p += sprintf(p, "Multi-bit ECC");
			break;
		case 4:
			p += sprintf(p, "Single-symbol ChipKill ECC");
			break;
		case 5:
			p += sprintf(p, "Multi-symbol ChipKill ECC");
			break;
		case 6:
			p += sprintf(p, "Master abort");
			break;
		case 7:
			p += sprintf(p, "Target abort");
			break;
		case 8:
			p += sprintf(p, "Parity Error");
			break;
		case 9:
			p += sprintf(p, "Watchdog timeout");
			break;
		case 10:
			p += sprintf(p, "Invalid address");
			break;
		case 11:
			p += sprintf(p, "Mirror Broken");
			break;
		case 12:
			p += sprintf(p, "Memory Sparing");
			break;
		case 13:
			p += sprintf(p, "Scrub corrected error");
			break;
		case 14:
			p += sprintf(p, "Scrub uncorrected error");
			break;
		case 15:
			p += sprintf(p, "Physical Memory Map-out event");
			break;
		default:
			p += sprintf(p, "reserved error (%d)",
				     mem_err->error_type);
		}
	} else {
		strcpy(pvt->msg, "unknown error");
	}

	/* Error address */
	if (mem_err->validation_bits & CPER_MEM_VALID_PA) {
		e->page_frame_number = PHYS_PFN(mem_err->physical_addr);
		e->offset_in_page = offset_in_page(mem_err->physical_addr);
	}

	/* Error grain: the low bits left clear by the address mask give the granularity */
	if (mem_err->validation_bits & CPER_MEM_VALID_PA_MASK)
		e->grain = ~mem_err->physical_addr_mask + 1;

	/* Memory error location, mapped to e->location */
	p = e->location;
	if (mem_err->validation_bits & CPER_MEM_VALID_NODE)
		p += sprintf(p, "node:%d ", mem_err->node);
	if (mem_err->validation_bits & CPER_MEM_VALID_CARD)
		p += sprintf(p, "card:%d ", mem_err->card);
	if (mem_err->validation_bits & CPER_MEM_VALID_MODULE)
		p += sprintf(p, "module:%d ", mem_err->module);
	if (mem_err->validation_bits & CPER_MEM_VALID_RANK_NUMBER)
		p += sprintf(p, "rank:%d ", mem_err->rank);
	if (mem_err->validation_bits & CPER_MEM_VALID_BANK)
		p += sprintf(p, "bank:%d ", mem_err->bank);
	if (mem_err->validation_bits & CPER_MEM_VALID_ROW)
		p += sprintf(p, "row:%d ", mem_err->row);
	if (mem_err->validation_bits & CPER_MEM_VALID_COLUMN)
		p += sprintf(p, "col:%d ", mem_err->column);
	if (mem_err->validation_bits & CPER_MEM_VALID_BIT_POSITION)
		p += sprintf(p, "bit_pos:%d ", mem_err->bit_pos);
	if (mem_err->validation_bits & CPER_MEM_VALID_MODULE_HANDLE) {
		const char *bank = NULL, *device = NULL;
		int index = -1;

		dmi_memdev_name(mem_err->mem_dev_handle, &bank, &device);
		if (bank != NULL && device != NULL)
			p += sprintf(p, "DIMM location:%s %s ", bank, device);
		else
			p += sprintf(p, "DIMM DMI handle: 0x%.4x ",
				     mem_err->mem_dev_handle);

		index = get_dimm_smbios_index(mci, mem_err->mem_dev_handle);
		if (index >= 0) {
			e->top_layer = index;
			e->enable_per_layer_report = true;
		}
	}
	if (p > e->location)
		*(p - 1) = '\0';

	/* All other fields are mapped to e->other_detail */
	p = pvt->other_detail;
	p += snprintf(p, sizeof(pvt->other_detail),
		      "APEI location: %s ", e->location);
	if (mem_err->validation_bits & CPER_MEM_VALID_ERROR_STATUS) {
		u64 status = mem_err->error_status;

		p += sprintf(p, "status(0x%016llx): ", (long long)status);
		switch ((status >> 8) & 0xff) {
		case 1:
			p += sprintf(p, "Error detected internal to the component ");
			break;
		case 16:
			p += sprintf(p, "Error detected in the bus ");
			break;
		case 4:
			p += sprintf(p, "Storage error in DRAM memory ");
			break;
		case 5:
			p += sprintf(p, "Storage error in TLB ");
			break;
		case 6:
			p += sprintf(p, "Storage error in cache ");
			break;
		case 7:
			p += sprintf(p, "Error in one or more functional units ");
			break;
		case 8:
			p += sprintf(p, "Component failed self test ");
			break;
		case 9:
			p += sprintf(p, "Overflow or underflow of internal queue ");
			break;
		case 17:
			p += sprintf(p, "Virtual address not found on IO-TLB or IO-PDIR ");
			break;
		case 18:
			p += sprintf(p, "Improper access error ");
			break;
		case 19:
			p += sprintf(p, "Access to a memory address which is not mapped to any component ");
			break;
		case 20:
			p += sprintf(p, "Loss of Lockstep ");
			break;
		case 21:
			p += sprintf(p, "Response not associated with a request ");
			break;
		case 22:
			p += sprintf(p, "Bus parity error - must also set the A, C, or D Bits ");
			break;
		case 23:
			p += sprintf(p, "Detection of a PATH_ERROR ");
			break;
		case 25:
			p += sprintf(p, "Bus operation timeout ");
			break;
		case 26:
			p += sprintf(p, "A read was issued to data that has been poisoned ");
			break;
		default:
			p += sprintf(p, "reserved ");
			break;
		}
	}
	if (mem_err->validation_bits & CPER_MEM_VALID_REQUESTOR_ID)
		p += sprintf(p, "requestorID: 0x%016llx ",
			     (long long)mem_err->requestor_id);
	if (mem_err->validation_bits & CPER_MEM_VALID_RESPONDER_ID)
		p += sprintf(p, "responderID: 0x%016llx ",
			     (long long)mem_err->responder_id);
	if (mem_err->validation_bits & CPER_MEM_VALID_TARGET_ID)
		p += sprintf(p, "targetID: 0x%016llx ",
			     (long long)mem_err->target_id);
	if (p > pvt->other_detail)
		*(p - 1) = '\0';

	edac_raw_mc_handle_error(type, mci, e);

unlock:
	spin_unlock_irqrestore(&ghes_lock, flags);
}

/*
 * Known systems on which it is safe to enable this module.
 */
static struct acpi_platform_list plat_list[] = {
	{"HPE ", "Server ", 0, ACPI_SIG_FADT, all_versions},
	{ } /* End */
};

int ghes_edac_register(struct ghes *ghes, struct device *dev)
{
	bool fake = false;
	int rc = 0, num_dimm = 0;
	struct mem_ctl_info *mci;
	struct ghes_edac_pvt *pvt;
	struct edac_mc_layer layers[1];
	struct ghes_edac_dimm_fill dimm_fill;
	unsigned long flags;
	int idx = -1;

	if (IS_ENABLED(CONFIG_X86)) {
		/* Check if safe to enable on this system */
		idx = acpi_match_platform_list(plat_list);
		if (!force_load && idx < 0)
			return -ENODEV;
	} else {
		idx = 0;
	}

	/* finish another registration/unregistration instance first */
	mutex_lock(&ghes_reg_mutex);

	/*
	 * We have only one logical memory controller to which all DIMMs belong.
	 */
	if (refcount_inc_not_zero(&ghes_refcount))
		goto unlock;

	/* Get the number of DIMMs */
	dmi_walk(ghes_edac_count_dimms, &num_dimm);

	/* Check if we've got a bogus BIOS */
	if (num_dimm == 0) {
		fake = true;
		num_dimm = 1;
	}

	layers[0].type = EDAC_MC_LAYER_ALL_MEM;
	layers[0].size = num_dimm;
	layers[0].is_virt_csrow = true;

	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(struct ghes_edac_pvt));
	if (!mci) {
		pr_info("Can't allocate memory for EDAC data\n");
		rc = -ENOMEM;
		goto unlock;
	}

	pvt = mci->pvt_info;
	pvt->ghes = ghes;
	pvt->mci = mci;

	mci->pdev = dev;
	mci->mtype_cap = MEM_FLAG_EMPTY;
	mci->edac_ctl_cap = EDAC_FLAG_NONE;
	mci->edac_cap = EDAC_FLAG_NONE;
	mci->mod_name = "ghes_edac.c";
	mci->ctl_name = "ghes_edac";
	mci->dev_name = "ghes";

	if (fake) {
		pr_info("This system has a very crappy BIOS: It doesn't even list the DIMMs.\n");
		pr_info("Its SMBIOS info is wrong. It is doubtful that the error report would\n");
		pr_info("work on such a system. Use this driver with caution.\n");
	} else if (idx < 0) {
		pr_info("This EDAC driver relies on BIOS to enumerate memory and get error reports.\n");
		pr_info("Unfortunately, not all BIOSes reflect the memory layout correctly.\n");
		pr_info("So, the end result of using this driver varies from vendor to vendor.\n");
		pr_info("If you find incorrect reports, please contact your hardware vendor\n");
		pr_info("to correct its BIOS.\n");
		pr_info("This system has %d DIMM sockets.\n", num_dimm);
	}

	if (!fake) {
		dimm_fill.count = 0;
		dimm_fill.mci = mci;
		dmi_walk(ghes_edac_dmidecode, &dimm_fill);
	} else {
		struct dimm_info *dimm = edac_get_dimm(mci, 0, 0, 0);

		dimm->nr_pages = 1;
		dimm->grain = 128;
		dimm->mtype = MEM_UNKNOWN;
		dimm->dtype = DEV_UNKNOWN;
		dimm->edac_mode = EDAC_SECDED;
	}

	rc = edac_mc_add_mc(mci);
	if (rc < 0) {
		pr_info("Can't register at EDAC core\n");
		edac_mc_free(mci);
		rc = -ENODEV;
		goto unlock;
	}

	spin_lock_irqsave(&ghes_lock, flags);
	ghes_pvt = pvt;
	spin_unlock_irqrestore(&ghes_lock, flags);

	/* only set on success */
	refcount_set(&ghes_refcount, 1);

unlock:
	mutex_unlock(&ghes_reg_mutex);

	return rc;
}

void ghes_edac_unregister(struct ghes *ghes)
{
	struct mem_ctl_info *mci;
	unsigned long flags;

	mutex_lock(&ghes_reg_mutex);

	if (!refcount_dec_and_test(&ghes_refcount))
		goto unlock;

	/*
	 * Wait for the IRQ handler to finish.
	 */
	spin_lock_irqsave(&ghes_lock, flags);
	mci = ghes_pvt ? ghes_pvt->mci : NULL;
	ghes_pvt = NULL;
	spin_unlock_irqrestore(&ghes_lock, flags);

	if (!mci)
		goto unlock;

	mci = edac_mc_del_mc(mci->pdev);
	if (mci)
		edac_mc_free(mci);

unlock:
	mutex_unlock(&ghes_reg_mutex);
}