1 #include "amd64_edac.h" 2 #include <asm/amd_nb.h> 3 4 static struct edac_pci_ctl_info *pci_ctl; 5 6 static int report_gart_errors; 7 module_param(report_gart_errors, int, 0644); 8 9 /* 10 * Set by command line parameter. If BIOS has enabled the ECC, this override is 11 * cleared to prevent re-enabling the hardware by this driver. 12 */ 13 static int ecc_enable_override; 14 module_param(ecc_enable_override, int, 0644); 15 16 static struct msr __percpu *msrs; 17 18 /* Per-node stuff */ 19 static struct ecc_settings **ecc_stngs; 20 21 /* 22 * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing 23 * bandwidth to a valid bit pattern. The 'set' operation finds the 'matching- 24 * or higher value'. 25 * 26 *FIXME: Produce a better mapping/linearisation. 27 */ 28 static const struct scrubrate { 29 u32 scrubval; /* bit pattern for scrub rate */ 30 u32 bandwidth; /* bandwidth consumed (bytes/sec) */ 31 } scrubrates[] = { 32 { 0x01, 1600000000UL}, 33 { 0x02, 800000000UL}, 34 { 0x03, 400000000UL}, 35 { 0x04, 200000000UL}, 36 { 0x05, 100000000UL}, 37 { 0x06, 50000000UL}, 38 { 0x07, 25000000UL}, 39 { 0x08, 12284069UL}, 40 { 0x09, 6274509UL}, 41 { 0x0A, 3121951UL}, 42 { 0x0B, 1560975UL}, 43 { 0x0C, 781440UL}, 44 { 0x0D, 390720UL}, 45 { 0x0E, 195300UL}, 46 { 0x0F, 97650UL}, 47 { 0x10, 48854UL}, 48 { 0x11, 24427UL}, 49 { 0x12, 12213UL}, 50 { 0x13, 6101UL}, 51 { 0x14, 3051UL}, 52 { 0x15, 1523UL}, 53 { 0x16, 761UL}, 54 { 0x00, 0UL}, /* scrubbing off */ 55 }; 56 57 int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset, 58 u32 *val, const char *func) 59 { 60 int err = 0; 61 62 err = pci_read_config_dword(pdev, offset, val); 63 if (err) 64 amd64_warn("%s: error reading F%dx%03x.\n", 65 func, PCI_FUNC(pdev->devfn), offset); 66 67 return err; 68 } 69 70 int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset, 71 u32 val, const char *func) 72 { 73 int err = 0; 74 75 err = pci_write_config_dword(pdev, offset, val); 76 if (err) 77 amd64_warn("%s: error writing to F%dx%03x.\n", 78 func, PCI_FUNC(pdev->devfn), offset); 79 80 return err; 81 } 82 83 /* 84 * Select DCT to which PCI cfg accesses are routed 85 */ 86 static void f15h_select_dct(struct amd64_pvt *pvt, u8 dct) 87 { 88 u32 reg = 0; 89 90 amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, ®); 91 reg &= (pvt->model == 0x30) ? ~3 : ~1; 92 reg |= dct; 93 amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg); 94 } 95 96 /* 97 * 98 * Depending on the family, F2 DCT reads need special handling: 99 * 100 * K8: has a single DCT only and no address offsets >= 0x100 101 * 102 * F10h: each DCT has its own set of regs 103 * DCT0 -> F2x040.. 104 * DCT1 -> F2x140.. 105 * 106 * F16h: has only 1 DCT 107 * 108 * F15h: we select which DCT we access using F1x10C[DctCfgSel] 109 */ 110 static inline int amd64_read_dct_pci_cfg(struct amd64_pvt *pvt, u8 dct, 111 int offset, u32 *val) 112 { 113 switch (pvt->fam) { 114 case 0xf: 115 if (dct || offset >= 0x100) 116 return -EINVAL; 117 break; 118 119 case 0x10: 120 if (dct) { 121 /* 122 * Note: If ganging is enabled, barring the regs 123 * F2x[1,0]98 and F2x[1,0]9C; reads reads to F2x1xx 124 * return 0. (cf. Section 2.8.1 F10h BKDG) 125 */ 126 if (dct_ganging_enabled(pvt)) 127 return 0; 128 129 offset += 0x100; 130 } 131 break; 132 133 case 0x15: 134 /* 135 * F15h: F2x1xx addresses do not map explicitly to DCT1. 136 * We should select which DCT we access using F1x10C[DctCfgSel] 137 */ 138 dct = (dct && pvt->model == 0x30) ? 
3 : dct; 139 f15h_select_dct(pvt, dct); 140 break; 141 142 case 0x16: 143 if (dct) 144 return -EINVAL; 145 break; 146 147 default: 148 break; 149 } 150 return amd64_read_pci_cfg(pvt->F2, offset, val); 151 } 152 153 /* 154 * Memory scrubber control interface. For K8, memory scrubbing is handled by 155 * hardware and can involve L2 cache, dcache as well as the main memory. With 156 * F10, this is extended to L3 cache scrubbing on CPU models sporting that 157 * functionality. 158 * 159 * This causes the "units" for the scrubbing speed to vary from 64 byte blocks 160 * (dram) over to cache lines. This is nasty, so we will use bandwidth in 161 * bytes/sec for the setting. 162 * 163 * Currently, we only do dram scrubbing. If the scrubbing is done in software on 164 * other archs, we might not have access to the caches directly. 165 */ 166 167 static inline void __f17h_set_scrubval(struct amd64_pvt *pvt, u32 scrubval) 168 { 169 /* 170 * Fam17h supports scrub values between 0x5 and 0x14. Also, the values 171 * are shifted down by 0x5, so scrubval 0x5 is written to the register 172 * as 0x0, scrubval 0x6 as 0x1, etc. 173 */ 174 if (scrubval >= 0x5 && scrubval <= 0x14) { 175 scrubval -= 0x5; 176 pci_write_bits32(pvt->F6, F17H_SCR_LIMIT_ADDR, scrubval, 0xF); 177 pci_write_bits32(pvt->F6, F17H_SCR_BASE_ADDR, 1, 0x1); 178 } else { 179 pci_write_bits32(pvt->F6, F17H_SCR_BASE_ADDR, 0, 0x1); 180 } 181 } 182 /* 183 * Scan the scrub rate mapping table for a close or matching bandwidth value to 184 * issue. If requested is too big, then use last maximum value found. 185 */ 186 static int __set_scrub_rate(struct amd64_pvt *pvt, u32 new_bw, u32 min_rate) 187 { 188 u32 scrubval; 189 int i; 190 191 /* 192 * map the configured rate (new_bw) to a value specific to the AMD64 193 * memory controller and apply to register. Search for the first 194 * bandwidth entry that is greater or equal than the setting requested 195 * and program that. If at last entry, turn off DRAM scrubbing. 196 * 197 * If no suitable bandwidth is found, turn off DRAM scrubbing entirely 198 * by falling back to the last element in scrubrates[]. 
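	 *
	 * For example, with min_rate 0x5 a requested bandwidth of 100000
	 * bytes/sec selects scrubval 0x0F (97650 bytes/sec): entries below
	 * min_rate are skipped, and 97650 is the first (largest) rate in the
	 * table that does not exceed the request.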
199 */ 200 for (i = 0; i < ARRAY_SIZE(scrubrates) - 1; i++) { 201 /* 202 * skip scrub rates which aren't recommended 203 * (see F10 BKDG, F3x58) 204 */ 205 if (scrubrates[i].scrubval < min_rate) 206 continue; 207 208 if (scrubrates[i].bandwidth <= new_bw) 209 break; 210 } 211 212 scrubval = scrubrates[i].scrubval; 213 214 if (pvt->fam == 0x17 || pvt->fam == 0x18) { 215 __f17h_set_scrubval(pvt, scrubval); 216 } else if (pvt->fam == 0x15 && pvt->model == 0x60) { 217 f15h_select_dct(pvt, 0); 218 pci_write_bits32(pvt->F2, F15H_M60H_SCRCTRL, scrubval, 0x001F); 219 f15h_select_dct(pvt, 1); 220 pci_write_bits32(pvt->F2, F15H_M60H_SCRCTRL, scrubval, 0x001F); 221 } else { 222 pci_write_bits32(pvt->F3, SCRCTRL, scrubval, 0x001F); 223 } 224 225 if (scrubval) 226 return scrubrates[i].bandwidth; 227 228 return 0; 229 } 230 231 static int set_scrub_rate(struct mem_ctl_info *mci, u32 bw) 232 { 233 struct amd64_pvt *pvt = mci->pvt_info; 234 u32 min_scrubrate = 0x5; 235 236 if (pvt->fam == 0xf) 237 min_scrubrate = 0x0; 238 239 if (pvt->fam == 0x15) { 240 /* Erratum #505 */ 241 if (pvt->model < 0x10) 242 f15h_select_dct(pvt, 0); 243 244 if (pvt->model == 0x60) 245 min_scrubrate = 0x6; 246 } 247 return __set_scrub_rate(pvt, bw, min_scrubrate); 248 } 249 250 static int get_scrub_rate(struct mem_ctl_info *mci) 251 { 252 struct amd64_pvt *pvt = mci->pvt_info; 253 int i, retval = -EINVAL; 254 u32 scrubval = 0; 255 256 switch (pvt->fam) { 257 case 0x15: 258 /* Erratum #505 */ 259 if (pvt->model < 0x10) 260 f15h_select_dct(pvt, 0); 261 262 if (pvt->model == 0x60) 263 amd64_read_pci_cfg(pvt->F2, F15H_M60H_SCRCTRL, &scrubval); 264 break; 265 266 case 0x17: 267 case 0x18: 268 amd64_read_pci_cfg(pvt->F6, F17H_SCR_BASE_ADDR, &scrubval); 269 if (scrubval & BIT(0)) { 270 amd64_read_pci_cfg(pvt->F6, F17H_SCR_LIMIT_ADDR, &scrubval); 271 scrubval &= 0xF; 272 scrubval += 0x5; 273 } else { 274 scrubval = 0; 275 } 276 break; 277 278 default: 279 amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval); 280 break; 281 } 282 283 scrubval = scrubval & 0x001F; 284 285 for (i = 0; i < ARRAY_SIZE(scrubrates); i++) { 286 if (scrubrates[i].scrubval == scrubval) { 287 retval = scrubrates[i].bandwidth; 288 break; 289 } 290 } 291 return retval; 292 } 293 294 /* 295 * returns true if the SysAddr given by sys_addr matches the 296 * DRAM base/limit associated with node_id 297 */ 298 static bool base_limit_match(struct amd64_pvt *pvt, u64 sys_addr, u8 nid) 299 { 300 u64 addr; 301 302 /* The K8 treats this as a 40-bit value. However, bits 63-40 will be 303 * all ones if the most significant implemented address bit is 1. 304 * Here we discard bits 63-40. See section 3.4.2 of AMD publication 305 * 24592: AMD x86-64 Architecture Programmer's Manual Volume 1 306 * Application Programming. 307 */ 308 addr = sys_addr & 0x000000ffffffffffull; 309 310 return ((addr >= get_dram_base(pvt, nid)) && 311 (addr <= get_dram_limit(pvt, nid))); 312 } 313 314 /* 315 * Attempt to map a SysAddr to a node. On success, return a pointer to the 316 * mem_ctl_info structure for the node that the SysAddr maps to. 317 * 318 * On failure, return NULL. 319 */ 320 static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci, 321 u64 sys_addr) 322 { 323 struct amd64_pvt *pvt; 324 u8 node_id; 325 u32 intlv_en, bits; 326 327 /* 328 * Here we use the DRAM Base (section 3.4.4.1) and DRAM Limit (section 329 * 3.4.4.2) registers to map the SysAddr to a node ID. 
 */
	pvt = mci->pvt_info;

	/*
	 * The value of this field should be the same for all DRAM Base
	 * registers. Therefore we arbitrarily choose to read it from the
	 * register for node 0.
	 */
	intlv_en = dram_intlv_en(pvt, 0);

	if (intlv_en == 0) {
		for (node_id = 0; node_id < DRAM_RANGES; node_id++) {
			if (base_limit_match(pvt, sys_addr, node_id))
				goto found;
		}
		goto err_no_match;
	}

	if (unlikely((intlv_en != 0x01) &&
		     (intlv_en != 0x03) &&
		     (intlv_en != 0x07))) {
		amd64_warn("DRAM Base[IntlvEn] junk value: 0x%x, BIOS bug?\n", intlv_en);
		return NULL;
	}

	bits = (((u32) sys_addr) >> 12) & intlv_en;

	for (node_id = 0; ; ) {
		if ((dram_intlv_sel(pvt, node_id) & intlv_en) == bits)
			break;	/* intlv_sel field matches */

		if (++node_id >= DRAM_RANGES)
			goto err_no_match;
	}

	/* sanity test for sys_addr */
	if (unlikely(!base_limit_match(pvt, sys_addr, node_id))) {
		amd64_warn("%s: sys_addr 0x%llx falls outside base/limit address "
			   "range for node %d with node interleaving enabled.\n",
			   __func__, sys_addr, node_id);
		return NULL;
	}

found:
	return edac_mc_find((int)node_id);

err_no_match:
	edac_dbg(2, "sys_addr 0x%lx doesn't match any node\n",
		 (unsigned long)sys_addr);

	return NULL;
}

/*
 * Compute the CS base address of the @csrow on the DRAM controller @dct.
 * For details see F2x[5C:40] in the processor's BKDG.
 */
static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
				 u64 *base, u64 *mask)
{
	u64 csbase, csmask, base_bits, mask_bits;
	u8 addr_shift;

	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
		csbase = pvt->csels[dct].csbases[csrow];
		csmask = pvt->csels[dct].csmasks[csrow];
		base_bits = GENMASK_ULL(31, 21) | GENMASK_ULL(15, 9);
		mask_bits = GENMASK_ULL(29, 21) | GENMASK_ULL(15, 9);
		addr_shift = 4;

	/*
	 * F16h and F15h, models 30h and later need two addr_shift values:
	 * 8 for high and 6 for low (cf. F16h BKDG).
403 */ 404 } else if (pvt->fam == 0x16 || 405 (pvt->fam == 0x15 && pvt->model >= 0x30)) { 406 csbase = pvt->csels[dct].csbases[csrow]; 407 csmask = pvt->csels[dct].csmasks[csrow >> 1]; 408 409 *base = (csbase & GENMASK_ULL(15, 5)) << 6; 410 *base |= (csbase & GENMASK_ULL(30, 19)) << 8; 411 412 *mask = ~0ULL; 413 /* poke holes for the csmask */ 414 *mask &= ~((GENMASK_ULL(15, 5) << 6) | 415 (GENMASK_ULL(30, 19) << 8)); 416 417 *mask |= (csmask & GENMASK_ULL(15, 5)) << 6; 418 *mask |= (csmask & GENMASK_ULL(30, 19)) << 8; 419 420 return; 421 } else { 422 csbase = pvt->csels[dct].csbases[csrow]; 423 csmask = pvt->csels[dct].csmasks[csrow >> 1]; 424 addr_shift = 8; 425 426 if (pvt->fam == 0x15) 427 base_bits = mask_bits = 428 GENMASK_ULL(30,19) | GENMASK_ULL(13,5); 429 else 430 base_bits = mask_bits = 431 GENMASK_ULL(28,19) | GENMASK_ULL(13,5); 432 } 433 434 *base = (csbase & base_bits) << addr_shift; 435 436 *mask = ~0ULL; 437 /* poke holes for the csmask */ 438 *mask &= ~(mask_bits << addr_shift); 439 /* OR them in */ 440 *mask |= (csmask & mask_bits) << addr_shift; 441 } 442 443 #define for_each_chip_select(i, dct, pvt) \ 444 for (i = 0; i < pvt->csels[dct].b_cnt; i++) 445 446 #define chip_select_base(i, dct, pvt) \ 447 pvt->csels[dct].csbases[i] 448 449 #define for_each_chip_select_mask(i, dct, pvt) \ 450 for (i = 0; i < pvt->csels[dct].m_cnt; i++) 451 452 /* 453 * @input_addr is an InputAddr associated with the node given by mci. Return the 454 * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr). 455 */ 456 static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr) 457 { 458 struct amd64_pvt *pvt; 459 int csrow; 460 u64 base, mask; 461 462 pvt = mci->pvt_info; 463 464 for_each_chip_select(csrow, 0, pvt) { 465 if (!csrow_enabled(csrow, 0, pvt)) 466 continue; 467 468 get_cs_base_and_mask(pvt, csrow, 0, &base, &mask); 469 470 mask = ~mask; 471 472 if ((input_addr & mask) == (base & mask)) { 473 edac_dbg(2, "InputAddr 0x%lx matches csrow %d (node %d)\n", 474 (unsigned long)input_addr, csrow, 475 pvt->mc_node_id); 476 477 return csrow; 478 } 479 } 480 edac_dbg(2, "no matching csrow for InputAddr 0x%lx (MC node %d)\n", 481 (unsigned long)input_addr, pvt->mc_node_id); 482 483 return -1; 484 } 485 486 /* 487 * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094) 488 * for the node represented by mci. Info is passed back in *hole_base, 489 * *hole_offset, and *hole_size. Function returns 0 if info is valid or 1 if 490 * info is invalid. Info may be invalid for either of the following reasons: 491 * 492 * - The revision of the node is not E or greater. In this case, the DRAM Hole 493 * Address Register does not exist. 494 * 495 * - The DramHoleValid bit is cleared in the DRAM Hole Address Register, 496 * indicating that its contents are not valid. 497 * 498 * The values passed back in *hole_base, *hole_offset, and *hole_size are 499 * complete 32-bit values despite the fact that the bitfields in the DHAR 500 * only represent bits 31-24 of the base and offset values. 
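 *
 * For example, a DHAR base field of 0xc0 (bits 31-24) yields a *hole_base of
 * 0xc0000000 and, since the hole always ends at the 4GB boundary, a
 * *hole_size of 0x40000000.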
501 */ 502 int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base, 503 u64 *hole_offset, u64 *hole_size) 504 { 505 struct amd64_pvt *pvt = mci->pvt_info; 506 507 /* only revE and later have the DRAM Hole Address Register */ 508 if (pvt->fam == 0xf && pvt->ext_model < K8_REV_E) { 509 edac_dbg(1, " revision %d for node %d does not support DHAR\n", 510 pvt->ext_model, pvt->mc_node_id); 511 return 1; 512 } 513 514 /* valid for Fam10h and above */ 515 if (pvt->fam >= 0x10 && !dhar_mem_hoist_valid(pvt)) { 516 edac_dbg(1, " Dram Memory Hoisting is DISABLED on this system\n"); 517 return 1; 518 } 519 520 if (!dhar_valid(pvt)) { 521 edac_dbg(1, " Dram Memory Hoisting is DISABLED on this node %d\n", 522 pvt->mc_node_id); 523 return 1; 524 } 525 526 /* This node has Memory Hoisting */ 527 528 /* +------------------+--------------------+--------------------+----- 529 * | memory | DRAM hole | relocated | 530 * | [0, (x - 1)] | [x, 0xffffffff] | addresses from | 531 * | | | DRAM hole | 532 * | | | [0x100000000, | 533 * | | | (0x100000000+ | 534 * | | | (0xffffffff-x))] | 535 * +------------------+--------------------+--------------------+----- 536 * 537 * Above is a diagram of physical memory showing the DRAM hole and the 538 * relocated addresses from the DRAM hole. As shown, the DRAM hole 539 * starts at address x (the base address) and extends through address 540 * 0xffffffff. The DRAM Hole Address Register (DHAR) relocates the 541 * addresses in the hole so that they start at 0x100000000. 542 */ 543 544 *hole_base = dhar_base(pvt); 545 *hole_size = (1ULL << 32) - *hole_base; 546 547 *hole_offset = (pvt->fam > 0xf) ? f10_dhar_offset(pvt) 548 : k8_dhar_offset(pvt); 549 550 edac_dbg(1, " DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n", 551 pvt->mc_node_id, (unsigned long)*hole_base, 552 (unsigned long)*hole_offset, (unsigned long)*hole_size); 553 554 return 0; 555 } 556 EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info); 557 558 /* 559 * Return the DramAddr that the SysAddr given by @sys_addr maps to. It is 560 * assumed that sys_addr maps to the node given by mci. 561 * 562 * The first part of section 3.4.4 (p. 70) shows how the DRAM Base (section 563 * 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers are used to translate a 564 * SysAddr to a DramAddr. If the DRAM Hole Address Register (DHAR) is enabled, 565 * then it is also involved in translating a SysAddr to a DramAddr. Sections 566 * 3.4.8 and 3.5.8.2 describe the DHAR and how it is used for memory hoisting. 567 * These parts of the documentation are unclear. I interpret them as follows: 568 * 569 * When node n receives a SysAddr, it processes the SysAddr as follows: 570 * 571 * 1. It extracts the DRAMBase and DRAMLimit values from the DRAM Base and DRAM 572 * Limit registers for node n. If the SysAddr is not within the range 573 * specified by the base and limit values, then node n ignores the Sysaddr 574 * (since it does not map to node n). Otherwise continue to step 2 below. 575 * 576 * 2. If the DramHoleValid bit of the DHAR for node n is clear, the DHAR is 577 * disabled so skip to step 3 below. Otherwise see if the SysAddr is within 578 * the range of relocated addresses (starting at 0x100000000) from the DRAM 579 * hole. If not, skip to step 3 below. Else get the value of the 580 * DramHoleOffset field from the DHAR. To obtain the DramAddr, subtract the 581 * offset defined by this value from the SysAddr. 582 * 583 * 3. 
Obtain the base address for node n from the DRAMBase field of the DRAM 584 * Base register for node n. To obtain the DramAddr, subtract the base 585 * address from the SysAddr, as shown near the start of section 3.4.4 (p.70). 586 */ 587 static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr) 588 { 589 struct amd64_pvt *pvt = mci->pvt_info; 590 u64 dram_base, hole_base, hole_offset, hole_size, dram_addr; 591 int ret; 592 593 dram_base = get_dram_base(pvt, pvt->mc_node_id); 594 595 ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset, 596 &hole_size); 597 if (!ret) { 598 if ((sys_addr >= (1ULL << 32)) && 599 (sys_addr < ((1ULL << 32) + hole_size))) { 600 /* use DHAR to translate SysAddr to DramAddr */ 601 dram_addr = sys_addr - hole_offset; 602 603 edac_dbg(2, "using DHAR to translate SysAddr 0x%lx to DramAddr 0x%lx\n", 604 (unsigned long)sys_addr, 605 (unsigned long)dram_addr); 606 607 return dram_addr; 608 } 609 } 610 611 /* 612 * Translate the SysAddr to a DramAddr as shown near the start of 613 * section 3.4.4 (p. 70). Although sys_addr is a 64-bit value, the k8 614 * only deals with 40-bit values. Therefore we discard bits 63-40 of 615 * sys_addr below. If bit 39 of sys_addr is 1 then the bits we 616 * discard are all 1s. Otherwise the bits we discard are all 0s. See 617 * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture 618 * Programmer's Manual Volume 1 Application Programming. 619 */ 620 dram_addr = (sys_addr & GENMASK_ULL(39, 0)) - dram_base; 621 622 edac_dbg(2, "using DRAM Base register to translate SysAddr 0x%lx to DramAddr 0x%lx\n", 623 (unsigned long)sys_addr, (unsigned long)dram_addr); 624 return dram_addr; 625 } 626 627 /* 628 * @intlv_en is the value of the IntlvEn field from a DRAM Base register 629 * (section 3.4.4.1). Return the number of bits from a SysAddr that are used 630 * for node interleaving. 631 */ 632 static int num_node_interleave_bits(unsigned intlv_en) 633 { 634 static const int intlv_shift_table[] = { 0, 1, 0, 2, 0, 0, 0, 3 }; 635 int n; 636 637 BUG_ON(intlv_en > 7); 638 n = intlv_shift_table[intlv_en]; 639 return n; 640 } 641 642 /* Translate the DramAddr given by @dram_addr to an InputAddr. */ 643 static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr) 644 { 645 struct amd64_pvt *pvt; 646 int intlv_shift; 647 u64 input_addr; 648 649 pvt = mci->pvt_info; 650 651 /* 652 * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E) 653 * concerning translating a DramAddr to an InputAddr. 654 */ 655 intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0)); 656 input_addr = ((dram_addr >> intlv_shift) & GENMASK_ULL(35, 12)) + 657 (dram_addr & 0xfff); 658 659 edac_dbg(2, " Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n", 660 intlv_shift, (unsigned long)dram_addr, 661 (unsigned long)input_addr); 662 663 return input_addr; 664 } 665 666 /* 667 * Translate the SysAddr represented by @sys_addr to an InputAddr. It is 668 * assumed that @sys_addr maps to the node given by mci. 669 */ 670 static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr) 671 { 672 u64 input_addr; 673 674 input_addr = 675 dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr)); 676 677 edac_dbg(2, "SysAddr 0x%lx translates to InputAddr 0x%lx\n", 678 (unsigned long)sys_addr, (unsigned long)input_addr); 679 680 return input_addr; 681 } 682 683 /* Map the Error address to a PAGE and PAGE OFFSET. 
*/ 684 static inline void error_address_to_page_and_offset(u64 error_address, 685 struct err_info *err) 686 { 687 err->page = (u32) (error_address >> PAGE_SHIFT); 688 err->offset = ((u32) error_address) & ~PAGE_MASK; 689 } 690 691 /* 692 * @sys_addr is an error address (a SysAddr) extracted from the MCA NB Address 693 * Low (section 3.6.4.5) and MCA NB Address High (section 3.6.4.6) registers 694 * of a node that detected an ECC memory error. mci represents the node that 695 * the error address maps to (possibly different from the node that detected 696 * the error). Return the number of the csrow that sys_addr maps to, or -1 on 697 * error. 698 */ 699 static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr) 700 { 701 int csrow; 702 703 csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr)); 704 705 if (csrow == -1) 706 amd64_mc_err(mci, "Failed to translate InputAddr to csrow for " 707 "address 0x%lx\n", (unsigned long)sys_addr); 708 return csrow; 709 } 710 711 static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16); 712 713 /* 714 * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs 715 * are ECC capable. 716 */ 717 static unsigned long determine_edac_cap(struct amd64_pvt *pvt) 718 { 719 unsigned long edac_cap = EDAC_FLAG_NONE; 720 u8 bit; 721 722 if (pvt->umc) { 723 u8 i, umc_en_mask = 0, dimm_ecc_en_mask = 0; 724 725 for (i = 0; i < NUM_UMCS; i++) { 726 if (!(pvt->umc[i].sdp_ctrl & UMC_SDP_INIT)) 727 continue; 728 729 umc_en_mask |= BIT(i); 730 731 /* UMC Configuration bit 12 (DimmEccEn) */ 732 if (pvt->umc[i].umc_cfg & BIT(12)) 733 dimm_ecc_en_mask |= BIT(i); 734 } 735 736 if (umc_en_mask == dimm_ecc_en_mask) 737 edac_cap = EDAC_FLAG_SECDED; 738 } else { 739 bit = (pvt->fam > 0xf || pvt->ext_model >= K8_REV_F) 740 ? 19 741 : 17; 742 743 if (pvt->dclr0 & BIT(bit)) 744 edac_cap = EDAC_FLAG_SECDED; 745 } 746 747 return edac_cap; 748 } 749 750 static void debug_display_dimm_sizes(struct amd64_pvt *, u8); 751 752 static void debug_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan) 753 { 754 edac_dbg(1, "F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr); 755 756 if (pvt->dram_type == MEM_LRDDR3) { 757 u32 dcsm = pvt->csels[chan].csmasks[0]; 758 /* 759 * It's assumed all LRDIMMs in a DCT are going to be of 760 * same 'type' until proven otherwise. So, use a cs 761 * value of '0' here to get dcsm value. 762 */ 763 edac_dbg(1, " LRDIMM %dx rank multiply\n", (dcsm & 0x3)); 764 } 765 766 edac_dbg(1, "All DIMMs support ECC:%s\n", 767 (dclr & BIT(19)) ? "yes" : "no"); 768 769 770 edac_dbg(1, " PAR/ERR parity: %s\n", 771 (dclr & BIT(8)) ? "enabled" : "disabled"); 772 773 if (pvt->fam == 0x10) 774 edac_dbg(1, " DCT 128bit mode width: %s\n", 775 (dclr & BIT(11)) ? "128b" : "64b"); 776 777 edac_dbg(1, " x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n", 778 (dclr & BIT(12)) ? "yes" : "no", 779 (dclr & BIT(13)) ? "yes" : "no", 780 (dclr & BIT(14)) ? "yes" : "no", 781 (dclr & BIT(15)) ? 
"yes" : "no"); 782 } 783 784 static void debug_display_dimm_sizes_df(struct amd64_pvt *pvt, u8 ctrl) 785 { 786 int dimm, size0, size1, cs0, cs1; 787 788 edac_printk(KERN_DEBUG, EDAC_MC, "UMC%d chip selects:\n", ctrl); 789 790 for (dimm = 0; dimm < 4; dimm++) { 791 size0 = 0; 792 cs0 = dimm * 2; 793 794 if (csrow_enabled(cs0, ctrl, pvt)) 795 size0 = pvt->ops->dbam_to_cs(pvt, ctrl, 0, cs0); 796 797 size1 = 0; 798 cs1 = dimm * 2 + 1; 799 800 if (csrow_enabled(cs1, ctrl, pvt)) 801 size1 = pvt->ops->dbam_to_cs(pvt, ctrl, 0, cs1); 802 803 amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n", 804 cs0, size0, 805 cs1, size1); 806 } 807 } 808 809 static void __dump_misc_regs_df(struct amd64_pvt *pvt) 810 { 811 struct amd64_umc *umc; 812 u32 i, tmp, umc_base; 813 814 for (i = 0; i < NUM_UMCS; i++) { 815 umc_base = get_umc_base(i); 816 umc = &pvt->umc[i]; 817 818 edac_dbg(1, "UMC%d DIMM cfg: 0x%x\n", i, umc->dimm_cfg); 819 edac_dbg(1, "UMC%d UMC cfg: 0x%x\n", i, umc->umc_cfg); 820 edac_dbg(1, "UMC%d SDP ctrl: 0x%x\n", i, umc->sdp_ctrl); 821 edac_dbg(1, "UMC%d ECC ctrl: 0x%x\n", i, umc->ecc_ctrl); 822 823 amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_ECC_BAD_SYMBOL, &tmp); 824 edac_dbg(1, "UMC%d ECC bad symbol: 0x%x\n", i, tmp); 825 826 amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_UMC_CAP, &tmp); 827 edac_dbg(1, "UMC%d UMC cap: 0x%x\n", i, tmp); 828 edac_dbg(1, "UMC%d UMC cap high: 0x%x\n", i, umc->umc_cap_hi); 829 830 edac_dbg(1, "UMC%d ECC capable: %s, ChipKill ECC capable: %s\n", 831 i, (umc->umc_cap_hi & BIT(30)) ? "yes" : "no", 832 (umc->umc_cap_hi & BIT(31)) ? "yes" : "no"); 833 edac_dbg(1, "UMC%d All DIMMs support ECC: %s\n", 834 i, (umc->umc_cfg & BIT(12)) ? "yes" : "no"); 835 edac_dbg(1, "UMC%d x4 DIMMs present: %s\n", 836 i, (umc->dimm_cfg & BIT(6)) ? "yes" : "no"); 837 edac_dbg(1, "UMC%d x16 DIMMs present: %s\n", 838 i, (umc->dimm_cfg & BIT(7)) ? "yes" : "no"); 839 840 if (pvt->dram_type == MEM_LRDDR4) { 841 amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_ADDR_CFG, &tmp); 842 edac_dbg(1, "UMC%d LRDIMM %dx rank multiply\n", 843 i, 1 << ((tmp >> 4) & 0x3)); 844 } 845 846 debug_display_dimm_sizes_df(pvt, i); 847 } 848 849 edac_dbg(1, "F0x104 (DRAM Hole Address): 0x%08x, base: 0x%08x\n", 850 pvt->dhar, dhar_base(pvt)); 851 } 852 853 /* Display and decode various NB registers for debug purposes. */ 854 static void __dump_misc_regs(struct amd64_pvt *pvt) 855 { 856 edac_dbg(1, "F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap); 857 858 edac_dbg(1, " NB two channel DRAM capable: %s\n", 859 (pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no"); 860 861 edac_dbg(1, " ECC capable: %s, ChipKill ECC capable: %s\n", 862 (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no", 863 (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no"); 864 865 debug_dump_dramcfg_low(pvt, pvt->dclr0, 0); 866 867 edac_dbg(1, "F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare); 868 869 edac_dbg(1, "F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, offset: 0x%08x\n", 870 pvt->dhar, dhar_base(pvt), 871 (pvt->fam == 0xf) ? k8_dhar_offset(pvt) 872 : f10_dhar_offset(pvt)); 873 874 debug_display_dimm_sizes(pvt, 0); 875 876 /* everything below this point is Fam10h and above */ 877 if (pvt->fam == 0xf) 878 return; 879 880 debug_display_dimm_sizes(pvt, 1); 881 882 /* Only if NOT ganged does dclr1 have valid info */ 883 if (!dct_ganging_enabled(pvt)) 884 debug_dump_dramcfg_low(pvt, pvt->dclr1, 1); 885 } 886 887 /* Display and decode various NB registers for debug purposes. 
*/ 888 static void dump_misc_regs(struct amd64_pvt *pvt) 889 { 890 if (pvt->umc) 891 __dump_misc_regs_df(pvt); 892 else 893 __dump_misc_regs(pvt); 894 895 edac_dbg(1, " DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no"); 896 897 amd64_info("using %s syndromes.\n", 898 ((pvt->ecc_sym_sz == 8) ? "x8" : "x4")); 899 } 900 901 /* 902 * See BKDG, F2x[1,0][5C:40], F2[1,0][6C:60] 903 */ 904 static void prep_chip_selects(struct amd64_pvt *pvt) 905 { 906 if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) { 907 pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8; 908 pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 8; 909 } else if (pvt->fam == 0x15 && pvt->model == 0x30) { 910 pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 4; 911 pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 2; 912 } else { 913 pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8; 914 pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4; 915 } 916 } 917 918 /* 919 * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask registers 920 */ 921 static void read_dct_base_mask(struct amd64_pvt *pvt) 922 { 923 int base_reg0, base_reg1, mask_reg0, mask_reg1, cs; 924 925 prep_chip_selects(pvt); 926 927 if (pvt->umc) { 928 base_reg0 = get_umc_base(0) + UMCCH_BASE_ADDR; 929 base_reg1 = get_umc_base(1) + UMCCH_BASE_ADDR; 930 mask_reg0 = get_umc_base(0) + UMCCH_ADDR_MASK; 931 mask_reg1 = get_umc_base(1) + UMCCH_ADDR_MASK; 932 } else { 933 base_reg0 = DCSB0; 934 base_reg1 = DCSB1; 935 mask_reg0 = DCSM0; 936 mask_reg1 = DCSM1; 937 } 938 939 for_each_chip_select(cs, 0, pvt) { 940 int reg0 = base_reg0 + (cs * 4); 941 int reg1 = base_reg1 + (cs * 4); 942 u32 *base0 = &pvt->csels[0].csbases[cs]; 943 u32 *base1 = &pvt->csels[1].csbases[cs]; 944 945 if (pvt->umc) { 946 if (!amd_smn_read(pvt->mc_node_id, reg0, base0)) 947 edac_dbg(0, " DCSB0[%d]=0x%08x reg: 0x%x\n", 948 cs, *base0, reg0); 949 950 if (!amd_smn_read(pvt->mc_node_id, reg1, base1)) 951 edac_dbg(0, " DCSB1[%d]=0x%08x reg: 0x%x\n", 952 cs, *base1, reg1); 953 } else { 954 if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, base0)) 955 edac_dbg(0, " DCSB0[%d]=0x%08x reg: F2x%x\n", 956 cs, *base0, reg0); 957 958 if (pvt->fam == 0xf) 959 continue; 960 961 if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, base1)) 962 edac_dbg(0, " DCSB1[%d]=0x%08x reg: F2x%x\n", 963 cs, *base1, (pvt->fam == 0x10) ? reg1 964 : reg0); 965 } 966 } 967 968 for_each_chip_select_mask(cs, 0, pvt) { 969 int reg0 = mask_reg0 + (cs * 4); 970 int reg1 = mask_reg1 + (cs * 4); 971 u32 *mask0 = &pvt->csels[0].csmasks[cs]; 972 u32 *mask1 = &pvt->csels[1].csmasks[cs]; 973 974 if (pvt->umc) { 975 if (!amd_smn_read(pvt->mc_node_id, reg0, mask0)) 976 edac_dbg(0, " DCSM0[%d]=0x%08x reg: 0x%x\n", 977 cs, *mask0, reg0); 978 979 if (!amd_smn_read(pvt->mc_node_id, reg1, mask1)) 980 edac_dbg(0, " DCSM1[%d]=0x%08x reg: 0x%x\n", 981 cs, *mask1, reg1); 982 } else { 983 if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, mask0)) 984 edac_dbg(0, " DCSM0[%d]=0x%08x reg: F2x%x\n", 985 cs, *mask0, reg0); 986 987 if (pvt->fam == 0xf) 988 continue; 989 990 if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, mask1)) 991 edac_dbg(0, " DCSM1[%d]=0x%08x reg: F2x%x\n", 992 cs, *mask1, (pvt->fam == 0x10) ? reg1 993 : reg0); 994 } 995 } 996 } 997 998 static void determine_memory_type(struct amd64_pvt *pvt) 999 { 1000 u32 dram_ctrl, dcsm; 1001 1002 switch (pvt->fam) { 1003 case 0xf: 1004 if (pvt->ext_model >= K8_REV_F) 1005 goto ddr3; 1006 1007 pvt->dram_type = (pvt->dclr0 & BIT(18)) ? 
MEM_DDR : MEM_RDDR; 1008 return; 1009 1010 case 0x10: 1011 if (pvt->dchr0 & DDR3_MODE) 1012 goto ddr3; 1013 1014 pvt->dram_type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2; 1015 return; 1016 1017 case 0x15: 1018 if (pvt->model < 0x60) 1019 goto ddr3; 1020 1021 /* 1022 * Model 0x60h needs special handling: 1023 * 1024 * We use a Chip Select value of '0' to obtain dcsm. 1025 * Theoretically, it is possible to populate LRDIMMs of different 1026 * 'Rank' value on a DCT. But this is not the common case. So, 1027 * it's reasonable to assume all DIMMs are going to be of same 1028 * 'type' until proven otherwise. 1029 */ 1030 amd64_read_dct_pci_cfg(pvt, 0, DRAM_CONTROL, &dram_ctrl); 1031 dcsm = pvt->csels[0].csmasks[0]; 1032 1033 if (((dram_ctrl >> 8) & 0x7) == 0x2) 1034 pvt->dram_type = MEM_DDR4; 1035 else if (pvt->dclr0 & BIT(16)) 1036 pvt->dram_type = MEM_DDR3; 1037 else if (dcsm & 0x3) 1038 pvt->dram_type = MEM_LRDDR3; 1039 else 1040 pvt->dram_type = MEM_RDDR3; 1041 1042 return; 1043 1044 case 0x16: 1045 goto ddr3; 1046 1047 case 0x17: 1048 case 0x18: 1049 if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(5)) 1050 pvt->dram_type = MEM_LRDDR4; 1051 else if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(4)) 1052 pvt->dram_type = MEM_RDDR4; 1053 else 1054 pvt->dram_type = MEM_DDR4; 1055 return; 1056 1057 default: 1058 WARN(1, KERN_ERR "%s: Family??? 0x%x\n", __func__, pvt->fam); 1059 pvt->dram_type = MEM_EMPTY; 1060 } 1061 return; 1062 1063 ddr3: 1064 pvt->dram_type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3; 1065 } 1066 1067 /* Get the number of DCT channels the memory controller is using. */ 1068 static int k8_early_channel_count(struct amd64_pvt *pvt) 1069 { 1070 int flag; 1071 1072 if (pvt->ext_model >= K8_REV_F) 1073 /* RevF (NPT) and later */ 1074 flag = pvt->dclr0 & WIDTH_128; 1075 else 1076 /* RevE and earlier */ 1077 flag = pvt->dclr0 & REVE_WIDTH_128; 1078 1079 /* not used */ 1080 pvt->dclr1 = 0; 1081 1082 return (flag) ? 
2 : 1; 1083 } 1084 1085 /* On F10h and later ErrAddr is MC4_ADDR[47:1] */ 1086 static u64 get_error_address(struct amd64_pvt *pvt, struct mce *m) 1087 { 1088 u16 mce_nid = amd_get_nb_id(m->extcpu); 1089 struct mem_ctl_info *mci; 1090 u8 start_bit = 1; 1091 u8 end_bit = 47; 1092 u64 addr; 1093 1094 mci = edac_mc_find(mce_nid); 1095 if (!mci) 1096 return 0; 1097 1098 pvt = mci->pvt_info; 1099 1100 if (pvt->fam == 0xf) { 1101 start_bit = 3; 1102 end_bit = 39; 1103 } 1104 1105 addr = m->addr & GENMASK_ULL(end_bit, start_bit); 1106 1107 /* 1108 * Erratum 637 workaround 1109 */ 1110 if (pvt->fam == 0x15) { 1111 u64 cc6_base, tmp_addr; 1112 u32 tmp; 1113 u8 intlv_en; 1114 1115 if ((addr & GENMASK_ULL(47, 24)) >> 24 != 0x00fdf7) 1116 return addr; 1117 1118 1119 amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_LIM, &tmp); 1120 intlv_en = tmp >> 21 & 0x7; 1121 1122 /* add [47:27] + 3 trailing bits */ 1123 cc6_base = (tmp & GENMASK_ULL(20, 0)) << 3; 1124 1125 /* reverse and add DramIntlvEn */ 1126 cc6_base |= intlv_en ^ 0x7; 1127 1128 /* pin at [47:24] */ 1129 cc6_base <<= 24; 1130 1131 if (!intlv_en) 1132 return cc6_base | (addr & GENMASK_ULL(23, 0)); 1133 1134 amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_BASE, &tmp); 1135 1136 /* faster log2 */ 1137 tmp_addr = (addr & GENMASK_ULL(23, 12)) << __fls(intlv_en + 1); 1138 1139 /* OR DramIntlvSel into bits [14:12] */ 1140 tmp_addr |= (tmp & GENMASK_ULL(23, 21)) >> 9; 1141 1142 /* add remaining [11:0] bits from original MC4_ADDR */ 1143 tmp_addr |= addr & GENMASK_ULL(11, 0); 1144 1145 return cc6_base | tmp_addr; 1146 } 1147 1148 return addr; 1149 } 1150 1151 static struct pci_dev *pci_get_related_function(unsigned int vendor, 1152 unsigned int device, 1153 struct pci_dev *related) 1154 { 1155 struct pci_dev *dev = NULL; 1156 1157 while ((dev = pci_get_device(vendor, device, dev))) { 1158 if (pci_domain_nr(dev->bus) == pci_domain_nr(related->bus) && 1159 (dev->bus->number == related->bus->number) && 1160 (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn))) 1161 break; 1162 } 1163 1164 return dev; 1165 } 1166 1167 static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range) 1168 { 1169 struct amd_northbridge *nb; 1170 struct pci_dev *f1 = NULL; 1171 unsigned int pci_func; 1172 int off = range << 3; 1173 u32 llim; 1174 1175 amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off, &pvt->ranges[range].base.lo); 1176 amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo); 1177 1178 if (pvt->fam == 0xf) 1179 return; 1180 1181 if (!dram_rw(pvt, range)) 1182 return; 1183 1184 amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off, &pvt->ranges[range].base.hi); 1185 amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi); 1186 1187 /* F15h: factor in CC6 save area by reading dst node's limit reg */ 1188 if (pvt->fam != 0x15) 1189 return; 1190 1191 nb = node_to_amd_nb(dram_dst_node(pvt, range)); 1192 if (WARN_ON(!nb)) 1193 return; 1194 1195 if (pvt->model == 0x60) 1196 pci_func = PCI_DEVICE_ID_AMD_15H_M60H_NB_F1; 1197 else if (pvt->model == 0x30) 1198 pci_func = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1; 1199 else 1200 pci_func = PCI_DEVICE_ID_AMD_15H_NB_F1; 1201 1202 f1 = pci_get_related_function(nb->misc->vendor, pci_func, nb->misc); 1203 if (WARN_ON(!f1)) 1204 return; 1205 1206 amd64_read_pci_cfg(f1, DRAM_LOCAL_NODE_LIM, &llim); 1207 1208 pvt->ranges[range].lim.lo &= GENMASK_ULL(15, 0); 1209 1210 /* {[39:27],111b} */ 1211 pvt->ranges[range].lim.lo |= ((llim & 0x1fff) << 3 | 0x7) << 16; 1212 1213 pvt->ranges[range].lim.hi &= 
GENMASK_ULL(7, 0); 1214 1215 /* [47:40] */ 1216 pvt->ranges[range].lim.hi |= llim >> 13; 1217 1218 pci_dev_put(f1); 1219 } 1220 1221 static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr, 1222 struct err_info *err) 1223 { 1224 struct amd64_pvt *pvt = mci->pvt_info; 1225 1226 error_address_to_page_and_offset(sys_addr, err); 1227 1228 /* 1229 * Find out which node the error address belongs to. This may be 1230 * different from the node that detected the error. 1231 */ 1232 err->src_mci = find_mc_by_sys_addr(mci, sys_addr); 1233 if (!err->src_mci) { 1234 amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n", 1235 (unsigned long)sys_addr); 1236 err->err_code = ERR_NODE; 1237 return; 1238 } 1239 1240 /* Now map the sys_addr to a CSROW */ 1241 err->csrow = sys_addr_to_csrow(err->src_mci, sys_addr); 1242 if (err->csrow < 0) { 1243 err->err_code = ERR_CSROW; 1244 return; 1245 } 1246 1247 /* CHIPKILL enabled */ 1248 if (pvt->nbcfg & NBCFG_CHIPKILL) { 1249 err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome); 1250 if (err->channel < 0) { 1251 /* 1252 * Syndrome didn't map, so we don't know which of the 1253 * 2 DIMMs is in error. So we need to ID 'both' of them 1254 * as suspect. 1255 */ 1256 amd64_mc_warn(err->src_mci, "unknown syndrome 0x%04x - " 1257 "possible error reporting race\n", 1258 err->syndrome); 1259 err->err_code = ERR_CHANNEL; 1260 return; 1261 } 1262 } else { 1263 /* 1264 * non-chipkill ecc mode 1265 * 1266 * The k8 documentation is unclear about how to determine the 1267 * channel number when using non-chipkill memory. This method 1268 * was obtained from email communication with someone at AMD. 1269 * (Wish the email was placed in this comment - norsk) 1270 */ 1271 err->channel = ((sys_addr & BIT(3)) != 0); 1272 } 1273 } 1274 1275 static int ddr2_cs_size(unsigned i, bool dct_width) 1276 { 1277 unsigned shift = 0; 1278 1279 if (i <= 2) 1280 shift = i; 1281 else if (!(i & 0x1)) 1282 shift = i >> 1; 1283 else 1284 shift = (i + 1) >> 1; 1285 1286 return 128 << (shift + !!dct_width); 1287 } 1288 1289 static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct, 1290 unsigned cs_mode, int cs_mask_nr) 1291 { 1292 u32 dclr = dct ? pvt->dclr1 : pvt->dclr0; 1293 1294 if (pvt->ext_model >= K8_REV_F) { 1295 WARN_ON(cs_mode > 11); 1296 return ddr2_cs_size(cs_mode, dclr & WIDTH_128); 1297 } 1298 else if (pvt->ext_model >= K8_REV_D) { 1299 unsigned diff; 1300 WARN_ON(cs_mode > 10); 1301 1302 /* 1303 * the below calculation, besides trying to win an obfuscated C 1304 * contest, maps cs_mode values to DIMM chip select sizes. The 1305 * mappings are: 1306 * 1307 * cs_mode CS size (mb) 1308 * ======= ============ 1309 * 0 32 1310 * 1 64 1311 * 2 128 1312 * 3 128 1313 * 4 256 1314 * 5 512 1315 * 6 256 1316 * 7 512 1317 * 8 1024 1318 * 9 1024 1319 * 10 2048 1320 * 1321 * Basically, it calculates a value with which to shift the 1322 * smallest CS size of 32MB. 1323 * 1324 * ddr[23]_cs_size have a similar purpose. 1325 */ 1326 diff = cs_mode/3 + (unsigned)(cs_mode > 5); 1327 1328 return 32 << (cs_mode - diff); 1329 } 1330 else { 1331 WARN_ON(cs_mode > 6); 1332 return 32 << cs_mode; 1333 } 1334 } 1335 1336 /* 1337 * Get the number of DCT channels in use. 
1338 * 1339 * Return: 1340 * number of Memory Channels in operation 1341 * Pass back: 1342 * contents of the DCL0_LOW register 1343 */ 1344 static int f1x_early_channel_count(struct amd64_pvt *pvt) 1345 { 1346 int i, j, channels = 0; 1347 1348 /* On F10h, if we are in 128 bit mode, then we are using 2 channels */ 1349 if (pvt->fam == 0x10 && (pvt->dclr0 & WIDTH_128)) 1350 return 2; 1351 1352 /* 1353 * Need to check if in unganged mode: In such, there are 2 channels, 1354 * but they are not in 128 bit mode and thus the above 'dclr0' status 1355 * bit will be OFF. 1356 * 1357 * Need to check DCT0[0] and DCT1[0] to see if only one of them has 1358 * their CSEnable bit on. If so, then SINGLE DIMM case. 1359 */ 1360 edac_dbg(0, "Data width is not 128 bits - need more decoding\n"); 1361 1362 /* 1363 * Check DRAM Bank Address Mapping values for each DIMM to see if there 1364 * is more than just one DIMM present in unganged mode. Need to check 1365 * both controllers since DIMMs can be placed in either one. 1366 */ 1367 for (i = 0; i < 2; i++) { 1368 u32 dbam = (i ? pvt->dbam1 : pvt->dbam0); 1369 1370 for (j = 0; j < 4; j++) { 1371 if (DBAM_DIMM(j, dbam) > 0) { 1372 channels++; 1373 break; 1374 } 1375 } 1376 } 1377 1378 if (channels > 2) 1379 channels = 2; 1380 1381 amd64_info("MCT channel count: %d\n", channels); 1382 1383 return channels; 1384 } 1385 1386 static int f17_early_channel_count(struct amd64_pvt *pvt) 1387 { 1388 int i, channels = 0; 1389 1390 /* SDP Control bit 31 (SdpInit) is clear for unused UMC channels */ 1391 for (i = 0; i < NUM_UMCS; i++) 1392 channels += !!(pvt->umc[i].sdp_ctrl & UMC_SDP_INIT); 1393 1394 amd64_info("MCT channel count: %d\n", channels); 1395 1396 return channels; 1397 } 1398 1399 static int ddr3_cs_size(unsigned i, bool dct_width) 1400 { 1401 unsigned shift = 0; 1402 int cs_size = 0; 1403 1404 if (i == 0 || i == 3 || i == 4) 1405 cs_size = -1; 1406 else if (i <= 2) 1407 shift = i; 1408 else if (i == 12) 1409 shift = 7; 1410 else if (!(i & 0x1)) 1411 shift = i >> 1; 1412 else 1413 shift = (i + 1) >> 1; 1414 1415 if (cs_size != -1) 1416 cs_size = (128 * (1 << !!dct_width)) << shift; 1417 1418 return cs_size; 1419 } 1420 1421 static int ddr3_lrdimm_cs_size(unsigned i, unsigned rank_multiply) 1422 { 1423 unsigned shift = 0; 1424 int cs_size = 0; 1425 1426 if (i < 4 || i == 6) 1427 cs_size = -1; 1428 else if (i == 12) 1429 shift = 7; 1430 else if (!(i & 0x1)) 1431 shift = i >> 1; 1432 else 1433 shift = (i + 1) >> 1; 1434 1435 if (cs_size != -1) 1436 cs_size = rank_multiply * (128 << shift); 1437 1438 return cs_size; 1439 } 1440 1441 static int ddr4_cs_size(unsigned i) 1442 { 1443 int cs_size = 0; 1444 1445 if (i == 0) 1446 cs_size = -1; 1447 else if (i == 1) 1448 cs_size = 1024; 1449 else 1450 /* Min cs_size = 1G */ 1451 cs_size = 1024 * (1 << (i >> 1)); 1452 1453 return cs_size; 1454 } 1455 1456 static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct, 1457 unsigned cs_mode, int cs_mask_nr) 1458 { 1459 u32 dclr = dct ? 
pvt->dclr1 : pvt->dclr0; 1460 1461 WARN_ON(cs_mode > 11); 1462 1463 if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE) 1464 return ddr3_cs_size(cs_mode, dclr & WIDTH_128); 1465 else 1466 return ddr2_cs_size(cs_mode, dclr & WIDTH_128); 1467 } 1468 1469 /* 1470 * F15h supports only 64bit DCT interfaces 1471 */ 1472 static int f15_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct, 1473 unsigned cs_mode, int cs_mask_nr) 1474 { 1475 WARN_ON(cs_mode > 12); 1476 1477 return ddr3_cs_size(cs_mode, false); 1478 } 1479 1480 /* F15h M60h supports DDR4 mapping as well.. */ 1481 static int f15_m60h_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct, 1482 unsigned cs_mode, int cs_mask_nr) 1483 { 1484 int cs_size; 1485 u32 dcsm = pvt->csels[dct].csmasks[cs_mask_nr]; 1486 1487 WARN_ON(cs_mode > 12); 1488 1489 if (pvt->dram_type == MEM_DDR4) { 1490 if (cs_mode > 9) 1491 return -1; 1492 1493 cs_size = ddr4_cs_size(cs_mode); 1494 } else if (pvt->dram_type == MEM_LRDDR3) { 1495 unsigned rank_multiply = dcsm & 0xf; 1496 1497 if (rank_multiply == 3) 1498 rank_multiply = 4; 1499 cs_size = ddr3_lrdimm_cs_size(cs_mode, rank_multiply); 1500 } else { 1501 /* Minimum cs size is 512mb for F15hM60h*/ 1502 if (cs_mode == 0x1) 1503 return -1; 1504 1505 cs_size = ddr3_cs_size(cs_mode, false); 1506 } 1507 1508 return cs_size; 1509 } 1510 1511 /* 1512 * F16h and F15h model 30h have only limited cs_modes. 1513 */ 1514 static int f16_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct, 1515 unsigned cs_mode, int cs_mask_nr) 1516 { 1517 WARN_ON(cs_mode > 12); 1518 1519 if (cs_mode == 6 || cs_mode == 8 || 1520 cs_mode == 9 || cs_mode == 12) 1521 return -1; 1522 else 1523 return ddr3_cs_size(cs_mode, false); 1524 } 1525 1526 static int f17_base_addr_to_cs_size(struct amd64_pvt *pvt, u8 umc, 1527 unsigned int cs_mode, int csrow_nr) 1528 { 1529 u32 base_addr = pvt->csels[umc].csbases[csrow_nr]; 1530 1531 /* Each mask is used for every two base addresses. */ 1532 u32 addr_mask = pvt->csels[umc].csmasks[csrow_nr >> 1]; 1533 1534 /* Register [31:1] = Address [39:9]. Size is in kBs here. */ 1535 u32 size = ((addr_mask >> 1) - (base_addr >> 1) + 1) >> 1; 1536 1537 edac_dbg(1, "BaseAddr: 0x%x, AddrMask: 0x%x\n", base_addr, addr_mask); 1538 1539 /* Return size in MBs. */ 1540 return size >> 10; 1541 } 1542 1543 static void read_dram_ctl_register(struct amd64_pvt *pvt) 1544 { 1545 1546 if (pvt->fam == 0xf) 1547 return; 1548 1549 if (!amd64_read_pci_cfg(pvt->F2, DCT_SEL_LO, &pvt->dct_sel_lo)) { 1550 edac_dbg(0, "F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n", 1551 pvt->dct_sel_lo, dct_sel_baseaddr(pvt)); 1552 1553 edac_dbg(0, " DCTs operate in %s mode\n", 1554 (dct_ganging_enabled(pvt) ? "ganged" : "unganged")); 1555 1556 if (!dct_ganging_enabled(pvt)) 1557 edac_dbg(0, " Address range split per DCT: %s\n", 1558 (dct_high_range_enabled(pvt) ? "yes" : "no")); 1559 1560 edac_dbg(0, " data interleave for ECC: %s, DRAM cleared since last warm reset: %s\n", 1561 (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"), 1562 (dct_memory_cleared(pvt) ? "yes" : "no")); 1563 1564 edac_dbg(0, " channel interleave: %s, " 1565 "interleave bits selector: 0x%x\n", 1566 (dct_interleave_enabled(pvt) ? "enabled" : "disabled"), 1567 dct_sel_interleave_addr(pvt)); 1568 } 1569 1570 amd64_read_pci_cfg(pvt->F2, DCT_SEL_HI, &pvt->dct_sel_hi); 1571 } 1572 1573 /* 1574 * Determine channel (DCT) based on the interleaving mode (see F15h M30h BKDG, 1575 * 2.10.12 Memory Interleaving Modes). 
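 *
 * With four DCTs interleaved, DctSelIntLvAddr 0x4 takes the channel from
 * sys_addr[9:8] and 0x5 from sys_addr[10:9]; with two DCTs, a non-zero
 * sys_addr[9:8] selects channel 3, otherwise channel 0.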
1576 */ 1577 static u8 f15_m30h_determine_channel(struct amd64_pvt *pvt, u64 sys_addr, 1578 u8 intlv_en, int num_dcts_intlv, 1579 u32 dct_sel) 1580 { 1581 u8 channel = 0; 1582 u8 select; 1583 1584 if (!(intlv_en)) 1585 return (u8)(dct_sel); 1586 1587 if (num_dcts_intlv == 2) { 1588 select = (sys_addr >> 8) & 0x3; 1589 channel = select ? 0x3 : 0; 1590 } else if (num_dcts_intlv == 4) { 1591 u8 intlv_addr = dct_sel_interleave_addr(pvt); 1592 switch (intlv_addr) { 1593 case 0x4: 1594 channel = (sys_addr >> 8) & 0x3; 1595 break; 1596 case 0x5: 1597 channel = (sys_addr >> 9) & 0x3; 1598 break; 1599 } 1600 } 1601 return channel; 1602 } 1603 1604 /* 1605 * Determine channel (DCT) based on the interleaving mode: F10h BKDG, 2.8.9 Memory 1606 * Interleaving Modes. 1607 */ 1608 static u8 f1x_determine_channel(struct amd64_pvt *pvt, u64 sys_addr, 1609 bool hi_range_sel, u8 intlv_en) 1610 { 1611 u8 dct_sel_high = (pvt->dct_sel_lo >> 1) & 1; 1612 1613 if (dct_ganging_enabled(pvt)) 1614 return 0; 1615 1616 if (hi_range_sel) 1617 return dct_sel_high; 1618 1619 /* 1620 * see F2x110[DctSelIntLvAddr] - channel interleave mode 1621 */ 1622 if (dct_interleave_enabled(pvt)) { 1623 u8 intlv_addr = dct_sel_interleave_addr(pvt); 1624 1625 /* return DCT select function: 0=DCT0, 1=DCT1 */ 1626 if (!intlv_addr) 1627 return sys_addr >> 6 & 1; 1628 1629 if (intlv_addr & 0x2) { 1630 u8 shift = intlv_addr & 0x1 ? 9 : 6; 1631 u32 temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) & 1; 1632 1633 return ((sys_addr >> shift) & 1) ^ temp; 1634 } 1635 1636 if (intlv_addr & 0x4) { 1637 u8 shift = intlv_addr & 0x1 ? 9 : 8; 1638 1639 return (sys_addr >> shift) & 1; 1640 } 1641 1642 return (sys_addr >> (12 + hweight8(intlv_en))) & 1; 1643 } 1644 1645 if (dct_high_range_enabled(pvt)) 1646 return ~dct_sel_high & 1; 1647 1648 return 0; 1649 } 1650 1651 /* Convert the sys_addr to the normalized DCT address */ 1652 static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, u8 range, 1653 u64 sys_addr, bool hi_rng, 1654 u32 dct_sel_base_addr) 1655 { 1656 u64 chan_off; 1657 u64 dram_base = get_dram_base(pvt, range); 1658 u64 hole_off = f10_dhar_offset(pvt); 1659 u64 dct_sel_base_off = (u64)(pvt->dct_sel_hi & 0xFFFFFC00) << 16; 1660 1661 if (hi_rng) { 1662 /* 1663 * if 1664 * base address of high range is below 4Gb 1665 * (bits [47:27] at [31:11]) 1666 * DRAM address space on this DCT is hoisted above 4Gb && 1667 * sys_addr > 4Gb 1668 * 1669 * remove hole offset from sys_addr 1670 * else 1671 * remove high range offset from sys_addr 1672 */ 1673 if ((!(dct_sel_base_addr >> 16) || 1674 dct_sel_base_addr < dhar_base(pvt)) && 1675 dhar_valid(pvt) && 1676 (sys_addr >= BIT_64(32))) 1677 chan_off = hole_off; 1678 else 1679 chan_off = dct_sel_base_off; 1680 } else { 1681 /* 1682 * if 1683 * we have a valid hole && 1684 * sys_addr > 4Gb 1685 * 1686 * remove hole 1687 * else 1688 * remove dram base to normalize to DCT address 1689 */ 1690 if (dhar_valid(pvt) && (sys_addr >= BIT_64(32))) 1691 chan_off = hole_off; 1692 else 1693 chan_off = dram_base; 1694 } 1695 1696 return (sys_addr & GENMASK_ULL(47,6)) - (chan_off & GENMASK_ULL(47,23)); 1697 } 1698 1699 /* 1700 * checks if the csrow passed in is marked as SPARED, if so returns the new 1701 * spare row 1702 */ 1703 static int f10_process_possible_spare(struct amd64_pvt *pvt, u8 dct, int csrow) 1704 { 1705 int tmp_cs; 1706 1707 if (online_spare_swap_done(pvt, dct) && 1708 csrow == online_spare_bad_dramcs(pvt, dct)) { 1709 1710 for_each_chip_select(tmp_cs, dct, pvt) { 1711 if (chip_select_base(tmp_cs, 
dct, pvt) & 0x2) { 1712 csrow = tmp_cs; 1713 break; 1714 } 1715 } 1716 } 1717 return csrow; 1718 } 1719 1720 /* 1721 * Iterate over the DRAM DCT "base" and "mask" registers looking for a 1722 * SystemAddr match on the specified 'ChannelSelect' and 'NodeID' 1723 * 1724 * Return: 1725 * -EINVAL: NOT FOUND 1726 * 0..csrow = Chip-Select Row 1727 */ 1728 static int f1x_lookup_addr_in_dct(u64 in_addr, u8 nid, u8 dct) 1729 { 1730 struct mem_ctl_info *mci; 1731 struct amd64_pvt *pvt; 1732 u64 cs_base, cs_mask; 1733 int cs_found = -EINVAL; 1734 int csrow; 1735 1736 mci = edac_mc_find(nid); 1737 if (!mci) 1738 return cs_found; 1739 1740 pvt = mci->pvt_info; 1741 1742 edac_dbg(1, "input addr: 0x%llx, DCT: %d\n", in_addr, dct); 1743 1744 for_each_chip_select(csrow, dct, pvt) { 1745 if (!csrow_enabled(csrow, dct, pvt)) 1746 continue; 1747 1748 get_cs_base_and_mask(pvt, csrow, dct, &cs_base, &cs_mask); 1749 1750 edac_dbg(1, " CSROW=%d CSBase=0x%llx CSMask=0x%llx\n", 1751 csrow, cs_base, cs_mask); 1752 1753 cs_mask = ~cs_mask; 1754 1755 edac_dbg(1, " (InputAddr & ~CSMask)=0x%llx (CSBase & ~CSMask)=0x%llx\n", 1756 (in_addr & cs_mask), (cs_base & cs_mask)); 1757 1758 if ((in_addr & cs_mask) == (cs_base & cs_mask)) { 1759 if (pvt->fam == 0x15 && pvt->model >= 0x30) { 1760 cs_found = csrow; 1761 break; 1762 } 1763 cs_found = f10_process_possible_spare(pvt, dct, csrow); 1764 1765 edac_dbg(1, " MATCH csrow=%d\n", cs_found); 1766 break; 1767 } 1768 } 1769 return cs_found; 1770 } 1771 1772 /* 1773 * See F2x10C. Non-interleaved graphics framebuffer memory under the 16G is 1774 * swapped with a region located at the bottom of memory so that the GPU can use 1775 * the interleaved region and thus two channels. 1776 */ 1777 static u64 f1x_swap_interleaved_region(struct amd64_pvt *pvt, u64 sys_addr) 1778 { 1779 u32 swap_reg, swap_base, swap_limit, rgn_size, tmp_addr; 1780 1781 if (pvt->fam == 0x10) { 1782 /* only revC3 and revE have that feature */ 1783 if (pvt->model < 4 || (pvt->model < 0xa && pvt->stepping < 3)) 1784 return sys_addr; 1785 } 1786 1787 amd64_read_pci_cfg(pvt->F2, SWAP_INTLV_REG, &swap_reg); 1788 1789 if (!(swap_reg & 0x1)) 1790 return sys_addr; 1791 1792 swap_base = (swap_reg >> 3) & 0x7f; 1793 swap_limit = (swap_reg >> 11) & 0x7f; 1794 rgn_size = (swap_reg >> 20) & 0x7f; 1795 tmp_addr = sys_addr >> 27; 1796 1797 if (!(sys_addr >> 34) && 1798 (((tmp_addr >= swap_base) && 1799 (tmp_addr <= swap_limit)) || 1800 (tmp_addr < rgn_size))) 1801 return sys_addr ^ (u64)swap_base << 27; 1802 1803 return sys_addr; 1804 } 1805 1806 /* For a given @dram_range, check if @sys_addr falls within it. */ 1807 static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range, 1808 u64 sys_addr, int *chan_sel) 1809 { 1810 int cs_found = -EINVAL; 1811 u64 chan_addr; 1812 u32 dct_sel_base; 1813 u8 channel; 1814 bool high_range = false; 1815 1816 u8 node_id = dram_dst_node(pvt, range); 1817 u8 intlv_en = dram_intlv_en(pvt, range); 1818 u32 intlv_sel = dram_intlv_sel(pvt, range); 1819 1820 edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n", 1821 range, sys_addr, get_dram_limit(pvt, range)); 1822 1823 if (dhar_valid(pvt) && 1824 dhar_base(pvt) <= sys_addr && 1825 sys_addr < BIT_64(32)) { 1826 amd64_warn("Huh? 
Address is in the MMIO hole: 0x%016llx\n", 1827 sys_addr); 1828 return -EINVAL; 1829 } 1830 1831 if (intlv_en && (intlv_sel != ((sys_addr >> 12) & intlv_en))) 1832 return -EINVAL; 1833 1834 sys_addr = f1x_swap_interleaved_region(pvt, sys_addr); 1835 1836 dct_sel_base = dct_sel_baseaddr(pvt); 1837 1838 /* 1839 * check whether addresses >= DctSelBaseAddr[47:27] are to be used to 1840 * select between DCT0 and DCT1. 1841 */ 1842 if (dct_high_range_enabled(pvt) && 1843 !dct_ganging_enabled(pvt) && 1844 ((sys_addr >> 27) >= (dct_sel_base >> 11))) 1845 high_range = true; 1846 1847 channel = f1x_determine_channel(pvt, sys_addr, high_range, intlv_en); 1848 1849 chan_addr = f1x_get_norm_dct_addr(pvt, range, sys_addr, 1850 high_range, dct_sel_base); 1851 1852 /* Remove node interleaving, see F1x120 */ 1853 if (intlv_en) 1854 chan_addr = ((chan_addr >> (12 + hweight8(intlv_en))) << 12) | 1855 (chan_addr & 0xfff); 1856 1857 /* remove channel interleave */ 1858 if (dct_interleave_enabled(pvt) && 1859 !dct_high_range_enabled(pvt) && 1860 !dct_ganging_enabled(pvt)) { 1861 1862 if (dct_sel_interleave_addr(pvt) != 1) { 1863 if (dct_sel_interleave_addr(pvt) == 0x3) 1864 /* hash 9 */ 1865 chan_addr = ((chan_addr >> 10) << 9) | 1866 (chan_addr & 0x1ff); 1867 else 1868 /* A[6] or hash 6 */ 1869 chan_addr = ((chan_addr >> 7) << 6) | 1870 (chan_addr & 0x3f); 1871 } else 1872 /* A[12] */ 1873 chan_addr = ((chan_addr >> 13) << 12) | 1874 (chan_addr & 0xfff); 1875 } 1876 1877 edac_dbg(1, " Normalized DCT addr: 0x%llx\n", chan_addr); 1878 1879 cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, channel); 1880 1881 if (cs_found >= 0) 1882 *chan_sel = channel; 1883 1884 return cs_found; 1885 } 1886 1887 static int f15_m30h_match_to_this_node(struct amd64_pvt *pvt, unsigned range, 1888 u64 sys_addr, int *chan_sel) 1889 { 1890 int cs_found = -EINVAL; 1891 int num_dcts_intlv = 0; 1892 u64 chan_addr, chan_offset; 1893 u64 dct_base, dct_limit; 1894 u32 dct_cont_base_reg, dct_cont_limit_reg, tmp; 1895 u8 channel, alias_channel, leg_mmio_hole, dct_sel, dct_offset_en; 1896 1897 u64 dhar_offset = f10_dhar_offset(pvt); 1898 u8 intlv_addr = dct_sel_interleave_addr(pvt); 1899 u8 node_id = dram_dst_node(pvt, range); 1900 u8 intlv_en = dram_intlv_en(pvt, range); 1901 1902 amd64_read_pci_cfg(pvt->F1, DRAM_CONT_BASE, &dct_cont_base_reg); 1903 amd64_read_pci_cfg(pvt->F1, DRAM_CONT_LIMIT, &dct_cont_limit_reg); 1904 1905 dct_offset_en = (u8) ((dct_cont_base_reg >> 3) & BIT(0)); 1906 dct_sel = (u8) ((dct_cont_base_reg >> 4) & 0x7); 1907 1908 edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n", 1909 range, sys_addr, get_dram_limit(pvt, range)); 1910 1911 if (!(get_dram_base(pvt, range) <= sys_addr) && 1912 !(get_dram_limit(pvt, range) >= sys_addr)) 1913 return -EINVAL; 1914 1915 if (dhar_valid(pvt) && 1916 dhar_base(pvt) <= sys_addr && 1917 sys_addr < BIT_64(32)) { 1918 amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n", 1919 sys_addr); 1920 return -EINVAL; 1921 } 1922 1923 /* Verify sys_addr is within DCT Range. */ 1924 dct_base = (u64) dct_sel_baseaddr(pvt); 1925 dct_limit = (dct_cont_limit_reg >> 11) & 0x1FFF; 1926 1927 if (!(dct_cont_base_reg & BIT(0)) && 1928 !(dct_base <= (sys_addr >> 27) && 1929 dct_limit >= (sys_addr >> 27))) 1930 return -EINVAL; 1931 1932 /* Verify number of dct's that participate in channel interleaving. 
*/ 1933 num_dcts_intlv = (int) hweight8(intlv_en); 1934 1935 if (!(num_dcts_intlv % 2 == 0) || (num_dcts_intlv > 4)) 1936 return -EINVAL; 1937 1938 if (pvt->model >= 0x60) 1939 channel = f1x_determine_channel(pvt, sys_addr, false, intlv_en); 1940 else 1941 channel = f15_m30h_determine_channel(pvt, sys_addr, intlv_en, 1942 num_dcts_intlv, dct_sel); 1943 1944 /* Verify we stay within the MAX number of channels allowed */ 1945 if (channel > 3) 1946 return -EINVAL; 1947 1948 leg_mmio_hole = (u8) (dct_cont_base_reg >> 1 & BIT(0)); 1949 1950 /* Get normalized DCT addr */ 1951 if (leg_mmio_hole && (sys_addr >= BIT_64(32))) 1952 chan_offset = dhar_offset; 1953 else 1954 chan_offset = dct_base << 27; 1955 1956 chan_addr = sys_addr - chan_offset; 1957 1958 /* remove channel interleave */ 1959 if (num_dcts_intlv == 2) { 1960 if (intlv_addr == 0x4) 1961 chan_addr = ((chan_addr >> 9) << 8) | 1962 (chan_addr & 0xff); 1963 else if (intlv_addr == 0x5) 1964 chan_addr = ((chan_addr >> 10) << 9) | 1965 (chan_addr & 0x1ff); 1966 else 1967 return -EINVAL; 1968 1969 } else if (num_dcts_intlv == 4) { 1970 if (intlv_addr == 0x4) 1971 chan_addr = ((chan_addr >> 10) << 8) | 1972 (chan_addr & 0xff); 1973 else if (intlv_addr == 0x5) 1974 chan_addr = ((chan_addr >> 11) << 9) | 1975 (chan_addr & 0x1ff); 1976 else 1977 return -EINVAL; 1978 } 1979 1980 if (dct_offset_en) { 1981 amd64_read_pci_cfg(pvt->F1, 1982 DRAM_CONT_HIGH_OFF + (int) channel * 4, 1983 &tmp); 1984 chan_addr += (u64) ((tmp >> 11) & 0xfff) << 27; 1985 } 1986 1987 f15h_select_dct(pvt, channel); 1988 1989 edac_dbg(1, " Normalized DCT addr: 0x%llx\n", chan_addr); 1990 1991 /* 1992 * Find Chip select: 1993 * if channel = 3, then alias it to 1. This is because, in F15 M30h, 1994 * there is support for 4 DCT's, but only 2 are currently functional. 1995 * They are DCT0 and DCT3. But we have read all registers of DCT3 into 1996 * pvt->csels[1]. So we need to use '1' here to get correct info. 1997 * Refer F15 M30h BKDG Section 2.10 and 2.10.3 for clarifications. 1998 */ 1999 alias_channel = (channel == 3) ? 1 : channel; 2000 2001 cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, alias_channel); 2002 2003 if (cs_found >= 0) 2004 *chan_sel = alias_channel; 2005 2006 return cs_found; 2007 } 2008 2009 static int f1x_translate_sysaddr_to_cs(struct amd64_pvt *pvt, 2010 u64 sys_addr, 2011 int *chan_sel) 2012 { 2013 int cs_found = -EINVAL; 2014 unsigned range; 2015 2016 for (range = 0; range < DRAM_RANGES; range++) { 2017 if (!dram_rw(pvt, range)) 2018 continue; 2019 2020 if (pvt->fam == 0x15 && pvt->model >= 0x30) 2021 cs_found = f15_m30h_match_to_this_node(pvt, range, 2022 sys_addr, 2023 chan_sel); 2024 2025 else if ((get_dram_base(pvt, range) <= sys_addr) && 2026 (get_dram_limit(pvt, range) >= sys_addr)) { 2027 cs_found = f1x_match_to_this_node(pvt, range, 2028 sys_addr, chan_sel); 2029 if (cs_found >= 0) 2030 break; 2031 } 2032 } 2033 return cs_found; 2034 } 2035 2036 /* 2037 * For reference see "2.8.5 Routing DRAM Requests" in F10 BKDG. This code maps 2038 * a @sys_addr to NodeID, DCT (channel) and chip select (CSROW). 2039 * 2040 * The @sys_addr is usually an error address received from the hardware 2041 * (MCX_ADDR). 
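 *
 * The results are passed back through @err: the page/offset derived from
 * @sys_addr plus the matching csrow and channel (the latter taken from the
 * ECC syndrome when the DCTs are ganged).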
2042 */
2043 static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
2044 struct err_info *err)
2045 {
2046 struct amd64_pvt *pvt = mci->pvt_info;
2047
2048 error_address_to_page_and_offset(sys_addr, err);
2049
2050 err->csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &err->channel);
2051 if (err->csrow < 0) {
2052 err->err_code = ERR_CSROW;
2053 return;
2054 }
2055
2056 /*
2057 * We need the syndromes for channel detection only when we're
2058 * ganged. Otherwise @err->channel should already contain the channel at
2059 * this point.
2060 */
2061 if (dct_ganging_enabled(pvt))
2062 err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome);
2063 }
2064
2065 /*
2066 * debug routine to display the memory sizes of all logical DIMMs and their
2067 * CSROWs
2068 */
2069 static void debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
2070 {
2071 int dimm, size0, size1;
2072 u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases;
2073 u32 dbam = ctrl ? pvt->dbam1 : pvt->dbam0;
2074
2075 if (pvt->fam == 0xf) {
2076 /* K8 families < revF not supported yet */
2077 if (pvt->ext_model < K8_REV_F)
2078 return;
2079 else
2080 WARN_ON(ctrl != 0);
2081 }
2082
2083 if (pvt->fam == 0x10) {
2084 dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1
2085 : pvt->dbam0;
2086 dcsb = (ctrl && !dct_ganging_enabled(pvt)) ?
2087 pvt->csels[1].csbases :
2088 pvt->csels[0].csbases;
2089 } else if (ctrl) {
2090 dbam = pvt->dbam0;
2091 dcsb = pvt->csels[1].csbases;
2092 }
2093 edac_dbg(1, "F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n",
2094 ctrl, dbam);
2095
2096 edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);
2097
2098 /* Dump memory sizes for each DIMM and its CSROWs */
2099 for (dimm = 0; dimm < 4; dimm++) {
2100
2101 size0 = 0;
2102 if (dcsb[dimm*2] & DCSB_CS_ENABLE)
2103 /*
2104 * For F15m60h, we need a multiplier for the LRDIMM cs_size
2105 * calculation. We pass the dimm value to the dbam_to_cs
2106 * mapper so we can find the multiplier from the
2107 * corresponding DCSM.
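 * DBAM_DIMM() below essentially extracts the per-DIMM cs_mode nibble from
 * the DBAM register (the DBAM layout is described in the comment above
 * get_csrow_nr_pages() further down).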
2108 */ 2109 size0 = pvt->ops->dbam_to_cs(pvt, ctrl, 2110 DBAM_DIMM(dimm, dbam), 2111 dimm); 2112 2113 size1 = 0; 2114 if (dcsb[dimm*2 + 1] & DCSB_CS_ENABLE) 2115 size1 = pvt->ops->dbam_to_cs(pvt, ctrl, 2116 DBAM_DIMM(dimm, dbam), 2117 dimm); 2118 2119 amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n", 2120 dimm * 2, size0, 2121 dimm * 2 + 1, size1); 2122 } 2123 } 2124 2125 static struct amd64_family_type family_types[] = { 2126 [K8_CPUS] = { 2127 .ctl_name = "K8", 2128 .f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP, 2129 .f2_id = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL, 2130 .ops = { 2131 .early_channel_count = k8_early_channel_count, 2132 .map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow, 2133 .dbam_to_cs = k8_dbam_to_chip_select, 2134 } 2135 }, 2136 [F10_CPUS] = { 2137 .ctl_name = "F10h", 2138 .f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP, 2139 .f2_id = PCI_DEVICE_ID_AMD_10H_NB_DRAM, 2140 .ops = { 2141 .early_channel_count = f1x_early_channel_count, 2142 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow, 2143 .dbam_to_cs = f10_dbam_to_chip_select, 2144 } 2145 }, 2146 [F15_CPUS] = { 2147 .ctl_name = "F15h", 2148 .f1_id = PCI_DEVICE_ID_AMD_15H_NB_F1, 2149 .f2_id = PCI_DEVICE_ID_AMD_15H_NB_F2, 2150 .ops = { 2151 .early_channel_count = f1x_early_channel_count, 2152 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow, 2153 .dbam_to_cs = f15_dbam_to_chip_select, 2154 } 2155 }, 2156 [F15_M30H_CPUS] = { 2157 .ctl_name = "F15h_M30h", 2158 .f1_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1, 2159 .f2_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F2, 2160 .ops = { 2161 .early_channel_count = f1x_early_channel_count, 2162 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow, 2163 .dbam_to_cs = f16_dbam_to_chip_select, 2164 } 2165 }, 2166 [F15_M60H_CPUS] = { 2167 .ctl_name = "F15h_M60h", 2168 .f1_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F1, 2169 .f2_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F2, 2170 .ops = { 2171 .early_channel_count = f1x_early_channel_count, 2172 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow, 2173 .dbam_to_cs = f15_m60h_dbam_to_chip_select, 2174 } 2175 }, 2176 [F16_CPUS] = { 2177 .ctl_name = "F16h", 2178 .f1_id = PCI_DEVICE_ID_AMD_16H_NB_F1, 2179 .f2_id = PCI_DEVICE_ID_AMD_16H_NB_F2, 2180 .ops = { 2181 .early_channel_count = f1x_early_channel_count, 2182 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow, 2183 .dbam_to_cs = f16_dbam_to_chip_select, 2184 } 2185 }, 2186 [F16_M30H_CPUS] = { 2187 .ctl_name = "F16h_M30h", 2188 .f1_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F1, 2189 .f2_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F2, 2190 .ops = { 2191 .early_channel_count = f1x_early_channel_count, 2192 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow, 2193 .dbam_to_cs = f16_dbam_to_chip_select, 2194 } 2195 }, 2196 [F17_CPUS] = { 2197 .ctl_name = "F17h", 2198 .f0_id = PCI_DEVICE_ID_AMD_17H_DF_F0, 2199 .f6_id = PCI_DEVICE_ID_AMD_17H_DF_F6, 2200 .ops = { 2201 .early_channel_count = f17_early_channel_count, 2202 .dbam_to_cs = f17_base_addr_to_cs_size, 2203 } 2204 }, 2205 [F17_M10H_CPUS] = { 2206 .ctl_name = "F17h_M10h", 2207 .f0_id = PCI_DEVICE_ID_AMD_17H_M10H_DF_F0, 2208 .f6_id = PCI_DEVICE_ID_AMD_17H_M10H_DF_F6, 2209 .ops = { 2210 .early_channel_count = f17_early_channel_count, 2211 .dbam_to_cs = f17_base_addr_to_cs_size, 2212 } 2213 }, 2214 }; 2215 2216 /* 2217 * These are tables of eigenvectors (one per line) which can be used for the 2218 * construction of the syndrome tables. The modified syndrome search algorithm 2219 * uses those to find the symbol in error and thus the DIMM. 2220 * 2221 * Algorithm courtesy of Ross LaFetra from AMD. 
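 *
 * decode_syndrome() below walks these in groups of ecc_sym_sz entries (4 for
 * x4, 8 for x8 symbols): each group is one symbol's eigenvector set, and the
 * group which XORs the syndrome down to zero identifies the symbol in error.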
2222 */ 2223 static const u16 x4_vectors[] = { 2224 0x2f57, 0x1afe, 0x66cc, 0xdd88, 2225 0x11eb, 0x3396, 0x7f4c, 0xeac8, 2226 0x0001, 0x0002, 0x0004, 0x0008, 2227 0x1013, 0x3032, 0x4044, 0x8088, 2228 0x106b, 0x30d6, 0x70fc, 0xe0a8, 2229 0x4857, 0xc4fe, 0x13cc, 0x3288, 2230 0x1ac5, 0x2f4a, 0x5394, 0xa1e8, 2231 0x1f39, 0x251e, 0xbd6c, 0x6bd8, 2232 0x15c1, 0x2a42, 0x89ac, 0x4758, 2233 0x2b03, 0x1602, 0x4f0c, 0xca08, 2234 0x1f07, 0x3a0e, 0x6b04, 0xbd08, 2235 0x8ba7, 0x465e, 0x244c, 0x1cc8, 2236 0x2b87, 0x164e, 0x642c, 0xdc18, 2237 0x40b9, 0x80de, 0x1094, 0x20e8, 2238 0x27db, 0x1eb6, 0x9dac, 0x7b58, 2239 0x11c1, 0x2242, 0x84ac, 0x4c58, 2240 0x1be5, 0x2d7a, 0x5e34, 0xa718, 2241 0x4b39, 0x8d1e, 0x14b4, 0x28d8, 2242 0x4c97, 0xc87e, 0x11fc, 0x33a8, 2243 0x8e97, 0x497e, 0x2ffc, 0x1aa8, 2244 0x16b3, 0x3d62, 0x4f34, 0x8518, 2245 0x1e2f, 0x391a, 0x5cac, 0xf858, 2246 0x1d9f, 0x3b7a, 0x572c, 0xfe18, 2247 0x15f5, 0x2a5a, 0x5264, 0xa3b8, 2248 0x1dbb, 0x3b66, 0x715c, 0xe3f8, 2249 0x4397, 0xc27e, 0x17fc, 0x3ea8, 2250 0x1617, 0x3d3e, 0x6464, 0xb8b8, 2251 0x23ff, 0x12aa, 0xab6c, 0x56d8, 2252 0x2dfb, 0x1ba6, 0x913c, 0x7328, 2253 0x185d, 0x2ca6, 0x7914, 0x9e28, 2254 0x171b, 0x3e36, 0x7d7c, 0xebe8, 2255 0x4199, 0x82ee, 0x19f4, 0x2e58, 2256 0x4807, 0xc40e, 0x130c, 0x3208, 2257 0x1905, 0x2e0a, 0x5804, 0xac08, 2258 0x213f, 0x132a, 0xadfc, 0x5ba8, 2259 0x19a9, 0x2efe, 0xb5cc, 0x6f88, 2260 }; 2261 2262 static const u16 x8_vectors[] = { 2263 0x0145, 0x028a, 0x2374, 0x43c8, 0xa1f0, 0x0520, 0x0a40, 0x1480, 2264 0x0211, 0x0422, 0x0844, 0x1088, 0x01b0, 0x44e0, 0x23c0, 0xed80, 2265 0x1011, 0x0116, 0x022c, 0x0458, 0x08b0, 0x8c60, 0x2740, 0x4e80, 2266 0x0411, 0x0822, 0x1044, 0x0158, 0x02b0, 0x2360, 0x46c0, 0xab80, 2267 0x0811, 0x1022, 0x012c, 0x0258, 0x04b0, 0x4660, 0x8cc0, 0x2780, 2268 0x2071, 0x40e2, 0xa0c4, 0x0108, 0x0210, 0x0420, 0x0840, 0x1080, 2269 0x4071, 0x80e2, 0x0104, 0x0208, 0x0410, 0x0820, 0x1040, 0x2080, 2270 0x8071, 0x0102, 0x0204, 0x0408, 0x0810, 0x1020, 0x2040, 0x4080, 2271 0x019d, 0x03d6, 0x136c, 0x2198, 0x50b0, 0xb2e0, 0x0740, 0x0e80, 2272 0x0189, 0x03ea, 0x072c, 0x0e58, 0x1cb0, 0x56e0, 0x37c0, 0xf580, 2273 0x01fd, 0x0376, 0x06ec, 0x0bb8, 0x1110, 0x2220, 0x4440, 0x8880, 2274 0x0163, 0x02c6, 0x1104, 0x0758, 0x0eb0, 0x2be0, 0x6140, 0xc280, 2275 0x02fd, 0x01c6, 0x0b5c, 0x1108, 0x07b0, 0x25a0, 0x8840, 0x6180, 2276 0x0801, 0x012e, 0x025c, 0x04b8, 0x1370, 0x26e0, 0x57c0, 0xb580, 2277 0x0401, 0x0802, 0x015c, 0x02b8, 0x22b0, 0x13e0, 0x7140, 0xe280, 2278 0x0201, 0x0402, 0x0804, 0x01b8, 0x11b0, 0x31a0, 0x8040, 0x7180, 2279 0x0101, 0x0202, 0x0404, 0x0808, 0x1010, 0x2020, 0x4040, 0x8080, 2280 0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080, 2281 0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000, 2282 }; 2283 2284 static int decode_syndrome(u16 syndrome, const u16 *vectors, unsigned num_vecs, 2285 unsigned v_dim) 2286 { 2287 unsigned int i, err_sym; 2288 2289 for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) { 2290 u16 s = syndrome; 2291 unsigned v_idx = err_sym * v_dim; 2292 unsigned v_end = (err_sym + 1) * v_dim; 2293 2294 /* walk over all 16 bits of the syndrome */ 2295 for (i = 1; i < (1U << 16); i <<= 1) { 2296 2297 /* if bit is set in that eigenvector... */ 2298 if (v_idx < v_end && vectors[v_idx] & i) { 2299 u16 ev_comp = vectors[v_idx++]; 2300 2301 /* ... and bit set in the modified syndrome, */ 2302 if (s & i) { 2303 /* remove it. 
*/ 2304 s ^= ev_comp; 2305 2306 if (!s) 2307 return err_sym; 2308 } 2309 2310 } else if (s & i) 2311 /* can't get to zero, move to next symbol */ 2312 break; 2313 } 2314 } 2315 2316 edac_dbg(0, "syndrome(%x) not found\n", syndrome); 2317 return -1; 2318 } 2319 2320 static int map_err_sym_to_channel(int err_sym, int sym_size) 2321 { 2322 if (sym_size == 4) 2323 switch (err_sym) { 2324 case 0x20: 2325 case 0x21: 2326 return 0; 2327 break; 2328 case 0x22: 2329 case 0x23: 2330 return 1; 2331 break; 2332 default: 2333 return err_sym >> 4; 2334 break; 2335 } 2336 /* x8 symbols */ 2337 else 2338 switch (err_sym) { 2339 /* imaginary bits not in a DIMM */ 2340 case 0x10: 2341 WARN(1, KERN_ERR "Invalid error symbol: 0x%x\n", 2342 err_sym); 2343 return -1; 2344 break; 2345 2346 case 0x11: 2347 return 0; 2348 break; 2349 case 0x12: 2350 return 1; 2351 break; 2352 default: 2353 return err_sym >> 3; 2354 break; 2355 } 2356 return -1; 2357 } 2358 2359 static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome) 2360 { 2361 struct amd64_pvt *pvt = mci->pvt_info; 2362 int err_sym = -1; 2363 2364 if (pvt->ecc_sym_sz == 8) 2365 err_sym = decode_syndrome(syndrome, x8_vectors, 2366 ARRAY_SIZE(x8_vectors), 2367 pvt->ecc_sym_sz); 2368 else if (pvt->ecc_sym_sz == 4) 2369 err_sym = decode_syndrome(syndrome, x4_vectors, 2370 ARRAY_SIZE(x4_vectors), 2371 pvt->ecc_sym_sz); 2372 else { 2373 amd64_warn("Illegal syndrome type: %u\n", pvt->ecc_sym_sz); 2374 return err_sym; 2375 } 2376 2377 return map_err_sym_to_channel(err_sym, pvt->ecc_sym_sz); 2378 } 2379 2380 static void __log_ecc_error(struct mem_ctl_info *mci, struct err_info *err, 2381 u8 ecc_type) 2382 { 2383 enum hw_event_mc_err_type err_type; 2384 const char *string; 2385 2386 if (ecc_type == 2) 2387 err_type = HW_EVENT_ERR_CORRECTED; 2388 else if (ecc_type == 1) 2389 err_type = HW_EVENT_ERR_UNCORRECTED; 2390 else if (ecc_type == 3) 2391 err_type = HW_EVENT_ERR_DEFERRED; 2392 else { 2393 WARN(1, "Something is rotten in the state of Denmark.\n"); 2394 return; 2395 } 2396 2397 switch (err->err_code) { 2398 case DECODE_OK: 2399 string = ""; 2400 break; 2401 case ERR_NODE: 2402 string = "Failed to map error addr to a node"; 2403 break; 2404 case ERR_CSROW: 2405 string = "Failed to map error addr to a csrow"; 2406 break; 2407 case ERR_CHANNEL: 2408 string = "Unknown syndrome - possible error reporting race"; 2409 break; 2410 case ERR_SYND: 2411 string = "MCA_SYND not valid - unknown syndrome and csrow"; 2412 break; 2413 case ERR_NORM_ADDR: 2414 string = "Cannot decode normalized address"; 2415 break; 2416 default: 2417 string = "WTF error"; 2418 break; 2419 } 2420 2421 edac_mc_handle_error(err_type, mci, 1, 2422 err->page, err->offset, err->syndrome, 2423 err->csrow, err->channel, -1, 2424 string, ""); 2425 } 2426 2427 static inline void decode_bus_error(int node_id, struct mce *m) 2428 { 2429 struct mem_ctl_info *mci; 2430 struct amd64_pvt *pvt; 2431 u8 ecc_type = (m->status >> 45) & 0x3; 2432 u8 xec = XEC(m->status, 0x1f); 2433 u16 ec = EC(m->status); 2434 u64 sys_addr; 2435 struct err_info err; 2436 2437 mci = edac_mc_find(node_id); 2438 if (!mci) 2439 return; 2440 2441 pvt = mci->pvt_info; 2442 2443 /* Bail out early if this was an 'observed' error */ 2444 if (PP(ec) == NBSL_PP_OBS) 2445 return; 2446 2447 /* Do only ECC errors */ 2448 if (xec && xec != F10_NBSL_EXT_ERR_ECC) 2449 return; 2450 2451 memset(&err, 0, sizeof(err)); 2452 2453 sys_addr = get_error_address(pvt, m); 2454 2455 if (ecc_type == 2) 2456 err.syndrome = 
extract_syndrome(m->status); 2457 2458 pvt->ops->map_sysaddr_to_csrow(mci, sys_addr, &err); 2459 2460 __log_ecc_error(mci, &err, ecc_type); 2461 } 2462 2463 /* 2464 * To find the UMC channel represented by this bank we need to match on its 2465 * instance_id. The instance_id of a bank is held in the lower 32 bits of its 2466 * IPID. 2467 */ 2468 static int find_umc_channel(struct amd64_pvt *pvt, struct mce *m) 2469 { 2470 u32 umc_instance_id[] = {0x50f00, 0x150f00}; 2471 u32 instance_id = m->ipid & GENMASK(31, 0); 2472 int i, channel = -1; 2473 2474 for (i = 0; i < ARRAY_SIZE(umc_instance_id); i++) 2475 if (umc_instance_id[i] == instance_id) 2476 channel = i; 2477 2478 return channel; 2479 } 2480 2481 static void decode_umc_error(int node_id, struct mce *m) 2482 { 2483 u8 ecc_type = (m->status >> 45) & 0x3; 2484 struct mem_ctl_info *mci; 2485 struct amd64_pvt *pvt; 2486 struct err_info err; 2487 u64 sys_addr; 2488 2489 mci = edac_mc_find(node_id); 2490 if (!mci) 2491 return; 2492 2493 pvt = mci->pvt_info; 2494 2495 memset(&err, 0, sizeof(err)); 2496 2497 if (m->status & MCI_STATUS_DEFERRED) 2498 ecc_type = 3; 2499 2500 err.channel = find_umc_channel(pvt, m); 2501 if (err.channel < 0) { 2502 err.err_code = ERR_CHANNEL; 2503 goto log_error; 2504 } 2505 2506 if (umc_normaddr_to_sysaddr(m->addr, pvt->mc_node_id, err.channel, &sys_addr)) { 2507 err.err_code = ERR_NORM_ADDR; 2508 goto log_error; 2509 } 2510 2511 error_address_to_page_and_offset(sys_addr, &err); 2512 2513 if (!(m->status & MCI_STATUS_SYNDV)) { 2514 err.err_code = ERR_SYND; 2515 goto log_error; 2516 } 2517 2518 if (ecc_type == 2) { 2519 u8 length = (m->synd >> 18) & 0x3f; 2520 2521 if (length) 2522 err.syndrome = (m->synd >> 32) & GENMASK(length - 1, 0); 2523 else 2524 err.err_code = ERR_CHANNEL; 2525 } 2526 2527 err.csrow = m->synd & 0x7; 2528 2529 log_error: 2530 __log_ecc_error(mci, &err, ecc_type); 2531 } 2532 2533 /* 2534 * Use pvt->F3 which contains the F3 CPU PCI device to get the related 2535 * F1 (AddrMap) and F2 (Dct) devices. Return negative value on error. 2536 * Reserve F0 and F6 on systems with a UMC. 
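 * The PCI device references taken here are dropped again in
 * free_mc_sibling_devs().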
2537 */ 2538 static int 2539 reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 pci_id1, u16 pci_id2) 2540 { 2541 if (pvt->umc) { 2542 pvt->F0 = pci_get_related_function(pvt->F3->vendor, pci_id1, pvt->F3); 2543 if (!pvt->F0) { 2544 amd64_err("F0 not found, device 0x%x (broken BIOS?)\n", pci_id1); 2545 return -ENODEV; 2546 } 2547 2548 pvt->F6 = pci_get_related_function(pvt->F3->vendor, pci_id2, pvt->F3); 2549 if (!pvt->F6) { 2550 pci_dev_put(pvt->F0); 2551 pvt->F0 = NULL; 2552 2553 amd64_err("F6 not found: device 0x%x (broken BIOS?)\n", pci_id2); 2554 return -ENODEV; 2555 } 2556 2557 edac_dbg(1, "F0: %s\n", pci_name(pvt->F0)); 2558 edac_dbg(1, "F3: %s\n", pci_name(pvt->F3)); 2559 edac_dbg(1, "F6: %s\n", pci_name(pvt->F6)); 2560 2561 return 0; 2562 } 2563 2564 /* Reserve the ADDRESS MAP Device */ 2565 pvt->F1 = pci_get_related_function(pvt->F3->vendor, pci_id1, pvt->F3); 2566 if (!pvt->F1) { 2567 amd64_err("F1 not found: device 0x%x (broken BIOS?)\n", pci_id1); 2568 return -ENODEV; 2569 } 2570 2571 /* Reserve the DCT Device */ 2572 pvt->F2 = pci_get_related_function(pvt->F3->vendor, pci_id2, pvt->F3); 2573 if (!pvt->F2) { 2574 pci_dev_put(pvt->F1); 2575 pvt->F1 = NULL; 2576 2577 amd64_err("F2 not found: device 0x%x (broken BIOS?)\n", pci_id2); 2578 return -ENODEV; 2579 } 2580 2581 edac_dbg(1, "F1: %s\n", pci_name(pvt->F1)); 2582 edac_dbg(1, "F2: %s\n", pci_name(pvt->F2)); 2583 edac_dbg(1, "F3: %s\n", pci_name(pvt->F3)); 2584 2585 return 0; 2586 } 2587 2588 static void free_mc_sibling_devs(struct amd64_pvt *pvt) 2589 { 2590 if (pvt->umc) { 2591 pci_dev_put(pvt->F0); 2592 pci_dev_put(pvt->F6); 2593 } else { 2594 pci_dev_put(pvt->F1); 2595 pci_dev_put(pvt->F2); 2596 } 2597 } 2598 2599 static void determine_ecc_sym_sz(struct amd64_pvt *pvt) 2600 { 2601 pvt->ecc_sym_sz = 4; 2602 2603 if (pvt->umc) { 2604 u8 i; 2605 2606 for (i = 0; i < NUM_UMCS; i++) { 2607 /* Check enabled channels only: */ 2608 if ((pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) && 2609 (pvt->umc[i].ecc_ctrl & BIT(7))) { 2610 pvt->ecc_sym_sz = 8; 2611 break; 2612 } 2613 } 2614 2615 return; 2616 } 2617 2618 if (pvt->fam >= 0x10) { 2619 u32 tmp; 2620 2621 amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp); 2622 /* F16h has only DCT0, so no need to read dbam1. */ 2623 if (pvt->fam != 0x16) 2624 amd64_read_dct_pci_cfg(pvt, 1, DBAM0, &pvt->dbam1); 2625 2626 /* F10h, revD and later can do x8 ECC too. */ 2627 if ((pvt->fam > 0x10 || pvt->model > 7) && tmp & BIT(25)) 2628 pvt->ecc_sym_sz = 8; 2629 } 2630 } 2631 2632 /* 2633 * Retrieve the hardware registers of the memory controller. 
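 * On UMC-equipped systems (pvt->umc set) most of these registers live in SMN
 * space and are read per UMC channel via amd_smn_read().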
2634 */ 2635 static void __read_mc_regs_df(struct amd64_pvt *pvt) 2636 { 2637 u8 nid = pvt->mc_node_id; 2638 struct amd64_umc *umc; 2639 u32 i, umc_base; 2640 2641 /* Read registers from each UMC */ 2642 for (i = 0; i < NUM_UMCS; i++) { 2643 2644 umc_base = get_umc_base(i); 2645 umc = &pvt->umc[i]; 2646 2647 amd_smn_read(nid, umc_base + UMCCH_DIMM_CFG, &umc->dimm_cfg); 2648 amd_smn_read(nid, umc_base + UMCCH_UMC_CFG, &umc->umc_cfg); 2649 amd_smn_read(nid, umc_base + UMCCH_SDP_CTRL, &umc->sdp_ctrl); 2650 amd_smn_read(nid, umc_base + UMCCH_ECC_CTRL, &umc->ecc_ctrl); 2651 amd_smn_read(nid, umc_base + UMCCH_UMC_CAP_HI, &umc->umc_cap_hi); 2652 } 2653 } 2654 2655 /* 2656 * Retrieve the hardware registers of the memory controller (this includes the 2657 * 'Address Map' and 'Misc' device regs) 2658 */ 2659 static void read_mc_regs(struct amd64_pvt *pvt) 2660 { 2661 unsigned int range; 2662 u64 msr_val; 2663 2664 /* 2665 * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since 2666 * those are Read-As-Zero. 2667 */ 2668 rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem); 2669 edac_dbg(0, " TOP_MEM: 0x%016llx\n", pvt->top_mem); 2670 2671 /* Check first whether TOP_MEM2 is enabled: */ 2672 rdmsrl(MSR_K8_SYSCFG, msr_val); 2673 if (msr_val & BIT(21)) { 2674 rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2); 2675 edac_dbg(0, " TOP_MEM2: 0x%016llx\n", pvt->top_mem2); 2676 } else { 2677 edac_dbg(0, " TOP_MEM2 disabled\n"); 2678 } 2679 2680 if (pvt->umc) { 2681 __read_mc_regs_df(pvt); 2682 amd64_read_pci_cfg(pvt->F0, DF_DHAR, &pvt->dhar); 2683 2684 goto skip; 2685 } 2686 2687 amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap); 2688 2689 read_dram_ctl_register(pvt); 2690 2691 for (range = 0; range < DRAM_RANGES; range++) { 2692 u8 rw; 2693 2694 /* read settings for this DRAM range */ 2695 read_dram_base_limit_regs(pvt, range); 2696 2697 rw = dram_rw(pvt, range); 2698 if (!rw) 2699 continue; 2700 2701 edac_dbg(1, " DRAM range[%d], base: 0x%016llx; limit: 0x%016llx\n", 2702 range, 2703 get_dram_base(pvt, range), 2704 get_dram_limit(pvt, range)); 2705 2706 edac_dbg(1, " IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d\n", 2707 dram_intlv_en(pvt, range) ? "Enabled" : "Disabled", 2708 (rw & 0x1) ? "R" : "-", 2709 (rw & 0x2) ? 
"W" : "-", 2710 dram_intlv_sel(pvt, range), 2711 dram_dst_node(pvt, range)); 2712 } 2713 2714 amd64_read_pci_cfg(pvt->F1, DHAR, &pvt->dhar); 2715 amd64_read_dct_pci_cfg(pvt, 0, DBAM0, &pvt->dbam0); 2716 2717 amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare); 2718 2719 amd64_read_dct_pci_cfg(pvt, 0, DCLR0, &pvt->dclr0); 2720 amd64_read_dct_pci_cfg(pvt, 0, DCHR0, &pvt->dchr0); 2721 2722 if (!dct_ganging_enabled(pvt)) { 2723 amd64_read_dct_pci_cfg(pvt, 1, DCLR0, &pvt->dclr1); 2724 amd64_read_dct_pci_cfg(pvt, 1, DCHR0, &pvt->dchr1); 2725 } 2726 2727 skip: 2728 read_dct_base_mask(pvt); 2729 2730 determine_memory_type(pvt); 2731 edac_dbg(1, " DIMM type: %s\n", edac_mem_types[pvt->dram_type]); 2732 2733 determine_ecc_sym_sz(pvt); 2734 2735 dump_misc_regs(pvt); 2736 } 2737 2738 /* 2739 * NOTE: CPU Revision Dependent code 2740 * 2741 * Input: 2742 * @csrow_nr ChipSelect Row Number (0..NUM_CHIPSELECTS-1) 2743 * k8 private pointer to --> 2744 * DRAM Bank Address mapping register 2745 * node_id 2746 * DCL register where dual_channel_active is 2747 * 2748 * The DBAM register consists of 4 sets of 4 bits each definitions: 2749 * 2750 * Bits: CSROWs 2751 * 0-3 CSROWs 0 and 1 2752 * 4-7 CSROWs 2 and 3 2753 * 8-11 CSROWs 4 and 5 2754 * 12-15 CSROWs 6 and 7 2755 * 2756 * Values range from: 0 to 15 2757 * The meaning of the values depends on CPU revision and dual-channel state, 2758 * see relevant BKDG more info. 2759 * 2760 * The memory controller provides for total of only 8 CSROWs in its current 2761 * architecture. Each "pair" of CSROWs normally represents just one DIMM in 2762 * single channel or two (2) DIMMs in dual channel mode. 2763 * 2764 * The following code logic collapses the various tables for CSROW based on CPU 2765 * revision. 2766 * 2767 * Returns: 2768 * The number of PAGE_SIZE pages on the specified CSROW number it 2769 * encompasses 2770 * 2771 */ 2772 static u32 get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr_orig) 2773 { 2774 u32 dbam = dct ? pvt->dbam1 : pvt->dbam0; 2775 int csrow_nr = csrow_nr_orig; 2776 u32 cs_mode, nr_pages; 2777 2778 if (!pvt->umc) 2779 csrow_nr >>= 1; 2780 2781 cs_mode = DBAM_DIMM(csrow_nr, dbam); 2782 2783 nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode, csrow_nr); 2784 nr_pages <<= 20 - PAGE_SHIFT; 2785 2786 edac_dbg(0, "csrow: %d, channel: %d, DBAM idx: %d\n", 2787 csrow_nr_orig, dct, cs_mode); 2788 edac_dbg(0, "nr_pages/channel: %u\n", nr_pages); 2789 2790 return nr_pages; 2791 } 2792 2793 /* 2794 * Initialize the array of csrow attribute instances, based on the values 2795 * from pci config hardware registers. 2796 */ 2797 static int init_csrows(struct mem_ctl_info *mci) 2798 { 2799 struct amd64_pvt *pvt = mci->pvt_info; 2800 enum edac_type edac_mode = EDAC_NONE; 2801 struct csrow_info *csrow; 2802 struct dimm_info *dimm; 2803 int i, j, empty = 1; 2804 int nr_pages = 0; 2805 u32 val; 2806 2807 if (!pvt->umc) { 2808 amd64_read_pci_cfg(pvt->F3, NBCFG, &val); 2809 2810 pvt->nbcfg = val; 2811 2812 edac_dbg(0, "node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n", 2813 pvt->mc_node_id, val, 2814 !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE)); 2815 } 2816 2817 /* 2818 * We iterate over DCT0 here but we look at DCT1 in parallel, if needed. 
2819 */ 2820 for_each_chip_select(i, 0, pvt) { 2821 bool row_dct0 = !!csrow_enabled(i, 0, pvt); 2822 bool row_dct1 = false; 2823 2824 if (pvt->fam != 0xf) 2825 row_dct1 = !!csrow_enabled(i, 1, pvt); 2826 2827 if (!row_dct0 && !row_dct1) 2828 continue; 2829 2830 csrow = mci->csrows[i]; 2831 empty = 0; 2832 2833 edac_dbg(1, "MC node: %d, csrow: %d\n", 2834 pvt->mc_node_id, i); 2835 2836 if (row_dct0) { 2837 nr_pages = get_csrow_nr_pages(pvt, 0, i); 2838 csrow->channels[0]->dimm->nr_pages = nr_pages; 2839 } 2840 2841 /* K8 has only one DCT */ 2842 if (pvt->fam != 0xf && row_dct1) { 2843 int row_dct1_pages = get_csrow_nr_pages(pvt, 1, i); 2844 2845 csrow->channels[1]->dimm->nr_pages = row_dct1_pages; 2846 nr_pages += row_dct1_pages; 2847 } 2848 2849 edac_dbg(1, "Total csrow%d pages: %u\n", i, nr_pages); 2850 2851 /* Determine DIMM ECC mode: */ 2852 if (pvt->umc) { 2853 if (mci->edac_ctl_cap & EDAC_FLAG_S4ECD4ED) 2854 edac_mode = EDAC_S4ECD4ED; 2855 else if (mci->edac_ctl_cap & EDAC_FLAG_SECDED) 2856 edac_mode = EDAC_SECDED; 2857 2858 } else if (pvt->nbcfg & NBCFG_ECC_ENABLE) { 2859 edac_mode = (pvt->nbcfg & NBCFG_CHIPKILL) 2860 ? EDAC_S4ECD4ED 2861 : EDAC_SECDED; 2862 } 2863 2864 for (j = 0; j < pvt->channel_count; j++) { 2865 dimm = csrow->channels[j]->dimm; 2866 dimm->mtype = pvt->dram_type; 2867 dimm->edac_mode = edac_mode; 2868 } 2869 } 2870 2871 return empty; 2872 } 2873 2874 /* get all cores on this DCT */ 2875 static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, u16 nid) 2876 { 2877 int cpu; 2878 2879 for_each_online_cpu(cpu) 2880 if (amd_get_nb_id(cpu) == nid) 2881 cpumask_set_cpu(cpu, mask); 2882 } 2883 2884 /* check MCG_CTL on all the cpus on this node */ 2885 static bool nb_mce_bank_enabled_on_node(u16 nid) 2886 { 2887 cpumask_var_t mask; 2888 int cpu, nbe; 2889 bool ret = false; 2890 2891 if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) { 2892 amd64_warn("%s: Error allocating mask\n", __func__); 2893 return false; 2894 } 2895 2896 get_cpus_on_this_dct_cpumask(mask, nid); 2897 2898 rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs); 2899 2900 for_each_cpu(cpu, mask) { 2901 struct msr *reg = per_cpu_ptr(msrs, cpu); 2902 nbe = reg->l & MSR_MCGCTL_NBE; 2903 2904 edac_dbg(0, "core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n", 2905 cpu, reg->q, 2906 (nbe ? 
"enabled" : "disabled")); 2907 2908 if (!nbe) 2909 goto out; 2910 } 2911 ret = true; 2912 2913 out: 2914 free_cpumask_var(mask); 2915 return ret; 2916 } 2917 2918 static int toggle_ecc_err_reporting(struct ecc_settings *s, u16 nid, bool on) 2919 { 2920 cpumask_var_t cmask; 2921 int cpu; 2922 2923 if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) { 2924 amd64_warn("%s: error allocating mask\n", __func__); 2925 return -ENOMEM; 2926 } 2927 2928 get_cpus_on_this_dct_cpumask(cmask, nid); 2929 2930 rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs); 2931 2932 for_each_cpu(cpu, cmask) { 2933 2934 struct msr *reg = per_cpu_ptr(msrs, cpu); 2935 2936 if (on) { 2937 if (reg->l & MSR_MCGCTL_NBE) 2938 s->flags.nb_mce_enable = 1; 2939 2940 reg->l |= MSR_MCGCTL_NBE; 2941 } else { 2942 /* 2943 * Turn off NB MCE reporting only when it was off before 2944 */ 2945 if (!s->flags.nb_mce_enable) 2946 reg->l &= ~MSR_MCGCTL_NBE; 2947 } 2948 } 2949 wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs); 2950 2951 free_cpumask_var(cmask); 2952 2953 return 0; 2954 } 2955 2956 static bool enable_ecc_error_reporting(struct ecc_settings *s, u16 nid, 2957 struct pci_dev *F3) 2958 { 2959 bool ret = true; 2960 u32 value, mask = 0x3; /* UECC/CECC enable */ 2961 2962 if (toggle_ecc_err_reporting(s, nid, ON)) { 2963 amd64_warn("Error enabling ECC reporting over MCGCTL!\n"); 2964 return false; 2965 } 2966 2967 amd64_read_pci_cfg(F3, NBCTL, &value); 2968 2969 s->old_nbctl = value & mask; 2970 s->nbctl_valid = true; 2971 2972 value |= mask; 2973 amd64_write_pci_cfg(F3, NBCTL, value); 2974 2975 amd64_read_pci_cfg(F3, NBCFG, &value); 2976 2977 edac_dbg(0, "1: node %d, NBCFG=0x%08x[DramEccEn: %d]\n", 2978 nid, value, !!(value & NBCFG_ECC_ENABLE)); 2979 2980 if (!(value & NBCFG_ECC_ENABLE)) { 2981 amd64_warn("DRAM ECC disabled on this node, enabling...\n"); 2982 2983 s->flags.nb_ecc_prev = 0; 2984 2985 /* Attempt to turn on DRAM ECC Enable */ 2986 value |= NBCFG_ECC_ENABLE; 2987 amd64_write_pci_cfg(F3, NBCFG, value); 2988 2989 amd64_read_pci_cfg(F3, NBCFG, &value); 2990 2991 if (!(value & NBCFG_ECC_ENABLE)) { 2992 amd64_warn("Hardware rejected DRAM ECC enable," 2993 "check memory DIMM configuration.\n"); 2994 ret = false; 2995 } else { 2996 amd64_info("Hardware accepted DRAM ECC Enable\n"); 2997 } 2998 } else { 2999 s->flags.nb_ecc_prev = 1; 3000 } 3001 3002 edac_dbg(0, "2: node %d, NBCFG=0x%08x[DramEccEn: %d]\n", 3003 nid, value, !!(value & NBCFG_ECC_ENABLE)); 3004 3005 return ret; 3006 } 3007 3008 static void restore_ecc_error_reporting(struct ecc_settings *s, u16 nid, 3009 struct pci_dev *F3) 3010 { 3011 u32 value, mask = 0x3; /* UECC/CECC enable */ 3012 3013 if (!s->nbctl_valid) 3014 return; 3015 3016 amd64_read_pci_cfg(F3, NBCTL, &value); 3017 value &= ~mask; 3018 value |= s->old_nbctl; 3019 3020 amd64_write_pci_cfg(F3, NBCTL, value); 3021 3022 /* restore previous BIOS DRAM ECC "off" setting we force-enabled */ 3023 if (!s->flags.nb_ecc_prev) { 3024 amd64_read_pci_cfg(F3, NBCFG, &value); 3025 value &= ~NBCFG_ECC_ENABLE; 3026 amd64_write_pci_cfg(F3, NBCFG, value); 3027 } 3028 3029 /* restore the NB Enable MCGCTL bit */ 3030 if (toggle_ecc_err_reporting(s, nid, OFF)) 3031 amd64_warn("Error restoring NB MCGCTL settings!\n"); 3032 } 3033 3034 /* 3035 * EDAC requires that the BIOS have ECC enabled before 3036 * taking over the processing of ECC errors. A command line 3037 * option allows to force-enable hardware ECC later in 3038 * enable_ecc_error_reporting(). 
3039 */ 3040 static const char *ecc_msg = 3041 "ECC disabled in the BIOS or no ECC capability, module will not load.\n" 3042 " Either enable ECC checking or force module loading by setting " 3043 "'ecc_enable_override'.\n" 3044 " (Note that use of the override may cause unknown side effects.)\n"; 3045 3046 static bool ecc_enabled(struct pci_dev *F3, u16 nid) 3047 { 3048 bool nb_mce_en = false; 3049 u8 ecc_en = 0, i; 3050 u32 value; 3051 3052 if (boot_cpu_data.x86 >= 0x17) { 3053 u8 umc_en_mask = 0, ecc_en_mask = 0; 3054 3055 for (i = 0; i < NUM_UMCS; i++) { 3056 u32 base = get_umc_base(i); 3057 3058 /* Only check enabled UMCs. */ 3059 if (amd_smn_read(nid, base + UMCCH_SDP_CTRL, &value)) 3060 continue; 3061 3062 if (!(value & UMC_SDP_INIT)) 3063 continue; 3064 3065 umc_en_mask |= BIT(i); 3066 3067 if (amd_smn_read(nid, base + UMCCH_UMC_CAP_HI, &value)) 3068 continue; 3069 3070 if (value & UMC_ECC_ENABLED) 3071 ecc_en_mask |= BIT(i); 3072 } 3073 3074 /* Check whether at least one UMC is enabled: */ 3075 if (umc_en_mask) 3076 ecc_en = umc_en_mask == ecc_en_mask; 3077 else 3078 edac_dbg(0, "Node %d: No enabled UMCs.\n", nid); 3079 3080 /* Assume UMC MCA banks are enabled. */ 3081 nb_mce_en = true; 3082 } else { 3083 amd64_read_pci_cfg(F3, NBCFG, &value); 3084 3085 ecc_en = !!(value & NBCFG_ECC_ENABLE); 3086 3087 nb_mce_en = nb_mce_bank_enabled_on_node(nid); 3088 if (!nb_mce_en) 3089 edac_dbg(0, "NB MCE bank disabled, set MSR 0x%08x[4] on node %d to enable.\n", 3090 MSR_IA32_MCG_CTL, nid); 3091 } 3092 3093 amd64_info("Node %d: DRAM ECC %s.\n", 3094 nid, (ecc_en ? "enabled" : "disabled")); 3095 3096 if (!ecc_en || !nb_mce_en) { 3097 amd64_info("%s", ecc_msg); 3098 return false; 3099 } 3100 return true; 3101 } 3102 3103 static inline void 3104 f17h_determine_edac_ctl_cap(struct mem_ctl_info *mci, struct amd64_pvt *pvt) 3105 { 3106 u8 i, ecc_en = 1, cpk_en = 1; 3107 3108 for (i = 0; i < NUM_UMCS; i++) { 3109 if (pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) { 3110 ecc_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_ENABLED); 3111 cpk_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_CHIPKILL_CAP); 3112 } 3113 } 3114 3115 /* Set chipkill only if ECC is enabled: */ 3116 if (ecc_en) { 3117 mci->edac_ctl_cap |= EDAC_FLAG_SECDED; 3118 3119 if (cpk_en) 3120 mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED; 3121 } 3122 } 3123 3124 static void setup_mci_misc_attrs(struct mem_ctl_info *mci, 3125 struct amd64_family_type *fam) 3126 { 3127 struct amd64_pvt *pvt = mci->pvt_info; 3128 3129 mci->mtype_cap = MEM_FLAG_DDR2 | MEM_FLAG_RDDR2; 3130 mci->edac_ctl_cap = EDAC_FLAG_NONE; 3131 3132 if (pvt->umc) { 3133 f17h_determine_edac_ctl_cap(mci, pvt); 3134 } else { 3135 if (pvt->nbcap & NBCAP_SECDED) 3136 mci->edac_ctl_cap |= EDAC_FLAG_SECDED; 3137 3138 if (pvt->nbcap & NBCAP_CHIPKILL) 3139 mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED; 3140 } 3141 3142 mci->edac_cap = determine_edac_cap(pvt); 3143 mci->mod_name = EDAC_MOD_STR; 3144 mci->ctl_name = fam->ctl_name; 3145 mci->dev_name = pci_name(pvt->F3); 3146 mci->ctl_page_to_phys = NULL; 3147 3148 /* memory scrubber interface */ 3149 mci->set_sdram_scrub_rate = set_scrub_rate; 3150 mci->get_sdram_scrub_rate = get_scrub_rate; 3151 } 3152 3153 /* 3154 * returns a pointer to the family descriptor on success, NULL otherwise. 
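 * As a side effect it caches family/model/stepping from boot_cpu_data and
 * points pvt->ops at the matching family's ops table.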
3155 */ 3156 static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt) 3157 { 3158 struct amd64_family_type *fam_type = NULL; 3159 3160 pvt->ext_model = boot_cpu_data.x86_model >> 4; 3161 pvt->stepping = boot_cpu_data.x86_stepping; 3162 pvt->model = boot_cpu_data.x86_model; 3163 pvt->fam = boot_cpu_data.x86; 3164 3165 switch (pvt->fam) { 3166 case 0xf: 3167 fam_type = &family_types[K8_CPUS]; 3168 pvt->ops = &family_types[K8_CPUS].ops; 3169 break; 3170 3171 case 0x10: 3172 fam_type = &family_types[F10_CPUS]; 3173 pvt->ops = &family_types[F10_CPUS].ops; 3174 break; 3175 3176 case 0x15: 3177 if (pvt->model == 0x30) { 3178 fam_type = &family_types[F15_M30H_CPUS]; 3179 pvt->ops = &family_types[F15_M30H_CPUS].ops; 3180 break; 3181 } else if (pvt->model == 0x60) { 3182 fam_type = &family_types[F15_M60H_CPUS]; 3183 pvt->ops = &family_types[F15_M60H_CPUS].ops; 3184 break; 3185 } 3186 3187 fam_type = &family_types[F15_CPUS]; 3188 pvt->ops = &family_types[F15_CPUS].ops; 3189 break; 3190 3191 case 0x16: 3192 if (pvt->model == 0x30) { 3193 fam_type = &family_types[F16_M30H_CPUS]; 3194 pvt->ops = &family_types[F16_M30H_CPUS].ops; 3195 break; 3196 } 3197 fam_type = &family_types[F16_CPUS]; 3198 pvt->ops = &family_types[F16_CPUS].ops; 3199 break; 3200 3201 case 0x17: 3202 if (pvt->model >= 0x10 && pvt->model <= 0x2f) { 3203 fam_type = &family_types[F17_M10H_CPUS]; 3204 pvt->ops = &family_types[F17_M10H_CPUS].ops; 3205 break; 3206 } 3207 /* fall through */ 3208 case 0x18: 3209 fam_type = &family_types[F17_CPUS]; 3210 pvt->ops = &family_types[F17_CPUS].ops; 3211 3212 if (pvt->fam == 0x18) 3213 family_types[F17_CPUS].ctl_name = "F18h"; 3214 break; 3215 3216 default: 3217 amd64_err("Unsupported family!\n"); 3218 return NULL; 3219 } 3220 3221 amd64_info("%s %sdetected (node %d).\n", fam_type->ctl_name, 3222 (pvt->fam == 0xf ? 3223 (pvt->ext_model >= K8_REV_F ? "revF or later " 3224 : "revE or earlier ") 3225 : ""), pvt->mc_node_id); 3226 return fam_type; 3227 } 3228 3229 static const struct attribute_group *amd64_edac_attr_groups[] = { 3230 #ifdef CONFIG_EDAC_DEBUG 3231 &amd64_edac_dbg_group, 3232 #endif 3233 #ifdef CONFIG_EDAC_AMD64_ERROR_INJECTION 3234 &amd64_edac_inj_group, 3235 #endif 3236 NULL 3237 }; 3238 3239 static int init_one_instance(unsigned int nid) 3240 { 3241 struct pci_dev *F3 = node_to_amd_nb(nid)->misc; 3242 struct amd64_family_type *fam_type = NULL; 3243 struct mem_ctl_info *mci = NULL; 3244 struct edac_mc_layer layers[2]; 3245 struct amd64_pvt *pvt = NULL; 3246 u16 pci_id1, pci_id2; 3247 int err = 0, ret; 3248 3249 ret = -ENOMEM; 3250 pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL); 3251 if (!pvt) 3252 goto err_ret; 3253 3254 pvt->mc_node_id = nid; 3255 pvt->F3 = F3; 3256 3257 ret = -EINVAL; 3258 fam_type = per_family_init(pvt); 3259 if (!fam_type) 3260 goto err_free; 3261 3262 if (pvt->fam >= 0x17) { 3263 pvt->umc = kcalloc(NUM_UMCS, sizeof(struct amd64_umc), GFP_KERNEL); 3264 if (!pvt->umc) { 3265 ret = -ENOMEM; 3266 goto err_free; 3267 } 3268 3269 pci_id1 = fam_type->f0_id; 3270 pci_id2 = fam_type->f6_id; 3271 } else { 3272 pci_id1 = fam_type->f1_id; 3273 pci_id2 = fam_type->f2_id; 3274 } 3275 3276 err = reserve_mc_sibling_devs(pvt, pci_id1, pci_id2); 3277 if (err) 3278 goto err_post_init; 3279 3280 read_mc_regs(pvt); 3281 3282 /* 3283 * We need to determine how many memory channels there are. Then use 3284 * that information for calculating the size of the dynamic instance 3285 * tables in the 'mci' structure. 
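 * Layer 0 below is the chip-select layer (sized from csels[0].b_cnt); layer 1
 * is the channel layer and is always sized to two.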
3286 */ 3287 ret = -EINVAL; 3288 pvt->channel_count = pvt->ops->early_channel_count(pvt); 3289 if (pvt->channel_count < 0) 3290 goto err_siblings; 3291 3292 ret = -ENOMEM; 3293 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; 3294 layers[0].size = pvt->csels[0].b_cnt; 3295 layers[0].is_virt_csrow = true; 3296 layers[1].type = EDAC_MC_LAYER_CHANNEL; 3297 3298 /* 3299 * Always allocate two channels since we can have setups with DIMMs on 3300 * only one channel. Also, this simplifies handling later for the price 3301 * of a couple of KBs tops. 3302 */ 3303 layers[1].size = 2; 3304 layers[1].is_virt_csrow = false; 3305 3306 mci = edac_mc_alloc(nid, ARRAY_SIZE(layers), layers, 0); 3307 if (!mci) 3308 goto err_siblings; 3309 3310 mci->pvt_info = pvt; 3311 mci->pdev = &pvt->F3->dev; 3312 3313 setup_mci_misc_attrs(mci, fam_type); 3314 3315 if (init_csrows(mci)) 3316 mci->edac_cap = EDAC_FLAG_NONE; 3317 3318 ret = -ENODEV; 3319 if (edac_mc_add_mc_with_groups(mci, amd64_edac_attr_groups)) { 3320 edac_dbg(1, "failed edac_mc_add_mc()\n"); 3321 goto err_add_mc; 3322 } 3323 3324 return 0; 3325 3326 err_add_mc: 3327 edac_mc_free(mci); 3328 3329 err_siblings: 3330 free_mc_sibling_devs(pvt); 3331 3332 err_post_init: 3333 if (pvt->fam >= 0x17) 3334 kfree(pvt->umc); 3335 3336 err_free: 3337 kfree(pvt); 3338 3339 err_ret: 3340 return ret; 3341 } 3342 3343 static int probe_one_instance(unsigned int nid) 3344 { 3345 struct pci_dev *F3 = node_to_amd_nb(nid)->misc; 3346 struct ecc_settings *s; 3347 int ret; 3348 3349 ret = -ENOMEM; 3350 s = kzalloc(sizeof(struct ecc_settings), GFP_KERNEL); 3351 if (!s) 3352 goto err_out; 3353 3354 ecc_stngs[nid] = s; 3355 3356 if (!ecc_enabled(F3, nid)) { 3357 ret = 0; 3358 3359 if (!ecc_enable_override) 3360 goto err_enable; 3361 3362 if (boot_cpu_data.x86 >= 0x17) { 3363 amd64_warn("Forcing ECC on is not recommended on newer systems. 
Please enable ECC in BIOS."); 3364 goto err_enable; 3365 } else 3366 amd64_warn("Forcing ECC on!\n"); 3367 3368 if (!enable_ecc_error_reporting(s, nid, F3)) 3369 goto err_enable; 3370 } 3371 3372 ret = init_one_instance(nid); 3373 if (ret < 0) { 3374 amd64_err("Error probing instance: %d\n", nid); 3375 3376 if (boot_cpu_data.x86 < 0x17) 3377 restore_ecc_error_reporting(s, nid, F3); 3378 3379 goto err_enable; 3380 } 3381 3382 return ret; 3383 3384 err_enable: 3385 kfree(s); 3386 ecc_stngs[nid] = NULL; 3387 3388 err_out: 3389 return ret; 3390 } 3391 3392 static void remove_one_instance(unsigned int nid) 3393 { 3394 struct pci_dev *F3 = node_to_amd_nb(nid)->misc; 3395 struct ecc_settings *s = ecc_stngs[nid]; 3396 struct mem_ctl_info *mci; 3397 struct amd64_pvt *pvt; 3398 3399 mci = find_mci_by_dev(&F3->dev); 3400 WARN_ON(!mci); 3401 3402 /* Remove from EDAC CORE tracking list */ 3403 mci = edac_mc_del_mc(&F3->dev); 3404 if (!mci) 3405 return; 3406 3407 pvt = mci->pvt_info; 3408 3409 restore_ecc_error_reporting(s, nid, F3); 3410 3411 free_mc_sibling_devs(pvt); 3412 3413 kfree(ecc_stngs[nid]); 3414 ecc_stngs[nid] = NULL; 3415 3416 /* Free the EDAC CORE resources */ 3417 mci->pvt_info = NULL; 3418 3419 kfree(pvt); 3420 edac_mc_free(mci); 3421 } 3422 3423 static void setup_pci_device(void) 3424 { 3425 struct mem_ctl_info *mci; 3426 struct amd64_pvt *pvt; 3427 3428 if (pci_ctl) 3429 return; 3430 3431 mci = edac_mc_find(0); 3432 if (!mci) 3433 return; 3434 3435 pvt = mci->pvt_info; 3436 if (pvt->umc) 3437 pci_ctl = edac_pci_create_generic_ctl(&pvt->F0->dev, EDAC_MOD_STR); 3438 else 3439 pci_ctl = edac_pci_create_generic_ctl(&pvt->F2->dev, EDAC_MOD_STR); 3440 if (!pci_ctl) { 3441 pr_warn("%s(): Unable to create PCI control\n", __func__); 3442 pr_warn("%s(): PCI error report via EDAC not set\n", __func__); 3443 } 3444 } 3445 3446 static const struct x86_cpu_id amd64_cpuids[] = { 3447 { X86_VENDOR_AMD, 0xF, X86_MODEL_ANY, X86_FEATURE_ANY, 0 }, 3448 { X86_VENDOR_AMD, 0x10, X86_MODEL_ANY, X86_FEATURE_ANY, 0 }, 3449 { X86_VENDOR_AMD, 0x15, X86_MODEL_ANY, X86_FEATURE_ANY, 0 }, 3450 { X86_VENDOR_AMD, 0x16, X86_MODEL_ANY, X86_FEATURE_ANY, 0 }, 3451 { X86_VENDOR_AMD, 0x17, X86_MODEL_ANY, X86_FEATURE_ANY, 0 }, 3452 { X86_VENDOR_HYGON, 0x18, X86_MODEL_ANY, X86_FEATURE_ANY, 0 }, 3453 { } 3454 }; 3455 MODULE_DEVICE_TABLE(x86cpu, amd64_cpuids); 3456 3457 static int __init amd64_edac_init(void) 3458 { 3459 const char *owner; 3460 int err = -ENODEV; 3461 int i; 3462 3463 owner = edac_get_owner(); 3464 if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR))) 3465 return -EBUSY; 3466 3467 if (!x86_match_cpu(amd64_cpuids)) 3468 return -ENODEV; 3469 3470 if (amd_cache_northbridges() < 0) 3471 return -ENODEV; 3472 3473 opstate_init(); 3474 3475 err = -ENOMEM; 3476 ecc_stngs = kcalloc(amd_nb_num(), sizeof(ecc_stngs[0]), GFP_KERNEL); 3477 if (!ecc_stngs) 3478 goto err_free; 3479 3480 msrs = msrs_alloc(); 3481 if (!msrs) 3482 goto err_free; 3483 3484 for (i = 0; i < amd_nb_num(); i++) { 3485 err = probe_one_instance(i); 3486 if (err) { 3487 /* unwind properly */ 3488 while (--i >= 0) 3489 remove_one_instance(i); 3490 3491 goto err_pci; 3492 } 3493 } 3494 3495 if (!edac_has_mcs()) { 3496 err = -ENODEV; 3497 goto err_pci; 3498 } 3499 3500 /* register stuff with EDAC MCE */ 3501 if (report_gart_errors) 3502 amd_report_gart_errors(true); 3503 3504 if (boot_cpu_data.x86 >= 0x17) 3505 amd_register_ecc_decoder(decode_umc_error); 3506 else 3507 amd_register_ecc_decoder(decode_bus_error); 3508 3509 setup_pci_device(); 3510 
3511 #ifdef CONFIG_X86_32 3512 amd64_err("%s on 32-bit is unsupported. USE AT YOUR OWN RISK!\n", EDAC_MOD_STR); 3513 #endif 3514 3515 printk(KERN_INFO "AMD64 EDAC driver v%s\n", EDAC_AMD64_VERSION); 3516 3517 return 0; 3518 3519 err_pci: 3520 msrs_free(msrs); 3521 msrs = NULL; 3522 3523 err_free: 3524 kfree(ecc_stngs); 3525 ecc_stngs = NULL; 3526 3527 return err; 3528 } 3529 3530 static void __exit amd64_edac_exit(void) 3531 { 3532 int i; 3533 3534 if (pci_ctl) 3535 edac_pci_release_generic_ctl(pci_ctl); 3536 3537 /* unregister from EDAC MCE */ 3538 amd_report_gart_errors(false); 3539 3540 if (boot_cpu_data.x86 >= 0x17) 3541 amd_unregister_ecc_decoder(decode_umc_error); 3542 else 3543 amd_unregister_ecc_decoder(decode_bus_error); 3544 3545 for (i = 0; i < amd_nb_num(); i++) 3546 remove_one_instance(i); 3547 3548 kfree(ecc_stngs); 3549 ecc_stngs = NULL; 3550 3551 msrs_free(msrs); 3552 msrs = NULL; 3553 } 3554 3555 module_init(amd64_edac_init); 3556 module_exit(amd64_edac_exit); 3557 3558 MODULE_LICENSE("GPL"); 3559 MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, " 3560 "Dave Peterson, Thayne Harbaugh"); 3561 MODULE_DESCRIPTION("MC support for AMD64 memory controllers - " 3562 EDAC_AMD64_VERSION); 3563 3564 module_param(edac_op_state, int, 0444); 3565 MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI"); 3566