12bc65418SDoug Thompson #include "amd64_edac.h" 27d6034d3SDoug Thompson #include <asm/k8.h> 32bc65418SDoug Thompson 42bc65418SDoug Thompson static struct edac_pci_ctl_info *amd64_ctl_pci; 52bc65418SDoug Thompson 62bc65418SDoug Thompson static int report_gart_errors; 72bc65418SDoug Thompson module_param(report_gart_errors, int, 0644); 82bc65418SDoug Thompson 92bc65418SDoug Thompson /* 102bc65418SDoug Thompson * Set by command line parameter. If BIOS has enabled the ECC, this override is 112bc65418SDoug Thompson * cleared to prevent re-enabling the hardware by this driver. 122bc65418SDoug Thompson */ 132bc65418SDoug Thompson static int ecc_enable_override; 142bc65418SDoug Thompson module_param(ecc_enable_override, int, 0644); 152bc65418SDoug Thompson 1650542251SBorislav Petkov static struct msr *msrs; 1750542251SBorislav Petkov 182bc65418SDoug Thompson /* Lookup table for all possible MC control instances */ 192bc65418SDoug Thompson struct amd64_pvt; 203011b20dSBorislav Petkov static struct mem_ctl_info *mci_lookup[EDAC_MAX_NUMNODES]; 213011b20dSBorislav Petkov static struct amd64_pvt *pvt_lookup[EDAC_MAX_NUMNODES]; 222bc65418SDoug Thompson 232bc65418SDoug Thompson /* 241433eb99SBorislav Petkov * Address to DRAM bank mapping: see F2x80 for K8 and F2x[1,0]80 for Fam10 and 251433eb99SBorislav Petkov * later. 26b70ef010SBorislav Petkov */ 271433eb99SBorislav Petkov static int ddr2_dbam_revCG[] = { 281433eb99SBorislav Petkov [0] = 32, 291433eb99SBorislav Petkov [1] = 64, 301433eb99SBorislav Petkov [2] = 128, 311433eb99SBorislav Petkov [3] = 256, 321433eb99SBorislav Petkov [4] = 512, 331433eb99SBorislav Petkov [5] = 1024, 341433eb99SBorislav Petkov [6] = 2048, 351433eb99SBorislav Petkov }; 361433eb99SBorislav Petkov 371433eb99SBorislav Petkov static int ddr2_dbam_revD[] = { 381433eb99SBorislav Petkov [0] = 32, 391433eb99SBorislav Petkov [1] = 64, 401433eb99SBorislav Petkov [2 ... 
3] = 128, 411433eb99SBorislav Petkov [4] = 256, 421433eb99SBorislav Petkov [5] = 512, 431433eb99SBorislav Petkov [6] = 256, 441433eb99SBorislav Petkov [7] = 512, 451433eb99SBorislav Petkov [8 ... 9] = 1024, 461433eb99SBorislav Petkov [10] = 2048, 471433eb99SBorislav Petkov }; 481433eb99SBorislav Petkov 491433eb99SBorislav Petkov static int ddr2_dbam[] = { [0] = 128, 501433eb99SBorislav Petkov [1] = 256, 511433eb99SBorislav Petkov [2 ... 4] = 512, 521433eb99SBorislav Petkov [5 ... 6] = 1024, 531433eb99SBorislav Petkov [7 ... 8] = 2048, 541433eb99SBorislav Petkov [9 ... 10] = 4096, 551433eb99SBorislav Petkov [11] = 8192, 561433eb99SBorislav Petkov }; 571433eb99SBorislav Petkov 581433eb99SBorislav Petkov static int ddr3_dbam[] = { [0] = -1, 591433eb99SBorislav Petkov [1] = 256, 601433eb99SBorislav Petkov [2] = 512, 611433eb99SBorislav Petkov [3 ... 4] = -1, 621433eb99SBorislav Petkov [5 ... 6] = 1024, 631433eb99SBorislav Petkov [7 ... 8] = 2048, 641433eb99SBorislav Petkov [9 ... 10] = 4096, 651433eb99SBorislav Petkov [11] = 8192, 66b70ef010SBorislav Petkov }; 67b70ef010SBorislav Petkov 68b70ef010SBorislav Petkov /* 69b70ef010SBorislav Petkov * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing 70b70ef010SBorislav Petkov * bandwidth to a valid bit pattern. The 'set' operation finds the 'matching- 71b70ef010SBorislav Petkov * or higher value'. 72b70ef010SBorislav Petkov * 73b70ef010SBorislav Petkov *FIXME: Produce a better mapping/linearisation. 
74b70ef010SBorislav Petkov */ 75b70ef010SBorislav Petkov 76b70ef010SBorislav Petkov struct scrubrate scrubrates[] = { 77b70ef010SBorislav Petkov { 0x01, 1600000000UL}, 78b70ef010SBorislav Petkov { 0x02, 800000000UL}, 79b70ef010SBorislav Petkov { 0x03, 400000000UL}, 80b70ef010SBorislav Petkov { 0x04, 200000000UL}, 81b70ef010SBorislav Petkov { 0x05, 100000000UL}, 82b70ef010SBorislav Petkov { 0x06, 50000000UL}, 83b70ef010SBorislav Petkov { 0x07, 25000000UL}, 84b70ef010SBorislav Petkov { 0x08, 12284069UL}, 85b70ef010SBorislav Petkov { 0x09, 6274509UL}, 86b70ef010SBorislav Petkov { 0x0A, 3121951UL}, 87b70ef010SBorislav Petkov { 0x0B, 1560975UL}, 88b70ef010SBorislav Petkov { 0x0C, 781440UL}, 89b70ef010SBorislav Petkov { 0x0D, 390720UL}, 90b70ef010SBorislav Petkov { 0x0E, 195300UL}, 91b70ef010SBorislav Petkov { 0x0F, 97650UL}, 92b70ef010SBorislav Petkov { 0x10, 48854UL}, 93b70ef010SBorislav Petkov { 0x11, 24427UL}, 94b70ef010SBorislav Petkov { 0x12, 12213UL}, 95b70ef010SBorislav Petkov { 0x13, 6101UL}, 96b70ef010SBorislav Petkov { 0x14, 3051UL}, 97b70ef010SBorislav Petkov { 0x15, 1523UL}, 98b70ef010SBorislav Petkov { 0x16, 761UL}, 99b70ef010SBorislav Petkov { 0x00, 0UL}, /* scrubbing off */ 100b70ef010SBorislav Petkov }; 101b70ef010SBorislav Petkov 102b70ef010SBorislav Petkov /* 1032bc65418SDoug Thompson * Memory scrubber control interface. For K8, memory scrubbing is handled by 1042bc65418SDoug Thompson * hardware and can involve L2 cache, dcache as well as the main memory. With 1052bc65418SDoug Thompson * F10, this is extended to L3 cache scrubbing on CPU models sporting that 1062bc65418SDoug Thompson * functionality. 1072bc65418SDoug Thompson * 1082bc65418SDoug Thompson * This causes the "units" for the scrubbing speed to vary from 64 byte blocks 1092bc65418SDoug Thompson * (dram) over to cache lines. This is nasty, so we will use bandwidth in 1102bc65418SDoug Thompson * bytes/sec for the setting. 
1112bc65418SDoug Thompson * 1122bc65418SDoug Thompson * Currently, we only do dram scrubbing. If the scrubbing is done in software on 1132bc65418SDoug Thompson * other archs, we might not have access to the caches directly. 1142bc65418SDoug Thompson */ 1152bc65418SDoug Thompson 1162bc65418SDoug Thompson /* 1172bc65418SDoug Thompson * scan the scrub rate mapping table for a close or matching bandwidth value to 1182bc65418SDoug Thompson * issue. If requested is too big, then use last maximum value found. 1192bc65418SDoug Thompson */ 1202bc65418SDoug Thompson static int amd64_search_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, 1212bc65418SDoug Thompson u32 min_scrubrate) 1222bc65418SDoug Thompson { 1232bc65418SDoug Thompson u32 scrubval; 1242bc65418SDoug Thompson int i; 1252bc65418SDoug Thompson 1262bc65418SDoug Thompson /* 1272bc65418SDoug Thompson * map the configured rate (new_bw) to a value specific to the AMD64 1282bc65418SDoug Thompson * memory controller and apply to register. Search for the first 1292bc65418SDoug Thompson * bandwidth entry that is greater or equal than the setting requested 1302bc65418SDoug Thompson * and program that. If at last entry, turn off DRAM scrubbing. 1312bc65418SDoug Thompson */ 1322bc65418SDoug Thompson for (i = 0; i < ARRAY_SIZE(scrubrates); i++) { 1332bc65418SDoug Thompson /* 1342bc65418SDoug Thompson * skip scrub rates which aren't recommended 1352bc65418SDoug Thompson * (see F10 BKDG, F3x58) 1362bc65418SDoug Thompson */ 1372bc65418SDoug Thompson if (scrubrates[i].scrubval < min_scrubrate) 1382bc65418SDoug Thompson continue; 1392bc65418SDoug Thompson 1402bc65418SDoug Thompson if (scrubrates[i].bandwidth <= new_bw) 1412bc65418SDoug Thompson break; 1422bc65418SDoug Thompson 1432bc65418SDoug Thompson /* 1442bc65418SDoug Thompson * if no suitable bandwidth found, turn off DRAM scrubbing 1452bc65418SDoug Thompson * entirely by falling back to the last element in the 1462bc65418SDoug Thompson * scrubrates array. 
1472bc65418SDoug Thompson */ 1482bc65418SDoug Thompson } 1492bc65418SDoug Thompson 1502bc65418SDoug Thompson scrubval = scrubrates[i].scrubval; 1512bc65418SDoug Thompson if (scrubval) 1522bc65418SDoug Thompson edac_printk(KERN_DEBUG, EDAC_MC, 1532bc65418SDoug Thompson "Setting scrub rate bandwidth: %u\n", 1542bc65418SDoug Thompson scrubrates[i].bandwidth); 1552bc65418SDoug Thompson else 1562bc65418SDoug Thompson edac_printk(KERN_DEBUG, EDAC_MC, "Turning scrubbing off.\n"); 1572bc65418SDoug Thompson 1582bc65418SDoug Thompson pci_write_bits32(ctl, K8_SCRCTRL, scrubval, 0x001F); 1592bc65418SDoug Thompson 1602bc65418SDoug Thompson return 0; 1612bc65418SDoug Thompson } 1622bc65418SDoug Thompson 1632bc65418SDoug Thompson static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 *bandwidth) 1642bc65418SDoug Thompson { 1652bc65418SDoug Thompson struct amd64_pvt *pvt = mci->pvt_info; 1662bc65418SDoug Thompson u32 min_scrubrate = 0x0; 1672bc65418SDoug Thompson 1682bc65418SDoug Thompson switch (boot_cpu_data.x86) { 1692bc65418SDoug Thompson case 0xf: 1702bc65418SDoug Thompson min_scrubrate = K8_MIN_SCRUB_RATE_BITS; 1712bc65418SDoug Thompson break; 1722bc65418SDoug Thompson case 0x10: 1732bc65418SDoug Thompson min_scrubrate = F10_MIN_SCRUB_RATE_BITS; 1742bc65418SDoug Thompson break; 1752bc65418SDoug Thompson case 0x11: 1762bc65418SDoug Thompson min_scrubrate = F11_MIN_SCRUB_RATE_BITS; 1772bc65418SDoug Thompson break; 1782bc65418SDoug Thompson 1792bc65418SDoug Thompson default: 1802bc65418SDoug Thompson amd64_printk(KERN_ERR, "Unsupported family!\n"); 1812bc65418SDoug Thompson break; 1822bc65418SDoug Thompson } 1832bc65418SDoug Thompson return amd64_search_set_scrub_rate(pvt->misc_f3_ctl, *bandwidth, 1842bc65418SDoug Thompson min_scrubrate); 1852bc65418SDoug Thompson } 1862bc65418SDoug Thompson 1872bc65418SDoug Thompson static int amd64_get_scrub_rate(struct mem_ctl_info *mci, u32 *bw) 1882bc65418SDoug Thompson { 1892bc65418SDoug Thompson struct amd64_pvt *pvt = 
mci->pvt_info; 1902bc65418SDoug Thompson u32 scrubval = 0; 1916ba5dcdcSBorislav Petkov int status = -1, i; 1922bc65418SDoug Thompson 1936ba5dcdcSBorislav Petkov amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_SCRCTRL, &scrubval); 1942bc65418SDoug Thompson 1952bc65418SDoug Thompson scrubval = scrubval & 0x001F; 1962bc65418SDoug Thompson 1972bc65418SDoug Thompson edac_printk(KERN_DEBUG, EDAC_MC, 1982bc65418SDoug Thompson "pci-read, sdram scrub control value: %d \n", scrubval); 1992bc65418SDoug Thompson 200926311fdSRoel Kluin for (i = 0; i < ARRAY_SIZE(scrubrates); i++) { 2012bc65418SDoug Thompson if (scrubrates[i].scrubval == scrubval) { 2022bc65418SDoug Thompson *bw = scrubrates[i].bandwidth; 2032bc65418SDoug Thompson status = 0; 2042bc65418SDoug Thompson break; 2052bc65418SDoug Thompson } 2062bc65418SDoug Thompson } 2072bc65418SDoug Thompson 2082bc65418SDoug Thompson return status; 2092bc65418SDoug Thompson } 2102bc65418SDoug Thompson 2116775763aSDoug Thompson /* Map from a CSROW entry to the mask entry that operates on it */ 2126775763aSDoug Thompson static inline u32 amd64_map_to_dcs_mask(struct amd64_pvt *pvt, int csrow) 2136775763aSDoug Thompson { 2141433eb99SBorislav Petkov if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) 2159d858bb1SBorislav Petkov return csrow; 2169d858bb1SBorislav Petkov else 2179d858bb1SBorislav Petkov return csrow >> 1; 2186775763aSDoug Thompson } 2196775763aSDoug Thompson 2206775763aSDoug Thompson /* return the 'base' address the i'th CS entry of the 'dct' DRAM controller */ 2216775763aSDoug Thompson static u32 amd64_get_dct_base(struct amd64_pvt *pvt, int dct, int csrow) 2226775763aSDoug Thompson { 2236775763aSDoug Thompson if (dct == 0) 2246775763aSDoug Thompson return pvt->dcsb0[csrow]; 2256775763aSDoug Thompson else 2266775763aSDoug Thompson return pvt->dcsb1[csrow]; 2276775763aSDoug Thompson } 2286775763aSDoug Thompson 2296775763aSDoug Thompson /* 2306775763aSDoug Thompson * Return the 'mask' address the i'th CS entry. 
This function is needed because 2316775763aSDoug Thompson * there number of DCSM registers on Rev E and prior vs Rev F and later is 2326775763aSDoug Thompson * different. 2336775763aSDoug Thompson */ 2346775763aSDoug Thompson static u32 amd64_get_dct_mask(struct amd64_pvt *pvt, int dct, int csrow) 2356775763aSDoug Thompson { 2366775763aSDoug Thompson if (dct == 0) 2376775763aSDoug Thompson return pvt->dcsm0[amd64_map_to_dcs_mask(pvt, csrow)]; 2386775763aSDoug Thompson else 2396775763aSDoug Thompson return pvt->dcsm1[amd64_map_to_dcs_mask(pvt, csrow)]; 2406775763aSDoug Thompson } 2416775763aSDoug Thompson 2426775763aSDoug Thompson 2436775763aSDoug Thompson /* 2446775763aSDoug Thompson * In *base and *limit, pass back the full 40-bit base and limit physical 2456775763aSDoug Thompson * addresses for the node given by node_id. This information is obtained from 2466775763aSDoug Thompson * DRAM Base (section 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers. The 2476775763aSDoug Thompson * base and limit addresses are of type SysAddr, as defined at the start of 2486775763aSDoug Thompson * section 3.4.4 (p. 70). They are the lowest and highest physical addresses 2496775763aSDoug Thompson * in the address range they represent. 
2506775763aSDoug Thompson */ 2516775763aSDoug Thompson static void amd64_get_base_and_limit(struct amd64_pvt *pvt, int node_id, 2526775763aSDoug Thompson u64 *base, u64 *limit) 2536775763aSDoug Thompson { 2546775763aSDoug Thompson *base = pvt->dram_base[node_id]; 2556775763aSDoug Thompson *limit = pvt->dram_limit[node_id]; 2566775763aSDoug Thompson } 2576775763aSDoug Thompson 2586775763aSDoug Thompson /* 2596775763aSDoug Thompson * Return 1 if the SysAddr given by sys_addr matches the base/limit associated 2606775763aSDoug Thompson * with node_id 2616775763aSDoug Thompson */ 2626775763aSDoug Thompson static int amd64_base_limit_match(struct amd64_pvt *pvt, 2636775763aSDoug Thompson u64 sys_addr, int node_id) 2646775763aSDoug Thompson { 2656775763aSDoug Thompson u64 base, limit, addr; 2666775763aSDoug Thompson 2676775763aSDoug Thompson amd64_get_base_and_limit(pvt, node_id, &base, &limit); 2686775763aSDoug Thompson 2696775763aSDoug Thompson /* The K8 treats this as a 40-bit value. However, bits 63-40 will be 2706775763aSDoug Thompson * all ones if the most significant implemented address bit is 1. 2716775763aSDoug Thompson * Here we discard bits 63-40. See section 3.4.2 of AMD publication 2726775763aSDoug Thompson * 24592: AMD x86-64 Architecture Programmer's Manual Volume 1 2736775763aSDoug Thompson * Application Programming. 2746775763aSDoug Thompson */ 2756775763aSDoug Thompson addr = sys_addr & 0x000000ffffffffffull; 2766775763aSDoug Thompson 2776775763aSDoug Thompson return (addr >= base) && (addr <= limit); 2786775763aSDoug Thompson } 2796775763aSDoug Thompson 2806775763aSDoug Thompson /* 2816775763aSDoug Thompson * Attempt to map a SysAddr to a node. On success, return a pointer to the 2826775763aSDoug Thompson * mem_ctl_info structure for the node that the SysAddr maps to. 2836775763aSDoug Thompson * 2846775763aSDoug Thompson * On failure, return NULL. 
2856775763aSDoug Thompson */ 2866775763aSDoug Thompson static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci, 2876775763aSDoug Thompson u64 sys_addr) 2886775763aSDoug Thompson { 2896775763aSDoug Thompson struct amd64_pvt *pvt; 2906775763aSDoug Thompson int node_id; 2916775763aSDoug Thompson u32 intlv_en, bits; 2926775763aSDoug Thompson 2936775763aSDoug Thompson /* 2946775763aSDoug Thompson * Here we use the DRAM Base (section 3.4.4.1) and DRAM Limit (section 2956775763aSDoug Thompson * 3.4.4.2) registers to map the SysAddr to a node ID. 2966775763aSDoug Thompson */ 2976775763aSDoug Thompson pvt = mci->pvt_info; 2986775763aSDoug Thompson 2996775763aSDoug Thompson /* 3006775763aSDoug Thompson * The value of this field should be the same for all DRAM Base 3016775763aSDoug Thompson * registers. Therefore we arbitrarily choose to read it from the 3026775763aSDoug Thompson * register for node 0. 3036775763aSDoug Thompson */ 3046775763aSDoug Thompson intlv_en = pvt->dram_IntlvEn[0]; 3056775763aSDoug Thompson 3066775763aSDoug Thompson if (intlv_en == 0) { 3078edc5445SBorislav Petkov for (node_id = 0; node_id < DRAM_REG_COUNT; node_id++) { 3086775763aSDoug Thompson if (amd64_base_limit_match(pvt, sys_addr, node_id)) 3096775763aSDoug Thompson goto found; 3106775763aSDoug Thompson } 3118edc5445SBorislav Petkov goto err_no_match; 3128edc5445SBorislav Petkov } 3136775763aSDoug Thompson 31472f158feSBorislav Petkov if (unlikely((intlv_en != 0x01) && 31572f158feSBorislav Petkov (intlv_en != 0x03) && 31672f158feSBorislav Petkov (intlv_en != 0x07))) { 3176775763aSDoug Thompson amd64_printk(KERN_WARNING, "junk value of 0x%x extracted from " 3186775763aSDoug Thompson "IntlvEn field of DRAM Base Register for node 0: " 31972f158feSBorislav Petkov "this probably indicates a BIOS bug.\n", intlv_en); 3206775763aSDoug Thompson return NULL; 3216775763aSDoug Thompson } 3226775763aSDoug Thompson 3236775763aSDoug Thompson bits = (((u32) sys_addr) >> 12) & intlv_en; 
3246775763aSDoug Thompson 3256775763aSDoug Thompson for (node_id = 0; ; ) { 3268edc5445SBorislav Petkov if ((pvt->dram_IntlvSel[node_id] & intlv_en) == bits) 3276775763aSDoug Thompson break; /* intlv_sel field matches */ 3286775763aSDoug Thompson 3296775763aSDoug Thompson if (++node_id >= DRAM_REG_COUNT) 3306775763aSDoug Thompson goto err_no_match; 3316775763aSDoug Thompson } 3326775763aSDoug Thompson 3336775763aSDoug Thompson /* sanity test for sys_addr */ 3346775763aSDoug Thompson if (unlikely(!amd64_base_limit_match(pvt, sys_addr, node_id))) { 3356775763aSDoug Thompson amd64_printk(KERN_WARNING, 3368edc5445SBorislav Petkov "%s(): sys_addr 0x%llx falls outside base/limit " 3376775763aSDoug Thompson "address range for node %d with node interleaving " 3388edc5445SBorislav Petkov "enabled.\n", 3398edc5445SBorislav Petkov __func__, sys_addr, node_id); 3406775763aSDoug Thompson return NULL; 3416775763aSDoug Thompson } 3426775763aSDoug Thompson 3436775763aSDoug Thompson found: 3446775763aSDoug Thompson return edac_mc_find(node_id); 3456775763aSDoug Thompson 3466775763aSDoug Thompson err_no_match: 3476775763aSDoug Thompson debugf2("sys_addr 0x%lx doesn't match any node\n", 3486775763aSDoug Thompson (unsigned long)sys_addr); 3496775763aSDoug Thompson 3506775763aSDoug Thompson return NULL; 3516775763aSDoug Thompson } 352e2ce7255SDoug Thompson 353e2ce7255SDoug Thompson /* 354e2ce7255SDoug Thompson * Extract the DRAM CS base address from selected csrow register. 355e2ce7255SDoug Thompson */ 356e2ce7255SDoug Thompson static u64 base_from_dct_base(struct amd64_pvt *pvt, int csrow) 357e2ce7255SDoug Thompson { 358e2ce7255SDoug Thompson return ((u64) (amd64_get_dct_base(pvt, 0, csrow) & pvt->dcsb_base)) << 359e2ce7255SDoug Thompson pvt->dcs_shift; 360e2ce7255SDoug Thompson } 361e2ce7255SDoug Thompson 362e2ce7255SDoug Thompson /* 363e2ce7255SDoug Thompson * Extract the mask from the dcsb0[csrow] entry in a CPU revision-specific way. 
364e2ce7255SDoug Thompson */ 365e2ce7255SDoug Thompson static u64 mask_from_dct_mask(struct amd64_pvt *pvt, int csrow) 366e2ce7255SDoug Thompson { 367e2ce7255SDoug Thompson u64 dcsm_bits, other_bits; 368e2ce7255SDoug Thompson u64 mask; 369e2ce7255SDoug Thompson 370e2ce7255SDoug Thompson /* Extract bits from DRAM CS Mask. */ 371e2ce7255SDoug Thompson dcsm_bits = amd64_get_dct_mask(pvt, 0, csrow) & pvt->dcsm_mask; 372e2ce7255SDoug Thompson 373e2ce7255SDoug Thompson other_bits = pvt->dcsm_mask; 374e2ce7255SDoug Thompson other_bits = ~(other_bits << pvt->dcs_shift); 375e2ce7255SDoug Thompson 376e2ce7255SDoug Thompson /* 377e2ce7255SDoug Thompson * The extracted bits from DCSM belong in the spaces represented by 378e2ce7255SDoug Thompson * the cleared bits in other_bits. 379e2ce7255SDoug Thompson */ 380e2ce7255SDoug Thompson mask = (dcsm_bits << pvt->dcs_shift) | other_bits; 381e2ce7255SDoug Thompson 382e2ce7255SDoug Thompson return mask; 383e2ce7255SDoug Thompson } 384e2ce7255SDoug Thompson 385e2ce7255SDoug Thompson /* 386e2ce7255SDoug Thompson * @input_addr is an InputAddr associated with the node given by mci. Return the 387e2ce7255SDoug Thompson * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr). 388e2ce7255SDoug Thompson */ 389e2ce7255SDoug Thompson static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr) 390e2ce7255SDoug Thompson { 391e2ce7255SDoug Thompson struct amd64_pvt *pvt; 392e2ce7255SDoug Thompson int csrow; 393e2ce7255SDoug Thompson u64 base, mask; 394e2ce7255SDoug Thompson 395e2ce7255SDoug Thompson pvt = mci->pvt_info; 396e2ce7255SDoug Thompson 397e2ce7255SDoug Thompson /* 398e2ce7255SDoug Thompson * Here we use the DRAM CS Base and DRAM CS Mask registers. For each CS 399e2ce7255SDoug Thompson * base/mask register pair, test the condition shown near the start of 400e2ce7255SDoug Thompson * section 3.5.4 (p. 84, BKDG #26094, K8, revA-E). 
401e2ce7255SDoug Thompson */ 4029d858bb1SBorislav Petkov for (csrow = 0; csrow < pvt->cs_count; csrow++) { 403e2ce7255SDoug Thompson 404e2ce7255SDoug Thompson /* This DRAM chip select is disabled on this node */ 405e2ce7255SDoug Thompson if ((pvt->dcsb0[csrow] & K8_DCSB_CS_ENABLE) == 0) 406e2ce7255SDoug Thompson continue; 407e2ce7255SDoug Thompson 408e2ce7255SDoug Thompson base = base_from_dct_base(pvt, csrow); 409e2ce7255SDoug Thompson mask = ~mask_from_dct_mask(pvt, csrow); 410e2ce7255SDoug Thompson 411e2ce7255SDoug Thompson if ((input_addr & mask) == (base & mask)) { 412e2ce7255SDoug Thompson debugf2("InputAddr 0x%lx matches csrow %d (node %d)\n", 413e2ce7255SDoug Thompson (unsigned long)input_addr, csrow, 414e2ce7255SDoug Thompson pvt->mc_node_id); 415e2ce7255SDoug Thompson 416e2ce7255SDoug Thompson return csrow; 417e2ce7255SDoug Thompson } 418e2ce7255SDoug Thompson } 419e2ce7255SDoug Thompson 420e2ce7255SDoug Thompson debugf2("no matching csrow for InputAddr 0x%lx (MC node %d)\n", 421e2ce7255SDoug Thompson (unsigned long)input_addr, pvt->mc_node_id); 422e2ce7255SDoug Thompson 423e2ce7255SDoug Thompson return -1; 424e2ce7255SDoug Thompson } 425e2ce7255SDoug Thompson 426e2ce7255SDoug Thompson /* 427e2ce7255SDoug Thompson * Return the base value defined by the DRAM Base register for the node 428e2ce7255SDoug Thompson * represented by mci. This function returns the full 40-bit value despite the 429e2ce7255SDoug Thompson * fact that the register only stores bits 39-24 of the value. 
See section 430e2ce7255SDoug Thompson * 3.4.4.1 (BKDG #26094, K8, revA-E) 431e2ce7255SDoug Thompson */ 432e2ce7255SDoug Thompson static inline u64 get_dram_base(struct mem_ctl_info *mci) 433e2ce7255SDoug Thompson { 434e2ce7255SDoug Thompson struct amd64_pvt *pvt = mci->pvt_info; 435e2ce7255SDoug Thompson 436e2ce7255SDoug Thompson return pvt->dram_base[pvt->mc_node_id]; 437e2ce7255SDoug Thompson } 438e2ce7255SDoug Thompson 439e2ce7255SDoug Thompson /* 440e2ce7255SDoug Thompson * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094) 441e2ce7255SDoug Thompson * for the node represented by mci. Info is passed back in *hole_base, 442e2ce7255SDoug Thompson * *hole_offset, and *hole_size. Function returns 0 if info is valid or 1 if 443e2ce7255SDoug Thompson * info is invalid. Info may be invalid for either of the following reasons: 444e2ce7255SDoug Thompson * 445e2ce7255SDoug Thompson * - The revision of the node is not E or greater. In this case, the DRAM Hole 446e2ce7255SDoug Thompson * Address Register does not exist. 447e2ce7255SDoug Thompson * 448e2ce7255SDoug Thompson * - The DramHoleValid bit is cleared in the DRAM Hole Address Register, 449e2ce7255SDoug Thompson * indicating that its contents are not valid. 450e2ce7255SDoug Thompson * 451e2ce7255SDoug Thompson * The values passed back in *hole_base, *hole_offset, and *hole_size are 452e2ce7255SDoug Thompson * complete 32-bit values despite the fact that the bitfields in the DHAR 453e2ce7255SDoug Thompson * only represent bits 31-24 of the base and offset values. 
454e2ce7255SDoug Thompson */ 455e2ce7255SDoug Thompson int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base, 456e2ce7255SDoug Thompson u64 *hole_offset, u64 *hole_size) 457e2ce7255SDoug Thompson { 458e2ce7255SDoug Thompson struct amd64_pvt *pvt = mci->pvt_info; 459e2ce7255SDoug Thompson u64 base; 460e2ce7255SDoug Thompson 461e2ce7255SDoug Thompson /* only revE and later have the DRAM Hole Address Register */ 4621433eb99SBorislav Petkov if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_E) { 463e2ce7255SDoug Thompson debugf1(" revision %d for node %d does not support DHAR\n", 464e2ce7255SDoug Thompson pvt->ext_model, pvt->mc_node_id); 465e2ce7255SDoug Thompson return 1; 466e2ce7255SDoug Thompson } 467e2ce7255SDoug Thompson 468e2ce7255SDoug Thompson /* only valid for Fam10h */ 469e2ce7255SDoug Thompson if (boot_cpu_data.x86 == 0x10 && 470e2ce7255SDoug Thompson (pvt->dhar & F10_DRAM_MEM_HOIST_VALID) == 0) { 471e2ce7255SDoug Thompson debugf1(" Dram Memory Hoisting is DISABLED on this system\n"); 472e2ce7255SDoug Thompson return 1; 473e2ce7255SDoug Thompson } 474e2ce7255SDoug Thompson 475e2ce7255SDoug Thompson if ((pvt->dhar & DHAR_VALID) == 0) { 476e2ce7255SDoug Thompson debugf1(" Dram Memory Hoisting is DISABLED on this node %d\n", 477e2ce7255SDoug Thompson pvt->mc_node_id); 478e2ce7255SDoug Thompson return 1; 479e2ce7255SDoug Thompson } 480e2ce7255SDoug Thompson 481e2ce7255SDoug Thompson /* This node has Memory Hoisting */ 482e2ce7255SDoug Thompson 483e2ce7255SDoug Thompson /* +------------------+--------------------+--------------------+----- 484e2ce7255SDoug Thompson * | memory | DRAM hole | relocated | 485e2ce7255SDoug Thompson * | [0, (x - 1)] | [x, 0xffffffff] | addresses from | 486e2ce7255SDoug Thompson * | | | DRAM hole | 487e2ce7255SDoug Thompson * | | | [0x100000000, | 488e2ce7255SDoug Thompson * | | | (0x100000000+ | 489e2ce7255SDoug Thompson * | | | (0xffffffff-x))] | 490e2ce7255SDoug Thompson * 
+------------------+--------------------+--------------------+----- 491e2ce7255SDoug Thompson * 492e2ce7255SDoug Thompson * Above is a diagram of physical memory showing the DRAM hole and the 493e2ce7255SDoug Thompson * relocated addresses from the DRAM hole. As shown, the DRAM hole 494e2ce7255SDoug Thompson * starts at address x (the base address) and extends through address 495e2ce7255SDoug Thompson * 0xffffffff. The DRAM Hole Address Register (DHAR) relocates the 496e2ce7255SDoug Thompson * addresses in the hole so that they start at 0x100000000. 497e2ce7255SDoug Thompson */ 498e2ce7255SDoug Thompson 499e2ce7255SDoug Thompson base = dhar_base(pvt->dhar); 500e2ce7255SDoug Thompson 501e2ce7255SDoug Thompson *hole_base = base; 502e2ce7255SDoug Thompson *hole_size = (0x1ull << 32) - base; 503e2ce7255SDoug Thompson 504e2ce7255SDoug Thompson if (boot_cpu_data.x86 > 0xf) 505e2ce7255SDoug Thompson *hole_offset = f10_dhar_offset(pvt->dhar); 506e2ce7255SDoug Thompson else 507e2ce7255SDoug Thompson *hole_offset = k8_dhar_offset(pvt->dhar); 508e2ce7255SDoug Thompson 509e2ce7255SDoug Thompson debugf1(" DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n", 510e2ce7255SDoug Thompson pvt->mc_node_id, (unsigned long)*hole_base, 511e2ce7255SDoug Thompson (unsigned long)*hole_offset, (unsigned long)*hole_size); 512e2ce7255SDoug Thompson 513e2ce7255SDoug Thompson return 0; 514e2ce7255SDoug Thompson } 515e2ce7255SDoug Thompson EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info); 516e2ce7255SDoug Thompson 51793c2df58SDoug Thompson /* 51893c2df58SDoug Thompson * Return the DramAddr that the SysAddr given by @sys_addr maps to. It is 51993c2df58SDoug Thompson * assumed that sys_addr maps to the node given by mci. 52093c2df58SDoug Thompson * 52193c2df58SDoug Thompson * The first part of section 3.4.4 (p. 
70) shows how the DRAM Base (section 52293c2df58SDoug Thompson * 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers are used to translate a 52393c2df58SDoug Thompson * SysAddr to a DramAddr. If the DRAM Hole Address Register (DHAR) is enabled, 52493c2df58SDoug Thompson * then it is also involved in translating a SysAddr to a DramAddr. Sections 52593c2df58SDoug Thompson * 3.4.8 and 3.5.8.2 describe the DHAR and how it is used for memory hoisting. 52693c2df58SDoug Thompson * These parts of the documentation are unclear. I interpret them as follows: 52793c2df58SDoug Thompson * 52893c2df58SDoug Thompson * When node n receives a SysAddr, it processes the SysAddr as follows: 52993c2df58SDoug Thompson * 53093c2df58SDoug Thompson * 1. It extracts the DRAMBase and DRAMLimit values from the DRAM Base and DRAM 53193c2df58SDoug Thompson * Limit registers for node n. If the SysAddr is not within the range 53293c2df58SDoug Thompson * specified by the base and limit values, then node n ignores the Sysaddr 53393c2df58SDoug Thompson * (since it does not map to node n). Otherwise continue to step 2 below. 53493c2df58SDoug Thompson * 53593c2df58SDoug Thompson * 2. If the DramHoleValid bit of the DHAR for node n is clear, the DHAR is 53693c2df58SDoug Thompson * disabled so skip to step 3 below. Otherwise see if the SysAddr is within 53793c2df58SDoug Thompson * the range of relocated addresses (starting at 0x100000000) from the DRAM 53893c2df58SDoug Thompson * hole. If not, skip to step 3 below. Else get the value of the 53993c2df58SDoug Thompson * DramHoleOffset field from the DHAR. To obtain the DramAddr, subtract the 54093c2df58SDoug Thompson * offset defined by this value from the SysAddr. 54193c2df58SDoug Thompson * 54293c2df58SDoug Thompson * 3. Obtain the base address for node n from the DRAMBase field of the DRAM 54393c2df58SDoug Thompson * Base register for node n. 
To obtain the DramAddr, subtract the base 54493c2df58SDoug Thompson * address from the SysAddr, as shown near the start of section 3.4.4 (p.70). 54593c2df58SDoug Thompson */ 54693c2df58SDoug Thompson static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr) 54793c2df58SDoug Thompson { 54893c2df58SDoug Thompson u64 dram_base, hole_base, hole_offset, hole_size, dram_addr; 54993c2df58SDoug Thompson int ret = 0; 55093c2df58SDoug Thompson 55193c2df58SDoug Thompson dram_base = get_dram_base(mci); 55293c2df58SDoug Thompson 55393c2df58SDoug Thompson ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset, 55493c2df58SDoug Thompson &hole_size); 55593c2df58SDoug Thompson if (!ret) { 55693c2df58SDoug Thompson if ((sys_addr >= (1ull << 32)) && 55793c2df58SDoug Thompson (sys_addr < ((1ull << 32) + hole_size))) { 55893c2df58SDoug Thompson /* use DHAR to translate SysAddr to DramAddr */ 55993c2df58SDoug Thompson dram_addr = sys_addr - hole_offset; 56093c2df58SDoug Thompson 56193c2df58SDoug Thompson debugf2("using DHAR to translate SysAddr 0x%lx to " 56293c2df58SDoug Thompson "DramAddr 0x%lx\n", 56393c2df58SDoug Thompson (unsigned long)sys_addr, 56493c2df58SDoug Thompson (unsigned long)dram_addr); 56593c2df58SDoug Thompson 56693c2df58SDoug Thompson return dram_addr; 56793c2df58SDoug Thompson } 56893c2df58SDoug Thompson } 56993c2df58SDoug Thompson 57093c2df58SDoug Thompson /* 57193c2df58SDoug Thompson * Translate the SysAddr to a DramAddr as shown near the start of 57293c2df58SDoug Thompson * section 3.4.4 (p. 70). Although sys_addr is a 64-bit value, the k8 57393c2df58SDoug Thompson * only deals with 40-bit values. Therefore we discard bits 63-40 of 57493c2df58SDoug Thompson * sys_addr below. If bit 39 of sys_addr is 1 then the bits we 57593c2df58SDoug Thompson * discard are all 1s. Otherwise the bits we discard are all 0s. 
See 57693c2df58SDoug Thompson * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture 57793c2df58SDoug Thompson * Programmer's Manual Volume 1 Application Programming. 57893c2df58SDoug Thompson */ 57993c2df58SDoug Thompson dram_addr = (sys_addr & 0xffffffffffull) - dram_base; 58093c2df58SDoug Thompson 58193c2df58SDoug Thompson debugf2("using DRAM Base register to translate SysAddr 0x%lx to " 58293c2df58SDoug Thompson "DramAddr 0x%lx\n", (unsigned long)sys_addr, 58393c2df58SDoug Thompson (unsigned long)dram_addr); 58493c2df58SDoug Thompson return dram_addr; 58593c2df58SDoug Thompson } 58693c2df58SDoug Thompson 58793c2df58SDoug Thompson /* 58893c2df58SDoug Thompson * @intlv_en is the value of the IntlvEn field from a DRAM Base register 58993c2df58SDoug Thompson * (section 3.4.4.1). Return the number of bits from a SysAddr that are used 59093c2df58SDoug Thompson * for node interleaving. 59193c2df58SDoug Thompson */ 59293c2df58SDoug Thompson static int num_node_interleave_bits(unsigned intlv_en) 59393c2df58SDoug Thompson { 59493c2df58SDoug Thompson static const int intlv_shift_table[] = { 0, 1, 0, 2, 0, 0, 0, 3 }; 59593c2df58SDoug Thompson int n; 59693c2df58SDoug Thompson 59793c2df58SDoug Thompson BUG_ON(intlv_en > 7); 59893c2df58SDoug Thompson n = intlv_shift_table[intlv_en]; 59993c2df58SDoug Thompson return n; 60093c2df58SDoug Thompson } 60193c2df58SDoug Thompson 60293c2df58SDoug Thompson /* Translate the DramAddr given by @dram_addr to an InputAddr. */ 60393c2df58SDoug Thompson static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr) 60493c2df58SDoug Thompson { 60593c2df58SDoug Thompson struct amd64_pvt *pvt; 60693c2df58SDoug Thompson int intlv_shift; 60793c2df58SDoug Thompson u64 input_addr; 60893c2df58SDoug Thompson 60993c2df58SDoug Thompson pvt = mci->pvt_info; 61093c2df58SDoug Thompson 61193c2df58SDoug Thompson /* 61293c2df58SDoug Thompson * See the start of section 3.4.4 (p. 
70, BKDG #26094, K8, revA-E) 61393c2df58SDoug Thompson * concerning translating a DramAddr to an InputAddr. 61493c2df58SDoug Thompson */ 61593c2df58SDoug Thompson intlv_shift = num_node_interleave_bits(pvt->dram_IntlvEn[0]); 61693c2df58SDoug Thompson input_addr = ((dram_addr >> intlv_shift) & 0xffffff000ull) + 61793c2df58SDoug Thompson (dram_addr & 0xfff); 61893c2df58SDoug Thompson 61993c2df58SDoug Thompson debugf2(" Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n", 62093c2df58SDoug Thompson intlv_shift, (unsigned long)dram_addr, 62193c2df58SDoug Thompson (unsigned long)input_addr); 62293c2df58SDoug Thompson 62393c2df58SDoug Thompson return input_addr; 62493c2df58SDoug Thompson } 62593c2df58SDoug Thompson 62693c2df58SDoug Thompson /* 62793c2df58SDoug Thompson * Translate the SysAddr represented by @sys_addr to an InputAddr. It is 62893c2df58SDoug Thompson * assumed that @sys_addr maps to the node given by mci. 62993c2df58SDoug Thompson */ 63093c2df58SDoug Thompson static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr) 63193c2df58SDoug Thompson { 63293c2df58SDoug Thompson u64 input_addr; 63393c2df58SDoug Thompson 63493c2df58SDoug Thompson input_addr = 63593c2df58SDoug Thompson dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr)); 63693c2df58SDoug Thompson 63793c2df58SDoug Thompson debugf2("SysAdddr 0x%lx translates to InputAddr 0x%lx\n", 63893c2df58SDoug Thompson (unsigned long)sys_addr, (unsigned long)input_addr); 63993c2df58SDoug Thompson 64093c2df58SDoug Thompson return input_addr; 64193c2df58SDoug Thompson } 64293c2df58SDoug Thompson 64393c2df58SDoug Thompson 64493c2df58SDoug Thompson /* 64593c2df58SDoug Thompson * @input_addr is an InputAddr associated with the node represented by mci. 64693c2df58SDoug Thompson * Translate @input_addr to a DramAddr and return the result. 
64793c2df58SDoug Thompson */ 64893c2df58SDoug Thompson static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr) 64993c2df58SDoug Thompson { 65093c2df58SDoug Thompson struct amd64_pvt *pvt; 65193c2df58SDoug Thompson int node_id, intlv_shift; 65293c2df58SDoug Thompson u64 bits, dram_addr; 65393c2df58SDoug Thompson u32 intlv_sel; 65493c2df58SDoug Thompson 65593c2df58SDoug Thompson /* 65693c2df58SDoug Thompson * Near the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E) 65793c2df58SDoug Thompson * shows how to translate a DramAddr to an InputAddr. Here we reverse 65893c2df58SDoug Thompson * this procedure. When translating from a DramAddr to an InputAddr, the 65993c2df58SDoug Thompson * bits used for node interleaving are discarded. Here we recover these 66093c2df58SDoug Thompson * bits from the IntlvSel field of the DRAM Limit register (section 66193c2df58SDoug Thompson * 3.4.4.2) for the node that input_addr is associated with. 66293c2df58SDoug Thompson */ 66393c2df58SDoug Thompson pvt = mci->pvt_info; 66493c2df58SDoug Thompson node_id = pvt->mc_node_id; 66593c2df58SDoug Thompson BUG_ON((node_id < 0) || (node_id > 7)); 66693c2df58SDoug Thompson 66793c2df58SDoug Thompson intlv_shift = num_node_interleave_bits(pvt->dram_IntlvEn[0]); 66893c2df58SDoug Thompson 66993c2df58SDoug Thompson if (intlv_shift == 0) { 67093c2df58SDoug Thompson debugf1(" InputAddr 0x%lx translates to DramAddr of " 67193c2df58SDoug Thompson "same value\n", (unsigned long)input_addr); 67293c2df58SDoug Thompson 67393c2df58SDoug Thompson return input_addr; 67493c2df58SDoug Thompson } 67593c2df58SDoug Thompson 67693c2df58SDoug Thompson bits = ((input_addr & 0xffffff000ull) << intlv_shift) + 67793c2df58SDoug Thompson (input_addr & 0xfff); 67893c2df58SDoug Thompson 67993c2df58SDoug Thompson intlv_sel = pvt->dram_IntlvSel[node_id] & ((1 << intlv_shift) - 1); 68093c2df58SDoug Thompson dram_addr = bits + (intlv_sel << 12); 68193c2df58SDoug Thompson 68293c2df58SDoug Thompson 
debugf1("InputAddr 0x%lx translates to DramAddr 0x%lx " 68393c2df58SDoug Thompson "(%d node interleave bits)\n", (unsigned long)input_addr, 68493c2df58SDoug Thompson (unsigned long)dram_addr, intlv_shift); 68593c2df58SDoug Thompson 68693c2df58SDoug Thompson return dram_addr; 68793c2df58SDoug Thompson } 68893c2df58SDoug Thompson 68993c2df58SDoug Thompson /* 69093c2df58SDoug Thompson * @dram_addr is a DramAddr that maps to the node represented by mci. Convert 69193c2df58SDoug Thompson * @dram_addr to a SysAddr. 69293c2df58SDoug Thompson */ 69393c2df58SDoug Thompson static u64 dram_addr_to_sys_addr(struct mem_ctl_info *mci, u64 dram_addr) 69493c2df58SDoug Thompson { 69593c2df58SDoug Thompson struct amd64_pvt *pvt = mci->pvt_info; 69693c2df58SDoug Thompson u64 hole_base, hole_offset, hole_size, base, limit, sys_addr; 69793c2df58SDoug Thompson int ret = 0; 69893c2df58SDoug Thompson 69993c2df58SDoug Thompson ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset, 70093c2df58SDoug Thompson &hole_size); 70193c2df58SDoug Thompson if (!ret) { 70293c2df58SDoug Thompson if ((dram_addr >= hole_base) && 70393c2df58SDoug Thompson (dram_addr < (hole_base + hole_size))) { 70493c2df58SDoug Thompson sys_addr = dram_addr + hole_offset; 70593c2df58SDoug Thompson 70693c2df58SDoug Thompson debugf1("using DHAR to translate DramAddr 0x%lx to " 70793c2df58SDoug Thompson "SysAddr 0x%lx\n", (unsigned long)dram_addr, 70893c2df58SDoug Thompson (unsigned long)sys_addr); 70993c2df58SDoug Thompson 71093c2df58SDoug Thompson return sys_addr; 71193c2df58SDoug Thompson } 71293c2df58SDoug Thompson } 71393c2df58SDoug Thompson 71493c2df58SDoug Thompson amd64_get_base_and_limit(pvt, pvt->mc_node_id, &base, &limit); 71593c2df58SDoug Thompson sys_addr = dram_addr + base; 71693c2df58SDoug Thompson 71793c2df58SDoug Thompson /* 71893c2df58SDoug Thompson * The sys_addr we have computed up to this point is a 40-bit value 71993c2df58SDoug Thompson * because the k8 deals with 40-bit values. 
However, the value we are 72093c2df58SDoug Thompson * supposed to return is a full 64-bit physical address. The AMD 72193c2df58SDoug Thompson * x86-64 architecture specifies that the most significant implemented 72293c2df58SDoug Thompson * address bit through bit 63 of a physical address must be either all 72393c2df58SDoug Thompson * 0s or all 1s. Therefore we sign-extend the 40-bit sys_addr to a 72493c2df58SDoug Thompson * 64-bit value below. See section 3.4.2 of AMD publication 24592: 72593c2df58SDoug Thompson * AMD x86-64 Architecture Programmer's Manual Volume 1 Application 72693c2df58SDoug Thompson * Programming. 72793c2df58SDoug Thompson */ 72893c2df58SDoug Thompson sys_addr |= ~((sys_addr & (1ull << 39)) - 1); 72993c2df58SDoug Thompson 73093c2df58SDoug Thompson debugf1(" Node %d, DramAddr 0x%lx to SysAddr 0x%lx\n", 73193c2df58SDoug Thompson pvt->mc_node_id, (unsigned long)dram_addr, 73293c2df58SDoug Thompson (unsigned long)sys_addr); 73393c2df58SDoug Thompson 73493c2df58SDoug Thompson return sys_addr; 73593c2df58SDoug Thompson } 73693c2df58SDoug Thompson 73793c2df58SDoug Thompson /* 73893c2df58SDoug Thompson * @input_addr is an InputAddr associated with the node given by mci. Translate 73993c2df58SDoug Thompson * @input_addr to a SysAddr. 74093c2df58SDoug Thompson */ 74193c2df58SDoug Thompson static inline u64 input_addr_to_sys_addr(struct mem_ctl_info *mci, 74293c2df58SDoug Thompson u64 input_addr) 74393c2df58SDoug Thompson { 74493c2df58SDoug Thompson return dram_addr_to_sys_addr(mci, 74593c2df58SDoug Thompson input_addr_to_dram_addr(mci, input_addr)); 74693c2df58SDoug Thompson } 74793c2df58SDoug Thompson 74893c2df58SDoug Thompson /* 74993c2df58SDoug Thompson * Find the minimum and maximum InputAddr values that map to the given @csrow. 75093c2df58SDoug Thompson * Pass back these values in *input_addr_min and *input_addr_max. 
75193c2df58SDoug Thompson */ 75293c2df58SDoug Thompson static void find_csrow_limits(struct mem_ctl_info *mci, int csrow, 75393c2df58SDoug Thompson u64 *input_addr_min, u64 *input_addr_max) 75493c2df58SDoug Thompson { 75593c2df58SDoug Thompson struct amd64_pvt *pvt; 75693c2df58SDoug Thompson u64 base, mask; 75793c2df58SDoug Thompson 75893c2df58SDoug Thompson pvt = mci->pvt_info; 7599d858bb1SBorislav Petkov BUG_ON((csrow < 0) || (csrow >= pvt->cs_count)); 76093c2df58SDoug Thompson 76193c2df58SDoug Thompson base = base_from_dct_base(pvt, csrow); 76293c2df58SDoug Thompson mask = mask_from_dct_mask(pvt, csrow); 76393c2df58SDoug Thompson 76493c2df58SDoug Thompson *input_addr_min = base & ~mask; 76593c2df58SDoug Thompson *input_addr_max = base | mask | pvt->dcs_mask_notused; 76693c2df58SDoug Thompson } 76793c2df58SDoug Thompson 76893c2df58SDoug Thompson /* Map the Error address to a PAGE and PAGE OFFSET. */ 76993c2df58SDoug Thompson static inline void error_address_to_page_and_offset(u64 error_address, 77093c2df58SDoug Thompson u32 *page, u32 *offset) 77193c2df58SDoug Thompson { 77293c2df58SDoug Thompson *page = (u32) (error_address >> PAGE_SHIFT); 77393c2df58SDoug Thompson *offset = ((u32) error_address) & ~PAGE_MASK; 77493c2df58SDoug Thompson } 77593c2df58SDoug Thompson 77693c2df58SDoug Thompson /* 77793c2df58SDoug Thompson * @sys_addr is an error address (a SysAddr) extracted from the MCA NB Address 77893c2df58SDoug Thompson * Low (section 3.6.4.5) and MCA NB Address High (section 3.6.4.6) registers 77993c2df58SDoug Thompson * of a node that detected an ECC memory error. mci represents the node that 78093c2df58SDoug Thompson * the error address maps to (possibly different from the node that detected 78193c2df58SDoug Thompson * the error). Return the number of the csrow that sys_addr maps to, or -1 on 78293c2df58SDoug Thompson * error. 
78393c2df58SDoug Thompson */ 78493c2df58SDoug Thompson static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr) 78593c2df58SDoug Thompson { 78693c2df58SDoug Thompson int csrow; 78793c2df58SDoug Thompson 78893c2df58SDoug Thompson csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr)); 78993c2df58SDoug Thompson 79093c2df58SDoug Thompson if (csrow == -1) 79193c2df58SDoug Thompson amd64_mc_printk(mci, KERN_ERR, 79293c2df58SDoug Thompson "Failed to translate InputAddr to csrow for " 79393c2df58SDoug Thompson "address 0x%lx\n", (unsigned long)sys_addr); 79493c2df58SDoug Thompson return csrow; 79593c2df58SDoug Thompson } 796e2ce7255SDoug Thompson 797bfc04aecSBorislav Petkov static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16); 7982da11654SDoug Thompson 7992da11654SDoug Thompson static void amd64_cpu_display_info(struct amd64_pvt *pvt) 8002da11654SDoug Thompson { 8012da11654SDoug Thompson if (boot_cpu_data.x86 == 0x11) 8022da11654SDoug Thompson edac_printk(KERN_DEBUG, EDAC_MC, "F11h CPU detected\n"); 8032da11654SDoug Thompson else if (boot_cpu_data.x86 == 0x10) 8042da11654SDoug Thompson edac_printk(KERN_DEBUG, EDAC_MC, "F10h CPU detected\n"); 8052da11654SDoug Thompson else if (boot_cpu_data.x86 == 0xf) 8062da11654SDoug Thompson edac_printk(KERN_DEBUG, EDAC_MC, "%s detected\n", 8071433eb99SBorislav Petkov (pvt->ext_model >= K8_REV_F) ? 8082da11654SDoug Thompson "Rev F or later" : "Rev E or earlier"); 8092da11654SDoug Thompson else 8102da11654SDoug Thompson /* we'll hardly ever ever get here */ 8112da11654SDoug Thompson edac_printk(KERN_ERR, EDAC_MC, "Unknown cpu!\n"); 8122da11654SDoug Thompson } 8132da11654SDoug Thompson 8142da11654SDoug Thompson /* 8152da11654SDoug Thompson * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs 8162da11654SDoug Thompson * are ECC capable. 
8172da11654SDoug Thompson */ 8182da11654SDoug Thompson static enum edac_type amd64_determine_edac_cap(struct amd64_pvt *pvt) 8192da11654SDoug Thompson { 8202da11654SDoug Thompson int bit; 821584fcff4SBorislav Petkov enum dev_type edac_cap = EDAC_FLAG_NONE; 8222da11654SDoug Thompson 8231433eb99SBorislav Petkov bit = (boot_cpu_data.x86 > 0xf || pvt->ext_model >= K8_REV_F) 8242da11654SDoug Thompson ? 19 8252da11654SDoug Thompson : 17; 8262da11654SDoug Thompson 827584fcff4SBorislav Petkov if (pvt->dclr0 & BIT(bit)) 8282da11654SDoug Thompson edac_cap = EDAC_FLAG_SECDED; 8292da11654SDoug Thompson 8302da11654SDoug Thompson return edac_cap; 8312da11654SDoug Thompson } 8322da11654SDoug Thompson 8332da11654SDoug Thompson 8348566c4dfSBorislav Petkov static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt); 8352da11654SDoug Thompson 83668798e17SBorislav Petkov static void amd64_dump_dramcfg_low(u32 dclr, int chan) 83768798e17SBorislav Petkov { 83868798e17SBorislav Petkov debugf1("F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr); 83968798e17SBorislav Petkov 84068798e17SBorislav Petkov debugf1(" DIMM type: %sbuffered; all DIMMs support ECC: %s\n", 84168798e17SBorislav Petkov (dclr & BIT(16)) ? "un" : "", 84268798e17SBorislav Petkov (dclr & BIT(19)) ? "yes" : "no"); 84368798e17SBorislav Petkov 84468798e17SBorislav Petkov debugf1(" PAR/ERR parity: %s\n", 84568798e17SBorislav Petkov (dclr & BIT(8)) ? "enabled" : "disabled"); 84668798e17SBorislav Petkov 84768798e17SBorislav Petkov debugf1(" DCT 128bit mode width: %s\n", 84868798e17SBorislav Petkov (dclr & BIT(11)) ? "128b" : "64b"); 84968798e17SBorislav Petkov 85068798e17SBorislav Petkov debugf1(" x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n", 85168798e17SBorislav Petkov (dclr & BIT(12)) ? "yes" : "no", 85268798e17SBorislav Petkov (dclr & BIT(13)) ? "yes" : "no", 85368798e17SBorislav Petkov (dclr & BIT(14)) ? "yes" : "no", 85468798e17SBorislav Petkov (dclr & BIT(15)) ? 
"yes" : "no"); 85568798e17SBorislav Petkov } 85668798e17SBorislav Petkov 8572da11654SDoug Thompson /* Display and decode various NB registers for debug purposes. */ 8582da11654SDoug Thompson static void amd64_dump_misc_regs(struct amd64_pvt *pvt) 8592da11654SDoug Thompson { 8602da11654SDoug Thompson int ganged; 8612da11654SDoug Thompson 86268798e17SBorislav Petkov debugf1("F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap); 8632da11654SDoug Thompson 86468798e17SBorislav Petkov debugf1(" NB two channel DRAM capable: %s\n", 86568798e17SBorislav Petkov (pvt->nbcap & K8_NBCAP_DCT_DUAL) ? "yes" : "no"); 86668798e17SBorislav Petkov 86768798e17SBorislav Petkov debugf1(" ECC capable: %s, ChipKill ECC capable: %s\n", 86868798e17SBorislav Petkov (pvt->nbcap & K8_NBCAP_SECDED) ? "yes" : "no", 86968798e17SBorislav Petkov (pvt->nbcap & K8_NBCAP_CHIPKILL) ? "yes" : "no"); 87068798e17SBorislav Petkov 87168798e17SBorislav Petkov amd64_dump_dramcfg_low(pvt->dclr0, 0); 8722da11654SDoug Thompson 8738de1d91eSBorislav Petkov debugf1("F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare); 8742da11654SDoug Thompson 8758de1d91eSBorislav Petkov debugf1("F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, " 8768de1d91eSBorislav Petkov "offset: 0x%08x\n", 8778de1d91eSBorislav Petkov pvt->dhar, 8788de1d91eSBorislav Petkov dhar_base(pvt->dhar), 8798de1d91eSBorislav Petkov (boot_cpu_data.x86 == 0xf) ? k8_dhar_offset(pvt->dhar) 8808de1d91eSBorislav Petkov : f10_dhar_offset(pvt->dhar)); 8812da11654SDoug Thompson 8828de1d91eSBorislav Petkov debugf1(" DramHoleValid: %s\n", 8838de1d91eSBorislav Petkov (pvt->dhar & DHAR_VALID) ? 
"yes" : "no"); 8842da11654SDoug Thompson 8852da11654SDoug Thompson /* everything below this point is Fam10h and above */ 8868566c4dfSBorislav Petkov if (boot_cpu_data.x86 == 0xf) { 8878566c4dfSBorislav Petkov amd64_debug_display_dimm_sizes(0, pvt); 8882da11654SDoug Thompson return; 8898566c4dfSBorislav Petkov } 8902da11654SDoug Thompson 8918de1d91eSBorislav Petkov /* Only if NOT ganged does dclr1 have valid info */ 89268798e17SBorislav Petkov if (!dct_ganging_enabled(pvt)) 89368798e17SBorislav Petkov amd64_dump_dramcfg_low(pvt->dclr1, 1); 8942da11654SDoug Thompson 8952da11654SDoug Thompson /* 8962da11654SDoug Thompson * Determine if ganged and then dump memory sizes for first controller, 8972da11654SDoug Thompson * and if NOT ganged dump info for 2nd controller. 8982da11654SDoug Thompson */ 8992da11654SDoug Thompson ganged = dct_ganging_enabled(pvt); 9002da11654SDoug Thompson 9018566c4dfSBorislav Petkov amd64_debug_display_dimm_sizes(0, pvt); 9022da11654SDoug Thompson 9032da11654SDoug Thompson if (!ganged) 9048566c4dfSBorislav Petkov amd64_debug_display_dimm_sizes(1, pvt); 9052da11654SDoug Thompson } 9062da11654SDoug Thompson 9072da11654SDoug Thompson /* Read in both of DBAM registers */ 9082da11654SDoug Thompson static void amd64_read_dbam_reg(struct amd64_pvt *pvt) 9092da11654SDoug Thompson { 9106ba5dcdcSBorislav Petkov amd64_read_pci_cfg(pvt->dram_f2_ctl, DBAM0, &pvt->dbam0); 9112da11654SDoug Thompson 9126ba5dcdcSBorislav Petkov if (boot_cpu_data.x86 >= 0x10) 9136ba5dcdcSBorislav Petkov amd64_read_pci_cfg(pvt->dram_f2_ctl, DBAM1, &pvt->dbam1); 9142da11654SDoug Thompson } 9152da11654SDoug Thompson 91694be4bffSDoug Thompson /* 91794be4bffSDoug Thompson * NOTE: CPU Revision Dependent code: Rev E and Rev F 91894be4bffSDoug Thompson * 91994be4bffSDoug Thompson * Set the DCSB and DCSM mask values depending on the CPU revision value. Also 92094be4bffSDoug Thompson * set the shift factor for the DCSB and DCSM values. 
92194be4bffSDoug Thompson * 92294be4bffSDoug Thompson * ->dcs_mask_notused, RevE: 92394be4bffSDoug Thompson * 92494be4bffSDoug Thompson * To find the max InputAddr for the csrow, start with the base address and set 92594be4bffSDoug Thompson * all bits that are "don't care" bits in the test at the start of section 92694be4bffSDoug Thompson * 3.5.4 (p. 84). 92794be4bffSDoug Thompson * 92894be4bffSDoug Thompson * The "don't care" bits are all set bits in the mask and all bits in the gaps 92994be4bffSDoug Thompson * between bit ranges [35:25] and [19:13]. The value REV_E_DCS_NOTUSED_BITS 93094be4bffSDoug Thompson * represents bits [24:20] and [12:0], which are all bits in the above-mentioned 93194be4bffSDoug Thompson * gaps. 93294be4bffSDoug Thompson * 93394be4bffSDoug Thompson * ->dcs_mask_notused, RevF and later: 93494be4bffSDoug Thompson * 93594be4bffSDoug Thompson * To find the max InputAddr for the csrow, start with the base address and set 93694be4bffSDoug Thompson * all bits that are "don't care" bits in the test at the start of NPT section 93794be4bffSDoug Thompson * 4.5.4 (p. 87). 93894be4bffSDoug Thompson * 93994be4bffSDoug Thompson * The "don't care" bits are all set bits in the mask and all bits in the gaps 94094be4bffSDoug Thompson * between bit ranges [36:27] and [21:13]. 94194be4bffSDoug Thompson * 94294be4bffSDoug Thompson * The value REV_F_F1Xh_DCS_NOTUSED_BITS represents bits [26:22] and [12:0], 94394be4bffSDoug Thompson * which are all bits in the above-mentioned gaps. 
94494be4bffSDoug Thompson */ 94594be4bffSDoug Thompson static void amd64_set_dct_base_and_mask(struct amd64_pvt *pvt) 94694be4bffSDoug Thompson { 9479d858bb1SBorislav Petkov 9481433eb99SBorislav Petkov if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) { 9499d858bb1SBorislav Petkov pvt->dcsb_base = REV_E_DCSB_BASE_BITS; 9509d858bb1SBorislav Petkov pvt->dcsm_mask = REV_E_DCSM_MASK_BITS; 9519d858bb1SBorislav Petkov pvt->dcs_mask_notused = REV_E_DCS_NOTUSED_BITS; 9529d858bb1SBorislav Petkov pvt->dcs_shift = REV_E_DCS_SHIFT; 9539d858bb1SBorislav Petkov pvt->cs_count = 8; 9549d858bb1SBorislav Petkov pvt->num_dcsm = 8; 9559d858bb1SBorislav Petkov } else { 95694be4bffSDoug Thompson pvt->dcsb_base = REV_F_F1Xh_DCSB_BASE_BITS; 95794be4bffSDoug Thompson pvt->dcsm_mask = REV_F_F1Xh_DCSM_MASK_BITS; 95894be4bffSDoug Thompson pvt->dcs_mask_notused = REV_F_F1Xh_DCS_NOTUSED_BITS; 95994be4bffSDoug Thompson pvt->dcs_shift = REV_F_F1Xh_DCS_SHIFT; 96094be4bffSDoug Thompson 9619d858bb1SBorislav Petkov if (boot_cpu_data.x86 == 0x11) { 9629d858bb1SBorislav Petkov pvt->cs_count = 4; 9639d858bb1SBorislav Petkov pvt->num_dcsm = 2; 96494be4bffSDoug Thompson } else { 9659d858bb1SBorislav Petkov pvt->cs_count = 8; 9669d858bb1SBorislav Petkov pvt->num_dcsm = 4; 9679d858bb1SBorislav Petkov } 96894be4bffSDoug Thompson } 96994be4bffSDoug Thompson } 97094be4bffSDoug Thompson 97194be4bffSDoug Thompson /* 97294be4bffSDoug Thompson * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask hw registers 97394be4bffSDoug Thompson */ 97494be4bffSDoug Thompson static void amd64_read_dct_base_mask(struct amd64_pvt *pvt) 97594be4bffSDoug Thompson { 9766ba5dcdcSBorislav Petkov int cs, reg; 97794be4bffSDoug Thompson 97894be4bffSDoug Thompson amd64_set_dct_base_and_mask(pvt); 97994be4bffSDoug Thompson 9809d858bb1SBorislav Petkov for (cs = 0; cs < pvt->cs_count; cs++) { 98194be4bffSDoug Thompson reg = K8_DCSB0 + (cs * 4); 9826ba5dcdcSBorislav Petkov if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, reg, 
&pvt->dcsb0[cs])) 98394be4bffSDoug Thompson debugf0(" DCSB0[%d]=0x%08x reg: F2x%x\n", 98494be4bffSDoug Thompson cs, pvt->dcsb0[cs], reg); 98594be4bffSDoug Thompson 98694be4bffSDoug Thompson /* If DCT are NOT ganged, then read in DCT1's base */ 98794be4bffSDoug Thompson if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) { 98894be4bffSDoug Thompson reg = F10_DCSB1 + (cs * 4); 9896ba5dcdcSBorislav Petkov if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, reg, 9906ba5dcdcSBorislav Petkov &pvt->dcsb1[cs])) 99194be4bffSDoug Thompson debugf0(" DCSB1[%d]=0x%08x reg: F2x%x\n", 99294be4bffSDoug Thompson cs, pvt->dcsb1[cs], reg); 99394be4bffSDoug Thompson } else { 99494be4bffSDoug Thompson pvt->dcsb1[cs] = 0; 99594be4bffSDoug Thompson } 99694be4bffSDoug Thompson } 99794be4bffSDoug Thompson 99894be4bffSDoug Thompson for (cs = 0; cs < pvt->num_dcsm; cs++) { 9994afcd2dcSWan Wei reg = K8_DCSM0 + (cs * 4); 10006ba5dcdcSBorislav Petkov if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, reg, &pvt->dcsm0[cs])) 100194be4bffSDoug Thompson debugf0(" DCSM0[%d]=0x%08x reg: F2x%x\n", 100294be4bffSDoug Thompson cs, pvt->dcsm0[cs], reg); 100394be4bffSDoug Thompson 100494be4bffSDoug Thompson /* If DCT are NOT ganged, then read in DCT1's mask */ 100594be4bffSDoug Thompson if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) { 100694be4bffSDoug Thompson reg = F10_DCSM1 + (cs * 4); 10076ba5dcdcSBorislav Petkov if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, reg, 10086ba5dcdcSBorislav Petkov &pvt->dcsm1[cs])) 100994be4bffSDoug Thompson debugf0(" DCSM1[%d]=0x%08x reg: F2x%x\n", 101094be4bffSDoug Thompson cs, pvt->dcsm1[cs], reg); 10116ba5dcdcSBorislav Petkov } else { 101294be4bffSDoug Thompson pvt->dcsm1[cs] = 0; 101394be4bffSDoug Thompson } 101494be4bffSDoug Thompson } 10156ba5dcdcSBorislav Petkov } 101694be4bffSDoug Thompson 101794be4bffSDoug Thompson static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt) 101894be4bffSDoug Thompson { 101994be4bffSDoug Thompson enum mem_type type; 
102094be4bffSDoug Thompson 10211433eb99SBorislav Petkov if (boot_cpu_data.x86 >= 0x10 || pvt->ext_model >= K8_REV_F) { 10226b4c0bdeSBorislav Petkov if (pvt->dchr0 & DDR3_MODE) 10236b4c0bdeSBorislav Petkov type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3; 10246b4c0bdeSBorislav Petkov else 102594be4bffSDoug Thompson type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2; 102694be4bffSDoug Thompson } else { 102794be4bffSDoug Thompson type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR; 102894be4bffSDoug Thompson } 102994be4bffSDoug Thompson 1030239642feSBorislav Petkov debugf1(" Memory type is: %s\n", edac_mem_types[type]); 103194be4bffSDoug Thompson 103294be4bffSDoug Thompson return type; 103394be4bffSDoug Thompson } 103494be4bffSDoug Thompson 1035ddff876dSDoug Thompson /* 1036ddff876dSDoug Thompson * Read the DRAM Configuration Low register. It differs between CG, D & E revs 1037ddff876dSDoug Thompson * and the later RevF memory controllers (DDR vs DDR2) 1038ddff876dSDoug Thompson * 1039ddff876dSDoug Thompson * Return: 1040ddff876dSDoug Thompson * number of memory channels in operation 1041ddff876dSDoug Thompson * Pass back: 1042ddff876dSDoug Thompson * contents of the DCL0_LOW register 1043ddff876dSDoug Thompson */ 1044ddff876dSDoug Thompson static int k8_early_channel_count(struct amd64_pvt *pvt) 1045ddff876dSDoug Thompson { 1046ddff876dSDoug Thompson int flag, err = 0; 1047ddff876dSDoug Thompson 10486ba5dcdcSBorislav Petkov err = amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCLR_0, &pvt->dclr0); 1049ddff876dSDoug Thompson if (err) 1050ddff876dSDoug Thompson return err; 1051ddff876dSDoug Thompson 10521433eb99SBorislav Petkov if ((boot_cpu_data.x86_model >> 4) >= K8_REV_F) { 1053ddff876dSDoug Thompson /* RevF (NPT) and later */ 1054ddff876dSDoug Thompson flag = pvt->dclr0 & F10_WIDTH_128; 1055ddff876dSDoug Thompson } else { 1056ddff876dSDoug Thompson /* RevE and earlier */ 1057ddff876dSDoug Thompson flag = pvt->dclr0 & REVE_WIDTH_128; 1058ddff876dSDoug Thompson } 
1059ddff876dSDoug Thompson 1060ddff876dSDoug Thompson /* not used */ 1061ddff876dSDoug Thompson pvt->dclr1 = 0; 1062ddff876dSDoug Thompson 1063ddff876dSDoug Thompson return (flag) ? 2 : 1; 1064ddff876dSDoug Thompson } 1065ddff876dSDoug Thompson 1066ddff876dSDoug Thompson /* extract the ERROR ADDRESS for the K8 CPUs */ 1067ddff876dSDoug Thompson static u64 k8_get_error_address(struct mem_ctl_info *mci, 1068ef44cc4cSBorislav Petkov struct err_regs *info) 1069ddff876dSDoug Thompson { 1070ddff876dSDoug Thompson return (((u64) (info->nbeah & 0xff)) << 32) + 1071ddff876dSDoug Thompson (info->nbeal & ~0x03); 1072ddff876dSDoug Thompson } 1073ddff876dSDoug Thompson 1074ddff876dSDoug Thompson /* 1075ddff876dSDoug Thompson * Read the Base and Limit registers for K8 based Memory controllers; extract 1076ddff876dSDoug Thompson * fields from the 'raw' reg into separate data fields 1077ddff876dSDoug Thompson * 1078ddff876dSDoug Thompson * Isolates: BASE, LIMIT, IntlvEn, IntlvSel, RW_EN 1079ddff876dSDoug Thompson */ 1080ddff876dSDoug Thompson static void k8_read_dram_base_limit(struct amd64_pvt *pvt, int dram) 1081ddff876dSDoug Thompson { 1082ddff876dSDoug Thompson u32 low; 1083ddff876dSDoug Thompson u32 off = dram << 3; /* 8 bytes between DRAM entries */ 1084ddff876dSDoug Thompson 10856ba5dcdcSBorislav Petkov amd64_read_pci_cfg(pvt->addr_f1_ctl, K8_DRAM_BASE_LOW + off, &low); 1086ddff876dSDoug Thompson 1087ddff876dSDoug Thompson /* Extract parts into separate data entries */ 10884997811eSBorislav Petkov pvt->dram_base[dram] = ((u64) low & 0xFFFF0000) << 8; 1089ddff876dSDoug Thompson pvt->dram_IntlvEn[dram] = (low >> 8) & 0x7; 1090ddff876dSDoug Thompson pvt->dram_rw_en[dram] = (low & 0x3); 1091ddff876dSDoug Thompson 10926ba5dcdcSBorislav Petkov amd64_read_pci_cfg(pvt->addr_f1_ctl, K8_DRAM_LIMIT_LOW + off, &low); 1093ddff876dSDoug Thompson 1094ddff876dSDoug Thompson /* 1095ddff876dSDoug Thompson * Extract parts into separate data entries. 
Limit is the HIGHEST memory 1096ddff876dSDoug Thompson * location of the region, so lower 24 bits need to be all ones 1097ddff876dSDoug Thompson */ 10984997811eSBorislav Petkov pvt->dram_limit[dram] = (((u64) low & 0xFFFF0000) << 8) | 0x00FFFFFF; 1099ddff876dSDoug Thompson pvt->dram_IntlvSel[dram] = (low >> 8) & 0x7; 1100ddff876dSDoug Thompson pvt->dram_DstNode[dram] = (low & 0x7); 1101ddff876dSDoug Thompson } 1102ddff876dSDoug Thompson 1103ddff876dSDoug Thompson static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, 1104ef44cc4cSBorislav Petkov struct err_regs *info, 110544e9e2eeSBorislav Petkov u64 sys_addr) 1106ddff876dSDoug Thompson { 1107ddff876dSDoug Thompson struct mem_ctl_info *src_mci; 1108ddff876dSDoug Thompson unsigned short syndrome; 1109ddff876dSDoug Thompson int channel, csrow; 1110ddff876dSDoug Thompson u32 page, offset; 1111ddff876dSDoug Thompson 1112ddff876dSDoug Thompson /* Extract the syndrome parts and form a 16-bit syndrome */ 1113b70ef010SBorislav Petkov syndrome = HIGH_SYNDROME(info->nbsl) << 8; 1114b70ef010SBorislav Petkov syndrome |= LOW_SYNDROME(info->nbsh); 1115ddff876dSDoug Thompson 1116ddff876dSDoug Thompson /* CHIPKILL enabled */ 1117ddff876dSDoug Thompson if (info->nbcfg & K8_NBCFG_CHIPKILL) { 1118bfc04aecSBorislav Petkov channel = get_channel_from_ecc_syndrome(mci, syndrome); 1119ddff876dSDoug Thompson if (channel < 0) { 1120ddff876dSDoug Thompson /* 1121ddff876dSDoug Thompson * Syndrome didn't map, so we don't know which of the 1122ddff876dSDoug Thompson * 2 DIMMs is in error. So we need to ID 'both' of them 1123ddff876dSDoug Thompson * as suspect. 
1124ddff876dSDoug Thompson */ 1125ddff876dSDoug Thompson amd64_mc_printk(mci, KERN_WARNING, 1126ddff876dSDoug Thompson "unknown syndrome 0x%x - possible error " 1127ddff876dSDoug Thompson "reporting race\n", syndrome); 1128ddff876dSDoug Thompson edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR); 1129ddff876dSDoug Thompson return; 1130ddff876dSDoug Thompson } 1131ddff876dSDoug Thompson } else { 1132ddff876dSDoug Thompson /* 1133ddff876dSDoug Thompson * non-chipkill ecc mode 1134ddff876dSDoug Thompson * 1135ddff876dSDoug Thompson * The k8 documentation is unclear about how to determine the 1136ddff876dSDoug Thompson * channel number when using non-chipkill memory. This method 1137ddff876dSDoug Thompson * was obtained from email communication with someone at AMD. 1138ddff876dSDoug Thompson * (Wish the email was placed in this comment - norsk) 1139ddff876dSDoug Thompson */ 114044e9e2eeSBorislav Petkov channel = ((sys_addr & BIT(3)) != 0); 1141ddff876dSDoug Thompson } 1142ddff876dSDoug Thompson 1143ddff876dSDoug Thompson /* 1144ddff876dSDoug Thompson * Find out which node the error address belongs to. This may be 1145ddff876dSDoug Thompson * different from the node that detected the error. 
1146ddff876dSDoug Thompson */ 114744e9e2eeSBorislav Petkov src_mci = find_mc_by_sys_addr(mci, sys_addr); 11482cff18c2SKeith Mannthey if (!src_mci) { 1149ddff876dSDoug Thompson amd64_mc_printk(mci, KERN_ERR, 1150ddff876dSDoug Thompson "failed to map error address 0x%lx to a node\n", 115144e9e2eeSBorislav Petkov (unsigned long)sys_addr); 1152ddff876dSDoug Thompson edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR); 1153ddff876dSDoug Thompson return; 1154ddff876dSDoug Thompson } 1155ddff876dSDoug Thompson 115644e9e2eeSBorislav Petkov /* Now map the sys_addr to a CSROW */ 115744e9e2eeSBorislav Petkov csrow = sys_addr_to_csrow(src_mci, sys_addr); 1158ddff876dSDoug Thompson if (csrow < 0) { 1159ddff876dSDoug Thompson edac_mc_handle_ce_no_info(src_mci, EDAC_MOD_STR); 1160ddff876dSDoug Thompson } else { 116144e9e2eeSBorislav Petkov error_address_to_page_and_offset(sys_addr, &page, &offset); 1162ddff876dSDoug Thompson 1163ddff876dSDoug Thompson edac_mc_handle_ce(src_mci, page, offset, syndrome, csrow, 1164ddff876dSDoug Thompson channel, EDAC_MOD_STR); 1165ddff876dSDoug Thompson } 1166ddff876dSDoug Thompson } 1167ddff876dSDoug Thompson 11681433eb99SBorislav Petkov static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode) 1169ddff876dSDoug Thompson { 11701433eb99SBorislav Petkov int *dbam_map; 1171ddff876dSDoug Thompson 11721433eb99SBorislav Petkov if (pvt->ext_model >= K8_REV_F) 11731433eb99SBorislav Petkov dbam_map = ddr2_dbam; 11741433eb99SBorislav Petkov else if (pvt->ext_model >= K8_REV_D) 11751433eb99SBorislav Petkov dbam_map = ddr2_dbam_revD; 11761433eb99SBorislav Petkov else 11771433eb99SBorislav Petkov dbam_map = ddr2_dbam_revCG; 1178ddff876dSDoug Thompson 11791433eb99SBorislav Petkov return dbam_map[cs_mode]; 1180ddff876dSDoug Thompson } 1181ddff876dSDoug Thompson 11821afd3c98SDoug Thompson /* 11831afd3c98SDoug Thompson * Get the number of DCT channels in use. 
11841afd3c98SDoug Thompson * 11851afd3c98SDoug Thompson * Return: 11861afd3c98SDoug Thompson * number of Memory Channels in operation 11871afd3c98SDoug Thompson * Pass back: 11881afd3c98SDoug Thompson * contents of the DCL0_LOW register 11891afd3c98SDoug Thompson */ 11901afd3c98SDoug Thompson static int f10_early_channel_count(struct amd64_pvt *pvt) 11911afd3c98SDoug Thompson { 119257a30854SWan Wei int dbams[] = { DBAM0, DBAM1 }; 11936ba5dcdcSBorislav Petkov int i, j, channels = 0; 11941afd3c98SDoug Thompson u32 dbam; 1195ddff876dSDoug Thompson 11961afd3c98SDoug Thompson /* If we are in 128 bit mode, then we are using 2 channels */ 11971afd3c98SDoug Thompson if (pvt->dclr0 & F10_WIDTH_128) { 11981afd3c98SDoug Thompson channels = 2; 11991afd3c98SDoug Thompson return channels; 12001afd3c98SDoug Thompson } 12011afd3c98SDoug Thompson 12021afd3c98SDoug Thompson /* 1203d16149e8SBorislav Petkov * Need to check if in unganged mode: In such, there are 2 channels, 1204d16149e8SBorislav Petkov * but they are not in 128 bit mode and thus the above 'dclr0' status 1205d16149e8SBorislav Petkov * bit will be OFF. 12061afd3c98SDoug Thompson * 12071afd3c98SDoug Thompson * Need to check DCT0[0] and DCT1[0] to see if only one of them has 12081afd3c98SDoug Thompson * their CSEnable bit on. If so, then SINGLE DIMM case. 12091afd3c98SDoug Thompson */ 1210d16149e8SBorislav Petkov debugf0("Data width is not 128 bits - need more decoding\n"); 12111afd3c98SDoug Thompson 12121afd3c98SDoug Thompson /* 12131afd3c98SDoug Thompson * Check DRAM Bank Address Mapping values for each DIMM to see if there 12141afd3c98SDoug Thompson * is more than just one DIMM present in unganged mode. Need to check 12151afd3c98SDoug Thompson * both controllers since DIMMs can be placed in either one. 
12161afd3c98SDoug Thompson */ 121757a30854SWan Wei for (i = 0; i < ARRAY_SIZE(dbams); i++) { 12186ba5dcdcSBorislav Petkov if (amd64_read_pci_cfg(pvt->dram_f2_ctl, dbams[i], &dbam)) 12191afd3c98SDoug Thompson goto err_reg; 12201afd3c98SDoug Thompson 122157a30854SWan Wei for (j = 0; j < 4; j++) { 122257a30854SWan Wei if (DBAM_DIMM(j, dbam) > 0) { 12231afd3c98SDoug Thompson channels++; 122457a30854SWan Wei break; 12251afd3c98SDoug Thompson } 122657a30854SWan Wei } 122757a30854SWan Wei } 12281afd3c98SDoug Thompson 1229d16149e8SBorislav Petkov if (channels > 2) 1230d16149e8SBorislav Petkov channels = 2; 1231d16149e8SBorislav Petkov 123237da0450SBorislav Petkov debugf0("MCT channel count: %d\n", channels); 12331afd3c98SDoug Thompson 12341afd3c98SDoug Thompson return channels; 12351afd3c98SDoug Thompson 12361afd3c98SDoug Thompson err_reg: 12371afd3c98SDoug Thompson return -1; 12381afd3c98SDoug Thompson 12391afd3c98SDoug Thompson } 12401afd3c98SDoug Thompson 12411433eb99SBorislav Petkov static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode) 12421afd3c98SDoug Thompson { 12431433eb99SBorislav Petkov int *dbam_map; 12441433eb99SBorislav Petkov 12451433eb99SBorislav Petkov if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE) 12461433eb99SBorislav Petkov dbam_map = ddr3_dbam; 12471433eb99SBorislav Petkov else 12481433eb99SBorislav Petkov dbam_map = ddr2_dbam; 12491433eb99SBorislav Petkov 12501433eb99SBorislav Petkov return dbam_map[cs_mode]; 12511afd3c98SDoug Thompson } 12521afd3c98SDoug Thompson 12531afd3c98SDoug Thompson /* Enable extended configuration access via 0xCF8 feature */ 12541afd3c98SDoug Thompson static void amd64_setup(struct amd64_pvt *pvt) 12551afd3c98SDoug Thompson { 12561afd3c98SDoug Thompson u32 reg; 12571afd3c98SDoug Thompson 12586ba5dcdcSBorislav Petkov amd64_read_pci_cfg(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, ®); 12591afd3c98SDoug Thompson 12601afd3c98SDoug Thompson pvt->flags.cf8_extcfg = !!(reg & F10_NB_CFG_LOW_ENABLE_EXT_CFG); 
12611afd3c98SDoug Thompson reg |= F10_NB_CFG_LOW_ENABLE_EXT_CFG; 12621afd3c98SDoug Thompson pci_write_config_dword(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, reg); 12631afd3c98SDoug Thompson } 12641afd3c98SDoug Thompson 12651afd3c98SDoug Thompson /* Restore the extended configuration access via 0xCF8 feature */ 12661afd3c98SDoug Thompson static void amd64_teardown(struct amd64_pvt *pvt) 12671afd3c98SDoug Thompson { 12681afd3c98SDoug Thompson u32 reg; 12691afd3c98SDoug Thompson 12706ba5dcdcSBorislav Petkov amd64_read_pci_cfg(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, ®); 12711afd3c98SDoug Thompson 12721afd3c98SDoug Thompson reg &= ~F10_NB_CFG_LOW_ENABLE_EXT_CFG; 12731afd3c98SDoug Thompson if (pvt->flags.cf8_extcfg) 12741afd3c98SDoug Thompson reg |= F10_NB_CFG_LOW_ENABLE_EXT_CFG; 12751afd3c98SDoug Thompson pci_write_config_dword(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, reg); 12761afd3c98SDoug Thompson } 12771afd3c98SDoug Thompson 12781afd3c98SDoug Thompson static u64 f10_get_error_address(struct mem_ctl_info *mci, 1279ef44cc4cSBorislav Petkov struct err_regs *info) 12801afd3c98SDoug Thompson { 12811afd3c98SDoug Thompson return (((u64) (info->nbeah & 0xffff)) << 32) + 12821afd3c98SDoug Thompson (info->nbeal & ~0x01); 12831afd3c98SDoug Thompson } 12841afd3c98SDoug Thompson 12851afd3c98SDoug Thompson /* 12861afd3c98SDoug Thompson * Read the Base and Limit registers for F10 based Memory controllers. Extract 12871afd3c98SDoug Thompson * fields from the 'raw' reg into separate data fields. 12881afd3c98SDoug Thompson * 12891afd3c98SDoug Thompson * Isolates: BASE, LIMIT, IntlvEn, IntlvSel, RW_EN. 
12901afd3c98SDoug Thompson */ 12911afd3c98SDoug Thompson static void f10_read_dram_base_limit(struct amd64_pvt *pvt, int dram) 12921afd3c98SDoug Thompson { 12931afd3c98SDoug Thompson u32 high_offset, low_offset, high_base, low_base, high_limit, low_limit; 12941afd3c98SDoug Thompson 12951afd3c98SDoug Thompson low_offset = K8_DRAM_BASE_LOW + (dram << 3); 12961afd3c98SDoug Thompson high_offset = F10_DRAM_BASE_HIGH + (dram << 3); 12971afd3c98SDoug Thompson 12981afd3c98SDoug Thompson /* read the 'raw' DRAM BASE Address register */ 12996ba5dcdcSBorislav Petkov amd64_read_pci_cfg(pvt->addr_f1_ctl, low_offset, &low_base); 13001afd3c98SDoug Thompson 13011afd3c98SDoug Thompson /* Read from the ECS data register */ 13026ba5dcdcSBorislav Petkov amd64_read_pci_cfg(pvt->addr_f1_ctl, high_offset, &high_base); 13031afd3c98SDoug Thompson 13041afd3c98SDoug Thompson /* Extract parts into separate data entries */ 13051afd3c98SDoug Thompson pvt->dram_rw_en[dram] = (low_base & 0x3); 13061afd3c98SDoug Thompson 13071afd3c98SDoug Thompson if (pvt->dram_rw_en[dram] == 0) 13081afd3c98SDoug Thompson return; 13091afd3c98SDoug Thompson 13101afd3c98SDoug Thompson pvt->dram_IntlvEn[dram] = (low_base >> 8) & 0x7; 13111afd3c98SDoug Thompson 131266216a7aSBorislav Petkov pvt->dram_base[dram] = (((u64)high_base & 0x000000FF) << 40) | 13134997811eSBorislav Petkov (((u64)low_base & 0xFFFF0000) << 8); 13141afd3c98SDoug Thompson 13151afd3c98SDoug Thompson low_offset = K8_DRAM_LIMIT_LOW + (dram << 3); 13161afd3c98SDoug Thompson high_offset = F10_DRAM_LIMIT_HIGH + (dram << 3); 13171afd3c98SDoug Thompson 13181afd3c98SDoug Thompson /* read the 'raw' LIMIT registers */ 13196ba5dcdcSBorislav Petkov amd64_read_pci_cfg(pvt->addr_f1_ctl, low_offset, &low_limit); 13201afd3c98SDoug Thompson 13211afd3c98SDoug Thompson /* Read from the ECS data register for the HIGH portion */ 13226ba5dcdcSBorislav Petkov amd64_read_pci_cfg(pvt->addr_f1_ctl, high_offset, &high_limit); 13231afd3c98SDoug Thompson 13241afd3c98SDoug 
Thompson pvt->dram_DstNode[dram] = (low_limit & 0x7); 13251afd3c98SDoug Thompson pvt->dram_IntlvSel[dram] = (low_limit >> 8) & 0x7; 13261afd3c98SDoug Thompson 13271afd3c98SDoug Thompson /* 13281afd3c98SDoug Thompson * Extract address values and form a LIMIT address. Limit is the HIGHEST 13291afd3c98SDoug Thompson * memory location of the region, so low 24 bits need to be all ones. 13301afd3c98SDoug Thompson */ 133166216a7aSBorislav Petkov pvt->dram_limit[dram] = (((u64)high_limit & 0x000000FF) << 40) | 13324997811eSBorislav Petkov (((u64) low_limit & 0xFFFF0000) << 8) | 133366216a7aSBorislav Petkov 0x00FFFFFF; 13341afd3c98SDoug Thompson } 13356163b5d4SDoug Thompson 13366163b5d4SDoug Thompson static void f10_read_dram_ctl_register(struct amd64_pvt *pvt) 13376163b5d4SDoug Thompson { 13386163b5d4SDoug Thompson 13396ba5dcdcSBorislav Petkov if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCTL_SEL_LOW, 13406ba5dcdcSBorislav Petkov &pvt->dram_ctl_select_low)) { 134172381bd5SBorislav Petkov debugf0("F2x110 (DCTL Sel. Low): 0x%08x, " 134272381bd5SBorislav Petkov "High range addresses at: 0x%x\n", 134372381bd5SBorislav Petkov pvt->dram_ctl_select_low, 134472381bd5SBorislav Petkov dct_sel_baseaddr(pvt)); 13456163b5d4SDoug Thompson 134672381bd5SBorislav Petkov debugf0(" DCT mode: %s, All DCTs on: %s\n", 134772381bd5SBorislav Petkov (dct_ganging_enabled(pvt) ? "ganged" : "unganged"), 134872381bd5SBorislav Petkov (dct_dram_enabled(pvt) ? "yes" : "no")); 13496163b5d4SDoug Thompson 135072381bd5SBorislav Petkov if (!dct_ganging_enabled(pvt)) 135172381bd5SBorislav Petkov debugf0(" Address range split per DCT: %s\n", 135272381bd5SBorislav Petkov (dct_high_range_enabled(pvt) ? "yes" : "no")); 135372381bd5SBorislav Petkov 135472381bd5SBorislav Petkov debugf0(" DCT data interleave for ECC: %s, " 135572381bd5SBorislav Petkov "DRAM cleared since last warm reset: %s\n", 135672381bd5SBorislav Petkov (dct_data_intlv_enabled(pvt) ? 
"enabled" : "disabled"), 135772381bd5SBorislav Petkov (dct_memory_cleared(pvt) ? "yes" : "no")); 135872381bd5SBorislav Petkov 135972381bd5SBorislav Petkov debugf0(" DCT channel interleave: %s, " 136072381bd5SBorislav Petkov "DCT interleave bits selector: 0x%x\n", 136172381bd5SBorislav Petkov (dct_interleave_enabled(pvt) ? "enabled" : "disabled"), 13626163b5d4SDoug Thompson dct_sel_interleave_addr(pvt)); 13636163b5d4SDoug Thompson } 13646163b5d4SDoug Thompson 13656ba5dcdcSBorislav Petkov amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCTL_SEL_HIGH, 13666163b5d4SDoug Thompson &pvt->dram_ctl_select_high); 13676163b5d4SDoug Thompson } 13686163b5d4SDoug Thompson 1369f71d0a05SDoug Thompson /* 1370f71d0a05SDoug Thompson * determine channel based on the interleaving mode: F10h BKDG, 2.8.9 Memory 1371f71d0a05SDoug Thompson * Interleaving Modes. 1372f71d0a05SDoug Thompson */ 13736163b5d4SDoug Thompson static u32 f10_determine_channel(struct amd64_pvt *pvt, u64 sys_addr, 13746163b5d4SDoug Thompson int hi_range_sel, u32 intlv_en) 13756163b5d4SDoug Thompson { 13766163b5d4SDoug Thompson u32 cs, temp, dct_sel_high = (pvt->dram_ctl_select_low >> 1) & 1; 13776163b5d4SDoug Thompson 13786163b5d4SDoug Thompson if (dct_ganging_enabled(pvt)) 13796163b5d4SDoug Thompson cs = 0; 13806163b5d4SDoug Thompson else if (hi_range_sel) 13816163b5d4SDoug Thompson cs = dct_sel_high; 13826163b5d4SDoug Thompson else if (dct_interleave_enabled(pvt)) { 1383f71d0a05SDoug Thompson /* 1384f71d0a05SDoug Thompson * see F2x110[DctSelIntLvAddr] - channel interleave mode 1385f71d0a05SDoug Thompson */ 13866163b5d4SDoug Thompson if (dct_sel_interleave_addr(pvt) == 0) 13876163b5d4SDoug Thompson cs = sys_addr >> 6 & 1; 13886163b5d4SDoug Thompson else if ((dct_sel_interleave_addr(pvt) >> 1) & 1) { 13896163b5d4SDoug Thompson temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) % 2; 13906163b5d4SDoug Thompson 13916163b5d4SDoug Thompson if (dct_sel_interleave_addr(pvt) & 1) 13926163b5d4SDoug Thompson cs = (sys_addr >> 9 & 1) 
^ temp; 13936163b5d4SDoug Thompson else 13946163b5d4SDoug Thompson cs = (sys_addr >> 6 & 1) ^ temp; 13956163b5d4SDoug Thompson } else if (intlv_en & 4) 13966163b5d4SDoug Thompson cs = sys_addr >> 15 & 1; 13976163b5d4SDoug Thompson else if (intlv_en & 2) 13986163b5d4SDoug Thompson cs = sys_addr >> 14 & 1; 13996163b5d4SDoug Thompson else if (intlv_en & 1) 14006163b5d4SDoug Thompson cs = sys_addr >> 13 & 1; 14016163b5d4SDoug Thompson else 14026163b5d4SDoug Thompson cs = sys_addr >> 12 & 1; 14036163b5d4SDoug Thompson } else if (dct_high_range_enabled(pvt) && !dct_ganging_enabled(pvt)) 14046163b5d4SDoug Thompson cs = ~dct_sel_high & 1; 14056163b5d4SDoug Thompson else 14066163b5d4SDoug Thompson cs = 0; 14076163b5d4SDoug Thompson 14086163b5d4SDoug Thompson return cs; 14096163b5d4SDoug Thompson } 14106163b5d4SDoug Thompson 14116163b5d4SDoug Thompson static inline u32 f10_map_intlv_en_to_shift(u32 intlv_en) 14126163b5d4SDoug Thompson { 14136163b5d4SDoug Thompson if (intlv_en == 1) 14146163b5d4SDoug Thompson return 1; 14156163b5d4SDoug Thompson else if (intlv_en == 3) 14166163b5d4SDoug Thompson return 2; 14176163b5d4SDoug Thompson else if (intlv_en == 7) 14186163b5d4SDoug Thompson return 3; 14196163b5d4SDoug Thompson 14206163b5d4SDoug Thompson return 0; 14216163b5d4SDoug Thompson } 14226163b5d4SDoug Thompson 1423f71d0a05SDoug Thompson /* See F10h BKDG, 2.8.10.2 DctSelBaseOffset Programming */ 1424f71d0a05SDoug Thompson static inline u64 f10_get_base_addr_offset(u64 sys_addr, int hi_range_sel, 14256163b5d4SDoug Thompson u32 dct_sel_base_addr, 14266163b5d4SDoug Thompson u64 dct_sel_base_off, 1427f71d0a05SDoug Thompson u32 hole_valid, u32 hole_off, 14286163b5d4SDoug Thompson u64 dram_base) 14296163b5d4SDoug Thompson { 14306163b5d4SDoug Thompson u64 chan_off; 14316163b5d4SDoug Thompson 14326163b5d4SDoug Thompson if (hi_range_sel) { 14336163b5d4SDoug Thompson if (!(dct_sel_base_addr & 0xFFFFF800) && 1434f71d0a05SDoug Thompson hole_valid && (sys_addr >= 0x100000000ULL)) 
14356163b5d4SDoug Thompson chan_off = hole_off << 16; 14366163b5d4SDoug Thompson else 14376163b5d4SDoug Thompson chan_off = dct_sel_base_off; 14386163b5d4SDoug Thompson } else { 1439f71d0a05SDoug Thompson if (hole_valid && (sys_addr >= 0x100000000ULL)) 14406163b5d4SDoug Thompson chan_off = hole_off << 16; 14416163b5d4SDoug Thompson else 14426163b5d4SDoug Thompson chan_off = dram_base & 0xFFFFF8000000ULL; 14436163b5d4SDoug Thompson } 14446163b5d4SDoug Thompson 14456163b5d4SDoug Thompson return (sys_addr & 0x0000FFFFFFFFFFC0ULL) - 14466163b5d4SDoug Thompson (chan_off & 0x0000FFFFFF800000ULL); 14476163b5d4SDoug Thompson } 14486163b5d4SDoug Thompson 14496163b5d4SDoug Thompson /* Hack for the time being - Can we get this from BIOS?? */ 14506163b5d4SDoug Thompson #define CH0SPARE_RANK 0 14516163b5d4SDoug Thompson #define CH1SPARE_RANK 1 14526163b5d4SDoug Thompson 14536163b5d4SDoug Thompson /* 14546163b5d4SDoug Thompson * checks if the csrow passed in is marked as SPARED, if so returns the new 14556163b5d4SDoug Thompson * spare row 14566163b5d4SDoug Thompson */ 14576163b5d4SDoug Thompson static inline int f10_process_possible_spare(int csrow, 14586163b5d4SDoug Thompson u32 cs, struct amd64_pvt *pvt) 14596163b5d4SDoug Thompson { 14606163b5d4SDoug Thompson u32 swap_done; 14616163b5d4SDoug Thompson u32 bad_dram_cs; 14626163b5d4SDoug Thompson 14636163b5d4SDoug Thompson /* Depending on channel, isolate respective SPARING info */ 14646163b5d4SDoug Thompson if (cs) { 14656163b5d4SDoug Thompson swap_done = F10_ONLINE_SPARE_SWAPDONE1(pvt->online_spare); 14666163b5d4SDoug Thompson bad_dram_cs = F10_ONLINE_SPARE_BADDRAM_CS1(pvt->online_spare); 14676163b5d4SDoug Thompson if (swap_done && (csrow == bad_dram_cs)) 14686163b5d4SDoug Thompson csrow = CH1SPARE_RANK; 14696163b5d4SDoug Thompson } else { 14706163b5d4SDoug Thompson swap_done = F10_ONLINE_SPARE_SWAPDONE0(pvt->online_spare); 14716163b5d4SDoug Thompson bad_dram_cs = F10_ONLINE_SPARE_BADDRAM_CS0(pvt->online_spare); 
14726163b5d4SDoug Thompson if (swap_done && (csrow == bad_dram_cs)) 14736163b5d4SDoug Thompson csrow = CH0SPARE_RANK; 14746163b5d4SDoug Thompson } 14756163b5d4SDoug Thompson return csrow; 14766163b5d4SDoug Thompson } 14776163b5d4SDoug Thompson 14786163b5d4SDoug Thompson /* 14796163b5d4SDoug Thompson * Iterate over the DRAM DCT "base" and "mask" registers looking for a 14806163b5d4SDoug Thompson * SystemAddr match on the specified 'ChannelSelect' and 'NodeID' 14816163b5d4SDoug Thompson * 14826163b5d4SDoug Thompson * Return: 14836163b5d4SDoug Thompson * -EINVAL: NOT FOUND 14846163b5d4SDoug Thompson * 0..csrow = Chip-Select Row 14856163b5d4SDoug Thompson */ 14866163b5d4SDoug Thompson static int f10_lookup_addr_in_dct(u32 in_addr, u32 nid, u32 cs) 14876163b5d4SDoug Thompson { 14886163b5d4SDoug Thompson struct mem_ctl_info *mci; 14896163b5d4SDoug Thompson struct amd64_pvt *pvt; 14906163b5d4SDoug Thompson u32 cs_base, cs_mask; 14916163b5d4SDoug Thompson int cs_found = -EINVAL; 14926163b5d4SDoug Thompson int csrow; 14936163b5d4SDoug Thompson 14946163b5d4SDoug Thompson mci = mci_lookup[nid]; 14956163b5d4SDoug Thompson if (!mci) 14966163b5d4SDoug Thompson return cs_found; 14976163b5d4SDoug Thompson 14986163b5d4SDoug Thompson pvt = mci->pvt_info; 14996163b5d4SDoug Thompson 15006163b5d4SDoug Thompson debugf1("InputAddr=0x%x channelselect=%d\n", in_addr, cs); 15016163b5d4SDoug Thompson 15029d858bb1SBorislav Petkov for (csrow = 0; csrow < pvt->cs_count; csrow++) { 15036163b5d4SDoug Thompson 15046163b5d4SDoug Thompson cs_base = amd64_get_dct_base(pvt, cs, csrow); 15056163b5d4SDoug Thompson if (!(cs_base & K8_DCSB_CS_ENABLE)) 15066163b5d4SDoug Thompson continue; 15076163b5d4SDoug Thompson 15086163b5d4SDoug Thompson /* 15096163b5d4SDoug Thompson * We have an ENABLED CSROW, Isolate just the MASK bits of the 15106163b5d4SDoug Thompson * target: [28:19] and [13:5], which map to [36:27] and [21:13] 15116163b5d4SDoug Thompson * of the actual address. 
15126163b5d4SDoug Thompson */ 15136163b5d4SDoug Thompson cs_base &= REV_F_F1Xh_DCSB_BASE_BITS; 15146163b5d4SDoug Thompson 15156163b5d4SDoug Thompson /* 15166163b5d4SDoug Thompson * Get the DCT Mask, and ENABLE the reserved bits: [18:16] and 15176163b5d4SDoug Thompson * [4:0] to become ON. Then mask off bits [28:0] ([36:8]) 15186163b5d4SDoug Thompson */ 15196163b5d4SDoug Thompson cs_mask = amd64_get_dct_mask(pvt, cs, csrow); 15206163b5d4SDoug Thompson 15216163b5d4SDoug Thompson debugf1(" CSROW=%d CSBase=0x%x RAW CSMask=0x%x\n", 15226163b5d4SDoug Thompson csrow, cs_base, cs_mask); 15236163b5d4SDoug Thompson 15246163b5d4SDoug Thompson cs_mask = (cs_mask | 0x0007C01F) & 0x1FFFFFFF; 15256163b5d4SDoug Thompson 15266163b5d4SDoug Thompson debugf1(" Final CSMask=0x%x\n", cs_mask); 15276163b5d4SDoug Thompson debugf1(" (InputAddr & ~CSMask)=0x%x " 15286163b5d4SDoug Thompson "(CSBase & ~CSMask)=0x%x\n", 15296163b5d4SDoug Thompson (in_addr & ~cs_mask), (cs_base & ~cs_mask)); 15306163b5d4SDoug Thompson 15316163b5d4SDoug Thompson if ((in_addr & ~cs_mask) == (cs_base & ~cs_mask)) { 15326163b5d4SDoug Thompson cs_found = f10_process_possible_spare(csrow, cs, pvt); 15336163b5d4SDoug Thompson 15346163b5d4SDoug Thompson debugf1(" MATCH csrow=%d\n", cs_found); 15356163b5d4SDoug Thompson break; 15366163b5d4SDoug Thompson } 15376163b5d4SDoug Thompson } 15386163b5d4SDoug Thompson return cs_found; 15396163b5d4SDoug Thompson } 15406163b5d4SDoug Thompson 1541f71d0a05SDoug Thompson /* For a given @dram_range, check if @sys_addr falls within it. 
*/ 1542f71d0a05SDoug Thompson static int f10_match_to_this_node(struct amd64_pvt *pvt, int dram_range, 1543f71d0a05SDoug Thompson u64 sys_addr, int *nid, int *chan_sel) 1544f71d0a05SDoug Thompson { 1545f71d0a05SDoug Thompson int node_id, cs_found = -EINVAL, high_range = 0; 1546f71d0a05SDoug Thompson u32 intlv_en, intlv_sel, intlv_shift, hole_off; 1547f71d0a05SDoug Thompson u32 hole_valid, tmp, dct_sel_base, channel; 1548f71d0a05SDoug Thompson u64 dram_base, chan_addr, dct_sel_base_off; 1549f71d0a05SDoug Thompson 1550f71d0a05SDoug Thompson dram_base = pvt->dram_base[dram_range]; 1551f71d0a05SDoug Thompson intlv_en = pvt->dram_IntlvEn[dram_range]; 1552f71d0a05SDoug Thompson 1553f71d0a05SDoug Thompson node_id = pvt->dram_DstNode[dram_range]; 1554f71d0a05SDoug Thompson intlv_sel = pvt->dram_IntlvSel[dram_range]; 1555f71d0a05SDoug Thompson 1556f71d0a05SDoug Thompson debugf1("(dram=%d) Base=0x%llx SystemAddr= 0x%llx Limit=0x%llx\n", 1557f71d0a05SDoug Thompson dram_range, dram_base, sys_addr, pvt->dram_limit[dram_range]); 1558f71d0a05SDoug Thompson 1559f71d0a05SDoug Thompson /* 1560f71d0a05SDoug Thompson * This assumes that one node's DHAR is the same as all the other 1561f71d0a05SDoug Thompson * nodes' DHAR. 
1562f71d0a05SDoug Thompson */ 1563f71d0a05SDoug Thompson hole_off = (pvt->dhar & 0x0000FF80); 1564f71d0a05SDoug Thompson hole_valid = (pvt->dhar & 0x1); 1565f71d0a05SDoug Thompson dct_sel_base_off = (pvt->dram_ctl_select_high & 0xFFFFFC00) << 16; 1566f71d0a05SDoug Thompson 1567f71d0a05SDoug Thompson debugf1(" HoleOffset=0x%x HoleValid=0x%x IntlvSel=0x%x\n", 1568f71d0a05SDoug Thompson hole_off, hole_valid, intlv_sel); 1569f71d0a05SDoug Thompson 1570f71d0a05SDoug Thompson if (intlv_en || 1571f71d0a05SDoug Thompson (intlv_sel != ((sys_addr >> 12) & intlv_en))) 1572f71d0a05SDoug Thompson return -EINVAL; 1573f71d0a05SDoug Thompson 1574f71d0a05SDoug Thompson dct_sel_base = dct_sel_baseaddr(pvt); 1575f71d0a05SDoug Thompson 1576f71d0a05SDoug Thompson /* 1577f71d0a05SDoug Thompson * check whether addresses >= DctSelBaseAddr[47:27] are to be used to 1578f71d0a05SDoug Thompson * select between DCT0 and DCT1. 1579f71d0a05SDoug Thompson */ 1580f71d0a05SDoug Thompson if (dct_high_range_enabled(pvt) && 1581f71d0a05SDoug Thompson !dct_ganging_enabled(pvt) && 1582f71d0a05SDoug Thompson ((sys_addr >> 27) >= (dct_sel_base >> 11))) 1583f71d0a05SDoug Thompson high_range = 1; 1584f71d0a05SDoug Thompson 1585f71d0a05SDoug Thompson channel = f10_determine_channel(pvt, sys_addr, high_range, intlv_en); 1586f71d0a05SDoug Thompson 1587f71d0a05SDoug Thompson chan_addr = f10_get_base_addr_offset(sys_addr, high_range, dct_sel_base, 1588f71d0a05SDoug Thompson dct_sel_base_off, hole_valid, 1589f71d0a05SDoug Thompson hole_off, dram_base); 1590f71d0a05SDoug Thompson 1591f71d0a05SDoug Thompson intlv_shift = f10_map_intlv_en_to_shift(intlv_en); 1592f71d0a05SDoug Thompson 1593f71d0a05SDoug Thompson /* remove Node ID (in case of memory interleaving) */ 1594f71d0a05SDoug Thompson tmp = chan_addr & 0xFC0; 1595f71d0a05SDoug Thompson 1596f71d0a05SDoug Thompson chan_addr = ((chan_addr >> intlv_shift) & 0xFFFFFFFFF000ULL) | tmp; 1597f71d0a05SDoug Thompson 1598f71d0a05SDoug Thompson /* remove channel interleave 
and hash */ 1599f71d0a05SDoug Thompson if (dct_interleave_enabled(pvt) && 1600f71d0a05SDoug Thompson !dct_high_range_enabled(pvt) && 1601f71d0a05SDoug Thompson !dct_ganging_enabled(pvt)) { 1602f71d0a05SDoug Thompson if (dct_sel_interleave_addr(pvt) != 1) 1603f71d0a05SDoug Thompson chan_addr = (chan_addr >> 1) & 0xFFFFFFFFFFFFFFC0ULL; 1604f71d0a05SDoug Thompson else { 1605f71d0a05SDoug Thompson tmp = chan_addr & 0xFC0; 1606f71d0a05SDoug Thompson chan_addr = ((chan_addr & 0xFFFFFFFFFFFFC000ULL) >> 1) 1607f71d0a05SDoug Thompson | tmp; 1608f71d0a05SDoug Thompson } 1609f71d0a05SDoug Thompson } 1610f71d0a05SDoug Thompson 1611f71d0a05SDoug Thompson debugf1(" (ChannelAddrLong=0x%llx) >> 8 becomes InputAddr=0x%x\n", 1612f71d0a05SDoug Thompson chan_addr, (u32)(chan_addr >> 8)); 1613f71d0a05SDoug Thompson 1614f71d0a05SDoug Thompson cs_found = f10_lookup_addr_in_dct(chan_addr >> 8, node_id, channel); 1615f71d0a05SDoug Thompson 1616f71d0a05SDoug Thompson if (cs_found >= 0) { 1617f71d0a05SDoug Thompson *nid = node_id; 1618f71d0a05SDoug Thompson *chan_sel = channel; 1619f71d0a05SDoug Thompson } 1620f71d0a05SDoug Thompson return cs_found; 1621f71d0a05SDoug Thompson } 1622f71d0a05SDoug Thompson 1623f71d0a05SDoug Thompson static int f10_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr, 1624f71d0a05SDoug Thompson int *node, int *chan_sel) 1625f71d0a05SDoug Thompson { 1626f71d0a05SDoug Thompson int dram_range, cs_found = -EINVAL; 1627f71d0a05SDoug Thompson u64 dram_base, dram_limit; 1628f71d0a05SDoug Thompson 1629f71d0a05SDoug Thompson for (dram_range = 0; dram_range < DRAM_REG_COUNT; dram_range++) { 1630f71d0a05SDoug Thompson 1631f71d0a05SDoug Thompson if (!pvt->dram_rw_en[dram_range]) 1632f71d0a05SDoug Thompson continue; 1633f71d0a05SDoug Thompson 1634f71d0a05SDoug Thompson dram_base = pvt->dram_base[dram_range]; 1635f71d0a05SDoug Thompson dram_limit = pvt->dram_limit[dram_range]; 1636f71d0a05SDoug Thompson 1637f71d0a05SDoug Thompson if ((dram_base <= sys_addr) && 
(sys_addr <= dram_limit)) { 1638f71d0a05SDoug Thompson 1639f71d0a05SDoug Thompson cs_found = f10_match_to_this_node(pvt, dram_range, 1640f71d0a05SDoug Thompson sys_addr, node, 1641f71d0a05SDoug Thompson chan_sel); 1642f71d0a05SDoug Thompson if (cs_found >= 0) 1643f71d0a05SDoug Thompson break; 1644f71d0a05SDoug Thompson } 1645f71d0a05SDoug Thompson } 1646f71d0a05SDoug Thompson return cs_found; 1647f71d0a05SDoug Thompson } 1648f71d0a05SDoug Thompson 1649f71d0a05SDoug Thompson /* 1650bdc30a0cSBorislav Petkov * For reference see "2.8.5 Routing DRAM Requests" in F10 BKDG. This code maps 1651bdc30a0cSBorislav Petkov * a @sys_addr to NodeID, DCT (channel) and chip select (CSROW). 1652f71d0a05SDoug Thompson * 1653bdc30a0cSBorislav Petkov * The @sys_addr is usually an error address received from the hardware 1654bdc30a0cSBorislav Petkov * (MCX_ADDR). 1655f71d0a05SDoug Thompson */ 1656f71d0a05SDoug Thompson static void f10_map_sysaddr_to_csrow(struct mem_ctl_info *mci, 1657ef44cc4cSBorislav Petkov struct err_regs *info, 1658f71d0a05SDoug Thompson u64 sys_addr) 1659f71d0a05SDoug Thompson { 1660f71d0a05SDoug Thompson struct amd64_pvt *pvt = mci->pvt_info; 1661f71d0a05SDoug Thompson u32 page, offset; 1662f71d0a05SDoug Thompson unsigned short syndrome; 1663f71d0a05SDoug Thompson int nid, csrow, chan = 0; 1664f71d0a05SDoug Thompson 1665f71d0a05SDoug Thompson csrow = f10_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan); 1666f71d0a05SDoug Thompson 1667bdc30a0cSBorislav Petkov if (csrow < 0) { 1668bdc30a0cSBorislav Petkov edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR); 1669bdc30a0cSBorislav Petkov return; 1670bdc30a0cSBorislav Petkov } 1671bdc30a0cSBorislav Petkov 1672f71d0a05SDoug Thompson error_address_to_page_and_offset(sys_addr, &page, &offset); 1673f71d0a05SDoug Thompson 1674b70ef010SBorislav Petkov syndrome = HIGH_SYNDROME(info->nbsl) << 8; 1675b70ef010SBorislav Petkov syndrome |= LOW_SYNDROME(info->nbsh); 1676f71d0a05SDoug Thompson 1677f71d0a05SDoug Thompson /* 
1678bdc30a0cSBorislav Petkov * We need the syndromes for channel detection only when we're 1679bdc30a0cSBorislav Petkov * ganged. Otherwise @chan should already contain the channel at 1680bdc30a0cSBorislav Petkov * this point. 1681f71d0a05SDoug Thompson */ 1682bdc30a0cSBorislav Petkov if (dct_ganging_enabled(pvt) && pvt->nbcfg & K8_NBCFG_CHIPKILL) 1683bfc04aecSBorislav Petkov chan = get_channel_from_ecc_syndrome(mci, syndrome); 1684f71d0a05SDoug Thompson 1685bdc30a0cSBorislav Petkov if (chan >= 0) 1686bdc30a0cSBorislav Petkov edac_mc_handle_ce(mci, page, offset, syndrome, csrow, chan, 1687bdc30a0cSBorislav Petkov EDAC_MOD_STR); 1688bdc30a0cSBorislav Petkov else 1689bdc30a0cSBorislav Petkov /* 1690bdc30a0cSBorislav Petkov * Channel unknown, report all channels on this CSROW as failed. 1691bdc30a0cSBorislav Petkov */ 1692bdc30a0cSBorislav Petkov for (chan = 0; chan < mci->csrows[csrow].nr_channels; chan++) 1693f71d0a05SDoug Thompson edac_mc_handle_ce(mci, page, offset, syndrome, 1694f71d0a05SDoug Thompson csrow, chan, EDAC_MOD_STR); 1695f71d0a05SDoug Thompson } 1696f71d0a05SDoug Thompson 1697f71d0a05SDoug Thompson /* 16988566c4dfSBorislav Petkov * debug routine to display the memory sizes of all logical DIMMs and its 1699f71d0a05SDoug Thompson * CSROWs as well 1700f71d0a05SDoug Thompson */ 17018566c4dfSBorislav Petkov static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt) 1702f71d0a05SDoug Thompson { 1703603adaf6SBorislav Petkov int dimm, size0, size1, factor = 0; 1704f71d0a05SDoug Thompson u32 dbam; 1705f71d0a05SDoug Thompson u32 *dcsb; 1706f71d0a05SDoug Thompson 17078566c4dfSBorislav Petkov if (boot_cpu_data.x86 == 0xf) { 1708603adaf6SBorislav Petkov if (pvt->dclr0 & F10_WIDTH_128) 1709603adaf6SBorislav Petkov factor = 1; 1710603adaf6SBorislav Petkov 17118566c4dfSBorislav Petkov /* K8 families < revF not supported yet */ 17121433eb99SBorislav Petkov if (pvt->ext_model < K8_REV_F) 17138566c4dfSBorislav Petkov return; 17148566c4dfSBorislav Petkov 
else 17158566c4dfSBorislav Petkov WARN_ON(ctrl != 0); 17168566c4dfSBorislav Petkov } 17178566c4dfSBorislav Petkov 17188566c4dfSBorislav Petkov debugf1("F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n", 17198566c4dfSBorislav Petkov ctrl, ctrl ? pvt->dbam1 : pvt->dbam0); 1720f71d0a05SDoug Thompson 1721f71d0a05SDoug Thompson dbam = ctrl ? pvt->dbam1 : pvt->dbam0; 1722f71d0a05SDoug Thompson dcsb = ctrl ? pvt->dcsb1 : pvt->dcsb0; 1723f71d0a05SDoug Thompson 17248566c4dfSBorislav Petkov edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl); 17258566c4dfSBorislav Petkov 1726f71d0a05SDoug Thompson /* Dump memory sizes for DIMM and its CSROWs */ 1727f71d0a05SDoug Thompson for (dimm = 0; dimm < 4; dimm++) { 1728f71d0a05SDoug Thompson 1729f71d0a05SDoug Thompson size0 = 0; 1730f71d0a05SDoug Thompson if (dcsb[dimm*2] & K8_DCSB_CS_ENABLE) 17311433eb99SBorislav Petkov size0 = pvt->ops->dbam_to_cs(pvt, DBAM_DIMM(dimm, dbam)); 1732f71d0a05SDoug Thompson 1733f71d0a05SDoug Thompson size1 = 0; 1734f71d0a05SDoug Thompson if (dcsb[dimm*2 + 1] & K8_DCSB_CS_ENABLE) 17351433eb99SBorislav Petkov size1 = pvt->ops->dbam_to_cs(pvt, DBAM_DIMM(dimm, dbam)); 1736f71d0a05SDoug Thompson 17378566c4dfSBorislav Petkov edac_printk(KERN_DEBUG, EDAC_MC, " %d: %5dMB %d: %5dMB\n", 1738603adaf6SBorislav Petkov dimm * 2, size0 << factor, 1739603adaf6SBorislav Petkov dimm * 2 + 1, size1 << factor); 1740f71d0a05SDoug Thompson } 1741f71d0a05SDoug Thompson } 1742f71d0a05SDoug Thompson 1743f71d0a05SDoug Thompson /* 17444d37607aSDoug Thompson * There currently are 3 types type of MC devices for AMD Athlon/Opterons 17454d37607aSDoug Thompson * (as per PCI DEVICE_IDs): 17464d37607aSDoug Thompson * 17474d37607aSDoug Thompson * Family K8: That is the Athlon64 and Opteron CPUs. They all have the same PCI 17484d37607aSDoug Thompson * DEVICE ID, even though there is differences between the different Revisions 17494d37607aSDoug Thompson * (CG,D,E,F). 
17504d37607aSDoug Thompson * 17514d37607aSDoug Thompson * Family F10h and F11h. 17524d37607aSDoug Thompson * 17534d37607aSDoug Thompson */ 17544d37607aSDoug Thompson static struct amd64_family_type amd64_family_types[] = { 17554d37607aSDoug Thompson [K8_CPUS] = { 17564d37607aSDoug Thompson .ctl_name = "RevF", 17574d37607aSDoug Thompson .addr_f1_ctl = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP, 17584d37607aSDoug Thompson .misc_f3_ctl = PCI_DEVICE_ID_AMD_K8_NB_MISC, 17594d37607aSDoug Thompson .ops = { 17604d37607aSDoug Thompson .early_channel_count = k8_early_channel_count, 17614d37607aSDoug Thompson .get_error_address = k8_get_error_address, 17624d37607aSDoug Thompson .read_dram_base_limit = k8_read_dram_base_limit, 17634d37607aSDoug Thompson .map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow, 17641433eb99SBorislav Petkov .dbam_to_cs = k8_dbam_to_chip_select, 17654d37607aSDoug Thompson } 17664d37607aSDoug Thompson }, 17674d37607aSDoug Thompson [F10_CPUS] = { 17684d37607aSDoug Thompson .ctl_name = "Family 10h", 17694d37607aSDoug Thompson .addr_f1_ctl = PCI_DEVICE_ID_AMD_10H_NB_MAP, 17704d37607aSDoug Thompson .misc_f3_ctl = PCI_DEVICE_ID_AMD_10H_NB_MISC, 17714d37607aSDoug Thompson .ops = { 17724d37607aSDoug Thompson .early_channel_count = f10_early_channel_count, 17734d37607aSDoug Thompson .get_error_address = f10_get_error_address, 17744d37607aSDoug Thompson .read_dram_base_limit = f10_read_dram_base_limit, 17754d37607aSDoug Thompson .read_dram_ctl_register = f10_read_dram_ctl_register, 17764d37607aSDoug Thompson .map_sysaddr_to_csrow = f10_map_sysaddr_to_csrow, 17771433eb99SBorislav Petkov .dbam_to_cs = f10_dbam_to_chip_select, 17784d37607aSDoug Thompson } 17794d37607aSDoug Thompson }, 17804d37607aSDoug Thompson [F11_CPUS] = { 17814d37607aSDoug Thompson .ctl_name = "Family 11h", 17824d37607aSDoug Thompson .addr_f1_ctl = PCI_DEVICE_ID_AMD_11H_NB_MAP, 17834d37607aSDoug Thompson .misc_f3_ctl = PCI_DEVICE_ID_AMD_11H_NB_MISC, 17844d37607aSDoug Thompson .ops = { 17854d37607aSDoug 
Thompson .early_channel_count = f10_early_channel_count, 17864d37607aSDoug Thompson .get_error_address = f10_get_error_address, 17874d37607aSDoug Thompson .read_dram_base_limit = f10_read_dram_base_limit, 17884d37607aSDoug Thompson .read_dram_ctl_register = f10_read_dram_ctl_register, 17894d37607aSDoug Thompson .map_sysaddr_to_csrow = f10_map_sysaddr_to_csrow, 17901433eb99SBorislav Petkov .dbam_to_cs = f10_dbam_to_chip_select, 17914d37607aSDoug Thompson } 17924d37607aSDoug Thompson }, 17934d37607aSDoug Thompson }; 17944d37607aSDoug Thompson 17954d37607aSDoug Thompson static struct pci_dev *pci_get_related_function(unsigned int vendor, 17964d37607aSDoug Thompson unsigned int device, 17974d37607aSDoug Thompson struct pci_dev *related) 17984d37607aSDoug Thompson { 17994d37607aSDoug Thompson struct pci_dev *dev = NULL; 18004d37607aSDoug Thompson 18014d37607aSDoug Thompson dev = pci_get_device(vendor, device, dev); 18024d37607aSDoug Thompson while (dev) { 18034d37607aSDoug Thompson if ((dev->bus->number == related->bus->number) && 18044d37607aSDoug Thompson (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn))) 18054d37607aSDoug Thompson break; 18064d37607aSDoug Thompson dev = pci_get_device(vendor, device, dev); 18074d37607aSDoug Thompson } 18084d37607aSDoug Thompson 18094d37607aSDoug Thompson return dev; 18104d37607aSDoug Thompson } 18114d37607aSDoug Thompson 1812b1289d6fSDoug Thompson /* 1813bfc04aecSBorislav Petkov * These are tables of eigenvectors (one per line) which can be used for the 1814bfc04aecSBorislav Petkov * construction of the syndrome tables. The modified syndrome search algorithm 1815bfc04aecSBorislav Petkov * uses those to find the symbol in error and thus the DIMM. 1816b1289d6fSDoug Thompson * 1817bfc04aecSBorislav Petkov * Algorithm courtesy of Ross LaFetra from AMD. 
1818b1289d6fSDoug Thompson */ 1819bfc04aecSBorislav Petkov static u16 x4_vectors[] = { 1820bfc04aecSBorislav Petkov 0x2f57, 0x1afe, 0x66cc, 0xdd88, 1821bfc04aecSBorislav Petkov 0x11eb, 0x3396, 0x7f4c, 0xeac8, 1822bfc04aecSBorislav Petkov 0x0001, 0x0002, 0x0004, 0x0008, 1823bfc04aecSBorislav Petkov 0x1013, 0x3032, 0x4044, 0x8088, 1824bfc04aecSBorislav Petkov 0x106b, 0x30d6, 0x70fc, 0xe0a8, 1825bfc04aecSBorislav Petkov 0x4857, 0xc4fe, 0x13cc, 0x3288, 1826bfc04aecSBorislav Petkov 0x1ac5, 0x2f4a, 0x5394, 0xa1e8, 1827bfc04aecSBorislav Petkov 0x1f39, 0x251e, 0xbd6c, 0x6bd8, 1828bfc04aecSBorislav Petkov 0x15c1, 0x2a42, 0x89ac, 0x4758, 1829bfc04aecSBorislav Petkov 0x2b03, 0x1602, 0x4f0c, 0xca08, 1830bfc04aecSBorislav Petkov 0x1f07, 0x3a0e, 0x6b04, 0xbd08, 1831bfc04aecSBorislav Petkov 0x8ba7, 0x465e, 0x244c, 0x1cc8, 1832bfc04aecSBorislav Petkov 0x2b87, 0x164e, 0x642c, 0xdc18, 1833bfc04aecSBorislav Petkov 0x40b9, 0x80de, 0x1094, 0x20e8, 1834bfc04aecSBorislav Petkov 0x27db, 0x1eb6, 0x9dac, 0x7b58, 1835bfc04aecSBorislav Petkov 0x11c1, 0x2242, 0x84ac, 0x4c58, 1836bfc04aecSBorislav Petkov 0x1be5, 0x2d7a, 0x5e34, 0xa718, 1837bfc04aecSBorislav Petkov 0x4b39, 0x8d1e, 0x14b4, 0x28d8, 1838bfc04aecSBorislav Petkov 0x4c97, 0xc87e, 0x11fc, 0x33a8, 1839bfc04aecSBorislav Petkov 0x8e97, 0x497e, 0x2ffc, 0x1aa8, 1840bfc04aecSBorislav Petkov 0x16b3, 0x3d62, 0x4f34, 0x8518, 1841bfc04aecSBorislav Petkov 0x1e2f, 0x391a, 0x5cac, 0xf858, 1842bfc04aecSBorislav Petkov 0x1d9f, 0x3b7a, 0x572c, 0xfe18, 1843bfc04aecSBorislav Petkov 0x15f5, 0x2a5a, 0x5264, 0xa3b8, 1844bfc04aecSBorislav Petkov 0x1dbb, 0x3b66, 0x715c, 0xe3f8, 1845bfc04aecSBorislav Petkov 0x4397, 0xc27e, 0x17fc, 0x3ea8, 1846bfc04aecSBorislav Petkov 0x1617, 0x3d3e, 0x6464, 0xb8b8, 1847bfc04aecSBorislav Petkov 0x23ff, 0x12aa, 0xab6c, 0x56d8, 1848bfc04aecSBorislav Petkov 0x2dfb, 0x1ba6, 0x913c, 0x7328, 1849bfc04aecSBorislav Petkov 0x185d, 0x2ca6, 0x7914, 0x9e28, 1850bfc04aecSBorislav Petkov 0x171b, 0x3e36, 0x7d7c, 0xebe8, 1851bfc04aecSBorislav 
Petkov 0x4199, 0x82ee, 0x19f4, 0x2e58, 1852bfc04aecSBorislav Petkov 0x4807, 0xc40e, 0x130c, 0x3208, 1853bfc04aecSBorislav Petkov 0x1905, 0x2e0a, 0x5804, 0xac08, 1854bfc04aecSBorislav Petkov 0x213f, 0x132a, 0xadfc, 0x5ba8, 1855bfc04aecSBorislav Petkov 0x19a9, 0x2efe, 0xb5cc, 0x6f88, 1856b1289d6fSDoug Thompson }; 1857b1289d6fSDoug Thompson 1858bfc04aecSBorislav Petkov static u16 x8_vectors[] = { 1859bfc04aecSBorislav Petkov 0x0145, 0x028a, 0x2374, 0x43c8, 0xa1f0, 0x0520, 0x0a40, 0x1480, 1860bfc04aecSBorislav Petkov 0x0211, 0x0422, 0x0844, 0x1088, 0x01b0, 0x44e0, 0x23c0, 0xed80, 1861bfc04aecSBorislav Petkov 0x1011, 0x0116, 0x022c, 0x0458, 0x08b0, 0x8c60, 0x2740, 0x4e80, 1862bfc04aecSBorislav Petkov 0x0411, 0x0822, 0x1044, 0x0158, 0x02b0, 0x2360, 0x46c0, 0xab80, 1863bfc04aecSBorislav Petkov 0x0811, 0x1022, 0x012c, 0x0258, 0x04b0, 0x4660, 0x8cc0, 0x2780, 1864bfc04aecSBorislav Petkov 0x2071, 0x40e2, 0xa0c4, 0x0108, 0x0210, 0x0420, 0x0840, 0x1080, 1865bfc04aecSBorislav Petkov 0x4071, 0x80e2, 0x0104, 0x0208, 0x0410, 0x0820, 0x1040, 0x2080, 1866bfc04aecSBorislav Petkov 0x8071, 0x0102, 0x0204, 0x0408, 0x0810, 0x1020, 0x2040, 0x4080, 1867bfc04aecSBorislav Petkov 0x019d, 0x03d6, 0x136c, 0x2198, 0x50b0, 0xb2e0, 0x0740, 0x0e80, 1868bfc04aecSBorislav Petkov 0x0189, 0x03ea, 0x072c, 0x0e58, 0x1cb0, 0x56e0, 0x37c0, 0xf580, 1869bfc04aecSBorislav Petkov 0x01fd, 0x0376, 0x06ec, 0x0bb8, 0x1110, 0x2220, 0x4440, 0x8880, 1870bfc04aecSBorislav Petkov 0x0163, 0x02c6, 0x1104, 0x0758, 0x0eb0, 0x2be0, 0x6140, 0xc280, 1871bfc04aecSBorislav Petkov 0x02fd, 0x01c6, 0x0b5c, 0x1108, 0x07b0, 0x25a0, 0x8840, 0x6180, 1872bfc04aecSBorislav Petkov 0x0801, 0x012e, 0x025c, 0x04b8, 0x1370, 0x26e0, 0x57c0, 0xb580, 1873bfc04aecSBorislav Petkov 0x0401, 0x0802, 0x015c, 0x02b8, 0x22b0, 0x13e0, 0x7140, 0xe280, 1874bfc04aecSBorislav Petkov 0x0201, 0x0402, 0x0804, 0x01b8, 0x11b0, 0x31a0, 0x8040, 0x7180, 1875bfc04aecSBorislav Petkov 0x0101, 0x0202, 0x0404, 0x0808, 0x1010, 0x2020, 0x4040, 0x8080, 1876bfc04aecSBorislav 
Petkov 0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080, 1877bfc04aecSBorislav Petkov 0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000, 1878bfc04aecSBorislav Petkov }; 1879bfc04aecSBorislav Petkov 1880bfc04aecSBorislav Petkov static int decode_syndrome(u16 syndrome, u16 *vectors, int num_vecs, 1881bfc04aecSBorislav Petkov int v_dim) 1882b1289d6fSDoug Thompson { 1883bfc04aecSBorislav Petkov unsigned int i, err_sym; 1884b1289d6fSDoug Thompson 1885bfc04aecSBorislav Petkov for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) { 1886bfc04aecSBorislav Petkov u16 s = syndrome; 1887bfc04aecSBorislav Petkov int v_idx = err_sym * v_dim; 1888bfc04aecSBorislav Petkov int v_end = (err_sym + 1) * v_dim; 1889b1289d6fSDoug Thompson 1890bfc04aecSBorislav Petkov /* walk over all 16 bits of the syndrome */ 1891bfc04aecSBorislav Petkov for (i = 1; i < (1U << 16); i <<= 1) { 1892bfc04aecSBorislav Petkov 1893bfc04aecSBorislav Petkov /* if bit is set in that eigenvector... */ 1894bfc04aecSBorislav Petkov if (v_idx < v_end && vectors[v_idx] & i) { 1895bfc04aecSBorislav Petkov u16 ev_comp = vectors[v_idx++]; 1896bfc04aecSBorislav Petkov 1897bfc04aecSBorislav Petkov /* ... and bit set in the modified syndrome, */ 1898bfc04aecSBorislav Petkov if (s & i) { 1899bfc04aecSBorislav Petkov /* remove it. 
*/ 1900bfc04aecSBorislav Petkov s ^= ev_comp; 1901bfc04aecSBorislav Petkov 1902bfc04aecSBorislav Petkov if (!s) 1903bfc04aecSBorislav Petkov return err_sym; 1904bfc04aecSBorislav Petkov } 1905bfc04aecSBorislav Petkov 1906bfc04aecSBorislav Petkov } else if (s & i) 1907bfc04aecSBorislav Petkov /* can't get to zero, move to next symbol */ 1908bfc04aecSBorislav Petkov break; 1909bfc04aecSBorislav Petkov } 1910b1289d6fSDoug Thompson } 1911b1289d6fSDoug Thompson 1912b1289d6fSDoug Thompson debugf0("syndrome(%x) not found\n", syndrome); 1913b1289d6fSDoug Thompson return -1; 1914b1289d6fSDoug Thompson } 1915d27bf6faSDoug Thompson 1916bfc04aecSBorislav Petkov static int map_err_sym_to_channel(int err_sym, int sym_size) 1917bfc04aecSBorislav Petkov { 1918bfc04aecSBorislav Petkov if (sym_size == 4) 1919bfc04aecSBorislav Petkov switch (err_sym) { 1920bfc04aecSBorislav Petkov case 0x20: 1921bfc04aecSBorislav Petkov case 0x21: 1922bfc04aecSBorislav Petkov return 0; 1923bfc04aecSBorislav Petkov break; 1924bfc04aecSBorislav Petkov case 0x22: 1925bfc04aecSBorislav Petkov case 0x23: 1926bfc04aecSBorislav Petkov return 1; 1927bfc04aecSBorislav Petkov break; 1928bfc04aecSBorislav Petkov default: 1929bfc04aecSBorislav Petkov return err_sym >> 4; 1930bfc04aecSBorislav Petkov break; 1931bfc04aecSBorislav Petkov } 1932bfc04aecSBorislav Petkov /* x8 symbols */ 1933bfc04aecSBorislav Petkov else 1934bfc04aecSBorislav Petkov switch (err_sym) { 1935bfc04aecSBorislav Petkov /* imaginary bits not in a DIMM */ 1936bfc04aecSBorislav Petkov case 0x10: 1937bfc04aecSBorislav Petkov WARN(1, KERN_ERR "Invalid error symbol: 0x%x\n", 1938bfc04aecSBorislav Petkov err_sym); 1939bfc04aecSBorislav Petkov return -1; 1940bfc04aecSBorislav Petkov break; 1941bfc04aecSBorislav Petkov 1942bfc04aecSBorislav Petkov case 0x11: 1943bfc04aecSBorislav Petkov return 0; 1944bfc04aecSBorislav Petkov break; 1945bfc04aecSBorislav Petkov case 0x12: 1946bfc04aecSBorislav Petkov return 1; 1947bfc04aecSBorislav Petkov break; 
1948bfc04aecSBorislav Petkov default: 1949bfc04aecSBorislav Petkov return err_sym >> 3; 1950bfc04aecSBorislav Petkov break; 1951bfc04aecSBorislav Petkov } 1952bfc04aecSBorislav Petkov return -1; 1953bfc04aecSBorislav Petkov } 1954bfc04aecSBorislav Petkov 1955bfc04aecSBorislav Petkov static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome) 1956bfc04aecSBorislav Petkov { 1957bfc04aecSBorislav Petkov struct amd64_pvt *pvt = mci->pvt_info; 1958bfc04aecSBorislav Petkov u32 value = 0; 1959bfc04aecSBorislav Petkov int err_sym = 0; 1960bfc04aecSBorislav Petkov 1961bfc04aecSBorislav Petkov amd64_read_pci_cfg(pvt->misc_f3_ctl, 0x180, &value); 1962bfc04aecSBorislav Petkov 1963bfc04aecSBorislav Petkov /* F3x180[EccSymbolSize]=1, x8 symbols */ 1964bfc04aecSBorislav Petkov if (boot_cpu_data.x86 == 0x10 && 1965bfc04aecSBorislav Petkov boot_cpu_data.x86_model > 7 && 1966bfc04aecSBorislav Petkov value & BIT(25)) { 1967bfc04aecSBorislav Petkov err_sym = decode_syndrome(syndrome, x8_vectors, 1968bfc04aecSBorislav Petkov ARRAY_SIZE(x8_vectors), 8); 1969bfc04aecSBorislav Petkov return map_err_sym_to_channel(err_sym, 8); 1970bfc04aecSBorislav Petkov } else { 1971bfc04aecSBorislav Petkov err_sym = decode_syndrome(syndrome, x4_vectors, 1972bfc04aecSBorislav Petkov ARRAY_SIZE(x4_vectors), 4); 1973bfc04aecSBorislav Petkov return map_err_sym_to_channel(err_sym, 4); 1974bfc04aecSBorislav Petkov } 1975bfc04aecSBorislav Petkov } 1976bfc04aecSBorislav Petkov 1977d27bf6faSDoug Thompson /* 1978d27bf6faSDoug Thompson * Check for valid error in the NB Status High register. If so, proceed to read 1979d27bf6faSDoug Thompson * NB Status Low, NB Address Low and NB Address High registers and store data 1980d27bf6faSDoug Thompson * into error structure. 
 *
 * Returns:
 *	- 1: if hardware regs contains valid error info
 *	- 0: if no valid error is indicated
 */
static int amd64_get_error_info_regs(struct mem_ctl_info *mci,
				     struct err_regs *regs)
{
	struct amd64_pvt *pvt;
	struct pci_dev *misc_f3_ctl;

	pvt = mci->pvt_info;
	misc_f3_ctl = pvt->misc_f3_ctl;

	/* NBSH carries the error-valid bit; a failed config read means
	 * no usable error info, report "no error". */
	if (amd64_read_pci_cfg(misc_f3_ctl, K8_NBSH, &regs->nbsh))
		return 0;

	if (!(regs->nbsh & K8_NBSH_VALID_BIT))
		return 0;

	/* valid error, read remaining error information registers */
	if (amd64_read_pci_cfg(misc_f3_ctl, K8_NBSL, &regs->nbsl) ||
	    amd64_read_pci_cfg(misc_f3_ctl, K8_NBEAL, &regs->nbeal) ||
	    amd64_read_pci_cfg(misc_f3_ctl, K8_NBEAH, &regs->nbeah) ||
	    amd64_read_pci_cfg(misc_f3_ctl, K8_NBCFG, &regs->nbcfg))
		return 0;

	return 1;
}

/*
 * This function is called to retrieve the error data from hardware and store it
 * in the info structure.
 *
 * Returns:
 *	- 1: if a valid error is found
 *	- 0: if no error is found
 */
static int amd64_get_error_info(struct mem_ctl_info *mci,
				struct err_regs *info)
{
	struct amd64_pvt *pvt;
	struct err_regs regs;

	pvt = mci->pvt_info;

	if (!amd64_get_error_info_regs(mci, info))
		return 0;

	/*
	 * Here's the problem with the K8's EDAC reporting: There are four
	 * registers which report pieces of error information. They are shared
	 * between CEs and UEs. Furthermore, contrary to what is stated in the
	 * BKDG, the overflow bit is never used! Every error always updates the
	 * reporting registers.
	 *
	 * Can you see the race condition? All four error reporting registers
	 * must be read before a new error updates them! There is no way to read
	 * all four registers atomically. The best that can be done is to detect
	 * that a race has occurred and then report the error without any kind of
	 * precision.
	 *
	 * What is still positive is that errors are still reported and thus
	 * problems can still be detected - just not localized because the
	 * syndrome and address are spread out across registers.
	 *
	 * Grrrrr!!!!! Here's hoping that AMD fixes this in some future K8 rev.
	 * UEs and CEs should have separate register sets with proper overflow
	 * bits that are used! At very least the problem can be fixed by
	 * honoring the ErrValid bit in 'nbsh' and not updating registers - just
	 * set the overflow bit - unless the current error is CE and the new
	 * error is UE which would be the only situation for overwriting the
	 * current values.
	 */

	regs = *info;

	/* Use info from the second read - most current */
	if (unlikely(!amd64_get_error_info_regs(mci, info)))
		return 0;

	/* clear the error bits in hardware */
	pci_write_bits32(pvt->misc_f3_ctl, K8_NBSH, 0, K8_NBSH_VALID_BIT);

	/* Check for the possible race condition: both snapshots must match */
	if ((regs.nbsh != info->nbsh) ||
	    (regs.nbsl != info->nbsl) ||
	    (regs.nbeah != info->nbeah) ||
	    (regs.nbeal != info->nbeal)) {
		amd64_mc_printk(mci, KERN_WARNING,
				"hardware STATUS read access race condition "
				"detected!\n");
		return 0;
	}
	return 1;
}

/*
 * Handle any Correctable Errors (CEs) that have occurred.
 Check for valid ERROR
 * ADDRESS and process.
 */
static void amd64_handle_ce(struct mem_ctl_info *mci,
			    struct err_regs *info)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u64 sys_addr;

	/* Ensure that the Error Address is VALID */
	if ((info->nbsh & K8_NBSH_VALID_ERROR_ADDR) == 0) {
		amd64_mc_printk(mci, KERN_ERR,
				"HW has no ERROR_ADDRESS available\n");
		edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
		return;
	}

	/* family-specific translation of the raw NB address registers */
	sys_addr = pvt->ops->get_error_address(mci, info);

	amd64_mc_printk(mci, KERN_ERR,
			"CE ERROR_ADDRESS= 0x%llx\n", sys_addr);

	pvt->ops->map_sysaddr_to_csrow(mci, info, sys_addr);
}

/* Handle any Un-correctable Errors (UEs) */
static void amd64_handle_ue(struct mem_ctl_info *mci,
			    struct err_regs *info)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	struct mem_ctl_info *log_mci, *src_mci = NULL;
	int csrow;
	u64 sys_addr;
	u32 page, offset;

	log_mci = mci;

	if ((info->nbsh & K8_NBSH_VALID_ERROR_ADDR) == 0) {
		amd64_mc_printk(mci, KERN_CRIT,
				"HW has no ERROR_ADDRESS available\n");
		edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
		return;
	}

	sys_addr = pvt->ops->get_error_address(mci, info);

	/*
	 * Find out which node the error address belongs to. This may be
	 * different from the node that detected the error.
	 */
	src_mci = find_mc_by_sys_addr(mci, sys_addr);
	if (!src_mci) {
		amd64_mc_printk(mci, KERN_CRIT,
			"ERROR ADDRESS (0x%lx) value NOT mapped to a MC\n",
			(unsigned long)sys_addr);
		edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
		return;
	}

	/* log against the node that owns the address, not the detector */
	log_mci = src_mci;

	csrow = sys_addr_to_csrow(log_mci, sys_addr);
	if (csrow < 0) {
		amd64_mc_printk(mci, KERN_CRIT,
			"ERROR_ADDRESS (0x%lx) value NOT mapped to 'csrow'\n",
			(unsigned long)sys_addr);
		edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
	} else {
		error_address_to_page_and_offset(sys_addr, &page, &offset);
		edac_mc_handle_ue(log_mci, page, offset, csrow, EDAC_MOD_STR);
	}
}

/*
 * Dispatch a decoded NB bus error to the CE or UE handler. Observed errors
 * and non-ECC extended error codes are ignored.
 */
static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci,
					    struct err_regs *info)
{
	u32 ec = ERROR_CODE(info->nbsl);
	u32 xec = EXT_ERROR_CODE(info->nbsl);
	/* NBSH[14:13]: 2 == CE, 1 == UE */
	int ecc_type = (info->nbsh >> 13) & 0x3;

	/* Bail early out if this was an 'observed' error */
	if (PP(ec) == K8_NBSL_PP_OBS)
		return;

	/* Do only ECC errors */
	if (xec && xec != F10_NBSL_EXT_ERR_ECC)
		return;

	if (ecc_type == 2)
		amd64_handle_ce(mci, info);
	else if (ecc_type == 1)
		amd64_handle_ue(mci, info);

	/*
	 * If main error is CE then overflow must be CE. If main error is UE
	 * then overflow is unknown. We'll call the overflow a CE - if
	 * panic_on_ue is set then we're already panic'ed and won't arrive
	 * here. Else, then apparently someone doesn't think that UE's are
	 * catastrophic.
	 */
	if (info->nbsh & K8_NBSH_OVERFLOW)
		edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR "Error Overflow");
}

/* Entry point used by the MCE decoder: look up the MC for @node_id and decode */
void amd64_decode_bus_error(int node_id, struct err_regs *regs)
{
	struct mem_ctl_info *mci = mci_lookup[node_id];

	__amd64_decode_bus_error(mci, regs);

	/*
	 * Check the UE bit of the NB status high register, if set generate some
 logs. If NOT a GART error, then process the event as a NO-INFO event.
	 * If it was a GART error, skip that process.
	 *
	 * FIXME: this should go somewhere else, if at all.
	 */
	if (regs->nbsh & K8_NBSH_UC_ERR && !report_gart_errors)
		edac_mc_handle_ue_no_info(mci, "UE bit is set");

}

/*
 * The main polling 'check' function, called FROM the edac core to perform the
 * error checking and if an error is encountered, error processing.
 */
static void amd64_check(struct mem_ctl_info *mci)
{
	struct err_regs regs;

	if (amd64_get_error_info(mci, &regs)) {
		struct amd64_pvt *pvt = mci->pvt_info;
		amd_decode_nb_mce(pvt->mc_node_id, &regs, 1);
	}
}

/*
 * Input:
 *	1) struct amd64_pvt which contains pvt->dram_f2_ctl pointer
 *	2) AMD Family index value
 *
 * Output:
 *	Upon return of 0, the following filled in:
 *
 *		struct pvt->addr_f1_ctl
 *		struct pvt->misc_f3_ctl
 *
 *	Filled in with related device functions of 'dram_f2_ctl'
 *	These devices are "reserved" via the pci_get_device()
 *
 *	Upon return of 1 (error status):
 *
 *		Nothing reserved
 */
static int amd64_reserve_mc_sibling_devices(struct amd64_pvt *pvt, int mc_idx)
{
	const struct amd64_family_type *amd64_dev = &amd64_family_types[mc_idx];

	/* Reserve the ADDRESS MAP Device */
	pvt->addr_f1_ctl = pci_get_related_function(pvt->dram_f2_ctl->vendor,
						    amd64_dev->addr_f1_ctl,
						    pvt->dram_f2_ctl);

	if (!pvt->addr_f1_ctl) {
		amd64_printk(KERN_ERR, "error address map device not found: "
			     "vendor %x device 0x%x (broken BIOS?)\n",
			     PCI_VENDOR_ID_AMD, amd64_dev->addr_f1_ctl);
		return 1;
	}

	/* Reserve the MISC Device */
	pvt->misc_f3_ctl = pci_get_related_function(pvt->dram_f2_ctl->vendor,
						    amd64_dev->misc_f3_ctl,
						    pvt->dram_f2_ctl);

	if (!pvt->misc_f3_ctl) {
		/* drop the F1 reference taken above before failing */
		pci_dev_put(pvt->addr_f1_ctl);
		pvt->addr_f1_ctl = NULL;

		amd64_printk(KERN_ERR, "error miscellaneous device not found: "
			     "vendor %x device 0x%x (broken BIOS?)\n",
			     PCI_VENDOR_ID_AMD, amd64_dev->misc_f3_ctl);
		return 1;
	}

	debugf1("    Addr Map device PCI Bus ID:\t%s\n",
		pci_name(pvt->addr_f1_ctl));
	debugf1("    DRAM MEM-CTL PCI Bus ID:\t%s\n",
		pci_name(pvt->dram_f2_ctl));
	debugf1("    Misc device PCI Bus ID:\t%s\n",
		pci_name(pvt->misc_f3_ctl));

	return 0;
}

/* Release the F1/F3 sibling device references taken by the reserve function */
static void amd64_free_mc_sibling_devices(struct amd64_pvt *pvt)
{
	pci_dev_put(pvt->addr_f1_ctl);
	pci_dev_put(pvt->misc_f3_ctl);
}

/*
 * Retrieve the hardware registers of the memory controller (this includes the
 * 'Address Map' and 'Misc' device regs)
 */
static void amd64_read_mc_registers(struct amd64_pvt *pvt)
{
	u64 msr_val;
	int dram;

	/*
	 * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
	 * those are Read-As-Zero
	 */
	rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
	debugf0("  TOP_MEM:  0x%016llx\n", pvt->top_mem);

	/* check first whether TOP_MEM2 is enabled */
	rdmsrl(MSR_K8_SYSCFG, msr_val);
	if (msr_val & (1U << 21)) {
		rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
		debugf0("  TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
	} else
		debugf0("  TOP_MEM2 disabled.\n");

	amd64_cpu_display_info(pvt);

	amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCAP, &pvt->nbcap);

	if (pvt->ops->read_dram_ctl_register)
		pvt->ops->read_dram_ctl_register(pvt);

	for (dram = 0; dram < DRAM_REG_COUNT; dram++) {
		/*
		 * Call CPU specific READ function to get the DRAM Base and
		 * Limit values from the DCT.
		 */
		pvt->ops->read_dram_base_limit(pvt, dram);

		/*
		 * Only print out debug info on rows with both R and W Enabled.
		 * Normal processing, compiler should optimize this whole 'if'
		 * debug output block away.
		 */
		if (pvt->dram_rw_en[dram] != 0) {
			debugf1("  DRAM-BASE[%d]: 0x%016llx "
				"DRAM-LIMIT:  0x%016llx\n",
				dram,
				pvt->dram_base[dram],
				pvt->dram_limit[dram]);

			debugf1("        IntlvEn=%s %s %s "
				"IntlvSel=%d DstNode=%d\n",
				pvt->dram_IntlvEn[dram] ?
					"Enabled" : "Disabled",
				(pvt->dram_rw_en[dram] & 0x2) ? "W" : "!W",
				(pvt->dram_rw_en[dram] & 0x1) ? "R" : "!R",
				pvt->dram_IntlvSel[dram],
				pvt->dram_DstNode[dram]);
		}
	}

	amd64_read_dct_base_mask(pvt);

	amd64_read_pci_cfg(pvt->addr_f1_ctl, K8_DHAR, &pvt->dhar);
	amd64_read_dbam_reg(pvt);

	amd64_read_pci_cfg(pvt->misc_f3_ctl,
			   F10_ONLINE_SPARE, &pvt->online_spare);

	amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCLR_0, &pvt->dclr0);
	amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCHR_0, &pvt->dchr0);

	/* second DCT register set exists only when not ganged, Fam10h+ */
	if (!dct_ganging_enabled(pvt) && boot_cpu_data.x86 >= 0x10) {
		amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCLR_1, &pvt->dclr1);
		amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCHR_1, &pvt->dchr1);
	}
	amd64_dump_misc_regs(pvt);
}

/*
 * NOTE: CPU Revision Dependent code
 *
 * Input:
 *	@csrow_nr ChipSelect Row Number (0..pvt->cs_count-1)
 *	k8 private pointer to -->
 *			DRAM Bank Address mapping register
 *			node_id
 *			DCL register where dual_channel_active is
 *
 * The DBAM register consists of 4 sets of 4 bits each definitions:
 *
 * Bits:	CSROWs
 * 0-3		CSROWs 0 and 1
 * 4-7		CSROWs 2 and 3
 * 8-11		CSROWs 4 and 5
 * 12-15	CSROWs 6 and 7
 *
 * Values range from: 0 to 15
 * The meaning of the values depends on CPU revision and dual-channel state,
 * see relevant BKDG more info.
 *
 * The memory controller provides for total of only 8 CSROWs in its current
 * architecture. Each "pair" of CSROWs normally represents just one DIMM in
 * single channel or two (2) DIMMs in dual channel mode.
 *
 * The following code logic collapses the various tables for CSROW based on CPU
 * revision.
 *
 * Returns:
 *	The number of PAGE_SIZE pages on the specified CSROW number it
 *	encompasses
 *
 */
static u32 amd64_csrow_nr_pages(int csrow_nr, struct amd64_pvt *pvt)
{
	u32 cs_mode, nr_pages;

	/*
	 * The math on this doesn't look right on the surface because x/2*4 can
	 * be simplified to x*2 but this expression makes use of the fact that
	 * it is integral math where 1/2=0. This intermediate value becomes the
	 * number of bits to shift the DBAM register to extract the proper CSROW
	 * field.
	 */
	cs_mode = (pvt->dbam0 >> ((csrow_nr / 2) * 4)) & 0xF;

	/* dbam_to_cs() yields MiB; shift converts MiB to PAGE_SIZE pages */
	nr_pages = pvt->ops->dbam_to_cs(pvt, cs_mode) << (20 - PAGE_SHIFT);

	/*
	 * If dual channel then double the memory size of single channel.
	 * Channel count is 1 or 2
	 */
	nr_pages <<= (pvt->channel_count - 1);

	debugf0("  (csrow=%d) DBAM map index= %d\n", csrow_nr, cs_mode);
	debugf0("    nr_pages= %u  channel-count = %d\n",
		nr_pages, pvt->channel_count);

	return nr_pages;
}

/*
 * Initialize the array of csrow attribute instances, based on the values
 * from pci config hardware registers.
 */
static int amd64_init_csrows(struct mem_ctl_info *mci)
{
	struct csrow_info *csrow;
	struct amd64_pvt *pvt;
	u64 input_addr_min, input_addr_max, sys_addr;
	int i, empty = 1;

	pvt = mci->pvt_info;

	amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &pvt->nbcfg);

	debugf0("NBCFG= 0x%x  CHIPKILL= %s DRAM ECC= %s\n", pvt->nbcfg,
		(pvt->nbcfg & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled",
		(pvt->nbcfg & K8_NBCFG_ECC_ENABLE) ?
"Enabled" : "Disabled" 24390ec449eeSDoug Thompson ); 24400ec449eeSDoug Thompson 24419d858bb1SBorislav Petkov for (i = 0; i < pvt->cs_count; i++) { 24420ec449eeSDoug Thompson csrow = &mci->csrows[i]; 24430ec449eeSDoug Thompson 24440ec449eeSDoug Thompson if ((pvt->dcsb0[i] & K8_DCSB_CS_ENABLE) == 0) { 24450ec449eeSDoug Thompson debugf1("----CSROW %d EMPTY for node %d\n", i, 24460ec449eeSDoug Thompson pvt->mc_node_id); 24470ec449eeSDoug Thompson continue; 24480ec449eeSDoug Thompson } 24490ec449eeSDoug Thompson 24500ec449eeSDoug Thompson debugf1("----CSROW %d VALID for MC node %d\n", 24510ec449eeSDoug Thompson i, pvt->mc_node_id); 24520ec449eeSDoug Thompson 24530ec449eeSDoug Thompson empty = 0; 24540ec449eeSDoug Thompson csrow->nr_pages = amd64_csrow_nr_pages(i, pvt); 24550ec449eeSDoug Thompson find_csrow_limits(mci, i, &input_addr_min, &input_addr_max); 24560ec449eeSDoug Thompson sys_addr = input_addr_to_sys_addr(mci, input_addr_min); 24570ec449eeSDoug Thompson csrow->first_page = (u32) (sys_addr >> PAGE_SHIFT); 24580ec449eeSDoug Thompson sys_addr = input_addr_to_sys_addr(mci, input_addr_max); 24590ec449eeSDoug Thompson csrow->last_page = (u32) (sys_addr >> PAGE_SHIFT); 24600ec449eeSDoug Thompson csrow->page_mask = ~mask_from_dct_mask(pvt, i); 24610ec449eeSDoug Thompson /* 8 bytes of resolution */ 24620ec449eeSDoug Thompson 24630ec449eeSDoug Thompson csrow->mtype = amd64_determine_memory_type(pvt); 24640ec449eeSDoug Thompson 24650ec449eeSDoug Thompson debugf1(" for MC node %d csrow %d:\n", pvt->mc_node_id, i); 24660ec449eeSDoug Thompson debugf1(" input_addr_min: 0x%lx input_addr_max: 0x%lx\n", 24670ec449eeSDoug Thompson (unsigned long)input_addr_min, 24680ec449eeSDoug Thompson (unsigned long)input_addr_max); 24690ec449eeSDoug Thompson debugf1(" sys_addr: 0x%lx page_mask: 0x%lx\n", 24700ec449eeSDoug Thompson (unsigned long)sys_addr, csrow->page_mask); 24710ec449eeSDoug Thompson debugf1(" nr_pages: %u first_page: 0x%lx " 24720ec449eeSDoug Thompson "last_page: 0x%lx\n", 
24730ec449eeSDoug Thompson (unsigned)csrow->nr_pages, 24740ec449eeSDoug Thompson csrow->first_page, csrow->last_page); 24750ec449eeSDoug Thompson 24760ec449eeSDoug Thompson /* 24770ec449eeSDoug Thompson * determine whether CHIPKILL or JUST ECC or NO ECC is operating 24780ec449eeSDoug Thompson */ 24790ec449eeSDoug Thompson if (pvt->nbcfg & K8_NBCFG_ECC_ENABLE) 24800ec449eeSDoug Thompson csrow->edac_mode = 24810ec449eeSDoug Thompson (pvt->nbcfg & K8_NBCFG_CHIPKILL) ? 24820ec449eeSDoug Thompson EDAC_S4ECD4ED : EDAC_SECDED; 24830ec449eeSDoug Thompson else 24840ec449eeSDoug Thompson csrow->edac_mode = EDAC_NONE; 24850ec449eeSDoug Thompson } 24860ec449eeSDoug Thompson 24870ec449eeSDoug Thompson return empty; 24880ec449eeSDoug Thompson } 2489d27bf6faSDoug Thompson 249006724535SBorislav Petkov /* get all cores on this DCT */ 2491ba578cb3SRusty Russell static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, int nid) 2492f9431992SDoug Thompson { 249306724535SBorislav Petkov int cpu; 2494f9431992SDoug Thompson 249506724535SBorislav Petkov for_each_online_cpu(cpu) 249606724535SBorislav Petkov if (amd_get_nb_id(cpu) == nid) 249706724535SBorislav Petkov cpumask_set_cpu(cpu, mask); 2498f9431992SDoug Thompson } 2499f9431992SDoug Thompson 2500f9431992SDoug Thompson /* check MCG_CTL on all the cpus on this node */ 250106724535SBorislav Petkov static bool amd64_nb_mce_bank_enabled_on_node(int nid) 2502f9431992SDoug Thompson { 2503ba578cb3SRusty Russell cpumask_var_t mask; 250450542251SBorislav Petkov int cpu, nbe; 250506724535SBorislav Petkov bool ret = false; 2506f9431992SDoug Thompson 2507ba578cb3SRusty Russell if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) { 2508ba578cb3SRusty Russell amd64_printk(KERN_WARNING, "%s: error allocating mask\n", 250906724535SBorislav Petkov __func__); 251006724535SBorislav Petkov return false; 251106724535SBorislav Petkov } 251206724535SBorislav Petkov 2513ba578cb3SRusty Russell get_cpus_on_this_dct_cpumask(mask, nid); 251406724535SBorislav Petkov 
2515ba578cb3SRusty Russell rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs); 2516ba578cb3SRusty Russell 2517ba578cb3SRusty Russell for_each_cpu(cpu, mask) { 251850542251SBorislav Petkov struct msr *reg = per_cpu_ptr(msrs, cpu); 251950542251SBorislav Petkov nbe = reg->l & K8_MSR_MCGCTL_NBE; 252006724535SBorislav Petkov 252106724535SBorislav Petkov debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n", 252250542251SBorislav Petkov cpu, reg->q, 252306724535SBorislav Petkov (nbe ? "enabled" : "disabled")); 252406724535SBorislav Petkov 252506724535SBorislav Petkov if (!nbe) 252606724535SBorislav Petkov goto out; 252706724535SBorislav Petkov } 252806724535SBorislav Petkov ret = true; 252906724535SBorislav Petkov 253006724535SBorislav Petkov out: 2531ba578cb3SRusty Russell free_cpumask_var(mask); 2532f9431992SDoug Thompson return ret; 2533f9431992SDoug Thompson } 2534f9431992SDoug Thompson 2535f6d6ae96SBorislav Petkov static int amd64_toggle_ecc_err_reporting(struct amd64_pvt *pvt, bool on) 2536f6d6ae96SBorislav Petkov { 2537f6d6ae96SBorislav Petkov cpumask_var_t cmask; 253850542251SBorislav Petkov int cpu; 2539f6d6ae96SBorislav Petkov 2540f6d6ae96SBorislav Petkov if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) { 2541f6d6ae96SBorislav Petkov amd64_printk(KERN_WARNING, "%s: error allocating mask\n", 2542f6d6ae96SBorislav Petkov __func__); 2543f6d6ae96SBorislav Petkov return false; 2544f6d6ae96SBorislav Petkov } 2545f6d6ae96SBorislav Petkov 2546f6d6ae96SBorislav Petkov get_cpus_on_this_dct_cpumask(cmask, pvt->mc_node_id); 2547f6d6ae96SBorislav Petkov 2548f6d6ae96SBorislav Petkov rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs); 2549f6d6ae96SBorislav Petkov 2550f6d6ae96SBorislav Petkov for_each_cpu(cpu, cmask) { 2551f6d6ae96SBorislav Petkov 255250542251SBorislav Petkov struct msr *reg = per_cpu_ptr(msrs, cpu); 255350542251SBorislav Petkov 2554f6d6ae96SBorislav Petkov if (on) { 255550542251SBorislav Petkov if (reg->l & K8_MSR_MCGCTL_NBE) 2556f6d6ae96SBorislav Petkov pvt->flags.ecc_report = 
1; 2557f6d6ae96SBorislav Petkov 255850542251SBorislav Petkov reg->l |= K8_MSR_MCGCTL_NBE; 2559f6d6ae96SBorislav Petkov } else { 2560f6d6ae96SBorislav Petkov /* 2561f6d6ae96SBorislav Petkov * Turn off ECC reporting only when it was off before 2562f6d6ae96SBorislav Petkov */ 2563f6d6ae96SBorislav Petkov if (!pvt->flags.ecc_report) 256450542251SBorislav Petkov reg->l &= ~K8_MSR_MCGCTL_NBE; 2565f6d6ae96SBorislav Petkov } 2566f6d6ae96SBorislav Petkov } 2567f6d6ae96SBorislav Petkov wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs); 2568f6d6ae96SBorislav Petkov 2569f6d6ae96SBorislav Petkov free_cpumask_var(cmask); 2570f6d6ae96SBorislav Petkov 2571f6d6ae96SBorislav Petkov return 0; 2572f6d6ae96SBorislav Petkov } 2573f6d6ae96SBorislav Petkov 2574f6d6ae96SBorislav Petkov /* 2575f6d6ae96SBorislav Petkov * Only if 'ecc_enable_override' is set AND BIOS had ECC disabled, do "we" 2576f6d6ae96SBorislav Petkov * enable it. 2577f6d6ae96SBorislav Petkov */ 2578f6d6ae96SBorislav Petkov static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci) 2579f6d6ae96SBorislav Petkov { 2580f6d6ae96SBorislav Petkov struct amd64_pvt *pvt = mci->pvt_info; 2581f6d6ae96SBorislav Petkov u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn; 2582f6d6ae96SBorislav Petkov 2583f6d6ae96SBorislav Petkov if (!ecc_enable_override) 2584f6d6ae96SBorislav Petkov return; 2585f6d6ae96SBorislav Petkov 2586f6d6ae96SBorislav Petkov amd64_printk(KERN_WARNING, 2587f6d6ae96SBorislav Petkov "'ecc_enable_override' parameter is active, " 2588f6d6ae96SBorislav Petkov "Enabling AMD ECC hardware now: CAUTION\n"); 2589f6d6ae96SBorislav Petkov 25906ba5dcdcSBorislav Petkov amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCTL, &value); 2591f6d6ae96SBorislav Petkov 2592f6d6ae96SBorislav Petkov /* turn on UECCn and CECCEn bits */ 2593f6d6ae96SBorislav Petkov pvt->old_nbctl = value & mask; 2594f6d6ae96SBorislav Petkov pvt->nbctl_mcgctl_saved = 1; 2595f6d6ae96SBorislav Petkov 2596f6d6ae96SBorislav Petkov value |= mask; 
2597f6d6ae96SBorislav Petkov pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value); 2598f6d6ae96SBorislav Petkov 2599f6d6ae96SBorislav Petkov if (amd64_toggle_ecc_err_reporting(pvt, ON)) 2600f6d6ae96SBorislav Petkov amd64_printk(KERN_WARNING, "Error enabling ECC reporting over " 2601f6d6ae96SBorislav Petkov "MCGCTL!\n"); 2602f6d6ae96SBorislav Petkov 26036ba5dcdcSBorislav Petkov amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &value); 2604f6d6ae96SBorislav Petkov 2605f6d6ae96SBorislav Petkov debugf0("NBCFG(1)= 0x%x CHIPKILL= %s ECC_ENABLE= %s\n", value, 2606f6d6ae96SBorislav Petkov (value & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled", 2607f6d6ae96SBorislav Petkov (value & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled"); 2608f6d6ae96SBorislav Petkov 2609f6d6ae96SBorislav Petkov if (!(value & K8_NBCFG_ECC_ENABLE)) { 2610f6d6ae96SBorislav Petkov amd64_printk(KERN_WARNING, 2611f6d6ae96SBorislav Petkov "This node reports that DRAM ECC is " 2612f6d6ae96SBorislav Petkov "currently Disabled; ENABLING now\n"); 2613f6d6ae96SBorislav Petkov 2614f6d6ae96SBorislav Petkov /* Attempt to turn on DRAM ECC Enable */ 2615f6d6ae96SBorislav Petkov value |= K8_NBCFG_ECC_ENABLE; 2616f6d6ae96SBorislav Petkov pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCFG, value); 2617f6d6ae96SBorislav Petkov 26186ba5dcdcSBorislav Petkov amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &value); 2619f6d6ae96SBorislav Petkov 2620f6d6ae96SBorislav Petkov if (!(value & K8_NBCFG_ECC_ENABLE)) { 2621f6d6ae96SBorislav Petkov amd64_printk(KERN_WARNING, 2622f6d6ae96SBorislav Petkov "Hardware rejects Enabling DRAM ECC checking\n" 2623f6d6ae96SBorislav Petkov "Check memory DIMM configuration\n"); 2624f6d6ae96SBorislav Petkov } else { 2625f6d6ae96SBorislav Petkov amd64_printk(KERN_DEBUG, 2626f6d6ae96SBorislav Petkov "Hardware accepted DRAM ECC Enable\n"); 2627f6d6ae96SBorislav Petkov } 2628f6d6ae96SBorislav Petkov } 2629f6d6ae96SBorislav Petkov debugf0("NBCFG(2)= 0x%x CHIPKILL= %s ECC_ENABLE= %s\n", value, 
2630f6d6ae96SBorislav Petkov (value & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled", 2631f6d6ae96SBorislav Petkov (value & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled"); 2632f6d6ae96SBorislav Petkov 2633f6d6ae96SBorislav Petkov pvt->ctl_error_info.nbcfg = value; 2634f6d6ae96SBorislav Petkov } 2635f6d6ae96SBorislav Petkov 2636f6d6ae96SBorislav Petkov static void amd64_restore_ecc_error_reporting(struct amd64_pvt *pvt) 2637f6d6ae96SBorislav Petkov { 2638f6d6ae96SBorislav Petkov u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn; 2639f6d6ae96SBorislav Petkov 2640f6d6ae96SBorislav Petkov if (!pvt->nbctl_mcgctl_saved) 2641f6d6ae96SBorislav Petkov return; 2642f6d6ae96SBorislav Petkov 26436ba5dcdcSBorislav Petkov amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCTL, &value); 2644f6d6ae96SBorislav Petkov value &= ~mask; 2645f6d6ae96SBorislav Petkov value |= pvt->old_nbctl; 2646f6d6ae96SBorislav Petkov 2647f6d6ae96SBorislav Petkov /* restore the NB Enable MCGCTL bit */ 2648f6d6ae96SBorislav Petkov pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value); 2649f6d6ae96SBorislav Petkov 2650f6d6ae96SBorislav Petkov if (amd64_toggle_ecc_err_reporting(pvt, OFF)) 2651f6d6ae96SBorislav Petkov amd64_printk(KERN_WARNING, "Error restoring ECC reporting over " 2652f6d6ae96SBorislav Petkov "MCGCTL!\n"); 2653f6d6ae96SBorislav Petkov } 2654f6d6ae96SBorislav Petkov 2655f9431992SDoug Thompson /* 2656f9431992SDoug Thompson * EDAC requires that the BIOS have ECC enabled before taking over the 2657f9431992SDoug Thompson * processing of ECC errors. This is because the BIOS can properly initialize 2658f9431992SDoug Thompson * the memory system completely. A command line option allows to force-enable 2659f9431992SDoug Thompson * hardware ECC later in amd64_enable_ecc_error_reporting(). 
 */
static const char *ecc_msg =
	"ECC disabled in the BIOS or no ECC capability, module will not load.\n"
	" Either enable ECC checking or force module loading by setting "
	"'ecc_enable_override'.\n"
	" (Note that use of the override may cause unknown side effects.)\n";

/*
 * Verify that BIOS left DRAM ECC enabled (F3xNBCFG[22]) and that the NB MCE
 * bank is enabled in MCGCTL on every core of this node.  Returns 0 when the
 * driver may proceed, -ENODEV otherwise — unless 'ecc_enable_override' is
 * set, in which case the override is consumed (cleared) and loading continues.
 */
static int amd64_check_ecc_enabled(struct amd64_pvt *pvt)
{
	u32 value;
	u8 ecc_enabled = 0;
	bool nb_mce_en = false;

	amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &value);

	ecc_enabled = !!(value & K8_NBCFG_ECC_ENABLE);
	if (!ecc_enabled)
		amd64_printk(KERN_NOTICE, "This node reports that Memory ECC "
			     "is currently disabled, set F3x%x[22] (%s).\n",
			     K8_NBCFG, pci_name(pvt->misc_f3_ctl));
	else
		amd64_printk(KERN_INFO, "ECC is enabled by BIOS.\n");

	nb_mce_en = amd64_nb_mce_bank_enabled_on_node(pvt->mc_node_id);
	if (!nb_mce_en)
		amd64_printk(KERN_NOTICE, "NB MCE bank disabled, set MSR "
			     "0x%08x[4] on node %d to enable.\n",
			     MSR_IA32_MCG_CTL, pvt->mc_node_id);

	if (!ecc_enabled || !nb_mce_en) {
		if (!ecc_enable_override) {
			amd64_printk(KERN_NOTICE, "%s", ecc_msg);
			return -ENODEV;
		}
		/* Consume the override so it applies to this load only. */
		ecc_enable_override = 0;
	}

	return 0;
}

/*
 * Combined sysfs attribute table: debug attrs, injection attrs and a NULL
 * terminator.  Filled in by amd64_set_mc_sysfs_attributes() below.
 * NOTE(review): file-scope and non-static — presumably only used here;
 * confirm before narrowing linkage.
 */
struct mcidev_sysfs_attribute sysfs_attrs[ARRAY_SIZE(amd64_dbg_attrs) +
					  ARRAY_SIZE(amd64_inj_attrs) +
					  1];

struct mcidev_sysfs_attribute terminator = { .attr = { .name = NULL } };

/* Concatenate debug + injection attrs and hand the table to the mci. */
static void amd64_set_mc_sysfs_attributes(struct mem_ctl_info *mci)
{
	unsigned int i = 0, j = 0;

	for (; i < ARRAY_SIZE(amd64_dbg_attrs); i++)
		sysfs_attrs[i] = amd64_dbg_attrs[i];

	for (j = 0; j < ARRAY_SIZE(amd64_inj_attrs); j++, i++)
		sysfs_attrs[i] = amd64_inj_attrs[j];

	sysfs_attrs[i] = terminator;

	mci->mc_driver_sysfs_attributes = sysfs_attrs;
}

/*
 * Populate the mem_ctl_info descriptor fields (capabilities, names, polling
 * check routine and scrubber hooks) from the node's private data.
 */
static void amd64_setup_mci_misc_attributes(struct mem_ctl_info *mci)
{
	struct amd64_pvt *pvt = mci->pvt_info;

	mci->mtype_cap		= MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
	mci->edac_ctl_cap	= EDAC_FLAG_NONE;

	if (pvt->nbcap & K8_NBCAP_SECDED)
		mci->edac_ctl_cap |= EDAC_FLAG_SECDED;

	if (pvt->nbcap & K8_NBCAP_CHIPKILL)
		mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;

	mci->edac_cap		= amd64_determine_edac_cap(pvt);
	mci->mod_name		= EDAC_MOD_STR;
	mci->mod_ver		= EDAC_AMD64_VERSION;
	mci->ctl_name		= get_amd_family_name(pvt->mc_type_index);
	mci->dev_name		= pci_name(pvt->dram_f2_ctl);
	mci->ctl_page_to_phys	= NULL;

	/* IMPORTANT: Set the polling 'check' function in this module */
	mci->edac_check		= amd64_check;

	/* memory scrubber interface */
	mci->set_sdram_scrub_rate = amd64_set_scrub_rate;
	mci->get_sdram_scrub_rate = amd64_get_scrub_rate;
}

/*
 * Init stuff for this DRAM Controller device.
 *
 * Due to a hardware feature on Fam10h CPUs, the Enable Extended Configuration
 * Space feature MUST be enabled on ALL Processors prior to actually reading
 * from the ECS registers. Since the loading of the module can occur on any
 * 'core', and cores don't 'see' all the other processors ECS data when the
 * others are NOT enabled. Our solution is to first enable ECS access in this
 * routine on all processors, gather some data in a amd64_pvt structure and
 * later come back in a finish-setup function to perform that final
 * initialization. See also amd64_init_2nd_stage() for that.
 */
static int amd64_probe_one_instance(struct pci_dev *dram_f2_ctl,
				    int mc_type_index)
{
	struct amd64_pvt *pvt = NULL;
	int err = 0, ret;

	ret = -ENOMEM;
	pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
	if (!pvt)
		goto err_exit;

	pvt->mc_node_id = get_node_id(dram_f2_ctl);

	pvt->dram_f2_ctl	= dram_f2_ctl;
	/* extended model is bits [7:4] of the model number */
	pvt->ext_model		= boot_cpu_data.x86_model >> 4;
	pvt->mc_type_index	= mc_type_index;
	pvt->ops		= family_ops(mc_type_index);

	/*
	 * We have the dram_f2_ctl device as an argument, now go reserve its
	 * sibling devices from the PCI system.
	 */
	ret = -ENODEV;
	err = amd64_reserve_mc_sibling_devices(pvt, mc_type_index);
	if (err)
		goto err_free;

	ret = -EINVAL;
	err = amd64_check_ecc_enabled(pvt);
	if (err)
		goto err_put;

	/*
	 * Key operation here: setup of HW prior to performing ops on it. Some
	 * setup is required to access ECS data. After this is performed, the
	 * 'teardown' function must be called upon error and normal exit paths.
	 */
	if (boot_cpu_data.x86 >= 0x10)
		amd64_setup(pvt);

	/*
	 * Save the pointer to the private data for use in 2nd initialization
	 * stage
	 */
	pvt_lookup[pvt->mc_node_id] = pvt;

	return 0;

err_put:
	amd64_free_mc_sibling_devices(pvt);

err_free:
	kfree(pvt);

err_exit:
	return ret;
}

/*
 * This is the finishing stage of the init code. Needs to be performed after all
 * MCs' hardware have been prepped for accessing extended config space.
 */
static int amd64_init_2nd_stage(struct amd64_pvt *pvt)
{
	int node_id = pvt->mc_node_id;
	struct mem_ctl_info *mci;
	int ret = -ENODEV;

	amd64_read_mc_registers(pvt);

	/*
	 * We need to determine how many memory channels there are. Then use
	 * that information for calculating the size of the dynamic instance
	 * tables in the 'mci' structure
	 */
	pvt->channel_count = pvt->ops->early_channel_count(pvt);
	if (pvt->channel_count < 0)
		goto err_exit;

	ret = -ENOMEM;
	mci = edac_mc_alloc(0, pvt->cs_count, pvt->channel_count, node_id);
	if (!mci)
		goto err_exit;

	mci->pvt_info = pvt;

	mci->dev = &pvt->dram_f2_ctl->dev;
	amd64_setup_mci_misc_attributes(mci);

	/* no usable csrows => advertise no EDAC capability */
	if (amd64_init_csrows(mci))
		mci->edac_cap = EDAC_FLAG_NONE;

	amd64_enable_ecc_error_reporting(mci);
	amd64_set_mc_sysfs_attributes(mci);

	ret = -ENODEV;
	if (edac_mc_add_mc(mci)) {
		debugf1("failed edac_mc_add_mc()\n");
		goto err_add_mc;
	}

	/* ownership transfers to mci_lookup[]; drop the stage-1 pointer */
	mci_lookup[node_id] = mci;
	pvt_lookup[node_id] = NULL;

	/* register stuff with EDAC MCE */
	if (report_gart_errors)
		amd_report_gart_errors(true);

	amd_register_ecc_decoder(amd64_decode_bus_error);

	return 0;

err_add_mc:
	edac_mc_free(mci);

err_exit:
	debugf0("failure to init 2nd stage: ret=%d\n", ret);

	/* unwind everything stage 1 set up for this node */
	amd64_restore_ecc_error_reporting(pvt);

	if (boot_cpu_data.x86 > 0xf)
		amd64_teardown(pvt);

	amd64_free_mc_sibling_devices(pvt);

	kfree(pvt_lookup[pvt->mc_node_id]);
	pvt_lookup[node_id] = NULL;

	return ret;
}


/*
 * PCI probe callback: enable the device, then run stage-1 initialization.
 */
static int __devinit amd64_init_one_instance(struct pci_dev *pdev,
				 const struct pci_device_id *mc_type)
{
	int ret = 0;

	debugf0("(MC node=%d,mc_type='%s')\n", get_node_id(pdev),
		get_amd_family_name(mc_type->driver_data));

	ret = pci_enable_device(pdev);
	if (ret < 0)
		ret = -EIO;
	else
		ret = amd64_probe_one_instance(pdev, mc_type->driver_data);

	if (ret < 0)
		debugf0("ret=%d\n", ret);

	return ret;
}

/*
 * PCI remove callback: unhook the mci from EDAC core, undo the ECC/hardware
 * setup and free the private data.  Teardown order mirrors init in reverse.
 */
static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
{
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;

	/* Remove from EDAC CORE tracking list */
	mci = edac_mc_del_mc(&pdev->dev);
	if (!mci)
		return;

	pvt = mci->pvt_info;

	amd64_restore_ecc_error_reporting(pvt);

	if (boot_cpu_data.x86 > 0xf)
		amd64_teardown(pvt);

	amd64_free_mc_sibling_devices(pvt);

	/* unregister from EDAC MCE */
	amd_report_gart_errors(false);
	amd_unregister_ecc_decoder(amd64_decode_bus_error);

	/* Free the EDAC CORE resources */
	mci->pvt_info = NULL;
	mci_lookup[pvt->mc_node_id] = NULL;

	kfree(pvt);
	edac_mc_free(mci);
}

/*
 * This table is part of the interface for loading drivers for PCI devices. The
 * PCI core identifies what devices are on a system during boot, and then
 * inquiry this table to see if this driver is for a given device found.
 */
static const struct pci_device_id amd64_pci_table[] __devinitdata = {
	{
		.vendor		= PCI_VENDOR_ID_AMD,
		.device		= PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.class		= 0,
		.class_mask	= 0,
		.driver_data	= K8_CPUS
	},
	{
		.vendor		= PCI_VENDOR_ID_AMD,
		.device		= PCI_DEVICE_ID_AMD_10H_NB_DRAM,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.class		= 0,
		.class_mask	= 0,
		.driver_data	= F10_CPUS
	},
	{
		.vendor		= PCI_VENDOR_ID_AMD,
		.device		= PCI_DEVICE_ID_AMD_11H_NB_DRAM,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.class		= 0,
		.class_mask	= 0,
		.driver_data	= F11_CPUS
	},
	{0, }
};
MODULE_DEVICE_TABLE(pci, amd64_pci_table);

static struct pci_driver amd64_pci_driver = {
	.name		= EDAC_MOD_STR,
	.probe		= amd64_init_one_instance,
	.remove		= __devexit_p(amd64_remove_one_instance),
	.id_table	= amd64_pci_table,
};

/*
 * Create the one EDAC PCI control device for the driver, anchored on the
 * first successfully-initialized MC (node 0's mci).  Idempotent.
 */
static void amd64_setup_pci_device(void)
{
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;

	if (amd64_ctl_pci)
		return;

	mci = mci_lookup[0];
	if (mci) {

		pvt = mci->pvt_info;
		amd64_ctl_pci =
			edac_pci_create_generic_ctl(&pvt->dram_f2_ctl->dev,
						    EDAC_MOD_STR);

		if (!amd64_ctl_pci) {
			pr_warning("%s(): Unable to create PCI control\n",
				   __func__);

			pr_warning("%s(): PCI error report via EDAC not set\n",
				   __func__);
		}
	}
}

/*
 * Module entry point: register the PCI driver (stage 1 probes run inside
 * pci_register_driver()), then finish initialization of every discovered
 * node via amd64_init_2nd_stage().  The module loads if at least one node
 * initialized successfully (load_ok).
 */
static int __init amd64_edac_init(void)
{
	int nb, err = -ENODEV;
	bool load_ok = false;

	edac_printk(KERN_INFO, EDAC_MOD_STR, EDAC_AMD64_VERSION "\n");

	opstate_init();

	if (cache_k8_northbridges() < 0)
		goto err_ret;

	/* per-cpu MSR buffer shared by the rdmsr/wrmsr_on_cpus() helpers */
	msrs = msrs_alloc();
	if (!msrs)
		goto err_ret;

	err = pci_register_driver(&amd64_pci_driver);
	if (err)
		goto err_pci;

	/*
	 * At this point, the array 'pvt_lookup[]' contains pointers to alloc'd
	 * amd64_pvt structs. These will be used in the 2nd stage init function
	 * to finish initialization of the MC instances.
	 */
	err = -ENODEV;
	for (nb = 0; nb < num_k8_northbridges; nb++) {
		if (!pvt_lookup[nb])
			continue;

		err = amd64_init_2nd_stage(pvt_lookup[nb]);
		if (err)
			goto err_2nd_stage;

		load_ok = true;
	}

	if (load_ok) {
		amd64_setup_pci_device();
		return 0;
	}

err_2nd_stage:
	pci_unregister_driver(&amd64_pci_driver);
err_pci:
	msrs_free(msrs);
	msrs = NULL;
err_ret:
	return err;
}

/* Module exit: release PCI control, driver and the per-cpu MSR buffer. */
static void __exit amd64_edac_exit(void)
{
	if (amd64_ctl_pci)
		edac_pci_release_generic_ctl(amd64_ctl_pci);

	pci_unregister_driver(&amd64_pci_driver);

	msrs_free(msrs);
	msrs = NULL;
}

module_init(amd64_edac_init);
module_exit(amd64_edac_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, "
	      "Dave Peterson, Thayne Harbaugh");
MODULE_DESCRIPTION("MC support for AMD64 memory controllers - "
		   EDAC_AMD64_VERSION);

module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");