xref: /openbmc/linux/drivers/edac/amd64_edac.c (revision 8a10bc9d)
1 #include "amd64_edac.h"
2 #include <asm/amd_nb.h>
3 
4 static struct edac_pci_ctl_info *pci_ctl;
5 
6 static int report_gart_errors;
7 module_param(report_gart_errors, int, 0644);
8 
9 /*
10  * Set by command line parameter. If BIOS has enabled the ECC, this override is
11  * cleared to prevent re-enabling the hardware by this driver.
12  */
13 static int ecc_enable_override;
14 module_param(ecc_enable_override, int, 0644);
15 
16 static struct msr __percpu *msrs;
17 
18 /*
19  * count successfully initialized driver instances for setup_pci_device()
20  */
21 static atomic_t drv_instances = ATOMIC_INIT(0);
22 
23 /* Per-node driver instances */
24 static struct mem_ctl_info **mcis;
25 static struct ecc_settings **ecc_stngs;
26 
27 /*
28  * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
29  * bandwidth to a valid bit pattern. The 'set' operation finds the 'matching-
30  * or higher value'.
31  *
32  *FIXME: Produce a better mapping/linearisation.
33  */
34 static const struct scrubrate {
35        u32 scrubval;           /* bit pattern for scrub rate */
36        u32 bandwidth;          /* bandwidth consumed (bytes/sec) */
37 } scrubrates[] = {
38 	{ 0x01, 1600000000UL},
39 	{ 0x02, 800000000UL},
40 	{ 0x03, 400000000UL},
41 	{ 0x04, 200000000UL},
42 	{ 0x05, 100000000UL},
43 	{ 0x06, 50000000UL},
44 	{ 0x07, 25000000UL},
45 	{ 0x08, 12284069UL},
46 	{ 0x09, 6274509UL},
47 	{ 0x0A, 3121951UL},
48 	{ 0x0B, 1560975UL},
49 	{ 0x0C, 781440UL},
50 	{ 0x0D, 390720UL},
51 	{ 0x0E, 195300UL},
52 	{ 0x0F, 97650UL},
53 	{ 0x10, 48854UL},
54 	{ 0x11, 24427UL},
55 	{ 0x12, 12213UL},
56 	{ 0x13, 6101UL},
57 	{ 0x14, 3051UL},
58 	{ 0x15, 1523UL},
59 	{ 0x16, 761UL},
60 	{ 0x00, 0UL},        /* scrubbing off */
61 };
62 
63 int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
64 			       u32 *val, const char *func)
65 {
66 	int err = 0;
67 
68 	err = pci_read_config_dword(pdev, offset, val);
69 	if (err)
70 		amd64_warn("%s: error reading F%dx%03x.\n",
71 			   func, PCI_FUNC(pdev->devfn), offset);
72 
73 	return err;
74 }
75 
76 int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
77 				u32 val, const char *func)
78 {
79 	int err = 0;
80 
81 	err = pci_write_config_dword(pdev, offset, val);
82 	if (err)
83 		amd64_warn("%s: error writing to F%dx%03x.\n",
84 			   func, PCI_FUNC(pdev->devfn), offset);
85 
86 	return err;
87 }
88 
89 /*
90  *
91  * Depending on the family, F2 DCT reads need special handling:
92  *
93  * K8: has a single DCT only
94  *
95  * F10h: each DCT has its own set of regs
96  *	DCT0 -> F2x040..
97  *	DCT1 -> F2x140..
98  *
99  * F15h: we select which DCT we access using F1x10C[DctCfgSel]
100  *
101  * F16h: has only 1 DCT
102  */
103 static int k8_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
104 			       const char *func)
105 {
106 	if (addr >= 0x100)
107 		return -EINVAL;
108 
109 	return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
110 }
111 
112 static int f10_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
113 				 const char *func)
114 {
115 	return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
116 }
117 
118 /*
119  * Select DCT to which PCI cfg accesses are routed
120  */
121 static void f15h_select_dct(struct amd64_pvt *pvt, u8 dct)
122 {
123 	u32 reg = 0;
124 
125 	amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, &reg);
126 	reg &= (pvt->model >= 0x30) ? ~3 : ~1;
127 	reg |= dct;
128 	amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg);
129 }
130 
131 static int f15_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
132 				 const char *func)
133 {
134 	u8 dct  = 0;
135 
136 	/* For F15 M30h, the second dct is DCT 3, refer to BKDG Section 2.10 */
137 	if (addr >= 0x140 && addr <= 0x1a0) {
138 		dct   = (pvt->model >= 0x30) ? 3 : 1;
139 		addr -= 0x100;
140 	}
141 
142 	f15h_select_dct(pvt, dct);
143 
144 	return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
145 }
146 
147 /*
148  * Memory scrubber control interface. For K8, memory scrubbing is handled by
149  * hardware and can involve L2 cache, dcache as well as the main memory. With
150  * F10, this is extended to L3 cache scrubbing on CPU models sporting that
151  * functionality.
152  *
153  * This causes the "units" for the scrubbing speed to vary from 64 byte blocks
154  * (dram) over to cache lines. This is nasty, so we will use bandwidth in
155  * bytes/sec for the setting.
156  *
157  * Currently, we only do dram scrubbing. If the scrubbing is done in software on
158  * other archs, we might not have access to the caches directly.
159  */
160 
161 /*
162  * scan the scrub rate mapping table for a close or matching bandwidth value to
163  * issue. If requested is too big, then use last maximum value found.
164  */
165 static int __set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
166 {
167 	u32 scrubval;
168 	int i;
169 
170 	/*
171 	 * map the configured rate (new_bw) to a value specific to the AMD64
172 	 * memory controller and apply to register. Search for the first
173 	 * bandwidth entry that is greater or equal than the setting requested
174 	 * and program that. If at last entry, turn off DRAM scrubbing.
175 	 *
176 	 * If no suitable bandwidth is found, turn off DRAM scrubbing entirely
177 	 * by falling back to the last element in scrubrates[].
178 	 */
179 	for (i = 0; i < ARRAY_SIZE(scrubrates) - 1; i++) {
180 		/*
181 		 * skip scrub rates which aren't recommended
182 		 * (see F10 BKDG, F3x58)
183 		 */
184 		if (scrubrates[i].scrubval < min_rate)
185 			continue;
186 
187 		if (scrubrates[i].bandwidth <= new_bw)
188 			break;
189 	}
190 
191 	scrubval = scrubrates[i].scrubval;
192 
193 	pci_write_bits32(ctl, SCRCTRL, scrubval, 0x001F);
194 
195 	if (scrubval)
196 		return scrubrates[i].bandwidth;
197 
198 	return 0;
199 }
200 
201 static int set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
202 {
203 	struct amd64_pvt *pvt = mci->pvt_info;
204 	u32 min_scrubrate = 0x5;
205 
206 	if (pvt->fam == 0xf)
207 		min_scrubrate = 0x0;
208 
209 	/* Erratum #505 */
210 	if (pvt->fam == 0x15 && pvt->model < 0x10)
211 		f15h_select_dct(pvt, 0);
212 
213 	return __set_scrub_rate(pvt->F3, bw, min_scrubrate);
214 }
215 
216 static int get_scrub_rate(struct mem_ctl_info *mci)
217 {
218 	struct amd64_pvt *pvt = mci->pvt_info;
219 	u32 scrubval = 0;
220 	int i, retval = -EINVAL;
221 
222 	/* Erratum #505 */
223 	if (pvt->fam == 0x15 && pvt->model < 0x10)
224 		f15h_select_dct(pvt, 0);
225 
226 	amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
227 
228 	scrubval = scrubval & 0x001F;
229 
230 	for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
231 		if (scrubrates[i].scrubval == scrubval) {
232 			retval = scrubrates[i].bandwidth;
233 			break;
234 		}
235 	}
236 	return retval;
237 }
238 
239 /*
240  * returns true if the SysAddr given by sys_addr matches the
241  * DRAM base/limit associated with node_id
242  */
243 static bool base_limit_match(struct amd64_pvt *pvt, u64 sys_addr, u8 nid)
244 {
245 	u64 addr;
246 
247 	/* The K8 treats this as a 40-bit value.  However, bits 63-40 will be
248 	 * all ones if the most significant implemented address bit is 1.
249 	 * Here we discard bits 63-40.  See section 3.4.2 of AMD publication
250 	 * 24592: AMD x86-64 Architecture Programmer's Manual Volume 1
251 	 * Application Programming.
252 	 */
253 	addr = sys_addr & 0x000000ffffffffffull;
254 
255 	return ((addr >= get_dram_base(pvt, nid)) &&
256 		(addr <= get_dram_limit(pvt, nid)));
257 }
258 
259 /*
260  * Attempt to map a SysAddr to a node. On success, return a pointer to the
261  * mem_ctl_info structure for the node that the SysAddr maps to.
262  *
263  * On failure, return NULL.
264  */
265 static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
266 						u64 sys_addr)
267 {
268 	struct amd64_pvt *pvt;
269 	u8 node_id;
270 	u32 intlv_en, bits;
271 
272 	/*
273 	 * Here we use the DRAM Base (section 3.4.4.1) and DRAM Limit (section
274 	 * 3.4.4.2) registers to map the SysAddr to a node ID.
275 	 */
276 	pvt = mci->pvt_info;
277 
278 	/*
279 	 * The value of this field should be the same for all DRAM Base
280 	 * registers.  Therefore we arbitrarily choose to read it from the
281 	 * register for node 0.
282 	 */
283 	intlv_en = dram_intlv_en(pvt, 0);
284 
285 	if (intlv_en == 0) {
286 		for (node_id = 0; node_id < DRAM_RANGES; node_id++) {
287 			if (base_limit_match(pvt, sys_addr, node_id))
288 				goto found;
289 		}
290 		goto err_no_match;
291 	}
292 
293 	if (unlikely((intlv_en != 0x01) &&
294 		     (intlv_en != 0x03) &&
295 		     (intlv_en != 0x07))) {
296 		amd64_warn("DRAM Base[IntlvEn] junk value: 0x%x, BIOS bug?\n", intlv_en);
297 		return NULL;
298 	}
299 
300 	bits = (((u32) sys_addr) >> 12) & intlv_en;
301 
302 	for (node_id = 0; ; ) {
303 		if ((dram_intlv_sel(pvt, node_id) & intlv_en) == bits)
304 			break;	/* intlv_sel field matches */
305 
306 		if (++node_id >= DRAM_RANGES)
307 			goto err_no_match;
308 	}
309 
310 	/* sanity test for sys_addr */
311 	if (unlikely(!base_limit_match(pvt, sys_addr, node_id))) {
312 		amd64_warn("%s: sys_addr 0x%llx falls outside base/limit address"
313 			   "range for node %d with node interleaving enabled.\n",
314 			   __func__, sys_addr, node_id);
315 		return NULL;
316 	}
317 
318 found:
319 	return edac_mc_find((int)node_id);
320 
321 err_no_match:
322 	edac_dbg(2, "sys_addr 0x%lx doesn't match any node\n",
323 		 (unsigned long)sys_addr);
324 
325 	return NULL;
326 }
327 
328 /*
329  * compute the CS base address of the @csrow on the DRAM controller @dct.
330  * For details see F2x[5C:40] in the processor's BKDG
331  */
332 static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
333 				 u64 *base, u64 *mask)
334 {
335 	u64 csbase, csmask, base_bits, mask_bits;
336 	u8 addr_shift;
337 
338 	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
339 		csbase		= pvt->csels[dct].csbases[csrow];
340 		csmask		= pvt->csels[dct].csmasks[csrow];
341 		base_bits	= GENMASK_ULL(31, 21) | GENMASK_ULL(15, 9);
342 		mask_bits	= GENMASK_ULL(29, 21) | GENMASK_ULL(15, 9);
343 		addr_shift	= 4;
344 
345 	/*
346 	 * F16h and F15h, models 30h and later need two addr_shift values:
347 	 * 8 for high and 6 for low (cf. F16h BKDG).
348 	 */
349 	} else if (pvt->fam == 0x16 ||
350 		  (pvt->fam == 0x15 && pvt->model >= 0x30)) {
351 		csbase          = pvt->csels[dct].csbases[csrow];
352 		csmask          = pvt->csels[dct].csmasks[csrow >> 1];
353 
354 		*base  = (csbase & GENMASK_ULL(15,  5)) << 6;
355 		*base |= (csbase & GENMASK_ULL(30, 19)) << 8;
356 
357 		*mask = ~0ULL;
358 		/* poke holes for the csmask */
359 		*mask &= ~((GENMASK_ULL(15, 5)  << 6) |
360 			   (GENMASK_ULL(30, 19) << 8));
361 
362 		*mask |= (csmask & GENMASK_ULL(15, 5))  << 6;
363 		*mask |= (csmask & GENMASK_ULL(30, 19)) << 8;
364 
365 		return;
366 	} else {
367 		csbase		= pvt->csels[dct].csbases[csrow];
368 		csmask		= pvt->csels[dct].csmasks[csrow >> 1];
369 		addr_shift	= 8;
370 
371 		if (pvt->fam == 0x15)
372 			base_bits = mask_bits =
373 				GENMASK_ULL(30,19) | GENMASK_ULL(13,5);
374 		else
375 			base_bits = mask_bits =
376 				GENMASK_ULL(28,19) | GENMASK_ULL(13,5);
377 	}
378 
379 	*base  = (csbase & base_bits) << addr_shift;
380 
381 	*mask  = ~0ULL;
382 	/* poke holes for the csmask */
383 	*mask &= ~(mask_bits << addr_shift);
384 	/* OR them in */
385 	*mask |= (csmask & mask_bits) << addr_shift;
386 }
387 
388 #define for_each_chip_select(i, dct, pvt) \
389 	for (i = 0; i < pvt->csels[dct].b_cnt; i++)
390 
391 #define chip_select_base(i, dct, pvt) \
392 	pvt->csels[dct].csbases[i]
393 
394 #define for_each_chip_select_mask(i, dct, pvt) \
395 	for (i = 0; i < pvt->csels[dct].m_cnt; i++)
396 
397 /*
398  * @input_addr is an InputAddr associated with the node given by mci. Return the
399  * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
400  */
401 static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
402 {
403 	struct amd64_pvt *pvt;
404 	int csrow;
405 	u64 base, mask;
406 
407 	pvt = mci->pvt_info;
408 
409 	for_each_chip_select(csrow, 0, pvt) {
410 		if (!csrow_enabled(csrow, 0, pvt))
411 			continue;
412 
413 		get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);
414 
415 		mask = ~mask;
416 
417 		if ((input_addr & mask) == (base & mask)) {
418 			edac_dbg(2, "InputAddr 0x%lx matches csrow %d (node %d)\n",
419 				 (unsigned long)input_addr, csrow,
420 				 pvt->mc_node_id);
421 
422 			return csrow;
423 		}
424 	}
425 	edac_dbg(2, "no matching csrow for InputAddr 0x%lx (MC node %d)\n",
426 		 (unsigned long)input_addr, pvt->mc_node_id);
427 
428 	return -1;
429 }
430 
431 /*
432  * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094)
433  * for the node represented by mci. Info is passed back in *hole_base,
434  * *hole_offset, and *hole_size.  Function returns 0 if info is valid or 1 if
435  * info is invalid. Info may be invalid for either of the following reasons:
436  *
437  * - The revision of the node is not E or greater.  In this case, the DRAM Hole
438  *   Address Register does not exist.
439  *
440  * - The DramHoleValid bit is cleared in the DRAM Hole Address Register,
441  *   indicating that its contents are not valid.
442  *
443  * The values passed back in *hole_base, *hole_offset, and *hole_size are
444  * complete 32-bit values despite the fact that the bitfields in the DHAR
445  * only represent bits 31-24 of the base and offset values.
446  */
447 int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
448 			     u64 *hole_offset, u64 *hole_size)
449 {
450 	struct amd64_pvt *pvt = mci->pvt_info;
451 
452 	/* only revE and later have the DRAM Hole Address Register */
453 	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_E) {
454 		edac_dbg(1, "  revision %d for node %d does not support DHAR\n",
455 			 pvt->ext_model, pvt->mc_node_id);
456 		return 1;
457 	}
458 
459 	/* valid for Fam10h and above */
460 	if (pvt->fam >= 0x10 && !dhar_mem_hoist_valid(pvt)) {
461 		edac_dbg(1, "  Dram Memory Hoisting is DISABLED on this system\n");
462 		return 1;
463 	}
464 
465 	if (!dhar_valid(pvt)) {
466 		edac_dbg(1, "  Dram Memory Hoisting is DISABLED on this node %d\n",
467 			 pvt->mc_node_id);
468 		return 1;
469 	}
470 
471 	/* This node has Memory Hoisting */
472 
473 	/* +------------------+--------------------+--------------------+-----
474 	 * | memory           | DRAM hole          | relocated          |
475 	 * | [0, (x - 1)]     | [x, 0xffffffff]    | addresses from     |
476 	 * |                  |                    | DRAM hole          |
477 	 * |                  |                    | [0x100000000,      |
478 	 * |                  |                    |  (0x100000000+     |
479 	 * |                  |                    |   (0xffffffff-x))] |
480 	 * +------------------+--------------------+--------------------+-----
481 	 *
482 	 * Above is a diagram of physical memory showing the DRAM hole and the
483 	 * relocated addresses from the DRAM hole.  As shown, the DRAM hole
484 	 * starts at address x (the base address) and extends through address
485 	 * 0xffffffff.  The DRAM Hole Address Register (DHAR) relocates the
486 	 * addresses in the hole so that they start at 0x100000000.
487 	 */
488 
489 	*hole_base = dhar_base(pvt);
490 	*hole_size = (1ULL << 32) - *hole_base;
491 
492 	*hole_offset = (pvt->fam > 0xf) ? f10_dhar_offset(pvt)
493 					: k8_dhar_offset(pvt);
494 
495 	edac_dbg(1, "  DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
496 		 pvt->mc_node_id, (unsigned long)*hole_base,
497 		 (unsigned long)*hole_offset, (unsigned long)*hole_size);
498 
499 	return 0;
500 }
501 EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info);
502 
503 /*
504  * Return the DramAddr that the SysAddr given by @sys_addr maps to.  It is
505  * assumed that sys_addr maps to the node given by mci.
506  *
507  * The first part of section 3.4.4 (p. 70) shows how the DRAM Base (section
508  * 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers are used to translate a
509  * SysAddr to a DramAddr. If the DRAM Hole Address Register (DHAR) is enabled,
510  * then it is also involved in translating a SysAddr to a DramAddr. Sections
511  * 3.4.8 and 3.5.8.2 describe the DHAR and how it is used for memory hoisting.
512  * These parts of the documentation are unclear. I interpret them as follows:
513  *
514  * When node n receives a SysAddr, it processes the SysAddr as follows:
515  *
516  * 1. It extracts the DRAMBase and DRAMLimit values from the DRAM Base and DRAM
517  *    Limit registers for node n. If the SysAddr is not within the range
518  *    specified by the base and limit values, then node n ignores the Sysaddr
519  *    (since it does not map to node n). Otherwise continue to step 2 below.
520  *
521  * 2. If the DramHoleValid bit of the DHAR for node n is clear, the DHAR is
522  *    disabled so skip to step 3 below. Otherwise see if the SysAddr is within
523  *    the range of relocated addresses (starting at 0x100000000) from the DRAM
524  *    hole. If not, skip to step 3 below. Else get the value of the
525  *    DramHoleOffset field from the DHAR. To obtain the DramAddr, subtract the
526  *    offset defined by this value from the SysAddr.
527  *
528  * 3. Obtain the base address for node n from the DRAMBase field of the DRAM
529  *    Base register for node n. To obtain the DramAddr, subtract the base
530  *    address from the SysAddr, as shown near the start of section 3.4.4 (p.70).
531  */
532 static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
533 {
534 	struct amd64_pvt *pvt = mci->pvt_info;
535 	u64 dram_base, hole_base, hole_offset, hole_size, dram_addr;
536 	int ret;
537 
538 	dram_base = get_dram_base(pvt, pvt->mc_node_id);
539 
540 	ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
541 				      &hole_size);
542 	if (!ret) {
543 		if ((sys_addr >= (1ULL << 32)) &&
544 		    (sys_addr < ((1ULL << 32) + hole_size))) {
545 			/* use DHAR to translate SysAddr to DramAddr */
546 			dram_addr = sys_addr - hole_offset;
547 
548 			edac_dbg(2, "using DHAR to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
549 				 (unsigned long)sys_addr,
550 				 (unsigned long)dram_addr);
551 
552 			return dram_addr;
553 		}
554 	}
555 
556 	/*
557 	 * Translate the SysAddr to a DramAddr as shown near the start of
558 	 * section 3.4.4 (p. 70).  Although sys_addr is a 64-bit value, the k8
559 	 * only deals with 40-bit values.  Therefore we discard bits 63-40 of
560 	 * sys_addr below.  If bit 39 of sys_addr is 1 then the bits we
561 	 * discard are all 1s.  Otherwise the bits we discard are all 0s.  See
562 	 * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture
563 	 * Programmer's Manual Volume 1 Application Programming.
564 	 */
565 	dram_addr = (sys_addr & GENMASK_ULL(39, 0)) - dram_base;
566 
567 	edac_dbg(2, "using DRAM Base register to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
568 		 (unsigned long)sys_addr, (unsigned long)dram_addr);
569 	return dram_addr;
570 }
571 
572 /*
573  * @intlv_en is the value of the IntlvEn field from a DRAM Base register
574  * (section 3.4.4.1).  Return the number of bits from a SysAddr that are used
575  * for node interleaving.
576  */
577 static int num_node_interleave_bits(unsigned intlv_en)
578 {
579 	static const int intlv_shift_table[] = { 0, 1, 0, 2, 0, 0, 0, 3 };
580 	int n;
581 
582 	BUG_ON(intlv_en > 7);
583 	n = intlv_shift_table[intlv_en];
584 	return n;
585 }
586 
587 /* Translate the DramAddr given by @dram_addr to an InputAddr. */
588 static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
589 {
590 	struct amd64_pvt *pvt;
591 	int intlv_shift;
592 	u64 input_addr;
593 
594 	pvt = mci->pvt_info;
595 
596 	/*
597 	 * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
598 	 * concerning translating a DramAddr to an InputAddr.
599 	 */
600 	intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
601 	input_addr = ((dram_addr >> intlv_shift) & GENMASK_ULL(35, 12)) +
602 		      (dram_addr & 0xfff);
603 
604 	edac_dbg(2, "  Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
605 		 intlv_shift, (unsigned long)dram_addr,
606 		 (unsigned long)input_addr);
607 
608 	return input_addr;
609 }
610 
611 /*
612  * Translate the SysAddr represented by @sys_addr to an InputAddr.  It is
613  * assumed that @sys_addr maps to the node given by mci.
614  */
615 static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
616 {
617 	u64 input_addr;
618 
619 	input_addr =
620 	    dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr));
621 
622 	edac_dbg(2, "SysAdddr 0x%lx translates to InputAddr 0x%lx\n",
623 		 (unsigned long)sys_addr, (unsigned long)input_addr);
624 
625 	return input_addr;
626 }
627 
628 /* Map the Error address to a PAGE and PAGE OFFSET. */
629 static inline void error_address_to_page_and_offset(u64 error_address,
630 						    struct err_info *err)
631 {
632 	err->page = (u32) (error_address >> PAGE_SHIFT);
633 	err->offset = ((u32) error_address) & ~PAGE_MASK;
634 }
635 
636 /*
637  * @sys_addr is an error address (a SysAddr) extracted from the MCA NB Address
638  * Low (section 3.6.4.5) and MCA NB Address High (section 3.6.4.6) registers
639  * of a node that detected an ECC memory error.  mci represents the node that
640  * the error address maps to (possibly different from the node that detected
641  * the error).  Return the number of the csrow that sys_addr maps to, or -1 on
642  * error.
643  */
644 static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
645 {
646 	int csrow;
647 
648 	csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr));
649 
650 	if (csrow == -1)
651 		amd64_mc_err(mci, "Failed to translate InputAddr to csrow for "
652 				  "address 0x%lx\n", (unsigned long)sys_addr);
653 	return csrow;
654 }
655 
656 static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);
657 
658 /*
659  * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
660  * are ECC capable.
661  */
662 static unsigned long determine_edac_cap(struct amd64_pvt *pvt)
663 {
664 	u8 bit;
665 	unsigned long edac_cap = EDAC_FLAG_NONE;
666 
667 	bit = (pvt->fam > 0xf || pvt->ext_model >= K8_REV_F)
668 		? 19
669 		: 17;
670 
671 	if (pvt->dclr0 & BIT(bit))
672 		edac_cap = EDAC_FLAG_SECDED;
673 
674 	return edac_cap;
675 }
676 
677 static void debug_display_dimm_sizes(struct amd64_pvt *, u8);
678 
679 static void debug_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan)
680 {
681 	edac_dbg(1, "F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);
682 
683 	edac_dbg(1, "  DIMM type: %sbuffered; all DIMMs support ECC: %s\n",
684 		 (dclr & BIT(16)) ?  "un" : "",
685 		 (dclr & BIT(19)) ? "yes" : "no");
686 
687 	edac_dbg(1, "  PAR/ERR parity: %s\n",
688 		 (dclr & BIT(8)) ?  "enabled" : "disabled");
689 
690 	if (pvt->fam == 0x10)
691 		edac_dbg(1, "  DCT 128bit mode width: %s\n",
692 			 (dclr & BIT(11)) ?  "128b" : "64b");
693 
694 	edac_dbg(1, "  x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
695 		 (dclr & BIT(12)) ?  "yes" : "no",
696 		 (dclr & BIT(13)) ?  "yes" : "no",
697 		 (dclr & BIT(14)) ?  "yes" : "no",
698 		 (dclr & BIT(15)) ?  "yes" : "no");
699 }
700 
701 /* Display and decode various NB registers for debug purposes. */
702 static void dump_misc_regs(struct amd64_pvt *pvt)
703 {
704 	edac_dbg(1, "F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);
705 
706 	edac_dbg(1, "  NB two channel DRAM capable: %s\n",
707 		 (pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no");
708 
709 	edac_dbg(1, "  ECC capable: %s, ChipKill ECC capable: %s\n",
710 		 (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no",
711 		 (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no");
712 
713 	debug_dump_dramcfg_low(pvt, pvt->dclr0, 0);
714 
715 	edac_dbg(1, "F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);
716 
717 	edac_dbg(1, "F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, offset: 0x%08x\n",
718 		 pvt->dhar, dhar_base(pvt),
719 		 (pvt->fam == 0xf) ? k8_dhar_offset(pvt)
720 				   : f10_dhar_offset(pvt));
721 
722 	edac_dbg(1, "  DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");
723 
724 	debug_display_dimm_sizes(pvt, 0);
725 
726 	/* everything below this point is Fam10h and above */
727 	if (pvt->fam == 0xf)
728 		return;
729 
730 	debug_display_dimm_sizes(pvt, 1);
731 
732 	amd64_info("using %s syndromes.\n", ((pvt->ecc_sym_sz == 8) ? "x8" : "x4"));
733 
734 	/* Only if NOT ganged does dclr1 have valid info */
735 	if (!dct_ganging_enabled(pvt))
736 		debug_dump_dramcfg_low(pvt, pvt->dclr1, 1);
737 }
738 
739 /*
740  * See BKDG, F2x[1,0][5C:40], F2[1,0][6C:60]
741  */
742 static void prep_chip_selects(struct amd64_pvt *pvt)
743 {
744 	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
745 		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
746 		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 8;
747 	} else if (pvt->fam == 0x15 && pvt->model >= 0x30) {
748 		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 4;
749 		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 2;
750 	} else {
751 		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
752 		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4;
753 	}
754 }
755 
756 /*
757  * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask registers
758  */
759 static void read_dct_base_mask(struct amd64_pvt *pvt)
760 {
761 	int cs;
762 
763 	prep_chip_selects(pvt);
764 
765 	for_each_chip_select(cs, 0, pvt) {
766 		int reg0   = DCSB0 + (cs * 4);
767 		int reg1   = DCSB1 + (cs * 4);
768 		u32 *base0 = &pvt->csels[0].csbases[cs];
769 		u32 *base1 = &pvt->csels[1].csbases[cs];
770 
771 		if (!amd64_read_dct_pci_cfg(pvt, reg0, base0))
772 			edac_dbg(0, "  DCSB0[%d]=0x%08x reg: F2x%x\n",
773 				 cs, *base0, reg0);
774 
775 		if (pvt->fam == 0xf || dct_ganging_enabled(pvt))
776 			continue;
777 
778 		if (!amd64_read_dct_pci_cfg(pvt, reg1, base1))
779 			edac_dbg(0, "  DCSB1[%d]=0x%08x reg: F2x%x\n",
780 				 cs, *base1, reg1);
781 	}
782 
783 	for_each_chip_select_mask(cs, 0, pvt) {
784 		int reg0   = DCSM0 + (cs * 4);
785 		int reg1   = DCSM1 + (cs * 4);
786 		u32 *mask0 = &pvt->csels[0].csmasks[cs];
787 		u32 *mask1 = &pvt->csels[1].csmasks[cs];
788 
789 		if (!amd64_read_dct_pci_cfg(pvt, reg0, mask0))
790 			edac_dbg(0, "    DCSM0[%d]=0x%08x reg: F2x%x\n",
791 				 cs, *mask0, reg0);
792 
793 		if (pvt->fam == 0xf || dct_ganging_enabled(pvt))
794 			continue;
795 
796 		if (!amd64_read_dct_pci_cfg(pvt, reg1, mask1))
797 			edac_dbg(0, "    DCSM1[%d]=0x%08x reg: F2x%x\n",
798 				 cs, *mask1, reg1);
799 	}
800 }
801 
802 static enum mem_type determine_memory_type(struct amd64_pvt *pvt, int cs)
803 {
804 	enum mem_type type;
805 
806 	/* F15h supports only DDR3 */
807 	if (pvt->fam >= 0x15)
808 		type = (pvt->dclr0 & BIT(16)) ?	MEM_DDR3 : MEM_RDDR3;
809 	else if (pvt->fam == 0x10 || pvt->ext_model >= K8_REV_F) {
810 		if (pvt->dchr0 & DDR3_MODE)
811 			type = (pvt->dclr0 & BIT(16)) ?	MEM_DDR3 : MEM_RDDR3;
812 		else
813 			type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
814 	} else {
815 		type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;
816 	}
817 
818 	amd64_info("CS%d: %s\n", cs, edac_mem_types[type]);
819 
820 	return type;
821 }
822 
823 /* Get the number of DCT channels the memory controller is using. */
824 static int k8_early_channel_count(struct amd64_pvt *pvt)
825 {
826 	int flag;
827 
828 	if (pvt->ext_model >= K8_REV_F)
829 		/* RevF (NPT) and later */
830 		flag = pvt->dclr0 & WIDTH_128;
831 	else
832 		/* RevE and earlier */
833 		flag = pvt->dclr0 & REVE_WIDTH_128;
834 
835 	/* not used */
836 	pvt->dclr1 = 0;
837 
838 	return (flag) ? 2 : 1;
839 }
840 
841 /* On F10h and later ErrAddr is MC4_ADDR[47:1] */
842 static u64 get_error_address(struct amd64_pvt *pvt, struct mce *m)
843 {
844 	u64 addr;
845 	u8 start_bit = 1;
846 	u8 end_bit   = 47;
847 
848 	if (pvt->fam == 0xf) {
849 		start_bit = 3;
850 		end_bit   = 39;
851 	}
852 
853 	addr = m->addr & GENMASK_ULL(end_bit, start_bit);
854 
855 	/*
856 	 * Erratum 637 workaround
857 	 */
858 	if (pvt->fam == 0x15) {
859 		struct amd64_pvt *pvt;
860 		u64 cc6_base, tmp_addr;
861 		u32 tmp;
862 		u16 mce_nid;
863 		u8 intlv_en;
864 
865 		if ((addr & GENMASK_ULL(47, 24)) >> 24 != 0x00fdf7)
866 			return addr;
867 
868 		mce_nid	= amd_get_nb_id(m->extcpu);
869 		pvt	= mcis[mce_nid]->pvt_info;
870 
871 		amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_LIM, &tmp);
872 		intlv_en = tmp >> 21 & 0x7;
873 
874 		/* add [47:27] + 3 trailing bits */
875 		cc6_base  = (tmp & GENMASK_ULL(20, 0)) << 3;
876 
877 		/* reverse and add DramIntlvEn */
878 		cc6_base |= intlv_en ^ 0x7;
879 
880 		/* pin at [47:24] */
881 		cc6_base <<= 24;
882 
883 		if (!intlv_en)
884 			return cc6_base | (addr & GENMASK_ULL(23, 0));
885 
886 		amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_BASE, &tmp);
887 
888 							/* faster log2 */
889 		tmp_addr  = (addr & GENMASK_ULL(23, 12)) << __fls(intlv_en + 1);
890 
891 		/* OR DramIntlvSel into bits [14:12] */
892 		tmp_addr |= (tmp & GENMASK_ULL(23, 21)) >> 9;
893 
894 		/* add remaining [11:0] bits from original MC4_ADDR */
895 		tmp_addr |= addr & GENMASK_ULL(11, 0);
896 
897 		return cc6_base | tmp_addr;
898 	}
899 
900 	return addr;
901 }
902 
903 static struct pci_dev *pci_get_related_function(unsigned int vendor,
904 						unsigned int device,
905 						struct pci_dev *related)
906 {
907 	struct pci_dev *dev = NULL;
908 
909 	while ((dev = pci_get_device(vendor, device, dev))) {
910 		if (pci_domain_nr(dev->bus) == pci_domain_nr(related->bus) &&
911 		    (dev->bus->number == related->bus->number) &&
912 		    (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
913 			break;
914 	}
915 
916 	return dev;
917 }
918 
919 static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
920 {
921 	struct amd_northbridge *nb;
922 	struct pci_dev *f1 = NULL;
923 	unsigned int pci_func;
924 	int off = range << 3;
925 	u32 llim;
926 
927 	amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off,  &pvt->ranges[range].base.lo);
928 	amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo);
929 
930 	if (pvt->fam == 0xf)
931 		return;
932 
933 	if (!dram_rw(pvt, range))
934 		return;
935 
936 	amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off,  &pvt->ranges[range].base.hi);
937 	amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi);
938 
939 	/* F15h: factor in CC6 save area by reading dst node's limit reg */
940 	if (pvt->fam != 0x15)
941 		return;
942 
943 	nb = node_to_amd_nb(dram_dst_node(pvt, range));
944 	if (WARN_ON(!nb))
945 		return;
946 
947 	pci_func = (pvt->model == 0x30) ? PCI_DEVICE_ID_AMD_15H_M30H_NB_F1
948 					: PCI_DEVICE_ID_AMD_15H_NB_F1;
949 
950 	f1 = pci_get_related_function(nb->misc->vendor, pci_func, nb->misc);
951 	if (WARN_ON(!f1))
952 		return;
953 
954 	amd64_read_pci_cfg(f1, DRAM_LOCAL_NODE_LIM, &llim);
955 
956 	pvt->ranges[range].lim.lo &= GENMASK_ULL(15, 0);
957 
958 				    /* {[39:27],111b} */
959 	pvt->ranges[range].lim.lo |= ((llim & 0x1fff) << 3 | 0x7) << 16;
960 
961 	pvt->ranges[range].lim.hi &= GENMASK_ULL(7, 0);
962 
963 				    /* [47:40] */
964 	pvt->ranges[range].lim.hi |= llim >> 13;
965 
966 	pci_dev_put(f1);
967 }
968 
969 static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
970 				    struct err_info *err)
971 {
972 	struct amd64_pvt *pvt = mci->pvt_info;
973 
974 	error_address_to_page_and_offset(sys_addr, err);
975 
976 	/*
977 	 * Find out which node the error address belongs to. This may be
978 	 * different from the node that detected the error.
979 	 */
980 	err->src_mci = find_mc_by_sys_addr(mci, sys_addr);
981 	if (!err->src_mci) {
982 		amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n",
983 			     (unsigned long)sys_addr);
984 		err->err_code = ERR_NODE;
985 		return;
986 	}
987 
988 	/* Now map the sys_addr to a CSROW */
989 	err->csrow = sys_addr_to_csrow(err->src_mci, sys_addr);
990 	if (err->csrow < 0) {
991 		err->err_code = ERR_CSROW;
992 		return;
993 	}
994 
995 	/* CHIPKILL enabled */
996 	if (pvt->nbcfg & NBCFG_CHIPKILL) {
997 		err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome);
998 		if (err->channel < 0) {
999 			/*
1000 			 * Syndrome didn't map, so we don't know which of the
1001 			 * 2 DIMMs is in error. So we need to ID 'both' of them
1002 			 * as suspect.
1003 			 */
1004 			amd64_mc_warn(err->src_mci, "unknown syndrome 0x%04x - "
1005 				      "possible error reporting race\n",
1006 				      err->syndrome);
1007 			err->err_code = ERR_CHANNEL;
1008 			return;
1009 		}
1010 	} else {
1011 		/*
1012 		 * non-chipkill ecc mode
1013 		 *
1014 		 * The k8 documentation is unclear about how to determine the
1015 		 * channel number when using non-chipkill memory.  This method
1016 		 * was obtained from email communication with someone at AMD.
1017 		 * (Wish the email was placed in this comment - norsk)
1018 		 */
1019 		err->channel = ((sys_addr & BIT(3)) != 0);
1020 	}
1021 }
1022 
1023 static int ddr2_cs_size(unsigned i, bool dct_width)
1024 {
1025 	unsigned shift = 0;
1026 
1027 	if (i <= 2)
1028 		shift = i;
1029 	else if (!(i & 0x1))
1030 		shift = i >> 1;
1031 	else
1032 		shift = (i + 1) >> 1;
1033 
1034 	return 128 << (shift + !!dct_width);
1035 }
1036 
1037 static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1038 				  unsigned cs_mode)
1039 {
1040 	u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
1041 
1042 	if (pvt->ext_model >= K8_REV_F) {
1043 		WARN_ON(cs_mode > 11);
1044 		return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
1045 	}
1046 	else if (pvt->ext_model >= K8_REV_D) {
1047 		unsigned diff;
1048 		WARN_ON(cs_mode > 10);
1049 
1050 		/*
1051 		 * the below calculation, besides trying to win an obfuscated C
1052 		 * contest, maps cs_mode values to DIMM chip select sizes. The
1053 		 * mappings are:
1054 		 *
1055 		 * cs_mode	CS size (mb)
1056 		 * =======	============
1057 		 * 0		32
1058 		 * 1		64
1059 		 * 2		128
1060 		 * 3		128
1061 		 * 4		256
1062 		 * 5		512
1063 		 * 6		256
1064 		 * 7		512
1065 		 * 8		1024
1066 		 * 9		1024
1067 		 * 10		2048
1068 		 *
1069 		 * Basically, it calculates a value with which to shift the
1070 		 * smallest CS size of 32MB.
1071 		 *
1072 		 * ddr[23]_cs_size have a similar purpose.
1073 		 */
1074 		diff = cs_mode/3 + (unsigned)(cs_mode > 5);
1075 
1076 		return 32 << (cs_mode - diff);
1077 	}
1078 	else {
1079 		WARN_ON(cs_mode > 6);
1080 		return 32 << cs_mode;
1081 	}
1082 }
1083 
1084 /*
1085  * Get the number of DCT channels in use.
1086  *
1087  * Return:
1088  *	number of Memory Channels in operation
1089  * Pass back:
1090  *	contents of the DCL0_LOW register
1091  */
1092 static int f1x_early_channel_count(struct amd64_pvt *pvt)
1093 {
1094 	int i, j, channels = 0;
1095 
1096 	/* On F10h, if we are in 128 bit mode, then we are using 2 channels */
1097 	if (pvt->fam == 0x10 && (pvt->dclr0 & WIDTH_128))
1098 		return 2;
1099 
1100 	/*
1101 	 * Need to check if in unganged mode: In such, there are 2 channels,
1102 	 * but they are not in 128 bit mode and thus the above 'dclr0' status
1103 	 * bit will be OFF.
1104 	 *
1105 	 * Need to check DCT0[0] and DCT1[0] to see if only one of them has
1106 	 * their CSEnable bit on. If so, then SINGLE DIMM case.
1107 	 */
1108 	edac_dbg(0, "Data width is not 128 bits - need more decoding\n");
1109 
1110 	/*
1111 	 * Check DRAM Bank Address Mapping values for each DIMM to see if there
1112 	 * is more than just one DIMM present in unganged mode. Need to check
1113 	 * both controllers since DIMMs can be placed in either one.
1114 	 */
1115 	for (i = 0; i < 2; i++) {
1116 		u32 dbam = (i ? pvt->dbam1 : pvt->dbam0);
1117 
1118 		for (j = 0; j < 4; j++) {
1119 			if (DBAM_DIMM(j, dbam) > 0) {
1120 				channels++;
1121 				break;
1122 			}
1123 		}
1124 	}
1125 
1126 	if (channels > 2)
1127 		channels = 2;
1128 
1129 	amd64_info("MCT channel count: %d\n", channels);
1130 
1131 	return channels;
1132 }
1133 
1134 static int ddr3_cs_size(unsigned i, bool dct_width)
1135 {
1136 	unsigned shift = 0;
1137 	int cs_size = 0;
1138 
1139 	if (i == 0 || i == 3 || i == 4)
1140 		cs_size = -1;
1141 	else if (i <= 2)
1142 		shift = i;
1143 	else if (i == 12)
1144 		shift = 7;
1145 	else if (!(i & 0x1))
1146 		shift = i >> 1;
1147 	else
1148 		shift = (i + 1) >> 1;
1149 
1150 	if (cs_size != -1)
1151 		cs_size = (128 * (1 << !!dct_width)) << shift;
1152 
1153 	return cs_size;
1154 }
1155 
1156 static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1157 				   unsigned cs_mode)
1158 {
1159 	u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
1160 
1161 	WARN_ON(cs_mode > 11);
1162 
1163 	if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
1164 		return ddr3_cs_size(cs_mode, dclr & WIDTH_128);
1165 	else
1166 		return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
1167 }
1168 
1169 /*
1170  * F15h supports only 64bit DCT interfaces
1171  */
1172 static int f15_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1173 				   unsigned cs_mode)
1174 {
1175 	WARN_ON(cs_mode > 12);
1176 
1177 	return ddr3_cs_size(cs_mode, false);
1178 }
1179 
1180 /*
1181  * F16h and F15h model 30h have only limited cs_modes.
1182  */
1183 static int f16_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1184 				unsigned cs_mode)
1185 {
1186 	WARN_ON(cs_mode > 12);
1187 
1188 	if (cs_mode == 6 || cs_mode == 8 ||
1189 	    cs_mode == 9 || cs_mode == 12)
1190 		return -1;
1191 	else
1192 		return ddr3_cs_size(cs_mode, false);
1193 }
1194 
1195 static void read_dram_ctl_register(struct amd64_pvt *pvt)
1196 {
1197 
1198 	if (pvt->fam == 0xf)
1199 		return;
1200 
1201 	if (!amd64_read_dct_pci_cfg(pvt, DCT_SEL_LO, &pvt->dct_sel_lo)) {
1202 		edac_dbg(0, "F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n",
1203 			 pvt->dct_sel_lo, dct_sel_baseaddr(pvt));
1204 
1205 		edac_dbg(0, "  DCTs operate in %s mode\n",
1206 			 (dct_ganging_enabled(pvt) ? "ganged" : "unganged"));
1207 
1208 		if (!dct_ganging_enabled(pvt))
1209 			edac_dbg(0, "  Address range split per DCT: %s\n",
1210 				 (dct_high_range_enabled(pvt) ? "yes" : "no"));
1211 
1212 		edac_dbg(0, "  data interleave for ECC: %s, DRAM cleared since last warm reset: %s\n",
1213 			 (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
1214 			 (dct_memory_cleared(pvt) ? "yes" : "no"));
1215 
1216 		edac_dbg(0, "  channel interleave: %s, "
1217 			 "interleave bits selector: 0x%x\n",
1218 			 (dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
1219 			 dct_sel_interleave_addr(pvt));
1220 	}
1221 
1222 	amd64_read_dct_pci_cfg(pvt, DCT_SEL_HI, &pvt->dct_sel_hi);
1223 }
1224 
1225 /*
1226  * Determine channel (DCT) based on the interleaving mode (see F15h M30h BKDG,
1227  * 2.10.12 Memory Interleaving Modes).
1228  */
1229 static u8 f15_m30h_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
1230 				     u8 intlv_en, int num_dcts_intlv,
1231 				     u32 dct_sel)
1232 {
1233 	u8 channel = 0;
1234 	u8 select;
1235 
1236 	if (!(intlv_en))
1237 		return (u8)(dct_sel);
1238 
1239 	if (num_dcts_intlv == 2) {
1240 		select = (sys_addr >> 8) & 0x3;
1241 		channel = select ? 0x3 : 0;
1242 	} else if (num_dcts_intlv == 4)
1243 		channel = (sys_addr >> 8) & 0x7;
1244 
1245 	return channel;
1246 }
1247 
1248 /*
1249  * Determine channel (DCT) based on the interleaving mode: F10h BKDG, 2.8.9 Memory
1250  * Interleaving Modes.
1251  */
1252 static u8 f1x_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
1253 				bool hi_range_sel, u8 intlv_en)
1254 {
1255 	u8 dct_sel_high = (pvt->dct_sel_lo >> 1) & 1;
1256 
1257 	if (dct_ganging_enabled(pvt))
1258 		return 0;
1259 
1260 	if (hi_range_sel)
1261 		return dct_sel_high;
1262 
1263 	/*
1264 	 * see F2x110[DctSelIntLvAddr] - channel interleave mode
1265 	 */
1266 	if (dct_interleave_enabled(pvt)) {
1267 		u8 intlv_addr = dct_sel_interleave_addr(pvt);
1268 
1269 		/* return DCT select function: 0=DCT0, 1=DCT1 */
1270 		if (!intlv_addr)
1271 			return sys_addr >> 6 & 1;
1272 
1273 		if (intlv_addr & 0x2) {
1274 			u8 shift = intlv_addr & 0x1 ? 9 : 6;
1275 			u32 temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) % 2;
1276 
1277 			return ((sys_addr >> shift) & 1) ^ temp;
1278 		}
1279 
1280 		return (sys_addr >> (12 + hweight8(intlv_en))) & 1;
1281 	}
1282 
1283 	if (dct_high_range_enabled(pvt))
1284 		return ~dct_sel_high & 1;
1285 
1286 	return 0;
1287 }
1288 
1289 /* Convert the sys_addr to the normalized DCT address */
1290 static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, u8 range,
1291 				 u64 sys_addr, bool hi_rng,
1292 				 u32 dct_sel_base_addr)
1293 {
1294 	u64 chan_off;
1295 	u64 dram_base		= get_dram_base(pvt, range);
1296 	u64 hole_off		= f10_dhar_offset(pvt);
1297 	u64 dct_sel_base_off	= (pvt->dct_sel_hi & 0xFFFFFC00) << 16;
1298 
1299 	if (hi_rng) {
1300 		/*
1301 		 * if
1302 		 * base address of high range is below 4Gb
1303 		 * (bits [47:27] at [31:11])
1304 		 * DRAM address space on this DCT is hoisted above 4Gb	&&
1305 		 * sys_addr > 4Gb
1306 		 *
1307 		 *	remove hole offset from sys_addr
1308 		 * else
1309 		 *	remove high range offset from sys_addr
1310 		 */
1311 		if ((!(dct_sel_base_addr >> 16) ||
1312 		     dct_sel_base_addr < dhar_base(pvt)) &&
1313 		    dhar_valid(pvt) &&
1314 		    (sys_addr >= BIT_64(32)))
1315 			chan_off = hole_off;
1316 		else
1317 			chan_off = dct_sel_base_off;
1318 	} else {
1319 		/*
1320 		 * if
1321 		 * we have a valid hole		&&
1322 		 * sys_addr > 4Gb
1323 		 *
1324 		 *	remove hole
1325 		 * else
1326 		 *	remove dram base to normalize to DCT address
1327 		 */
1328 		if (dhar_valid(pvt) && (sys_addr >= BIT_64(32)))
1329 			chan_off = hole_off;
1330 		else
1331 			chan_off = dram_base;
1332 	}
1333 
1334 	return (sys_addr & GENMASK_ULL(47,6)) - (chan_off & GENMASK_ULL(47,23));
1335 }
1336 
1337 /*
1338  * checks if the csrow passed in is marked as SPARED, if so returns the new
1339  * spare row
1340  */
1341 static int f10_process_possible_spare(struct amd64_pvt *pvt, u8 dct, int csrow)
1342 {
1343 	int tmp_cs;
1344 
1345 	if (online_spare_swap_done(pvt, dct) &&
1346 	    csrow == online_spare_bad_dramcs(pvt, dct)) {
1347 
1348 		for_each_chip_select(tmp_cs, dct, pvt) {
1349 			if (chip_select_base(tmp_cs, dct, pvt) & 0x2) {
1350 				csrow = tmp_cs;
1351 				break;
1352 			}
1353 		}
1354 	}
1355 	return csrow;
1356 }
1357 
1358 /*
1359  * Iterate over the DRAM DCT "base" and "mask" registers looking for a
1360  * SystemAddr match on the specified 'ChannelSelect' and 'NodeID'
1361  *
1362  * Return:
1363  *	-EINVAL:  NOT FOUND
1364  *	0..csrow = Chip-Select Row
1365  */
1366 static int f1x_lookup_addr_in_dct(u64 in_addr, u8 nid, u8 dct)
1367 {
1368 	struct mem_ctl_info *mci;
1369 	struct amd64_pvt *pvt;
1370 	u64 cs_base, cs_mask;
1371 	int cs_found = -EINVAL;
1372 	int csrow;
1373 
1374 	mci = mcis[nid];
1375 	if (!mci)
1376 		return cs_found;
1377 
1378 	pvt = mci->pvt_info;
1379 
1380 	edac_dbg(1, "input addr: 0x%llx, DCT: %d\n", in_addr, dct);
1381 
1382 	for_each_chip_select(csrow, dct, pvt) {
1383 		if (!csrow_enabled(csrow, dct, pvt))
1384 			continue;
1385 
1386 		get_cs_base_and_mask(pvt, csrow, dct, &cs_base, &cs_mask);
1387 
1388 		edac_dbg(1, "    CSROW=%d CSBase=0x%llx CSMask=0x%llx\n",
1389 			 csrow, cs_base, cs_mask);
1390 
1391 		cs_mask = ~cs_mask;
1392 
1393 		edac_dbg(1, "    (InputAddr & ~CSMask)=0x%llx (CSBase & ~CSMask)=0x%llx\n",
1394 			 (in_addr & cs_mask), (cs_base & cs_mask));
1395 
1396 		if ((in_addr & cs_mask) == (cs_base & cs_mask)) {
1397 			if (pvt->fam == 0x15 && pvt->model >= 0x30) {
1398 				cs_found =  csrow;
1399 				break;
1400 			}
1401 			cs_found = f10_process_possible_spare(pvt, dct, csrow);
1402 
1403 			edac_dbg(1, " MATCH csrow=%d\n", cs_found);
1404 			break;
1405 		}
1406 	}
1407 	return cs_found;
1408 }
1409 
1410 /*
1411  * See F2x10C. Non-interleaved graphics framebuffer memory under the 16G is
1412  * swapped with a region located at the bottom of memory so that the GPU can use
1413  * the interleaved region and thus two channels.
1414  */
1415 static u64 f1x_swap_interleaved_region(struct amd64_pvt *pvt, u64 sys_addr)
1416 {
1417 	u32 swap_reg, swap_base, swap_limit, rgn_size, tmp_addr;
1418 
1419 	if (pvt->fam == 0x10) {
1420 		/* only revC3 and revE have that feature */
1421 		if (pvt->model < 4 || (pvt->model < 0xa && pvt->stepping < 3))
1422 			return sys_addr;
1423 	}
1424 
1425 	amd64_read_dct_pci_cfg(pvt, SWAP_INTLV_REG, &swap_reg);
1426 
1427 	if (!(swap_reg & 0x1))
1428 		return sys_addr;
1429 
1430 	swap_base	= (swap_reg >> 3) & 0x7f;
1431 	swap_limit	= (swap_reg >> 11) & 0x7f;
1432 	rgn_size	= (swap_reg >> 20) & 0x7f;
1433 	tmp_addr	= sys_addr >> 27;
1434 
1435 	if (!(sys_addr >> 34) &&
1436 	    (((tmp_addr >= swap_base) &&
1437 	     (tmp_addr <= swap_limit)) ||
1438 	     (tmp_addr < rgn_size)))
1439 		return sys_addr ^ (u64)swap_base << 27;
1440 
1441 	return sys_addr;
1442 }
1443 
1444 /* For a given @dram_range, check if @sys_addr falls within it. */
1445 static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
1446 				  u64 sys_addr, int *chan_sel)
1447 {
1448 	int cs_found = -EINVAL;
1449 	u64 chan_addr;
1450 	u32 dct_sel_base;
1451 	u8 channel;
1452 	bool high_range = false;
1453 
1454 	u8 node_id    = dram_dst_node(pvt, range);
1455 	u8 intlv_en   = dram_intlv_en(pvt, range);
1456 	u32 intlv_sel = dram_intlv_sel(pvt, range);
1457 
1458 	edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
1459 		 range, sys_addr, get_dram_limit(pvt, range));
1460 
1461 	if (dhar_valid(pvt) &&
1462 	    dhar_base(pvt) <= sys_addr &&
1463 	    sys_addr < BIT_64(32)) {
1464 		amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
1465 			    sys_addr);
1466 		return -EINVAL;
1467 	}
1468 
1469 	if (intlv_en && (intlv_sel != ((sys_addr >> 12) & intlv_en)))
1470 		return -EINVAL;
1471 
1472 	sys_addr = f1x_swap_interleaved_region(pvt, sys_addr);
1473 
1474 	dct_sel_base = dct_sel_baseaddr(pvt);
1475 
1476 	/*
1477 	 * check whether addresses >= DctSelBaseAddr[47:27] are to be used to
1478 	 * select between DCT0 and DCT1.
1479 	 */
1480 	if (dct_high_range_enabled(pvt) &&
1481 	   !dct_ganging_enabled(pvt) &&
1482 	   ((sys_addr >> 27) >= (dct_sel_base >> 11)))
1483 		high_range = true;
1484 
1485 	channel = f1x_determine_channel(pvt, sys_addr, high_range, intlv_en);
1486 
1487 	chan_addr = f1x_get_norm_dct_addr(pvt, range, sys_addr,
1488 					  high_range, dct_sel_base);
1489 
1490 	/* Remove node interleaving, see F1x120 */
1491 	if (intlv_en)
1492 		chan_addr = ((chan_addr >> (12 + hweight8(intlv_en))) << 12) |
1493 			    (chan_addr & 0xfff);
1494 
1495 	/* remove channel interleave */
1496 	if (dct_interleave_enabled(pvt) &&
1497 	   !dct_high_range_enabled(pvt) &&
1498 	   !dct_ganging_enabled(pvt)) {
1499 
1500 		if (dct_sel_interleave_addr(pvt) != 1) {
1501 			if (dct_sel_interleave_addr(pvt) == 0x3)
1502 				/* hash 9 */
1503 				chan_addr = ((chan_addr >> 10) << 9) |
1504 					     (chan_addr & 0x1ff);
1505 			else
1506 				/* A[6] or hash 6 */
1507 				chan_addr = ((chan_addr >> 7) << 6) |
1508 					     (chan_addr & 0x3f);
1509 		} else
1510 			/* A[12] */
1511 			chan_addr = ((chan_addr >> 13) << 12) |
1512 				     (chan_addr & 0xfff);
1513 	}
1514 
1515 	edac_dbg(1, "   Normalized DCT addr: 0x%llx\n", chan_addr);
1516 
1517 	cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, channel);
1518 
1519 	if (cs_found >= 0)
1520 		*chan_sel = channel;
1521 
1522 	return cs_found;
1523 }
1524 
1525 static int f15_m30h_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
1526 					u64 sys_addr, int *chan_sel)
1527 {
1528 	int cs_found = -EINVAL;
1529 	int num_dcts_intlv = 0;
1530 	u64 chan_addr, chan_offset;
1531 	u64 dct_base, dct_limit;
1532 	u32 dct_cont_base_reg, dct_cont_limit_reg, tmp;
1533 	u8 channel, alias_channel, leg_mmio_hole, dct_sel, dct_offset_en;
1534 
1535 	u64 dhar_offset		= f10_dhar_offset(pvt);
1536 	u8 intlv_addr		= dct_sel_interleave_addr(pvt);
1537 	u8 node_id		= dram_dst_node(pvt, range);
1538 	u8 intlv_en		= dram_intlv_en(pvt, range);
1539 
1540 	amd64_read_pci_cfg(pvt->F1, DRAM_CONT_BASE, &dct_cont_base_reg);
1541 	amd64_read_pci_cfg(pvt->F1, DRAM_CONT_LIMIT, &dct_cont_limit_reg);
1542 
1543 	dct_offset_en		= (u8) ((dct_cont_base_reg >> 3) & BIT(0));
1544 	dct_sel			= (u8) ((dct_cont_base_reg >> 4) & 0x7);
1545 
1546 	edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
1547 		 range, sys_addr, get_dram_limit(pvt, range));
1548 
1549 	if (!(get_dram_base(pvt, range)  <= sys_addr) &&
1550 	    !(get_dram_limit(pvt, range) >= sys_addr))
1551 		return -EINVAL;
1552 
1553 	if (dhar_valid(pvt) &&
1554 	    dhar_base(pvt) <= sys_addr &&
1555 	    sys_addr < BIT_64(32)) {
1556 		amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
1557 			    sys_addr);
1558 		return -EINVAL;
1559 	}
1560 
1561 	/* Verify sys_addr is within DCT Range. */
1562 	dct_base = (u64) dct_sel_baseaddr(pvt);
1563 	dct_limit = (dct_cont_limit_reg >> 11) & 0x1FFF;
1564 
1565 	if (!(dct_cont_base_reg & BIT(0)) &&
1566 	    !(dct_base <= (sys_addr >> 27) &&
1567 	      dct_limit >= (sys_addr >> 27)))
1568 		return -EINVAL;
1569 
1570 	/* Verify number of dct's that participate in channel interleaving. */
1571 	num_dcts_intlv = (int) hweight8(intlv_en);
1572 
1573 	if (!(num_dcts_intlv % 2 == 0) || (num_dcts_intlv > 4))
1574 		return -EINVAL;
1575 
1576 	channel = f15_m30h_determine_channel(pvt, sys_addr, intlv_en,
1577 					     num_dcts_intlv, dct_sel);
1578 
1579 	/* Verify we stay within the MAX number of channels allowed */
1580 	if (channel > 3)
1581 		return -EINVAL;
1582 
1583 	leg_mmio_hole = (u8) (dct_cont_base_reg >> 1 & BIT(0));
1584 
1585 	/* Get normalized DCT addr */
1586 	if (leg_mmio_hole && (sys_addr >= BIT_64(32)))
1587 		chan_offset = dhar_offset;
1588 	else
1589 		chan_offset = dct_base << 27;
1590 
1591 	chan_addr = sys_addr - chan_offset;
1592 
1593 	/* remove channel interleave */
1594 	if (num_dcts_intlv == 2) {
1595 		if (intlv_addr == 0x4)
1596 			chan_addr = ((chan_addr >> 9) << 8) |
1597 						(chan_addr & 0xff);
1598 		else if (intlv_addr == 0x5)
1599 			chan_addr = ((chan_addr >> 10) << 9) |
1600 						(chan_addr & 0x1ff);
1601 		else
1602 			return -EINVAL;
1603 
1604 	} else if (num_dcts_intlv == 4) {
1605 		if (intlv_addr == 0x4)
1606 			chan_addr = ((chan_addr >> 10) << 8) |
1607 							(chan_addr & 0xff);
1608 		else if (intlv_addr == 0x5)
1609 			chan_addr = ((chan_addr >> 11) << 9) |
1610 							(chan_addr & 0x1ff);
1611 		else
1612 			return -EINVAL;
1613 	}
1614 
1615 	if (dct_offset_en) {
1616 		amd64_read_pci_cfg(pvt->F1,
1617 				   DRAM_CONT_HIGH_OFF + (int) channel * 4,
1618 				   &tmp);
1619 		chan_addr +=  (u64) ((tmp >> 11) & 0xfff) << 27;
1620 	}
1621 
1622 	f15h_select_dct(pvt, channel);
1623 
1624 	edac_dbg(1, "   Normalized DCT addr: 0x%llx\n", chan_addr);
1625 
1626 	/*
1627 	 * Find Chip select:
1628 	 * if channel = 3, then alias it to 1. This is because, in F15 M30h,
1629 	 * there is support for 4 DCT's, but only 2 are currently functional.
1630 	 * They are DCT0 and DCT3. But we have read all registers of DCT3 into
1631 	 * pvt->csels[1]. So we need to use '1' here to get correct info.
1632 	 * Refer F15 M30h BKDG Section 2.10 and 2.10.3 for clarifications.
1633 	 */
1634 	alias_channel =  (channel == 3) ? 1 : channel;
1635 
1636 	cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, alias_channel);
1637 
1638 	if (cs_found >= 0)
1639 		*chan_sel = alias_channel;
1640 
1641 	return cs_found;
1642 }
1643 
1644 static int f1x_translate_sysaddr_to_cs(struct amd64_pvt *pvt,
1645 					u64 sys_addr,
1646 					int *chan_sel)
1647 {
1648 	int cs_found = -EINVAL;
1649 	unsigned range;
1650 
1651 	for (range = 0; range < DRAM_RANGES; range++) {
1652 		if (!dram_rw(pvt, range))
1653 			continue;
1654 
1655 		if (pvt->fam == 0x15 && pvt->model >= 0x30)
1656 			cs_found = f15_m30h_match_to_this_node(pvt, range,
1657 							       sys_addr,
1658 							       chan_sel);
1659 
1660 		else if ((get_dram_base(pvt, range)  <= sys_addr) &&
1661 			 (get_dram_limit(pvt, range) >= sys_addr)) {
1662 			cs_found = f1x_match_to_this_node(pvt, range,
1663 							  sys_addr, chan_sel);
1664 			if (cs_found >= 0)
1665 				break;
1666 		}
1667 	}
1668 	return cs_found;
1669 }
1670 
1671 /*
1672  * For reference see "2.8.5 Routing DRAM Requests" in F10 BKDG. This code maps
1673  * a @sys_addr to NodeID, DCT (channel) and chip select (CSROW).
1674  *
1675  * The @sys_addr is usually an error address received from the hardware
1676  * (MCX_ADDR).
1677  */
1678 static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
1679 				     struct err_info *err)
1680 {
1681 	struct amd64_pvt *pvt = mci->pvt_info;
1682 
1683 	error_address_to_page_and_offset(sys_addr, err);
1684 
1685 	err->csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &err->channel);
1686 	if (err->csrow < 0) {
1687 		err->err_code = ERR_CSROW;
1688 		return;
1689 	}
1690 
1691 	/*
1692 	 * We need the syndromes for channel detection only when we're
1693 	 * ganged. Otherwise @chan should already contain the channel at
1694 	 * this point.
1695 	 */
1696 	if (dct_ganging_enabled(pvt))
1697 		err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome);
1698 }
1699 
1700 /*
1701  * debug routine to display the memory sizes of all logical DIMMs and its
1702  * CSROWs
1703  */
1704 static void debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
1705 {
1706 	int dimm, size0, size1;
1707 	u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases;
1708 	u32 dbam  = ctrl ? pvt->dbam1 : pvt->dbam0;
1709 
1710 	if (pvt->fam == 0xf) {
1711 		/* K8 families < revF not supported yet */
1712 	       if (pvt->ext_model < K8_REV_F)
1713 			return;
1714 	       else
1715 		       WARN_ON(ctrl != 0);
1716 	}
1717 
1718 	dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1 : pvt->dbam0;
1719 	dcsb = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->csels[1].csbases
1720 						   : pvt->csels[0].csbases;
1721 
1722 	edac_dbg(1, "F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n",
1723 		 ctrl, dbam);
1724 
1725 	edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);
1726 
1727 	/* Dump memory sizes for DIMM and its CSROWs */
1728 	for (dimm = 0; dimm < 4; dimm++) {
1729 
1730 		size0 = 0;
1731 		if (dcsb[dimm*2] & DCSB_CS_ENABLE)
1732 			size0 = pvt->ops->dbam_to_cs(pvt, ctrl,
1733 						     DBAM_DIMM(dimm, dbam));
1734 
1735 		size1 = 0;
1736 		if (dcsb[dimm*2 + 1] & DCSB_CS_ENABLE)
1737 			size1 = pvt->ops->dbam_to_cs(pvt, ctrl,
1738 						     DBAM_DIMM(dimm, dbam));
1739 
1740 		amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
1741 				dimm * 2,     size0,
1742 				dimm * 2 + 1, size1);
1743 	}
1744 }
1745 
1746 static struct amd64_family_type family_types[] = {
1747 	[K8_CPUS] = {
1748 		.ctl_name = "K8",
1749 		.f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
1750 		.f3_id = PCI_DEVICE_ID_AMD_K8_NB_MISC,
1751 		.ops = {
1752 			.early_channel_count	= k8_early_channel_count,
1753 			.map_sysaddr_to_csrow	= k8_map_sysaddr_to_csrow,
1754 			.dbam_to_cs		= k8_dbam_to_chip_select,
1755 			.read_dct_pci_cfg	= k8_read_dct_pci_cfg,
1756 		}
1757 	},
1758 	[F10_CPUS] = {
1759 		.ctl_name = "F10h",
1760 		.f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP,
1761 		.f3_id = PCI_DEVICE_ID_AMD_10H_NB_MISC,
1762 		.ops = {
1763 			.early_channel_count	= f1x_early_channel_count,
1764 			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
1765 			.dbam_to_cs		= f10_dbam_to_chip_select,
1766 			.read_dct_pci_cfg	= f10_read_dct_pci_cfg,
1767 		}
1768 	},
1769 	[F15_CPUS] = {
1770 		.ctl_name = "F15h",
1771 		.f1_id = PCI_DEVICE_ID_AMD_15H_NB_F1,
1772 		.f3_id = PCI_DEVICE_ID_AMD_15H_NB_F3,
1773 		.ops = {
1774 			.early_channel_count	= f1x_early_channel_count,
1775 			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
1776 			.dbam_to_cs		= f15_dbam_to_chip_select,
1777 			.read_dct_pci_cfg	= f15_read_dct_pci_cfg,
1778 		}
1779 	},
1780 	[F15_M30H_CPUS] = {
1781 		.ctl_name = "F15h_M30h",
1782 		.f1_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1,
1783 		.f3_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F3,
1784 		.ops = {
1785 			.early_channel_count	= f1x_early_channel_count,
1786 			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
1787 			.dbam_to_cs		= f16_dbam_to_chip_select,
1788 			.read_dct_pci_cfg	= f15_read_dct_pci_cfg,
1789 		}
1790 	},
1791 	[F16_CPUS] = {
1792 		.ctl_name = "F16h",
1793 		.f1_id = PCI_DEVICE_ID_AMD_16H_NB_F1,
1794 		.f3_id = PCI_DEVICE_ID_AMD_16H_NB_F3,
1795 		.ops = {
1796 			.early_channel_count	= f1x_early_channel_count,
1797 			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
1798 			.dbam_to_cs		= f16_dbam_to_chip_select,
1799 			.read_dct_pci_cfg	= f10_read_dct_pci_cfg,
1800 		}
1801 	},
1802 };
1803 
1804 /*
1805  * These are tables of eigenvectors (one per line) which can be used for the
1806  * construction of the syndrome tables. The modified syndrome search algorithm
1807  * uses those to find the symbol in error and thus the DIMM.
1808  *
1809  * Algorithm courtesy of Ross LaFetra from AMD.
1810  */
1811 static const u16 x4_vectors[] = {
1812 	0x2f57, 0x1afe, 0x66cc, 0xdd88,
1813 	0x11eb, 0x3396, 0x7f4c, 0xeac8,
1814 	0x0001, 0x0002, 0x0004, 0x0008,
1815 	0x1013, 0x3032, 0x4044, 0x8088,
1816 	0x106b, 0x30d6, 0x70fc, 0xe0a8,
1817 	0x4857, 0xc4fe, 0x13cc, 0x3288,
1818 	0x1ac5, 0x2f4a, 0x5394, 0xa1e8,
1819 	0x1f39, 0x251e, 0xbd6c, 0x6bd8,
1820 	0x15c1, 0x2a42, 0x89ac, 0x4758,
1821 	0x2b03, 0x1602, 0x4f0c, 0xca08,
1822 	0x1f07, 0x3a0e, 0x6b04, 0xbd08,
1823 	0x8ba7, 0x465e, 0x244c, 0x1cc8,
1824 	0x2b87, 0x164e, 0x642c, 0xdc18,
1825 	0x40b9, 0x80de, 0x1094, 0x20e8,
1826 	0x27db, 0x1eb6, 0x9dac, 0x7b58,
1827 	0x11c1, 0x2242, 0x84ac, 0x4c58,
1828 	0x1be5, 0x2d7a, 0x5e34, 0xa718,
1829 	0x4b39, 0x8d1e, 0x14b4, 0x28d8,
1830 	0x4c97, 0xc87e, 0x11fc, 0x33a8,
1831 	0x8e97, 0x497e, 0x2ffc, 0x1aa8,
1832 	0x16b3, 0x3d62, 0x4f34, 0x8518,
1833 	0x1e2f, 0x391a, 0x5cac, 0xf858,
1834 	0x1d9f, 0x3b7a, 0x572c, 0xfe18,
1835 	0x15f5, 0x2a5a, 0x5264, 0xa3b8,
1836 	0x1dbb, 0x3b66, 0x715c, 0xe3f8,
1837 	0x4397, 0xc27e, 0x17fc, 0x3ea8,
1838 	0x1617, 0x3d3e, 0x6464, 0xb8b8,
1839 	0x23ff, 0x12aa, 0xab6c, 0x56d8,
1840 	0x2dfb, 0x1ba6, 0x913c, 0x7328,
1841 	0x185d, 0x2ca6, 0x7914, 0x9e28,
1842 	0x171b, 0x3e36, 0x7d7c, 0xebe8,
1843 	0x4199, 0x82ee, 0x19f4, 0x2e58,
1844 	0x4807, 0xc40e, 0x130c, 0x3208,
1845 	0x1905, 0x2e0a, 0x5804, 0xac08,
1846 	0x213f, 0x132a, 0xadfc, 0x5ba8,
1847 	0x19a9, 0x2efe, 0xb5cc, 0x6f88,
1848 };
1849 
1850 static const u16 x8_vectors[] = {
1851 	0x0145, 0x028a, 0x2374, 0x43c8, 0xa1f0, 0x0520, 0x0a40, 0x1480,
1852 	0x0211, 0x0422, 0x0844, 0x1088, 0x01b0, 0x44e0, 0x23c0, 0xed80,
1853 	0x1011, 0x0116, 0x022c, 0x0458, 0x08b0, 0x8c60, 0x2740, 0x4e80,
1854 	0x0411, 0x0822, 0x1044, 0x0158, 0x02b0, 0x2360, 0x46c0, 0xab80,
1855 	0x0811, 0x1022, 0x012c, 0x0258, 0x04b0, 0x4660, 0x8cc0, 0x2780,
1856 	0x2071, 0x40e2, 0xa0c4, 0x0108, 0x0210, 0x0420, 0x0840, 0x1080,
1857 	0x4071, 0x80e2, 0x0104, 0x0208, 0x0410, 0x0820, 0x1040, 0x2080,
1858 	0x8071, 0x0102, 0x0204, 0x0408, 0x0810, 0x1020, 0x2040, 0x4080,
1859 	0x019d, 0x03d6, 0x136c, 0x2198, 0x50b0, 0xb2e0, 0x0740, 0x0e80,
1860 	0x0189, 0x03ea, 0x072c, 0x0e58, 0x1cb0, 0x56e0, 0x37c0, 0xf580,
1861 	0x01fd, 0x0376, 0x06ec, 0x0bb8, 0x1110, 0x2220, 0x4440, 0x8880,
1862 	0x0163, 0x02c6, 0x1104, 0x0758, 0x0eb0, 0x2be0, 0x6140, 0xc280,
1863 	0x02fd, 0x01c6, 0x0b5c, 0x1108, 0x07b0, 0x25a0, 0x8840, 0x6180,
1864 	0x0801, 0x012e, 0x025c, 0x04b8, 0x1370, 0x26e0, 0x57c0, 0xb580,
1865 	0x0401, 0x0802, 0x015c, 0x02b8, 0x22b0, 0x13e0, 0x7140, 0xe280,
1866 	0x0201, 0x0402, 0x0804, 0x01b8, 0x11b0, 0x31a0, 0x8040, 0x7180,
1867 	0x0101, 0x0202, 0x0404, 0x0808, 0x1010, 0x2020, 0x4040, 0x8080,
1868 	0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080,
1869 	0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000,
1870 };
1871 
1872 static int decode_syndrome(u16 syndrome, const u16 *vectors, unsigned num_vecs,
1873 			   unsigned v_dim)
1874 {
1875 	unsigned int i, err_sym;
1876 
1877 	for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) {
1878 		u16 s = syndrome;
1879 		unsigned v_idx =  err_sym * v_dim;
1880 		unsigned v_end = (err_sym + 1) * v_dim;
1881 
1882 		/* walk over all 16 bits of the syndrome */
1883 		for (i = 1; i < (1U << 16); i <<= 1) {
1884 
1885 			/* if bit is set in that eigenvector... */
1886 			if (v_idx < v_end && vectors[v_idx] & i) {
1887 				u16 ev_comp = vectors[v_idx++];
1888 
1889 				/* ... and bit set in the modified syndrome, */
1890 				if (s & i) {
1891 					/* remove it. */
1892 					s ^= ev_comp;
1893 
1894 					if (!s)
1895 						return err_sym;
1896 				}
1897 
1898 			} else if (s & i)
1899 				/* can't get to zero, move to next symbol */
1900 				break;
1901 		}
1902 	}
1903 
1904 	edac_dbg(0, "syndrome(%x) not found\n", syndrome);
1905 	return -1;
1906 }
1907 
1908 static int map_err_sym_to_channel(int err_sym, int sym_size)
1909 {
1910 	if (sym_size == 4)
1911 		switch (err_sym) {
1912 		case 0x20:
1913 		case 0x21:
1914 			return 0;
1915 			break;
1916 		case 0x22:
1917 		case 0x23:
1918 			return 1;
1919 			break;
1920 		default:
1921 			return err_sym >> 4;
1922 			break;
1923 		}
1924 	/* x8 symbols */
1925 	else
1926 		switch (err_sym) {
1927 		/* imaginary bits not in a DIMM */
1928 		case 0x10:
1929 			WARN(1, KERN_ERR "Invalid error symbol: 0x%x\n",
1930 					  err_sym);
1931 			return -1;
1932 			break;
1933 
1934 		case 0x11:
1935 			return 0;
1936 			break;
1937 		case 0x12:
1938 			return 1;
1939 			break;
1940 		default:
1941 			return err_sym >> 3;
1942 			break;
1943 		}
1944 	return -1;
1945 }
1946 
1947 static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome)
1948 {
1949 	struct amd64_pvt *pvt = mci->pvt_info;
1950 	int err_sym = -1;
1951 
1952 	if (pvt->ecc_sym_sz == 8)
1953 		err_sym = decode_syndrome(syndrome, x8_vectors,
1954 					  ARRAY_SIZE(x8_vectors),
1955 					  pvt->ecc_sym_sz);
1956 	else if (pvt->ecc_sym_sz == 4)
1957 		err_sym = decode_syndrome(syndrome, x4_vectors,
1958 					  ARRAY_SIZE(x4_vectors),
1959 					  pvt->ecc_sym_sz);
1960 	else {
1961 		amd64_warn("Illegal syndrome type: %u\n", pvt->ecc_sym_sz);
1962 		return err_sym;
1963 	}
1964 
1965 	return map_err_sym_to_channel(err_sym, pvt->ecc_sym_sz);
1966 }
1967 
1968 static void __log_bus_error(struct mem_ctl_info *mci, struct err_info *err,
1969 			    u8 ecc_type)
1970 {
1971 	enum hw_event_mc_err_type err_type;
1972 	const char *string;
1973 
1974 	if (ecc_type == 2)
1975 		err_type = HW_EVENT_ERR_CORRECTED;
1976 	else if (ecc_type == 1)
1977 		err_type = HW_EVENT_ERR_UNCORRECTED;
1978 	else {
1979 		WARN(1, "Something is rotten in the state of Denmark.\n");
1980 		return;
1981 	}
1982 
1983 	switch (err->err_code) {
1984 	case DECODE_OK:
1985 		string = "";
1986 		break;
1987 	case ERR_NODE:
1988 		string = "Failed to map error addr to a node";
1989 		break;
1990 	case ERR_CSROW:
1991 		string = "Failed to map error addr to a csrow";
1992 		break;
1993 	case ERR_CHANNEL:
1994 		string = "unknown syndrome - possible error reporting race";
1995 		break;
1996 	default:
1997 		string = "WTF error";
1998 		break;
1999 	}
2000 
2001 	edac_mc_handle_error(err_type, mci, 1,
2002 			     err->page, err->offset, err->syndrome,
2003 			     err->csrow, err->channel, -1,
2004 			     string, "");
2005 }
2006 
2007 static inline void decode_bus_error(int node_id, struct mce *m)
2008 {
2009 	struct mem_ctl_info *mci = mcis[node_id];
2010 	struct amd64_pvt *pvt = mci->pvt_info;
2011 	u8 ecc_type = (m->status >> 45) & 0x3;
2012 	u8 xec = XEC(m->status, 0x1f);
2013 	u16 ec = EC(m->status);
2014 	u64 sys_addr;
2015 	struct err_info err;
2016 
2017 	/* Bail out early if this was an 'observed' error */
2018 	if (PP(ec) == NBSL_PP_OBS)
2019 		return;
2020 
2021 	/* Do only ECC errors */
2022 	if (xec && xec != F10_NBSL_EXT_ERR_ECC)
2023 		return;
2024 
2025 	memset(&err, 0, sizeof(err));
2026 
2027 	sys_addr = get_error_address(pvt, m);
2028 
2029 	if (ecc_type == 2)
2030 		err.syndrome = extract_syndrome(m->status);
2031 
2032 	pvt->ops->map_sysaddr_to_csrow(mci, sys_addr, &err);
2033 
2034 	__log_bus_error(mci, &err, ecc_type);
2035 }
2036 
2037 /*
2038  * Use pvt->F2 which contains the F2 CPU PCI device to get the related
2039  * F1 (AddrMap) and F3 (Misc) devices. Return negative value on error.
2040  */
2041 static int reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 f1_id, u16 f3_id)
2042 {
2043 	/* Reserve the ADDRESS MAP Device */
2044 	pvt->F1 = pci_get_related_function(pvt->F2->vendor, f1_id, pvt->F2);
2045 	if (!pvt->F1) {
2046 		amd64_err("error address map device not found: "
2047 			  "vendor %x device 0x%x (broken BIOS?)\n",
2048 			  PCI_VENDOR_ID_AMD, f1_id);
2049 		return -ENODEV;
2050 	}
2051 
2052 	/* Reserve the MISC Device */
2053 	pvt->F3 = pci_get_related_function(pvt->F2->vendor, f3_id, pvt->F2);
2054 	if (!pvt->F3) {
2055 		pci_dev_put(pvt->F1);
2056 		pvt->F1 = NULL;
2057 
2058 		amd64_err("error F3 device not found: "
2059 			  "vendor %x device 0x%x (broken BIOS?)\n",
2060 			  PCI_VENDOR_ID_AMD, f3_id);
2061 
2062 		return -ENODEV;
2063 	}
2064 	edac_dbg(1, "F1: %s\n", pci_name(pvt->F1));
2065 	edac_dbg(1, "F2: %s\n", pci_name(pvt->F2));
2066 	edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));
2067 
2068 	return 0;
2069 }
2070 
2071 static void free_mc_sibling_devs(struct amd64_pvt *pvt)
2072 {
2073 	pci_dev_put(pvt->F1);
2074 	pci_dev_put(pvt->F3);
2075 }
2076 
2077 /*
2078  * Retrieve the hardware registers of the memory controller (this includes the
2079  * 'Address Map' and 'Misc' device regs)
2080  */
2081 static void read_mc_regs(struct amd64_pvt *pvt)
2082 {
2083 	unsigned range;
2084 	u64 msr_val;
2085 	u32 tmp;
2086 
2087 	/*
2088 	 * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
2089 	 * those are Read-As-Zero
2090 	 */
2091 	rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
2092 	edac_dbg(0, "  TOP_MEM:  0x%016llx\n", pvt->top_mem);
2093 
2094 	/* check first whether TOP_MEM2 is enabled */
2095 	rdmsrl(MSR_K8_SYSCFG, msr_val);
2096 	if (msr_val & (1U << 21)) {
2097 		rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
2098 		edac_dbg(0, "  TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
2099 	} else
2100 		edac_dbg(0, "  TOP_MEM2 disabled\n");
2101 
2102 	amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap);
2103 
2104 	read_dram_ctl_register(pvt);
2105 
2106 	for (range = 0; range < DRAM_RANGES; range++) {
2107 		u8 rw;
2108 
2109 		/* read settings for this DRAM range */
2110 		read_dram_base_limit_regs(pvt, range);
2111 
2112 		rw = dram_rw(pvt, range);
2113 		if (!rw)
2114 			continue;
2115 
2116 		edac_dbg(1, "  DRAM range[%d], base: 0x%016llx; limit: 0x%016llx\n",
2117 			 range,
2118 			 get_dram_base(pvt, range),
2119 			 get_dram_limit(pvt, range));
2120 
2121 		edac_dbg(1, "   IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d\n",
2122 			 dram_intlv_en(pvt, range) ? "Enabled" : "Disabled",
2123 			 (rw & 0x1) ? "R" : "-",
2124 			 (rw & 0x2) ? "W" : "-",
2125 			 dram_intlv_sel(pvt, range),
2126 			 dram_dst_node(pvt, range));
2127 	}
2128 
2129 	read_dct_base_mask(pvt);
2130 
2131 	amd64_read_pci_cfg(pvt->F1, DHAR, &pvt->dhar);
2132 	amd64_read_dct_pci_cfg(pvt, DBAM0, &pvt->dbam0);
2133 
2134 	amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare);
2135 
2136 	amd64_read_dct_pci_cfg(pvt, DCLR0, &pvt->dclr0);
2137 	amd64_read_dct_pci_cfg(pvt, DCHR0, &pvt->dchr0);
2138 
2139 	if (!dct_ganging_enabled(pvt)) {
2140 		amd64_read_dct_pci_cfg(pvt, DCLR1, &pvt->dclr1);
2141 		amd64_read_dct_pci_cfg(pvt, DCHR1, &pvt->dchr1);
2142 	}
2143 
2144 	pvt->ecc_sym_sz = 4;
2145 
2146 	if (pvt->fam >= 0x10) {
2147 		amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
2148 		if (pvt->fam != 0x16)
2149 			/* F16h has only DCT0 */
2150 			amd64_read_dct_pci_cfg(pvt, DBAM1, &pvt->dbam1);
2151 
2152 		/* F10h, revD and later can do x8 ECC too */
2153 		if ((pvt->fam > 0x10 || pvt->model > 7) && tmp & BIT(25))
2154 			pvt->ecc_sym_sz = 8;
2155 	}
2156 	dump_misc_regs(pvt);
2157 }
2158 
2159 /*
2160  * NOTE: CPU Revision Dependent code
2161  *
2162  * Input:
2163  *	@csrow_nr ChipSelect Row Number (0..NUM_CHIPSELECTS-1)
2164  *	k8 private pointer to -->
2165  *			DRAM Bank Address mapping register
2166  *			node_id
2167  *			DCL register where dual_channel_active is
2168  *
2169  * The DBAM register consists of 4 sets of 4 bits each definitions:
2170  *
2171  * Bits:	CSROWs
2172  * 0-3		CSROWs 0 and 1
2173  * 4-7		CSROWs 2 and 3
2174  * 8-11		CSROWs 4 and 5
2175  * 12-15	CSROWs 6 and 7
2176  *
2177  * Values range from: 0 to 15
2178  * The meaning of the values depends on CPU revision and dual-channel state,
2179  * see relevant BKDG more info.
2180  *
2181  * The memory controller provides for total of only 8 CSROWs in its current
2182  * architecture. Each "pair" of CSROWs normally represents just one DIMM in
2183  * single channel or two (2) DIMMs in dual channel mode.
2184  *
2185  * The following code logic collapses the various tables for CSROW based on CPU
2186  * revision.
2187  *
2188  * Returns:
2189  *	The number of PAGE_SIZE pages on the specified CSROW number it
2190  *	encompasses
2191  *
2192  */
2193 static u32 get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
2194 {
2195 	u32 cs_mode, nr_pages;
2196 	u32 dbam = dct ? pvt->dbam1 : pvt->dbam0;
2197 
2198 
2199 	/*
2200 	 * The math on this doesn't look right on the surface because x/2*4 can
2201 	 * be simplified to x*2 but this expression makes use of the fact that
2202 	 * it is integral math where 1/2=0. This intermediate value becomes the
2203 	 * number of bits to shift the DBAM register to extract the proper CSROW
2204 	 * field.
2205 	 */
2206 	cs_mode = DBAM_DIMM(csrow_nr / 2, dbam);
2207 
2208 	nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode) << (20 - PAGE_SHIFT);
2209 
2210 	edac_dbg(0, "csrow: %d, channel: %d, DBAM idx: %d\n",
2211 		    csrow_nr, dct,  cs_mode);
2212 	edac_dbg(0, "nr_pages/channel: %u\n", nr_pages);
2213 
2214 	return nr_pages;
2215 }
2216 
2217 /*
2218  * Initialize the array of csrow attribute instances, based on the values
2219  * from pci config hardware registers.
2220  */
2221 static int init_csrows(struct mem_ctl_info *mci)
2222 {
2223 	struct amd64_pvt *pvt = mci->pvt_info;
2224 	struct csrow_info *csrow;
2225 	struct dimm_info *dimm;
2226 	enum edac_type edac_mode;
2227 	enum mem_type mtype;
2228 	int i, j, empty = 1;
2229 	int nr_pages = 0;
2230 	u32 val;
2231 
2232 	amd64_read_pci_cfg(pvt->F3, NBCFG, &val);
2233 
2234 	pvt->nbcfg = val;
2235 
2236 	edac_dbg(0, "node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
2237 		 pvt->mc_node_id, val,
2238 		 !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE));
2239 
2240 	/*
2241 	 * We iterate over DCT0 here but we look at DCT1 in parallel, if needed.
2242 	 */
2243 	for_each_chip_select(i, 0, pvt) {
2244 		bool row_dct0 = !!csrow_enabled(i, 0, pvt);
2245 		bool row_dct1 = false;
2246 
2247 		if (pvt->fam != 0xf)
2248 			row_dct1 = !!csrow_enabled(i, 1, pvt);
2249 
2250 		if (!row_dct0 && !row_dct1)
2251 			continue;
2252 
2253 		csrow = mci->csrows[i];
2254 		empty = 0;
2255 
2256 		edac_dbg(1, "MC node: %d, csrow: %d\n",
2257 			    pvt->mc_node_id, i);
2258 
2259 		if (row_dct0) {
2260 			nr_pages = get_csrow_nr_pages(pvt, 0, i);
2261 			csrow->channels[0]->dimm->nr_pages = nr_pages;
2262 		}
2263 
2264 		/* K8 has only one DCT */
2265 		if (pvt->fam != 0xf && row_dct1) {
2266 			int row_dct1_pages = get_csrow_nr_pages(pvt, 1, i);
2267 
2268 			csrow->channels[1]->dimm->nr_pages = row_dct1_pages;
2269 			nr_pages += row_dct1_pages;
2270 		}
2271 
2272 		mtype = determine_memory_type(pvt, i);
2273 
2274 		edac_dbg(1, "Total csrow%d pages: %u\n", i, nr_pages);
2275 
2276 		/*
2277 		 * determine whether CHIPKILL or JUST ECC or NO ECC is operating
2278 		 */
2279 		if (pvt->nbcfg & NBCFG_ECC_ENABLE)
2280 			edac_mode = (pvt->nbcfg & NBCFG_CHIPKILL) ?
2281 				    EDAC_S4ECD4ED : EDAC_SECDED;
2282 		else
2283 			edac_mode = EDAC_NONE;
2284 
2285 		for (j = 0; j < pvt->channel_count; j++) {
2286 			dimm = csrow->channels[j]->dimm;
2287 			dimm->mtype = mtype;
2288 			dimm->edac_mode = edac_mode;
2289 		}
2290 	}
2291 
2292 	return empty;
2293 }
2294 
2295 /* get all cores on this DCT */
2296 static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, u16 nid)
2297 {
2298 	int cpu;
2299 
2300 	for_each_online_cpu(cpu)
2301 		if (amd_get_nb_id(cpu) == nid)
2302 			cpumask_set_cpu(cpu, mask);
2303 }
2304 
2305 /* check MCG_CTL on all the cpus on this node */
2306 static bool nb_mce_bank_enabled_on_node(u16 nid)
2307 {
2308 	cpumask_var_t mask;
2309 	int cpu, nbe;
2310 	bool ret = false;
2311 
2312 	if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
2313 		amd64_warn("%s: Error allocating mask\n", __func__);
2314 		return false;
2315 	}
2316 
2317 	get_cpus_on_this_dct_cpumask(mask, nid);
2318 
2319 	rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);
2320 
2321 	for_each_cpu(cpu, mask) {
2322 		struct msr *reg = per_cpu_ptr(msrs, cpu);
2323 		nbe = reg->l & MSR_MCGCTL_NBE;
2324 
2325 		edac_dbg(0, "core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
2326 			 cpu, reg->q,
2327 			 (nbe ? "enabled" : "disabled"));
2328 
2329 		if (!nbe)
2330 			goto out;
2331 	}
2332 	ret = true;
2333 
2334 out:
2335 	free_cpumask_var(mask);
2336 	return ret;
2337 }
2338 
2339 static int toggle_ecc_err_reporting(struct ecc_settings *s, u16 nid, bool on)
2340 {
2341 	cpumask_var_t cmask;
2342 	int cpu;
2343 
2344 	if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
2345 		amd64_warn("%s: error allocating mask\n", __func__);
2346 		return false;
2347 	}
2348 
2349 	get_cpus_on_this_dct_cpumask(cmask, nid);
2350 
2351 	rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
2352 
2353 	for_each_cpu(cpu, cmask) {
2354 
2355 		struct msr *reg = per_cpu_ptr(msrs, cpu);
2356 
2357 		if (on) {
2358 			if (reg->l & MSR_MCGCTL_NBE)
2359 				s->flags.nb_mce_enable = 1;
2360 
2361 			reg->l |= MSR_MCGCTL_NBE;
2362 		} else {
2363 			/*
2364 			 * Turn off NB MCE reporting only when it was off before
2365 			 */
2366 			if (!s->flags.nb_mce_enable)
2367 				reg->l &= ~MSR_MCGCTL_NBE;
2368 		}
2369 	}
2370 	wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
2371 
2372 	free_cpumask_var(cmask);
2373 
2374 	return 0;
2375 }
2376 
2377 static bool enable_ecc_error_reporting(struct ecc_settings *s, u16 nid,
2378 				       struct pci_dev *F3)
2379 {
2380 	bool ret = true;
2381 	u32 value, mask = 0x3;		/* UECC/CECC enable */
2382 
2383 	if (toggle_ecc_err_reporting(s, nid, ON)) {
2384 		amd64_warn("Error enabling ECC reporting over MCGCTL!\n");
2385 		return false;
2386 	}
2387 
2388 	amd64_read_pci_cfg(F3, NBCTL, &value);
2389 
2390 	s->old_nbctl   = value & mask;
2391 	s->nbctl_valid = true;
2392 
2393 	value |= mask;
2394 	amd64_write_pci_cfg(F3, NBCTL, value);
2395 
2396 	amd64_read_pci_cfg(F3, NBCFG, &value);
2397 
2398 	edac_dbg(0, "1: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
2399 		 nid, value, !!(value & NBCFG_ECC_ENABLE));
2400 
2401 	if (!(value & NBCFG_ECC_ENABLE)) {
2402 		amd64_warn("DRAM ECC disabled on this node, enabling...\n");
2403 
2404 		s->flags.nb_ecc_prev = 0;
2405 
2406 		/* Attempt to turn on DRAM ECC Enable */
2407 		value |= NBCFG_ECC_ENABLE;
2408 		amd64_write_pci_cfg(F3, NBCFG, value);
2409 
2410 		amd64_read_pci_cfg(F3, NBCFG, &value);
2411 
2412 		if (!(value & NBCFG_ECC_ENABLE)) {
2413 			amd64_warn("Hardware rejected DRAM ECC enable,"
2414 				   "check memory DIMM configuration.\n");
2415 			ret = false;
2416 		} else {
2417 			amd64_info("Hardware accepted DRAM ECC Enable\n");
2418 		}
2419 	} else {
2420 		s->flags.nb_ecc_prev = 1;
2421 	}
2422 
2423 	edac_dbg(0, "2: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
2424 		 nid, value, !!(value & NBCFG_ECC_ENABLE));
2425 
2426 	return ret;
2427 }
2428 
2429 static void restore_ecc_error_reporting(struct ecc_settings *s, u16 nid,
2430 					struct pci_dev *F3)
2431 {
2432 	u32 value, mask = 0x3;		/* UECC/CECC enable */
2433 
2434 
2435 	if (!s->nbctl_valid)
2436 		return;
2437 
2438 	amd64_read_pci_cfg(F3, NBCTL, &value);
2439 	value &= ~mask;
2440 	value |= s->old_nbctl;
2441 
2442 	amd64_write_pci_cfg(F3, NBCTL, value);
2443 
2444 	/* restore previous BIOS DRAM ECC "off" setting we force-enabled */
2445 	if (!s->flags.nb_ecc_prev) {
2446 		amd64_read_pci_cfg(F3, NBCFG, &value);
2447 		value &= ~NBCFG_ECC_ENABLE;
2448 		amd64_write_pci_cfg(F3, NBCFG, value);
2449 	}
2450 
2451 	/* restore the NB Enable MCGCTL bit */
2452 	if (toggle_ecc_err_reporting(s, nid, OFF))
2453 		amd64_warn("Error restoring NB MCGCTL settings!\n");
2454 }
2455 
2456 /*
2457  * EDAC requires that the BIOS have ECC enabled before
2458  * taking over the processing of ECC errors. A command line
2459  * option allows to force-enable hardware ECC later in
2460  * enable_ecc_error_reporting().
2461  */
2462 static const char *ecc_msg =
2463 	"ECC disabled in the BIOS or no ECC capability, module will not load.\n"
2464 	" Either enable ECC checking or force module loading by setting "
2465 	"'ecc_enable_override'.\n"
2466 	" (Note that use of the override may cause unknown side effects.)\n";
2467 
2468 static bool ecc_enabled(struct pci_dev *F3, u16 nid)
2469 {
2470 	u32 value;
2471 	u8 ecc_en = 0;
2472 	bool nb_mce_en = false;
2473 
2474 	amd64_read_pci_cfg(F3, NBCFG, &value);
2475 
2476 	ecc_en = !!(value & NBCFG_ECC_ENABLE);
2477 	amd64_info("DRAM ECC %s.\n", (ecc_en ? "enabled" : "disabled"));
2478 
2479 	nb_mce_en = nb_mce_bank_enabled_on_node(nid);
2480 	if (!nb_mce_en)
2481 		amd64_notice("NB MCE bank disabled, set MSR "
2482 			     "0x%08x[4] on node %d to enable.\n",
2483 			     MSR_IA32_MCG_CTL, nid);
2484 
2485 	if (!ecc_en || !nb_mce_en) {
2486 		amd64_notice("%s", ecc_msg);
2487 		return false;
2488 	}
2489 	return true;
2490 }
2491 
2492 static int set_mc_sysfs_attrs(struct mem_ctl_info *mci)
2493 {
2494 	struct amd64_pvt *pvt = mci->pvt_info;
2495 	int rc;
2496 
2497 	rc = amd64_create_sysfs_dbg_files(mci);
2498 	if (rc < 0)
2499 		return rc;
2500 
2501 	if (pvt->fam >= 0x10) {
2502 		rc = amd64_create_sysfs_inject_files(mci);
2503 		if (rc < 0)
2504 			return rc;
2505 	}
2506 
2507 	return 0;
2508 }
2509 
2510 static void del_mc_sysfs_attrs(struct mem_ctl_info *mci)
2511 {
2512 	struct amd64_pvt *pvt = mci->pvt_info;
2513 
2514 	amd64_remove_sysfs_dbg_files(mci);
2515 
2516 	if (pvt->fam >= 0x10)
2517 		amd64_remove_sysfs_inject_files(mci);
2518 }
2519 
2520 static void setup_mci_misc_attrs(struct mem_ctl_info *mci,
2521 				 struct amd64_family_type *fam)
2522 {
2523 	struct amd64_pvt *pvt = mci->pvt_info;
2524 
2525 	mci->mtype_cap		= MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
2526 	mci->edac_ctl_cap	= EDAC_FLAG_NONE;
2527 
2528 	if (pvt->nbcap & NBCAP_SECDED)
2529 		mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
2530 
2531 	if (pvt->nbcap & NBCAP_CHIPKILL)
2532 		mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
2533 
2534 	mci->edac_cap		= determine_edac_cap(pvt);
2535 	mci->mod_name		= EDAC_MOD_STR;
2536 	mci->mod_ver		= EDAC_AMD64_VERSION;
2537 	mci->ctl_name		= fam->ctl_name;
2538 	mci->dev_name		= pci_name(pvt->F2);
2539 	mci->ctl_page_to_phys	= NULL;
2540 
2541 	/* memory scrubber interface */
2542 	mci->set_sdram_scrub_rate = set_scrub_rate;
2543 	mci->get_sdram_scrub_rate = get_scrub_rate;
2544 }
2545 
2546 /*
2547  * returns a pointer to the family descriptor on success, NULL otherwise.
2548  */
2549 static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
2550 {
2551 	struct amd64_family_type *fam_type = NULL;
2552 
2553 	pvt->ext_model  = boot_cpu_data.x86_model >> 4;
2554 	pvt->stepping	= boot_cpu_data.x86_mask;
2555 	pvt->model	= boot_cpu_data.x86_model;
2556 	pvt->fam	= boot_cpu_data.x86;
2557 
2558 	switch (pvt->fam) {
2559 	case 0xf:
2560 		fam_type	= &family_types[K8_CPUS];
2561 		pvt->ops	= &family_types[K8_CPUS].ops;
2562 		break;
2563 
2564 	case 0x10:
2565 		fam_type	= &family_types[F10_CPUS];
2566 		pvt->ops	= &family_types[F10_CPUS].ops;
2567 		break;
2568 
2569 	case 0x15:
2570 		if (pvt->model == 0x30) {
2571 			fam_type = &family_types[F15_M30H_CPUS];
2572 			pvt->ops = &family_types[F15_M30H_CPUS].ops;
2573 			break;
2574 		}
2575 
2576 		fam_type	= &family_types[F15_CPUS];
2577 		pvt->ops	= &family_types[F15_CPUS].ops;
2578 		break;
2579 
2580 	case 0x16:
2581 		fam_type	= &family_types[F16_CPUS];
2582 		pvt->ops	= &family_types[F16_CPUS].ops;
2583 		break;
2584 
2585 	default:
2586 		amd64_err("Unsupported family!\n");
2587 		return NULL;
2588 	}
2589 
2590 	amd64_info("%s %sdetected (node %d).\n", fam_type->ctl_name,
2591 		     (pvt->fam == 0xf ?
2592 				(pvt->ext_model >= K8_REV_F  ? "revF or later "
2593 							     : "revE or earlier ")
2594 				 : ""), pvt->mc_node_id);
2595 	return fam_type;
2596 }
2597 
2598 static int init_one_instance(struct pci_dev *F2)
2599 {
2600 	struct amd64_pvt *pvt = NULL;
2601 	struct amd64_family_type *fam_type = NULL;
2602 	struct mem_ctl_info *mci = NULL;
2603 	struct edac_mc_layer layers[2];
2604 	int err = 0, ret;
2605 	u16 nid = amd_get_node_id(F2);
2606 
2607 	ret = -ENOMEM;
2608 	pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
2609 	if (!pvt)
2610 		goto err_ret;
2611 
2612 	pvt->mc_node_id	= nid;
2613 	pvt->F2 = F2;
2614 
2615 	ret = -EINVAL;
2616 	fam_type = per_family_init(pvt);
2617 	if (!fam_type)
2618 		goto err_free;
2619 
2620 	ret = -ENODEV;
2621 	err = reserve_mc_sibling_devs(pvt, fam_type->f1_id, fam_type->f3_id);
2622 	if (err)
2623 		goto err_free;
2624 
2625 	read_mc_regs(pvt);
2626 
2627 	/*
2628 	 * We need to determine how many memory channels there are. Then use
2629 	 * that information for calculating the size of the dynamic instance
2630 	 * tables in the 'mci' structure.
2631 	 */
2632 	ret = -EINVAL;
2633 	pvt->channel_count = pvt->ops->early_channel_count(pvt);
2634 	if (pvt->channel_count < 0)
2635 		goto err_siblings;
2636 
2637 	ret = -ENOMEM;
2638 	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
2639 	layers[0].size = pvt->csels[0].b_cnt;
2640 	layers[0].is_virt_csrow = true;
2641 	layers[1].type = EDAC_MC_LAYER_CHANNEL;
2642 
2643 	/*
2644 	 * Always allocate two channels since we can have setups with DIMMs on
2645 	 * only one channel. Also, this simplifies handling later for the price
2646 	 * of a couple of KBs tops.
2647 	 */
2648 	layers[1].size = 2;
2649 	layers[1].is_virt_csrow = false;
2650 
2651 	mci = edac_mc_alloc(nid, ARRAY_SIZE(layers), layers, 0);
2652 	if (!mci)
2653 		goto err_siblings;
2654 
2655 	mci->pvt_info = pvt;
2656 	mci->pdev = &pvt->F2->dev;
2657 
2658 	setup_mci_misc_attrs(mci, fam_type);
2659 
2660 	if (init_csrows(mci))
2661 		mci->edac_cap = EDAC_FLAG_NONE;
2662 
2663 	ret = -ENODEV;
2664 	if (edac_mc_add_mc(mci)) {
2665 		edac_dbg(1, "failed edac_mc_add_mc()\n");
2666 		goto err_add_mc;
2667 	}
2668 	if (set_mc_sysfs_attrs(mci)) {
2669 		edac_dbg(1, "failed edac_mc_add_mc()\n");
2670 		goto err_add_sysfs;
2671 	}
2672 
2673 	/* register stuff with EDAC MCE */
2674 	if (report_gart_errors)
2675 		amd_report_gart_errors(true);
2676 
2677 	amd_register_ecc_decoder(decode_bus_error);
2678 
2679 	mcis[nid] = mci;
2680 
2681 	atomic_inc(&drv_instances);
2682 
2683 	return 0;
2684 
2685 err_add_sysfs:
2686 	edac_mc_del_mc(mci->pdev);
2687 err_add_mc:
2688 	edac_mc_free(mci);
2689 
2690 err_siblings:
2691 	free_mc_sibling_devs(pvt);
2692 
2693 err_free:
2694 	kfree(pvt);
2695 
2696 err_ret:
2697 	return ret;
2698 }
2699 
2700 static int probe_one_instance(struct pci_dev *pdev,
2701 			      const struct pci_device_id *mc_type)
2702 {
2703 	u16 nid = amd_get_node_id(pdev);
2704 	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
2705 	struct ecc_settings *s;
2706 	int ret = 0;
2707 
2708 	ret = pci_enable_device(pdev);
2709 	if (ret < 0) {
2710 		edac_dbg(0, "ret=%d\n", ret);
2711 		return -EIO;
2712 	}
2713 
2714 	ret = -ENOMEM;
2715 	s = kzalloc(sizeof(struct ecc_settings), GFP_KERNEL);
2716 	if (!s)
2717 		goto err_out;
2718 
2719 	ecc_stngs[nid] = s;
2720 
2721 	if (!ecc_enabled(F3, nid)) {
2722 		ret = -ENODEV;
2723 
2724 		if (!ecc_enable_override)
2725 			goto err_enable;
2726 
2727 		amd64_warn("Forcing ECC on!\n");
2728 
2729 		if (!enable_ecc_error_reporting(s, nid, F3))
2730 			goto err_enable;
2731 	}
2732 
2733 	ret = init_one_instance(pdev);
2734 	if (ret < 0) {
2735 		amd64_err("Error probing instance: %d\n", nid);
2736 		restore_ecc_error_reporting(s, nid, F3);
2737 	}
2738 
2739 	return ret;
2740 
2741 err_enable:
2742 	kfree(s);
2743 	ecc_stngs[nid] = NULL;
2744 
2745 err_out:
2746 	return ret;
2747 }
2748 
2749 static void remove_one_instance(struct pci_dev *pdev)
2750 {
2751 	struct mem_ctl_info *mci;
2752 	struct amd64_pvt *pvt;
2753 	u16 nid = amd_get_node_id(pdev);
2754 	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
2755 	struct ecc_settings *s = ecc_stngs[nid];
2756 
2757 	mci = find_mci_by_dev(&pdev->dev);
2758 	WARN_ON(!mci);
2759 
2760 	del_mc_sysfs_attrs(mci);
2761 	/* Remove from EDAC CORE tracking list */
2762 	mci = edac_mc_del_mc(&pdev->dev);
2763 	if (!mci)
2764 		return;
2765 
2766 	pvt = mci->pvt_info;
2767 
2768 	restore_ecc_error_reporting(s, nid, F3);
2769 
2770 	free_mc_sibling_devs(pvt);
2771 
2772 	/* unregister from EDAC MCE */
2773 	amd_report_gart_errors(false);
2774 	amd_unregister_ecc_decoder(decode_bus_error);
2775 
2776 	kfree(ecc_stngs[nid]);
2777 	ecc_stngs[nid] = NULL;
2778 
2779 	/* Free the EDAC CORE resources */
2780 	mci->pvt_info = NULL;
2781 	mcis[nid] = NULL;
2782 
2783 	kfree(pvt);
2784 	edac_mc_free(mci);
2785 }
2786 
2787 /*
2788  * This table is part of the interface for loading drivers for PCI devices. The
2789  * PCI core identifies what devices are on a system during boot, and then
2790  * inquiry this table to see if this driver is for a given device found.
2791  */
2792 static const struct pci_device_id amd64_pci_table[] = {
2793 	{
2794 		.vendor		= PCI_VENDOR_ID_AMD,
2795 		.device		= PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
2796 		.subvendor	= PCI_ANY_ID,
2797 		.subdevice	= PCI_ANY_ID,
2798 		.class		= 0,
2799 		.class_mask	= 0,
2800 	},
2801 	{
2802 		.vendor		= PCI_VENDOR_ID_AMD,
2803 		.device		= PCI_DEVICE_ID_AMD_10H_NB_DRAM,
2804 		.subvendor	= PCI_ANY_ID,
2805 		.subdevice	= PCI_ANY_ID,
2806 		.class		= 0,
2807 		.class_mask	= 0,
2808 	},
2809 	{
2810 		.vendor		= PCI_VENDOR_ID_AMD,
2811 		.device		= PCI_DEVICE_ID_AMD_15H_NB_F2,
2812 		.subvendor	= PCI_ANY_ID,
2813 		.subdevice	= PCI_ANY_ID,
2814 		.class		= 0,
2815 		.class_mask	= 0,
2816 	},
2817 	{
2818 		.vendor		= PCI_VENDOR_ID_AMD,
2819 		.device		= PCI_DEVICE_ID_AMD_15H_M30H_NB_F2,
2820 		.subvendor	= PCI_ANY_ID,
2821 		.subdevice	= PCI_ANY_ID,
2822 		.class		= 0,
2823 		.class_mask	= 0,
2824 	},
2825 	{
2826 		.vendor		= PCI_VENDOR_ID_AMD,
2827 		.device		= PCI_DEVICE_ID_AMD_16H_NB_F2,
2828 		.subvendor	= PCI_ANY_ID,
2829 		.subdevice	= PCI_ANY_ID,
2830 		.class		= 0,
2831 		.class_mask	= 0,
2832 	},
2833 
2834 	{0, }
2835 };
2836 MODULE_DEVICE_TABLE(pci, amd64_pci_table);
2837 
2838 static struct pci_driver amd64_pci_driver = {
2839 	.name		= EDAC_MOD_STR,
2840 	.probe		= probe_one_instance,
2841 	.remove		= remove_one_instance,
2842 	.id_table	= amd64_pci_table,
2843 };
2844 
2845 static void setup_pci_device(void)
2846 {
2847 	struct mem_ctl_info *mci;
2848 	struct amd64_pvt *pvt;
2849 
2850 	if (pci_ctl)
2851 		return;
2852 
2853 	mci = mcis[0];
2854 	if (!mci)
2855 		return;
2856 
2857 	pvt = mci->pvt_info;
2858 	pci_ctl = edac_pci_create_generic_ctl(&pvt->F2->dev, EDAC_MOD_STR);
2859 	if (!pci_ctl) {
2860 		pr_warn("%s(): Unable to create PCI control\n", __func__);
2861 		pr_warn("%s(): PCI error report via EDAC not set\n", __func__);
2862 	}
2863 }
2864 
2865 static int __init amd64_edac_init(void)
2866 {
2867 	int err = -ENODEV;
2868 
2869 	printk(KERN_INFO "AMD64 EDAC driver v%s\n", EDAC_AMD64_VERSION);
2870 
2871 	opstate_init();
2872 
2873 	if (amd_cache_northbridges() < 0)
2874 		goto err_ret;
2875 
2876 	err = -ENOMEM;
2877 	mcis	  = kzalloc(amd_nb_num() * sizeof(mcis[0]), GFP_KERNEL);
2878 	ecc_stngs = kzalloc(amd_nb_num() * sizeof(ecc_stngs[0]), GFP_KERNEL);
2879 	if (!(mcis && ecc_stngs))
2880 		goto err_free;
2881 
2882 	msrs = msrs_alloc();
2883 	if (!msrs)
2884 		goto err_free;
2885 
2886 	err = pci_register_driver(&amd64_pci_driver);
2887 	if (err)
2888 		goto err_pci;
2889 
2890 	err = -ENODEV;
2891 	if (!atomic_read(&drv_instances))
2892 		goto err_no_instances;
2893 
2894 	setup_pci_device();
2895 	return 0;
2896 
2897 err_no_instances:
2898 	pci_unregister_driver(&amd64_pci_driver);
2899 
2900 err_pci:
2901 	msrs_free(msrs);
2902 	msrs = NULL;
2903 
2904 err_free:
2905 	kfree(mcis);
2906 	mcis = NULL;
2907 
2908 	kfree(ecc_stngs);
2909 	ecc_stngs = NULL;
2910 
2911 err_ret:
2912 	return err;
2913 }
2914 
2915 static void __exit amd64_edac_exit(void)
2916 {
2917 	if (pci_ctl)
2918 		edac_pci_release_generic_ctl(pci_ctl);
2919 
2920 	pci_unregister_driver(&amd64_pci_driver);
2921 
2922 	kfree(ecc_stngs);
2923 	ecc_stngs = NULL;
2924 
2925 	kfree(mcis);
2926 	mcis = NULL;
2927 
2928 	msrs_free(msrs);
2929 	msrs = NULL;
2930 }
2931 
2932 module_init(amd64_edac_init);
2933 module_exit(amd64_edac_exit);
2934 
2935 MODULE_LICENSE("GPL");
2936 MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, "
2937 		"Dave Peterson, Thayne Harbaugh");
2938 MODULE_DESCRIPTION("MC support for AMD64 memory controllers - "
2939 		EDAC_AMD64_VERSION);
2940 
2941 module_param(edac_op_state, int, 0444);
2942 MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
2943