xref: /openbmc/linux/drivers/edac/amd64_edac.c (revision 089a49b6)
1 #include "amd64_edac.h"
2 #include <asm/amd_nb.h>
3 
4 static struct edac_pci_ctl_info *amd64_ctl_pci;
5 
6 static int report_gart_errors;
7 module_param(report_gart_errors, int, 0644);
8 
9 /*
10  * Set by command line parameter. If BIOS has enabled the ECC, this override is
11  * cleared to prevent re-enabling the hardware by this driver.
12  */
13 static int ecc_enable_override;
14 module_param(ecc_enable_override, int, 0644);
15 
16 static struct msr __percpu *msrs;
17 
18 /*
19  * count successfully initialized driver instances for setup_pci_device()
20  */
21 static atomic_t drv_instances = ATOMIC_INIT(0);
22 
23 /* Per-node driver instances */
24 static struct mem_ctl_info **mcis;
25 static struct ecc_settings **ecc_stngs;
26 
27 /*
28  * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
29  * bandwidth to a valid bit pattern. The 'set' operation finds the matching
30  * or next-higher value.
31  *
32  * FIXME: Produce a better mapping/linearisation.
33  */
34 static const struct scrubrate {
35        u32 scrubval;           /* bit pattern for scrub rate */
36        u32 bandwidth;          /* bandwidth consumed (bytes/sec) */
37 } scrubrates[] = {
38 	{ 0x01, 1600000000UL},
39 	{ 0x02, 800000000UL},
40 	{ 0x03, 400000000UL},
41 	{ 0x04, 200000000UL},
42 	{ 0x05, 100000000UL},
43 	{ 0x06, 50000000UL},
44 	{ 0x07, 25000000UL},
45 	{ 0x08, 12284069UL},
46 	{ 0x09, 6274509UL},
47 	{ 0x0A, 3121951UL},
48 	{ 0x0B, 1560975UL},
49 	{ 0x0C, 781440UL},
50 	{ 0x0D, 390720UL},
51 	{ 0x0E, 195300UL},
52 	{ 0x0F, 97650UL},
53 	{ 0x10, 48854UL},
54 	{ 0x11, 24427UL},
55 	{ 0x12, 12213UL},
56 	{ 0x13, 6101UL},
57 	{ 0x14, 3051UL},
58 	{ 0x15, 1523UL},
59 	{ 0x16, 761UL},
60 	{ 0x00, 0UL},        /* scrubbing off */
61 };
62 
63 int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
64 			       u32 *val, const char *func)
65 {
66 	int err = 0;
67 
68 	err = pci_read_config_dword(pdev, offset, val);
69 	if (err)
70 		amd64_warn("%s: error reading F%dx%03x.\n",
71 			   func, PCI_FUNC(pdev->devfn), offset);
72 
73 	return err;
74 }
75 
76 int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
77 				u32 val, const char *func)
78 {
79 	int err = 0;
80 
81 	err = pci_write_config_dword(pdev, offset, val);
82 	if (err)
83 		amd64_warn("%s: error writing to F%dx%03x.\n",
84 			   func, PCI_FUNC(pdev->devfn), offset);
85 
86 	return err;
87 }
88 
89 /*
90  *
91  * Depending on the family, F2 DCT reads need special handling:
92  *
93  * K8: has a single DCT only
94  *
95  * F10h: each DCT has its own set of regs
96  *	DCT0 -> F2x040..
97  *	DCT1 -> F2x140..
98  *
99  * F15h: we select which DCT we access using F1x10C[DctCfgSel]
100  *
101  * F16h: has only 1 DCT
102  */
103 static int k8_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
104 			       const char *func)
105 {
106 	if (addr >= 0x100)
107 		return -EINVAL;
108 
109 	return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
110 }
111 
112 static int f10_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
113 				 const char *func)
114 {
115 	return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
116 }
117 
118 /*
119  * Select DCT to which PCI cfg accesses are routed
120  */
121 static void f15h_select_dct(struct amd64_pvt *pvt, u8 dct)
122 {
123 	u32 reg = 0;
124 
125 	amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, &reg);
126 	reg &= (pvt->model >= 0x30) ? ~3 : ~1;
127 	reg |= dct;
128 	amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg);
129 }
130 
131 static int f15_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
132 				 const char *func)
133 {
134 	u8 dct  = 0;
135 
136 	/* For F15 M30h, the second dct is DCT 3, refer to BKDG Section 2.10 */
137 	if (addr >= 0x140 && addr <= 0x1a0) {
138 		dct   = (pvt->model >= 0x30) ? 3 : 1;
139 		addr -= 0x100;
140 	}
141 
142 	f15h_select_dct(pvt, dct);
143 
144 	return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
145 }
146 
147 /*
148  * Memory scrubber control interface. For K8, memory scrubbing is handled by
149  * hardware and can involve L2 cache, dcache as well as the main memory. With
150  * F10, this is extended to L3 cache scrubbing on CPU models sporting that
151  * functionality.
152  *
153  * This causes the "units" for the scrubbing speed to vary from 64-byte blocks
154  * (DRAM) to cache lines. This is nasty, so we use bandwidth in
155  * bytes/sec for the setting instead.
156  *
157  * Currently, we only do DRAM scrubbing. If the scrubbing is done in software on
158  * other archs, we might not have access to the caches directly.
159  */
160 
161 /*
162  * Scan the scrub rate mapping table for a close or matching bandwidth value to
163  * issue. If the requested rate is too big, use the last (maximum) value found.
164  */
165 static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
166 {
167 	u32 scrubval;
168 	int i;
169 
170 	/*
171 	 * map the configured rate (new_bw) to a value specific to the AMD64
172 	 * memory controller and apply to register. Search for the first
173 	 * bandwidth entry that is greater than or equal to the setting requested
174 	 * and program that. If at last entry, turn off DRAM scrubbing.
175 	 *
176 	 * If no suitable bandwidth is found, turn off DRAM scrubbing entirely
177 	 * by falling back to the last element in scrubrates[].
178 	 */
179 	for (i = 0; i < ARRAY_SIZE(scrubrates) - 1; i++) {
180 		/*
181 		 * skip scrub rates which aren't recommended
182 		 * (see F10 BKDG, F3x58)
183 		 */
184 		if (scrubrates[i].scrubval < min_rate)
185 			continue;
186 
187 		if (scrubrates[i].bandwidth <= new_bw)
188 			break;
189 	}
190 
191 	scrubval = scrubrates[i].scrubval;
192 
193 	pci_write_bits32(ctl, SCRCTRL, scrubval, 0x001F);
194 
195 	if (scrubval)
196 		return scrubrates[i].bandwidth;
197 
198 	return 0;
199 }
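
/*
 * Worked example of the selection above (illustrative values only): with
 * min_rate = 0x5 and a requested new_bw of 100000 bytes/sec, the loop skips
 * scrubvals 0x01-0x04, passes over every entry whose bandwidth is still
 * larger than 100000 and stops at { 0x0F, 97650UL }, the first remaining
 * entry with bandwidth <= new_bw. Scrubval 0x0F is then programmed into the
 * low five bits of the scrub control register and 97650 is returned.
 */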
200 
201 static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
202 {
203 	struct amd64_pvt *pvt = mci->pvt_info;
204 	u32 min_scrubrate = 0x5;
205 
206 	if (pvt->fam == 0xf)
207 		min_scrubrate = 0x0;
208 
209 	/* Erratum #505 */
210 	if (pvt->fam == 0x15 && pvt->model < 0x10)
211 		f15h_select_dct(pvt, 0);
212 
213 	return __amd64_set_scrub_rate(pvt->F3, bw, min_scrubrate);
214 }
215 
216 static int amd64_get_scrub_rate(struct mem_ctl_info *mci)
217 {
218 	struct amd64_pvt *pvt = mci->pvt_info;
219 	u32 scrubval = 0;
220 	int i, retval = -EINVAL;
221 
222 	/* Erratum #505 */
223 	if (pvt->fam == 0x15 && pvt->model < 0x10)
224 		f15h_select_dct(pvt, 0);
225 
226 	amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
227 
228 	scrubval = scrubval & 0x001F;
229 
230 	for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
231 		if (scrubrates[i].scrubval == scrubval) {
232 			retval = scrubrates[i].bandwidth;
233 			break;
234 		}
235 	}
236 	return retval;
237 }
238 
239 /*
240  * returns true if the SysAddr given by sys_addr matches the
241  * DRAM base/limit associated with node_id
242  */
243 static bool amd64_base_limit_match(struct amd64_pvt *pvt, u64 sys_addr,
244 				   u8 nid)
245 {
246 	u64 addr;
247 
248 	/* The K8 treats this as a 40-bit value.  However, bits 63-40 will be
249 	 * all ones if the most significant implemented address bit is 1.
250 	 * Here we discard bits 63-40.  See section 3.4.2 of AMD publication
251 	 * 24592: AMD x86-64 Architecture Programmer's Manual Volume 1
252 	 * Application Programming.
253 	 */
254 	addr = sys_addr & 0x000000ffffffffffull;
255 
256 	return ((addr >= get_dram_base(pvt, nid)) &&
257 		(addr <= get_dram_limit(pvt, nid)));
258 }
259 
260 /*
261  * Attempt to map a SysAddr to a node. On success, return a pointer to the
262  * mem_ctl_info structure for the node that the SysAddr maps to.
263  *
264  * On failure, return NULL.
265  */
266 static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
267 						u64 sys_addr)
268 {
269 	struct amd64_pvt *pvt;
270 	u8 node_id;
271 	u32 intlv_en, bits;
272 
273 	/*
274 	 * Here we use the DRAM Base (section 3.4.4.1) and DRAM Limit (section
275 	 * 3.4.4.2) registers to map the SysAddr to a node ID.
276 	 */
277 	pvt = mci->pvt_info;
278 
279 	/*
280 	 * The value of this field should be the same for all DRAM Base
281 	 * registers.  Therefore we arbitrarily choose to read it from the
282 	 * register for node 0.
283 	 */
284 	intlv_en = dram_intlv_en(pvt, 0);
285 
286 	if (intlv_en == 0) {
287 		for (node_id = 0; node_id < DRAM_RANGES; node_id++) {
288 			if (amd64_base_limit_match(pvt, sys_addr, node_id))
289 				goto found;
290 		}
291 		goto err_no_match;
292 	}
293 
294 	if (unlikely((intlv_en != 0x01) &&
295 		     (intlv_en != 0x03) &&
296 		     (intlv_en != 0x07))) {
297 		amd64_warn("DRAM Base[IntlvEn] junk value: 0x%x, BIOS bug?\n", intlv_en);
298 		return NULL;
299 	}
300 
301 	bits = (((u32) sys_addr) >> 12) & intlv_en;
302 
303 	for (node_id = 0; ; ) {
304 		if ((dram_intlv_sel(pvt, node_id) & intlv_en) == bits)
305 			break;	/* intlv_sel field matches */
306 
307 		if (++node_id >= DRAM_RANGES)
308 			goto err_no_match;
309 	}
310 
311 	/* sanity test for sys_addr */
312 	if (unlikely(!amd64_base_limit_match(pvt, sys_addr, node_id))) {
313 		amd64_warn("%s: sys_addr 0x%llx falls outside base/limit address "
314 			   "range for node %d with node interleaving enabled.\n",
315 			   __func__, sys_addr, node_id);
316 		return NULL;
317 	}
318 
319 found:
320 	return edac_mc_find((int)node_id);
321 
322 err_no_match:
323 	edac_dbg(2, "sys_addr 0x%lx doesn't match any node\n",
324 		 (unsigned long)sys_addr);
325 
326 	return NULL;
327 }
328 
329 /*
330  * compute the CS base address of the @csrow on the DRAM controller @dct.
331  * For details see F2x[5C:40] in the processor's BKDG
332  */
333 static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
334 				 u64 *base, u64 *mask)
335 {
336 	u64 csbase, csmask, base_bits, mask_bits;
337 	u8 addr_shift;
338 
339 	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
340 		csbase		= pvt->csels[dct].csbases[csrow];
341 		csmask		= pvt->csels[dct].csmasks[csrow];
342 		base_bits	= GENMASK(21, 31) | GENMASK(9, 15);
343 		mask_bits	= GENMASK(21, 29) | GENMASK(9, 15);
344 		addr_shift	= 4;
345 
346 	/*
347 	 * F16h and F15h, models 30h and later need two addr_shift values:
348 	 * 8 for high and 6 for low (cf. F16h BKDG).
349 	 */
350 	} else if (pvt->fam == 0x16 ||
351 		  (pvt->fam == 0x15 && pvt->model >= 0x30)) {
352 		csbase          = pvt->csels[dct].csbases[csrow];
353 		csmask          = pvt->csels[dct].csmasks[csrow >> 1];
354 
355 		*base  = (csbase & GENMASK(5,  15)) << 6;
356 		*base |= (csbase & GENMASK(19, 30)) << 8;
357 
358 		*mask = ~0ULL;
359 		/* poke holes for the csmask */
360 		*mask &= ~((GENMASK(5, 15)  << 6) |
361 			   (GENMASK(19, 30) << 8));
362 
363 		*mask |= (csmask & GENMASK(5, 15))  << 6;
364 		*mask |= (csmask & GENMASK(19, 30)) << 8;
365 
366 		return;
367 	} else {
368 		csbase		= pvt->csels[dct].csbases[csrow];
369 		csmask		= pvt->csels[dct].csmasks[csrow >> 1];
370 		addr_shift	= 8;
371 
372 		if (pvt->fam == 0x15)
373 			base_bits = mask_bits = GENMASK(19,30) | GENMASK(5,13);
374 		else
375 			base_bits = mask_bits = GENMASK(19,28) | GENMASK(5,13);
376 	}
377 
378 	*base  = (csbase & base_bits) << addr_shift;
379 
380 	*mask  = ~0ULL;
381 	/* poke holes for the csmask */
382 	*mask &= ~(mask_bits << addr_shift);
383 	/* OR them in */
384 	*mask |= (csmask & mask_bits) << addr_shift;
385 }
386 
387 #define for_each_chip_select(i, dct, pvt) \
388 	for (i = 0; i < pvt->csels[dct].b_cnt; i++)
389 
390 #define chip_select_base(i, dct, pvt) \
391 	pvt->csels[dct].csbases[i]
392 
393 #define for_each_chip_select_mask(i, dct, pvt) \
394 	for (i = 0; i < pvt->csels[dct].m_cnt; i++)
395 
396 /*
397  * @input_addr is an InputAddr associated with the node given by mci. Return the
398  * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
399  */
400 static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
401 {
402 	struct amd64_pvt *pvt;
403 	int csrow;
404 	u64 base, mask;
405 
406 	pvt = mci->pvt_info;
407 
408 	for_each_chip_select(csrow, 0, pvt) {
409 		if (!csrow_enabled(csrow, 0, pvt))
410 			continue;
411 
412 		get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);
413 
414 		mask = ~mask;
415 
416 		if ((input_addr & mask) == (base & mask)) {
417 			edac_dbg(2, "InputAddr 0x%lx matches csrow %d (node %d)\n",
418 				 (unsigned long)input_addr, csrow,
419 				 pvt->mc_node_id);
420 
421 			return csrow;
422 		}
423 	}
424 	edac_dbg(2, "no matching csrow for InputAddr 0x%lx (MC node %d)\n",
425 		 (unsigned long)input_addr, pvt->mc_node_id);
426 
427 	return -1;
428 }
429 
430 /*
431  * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094)
432  * for the node represented by mci. Info is passed back in *hole_base,
433  * *hole_offset, and *hole_size.  Function returns 0 if info is valid or 1 if
434  * info is invalid. Info may be invalid for either of the following reasons:
435  *
436  * - The revision of the node is not E or greater.  In this case, the DRAM Hole
437  *   Address Register does not exist.
438  *
439  * - The DramHoleValid bit is cleared in the DRAM Hole Address Register,
440  *   indicating that its contents are not valid.
441  *
442  * The values passed back in *hole_base, *hole_offset, and *hole_size are
443  * complete 32-bit values despite the fact that the bitfields in the DHAR
444  * only represent bits 31-24 of the base and offset values.
445  */
446 int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
447 			     u64 *hole_offset, u64 *hole_size)
448 {
449 	struct amd64_pvt *pvt = mci->pvt_info;
450 
451 	/* only revE and later have the DRAM Hole Address Register */
452 	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_E) {
453 		edac_dbg(1, "  revision %d for node %d does not support DHAR\n",
454 			 pvt->ext_model, pvt->mc_node_id);
455 		return 1;
456 	}
457 
458 	/* valid for Fam10h and above */
459 	if (pvt->fam >= 0x10 && !dhar_mem_hoist_valid(pvt)) {
460 		edac_dbg(1, "  Dram Memory Hoisting is DISABLED on this system\n");
461 		return 1;
462 	}
463 
464 	if (!dhar_valid(pvt)) {
465 		edac_dbg(1, "  Dram Memory Hoisting is DISABLED on this node %d\n",
466 			 pvt->mc_node_id);
467 		return 1;
468 	}
469 
470 	/* This node has Memory Hoisting */
471 
472 	/* +------------------+--------------------+--------------------+-----
473 	 * | memory           | DRAM hole          | relocated          |
474 	 * | [0, (x - 1)]     | [x, 0xffffffff]    | addresses from     |
475 	 * |                  |                    | DRAM hole          |
476 	 * |                  |                    | [0x100000000,      |
477 	 * |                  |                    |  (0x100000000+     |
478 	 * |                  |                    |   (0xffffffff-x))] |
479 	 * +------------------+--------------------+--------------------+-----
480 	 *
481 	 * Above is a diagram of physical memory showing the DRAM hole and the
482 	 * relocated addresses from the DRAM hole.  As shown, the DRAM hole
483 	 * starts at address x (the base address) and extends through address
484 	 * 0xffffffff.  The DRAM Hole Address Register (DHAR) relocates the
485 	 * addresses in the hole so that they start at 0x100000000.
486 	 */
487 
488 	*hole_base = dhar_base(pvt);
489 	*hole_size = (1ULL << 32) - *hole_base;
490 
491 	*hole_offset = (pvt->fam > 0xf) ? f10_dhar_offset(pvt)
492 					: k8_dhar_offset(pvt);
493 
494 	edac_dbg(1, "  DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
495 		 pvt->mc_node_id, (unsigned long)*hole_base,
496 		 (unsigned long)*hole_offset, (unsigned long)*hole_size);
497 
498 	return 0;
499 }
500 EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info);
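
/*
 * Example (assuming a BIOS-programmed hole base of 0xC0000000): the function
 * above reports hole_base = 0xC0000000 and
 * hole_size = (1ULL << 32) - 0xC0000000 = 0x40000000 (1 GB), i.e. the top
 * gigabyte below 4 GB is the MMIO hole and an equally sized chunk of DRAM is
 * hoisted above 4 GB, starting at 0x100000000.
 */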
501 
502 /*
503  * Return the DramAddr that the SysAddr given by @sys_addr maps to.  It is
504  * assumed that sys_addr maps to the node given by mci.
505  *
506  * The first part of section 3.4.4 (p. 70) shows how the DRAM Base (section
507  * 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers are used to translate a
508  * SysAddr to a DramAddr. If the DRAM Hole Address Register (DHAR) is enabled,
509  * then it is also involved in translating a SysAddr to a DramAddr. Sections
510  * 3.4.8 and 3.5.8.2 describe the DHAR and how it is used for memory hoisting.
511  * These parts of the documentation are unclear. I interpret them as follows:
512  *
513  * When node n receives a SysAddr, it processes the SysAddr as follows:
514  *
515  * 1. It extracts the DRAMBase and DRAMLimit values from the DRAM Base and DRAM
516  *    Limit registers for node n. If the SysAddr is not within the range
517  *    specified by the base and limit values, then node n ignores the Sysaddr
518  *    (since it does not map to node n). Otherwise continue to step 2 below.
519  *
520  * 2. If the DramHoleValid bit of the DHAR for node n is clear, the DHAR is
521  *    disabled so skip to step 3 below. Otherwise see if the SysAddr is within
522  *    the range of relocated addresses (starting at 0x100000000) from the DRAM
523  *    hole. If not, skip to step 3 below. Else get the value of the
524  *    DramHoleOffset field from the DHAR. To obtain the DramAddr, subtract the
525  *    offset defined by this value from the SysAddr.
526  *
527  * 3. Obtain the base address for node n from the DRAMBase field of the DRAM
528  *    Base register for node n. To obtain the DramAddr, subtract the base
529  *    address from the SysAddr, as shown near the start of section 3.4.4 (p.70).
530  */
531 static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
532 {
533 	struct amd64_pvt *pvt = mci->pvt_info;
534 	u64 dram_base, hole_base, hole_offset, hole_size, dram_addr;
535 	int ret;
536 
537 	dram_base = get_dram_base(pvt, pvt->mc_node_id);
538 
539 	ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
540 				      &hole_size);
541 	if (!ret) {
542 		if ((sys_addr >= (1ULL << 32)) &&
543 		    (sys_addr < ((1ULL << 32) + hole_size))) {
544 			/* use DHAR to translate SysAddr to DramAddr */
545 			dram_addr = sys_addr - hole_offset;
546 
547 			edac_dbg(2, "using DHAR to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
548 				 (unsigned long)sys_addr,
549 				 (unsigned long)dram_addr);
550 
551 			return dram_addr;
552 		}
553 	}
554 
555 	/*
556 	 * Translate the SysAddr to a DramAddr as shown near the start of
557 	 * section 3.4.4 (p. 70).  Although sys_addr is a 64-bit value, the k8
558 	 * only deals with 40-bit values.  Therefore we discard bits 63-40 of
559 	 * sys_addr below.  If bit 39 of sys_addr is 1 then the bits we
560 	 * discard are all 1s.  Otherwise the bits we discard are all 0s.  See
561 	 * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture
562 	 * Programmer's Manual Volume 1 Application Programming.
563 	 */
564 	dram_addr = (sys_addr & GENMASK(0, 39)) - dram_base;
565 
566 	edac_dbg(2, "using DRAM Base register to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
567 		 (unsigned long)sys_addr, (unsigned long)dram_addr);
568 	return dram_addr;
569 }
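
/*
 * Putting the two paths above together (illustrative numbers): if the node's
 * DRAM base is 0 and the DHAR reports a 1 GB hole at 0xC0000000, a SysAddr of
 * 0x100001000 lies in the relocated region and becomes
 * DramAddr = 0x100001000 - hole_offset, while a SysAddr of 0x80000000 (below
 * the hole) is simply truncated to 40 bits and has the DRAM base subtracted,
 * yielding DramAddr 0x80000000.
 */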
570 
571 /*
572  * @intlv_en is the value of the IntlvEn field from a DRAM Base register
573  * (section 3.4.4.1).  Return the number of bits from a SysAddr that are used
574  * for node interleaving.
575  */
576 static int num_node_interleave_bits(unsigned intlv_en)
577 {
578 	static const int intlv_shift_table[] = { 0, 1, 0, 2, 0, 0, 0, 3 };
579 	int n;
580 
581 	BUG_ON(intlv_en > 7);
582 	n = intlv_shift_table[intlv_en];
583 	return n;
584 }
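
/*
 * The table above simply maps the IntlvEn encoding to a shift count:
 * IntlvEn = 0x1 (2-node interleave) -> 1 bit, 0x3 (4 nodes) -> 2 bits,
 * 0x7 (8 nodes) -> 3 bits; all other encodings yield 0.
 */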
585 
586 /* Translate the DramAddr given by @dram_addr to an InputAddr. */
587 static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
588 {
589 	struct amd64_pvt *pvt;
590 	int intlv_shift;
591 	u64 input_addr;
592 
593 	pvt = mci->pvt_info;
594 
595 	/*
596 	 * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
597 	 * concerning translating a DramAddr to an InputAddr.
598 	 */
599 	intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
600 	input_addr = ((dram_addr >> intlv_shift) & GENMASK(12, 35)) +
601 		      (dram_addr & 0xfff);
602 
603 	edac_dbg(2, "  Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
604 		 intlv_shift, (unsigned long)dram_addr,
605 		 (unsigned long)input_addr);
606 
607 	return input_addr;
608 }
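
/*
 * Example with illustrative numbers: for a 2-node interleave
 * (intlv_shift == 1) and DramAddr 0x12345678, the address is shifted right by
 * one bit and masked to bits [35:12] (0x091a2000), then the original page
 * offset 0x678 is added back, giving InputAddr 0x091a2678.
 */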
609 
610 /*
611  * Translate the SysAddr represented by @sys_addr to an InputAddr.  It is
612  * assumed that @sys_addr maps to the node given by mci.
613  */
614 static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
615 {
616 	u64 input_addr;
617 
618 	input_addr =
619 	    dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr));
620 
621 	edac_dbg(2, "SysAdddr 0x%lx translates to InputAddr 0x%lx\n",
622 		 (unsigned long)sys_addr, (unsigned long)input_addr);
623 
624 	return input_addr;
625 }
626 
627 /* Map the Error address to a PAGE and PAGE OFFSET. */
628 static inline void error_address_to_page_and_offset(u64 error_address,
629 						    struct err_info *err)
630 {
631 	err->page = (u32) (error_address >> PAGE_SHIFT);
632 	err->offset = ((u32) error_address) & ~PAGE_MASK;
633 }
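
/*
 * E.g. with the usual 4 KB pages (PAGE_SHIFT == 12), an error address of
 * 0x12345678 is reported as page 0x12345, offset 0x678.
 */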
634 
635 /*
636  * @sys_addr is an error address (a SysAddr) extracted from the MCA NB Address
637  * Low (section 3.6.4.5) and MCA NB Address High (section 3.6.4.6) registers
638  * of a node that detected an ECC memory error.  mci represents the node that
639  * the error address maps to (possibly different from the node that detected
640  * the error).  Return the number of the csrow that sys_addr maps to, or -1 on
641  * error.
642  */
643 static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
644 {
645 	int csrow;
646 
647 	csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr));
648 
649 	if (csrow == -1)
650 		amd64_mc_err(mci, "Failed to translate InputAddr to csrow for "
651 				  "address 0x%lx\n", (unsigned long)sys_addr);
652 	return csrow;
653 }
654 
655 static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);
656 
657 /*
658  * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
659  * are ECC capable.
660  */
661 static unsigned long amd64_determine_edac_cap(struct amd64_pvt *pvt)
662 {
663 	u8 bit;
664 	unsigned long edac_cap = EDAC_FLAG_NONE;
665 
666 	bit = (pvt->fam > 0xf || pvt->ext_model >= K8_REV_F)
667 		? 19
668 		: 17;
669 
670 	if (pvt->dclr0 & BIT(bit))
671 		edac_cap = EDAC_FLAG_SECDED;
672 
673 	return edac_cap;
674 }
675 
676 static void amd64_debug_display_dimm_sizes(struct amd64_pvt *, u8);
677 
678 static void amd64_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan)
679 {
680 	edac_dbg(1, "F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);
681 
682 	edac_dbg(1, "  DIMM type: %sbuffered; all DIMMs support ECC: %s\n",
683 		 (dclr & BIT(16)) ?  "un" : "",
684 		 (dclr & BIT(19)) ? "yes" : "no");
685 
686 	edac_dbg(1, "  PAR/ERR parity: %s\n",
687 		 (dclr & BIT(8)) ?  "enabled" : "disabled");
688 
689 	if (pvt->fam == 0x10)
690 		edac_dbg(1, "  DCT 128bit mode width: %s\n",
691 			 (dclr & BIT(11)) ?  "128b" : "64b");
692 
693 	edac_dbg(1, "  x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
694 		 (dclr & BIT(12)) ?  "yes" : "no",
695 		 (dclr & BIT(13)) ?  "yes" : "no",
696 		 (dclr & BIT(14)) ?  "yes" : "no",
697 		 (dclr & BIT(15)) ?  "yes" : "no");
698 }
699 
700 /* Display and decode various NB registers for debug purposes. */
701 static void dump_misc_regs(struct amd64_pvt *pvt)
702 {
703 	edac_dbg(1, "F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);
704 
705 	edac_dbg(1, "  NB two channel DRAM capable: %s\n",
706 		 (pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no");
707 
708 	edac_dbg(1, "  ECC capable: %s, ChipKill ECC capable: %s\n",
709 		 (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no",
710 		 (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no");
711 
712 	amd64_dump_dramcfg_low(pvt, pvt->dclr0, 0);
713 
714 	edac_dbg(1, "F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);
715 
716 	edac_dbg(1, "F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, offset: 0x%08x\n",
717 		 pvt->dhar, dhar_base(pvt),
718 		 (pvt->fam == 0xf) ? k8_dhar_offset(pvt)
719 				   : f10_dhar_offset(pvt));
720 
721 	edac_dbg(1, "  DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");
722 
723 	amd64_debug_display_dimm_sizes(pvt, 0);
724 
725 	/* everything below this point is Fam10h and above */
726 	if (pvt->fam == 0xf)
727 		return;
728 
729 	amd64_debug_display_dimm_sizes(pvt, 1);
730 
731 	amd64_info("using %s syndromes.\n", ((pvt->ecc_sym_sz == 8) ? "x8" : "x4"));
732 
733 	/* Only if NOT ganged does dclr1 have valid info */
734 	if (!dct_ganging_enabled(pvt))
735 		amd64_dump_dramcfg_low(pvt, pvt->dclr1, 1);
736 }
737 
738 /*
739  * See BKDG, F2x[1,0][5C:40], F2x[1,0][6C:60]
740  */
741 static void prep_chip_selects(struct amd64_pvt *pvt)
742 {
743 	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
744 		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
745 		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 8;
746 	} else if (pvt->fam == 0x15 && pvt->model >= 0x30) {
747 		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 4;
748 		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 2;
749 	} else {
750 		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
751 		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4;
752 	}
753 }
754 
755 /*
756  * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask registers
757  */
758 static void read_dct_base_mask(struct amd64_pvt *pvt)
759 {
760 	int cs;
761 
762 	prep_chip_selects(pvt);
763 
764 	for_each_chip_select(cs, 0, pvt) {
765 		int reg0   = DCSB0 + (cs * 4);
766 		int reg1   = DCSB1 + (cs * 4);
767 		u32 *base0 = &pvt->csels[0].csbases[cs];
768 		u32 *base1 = &pvt->csels[1].csbases[cs];
769 
770 		if (!amd64_read_dct_pci_cfg(pvt, reg0, base0))
771 			edac_dbg(0, "  DCSB0[%d]=0x%08x reg: F2x%x\n",
772 				 cs, *base0, reg0);
773 
774 		if (pvt->fam == 0xf || dct_ganging_enabled(pvt))
775 			continue;
776 
777 		if (!amd64_read_dct_pci_cfg(pvt, reg1, base1))
778 			edac_dbg(0, "  DCSB1[%d]=0x%08x reg: F2x%x\n",
779 				 cs, *base1, reg1);
780 	}
781 
782 	for_each_chip_select_mask(cs, 0, pvt) {
783 		int reg0   = DCSM0 + (cs * 4);
784 		int reg1   = DCSM1 + (cs * 4);
785 		u32 *mask0 = &pvt->csels[0].csmasks[cs];
786 		u32 *mask1 = &pvt->csels[1].csmasks[cs];
787 
788 		if (!amd64_read_dct_pci_cfg(pvt, reg0, mask0))
789 			edac_dbg(0, "    DCSM0[%d]=0x%08x reg: F2x%x\n",
790 				 cs, *mask0, reg0);
791 
792 		if (pvt->fam == 0xf || dct_ganging_enabled(pvt))
793 			continue;
794 
795 		if (!amd64_read_dct_pci_cfg(pvt, reg1, mask1))
796 			edac_dbg(0, "    DCSM1[%d]=0x%08x reg: F2x%x\n",
797 				 cs, *mask1, reg1);
798 	}
799 }
800 
801 static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt, int cs)
802 {
803 	enum mem_type type;
804 
805 	/* F15h supports only DDR3 */
806 	if (pvt->fam >= 0x15)
807 		type = (pvt->dclr0 & BIT(16)) ?	MEM_DDR3 : MEM_RDDR3;
808 	else if (pvt->fam == 0x10 || pvt->ext_model >= K8_REV_F) {
809 		if (pvt->dchr0 & DDR3_MODE)
810 			type = (pvt->dclr0 & BIT(16)) ?	MEM_DDR3 : MEM_RDDR3;
811 		else
812 			type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
813 	} else {
814 		type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;
815 	}
816 
817 	amd64_info("CS%d: %s\n", cs, edac_mem_types[type]);
818 
819 	return type;
820 }
821 
822 /* Get the number of DCT channels the memory controller is using. */
823 static int k8_early_channel_count(struct amd64_pvt *pvt)
824 {
825 	int flag;
826 
827 	if (pvt->ext_model >= K8_REV_F)
828 		/* RevF (NPT) and later */
829 		flag = pvt->dclr0 & WIDTH_128;
830 	else
831 		/* RevE and earlier */
832 		flag = pvt->dclr0 & REVE_WIDTH_128;
833 
834 	/* not used */
835 	pvt->dclr1 = 0;
836 
837 	return (flag) ? 2 : 1;
838 }
839 
840 /* On F10h and later ErrAddr is MC4_ADDR[47:1] */
841 static u64 get_error_address(struct amd64_pvt *pvt, struct mce *m)
842 {
843 	u64 addr;
844 	u8 start_bit = 1;
845 	u8 end_bit   = 47;
846 
847 	if (pvt->fam == 0xf) {
848 		start_bit = 3;
849 		end_bit   = 39;
850 	}
851 
852 	addr = m->addr & GENMASK(start_bit, end_bit);
853 
854 	/*
855 	 * Erratum 637 workaround
856 	 */
857 	if (pvt->fam == 0x15) {
858 		struct amd64_pvt *pvt;
859 		u64 cc6_base, tmp_addr;
860 		u32 tmp;
861 		u16 mce_nid;
862 		u8 intlv_en;
863 
864 		if ((addr & GENMASK(24, 47)) >> 24 != 0x00fdf7)
865 			return addr;
866 
867 		mce_nid	= amd_get_nb_id(m->extcpu);
868 		pvt	= mcis[mce_nid]->pvt_info;
869 
870 		amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_LIM, &tmp);
871 		intlv_en = tmp >> 21 & 0x7;
872 
873 		/* add [47:27] + 3 trailing bits */
874 		cc6_base  = (tmp & GENMASK(0, 20)) << 3;
875 
876 		/* reverse and add DramIntlvEn */
877 		cc6_base |= intlv_en ^ 0x7;
878 
879 		/* pin at [47:24] */
880 		cc6_base <<= 24;
881 
882 		if (!intlv_en)
883 			return cc6_base | (addr & GENMASK(0, 23));
884 
885 		amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_BASE, &tmp);
886 
887 							/* faster log2 */
888 		tmp_addr  = (addr & GENMASK(12, 23)) << __fls(intlv_en + 1);
889 
890 		/* OR DramIntlvSel into bits [14:12] */
891 		tmp_addr |= (tmp & GENMASK(21, 23)) >> 9;
892 
893 		/* add remaining [11:0] bits from original MC4_ADDR */
894 		tmp_addr |= addr & GENMASK(0, 11);
895 
896 		return cc6_base | tmp_addr;
897 	}
898 
899 	return addr;
900 }
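
/*
 * In other words: on K8 the reported error address keeps bits [39:3] of
 * MC4_ADDR, on F10h and later bits [47:1]; only F15h additionally has to
 * check whether the address fell into the relocated CC6 save area and, if
 * so, rebuild the real DRAM address from the local node base/limit registers
 * (erratum 637).
 */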
901 
902 static struct pci_dev *pci_get_related_function(unsigned int vendor,
903 						unsigned int device,
904 						struct pci_dev *related)
905 {
906 	struct pci_dev *dev = NULL;
907 
908 	while ((dev = pci_get_device(vendor, device, dev))) {
909 		if (pci_domain_nr(dev->bus) == pci_domain_nr(related->bus) &&
910 		    (dev->bus->number == related->bus->number) &&
911 		    (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
912 			break;
913 	}
914 
915 	return dev;
916 }
917 
918 static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
919 {
920 	struct amd_northbridge *nb;
921 	struct pci_dev *f1 = NULL;
922 	unsigned int pci_func;
923 	int off = range << 3;
924 	u32 llim;
925 
926 	amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off,  &pvt->ranges[range].base.lo);
927 	amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo);
928 
929 	if (pvt->fam == 0xf)
930 		return;
931 
932 	if (!dram_rw(pvt, range))
933 		return;
934 
935 	amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off,  &pvt->ranges[range].base.hi);
936 	amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi);
937 
938 	/* F15h: factor in CC6 save area by reading dst node's limit reg */
939 	if (pvt->fam != 0x15)
940 		return;
941 
942 	nb = node_to_amd_nb(dram_dst_node(pvt, range));
943 	if (WARN_ON(!nb))
944 		return;
945 
946 	pci_func = (pvt->model == 0x30) ? PCI_DEVICE_ID_AMD_15H_M30H_NB_F1
947 					: PCI_DEVICE_ID_AMD_15H_NB_F1;
948 
949 	f1 = pci_get_related_function(nb->misc->vendor, pci_func, nb->misc);
950 	if (WARN_ON(!f1))
951 		return;
952 
953 	amd64_read_pci_cfg(f1, DRAM_LOCAL_NODE_LIM, &llim);
954 
955 	pvt->ranges[range].lim.lo &= GENMASK(0, 15);
956 
957 				    /* {[39:27],111b} */
958 	pvt->ranges[range].lim.lo |= ((llim & 0x1fff) << 3 | 0x7) << 16;
959 
960 	pvt->ranges[range].lim.hi &= GENMASK(0, 7);
961 
962 				    /* [47:40] */
963 	pvt->ranges[range].lim.hi |= llim >> 13;
964 
965 	pci_dev_put(f1);
966 }
967 
968 static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
969 				    struct err_info *err)
970 {
971 	struct amd64_pvt *pvt = mci->pvt_info;
972 
973 	error_address_to_page_and_offset(sys_addr, err);
974 
975 	/*
976 	 * Find out which node the error address belongs to. This may be
977 	 * different from the node that detected the error.
978 	 */
979 	err->src_mci = find_mc_by_sys_addr(mci, sys_addr);
980 	if (!err->src_mci) {
981 		amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n",
982 			     (unsigned long)sys_addr);
983 		err->err_code = ERR_NODE;
984 		return;
985 	}
986 
987 	/* Now map the sys_addr to a CSROW */
988 	err->csrow = sys_addr_to_csrow(err->src_mci, sys_addr);
989 	if (err->csrow < 0) {
990 		err->err_code = ERR_CSROW;
991 		return;
992 	}
993 
994 	/* CHIPKILL enabled */
995 	if (pvt->nbcfg & NBCFG_CHIPKILL) {
996 		err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome);
997 		if (err->channel < 0) {
998 			/*
999 			 * Syndrome didn't map, so we don't know which of the
1000 			 * 2 DIMMs is in error. So we need to ID 'both' of them
1001 			 * as suspect.
1002 			 */
1003 			amd64_mc_warn(err->src_mci, "unknown syndrome 0x%04x - "
1004 				      "possible error reporting race\n",
1005 				      err->syndrome);
1006 			err->err_code = ERR_CHANNEL;
1007 			return;
1008 		}
1009 	} else {
1010 		/*
1011 		 * non-chipkill ecc mode
1012 		 *
1013 		 * The k8 documentation is unclear about how to determine the
1014 		 * channel number when using non-chipkill memory.  This method
1015 		 * was obtained from email communication with someone at AMD.
1016 		 * (Wish the email was placed in this comment - norsk)
1017 		 */
1018 		err->channel = ((sys_addr & BIT(3)) != 0);
1019 	}
1020 }
1021 
1022 static int ddr2_cs_size(unsigned i, bool dct_width)
1023 {
1024 	unsigned shift = 0;
1025 
1026 	if (i <= 2)
1027 		shift = i;
1028 	else if (!(i & 0x1))
1029 		shift = i >> 1;
1030 	else
1031 		shift = (i + 1) >> 1;
1032 
1033 	return 128 << (shift + !!dct_width);
1034 }
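
/*
 * A couple of sample values from the mapping above: cs_mode 0 gives 128 MB on
 * a 64-bit DCT (256 MB with a 128-bit DCT), cs_mode 5 gives 1024 MB
 * (2048 MB), i.e. the size doubles for 128-bit wide interfaces.
 */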
1035 
1036 static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1037 				  unsigned cs_mode)
1038 {
1039 	u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
1040 
1041 	if (pvt->ext_model >= K8_REV_F) {
1042 		WARN_ON(cs_mode > 11);
1043 		return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
1044 	}
1045 	else if (pvt->ext_model >= K8_REV_D) {
1046 		unsigned diff;
1047 		WARN_ON(cs_mode > 10);
1048 
1049 		/*
1050 		 * the below calculation, besides trying to win an obfuscated C
1051 		 * contest, maps cs_mode values to DIMM chip select sizes. The
1052 		 * mappings are:
1053 		 *
1054 		 * cs_mode	CS size (MB)
1055 		 * =======	============
1056 		 * 0		32
1057 		 * 1		64
1058 		 * 2		128
1059 		 * 3		128
1060 		 * 4		256
1061 		 * 5		512
1062 		 * 6		256
1063 		 * 7		512
1064 		 * 8		1024
1065 		 * 9		1024
1066 		 * 10		2048
1067 		 *
1068 		 * Basically, it calculates a value with which to shift the
1069 		 * smallest CS size of 32MB.
1070 		 *
1071 		 * ddr[23]_cs_size have a similar purpose.
1072 		 */
1073 		diff = cs_mode/3 + (unsigned)(cs_mode > 5);
1074 
1075 		return 32 << (cs_mode - diff);
1076 	}
1077 	else {
1078 		WARN_ON(cs_mode > 6);
1079 		return 32 << cs_mode;
1080 	}
1081 }
1082 
1083 /*
1084  * Get the number of DCT channels in use.
1085  *
1086  * Return:
1087  *	number of Memory Channels in operation
1088  * Pass back:
1089  *	contents of the DCL0_LOW register
1090  */
1091 static int f1x_early_channel_count(struct amd64_pvt *pvt)
1092 {
1093 	int i, j, channels = 0;
1094 
1095 	/* On F10h, if we are in 128 bit mode, then we are using 2 channels */
1096 	if (pvt->fam == 0x10 && (pvt->dclr0 & WIDTH_128))
1097 		return 2;
1098 
1099 	/*
1100 	 * Need to check if in unganged mode: in that case, there are 2 channels,
1101 	 * but they are not in 128 bit mode and thus the above 'dclr0' status
1102 	 * bit will be OFF.
1103 	 *
1104 	 * Need to check DCT0[0] and DCT1[0] to see if only one of them has
1105 	 * their CSEnable bit on. If so, then SINGLE DIMM case.
1106 	 */
1107 	edac_dbg(0, "Data width is not 128 bits - need more decoding\n");
1108 
1109 	/*
1110 	 * Check DRAM Bank Address Mapping values for each DIMM to see if there
1111 	 * is more than just one DIMM present in unganged mode. Need to check
1112 	 * both controllers since DIMMs can be placed in either one.
1113 	 */
1114 	for (i = 0; i < 2; i++) {
1115 		u32 dbam = (i ? pvt->dbam1 : pvt->dbam0);
1116 
1117 		for (j = 0; j < 4; j++) {
1118 			if (DBAM_DIMM(j, dbam) > 0) {
1119 				channels++;
1120 				break;
1121 			}
1122 		}
1123 	}
1124 
1125 	if (channels > 2)
1126 		channels = 2;
1127 
1128 	amd64_info("MCT channel count: %d\n", channels);
1129 
1130 	return channels;
1131 }
1132 
1133 static int ddr3_cs_size(unsigned i, bool dct_width)
1134 {
1135 	unsigned shift = 0;
1136 	int cs_size = 0;
1137 
1138 	if (i == 0 || i == 3 || i == 4)
1139 		cs_size = -1;
1140 	else if (i <= 2)
1141 		shift = i;
1142 	else if (i == 12)
1143 		shift = 7;
1144 	else if (!(i & 0x1))
1145 		shift = i >> 1;
1146 	else
1147 		shift = (i + 1) >> 1;
1148 
1149 	if (cs_size != -1)
1150 		cs_size = (128 * (1 << !!dct_width)) << shift;
1151 
1152 	return cs_size;
1153 }
1154 
1155 static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1156 				   unsigned cs_mode)
1157 {
1158 	u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
1159 
1160 	WARN_ON(cs_mode > 11);
1161 
1162 	if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
1163 		return ddr3_cs_size(cs_mode, dclr & WIDTH_128);
1164 	else
1165 		return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
1166 }
1167 
1168 /*
1169  * F15h supports only 64bit DCT interfaces
1170  */
1171 static int f15_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1172 				   unsigned cs_mode)
1173 {
1174 	WARN_ON(cs_mode > 12);
1175 
1176 	return ddr3_cs_size(cs_mode, false);
1177 }
1178 
1179 /*
1180  * F16h and F15h model 30h have only limited cs_modes.
1181  */
1182 static int f16_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1183 				unsigned cs_mode)
1184 {
1185 	WARN_ON(cs_mode > 12);
1186 
1187 	if (cs_mode == 6 || cs_mode == 8 ||
1188 	    cs_mode == 9 || cs_mode == 12)
1189 		return -1;
1190 	else
1191 		return ddr3_cs_size(cs_mode, false);
1192 }
1193 
1194 static void read_dram_ctl_register(struct amd64_pvt *pvt)
1195 {
1196 
1197 	if (pvt->fam == 0xf)
1198 		return;
1199 
1200 	if (!amd64_read_dct_pci_cfg(pvt, DCT_SEL_LO, &pvt->dct_sel_lo)) {
1201 		edac_dbg(0, "F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n",
1202 			 pvt->dct_sel_lo, dct_sel_baseaddr(pvt));
1203 
1204 		edac_dbg(0, "  DCTs operate in %s mode\n",
1205 			 (dct_ganging_enabled(pvt) ? "ganged" : "unganged"));
1206 
1207 		if (!dct_ganging_enabled(pvt))
1208 			edac_dbg(0, "  Address range split per DCT: %s\n",
1209 				 (dct_high_range_enabled(pvt) ? "yes" : "no"));
1210 
1211 		edac_dbg(0, "  data interleave for ECC: %s, DRAM cleared since last warm reset: %s\n",
1212 			 (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
1213 			 (dct_memory_cleared(pvt) ? "yes" : "no"));
1214 
1215 		edac_dbg(0, "  channel interleave: %s, "
1216 			 "interleave bits selector: 0x%x\n",
1217 			 (dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
1218 			 dct_sel_interleave_addr(pvt));
1219 	}
1220 
1221 	amd64_read_dct_pci_cfg(pvt, DCT_SEL_HI, &pvt->dct_sel_hi);
1222 }
1223 
1224 /*
1225  * Determine channel (DCT) based on the interleaving mode (see F15h M30h BKDG,
1226  * 2.10.12 Memory Interleaving Modes).
1227  */
1228 static u8 f15_m30h_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
1229 				     u8 intlv_en, int num_dcts_intlv,
1230 				     u32 dct_sel)
1231 {
1232 	u8 channel = 0;
1233 	u8 select;
1234 
1235 	if (!(intlv_en))
1236 		return (u8)(dct_sel);
1237 
1238 	if (num_dcts_intlv == 2) {
1239 		select = (sys_addr >> 8) & 0x3;
1240 		channel = select ? 0x3 : 0;
1241 	} else if (num_dcts_intlv == 4)
1242 		channel = (sys_addr >> 8) & 0x7;
1243 
1244 	return channel;
1245 }
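
/*
 * Note on the two-DCT case above: the selected channel is either 0 or 3,
 * i.e. DCT0 or DCT3, which are the two functional DCTs on F15h M30h (see the
 * aliasing comment in f15_m30h_match_to_this_node() below).
 */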
1246 
1247 /*
1248  * Determine channel (DCT) based on the interleaving mode: F10h BKDG, 2.8.9 Memory
1249  * Interleaving Modes.
1250  */
1251 static u8 f1x_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
1252 				bool hi_range_sel, u8 intlv_en)
1253 {
1254 	u8 dct_sel_high = (pvt->dct_sel_lo >> 1) & 1;
1255 
1256 	if (dct_ganging_enabled(pvt))
1257 		return 0;
1258 
1259 	if (hi_range_sel)
1260 		return dct_sel_high;
1261 
1262 	/*
1263 	 * see F2x110[DctSelIntLvAddr] - channel interleave mode
1264 	 */
1265 	if (dct_interleave_enabled(pvt)) {
1266 		u8 intlv_addr = dct_sel_interleave_addr(pvt);
1267 
1268 		/* return DCT select function: 0=DCT0, 1=DCT1 */
1269 		if (!intlv_addr)
1270 			return sys_addr >> 6 & 1;
1271 
1272 		if (intlv_addr & 0x2) {
1273 			u8 shift = intlv_addr & 0x1 ? 9 : 6;
1274 			u32 temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) % 2;
1275 
1276 			return ((sys_addr >> shift) & 1) ^ temp;
1277 		}
1278 
1279 		return (sys_addr >> (12 + hweight8(intlv_en))) & 1;
1280 	}
1281 
1282 	if (dct_high_range_enabled(pvt))
1283 		return ~dct_sel_high & 1;
1284 
1285 	return 0;
1286 }
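
/*
 * Summary of the interleave decode above (per F2x110[DctSelIntLvAddr]): with
 * intlv_addr == 0 the DCT is picked by sys_addr bit 6; for the hash modes
 * (intlv_addr & 0x2) bit 6 or bit 9 is XORed with the parity of sys_addr
 * bits [20:16]; otherwise bit (12 + number of node interleave bits) selects
 * the DCT.
 */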
1287 
1288 /* Convert the sys_addr to the normalized DCT address */
1289 static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, u8 range,
1290 				 u64 sys_addr, bool hi_rng,
1291 				 u32 dct_sel_base_addr)
1292 {
1293 	u64 chan_off;
1294 	u64 dram_base		= get_dram_base(pvt, range);
1295 	u64 hole_off		= f10_dhar_offset(pvt);
1296 	u64 dct_sel_base_off	= (pvt->dct_sel_hi & 0xFFFFFC00) << 16;
1297 
1298 	if (hi_rng) {
1299 		/*
1300 		 * if
1301 		 * base address of high range is below 4Gb
1302 		 * (bits [47:27] at [31:11])
1303 		 * DRAM address space on this DCT is hoisted above 4Gb	&&
1304 		 * sys_addr > 4Gb
1305 		 *
1306 		 *	remove hole offset from sys_addr
1307 		 * else
1308 		 *	remove high range offset from sys_addr
1309 		 */
1310 		if ((!(dct_sel_base_addr >> 16) ||
1311 		     dct_sel_base_addr < dhar_base(pvt)) &&
1312 		    dhar_valid(pvt) &&
1313 		    (sys_addr >= BIT_64(32)))
1314 			chan_off = hole_off;
1315 		else
1316 			chan_off = dct_sel_base_off;
1317 	} else {
1318 		/*
1319 		 * if
1320 		 * we have a valid hole		&&
1321 		 * sys_addr > 4Gb
1322 		 *
1323 		 *	remove hole
1324 		 * else
1325 		 *	remove dram base to normalize to DCT address
1326 		 */
1327 		if (dhar_valid(pvt) && (sys_addr >= BIT_64(32)))
1328 			chan_off = hole_off;
1329 		else
1330 			chan_off = dram_base;
1331 	}
1332 
1333 	return (sys_addr & GENMASK(6,47)) - (chan_off & GENMASK(23,47));
1334 }
1335 
1336 /*
1337  * Check if the csrow passed in is marked as SPARED; if so, return the new
1338  * spare row.
1339  */
1340 static int f10_process_possible_spare(struct amd64_pvt *pvt, u8 dct, int csrow)
1341 {
1342 	int tmp_cs;
1343 
1344 	if (online_spare_swap_done(pvt, dct) &&
1345 	    csrow == online_spare_bad_dramcs(pvt, dct)) {
1346 
1347 		for_each_chip_select(tmp_cs, dct, pvt) {
1348 			if (chip_select_base(tmp_cs, dct, pvt) & 0x2) {
1349 				csrow = tmp_cs;
1350 				break;
1351 			}
1352 		}
1353 	}
1354 	return csrow;
1355 }
1356 
1357 /*
1358  * Iterate over the DRAM DCT "base" and "mask" registers looking for a
1359  * SystemAddr match on the specified 'ChannelSelect' and 'NodeID'
1360  *
1361  * Return:
1362  *	-EINVAL:  NOT FOUND
1363  *	0..csrow = Chip-Select Row
1364  */
1365 static int f1x_lookup_addr_in_dct(u64 in_addr, u8 nid, u8 dct)
1366 {
1367 	struct mem_ctl_info *mci;
1368 	struct amd64_pvt *pvt;
1369 	u64 cs_base, cs_mask;
1370 	int cs_found = -EINVAL;
1371 	int csrow;
1372 
1373 	mci = mcis[nid];
1374 	if (!mci)
1375 		return cs_found;
1376 
1377 	pvt = mci->pvt_info;
1378 
1379 	edac_dbg(1, "input addr: 0x%llx, DCT: %d\n", in_addr, dct);
1380 
1381 	for_each_chip_select(csrow, dct, pvt) {
1382 		if (!csrow_enabled(csrow, dct, pvt))
1383 			continue;
1384 
1385 		get_cs_base_and_mask(pvt, csrow, dct, &cs_base, &cs_mask);
1386 
1387 		edac_dbg(1, "    CSROW=%d CSBase=0x%llx CSMask=0x%llx\n",
1388 			 csrow, cs_base, cs_mask);
1389 
1390 		cs_mask = ~cs_mask;
1391 
1392 		edac_dbg(1, "    (InputAddr & ~CSMask)=0x%llx (CSBase & ~CSMask)=0x%llx\n",
1393 			 (in_addr & cs_mask), (cs_base & cs_mask));
1394 
1395 		if ((in_addr & cs_mask) == (cs_base & cs_mask)) {
1396 			if (pvt->fam == 0x15 && pvt->model >= 0x30) {
1397 				cs_found =  csrow;
1398 				break;
1399 			}
1400 			cs_found = f10_process_possible_spare(pvt, dct, csrow);
1401 
1402 			edac_dbg(1, " MATCH csrow=%d\n", cs_found);
1403 			break;
1404 		}
1405 	}
1406 	return cs_found;
1407 }
1408 
1409 /*
1410  * See F2x10C. Non-interleaved graphics framebuffer memory under the 16G is
1411  * swapped with a region located at the bottom of memory so that the GPU can use
1412  * the interleaved region and thus two channels.
1413  */
1414 static u64 f1x_swap_interleaved_region(struct amd64_pvt *pvt, u64 sys_addr)
1415 {
1416 	u32 swap_reg, swap_base, swap_limit, rgn_size, tmp_addr;
1417 
1418 	if (pvt->fam == 0x10) {
1419 		/* only revC3 and revE have that feature */
1420 		if (pvt->model < 4 || (pvt->model < 0xa && pvt->stepping < 3))
1421 			return sys_addr;
1422 	}
1423 
1424 	amd64_read_dct_pci_cfg(pvt, SWAP_INTLV_REG, &swap_reg);
1425 
1426 	if (!(swap_reg & 0x1))
1427 		return sys_addr;
1428 
1429 	swap_base	= (swap_reg >> 3) & 0x7f;
1430 	swap_limit	= (swap_reg >> 11) & 0x7f;
1431 	rgn_size	= (swap_reg >> 20) & 0x7f;
1432 	tmp_addr	= sys_addr >> 27;
1433 
1434 	if (!(sys_addr >> 34) &&
1435 	    (((tmp_addr >= swap_base) &&
1436 	     (tmp_addr <= swap_limit)) ||
1437 	     (tmp_addr < rgn_size)))
1438 		return sys_addr ^ (u64)swap_base << 27;
1439 
1440 	return sys_addr;
1441 }
1442 
1443 /* For a given @dram_range, check if @sys_addr falls within it. */
1444 static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
1445 				  u64 sys_addr, int *chan_sel)
1446 {
1447 	int cs_found = -EINVAL;
1448 	u64 chan_addr;
1449 	u32 dct_sel_base;
1450 	u8 channel;
1451 	bool high_range = false;
1452 
1453 	u8 node_id    = dram_dst_node(pvt, range);
1454 	u8 intlv_en   = dram_intlv_en(pvt, range);
1455 	u32 intlv_sel = dram_intlv_sel(pvt, range);
1456 
1457 	edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
1458 		 range, sys_addr, get_dram_limit(pvt, range));
1459 
1460 	if (dhar_valid(pvt) &&
1461 	    dhar_base(pvt) <= sys_addr &&
1462 	    sys_addr < BIT_64(32)) {
1463 		amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
1464 			    sys_addr);
1465 		return -EINVAL;
1466 	}
1467 
1468 	if (intlv_en && (intlv_sel != ((sys_addr >> 12) & intlv_en)))
1469 		return -EINVAL;
1470 
1471 	sys_addr = f1x_swap_interleaved_region(pvt, sys_addr);
1472 
1473 	dct_sel_base = dct_sel_baseaddr(pvt);
1474 
1475 	/*
1476 	 * check whether addresses >= DctSelBaseAddr[47:27] are to be used to
1477 	 * select between DCT0 and DCT1.
1478 	 */
1479 	if (dct_high_range_enabled(pvt) &&
1480 	   !dct_ganging_enabled(pvt) &&
1481 	   ((sys_addr >> 27) >= (dct_sel_base >> 11)))
1482 		high_range = true;
1483 
1484 	channel = f1x_determine_channel(pvt, sys_addr, high_range, intlv_en);
1485 
1486 	chan_addr = f1x_get_norm_dct_addr(pvt, range, sys_addr,
1487 					  high_range, dct_sel_base);
1488 
1489 	/* Remove node interleaving, see F1x120 */
1490 	if (intlv_en)
1491 		chan_addr = ((chan_addr >> (12 + hweight8(intlv_en))) << 12) |
1492 			    (chan_addr & 0xfff);
1493 
1494 	/* remove channel interleave */
1495 	if (dct_interleave_enabled(pvt) &&
1496 	   !dct_high_range_enabled(pvt) &&
1497 	   !dct_ganging_enabled(pvt)) {
1498 
1499 		if (dct_sel_interleave_addr(pvt) != 1) {
1500 			if (dct_sel_interleave_addr(pvt) == 0x3)
1501 				/* hash 9 */
1502 				chan_addr = ((chan_addr >> 10) << 9) |
1503 					     (chan_addr & 0x1ff);
1504 			else
1505 				/* A[6] or hash 6 */
1506 				chan_addr = ((chan_addr >> 7) << 6) |
1507 					     (chan_addr & 0x3f);
1508 		} else
1509 			/* A[12] */
1510 			chan_addr = ((chan_addr >> 13) << 12) |
1511 				     (chan_addr & 0xfff);
1512 	}
1513 
1514 	edac_dbg(1, "   Normalized DCT addr: 0x%llx\n", chan_addr);
1515 
1516 	cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, channel);
1517 
1518 	if (cs_found >= 0)
1519 		*chan_sel = channel;
1520 
1521 	return cs_found;
1522 }
1523 
1524 static int f15_m30h_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
1525 					u64 sys_addr, int *chan_sel)
1526 {
1527 	int cs_found = -EINVAL;
1528 	int num_dcts_intlv = 0;
1529 	u64 chan_addr, chan_offset;
1530 	u64 dct_base, dct_limit;
1531 	u32 dct_cont_base_reg, dct_cont_limit_reg, tmp;
1532 	u8 channel, alias_channel, leg_mmio_hole, dct_sel, dct_offset_en;
1533 
1534 	u64 dhar_offset		= f10_dhar_offset(pvt);
1535 	u8 intlv_addr		= dct_sel_interleave_addr(pvt);
1536 	u8 node_id		= dram_dst_node(pvt, range);
1537 	u8 intlv_en		= dram_intlv_en(pvt, range);
1538 
1539 	amd64_read_pci_cfg(pvt->F1, DRAM_CONT_BASE, &dct_cont_base_reg);
1540 	amd64_read_pci_cfg(pvt->F1, DRAM_CONT_LIMIT, &dct_cont_limit_reg);
1541 
1542 	dct_offset_en		= (u8) ((dct_cont_base_reg >> 3) & BIT(0));
1543 	dct_sel			= (u8) ((dct_cont_base_reg >> 4) & 0x7);
1544 
1545 	edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
1546 		 range, sys_addr, get_dram_limit(pvt, range));
1547 
1548 	if (!(get_dram_base(pvt, range)  <= sys_addr) &&
1549 	    !(get_dram_limit(pvt, range) >= sys_addr))
1550 		return -EINVAL;
1551 
1552 	if (dhar_valid(pvt) &&
1553 	    dhar_base(pvt) <= sys_addr &&
1554 	    sys_addr < BIT_64(32)) {
1555 		amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
1556 			    sys_addr);
1557 		return -EINVAL;
1558 	}
1559 
1560 	/* Verify sys_addr is within DCT Range. */
1561 	dct_base = (u64) dct_sel_baseaddr(pvt);
1562 	dct_limit = (dct_cont_limit_reg >> 11) & 0x1FFF;
1563 
1564 	if (!(dct_cont_base_reg & BIT(0)) &&
1565 	    !(dct_base <= (sys_addr >> 27) &&
1566 	      dct_limit >= (sys_addr >> 27)))
1567 		return -EINVAL;
1568 
1569 	/* Verify number of dct's that participate in channel interleaving. */
1570 	num_dcts_intlv = (int) hweight8(intlv_en);
1571 
1572 	if (!(num_dcts_intlv % 2 == 0) || (num_dcts_intlv > 4))
1573 		return -EINVAL;
1574 
1575 	channel = f15_m30h_determine_channel(pvt, sys_addr, intlv_en,
1576 					     num_dcts_intlv, dct_sel);
1577 
1578 	/* Verify we stay within the MAX number of channels allowed */
1579 	if (channel > 4 || channel < 0)
1580 		return -EINVAL;
1581 
1582 	leg_mmio_hole = (u8) (dct_cont_base_reg >> 1 & BIT(0));
1583 
1584 	/* Get normalized DCT addr */
1585 	if (leg_mmio_hole && (sys_addr >= BIT_64(32)))
1586 		chan_offset = dhar_offset;
1587 	else
1588 		chan_offset = dct_base << 27;
1589 
1590 	chan_addr = sys_addr - chan_offset;
1591 
1592 	/* remove channel interleave */
1593 	if (num_dcts_intlv == 2) {
1594 		if (intlv_addr == 0x4)
1595 			chan_addr = ((chan_addr >> 9) << 8) |
1596 						(chan_addr & 0xff);
1597 		else if (intlv_addr == 0x5)
1598 			chan_addr = ((chan_addr >> 10) << 9) |
1599 						(chan_addr & 0x1ff);
1600 		else
1601 			return -EINVAL;
1602 
1603 	} else if (num_dcts_intlv == 4) {
1604 		if (intlv_addr == 0x4)
1605 			chan_addr = ((chan_addr >> 10) << 8) |
1606 							(chan_addr & 0xff);
1607 		else if (intlv_addr == 0x5)
1608 			chan_addr = ((chan_addr >> 11) << 9) |
1609 							(chan_addr & 0x1ff);
1610 		else
1611 			return -EINVAL;
1612 	}
1613 
1614 	if (dct_offset_en) {
1615 		amd64_read_pci_cfg(pvt->F1,
1616 				   DRAM_CONT_HIGH_OFF + (int) channel * 4,
1617 				   &tmp);
1618 		chan_addr +=  (u64) ((tmp >> 11) & 0xfff) << 27;
1619 	}
1620 
1621 	f15h_select_dct(pvt, channel);
1622 
1623 	edac_dbg(1, "   Normalized DCT addr: 0x%llx\n", chan_addr);
1624 
1625 	/*
1626 	 * Find Chip select:
1627 	 * if channel = 3, then alias it to 1. This is because, in F15 M30h,
1628 	 * there is support for 4 DCTs, but only 2 are currently functional.
1629 	 * They are DCT0 and DCT3. But we have read all registers of DCT3 into
1630 	 * pvt->csels[1]. So we need to use '1' here to get correct info.
1631 	 * Refer F15 M30h BKDG Section 2.10 and 2.10.3 for clarifications.
1632 	 */
1633 	alias_channel =  (channel == 3) ? 1 : channel;
1634 
1635 	cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, alias_channel);
1636 
1637 	if (cs_found >= 0)
1638 		*chan_sel = alias_channel;
1639 
1640 	return cs_found;
1641 }
1642 
1643 static int f1x_translate_sysaddr_to_cs(struct amd64_pvt *pvt,
1644 					u64 sys_addr,
1645 					int *chan_sel)
1646 {
1647 	int cs_found = -EINVAL;
1648 	unsigned range;
1649 
1650 	for (range = 0; range < DRAM_RANGES; range++) {
1651 		if (!dram_rw(pvt, range))
1652 			continue;
1653 
1654 		if (pvt->fam == 0x15 && pvt->model >= 0x30)
1655 			cs_found = f15_m30h_match_to_this_node(pvt, range,
1656 							       sys_addr,
1657 							       chan_sel);
1658 
1659 		else if ((get_dram_base(pvt, range)  <= sys_addr) &&
1660 			 (get_dram_limit(pvt, range) >= sys_addr)) {
1661 			cs_found = f1x_match_to_this_node(pvt, range,
1662 							  sys_addr, chan_sel);
1663 			if (cs_found >= 0)
1664 				break;
1665 		}
1666 	}
1667 	return cs_found;
1668 }
1669 
1670 /*
1671  * For reference see "2.8.5 Routing DRAM Requests" in F10 BKDG. This code maps
1672  * a @sys_addr to NodeID, DCT (channel) and chip select (CSROW).
1673  *
1674  * The @sys_addr is usually an error address received from the hardware
1675  * (MCX_ADDR).
1676  */
1677 static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
1678 				     struct err_info *err)
1679 {
1680 	struct amd64_pvt *pvt = mci->pvt_info;
1681 
1682 	error_address_to_page_and_offset(sys_addr, err);
1683 
1684 	err->csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &err->channel);
1685 	if (err->csrow < 0) {
1686 		err->err_code = ERR_CSROW;
1687 		return;
1688 	}
1689 
1690 	/*
1691 	 * We need the syndromes for channel detection only when we're
1692 	 * ganged. Otherwise @chan should already contain the channel at
1693 	 * this point.
1694 	 */
1695 	if (dct_ganging_enabled(pvt))
1696 		err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome);
1697 }
1698 
1699 /*
1700  * debug routine to display the memory sizes of all logical DIMMs and its
1701  * CSROWs
1702  */
1703 static void amd64_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
1704 {
1705 	int dimm, size0, size1;
1706 	u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases;
1707 	u32 dbam  = ctrl ? pvt->dbam1 : pvt->dbam0;
1708 
1709 	if (pvt->fam == 0xf) {
1710 		/* K8 families < revF not supported yet */
1711 	       if (pvt->ext_model < K8_REV_F)
1712 			return;
1713 	       else
1714 		       WARN_ON(ctrl != 0);
1715 	}
1716 
1717 	dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1 : pvt->dbam0;
1718 	dcsb = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->csels[1].csbases
1719 						   : pvt->csels[0].csbases;
1720 
1721 	edac_dbg(1, "F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n",
1722 		 ctrl, dbam);
1723 
1724 	edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);
1725 
1726 	/* Dump memory sizes for DIMM and its CSROWs */
1727 	for (dimm = 0; dimm < 4; dimm++) {
1728 
1729 		size0 = 0;
1730 		if (dcsb[dimm*2] & DCSB_CS_ENABLE)
1731 			size0 = pvt->ops->dbam_to_cs(pvt, ctrl,
1732 						     DBAM_DIMM(dimm, dbam));
1733 
1734 		size1 = 0;
1735 		if (dcsb[dimm*2 + 1] & DCSB_CS_ENABLE)
1736 			size1 = pvt->ops->dbam_to_cs(pvt, ctrl,
1737 						     DBAM_DIMM(dimm, dbam));
1738 
1739 		amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
1740 				dimm * 2,     size0,
1741 				dimm * 2 + 1, size1);
1742 	}
1743 }
1744 
1745 static struct amd64_family_type amd64_family_types[] = {
1746 	[K8_CPUS] = {
1747 		.ctl_name = "K8",
1748 		.f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
1749 		.f3_id = PCI_DEVICE_ID_AMD_K8_NB_MISC,
1750 		.ops = {
1751 			.early_channel_count	= k8_early_channel_count,
1752 			.map_sysaddr_to_csrow	= k8_map_sysaddr_to_csrow,
1753 			.dbam_to_cs		= k8_dbam_to_chip_select,
1754 			.read_dct_pci_cfg	= k8_read_dct_pci_cfg,
1755 		}
1756 	},
1757 	[F10_CPUS] = {
1758 		.ctl_name = "F10h",
1759 		.f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP,
1760 		.f3_id = PCI_DEVICE_ID_AMD_10H_NB_MISC,
1761 		.ops = {
1762 			.early_channel_count	= f1x_early_channel_count,
1763 			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
1764 			.dbam_to_cs		= f10_dbam_to_chip_select,
1765 			.read_dct_pci_cfg	= f10_read_dct_pci_cfg,
1766 		}
1767 	},
1768 	[F15_CPUS] = {
1769 		.ctl_name = "F15h",
1770 		.f1_id = PCI_DEVICE_ID_AMD_15H_NB_F1,
1771 		.f3_id = PCI_DEVICE_ID_AMD_15H_NB_F3,
1772 		.ops = {
1773 			.early_channel_count	= f1x_early_channel_count,
1774 			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
1775 			.dbam_to_cs		= f15_dbam_to_chip_select,
1776 			.read_dct_pci_cfg	= f15_read_dct_pci_cfg,
1777 		}
1778 	},
1779 	[F15_M30H_CPUS] = {
1780 		.ctl_name = "F15h_M30h",
1781 		.f1_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1,
1782 		.f3_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F3,
1783 		.ops = {
1784 			.early_channel_count	= f1x_early_channel_count,
1785 			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
1786 			.dbam_to_cs		= f16_dbam_to_chip_select,
1787 			.read_dct_pci_cfg	= f15_read_dct_pci_cfg,
1788 		}
1789 	},
1790 	[F16_CPUS] = {
1791 		.ctl_name = "F16h",
1792 		.f1_id = PCI_DEVICE_ID_AMD_16H_NB_F1,
1793 		.f3_id = PCI_DEVICE_ID_AMD_16H_NB_F3,
1794 		.ops = {
1795 			.early_channel_count	= f1x_early_channel_count,
1796 			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
1797 			.dbam_to_cs		= f16_dbam_to_chip_select,
1798 			.read_dct_pci_cfg	= f10_read_dct_pci_cfg,
1799 		}
1800 	},
1801 };
1802 
1803 /*
1804  * These are tables of eigenvectors (one per line) which can be used for the
1805  * construction of the syndrome tables. The modified syndrome search algorithm
1806  * uses those to find the symbol in error and thus the DIMM.
1807  *
1808  * Algorithm courtesy of Ross LaFetra from AMD.
1809  */
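/*
 * decode_syndrome() below treats each table row (ecc_sym_sz consecutive
 * entries: 4 vectors per symbol for x4 syndromes, 8 for x8) as the
 * eigenvectors of one error symbol, so x4_vectors describes 36 symbols and
 * x8_vectors 19.
 */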
1810 static const u16 x4_vectors[] = {
1811 	0x2f57, 0x1afe, 0x66cc, 0xdd88,
1812 	0x11eb, 0x3396, 0x7f4c, 0xeac8,
1813 	0x0001, 0x0002, 0x0004, 0x0008,
1814 	0x1013, 0x3032, 0x4044, 0x8088,
1815 	0x106b, 0x30d6, 0x70fc, 0xe0a8,
1816 	0x4857, 0xc4fe, 0x13cc, 0x3288,
1817 	0x1ac5, 0x2f4a, 0x5394, 0xa1e8,
1818 	0x1f39, 0x251e, 0xbd6c, 0x6bd8,
1819 	0x15c1, 0x2a42, 0x89ac, 0x4758,
1820 	0x2b03, 0x1602, 0x4f0c, 0xca08,
1821 	0x1f07, 0x3a0e, 0x6b04, 0xbd08,
1822 	0x8ba7, 0x465e, 0x244c, 0x1cc8,
1823 	0x2b87, 0x164e, 0x642c, 0xdc18,
1824 	0x40b9, 0x80de, 0x1094, 0x20e8,
1825 	0x27db, 0x1eb6, 0x9dac, 0x7b58,
1826 	0x11c1, 0x2242, 0x84ac, 0x4c58,
1827 	0x1be5, 0x2d7a, 0x5e34, 0xa718,
1828 	0x4b39, 0x8d1e, 0x14b4, 0x28d8,
1829 	0x4c97, 0xc87e, 0x11fc, 0x33a8,
1830 	0x8e97, 0x497e, 0x2ffc, 0x1aa8,
1831 	0x16b3, 0x3d62, 0x4f34, 0x8518,
1832 	0x1e2f, 0x391a, 0x5cac, 0xf858,
1833 	0x1d9f, 0x3b7a, 0x572c, 0xfe18,
1834 	0x15f5, 0x2a5a, 0x5264, 0xa3b8,
1835 	0x1dbb, 0x3b66, 0x715c, 0xe3f8,
1836 	0x4397, 0xc27e, 0x17fc, 0x3ea8,
1837 	0x1617, 0x3d3e, 0x6464, 0xb8b8,
1838 	0x23ff, 0x12aa, 0xab6c, 0x56d8,
1839 	0x2dfb, 0x1ba6, 0x913c, 0x7328,
1840 	0x185d, 0x2ca6, 0x7914, 0x9e28,
1841 	0x171b, 0x3e36, 0x7d7c, 0xebe8,
1842 	0x4199, 0x82ee, 0x19f4, 0x2e58,
1843 	0x4807, 0xc40e, 0x130c, 0x3208,
1844 	0x1905, 0x2e0a, 0x5804, 0xac08,
1845 	0x213f, 0x132a, 0xadfc, 0x5ba8,
1846 	0x19a9, 0x2efe, 0xb5cc, 0x6f88,
1847 };
1848 
1849 static const u16 x8_vectors[] = {
1850 	0x0145, 0x028a, 0x2374, 0x43c8, 0xa1f0, 0x0520, 0x0a40, 0x1480,
1851 	0x0211, 0x0422, 0x0844, 0x1088, 0x01b0, 0x44e0, 0x23c0, 0xed80,
1852 	0x1011, 0x0116, 0x022c, 0x0458, 0x08b0, 0x8c60, 0x2740, 0x4e80,
1853 	0x0411, 0x0822, 0x1044, 0x0158, 0x02b0, 0x2360, 0x46c0, 0xab80,
1854 	0x0811, 0x1022, 0x012c, 0x0258, 0x04b0, 0x4660, 0x8cc0, 0x2780,
1855 	0x2071, 0x40e2, 0xa0c4, 0x0108, 0x0210, 0x0420, 0x0840, 0x1080,
1856 	0x4071, 0x80e2, 0x0104, 0x0208, 0x0410, 0x0820, 0x1040, 0x2080,
1857 	0x8071, 0x0102, 0x0204, 0x0408, 0x0810, 0x1020, 0x2040, 0x4080,
1858 	0x019d, 0x03d6, 0x136c, 0x2198, 0x50b0, 0xb2e0, 0x0740, 0x0e80,
1859 	0x0189, 0x03ea, 0x072c, 0x0e58, 0x1cb0, 0x56e0, 0x37c0, 0xf580,
1860 	0x01fd, 0x0376, 0x06ec, 0x0bb8, 0x1110, 0x2220, 0x4440, 0x8880,
1861 	0x0163, 0x02c6, 0x1104, 0x0758, 0x0eb0, 0x2be0, 0x6140, 0xc280,
1862 	0x02fd, 0x01c6, 0x0b5c, 0x1108, 0x07b0, 0x25a0, 0x8840, 0x6180,
1863 	0x0801, 0x012e, 0x025c, 0x04b8, 0x1370, 0x26e0, 0x57c0, 0xb580,
1864 	0x0401, 0x0802, 0x015c, 0x02b8, 0x22b0, 0x13e0, 0x7140, 0xe280,
1865 	0x0201, 0x0402, 0x0804, 0x01b8, 0x11b0, 0x31a0, 0x8040, 0x7180,
1866 	0x0101, 0x0202, 0x0404, 0x0808, 0x1010, 0x2020, 0x4040, 0x8080,
1867 	0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080,
1868 	0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000,
1869 };
1870 
1871 static int decode_syndrome(u16 syndrome, const u16 *vectors, unsigned num_vecs,
1872 			   unsigned v_dim)
1873 {
1874 	unsigned int i, err_sym;
1875 
1876 	for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) {
1877 		u16 s = syndrome;
1878 		unsigned v_idx =  err_sym * v_dim;
1879 		unsigned v_end = (err_sym + 1) * v_dim;
1880 
1881 		/* walk over all 16 bits of the syndrome */
1882 		for (i = 1; i < (1U << 16); i <<= 1) {
1883 
1884 			/* if bit is set in that eigenvector... */
1885 			if (v_idx < v_end && vectors[v_idx] & i) {
1886 				u16 ev_comp = vectors[v_idx++];
1887 
1888 				/* ... and bit set in the modified syndrome, */
1889 				if (s & i) {
1890 					/* remove it. */
1891 					s ^= ev_comp;
1892 
1893 					if (!s)
1894 						return err_sym;
1895 				}
1896 
1897 			} else if (s & i)
1898 				/* can't get to zero, move to next symbol */
1899 				break;
1900 		}
1901 	}
1902 
1903 	edac_dbg(0, "syndrome(%x) not found\n", syndrome);
1904 	return -1;
1905 }
1906 
1907 static int map_err_sym_to_channel(int err_sym, int sym_size)
1908 {
1909 	if (sym_size == 4)
1910 		switch (err_sym) {
1911 		case 0x20:
1912 		case 0x21:
1913 			return 0;
1914 			break;
1915 		case 0x22:
1916 		case 0x23:
1917 			return 1;
1918 			break;
1919 		default:
1920 			return err_sym >> 4;
1921 			break;
1922 		}
1923 	/* x8 symbols */
1924 	else
1925 		switch (err_sym) {
1926 		/* imaginary bits not in a DIMM */
1927 		case 0x10:
1928 			WARN(1, "Invalid error symbol: 0x%x\n",
1929 				  err_sym);
1930 			return -1;
1931 			break;
1932 
1933 		case 0x11:
1934 			return 0;
1935 			break;
1936 		case 0x12:
1937 			return 1;
1938 			break;
1939 		default:
1940 			return err_sym >> 3;
1941 			break;
1942 		}
1943 	return -1;
1944 }
1945 
1946 static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome)
1947 {
1948 	struct amd64_pvt *pvt = mci->pvt_info;
1949 	int err_sym = -1;
1950 
1951 	if (pvt->ecc_sym_sz == 8)
1952 		err_sym = decode_syndrome(syndrome, x8_vectors,
1953 					  ARRAY_SIZE(x8_vectors),
1954 					  pvt->ecc_sym_sz);
1955 	else if (pvt->ecc_sym_sz == 4)
1956 		err_sym = decode_syndrome(syndrome, x4_vectors,
1957 					  ARRAY_SIZE(x4_vectors),
1958 					  pvt->ecc_sym_sz);
1959 	else {
1960 		amd64_warn("Illegal syndrome type: %u\n", pvt->ecc_sym_sz);
1961 		return err_sym;
1962 	}
1963 
1964 	return map_err_sym_to_channel(err_sym, pvt->ecc_sym_sz);
1965 }
1966 
1967 static void __log_bus_error(struct mem_ctl_info *mci, struct err_info *err,
1968 			    u8 ecc_type)
1969 {
1970 	enum hw_event_mc_err_type err_type;
1971 	const char *string;
1972 
1973 	if (ecc_type == 2)
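	/*
	 * ecc_type is derived from MCi_STATUS in __amd64_decode_bus_error():
	 * 2 denotes a corrected ECC error, 1 an uncorrected one.
	 */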
1974 		err_type = HW_EVENT_ERR_CORRECTED;
1975 	else if (ecc_type == 1)
1976 		err_type = HW_EVENT_ERR_UNCORRECTED;
1977 	else {
1978 		WARN(1, "Something is rotten in the state of Denmark.\n");
1979 		return;
1980 	}
1981 
1982 	switch (err->err_code) {
1983 	case DECODE_OK:
1984 		string = "";
1985 		break;
1986 	case ERR_NODE:
1987 		string = "Failed to map error addr to a node";
1988 		break;
1989 	case ERR_CSROW:
1990 		string = "Failed to map error addr to a csrow";
1991 		break;
1992 	case ERR_CHANNEL:
1993 		string = "unknown syndrome - possible error reporting race";
1994 		break;
1995 	default:
1996 		string = "WTF error";
1997 		break;
1998 	}
1999 
2000 	edac_mc_handle_error(err_type, mci, 1,
2001 			     err->page, err->offset, err->syndrome,
2002 			     err->csrow, err->channel, -1,
2003 			     string, "");
2004 }
2005 
2006 static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci,
2007 					    struct mce *m)
2008 {
2009 	struct amd64_pvt *pvt = mci->pvt_info;
2010 	u8 ecc_type = (m->status >> 45) & 0x3;
2011 	u8 xec = XEC(m->status, 0x1f);
2012 	u16 ec = EC(m->status);
2013 	u64 sys_addr;
2014 	struct err_info err;
2015 
2016 	/* Bail out early if this was an 'observed' error */
2017 	if (PP(ec) == NBSL_PP_OBS)
2018 		return;
2019 
2020 	/* Do only ECC errors */
2021 	if (xec && xec != F10_NBSL_EXT_ERR_ECC)
2022 		return;
2023 
2024 	memset(&err, 0, sizeof(err));
2025 
2026 	sys_addr = get_error_address(pvt, m);
2027 
2028 	if (ecc_type == 2)
2029 		err.syndrome = extract_syndrome(m->status);
2030 
2031 	pvt->ops->map_sysaddr_to_csrow(mci, sys_addr, &err);
2032 
2033 	__log_bus_error(mci, &err, ecc_type);
2034 }
2035 
2036 void amd64_decode_bus_error(int node_id, struct mce *m)
2037 {
2038 	__amd64_decode_bus_error(mcis[node_id], m);
2039 }
2040 
2041 /*
2042  * Use pvt->F2 which contains the F2 CPU PCI device to get the related
2043  * F1 (AddrMap) and F3 (Misc) devices. Return negative value on error.
2044  */
2045 static int reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 f1_id, u16 f3_id)
2046 {
2047 	/* Reserve the ADDRESS MAP Device */
2048 	pvt->F1 = pci_get_related_function(pvt->F2->vendor, f1_id, pvt->F2);
2049 	if (!pvt->F1) {
2050 		amd64_err("error address map device not found: "
2051 			  "vendor %x device 0x%x (broken BIOS?)\n",
2052 			  PCI_VENDOR_ID_AMD, f1_id);
2053 		return -ENODEV;
2054 	}
2055 
2056 	/* Reserve the MISC Device */
2057 	pvt->F3 = pci_get_related_function(pvt->F2->vendor, f3_id, pvt->F2);
2058 	if (!pvt->F3) {
2059 		pci_dev_put(pvt->F1);
2060 		pvt->F1 = NULL;
2061 
2062 		amd64_err("error F3 device not found: "
2063 			  "vendor %x device 0x%x (broken BIOS?)\n",
2064 			  PCI_VENDOR_ID_AMD, f3_id);
2065 
2066 		return -ENODEV;
2067 	}
2068 	edac_dbg(1, "F1: %s\n", pci_name(pvt->F1));
2069 	edac_dbg(1, "F2: %s\n", pci_name(pvt->F2));
2070 	edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));
2071 
2072 	return 0;
2073 }
2074 
2075 static void free_mc_sibling_devs(struct amd64_pvt *pvt)
2076 {
2077 	pci_dev_put(pvt->F1);
2078 	pci_dev_put(pvt->F3);
2079 }
2080 
2081 /*
2082  * Retrieve the hardware registers of the memory controller (this includes the
2083  * 'Address Map' and 'Misc' device regs)
2084  */
2085 static void read_mc_regs(struct amd64_pvt *pvt)
2086 {
2087 	unsigned range;
2088 	u64 msr_val;
2089 	u32 tmp;
2090 
2091 	/*
2092 	 * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
2093 	 * those are Read-As-Zero
2094 	 */
2095 	rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
2096 	edac_dbg(0, "  TOP_MEM:  0x%016llx\n", pvt->top_mem);
2097 
2098 	/* check first whether TOP_MEM2 is enabled */
2099 	rdmsrl(MSR_K8_SYSCFG, msr_val);
2100 	if (msr_val & (1U << 21)) {
2101 		rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
2102 		edac_dbg(0, "  TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
2103 	} else
2104 		edac_dbg(0, "  TOP_MEM2 disabled\n");
2105 
2106 	amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap);
2107 
2108 	read_dram_ctl_register(pvt);
2109 
2110 	for (range = 0; range < DRAM_RANGES; range++) {
2111 		u8 rw;
2112 
2113 		/* read settings for this DRAM range */
2114 		read_dram_base_limit_regs(pvt, range);
2115 
2116 		rw = dram_rw(pvt, range);
2117 		if (!rw)
2118 			continue;
2119 
2120 		edac_dbg(1, "  DRAM range[%d], base: 0x%016llx; limit: 0x%016llx\n",
2121 			 range,
2122 			 get_dram_base(pvt, range),
2123 			 get_dram_limit(pvt, range));
2124 
2125 		edac_dbg(1, "   IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d\n",
2126 			 dram_intlv_en(pvt, range) ? "Enabled" : "Disabled",
2127 			 (rw & 0x1) ? "R" : "-",
2128 			 (rw & 0x2) ? "W" : "-",
2129 			 dram_intlv_sel(pvt, range),
2130 			 dram_dst_node(pvt, range));
2131 	}
2132 
2133 	read_dct_base_mask(pvt);
2134 
2135 	amd64_read_pci_cfg(pvt->F1, DHAR, &pvt->dhar);
2136 	amd64_read_dct_pci_cfg(pvt, DBAM0, &pvt->dbam0);
2137 
2138 	amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare);
2139 
2140 	amd64_read_dct_pci_cfg(pvt, DCLR0, &pvt->dclr0);
2141 	amd64_read_dct_pci_cfg(pvt, DCHR0, &pvt->dchr0);
2142 
2143 	if (!dct_ganging_enabled(pvt)) {
2144 		amd64_read_dct_pci_cfg(pvt, DCLR1, &pvt->dclr1);
2145 		amd64_read_dct_pci_cfg(pvt, DCHR1, &pvt->dchr1);
2146 	}
2147 
2148 	pvt->ecc_sym_sz = 4;
2149 
2150 	if (pvt->fam >= 0x10) {
2151 		amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
2152 		if (pvt->fam != 0x16)
2153 			/* F16h has only DCT0 */
2154 			amd64_read_dct_pci_cfg(pvt, DBAM1, &pvt->dbam1);
2155 
2156 		/* F10h, revD and later can do x8 ECC too */
2157 		if ((pvt->fam > 0x10 || pvt->model > 7) && tmp & BIT(25))
2158 			pvt->ecc_sym_sz = 8;
2159 	}
2160 	dump_misc_regs(pvt);
2161 }
2162 
2163 /*
2164  * NOTE: CPU Revision Dependent code
2165  *
2166  * Input:
2167  *	@csrow_nr ChipSelect Row Number (0..NUM_CHIPSELECTS-1)
2168  *	@pvt: driver private data, providing -->
2169  *			DRAM Bank Address mapping register
2170  *			node_id
2171  *			DCL register where dual_channel_active is
2172  *
2173  * The DBAM register consists of four 4-bit fields:
2174  *
2175  * Bits:	CSROWs
2176  * 0-3		CSROWs 0 and 1
2177  * 4-7		CSROWs 2 and 3
2178  * 8-11		CSROWs 4 and 5
2179  * 12-15	CSROWs 6 and 7
2180  *
2181  * Values range from 0 to 15.
2182  * The meaning of the values depends on CPU revision and dual-channel state;
2183  * see the relevant BKDG for more info.
2184  *
2185  * The memory controller provides for a total of only 8 CSROWs in its current
2186  * architecture. Each "pair" of CSROWs normally represents just one DIMM in
2187  * single-channel mode or two (2) DIMMs in dual-channel mode.
2188  *
2189  * The following code logic collapses the various tables for CSROW based on CPU
2190  * revision.
2191  *
2192  * Returns:
2193  *	The number of PAGE_SIZE pages that the specified CSROW
2194  *	encompasses
2195  *
2196  */
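/*
 * Worked example (assuming 4 KiB pages): if dbam_to_cs() reports 2048 MB for
 * a chip select, nr_pages = 2048 << (20 - 12) = 524288 pages per channel,
 * i.e. 2 GiB.
 */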
2197 static u32 amd64_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
2198 {
2199 	u32 cs_mode, nr_pages;
2200 	u32 dbam = dct ? pvt->dbam1 : pvt->dbam0;
2201 
2202 
2203 	/*
2204 	 * The csrow_nr / 2 below relies on integer division (1/2 == 0): it
2205 	 * collapses each pair of chip selects onto the single DIMM they belong
2206 	 * to. DBAM_DIMM() then shifts and masks the DBAM register to extract
2207 	 * that DIMM's 4-bit cs_mode field (see the register layout described
2208 	 * above).
2209 	 */
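	/* e.g. csrow_nr 5 -> csrow_nr / 2 = 2 -> DBAM bits 11:8 (CSROWs 4 and 5) */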
2210 	cs_mode = DBAM_DIMM(csrow_nr / 2, dbam);
2211 
2212 	nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode) << (20 - PAGE_SHIFT);
2213 
2214 	edac_dbg(0, "csrow: %d, channel: %d, DBAM idx: %d\n",
2215 		    csrow_nr, dct,  cs_mode);
2216 	edac_dbg(0, "nr_pages/channel: %u\n", nr_pages);
2217 
2218 	return nr_pages;
2219 }
2220 
2221 /*
2222  * Initialize the array of csrow attribute instances, based on the values
2223  * from pci config hardware registers.
2224  */
2225 static int init_csrows(struct mem_ctl_info *mci)
2226 {
2227 	struct amd64_pvt *pvt = mci->pvt_info;
2228 	struct csrow_info *csrow;
2229 	struct dimm_info *dimm;
2230 	enum edac_type edac_mode;
2231 	enum mem_type mtype;
2232 	int i, j, empty = 1;
2233 	int nr_pages = 0;
2234 	u32 val;
2235 
2236 	amd64_read_pci_cfg(pvt->F3, NBCFG, &val);
2237 
2238 	pvt->nbcfg = val;
2239 
2240 	edac_dbg(0, "node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
2241 		 pvt->mc_node_id, val,
2242 		 !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE));
2243 
2244 	/*
2245 	 * We iterate over DCT0 here but we look at DCT1 in parallel, if needed.
2246 	 */
2247 	for_each_chip_select(i, 0, pvt) {
2248 		bool row_dct0 = !!csrow_enabled(i, 0, pvt);
2249 		bool row_dct1 = false;
2250 
2251 		if (pvt->fam != 0xf)
2252 			row_dct1 = !!csrow_enabled(i, 1, pvt);
2253 
2254 		if (!row_dct0 && !row_dct1)
2255 			continue;
2256 
2257 		csrow = mci->csrows[i];
2258 		empty = 0;
2259 
2260 		edac_dbg(1, "MC node: %d, csrow: %d\n",
2261 			    pvt->mc_node_id, i);
2262 
2263 		if (row_dct0) {
2264 			nr_pages = amd64_csrow_nr_pages(pvt, 0, i);
2265 			csrow->channels[0]->dimm->nr_pages = nr_pages;
2266 		}
2267 
2268 		/* K8 has only one DCT */
2269 		if (pvt->fam != 0xf && row_dct1) {
2270 			int row_dct1_pages = amd64_csrow_nr_pages(pvt, 1, i);
2271 
2272 			csrow->channels[1]->dimm->nr_pages = row_dct1_pages;
2273 			nr_pages += row_dct1_pages;
2274 		}
2275 
2276 		mtype = amd64_determine_memory_type(pvt, i);
2277 
2278 		edac_dbg(1, "Total csrow%d pages: %u\n", i, nr_pages);
2279 
2280 		/*
2281 		 * determine whether CHIPKILL or JUST ECC or NO ECC is operating
2282 		 */
2283 		if (pvt->nbcfg & NBCFG_ECC_ENABLE)
2284 			edac_mode = (pvt->nbcfg & NBCFG_CHIPKILL) ?
2285 				    EDAC_S4ECD4ED : EDAC_SECDED;
2286 		else
2287 			edac_mode = EDAC_NONE;
2288 
2289 		for (j = 0; j < pvt->channel_count; j++) {
2290 			dimm = csrow->channels[j]->dimm;
2291 			dimm->mtype = mtype;
2292 			dimm->edac_mode = edac_mode;
2293 		}
2294 	}
2295 
2296 	return empty;
2297 }
2298 
2299 /* get all cores on this DCT */
2300 static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, u16 nid)
2301 {
2302 	int cpu;
2303 
2304 	for_each_online_cpu(cpu)
2305 		if (amd_get_nb_id(cpu) == nid)
2306 			cpumask_set_cpu(cpu, mask);
2307 }
2308 
2309 /* check MCG_CTL on all the cpus on this node */
2310 static bool amd64_nb_mce_bank_enabled_on_node(u16 nid)
2311 {
2312 	cpumask_var_t mask;
2313 	int cpu, nbe;
2314 	bool ret = false;
2315 
2316 	if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
2317 		amd64_warn("%s: Error allocating mask\n", __func__);
2318 		return false;
2319 	}
2320 
2321 	get_cpus_on_this_dct_cpumask(mask, nid);
2322 
2323 	rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);
2324 
2325 	for_each_cpu(cpu, mask) {
2326 		struct msr *reg = per_cpu_ptr(msrs, cpu);
2327 		nbe = reg->l & MSR_MCGCTL_NBE;
2328 
2329 		edac_dbg(0, "core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
2330 			 cpu, reg->q,
2331 			 (nbe ? "enabled" : "disabled"));
2332 
2333 		if (!nbe)
2334 			goto out;
2335 	}
2336 	ret = true;
2337 
2338 out:
2339 	free_cpumask_var(mask);
2340 	return ret;
2341 }
2342 
2343 static int toggle_ecc_err_reporting(struct ecc_settings *s, u16 nid, bool on)
2344 {
2345 	cpumask_var_t cmask;
2346 	int cpu;
2347 
2348 	if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
2349 		amd64_warn("%s: error allocating mask\n", __func__);
2350 		return -ENOMEM;
2351 	}
2352 
2353 	get_cpus_on_this_dct_cpumask(cmask, nid);
2354 
2355 	rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
2356 
2357 	for_each_cpu(cpu, cmask) {
2358 
2359 		struct msr *reg = per_cpu_ptr(msrs, cpu);
2360 
2361 		if (on) {
2362 			if (reg->l & MSR_MCGCTL_NBE)
2363 				s->flags.nb_mce_enable = 1;
2364 
2365 			reg->l |= MSR_MCGCTL_NBE;
2366 		} else {
2367 			/*
2368 			 * Turn off NB MCE reporting only when it was off before
2369 			 */
2370 			if (!s->flags.nb_mce_enable)
2371 				reg->l &= ~MSR_MCGCTL_NBE;
2372 		}
2373 	}
2374 	wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
2375 
2376 	free_cpumask_var(cmask);
2377 
2378 	return 0;
2379 }
2380 
2381 static bool enable_ecc_error_reporting(struct ecc_settings *s, u16 nid,
2382 				       struct pci_dev *F3)
2383 {
2384 	bool ret = true;
2385 	u32 value, mask = 0x3;		/* UECC/CECC enable */
2386 
2387 	if (toggle_ecc_err_reporting(s, nid, ON)) {
2388 		amd64_warn("Error enabling ECC reporting over MCGCTL!\n");
2389 		return false;
2390 	}
2391 
2392 	amd64_read_pci_cfg(F3, NBCTL, &value);
2393 
2394 	s->old_nbctl   = value & mask;
2395 	s->nbctl_valid = true;
2396 
2397 	value |= mask;
2398 	amd64_write_pci_cfg(F3, NBCTL, value);
2399 
2400 	amd64_read_pci_cfg(F3, NBCFG, &value);
2401 
2402 	edac_dbg(0, "1: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
2403 		 nid, value, !!(value & NBCFG_ECC_ENABLE));
2404 
2405 	if (!(value & NBCFG_ECC_ENABLE)) {
2406 		amd64_warn("DRAM ECC disabled on this node, enabling...\n");
2407 
2408 		s->flags.nb_ecc_prev = 0;
2409 
2410 		/* Attempt to turn on DRAM ECC Enable */
2411 		value |= NBCFG_ECC_ENABLE;
2412 		amd64_write_pci_cfg(F3, NBCFG, value);
2413 
2414 		amd64_read_pci_cfg(F3, NBCFG, &value);
2415 
2416 		if (!(value & NBCFG_ECC_ENABLE)) {
2417 			amd64_warn("Hardware rejected DRAM ECC enable, "
2418 				   "check memory DIMM configuration.\n");
2419 			ret = false;
2420 		} else {
2421 			amd64_info("Hardware accepted DRAM ECC Enable\n");
2422 		}
2423 	} else {
2424 		s->flags.nb_ecc_prev = 1;
2425 	}
2426 
2427 	edac_dbg(0, "2: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
2428 		 nid, value, !!(value & NBCFG_ECC_ENABLE));
2429 
2430 	return ret;
2431 }
2432 
2433 static void restore_ecc_error_reporting(struct ecc_settings *s, u16 nid,
2434 					struct pci_dev *F3)
2435 {
2436 	u32 value, mask = 0x3;		/* UECC/CECC enable */
2437 
2438 
2439 	if (!s->nbctl_valid)
2440 		return;
2441 
2442 	amd64_read_pci_cfg(F3, NBCTL, &value);
2443 	value &= ~mask;
2444 	value |= s->old_nbctl;
2445 
2446 	amd64_write_pci_cfg(F3, NBCTL, value);
2447 
2448 	/* restore previous BIOS DRAM ECC "off" setting we force-enabled */
2449 	if (!s->flags.nb_ecc_prev) {
2450 		amd64_read_pci_cfg(F3, NBCFG, &value);
2451 		value &= ~NBCFG_ECC_ENABLE;
2452 		amd64_write_pci_cfg(F3, NBCFG, value);
2453 	}
2454 
2455 	/* restore the NB Enable MCGCTL bit */
2456 	if (toggle_ecc_err_reporting(s, nid, OFF))
2457 		amd64_warn("Error restoring NB MCGCTL settings!\n");
2458 }
2459 
2460 /*
2461  * EDAC requires that the BIOS have ECC enabled before
2462  * taking over the processing of ECC errors. A command line
2463  * option allows one to force-enable hardware ECC later in
2464  * enable_ecc_error_reporting().
2465  */
2466 static const char *ecc_msg =
2467 	"ECC disabled in the BIOS or no ECC capability, module will not load.\n"
2468 	" Either enable ECC checking or force module loading by setting "
2469 	"'ecc_enable_override'.\n"
2470 	" (Note that use of the override may cause unknown side effects.)\n";
2471 
2472 static bool ecc_enabled(struct pci_dev *F3, u16 nid)
2473 {
2474 	u32 value;
2475 	u8 ecc_en = 0;
2476 	bool nb_mce_en = false;
2477 
2478 	amd64_read_pci_cfg(F3, NBCFG, &value);
2479 
2480 	ecc_en = !!(value & NBCFG_ECC_ENABLE);
2481 	amd64_info("DRAM ECC %s.\n", (ecc_en ? "enabled" : "disabled"));
2482 
2483 	nb_mce_en = amd64_nb_mce_bank_enabled_on_node(nid);
2484 	if (!nb_mce_en)
2485 		amd64_notice("NB MCE bank disabled, set MSR "
2486 			     "0x%08x[4] on node %d to enable.\n",
2487 			     MSR_IA32_MCG_CTL, nid);
2488 
2489 	if (!ecc_en || !nb_mce_en) {
2490 		amd64_notice("%s", ecc_msg);
2491 		return false;
2492 	}
2493 	return true;
2494 }
2495 
2496 static int set_mc_sysfs_attrs(struct mem_ctl_info *mci)
2497 {
2498 	struct amd64_pvt *pvt = mci->pvt_info;
2499 	int rc;
2500 
2501 	rc = amd64_create_sysfs_dbg_files(mci);
2502 	if (rc < 0)
2503 		return rc;
2504 
2505 	if (pvt->fam >= 0x10) {
2506 		rc = amd64_create_sysfs_inject_files(mci);
2507 		if (rc < 0)
2508 			return rc;
2509 	}
2510 
2511 	return 0;
2512 }
2513 
2514 static void del_mc_sysfs_attrs(struct mem_ctl_info *mci)
2515 {
2516 	struct amd64_pvt *pvt = mci->pvt_info;
2517 
2518 	amd64_remove_sysfs_dbg_files(mci);
2519 
2520 	if (pvt->fam >= 0x10)
2521 		amd64_remove_sysfs_inject_files(mci);
2522 }
2523 
2524 static void setup_mci_misc_attrs(struct mem_ctl_info *mci,
2525 				 struct amd64_family_type *fam)
2526 {
2527 	struct amd64_pvt *pvt = mci->pvt_info;
2528 
2529 	mci->mtype_cap		= MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
2530 	mci->edac_ctl_cap	= EDAC_FLAG_NONE;
2531 
2532 	if (pvt->nbcap & NBCAP_SECDED)
2533 		mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
2534 
2535 	if (pvt->nbcap & NBCAP_CHIPKILL)
2536 		mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
2537 
2538 	mci->edac_cap		= amd64_determine_edac_cap(pvt);
2539 	mci->mod_name		= EDAC_MOD_STR;
2540 	mci->mod_ver		= EDAC_AMD64_VERSION;
2541 	mci->ctl_name		= fam->ctl_name;
2542 	mci->dev_name		= pci_name(pvt->F2);
2543 	mci->ctl_page_to_phys	= NULL;
2544 
2545 	/* memory scrubber interface */
2546 	mci->set_sdram_scrub_rate = amd64_set_scrub_rate;
2547 	mci->get_sdram_scrub_rate = amd64_get_scrub_rate;
2548 }
2549 
2550 /*
2551  * returns a pointer to the family descriptor on success, NULL otherwise.
2552  */
2553 static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt)
2554 {
2555 	struct amd64_family_type *fam_type = NULL;
2556 
2557 	pvt->ext_model  = boot_cpu_data.x86_model >> 4;
2558 	pvt->stepping	= boot_cpu_data.x86_mask;
2559 	pvt->model	= boot_cpu_data.x86_model;
2560 	pvt->fam	= boot_cpu_data.x86;
2561 
2562 	switch (pvt->fam) {
2563 	case 0xf:
2564 		fam_type		= &amd64_family_types[K8_CPUS];
2565 		pvt->ops		= &amd64_family_types[K8_CPUS].ops;
2566 		break;
2567 
2568 	case 0x10:
2569 		fam_type		= &amd64_family_types[F10_CPUS];
2570 		pvt->ops		= &amd64_family_types[F10_CPUS].ops;
2571 		break;
2572 
2573 	case 0x15:
2574 		if (pvt->model == 0x30) {
2575 			fam_type	= &amd64_family_types[F15_M30H_CPUS];
2576 			pvt->ops	= &amd64_family_types[F15_M30H_CPUS].ops;
2577 			break;
2578 		}
2579 
2580 		fam_type		= &amd64_family_types[F15_CPUS];
2581 		pvt->ops		= &amd64_family_types[F15_CPUS].ops;
2582 		break;
2583 
2584 	case 0x16:
2585 		fam_type		= &amd64_family_types[F16_CPUS];
2586 		pvt->ops		= &amd64_family_types[F16_CPUS].ops;
2587 		break;
2588 
2589 	default:
2590 		amd64_err("Unsupported family!\n");
2591 		return NULL;
2592 	}
2593 
2594 	amd64_info("%s %sdetected (node %d).\n", fam_type->ctl_name,
2595 		     (pvt->fam == 0xf ?
2596 				(pvt->ext_model >= K8_REV_F  ? "revF or later "
2597 							     : "revE or earlier ")
2598 				 : ""), pvt->mc_node_id);
2599 	return fam_type;
2600 }
2601 
2602 static int amd64_init_one_instance(struct pci_dev *F2)
2603 {
2604 	struct amd64_pvt *pvt = NULL;
2605 	struct amd64_family_type *fam_type = NULL;
2606 	struct mem_ctl_info *mci = NULL;
2607 	struct edac_mc_layer layers[2];
2608 	int err = 0, ret;
2609 	u16 nid = amd_get_node_id(F2);
2610 
2611 	ret = -ENOMEM;
2612 	pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
2613 	if (!pvt)
2614 		goto err_ret;
2615 
2616 	pvt->mc_node_id	= nid;
2617 	pvt->F2 = F2;
2618 
2619 	ret = -EINVAL;
2620 	fam_type = amd64_per_family_init(pvt);
2621 	if (!fam_type)
2622 		goto err_free;
2623 
2624 	ret = -ENODEV;
2625 	err = reserve_mc_sibling_devs(pvt, fam_type->f1_id, fam_type->f3_id);
2626 	if (err)
2627 		goto err_free;
2628 
2629 	read_mc_regs(pvt);
2630 
2631 	/*
2632 	 * We need to determine how many memory channels there are. Then use
2633 	 * that information for calculating the size of the dynamic instance
2634 	 * tables in the 'mci' structure.
2635 	 */
2636 	ret = -EINVAL;
2637 	pvt->channel_count = pvt->ops->early_channel_count(pvt);
2638 	if (pvt->channel_count < 0)
2639 		goto err_siblings;
2640 
2641 	ret = -ENOMEM;
2642 	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
2643 	layers[0].size = pvt->csels[0].b_cnt;
2644 	layers[0].is_virt_csrow = true;
2645 	layers[1].type = EDAC_MC_LAYER_CHANNEL;
2646 
2647 	/*
2648 	 * Always allocate two channels since we can have setups with DIMMs on
2649 	 * only one channel. Also, this simplifies handling later for the price
2650 	 * of a couple of KBs tops.
2651 	 */
2652 	layers[1].size = 2;
2653 	layers[1].is_virt_csrow = false;
2654 
2655 	mci = edac_mc_alloc(nid, ARRAY_SIZE(layers), layers, 0);
2656 	if (!mci)
2657 		goto err_siblings;
2658 
2659 	mci->pvt_info = pvt;
2660 	mci->pdev = &pvt->F2->dev;
2661 
2662 	setup_mci_misc_attrs(mci, fam_type);
2663 
2664 	if (init_csrows(mci))
2665 		mci->edac_cap = EDAC_FLAG_NONE;
2666 
2667 	ret = -ENODEV;
2668 	if (edac_mc_add_mc(mci)) {
2669 		edac_dbg(1, "failed edac_mc_add_mc()\n");
2670 		goto err_add_mc;
2671 	}
2672 	if (set_mc_sysfs_attrs(mci)) {
2673 		edac_dbg(1, "failed edac_mc_add_mc()\n");
2674 		goto err_add_sysfs;
2675 	}
2676 
2677 	/* register stuff with EDAC MCE */
2678 	if (report_gart_errors)
2679 		amd_report_gart_errors(true);
2680 
2681 	amd_register_ecc_decoder(amd64_decode_bus_error);
2682 
2683 	mcis[nid] = mci;
2684 
2685 	atomic_inc(&drv_instances);
2686 
2687 	return 0;
2688 
2689 err_add_sysfs:
2690 	edac_mc_del_mc(mci->pdev);
2691 err_add_mc:
2692 	edac_mc_free(mci);
2693 
2694 err_siblings:
2695 	free_mc_sibling_devs(pvt);
2696 
2697 err_free:
2698 	kfree(pvt);
2699 
2700 err_ret:
2701 	return ret;
2702 }
2703 
2704 static int amd64_probe_one_instance(struct pci_dev *pdev,
2705 				    const struct pci_device_id *mc_type)
2706 {
2707 	u16 nid = amd_get_node_id(pdev);
2708 	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
2709 	struct ecc_settings *s;
2710 	int ret = 0;
2711 
2712 	ret = pci_enable_device(pdev);
2713 	if (ret < 0) {
2714 		edac_dbg(0, "ret=%d\n", ret);
2715 		return -EIO;
2716 	}
2717 
2718 	ret = -ENOMEM;
2719 	s = kzalloc(sizeof(struct ecc_settings), GFP_KERNEL);
2720 	if (!s)
2721 		goto err_out;
2722 
2723 	ecc_stngs[nid] = s;
2724 
2725 	if (!ecc_enabled(F3, nid)) {
2726 		ret = -ENODEV;
2727 
2728 		if (!ecc_enable_override)
2729 			goto err_enable;
2730 
2731 		amd64_warn("Forcing ECC on!\n");
2732 
2733 		if (!enable_ecc_error_reporting(s, nid, F3))
2734 			goto err_enable;
2735 	}
2736 
2737 	ret = amd64_init_one_instance(pdev);
2738 	if (ret < 0) {
2739 		amd64_err("Error probing instance: %d\n", nid);
2740 		restore_ecc_error_reporting(s, nid, F3);
2741 	}
2742 
2743 	return ret;
2744 
2745 err_enable:
2746 	kfree(s);
2747 	ecc_stngs[nid] = NULL;
2748 
2749 err_out:
2750 	return ret;
2751 }
2752 
2753 static void amd64_remove_one_instance(struct pci_dev *pdev)
2754 {
2755 	struct mem_ctl_info *mci;
2756 	struct amd64_pvt *pvt;
2757 	u16 nid = amd_get_node_id(pdev);
2758 	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
2759 	struct ecc_settings *s = ecc_stngs[nid];
2760 
2761 	mci = find_mci_by_dev(&pdev->dev);
2762 	WARN_ON(!mci);
2763 
2764 	del_mc_sysfs_attrs(mci);
2765 	/* Remove from EDAC CORE tracking list */
2766 	mci = edac_mc_del_mc(&pdev->dev);
2767 	if (!mci)
2768 		return;
2769 
2770 	pvt = mci->pvt_info;
2771 
2772 	restore_ecc_error_reporting(s, nid, F3);
2773 
2774 	free_mc_sibling_devs(pvt);
2775 
2776 	/* unregister from EDAC MCE */
2777 	amd_report_gart_errors(false);
2778 	amd_unregister_ecc_decoder(amd64_decode_bus_error);
2779 
2780 	kfree(ecc_stngs[nid]);
2781 	ecc_stngs[nid] = NULL;
2782 
2783 	/* Free the EDAC CORE resources */
2784 	mci->pvt_info = NULL;
2785 	mcis[nid] = NULL;
2786 
2787 	kfree(pvt);
2788 	edac_mc_free(mci);
2789 }
2790 
2791 /*
2792  * This table is part of the interface for loading drivers for PCI devices. The
2793  * PCI core identifies what devices are on a system during boot, and then
2794  * consults this table to see whether this driver handles a given device it finds.
2795  */
2796 static DEFINE_PCI_DEVICE_TABLE(amd64_pci_table) = {
2797 	{
2798 		.vendor		= PCI_VENDOR_ID_AMD,
2799 		.device		= PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
2800 		.subvendor	= PCI_ANY_ID,
2801 		.subdevice	= PCI_ANY_ID,
2802 		.class		= 0,
2803 		.class_mask	= 0,
2804 	},
2805 	{
2806 		.vendor		= PCI_VENDOR_ID_AMD,
2807 		.device		= PCI_DEVICE_ID_AMD_10H_NB_DRAM,
2808 		.subvendor	= PCI_ANY_ID,
2809 		.subdevice	= PCI_ANY_ID,
2810 		.class		= 0,
2811 		.class_mask	= 0,
2812 	},
2813 	{
2814 		.vendor		= PCI_VENDOR_ID_AMD,
2815 		.device		= PCI_DEVICE_ID_AMD_15H_NB_F2,
2816 		.subvendor	= PCI_ANY_ID,
2817 		.subdevice	= PCI_ANY_ID,
2818 		.class		= 0,
2819 		.class_mask	= 0,
2820 	},
2821 	{
2822 		.vendor		= PCI_VENDOR_ID_AMD,
2823 		.device		= PCI_DEVICE_ID_AMD_15H_M30H_NB_F2,
2824 		.subvendor	= PCI_ANY_ID,
2825 		.subdevice	= PCI_ANY_ID,
2826 		.class		= 0,
2827 		.class_mask	= 0,
2828 	},
2829 	{
2830 		.vendor		= PCI_VENDOR_ID_AMD,
2831 		.device		= PCI_DEVICE_ID_AMD_16H_NB_F2,
2832 		.subvendor	= PCI_ANY_ID,
2833 		.subdevice	= PCI_ANY_ID,
2834 		.class		= 0,
2835 		.class_mask	= 0,
2836 	},
2837 
2838 	{0, }
2839 };
2840 MODULE_DEVICE_TABLE(pci, amd64_pci_table);
2841 
2842 static struct pci_driver amd64_pci_driver = {
2843 	.name		= EDAC_MOD_STR,
2844 	.probe		= amd64_probe_one_instance,
2845 	.remove		= amd64_remove_one_instance,
2846 	.id_table	= amd64_pci_table,
2847 };
2848 
2849 static void setup_pci_device(void)
2850 {
2851 	struct mem_ctl_info *mci;
2852 	struct amd64_pvt *pvt;
2853 
2854 	if (amd64_ctl_pci)
2855 		return;
2856 
2857 	mci = mcis[0];
2858 	if (mci) {
2859 
2860 		pvt = mci->pvt_info;
2861 		amd64_ctl_pci =
2862 			edac_pci_create_generic_ctl(&pvt->F2->dev, EDAC_MOD_STR);
2863 
2864 		if (!amd64_ctl_pci) {
2865 			pr_warning("%s(): Unable to create PCI control\n",
2866 				   __func__);
2867 
2868 			pr_warning("%s(): PCI error report via EDAC not set\n",
2869 				   __func__);
2870 		}
2871 	}
2872 }
2873 
2874 static int __init amd64_edac_init(void)
2875 {
2876 	int err = -ENODEV;
2877 
2878 	printk(KERN_INFO "AMD64 EDAC driver v%s\n", EDAC_AMD64_VERSION);
2879 
2880 	opstate_init();
2881 
2882 	if (amd_cache_northbridges() < 0)
2883 		goto err_ret;
2884 
2885 	err = -ENOMEM;
2886 	mcis	  = kzalloc(amd_nb_num() * sizeof(mcis[0]), GFP_KERNEL);
2887 	ecc_stngs = kzalloc(amd_nb_num() * sizeof(ecc_stngs[0]), GFP_KERNEL);
2888 	if (!(mcis && ecc_stngs))
2889 		goto err_free;
2890 
2891 	msrs = msrs_alloc();
2892 	if (!msrs)
2893 		goto err_free;
2894 
2895 	err = pci_register_driver(&amd64_pci_driver);
2896 	if (err)
2897 		goto err_pci;
2898 
2899 	err = -ENODEV;
2900 	if (!atomic_read(&drv_instances))
2901 		goto err_no_instances;
2902 
2903 	setup_pci_device();
2904 	return 0;
2905 
2906 err_no_instances:
2907 	pci_unregister_driver(&amd64_pci_driver);
2908 
2909 err_pci:
2910 	msrs_free(msrs);
2911 	msrs = NULL;
2912 
2913 err_free:
2914 	kfree(mcis);
2915 	mcis = NULL;
2916 
2917 	kfree(ecc_stngs);
2918 	ecc_stngs = NULL;
2919 
2920 err_ret:
2921 	return err;
2922 }
2923 
2924 static void __exit amd64_edac_exit(void)
2925 {
2926 	if (amd64_ctl_pci)
2927 		edac_pci_release_generic_ctl(amd64_ctl_pci);
2928 
2929 	pci_unregister_driver(&amd64_pci_driver);
2930 
2931 	kfree(ecc_stngs);
2932 	ecc_stngs = NULL;
2933 
2934 	kfree(mcis);
2935 	mcis = NULL;
2936 
2937 	msrs_free(msrs);
2938 	msrs = NULL;
2939 }
2940 
2941 module_init(amd64_edac_init);
2942 module_exit(amd64_edac_exit);
2943 
2944 MODULE_LICENSE("GPL");
2945 MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, "
2946 		"Dave Peterson, Thayne Harbaugh");
2947 MODULE_DESCRIPTION("MC support for AMD64 memory controllers - "
2948 		EDAC_AMD64_VERSION);
2949 
2950 module_param(edac_op_state, int, 0444);
2951 MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
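
/* e.g. 'modprobe amd64_edac edac_op_state=1' selects NMI reporting instead of polling. */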
2952