xref: /openbmc/linux/drivers/edac/amd64_edac.c (revision 752beb5e)
1 #include "amd64_edac.h"
2 #include <asm/amd_nb.h>
3 
4 static struct edac_pci_ctl_info *pci_ctl;
5 
6 static int report_gart_errors;
7 module_param(report_gart_errors, int, 0644);
8 
9 /*
10  * Set by command line parameter. If BIOS has enabled the ECC, this override is
11  * cleared to prevent re-enabling the hardware by this driver.
12  */
13 static int ecc_enable_override;
14 module_param(ecc_enable_override, int, 0644);
15 
16 static struct msr __percpu *msrs;
17 
18 /* Per-node stuff */
19 static struct ecc_settings **ecc_stngs;
20 
21 /* Number of Unified Memory Controllers */
22 static u8 num_umcs;
23 
24 /*
25  * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
26  * bandwidth to a valid bit pattern. The 'set' operation finds the 'matching-
27  * or higher value'.
28  *
29  * FIXME: Produce a better mapping/linearisation.
30  */
31 static const struct scrubrate {
32 	u32 scrubval;		/* bit pattern for scrub rate */
33 	u32 bandwidth;		/* bandwidth consumed (bytes/sec) */
34 } scrubrates[] = {
35 	{ 0x01, 1600000000UL},
36 	{ 0x02, 800000000UL},
37 	{ 0x03, 400000000UL},
38 	{ 0x04, 200000000UL},
39 	{ 0x05, 100000000UL},
40 	{ 0x06, 50000000UL},
41 	{ 0x07, 25000000UL},
42 	{ 0x08, 12284069UL},
43 	{ 0x09, 6274509UL},
44 	{ 0x0A, 3121951UL},
45 	{ 0x0B, 1560975UL},
46 	{ 0x0C, 781440UL},
47 	{ 0x0D, 390720UL},
48 	{ 0x0E, 195300UL},
49 	{ 0x0F, 97650UL},
50 	{ 0x10, 48854UL},
51 	{ 0x11, 24427UL},
52 	{ 0x12, 12213UL},
53 	{ 0x13, 6101UL},
54 	{ 0x14, 3051UL},
55 	{ 0x15, 1523UL},
56 	{ 0x16, 761UL},
57 	{ 0x00, 0UL},        /* scrubbing off */
58 };
59 
60 int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
61 			       u32 *val, const char *func)
62 {
63 	int err = 0;
64 
65 	err = pci_read_config_dword(pdev, offset, val);
66 	if (err)
67 		amd64_warn("%s: error reading F%dx%03x.\n",
68 			   func, PCI_FUNC(pdev->devfn), offset);
69 
70 	return err;
71 }
72 
73 int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
74 				u32 val, const char *func)
75 {
76 	int err = 0;
77 
78 	err = pci_write_config_dword(pdev, offset, val);
79 	if (err)
80 		amd64_warn("%s: error writing to F%dx%03x.\n",
81 			   func, PCI_FUNC(pdev->devfn), offset);
82 
83 	return err;
84 }
85 
86 /*
87  * Select DCT to which PCI cfg accesses are routed
88  */
89 static void f15h_select_dct(struct amd64_pvt *pvt, u8 dct)
90 {
91 	u32 reg = 0;
92 
93 	amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, &reg);
94 	reg &= (pvt->model == 0x30) ? ~3 : ~1;
95 	reg |= dct;
96 	amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg);
97 }
98 
99 /*
100  *
101  * Depending on the family, F2 DCT reads need special handling:
102  *
103  * K8: has a single DCT only and no address offsets >= 0x100
104  *
105  * F10h: each DCT has its own set of regs
106  *	DCT0 -> F2x040..
107  *	DCT1 -> F2x140..
108  *
109  * F16h: has only 1 DCT
110  *
111  * F15h: we select which DCT we access using F1x10C[DctCfgSel]
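 *
 * For example (behaviour taken from the code below): on F10h a DCT1 access
 * to F2x040 is issued as F2x140 (offset + 0x100), unless the DCTs are
 * ganged, in which case reads to most F2x1xx registers simply return 0.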
112  */
113 static inline int amd64_read_dct_pci_cfg(struct amd64_pvt *pvt, u8 dct,
114 					 int offset, u32 *val)
115 {
116 	switch (pvt->fam) {
117 	case 0xf:
118 		if (dct || offset >= 0x100)
119 			return -EINVAL;
120 		break;
121 
122 	case 0x10:
123 		if (dct) {
124 			/*
125 			 * Note: If ganging is enabled, barring the regs
126 			 * F2x[1,0]98 and F2x[1,0]9C, reads to F2x1xx
127 			 * return 0. (cf. Section 2.8.1 F10h BKDG)
128 			 */
129 			if (dct_ganging_enabled(pvt))
130 				return 0;
131 
132 			offset += 0x100;
133 		}
134 		break;
135 
136 	case 0x15:
137 		/*
138 		 * F15h: F2x1xx addresses do not map explicitly to DCT1.
139 		 * We should select which DCT we access using F1x10C[DctCfgSel]
140 		 */
141 		dct = (dct && pvt->model == 0x30) ? 3 : dct;
142 		f15h_select_dct(pvt, dct);
143 		break;
144 
145 	case 0x16:
146 		if (dct)
147 			return -EINVAL;
148 		break;
149 
150 	default:
151 		break;
152 	}
153 	return amd64_read_pci_cfg(pvt->F2, offset, val);
154 }
155 
156 /*
157  * Memory scrubber control interface. For K8, memory scrubbing is handled by
158  * hardware and can involve L2 cache, dcache as well as the main memory. With
159  * F10, this is extended to L3 cache scrubbing on CPU models sporting that
160  * functionality.
161  *
162  * This causes the "units" for the scrubbing speed to vary from 64 byte blocks
163  * (dram) to cache lines. This is nasty, so we will use bandwidth in
164  * bytes/sec for the setting.
165  *
166  * Currently, we only do dram scrubbing. If the scrubbing is done in software on
167  * other archs, we might not have access to the caches directly.
168  */
169 
170 static inline void __f17h_set_scrubval(struct amd64_pvt *pvt, u32 scrubval)
171 {
172 	/*
173 	 * Fam17h supports scrub values between 0x5 and 0x14. Also, the values
174 	 * are shifted down by 0x5, so scrubval 0x5 is written to the register
175 	 * as 0x0, scrubval 0x6 as 0x1, etc.
176 	 */
177 	if (scrubval >= 0x5 && scrubval <= 0x14) {
178 		scrubval -= 0x5;
179 		pci_write_bits32(pvt->F6, F17H_SCR_LIMIT_ADDR, scrubval, 0xF);
180 		pci_write_bits32(pvt->F6, F17H_SCR_BASE_ADDR, 1, 0x1);
181 	} else {
182 		pci_write_bits32(pvt->F6, F17H_SCR_BASE_ADDR, 0, 0x1);
183 	}
184 }
185 /*
186  * Scan the scrub rate mapping table for a close or matching bandwidth value to
187  * issue. If the requested rate is too big, use the largest rate allowed.
188  */
189 static int __set_scrub_rate(struct amd64_pvt *pvt, u32 new_bw, u32 min_rate)
190 {
191 	u32 scrubval;
192 	int i;
193 
194 	/*
195 	 * map the configured rate (new_bw) to a value specific to the AMD64
196 	 * memory controller and apply to register. Search for the first
197 	 * bandwidth entry that does not exceed the setting requested
198 	 * and program that. If at last entry, turn off DRAM scrubbing.
199 	 *
200 	 * If no suitable bandwidth is found, turn off DRAM scrubbing entirely
201 	 * by falling back to the last element in scrubrates[].
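	 *
	 * For illustration (request value assumed): new_bw = 30000000
	 * (30 MB/s) with min_rate 0x5 skips scrubvals 0x01-0x04 and stops at
	 * { 0x07, 25000000 }, the largest listed bandwidth that does not
	 * exceed the request; scrubval 0x07 is programmed and 25000000 is
	 * returned.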
202 	 */
203 	for (i = 0; i < ARRAY_SIZE(scrubrates) - 1; i++) {
204 		/*
205 		 * skip scrub rates which aren't recommended
206 		 * (see F10 BKDG, F3x58)
207 		 */
208 		if (scrubrates[i].scrubval < min_rate)
209 			continue;
210 
211 		if (scrubrates[i].bandwidth <= new_bw)
212 			break;
213 	}
214 
215 	scrubval = scrubrates[i].scrubval;
216 
217 	if (pvt->fam == 0x17 || pvt->fam == 0x18) {
218 		__f17h_set_scrubval(pvt, scrubval);
219 	} else if (pvt->fam == 0x15 && pvt->model == 0x60) {
220 		f15h_select_dct(pvt, 0);
221 		pci_write_bits32(pvt->F2, F15H_M60H_SCRCTRL, scrubval, 0x001F);
222 		f15h_select_dct(pvt, 1);
223 		pci_write_bits32(pvt->F2, F15H_M60H_SCRCTRL, scrubval, 0x001F);
224 	} else {
225 		pci_write_bits32(pvt->F3, SCRCTRL, scrubval, 0x001F);
226 	}
227 
228 	if (scrubval)
229 		return scrubrates[i].bandwidth;
230 
231 	return 0;
232 }
233 
234 static int set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
235 {
236 	struct amd64_pvt *pvt = mci->pvt_info;
237 	u32 min_scrubrate = 0x5;
238 
239 	if (pvt->fam == 0xf)
240 		min_scrubrate = 0x0;
241 
242 	if (pvt->fam == 0x15) {
243 		/* Erratum #505 */
244 		if (pvt->model < 0x10)
245 			f15h_select_dct(pvt, 0);
246 
247 		if (pvt->model == 0x60)
248 			min_scrubrate = 0x6;
249 	}
250 	return __set_scrub_rate(pvt, bw, min_scrubrate);
251 }
252 
253 static int get_scrub_rate(struct mem_ctl_info *mci)
254 {
255 	struct amd64_pvt *pvt = mci->pvt_info;
256 	int i, retval = -EINVAL;
257 	u32 scrubval = 0;
258 
259 	switch (pvt->fam) {
260 	case 0x15:
261 		/* Erratum #505 */
262 		if (pvt->model < 0x10)
263 			f15h_select_dct(pvt, 0);
264 
265 		if (pvt->model == 0x60)
266 			amd64_read_pci_cfg(pvt->F2, F15H_M60H_SCRCTRL, &scrubval);
267 		break;
268 
269 	case 0x17:
270 	case 0x18:
271 		amd64_read_pci_cfg(pvt->F6, F17H_SCR_BASE_ADDR, &scrubval);
272 		if (scrubval & BIT(0)) {
273 			amd64_read_pci_cfg(pvt->F6, F17H_SCR_LIMIT_ADDR, &scrubval);
274 			scrubval &= 0xF;
275 			scrubval += 0x5;
276 		} else {
277 			scrubval = 0;
278 		}
279 		break;
280 
281 	default:
282 		amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
283 		break;
284 	}
285 
286 	scrubval = scrubval & 0x001F;
287 
288 	for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
289 		if (scrubrates[i].scrubval == scrubval) {
290 			retval = scrubrates[i].bandwidth;
291 			break;
292 		}
293 	}
294 	return retval;
295 }
296 
297 /*
298  * returns true if the SysAddr given by sys_addr matches the
299  * DRAM base/limit associated with node_id
300  */
301 static bool base_limit_match(struct amd64_pvt *pvt, u64 sys_addr, u8 nid)
302 {
303 	u64 addr;
304 
305 	/* The K8 treats this as a 40-bit value.  However, bits 63-40 will be
306 	 * all ones if the most significant implemented address bit is 1.
307 	 * Here we discard bits 63-40.  See section 3.4.2 of AMD publication
308 	 * 24592: AMD x86-64 Architecture Programmer's Manual Volume 1
309 	 * Application Programming.
310 	 */
311 	addr = sys_addr & 0x000000ffffffffffull;
312 
313 	return ((addr >= get_dram_base(pvt, nid)) &&
314 		(addr <= get_dram_limit(pvt, nid)));
315 }
316 
317 /*
318  * Attempt to map a SysAddr to a node. On success, return a pointer to the
319  * mem_ctl_info structure for the node that the SysAddr maps to.
320  *
321  * On failure, return NULL.
322  */
323 static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
324 						u64 sys_addr)
325 {
326 	struct amd64_pvt *pvt;
327 	u8 node_id;
328 	u32 intlv_en, bits;
329 
330 	/*
331 	 * Here we use the DRAM Base (section 3.4.4.1) and DRAM Limit (section
332 	 * 3.4.4.2) registers to map the SysAddr to a node ID.
333 	 */
334 	pvt = mci->pvt_info;
335 
336 	/*
337 	 * The value of this field should be the same for all DRAM Base
338 	 * registers.  Therefore we arbitrarily choose to read it from the
339 	 * register for node 0.
340 	 */
341 	intlv_en = dram_intlv_en(pvt, 0);
342 
343 	if (intlv_en == 0) {
344 		for (node_id = 0; node_id < DRAM_RANGES; node_id++) {
345 			if (base_limit_match(pvt, sys_addr, node_id))
346 				goto found;
347 		}
348 		goto err_no_match;
349 	}
350 
351 	if (unlikely((intlv_en != 0x01) &&
352 		     (intlv_en != 0x03) &&
353 		     (intlv_en != 0x07))) {
354 		amd64_warn("DRAM Base[IntlvEn] junk value: 0x%x, BIOS bug?\n", intlv_en);
355 		return NULL;
356 	}
357 
358 	bits = (((u32) sys_addr) >> 12) & intlv_en;
359 
360 	for (node_id = 0; ; ) {
361 		if ((dram_intlv_sel(pvt, node_id) & intlv_en) == bits)
362 			break;	/* intlv_sel field matches */
363 
364 		if (++node_id >= DRAM_RANGES)
365 			goto err_no_match;
366 	}
367 
368 	/* sanity test for sys_addr */
369 	if (unlikely(!base_limit_match(pvt, sys_addr, node_id))) {
370 		amd64_warn("%s: sys_addr 0x%llx falls outside base/limit address "
371 			   "range for node %d with node interleaving enabled.\n",
372 			   __func__, sys_addr, node_id);
373 		return NULL;
374 	}
375 
376 found:
377 	return edac_mc_find((int)node_id);
378 
379 err_no_match:
380 	edac_dbg(2, "sys_addr 0x%lx doesn't match any node\n",
381 		 (unsigned long)sys_addr);
382 
383 	return NULL;
384 }
385 
386 /*
387  * compute the CS base address of the @csrow on the DRAM controller @dct.
388  * For details see F2x[5C:40] in the processor's BKDG
389  */
390 static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
391 				 u64 *base, u64 *mask)
392 {
393 	u64 csbase, csmask, base_bits, mask_bits;
394 	u8 addr_shift;
395 
396 	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
397 		csbase		= pvt->csels[dct].csbases[csrow];
398 		csmask		= pvt->csels[dct].csmasks[csrow];
399 		base_bits	= GENMASK_ULL(31, 21) | GENMASK_ULL(15, 9);
400 		mask_bits	= GENMASK_ULL(29, 21) | GENMASK_ULL(15, 9);
401 		addr_shift	= 4;
402 
403 	/*
404 	 * F16h and F15h, models 30h and later need two addr_shift values:
405 	 * 8 for high and 6 for low (cf. F16h BKDG).
406 	 */
407 	} else if (pvt->fam == 0x16 ||
408 		  (pvt->fam == 0x15 && pvt->model >= 0x30)) {
409 		csbase          = pvt->csels[dct].csbases[csrow];
410 		csmask          = pvt->csels[dct].csmasks[csrow >> 1];
411 
412 		*base  = (csbase & GENMASK_ULL(15,  5)) << 6;
413 		*base |= (csbase & GENMASK_ULL(30, 19)) << 8;
414 
415 		*mask = ~0ULL;
416 		/* poke holes for the csmask */
417 		*mask &= ~((GENMASK_ULL(15, 5)  << 6) |
418 			   (GENMASK_ULL(30, 19) << 8));
419 
420 		*mask |= (csmask & GENMASK_ULL(15, 5))  << 6;
421 		*mask |= (csmask & GENMASK_ULL(30, 19)) << 8;
422 
423 		return;
424 	} else {
425 		csbase		= pvt->csels[dct].csbases[csrow];
426 		csmask		= pvt->csels[dct].csmasks[csrow >> 1];
427 		addr_shift	= 8;
428 
429 		if (pvt->fam == 0x15)
430 			base_bits = mask_bits =
431 				GENMASK_ULL(30, 19) | GENMASK_ULL(13, 5);
432 		else
433 			base_bits = mask_bits =
434 				GENMASK_ULL(28, 19) | GENMASK_ULL(13, 5);
435 	}
436 
437 	*base  = (csbase & base_bits) << addr_shift;
438 
439 	*mask  = ~0ULL;
440 	/* poke holes for the csmask */
441 	*mask &= ~(mask_bits << addr_shift);
442 	/* OR them in */
443 	*mask |= (csmask & mask_bits) << addr_shift;
444 }
445 
446 #define for_each_chip_select(i, dct, pvt) \
447 	for (i = 0; i < pvt->csels[dct].b_cnt; i++)
448 
449 #define chip_select_base(i, dct, pvt) \
450 	pvt->csels[dct].csbases[i]
451 
452 #define for_each_chip_select_mask(i, dct, pvt) \
453 	for (i = 0; i < pvt->csels[dct].m_cnt; i++)
454 
455 #define for_each_umc(i) \
456 	for (i = 0; i < num_umcs; i++)
457 
458 /*
459  * @input_addr is an InputAddr associated with the node given by mci. Return the
460  * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
461  */
462 static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
463 {
464 	struct amd64_pvt *pvt;
465 	int csrow;
466 	u64 base, mask;
467 
468 	pvt = mci->pvt_info;
469 
470 	for_each_chip_select(csrow, 0, pvt) {
471 		if (!csrow_enabled(csrow, 0, pvt))
472 			continue;
473 
474 		get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);
475 
476 		mask = ~mask;
477 
478 		if ((input_addr & mask) == (base & mask)) {
479 			edac_dbg(2, "InputAddr 0x%lx matches csrow %d (node %d)\n",
480 				 (unsigned long)input_addr, csrow,
481 				 pvt->mc_node_id);
482 
483 			return csrow;
484 		}
485 	}
486 	edac_dbg(2, "no matching csrow for InputAddr 0x%lx (MC node %d)\n",
487 		 (unsigned long)input_addr, pvt->mc_node_id);
488 
489 	return -1;
490 }
491 
492 /*
493  * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094)
494  * for the node represented by mci. Info is passed back in *hole_base,
495  * *hole_offset, and *hole_size.  Function returns 0 if info is valid or 1 if
496  * info is invalid. Info may be invalid for either of the following reasons:
497  *
498  * - The revision of the node is not E or greater.  In this case, the DRAM Hole
499  *   Address Register does not exist.
500  *
501  * - The DramHoleValid bit is cleared in the DRAM Hole Address Register,
502  *   indicating that its contents are not valid.
503  *
504  * The values passed back in *hole_base, *hole_offset, and *hole_size are
505  * complete 32-bit values despite the fact that the bitfields in the DHAR
506  * only represent bits 31-24 of the base and offset values.
507  */
508 int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
509 			     u64 *hole_offset, u64 *hole_size)
510 {
511 	struct amd64_pvt *pvt = mci->pvt_info;
512 
513 	/* only revE and later have the DRAM Hole Address Register */
514 	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_E) {
515 		edac_dbg(1, "  revision %d for node %d does not support DHAR\n",
516 			 pvt->ext_model, pvt->mc_node_id);
517 		return 1;
518 	}
519 
520 	/* valid for Fam10h and above */
521 	if (pvt->fam >= 0x10 && !dhar_mem_hoist_valid(pvt)) {
522 		edac_dbg(1, "  Dram Memory Hoisting is DISABLED on this system\n");
523 		return 1;
524 	}
525 
526 	if (!dhar_valid(pvt)) {
527 		edac_dbg(1, "  Dram Memory Hoisting is DISABLED on this node %d\n",
528 			 pvt->mc_node_id);
529 		return 1;
530 	}
531 
532 	/* This node has Memory Hoisting */
533 
534 	/* +------------------+--------------------+--------------------+-----
535 	 * | memory           | DRAM hole          | relocated          |
536 	 * | [0, (x - 1)]     | [x, 0xffffffff]    | addresses from     |
537 	 * |                  |                    | DRAM hole          |
538 	 * |                  |                    | [0x100000000,      |
539 	 * |                  |                    |  (0x100000000+     |
540 	 * |                  |                    |   (0xffffffff-x))] |
541 	 * +------------------+--------------------+--------------------+-----
542 	 *
543 	 * Above is a diagram of physical memory showing the DRAM hole and the
544 	 * relocated addresses from the DRAM hole.  As shown, the DRAM hole
545 	 * starts at address x (the base address) and extends through address
546 	 * 0xffffffff.  The DRAM Hole Address Register (DHAR) relocates the
547 	 * addresses in the hole so that they start at 0x100000000.
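	 *
	 * Illustrative example (base value assumed): if dhar_base() returned
	 * 0xc0000000, *hole_base would be 0xc0000000 and *hole_size would be
	 * 0x100000000 - 0xc0000000 = 0x40000000 (1 GB); the DRAM behind the
	 * hole is then visible again at [0x100000000, 0x13fffffff].
	 * (*hole_offset comes from the family-specific DHAR offset helpers.)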
548 	 */
549 
550 	*hole_base = dhar_base(pvt);
551 	*hole_size = (1ULL << 32) - *hole_base;
552 
553 	*hole_offset = (pvt->fam > 0xf) ? f10_dhar_offset(pvt)
554 					: k8_dhar_offset(pvt);
555 
556 	edac_dbg(1, "  DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
557 		 pvt->mc_node_id, (unsigned long)*hole_base,
558 		 (unsigned long)*hole_offset, (unsigned long)*hole_size);
559 
560 	return 0;
561 }
562 EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info);
563 
564 /*
565  * Return the DramAddr that the SysAddr given by @sys_addr maps to.  It is
566  * assumed that sys_addr maps to the node given by mci.
567  *
568  * The first part of section 3.4.4 (p. 70) shows how the DRAM Base (section
569  * 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers are used to translate a
570  * SysAddr to a DramAddr. If the DRAM Hole Address Register (DHAR) is enabled,
571  * then it is also involved in translating a SysAddr to a DramAddr. Sections
572  * 3.4.8 and 3.5.8.2 describe the DHAR and how it is used for memory hoisting.
573  * These parts of the documentation are unclear. I interpret them as follows:
574  *
575  * When node n receives a SysAddr, it processes the SysAddr as follows:
576  *
577  * 1. It extracts the DRAMBase and DRAMLimit values from the DRAM Base and DRAM
578  *    Limit registers for node n. If the SysAddr is not within the range
579  *    specified by the base and limit values, then node n ignores the Sysaddr
580  *    (since it does not map to node n). Otherwise continue to step 2 below.
581  *
582  * 2. If the DramHoleValid bit of the DHAR for node n is clear, the DHAR is
583  *    disabled so skip to step 3 below. Otherwise see if the SysAddr is within
584  *    the range of relocated addresses (starting at 0x100000000) from the DRAM
585  *    hole. If not, skip to step 3 below. Else get the value of the
586  *    DramHoleOffset field from the DHAR. To obtain the DramAddr, subtract the
587  *    offset defined by this value from the SysAddr.
588  *
589  * 3. Obtain the base address for node n from the DRAMBase field of the DRAM
590  *    Base register for node n. To obtain the DramAddr, subtract the base
591  *    address from the SysAddr, as shown near the start of section 3.4.4 (p.70).
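 *
 * Worked example (all values assumed purely for illustration): with
 * DRAMBase = 0 for node n, a DRAM hole based at 0xc0000000 and a
 * DramHoleOffset of 0x40000000, a SysAddr of 0x120000000 lies in the
 * relocated range starting at 0x100000000, so step 2 gives
 * DramAddr = 0x120000000 - 0x40000000 = 0xe0000000.  A SysAddr of
 * 0x80000000 misses the hole, so step 3 gives
 * DramAddr = 0x80000000 - 0 = 0x80000000.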
592  */
593 static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
594 {
595 	struct amd64_pvt *pvt = mci->pvt_info;
596 	u64 dram_base, hole_base, hole_offset, hole_size, dram_addr;
597 	int ret;
598 
599 	dram_base = get_dram_base(pvt, pvt->mc_node_id);
600 
601 	ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
602 				      &hole_size);
603 	if (!ret) {
604 		if ((sys_addr >= (1ULL << 32)) &&
605 		    (sys_addr < ((1ULL << 32) + hole_size))) {
606 			/* use DHAR to translate SysAddr to DramAddr */
607 			dram_addr = sys_addr - hole_offset;
608 
609 			edac_dbg(2, "using DHAR to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
610 				 (unsigned long)sys_addr,
611 				 (unsigned long)dram_addr);
612 
613 			return dram_addr;
614 		}
615 	}
616 
617 	/*
618 	 * Translate the SysAddr to a DramAddr as shown near the start of
619 	 * section 3.4.4 (p. 70).  Although sys_addr is a 64-bit value, the k8
620 	 * only deals with 40-bit values.  Therefore we discard bits 63-40 of
621 	 * sys_addr below.  If bit 39 of sys_addr is 1 then the bits we
622 	 * discard are all 1s.  Otherwise the bits we discard are all 0s.  See
623 	 * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture
624 	 * Programmer's Manual Volume 1 Application Programming.
625 	 */
626 	dram_addr = (sys_addr & GENMASK_ULL(39, 0)) - dram_base;
627 
628 	edac_dbg(2, "using DRAM Base register to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
629 		 (unsigned long)sys_addr, (unsigned long)dram_addr);
630 	return dram_addr;
631 }
632 
633 /*
634  * @intlv_en is the value of the IntlvEn field from a DRAM Base register
635  * (section 3.4.4.1).  Return the number of bits from a SysAddr that are used
636  * for node interleaving.
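 *
 * E.g. intlv_en == 0x3 (interleaving across four nodes) uses two SysAddr
 * bits, so this returns 2.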
637  */
638 static int num_node_interleave_bits(unsigned intlv_en)
639 {
640 	static const int intlv_shift_table[] = { 0, 1, 0, 2, 0, 0, 0, 3 };
641 	int n;
642 
643 	BUG_ON(intlv_en > 7);
644 	n = intlv_shift_table[intlv_en];
645 	return n;
646 }
647 
648 /* Translate the DramAddr given by @dram_addr to an InputAddr. */
649 static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
650 {
651 	struct amd64_pvt *pvt;
652 	int intlv_shift;
653 	u64 input_addr;
654 
655 	pvt = mci->pvt_info;
656 
657 	/*
658 	 * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
659 	 * concerning translating a DramAddr to an InputAddr.
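	 *
	 * E.g. (address assumed): with intlv_shift == 1 and
	 * dram_addr == 0x12345678, input_addr ==
	 * ((0x12345678 >> 1) & GENMASK_ULL(35, 12)) + 0x678 == 0x091a2678.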
660 	 */
661 	intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
662 	input_addr = ((dram_addr >> intlv_shift) & GENMASK_ULL(35, 12)) +
663 		      (dram_addr & 0xfff);
664 
665 	edac_dbg(2, "  Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
666 		 intlv_shift, (unsigned long)dram_addr,
667 		 (unsigned long)input_addr);
668 
669 	return input_addr;
670 }
671 
672 /*
673  * Translate the SysAddr represented by @sys_addr to an InputAddr.  It is
674  * assumed that @sys_addr maps to the node given by mci.
675  */
676 static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
677 {
678 	u64 input_addr;
679 
680 	input_addr =
681 	    dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr));
682 
683 	edac_dbg(2, "SysAddr 0x%lx translates to InputAddr 0x%lx\n",
684 		 (unsigned long)sys_addr, (unsigned long)input_addr);
685 
686 	return input_addr;
687 }
688 
689 /* Map the Error address to a PAGE and PAGE OFFSET. */
690 static inline void error_address_to_page_and_offset(u64 error_address,
691 						    struct err_info *err)
692 {
693 	err->page = (u32) (error_address >> PAGE_SHIFT);
694 	err->offset = ((u32) error_address) & ~PAGE_MASK;
695 }
696 
697 /*
698  * @sys_addr is an error address (a SysAddr) extracted from the MCA NB Address
699  * Low (section 3.6.4.5) and MCA NB Address High (section 3.6.4.6) registers
700  * of a node that detected an ECC memory error.  mci represents the node that
701  * the error address maps to (possibly different from the node that detected
702  * the error).  Return the number of the csrow that sys_addr maps to, or -1 on
703  * error.
704  */
705 static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
706 {
707 	int csrow;
708 
709 	csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr));
710 
711 	if (csrow == -1)
712 		amd64_mc_err(mci, "Failed to translate InputAddr to csrow for "
713 				  "address 0x%lx\n", (unsigned long)sys_addr);
714 	return csrow;
715 }
716 
717 static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);
718 
719 /*
720  * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
721  * are ECC capable.
722  */
723 static unsigned long determine_edac_cap(struct amd64_pvt *pvt)
724 {
725 	unsigned long edac_cap = EDAC_FLAG_NONE;
726 	u8 bit;
727 
728 	if (pvt->umc) {
729 		u8 i, umc_en_mask = 0, dimm_ecc_en_mask = 0;
730 
731 		for_each_umc(i) {
732 			if (!(pvt->umc[i].sdp_ctrl & UMC_SDP_INIT))
733 				continue;
734 
735 			umc_en_mask |= BIT(i);
736 
737 			/* UMC Configuration bit 12 (DimmEccEn) */
738 			if (pvt->umc[i].umc_cfg & BIT(12))
739 				dimm_ecc_en_mask |= BIT(i);
740 		}
741 
742 		if (umc_en_mask == dimm_ecc_en_mask)
743 			edac_cap = EDAC_FLAG_SECDED;
744 	} else {
745 		bit = (pvt->fam > 0xf || pvt->ext_model >= K8_REV_F)
746 			? 19
747 			: 17;
748 
749 		if (pvt->dclr0 & BIT(bit))
750 			edac_cap = EDAC_FLAG_SECDED;
751 	}
752 
753 	return edac_cap;
754 }
755 
756 static void debug_display_dimm_sizes(struct amd64_pvt *, u8);
757 
758 static void debug_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan)
759 {
760 	edac_dbg(1, "F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);
761 
762 	if (pvt->dram_type == MEM_LRDDR3) {
763 		u32 dcsm = pvt->csels[chan].csmasks[0];
764 		/*
765 		 * It's assumed all LRDIMMs in a DCT are going to be of the
766 		 * same 'type' until proven otherwise. So, use a cs
767 		 * value of '0' here to get the dcsm value.
768 		 */
769 		edac_dbg(1, " LRDIMM %dx rank multiply\n", (dcsm & 0x3));
770 	}
771 
772 	edac_dbg(1, "All DIMMs support ECC:%s\n",
773 		    (dclr & BIT(19)) ? "yes" : "no");
774 
775 
776 	edac_dbg(1, "  PAR/ERR parity: %s\n",
777 		 (dclr & BIT(8)) ?  "enabled" : "disabled");
778 
779 	if (pvt->fam == 0x10)
780 		edac_dbg(1, "  DCT 128bit mode width: %s\n",
781 			 (dclr & BIT(11)) ?  "128b" : "64b");
782 
783 	edac_dbg(1, "  x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
784 		 (dclr & BIT(12)) ?  "yes" : "no",
785 		 (dclr & BIT(13)) ?  "yes" : "no",
786 		 (dclr & BIT(14)) ?  "yes" : "no",
787 		 (dclr & BIT(15)) ?  "yes" : "no");
788 }
789 
790 /*
791  * The Address Mask should be a contiguous set of bits in the non-interleaved
792  * case. So to check for CS interleaving, find the most- and least-significant
793  * bits of the mask, generate a contiguous bitmask, and compare the two.
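 *
 * E.g. (mask values assumed): mask 0x0000fffe gives msb 15, lsb 1 and
 * test_mask GENMASK(15, 1) == 0xfffe, so mask ^ test_mask == 0 and the CS
 * is not interleaved; a non-contiguous mask such as 0x0000ff7e differs
 * from its test_mask (0xfffe) and is reported as interleaved.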
794  */
795 static bool f17_cs_interleaved(struct amd64_pvt *pvt, u8 ctrl, int cs)
796 {
797 	u32 mask = pvt->csels[ctrl].csmasks[cs >> 1];
798 	u32 msb = fls(mask) - 1, lsb = ffs(mask) - 1;
799 	u32 test_mask = GENMASK(msb, lsb);
800 
801 	edac_dbg(1, "mask=0x%08x test_mask=0x%08x\n", mask, test_mask);
802 
803 	return mask ^ test_mask;
804 }
805 
806 static void debug_display_dimm_sizes_df(struct amd64_pvt *pvt, u8 ctrl)
807 {
808 	int dimm, size0, size1, cs0, cs1;
809 
810 	edac_printk(KERN_DEBUG, EDAC_MC, "UMC%d chip selects:\n", ctrl);
811 
812 	for (dimm = 0; dimm < 4; dimm++) {
813 		size0 = 0;
814 		cs0 = dimm * 2;
815 
816 		if (csrow_enabled(cs0, ctrl, pvt))
817 			size0 = pvt->ops->dbam_to_cs(pvt, ctrl, 0, cs0);
818 
819 		size1 = 0;
820 		cs1 = dimm * 2 + 1;
821 
822 		if (csrow_enabled(cs1, ctrl, pvt)) {
823 			/*
824 			 * CS interleaving is only supported if both CSes have
825 			 * the same amount of memory. Because they are
826 			 * interleaved, it will look like both CSes have the
827 			 * full amount of memory. Save the size for both as
828 			 * half the amount we found on CS0, if interleaved.
829 			 */
830 			if (f17_cs_interleaved(pvt, ctrl, cs1))
831 				size1 = size0 = (size0 >> 1);
832 			else
833 				size1 = pvt->ops->dbam_to_cs(pvt, ctrl, 0, cs1);
834 		}
835 
836 		amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
837 				cs0,	size0,
838 				cs1,	size1);
839 	}
840 }
841 
842 static void __dump_misc_regs_df(struct amd64_pvt *pvt)
843 {
844 	struct amd64_umc *umc;
845 	u32 i, tmp, umc_base;
846 
847 	for_each_umc(i) {
848 		umc_base = get_umc_base(i);
849 		umc = &pvt->umc[i];
850 
851 		edac_dbg(1, "UMC%d DIMM cfg: 0x%x\n", i, umc->dimm_cfg);
852 		edac_dbg(1, "UMC%d UMC cfg: 0x%x\n", i, umc->umc_cfg);
853 		edac_dbg(1, "UMC%d SDP ctrl: 0x%x\n", i, umc->sdp_ctrl);
854 		edac_dbg(1, "UMC%d ECC ctrl: 0x%x\n", i, umc->ecc_ctrl);
855 
856 		amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_ECC_BAD_SYMBOL, &tmp);
857 		edac_dbg(1, "UMC%d ECC bad symbol: 0x%x\n", i, tmp);
858 
859 		amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_UMC_CAP, &tmp);
860 		edac_dbg(1, "UMC%d UMC cap: 0x%x\n", i, tmp);
861 		edac_dbg(1, "UMC%d UMC cap high: 0x%x\n", i, umc->umc_cap_hi);
862 
863 		edac_dbg(1, "UMC%d ECC capable: %s, ChipKill ECC capable: %s\n",
864 				i, (umc->umc_cap_hi & BIT(30)) ? "yes" : "no",
865 				    (umc->umc_cap_hi & BIT(31)) ? "yes" : "no");
866 		edac_dbg(1, "UMC%d All DIMMs support ECC: %s\n",
867 				i, (umc->umc_cfg & BIT(12)) ? "yes" : "no");
868 		edac_dbg(1, "UMC%d x4 DIMMs present: %s\n",
869 				i, (umc->dimm_cfg & BIT(6)) ? "yes" : "no");
870 		edac_dbg(1, "UMC%d x16 DIMMs present: %s\n",
871 				i, (umc->dimm_cfg & BIT(7)) ? "yes" : "no");
872 
873 		if (pvt->dram_type == MEM_LRDDR4) {
874 			amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_ADDR_CFG, &tmp);
875 			edac_dbg(1, "UMC%d LRDIMM %dx rank multiply\n",
876 					i, 1 << ((tmp >> 4) & 0x3));
877 		}
878 
879 		debug_display_dimm_sizes_df(pvt, i);
880 	}
881 
882 	edac_dbg(1, "F0x104 (DRAM Hole Address): 0x%08x, base: 0x%08x\n",
883 		 pvt->dhar, dhar_base(pvt));
884 }
885 
886 /* Display and decode various NB registers for debug purposes. */
887 static void __dump_misc_regs(struct amd64_pvt *pvt)
888 {
889 	edac_dbg(1, "F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);
890 
891 	edac_dbg(1, "  NB two channel DRAM capable: %s\n",
892 		 (pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no");
893 
894 	edac_dbg(1, "  ECC capable: %s, ChipKill ECC capable: %s\n",
895 		 (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no",
896 		 (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no");
897 
898 	debug_dump_dramcfg_low(pvt, pvt->dclr0, 0);
899 
900 	edac_dbg(1, "F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);
901 
902 	edac_dbg(1, "F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, offset: 0x%08x\n",
903 		 pvt->dhar, dhar_base(pvt),
904 		 (pvt->fam == 0xf) ? k8_dhar_offset(pvt)
905 				   : f10_dhar_offset(pvt));
906 
907 	debug_display_dimm_sizes(pvt, 0);
908 
909 	/* everything below this point is Fam10h and above */
910 	if (pvt->fam == 0xf)
911 		return;
912 
913 	debug_display_dimm_sizes(pvt, 1);
914 
915 	/* Only if NOT ganged does dclr1 have valid info */
916 	if (!dct_ganging_enabled(pvt))
917 		debug_dump_dramcfg_low(pvt, pvt->dclr1, 1);
918 }
919 
920 /* Display and decode various NB registers for debug purposes. */
921 static void dump_misc_regs(struct amd64_pvt *pvt)
922 {
923 	if (pvt->umc)
924 		__dump_misc_regs_df(pvt);
925 	else
926 		__dump_misc_regs(pvt);
927 
928 	edac_dbg(1, "  DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");
929 
930 	amd64_info("using x%u syndromes.\n", pvt->ecc_sym_sz);
931 }
932 
933 /*
934  * See BKDG, F2x[1,0][5C:40], F2[1,0][6C:60]
935  */
936 static void prep_chip_selects(struct amd64_pvt *pvt)
937 {
938 	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
939 		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
940 		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 8;
941 	} else if (pvt->fam == 0x15 && pvt->model == 0x30) {
942 		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 4;
943 		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 2;
944 	} else {
945 		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
946 		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4;
947 	}
948 }
949 
950 /*
951  * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask registers
952  */
953 static void read_dct_base_mask(struct amd64_pvt *pvt)
954 {
955 	int base_reg0, base_reg1, mask_reg0, mask_reg1, cs;
956 
957 	prep_chip_selects(pvt);
958 
959 	if (pvt->umc) {
960 		base_reg0 = get_umc_base(0) + UMCCH_BASE_ADDR;
961 		base_reg1 = get_umc_base(1) + UMCCH_BASE_ADDR;
962 		mask_reg0 = get_umc_base(0) + UMCCH_ADDR_MASK;
963 		mask_reg1 = get_umc_base(1) + UMCCH_ADDR_MASK;
964 	} else {
965 		base_reg0 = DCSB0;
966 		base_reg1 = DCSB1;
967 		mask_reg0 = DCSM0;
968 		mask_reg1 = DCSM1;
969 	}
970 
971 	for_each_chip_select(cs, 0, pvt) {
972 		int reg0   = base_reg0 + (cs * 4);
973 		int reg1   = base_reg1 + (cs * 4);
974 		u32 *base0 = &pvt->csels[0].csbases[cs];
975 		u32 *base1 = &pvt->csels[1].csbases[cs];
976 
977 		if (pvt->umc) {
978 			if (!amd_smn_read(pvt->mc_node_id, reg0, base0))
979 				edac_dbg(0, "  DCSB0[%d]=0x%08x reg: 0x%x\n",
980 					 cs, *base0, reg0);
981 
982 			if (!amd_smn_read(pvt->mc_node_id, reg1, base1))
983 				edac_dbg(0, "  DCSB1[%d]=0x%08x reg: 0x%x\n",
984 					 cs, *base1, reg1);
985 		} else {
986 			if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, base0))
987 				edac_dbg(0, "  DCSB0[%d]=0x%08x reg: F2x%x\n",
988 					 cs, *base0, reg0);
989 
990 			if (pvt->fam == 0xf)
991 				continue;
992 
993 			if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, base1))
994 				edac_dbg(0, "  DCSB1[%d]=0x%08x reg: F2x%x\n",
995 					 cs, *base1, (pvt->fam == 0x10) ? reg1
996 								: reg0);
997 		}
998 	}
999 
1000 	for_each_chip_select_mask(cs, 0, pvt) {
1001 		int reg0   = mask_reg0 + (cs * 4);
1002 		int reg1   = mask_reg1 + (cs * 4);
1003 		u32 *mask0 = &pvt->csels[0].csmasks[cs];
1004 		u32 *mask1 = &pvt->csels[1].csmasks[cs];
1005 
1006 		if (pvt->umc) {
1007 			if (!amd_smn_read(pvt->mc_node_id, reg0, mask0))
1008 				edac_dbg(0, "    DCSM0[%d]=0x%08x reg: 0x%x\n",
1009 					 cs, *mask0, reg0);
1010 
1011 			if (!amd_smn_read(pvt->mc_node_id, reg1, mask1))
1012 				edac_dbg(0, "    DCSM1[%d]=0x%08x reg: 0x%x\n",
1013 					 cs, *mask1, reg1);
1014 		} else {
1015 			if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, mask0))
1016 				edac_dbg(0, "    DCSM0[%d]=0x%08x reg: F2x%x\n",
1017 					 cs, *mask0, reg0);
1018 
1019 			if (pvt->fam == 0xf)
1020 				continue;
1021 
1022 			if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, mask1))
1023 				edac_dbg(0, "    DCSM1[%d]=0x%08x reg: F2x%x\n",
1024 					 cs, *mask1, (pvt->fam == 0x10) ? reg1
1025 								: reg0);
1026 		}
1027 	}
1028 }
1029 
1030 static void determine_memory_type(struct amd64_pvt *pvt)
1031 {
1032 	u32 dram_ctrl, dcsm;
1033 
1034 	switch (pvt->fam) {
1035 	case 0xf:
1036 		if (pvt->ext_model >= K8_REV_F)
1037 			goto ddr3;
1038 
1039 		pvt->dram_type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;
1040 		return;
1041 
1042 	case 0x10:
1043 		if (pvt->dchr0 & DDR3_MODE)
1044 			goto ddr3;
1045 
1046 		pvt->dram_type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
1047 		return;
1048 
1049 	case 0x15:
1050 		if (pvt->model < 0x60)
1051 			goto ddr3;
1052 
1053 		/*
1054 		 * Model 0x60 needs special handling:
1055 		 *
1056 		 * We use a Chip Select value of '0' to obtain dcsm.
1057 		 * Theoretically, it is possible to populate LRDIMMs of different
1058 		 * 'Rank' value on a DCT. But this is not the common case. So,
1059 		 * it's reasonable to assume all DIMMs are going to be of the same
1060 		 * 'type' until proven otherwise.
1061 		 */
1062 		amd64_read_dct_pci_cfg(pvt, 0, DRAM_CONTROL, &dram_ctrl);
1063 		dcsm = pvt->csels[0].csmasks[0];
1064 
1065 		if (((dram_ctrl >> 8) & 0x7) == 0x2)
1066 			pvt->dram_type = MEM_DDR4;
1067 		else if (pvt->dclr0 & BIT(16))
1068 			pvt->dram_type = MEM_DDR3;
1069 		else if (dcsm & 0x3)
1070 			pvt->dram_type = MEM_LRDDR3;
1071 		else
1072 			pvt->dram_type = MEM_RDDR3;
1073 
1074 		return;
1075 
1076 	case 0x16:
1077 		goto ddr3;
1078 
1079 	case 0x17:
1080 	case 0x18:
1081 		if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(5))
1082 			pvt->dram_type = MEM_LRDDR4;
1083 		else if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(4))
1084 			pvt->dram_type = MEM_RDDR4;
1085 		else
1086 			pvt->dram_type = MEM_DDR4;
1087 		return;
1088 
1089 	default:
1090 		WARN(1, KERN_ERR "%s: Family??? 0x%x\n", __func__, pvt->fam);
1091 		pvt->dram_type = MEM_EMPTY;
1092 	}
1093 	return;
1094 
1095 ddr3:
1096 	pvt->dram_type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
1097 }
1098 
1099 /* Get the number of DCT channels the memory controller is using. */
1100 static int k8_early_channel_count(struct amd64_pvt *pvt)
1101 {
1102 	int flag;
1103 
1104 	if (pvt->ext_model >= K8_REV_F)
1105 		/* RevF (NPT) and later */
1106 		flag = pvt->dclr0 & WIDTH_128;
1107 	else
1108 		/* RevE and earlier */
1109 		flag = pvt->dclr0 & REVE_WIDTH_128;
1110 
1111 	/* not used */
1112 	pvt->dclr1 = 0;
1113 
1114 	return (flag) ? 2 : 1;
1115 }
1116 
1117 /* On F10h and later ErrAddr is MC4_ADDR[47:1] */
1118 static u64 get_error_address(struct amd64_pvt *pvt, struct mce *m)
1119 {
1120 	u16 mce_nid = amd_get_nb_id(m->extcpu);
1121 	struct mem_ctl_info *mci;
1122 	u8 start_bit = 1;
1123 	u8 end_bit   = 47;
1124 	u64 addr;
1125 
1126 	mci = edac_mc_find(mce_nid);
1127 	if (!mci)
1128 		return 0;
1129 
1130 	pvt = mci->pvt_info;
1131 
1132 	if (pvt->fam == 0xf) {
1133 		start_bit = 3;
1134 		end_bit   = 39;
1135 	}
1136 
1137 	addr = m->addr & GENMASK_ULL(end_bit, start_bit);
1138 
1139 	/*
1140 	 * Erratum 637 workaround
1141 	 */
1142 	if (pvt->fam == 0x15) {
1143 		u64 cc6_base, tmp_addr;
1144 		u32 tmp;
1145 		u8 intlv_en;
1146 
1147 		if ((addr & GENMASK_ULL(47, 24)) >> 24 != 0x00fdf7)
1148 			return addr;
1149 
1150 
1151 		amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_LIM, &tmp);
1152 		intlv_en = tmp >> 21 & 0x7;
1153 
1154 		/* add [47:27] + 3 trailing bits */
1155 		cc6_base  = (tmp & GENMASK_ULL(20, 0)) << 3;
1156 
1157 		/* reverse and add DramIntlvEn */
1158 		cc6_base |= intlv_en ^ 0x7;
1159 
1160 		/* pin at [47:24] */
1161 		cc6_base <<= 24;
1162 
1163 		if (!intlv_en)
1164 			return cc6_base | (addr & GENMASK_ULL(23, 0));
1165 
1166 		amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_BASE, &tmp);
1167 
1168 							/* faster log2 */
1169 		tmp_addr  = (addr & GENMASK_ULL(23, 12)) << __fls(intlv_en + 1);
1170 
1171 		/* OR DramIntlvSel into bits [14:12] */
1172 		tmp_addr |= (tmp & GENMASK_ULL(23, 21)) >> 9;
1173 
1174 		/* add remaining [11:0] bits from original MC4_ADDR */
1175 		tmp_addr |= addr & GENMASK_ULL(11, 0);
1176 
1177 		return cc6_base | tmp_addr;
1178 	}
1179 
1180 	return addr;
1181 }
1182 
1183 static struct pci_dev *pci_get_related_function(unsigned int vendor,
1184 						unsigned int device,
1185 						struct pci_dev *related)
1186 {
1187 	struct pci_dev *dev = NULL;
1188 
1189 	while ((dev = pci_get_device(vendor, device, dev))) {
1190 		if (pci_domain_nr(dev->bus) == pci_domain_nr(related->bus) &&
1191 		    (dev->bus->number == related->bus->number) &&
1192 		    (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
1193 			break;
1194 	}
1195 
1196 	return dev;
1197 }
1198 
1199 static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
1200 {
1201 	struct amd_northbridge *nb;
1202 	struct pci_dev *f1 = NULL;
1203 	unsigned int pci_func;
1204 	int off = range << 3;
1205 	u32 llim;
1206 
1207 	amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off,  &pvt->ranges[range].base.lo);
1208 	amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo);
1209 
1210 	if (pvt->fam == 0xf)
1211 		return;
1212 
1213 	if (!dram_rw(pvt, range))
1214 		return;
1215 
1216 	amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off,  &pvt->ranges[range].base.hi);
1217 	amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi);
1218 
1219 	/* F15h: factor in CC6 save area by reading dst node's limit reg */
1220 	if (pvt->fam != 0x15)
1221 		return;
1222 
1223 	nb = node_to_amd_nb(dram_dst_node(pvt, range));
1224 	if (WARN_ON(!nb))
1225 		return;
1226 
1227 	if (pvt->model == 0x60)
1228 		pci_func = PCI_DEVICE_ID_AMD_15H_M60H_NB_F1;
1229 	else if (pvt->model == 0x30)
1230 		pci_func = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1;
1231 	else
1232 		pci_func = PCI_DEVICE_ID_AMD_15H_NB_F1;
1233 
1234 	f1 = pci_get_related_function(nb->misc->vendor, pci_func, nb->misc);
1235 	if (WARN_ON(!f1))
1236 		return;
1237 
1238 	amd64_read_pci_cfg(f1, DRAM_LOCAL_NODE_LIM, &llim);
1239 
1240 	pvt->ranges[range].lim.lo &= GENMASK_ULL(15, 0);
1241 
1242 				    /* {[39:27],111b} */
1243 	pvt->ranges[range].lim.lo |= ((llim & 0x1fff) << 3 | 0x7) << 16;
1244 
1245 	pvt->ranges[range].lim.hi &= GENMASK_ULL(7, 0);
1246 
1247 				    /* [47:40] */
1248 	pvt->ranges[range].lim.hi |= llim >> 13;
1249 
1250 	pci_dev_put(f1);
1251 }
1252 
1253 static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
1254 				    struct err_info *err)
1255 {
1256 	struct amd64_pvt *pvt = mci->pvt_info;
1257 
1258 	error_address_to_page_and_offset(sys_addr, err);
1259 
1260 	/*
1261 	 * Find out which node the error address belongs to. This may be
1262 	 * different from the node that detected the error.
1263 	 */
1264 	err->src_mci = find_mc_by_sys_addr(mci, sys_addr);
1265 	if (!err->src_mci) {
1266 		amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n",
1267 			     (unsigned long)sys_addr);
1268 		err->err_code = ERR_NODE;
1269 		return;
1270 	}
1271 
1272 	/* Now map the sys_addr to a CSROW */
1273 	err->csrow = sys_addr_to_csrow(err->src_mci, sys_addr);
1274 	if (err->csrow < 0) {
1275 		err->err_code = ERR_CSROW;
1276 		return;
1277 	}
1278 
1279 	/* CHIPKILL enabled */
1280 	if (pvt->nbcfg & NBCFG_CHIPKILL) {
1281 		err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome);
1282 		if (err->channel < 0) {
1283 			/*
1284 			 * Syndrome didn't map, so we don't know which of the
1285 			 * 2 DIMMs is in error. So we need to ID 'both' of them
1286 			 * as suspect.
1287 			 */
1288 			amd64_mc_warn(err->src_mci, "unknown syndrome 0x%04x - "
1289 				      "possible error reporting race\n",
1290 				      err->syndrome);
1291 			err->err_code = ERR_CHANNEL;
1292 			return;
1293 		}
1294 	} else {
1295 		/*
1296 		 * non-chipkill ecc mode
1297 		 *
1298 		 * The k8 documentation is unclear about how to determine the
1299 		 * channel number when using non-chipkill memory.  This method
1300 		 * was obtained from email communication with someone at AMD.
1301 		 * (Wish the email was placed in this comment - norsk)
1302 		 */
1303 		err->channel = ((sys_addr & BIT(3)) != 0);
1304 	}
1305 }
1306 
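/*
 * E.g. (illustrative arguments): ddr2_cs_size(5, true): shift = (5 + 1) >> 1
 * = 3, so the result is 128 << (3 + 1) = 2048 (MB).
 */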
1307 static int ddr2_cs_size(unsigned i, bool dct_width)
1308 {
1309 	unsigned shift = 0;
1310 
1311 	if (i <= 2)
1312 		shift = i;
1313 	else if (!(i & 0x1))
1314 		shift = i >> 1;
1315 	else
1316 		shift = (i + 1) >> 1;
1317 
1318 	return 128 << (shift + !!dct_width);
1319 }
1320 
1321 static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1322 				  unsigned cs_mode, int cs_mask_nr)
1323 {
1324 	u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
1325 
1326 	if (pvt->ext_model >= K8_REV_F) {
1327 		WARN_ON(cs_mode > 11);
1328 		return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
1329 	}
1330 	else if (pvt->ext_model >= K8_REV_D) {
1331 		unsigned diff;
1332 		WARN_ON(cs_mode > 10);
1333 
1334 		/*
1335 		 * the below calculation, besides trying to win an obfuscated C
1336 		 * contest, maps cs_mode values to DIMM chip select sizes. The
1337 		 * mappings are:
1338 		 *
1339 		 * cs_mode	CS size (MB)
1340 		 * =======	============
1341 		 * 0		32
1342 		 * 1		64
1343 		 * 2		128
1344 		 * 3		128
1345 		 * 4		256
1346 		 * 5		512
1347 		 * 6		256
1348 		 * 7		512
1349 		 * 8		1024
1350 		 * 9		1024
1351 		 * 10		2048
1352 		 *
1353 		 * Basically, it calculates a value with which to shift the
1354 		 * smallest CS size of 32MB.
1355 		 *
1356 		 * ddr[23]_cs_size have a similar purpose.
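		 *
		 * E.g. cs_mode 7: diff = 7/3 + 1 = 3, so the size is
		 * 32 << (7 - 3) = 512 (MB), matching the table above.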
1357 		 */
1358 		diff = cs_mode/3 + (unsigned)(cs_mode > 5);
1359 
1360 		return 32 << (cs_mode - diff);
1361 	}
1362 	else {
1363 		WARN_ON(cs_mode > 6);
1364 		return 32 << cs_mode;
1365 	}
1366 }
1367 
1368 /*
1369  * Get the number of DCT channels in use.
1370  *
1371  * Return:
1372  *	number of Memory Channels in operation
1373  * Pass back:
1374  *	contents of the DCL0_LOW register
1375  */
1376 static int f1x_early_channel_count(struct amd64_pvt *pvt)
1377 {
1378 	int i, j, channels = 0;
1379 
1380 	/* On F10h, if we are in 128 bit mode, then we are using 2 channels */
1381 	if (pvt->fam == 0x10 && (pvt->dclr0 & WIDTH_128))
1382 		return 2;
1383 
1384 	/*
1385 	 * Need to check if in unganged mode: In such a case, there are 2 channels,
1386 	 * but they are not in 128 bit mode and thus the above 'dclr0' status
1387 	 * bit will be OFF.
1388 	 *
1389 	 * Need to check DCT0[0] and DCT1[0] to see if only one of them has
1390 	 * its CSEnable bit on. If so, then it's the SINGLE DIMM case.
1391 	 */
1392 	edac_dbg(0, "Data width is not 128 bits - need more decoding\n");
1393 
1394 	/*
1395 	 * Check DRAM Bank Address Mapping values for each DIMM to see if there
1396 	 * is more than just one DIMM present in unganged mode. Need to check
1397 	 * both controllers since DIMMs can be placed in either one.
1398 	 */
1399 	for (i = 0; i < 2; i++) {
1400 		u32 dbam = (i ? pvt->dbam1 : pvt->dbam0);
1401 
1402 		for (j = 0; j < 4; j++) {
1403 			if (DBAM_DIMM(j, dbam) > 0) {
1404 				channels++;
1405 				break;
1406 			}
1407 		}
1408 	}
1409 
1410 	if (channels > 2)
1411 		channels = 2;
1412 
1413 	amd64_info("MCT channel count: %d\n", channels);
1414 
1415 	return channels;
1416 }
1417 
1418 static int f17_early_channel_count(struct amd64_pvt *pvt)
1419 {
1420 	int i, channels = 0;
1421 
1422 	/* SDP Control bit 31 (SdpInit) is clear for unused UMC channels */
1423 	for_each_umc(i)
1424 		channels += !!(pvt->umc[i].sdp_ctrl & UMC_SDP_INIT);
1425 
1426 	amd64_info("MCT channel count: %d\n", channels);
1427 
1428 	return channels;
1429 }
1430 
1431 static int ddr3_cs_size(unsigned i, bool dct_width)
1432 {
1433 	unsigned shift = 0;
1434 	int cs_size = 0;
1435 
1436 	if (i == 0 || i == 3 || i == 4)
1437 		cs_size = -1;
1438 	else if (i <= 2)
1439 		shift = i;
1440 	else if (i == 12)
1441 		shift = 7;
1442 	else if (!(i & 0x1))
1443 		shift = i >> 1;
1444 	else
1445 		shift = (i + 1) >> 1;
1446 
1447 	if (cs_size != -1)
1448 		cs_size = (128 * (1 << !!dct_width)) << shift;
1449 
1450 	return cs_size;
1451 }
1452 
1453 static int ddr3_lrdimm_cs_size(unsigned i, unsigned rank_multiply)
1454 {
1455 	unsigned shift = 0;
1456 	int cs_size = 0;
1457 
1458 	if (i < 4 || i == 6)
1459 		cs_size = -1;
1460 	else if (i == 12)
1461 		shift = 7;
1462 	else if (!(i & 0x1))
1463 		shift = i >> 1;
1464 	else
1465 		shift = (i + 1) >> 1;
1466 
1467 	if (cs_size != -1)
1468 		cs_size = rank_multiply * (128 << shift);
1469 
1470 	return cs_size;
1471 }
1472 
1473 static int ddr4_cs_size(unsigned i)
1474 {
1475 	int cs_size = 0;
1476 
1477 	if (i == 0)
1478 		cs_size = -1;
1479 	else if (i == 1)
1480 		cs_size = 1024;
1481 	else
1482 		/* Min cs_size = 1G */
1483 		cs_size = 1024 * (1 << (i >> 1));
1484 
1485 	return cs_size;
1486 }
1487 
1488 static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1489 				   unsigned cs_mode, int cs_mask_nr)
1490 {
1491 	u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
1492 
1493 	WARN_ON(cs_mode > 11);
1494 
1495 	if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
1496 		return ddr3_cs_size(cs_mode, dclr & WIDTH_128);
1497 	else
1498 		return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
1499 }
1500 
1501 /*
1502  * F15h supports only 64bit DCT interfaces
1503  */
1504 static int f15_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1505 				   unsigned cs_mode, int cs_mask_nr)
1506 {
1507 	WARN_ON(cs_mode > 12);
1508 
1509 	return ddr3_cs_size(cs_mode, false);
1510 }
1511 
1512 /* F15h M60h supports DDR4 mapping as well. */
1513 static int f15_m60h_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1514 					unsigned cs_mode, int cs_mask_nr)
1515 {
1516 	int cs_size;
1517 	u32 dcsm = pvt->csels[dct].csmasks[cs_mask_nr];
1518 
1519 	WARN_ON(cs_mode > 12);
1520 
1521 	if (pvt->dram_type == MEM_DDR4) {
1522 		if (cs_mode > 9)
1523 			return -1;
1524 
1525 		cs_size = ddr4_cs_size(cs_mode);
1526 	} else if (pvt->dram_type == MEM_LRDDR3) {
1527 		unsigned rank_multiply = dcsm & 0xf;
1528 
1529 		if (rank_multiply == 3)
1530 			rank_multiply = 4;
1531 		cs_size = ddr3_lrdimm_cs_size(cs_mode, rank_multiply);
1532 	} else {
1533 		/* Minimum cs size is 512MB for F15h M60h */
1534 		if (cs_mode == 0x1)
1535 			return -1;
1536 
1537 		cs_size = ddr3_cs_size(cs_mode, false);
1538 	}
1539 
1540 	return cs_size;
1541 }
1542 
1543 /*
1544  * F16h and F15h model 30h have only limited cs_modes.
1545  */
1546 static int f16_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1547 				unsigned cs_mode, int cs_mask_nr)
1548 {
1549 	WARN_ON(cs_mode > 12);
1550 
1551 	if (cs_mode == 6 || cs_mode == 8 ||
1552 	    cs_mode == 9 || cs_mode == 12)
1553 		return -1;
1554 	else
1555 		return ddr3_cs_size(cs_mode, false);
1556 }
1557 
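/*
 * Worked example (register values assumed purely for illustration, not
 * taken from real hardware): with base_addr = 0 and addr_mask = 0x007fffff,
 * size = ((0x007fffff >> 1) - 0 + 1) >> 1 = 0x200000 kB, so the function
 * below returns 0x200000 >> 10 = 2048 MB.
 */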
1558 static int f17_base_addr_to_cs_size(struct amd64_pvt *pvt, u8 umc,
1559 				    unsigned int cs_mode, int csrow_nr)
1560 {
1561 	u32 base_addr = pvt->csels[umc].csbases[csrow_nr];
1562 
1563 	/*  Each mask is used for every two base addresses. */
1564 	u32 addr_mask = pvt->csels[umc].csmasks[csrow_nr >> 1];
1565 
1566 	/*  Register [31:1] = Address [39:9]. Size is in kBs here. */
1567 	u32 size = ((addr_mask >> 1) - (base_addr >> 1) + 1) >> 1;
1568 
1569 	edac_dbg(1, "BaseAddr: 0x%x, AddrMask: 0x%x\n", base_addr, addr_mask);
1570 
1571 	/* Return size in MBs. */
1572 	return size >> 10;
1573 }
1574 
1575 static void read_dram_ctl_register(struct amd64_pvt *pvt)
1576 {
1577 
1578 	if (pvt->fam == 0xf)
1579 		return;
1580 
1581 	if (!amd64_read_pci_cfg(pvt->F2, DCT_SEL_LO, &pvt->dct_sel_lo)) {
1582 		edac_dbg(0, "F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n",
1583 			 pvt->dct_sel_lo, dct_sel_baseaddr(pvt));
1584 
1585 		edac_dbg(0, "  DCTs operate in %s mode\n",
1586 			 (dct_ganging_enabled(pvt) ? "ganged" : "unganged"));
1587 
1588 		if (!dct_ganging_enabled(pvt))
1589 			edac_dbg(0, "  Address range split per DCT: %s\n",
1590 				 (dct_high_range_enabled(pvt) ? "yes" : "no"));
1591 
1592 		edac_dbg(0, "  data interleave for ECC: %s, DRAM cleared since last warm reset: %s\n",
1593 			 (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
1594 			 (dct_memory_cleared(pvt) ? "yes" : "no"));
1595 
1596 		edac_dbg(0, "  channel interleave: %s, "
1597 			 "interleave bits selector: 0x%x\n",
1598 			 (dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
1599 			 dct_sel_interleave_addr(pvt));
1600 	}
1601 
1602 	amd64_read_pci_cfg(pvt->F2, DCT_SEL_HI, &pvt->dct_sel_hi);
1603 }
1604 
1605 /*
1606  * Determine channel (DCT) based on the interleaving mode (see F15h M30h BKDG,
1607  * 2.10.12 Memory Interleaving Modes).
1608  */
1609 static u8 f15_m30h_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
1610 				     u8 intlv_en, int num_dcts_intlv,
1611 				     u32 dct_sel)
1612 {
1613 	u8 channel = 0;
1614 	u8 select;
1615 
1616 	if (!(intlv_en))
1617 		return (u8)(dct_sel);
1618 
1619 	if (num_dcts_intlv == 2) {
1620 		select = (sys_addr >> 8) & 0x3;
1621 		channel = select ? 0x3 : 0;
1622 	} else if (num_dcts_intlv == 4) {
1623 		u8 intlv_addr = dct_sel_interleave_addr(pvt);
1624 		switch (intlv_addr) {
1625 		case 0x4:
1626 			channel = (sys_addr >> 8) & 0x3;
1627 			break;
1628 		case 0x5:
1629 			channel = (sys_addr >> 9) & 0x3;
1630 			break;
1631 		}
1632 	}
1633 	return channel;
1634 }
1635 
1636 /*
1637  * Determine channel (DCT) based on the interleaving mode: F10h BKDG, 2.8.9 Memory
1638  * Interleaving Modes.
1639  */
1640 static u8 f1x_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
1641 				bool hi_range_sel, u8 intlv_en)
1642 {
1643 	u8 dct_sel_high = (pvt->dct_sel_lo >> 1) & 1;
1644 
1645 	if (dct_ganging_enabled(pvt))
1646 		return 0;
1647 
1648 	if (hi_range_sel)
1649 		return dct_sel_high;
1650 
1651 	/*
1652 	 * see F2x110[DctSelIntLvAddr] - channel interleave mode
1653 	 */
1654 	if (dct_interleave_enabled(pvt)) {
1655 		u8 intlv_addr = dct_sel_interleave_addr(pvt);
1656 
1657 		/* return DCT select function: 0=DCT0, 1=DCT1 */
1658 		if (!intlv_addr)
1659 			return sys_addr >> 6 & 1;
1660 
1661 		if (intlv_addr & 0x2) {
1662 			u8 shift = intlv_addr & 0x1 ? 9 : 6;
1663 			u32 temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) & 1;
1664 
1665 			return ((sys_addr >> shift) & 1) ^ temp;
1666 		}
1667 
1668 		if (intlv_addr & 0x4) {
1669 			u8 shift = intlv_addr & 0x1 ? 9 : 8;
1670 
1671 			return (sys_addr >> shift) & 1;
1672 		}
1673 
1674 		return (sys_addr >> (12 + hweight8(intlv_en))) & 1;
1675 	}
1676 
1677 	if (dct_high_range_enabled(pvt))
1678 		return ~dct_sel_high & 1;
1679 
1680 	return 0;
1681 }
1682 
1683 /* Convert the sys_addr to the normalized DCT address */
1684 static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, u8 range,
1685 				 u64 sys_addr, bool hi_rng,
1686 				 u32 dct_sel_base_addr)
1687 {
1688 	u64 chan_off;
1689 	u64 dram_base		= get_dram_base(pvt, range);
1690 	u64 hole_off		= f10_dhar_offset(pvt);
1691 	u64 dct_sel_base_off	= (u64)(pvt->dct_sel_hi & 0xFFFFFC00) << 16;
1692 
1693 	if (hi_rng) {
1694 		/*
1695 		 * if
1696 		 * base address of high range is below 4Gb
1697 		 * (bits [47:27] at [31:11])
1698 		 * DRAM address space on this DCT is hoisted above 4Gb	&&
1699 		 * sys_addr > 4Gb
1700 		 *
1701 		 *	remove hole offset from sys_addr
1702 		 * else
1703 		 *	remove high range offset from sys_addr
1704 		 */
1705 		if ((!(dct_sel_base_addr >> 16) ||
1706 		     dct_sel_base_addr < dhar_base(pvt)) &&
1707 		    dhar_valid(pvt) &&
1708 		    (sys_addr >= BIT_64(32)))
1709 			chan_off = hole_off;
1710 		else
1711 			chan_off = dct_sel_base_off;
1712 	} else {
1713 		/*
1714 		 * if
1715 		 * we have a valid hole		&&
1716 		 * sys_addr > 4Gb
1717 		 *
1718 		 *	remove hole
1719 		 * else
1720 		 *	remove dram base to normalize to DCT address
1721 		 */
1722 		if (dhar_valid(pvt) && (sys_addr >= BIT_64(32)))
1723 			chan_off = hole_off;
1724 		else
1725 			chan_off = dram_base;
1726 	}
1727 
1728 	return (sys_addr & GENMASK_ULL(47, 6)) - (chan_off & GENMASK_ULL(47, 23));
1729 }
1730 
1731 /*
1732  * checks if the csrow passed in is marked as SPARED, if so returns the new
1733  * spare row
1734  */
1735 static int f10_process_possible_spare(struct amd64_pvt *pvt, u8 dct, int csrow)
1736 {
1737 	int tmp_cs;
1738 
1739 	if (online_spare_swap_done(pvt, dct) &&
1740 	    csrow == online_spare_bad_dramcs(pvt, dct)) {
1741 
1742 		for_each_chip_select(tmp_cs, dct, pvt) {
1743 			if (chip_select_base(tmp_cs, dct, pvt) & 0x2) {
1744 				csrow = tmp_cs;
1745 				break;
1746 			}
1747 		}
1748 	}
1749 	return csrow;
1750 }
1751 
1752 /*
1753  * Iterate over the DRAM DCT "base" and "mask" registers looking for a
1754  * SystemAddr match on the specified 'ChannelSelect' and 'NodeID'
1755  *
1756  * Return:
1757  *	-EINVAL:  NOT FOUND
1758  *	0..csrow = Chip-Select Row
1759  */
1760 static int f1x_lookup_addr_in_dct(u64 in_addr, u8 nid, u8 dct)
1761 {
1762 	struct mem_ctl_info *mci;
1763 	struct amd64_pvt *pvt;
1764 	u64 cs_base, cs_mask;
1765 	int cs_found = -EINVAL;
1766 	int csrow;
1767 
1768 	mci = edac_mc_find(nid);
1769 	if (!mci)
1770 		return cs_found;
1771 
1772 	pvt = mci->pvt_info;
1773 
1774 	edac_dbg(1, "input addr: 0x%llx, DCT: %d\n", in_addr, dct);
1775 
1776 	for_each_chip_select(csrow, dct, pvt) {
1777 		if (!csrow_enabled(csrow, dct, pvt))
1778 			continue;
1779 
1780 		get_cs_base_and_mask(pvt, csrow, dct, &cs_base, &cs_mask);
1781 
1782 		edac_dbg(1, "    CSROW=%d CSBase=0x%llx CSMask=0x%llx\n",
1783 			 csrow, cs_base, cs_mask);
1784 
1785 		cs_mask = ~cs_mask;
1786 
1787 		edac_dbg(1, "    (InputAddr & ~CSMask)=0x%llx (CSBase & ~CSMask)=0x%llx\n",
1788 			 (in_addr & cs_mask), (cs_base & cs_mask));
1789 
1790 		if ((in_addr & cs_mask) == (cs_base & cs_mask)) {
1791 			if (pvt->fam == 0x15 && pvt->model >= 0x30) {
1792 				cs_found =  csrow;
1793 				break;
1794 			}
1795 			cs_found = f10_process_possible_spare(pvt, dct, csrow);
1796 
1797 			edac_dbg(1, " MATCH csrow=%d\n", cs_found);
1798 			break;
1799 		}
1800 	}
1801 	return cs_found;
1802 }
1803 
1804 /*
1805  * See F2x10C. Non-interleaved graphics framebuffer memory under the 16G is
1806  * swapped with a region located at the bottom of memory so that the GPU can use
1807  * the interleaved region and thus two channels.
1808  */
1809 static u64 f1x_swap_interleaved_region(struct amd64_pvt *pvt, u64 sys_addr)
1810 {
1811 	u32 swap_reg, swap_base, swap_limit, rgn_size, tmp_addr;
1812 
1813 	if (pvt->fam == 0x10) {
1814 		/* only revC3 and revE have that feature */
1815 		if (pvt->model < 4 || (pvt->model < 0xa && pvt->stepping < 3))
1816 			return sys_addr;
1817 	}
1818 
1819 	amd64_read_pci_cfg(pvt->F2, SWAP_INTLV_REG, &swap_reg);
1820 
1821 	if (!(swap_reg & 0x1))
1822 		return sys_addr;
1823 
1824 	swap_base	= (swap_reg >> 3) & 0x7f;
1825 	swap_limit	= (swap_reg >> 11) & 0x7f;
1826 	rgn_size	= (swap_reg >> 20) & 0x7f;
1827 	tmp_addr	= sys_addr >> 27;
1828 
1829 	if (!(sys_addr >> 34) &&
1830 	    (((tmp_addr >= swap_base) &&
1831 	     (tmp_addr <= swap_limit)) ||
1832 	     (tmp_addr < rgn_size)))
1833 		return sys_addr ^ (u64)swap_base << 27;
1834 
1835 	return sys_addr;
1836 }
1837 
1838 /* For a given @dram_range, check if @sys_addr falls within it. */
1839 static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
1840 				  u64 sys_addr, int *chan_sel)
1841 {
1842 	int cs_found = -EINVAL;
1843 	u64 chan_addr;
1844 	u32 dct_sel_base;
1845 	u8 channel;
1846 	bool high_range = false;
1847 
1848 	u8 node_id    = dram_dst_node(pvt, range);
1849 	u8 intlv_en   = dram_intlv_en(pvt, range);
1850 	u32 intlv_sel = dram_intlv_sel(pvt, range);
1851 
1852 	edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
1853 		 range, sys_addr, get_dram_limit(pvt, range));
1854 
1855 	if (dhar_valid(pvt) &&
1856 	    dhar_base(pvt) <= sys_addr &&
1857 	    sys_addr < BIT_64(32)) {
1858 		amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
1859 			    sys_addr);
1860 		return -EINVAL;
1861 	}
1862 
1863 	if (intlv_en && (intlv_sel != ((sys_addr >> 12) & intlv_en)))
1864 		return -EINVAL;
1865 
1866 	sys_addr = f1x_swap_interleaved_region(pvt, sys_addr);
1867 
1868 	dct_sel_base = dct_sel_baseaddr(pvt);
1869 
1870 	/*
1871 	 * check whether addresses >= DctSelBaseAddr[47:27] are to be used to
1872 	 * select between DCT0 and DCT1.
1873 	 */
1874 	if (dct_high_range_enabled(pvt) &&
1875 	   !dct_ganging_enabled(pvt) &&
1876 	   ((sys_addr >> 27) >= (dct_sel_base >> 11)))
1877 		high_range = true;
1878 
1879 	channel = f1x_determine_channel(pvt, sys_addr, high_range, intlv_en);
1880 
1881 	chan_addr = f1x_get_norm_dct_addr(pvt, range, sys_addr,
1882 					  high_range, dct_sel_base);
1883 
1884 	/* Remove node interleaving, see F1x120 */
1885 	if (intlv_en)
1886 		chan_addr = ((chan_addr >> (12 + hweight8(intlv_en))) << 12) |
1887 			    (chan_addr & 0xfff);
1888 
1889 	/* remove channel interleave */
1890 	if (dct_interleave_enabled(pvt) &&
1891 	   !dct_high_range_enabled(pvt) &&
1892 	   !dct_ganging_enabled(pvt)) {
1893 
1894 		if (dct_sel_interleave_addr(pvt) != 1) {
1895 			if (dct_sel_interleave_addr(pvt) == 0x3)
1896 				/* hash 9 */
1897 				chan_addr = ((chan_addr >> 10) << 9) |
1898 					     (chan_addr & 0x1ff);
1899 			else
1900 				/* A[6] or hash 6 */
1901 				chan_addr = ((chan_addr >> 7) << 6) |
1902 					     (chan_addr & 0x3f);
1903 		} else
1904 			/* A[12] */
1905 			chan_addr = ((chan_addr >> 13) << 12) |
1906 				     (chan_addr & 0xfff);
1907 	}
1908 
1909 	edac_dbg(1, "   Normalized DCT addr: 0x%llx\n", chan_addr);
1910 
1911 	cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, channel);
1912 
1913 	if (cs_found >= 0)
1914 		*chan_sel = channel;
1915 
1916 	return cs_found;
1917 }
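/*
 * Illustrative sketch (editorial, not part of the driver): removing node
 * interleaving above squeezes the interleave bits out of the address just
 * above bit 11. With the hypothetical values below (8-node interleave,
 * i.e. three interleave bits), 0x12345678 becomes 0x2468678.
 */
#if 0
static u64 deintlv_example(void)
{
	u64 chan_addr = 0x12345678ULL;	/* hypothetical channel address */
	u8 intlv_en = 0x7;		/* hypothetical: 3 interleave bits */

	/* same transform as the "Remove node interleaving" step above */
	return ((chan_addr >> (12 + hweight8(intlv_en))) << 12) |
	       (chan_addr & 0xfff);
}
#endif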
1918 
1919 static int f15_m30h_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
1920 					u64 sys_addr, int *chan_sel)
1921 {
1922 	int cs_found = -EINVAL;
1923 	int num_dcts_intlv = 0;
1924 	u64 chan_addr, chan_offset;
1925 	u64 dct_base, dct_limit;
1926 	u32 dct_cont_base_reg, dct_cont_limit_reg, tmp;
1927 	u8 channel, alias_channel, leg_mmio_hole, dct_sel, dct_offset_en;
1928 
1929 	u64 dhar_offset		= f10_dhar_offset(pvt);
1930 	u8 intlv_addr		= dct_sel_interleave_addr(pvt);
1931 	u8 node_id		= dram_dst_node(pvt, range);
1932 	u8 intlv_en		= dram_intlv_en(pvt, range);
1933 
1934 	amd64_read_pci_cfg(pvt->F1, DRAM_CONT_BASE, &dct_cont_base_reg);
1935 	amd64_read_pci_cfg(pvt->F1, DRAM_CONT_LIMIT, &dct_cont_limit_reg);
1936 
1937 	dct_offset_en		= (u8) ((dct_cont_base_reg >> 3) & BIT(0));
1938 	dct_sel			= (u8) ((dct_cont_base_reg >> 4) & 0x7);
1939 
1940 	edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
1941 		 range, sys_addr, get_dram_limit(pvt, range));
1942 
1943 	if (!(get_dram_base(pvt, range)  <= sys_addr) ||
1944 	    !(get_dram_limit(pvt, range) >= sys_addr))
1945 		return -EINVAL;
1946 
1947 	if (dhar_valid(pvt) &&
1948 	    dhar_base(pvt) <= sys_addr &&
1949 	    sys_addr < BIT_64(32)) {
1950 		amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
1951 			    sys_addr);
1952 		return -EINVAL;
1953 	}
1954 
1955 	/* Verify sys_addr is within DCT Range. */
1956 	dct_base = (u64) dct_sel_baseaddr(pvt);
1957 	dct_limit = (dct_cont_limit_reg >> 11) & 0x1FFF;
1958 
1959 	if (!(dct_cont_base_reg & BIT(0)) &&
1960 	    !(dct_base <= (sys_addr >> 27) &&
1961 	      dct_limit >= (sys_addr >> 27)))
1962 		return -EINVAL;
1963 
1964 	/* Verify the number of DCTs that participate in channel interleaving. */
1965 	num_dcts_intlv = (int) hweight8(intlv_en);
1966 
1967 	if (!(num_dcts_intlv % 2 == 0) || (num_dcts_intlv > 4))
1968 		return -EINVAL;
1969 
1970 	if (pvt->model >= 0x60)
1971 		channel = f1x_determine_channel(pvt, sys_addr, false, intlv_en);
1972 	else
1973 		channel = f15_m30h_determine_channel(pvt, sys_addr, intlv_en,
1974 						     num_dcts_intlv, dct_sel);
1975 
1976 	/* Verify we stay within the MAX number of channels allowed */
1977 	if (channel > 3)
1978 		return -EINVAL;
1979 
1980 	leg_mmio_hole = (u8) (dct_cont_base_reg >> 1 & BIT(0));
1981 
1982 	/* Get normalized DCT addr */
1983 	if (leg_mmio_hole && (sys_addr >= BIT_64(32)))
1984 		chan_offset = dhar_offset;
1985 	else
1986 		chan_offset = dct_base << 27;
1987 
1988 	chan_addr = sys_addr - chan_offset;
1989 
1990 	/* remove channel interleave */
1991 	if (num_dcts_intlv == 2) {
1992 		if (intlv_addr == 0x4)
1993 			chan_addr = ((chan_addr >> 9) << 8) |
1994 						(chan_addr & 0xff);
1995 		else if (intlv_addr == 0x5)
1996 			chan_addr = ((chan_addr >> 10) << 9) |
1997 						(chan_addr & 0x1ff);
1998 		else
1999 			return -EINVAL;
2000 
2001 	} else if (num_dcts_intlv == 4) {
2002 		if (intlv_addr == 0x4)
2003 			chan_addr = ((chan_addr >> 10) << 8) |
2004 							(chan_addr & 0xff);
2005 		else if (intlv_addr == 0x5)
2006 			chan_addr = ((chan_addr >> 11) << 9) |
2007 							(chan_addr & 0x1ff);
2008 		else
2009 			return -EINVAL;
2010 	}
2011 
2012 	if (dct_offset_en) {
2013 		amd64_read_pci_cfg(pvt->F1,
2014 				   DRAM_CONT_HIGH_OFF + (int) channel * 4,
2015 				   &tmp);
2016 		chan_addr +=  (u64) ((tmp >> 11) & 0xfff) << 27;
2017 	}
2018 
2019 	f15h_select_dct(pvt, channel);
2020 
2021 	edac_dbg(1, "   Normalized DCT addr: 0x%llx\n", chan_addr);
2022 
2023 	/*
2024 	 * Find Chip select:
2025 	 * if channel = 3, then alias it to 1. This is because, in F15 M30h,
2026 	 * there is support for 4 DCT's, but only 2 are currently functional.
2027 	 * there is support for 4 DCTs, but only 2 are currently functional.
2028 	 * pvt->csels[1]. So we need to use '1' here to get correct info.
2029 	 * Refer F15 M30h BKDG Section 2.10 and 2.10.3 for clarifications.
2030 	 */
2031 	alias_channel =  (channel == 3) ? 1 : channel;
2032 
2033 	cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, alias_channel);
2034 
2035 	if (cs_found >= 0)
2036 		*chan_sel = alias_channel;
2037 
2038 	return cs_found;
2039 }
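/*
 * Illustrative sketch (editorial, not part of the driver): with two DCTs
 * interleaved and intlv_addr == 0x4, the de-interleave step above drops
 * address bit 8. With the hypothetical chan_addr below, 0x12345 becomes
 * 0x9145.
 */
#if 0
static u64 chan_deintlv_example(void)
{
	u64 chan_addr = 0x12345ULL;	/* hypothetical channel address */

	/* num_dcts_intlv == 2, intlv_addr == 0x4 case from above */
	return ((chan_addr >> 9) << 8) | (chan_addr & 0xff);
}
#endif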
2040 
2041 static int f1x_translate_sysaddr_to_cs(struct amd64_pvt *pvt,
2042 					u64 sys_addr,
2043 					int *chan_sel)
2044 {
2045 	int cs_found = -EINVAL;
2046 	unsigned range;
2047 
2048 	for (range = 0; range < DRAM_RANGES; range++) {
2049 		if (!dram_rw(pvt, range))
2050 			continue;
2051 
2052 		if (pvt->fam == 0x15 && pvt->model >= 0x30)
2053 			cs_found = f15_m30h_match_to_this_node(pvt, range,
2054 							       sys_addr,
2055 							       chan_sel);
2056 
2057 		else if ((get_dram_base(pvt, range)  <= sys_addr) &&
2058 			 (get_dram_limit(pvt, range) >= sys_addr)) {
2059 			cs_found = f1x_match_to_this_node(pvt, range,
2060 							  sys_addr, chan_sel);
2061 			if (cs_found >= 0)
2062 				break;
2063 		}
2064 	}
2065 	return cs_found;
2066 }
2067 
2068 /*
2069  * For reference see "2.8.5 Routing DRAM Requests" in F10 BKDG. This code maps
2070  * a @sys_addr to NodeID, DCT (channel) and chip select (CSROW).
2071  *
2072  * The @sys_addr is usually an error address received from the hardware
2073  * (MCX_ADDR).
2074  */
2075 static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
2076 				     struct err_info *err)
2077 {
2078 	struct amd64_pvt *pvt = mci->pvt_info;
2079 
2080 	error_address_to_page_and_offset(sys_addr, err);
2081 
2082 	err->csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &err->channel);
2083 	if (err->csrow < 0) {
2084 		err->err_code = ERR_CSROW;
2085 		return;
2086 	}
2087 
2088 	/*
2089 	 * We need the syndromes for channel detection only when we're
2090 	 * ganged. Otherwise @chan should already contain the channel at
2091 	 * this point.
2092 	 */
2093 	if (dct_ganging_enabled(pvt))
2094 		err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome);
2095 }
2096 
2097 /*
2098  * Debug routine to display the memory sizes of all logical DIMMs and their
2099  * CSROWs.
2100  */
2101 static void debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
2102 {
2103 	int dimm, size0, size1;
2104 	u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases;
2105 	u32 dbam  = ctrl ? pvt->dbam1 : pvt->dbam0;
2106 
2107 	if (pvt->fam == 0xf) {
2108 		/* K8 families < revF not supported yet */
2109 	       if (pvt->ext_model < K8_REV_F)
2110 			return;
2111 	       else
2112 		       WARN_ON(ctrl != 0);
2113 	}
2114 
2115 	if (pvt->fam == 0x10) {
2116 		dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1
2117 							   : pvt->dbam0;
2118 		dcsb = (ctrl && !dct_ganging_enabled(pvt)) ?
2119 				 pvt->csels[1].csbases :
2120 				 pvt->csels[0].csbases;
2121 	} else if (ctrl) {
2122 		dbam = pvt->dbam0;
2123 		dcsb = pvt->csels[1].csbases;
2124 	}
2125 	edac_dbg(1, "F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n",
2126 		 ctrl, dbam);
2127 
2128 	edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);
2129 
2130 	/* Dump memory sizes for DIMM and its CSROWs */
2131 	for (dimm = 0; dimm < 4; dimm++) {
2132 
2133 		size0 = 0;
2134 		if (dcsb[dimm*2] & DCSB_CS_ENABLE)
2135 			/*
2136 			 * For F15 M60h, we need a multiplier for LRDIMM cs_size
2137 			 * calculation. We pass dimm value to the dbam_to_cs
2138 			 * mapper so we can find the multiplier from the
2139 			 * corresponding DCSM.
2140 			 */
2141 			size0 = pvt->ops->dbam_to_cs(pvt, ctrl,
2142 						     DBAM_DIMM(dimm, dbam),
2143 						     dimm);
2144 
2145 		size1 = 0;
2146 		if (dcsb[dimm*2 + 1] & DCSB_CS_ENABLE)
2147 			size1 = pvt->ops->dbam_to_cs(pvt, ctrl,
2148 						     DBAM_DIMM(dimm, dbam),
2149 						     dimm);
2150 
2151 		amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
2152 				dimm * 2,     size0,
2153 				dimm * 2 + 1, size1);
2154 	}
2155 }
2156 
2157 static struct amd64_family_type family_types[] = {
2158 	[K8_CPUS] = {
2159 		.ctl_name = "K8",
2160 		.f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
2161 		.f2_id = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
2162 		.ops = {
2163 			.early_channel_count	= k8_early_channel_count,
2164 			.map_sysaddr_to_csrow	= k8_map_sysaddr_to_csrow,
2165 			.dbam_to_cs		= k8_dbam_to_chip_select,
2166 		}
2167 	},
2168 	[F10_CPUS] = {
2169 		.ctl_name = "F10h",
2170 		.f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP,
2171 		.f2_id = PCI_DEVICE_ID_AMD_10H_NB_DRAM,
2172 		.ops = {
2173 			.early_channel_count	= f1x_early_channel_count,
2174 			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
2175 			.dbam_to_cs		= f10_dbam_to_chip_select,
2176 		}
2177 	},
2178 	[F15_CPUS] = {
2179 		.ctl_name = "F15h",
2180 		.f1_id = PCI_DEVICE_ID_AMD_15H_NB_F1,
2181 		.f2_id = PCI_DEVICE_ID_AMD_15H_NB_F2,
2182 		.ops = {
2183 			.early_channel_count	= f1x_early_channel_count,
2184 			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
2185 			.dbam_to_cs		= f15_dbam_to_chip_select,
2186 		}
2187 	},
2188 	[F15_M30H_CPUS] = {
2189 		.ctl_name = "F15h_M30h",
2190 		.f1_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1,
2191 		.f2_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F2,
2192 		.ops = {
2193 			.early_channel_count	= f1x_early_channel_count,
2194 			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
2195 			.dbam_to_cs		= f16_dbam_to_chip_select,
2196 		}
2197 	},
2198 	[F15_M60H_CPUS] = {
2199 		.ctl_name = "F15h_M60h",
2200 		.f1_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F1,
2201 		.f2_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F2,
2202 		.ops = {
2203 			.early_channel_count	= f1x_early_channel_count,
2204 			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
2205 			.dbam_to_cs		= f15_m60h_dbam_to_chip_select,
2206 		}
2207 	},
2208 	[F16_CPUS] = {
2209 		.ctl_name = "F16h",
2210 		.f1_id = PCI_DEVICE_ID_AMD_16H_NB_F1,
2211 		.f2_id = PCI_DEVICE_ID_AMD_16H_NB_F2,
2212 		.ops = {
2213 			.early_channel_count	= f1x_early_channel_count,
2214 			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
2215 			.dbam_to_cs		= f16_dbam_to_chip_select,
2216 		}
2217 	},
2218 	[F16_M30H_CPUS] = {
2219 		.ctl_name = "F16h_M30h",
2220 		.f1_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F1,
2221 		.f2_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F2,
2222 		.ops = {
2223 			.early_channel_count	= f1x_early_channel_count,
2224 			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
2225 			.dbam_to_cs		= f16_dbam_to_chip_select,
2226 		}
2227 	},
2228 	[F17_CPUS] = {
2229 		.ctl_name = "F17h",
2230 		.f0_id = PCI_DEVICE_ID_AMD_17H_DF_F0,
2231 		.f6_id = PCI_DEVICE_ID_AMD_17H_DF_F6,
2232 		.ops = {
2233 			.early_channel_count	= f17_early_channel_count,
2234 			.dbam_to_cs		= f17_base_addr_to_cs_size,
2235 		}
2236 	},
2237 	[F17_M10H_CPUS] = {
2238 		.ctl_name = "F17h_M10h",
2239 		.f0_id = PCI_DEVICE_ID_AMD_17H_M10H_DF_F0,
2240 		.f6_id = PCI_DEVICE_ID_AMD_17H_M10H_DF_F6,
2241 		.ops = {
2242 			.early_channel_count	= f17_early_channel_count,
2243 			.dbam_to_cs		= f17_base_addr_to_cs_size,
2244 		}
2245 	},
2246 	[F17_M30H_CPUS] = {
2247 		.ctl_name = "F17h_M30h",
2248 		.f0_id = PCI_DEVICE_ID_AMD_17H_M30H_DF_F0,
2249 		.f6_id = PCI_DEVICE_ID_AMD_17H_M30H_DF_F6,
2250 		.ops = {
2251 			.early_channel_count	= f17_early_channel_count,
2252 			.dbam_to_cs		= f17_base_addr_to_cs_size,
2253 		}
2254 	},
2255 };
2256 
2257 /*
2258  * These are tables of eigenvectors (one per line) which can be used for the
2259  * construction of the syndrome tables. The modified syndrome search algorithm
2260  * uses those to find the symbol in error and thus the DIMM.
2261  *
2262  * Algorithm courtesy of Ross LaFetra from AMD.
2263  */
2264 static const u16 x4_vectors[] = {
2265 	0x2f57, 0x1afe, 0x66cc, 0xdd88,
2266 	0x11eb, 0x3396, 0x7f4c, 0xeac8,
2267 	0x0001, 0x0002, 0x0004, 0x0008,
2268 	0x1013, 0x3032, 0x4044, 0x8088,
2269 	0x106b, 0x30d6, 0x70fc, 0xe0a8,
2270 	0x4857, 0xc4fe, 0x13cc, 0x3288,
2271 	0x1ac5, 0x2f4a, 0x5394, 0xa1e8,
2272 	0x1f39, 0x251e, 0xbd6c, 0x6bd8,
2273 	0x15c1, 0x2a42, 0x89ac, 0x4758,
2274 	0x2b03, 0x1602, 0x4f0c, 0xca08,
2275 	0x1f07, 0x3a0e, 0x6b04, 0xbd08,
2276 	0x8ba7, 0x465e, 0x244c, 0x1cc8,
2277 	0x2b87, 0x164e, 0x642c, 0xdc18,
2278 	0x40b9, 0x80de, 0x1094, 0x20e8,
2279 	0x27db, 0x1eb6, 0x9dac, 0x7b58,
2280 	0x11c1, 0x2242, 0x84ac, 0x4c58,
2281 	0x1be5, 0x2d7a, 0x5e34, 0xa718,
2282 	0x4b39, 0x8d1e, 0x14b4, 0x28d8,
2283 	0x4c97, 0xc87e, 0x11fc, 0x33a8,
2284 	0x8e97, 0x497e, 0x2ffc, 0x1aa8,
2285 	0x16b3, 0x3d62, 0x4f34, 0x8518,
2286 	0x1e2f, 0x391a, 0x5cac, 0xf858,
2287 	0x1d9f, 0x3b7a, 0x572c, 0xfe18,
2288 	0x15f5, 0x2a5a, 0x5264, 0xa3b8,
2289 	0x1dbb, 0x3b66, 0x715c, 0xe3f8,
2290 	0x4397, 0xc27e, 0x17fc, 0x3ea8,
2291 	0x1617, 0x3d3e, 0x6464, 0xb8b8,
2292 	0x23ff, 0x12aa, 0xab6c, 0x56d8,
2293 	0x2dfb, 0x1ba6, 0x913c, 0x7328,
2294 	0x185d, 0x2ca6, 0x7914, 0x9e28,
2295 	0x171b, 0x3e36, 0x7d7c, 0xebe8,
2296 	0x4199, 0x82ee, 0x19f4, 0x2e58,
2297 	0x4807, 0xc40e, 0x130c, 0x3208,
2298 	0x1905, 0x2e0a, 0x5804, 0xac08,
2299 	0x213f, 0x132a, 0xadfc, 0x5ba8,
2300 	0x19a9, 0x2efe, 0xb5cc, 0x6f88,
2301 };
2302 
2303 static const u16 x8_vectors[] = {
2304 	0x0145, 0x028a, 0x2374, 0x43c8, 0xa1f0, 0x0520, 0x0a40, 0x1480,
2305 	0x0211, 0x0422, 0x0844, 0x1088, 0x01b0, 0x44e0, 0x23c0, 0xed80,
2306 	0x1011, 0x0116, 0x022c, 0x0458, 0x08b0, 0x8c60, 0x2740, 0x4e80,
2307 	0x0411, 0x0822, 0x1044, 0x0158, 0x02b0, 0x2360, 0x46c0, 0xab80,
2308 	0x0811, 0x1022, 0x012c, 0x0258, 0x04b0, 0x4660, 0x8cc0, 0x2780,
2309 	0x2071, 0x40e2, 0xa0c4, 0x0108, 0x0210, 0x0420, 0x0840, 0x1080,
2310 	0x4071, 0x80e2, 0x0104, 0x0208, 0x0410, 0x0820, 0x1040, 0x2080,
2311 	0x8071, 0x0102, 0x0204, 0x0408, 0x0810, 0x1020, 0x2040, 0x4080,
2312 	0x019d, 0x03d6, 0x136c, 0x2198, 0x50b0, 0xb2e0, 0x0740, 0x0e80,
2313 	0x0189, 0x03ea, 0x072c, 0x0e58, 0x1cb0, 0x56e0, 0x37c0, 0xf580,
2314 	0x01fd, 0x0376, 0x06ec, 0x0bb8, 0x1110, 0x2220, 0x4440, 0x8880,
2315 	0x0163, 0x02c6, 0x1104, 0x0758, 0x0eb0, 0x2be0, 0x6140, 0xc280,
2316 	0x02fd, 0x01c6, 0x0b5c, 0x1108, 0x07b0, 0x25a0, 0x8840, 0x6180,
2317 	0x0801, 0x012e, 0x025c, 0x04b8, 0x1370, 0x26e0, 0x57c0, 0xb580,
2318 	0x0401, 0x0802, 0x015c, 0x02b8, 0x22b0, 0x13e0, 0x7140, 0xe280,
2319 	0x0201, 0x0402, 0x0804, 0x01b8, 0x11b0, 0x31a0, 0x8040, 0x7180,
2320 	0x0101, 0x0202, 0x0404, 0x0808, 0x1010, 0x2020, 0x4040, 0x8080,
2321 	0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080,
2322 	0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000,
2323 };
2324 
2325 static int decode_syndrome(u16 syndrome, const u16 *vectors, unsigned num_vecs,
2326 			   unsigned v_dim)
2327 {
2328 	unsigned int i, err_sym;
2329 
2330 	for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) {
2331 		u16 s = syndrome;
2332 		unsigned v_idx =  err_sym * v_dim;
2333 		unsigned v_end = (err_sym + 1) * v_dim;
2334 
2335 		/* walk over all 16 bits of the syndrome */
2336 		for (i = 1; i < (1U << 16); i <<= 1) {
2337 
2338 			/* if bit is set in that eigenvector... */
2339 			if (v_idx < v_end && vectors[v_idx] & i) {
2340 				u16 ev_comp = vectors[v_idx++];
2341 
2342 				/* ... and bit set in the modified syndrome, */
2343 				if (s & i) {
2344 					/* remove it. */
2345 					s ^= ev_comp;
2346 
2347 					if (!s)
2348 						return err_sym;
2349 				}
2350 
2351 			} else if (s & i)
2352 				/* can't get to zero, move to next symbol */
2353 				break;
2354 		}
2355 	}
2356 
2357 	edac_dbg(0, "syndrome(%x) not found\n", syndrome);
2358 	return -1;
2359 }
2360 
2361 static int map_err_sym_to_channel(int err_sym, int sym_size)
2362 {
2363 	if (sym_size == 4)
2364 		switch (err_sym) {
2365 		case 0x20:
2366 		case 0x21:
2367 			return 0;
2368 			break;
2369 		case 0x22:
2370 		case 0x23:
2371 			return 1;
2372 			break;
2373 		default:
2374 			return err_sym >> 4;
2375 			break;
2376 		}
2377 	/* x8 symbols */
2378 	else
2379 		switch (err_sym) {
2380 		/* imaginary bits not in a DIMM */
2381 		case 0x10:
2382 			WARN(1, KERN_ERR "Invalid error symbol: 0x%x\n",
2383 					  err_sym);
2384 			return -1;
2385 			break;
2386 
2387 		case 0x11:
2388 			return 0;
2389 			break;
2390 		case 0x12:
2391 			return 1;
2392 			break;
2393 		default:
2394 			return err_sym >> 3;
2395 			break;
2396 		}
2397 	return -1;
2398 }
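/*
 * Illustrative sketch (editorial, not part of the driver): a few example
 * mappings produced by map_err_sym_to_channel() for hypothetical error
 * symbols.
 */
#if 0
static void map_err_sym_examples(void)
{
	int a = map_err_sym_to_channel(0x21, 4);	/* x4: -> channel 0 */
	int b = map_err_sym_to_channel(0x35, 4);	/* x4: 0x35 >> 4 -> 3 */
	int c = map_err_sym_to_channel(0x2a, 8);	/* x8: 0x2a >> 3 -> 5 */

	(void)a; (void)b; (void)c;
}
#endif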
2399 
2400 static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome)
2401 {
2402 	struct amd64_pvt *pvt = mci->pvt_info;
2403 	int err_sym = -1;
2404 
2405 	if (pvt->ecc_sym_sz == 8)
2406 		err_sym = decode_syndrome(syndrome, x8_vectors,
2407 					  ARRAY_SIZE(x8_vectors),
2408 					  pvt->ecc_sym_sz);
2409 	else if (pvt->ecc_sym_sz == 4)
2410 		err_sym = decode_syndrome(syndrome, x4_vectors,
2411 					  ARRAY_SIZE(x4_vectors),
2412 					  pvt->ecc_sym_sz);
2413 	else {
2414 		amd64_warn("Illegal syndrome type: %u\n", pvt->ecc_sym_sz);
2415 		return err_sym;
2416 	}
2417 
2418 	return map_err_sym_to_channel(err_sym, pvt->ecc_sym_sz);
2419 }
2420 
2421 static void __log_ecc_error(struct mem_ctl_info *mci, struct err_info *err,
2422 			    u8 ecc_type)
2423 {
2424 	enum hw_event_mc_err_type err_type;
2425 	const char *string;
2426 
2427 	if (ecc_type == 2)
2428 		err_type = HW_EVENT_ERR_CORRECTED;
2429 	else if (ecc_type == 1)
2430 		err_type = HW_EVENT_ERR_UNCORRECTED;
2431 	else if (ecc_type == 3)
2432 		err_type = HW_EVENT_ERR_DEFERRED;
2433 	else {
2434 		WARN(1, "Something is rotten in the state of Denmark.\n");
2435 		return;
2436 	}
2437 
2438 	switch (err->err_code) {
2439 	case DECODE_OK:
2440 		string = "";
2441 		break;
2442 	case ERR_NODE:
2443 		string = "Failed to map error addr to a node";
2444 		break;
2445 	case ERR_CSROW:
2446 		string = "Failed to map error addr to a csrow";
2447 		break;
2448 	case ERR_CHANNEL:
2449 		string = "Unknown syndrome - possible error reporting race";
2450 		break;
2451 	case ERR_SYND:
2452 		string = "MCA_SYND not valid - unknown syndrome and csrow";
2453 		break;
2454 	case ERR_NORM_ADDR:
2455 		string = "Cannot decode normalized address";
2456 		break;
2457 	default:
2458 		string = "WTF error";
2459 		break;
2460 	}
2461 
2462 	edac_mc_handle_error(err_type, mci, 1,
2463 			     err->page, err->offset, err->syndrome,
2464 			     err->csrow, err->channel, -1,
2465 			     string, "");
2466 }
2467 
2468 static inline void decode_bus_error(int node_id, struct mce *m)
2469 {
2470 	struct mem_ctl_info *mci;
2471 	struct amd64_pvt *pvt;
2472 	u8 ecc_type = (m->status >> 45) & 0x3;
2473 	u8 xec = XEC(m->status, 0x1f);
2474 	u16 ec = EC(m->status);
2475 	u64 sys_addr;
2476 	struct err_info err;
2477 
2478 	mci = edac_mc_find(node_id);
2479 	if (!mci)
2480 		return;
2481 
2482 	pvt = mci->pvt_info;
2483 
2484 	/* Bail out early if this was an 'observed' error */
2485 	if (PP(ec) == NBSL_PP_OBS)
2486 		return;
2487 
2488 	/* Do only ECC errors */
2489 	if (xec && xec != F10_NBSL_EXT_ERR_ECC)
2490 		return;
2491 
2492 	memset(&err, 0, sizeof(err));
2493 
2494 	sys_addr = get_error_address(pvt, m);
2495 
2496 	if (ecc_type == 2)
2497 		err.syndrome = extract_syndrome(m->status);
2498 
2499 	pvt->ops->map_sysaddr_to_csrow(mci, sys_addr, &err);
2500 
2501 	__log_ecc_error(mci, &err, ecc_type);
2502 }
2503 
2504 /*
2505  * To find the UMC channel represented by this bank we need to match on its
2506  * instance_id. The instance_id of a bank is held in the lower 32 bits of its
2507  * IPID.
2508  *
2509  * Currently, we can derive the channel number by looking at the 6th nibble in
2510  * the instance_id. For example, instance_id=0xYXXXXX where Y is the channel
2511  * number.
2512  */
2513 static int find_umc_channel(struct mce *m)
2514 {
2515 	return (m->ipid & GENMASK(31, 0)) >> 20;
2516 }
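/*
 * Illustrative sketch (editorial, not part of the driver): with a
 * hypothetical IPID whose lower 32 bits are 0x00250f00, the 6th nibble is
 * 2, so find_umc_channel() reports UMC channel 2.
 */
#if 0
static int umc_channel_example(void)
{
	u64 ipid = 0x00250f00ULL;	/* hypothetical lower 32 bits of an IPID */

	return (ipid & GENMASK(31, 0)) >> 20;	/* -> 2 */
}
#endif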
2517 
2518 static void decode_umc_error(int node_id, struct mce *m)
2519 {
2520 	u8 ecc_type = (m->status >> 45) & 0x3;
2521 	struct mem_ctl_info *mci;
2522 	struct amd64_pvt *pvt;
2523 	struct err_info err;
2524 	u64 sys_addr;
2525 
2526 	mci = edac_mc_find(node_id);
2527 	if (!mci)
2528 		return;
2529 
2530 	pvt = mci->pvt_info;
2531 
2532 	memset(&err, 0, sizeof(err));
2533 
2534 	if (m->status & MCI_STATUS_DEFERRED)
2535 		ecc_type = 3;
2536 
2537 	err.channel = find_umc_channel(m);
2538 
2539 	if (umc_normaddr_to_sysaddr(m->addr, pvt->mc_node_id, err.channel, &sys_addr)) {
2540 		err.err_code = ERR_NORM_ADDR;
2541 		goto log_error;
2542 	}
2543 
2544 	error_address_to_page_and_offset(sys_addr, &err);
2545 
2546 	if (!(m->status & MCI_STATUS_SYNDV)) {
2547 		err.err_code = ERR_SYND;
2548 		goto log_error;
2549 	}
2550 
2551 	if (ecc_type == 2) {
2552 		u8 length = (m->synd >> 18) & 0x3f;
2553 
2554 		if (length)
2555 			err.syndrome = (m->synd >> 32) & GENMASK(length - 1, 0);
2556 		else
2557 			err.err_code = ERR_CHANNEL;
2558 	}
2559 
2560 	err.csrow = m->synd & 0x7;
2561 
2562 log_error:
2563 	__log_ecc_error(mci, &err, ecc_type);
2564 }
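/*
 * Illustrative sketch (editorial, not part of the driver): MCA_SYND carries
 * the syndrome length in bits [23:18] and the syndrome value from bit 32
 * upwards. With the hypothetical register value below, the length field is
 * 0x13 (19 valid bits) and the extracted syndrome is 0xbeef.
 */
#if 0
static u64 umc_synd_example(void)
{
	u64 synd = 0x0000beef004c0000ULL;	/* hypothetical MCA_SYND value */
	u8 length = (synd >> 18) & 0x3f;	/* -> 0x13 */

	return (synd >> 32) & GENMASK(length - 1, 0);	/* -> 0xbeef */
}
#endif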
2565 
2566 /*
2567  * Use pvt->F3 which contains the F3 CPU PCI device to get the related
2568  * F1 (AddrMap) and F2 (Dct) devices. Return negative value on error.
2569  * Reserve F0 and F6 on systems with a UMC.
2570  */
2571 static int
2572 reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 pci_id1, u16 pci_id2)
2573 {
2574 	if (pvt->umc) {
2575 		pvt->F0 = pci_get_related_function(pvt->F3->vendor, pci_id1, pvt->F3);
2576 		if (!pvt->F0) {
2577 			amd64_err("F0 not found, device 0x%x (broken BIOS?)\n", pci_id1);
2578 			return -ENODEV;
2579 		}
2580 
2581 		pvt->F6 = pci_get_related_function(pvt->F3->vendor, pci_id2, pvt->F3);
2582 		if (!pvt->F6) {
2583 			pci_dev_put(pvt->F0);
2584 			pvt->F0 = NULL;
2585 
2586 			amd64_err("F6 not found: device 0x%x (broken BIOS?)\n", pci_id2);
2587 			return -ENODEV;
2588 		}
2589 
2590 		edac_dbg(1, "F0: %s\n", pci_name(pvt->F0));
2591 		edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));
2592 		edac_dbg(1, "F6: %s\n", pci_name(pvt->F6));
2593 
2594 		return 0;
2595 	}
2596 
2597 	/* Reserve the ADDRESS MAP Device */
2598 	pvt->F1 = pci_get_related_function(pvt->F3->vendor, pci_id1, pvt->F3);
2599 	if (!pvt->F1) {
2600 		amd64_err("F1 not found: device 0x%x (broken BIOS?)\n", pci_id1);
2601 		return -ENODEV;
2602 	}
2603 
2604 	/* Reserve the DCT Device */
2605 	pvt->F2 = pci_get_related_function(pvt->F3->vendor, pci_id2, pvt->F3);
2606 	if (!pvt->F2) {
2607 		pci_dev_put(pvt->F1);
2608 		pvt->F1 = NULL;
2609 
2610 		amd64_err("F2 not found: device 0x%x (broken BIOS?)\n", pci_id2);
2611 		return -ENODEV;
2612 	}
2613 
2614 	edac_dbg(1, "F1: %s\n", pci_name(pvt->F1));
2615 	edac_dbg(1, "F2: %s\n", pci_name(pvt->F2));
2616 	edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));
2617 
2618 	return 0;
2619 }
2620 
2621 static void free_mc_sibling_devs(struct amd64_pvt *pvt)
2622 {
2623 	if (pvt->umc) {
2624 		pci_dev_put(pvt->F0);
2625 		pci_dev_put(pvt->F6);
2626 	} else {
2627 		pci_dev_put(pvt->F1);
2628 		pci_dev_put(pvt->F2);
2629 	}
2630 }
2631 
2632 static void determine_ecc_sym_sz(struct amd64_pvt *pvt)
2633 {
2634 	pvt->ecc_sym_sz = 4;
2635 
2636 	if (pvt->umc) {
2637 		u8 i;
2638 
2639 		for_each_umc(i) {
2640 			/* Check enabled channels only: */
2641 			if (pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) {
2642 				if (pvt->umc[i].ecc_ctrl & BIT(9)) {
2643 					pvt->ecc_sym_sz = 16;
2644 					return;
2645 				} else if (pvt->umc[i].ecc_ctrl & BIT(7)) {
2646 					pvt->ecc_sym_sz = 8;
2647 					return;
2648 				}
2649 			}
2650 		}
2651 	} else if (pvt->fam >= 0x10) {
2652 		u32 tmp;
2653 
2654 		amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
2655 		/* F16h has only DCT0, so no need to read dbam1. */
2656 		if (pvt->fam != 0x16)
2657 			amd64_read_dct_pci_cfg(pvt, 1, DBAM0, &pvt->dbam1);
2658 
2659 		/* F10h, revD and later can do x8 ECC too. */
2660 		if ((pvt->fam > 0x10 || pvt->model > 7) && tmp & BIT(25))
2661 			pvt->ecc_sym_sz = 8;
2662 	}
2663 }
2664 
2665 /*
2666  * Retrieve the hardware registers of the memory controller.
2667  */
2668 static void __read_mc_regs_df(struct amd64_pvt *pvt)
2669 {
2670 	u8 nid = pvt->mc_node_id;
2671 	struct amd64_umc *umc;
2672 	u32 i, umc_base;
2673 
2674 	/* Read registers from each UMC */
2675 	for_each_umc(i) {
2676 
2677 		umc_base = get_umc_base(i);
2678 		umc = &pvt->umc[i];
2679 
2680 		amd_smn_read(nid, umc_base + UMCCH_DIMM_CFG, &umc->dimm_cfg);
2681 		amd_smn_read(nid, umc_base + UMCCH_UMC_CFG, &umc->umc_cfg);
2682 		amd_smn_read(nid, umc_base + UMCCH_SDP_CTRL, &umc->sdp_ctrl);
2683 		amd_smn_read(nid, umc_base + UMCCH_ECC_CTRL, &umc->ecc_ctrl);
2684 		amd_smn_read(nid, umc_base + UMCCH_UMC_CAP_HI, &umc->umc_cap_hi);
2685 	}
2686 }
2687 
2688 /*
2689  * Retrieve the hardware registers of the memory controller (this includes the
2690  * 'Address Map' and 'Misc' device regs)
2691  */
2692 static void read_mc_regs(struct amd64_pvt *pvt)
2693 {
2694 	unsigned int range;
2695 	u64 msr_val;
2696 
2697 	/*
2698 	 * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
2699 	 * those are Read-As-Zero.
2700 	 */
2701 	rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
2702 	edac_dbg(0, "  TOP_MEM:  0x%016llx\n", pvt->top_mem);
2703 
2704 	/* Check first whether TOP_MEM2 is enabled: */
2705 	rdmsrl(MSR_K8_SYSCFG, msr_val);
2706 	if (msr_val & BIT(21)) {
2707 		rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
2708 		edac_dbg(0, "  TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
2709 	} else {
2710 		edac_dbg(0, "  TOP_MEM2 disabled\n");
2711 	}
2712 
2713 	if (pvt->umc) {
2714 		__read_mc_regs_df(pvt);
2715 		amd64_read_pci_cfg(pvt->F0, DF_DHAR, &pvt->dhar);
2716 
2717 		goto skip;
2718 	}
2719 
2720 	amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap);
2721 
2722 	read_dram_ctl_register(pvt);
2723 
2724 	for (range = 0; range < DRAM_RANGES; range++) {
2725 		u8 rw;
2726 
2727 		/* read settings for this DRAM range */
2728 		read_dram_base_limit_regs(pvt, range);
2729 
2730 		rw = dram_rw(pvt, range);
2731 		if (!rw)
2732 			continue;
2733 
2734 		edac_dbg(1, "  DRAM range[%d], base: 0x%016llx; limit: 0x%016llx\n",
2735 			 range,
2736 			 get_dram_base(pvt, range),
2737 			 get_dram_limit(pvt, range));
2738 
2739 		edac_dbg(1, "   IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d\n",
2740 			 dram_intlv_en(pvt, range) ? "Enabled" : "Disabled",
2741 			 (rw & 0x1) ? "R" : "-",
2742 			 (rw & 0x2) ? "W" : "-",
2743 			 dram_intlv_sel(pvt, range),
2744 			 dram_dst_node(pvt, range));
2745 	}
2746 
2747 	amd64_read_pci_cfg(pvt->F1, DHAR, &pvt->dhar);
2748 	amd64_read_dct_pci_cfg(pvt, 0, DBAM0, &pvt->dbam0);
2749 
2750 	amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare);
2751 
2752 	amd64_read_dct_pci_cfg(pvt, 0, DCLR0, &pvt->dclr0);
2753 	amd64_read_dct_pci_cfg(pvt, 0, DCHR0, &pvt->dchr0);
2754 
2755 	if (!dct_ganging_enabled(pvt)) {
2756 		amd64_read_dct_pci_cfg(pvt, 1, DCLR0, &pvt->dclr1);
2757 		amd64_read_dct_pci_cfg(pvt, 1, DCHR0, &pvt->dchr1);
2758 	}
2759 
2760 skip:
2761 	read_dct_base_mask(pvt);
2762 
2763 	determine_memory_type(pvt);
2764 	edac_dbg(1, "  DIMM type: %s\n", edac_mem_types[pvt->dram_type]);
2765 
2766 	determine_ecc_sym_sz(pvt);
2767 
2768 	dump_misc_regs(pvt);
2769 }
2770 
2771 /*
2772  * NOTE: CPU Revision Dependent code
2773  *
2774  * Input:
2775  *	@csrow_nr ChipSelect Row Number (0..NUM_CHIPSELECTS-1)
2776  *	k8 private pointer to -->
2777  *			DRAM Bank Address mapping register
2778  *			node_id
2779  *			DCL register where dual_channel_active is
2780  *
2781  * The DBAM register consists of 4 sets of 4 bits each definitions:
2782  *
2783  * Bits:	CSROWs
2784  * 0-3		CSROWs 0 and 1
2785  * 4-7		CSROWs 2 and 3
2786  * 8-11		CSROWs 4 and 5
2787  * 12-15	CSROWs 6 and 7
2788  *
2789  * Values range from: 0 to 15
2790  * The meaning of the values depends on CPU revision and dual-channel state,
2791  * see relevant BKDG more info.
2792  *
2793  * The memory controller provides for total of only 8 CSROWs in its current
2794  * architecture. Each "pair" of CSROWs normally represents just one DIMM in
2795  * single channel or two (2) DIMMs in dual channel mode.
2796  *
2797  * The following code logic collapses the various tables for CSROW based on CPU
2798  * revision.
2799  *
2800  * Returns:
2801  *	The number of PAGE_SIZE pages on the specified CSROW number it
2802  *	The number of PAGE_SIZE pages the specified CSROW number
2803  *	encompasses.
2804  */
2805 static u32 get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr_orig)
2806 {
2807 	u32 dbam = dct ? pvt->dbam1 : pvt->dbam0;
2808 	int csrow_nr = csrow_nr_orig;
2809 	u32 cs_mode, nr_pages;
2810 
2811 	if (!pvt->umc)
2812 		csrow_nr >>= 1;
2813 
2814 	cs_mode = DBAM_DIMM(csrow_nr, dbam);
2815 
2816 	nr_pages   = pvt->ops->dbam_to_cs(pvt, dct, cs_mode, csrow_nr);
2817 	nr_pages <<= 20 - PAGE_SHIFT;
2818 
2819 	edac_dbg(0, "csrow: %d, channel: %d, DBAM idx: %d\n",
2820 		    csrow_nr_orig, dct,  cs_mode);
2821 	edac_dbg(0, "nr_pages/channel: %u\n", nr_pages);
2822 
2823 	return nr_pages;
2824 }
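/*
 * Illustrative sketch (editorial, not part of the driver): the dbam_to_cs()
 * helpers return the chip select size in MB, which is then converted to
 * PAGE_SIZE pages. With a hypothetical 2048MB chip select and 4K pages,
 * that is 2048 << (20 - 12) = 524288 pages.
 */
#if 0
static u32 csrow_pages_example(void)
{
	u32 size_mb = 2048;			/* hypothetical cs size in MB */

	return size_mb << (20 - PAGE_SHIFT);	/* 524288 with 4K pages */
}
#endif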
2825 
2826 /*
2827  * Initialize the array of csrow attribute instances, based on the values
2828  * from pci config hardware registers.
2829  */
2830 static int init_csrows(struct mem_ctl_info *mci)
2831 {
2832 	struct amd64_pvt *pvt = mci->pvt_info;
2833 	enum edac_type edac_mode = EDAC_NONE;
2834 	struct csrow_info *csrow;
2835 	struct dimm_info *dimm;
2836 	int i, j, empty = 1;
2837 	int nr_pages = 0;
2838 	u32 val;
2839 
2840 	if (!pvt->umc) {
2841 		amd64_read_pci_cfg(pvt->F3, NBCFG, &val);
2842 
2843 		pvt->nbcfg = val;
2844 
2845 		edac_dbg(0, "node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
2846 			 pvt->mc_node_id, val,
2847 			 !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE));
2848 	}
2849 
2850 	/*
2851 	 * We iterate over DCT0 here but we look at DCT1 in parallel, if needed.
2852 	 */
2853 	for_each_chip_select(i, 0, pvt) {
2854 		bool row_dct0 = !!csrow_enabled(i, 0, pvt);
2855 		bool row_dct1 = false;
2856 
2857 		if (pvt->fam != 0xf)
2858 			row_dct1 = !!csrow_enabled(i, 1, pvt);
2859 
2860 		if (!row_dct0 && !row_dct1)
2861 			continue;
2862 
2863 		csrow = mci->csrows[i];
2864 		empty = 0;
2865 
2866 		edac_dbg(1, "MC node: %d, csrow: %d\n",
2867 			    pvt->mc_node_id, i);
2868 
2869 		if (row_dct0) {
2870 			nr_pages = get_csrow_nr_pages(pvt, 0, i);
2871 			csrow->channels[0]->dimm->nr_pages = nr_pages;
2872 		}
2873 
2874 		/* K8 has only one DCT */
2875 		if (pvt->fam != 0xf && row_dct1) {
2876 			int row_dct1_pages = get_csrow_nr_pages(pvt, 1, i);
2877 
2878 			csrow->channels[1]->dimm->nr_pages = row_dct1_pages;
2879 			nr_pages += row_dct1_pages;
2880 		}
2881 
2882 		edac_dbg(1, "Total csrow%d pages: %u\n", i, nr_pages);
2883 
2884 		/* Determine DIMM ECC mode: */
2885 		if (pvt->umc) {
2886 			if (mci->edac_ctl_cap & EDAC_FLAG_S4ECD4ED)
2887 				edac_mode = EDAC_S4ECD4ED;
2888 			else if (mci->edac_ctl_cap & EDAC_FLAG_SECDED)
2889 				edac_mode = EDAC_SECDED;
2890 
2891 		} else if (pvt->nbcfg & NBCFG_ECC_ENABLE) {
2892 			edac_mode = (pvt->nbcfg & NBCFG_CHIPKILL)
2893 					? EDAC_S4ECD4ED
2894 					: EDAC_SECDED;
2895 		}
2896 
2897 		for (j = 0; j < pvt->channel_count; j++) {
2898 			dimm = csrow->channels[j]->dimm;
2899 			dimm->mtype = pvt->dram_type;
2900 			dimm->edac_mode = edac_mode;
2901 		}
2902 	}
2903 
2904 	return empty;
2905 }
2906 
2907 /* get all cores on this DCT */
2908 static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, u16 nid)
2909 {
2910 	int cpu;
2911 
2912 	for_each_online_cpu(cpu)
2913 		if (amd_get_nb_id(cpu) == nid)
2914 			cpumask_set_cpu(cpu, mask);
2915 }
2916 
2917 /* check MCG_CTL on all the cpus on this node */
2918 static bool nb_mce_bank_enabled_on_node(u16 nid)
2919 {
2920 	cpumask_var_t mask;
2921 	int cpu, nbe;
2922 	bool ret = false;
2923 
2924 	if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
2925 		amd64_warn("%s: Error allocating mask\n", __func__);
2926 		return false;
2927 	}
2928 
2929 	get_cpus_on_this_dct_cpumask(mask, nid);
2930 
2931 	rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);
2932 
2933 	for_each_cpu(cpu, mask) {
2934 		struct msr *reg = per_cpu_ptr(msrs, cpu);
2935 		nbe = reg->l & MSR_MCGCTL_NBE;
2936 
2937 		edac_dbg(0, "core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
2938 			 cpu, reg->q,
2939 			 (nbe ? "enabled" : "disabled"));
2940 
2941 		if (!nbe)
2942 			goto out;
2943 	}
2944 	ret = true;
2945 
2946 out:
2947 	free_cpumask_var(mask);
2948 	return ret;
2949 }
2950 
2951 static int toggle_ecc_err_reporting(struct ecc_settings *s, u16 nid, bool on)
2952 {
2953 	cpumask_var_t cmask;
2954 	int cpu;
2955 
2956 	if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
2957 		amd64_warn("%s: error allocating mask\n", __func__);
2958 		return -ENOMEM;
2959 	}
2960 
2961 	get_cpus_on_this_dct_cpumask(cmask, nid);
2962 
2963 	rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
2964 
2965 	for_each_cpu(cpu, cmask) {
2966 
2967 		struct msr *reg = per_cpu_ptr(msrs, cpu);
2968 
2969 		if (on) {
2970 			if (reg->l & MSR_MCGCTL_NBE)
2971 				s->flags.nb_mce_enable = 1;
2972 
2973 			reg->l |= MSR_MCGCTL_NBE;
2974 		} else {
2975 			/*
2976 			 * Turn off NB MCE reporting only when it was off before
2977 			 */
2978 			if (!s->flags.nb_mce_enable)
2979 				reg->l &= ~MSR_MCGCTL_NBE;
2980 		}
2981 	}
2982 	wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
2983 
2984 	free_cpumask_var(cmask);
2985 
2986 	return 0;
2987 }
2988 
2989 static bool enable_ecc_error_reporting(struct ecc_settings *s, u16 nid,
2990 				       struct pci_dev *F3)
2991 {
2992 	bool ret = true;
2993 	u32 value, mask = 0x3;		/* UECC/CECC enable */
2994 
2995 	if (toggle_ecc_err_reporting(s, nid, ON)) {
2996 		amd64_warn("Error enabling ECC reporting over MCGCTL!\n");
2997 		return false;
2998 	}
2999 
3000 	amd64_read_pci_cfg(F3, NBCTL, &value);
3001 
3002 	s->old_nbctl   = value & mask;
3003 	s->nbctl_valid = true;
3004 
3005 	value |= mask;
3006 	amd64_write_pci_cfg(F3, NBCTL, value);
3007 
3008 	amd64_read_pci_cfg(F3, NBCFG, &value);
3009 
3010 	edac_dbg(0, "1: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
3011 		 nid, value, !!(value & NBCFG_ECC_ENABLE));
3012 
3013 	if (!(value & NBCFG_ECC_ENABLE)) {
3014 		amd64_warn("DRAM ECC disabled on this node, enabling...\n");
3015 
3016 		s->flags.nb_ecc_prev = 0;
3017 
3018 		/* Attempt to turn on DRAM ECC Enable */
3019 		value |= NBCFG_ECC_ENABLE;
3020 		amd64_write_pci_cfg(F3, NBCFG, value);
3021 
3022 		amd64_read_pci_cfg(F3, NBCFG, &value);
3023 
3024 		if (!(value & NBCFG_ECC_ENABLE)) {
3025 			amd64_warn("Hardware rejected DRAM ECC enable, "
3026 				   "check memory DIMM configuration.\n");
3027 			ret = false;
3028 		} else {
3029 			amd64_info("Hardware accepted DRAM ECC Enable\n");
3030 		}
3031 	} else {
3032 		s->flags.nb_ecc_prev = 1;
3033 	}
3034 
3035 	edac_dbg(0, "2: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
3036 		 nid, value, !!(value & NBCFG_ECC_ENABLE));
3037 
3038 	return ret;
3039 }
3040 
3041 static void restore_ecc_error_reporting(struct ecc_settings *s, u16 nid,
3042 					struct pci_dev *F3)
3043 {
3044 	u32 value, mask = 0x3;		/* UECC/CECC enable */
3045 
3046 	if (!s->nbctl_valid)
3047 		return;
3048 
3049 	amd64_read_pci_cfg(F3, NBCTL, &value);
3050 	value &= ~mask;
3051 	value |= s->old_nbctl;
3052 
3053 	amd64_write_pci_cfg(F3, NBCTL, value);
3054 
3055 	/* restore previous BIOS DRAM ECC "off" setting we force-enabled */
3056 	if (!s->flags.nb_ecc_prev) {
3057 		amd64_read_pci_cfg(F3, NBCFG, &value);
3058 		value &= ~NBCFG_ECC_ENABLE;
3059 		amd64_write_pci_cfg(F3, NBCFG, value);
3060 	}
3061 
3062 	/* restore the NB Enable MCGCTL bit */
3063 	if (toggle_ecc_err_reporting(s, nid, OFF))
3064 		amd64_warn("Error restoring NB MCGCTL settings!\n");
3065 }
3066 
3067 /*
3068  * EDAC requires that the BIOS have ECC enabled before
3069  * taking over the processing of ECC errors. A command line
3070  * option allows force-enabling hardware ECC later in
3071  * enable_ecc_error_reporting().
3072  */
3073 static const char *ecc_msg =
3074 	"ECC disabled in the BIOS or no ECC capability, module will not load.\n"
3075 	" Either enable ECC checking or force module loading by setting "
3076 	"'ecc_enable_override'.\n"
3077 	" (Note that use of the override may cause unknown side effects.)\n";
3078 
3079 static bool ecc_enabled(struct pci_dev *F3, u16 nid)
3080 {
3081 	bool nb_mce_en = false;
3082 	u8 ecc_en = 0, i;
3083 	u32 value;
3084 
3085 	if (boot_cpu_data.x86 >= 0x17) {
3086 		u8 umc_en_mask = 0, ecc_en_mask = 0;
3087 
3088 		for_each_umc(i) {
3089 			u32 base = get_umc_base(i);
3090 
3091 			/* Only check enabled UMCs. */
3092 			if (amd_smn_read(nid, base + UMCCH_SDP_CTRL, &value))
3093 				continue;
3094 
3095 			if (!(value & UMC_SDP_INIT))
3096 				continue;
3097 
3098 			umc_en_mask |= BIT(i);
3099 
3100 			if (amd_smn_read(nid, base + UMCCH_UMC_CAP_HI, &value))
3101 				continue;
3102 
3103 			if (value & UMC_ECC_ENABLED)
3104 				ecc_en_mask |= BIT(i);
3105 		}
3106 
3107 		/* Check whether at least one UMC is enabled: */
3108 		if (umc_en_mask)
3109 			ecc_en = umc_en_mask == ecc_en_mask;
3110 		else
3111 			edac_dbg(0, "Node %d: No enabled UMCs.\n", nid);
3112 
3113 		/* Assume UMC MCA banks are enabled. */
3114 		nb_mce_en = true;
3115 	} else {
3116 		amd64_read_pci_cfg(F3, NBCFG, &value);
3117 
3118 		ecc_en = !!(value & NBCFG_ECC_ENABLE);
3119 
3120 		nb_mce_en = nb_mce_bank_enabled_on_node(nid);
3121 		if (!nb_mce_en)
3122 			edac_dbg(0, "NB MCE bank disabled, set MSR 0x%08x[4] on node %d to enable.\n",
3123 				     MSR_IA32_MCG_CTL, nid);
3124 	}
3125 
3126 	amd64_info("Node %d: DRAM ECC %s.\n",
3127 		   nid, (ecc_en ? "enabled" : "disabled"));
3128 
3129 	if (!ecc_en || !nb_mce_en) {
3130 		amd64_info("%s", ecc_msg);
3131 		return false;
3132 	}
3133 	return true;
3134 }
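/*
 * Illustrative sketch (editorial, not part of the driver): on Fam17h+ the
 * node only counts as ECC-enabled when every enabled UMC reports ECC. With
 * the hypothetical masks below (two UMCs enabled, only one with ECC), the
 * result is false.
 */
#if 0
static bool umc_ecc_example(void)
{
	u8 umc_en_mask = 0x3;	/* hypothetical: UMC0 and UMC1 enabled */
	u8 ecc_en_mask = 0x1;	/* hypothetical: only UMC0 reports ECC */

	return umc_en_mask == ecc_en_mask;	/* false -> ECC treated as off */
}
#endif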
3135 
3136 static inline void
3137 f17h_determine_edac_ctl_cap(struct mem_ctl_info *mci, struct amd64_pvt *pvt)
3138 {
3139 	u8 i, ecc_en = 1, cpk_en = 1;
3140 
3141 	for_each_umc(i) {
3142 		if (pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) {
3143 			ecc_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_ENABLED);
3144 			cpk_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_CHIPKILL_CAP);
3145 		}
3146 	}
3147 
3148 	/* Set chipkill only if ECC is enabled: */
3149 	if (ecc_en) {
3150 		mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
3151 
3152 		if (cpk_en)
3153 			mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
3154 	}
3155 }
3156 
3157 static void setup_mci_misc_attrs(struct mem_ctl_info *mci,
3158 				 struct amd64_family_type *fam)
3159 {
3160 	struct amd64_pvt *pvt = mci->pvt_info;
3161 
3162 	mci->mtype_cap		= MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
3163 	mci->edac_ctl_cap	= EDAC_FLAG_NONE;
3164 
3165 	if (pvt->umc) {
3166 		f17h_determine_edac_ctl_cap(mci, pvt);
3167 	} else {
3168 		if (pvt->nbcap & NBCAP_SECDED)
3169 			mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
3170 
3171 		if (pvt->nbcap & NBCAP_CHIPKILL)
3172 			mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
3173 	}
3174 
3175 	mci->edac_cap		= determine_edac_cap(pvt);
3176 	mci->mod_name		= EDAC_MOD_STR;
3177 	mci->ctl_name		= fam->ctl_name;
3178 	mci->dev_name		= pci_name(pvt->F3);
3179 	mci->ctl_page_to_phys	= NULL;
3180 
3181 	/* memory scrubber interface */
3182 	mci->set_sdram_scrub_rate = set_scrub_rate;
3183 	mci->get_sdram_scrub_rate = get_scrub_rate;
3184 }
3185 
3186 /*
3187  * returns a pointer to the family descriptor on success, NULL otherwise.
3188  */
3189 static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
3190 {
3191 	struct amd64_family_type *fam_type = NULL;
3192 
3193 	pvt->ext_model  = boot_cpu_data.x86_model >> 4;
3194 	pvt->stepping	= boot_cpu_data.x86_stepping;
3195 	pvt->model	= boot_cpu_data.x86_model;
3196 	pvt->fam	= boot_cpu_data.x86;
3197 
3198 	switch (pvt->fam) {
3199 	case 0xf:
3200 		fam_type	= &family_types[K8_CPUS];
3201 		pvt->ops	= &family_types[K8_CPUS].ops;
3202 		break;
3203 
3204 	case 0x10:
3205 		fam_type	= &family_types[F10_CPUS];
3206 		pvt->ops	= &family_types[F10_CPUS].ops;
3207 		break;
3208 
3209 	case 0x15:
3210 		if (pvt->model == 0x30) {
3211 			fam_type = &family_types[F15_M30H_CPUS];
3212 			pvt->ops = &family_types[F15_M30H_CPUS].ops;
3213 			break;
3214 		} else if (pvt->model == 0x60) {
3215 			fam_type = &family_types[F15_M60H_CPUS];
3216 			pvt->ops = &family_types[F15_M60H_CPUS].ops;
3217 			break;
3218 		}
3219 
3220 		fam_type	= &family_types[F15_CPUS];
3221 		pvt->ops	= &family_types[F15_CPUS].ops;
3222 		break;
3223 
3224 	case 0x16:
3225 		if (pvt->model == 0x30) {
3226 			fam_type = &family_types[F16_M30H_CPUS];
3227 			pvt->ops = &family_types[F16_M30H_CPUS].ops;
3228 			break;
3229 		}
3230 		fam_type	= &family_types[F16_CPUS];
3231 		pvt->ops	= &family_types[F16_CPUS].ops;
3232 		break;
3233 
3234 	case 0x17:
3235 		if (pvt->model >= 0x10 && pvt->model <= 0x2f) {
3236 			fam_type = &family_types[F17_M10H_CPUS];
3237 			pvt->ops = &family_types[F17_M10H_CPUS].ops;
3238 			break;
3239 		} else if (pvt->model >= 0x30 && pvt->model <= 0x3f) {
3240 			fam_type = &family_types[F17_M30H_CPUS];
3241 			pvt->ops = &family_types[F17_M30H_CPUS].ops;
3242 			break;
3243 		}
3244 		/* fall through */
3245 	case 0x18:
3246 		fam_type	= &family_types[F17_CPUS];
3247 		pvt->ops	= &family_types[F17_CPUS].ops;
3248 
3249 		if (pvt->fam == 0x18)
3250 			family_types[F17_CPUS].ctl_name = "F18h";
3251 		break;
3252 
3253 	default:
3254 		amd64_err("Unsupported family!\n");
3255 		return NULL;
3256 	}
3257 
3258 	amd64_info("%s %sdetected (node %d).\n", fam_type->ctl_name,
3259 		     (pvt->fam == 0xf ?
3260 				(pvt->ext_model >= K8_REV_F  ? "revF or later "
3261 							     : "revE or earlier ")
3262 				 : ""), pvt->mc_node_id);
3263 	return fam_type;
3264 }
3265 
3266 static const struct attribute_group *amd64_edac_attr_groups[] = {
3267 #ifdef CONFIG_EDAC_DEBUG
3268 	&amd64_edac_dbg_group,
3269 #endif
3270 #ifdef CONFIG_EDAC_AMD64_ERROR_INJECTION
3271 	&amd64_edac_inj_group,
3272 #endif
3273 	NULL
3274 };
3275 
3276 /* Set the number of Unified Memory Controllers in the system. */
3277 static void compute_num_umcs(void)
3278 {
3279 	u8 model = boot_cpu_data.x86_model;
3280 
3281 	if (boot_cpu_data.x86 < 0x17)
3282 		return;
3283 
3284 	if (model >= 0x30 && model <= 0x3f)
3285 		num_umcs = 8;
3286 	else
3287 		num_umcs = 2;
3288 
3289 	edac_dbg(1, "Number of UMCs: %x\n", num_umcs);
3290 }
3291 
3292 static int init_one_instance(unsigned int nid)
3293 {
3294 	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
3295 	struct amd64_family_type *fam_type = NULL;
3296 	struct mem_ctl_info *mci = NULL;
3297 	struct edac_mc_layer layers[2];
3298 	struct amd64_pvt *pvt = NULL;
3299 	u16 pci_id1, pci_id2;
3300 	int err = 0, ret;
3301 
3302 	ret = -ENOMEM;
3303 	pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
3304 	if (!pvt)
3305 		goto err_ret;
3306 
3307 	pvt->mc_node_id	= nid;
3308 	pvt->F3 = F3;
3309 
3310 	ret = -EINVAL;
3311 	fam_type = per_family_init(pvt);
3312 	if (!fam_type)
3313 		goto err_free;
3314 
3315 	if (pvt->fam >= 0x17) {
3316 		pvt->umc = kcalloc(num_umcs, sizeof(struct amd64_umc), GFP_KERNEL);
3317 		if (!pvt->umc) {
3318 			ret = -ENOMEM;
3319 			goto err_free;
3320 		}
3321 
3322 		pci_id1 = fam_type->f0_id;
3323 		pci_id2 = fam_type->f6_id;
3324 	} else {
3325 		pci_id1 = fam_type->f1_id;
3326 		pci_id2 = fam_type->f2_id;
3327 	}
3328 
3329 	err = reserve_mc_sibling_devs(pvt, pci_id1, pci_id2);
3330 	if (err)
3331 		goto err_post_init;
3332 
3333 	read_mc_regs(pvt);
3334 
3335 	/*
3336 	 * We need to determine how many memory channels there are. Then use
3337 	 * that information for calculating the size of the dynamic instance
3338 	 * tables in the 'mci' structure.
3339 	 */
3340 	ret = -EINVAL;
3341 	pvt->channel_count = pvt->ops->early_channel_count(pvt);
3342 	if (pvt->channel_count < 0)
3343 		goto err_siblings;
3344 
3345 	ret = -ENOMEM;
3346 	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
3347 	layers[0].size = pvt->csels[0].b_cnt;
3348 	layers[0].is_virt_csrow = true;
3349 	layers[1].type = EDAC_MC_LAYER_CHANNEL;
3350 
3351 	/*
3352 	 * Always allocate two channels since we can have setups with DIMMs on
3353 	 * only one channel. Also, this simplifies handling later for the price
3354 	 * of a couple of KBs tops.
3355 	 *
3356 	 * On Fam17h+, the number of controllers may be greater than two. So set
3357 	 * the size equal to the maximum number of UMCs.
3358 	 */
3359 	if (pvt->fam >= 0x17)
3360 		layers[1].size = num_umcs;
3361 	else
3362 		layers[1].size = 2;
3363 	layers[1].is_virt_csrow = false;
3364 
3365 	mci = edac_mc_alloc(nid, ARRAY_SIZE(layers), layers, 0);
3366 	if (!mci)
3367 		goto err_siblings;
3368 
3369 	mci->pvt_info = pvt;
3370 	mci->pdev = &pvt->F3->dev;
3371 
3372 	setup_mci_misc_attrs(mci, fam_type);
3373 
3374 	if (init_csrows(mci))
3375 		mci->edac_cap = EDAC_FLAG_NONE;
3376 
3377 	ret = -ENODEV;
3378 	if (edac_mc_add_mc_with_groups(mci, amd64_edac_attr_groups)) {
3379 		edac_dbg(1, "failed edac_mc_add_mc()\n");
3380 		goto err_add_mc;
3381 	}
3382 
3383 	return 0;
3384 
3385 err_add_mc:
3386 	edac_mc_free(mci);
3387 
3388 err_siblings:
3389 	free_mc_sibling_devs(pvt);
3390 
3391 err_post_init:
3392 	if (pvt->fam >= 0x17)
3393 		kfree(pvt->umc);
3394 
3395 err_free:
3396 	kfree(pvt);
3397 
3398 err_ret:
3399 	return ret;
3400 }
3401 
3402 static int probe_one_instance(unsigned int nid)
3403 {
3404 	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
3405 	struct ecc_settings *s;
3406 	int ret;
3407 
3408 	ret = -ENOMEM;
3409 	s = kzalloc(sizeof(struct ecc_settings), GFP_KERNEL);
3410 	if (!s)
3411 		goto err_out;
3412 
3413 	ecc_stngs[nid] = s;
3414 
3415 	if (!ecc_enabled(F3, nid)) {
3416 		ret = 0;
3417 
3418 		if (!ecc_enable_override)
3419 			goto err_enable;
3420 
3421 		if (boot_cpu_data.x86 >= 0x17) {
3422 			amd64_warn("Forcing ECC on is not recommended on newer systems. Please enable ECC in BIOS.");
3423 			goto err_enable;
3424 		} else
3425 			amd64_warn("Forcing ECC on!\n");
3426 
3427 		if (!enable_ecc_error_reporting(s, nid, F3))
3428 			goto err_enable;
3429 	}
3430 
3431 	ret = init_one_instance(nid);
3432 	if (ret < 0) {
3433 		amd64_err("Error probing instance: %d\n", nid);
3434 
3435 		if (boot_cpu_data.x86 < 0x17)
3436 			restore_ecc_error_reporting(s, nid, F3);
3437 
3438 		goto err_enable;
3439 	}
3440 
3441 	return ret;
3442 
3443 err_enable:
3444 	kfree(s);
3445 	ecc_stngs[nid] = NULL;
3446 
3447 err_out:
3448 	return ret;
3449 }
3450 
3451 static void remove_one_instance(unsigned int nid)
3452 {
3453 	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
3454 	struct ecc_settings *s = ecc_stngs[nid];
3455 	struct mem_ctl_info *mci;
3456 	struct amd64_pvt *pvt;
3457 
3458 	mci = find_mci_by_dev(&F3->dev);
3459 	WARN_ON(!mci);
3460 
3461 	/* Remove from EDAC CORE tracking list */
3462 	mci = edac_mc_del_mc(&F3->dev);
3463 	if (!mci)
3464 		return;
3465 
3466 	pvt = mci->pvt_info;
3467 
3468 	restore_ecc_error_reporting(s, nid, F3);
3469 
3470 	free_mc_sibling_devs(pvt);
3471 
3472 	kfree(ecc_stngs[nid]);
3473 	ecc_stngs[nid] = NULL;
3474 
3475 	/* Free the EDAC CORE resources */
3476 	mci->pvt_info = NULL;
3477 
3478 	kfree(pvt);
3479 	edac_mc_free(mci);
3480 }
3481 
3482 static void setup_pci_device(void)
3483 {
3484 	struct mem_ctl_info *mci;
3485 	struct amd64_pvt *pvt;
3486 
3487 	if (pci_ctl)
3488 		return;
3489 
3490 	mci = edac_mc_find(0);
3491 	if (!mci)
3492 		return;
3493 
3494 	pvt = mci->pvt_info;
3495 	if (pvt->umc)
3496 		pci_ctl = edac_pci_create_generic_ctl(&pvt->F0->dev, EDAC_MOD_STR);
3497 	else
3498 		pci_ctl = edac_pci_create_generic_ctl(&pvt->F2->dev, EDAC_MOD_STR);
3499 	if (!pci_ctl) {
3500 		pr_warn("%s(): Unable to create PCI control\n", __func__);
3501 		pr_warn("%s(): PCI error report via EDAC not set\n", __func__);
3502 	}
3503 }
3504 
3505 static const struct x86_cpu_id amd64_cpuids[] = {
3506 	{ X86_VENDOR_AMD, 0xF,	X86_MODEL_ANY,	X86_FEATURE_ANY, 0 },
3507 	{ X86_VENDOR_AMD, 0x10, X86_MODEL_ANY,	X86_FEATURE_ANY, 0 },
3508 	{ X86_VENDOR_AMD, 0x15, X86_MODEL_ANY,	X86_FEATURE_ANY, 0 },
3509 	{ X86_VENDOR_AMD, 0x16, X86_MODEL_ANY,	X86_FEATURE_ANY, 0 },
3510 	{ X86_VENDOR_AMD, 0x17, X86_MODEL_ANY,	X86_FEATURE_ANY, 0 },
3511 	{ X86_VENDOR_HYGON, 0x18, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
3512 	{ }
3513 };
3514 MODULE_DEVICE_TABLE(x86cpu, amd64_cpuids);
3515 
3516 static int __init amd64_edac_init(void)
3517 {
3518 	const char *owner;
3519 	int err = -ENODEV;
3520 	int i;
3521 
3522 	owner = edac_get_owner();
3523 	if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
3524 		return -EBUSY;
3525 
3526 	if (!x86_match_cpu(amd64_cpuids))
3527 		return -ENODEV;
3528 
3529 	if (amd_cache_northbridges() < 0)
3530 		return -ENODEV;
3531 
3532 	opstate_init();
3533 
3534 	err = -ENOMEM;
3535 	ecc_stngs = kcalloc(amd_nb_num(), sizeof(ecc_stngs[0]), GFP_KERNEL);
3536 	if (!ecc_stngs)
3537 		goto err_free;
3538 
3539 	msrs = msrs_alloc();
3540 	if (!msrs)
3541 		goto err_free;
3542 
3543 	compute_num_umcs();
3544 
3545 	for (i = 0; i < amd_nb_num(); i++) {
3546 		err = probe_one_instance(i);
3547 		if (err) {
3548 			/* unwind properly */
3549 			while (--i >= 0)
3550 				remove_one_instance(i);
3551 
3552 			goto err_pci;
3553 		}
3554 	}
3555 
3556 	if (!edac_has_mcs()) {
3557 		err = -ENODEV;
3558 		goto err_pci;
3559 	}
3560 
3561 	/* register stuff with EDAC MCE */
3562 	if (report_gart_errors)
3563 		amd_report_gart_errors(true);
3564 
3565 	if (boot_cpu_data.x86 >= 0x17)
3566 		amd_register_ecc_decoder(decode_umc_error);
3567 	else
3568 		amd_register_ecc_decoder(decode_bus_error);
3569 
3570 	setup_pci_device();
3571 
3572 #ifdef CONFIG_X86_32
3573 	amd64_err("%s on 32-bit is unsupported. USE AT YOUR OWN RISK!\n", EDAC_MOD_STR);
3574 #endif
3575 
3576 	printk(KERN_INFO "AMD64 EDAC driver v%s\n", EDAC_AMD64_VERSION);
3577 
3578 	return 0;
3579 
3580 err_pci:
3581 	msrs_free(msrs);
3582 	msrs = NULL;
3583 
3584 err_free:
3585 	kfree(ecc_stngs);
3586 	ecc_stngs = NULL;
3587 
3588 	return err;
3589 }
3590 
3591 static void __exit amd64_edac_exit(void)
3592 {
3593 	int i;
3594 
3595 	if (pci_ctl)
3596 		edac_pci_release_generic_ctl(pci_ctl);
3597 
3598 	/* unregister from EDAC MCE */
3599 	amd_report_gart_errors(false);
3600 
3601 	if (boot_cpu_data.x86 >= 0x17)
3602 		amd_unregister_ecc_decoder(decode_umc_error);
3603 	else
3604 		amd_unregister_ecc_decoder(decode_bus_error);
3605 
3606 	for (i = 0; i < amd_nb_num(); i++)
3607 		remove_one_instance(i);
3608 
3609 	kfree(ecc_stngs);
3610 	ecc_stngs = NULL;
3611 
3612 	msrs_free(msrs);
3613 	msrs = NULL;
3614 }
3615 
3616 module_init(amd64_edac_init);
3617 module_exit(amd64_edac_exit);
3618 
3619 MODULE_LICENSE("GPL");
3620 MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, "
3621 		"Dave Peterson, Thayne Harbaugh");
3622 MODULE_DESCRIPTION("MC support for AMD64 memory controllers - "
3623 		EDAC_AMD64_VERSION);
3624 
3625 module_param(edac_op_state, int, 0444);
3626 MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
3627