xref: /openbmc/linux/drivers/edac/amd64_edac.c (revision 7b73a9c8)
1 // SPDX-License-Identifier: GPL-2.0-only
2 #include "amd64_edac.h"
3 #include <asm/amd_nb.h>
4 
5 static struct edac_pci_ctl_info *pci_ctl;
6 
7 static int report_gart_errors;
8 module_param(report_gart_errors, int, 0644);
9 
10 /*
11  * Set by command line parameter. If BIOS has enabled the ECC, this override is
12  * cleared to prevent re-enabling the hardware by this driver.
13  */
14 static int ecc_enable_override;
15 module_param(ecc_enable_override, int, 0644);
16 
17 static struct msr __percpu *msrs;
18 
19 static struct amd64_family_type *fam_type;
20 
21 /* Per-node stuff */
22 static struct ecc_settings **ecc_stngs;
23 
24 /*
25  * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
26  * bandwidth to a valid bit pattern. The 'set' operation finds the 'matching-
27  * or higher value'.
28  *
29  *FIXME: Produce a better mapping/linearisation.
30  */
31 static const struct scrubrate {
32 	u32 scrubval;		/* bit pattern for scrub rate */
33 	u32 bandwidth;		/* bandwidth consumed (bytes/sec) */
34 } scrubrates[] = {
35 	{ 0x01, 1600000000UL},
36 	{ 0x02, 800000000UL},
37 	{ 0x03, 400000000UL},
38 	{ 0x04, 200000000UL},
39 	{ 0x05, 100000000UL},
40 	{ 0x06, 50000000UL},
41 	{ 0x07, 25000000UL},
42 	{ 0x08, 12284069UL},
43 	{ 0x09, 6274509UL},
44 	{ 0x0A, 3121951UL},
45 	{ 0x0B, 1560975UL},
46 	{ 0x0C, 781440UL},
47 	{ 0x0D, 390720UL},
48 	{ 0x0E, 195300UL},
49 	{ 0x0F, 97650UL},
50 	{ 0x10, 48854UL},
51 	{ 0x11, 24427UL},
52 	{ 0x12, 12213UL},
53 	{ 0x13, 6101UL},
54 	{ 0x14, 3051UL},
55 	{ 0x15, 1523UL},
56 	{ 0x16, 761UL},
57 	{ 0x00, 0UL},        /* scrubbing off */
58 };
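
/*
 * Note: scrubrates[] is ordered by decreasing bandwidth; __set_scrub_rate()
 * below relies on that ordering when it scans for the first entry whose
 * bandwidth does not exceed the requested rate.
 */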
59 
60 int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
61 			       u32 *val, const char *func)
62 {
63 	int err = 0;
64 
65 	err = pci_read_config_dword(pdev, offset, val);
66 	if (err)
67 		amd64_warn("%s: error reading F%dx%03x.\n",
68 			   func, PCI_FUNC(pdev->devfn), offset);
69 
70 	return err;
71 }
72 
73 int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
74 				u32 val, const char *func)
75 {
76 	int err = 0;
77 
78 	err = pci_write_config_dword(pdev, offset, val);
79 	if (err)
80 		amd64_warn("%s: error writing to F%dx%03x.\n",
81 			   func, PCI_FUNC(pdev->devfn), offset);
82 
83 	return err;
84 }
85 
86 /*
87  * Select DCT to which PCI cfg accesses are routed
88  */
89 static void f15h_select_dct(struct amd64_pvt *pvt, u8 dct)
90 {
91 	u32 reg = 0;
92 
93 	amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, &reg);
94 	reg &= (pvt->model == 0x30) ? ~3 : ~1;
95 	reg |= dct;
96 	amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg);
97 }
98 
99 /*
100  *
101  * Depending on the family, F2 DCT reads need special handling:
102  *
103  * K8: has a single DCT only and no address offsets >= 0x100
104  *
105  * F10h: each DCT has its own set of regs
106  *	DCT0 -> F2x040..
107  *	DCT1 -> F2x140..
108  *
109  * F16h: has only 1 DCT
110  *
111  * F15h: we select which DCT we access using F1x10C[DctCfgSel]
112  */
113 static inline int amd64_read_dct_pci_cfg(struct amd64_pvt *pvt, u8 dct,
114 					 int offset, u32 *val)
115 {
116 	switch (pvt->fam) {
117 	case 0xf:
118 		if (dct || offset >= 0x100)
119 			return -EINVAL;
120 		break;
121 
122 	case 0x10:
123 		if (dct) {
124 			/*
125 			 * Note: If ganging is enabled, barring the regs
126 			 * F2x[1,0]98 and F2x[1,0]9C, reads to F2x1xx
127 			 * return 0. (cf. Section 2.8.1 F10h BKDG)
128 			 */
129 			if (dct_ganging_enabled(pvt))
130 				return 0;
131 
132 			offset += 0x100;
133 		}
134 		break;
135 
136 	case 0x15:
137 		/*
138 		 * F15h: F2x1xx addresses do not map explicitly to DCT1.
139 		 * We should select which DCT we access using F1x10C[DctCfgSel]
140 		 */
141 		dct = (dct && pvt->model == 0x30) ? 3 : dct;
142 		f15h_select_dct(pvt, dct);
143 		break;
144 
145 	case 0x16:
146 		if (dct)
147 			return -EINVAL;
148 		break;
149 
150 	default:
151 		break;
152 	}
153 	return amd64_read_pci_cfg(pvt->F2, offset, val);
154 }
155 
156 /*
157  * Memory scrubber control interface. For K8, memory scrubbing is handled by
158  * hardware and can involve L2 cache, dcache as well as the main memory. With
159  * F10, this is extended to L3 cache scrubbing on CPU models sporting that
160  * functionality.
161  *
162  * This causes the "units" for the scrubbing speed to vary from 64-byte blocks
163  * (DRAM) to cache lines. This is nasty, so we will use bandwidth in
164  * bytes/sec for the setting.
165  *
166  * Currently, we only do dram scrubbing. If the scrubbing is done in software on
167  * other archs, we might not have access to the caches directly.
168  */
169 
170 static inline void __f17h_set_scrubval(struct amd64_pvt *pvt, u32 scrubval)
171 {
172 	/*
173 	 * Fam17h supports scrub values between 0x5 and 0x14. Also, the values
174 	 * are shifted down by 0x5, so scrubval 0x5 is written to the register
175 	 * as 0x0, scrubval 0x6 as 0x1, etc.
176 	 */
177 	if (scrubval >= 0x5 && scrubval <= 0x14) {
178 		scrubval -= 0x5;
179 		pci_write_bits32(pvt->F6, F17H_SCR_LIMIT_ADDR, scrubval, 0xF);
180 		pci_write_bits32(pvt->F6, F17H_SCR_BASE_ADDR, 1, 0x1);
181 	} else {
182 		pci_write_bits32(pvt->F6, F17H_SCR_BASE_ADDR, 0, 0x1);
183 	}
184 }
185 /*
186  * Scan the scrub rate mapping table for a close or matching bandwidth value to
187  * issue. If the requested rate is too big, use the last maximum value found.
188  */
189 static int __set_scrub_rate(struct amd64_pvt *pvt, u32 new_bw, u32 min_rate)
190 {
191 	u32 scrubval;
192 	int i;
193 
194 	/*
195 	 * Map the configured rate (new_bw) to a value specific to the AMD64
196 	 * memory controller and apply it to the register. Search for the first
197 	 * bandwidth entry that does not exceed the requested setting and
198 	 * program that. If at the last entry, turn off DRAM scrubbing.
199 	 *
200 	 * If no suitable bandwidth is found, turn off DRAM scrubbing entirely
201 	 * by falling back to the last element in scrubrates[].
202 	 */
203 	for (i = 0; i < ARRAY_SIZE(scrubrates) - 1; i++) {
204 		/*
205 		 * skip scrub rates which aren't recommended
206 		 * (see F10 BKDG, F3x58)
207 		 */
208 		if (scrubrates[i].scrubval < min_rate)
209 			continue;
210 
211 		if (scrubrates[i].bandwidth <= new_bw)
212 			break;
213 	}
214 
215 	scrubval = scrubrates[i].scrubval;
216 
217 	if (pvt->fam == 0x17 || pvt->fam == 0x18) {
218 		__f17h_set_scrubval(pvt, scrubval);
219 	} else if (pvt->fam == 0x15 && pvt->model == 0x60) {
220 		f15h_select_dct(pvt, 0);
221 		pci_write_bits32(pvt->F2, F15H_M60H_SCRCTRL, scrubval, 0x001F);
222 		f15h_select_dct(pvt, 1);
223 		pci_write_bits32(pvt->F2, F15H_M60H_SCRCTRL, scrubval, 0x001F);
224 	} else {
225 		pci_write_bits32(pvt->F3, SCRCTRL, scrubval, 0x001F);
226 	}
227 
228 	if (scrubval)
229 		return scrubrates[i].bandwidth;
230 
231 	return 0;
232 }
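
/*
 * Illustrative example (values chosen for this comment, not from the BKDG):
 * a __set_scrub_rate() call with new_bw = 30000000 bytes/sec and min_rate =
 * 0x5 skips the entries below min_rate, passes 0x05 (100 MB/s) and 0x06
 * (50 MB/s) because they exceed the request, stops at 0x07 (25 MB/s), and
 * returns 25000000.
 */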
233 
234 static int set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
235 {
236 	struct amd64_pvt *pvt = mci->pvt_info;
237 	u32 min_scrubrate = 0x5;
238 
239 	if (pvt->fam == 0xf)
240 		min_scrubrate = 0x0;
241 
242 	if (pvt->fam == 0x15) {
243 		/* Erratum #505 */
244 		if (pvt->model < 0x10)
245 			f15h_select_dct(pvt, 0);
246 
247 		if (pvt->model == 0x60)
248 			min_scrubrate = 0x6;
249 	}
250 	return __set_scrub_rate(pvt, bw, min_scrubrate);
251 }
252 
253 static int get_scrub_rate(struct mem_ctl_info *mci)
254 {
255 	struct amd64_pvt *pvt = mci->pvt_info;
256 	int i, retval = -EINVAL;
257 	u32 scrubval = 0;
258 
259 	switch (pvt->fam) {
260 	case 0x15:
261 		/* Erratum #505 */
262 		if (pvt->model < 0x10)
263 			f15h_select_dct(pvt, 0);
264 
265 		if (pvt->model == 0x60)
266 			amd64_read_pci_cfg(pvt->F2, F15H_M60H_SCRCTRL, &scrubval);
267 		break;
268 
269 	case 0x17:
270 	case 0x18:
271 		amd64_read_pci_cfg(pvt->F6, F17H_SCR_BASE_ADDR, &scrubval);
272 		if (scrubval & BIT(0)) {
273 			amd64_read_pci_cfg(pvt->F6, F17H_SCR_LIMIT_ADDR, &scrubval);
274 			scrubval &= 0xF;
275 			scrubval += 0x5;
276 		} else {
277 			scrubval = 0;
278 		}
279 		break;
280 
281 	default:
282 		amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
283 		break;
284 	}
285 
286 	scrubval = scrubval & 0x001F;
287 
288 	for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
289 		if (scrubrates[i].scrubval == scrubval) {
290 			retval = scrubrates[i].bandwidth;
291 			break;
292 		}
293 	}
294 	return retval;
295 }
296 
297 /*
298  * returns true if the SysAddr given by sys_addr matches the
299  * DRAM base/limit associated with node_id
300  */
301 static bool base_limit_match(struct amd64_pvt *pvt, u64 sys_addr, u8 nid)
302 {
303 	u64 addr;
304 
305 	/* The K8 treats this as a 40-bit value.  However, bits 63-40 will be
306 	 * all ones if the most significant implemented address bit is 1.
307 	 * Here we discard bits 63-40.  See section 3.4.2 of AMD publication
308 	 * 24592: AMD x86-64 Architecture Programmer's Manual Volume 1
309 	 * Application Programming.
310 	 */
311 	addr = sys_addr & 0x000000ffffffffffull;
312 
313 	return ((addr >= get_dram_base(pvt, nid)) &&
314 		(addr <= get_dram_limit(pvt, nid)));
315 }
316 
317 /*
318  * Attempt to map a SysAddr to a node. On success, return a pointer to the
319  * mem_ctl_info structure for the node that the SysAddr maps to.
320  *
321  * On failure, return NULL.
322  */
323 static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
324 						u64 sys_addr)
325 {
326 	struct amd64_pvt *pvt;
327 	u8 node_id;
328 	u32 intlv_en, bits;
329 
330 	/*
331 	 * Here we use the DRAM Base (section 3.4.4.1) and DRAM Limit (section
332 	 * 3.4.4.2) registers to map the SysAddr to a node ID.
333 	 */
334 	pvt = mci->pvt_info;
335 
336 	/*
337 	 * The value of this field should be the same for all DRAM Base
338 	 * registers.  Therefore we arbitrarily choose to read it from the
339 	 * register for node 0.
340 	 */
341 	intlv_en = dram_intlv_en(pvt, 0);
342 
343 	if (intlv_en == 0) {
344 		for (node_id = 0; node_id < DRAM_RANGES; node_id++) {
345 			if (base_limit_match(pvt, sys_addr, node_id))
346 				goto found;
347 		}
348 		goto err_no_match;
349 	}
350 
351 	if (unlikely((intlv_en != 0x01) &&
352 		     (intlv_en != 0x03) &&
353 		     (intlv_en != 0x07))) {
354 		amd64_warn("DRAM Base[IntlvEn] junk value: 0x%x, BIOS bug?\n", intlv_en);
355 		return NULL;
356 	}
357 
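	/*
	 * Node interleaving is enabled: IntlvEn selects how many address bits
	 * starting at bit 12 pick the node, e.g. intlv_en = 0x3 means a
	 * 4-node interleave and sys_addr[13:12] is compared against each
	 * node's IntlvSel field below.
	 */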
358 	bits = (((u32) sys_addr) >> 12) & intlv_en;
359 
360 	for (node_id = 0; ; ) {
361 		if ((dram_intlv_sel(pvt, node_id) & intlv_en) == bits)
362 			break;	/* intlv_sel field matches */
363 
364 		if (++node_id >= DRAM_RANGES)
365 			goto err_no_match;
366 	}
367 
368 	/* sanity test for sys_addr */
369 	if (unlikely(!base_limit_match(pvt, sys_addr, node_id))) {
370 		amd64_warn("%s: sys_addr 0x%llx falls outside base/limit address "
371 			   "range for node %d with node interleaving enabled.\n",
372 			   __func__, sys_addr, node_id);
373 		return NULL;
374 	}
375 
376 found:
377 	return edac_mc_find((int)node_id);
378 
379 err_no_match:
380 	edac_dbg(2, "sys_addr 0x%lx doesn't match any node\n",
381 		 (unsigned long)sys_addr);
382 
383 	return NULL;
384 }
385 
386 /*
387  * compute the CS base address of the @csrow on the DRAM controller @dct.
388  * For details see F2x[5C:40] in the processor's BKDG
389  */
390 static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
391 				 u64 *base, u64 *mask)
392 {
393 	u64 csbase, csmask, base_bits, mask_bits;
394 	u8 addr_shift;
395 
396 	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
397 		csbase		= pvt->csels[dct].csbases[csrow];
398 		csmask		= pvt->csels[dct].csmasks[csrow];
399 		base_bits	= GENMASK_ULL(31, 21) | GENMASK_ULL(15, 9);
400 		mask_bits	= GENMASK_ULL(29, 21) | GENMASK_ULL(15, 9);
401 		addr_shift	= 4;
402 
403 	/*
404 	 * F16h and F15h, models 30h and later need two addr_shift values:
405 	 * 8 for high and 6 for low (cf. F16h BKDG).
406 	 */
407 	} else if (pvt->fam == 0x16 ||
408 		  (pvt->fam == 0x15 && pvt->model >= 0x30)) {
409 		csbase          = pvt->csels[dct].csbases[csrow];
410 		csmask          = pvt->csels[dct].csmasks[csrow >> 1];
411 
412 		*base  = (csbase & GENMASK_ULL(15,  5)) << 6;
413 		*base |= (csbase & GENMASK_ULL(30, 19)) << 8;
414 
415 		*mask = ~0ULL;
416 		/* poke holes for the csmask */
417 		*mask &= ~((GENMASK_ULL(15, 5)  << 6) |
418 			   (GENMASK_ULL(30, 19) << 8));
419 
420 		*mask |= (csmask & GENMASK_ULL(15, 5))  << 6;
421 		*mask |= (csmask & GENMASK_ULL(30, 19)) << 8;
422 
423 		return;
424 	} else {
425 		csbase		= pvt->csels[dct].csbases[csrow];
426 		csmask		= pvt->csels[dct].csmasks[csrow >> 1];
427 		addr_shift	= 8;
428 
429 		if (pvt->fam == 0x15)
430 			base_bits = mask_bits =
431 				GENMASK_ULL(30, 19) | GENMASK_ULL(13, 5);
432 		else
433 			base_bits = mask_bits =
434 				GENMASK_ULL(28, 19) | GENMASK_ULL(13, 5);
435 	}
436 
437 	*base  = (csbase & base_bits) << addr_shift;
438 
439 	*mask  = ~0ULL;
440 	/* poke holes for the csmask */
441 	*mask &= ~(mask_bits << addr_shift);
442 	/* OR them in */
443 	*mask |= (csmask & mask_bits) << addr_shift;
444 }
445 
446 #define for_each_chip_select(i, dct, pvt) \
447 	for (i = 0; i < pvt->csels[dct].b_cnt; i++)
448 
449 #define chip_select_base(i, dct, pvt) \
450 	pvt->csels[dct].csbases[i]
451 
452 #define for_each_chip_select_mask(i, dct, pvt) \
453 	for (i = 0; i < pvt->csels[dct].m_cnt; i++)
454 
455 #define for_each_umc(i) \
456 	for (i = 0; i < fam_type->max_mcs; i++)
457 
458 /*
459  * @input_addr is an InputAddr associated with the node given by mci. Return the
460  * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
461  */
462 static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
463 {
464 	struct amd64_pvt *pvt;
465 	int csrow;
466 	u64 base, mask;
467 
468 	pvt = mci->pvt_info;
469 
470 	for_each_chip_select(csrow, 0, pvt) {
471 		if (!csrow_enabled(csrow, 0, pvt))
472 			continue;
473 
474 		get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);
475 
476 		mask = ~mask;
477 
478 		if ((input_addr & mask) == (base & mask)) {
479 			edac_dbg(2, "InputAddr 0x%lx matches csrow %d (node %d)\n",
480 				 (unsigned long)input_addr, csrow,
481 				 pvt->mc_node_id);
482 
483 			return csrow;
484 		}
485 	}
486 	edac_dbg(2, "no matching csrow for InputAddr 0x%lx (MC node %d)\n",
487 		 (unsigned long)input_addr, pvt->mc_node_id);
488 
489 	return -1;
490 }
491 
492 /*
493  * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094)
494  * for the node represented by mci. Info is passed back in *hole_base,
495  * *hole_offset, and *hole_size.  Function returns 0 if info is valid or 1 if
496  * info is invalid. Info may be invalid for either of the following reasons:
497  *
498  * - The revision of the node is not E or greater.  In this case, the DRAM Hole
499  *   Address Register does not exist.
500  *
501  * - The DramHoleValid bit is cleared in the DRAM Hole Address Register,
502  *   indicating that its contents are not valid.
503  *
504  * The values passed back in *hole_base, *hole_offset, and *hole_size are
505  * complete 32-bit values despite the fact that the bitfields in the DHAR
506  * only represent bits 31-24 of the base and offset values.
507  */
508 int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
509 			     u64 *hole_offset, u64 *hole_size)
510 {
511 	struct amd64_pvt *pvt = mci->pvt_info;
512 
513 	/* only revE and later have the DRAM Hole Address Register */
514 	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_E) {
515 		edac_dbg(1, "  revision %d for node %d does not support DHAR\n",
516 			 pvt->ext_model, pvt->mc_node_id);
517 		return 1;
518 	}
519 
520 	/* valid for Fam10h and above */
521 	if (pvt->fam >= 0x10 && !dhar_mem_hoist_valid(pvt)) {
522 		edac_dbg(1, "  Dram Memory Hoisting is DISABLED on this system\n");
523 		return 1;
524 	}
525 
526 	if (!dhar_valid(pvt)) {
527 		edac_dbg(1, "  Dram Memory Hoisting is DISABLED on this node %d\n",
528 			 pvt->mc_node_id);
529 		return 1;
530 	}
531 
532 	/* This node has Memory Hoisting */
533 
534 	/* +------------------+--------------------+--------------------+-----
535 	 * | memory           | DRAM hole          | relocated          |
536 	 * | [0, (x - 1)]     | [x, 0xffffffff]    | addresses from     |
537 	 * |                  |                    | DRAM hole          |
538 	 * |                  |                    | [0x100000000,      |
539 	 * |                  |                    |  (0x100000000+     |
540 	 * |                  |                    |   (0xffffffff-x))] |
541 	 * +------------------+--------------------+--------------------+-----
542 	 *
543 	 * Above is a diagram of physical memory showing the DRAM hole and the
544 	 * relocated addresses from the DRAM hole.  As shown, the DRAM hole
545 	 * starts at address x (the base address) and extends through address
546 	 * 0xffffffff.  The DRAM Hole Address Register (DHAR) relocates the
547 	 * addresses in the hole so that they start at 0x100000000.
548 	 */
549 
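	/*
	 * For illustration: with a 1 GB hole whose base is hole_base =
	 * 0xc0000000, hole_size below works out to 0x100000000 - 0xc0000000 =
	 * 0x40000000.
	 */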
550 	*hole_base = dhar_base(pvt);
551 	*hole_size = (1ULL << 32) - *hole_base;
552 
553 	*hole_offset = (pvt->fam > 0xf) ? f10_dhar_offset(pvt)
554 					: k8_dhar_offset(pvt);
555 
556 	edac_dbg(1, "  DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
557 		 pvt->mc_node_id, (unsigned long)*hole_base,
558 		 (unsigned long)*hole_offset, (unsigned long)*hole_size);
559 
560 	return 0;
561 }
562 EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info);
563 
564 /*
565  * Return the DramAddr that the SysAddr given by @sys_addr maps to.  It is
566  * assumed that sys_addr maps to the node given by mci.
567  *
568  * The first part of section 3.4.4 (p. 70) shows how the DRAM Base (section
569  * 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers are used to translate a
570  * SysAddr to a DramAddr. If the DRAM Hole Address Register (DHAR) is enabled,
571  * then it is also involved in translating a SysAddr to a DramAddr. Sections
572  * 3.4.8 and 3.5.8.2 describe the DHAR and how it is used for memory hoisting.
573  * These parts of the documentation are unclear. I interpret them as follows:
574  *
575  * When node n receives a SysAddr, it processes the SysAddr as follows:
576  *
577  * 1. It extracts the DRAMBase and DRAMLimit values from the DRAM Base and DRAM
578  *    Limit registers for node n. If the SysAddr is not within the range
579  *    specified by the base and limit values, then node n ignores the SysAddr
580  *    (since it does not map to node n). Otherwise continue to step 2 below.
581  *
582  * 2. If the DramHoleValid bit of the DHAR for node n is clear, the DHAR is
583  *    disabled so skip to step 3 below. Otherwise see if the SysAddr is within
584  *    the range of relocated addresses (starting at 0x100000000) from the DRAM
585  *    hole. If not, skip to step 3 below. Else get the value of the
586  *    DramHoleOffset field from the DHAR. To obtain the DramAddr, subtract the
587  *    offset defined by this value from the SysAddr.
588  *
589  * 3. Obtain the base address for node n from the DRAMBase field of the DRAM
590  *    Base register for node n. To obtain the DramAddr, subtract the base
591  *    address from the SysAddr, as shown near the start of section 3.4.4 (p.70).
592  */
593 static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
594 {
595 	struct amd64_pvt *pvt = mci->pvt_info;
596 	u64 dram_base, hole_base, hole_offset, hole_size, dram_addr;
597 	int ret;
598 
599 	dram_base = get_dram_base(pvt, pvt->mc_node_id);
600 
601 	ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
602 				      &hole_size);
603 	if (!ret) {
604 		if ((sys_addr >= (1ULL << 32)) &&
605 		    (sys_addr < ((1ULL << 32) + hole_size))) {
606 			/* use DHAR to translate SysAddr to DramAddr */
607 			dram_addr = sys_addr - hole_offset;
608 
609 			edac_dbg(2, "using DHAR to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
610 				 (unsigned long)sys_addr,
611 				 (unsigned long)dram_addr);
612 
613 			return dram_addr;
614 		}
615 	}
616 
617 	/*
618 	 * Translate the SysAddr to a DramAddr as shown near the start of
619 	 * section 3.4.4 (p. 70).  Although sys_addr is a 64-bit value, the k8
620 	 * only deals with 40-bit values.  Therefore we discard bits 63-40 of
621 	 * sys_addr below.  If bit 39 of sys_addr is 1 then the bits we
622 	 * discard are all 1s.  Otherwise the bits we discard are all 0s.  See
623 	 * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture
624 	 * Programmer's Manual Volume 1 Application Programming.
625 	 */
626 	dram_addr = (sys_addr & GENMASK_ULL(39, 0)) - dram_base;
627 
628 	edac_dbg(2, "using DRAM Base register to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
629 		 (unsigned long)sys_addr, (unsigned long)dram_addr);
630 	return dram_addr;
631 }
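
/*
 * Example of the non-hoisted path above (illustrative values): with
 * dram_base = 0x100000000 for this node, a sys_addr of 0x123456000 yields
 * dram_addr = 0x123456000 - 0x100000000 = 0x23456000.
 */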
632 
633 /*
634  * @intlv_en is the value of the IntlvEn field from a DRAM Base register
635  * (section 3.4.4.1).  Return the number of bits from a SysAddr that are used
636  * for node interleaving.
637  */
638 static int num_node_interleave_bits(unsigned intlv_en)
639 {
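	/* Map IntlvEn (0, 001b, 011b or 111b) to 0, 1, 2 or 3 node-select bits. */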
640 	static const int intlv_shift_table[] = { 0, 1, 0, 2, 0, 0, 0, 3 };
641 	int n;
642 
643 	BUG_ON(intlv_en > 7);
644 	n = intlv_shift_table[intlv_en];
645 	return n;
646 }
647 
648 /* Translate the DramAddr given by @dram_addr to an InputAddr. */
649 static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
650 {
651 	struct amd64_pvt *pvt;
652 	int intlv_shift;
653 	u64 input_addr;
654 
655 	pvt = mci->pvt_info;
656 
657 	/*
658 	 * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
659 	 * concerning translating a DramAddr to an InputAddr.
660 	 */
661 	intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
662 	input_addr = ((dram_addr >> intlv_shift) & GENMASK_ULL(35, 12)) +
663 		      (dram_addr & 0xfff);
664 
665 	edac_dbg(2, "  Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
666 		 intlv_shift, (unsigned long)dram_addr,
667 		 (unsigned long)input_addr);
668 
669 	return input_addr;
670 }
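
/*
 * For instance, with a single node-interleave bit (intlv_shift = 1) the
 * computation above maps DramAddr[36:13] down to InputAddr[35:12], drops the
 * removed interleave bit DramAddr[12], and keeps the page offset
 * DramAddr[11:0] intact.
 */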
671 
672 /*
673  * Translate the SysAddr represented by @sys_addr to an InputAddr.  It is
674  * assumed that @sys_addr maps to the node given by mci.
675  */
676 static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
677 {
678 	u64 input_addr;
679 
680 	input_addr =
681 	    dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr));
682 
683 	edac_dbg(2, "SysAddr 0x%lx translates to InputAddr 0x%lx\n",
684 		 (unsigned long)sys_addr, (unsigned long)input_addr);
685 
686 	return input_addr;
687 }
688 
689 /* Map the Error address to a PAGE and PAGE OFFSET. */
690 static inline void error_address_to_page_and_offset(u64 error_address,
691 						    struct err_info *err)
692 {
693 	err->page = (u32) (error_address >> PAGE_SHIFT);
694 	err->offset = ((u32) error_address) & ~PAGE_MASK;
695 }
696 
697 /*
698  * @sys_addr is an error address (a SysAddr) extracted from the MCA NB Address
699  * Low (section 3.6.4.5) and MCA NB Address High (section 3.6.4.6) registers
700  * of a node that detected an ECC memory error.  mci represents the node that
701  * the error address maps to (possibly different from the node that detected
702  * the error).  Return the number of the csrow that sys_addr maps to, or -1 on
703  * error.
704  */
705 static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
706 {
707 	int csrow;
708 
709 	csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr));
710 
711 	if (csrow == -1)
712 		amd64_mc_err(mci, "Failed to translate InputAddr to csrow for "
713 				  "address 0x%lx\n", (unsigned long)sys_addr);
714 	return csrow;
715 }
716 
717 static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);
718 
719 /*
720  * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
721  * are ECC capable.
722  */
723 static unsigned long determine_edac_cap(struct amd64_pvt *pvt)
724 {
725 	unsigned long edac_cap = EDAC_FLAG_NONE;
726 	u8 bit;
727 
728 	if (pvt->umc) {
729 		u8 i, umc_en_mask = 0, dimm_ecc_en_mask = 0;
730 
731 		for_each_umc(i) {
732 			if (!(pvt->umc[i].sdp_ctrl & UMC_SDP_INIT))
733 				continue;
734 
735 			umc_en_mask |= BIT(i);
736 
737 			/* UMC Configuration bit 12 (DimmEccEn) */
738 			if (pvt->umc[i].umc_cfg & BIT(12))
739 				dimm_ecc_en_mask |= BIT(i);
740 		}
741 
742 		if (umc_en_mask == dimm_ecc_en_mask)
743 			edac_cap = EDAC_FLAG_SECDED;
744 	} else {
745 		bit = (pvt->fam > 0xf || pvt->ext_model >= K8_REV_F)
746 			? 19
747 			: 17;
748 
749 		if (pvt->dclr0 & BIT(bit))
750 			edac_cap = EDAC_FLAG_SECDED;
751 	}
752 
753 	return edac_cap;
754 }
755 
756 static void debug_display_dimm_sizes(struct amd64_pvt *, u8);
757 
758 static void debug_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan)
759 {
760 	edac_dbg(1, "F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);
761 
762 	if (pvt->dram_type == MEM_LRDDR3) {
763 		u32 dcsm = pvt->csels[chan].csmasks[0];
764 		/*
765 		 * It's assumed all LRDIMMs in a DCT are going to be of the
766 		 * same 'type' until proven otherwise. So, use a cs
767 		 * value of '0' here to get the dcsm value.
768 		 */
769 		edac_dbg(1, " LRDIMM %dx rank multiply\n", (dcsm & 0x3));
770 	}
771 
772 	edac_dbg(1, "All DIMMs support ECC: %s\n",
773 		    (dclr & BIT(19)) ? "yes" : "no");
774 
775 
776 	edac_dbg(1, "  PAR/ERR parity: %s\n",
777 		 (dclr & BIT(8)) ?  "enabled" : "disabled");
778 
779 	if (pvt->fam == 0x10)
780 		edac_dbg(1, "  DCT 128bit mode width: %s\n",
781 			 (dclr & BIT(11)) ?  "128b" : "64b");
782 
783 	edac_dbg(1, "  x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
784 		 (dclr & BIT(12)) ?  "yes" : "no",
785 		 (dclr & BIT(13)) ?  "yes" : "no",
786 		 (dclr & BIT(14)) ?  "yes" : "no",
787 		 (dclr & BIT(15)) ?  "yes" : "no");
788 }
789 
790 #define CS_EVEN_PRIMARY		BIT(0)
791 #define CS_ODD_PRIMARY		BIT(1)
792 #define CS_EVEN_SECONDARY	BIT(2)
793 #define CS_ODD_SECONDARY	BIT(3)
794 
795 #define CS_EVEN			(CS_EVEN_PRIMARY | CS_EVEN_SECONDARY)
796 #define CS_ODD			(CS_ODD_PRIMARY | CS_ODD_SECONDARY)
797 
798 static int f17_get_cs_mode(int dimm, u8 ctrl, struct amd64_pvt *pvt)
799 {
800 	int cs_mode = 0;
801 
802 	if (csrow_enabled(2 * dimm, ctrl, pvt))
803 		cs_mode |= CS_EVEN_PRIMARY;
804 
805 	if (csrow_enabled(2 * dimm + 1, ctrl, pvt))
806 		cs_mode |= CS_ODD_PRIMARY;
807 
808 	/* Asymmetric dual-rank DIMM support. */
809 	if (csrow_sec_enabled(2 * dimm + 1, ctrl, pvt))
810 		cs_mode |= CS_ODD_SECONDARY;
811 
812 	return cs_mode;
813 }
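
/*
 * For example (assuming both chip selects of a dual-rank DIMM are enabled),
 * f17_get_cs_mode() returns CS_EVEN_PRIMARY | CS_ODD_PRIMARY, while a
 * single-rank DIMM with only the even chip select enabled yields just
 * CS_EVEN_PRIMARY.
 */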
814 
815 static void debug_display_dimm_sizes_df(struct amd64_pvt *pvt, u8 ctrl)
816 {
817 	int dimm, size0, size1, cs0, cs1, cs_mode;
818 
819 	edac_printk(KERN_DEBUG, EDAC_MC, "UMC%d chip selects:\n", ctrl);
820 
821 	for (dimm = 0; dimm < 2; dimm++) {
822 		cs0 = dimm * 2;
823 		cs1 = dimm * 2 + 1;
824 
825 		cs_mode = f17_get_cs_mode(dimm, ctrl, pvt);
826 
827 		size0 = pvt->ops->dbam_to_cs(pvt, ctrl, cs_mode, cs0);
828 		size1 = pvt->ops->dbam_to_cs(pvt, ctrl, cs_mode, cs1);
829 
830 		amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
831 				cs0,	size0,
832 				cs1,	size1);
833 	}
834 }
835 
836 static void __dump_misc_regs_df(struct amd64_pvt *pvt)
837 {
838 	struct amd64_umc *umc;
839 	u32 i, tmp, umc_base;
840 
841 	for_each_umc(i) {
842 		umc_base = get_umc_base(i);
843 		umc = &pvt->umc[i];
844 
845 		edac_dbg(1, "UMC%d DIMM cfg: 0x%x\n", i, umc->dimm_cfg);
846 		edac_dbg(1, "UMC%d UMC cfg: 0x%x\n", i, umc->umc_cfg);
847 		edac_dbg(1, "UMC%d SDP ctrl: 0x%x\n", i, umc->sdp_ctrl);
848 		edac_dbg(1, "UMC%d ECC ctrl: 0x%x\n", i, umc->ecc_ctrl);
849 
850 		amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_ECC_BAD_SYMBOL, &tmp);
851 		edac_dbg(1, "UMC%d ECC bad symbol: 0x%x\n", i, tmp);
852 
853 		amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_UMC_CAP, &tmp);
854 		edac_dbg(1, "UMC%d UMC cap: 0x%x\n", i, tmp);
855 		edac_dbg(1, "UMC%d UMC cap high: 0x%x\n", i, umc->umc_cap_hi);
856 
857 		edac_dbg(1, "UMC%d ECC capable: %s, ChipKill ECC capable: %s\n",
858 				i, (umc->umc_cap_hi & BIT(30)) ? "yes" : "no",
859 				    (umc->umc_cap_hi & BIT(31)) ? "yes" : "no");
860 		edac_dbg(1, "UMC%d All DIMMs support ECC: %s\n",
861 				i, (umc->umc_cfg & BIT(12)) ? "yes" : "no");
862 		edac_dbg(1, "UMC%d x4 DIMMs present: %s\n",
863 				i, (umc->dimm_cfg & BIT(6)) ? "yes" : "no");
864 		edac_dbg(1, "UMC%d x16 DIMMs present: %s\n",
865 				i, (umc->dimm_cfg & BIT(7)) ? "yes" : "no");
866 
867 		if (pvt->dram_type == MEM_LRDDR4) {
868 			amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_ADDR_CFG, &tmp);
869 			edac_dbg(1, "UMC%d LRDIMM %dx rank multiply\n",
870 					i, 1 << ((tmp >> 4) & 0x3));
871 		}
872 
873 		debug_display_dimm_sizes_df(pvt, i);
874 	}
875 
876 	edac_dbg(1, "F0x104 (DRAM Hole Address): 0x%08x, base: 0x%08x\n",
877 		 pvt->dhar, dhar_base(pvt));
878 }
879 
880 /* Display and decode various NB registers for debug purposes. */
881 static void __dump_misc_regs(struct amd64_pvt *pvt)
882 {
883 	edac_dbg(1, "F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);
884 
885 	edac_dbg(1, "  NB two channel DRAM capable: %s\n",
886 		 (pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no");
887 
888 	edac_dbg(1, "  ECC capable: %s, ChipKill ECC capable: %s\n",
889 		 (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no",
890 		 (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no");
891 
892 	debug_dump_dramcfg_low(pvt, pvt->dclr0, 0);
893 
894 	edac_dbg(1, "F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);
895 
896 	edac_dbg(1, "F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, offset: 0x%08x\n",
897 		 pvt->dhar, dhar_base(pvt),
898 		 (pvt->fam == 0xf) ? k8_dhar_offset(pvt)
899 				   : f10_dhar_offset(pvt));
900 
901 	debug_display_dimm_sizes(pvt, 0);
902 
903 	/* everything below this point is Fam10h and above */
904 	if (pvt->fam == 0xf)
905 		return;
906 
907 	debug_display_dimm_sizes(pvt, 1);
908 
909 	/* Only if NOT ganged does dclr1 have valid info */
910 	if (!dct_ganging_enabled(pvt))
911 		debug_dump_dramcfg_low(pvt, pvt->dclr1, 1);
912 }
913 
914 /* Display and decode various NB registers for debug purposes. */
915 static void dump_misc_regs(struct amd64_pvt *pvt)
916 {
917 	if (pvt->umc)
918 		__dump_misc_regs_df(pvt);
919 	else
920 		__dump_misc_regs(pvt);
921 
922 	edac_dbg(1, "  DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");
923 
924 	amd64_info("using x%u syndromes.\n", pvt->ecc_sym_sz);
925 }
926 
927 /*
928  * See BKDG, F2x[1,0][5C:40], F2[1,0][6C:60]
929  */
930 static void prep_chip_selects(struct amd64_pvt *pvt)
931 {
932 	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
933 		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
934 		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 8;
935 	} else if (pvt->fam == 0x15 && pvt->model == 0x30) {
936 		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 4;
937 		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 2;
938 	} else if (pvt->fam >= 0x17) {
939 		int umc;
940 
941 		for_each_umc(umc) {
942 			pvt->csels[umc].b_cnt = 4;
943 			pvt->csels[umc].m_cnt = 2;
944 		}
945 
946 	} else {
947 		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
948 		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4;
949 	}
950 }
951 
952 static void read_umc_base_mask(struct amd64_pvt *pvt)
953 {
954 	u32 umc_base_reg, umc_base_reg_sec;
955 	u32 umc_mask_reg, umc_mask_reg_sec;
956 	u32 base_reg, base_reg_sec;
957 	u32 mask_reg, mask_reg_sec;
958 	u32 *base, *base_sec;
959 	u32 *mask, *mask_sec;
960 	int cs, umc;
961 
962 	for_each_umc(umc) {
963 		umc_base_reg = get_umc_base(umc) + UMCCH_BASE_ADDR;
964 		umc_base_reg_sec = get_umc_base(umc) + UMCCH_BASE_ADDR_SEC;
965 
966 		for_each_chip_select(cs, umc, pvt) {
967 			base = &pvt->csels[umc].csbases[cs];
968 			base_sec = &pvt->csels[umc].csbases_sec[cs];
969 
970 			base_reg = umc_base_reg + (cs * 4);
971 			base_reg_sec = umc_base_reg_sec + (cs * 4);
972 
973 			if (!amd_smn_read(pvt->mc_node_id, base_reg, base))
974 				edac_dbg(0, "  DCSB%d[%d]=0x%08x reg: 0x%x\n",
975 					 umc, cs, *base, base_reg);
976 
977 			if (!amd_smn_read(pvt->mc_node_id, base_reg_sec, base_sec))
978 				edac_dbg(0, "    DCSB_SEC%d[%d]=0x%08x reg: 0x%x\n",
979 					 umc, cs, *base_sec, base_reg_sec);
980 		}
981 
982 		umc_mask_reg = get_umc_base(umc) + UMCCH_ADDR_MASK;
983 		umc_mask_reg_sec = get_umc_base(umc) + UMCCH_ADDR_MASK_SEC;
984 
985 		for_each_chip_select_mask(cs, umc, pvt) {
986 			mask = &pvt->csels[umc].csmasks[cs];
987 			mask_sec = &pvt->csels[umc].csmasks_sec[cs];
988 
989 			mask_reg = umc_mask_reg + (cs * 4);
990 			mask_reg_sec = umc_mask_reg_sec + (cs * 4);
991 
992 			if (!amd_smn_read(pvt->mc_node_id, mask_reg, mask))
993 				edac_dbg(0, "  DCSM%d[%d]=0x%08x reg: 0x%x\n",
994 					 umc, cs, *mask, mask_reg);
995 
996 			if (!amd_smn_read(pvt->mc_node_id, mask_reg_sec, mask_sec))
997 				edac_dbg(0, "    DCSM_SEC%d[%d]=0x%08x reg: 0x%x\n",
998 					 umc, cs, *mask_sec, mask_reg_sec);
999 		}
1000 	}
1001 }
1002 
1003 /*
1004  * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask registers
1005  */
1006 static void read_dct_base_mask(struct amd64_pvt *pvt)
1007 {
1008 	int cs;
1009 
1010 	prep_chip_selects(pvt);
1011 
1012 	if (pvt->umc)
1013 		return read_umc_base_mask(pvt);
1014 
1015 	for_each_chip_select(cs, 0, pvt) {
1016 		int reg0   = DCSB0 + (cs * 4);
1017 		int reg1   = DCSB1 + (cs * 4);
1018 		u32 *base0 = &pvt->csels[0].csbases[cs];
1019 		u32 *base1 = &pvt->csels[1].csbases[cs];
1020 
1021 		if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, base0))
1022 			edac_dbg(0, "  DCSB0[%d]=0x%08x reg: F2x%x\n",
1023 				 cs, *base0, reg0);
1024 
1025 		if (pvt->fam == 0xf)
1026 			continue;
1027 
1028 		if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, base1))
1029 			edac_dbg(0, "  DCSB1[%d]=0x%08x reg: F2x%x\n",
1030 				 cs, *base1, (pvt->fam == 0x10) ? reg1
1031 							: reg0);
1032 	}
1033 
1034 	for_each_chip_select_mask(cs, 0, pvt) {
1035 		int reg0   = DCSM0 + (cs * 4);
1036 		int reg1   = DCSM1 + (cs * 4);
1037 		u32 *mask0 = &pvt->csels[0].csmasks[cs];
1038 		u32 *mask1 = &pvt->csels[1].csmasks[cs];
1039 
1040 		if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, mask0))
1041 			edac_dbg(0, "    DCSM0[%d]=0x%08x reg: F2x%x\n",
1042 				 cs, *mask0, reg0);
1043 
1044 		if (pvt->fam == 0xf)
1045 			continue;
1046 
1047 		if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, mask1))
1048 			edac_dbg(0, "    DCSM1[%d]=0x%08x reg: F2x%x\n",
1049 				 cs, *mask1, (pvt->fam == 0x10) ? reg1
1050 							: reg0);
1051 	}
1052 }
1053 
1054 static void determine_memory_type(struct amd64_pvt *pvt)
1055 {
1056 	u32 dram_ctrl, dcsm;
1057 
1058 	switch (pvt->fam) {
1059 	case 0xf:
1060 		if (pvt->ext_model >= K8_REV_F)
1061 			goto ddr3;
1062 
1063 		pvt->dram_type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;
1064 		return;
1065 
1066 	case 0x10:
1067 		if (pvt->dchr0 & DDR3_MODE)
1068 			goto ddr3;
1069 
1070 		pvt->dram_type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
1071 		return;
1072 
1073 	case 0x15:
1074 		if (pvt->model < 0x60)
1075 			goto ddr3;
1076 
1077 		/*
1078 		 * Model 0x60 needs special handling:
1079 		 *
1080 		 * We use a Chip Select value of '0' to obtain dcsm.
1081 		 * Theoretically, it is possible to populate LRDIMMs of different
1082 		 * 'Rank' value on a DCT. But this is not the common case. So,
1083 		 * it's reasonable to assume all DIMMs are going to be of the same
1084 		 * 'type' until proven otherwise.
1085 		 */
1086 		amd64_read_dct_pci_cfg(pvt, 0, DRAM_CONTROL, &dram_ctrl);
1087 		dcsm = pvt->csels[0].csmasks[0];
1088 
1089 		if (((dram_ctrl >> 8) & 0x7) == 0x2)
1090 			pvt->dram_type = MEM_DDR4;
1091 		else if (pvt->dclr0 & BIT(16))
1092 			pvt->dram_type = MEM_DDR3;
1093 		else if (dcsm & 0x3)
1094 			pvt->dram_type = MEM_LRDDR3;
1095 		else
1096 			pvt->dram_type = MEM_RDDR3;
1097 
1098 		return;
1099 
1100 	case 0x16:
1101 		goto ddr3;
1102 
1103 	case 0x17:
1104 	case 0x18:
1105 		if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(5))
1106 			pvt->dram_type = MEM_LRDDR4;
1107 		else if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(4))
1108 			pvt->dram_type = MEM_RDDR4;
1109 		else
1110 			pvt->dram_type = MEM_DDR4;
1111 		return;
1112 
1113 	default:
1114 		WARN(1, KERN_ERR "%s: Family??? 0x%x\n", __func__, pvt->fam);
1115 		pvt->dram_type = MEM_EMPTY;
1116 	}
1117 	return;
1118 
1119 ddr3:
1120 	pvt->dram_type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
1121 }
1122 
1123 /* Get the number of DCT channels the memory controller is using. */
1124 static int k8_early_channel_count(struct amd64_pvt *pvt)
1125 {
1126 	int flag;
1127 
1128 	if (pvt->ext_model >= K8_REV_F)
1129 		/* RevF (NPT) and later */
1130 		flag = pvt->dclr0 & WIDTH_128;
1131 	else
1132 		/* RevE and earlier */
1133 		flag = pvt->dclr0 & REVE_WIDTH_128;
1134 
1135 	/* not used */
1136 	pvt->dclr1 = 0;
1137 
1138 	return (flag) ? 2 : 1;
1139 }
1140 
1141 /* On F10h and later ErrAddr is MC4_ADDR[47:1] */
1142 static u64 get_error_address(struct amd64_pvt *pvt, struct mce *m)
1143 {
1144 	u16 mce_nid = amd_get_nb_id(m->extcpu);
1145 	struct mem_ctl_info *mci;
1146 	u8 start_bit = 1;
1147 	u8 end_bit   = 47;
1148 	u64 addr;
1149 
1150 	mci = edac_mc_find(mce_nid);
1151 	if (!mci)
1152 		return 0;
1153 
1154 	pvt = mci->pvt_info;
1155 
1156 	if (pvt->fam == 0xf) {
1157 		start_bit = 3;
1158 		end_bit   = 39;
1159 	}
1160 
1161 	addr = m->addr & GENMASK_ULL(end_bit, start_bit);
1162 
1163 	/*
1164 	 * Erratum 637 workaround
1165 	 */
1166 	if (pvt->fam == 0x15) {
1167 		u64 cc6_base, tmp_addr;
1168 		u32 tmp;
1169 		u8 intlv_en;
1170 
1171 		if ((addr & GENMASK_ULL(47, 24)) >> 24 != 0x00fdf7)
1172 			return addr;
1173 
1174 
1175 		amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_LIM, &tmp);
1176 		intlv_en = tmp >> 21 & 0x7;
1177 
1178 		/* add [47:27] + 3 trailing bits */
1179 		cc6_base  = (tmp & GENMASK_ULL(20, 0)) << 3;
1180 
1181 		/* reverse and add DramIntlvEn */
1182 		cc6_base |= intlv_en ^ 0x7;
1183 
1184 		/* pin at [47:24] */
1185 		cc6_base <<= 24;
1186 
1187 		if (!intlv_en)
1188 			return cc6_base | (addr & GENMASK_ULL(23, 0));
1189 
1190 		amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_BASE, &tmp);
1191 
1192 							/* faster log2 */
1193 		tmp_addr  = (addr & GENMASK_ULL(23, 12)) << __fls(intlv_en + 1);
1194 
1195 		/* OR DramIntlvSel into bits [14:12] */
1196 		tmp_addr |= (tmp & GENMASK_ULL(23, 21)) >> 9;
1197 
1198 		/* add remaining [11:0] bits from original MC4_ADDR */
1199 		tmp_addr |= addr & GENMASK_ULL(11, 0);
1200 
1201 		return cc6_base | tmp_addr;
1202 	}
1203 
1204 	return addr;
1205 }
1206 
1207 static struct pci_dev *pci_get_related_function(unsigned int vendor,
1208 						unsigned int device,
1209 						struct pci_dev *related)
1210 {
1211 	struct pci_dev *dev = NULL;
1212 
1213 	while ((dev = pci_get_device(vendor, device, dev))) {
1214 		if (pci_domain_nr(dev->bus) == pci_domain_nr(related->bus) &&
1215 		    (dev->bus->number == related->bus->number) &&
1216 		    (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
1217 			break;
1218 	}
1219 
1220 	return dev;
1221 }
1222 
1223 static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
1224 {
1225 	struct amd_northbridge *nb;
1226 	struct pci_dev *f1 = NULL;
1227 	unsigned int pci_func;
1228 	int off = range << 3;
1229 	u32 llim;
1230 
1231 	amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off,  &pvt->ranges[range].base.lo);
1232 	amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo);
1233 
1234 	if (pvt->fam == 0xf)
1235 		return;
1236 
1237 	if (!dram_rw(pvt, range))
1238 		return;
1239 
1240 	amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off,  &pvt->ranges[range].base.hi);
1241 	amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi);
1242 
1243 	/* F15h: factor in CC6 save area by reading dst node's limit reg */
1244 	if (pvt->fam != 0x15)
1245 		return;
1246 
1247 	nb = node_to_amd_nb(dram_dst_node(pvt, range));
1248 	if (WARN_ON(!nb))
1249 		return;
1250 
1251 	if (pvt->model == 0x60)
1252 		pci_func = PCI_DEVICE_ID_AMD_15H_M60H_NB_F1;
1253 	else if (pvt->model == 0x30)
1254 		pci_func = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1;
1255 	else
1256 		pci_func = PCI_DEVICE_ID_AMD_15H_NB_F1;
1257 
1258 	f1 = pci_get_related_function(nb->misc->vendor, pci_func, nb->misc);
1259 	if (WARN_ON(!f1))
1260 		return;
1261 
1262 	amd64_read_pci_cfg(f1, DRAM_LOCAL_NODE_LIM, &llim);
1263 
1264 	pvt->ranges[range].lim.lo &= GENMASK_ULL(15, 0);
1265 
1266 				    /* {[39:27],111b} */
1267 	pvt->ranges[range].lim.lo |= ((llim & 0x1fff) << 3 | 0x7) << 16;
1268 
1269 	pvt->ranges[range].lim.hi &= GENMASK_ULL(7, 0);
1270 
1271 				    /* [47:40] */
1272 	pvt->ranges[range].lim.hi |= llim >> 13;
1273 
1274 	pci_dev_put(f1);
1275 }
1276 
1277 static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
1278 				    struct err_info *err)
1279 {
1280 	struct amd64_pvt *pvt = mci->pvt_info;
1281 
1282 	error_address_to_page_and_offset(sys_addr, err);
1283 
1284 	/*
1285 	 * Find out which node the error address belongs to. This may be
1286 	 * different from the node that detected the error.
1287 	 */
1288 	err->src_mci = find_mc_by_sys_addr(mci, sys_addr);
1289 	if (!err->src_mci) {
1290 		amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n",
1291 			     (unsigned long)sys_addr);
1292 		err->err_code = ERR_NODE;
1293 		return;
1294 	}
1295 
1296 	/* Now map the sys_addr to a CSROW */
1297 	err->csrow = sys_addr_to_csrow(err->src_mci, sys_addr);
1298 	if (err->csrow < 0) {
1299 		err->err_code = ERR_CSROW;
1300 		return;
1301 	}
1302 
1303 	/* CHIPKILL enabled */
1304 	if (pvt->nbcfg & NBCFG_CHIPKILL) {
1305 		err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome);
1306 		if (err->channel < 0) {
1307 			/*
1308 			 * Syndrome didn't map, so we don't know which of the
1309 			 * 2 DIMMs is in error. So we need to ID 'both' of them
1310 			 * as suspect.
1311 			 */
1312 			amd64_mc_warn(err->src_mci, "unknown syndrome 0x%04x - "
1313 				      "possible error reporting race\n",
1314 				      err->syndrome);
1315 			err->err_code = ERR_CHANNEL;
1316 			return;
1317 		}
1318 	} else {
1319 		/*
1320 		 * non-chipkill ecc mode
1321 		 *
1322 		 * The k8 documentation is unclear about how to determine the
1323 		 * channel number when using non-chipkill memory.  This method
1324 		 * was obtained from email communication with someone at AMD.
1325 		 * (Wish the email was placed in this comment - norsk)
1326 		 */
1327 		err->channel = ((sys_addr & BIT(3)) != 0);
1328 	}
1329 }
1330 
1331 static int ddr2_cs_size(unsigned i, bool dct_width)
1332 {
1333 	unsigned shift = 0;
1334 
1335 	if (i <= 2)
1336 		shift = i;
1337 	else if (!(i & 0x1))
1338 		shift = i >> 1;
1339 	else
1340 		shift = (i + 1) >> 1;
1341 
1342 	return 128 << (shift + !!dct_width);
1343 }
1344 
1345 static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1346 				  unsigned cs_mode, int cs_mask_nr)
1347 {
1348 	u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
1349 
1350 	if (pvt->ext_model >= K8_REV_F) {
1351 		WARN_ON(cs_mode > 11);
1352 		return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
1353 	}
1354 	else if (pvt->ext_model >= K8_REV_D) {
1355 		unsigned diff;
1356 		WARN_ON(cs_mode > 10);
1357 
1358 		/*
1359 		 * the below calculation, besides trying to win an obfuscated C
1360 		 * contest, maps cs_mode values to DIMM chip select sizes. The
1361 		 * mappings are:
1362 		 *
1363 		 * cs_mode	CS size (mb)
1364 		 * cs_mode	CS size (MB)
1365 		 * 0		32
1366 		 * 1		64
1367 		 * 2		128
1368 		 * 3		128
1369 		 * 4		256
1370 		 * 5		512
1371 		 * 6		256
1372 		 * 7		512
1373 		 * 8		1024
1374 		 * 9		1024
1375 		 * 10		2048
1376 		 *
1377 		 * Basically, it calculates a value with which to shift the
1378 		 * smallest CS size of 32MB.
1379 		 *
1380 		 * ddr[23]_cs_size have a similar purpose.
1381 		 */
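		/*
		 * e.g. cs_mode = 7: diff = 7/3 + 1 = 3, so 32 << (7 - 3) =
		 * 512MB, matching the table above.
		 */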
1382 		diff = cs_mode/3 + (unsigned)(cs_mode > 5);
1383 
1384 		return 32 << (cs_mode - diff);
1385 	}
1386 	else {
1387 		WARN_ON(cs_mode > 6);
1388 		return 32 << cs_mode;
1389 	}
1390 }
1391 
1392 /*
1393  * Get the number of DCT channels in use.
1394  *
1395  * Return:
1396  *	number of Memory Channels in operation
1397  * Pass back:
1398  *	contents of the DCL0_LOW register
1399  */
1400 static int f1x_early_channel_count(struct amd64_pvt *pvt)
1401 {
1402 	int i, j, channels = 0;
1403 
1404 	/* On F10h, if we are in 128 bit mode, then we are using 2 channels */
1405 	if (pvt->fam == 0x10 && (pvt->dclr0 & WIDTH_128))
1406 		return 2;
1407 
1408 	/*
1409 	 * Need to check if in unganged mode: in that case, there are 2 channels,
1410 	 * but they are not in 128 bit mode and thus the above 'dclr0' status
1411 	 * bit will be OFF.
1412 	 *
1413 	 * Need to check DCT0[0] and DCT1[0] to see if only one of them has
1414 	 * its CSEnable bit on. If so, then it is the SINGLE DIMM case.
1415 	 */
1416 	edac_dbg(0, "Data width is not 128 bits - need more decoding\n");
1417 
1418 	/*
1419 	 * Check DRAM Bank Address Mapping values for each DIMM to see if there
1420 	 * is more than just one DIMM present in unganged mode. Need to check
1421 	 * both controllers since DIMMs can be placed in either one.
1422 	 */
1423 	for (i = 0; i < 2; i++) {
1424 		u32 dbam = (i ? pvt->dbam1 : pvt->dbam0);
1425 
1426 		for (j = 0; j < 4; j++) {
1427 			if (DBAM_DIMM(j, dbam) > 0) {
1428 				channels++;
1429 				break;
1430 			}
1431 		}
1432 	}
1433 
1434 	if (channels > 2)
1435 		channels = 2;
1436 
1437 	amd64_info("MCT channel count: %d\n", channels);
1438 
1439 	return channels;
1440 }
1441 
1442 static int f17_early_channel_count(struct amd64_pvt *pvt)
1443 {
1444 	int i, channels = 0;
1445 
1446 	/* SDP Control bit 31 (SdpInit) is clear for unused UMC channels */
1447 	for_each_umc(i)
1448 		channels += !!(pvt->umc[i].sdp_ctrl & UMC_SDP_INIT);
1449 
1450 	amd64_info("MCT channel count: %d\n", channels);
1451 
1452 	return channels;
1453 }
1454 
1455 static int ddr3_cs_size(unsigned i, bool dct_width)
1456 {
1457 	unsigned shift = 0;
1458 	int cs_size = 0;
1459 
1460 	if (i == 0 || i == 3 || i == 4)
1461 		cs_size = -1;
1462 	else if (i <= 2)
1463 		shift = i;
1464 	else if (i == 12)
1465 		shift = 7;
1466 	else if (!(i & 0x1))
1467 		shift = i >> 1;
1468 	else
1469 		shift = (i + 1) >> 1;
1470 
1471 	if (cs_size != -1)
1472 		cs_size = (128 * (1 << !!dct_width)) << shift;
1473 
1474 	return cs_size;
1475 }
1476 
1477 static int ddr3_lrdimm_cs_size(unsigned i, unsigned rank_multiply)
1478 {
1479 	unsigned shift = 0;
1480 	int cs_size = 0;
1481 
1482 	if (i < 4 || i == 6)
1483 		cs_size = -1;
1484 	else if (i == 12)
1485 		shift = 7;
1486 	else if (!(i & 0x1))
1487 		shift = i >> 1;
1488 	else
1489 		shift = (i + 1) >> 1;
1490 
1491 	if (cs_size != -1)
1492 		cs_size = rank_multiply * (128 << shift);
1493 
1494 	return cs_size;
1495 }
1496 
1497 static int ddr4_cs_size(unsigned i)
1498 {
1499 	int cs_size = 0;
1500 
1501 	if (i == 0)
1502 		cs_size = -1;
1503 	else if (i == 1)
1504 		cs_size = 1024;
1505 	else
1506 		/* Min cs_size = 1G */
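		/* e.g. i = 4 -> 1024 * (1 << (4 >> 1)) = 4096MB */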
1507 		cs_size = 1024 * (1 << (i >> 1));
1508 
1509 	return cs_size;
1510 }
1511 
1512 static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1513 				   unsigned cs_mode, int cs_mask_nr)
1514 {
1515 	u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
1516 
1517 	WARN_ON(cs_mode > 11);
1518 
1519 	if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
1520 		return ddr3_cs_size(cs_mode, dclr & WIDTH_128);
1521 	else
1522 		return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
1523 }
1524 
1525 /*
1526  * F15h supports only 64-bit DCT interfaces
1527  */
1528 static int f15_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1529 				   unsigned cs_mode, int cs_mask_nr)
1530 {
1531 	WARN_ON(cs_mode > 12);
1532 
1533 	return ddr3_cs_size(cs_mode, false);
1534 }
1535 
1536 /* F15h M60h supports DDR4 mapping as well. */
1537 static int f15_m60h_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1538 					unsigned cs_mode, int cs_mask_nr)
1539 {
1540 	int cs_size;
1541 	u32 dcsm = pvt->csels[dct].csmasks[cs_mask_nr];
1542 
1543 	WARN_ON(cs_mode > 12);
1544 
1545 	if (pvt->dram_type == MEM_DDR4) {
1546 		if (cs_mode > 9)
1547 			return -1;
1548 
1549 		cs_size = ddr4_cs_size(cs_mode);
1550 	} else if (pvt->dram_type == MEM_LRDDR3) {
1551 		unsigned rank_multiply = dcsm & 0xf;
1552 
1553 		if (rank_multiply == 3)
1554 			rank_multiply = 4;
1555 		cs_size = ddr3_lrdimm_cs_size(cs_mode, rank_multiply);
1556 	} else {
1557 		/* Minimum cs size is 512MB for F15h M60h */
1558 		if (cs_mode == 0x1)
1559 			return -1;
1560 
1561 		cs_size = ddr3_cs_size(cs_mode, false);
1562 	}
1563 
1564 	return cs_size;
1565 }
1566 
1567 /*
1568  * F16h and F15h model 30h have only limited cs_modes.
1569  */
1570 static int f16_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1571 				unsigned cs_mode, int cs_mask_nr)
1572 {
1573 	WARN_ON(cs_mode > 12);
1574 
1575 	if (cs_mode == 6 || cs_mode == 8 ||
1576 	    cs_mode == 9 || cs_mode == 12)
1577 		return -1;
1578 	else
1579 		return ddr3_cs_size(cs_mode, false);
1580 }
1581 
1582 static int f17_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc,
1583 				    unsigned int cs_mode, int csrow_nr)
1584 {
1585 	u32 addr_mask_orig, addr_mask_deinterleaved;
1586 	u32 msb, weight, num_zero_bits;
1587 	int dimm, size = 0;
1588 
1589 	/* No Chip Selects are enabled. */
1590 	if (!cs_mode)
1591 		return size;
1592 
1593 	/* Requested size of an even CS but none are enabled. */
1594 	if (!(cs_mode & CS_EVEN) && !(csrow_nr & 1))
1595 		return size;
1596 
1597 	/* Requested size of an odd CS but none are enabled. */
1598 	if (!(cs_mode & CS_ODD) && (csrow_nr & 1))
1599 		return size;
1600 
1601 	/*
1602 	 * There is one mask per DIMM, and two Chip Selects per DIMM.
1603 	 *	CS0 and CS1 -> DIMM0
1604 	 *	CS2 and CS3 -> DIMM1
1605 	 */
1606 	dimm = csrow_nr >> 1;
1607 
1608 	/* Asymmetric dual-rank DIMM support. */
1609 	if ((csrow_nr & 1) && (cs_mode & CS_ODD_SECONDARY))
1610 		addr_mask_orig = pvt->csels[umc].csmasks_sec[dimm];
1611 	else
1612 		addr_mask_orig = pvt->csels[umc].csmasks[dimm];
1613 
1614 	/*
1615 	 * The number of zero bits in the mask is equal to the number of bits
1616 	 * in a full mask minus the number of bits in the current mask.
1617 	 *
1618 	 * The MSB is the number of bits in the full mask because BIT[0] is
1619 	 * always 0.
1620 	 */
1621 	msb = fls(addr_mask_orig) - 1;
1622 	weight = hweight_long(addr_mask_orig);
1623 	num_zero_bits = msb - weight;
1624 
1625 	/* Take the number of zero bits off from the top of the mask. */
1626 	addr_mask_deinterleaved = GENMASK_ULL(msb - num_zero_bits, 1);
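	/*
	 * e.g. with a hypothetical addr_mask_orig of 0x3fffffc (bit 1 punched
	 * out for chip-select interleaving): msb = 25, weight = 24, so one
	 * zero bit is dropped and addr_mask_deinterleaved becomes
	 * GENMASK_ULL(24, 1) = 0x1fffffe, i.e. an 8 GB chip select.
	 */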
1627 
1628 	edac_dbg(1, "CS%d DIMM%d AddrMasks:\n", csrow_nr, dimm);
1629 	edac_dbg(1, "  Original AddrMask: 0x%x\n", addr_mask_orig);
1630 	edac_dbg(1, "  Deinterleaved AddrMask: 0x%x\n", addr_mask_deinterleaved);
1631 
1632 	/* Register [31:1] = Address [39:9]. Size is in kBs here. */
1633 	size = (addr_mask_deinterleaved >> 2) + 1;
1634 
1635 	/* Return size in MBs. */
1636 	return size >> 10;
1637 }
1638 
1639 static void read_dram_ctl_register(struct amd64_pvt *pvt)
1640 {
1641 
1642 	if (pvt->fam == 0xf)
1643 		return;
1644 
1645 	if (!amd64_read_pci_cfg(pvt->F2, DCT_SEL_LO, &pvt->dct_sel_lo)) {
1646 		edac_dbg(0, "F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n",
1647 			 pvt->dct_sel_lo, dct_sel_baseaddr(pvt));
1648 
1649 		edac_dbg(0, "  DCTs operate in %s mode\n",
1650 			 (dct_ganging_enabled(pvt) ? "ganged" : "unganged"));
1651 
1652 		if (!dct_ganging_enabled(pvt))
1653 			edac_dbg(0, "  Address range split per DCT: %s\n",
1654 				 (dct_high_range_enabled(pvt) ? "yes" : "no"));
1655 
1656 		edac_dbg(0, "  data interleave for ECC: %s, DRAM cleared since last warm reset: %s\n",
1657 			 (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
1658 			 (dct_memory_cleared(pvt) ? "yes" : "no"));
1659 
1660 		edac_dbg(0, "  channel interleave: %s, "
1661 			 "interleave bits selector: 0x%x\n",
1662 			 (dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
1663 			 dct_sel_interleave_addr(pvt));
1664 	}
1665 
1666 	amd64_read_pci_cfg(pvt->F2, DCT_SEL_HI, &pvt->dct_sel_hi);
1667 }
1668 
1669 /*
1670  * Determine channel (DCT) based on the interleaving mode (see F15h M30h BKDG,
1671  * 2.10.12 Memory Interleaving Modes).
1672  */
1673 static u8 f15_m30h_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
1674 				     u8 intlv_en, int num_dcts_intlv,
1675 				     u32 dct_sel)
1676 {
1677 	u8 channel = 0;
1678 	u8 select;
1679 
1680 	if (!(intlv_en))
1681 		return (u8)(dct_sel);
1682 
1683 	if (num_dcts_intlv == 2) {
1684 		select = (sys_addr >> 8) & 0x3;
1685 		channel = select ? 0x3 : 0;
1686 	} else if (num_dcts_intlv == 4) {
1687 		u8 intlv_addr = dct_sel_interleave_addr(pvt);
1688 		switch (intlv_addr) {
1689 		case 0x4:
1690 			channel = (sys_addr >> 8) & 0x3;
1691 			break;
1692 		case 0x5:
1693 			channel = (sys_addr >> 9) & 0x3;
1694 			break;
1695 		}
1696 	}
1697 	return channel;
1698 }
1699 
1700 /*
1701  * Determine channel (DCT) based on the interleaving mode: F10h BKDG, 2.8.9 Memory
1702  * Interleaving Modes.
1703  */
1704 static u8 f1x_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
1705 				bool hi_range_sel, u8 intlv_en)
1706 {
1707 	u8 dct_sel_high = (pvt->dct_sel_lo >> 1) & 1;
1708 
1709 	if (dct_ganging_enabled(pvt))
1710 		return 0;
1711 
1712 	if (hi_range_sel)
1713 		return dct_sel_high;
1714 
1715 	/*
1716 	 * see F2x110[DctSelIntLvAddr] - channel interleave mode
1717 	 */
1718 	if (dct_interleave_enabled(pvt)) {
1719 		u8 intlv_addr = dct_sel_interleave_addr(pvt);
1720 
1721 		/* return DCT select function: 0=DCT0, 1=DCT1 */
1722 		if (!intlv_addr)
1723 			return sys_addr >> 6 & 1;
1724 
1725 		if (intlv_addr & 0x2) {
1726 			u8 shift = intlv_addr & 0x1 ? 9 : 6;
1727 			u32 temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) & 1;
1728 
1729 			return ((sys_addr >> shift) & 1) ^ temp;
1730 		}
1731 
1732 		if (intlv_addr & 0x4) {
1733 			u8 shift = intlv_addr & 0x1 ? 9 : 8;
1734 
1735 			return (sys_addr >> shift) & 1;
1736 		}
1737 
1738 		return (sys_addr >> (12 + hweight8(intlv_en))) & 1;
1739 	}
1740 
1741 	if (dct_high_range_enabled(pvt))
1742 		return ~dct_sel_high & 1;
1743 
1744 	return 0;
1745 }
1746 
1747 /* Convert the sys_addr to the normalized DCT address */
1748 static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, u8 range,
1749 				 u64 sys_addr, bool hi_rng,
1750 				 u32 dct_sel_base_addr)
1751 {
1752 	u64 chan_off;
1753 	u64 dram_base		= get_dram_base(pvt, range);
1754 	u64 hole_off		= f10_dhar_offset(pvt);
1755 	u64 dct_sel_base_off	= (u64)(pvt->dct_sel_hi & 0xFFFFFC00) << 16;
1756 
1757 	if (hi_rng) {
1758 		/*
1759 		 * if
1760 		 * base address of high range is below 4Gb
1761 		 * (bits [47:27] at [31:11])
1762 		 * DRAM address space on this DCT is hoisted above 4Gb	&&
1763 		 * sys_addr > 4Gb
1764 		 *
1765 		 *	remove hole offset from sys_addr
1766 		 * else
1767 		 *	remove high range offset from sys_addr
1768 		 */
1769 		if ((!(dct_sel_base_addr >> 16) ||
1770 		     dct_sel_base_addr < dhar_base(pvt)) &&
1771 		    dhar_valid(pvt) &&
1772 		    (sys_addr >= BIT_64(32)))
1773 			chan_off = hole_off;
1774 		else
1775 			chan_off = dct_sel_base_off;
1776 	} else {
1777 		/*
1778 		 * if
1779 		 * we have a valid hole		&&
1780 		 * sys_addr > 4Gb
1781 		 *
1782 		 *	remove hole
1783 		 * else
1784 		 *	remove dram base to normalize to DCT address
1785 		 */
1786 		if (dhar_valid(pvt) && (sys_addr >= BIT_64(32)))
1787 			chan_off = hole_off;
1788 		else
1789 			chan_off = dram_base;
1790 	}
1791 
1792 	return (sys_addr & GENMASK_ULL(47, 6)) - (chan_off & GENMASK_ULL(47, 23));
1793 }
1794 
1795 /*
1796  * checks if the csrow passed in is marked as SPARED; if so, returns the new
1797  * spare row
1798  */
1799 static int f10_process_possible_spare(struct amd64_pvt *pvt, u8 dct, int csrow)
1800 {
1801 	int tmp_cs;
1802 
1803 	if (online_spare_swap_done(pvt, dct) &&
1804 	    csrow == online_spare_bad_dramcs(pvt, dct)) {
1805 
1806 		for_each_chip_select(tmp_cs, dct, pvt) {
1807 			if (chip_select_base(tmp_cs, dct, pvt) & 0x2) {
1808 				csrow = tmp_cs;
1809 				break;
1810 			}
1811 		}
1812 	}
1813 	return csrow;
1814 }
1815 
1816 /*
1817  * Iterate over the DRAM DCT "base" and "mask" registers looking for a
1818  * SystemAddr match on the specified 'ChannelSelect' and 'NodeID'
1819  *
1820  * Return:
1821  *	-EINVAL:  NOT FOUND
1822  *	0..csrow = Chip-Select Row
1823  */
1824 static int f1x_lookup_addr_in_dct(u64 in_addr, u8 nid, u8 dct)
1825 {
1826 	struct mem_ctl_info *mci;
1827 	struct amd64_pvt *pvt;
1828 	u64 cs_base, cs_mask;
1829 	int cs_found = -EINVAL;
1830 	int csrow;
1831 
1832 	mci = edac_mc_find(nid);
1833 	if (!mci)
1834 		return cs_found;
1835 
1836 	pvt = mci->pvt_info;
1837 
1838 	edac_dbg(1, "input addr: 0x%llx, DCT: %d\n", in_addr, dct);
1839 
1840 	for_each_chip_select(csrow, dct, pvt) {
1841 		if (!csrow_enabled(csrow, dct, pvt))
1842 			continue;
1843 
1844 		get_cs_base_and_mask(pvt, csrow, dct, &cs_base, &cs_mask);
1845 
1846 		edac_dbg(1, "    CSROW=%d CSBase=0x%llx CSMask=0x%llx\n",
1847 			 csrow, cs_base, cs_mask);
1848 
1849 		cs_mask = ~cs_mask;
1850 
1851 		edac_dbg(1, "    (InputAddr & ~CSMask)=0x%llx (CSBase & ~CSMask)=0x%llx\n",
1852 			 (in_addr & cs_mask), (cs_base & cs_mask));
1853 
1854 		if ((in_addr & cs_mask) == (cs_base & cs_mask)) {
1855 			if (pvt->fam == 0x15 && pvt->model >= 0x30) {
1856 				cs_found =  csrow;
1857 				break;
1858 			}
1859 			cs_found = f10_process_possible_spare(pvt, dct, csrow);
1860 
1861 			edac_dbg(1, " MATCH csrow=%d\n", cs_found);
1862 			break;
1863 		}
1864 	}
1865 	return cs_found;
1866 }
1867 
1868 /*
1869  * See F2x10C. Non-interleaved graphics framebuffer memory below the 16GB
1870  * boundary is swapped with a region located at the bottom of memory so that
1871  * the GPU can use the interleaved region and thus two channels.
1872  */
1873 static u64 f1x_swap_interleaved_region(struct amd64_pvt *pvt, u64 sys_addr)
1874 {
1875 	u32 swap_reg, swap_base, swap_limit, rgn_size, tmp_addr;
1876 
1877 	if (pvt->fam == 0x10) {
1878 		/* only revC3 and revE have that feature */
1879 		if (pvt->model < 4 || (pvt->model < 0xa && pvt->stepping < 3))
1880 			return sys_addr;
1881 	}
1882 
1883 	amd64_read_pci_cfg(pvt->F2, SWAP_INTLV_REG, &swap_reg);
1884 
1885 	if (!(swap_reg & 0x1))
1886 		return sys_addr;
1887 
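	/*
	 * Field layout as consumed below (descriptive note): bit 0 enables the
	 * swap, bits [9:3] hold the swap base, bits [17:11] the swap limit and
	 * bits [26:20] the swapped region size, all in 128MB (1 << 27) units.
	 */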
1888 	swap_base	= (swap_reg >> 3) & 0x7f;
1889 	swap_limit	= (swap_reg >> 11) & 0x7f;
1890 	rgn_size	= (swap_reg >> 20) & 0x7f;
1891 	tmp_addr	= sys_addr >> 27;
1892 
1893 	if (!(sys_addr >> 34) &&
1894 	    (((tmp_addr >= swap_base) &&
1895 	     (tmp_addr <= swap_limit)) ||
1896 	     (tmp_addr < rgn_size)))
1897 		return sys_addr ^ (u64)swap_base << 27;
1898 
1899 	return sys_addr;
1900 }
1901 
1902 /* For a given @dram_range, check if @sys_addr falls within it. */
1903 static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
1904 				  u64 sys_addr, int *chan_sel)
1905 {
1906 	int cs_found = -EINVAL;
1907 	u64 chan_addr;
1908 	u32 dct_sel_base;
1909 	u8 channel;
1910 	bool high_range = false;
1911 
1912 	u8 node_id    = dram_dst_node(pvt, range);
1913 	u8 intlv_en   = dram_intlv_en(pvt, range);
1914 	u32 intlv_sel = dram_intlv_sel(pvt, range);
1915 
1916 	edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
1917 		 range, sys_addr, get_dram_limit(pvt, range));
1918 
1919 	if (dhar_valid(pvt) &&
1920 	    dhar_base(pvt) <= sys_addr &&
1921 	    sys_addr < BIT_64(32)) {
1922 		amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
1923 			    sys_addr);
1924 		return -EINVAL;
1925 	}
1926 
1927 	if (intlv_en && (intlv_sel != ((sys_addr >> 12) & intlv_en)))
1928 		return -EINVAL;
1929 
1930 	sys_addr = f1x_swap_interleaved_region(pvt, sys_addr);
1931 
1932 	dct_sel_base = dct_sel_baseaddr(pvt);
1933 
1934 	/*
1935 	 * check whether addresses >= DctSelBaseAddr[47:27] are to be used to
1936 	 * select between DCT0 and DCT1.
1937 	 */
1938 	if (dct_high_range_enabled(pvt) &&
1939 	   !dct_ganging_enabled(pvt) &&
1940 	   ((sys_addr >> 27) >= (dct_sel_base >> 11)))
1941 		high_range = true;
1942 
1943 	channel = f1x_determine_channel(pvt, sys_addr, high_range, intlv_en);
1944 
1945 	chan_addr = f1x_get_norm_dct_addr(pvt, range, sys_addr,
1946 					  high_range, dct_sel_base);
1947 
1948 	/* Remove node interleaving, see F1x120 */
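	/* (i.e. drop the hweight(intlv_en) select bits sitting just above bit 11) */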
1949 	if (intlv_en)
1950 		chan_addr = ((chan_addr >> (12 + hweight8(intlv_en))) << 12) |
1951 			    (chan_addr & 0xfff);
1952 
1953 	/* remove channel interleave */
1954 	if (dct_interleave_enabled(pvt) &&
1955 	   !dct_high_range_enabled(pvt) &&
1956 	   !dct_ganging_enabled(pvt)) {
1957 
1958 		if (dct_sel_interleave_addr(pvt) != 1) {
1959 			if (dct_sel_interleave_addr(pvt) == 0x3)
1960 				/* hash 9 */
1961 				chan_addr = ((chan_addr >> 10) << 9) |
1962 					     (chan_addr & 0x1ff);
1963 			else
1964 				/* A[6] or hash 6 */
1965 				chan_addr = ((chan_addr >> 7) << 6) |
1966 					     (chan_addr & 0x3f);
1967 		} else
1968 			/* A[12] */
1969 			chan_addr = ((chan_addr >> 13) << 12) |
1970 				     (chan_addr & 0xfff);
1971 	}
1972 
1973 	edac_dbg(1, "   Normalized DCT addr: 0x%llx\n", chan_addr);
1974 
1975 	cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, channel);
1976 
1977 	if (cs_found >= 0)
1978 		*chan_sel = channel;
1979 
1980 	return cs_found;
1981 }
1982 
1983 static int f15_m30h_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
1984 					u64 sys_addr, int *chan_sel)
1985 {
1986 	int cs_found = -EINVAL;
1987 	int num_dcts_intlv = 0;
1988 	u64 chan_addr, chan_offset;
1989 	u64 dct_base, dct_limit;
1990 	u32 dct_cont_base_reg, dct_cont_limit_reg, tmp;
1991 	u8 channel, alias_channel, leg_mmio_hole, dct_sel, dct_offset_en;
1992 
1993 	u64 dhar_offset		= f10_dhar_offset(pvt);
1994 	u8 intlv_addr		= dct_sel_interleave_addr(pvt);
1995 	u8 node_id		= dram_dst_node(pvt, range);
1996 	u8 intlv_en		= dram_intlv_en(pvt, range);
1997 
1998 	amd64_read_pci_cfg(pvt->F1, DRAM_CONT_BASE, &dct_cont_base_reg);
1999 	amd64_read_pci_cfg(pvt->F1, DRAM_CONT_LIMIT, &dct_cont_limit_reg);
2000 
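	/*
	 * Fields as consumed below (descriptive note): base register bit 1 is
	 * the legacy MMIO hole flag, bit 3 the DCT offset enable and bits
	 * [6:4] the DCT select; limit register bits [23:11] give the DCT
	 * limit in 128MB units. Bit 0 of the base register gates the DCT
	 * base/limit check further down.
	 */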
2001 	dct_offset_en		= (u8) ((dct_cont_base_reg >> 3) & BIT(0));
2002 	dct_sel			= (u8) ((dct_cont_base_reg >> 4) & 0x7);
2003 
2004 	edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
2005 		 range, sys_addr, get_dram_limit(pvt, range));
2006 
2007 	if (!(get_dram_base(pvt, range)  <= sys_addr) ||
2008 	    !(get_dram_limit(pvt, range) >= sys_addr))
2009 		return -EINVAL;
2010 
2011 	if (dhar_valid(pvt) &&
2012 	    dhar_base(pvt) <= sys_addr &&
2013 	    sys_addr < BIT_64(32)) {
2014 		amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
2015 			    sys_addr);
2016 		return -EINVAL;
2017 	}
2018 
2019 	/* Verify sys_addr is within DCT Range. */
2020 	dct_base = (u64) dct_sel_baseaddr(pvt);
2021 	dct_limit = (dct_cont_limit_reg >> 11) & 0x1FFF;
2022 
2023 	if (!(dct_cont_base_reg & BIT(0)) &&
2024 	    !(dct_base <= (sys_addr >> 27) &&
2025 	      dct_limit >= (sys_addr >> 27)))
2026 		return -EINVAL;
2027 
2028 	/* Verify the number of DCTs that participate in channel interleaving. */
2029 	num_dcts_intlv = (int) hweight8(intlv_en);
2030 
2031 	if (!(num_dcts_intlv % 2 == 0) || (num_dcts_intlv > 4))
2032 		return -EINVAL;
2033 
2034 	if (pvt->model >= 0x60)
2035 		channel = f1x_determine_channel(pvt, sys_addr, false, intlv_en);
2036 	else
2037 		channel = f15_m30h_determine_channel(pvt, sys_addr, intlv_en,
2038 						     num_dcts_intlv, dct_sel);
2039 
2040 	/* Verify we stay within the MAX number of channels allowed */
2041 	if (channel > 3)
2042 		return -EINVAL;
2043 
2044 	leg_mmio_hole = (u8) (dct_cont_base_reg >> 1 & BIT(0));
2045 
2046 	/* Get normalized DCT addr */
2047 	if (leg_mmio_hole && (sys_addr >= BIT_64(32)))
2048 		chan_offset = dhar_offset;
2049 	else
2050 		chan_offset = dct_base << 27;
2051 
2052 	chan_addr = sys_addr - chan_offset;
2053 
2054 	/* remove channel interleave */
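	/* (collapse one or two DCT select bits at bit 8 or 9, per DctSelIntLvAddr) */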
2055 	if (num_dcts_intlv == 2) {
2056 		if (intlv_addr == 0x4)
2057 			chan_addr = ((chan_addr >> 9) << 8) |
2058 						(chan_addr & 0xff);
2059 		else if (intlv_addr == 0x5)
2060 			chan_addr = ((chan_addr >> 10) << 9) |
2061 						(chan_addr & 0x1ff);
2062 		else
2063 			return -EINVAL;
2064 
2065 	} else if (num_dcts_intlv == 4) {
2066 		if (intlv_addr == 0x4)
2067 			chan_addr = ((chan_addr >> 10) << 8) |
2068 							(chan_addr & 0xff);
2069 		else if (intlv_addr == 0x5)
2070 			chan_addr = ((chan_addr >> 11) << 9) |
2071 							(chan_addr & 0x1ff);
2072 		else
2073 			return -EINVAL;
2074 	}
2075 
2076 	if (dct_offset_en) {
2077 		amd64_read_pci_cfg(pvt->F1,
2078 				   DRAM_CONT_HIGH_OFF + (int) channel * 4,
2079 				   &tmp);
2080 		chan_addr +=  (u64) ((tmp >> 11) & 0xfff) << 27;
2081 	}
2082 
2083 	f15h_select_dct(pvt, channel);
2084 
2085 	edac_dbg(1, "   Normalized DCT addr: 0x%llx\n", chan_addr);
2086 
2087 	/*
2088 	 * Find Chip select:
2089 	 * if channel == 3, alias it to 1. This is because, on F15h M30h,
2090 	 * there is support for 4 DCTs, but only 2 are currently functional:
2091 	 * DCT0 and DCT3. However, we have read all registers of DCT3 into
2092 	 * pvt->csels[1], so we need to use '1' here to get the correct info.
2093 	 * Refer to the F15h M30h BKDG, Sections 2.10 and 2.10.3, for details.
2094 	 */
2095 	alias_channel =  (channel == 3) ? 1 : channel;
2096 
2097 	cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, alias_channel);
2098 
2099 	if (cs_found >= 0)
2100 		*chan_sel = alias_channel;
2101 
2102 	return cs_found;
2103 }
2104 
2105 static int f1x_translate_sysaddr_to_cs(struct amd64_pvt *pvt,
2106 					u64 sys_addr,
2107 					int *chan_sel)
2108 {
2109 	int cs_found = -EINVAL;
2110 	unsigned range;
2111 
2112 	for (range = 0; range < DRAM_RANGES; range++) {
2113 		if (!dram_rw(pvt, range))
2114 			continue;
2115 
2116 		if (pvt->fam == 0x15 && pvt->model >= 0x30)
2117 			cs_found = f15_m30h_match_to_this_node(pvt, range,
2118 							       sys_addr,
2119 							       chan_sel);
2120 
2121 		else if ((get_dram_base(pvt, range)  <= sys_addr) &&
2122 			 (get_dram_limit(pvt, range) >= sys_addr)) {
2123 			cs_found = f1x_match_to_this_node(pvt, range,
2124 							  sys_addr, chan_sel);
2125 			if (cs_found >= 0)
2126 				break;
2127 		}
2128 	}
2129 	return cs_found;
2130 }
2131 
2132 /*
2133  * For reference see "2.8.5 Routing DRAM Requests" in F10 BKDG. This code maps
2134  * a @sys_addr to NodeID, DCT (channel) and chip select (CSROW).
2135  *
2136  * The @sys_addr is usually an error address received from the hardware
2137  * (MCX_ADDR).
2138  */
2139 static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
2140 				     struct err_info *err)
2141 {
2142 	struct amd64_pvt *pvt = mci->pvt_info;
2143 
2144 	error_address_to_page_and_offset(sys_addr, err);
2145 
2146 	err->csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &err->channel);
2147 	if (err->csrow < 0) {
2148 		err->err_code = ERR_CSROW;
2149 		return;
2150 	}
2151 
2152 	/*
2153 	 * We need the syndromes for channel detection only when we're
2154 	 * ganged. Otherwise @chan should already contain the channel at
2155 	 * this point.
2156 	 */
2157 	if (dct_ganging_enabled(pvt))
2158 		err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome);
2159 }
2160 
2161 /*
2162  * Debug routine to display the memory sizes of all logical DIMMs and their
2163  * CSROWs.
2164  */
2165 static void debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
2166 {
2167 	int dimm, size0, size1;
2168 	u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases;
2169 	u32 dbam  = ctrl ? pvt->dbam1 : pvt->dbam0;
2170 
2171 	if (pvt->fam == 0xf) {
2172 		/* K8 revisions < revF not supported yet */
2173 		if (pvt->ext_model < K8_REV_F)
2174 			return;
2175 		else
2176 			WARN_ON(ctrl != 0);
2177 	}
2178 
2179 	if (pvt->fam == 0x10) {
2180 		dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1
2181 							   : pvt->dbam0;
2182 		dcsb = (ctrl && !dct_ganging_enabled(pvt)) ?
2183 				 pvt->csels[1].csbases :
2184 				 pvt->csels[0].csbases;
2185 	} else if (ctrl) {
2186 		dbam = pvt->dbam0;
2187 		dcsb = pvt->csels[1].csbases;
2188 	}
2189 	edac_dbg(1, "F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n",
2190 		 ctrl, dbam);
2191 
2192 	edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);
2193 
2194 	/* Dump memory sizes for DIMM and its CSROWs */
2195 	for (dimm = 0; dimm < 4; dimm++) {
2196 
2197 		size0 = 0;
2198 		if (dcsb[dimm*2] & DCSB_CS_ENABLE)
2199 			/*
2200 			 * For F15h M60h, we need a multiplier for the LRDIMM
2201 			 * cs_size calculation. We pass the dimm value to the
2202 			 * dbam_to_cs mapper so we can find the multiplier from
2203 			 * the corresponding DCSM.
2204 			 */
2205 			size0 = pvt->ops->dbam_to_cs(pvt, ctrl,
2206 						     DBAM_DIMM(dimm, dbam),
2207 						     dimm);
2208 
2209 		size1 = 0;
2210 		if (dcsb[dimm*2 + 1] & DCSB_CS_ENABLE)
2211 			size1 = pvt->ops->dbam_to_cs(pvt, ctrl,
2212 						     DBAM_DIMM(dimm, dbam),
2213 						     dimm);
2214 
2215 		amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
2216 				dimm * 2,     size0,
2217 				dimm * 2 + 1, size1);
2218 	}
2219 }
2220 
2221 static struct amd64_family_type family_types[] = {
2222 	[K8_CPUS] = {
2223 		.ctl_name = "K8",
2224 		.f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
2225 		.f2_id = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
2226 		.max_mcs = 2,
2227 		.ops = {
2228 			.early_channel_count	= k8_early_channel_count,
2229 			.map_sysaddr_to_csrow	= k8_map_sysaddr_to_csrow,
2230 			.dbam_to_cs		= k8_dbam_to_chip_select,
2231 		}
2232 	},
2233 	[F10_CPUS] = {
2234 		.ctl_name = "F10h",
2235 		.f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP,
2236 		.f2_id = PCI_DEVICE_ID_AMD_10H_NB_DRAM,
2237 		.max_mcs = 2,
2238 		.ops = {
2239 			.early_channel_count	= f1x_early_channel_count,
2240 			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
2241 			.dbam_to_cs		= f10_dbam_to_chip_select,
2242 		}
2243 	},
2244 	[F15_CPUS] = {
2245 		.ctl_name = "F15h",
2246 		.f1_id = PCI_DEVICE_ID_AMD_15H_NB_F1,
2247 		.f2_id = PCI_DEVICE_ID_AMD_15H_NB_F2,
2248 		.max_mcs = 2,
2249 		.ops = {
2250 			.early_channel_count	= f1x_early_channel_count,
2251 			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
2252 			.dbam_to_cs		= f15_dbam_to_chip_select,
2253 		}
2254 	},
2255 	[F15_M30H_CPUS] = {
2256 		.ctl_name = "F15h_M30h",
2257 		.f1_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1,
2258 		.f2_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F2,
2259 		.max_mcs = 2,
2260 		.ops = {
2261 			.early_channel_count	= f1x_early_channel_count,
2262 			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
2263 			.dbam_to_cs		= f16_dbam_to_chip_select,
2264 		}
2265 	},
2266 	[F15_M60H_CPUS] = {
2267 		.ctl_name = "F15h_M60h",
2268 		.f1_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F1,
2269 		.f2_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F2,
2270 		.max_mcs = 2,
2271 		.ops = {
2272 			.early_channel_count	= f1x_early_channel_count,
2273 			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
2274 			.dbam_to_cs		= f15_m60h_dbam_to_chip_select,
2275 		}
2276 	},
2277 	[F16_CPUS] = {
2278 		.ctl_name = "F16h",
2279 		.f1_id = PCI_DEVICE_ID_AMD_16H_NB_F1,
2280 		.f2_id = PCI_DEVICE_ID_AMD_16H_NB_F2,
2281 		.max_mcs = 2,
2282 		.ops = {
2283 			.early_channel_count	= f1x_early_channel_count,
2284 			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
2285 			.dbam_to_cs		= f16_dbam_to_chip_select,
2286 		}
2287 	},
2288 	[F16_M30H_CPUS] = {
2289 		.ctl_name = "F16h_M30h",
2290 		.f1_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F1,
2291 		.f2_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F2,
2292 		.max_mcs = 2,
2293 		.ops = {
2294 			.early_channel_count	= f1x_early_channel_count,
2295 			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
2296 			.dbam_to_cs		= f16_dbam_to_chip_select,
2297 		}
2298 	},
2299 	[F17_CPUS] = {
2300 		.ctl_name = "F17h",
2301 		.f0_id = PCI_DEVICE_ID_AMD_17H_DF_F0,
2302 		.f6_id = PCI_DEVICE_ID_AMD_17H_DF_F6,
2303 		.max_mcs = 2,
2304 		.ops = {
2305 			.early_channel_count	= f17_early_channel_count,
2306 			.dbam_to_cs		= f17_addr_mask_to_cs_size,
2307 		}
2308 	},
2309 	[F17_M10H_CPUS] = {
2310 		.ctl_name = "F17h_M10h",
2311 		.f0_id = PCI_DEVICE_ID_AMD_17H_M10H_DF_F0,
2312 		.f6_id = PCI_DEVICE_ID_AMD_17H_M10H_DF_F6,
2313 		.max_mcs = 2,
2314 		.ops = {
2315 			.early_channel_count	= f17_early_channel_count,
2316 			.dbam_to_cs		= f17_addr_mask_to_cs_size,
2317 		}
2318 	},
2319 	[F17_M30H_CPUS] = {
2320 		.ctl_name = "F17h_M30h",
2321 		.f0_id = PCI_DEVICE_ID_AMD_17H_M30H_DF_F0,
2322 		.f6_id = PCI_DEVICE_ID_AMD_17H_M30H_DF_F6,
2323 		.max_mcs = 8,
2324 		.ops = {
2325 			.early_channel_count	= f17_early_channel_count,
2326 			.dbam_to_cs		= f17_addr_mask_to_cs_size,
2327 		}
2328 	},
2329 	[F17_M70H_CPUS] = {
2330 		.ctl_name = "F17h_M70h",
2331 		.f0_id = PCI_DEVICE_ID_AMD_17H_M70H_DF_F0,
2332 		.f6_id = PCI_DEVICE_ID_AMD_17H_M70H_DF_F6,
2333 		.max_mcs = 2,
2334 		.ops = {
2335 			.early_channel_count	= f17_early_channel_count,
2336 			.dbam_to_cs		= f17_addr_mask_to_cs_size,
2337 		}
2338 	},
2339 };
2340 
2341 /*
2342  * These are tables of eigenvectors (one per line) which can be used for the
2343  * construction of the syndrome tables. The modified syndrome search algorithm
2344  * uses those to find the symbol in error and thus the DIMM.
2345  *
2346  * Algorithm courtesy of Ross LaFetra from AMD.
2347  */
2348 static const u16 x4_vectors[] = {
2349 	0x2f57, 0x1afe, 0x66cc, 0xdd88,
2350 	0x11eb, 0x3396, 0x7f4c, 0xeac8,
2351 	0x0001, 0x0002, 0x0004, 0x0008,
2352 	0x1013, 0x3032, 0x4044, 0x8088,
2353 	0x106b, 0x30d6, 0x70fc, 0xe0a8,
2354 	0x4857, 0xc4fe, 0x13cc, 0x3288,
2355 	0x1ac5, 0x2f4a, 0x5394, 0xa1e8,
2356 	0x1f39, 0x251e, 0xbd6c, 0x6bd8,
2357 	0x15c1, 0x2a42, 0x89ac, 0x4758,
2358 	0x2b03, 0x1602, 0x4f0c, 0xca08,
2359 	0x1f07, 0x3a0e, 0x6b04, 0xbd08,
2360 	0x8ba7, 0x465e, 0x244c, 0x1cc8,
2361 	0x2b87, 0x164e, 0x642c, 0xdc18,
2362 	0x40b9, 0x80de, 0x1094, 0x20e8,
2363 	0x27db, 0x1eb6, 0x9dac, 0x7b58,
2364 	0x11c1, 0x2242, 0x84ac, 0x4c58,
2365 	0x1be5, 0x2d7a, 0x5e34, 0xa718,
2366 	0x4b39, 0x8d1e, 0x14b4, 0x28d8,
2367 	0x4c97, 0xc87e, 0x11fc, 0x33a8,
2368 	0x8e97, 0x497e, 0x2ffc, 0x1aa8,
2369 	0x16b3, 0x3d62, 0x4f34, 0x8518,
2370 	0x1e2f, 0x391a, 0x5cac, 0xf858,
2371 	0x1d9f, 0x3b7a, 0x572c, 0xfe18,
2372 	0x15f5, 0x2a5a, 0x5264, 0xa3b8,
2373 	0x1dbb, 0x3b66, 0x715c, 0xe3f8,
2374 	0x4397, 0xc27e, 0x17fc, 0x3ea8,
2375 	0x1617, 0x3d3e, 0x6464, 0xb8b8,
2376 	0x23ff, 0x12aa, 0xab6c, 0x56d8,
2377 	0x2dfb, 0x1ba6, 0x913c, 0x7328,
2378 	0x185d, 0x2ca6, 0x7914, 0x9e28,
2379 	0x171b, 0x3e36, 0x7d7c, 0xebe8,
2380 	0x4199, 0x82ee, 0x19f4, 0x2e58,
2381 	0x4807, 0xc40e, 0x130c, 0x3208,
2382 	0x1905, 0x2e0a, 0x5804, 0xac08,
2383 	0x213f, 0x132a, 0xadfc, 0x5ba8,
2384 	0x19a9, 0x2efe, 0xb5cc, 0x6f88,
2385 };
2386 
2387 static const u16 x8_vectors[] = {
2388 	0x0145, 0x028a, 0x2374, 0x43c8, 0xa1f0, 0x0520, 0x0a40, 0x1480,
2389 	0x0211, 0x0422, 0x0844, 0x1088, 0x01b0, 0x44e0, 0x23c0, 0xed80,
2390 	0x1011, 0x0116, 0x022c, 0x0458, 0x08b0, 0x8c60, 0x2740, 0x4e80,
2391 	0x0411, 0x0822, 0x1044, 0x0158, 0x02b0, 0x2360, 0x46c0, 0xab80,
2392 	0x0811, 0x1022, 0x012c, 0x0258, 0x04b0, 0x4660, 0x8cc0, 0x2780,
2393 	0x2071, 0x40e2, 0xa0c4, 0x0108, 0x0210, 0x0420, 0x0840, 0x1080,
2394 	0x4071, 0x80e2, 0x0104, 0x0208, 0x0410, 0x0820, 0x1040, 0x2080,
2395 	0x8071, 0x0102, 0x0204, 0x0408, 0x0810, 0x1020, 0x2040, 0x4080,
2396 	0x019d, 0x03d6, 0x136c, 0x2198, 0x50b0, 0xb2e0, 0x0740, 0x0e80,
2397 	0x0189, 0x03ea, 0x072c, 0x0e58, 0x1cb0, 0x56e0, 0x37c0, 0xf580,
2398 	0x01fd, 0x0376, 0x06ec, 0x0bb8, 0x1110, 0x2220, 0x4440, 0x8880,
2399 	0x0163, 0x02c6, 0x1104, 0x0758, 0x0eb0, 0x2be0, 0x6140, 0xc280,
2400 	0x02fd, 0x01c6, 0x0b5c, 0x1108, 0x07b0, 0x25a0, 0x8840, 0x6180,
2401 	0x0801, 0x012e, 0x025c, 0x04b8, 0x1370, 0x26e0, 0x57c0, 0xb580,
2402 	0x0401, 0x0802, 0x015c, 0x02b8, 0x22b0, 0x13e0, 0x7140, 0xe280,
2403 	0x0201, 0x0402, 0x0804, 0x01b8, 0x11b0, 0x31a0, 0x8040, 0x7180,
2404 	0x0101, 0x0202, 0x0404, 0x0808, 0x1010, 0x2020, 0x4040, 0x8080,
2405 	0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080,
2406 	0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000,
2407 };
2408 
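/*
 * Descriptive summary of the search below: try to reduce @syndrome to zero
 * using the eigenvectors of one symbol at a time, XORing out components in a
 * Gaussian-elimination style walk over the syndrome bits. The symbol whose
 * vectors cancel the syndrome completely is the symbol in error; -1 means no
 * symbol matched.
 */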
2409 static int decode_syndrome(u16 syndrome, const u16 *vectors, unsigned num_vecs,
2410 			   unsigned v_dim)
2411 {
2412 	unsigned int i, err_sym;
2413 
2414 	for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) {
2415 		u16 s = syndrome;
2416 		unsigned v_idx =  err_sym * v_dim;
2417 		unsigned v_end = (err_sym + 1) * v_dim;
2418 
2419 		/* walk over all 16 bits of the syndrome */
2420 		for (i = 1; i < (1U << 16); i <<= 1) {
2421 
2422 			/* if bit is set in that eigenvector... */
2423 			if (v_idx < v_end && vectors[v_idx] & i) {
2424 				u16 ev_comp = vectors[v_idx++];
2425 
2426 				/* ... and bit set in the modified syndrome, */
2427 				if (s & i) {
2428 					/* remove it. */
2429 					s ^= ev_comp;
2430 
2431 					if (!s)
2432 						return err_sym;
2433 				}
2434 
2435 			} else if (s & i)
2436 				/* can't get to zero, move to next symbol */
2437 				break;
2438 		}
2439 	}
2440 
2441 	edac_dbg(0, "syndrome(%x) not found\n", syndrome);
2442 	return -1;
2443 }
2444 
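/*
 * Descriptive note: the special-cased symbols below (0x20-0x23 for x4,
 * 0x10-0x12 for x8) are handled explicitly; any other symbol maps to its
 * channel by dropping the low 4 (x4) or 3 (x8) symbol bits.
 */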
2445 static int map_err_sym_to_channel(int err_sym, int sym_size)
2446 {
2447 	if (sym_size == 4)
2448 		switch (err_sym) {
2449 		case 0x20:
2450 		case 0x21:
2451 			return 0;
2453 		case 0x22:
2454 		case 0x23:
2455 			return 1;
2457 		default:
2458 			return err_sym >> 4;
2460 		}
2461 	/* x8 symbols */
2462 	else
2463 		switch (err_sym) {
2464 		/* imaginary bits not in a DIMM */
2465 		case 0x10:
2466 			WARN(1, "Invalid error symbol: 0x%x\n", err_sym);
2468 			return -1;
2470 
2471 		case 0x11:
2472 			return 0;
2474 		case 0x12:
2475 			return 1;
2477 		default:
2478 			return err_sym >> 3;
2480 		}
2481 	return -1;
2482 }
2483 
2484 static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome)
2485 {
2486 	struct amd64_pvt *pvt = mci->pvt_info;
2487 	int err_sym = -1;
2488 
2489 	if (pvt->ecc_sym_sz == 8)
2490 		err_sym = decode_syndrome(syndrome, x8_vectors,
2491 					  ARRAY_SIZE(x8_vectors),
2492 					  pvt->ecc_sym_sz);
2493 	else if (pvt->ecc_sym_sz == 4)
2494 		err_sym = decode_syndrome(syndrome, x4_vectors,
2495 					  ARRAY_SIZE(x4_vectors),
2496 					  pvt->ecc_sym_sz);
2497 	else {
2498 		amd64_warn("Illegal syndrome type: %u\n", pvt->ecc_sym_sz);
2499 		return err_sym;
2500 	}
2501 
2502 	return map_err_sym_to_channel(err_sym, pvt->ecc_sym_sz);
2503 }
2504 
2505 static void __log_ecc_error(struct mem_ctl_info *mci, struct err_info *err,
2506 			    u8 ecc_type)
2507 {
2508 	enum hw_event_mc_err_type err_type;
2509 	const char *string;
2510 
2511 	if (ecc_type == 2)
2512 		err_type = HW_EVENT_ERR_CORRECTED;
2513 	else if (ecc_type == 1)
2514 		err_type = HW_EVENT_ERR_UNCORRECTED;
2515 	else if (ecc_type == 3)
2516 		err_type = HW_EVENT_ERR_DEFERRED;
2517 	else {
2518 		WARN(1, "Something is rotten in the state of Denmark.\n");
2519 		return;
2520 	}
2521 
2522 	switch (err->err_code) {
2523 	case DECODE_OK:
2524 		string = "";
2525 		break;
2526 	case ERR_NODE:
2527 		string = "Failed to map error addr to a node";
2528 		break;
2529 	case ERR_CSROW:
2530 		string = "Failed to map error addr to a csrow";
2531 		break;
2532 	case ERR_CHANNEL:
2533 		string = "Unknown syndrome - possible error reporting race";
2534 		break;
2535 	case ERR_SYND:
2536 		string = "MCA_SYND not valid - unknown syndrome and csrow";
2537 		break;
2538 	case ERR_NORM_ADDR:
2539 		string = "Cannot decode normalized address";
2540 		break;
2541 	default:
2542 		string = "WTF error";
2543 		break;
2544 	}
2545 
2546 	edac_mc_handle_error(err_type, mci, 1,
2547 			     err->page, err->offset, err->syndrome,
2548 			     err->csrow, err->channel, -1,
2549 			     string, "");
2550 }
2551 
2552 static inline void decode_bus_error(int node_id, struct mce *m)
2553 {
2554 	struct mem_ctl_info *mci;
2555 	struct amd64_pvt *pvt;
2556 	u8 ecc_type = (m->status >> 45) & 0x3;
2557 	u8 xec = XEC(m->status, 0x1f);
2558 	u16 ec = EC(m->status);
2559 	u64 sys_addr;
2560 	struct err_info err;
2561 
2562 	mci = edac_mc_find(node_id);
2563 	if (!mci)
2564 		return;
2565 
2566 	pvt = mci->pvt_info;
2567 
2568 	/* Bail out early if this was an 'observed' error */
2569 	if (PP(ec) == NBSL_PP_OBS)
2570 		return;
2571 
2572 	/* Do only ECC errors */
2573 	if (xec && xec != F10_NBSL_EXT_ERR_ECC)
2574 		return;
2575 
2576 	memset(&err, 0, sizeof(err));
2577 
2578 	sys_addr = get_error_address(pvt, m);
2579 
2580 	if (ecc_type == 2)
2581 		err.syndrome = extract_syndrome(m->status);
2582 
2583 	pvt->ops->map_sysaddr_to_csrow(mci, sys_addr, &err);
2584 
2585 	__log_ecc_error(mci, &err, ecc_type);
2586 }
2587 
2588 /*
2589  * To find the UMC channel represented by this bank we need to match on its
2590  * instance_id. The instance_id of a bank is held in the lower 32 bits of its
2591  * IPID.
2592  *
2593  * Currently, we can derive the channel number by looking at the 6th nibble in
2594  * the instance_id. For example, instance_id=0xYXXXXX where Y is the channel
2595  * number.
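 *
 * An illustrative value (not taken from the docs): an instance_id of 0x150000
 * has 0x1 in its 6th nibble, so the expression below yields channel 1.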
2596  */
2597 static int find_umc_channel(struct mce *m)
2598 {
2599 	return (m->ipid & GENMASK(31, 0)) >> 20;
2600 }
2601 
2602 static void decode_umc_error(int node_id, struct mce *m)
2603 {
2604 	u8 ecc_type = (m->status >> 45) & 0x3;
2605 	struct mem_ctl_info *mci;
2606 	struct amd64_pvt *pvt;
2607 	struct err_info err;
2608 	u64 sys_addr;
2609 
2610 	mci = edac_mc_find(node_id);
2611 	if (!mci)
2612 		return;
2613 
2614 	pvt = mci->pvt_info;
2615 
2616 	memset(&err, 0, sizeof(err));
2617 
2618 	if (m->status & MCI_STATUS_DEFERRED)
2619 		ecc_type = 3;
2620 
2621 	err.channel = find_umc_channel(m);
2622 
2623 	if (!(m->status & MCI_STATUS_SYNDV)) {
2624 		err.err_code = ERR_SYND;
2625 		goto log_error;
2626 	}
2627 
2628 	if (ecc_type == 2) {
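		/*
		 * The extraction below assumes the MCA_SYND layout with the
		 * syndrome length in bits [23:18] and the syndrome itself
		 * starting at bit 32.
		 */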
2629 		u8 length = (m->synd >> 18) & 0x3f;
2630 
2631 		if (length)
2632 			err.syndrome = (m->synd >> 32) & GENMASK(length - 1, 0);
2633 		else
2634 			err.err_code = ERR_CHANNEL;
2635 	}
2636 
2637 	err.csrow = m->synd & 0x7;
2638 
2639 	if (umc_normaddr_to_sysaddr(m->addr, pvt->mc_node_id, err.channel, &sys_addr)) {
2640 		err.err_code = ERR_NORM_ADDR;
2641 		goto log_error;
2642 	}
2643 
2644 	error_address_to_page_and_offset(sys_addr, &err);
2645 
2646 log_error:
2647 	__log_ecc_error(mci, &err, ecc_type);
2648 }
2649 
2650 /*
2651  * Use pvt->F3 which contains the F3 CPU PCI device to get the related
2652  * F1 (AddrMap) and F2 (Dct) devices. Return negative value on error.
2653  * Reserve F0 and F6 on systems with a UMC.
2654  */
2655 static int
2656 reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 pci_id1, u16 pci_id2)
2657 {
2658 	if (pvt->umc) {
2659 		pvt->F0 = pci_get_related_function(pvt->F3->vendor, pci_id1, pvt->F3);
2660 		if (!pvt->F0) {
2661 			amd64_err("F0 not found, device 0x%x (broken BIOS?)\n", pci_id1);
2662 			return -ENODEV;
2663 		}
2664 
2665 		pvt->F6 = pci_get_related_function(pvt->F3->vendor, pci_id2, pvt->F3);
2666 		if (!pvt->F6) {
2667 			pci_dev_put(pvt->F0);
2668 			pvt->F0 = NULL;
2669 
2670 			amd64_err("F6 not found: device 0x%x (broken BIOS?)\n", pci_id2);
2671 			return -ENODEV;
2672 		}
2673 
2674 		edac_dbg(1, "F0: %s\n", pci_name(pvt->F0));
2675 		edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));
2676 		edac_dbg(1, "F6: %s\n", pci_name(pvt->F6));
2677 
2678 		return 0;
2679 	}
2680 
2681 	/* Reserve the ADDRESS MAP Device */
2682 	pvt->F1 = pci_get_related_function(pvt->F3->vendor, pci_id1, pvt->F3);
2683 	if (!pvt->F1) {
2684 		amd64_err("F1 not found: device 0x%x (broken BIOS?)\n", pci_id1);
2685 		return -ENODEV;
2686 	}
2687 
2688 	/* Reserve the DCT Device */
2689 	pvt->F2 = pci_get_related_function(pvt->F3->vendor, pci_id2, pvt->F3);
2690 	if (!pvt->F2) {
2691 		pci_dev_put(pvt->F1);
2692 		pvt->F1 = NULL;
2693 
2694 		amd64_err("F2 not found: device 0x%x (broken BIOS?)\n", pci_id2);
2695 		return -ENODEV;
2696 	}
2697 
2698 	edac_dbg(1, "F1: %s\n", pci_name(pvt->F1));
2699 	edac_dbg(1, "F2: %s\n", pci_name(pvt->F2));
2700 	edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));
2701 
2702 	return 0;
2703 }
2704 
2705 static void free_mc_sibling_devs(struct amd64_pvt *pvt)
2706 {
2707 	if (pvt->umc) {
2708 		pci_dev_put(pvt->F0);
2709 		pci_dev_put(pvt->F6);
2710 	} else {
2711 		pci_dev_put(pvt->F1);
2712 		pci_dev_put(pvt->F2);
2713 	}
2714 }
2715 
2716 static void determine_ecc_sym_sz(struct amd64_pvt *pvt)
2717 {
2718 	pvt->ecc_sym_sz = 4;
2719 
2720 	if (pvt->umc) {
2721 		u8 i;
2722 
2723 		for_each_umc(i) {
2724 			/* Check enabled channels only: */
2725 			if (pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) {
2726 				if (pvt->umc[i].ecc_ctrl & BIT(9)) {
2727 					pvt->ecc_sym_sz = 16;
2728 					return;
2729 				} else if (pvt->umc[i].ecc_ctrl & BIT(7)) {
2730 					pvt->ecc_sym_sz = 8;
2731 					return;
2732 				}
2733 			}
2734 		}
2735 	} else if (pvt->fam >= 0x10) {
2736 		u32 tmp;
2737 
2738 		amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
2739 		/* F16h has only DCT0, so no need to read dbam1. */
2740 		if (pvt->fam != 0x16)
2741 			amd64_read_dct_pci_cfg(pvt, 1, DBAM0, &pvt->dbam1);
2742 
2743 		/* F10h, revD and later can do x8 ECC too. */
2744 		if ((pvt->fam > 0x10 || pvt->model > 7) && tmp & BIT(25))
2745 			pvt->ecc_sym_sz = 8;
2746 	}
2747 }
2748 
2749 /*
2750  * Retrieve the hardware registers of the memory controller.
2751  */
2752 static void __read_mc_regs_df(struct amd64_pvt *pvt)
2753 {
2754 	u8 nid = pvt->mc_node_id;
2755 	struct amd64_umc *umc;
2756 	u32 i, umc_base;
2757 
2758 	/* Read registers from each UMC */
2759 	for_each_umc(i) {
2760 
2761 		umc_base = get_umc_base(i);
2762 		umc = &pvt->umc[i];
2763 
2764 		amd_smn_read(nid, umc_base + UMCCH_DIMM_CFG, &umc->dimm_cfg);
2765 		amd_smn_read(nid, umc_base + UMCCH_UMC_CFG, &umc->umc_cfg);
2766 		amd_smn_read(nid, umc_base + UMCCH_SDP_CTRL, &umc->sdp_ctrl);
2767 		amd_smn_read(nid, umc_base + UMCCH_ECC_CTRL, &umc->ecc_ctrl);
2768 		amd_smn_read(nid, umc_base + UMCCH_UMC_CAP_HI, &umc->umc_cap_hi);
2769 	}
2770 }
2771 
2772 /*
2773  * Retrieve the hardware registers of the memory controller (this includes the
2774  * 'Address Map' and 'Misc' device regs)
2775  */
2776 static void read_mc_regs(struct amd64_pvt *pvt)
2777 {
2778 	unsigned int range;
2779 	u64 msr_val;
2780 
2781 	/*
2782 	 * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
2783 	 * those are Read-As-Zero.
2784 	 */
2785 	rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
2786 	edac_dbg(0, "  TOP_MEM:  0x%016llx\n", pvt->top_mem);
2787 
2788 	/* Check first whether TOP_MEM2 is enabled: */
2789 	rdmsrl(MSR_K8_SYSCFG, msr_val);
2790 	if (msr_val & BIT(21)) {
2791 		rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
2792 		edac_dbg(0, "  TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
2793 	} else {
2794 		edac_dbg(0, "  TOP_MEM2 disabled\n");
2795 	}
2796 
2797 	if (pvt->umc) {
2798 		__read_mc_regs_df(pvt);
2799 		amd64_read_pci_cfg(pvt->F0, DF_DHAR, &pvt->dhar);
2800 
2801 		goto skip;
2802 	}
2803 
2804 	amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap);
2805 
2806 	read_dram_ctl_register(pvt);
2807 
2808 	for (range = 0; range < DRAM_RANGES; range++) {
2809 		u8 rw;
2810 
2811 		/* read settings for this DRAM range */
2812 		read_dram_base_limit_regs(pvt, range);
2813 
2814 		rw = dram_rw(pvt, range);
2815 		if (!rw)
2816 			continue;
2817 
2818 		edac_dbg(1, "  DRAM range[%d], base: 0x%016llx; limit: 0x%016llx\n",
2819 			 range,
2820 			 get_dram_base(pvt, range),
2821 			 get_dram_limit(pvt, range));
2822 
2823 		edac_dbg(1, "   IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d\n",
2824 			 dram_intlv_en(pvt, range) ? "Enabled" : "Disabled",
2825 			 (rw & 0x1) ? "R" : "-",
2826 			 (rw & 0x2) ? "W" : "-",
2827 			 dram_intlv_sel(pvt, range),
2828 			 dram_dst_node(pvt, range));
2829 	}
2830 
2831 	amd64_read_pci_cfg(pvt->F1, DHAR, &pvt->dhar);
2832 	amd64_read_dct_pci_cfg(pvt, 0, DBAM0, &pvt->dbam0);
2833 
2834 	amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare);
2835 
2836 	amd64_read_dct_pci_cfg(pvt, 0, DCLR0, &pvt->dclr0);
2837 	amd64_read_dct_pci_cfg(pvt, 0, DCHR0, &pvt->dchr0);
2838 
2839 	if (!dct_ganging_enabled(pvt)) {
2840 		amd64_read_dct_pci_cfg(pvt, 1, DCLR0, &pvt->dclr1);
2841 		amd64_read_dct_pci_cfg(pvt, 1, DCHR0, &pvt->dchr1);
2842 	}
2843 
2844 skip:
2845 	read_dct_base_mask(pvt);
2846 
2847 	determine_memory_type(pvt);
2848 	edac_dbg(1, "  DIMM type: %s\n", edac_mem_types[pvt->dram_type]);
2849 
2850 	determine_ecc_sym_sz(pvt);
2851 }
2852 
2853 /*
2854  * NOTE: CPU Revision Dependent code
2855  *
2856  * Input:
2857  *	@csrow_nr ChipSelect Row Number (0..NUM_CHIPSELECTS-1)
2858  *	k8 private pointer to -->
2859  *			DRAM Bank Address mapping register
2860  *			node_id
2861  *			DCL register, where dual_channel_active is indicated
2862  *
2863  * The DBAM register consists of 4 sets of 4 bits each, with these definitions:
2864  *
2865  * Bits:	CSROWs
2866  * 0-3		CSROWs 0 and 1
2867  * 4-7		CSROWs 2 and 3
2868  * 8-11		CSROWs 4 and 5
2869  * 12-15	CSROWs 6 and 7
2870  *
2871  * Values range from 0 to 15.
2872  * The meaning of the values depends on CPU revision and dual-channel state;
2873  * see the relevant BKDG for more info.
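 *
 * For example (illustrative value): with DBAM = 0x00004321, CSROWs 0 and 1
 * use value 0x1, CSROWs 2/3 use 0x2, CSROWs 4/5 use 0x3 and CSROWs 6/7 use 0x4.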
2874  *
2875  * The memory controller provides for a total of only 8 CSROWs in its current
2876  * architecture. Each "pair" of CSROWs normally represents just one DIMM in
2877  * single-channel mode, or two DIMMs in dual-channel mode.
2878  *
2879  * The following code logic collapses the various tables for CSROW based on CPU
2880  * revision.
2881  *
2882  * Returns:
2883  *	The number of PAGE_SIZE pages that the specified CSROW number
2884  *	encompasses.
2885  *
2886  */
2887 static u32 get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr_orig)
2888 {
2889 	u32 dbam = dct ? pvt->dbam1 : pvt->dbam0;
2890 	int csrow_nr = csrow_nr_orig;
2891 	u32 cs_mode, nr_pages;
2892 
2893 	if (!pvt->umc) {
2894 		csrow_nr >>= 1;
2895 		cs_mode = DBAM_DIMM(csrow_nr, dbam);
2896 	} else {
2897 		cs_mode = f17_get_cs_mode(csrow_nr >> 1, dct, pvt);
2898 	}
2899 
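	/*
	 * dbam_to_cs() reports the chip select size in MB (cf. the MB-based
	 * output in debug_display_dimm_sizes()); shifting by (20 - PAGE_SHIFT)
	 * converts that to PAGE_SIZE pages, e.g. 256 pages per MB with 4K
	 * pages.
	 */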
2900 	nr_pages   = pvt->ops->dbam_to_cs(pvt, dct, cs_mode, csrow_nr);
2901 	nr_pages <<= 20 - PAGE_SHIFT;
2902 
2903 	edac_dbg(0, "csrow: %d, channel: %d, DBAM idx: %d\n",
2904 		    csrow_nr_orig, dct,  cs_mode);
2905 	edac_dbg(0, "nr_pages/channel: %u\n", nr_pages);
2906 
2907 	return nr_pages;
2908 }
2909 
2910 static int init_csrows_df(struct mem_ctl_info *mci)
2911 {
2912 	struct amd64_pvt *pvt = mci->pvt_info;
2913 	enum edac_type edac_mode = EDAC_NONE;
2914 	enum dev_type dev_type = DEV_UNKNOWN;
2915 	struct dimm_info *dimm;
2916 	int empty = 1;
2917 	u8 umc, cs;
2918 
2919 	if (mci->edac_ctl_cap & EDAC_FLAG_S16ECD16ED) {
2920 		edac_mode = EDAC_S16ECD16ED;
2921 		dev_type = DEV_X16;
2922 	} else if (mci->edac_ctl_cap & EDAC_FLAG_S8ECD8ED) {
2923 		edac_mode = EDAC_S8ECD8ED;
2924 		dev_type = DEV_X8;
2925 	} else if (mci->edac_ctl_cap & EDAC_FLAG_S4ECD4ED) {
2926 		edac_mode = EDAC_S4ECD4ED;
2927 		dev_type = DEV_X4;
2928 	} else if (mci->edac_ctl_cap & EDAC_FLAG_SECDED) {
2929 		edac_mode = EDAC_SECDED;
2930 	}
2931 
2932 	for_each_umc(umc) {
2933 		for_each_chip_select(cs, umc, pvt) {
2934 			if (!csrow_enabled(cs, umc, pvt))
2935 				continue;
2936 
2937 			empty = 0;
2938 			dimm = mci->csrows[cs]->channels[umc]->dimm;
2939 
2940 			edac_dbg(1, "MC node: %d, csrow: %d\n",
2941 					pvt->mc_node_id, cs);
2942 
2943 			dimm->nr_pages = get_csrow_nr_pages(pvt, umc, cs);
2944 			dimm->mtype = pvt->dram_type;
2945 			dimm->edac_mode = edac_mode;
2946 			dimm->dtype = dev_type;
2947 			dimm->grain = 64;
2948 		}
2949 	}
2950 
2951 	return empty;
2952 }
2953 
2954 /*
2955  * Initialize the array of csrow attribute instances, based on the values
2956  * from pci config hardware registers.
2957  */
2958 static int init_csrows(struct mem_ctl_info *mci)
2959 {
2960 	struct amd64_pvt *pvt = mci->pvt_info;
2961 	enum edac_type edac_mode = EDAC_NONE;
2962 	struct csrow_info *csrow;
2963 	struct dimm_info *dimm;
2964 	int i, j, empty = 1;
2965 	int nr_pages = 0;
2966 	u32 val;
2967 
2968 	if (pvt->umc)
2969 		return init_csrows_df(mci);
2970 
2971 	amd64_read_pci_cfg(pvt->F3, NBCFG, &val);
2972 
2973 	pvt->nbcfg = val;
2974 
2975 	edac_dbg(0, "node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
2976 		 pvt->mc_node_id, val,
2977 		 !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE));
2978 
2979 	/*
2980 	 * We iterate over DCT0 here but we look at DCT1 in parallel, if needed.
2981 	 */
2982 	for_each_chip_select(i, 0, pvt) {
2983 		bool row_dct0 = !!csrow_enabled(i, 0, pvt);
2984 		bool row_dct1 = false;
2985 
2986 		if (pvt->fam != 0xf)
2987 			row_dct1 = !!csrow_enabled(i, 1, pvt);
2988 
2989 		if (!row_dct0 && !row_dct1)
2990 			continue;
2991 
2992 		csrow = mci->csrows[i];
2993 		empty = 0;
2994 
2995 		edac_dbg(1, "MC node: %d, csrow: %d\n",
2996 			    pvt->mc_node_id, i);
2997 
2998 		if (row_dct0) {
2999 			nr_pages = get_csrow_nr_pages(pvt, 0, i);
3000 			csrow->channels[0]->dimm->nr_pages = nr_pages;
3001 		}
3002 
3003 		/* K8 has only one DCT */
3004 		if (pvt->fam != 0xf && row_dct1) {
3005 			int row_dct1_pages = get_csrow_nr_pages(pvt, 1, i);
3006 
3007 			csrow->channels[1]->dimm->nr_pages = row_dct1_pages;
3008 			nr_pages += row_dct1_pages;
3009 		}
3010 
3011 		edac_dbg(1, "Total csrow%d pages: %u\n", i, nr_pages);
3012 
3013 		/* Determine DIMM ECC mode: */
3014 		if (pvt->nbcfg & NBCFG_ECC_ENABLE) {
3015 			edac_mode = (pvt->nbcfg & NBCFG_CHIPKILL)
3016 					? EDAC_S4ECD4ED
3017 					: EDAC_SECDED;
3018 		}
3019 
3020 		for (j = 0; j < pvt->channel_count; j++) {
3021 			dimm = csrow->channels[j]->dimm;
3022 			dimm->mtype = pvt->dram_type;
3023 			dimm->edac_mode = edac_mode;
3024 			dimm->grain = 64;
3025 		}
3026 	}
3027 
3028 	return empty;
3029 }
3030 
3031 /* get all cores on this DCT */
3032 static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, u16 nid)
3033 {
3034 	int cpu;
3035 
3036 	for_each_online_cpu(cpu)
3037 		if (amd_get_nb_id(cpu) == nid)
3038 			cpumask_set_cpu(cpu, mask);
3039 }
3040 
3041 /* check MCG_CTL on all the cpus on this node */
3042 static bool nb_mce_bank_enabled_on_node(u16 nid)
3043 {
3044 	cpumask_var_t mask;
3045 	int cpu, nbe;
3046 	bool ret = false;
3047 
3048 	if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
3049 		amd64_warn("%s: Error allocating mask\n", __func__);
3050 		return false;
3051 	}
3052 
3053 	get_cpus_on_this_dct_cpumask(mask, nid);
3054 
3055 	rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);
3056 
3057 	for_each_cpu(cpu, mask) {
3058 		struct msr *reg = per_cpu_ptr(msrs, cpu);
3059 		nbe = reg->l & MSR_MCGCTL_NBE;
3060 
3061 		edac_dbg(0, "core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
3062 			 cpu, reg->q,
3063 			 (nbe ? "enabled" : "disabled"));
3064 
3065 		if (!nbe)
3066 			goto out;
3067 	}
3068 	ret = true;
3069 
3070 out:
3071 	free_cpumask_var(mask);
3072 	return ret;
3073 }
3074 
3075 static int toggle_ecc_err_reporting(struct ecc_settings *s, u16 nid, bool on)
3076 {
3077 	cpumask_var_t cmask;
3078 	int cpu;
3079 
3080 	if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
3081 		amd64_warn("%s: error allocating mask\n", __func__);
3082 		return -ENOMEM;
3083 	}
3084 
3085 	get_cpus_on_this_dct_cpumask(cmask, nid);
3086 
3087 	rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
3088 
3089 	for_each_cpu(cpu, cmask) {
3090 
3091 		struct msr *reg = per_cpu_ptr(msrs, cpu);
3092 
3093 		if (on) {
3094 			if (reg->l & MSR_MCGCTL_NBE)
3095 				s->flags.nb_mce_enable = 1;
3096 
3097 			reg->l |= MSR_MCGCTL_NBE;
3098 		} else {
3099 			/*
3100 			 * Turn off NB MCE reporting only when it was off before
3101 			 */
3102 			if (!s->flags.nb_mce_enable)
3103 				reg->l &= ~MSR_MCGCTL_NBE;
3104 		}
3105 	}
3106 	wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
3107 
3108 	free_cpumask_var(cmask);
3109 
3110 	return 0;
3111 }
3112 
3113 static bool enable_ecc_error_reporting(struct ecc_settings *s, u16 nid,
3114 				       struct pci_dev *F3)
3115 {
3116 	bool ret = true;
3117 	u32 value, mask = 0x3;		/* UECC/CECC enable */
3118 
3119 	if (toggle_ecc_err_reporting(s, nid, ON)) {
3120 		amd64_warn("Error enabling ECC reporting over MCGCTL!\n");
3121 		return false;
3122 	}
3123 
3124 	amd64_read_pci_cfg(F3, NBCTL, &value);
3125 
3126 	s->old_nbctl   = value & mask;
3127 	s->nbctl_valid = true;
3128 
3129 	value |= mask;
3130 	amd64_write_pci_cfg(F3, NBCTL, value);
3131 
3132 	amd64_read_pci_cfg(F3, NBCFG, &value);
3133 
3134 	edac_dbg(0, "1: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
3135 		 nid, value, !!(value & NBCFG_ECC_ENABLE));
3136 
3137 	if (!(value & NBCFG_ECC_ENABLE)) {
3138 		amd64_warn("DRAM ECC disabled on this node, enabling...\n");
3139 
3140 		s->flags.nb_ecc_prev = 0;
3141 
3142 		/* Attempt to turn on DRAM ECC Enable */
3143 		value |= NBCFG_ECC_ENABLE;
3144 		amd64_write_pci_cfg(F3, NBCFG, value);
3145 
3146 		amd64_read_pci_cfg(F3, NBCFG, &value);
3147 
3148 		if (!(value & NBCFG_ECC_ENABLE)) {
3149 			amd64_warn("Hardware rejected DRAM ECC enable, "
3150 				   "check memory DIMM configuration.\n");
3151 			ret = false;
3152 		} else {
3153 			amd64_info("Hardware accepted DRAM ECC Enable\n");
3154 		}
3155 	} else {
3156 		s->flags.nb_ecc_prev = 1;
3157 	}
3158 
3159 	edac_dbg(0, "2: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
3160 		 nid, value, !!(value & NBCFG_ECC_ENABLE));
3161 
3162 	return ret;
3163 }
3164 
3165 static void restore_ecc_error_reporting(struct ecc_settings *s, u16 nid,
3166 					struct pci_dev *F3)
3167 {
3168 	u32 value, mask = 0x3;		/* UECC/CECC enable */
3169 
3170 	if (!s->nbctl_valid)
3171 		return;
3172 
3173 	amd64_read_pci_cfg(F3, NBCTL, &value);
3174 	value &= ~mask;
3175 	value |= s->old_nbctl;
3176 
3177 	amd64_write_pci_cfg(F3, NBCTL, value);
3178 
3179 	/* restore previous BIOS DRAM ECC "off" setting we force-enabled */
3180 	if (!s->flags.nb_ecc_prev) {
3181 		amd64_read_pci_cfg(F3, NBCFG, &value);
3182 		value &= ~NBCFG_ECC_ENABLE;
3183 		amd64_write_pci_cfg(F3, NBCFG, value);
3184 	}
3185 
3186 	/* restore the NB Enable MCGCTL bit */
3187 	if (toggle_ecc_err_reporting(s, nid, OFF))
3188 		amd64_warn("Error restoring NB MCGCTL settings!\n");
3189 }
3190 
3191 static bool ecc_enabled(struct amd64_pvt *pvt)
3192 {
3193 	u16 nid = pvt->mc_node_id;
3194 	bool nb_mce_en = false;
3195 	u8 ecc_en = 0, i;
3196 	u32 value;
3197 
3198 	if (boot_cpu_data.x86 >= 0x17) {
3199 		u8 umc_en_mask = 0, ecc_en_mask = 0;
3200 		struct amd64_umc *umc;
3201 
3202 		for_each_umc(i) {
3203 			umc = &pvt->umc[i];
3204 
3205 			/* Only check enabled UMCs. */
3206 			if (!(umc->sdp_ctrl & UMC_SDP_INIT))
3207 				continue;
3208 
3209 			umc_en_mask |= BIT(i);
3210 
3211 			if (umc->umc_cap_hi & UMC_ECC_ENABLED)
3212 				ecc_en_mask |= BIT(i);
3213 		}
3214 
3215 		/* Check whether at least one UMC is enabled: */
3216 		if (umc_en_mask)
3217 			ecc_en = umc_en_mask == ecc_en_mask;
3218 		else
3219 			edac_dbg(0, "Node %d: No enabled UMCs.\n", nid);
3220 
3221 		/* Assume UMC MCA banks are enabled. */
3222 		nb_mce_en = true;
3223 	} else {
3224 		amd64_read_pci_cfg(pvt->F3, NBCFG, &value);
3225 
3226 		ecc_en = !!(value & NBCFG_ECC_ENABLE);
3227 
3228 		nb_mce_en = nb_mce_bank_enabled_on_node(nid);
3229 		if (!nb_mce_en)
3230 			edac_dbg(0, "NB MCE bank disabled, set MSR 0x%08x[4] on node %d to enable.\n",
3231 				     MSR_IA32_MCG_CTL, nid);
3232 	}
3233 
3234 	amd64_info("Node %d: DRAM ECC %s.\n",
3235 		   nid, (ecc_en ? "enabled" : "disabled"));
3236 
3237 	return ecc_en && nb_mce_en;
3241 }
3242 
3243 static inline void
3244 f17h_determine_edac_ctl_cap(struct mem_ctl_info *mci, struct amd64_pvt *pvt)
3245 {
3246 	u8 i, ecc_en = 1, cpk_en = 1, dev_x4 = 1, dev_x16 = 1;
3247 
3248 	for_each_umc(i) {
3249 		if (pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) {
3250 			ecc_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_ENABLED);
3251 			cpk_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_CHIPKILL_CAP);
3252 
3253 			dev_x4  &= !!(pvt->umc[i].dimm_cfg & BIT(6));
3254 			dev_x16 &= !!(pvt->umc[i].dimm_cfg & BIT(7));
3255 		}
3256 	}
3257 
3258 	/* Set chipkill only if ECC is enabled: */
3259 	if (ecc_en) {
3260 		mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
3261 
3262 		if (!cpk_en)
3263 			return;
3264 
3265 		if (dev_x4)
3266 			mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
3267 		else if (dev_x16)
3268 			mci->edac_ctl_cap |= EDAC_FLAG_S16ECD16ED;
3269 		else
3270 			mci->edac_ctl_cap |= EDAC_FLAG_S8ECD8ED;
3271 	}
3272 }
3273 
3274 static void setup_mci_misc_attrs(struct mem_ctl_info *mci)
3275 {
3276 	struct amd64_pvt *pvt = mci->pvt_info;
3277 
3278 	mci->mtype_cap		= MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
3279 	mci->edac_ctl_cap	= EDAC_FLAG_NONE;
3280 
3281 	if (pvt->umc) {
3282 		f17h_determine_edac_ctl_cap(mci, pvt);
3283 	} else {
3284 		if (pvt->nbcap & NBCAP_SECDED)
3285 			mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
3286 
3287 		if (pvt->nbcap & NBCAP_CHIPKILL)
3288 			mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
3289 	}
3290 
3291 	mci->edac_cap		= determine_edac_cap(pvt);
3292 	mci->mod_name		= EDAC_MOD_STR;
3293 	mci->ctl_name		= fam_type->ctl_name;
3294 	mci->dev_name		= pci_name(pvt->F3);
3295 	mci->ctl_page_to_phys	= NULL;
3296 
3297 	/* memory scrubber interface */
3298 	mci->set_sdram_scrub_rate = set_scrub_rate;
3299 	mci->get_sdram_scrub_rate = get_scrub_rate;
3300 }
3301 
3302 /*
3303  * returns a pointer to the family descriptor on success, NULL otherwise.
3304  */
3305 static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
3306 {
3307 	pvt->ext_model  = boot_cpu_data.x86_model >> 4;
3308 	pvt->stepping	= boot_cpu_data.x86_stepping;
3309 	pvt->model	= boot_cpu_data.x86_model;
3310 	pvt->fam	= boot_cpu_data.x86;
3311 
3312 	switch (pvt->fam) {
3313 	case 0xf:
3314 		fam_type	= &family_types[K8_CPUS];
3315 		pvt->ops	= &family_types[K8_CPUS].ops;
3316 		break;
3317 
3318 	case 0x10:
3319 		fam_type	= &family_types[F10_CPUS];
3320 		pvt->ops	= &family_types[F10_CPUS].ops;
3321 		break;
3322 
3323 	case 0x15:
3324 		if (pvt->model == 0x30) {
3325 			fam_type = &family_types[F15_M30H_CPUS];
3326 			pvt->ops = &family_types[F15_M30H_CPUS].ops;
3327 			break;
3328 		} else if (pvt->model == 0x60) {
3329 			fam_type = &family_types[F15_M60H_CPUS];
3330 			pvt->ops = &family_types[F15_M60H_CPUS].ops;
3331 			break;
3332 		}
3333 
3334 		fam_type	= &family_types[F15_CPUS];
3335 		pvt->ops	= &family_types[F15_CPUS].ops;
3336 		break;
3337 
3338 	case 0x16:
3339 		if (pvt->model == 0x30) {
3340 			fam_type = &family_types[F16_M30H_CPUS];
3341 			pvt->ops = &family_types[F16_M30H_CPUS].ops;
3342 			break;
3343 		}
3344 		fam_type	= &family_types[F16_CPUS];
3345 		pvt->ops	= &family_types[F16_CPUS].ops;
3346 		break;
3347 
3348 	case 0x17:
3349 		if (pvt->model >= 0x10 && pvt->model <= 0x2f) {
3350 			fam_type = &family_types[F17_M10H_CPUS];
3351 			pvt->ops = &family_types[F17_M10H_CPUS].ops;
3352 			break;
3353 		} else if (pvt->model >= 0x30 && pvt->model <= 0x3f) {
3354 			fam_type = &family_types[F17_M30H_CPUS];
3355 			pvt->ops = &family_types[F17_M30H_CPUS].ops;
3356 			break;
3357 		} else if (pvt->model >= 0x70 && pvt->model <= 0x7f) {
3358 			fam_type = &family_types[F17_M70H_CPUS];
3359 			pvt->ops = &family_types[F17_M70H_CPUS].ops;
3360 			break;
3361 		}
3362 		/* fall through */
3363 	case 0x18:
3364 		fam_type	= &family_types[F17_CPUS];
3365 		pvt->ops	= &family_types[F17_CPUS].ops;
3366 
3367 		if (pvt->fam == 0x18)
3368 			family_types[F17_CPUS].ctl_name = "F18h";
3369 		break;
3370 
3371 	default:
3372 		amd64_err("Unsupported family!\n");
3373 		return NULL;
3374 	}
3375 
3376 	amd64_info("%s %sdetected (node %d).\n", fam_type->ctl_name,
3377 		     (pvt->fam == 0xf ?
3378 				(pvt->ext_model >= K8_REV_F  ? "revF or later "
3379 							     : "revE or earlier ")
3380 				 : ""), pvt->mc_node_id);
3381 	return fam_type;
3382 }
3383 
3384 static const struct attribute_group *amd64_edac_attr_groups[] = {
3385 #ifdef CONFIG_EDAC_DEBUG
3386 	&amd64_edac_dbg_group,
3387 #endif
3388 #ifdef CONFIG_EDAC_AMD64_ERROR_INJECTION
3389 	&amd64_edac_inj_group,
3390 #endif
3391 	NULL
3392 };
3393 
3394 static int hw_info_get(struct amd64_pvt *pvt)
3395 {
3396 	u16 pci_id1, pci_id2;
3397 	int ret = -EINVAL;
3398 
3399 	if (pvt->fam >= 0x17) {
3400 		pvt->umc = kcalloc(fam_type->max_mcs, sizeof(struct amd64_umc), GFP_KERNEL);
3401 		if (!pvt->umc)
3402 			return -ENOMEM;
3403 
3404 		pci_id1 = fam_type->f0_id;
3405 		pci_id2 = fam_type->f6_id;
3406 	} else {
3407 		pci_id1 = fam_type->f1_id;
3408 		pci_id2 = fam_type->f2_id;
3409 	}
3410 
3411 	ret = reserve_mc_sibling_devs(pvt, pci_id1, pci_id2);
3412 	if (ret)
3413 		return ret;
3414 
3415 	read_mc_regs(pvt);
3416 
3417 	return 0;
3418 }
3419 
3420 static void hw_info_put(struct amd64_pvt *pvt)
3421 {
3422 	if (pvt->F0 || pvt->F1)
3423 		free_mc_sibling_devs(pvt);
3424 
3425 	kfree(pvt->umc);
3426 }
3427 
3428 static int init_one_instance(struct amd64_pvt *pvt)
3429 {
3430 	struct mem_ctl_info *mci = NULL;
3431 	struct edac_mc_layer layers[2];
3432 	int ret = -EINVAL;
3433 
3434 	/*
3435 	 * We need to determine how many memory channels there are. Then use
3436 	 * that information for calculating the size of the dynamic instance
3437 	 * tables in the 'mci' structure.
3438 	 */
3439 	pvt->channel_count = pvt->ops->early_channel_count(pvt);
3440 	if (pvt->channel_count < 0)
3441 		return ret;
3442 
3443 	ret = -ENOMEM;
3444 	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
3445 	layers[0].size = pvt->csels[0].b_cnt;
3446 	layers[0].is_virt_csrow = true;
3447 	layers[1].type = EDAC_MC_LAYER_CHANNEL;
3448 
3449 	/*
3450 	 * Always allocate two channels since we can have setups with DIMMs on
3451 	 * only one channel. Also, this simplifies handling later for the price
3452 	 * of a couple of KBs tops.
3453 	 */
3454 	layers[1].size = fam_type->max_mcs;
3455 	layers[1].is_virt_csrow = false;
3456 
3457 	mci = edac_mc_alloc(pvt->mc_node_id, ARRAY_SIZE(layers), layers, 0);
3458 	if (!mci)
3459 		return ret;
3460 
3461 	mci->pvt_info = pvt;
3462 	mci->pdev = &pvt->F3->dev;
3463 
3464 	setup_mci_misc_attrs(mci);
3465 
3466 	if (init_csrows(mci))
3467 		mci->edac_cap = EDAC_FLAG_NONE;
3468 
3469 	ret = -ENODEV;
3470 	if (edac_mc_add_mc_with_groups(mci, amd64_edac_attr_groups)) {
3471 		edac_dbg(1, "failed edac_mc_add_mc()\n");
3472 		edac_mc_free(mci);
3473 		return ret;
3474 	}
3475 
3476 	return 0;
3477 }
3478 
3479 static bool instance_has_memory(struct amd64_pvt *pvt)
3480 {
3481 	bool cs_enabled = false;
3482 	int cs = 0, dct = 0;
3483 
3484 	for (dct = 0; dct < fam_type->max_mcs; dct++) {
3485 		for_each_chip_select(cs, dct, pvt)
3486 			cs_enabled |= csrow_enabled(cs, dct, pvt);
3487 	}
3488 
3489 	return cs_enabled;
3490 }
3491 
3492 static int probe_one_instance(unsigned int nid)
3493 {
3494 	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
3495 	struct amd64_pvt *pvt = NULL;
3496 	struct ecc_settings *s;
3497 	int ret;
3498 
3499 	ret = -ENOMEM;
3500 	s = kzalloc(sizeof(struct ecc_settings), GFP_KERNEL);
3501 	if (!s)
3502 		goto err_out;
3503 
3504 	ecc_stngs[nid] = s;
3505 
3506 	pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
3507 	if (!pvt)
3508 		goto err_settings;
3509 
3510 	pvt->mc_node_id	= nid;
3511 	pvt->F3 = F3;
3512 
3513 	fam_type = per_family_init(pvt);
3514 	if (!fam_type)
3515 		goto err_enable;
3516 
3517 	ret = hw_info_get(pvt);
3518 	if (ret < 0)
3519 		goto err_enable;
3520 
3521 	ret = 0;
3522 	if (!instance_has_memory(pvt)) {
3523 		amd64_info("Node %d: No DIMMs detected.\n", nid);
3524 		goto err_enable;
3525 	}
3526 
3527 	if (!ecc_enabled(pvt)) {
3528 		ret = -ENODEV;
3529 
3530 		if (!ecc_enable_override)
3531 			goto err_enable;
3532 
3533 		if (boot_cpu_data.x86 >= 0x17) {
3534 			amd64_warn("Forcing ECC on is not recommended on newer systems. Please enable ECC in BIOS.\n");
3535 			goto err_enable;
3536 		} else
3537 			amd64_warn("Forcing ECC on!\n");
3538 
3539 		if (!enable_ecc_error_reporting(s, nid, F3))
3540 			goto err_enable;
3541 	}
3542 
3543 	ret = init_one_instance(pvt);
3544 	if (ret < 0) {
3545 		amd64_err("Error probing instance: %d\n", nid);
3546 
3547 		if (boot_cpu_data.x86 < 0x17)
3548 			restore_ecc_error_reporting(s, nid, F3);
3549 
3550 		goto err_enable;
3551 	}
3552 
3553 	dump_misc_regs(pvt);
3554 
3555 	return ret;
3556 
3557 err_enable:
3558 	hw_info_put(pvt);
3559 	kfree(pvt);
3560 
3561 err_settings:
3562 	kfree(s);
3563 	ecc_stngs[nid] = NULL;
3564 
3565 err_out:
3566 	return ret;
3567 }
3568 
3569 static void remove_one_instance(unsigned int nid)
3570 {
3571 	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
3572 	struct ecc_settings *s = ecc_stngs[nid];
3573 	struct mem_ctl_info *mci;
3574 	struct amd64_pvt *pvt;
3575 
3576 	mci = find_mci_by_dev(&F3->dev);
3577 	WARN_ON(!mci);
3578 
3579 	/* Remove from EDAC CORE tracking list */
3580 	mci = edac_mc_del_mc(&F3->dev);
3581 	if (!mci)
3582 		return;
3583 
3584 	pvt = mci->pvt_info;
3585 
3586 	restore_ecc_error_reporting(s, nid, F3);
3587 
3588 	kfree(ecc_stngs[nid]);
3589 	ecc_stngs[nid] = NULL;
3590 
3591 	/* Free the EDAC CORE resources */
3592 	mci->pvt_info = NULL;
3593 
3594 	hw_info_put(pvt);
3595 	kfree(pvt);
3596 	edac_mc_free(mci);
3597 }
3598 
3599 static void setup_pci_device(void)
3600 {
3601 	struct mem_ctl_info *mci;
3602 	struct amd64_pvt *pvt;
3603 
3604 	if (pci_ctl)
3605 		return;
3606 
3607 	mci = edac_mc_find(0);
3608 	if (!mci)
3609 		return;
3610 
3611 	pvt = mci->pvt_info;
3612 	if (pvt->umc)
3613 		pci_ctl = edac_pci_create_generic_ctl(&pvt->F0->dev, EDAC_MOD_STR);
3614 	else
3615 		pci_ctl = edac_pci_create_generic_ctl(&pvt->F2->dev, EDAC_MOD_STR);
3616 	if (!pci_ctl) {
3617 		pr_warn("%s(): Unable to create PCI control\n", __func__);
3618 		pr_warn("%s(): PCI error report via EDAC not set\n", __func__);
3619 	}
3620 }
3621 
3622 static const struct x86_cpu_id amd64_cpuids[] = {
3623 	{ X86_VENDOR_AMD, 0xF,	X86_MODEL_ANY,	X86_FEATURE_ANY, 0 },
3624 	{ X86_VENDOR_AMD, 0x10, X86_MODEL_ANY,	X86_FEATURE_ANY, 0 },
3625 	{ X86_VENDOR_AMD, 0x15, X86_MODEL_ANY,	X86_FEATURE_ANY, 0 },
3626 	{ X86_VENDOR_AMD, 0x16, X86_MODEL_ANY,	X86_FEATURE_ANY, 0 },
3627 	{ X86_VENDOR_AMD, 0x17, X86_MODEL_ANY,	X86_FEATURE_ANY, 0 },
3628 	{ X86_VENDOR_HYGON, 0x18, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
3629 	{ }
3630 };
3631 MODULE_DEVICE_TABLE(x86cpu, amd64_cpuids);
3632 
3633 static int __init amd64_edac_init(void)
3634 {
3635 	const char *owner;
3636 	int err = -ENODEV;
3637 	int i;
3638 
3639 	owner = edac_get_owner();
3640 	if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
3641 		return -EBUSY;
3642 
3643 	if (!x86_match_cpu(amd64_cpuids))
3644 		return -ENODEV;
3645 
3646 	if (amd_cache_northbridges() < 0)
3647 		return -ENODEV;
3648 
3649 	opstate_init();
3650 
3651 	err = -ENOMEM;
3652 	ecc_stngs = kcalloc(amd_nb_num(), sizeof(ecc_stngs[0]), GFP_KERNEL);
3653 	if (!ecc_stngs)
3654 		goto err_free;
3655 
3656 	msrs = msrs_alloc();
3657 	if (!msrs)
3658 		goto err_free;
3659 
3660 	for (i = 0; i < amd_nb_num(); i++) {
3661 		err = probe_one_instance(i);
3662 		if (err) {
3663 			/* unwind properly */
3664 			while (--i >= 0)
3665 				remove_one_instance(i);
3666 
3667 			goto err_pci;
3668 		}
3669 	}
3670 
3671 	if (!edac_has_mcs()) {
3672 		err = -ENODEV;
3673 		goto err_pci;
3674 	}
3675 
3676 	/* register stuff with EDAC MCE */
3677 	if (report_gart_errors)
3678 		amd_report_gart_errors(true);
3679 
3680 	if (boot_cpu_data.x86 >= 0x17)
3681 		amd_register_ecc_decoder(decode_umc_error);
3682 	else
3683 		amd_register_ecc_decoder(decode_bus_error);
3684 
3685 	setup_pci_device();
3686 
3687 #ifdef CONFIG_X86_32
3688 	amd64_err("%s on 32-bit is unsupported. USE AT YOUR OWN RISK!\n", EDAC_MOD_STR);
3689 #endif
3690 
3691 	printk(KERN_INFO "AMD64 EDAC driver v%s\n", EDAC_AMD64_VERSION);
3692 
3693 	return 0;
3694 
3695 err_pci:
3696 	msrs_free(msrs);
3697 	msrs = NULL;
3698 
3699 err_free:
3700 	kfree(ecc_stngs);
3701 	ecc_stngs = NULL;
3702 
3703 	return err;
3704 }
3705 
3706 static void __exit amd64_edac_exit(void)
3707 {
3708 	int i;
3709 
3710 	if (pci_ctl)
3711 		edac_pci_release_generic_ctl(pci_ctl);
3712 
3713 	/* unregister from EDAC MCE */
3714 	amd_report_gart_errors(false);
3715 
3716 	if (boot_cpu_data.x86 >= 0x17)
3717 		amd_unregister_ecc_decoder(decode_umc_error);
3718 	else
3719 		amd_unregister_ecc_decoder(decode_bus_error);
3720 
3721 	for (i = 0; i < amd_nb_num(); i++)
3722 		remove_one_instance(i);
3723 
3724 	kfree(ecc_stngs);
3725 	ecc_stngs = NULL;
3726 
3727 	msrs_free(msrs);
3728 	msrs = NULL;
3729 }
3730 
3731 module_init(amd64_edac_init);
3732 module_exit(amd64_edac_exit);
3733 
3734 MODULE_LICENSE("GPL");
3735 MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, "
3736 		"Dave Peterson, Thayne Harbaugh");
3737 MODULE_DESCRIPTION("MC support for AMD64 memory controllers - "
3738 		EDAC_AMD64_VERSION);
3739 
3740 module_param(edac_op_state, int, 0444);
3741 MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
3742