xref: /openbmc/linux/drivers/edac/amd64_edac.c (revision 4464005a12b5c79e1a364e6272ee10a83413f928)
1 // SPDX-License-Identifier: GPL-2.0-only
2 #include "amd64_edac.h"
3 #include <asm/amd_nb.h>
4 
5 static struct edac_pci_ctl_info *pci_ctl;
6 
7 /*
8  * Set by command line parameter. If BIOS has enabled the ECC, this override is
9  * cleared to prevent re-enabling the hardware by this driver.
10  */
11 static int ecc_enable_override;
12 module_param(ecc_enable_override, int, 0644);
13 
14 static struct msr __percpu *msrs;
15 
16 static struct amd64_family_type *fam_type;
17 
18 /* Per-node stuff */
19 static struct ecc_settings **ecc_stngs;
20 
21 /*
22  * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
23  * bandwidth to a valid bit pattern. The 'set' operation finds the 'matching-
24  * or higher value'.
25  *
26  * FIXME: Produce a better mapping/linearisation.
27  */
28 static const struct scrubrate {
29        u32 scrubval;           /* bit pattern for scrub rate */
30        u32 bandwidth;          /* bandwidth consumed (bytes/sec) */
31 } scrubrates[] = {
32 	{ 0x01, 1600000000UL},
33 	{ 0x02, 800000000UL},
34 	{ 0x03, 400000000UL},
35 	{ 0x04, 200000000UL},
36 	{ 0x05, 100000000UL},
37 	{ 0x06, 50000000UL},
38 	{ 0x07, 25000000UL},
39 	{ 0x08, 12284069UL},
40 	{ 0x09, 6274509UL},
41 	{ 0x0A, 3121951UL},
42 	{ 0x0B, 1560975UL},
43 	{ 0x0C, 781440UL},
44 	{ 0x0D, 390720UL},
45 	{ 0x0E, 195300UL},
46 	{ 0x0F, 97650UL},
47 	{ 0x10, 48854UL},
48 	{ 0x11, 24427UL},
49 	{ 0x12, 12213UL},
50 	{ 0x13, 6101UL},
51 	{ 0x14, 3051UL},
52 	{ 0x15, 1523UL},
53 	{ 0x16, 761UL},
54 	{ 0x00, 0UL},        /* scrubbing off */
55 };
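
/*
 * Illustrative example (editorial note, hypothetical request): a requested
 * bandwidth of 80000000 bytes/sec matches no entry exactly; the 'set'
 * operation below walks the table top-down, skips rates below the allowed
 * minimum, and picks the first entry at or below the request -- here
 * scrubval 0x06 (50000000 bytes/sec) -- so the programmed rate does not
 * exceed what was asked for.
 */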
56 
57 int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
58 			       u32 *val, const char *func)
59 {
60 	int err = 0;
61 
62 	err = pci_read_config_dword(pdev, offset, val);
63 	if (err)
64 		amd64_warn("%s: error reading F%dx%03x.\n",
65 			   func, PCI_FUNC(pdev->devfn), offset);
66 
67 	return err;
68 }
69 
70 int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
71 				u32 val, const char *func)
72 {
73 	int err = 0;
74 
75 	err = pci_write_config_dword(pdev, offset, val);
76 	if (err)
77 		amd64_warn("%s: error writing to F%dx%03x.\n",
78 			   func, PCI_FUNC(pdev->devfn), offset);
79 
80 	return err;
81 }
82 
83 /*
84  * Select DCT to which PCI cfg accesses are routed
85  */
86 static void f15h_select_dct(struct amd64_pvt *pvt, u8 dct)
87 {
88 	u32 reg = 0;
89 
90 	amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, &reg);
91 	reg &= (pvt->model == 0x30) ? ~3 : ~1;
92 	reg |= dct;
93 	amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg);
94 }
95 
96 /*
97  *
98  * Depending on the family, F2 DCT reads need special handling:
99  *
100  * K8: has a single DCT only and no address offsets >= 0x100
101  *
102  * F10h: each DCT has its own set of regs
103  *	DCT0 -> F2x040..
104  *	DCT1 -> F2x140..
105  *
106  * F16h: has only 1 DCT
107  *
108  * F15h: we select which DCT we access using F1x10C[DctCfgSel]
109  */
110 static inline int amd64_read_dct_pci_cfg(struct amd64_pvt *pvt, u8 dct,
111 					 int offset, u32 *val)
112 {
113 	switch (pvt->fam) {
114 	case 0xf:
115 		if (dct || offset >= 0x100)
116 			return -EINVAL;
117 		break;
118 
119 	case 0x10:
120 		if (dct) {
121 			/*
122 			 * Note: If ganging is enabled, barring the regs
123 			 * F2x[1,0]98 and F2x[1,0]9C, reads to F2x1xx
124 			 * return 0. (cf. Section 2.8.1 F10h BKDG)
125 			 */
126 			if (dct_ganging_enabled(pvt))
127 				return 0;
128 
129 			offset += 0x100;
130 		}
131 		break;
132 
133 	case 0x15:
134 		/*
135 		 * F15h: F2x1xx addresses do not map explicitly to DCT1.
136 		 * We should select which DCT we access using F1x10C[DctCfgSel]
137 		 */
138 		dct = (dct && pvt->model == 0x30) ? 3 : dct;
139 		f15h_select_dct(pvt, dct);
140 		break;
141 
142 	case 0x16:
143 		if (dct)
144 			return -EINVAL;
145 		break;
146 
147 	default:
148 		break;
149 	}
150 	return amd64_read_pci_cfg(pvt->F2, offset, val);
151 }
152 
153 /*
154  * Memory scrubber control interface. For K8, memory scrubbing is handled by
155  * hardware and can involve L2 cache, dcache as well as the main memory. With
156  * F10, this is extended to L3 cache scrubbing on CPU models sporting that
157  * functionality.
158  *
159  * This causes the "units" for the scrubbing speed to vary from 64 byte blocks
160  * (dram) to cache lines. This is nasty, so we will use bandwidth in
161  * bytes/sec for the setting.
162  *
163  * Currently, we only do dram scrubbing. If the scrubbing is done in software on
164  * other archs, we might not have access to the caches directly.
165  */
166 
167 static inline void __f17h_set_scrubval(struct amd64_pvt *pvt, u32 scrubval)
168 {
169 	/*
170 	 * Fam17h supports scrub values between 0x5 and 0x14. Also, the values
171 	 * are shifted down by 0x5, so scrubval 0x5 is written to the register
172 	 * as 0x0, scrubval 0x6 as 0x1, etc.
173 	 */
174 	if (scrubval >= 0x5 && scrubval <= 0x14) {
175 		scrubval -= 0x5;
176 		pci_write_bits32(pvt->F6, F17H_SCR_LIMIT_ADDR, scrubval, 0xF);
177 		pci_write_bits32(pvt->F6, F17H_SCR_BASE_ADDR, 1, 0x1);
178 	} else {
179 		pci_write_bits32(pvt->F6, F17H_SCR_BASE_ADDR, 0, 0x1);
180 	}
181 }
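
/*
 * Worked example (illustrative, hypothetical value): a scrubval of 0x9 is
 * inside the supported 0x5-0x14 window, so 0x9 - 0x5 = 0x4 is written to
 * the low nibble of F17H_SCR_LIMIT_ADDR and bit 0 of F17H_SCR_BASE_ADDR is
 * set to enable scrubbing; any scrubval outside that window clears bit 0
 * and disables the scrubber instead.
 */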
182 /*
183  * Scan the scrub rate mapping table for a close or matching bandwidth value to
184  * issue. If the requested rate is too big, then use the last maximum value found.
185  */
186 static int __set_scrub_rate(struct amd64_pvt *pvt, u32 new_bw, u32 min_rate)
187 {
188 	u32 scrubval;
189 	int i;
190 
191 	/*
192 	 * map the configured rate (new_bw) to a value specific to the AMD64
193 	 * memory controller and apply to register. Search for the first
194 	 * bandwidth entry that is greater than or equal to the requested setting
195 	 * and program that. If at last entry, turn off DRAM scrubbing.
196 	 *
197 	 * If no suitable bandwidth is found, turn off DRAM scrubbing entirely
198 	 * by falling back to the last element in scrubrates[].
199 	 */
200 	for (i = 0; i < ARRAY_SIZE(scrubrates) - 1; i++) {
201 		/*
202 		 * skip scrub rates which aren't recommended
203 		 * (see F10 BKDG, F3x58)
204 		 */
205 		if (scrubrates[i].scrubval < min_rate)
206 			continue;
207 
208 		if (scrubrates[i].bandwidth <= new_bw)
209 			break;
210 	}
211 
212 	scrubval = scrubrates[i].scrubval;
213 
214 	if (pvt->umc) {
215 		__f17h_set_scrubval(pvt, scrubval);
216 	} else if (pvt->fam == 0x15 && pvt->model == 0x60) {
217 		f15h_select_dct(pvt, 0);
218 		pci_write_bits32(pvt->F2, F15H_M60H_SCRCTRL, scrubval, 0x001F);
219 		f15h_select_dct(pvt, 1);
220 		pci_write_bits32(pvt->F2, F15H_M60H_SCRCTRL, scrubval, 0x001F);
221 	} else {
222 		pci_write_bits32(pvt->F3, SCRCTRL, scrubval, 0x001F);
223 	}
224 
225 	if (scrubval)
226 		return scrubrates[i].bandwidth;
227 
228 	return 0;
229 }
230 
231 static int set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
232 {
233 	struct amd64_pvt *pvt = mci->pvt_info;
234 	u32 min_scrubrate = 0x5;
235 
236 	if (pvt->fam == 0xf)
237 		min_scrubrate = 0x0;
238 
239 	if (pvt->fam == 0x15) {
240 		/* Erratum #505 */
241 		if (pvt->model < 0x10)
242 			f15h_select_dct(pvt, 0);
243 
244 		if (pvt->model == 0x60)
245 			min_scrubrate = 0x6;
246 	}
247 	return __set_scrub_rate(pvt, bw, min_scrubrate);
248 }
249 
250 static int get_scrub_rate(struct mem_ctl_info *mci)
251 {
252 	struct amd64_pvt *pvt = mci->pvt_info;
253 	int i, retval = -EINVAL;
254 	u32 scrubval = 0;
255 
256 	if (pvt->umc) {
257 		amd64_read_pci_cfg(pvt->F6, F17H_SCR_BASE_ADDR, &scrubval);
258 		if (scrubval & BIT(0)) {
259 			amd64_read_pci_cfg(pvt->F6, F17H_SCR_LIMIT_ADDR, &scrubval);
260 			scrubval &= 0xF;
261 			scrubval += 0x5;
262 		} else {
263 			scrubval = 0;
264 		}
265 	} else if (pvt->fam == 0x15) {
266 		/* Erratum #505 */
267 		if (pvt->model < 0x10)
268 			f15h_select_dct(pvt, 0);
269 
270 		if (pvt->model == 0x60)
271 			amd64_read_pci_cfg(pvt->F2, F15H_M60H_SCRCTRL, &scrubval);
272 	} else {
273 		amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
274 	}
275 
276 	scrubval = scrubval & 0x001F;
277 
278 	for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
279 		if (scrubrates[i].scrubval == scrubval) {
280 			retval = scrubrates[i].bandwidth;
281 			break;
282 		}
283 	}
284 	return retval;
285 }
286 
287 /*
288  * returns true if the SysAddr given by sys_addr matches the
289  * DRAM base/limit associated with node_id
290  */
291 static bool base_limit_match(struct amd64_pvt *pvt, u64 sys_addr, u8 nid)
292 {
293 	u64 addr;
294 
295 	/* The K8 treats this as a 40-bit value.  However, bits 63-40 will be
296 	 * all ones if the most significant implemented address bit is 1.
297 	 * Here we discard bits 63-40.  See section 3.4.2 of AMD publication
298 	 * 24592: AMD x86-64 Architecture Programmer's Manual Volume 1
299 	 * Application Programming.
300 	 */
301 	addr = sys_addr & 0x000000ffffffffffull;
302 
303 	return ((addr >= get_dram_base(pvt, nid)) &&
304 		(addr <= get_dram_limit(pvt, nid)));
305 }
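
/*
 * Worked example (editorial, hypothetical address): a sign-extended SysAddr
 * of 0xffffff8012345678 becomes 0x0000008012345678 after the 40-bit mask
 * above, and only then is it compared against the node's DRAM base/limit
 * pair.
 */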
306 
307 /*
308  * Attempt to map a SysAddr to a node. On success, return a pointer to the
309  * mem_ctl_info structure for the node that the SysAddr maps to.
310  *
311  * On failure, return NULL.
312  */
313 static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
314 						u64 sys_addr)
315 {
316 	struct amd64_pvt *pvt;
317 	u8 node_id;
318 	u32 intlv_en, bits;
319 
320 	/*
321 	 * Here we use the DRAM Base (section 3.4.4.1) and DRAM Limit (section
322 	 * 3.4.4.2) registers to map the SysAddr to a node ID.
323 	 */
324 	pvt = mci->pvt_info;
325 
326 	/*
327 	 * The value of this field should be the same for all DRAM Base
328 	 * registers.  Therefore we arbitrarily choose to read it from the
329 	 * register for node 0.
330 	 */
331 	intlv_en = dram_intlv_en(pvt, 0);
332 
333 	if (intlv_en == 0) {
334 		for (node_id = 0; node_id < DRAM_RANGES; node_id++) {
335 			if (base_limit_match(pvt, sys_addr, node_id))
336 				goto found;
337 		}
338 		goto err_no_match;
339 	}
340 
341 	if (unlikely((intlv_en != 0x01) &&
342 		     (intlv_en != 0x03) &&
343 		     (intlv_en != 0x07))) {
344 		amd64_warn("DRAM Base[IntlvEn] junk value: 0x%x, BIOS bug?\n", intlv_en);
345 		return NULL;
346 	}
347 
348 	bits = (((u32) sys_addr) >> 12) & intlv_en;
349 
350 	for (node_id = 0; ; ) {
351 		if ((dram_intlv_sel(pvt, node_id) & intlv_en) == bits)
352 			break;	/* intlv_sel field matches */
353 
354 		if (++node_id >= DRAM_RANGES)
355 			goto err_no_match;
356 	}
357 
358 	/* sanity test for sys_addr */
359 	if (unlikely(!base_limit_match(pvt, sys_addr, node_id))) {
360 		amd64_warn("%s: sys_addr 0x%llx falls outside base/limit address "
361 			   "range for node %d with node interleaving enabled.\n",
362 			   __func__, sys_addr, node_id);
363 		return NULL;
364 	}
365 
366 found:
367 	return edac_mc_find((int)node_id);
368 
369 err_no_match:
370 	edac_dbg(2, "sys_addr 0x%lx doesn't match any node\n",
371 		 (unsigned long)sys_addr);
372 
373 	return NULL;
374 }
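
/*
 * Illustrative example (editorial note, not from the BKDG text): with
 * intlv_en == 0x03 the system interleaves across four nodes on
 * SysAddr[13:12], so 'bits' above is (sys_addr >> 12) & 0x3 and the node
 * whose DRAM Base IntlvSel field equals those two bits claims the address.
 */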
375 
376 /*
377  * compute the CS base address of the @csrow on the DRAM controller @dct.
378  * For details see F2x[5C:40] in the processor's BKDG
379  */
380 static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
381 				 u64 *base, u64 *mask)
382 {
383 	u64 csbase, csmask, base_bits, mask_bits;
384 	u8 addr_shift;
385 
386 	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
387 		csbase		= pvt->csels[dct].csbases[csrow];
388 		csmask		= pvt->csels[dct].csmasks[csrow];
389 		base_bits	= GENMASK_ULL(31, 21) | GENMASK_ULL(15, 9);
390 		mask_bits	= GENMASK_ULL(29, 21) | GENMASK_ULL(15, 9);
391 		addr_shift	= 4;
392 
393 	/*
394 	 * F16h and F15h, models 30h and later need two addr_shift values:
395 	 * 8 for high and 6 for low (cf. F16h BKDG).
396 	 */
397 	} else if (pvt->fam == 0x16 ||
398 		  (pvt->fam == 0x15 && pvt->model >= 0x30)) {
399 		csbase          = pvt->csels[dct].csbases[csrow];
400 		csmask          = pvt->csels[dct].csmasks[csrow >> 1];
401 
402 		*base  = (csbase & GENMASK_ULL(15,  5)) << 6;
403 		*base |= (csbase & GENMASK_ULL(30, 19)) << 8;
404 
405 		*mask = ~0ULL;
406 		/* poke holes for the csmask */
407 		*mask &= ~((GENMASK_ULL(15, 5)  << 6) |
408 			   (GENMASK_ULL(30, 19) << 8));
409 
410 		*mask |= (csmask & GENMASK_ULL(15, 5))  << 6;
411 		*mask |= (csmask & GENMASK_ULL(30, 19)) << 8;
412 
413 		return;
414 	} else {
415 		csbase		= pvt->csels[dct].csbases[csrow];
416 		csmask		= pvt->csels[dct].csmasks[csrow >> 1];
417 		addr_shift	= 8;
418 
419 		if (pvt->fam == 0x15)
420 			base_bits = mask_bits =
421 				GENMASK_ULL(30,19) | GENMASK_ULL(13,5);
422 		else
423 			base_bits = mask_bits =
424 				GENMASK_ULL(28,19) | GENMASK_ULL(13,5);
425 	}
426 
427 	*base  = (csbase & base_bits) << addr_shift;
428 
429 	*mask  = ~0ULL;
430 	/* poke holes for the csmask */
431 	*mask &= ~(mask_bits << addr_shift);
432 	/* OR them in */
433 	*mask |= (csmask & mask_bits) << addr_shift;
434 }
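
/*
 * Worked example for the F16h/F15h-M30h+ branch above (editorial,
 * hypothetical register value): a csbase of 0x00400020 has bit 5 set
 * within [15:5] (shifted left by 6 -> bit 11) and bit 22 set within
 * [30:19] (shifted left by 8 -> bit 30), giving a CS base address of
 * 0x40000800.
 */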
435 
436 #define for_each_chip_select(i, dct, pvt) \
437 	for (i = 0; i < pvt->csels[dct].b_cnt; i++)
438 
439 #define chip_select_base(i, dct, pvt) \
440 	pvt->csels[dct].csbases[i]
441 
442 #define for_each_chip_select_mask(i, dct, pvt) \
443 	for (i = 0; i < pvt->csels[dct].m_cnt; i++)
444 
445 #define for_each_umc(i) \
446 	for (i = 0; i < fam_type->max_mcs; i++)
447 
448 /*
449  * @input_addr is an InputAddr associated with the node given by mci. Return the
450  * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
451  */
452 static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
453 {
454 	struct amd64_pvt *pvt;
455 	int csrow;
456 	u64 base, mask;
457 
458 	pvt = mci->pvt_info;
459 
460 	for_each_chip_select(csrow, 0, pvt) {
461 		if (!csrow_enabled(csrow, 0, pvt))
462 			continue;
463 
464 		get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);
465 
466 		mask = ~mask;
467 
468 		if ((input_addr & mask) == (base & mask)) {
469 			edac_dbg(2, "InputAddr 0x%lx matches csrow %d (node %d)\n",
470 				 (unsigned long)input_addr, csrow,
471 				 pvt->mc_node_id);
472 
473 			return csrow;
474 		}
475 	}
476 	edac_dbg(2, "no matching csrow for InputAddr 0x%lx (MC node %d)\n",
477 		 (unsigned long)input_addr, pvt->mc_node_id);
478 
479 	return -1;
480 }
481 
482 /*
483  * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094)
484  * for the node represented by mci. Info is passed back in *hole_base,
485  * *hole_offset, and *hole_size.  Function returns 0 if info is valid or 1 if
486  * info is invalid. Info may be invalid for either of the following reasons:
487  *
488  * - The revision of the node is not E or greater.  In this case, the DRAM Hole
489  *   Address Register does not exist.
490  *
491  * - The DramHoleValid bit is cleared in the DRAM Hole Address Register,
492  *   indicating that its contents are not valid.
493  *
494  * The values passed back in *hole_base, *hole_offset, and *hole_size are
495  * complete 32-bit values despite the fact that the bitfields in the DHAR
496  * only represent bits 31-24 of the base and offset values.
497  */
498 int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
499 			     u64 *hole_offset, u64 *hole_size)
500 {
501 	struct amd64_pvt *pvt = mci->pvt_info;
502 
503 	/* only revE and later have the DRAM Hole Address Register */
504 	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_E) {
505 		edac_dbg(1, "  revision %d for node %d does not support DHAR\n",
506 			 pvt->ext_model, pvt->mc_node_id);
507 		return 1;
508 	}
509 
510 	/* valid for Fam10h and above */
511 	if (pvt->fam >= 0x10 && !dhar_mem_hoist_valid(pvt)) {
512 		edac_dbg(1, "  Dram Memory Hoisting is DISABLED on this system\n");
513 		return 1;
514 	}
515 
516 	if (!dhar_valid(pvt)) {
517 		edac_dbg(1, "  Dram Memory Hoisting is DISABLED on this node %d\n",
518 			 pvt->mc_node_id);
519 		return 1;
520 	}
521 
522 	/* This node has Memory Hoisting */
523 
524 	/* +------------------+--------------------+--------------------+-----
525 	 * | memory           | DRAM hole          | relocated          |
526 	 * | [0, (x - 1)]     | [x, 0xffffffff]    | addresses from     |
527 	 * |                  |                    | DRAM hole          |
528 	 * |                  |                    | [0x100000000,      |
529 	 * |                  |                    |  (0x100000000+     |
530 	 * |                  |                    |   (0xffffffff-x))] |
531 	 * +------------------+--------------------+--------------------+-----
532 	 *
533 	 * Above is a diagram of physical memory showing the DRAM hole and the
534 	 * relocated addresses from the DRAM hole.  As shown, the DRAM hole
535 	 * starts at address x (the base address) and extends through address
536 	 * 0xffffffff.  The DRAM Hole Address Register (DHAR) relocates the
537 	 * addresses in the hole so that they start at 0x100000000.
538 	 */
539 
540 	*hole_base = dhar_base(pvt);
541 	*hole_size = (1ULL << 32) - *hole_base;
542 
543 	*hole_offset = (pvt->fam > 0xf) ? f10_dhar_offset(pvt)
544 					: k8_dhar_offset(pvt);
545 
546 	edac_dbg(1, "  DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
547 		 pvt->mc_node_id, (unsigned long)*hole_base,
548 		 (unsigned long)*hole_offset, (unsigned long)*hole_size);
549 
550 	return 0;
551 }
552 EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info);
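
/*
 * Worked example (editorial, assumed DHAR contents): with a DHAR base of
 * 0xc0000000 the hole size is 0x100000000 - 0xc0000000 = 0x40000000, i.e.
 * the top 1GB below 4GB is stolen for MMIO and the DRAM that would have
 * lived there is hoisted to [0x100000000, 0x13fffffff]; callers later
 * subtract *hole_offset from such SysAddrs to get back the DramAddr.
 */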
553 
554 /*
555  * Return the DramAddr that the SysAddr given by @sys_addr maps to.  It is
556  * assumed that sys_addr maps to the node given by mci.
557  *
558  * The first part of section 3.4.4 (p. 70) shows how the DRAM Base (section
559  * 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers are used to translate a
560  * SysAddr to a DramAddr. If the DRAM Hole Address Register (DHAR) is enabled,
561  * then it is also involved in translating a SysAddr to a DramAddr. Sections
562  * 3.4.8 and 3.5.8.2 describe the DHAR and how it is used for memory hoisting.
563  * These parts of the documentation are unclear. I interpret them as follows:
564  *
565  * When node n receives a SysAddr, it processes the SysAddr as follows:
566  *
567  * 1. It extracts the DRAMBase and DRAMLimit values from the DRAM Base and DRAM
568  *    Limit registers for node n. If the SysAddr is not within the range
569  *    specified by the base and limit values, then node n ignores the SysAddr
570  *    (since it does not map to node n). Otherwise continue to step 2 below.
571  *
572  * 2. If the DramHoleValid bit of the DHAR for node n is clear, the DHAR is
573  *    disabled so skip to step 3 below. Otherwise see if the SysAddr is within
574  *    the range of relocated addresses (starting at 0x100000000) from the DRAM
575  *    hole. If not, skip to step 3 below. Else get the value of the
576  *    DramHoleOffset field from the DHAR. To obtain the DramAddr, subtract the
577  *    offset defined by this value from the SysAddr.
578  *
579  * 3. Obtain the base address for node n from the DRAMBase field of the DRAM
580  *    Base register for node n. To obtain the DramAddr, subtract the base
581  *    address from the SysAddr, as shown near the start of section 3.4.4 (p.70).
582  */
583 static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
584 {
585 	struct amd64_pvt *pvt = mci->pvt_info;
586 	u64 dram_base, hole_base, hole_offset, hole_size, dram_addr;
587 	int ret;
588 
589 	dram_base = get_dram_base(pvt, pvt->mc_node_id);
590 
591 	ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
592 				      &hole_size);
593 	if (!ret) {
594 		if ((sys_addr >= (1ULL << 32)) &&
595 		    (sys_addr < ((1ULL << 32) + hole_size))) {
596 			/* use DHAR to translate SysAddr to DramAddr */
597 			dram_addr = sys_addr - hole_offset;
598 
599 			edac_dbg(2, "using DHAR to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
600 				 (unsigned long)sys_addr,
601 				 (unsigned long)dram_addr);
602 
603 			return dram_addr;
604 		}
605 	}
606 
607 	/*
608 	 * Translate the SysAddr to a DramAddr as shown near the start of
609 	 * section 3.4.4 (p. 70).  Although sys_addr is a 64-bit value, the k8
610 	 * only deals with 40-bit values.  Therefore we discard bits 63-40 of
611 	 * sys_addr below.  If bit 39 of sys_addr is 1 then the bits we
612 	 * discard are all 1s.  Otherwise the bits we discard are all 0s.  See
613 	 * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture
614 	 * Programmer's Manual Volume 1 Application Programming.
615 	 */
616 	dram_addr = (sys_addr & GENMASK_ULL(39, 0)) - dram_base;
617 
618 	edac_dbg(2, "using DRAM Base register to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
619 		 (unsigned long)sys_addr, (unsigned long)dram_addr);
620 	return dram_addr;
621 }
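
/*
 * Worked example (illustrative, assumed register values): if the hole is
 * not hit and this node's DRAM base is 0x100000000, a SysAddr of
 * 0x180000000 yields a DramAddr of 0x80000000; when the hole is hit (see
 * the DHAR example above), hole_offset is subtracted instead.
 */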
622 
623 /*
624  * @intlv_en is the value of the IntlvEn field from a DRAM Base register
625  * (section 3.4.4.1).  Return the number of bits from a SysAddr that are used
626  * for node interleaving.
627  */
628 static int num_node_interleave_bits(unsigned intlv_en)
629 {
630 	static const int intlv_shift_table[] = { 0, 1, 0, 2, 0, 0, 0, 3 };
631 	int n;
632 
633 	BUG_ON(intlv_en > 7);
634 	n = intlv_shift_table[intlv_en];
635 	return n;
636 }
637 
638 /* Translate the DramAddr given by @dram_addr to an InputAddr. */
639 static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
640 {
641 	struct amd64_pvt *pvt;
642 	int intlv_shift;
643 	u64 input_addr;
644 
645 	pvt = mci->pvt_info;
646 
647 	/*
648 	 * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
649 	 * concerning translating a DramAddr to an InputAddr.
650 	 */
651 	intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
652 	input_addr = ((dram_addr >> intlv_shift) & GENMASK_ULL(35, 12)) +
653 		      (dram_addr & 0xfff);
654 
655 	edac_dbg(2, "  Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
656 		 intlv_shift, (unsigned long)dram_addr,
657 		 (unsigned long)input_addr);
658 
659 	return input_addr;
660 }
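
/*
 * Worked example (editorial, hypothetical values): with intlv_shift == 1
 * (two-node interleave) and a DramAddr of 0x5432, the computation above
 * gives ((0x5432 >> 1) & GENMASK_ULL(35, 12)) + 0x432 == 0x2432, i.e. the
 * interleave bit at position 12 is squeezed out while the low 12 bits are
 * preserved.
 */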
661 
662 /*
663  * Translate the SysAddr represented by @sys_addr to an InputAddr.  It is
664  * assumed that @sys_addr maps to the node given by mci.
665  */
666 static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
667 {
668 	u64 input_addr;
669 
670 	input_addr =
671 	    dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr));
672 
673 	edac_dbg(2, "SysAddr 0x%lx translates to InputAddr 0x%lx\n",
674 		 (unsigned long)sys_addr, (unsigned long)input_addr);
675 
676 	return input_addr;
677 }
678 
679 /* Map the Error address to a PAGE and PAGE OFFSET. */
680 static inline void error_address_to_page_and_offset(u64 error_address,
681 						    struct err_info *err)
682 {
683 	err->page = (u32) (error_address >> PAGE_SHIFT);
684 	err->offset = ((u32) error_address) & ~PAGE_MASK;
685 }
686 
687 /*
688  * @sys_addr is an error address (a SysAddr) extracted from the MCA NB Address
689  * Low (section 3.6.4.5) and MCA NB Address High (section 3.6.4.6) registers
690  * of a node that detected an ECC memory error.  mci represents the node that
691  * the error address maps to (possibly different from the node that detected
692  * the error).  Return the number of the csrow that sys_addr maps to, or -1 on
693  * error.
694  */
695 static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
696 {
697 	int csrow;
698 
699 	csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr));
700 
701 	if (csrow == -1)
702 		amd64_mc_err(mci, "Failed to translate InputAddr to csrow for "
703 				  "address 0x%lx\n", (unsigned long)sys_addr);
704 	return csrow;
705 }
706 
707 static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);
708 
709 /*
710  * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
711  * are ECC capable.
712  */
713 static unsigned long determine_edac_cap(struct amd64_pvt *pvt)
714 {
715 	unsigned long edac_cap = EDAC_FLAG_NONE;
716 	u8 bit;
717 
718 	if (pvt->umc) {
719 		u8 i, umc_en_mask = 0, dimm_ecc_en_mask = 0;
720 
721 		for_each_umc(i) {
722 			if (!(pvt->umc[i].sdp_ctrl & UMC_SDP_INIT))
723 				continue;
724 
725 			umc_en_mask |= BIT(i);
726 
727 			/* UMC Configuration bit 12 (DimmEccEn) */
728 			if (pvt->umc[i].umc_cfg & BIT(12))
729 				dimm_ecc_en_mask |= BIT(i);
730 		}
731 
732 		if (umc_en_mask == dimm_ecc_en_mask)
733 			edac_cap = EDAC_FLAG_SECDED;
734 	} else {
735 		bit = (pvt->fam > 0xf || pvt->ext_model >= K8_REV_F)
736 			? 19
737 			: 17;
738 
739 		if (pvt->dclr0 & BIT(bit))
740 			edac_cap = EDAC_FLAG_SECDED;
741 	}
742 
743 	return edac_cap;
744 }
745 
746 static void debug_display_dimm_sizes(struct amd64_pvt *, u8);
747 
748 static void debug_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan)
749 {
750 	edac_dbg(1, "F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);
751 
752 	if (pvt->dram_type == MEM_LRDDR3) {
753 		u32 dcsm = pvt->csels[chan].csmasks[0];
754 		/*
755 		 * It's assumed all LRDIMMs in a DCT are going to be of
756 		 * the same 'type' until proven otherwise. So, use a cs
757 		 * value of '0' here to get dcsm value.
758 		 */
759 		edac_dbg(1, " LRDIMM %dx rank multiply\n", (dcsm & 0x3));
760 	}
761 
762 	edac_dbg(1, "All DIMMs support ECC:%s\n",
763 		    (dclr & BIT(19)) ? "yes" : "no");
764 
765 
766 	edac_dbg(1, "  PAR/ERR parity: %s\n",
767 		 (dclr & BIT(8)) ?  "enabled" : "disabled");
768 
769 	if (pvt->fam == 0x10)
770 		edac_dbg(1, "  DCT 128bit mode width: %s\n",
771 			 (dclr & BIT(11)) ?  "128b" : "64b");
772 
773 	edac_dbg(1, "  x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
774 		 (dclr & BIT(12)) ?  "yes" : "no",
775 		 (dclr & BIT(13)) ?  "yes" : "no",
776 		 (dclr & BIT(14)) ?  "yes" : "no",
777 		 (dclr & BIT(15)) ?  "yes" : "no");
778 }
779 
780 #define CS_EVEN_PRIMARY		BIT(0)
781 #define CS_ODD_PRIMARY		BIT(1)
782 #define CS_EVEN_SECONDARY	BIT(2)
783 #define CS_ODD_SECONDARY	BIT(3)
784 
785 #define CS_EVEN			(CS_EVEN_PRIMARY | CS_EVEN_SECONDARY)
786 #define CS_ODD			(CS_ODD_PRIMARY | CS_ODD_SECONDARY)
787 
788 static int f17_get_cs_mode(int dimm, u8 ctrl, struct amd64_pvt *pvt)
789 {
790 	int cs_mode = 0;
791 
792 	if (csrow_enabled(2 * dimm, ctrl, pvt))
793 		cs_mode |= CS_EVEN_PRIMARY;
794 
795 	if (csrow_enabled(2 * dimm + 1, ctrl, pvt))
796 		cs_mode |= CS_ODD_PRIMARY;
797 
798 	/* Asymmetric dual-rank DIMM support. */
799 	if (csrow_sec_enabled(2 * dimm + 1, ctrl, pvt))
800 		cs_mode |= CS_ODD_SECONDARY;
801 
802 	return cs_mode;
803 }
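
/*
 * Illustrative example (editorial note): a symmetric dual-rank DIMM0 on a
 * UMC has CS0 and CS1 enabled, so f17_get_cs_mode() returns
 * CS_EVEN_PRIMARY | CS_ODD_PRIMARY == 0x3; an asymmetric dual-rank DIMM
 * additionally has the odd secondary chip select enabled and returns 0xB.
 */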
804 
805 static void debug_display_dimm_sizes_df(struct amd64_pvt *pvt, u8 ctrl)
806 {
807 	int dimm, size0, size1, cs0, cs1, cs_mode;
808 
809 	edac_printk(KERN_DEBUG, EDAC_MC, "UMC%d chip selects:\n", ctrl);
810 
811 	for (dimm = 0; dimm < 2; dimm++) {
812 		cs0 = dimm * 2;
813 		cs1 = dimm * 2 + 1;
814 
815 		cs_mode = f17_get_cs_mode(dimm, ctrl, pvt);
816 
817 		size0 = pvt->ops->dbam_to_cs(pvt, ctrl, cs_mode, cs0);
818 		size1 = pvt->ops->dbam_to_cs(pvt, ctrl, cs_mode, cs1);
819 
820 		amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
821 				cs0,	size0,
822 				cs1,	size1);
823 	}
824 }
825 
826 static void __dump_misc_regs_df(struct amd64_pvt *pvt)
827 {
828 	struct amd64_umc *umc;
829 	u32 i, tmp, umc_base;
830 
831 	for_each_umc(i) {
832 		umc_base = get_umc_base(i);
833 		umc = &pvt->umc[i];
834 
835 		edac_dbg(1, "UMC%d DIMM cfg: 0x%x\n", i, umc->dimm_cfg);
836 		edac_dbg(1, "UMC%d UMC cfg: 0x%x\n", i, umc->umc_cfg);
837 		edac_dbg(1, "UMC%d SDP ctrl: 0x%x\n", i, umc->sdp_ctrl);
838 		edac_dbg(1, "UMC%d ECC ctrl: 0x%x\n", i, umc->ecc_ctrl);
839 
840 		amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_ECC_BAD_SYMBOL, &tmp);
841 		edac_dbg(1, "UMC%d ECC bad symbol: 0x%x\n", i, tmp);
842 
843 		amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_UMC_CAP, &tmp);
844 		edac_dbg(1, "UMC%d UMC cap: 0x%x\n", i, tmp);
845 		edac_dbg(1, "UMC%d UMC cap high: 0x%x\n", i, umc->umc_cap_hi);
846 
847 		edac_dbg(1, "UMC%d ECC capable: %s, ChipKill ECC capable: %s\n",
848 				i, (umc->umc_cap_hi & BIT(30)) ? "yes" : "no",
849 				    (umc->umc_cap_hi & BIT(31)) ? "yes" : "no");
850 		edac_dbg(1, "UMC%d All DIMMs support ECC: %s\n",
851 				i, (umc->umc_cfg & BIT(12)) ? "yes" : "no");
852 		edac_dbg(1, "UMC%d x4 DIMMs present: %s\n",
853 				i, (umc->dimm_cfg & BIT(6)) ? "yes" : "no");
854 		edac_dbg(1, "UMC%d x16 DIMMs present: %s\n",
855 				i, (umc->dimm_cfg & BIT(7)) ? "yes" : "no");
856 
857 		if (pvt->dram_type == MEM_LRDDR4) {
858 			amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_ADDR_CFG, &tmp);
859 			edac_dbg(1, "UMC%d LRDIMM %dx rank multiply\n",
860 					i, 1 << ((tmp >> 4) & 0x3));
861 		}
862 
863 		debug_display_dimm_sizes_df(pvt, i);
864 	}
865 
866 	edac_dbg(1, "F0x104 (DRAM Hole Address): 0x%08x, base: 0x%08x\n",
867 		 pvt->dhar, dhar_base(pvt));
868 }
869 
870 /* Display and decode various NB registers for debug purposes. */
871 static void __dump_misc_regs(struct amd64_pvt *pvt)
872 {
873 	edac_dbg(1, "F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);
874 
875 	edac_dbg(1, "  NB two channel DRAM capable: %s\n",
876 		 (pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no");
877 
878 	edac_dbg(1, "  ECC capable: %s, ChipKill ECC capable: %s\n",
879 		 (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no",
880 		 (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no");
881 
882 	debug_dump_dramcfg_low(pvt, pvt->dclr0, 0);
883 
884 	edac_dbg(1, "F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);
885 
886 	edac_dbg(1, "F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, offset: 0x%08x\n",
887 		 pvt->dhar, dhar_base(pvt),
888 		 (pvt->fam == 0xf) ? k8_dhar_offset(pvt)
889 				   : f10_dhar_offset(pvt));
890 
891 	debug_display_dimm_sizes(pvt, 0);
892 
893 	/* everything below this point is Fam10h and above */
894 	if (pvt->fam == 0xf)
895 		return;
896 
897 	debug_display_dimm_sizes(pvt, 1);
898 
899 	/* Only if NOT ganged does dclr1 have valid info */
900 	if (!dct_ganging_enabled(pvt))
901 		debug_dump_dramcfg_low(pvt, pvt->dclr1, 1);
902 }
903 
904 /* Display and decode various NB registers for debug purposes. */
905 static void dump_misc_regs(struct amd64_pvt *pvt)
906 {
907 	if (pvt->umc)
908 		__dump_misc_regs_df(pvt);
909 	else
910 		__dump_misc_regs(pvt);
911 
912 	edac_dbg(1, "  DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");
913 
914 	amd64_info("using x%u syndromes.\n", pvt->ecc_sym_sz);
915 }
916 
917 /*
918  * See BKDG, F2x[1,0][5C:40], F2[1,0][6C:60]
919  */
920 static void prep_chip_selects(struct amd64_pvt *pvt)
921 {
922 	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
923 		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
924 		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 8;
925 	} else if (pvt->fam == 0x15 && pvt->model == 0x30) {
926 		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 4;
927 		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 2;
928 	} else if (pvt->fam >= 0x17) {
929 		int umc;
930 
931 		for_each_umc(umc) {
932 			pvt->csels[umc].b_cnt = 4;
933 			pvt->csels[umc].m_cnt = 2;
934 		}
935 
936 	} else {
937 		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
938 		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4;
939 	}
940 }
941 
942 static void read_umc_base_mask(struct amd64_pvt *pvt)
943 {
944 	u32 umc_base_reg, umc_base_reg_sec;
945 	u32 umc_mask_reg, umc_mask_reg_sec;
946 	u32 base_reg, base_reg_sec;
947 	u32 mask_reg, mask_reg_sec;
948 	u32 *base, *base_sec;
949 	u32 *mask, *mask_sec;
950 	int cs, umc;
951 
952 	for_each_umc(umc) {
953 		umc_base_reg = get_umc_base(umc) + UMCCH_BASE_ADDR;
954 		umc_base_reg_sec = get_umc_base(umc) + UMCCH_BASE_ADDR_SEC;
955 
956 		for_each_chip_select(cs, umc, pvt) {
957 			base = &pvt->csels[umc].csbases[cs];
958 			base_sec = &pvt->csels[umc].csbases_sec[cs];
959 
960 			base_reg = umc_base_reg + (cs * 4);
961 			base_reg_sec = umc_base_reg_sec + (cs * 4);
962 
963 			if (!amd_smn_read(pvt->mc_node_id, base_reg, base))
964 				edac_dbg(0, "  DCSB%d[%d]=0x%08x reg: 0x%x\n",
965 					 umc, cs, *base, base_reg);
966 
967 			if (!amd_smn_read(pvt->mc_node_id, base_reg_sec, base_sec))
968 				edac_dbg(0, "    DCSB_SEC%d[%d]=0x%08x reg: 0x%x\n",
969 					 umc, cs, *base_sec, base_reg_sec);
970 		}
971 
972 		umc_mask_reg = get_umc_base(umc) + UMCCH_ADDR_MASK;
973 		umc_mask_reg_sec = get_umc_base(umc) + UMCCH_ADDR_MASK_SEC;
974 
975 		for_each_chip_select_mask(cs, umc, pvt) {
976 			mask = &pvt->csels[umc].csmasks[cs];
977 			mask_sec = &pvt->csels[umc].csmasks_sec[cs];
978 
979 			mask_reg = umc_mask_reg + (cs * 4);
980 			mask_reg_sec = umc_mask_reg_sec + (cs * 4);
981 
982 			if (!amd_smn_read(pvt->mc_node_id, mask_reg, mask))
983 				edac_dbg(0, "  DCSM%d[%d]=0x%08x reg: 0x%x\n",
984 					 umc, cs, *mask, mask_reg);
985 
986 			if (!amd_smn_read(pvt->mc_node_id, mask_reg_sec, mask_sec))
987 				edac_dbg(0, "    DCSM_SEC%d[%d]=0x%08x reg: 0x%x\n",
988 					 umc, cs, *mask_sec, mask_reg_sec);
989 		}
990 	}
991 }
992 
993 /*
994  * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask registers
995  */
996 static void read_dct_base_mask(struct amd64_pvt *pvt)
997 {
998 	int cs;
999 
1000 	prep_chip_selects(pvt);
1001 
1002 	if (pvt->umc)
1003 		return read_umc_base_mask(pvt);
1004 
1005 	for_each_chip_select(cs, 0, pvt) {
1006 		int reg0   = DCSB0 + (cs * 4);
1007 		int reg1   = DCSB1 + (cs * 4);
1008 		u32 *base0 = &pvt->csels[0].csbases[cs];
1009 		u32 *base1 = &pvt->csels[1].csbases[cs];
1010 
1011 		if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, base0))
1012 			edac_dbg(0, "  DCSB0[%d]=0x%08x reg: F2x%x\n",
1013 				 cs, *base0, reg0);
1014 
1015 		if (pvt->fam == 0xf)
1016 			continue;
1017 
1018 		if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, base1))
1019 			edac_dbg(0, "  DCSB1[%d]=0x%08x reg: F2x%x\n",
1020 				 cs, *base1, (pvt->fam == 0x10) ? reg1
1021 							: reg0);
1022 	}
1023 
1024 	for_each_chip_select_mask(cs, 0, pvt) {
1025 		int reg0   = DCSM0 + (cs * 4);
1026 		int reg1   = DCSM1 + (cs * 4);
1027 		u32 *mask0 = &pvt->csels[0].csmasks[cs];
1028 		u32 *mask1 = &pvt->csels[1].csmasks[cs];
1029 
1030 		if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, mask0))
1031 			edac_dbg(0, "    DCSM0[%d]=0x%08x reg: F2x%x\n",
1032 				 cs, *mask0, reg0);
1033 
1034 		if (pvt->fam == 0xf)
1035 			continue;
1036 
1037 		if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, mask1))
1038 			edac_dbg(0, "    DCSM1[%d]=0x%08x reg: F2x%x\n",
1039 				 cs, *mask1, (pvt->fam == 0x10) ? reg1
1040 							: reg0);
1041 	}
1042 }
1043 
1044 static void determine_memory_type(struct amd64_pvt *pvt)
1045 {
1046 	u32 dram_ctrl, dcsm;
1047 
1048 	if (pvt->umc) {
1049 		if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(5))
1050 			pvt->dram_type = MEM_LRDDR4;
1051 		else if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(4))
1052 			pvt->dram_type = MEM_RDDR4;
1053 		else
1054 			pvt->dram_type = MEM_DDR4;
1055 		return;
1056 	}
1057 
1058 	switch (pvt->fam) {
1059 	case 0xf:
1060 		if (pvt->ext_model >= K8_REV_F)
1061 			goto ddr3;
1062 
1063 		pvt->dram_type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;
1064 		return;
1065 
1066 	case 0x10:
1067 		if (pvt->dchr0 & DDR3_MODE)
1068 			goto ddr3;
1069 
1070 		pvt->dram_type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
1071 		return;
1072 
1073 	case 0x15:
1074 		if (pvt->model < 0x60)
1075 			goto ddr3;
1076 
1077 		/*
1078 		 * Model 0x60 needs special handling:
1079 		 *
1080 		 * We use a Chip Select value of '0' to obtain dcsm.
1081 		 * Theoretically, it is possible to populate LRDIMMs of different
1082 		 * 'Rank' value on a DCT. But this is not the common case. So,
1083 		 * it's reasonable to assume all DIMMs are going to be of the same
1084 		 * 'type' until proven otherwise.
1085 		 */
1086 		amd64_read_dct_pci_cfg(pvt, 0, DRAM_CONTROL, &dram_ctrl);
1087 		dcsm = pvt->csels[0].csmasks[0];
1088 
1089 		if (((dram_ctrl >> 8) & 0x7) == 0x2)
1090 			pvt->dram_type = MEM_DDR4;
1091 		else if (pvt->dclr0 & BIT(16))
1092 			pvt->dram_type = MEM_DDR3;
1093 		else if (dcsm & 0x3)
1094 			pvt->dram_type = MEM_LRDDR3;
1095 		else
1096 			pvt->dram_type = MEM_RDDR3;
1097 
1098 		return;
1099 
1100 	case 0x16:
1101 		goto ddr3;
1102 
1103 	default:
1104 		WARN(1, KERN_ERR "%s: Family??? 0x%x\n", __func__, pvt->fam);
1105 		pvt->dram_type = MEM_EMPTY;
1106 	}
1107 	return;
1108 
1109 ddr3:
1110 	pvt->dram_type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
1111 }
1112 
1113 /* Get the number of DCT channels the memory controller is using. */
1114 static int k8_early_channel_count(struct amd64_pvt *pvt)
1115 {
1116 	int flag;
1117 
1118 	if (pvt->ext_model >= K8_REV_F)
1119 		/* RevF (NPT) and later */
1120 		flag = pvt->dclr0 & WIDTH_128;
1121 	else
1122 		/* RevE and earlier */
1123 		flag = pvt->dclr0 & REVE_WIDTH_128;
1124 
1125 	/* not used */
1126 	pvt->dclr1 = 0;
1127 
1128 	return (flag) ? 2 : 1;
1129 }
1130 
1131 /* On F10h and later ErrAddr is MC4_ADDR[47:1] */
1132 static u64 get_error_address(struct amd64_pvt *pvt, struct mce *m)
1133 {
1134 	u16 mce_nid = amd_get_nb_id(m->extcpu);
1135 	struct mem_ctl_info *mci;
1136 	u8 start_bit = 1;
1137 	u8 end_bit   = 47;
1138 	u64 addr;
1139 
1140 	mci = edac_mc_find(mce_nid);
1141 	if (!mci)
1142 		return 0;
1143 
1144 	pvt = mci->pvt_info;
1145 
1146 	if (pvt->fam == 0xf) {
1147 		start_bit = 3;
1148 		end_bit   = 39;
1149 	}
1150 
1151 	addr = m->addr & GENMASK_ULL(end_bit, start_bit);
1152 
1153 	/*
1154 	 * Erratum 637 workaround
1155 	 */
1156 	if (pvt->fam == 0x15) {
1157 		u64 cc6_base, tmp_addr;
1158 		u32 tmp;
1159 		u8 intlv_en;
1160 
1161 		if ((addr & GENMASK_ULL(47, 24)) >> 24 != 0x00fdf7)
1162 			return addr;
1163 
1164 
1165 		amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_LIM, &tmp);
1166 		intlv_en = tmp >> 21 & 0x7;
1167 
1168 		/* add [47:27] + 3 trailing bits */
1169 		cc6_base  = (tmp & GENMASK_ULL(20, 0)) << 3;
1170 
1171 		/* reverse and add DramIntlvEn */
1172 		cc6_base |= intlv_en ^ 0x7;
1173 
1174 		/* pin at [47:24] */
1175 		cc6_base <<= 24;
1176 
1177 		if (!intlv_en)
1178 			return cc6_base | (addr & GENMASK_ULL(23, 0));
1179 
1180 		amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_BASE, &tmp);
1181 
1182 							/* faster log2 */
1183 		tmp_addr  = (addr & GENMASK_ULL(23, 12)) << __fls(intlv_en + 1);
1184 
1185 		/* OR DramIntlvSel into bits [14:12] */
1186 		tmp_addr |= (tmp & GENMASK_ULL(23, 21)) >> 9;
1187 
1188 		/* add remaining [11:0] bits from original MC4_ADDR */
1189 		tmp_addr |= addr & GENMASK_ULL(11, 0);
1190 
1191 		return cc6_base | tmp_addr;
1192 	}
1193 
1194 	return addr;
1195 }
1196 
1197 static struct pci_dev *pci_get_related_function(unsigned int vendor,
1198 						unsigned int device,
1199 						struct pci_dev *related)
1200 {
1201 	struct pci_dev *dev = NULL;
1202 
1203 	while ((dev = pci_get_device(vendor, device, dev))) {
1204 		if (pci_domain_nr(dev->bus) == pci_domain_nr(related->bus) &&
1205 		    (dev->bus->number == related->bus->number) &&
1206 		    (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
1207 			break;
1208 	}
1209 
1210 	return dev;
1211 }
1212 
1213 static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
1214 {
1215 	struct amd_northbridge *nb;
1216 	struct pci_dev *f1 = NULL;
1217 	unsigned int pci_func;
1218 	int off = range << 3;
1219 	u32 llim;
1220 
1221 	amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off,  &pvt->ranges[range].base.lo);
1222 	amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo);
1223 
1224 	if (pvt->fam == 0xf)
1225 		return;
1226 
1227 	if (!dram_rw(pvt, range))
1228 		return;
1229 
1230 	amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off,  &pvt->ranges[range].base.hi);
1231 	amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi);
1232 
1233 	/* F15h: factor in CC6 save area by reading dst node's limit reg */
1234 	if (pvt->fam != 0x15)
1235 		return;
1236 
1237 	nb = node_to_amd_nb(dram_dst_node(pvt, range));
1238 	if (WARN_ON(!nb))
1239 		return;
1240 
1241 	if (pvt->model == 0x60)
1242 		pci_func = PCI_DEVICE_ID_AMD_15H_M60H_NB_F1;
1243 	else if (pvt->model == 0x30)
1244 		pci_func = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1;
1245 	else
1246 		pci_func = PCI_DEVICE_ID_AMD_15H_NB_F1;
1247 
1248 	f1 = pci_get_related_function(nb->misc->vendor, pci_func, nb->misc);
1249 	if (WARN_ON(!f1))
1250 		return;
1251 
1252 	amd64_read_pci_cfg(f1, DRAM_LOCAL_NODE_LIM, &llim);
1253 
1254 	pvt->ranges[range].lim.lo &= GENMASK_ULL(15, 0);
1255 
1256 				    /* {[39:27],111b} */
1257 	pvt->ranges[range].lim.lo |= ((llim & 0x1fff) << 3 | 0x7) << 16;
1258 
1259 	pvt->ranges[range].lim.hi &= GENMASK_ULL(7, 0);
1260 
1261 				    /* [47:40] */
1262 	pvt->ranges[range].lim.hi |= llim >> 13;
1263 
1264 	pci_dev_put(f1);
1265 }
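
/*
 * Worked example for the CC6 limit splice above (editorial, hypothetical
 * llim): if DRAM_LOCAL_NODE_LIM reads back 0x68a in its low 13 bits,
 * lim.lo[31:16] becomes (0x68a << 3) | 0x7 == 0x3457 (address bits
 * {[39:27],111b}) and llim >> 13 lands in lim.hi[7:0] (address bits
 * [47:40]).
 */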
1266 
1267 static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
1268 				    struct err_info *err)
1269 {
1270 	struct amd64_pvt *pvt = mci->pvt_info;
1271 
1272 	error_address_to_page_and_offset(sys_addr, err);
1273 
1274 	/*
1275 	 * Find out which node the error address belongs to. This may be
1276 	 * different from the node that detected the error.
1277 	 */
1278 	err->src_mci = find_mc_by_sys_addr(mci, sys_addr);
1279 	if (!err->src_mci) {
1280 		amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n",
1281 			     (unsigned long)sys_addr);
1282 		err->err_code = ERR_NODE;
1283 		return;
1284 	}
1285 
1286 	/* Now map the sys_addr to a CSROW */
1287 	err->csrow = sys_addr_to_csrow(err->src_mci, sys_addr);
1288 	if (err->csrow < 0) {
1289 		err->err_code = ERR_CSROW;
1290 		return;
1291 	}
1292 
1293 	/* CHIPKILL enabled */
1294 	if (pvt->nbcfg & NBCFG_CHIPKILL) {
1295 		err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome);
1296 		if (err->channel < 0) {
1297 			/*
1298 			 * Syndrome didn't map, so we don't know which of the
1299 			 * 2 DIMMs is in error. So we need to ID 'both' of them
1300 			 * as suspect.
1301 			 */
1302 			amd64_mc_warn(err->src_mci, "unknown syndrome 0x%04x - "
1303 				      "possible error reporting race\n",
1304 				      err->syndrome);
1305 			err->err_code = ERR_CHANNEL;
1306 			return;
1307 		}
1308 	} else {
1309 		/*
1310 		 * non-chipkill ecc mode
1311 		 *
1312 		 * The k8 documentation is unclear about how to determine the
1313 		 * channel number when using non-chipkill memory.  This method
1314 		 * was obtained from email communication with someone at AMD.
1315 		 * (Wish the email was placed in this comment - norsk)
1316 		 */
1317 		err->channel = ((sys_addr & BIT(3)) != 0);
1318 	}
1319 }
1320 
1321 static int ddr2_cs_size(unsigned i, bool dct_width)
1322 {
1323 	unsigned shift = 0;
1324 
1325 	if (i <= 2)
1326 		shift = i;
1327 	else if (!(i & 0x1))
1328 		shift = i >> 1;
1329 	else
1330 		shift = (i + 1) >> 1;
1331 
1332 	return 128 << (shift + !!dct_width);
1333 }
1334 
1335 static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1336 				  unsigned cs_mode, int cs_mask_nr)
1337 {
1338 	u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
1339 
1340 	if (pvt->ext_model >= K8_REV_F) {
1341 		WARN_ON(cs_mode > 11);
1342 		return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
1343 	}
1344 	else if (pvt->ext_model >= K8_REV_D) {
1345 		unsigned diff;
1346 		WARN_ON(cs_mode > 10);
1347 
1348 		/*
1349 		 * the below calculation, besides trying to win an obfuscated C
1350 		 * contest, maps cs_mode values to DIMM chip select sizes. The
1351 		 * mappings are:
1352 		 *
1353 		 * cs_mode	CS size (MB)
1354 		 * =======	============
1355 		 * 0		32
1356 		 * 1		64
1357 		 * 2		128
1358 		 * 3		128
1359 		 * 4		256
1360 		 * 5		512
1361 		 * 6		256
1362 		 * 7		512
1363 		 * 8		1024
1364 		 * 9		1024
1365 		 * 10		2048
1366 		 *
1367 		 * Basically, it calculates a value with which to shift the
1368 		 * smallest CS size of 32MB.
1369 		 *
1370 		 * ddr[23]_cs_size have a similar purpose.
1371 		 */
1372 		diff = cs_mode/3 + (unsigned)(cs_mode > 5);
1373 
1374 		return 32 << (cs_mode - diff);
1375 	}
1376 	else {
1377 		WARN_ON(cs_mode > 6);
1378 		return 32 << cs_mode;
1379 	}
1380 }
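
/*
 * Spot-check of the 'diff' trick above (editorial): cs_mode 5 gives
 * diff = 5/3 + 0 = 1 and 32 << (5 - 1) = 512MB; cs_mode 6 gives
 * diff = 6/3 + 1 = 3 and 32 << 3 = 256MB; cs_mode 8 gives
 * diff = 8/3 + 1 = 3 and 32 << 5 = 1024MB -- all matching the table.
 */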
1381 
1382 /*
1383  * Get the number of DCT channels in use.
1384  *
1385  * Return:
1386  *	number of Memory Channels in operation
1387  * Pass back:
1388  *	contents of the DCL0_LOW register
1389  */
1390 static int f1x_early_channel_count(struct amd64_pvt *pvt)
1391 {
1392 	int i, j, channels = 0;
1393 
1394 	/* On F10h, if we are in 128 bit mode, then we are using 2 channels */
1395 	if (pvt->fam == 0x10 && (pvt->dclr0 & WIDTH_128))
1396 		return 2;
1397 
1398 	 * Need to check if in unganged mode: in that case, there are 2 channels,
1399 	 * Need to check if in unganged mode: In such, there are 2 channels,
1400 	 * but they are not in 128 bit mode and thus the above 'dclr0' status
1401 	 * bit will be OFF.
1402 	 *
1403 	 * Need to check DCT0[0] and DCT1[0] to see if only one of them has
1404 	 * their CSEnable bit on. If so, then SINGLE DIMM case.
1405 	 */
1406 	edac_dbg(0, "Data width is not 128 bits - need more decoding\n");
1407 
1408 	/*
1409 	 * Check DRAM Bank Address Mapping values for each DIMM to see if there
1410 	 * is more than just one DIMM present in unganged mode. Need to check
1411 	 * both controllers since DIMMs can be placed in either one.
1412 	 */
1413 	for (i = 0; i < 2; i++) {
1414 		u32 dbam = (i ? pvt->dbam1 : pvt->dbam0);
1415 
1416 		for (j = 0; j < 4; j++) {
1417 			if (DBAM_DIMM(j, dbam) > 0) {
1418 				channels++;
1419 				break;
1420 			}
1421 		}
1422 	}
1423 
1424 	if (channels > 2)
1425 		channels = 2;
1426 
1427 	amd64_info("MCT channel count: %d\n", channels);
1428 
1429 	return channels;
1430 }
1431 
1432 static int f17_early_channel_count(struct amd64_pvt *pvt)
1433 {
1434 	int i, channels = 0;
1435 
1436 	/* SDP Control bit 31 (SdpInit) is clear for unused UMC channels */
1437 	for_each_umc(i)
1438 		channels += !!(pvt->umc[i].sdp_ctrl & UMC_SDP_INIT);
1439 
1440 	amd64_info("MCT channel count: %d\n", channels);
1441 
1442 	return channels;
1443 }
1444 
1445 static int ddr3_cs_size(unsigned i, bool dct_width)
1446 {
1447 	unsigned shift = 0;
1448 	int cs_size = 0;
1449 
1450 	if (i == 0 || i == 3 || i == 4)
1451 		cs_size = -1;
1452 	else if (i <= 2)
1453 		shift = i;
1454 	else if (i == 12)
1455 		shift = 7;
1456 	else if (!(i & 0x1))
1457 		shift = i >> 1;
1458 	else
1459 		shift = (i + 1) >> 1;
1460 
1461 	if (cs_size != -1)
1462 		cs_size = (128 * (1 << !!dct_width)) << shift;
1463 
1464 	return cs_size;
1465 }
1466 
1467 static int ddr3_lrdimm_cs_size(unsigned i, unsigned rank_multiply)
1468 {
1469 	unsigned shift = 0;
1470 	int cs_size = 0;
1471 
1472 	if (i < 4 || i == 6)
1473 		cs_size = -1;
1474 	else if (i == 12)
1475 		shift = 7;
1476 	else if (!(i & 0x1))
1477 		shift = i >> 1;
1478 	else
1479 		shift = (i + 1) >> 1;
1480 
1481 	if (cs_size != -1)
1482 		cs_size = rank_multiply * (128 << shift);
1483 
1484 	return cs_size;
1485 }
1486 
1487 static int ddr4_cs_size(unsigned i)
1488 {
1489 	int cs_size = 0;
1490 
1491 	if (i == 0)
1492 		cs_size = -1;
1493 	else if (i == 1)
1494 		cs_size = 1024;
1495 	else
1496 		/* Min cs_size = 1G */
1497 		cs_size = 1024 * (1 << (i >> 1));
1498 
1499 	return cs_size;
1500 }
1501 
1502 static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1503 				   unsigned cs_mode, int cs_mask_nr)
1504 {
1505 	u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
1506 
1507 	WARN_ON(cs_mode > 11);
1508 
1509 	if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
1510 		return ddr3_cs_size(cs_mode, dclr & WIDTH_128);
1511 	else
1512 		return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
1513 }
1514 
1515 /*
1516  * F15h supports only 64bit DCT interfaces
1517  */
1518 static int f15_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1519 				   unsigned cs_mode, int cs_mask_nr)
1520 {
1521 	WARN_ON(cs_mode > 12);
1522 
1523 	return ddr3_cs_size(cs_mode, false);
1524 }
1525 
1526 /* F15h M60h supports DDR4 mapping as well. */
1527 static int f15_m60h_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1528 					unsigned cs_mode, int cs_mask_nr)
1529 {
1530 	int cs_size;
1531 	u32 dcsm = pvt->csels[dct].csmasks[cs_mask_nr];
1532 
1533 	WARN_ON(cs_mode > 12);
1534 
1535 	if (pvt->dram_type == MEM_DDR4) {
1536 		if (cs_mode > 9)
1537 			return -1;
1538 
1539 		cs_size = ddr4_cs_size(cs_mode);
1540 	} else if (pvt->dram_type == MEM_LRDDR3) {
1541 		unsigned rank_multiply = dcsm & 0xf;
1542 
1543 		if (rank_multiply == 3)
1544 			rank_multiply = 4;
1545 		cs_size = ddr3_lrdimm_cs_size(cs_mode, rank_multiply);
1546 	} else {
1547 		/* Minimum cs size is 512MB for F15h M60h */
1548 		if (cs_mode == 0x1)
1549 			return -1;
1550 
1551 		cs_size = ddr3_cs_size(cs_mode, false);
1552 	}
1553 
1554 	return cs_size;
1555 }
1556 
1557 /*
1558  * F16h and F15h model 30h have only limited cs_modes.
1559  */
1560 static int f16_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1561 				unsigned cs_mode, int cs_mask_nr)
1562 {
1563 	WARN_ON(cs_mode > 12);
1564 
1565 	if (cs_mode == 6 || cs_mode == 8 ||
1566 	    cs_mode == 9 || cs_mode == 12)
1567 		return -1;
1568 	else
1569 		return ddr3_cs_size(cs_mode, false);
1570 }
1571 
1572 static int f17_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc,
1573 				    unsigned int cs_mode, int csrow_nr)
1574 {
1575 	u32 addr_mask_orig, addr_mask_deinterleaved;
1576 	u32 msb, weight, num_zero_bits;
1577 	int dimm, size = 0;
1578 
1579 	/* No Chip Selects are enabled. */
1580 	if (!cs_mode)
1581 		return size;
1582 
1583 	/* Requested size of an even CS but none are enabled. */
1584 	if (!(cs_mode & CS_EVEN) && !(csrow_nr & 1))
1585 		return size;
1586 
1587 	/* Requested size of an odd CS but none are enabled. */
1588 	if (!(cs_mode & CS_ODD) && (csrow_nr & 1))
1589 		return size;
1590 
1591 	/*
1592 	 * There is one mask per DIMM, and two Chip Selects per DIMM.
1593 	 *	CS0 and CS1 -> DIMM0
1594 	 *	CS2 and CS3 -> DIMM1
1595 	 */
1596 	dimm = csrow_nr >> 1;
1597 
1598 	/* Asymmetric dual-rank DIMM support. */
1599 	if ((csrow_nr & 1) && (cs_mode & CS_ODD_SECONDARY))
1600 		addr_mask_orig = pvt->csels[umc].csmasks_sec[dimm];
1601 	else
1602 		addr_mask_orig = pvt->csels[umc].csmasks[dimm];
1603 
1604 	/*
1605 	 * The number of zero bits in the mask is equal to the number of bits
1606 	 * in a full mask minus the number of bits in the current mask.
1607 	 *
1608 	 * The MSB is the number of bits in the full mask because BIT[0] is
1609 	 * always 0.
1610 	 */
1611 	msb = fls(addr_mask_orig) - 1;
1612 	weight = hweight_long(addr_mask_orig);
1613 	num_zero_bits = msb - weight;
1614 
1615 	/* Take the number of zero bits off from the top of the mask. */
1616 	addr_mask_deinterleaved = GENMASK_ULL(msb - num_zero_bits, 1);
1617 
1618 	edac_dbg(1, "CS%d DIMM%d AddrMasks:\n", csrow_nr, dimm);
1619 	edac_dbg(1, "  Original AddrMask: 0x%x\n", addr_mask_orig);
1620 	edac_dbg(1, "  Deinterleaved AddrMask: 0x%x\n", addr_mask_deinterleaved);
1621 
1622 	/* Register [31:1] = Address [39:9]. Size is in kBs here. */
1623 	size = (addr_mask_deinterleaved >> 2) + 1;
1624 
1625 	/* Return size in MBs. */
1626 	return size >> 10;
1627 }
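
/*
 * Worked example (editorial, hypothetical mask): an interleaved address
 * mask of 0x03fffdfe has msb == 25 and weight == 24, so one zero bit is
 * dropped from the top and the deinterleaved mask becomes GENMASK(24, 1)
 * == 0x01fffffe; (0x01fffffe >> 2) + 1 == 0x800000 kB == 8192 MB per chip
 * select.
 */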
1628 
1629 static void read_dram_ctl_register(struct amd64_pvt *pvt)
1630 {
1631 
1632 	if (pvt->fam == 0xf)
1633 		return;
1634 
1635 	if (!amd64_read_pci_cfg(pvt->F2, DCT_SEL_LO, &pvt->dct_sel_lo)) {
1636 		edac_dbg(0, "F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n",
1637 			 pvt->dct_sel_lo, dct_sel_baseaddr(pvt));
1638 
1639 		edac_dbg(0, "  DCTs operate in %s mode\n",
1640 			 (dct_ganging_enabled(pvt) ? "ganged" : "unganged"));
1641 
1642 		if (!dct_ganging_enabled(pvt))
1643 			edac_dbg(0, "  Address range split per DCT: %s\n",
1644 				 (dct_high_range_enabled(pvt) ? "yes" : "no"));
1645 
1646 		edac_dbg(0, "  data interleave for ECC: %s, DRAM cleared since last warm reset: %s\n",
1647 			 (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
1648 			 (dct_memory_cleared(pvt) ? "yes" : "no"));
1649 
1650 		edac_dbg(0, "  channel interleave: %s, "
1651 			 "interleave bits selector: 0x%x\n",
1652 			 (dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
1653 			 dct_sel_interleave_addr(pvt));
1654 	}
1655 
1656 	amd64_read_pci_cfg(pvt->F2, DCT_SEL_HI, &pvt->dct_sel_hi);
1657 }
1658 
1659 /*
1660  * Determine channel (DCT) based on the interleaving mode (see F15h M30h BKDG,
1661  * 2.10.12 Memory Interleaving Modes).
1662  */
1663 static u8 f15_m30h_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
1664 				     u8 intlv_en, int num_dcts_intlv,
1665 				     u32 dct_sel)
1666 {
1667 	u8 channel = 0;
1668 	u8 select;
1669 
1670 	if (!(intlv_en))
1671 		return (u8)(dct_sel);
1672 
1673 	if (num_dcts_intlv == 2) {
1674 		select = (sys_addr >> 8) & 0x3;
1675 		channel = select ? 0x3 : 0;
1676 	} else if (num_dcts_intlv == 4) {
1677 		u8 intlv_addr = dct_sel_interleave_addr(pvt);
1678 		switch (intlv_addr) {
1679 		case 0x4:
1680 			channel = (sys_addr >> 8) & 0x3;
1681 			break;
1682 		case 0x5:
1683 			channel = (sys_addr >> 9) & 0x3;
1684 			break;
1685 		}
1686 	}
1687 	return channel;
1688 }
1689 
1690 /*
1691  * Determine channel (DCT) based on the interleaving mode: F10h BKDG, 2.8.9 Memory
1692  * Interleaving Modes.
1693  */
1694 static u8 f1x_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
1695 				bool hi_range_sel, u8 intlv_en)
1696 {
1697 	u8 dct_sel_high = (pvt->dct_sel_lo >> 1) & 1;
1698 
1699 	if (dct_ganging_enabled(pvt))
1700 		return 0;
1701 
1702 	if (hi_range_sel)
1703 		return dct_sel_high;
1704 
1705 	/*
1706 	 * see F2x110[DctSelIntLvAddr] - channel interleave mode
1707 	 */
1708 	if (dct_interleave_enabled(pvt)) {
1709 		u8 intlv_addr = dct_sel_interleave_addr(pvt);
1710 
1711 		/* return DCT select function: 0=DCT0, 1=DCT1 */
1712 		if (!intlv_addr)
1713 			return sys_addr >> 6 & 1;
1714 
1715 		if (intlv_addr & 0x2) {
1716 			u8 shift = intlv_addr & 0x1 ? 9 : 6;
1717 			u32 temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) & 1;
1718 
1719 			return ((sys_addr >> shift) & 1) ^ temp;
1720 		}
1721 
1722 		if (intlv_addr & 0x4) {
1723 			u8 shift = intlv_addr & 0x1 ? 9 : 8;
1724 
1725 			return (sys_addr >> shift) & 1;
1726 		}
1727 
1728 		return (sys_addr >> (12 + hweight8(intlv_en))) & 1;
1729 	}
1730 
1731 	if (dct_high_range_enabled(pvt))
1732 		return ~dct_sel_high & 1;
1733 
1734 	return 0;
1735 }
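
/*
 * Illustrative readings of the interleave cases above (editorial note):
 * with channel interleaving enabled and DctSelIntLvAddr == 0 the channel
 * is simply SysAddr bit 6 (cache-line interleave); with DctSelIntLvAddr ==
 * 2 it is SysAddr bit 6 XORed with the parity of SysAddr[20:16].
 */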
1736 
1737 /* Convert the sys_addr to the normalized DCT address */
1738 static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, u8 range,
1739 				 u64 sys_addr, bool hi_rng,
1740 				 u32 dct_sel_base_addr)
1741 {
1742 	u64 chan_off;
1743 	u64 dram_base		= get_dram_base(pvt, range);
1744 	u64 hole_off		= f10_dhar_offset(pvt);
1745 	u64 dct_sel_base_off	= (u64)(pvt->dct_sel_hi & 0xFFFFFC00) << 16;
1746 
1747 	if (hi_rng) {
1748 		/*
1749 		 * if
1750 		 * base address of high range is below 4Gb
1751 		 * (bits [47:27] at [31:11])
1752 		 * DRAM address space on this DCT is hoisted above 4Gb	&&
1753 		 * sys_addr > 4Gb
1754 		 *
1755 		 *	remove hole offset from sys_addr
1756 		 * else
1757 		 *	remove high range offset from sys_addr
1758 		 */
1759 		if ((!(dct_sel_base_addr >> 16) ||
1760 		     dct_sel_base_addr < dhar_base(pvt)) &&
1761 		    dhar_valid(pvt) &&
1762 		    (sys_addr >= BIT_64(32)))
1763 			chan_off = hole_off;
1764 		else
1765 			chan_off = dct_sel_base_off;
1766 	} else {
1767 		/*
1768 		 * if
1769 		 * we have a valid hole		&&
1770 		 * sys_addr > 4Gb
1771 		 *
1772 		 *	remove hole
1773 		 * else
1774 		 *	remove dram base to normalize to DCT address
1775 		 */
1776 		if (dhar_valid(pvt) && (sys_addr >= BIT_64(32)))
1777 			chan_off = hole_off;
1778 		else
1779 			chan_off = dram_base;
1780 	}
1781 
1782 	return (sys_addr & GENMASK_ULL(47,6)) - (chan_off & GENMASK_ULL(47,23));
1783 }
1784 
1785 /*
1786  * checks if the csrow passed in is marked as SPARED, if so returns the new
1787  * spare row
1788  */
1789 static int f10_process_possible_spare(struct amd64_pvt *pvt, u8 dct, int csrow)
1790 {
1791 	int tmp_cs;
1792 
1793 	if (online_spare_swap_done(pvt, dct) &&
1794 	    csrow == online_spare_bad_dramcs(pvt, dct)) {
1795 
1796 		for_each_chip_select(tmp_cs, dct, pvt) {
1797 			if (chip_select_base(tmp_cs, dct, pvt) & 0x2) {
1798 				csrow = tmp_cs;
1799 				break;
1800 			}
1801 		}
1802 	}
1803 	return csrow;
1804 }
1805 
1806 /*
1807  * Iterate over the DRAM DCT "base" and "mask" registers looking for a
1808  * SystemAddr match on the specified 'ChannelSelect' and 'NodeID'
1809  *
1810  * Return:
1811  *	-EINVAL:  NOT FOUND
1812  *	0..csrow = Chip-Select Row
1813  */
1814 static int f1x_lookup_addr_in_dct(u64 in_addr, u8 nid, u8 dct)
1815 {
1816 	struct mem_ctl_info *mci;
1817 	struct amd64_pvt *pvt;
1818 	u64 cs_base, cs_mask;
1819 	int cs_found = -EINVAL;
1820 	int csrow;
1821 
1822 	mci = edac_mc_find(nid);
1823 	if (!mci)
1824 		return cs_found;
1825 
1826 	pvt = mci->pvt_info;
1827 
1828 	edac_dbg(1, "input addr: 0x%llx, DCT: %d\n", in_addr, dct);
1829 
1830 	for_each_chip_select(csrow, dct, pvt) {
1831 		if (!csrow_enabled(csrow, dct, pvt))
1832 			continue;
1833 
1834 		get_cs_base_and_mask(pvt, csrow, dct, &cs_base, &cs_mask);
1835 
1836 		edac_dbg(1, "    CSROW=%d CSBase=0x%llx CSMask=0x%llx\n",
1837 			 csrow, cs_base, cs_mask);
1838 
1839 		cs_mask = ~cs_mask;
1840 
1841 		edac_dbg(1, "    (InputAddr & ~CSMask)=0x%llx (CSBase & ~CSMask)=0x%llx\n",
1842 			 (in_addr & cs_mask), (cs_base & cs_mask));
1843 
1844 		if ((in_addr & cs_mask) == (cs_base & cs_mask)) {
1845 			if (pvt->fam == 0x15 && pvt->model >= 0x30) {
1846 				cs_found =  csrow;
1847 				break;
1848 			}
1849 			cs_found = f10_process_possible_spare(pvt, dct, csrow);
1850 
1851 			edac_dbg(1, " MATCH csrow=%d\n", cs_found);
1852 			break;
1853 		}
1854 	}
1855 	return cs_found;
1856 }
1857 
1858 /*
 * See F2x10C. Non-interleaved graphics framebuffer memory below 16 GB is
1860  * swapped with a region located at the bottom of memory so that the GPU can use
1861  * the interleaved region and thus two channels.
1862  */
1863 static u64 f1x_swap_interleaved_region(struct amd64_pvt *pvt, u64 sys_addr)
1864 {
1865 	u32 swap_reg, swap_base, swap_limit, rgn_size, tmp_addr;
1866 
1867 	if (pvt->fam == 0x10) {
1868 		/* only revC3 and revE have that feature */
1869 		if (pvt->model < 4 || (pvt->model < 0xa && pvt->stepping < 3))
1870 			return sys_addr;
1871 	}
1872 
1873 	amd64_read_pci_cfg(pvt->F2, SWAP_INTLV_REG, &swap_reg);
1874 
1875 	if (!(swap_reg & 0x1))
1876 		return sys_addr;
1877 
1878 	swap_base	= (swap_reg >> 3) & 0x7f;
1879 	swap_limit	= (swap_reg >> 11) & 0x7f;
1880 	rgn_size	= (swap_reg >> 20) & 0x7f;
1881 	tmp_addr	= sys_addr >> 27;
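	/*
	 * swap_base, swap_limit, rgn_size and tmp_addr are all in 128 MB
	 * (bit 27) units; the swap is applied only below 16 GB (bit 34).
	 */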
1882 
1883 	if (!(sys_addr >> 34) &&
1884 	    (((tmp_addr >= swap_base) &&
1885 	     (tmp_addr <= swap_limit)) ||
1886 	     (tmp_addr < rgn_size)))
1887 		return sys_addr ^ (u64)swap_base << 27;
1888 
1889 	return sys_addr;
1890 }
1891 
1892 /* For a given @dram_range, check if @sys_addr falls within it. */
1893 static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
1894 				  u64 sys_addr, int *chan_sel)
1895 {
1896 	int cs_found = -EINVAL;
1897 	u64 chan_addr;
1898 	u32 dct_sel_base;
1899 	u8 channel;
1900 	bool high_range = false;
1901 
1902 	u8 node_id    = dram_dst_node(pvt, range);
1903 	u8 intlv_en   = dram_intlv_en(pvt, range);
1904 	u32 intlv_sel = dram_intlv_sel(pvt, range);
1905 
1906 	edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
1907 		 range, sys_addr, get_dram_limit(pvt, range));
1908 
1909 	if (dhar_valid(pvt) &&
1910 	    dhar_base(pvt) <= sys_addr &&
1911 	    sys_addr < BIT_64(32)) {
1912 		amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
1913 			    sys_addr);
1914 		return -EINVAL;
1915 	}
1916 
1917 	if (intlv_en && (intlv_sel != ((sys_addr >> 12) & intlv_en)))
1918 		return -EINVAL;
1919 
1920 	sys_addr = f1x_swap_interleaved_region(pvt, sys_addr);
1921 
1922 	dct_sel_base = dct_sel_baseaddr(pvt);
1923 
1924 	/*
1925 	 * check whether addresses >= DctSelBaseAddr[47:27] are to be used to
1926 	 * select between DCT0 and DCT1.
1927 	 */
1928 	if (dct_high_range_enabled(pvt) &&
1929 	   !dct_ganging_enabled(pvt) &&
1930 	   ((sys_addr >> 27) >= (dct_sel_base >> 11)))
1931 		high_range = true;
1932 
1933 	channel = f1x_determine_channel(pvt, sys_addr, high_range, intlv_en);
1934 
1935 	chan_addr = f1x_get_norm_dct_addr(pvt, range, sys_addr,
1936 					  high_range, dct_sel_base);
1937 
1938 	/* Remove node interleaving, see F1x120 */
1939 	if (intlv_en)
1940 		chan_addr = ((chan_addr >> (12 + hweight8(intlv_en))) << 12) |
1941 			    (chan_addr & 0xfff);
1942 
1943 	/* remove channel interleave */
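	/* (i.e. squeeze the address bit used for channel selection back out) */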
1944 	if (dct_interleave_enabled(pvt) &&
1945 	   !dct_high_range_enabled(pvt) &&
1946 	   !dct_ganging_enabled(pvt)) {
1947 
1948 		if (dct_sel_interleave_addr(pvt) != 1) {
1949 			if (dct_sel_interleave_addr(pvt) == 0x3)
1950 				/* hash 9 */
1951 				chan_addr = ((chan_addr >> 10) << 9) |
1952 					     (chan_addr & 0x1ff);
1953 			else
1954 				/* A[6] or hash 6 */
1955 				chan_addr = ((chan_addr >> 7) << 6) |
1956 					     (chan_addr & 0x3f);
1957 		} else
1958 			/* A[12] */
1959 			chan_addr = ((chan_addr >> 13) << 12) |
1960 				     (chan_addr & 0xfff);
1961 	}
1962 
1963 	edac_dbg(1, "   Normalized DCT addr: 0x%llx\n", chan_addr);
1964 
1965 	cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, channel);
1966 
1967 	if (cs_found >= 0)
1968 		*chan_sel = channel;
1969 
1970 	return cs_found;
1971 }
1972 
1973 static int f15_m30h_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
1974 					u64 sys_addr, int *chan_sel)
1975 {
1976 	int cs_found = -EINVAL;
1977 	int num_dcts_intlv = 0;
1978 	u64 chan_addr, chan_offset;
1979 	u64 dct_base, dct_limit;
1980 	u32 dct_cont_base_reg, dct_cont_limit_reg, tmp;
1981 	u8 channel, alias_channel, leg_mmio_hole, dct_sel, dct_offset_en;
1982 
1983 	u64 dhar_offset		= f10_dhar_offset(pvt);
1984 	u8 intlv_addr		= dct_sel_interleave_addr(pvt);
1985 	u8 node_id		= dram_dst_node(pvt, range);
1986 	u8 intlv_en		= dram_intlv_en(pvt, range);
1987 
1988 	amd64_read_pci_cfg(pvt->F1, DRAM_CONT_BASE, &dct_cont_base_reg);
1989 	amd64_read_pci_cfg(pvt->F1, DRAM_CONT_LIMIT, &dct_cont_limit_reg);
1990 
1991 	dct_offset_en		= (u8) ((dct_cont_base_reg >> 3) & BIT(0));
1992 	dct_sel			= (u8) ((dct_cont_base_reg >> 4) & 0x7);
1993 
1994 	edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
1995 		 range, sys_addr, get_dram_limit(pvt, range));
1996 
	if (!(get_dram_base(pvt, range)  <= sys_addr) ||
	    !(get_dram_limit(pvt, range) >= sys_addr))
1999 		return -EINVAL;
2000 
2001 	if (dhar_valid(pvt) &&
2002 	    dhar_base(pvt) <= sys_addr &&
2003 	    sys_addr < BIT_64(32)) {
2004 		amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
2005 			    sys_addr);
2006 		return -EINVAL;
2007 	}
2008 
2009 	/* Verify sys_addr is within DCT Range. */
2010 	dct_base = (u64) dct_sel_baseaddr(pvt);
2011 	dct_limit = (dct_cont_limit_reg >> 11) & 0x1FFF;
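	/*
	 * Both values are in 128 MB (bit 27) granularity, hence the
	 * sys_addr >> 27 comparisons below.
	 */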
2012 
2013 	if (!(dct_cont_base_reg & BIT(0)) &&
2014 	    !(dct_base <= (sys_addr >> 27) &&
2015 	      dct_limit >= (sys_addr >> 27)))
2016 		return -EINVAL;
2017 
	/* Verify the number of DCTs that participate in channel interleaving. */
2019 	num_dcts_intlv = (int) hweight8(intlv_en);
2020 
	if ((num_dcts_intlv % 2) || (num_dcts_intlv > 4))
2022 		return -EINVAL;
2023 
2024 	if (pvt->model >= 0x60)
2025 		channel = f1x_determine_channel(pvt, sys_addr, false, intlv_en);
2026 	else
2027 		channel = f15_m30h_determine_channel(pvt, sys_addr, intlv_en,
2028 						     num_dcts_intlv, dct_sel);
2029 
2030 	/* Verify we stay within the MAX number of channels allowed */
2031 	if (channel > 3)
2032 		return -EINVAL;
2033 
2034 	leg_mmio_hole = (u8) (dct_cont_base_reg >> 1 & BIT(0));
2035 
2036 	/* Get normalized DCT addr */
2037 	if (leg_mmio_hole && (sys_addr >= BIT_64(32)))
2038 		chan_offset = dhar_offset;
2039 	else
2040 		chan_offset = dct_base << 27;
2041 
2042 	chan_addr = sys_addr - chan_offset;
2043 
2044 	/* remove channel interleave */
2045 	if (num_dcts_intlv == 2) {
2046 		if (intlv_addr == 0x4)
2047 			chan_addr = ((chan_addr >> 9) << 8) |
2048 						(chan_addr & 0xff);
2049 		else if (intlv_addr == 0x5)
2050 			chan_addr = ((chan_addr >> 10) << 9) |
2051 						(chan_addr & 0x1ff);
2052 		else
2053 			return -EINVAL;
2054 
2055 	} else if (num_dcts_intlv == 4) {
2056 		if (intlv_addr == 0x4)
2057 			chan_addr = ((chan_addr >> 10) << 8) |
2058 							(chan_addr & 0xff);
2059 		else if (intlv_addr == 0x5)
2060 			chan_addr = ((chan_addr >> 11) << 9) |
2061 							(chan_addr & 0x1ff);
2062 		else
2063 			return -EINVAL;
2064 	}
2065 
2066 	if (dct_offset_en) {
2067 		amd64_read_pci_cfg(pvt->F1,
2068 				   DRAM_CONT_HIGH_OFF + (int) channel * 4,
2069 				   &tmp);
2070 		chan_addr +=  (u64) ((tmp >> 11) & 0xfff) << 27;
2071 	}
2072 
2073 	f15h_select_dct(pvt, channel);
2074 
2075 	edac_dbg(1, "   Normalized DCT addr: 0x%llx\n", chan_addr);
2076 
2077 	/*
2078 	 * Find Chip select:
2079 	 * if channel = 3, then alias it to 1. This is because, in F15 M30h,
	 * there is support for 4 DCTs, but only 2 are currently functional.
2081 	 * They are DCT0 and DCT3. But we have read all registers of DCT3 into
2082 	 * pvt->csels[1]. So we need to use '1' here to get correct info.
2083 	 * Refer F15 M30h BKDG Section 2.10 and 2.10.3 for clarifications.
2084 	 */
2085 	alias_channel =  (channel == 3) ? 1 : channel;
2086 
2087 	cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, alias_channel);
2088 
2089 	if (cs_found >= 0)
2090 		*chan_sel = alias_channel;
2091 
2092 	return cs_found;
2093 }
2094 
2095 static int f1x_translate_sysaddr_to_cs(struct amd64_pvt *pvt,
2096 					u64 sys_addr,
2097 					int *chan_sel)
2098 {
2099 	int cs_found = -EINVAL;
2100 	unsigned range;
2101 
2102 	for (range = 0; range < DRAM_RANGES; range++) {
2103 		if (!dram_rw(pvt, range))
2104 			continue;
2105 
2106 		if (pvt->fam == 0x15 && pvt->model >= 0x30)
2107 			cs_found = f15_m30h_match_to_this_node(pvt, range,
2108 							       sys_addr,
2109 							       chan_sel);
2110 
2111 		else if ((get_dram_base(pvt, range)  <= sys_addr) &&
2112 			 (get_dram_limit(pvt, range) >= sys_addr)) {
2113 			cs_found = f1x_match_to_this_node(pvt, range,
2114 							  sys_addr, chan_sel);
2115 			if (cs_found >= 0)
2116 				break;
2117 		}
2118 	}
2119 	return cs_found;
2120 }
2121 
2122 /*
2123  * For reference see "2.8.5 Routing DRAM Requests" in F10 BKDG. This code maps
2124  * a @sys_addr to NodeID, DCT (channel) and chip select (CSROW).
2125  *
2126  * The @sys_addr is usually an error address received from the hardware
2127  * (MCX_ADDR).
2128  */
2129 static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
2130 				     struct err_info *err)
2131 {
2132 	struct amd64_pvt *pvt = mci->pvt_info;
2133 
2134 	error_address_to_page_and_offset(sys_addr, err);
2135 
2136 	err->csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &err->channel);
2137 	if (err->csrow < 0) {
2138 		err->err_code = ERR_CSROW;
2139 		return;
2140 	}
2141 
2142 	/*
2143 	 * We need the syndromes for channel detection only when we're
	 * ganged. Otherwise err->channel should already contain the channel at
2145 	 * this point.
2146 	 */
2147 	if (dct_ganging_enabled(pvt))
2148 		err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome);
2149 }
2150 
2151 /*
 * debug routine to display the memory sizes of all logical DIMMs and their
2153  * CSROWs
2154  */
2155 static void debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
2156 {
2157 	int dimm, size0, size1;
2158 	u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases;
2159 	u32 dbam  = ctrl ? pvt->dbam1 : pvt->dbam0;
2160 
	if (pvt->fam == 0xf) {
		/* K8 families < revF not supported yet */
		if (pvt->ext_model < K8_REV_F)
			return;

		WARN_ON(ctrl != 0);
	}
2168 
2169 	if (pvt->fam == 0x10) {
2170 		dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1
2171 							   : pvt->dbam0;
2172 		dcsb = (ctrl && !dct_ganging_enabled(pvt)) ?
2173 				 pvt->csels[1].csbases :
2174 				 pvt->csels[0].csbases;
2175 	} else if (ctrl) {
2176 		dbam = pvt->dbam0;
2177 		dcsb = pvt->csels[1].csbases;
2178 	}
2179 	edac_dbg(1, "F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n",
2180 		 ctrl, dbam);
2181 
2182 	edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);
2183 
2184 	/* Dump memory sizes for DIMM and its CSROWs */
2185 	for (dimm = 0; dimm < 4; dimm++) {
2186 
2187 		size0 = 0;
2188 		if (dcsb[dimm*2] & DCSB_CS_ENABLE)
2189 			/*
2190 			 * For F15m60h, we need multiplier for LRDIMM cs_size
2191 			 * calculation. We pass dimm value to the dbam_to_cs
2192 			 * mapper so we can find the multiplier from the
2193 			 * corresponding DCSM.
2194 			 */
2195 			size0 = pvt->ops->dbam_to_cs(pvt, ctrl,
2196 						     DBAM_DIMM(dimm, dbam),
2197 						     dimm);
2198 
2199 		size1 = 0;
2200 		if (dcsb[dimm*2 + 1] & DCSB_CS_ENABLE)
2201 			size1 = pvt->ops->dbam_to_cs(pvt, ctrl,
2202 						     DBAM_DIMM(dimm, dbam),
2203 						     dimm);
2204 
2205 		amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
2206 				dimm * 2,     size0,
2207 				dimm * 2 + 1, size1);
2208 	}
2209 }
2210 
2211 static struct amd64_family_type family_types[] = {
2212 	[K8_CPUS] = {
2213 		.ctl_name = "K8",
2214 		.f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
2215 		.f2_id = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
2216 		.max_mcs = 2,
2217 		.ops = {
2218 			.early_channel_count	= k8_early_channel_count,
2219 			.map_sysaddr_to_csrow	= k8_map_sysaddr_to_csrow,
2220 			.dbam_to_cs		= k8_dbam_to_chip_select,
2221 		}
2222 	},
2223 	[F10_CPUS] = {
2224 		.ctl_name = "F10h",
2225 		.f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP,
2226 		.f2_id = PCI_DEVICE_ID_AMD_10H_NB_DRAM,
2227 		.max_mcs = 2,
2228 		.ops = {
2229 			.early_channel_count	= f1x_early_channel_count,
2230 			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
2231 			.dbam_to_cs		= f10_dbam_to_chip_select,
2232 		}
2233 	},
2234 	[F15_CPUS] = {
2235 		.ctl_name = "F15h",
2236 		.f1_id = PCI_DEVICE_ID_AMD_15H_NB_F1,
2237 		.f2_id = PCI_DEVICE_ID_AMD_15H_NB_F2,
2238 		.max_mcs = 2,
2239 		.ops = {
2240 			.early_channel_count	= f1x_early_channel_count,
2241 			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
2242 			.dbam_to_cs		= f15_dbam_to_chip_select,
2243 		}
2244 	},
2245 	[F15_M30H_CPUS] = {
2246 		.ctl_name = "F15h_M30h",
2247 		.f1_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1,
2248 		.f2_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F2,
2249 		.max_mcs = 2,
2250 		.ops = {
2251 			.early_channel_count	= f1x_early_channel_count,
2252 			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
2253 			.dbam_to_cs		= f16_dbam_to_chip_select,
2254 		}
2255 	},
2256 	[F15_M60H_CPUS] = {
2257 		.ctl_name = "F15h_M60h",
2258 		.f1_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F1,
2259 		.f2_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F2,
2260 		.max_mcs = 2,
2261 		.ops = {
2262 			.early_channel_count	= f1x_early_channel_count,
2263 			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
2264 			.dbam_to_cs		= f15_m60h_dbam_to_chip_select,
2265 		}
2266 	},
2267 	[F16_CPUS] = {
2268 		.ctl_name = "F16h",
2269 		.f1_id = PCI_DEVICE_ID_AMD_16H_NB_F1,
2270 		.f2_id = PCI_DEVICE_ID_AMD_16H_NB_F2,
2271 		.max_mcs = 2,
2272 		.ops = {
2273 			.early_channel_count	= f1x_early_channel_count,
2274 			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
2275 			.dbam_to_cs		= f16_dbam_to_chip_select,
2276 		}
2277 	},
2278 	[F16_M30H_CPUS] = {
2279 		.ctl_name = "F16h_M30h",
2280 		.f1_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F1,
2281 		.f2_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F2,
2282 		.max_mcs = 2,
2283 		.ops = {
2284 			.early_channel_count	= f1x_early_channel_count,
2285 			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
2286 			.dbam_to_cs		= f16_dbam_to_chip_select,
2287 		}
2288 	},
2289 	[F17_CPUS] = {
2290 		.ctl_name = "F17h",
2291 		.f0_id = PCI_DEVICE_ID_AMD_17H_DF_F0,
2292 		.f6_id = PCI_DEVICE_ID_AMD_17H_DF_F6,
2293 		.max_mcs = 2,
2294 		.ops = {
2295 			.early_channel_count	= f17_early_channel_count,
2296 			.dbam_to_cs		= f17_addr_mask_to_cs_size,
2297 		}
2298 	},
2299 	[F17_M10H_CPUS] = {
2300 		.ctl_name = "F17h_M10h",
2301 		.f0_id = PCI_DEVICE_ID_AMD_17H_M10H_DF_F0,
2302 		.f6_id = PCI_DEVICE_ID_AMD_17H_M10H_DF_F6,
2303 		.max_mcs = 2,
2304 		.ops = {
2305 			.early_channel_count	= f17_early_channel_count,
2306 			.dbam_to_cs		= f17_addr_mask_to_cs_size,
2307 		}
2308 	},
2309 	[F17_M30H_CPUS] = {
2310 		.ctl_name = "F17h_M30h",
2311 		.f0_id = PCI_DEVICE_ID_AMD_17H_M30H_DF_F0,
2312 		.f6_id = PCI_DEVICE_ID_AMD_17H_M30H_DF_F6,
2313 		.max_mcs = 8,
2314 		.ops = {
2315 			.early_channel_count	= f17_early_channel_count,
2316 			.dbam_to_cs		= f17_addr_mask_to_cs_size,
2317 		}
2318 	},
2319 	[F17_M60H_CPUS] = {
2320 		.ctl_name = "F17h_M60h",
2321 		.f0_id = PCI_DEVICE_ID_AMD_17H_M60H_DF_F0,
2322 		.f6_id = PCI_DEVICE_ID_AMD_17H_M60H_DF_F6,
2323 		.max_mcs = 2,
2324 		.ops = {
2325 			.early_channel_count	= f17_early_channel_count,
2326 			.dbam_to_cs		= f17_addr_mask_to_cs_size,
2327 		}
2328 	},
2329 	[F17_M70H_CPUS] = {
2330 		.ctl_name = "F17h_M70h",
2331 		.f0_id = PCI_DEVICE_ID_AMD_17H_M70H_DF_F0,
2332 		.f6_id = PCI_DEVICE_ID_AMD_17H_M70H_DF_F6,
2333 		.max_mcs = 2,
2334 		.ops = {
2335 			.early_channel_count	= f17_early_channel_count,
2336 			.dbam_to_cs		= f17_addr_mask_to_cs_size,
2337 		}
2338 	},
2339 	[F19_CPUS] = {
2340 		.ctl_name = "F19h",
2341 		.f0_id = PCI_DEVICE_ID_AMD_19H_DF_F0,
2342 		.f6_id = PCI_DEVICE_ID_AMD_19H_DF_F6,
2343 		.max_mcs = 8,
2344 		.ops = {
2345 			.early_channel_count	= f17_early_channel_count,
2346 			.dbam_to_cs		= f17_addr_mask_to_cs_size,
2347 		}
2348 	},
2349 };
2350 
2351 /*
2352  * These are tables of eigenvectors (one per line) which can be used for the
2353  * construction of the syndrome tables. The modified syndrome search algorithm
2354  * uses those to find the symbol in error and thus the DIMM.
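 * Each group of 4 (x4) or 8 (x8) consecutive vectors corresponds to one error
 * symbol; decode_syndrome() XORs matching vectors into the syndrome and
 * returns the symbol whose group reduces it to zero.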
2355  *
2356  * Algorithm courtesy of Ross LaFetra from AMD.
2357  */
2358 static const u16 x4_vectors[] = {
2359 	0x2f57, 0x1afe, 0x66cc, 0xdd88,
2360 	0x11eb, 0x3396, 0x7f4c, 0xeac8,
2361 	0x0001, 0x0002, 0x0004, 0x0008,
2362 	0x1013, 0x3032, 0x4044, 0x8088,
2363 	0x106b, 0x30d6, 0x70fc, 0xe0a8,
2364 	0x4857, 0xc4fe, 0x13cc, 0x3288,
2365 	0x1ac5, 0x2f4a, 0x5394, 0xa1e8,
2366 	0x1f39, 0x251e, 0xbd6c, 0x6bd8,
2367 	0x15c1, 0x2a42, 0x89ac, 0x4758,
2368 	0x2b03, 0x1602, 0x4f0c, 0xca08,
2369 	0x1f07, 0x3a0e, 0x6b04, 0xbd08,
2370 	0x8ba7, 0x465e, 0x244c, 0x1cc8,
2371 	0x2b87, 0x164e, 0x642c, 0xdc18,
2372 	0x40b9, 0x80de, 0x1094, 0x20e8,
2373 	0x27db, 0x1eb6, 0x9dac, 0x7b58,
2374 	0x11c1, 0x2242, 0x84ac, 0x4c58,
2375 	0x1be5, 0x2d7a, 0x5e34, 0xa718,
2376 	0x4b39, 0x8d1e, 0x14b4, 0x28d8,
2377 	0x4c97, 0xc87e, 0x11fc, 0x33a8,
2378 	0x8e97, 0x497e, 0x2ffc, 0x1aa8,
2379 	0x16b3, 0x3d62, 0x4f34, 0x8518,
2380 	0x1e2f, 0x391a, 0x5cac, 0xf858,
2381 	0x1d9f, 0x3b7a, 0x572c, 0xfe18,
2382 	0x15f5, 0x2a5a, 0x5264, 0xa3b8,
2383 	0x1dbb, 0x3b66, 0x715c, 0xe3f8,
2384 	0x4397, 0xc27e, 0x17fc, 0x3ea8,
2385 	0x1617, 0x3d3e, 0x6464, 0xb8b8,
2386 	0x23ff, 0x12aa, 0xab6c, 0x56d8,
2387 	0x2dfb, 0x1ba6, 0x913c, 0x7328,
2388 	0x185d, 0x2ca6, 0x7914, 0x9e28,
2389 	0x171b, 0x3e36, 0x7d7c, 0xebe8,
2390 	0x4199, 0x82ee, 0x19f4, 0x2e58,
2391 	0x4807, 0xc40e, 0x130c, 0x3208,
2392 	0x1905, 0x2e0a, 0x5804, 0xac08,
2393 	0x213f, 0x132a, 0xadfc, 0x5ba8,
2394 	0x19a9, 0x2efe, 0xb5cc, 0x6f88,
2395 };
2396 
2397 static const u16 x8_vectors[] = {
2398 	0x0145, 0x028a, 0x2374, 0x43c8, 0xa1f0, 0x0520, 0x0a40, 0x1480,
2399 	0x0211, 0x0422, 0x0844, 0x1088, 0x01b0, 0x44e0, 0x23c0, 0xed80,
2400 	0x1011, 0x0116, 0x022c, 0x0458, 0x08b0, 0x8c60, 0x2740, 0x4e80,
2401 	0x0411, 0x0822, 0x1044, 0x0158, 0x02b0, 0x2360, 0x46c0, 0xab80,
2402 	0x0811, 0x1022, 0x012c, 0x0258, 0x04b0, 0x4660, 0x8cc0, 0x2780,
2403 	0x2071, 0x40e2, 0xa0c4, 0x0108, 0x0210, 0x0420, 0x0840, 0x1080,
2404 	0x4071, 0x80e2, 0x0104, 0x0208, 0x0410, 0x0820, 0x1040, 0x2080,
2405 	0x8071, 0x0102, 0x0204, 0x0408, 0x0810, 0x1020, 0x2040, 0x4080,
2406 	0x019d, 0x03d6, 0x136c, 0x2198, 0x50b0, 0xb2e0, 0x0740, 0x0e80,
2407 	0x0189, 0x03ea, 0x072c, 0x0e58, 0x1cb0, 0x56e0, 0x37c0, 0xf580,
2408 	0x01fd, 0x0376, 0x06ec, 0x0bb8, 0x1110, 0x2220, 0x4440, 0x8880,
2409 	0x0163, 0x02c6, 0x1104, 0x0758, 0x0eb0, 0x2be0, 0x6140, 0xc280,
2410 	0x02fd, 0x01c6, 0x0b5c, 0x1108, 0x07b0, 0x25a0, 0x8840, 0x6180,
2411 	0x0801, 0x012e, 0x025c, 0x04b8, 0x1370, 0x26e0, 0x57c0, 0xb580,
2412 	0x0401, 0x0802, 0x015c, 0x02b8, 0x22b0, 0x13e0, 0x7140, 0xe280,
2413 	0x0201, 0x0402, 0x0804, 0x01b8, 0x11b0, 0x31a0, 0x8040, 0x7180,
2414 	0x0101, 0x0202, 0x0404, 0x0808, 0x1010, 0x2020, 0x4040, 0x8080,
2415 	0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080,
2416 	0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000,
2417 };
2418 
2419 static int decode_syndrome(u16 syndrome, const u16 *vectors, unsigned num_vecs,
2420 			   unsigned v_dim)
2421 {
2422 	unsigned int i, err_sym;
2423 
2424 	for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) {
2425 		u16 s = syndrome;
2426 		unsigned v_idx =  err_sym * v_dim;
2427 		unsigned v_end = (err_sym + 1) * v_dim;
2428 
2429 		/* walk over all 16 bits of the syndrome */
2430 		for (i = 1; i < (1U << 16); i <<= 1) {
2431 
2432 			/* if bit is set in that eigenvector... */
2433 			if (v_idx < v_end && vectors[v_idx] & i) {
2434 				u16 ev_comp = vectors[v_idx++];
2435 
2436 				/* ... and bit set in the modified syndrome, */
2437 				if (s & i) {
2438 					/* remove it. */
2439 					s ^= ev_comp;
2440 
2441 					if (!s)
2442 						return err_sym;
2443 				}
2444 
2445 			} else if (s & i)
2446 				/* can't get to zero, move to next symbol */
2447 				break;
2448 		}
2449 	}
2450 
2451 	edac_dbg(0, "syndrome(%x) not found\n", syndrome);
2452 	return -1;
2453 }
2454 
2455 static int map_err_sym_to_channel(int err_sym, int sym_size)
2456 {
	if (sym_size == 4)
		switch (err_sym) {
		case 0x20:
		case 0x21:
			return 0;
		case 0x22:
		case 0x23:
			return 1;
		default:
			return err_sym >> 4;
		}
	/* x8 symbols */
	else
		switch (err_sym) {
		/* imaginary bits not in a DIMM */
		case 0x10:
			WARN(1, "Invalid error symbol: 0x%x\n", err_sym);
			return -1;
		case 0x11:
			return 0;
		case 0x12:
			return 1;
		default:
			return err_sym >> 3;
		}
2491 	return -1;
2492 }
2493 
2494 static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome)
2495 {
2496 	struct amd64_pvt *pvt = mci->pvt_info;
2497 	int err_sym = -1;
2498 
2499 	if (pvt->ecc_sym_sz == 8)
2500 		err_sym = decode_syndrome(syndrome, x8_vectors,
2501 					  ARRAY_SIZE(x8_vectors),
2502 					  pvt->ecc_sym_sz);
2503 	else if (pvt->ecc_sym_sz == 4)
2504 		err_sym = decode_syndrome(syndrome, x4_vectors,
2505 					  ARRAY_SIZE(x4_vectors),
2506 					  pvt->ecc_sym_sz);
2507 	else {
2508 		amd64_warn("Illegal syndrome type: %u\n", pvt->ecc_sym_sz);
2509 		return err_sym;
2510 	}
2511 
2512 	return map_err_sym_to_channel(err_sym, pvt->ecc_sym_sz);
2513 }
2514 
2515 static void __log_ecc_error(struct mem_ctl_info *mci, struct err_info *err,
2516 			    u8 ecc_type)
2517 {
2518 	enum hw_event_mc_err_type err_type;
2519 	const char *string;
2520 
2521 	if (ecc_type == 2)
2522 		err_type = HW_EVENT_ERR_CORRECTED;
2523 	else if (ecc_type == 1)
2524 		err_type = HW_EVENT_ERR_UNCORRECTED;
2525 	else if (ecc_type == 3)
2526 		err_type = HW_EVENT_ERR_DEFERRED;
2527 	else {
2528 		WARN(1, "Something is rotten in the state of Denmark.\n");
2529 		return;
2530 	}
2531 
2532 	switch (err->err_code) {
2533 	case DECODE_OK:
2534 		string = "";
2535 		break;
2536 	case ERR_NODE:
2537 		string = "Failed to map error addr to a node";
2538 		break;
2539 	case ERR_CSROW:
2540 		string = "Failed to map error addr to a csrow";
2541 		break;
2542 	case ERR_CHANNEL:
2543 		string = "Unknown syndrome - possible error reporting race";
2544 		break;
2545 	case ERR_SYND:
2546 		string = "MCA_SYND not valid - unknown syndrome and csrow";
2547 		break;
2548 	case ERR_NORM_ADDR:
2549 		string = "Cannot decode normalized address";
2550 		break;
2551 	default:
2552 		string = "WTF error";
2553 		break;
2554 	}
2555 
2556 	edac_mc_handle_error(err_type, mci, 1,
2557 			     err->page, err->offset, err->syndrome,
2558 			     err->csrow, err->channel, -1,
2559 			     string, "");
2560 }
2561 
2562 static inline void decode_bus_error(int node_id, struct mce *m)
2563 {
2564 	struct mem_ctl_info *mci;
2565 	struct amd64_pvt *pvt;
2566 	u8 ecc_type = (m->status >> 45) & 0x3;
2567 	u8 xec = XEC(m->status, 0x1f);
2568 	u16 ec = EC(m->status);
2569 	u64 sys_addr;
2570 	struct err_info err;
2571 
2572 	mci = edac_mc_find(node_id);
2573 	if (!mci)
2574 		return;
2575 
2576 	pvt = mci->pvt_info;
2577 
2578 	/* Bail out early if this was an 'observed' error */
2579 	if (PP(ec) == NBSL_PP_OBS)
2580 		return;
2581 
2582 	/* Do only ECC errors */
2583 	if (xec && xec != F10_NBSL_EXT_ERR_ECC)
2584 		return;
2585 
2586 	memset(&err, 0, sizeof(err));
2587 
2588 	sys_addr = get_error_address(pvt, m);
2589 
2590 	if (ecc_type == 2)
2591 		err.syndrome = extract_syndrome(m->status);
2592 
2593 	pvt->ops->map_sysaddr_to_csrow(mci, sys_addr, &err);
2594 
2595 	__log_ecc_error(mci, &err, ecc_type);
2596 }
2597 
2598 /*
2599  * To find the UMC channel represented by this bank we need to match on its
2600  * instance_id. The instance_id of a bank is held in the lower 32 bits of its
2601  * IPID.
2602  *
2603  * Currently, we can derive the channel number by looking at the 6th nibble in
2604  * the instance_id. For example, instance_id=0xYXXXXX where Y is the channel
2605  * number.
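 * E.g. an instance_id of 0x350000 maps to UMC channel 3.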
2606  */
2607 static int find_umc_channel(struct mce *m)
2608 {
2609 	return (m->ipid & GENMASK(31, 0)) >> 20;
2610 }
2611 
2612 static void decode_umc_error(int node_id, struct mce *m)
2613 {
2614 	u8 ecc_type = (m->status >> 45) & 0x3;
2615 	struct mem_ctl_info *mci;
2616 	struct amd64_pvt *pvt;
2617 	struct err_info err;
2618 	u64 sys_addr;
2619 
2620 	mci = edac_mc_find(node_id);
2621 	if (!mci)
2622 		return;
2623 
2624 	pvt = mci->pvt_info;
2625 
2626 	memset(&err, 0, sizeof(err));
2627 
2628 	if (m->status & MCI_STATUS_DEFERRED)
2629 		ecc_type = 3;
2630 
2631 	err.channel = find_umc_channel(m);
2632 
2633 	if (!(m->status & MCI_STATUS_SYNDV)) {
2634 		err.err_code = ERR_SYND;
2635 		goto log_error;
2636 	}
2637 
2638 	if (ecc_type == 2) {
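		/* Bits [23:18] of MCA_SYND hold the syndrome length in bits. */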
2639 		u8 length = (m->synd >> 18) & 0x3f;
2640 
2641 		if (length)
2642 			err.syndrome = (m->synd >> 32) & GENMASK(length - 1, 0);
2643 		else
2644 			err.err_code = ERR_CHANNEL;
2645 	}
2646 
2647 	err.csrow = m->synd & 0x7;
2648 
2649 	if (umc_normaddr_to_sysaddr(m->addr, pvt->mc_node_id, err.channel, &sys_addr)) {
2650 		err.err_code = ERR_NORM_ADDR;
2651 		goto log_error;
2652 	}
2653 
2654 	error_address_to_page_and_offset(sys_addr, &err);
2655 
2656 log_error:
2657 	__log_ecc_error(mci, &err, ecc_type);
2658 }
2659 
2660 /*
2661  * Use pvt->F3 which contains the F3 CPU PCI device to get the related
2662  * F1 (AddrMap) and F2 (Dct) devices. Return negative value on error.
2663  * Reserve F0 and F6 on systems with a UMC.
2664  */
2665 static int
2666 reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 pci_id1, u16 pci_id2)
2667 {
2668 	if (pvt->umc) {
2669 		pvt->F0 = pci_get_related_function(pvt->F3->vendor, pci_id1, pvt->F3);
2670 		if (!pvt->F0) {
2671 			amd64_err("F0 not found, device 0x%x (broken BIOS?)\n", pci_id1);
2672 			return -ENODEV;
2673 		}
2674 
2675 		pvt->F6 = pci_get_related_function(pvt->F3->vendor, pci_id2, pvt->F3);
2676 		if (!pvt->F6) {
2677 			pci_dev_put(pvt->F0);
2678 			pvt->F0 = NULL;
2679 
2680 			amd64_err("F6 not found: device 0x%x (broken BIOS?)\n", pci_id2);
2681 			return -ENODEV;
2682 		}
2683 
2684 		edac_dbg(1, "F0: %s\n", pci_name(pvt->F0));
2685 		edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));
2686 		edac_dbg(1, "F6: %s\n", pci_name(pvt->F6));
2687 
2688 		return 0;
2689 	}
2690 
2691 	/* Reserve the ADDRESS MAP Device */
2692 	pvt->F1 = pci_get_related_function(pvt->F3->vendor, pci_id1, pvt->F3);
2693 	if (!pvt->F1) {
2694 		amd64_err("F1 not found: device 0x%x (broken BIOS?)\n", pci_id1);
2695 		return -ENODEV;
2696 	}
2697 
2698 	/* Reserve the DCT Device */
2699 	pvt->F2 = pci_get_related_function(pvt->F3->vendor, pci_id2, pvt->F3);
2700 	if (!pvt->F2) {
2701 		pci_dev_put(pvt->F1);
2702 		pvt->F1 = NULL;
2703 
2704 		amd64_err("F2 not found: device 0x%x (broken BIOS?)\n", pci_id2);
2705 		return -ENODEV;
2706 	}
2707 
2708 	edac_dbg(1, "F1: %s\n", pci_name(pvt->F1));
2709 	edac_dbg(1, "F2: %s\n", pci_name(pvt->F2));
2710 	edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));
2711 
2712 	return 0;
2713 }
2714 
2715 static void free_mc_sibling_devs(struct amd64_pvt *pvt)
2716 {
2717 	if (pvt->umc) {
2718 		pci_dev_put(pvt->F0);
2719 		pci_dev_put(pvt->F6);
2720 	} else {
2721 		pci_dev_put(pvt->F1);
2722 		pci_dev_put(pvt->F2);
2723 	}
2724 }
2725 
2726 static void determine_ecc_sym_sz(struct amd64_pvt *pvt)
2727 {
2728 	pvt->ecc_sym_sz = 4;
2729 
2730 	if (pvt->umc) {
2731 		u8 i;
2732 
2733 		for_each_umc(i) {
2734 			/* Check enabled channels only: */
2735 			if (pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) {
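				/*
				 * ecc_ctrl bit 9 selects x16 ECC symbols and
				 * bit 7 selects x8; otherwise keep the x4
				 * default.
				 */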
2736 				if (pvt->umc[i].ecc_ctrl & BIT(9)) {
2737 					pvt->ecc_sym_sz = 16;
2738 					return;
2739 				} else if (pvt->umc[i].ecc_ctrl & BIT(7)) {
2740 					pvt->ecc_sym_sz = 8;
2741 					return;
2742 				}
2743 			}
2744 		}
2745 	} else if (pvt->fam >= 0x10) {
2746 		u32 tmp;
2747 
2748 		amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
2749 		/* F16h has only DCT0, so no need to read dbam1. */
2750 		if (pvt->fam != 0x16)
2751 			amd64_read_dct_pci_cfg(pvt, 1, DBAM0, &pvt->dbam1);
2752 
2753 		/* F10h, revD and later can do x8 ECC too. */
2754 		if ((pvt->fam > 0x10 || pvt->model > 7) && tmp & BIT(25))
2755 			pvt->ecc_sym_sz = 8;
2756 	}
2757 }
2758 
2759 /*
2760  * Retrieve the hardware registers of the memory controller.
2761  */
2762 static void __read_mc_regs_df(struct amd64_pvt *pvt)
2763 {
2764 	u8 nid = pvt->mc_node_id;
2765 	struct amd64_umc *umc;
2766 	u32 i, umc_base;
2767 
2768 	/* Read registers from each UMC */
2769 	for_each_umc(i) {
2770 
2771 		umc_base = get_umc_base(i);
2772 		umc = &pvt->umc[i];
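		/* UMC registers are reached over SMN, not PCI config space. */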
2773 
2774 		amd_smn_read(nid, umc_base + UMCCH_DIMM_CFG, &umc->dimm_cfg);
2775 		amd_smn_read(nid, umc_base + UMCCH_UMC_CFG, &umc->umc_cfg);
2776 		amd_smn_read(nid, umc_base + UMCCH_SDP_CTRL, &umc->sdp_ctrl);
2777 		amd_smn_read(nid, umc_base + UMCCH_ECC_CTRL, &umc->ecc_ctrl);
2778 		amd_smn_read(nid, umc_base + UMCCH_UMC_CAP_HI, &umc->umc_cap_hi);
2779 	}
2780 }
2781 
2782 /*
2783  * Retrieve the hardware registers of the memory controller (this includes the
2784  * 'Address Map' and 'Misc' device regs)
2785  */
2786 static void read_mc_regs(struct amd64_pvt *pvt)
2787 {
2788 	unsigned int range;
2789 	u64 msr_val;
2790 
2791 	/*
2792 	 * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
2793 	 * those are Read-As-Zero.
2794 	 */
2795 	rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
2796 	edac_dbg(0, "  TOP_MEM:  0x%016llx\n", pvt->top_mem);
2797 
2798 	/* Check first whether TOP_MEM2 is enabled: */
2799 	rdmsrl(MSR_K8_SYSCFG, msr_val);
2800 	if (msr_val & BIT(21)) {
2801 		rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
2802 		edac_dbg(0, "  TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
2803 	} else {
2804 		edac_dbg(0, "  TOP_MEM2 disabled\n");
2805 	}
2806 
2807 	if (pvt->umc) {
2808 		__read_mc_regs_df(pvt);
2809 		amd64_read_pci_cfg(pvt->F0, DF_DHAR, &pvt->dhar);
2810 
2811 		goto skip;
2812 	}
2813 
2814 	amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap);
2815 
2816 	read_dram_ctl_register(pvt);
2817 
2818 	for (range = 0; range < DRAM_RANGES; range++) {
2819 		u8 rw;
2820 
2821 		/* read settings for this DRAM range */
2822 		read_dram_base_limit_regs(pvt, range);
2823 
2824 		rw = dram_rw(pvt, range);
2825 		if (!rw)
2826 			continue;
2827 
2828 		edac_dbg(1, "  DRAM range[%d], base: 0x%016llx; limit: 0x%016llx\n",
2829 			 range,
2830 			 get_dram_base(pvt, range),
2831 			 get_dram_limit(pvt, range));
2832 
2833 		edac_dbg(1, "   IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d\n",
2834 			 dram_intlv_en(pvt, range) ? "Enabled" : "Disabled",
2835 			 (rw & 0x1) ? "R" : "-",
2836 			 (rw & 0x2) ? "W" : "-",
2837 			 dram_intlv_sel(pvt, range),
2838 			 dram_dst_node(pvt, range));
2839 	}
2840 
2841 	amd64_read_pci_cfg(pvt->F1, DHAR, &pvt->dhar);
2842 	amd64_read_dct_pci_cfg(pvt, 0, DBAM0, &pvt->dbam0);
2843 
2844 	amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare);
2845 
2846 	amd64_read_dct_pci_cfg(pvt, 0, DCLR0, &pvt->dclr0);
2847 	amd64_read_dct_pci_cfg(pvt, 0, DCHR0, &pvt->dchr0);
2848 
2849 	if (!dct_ganging_enabled(pvt)) {
2850 		amd64_read_dct_pci_cfg(pvt, 1, DCLR0, &pvt->dclr1);
2851 		amd64_read_dct_pci_cfg(pvt, 1, DCHR0, &pvt->dchr1);
2852 	}
2853 
2854 skip:
2855 	read_dct_base_mask(pvt);
2856 
2857 	determine_memory_type(pvt);
2858 	edac_dbg(1, "  DIMM type: %s\n", edac_mem_types[pvt->dram_type]);
2859 
2860 	determine_ecc_sym_sz(pvt);
2861 }
2862 
2863 /*
2864  * NOTE: CPU Revision Dependent code
2865  *
2866  * Input:
2867  *	@csrow_nr ChipSelect Row Number (0..NUM_CHIPSELECTS-1)
2868  *	k8 private pointer to -->
2869  *			DRAM Bank Address mapping register
2870  *			node_id
2871  *			DCL register where dual_channel_active is
2872  *
 * The DBAM register consists of 4 sets of 4 bits each, defined as follows:
2874  *
2875  * Bits:	CSROWs
2876  * 0-3		CSROWs 0 and 1
2877  * 4-7		CSROWs 2 and 3
2878  * 8-11		CSROWs 4 and 5
2879  * 12-15	CSROWs 6 and 7
2880  *
2881  * Values range from: 0 to 15
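 * (e.g. a DBAM value of 0x00003210 assigns value 0 to CSROWs 0/1, 1 to
 * CSROWs 2/3, 2 to CSROWs 4/5 and 3 to CSROWs 6/7)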
2882  * The meaning of the values depends on CPU revision and dual-channel state,
 * see the relevant BKDG for more info.
2884  *
 * The memory controller provides for a total of only 8 CSROWs in its current
 * architecture. Each "pair" of CSROWs normally represents just one DIMM in
 * single-channel mode or two (2) DIMMs in dual-channel mode.
2888  *
2889  * The following code logic collapses the various tables for CSROW based on CPU
2890  * revision.
2891  *
2892  * Returns:
2893  *	The number of PAGE_SIZE pages on the specified CSROW number it
2894  *	encompasses
2895  *
2896  */
2897 static u32 get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr_orig)
2898 {
2899 	u32 dbam = dct ? pvt->dbam1 : pvt->dbam0;
2900 	int csrow_nr = csrow_nr_orig;
2901 	u32 cs_mode, nr_pages;
2902 
2903 	if (!pvt->umc) {
2904 		csrow_nr >>= 1;
2905 		cs_mode = DBAM_DIMM(csrow_nr, dbam);
2906 	} else {
2907 		cs_mode = f17_get_cs_mode(csrow_nr >> 1, dct, pvt);
2908 	}
2909 
2910 	nr_pages   = pvt->ops->dbam_to_cs(pvt, dct, cs_mode, csrow_nr);
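	/* dbam_to_cs() returns the chip select size in MB; convert to pages. */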
2911 	nr_pages <<= 20 - PAGE_SHIFT;
2912 
2913 	edac_dbg(0, "csrow: %d, channel: %d, DBAM idx: %d\n",
2914 		    csrow_nr_orig, dct,  cs_mode);
2915 	edac_dbg(0, "nr_pages/channel: %u\n", nr_pages);
2916 
2917 	return nr_pages;
2918 }
2919 
2920 static int init_csrows_df(struct mem_ctl_info *mci)
2921 {
2922 	struct amd64_pvt *pvt = mci->pvt_info;
2923 	enum edac_type edac_mode = EDAC_NONE;
2924 	enum dev_type dev_type = DEV_UNKNOWN;
2925 	struct dimm_info *dimm;
2926 	int empty = 1;
2927 	u8 umc, cs;
2928 
2929 	if (mci->edac_ctl_cap & EDAC_FLAG_S16ECD16ED) {
2930 		edac_mode = EDAC_S16ECD16ED;
2931 		dev_type = DEV_X16;
2932 	} else if (mci->edac_ctl_cap & EDAC_FLAG_S8ECD8ED) {
2933 		edac_mode = EDAC_S8ECD8ED;
2934 		dev_type = DEV_X8;
2935 	} else if (mci->edac_ctl_cap & EDAC_FLAG_S4ECD4ED) {
2936 		edac_mode = EDAC_S4ECD4ED;
2937 		dev_type = DEV_X4;
2938 	} else if (mci->edac_ctl_cap & EDAC_FLAG_SECDED) {
2939 		edac_mode = EDAC_SECDED;
2940 	}
2941 
2942 	for_each_umc(umc) {
2943 		for_each_chip_select(cs, umc, pvt) {
2944 			if (!csrow_enabled(cs, umc, pvt))
2945 				continue;
2946 
2947 			empty = 0;
2948 			dimm = mci->csrows[cs]->channels[umc]->dimm;
2949 
2950 			edac_dbg(1, "MC node: %d, csrow: %d\n",
2951 					pvt->mc_node_id, cs);
2952 
2953 			dimm->nr_pages = get_csrow_nr_pages(pvt, umc, cs);
2954 			dimm->mtype = pvt->dram_type;
2955 			dimm->edac_mode = edac_mode;
2956 			dimm->dtype = dev_type;
2957 			dimm->grain = 64;
2958 		}
2959 	}
2960 
2961 	return empty;
2962 }
2963 
2964 /*
2965  * Initialize the array of csrow attribute instances, based on the values
2966  * from pci config hardware registers.
2967  */
2968 static int init_csrows(struct mem_ctl_info *mci)
2969 {
2970 	struct amd64_pvt *pvt = mci->pvt_info;
2971 	enum edac_type edac_mode = EDAC_NONE;
2972 	struct csrow_info *csrow;
2973 	struct dimm_info *dimm;
2974 	int i, j, empty = 1;
2975 	int nr_pages = 0;
2976 	u32 val;
2977 
2978 	if (pvt->umc)
2979 		return init_csrows_df(mci);
2980 
2981 	amd64_read_pci_cfg(pvt->F3, NBCFG, &val);
2982 
2983 	pvt->nbcfg = val;
2984 
2985 	edac_dbg(0, "node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
2986 		 pvt->mc_node_id, val,
2987 		 !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE));
2988 
2989 	/*
2990 	 * We iterate over DCT0 here but we look at DCT1 in parallel, if needed.
2991 	 */
2992 	for_each_chip_select(i, 0, pvt) {
2993 		bool row_dct0 = !!csrow_enabled(i, 0, pvt);
2994 		bool row_dct1 = false;
2995 
2996 		if (pvt->fam != 0xf)
2997 			row_dct1 = !!csrow_enabled(i, 1, pvt);
2998 
2999 		if (!row_dct0 && !row_dct1)
3000 			continue;
3001 
3002 		csrow = mci->csrows[i];
3003 		empty = 0;
3004 
3005 		edac_dbg(1, "MC node: %d, csrow: %d\n",
3006 			    pvt->mc_node_id, i);
3007 
3008 		if (row_dct0) {
3009 			nr_pages = get_csrow_nr_pages(pvt, 0, i);
3010 			csrow->channels[0]->dimm->nr_pages = nr_pages;
3011 		}
3012 
3013 		/* K8 has only one DCT */
3014 		if (pvt->fam != 0xf && row_dct1) {
3015 			int row_dct1_pages = get_csrow_nr_pages(pvt, 1, i);
3016 
3017 			csrow->channels[1]->dimm->nr_pages = row_dct1_pages;
3018 			nr_pages += row_dct1_pages;
3019 		}
3020 
3021 		edac_dbg(1, "Total csrow%d pages: %u\n", i, nr_pages);
3022 
3023 		/* Determine DIMM ECC mode: */
3024 		if (pvt->nbcfg & NBCFG_ECC_ENABLE) {
3025 			edac_mode = (pvt->nbcfg & NBCFG_CHIPKILL)
3026 					? EDAC_S4ECD4ED
3027 					: EDAC_SECDED;
3028 		}
3029 
3030 		for (j = 0; j < pvt->channel_count; j++) {
3031 			dimm = csrow->channels[j]->dimm;
3032 			dimm->mtype = pvt->dram_type;
3033 			dimm->edac_mode = edac_mode;
3034 			dimm->grain = 64;
3035 		}
3036 	}
3037 
3038 	return empty;
3039 }
3040 
3041 /* get all cores on this DCT */
3042 static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, u16 nid)
3043 {
3044 	int cpu;
3045 
3046 	for_each_online_cpu(cpu)
3047 		if (amd_get_nb_id(cpu) == nid)
3048 			cpumask_set_cpu(cpu, mask);
3049 }
3050 
3051 /* check MCG_CTL on all the cpus on this node */
3052 static bool nb_mce_bank_enabled_on_node(u16 nid)
3053 {
3054 	cpumask_var_t mask;
3055 	int cpu, nbe;
3056 	bool ret = false;
3057 
3058 	if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
3059 		amd64_warn("%s: Error allocating mask\n", __func__);
3060 		return false;
3061 	}
3062 
3063 	get_cpus_on_this_dct_cpumask(mask, nid);
3064 
3065 	rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);
3066 
3067 	for_each_cpu(cpu, mask) {
3068 		struct msr *reg = per_cpu_ptr(msrs, cpu);
3069 		nbe = reg->l & MSR_MCGCTL_NBE;
3070 
3071 		edac_dbg(0, "core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
3072 			 cpu, reg->q,
3073 			 (nbe ? "enabled" : "disabled"));
3074 
3075 		if (!nbe)
3076 			goto out;
3077 	}
3078 	ret = true;
3079 
3080 out:
3081 	free_cpumask_var(mask);
3082 	return ret;
3083 }
3084 
3085 static int toggle_ecc_err_reporting(struct ecc_settings *s, u16 nid, bool on)
3086 {
3087 	cpumask_var_t cmask;
3088 	int cpu;
3089 
3090 	if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
3091 		amd64_warn("%s: error allocating mask\n", __func__);
3092 		return -ENOMEM;
3093 	}
3094 
3095 	get_cpus_on_this_dct_cpumask(cmask, nid);
3096 
3097 	rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
3098 
3099 	for_each_cpu(cpu, cmask) {
3100 
3101 		struct msr *reg = per_cpu_ptr(msrs, cpu);
3102 
3103 		if (on) {
3104 			if (reg->l & MSR_MCGCTL_NBE)
3105 				s->flags.nb_mce_enable = 1;
3106 
3107 			reg->l |= MSR_MCGCTL_NBE;
3108 		} else {
3109 			/*
3110 			 * Turn off NB MCE reporting only when it was off before
3111 			 */
3112 			if (!s->flags.nb_mce_enable)
3113 				reg->l &= ~MSR_MCGCTL_NBE;
3114 		}
3115 	}
3116 	wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
3117 
3118 	free_cpumask_var(cmask);
3119 
3120 	return 0;
3121 }
3122 
3123 static bool enable_ecc_error_reporting(struct ecc_settings *s, u16 nid,
3124 				       struct pci_dev *F3)
3125 {
3126 	bool ret = true;
3127 	u32 value, mask = 0x3;		/* UECC/CECC enable */
3128 
3129 	if (toggle_ecc_err_reporting(s, nid, ON)) {
3130 		amd64_warn("Error enabling ECC reporting over MCGCTL!\n");
3131 		return false;
3132 	}
3133 
3134 	amd64_read_pci_cfg(F3, NBCTL, &value);
3135 
3136 	s->old_nbctl   = value & mask;
3137 	s->nbctl_valid = true;
3138 
3139 	value |= mask;
3140 	amd64_write_pci_cfg(F3, NBCTL, value);
3141 
3142 	amd64_read_pci_cfg(F3, NBCFG, &value);
3143 
3144 	edac_dbg(0, "1: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
3145 		 nid, value, !!(value & NBCFG_ECC_ENABLE));
3146 
3147 	if (!(value & NBCFG_ECC_ENABLE)) {
3148 		amd64_warn("DRAM ECC disabled on this node, enabling...\n");
3149 
3150 		s->flags.nb_ecc_prev = 0;
3151 
3152 		/* Attempt to turn on DRAM ECC Enable */
3153 		value |= NBCFG_ECC_ENABLE;
3154 		amd64_write_pci_cfg(F3, NBCFG, value);
3155 
3156 		amd64_read_pci_cfg(F3, NBCFG, &value);
3157 
3158 		if (!(value & NBCFG_ECC_ENABLE)) {
3159 			amd64_warn("Hardware rejected DRAM ECC enable,"
3160 				   "check memory DIMM configuration.\n");
3161 			ret = false;
3162 		} else {
3163 			amd64_info("Hardware accepted DRAM ECC Enable\n");
3164 		}
3165 	} else {
3166 		s->flags.nb_ecc_prev = 1;
3167 	}
3168 
3169 	edac_dbg(0, "2: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
3170 		 nid, value, !!(value & NBCFG_ECC_ENABLE));
3171 
3172 	return ret;
3173 }
3174 
3175 static void restore_ecc_error_reporting(struct ecc_settings *s, u16 nid,
3176 					struct pci_dev *F3)
3177 {
3178 	u32 value, mask = 0x3;		/* UECC/CECC enable */
3179 
3180 	if (!s->nbctl_valid)
3181 		return;
3182 
3183 	amd64_read_pci_cfg(F3, NBCTL, &value);
3184 	value &= ~mask;
3185 	value |= s->old_nbctl;
3186 
3187 	amd64_write_pci_cfg(F3, NBCTL, value);
3188 
	/* Restore the BIOS's DRAM ECC "off" setting which we force-enabled. */
3190 	if (!s->flags.nb_ecc_prev) {
3191 		amd64_read_pci_cfg(F3, NBCFG, &value);
3192 		value &= ~NBCFG_ECC_ENABLE;
3193 		amd64_write_pci_cfg(F3, NBCFG, value);
3194 	}
3195 
3196 	/* restore the NB Enable MCGCTL bit */
3197 	if (toggle_ecc_err_reporting(s, nid, OFF))
3198 		amd64_warn("Error restoring NB MCGCTL settings!\n");
3199 }
3200 
3201 static bool ecc_enabled(struct amd64_pvt *pvt)
3202 {
3203 	u16 nid = pvt->mc_node_id;
3204 	bool nb_mce_en = false;
3205 	u8 ecc_en = 0, i;
3206 	u32 value;
3207 
3208 	if (boot_cpu_data.x86 >= 0x17) {
3209 		u8 umc_en_mask = 0, ecc_en_mask = 0;
3210 		struct amd64_umc *umc;
3211 
3212 		for_each_umc(i) {
3213 			umc = &pvt->umc[i];
3214 
3215 			/* Only check enabled UMCs. */
3216 			if (!(umc->sdp_ctrl & UMC_SDP_INIT))
3217 				continue;
3218 
3219 			umc_en_mask |= BIT(i);
3220 
3221 			if (umc->umc_cap_hi & UMC_ECC_ENABLED)
3222 				ecc_en_mask |= BIT(i);
3223 		}
3224 
3225 		/* Check whether at least one UMC is enabled: */
3226 		if (umc_en_mask)
3227 			ecc_en = umc_en_mask == ecc_en_mask;
3228 		else
3229 			edac_dbg(0, "Node %d: No enabled UMCs.\n", nid);
3230 
3231 		/* Assume UMC MCA banks are enabled. */
3232 		nb_mce_en = true;
3233 	} else {
3234 		amd64_read_pci_cfg(pvt->F3, NBCFG, &value);
3235 
3236 		ecc_en = !!(value & NBCFG_ECC_ENABLE);
3237 
3238 		nb_mce_en = nb_mce_bank_enabled_on_node(nid);
3239 		if (!nb_mce_en)
3240 			edac_dbg(0, "NB MCE bank disabled, set MSR 0x%08x[4] on node %d to enable.\n",
3241 				     MSR_IA32_MCG_CTL, nid);
3242 	}
3243 
3244 	amd64_info("Node %d: DRAM ECC %s.\n",
3245 		   nid, (ecc_en ? "enabled" : "disabled"));
3246 
	return ecc_en && nb_mce_en;
3251 }
3252 
3253 static inline void
3254 f17h_determine_edac_ctl_cap(struct mem_ctl_info *mci, struct amd64_pvt *pvt)
3255 {
3256 	u8 i, ecc_en = 1, cpk_en = 1, dev_x4 = 1, dev_x16 = 1;
3257 
3258 	for_each_umc(i) {
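		/*
		 * AND the bits across all enabled UMCs so that a capability
		 * is reported only if every initialized channel supports it.
		 */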
3259 		if (pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) {
3260 			ecc_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_ENABLED);
3261 			cpk_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_CHIPKILL_CAP);
3262 
3263 			dev_x4  &= !!(pvt->umc[i].dimm_cfg & BIT(6));
3264 			dev_x16 &= !!(pvt->umc[i].dimm_cfg & BIT(7));
3265 		}
3266 	}
3267 
3268 	/* Set chipkill only if ECC is enabled: */
3269 	if (ecc_en) {
3270 		mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
3271 
3272 		if (!cpk_en)
3273 			return;
3274 
3275 		if (dev_x4)
3276 			mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
3277 		else if (dev_x16)
3278 			mci->edac_ctl_cap |= EDAC_FLAG_S16ECD16ED;
3279 		else
3280 			mci->edac_ctl_cap |= EDAC_FLAG_S8ECD8ED;
3281 	}
3282 }
3283 
3284 static void setup_mci_misc_attrs(struct mem_ctl_info *mci)
3285 {
3286 	struct amd64_pvt *pvt = mci->pvt_info;
3287 
3288 	mci->mtype_cap		= MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
3289 	mci->edac_ctl_cap	= EDAC_FLAG_NONE;
3290 
3291 	if (pvt->umc) {
3292 		f17h_determine_edac_ctl_cap(mci, pvt);
3293 	} else {
3294 		if (pvt->nbcap & NBCAP_SECDED)
3295 			mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
3296 
3297 		if (pvt->nbcap & NBCAP_CHIPKILL)
3298 			mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
3299 	}
3300 
3301 	mci->edac_cap		= determine_edac_cap(pvt);
3302 	mci->mod_name		= EDAC_MOD_STR;
3303 	mci->ctl_name		= fam_type->ctl_name;
3304 	mci->dev_name		= pci_name(pvt->F3);
3305 	mci->ctl_page_to_phys	= NULL;
3306 
3307 	/* memory scrubber interface */
3308 	mci->set_sdram_scrub_rate = set_scrub_rate;
3309 	mci->get_sdram_scrub_rate = get_scrub_rate;
3310 }
3311 
3312 /*
3313  * returns a pointer to the family descriptor on success, NULL otherwise.
3314  */
3315 static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
3316 {
3317 	pvt->ext_model  = boot_cpu_data.x86_model >> 4;
3318 	pvt->stepping	= boot_cpu_data.x86_stepping;
3319 	pvt->model	= boot_cpu_data.x86_model;
3320 	pvt->fam	= boot_cpu_data.x86;
3321 
3322 	switch (pvt->fam) {
3323 	case 0xf:
3324 		fam_type	= &family_types[K8_CPUS];
3325 		pvt->ops	= &family_types[K8_CPUS].ops;
3326 		break;
3327 
3328 	case 0x10:
3329 		fam_type	= &family_types[F10_CPUS];
3330 		pvt->ops	= &family_types[F10_CPUS].ops;
3331 		break;
3332 
3333 	case 0x15:
3334 		if (pvt->model == 0x30) {
3335 			fam_type = &family_types[F15_M30H_CPUS];
3336 			pvt->ops = &family_types[F15_M30H_CPUS].ops;
3337 			break;
3338 		} else if (pvt->model == 0x60) {
3339 			fam_type = &family_types[F15_M60H_CPUS];
3340 			pvt->ops = &family_types[F15_M60H_CPUS].ops;
3341 			break;
3342 		}
3343 
3344 		fam_type	= &family_types[F15_CPUS];
3345 		pvt->ops	= &family_types[F15_CPUS].ops;
3346 		break;
3347 
3348 	case 0x16:
3349 		if (pvt->model == 0x30) {
3350 			fam_type = &family_types[F16_M30H_CPUS];
3351 			pvt->ops = &family_types[F16_M30H_CPUS].ops;
3352 			break;
3353 		}
3354 		fam_type	= &family_types[F16_CPUS];
3355 		pvt->ops	= &family_types[F16_CPUS].ops;
3356 		break;
3357 
3358 	case 0x17:
3359 		if (pvt->model >= 0x10 && pvt->model <= 0x2f) {
3360 			fam_type = &family_types[F17_M10H_CPUS];
3361 			pvt->ops = &family_types[F17_M10H_CPUS].ops;
3362 			break;
3363 		} else if (pvt->model >= 0x30 && pvt->model <= 0x3f) {
3364 			fam_type = &family_types[F17_M30H_CPUS];
3365 			pvt->ops = &family_types[F17_M30H_CPUS].ops;
3366 			break;
3367 		} else if (pvt->model >= 0x60 && pvt->model <= 0x6f) {
3368 			fam_type = &family_types[F17_M60H_CPUS];
3369 			pvt->ops = &family_types[F17_M60H_CPUS].ops;
3370 			break;
3371 		} else if (pvt->model >= 0x70 && pvt->model <= 0x7f) {
3372 			fam_type = &family_types[F17_M70H_CPUS];
3373 			pvt->ops = &family_types[F17_M70H_CPUS].ops;
3374 			break;
3375 		}
3376 		/* fall through */
3377 	case 0x18:
3378 		fam_type	= &family_types[F17_CPUS];
3379 		pvt->ops	= &family_types[F17_CPUS].ops;
3380 
3381 		if (pvt->fam == 0x18)
3382 			family_types[F17_CPUS].ctl_name = "F18h";
3383 		break;
3384 
3385 	case 0x19:
3386 		fam_type	= &family_types[F19_CPUS];
3387 		pvt->ops	= &family_types[F19_CPUS].ops;
3388 		family_types[F19_CPUS].ctl_name = "F19h";
3389 		break;
3390 
3391 	default:
3392 		amd64_err("Unsupported family!\n");
3393 		return NULL;
3394 	}
3395 
3396 	amd64_info("%s %sdetected (node %d).\n", fam_type->ctl_name,
3397 		     (pvt->fam == 0xf ?
3398 				(pvt->ext_model >= K8_REV_F  ? "revF or later "
3399 							     : "revE or earlier ")
3400 				 : ""), pvt->mc_node_id);
3401 	return fam_type;
3402 }
3403 
3404 static const struct attribute_group *amd64_edac_attr_groups[] = {
3405 #ifdef CONFIG_EDAC_DEBUG
3406 	&amd64_edac_dbg_group,
3407 #endif
3408 #ifdef CONFIG_EDAC_AMD64_ERROR_INJECTION
3409 	&amd64_edac_inj_group,
3410 #endif
3411 	NULL
3412 };
3413 
3414 static int hw_info_get(struct amd64_pvt *pvt)
3415 {
3416 	u16 pci_id1, pci_id2;
3417 	int ret;
3418 
3419 	if (pvt->fam >= 0x17) {
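		/*
		 * Families 0x17 and later use UMCs: reserve the DF F0/F6
		 * devices instead of the legacy F1/F2 pair.
		 */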
3420 		pvt->umc = kcalloc(fam_type->max_mcs, sizeof(struct amd64_umc), GFP_KERNEL);
3421 		if (!pvt->umc)
3422 			return -ENOMEM;
3423 
3424 		pci_id1 = fam_type->f0_id;
3425 		pci_id2 = fam_type->f6_id;
3426 	} else {
3427 		pci_id1 = fam_type->f1_id;
3428 		pci_id2 = fam_type->f2_id;
3429 	}
3430 
3431 	ret = reserve_mc_sibling_devs(pvt, pci_id1, pci_id2);
3432 	if (ret)
3433 		return ret;
3434 
3435 	read_mc_regs(pvt);
3436 
3437 	return 0;
3438 }
3439 
3440 static void hw_info_put(struct amd64_pvt *pvt)
3441 {
3442 	if (pvt->F0 || pvt->F1)
3443 		free_mc_sibling_devs(pvt);
3444 
3445 	kfree(pvt->umc);
3446 }
3447 
3448 static int init_one_instance(struct amd64_pvt *pvt)
3449 {
3450 	struct mem_ctl_info *mci = NULL;
3451 	struct edac_mc_layer layers[2];
3452 	int ret = -EINVAL;
3453 
3454 	/*
3455 	 * We need to determine how many memory channels there are. Then use
3456 	 * that information for calculating the size of the dynamic instance
3457 	 * tables in the 'mci' structure.
3458 	 */
3459 	pvt->channel_count = pvt->ops->early_channel_count(pvt);
3460 	if (pvt->channel_count < 0)
3461 		return ret;
3462 
3463 	ret = -ENOMEM;
3464 	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
3465 	layers[0].size = pvt->csels[0].b_cnt;
3466 	layers[0].is_virt_csrow = true;
3467 	layers[1].type = EDAC_MC_LAYER_CHANNEL;
3468 
3469 	/*
3470 	 * Always allocate two channels since we can have setups with DIMMs on
3471 	 * only one channel. Also, this simplifies handling later for the price
3472 	 * of a couple of KBs tops.
3473 	 */
3474 	layers[1].size = fam_type->max_mcs;
3475 	layers[1].is_virt_csrow = false;
3476 
3477 	mci = edac_mc_alloc(pvt->mc_node_id, ARRAY_SIZE(layers), layers, 0);
3478 	if (!mci)
3479 		return ret;
3480 
3481 	mci->pvt_info = pvt;
3482 	mci->pdev = &pvt->F3->dev;
3483 
3484 	setup_mci_misc_attrs(mci);
3485 
3486 	if (init_csrows(mci))
3487 		mci->edac_cap = EDAC_FLAG_NONE;
3488 
3489 	ret = -ENODEV;
3490 	if (edac_mc_add_mc_with_groups(mci, amd64_edac_attr_groups)) {
3491 		edac_dbg(1, "failed edac_mc_add_mc()\n");
3492 		edac_mc_free(mci);
3493 		return ret;
3494 	}
3495 
3496 	return 0;
3497 }
3498 
3499 static bool instance_has_memory(struct amd64_pvt *pvt)
3500 {
3501 	bool cs_enabled = false;
3502 	int cs = 0, dct = 0;
3503 
3504 	for (dct = 0; dct < fam_type->max_mcs; dct++) {
3505 		for_each_chip_select(cs, dct, pvt)
3506 			cs_enabled |= csrow_enabled(cs, dct, pvt);
3507 	}
3508 
3509 	return cs_enabled;
3510 }
3511 
3512 static int probe_one_instance(unsigned int nid)
3513 {
3514 	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
3515 	struct amd64_pvt *pvt = NULL;
3516 	struct ecc_settings *s;
3517 	int ret;
3518 
3519 	ret = -ENOMEM;
3520 	s = kzalloc(sizeof(struct ecc_settings), GFP_KERNEL);
3521 	if (!s)
3522 		goto err_out;
3523 
3524 	ecc_stngs[nid] = s;
3525 
3526 	pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
3527 	if (!pvt)
3528 		goto err_settings;
3529 
3530 	pvt->mc_node_id	= nid;
3531 	pvt->F3 = F3;
3532 
3533 	fam_type = per_family_init(pvt);
3534 	if (!fam_type)
3535 		goto err_enable;
3536 
3537 	ret = hw_info_get(pvt);
3538 	if (ret < 0)
3539 		goto err_enable;
3540 
3541 	ret = 0;
3542 	if (!instance_has_memory(pvt)) {
3543 		amd64_info("Node %d: No DIMMs detected.\n", nid);
3544 		goto err_enable;
3545 	}
3546 
3547 	if (!ecc_enabled(pvt)) {
3548 		ret = -ENODEV;
3549 
3550 		if (!ecc_enable_override)
3551 			goto err_enable;
3552 
3553 		if (boot_cpu_data.x86 >= 0x17) {
3554 			amd64_warn("Forcing ECC on is not recommended on newer systems. Please enable ECC in BIOS.");
3555 			goto err_enable;
		} else {
			amd64_warn("Forcing ECC on!\n");
		}
3558 
3559 		if (!enable_ecc_error_reporting(s, nid, F3))
3560 			goto err_enable;
3561 	}
3562 
3563 	ret = init_one_instance(pvt);
3564 	if (ret < 0) {
3565 		amd64_err("Error probing instance: %d\n", nid);
3566 
3567 		if (boot_cpu_data.x86 < 0x17)
3568 			restore_ecc_error_reporting(s, nid, F3);
3569 
3570 		goto err_enable;
3571 	}
3572 
3573 	dump_misc_regs(pvt);
3574 
3575 	return ret;
3576 
3577 err_enable:
3578 	hw_info_put(pvt);
3579 	kfree(pvt);
3580 
3581 err_settings:
3582 	kfree(s);
3583 	ecc_stngs[nid] = NULL;
3584 
3585 err_out:
3586 	return ret;
3587 }
3588 
3589 static void remove_one_instance(unsigned int nid)
3590 {
3591 	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
3592 	struct ecc_settings *s = ecc_stngs[nid];
3593 	struct mem_ctl_info *mci;
3594 	struct amd64_pvt *pvt;
3595 
3596 	/* Remove from EDAC CORE tracking list */
3597 	mci = edac_mc_del_mc(&F3->dev);
3598 	if (!mci)
3599 		return;
3600 
3601 	pvt = mci->pvt_info;
3602 
3603 	restore_ecc_error_reporting(s, nid, F3);
3604 
3605 	kfree(ecc_stngs[nid]);
3606 	ecc_stngs[nid] = NULL;
3607 
3608 	/* Free the EDAC CORE resources */
3609 	mci->pvt_info = NULL;
3610 
3611 	hw_info_put(pvt);
3612 	kfree(pvt);
3613 	edac_mc_free(mci);
3614 }
3615 
3616 static void setup_pci_device(void)
3617 {
3618 	struct mem_ctl_info *mci;
3619 	struct amd64_pvt *pvt;
3620 
3621 	if (pci_ctl)
3622 		return;
3623 
3624 	mci = edac_mc_find(0);
3625 	if (!mci)
3626 		return;
3627 
3628 	pvt = mci->pvt_info;
3629 	if (pvt->umc)
3630 		pci_ctl = edac_pci_create_generic_ctl(&pvt->F0->dev, EDAC_MOD_STR);
3631 	else
3632 		pci_ctl = edac_pci_create_generic_ctl(&pvt->F2->dev, EDAC_MOD_STR);
3633 	if (!pci_ctl) {
3634 		pr_warn("%s(): Unable to create PCI control\n", __func__);
3635 		pr_warn("%s(): PCI error report via EDAC not set\n", __func__);
3636 	}
3637 }
3638 
3639 static const struct x86_cpu_id amd64_cpuids[] = {
3640 	X86_MATCH_VENDOR_FAM(AMD,	0x0F, NULL),
3641 	X86_MATCH_VENDOR_FAM(AMD,	0x10, NULL),
3642 	X86_MATCH_VENDOR_FAM(AMD,	0x15, NULL),
3643 	X86_MATCH_VENDOR_FAM(AMD,	0x16, NULL),
3644 	X86_MATCH_VENDOR_FAM(AMD,	0x17, NULL),
3645 	X86_MATCH_VENDOR_FAM(HYGON,	0x18, NULL),
3646 	X86_MATCH_VENDOR_FAM(AMD,	0x19, NULL),
3647 	{ }
3648 };
3649 MODULE_DEVICE_TABLE(x86cpu, amd64_cpuids);
3650 
3651 static int __init amd64_edac_init(void)
3652 {
3653 	const char *owner;
3654 	int err = -ENODEV;
3655 	int i;
3656 
3657 	owner = edac_get_owner();
3658 	if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
3659 		return -EBUSY;
3660 
3661 	if (!x86_match_cpu(amd64_cpuids))
3662 		return -ENODEV;
3663 
3664 	if (amd_cache_northbridges() < 0)
3665 		return -ENODEV;
3666 
3667 	opstate_init();
3668 
3669 	err = -ENOMEM;
3670 	ecc_stngs = kcalloc(amd_nb_num(), sizeof(ecc_stngs[0]), GFP_KERNEL);
3671 	if (!ecc_stngs)
3672 		goto err_free;
3673 
3674 	msrs = msrs_alloc();
3675 	if (!msrs)
3676 		goto err_free;
3677 
3678 	for (i = 0; i < amd_nb_num(); i++) {
3679 		err = probe_one_instance(i);
3680 		if (err) {
3681 			/* unwind properly */
3682 			while (--i >= 0)
3683 				remove_one_instance(i);
3684 
3685 			goto err_pci;
3686 		}
3687 	}
3688 
3689 	if (!edac_has_mcs()) {
3690 		err = -ENODEV;
3691 		goto err_pci;
3692 	}
3693 
3694 	/* register stuff with EDAC MCE */
3695 	if (boot_cpu_data.x86 >= 0x17)
3696 		amd_register_ecc_decoder(decode_umc_error);
3697 	else
3698 		amd_register_ecc_decoder(decode_bus_error);
3699 
3700 	setup_pci_device();
3701 
3702 #ifdef CONFIG_X86_32
3703 	amd64_err("%s on 32-bit is unsupported. USE AT YOUR OWN RISK!\n", EDAC_MOD_STR);
3704 #endif
3705 
3706 	printk(KERN_INFO "AMD64 EDAC driver v%s\n", EDAC_AMD64_VERSION);
3707 
3708 	return 0;
3709 
3710 err_pci:
3711 	msrs_free(msrs);
3712 	msrs = NULL;
3713 
3714 err_free:
3715 	kfree(ecc_stngs);
3716 	ecc_stngs = NULL;
3717 
3718 	return err;
3719 }
3720 
3721 static void __exit amd64_edac_exit(void)
3722 {
3723 	int i;
3724 
3725 	if (pci_ctl)
3726 		edac_pci_release_generic_ctl(pci_ctl);
3727 
3728 	/* unregister from EDAC MCE */
3729 	if (boot_cpu_data.x86 >= 0x17)
3730 		amd_unregister_ecc_decoder(decode_umc_error);
3731 	else
3732 		amd_unregister_ecc_decoder(decode_bus_error);
3733 
3734 	for (i = 0; i < amd_nb_num(); i++)
3735 		remove_one_instance(i);
3736 
3737 	kfree(ecc_stngs);
3738 	ecc_stngs = NULL;
3739 
3740 	msrs_free(msrs);
3741 	msrs = NULL;
3742 }
3743 
3744 module_init(amd64_edac_init);
3745 module_exit(amd64_edac_exit);
3746 
3747 MODULE_LICENSE("GPL");
3748 MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, "
3749 		"Dave Peterson, Thayne Harbaugh");
3750 MODULE_DESCRIPTION("MC support for AMD64 memory controllers - "
3751 		EDAC_AMD64_VERSION);
3752 
3753 module_param(edac_op_state, int, 0444);
3754 MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
3755