xref: /openbmc/linux/drivers/edac/amd64_edac.c (revision 335f70fa)
1 // SPDX-License-Identifier: GPL-2.0-only
2 #include "amd64_edac.h"
3 #include <asm/amd_nb.h>
4 
5 static struct edac_pci_ctl_info *pci_ctl;
6 
7 /*
8  * Set by command line parameter. If BIOS has enabled the ECC, this override is
9  * cleared to prevent re-enabling the hardware by this driver.
10  */
11 static int ecc_enable_override;
12 module_param(ecc_enable_override, int, 0644);
13 
14 static struct msr __percpu *msrs;
15 
16 static struct amd64_family_type *fam_type;
17 
18 static inline u32 get_umc_reg(u32 reg)
19 {
20 	if (!fam_type->flags.zn_regs_v2)
21 		return reg;
22 
23 	switch (reg) {
24 	case UMCCH_ADDR_CFG:		return UMCCH_ADDR_CFG_DDR5;
25 	case UMCCH_ADDR_MASK_SEC:	return UMCCH_ADDR_MASK_SEC_DDR5;
26 	case UMCCH_DIMM_CFG:		return UMCCH_DIMM_CFG_DDR5;
27 	}
28 
29 	WARN_ONCE(1, "%s: unknown register 0x%x", __func__, reg);
30 	return 0;
31 }
32 
33 /* Per-node stuff */
34 static struct ecc_settings **ecc_stngs;
35 
36 /* Device for the PCI component */
37 static struct device *pci_ctl_dev;
38 
39 /*
40  * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
41  * bandwidth to a valid bit pattern. The 'set' operation finds the 'matching-
42  * or higher value'.
43  *
44  *FIXME: Produce a better mapping/linearisation.
45  */
46 static const struct scrubrate {
47        u32 scrubval;           /* bit pattern for scrub rate */
48        u32 bandwidth;          /* bandwidth consumed (bytes/sec) */
49 } scrubrates[] = {
50 	{ 0x01, 1600000000UL},
51 	{ 0x02, 800000000UL},
52 	{ 0x03, 400000000UL},
53 	{ 0x04, 200000000UL},
54 	{ 0x05, 100000000UL},
55 	{ 0x06, 50000000UL},
56 	{ 0x07, 25000000UL},
57 	{ 0x08, 12284069UL},
58 	{ 0x09, 6274509UL},
59 	{ 0x0A, 3121951UL},
60 	{ 0x0B, 1560975UL},
61 	{ 0x0C, 781440UL},
62 	{ 0x0D, 390720UL},
63 	{ 0x0E, 195300UL},
64 	{ 0x0F, 97650UL},
65 	{ 0x10, 48854UL},
66 	{ 0x11, 24427UL},
67 	{ 0x12, 12213UL},
68 	{ 0x13, 6101UL},
69 	{ 0x14, 3051UL},
70 	{ 0x15, 1523UL},
71 	{ 0x16, 761UL},
72 	{ 0x00, 0UL},        /* scrubbing off */
73 };
74 
75 int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
76 			       u32 *val, const char *func)
77 {
78 	int err = 0;
79 
80 	err = pci_read_config_dword(pdev, offset, val);
81 	if (err)
82 		amd64_warn("%s: error reading F%dx%03x.\n",
83 			   func, PCI_FUNC(pdev->devfn), offset);
84 
85 	return err;
86 }
87 
88 int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
89 				u32 val, const char *func)
90 {
91 	int err = 0;
92 
93 	err = pci_write_config_dword(pdev, offset, val);
94 	if (err)
95 		amd64_warn("%s: error writing to F%dx%03x.\n",
96 			   func, PCI_FUNC(pdev->devfn), offset);
97 
98 	return err;
99 }
100 
101 /*
102  * Select DCT to which PCI cfg accesses are routed
103  */
104 static void f15h_select_dct(struct amd64_pvt *pvt, u8 dct)
105 {
106 	u32 reg = 0;
107 
108 	amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, &reg);
109 	reg &= (pvt->model == 0x30) ? ~3 : ~1;
110 	reg |= dct;
111 	amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg);
112 }
113 
114 /*
115  *
116  * Depending on the family, F2 DCT reads need special handling:
117  *
118  * K8: has a single DCT only and no address offsets >= 0x100
119  *
120  * F10h: each DCT has its own set of regs
121  *	DCT0 -> F2x040..
122  *	DCT1 -> F2x140..
123  *
124  * F16h: has only 1 DCT
125  *
126  * F15h: we select which DCT we access using F1x10C[DctCfgSel]
127  */
128 static inline int amd64_read_dct_pci_cfg(struct amd64_pvt *pvt, u8 dct,
129 					 int offset, u32 *val)
130 {
131 	switch (pvt->fam) {
132 	case 0xf:
133 		if (dct || offset >= 0x100)
134 			return -EINVAL;
135 		break;
136 
137 	case 0x10:
138 		if (dct) {
139 			/*
140 			 * Note: If ganging is enabled, barring the regs
141 			 * F2x[1,0]98 and F2x[1,0]9C; reads reads to F2x1xx
142 			 * return 0. (cf. Section 2.8.1 F10h BKDG)
143 			 */
144 			if (dct_ganging_enabled(pvt))
145 				return 0;
146 
147 			offset += 0x100;
148 		}
149 		break;
150 
151 	case 0x15:
152 		/*
153 		 * F15h: F2x1xx addresses do not map explicitly to DCT1.
154 		 * We should select which DCT we access using F1x10C[DctCfgSel]
155 		 */
156 		dct = (dct && pvt->model == 0x30) ? 3 : dct;
157 		f15h_select_dct(pvt, dct);
158 		break;
159 
160 	case 0x16:
161 		if (dct)
162 			return -EINVAL;
163 		break;
164 
165 	default:
166 		break;
167 	}
168 	return amd64_read_pci_cfg(pvt->F2, offset, val);
169 }
170 
171 /*
172  * Memory scrubber control interface. For K8, memory scrubbing is handled by
173  * hardware and can involve L2 cache, dcache as well as the main memory. With
174  * F10, this is extended to L3 cache scrubbing on CPU models sporting that
175  * functionality.
176  *
177  * This causes the "units" for the scrubbing speed to vary from 64 byte blocks
178  * (dram) over to cache lines. This is nasty, so we will use bandwidth in
179  * bytes/sec for the setting.
180  *
181  * Currently, we only do dram scrubbing. If the scrubbing is done in software on
182  * other archs, we might not have access to the caches directly.
183  */
184 
185 static inline void __f17h_set_scrubval(struct amd64_pvt *pvt, u32 scrubval)
186 {
187 	/*
188 	 * Fam17h supports scrub values between 0x5 and 0x14. Also, the values
189 	 * are shifted down by 0x5, so scrubval 0x5 is written to the register
190 	 * as 0x0, scrubval 0x6 as 0x1, etc.
191 	 */
192 	if (scrubval >= 0x5 && scrubval <= 0x14) {
193 		scrubval -= 0x5;
194 		pci_write_bits32(pvt->F6, F17H_SCR_LIMIT_ADDR, scrubval, 0xF);
195 		pci_write_bits32(pvt->F6, F17H_SCR_BASE_ADDR, 1, 0x1);
196 	} else {
197 		pci_write_bits32(pvt->F6, F17H_SCR_BASE_ADDR, 0, 0x1);
198 	}
199 }
200 /*
201  * Scan the scrub rate mapping table for a close or matching bandwidth value to
202  * issue. If requested is too big, then use last maximum value found.
203  */
204 static int __set_scrub_rate(struct amd64_pvt *pvt, u32 new_bw, u32 min_rate)
205 {
206 	u32 scrubval;
207 	int i;
208 
209 	/*
210 	 * map the configured rate (new_bw) to a value specific to the AMD64
211 	 * memory controller and apply to register. Search for the first
212 	 * bandwidth entry that is greater or equal than the setting requested
213 	 * and program that. If at last entry, turn off DRAM scrubbing.
214 	 *
215 	 * If no suitable bandwidth is found, turn off DRAM scrubbing entirely
216 	 * by falling back to the last element in scrubrates[].
217 	 */
218 	for (i = 0; i < ARRAY_SIZE(scrubrates) - 1; i++) {
219 		/*
220 		 * skip scrub rates which aren't recommended
221 		 * (see F10 BKDG, F3x58)
222 		 */
223 		if (scrubrates[i].scrubval < min_rate)
224 			continue;
225 
226 		if (scrubrates[i].bandwidth <= new_bw)
227 			break;
228 	}
229 
230 	scrubval = scrubrates[i].scrubval;
231 
232 	if (pvt->umc) {
233 		__f17h_set_scrubval(pvt, scrubval);
234 	} else if (pvt->fam == 0x15 && pvt->model == 0x60) {
235 		f15h_select_dct(pvt, 0);
236 		pci_write_bits32(pvt->F2, F15H_M60H_SCRCTRL, scrubval, 0x001F);
237 		f15h_select_dct(pvt, 1);
238 		pci_write_bits32(pvt->F2, F15H_M60H_SCRCTRL, scrubval, 0x001F);
239 	} else {
240 		pci_write_bits32(pvt->F3, SCRCTRL, scrubval, 0x001F);
241 	}
242 
243 	if (scrubval)
244 		return scrubrates[i].bandwidth;
245 
246 	return 0;
247 }
248 
249 static int set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
250 {
251 	struct amd64_pvt *pvt = mci->pvt_info;
252 	u32 min_scrubrate = 0x5;
253 
254 	if (pvt->fam == 0xf)
255 		min_scrubrate = 0x0;
256 
257 	if (pvt->fam == 0x15) {
258 		/* Erratum #505 */
259 		if (pvt->model < 0x10)
260 			f15h_select_dct(pvt, 0);
261 
262 		if (pvt->model == 0x60)
263 			min_scrubrate = 0x6;
264 	}
265 	return __set_scrub_rate(pvt, bw, min_scrubrate);
266 }
267 
268 static int get_scrub_rate(struct mem_ctl_info *mci)
269 {
270 	struct amd64_pvt *pvt = mci->pvt_info;
271 	int i, retval = -EINVAL;
272 	u32 scrubval = 0;
273 
274 	if (pvt->umc) {
275 		amd64_read_pci_cfg(pvt->F6, F17H_SCR_BASE_ADDR, &scrubval);
276 		if (scrubval & BIT(0)) {
277 			amd64_read_pci_cfg(pvt->F6, F17H_SCR_LIMIT_ADDR, &scrubval);
278 			scrubval &= 0xF;
279 			scrubval += 0x5;
280 		} else {
281 			scrubval = 0;
282 		}
283 	} else if (pvt->fam == 0x15) {
284 		/* Erratum #505 */
285 		if (pvt->model < 0x10)
286 			f15h_select_dct(pvt, 0);
287 
288 		if (pvt->model == 0x60)
289 			amd64_read_pci_cfg(pvt->F2, F15H_M60H_SCRCTRL, &scrubval);
290 		else
291 			amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
292 	} else {
293 		amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
294 	}
295 
296 	scrubval = scrubval & 0x001F;
297 
298 	for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
299 		if (scrubrates[i].scrubval == scrubval) {
300 			retval = scrubrates[i].bandwidth;
301 			break;
302 		}
303 	}
304 	return retval;
305 }
306 
307 /*
308  * returns true if the SysAddr given by sys_addr matches the
309  * DRAM base/limit associated with node_id
310  */
311 static bool base_limit_match(struct amd64_pvt *pvt, u64 sys_addr, u8 nid)
312 {
313 	u64 addr;
314 
315 	/* The K8 treats this as a 40-bit value.  However, bits 63-40 will be
316 	 * all ones if the most significant implemented address bit is 1.
317 	 * Here we discard bits 63-40.  See section 3.4.2 of AMD publication
318 	 * 24592: AMD x86-64 Architecture Programmer's Manual Volume 1
319 	 * Application Programming.
320 	 */
321 	addr = sys_addr & 0x000000ffffffffffull;
322 
323 	return ((addr >= get_dram_base(pvt, nid)) &&
324 		(addr <= get_dram_limit(pvt, nid)));
325 }
326 
327 /*
328  * Attempt to map a SysAddr to a node. On success, return a pointer to the
329  * mem_ctl_info structure for the node that the SysAddr maps to.
330  *
331  * On failure, return NULL.
332  */
333 static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
334 						u64 sys_addr)
335 {
336 	struct amd64_pvt *pvt;
337 	u8 node_id;
338 	u32 intlv_en, bits;
339 
340 	/*
341 	 * Here we use the DRAM Base (section 3.4.4.1) and DRAM Limit (section
342 	 * 3.4.4.2) registers to map the SysAddr to a node ID.
343 	 */
344 	pvt = mci->pvt_info;
345 
346 	/*
347 	 * The value of this field should be the same for all DRAM Base
348 	 * registers.  Therefore we arbitrarily choose to read it from the
349 	 * register for node 0.
350 	 */
351 	intlv_en = dram_intlv_en(pvt, 0);
352 
353 	if (intlv_en == 0) {
354 		for (node_id = 0; node_id < DRAM_RANGES; node_id++) {
355 			if (base_limit_match(pvt, sys_addr, node_id))
356 				goto found;
357 		}
358 		goto err_no_match;
359 	}
360 
361 	if (unlikely((intlv_en != 0x01) &&
362 		     (intlv_en != 0x03) &&
363 		     (intlv_en != 0x07))) {
364 		amd64_warn("DRAM Base[IntlvEn] junk value: 0x%x, BIOS bug?\n", intlv_en);
365 		return NULL;
366 	}
367 
368 	bits = (((u32) sys_addr) >> 12) & intlv_en;
369 
370 	for (node_id = 0; ; ) {
371 		if ((dram_intlv_sel(pvt, node_id) & intlv_en) == bits)
372 			break;	/* intlv_sel field matches */
373 
374 		if (++node_id >= DRAM_RANGES)
375 			goto err_no_match;
376 	}
377 
378 	/* sanity test for sys_addr */
379 	if (unlikely(!base_limit_match(pvt, sys_addr, node_id))) {
380 		amd64_warn("%s: sys_addr 0x%llx falls outside base/limit address"
381 			   "range for node %d with node interleaving enabled.\n",
382 			   __func__, sys_addr, node_id);
383 		return NULL;
384 	}
385 
386 found:
387 	return edac_mc_find((int)node_id);
388 
389 err_no_match:
390 	edac_dbg(2, "sys_addr 0x%lx doesn't match any node\n",
391 		 (unsigned long)sys_addr);
392 
393 	return NULL;
394 }
395 
396 /*
397  * compute the CS base address of the @csrow on the DRAM controller @dct.
398  * For details see F2x[5C:40] in the processor's BKDG
399  */
400 static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
401 				 u64 *base, u64 *mask)
402 {
403 	u64 csbase, csmask, base_bits, mask_bits;
404 	u8 addr_shift;
405 
406 	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
407 		csbase		= pvt->csels[dct].csbases[csrow];
408 		csmask		= pvt->csels[dct].csmasks[csrow];
409 		base_bits	= GENMASK_ULL(31, 21) | GENMASK_ULL(15, 9);
410 		mask_bits	= GENMASK_ULL(29, 21) | GENMASK_ULL(15, 9);
411 		addr_shift	= 4;
412 
413 	/*
414 	 * F16h and F15h, models 30h and later need two addr_shift values:
415 	 * 8 for high and 6 for low (cf. F16h BKDG).
416 	 */
417 	} else if (pvt->fam == 0x16 ||
418 		  (pvt->fam == 0x15 && pvt->model >= 0x30)) {
419 		csbase          = pvt->csels[dct].csbases[csrow];
420 		csmask          = pvt->csels[dct].csmasks[csrow >> 1];
421 
422 		*base  = (csbase & GENMASK_ULL(15,  5)) << 6;
423 		*base |= (csbase & GENMASK_ULL(30, 19)) << 8;
424 
425 		*mask = ~0ULL;
426 		/* poke holes for the csmask */
427 		*mask &= ~((GENMASK_ULL(15, 5)  << 6) |
428 			   (GENMASK_ULL(30, 19) << 8));
429 
430 		*mask |= (csmask & GENMASK_ULL(15, 5))  << 6;
431 		*mask |= (csmask & GENMASK_ULL(30, 19)) << 8;
432 
433 		return;
434 	} else {
435 		csbase		= pvt->csels[dct].csbases[csrow];
436 		csmask		= pvt->csels[dct].csmasks[csrow >> 1];
437 		addr_shift	= 8;
438 
439 		if (pvt->fam == 0x15)
440 			base_bits = mask_bits =
441 				GENMASK_ULL(30,19) | GENMASK_ULL(13,5);
442 		else
443 			base_bits = mask_bits =
444 				GENMASK_ULL(28,19) | GENMASK_ULL(13,5);
445 	}
446 
447 	*base  = (csbase & base_bits) << addr_shift;
448 
449 	*mask  = ~0ULL;
450 	/* poke holes for the csmask */
451 	*mask &= ~(mask_bits << addr_shift);
452 	/* OR them in */
453 	*mask |= (csmask & mask_bits) << addr_shift;
454 }
455 
456 #define for_each_chip_select(i, dct, pvt) \
457 	for (i = 0; i < pvt->csels[dct].b_cnt; i++)
458 
459 #define chip_select_base(i, dct, pvt) \
460 	pvt->csels[dct].csbases[i]
461 
462 #define for_each_chip_select_mask(i, dct, pvt) \
463 	for (i = 0; i < pvt->csels[dct].m_cnt; i++)
464 
465 #define for_each_umc(i) \
466 	for (i = 0; i < fam_type->max_mcs; i++)
467 
468 /*
469  * @input_addr is an InputAddr associated with the node given by mci. Return the
470  * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
471  */
472 static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
473 {
474 	struct amd64_pvt *pvt;
475 	int csrow;
476 	u64 base, mask;
477 
478 	pvt = mci->pvt_info;
479 
480 	for_each_chip_select(csrow, 0, pvt) {
481 		if (!csrow_enabled(csrow, 0, pvt))
482 			continue;
483 
484 		get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);
485 
486 		mask = ~mask;
487 
488 		if ((input_addr & mask) == (base & mask)) {
489 			edac_dbg(2, "InputAddr 0x%lx matches csrow %d (node %d)\n",
490 				 (unsigned long)input_addr, csrow,
491 				 pvt->mc_node_id);
492 
493 			return csrow;
494 		}
495 	}
496 	edac_dbg(2, "no matching csrow for InputAddr 0x%lx (MC node %d)\n",
497 		 (unsigned long)input_addr, pvt->mc_node_id);
498 
499 	return -1;
500 }
501 
502 /*
503  * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094)
504  * for the node represented by mci. Info is passed back in *hole_base,
505  * *hole_offset, and *hole_size.  Function returns 0 if info is valid or 1 if
506  * info is invalid. Info may be invalid for either of the following reasons:
507  *
508  * - The revision of the node is not E or greater.  In this case, the DRAM Hole
509  *   Address Register does not exist.
510  *
511  * - The DramHoleValid bit is cleared in the DRAM Hole Address Register,
512  *   indicating that its contents are not valid.
513  *
514  * The values passed back in *hole_base, *hole_offset, and *hole_size are
515  * complete 32-bit values despite the fact that the bitfields in the DHAR
516  * only represent bits 31-24 of the base and offset values.
517  */
518 static int get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
519 			      u64 *hole_offset, u64 *hole_size)
520 {
521 	struct amd64_pvt *pvt = mci->pvt_info;
522 
523 	/* only revE and later have the DRAM Hole Address Register */
524 	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_E) {
525 		edac_dbg(1, "  revision %d for node %d does not support DHAR\n",
526 			 pvt->ext_model, pvt->mc_node_id);
527 		return 1;
528 	}
529 
530 	/* valid for Fam10h and above */
531 	if (pvt->fam >= 0x10 && !dhar_mem_hoist_valid(pvt)) {
532 		edac_dbg(1, "  Dram Memory Hoisting is DISABLED on this system\n");
533 		return 1;
534 	}
535 
536 	if (!dhar_valid(pvt)) {
537 		edac_dbg(1, "  Dram Memory Hoisting is DISABLED on this node %d\n",
538 			 pvt->mc_node_id);
539 		return 1;
540 	}
541 
542 	/* This node has Memory Hoisting */
543 
544 	/* +------------------+--------------------+--------------------+-----
545 	 * | memory           | DRAM hole          | relocated          |
546 	 * | [0, (x - 1)]     | [x, 0xffffffff]    | addresses from     |
547 	 * |                  |                    | DRAM hole          |
548 	 * |                  |                    | [0x100000000,      |
549 	 * |                  |                    |  (0x100000000+     |
550 	 * |                  |                    |   (0xffffffff-x))] |
551 	 * +------------------+--------------------+--------------------+-----
552 	 *
553 	 * Above is a diagram of physical memory showing the DRAM hole and the
554 	 * relocated addresses from the DRAM hole.  As shown, the DRAM hole
555 	 * starts at address x (the base address) and extends through address
556 	 * 0xffffffff.  The DRAM Hole Address Register (DHAR) relocates the
557 	 * addresses in the hole so that they start at 0x100000000.
558 	 */
559 
560 	*hole_base = dhar_base(pvt);
561 	*hole_size = (1ULL << 32) - *hole_base;
562 
563 	*hole_offset = (pvt->fam > 0xf) ? f10_dhar_offset(pvt)
564 					: k8_dhar_offset(pvt);
565 
566 	edac_dbg(1, "  DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
567 		 pvt->mc_node_id, (unsigned long)*hole_base,
568 		 (unsigned long)*hole_offset, (unsigned long)*hole_size);
569 
570 	return 0;
571 }
572 
573 #ifdef CONFIG_EDAC_DEBUG
574 #define EDAC_DCT_ATTR_SHOW(reg)						\
575 static ssize_t reg##_show(struct device *dev,				\
576 			 struct device_attribute *mattr, char *data)	\
577 {									\
578 	struct mem_ctl_info *mci = to_mci(dev);				\
579 	struct amd64_pvt *pvt = mci->pvt_info;				\
580 									\
581 	return sprintf(data, "0x%016llx\n", (u64)pvt->reg);		\
582 }
583 
584 EDAC_DCT_ATTR_SHOW(dhar);
585 EDAC_DCT_ATTR_SHOW(dbam0);
586 EDAC_DCT_ATTR_SHOW(top_mem);
587 EDAC_DCT_ATTR_SHOW(top_mem2);
588 
589 static ssize_t dram_hole_show(struct device *dev, struct device_attribute *mattr,
590 			      char *data)
591 {
592 	struct mem_ctl_info *mci = to_mci(dev);
593 
594 	u64 hole_base = 0;
595 	u64 hole_offset = 0;
596 	u64 hole_size = 0;
597 
598 	get_dram_hole_info(mci, &hole_base, &hole_offset, &hole_size);
599 
600 	return sprintf(data, "%llx %llx %llx\n", hole_base, hole_offset,
601 						 hole_size);
602 }
603 
604 /*
605  * update NUM_DBG_ATTRS in case you add new members
606  */
607 static DEVICE_ATTR(dhar, S_IRUGO, dhar_show, NULL);
608 static DEVICE_ATTR(dbam, S_IRUGO, dbam0_show, NULL);
609 static DEVICE_ATTR(topmem, S_IRUGO, top_mem_show, NULL);
610 static DEVICE_ATTR(topmem2, S_IRUGO, top_mem2_show, NULL);
611 static DEVICE_ATTR_RO(dram_hole);
612 
613 static struct attribute *dbg_attrs[] = {
614 	&dev_attr_dhar.attr,
615 	&dev_attr_dbam.attr,
616 	&dev_attr_topmem.attr,
617 	&dev_attr_topmem2.attr,
618 	&dev_attr_dram_hole.attr,
619 	NULL
620 };
621 
622 static const struct attribute_group dbg_group = {
623 	.attrs = dbg_attrs,
624 };
625 
626 static ssize_t inject_section_show(struct device *dev,
627 				   struct device_attribute *mattr, char *buf)
628 {
629 	struct mem_ctl_info *mci = to_mci(dev);
630 	struct amd64_pvt *pvt = mci->pvt_info;
631 	return sprintf(buf, "0x%x\n", pvt->injection.section);
632 }
633 
634 /*
635  * store error injection section value which refers to one of 4 16-byte sections
636  * within a 64-byte cacheline
637  *
638  * range: 0..3
639  */
640 static ssize_t inject_section_store(struct device *dev,
641 				    struct device_attribute *mattr,
642 				    const char *data, size_t count)
643 {
644 	struct mem_ctl_info *mci = to_mci(dev);
645 	struct amd64_pvt *pvt = mci->pvt_info;
646 	unsigned long value;
647 	int ret;
648 
649 	ret = kstrtoul(data, 10, &value);
650 	if (ret < 0)
651 		return ret;
652 
653 	if (value > 3) {
654 		amd64_warn("%s: invalid section 0x%lx\n", __func__, value);
655 		return -EINVAL;
656 	}
657 
658 	pvt->injection.section = (u32) value;
659 	return count;
660 }
661 
662 static ssize_t inject_word_show(struct device *dev,
663 				struct device_attribute *mattr, char *buf)
664 {
665 	struct mem_ctl_info *mci = to_mci(dev);
666 	struct amd64_pvt *pvt = mci->pvt_info;
667 	return sprintf(buf, "0x%x\n", pvt->injection.word);
668 }
669 
670 /*
671  * store error injection word value which refers to one of 9 16-bit word of the
672  * 16-byte (128-bit + ECC bits) section
673  *
674  * range: 0..8
675  */
676 static ssize_t inject_word_store(struct device *dev,
677 				 struct device_attribute *mattr,
678 				 const char *data, size_t count)
679 {
680 	struct mem_ctl_info *mci = to_mci(dev);
681 	struct amd64_pvt *pvt = mci->pvt_info;
682 	unsigned long value;
683 	int ret;
684 
685 	ret = kstrtoul(data, 10, &value);
686 	if (ret < 0)
687 		return ret;
688 
689 	if (value > 8) {
690 		amd64_warn("%s: invalid word 0x%lx\n", __func__, value);
691 		return -EINVAL;
692 	}
693 
694 	pvt->injection.word = (u32) value;
695 	return count;
696 }
697 
698 static ssize_t inject_ecc_vector_show(struct device *dev,
699 				      struct device_attribute *mattr,
700 				      char *buf)
701 {
702 	struct mem_ctl_info *mci = to_mci(dev);
703 	struct amd64_pvt *pvt = mci->pvt_info;
704 	return sprintf(buf, "0x%x\n", pvt->injection.bit_map);
705 }
706 
707 /*
708  * store 16 bit error injection vector which enables injecting errors to the
709  * corresponding bit within the error injection word above. When used during a
710  * DRAM ECC read, it holds the contents of the of the DRAM ECC bits.
711  */
712 static ssize_t inject_ecc_vector_store(struct device *dev,
713 				       struct device_attribute *mattr,
714 				       const char *data, size_t count)
715 {
716 	struct mem_ctl_info *mci = to_mci(dev);
717 	struct amd64_pvt *pvt = mci->pvt_info;
718 	unsigned long value;
719 	int ret;
720 
721 	ret = kstrtoul(data, 16, &value);
722 	if (ret < 0)
723 		return ret;
724 
725 	if (value & 0xFFFF0000) {
726 		amd64_warn("%s: invalid EccVector: 0x%lx\n", __func__, value);
727 		return -EINVAL;
728 	}
729 
730 	pvt->injection.bit_map = (u32) value;
731 	return count;
732 }
733 
734 /*
735  * Do a DRAM ECC read. Assemble staged values in the pvt area, format into
736  * fields needed by the injection registers and read the NB Array Data Port.
737  */
738 static ssize_t inject_read_store(struct device *dev,
739 				 struct device_attribute *mattr,
740 				 const char *data, size_t count)
741 {
742 	struct mem_ctl_info *mci = to_mci(dev);
743 	struct amd64_pvt *pvt = mci->pvt_info;
744 	unsigned long value;
745 	u32 section, word_bits;
746 	int ret;
747 
748 	ret = kstrtoul(data, 10, &value);
749 	if (ret < 0)
750 		return ret;
751 
752 	/* Form value to choose 16-byte section of cacheline */
753 	section = F10_NB_ARRAY_DRAM | SET_NB_ARRAY_ADDR(pvt->injection.section);
754 
755 	amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_ADDR, section);
756 
757 	word_bits = SET_NB_DRAM_INJECTION_READ(pvt->injection);
758 
759 	/* Issue 'word' and 'bit' along with the READ request */
760 	amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, word_bits);
761 
762 	edac_dbg(0, "section=0x%x word_bits=0x%x\n", section, word_bits);
763 
764 	return count;
765 }
766 
767 /*
768  * Do a DRAM ECC write. Assemble staged values in the pvt area and format into
769  * fields needed by the injection registers.
770  */
771 static ssize_t inject_write_store(struct device *dev,
772 				  struct device_attribute *mattr,
773 				  const char *data, size_t count)
774 {
775 	struct mem_ctl_info *mci = to_mci(dev);
776 	struct amd64_pvt *pvt = mci->pvt_info;
777 	u32 section, word_bits, tmp;
778 	unsigned long value;
779 	int ret;
780 
781 	ret = kstrtoul(data, 10, &value);
782 	if (ret < 0)
783 		return ret;
784 
785 	/* Form value to choose 16-byte section of cacheline */
786 	section = F10_NB_ARRAY_DRAM | SET_NB_ARRAY_ADDR(pvt->injection.section);
787 
788 	amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_ADDR, section);
789 
790 	word_bits = SET_NB_DRAM_INJECTION_WRITE(pvt->injection);
791 
792 	pr_notice_once("Don't forget to decrease MCE polling interval in\n"
793 			"/sys/bus/machinecheck/devices/machinecheck<CPUNUM>/check_interval\n"
794 			"so that you can get the error report faster.\n");
795 
796 	on_each_cpu(disable_caches, NULL, 1);
797 
798 	/* Issue 'word' and 'bit' along with the READ request */
799 	amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, word_bits);
800 
801  retry:
802 	/* wait until injection happens */
803 	amd64_read_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, &tmp);
804 	if (tmp & F10_NB_ARR_ECC_WR_REQ) {
805 		cpu_relax();
806 		goto retry;
807 	}
808 
809 	on_each_cpu(enable_caches, NULL, 1);
810 
811 	edac_dbg(0, "section=0x%x word_bits=0x%x\n", section, word_bits);
812 
813 	return count;
814 }
815 
816 /*
817  * update NUM_INJ_ATTRS in case you add new members
818  */
819 
820 static DEVICE_ATTR_RW(inject_section);
821 static DEVICE_ATTR_RW(inject_word);
822 static DEVICE_ATTR_RW(inject_ecc_vector);
823 static DEVICE_ATTR_WO(inject_write);
824 static DEVICE_ATTR_WO(inject_read);
825 
826 static struct attribute *inj_attrs[] = {
827 	&dev_attr_inject_section.attr,
828 	&dev_attr_inject_word.attr,
829 	&dev_attr_inject_ecc_vector.attr,
830 	&dev_attr_inject_write.attr,
831 	&dev_attr_inject_read.attr,
832 	NULL
833 };
834 
835 static umode_t inj_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
836 {
837 	struct device *dev = kobj_to_dev(kobj);
838 	struct mem_ctl_info *mci = container_of(dev, struct mem_ctl_info, dev);
839 	struct amd64_pvt *pvt = mci->pvt_info;
840 
841 	/* Families which have that injection hw */
842 	if (pvt->fam >= 0x10 && pvt->fam <= 0x16)
843 		return attr->mode;
844 
845 	return 0;
846 }
847 
848 static const struct attribute_group inj_group = {
849 	.attrs = inj_attrs,
850 	.is_visible = inj_is_visible,
851 };
852 #endif /* CONFIG_EDAC_DEBUG */
853 
854 /*
855  * Return the DramAddr that the SysAddr given by @sys_addr maps to.  It is
856  * assumed that sys_addr maps to the node given by mci.
857  *
858  * The first part of section 3.4.4 (p. 70) shows how the DRAM Base (section
859  * 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers are used to translate a
860  * SysAddr to a DramAddr. If the DRAM Hole Address Register (DHAR) is enabled,
861  * then it is also involved in translating a SysAddr to a DramAddr. Sections
862  * 3.4.8 and 3.5.8.2 describe the DHAR and how it is used for memory hoisting.
863  * These parts of the documentation are unclear. I interpret them as follows:
864  *
865  * When node n receives a SysAddr, it processes the SysAddr as follows:
866  *
867  * 1. It extracts the DRAMBase and DRAMLimit values from the DRAM Base and DRAM
868  *    Limit registers for node n. If the SysAddr is not within the range
869  *    specified by the base and limit values, then node n ignores the Sysaddr
870  *    (since it does not map to node n). Otherwise continue to step 2 below.
871  *
872  * 2. If the DramHoleValid bit of the DHAR for node n is clear, the DHAR is
873  *    disabled so skip to step 3 below. Otherwise see if the SysAddr is within
874  *    the range of relocated addresses (starting at 0x100000000) from the DRAM
875  *    hole. If not, skip to step 3 below. Else get the value of the
876  *    DramHoleOffset field from the DHAR. To obtain the DramAddr, subtract the
877  *    offset defined by this value from the SysAddr.
878  *
879  * 3. Obtain the base address for node n from the DRAMBase field of the DRAM
880  *    Base register for node n. To obtain the DramAddr, subtract the base
881  *    address from the SysAddr, as shown near the start of section 3.4.4 (p.70).
882  */
883 static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
884 {
885 	struct amd64_pvt *pvt = mci->pvt_info;
886 	u64 dram_base, hole_base, hole_offset, hole_size, dram_addr;
887 	int ret;
888 
889 	dram_base = get_dram_base(pvt, pvt->mc_node_id);
890 
891 	ret = get_dram_hole_info(mci, &hole_base, &hole_offset, &hole_size);
892 	if (!ret) {
893 		if ((sys_addr >= (1ULL << 32)) &&
894 		    (sys_addr < ((1ULL << 32) + hole_size))) {
895 			/* use DHAR to translate SysAddr to DramAddr */
896 			dram_addr = sys_addr - hole_offset;
897 
898 			edac_dbg(2, "using DHAR to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
899 				 (unsigned long)sys_addr,
900 				 (unsigned long)dram_addr);
901 
902 			return dram_addr;
903 		}
904 	}
905 
906 	/*
907 	 * Translate the SysAddr to a DramAddr as shown near the start of
908 	 * section 3.4.4 (p. 70).  Although sys_addr is a 64-bit value, the k8
909 	 * only deals with 40-bit values.  Therefore we discard bits 63-40 of
910 	 * sys_addr below.  If bit 39 of sys_addr is 1 then the bits we
911 	 * discard are all 1s.  Otherwise the bits we discard are all 0s.  See
912 	 * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture
913 	 * Programmer's Manual Volume 1 Application Programming.
914 	 */
915 	dram_addr = (sys_addr & GENMASK_ULL(39, 0)) - dram_base;
916 
917 	edac_dbg(2, "using DRAM Base register to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
918 		 (unsigned long)sys_addr, (unsigned long)dram_addr);
919 	return dram_addr;
920 }
921 
922 /*
923  * @intlv_en is the value of the IntlvEn field from a DRAM Base register
924  * (section 3.4.4.1).  Return the number of bits from a SysAddr that are used
925  * for node interleaving.
926  */
927 static int num_node_interleave_bits(unsigned intlv_en)
928 {
929 	static const int intlv_shift_table[] = { 0, 1, 0, 2, 0, 0, 0, 3 };
930 	int n;
931 
932 	BUG_ON(intlv_en > 7);
933 	n = intlv_shift_table[intlv_en];
934 	return n;
935 }
936 
937 /* Translate the DramAddr given by @dram_addr to an InputAddr. */
938 static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
939 {
940 	struct amd64_pvt *pvt;
941 	int intlv_shift;
942 	u64 input_addr;
943 
944 	pvt = mci->pvt_info;
945 
946 	/*
947 	 * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
948 	 * concerning translating a DramAddr to an InputAddr.
949 	 */
950 	intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
951 	input_addr = ((dram_addr >> intlv_shift) & GENMASK_ULL(35, 12)) +
952 		      (dram_addr & 0xfff);
953 
954 	edac_dbg(2, "  Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
955 		 intlv_shift, (unsigned long)dram_addr,
956 		 (unsigned long)input_addr);
957 
958 	return input_addr;
959 }
960 
961 /*
962  * Translate the SysAddr represented by @sys_addr to an InputAddr.  It is
963  * assumed that @sys_addr maps to the node given by mci.
964  */
965 static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
966 {
967 	u64 input_addr;
968 
969 	input_addr =
970 	    dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr));
971 
972 	edac_dbg(2, "SysAddr 0x%lx translates to InputAddr 0x%lx\n",
973 		 (unsigned long)sys_addr, (unsigned long)input_addr);
974 
975 	return input_addr;
976 }
977 
978 /* Map the Error address to a PAGE and PAGE OFFSET. */
979 static inline void error_address_to_page_and_offset(u64 error_address,
980 						    struct err_info *err)
981 {
982 	err->page = (u32) (error_address >> PAGE_SHIFT);
983 	err->offset = ((u32) error_address) & ~PAGE_MASK;
984 }
985 
986 /*
987  * @sys_addr is an error address (a SysAddr) extracted from the MCA NB Address
988  * Low (section 3.6.4.5) and MCA NB Address High (section 3.6.4.6) registers
989  * of a node that detected an ECC memory error.  mci represents the node that
990  * the error address maps to (possibly different from the node that detected
991  * the error).  Return the number of the csrow that sys_addr maps to, or -1 on
992  * error.
993  */
994 static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
995 {
996 	int csrow;
997 
998 	csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr));
999 
1000 	if (csrow == -1)
1001 		amd64_mc_err(mci, "Failed to translate InputAddr to csrow for "
1002 				  "address 0x%lx\n", (unsigned long)sys_addr);
1003 	return csrow;
1004 }
1005 
1006 /* Protect the PCI config register pairs used for DF indirect access. */
1007 static DEFINE_MUTEX(df_indirect_mutex);
1008 
1009 /*
1010  * Data Fabric Indirect Access uses FICAA/FICAD.
1011  *
1012  * Fabric Indirect Configuration Access Address (FICAA): Constructed based
1013  * on the device's Instance Id and the PCI function and register offset of
1014  * the desired register.
1015  *
1016  * Fabric Indirect Configuration Access Data (FICAD): There are FICAD LO
1017  * and FICAD HI registers but so far we only need the LO register.
1018  *
1019  * Use Instance Id 0xFF to indicate a broadcast read.
1020  */
1021 #define DF_BROADCAST	0xFF
1022 static int __df_indirect_read(u16 node, u8 func, u16 reg, u8 instance_id, u32 *lo)
1023 {
1024 	struct pci_dev *F4;
1025 	u32 ficaa;
1026 	int err = -ENODEV;
1027 
1028 	if (node >= amd_nb_num())
1029 		goto out;
1030 
1031 	F4 = node_to_amd_nb(node)->link;
1032 	if (!F4)
1033 		goto out;
1034 
1035 	ficaa  = (instance_id == DF_BROADCAST) ? 0 : 1;
1036 	ficaa |= reg & 0x3FC;
1037 	ficaa |= (func & 0x7) << 11;
1038 	ficaa |= instance_id << 16;
1039 
1040 	mutex_lock(&df_indirect_mutex);
1041 
1042 	err = pci_write_config_dword(F4, 0x5C, ficaa);
1043 	if (err) {
1044 		pr_warn("Error writing DF Indirect FICAA, FICAA=0x%x\n", ficaa);
1045 		goto out_unlock;
1046 	}
1047 
1048 	err = pci_read_config_dword(F4, 0x98, lo);
1049 	if (err)
1050 		pr_warn("Error reading DF Indirect FICAD LO, FICAA=0x%x.\n", ficaa);
1051 
1052 out_unlock:
1053 	mutex_unlock(&df_indirect_mutex);
1054 
1055 out:
1056 	return err;
1057 }
1058 
1059 static int df_indirect_read_instance(u16 node, u8 func, u16 reg, u8 instance_id, u32 *lo)
1060 {
1061 	return __df_indirect_read(node, func, reg, instance_id, lo);
1062 }
1063 
1064 static int df_indirect_read_broadcast(u16 node, u8 func, u16 reg, u32 *lo)
1065 {
1066 	return __df_indirect_read(node, func, reg, DF_BROADCAST, lo);
1067 }
1068 
1069 struct addr_ctx {
1070 	u64 ret_addr;
1071 	u32 tmp;
1072 	u16 nid;
1073 	u8 inst_id;
1074 };
1075 
1076 static int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr)
1077 {
1078 	u64 dram_base_addr, dram_limit_addr, dram_hole_base;
1079 
1080 	u8 die_id_shift, die_id_mask, socket_id_shift, socket_id_mask;
1081 	u8 intlv_num_dies, intlv_num_chan, intlv_num_sockets;
1082 	u8 intlv_addr_sel, intlv_addr_bit;
1083 	u8 num_intlv_bits, hashed_bit;
1084 	u8 lgcy_mmio_hole_en, base = 0;
1085 	u8 cs_mask, cs_id = 0;
1086 	bool hash_enabled = false;
1087 
1088 	struct addr_ctx ctx;
1089 
1090 	memset(&ctx, 0, sizeof(ctx));
1091 
1092 	/* Start from the normalized address */
1093 	ctx.ret_addr = norm_addr;
1094 
1095 	ctx.nid = nid;
1096 	ctx.inst_id = umc;
1097 
1098 	/* Read D18F0x1B4 (DramOffset), check if base 1 is used. */
1099 	if (df_indirect_read_instance(nid, 0, 0x1B4, umc, &ctx.tmp))
1100 		goto out_err;
1101 
1102 	/* Remove HiAddrOffset from normalized address, if enabled: */
1103 	if (ctx.tmp & BIT(0)) {
1104 		u64 hi_addr_offset = (ctx.tmp & GENMASK_ULL(31, 20)) << 8;
1105 
1106 		if (norm_addr >= hi_addr_offset) {
1107 			ctx.ret_addr -= hi_addr_offset;
1108 			base = 1;
1109 		}
1110 	}
1111 
1112 	/* Read D18F0x110 (DramBaseAddress). */
1113 	if (df_indirect_read_instance(nid, 0, 0x110 + (8 * base), umc, &ctx.tmp))
1114 		goto out_err;
1115 
1116 	/* Check if address range is valid. */
1117 	if (!(ctx.tmp & BIT(0))) {
1118 		pr_err("%s: Invalid DramBaseAddress range: 0x%x.\n",
1119 			__func__, ctx.tmp);
1120 		goto out_err;
1121 	}
1122 
1123 	lgcy_mmio_hole_en = ctx.tmp & BIT(1);
1124 	intlv_num_chan	  = (ctx.tmp >> 4) & 0xF;
1125 	intlv_addr_sel	  = (ctx.tmp >> 8) & 0x7;
1126 	dram_base_addr	  = (ctx.tmp & GENMASK_ULL(31, 12)) << 16;
1127 
1128 	/* {0, 1, 2, 3} map to address bits {8, 9, 10, 11} respectively */
1129 	if (intlv_addr_sel > 3) {
1130 		pr_err("%s: Invalid interleave address select %d.\n",
1131 			__func__, intlv_addr_sel);
1132 		goto out_err;
1133 	}
1134 
1135 	/* Read D18F0x114 (DramLimitAddress). */
1136 	if (df_indirect_read_instance(nid, 0, 0x114 + (8 * base), umc, &ctx.tmp))
1137 		goto out_err;
1138 
1139 	intlv_num_sockets = (ctx.tmp >> 8) & 0x1;
1140 	intlv_num_dies	  = (ctx.tmp >> 10) & 0x3;
1141 	dram_limit_addr	  = ((ctx.tmp & GENMASK_ULL(31, 12)) << 16) | GENMASK_ULL(27, 0);
1142 
1143 	intlv_addr_bit = intlv_addr_sel + 8;
1144 
1145 	/* Re-use intlv_num_chan by setting it equal to log2(#channels) */
1146 	switch (intlv_num_chan) {
1147 	case 0:	intlv_num_chan = 0; break;
1148 	case 1: intlv_num_chan = 1; break;
1149 	case 3: intlv_num_chan = 2; break;
1150 	case 5:	intlv_num_chan = 3; break;
1151 	case 7:	intlv_num_chan = 4; break;
1152 
1153 	case 8: intlv_num_chan = 1;
1154 		hash_enabled = true;
1155 		break;
1156 	default:
1157 		pr_err("%s: Invalid number of interleaved channels %d.\n",
1158 			__func__, intlv_num_chan);
1159 		goto out_err;
1160 	}
1161 
1162 	num_intlv_bits = intlv_num_chan;
1163 
1164 	if (intlv_num_dies > 2) {
1165 		pr_err("%s: Invalid number of interleaved nodes/dies %d.\n",
1166 			__func__, intlv_num_dies);
1167 		goto out_err;
1168 	}
1169 
1170 	num_intlv_bits += intlv_num_dies;
1171 
1172 	/* Add a bit if sockets are interleaved. */
1173 	num_intlv_bits += intlv_num_sockets;
1174 
1175 	/* Assert num_intlv_bits <= 4 */
1176 	if (num_intlv_bits > 4) {
1177 		pr_err("%s: Invalid interleave bits %d.\n",
1178 			__func__, num_intlv_bits);
1179 		goto out_err;
1180 	}
1181 
1182 	if (num_intlv_bits > 0) {
1183 		u64 temp_addr_x, temp_addr_i, temp_addr_y;
1184 		u8 die_id_bit, sock_id_bit, cs_fabric_id;
1185 
1186 		/*
1187 		 * Read FabricBlockInstanceInformation3_CS[BlockFabricID].
1188 		 * This is the fabric id for this coherent slave. Use
1189 		 * umc/channel# as instance id of the coherent slave
1190 		 * for FICAA.
1191 		 */
1192 		if (df_indirect_read_instance(nid, 0, 0x50, umc, &ctx.tmp))
1193 			goto out_err;
1194 
1195 		cs_fabric_id = (ctx.tmp >> 8) & 0xFF;
1196 		die_id_bit   = 0;
1197 
1198 		/* If interleaved over more than 1 channel: */
1199 		if (intlv_num_chan) {
1200 			die_id_bit = intlv_num_chan;
1201 			cs_mask	   = (1 << die_id_bit) - 1;
1202 			cs_id	   = cs_fabric_id & cs_mask;
1203 		}
1204 
1205 		sock_id_bit = die_id_bit;
1206 
1207 		/* Read D18F1x208 (SystemFabricIdMask). */
1208 		if (intlv_num_dies || intlv_num_sockets)
1209 			if (df_indirect_read_broadcast(nid, 1, 0x208, &ctx.tmp))
1210 				goto out_err;
1211 
1212 		/* If interleaved over more than 1 die. */
1213 		if (intlv_num_dies) {
1214 			sock_id_bit  = die_id_bit + intlv_num_dies;
1215 			die_id_shift = (ctx.tmp >> 24) & 0xF;
1216 			die_id_mask  = (ctx.tmp >> 8) & 0xFF;
1217 
1218 			cs_id |= ((cs_fabric_id & die_id_mask) >> die_id_shift) << die_id_bit;
1219 		}
1220 
1221 		/* If interleaved over more than 1 socket. */
1222 		if (intlv_num_sockets) {
1223 			socket_id_shift	= (ctx.tmp >> 28) & 0xF;
1224 			socket_id_mask	= (ctx.tmp >> 16) & 0xFF;
1225 
1226 			cs_id |= ((cs_fabric_id & socket_id_mask) >> socket_id_shift) << sock_id_bit;
1227 		}
1228 
1229 		/*
1230 		 * The pre-interleaved address consists of XXXXXXIIIYYYYY
1231 		 * where III is the ID for this CS, and XXXXXXYYYYY are the
1232 		 * address bits from the post-interleaved address.
1233 		 * "num_intlv_bits" has been calculated to tell us how many "I"
1234 		 * bits there are. "intlv_addr_bit" tells us how many "Y" bits
1235 		 * there are (where "I" starts).
1236 		 */
1237 		temp_addr_y = ctx.ret_addr & GENMASK_ULL(intlv_addr_bit - 1, 0);
1238 		temp_addr_i = (cs_id << intlv_addr_bit);
1239 		temp_addr_x = (ctx.ret_addr & GENMASK_ULL(63, intlv_addr_bit)) << num_intlv_bits;
1240 		ctx.ret_addr    = temp_addr_x | temp_addr_i | temp_addr_y;
1241 	}
1242 
1243 	/* Add dram base address */
1244 	ctx.ret_addr += dram_base_addr;
1245 
1246 	/* If legacy MMIO hole enabled */
1247 	if (lgcy_mmio_hole_en) {
1248 		if (df_indirect_read_broadcast(nid, 0, 0x104, &ctx.tmp))
1249 			goto out_err;
1250 
1251 		dram_hole_base = ctx.tmp & GENMASK(31, 24);
1252 		if (ctx.ret_addr >= dram_hole_base)
1253 			ctx.ret_addr += (BIT_ULL(32) - dram_hole_base);
1254 	}
1255 
1256 	if (hash_enabled) {
1257 		/* Save some parentheses and grab ls-bit at the end. */
1258 		hashed_bit =	(ctx.ret_addr >> 12) ^
1259 				(ctx.ret_addr >> 18) ^
1260 				(ctx.ret_addr >> 21) ^
1261 				(ctx.ret_addr >> 30) ^
1262 				cs_id;
1263 
1264 		hashed_bit &= BIT(0);
1265 
1266 		if (hashed_bit != ((ctx.ret_addr >> intlv_addr_bit) & BIT(0)))
1267 			ctx.ret_addr ^= BIT(intlv_addr_bit);
1268 	}
1269 
1270 	/* Is calculated system address is above DRAM limit address? */
1271 	if (ctx.ret_addr > dram_limit_addr)
1272 		goto out_err;
1273 
1274 	*sys_addr = ctx.ret_addr;
1275 	return 0;
1276 
1277 out_err:
1278 	return -EINVAL;
1279 }
1280 
1281 static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);
1282 
1283 /*
1284  * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
1285  * are ECC capable.
1286  */
1287 static unsigned long determine_edac_cap(struct amd64_pvt *pvt)
1288 {
1289 	unsigned long edac_cap = EDAC_FLAG_NONE;
1290 	u8 bit;
1291 
1292 	if (pvt->umc) {
1293 		u8 i, umc_en_mask = 0, dimm_ecc_en_mask = 0;
1294 
1295 		for_each_umc(i) {
1296 			if (!(pvt->umc[i].sdp_ctrl & UMC_SDP_INIT))
1297 				continue;
1298 
1299 			umc_en_mask |= BIT(i);
1300 
1301 			/* UMC Configuration bit 12 (DimmEccEn) */
1302 			if (pvt->umc[i].umc_cfg & BIT(12))
1303 				dimm_ecc_en_mask |= BIT(i);
1304 		}
1305 
1306 		if (umc_en_mask == dimm_ecc_en_mask)
1307 			edac_cap = EDAC_FLAG_SECDED;
1308 	} else {
1309 		bit = (pvt->fam > 0xf || pvt->ext_model >= K8_REV_F)
1310 			? 19
1311 			: 17;
1312 
1313 		if (pvt->dclr0 & BIT(bit))
1314 			edac_cap = EDAC_FLAG_SECDED;
1315 	}
1316 
1317 	return edac_cap;
1318 }
1319 
1320 static void debug_display_dimm_sizes(struct amd64_pvt *, u8);
1321 
1322 static void debug_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan)
1323 {
1324 	edac_dbg(1, "F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);
1325 
1326 	if (pvt->dram_type == MEM_LRDDR3) {
1327 		u32 dcsm = pvt->csels[chan].csmasks[0];
1328 		/*
1329 		 * It's assumed all LRDIMMs in a DCT are going to be of
1330 		 * same 'type' until proven otherwise. So, use a cs
1331 		 * value of '0' here to get dcsm value.
1332 		 */
1333 		edac_dbg(1, " LRDIMM %dx rank multiply\n", (dcsm & 0x3));
1334 	}
1335 
1336 	edac_dbg(1, "All DIMMs support ECC:%s\n",
1337 		    (dclr & BIT(19)) ? "yes" : "no");
1338 
1339 
1340 	edac_dbg(1, "  PAR/ERR parity: %s\n",
1341 		 (dclr & BIT(8)) ?  "enabled" : "disabled");
1342 
1343 	if (pvt->fam == 0x10)
1344 		edac_dbg(1, "  DCT 128bit mode width: %s\n",
1345 			 (dclr & BIT(11)) ?  "128b" : "64b");
1346 
1347 	edac_dbg(1, "  x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
1348 		 (dclr & BIT(12)) ?  "yes" : "no",
1349 		 (dclr & BIT(13)) ?  "yes" : "no",
1350 		 (dclr & BIT(14)) ?  "yes" : "no",
1351 		 (dclr & BIT(15)) ?  "yes" : "no");
1352 }
1353 
1354 #define CS_EVEN_PRIMARY		BIT(0)
1355 #define CS_ODD_PRIMARY		BIT(1)
1356 #define CS_EVEN_SECONDARY	BIT(2)
1357 #define CS_ODD_SECONDARY	BIT(3)
1358 #define CS_3R_INTERLEAVE	BIT(4)
1359 
1360 #define CS_EVEN			(CS_EVEN_PRIMARY | CS_EVEN_SECONDARY)
1361 #define CS_ODD			(CS_ODD_PRIMARY | CS_ODD_SECONDARY)
1362 
1363 static int f17_get_cs_mode(int dimm, u8 ctrl, struct amd64_pvt *pvt)
1364 {
1365 	u8 base, count = 0;
1366 	int cs_mode = 0;
1367 
1368 	if (csrow_enabled(2 * dimm, ctrl, pvt))
1369 		cs_mode |= CS_EVEN_PRIMARY;
1370 
1371 	if (csrow_enabled(2 * dimm + 1, ctrl, pvt))
1372 		cs_mode |= CS_ODD_PRIMARY;
1373 
1374 	/* Asymmetric dual-rank DIMM support. */
1375 	if (csrow_sec_enabled(2 * dimm + 1, ctrl, pvt))
1376 		cs_mode |= CS_ODD_SECONDARY;
1377 
1378 	/*
1379 	 * 3 Rank inteleaving support.
1380 	 * There should be only three bases enabled and their two masks should
1381 	 * be equal.
1382 	 */
1383 	for_each_chip_select(base, ctrl, pvt)
1384 		count += csrow_enabled(base, ctrl, pvt);
1385 
1386 	if (count == 3 &&
1387 	    pvt->csels[ctrl].csmasks[0] == pvt->csels[ctrl].csmasks[1]) {
1388 		edac_dbg(1, "3R interleaving in use.\n");
1389 		cs_mode |= CS_3R_INTERLEAVE;
1390 	}
1391 
1392 	return cs_mode;
1393 }
1394 
1395 static void debug_display_dimm_sizes_df(struct amd64_pvt *pvt, u8 ctrl)
1396 {
1397 	int dimm, size0, size1, cs0, cs1, cs_mode;
1398 
1399 	edac_printk(KERN_DEBUG, EDAC_MC, "UMC%d chip selects:\n", ctrl);
1400 
1401 	for (dimm = 0; dimm < 2; dimm++) {
1402 		cs0 = dimm * 2;
1403 		cs1 = dimm * 2 + 1;
1404 
1405 		cs_mode = f17_get_cs_mode(dimm, ctrl, pvt);
1406 
1407 		size0 = pvt->ops->dbam_to_cs(pvt, ctrl, cs_mode, cs0);
1408 		size1 = pvt->ops->dbam_to_cs(pvt, ctrl, cs_mode, cs1);
1409 
1410 		amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
1411 				cs0,	size0,
1412 				cs1,	size1);
1413 	}
1414 }
1415 
1416 static void __dump_misc_regs_df(struct amd64_pvt *pvt)
1417 {
1418 	struct amd64_umc *umc;
1419 	u32 i, tmp, umc_base;
1420 
1421 	for_each_umc(i) {
1422 		umc_base = get_umc_base(i);
1423 		umc = &pvt->umc[i];
1424 
1425 		edac_dbg(1, "UMC%d DIMM cfg: 0x%x\n", i, umc->dimm_cfg);
1426 		edac_dbg(1, "UMC%d UMC cfg: 0x%x\n", i, umc->umc_cfg);
1427 		edac_dbg(1, "UMC%d SDP ctrl: 0x%x\n", i, umc->sdp_ctrl);
1428 		edac_dbg(1, "UMC%d ECC ctrl: 0x%x\n", i, umc->ecc_ctrl);
1429 
1430 		amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_ECC_BAD_SYMBOL, &tmp);
1431 		edac_dbg(1, "UMC%d ECC bad symbol: 0x%x\n", i, tmp);
1432 
1433 		amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_UMC_CAP, &tmp);
1434 		edac_dbg(1, "UMC%d UMC cap: 0x%x\n", i, tmp);
1435 		edac_dbg(1, "UMC%d UMC cap high: 0x%x\n", i, umc->umc_cap_hi);
1436 
1437 		edac_dbg(1, "UMC%d ECC capable: %s, ChipKill ECC capable: %s\n",
1438 				i, (umc->umc_cap_hi & BIT(30)) ? "yes" : "no",
1439 				    (umc->umc_cap_hi & BIT(31)) ? "yes" : "no");
1440 		edac_dbg(1, "UMC%d All DIMMs support ECC: %s\n",
1441 				i, (umc->umc_cfg & BIT(12)) ? "yes" : "no");
1442 		edac_dbg(1, "UMC%d x4 DIMMs present: %s\n",
1443 				i, (umc->dimm_cfg & BIT(6)) ? "yes" : "no");
1444 		edac_dbg(1, "UMC%d x16 DIMMs present: %s\n",
1445 				i, (umc->dimm_cfg & BIT(7)) ? "yes" : "no");
1446 
1447 		if (umc->dram_type == MEM_LRDDR4 || umc->dram_type == MEM_LRDDR5) {
1448 			amd_smn_read(pvt->mc_node_id,
1449 				     umc_base + get_umc_reg(UMCCH_ADDR_CFG),
1450 				     &tmp);
1451 			edac_dbg(1, "UMC%d LRDIMM %dx rank multiply\n",
1452 					i, 1 << ((tmp >> 4) & 0x3));
1453 		}
1454 
1455 		debug_display_dimm_sizes_df(pvt, i);
1456 	}
1457 
1458 	edac_dbg(1, "F0x104 (DRAM Hole Address): 0x%08x, base: 0x%08x\n",
1459 		 pvt->dhar, dhar_base(pvt));
1460 }
1461 
1462 /* Display and decode various NB registers for debug purposes. */
1463 static void __dump_misc_regs(struct amd64_pvt *pvt)
1464 {
1465 	edac_dbg(1, "F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);
1466 
1467 	edac_dbg(1, "  NB two channel DRAM capable: %s\n",
1468 		 (pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no");
1469 
1470 	edac_dbg(1, "  ECC capable: %s, ChipKill ECC capable: %s\n",
1471 		 (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no",
1472 		 (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no");
1473 
1474 	debug_dump_dramcfg_low(pvt, pvt->dclr0, 0);
1475 
1476 	edac_dbg(1, "F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);
1477 
1478 	edac_dbg(1, "F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, offset: 0x%08x\n",
1479 		 pvt->dhar, dhar_base(pvt),
1480 		 (pvt->fam == 0xf) ? k8_dhar_offset(pvt)
1481 				   : f10_dhar_offset(pvt));
1482 
1483 	debug_display_dimm_sizes(pvt, 0);
1484 
1485 	/* everything below this point is Fam10h and above */
1486 	if (pvt->fam == 0xf)
1487 		return;
1488 
1489 	debug_display_dimm_sizes(pvt, 1);
1490 
1491 	/* Only if NOT ganged does dclr1 have valid info */
1492 	if (!dct_ganging_enabled(pvt))
1493 		debug_dump_dramcfg_low(pvt, pvt->dclr1, 1);
1494 }
1495 
1496 /* Display and decode various NB registers for debug purposes. */
1497 static void dump_misc_regs(struct amd64_pvt *pvt)
1498 {
1499 	if (pvt->umc)
1500 		__dump_misc_regs_df(pvt);
1501 	else
1502 		__dump_misc_regs(pvt);
1503 
1504 	edac_dbg(1, "  DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");
1505 
1506 	amd64_info("using x%u syndromes.\n", pvt->ecc_sym_sz);
1507 }
1508 
1509 /*
1510  * See BKDG, F2x[1,0][5C:40], F2[1,0][6C:60]
1511  */
1512 static void prep_chip_selects(struct amd64_pvt *pvt)
1513 {
1514 	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
1515 		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
1516 		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 8;
1517 	} else if (pvt->fam == 0x15 && pvt->model == 0x30) {
1518 		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 4;
1519 		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 2;
1520 	} else if (pvt->fam >= 0x17) {
1521 		int umc;
1522 
1523 		for_each_umc(umc) {
1524 			pvt->csels[umc].b_cnt = 4;
1525 			pvt->csels[umc].m_cnt = fam_type->flags.zn_regs_v2 ? 4 : 2;
1526 		}
1527 
1528 	} else {
1529 		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
1530 		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4;
1531 	}
1532 }
1533 
1534 static void read_umc_base_mask(struct amd64_pvt *pvt)
1535 {
1536 	u32 umc_base_reg, umc_base_reg_sec;
1537 	u32 umc_mask_reg, umc_mask_reg_sec;
1538 	u32 base_reg, base_reg_sec;
1539 	u32 mask_reg, mask_reg_sec;
1540 	u32 *base, *base_sec;
1541 	u32 *mask, *mask_sec;
1542 	int cs, umc;
1543 
1544 	for_each_umc(umc) {
1545 		umc_base_reg = get_umc_base(umc) + UMCCH_BASE_ADDR;
1546 		umc_base_reg_sec = get_umc_base(umc) + UMCCH_BASE_ADDR_SEC;
1547 
1548 		for_each_chip_select(cs, umc, pvt) {
1549 			base = &pvt->csels[umc].csbases[cs];
1550 			base_sec = &pvt->csels[umc].csbases_sec[cs];
1551 
1552 			base_reg = umc_base_reg + (cs * 4);
1553 			base_reg_sec = umc_base_reg_sec + (cs * 4);
1554 
1555 			if (!amd_smn_read(pvt->mc_node_id, base_reg, base))
1556 				edac_dbg(0, "  DCSB%d[%d]=0x%08x reg: 0x%x\n",
1557 					 umc, cs, *base, base_reg);
1558 
1559 			if (!amd_smn_read(pvt->mc_node_id, base_reg_sec, base_sec))
1560 				edac_dbg(0, "    DCSB_SEC%d[%d]=0x%08x reg: 0x%x\n",
1561 					 umc, cs, *base_sec, base_reg_sec);
1562 		}
1563 
1564 		umc_mask_reg = get_umc_base(umc) + UMCCH_ADDR_MASK;
1565 		umc_mask_reg_sec = get_umc_base(umc) + get_umc_reg(UMCCH_ADDR_MASK_SEC);
1566 
1567 		for_each_chip_select_mask(cs, umc, pvt) {
1568 			mask = &pvt->csels[umc].csmasks[cs];
1569 			mask_sec = &pvt->csels[umc].csmasks_sec[cs];
1570 
1571 			mask_reg = umc_mask_reg + (cs * 4);
1572 			mask_reg_sec = umc_mask_reg_sec + (cs * 4);
1573 
1574 			if (!amd_smn_read(pvt->mc_node_id, mask_reg, mask))
1575 				edac_dbg(0, "  DCSM%d[%d]=0x%08x reg: 0x%x\n",
1576 					 umc, cs, *mask, mask_reg);
1577 
1578 			if (!amd_smn_read(pvt->mc_node_id, mask_reg_sec, mask_sec))
1579 				edac_dbg(0, "    DCSM_SEC%d[%d]=0x%08x reg: 0x%x\n",
1580 					 umc, cs, *mask_sec, mask_reg_sec);
1581 		}
1582 	}
1583 }
1584 
1585 /*
1586  * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask registers
1587  */
1588 static void read_dct_base_mask(struct amd64_pvt *pvt)
1589 {
1590 	int cs;
1591 
1592 	prep_chip_selects(pvt);
1593 
1594 	if (pvt->umc)
1595 		return read_umc_base_mask(pvt);
1596 
1597 	for_each_chip_select(cs, 0, pvt) {
1598 		int reg0   = DCSB0 + (cs * 4);
1599 		int reg1   = DCSB1 + (cs * 4);
1600 		u32 *base0 = &pvt->csels[0].csbases[cs];
1601 		u32 *base1 = &pvt->csels[1].csbases[cs];
1602 
1603 		if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, base0))
1604 			edac_dbg(0, "  DCSB0[%d]=0x%08x reg: F2x%x\n",
1605 				 cs, *base0, reg0);
1606 
1607 		if (pvt->fam == 0xf)
1608 			continue;
1609 
1610 		if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, base1))
1611 			edac_dbg(0, "  DCSB1[%d]=0x%08x reg: F2x%x\n",
1612 				 cs, *base1, (pvt->fam == 0x10) ? reg1
1613 							: reg0);
1614 	}
1615 
1616 	for_each_chip_select_mask(cs, 0, pvt) {
1617 		int reg0   = DCSM0 + (cs * 4);
1618 		int reg1   = DCSM1 + (cs * 4);
1619 		u32 *mask0 = &pvt->csels[0].csmasks[cs];
1620 		u32 *mask1 = &pvt->csels[1].csmasks[cs];
1621 
1622 		if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, mask0))
1623 			edac_dbg(0, "    DCSM0[%d]=0x%08x reg: F2x%x\n",
1624 				 cs, *mask0, reg0);
1625 
1626 		if (pvt->fam == 0xf)
1627 			continue;
1628 
1629 		if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, mask1))
1630 			edac_dbg(0, "    DCSM1[%d]=0x%08x reg: F2x%x\n",
1631 				 cs, *mask1, (pvt->fam == 0x10) ? reg1
1632 							: reg0);
1633 	}
1634 }
1635 
1636 static void determine_memory_type_df(struct amd64_pvt *pvt)
1637 {
1638 	struct amd64_umc *umc;
1639 	u32 i;
1640 
1641 	for_each_umc(i) {
1642 		umc = &pvt->umc[i];
1643 
1644 		if (!(umc->sdp_ctrl & UMC_SDP_INIT)) {
1645 			umc->dram_type = MEM_EMPTY;
1646 			continue;
1647 		}
1648 
1649 		/*
1650 		 * Check if the system supports the "DDR Type" field in UMC Config
1651 		 * and has DDR5 DIMMs in use.
1652 		 */
1653 		if (fam_type->flags.zn_regs_v2 && ((umc->umc_cfg & GENMASK(2, 0)) == 0x1)) {
1654 			if (umc->dimm_cfg & BIT(5))
1655 				umc->dram_type = MEM_LRDDR5;
1656 			else if (umc->dimm_cfg & BIT(4))
1657 				umc->dram_type = MEM_RDDR5;
1658 			else
1659 				umc->dram_type = MEM_DDR5;
1660 		} else {
1661 			if (umc->dimm_cfg & BIT(5))
1662 				umc->dram_type = MEM_LRDDR4;
1663 			else if (umc->dimm_cfg & BIT(4))
1664 				umc->dram_type = MEM_RDDR4;
1665 			else
1666 				umc->dram_type = MEM_DDR4;
1667 		}
1668 
1669 		edac_dbg(1, "  UMC%d DIMM type: %s\n", i, edac_mem_types[umc->dram_type]);
1670 	}
1671 }
1672 
1673 static void determine_memory_type(struct amd64_pvt *pvt)
1674 {
1675 	u32 dram_ctrl, dcsm;
1676 
1677 	if (pvt->umc)
1678 		return determine_memory_type_df(pvt);
1679 
1680 	switch (pvt->fam) {
1681 	case 0xf:
1682 		if (pvt->ext_model >= K8_REV_F)
1683 			goto ddr3;
1684 
1685 		pvt->dram_type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;
1686 		return;
1687 
1688 	case 0x10:
1689 		if (pvt->dchr0 & DDR3_MODE)
1690 			goto ddr3;
1691 
1692 		pvt->dram_type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
1693 		return;
1694 
1695 	case 0x15:
1696 		if (pvt->model < 0x60)
1697 			goto ddr3;
1698 
1699 		/*
1700 		 * Model 0x60h needs special handling:
1701 		 *
1702 		 * We use a Chip Select value of '0' to obtain dcsm.
1703 		 * Theoretically, it is possible to populate LRDIMMs of different
1704 		 * 'Rank' value on a DCT. But this is not the common case. So,
1705 		 * it's reasonable to assume all DIMMs are going to be of same
1706 		 * 'type' until proven otherwise.
1707 		 */
1708 		amd64_read_dct_pci_cfg(pvt, 0, DRAM_CONTROL, &dram_ctrl);
1709 		dcsm = pvt->csels[0].csmasks[0];
1710 
1711 		if (((dram_ctrl >> 8) & 0x7) == 0x2)
1712 			pvt->dram_type = MEM_DDR4;
1713 		else if (pvt->dclr0 & BIT(16))
1714 			pvt->dram_type = MEM_DDR3;
1715 		else if (dcsm & 0x3)
1716 			pvt->dram_type = MEM_LRDDR3;
1717 		else
1718 			pvt->dram_type = MEM_RDDR3;
1719 
1720 		return;
1721 
1722 	case 0x16:
1723 		goto ddr3;
1724 
1725 	default:
1726 		WARN(1, KERN_ERR "%s: Family??? 0x%x\n", __func__, pvt->fam);
1727 		pvt->dram_type = MEM_EMPTY;
1728 	}
1729 	return;
1730 
1731 ddr3:
1732 	pvt->dram_type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
1733 }
1734 
1735 /* Get the number of DCT channels the memory controller is using. */
1736 static int k8_early_channel_count(struct amd64_pvt *pvt)
1737 {
1738 	int flag;
1739 
1740 	if (pvt->ext_model >= K8_REV_F)
1741 		/* RevF (NPT) and later */
1742 		flag = pvt->dclr0 & WIDTH_128;
1743 	else
1744 		/* RevE and earlier */
1745 		flag = pvt->dclr0 & REVE_WIDTH_128;
1746 
1747 	/* not used */
1748 	pvt->dclr1 = 0;
1749 
1750 	return (flag) ? 2 : 1;
1751 }
1752 
1753 /* On F10h and later ErrAddr is MC4_ADDR[47:1] */
1754 static u64 get_error_address(struct amd64_pvt *pvt, struct mce *m)
1755 {
1756 	u16 mce_nid = topology_die_id(m->extcpu);
1757 	struct mem_ctl_info *mci;
1758 	u8 start_bit = 1;
1759 	u8 end_bit   = 47;
1760 	u64 addr;
1761 
1762 	mci = edac_mc_find(mce_nid);
1763 	if (!mci)
1764 		return 0;
1765 
1766 	pvt = mci->pvt_info;
1767 
1768 	if (pvt->fam == 0xf) {
1769 		start_bit = 3;
1770 		end_bit   = 39;
1771 	}
1772 
1773 	addr = m->addr & GENMASK_ULL(end_bit, start_bit);
1774 
1775 	/*
1776 	 * Erratum 637 workaround
1777 	 */
1778 	if (pvt->fam == 0x15) {
1779 		u64 cc6_base, tmp_addr;
1780 		u32 tmp;
1781 		u8 intlv_en;
1782 
1783 		if ((addr & GENMASK_ULL(47, 24)) >> 24 != 0x00fdf7)
1784 			return addr;
1785 
1786 
1787 		amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_LIM, &tmp);
1788 		intlv_en = tmp >> 21 & 0x7;
1789 
1790 		/* add [47:27] + 3 trailing bits */
1791 		cc6_base  = (tmp & GENMASK_ULL(20, 0)) << 3;
1792 
1793 		/* reverse and add DramIntlvEn */
1794 		cc6_base |= intlv_en ^ 0x7;
1795 
1796 		/* pin at [47:24] */
1797 		cc6_base <<= 24;
1798 
1799 		if (!intlv_en)
1800 			return cc6_base | (addr & GENMASK_ULL(23, 0));
1801 
1802 		amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_BASE, &tmp);
1803 
1804 							/* faster log2 */
1805 		tmp_addr  = (addr & GENMASK_ULL(23, 12)) << __fls(intlv_en + 1);
1806 
1807 		/* OR DramIntlvSel into bits [14:12] */
1808 		tmp_addr |= (tmp & GENMASK_ULL(23, 21)) >> 9;
1809 
1810 		/* add remaining [11:0] bits from original MC4_ADDR */
1811 		tmp_addr |= addr & GENMASK_ULL(11, 0);
1812 
1813 		return cc6_base | tmp_addr;
1814 	}
1815 
1816 	return addr;
1817 }
1818 
1819 static struct pci_dev *pci_get_related_function(unsigned int vendor,
1820 						unsigned int device,
1821 						struct pci_dev *related)
1822 {
1823 	struct pci_dev *dev = NULL;
1824 
1825 	while ((dev = pci_get_device(vendor, device, dev))) {
1826 		if (pci_domain_nr(dev->bus) == pci_domain_nr(related->bus) &&
1827 		    (dev->bus->number == related->bus->number) &&
1828 		    (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
1829 			break;
1830 	}
1831 
1832 	return dev;
1833 }
1834 
1835 static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
1836 {
1837 	struct amd_northbridge *nb;
1838 	struct pci_dev *f1 = NULL;
1839 	unsigned int pci_func;
1840 	int off = range << 3;
1841 	u32 llim;
1842 
1843 	amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off,  &pvt->ranges[range].base.lo);
1844 	amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo);
1845 
1846 	if (pvt->fam == 0xf)
1847 		return;
1848 
1849 	if (!dram_rw(pvt, range))
1850 		return;
1851 
1852 	amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off,  &pvt->ranges[range].base.hi);
1853 	amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi);
1854 
1855 	/* F15h: factor in CC6 save area by reading dst node's limit reg */
1856 	if (pvt->fam != 0x15)
1857 		return;
1858 
1859 	nb = node_to_amd_nb(dram_dst_node(pvt, range));
1860 	if (WARN_ON(!nb))
1861 		return;
1862 
1863 	if (pvt->model == 0x60)
1864 		pci_func = PCI_DEVICE_ID_AMD_15H_M60H_NB_F1;
1865 	else if (pvt->model == 0x30)
1866 		pci_func = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1;
1867 	else
1868 		pci_func = PCI_DEVICE_ID_AMD_15H_NB_F1;
1869 
1870 	f1 = pci_get_related_function(nb->misc->vendor, pci_func, nb->misc);
1871 	if (WARN_ON(!f1))
1872 		return;
1873 
1874 	amd64_read_pci_cfg(f1, DRAM_LOCAL_NODE_LIM, &llim);
1875 
1876 	pvt->ranges[range].lim.lo &= GENMASK_ULL(15, 0);
1877 
1878 				    /* {[39:27],111b} */
1879 	pvt->ranges[range].lim.lo |= ((llim & 0x1fff) << 3 | 0x7) << 16;
1880 
1881 	pvt->ranges[range].lim.hi &= GENMASK_ULL(7, 0);
1882 
1883 				    /* [47:40] */
1884 	pvt->ranges[range].lim.hi |= llim >> 13;
1885 
1886 	pci_dev_put(f1);
1887 }
1888 
1889 static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
1890 				    struct err_info *err)
1891 {
1892 	struct amd64_pvt *pvt = mci->pvt_info;
1893 
1894 	error_address_to_page_and_offset(sys_addr, err);
1895 
1896 	/*
1897 	 * Find out which node the error address belongs to. This may be
1898 	 * different from the node that detected the error.
1899 	 */
1900 	err->src_mci = find_mc_by_sys_addr(mci, sys_addr);
1901 	if (!err->src_mci) {
1902 		amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n",
1903 			     (unsigned long)sys_addr);
1904 		err->err_code = ERR_NODE;
1905 		return;
1906 	}
1907 
1908 	/* Now map the sys_addr to a CSROW */
1909 	err->csrow = sys_addr_to_csrow(err->src_mci, sys_addr);
1910 	if (err->csrow < 0) {
1911 		err->err_code = ERR_CSROW;
1912 		return;
1913 	}
1914 
1915 	/* CHIPKILL enabled */
1916 	if (pvt->nbcfg & NBCFG_CHIPKILL) {
1917 		err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome);
1918 		if (err->channel < 0) {
1919 			/*
1920 			 * Syndrome didn't map, so we don't know which of the
1921 			 * 2 DIMMs is in error. So we need to ID 'both' of them
1922 			 * as suspect.
1923 			 */
1924 			amd64_mc_warn(err->src_mci, "unknown syndrome 0x%04x - "
1925 				      "possible error reporting race\n",
1926 				      err->syndrome);
1927 			err->err_code = ERR_CHANNEL;
1928 			return;
1929 		}
1930 	} else {
1931 		/*
1932 		 * non-chipkill ecc mode
1933 		 *
1934 		 * The k8 documentation is unclear about how to determine the
1935 		 * channel number when using non-chipkill memory.  This method
1936 		 * was obtained from email communication with someone at AMD.
1937 		 * (Wish the email was placed in this comment - norsk)
1938 		 */
1939 		err->channel = ((sys_addr & BIT(3)) != 0);
1940 	}
1941 }
1942 
1943 static int ddr2_cs_size(unsigned i, bool dct_width)
1944 {
1945 	unsigned shift = 0;
1946 
1947 	if (i <= 2)
1948 		shift = i;
1949 	else if (!(i & 0x1))
1950 		shift = i >> 1;
1951 	else
1952 		shift = (i + 1) >> 1;
1953 
1954 	return 128 << (shift + !!dct_width);
1955 }
1956 
1957 static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1958 				  unsigned cs_mode, int cs_mask_nr)
1959 {
1960 	u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
1961 
1962 	if (pvt->ext_model >= K8_REV_F) {
1963 		WARN_ON(cs_mode > 11);
1964 		return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
1965 	}
1966 	else if (pvt->ext_model >= K8_REV_D) {
1967 		unsigned diff;
1968 		WARN_ON(cs_mode > 10);
1969 
1970 		/*
1971 		 * the below calculation, besides trying to win an obfuscated C
1972 		 * contest, maps cs_mode values to DIMM chip select sizes. The
1973 		 * mappings are:
1974 		 *
1975 		 * cs_mode	CS size (mb)
1976 		 * =======	============
1977 		 * 0		32
1978 		 * 1		64
1979 		 * 2		128
1980 		 * 3		128
1981 		 * 4		256
1982 		 * 5		512
1983 		 * 6		256
1984 		 * 7		512
1985 		 * 8		1024
1986 		 * 9		1024
1987 		 * 10		2048
1988 		 *
1989 		 * Basically, it calculates a value with which to shift the
1990 		 * smallest CS size of 32MB.
1991 		 *
1992 		 * ddr[23]_cs_size have a similar purpose.
1993 		 */
1994 		diff = cs_mode/3 + (unsigned)(cs_mode > 5);
1995 
1996 		return 32 << (cs_mode - diff);
1997 	}
1998 	else {
1999 		WARN_ON(cs_mode > 6);
2000 		return 32 << cs_mode;
2001 	}
2002 }
2003 
2004 /*
2005  * Get the number of DCT channels in use.
2006  *
2007  * Return:
2008  *	number of Memory Channels in operation
2009  * Pass back:
2010  *	contents of the DCL0_LOW register
2011  */
2012 static int f1x_early_channel_count(struct amd64_pvt *pvt)
2013 {
2014 	int i, j, channels = 0;
2015 
2016 	/* On F10h, if we are in 128 bit mode, then we are using 2 channels */
2017 	if (pvt->fam == 0x10 && (pvt->dclr0 & WIDTH_128))
2018 		return 2;
2019 
2020 	/*
2021 	 * Need to check if in unganged mode: In such, there are 2 channels,
2022 	 * but they are not in 128 bit mode and thus the above 'dclr0' status
2023 	 * bit will be OFF.
2024 	 *
2025 	 * Need to check DCT0[0] and DCT1[0] to see if only one of them has
2026 	 * their CSEnable bit on. If so, then SINGLE DIMM case.
2027 	 */
2028 	edac_dbg(0, "Data width is not 128 bits - need more decoding\n");
2029 
2030 	/*
2031 	 * Check DRAM Bank Address Mapping values for each DIMM to see if there
2032 	 * is more than just one DIMM present in unganged mode. Need to check
2033 	 * both controllers since DIMMs can be placed in either one.
2034 	 */
2035 	for (i = 0; i < 2; i++) {
2036 		u32 dbam = (i ? pvt->dbam1 : pvt->dbam0);
2037 
2038 		for (j = 0; j < 4; j++) {
2039 			if (DBAM_DIMM(j, dbam) > 0) {
2040 				channels++;
2041 				break;
2042 			}
2043 		}
2044 	}
2045 
2046 	if (channels > 2)
2047 		channels = 2;
2048 
2049 	amd64_info("MCT channel count: %d\n", channels);
2050 
2051 	return channels;
2052 }
2053 
2054 static int f17_early_channel_count(struct amd64_pvt *pvt)
2055 {
2056 	int i, channels = 0;
2057 
2058 	/* SDP Control bit 31 (SdpInit) is clear for unused UMC channels */
2059 	for_each_umc(i)
2060 		channels += !!(pvt->umc[i].sdp_ctrl & UMC_SDP_INIT);
2061 
2062 	amd64_info("MCT channel count: %d\n", channels);
2063 
2064 	return channels;
2065 }
2066 
2067 static int ddr3_cs_size(unsigned i, bool dct_width)
2068 {
2069 	unsigned shift = 0;
2070 	int cs_size = 0;
2071 
2072 	if (i == 0 || i == 3 || i == 4)
2073 		cs_size = -1;
2074 	else if (i <= 2)
2075 		shift = i;
2076 	else if (i == 12)
2077 		shift = 7;
2078 	else if (!(i & 0x1))
2079 		shift = i >> 1;
2080 	else
2081 		shift = (i + 1) >> 1;
2082 
2083 	if (cs_size != -1)
2084 		cs_size = (128 * (1 << !!dct_width)) << shift;
2085 
2086 	return cs_size;
2087 }
2088 
2089 static int ddr3_lrdimm_cs_size(unsigned i, unsigned rank_multiply)
2090 {
2091 	unsigned shift = 0;
2092 	int cs_size = 0;
2093 
2094 	if (i < 4 || i == 6)
2095 		cs_size = -1;
2096 	else if (i == 12)
2097 		shift = 7;
2098 	else if (!(i & 0x1))
2099 		shift = i >> 1;
2100 	else
2101 		shift = (i + 1) >> 1;
2102 
2103 	if (cs_size != -1)
2104 		cs_size = rank_multiply * (128 << shift);
2105 
2106 	return cs_size;
2107 }
2108 
2109 static int ddr4_cs_size(unsigned i)
2110 {
2111 	int cs_size = 0;
2112 
2113 	if (i == 0)
2114 		cs_size = -1;
2115 	else if (i == 1)
2116 		cs_size = 1024;
2117 	else
2118 		/* Min cs_size = 1G */
2119 		cs_size = 1024 * (1 << (i >> 1));
2120 
2121 	return cs_size;
2122 }
2123 
2124 static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
2125 				   unsigned cs_mode, int cs_mask_nr)
2126 {
2127 	u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
2128 
2129 	WARN_ON(cs_mode > 11);
2130 
2131 	if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
2132 		return ddr3_cs_size(cs_mode, dclr & WIDTH_128);
2133 	else
2134 		return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
2135 }
2136 
2137 /*
2138  * F15h supports only 64bit DCT interfaces
2139  */
2140 static int f15_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
2141 				   unsigned cs_mode, int cs_mask_nr)
2142 {
2143 	WARN_ON(cs_mode > 12);
2144 
2145 	return ddr3_cs_size(cs_mode, false);
2146 }
2147 
2148 /* F15h M60h supports DDR4 mapping as well.. */
2149 static int f15_m60h_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
2150 					unsigned cs_mode, int cs_mask_nr)
2151 {
2152 	int cs_size;
2153 	u32 dcsm = pvt->csels[dct].csmasks[cs_mask_nr];
2154 
2155 	WARN_ON(cs_mode > 12);
2156 
2157 	if (pvt->dram_type == MEM_DDR4) {
2158 		if (cs_mode > 9)
2159 			return -1;
2160 
2161 		cs_size = ddr4_cs_size(cs_mode);
2162 	} else if (pvt->dram_type == MEM_LRDDR3) {
2163 		unsigned rank_multiply = dcsm & 0xf;
2164 
2165 		if (rank_multiply == 3)
2166 			rank_multiply = 4;
2167 		cs_size = ddr3_lrdimm_cs_size(cs_mode, rank_multiply);
2168 	} else {
2169 		/* Minimum cs size is 512mb for F15hM60h*/
2170 		if (cs_mode == 0x1)
2171 			return -1;
2172 
2173 		cs_size = ddr3_cs_size(cs_mode, false);
2174 	}
2175 
2176 	return cs_size;
2177 }
2178 
2179 /*
2180  * F16h and F15h model 30h have only limited cs_modes.
2181  */
2182 static int f16_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
2183 				unsigned cs_mode, int cs_mask_nr)
2184 {
2185 	WARN_ON(cs_mode > 12);
2186 
2187 	if (cs_mode == 6 || cs_mode == 8 ||
2188 	    cs_mode == 9 || cs_mode == 12)
2189 		return -1;
2190 	else
2191 		return ddr3_cs_size(cs_mode, false);
2192 }
2193 
2194 static int f17_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc,
2195 				    unsigned int cs_mode, int csrow_nr)
2196 {
2197 	u32 addr_mask_orig, addr_mask_deinterleaved;
2198 	u32 msb, weight, num_zero_bits;
2199 	int cs_mask_nr = csrow_nr;
2200 	int dimm, size = 0;
2201 
2202 	/* No Chip Selects are enabled. */
2203 	if (!cs_mode)
2204 		return size;
2205 
2206 	/* Requested size of an even CS but none are enabled. */
2207 	if (!(cs_mode & CS_EVEN) && !(csrow_nr & 1))
2208 		return size;
2209 
2210 	/* Requested size of an odd CS but none are enabled. */
2211 	if (!(cs_mode & CS_ODD) && (csrow_nr & 1))
2212 		return size;
2213 
2214 	/*
2215 	 * Family 17h introduced systems with one mask per DIMM,
2216 	 * and two Chip Selects per DIMM.
2217 	 *
2218 	 *	CS0 and CS1 -> MASK0 / DIMM0
2219 	 *	CS2 and CS3 -> MASK1 / DIMM1
2220 	 *
2221 	 * Family 19h Model 10h introduced systems with one mask per Chip Select,
2222 	 * and two Chip Selects per DIMM.
2223 	 *
2224 	 *	CS0 -> MASK0 -> DIMM0
2225 	 *	CS1 -> MASK1 -> DIMM0
2226 	 *	CS2 -> MASK2 -> DIMM1
2227 	 *	CS3 -> MASK3 -> DIMM1
2228 	 *
2229 	 * Keep the mask number equal to the Chip Select number for newer systems,
2230 	 * and shift the mask number for older systems.
2231 	 */
2232 	dimm = csrow_nr >> 1;
2233 
2234 	if (!fam_type->flags.zn_regs_v2)
2235 		cs_mask_nr >>= 1;
2236 
2237 	/* Asymmetric dual-rank DIMM support. */
2238 	if ((csrow_nr & 1) && (cs_mode & CS_ODD_SECONDARY))
2239 		addr_mask_orig = pvt->csels[umc].csmasks_sec[cs_mask_nr];
2240 	else
2241 		addr_mask_orig = pvt->csels[umc].csmasks[cs_mask_nr];
2242 
2243 	/*
2244 	 * The number of zero bits in the mask is equal to the number of bits
2245 	 * in a full mask minus the number of bits in the current mask.
2246 	 *
2247 	 * The MSB is the number of bits in the full mask because BIT[0] is
2248 	 * always 0.
2249 	 *
2250 	 * In the special 3 Rank interleaving case, a single bit is flipped
2251 	 * without swapping with the most significant bit. This can be handled
2252 	 * by keeping the MSB where it is and ignoring the single zero bit.
2253 	 */
2254 	msb = fls(addr_mask_orig) - 1;
2255 	weight = hweight_long(addr_mask_orig);
2256 	num_zero_bits = msb - weight - !!(cs_mode & CS_3R_INTERLEAVE);
2257 
2258 	/* Take the number of zero bits off from the top of the mask. */
2259 	addr_mask_deinterleaved = GENMASK_ULL(msb - num_zero_bits, 1);
2260 
2261 	edac_dbg(1, "CS%d DIMM%d AddrMasks:\n", csrow_nr, dimm);
2262 	edac_dbg(1, "  Original AddrMask: 0x%x\n", addr_mask_orig);
2263 	edac_dbg(1, "  Deinterleaved AddrMask: 0x%x\n", addr_mask_deinterleaved);
2264 
2265 	/* Register [31:1] = Address [39:9]. Size is in kBs here. */
2266 	size = (addr_mask_deinterleaved >> 2) + 1;
2267 
2268 	/* Return size in MBs. */
2269 	return size >> 10;
2270 }
2271 
2272 static void read_dram_ctl_register(struct amd64_pvt *pvt)
2273 {
2274 
2275 	if (pvt->fam == 0xf)
2276 		return;
2277 
2278 	if (!amd64_read_pci_cfg(pvt->F2, DCT_SEL_LO, &pvt->dct_sel_lo)) {
2279 		edac_dbg(0, "F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n",
2280 			 pvt->dct_sel_lo, dct_sel_baseaddr(pvt));
2281 
2282 		edac_dbg(0, "  DCTs operate in %s mode\n",
2283 			 (dct_ganging_enabled(pvt) ? "ganged" : "unganged"));
2284 
2285 		if (!dct_ganging_enabled(pvt))
2286 			edac_dbg(0, "  Address range split per DCT: %s\n",
2287 				 (dct_high_range_enabled(pvt) ? "yes" : "no"));
2288 
2289 		edac_dbg(0, "  data interleave for ECC: %s, DRAM cleared since last warm reset: %s\n",
2290 			 (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
2291 			 (dct_memory_cleared(pvt) ? "yes" : "no"));
2292 
2293 		edac_dbg(0, "  channel interleave: %s, "
2294 			 "interleave bits selector: 0x%x\n",
2295 			 (dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
2296 			 dct_sel_interleave_addr(pvt));
2297 	}
2298 
2299 	amd64_read_pci_cfg(pvt->F2, DCT_SEL_HI, &pvt->dct_sel_hi);
2300 }
2301 
2302 /*
2303  * Determine channel (DCT) based on the interleaving mode (see F15h M30h BKDG,
2304  * 2.10.12 Memory Interleaving Modes).
2305  */
2306 static u8 f15_m30h_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
2307 				     u8 intlv_en, int num_dcts_intlv,
2308 				     u32 dct_sel)
2309 {
2310 	u8 channel = 0;
2311 	u8 select;
2312 
2313 	if (!(intlv_en))
2314 		return (u8)(dct_sel);
2315 
2316 	if (num_dcts_intlv == 2) {
2317 		select = (sys_addr >> 8) & 0x3;
2318 		channel = select ? 0x3 : 0;
2319 	} else if (num_dcts_intlv == 4) {
2320 		u8 intlv_addr = dct_sel_interleave_addr(pvt);
2321 		switch (intlv_addr) {
2322 		case 0x4:
2323 			channel = (sys_addr >> 8) & 0x3;
2324 			break;
2325 		case 0x5:
2326 			channel = (sys_addr >> 9) & 0x3;
2327 			break;
2328 		}
2329 	}
2330 	return channel;
2331 }
2332 
2333 /*
2334  * Determine channel (DCT) based on the interleaving mode: F10h BKDG, 2.8.9 Memory
2335  * Interleaving Modes.
2336  */
2337 static u8 f1x_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
2338 				bool hi_range_sel, u8 intlv_en)
2339 {
2340 	u8 dct_sel_high = (pvt->dct_sel_lo >> 1) & 1;
2341 
2342 	if (dct_ganging_enabled(pvt))
2343 		return 0;
2344 
2345 	if (hi_range_sel)
2346 		return dct_sel_high;
2347 
2348 	/*
2349 	 * see F2x110[DctSelIntLvAddr] - channel interleave mode
2350 	 */
2351 	if (dct_interleave_enabled(pvt)) {
2352 		u8 intlv_addr = dct_sel_interleave_addr(pvt);
2353 
2354 		/* return DCT select function: 0=DCT0, 1=DCT1 */
2355 		if (!intlv_addr)
2356 			return sys_addr >> 6 & 1;
2357 
2358 		if (intlv_addr & 0x2) {
2359 			u8 shift = intlv_addr & 0x1 ? 9 : 6;
2360 			u32 temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) & 1;
2361 
2362 			return ((sys_addr >> shift) & 1) ^ temp;
2363 		}
2364 
2365 		if (intlv_addr & 0x4) {
2366 			u8 shift = intlv_addr & 0x1 ? 9 : 8;
2367 
2368 			return (sys_addr >> shift) & 1;
2369 		}
2370 
2371 		return (sys_addr >> (12 + hweight8(intlv_en))) & 1;
2372 	}
2373 
2374 	if (dct_high_range_enabled(pvt))
2375 		return ~dct_sel_high & 1;
2376 
2377 	return 0;
2378 }
2379 
2380 /* Convert the sys_addr to the normalized DCT address */
2381 static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, u8 range,
2382 				 u64 sys_addr, bool hi_rng,
2383 				 u32 dct_sel_base_addr)
2384 {
2385 	u64 chan_off;
2386 	u64 dram_base		= get_dram_base(pvt, range);
2387 	u64 hole_off		= f10_dhar_offset(pvt);
2388 	u64 dct_sel_base_off	= (u64)(pvt->dct_sel_hi & 0xFFFFFC00) << 16;
2389 
2390 	if (hi_rng) {
2391 		/*
2392 		 * if
2393 		 * base address of high range is below 4Gb
2394 		 * (bits [47:27] at [31:11])
2395 		 * DRAM address space on this DCT is hoisted above 4Gb	&&
2396 		 * sys_addr > 4Gb
2397 		 *
2398 		 *	remove hole offset from sys_addr
2399 		 * else
2400 		 *	remove high range offset from sys_addr
2401 		 */
2402 		if ((!(dct_sel_base_addr >> 16) ||
2403 		     dct_sel_base_addr < dhar_base(pvt)) &&
2404 		    dhar_valid(pvt) &&
2405 		    (sys_addr >= BIT_64(32)))
2406 			chan_off = hole_off;
2407 		else
2408 			chan_off = dct_sel_base_off;
2409 	} else {
2410 		/*
2411 		 * if
2412 		 * we have a valid hole		&&
2413 		 * sys_addr > 4Gb
2414 		 *
2415 		 *	remove hole
2416 		 * else
2417 		 *	remove dram base to normalize to DCT address
2418 		 */
2419 		if (dhar_valid(pvt) && (sys_addr >= BIT_64(32)))
2420 			chan_off = hole_off;
2421 		else
2422 			chan_off = dram_base;
2423 	}
2424 
2425 	return (sys_addr & GENMASK_ULL(47,6)) - (chan_off & GENMASK_ULL(47,23));
2426 }
2427 
2428 /*
2429  * checks if the csrow passed in is marked as SPARED, if so returns the new
2430  * spare row
2431  */
2432 static int f10_process_possible_spare(struct amd64_pvt *pvt, u8 dct, int csrow)
2433 {
2434 	int tmp_cs;
2435 
2436 	if (online_spare_swap_done(pvt, dct) &&
2437 	    csrow == online_spare_bad_dramcs(pvt, dct)) {
2438 
2439 		for_each_chip_select(tmp_cs, dct, pvt) {
2440 			if (chip_select_base(tmp_cs, dct, pvt) & 0x2) {
2441 				csrow = tmp_cs;
2442 				break;
2443 			}
2444 		}
2445 	}
2446 	return csrow;
2447 }
2448 
2449 /*
2450  * Iterate over the DRAM DCT "base" and "mask" registers looking for a
2451  * SystemAddr match on the specified 'ChannelSelect' and 'NodeID'
2452  *
2453  * Return:
2454  *	-EINVAL:  NOT FOUND
2455  *	0..csrow = Chip-Select Row
2456  */
2457 static int f1x_lookup_addr_in_dct(u64 in_addr, u8 nid, u8 dct)
2458 {
2459 	struct mem_ctl_info *mci;
2460 	struct amd64_pvt *pvt;
2461 	u64 cs_base, cs_mask;
2462 	int cs_found = -EINVAL;
2463 	int csrow;
2464 
2465 	mci = edac_mc_find(nid);
2466 	if (!mci)
2467 		return cs_found;
2468 
2469 	pvt = mci->pvt_info;
2470 
2471 	edac_dbg(1, "input addr: 0x%llx, DCT: %d\n", in_addr, dct);
2472 
2473 	for_each_chip_select(csrow, dct, pvt) {
2474 		if (!csrow_enabled(csrow, dct, pvt))
2475 			continue;
2476 
2477 		get_cs_base_and_mask(pvt, csrow, dct, &cs_base, &cs_mask);
2478 
2479 		edac_dbg(1, "    CSROW=%d CSBase=0x%llx CSMask=0x%llx\n",
2480 			 csrow, cs_base, cs_mask);
2481 
2482 		cs_mask = ~cs_mask;
2483 
2484 		edac_dbg(1, "    (InputAddr & ~CSMask)=0x%llx (CSBase & ~CSMask)=0x%llx\n",
2485 			 (in_addr & cs_mask), (cs_base & cs_mask));
2486 
2487 		if ((in_addr & cs_mask) == (cs_base & cs_mask)) {
2488 			if (pvt->fam == 0x15 && pvt->model >= 0x30) {
2489 				cs_found =  csrow;
2490 				break;
2491 			}
2492 			cs_found = f10_process_possible_spare(pvt, dct, csrow);
2493 
2494 			edac_dbg(1, " MATCH csrow=%d\n", cs_found);
2495 			break;
2496 		}
2497 	}
2498 	return cs_found;
2499 }
2500 
2501 /*
2502  * See F2x10C. Non-interleaved graphics framebuffer memory under the 16G is
2503  * swapped with a region located at the bottom of memory so that the GPU can use
2504  * the interleaved region and thus two channels.
2505  */
2506 static u64 f1x_swap_interleaved_region(struct amd64_pvt *pvt, u64 sys_addr)
2507 {
2508 	u32 swap_reg, swap_base, swap_limit, rgn_size, tmp_addr;
2509 
2510 	if (pvt->fam == 0x10) {
2511 		/* only revC3 and revE have that feature */
2512 		if (pvt->model < 4 || (pvt->model < 0xa && pvt->stepping < 3))
2513 			return sys_addr;
2514 	}
2515 
2516 	amd64_read_pci_cfg(pvt->F2, SWAP_INTLV_REG, &swap_reg);
2517 
2518 	if (!(swap_reg & 0x1))
2519 		return sys_addr;
2520 
2521 	swap_base	= (swap_reg >> 3) & 0x7f;
2522 	swap_limit	= (swap_reg >> 11) & 0x7f;
2523 	rgn_size	= (swap_reg >> 20) & 0x7f;
2524 	tmp_addr	= sys_addr >> 27;
2525 
2526 	if (!(sys_addr >> 34) &&
2527 	    (((tmp_addr >= swap_base) &&
2528 	     (tmp_addr <= swap_limit)) ||
2529 	     (tmp_addr < rgn_size)))
2530 		return sys_addr ^ (u64)swap_base << 27;
2531 
2532 	return sys_addr;
2533 }
2534 
2535 /* For a given @dram_range, check if @sys_addr falls within it. */
2536 static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
2537 				  u64 sys_addr, int *chan_sel)
2538 {
2539 	int cs_found = -EINVAL;
2540 	u64 chan_addr;
2541 	u32 dct_sel_base;
2542 	u8 channel;
2543 	bool high_range = false;
2544 
2545 	u8 node_id    = dram_dst_node(pvt, range);
2546 	u8 intlv_en   = dram_intlv_en(pvt, range);
2547 	u32 intlv_sel = dram_intlv_sel(pvt, range);
2548 
2549 	edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
2550 		 range, sys_addr, get_dram_limit(pvt, range));
2551 
2552 	if (dhar_valid(pvt) &&
2553 	    dhar_base(pvt) <= sys_addr &&
2554 	    sys_addr < BIT_64(32)) {
2555 		amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
2556 			    sys_addr);
2557 		return -EINVAL;
2558 	}
2559 
2560 	if (intlv_en && (intlv_sel != ((sys_addr >> 12) & intlv_en)))
2561 		return -EINVAL;
2562 
2563 	sys_addr = f1x_swap_interleaved_region(pvt, sys_addr);
2564 
2565 	dct_sel_base = dct_sel_baseaddr(pvt);
2566 
2567 	/*
2568 	 * check whether addresses >= DctSelBaseAddr[47:27] are to be used to
2569 	 * select between DCT0 and DCT1.
2570 	 */
2571 	if (dct_high_range_enabled(pvt) &&
2572 	   !dct_ganging_enabled(pvt) &&
2573 	   ((sys_addr >> 27) >= (dct_sel_base >> 11)))
2574 		high_range = true;
2575 
2576 	channel = f1x_determine_channel(pvt, sys_addr, high_range, intlv_en);
2577 
2578 	chan_addr = f1x_get_norm_dct_addr(pvt, range, sys_addr,
2579 					  high_range, dct_sel_base);
2580 
2581 	/* Remove node interleaving, see F1x120 */
2582 	if (intlv_en)
2583 		chan_addr = ((chan_addr >> (12 + hweight8(intlv_en))) << 12) |
2584 			    (chan_addr & 0xfff);
2585 
2586 	/* remove channel interleave */
2587 	if (dct_interleave_enabled(pvt) &&
2588 	   !dct_high_range_enabled(pvt) &&
2589 	   !dct_ganging_enabled(pvt)) {
2590 
2591 		if (dct_sel_interleave_addr(pvt) != 1) {
2592 			if (dct_sel_interleave_addr(pvt) == 0x3)
2593 				/* hash 9 */
2594 				chan_addr = ((chan_addr >> 10) << 9) |
2595 					     (chan_addr & 0x1ff);
2596 			else
2597 				/* A[6] or hash 6 */
2598 				chan_addr = ((chan_addr >> 7) << 6) |
2599 					     (chan_addr & 0x3f);
2600 		} else
2601 			/* A[12] */
2602 			chan_addr = ((chan_addr >> 13) << 12) |
2603 				     (chan_addr & 0xfff);
2604 	}
2605 
2606 	edac_dbg(1, "   Normalized DCT addr: 0x%llx\n", chan_addr);
2607 
2608 	cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, channel);
2609 
2610 	if (cs_found >= 0)
2611 		*chan_sel = channel;
2612 
2613 	return cs_found;
2614 }
2615 
2616 static int f15_m30h_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
2617 					u64 sys_addr, int *chan_sel)
2618 {
2619 	int cs_found = -EINVAL;
2620 	int num_dcts_intlv = 0;
2621 	u64 chan_addr, chan_offset;
2622 	u64 dct_base, dct_limit;
2623 	u32 dct_cont_base_reg, dct_cont_limit_reg, tmp;
2624 	u8 channel, alias_channel, leg_mmio_hole, dct_sel, dct_offset_en;
2625 
2626 	u64 dhar_offset		= f10_dhar_offset(pvt);
2627 	u8 intlv_addr		= dct_sel_interleave_addr(pvt);
2628 	u8 node_id		= dram_dst_node(pvt, range);
2629 	u8 intlv_en		= dram_intlv_en(pvt, range);
2630 
2631 	amd64_read_pci_cfg(pvt->F1, DRAM_CONT_BASE, &dct_cont_base_reg);
2632 	amd64_read_pci_cfg(pvt->F1, DRAM_CONT_LIMIT, &dct_cont_limit_reg);
2633 
2634 	dct_offset_en		= (u8) ((dct_cont_base_reg >> 3) & BIT(0));
2635 	dct_sel			= (u8) ((dct_cont_base_reg >> 4) & 0x7);
2636 
2637 	edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
2638 		 range, sys_addr, get_dram_limit(pvt, range));
2639 
2640 	if (!(get_dram_base(pvt, range)  <= sys_addr) &&
2641 	    !(get_dram_limit(pvt, range) >= sys_addr))
2642 		return -EINVAL;
2643 
2644 	if (dhar_valid(pvt) &&
2645 	    dhar_base(pvt) <= sys_addr &&
2646 	    sys_addr < BIT_64(32)) {
2647 		amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
2648 			    sys_addr);
2649 		return -EINVAL;
2650 	}
2651 
2652 	/* Verify sys_addr is within DCT Range. */
2653 	dct_base = (u64) dct_sel_baseaddr(pvt);
2654 	dct_limit = (dct_cont_limit_reg >> 11) & 0x1FFF;
2655 
2656 	if (!(dct_cont_base_reg & BIT(0)) &&
2657 	    !(dct_base <= (sys_addr >> 27) &&
2658 	      dct_limit >= (sys_addr >> 27)))
2659 		return -EINVAL;
2660 
2661 	/* Verify number of dct's that participate in channel interleaving. */
2662 	num_dcts_intlv = (int) hweight8(intlv_en);
2663 
2664 	if (!(num_dcts_intlv % 2 == 0) || (num_dcts_intlv > 4))
2665 		return -EINVAL;
2666 
2667 	if (pvt->model >= 0x60)
2668 		channel = f1x_determine_channel(pvt, sys_addr, false, intlv_en);
2669 	else
2670 		channel = f15_m30h_determine_channel(pvt, sys_addr, intlv_en,
2671 						     num_dcts_intlv, dct_sel);
2672 
2673 	/* Verify we stay within the MAX number of channels allowed */
2674 	if (channel > 3)
2675 		return -EINVAL;
2676 
2677 	leg_mmio_hole = (u8) (dct_cont_base_reg >> 1 & BIT(0));
2678 
2679 	/* Get normalized DCT addr */
2680 	if (leg_mmio_hole && (sys_addr >= BIT_64(32)))
2681 		chan_offset = dhar_offset;
2682 	else
2683 		chan_offset = dct_base << 27;
2684 
2685 	chan_addr = sys_addr - chan_offset;
2686 
2687 	/* remove channel interleave */
2688 	if (num_dcts_intlv == 2) {
2689 		if (intlv_addr == 0x4)
2690 			chan_addr = ((chan_addr >> 9) << 8) |
2691 						(chan_addr & 0xff);
2692 		else if (intlv_addr == 0x5)
2693 			chan_addr = ((chan_addr >> 10) << 9) |
2694 						(chan_addr & 0x1ff);
2695 		else
2696 			return -EINVAL;
2697 
2698 	} else if (num_dcts_intlv == 4) {
2699 		if (intlv_addr == 0x4)
2700 			chan_addr = ((chan_addr >> 10) << 8) |
2701 							(chan_addr & 0xff);
2702 		else if (intlv_addr == 0x5)
2703 			chan_addr = ((chan_addr >> 11) << 9) |
2704 							(chan_addr & 0x1ff);
2705 		else
2706 			return -EINVAL;
2707 	}
2708 
2709 	if (dct_offset_en) {
2710 		amd64_read_pci_cfg(pvt->F1,
2711 				   DRAM_CONT_HIGH_OFF + (int) channel * 4,
2712 				   &tmp);
2713 		chan_addr +=  (u64) ((tmp >> 11) & 0xfff) << 27;
2714 	}
2715 
2716 	f15h_select_dct(pvt, channel);
2717 
2718 	edac_dbg(1, "   Normalized DCT addr: 0x%llx\n", chan_addr);
2719 
2720 	/*
2721 	 * Find Chip select:
2722 	 * if channel = 3, then alias it to 1. This is because, in F15 M30h,
2723 	 * there is support for 4 DCT's, but only 2 are currently functional.
2724 	 * They are DCT0 and DCT3. But we have read all registers of DCT3 into
2725 	 * pvt->csels[1]. So we need to use '1' here to get correct info.
2726 	 * Refer F15 M30h BKDG Section 2.10 and 2.10.3 for clarifications.
2727 	 */
2728 	alias_channel =  (channel == 3) ? 1 : channel;
2729 
2730 	cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, alias_channel);
2731 
2732 	if (cs_found >= 0)
2733 		*chan_sel = alias_channel;
2734 
2735 	return cs_found;
2736 }
2737 
2738 static int f1x_translate_sysaddr_to_cs(struct amd64_pvt *pvt,
2739 					u64 sys_addr,
2740 					int *chan_sel)
2741 {
2742 	int cs_found = -EINVAL;
2743 	unsigned range;
2744 
2745 	for (range = 0; range < DRAM_RANGES; range++) {
2746 		if (!dram_rw(pvt, range))
2747 			continue;
2748 
2749 		if (pvt->fam == 0x15 && pvt->model >= 0x30)
2750 			cs_found = f15_m30h_match_to_this_node(pvt, range,
2751 							       sys_addr,
2752 							       chan_sel);
2753 
2754 		else if ((get_dram_base(pvt, range)  <= sys_addr) &&
2755 			 (get_dram_limit(pvt, range) >= sys_addr)) {
2756 			cs_found = f1x_match_to_this_node(pvt, range,
2757 							  sys_addr, chan_sel);
2758 			if (cs_found >= 0)
2759 				break;
2760 		}
2761 	}
2762 	return cs_found;
2763 }
2764 
2765 /*
2766  * For reference see "2.8.5 Routing DRAM Requests" in F10 BKDG. This code maps
2767  * a @sys_addr to NodeID, DCT (channel) and chip select (CSROW).
2768  *
2769  * The @sys_addr is usually an error address received from the hardware
2770  * (MCX_ADDR).
2771  */
2772 static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
2773 				     struct err_info *err)
2774 {
2775 	struct amd64_pvt *pvt = mci->pvt_info;
2776 
2777 	error_address_to_page_and_offset(sys_addr, err);
2778 
2779 	err->csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &err->channel);
2780 	if (err->csrow < 0) {
2781 		err->err_code = ERR_CSROW;
2782 		return;
2783 	}
2784 
2785 	/*
2786 	 * We need the syndromes for channel detection only when we're
2787 	 * ganged. Otherwise @chan should already contain the channel at
2788 	 * this point.
2789 	 */
2790 	if (dct_ganging_enabled(pvt))
2791 		err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome);
2792 }
2793 
2794 /*
2795  * debug routine to display the memory sizes of all logical DIMMs and its
2796  * CSROWs
2797  */
2798 static void debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
2799 {
2800 	int dimm, size0, size1;
2801 	u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases;
2802 	u32 dbam  = ctrl ? pvt->dbam1 : pvt->dbam0;
2803 
2804 	if (pvt->fam == 0xf) {
2805 		/* K8 families < revF not supported yet */
2806 	       if (pvt->ext_model < K8_REV_F)
2807 			return;
2808 	       else
2809 		       WARN_ON(ctrl != 0);
2810 	}
2811 
2812 	if (pvt->fam == 0x10) {
2813 		dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1
2814 							   : pvt->dbam0;
2815 		dcsb = (ctrl && !dct_ganging_enabled(pvt)) ?
2816 				 pvt->csels[1].csbases :
2817 				 pvt->csels[0].csbases;
2818 	} else if (ctrl) {
2819 		dbam = pvt->dbam0;
2820 		dcsb = pvt->csels[1].csbases;
2821 	}
2822 	edac_dbg(1, "F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n",
2823 		 ctrl, dbam);
2824 
2825 	edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);
2826 
2827 	/* Dump memory sizes for DIMM and its CSROWs */
2828 	for (dimm = 0; dimm < 4; dimm++) {
2829 
2830 		size0 = 0;
2831 		if (dcsb[dimm*2] & DCSB_CS_ENABLE)
2832 			/*
2833 			 * For F15m60h, we need multiplier for LRDIMM cs_size
2834 			 * calculation. We pass dimm value to the dbam_to_cs
2835 			 * mapper so we can find the multiplier from the
2836 			 * corresponding DCSM.
2837 			 */
2838 			size0 = pvt->ops->dbam_to_cs(pvt, ctrl,
2839 						     DBAM_DIMM(dimm, dbam),
2840 						     dimm);
2841 
2842 		size1 = 0;
2843 		if (dcsb[dimm*2 + 1] & DCSB_CS_ENABLE)
2844 			size1 = pvt->ops->dbam_to_cs(pvt, ctrl,
2845 						     DBAM_DIMM(dimm, dbam),
2846 						     dimm);
2847 
2848 		amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
2849 				dimm * 2,     size0,
2850 				dimm * 2 + 1, size1);
2851 	}
2852 }
2853 
2854 static struct amd64_family_type family_types[] = {
2855 	[K8_CPUS] = {
2856 		.ctl_name = "K8",
2857 		.f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
2858 		.f2_id = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
2859 		.max_mcs = 2,
2860 		.ops = {
2861 			.early_channel_count	= k8_early_channel_count,
2862 			.map_sysaddr_to_csrow	= k8_map_sysaddr_to_csrow,
2863 			.dbam_to_cs		= k8_dbam_to_chip_select,
2864 		}
2865 	},
2866 	[F10_CPUS] = {
2867 		.ctl_name = "F10h",
2868 		.f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP,
2869 		.f2_id = PCI_DEVICE_ID_AMD_10H_NB_DRAM,
2870 		.max_mcs = 2,
2871 		.ops = {
2872 			.early_channel_count	= f1x_early_channel_count,
2873 			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
2874 			.dbam_to_cs		= f10_dbam_to_chip_select,
2875 		}
2876 	},
2877 	[F15_CPUS] = {
2878 		.ctl_name = "F15h",
2879 		.f1_id = PCI_DEVICE_ID_AMD_15H_NB_F1,
2880 		.f2_id = PCI_DEVICE_ID_AMD_15H_NB_F2,
2881 		.max_mcs = 2,
2882 		.ops = {
2883 			.early_channel_count	= f1x_early_channel_count,
2884 			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
2885 			.dbam_to_cs		= f15_dbam_to_chip_select,
2886 		}
2887 	},
2888 	[F15_M30H_CPUS] = {
2889 		.ctl_name = "F15h_M30h",
2890 		.f1_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1,
2891 		.f2_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F2,
2892 		.max_mcs = 2,
2893 		.ops = {
2894 			.early_channel_count	= f1x_early_channel_count,
2895 			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
2896 			.dbam_to_cs		= f16_dbam_to_chip_select,
2897 		}
2898 	},
2899 	[F15_M60H_CPUS] = {
2900 		.ctl_name = "F15h_M60h",
2901 		.f1_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F1,
2902 		.f2_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F2,
2903 		.max_mcs = 2,
2904 		.ops = {
2905 			.early_channel_count	= f1x_early_channel_count,
2906 			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
2907 			.dbam_to_cs		= f15_m60h_dbam_to_chip_select,
2908 		}
2909 	},
2910 	[F16_CPUS] = {
2911 		.ctl_name = "F16h",
2912 		.f1_id = PCI_DEVICE_ID_AMD_16H_NB_F1,
2913 		.f2_id = PCI_DEVICE_ID_AMD_16H_NB_F2,
2914 		.max_mcs = 2,
2915 		.ops = {
2916 			.early_channel_count	= f1x_early_channel_count,
2917 			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
2918 			.dbam_to_cs		= f16_dbam_to_chip_select,
2919 		}
2920 	},
2921 	[F16_M30H_CPUS] = {
2922 		.ctl_name = "F16h_M30h",
2923 		.f1_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F1,
2924 		.f2_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F2,
2925 		.max_mcs = 2,
2926 		.ops = {
2927 			.early_channel_count	= f1x_early_channel_count,
2928 			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
2929 			.dbam_to_cs		= f16_dbam_to_chip_select,
2930 		}
2931 	},
2932 	[F17_CPUS] = {
2933 		.ctl_name = "F17h",
2934 		.f0_id = PCI_DEVICE_ID_AMD_17H_DF_F0,
2935 		.f6_id = PCI_DEVICE_ID_AMD_17H_DF_F6,
2936 		.max_mcs = 2,
2937 		.ops = {
2938 			.early_channel_count	= f17_early_channel_count,
2939 			.dbam_to_cs		= f17_addr_mask_to_cs_size,
2940 		}
2941 	},
2942 	[F17_M10H_CPUS] = {
2943 		.ctl_name = "F17h_M10h",
2944 		.f0_id = PCI_DEVICE_ID_AMD_17H_M10H_DF_F0,
2945 		.f6_id = PCI_DEVICE_ID_AMD_17H_M10H_DF_F6,
2946 		.max_mcs = 2,
2947 		.ops = {
2948 			.early_channel_count	= f17_early_channel_count,
2949 			.dbam_to_cs		= f17_addr_mask_to_cs_size,
2950 		}
2951 	},
2952 	[F17_M30H_CPUS] = {
2953 		.ctl_name = "F17h_M30h",
2954 		.f0_id = PCI_DEVICE_ID_AMD_17H_M30H_DF_F0,
2955 		.f6_id = PCI_DEVICE_ID_AMD_17H_M30H_DF_F6,
2956 		.max_mcs = 8,
2957 		.ops = {
2958 			.early_channel_count	= f17_early_channel_count,
2959 			.dbam_to_cs		= f17_addr_mask_to_cs_size,
2960 		}
2961 	},
2962 	[F17_M60H_CPUS] = {
2963 		.ctl_name = "F17h_M60h",
2964 		.f0_id = PCI_DEVICE_ID_AMD_17H_M60H_DF_F0,
2965 		.f6_id = PCI_DEVICE_ID_AMD_17H_M60H_DF_F6,
2966 		.max_mcs = 2,
2967 		.ops = {
2968 			.early_channel_count	= f17_early_channel_count,
2969 			.dbam_to_cs		= f17_addr_mask_to_cs_size,
2970 		}
2971 	},
2972 	[F17_M70H_CPUS] = {
2973 		.ctl_name = "F17h_M70h",
2974 		.f0_id = PCI_DEVICE_ID_AMD_17H_M70H_DF_F0,
2975 		.f6_id = PCI_DEVICE_ID_AMD_17H_M70H_DF_F6,
2976 		.max_mcs = 2,
2977 		.ops = {
2978 			.early_channel_count	= f17_early_channel_count,
2979 			.dbam_to_cs		= f17_addr_mask_to_cs_size,
2980 		}
2981 	},
2982 	[F19_CPUS] = {
2983 		.ctl_name = "F19h",
2984 		.f0_id = PCI_DEVICE_ID_AMD_19H_DF_F0,
2985 		.f6_id = PCI_DEVICE_ID_AMD_19H_DF_F6,
2986 		.max_mcs = 8,
2987 		.ops = {
2988 			.early_channel_count	= f17_early_channel_count,
2989 			.dbam_to_cs		= f17_addr_mask_to_cs_size,
2990 		}
2991 	},
2992 	[F19_M10H_CPUS] = {
2993 		.ctl_name = "F19h_M10h",
2994 		.f0_id = PCI_DEVICE_ID_AMD_19H_M10H_DF_F0,
2995 		.f6_id = PCI_DEVICE_ID_AMD_19H_M10H_DF_F6,
2996 		.max_mcs = 12,
2997 		.flags.zn_regs_v2 = 1,
2998 		.ops = {
2999 			.early_channel_count	= f17_early_channel_count,
3000 			.dbam_to_cs		= f17_addr_mask_to_cs_size,
3001 		}
3002 	},
3003 	[F19_M50H_CPUS] = {
3004 		.ctl_name = "F19h_M50h",
3005 		.f0_id = PCI_DEVICE_ID_AMD_19H_M50H_DF_F0,
3006 		.f6_id = PCI_DEVICE_ID_AMD_19H_M50H_DF_F6,
3007 		.max_mcs = 2,
3008 		.ops = {
3009 			.early_channel_count	= f17_early_channel_count,
3010 			.dbam_to_cs		= f17_addr_mask_to_cs_size,
3011 		}
3012 	},
3013 };
3014 
3015 /*
3016  * These are tables of eigenvectors (one per line) which can be used for the
3017  * construction of the syndrome tables. The modified syndrome search algorithm
3018  * uses those to find the symbol in error and thus the DIMM.
3019  *
3020  * Algorithm courtesy of Ross LaFetra from AMD.
3021  */
3022 static const u16 x4_vectors[] = {
3023 	0x2f57, 0x1afe, 0x66cc, 0xdd88,
3024 	0x11eb, 0x3396, 0x7f4c, 0xeac8,
3025 	0x0001, 0x0002, 0x0004, 0x0008,
3026 	0x1013, 0x3032, 0x4044, 0x8088,
3027 	0x106b, 0x30d6, 0x70fc, 0xe0a8,
3028 	0x4857, 0xc4fe, 0x13cc, 0x3288,
3029 	0x1ac5, 0x2f4a, 0x5394, 0xa1e8,
3030 	0x1f39, 0x251e, 0xbd6c, 0x6bd8,
3031 	0x15c1, 0x2a42, 0x89ac, 0x4758,
3032 	0x2b03, 0x1602, 0x4f0c, 0xca08,
3033 	0x1f07, 0x3a0e, 0x6b04, 0xbd08,
3034 	0x8ba7, 0x465e, 0x244c, 0x1cc8,
3035 	0x2b87, 0x164e, 0x642c, 0xdc18,
3036 	0x40b9, 0x80de, 0x1094, 0x20e8,
3037 	0x27db, 0x1eb6, 0x9dac, 0x7b58,
3038 	0x11c1, 0x2242, 0x84ac, 0x4c58,
3039 	0x1be5, 0x2d7a, 0x5e34, 0xa718,
3040 	0x4b39, 0x8d1e, 0x14b4, 0x28d8,
3041 	0x4c97, 0xc87e, 0x11fc, 0x33a8,
3042 	0x8e97, 0x497e, 0x2ffc, 0x1aa8,
3043 	0x16b3, 0x3d62, 0x4f34, 0x8518,
3044 	0x1e2f, 0x391a, 0x5cac, 0xf858,
3045 	0x1d9f, 0x3b7a, 0x572c, 0xfe18,
3046 	0x15f5, 0x2a5a, 0x5264, 0xa3b8,
3047 	0x1dbb, 0x3b66, 0x715c, 0xe3f8,
3048 	0x4397, 0xc27e, 0x17fc, 0x3ea8,
3049 	0x1617, 0x3d3e, 0x6464, 0xb8b8,
3050 	0x23ff, 0x12aa, 0xab6c, 0x56d8,
3051 	0x2dfb, 0x1ba6, 0x913c, 0x7328,
3052 	0x185d, 0x2ca6, 0x7914, 0x9e28,
3053 	0x171b, 0x3e36, 0x7d7c, 0xebe8,
3054 	0x4199, 0x82ee, 0x19f4, 0x2e58,
3055 	0x4807, 0xc40e, 0x130c, 0x3208,
3056 	0x1905, 0x2e0a, 0x5804, 0xac08,
3057 	0x213f, 0x132a, 0xadfc, 0x5ba8,
3058 	0x19a9, 0x2efe, 0xb5cc, 0x6f88,
3059 };
3060 
3061 static const u16 x8_vectors[] = {
3062 	0x0145, 0x028a, 0x2374, 0x43c8, 0xa1f0, 0x0520, 0x0a40, 0x1480,
3063 	0x0211, 0x0422, 0x0844, 0x1088, 0x01b0, 0x44e0, 0x23c0, 0xed80,
3064 	0x1011, 0x0116, 0x022c, 0x0458, 0x08b0, 0x8c60, 0x2740, 0x4e80,
3065 	0x0411, 0x0822, 0x1044, 0x0158, 0x02b0, 0x2360, 0x46c0, 0xab80,
3066 	0x0811, 0x1022, 0x012c, 0x0258, 0x04b0, 0x4660, 0x8cc0, 0x2780,
3067 	0x2071, 0x40e2, 0xa0c4, 0x0108, 0x0210, 0x0420, 0x0840, 0x1080,
3068 	0x4071, 0x80e2, 0x0104, 0x0208, 0x0410, 0x0820, 0x1040, 0x2080,
3069 	0x8071, 0x0102, 0x0204, 0x0408, 0x0810, 0x1020, 0x2040, 0x4080,
3070 	0x019d, 0x03d6, 0x136c, 0x2198, 0x50b0, 0xb2e0, 0x0740, 0x0e80,
3071 	0x0189, 0x03ea, 0x072c, 0x0e58, 0x1cb0, 0x56e0, 0x37c0, 0xf580,
3072 	0x01fd, 0x0376, 0x06ec, 0x0bb8, 0x1110, 0x2220, 0x4440, 0x8880,
3073 	0x0163, 0x02c6, 0x1104, 0x0758, 0x0eb0, 0x2be0, 0x6140, 0xc280,
3074 	0x02fd, 0x01c6, 0x0b5c, 0x1108, 0x07b0, 0x25a0, 0x8840, 0x6180,
3075 	0x0801, 0x012e, 0x025c, 0x04b8, 0x1370, 0x26e0, 0x57c0, 0xb580,
3076 	0x0401, 0x0802, 0x015c, 0x02b8, 0x22b0, 0x13e0, 0x7140, 0xe280,
3077 	0x0201, 0x0402, 0x0804, 0x01b8, 0x11b0, 0x31a0, 0x8040, 0x7180,
3078 	0x0101, 0x0202, 0x0404, 0x0808, 0x1010, 0x2020, 0x4040, 0x8080,
3079 	0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080,
3080 	0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000,
3081 };
3082 
3083 static int decode_syndrome(u16 syndrome, const u16 *vectors, unsigned num_vecs,
3084 			   unsigned v_dim)
3085 {
3086 	unsigned int i, err_sym;
3087 
3088 	for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) {
3089 		u16 s = syndrome;
3090 		unsigned v_idx =  err_sym * v_dim;
3091 		unsigned v_end = (err_sym + 1) * v_dim;
3092 
3093 		/* walk over all 16 bits of the syndrome */
3094 		for (i = 1; i < (1U << 16); i <<= 1) {
3095 
3096 			/* if bit is set in that eigenvector... */
3097 			if (v_idx < v_end && vectors[v_idx] & i) {
3098 				u16 ev_comp = vectors[v_idx++];
3099 
3100 				/* ... and bit set in the modified syndrome, */
3101 				if (s & i) {
3102 					/* remove it. */
3103 					s ^= ev_comp;
3104 
3105 					if (!s)
3106 						return err_sym;
3107 				}
3108 
3109 			} else if (s & i)
3110 				/* can't get to zero, move to next symbol */
3111 				break;
3112 		}
3113 	}
3114 
3115 	edac_dbg(0, "syndrome(%x) not found\n", syndrome);
3116 	return -1;
3117 }
3118 
3119 static int map_err_sym_to_channel(int err_sym, int sym_size)
3120 {
3121 	if (sym_size == 4)
3122 		switch (err_sym) {
3123 		case 0x20:
3124 		case 0x21:
3125 			return 0;
3126 		case 0x22:
3127 		case 0x23:
3128 			return 1;
3129 		default:
3130 			return err_sym >> 4;
3131 		}
3132 	/* x8 symbols */
3133 	else
3134 		switch (err_sym) {
3135 		/* imaginary bits not in a DIMM */
3136 		case 0x10:
3137 			WARN(1, KERN_ERR "Invalid error symbol: 0x%x\n",
3138 					  err_sym);
3139 			return -1;
3140 		case 0x11:
3141 			return 0;
3142 		case 0x12:
3143 			return 1;
3144 		default:
3145 			return err_sym >> 3;
3146 		}
3147 	return -1;
3148 }
3149 
3150 static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome)
3151 {
3152 	struct amd64_pvt *pvt = mci->pvt_info;
3153 	int err_sym = -1;
3154 
3155 	if (pvt->ecc_sym_sz == 8)
3156 		err_sym = decode_syndrome(syndrome, x8_vectors,
3157 					  ARRAY_SIZE(x8_vectors),
3158 					  pvt->ecc_sym_sz);
3159 	else if (pvt->ecc_sym_sz == 4)
3160 		err_sym = decode_syndrome(syndrome, x4_vectors,
3161 					  ARRAY_SIZE(x4_vectors),
3162 					  pvt->ecc_sym_sz);
3163 	else {
3164 		amd64_warn("Illegal syndrome type: %u\n", pvt->ecc_sym_sz);
3165 		return err_sym;
3166 	}
3167 
3168 	return map_err_sym_to_channel(err_sym, pvt->ecc_sym_sz);
3169 }
3170 
3171 static void __log_ecc_error(struct mem_ctl_info *mci, struct err_info *err,
3172 			    u8 ecc_type)
3173 {
3174 	enum hw_event_mc_err_type err_type;
3175 	const char *string;
3176 
3177 	if (ecc_type == 2)
3178 		err_type = HW_EVENT_ERR_CORRECTED;
3179 	else if (ecc_type == 1)
3180 		err_type = HW_EVENT_ERR_UNCORRECTED;
3181 	else if (ecc_type == 3)
3182 		err_type = HW_EVENT_ERR_DEFERRED;
3183 	else {
3184 		WARN(1, "Something is rotten in the state of Denmark.\n");
3185 		return;
3186 	}
3187 
3188 	switch (err->err_code) {
3189 	case DECODE_OK:
3190 		string = "";
3191 		break;
3192 	case ERR_NODE:
3193 		string = "Failed to map error addr to a node";
3194 		break;
3195 	case ERR_CSROW:
3196 		string = "Failed to map error addr to a csrow";
3197 		break;
3198 	case ERR_CHANNEL:
3199 		string = "Unknown syndrome - possible error reporting race";
3200 		break;
3201 	case ERR_SYND:
3202 		string = "MCA_SYND not valid - unknown syndrome and csrow";
3203 		break;
3204 	case ERR_NORM_ADDR:
3205 		string = "Cannot decode normalized address";
3206 		break;
3207 	default:
3208 		string = "WTF error";
3209 		break;
3210 	}
3211 
3212 	edac_mc_handle_error(err_type, mci, 1,
3213 			     err->page, err->offset, err->syndrome,
3214 			     err->csrow, err->channel, -1,
3215 			     string, "");
3216 }
3217 
3218 static inline void decode_bus_error(int node_id, struct mce *m)
3219 {
3220 	struct mem_ctl_info *mci;
3221 	struct amd64_pvt *pvt;
3222 	u8 ecc_type = (m->status >> 45) & 0x3;
3223 	u8 xec = XEC(m->status, 0x1f);
3224 	u16 ec = EC(m->status);
3225 	u64 sys_addr;
3226 	struct err_info err;
3227 
3228 	mci = edac_mc_find(node_id);
3229 	if (!mci)
3230 		return;
3231 
3232 	pvt = mci->pvt_info;
3233 
3234 	/* Bail out early if this was an 'observed' error */
3235 	if (PP(ec) == NBSL_PP_OBS)
3236 		return;
3237 
3238 	/* Do only ECC errors */
3239 	if (xec && xec != F10_NBSL_EXT_ERR_ECC)
3240 		return;
3241 
3242 	memset(&err, 0, sizeof(err));
3243 
3244 	sys_addr = get_error_address(pvt, m);
3245 
3246 	if (ecc_type == 2)
3247 		err.syndrome = extract_syndrome(m->status);
3248 
3249 	pvt->ops->map_sysaddr_to_csrow(mci, sys_addr, &err);
3250 
3251 	__log_ecc_error(mci, &err, ecc_type);
3252 }
3253 
3254 /*
3255  * To find the UMC channel represented by this bank we need to match on its
3256  * instance_id. The instance_id of a bank is held in the lower 32 bits of its
3257  * IPID.
3258  *
3259  * Currently, we can derive the channel number by looking at the 6th nibble in
3260  * the instance_id. For example, instance_id=0xYXXXXX where Y is the channel
3261  * number.
3262  */
3263 static int find_umc_channel(struct mce *m)
3264 {
3265 	return (m->ipid & GENMASK(31, 0)) >> 20;
3266 }
3267 
3268 static void decode_umc_error(int node_id, struct mce *m)
3269 {
3270 	u8 ecc_type = (m->status >> 45) & 0x3;
3271 	struct mem_ctl_info *mci;
3272 	struct amd64_pvt *pvt;
3273 	struct err_info err;
3274 	u64 sys_addr;
3275 
3276 	mci = edac_mc_find(node_id);
3277 	if (!mci)
3278 		return;
3279 
3280 	pvt = mci->pvt_info;
3281 
3282 	memset(&err, 0, sizeof(err));
3283 
3284 	if (m->status & MCI_STATUS_DEFERRED)
3285 		ecc_type = 3;
3286 
3287 	err.channel = find_umc_channel(m);
3288 
3289 	if (!(m->status & MCI_STATUS_SYNDV)) {
3290 		err.err_code = ERR_SYND;
3291 		goto log_error;
3292 	}
3293 
3294 	if (ecc_type == 2) {
3295 		u8 length = (m->synd >> 18) & 0x3f;
3296 
3297 		if (length)
3298 			err.syndrome = (m->synd >> 32) & GENMASK(length - 1, 0);
3299 		else
3300 			err.err_code = ERR_CHANNEL;
3301 	}
3302 
3303 	err.csrow = m->synd & 0x7;
3304 
3305 	if (umc_normaddr_to_sysaddr(m->addr, pvt->mc_node_id, err.channel, &sys_addr)) {
3306 		err.err_code = ERR_NORM_ADDR;
3307 		goto log_error;
3308 	}
3309 
3310 	error_address_to_page_and_offset(sys_addr, &err);
3311 
3312 log_error:
3313 	__log_ecc_error(mci, &err, ecc_type);
3314 }
3315 
3316 /*
3317  * Use pvt->F3 which contains the F3 CPU PCI device to get the related
3318  * F1 (AddrMap) and F2 (Dct) devices. Return negative value on error.
3319  * Reserve F0 and F6 on systems with a UMC.
3320  */
3321 static int
3322 reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 pci_id1, u16 pci_id2)
3323 {
3324 	if (pvt->umc) {
3325 		pvt->F0 = pci_get_related_function(pvt->F3->vendor, pci_id1, pvt->F3);
3326 		if (!pvt->F0) {
3327 			edac_dbg(1, "F0 not found, device 0x%x\n", pci_id1);
3328 			return -ENODEV;
3329 		}
3330 
3331 		pvt->F6 = pci_get_related_function(pvt->F3->vendor, pci_id2, pvt->F3);
3332 		if (!pvt->F6) {
3333 			pci_dev_put(pvt->F0);
3334 			pvt->F0 = NULL;
3335 
3336 			edac_dbg(1, "F6 not found: device 0x%x\n", pci_id2);
3337 			return -ENODEV;
3338 		}
3339 
3340 		if (!pci_ctl_dev)
3341 			pci_ctl_dev = &pvt->F0->dev;
3342 
3343 		edac_dbg(1, "F0: %s\n", pci_name(pvt->F0));
3344 		edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));
3345 		edac_dbg(1, "F6: %s\n", pci_name(pvt->F6));
3346 
3347 		return 0;
3348 	}
3349 
3350 	/* Reserve the ADDRESS MAP Device */
3351 	pvt->F1 = pci_get_related_function(pvt->F3->vendor, pci_id1, pvt->F3);
3352 	if (!pvt->F1) {
3353 		edac_dbg(1, "F1 not found: device 0x%x\n", pci_id1);
3354 		return -ENODEV;
3355 	}
3356 
3357 	/* Reserve the DCT Device */
3358 	pvt->F2 = pci_get_related_function(pvt->F3->vendor, pci_id2, pvt->F3);
3359 	if (!pvt->F2) {
3360 		pci_dev_put(pvt->F1);
3361 		pvt->F1 = NULL;
3362 
3363 		edac_dbg(1, "F2 not found: device 0x%x\n", pci_id2);
3364 		return -ENODEV;
3365 	}
3366 
3367 	if (!pci_ctl_dev)
3368 		pci_ctl_dev = &pvt->F2->dev;
3369 
3370 	edac_dbg(1, "F1: %s\n", pci_name(pvt->F1));
3371 	edac_dbg(1, "F2: %s\n", pci_name(pvt->F2));
3372 	edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));
3373 
3374 	return 0;
3375 }
3376 
3377 static void free_mc_sibling_devs(struct amd64_pvt *pvt)
3378 {
3379 	if (pvt->umc) {
3380 		pci_dev_put(pvt->F0);
3381 		pci_dev_put(pvt->F6);
3382 	} else {
3383 		pci_dev_put(pvt->F1);
3384 		pci_dev_put(pvt->F2);
3385 	}
3386 }
3387 
3388 static void determine_ecc_sym_sz(struct amd64_pvt *pvt)
3389 {
3390 	pvt->ecc_sym_sz = 4;
3391 
3392 	if (pvt->umc) {
3393 		u8 i;
3394 
3395 		for_each_umc(i) {
3396 			/* Check enabled channels only: */
3397 			if (pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) {
3398 				if (pvt->umc[i].ecc_ctrl & BIT(9)) {
3399 					pvt->ecc_sym_sz = 16;
3400 					return;
3401 				} else if (pvt->umc[i].ecc_ctrl & BIT(7)) {
3402 					pvt->ecc_sym_sz = 8;
3403 					return;
3404 				}
3405 			}
3406 		}
3407 	} else if (pvt->fam >= 0x10) {
3408 		u32 tmp;
3409 
3410 		amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
3411 		/* F16h has only DCT0, so no need to read dbam1. */
3412 		if (pvt->fam != 0x16)
3413 			amd64_read_dct_pci_cfg(pvt, 1, DBAM0, &pvt->dbam1);
3414 
3415 		/* F10h, revD and later can do x8 ECC too. */
3416 		if ((pvt->fam > 0x10 || pvt->model > 7) && tmp & BIT(25))
3417 			pvt->ecc_sym_sz = 8;
3418 	}
3419 }
3420 
3421 /*
3422  * Retrieve the hardware registers of the memory controller.
3423  */
3424 static void __read_mc_regs_df(struct amd64_pvt *pvt)
3425 {
3426 	u8 nid = pvt->mc_node_id;
3427 	struct amd64_umc *umc;
3428 	u32 i, umc_base;
3429 
3430 	/* Read registers from each UMC */
3431 	for_each_umc(i) {
3432 
3433 		umc_base = get_umc_base(i);
3434 		umc = &pvt->umc[i];
3435 
3436 		amd_smn_read(nid, umc_base + get_umc_reg(UMCCH_DIMM_CFG), &umc->dimm_cfg);
3437 		amd_smn_read(nid, umc_base + UMCCH_UMC_CFG, &umc->umc_cfg);
3438 		amd_smn_read(nid, umc_base + UMCCH_SDP_CTRL, &umc->sdp_ctrl);
3439 		amd_smn_read(nid, umc_base + UMCCH_ECC_CTRL, &umc->ecc_ctrl);
3440 		amd_smn_read(nid, umc_base + UMCCH_UMC_CAP_HI, &umc->umc_cap_hi);
3441 	}
3442 }
3443 
3444 /*
3445  * Retrieve the hardware registers of the memory controller (this includes the
3446  * 'Address Map' and 'Misc' device regs)
3447  */
3448 static void read_mc_regs(struct amd64_pvt *pvt)
3449 {
3450 	unsigned int range;
3451 	u64 msr_val;
3452 
3453 	/*
3454 	 * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
3455 	 * those are Read-As-Zero.
3456 	 */
3457 	rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
3458 	edac_dbg(0, "  TOP_MEM:  0x%016llx\n", pvt->top_mem);
3459 
3460 	/* Check first whether TOP_MEM2 is enabled: */
3461 	rdmsrl(MSR_AMD64_SYSCFG, msr_val);
3462 	if (msr_val & BIT(21)) {
3463 		rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
3464 		edac_dbg(0, "  TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
3465 	} else {
3466 		edac_dbg(0, "  TOP_MEM2 disabled\n");
3467 	}
3468 
3469 	if (pvt->umc) {
3470 		__read_mc_regs_df(pvt);
3471 		amd64_read_pci_cfg(pvt->F0, DF_DHAR, &pvt->dhar);
3472 
3473 		goto skip;
3474 	}
3475 
3476 	amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap);
3477 
3478 	read_dram_ctl_register(pvt);
3479 
3480 	for (range = 0; range < DRAM_RANGES; range++) {
3481 		u8 rw;
3482 
3483 		/* read settings for this DRAM range */
3484 		read_dram_base_limit_regs(pvt, range);
3485 
3486 		rw = dram_rw(pvt, range);
3487 		if (!rw)
3488 			continue;
3489 
3490 		edac_dbg(1, "  DRAM range[%d], base: 0x%016llx; limit: 0x%016llx\n",
3491 			 range,
3492 			 get_dram_base(pvt, range),
3493 			 get_dram_limit(pvt, range));
3494 
3495 		edac_dbg(1, "   IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d\n",
3496 			 dram_intlv_en(pvt, range) ? "Enabled" : "Disabled",
3497 			 (rw & 0x1) ? "R" : "-",
3498 			 (rw & 0x2) ? "W" : "-",
3499 			 dram_intlv_sel(pvt, range),
3500 			 dram_dst_node(pvt, range));
3501 	}
3502 
3503 	amd64_read_pci_cfg(pvt->F1, DHAR, &pvt->dhar);
3504 	amd64_read_dct_pci_cfg(pvt, 0, DBAM0, &pvt->dbam0);
3505 
3506 	amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare);
3507 
3508 	amd64_read_dct_pci_cfg(pvt, 0, DCLR0, &pvt->dclr0);
3509 	amd64_read_dct_pci_cfg(pvt, 0, DCHR0, &pvt->dchr0);
3510 
3511 	if (!dct_ganging_enabled(pvt)) {
3512 		amd64_read_dct_pci_cfg(pvt, 1, DCLR0, &pvt->dclr1);
3513 		amd64_read_dct_pci_cfg(pvt, 1, DCHR0, &pvt->dchr1);
3514 	}
3515 
3516 skip:
3517 	read_dct_base_mask(pvt);
3518 
3519 	determine_memory_type(pvt);
3520 
3521 	if (!pvt->umc)
3522 		edac_dbg(1, "  DIMM type: %s\n", edac_mem_types[pvt->dram_type]);
3523 
3524 	determine_ecc_sym_sz(pvt);
3525 }
3526 
3527 /*
3528  * NOTE: CPU Revision Dependent code
3529  *
3530  * Input:
3531  *	@csrow_nr ChipSelect Row Number (0..NUM_CHIPSELECTS-1)
3532  *	k8 private pointer to -->
3533  *			DRAM Bank Address mapping register
3534  *			node_id
3535  *			DCL register where dual_channel_active is
3536  *
3537  * The DBAM register consists of 4 sets of 4 bits each definitions:
3538  *
3539  * Bits:	CSROWs
3540  * 0-3		CSROWs 0 and 1
3541  * 4-7		CSROWs 2 and 3
3542  * 8-11		CSROWs 4 and 5
3543  * 12-15	CSROWs 6 and 7
3544  *
3545  * Values range from: 0 to 15
3546  * The meaning of the values depends on CPU revision and dual-channel state,
3547  * see relevant BKDG more info.
3548  *
3549  * The memory controller provides for total of only 8 CSROWs in its current
3550  * architecture. Each "pair" of CSROWs normally represents just one DIMM in
3551  * single channel or two (2) DIMMs in dual channel mode.
3552  *
3553  * The following code logic collapses the various tables for CSROW based on CPU
3554  * revision.
3555  *
3556  * Returns:
3557  *	The number of PAGE_SIZE pages on the specified CSROW number it
3558  *	encompasses
3559  *
3560  */
3561 static u32 get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr_orig)
3562 {
3563 	u32 dbam = dct ? pvt->dbam1 : pvt->dbam0;
3564 	int csrow_nr = csrow_nr_orig;
3565 	u32 cs_mode, nr_pages;
3566 
3567 	if (!pvt->umc) {
3568 		csrow_nr >>= 1;
3569 		cs_mode = DBAM_DIMM(csrow_nr, dbam);
3570 	} else {
3571 		cs_mode = f17_get_cs_mode(csrow_nr >> 1, dct, pvt);
3572 	}
3573 
3574 	nr_pages   = pvt->ops->dbam_to_cs(pvt, dct, cs_mode, csrow_nr);
3575 	nr_pages <<= 20 - PAGE_SHIFT;
3576 
3577 	edac_dbg(0, "csrow: %d, channel: %d, DBAM idx: %d\n",
3578 		    csrow_nr_orig, dct,  cs_mode);
3579 	edac_dbg(0, "nr_pages/channel: %u\n", nr_pages);
3580 
3581 	return nr_pages;
3582 }
3583 
3584 static int init_csrows_df(struct mem_ctl_info *mci)
3585 {
3586 	struct amd64_pvt *pvt = mci->pvt_info;
3587 	enum edac_type edac_mode = EDAC_NONE;
3588 	enum dev_type dev_type = DEV_UNKNOWN;
3589 	struct dimm_info *dimm;
3590 	int empty = 1;
3591 	u8 umc, cs;
3592 
3593 	if (mci->edac_ctl_cap & EDAC_FLAG_S16ECD16ED) {
3594 		edac_mode = EDAC_S16ECD16ED;
3595 		dev_type = DEV_X16;
3596 	} else if (mci->edac_ctl_cap & EDAC_FLAG_S8ECD8ED) {
3597 		edac_mode = EDAC_S8ECD8ED;
3598 		dev_type = DEV_X8;
3599 	} else if (mci->edac_ctl_cap & EDAC_FLAG_S4ECD4ED) {
3600 		edac_mode = EDAC_S4ECD4ED;
3601 		dev_type = DEV_X4;
3602 	} else if (mci->edac_ctl_cap & EDAC_FLAG_SECDED) {
3603 		edac_mode = EDAC_SECDED;
3604 	}
3605 
3606 	for_each_umc(umc) {
3607 		for_each_chip_select(cs, umc, pvt) {
3608 			if (!csrow_enabled(cs, umc, pvt))
3609 				continue;
3610 
3611 			empty = 0;
3612 			dimm = mci->csrows[cs]->channels[umc]->dimm;
3613 
3614 			edac_dbg(1, "MC node: %d, csrow: %d\n",
3615 					pvt->mc_node_id, cs);
3616 
3617 			dimm->nr_pages = get_csrow_nr_pages(pvt, umc, cs);
3618 			dimm->mtype = pvt->umc[umc].dram_type;
3619 			dimm->edac_mode = edac_mode;
3620 			dimm->dtype = dev_type;
3621 			dimm->grain = 64;
3622 		}
3623 	}
3624 
3625 	return empty;
3626 }
3627 
3628 /*
3629  * Initialize the array of csrow attribute instances, based on the values
3630  * from pci config hardware registers.
3631  */
3632 static int init_csrows(struct mem_ctl_info *mci)
3633 {
3634 	struct amd64_pvt *pvt = mci->pvt_info;
3635 	enum edac_type edac_mode = EDAC_NONE;
3636 	struct csrow_info *csrow;
3637 	struct dimm_info *dimm;
3638 	int i, j, empty = 1;
3639 	int nr_pages = 0;
3640 	u32 val;
3641 
3642 	if (pvt->umc)
3643 		return init_csrows_df(mci);
3644 
3645 	amd64_read_pci_cfg(pvt->F3, NBCFG, &val);
3646 
3647 	pvt->nbcfg = val;
3648 
3649 	edac_dbg(0, "node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
3650 		 pvt->mc_node_id, val,
3651 		 !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE));
3652 
3653 	/*
3654 	 * We iterate over DCT0 here but we look at DCT1 in parallel, if needed.
3655 	 */
3656 	for_each_chip_select(i, 0, pvt) {
3657 		bool row_dct0 = !!csrow_enabled(i, 0, pvt);
3658 		bool row_dct1 = false;
3659 
3660 		if (pvt->fam != 0xf)
3661 			row_dct1 = !!csrow_enabled(i, 1, pvt);
3662 
3663 		if (!row_dct0 && !row_dct1)
3664 			continue;
3665 
3666 		csrow = mci->csrows[i];
3667 		empty = 0;
3668 
3669 		edac_dbg(1, "MC node: %d, csrow: %d\n",
3670 			    pvt->mc_node_id, i);
3671 
3672 		if (row_dct0) {
3673 			nr_pages = get_csrow_nr_pages(pvt, 0, i);
3674 			csrow->channels[0]->dimm->nr_pages = nr_pages;
3675 		}
3676 
3677 		/* K8 has only one DCT */
3678 		if (pvt->fam != 0xf && row_dct1) {
3679 			int row_dct1_pages = get_csrow_nr_pages(pvt, 1, i);
3680 
3681 			csrow->channels[1]->dimm->nr_pages = row_dct1_pages;
3682 			nr_pages += row_dct1_pages;
3683 		}
3684 
3685 		edac_dbg(1, "Total csrow%d pages: %u\n", i, nr_pages);
3686 
3687 		/* Determine DIMM ECC mode: */
3688 		if (pvt->nbcfg & NBCFG_ECC_ENABLE) {
3689 			edac_mode = (pvt->nbcfg & NBCFG_CHIPKILL)
3690 					? EDAC_S4ECD4ED
3691 					: EDAC_SECDED;
3692 		}
3693 
3694 		for (j = 0; j < pvt->channel_count; j++) {
3695 			dimm = csrow->channels[j]->dimm;
3696 			dimm->mtype = pvt->dram_type;
3697 			dimm->edac_mode = edac_mode;
3698 			dimm->grain = 64;
3699 		}
3700 	}
3701 
3702 	return empty;
3703 }
3704 
3705 /* get all cores on this DCT */
3706 static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, u16 nid)
3707 {
3708 	int cpu;
3709 
3710 	for_each_online_cpu(cpu)
3711 		if (topology_die_id(cpu) == nid)
3712 			cpumask_set_cpu(cpu, mask);
3713 }
3714 
3715 /* check MCG_CTL on all the cpus on this node */
3716 static bool nb_mce_bank_enabled_on_node(u16 nid)
3717 {
3718 	cpumask_var_t mask;
3719 	int cpu, nbe;
3720 	bool ret = false;
3721 
3722 	if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
3723 		amd64_warn("%s: Error allocating mask\n", __func__);
3724 		return false;
3725 	}
3726 
3727 	get_cpus_on_this_dct_cpumask(mask, nid);
3728 
3729 	rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);
3730 
3731 	for_each_cpu(cpu, mask) {
3732 		struct msr *reg = per_cpu_ptr(msrs, cpu);
3733 		nbe = reg->l & MSR_MCGCTL_NBE;
3734 
3735 		edac_dbg(0, "core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
3736 			 cpu, reg->q,
3737 			 (nbe ? "enabled" : "disabled"));
3738 
3739 		if (!nbe)
3740 			goto out;
3741 	}
3742 	ret = true;
3743 
3744 out:
3745 	free_cpumask_var(mask);
3746 	return ret;
3747 }
3748 
3749 static int toggle_ecc_err_reporting(struct ecc_settings *s, u16 nid, bool on)
3750 {
3751 	cpumask_var_t cmask;
3752 	int cpu;
3753 
3754 	if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
3755 		amd64_warn("%s: error allocating mask\n", __func__);
3756 		return -ENOMEM;
3757 	}
3758 
3759 	get_cpus_on_this_dct_cpumask(cmask, nid);
3760 
3761 	rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
3762 
3763 	for_each_cpu(cpu, cmask) {
3764 
3765 		struct msr *reg = per_cpu_ptr(msrs, cpu);
3766 
3767 		if (on) {
3768 			if (reg->l & MSR_MCGCTL_NBE)
3769 				s->flags.nb_mce_enable = 1;
3770 
3771 			reg->l |= MSR_MCGCTL_NBE;
3772 		} else {
3773 			/*
3774 			 * Turn off NB MCE reporting only when it was off before
3775 			 */
3776 			if (!s->flags.nb_mce_enable)
3777 				reg->l &= ~MSR_MCGCTL_NBE;
3778 		}
3779 	}
3780 	wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
3781 
3782 	free_cpumask_var(cmask);
3783 
3784 	return 0;
3785 }
3786 
3787 static bool enable_ecc_error_reporting(struct ecc_settings *s, u16 nid,
3788 				       struct pci_dev *F3)
3789 {
3790 	bool ret = true;
3791 	u32 value, mask = 0x3;		/* UECC/CECC enable */
3792 
3793 	if (toggle_ecc_err_reporting(s, nid, ON)) {
3794 		amd64_warn("Error enabling ECC reporting over MCGCTL!\n");
3795 		return false;
3796 	}
3797 
3798 	amd64_read_pci_cfg(F3, NBCTL, &value);
3799 
3800 	s->old_nbctl   = value & mask;
3801 	s->nbctl_valid = true;
3802 
3803 	value |= mask;
3804 	amd64_write_pci_cfg(F3, NBCTL, value);
3805 
3806 	amd64_read_pci_cfg(F3, NBCFG, &value);
3807 
3808 	edac_dbg(0, "1: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
3809 		 nid, value, !!(value & NBCFG_ECC_ENABLE));
3810 
3811 	if (!(value & NBCFG_ECC_ENABLE)) {
3812 		amd64_warn("DRAM ECC disabled on this node, enabling...\n");
3813 
3814 		s->flags.nb_ecc_prev = 0;
3815 
3816 		/* Attempt to turn on DRAM ECC Enable */
3817 		value |= NBCFG_ECC_ENABLE;
3818 		amd64_write_pci_cfg(F3, NBCFG, value);
3819 
3820 		amd64_read_pci_cfg(F3, NBCFG, &value);
3821 
3822 		if (!(value & NBCFG_ECC_ENABLE)) {
3823 			amd64_warn("Hardware rejected DRAM ECC enable,"
3824 				   "check memory DIMM configuration.\n");
3825 			ret = false;
3826 		} else {
3827 			amd64_info("Hardware accepted DRAM ECC Enable\n");
3828 		}
3829 	} else {
3830 		s->flags.nb_ecc_prev = 1;
3831 	}
3832 
3833 	edac_dbg(0, "2: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
3834 		 nid, value, !!(value & NBCFG_ECC_ENABLE));
3835 
3836 	return ret;
3837 }
3838 
3839 static void restore_ecc_error_reporting(struct ecc_settings *s, u16 nid,
3840 					struct pci_dev *F3)
3841 {
3842 	u32 value, mask = 0x3;		/* UECC/CECC enable */
3843 
3844 	if (!s->nbctl_valid)
3845 		return;
3846 
3847 	amd64_read_pci_cfg(F3, NBCTL, &value);
3848 	value &= ~mask;
3849 	value |= s->old_nbctl;
3850 
3851 	amd64_write_pci_cfg(F3, NBCTL, value);
3852 
3853 	/* restore previous BIOS DRAM ECC "off" setting we force-enabled */
3854 	if (!s->flags.nb_ecc_prev) {
3855 		amd64_read_pci_cfg(F3, NBCFG, &value);
3856 		value &= ~NBCFG_ECC_ENABLE;
3857 		amd64_write_pci_cfg(F3, NBCFG, value);
3858 	}
3859 
3860 	/* restore the NB Enable MCGCTL bit */
3861 	if (toggle_ecc_err_reporting(s, nid, OFF))
3862 		amd64_warn("Error restoring NB MCGCTL settings!\n");
3863 }
3864 
3865 static bool ecc_enabled(struct amd64_pvt *pvt)
3866 {
3867 	u16 nid = pvt->mc_node_id;
3868 	bool nb_mce_en = false;
3869 	u8 ecc_en = 0, i;
3870 	u32 value;
3871 
3872 	if (boot_cpu_data.x86 >= 0x17) {
3873 		u8 umc_en_mask = 0, ecc_en_mask = 0;
3874 		struct amd64_umc *umc;
3875 
3876 		for_each_umc(i) {
3877 			umc = &pvt->umc[i];
3878 
3879 			/* Only check enabled UMCs. */
3880 			if (!(umc->sdp_ctrl & UMC_SDP_INIT))
3881 				continue;
3882 
3883 			umc_en_mask |= BIT(i);
3884 
3885 			if (umc->umc_cap_hi & UMC_ECC_ENABLED)
3886 				ecc_en_mask |= BIT(i);
3887 		}
3888 
3889 		/* Check whether at least one UMC is enabled: */
3890 		if (umc_en_mask)
3891 			ecc_en = umc_en_mask == ecc_en_mask;
3892 		else
3893 			edac_dbg(0, "Node %d: No enabled UMCs.\n", nid);
3894 
3895 		/* Assume UMC MCA banks are enabled. */
3896 		nb_mce_en = true;
3897 	} else {
3898 		amd64_read_pci_cfg(pvt->F3, NBCFG, &value);
3899 
3900 		ecc_en = !!(value & NBCFG_ECC_ENABLE);
3901 
3902 		nb_mce_en = nb_mce_bank_enabled_on_node(nid);
3903 		if (!nb_mce_en)
3904 			edac_dbg(0, "NB MCE bank disabled, set MSR 0x%08x[4] on node %d to enable.\n",
3905 				     MSR_IA32_MCG_CTL, nid);
3906 	}
3907 
3908 	edac_dbg(3, "Node %d: DRAM ECC %s.\n", nid, (ecc_en ? "enabled" : "disabled"));
3909 
3910 	if (!ecc_en || !nb_mce_en)
3911 		return false;
3912 	else
3913 		return true;
3914 }
3915 
3916 static inline void
3917 f17h_determine_edac_ctl_cap(struct mem_ctl_info *mci, struct amd64_pvt *pvt)
3918 {
3919 	u8 i, ecc_en = 1, cpk_en = 1, dev_x4 = 1, dev_x16 = 1;
3920 
3921 	for_each_umc(i) {
3922 		if (pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) {
3923 			ecc_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_ENABLED);
3924 			cpk_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_CHIPKILL_CAP);
3925 
3926 			dev_x4  &= !!(pvt->umc[i].dimm_cfg & BIT(6));
3927 			dev_x16 &= !!(pvt->umc[i].dimm_cfg & BIT(7));
3928 		}
3929 	}
3930 
3931 	/* Set chipkill only if ECC is enabled: */
3932 	if (ecc_en) {
3933 		mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
3934 
3935 		if (!cpk_en)
3936 			return;
3937 
3938 		if (dev_x4)
3939 			mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
3940 		else if (dev_x16)
3941 			mci->edac_ctl_cap |= EDAC_FLAG_S16ECD16ED;
3942 		else
3943 			mci->edac_ctl_cap |= EDAC_FLAG_S8ECD8ED;
3944 	}
3945 }
3946 
3947 static void setup_mci_misc_attrs(struct mem_ctl_info *mci)
3948 {
3949 	struct amd64_pvt *pvt = mci->pvt_info;
3950 
3951 	mci->mtype_cap		= MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
3952 	mci->edac_ctl_cap	= EDAC_FLAG_NONE;
3953 
3954 	if (pvt->umc) {
3955 		f17h_determine_edac_ctl_cap(mci, pvt);
3956 	} else {
3957 		if (pvt->nbcap & NBCAP_SECDED)
3958 			mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
3959 
3960 		if (pvt->nbcap & NBCAP_CHIPKILL)
3961 			mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
3962 	}
3963 
3964 	mci->edac_cap		= determine_edac_cap(pvt);
3965 	mci->mod_name		= EDAC_MOD_STR;
3966 	mci->ctl_name		= fam_type->ctl_name;
3967 	mci->dev_name		= pci_name(pvt->F3);
3968 	mci->ctl_page_to_phys	= NULL;
3969 
3970 	/* memory scrubber interface */
3971 	mci->set_sdram_scrub_rate = set_scrub_rate;
3972 	mci->get_sdram_scrub_rate = get_scrub_rate;
3973 }
3974 
3975 /*
3976  * returns a pointer to the family descriptor on success, NULL otherwise.
3977  */
3978 static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
3979 {
3980 	pvt->ext_model  = boot_cpu_data.x86_model >> 4;
3981 	pvt->stepping	= boot_cpu_data.x86_stepping;
3982 	pvt->model	= boot_cpu_data.x86_model;
3983 	pvt->fam	= boot_cpu_data.x86;
3984 
3985 	switch (pvt->fam) {
3986 	case 0xf:
3987 		fam_type	= &family_types[K8_CPUS];
3988 		pvt->ops	= &family_types[K8_CPUS].ops;
3989 		break;
3990 
3991 	case 0x10:
3992 		fam_type	= &family_types[F10_CPUS];
3993 		pvt->ops	= &family_types[F10_CPUS].ops;
3994 		break;
3995 
3996 	case 0x15:
3997 		if (pvt->model == 0x30) {
3998 			fam_type = &family_types[F15_M30H_CPUS];
3999 			pvt->ops = &family_types[F15_M30H_CPUS].ops;
4000 			break;
4001 		} else if (pvt->model == 0x60) {
4002 			fam_type = &family_types[F15_M60H_CPUS];
4003 			pvt->ops = &family_types[F15_M60H_CPUS].ops;
4004 			break;
4005 		/* Richland is only client */
4006 		} else if (pvt->model == 0x13) {
4007 			return NULL;
4008 		} else {
4009 			fam_type	= &family_types[F15_CPUS];
4010 			pvt->ops	= &family_types[F15_CPUS].ops;
4011 		}
4012 		break;
4013 
4014 	case 0x16:
4015 		if (pvt->model == 0x30) {
4016 			fam_type = &family_types[F16_M30H_CPUS];
4017 			pvt->ops = &family_types[F16_M30H_CPUS].ops;
4018 			break;
4019 		}
4020 		fam_type	= &family_types[F16_CPUS];
4021 		pvt->ops	= &family_types[F16_CPUS].ops;
4022 		break;
4023 
4024 	case 0x17:
4025 		if (pvt->model >= 0x10 && pvt->model <= 0x2f) {
4026 			fam_type = &family_types[F17_M10H_CPUS];
4027 			pvt->ops = &family_types[F17_M10H_CPUS].ops;
4028 			break;
4029 		} else if (pvt->model >= 0x30 && pvt->model <= 0x3f) {
4030 			fam_type = &family_types[F17_M30H_CPUS];
4031 			pvt->ops = &family_types[F17_M30H_CPUS].ops;
4032 			break;
4033 		} else if (pvt->model >= 0x60 && pvt->model <= 0x6f) {
4034 			fam_type = &family_types[F17_M60H_CPUS];
4035 			pvt->ops = &family_types[F17_M60H_CPUS].ops;
4036 			break;
4037 		} else if (pvt->model >= 0x70 && pvt->model <= 0x7f) {
4038 			fam_type = &family_types[F17_M70H_CPUS];
4039 			pvt->ops = &family_types[F17_M70H_CPUS].ops;
4040 			break;
4041 		}
4042 		fallthrough;
4043 	case 0x18:
4044 		fam_type	= &family_types[F17_CPUS];
4045 		pvt->ops	= &family_types[F17_CPUS].ops;
4046 
4047 		if (pvt->fam == 0x18)
4048 			family_types[F17_CPUS].ctl_name = "F18h";
4049 		break;
4050 
4051 	case 0x19:
4052 		if (pvt->model >= 0x10 && pvt->model <= 0x1f) {
4053 			fam_type = &family_types[F19_M10H_CPUS];
4054 			pvt->ops = &family_types[F19_M10H_CPUS].ops;
4055 			break;
4056 		} else if (pvt->model >= 0x20 && pvt->model <= 0x2f) {
4057 			fam_type = &family_types[F17_M70H_CPUS];
4058 			pvt->ops = &family_types[F17_M70H_CPUS].ops;
4059 			fam_type->ctl_name = "F19h_M20h";
4060 			break;
4061 		} else if (pvt->model >= 0x50 && pvt->model <= 0x5f) {
4062 			fam_type = &family_types[F19_M50H_CPUS];
4063 			pvt->ops = &family_types[F19_M50H_CPUS].ops;
4064 			fam_type->ctl_name = "F19h_M50h";
4065 			break;
4066 		} else if (pvt->model >= 0xa0 && pvt->model <= 0xaf) {
4067 			fam_type = &family_types[F19_M10H_CPUS];
4068 			pvt->ops = &family_types[F19_M10H_CPUS].ops;
4069 			fam_type->ctl_name = "F19h_MA0h";
4070 			break;
4071 		}
4072 		fam_type	= &family_types[F19_CPUS];
4073 		pvt->ops	= &family_types[F19_CPUS].ops;
4074 		family_types[F19_CPUS].ctl_name = "F19h";
4075 		break;
4076 
4077 	default:
4078 		amd64_err("Unsupported family!\n");
4079 		return NULL;
4080 	}
4081 
4082 	return fam_type;
4083 }
4084 
4085 static const struct attribute_group *amd64_edac_attr_groups[] = {
4086 #ifdef CONFIG_EDAC_DEBUG
4087 	&dbg_group,
4088 	&inj_group,
4089 #endif
4090 	NULL
4091 };
4092 
4093 static int hw_info_get(struct amd64_pvt *pvt)
4094 {
4095 	u16 pci_id1, pci_id2;
4096 	int ret;
4097 
4098 	if (pvt->fam >= 0x17) {
4099 		pvt->umc = kcalloc(fam_type->max_mcs, sizeof(struct amd64_umc), GFP_KERNEL);
4100 		if (!pvt->umc)
4101 			return -ENOMEM;
4102 
4103 		pci_id1 = fam_type->f0_id;
4104 		pci_id2 = fam_type->f6_id;
4105 	} else {
4106 		pci_id1 = fam_type->f1_id;
4107 		pci_id2 = fam_type->f2_id;
4108 	}
4109 
4110 	ret = reserve_mc_sibling_devs(pvt, pci_id1, pci_id2);
4111 	if (ret)
4112 		return ret;
4113 
4114 	read_mc_regs(pvt);
4115 
4116 	return 0;
4117 }
4118 
4119 static void hw_info_put(struct amd64_pvt *pvt)
4120 {
4121 	if (pvt->F0 || pvt->F1)
4122 		free_mc_sibling_devs(pvt);
4123 
4124 	kfree(pvt->umc);
4125 }
4126 
4127 static int init_one_instance(struct amd64_pvt *pvt)
4128 {
4129 	struct mem_ctl_info *mci = NULL;
4130 	struct edac_mc_layer layers[2];
4131 	int ret = -EINVAL;
4132 
4133 	/*
4134 	 * We need to determine how many memory channels there are. Then use
4135 	 * that information for calculating the size of the dynamic instance
4136 	 * tables in the 'mci' structure.
4137 	 */
4138 	pvt->channel_count = pvt->ops->early_channel_count(pvt);
4139 	if (pvt->channel_count < 0)
4140 		return ret;
4141 
4142 	ret = -ENOMEM;
4143 	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
4144 	layers[0].size = pvt->csels[0].b_cnt;
4145 	layers[0].is_virt_csrow = true;
4146 	layers[1].type = EDAC_MC_LAYER_CHANNEL;
4147 
4148 	/*
4149 	 * Always allocate two channels since we can have setups with DIMMs on
4150 	 * only one channel. Also, this simplifies handling later for the price
4151 	 * of a couple of KBs tops.
4152 	 */
4153 	layers[1].size = fam_type->max_mcs;
4154 	layers[1].is_virt_csrow = false;
4155 
4156 	mci = edac_mc_alloc(pvt->mc_node_id, ARRAY_SIZE(layers), layers, 0);
4157 	if (!mci)
4158 		return ret;
4159 
4160 	mci->pvt_info = pvt;
4161 	mci->pdev = &pvt->F3->dev;
4162 
4163 	setup_mci_misc_attrs(mci);
4164 
4165 	if (init_csrows(mci))
4166 		mci->edac_cap = EDAC_FLAG_NONE;
4167 
4168 	ret = -ENODEV;
4169 	if (edac_mc_add_mc_with_groups(mci, amd64_edac_attr_groups)) {
4170 		edac_dbg(1, "failed edac_mc_add_mc()\n");
4171 		edac_mc_free(mci);
4172 		return ret;
4173 	}
4174 
4175 	return 0;
4176 }
4177 
4178 static bool instance_has_memory(struct amd64_pvt *pvt)
4179 {
4180 	bool cs_enabled = false;
4181 	int cs = 0, dct = 0;
4182 
4183 	for (dct = 0; dct < fam_type->max_mcs; dct++) {
4184 		for_each_chip_select(cs, dct, pvt)
4185 			cs_enabled |= csrow_enabled(cs, dct, pvt);
4186 	}
4187 
4188 	return cs_enabled;
4189 }
4190 
4191 static int probe_one_instance(unsigned int nid)
4192 {
4193 	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
4194 	struct amd64_pvt *pvt = NULL;
4195 	struct ecc_settings *s;
4196 	int ret;
4197 
4198 	ret = -ENOMEM;
4199 	s = kzalloc(sizeof(struct ecc_settings), GFP_KERNEL);
4200 	if (!s)
4201 		goto err_out;
4202 
4203 	ecc_stngs[nid] = s;
4204 
4205 	pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
4206 	if (!pvt)
4207 		goto err_settings;
4208 
4209 	pvt->mc_node_id	= nid;
4210 	pvt->F3 = F3;
4211 
4212 	ret = -ENODEV;
4213 	fam_type = per_family_init(pvt);
4214 	if (!fam_type)
4215 		goto err_enable;
4216 
4217 	ret = hw_info_get(pvt);
4218 	if (ret < 0)
4219 		goto err_enable;
4220 
4221 	ret = 0;
4222 	if (!instance_has_memory(pvt)) {
4223 		amd64_info("Node %d: No DIMMs detected.\n", nid);
4224 		goto err_enable;
4225 	}
4226 
4227 	if (!ecc_enabled(pvt)) {
4228 		ret = -ENODEV;
4229 
4230 		if (!ecc_enable_override)
4231 			goto err_enable;
4232 
4233 		if (boot_cpu_data.x86 >= 0x17) {
4234 			amd64_warn("Forcing ECC on is not recommended on newer systems. Please enable ECC in BIOS.");
4235 			goto err_enable;
4236 		} else
4237 			amd64_warn("Forcing ECC on!\n");
4238 
4239 		if (!enable_ecc_error_reporting(s, nid, F3))
4240 			goto err_enable;
4241 	}
4242 
4243 	ret = init_one_instance(pvt);
4244 	if (ret < 0) {
4245 		amd64_err("Error probing instance: %d\n", nid);
4246 
4247 		if (boot_cpu_data.x86 < 0x17)
4248 			restore_ecc_error_reporting(s, nid, F3);
4249 
4250 		goto err_enable;
4251 	}
4252 
4253 	amd64_info("%s %sdetected (node %d).\n", fam_type->ctl_name,
4254 		     (pvt->fam == 0xf ?
4255 				(pvt->ext_model >= K8_REV_F  ? "revF or later "
4256 							     : "revE or earlier ")
4257 				 : ""), pvt->mc_node_id);
4258 
4259 	dump_misc_regs(pvt);
4260 
4261 	return ret;
4262 
4263 err_enable:
4264 	hw_info_put(pvt);
4265 	kfree(pvt);
4266 
4267 err_settings:
4268 	kfree(s);
4269 	ecc_stngs[nid] = NULL;
4270 
4271 err_out:
4272 	return ret;
4273 }
4274 
4275 static void remove_one_instance(unsigned int nid)
4276 {
4277 	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
4278 	struct ecc_settings *s = ecc_stngs[nid];
4279 	struct mem_ctl_info *mci;
4280 	struct amd64_pvt *pvt;
4281 
4282 	/* Remove from EDAC CORE tracking list */
4283 	mci = edac_mc_del_mc(&F3->dev);
4284 	if (!mci)
4285 		return;
4286 
4287 	pvt = mci->pvt_info;
4288 
4289 	restore_ecc_error_reporting(s, nid, F3);
4290 
4291 	kfree(ecc_stngs[nid]);
4292 	ecc_stngs[nid] = NULL;
4293 
4294 	/* Free the EDAC CORE resources */
4295 	mci->pvt_info = NULL;
4296 
4297 	hw_info_put(pvt);
4298 	kfree(pvt);
4299 	edac_mc_free(mci);
4300 }
4301 
4302 static void setup_pci_device(void)
4303 {
4304 	if (pci_ctl)
4305 		return;
4306 
4307 	pci_ctl = edac_pci_create_generic_ctl(pci_ctl_dev, EDAC_MOD_STR);
4308 	if (!pci_ctl) {
4309 		pr_warn("%s(): Unable to create PCI control\n", __func__);
4310 		pr_warn("%s(): PCI error report via EDAC not set\n", __func__);
4311 	}
4312 }
4313 
4314 static const struct x86_cpu_id amd64_cpuids[] = {
4315 	X86_MATCH_VENDOR_FAM(AMD,	0x0F, NULL),
4316 	X86_MATCH_VENDOR_FAM(AMD,	0x10, NULL),
4317 	X86_MATCH_VENDOR_FAM(AMD,	0x15, NULL),
4318 	X86_MATCH_VENDOR_FAM(AMD,	0x16, NULL),
4319 	X86_MATCH_VENDOR_FAM(AMD,	0x17, NULL),
4320 	X86_MATCH_VENDOR_FAM(HYGON,	0x18, NULL),
4321 	X86_MATCH_VENDOR_FAM(AMD,	0x19, NULL),
4322 	{ }
4323 };
4324 MODULE_DEVICE_TABLE(x86cpu, amd64_cpuids);
4325 
4326 static int __init amd64_edac_init(void)
4327 {
4328 	const char *owner;
4329 	int err = -ENODEV;
4330 	int i;
4331 
4332 	owner = edac_get_owner();
4333 	if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
4334 		return -EBUSY;
4335 
4336 	if (!x86_match_cpu(amd64_cpuids))
4337 		return -ENODEV;
4338 
4339 	if (amd_cache_northbridges() < 0)
4340 		return -ENODEV;
4341 
4342 	opstate_init();
4343 
4344 	err = -ENOMEM;
4345 	ecc_stngs = kcalloc(amd_nb_num(), sizeof(ecc_stngs[0]), GFP_KERNEL);
4346 	if (!ecc_stngs)
4347 		goto err_free;
4348 
4349 	msrs = msrs_alloc();
4350 	if (!msrs)
4351 		goto err_free;
4352 
4353 	for (i = 0; i < amd_nb_num(); i++) {
4354 		err = probe_one_instance(i);
4355 		if (err) {
4356 			/* unwind properly */
4357 			while (--i >= 0)
4358 				remove_one_instance(i);
4359 
4360 			goto err_pci;
4361 		}
4362 	}
4363 
4364 	if (!edac_has_mcs()) {
4365 		err = -ENODEV;
4366 		goto err_pci;
4367 	}
4368 
4369 	/* register stuff with EDAC MCE */
4370 	if (boot_cpu_data.x86 >= 0x17)
4371 		amd_register_ecc_decoder(decode_umc_error);
4372 	else
4373 		amd_register_ecc_decoder(decode_bus_error);
4374 
4375 	setup_pci_device();
4376 
4377 #ifdef CONFIG_X86_32
4378 	amd64_err("%s on 32-bit is unsupported. USE AT YOUR OWN RISK!\n", EDAC_MOD_STR);
4379 #endif
4380 
4381 	printk(KERN_INFO "AMD64 EDAC driver v%s\n", EDAC_AMD64_VERSION);
4382 
4383 	return 0;
4384 
4385 err_pci:
4386 	pci_ctl_dev = NULL;
4387 
4388 	msrs_free(msrs);
4389 	msrs = NULL;
4390 
4391 err_free:
4392 	kfree(ecc_stngs);
4393 	ecc_stngs = NULL;
4394 
4395 	return err;
4396 }
4397 
4398 static void __exit amd64_edac_exit(void)
4399 {
4400 	int i;
4401 
4402 	if (pci_ctl)
4403 		edac_pci_release_generic_ctl(pci_ctl);
4404 
4405 	/* unregister from EDAC MCE */
4406 	if (boot_cpu_data.x86 >= 0x17)
4407 		amd_unregister_ecc_decoder(decode_umc_error);
4408 	else
4409 		amd_unregister_ecc_decoder(decode_bus_error);
4410 
4411 	for (i = 0; i < amd_nb_num(); i++)
4412 		remove_one_instance(i);
4413 
4414 	kfree(ecc_stngs);
4415 	ecc_stngs = NULL;
4416 
4417 	pci_ctl_dev = NULL;
4418 
4419 	msrs_free(msrs);
4420 	msrs = NULL;
4421 }
4422 
4423 module_init(amd64_edac_init);
4424 module_exit(amd64_edac_exit);
4425 
4426 MODULE_LICENSE("GPL");
4427 MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, "
4428 		"Dave Peterson, Thayne Harbaugh");
4429 MODULE_DESCRIPTION("MC support for AMD64 memory controllers - "
4430 		EDAC_AMD64_VERSION);
4431 
4432 module_param(edac_op_state, int, 0444);
4433 MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
4434