// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for Pondicherry2 memory controller.
 *
 * Copyright (c) 2016, Intel Corporation.
 *
 * [Derived from sb_edac.c]
 *
 * Translation of system physical addresses to DIMM addresses
 * is a two stage process:
 *
 * First the Pondicherry 2 memory controller handles slice and channel interleaving
 * in "sys2pmi()". This is (almost) completely common between platforms.
 *
 * Then a platform specific dunit (DIMM unit) completes the process to provide DIMM,
 * rank, bank, row and column using the appropriate "dunit_ops" functions/parameters.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/edac.h>
#include <linux/mmzone.h>
#include <linux/smp.h>
#include <linux/bitmap.h>
#include <linux/math64.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_data/x86/p2sb.h>

#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/processor.h>
#include <asm/mce.h>

#include "edac_mc.h"
#include "edac_module.h"
#include "pnd2_edac.h"

#define EDAC_MOD_STR		"pnd2_edac"

#define APL_NUM_CHANNELS	4
#define DNV_NUM_CHANNELS	2
#define DNV_MAX_DIMMS		2 /* Max DIMMs per channel */

enum type {
	APL,
	DNV, /* All requests go to PMI CH0 on each slice (CH1 disabled) */
};

struct dram_addr {
	int chan;
	int dimm;
	int rank;
	int bank;
	int row;
	int col;
};

struct pnd2_pvt {
	int dimm_geom[APL_NUM_CHANNELS];
	u64 tolm, tohm;
};

/*
 * System address space is divided into multiple regions with
 * different interleave rules in each. The as0/as1 regions
 * have no interleaving at all. The as2 region is interleaved
 * between two channels. The mot region is magic and may overlap
 * other regions, with its interleave rules taking precedence.
 * Addresses not in any of these regions are interleaved across
 * all four channels.
 */
static struct region {
	u64	base;
	u64	limit;
	u8	enabled;
} mot, as0, as1, as2;

static struct dunit_ops {
	char *name;
	enum type type;
	int pmiaddr_shift;
	int pmiidx_shift;
	int channels;
	int dimms_per_channel;
	int (*rd_reg)(int port, int off, int op, void *data, size_t sz, char *name);
	int (*get_registers)(void);
	int (*check_ecc)(void);
	void (*mk_region)(char *name, struct region *rp, void *asym);
	void (*get_dimm_config)(struct mem_ctl_info *mci);
	int (*pmi2mem)(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
				   struct dram_addr *daddr, char *msg);
} *ops;

static struct mem_ctl_info *pnd2_mci;

#define PND2_MSG_SIZE	256

/* Debug macros */
#define pnd2_printk(level, fmt, arg...)			\
	edac_printk(level, "pnd2", fmt, ##arg)

#define pnd2_mc_printk(mci, level, fmt, arg...)	\
	edac_mc_chipset_printk(mci, level, "pnd2", fmt, ##arg)

#define MOT_CHAN_INTLV_BIT_1SLC_2CH 12
#define MOT_CHAN_INTLV_BIT_2SLC_2CH 13
#define SELECTOR_DISABLED (-1)
#define _4GB (1ul << 32)

#define PMI_ADDRESS_WIDTH	31
#define PND_MAX_PHYS_BIT	39

#define APL_ASYMSHIFT		28
#define DNV_ASYMSHIFT		31
#define CH_HASH_MASK_LSB	6
#define SLICE_HASH_MASK_LSB	6
#define MOT_SLC_INTLV_BIT	12
#define LOG2_PMI_ADDR_GRANULARITY	5
#define MOT_SHIFT	24

#define GET_BITFIELD(v, lo, hi)	(((v) & GENMASK_ULL(hi, lo)) >> (lo))
#define U64_LSHIFT(val, s)	((u64)(val) << (s))
/*
 * On Apollo Lake we access memory controller registers via a
 * side-band, mailbox-style interface in a hidden PCI device
 * configuration space.
 */
static struct pci_bus	*p2sb_bus;
#define P2SB_DEVFN	PCI_DEVFN(0xd, 0)
#define P2SB_ADDR_OFF	0xd0
#define P2SB_DATA_OFF	0xd4
#define P2SB_STAT_OFF	0xd8
#define P2SB_ROUT_OFF	0xda
#define P2SB_EADD_OFF	0xdc
#define P2SB_HIDE_OFF	0xe1

#define P2SB_BUSY	1

#define P2SB_READ(size, off, ptr) \
	pci_bus_read_config_##size(p2sb_bus, P2SB_DEVFN, off, ptr)
#define P2SB_WRITE(size, off, val) \
	pci_bus_write_config_##size(p2sb_bus, P2SB_DEVFN, off, val)

static bool p2sb_is_busy(u16 *status)
{
	P2SB_READ(word, P2SB_STAT_OFF, status);

	return !!(*status & P2SB_BUSY);
}

static int _apl_rd_reg(int port, int off, int op, u32 *data)
{
	int retries = 0xff, ret;
	u16 status;
	u8 hidden;

	/* Unhide the P2SB device, if it's hidden */
	P2SB_READ(byte, P2SB_HIDE_OFF, &hidden);
	if (hidden)
		P2SB_WRITE(byte, P2SB_HIDE_OFF, 0);

	if (p2sb_is_busy(&status)) {
		ret = -EAGAIN;
		goto out;
	}

	P2SB_WRITE(dword, P2SB_ADDR_OFF, (port << 24) | off);
	P2SB_WRITE(dword, P2SB_DATA_OFF, 0);
	P2SB_WRITE(dword, P2SB_EADD_OFF, 0);
	P2SB_WRITE(word, P2SB_ROUT_OFF, 0);
	P2SB_WRITE(word, P2SB_STAT_OFF, (op << 8) | P2SB_BUSY);

	while (p2sb_is_busy(&status)) {
		if (retries-- == 0) {
			ret = -EBUSY;
			goto out;
		}
	}

	P2SB_READ(dword, P2SB_DATA_OFF, data);
	ret = (status >> 1) & 0x3;
out:
	/* Hide the P2SB device, if it was hidden before */
	if (hidden)
		P2SB_WRITE(byte, P2SB_HIDE_OFF, hidden);

	return ret;
}

static int apl_rd_reg(int port, int off, int op, void *data, size_t sz, char *name)
{
	int ret = 0;

	edac_dbg(2, "Read %s port=%x off=%x op=%x\n", name, port, off, op);
	switch (sz) {
	case 8:
		ret = _apl_rd_reg(port, off + 4, op, (u32 *)(data + 4));
		fallthrough;
	case 4:
		ret |= _apl_rd_reg(port, off, op, (u32 *)data);
		pnd2_printk(KERN_DEBUG, "%s=%x%08x ret=%d\n", name,
					sz == 8 ? *((u32 *)(data + 4)) : 0, *((u32 *)data), ret);
		break;
	}

	return ret;
}

static u64 get_mem_ctrl_hub_base_addr(void)
{
	struct b_cr_mchbar_lo_pci lo;
	struct b_cr_mchbar_hi_pci hi;
	struct pci_dev *pdev;

	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL);
	if (pdev) {
		pci_read_config_dword(pdev, 0x48, (u32 *)&lo);
		pci_read_config_dword(pdev, 0x4c, (u32 *)&hi);
		pci_dev_put(pdev);
	} else {
		return 0;
	}

	if (!lo.enable) {
		edac_dbg(2, "MMIO via memory controller hub base address is disabled!\n");
		return 0;
	}

	return U64_LSHIFT(hi.base, 32) | U64_LSHIFT(lo.base, 15);
}

#define DNV_MCHBAR_SIZE  0x8000
#define DNV_SB_PORT_SIZE 0x10000
static int dnv_rd_reg(int port, int off, int op, void *data, size_t sz, char *name)
{
	struct pci_dev *pdev;
	void __iomem *base;
	struct resource r;
	int ret;

	if (op == 4) {
		pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL);
		if (!pdev)
			return -ENODEV;

		pci_read_config_dword(pdev, off, data);
		pci_dev_put(pdev);
	} else {
		/* MMIO via memory controller hub base address */
		if (op == 0 && port == 0x4c) {
			memset(&r, 0, sizeof(r));

			r.start = get_mem_ctrl_hub_base_addr();
			if (!r.start)
				return -ENODEV;
			r.end = r.start + DNV_MCHBAR_SIZE - 1;
		} else {
			/* MMIO via sideband register base address */
			ret = p2sb_bar(NULL, 0, &r);
			if (ret)
				return ret;

			r.start += (port << 16);
			r.end = r.start + DNV_SB_PORT_SIZE - 1;
		}

		base = ioremap(r.start, resource_size(&r));
		if (!base)
			return -ENODEV;

		if (sz == 8)
			*(u64 *)data = readq(base + off);
		else
			*(u32 *)data = readl(base + off);

		iounmap(base);
	}

	edac_dbg(2, "Read %s=%.8x_%.8x\n", name,
			(sz == 8) ? *(u32 *)(data + 4) : 0, *(u32 *)data);

	return 0;
}

#define RD_REGP(regp, regname, port)	\
	ops->rd_reg(port,					\
		regname##_offset,				\
		regname##_r_opcode,				\
		regp, sizeof(struct regname),	\
		#regname)

#define RD_REG(regp, regname)			\
	ops->rd_reg(regname ## _port,		\
		regname##_offset,				\
		regname##_r_opcode,				\
		regp, sizeof(struct regname),	\
		#regname)
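
/*
 * Example expansion (sketch): RD_REG(&tolud, b_cr_tolud_pci) becomes
 *   ops->rd_reg(b_cr_tolud_pci_port, b_cr_tolud_pci_offset,
 *               b_cr_tolud_pci_r_opcode, &tolud,
 *               sizeof(struct b_cr_tolud_pci), "b_cr_tolud_pci");
 * with the _port, _offset and _r_opcode constants coming from
 * pnd2_edac.h.
 */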

static u64 top_lm, top_hm;
static bool two_slices;
static bool two_channels; /* Both PMI channels in one slice enabled */

static u8 sym_chan_mask;
static u8 asym_chan_mask;
static u8 chan_mask;

static int slice_selector = -1;
static int chan_selector = -1;
static u64 slice_hash_mask;
static u64 chan_hash_mask;

static void mk_region(char *name, struct region *rp, u64 base, u64 limit)
{
	rp->enabled = 1;
	rp->base = base;
	rp->limit = limit;
	edac_dbg(2, "Region:%s [%llx, %llx]\n", name, base, limit);
}

static void mk_region_mask(char *name, struct region *rp, u64 base, u64 mask)
{
	if (mask == 0) {
		pr_info(FW_BUG "MOT mask cannot be zero\n");
		return;
	}
	if (mask != GENMASK_ULL(PND_MAX_PHYS_BIT, __ffs(mask))) {
		pr_info(FW_BUG "MOT mask not power of two\n");
		return;
	}
	if (base & ~mask) {
		pr_info(FW_BUG "MOT region base/mask alignment error\n");
		return;
	}
	rp->base = base;
	rp->limit = (base | ~mask) & GENMASK_ULL(PND_MAX_PHYS_BIT, 0);
	rp->enabled = 1;
	edac_dbg(2, "Region:%s [%llx, %llx]\n", name, base, rp->limit);
}
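
/*
 * Worked example (illustrative values): base = 0x7c000000 with
 * mask = GENMASK_ULL(39, 26) passes all three checks and describes a
 * 64MB region: limit = (0x7c000000 | ~mask) & GENMASK_ULL(39, 0)
 * = 0x7fffffff.
 */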

static bool in_region(struct region *rp, u64 addr)
{
	if (!rp->enabled)
		return false;

	return rp->base <= addr && addr <= rp->limit;
}

static int gen_sym_mask(struct b_cr_slice_channel_hash *p)
{
	int mask = 0;

	if (!p->slice_0_mem_disabled)
		mask |= p->sym_slice0_channel_enabled;

	if (!p->slice_1_disabled)
		mask |= p->sym_slice1_channel_enabled << 2;

	if (p->ch_1_disabled || p->enable_pmi_dual_data_mode)
		mask &= 0x5;

	return mask;
}
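
/*
 * Example (hypothetical register values): with both slices enabled and
 * sym_slice0_channel_enabled = sym_slice1_channel_enabled = 0x3, the
 * mask is 0xf (all four PMI channels). Setting ch_1_disabled trims it
 * to 0x5, i.e. channel 0 of each slice.
 */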

static int gen_asym_mask(struct b_cr_slice_channel_hash *p,
			 struct b_cr_asym_mem_region0_mchbar *as0,
			 struct b_cr_asym_mem_region1_mchbar *as1,
			 struct b_cr_asym_2way_mem_region_mchbar *as2way)
{
	const int intlv[] = { 0x5, 0xA, 0x3, 0xC };
	int mask = 0;

	if (as2way->asym_2way_interleave_enable)
		mask = intlv[as2way->asym_2way_intlv_mode];
	if (as0->slice0_asym_enable)
		mask |= (1 << as0->slice0_asym_channel_select);
	if (as1->slice1_asym_enable)
		mask |= (4 << as1->slice1_asym_channel_select);
	if (p->slice_0_mem_disabled)
		mask &= 0xc;
	if (p->slice_1_disabled)
		mask &= 0x3;
	if (p->ch_1_disabled || p->enable_pmi_dual_data_mode)
		mask &= 0x5;

	return mask;
}
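
/*
 * Example (hypothetical): a 2-way interleave with asym_2way_intlv_mode
 * = 0 starts with intlv[0] = 0x5 (channels 0 and 2); if slice 1 is
 * then disabled, the "mask &= 0x3" step leaves only channel 0.
 */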

static struct b_cr_tolud_pci tolud;
static struct b_cr_touud_lo_pci touud_lo;
static struct b_cr_touud_hi_pci touud_hi;
static struct b_cr_asym_mem_region0_mchbar asym0;
static struct b_cr_asym_mem_region1_mchbar asym1;
static struct b_cr_asym_2way_mem_region_mchbar asym_2way;
static struct b_cr_mot_out_base_mchbar mot_base;
static struct b_cr_mot_out_mask_mchbar mot_mask;
static struct b_cr_slice_channel_hash chash;

/* Apollo Lake dunit */
/*
 * Validated on a board with just two DIMMs in the [0] and [2] positions
 * in this array. The other port numbers match the documentation, but
 * caution is advised.
 */
static const int apl_dports[APL_NUM_CHANNELS] = { 0x18, 0x10, 0x11, 0x19 };
static struct d_cr_drp0 drp0[APL_NUM_CHANNELS];

/* Denverton dunit */
static const int dnv_dports[DNV_NUM_CHANNELS] = { 0x10, 0x12 };
static struct d_cr_dsch dsch;
static struct d_cr_ecc_ctrl ecc_ctrl[DNV_NUM_CHANNELS];
static struct d_cr_drp drp[DNV_NUM_CHANNELS];
static struct d_cr_dmap dmap[DNV_NUM_CHANNELS];
static struct d_cr_dmap1 dmap1[DNV_NUM_CHANNELS];
static struct d_cr_dmap2 dmap2[DNV_NUM_CHANNELS];
static struct d_cr_dmap3 dmap3[DNV_NUM_CHANNELS];
static struct d_cr_dmap4 dmap4[DNV_NUM_CHANNELS];
static struct d_cr_dmap5 dmap5[DNV_NUM_CHANNELS];

static void apl_mk_region(char *name, struct region *rp, void *asym)
{
	struct b_cr_asym_mem_region0_mchbar *a = asym;

	mk_region(name, rp,
			  U64_LSHIFT(a->slice0_asym_base, APL_ASYMSHIFT),
			  U64_LSHIFT(a->slice0_asym_limit, APL_ASYMSHIFT) +
			  GENMASK_ULL(APL_ASYMSHIFT - 1, 0));
}

static void dnv_mk_region(char *name, struct region *rp, void *asym)
{
	struct b_cr_asym_mem_region_denverton *a = asym;

	mk_region(name, rp,
			  U64_LSHIFT(a->slice_asym_base, DNV_ASYMSHIFT),
			  U64_LSHIFT(a->slice_asym_limit, DNV_ASYMSHIFT) +
			  GENMASK_ULL(DNV_ASYMSHIFT - 1, 0));
}

static int apl_get_registers(void)
{
	int ret = -ENODEV;
	int i;

	if (RD_REG(&asym_2way, b_cr_asym_2way_mem_region_mchbar))
		return -ENODEV;

	/*
	 * RD_REGP() will fail for unpopulated or non-existent
	 * DIMM slots. Return success if we find at least one DIMM.
	 */
	for (i = 0; i < APL_NUM_CHANNELS; i++)
		if (!RD_REGP(&drp0[i], d_cr_drp0, apl_dports[i]))
			ret = 0;

	return ret;
}

static int dnv_get_registers(void)
{
	int i;

	if (RD_REG(&dsch, d_cr_dsch))
		return -ENODEV;

	for (i = 0; i < DNV_NUM_CHANNELS; i++)
		if (RD_REGP(&ecc_ctrl[i], d_cr_ecc_ctrl, dnv_dports[i]) ||
			RD_REGP(&drp[i], d_cr_drp, dnv_dports[i]) ||
			RD_REGP(&dmap[i], d_cr_dmap, dnv_dports[i]) ||
			RD_REGP(&dmap1[i], d_cr_dmap1, dnv_dports[i]) ||
			RD_REGP(&dmap2[i], d_cr_dmap2, dnv_dports[i]) ||
			RD_REGP(&dmap3[i], d_cr_dmap3, dnv_dports[i]) ||
			RD_REGP(&dmap4[i], d_cr_dmap4, dnv_dports[i]) ||
			RD_REGP(&dmap5[i], d_cr_dmap5, dnv_dports[i]))
			return -ENODEV;

	return 0;
}

/*
 * Read all the h/w config registers once here (they don't
 * change at run time). Figure out which address ranges have
 * which interleave characteristics.
 */
static int get_registers(void)
{
	const int intlv[] = { 10, 11, 12, 12 };

	if (RD_REG(&tolud, b_cr_tolud_pci) ||
		RD_REG(&touud_lo, b_cr_touud_lo_pci) ||
		RD_REG(&touud_hi, b_cr_touud_hi_pci) ||
		RD_REG(&asym0, b_cr_asym_mem_region0_mchbar) ||
		RD_REG(&asym1, b_cr_asym_mem_region1_mchbar) ||
		RD_REG(&mot_base, b_cr_mot_out_base_mchbar) ||
		RD_REG(&mot_mask, b_cr_mot_out_mask_mchbar) ||
		RD_REG(&chash, b_cr_slice_channel_hash))
		return -ENODEV;

	if (ops->get_registers())
		return -ENODEV;

	if (ops->type == DNV) {
		/* PMI channel idx (always 0) for asymmetric region */
		asym0.slice0_asym_channel_select = 0;
		asym1.slice1_asym_channel_select = 0;
		/* PMI channel bitmap (always 1) for symmetric region */
		chash.sym_slice0_channel_enabled = 0x1;
		chash.sym_slice1_channel_enabled = 0x1;
	}

	if (asym0.slice0_asym_enable)
		ops->mk_region("as0", &as0, &asym0);

	if (asym1.slice1_asym_enable)
		ops->mk_region("as1", &as1, &asym1);

	if (asym_2way.asym_2way_interleave_enable) {
		mk_region("as2way", &as2,
				  U64_LSHIFT(asym_2way.asym_2way_base, APL_ASYMSHIFT),
				  U64_LSHIFT(asym_2way.asym_2way_limit, APL_ASYMSHIFT) +
				  GENMASK_ULL(APL_ASYMSHIFT - 1, 0));
	}

	if (mot_base.imr_en) {
		mk_region_mask("mot", &mot,
					   U64_LSHIFT(mot_base.mot_out_base, MOT_SHIFT),
					   U64_LSHIFT(mot_mask.mot_out_mask, MOT_SHIFT));
	}

	top_lm = U64_LSHIFT(tolud.tolud, 20);
	top_hm = U64_LSHIFT(touud_hi.touud, 32) | U64_LSHIFT(touud_lo.touud, 20);

	two_slices = !chash.slice_1_disabled &&
				 !chash.slice_0_mem_disabled &&
				 (chash.sym_slice0_channel_enabled != 0) &&
				 (chash.sym_slice1_channel_enabled != 0);
	two_channels = !chash.ch_1_disabled &&
				 !chash.enable_pmi_dual_data_mode &&
				 ((chash.sym_slice0_channel_enabled == 3) ||
				 (chash.sym_slice1_channel_enabled == 3));

	sym_chan_mask = gen_sym_mask(&chash);
	asym_chan_mask = gen_asym_mask(&chash, &asym0, &asym1, &asym_2way);
	chan_mask = sym_chan_mask | asym_chan_mask;

	if (two_slices && !two_channels) {
		if (chash.hvm_mode)
			slice_selector = 29;
		else
			slice_selector = intlv[chash.interleave_mode];
	} else if (!two_slices && two_channels) {
		if (chash.hvm_mode)
			chan_selector = 29;
		else
			chan_selector = intlv[chash.interleave_mode];
	} else if (two_slices && two_channels) {
		if (chash.hvm_mode) {
			slice_selector = 29;
			chan_selector = 30;
		} else {
			slice_selector = intlv[chash.interleave_mode];
			chan_selector = intlv[chash.interleave_mode] + 1;
		}
	}

	if (two_slices) {
		if (!chash.hvm_mode)
			slice_hash_mask = chash.slice_hash_mask << SLICE_HASH_MASK_LSB;
		if (!two_channels)
			slice_hash_mask |= BIT_ULL(slice_selector);
	}

	if (two_channels) {
		if (!chash.hvm_mode)
			chan_hash_mask = chash.ch_hash_mask << CH_HASH_MASK_LSB;
		if (!two_slices)
			chan_hash_mask |= BIT_ULL(chan_selector);
	}

	return 0;
}

/* Get a contiguous memory address (remove the MMIO gap) */
static u64 remove_mmio_gap(u64 sys)
{
	return (sys < _4GB) ? sys : sys - (_4GB - top_lm);
}
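
/*
 * Worked example (assuming top_lm = 2GB): the MMIO hole spans
 * [2GB, 4GB), so a system address of 5GB maps to the contiguous
 * address 5GB - (4GB - 2GB) = 3GB, while addresses below 2GB pass
 * through unchanged.
 */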

/* Squeeze out one address bit, shift upper part down to fill gap */
static void remove_addr_bit(u64 *addr, int bitidx)
{
	u64	mask;

	if (bitidx == -1)
		return;

	mask = (1ull << bitidx) - 1;
	*addr = ((*addr >> 1) & ~mask) | (*addr & mask);
}
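
/*
 * Worked example: removing bit 2 from 0b101101 keeps bits [1:0]
 * (0b01), shifts bits [5:3] (0b101) down one place, and yields
 * 0b10101.
 */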

/* XOR all the bits from addr specified in mask */
static int hash_by_mask(u64 addr, u64 mask)
{
	u64 result = addr & mask;

	result = (result >> 32) ^ result;
	result = (result >> 16) ^ result;
	result = (result >> 8) ^ result;
	result = (result >> 4) ^ result;
	result = (result >> 2) ^ result;
	result = (result >> 1) ^ result;

	return (int)result & 1;
}
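
/*
 * Worked example: addr = 0x3000 with mask = 0x3000 selects bits 12
 * and 13; both are set, so the folded parity returned is 0.
 */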

/*
 * First stage decode. Take the system address and figure out which
 * second stage will deal with it based on interleave modes.
 */
static int sys2pmi(const u64 addr, u32 *pmiidx, u64 *pmiaddr, char *msg)
{
	u64 contig_addr, contig_base, contig_offset, contig_base_adj;
	int mot_intlv_bit = two_slices ? MOT_CHAN_INTLV_BIT_2SLC_2CH :
						MOT_CHAN_INTLV_BIT_1SLC_2CH;
	int slice_intlv_bit_rm = SELECTOR_DISABLED;
	int chan_intlv_bit_rm = SELECTOR_DISABLED;
	/* Determine if address is in the MOT region. */
	bool mot_hit = in_region(&mot, addr);
	/* Calculate the number of symmetric channels enabled. */
	int sym_channels = hweight8(sym_chan_mask);

	/*
	 * The amount we need to shift the asym base can be determined by the
	 * number of enabled symmetric channels.
	 * NOTE: This can only work because symmetric memory is not supposed
	 * to do a 3-way interleave.
	 */
	int sym_chan_shift = sym_channels >> 1;

	/* Give up if address is out of range, or in MMIO gap */
	if (addr >= (1ul << PND_MAX_PHYS_BIT) ||
	   (addr >= top_lm && addr < _4GB) || addr >= top_hm) {
		snprintf(msg, PND2_MSG_SIZE, "Error address 0x%llx is not DRAM", addr);
		return -EINVAL;
	}

	/* Get a contiguous memory address (remove the MMIO gap) */
	contig_addr = remove_mmio_gap(addr);

	if (in_region(&as0, addr)) {
		*pmiidx = asym0.slice0_asym_channel_select;

		contig_base = remove_mmio_gap(as0.base);
		contig_offset = contig_addr - contig_base;
		contig_base_adj = (contig_base >> sym_chan_shift) *
						  ((chash.sym_slice0_channel_enabled >> (*pmiidx & 1)) & 1);
		contig_addr = contig_offset + ((sym_channels > 0) ? contig_base_adj : 0ull);
	} else if (in_region(&as1, addr)) {
		*pmiidx = 2u + asym1.slice1_asym_channel_select;

		contig_base = remove_mmio_gap(as1.base);
		contig_offset = contig_addr - contig_base;
		contig_base_adj = (contig_base >> sym_chan_shift) *
						  ((chash.sym_slice1_channel_enabled >> (*pmiidx & 1)) & 1);
		contig_addr = contig_offset + ((sym_channels > 0) ? contig_base_adj : 0ull);
	} else if (in_region(&as2, addr) && (asym_2way.asym_2way_intlv_mode == 0x3ul)) {
		bool channel1;

		mot_intlv_bit = MOT_CHAN_INTLV_BIT_1SLC_2CH;
		*pmiidx = (asym_2way.asym_2way_intlv_mode & 1) << 1;
		channel1 = mot_hit ? ((bool)((addr >> mot_intlv_bit) & 1)) :
			hash_by_mask(contig_addr, chan_hash_mask);
		*pmiidx |= (u32)channel1;

		contig_base = remove_mmio_gap(as2.base);
		chan_intlv_bit_rm = mot_hit ? mot_intlv_bit : chan_selector;
		contig_offset = contig_addr - contig_base;
		remove_addr_bit(&contig_offset, chan_intlv_bit_rm);
		contig_addr = (contig_base >> sym_chan_shift) + contig_offset;
	} else {
		/* Otherwise we're in normal, boring symmetric mode. */
		*pmiidx = 0u;

		if (two_slices) {
			bool slice1;

			if (mot_hit) {
				slice_intlv_bit_rm = MOT_SLC_INTLV_BIT;
				slice1 = (addr >> MOT_SLC_INTLV_BIT) & 1;
			} else {
				slice_intlv_bit_rm = slice_selector;
				slice1 = hash_by_mask(addr, slice_hash_mask);
			}

			*pmiidx = (u32)slice1 << 1;
		}

		if (two_channels) {
			bool channel1;

			mot_intlv_bit = two_slices ? MOT_CHAN_INTLV_BIT_2SLC_2CH :
							MOT_CHAN_INTLV_BIT_1SLC_2CH;

			if (mot_hit) {
				chan_intlv_bit_rm = mot_intlv_bit;
				channel1 = (addr >> mot_intlv_bit) & 1;
			} else {
				chan_intlv_bit_rm = chan_selector;
				channel1 = hash_by_mask(contig_addr, chan_hash_mask);
			}

			*pmiidx |= (u32)channel1;
		}
	}

	/* Remove the chan_selector bit first */
	remove_addr_bit(&contig_addr, chan_intlv_bit_rm);
	/* Remove the slice bit (we remove it second because it must be lower) */
	remove_addr_bit(&contig_addr, slice_intlv_bit_rm);
	*pmiaddr = contig_addr;

	return 0;
}
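
/*
 * Example (assuming two slices, two channels, hvm_mode clear and
 * interleave_mode = 0): slice_selector = 10 and chan_selector = 11,
 * so once the hashes have picked a PMI index, bits 11 and 10 are
 * squeezed out and each PMI channel sees one quarter of the system
 * address space, as expected for a 4-way interleave.
 */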

/* Translate PMI address to memory (rank, row, bank, column) */
#define C(n) (0x10 | (n))	/* column */
#define B(n) (0x20 | (n))	/* bank */
#define R(n) (0x40 | (n))	/* row */
#define RS   (0x80)			/* rank */

/* addrdec values */
#define AMAP_1KB	0
#define AMAP_2KB	1
#define AMAP_4KB	2
#define AMAP_RSVD	3

/* dden values */
#define DEN_4Gb		0
#define DEN_8Gb		2

/* dwid values */
#define X8		0
#define X16		1

static struct dimm_geometry {
	u8	addrdec;
	u8	dden;
	u8	dwid;
	u8	rowbits, colbits;
	u16	bits[PMI_ADDRESS_WIDTH];
} dimms[] = {
	{
		.addrdec = AMAP_1KB, .dden = DEN_4Gb, .dwid = X16,
		.rowbits = 15, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  B(0),  B(1),  B(2),  R(0),
			R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),  R(9),
			R(10), C(7),  C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
			0,     0,     0,     0
		}
	},
	{
		.addrdec = AMAP_1KB, .dden = DEN_4Gb, .dwid = X8,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  B(0),  B(1),  B(2),  R(0),
			R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),  R(9),
			R(10), C(7),  C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
			R(15), 0,     0,     0
		}
	},
	{
		.addrdec = AMAP_1KB, .dden = DEN_8Gb, .dwid = X16,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  B(0),  B(1),  B(2),  R(0),
			R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),  R(9),
			R(10), C(7),  C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
			R(15), 0,     0,     0
		}
	},
	{
		.addrdec = AMAP_1KB, .dden = DEN_8Gb, .dwid = X8,
		.rowbits = 16, .colbits = 11,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  B(0),  B(1),  B(2),  R(0),
			R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),  R(9),
			R(10), C(7),  C(8),  C(9),  R(11), RS,    C(11), R(12), R(13),
			R(14), R(15), 0,     0
		}
	},
	{
		.addrdec = AMAP_2KB, .dden = DEN_4Gb, .dwid = X16,
		.rowbits = 15, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  B(0),  B(1),  B(2),
			R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),
			R(9),  R(10), C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
			0,     0,     0,     0
		}
	},
	{
		.addrdec = AMAP_2KB, .dden = DEN_4Gb, .dwid = X8,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  B(0),  B(1),  B(2),
			R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),
			R(9),  R(10), C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
			R(15), 0,     0,     0
		}
	},
	{
		.addrdec = AMAP_2KB, .dden = DEN_8Gb, .dwid = X16,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  B(0),  B(1),  B(2),
			R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),
			R(9),  R(10), C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
			R(15), 0,     0,     0
		}
	},
	{
		.addrdec = AMAP_2KB, .dden = DEN_8Gb, .dwid = X8,
		.rowbits = 16, .colbits = 11,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  B(0),  B(1),  B(2),
			R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),
			R(9),  R(10), C(8),  C(9),  R(11), RS,    C(11), R(12), R(13),
			R(14), R(15), 0,     0
		}
	},
	{
		.addrdec = AMAP_4KB, .dden = DEN_4Gb, .dwid = X16,
		.rowbits = 15, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  C(8),  B(0),  B(1),
			B(2),  R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
			R(8),  R(9),  R(10), C(9),  R(11), RS,    R(12), R(13), R(14),
			0,     0,     0,     0
		}
	},
	{
		.addrdec = AMAP_4KB, .dden = DEN_4Gb, .dwid = X8,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  C(8),  B(0),  B(1),
			B(2),  R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
			R(8),  R(9),  R(10), C(9),  R(11), RS,    R(12), R(13), R(14),
			R(15), 0,     0,     0
		}
	},
	{
		.addrdec = AMAP_4KB, .dden = DEN_8Gb, .dwid = X16,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  C(8),  B(0),  B(1),
			B(2),  R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
			R(8),  R(9),  R(10), C(9),  R(11), RS,    R(12), R(13), R(14),
			R(15), 0,     0,     0
		}
	},
	{
		.addrdec = AMAP_4KB, .dden = DEN_8Gb, .dwid = X8,
		.rowbits = 16, .colbits = 11,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  C(8),  B(0),  B(1),
			B(2),  R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
			R(8),  R(9),  R(10), C(9),  R(11), RS,    C(11), R(12), R(13),
			R(14), R(15), 0,     0
		}
	}
};
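
/*
 * Reading the table above: in dimms[0], PMI address bit 0 maps to
 * column bit 2 (C(2)), bits 5-7 map to bank bits 0-2, and trailing
 * zero entries mark PMI bits beyond the device's row/column width.
 * Note the PMI address was already shifted right by
 * LOG2_PMI_ADDR_GRANULARITY, so "bit 0" here is a 32-byte granule.
 */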

static int bank_hash(u64 pmiaddr, int idx, int shft)
{
	int bhash = 0;

	switch (idx) {
	case 0:
		bhash ^= ((pmiaddr >> (12 + shft)) ^ (pmiaddr >> (9 + shft))) & 1;
		break;
	case 1:
		bhash ^= (((pmiaddr >> (10 + shft)) ^ (pmiaddr >> (8 + shft))) & 1) << 1;
		bhash ^= ((pmiaddr >> 22) & 1) << 1;
		break;
	case 2:
		bhash ^= (((pmiaddr >> (13 + shft)) ^ (pmiaddr >> (11 + shft))) & 1) << 2;
		break;
	}

	return bhash;
}

static int rank_hash(u64 pmiaddr)
{
	return ((pmiaddr >> 16) ^ (pmiaddr >> 10)) & 1;
}
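
/*
 * Example: with addrdec = AMAP_1KB (shft = 0), bank bit 0 is hashed
 * with the XOR of PMI address bits 12 and 9, so
 * bank_hash(0x1000, 0, 0) = 1.
 */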

/* Second stage decode. Compute rank, bank, row & column. */
static int apl_pmi2mem(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
		       struct dram_addr *daddr, char *msg)
{
	struct d_cr_drp0 *cr_drp0 = &drp0[pmiidx];
	struct pnd2_pvt *pvt = mci->pvt_info;
	int g = pvt->dimm_geom[pmiidx];
	struct dimm_geometry *d = &dimms[g];
	int column = 0, bank = 0, row = 0, rank = 0;
	int i, idx, type, skiprs = 0;

	for (i = 0; i < PMI_ADDRESS_WIDTH; i++) {
		int	bit = (pmiaddr >> i) & 1;

		if (i + skiprs >= PMI_ADDRESS_WIDTH) {
			snprintf(msg, PND2_MSG_SIZE, "Bad dimm_geometry[] table\n");
			return -EINVAL;
		}

		type = d->bits[i + skiprs] & ~0xf;
		idx = d->bits[i + skiprs] & 0xf;

		/*
		 * On single rank DIMMs ignore the rank select bit
		 * and shift remainder of "bits[]" down one place.
		 */
		if (type == RS && (cr_drp0->rken0 + cr_drp0->rken1) == 1) {
			skiprs = 1;
			type = d->bits[i + skiprs] & ~0xf;
			idx = d->bits[i + skiprs] & 0xf;
		}

		switch (type) {
		case C(0):
			column |= (bit << idx);
			break;
		case B(0):
			bank |= (bit << idx);
			if (cr_drp0->bahen)
				bank ^= bank_hash(pmiaddr, idx, d->addrdec);
			break;
		case R(0):
			row |= (bit << idx);
			break;
		case RS:
			rank = bit;
			if (cr_drp0->rsien)
				rank ^= rank_hash(pmiaddr);
			break;
		default:
			if (bit) {
				snprintf(msg, PND2_MSG_SIZE, "Bad translation\n");
				return -EINVAL;
			}
			goto done;
		}
	}

done:
	daddr->col = column;
	daddr->bank = bank;
	daddr->row = row;
	daddr->rank = rank;
	daddr->dimm = 0;

	return 0;
}

/* Pluck bit "in" from pmiaddr and return value shifted to bit "out" */
#define dnv_get_bit(pmi, in, out) ((int)(((pmi) >> (in)) & 1u) << (out))
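
/* Example: dnv_get_bit(0x40, 6, 2) extracts bit 6 (set here) and returns 0x4. */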

static int dnv_pmi2mem(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
					   struct dram_addr *daddr, char *msg)
{
	/* Rank 0 or 1 */
	daddr->rank = dnv_get_bit(pmiaddr, dmap[pmiidx].rs0 + 13, 0);
	/* Rank 2 or 3 */
	daddr->rank |= dnv_get_bit(pmiaddr, dmap[pmiidx].rs1 + 13, 1);

	/*
	 * Normally ranks 0,1 are DIMM0, and 2,3 are DIMM1, but we
	 * flip them if DIMM1 is larger than DIMM0.
	 */
	daddr->dimm = (daddr->rank >= 2) ^ drp[pmiidx].dimmflip;

	daddr->bank = dnv_get_bit(pmiaddr, dmap[pmiidx].ba0 + 6, 0);
	daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].ba1 + 6, 1);
	daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].bg0 + 6, 2);
	if (dsch.ddr4en)
		daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].bg1 + 6, 3);
	if (dmap1[pmiidx].bxor) {
		if (dsch.ddr4en) {
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 0);
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row7 + 6, 1);
			if (dsch.chan_width == 0)
				/* 64/72 bit dram channel width */
				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 2);
			else
				/* 32/40 bit dram channel width */
				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 2);
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 3);
		} else {
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 0);
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 1);
			if (dsch.chan_width == 0)
				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 2);
			else
				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 2);
		}
	}

	daddr->row = dnv_get_bit(pmiaddr, dmap2[pmiidx].row0 + 6, 0);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row1 + 6, 1);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 2);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row3 + 6, 3);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row4 + 6, 4);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row5 + 6, 5);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 6);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row7 + 6, 7);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row8 + 6, 8);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row9 + 6, 9);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row10 + 6, 10);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row11 + 6, 11);
	daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row12 + 6, 12);
	daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row13 + 6, 13);
	if (dmap4[pmiidx].row14 != 31)
		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row14 + 6, 14);
	if (dmap4[pmiidx].row15 != 31)
		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row15 + 6, 15);
	if (dmap4[pmiidx].row16 != 31)
		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row16 + 6, 16);
	if (dmap4[pmiidx].row17 != 31)
		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row17 + 6, 17);

	daddr->col = dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 3);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 4);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca5 + 6, 5);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca6 + 6, 6);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca7 + 6, 7);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca8 + 6, 8);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca9 + 6, 9);
	if (!dsch.ddr4en && dmap1[pmiidx].ca11 != 0x3f)
		daddr->col |= dnv_get_bit(pmiaddr, dmap1[pmiidx].ca11 + 13, 11);

	return 0;
}

static int check_channel(int ch)
{
	if (drp0[ch].dramtype != 0) {
		pnd2_printk(KERN_INFO, "Unsupported DIMM in channel %d\n", ch);
		return 1;
	} else if (drp0[ch].eccen == 0) {
		pnd2_printk(KERN_INFO, "ECC disabled on channel %d\n", ch);
		return 1;
	}
	return 0;
}

static int apl_check_ecc_active(void)
{
	int	i, ret = 0;

	/* Check dramtype and ECC mode for each present DIMM */
	for (i = 0; i < APL_NUM_CHANNELS; i++)
		if (chan_mask & BIT(i))
			ret += check_channel(i);
	return ret ? -EINVAL : 0;
}

#define DIMMS_PRESENT(d) ((d)->rken0 + (d)->rken1 + (d)->rken2 + (d)->rken3)

static int check_unit(int ch)
{
	struct d_cr_drp *d = &drp[ch];

	if (DIMMS_PRESENT(d) && !ecc_ctrl[ch].eccen) {
		pnd2_printk(KERN_INFO, "ECC disabled on channel %d\n", ch);
		return 1;
	}
	return 0;
}

static int dnv_check_ecc_active(void)
{
	int	i, ret = 0;

	for (i = 0; i < DNV_NUM_CHANNELS; i++)
		ret += check_unit(i);
	return ret ? -EINVAL : 0;
}

static int get_memory_error_data(struct mem_ctl_info *mci, u64 addr,
								 struct dram_addr *daddr, char *msg)
{
	u64	pmiaddr;
	u32	pmiidx;
	int	ret;

	ret = sys2pmi(addr, &pmiidx, &pmiaddr, msg);
	if (ret)
		return ret;

	pmiaddr >>= ops->pmiaddr_shift;
	/* pmi channel idx to dimm channel idx */
	pmiidx >>= ops->pmiidx_shift;
	daddr->chan = pmiidx;

	ret = ops->pmi2mem(mci, pmiaddr, pmiidx, daddr, msg);
	if (ret)
		return ret;

	edac_dbg(0, "SysAddr=%llx PmiAddr=%llx Channel=%d DIMM=%d Rank=%d Bank=%d Row=%d Column=%d\n",
			 addr, pmiaddr, daddr->chan, daddr->dimm, daddr->rank, daddr->bank, daddr->row, daddr->col);

	return 0;
}
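
/*
 * On Denverton pmiidx_shift is 1, so PMI channels 0 and 2 (CH1 of each
 * slice is disabled) collapse to DIMM channels 0 and 1; on Apollo Lake
 * the shift is 0 and the four PMI channels map 1:1.
 */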

static void pnd2_mce_output_error(struct mem_ctl_info *mci, const struct mce *m,
				  struct dram_addr *daddr)
{
	enum hw_event_mc_err_type tp_event;
	char *optype, msg[PND2_MSG_SIZE];
	bool ripv = m->mcgstatus & MCG_STATUS_RIPV;
	bool overflow = m->status & MCI_STATUS_OVER;
	bool uc_err = m->status & MCI_STATUS_UC;
	bool recov = m->status & MCI_STATUS_S;
	u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52);
	u32 mscod = GET_BITFIELD(m->status, 16, 31);
	u32 errcode = GET_BITFIELD(m->status, 0, 15);
	u32 optypenum = GET_BITFIELD(m->status, 4, 6);
	int rc;

	tp_event = uc_err ? (ripv ? HW_EVENT_ERR_UNCORRECTED : HW_EVENT_ERR_FATAL) :
						 HW_EVENT_ERR_CORRECTED;

	/*
	 * According to Table 15-9 of the Intel Architecture spec vol 3A,
	 * memory errors should fit in this mask:
	 *	000f 0000 1mmm cccc (binary)
	 * where:
	 *	f = Correction Report Filtering Bit. If 1, subsequent errors
	 *	    won't be shown
	 *	mmm = error type
	 *	cccc = channel
	 * If the mask doesn't match, report an error to the parsing logic
	 */
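	/* e.g. errcode 0x0091 (memory read, channel 1) passes: 0x0091 & 0xef80 == 0x0080 */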
	if (!((errcode & 0xef80) == 0x80)) {
		optype = "Can't parse: it is not a mem";
	} else {
		switch (optypenum) {
		case 0:
			optype = "generic undef request error";
			break;
		case 1:
			optype = "memory read error";
			break;
		case 2:
			optype = "memory write error";
			break;
		case 3:
			optype = "addr/cmd error";
			break;
		case 4:
			optype = "memory scrubbing error";
			break;
		default:
			optype = "reserved";
			break;
		}
	}

	/* Only decode errors with a valid address (ADDRV) */
	if (!(m->status & MCI_STATUS_ADDRV))
		return;

	rc = get_memory_error_data(mci, m->addr, daddr, msg);
	if (rc)
		goto address_error;

	snprintf(msg, sizeof(msg),
		 "%s%s err_code:%04x:%04x channel:%d DIMM:%d rank:%d row:%d bank:%d col:%d",
		 overflow ? " OVERFLOW" : "", (uc_err && recov) ? " recoverable" : "", mscod,
		 errcode, daddr->chan, daddr->dimm, daddr->rank, daddr->row, daddr->bank, daddr->col);

	edac_dbg(0, "%s\n", msg);

	/* Call the helper to output message */
	edac_mc_handle_error(tp_event, mci, core_err_cnt, m->addr >> PAGE_SHIFT,
						 m->addr & ~PAGE_MASK, 0, daddr->chan, daddr->dimm, -1, optype, msg);

	return;

address_error:
	edac_mc_handle_error(tp_event, mci, core_err_cnt, 0, 0, 0, -1, -1, -1, msg, "");
}

static void apl_get_dimm_config(struct mem_ctl_info *mci)
{
	struct pnd2_pvt	*pvt = mci->pvt_info;
	struct dimm_info *dimm;
	struct d_cr_drp0 *d;
	u64	capacity;
	int	i, g;

	for (i = 0; i < APL_NUM_CHANNELS; i++) {
		if (!(chan_mask & BIT(i)))
			continue;

		dimm = edac_get_dimm(mci, i, 0, 0);
		if (!dimm) {
			edac_dbg(0, "No allocated DIMM for channel %d\n", i);
			continue;
		}

		d = &drp0[i];
		for (g = 0; g < ARRAY_SIZE(dimms); g++)
			if (dimms[g].addrdec == d->addrdec &&
			    dimms[g].dden == d->dden &&
			    dimms[g].dwid == d->dwid)
				break;

		if (g == ARRAY_SIZE(dimms)) {
			edac_dbg(0, "Channel %d: unrecognized DIMM\n", i);
			continue;
		}

		pvt->dimm_geom[i] = g;
		capacity = (d->rken0 + d->rken1) * 8 * (1ul << dimms[g].rowbits) *
				   (1ul << dimms[g].colbits);
		edac_dbg(0, "Channel %d: %lld MByte DIMM\n", i, capacity >> (20 - 3));
		dimm->nr_pages = MiB_TO_PAGES(capacity >> (20 - 3));
		dimm->grain = 32;
		dimm->dtype = (d->dwid == 0) ? DEV_X8 : DEV_X16;
		dimm->mtype = MEM_DDR3;
		dimm->edac_mode = EDAC_SECDED;
		snprintf(dimm->label, sizeof(dimm->label), "Slice#%d_Chan#%d", i / 2, i % 2);
	}
}
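
/*
 * Capacity sanity check (hypothetical single-rank DIMM matching
 * dimms[0]: rowbits = 15, colbits = 10): 1 rank * 8 banks * 2^15 rows
 * * 2^10 columns = 2^28 locations of 8 bytes each, and
 * 2^28 >> (20 - 3) = 2048 MByte.
 */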

static const int dnv_dtypes[] = {
	DEV_X8, DEV_X4, DEV_X16, DEV_UNKNOWN
};

static void dnv_get_dimm_config(struct mem_ctl_info *mci)
{
	int	i, j, ranks_of_dimm[DNV_MAX_DIMMS], banks, rowbits, colbits, memtype;
	struct dimm_info *dimm;
	struct d_cr_drp *d;
	u64	capacity;

	if (dsch.ddr4en) {
		memtype = MEM_DDR4;
		banks = 16;
		colbits = 10;
	} else {
		memtype = MEM_DDR3;
		banks = 8;
	}

	for (i = 0; i < DNV_NUM_CHANNELS; i++) {
		if (dmap4[i].row14 == 31)
			rowbits = 14;
		else if (dmap4[i].row15 == 31)
			rowbits = 15;
		else if (dmap4[i].row16 == 31)
			rowbits = 16;
		else if (dmap4[i].row17 == 31)
			rowbits = 17;
		else
			rowbits = 18;

		if (memtype == MEM_DDR3) {
			if (dmap1[i].ca11 != 0x3f)
				colbits = 12;
			else
				colbits = 10;
		}

		d = &drp[i];
		/* DIMM0 is present if rank0 and/or rank1 is enabled */
		ranks_of_dimm[0] = d->rken0 + d->rken1;
		/* DIMM1 is present if rank2 and/or rank3 is enabled */
		ranks_of_dimm[1] = d->rken2 + d->rken3;

		for (j = 0; j < DNV_MAX_DIMMS; j++) {
			if (!ranks_of_dimm[j])
				continue;

			dimm = edac_get_dimm(mci, i, j, 0);
			if (!dimm) {
				edac_dbg(0, "No allocated DIMM for channel %d DIMM %d\n", i, j);
				continue;
			}

			capacity = ranks_of_dimm[j] * banks * (1ul << rowbits) * (1ul << colbits);
			edac_dbg(0, "Channel %d DIMM %d: %lld MByte DIMM\n", i, j, capacity >> (20 - 3));
			dimm->nr_pages = MiB_TO_PAGES(capacity >> (20 - 3));
			dimm->grain = 32;
			dimm->dtype = dnv_dtypes[j ? d->dimmdwid0 : d->dimmdwid1];
			dimm->mtype = memtype;
			dimm->edac_mode = EDAC_SECDED;
			snprintf(dimm->label, sizeof(dimm->label), "Chan#%d_DIMM#%d", i, j);
		}
	}
}

static int pnd2_register_mci(struct mem_ctl_info **ppmci)
{
	struct edac_mc_layer layers[2];
	struct mem_ctl_info *mci;
	struct pnd2_pvt *pvt;
	int rc;

	rc = ops->check_ecc();
	if (rc < 0)
		return rc;

	/* Allocate a new MC control structure */
	layers[0].type = EDAC_MC_LAYER_CHANNEL;
	layers[0].size = ops->channels;
	layers[0].is_virt_csrow = false;
	layers[1].type = EDAC_MC_LAYER_SLOT;
	layers[1].size = ops->dimms_per_channel;
	layers[1].is_virt_csrow = true;
	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
	if (!mci)
		return -ENOMEM;

	pvt = mci->pvt_info;
	memset(pvt, 0, sizeof(*pvt));

	mci->mod_name = EDAC_MOD_STR;
	mci->dev_name = ops->name;
	mci->ctl_name = "Pondicherry2";

	/* Get dimm basic config and the memory layout */
	ops->get_dimm_config(mci);

	if (edac_mc_add_mc(mci)) {
		edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
		edac_mc_free(mci);
		return -EINVAL;
	}

	*ppmci = mci;

	return 0;
}

static void pnd2_unregister_mci(struct mem_ctl_info *mci)
{
	if (unlikely(!mci || !mci->pvt_info)) {
		pnd2_printk(KERN_ERR, "Couldn't find mci handler\n");
		return;
	}

	/* Remove MC sysfs nodes */
	edac_mc_del_mc(NULL);
	edac_dbg(1, "%s: free mci struct\n", mci->ctl_name);
	edac_mc_free(mci);
}

/*
 * Callback function registered with core kernel mce code.
 * Called once for each logged error.
 */
static int pnd2_mce_check_error(struct notifier_block *nb, unsigned long val, void *data)
{
	struct mce *mce = (struct mce *)data;
	struct mem_ctl_info *mci;
	struct dram_addr daddr;
	char *type;

	mci = pnd2_mci;
	if (!mci || (mce->kflags & MCE_HANDLED_CEC))
		return NOTIFY_DONE;

	/*
	 * Just let mcelog handle it if the error is
	 * outside the memory controller. A memory error
	 * is indicated by bit 7 = 1 and bits 8-11, 13-15 = 0.
	 * Bit 12 has a special meaning.
	 */
	if ((mce->status & 0xefff) >> 7 != 1)
		return NOTIFY_DONE;

	if (mce->mcgstatus & MCG_STATUS_MCIP)
		type = "Exception";
	else
		type = "Event";

	pnd2_mc_printk(mci, KERN_INFO, "HANDLING MCE MEMORY ERROR\n");
	pnd2_mc_printk(mci, KERN_INFO, "CPU %u: Machine Check %s: %llx Bank %u: %llx\n",
				   mce->extcpu, type, mce->mcgstatus, mce->bank, mce->status);
	pnd2_mc_printk(mci, KERN_INFO, "TSC %llx ", mce->tsc);
	pnd2_mc_printk(mci, KERN_INFO, "ADDR %llx ", mce->addr);
	pnd2_mc_printk(mci, KERN_INFO, "MISC %llx ", mce->misc);
	pnd2_mc_printk(mci, KERN_INFO, "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x\n",
				   mce->cpuvendor, mce->cpuid, mce->time, mce->socketid, mce->apicid);

	pnd2_mce_output_error(mci, mce, &daddr);

	/* Advise mcelog that the error was handled */
	mce->kflags |= MCE_HANDLED_EDAC;
	return NOTIFY_OK;
}

static struct notifier_block pnd2_mce_dec = {
	.notifier_call	= pnd2_mce_check_error,
	.priority	= MCE_PRIO_EDAC,
};

#ifdef CONFIG_EDAC_DEBUG
/*
 * Write an address to this file to exercise the address decode
 * logic in this driver.
 */
static u64 pnd2_fake_addr;
#define PND2_BLOB_SIZE 1024
static char pnd2_result[PND2_BLOB_SIZE];
static struct dentry *pnd2_test;
static struct debugfs_blob_wrapper pnd2_blob = {
	.data = pnd2_result,
	.size = 0
};

static int debugfs_u64_set(void *data, u64 val)
{
	struct dram_addr daddr;
	struct mce m;

	*(u64 *)data = val;
	m.mcgstatus = 0;
	/* ADDRV + MemRd + Unknown channel */
	m.status = MCI_STATUS_ADDRV + 0x9f;
	m.addr = val;
	pnd2_mce_output_error(pnd2_mci, &m, &daddr);
	snprintf(pnd2_blob.data, PND2_BLOB_SIZE,
			 "SysAddr=%llx Channel=%d DIMM=%d Rank=%d Bank=%d Row=%d Column=%d\n",
			 m.addr, daddr.chan, daddr.dimm, daddr.rank, daddr.bank, daddr.row, daddr.col);
	pnd2_blob.size = strlen(pnd2_blob.data);

	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n");

static void setup_pnd2_debug(void)
{
	pnd2_test = edac_debugfs_create_dir("pnd2_test");
	edac_debugfs_create_file("pnd2_debug_addr", 0200, pnd2_test,
							 &pnd2_fake_addr, &fops_u64_wo);
	debugfs_create_blob("pnd2_debug_results", 0400, pnd2_test, &pnd2_blob);
}
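
/*
 * Usage sketch (paths assume debugfs mounted at /sys/kernel/debug and
 * the EDAC debugfs root at /sys/kernel/debug/edac):
 *   echo 0x12345678 > /sys/kernel/debug/edac/pnd2_test/pnd2_debug_addr
 *   cat /sys/kernel/debug/edac/pnd2_test/pnd2_debug_results
 */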

static void teardown_pnd2_debug(void)
{
	debugfs_remove_recursive(pnd2_test);
}
#else
static void setup_pnd2_debug(void)	{}
static void teardown_pnd2_debug(void)	{}
#endif /* CONFIG_EDAC_DEBUG */


static int pnd2_probe(void)
{
	int rc;

	edac_dbg(2, "\n");
	rc = get_registers();
	if (rc)
		return rc;

	return pnd2_register_mci(&pnd2_mci);
}

static void pnd2_remove(void)
{
	edac_dbg(0, "\n");
	pnd2_unregister_mci(pnd2_mci);
}

static struct dunit_ops apl_ops = {
		.name			= "pnd2/apl",
		.type			= APL,
		.pmiaddr_shift		= LOG2_PMI_ADDR_GRANULARITY,
		.pmiidx_shift		= 0,
		.channels		= APL_NUM_CHANNELS,
		.dimms_per_channel	= 1,
		.rd_reg			= apl_rd_reg,
		.get_registers		= apl_get_registers,
		.check_ecc		= apl_check_ecc_active,
		.mk_region		= apl_mk_region,
		.get_dimm_config	= apl_get_dimm_config,
		.pmi2mem		= apl_pmi2mem,
};

static struct dunit_ops dnv_ops = {
		.name			= "pnd2/dnv",
		.type			= DNV,
		.pmiaddr_shift		= 0,
		.pmiidx_shift		= 1,
		.channels		= DNV_NUM_CHANNELS,
		.dimms_per_channel	= 2,
		.rd_reg			= dnv_rd_reg,
		.get_registers		= dnv_get_registers,
		.check_ecc		= dnv_check_ecc_active,
		.mk_region		= dnv_mk_region,
		.get_dimm_config	= dnv_get_dimm_config,
		.pmi2mem		= dnv_pmi2mem,
};

static const struct x86_cpu_id pnd2_cpuids[] = {
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT,	&apl_ops),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_D,	&dnv_ops),
	{ }
};
MODULE_DEVICE_TABLE(x86cpu, pnd2_cpuids);

static int __init pnd2_init(void)
{
	const struct x86_cpu_id *id;
	const char *owner;
	int rc;

	edac_dbg(2, "\n");

	if (ghes_get_devices())
		return -EBUSY;

	owner = edac_get_owner();
	if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
		return -EBUSY;

	if (cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
		return -ENODEV;

	id = x86_match_cpu(pnd2_cpuids);
	if (!id)
		return -ENODEV;

	ops = (struct dunit_ops *)id->driver_data;

	if (ops->type == APL) {
		p2sb_bus = pci_find_bus(0, 0);
		if (!p2sb_bus)
			return -ENODEV;
	}

	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
	opstate_init();

	rc = pnd2_probe();
	if (rc < 0) {
		pnd2_printk(KERN_ERR, "Failed to register device with error %d.\n", rc);
		return rc;
	}

	if (!pnd2_mci)
		return -ENODEV;

	mce_register_decode_chain(&pnd2_mce_dec);
	setup_pnd2_debug();

	return 0;
}

static void __exit pnd2_exit(void)
{
	edac_dbg(2, "\n");
	teardown_pnd2_debug();
	mce_unregister_decode_chain(&pnd2_mce_dec);
	pnd2_remove();
}

module_init(pnd2_init);
module_exit(pnd2_exit);

module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Tony Luck");
MODULE_DESCRIPTION("MC Driver for Intel SoC using Pondicherry memory controller");