/*
 * Driver for Pondicherry2 memory controller.
 *
 * Copyright (c) 2016, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * [Derived from sb_edac.c]
 *
 * Translation of system physical addresses to DIMM addresses
 * is a two stage process:
 *
 * First the Pondicherry 2 memory controller handles slice and channel interleaving
21  * in "sys2pmi()". This is (almost) completley common between platforms.
 *
 * Then a platform specific dunit (DIMM unit) completes the process to provide DIMM,
 * rank, bank, row and column using the appropriate "dunit_ops" functions/parameters.
 */
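
/*
 * Illustrative sketch, not part of the driver: a complete decode chains
 * the two stages together. sys2pmi(), ops and get_memory_error_data() are
 * defined later in this file; the literal address and the mci pointer are
 * made-up/assumed for the example.
 *
 *	u64 pmiaddr;
 *	u32 pmiidx;
 *	struct dram_addr daddr;
 *	char msg[PND2_MSG_SIZE];
 *
 *	if (!sys2pmi(0x12345680, &pmiidx, &pmiaddr, msg))	// stage 1
 *		ops->pmi2mem(mci, pmiaddr >> ops->pmiaddr_shift,
 *			     pmiidx >> ops->pmiidx_shift,
 *			     &daddr, msg);			// stage 2
 *
 * This is the sequence get_memory_error_data() performs below.
 */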

#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/edac.h>
#include <linux/mmzone.h>
#include <linux/smp.h>
#include <linux/bitmap.h>
#include <linux/math64.h>
#include <linux/mod_devicetable.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/processor.h>
#include <asm/mce.h>

#include "edac_mc.h"
#include "edac_module.h"
#include "pnd2_edac.h"

#define APL_NUM_CHANNELS	4
#define DNV_NUM_CHANNELS	2
#define DNV_MAX_DIMMS		2 /* Max DIMMs per channel */

enum type {
	APL,
	DNV, /* All requests go to PMI CH0 on each slice (CH1 disabled) */
};

struct dram_addr {
	int chan;
	int dimm;
	int rank;
	int bank;
	int row;
	int col;
};

struct pnd2_pvt {
	int dimm_geom[APL_NUM_CHANNELS];
	u64 tolm, tohm;
};

/*
 * System address space is divided into multiple regions with
 * different interleave rules in each. The as0/as1 regions
 * have no interleaving at all. The as2 region is interleaved
 * between two channels. The mot region is magic and may overlap
 * other regions, with its interleave rules taking precedence.
 * Addresses not in any of these regions are interleaved across
 * all four channels.
 */
static struct region {
	u64	base;
	u64	limit;
	u8	enabled;
} mot, as0, as1, as2;
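
/*
 * Example with invented values: if as0 covers [0x100000000, 0x1ffffffff],
 * in_region(&as0, 0x1ffffffff) is true (limits are inclusive) and
 * in_region(&as0, 0x200000000) is false. Because mot may overlap the
 * other regions, sys2pmi() tests in_region(&mot, addr) first and lets
 * a hit override the normal interleave selectors.
 */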

static struct dunit_ops {
	char *name;
	enum type type;
	int pmiaddr_shift;
	int pmiidx_shift;
	int channels;
	int dimms_per_channel;
	int (*rd_reg)(int port, int off, int op, void *data, size_t sz, char *name);
	int (*get_registers)(void);
	int (*check_ecc)(void);
	void (*mk_region)(char *name, struct region *rp, void *asym);
	void (*get_dimm_config)(struct mem_ctl_info *mci);
	int (*pmi2mem)(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
				   struct dram_addr *daddr, char *msg);
} *ops;

static struct mem_ctl_info *pnd2_mci;

#define PND2_MSG_SIZE	256

/* Debug macros */
#define pnd2_printk(level, fmt, arg...)			\
	edac_printk(level, "pnd2", fmt, ##arg)

#define pnd2_mc_printk(mci, level, fmt, arg...)	\
	edac_mc_chipset_printk(mci, level, "pnd2", fmt, ##arg)

#define MOT_CHAN_INTLV_BIT_1SLC_2CH 12
#define MOT_CHAN_INTLV_BIT_2SLC_2CH 13
#define SELECTOR_DISABLED (-1)
#define _4GB (1ul << 32)

#define PMI_ADDRESS_WIDTH	31
#define PND_MAX_PHYS_BIT	39

#define APL_ASYMSHIFT		28
#define DNV_ASYMSHIFT		31
#define CH_HASH_MASK_LSB	6
#define SLICE_HASH_MASK_LSB	6
#define MOT_SLC_INTLV_BIT	12
#define LOG2_PMI_ADDR_GRANULARITY	5
#define MOT_SHIFT	24

#define GET_BITFIELD(v, lo, hi)	(((v) & GENMASK_ULL(hi, lo)) >> (lo))
#define U64_LSHIFT(val, s)	((u64)(val) << (s))
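
/*
 * Worked example: GET_BITFIELD(0xabcd1234, 16, 31) masks with
 * GENMASK_ULL(31, 16) then shifts right by 16, yielding 0xabcd; this is
 * how mscod/errcode are extracted from MCi_STATUS further down.
 * U64_LSHIFT(0x3, 32) widens to u64 before shifting, giving 0x300000000
 * instead of the undefined behaviour of shifting a 32-bit value by 32.
 */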

#ifdef CONFIG_X86_INTEL_SBI_APL
#include "linux/platform_data/sbi_apl.h"
int sbi_send(int port, int off, int op, u32 *data)
{
	struct sbi_apl_message sbi_arg;
	int ret, read = 0;

	memset(&sbi_arg, 0, sizeof(sbi_arg));

	if (op == 0 || op == 4 || op == 6)
		read = 1;
	else
		sbi_arg.data = *data;

	sbi_arg.opcode = op;
	sbi_arg.port_address = port;
	sbi_arg.register_offset = off;
	ret = sbi_apl_commit(&sbi_arg);
	if (ret || sbi_arg.status)
		edac_dbg(2, "sbi_send status=%d ret=%d data=%x\n",
				 sbi_arg.status, ret, sbi_arg.data);

	if (ret == 0)
		ret = sbi_arg.status;

	if (ret == 0 && read)
		*data = sbi_arg.data;

	return ret;
}
#else
int sbi_send(int port, int off, int op, u32 *data)
{
	return -EUNATCH;
}
#endif

static int apl_rd_reg(int port, int off, int op, void *data, size_t sz, char *name)
{
	int	ret = 0;

	edac_dbg(2, "Read %s port=%x off=%x op=%x\n", name, port, off, op);
	switch (sz) {
	case 8:
		ret = sbi_send(port, off + 4, op, (u32 *)(data + 4));
		/* fall through */
	case 4:
		ret = sbi_send(port, off, op, (u32 *)data);
		pnd2_printk(KERN_DEBUG, "%s=%x%08x ret=%d\n", name,
					sz == 8 ? *((u32 *)(data + 4)) : 0, *((u32 *)data), ret);
		break;
	}

	return ret;
}

static u64 get_mem_ctrl_hub_base_addr(void)
{
	struct b_cr_mchbar_lo_pci lo;
	struct b_cr_mchbar_hi_pci hi;
	struct pci_dev *pdev;

	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL);
	if (pdev) {
		pci_read_config_dword(pdev, 0x48, (u32 *)&lo);
		pci_read_config_dword(pdev, 0x4c, (u32 *)&hi);
		pci_dev_put(pdev);
	} else {
		return 0;
	}

	if (!lo.enable) {
		edac_dbg(2, "MMIO via memory controller hub base address is disabled!\n");
		return 0;
	}

	return U64_LSHIFT(hi.base, 32) | U64_LSHIFT(lo.base, 15);
}

static u64 get_sideband_reg_base_addr(void)
{
	struct pci_dev *pdev;
	u32 hi, lo;

	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x19dd, NULL);
	if (pdev) {
		pci_read_config_dword(pdev, 0x10, &lo);
		pci_read_config_dword(pdev, 0x14, &hi);
		pci_dev_put(pdev);
		return (U64_LSHIFT(hi, 32) | U64_LSHIFT(lo, 0));
	} else {
		return 0xfd000000;
	}
}

static int dnv_rd_reg(int port, int off, int op, void *data, size_t sz, char *name)
{
	struct pci_dev *pdev;
	char *base;
	u64 addr;

	if (op == 4) {
		pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL);
		if (!pdev)
			return -ENODEV;

		pci_read_config_dword(pdev, off, data);
		pci_dev_put(pdev);
	} else {
		/* MMIO via memory controller hub base address */
		if (op == 0 && port == 0x4c) {
			addr = get_mem_ctrl_hub_base_addr();
			if (!addr)
				return -ENODEV;
		} else {
			/* MMIO via sideband register base address */
			addr = get_sideband_reg_base_addr();
			if (!addr)
				return -ENODEV;
			addr += (port << 16);
		}

		base = ioremap((resource_size_t)addr, 0x10000);
		if (!base)
			return -ENODEV;

		if (sz == 8)
			*(u32 *)(data + 4) = *(u32 *)(base + off + 4);
		*(u32 *)data = *(u32 *)(base + off);

		iounmap(base);
	}

	edac_dbg(2, "Read %s=%.8x_%.8x\n", name,
			(sz == 8) ? *(u32 *)(data + 4) : 0, *(u32 *)data);

	return 0;
}

#define RD_REGP(regp, regname, port)	\
	ops->rd_reg(port,					\
		regname##_offset,				\
		regname##_r_opcode,				\
		regp, sizeof(struct regname),	\
		#regname)

#define RD_REG(regp, regname)			\
	ops->rd_reg(regname ## _port,		\
		regname##_offset,				\
		regname##_r_opcode,				\
		regp, sizeof(struct regname),	\
		#regname)
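
/*
 * Expansion example: RD_REG(&tolud, b_cr_tolud_pci) becomes
 *
 *	ops->rd_reg(b_cr_tolud_pci_port, b_cr_tolud_pci_offset,
 *		    b_cr_tolud_pci_r_opcode, &tolud,
 *		    sizeof(struct b_cr_tolud_pci), "b_cr_tolud_pci");
 *
 * The per-register _port/_offset/_r_opcode constants are expected to be
 * defined alongside the register structs in pnd2_edac.h.
 */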

static u64 top_lm, top_hm;
static bool two_slices;
static bool two_channels; /* Both PMI channels in one slice enabled */

static u8 sym_chan_mask;
static u8 asym_chan_mask;
static u8 chan_mask;

static int slice_selector = -1;
static int chan_selector = -1;
static u64 slice_hash_mask;
static u64 chan_hash_mask;

static void mk_region(char *name, struct region *rp, u64 base, u64 limit)
{
	rp->enabled = 1;
	rp->base = base;
	rp->limit = limit;
	edac_dbg(2, "Region:%s [%llx, %llx]\n", name, base, limit);
}

static void mk_region_mask(char *name, struct region *rp, u64 base, u64 mask)
{
	if (mask == 0) {
		pr_info(FW_BUG "MOT mask cannot be zero\n");
		return;
	}
	if (mask != GENMASK_ULL(PND_MAX_PHYS_BIT, __ffs(mask))) {
		pr_info(FW_BUG "MOT mask not power of two\n");
		return;
	}
	if (base & ~mask) {
		pr_info(FW_BUG "MOT region base/mask alignment error\n");
		return;
	}
	rp->base = base;
	rp->limit = (base | ~mask) & GENMASK_ULL(PND_MAX_PHYS_BIT, 0);
	rp->enabled = 1;
	edac_dbg(2, "Region:%s [%llx, %llx]\n", name, base, rp->limit);
}
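
/*
 * Worked example (invented values): base = 0x1000000 and mask =
 * GENMASK_ULL(39, 24) pass all three sanity checks, and
 * limit = (0x1000000 | ~mask) & GENMASK_ULL(39, 0) = 0x1ffffff,
 * i.e. a 16 MB MOT region starting at 16 MB.
 */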

static bool in_region(struct region *rp, u64 addr)
{
	if (!rp->enabled)
		return false;

	return rp->base <= addr && addr <= rp->limit;
}

static int gen_sym_mask(struct b_cr_slice_channel_hash *p)
{
	int mask = 0;

	if (!p->slice_0_mem_disabled)
		mask |= p->sym_slice0_channel_enabled;

	if (!p->slice_1_disabled)
		mask |= p->sym_slice1_channel_enabled << 2;

	if (p->ch_1_disabled || p->enable_pmi_dual_data_mode)
		mask &= 0x5;

	return mask;
}

static int gen_asym_mask(struct b_cr_slice_channel_hash *p,
			 struct b_cr_asym_mem_region0_mchbar *as0,
			 struct b_cr_asym_mem_region1_mchbar *as1,
			 struct b_cr_asym_2way_mem_region_mchbar *as2way)
{
	const int intlv[] = { 0x5, 0xA, 0x3, 0xC };
	int mask = 0;

	if (as2way->asym_2way_interleave_enable)
		mask = intlv[as2way->asym_2way_intlv_mode];
	if (as0->slice0_asym_enable)
		mask |= (1 << as0->slice0_asym_channel_select);
	if (as1->slice1_asym_enable)
		mask |= (4 << as1->slice1_asym_channel_select);
	if (p->slice_0_mem_disabled)
		mask &= 0xc;
	if (p->slice_1_disabled)
		mask &= 0x3;
	if (p->ch_1_disabled || p->enable_pmi_dual_data_mode)
		mask &= 0x5;

	return mask;
}

static struct b_cr_tolud_pci tolud;
static struct b_cr_touud_lo_pci touud_lo;
static struct b_cr_touud_hi_pci touud_hi;
static struct b_cr_asym_mem_region0_mchbar asym0;
static struct b_cr_asym_mem_region1_mchbar asym1;
static struct b_cr_asym_2way_mem_region_mchbar asym_2way;
static struct b_cr_mot_out_base_mchbar mot_base;
static struct b_cr_mot_out_mask_mchbar mot_mask;
static struct b_cr_slice_channel_hash chash;

/* Apollo Lake dunit */
/*
 * Validated on board with just two DIMMs in the [0] and [2] positions
 * in this array. The other port numbers match the documentation, but
 * caution is advised.
 */
static const int apl_dports[APL_NUM_CHANNELS] = { 0x18, 0x10, 0x11, 0x19 };
static struct d_cr_drp0 drp0[APL_NUM_CHANNELS];

/* Denverton dunit */
static const int dnv_dports[DNV_NUM_CHANNELS] = { 0x10, 0x12 };
static struct d_cr_dsch dsch;
static struct d_cr_ecc_ctrl ecc_ctrl[DNV_NUM_CHANNELS];
static struct d_cr_drp drp[DNV_NUM_CHANNELS];
static struct d_cr_dmap dmap[DNV_NUM_CHANNELS];
static struct d_cr_dmap1 dmap1[DNV_NUM_CHANNELS];
static struct d_cr_dmap2 dmap2[DNV_NUM_CHANNELS];
static struct d_cr_dmap3 dmap3[DNV_NUM_CHANNELS];
static struct d_cr_dmap4 dmap4[DNV_NUM_CHANNELS];
static struct d_cr_dmap5 dmap5[DNV_NUM_CHANNELS];

static void apl_mk_region(char *name, struct region *rp, void *asym)
{
	struct b_cr_asym_mem_region0_mchbar *a = asym;

	mk_region(name, rp,
			  U64_LSHIFT(a->slice0_asym_base, APL_ASYMSHIFT),
			  U64_LSHIFT(a->slice0_asym_limit, APL_ASYMSHIFT) +
			  GENMASK_ULL(APL_ASYMSHIFT - 1, 0));
}

static void dnv_mk_region(char *name, struct region *rp, void *asym)
{
	struct b_cr_asym_mem_region_denverton *a = asym;

	mk_region(name, rp,
			  U64_LSHIFT(a->slice_asym_base, DNV_ASYMSHIFT),
			  U64_LSHIFT(a->slice_asym_limit, DNV_ASYMSHIFT) +
			  GENMASK_ULL(DNV_ASYMSHIFT - 1, 0));
}

static int apl_get_registers(void)
{
	int i;

	if (RD_REG(&asym_2way, b_cr_asym_2way_mem_region_mchbar))
		return -ENODEV;

	for (i = 0; i < APL_NUM_CHANNELS; i++)
		if (RD_REGP(&drp0[i], d_cr_drp0, apl_dports[i]))
			return -ENODEV;

	return 0;
}

static int dnv_get_registers(void)
{
	int i;

	if (RD_REG(&dsch, d_cr_dsch))
		return -ENODEV;

	for (i = 0; i < DNV_NUM_CHANNELS; i++)
		if (RD_REGP(&ecc_ctrl[i], d_cr_ecc_ctrl, dnv_dports[i]) ||
			RD_REGP(&drp[i], d_cr_drp, dnv_dports[i]) ||
			RD_REGP(&dmap[i], d_cr_dmap, dnv_dports[i]) ||
			RD_REGP(&dmap1[i], d_cr_dmap1, dnv_dports[i]) ||
			RD_REGP(&dmap2[i], d_cr_dmap2, dnv_dports[i]) ||
			RD_REGP(&dmap3[i], d_cr_dmap3, dnv_dports[i]) ||
			RD_REGP(&dmap4[i], d_cr_dmap4, dnv_dports[i]) ||
			RD_REGP(&dmap5[i], d_cr_dmap5, dnv_dports[i]))
			return -ENODEV;

	return 0;
}

/*
 * Read all the h/w config registers once here (they don't
 * change at run time). Figure out which address ranges have
 * which interleave characteristics.
 */
static int get_registers(void)
{
	const int intlv[] = { 10, 11, 12, 12 };

	if (RD_REG(&tolud, b_cr_tolud_pci) ||
		RD_REG(&touud_lo, b_cr_touud_lo_pci) ||
		RD_REG(&touud_hi, b_cr_touud_hi_pci) ||
		RD_REG(&asym0, b_cr_asym_mem_region0_mchbar) ||
		RD_REG(&asym1, b_cr_asym_mem_region1_mchbar) ||
		RD_REG(&mot_base, b_cr_mot_out_base_mchbar) ||
		RD_REG(&mot_mask, b_cr_mot_out_mask_mchbar) ||
		RD_REG(&chash, b_cr_slice_channel_hash))
		return -ENODEV;

	if (ops->get_registers())
		return -ENODEV;

	if (ops->type == DNV) {
		/* PMI channel idx (always 0) for asymmetric region */
		asym0.slice0_asym_channel_select = 0;
		asym1.slice1_asym_channel_select = 0;
		/* PMI channel bitmap (always 1) for symmetric region */
		chash.sym_slice0_channel_enabled = 0x1;
		chash.sym_slice1_channel_enabled = 0x1;
	}

	if (asym0.slice0_asym_enable)
		ops->mk_region("as0", &as0, &asym0);

	if (asym1.slice1_asym_enable)
		ops->mk_region("as1", &as1, &asym1);

	if (asym_2way.asym_2way_interleave_enable) {
		mk_region("as2way", &as2,
				  U64_LSHIFT(asym_2way.asym_2way_base, APL_ASYMSHIFT),
				  U64_LSHIFT(asym_2way.asym_2way_limit, APL_ASYMSHIFT) +
				  GENMASK_ULL(APL_ASYMSHIFT - 1, 0));
	}

	if (mot_base.imr_en) {
		mk_region_mask("mot", &mot,
					   U64_LSHIFT(mot_base.mot_out_base, MOT_SHIFT),
					   U64_LSHIFT(mot_mask.mot_out_mask, MOT_SHIFT));
	}

	top_lm = U64_LSHIFT(tolud.tolud, 20);
	top_hm = U64_LSHIFT(touud_hi.touud, 32) | U64_LSHIFT(touud_lo.touud, 20);

	two_slices = !chash.slice_1_disabled &&
				 !chash.slice_0_mem_disabled &&
				 (chash.sym_slice0_channel_enabled != 0) &&
				 (chash.sym_slice1_channel_enabled != 0);
	two_channels = !chash.ch_1_disabled &&
				 !chash.enable_pmi_dual_data_mode &&
				 ((chash.sym_slice0_channel_enabled == 3) ||
				 (chash.sym_slice1_channel_enabled == 3));

	sym_chan_mask = gen_sym_mask(&chash);
	asym_chan_mask = gen_asym_mask(&chash, &asym0, &asym1, &asym_2way);
	chan_mask = sym_chan_mask | asym_chan_mask;

	if (two_slices && !two_channels) {
		if (chash.hvm_mode)
			slice_selector = 29;
		else
			slice_selector = intlv[chash.interleave_mode];
	} else if (!two_slices && two_channels) {
		if (chash.hvm_mode)
			chan_selector = 29;
		else
			chan_selector = intlv[chash.interleave_mode];
	} else if (two_slices && two_channels) {
		if (chash.hvm_mode) {
			slice_selector = 29;
			chan_selector = 30;
		} else {
			slice_selector = intlv[chash.interleave_mode];
			chan_selector = intlv[chash.interleave_mode] + 1;
		}
	}

	if (two_slices) {
		if (!chash.hvm_mode)
			slice_hash_mask = chash.slice_hash_mask << SLICE_HASH_MASK_LSB;
		if (!two_channels)
			slice_hash_mask |= BIT_ULL(slice_selector);
	}

	if (two_channels) {
		if (!chash.hvm_mode)
			chan_hash_mask = chash.ch_hash_mask << CH_HASH_MASK_LSB;
		if (!two_slices)
			chan_hash_mask |= BIT_ULL(chan_selector);
	}

	return 0;
}

/* Get a contiguous memory address (remove the MMIO gap) */
static u64 remove_mmio_gap(u64 sys)
{
	return (sys < _4GB) ? sys : sys - (_4GB - top_lm);
}
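
/*
 * Worked example (invented TOLUD): with top_lm = 0x80000000 (2 GB of
 * DRAM below the gap), remove_mmio_gap(0x7fffffff) returns its argument
 * unchanged, while remove_mmio_gap(0x100000000) subtracts the 2 GB gap
 * and returns 0x80000000, making DRAM contiguous for the math below.
 */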

/* Squeeze out one address bit, shift upper part down to fill gap */
static void remove_addr_bit(u64 *addr, int bitidx)
{
	u64	mask;

	if (bitidx == -1)
		return;

	mask = (1ull << bitidx) - 1;
	*addr = ((*addr >> 1) & ~mask) | (*addr & mask);
}
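
/*
 * Worked example: removing bit 2 from *addr = 0b101101 keeps bits 1..0,
 * drops bit 2 and shifts bits 5..3 down one place:
 *
 *	mask = (1ull << 2) - 1 = 0b011;
 *	((0b101101 >> 1) & ~0b011) | (0b101101 & 0b011) == 0b10101
 *
 * i.e. bits 101 and 01 are repacked as 0b10101 (0x15).
 */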

/* XOR all the bits from addr specified in mask */
static int hash_by_mask(u64 addr, u64 mask)
{
	u64 result = addr & mask;

	result = (result >> 32) ^ result;
	result = (result >> 16) ^ result;
	result = (result >> 8) ^ result;
	result = (result >> 4) ^ result;
	result = (result >> 2) ^ result;
	result = (result >> 1) ^ result;

	return (int)result & 1;
}
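
/*
 * This is a parity fold: the return value is the XOR of all bits
 * selected by mask. Example: hash_by_mask(0x5000, 0x7000) selects
 * bits 12 and 14 (two set bits), so it returns 0, while
 * hash_by_mask(0x4000, 0x7000) selects one set bit and returns 1.
 */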

/*
 * First stage decode. Take the system address and figure out which
 * second stage will deal with it based on interleave modes.
 */
static int sys2pmi(const u64 addr, u32 *pmiidx, u64 *pmiaddr, char *msg)
{
	u64 contig_addr, contig_base, contig_offset, contig_base_adj;
	int mot_intlv_bit = two_slices ? MOT_CHAN_INTLV_BIT_2SLC_2CH :
						MOT_CHAN_INTLV_BIT_1SLC_2CH;
	int slice_intlv_bit_rm = SELECTOR_DISABLED;
	int chan_intlv_bit_rm = SELECTOR_DISABLED;
	/* Determine if address is in the MOT region. */
	bool mot_hit = in_region(&mot, addr);
	/* Calculate the number of symmetric regions enabled. */
	int sym_channels = hweight8(sym_chan_mask);

	/*
	 * The amount we need to shift the asym base can be determined by the
	 * number of enabled symmetric channels.
	 * NOTE: This can only work because symmetric memory is not supposed
	 * to do a 3-way interleave.
	 */
	int sym_chan_shift = sym_channels >> 1;

	/* Give up if address is out of range, or in MMIO gap */
	if (addr >= (1ul << PND_MAX_PHYS_BIT) ||
	   (addr >= top_lm && addr < _4GB) || addr >= top_hm) {
		snprintf(msg, PND2_MSG_SIZE, "Error address 0x%llx is not DRAM", addr);
		return -EINVAL;
	}

	/* Get a contiguous memory address (remove the MMIO gap) */
	contig_addr = remove_mmio_gap(addr);

	if (in_region(&as0, addr)) {
		*pmiidx = asym0.slice0_asym_channel_select;

		contig_base = remove_mmio_gap(as0.base);
		contig_offset = contig_addr - contig_base;
		contig_base_adj = (contig_base >> sym_chan_shift) *
						  ((chash.sym_slice0_channel_enabled >> (*pmiidx & 1)) & 1);
		contig_addr = contig_offset + ((sym_channels > 0) ? contig_base_adj : 0ull);
	} else if (in_region(&as1, addr)) {
		*pmiidx = 2u + asym1.slice1_asym_channel_select;

		contig_base = remove_mmio_gap(as1.base);
		contig_offset = contig_addr - contig_base;
		contig_base_adj = (contig_base >> sym_chan_shift) *
						  ((chash.sym_slice1_channel_enabled >> (*pmiidx & 1)) & 1);
		contig_addr = contig_offset + ((sym_channels > 0) ? contig_base_adj : 0ull);
	} else if (in_region(&as2, addr) && (asym_2way.asym_2way_intlv_mode == 0x3ul)) {
		bool channel1;

		mot_intlv_bit = MOT_CHAN_INTLV_BIT_1SLC_2CH;
		*pmiidx = (asym_2way.asym_2way_intlv_mode & 1) << 1;
		channel1 = mot_hit ? ((bool)((addr >> mot_intlv_bit) & 1)) :
			hash_by_mask(contig_addr, chan_hash_mask);
		*pmiidx |= (u32)channel1;

		contig_base = remove_mmio_gap(as2.base);
		chan_intlv_bit_rm = mot_hit ? mot_intlv_bit : chan_selector;
		contig_offset = contig_addr - contig_base;
		remove_addr_bit(&contig_offset, chan_intlv_bit_rm);
		contig_addr = (contig_base >> sym_chan_shift) + contig_offset;
	} else {
		/* Otherwise we're in normal, boring symmetric mode. */
		*pmiidx = 0u;

		if (two_slices) {
			bool slice1;

			if (mot_hit) {
				slice_intlv_bit_rm = MOT_SLC_INTLV_BIT;
				slice1 = (addr >> MOT_SLC_INTLV_BIT) & 1;
			} else {
				slice_intlv_bit_rm = slice_selector;
				slice1 = hash_by_mask(addr, slice_hash_mask);
			}

			*pmiidx = (u32)slice1 << 1;
		}

		if (two_channels) {
			bool channel1;

			mot_intlv_bit = two_slices ? MOT_CHAN_INTLV_BIT_2SLC_2CH :
							MOT_CHAN_INTLV_BIT_1SLC_2CH;

			if (mot_hit) {
				chan_intlv_bit_rm = mot_intlv_bit;
				channel1 = (addr >> mot_intlv_bit) & 1;
			} else {
				chan_intlv_bit_rm = chan_selector;
				channel1 = hash_by_mask(contig_addr, chan_hash_mask);
			}

			*pmiidx |= (u32)channel1;
		}
	}

	/* Remove the chan_selector bit first */
	remove_addr_bit(&contig_addr, chan_intlv_bit_rm);
	/* Remove the slice bit (we remove it second because it must be lower) */
	remove_addr_bit(&contig_addr, slice_intlv_bit_rm);
	*pmiaddr = contig_addr;

	return 0;
}

/* Translate PMI address to memory (rank, row, bank, column) */
#define C(n) (0x10 | (n))	/* column */
#define B(n) (0x20 | (n))	/* bank */
#define R(n) (0x40 | (n))	/* row */
#define RS   (0x80)			/* rank */
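
/*
 * Encoding example: each dimm_geometry.bits[] entry below tags one PMI
 * address bit with a destination. C(3) == 0x13 routes that PMI bit to
 * column bit 3, B(2) == 0x22 to bank bit 2, R(14) == 0x4e to row bit 14,
 * and RS marks the rank select bit. apl_pmi2mem() splits each entry back
 * into type (& ~0xf) and index (& 0xf).
 */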

/* addrdec values */
#define AMAP_1KB	0
#define AMAP_2KB	1
#define AMAP_4KB	2
#define AMAP_RSVD	3

/* dden values */
#define DEN_4Gb		0
#define DEN_8Gb		2

/* dwid values */
#define X8		0
#define X16		1

static struct dimm_geometry {
	u8	addrdec;
	u8	dden;
	u8	dwid;
	u8	rowbits, colbits;
	u16	bits[PMI_ADDRESS_WIDTH];
} dimms[] = {
	{
		.addrdec = AMAP_1KB, .dden = DEN_4Gb, .dwid = X16,
		.rowbits = 15, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  B(0),  B(1),  B(2),  R(0),
			R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),  R(9),
			R(10), C(7),  C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
			0,     0,     0,     0
		}
	},
	{
		.addrdec = AMAP_1KB, .dden = DEN_4Gb, .dwid = X8,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  B(0),  B(1),  B(2),  R(0),
			R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),  R(9),
			R(10), C(7),  C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
			R(15), 0,     0,     0
		}
	},
	{
		.addrdec = AMAP_1KB, .dden = DEN_8Gb, .dwid = X16,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  B(0),  B(1),  B(2),  R(0),
			R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),  R(9),
			R(10), C(7),  C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
			R(15), 0,     0,     0
		}
	},
	{
		.addrdec = AMAP_1KB, .dden = DEN_8Gb, .dwid = X8,
		.rowbits = 16, .colbits = 11,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  B(0),  B(1),  B(2),  R(0),
			R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),  R(9),
			R(10), C(7),  C(8),  C(9),  R(11), RS,    C(11), R(12), R(13),
			R(14), R(15), 0,     0
		}
	},
	{
		.addrdec = AMAP_2KB, .dden = DEN_4Gb, .dwid = X16,
		.rowbits = 15, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  B(0),  B(1),  B(2),
			R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),
			R(9),  R(10), C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
			0,     0,     0,     0
		}
	},
	{
		.addrdec = AMAP_2KB, .dden = DEN_4Gb, .dwid = X8,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  B(0),  B(1),  B(2),
			R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),
			R(9),  R(10), C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
			R(15), 0,     0,     0
		}
	},
	{
		.addrdec = AMAP_2KB, .dden = DEN_8Gb, .dwid = X16,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  B(0),  B(1),  B(2),
			R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),
			R(9),  R(10), C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
			R(15), 0,     0,     0
		}
	},
	{
		.addrdec = AMAP_2KB, .dden = DEN_8Gb, .dwid = X8,
		.rowbits = 16, .colbits = 11,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  B(0),  B(1),  B(2),
			R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),
			R(9),  R(10), C(8),  C(9),  R(11), RS,    C(11), R(12), R(13),
			R(14), R(15), 0,     0
		}
	},
	{
		.addrdec = AMAP_4KB, .dden = DEN_4Gb, .dwid = X16,
		.rowbits = 15, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  C(8),  B(0),  B(1),
			B(2),  R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
			R(8),  R(9),  R(10), C(9),  R(11), RS,    R(12), R(13), R(14),
			0,     0,     0,     0
		}
	},
	{
		.addrdec = AMAP_4KB, .dden = DEN_4Gb, .dwid = X8,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  C(8),  B(0),  B(1),
			B(2),  R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
			R(8),  R(9),  R(10), C(9),  R(11), RS,    R(12), R(13), R(14),
			R(15), 0,     0,     0
		}
	},
	{
		.addrdec = AMAP_4KB, .dden = DEN_8Gb, .dwid = X16,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  C(8),  B(0),  B(1),
			B(2),  R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
			R(8),  R(9),  R(10), C(9),  R(11), RS,    R(12), R(13), R(14),
			R(15), 0,     0,     0
		}
	},
	{
		.addrdec = AMAP_4KB, .dden = DEN_8Gb, .dwid = X8,
		.rowbits = 16, .colbits = 11,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  C(8),  B(0),  B(1),
			B(2),  R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
			R(8),  R(9),  R(10), C(9),  R(11), RS,    C(11), R(12), R(13),
			R(14), R(15), 0,     0
		}
	}
};

static int bank_hash(u64 pmiaddr, int idx, int shft)
{
	int bhash = 0;

	switch (idx) {
	case 0:
		bhash ^= ((pmiaddr >> (12 + shft)) ^ (pmiaddr >> (9 + shft))) & 1;
		break;
	case 1:
		bhash ^= (((pmiaddr >> (10 + shft)) ^ (pmiaddr >> (8 + shft))) & 1) << 1;
		bhash ^= ((pmiaddr >> 22) & 1) << 1;
		break;
	case 2:
		bhash ^= (((pmiaddr >> (13 + shft)) ^ (pmiaddr >> (11 + shft))) & 1) << 2;
		break;
	}

	return bhash;
}

static int rank_hash(u64 pmiaddr)
{
	return ((pmiaddr >> 16) ^ (pmiaddr >> 10)) & 1;
}

/* Second stage decode. Compute rank, bank, row & column. */
static int apl_pmi2mem(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
		       struct dram_addr *daddr, char *msg)
{
	struct d_cr_drp0 *cr_drp0 = &drp0[pmiidx];
	struct pnd2_pvt *pvt = mci->pvt_info;
	int g = pvt->dimm_geom[pmiidx];
	struct dimm_geometry *d = &dimms[g];
	int column = 0, bank = 0, row = 0, rank = 0;
	int i, idx, type, skiprs = 0;

	for (i = 0; i < PMI_ADDRESS_WIDTH; i++) {
		int	bit = (pmiaddr >> i) & 1;

		if (i + skiprs >= PMI_ADDRESS_WIDTH) {
			snprintf(msg, PND2_MSG_SIZE, "Bad dimm_geometry[] table\n");
			return -EINVAL;
		}

		type = d->bits[i + skiprs] & ~0xf;
		idx = d->bits[i + skiprs] & 0xf;

		/*
		 * On single rank DIMMs ignore the rank select bit
		 * and shift remainder of "bits[]" down one place.
		 */
		if (type == RS && (cr_drp0->rken0 + cr_drp0->rken1) == 1) {
			skiprs = 1;
			type = d->bits[i + skiprs] & ~0xf;
			idx = d->bits[i + skiprs] & 0xf;
		}

		switch (type) {
		case C(0):
			column |= (bit << idx);
			break;
		case B(0):
			bank |= (bit << idx);
			if (cr_drp0->bahen)
				bank ^= bank_hash(pmiaddr, idx, d->addrdec);
			break;
		case R(0):
			row |= (bit << idx);
			break;
		case RS:
			rank = bit;
			if (cr_drp0->rsien)
				rank ^= rank_hash(pmiaddr);
			break;
		default:
			if (bit) {
				snprintf(msg, PND2_MSG_SIZE, "Bad translation\n");
				return -EINVAL;
			}
			goto done;
		}
	}

done:
	daddr->col = column;
	daddr->bank = bank;
	daddr->row = row;
	daddr->rank = rank;
	daddr->dimm = 0;

	return 0;
}

/* Pluck bit "in" from pmiaddr and return value shifted to bit "out" */
#define dnv_get_bit(pmi, in, out) ((int)(((pmi) >> (in)) & 1u) << (out))
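
/*
 * Example: dnv_get_bit(0x2000, 13, 4) plucks bit 13 of the PMI address
 * (set here) and returns it repositioned at bit 4, i.e. 0x10. The dmap*
 * register fields below supply the "in" positions; the fixed +6/+13
 * offsets come from the register encoding.
 */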

static int dnv_pmi2mem(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
					   struct dram_addr *daddr, char *msg)
{
	/* Rank 0 or 1 */
	daddr->rank = dnv_get_bit(pmiaddr, dmap[pmiidx].rs0 + 13, 0);
	/* Rank 2 or 3 */
	daddr->rank |= dnv_get_bit(pmiaddr, dmap[pmiidx].rs1 + 13, 1);

	/*
	 * Normally ranks 0,1 are DIMM0, and 2,3 are DIMM1, but we
	 * flip them if DIMM1 is larger than DIMM0.
	 */
	daddr->dimm = (daddr->rank >= 2) ^ drp[pmiidx].dimmflip;

	daddr->bank = dnv_get_bit(pmiaddr, dmap[pmiidx].ba0 + 6, 0);
	daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].ba1 + 6, 1);
	daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].bg0 + 6, 2);
	if (dsch.ddr4en)
		daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].bg1 + 6, 3);
	if (dmap1[pmiidx].bxor) {
		if (dsch.ddr4en) {
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 0);
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row7 + 6, 1);
			if (dsch.chan_width == 0)
				/* 64/72 bit dram channel width */
				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 2);
			else
				/* 32/40 bit dram channel width */
				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 2);
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 3);
		} else {
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 0);
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 1);
			if (dsch.chan_width == 0)
				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 2);
			else
				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 2);
		}
	}

	daddr->row = dnv_get_bit(pmiaddr, dmap2[pmiidx].row0 + 6, 0);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row1 + 6, 1);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 2);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row3 + 6, 3);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row4 + 6, 4);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row5 + 6, 5);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 6);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row7 + 6, 7);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row8 + 6, 8);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row9 + 6, 9);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row10 + 6, 10);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row11 + 6, 11);
	daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row12 + 6, 12);
	daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row13 + 6, 13);
	if (dmap4[pmiidx].row14 != 31)
		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row14 + 6, 14);
	if (dmap4[pmiidx].row15 != 31)
		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row15 + 6, 15);
	if (dmap4[pmiidx].row16 != 31)
		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row16 + 6, 16);
	if (dmap4[pmiidx].row17 != 31)
		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row17 + 6, 17);

	daddr->col = dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 3);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 4);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca5 + 6, 5);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca6 + 6, 6);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca7 + 6, 7);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca8 + 6, 8);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca9 + 6, 9);
	if (!dsch.ddr4en && dmap1[pmiidx].ca11 != 0x3f)
		daddr->col |= dnv_get_bit(pmiaddr, dmap1[pmiidx].ca11 + 13, 11);

	return 0;
}

static int check_channel(int ch)
{
	if (drp0[ch].dramtype != 0) {
		pnd2_printk(KERN_INFO, "Unsupported DIMM in channel %d\n", ch);
		return 1;
	} else if (drp0[ch].eccen == 0) {
		pnd2_printk(KERN_INFO, "ECC disabled on channel %d\n", ch);
		return 1;
	}
	return 0;
}

static int apl_check_ecc_active(void)
{
	int	i, ret = 0;

	/* Check dramtype and ECC mode for each present DIMM */
	for (i = 0; i < APL_NUM_CHANNELS; i++)
		if (chan_mask & BIT(i))
			ret += check_channel(i);
	return ret ? -EINVAL : 0;
}

#define DIMMS_PRESENT(d) ((d)->rken0 + (d)->rken1 + (d)->rken2 + (d)->rken3)

static int check_unit(int ch)
{
	struct d_cr_drp *d = &drp[ch];

	if (DIMMS_PRESENT(d) && !ecc_ctrl[ch].eccen) {
		pnd2_printk(KERN_INFO, "ECC disabled on channel %d\n", ch);
		return 1;
	}
	return 0;
}

static int dnv_check_ecc_active(void)
{
	int	i, ret = 0;

	for (i = 0; i < DNV_NUM_CHANNELS; i++)
		ret += check_unit(i);
	return ret ? -EINVAL : 0;
}

static int get_memory_error_data(struct mem_ctl_info *mci, u64 addr,
								 struct dram_addr *daddr, char *msg)
{
	u64	pmiaddr;
	u32	pmiidx;
	int	ret;

	ret = sys2pmi(addr, &pmiidx, &pmiaddr, msg);
	if (ret)
		return ret;

	pmiaddr >>= ops->pmiaddr_shift;
	/* pmi channel idx to dimm channel idx */
	pmiidx >>= ops->pmiidx_shift;
	daddr->chan = pmiidx;

	ret = ops->pmi2mem(mci, pmiaddr, pmiidx, daddr, msg);
	if (ret)
		return ret;

	edac_dbg(0, "SysAddr=%llx PmiAddr=%llx Channel=%d DIMM=%d Rank=%d Bank=%d Row=%d Column=%d\n",
			 addr, pmiaddr, daddr->chan, daddr->dimm, daddr->rank, daddr->bank, daddr->row, daddr->col);

	return 0;
}

static void pnd2_mce_output_error(struct mem_ctl_info *mci, const struct mce *m,
				  struct dram_addr *daddr)
{
	enum hw_event_mc_err_type tp_event;
	char *optype, msg[PND2_MSG_SIZE];
	bool ripv = m->mcgstatus & MCG_STATUS_RIPV;
	bool overflow = m->status & MCI_STATUS_OVER;
	bool uc_err = m->status & MCI_STATUS_UC;
	bool recov = m->status & MCI_STATUS_S;
	u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52);
	u32 mscod = GET_BITFIELD(m->status, 16, 31);
	u32 errcode = GET_BITFIELD(m->status, 0, 15);
	u32 optypenum = GET_BITFIELD(m->status, 4, 6);
	int rc;

	tp_event = uc_err ? (ripv ? HW_EVENT_ERR_FATAL : HW_EVENT_ERR_UNCORRECTED) :
						 HW_EVENT_ERR_CORRECTED;

	/*
	 * According to Table 15-9 of the Intel Architecture spec vol 3A,
	 * memory errors should fit in this mask:
	 *	000f 0000 1mmm cccc (binary)
	 * where:
	 *	f = Correction Report Filtering Bit. If 1, subsequent errors
	 *	    won't be shown
	 *	mmm = error type
	 *	cccc = channel
	 * If the mask doesn't match, report an error to the parsing logic
	 */
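	/*
	 * Example: errcode = 0x0091 passes the check below
	 * (0x0091 & 0xef80 == 0x0080); optypenum is then 1, i.e.
	 * "memory read error", on channel cccc = 1.
	 */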
	if (!((errcode & 0xef80) == 0x80)) {
		optype = "Can't parse: it is not a mem";
	} else {
		switch (optypenum) {
		case 0:
			optype = "generic undef request error";
			break;
		case 1:
			optype = "memory read error";
			break;
		case 2:
			optype = "memory write error";
			break;
		case 3:
			optype = "addr/cmd error";
			break;
		case 4:
			optype = "memory scrubbing error";
			break;
		default:
			optype = "reserved";
			break;
		}
	}

	/* Only decode errors with a valid address (ADDRV) */
	if (!(m->status & MCI_STATUS_ADDRV))
		return;

	rc = get_memory_error_data(mci, m->addr, daddr, msg);
	if (rc)
		goto address_error;

	snprintf(msg, sizeof(msg),
		 "%s%s err_code:%04x:%04x channel:%d DIMM:%d rank:%d row:%d bank:%d col:%d",
		 overflow ? " OVERFLOW" : "", (uc_err && recov) ? " recoverable" : "", mscod,
		 errcode, daddr->chan, daddr->dimm, daddr->rank, daddr->row, daddr->bank, daddr->col);

	edac_dbg(0, "%s\n", msg);

	/* Call the helper to output message */
	edac_mc_handle_error(tp_event, mci, core_err_cnt, m->addr >> PAGE_SHIFT,
						 m->addr & ~PAGE_MASK, 0, daddr->chan, daddr->dimm, -1, optype, msg);

	return;

address_error:
	edac_mc_handle_error(tp_event, mci, core_err_cnt, 0, 0, 0, -1, -1, -1, msg, "");
}

static void apl_get_dimm_config(struct mem_ctl_info *mci)
{
	struct pnd2_pvt	*pvt = mci->pvt_info;
	struct dimm_info *dimm;
	struct d_cr_drp0 *d;
	u64	capacity;
	int	i, g;

	for (i = 0; i < APL_NUM_CHANNELS; i++) {
		if (!(chan_mask & BIT(i)))
			continue;

		dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, i, 0, 0);
		if (!dimm) {
			edac_dbg(0, "No allocated DIMM for channel %d\n", i);
			continue;
		}

		d = &drp0[i];
		for (g = 0; g < ARRAY_SIZE(dimms); g++)
			if (dimms[g].addrdec == d->addrdec &&
			    dimms[g].dden == d->dden &&
			    dimms[g].dwid == d->dwid)
				break;

		if (g == ARRAY_SIZE(dimms)) {
			edac_dbg(0, "Channel %d: unrecognized DIMM\n", i);
			continue;
		}

		pvt->dimm_geom[i] = g;
		capacity = (d->rken0 + d->rken1) * 8 * (1ul << dimms[g].rowbits) *
				   (1ul << dimms[g].colbits);
		edac_dbg(0, "Channel %d: %lld MByte DIMM\n", i, capacity >> (20 - 3));
		dimm->nr_pages = MiB_TO_PAGES(capacity >> (20 - 3));
		dimm->grain = 32;
		dimm->dtype = (d->dwid == 0) ? DEV_X8 : DEV_X16;
		dimm->mtype = MEM_DDR3;
		dimm->edac_mode = EDAC_SECDED;
		snprintf(dimm->label, sizeof(dimm->label), "Slice#%d_Chan#%d", i / 2, i % 2);
	}
}

static const int dnv_dtypes[] = {
	DEV_X8, DEV_X4, DEV_X16, DEV_UNKNOWN
};

static void dnv_get_dimm_config(struct mem_ctl_info *mci)
{
	int	i, j, ranks_of_dimm[DNV_MAX_DIMMS], banks, rowbits, colbits, memtype;
	struct dimm_info *dimm;
	struct d_cr_drp *d;
	u64	capacity;

	if (dsch.ddr4en) {
		memtype = MEM_DDR4;
		banks = 16;
		colbits = 10;
	} else {
		memtype = MEM_DDR3;
		banks = 8;
	}

	for (i = 0; i < DNV_NUM_CHANNELS; i++) {
		if (dmap4[i].row14 == 31)
			rowbits = 14;
		else if (dmap4[i].row15 == 31)
			rowbits = 15;
		else if (dmap4[i].row16 == 31)
			rowbits = 16;
		else if (dmap4[i].row17 == 31)
			rowbits = 17;
		else
			rowbits = 18;

		if (memtype == MEM_DDR3) {
			if (dmap1[i].ca11 != 0x3f)
				colbits = 12;
			else
				colbits = 10;
		}

		d = &drp[i];
		/* DIMM0 is present if rank0 and/or rank1 is enabled */
		ranks_of_dimm[0] = d->rken0 + d->rken1;
		/* DIMM1 is present if rank2 and/or rank3 is enabled */
		ranks_of_dimm[1] = d->rken2 + d->rken3;

		for (j = 0; j < DNV_MAX_DIMMS; j++) {
			if (!ranks_of_dimm[j])
				continue;

			dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, i, j, 0);
			if (!dimm) {
				edac_dbg(0, "No allocated DIMM for channel %d DIMM %d\n", i, j);
				continue;
			}

			capacity = ranks_of_dimm[j] * banks * (1ul << rowbits) * (1ul << colbits);
			edac_dbg(0, "Channel %d DIMM %d: %lld MByte DIMM\n", i, j, capacity >> (20 - 3));
			dimm->nr_pages = MiB_TO_PAGES(capacity >> (20 - 3));
			dimm->grain = 32;
			dimm->dtype = dnv_dtypes[j ? d->dimmdwid0 : d->dimmdwid1];
			dimm->mtype = memtype;
			dimm->edac_mode = EDAC_SECDED;
			snprintf(dimm->label, sizeof(dimm->label), "Chan#%d_DIMM#%d", i, j);
		}
	}
}

static int pnd2_register_mci(struct mem_ctl_info **ppmci)
{
	struct edac_mc_layer layers[2];
	struct mem_ctl_info *mci;
	struct pnd2_pvt *pvt;
	int rc;

	rc = ops->check_ecc();
	if (rc < 0)
		return rc;

	/* Allocate a new MC control structure */
	layers[0].type = EDAC_MC_LAYER_CHANNEL;
	layers[0].size = ops->channels;
	layers[0].is_virt_csrow = false;
	layers[1].type = EDAC_MC_LAYER_SLOT;
	layers[1].size = ops->dimms_per_channel;
	layers[1].is_virt_csrow = true;
	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
	if (!mci)
		return -ENOMEM;

	pvt = mci->pvt_info;
	memset(pvt, 0, sizeof(*pvt));

	mci->mod_name = "pnd2_edac.c";
	mci->dev_name = ops->name;
	mci->ctl_name = "Pondicherry2";

	/* Get dimm basic config and the memory layout */
	ops->get_dimm_config(mci);

	if (edac_mc_add_mc(mci)) {
		edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
		edac_mc_free(mci);
		return -EINVAL;
	}

	*ppmci = mci;

	return 0;
}

static void pnd2_unregister_mci(struct mem_ctl_info *mci)
{
	if (unlikely(!mci || !mci->pvt_info)) {
		pnd2_printk(KERN_ERR, "Couldn't find mci handler\n");
		return;
	}

	/* Remove MC sysfs nodes */
	edac_mc_del_mc(NULL);
	edac_dbg(1, "%s: free mci struct\n", mci->ctl_name);
	edac_mc_free(mci);
}

/*
 * Callback function registered with core kernel mce code.
 * Called once for each logged error.
 */
static int pnd2_mce_check_error(struct notifier_block *nb, unsigned long val, void *data)
{
	struct mce *mce = (struct mce *)data;
	struct mem_ctl_info *mci;
	struct dram_addr daddr;
	char *type;

	if (edac_get_report_status() == EDAC_REPORTING_DISABLED)
		return NOTIFY_DONE;

	mci = pnd2_mci;
	if (!mci)
		return NOTIFY_DONE;

	/*
	 * Just let mcelog handle it if the error is
	 * outside the memory controller. A memory error
	 * is indicated by bit 7 = 1 and bits 8-11,13-15 = 0.
	 * Bit 12 has a special meaning.
	 */
	if ((mce->status & 0xefff) >> 7 != 1)
		return NOTIFY_DONE;

	if (mce->mcgstatus & MCG_STATUS_MCIP)
		type = "Exception";
	else
		type = "Event";

	pnd2_mc_printk(mci, KERN_INFO, "HANDLING MCE MEMORY ERROR\n");
	pnd2_mc_printk(mci, KERN_INFO, "CPU %u: Machine Check %s: %llx Bank %u: %llx\n",
				   mce->extcpu, type, mce->mcgstatus, mce->bank, mce->status);
	pnd2_mc_printk(mci, KERN_INFO, "TSC %llx ", mce->tsc);
	pnd2_mc_printk(mci, KERN_INFO, "ADDR %llx ", mce->addr);
	pnd2_mc_printk(mci, KERN_INFO, "MISC %llx ", mce->misc);
	pnd2_mc_printk(mci, KERN_INFO, "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x\n",
				   mce->cpuvendor, mce->cpuid, mce->time, mce->socketid, mce->apicid);

	pnd2_mce_output_error(mci, mce, &daddr);

	/* Advise mcelog that the error was handled */
	return NOTIFY_STOP;
}

static struct notifier_block pnd2_mce_dec = {
	.notifier_call	= pnd2_mce_check_error,
};

#ifdef CONFIG_EDAC_DEBUG
/*
 * Write an address to this file to exercise the address decode
 * logic in this driver.
 */
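/*
 * Usage sketch, assuming debugfs is mounted at the conventional
 * /sys/kernel/debug (edac_debugfs_create_dir() places pnd2_test under
 * the edac directory there):
 *
 *	echo 0x12345678 > /sys/kernel/debug/edac/pnd2_test/pnd2_debug_addr
 *	cat /sys/kernel/debug/edac/pnd2_test/pnd2_debug_results
 *
 * The cat returns the "SysAddr=... Channel=..." line formatted by
 * debugfs_u64_set() below.
 */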
static u64 pnd2_fake_addr;
#define PND2_BLOB_SIZE 1024
static char pnd2_result[PND2_BLOB_SIZE];
static struct dentry *pnd2_test;
static struct debugfs_blob_wrapper pnd2_blob = {
	.data = pnd2_result,
	.size = 0
};

static int debugfs_u64_set(void *data, u64 val)
{
	struct dram_addr daddr;
	struct mce m;

	*(u64 *)data = val;
	m.mcgstatus = 0;
	/* ADDRV + MemRd + Unknown channel */
	m.status = MCI_STATUS_ADDRV + 0x9f;
	m.addr = val;
	pnd2_mce_output_error(pnd2_mci, &m, &daddr);
	snprintf(pnd2_blob.data, PND2_BLOB_SIZE,
			 "SysAddr=%llx Channel=%d DIMM=%d Rank=%d Bank=%d Row=%d Column=%d\n",
			 m.addr, daddr.chan, daddr.dimm, daddr.rank, daddr.bank, daddr.row, daddr.col);
	pnd2_blob.size = strlen(pnd2_blob.data);

	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n");

static void setup_pnd2_debug(void)
{
	pnd2_test = edac_debugfs_create_dir("pnd2_test");
	edac_debugfs_create_file("pnd2_debug_addr", 0200, pnd2_test,
							 &pnd2_fake_addr, &fops_u64_wo);
	debugfs_create_blob("pnd2_debug_results", 0400, pnd2_test, &pnd2_blob);
}

static void teardown_pnd2_debug(void)
{
	debugfs_remove_recursive(pnd2_test);
}
#else
static void setup_pnd2_debug(void)	{}
static void teardown_pnd2_debug(void)	{}
#endif /* CONFIG_EDAC_DEBUG */


static int pnd2_probe(void)
{
	int rc;

	edac_dbg(2, "\n");
	rc = get_registers();
	if (rc)
		return rc;

	return pnd2_register_mci(&pnd2_mci);
}

static void pnd2_remove(void)
{
	edac_dbg(0, "\n");
	pnd2_unregister_mci(pnd2_mci);
}

static struct dunit_ops apl_ops = {
		.name			= "pnd2/apl",
		.type			= APL,
		.pmiaddr_shift		= LOG2_PMI_ADDR_GRANULARITY,
		.pmiidx_shift		= 0,
		.channels		= APL_NUM_CHANNELS,
		.dimms_per_channel	= 1,
		.rd_reg			= apl_rd_reg,
		.get_registers		= apl_get_registers,
		.check_ecc		= apl_check_ecc_active,
		.mk_region		= apl_mk_region,
		.get_dimm_config	= apl_get_dimm_config,
		.pmi2mem		= apl_pmi2mem,
};

static struct dunit_ops dnv_ops = {
		.name			= "pnd2/dnv",
		.type			= DNV,
		.pmiaddr_shift		= 0,
		.pmiidx_shift		= 1,
		.channels		= DNV_NUM_CHANNELS,
		.dimms_per_channel	= 2,
		.rd_reg			= dnv_rd_reg,
		.get_registers		= dnv_get_registers,
		.check_ecc		= dnv_check_ecc_active,
		.mk_region		= dnv_mk_region,
		.get_dimm_config	= dnv_get_dimm_config,
		.pmi2mem		= dnv_pmi2mem,
};

static const struct x86_cpu_id pnd2_cpuids[] = {
	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT, 0, (kernel_ulong_t)&apl_ops },
	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_DENVERTON, 0, (kernel_ulong_t)&dnv_ops },
	{ }
};
MODULE_DEVICE_TABLE(x86cpu, pnd2_cpuids);

static int __init pnd2_init(void)
{
	const struct x86_cpu_id *id;
	int rc;

	edac_dbg(2, "\n");

	id = x86_match_cpu(pnd2_cpuids);
	if (!id)
		return -ENODEV;

	ops = (struct dunit_ops *)id->driver_data;

	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
	opstate_init();

	rc = pnd2_probe();
	if (rc < 0) {
		pnd2_printk(KERN_ERR, "Failed to register device with error %d.\n", rc);
		return rc;
	}

	if (!pnd2_mci)
		return -ENODEV;

	mce_register_decode_chain(&pnd2_mce_dec);
	setup_pnd2_debug();

	return 0;
}

static void __exit pnd2_exit(void)
{
	edac_dbg(2, "\n");
	teardown_pnd2_debug();
	mce_unregister_decode_chain(&pnd2_mce_dec);
	pnd2_remove();
}

module_init(pnd2_init);
module_exit(pnd2_exit);

module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Tony Luck");
MODULE_DESCRIPTION("MC Driver for Intel SoC using Pondicherry memory controller");