xref: /openbmc/linux/drivers/edac/synopsys_edac.c (revision 141e5239)
1 /*
2  * Synopsys DDR ECC Driver
3  * This driver is based on ppc4xx_edac.c drivers
4  *
5  * Copyright (C) 2012 - 2014 Xilinx, Inc.
6  *
7  * This program is free software: you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License as published by
9  * the Free Software Foundation, either version 2 of the License, or
10  * (at your option) any later version.
11  *
12  * This program is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  * GNU General Public License for more details.
16  *
17  * This file is subject to the terms and conditions of the GNU General Public
18  * License.  See the file "COPYING" in the main directory of this archive
19  * for more details
20  */
21 
22 #include <linux/edac.h>
23 #include <linux/module.h>
24 #include <linux/platform_device.h>
25 #include <linux/interrupt.h>
26 #include <linux/of.h>
27 #include <linux/of_device.h>
28 
29 #include "edac_module.h"
30 
31 /* Number of cs_rows needed per memory controller */
32 #define SYNPS_EDAC_NR_CSROWS		1
33 
34 /* Number of channels per memory controller */
35 #define SYNPS_EDAC_NR_CHANS		1
36 
37 /* Granularity of reported error in bytes */
38 #define SYNPS_EDAC_ERR_GRAIN		1
39 
40 #define SYNPS_EDAC_MSG_SIZE		256
41 
42 #define SYNPS_EDAC_MOD_STRING		"synps_edac"
43 #define SYNPS_EDAC_MOD_VER		"1"
44 
45 /* Synopsys DDR memory controller registers that are relevant to ECC */
46 #define CTRL_OFST			0x0
47 #define T_ZQ_OFST			0xA4
48 
49 /* ECC control register */
50 #define ECC_CTRL_OFST			0xC4
51 /* ECC log register */
52 #define CE_LOG_OFST			0xC8
53 /* ECC address register */
54 #define CE_ADDR_OFST			0xCC
55 /* ECC data[31:0] register */
56 #define CE_DATA_31_0_OFST		0xD0
57 
58 /* Uncorrectable error info registers */
59 #define UE_LOG_OFST			0xDC
60 #define UE_ADDR_OFST			0xE0
61 #define UE_DATA_31_0_OFST		0xE4
62 
63 #define STAT_OFST			0xF0
64 #define SCRUB_OFST			0xF4
65 
66 /* Control register bit field definitions */
67 #define CTRL_BW_MASK			0xC
68 #define CTRL_BW_SHIFT			2
69 
70 #define DDRCTL_WDTH_16			1
71 #define DDRCTL_WDTH_32			0
72 
73 /* ZQ register bit field definitions */
74 #define T_ZQ_DDRMODE_MASK		0x2
75 
76 /* ECC control register bit field definitions */
77 #define ECC_CTRL_CLR_CE_ERR		0x2
78 #define ECC_CTRL_CLR_UE_ERR		0x1
79 
80 /* ECC correctable/uncorrectable error log register definitions */
81 #define LOG_VALID			0x1
82 #define CE_LOG_BITPOS_MASK		0xFE
83 #define CE_LOG_BITPOS_SHIFT		1
84 
85 /* ECC correctable/uncorrectable error address register definitions */
86 #define ADDR_COL_MASK			0xFFF
87 #define ADDR_ROW_MASK			0xFFFF000
88 #define ADDR_ROW_SHIFT			12
89 #define ADDR_BANK_MASK			0x70000000
90 #define ADDR_BANK_SHIFT			28
91 
92 /* ECC statistic register definitions */
93 #define STAT_UECNT_MASK			0xFF
94 #define STAT_CECNT_MASK			0xFF00
95 #define STAT_CECNT_SHIFT		8
96 
97 /* ECC scrub register definitions */
98 #define SCRUB_MODE_MASK			0x7
99 #define SCRUB_MODE_SECDED		0x4
100 
101 /* DDR ECC Quirks */
102 #define DDR_ECC_INTR_SUPPORT		BIT(0)
103 #define DDR_ECC_DATA_POISON_SUPPORT	BIT(1)
104 #define DDR_ECC_INTR_SELF_CLEAR		BIT(2)
105 
106 /* ZynqMP Enhanced DDR memory controller registers that are relevant to ECC */
107 /* ECC Configuration Registers */
108 #define ECC_CFG0_OFST			0x70
109 #define ECC_CFG1_OFST			0x74
110 
111 /* ECC Status Register */
112 #define ECC_STAT_OFST			0x78
113 
114 /* ECC Clear Register */
115 #define ECC_CLR_OFST			0x7C
116 
117 /* ECC Error count Register */
118 #define ECC_ERRCNT_OFST			0x80
119 
120 /* ECC Corrected Error Address Register */
121 #define ECC_CEADDR0_OFST		0x84
122 #define ECC_CEADDR1_OFST		0x88
123 
124 /* ECC Syndrome Registers */
125 #define ECC_CSYND0_OFST			0x8C
126 #define ECC_CSYND1_OFST			0x90
127 #define ECC_CSYND2_OFST			0x94
128 
129 /* ECC Bit Mask0 Address Register */
130 #define ECC_BITMASK0_OFST		0x98
131 #define ECC_BITMASK1_OFST		0x9C
132 #define ECC_BITMASK2_OFST		0xA0
133 
134 /* ECC UnCorrected Error Address Register */
135 #define ECC_UEADDR0_OFST		0xA4
136 #define ECC_UEADDR1_OFST		0xA8
137 
138 /* ECC Syndrome Registers */
139 #define ECC_UESYND0_OFST		0xAC
140 #define ECC_UESYND1_OFST		0xB0
141 #define ECC_UESYND2_OFST		0xB4
142 
143 /* ECC Poison Address Reg */
144 #define ECC_POISON0_OFST		0xB8
145 #define ECC_POISON1_OFST		0xBC
146 
147 #define ECC_ADDRMAP0_OFFSET		0x200
148 
149 /* Control register bitfield definitions */
150 #define ECC_CTRL_BUSWIDTH_MASK		0x3000
151 #define ECC_CTRL_BUSWIDTH_SHIFT		12
152 #define ECC_CTRL_CLR_CE_ERRCNT		BIT(2)
153 #define ECC_CTRL_CLR_UE_ERRCNT		BIT(3)
154 
155 /* DDR Control Register width definitions  */
156 #define DDRCTL_EWDTH_16			2
157 #define DDRCTL_EWDTH_32			1
158 #define DDRCTL_EWDTH_64			0
159 
160 /* ECC status register definitions */
161 #define ECC_STAT_UECNT_MASK		0xF0000
162 #define ECC_STAT_UECNT_SHIFT		16
163 #define ECC_STAT_CECNT_MASK		0xF00
164 #define ECC_STAT_CECNT_SHIFT		8
165 #define ECC_STAT_BITNUM_MASK		0x7F
166 
167 /* ECC error count register definitions */
168 #define ECC_ERRCNT_UECNT_MASK		0xFFFF0000
169 #define ECC_ERRCNT_UECNT_SHIFT		16
170 #define ECC_ERRCNT_CECNT_MASK		0xFFFF
171 
172 /* DDR QOS Interrupt register definitions */
173 #define DDR_QOS_IRQ_STAT_OFST		0x20200
174 #define DDR_QOSUE_MASK			0x4
175 #define	DDR_QOSCE_MASK			0x2
176 #define	ECC_CE_UE_INTR_MASK		0x6
177 #define DDR_QOS_IRQ_EN_OFST		0x20208
178 #define DDR_QOS_IRQ_DB_OFST		0x2020C
179 
180 /* DDR QOS Interrupt register definitions */
181 #define DDR_UE_MASK			BIT(9)
182 #define DDR_CE_MASK			BIT(8)
183 
184 /* ECC Corrected Error Register Mask and Shifts*/
185 #define ECC_CEADDR0_RW_MASK		0x3FFFF
186 #define ECC_CEADDR0_RNK_MASK		BIT(24)
187 #define ECC_CEADDR1_BNKGRP_MASK		0x3000000
188 #define ECC_CEADDR1_BNKNR_MASK		0x70000
189 #define ECC_CEADDR1_BLKNR_MASK		0xFFF
190 #define ECC_CEADDR1_BNKGRP_SHIFT	24
191 #define ECC_CEADDR1_BNKNR_SHIFT		16
192 
193 /* ECC Poison register shifts */
194 #define ECC_POISON0_RANK_SHIFT		24
195 #define ECC_POISON0_RANK_MASK		BIT(24)
196 #define ECC_POISON0_COLUMN_SHIFT	0
197 #define ECC_POISON0_COLUMN_MASK		0xFFF
198 #define ECC_POISON1_BG_SHIFT		28
199 #define ECC_POISON1_BG_MASK		0x30000000
200 #define ECC_POISON1_BANKNR_SHIFT	24
201 #define ECC_POISON1_BANKNR_MASK		0x7000000
202 #define ECC_POISON1_ROW_SHIFT		0
203 #define ECC_POISON1_ROW_MASK		0x3FFFF
204 
205 /* DDR Memory type defines */
206 #define MEM_TYPE_DDR3			0x1
207 #define MEM_TYPE_LPDDR3			0x8
208 #define MEM_TYPE_DDR2			0x4
209 #define MEM_TYPE_DDR4			0x10
210 #define MEM_TYPE_LPDDR4			0x20
211 
212 /* DDRC Software control register */
213 #define DDRC_SWCTL			0x320
214 
215 /* DDRC ECC CE & UE poison mask */
216 #define ECC_CEPOISON_MASK		0x3
217 #define ECC_UEPOISON_MASK		0x1
218 
219 /* DDRC Device config masks */
220 #define DDRC_MSTR_CFG_MASK		0xC0000000
221 #define DDRC_MSTR_CFG_SHIFT		30
222 #define DDRC_MSTR_CFG_X4_MASK		0x0
223 #define DDRC_MSTR_CFG_X8_MASK		0x1
224 #define DDRC_MSTR_CFG_X16_MASK		0x2
225 #define DDRC_MSTR_CFG_X32_MASK		0x3
226 
227 #define DDR_MAX_ROW_SHIFT		18
228 #define DDR_MAX_COL_SHIFT		14
229 #define DDR_MAX_BANK_SHIFT		3
230 #define DDR_MAX_BANKGRP_SHIFT		2
231 
232 #define ROW_MAX_VAL_MASK		0xF
233 #define COL_MAX_VAL_MASK		0xF
234 #define BANK_MAX_VAL_MASK		0x1F
235 #define BANKGRP_MAX_VAL_MASK		0x1F
236 #define RANK_MAX_VAL_MASK		0x1F
237 
238 #define ROW_B0_BASE			6
239 #define ROW_B1_BASE			7
240 #define ROW_B2_BASE			8
241 #define ROW_B3_BASE			9
242 #define ROW_B4_BASE			10
243 #define ROW_B5_BASE			11
244 #define ROW_B6_BASE			12
245 #define ROW_B7_BASE			13
246 #define ROW_B8_BASE			14
247 #define ROW_B9_BASE			15
248 #define ROW_B10_BASE			16
249 #define ROW_B11_BASE			17
250 #define ROW_B12_BASE			18
251 #define ROW_B13_BASE			19
252 #define ROW_B14_BASE			20
253 #define ROW_B15_BASE			21
254 #define ROW_B16_BASE			22
255 #define ROW_B17_BASE			23
256 
257 #define COL_B2_BASE			2
258 #define COL_B3_BASE			3
259 #define COL_B4_BASE			4
260 #define COL_B5_BASE			5
261 #define COL_B6_BASE			6
262 #define COL_B7_BASE			7
263 #define COL_B8_BASE			8
264 #define COL_B9_BASE			9
265 #define COL_B10_BASE			10
266 #define COL_B11_BASE			11
267 #define COL_B12_BASE			12
268 #define COL_B13_BASE			13
269 
270 #define BANK_B0_BASE			2
271 #define BANK_B1_BASE			3
272 #define BANK_B2_BASE			4
273 
274 #define BANKGRP_B0_BASE			2
275 #define BANKGRP_B1_BASE			3
276 
277 #define RANK_B0_BASE			6
278 
/**
 * struct ecc_error_info - ECC error log information.
 * @row:	Row number.
 * @col:	Column number.
 * @bank:	Bank number.
 * @bitpos:	Bit position of the corrected bit (filled in for CEs only).
 * @data:	Data word (Zynq) or syndrome word 0 (ZynqMP) read from the
 *		error log registers.
 * @bankgrpnr:	Bank group number (only written by the ZynqMP decode path).
 * @blknr:	Block number (only written by the ZynqMP decode path).
 */
struct ecc_error_info {
	u32 row;
	u32 col;
	u32 bank;
	u32 bitpos;
	u32 data;
	u32 bankgrpnr;
	u32 blknr;
};
298 
/**
 * struct synps_ecc_status - ECC status information to report.
 * @ce_cnt:	Correctable error count read from the hardware counters.
 * @ue_cnt:	Uncorrectable error count read from the hardware counters.
 * @ceinfo:	Correctable error log information.
 * @ueinfo:	Uncorrectable error log information.
 *
 * Filled in by the platform get_error_info() hook and consumed (then
 * zeroed) by handle_error().
 */
struct synps_ecc_status {
	u32 ce_cnt;
	u32 ue_cnt;
	struct ecc_error_info ceinfo;
	struct ecc_error_info ueinfo;
};
312 
/**
 * struct synps_edac_priv - DDR memory controller private instance data.
 * @baseaddr:		Base address of the DDR controller.
 * @message:		Buffer for framing the event specific info.
 * @stat:		ECC status information.
 * @p_data:		Platform data.
 * @ce_cnt:		Correctable Error count (running total since probe).
 * @ue_cnt:		Uncorrectable Error count (running total since probe).
 * @poison_addr:	Data poison address.
 * @row_shift:		Bit shifts for row bit.
 * @col_shift:		Bit shifts for column bit.
 * @bank_shift:		Bit shifts for bank bit.
 * @bankgrp_shift:	Bit shifts for bank group bit.
 * @rank_shift:		Bit shifts for rank bit.
 *
 * The poison/address-map fields only exist when CONFIG_EDAC_DEBUG is
 * set; they back the error-injection sysfs attributes.
 */
struct synps_edac_priv {
	void __iomem *baseaddr;
	char message[SYNPS_EDAC_MSG_SIZE];
	struct synps_ecc_status stat;
	const struct synps_platform_data *p_data;
	u32 ce_cnt;
	u32 ue_cnt;
#ifdef CONFIG_EDAC_DEBUG
	ulong poison_addr;
	u32 row_shift[18];
	u32 col_shift[14];
	u32 bank_shift[3];
	u32 bankgrp_shift[2];
	u32 rank_shift[1];
#endif
};
344 
/**
 * struct synps_platform_data -  synps platform data structure.
 * @get_error_info:	Get EDAC error info (returns 1 when no error pending).
 * @get_mtype:		Get mtype.
 * @get_dtype:		Get dtype.
 * @get_ecc_state:	Get ECC state.
 * @quirks:		Bitmask of DDR_ECC_* flags used to differentiate IPs.
 */
struct synps_platform_data {
	int (*get_error_info)(struct synps_edac_priv *priv);
	enum mem_type (*get_mtype)(const void __iomem *base);
	enum dev_type (*get_dtype)(const void __iomem *base);
	bool (*get_ecc_state)(void __iomem *base);
	int quirks;
};
360 
/**
 * zynq_get_error_info - Get the current ECC error info.
 * @priv:	DDR memory controller private instance data.
 *
 * Read the Zynq DDRC status and log registers, fill @priv->stat with any
 * pending correctable/uncorrectable error details, then clear the logs.
 *
 * Return: one if there is no error, otherwise zero.
 */
static int zynq_get_error_info(struct synps_edac_priv *priv)
{
	struct synps_ecc_status *p;
	u32 regval, clearval = 0;
	void __iomem *base;

	base = priv->baseaddr;
	p = &priv->stat;

	/* STAT reads zero when no CE/UE has been counted since the last clear. */
	regval = readl(base + STAT_OFST);
	if (!regval)
		return 1;

	p->ce_cnt = (regval & STAT_CECNT_MASK) >> STAT_CECNT_SHIFT;
	p->ue_cnt = regval & STAT_UECNT_MASK;

	/* Decode the CE log only if errors were counted AND the log is valid. */
	regval = readl(base + CE_LOG_OFST);
	if (!(p->ce_cnt && (regval & LOG_VALID)))
		goto ue_err;

	p->ceinfo.bitpos = (regval & CE_LOG_BITPOS_MASK) >> CE_LOG_BITPOS_SHIFT;
	regval = readl(base + CE_ADDR_OFST);
	p->ceinfo.row = (regval & ADDR_ROW_MASK) >> ADDR_ROW_SHIFT;
	p->ceinfo.col = regval & ADDR_COL_MASK;
	p->ceinfo.bank = (regval & ADDR_BANK_MASK) >> ADDR_BANK_SHIFT;
	p->ceinfo.data = readl(base + CE_DATA_31_0_OFST);
	edac_dbg(3, "CE bit position: %d data: %d\n", p->ceinfo.bitpos,
		 p->ceinfo.data);
	clearval = ECC_CTRL_CLR_CE_ERR;

ue_err:
	regval = readl(base + UE_LOG_OFST);
	if (!(p->ue_cnt && (regval & LOG_VALID)))
		goto out;

	regval = readl(base + UE_ADDR_OFST);
	p->ueinfo.row = (regval & ADDR_ROW_MASK) >> ADDR_ROW_SHIFT;
	p->ueinfo.col = regval & ADDR_COL_MASK;
	p->ueinfo.bank = (regval & ADDR_BANK_MASK) >> ADDR_BANK_SHIFT;
	p->ueinfo.data = readl(base + UE_DATA_31_0_OFST);
	clearval |= ECC_CTRL_CLR_UE_ERR;

out:
	/*
	 * Pulse the clear bits: assert them, then write zero.  The zero
	 * write presumably de-asserts the clear controls so new errors can
	 * be logged -- TODO confirm against the DDRC register spec.
	 */
	writel(clearval, base + ECC_CTRL_OFST);
	writel(0x0, base + ECC_CTRL_OFST);

	return 0;
}
415 
/**
 * zynqmp_get_error_info - Get the current ECC error info.
 * @priv:	DDR memory controller private instance data.
 *
 * Read the ZynqMP DDRC error-count, status, address and syndrome
 * registers into @priv->stat, then clear the hardware counters/logs.
 *
 * Return: one if there is no error otherwise returns zero.
 */
static int zynqmp_get_error_info(struct synps_edac_priv *priv)
{
	struct synps_ecc_status *p;
	u32 regval, clearval = 0;
	void __iomem *base;

	base = priv->baseaddr;
	p = &priv->stat;

	/* CE count lives in the low half of ERRCNT, UE count in the high. */
	regval = readl(base + ECC_ERRCNT_OFST);
	p->ce_cnt = regval & ECC_ERRCNT_CECNT_MASK;
	p->ue_cnt = (regval & ECC_ERRCNT_UECNT_MASK) >> ECC_ERRCNT_UECNT_SHIFT;
	if (!p->ce_cnt)
		goto ue_err;

	/* ECC_STAT reads zero when there is nothing to decode. */
	regval = readl(base + ECC_STAT_OFST);
	if (!regval)
		return 1;

	p->ceinfo.bitpos = (regval & ECC_STAT_BITNUM_MASK);

	regval = readl(base + ECC_CEADDR0_OFST);
	p->ceinfo.row = (regval & ECC_CEADDR0_RW_MASK);
	regval = readl(base + ECC_CEADDR1_OFST);
	p->ceinfo.bank = (regval & ECC_CEADDR1_BNKNR_MASK) >>
					ECC_CEADDR1_BNKNR_SHIFT;
	p->ceinfo.bankgrpnr = (regval &	ECC_CEADDR1_BNKGRP_MASK) >>
					ECC_CEADDR1_BNKGRP_SHIFT;
	p->ceinfo.blknr = (regval & ECC_CEADDR1_BLKNR_MASK);
	p->ceinfo.data = readl(base + ECC_CSYND0_OFST);
	edac_dbg(2, "ECCCSYN0: 0x%08X ECCCSYN1: 0x%08X ECCCSYN2: 0x%08X\n",
		 readl(base + ECC_CSYND0_OFST), readl(base + ECC_CSYND1_OFST),
		 readl(base + ECC_CSYND2_OFST));
ue_err:
	if (!p->ue_cnt)
		goto out;

	regval = readl(base + ECC_UEADDR0_OFST);
	p->ueinfo.row = (regval & ECC_CEADDR0_RW_MASK);
	regval = readl(base + ECC_UEADDR1_OFST);
	p->ueinfo.bankgrpnr = (regval & ECC_CEADDR1_BNKGRP_MASK) >>
					ECC_CEADDR1_BNKGRP_SHIFT;
	p->ueinfo.bank = (regval & ECC_CEADDR1_BNKNR_MASK) >>
					ECC_CEADDR1_BNKNR_SHIFT;
	p->ueinfo.blknr = (regval & ECC_CEADDR1_BLKNR_MASK);
	p->ueinfo.data = readl(base + ECC_UESYND0_OFST);
out:
	/* Clear both error logs and both error counters unconditionally. */
	clearval = ECC_CTRL_CLR_CE_ERR | ECC_CTRL_CLR_CE_ERRCNT;
	clearval |= ECC_CTRL_CLR_UE_ERR | ECC_CTRL_CLR_UE_ERRCNT;
	writel(clearval, base + ECC_CLR_OFST);
	writel(0x0, base + ECC_CLR_OFST);

	return 0;
}
476 
477 /**
478  * handle_error - Handle Correctable and Uncorrectable errors.
479  * @mci:	EDAC memory controller instance.
480  * @p:		Synopsys ECC status structure.
481  *
482  * Handles ECC correctable and uncorrectable errors.
483  */
484 static void handle_error(struct mem_ctl_info *mci, struct synps_ecc_status *p)
485 {
486 	struct synps_edac_priv *priv = mci->pvt_info;
487 	struct ecc_error_info *pinf;
488 
489 	if (p->ce_cnt) {
490 		pinf = &p->ceinfo;
491 		if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT) {
492 			snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
493 				 "DDR ECC error type:%s Row %d Bank %d BankGroup Number %d Block Number %d Bit Position: %d Data: 0x%08x",
494 				 "CE", pinf->row, pinf->bank,
495 				 pinf->bankgrpnr, pinf->blknr,
496 				 pinf->bitpos, pinf->data);
497 		} else {
498 			snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
499 				 "DDR ECC error type:%s Row %d Bank %d Col %d Bit Position: %d Data: 0x%08x",
500 				 "CE", pinf->row, pinf->bank, pinf->col,
501 				 pinf->bitpos, pinf->data);
502 		}
503 
504 		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
505 				     p->ce_cnt, 0, 0, 0, 0, 0, -1,
506 				     priv->message, "");
507 	}
508 
509 	if (p->ue_cnt) {
510 		pinf = &p->ueinfo;
511 		if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT) {
512 			snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
513 				 "DDR ECC error type :%s Row %d Bank %d BankGroup Number %d Block Number %d",
514 				 "UE", pinf->row, pinf->bank,
515 				 pinf->bankgrpnr, pinf->blknr);
516 		} else {
517 			snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
518 				 "DDR ECC error type :%s Row %d Bank %d Col %d ",
519 				 "UE", pinf->row, pinf->bank, pinf->col);
520 		}
521 
522 		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
523 				     p->ue_cnt, 0, 0, 0, 0, 0, -1,
524 				     priv->message, "");
525 	}
526 
527 	memset(p, 0, sizeof(*p));
528 }
529 
530 /**
531  * intr_handler - Interrupt Handler for ECC interrupts.
532  * @irq:        IRQ number.
533  * @dev_id:     Device ID.
534  *
535  * Return: IRQ_NONE, if interrupt not set or IRQ_HANDLED otherwise.
536  */
537 static irqreturn_t intr_handler(int irq, void *dev_id)
538 {
539 	const struct synps_platform_data *p_data;
540 	struct mem_ctl_info *mci = dev_id;
541 	struct synps_edac_priv *priv;
542 	int status, regval;
543 
544 	priv = mci->pvt_info;
545 	p_data = priv->p_data;
546 
547 	/*
548 	 * v3.0 of the controller has the ce/ue bits cleared automatically,
549 	 * so this condition does not apply.
550 	 */
551 	if (!(priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)) {
552 		regval = readl(priv->baseaddr + DDR_QOS_IRQ_STAT_OFST);
553 		regval &= (DDR_QOSCE_MASK | DDR_QOSUE_MASK);
554 		if (!(regval & ECC_CE_UE_INTR_MASK))
555 			return IRQ_NONE;
556 	}
557 
558 	status = p_data->get_error_info(priv);
559 	if (status)
560 		return IRQ_NONE;
561 
562 	priv->ce_cnt += priv->stat.ce_cnt;
563 	priv->ue_cnt += priv->stat.ue_cnt;
564 	handle_error(mci, &priv->stat);
565 
566 	edac_dbg(3, "Total error count CE %d UE %d\n",
567 		 priv->ce_cnt, priv->ue_cnt);
568 	/* v3.0 of the controller does not have this register */
569 	if (!(priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR))
570 		writel(regval, priv->baseaddr + DDR_QOS_IRQ_STAT_OFST);
571 	return IRQ_HANDLED;
572 }
573 
574 /**
575  * check_errors - Check controller for ECC errors.
576  * @mci:	EDAC memory controller instance.
577  *
578  * Check and post ECC errors. Called by the polling thread.
579  */
580 static void check_errors(struct mem_ctl_info *mci)
581 {
582 	const struct synps_platform_data *p_data;
583 	struct synps_edac_priv *priv;
584 	int status;
585 
586 	priv = mci->pvt_info;
587 	p_data = priv->p_data;
588 
589 	status = p_data->get_error_info(priv);
590 	if (status)
591 		return;
592 
593 	priv->ce_cnt += priv->stat.ce_cnt;
594 	priv->ue_cnt += priv->stat.ue_cnt;
595 	handle_error(mci, &priv->stat);
596 
597 	edac_dbg(3, "Total error count CE %d UE %d\n",
598 		 priv->ce_cnt, priv->ue_cnt);
599 }
600 
601 /**
602  * zynq_get_dtype - Return the controller memory width.
603  * @base:	DDR memory controller base address.
604  *
605  * Get the EDAC device type width appropriate for the current controller
606  * configuration.
607  *
608  * Return: a device type width enumeration.
609  */
610 static enum dev_type zynq_get_dtype(const void __iomem *base)
611 {
612 	enum dev_type dt;
613 	u32 width;
614 
615 	width = readl(base + CTRL_OFST);
616 	width = (width & CTRL_BW_MASK) >> CTRL_BW_SHIFT;
617 
618 	switch (width) {
619 	case DDRCTL_WDTH_16:
620 		dt = DEV_X2;
621 		break;
622 	case DDRCTL_WDTH_32:
623 		dt = DEV_X4;
624 		break;
625 	default:
626 		dt = DEV_UNKNOWN;
627 	}
628 
629 	return dt;
630 }
631 
632 /**
633  * zynqmp_get_dtype - Return the controller memory width.
634  * @base:	DDR memory controller base address.
635  *
636  * Get the EDAC device type width appropriate for the current controller
637  * configuration.
638  *
639  * Return: a device type width enumeration.
640  */
641 static enum dev_type zynqmp_get_dtype(const void __iomem *base)
642 {
643 	enum dev_type dt;
644 	u32 width;
645 
646 	width = readl(base + CTRL_OFST);
647 	width = (width & ECC_CTRL_BUSWIDTH_MASK) >> ECC_CTRL_BUSWIDTH_SHIFT;
648 	switch (width) {
649 	case DDRCTL_EWDTH_16:
650 		dt = DEV_X2;
651 		break;
652 	case DDRCTL_EWDTH_32:
653 		dt = DEV_X4;
654 		break;
655 	case DDRCTL_EWDTH_64:
656 		dt = DEV_X8;
657 		break;
658 	default:
659 		dt = DEV_UNKNOWN;
660 	}
661 
662 	return dt;
663 }
664 
665 /**
666  * zynq_get_ecc_state - Return the controller ECC enable/disable status.
667  * @base:	DDR memory controller base address.
668  *
669  * Get the ECC enable/disable status of the controller.
670  *
671  * Return: true if enabled, otherwise false.
672  */
673 static bool zynq_get_ecc_state(void __iomem *base)
674 {
675 	enum dev_type dt;
676 	u32 ecctype;
677 
678 	dt = zynq_get_dtype(base);
679 	if (dt == DEV_UNKNOWN)
680 		return false;
681 
682 	ecctype = readl(base + SCRUB_OFST) & SCRUB_MODE_MASK;
683 	if ((ecctype == SCRUB_MODE_SECDED) && (dt == DEV_X2))
684 		return true;
685 
686 	return false;
687 }
688 
689 /**
690  * zynqmp_get_ecc_state - Return the controller ECC enable/disable status.
691  * @base:	DDR memory controller base address.
692  *
693  * Get the ECC enable/disable status for the controller.
694  *
695  * Return: a ECC status boolean i.e true/false - enabled/disabled.
696  */
697 static bool zynqmp_get_ecc_state(void __iomem *base)
698 {
699 	enum dev_type dt;
700 	u32 ecctype;
701 
702 	dt = zynqmp_get_dtype(base);
703 	if (dt == DEV_UNKNOWN)
704 		return false;
705 
706 	ecctype = readl(base + ECC_CFG0_OFST) & SCRUB_MODE_MASK;
707 	if ((ecctype == SCRUB_MODE_SECDED) &&
708 	    ((dt == DEV_X2) || (dt == DEV_X4) || (dt == DEV_X8)))
709 		return true;
710 
711 	return false;
712 }
713 
/**
 * get_memsize - Read the size of the attached memory device.
 *
 * Return: the memory size in bytes.
 *
 * NOTE(review): the totalram * mem_unit product is truncated to u32 on
 * return, so systems with 4 GiB or more of RAM will wrap -- confirm
 * whether the nr_pages computation in init_csrows() needs a u64 here.
 */
static u32 get_memsize(void)
{
	struct sysinfo inf;

	si_meminfo(&inf);

	return inf.totalram * inf.mem_unit;
}
727 
728 /**
729  * zynq_get_mtype - Return the controller memory type.
730  * @base:	Synopsys ECC status structure.
731  *
732  * Get the EDAC memory type appropriate for the current controller
733  * configuration.
734  *
735  * Return: a memory type enumeration.
736  */
737 static enum mem_type zynq_get_mtype(const void __iomem *base)
738 {
739 	enum mem_type mt;
740 	u32 memtype;
741 
742 	memtype = readl(base + T_ZQ_OFST);
743 
744 	if (memtype & T_ZQ_DDRMODE_MASK)
745 		mt = MEM_DDR3;
746 	else
747 		mt = MEM_DDR2;
748 
749 	return mt;
750 }
751 
752 /**
753  * zynqmp_get_mtype - Returns controller memory type.
754  * @base:	Synopsys ECC status structure.
755  *
756  * Get the EDAC memory type appropriate for the current controller
757  * configuration.
758  *
759  * Return: a memory type enumeration.
760  */
761 static enum mem_type zynqmp_get_mtype(const void __iomem *base)
762 {
763 	enum mem_type mt;
764 	u32 memtype;
765 
766 	memtype = readl(base + CTRL_OFST);
767 
768 	if ((memtype & MEM_TYPE_DDR3) || (memtype & MEM_TYPE_LPDDR3))
769 		mt = MEM_DDR3;
770 	else if (memtype & MEM_TYPE_DDR2)
771 		mt = MEM_RDDR2;
772 	else if ((memtype & MEM_TYPE_LPDDR4) || (memtype & MEM_TYPE_DDR4))
773 		mt = MEM_DDR4;
774 	else
775 		mt = MEM_EMPTY;
776 
777 	return mt;
778 }
779 
780 /**
781  * init_csrows - Initialize the csrow data.
782  * @mci:	EDAC memory controller instance.
783  *
784  * Initialize the chip select rows associated with the EDAC memory
785  * controller instance.
786  */
787 static void init_csrows(struct mem_ctl_info *mci)
788 {
789 	struct synps_edac_priv *priv = mci->pvt_info;
790 	const struct synps_platform_data *p_data;
791 	struct csrow_info *csi;
792 	struct dimm_info *dimm;
793 	u32 size, row;
794 	int j;
795 
796 	p_data = priv->p_data;
797 
798 	for (row = 0; row < mci->nr_csrows; row++) {
799 		csi = mci->csrows[row];
800 		size = get_memsize();
801 
802 		for (j = 0; j < csi->nr_channels; j++) {
803 			dimm		= csi->channels[j]->dimm;
804 			dimm->edac_mode	= EDAC_SECDED;
805 			dimm->mtype	= p_data->get_mtype(priv->baseaddr);
806 			dimm->nr_pages	= (size >> PAGE_SHIFT) / csi->nr_channels;
807 			dimm->grain	= SYNPS_EDAC_ERR_GRAIN;
808 			dimm->dtype	= p_data->get_dtype(priv->baseaddr);
809 		}
810 	}
811 }
812 
813 /**
814  * mc_init - Initialize one driver instance.
815  * @mci:	EDAC memory controller instance.
816  * @pdev:	platform device.
817  *
818  * Perform initialization of the EDAC memory controller instance and
819  * related driver-private data associated with the memory controller the
820  * instance is bound to.
821  */
822 static void mc_init(struct mem_ctl_info *mci, struct platform_device *pdev)
823 {
824 	struct synps_edac_priv *priv;
825 
826 	mci->pdev = &pdev->dev;
827 	priv = mci->pvt_info;
828 	platform_set_drvdata(pdev, mci);
829 
830 	/* Initialize controller capabilities and configuration */
831 	mci->mtype_cap = MEM_FLAG_DDR3 | MEM_FLAG_DDR2;
832 	mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
833 	mci->scrub_cap = SCRUB_HW_SRC;
834 	mci->scrub_mode = SCRUB_NONE;
835 
836 	mci->edac_cap = EDAC_FLAG_SECDED;
837 	mci->ctl_name = "synps_ddr_controller";
838 	mci->dev_name = SYNPS_EDAC_MOD_STRING;
839 	mci->mod_name = SYNPS_EDAC_MOD_VER;
840 
841 	if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT) {
842 		edac_op_state = EDAC_OPSTATE_INT;
843 	} else {
844 		edac_op_state = EDAC_OPSTATE_POLL;
845 		mci->edac_check = check_errors;
846 	}
847 
848 	mci->ctl_page_to_phys = NULL;
849 
850 	init_csrows(mci);
851 }
852 
853 static void enable_intr(struct synps_edac_priv *priv)
854 {
855 	/* Enable UE/CE Interrupts */
856 	if (priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)
857 		writel(DDR_UE_MASK | DDR_CE_MASK,
858 		       priv->baseaddr + ECC_CLR_OFST);
859 	else
860 		writel(DDR_QOSUE_MASK | DDR_QOSCE_MASK,
861 		       priv->baseaddr + DDR_QOS_IRQ_EN_OFST);
862 
863 }
864 
865 static void disable_intr(struct synps_edac_priv *priv)
866 {
867 	/* Disable UE/CE Interrupts */
868 	writel(DDR_QOSUE_MASK | DDR_QOSCE_MASK,
869 			priv->baseaddr + DDR_QOS_IRQ_DB_OFST);
870 }
871 
872 static int setup_irq(struct mem_ctl_info *mci,
873 		     struct platform_device *pdev)
874 {
875 	struct synps_edac_priv *priv = mci->pvt_info;
876 	int ret, irq;
877 
878 	irq = platform_get_irq(pdev, 0);
879 	if (irq < 0) {
880 		edac_printk(KERN_ERR, EDAC_MC,
881 			    "No IRQ %d in DT\n", irq);
882 		return irq;
883 	}
884 
885 	ret = devm_request_irq(&pdev->dev, irq, intr_handler,
886 			       0, dev_name(&pdev->dev), mci);
887 	if (ret < 0) {
888 		edac_printk(KERN_ERR, EDAC_MC, "Failed to request IRQ\n");
889 		return ret;
890 	}
891 
892 	enable_intr(priv);
893 
894 	return 0;
895 }
896 
/* Zynq (A05) DDRC: no interrupt support, errors are polled; no quirks. */
static const struct synps_platform_data zynq_edac_def = {
	.get_error_info	= zynq_get_error_info,
	.get_mtype	= zynq_get_mtype,
	.get_dtype	= zynq_get_dtype,
	.get_ecc_state	= zynq_get_ecc_state,
	.quirks		= 0,
};
904 
/*
 * ZynqMP DDRC (2.40a): interrupt driven; data poisoning support is only
 * compiled in alongside the CONFIG_EDAC_DEBUG injection attributes.
 */
static const struct synps_platform_data zynqmp_edac_def = {
	.get_error_info	= zynqmp_get_error_info,
	.get_mtype	= zynqmp_get_mtype,
	.get_dtype	= zynqmp_get_dtype,
	.get_ecc_state	= zynqmp_get_ecc_state,
	.quirks         = (DDR_ECC_INTR_SUPPORT
#ifdef CONFIG_EDAC_DEBUG
			  | DDR_ECC_DATA_POISON_SUPPORT
#endif
			  ),
};
916 
/*
 * Generic Synopsys DDRC (3.80a): interrupt driven with self-clearing
 * CE/UE interrupt status (DDR_ECC_INTR_SELF_CLEAR); shares the ZynqMP
 * register decode callbacks.
 */
static const struct synps_platform_data synopsys_edac_def = {
	.get_error_info	= zynqmp_get_error_info,
	.get_mtype	= zynqmp_get_mtype,
	.get_dtype	= zynqmp_get_dtype,
	.get_ecc_state	= zynqmp_get_ecc_state,
	.quirks         = (DDR_ECC_INTR_SUPPORT | DDR_ECC_INTR_SELF_CLEAR
#ifdef CONFIG_EDAC_DEBUG
			  | DDR_ECC_DATA_POISON_SUPPORT
#endif
			  ),
};
928 
929 
/* DT compatibles mapped to their per-IP platform data. */
static const struct of_device_id synps_edac_match[] = {
	{
		.compatible = "xlnx,zynq-ddrc-a05",
		.data = (void *)&zynq_edac_def
	},
	{
		.compatible = "xlnx,zynqmp-ddrc-2.40a",
		.data = (void *)&zynqmp_edac_def
	},
	{
		.compatible = "snps,ddrc-3.80a",
		.data = (void *)&synopsys_edac_def
	},
	{
		/* end of table */
	}
};

MODULE_DEVICE_TABLE(of, synps_edac_match);
949 
950 #ifdef CONFIG_EDAC_DEBUG
951 #define to_mci(k) container_of(k, struct mem_ctl_info, dev)
952 
/**
 * ddr_poison_setup -	Update poison registers.
 * @priv:		DDR memory controller private instance data.
 *
 * Translate the user supplied poison address into its row, column, bank,
 * bank-group and rank fields using the address-map shifts captured in
 * @priv, and program ECC_POISON0/ECC_POISON1 accordingly.
 * Return: none.
 */
static void ddr_poison_setup(struct synps_edac_priv *priv)
{
	int col = 0, row = 0, bank = 0, bankgrp = 0, rank = 0, regval;
	int index;
	ulong hif_addr = 0;

	/*
	 * System address -> HIF address: the >> 3 drops the byte offset
	 * within the data word (presumably a 64-bit bus) -- TODO confirm
	 * against the configured bus width.
	 */
	hif_addr = priv->poison_addr >> 3;

	/* A zero shift marks the end of the populated row-map entries. */
	for (index = 0; index < DDR_MAX_ROW_SHIFT; index++) {
		if (priv->row_shift[index])
			row |= (((hif_addr >> priv->row_shift[index]) &
						BIT(0)) << index);
		else
			break;
	}

	/* The first three column entries may legitimately hold shift 0. */
	for (index = 0; index < DDR_MAX_COL_SHIFT; index++) {
		if (priv->col_shift[index] || index < 3)
			col |= (((hif_addr >> priv->col_shift[index]) &
						BIT(0)) << index);
		else
			break;
	}

	for (index = 0; index < DDR_MAX_BANK_SHIFT; index++) {
		if (priv->bank_shift[index])
			bank |= (((hif_addr >> priv->bank_shift[index]) &
						BIT(0)) << index);
		else
			break;
	}

	for (index = 0; index < DDR_MAX_BANKGRP_SHIFT; index++) {
		if (priv->bankgrp_shift[index])
			bankgrp |= (((hif_addr >> priv->bankgrp_shift[index])
						& BIT(0)) << index);
		else
			break;
	}

	if (priv->rank_shift[0])
		rank = (hif_addr >> priv->rank_shift[0]) & BIT(0);

	/* POISON0 carries rank + column; POISON1 bank group + bank + row. */
	regval = (rank << ECC_POISON0_RANK_SHIFT) & ECC_POISON0_RANK_MASK;
	regval |= (col << ECC_POISON0_COLUMN_SHIFT) & ECC_POISON0_COLUMN_MASK;
	writel(regval, priv->baseaddr + ECC_POISON0_OFST);

	regval = (bankgrp << ECC_POISON1_BG_SHIFT) & ECC_POISON1_BG_MASK;
	regval |= (bank << ECC_POISON1_BANKNR_SHIFT) & ECC_POISON1_BANKNR_MASK;
	regval |= (row << ECC_POISON1_ROW_SHIFT) & ECC_POISON1_ROW_MASK;
	writel(regval, priv->baseaddr + ECC_POISON1_OFST);
}
1012 
/*
 * sysfs show: dump the currently programmed poison registers and the raw
 * injection address last written via inject_data_error_store().
 */
static ssize_t inject_data_error_show(struct device *dev,
				      struct device_attribute *mattr,
				      char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct synps_edac_priv *priv = mci->pvt_info;

	return sprintf(data, "Poison0 Addr: 0x%08x\n\rPoison1 Addr: 0x%08x\n\r"
			"Error injection Address: 0x%lx\n\r",
			readl(priv->baseaddr + ECC_POISON0_OFST),
			readl(priv->baseaddr + ECC_POISON1_OFST),
			priv->poison_addr);
}
1026 
1027 static ssize_t inject_data_error_store(struct device *dev,
1028 				       struct device_attribute *mattr,
1029 				       const char *data, size_t count)
1030 {
1031 	struct mem_ctl_info *mci = to_mci(dev);
1032 	struct synps_edac_priv *priv = mci->pvt_info;
1033 
1034 	if (kstrtoul(data, 0, &priv->poison_addr))
1035 		return -EINVAL;
1036 
1037 	ddr_poison_setup(priv);
1038 
1039 	return count;
1040 }
1041 
/*
 * sysfs show: report whether poisoning is configured to inject a
 * correctable (ECC_CFG1 low bits == 0x3) or uncorrectable error.
 */
static ssize_t inject_data_poison_show(struct device *dev,
				       struct device_attribute *mattr,
				       char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct synps_edac_priv *priv = mci->pvt_info;

	return sprintf(data, "Data Poisoning: %s\n\r",
			(((readl(priv->baseaddr + ECC_CFG1_OFST)) & 0x3) == 0x3)
			? ("Correctable Error") : ("UnCorrectable Error"));
}
1053 
1054 static ssize_t inject_data_poison_store(struct device *dev,
1055 					struct device_attribute *mattr,
1056 					const char *data, size_t count)
1057 {
1058 	struct mem_ctl_info *mci = to_mci(dev);
1059 	struct synps_edac_priv *priv = mci->pvt_info;
1060 
1061 	writel(0, priv->baseaddr + DDRC_SWCTL);
1062 	if (strncmp(data, "CE", 2) == 0)
1063 		writel(ECC_CEPOISON_MASK, priv->baseaddr + ECC_CFG1_OFST);
1064 	else
1065 		writel(ECC_UEPOISON_MASK, priv->baseaddr + ECC_CFG1_OFST);
1066 	writel(1, priv->baseaddr + DDRC_SWCTL);
1067 
1068 	return count;
1069 }
1070 
/* Debug sysfs attributes backing the error-injection show/store pairs above. */
static DEVICE_ATTR_RW(inject_data_error);
static DEVICE_ATTR_RW(inject_data_poison);
1073 
1074 static int edac_create_sysfs_attributes(struct mem_ctl_info *mci)
1075 {
1076 	int rc;
1077 
1078 	rc = device_create_file(&mci->dev, &dev_attr_inject_data_error);
1079 	if (rc < 0)
1080 		return rc;
1081 	rc = device_create_file(&mci->dev, &dev_attr_inject_data_poison);
1082 	if (rc < 0)
1083 		return rc;
1084 	return 0;
1085 }
1086 
/**
 * edac_remove_sysfs_attributes - Remove the error-injection sysfs files.
 * @mci:	EDAC memory controller instance.
 *
 * Counterpart of edac_create_sysfs_attributes(); called from mc_remove()
 * when the controller advertises DDR_ECC_DATA_POISON_SUPPORT.
 */
static void edac_remove_sysfs_attributes(struct mem_ctl_info *mci)
{
	device_remove_file(&mci->dev, &dev_attr_inject_data_error);
	device_remove_file(&mci->dev, &dev_attr_inject_data_poison);
}
1092 
1093 static void setup_row_address_map(struct synps_edac_priv *priv, u32 *addrmap)
1094 {
1095 	u32 addrmap_row_b2_10;
1096 	int index;
1097 
1098 	priv->row_shift[0] = (addrmap[5] & ROW_MAX_VAL_MASK) + ROW_B0_BASE;
1099 	priv->row_shift[1] = ((addrmap[5] >> 8) &
1100 			ROW_MAX_VAL_MASK) + ROW_B1_BASE;
1101 
1102 	addrmap_row_b2_10 = (addrmap[5] >> 16) & ROW_MAX_VAL_MASK;
1103 	if (addrmap_row_b2_10 != ROW_MAX_VAL_MASK) {
1104 		for (index = 2; index < 11; index++)
1105 			priv->row_shift[index] = addrmap_row_b2_10 +
1106 				index + ROW_B0_BASE;
1107 
1108 	} else {
1109 		priv->row_shift[2] = (addrmap[9] &
1110 				ROW_MAX_VAL_MASK) + ROW_B2_BASE;
1111 		priv->row_shift[3] = ((addrmap[9] >> 8) &
1112 				ROW_MAX_VAL_MASK) + ROW_B3_BASE;
1113 		priv->row_shift[4] = ((addrmap[9] >> 16) &
1114 				ROW_MAX_VAL_MASK) + ROW_B4_BASE;
1115 		priv->row_shift[5] = ((addrmap[9] >> 24) &
1116 				ROW_MAX_VAL_MASK) + ROW_B5_BASE;
1117 		priv->row_shift[6] = (addrmap[10] &
1118 				ROW_MAX_VAL_MASK) + ROW_B6_BASE;
1119 		priv->row_shift[7] = ((addrmap[10] >> 8) &
1120 				ROW_MAX_VAL_MASK) + ROW_B7_BASE;
1121 		priv->row_shift[8] = ((addrmap[10] >> 16) &
1122 				ROW_MAX_VAL_MASK) + ROW_B8_BASE;
1123 		priv->row_shift[9] = ((addrmap[10] >> 24) &
1124 				ROW_MAX_VAL_MASK) + ROW_B9_BASE;
1125 		priv->row_shift[10] = (addrmap[11] &
1126 				ROW_MAX_VAL_MASK) + ROW_B10_BASE;
1127 	}
1128 
1129 	priv->row_shift[11] = (((addrmap[5] >> 24) & ROW_MAX_VAL_MASK) ==
1130 				ROW_MAX_VAL_MASK) ? 0 : (((addrmap[5] >> 24) &
1131 				ROW_MAX_VAL_MASK) + ROW_B11_BASE);
1132 	priv->row_shift[12] = ((addrmap[6] & ROW_MAX_VAL_MASK) ==
1133 				ROW_MAX_VAL_MASK) ? 0 : ((addrmap[6] &
1134 				ROW_MAX_VAL_MASK) + ROW_B12_BASE);
1135 	priv->row_shift[13] = (((addrmap[6] >> 8) & ROW_MAX_VAL_MASK) ==
1136 				ROW_MAX_VAL_MASK) ? 0 : (((addrmap[6] >> 8) &
1137 				ROW_MAX_VAL_MASK) + ROW_B13_BASE);
1138 	priv->row_shift[14] = (((addrmap[6] >> 16) & ROW_MAX_VAL_MASK) ==
1139 				ROW_MAX_VAL_MASK) ? 0 : (((addrmap[6] >> 16) &
1140 				ROW_MAX_VAL_MASK) + ROW_B14_BASE);
1141 	priv->row_shift[15] = (((addrmap[6] >> 24) & ROW_MAX_VAL_MASK) ==
1142 				ROW_MAX_VAL_MASK) ? 0 : (((addrmap[6] >> 24) &
1143 				ROW_MAX_VAL_MASK) + ROW_B15_BASE);
1144 	priv->row_shift[16] = ((addrmap[7] & ROW_MAX_VAL_MASK) ==
1145 				ROW_MAX_VAL_MASK) ? 0 : ((addrmap[7] &
1146 				ROW_MAX_VAL_MASK) + ROW_B16_BASE);
1147 	priv->row_shift[17] = (((addrmap[7] >> 8) & ROW_MAX_VAL_MASK) ==
1148 				ROW_MAX_VAL_MASK) ? 0 : (((addrmap[7] >> 8) &
1149 				ROW_MAX_VAL_MASK) + ROW_B17_BASE);
1150 }
1151 
/**
 * setup_column_address_map - Decode column-bit positions from ADDRMAP regs.
 * @priv:	DDR memory controller private instance data.
 * @addrmap:	Snapshot of the ADDRMAP0..11 registers.
 *
 * Fills priv->col_shift[] with the system-address bit position of each DDR
 * column bit.  A value of 0 marks a column bit whose ADDRMAP field reads as
 * all-ones (COL_MAX_VAL_MASK), i.e. the bit is not mapped.  The layout
 * depends on the bus width read from CTRL_OFST and on the LPDDR3 flag in
 * the same register.
 */
static void setup_column_address_map(struct synps_edac_priv *priv, u32 *addrmap)
{
	u32 width, memtype;
	int index;

	memtype = readl(priv->baseaddr + CTRL_OFST);
	width = (memtype & ECC_CTRL_BUSWIDTH_MASK) >> ECC_CTRL_BUSWIDTH_SHIFT;

	/* Column bits 0 and 1 map straight through. */
	priv->col_shift[0] = 0;
	priv->col_shift[1] = 1;
	priv->col_shift[2] = (addrmap[2] & COL_MAX_VAL_MASK) + COL_B2_BASE;
	priv->col_shift[3] = ((addrmap[2] >> 8) &
			COL_MAX_VAL_MASK) + COL_B3_BASE;
	/* Column bits 4..9 are optional: all-ones means "not mapped". */
	priv->col_shift[4] = (((addrmap[2] >> 16) & COL_MAX_VAL_MASK) ==
			COL_MAX_VAL_MASK) ? 0 : (((addrmap[2] >> 16) &
					COL_MAX_VAL_MASK) + COL_B4_BASE);
	priv->col_shift[5] = (((addrmap[2] >> 24) & COL_MAX_VAL_MASK) ==
			COL_MAX_VAL_MASK) ? 0 : (((addrmap[2] >> 24) &
					COL_MAX_VAL_MASK) + COL_B5_BASE);
	priv->col_shift[6] = ((addrmap[3] & COL_MAX_VAL_MASK) ==
			COL_MAX_VAL_MASK) ? 0 : ((addrmap[3] &
					COL_MAX_VAL_MASK) + COL_B6_BASE);
	priv->col_shift[7] = (((addrmap[3] >> 8) & COL_MAX_VAL_MASK) ==
			COL_MAX_VAL_MASK) ? 0 : (((addrmap[3] >> 8) &
					COL_MAX_VAL_MASK) + COL_B7_BASE);
	priv->col_shift[8] = (((addrmap[3] >> 16) & COL_MAX_VAL_MASK) ==
			COL_MAX_VAL_MASK) ? 0 : (((addrmap[3] >> 16) &
					COL_MAX_VAL_MASK) + COL_B8_BASE);
	priv->col_shift[9] = (((addrmap[3] >> 24) & COL_MAX_VAL_MASK) ==
			COL_MAX_VAL_MASK) ? 0 : (((addrmap[3] >> 24) &
					COL_MAX_VAL_MASK) + COL_B9_BASE);
	/*
	 * The upper column bits live in different ADDRMAP fields depending on
	 * the effective bus width and memory type.
	 *
	 * NOTE(review): the non-LPDDR3 paths populate col_shift[11] and
	 * col_shift[13] but never col_shift[12] — presumably column bit 12
	 * is reserved in that configuration; confirm against the DDRC spec.
	 */
	if (width == DDRCTL_EWDTH_64) {
		if (memtype & MEM_TYPE_LPDDR3) {
			priv->col_shift[10] = ((addrmap[4] &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				((addrmap[4] & COL_MAX_VAL_MASK) +
				 COL_B10_BASE);
			priv->col_shift[11] = (((addrmap[4] >> 8) &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				(((addrmap[4] >> 8) & COL_MAX_VAL_MASK) +
				 COL_B11_BASE);
		} else {
			priv->col_shift[11] = ((addrmap[4] &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				((addrmap[4] & COL_MAX_VAL_MASK) +
				 COL_B10_BASE);
			priv->col_shift[13] = (((addrmap[4] >> 8) &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				(((addrmap[4] >> 8) & COL_MAX_VAL_MASK) +
				 COL_B11_BASE);
		}
	} else if (width == DDRCTL_EWDTH_32) {
		if (memtype & MEM_TYPE_LPDDR3) {
			priv->col_shift[10] = (((addrmap[3] >> 24) &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				(((addrmap[3] >> 24) & COL_MAX_VAL_MASK) +
				 COL_B9_BASE);
			priv->col_shift[11] = ((addrmap[4] &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				((addrmap[4] & COL_MAX_VAL_MASK) +
				 COL_B10_BASE);
		} else {
			priv->col_shift[11] = (((addrmap[3] >> 24) &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				(((addrmap[3] >> 24) & COL_MAX_VAL_MASK) +
				 COL_B9_BASE);
			priv->col_shift[13] = ((addrmap[4] &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				((addrmap[4] & COL_MAX_VAL_MASK) +
				 COL_B10_BASE);
		}
	} else {
		if (memtype & MEM_TYPE_LPDDR3) {
			priv->col_shift[10] = (((addrmap[3] >> 16) &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				(((addrmap[3] >> 16) & COL_MAX_VAL_MASK) +
				 COL_B8_BASE);
			priv->col_shift[11] = (((addrmap[3] >> 24) &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				(((addrmap[3] >> 24) & COL_MAX_VAL_MASK) +
				 COL_B9_BASE);
			priv->col_shift[13] = ((addrmap[4] &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				((addrmap[4] & COL_MAX_VAL_MASK) +
				 COL_B10_BASE);
		} else {
			priv->col_shift[11] = (((addrmap[3] >> 16) &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				(((addrmap[3] >> 16) & COL_MAX_VAL_MASK) +
				 COL_B8_BASE);
			priv->col_shift[13] = (((addrmap[3] >> 24) &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				(((addrmap[3] >> 24) & COL_MAX_VAL_MASK) +
				 COL_B9_BASE);
		}
	}

	/*
	 * For non-zero width codes, slide the low column mappings up by
	 * 'width' slots (entry [index] takes the value of [index - width],
	 * which is then cleared) — this re-bases the map for the reduced
	 * bus widths.
	 */
	if (width) {
		for (index = 9; index > width; index--) {
			priv->col_shift[index] = priv->col_shift[index - width];
			priv->col_shift[index - width] = 0;
		}
	}

}
1257 
1258 static void setup_bank_address_map(struct synps_edac_priv *priv, u32 *addrmap)
1259 {
1260 	priv->bank_shift[0] = (addrmap[1] & BANK_MAX_VAL_MASK) + BANK_B0_BASE;
1261 	priv->bank_shift[1] = ((addrmap[1] >> 8) &
1262 				BANK_MAX_VAL_MASK) + BANK_B1_BASE;
1263 	priv->bank_shift[2] = (((addrmap[1] >> 16) &
1264 				BANK_MAX_VAL_MASK) == BANK_MAX_VAL_MASK) ? 0 :
1265 				(((addrmap[1] >> 16) & BANK_MAX_VAL_MASK) +
1266 				 BANK_B2_BASE);
1267 
1268 }
1269 
1270 static void setup_bg_address_map(struct synps_edac_priv *priv, u32 *addrmap)
1271 {
1272 	priv->bankgrp_shift[0] = (addrmap[8] &
1273 				BANKGRP_MAX_VAL_MASK) + BANKGRP_B0_BASE;
1274 	priv->bankgrp_shift[1] = (((addrmap[8] >> 8) & BANKGRP_MAX_VAL_MASK) ==
1275 				BANKGRP_MAX_VAL_MASK) ? 0 : (((addrmap[8] >> 8)
1276 				& BANKGRP_MAX_VAL_MASK) + BANKGRP_B1_BASE);
1277 
1278 }
1279 
1280 static void setup_rank_address_map(struct synps_edac_priv *priv, u32 *addrmap)
1281 {
1282 	priv->rank_shift[0] = ((addrmap[0] & RANK_MAX_VAL_MASK) ==
1283 				RANK_MAX_VAL_MASK) ? 0 : ((addrmap[0] &
1284 				RANK_MAX_VAL_MASK) + RANK_B0_BASE);
1285 }
1286 
1287 /**
1288  * setup_address_map -	Set Address Map by querying ADDRMAP registers.
1289  * @priv:		DDR memory controller private instance data.
1290  *
1291  * Set Address Map by querying ADDRMAP registers.
1292  *
1293  * Return: none.
1294  */
1295 static void setup_address_map(struct synps_edac_priv *priv)
1296 {
1297 	u32 addrmap[12];
1298 	int index;
1299 
1300 	for (index = 0; index < 12; index++) {
1301 		u32 addrmap_offset;
1302 
1303 		addrmap_offset = ECC_ADDRMAP0_OFFSET + (index * 4);
1304 		addrmap[index] = readl(priv->baseaddr + addrmap_offset);
1305 	}
1306 
1307 	setup_row_address_map(priv, addrmap);
1308 
1309 	setup_column_address_map(priv, addrmap);
1310 
1311 	setup_bank_address_map(priv, addrmap);
1312 
1313 	setup_bg_address_map(priv, addrmap);
1314 
1315 	setup_rank_address_map(priv, addrmap);
1316 }
1317 #endif /* CONFIG_EDAC_DEBUG */
1318 
1319 /**
1320  * mc_probe - Check controller and bind driver.
1321  * @pdev:	platform device.
1322  *
1323  * Probe a specific controller instance for binding with the driver.
1324  *
1325  * Return: 0 if the controller instance was successfully bound to the
1326  * driver; otherwise, < 0 on error.
1327  */
1328 static int mc_probe(struct platform_device *pdev)
1329 {
1330 	const struct synps_platform_data *p_data;
1331 	struct edac_mc_layer layers[2];
1332 	struct synps_edac_priv *priv;
1333 	struct mem_ctl_info *mci;
1334 	void __iomem *baseaddr;
1335 	struct resource *res;
1336 	int rc;
1337 
1338 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1339 	baseaddr = devm_ioremap_resource(&pdev->dev, res);
1340 	if (IS_ERR(baseaddr))
1341 		return PTR_ERR(baseaddr);
1342 
1343 	p_data = of_device_get_match_data(&pdev->dev);
1344 	if (!p_data)
1345 		return -ENODEV;
1346 
1347 	if (!p_data->get_ecc_state(baseaddr)) {
1348 		edac_printk(KERN_INFO, EDAC_MC, "ECC not enabled\n");
1349 		return -ENXIO;
1350 	}
1351 
1352 	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
1353 	layers[0].size = SYNPS_EDAC_NR_CSROWS;
1354 	layers[0].is_virt_csrow = true;
1355 	layers[1].type = EDAC_MC_LAYER_CHANNEL;
1356 	layers[1].size = SYNPS_EDAC_NR_CHANS;
1357 	layers[1].is_virt_csrow = false;
1358 
1359 	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
1360 			    sizeof(struct synps_edac_priv));
1361 	if (!mci) {
1362 		edac_printk(KERN_ERR, EDAC_MC,
1363 			    "Failed memory allocation for mc instance\n");
1364 		return -ENOMEM;
1365 	}
1366 
1367 	priv = mci->pvt_info;
1368 	priv->baseaddr = baseaddr;
1369 	priv->p_data = p_data;
1370 
1371 	mc_init(mci, pdev);
1372 
1373 	if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT) {
1374 		rc = setup_irq(mci, pdev);
1375 		if (rc)
1376 			goto free_edac_mc;
1377 	}
1378 
1379 	rc = edac_mc_add_mc(mci);
1380 	if (rc) {
1381 		edac_printk(KERN_ERR, EDAC_MC,
1382 			    "Failed to register with EDAC core\n");
1383 		goto free_edac_mc;
1384 	}
1385 
1386 #ifdef CONFIG_EDAC_DEBUG
1387 	if (priv->p_data->quirks & DDR_ECC_DATA_POISON_SUPPORT) {
1388 		rc = edac_create_sysfs_attributes(mci);
1389 		if (rc) {
1390 			edac_printk(KERN_ERR, EDAC_MC,
1391 					"Failed to create sysfs entries\n");
1392 			goto free_edac_mc;
1393 		}
1394 	}
1395 
1396 	if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT)
1397 		setup_address_map(priv);
1398 #endif
1399 
1400 	/*
1401 	 * Start capturing the correctable and uncorrectable errors. A write of
1402 	 * 0 starts the counters.
1403 	 */
1404 	if (!(priv->p_data->quirks & DDR_ECC_INTR_SUPPORT))
1405 		writel(0x0, baseaddr + ECC_CTRL_OFST);
1406 
1407 	return rc;
1408 
1409 free_edac_mc:
1410 	edac_mc_free(mci);
1411 
1412 	return rc;
1413 }
1414 
1415 /**
1416  * mc_remove - Unbind driver from controller.
1417  * @pdev:	Platform device.
1418  *
1419  * Return: Unconditionally 0
1420  */
1421 static int mc_remove(struct platform_device *pdev)
1422 {
1423 	struct mem_ctl_info *mci = platform_get_drvdata(pdev);
1424 	struct synps_edac_priv *priv = mci->pvt_info;
1425 
1426 	if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT)
1427 		disable_intr(priv);
1428 
1429 #ifdef CONFIG_EDAC_DEBUG
1430 	if (priv->p_data->quirks & DDR_ECC_DATA_POISON_SUPPORT)
1431 		edac_remove_sysfs_attributes(mci);
1432 #endif
1433 
1434 	edac_mc_del_mc(&pdev->dev);
1435 	edac_mc_free(mci);
1436 
1437 	return 0;
1438 }
1439 
/* Platform driver glue; matched via the DT table declared earlier in the file. */
static struct platform_driver synps_edac_mc_driver = {
	.driver = {
		   .name = "synopsys-edac",
		   .of_match_table = synps_edac_match,
		   },
	.probe = mc_probe,
	.remove = mc_remove,
};

module_platform_driver(synps_edac_mc_driver);

MODULE_AUTHOR("Xilinx Inc");
MODULE_DESCRIPTION("Synopsys DDR ECC driver");
MODULE_LICENSE("GPL v2");
1454