1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Synopsys DDR ECC Driver
4 * This driver is based on ppc4xx_edac.c drivers
5 *
6 * Copyright (C) 2012 - 2014 Xilinx, Inc.
7 */
8
9 #include <linux/edac.h>
10 #include <linux/module.h>
11 #include <linux/platform_device.h>
12 #include <linux/spinlock.h>
13 #include <linux/sizes.h>
14 #include <linux/interrupt.h>
15 #include <linux/of.h>
16
17 #include "edac_module.h"
18
19 /* Number of cs_rows needed per memory controller */
20 #define SYNPS_EDAC_NR_CSROWS 1
21
22 /* Number of channels per memory controller */
23 #define SYNPS_EDAC_NR_CHANS 1
24
25 /* Granularity of reported error in bytes */
26 #define SYNPS_EDAC_ERR_GRAIN 1
27
28 #define SYNPS_EDAC_MSG_SIZE 256
29
30 #define SYNPS_EDAC_MOD_STRING "synps_edac"
31 #define SYNPS_EDAC_MOD_VER "1"
32
33 /* Synopsys DDR memory controller registers that are relevant to ECC */
34 #define CTRL_OFST 0x0
35 #define T_ZQ_OFST 0xA4
36
37 /* ECC control register */
38 #define ECC_CTRL_OFST 0xC4
39 /* ECC log register */
40 #define CE_LOG_OFST 0xC8
41 /* ECC address register */
42 #define CE_ADDR_OFST 0xCC
43 /* ECC data[31:0] register */
44 #define CE_DATA_31_0_OFST 0xD0
45
46 /* Uncorrectable error info registers */
47 #define UE_LOG_OFST 0xDC
48 #define UE_ADDR_OFST 0xE0
49 #define UE_DATA_31_0_OFST 0xE4
50
51 #define STAT_OFST 0xF0
52 #define SCRUB_OFST 0xF4
53
54 /* Control register bit field definitions */
55 #define CTRL_BW_MASK 0xC
56 #define CTRL_BW_SHIFT 2
57
58 #define DDRCTL_WDTH_16 1
59 #define DDRCTL_WDTH_32 0
60
61 /* ZQ register bit field definitions */
62 #define T_ZQ_DDRMODE_MASK 0x2
63
64 /* ECC control register bit field definitions */
65 #define ECC_CTRL_CLR_CE_ERR 0x2
66 #define ECC_CTRL_CLR_UE_ERR 0x1
67
68 /* ECC correctable/uncorrectable error log register definitions */
69 #define LOG_VALID 0x1
70 #define CE_LOG_BITPOS_MASK 0xFE
71 #define CE_LOG_BITPOS_SHIFT 1
72
73 /* ECC correctable/uncorrectable error address register definitions */
74 #define ADDR_COL_MASK 0xFFF
75 #define ADDR_ROW_MASK 0xFFFF000
76 #define ADDR_ROW_SHIFT 12
77 #define ADDR_BANK_MASK 0x70000000
78 #define ADDR_BANK_SHIFT 28
79
80 /* ECC statistic register definitions */
81 #define STAT_UECNT_MASK 0xFF
82 #define STAT_CECNT_MASK 0xFF00
83 #define STAT_CECNT_SHIFT 8
84
85 /* ECC scrub register definitions */
86 #define SCRUB_MODE_MASK 0x7
87 #define SCRUB_MODE_SECDED 0x4
88
89 /* DDR ECC Quirks */
90 #define DDR_ECC_INTR_SUPPORT BIT(0)
91 #define DDR_ECC_DATA_POISON_SUPPORT BIT(1)
92 #define DDR_ECC_INTR_SELF_CLEAR BIT(2)
93
94 /* ZynqMP Enhanced DDR memory controller registers that are relevant to ECC */
95 /* ECC Configuration Registers */
96 #define ECC_CFG0_OFST 0x70
97 #define ECC_CFG1_OFST 0x74
98
99 /* ECC Status Register */
100 #define ECC_STAT_OFST 0x78
101
102 /* ECC Clear Register */
103 #define ECC_CLR_OFST 0x7C
104
105 /* ECC Error count Register */
106 #define ECC_ERRCNT_OFST 0x80
107
108 /* ECC Corrected Error Address Register */
109 #define ECC_CEADDR0_OFST 0x84
110 #define ECC_CEADDR1_OFST 0x88
111
112 /* ECC Syndrome Registers */
113 #define ECC_CSYND0_OFST 0x8C
114 #define ECC_CSYND1_OFST 0x90
115 #define ECC_CSYND2_OFST 0x94
116
117 /* ECC Bit Mask0 Address Register */
118 #define ECC_BITMASK0_OFST 0x98
119 #define ECC_BITMASK1_OFST 0x9C
120 #define ECC_BITMASK2_OFST 0xA0
121
122 /* ECC UnCorrected Error Address Register */
123 #define ECC_UEADDR0_OFST 0xA4
124 #define ECC_UEADDR1_OFST 0xA8
125
126 /* ECC Syndrome Registers */
127 #define ECC_UESYND0_OFST 0xAC
128 #define ECC_UESYND1_OFST 0xB0
129 #define ECC_UESYND2_OFST 0xB4
130
131 /* ECC Poison Address Reg */
132 #define ECC_POISON0_OFST 0xB8
133 #define ECC_POISON1_OFST 0xBC
134
135 #define ECC_ADDRMAP0_OFFSET 0x200
136
137 /* Control register bitfield definitions */
138 #define ECC_CTRL_BUSWIDTH_MASK 0x3000
139 #define ECC_CTRL_BUSWIDTH_SHIFT 12
140 #define ECC_CTRL_CLR_CE_ERRCNT BIT(2)
141 #define ECC_CTRL_CLR_UE_ERRCNT BIT(3)
142
143 /* DDR Control Register width definitions */
144 #define DDRCTL_EWDTH_16 2
145 #define DDRCTL_EWDTH_32 1
146 #define DDRCTL_EWDTH_64 0
147
148 /* ECC status register definitions */
149 #define ECC_STAT_UECNT_MASK 0xF0000
150 #define ECC_STAT_UECNT_SHIFT 16
151 #define ECC_STAT_CECNT_MASK 0xF00
152 #define ECC_STAT_CECNT_SHIFT 8
153 #define ECC_STAT_BITNUM_MASK 0x7F
154
155 /* ECC error count register definitions */
156 #define ECC_ERRCNT_UECNT_MASK 0xFFFF0000
157 #define ECC_ERRCNT_UECNT_SHIFT 16
158 #define ECC_ERRCNT_CECNT_MASK 0xFFFF
159
160 /* DDR QOS Interrupt register definitions */
161 #define DDR_QOS_IRQ_STAT_OFST 0x20200
162 #define DDR_QOSUE_MASK 0x4
163 #define DDR_QOSCE_MASK 0x2
164 #define ECC_CE_UE_INTR_MASK 0x6
165 #define DDR_QOS_IRQ_EN_OFST 0x20208
166 #define DDR_QOS_IRQ_DB_OFST 0x2020C
167
168 /* DDR QOS Interrupt register definitions */
169 #define DDR_UE_MASK BIT(9)
170 #define DDR_CE_MASK BIT(8)
171
172 /* ECC Corrected Error Register Mask and Shifts*/
173 #define ECC_CEADDR0_RW_MASK 0x3FFFF
174 #define ECC_CEADDR0_RNK_MASK BIT(24)
175 #define ECC_CEADDR1_BNKGRP_MASK 0x3000000
176 #define ECC_CEADDR1_BNKNR_MASK 0x70000
177 #define ECC_CEADDR1_BLKNR_MASK 0xFFF
178 #define ECC_CEADDR1_BNKGRP_SHIFT 24
179 #define ECC_CEADDR1_BNKNR_SHIFT 16
180
181 /* ECC Poison register shifts */
182 #define ECC_POISON0_RANK_SHIFT 24
183 #define ECC_POISON0_RANK_MASK BIT(24)
184 #define ECC_POISON0_COLUMN_SHIFT 0
185 #define ECC_POISON0_COLUMN_MASK 0xFFF
186 #define ECC_POISON1_BG_SHIFT 28
187 #define ECC_POISON1_BG_MASK 0x30000000
188 #define ECC_POISON1_BANKNR_SHIFT 24
189 #define ECC_POISON1_BANKNR_MASK 0x7000000
190 #define ECC_POISON1_ROW_SHIFT 0
191 #define ECC_POISON1_ROW_MASK 0x3FFFF
192
193 /* DDR Memory type defines */
194 #define MEM_TYPE_DDR3 0x1
195 #define MEM_TYPE_LPDDR3 0x8
196 #define MEM_TYPE_DDR2 0x4
197 #define MEM_TYPE_DDR4 0x10
198 #define MEM_TYPE_LPDDR4 0x20
199
200 /* DDRC Software control register */
201 #define DDRC_SWCTL 0x320
202
203 /* DDRC ECC CE & UE poison mask */
204 #define ECC_CEPOISON_MASK 0x3
205 #define ECC_UEPOISON_MASK 0x1
206
207 /* DDRC Device config masks */
208 #define DDRC_MSTR_CFG_MASK 0xC0000000
209 #define DDRC_MSTR_CFG_SHIFT 30
210 #define DDRC_MSTR_CFG_X4_MASK 0x0
211 #define DDRC_MSTR_CFG_X8_MASK 0x1
212 #define DDRC_MSTR_CFG_X16_MASK 0x2
213 #define DDRC_MSTR_CFG_X32_MASK 0x3
214
215 #define DDR_MAX_ROW_SHIFT 18
216 #define DDR_MAX_COL_SHIFT 14
217 #define DDR_MAX_BANK_SHIFT 3
218 #define DDR_MAX_BANKGRP_SHIFT 2
219
220 #define ROW_MAX_VAL_MASK 0xF
221 #define COL_MAX_VAL_MASK 0xF
222 #define BANK_MAX_VAL_MASK 0x1F
223 #define BANKGRP_MAX_VAL_MASK 0x1F
224 #define RANK_MAX_VAL_MASK 0x1F
225
226 #define ROW_B0_BASE 6
227 #define ROW_B1_BASE 7
228 #define ROW_B2_BASE 8
229 #define ROW_B3_BASE 9
230 #define ROW_B4_BASE 10
231 #define ROW_B5_BASE 11
232 #define ROW_B6_BASE 12
233 #define ROW_B7_BASE 13
234 #define ROW_B8_BASE 14
235 #define ROW_B9_BASE 15
236 #define ROW_B10_BASE 16
237 #define ROW_B11_BASE 17
238 #define ROW_B12_BASE 18
239 #define ROW_B13_BASE 19
240 #define ROW_B14_BASE 20
241 #define ROW_B15_BASE 21
242 #define ROW_B16_BASE 22
243 #define ROW_B17_BASE 23
244
245 #define COL_B2_BASE 2
246 #define COL_B3_BASE 3
247 #define COL_B4_BASE 4
248 #define COL_B5_BASE 5
249 #define COL_B6_BASE 6
250 #define COL_B7_BASE 7
251 #define COL_B8_BASE 8
252 #define COL_B9_BASE 9
253 #define COL_B10_BASE 10
254 #define COL_B11_BASE 11
255 #define COL_B12_BASE 12
256 #define COL_B13_BASE 13
257
258 #define BANK_B0_BASE 2
259 #define BANK_B1_BASE 3
260 #define BANK_B2_BASE 4
261
262 #define BANKGRP_B0_BASE 2
263 #define BANKGRP_B1_BASE 3
264
265 #define RANK_B0_BASE 6
266
267 /**
268 * struct ecc_error_info - ECC error log information.
269 * @row: Row number.
270 * @col: Column number.
271 * @bank: Bank number.
272 * @bitpos: Bit position.
273 * @data: Data causing the error.
274 * @bankgrpnr: Bank group number.
275 * @blknr: Block number.
276 */
277 struct ecc_error_info {
278 u32 row;
279 u32 col;
280 u32 bank;
281 u32 bitpos;
282 u32 data;
283 u32 bankgrpnr;
284 u32 blknr;
285 };
286
287 /**
288 * struct synps_ecc_status - ECC status information to report.
289 * @ce_cnt: Correctable error count.
290 * @ue_cnt: Uncorrectable error count.
291 * @ceinfo: Correctable error log information.
292 * @ueinfo: Uncorrectable error log information.
293 */
294 struct synps_ecc_status {
295 u32 ce_cnt;
296 u32 ue_cnt;
297 struct ecc_error_info ceinfo;
298 struct ecc_error_info ueinfo;
299 };
300
301 /**
302 * struct synps_edac_priv - DDR memory controller private instance data.
303 * @baseaddr: Base address of the DDR controller.
304 * @reglock: Concurrent CSRs access lock.
305 * @message: Buffer for framing the event specific info.
306 * @stat: ECC status information.
307 * @p_data: Platform data.
308 * @ce_cnt: Correctable Error count.
309 * @ue_cnt: Uncorrectable Error count.
310 * @poison_addr: Data poison address.
311 * @row_shift: Bit shifts for row bit.
312 * @col_shift: Bit shifts for column bit.
313 * @bank_shift: Bit shifts for bank bit.
314 * @bankgrp_shift: Bit shifts for bank group bit.
315 * @rank_shift: Bit shifts for rank bit.
316 */
317 struct synps_edac_priv {
318 void __iomem *baseaddr;
319 spinlock_t reglock;
320 char message[SYNPS_EDAC_MSG_SIZE];
321 struct synps_ecc_status stat;
322 const struct synps_platform_data *p_data;
323 u32 ce_cnt;
324 u32 ue_cnt;
325 #ifdef CONFIG_EDAC_DEBUG
326 ulong poison_addr;
327 u32 row_shift[18];
328 u32 col_shift[14];
329 u32 bank_shift[3];
330 u32 bankgrp_shift[2];
331 u32 rank_shift[1];
332 #endif
333 };
334
335 /**
336 * struct synps_platform_data - synps platform data structure.
337 * @get_error_info: Get EDAC error info.
338 * @get_mtype: Get mtype.
339 * @get_dtype: Get dtype.
340 * @get_ecc_state: Get ECC state.
341 * @get_mem_info: Get EDAC memory info
342 * @quirks: To differentiate IPs.
343 */
344 struct synps_platform_data {
345 int (*get_error_info)(struct synps_edac_priv *priv);
346 enum mem_type (*get_mtype)(const void __iomem *base);
347 enum dev_type (*get_dtype)(const void __iomem *base);
348 bool (*get_ecc_state)(void __iomem *base);
349 #ifdef CONFIG_EDAC_DEBUG
350 u64 (*get_mem_info)(struct synps_edac_priv *priv);
351 #endif
352 int quirks;
353 };
354
355 /**
356 * zynq_get_error_info - Get the current ECC error info.
357 * @priv: DDR memory controller private instance data.
358 *
359 * Return: one if there is no error, otherwise zero.
360 */
zynq_get_error_info(struct synps_edac_priv * priv)361 static int zynq_get_error_info(struct synps_edac_priv *priv)
362 {
363 struct synps_ecc_status *p;
364 u32 regval, clearval = 0;
365 void __iomem *base;
366
367 base = priv->baseaddr;
368 p = &priv->stat;
369
370 regval = readl(base + STAT_OFST);
371 if (!regval)
372 return 1;
373
374 p->ce_cnt = (regval & STAT_CECNT_MASK) >> STAT_CECNT_SHIFT;
375 p->ue_cnt = regval & STAT_UECNT_MASK;
376
377 regval = readl(base + CE_LOG_OFST);
378 if (!(p->ce_cnt && (regval & LOG_VALID)))
379 goto ue_err;
380
381 p->ceinfo.bitpos = (regval & CE_LOG_BITPOS_MASK) >> CE_LOG_BITPOS_SHIFT;
382 regval = readl(base + CE_ADDR_OFST);
383 p->ceinfo.row = (regval & ADDR_ROW_MASK) >> ADDR_ROW_SHIFT;
384 p->ceinfo.col = regval & ADDR_COL_MASK;
385 p->ceinfo.bank = (regval & ADDR_BANK_MASK) >> ADDR_BANK_SHIFT;
386 p->ceinfo.data = readl(base + CE_DATA_31_0_OFST);
387 edac_dbg(3, "CE bit position: %d data: %d\n", p->ceinfo.bitpos,
388 p->ceinfo.data);
389 clearval = ECC_CTRL_CLR_CE_ERR;
390
391 ue_err:
392 regval = readl(base + UE_LOG_OFST);
393 if (!(p->ue_cnt && (regval & LOG_VALID)))
394 goto out;
395
396 regval = readl(base + UE_ADDR_OFST);
397 p->ueinfo.row = (regval & ADDR_ROW_MASK) >> ADDR_ROW_SHIFT;
398 p->ueinfo.col = regval & ADDR_COL_MASK;
399 p->ueinfo.bank = (regval & ADDR_BANK_MASK) >> ADDR_BANK_SHIFT;
400 p->ueinfo.data = readl(base + UE_DATA_31_0_OFST);
401 clearval |= ECC_CTRL_CLR_UE_ERR;
402
403 out:
404 writel(clearval, base + ECC_CTRL_OFST);
405 writel(0x0, base + ECC_CTRL_OFST);
406
407 return 0;
408 }
409
#ifdef CONFIG_EDAC_DEBUG
/**
 * zynqmp_get_mem_info - Get the current memory info.
 * @priv:	DDR memory controller private instance data.
 *
 * Translate the linear poison address into a host interface (HIF) address:
 * addresses at or above 32 GiB are folded down past the 2 GiB hole, then
 * the result is divided by 8 (one HIF unit per 64-bit beat).
 *
 * Return: host interface address.
 */
static u64 zynqmp_get_mem_info(struct synps_edac_priv *priv)
{
	u64 linear_addr = priv->poison_addr;

	if (linear_addr >= SZ_32G)
		linear_addr = linear_addr - SZ_32G + SZ_2G;

	return linear_addr >> 3;
}
#endif
428
429 /**
430 * zynqmp_get_error_info - Get the current ECC error info.
431 * @priv: DDR memory controller private instance data.
432 *
433 * Return: one if there is no error otherwise returns zero.
434 */
zynqmp_get_error_info(struct synps_edac_priv * priv)435 static int zynqmp_get_error_info(struct synps_edac_priv *priv)
436 {
437 struct synps_ecc_status *p;
438 u32 regval, clearval;
439 unsigned long flags;
440 void __iomem *base;
441
442 base = priv->baseaddr;
443 p = &priv->stat;
444
445 regval = readl(base + ECC_ERRCNT_OFST);
446 p->ce_cnt = regval & ECC_ERRCNT_CECNT_MASK;
447 p->ue_cnt = (regval & ECC_ERRCNT_UECNT_MASK) >> ECC_ERRCNT_UECNT_SHIFT;
448 if (!p->ce_cnt)
449 goto ue_err;
450
451 regval = readl(base + ECC_STAT_OFST);
452 if (!regval)
453 return 1;
454
455 p->ceinfo.bitpos = (regval & ECC_STAT_BITNUM_MASK);
456
457 regval = readl(base + ECC_CEADDR0_OFST);
458 p->ceinfo.row = (regval & ECC_CEADDR0_RW_MASK);
459 regval = readl(base + ECC_CEADDR1_OFST);
460 p->ceinfo.bank = (regval & ECC_CEADDR1_BNKNR_MASK) >>
461 ECC_CEADDR1_BNKNR_SHIFT;
462 p->ceinfo.bankgrpnr = (regval & ECC_CEADDR1_BNKGRP_MASK) >>
463 ECC_CEADDR1_BNKGRP_SHIFT;
464 p->ceinfo.blknr = (regval & ECC_CEADDR1_BLKNR_MASK);
465 p->ceinfo.data = readl(base + ECC_CSYND0_OFST);
466 edac_dbg(2, "ECCCSYN0: 0x%08X ECCCSYN1: 0x%08X ECCCSYN2: 0x%08X\n",
467 readl(base + ECC_CSYND0_OFST), readl(base + ECC_CSYND1_OFST),
468 readl(base + ECC_CSYND2_OFST));
469 ue_err:
470 if (!p->ue_cnt)
471 goto out;
472
473 regval = readl(base + ECC_UEADDR0_OFST);
474 p->ueinfo.row = (regval & ECC_CEADDR0_RW_MASK);
475 regval = readl(base + ECC_UEADDR1_OFST);
476 p->ueinfo.bankgrpnr = (regval & ECC_CEADDR1_BNKGRP_MASK) >>
477 ECC_CEADDR1_BNKGRP_SHIFT;
478 p->ueinfo.bank = (regval & ECC_CEADDR1_BNKNR_MASK) >>
479 ECC_CEADDR1_BNKNR_SHIFT;
480 p->ueinfo.blknr = (regval & ECC_CEADDR1_BLKNR_MASK);
481 p->ueinfo.data = readl(base + ECC_UESYND0_OFST);
482 out:
483 spin_lock_irqsave(&priv->reglock, flags);
484
485 clearval = readl(base + ECC_CLR_OFST) |
486 ECC_CTRL_CLR_CE_ERR | ECC_CTRL_CLR_CE_ERRCNT |
487 ECC_CTRL_CLR_UE_ERR | ECC_CTRL_CLR_UE_ERRCNT;
488 writel(clearval, base + ECC_CLR_OFST);
489
490 spin_unlock_irqrestore(&priv->reglock, flags);
491
492 return 0;
493 }
494
495 /**
496 * handle_error - Handle Correctable and Uncorrectable errors.
497 * @mci: EDAC memory controller instance.
498 * @p: Synopsys ECC status structure.
499 *
500 * Handles ECC correctable and uncorrectable errors.
501 */
handle_error(struct mem_ctl_info * mci,struct synps_ecc_status * p)502 static void handle_error(struct mem_ctl_info *mci, struct synps_ecc_status *p)
503 {
504 struct synps_edac_priv *priv = mci->pvt_info;
505 struct ecc_error_info *pinf;
506
507 if (p->ce_cnt) {
508 pinf = &p->ceinfo;
509 if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT) {
510 snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
511 "DDR ECC error type:%s Row %d Bank %d BankGroup Number %d Block Number %d Bit Position: %d Data: 0x%08x",
512 "CE", pinf->row, pinf->bank,
513 pinf->bankgrpnr, pinf->blknr,
514 pinf->bitpos, pinf->data);
515 } else {
516 snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
517 "DDR ECC error type:%s Row %d Bank %d Col %d Bit Position: %d Data: 0x%08x",
518 "CE", pinf->row, pinf->bank, pinf->col,
519 pinf->bitpos, pinf->data);
520 }
521
522 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
523 p->ce_cnt, 0, 0, 0, 0, 0, -1,
524 priv->message, "");
525 }
526
527 if (p->ue_cnt) {
528 pinf = &p->ueinfo;
529 if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT) {
530 snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
531 "DDR ECC error type :%s Row %d Bank %d BankGroup Number %d Block Number %d",
532 "UE", pinf->row, pinf->bank,
533 pinf->bankgrpnr, pinf->blknr);
534 } else {
535 snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
536 "DDR ECC error type :%s Row %d Bank %d Col %d ",
537 "UE", pinf->row, pinf->bank, pinf->col);
538 }
539
540 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
541 p->ue_cnt, 0, 0, 0, 0, 0, -1,
542 priv->message, "");
543 }
544
545 memset(p, 0, sizeof(*p));
546 }
547
enable_intr(struct synps_edac_priv * priv)548 static void enable_intr(struct synps_edac_priv *priv)
549 {
550 unsigned long flags;
551
552 /* Enable UE/CE Interrupts */
553 if (!(priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)) {
554 writel(DDR_QOSUE_MASK | DDR_QOSCE_MASK,
555 priv->baseaddr + DDR_QOS_IRQ_EN_OFST);
556
557 return;
558 }
559
560 spin_lock_irqsave(&priv->reglock, flags);
561
562 writel(DDR_UE_MASK | DDR_CE_MASK,
563 priv->baseaddr + ECC_CLR_OFST);
564
565 spin_unlock_irqrestore(&priv->reglock, flags);
566 }
567
disable_intr(struct synps_edac_priv * priv)568 static void disable_intr(struct synps_edac_priv *priv)
569 {
570 unsigned long flags;
571
572 /* Disable UE/CE Interrupts */
573 if (!(priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)) {
574 writel(DDR_QOSUE_MASK | DDR_QOSCE_MASK,
575 priv->baseaddr + DDR_QOS_IRQ_DB_OFST);
576
577 return;
578 }
579
580 spin_lock_irqsave(&priv->reglock, flags);
581
582 writel(0, priv->baseaddr + ECC_CLR_OFST);
583
584 spin_unlock_irqrestore(&priv->reglock, flags);
585 }
586
587 /**
588 * intr_handler - Interrupt Handler for ECC interrupts.
589 * @irq: IRQ number.
590 * @dev_id: Device ID.
591 *
592 * Return: IRQ_NONE, if interrupt not set or IRQ_HANDLED otherwise.
593 */
intr_handler(int irq,void * dev_id)594 static irqreturn_t intr_handler(int irq, void *dev_id)
595 {
596 const struct synps_platform_data *p_data;
597 struct mem_ctl_info *mci = dev_id;
598 struct synps_edac_priv *priv;
599 int status, regval;
600
601 priv = mci->pvt_info;
602 p_data = priv->p_data;
603
604 /*
605 * v3.0 of the controller has the ce/ue bits cleared automatically,
606 * so this condition does not apply.
607 */
608 if (!(priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)) {
609 regval = readl(priv->baseaddr + DDR_QOS_IRQ_STAT_OFST);
610 regval &= (DDR_QOSCE_MASK | DDR_QOSUE_MASK);
611 if (!(regval & ECC_CE_UE_INTR_MASK))
612 return IRQ_NONE;
613 }
614
615 status = p_data->get_error_info(priv);
616 if (status)
617 return IRQ_NONE;
618
619 priv->ce_cnt += priv->stat.ce_cnt;
620 priv->ue_cnt += priv->stat.ue_cnt;
621 handle_error(mci, &priv->stat);
622
623 edac_dbg(3, "Total error count CE %d UE %d\n",
624 priv->ce_cnt, priv->ue_cnt);
625 /* v3.0 of the controller does not have this register */
626 if (!(priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR))
627 writel(regval, priv->baseaddr + DDR_QOS_IRQ_STAT_OFST);
628
629 return IRQ_HANDLED;
630 }
631
632 /**
633 * check_errors - Check controller for ECC errors.
634 * @mci: EDAC memory controller instance.
635 *
636 * Check and post ECC errors. Called by the polling thread.
637 */
check_errors(struct mem_ctl_info * mci)638 static void check_errors(struct mem_ctl_info *mci)
639 {
640 const struct synps_platform_data *p_data;
641 struct synps_edac_priv *priv;
642 int status;
643
644 priv = mci->pvt_info;
645 p_data = priv->p_data;
646
647 status = p_data->get_error_info(priv);
648 if (status)
649 return;
650
651 priv->ce_cnt += priv->stat.ce_cnt;
652 priv->ue_cnt += priv->stat.ue_cnt;
653 handle_error(mci, &priv->stat);
654
655 edac_dbg(3, "Total error count CE %d UE %d\n",
656 priv->ce_cnt, priv->ue_cnt);
657 }
658
659 /**
660 * zynq_get_dtype - Return the controller memory width.
661 * @base: DDR memory controller base address.
662 *
663 * Get the EDAC device type width appropriate for the current controller
664 * configuration.
665 *
666 * Return: a device type width enumeration.
667 */
zynq_get_dtype(const void __iomem * base)668 static enum dev_type zynq_get_dtype(const void __iomem *base)
669 {
670 enum dev_type dt;
671 u32 width;
672
673 width = readl(base + CTRL_OFST);
674 width = (width & CTRL_BW_MASK) >> CTRL_BW_SHIFT;
675
676 switch (width) {
677 case DDRCTL_WDTH_16:
678 dt = DEV_X2;
679 break;
680 case DDRCTL_WDTH_32:
681 dt = DEV_X4;
682 break;
683 default:
684 dt = DEV_UNKNOWN;
685 }
686
687 return dt;
688 }
689
690 /**
691 * zynqmp_get_dtype - Return the controller memory width.
692 * @base: DDR memory controller base address.
693 *
694 * Get the EDAC device type width appropriate for the current controller
695 * configuration.
696 *
697 * Return: a device type width enumeration.
698 */
zynqmp_get_dtype(const void __iomem * base)699 static enum dev_type zynqmp_get_dtype(const void __iomem *base)
700 {
701 enum dev_type dt;
702 u32 width;
703
704 width = readl(base + CTRL_OFST);
705 width = (width & ECC_CTRL_BUSWIDTH_MASK) >> ECC_CTRL_BUSWIDTH_SHIFT;
706 switch (width) {
707 case DDRCTL_EWDTH_16:
708 dt = DEV_X2;
709 break;
710 case DDRCTL_EWDTH_32:
711 dt = DEV_X4;
712 break;
713 case DDRCTL_EWDTH_64:
714 dt = DEV_X8;
715 break;
716 default:
717 dt = DEV_UNKNOWN;
718 }
719
720 return dt;
721 }
722
723 /**
724 * zynq_get_ecc_state - Return the controller ECC enable/disable status.
725 * @base: DDR memory controller base address.
726 *
727 * Get the ECC enable/disable status of the controller.
728 *
729 * Return: true if enabled, otherwise false.
730 */
zynq_get_ecc_state(void __iomem * base)731 static bool zynq_get_ecc_state(void __iomem *base)
732 {
733 enum dev_type dt;
734 u32 ecctype;
735
736 dt = zynq_get_dtype(base);
737 if (dt == DEV_UNKNOWN)
738 return false;
739
740 ecctype = readl(base + SCRUB_OFST) & SCRUB_MODE_MASK;
741 if ((ecctype == SCRUB_MODE_SECDED) && (dt == DEV_X2))
742 return true;
743
744 return false;
745 }
746
747 /**
748 * zynqmp_get_ecc_state - Return the controller ECC enable/disable status.
749 * @base: DDR memory controller base address.
750 *
751 * Get the ECC enable/disable status for the controller.
752 *
753 * Return: a ECC status boolean i.e true/false - enabled/disabled.
754 */
zynqmp_get_ecc_state(void __iomem * base)755 static bool zynqmp_get_ecc_state(void __iomem *base)
756 {
757 enum dev_type dt;
758 u32 ecctype;
759
760 dt = zynqmp_get_dtype(base);
761 if (dt == DEV_UNKNOWN)
762 return false;
763
764 ecctype = readl(base + ECC_CFG0_OFST) & SCRUB_MODE_MASK;
765 if ((ecctype == SCRUB_MODE_SECDED) &&
766 ((dt == DEV_X2) || (dt == DEV_X4) || (dt == DEV_X8)))
767 return true;
768
769 return false;
770 }
771
772 /**
773 * get_memsize - Read the size of the attached memory device.
774 *
775 * Return: the memory size in bytes.
776 */
get_memsize(void)777 static u32 get_memsize(void)
778 {
779 struct sysinfo inf;
780
781 si_meminfo(&inf);
782
783 return inf.totalram * inf.mem_unit;
784 }
785
786 /**
787 * zynq_get_mtype - Return the controller memory type.
788 * @base: Synopsys ECC status structure.
789 *
790 * Get the EDAC memory type appropriate for the current controller
791 * configuration.
792 *
793 * Return: a memory type enumeration.
794 */
zynq_get_mtype(const void __iomem * base)795 static enum mem_type zynq_get_mtype(const void __iomem *base)
796 {
797 enum mem_type mt;
798 u32 memtype;
799
800 memtype = readl(base + T_ZQ_OFST);
801
802 if (memtype & T_ZQ_DDRMODE_MASK)
803 mt = MEM_DDR3;
804 else
805 mt = MEM_DDR2;
806
807 return mt;
808 }
809
810 /**
811 * zynqmp_get_mtype - Returns controller memory type.
812 * @base: Synopsys ECC status structure.
813 *
814 * Get the EDAC memory type appropriate for the current controller
815 * configuration.
816 *
817 * Return: a memory type enumeration.
818 */
zynqmp_get_mtype(const void __iomem * base)819 static enum mem_type zynqmp_get_mtype(const void __iomem *base)
820 {
821 enum mem_type mt;
822 u32 memtype;
823
824 memtype = readl(base + CTRL_OFST);
825
826 if ((memtype & MEM_TYPE_DDR3) || (memtype & MEM_TYPE_LPDDR3))
827 mt = MEM_DDR3;
828 else if (memtype & MEM_TYPE_DDR2)
829 mt = MEM_RDDR2;
830 else if ((memtype & MEM_TYPE_LPDDR4) || (memtype & MEM_TYPE_DDR4))
831 mt = MEM_DDR4;
832 else
833 mt = MEM_EMPTY;
834
835 return mt;
836 }
837
838 /**
839 * init_csrows - Initialize the csrow data.
840 * @mci: EDAC memory controller instance.
841 *
842 * Initialize the chip select rows associated with the EDAC memory
843 * controller instance.
844 */
init_csrows(struct mem_ctl_info * mci)845 static void init_csrows(struct mem_ctl_info *mci)
846 {
847 struct synps_edac_priv *priv = mci->pvt_info;
848 const struct synps_platform_data *p_data;
849 struct csrow_info *csi;
850 struct dimm_info *dimm;
851 u32 size, row;
852 int j;
853
854 p_data = priv->p_data;
855
856 for (row = 0; row < mci->nr_csrows; row++) {
857 csi = mci->csrows[row];
858 size = get_memsize();
859
860 for (j = 0; j < csi->nr_channels; j++) {
861 dimm = csi->channels[j]->dimm;
862 dimm->edac_mode = EDAC_SECDED;
863 dimm->mtype = p_data->get_mtype(priv->baseaddr);
864 dimm->nr_pages = (size >> PAGE_SHIFT) / csi->nr_channels;
865 dimm->grain = SYNPS_EDAC_ERR_GRAIN;
866 dimm->dtype = p_data->get_dtype(priv->baseaddr);
867 }
868 }
869 }
870
871 /**
872 * mc_init - Initialize one driver instance.
873 * @mci: EDAC memory controller instance.
874 * @pdev: platform device.
875 *
876 * Perform initialization of the EDAC memory controller instance and
877 * related driver-private data associated with the memory controller the
878 * instance is bound to.
879 */
mc_init(struct mem_ctl_info * mci,struct platform_device * pdev)880 static void mc_init(struct mem_ctl_info *mci, struct platform_device *pdev)
881 {
882 struct synps_edac_priv *priv;
883
884 mci->pdev = &pdev->dev;
885 priv = mci->pvt_info;
886 platform_set_drvdata(pdev, mci);
887
888 /* Initialize controller capabilities and configuration */
889 mci->mtype_cap = MEM_FLAG_DDR3 | MEM_FLAG_DDR2;
890 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
891 mci->scrub_cap = SCRUB_HW_SRC;
892 mci->scrub_mode = SCRUB_NONE;
893
894 mci->edac_cap = EDAC_FLAG_SECDED;
895 mci->ctl_name = "synps_ddr_controller";
896 mci->dev_name = SYNPS_EDAC_MOD_STRING;
897 mci->mod_name = SYNPS_EDAC_MOD_VER;
898
899 if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT) {
900 edac_op_state = EDAC_OPSTATE_INT;
901 } else {
902 edac_op_state = EDAC_OPSTATE_POLL;
903 mci->edac_check = check_errors;
904 }
905
906 mci->ctl_page_to_phys = NULL;
907
908 init_csrows(mci);
909 }
910
setup_irq(struct mem_ctl_info * mci,struct platform_device * pdev)911 static int setup_irq(struct mem_ctl_info *mci,
912 struct platform_device *pdev)
913 {
914 struct synps_edac_priv *priv = mci->pvt_info;
915 int ret, irq;
916
917 irq = platform_get_irq(pdev, 0);
918 if (irq < 0) {
919 edac_printk(KERN_ERR, EDAC_MC,
920 "No IRQ %d in DT\n", irq);
921 return irq;
922 }
923
924 ret = devm_request_irq(&pdev->dev, irq, intr_handler,
925 0, dev_name(&pdev->dev), mci);
926 if (ret < 0) {
927 edac_printk(KERN_ERR, EDAC_MC, "Failed to request IRQ\n");
928 return ret;
929 }
930
931 enable_intr(priv);
932
933 return 0;
934 }
935
/* Zynq A05 DDRC: polled operation, no interrupt or poison support. */
static const struct synps_platform_data zynq_edac_def = {
	.get_error_info	= zynq_get_error_info,
	.get_mtype	= zynq_get_mtype,
	.get_dtype	= zynq_get_dtype,
	.get_ecc_state	= zynq_get_ecc_state,
	.quirks		= 0,
};
943
/* ZynqMP DDRC 2.40a: interrupt driven; data poisoning when debug is on. */
static const struct synps_platform_data zynqmp_edac_def = {
	.get_error_info	= zynqmp_get_error_info,
	.get_mtype	= zynqmp_get_mtype,
	.get_dtype	= zynqmp_get_dtype,
	.get_ecc_state	= zynqmp_get_ecc_state,
#ifdef CONFIG_EDAC_DEBUG
	.get_mem_info	= zynqmp_get_mem_info,
#endif
	.quirks         = (DDR_ECC_INTR_SUPPORT
#ifdef CONFIG_EDAC_DEBUG
			  | DDR_ECC_DATA_POISON_SUPPORT
#endif
			  ),
};
958
/* Generic Synopsys DDRC 3.80a: interrupt driven with self-clearing bits. */
static const struct synps_platform_data synopsys_edac_def = {
	.get_error_info	= zynqmp_get_error_info,
	.get_mtype	= zynqmp_get_mtype,
	.get_dtype	= zynqmp_get_dtype,
	.get_ecc_state	= zynqmp_get_ecc_state,
	.quirks         = (DDR_ECC_INTR_SUPPORT | DDR_ECC_INTR_SELF_CLEAR
#ifdef CONFIG_EDAC_DEBUG
			  | DDR_ECC_DATA_POISON_SUPPORT
#endif
			  ),
};
970
971
/* Device-tree match table: one entry per supported controller flavour. */
static const struct of_device_id synps_edac_match[] = {
	{
		.compatible = "xlnx,zynq-ddrc-a05",
		.data = (void *)&zynq_edac_def
	},
	{
		.compatible = "xlnx,zynqmp-ddrc-2.40a",
		.data = (void *)&zynqmp_edac_def
	},
	{
		.compatible = "snps,ddrc-3.80a",
		.data = (void *)&synopsys_edac_def
	},
	{
		/* end of table */
	}
};

MODULE_DEVICE_TABLE(of, synps_edac_match);
991
992 #ifdef CONFIG_EDAC_DEBUG
993 #define to_mci(k) container_of(k, struct mem_ctl_info, dev)
994
995 /**
996 * ddr_poison_setup - Update poison registers.
997 * @priv: DDR memory controller private instance data.
998 *
999 * Update poison registers as per DDR mapping.
1000 * Return: none.
1001 */
ddr_poison_setup(struct synps_edac_priv * priv)1002 static void ddr_poison_setup(struct synps_edac_priv *priv)
1003 {
1004 int col = 0, row = 0, bank = 0, bankgrp = 0, rank = 0, regval;
1005 const struct synps_platform_data *p_data;
1006 int index;
1007 ulong hif_addr = 0;
1008
1009 p_data = priv->p_data;
1010
1011 if (p_data->get_mem_info)
1012 hif_addr = p_data->get_mem_info(priv);
1013 else
1014 hif_addr = priv->poison_addr >> 3;
1015
1016 for (index = 0; index < DDR_MAX_ROW_SHIFT; index++) {
1017 if (priv->row_shift[index])
1018 row |= (((hif_addr >> priv->row_shift[index]) &
1019 BIT(0)) << index);
1020 else
1021 break;
1022 }
1023
1024 for (index = 0; index < DDR_MAX_COL_SHIFT; index++) {
1025 if (priv->col_shift[index] || index < 3)
1026 col |= (((hif_addr >> priv->col_shift[index]) &
1027 BIT(0)) << index);
1028 else
1029 break;
1030 }
1031
1032 for (index = 0; index < DDR_MAX_BANK_SHIFT; index++) {
1033 if (priv->bank_shift[index])
1034 bank |= (((hif_addr >> priv->bank_shift[index]) &
1035 BIT(0)) << index);
1036 else
1037 break;
1038 }
1039
1040 for (index = 0; index < DDR_MAX_BANKGRP_SHIFT; index++) {
1041 if (priv->bankgrp_shift[index])
1042 bankgrp |= (((hif_addr >> priv->bankgrp_shift[index])
1043 & BIT(0)) << index);
1044 else
1045 break;
1046 }
1047
1048 if (priv->rank_shift[0])
1049 rank = (hif_addr >> priv->rank_shift[0]) & BIT(0);
1050
1051 regval = (rank << ECC_POISON0_RANK_SHIFT) & ECC_POISON0_RANK_MASK;
1052 regval |= (col << ECC_POISON0_COLUMN_SHIFT) & ECC_POISON0_COLUMN_MASK;
1053 writel(regval, priv->baseaddr + ECC_POISON0_OFST);
1054
1055 regval = (bankgrp << ECC_POISON1_BG_SHIFT) & ECC_POISON1_BG_MASK;
1056 regval |= (bank << ECC_POISON1_BANKNR_SHIFT) & ECC_POISON1_BANKNR_MASK;
1057 regval |= (row << ECC_POISON1_ROW_SHIFT) & ECC_POISON1_ROW_MASK;
1058 writel(regval, priv->baseaddr + ECC_POISON1_OFST);
1059 }
1060
/* sysfs read: dump the programmed poison registers and injection address. */
static ssize_t inject_data_error_show(struct device *dev,
				      struct device_attribute *mattr,
				      char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct synps_edac_priv *priv = mci->pvt_info;

	return sprintf(data, "Poison0 Addr: 0x%08x\n\rPoison1 Addr: 0x%08x\n\r"
			"Error injection Address: 0x%lx\n\r",
			readl(priv->baseaddr + ECC_POISON0_OFST),
			readl(priv->baseaddr + ECC_POISON1_OFST),
			priv->poison_addr);
}
1074
inject_data_error_store(struct device * dev,struct device_attribute * mattr,const char * data,size_t count)1075 static ssize_t inject_data_error_store(struct device *dev,
1076 struct device_attribute *mattr,
1077 const char *data, size_t count)
1078 {
1079 struct mem_ctl_info *mci = to_mci(dev);
1080 struct synps_edac_priv *priv = mci->pvt_info;
1081
1082 if (kstrtoul(data, 0, &priv->poison_addr))
1083 return -EINVAL;
1084
1085 ddr_poison_setup(priv);
1086
1087 return count;
1088 }
1089
/* sysfs read: report whether CE or UE poisoning is configured in ECC_CFG1. */
static ssize_t inject_data_poison_show(struct device *dev,
				       struct device_attribute *mattr,
				       char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct synps_edac_priv *priv = mci->pvt_info;

	return sprintf(data, "Data Poisoning: %s\n\r",
			(((readl(priv->baseaddr + ECC_CFG1_OFST)) & 0x3) == 0x3)
			? ("Correctable Error") : ("UnCorrectable Error"));
}
1101
inject_data_poison_store(struct device * dev,struct device_attribute * mattr,const char * data,size_t count)1102 static ssize_t inject_data_poison_store(struct device *dev,
1103 struct device_attribute *mattr,
1104 const char *data, size_t count)
1105 {
1106 struct mem_ctl_info *mci = to_mci(dev);
1107 struct synps_edac_priv *priv = mci->pvt_info;
1108
1109 writel(0, priv->baseaddr + DDRC_SWCTL);
1110 if (strncmp(data, "CE", 2) == 0)
1111 writel(ECC_CEPOISON_MASK, priv->baseaddr + ECC_CFG1_OFST);
1112 else
1113 writel(ECC_UEPOISON_MASK, priv->baseaddr + ECC_CFG1_OFST);
1114 writel(1, priv->baseaddr + DDRC_SWCTL);
1115
1116 return count;
1117 }
1118
/* sysfs injection knobs, created only when poisoning is supported (debug). */
static DEVICE_ATTR_RW(inject_data_error);
static DEVICE_ATTR_RW(inject_data_poison);
1121
edac_create_sysfs_attributes(struct mem_ctl_info * mci)1122 static int edac_create_sysfs_attributes(struct mem_ctl_info *mci)
1123 {
1124 int rc;
1125
1126 rc = device_create_file(&mci->dev, &dev_attr_inject_data_error);
1127 if (rc < 0)
1128 return rc;
1129 rc = device_create_file(&mci->dev, &dev_attr_inject_data_poison);
1130 if (rc < 0)
1131 return rc;
1132 return 0;
1133 }
1134
edac_remove_sysfs_attributes(struct mem_ctl_info * mci)1135 static void edac_remove_sysfs_attributes(struct mem_ctl_info *mci)
1136 {
1137 device_remove_file(&mci->dev, &dev_attr_inject_data_error);
1138 device_remove_file(&mci->dev, &dev_attr_inject_data_poison);
1139 }
1140
/**
 * setup_row_address_map - Derive row-bit positions from ADDRMAP registers.
 * @priv:	DDR memory controller private instance data.
 * @addrmap:	Array of raw ADDRMAP register values (index N = ADDRMAPN).
 *
 * Fills priv->row_shift[0..17] with the address-bit position that carries
 * each DDR row address bit.  For row bits 11..17, a field equal to
 * ROW_MAX_VAL_MASK marks the bit as unused and the shift is recorded as 0.
 */
static void setup_row_address_map(struct synps_edac_priv *priv, u32 *addrmap)
{
	u32 addrmap_row_b2_10;
	int index;

	/* Row bits 0 and 1 come from the two low byte fields of ADDRMAP5. */
	priv->row_shift[0] = (addrmap[5] & ROW_MAX_VAL_MASK) + ROW_B0_BASE;
	priv->row_shift[1] = ((addrmap[5] >> 8) &
			ROW_MAX_VAL_MASK) + ROW_B1_BASE;

	addrmap_row_b2_10 = (addrmap[5] >> 16) & ROW_MAX_VAL_MASK;
	if (addrmap_row_b2_10 != ROW_MAX_VAL_MASK) {
		/*
		 * Compact encoding: one shared ADDRMAP5 field positions row
		 * bits 2..10, each one position higher than the previous.
		 */
		for (index = 2; index < 11; index++)
			priv->row_shift[index] = addrmap_row_b2_10 +
				index + ROW_B0_BASE;

	} else {
		/*
		 * Expanded encoding: per-bit fields in ADDRMAP9/10/11 give
		 * row bits 2..10 individually.
		 */
		priv->row_shift[2] = (addrmap[9] &
				ROW_MAX_VAL_MASK) + ROW_B2_BASE;
		priv->row_shift[3] = ((addrmap[9] >> 8) &
				ROW_MAX_VAL_MASK) + ROW_B3_BASE;
		priv->row_shift[4] = ((addrmap[9] >> 16) &
				ROW_MAX_VAL_MASK) + ROW_B4_BASE;
		priv->row_shift[5] = ((addrmap[9] >> 24) &
				ROW_MAX_VAL_MASK) + ROW_B5_BASE;
		priv->row_shift[6] = (addrmap[10] &
				ROW_MAX_VAL_MASK) + ROW_B6_BASE;
		priv->row_shift[7] = ((addrmap[10] >> 8) &
				ROW_MAX_VAL_MASK) + ROW_B7_BASE;
		priv->row_shift[8] = ((addrmap[10] >> 16) &
				ROW_MAX_VAL_MASK) + ROW_B8_BASE;
		priv->row_shift[9] = ((addrmap[10] >> 24) &
				ROW_MAX_VAL_MASK) + ROW_B9_BASE;
		priv->row_shift[10] = (addrmap[11] &
				ROW_MAX_VAL_MASK) + ROW_B10_BASE;
	}

	/* Row bits 11..17: all-ones field means "bit not used" (shift 0). */
	priv->row_shift[11] = (((addrmap[5] >> 24) & ROW_MAX_VAL_MASK) ==
				ROW_MAX_VAL_MASK) ? 0 : (((addrmap[5] >> 24) &
				ROW_MAX_VAL_MASK) + ROW_B11_BASE);
	priv->row_shift[12] = ((addrmap[6] & ROW_MAX_VAL_MASK) ==
				ROW_MAX_VAL_MASK) ? 0 : ((addrmap[6] &
				ROW_MAX_VAL_MASK) + ROW_B12_BASE);
	priv->row_shift[13] = (((addrmap[6] >> 8) & ROW_MAX_VAL_MASK) ==
				ROW_MAX_VAL_MASK) ? 0 : (((addrmap[6] >> 8) &
				ROW_MAX_VAL_MASK) + ROW_B13_BASE);
	priv->row_shift[14] = (((addrmap[6] >> 16) & ROW_MAX_VAL_MASK) ==
				ROW_MAX_VAL_MASK) ? 0 : (((addrmap[6] >> 16) &
				ROW_MAX_VAL_MASK) + ROW_B14_BASE);
	priv->row_shift[15] = (((addrmap[6] >> 24) & ROW_MAX_VAL_MASK) ==
				ROW_MAX_VAL_MASK) ? 0 : (((addrmap[6] >> 24) &
				ROW_MAX_VAL_MASK) + ROW_B15_BASE);
	priv->row_shift[16] = ((addrmap[7] & ROW_MAX_VAL_MASK) ==
				ROW_MAX_VAL_MASK) ? 0 : ((addrmap[7] &
				ROW_MAX_VAL_MASK) + ROW_B16_BASE);
	priv->row_shift[17] = (((addrmap[7] >> 8) & ROW_MAX_VAL_MASK) ==
				ROW_MAX_VAL_MASK) ? 0 : (((addrmap[7] >> 8) &
				ROW_MAX_VAL_MASK) + ROW_B17_BASE);
}
1199
/**
 * setup_column_address_map - Derive column-bit positions from ADDRMAP registers.
 * @priv:	DDR memory controller private instance data.
 * @addrmap:	Array of raw ADDRMAP register values (index N = ADDRMAPN).
 *
 * Fills priv->col_shift[] from ADDRMAP2/3/4.  The upper column bits depend
 * on the configured bus width and on whether the memory is LPDDR3.  A field
 * equal to COL_MAX_VAL_MASK marks a column bit as unused (shift 0).
 */
static void setup_column_address_map(struct synps_edac_priv *priv, u32 *addrmap)
{
	u32 width, memtype;
	int index;

	/* Bus-width field of the master control register selects the layout. */
	memtype = readl(priv->baseaddr + CTRL_OFST);
	width = (memtype & ECC_CTRL_BUSWIDTH_MASK) >> ECC_CTRL_BUSWIDTH_SHIFT;

	/* Column bits 0 and 1 are always the two lowest address bits. */
	priv->col_shift[0] = 0;
	priv->col_shift[1] = 1;
	priv->col_shift[2] = (addrmap[2] & COL_MAX_VAL_MASK) + COL_B2_BASE;
	priv->col_shift[3] = ((addrmap[2] >> 8) &
			COL_MAX_VAL_MASK) + COL_B3_BASE;
	/* Bits 4..9: an all-ones field means the column bit is not used. */
	priv->col_shift[4] = (((addrmap[2] >> 16) & COL_MAX_VAL_MASK) ==
			COL_MAX_VAL_MASK) ? 0 : (((addrmap[2] >> 16) &
					COL_MAX_VAL_MASK) + COL_B4_BASE);
	priv->col_shift[5] = (((addrmap[2] >> 24) & COL_MAX_VAL_MASK) ==
			COL_MAX_VAL_MASK) ? 0 : (((addrmap[2] >> 24) &
					COL_MAX_VAL_MASK) + COL_B5_BASE);
	priv->col_shift[6] = ((addrmap[3] & COL_MAX_VAL_MASK) ==
			COL_MAX_VAL_MASK) ? 0 : ((addrmap[3] &
					COL_MAX_VAL_MASK) + COL_B6_BASE);
	priv->col_shift[7] = (((addrmap[3] >> 8) & COL_MAX_VAL_MASK) ==
			COL_MAX_VAL_MASK) ? 0 : (((addrmap[3] >> 8) &
					COL_MAX_VAL_MASK) + COL_B7_BASE);
	priv->col_shift[8] = (((addrmap[3] >> 16) & COL_MAX_VAL_MASK) ==
			COL_MAX_VAL_MASK) ? 0 : (((addrmap[3] >> 16) &
					COL_MAX_VAL_MASK) + COL_B8_BASE);
	priv->col_shift[9] = (((addrmap[3] >> 24) & COL_MAX_VAL_MASK) ==
			COL_MAX_VAL_MASK) ? 0 : (((addrmap[3] >> 24) &
					COL_MAX_VAL_MASK) + COL_B9_BASE);
	/*
	 * Upper column bits: the ADDRMAP field used and the col_shift index
	 * it lands in differ per bus width and per LPDDR3 vs. non-LPDDR3.
	 * NOTE(review): the non-LPDDR3 branches populate indices [11]/[13]
	 * (skipping [10]/[12]) — presumably matching the controller's HIF
	 * column numbering; confirm against the uMCTL2 databook.
	 */
	if (width == DDRCTL_EWDTH_64) {
		if (memtype & MEM_TYPE_LPDDR3) {
			priv->col_shift[10] = ((addrmap[4] &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				((addrmap[4] & COL_MAX_VAL_MASK) +
				 COL_B10_BASE);
			priv->col_shift[11] = (((addrmap[4] >> 8) &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				(((addrmap[4] >> 8) & COL_MAX_VAL_MASK) +
				 COL_B11_BASE);
		} else {
			priv->col_shift[11] = ((addrmap[4] &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				((addrmap[4] & COL_MAX_VAL_MASK) +
				 COL_B10_BASE);
			priv->col_shift[13] = (((addrmap[4] >> 8) &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				(((addrmap[4] >> 8) & COL_MAX_VAL_MASK) +
				 COL_B11_BASE);
		}
	} else if (width == DDRCTL_EWDTH_32) {
		if (memtype & MEM_TYPE_LPDDR3) {
			priv->col_shift[10] = (((addrmap[3] >> 24) &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				(((addrmap[3] >> 24) & COL_MAX_VAL_MASK) +
				 COL_B9_BASE);
			priv->col_shift[11] = ((addrmap[4] &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				((addrmap[4] & COL_MAX_VAL_MASK) +
				 COL_B10_BASE);
		} else {
			priv->col_shift[11] = (((addrmap[3] >> 24) &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				(((addrmap[3] >> 24) & COL_MAX_VAL_MASK) +
				 COL_B9_BASE);
			priv->col_shift[13] = ((addrmap[4] &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				((addrmap[4] & COL_MAX_VAL_MASK) +
				 COL_B10_BASE);
		}
	} else {
		if (memtype & MEM_TYPE_LPDDR3) {
			priv->col_shift[10] = (((addrmap[3] >> 16) &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				(((addrmap[3] >> 16) & COL_MAX_VAL_MASK) +
				 COL_B8_BASE);
			priv->col_shift[11] = (((addrmap[3] >> 24) &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				(((addrmap[3] >> 24) & COL_MAX_VAL_MASK) +
				 COL_B9_BASE);
			priv->col_shift[13] = ((addrmap[4] &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				((addrmap[4] & COL_MAX_VAL_MASK) +
				 COL_B10_BASE);
		} else {
			priv->col_shift[11] = (((addrmap[3] >> 16) &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				(((addrmap[3] >> 16) & COL_MAX_VAL_MASK) +
				 COL_B8_BASE);
			priv->col_shift[13] = (((addrmap[3] >> 24) &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				(((addrmap[3] >> 24) & COL_MAX_VAL_MASK) +
				 COL_B9_BASE);
		}
	}

	if (width) {
		/*
		 * Non-zero width (reduced bus width): move each column shift
		 * up by 'width' positions and clear the vacated entries.
		 * NOTE(review): presumably compensates for the HIF address
		 * compression in half/quarter-width modes — verify against
		 * the controller documentation.
		 */
		for (index = 9; index > width; index--) {
			priv->col_shift[index] = priv->col_shift[index - width];
			priv->col_shift[index - width] = 0;
		}
	}

}
1305
/*
 * setup_bank_address_map - Derive bank-bit positions from ADDRMAP1.
 * @priv:	DDR memory controller private instance data.
 * @addrmap:	Array of raw ADDRMAP register values.
 */
static void setup_bank_address_map(struct synps_edac_priv *priv, u32 *addrmap)
{
	u32 b0, b1, b2;

	b0 = addrmap[1] & BANK_MAX_VAL_MASK;
	b1 = (addrmap[1] >> 8) & BANK_MAX_VAL_MASK;
	b2 = (addrmap[1] >> 16) & BANK_MAX_VAL_MASK;

	priv->bank_shift[0] = b0 + BANK_B0_BASE;
	priv->bank_shift[1] = b1 + BANK_B1_BASE;
	/* All-ones field marks bank bit 2 as unused. */
	priv->bank_shift[2] = (b2 == BANK_MAX_VAL_MASK) ?
				0 : (b2 + BANK_B2_BASE);

}
1317
/*
 * setup_bg_address_map - Derive bank-group bit positions from ADDRMAP8.
 * @priv:	DDR memory controller private instance data.
 * @addrmap:	Array of raw ADDRMAP register values.
 */
static void setup_bg_address_map(struct synps_edac_priv *priv, u32 *addrmap)
{
	u32 bg0, bg1;

	bg0 = addrmap[8] & BANKGRP_MAX_VAL_MASK;
	bg1 = (addrmap[8] >> 8) & BANKGRP_MAX_VAL_MASK;

	priv->bankgrp_shift[0] = bg0 + BANKGRP_B0_BASE;
	/* All-ones field marks bank-group bit 1 as unused. */
	priv->bankgrp_shift[1] = (bg1 == BANKGRP_MAX_VAL_MASK) ?
				0 : (bg1 + BANKGRP_B1_BASE);

}
1327
/*
 * setup_rank_address_map - Derive the rank-bit position from ADDRMAP0.
 * @priv:	DDR memory controller private instance data.
 * @addrmap:	Array of raw ADDRMAP register values.
 */
static void setup_rank_address_map(struct synps_edac_priv *priv, u32 *addrmap)
{
	u32 rank0 = addrmap[0] & RANK_MAX_VAL_MASK;

	/* All-ones field means no rank address bit is mapped. */
	priv->rank_shift[0] = (rank0 == RANK_MAX_VAL_MASK) ?
				0 : (rank0 + RANK_B0_BASE);
}
1334
1335 /**
1336 * setup_address_map - Set Address Map by querying ADDRMAP registers.
1337 * @priv: DDR memory controller private instance data.
1338 *
1339 * Set Address Map by querying ADDRMAP registers.
1340 *
1341 * Return: none.
1342 */
setup_address_map(struct synps_edac_priv * priv)1343 static void setup_address_map(struct synps_edac_priv *priv)
1344 {
1345 u32 addrmap[12];
1346 int index;
1347
1348 for (index = 0; index < 12; index++) {
1349 u32 addrmap_offset;
1350
1351 addrmap_offset = ECC_ADDRMAP0_OFFSET + (index * 4);
1352 addrmap[index] = readl(priv->baseaddr + addrmap_offset);
1353 }
1354
1355 setup_row_address_map(priv, addrmap);
1356
1357 setup_column_address_map(priv, addrmap);
1358
1359 setup_bank_address_map(priv, addrmap);
1360
1361 setup_bg_address_map(priv, addrmap);
1362
1363 setup_rank_address_map(priv, addrmap);
1364 }
1365 #endif /* CONFIG_EDAC_DEBUG */
1366
1367 /**
1368 * mc_probe - Check controller and bind driver.
1369 * @pdev: platform device.
1370 *
1371 * Probe a specific controller instance for binding with the driver.
1372 *
1373 * Return: 0 if the controller instance was successfully bound to the
1374 * driver; otherwise, < 0 on error.
1375 */
mc_probe(struct platform_device * pdev)1376 static int mc_probe(struct platform_device *pdev)
1377 {
1378 const struct synps_platform_data *p_data;
1379 struct edac_mc_layer layers[2];
1380 struct synps_edac_priv *priv;
1381 struct mem_ctl_info *mci;
1382 void __iomem *baseaddr;
1383 struct resource *res;
1384 int rc;
1385
1386 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1387 baseaddr = devm_ioremap_resource(&pdev->dev, res);
1388 if (IS_ERR(baseaddr))
1389 return PTR_ERR(baseaddr);
1390
1391 p_data = of_device_get_match_data(&pdev->dev);
1392 if (!p_data)
1393 return -ENODEV;
1394
1395 if (!p_data->get_ecc_state(baseaddr)) {
1396 edac_printk(KERN_INFO, EDAC_MC, "ECC not enabled\n");
1397 return -ENXIO;
1398 }
1399
1400 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
1401 layers[0].size = SYNPS_EDAC_NR_CSROWS;
1402 layers[0].is_virt_csrow = true;
1403 layers[1].type = EDAC_MC_LAYER_CHANNEL;
1404 layers[1].size = SYNPS_EDAC_NR_CHANS;
1405 layers[1].is_virt_csrow = false;
1406
1407 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
1408 sizeof(struct synps_edac_priv));
1409 if (!mci) {
1410 edac_printk(KERN_ERR, EDAC_MC,
1411 "Failed memory allocation for mc instance\n");
1412 return -ENOMEM;
1413 }
1414
1415 priv = mci->pvt_info;
1416 priv->baseaddr = baseaddr;
1417 priv->p_data = p_data;
1418 spin_lock_init(&priv->reglock);
1419
1420 mc_init(mci, pdev);
1421
1422 if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT) {
1423 rc = setup_irq(mci, pdev);
1424 if (rc)
1425 goto free_edac_mc;
1426 }
1427
1428 rc = edac_mc_add_mc(mci);
1429 if (rc) {
1430 edac_printk(KERN_ERR, EDAC_MC,
1431 "Failed to register with EDAC core\n");
1432 goto free_edac_mc;
1433 }
1434
1435 #ifdef CONFIG_EDAC_DEBUG
1436 if (priv->p_data->quirks & DDR_ECC_DATA_POISON_SUPPORT) {
1437 rc = edac_create_sysfs_attributes(mci);
1438 if (rc) {
1439 edac_printk(KERN_ERR, EDAC_MC,
1440 "Failed to create sysfs entries\n");
1441 goto free_edac_mc;
1442 }
1443 }
1444
1445 if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT)
1446 setup_address_map(priv);
1447 #endif
1448
1449 /*
1450 * Start capturing the correctable and uncorrectable errors. A write of
1451 * 0 starts the counters.
1452 */
1453 if (!(priv->p_data->quirks & DDR_ECC_INTR_SUPPORT))
1454 writel(0x0, baseaddr + ECC_CTRL_OFST);
1455
1456 return rc;
1457
1458 free_edac_mc:
1459 edac_mc_free(mci);
1460
1461 return rc;
1462 }
1463
/**
 * mc_remove - Unbind driver from controller.
 * @pdev:	Platform device.
 *
 * Disables the ECC interrupt when the IP supports it, removes the debug
 * injection sysfs files if they were created, then unregisters and frees
 * the EDAC MC instance set up by mc_probe().
 *
 * Return: Unconditionally 0
 */
static int mc_remove(struct platform_device *pdev)
{
	struct mem_ctl_info *mci = platform_get_drvdata(pdev);
	struct synps_edac_priv *priv = mci->pvt_info;

	/* Stop further error interrupts before tearing the MC down. */
	if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT)
		disable_intr(priv);

#ifdef CONFIG_EDAC_DEBUG
	/* Injection attributes exist only when data poisoning is supported. */
	if (priv->p_data->quirks & DDR_ECC_DATA_POISON_SUPPORT)
		edac_remove_sysfs_attributes(mci);
#endif

	edac_mc_del_mc(&pdev->dev);
	edac_mc_free(mci);

	return 0;
}
1488
/* Platform driver glue; matching is done via the OF table. */
static struct platform_driver synps_edac_mc_driver = {
	.driver = {
		   .name = "synopsys-edac",
		   .of_match_table = synps_edac_match,
		   },
	.probe = mc_probe,
	.remove = mc_remove,
};

module_platform_driver(synps_edac_mc_driver);

MODULE_AUTHOR("Xilinx Inc");
MODULE_DESCRIPTION("Synopsys DDR ECC driver");
MODULE_LICENSE("GPL v2");
1503