// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Pengutronix, Jan Luebbe <kernel@pengutronix.de>
 */

#include <linux/kernel.h>
#include <linux/edac.h>
#include <linux/of_platform.h>

#include <asm/hardware/cache-l2x0.h>
#include <asm/hardware/cache-aurora-l2.h>

#include "edac_mc.h"
#include "edac_device.h"
#include "edac_module.h"

/************************ EDAC MC (DDR RAM) ********************************/

#define SDRAM_NUM_CS 4

#define SDRAM_CONFIG_REG        0x0
#define SDRAM_CONFIG_ECC_MASK         BIT(18)
#define SDRAM_CONFIG_REGISTERED_MASK  BIT(17)
#define SDRAM_CONFIG_BUS_WIDTH_MASK   BIT(15)

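/*
 * Per-CS geometry in SDRAM_ADDR_CTRL_REG: SIZE_HIGH(cs) supplies bit 2 and
 * SIZE_LOW(cs) bits 1:0 of a 3-bit device-density code, which is decoded in
 * axp_mc_read_config() below.
 */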
#define SDRAM_ADDR_CTRL_REG     0x10
#define SDRAM_ADDR_CTRL_SIZE_HIGH_OFFSET(cs) (20 + (cs))
#define SDRAM_ADDR_CTRL_SIZE_HIGH_MASK(cs)   (0x1 << SDRAM_ADDR_CTRL_SIZE_HIGH_OFFSET(cs))
#define SDRAM_ADDR_CTRL_ADDR_SEL_MASK(cs)    BIT(16 + (cs))
#define SDRAM_ADDR_CTRL_SIZE_LOW_OFFSET(cs)  ((cs) * 4 + 2)
#define SDRAM_ADDR_CTRL_SIZE_LOW_MASK(cs)    (0x3 << SDRAM_ADDR_CTRL_SIZE_LOW_OFFSET(cs))
#define SDRAM_ADDR_CTRL_STRUCT_OFFSET(cs)    ((cs) * 4)
#define SDRAM_ADDR_CTRL_STRUCT_MASK(cs)      (0x3 << SDRAM_ADDR_CTRL_STRUCT_OFFSET(cs))

#define SDRAM_ERR_DATA_H_REG    0x40
#define SDRAM_ERR_DATA_L_REG    0x44

#define SDRAM_ERR_RECV_ECC_REG  0x48
#define SDRAM_ERR_RECV_ECC_VALUE_MASK 0xff

#define SDRAM_ERR_CALC_ECC_REG  0x4c
#define SDRAM_ERR_CALC_ECC_ROW_OFFSET 8
#define SDRAM_ERR_CALC_ECC_ROW_MASK   (0xffff << SDRAM_ERR_CALC_ECC_ROW_OFFSET)
#define SDRAM_ERR_CALC_ECC_VALUE_MASK 0xff

#define SDRAM_ERR_ADDR_REG      0x50
#define SDRAM_ERR_ADDR_BANK_OFFSET    23
#define SDRAM_ERR_ADDR_BANK_MASK      (0x7 << SDRAM_ERR_ADDR_BANK_OFFSET)
#define SDRAM_ERR_ADDR_COL_OFFSET     8
#define SDRAM_ERR_ADDR_COL_MASK       (0x7fff << SDRAM_ERR_ADDR_COL_OFFSET)
#define SDRAM_ERR_ADDR_CS_OFFSET      1
#define SDRAM_ERR_ADDR_CS_MASK        (0x3 << SDRAM_ERR_ADDR_CS_OFFSET)
#define SDRAM_ERR_ADDR_TYPE_MASK      BIT(0)

#define SDRAM_ERR_CTRL_REG      0x54
#define SDRAM_ERR_CTRL_THR_OFFSET     16
#define SDRAM_ERR_CTRL_THR_MASK       (0xff << SDRAM_ERR_CTRL_THR_OFFSET)
#define SDRAM_ERR_CTRL_PROP_MASK      BIT(9)

#define SDRAM_ERR_SBE_COUNT_REG 0x58
#define SDRAM_ERR_DBE_COUNT_REG 0x5c

#define SDRAM_ERR_CAUSE_ERR_REG 0xd0
#define SDRAM_ERR_CAUSE_MSG_REG 0xd8
#define SDRAM_ERR_CAUSE_DBE_MASK      BIT(1)
#define SDRAM_ERR_CAUSE_SBE_MASK      BIT(0)

#define SDRAM_RANK_CTRL_REG 0x1e0
#define SDRAM_RANK_CTRL_EXIST_MASK(cs) BIT(cs)

struct axp_mc_drvdata {
	void __iomem *base;
	/* width in bytes */
	unsigned int width;
	/* bank interleaving */
	bool cs_addr_sel[SDRAM_NUM_CS];

	char msg[128];
};

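/*
 * The captured row/column/bank are folded back into a linear address: e.g.
 * on a 64-bit bus without bank interleaving the layout below is
 * addr = (row << 16) | (bank << 13) | (col << 3), with the field positions
 * shifted down by one bit for each halving of the bus width.
 */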
/* derived from "DRAM Address Multiplexing" in the ARMADA XP Functional Spec */
static uint32_t axp_mc_calc_address(struct axp_mc_drvdata *drvdata,
				    uint8_t cs, uint8_t bank, uint16_t row,
				    uint16_t col)
{
	if (drvdata->width == 8) {
		/* 64 bit */
		if (drvdata->cs_addr_sel[cs])
			/* bank interleaved */
			return (((row & 0xfff8) << 16) |
				((bank & 0x7) << 16) |
				((row & 0x7) << 13) |
				((col & 0x3ff) << 3));
		else
			return (((row & 0xffff) << 16) |
				((bank & 0x7) << 13) |
				((col & 0x3ff) << 3));
	} else if (drvdata->width == 4) {
		/* 32 bit */
		if (drvdata->cs_addr_sel[cs])
			/* bank interleaved */
			return (((row & 0xfff0) << 15) |
				((bank & 0x7) << 16) |
				((row & 0xf) << 12) |
				((col & 0x3ff) << 2));
		else
			return (((row & 0xffff) << 15) |
				((bank & 0x7) << 12) |
				((col & 0x3ff) << 2));
	} else {
		/* 16 bit */
		if (drvdata->cs_addr_sel[cs])
			/* bank interleaved */
			return (((row & 0xffe0) << 14) |
				((bank & 0x7) << 16) |
				((row & 0x1f) << 11) |
				((col & 0x3ff) << 1));
		else
			return (((row & 0xffff) << 14) |
				((bank & 0x7) << 11) |
				((col & 0x3ff) << 1));
	}
}

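/*
 * Polled check: read the capture and count registers, clear them, report any
 * backlog of errors, then decode the most recently captured one in detail.
 */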
static void axp_mc_check(struct mem_ctl_info *mci)
{
	struct axp_mc_drvdata *drvdata = mci->pvt_info;
	uint32_t data_h, data_l, recv_ecc, calc_ecc, addr;
	uint32_t cnt_sbe, cnt_dbe, cause_err, cause_msg;
	uint32_t row_val, col_val, bank_val, addr_val;
	uint8_t syndrome_val, cs_val;
	char *msg = drvdata->msg;

	data_h    = readl(drvdata->base + SDRAM_ERR_DATA_H_REG);
	data_l    = readl(drvdata->base + SDRAM_ERR_DATA_L_REG);
	recv_ecc  = readl(drvdata->base + SDRAM_ERR_RECV_ECC_REG);
	calc_ecc  = readl(drvdata->base + SDRAM_ERR_CALC_ECC_REG);
	addr      = readl(drvdata->base + SDRAM_ERR_ADDR_REG);
	cnt_sbe   = readl(drvdata->base + SDRAM_ERR_SBE_COUNT_REG);
	cnt_dbe   = readl(drvdata->base + SDRAM_ERR_DBE_COUNT_REG);
	cause_err = readl(drvdata->base + SDRAM_ERR_CAUSE_ERR_REG);
	cause_msg = readl(drvdata->base + SDRAM_ERR_CAUSE_MSG_REG);

	/* clear cause registers */
	writel(~(SDRAM_ERR_CAUSE_DBE_MASK | SDRAM_ERR_CAUSE_SBE_MASK),
	       drvdata->base + SDRAM_ERR_CAUSE_ERR_REG);
	writel(~(SDRAM_ERR_CAUSE_DBE_MASK | SDRAM_ERR_CAUSE_SBE_MASK),
	       drvdata->base + SDRAM_ERR_CAUSE_MSG_REG);

	/* clear error counter registers */
	if (cnt_sbe)
		writel(0, drvdata->base + SDRAM_ERR_SBE_COUNT_REG);
	if (cnt_dbe)
		writel(0, drvdata->base + SDRAM_ERR_DBE_COUNT_REG);

	if (!cnt_sbe && !cnt_dbe)
		return;

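	/*
	 * The most recently captured error is also included in the counters;
	 * discount it here, since it is reported with full details below.
	 */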
	if (!(addr & SDRAM_ERR_ADDR_TYPE_MASK)) {
		if (cnt_sbe)
			cnt_sbe--;
		else
			dev_warn(mci->pdev, "inconsistent SBE count detected\n");
	} else {
		if (cnt_dbe)
			cnt_dbe--;
		else
			dev_warn(mci->pdev, "inconsistent DBE count detected\n");
	}

	/* report earlier errors */
	if (cnt_sbe)
		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
				     cnt_sbe, /* error count */
				     0, 0, 0, /* pfn, offset, syndrome */
				     -1, -1, -1, /* top, mid, low layer */
				     mci->ctl_name,
				     "details unavailable (multiple errors)");
	if (cnt_dbe)
		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
				     cnt_dbe, /* error count */
				     0, 0, 0, /* pfn, offset, syndrome */
				     -1, -1, -1, /* top, mid, low layer */
				     mci->ctl_name,
				     "details unavailable (multiple errors)");

	/* report details for most recent error */
	cs_val   = (addr & SDRAM_ERR_ADDR_CS_MASK) >> SDRAM_ERR_ADDR_CS_OFFSET;
	bank_val = (addr & SDRAM_ERR_ADDR_BANK_MASK) >> SDRAM_ERR_ADDR_BANK_OFFSET;
	row_val  = (calc_ecc & SDRAM_ERR_CALC_ECC_ROW_MASK) >> SDRAM_ERR_CALC_ECC_ROW_OFFSET;
	col_val  = (addr & SDRAM_ERR_ADDR_COL_MASK) >> SDRAM_ERR_ADDR_COL_OFFSET;
	syndrome_val = (recv_ecc ^ calc_ecc) & 0xff;
	addr_val = axp_mc_calc_address(drvdata, cs_val, bank_val, row_val,
				       col_val);
	msg += sprintf(msg, "row=0x%04x ", row_val); /* 11 chars */
	msg += sprintf(msg, "bank=0x%x ", bank_val); /*  9 chars */
	msg += sprintf(msg, "col=0x%04x ", col_val); /* 11 chars */
	msg += sprintf(msg, "cs=%d", cs_val);	     /*  4 chars */

	if (!(addr & SDRAM_ERR_ADDR_TYPE_MASK)) {
		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
				     1,	/* error count */
				     addr_val >> PAGE_SHIFT,
				     addr_val & ~PAGE_MASK,
				     syndrome_val,
				     cs_val, -1, -1, /* top, mid, low layer */
				     mci->ctl_name, drvdata->msg);
	} else {
		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
				     1,	/* error count */
				     addr_val >> PAGE_SHIFT,
				     addr_val & ~PAGE_MASK,
				     syndrome_val,
				     cs_val, -1, -1, /* top, mid, low layer */
				     mci->ctl_name, drvdata->msg);
	}
}

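/*
 * Fill in the EDAC dimm layout from the controller configuration: bus width,
 * which chip selects are populated, whether they use bank interleaving, and
 * the per-CS device density.
 */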
static void axp_mc_read_config(struct mem_ctl_info *mci)
{
	struct axp_mc_drvdata *drvdata = mci->pvt_info;
	uint32_t config, addr_ctrl, rank_ctrl;
	unsigned int i, cs_struct, cs_size;
	struct dimm_info *dimm;

	config = readl(drvdata->base + SDRAM_CONFIG_REG);
	if (config & SDRAM_CONFIG_BUS_WIDTH_MASK)
		/* 64 bit */
		drvdata->width = 8;
	else
		/* 32 bit */
		drvdata->width = 4;

	addr_ctrl = readl(drvdata->base + SDRAM_ADDR_CTRL_REG);
	rank_ctrl = readl(drvdata->base + SDRAM_RANK_CTRL_REG);
	for (i = 0; i < SDRAM_NUM_CS; i++) {
		dimm = mci->dimms[i];

		if (!(rank_ctrl & SDRAM_RANK_CTRL_EXIST_MASK(i)))
			continue;

		drvdata->cs_addr_sel[i] =
			!!(addr_ctrl & SDRAM_ADDR_CTRL_ADDR_SEL_MASK(i));

		cs_struct = (addr_ctrl & SDRAM_ADDR_CTRL_STRUCT_MASK(i)) >> SDRAM_ADDR_CTRL_STRUCT_OFFSET(i);
		cs_size   = ((addr_ctrl & SDRAM_ADDR_CTRL_SIZE_HIGH_MASK(i)) >> (SDRAM_ADDR_CTRL_SIZE_HIGH_OFFSET(i) - 2) |
			    ((addr_ctrl & SDRAM_ADDR_CTRL_SIZE_LOW_MASK(i)) >> SDRAM_ADDR_CTRL_SIZE_LOW_OFFSET(i)));

		switch (cs_size) {
		case 0: /* 2GBit */
			dimm->nr_pages = 524288;
			break;
		case 1: /* 256MBit */
			dimm->nr_pages = 65536;
			break;
		case 2: /* 512MBit */
			dimm->nr_pages = 131072;
			break;
		case 3: /* 1GBit */
			dimm->nr_pages = 262144;
			break;
		case 4: /* 4GBit */
			dimm->nr_pages = 1048576;
			break;
		case 5: /* 8GBit */
			dimm->nr_pages = 2097152;
			break;
		}
		dimm->grain = 8;
		dimm->dtype = cs_struct ? DEV_X16 : DEV_X8;
		dimm->mtype = (config & SDRAM_CONFIG_REGISTERED_MASK) ?
			MEM_RDDR3 : MEM_DDR3;
		dimm->edac_mode = EDAC_SECDED;
	}
}

static const struct of_device_id axp_mc_of_match[] = {
	{.compatible = "marvell,armada-xp-sdram-controller",},
	{},
};
MODULE_DEVICE_TABLE(of, axp_mc_of_match);

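/*
 * The MC is only registered when the controller reports ECC as enabled;
 * otherwise there is nothing to report and probing fails with -EINVAL.
 */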
static int axp_mc_probe(struct platform_device *pdev)
{
	struct axp_mc_drvdata *drvdata;
	struct edac_mc_layer layers[1];
	const struct of_device_id *id;
	struct mem_ctl_info *mci;
	void __iomem *base;
	uint32_t config;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base)) {
		dev_err(&pdev->dev, "Unable to map regs\n");
		return PTR_ERR(base);
	}

	config = readl(base + SDRAM_CONFIG_REG);
	if (!(config & SDRAM_CONFIG_ECC_MASK)) {
		dev_warn(&pdev->dev, "SDRAM ECC is not enabled\n");
		return -EINVAL;
	}

	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
	layers[0].size = SDRAM_NUM_CS;
	layers[0].is_virt_csrow = true;

	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*drvdata));
	if (!mci)
		return -ENOMEM;

	drvdata = mci->pvt_info;
	drvdata->base = base;
	mci->pdev = &pdev->dev;
	platform_set_drvdata(pdev, mci);

	id = of_match_device(axp_mc_of_match, &pdev->dev);
	mci->edac_check = axp_mc_check;
	mci->mtype_cap = MEM_FLAG_DDR3;
	mci->edac_cap = EDAC_FLAG_SECDED;
	mci->mod_name = pdev->dev.driver->name;
	mci->ctl_name = id ? id->compatible : "unknown";
	mci->dev_name = dev_name(&pdev->dev);
	mci->scrub_mode = SCRUB_NONE;

	axp_mc_read_config(mci);

	/* These SoCs have a reduced width bus */
	if (of_machine_is_compatible("marvell,armada380") ||
	    of_machine_is_compatible("marvell,armadaxp-98dx3236"))
		drvdata->width /= 2;

	/*
	 * Configure the SBE threshold; it seems that SBEs are not captured
	 * otherwise.
	 */
	writel(1 << SDRAM_ERR_CTRL_THR_OFFSET, drvdata->base + SDRAM_ERR_CTRL_REG);

	/* clear cause registers */
	writel(~(SDRAM_ERR_CAUSE_DBE_MASK | SDRAM_ERR_CAUSE_SBE_MASK), drvdata->base + SDRAM_ERR_CAUSE_ERR_REG);
	writel(~(SDRAM_ERR_CAUSE_DBE_MASK | SDRAM_ERR_CAUSE_SBE_MASK), drvdata->base + SDRAM_ERR_CAUSE_MSG_REG);

	/* clear counter registers */
	writel(0, drvdata->base + SDRAM_ERR_SBE_COUNT_REG);
	writel(0, drvdata->base + SDRAM_ERR_DBE_COUNT_REG);

	if (edac_mc_add_mc(mci)) {
		edac_mc_free(mci);
		return -EINVAL;
	}
	edac_op_state = EDAC_OPSTATE_POLL;

	return 0;
}

static int axp_mc_remove(struct platform_device *pdev)
{
	struct mem_ctl_info *mci = platform_get_drvdata(pdev);

	edac_mc_del_mc(&pdev->dev);
	edac_mc_free(mci);
	platform_set_drvdata(pdev, NULL);

	return 0;
}

static struct platform_driver axp_mc_driver = {
	.probe = axp_mc_probe,
	.remove = axp_mc_remove,
	.driver = {
		.name = "armada_xp_mc_edac",
		.of_match_table = of_match_ptr(axp_mc_of_match),
	},
};

/************************ EDAC Device (L2 Cache) ***************************/

struct aurora_l2_drvdata {
	void __iomem *base;

	char msg[128];

	/* error injection via debugfs */
	uint32_t inject_addr;
	uint32_t inject_mask;
	uint8_t inject_ctl;

	struct dentry *debugfs;
};

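/*
 * With CONFIG_EDAC_DEBUG the injection registers are rewritten from the
 * debugfs-provided address/mask/control values on every poll cycle, see
 * aurora_l2_poll().
 */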
#ifdef CONFIG_EDAC_DEBUG
static void aurora_l2_inject(struct aurora_l2_drvdata *drvdata)
{
	drvdata->inject_addr &= AURORA_ERR_INJECT_CTL_ADDR_MASK;
	drvdata->inject_ctl &= AURORA_ERR_INJECT_CTL_EN_MASK;
	writel(0, drvdata->base + AURORA_ERR_INJECT_CTL_REG);
	writel(drvdata->inject_mask, drvdata->base + AURORA_ERR_INJECT_MASK_REG);
	writel(drvdata->inject_addr | drvdata->inject_ctl, drvdata->base + AURORA_ERR_INJECT_CTL_REG);
}
#endif

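/*
 * Read and clear the CE/UE counters, decode the capture registers for the
 * most recent error, then report whatever backlog remains.
 */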
static void aurora_l2_check(struct edac_device_ctl_info *dci)
{
	struct aurora_l2_drvdata *drvdata = dci->pvt_info;
	uint32_t cnt, src, txn, err, attr_cap, addr_cap, way_cap;
	unsigned int cnt_ce, cnt_ue;
	char *msg = drvdata->msg;
	size_t size = sizeof(drvdata->msg);
	size_t len = 0;

	cnt = readl(drvdata->base + AURORA_ERR_CNT_REG);
	attr_cap = readl(drvdata->base + AURORA_ERR_ATTR_CAP_REG);
	addr_cap = readl(drvdata->base + AURORA_ERR_ADDR_CAP_REG);
	way_cap = readl(drvdata->base + AURORA_ERR_WAY_CAP_REG);

	cnt_ce = (cnt & AURORA_ERR_CNT_CE_MASK) >> AURORA_ERR_CNT_CE_OFFSET;
	cnt_ue = (cnt & AURORA_ERR_CNT_UE_MASK) >> AURORA_ERR_CNT_UE_OFFSET;
	/* clear error counter registers */
	if (cnt_ce || cnt_ue)
		writel(AURORA_ERR_CNT_CLR, drvdata->base + AURORA_ERR_CNT_REG);

	if (!(attr_cap & AURORA_ERR_ATTR_CAP_VALID))
		goto clear_remaining;

	src = (attr_cap & AURORA_ERR_ATTR_SRC_MSK) >> AURORA_ERR_ATTR_SRC_OFF;
	if (src <= 3)
		len += scnprintf(msg+len, size-len, "src=CPU%d ", src);
	else
		len += scnprintf(msg+len, size-len, "src=IO ");

	txn =  (attr_cap & AURORA_ERR_ATTR_TXN_MSK) >> AURORA_ERR_ATTR_TXN_OFF;
	switch (txn) {
	case 0:
		len += scnprintf(msg+len, size-len, "txn=Data-Read ");
		break;
	case 1:
		len += scnprintf(msg+len, size-len, "txn=Isn-Read ");
		break;
	case 2:
		len += scnprintf(msg+len, size-len, "txn=Clean-Flush ");
		break;
	case 3:
		len += scnprintf(msg+len, size-len, "txn=Eviction ");
		break;
	case 4:
		len += scnprintf(msg+len, size-len,
				"txn=Read-Modify-Write ");
		break;
	}

	err = (attr_cap & AURORA_ERR_ATTR_ERR_MSK) >> AURORA_ERR_ATTR_ERR_OFF;
	switch (err) {
	case 0:
		len += scnprintf(msg+len, size-len, "err=CorrECC ");
		break;
	case 1:
		len += scnprintf(msg+len, size-len, "err=UnCorrECC ");
		break;
	case 2:
		len += scnprintf(msg+len, size-len, "err=TagParity ");
		break;
	}

	len += scnprintf(msg+len, size-len, "addr=0x%x ", addr_cap & AURORA_ERR_ADDR_CAP_ADDR_MASK);
	len += scnprintf(msg+len, size-len, "index=0x%x ", (way_cap & AURORA_ERR_WAY_IDX_MSK) >> AURORA_ERR_WAY_IDX_OFF);
	len += scnprintf(msg+len, size-len, "way=0x%x", (way_cap & AURORA_ERR_WAY_CAP_WAY_MASK) >> AURORA_ERR_WAY_CAP_WAY_OFFSET);

	/* clear error capture registers */
	writel(AURORA_ERR_ATTR_CAP_VALID, drvdata->base + AURORA_ERR_ATTR_CAP_REG);
	if (err) {
		/* UnCorrECC or TagParity */
		if (cnt_ue)
			cnt_ue--;
		edac_device_handle_ue(dci, 0, 0, drvdata->msg);
	} else {
		if (cnt_ce)
			cnt_ce--;
		edac_device_handle_ce(dci, 0, 0, drvdata->msg);
	}

clear_remaining:
	/* report remaining errors */
	while (cnt_ue--)
		edac_device_handle_ue(dci, 0, 0, "details unavailable (multiple errors)");
	while (cnt_ce--)
		edac_device_handle_ce(dci, 0, 0, "details unavailable (multiple errors)");
}

static void aurora_l2_poll(struct edac_device_ctl_info *dci)
{
#ifdef CONFIG_EDAC_DEBUG
	struct aurora_l2_drvdata *drvdata = dci->pvt_info;
#endif

	aurora_l2_check(dci);
#ifdef CONFIG_EDAC_DEBUG
	aurora_l2_inject(drvdata);
#endif
}

static const struct of_device_id aurora_l2_of_match[] = {
	{.compatible = "marvell,aurora-system-cache",},
	{},
};
MODULE_DEVICE_TABLE(of, aurora_l2_of_match);

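/*
 * Unlike the MC driver, L2 probing only warns when tag parity or data ECC is
 * disabled and still registers the EDAC device.
 */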
static int aurora_l2_probe(struct platform_device *pdev)
{
	struct aurora_l2_drvdata *drvdata;
	struct edac_device_ctl_info *dci;
	const struct of_device_id *id;
	uint32_t l2x0_aux_ctrl;
	void __iomem *base;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base)) {
		dev_err(&pdev->dev, "Unable to map regs\n");
		return PTR_ERR(base);
	}

	l2x0_aux_ctrl = readl(base + L2X0_AUX_CTRL);
	if (!(l2x0_aux_ctrl & AURORA_ACR_PARITY_EN))
		dev_warn(&pdev->dev, "tag parity is not enabled\n");
	if (!(l2x0_aux_ctrl & AURORA_ACR_ECC_EN))
		dev_warn(&pdev->dev, "data ECC is not enabled\n");

	dci = edac_device_alloc_ctl_info(sizeof(*drvdata),
					 "cpu", 1, "L", 1, 2, NULL, 0, 0);
	if (!dci)
		return -ENOMEM;

	drvdata = dci->pvt_info;
	drvdata->base = base;
	dci->dev = &pdev->dev;
	platform_set_drvdata(pdev, dci);

	id = of_match_device(aurora_l2_of_match, &pdev->dev);
	dci->edac_check = aurora_l2_poll;
	dci->mod_name = pdev->dev.driver->name;
	dci->ctl_name = id ? id->compatible : "unknown";
	dci->dev_name = dev_name(&pdev->dev);

	/* clear registers */
	writel(AURORA_ERR_CNT_CLR, drvdata->base + AURORA_ERR_CNT_REG);
	writel(AURORA_ERR_ATTR_CAP_VALID, drvdata->base + AURORA_ERR_ATTR_CAP_REG);

	if (edac_device_add_device(dci)) {
		edac_device_free_ctl_info(dci);
		return -EINVAL;
	}

#ifdef CONFIG_EDAC_DEBUG
	drvdata->debugfs = edac_debugfs_create_dir(dev_name(&pdev->dev));
	if (drvdata->debugfs) {
		edac_debugfs_create_x32("inject_addr", 0644,
					drvdata->debugfs,
					&drvdata->inject_addr);
		edac_debugfs_create_x32("inject_mask", 0644,
					drvdata->debugfs,
					&drvdata->inject_mask);
		edac_debugfs_create_x8("inject_ctl", 0644,
				       drvdata->debugfs, &drvdata->inject_ctl);
	}
#endif

	return 0;
}

static int aurora_l2_remove(struct platform_device *pdev)
{
	struct edac_device_ctl_info *dci = platform_get_drvdata(pdev);
#ifdef CONFIG_EDAC_DEBUG
	struct aurora_l2_drvdata *drvdata = dci->pvt_info;

	edac_debugfs_remove_recursive(drvdata->debugfs);
#endif
	edac_device_del_device(&pdev->dev);
	edac_device_free_ctl_info(dci);
	platform_set_drvdata(pdev, NULL);

	return 0;
}

static struct platform_driver aurora_l2_driver = {
	.probe = aurora_l2_probe,
	.remove = aurora_l2_remove,
	.driver = {
		.name = "aurora_l2_edac",
		.of_match_table = of_match_ptr(aurora_l2_of_match),
	},
};

/************************ Driver registration ******************************/

static struct platform_driver * const drivers[] = {
	&axp_mc_driver,
	&aurora_l2_driver,
};

static int __init armada_xp_edac_init(void)
{
	int res;

	/* let the GHES (firmware-first) EDAC driver take precedence */
	if (ghes_get_devices())
		return -EBUSY;

	/* only polling is supported */
	edac_op_state = EDAC_OPSTATE_POLL;

	res = platform_register_drivers(drivers, ARRAY_SIZE(drivers));
	if (res)
		pr_warn("Armada XP EDAC drivers failed to register\n");

	return res;
}
module_init(armada_xp_edac_init);

static void __exit armada_xp_edac_exit(void)
{
	platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
}
module_exit(armada_xp_edac_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Pengutronix");
MODULE_DESCRIPTION("EDAC Drivers for Marvell Armada XP SDRAM and L2 Cache Controller");