// SPDX-License-Identifier: GPL-2.0

/*
 * EDAC driver for DMC-520 memory controller.
 *
 * The driver supports 10 interrupt lines,
 * though only dram_ecc_errc and dram_ecc_errd are currently handled.
 *
 * Authors:	Rui Zhao <ruizhao@microsoft.com>
 *		Lei Wang <lewan@microsoft.com>
 *		Shiping Ji <shji@microsoft.com>
 */

#include <linux/bitfield.h>
#include <linux/edac.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include "edac_mc.h"

/* DMC-520 registers */
#define REG_OFFSET_FEATURE_CONFIG			0x130
#define REG_OFFSET_ECC_ERRC_COUNT_31_00			0x158
#define REG_OFFSET_ECC_ERRC_COUNT_63_32			0x15C
#define REG_OFFSET_ECC_ERRD_COUNT_31_00			0x160
#define REG_OFFSET_ECC_ERRD_COUNT_63_32			0x164
#define REG_OFFSET_INTERRUPT_CONTROL			0x500
#define REG_OFFSET_INTERRUPT_CLR			0x508
#define REG_OFFSET_INTERRUPT_STATUS			0x510
#define REG_OFFSET_DRAM_ECC_ERRC_INT_INFO_31_00		0x528
#define REG_OFFSET_DRAM_ECC_ERRC_INT_INFO_63_32		0x52C
#define REG_OFFSET_DRAM_ECC_ERRD_INT_INFO_31_00		0x530
#define REG_OFFSET_DRAM_ECC_ERRD_INT_INFO_63_32		0x534
#define REG_OFFSET_ADDRESS_CONTROL_NOW			0x1010
#define REG_OFFSET_MEMORY_TYPE_NOW			0x1128
#define REG_OFFSET_SCRUB_CONTROL0_NOW			0x1170
#define REG_OFFSET_FORMAT_CONTROL			0x18

/* DMC-520 types, masks and bitfields */
#define RAM_ECC_INT_CE_BIT			BIT(0)
#define RAM_ECC_INT_UE_BIT			BIT(1)
#define DRAM_ECC_INT_CE_BIT			BIT(2)
#define DRAM_ECC_INT_UE_BIT			BIT(3)
#define FAILED_ACCESS_INT_BIT			BIT(4)
#define FAILED_PROG_INT_BIT			BIT(5)
#define LINK_ERR_INT_BIT			BIT(6)
#define TEMPERATURE_EVENT_INT_BIT		BIT(7)
#define ARCH_FSM_INT_BIT			BIT(8)
#define PHY_REQUEST_INT_BIT			BIT(9)
#define MEMORY_WIDTH_MASK			GENMASK(1, 0)
#define SCRUB_TRIGGER0_NEXT_MASK		GENMASK(1, 0)
#define REG_FIELD_DRAM_ECC_ENABLED		GENMASK(1, 0)
#define REG_FIELD_MEMORY_TYPE			GENMASK(2, 0)
#define REG_FIELD_DEVICE_WIDTH			GENMASK(9, 8)
#define REG_FIELD_ADDRESS_CONTROL_COL		GENMASK(2, 0)
#define REG_FIELD_ADDRESS_CONTROL_ROW		GENMASK(10, 8)
#define REG_FIELD_ADDRESS_CONTROL_BANK		GENMASK(18, 16)
#define REG_FIELD_ADDRESS_CONTROL_RANK		GENMASK(25, 24)
#define REG_FIELD_ERR_INFO_LOW_VALID		BIT(0)
#define REG_FIELD_ERR_INFO_LOW_COL		GENMASK(10, 1)
#define REG_FIELD_ERR_INFO_LOW_ROW		GENMASK(28, 11)
#define REG_FIELD_ERR_INFO_LOW_RANK		GENMASK(31, 29)
#define REG_FIELD_ERR_INFO_HIGH_BANK		GENMASK(3, 0)
#define REG_FIELD_ERR_INFO_HIGH_VALID		BIT(31)

#define DRAM_ADDRESS_CONTROL_MIN_COL_BITS	8
#define DRAM_ADDRESS_CONTROL_MIN_ROW_BITS	11

#define DMC520_SCRUB_TRIGGER_ERR_DETECT		2
#define DMC520_SCRUB_TRIGGER_IDLE		3

/* Driver settings */
/*
 * The longest message is "rank:7 bank:15 row:262143 col:1023"
 * (3-bit rank, 4-bit bank, 18-bit row, 10-bit column): 34 characters,
 * so a 40-byte buffer is more than enough.
 */
#define DMC520_MSG_BUF_SIZE			40
#define EDAC_MOD_NAME				"dmc520-edac"
#define EDAC_CTL_NAME				"dmc520"

/* The data bus width for the attached memory chips. */
enum dmc520_mem_width {
	MEM_WIDTH_X32 = 2,
	MEM_WIDTH_X64 = 3
};

/* Memory type */
enum dmc520_mem_type {
	MEM_TYPE_DDR3 = 1,
	MEM_TYPE_DDR4 = 2
};

/* Memory device width */
enum dmc520_dev_width {
	DEV_WIDTH_X4 = 0,
	DEV_WIDTH_X8 = 1,
	DEV_WIDTH_X16 = 2
};

struct ecc_error_info {
	u32 col;
	u32 row;
	u32 bank;
	u32 rank;
};

/* The interrupt config */
struct dmc520_irq_config {
	char *name;
	int mask;
};

/* The interrupt mappings */
static struct dmc520_irq_config dmc520_irq_configs[] = {
	{
		.name = "ram_ecc_errc",
		.mask = RAM_ECC_INT_CE_BIT
	},
	{
		.name = "ram_ecc_errd",
		.mask = RAM_ECC_INT_UE_BIT
	},
	{
		.name = "dram_ecc_errc",
		.mask = DRAM_ECC_INT_CE_BIT
	},
	{
		.name = "dram_ecc_errd",
		.mask = DRAM_ECC_INT_UE_BIT
	},
	{
		.name = "failed_access",
		.mask = FAILED_ACCESS_INT_BIT
	},
	{
		.name = "failed_prog",
		.mask = FAILED_PROG_INT_BIT
	},
	{
		.name = "link_err",
		.mask = LINK_ERR_INT_BIT
	},
	{
		.name = "temperature_event",
		.mask = TEMPERATURE_EVENT_INT_BIT
	},
	{
		.name = "arch_fsm",
		.mask = ARCH_FSM_INT_BIT
	},
	{
		.name = "phy_request",
		.mask = PHY_REQUEST_INT_BIT
	}
};

#define NUMBER_OF_IRQS				ARRAY_SIZE(dmc520_irq_configs)

/*
 * The EDAC driver private data.
 * error_lock protects mci->error_desc against concurrent writes
 * through edac_mc_handle_error().
 */
struct dmc520_edac {
	void __iomem *reg_base;
	spinlock_t error_lock;
	u32 mem_width_in_bytes;
	int irqs[NUMBER_OF_IRQS];
	int masks[NUMBER_OF_IRQS];
};

static int dmc520_mc_idx;

static u32 dmc520_read_reg(struct dmc520_edac *pvt, u32 offset)
{
	return readl(pvt->reg_base + offset);
}

static void dmc520_write_reg(struct dmc520_edac *pvt, u32 val, u32 offset)
{
	writel(val, pvt->reg_base + offset);
}

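/* Sum the per-rank error counters, packed one byte per rank. */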
static u32 dmc520_calc_dram_ecc_error(u32 value)
{
	u32 total = 0;

	/* Each rank's error counter takes one byte. */
	while (value > 0) {
		total += (value & 0xFF);
		value >>= 8;
	}
	return total;
}

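/*
 * Return the total CE or UE count accumulated across all ranks, and
 * reset the hardware counters.
 */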
static u32 dmc520_get_dram_ecc_error_count(struct dmc520_edac *pvt,
					   bool is_ce)
{
	u32 reg_offset_low, reg_offset_high;
	u32 err_low, err_high;
	u32 err_count;

	reg_offset_low = is_ce ? REG_OFFSET_ECC_ERRC_COUNT_31_00 :
				 REG_OFFSET_ECC_ERRD_COUNT_31_00;
	reg_offset_high = is_ce ? REG_OFFSET_ECC_ERRC_COUNT_63_32 :
				  REG_OFFSET_ECC_ERRD_COUNT_63_32;

	err_low = dmc520_read_reg(pvt, reg_offset_low);
	err_high = dmc520_read_reg(pvt, reg_offset_high);
	/* Reset error counters */
	dmc520_write_reg(pvt, 0, reg_offset_low);
	dmc520_write_reg(pvt, 0, reg_offset_high);

	err_count = dmc520_calc_dram_ecc_error(err_low) +
		    dmc520_calc_dram_ecc_error(err_high);

	return err_count;
}

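/*
 * Decode the rank/bank/row/column of the most recent CE or UE from the
 * interrupt info registers; zero the info when the valid bits are not set.
 */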
static void dmc520_get_dram_ecc_error_info(struct dmc520_edac *pvt,
					   bool is_ce,
					   struct ecc_error_info *info)
{
	u32 reg_offset_low, reg_offset_high;
	u32 reg_val_low, reg_val_high;
	bool valid;

	reg_offset_low = is_ce ? REG_OFFSET_DRAM_ECC_ERRC_INT_INFO_31_00 :
				 REG_OFFSET_DRAM_ECC_ERRD_INT_INFO_31_00;
	reg_offset_high = is_ce ? REG_OFFSET_DRAM_ECC_ERRC_INT_INFO_63_32 :
				  REG_OFFSET_DRAM_ECC_ERRD_INT_INFO_63_32;

	reg_val_low = dmc520_read_reg(pvt, reg_offset_low);
	reg_val_high = dmc520_read_reg(pvt, reg_offset_high);

	valid = (FIELD_GET(REG_FIELD_ERR_INFO_LOW_VALID, reg_val_low) != 0) &&
		(FIELD_GET(REG_FIELD_ERR_INFO_HIGH_VALID, reg_val_high) != 0);

	if (valid) {
		info->col = FIELD_GET(REG_FIELD_ERR_INFO_LOW_COL, reg_val_low);
		info->row = FIELD_GET(REG_FIELD_ERR_INFO_LOW_ROW, reg_val_low);
		info->rank = FIELD_GET(REG_FIELD_ERR_INFO_LOW_RANK, reg_val_low);
		info->bank = FIELD_GET(REG_FIELD_ERR_INFO_HIGH_BANK, reg_val_high);
	} else {
		memset(info, 0, sizeof(*info));
	}
}

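/* Check whether DRAM ECC is enabled in the feature config register. */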
static bool dmc520_is_ecc_enabled(void __iomem *reg_base)
{
	u32 reg_val = readl(reg_base + REG_OFFSET_FEATURE_CONFIG);

	return FIELD_GET(REG_FIELD_DRAM_ECC_ENABLED, reg_val);
}

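/* Report hardware scrubbing when either scrub trigger mode is programmed. */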
static enum scrub_type dmc520_get_scrub_type(struct dmc520_edac *pvt)
{
	enum scrub_type type = SCRUB_NONE;
	u32 reg_val, scrub_cfg;

	reg_val = dmc520_read_reg(pvt, REG_OFFSET_SCRUB_CONTROL0_NOW);
	scrub_cfg = FIELD_GET(SCRUB_TRIGGER0_NEXT_MASK, reg_val);

	if (scrub_cfg == DMC520_SCRUB_TRIGGER_ERR_DETECT ||
	    scrub_cfg == DMC520_SCRUB_TRIGGER_IDLE)
		type = SCRUB_HW_PROG;

	return type;
}

/* Get the memory data bus width, in number of bytes. */
static u32 dmc520_get_memory_width(struct dmc520_edac *pvt)
{
	enum dmc520_mem_width mem_width_field;
	u32 mem_width_in_bytes = 0;
	u32 reg_val;

	reg_val = dmc520_read_reg(pvt, REG_OFFSET_FORMAT_CONTROL);
	mem_width_field = FIELD_GET(MEMORY_WIDTH_MASK, reg_val);

	if (mem_width_field == MEM_WIDTH_X32)
		mem_width_in_bytes = 4;
	else if (mem_width_field == MEM_WIDTH_X64)
		mem_width_in_bytes = 8;
	return mem_width_in_bytes;
}

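/* Map the DMC-520 memory type field to the EDAC mem_type. */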
static enum mem_type dmc520_get_mtype(struct dmc520_edac *pvt)
{
	enum mem_type mt = MEM_UNKNOWN;
	enum dmc520_mem_type type;
	u32 reg_val;

	reg_val = dmc520_read_reg(pvt, REG_OFFSET_MEMORY_TYPE_NOW);
	type = FIELD_GET(REG_FIELD_MEMORY_TYPE, reg_val);

	switch (type) {
	case MEM_TYPE_DDR3:
		mt = MEM_DDR3;
		break;

	case MEM_TYPE_DDR4:
		mt = MEM_DDR4;
		break;
	}

	return mt;
}

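/* Map the DMC-520 device width field to the EDAC dev_type. */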
static enum dev_type dmc520_get_dtype(struct dmc520_edac *pvt)
{
	enum dmc520_dev_width device_width;
	enum dev_type dt = DEV_UNKNOWN;
	u32 reg_val;

	reg_val = dmc520_read_reg(pvt, REG_OFFSET_MEMORY_TYPE_NOW);
	device_width = FIELD_GET(REG_FIELD_DEVICE_WIDTH, reg_val);

	switch (device_width) {
	case DEV_WIDTH_X4:
		dt = DEV_X4;
		break;

	case DEV_WIDTH_X8:
		dt = DEV_X8;
		break;

	case DEV_WIDTH_X16:
		dt = DEV_X16;
		break;
	}

	return dt;
}

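/* The rank field encodes the number of rank address bits: 2^bits ranks. */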
static u32 dmc520_get_rank_count(void __iomem *reg_base)
{
	u32 reg_val, rank_bits;

	reg_val = readl(reg_base + REG_OFFSET_ADDRESS_CONTROL_NOW);
	rank_bits = FIELD_GET(REG_FIELD_ADDRESS_CONTROL_RANK, reg_val);

	return BIT(rank_bits);
}

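/*
 * Rank size in bytes: the data bus width in bytes shifted left by the
 * total number of column, row and bank address bits.
 */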
static u64 dmc520_get_rank_size(struct dmc520_edac *pvt)
{
	u32 reg_val, col_bits, row_bits, bank_bits;

	reg_val = dmc520_read_reg(pvt, REG_OFFSET_ADDRESS_CONTROL_NOW);

	col_bits = FIELD_GET(REG_FIELD_ADDRESS_CONTROL_COL, reg_val) +
		   DRAM_ADDRESS_CONTROL_MIN_COL_BITS;
	row_bits = FIELD_GET(REG_FIELD_ADDRESS_CONTROL_ROW, reg_val) +
		   DRAM_ADDRESS_CONTROL_MIN_ROW_BITS;
	bank_bits = FIELD_GET(REG_FIELD_ADDRESS_CONTROL_BANK, reg_val);

	return (u64)pvt->mem_width_in_bytes << (col_bits + row_bits + bank_bits);
}

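/* Read out and report pending DRAM ECC errors to the EDAC core. */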
static void dmc520_handle_dram_ecc_errors(struct mem_ctl_info *mci,
					  bool is_ce)
{
	struct dmc520_edac *pvt = mci->pvt_info;
	char message[DMC520_MSG_BUF_SIZE];
	struct ecc_error_info info;
	u32 cnt;

	dmc520_get_dram_ecc_error_info(pvt, is_ce, &info);

	cnt = dmc520_get_dram_ecc_error_count(pvt, is_ce);
	if (!cnt)
		return;

	snprintf(message, ARRAY_SIZE(message),
		 "rank:%u bank:%u row:%u col:%u",
		 info.rank, info.bank,
		 info.row, info.col);

	spin_lock(&pvt->error_lock);
	edac_mc_handle_error((is_ce ? HW_EVENT_ERR_CORRECTED :
			     HW_EVENT_ERR_UNCORRECTED),
			     mci, cnt, 0, 0, 0, info.rank, -1, -1,
			     message, "");
	spin_unlock(&pvt->error_lock);
}

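/* Handle one DRAM ECC interrupt source, then acknowledge it in hardware. */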
static irqreturn_t dmc520_edac_dram_ecc_isr(int irq, struct mem_ctl_info *mci,
					    bool is_ce)
{
	struct dmc520_edac *pvt = mci->pvt_info;
	u32 i_mask;

	i_mask = is_ce ? DRAM_ECC_INT_CE_BIT : DRAM_ECC_INT_UE_BIT;

	dmc520_handle_dram_ecc_errors(mci, is_ce);

	dmc520_write_reg(pvt, i_mask, REG_OFFSET_INTERRUPT_CLR);

	return IRQ_HANDLED;
}

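/*
 * Dispatch CE and/or UE handling for the interrupt lines this IRQ
 * services, based on the pending status bits.
 */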
static irqreturn_t dmc520_edac_dram_all_isr(int irq, struct mem_ctl_info *mci,
					    u32 irq_mask)
{
	struct dmc520_edac *pvt = mci->pvt_info;
	irqreturn_t irq_ret = IRQ_NONE;
	u32 status;

	status = dmc520_read_reg(pvt, REG_OFFSET_INTERRUPT_STATUS);

	if ((irq_mask & DRAM_ECC_INT_CE_BIT) &&
	    (status & DRAM_ECC_INT_CE_BIT))
		irq_ret = dmc520_edac_dram_ecc_isr(irq, mci, true);

	if ((irq_mask & DRAM_ECC_INT_UE_BIT) &&
	    (status & DRAM_ECC_INT_UE_BIT))
		irq_ret = dmc520_edac_dram_ecc_isr(irq, mci, false);

	return irq_ret;
}

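/* Shared top-level handler: look up the mask registered for this irq. */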
static irqreturn_t dmc520_isr(int irq, void *data)
{
	struct mem_ctl_info *mci = data;
	struct dmc520_edac *pvt = mci->pvt_info;
	u32 mask = 0;
	int idx;

	for (idx = 0; idx < NUMBER_OF_IRQS; idx++) {
		if (pvt->irqs[idx] == irq) {
			mask = pvt->masks[idx];
			break;
		}
	}
	return dmc520_edac_dram_all_isr(irq, mci, mask);
}

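/* Populate each chip-select row's DIMMs with geometry and ECC mode. */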
static void dmc520_init_csrow(struct mem_ctl_info *mci)
{
	struct dmc520_edac *pvt = mci->pvt_info;
	struct csrow_info *csi;
	struct dimm_info *dimm;
	u32 pages_per_rank;
	enum dev_type dt;
	enum mem_type mt;
	int row, ch;
	u64 rs;

	dt = dmc520_get_dtype(pvt);
	mt = dmc520_get_mtype(pvt);
	rs = dmc520_get_rank_size(pvt);
	pages_per_rank = rs >> PAGE_SHIFT;

	for (row = 0; row < mci->nr_csrows; row++) {
		csi = mci->csrows[row];

		for (ch = 0; ch < csi->nr_channels; ch++) {
			dimm		= csi->channels[ch]->dimm;
			dimm->grain	= pvt->mem_width_in_bytes;
			dimm->dtype	= dt;
			dimm->mtype	= mt;
			dimm->edac_mode	= EDAC_SECDED;
			dimm->nr_pages	= pages_per_rank / csi->nr_channels;
		}
	}
}

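/*
 * Interrupt lines are looked up by name, so the device tree node is
 * expected to name the lines it wires up, e.g. (illustrative snippet,
 * assuming the arm,dmc-520 binding):
 *
 *	interrupt-names = "dram_ecc_errc", "dram_ecc_errd";
 */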
static int dmc520_edac_probe(struct platform_device *pdev)
{
	bool registered[NUMBER_OF_IRQS] = { false };
	int irqs[NUMBER_OF_IRQS] = { -ENXIO };
	int masks[NUMBER_OF_IRQS] = { 0 };
	struct edac_mc_layer layers[1];
	struct dmc520_edac *pvt = NULL;
	struct mem_ctl_info *mci;
	void __iomem *reg_base;
	u32 irq_mask_all = 0;
	struct resource *res;
	struct device *dev;
	int ret, idx, irq;
	u32 reg_val;

	/* Parse the device node */
	dev = &pdev->dev;

	for (idx = 0; idx < NUMBER_OF_IRQS; idx++) {
		irq = platform_get_irq_byname(pdev, dmc520_irq_configs[idx].name);
		irqs[idx] = irq;
		masks[idx] = dmc520_irq_configs[idx].mask;
		if (irq >= 0) {
			irq_mask_all |= dmc520_irq_configs[idx].mask;
			edac_dbg(0, "Discovered %s, irq: %d.\n", dmc520_irq_configs[idx].name, irq);
		}
	}

	if (!irq_mask_all) {
		edac_printk(KERN_ERR, EDAC_MOD_NAME,
			    "At least one valid interrupt line is expected.\n");
		return -EINVAL;
	}

	/* Initialize dmc520 edac */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	reg_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(reg_base))
		return PTR_ERR(reg_base);

	if (!dmc520_is_ecc_enabled(reg_base))
		return -ENXIO;

	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
	layers[0].size = dmc520_get_rank_count(reg_base);
	layers[0].is_virt_csrow = true;

	mci = edac_mc_alloc(dmc520_mc_idx++, ARRAY_SIZE(layers), layers, sizeof(*pvt));
	if (!mci) {
		edac_printk(KERN_ERR, EDAC_MOD_NAME,
			    "Failed to allocate memory for mc instance\n");
		ret = -ENOMEM;
		goto err;
	}

	pvt = mci->pvt_info;

	pvt->reg_base = reg_base;
	spin_lock_init(&pvt->error_lock);
	memcpy(pvt->irqs, irqs, sizeof(irqs));
	memcpy(pvt->masks, masks, sizeof(masks));

	platform_set_drvdata(pdev, mci);

	mci->pdev = dev;
	mci->mtype_cap		= MEM_FLAG_DDR3 | MEM_FLAG_DDR4;
	mci->edac_ctl_cap	= EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
	mci->edac_cap		= EDAC_FLAG_SECDED;
	mci->scrub_cap		= SCRUB_FLAG_HW_SRC;
	mci->scrub_mode		= dmc520_get_scrub_type(pvt);
	mci->ctl_name		= EDAC_CTL_NAME;
	mci->dev_name		= dev_name(mci->pdev);
	mci->mod_name		= EDAC_MOD_NAME;

	edac_op_state = EDAC_OPSTATE_INT;

	pvt->mem_width_in_bytes = dmc520_get_memory_width(pvt);

	dmc520_init_csrow(mci);

	/* Clear interrupts, not affecting other unrelated interrupts */
	reg_val = dmc520_read_reg(pvt, REG_OFFSET_INTERRUPT_CONTROL);
	dmc520_write_reg(pvt, reg_val & (~irq_mask_all),
			 REG_OFFSET_INTERRUPT_CONTROL);
	dmc520_write_reg(pvt, irq_mask_all, REG_OFFSET_INTERRUPT_CLR);

	for (idx = 0; idx < NUMBER_OF_IRQS; idx++) {
		irq = irqs[idx];
		if (irq >= 0) {
			ret = devm_request_irq(&pdev->dev, irq,
					       dmc520_isr, IRQF_SHARED,
					       dev_name(&pdev->dev), mci);
			if (ret < 0) {
				edac_printk(KERN_ERR, EDAC_MOD_NAME,
					    "Failed to request irq %d\n", irq);
				goto err;
			}
			registered[idx] = true;
		}
	}

	/* Reset DRAM CE/UE counters */
	if (irq_mask_all & DRAM_ECC_INT_CE_BIT)
		dmc520_get_dram_ecc_error_count(pvt, true);

	if (irq_mask_all & DRAM_ECC_INT_UE_BIT)
		dmc520_get_dram_ecc_error_count(pvt, false);

	ret = edac_mc_add_mc(mci);
	if (ret) {
		edac_printk(KERN_ERR, EDAC_MOD_NAME,
			    "Failed to register with EDAC core\n");
		goto err;
	}

	/* Enable interrupts, not affecting other unrelated interrupts */
	dmc520_write_reg(pvt, reg_val | irq_mask_all,
			 REG_OFFSET_INTERRUPT_CONTROL);

	return 0;

err:
	for (idx = 0; idx < NUMBER_OF_IRQS; idx++) {
		if (registered[idx])
			devm_free_irq(&pdev->dev, pvt->irqs[idx], mci);
	}
	if (mci)
		edac_mc_free(mci);

	return ret;
}

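/* Disable and free the interrupt lines, then unregister from the EDAC core. */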
static int dmc520_edac_remove(struct platform_device *pdev)
{
	u32 reg_val, idx, irq_mask_all = 0;
	struct mem_ctl_info *mci;
	struct dmc520_edac *pvt;

	mci = platform_get_drvdata(pdev);
	pvt = mci->pvt_info;

	/* Collect the masks of the interrupt lines we actually requested */
	for (idx = 0; idx < NUMBER_OF_IRQS; idx++) {
		if (pvt->irqs[idx] >= 0)
			irq_mask_all |= pvt->masks[idx];
	}

	/*
	 * Disable our interrupts; the mask must be computed before this
	 * write, otherwise it would clear nothing.
	 */
	reg_val = dmc520_read_reg(pvt, REG_OFFSET_INTERRUPT_CONTROL);
	dmc520_write_reg(pvt, reg_val & (~irq_mask_all),
			 REG_OFFSET_INTERRUPT_CONTROL);

	/* Free the irqs */
	for (idx = 0; idx < NUMBER_OF_IRQS; idx++) {
		if (pvt->irqs[idx] >= 0)
			devm_free_irq(&pdev->dev, pvt->irqs[idx], mci);
	}

	edac_mc_del_mc(&pdev->dev);
	edac_mc_free(mci);

	return 0;
}

static const struct of_device_id dmc520_edac_driver_id[] = {
	{ .compatible = "arm,dmc-520", },
	{ /* end of table */ }
};

MODULE_DEVICE_TABLE(of, dmc520_edac_driver_id);

static struct platform_driver dmc520_edac_driver = {
	.driver = {
		.name = "dmc520",
		.of_match_table = dmc520_edac_driver_id,
	},

	.probe = dmc520_edac_probe,
	.remove = dmc520_edac_remove
};

module_platform_driver(dmc520_edac_driver);

MODULE_AUTHOR("Rui Zhao <ruizhao@microsoft.com>");
MODULE_AUTHOR("Lei Wang <lewan@microsoft.com>");
MODULE_AUTHOR("Shiping Ji <shji@microsoft.com>");
MODULE_DESCRIPTION("DMC-520 ECC driver");
MODULE_LICENSE("GPL v2");