xref: /openbmc/linux/drivers/edac/fsl_ddr_edac.c (revision 239480ab)
1 /*
2  * Freescale Memory Controller kernel module
3  *
4  * Support Power-based SoCs including MPC85xx, MPC86xx, MPC83xx and
5  * ARM-based Layerscape SoCs including LS2xxx. Originally split
6  * out from mpc85xx_edac EDAC driver.
7  *
8  * Parts Copyrighted (c) 2013 by Freescale Semiconductor, Inc.
9  *
10  * Author: Dave Jiang <djiang@mvista.com>
11  *
12  * 2006-2007 (c) MontaVista Software, Inc. This file is licensed under
13  * the terms of the GNU General Public License version 2. This program
14  * is licensed "as is" without any warranty of any kind, whether express
15  * or implied.
16  */
17 #include <linux/module.h>
18 #include <linux/init.h>
19 #include <linux/interrupt.h>
20 #include <linux/ctype.h>
21 #include <linux/io.h>
22 #include <linux/mod_devicetable.h>
23 #include <linux/edac.h>
24 #include <linux/smp.h>
25 #include <linux/gfp.h>
26 
27 #include <linux/of_platform.h>
28 #include <linux/of_device.h>
29 #include <linux/of_address.h>
30 #include "edac_module.h"
31 #include "fsl_ddr_edac.h"
32 
#define EDAC_MOD_STR	"fsl_ddr_edac"

/* Next EDAC memory-controller index handed out by fsl_mc_err_probe(). */
static int edac_mc_idx;

/* Controller state captured at probe time and restored on remove. */
static u32 orig_ddr_err_disable;	/* FSL_MC_ERR_DISABLE contents */
static u32 orig_ddr_err_sbe;		/* SBE threshold field (bits 16-23) */
/* Register endianness; set from the "little-endian" DT property (default BE). */
static bool little_endian;
40 
41 static inline u32 ddr_in32(void __iomem *addr)
42 {
43 	return little_endian ? ioread32(addr) : ioread32be(addr);
44 }
45 
46 static inline void ddr_out32(void __iomem *addr, u32 value)
47 {
48 	if (little_endian)
49 		iowrite32(value, addr);
50 	else
51 		iowrite32be(value, addr);
52 }
53 
54 /************************ MC SYSFS parts ***********************************/
55 
/* Map a sysfs struct device back to the mem_ctl_info that embeds it. */
#define to_mci(k) container_of(k, struct mem_ctl_info, dev)
57 
58 static ssize_t fsl_mc_inject_data_hi_show(struct device *dev,
59 					  struct device_attribute *mattr,
60 					  char *data)
61 {
62 	struct mem_ctl_info *mci = to_mci(dev);
63 	struct fsl_mc_pdata *pdata = mci->pvt_info;
64 	return sprintf(data, "0x%08x",
65 		       ddr_in32(pdata->mc_vbase + FSL_MC_DATA_ERR_INJECT_HI));
66 }
67 
68 static ssize_t fsl_mc_inject_data_lo_show(struct device *dev,
69 					  struct device_attribute *mattr,
70 					      char *data)
71 {
72 	struct mem_ctl_info *mci = to_mci(dev);
73 	struct fsl_mc_pdata *pdata = mci->pvt_info;
74 	return sprintf(data, "0x%08x",
75 		       ddr_in32(pdata->mc_vbase + FSL_MC_DATA_ERR_INJECT_LO));
76 }
77 
78 static ssize_t fsl_mc_inject_ctrl_show(struct device *dev,
79 				       struct device_attribute *mattr,
80 					   char *data)
81 {
82 	struct mem_ctl_info *mci = to_mci(dev);
83 	struct fsl_mc_pdata *pdata = mci->pvt_info;
84 	return sprintf(data, "0x%08x",
85 		       ddr_in32(pdata->mc_vbase + FSL_MC_ECC_ERR_INJECT));
86 }
87 
88 static ssize_t fsl_mc_inject_data_hi_store(struct device *dev,
89 					   struct device_attribute *mattr,
90 					       const char *data, size_t count)
91 {
92 	struct mem_ctl_info *mci = to_mci(dev);
93 	struct fsl_mc_pdata *pdata = mci->pvt_info;
94 	unsigned long val;
95 	int rc;
96 
97 	if (isdigit(*data)) {
98 		rc = kstrtoul(data, 0, &val);
99 		if (rc)
100 			return rc;
101 
102 		ddr_out32(pdata->mc_vbase + FSL_MC_DATA_ERR_INJECT_HI, val);
103 		return count;
104 	}
105 	return 0;
106 }
107 
108 static ssize_t fsl_mc_inject_data_lo_store(struct device *dev,
109 					   struct device_attribute *mattr,
110 					       const char *data, size_t count)
111 {
112 	struct mem_ctl_info *mci = to_mci(dev);
113 	struct fsl_mc_pdata *pdata = mci->pvt_info;
114 	unsigned long val;
115 	int rc;
116 
117 	if (isdigit(*data)) {
118 		rc = kstrtoul(data, 0, &val);
119 		if (rc)
120 			return rc;
121 
122 		ddr_out32(pdata->mc_vbase + FSL_MC_DATA_ERR_INJECT_LO, val);
123 		return count;
124 	}
125 	return 0;
126 }
127 
128 static ssize_t fsl_mc_inject_ctrl_store(struct device *dev,
129 					struct device_attribute *mattr,
130 					       const char *data, size_t count)
131 {
132 	struct mem_ctl_info *mci = to_mci(dev);
133 	struct fsl_mc_pdata *pdata = mci->pvt_info;
134 	unsigned long val;
135 	int rc;
136 
137 	if (isdigit(*data)) {
138 		rc = kstrtoul(data, 0, &val);
139 		if (rc)
140 			return rc;
141 
142 		ddr_out32(pdata->mc_vbase + FSL_MC_ECC_ERR_INJECT, val);
143 		return count;
144 	}
145 	return 0;
146 }
147 
/*
 * Error-injection knobs exposed in the memory controller's sysfs directory
 * (root-writable, world-readable); wired up via ATTRIBUTE_GROUPS below and
 * passed to edac_mc_add_mc_with_groups() at probe time.
 */
static DEVICE_ATTR(inject_data_hi, S_IRUGO | S_IWUSR,
		   fsl_mc_inject_data_hi_show, fsl_mc_inject_data_hi_store);
static DEVICE_ATTR(inject_data_lo, S_IRUGO | S_IWUSR,
		   fsl_mc_inject_data_lo_show, fsl_mc_inject_data_lo_store);
static DEVICE_ATTR(inject_ctrl, S_IRUGO | S_IWUSR,
		   fsl_mc_inject_ctrl_show, fsl_mc_inject_ctrl_store);

static struct attribute *fsl_ddr_dev_attrs[] = {
	&dev_attr_inject_data_hi.attr,
	&dev_attr_inject_data_lo.attr,
	&dev_attr_inject_ctrl.attr,
	NULL	/* attribute lists are NULL-terminated */
};

ATTRIBUTE_GROUPS(fsl_ddr_dev);
163 
164 /**************************** MC Err device ***************************/
165 
/*
 * Taken from table 8-55 in the MPC8641 User's Manual and/or 9-61 in the
 * MPC8572 User's Manual.  Each line represents a syndrome bit column as a
 * 64-bit value, but split into an upper and lower 32-bit chunk.  The labels
 * below correspond to Freescale's manuals.
 *
 * 16 entries = 8 syndrome columns x 2 (high/low 32-bit halves), indexed
 * as ecc_table[2*i] (high) and ecc_table[2*i + 1] (low) for syndrome
 * column i, stored from syndrome bit 7 down to bit 0.
 */
static unsigned int ecc_table[16] = {
	/* MSB           LSB */
	/* [0:31]    [32:63] */
	0xf00fe11e, 0xc33c0ff7,	/* Syndrome bit 7 */
	0x00ff00ff, 0x00fff0ff,
	0x0f0f0f0f, 0x0f0fff00,
	0x11113333, 0x7777000f,
	0x22224444, 0x8888222f,
	0x44448888, 0xffff4441,
	0x8888ffff, 0x11118882,
	0xffff1111, 0x22221114,	/* Syndrome bit 0 */
};
184 
185 /*
186  * Calculate the correct ECC value for a 64-bit value specified by high:low
187  */
188 static u8 calculate_ecc(u32 high, u32 low)
189 {
190 	u32 mask_low;
191 	u32 mask_high;
192 	int bit_cnt;
193 	u8 ecc = 0;
194 	int i;
195 	int j;
196 
197 	for (i = 0; i < 8; i++) {
198 		mask_high = ecc_table[i * 2];
199 		mask_low = ecc_table[i * 2 + 1];
200 		bit_cnt = 0;
201 
202 		for (j = 0; j < 32; j++) {
203 			if ((mask_high >> j) & 1)
204 				bit_cnt ^= (high >> j) & 1;
205 			if ((mask_low >> j) & 1)
206 				bit_cnt ^= (low >> j) & 1;
207 		}
208 
209 		ecc |= bit_cnt << i;
210 	}
211 
212 	return ecc;
213 }
214 
215 /*
216  * Create the syndrome code which is generated if the data line specified by
217  * 'bit' failed.  Eg generate an 8-bit codes seen in Table 8-55 in the MPC8641
218  * User's Manual and 9-61 in the MPC8572 User's Manual.
219  */
220 static u8 syndrome_from_bit(unsigned int bit) {
221 	int i;
222 	u8 syndrome = 0;
223 
224 	/*
225 	 * Cycle through the upper or lower 32-bit portion of each value in
226 	 * ecc_table depending on if 'bit' is in the upper or lower half of
227 	 * 64-bit data.
228 	 */
229 	for (i = bit < 32; i < 16; i += 2)
230 		syndrome |= ((ecc_table[i] >> (bit % 32)) & 1) << (i / 2);
231 
232 	return syndrome;
233 }
234 
235 /*
236  * Decode data and ecc syndrome to determine what went wrong
237  * Note: This can only decode single-bit errors
238  */
239 static void sbe_ecc_decode(u32 cap_high, u32 cap_low, u32 cap_ecc,
240 		       int *bad_data_bit, int *bad_ecc_bit)
241 {
242 	int i;
243 	u8 syndrome;
244 
245 	*bad_data_bit = -1;
246 	*bad_ecc_bit = -1;
247 
248 	/*
249 	 * Calculate the ECC of the captured data and XOR it with the captured
250 	 * ECC to find an ECC syndrome value we can search for
251 	 */
252 	syndrome = calculate_ecc(cap_high, cap_low) ^ cap_ecc;
253 
254 	/* Check if a data line is stuck... */
255 	for (i = 0; i < 64; i++) {
256 		if (syndrome == syndrome_from_bit(i)) {
257 			*bad_data_bit = i;
258 			return;
259 		}
260 	}
261 
262 	/* If data is correct, check ECC bits for errors... */
263 	for (i = 0; i < 8; i++) {
264 		if ((syndrome >> i) & 0x1) {
265 			*bad_ecc_bit = i;
266 			return;
267 		}
268 	}
269 }
270 
/* Combine two 32-bit capture-register halves into one 64-bit value. */
#define make64(high, low) (((u64)(high) << 32) | (low))
272 
273 static void fsl_mc_check(struct mem_ctl_info *mci)
274 {
275 	struct fsl_mc_pdata *pdata = mci->pvt_info;
276 	struct csrow_info *csrow;
277 	u32 bus_width;
278 	u32 err_detect;
279 	u32 syndrome;
280 	u64 err_addr;
281 	u32 pfn;
282 	int row_index;
283 	u32 cap_high;
284 	u32 cap_low;
285 	int bad_data_bit;
286 	int bad_ecc_bit;
287 
288 	err_detect = ddr_in32(pdata->mc_vbase + FSL_MC_ERR_DETECT);
289 	if (!err_detect)
290 		return;
291 
292 	fsl_mc_printk(mci, KERN_ERR, "Err Detect Register: %#8.8x\n",
293 		      err_detect);
294 
295 	/* no more processing if not ECC bit errors */
296 	if (!(err_detect & (DDR_EDE_SBE | DDR_EDE_MBE))) {
297 		ddr_out32(pdata->mc_vbase + FSL_MC_ERR_DETECT, err_detect);
298 		return;
299 	}
300 
301 	syndrome = ddr_in32(pdata->mc_vbase + FSL_MC_CAPTURE_ECC);
302 
303 	/* Mask off appropriate bits of syndrome based on bus width */
304 	bus_width = (ddr_in32(pdata->mc_vbase + FSL_MC_DDR_SDRAM_CFG) &
305 		     DSC_DBW_MASK) ? 32 : 64;
306 	if (bus_width == 64)
307 		syndrome &= 0xff;
308 	else
309 		syndrome &= 0xffff;
310 
311 	err_addr = make64(
312 		ddr_in32(pdata->mc_vbase + FSL_MC_CAPTURE_EXT_ADDRESS),
313 		ddr_in32(pdata->mc_vbase + FSL_MC_CAPTURE_ADDRESS));
314 	pfn = err_addr >> PAGE_SHIFT;
315 
316 	for (row_index = 0; row_index < mci->nr_csrows; row_index++) {
317 		csrow = mci->csrows[row_index];
318 		if ((pfn >= csrow->first_page) && (pfn <= csrow->last_page))
319 			break;
320 	}
321 
322 	cap_high = ddr_in32(pdata->mc_vbase + FSL_MC_CAPTURE_DATA_HI);
323 	cap_low = ddr_in32(pdata->mc_vbase + FSL_MC_CAPTURE_DATA_LO);
324 
325 	/*
326 	 * Analyze single-bit errors on 64-bit wide buses
327 	 * TODO: Add support for 32-bit wide buses
328 	 */
329 	if ((err_detect & DDR_EDE_SBE) && (bus_width == 64)) {
330 		sbe_ecc_decode(cap_high, cap_low, syndrome,
331 				&bad_data_bit, &bad_ecc_bit);
332 
333 		if (bad_data_bit != -1)
334 			fsl_mc_printk(mci, KERN_ERR,
335 				"Faulty Data bit: %d\n", bad_data_bit);
336 		if (bad_ecc_bit != -1)
337 			fsl_mc_printk(mci, KERN_ERR,
338 				"Faulty ECC bit: %d\n", bad_ecc_bit);
339 
340 		fsl_mc_printk(mci, KERN_ERR,
341 			"Expected Data / ECC:\t%#8.8x_%08x / %#2.2x\n",
342 			cap_high ^ (1 << (bad_data_bit - 32)),
343 			cap_low ^ (1 << bad_data_bit),
344 			syndrome ^ (1 << bad_ecc_bit));
345 	}
346 
347 	fsl_mc_printk(mci, KERN_ERR,
348 			"Captured Data / ECC:\t%#8.8x_%08x / %#2.2x\n",
349 			cap_high, cap_low, syndrome);
350 	fsl_mc_printk(mci, KERN_ERR, "Err addr: %#8.8llx\n", err_addr);
351 	fsl_mc_printk(mci, KERN_ERR, "PFN: %#8.8x\n", pfn);
352 
353 	/* we are out of range */
354 	if (row_index == mci->nr_csrows)
355 		fsl_mc_printk(mci, KERN_ERR, "PFN out of range!\n");
356 
357 	if (err_detect & DDR_EDE_SBE)
358 		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
359 				     pfn, err_addr & ~PAGE_MASK, syndrome,
360 				     row_index, 0, -1,
361 				     mci->ctl_name, "");
362 
363 	if (err_detect & DDR_EDE_MBE)
364 		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
365 				     pfn, err_addr & ~PAGE_MASK, syndrome,
366 				     row_index, 0, -1,
367 				     mci->ctl_name, "");
368 
369 	ddr_out32(pdata->mc_vbase + FSL_MC_ERR_DETECT, err_detect);
370 }
371 
372 static irqreturn_t fsl_mc_isr(int irq, void *dev_id)
373 {
374 	struct mem_ctl_info *mci = dev_id;
375 	struct fsl_mc_pdata *pdata = mci->pvt_info;
376 	u32 err_detect;
377 
378 	err_detect = ddr_in32(pdata->mc_vbase + FSL_MC_ERR_DETECT);
379 	if (!err_detect)
380 		return IRQ_NONE;
381 
382 	fsl_mc_check(mci);
383 
384 	return IRQ_HANDLED;
385 }
386 
387 static void fsl_ddr_init_csrows(struct mem_ctl_info *mci)
388 {
389 	struct fsl_mc_pdata *pdata = mci->pvt_info;
390 	struct csrow_info *csrow;
391 	struct dimm_info *dimm;
392 	u32 sdram_ctl;
393 	u32 sdtype;
394 	enum mem_type mtype;
395 	u32 cs_bnds;
396 	int index;
397 
398 	sdram_ctl = ddr_in32(pdata->mc_vbase + FSL_MC_DDR_SDRAM_CFG);
399 
400 	sdtype = sdram_ctl & DSC_SDTYPE_MASK;
401 	if (sdram_ctl & DSC_RD_EN) {
402 		switch (sdtype) {
403 		case 0x02000000:
404 			mtype = MEM_RDDR;
405 			break;
406 		case 0x03000000:
407 			mtype = MEM_RDDR2;
408 			break;
409 		case 0x07000000:
410 			mtype = MEM_RDDR3;
411 			break;
412 		case 0x05000000:
413 			mtype = MEM_RDDR4;
414 			break;
415 		default:
416 			mtype = MEM_UNKNOWN;
417 			break;
418 		}
419 	} else {
420 		switch (sdtype) {
421 		case 0x02000000:
422 			mtype = MEM_DDR;
423 			break;
424 		case 0x03000000:
425 			mtype = MEM_DDR2;
426 			break;
427 		case 0x07000000:
428 			mtype = MEM_DDR3;
429 			break;
430 		case 0x05000000:
431 			mtype = MEM_DDR4;
432 			break;
433 		default:
434 			mtype = MEM_UNKNOWN;
435 			break;
436 		}
437 	}
438 
439 	for (index = 0; index < mci->nr_csrows; index++) {
440 		u32 start;
441 		u32 end;
442 
443 		csrow = mci->csrows[index];
444 		dimm = csrow->channels[0]->dimm;
445 
446 		cs_bnds = ddr_in32(pdata->mc_vbase + FSL_MC_CS_BNDS_0 +
447 				   (index * FSL_MC_CS_BNDS_OFS));
448 
449 		start = (cs_bnds & 0xffff0000) >> 16;
450 		end   = (cs_bnds & 0x0000ffff);
451 
452 		if (start == end)
453 			continue;	/* not populated */
454 
455 		start <<= (24 - PAGE_SHIFT);
456 		end   <<= (24 - PAGE_SHIFT);
457 		end    |= (1 << (24 - PAGE_SHIFT)) - 1;
458 
459 		csrow->first_page = start;
460 		csrow->last_page = end;
461 
462 		dimm->nr_pages = end + 1 - start;
463 		dimm->grain = 8;
464 		dimm->mtype = mtype;
465 		dimm->dtype = DEV_UNKNOWN;
466 		if (sdram_ctl & DSC_X32_EN)
467 			dimm->dtype = DEV_X32;
468 		dimm->edac_mode = EDAC_SECDED;
469 	}
470 }
471 
472 int fsl_mc_err_probe(struct platform_device *op)
473 {
474 	struct mem_ctl_info *mci;
475 	struct edac_mc_layer layers[2];
476 	struct fsl_mc_pdata *pdata;
477 	struct resource r;
478 	u32 sdram_ctl;
479 	int res;
480 
481 	if (!devres_open_group(&op->dev, fsl_mc_err_probe, GFP_KERNEL))
482 		return -ENOMEM;
483 
484 	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
485 	layers[0].size = 4;
486 	layers[0].is_virt_csrow = true;
487 	layers[1].type = EDAC_MC_LAYER_CHANNEL;
488 	layers[1].size = 1;
489 	layers[1].is_virt_csrow = false;
490 	mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), layers,
491 			    sizeof(*pdata));
492 	if (!mci) {
493 		devres_release_group(&op->dev, fsl_mc_err_probe);
494 		return -ENOMEM;
495 	}
496 
497 	pdata = mci->pvt_info;
498 	pdata->name = "fsl_mc_err";
499 	mci->pdev = &op->dev;
500 	pdata->edac_idx = edac_mc_idx++;
501 	dev_set_drvdata(mci->pdev, mci);
502 	mci->ctl_name = pdata->name;
503 	mci->dev_name = pdata->name;
504 
505 	/*
506 	 * Get the endianness of DDR controller registers.
507 	 * Default is big endian.
508 	 */
509 	little_endian = of_property_read_bool(op->dev.of_node, "little-endian");
510 
511 	res = of_address_to_resource(op->dev.of_node, 0, &r);
512 	if (res) {
513 		pr_err("%s: Unable to get resource for MC err regs\n",
514 		       __func__);
515 		goto err;
516 	}
517 
518 	if (!devm_request_mem_region(&op->dev, r.start, resource_size(&r),
519 				     pdata->name)) {
520 		pr_err("%s: Error while requesting mem region\n",
521 		       __func__);
522 		res = -EBUSY;
523 		goto err;
524 	}
525 
526 	pdata->mc_vbase = devm_ioremap(&op->dev, r.start, resource_size(&r));
527 	if (!pdata->mc_vbase) {
528 		pr_err("%s: Unable to setup MC err regs\n", __func__);
529 		res = -ENOMEM;
530 		goto err;
531 	}
532 
533 	sdram_ctl = ddr_in32(pdata->mc_vbase + FSL_MC_DDR_SDRAM_CFG);
534 	if (!(sdram_ctl & DSC_ECC_EN)) {
535 		/* no ECC */
536 		pr_warn("%s: No ECC DIMMs discovered\n", __func__);
537 		res = -ENODEV;
538 		goto err;
539 	}
540 
541 	edac_dbg(3, "init mci\n");
542 	mci->mtype_cap = MEM_FLAG_DDR | MEM_FLAG_RDDR |
543 			 MEM_FLAG_DDR2 | MEM_FLAG_RDDR2 |
544 			 MEM_FLAG_DDR3 | MEM_FLAG_RDDR3 |
545 			 MEM_FLAG_DDR4 | MEM_FLAG_RDDR4;
546 	mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
547 	mci->edac_cap = EDAC_FLAG_SECDED;
548 	mci->mod_name = EDAC_MOD_STR;
549 
550 	if (edac_op_state == EDAC_OPSTATE_POLL)
551 		mci->edac_check = fsl_mc_check;
552 
553 	mci->ctl_page_to_phys = NULL;
554 
555 	mci->scrub_mode = SCRUB_SW_SRC;
556 
557 	fsl_ddr_init_csrows(mci);
558 
559 	/* store the original error disable bits */
560 	orig_ddr_err_disable = ddr_in32(pdata->mc_vbase + FSL_MC_ERR_DISABLE);
561 	ddr_out32(pdata->mc_vbase + FSL_MC_ERR_DISABLE, 0);
562 
563 	/* clear all error bits */
564 	ddr_out32(pdata->mc_vbase + FSL_MC_ERR_DETECT, ~0);
565 
566 	res = edac_mc_add_mc_with_groups(mci, fsl_ddr_dev_groups);
567 	if (res) {
568 		edac_dbg(3, "failed edac_mc_add_mc()\n");
569 		goto err;
570 	}
571 
572 	if (edac_op_state == EDAC_OPSTATE_INT) {
573 		ddr_out32(pdata->mc_vbase + FSL_MC_ERR_INT_EN,
574 			  DDR_EIE_MBEE | DDR_EIE_SBEE);
575 
576 		/* store the original error management threshold */
577 		orig_ddr_err_sbe = ddr_in32(pdata->mc_vbase +
578 					    FSL_MC_ERR_SBE) & 0xff0000;
579 
580 		/* set threshold to 1 error per interrupt */
581 		ddr_out32(pdata->mc_vbase + FSL_MC_ERR_SBE, 0x10000);
582 
583 		/* register interrupts */
584 		pdata->irq = platform_get_irq(op, 0);
585 		res = devm_request_irq(&op->dev, pdata->irq,
586 				       fsl_mc_isr,
587 				       IRQF_SHARED,
588 				       "[EDAC] MC err", mci);
589 		if (res < 0) {
590 			pr_err("%s: Unable to request irq %d for FSL DDR DRAM ERR\n",
591 			       __func__, pdata->irq);
592 			res = -ENODEV;
593 			goto err2;
594 		}
595 
596 		pr_info(EDAC_MOD_STR " acquired irq %d for MC\n",
597 		       pdata->irq);
598 	}
599 
600 	devres_remove_group(&op->dev, fsl_mc_err_probe);
601 	edac_dbg(3, "success\n");
602 	pr_info(EDAC_MOD_STR " MC err registered\n");
603 
604 	return 0;
605 
606 err2:
607 	edac_mc_del_mc(&op->dev);
608 err:
609 	devres_release_group(&op->dev, fsl_mc_err_probe);
610 	edac_mc_free(mci);
611 	return res;
612 }
613 
614 int fsl_mc_err_remove(struct platform_device *op)
615 {
616 	struct mem_ctl_info *mci = dev_get_drvdata(&op->dev);
617 	struct fsl_mc_pdata *pdata = mci->pvt_info;
618 
619 	edac_dbg(0, "\n");
620 
621 	if (edac_op_state == EDAC_OPSTATE_INT) {
622 		ddr_out32(pdata->mc_vbase + FSL_MC_ERR_INT_EN, 0);
623 	}
624 
625 	ddr_out32(pdata->mc_vbase + FSL_MC_ERR_DISABLE,
626 		  orig_ddr_err_disable);
627 	ddr_out32(pdata->mc_vbase + FSL_MC_ERR_SBE, orig_ddr_err_sbe);
628 
629 	edac_mc_del_mc(&op->dev);
630 	edac_mc_free(mci);
631 	return 0;
632 }
633