xref: /openbmc/linux/drivers/edac/fsl_ddr_edac.c (revision 151f4e2b)
1 /*
2  * Freescale Memory Controller kernel module
3  *
4  * Support Power-based SoCs including MPC85xx, MPC86xx, MPC83xx and
5  * ARM-based Layerscape SoCs including LS2xxx and LS1021A. Originally
6  * split out from mpc85xx_edac EDAC driver.
7  *
8  * Parts Copyrighted (c) 2013 by Freescale Semiconductor, Inc.
9  *
10  * Author: Dave Jiang <djiang@mvista.com>
11  *
12  * 2006-2007 (c) MontaVista Software, Inc. This file is licensed under
13  * the terms of the GNU General Public License version 2. This program
14  * is licensed "as is" without any warranty of any kind, whether express
15  * or implied.
16  */
17 #include <linux/module.h>
18 #include <linux/init.h>
19 #include <linux/interrupt.h>
20 #include <linux/ctype.h>
21 #include <linux/io.h>
22 #include <linux/mod_devicetable.h>
23 #include <linux/edac.h>
24 #include <linux/smp.h>
25 #include <linux/gfp.h>
26 
27 #include <linux/of_platform.h>
28 #include <linux/of_device.h>
29 #include <linux/of_address.h>
30 #include "edac_module.h"
31 #include "fsl_ddr_edac.h"
32 
33 #define EDAC_MOD_STR	"fsl_ddr_edac"
34 
35 static int edac_mc_idx;
36 
37 static u32 orig_ddr_err_disable;
38 static u32 orig_ddr_err_sbe;
39 static bool little_endian;
40 
41 static inline u32 ddr_in32(void __iomem *addr)
42 {
43 	return little_endian ? ioread32(addr) : ioread32be(addr);
44 }
45 
46 static inline void ddr_out32(void __iomem *addr, u32 value)
47 {
48 	if (little_endian)
49 		iowrite32(value, addr);
50 	else
51 		iowrite32be(value, addr);
52 }
53 
54 #ifdef CONFIG_EDAC_DEBUG
55 /************************ MC SYSFS parts ***********************************/
56 
57 #define to_mci(k) container_of(k, struct mem_ctl_info, dev)
58 
59 static ssize_t fsl_mc_inject_data_hi_show(struct device *dev,
60 					  struct device_attribute *mattr,
61 					  char *data)
62 {
63 	struct mem_ctl_info *mci = to_mci(dev);
64 	struct fsl_mc_pdata *pdata = mci->pvt_info;
65 	return sprintf(data, "0x%08x",
66 		       ddr_in32(pdata->mc_vbase + FSL_MC_DATA_ERR_INJECT_HI));
67 }
68 
69 static ssize_t fsl_mc_inject_data_lo_show(struct device *dev,
70 					  struct device_attribute *mattr,
71 					      char *data)
72 {
73 	struct mem_ctl_info *mci = to_mci(dev);
74 	struct fsl_mc_pdata *pdata = mci->pvt_info;
75 	return sprintf(data, "0x%08x",
76 		       ddr_in32(pdata->mc_vbase + FSL_MC_DATA_ERR_INJECT_LO));
77 }
78 
79 static ssize_t fsl_mc_inject_ctrl_show(struct device *dev,
80 				       struct device_attribute *mattr,
81 					   char *data)
82 {
83 	struct mem_ctl_info *mci = to_mci(dev);
84 	struct fsl_mc_pdata *pdata = mci->pvt_info;
85 	return sprintf(data, "0x%08x",
86 		       ddr_in32(pdata->mc_vbase + FSL_MC_ECC_ERR_INJECT));
87 }
88 
89 static ssize_t fsl_mc_inject_data_hi_store(struct device *dev,
90 					   struct device_attribute *mattr,
91 					       const char *data, size_t count)
92 {
93 	struct mem_ctl_info *mci = to_mci(dev);
94 	struct fsl_mc_pdata *pdata = mci->pvt_info;
95 	unsigned long val;
96 	int rc;
97 
98 	if (isdigit(*data)) {
99 		rc = kstrtoul(data, 0, &val);
100 		if (rc)
101 			return rc;
102 
103 		ddr_out32(pdata->mc_vbase + FSL_MC_DATA_ERR_INJECT_HI, val);
104 		return count;
105 	}
106 	return 0;
107 }
108 
109 static ssize_t fsl_mc_inject_data_lo_store(struct device *dev,
110 					   struct device_attribute *mattr,
111 					       const char *data, size_t count)
112 {
113 	struct mem_ctl_info *mci = to_mci(dev);
114 	struct fsl_mc_pdata *pdata = mci->pvt_info;
115 	unsigned long val;
116 	int rc;
117 
118 	if (isdigit(*data)) {
119 		rc = kstrtoul(data, 0, &val);
120 		if (rc)
121 			return rc;
122 
123 		ddr_out32(pdata->mc_vbase + FSL_MC_DATA_ERR_INJECT_LO, val);
124 		return count;
125 	}
126 	return 0;
127 }
128 
129 static ssize_t fsl_mc_inject_ctrl_store(struct device *dev,
130 					struct device_attribute *mattr,
131 					       const char *data, size_t count)
132 {
133 	struct mem_ctl_info *mci = to_mci(dev);
134 	struct fsl_mc_pdata *pdata = mci->pvt_info;
135 	unsigned long val;
136 	int rc;
137 
138 	if (isdigit(*data)) {
139 		rc = kstrtoul(data, 0, &val);
140 		if (rc)
141 			return rc;
142 
143 		ddr_out32(pdata->mc_vbase + FSL_MC_ECC_ERR_INJECT, val);
144 		return count;
145 	}
146 	return 0;
147 }
148 
/* Debug-only sysfs knobs exposing the MC error-injection registers. */
static DEVICE_ATTR(inject_data_hi, S_IRUGO | S_IWUSR,
		   fsl_mc_inject_data_hi_show, fsl_mc_inject_data_hi_store);
static DEVICE_ATTR(inject_data_lo, S_IRUGO | S_IWUSR,
		   fsl_mc_inject_data_lo_show, fsl_mc_inject_data_lo_store);
static DEVICE_ATTR(inject_ctrl, S_IRUGO | S_IWUSR,
		   fsl_mc_inject_ctrl_show, fsl_mc_inject_ctrl_store);
155 #endif /* CONFIG_EDAC_DEBUG */
156 
/* Attribute list handed to the EDAC core; empty unless EDAC debug is on. */
static struct attribute *fsl_ddr_dev_attrs[] = {
#ifdef CONFIG_EDAC_DEBUG
	&dev_attr_inject_data_hi.attr,
	&dev_attr_inject_data_lo.attr,
	&dev_attr_inject_ctrl.attr,
#endif
	NULL	/* sentinel */
};

/* Generates fsl_ddr_dev_groups, used by edac_mc_add_mc_with_groups(). */
ATTRIBUTE_GROUPS(fsl_ddr_dev);
167 
168 /**************************** MC Err device ***************************/
169 
170 /*
171  * Taken from table 8-55 in the MPC8641 User's Manual and/or 9-61 in the
172  * MPC8572 User's Manual.  Each line represents a syndrome bit column as a
173  * 64-bit value, but split into an upper and lower 32-bit chunk.  The labels
174  * below correspond to Freescale's manuals.
175  */
176 static unsigned int ecc_table[16] = {
177 	/* MSB           LSB */
178 	/* [0:31]    [32:63] */
179 	0xf00fe11e, 0xc33c0ff7,	/* Syndrome bit 7 */
180 	0x00ff00ff, 0x00fff0ff,
181 	0x0f0f0f0f, 0x0f0fff00,
182 	0x11113333, 0x7777000f,
183 	0x22224444, 0x8888222f,
184 	0x44448888, 0xffff4441,
185 	0x8888ffff, 0x11118882,
186 	0xffff1111, 0x22221114,	/* Syndrome bit 0 */
187 };
188 
189 /*
190  * Calculate the correct ECC value for a 64-bit value specified by high:low
191  */
192 static u8 calculate_ecc(u32 high, u32 low)
193 {
194 	u32 mask_low;
195 	u32 mask_high;
196 	int bit_cnt;
197 	u8 ecc = 0;
198 	int i;
199 	int j;
200 
201 	for (i = 0; i < 8; i++) {
202 		mask_high = ecc_table[i * 2];
203 		mask_low = ecc_table[i * 2 + 1];
204 		bit_cnt = 0;
205 
206 		for (j = 0; j < 32; j++) {
207 			if ((mask_high >> j) & 1)
208 				bit_cnt ^= (high >> j) & 1;
209 			if ((mask_low >> j) & 1)
210 				bit_cnt ^= (low >> j) & 1;
211 		}
212 
213 		ecc |= bit_cnt << i;
214 	}
215 
216 	return ecc;
217 }
218 
219 /*
220  * Create the syndrome code which is generated if the data line specified by
221  * 'bit' failed.  Eg generate an 8-bit codes seen in Table 8-55 in the MPC8641
222  * User's Manual and 9-61 in the MPC8572 User's Manual.
223  */
224 static u8 syndrome_from_bit(unsigned int bit) {
225 	int i;
226 	u8 syndrome = 0;
227 
228 	/*
229 	 * Cycle through the upper or lower 32-bit portion of each value in
230 	 * ecc_table depending on if 'bit' is in the upper or lower half of
231 	 * 64-bit data.
232 	 */
233 	for (i = bit < 32; i < 16; i += 2)
234 		syndrome |= ((ecc_table[i] >> (bit % 32)) & 1) << (i / 2);
235 
236 	return syndrome;
237 }
238 
239 /*
240  * Decode data and ecc syndrome to determine what went wrong
241  * Note: This can only decode single-bit errors
242  */
243 static void sbe_ecc_decode(u32 cap_high, u32 cap_low, u32 cap_ecc,
244 		       int *bad_data_bit, int *bad_ecc_bit)
245 {
246 	int i;
247 	u8 syndrome;
248 
249 	*bad_data_bit = -1;
250 	*bad_ecc_bit = -1;
251 
252 	/*
253 	 * Calculate the ECC of the captured data and XOR it with the captured
254 	 * ECC to find an ECC syndrome value we can search for
255 	 */
256 	syndrome = calculate_ecc(cap_high, cap_low) ^ cap_ecc;
257 
258 	/* Check if a data line is stuck... */
259 	for (i = 0; i < 64; i++) {
260 		if (syndrome == syndrome_from_bit(i)) {
261 			*bad_data_bit = i;
262 			return;
263 		}
264 	}
265 
266 	/* If data is correct, check ECC bits for errors... */
267 	for (i = 0; i < 8; i++) {
268 		if ((syndrome >> i) & 0x1) {
269 			*bad_ecc_bit = i;
270 			return;
271 		}
272 	}
273 }
274 
/* Combine two 32-bit register halves into a single 64-bit value. */
#define make64(high, low) (((u64)(high) << 32) | (low))
276 
277 static void fsl_mc_check(struct mem_ctl_info *mci)
278 {
279 	struct fsl_mc_pdata *pdata = mci->pvt_info;
280 	struct csrow_info *csrow;
281 	u32 bus_width;
282 	u32 err_detect;
283 	u32 syndrome;
284 	u64 err_addr;
285 	u32 pfn;
286 	int row_index;
287 	u32 cap_high;
288 	u32 cap_low;
289 	int bad_data_bit;
290 	int bad_ecc_bit;
291 
292 	err_detect = ddr_in32(pdata->mc_vbase + FSL_MC_ERR_DETECT);
293 	if (!err_detect)
294 		return;
295 
296 	fsl_mc_printk(mci, KERN_ERR, "Err Detect Register: %#8.8x\n",
297 		      err_detect);
298 
299 	/* no more processing if not ECC bit errors */
300 	if (!(err_detect & (DDR_EDE_SBE | DDR_EDE_MBE))) {
301 		ddr_out32(pdata->mc_vbase + FSL_MC_ERR_DETECT, err_detect);
302 		return;
303 	}
304 
305 	syndrome = ddr_in32(pdata->mc_vbase + FSL_MC_CAPTURE_ECC);
306 
307 	/* Mask off appropriate bits of syndrome based on bus width */
308 	bus_width = (ddr_in32(pdata->mc_vbase + FSL_MC_DDR_SDRAM_CFG) &
309 		     DSC_DBW_MASK) ? 32 : 64;
310 	if (bus_width == 64)
311 		syndrome &= 0xff;
312 	else
313 		syndrome &= 0xffff;
314 
315 	err_addr = make64(
316 		ddr_in32(pdata->mc_vbase + FSL_MC_CAPTURE_EXT_ADDRESS),
317 		ddr_in32(pdata->mc_vbase + FSL_MC_CAPTURE_ADDRESS));
318 	pfn = err_addr >> PAGE_SHIFT;
319 
320 	for (row_index = 0; row_index < mci->nr_csrows; row_index++) {
321 		csrow = mci->csrows[row_index];
322 		if ((pfn >= csrow->first_page) && (pfn <= csrow->last_page))
323 			break;
324 	}
325 
326 	cap_high = ddr_in32(pdata->mc_vbase + FSL_MC_CAPTURE_DATA_HI);
327 	cap_low = ddr_in32(pdata->mc_vbase + FSL_MC_CAPTURE_DATA_LO);
328 
329 	/*
330 	 * Analyze single-bit errors on 64-bit wide buses
331 	 * TODO: Add support for 32-bit wide buses
332 	 */
333 	if ((err_detect & DDR_EDE_SBE) && (bus_width == 64)) {
334 		sbe_ecc_decode(cap_high, cap_low, syndrome,
335 				&bad_data_bit, &bad_ecc_bit);
336 
337 		if (bad_data_bit != -1)
338 			fsl_mc_printk(mci, KERN_ERR,
339 				"Faulty Data bit: %d\n", bad_data_bit);
340 		if (bad_ecc_bit != -1)
341 			fsl_mc_printk(mci, KERN_ERR,
342 				"Faulty ECC bit: %d\n", bad_ecc_bit);
343 
344 		fsl_mc_printk(mci, KERN_ERR,
345 			"Expected Data / ECC:\t%#8.8x_%08x / %#2.2x\n",
346 			cap_high ^ (1 << (bad_data_bit - 32)),
347 			cap_low ^ (1 << bad_data_bit),
348 			syndrome ^ (1 << bad_ecc_bit));
349 	}
350 
351 	fsl_mc_printk(mci, KERN_ERR,
352 			"Captured Data / ECC:\t%#8.8x_%08x / %#2.2x\n",
353 			cap_high, cap_low, syndrome);
354 	fsl_mc_printk(mci, KERN_ERR, "Err addr: %#8.8llx\n", err_addr);
355 	fsl_mc_printk(mci, KERN_ERR, "PFN: %#8.8x\n", pfn);
356 
357 	/* we are out of range */
358 	if (row_index == mci->nr_csrows)
359 		fsl_mc_printk(mci, KERN_ERR, "PFN out of range!\n");
360 
361 	if (err_detect & DDR_EDE_SBE)
362 		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
363 				     pfn, err_addr & ~PAGE_MASK, syndrome,
364 				     row_index, 0, -1,
365 				     mci->ctl_name, "");
366 
367 	if (err_detect & DDR_EDE_MBE)
368 		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
369 				     pfn, err_addr & ~PAGE_MASK, syndrome,
370 				     row_index, 0, -1,
371 				     mci->ctl_name, "");
372 
373 	ddr_out32(pdata->mc_vbase + FSL_MC_ERR_DETECT, err_detect);
374 }
375 
376 static irqreturn_t fsl_mc_isr(int irq, void *dev_id)
377 {
378 	struct mem_ctl_info *mci = dev_id;
379 	struct fsl_mc_pdata *pdata = mci->pvt_info;
380 	u32 err_detect;
381 
382 	err_detect = ddr_in32(pdata->mc_vbase + FSL_MC_ERR_DETECT);
383 	if (!err_detect)
384 		return IRQ_NONE;
385 
386 	fsl_mc_check(mci);
387 
388 	return IRQ_HANDLED;
389 }
390 
391 static void fsl_ddr_init_csrows(struct mem_ctl_info *mci)
392 {
393 	struct fsl_mc_pdata *pdata = mci->pvt_info;
394 	struct csrow_info *csrow;
395 	struct dimm_info *dimm;
396 	u32 sdram_ctl;
397 	u32 sdtype;
398 	enum mem_type mtype;
399 	u32 cs_bnds;
400 	int index;
401 
402 	sdram_ctl = ddr_in32(pdata->mc_vbase + FSL_MC_DDR_SDRAM_CFG);
403 
404 	sdtype = sdram_ctl & DSC_SDTYPE_MASK;
405 	if (sdram_ctl & DSC_RD_EN) {
406 		switch (sdtype) {
407 		case 0x02000000:
408 			mtype = MEM_RDDR;
409 			break;
410 		case 0x03000000:
411 			mtype = MEM_RDDR2;
412 			break;
413 		case 0x07000000:
414 			mtype = MEM_RDDR3;
415 			break;
416 		case 0x05000000:
417 			mtype = MEM_RDDR4;
418 			break;
419 		default:
420 			mtype = MEM_UNKNOWN;
421 			break;
422 		}
423 	} else {
424 		switch (sdtype) {
425 		case 0x02000000:
426 			mtype = MEM_DDR;
427 			break;
428 		case 0x03000000:
429 			mtype = MEM_DDR2;
430 			break;
431 		case 0x07000000:
432 			mtype = MEM_DDR3;
433 			break;
434 		case 0x05000000:
435 			mtype = MEM_DDR4;
436 			break;
437 		default:
438 			mtype = MEM_UNKNOWN;
439 			break;
440 		}
441 	}
442 
443 	for (index = 0; index < mci->nr_csrows; index++) {
444 		u32 start;
445 		u32 end;
446 
447 		csrow = mci->csrows[index];
448 		dimm = csrow->channels[0]->dimm;
449 
450 		cs_bnds = ddr_in32(pdata->mc_vbase + FSL_MC_CS_BNDS_0 +
451 				   (index * FSL_MC_CS_BNDS_OFS));
452 
453 		start = (cs_bnds & 0xffff0000) >> 16;
454 		end   = (cs_bnds & 0x0000ffff);
455 
456 		if (start == end)
457 			continue;	/* not populated */
458 
459 		start <<= (24 - PAGE_SHIFT);
460 		end   <<= (24 - PAGE_SHIFT);
461 		end    |= (1 << (24 - PAGE_SHIFT)) - 1;
462 
463 		csrow->first_page = start;
464 		csrow->last_page = end;
465 
466 		dimm->nr_pages = end + 1 - start;
467 		dimm->grain = 8;
468 		dimm->mtype = mtype;
469 		dimm->dtype = DEV_UNKNOWN;
470 		if (sdram_ctl & DSC_X32_EN)
471 			dimm->dtype = DEV_X32;
472 		dimm->edac_mode = EDAC_SECDED;
473 	}
474 }
475 
/*
 * Probe the FSL DDR memory controller: map its registers, verify ECC is
 * enabled, register an EDAC mem_ctl_info, and (in interrupt mode) hook up
 * the error interrupt.  Returns 0 on success or a negative errno.
 *
 * All devm allocations are collected in a devres group keyed on this
 * function so a failed probe releases them in one shot.
 */
int fsl_mc_err_probe(struct platform_device *op)
{
	struct mem_ctl_info *mci;
	struct edac_mc_layer layers[2];
	struct fsl_mc_pdata *pdata;
	struct resource r;
	u32 sdram_ctl;
	int res;

	if (!devres_open_group(&op->dev, fsl_mc_err_probe, GFP_KERNEL))
		return -ENOMEM;

	/* Layout: up to 4 chip-select rows, one channel each. */
	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
	layers[0].size = 4;
	layers[0].is_virt_csrow = true;
	layers[1].type = EDAC_MC_LAYER_CHANNEL;
	layers[1].size = 1;
	layers[1].is_virt_csrow = false;
	mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), layers,
			    sizeof(*pdata));
	if (!mci) {
		devres_release_group(&op->dev, fsl_mc_err_probe);
		return -ENOMEM;
	}

	pdata = mci->pvt_info;
	pdata->name = "fsl_mc_err";
	mci->pdev = &op->dev;
	pdata->edac_idx = edac_mc_idx++;
	dev_set_drvdata(mci->pdev, mci);
	mci->ctl_name = pdata->name;
	mci->dev_name = pdata->name;

	/*
	 * Get the endianness of DDR controller registers.
	 * Default is big endian.
	 */
	little_endian = of_property_read_bool(op->dev.of_node, "little-endian");

	res = of_address_to_resource(op->dev.of_node, 0, &r);
	if (res) {
		pr_err("%s: Unable to get resource for MC err regs\n",
		       __func__);
		goto err;
	}

	if (!devm_request_mem_region(&op->dev, r.start, resource_size(&r),
				     pdata->name)) {
		pr_err("%s: Error while requesting mem region\n",
		       __func__);
		res = -EBUSY;
		goto err;
	}

	pdata->mc_vbase = devm_ioremap(&op->dev, r.start, resource_size(&r));
	if (!pdata->mc_vbase) {
		pr_err("%s: Unable to setup MC err regs\n", __func__);
		res = -ENOMEM;
		goto err;
	}

	sdram_ctl = ddr_in32(pdata->mc_vbase + FSL_MC_DDR_SDRAM_CFG);
	if (!(sdram_ctl & DSC_ECC_EN)) {
		/* no ECC */
		pr_warn("%s: No ECC DIMMs discovered\n", __func__);
		res = -ENODEV;
		goto err;
	}

	edac_dbg(3, "init mci\n");
	mci->mtype_cap = MEM_FLAG_DDR | MEM_FLAG_RDDR |
			 MEM_FLAG_DDR2 | MEM_FLAG_RDDR2 |
			 MEM_FLAG_DDR3 | MEM_FLAG_RDDR3 |
			 MEM_FLAG_DDR4 | MEM_FLAG_RDDR4;
	mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
	mci->edac_cap = EDAC_FLAG_SECDED;
	mci->mod_name = EDAC_MOD_STR;

	/* Only install the poll callback when not running interrupt-driven. */
	if (edac_op_state == EDAC_OPSTATE_POLL)
		mci->edac_check = fsl_mc_check;

	mci->ctl_page_to_phys = NULL;

	mci->scrub_mode = SCRUB_SW_SRC;

	fsl_ddr_init_csrows(mci);

	/* store the original error disable bits */
	orig_ddr_err_disable = ddr_in32(pdata->mc_vbase + FSL_MC_ERR_DISABLE);
	ddr_out32(pdata->mc_vbase + FSL_MC_ERR_DISABLE, 0);

	/* clear all error bits */
	ddr_out32(pdata->mc_vbase + FSL_MC_ERR_DETECT, ~0);

	res = edac_mc_add_mc_with_groups(mci, fsl_ddr_dev_groups);
	if (res) {
		edac_dbg(3, "failed edac_mc_add_mc()\n");
		goto err;
	}

	if (edac_op_state == EDAC_OPSTATE_INT) {
		ddr_out32(pdata->mc_vbase + FSL_MC_ERR_INT_EN,
			  DDR_EIE_MBEE | DDR_EIE_SBEE);

		/* store the original error management threshold */
		orig_ddr_err_sbe = ddr_in32(pdata->mc_vbase +
					    FSL_MC_ERR_SBE) & 0xff0000;

		/* set threshold to 1 error per interrupt */
		ddr_out32(pdata->mc_vbase + FSL_MC_ERR_SBE, 0x10000);

		/* register interrupts */
		pdata->irq = platform_get_irq(op, 0);
		res = devm_request_irq(&op->dev, pdata->irq,
				       fsl_mc_isr,
				       IRQF_SHARED,
				       "[EDAC] MC err", mci);
		if (res < 0) {
			pr_err("%s: Unable to request irq %d for FSL DDR DRAM ERR\n",
			       __func__, pdata->irq);
			/*
			 * NOTE(review): this discards devm_request_irq()'s
			 * specific error code in favour of -ENODEV —
			 * presumably intentional, but worth confirming.
			 */
			res = -ENODEV;
			goto err2;
		}

		pr_info(EDAC_MOD_STR " acquired irq %d for MC\n",
		       pdata->irq);
	}

	devres_remove_group(&op->dev, fsl_mc_err_probe);
	edac_dbg(3, "success\n");
	pr_info(EDAC_MOD_STR " MC err registered\n");

	return 0;

err2:
	/* MC was already added; unregister it before freeing. */
	edac_mc_del_mc(&op->dev);
err:
	devres_release_group(&op->dev, fsl_mc_err_probe);
	edac_mc_free(mci);
	return res;
}
617 
618 int fsl_mc_err_remove(struct platform_device *op)
619 {
620 	struct mem_ctl_info *mci = dev_get_drvdata(&op->dev);
621 	struct fsl_mc_pdata *pdata = mci->pvt_info;
622 
623 	edac_dbg(0, "\n");
624 
625 	if (edac_op_state == EDAC_OPSTATE_INT) {
626 		ddr_out32(pdata->mc_vbase + FSL_MC_ERR_INT_EN, 0);
627 	}
628 
629 	ddr_out32(pdata->mc_vbase + FSL_MC_ERR_DISABLE,
630 		  orig_ddr_err_disable);
631 	ddr_out32(pdata->mc_vbase + FSL_MC_ERR_SBE, orig_ddr_err_sbe);
632 
633 	edac_mc_del_mc(&op->dev);
634 	edac_mc_free(mci);
635 	return 0;
636 }
637