// SPDX-License-Identifier: GPL-2.0-only
/*
 * Freescale Memory Controller kernel module
 *
 * Support Power-based SoCs including MPC85xx, MPC86xx, MPC83xx and
 * ARM-based Layerscape SoCs including LS2xxx and LS1021A. Originally
 * split out from mpc85xx_edac EDAC driver.
 *
 * Parts Copyrighted (c) 2013 by Freescale Semiconductor, Inc.
 *
 * Author: Dave Jiang <djiang@mvista.com>
 *
 * 2006-2007 (c) MontaVista Software, Inc.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ctype.h>
#include <linux/io.h>
#include <linux/mod_devicetable.h>
#include <linux/edac.h>
#include <linux/smp.h>
#include <linux/gfp.h>

#include <linux/of.h>
#include <linux/of_address.h>
#include "edac_module.h"
#include "fsl_ddr_edac.h"

#define EDAC_MOD_STR	"fsl_ddr_edac"

static int edac_mc_idx;

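/*
 * Translate an offset in the common FSL DDR register map to a virtual
 * address.  On i.MX9 (TYPE_IMX9) the data/ECC error-injection registers
 * and the error-management registers live in a separate "inject" register
 * block, so those offsets are rebased onto pdata->inject_vbase; every
 * other supported controller uses a flat mapping from pdata->mc_vbase.
 */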
static inline void __iomem *ddr_reg_addr(struct fsl_mc_pdata *pdata, unsigned int off)
{
	if (pdata->flag == TYPE_IMX9 && off >= FSL_MC_DATA_ERR_INJECT_HI && off <= FSL_MC_ERR_SBE)
		return pdata->inject_vbase + off - FSL_MC_DATA_ERR_INJECT_HI
		       + IMX9_MC_DATA_ERR_INJECT_OFF;

	if (pdata->flag == TYPE_IMX9 && off >= IMX9_MC_ERR_EN)
		return pdata->inject_vbase + off - IMX9_MC_ERR_EN;

	return pdata->mc_vbase + off;
}

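/*
 * Register accessors: honour the controller endianness selected via the
 * "little-endian" device-tree property (big endian is the default).
 */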
static inline u32 ddr_in32(struct fsl_mc_pdata *pdata, unsigned int off)
{
	void __iomem *addr = ddr_reg_addr(pdata, off);

	return pdata->little_endian ? ioread32(addr) : ioread32be(addr);
}

static inline void ddr_out32(struct fsl_mc_pdata *pdata, unsigned int off, u32 value)
{
	void __iomem *addr = ddr_reg_addr(pdata, off);

	if (pdata->little_endian)
		iowrite32(value, addr);
	else
		iowrite32be(value, addr);
}

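/*
 * Debug-only sysfs knobs: when CONFIG_EDAC_DEBUG is set, the inject_data_hi,
 * inject_data_lo and inject_ctrl attributes below expose the controller's
 * error-injection registers so that ECC reporting can be exercised from
 * userspace.
 */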
#ifdef CONFIG_EDAC_DEBUG
/************************ MC SYSFS parts ***********************************/

#define to_mci(k) container_of(k, struct mem_ctl_info, dev)

static ssize_t fsl_mc_inject_data_hi_show(struct device *dev,
					  struct device_attribute *mattr,
					  char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct fsl_mc_pdata *pdata = mci->pvt_info;
	return sprintf(data, "0x%08x",
		       ddr_in32(pdata, FSL_MC_DATA_ERR_INJECT_HI));
}

static ssize_t fsl_mc_inject_data_lo_show(struct device *dev,
					  struct device_attribute *mattr,
					      char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct fsl_mc_pdata *pdata = mci->pvt_info;
	return sprintf(data, "0x%08x",
		       ddr_in32(pdata, FSL_MC_DATA_ERR_INJECT_LO));
}

static ssize_t fsl_mc_inject_ctrl_show(struct device *dev,
				       struct device_attribute *mattr,
					   char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct fsl_mc_pdata *pdata = mci->pvt_info;
	return sprintf(data, "0x%08x",
		       ddr_in32(pdata, FSL_MC_ECC_ERR_INJECT));
}

static ssize_t fsl_mc_inject_data_hi_store(struct device *dev,
					   struct device_attribute *mattr,
					       const char *data, size_t count)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct fsl_mc_pdata *pdata = mci->pvt_info;
	unsigned long val;
	int rc;

	if (isdigit(*data)) {
		rc = kstrtoul(data, 0, &val);
		if (rc)
			return rc;

		ddr_out32(pdata, FSL_MC_DATA_ERR_INJECT_HI, val);
		return count;
	}
	return 0;
}

static ssize_t fsl_mc_inject_data_lo_store(struct device *dev,
					   struct device_attribute *mattr,
					       const char *data, size_t count)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct fsl_mc_pdata *pdata = mci->pvt_info;
	unsigned long val;
	int rc;

	if (isdigit(*data)) {
		rc = kstrtoul(data, 0, &val);
		if (rc)
			return rc;

		ddr_out32(pdata, FSL_MC_DATA_ERR_INJECT_LO, val);
		return count;
	}
	return 0;
}

static ssize_t fsl_mc_inject_ctrl_store(struct device *dev,
					struct device_attribute *mattr,
					       const char *data, size_t count)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct fsl_mc_pdata *pdata = mci->pvt_info;
	unsigned long val;
	int rc;

	if (isdigit(*data)) {
		rc = kstrtoul(data, 0, &val);
		if (rc)
			return rc;

		ddr_out32(pdata, FSL_MC_ECC_ERR_INJECT, val);
		return count;
	}
	return 0;
}

static DEVICE_ATTR(inject_data_hi, S_IRUGO | S_IWUSR,
		   fsl_mc_inject_data_hi_show, fsl_mc_inject_data_hi_store);
static DEVICE_ATTR(inject_data_lo, S_IRUGO | S_IWUSR,
		   fsl_mc_inject_data_lo_show, fsl_mc_inject_data_lo_store);
static DEVICE_ATTR(inject_ctrl, S_IRUGO | S_IWUSR,
		   fsl_mc_inject_ctrl_show, fsl_mc_inject_ctrl_store);
#endif /* CONFIG_EDAC_DEBUG */

static struct attribute *fsl_ddr_dev_attrs[] = {
#ifdef CONFIG_EDAC_DEBUG
	&dev_attr_inject_data_hi.attr,
	&dev_attr_inject_data_lo.attr,
	&dev_attr_inject_ctrl.attr,
#endif
	NULL
};

ATTRIBUTE_GROUPS(fsl_ddr_dev);

/**************************** MC Err device ***************************/

/*
 * Taken from table 8-55 in the MPC8641 User's Manual and/or 9-61 in the
 * MPC8572 User's Manual.  Each line represents a syndrome bit column as a
 * 64-bit value, but split into an upper and lower 32-bit chunk.  The labels
 * below correspond to Freescale's manuals.
 */
static unsigned int ecc_table[16] = {
	/* MSB           LSB */
	/* [0:31]    [32:63] */
	0xf00fe11e, 0xc33c0ff7,	/* Syndrome bit 7 */
	0x00ff00ff, 0x00fff0ff,
	0x0f0f0f0f, 0x0f0fff00,
	0x11113333, 0x7777000f,
	0x22224444, 0x8888222f,
	0x44448888, 0xffff4441,
	0x8888ffff, 0x11118882,
	0xffff1111, 0x22221114,	/* Syndrome bit 0 */
};

/*
 * Calculate the correct ECC value for a 64-bit value specified by high:low
 */
static u8 calculate_ecc(u32 high, u32 low)
{
	u32 mask_low;
	u32 mask_high;
	int bit_cnt;
	u8 ecc = 0;
	int i;
	int j;

	for (i = 0; i < 8; i++) {
		mask_high = ecc_table[i * 2];
		mask_low = ecc_table[i * 2 + 1];
		bit_cnt = 0;

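		/*
		 * bit_cnt accumulates the parity (XOR) of every data bit
		 * selected by this syndrome column's 64-bit mask; that
		 * parity becomes bit i of the computed ECC byte.
		 */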
		for (j = 0; j < 32; j++) {
			if ((mask_high >> j) & 1)
				bit_cnt ^= (high >> j) & 1;
			if ((mask_low >> j) & 1)
				bit_cnt ^= (low >> j) & 1;
		}

		ecc |= bit_cnt << i;
	}

	return ecc;
}

/*
 * Create the syndrome code which is generated if the data line specified by
 * 'bit' failed, i.e. generate the 8-bit codes seen in Table 8-55 in the
 * MPC8641 User's Manual and Table 9-61 in the MPC8572 User's Manual.
 */
static u8 syndrome_from_bit(unsigned int bit)
{
	int i;
	u8 syndrome = 0;

	/*
	 * Cycle through the upper or lower 32-bit portion of each value in
	 * ecc_table depending on if 'bit' is in the upper or lower half of
	 * 64-bit data.
	 */
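	/*
	 * (bit < 32) evaluates to 1 for bits in the low data word, so the
	 * walk starts at the odd-indexed (low-word) mask entries and steps
	 * by two; bits 32-63 start at the even-indexed (high-word) entries.
	 */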
	for (i = bit < 32; i < 16; i += 2)
		syndrome |= ((ecc_table[i] >> (bit % 32)) & 1) << (i / 2);

	return syndrome;
}

/*
 * Decode data and ecc syndrome to determine what went wrong
 * Note: This can only decode single-bit errors
 */
static void sbe_ecc_decode(u32 cap_high, u32 cap_low, u32 cap_ecc,
		       int *bad_data_bit, int *bad_ecc_bit)
{
	int i;
	u8 syndrome;

	*bad_data_bit = -1;
	*bad_ecc_bit = -1;

	/*
	 * Calculate the ECC of the captured data and XOR it with the captured
	 * ECC to find an ECC syndrome value we can search for
	 */
	syndrome = calculate_ecc(cap_high, cap_low) ^ cap_ecc;

	/* Check if a data line is stuck... */
	for (i = 0; i < 64; i++) {
		if (syndrome == syndrome_from_bit(i)) {
			*bad_data_bit = i;
			return;
		}
	}

	/* If data is correct, check ECC bits for errors... */
	for (i = 0; i < 8; i++) {
		if ((syndrome >> i) & 0x1) {
			*bad_ecc_bit = i;
			return;
		}
	}
}

#define make64(high, low) (((u64)(high) << 32) | (low))

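/*
 * Read back and decode one error event.  This runs both as the EDAC poll
 * callback (mci->edac_check) and from fsl_mc_isr() when the driver operates
 * in interrupt mode.
 */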
static void fsl_mc_check(struct mem_ctl_info *mci)
{
	struct fsl_mc_pdata *pdata = mci->pvt_info;
	struct csrow_info *csrow;
	u32 bus_width;
	u32 err_detect;
	u32 syndrome;
	u64 err_addr;
	u32 pfn;
	int row_index;
	u32 cap_high;
	u32 cap_low;
	int bad_data_bit;
	int bad_ecc_bit;

	err_detect = ddr_in32(pdata, FSL_MC_ERR_DETECT);
	if (!err_detect)
		return;

	fsl_mc_printk(mci, KERN_ERR, "Err Detect Register: %#8.8x\n",
		      err_detect);

	/* no more processing if not ECC bit errors */
	if (!(err_detect & (DDR_EDE_SBE | DDR_EDE_MBE))) {
		ddr_out32(pdata, FSL_MC_ERR_DETECT, err_detect);
		return;
	}

	syndrome = ddr_in32(pdata, FSL_MC_CAPTURE_ECC);

	/* Mask off appropriate bits of syndrome based on bus width */
	bus_width = (ddr_in32(pdata, FSL_MC_DDR_SDRAM_CFG) &
		     DSC_DBW_MASK) ? 32 : 64;
	if (bus_width == 64)
		syndrome &= 0xff;
	else
		syndrome &= 0xffff;

	err_addr = make64(
		ddr_in32(pdata, FSL_MC_CAPTURE_EXT_ADDRESS),
		ddr_in32(pdata, FSL_MC_CAPTURE_ADDRESS));
	pfn = err_addr >> PAGE_SHIFT;

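	/*
	 * Map the faulting page frame number to the chip select (csrow)
	 * whose page range contains it; row_index == nr_csrows afterwards
	 * means the address did not match any populated chip select.
	 */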
	for (row_index = 0; row_index < mci->nr_csrows; row_index++) {
		csrow = mci->csrows[row_index];
		if ((pfn >= csrow->first_page) && (pfn <= csrow->last_page))
			break;
	}

	cap_high = ddr_in32(pdata, FSL_MC_CAPTURE_DATA_HI);
	cap_low = ddr_in32(pdata, FSL_MC_CAPTURE_DATA_LO);

	/*
	 * Analyze single-bit errors on 64-bit wide buses
	 * TODO: Add support for 32-bit wide buses
	 */
	if ((err_detect & DDR_EDE_SBE) && (bus_width == 64)) {
		u64 cap = (u64)cap_high << 32 | cap_low;
		u32 s = syndrome;

		sbe_ecc_decode(cap_high, cap_low, syndrome,
				&bad_data_bit, &bad_ecc_bit);

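		/*
		 * Flip the identified bad bit back so the log can show the
		 * expected (corrected) data or ECC value alongside the
		 * captured one.
		 */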
		if (bad_data_bit >= 0) {
			fsl_mc_printk(mci, KERN_ERR, "Faulty Data bit: %d\n", bad_data_bit);
			cap ^= 1ULL << bad_data_bit;
		}

		if (bad_ecc_bit >= 0) {
			fsl_mc_printk(mci, KERN_ERR, "Faulty ECC bit: %d\n", bad_ecc_bit);
			s ^= 1 << bad_ecc_bit;
		}

		fsl_mc_printk(mci, KERN_ERR,
			"Expected Data / ECC:\t%#8.8x_%08x / %#2.2x\n",
			upper_32_bits(cap), lower_32_bits(cap), s);
	}

	fsl_mc_printk(mci, KERN_ERR,
			"Captured Data / ECC:\t%#8.8x_%08x / %#2.2x\n",
			cap_high, cap_low, syndrome);
	fsl_mc_printk(mci, KERN_ERR, "Err addr: %#8.8llx\n", err_addr);
	fsl_mc_printk(mci, KERN_ERR, "PFN: %#8.8x\n", pfn);

	/* we are out of range */
	if (row_index == mci->nr_csrows)
		fsl_mc_printk(mci, KERN_ERR, "PFN out of range!\n");

	if (err_detect & DDR_EDE_SBE)
		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
				     pfn, err_addr & ~PAGE_MASK, syndrome,
				     row_index, 0, -1,
				     mci->ctl_name, "");

	if (err_detect & DDR_EDE_MBE)
		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
				     pfn, err_addr & ~PAGE_MASK, syndrome,
				     row_index, 0, -1,
				     mci->ctl_name, "");

	ddr_out32(pdata, FSL_MC_ERR_DETECT, err_detect);
}

static irqreturn_t fsl_mc_isr(int irq, void *dev_id)
{
	struct mem_ctl_info *mci = dev_id;
	struct fsl_mc_pdata *pdata = mci->pvt_info;
	u32 err_detect;

	err_detect = ddr_in32(pdata, FSL_MC_ERR_DETECT);
	if (!err_detect)
		return IRQ_NONE;

	fsl_mc_check(mci);

	return IRQ_HANDLED;
}

static void fsl_ddr_init_csrows(struct mem_ctl_info *mci)
{
	struct fsl_mc_pdata *pdata = mci->pvt_info;
	struct csrow_info *csrow;
	struct dimm_info *dimm;
	u32 sdram_ctl;
	u32 sdtype;
	enum mem_type mtype;
	u32 cs_bnds;
	int index;

	sdram_ctl = ddr_in32(pdata, FSL_MC_DDR_SDRAM_CFG);

	sdtype = sdram_ctl & DSC_SDTYPE_MASK;
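	/*
	 * DSC_RD_EN set means registered DIMMs, so report the registered
	 * variant (MEM_RDDRx) of whatever SDRAM type the controller is
	 * configured for; otherwise report the unbuffered variant.
	 */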
	if (sdram_ctl & DSC_RD_EN) {
		switch (sdtype) {
		case 0x02000000:
			mtype = MEM_RDDR;
			break;
		case 0x03000000:
			mtype = MEM_RDDR2;
			break;
		case 0x07000000:
			mtype = MEM_RDDR3;
			break;
		case 0x05000000:
			mtype = MEM_RDDR4;
			break;
		default:
			mtype = MEM_UNKNOWN;
			break;
		}
	} else {
		switch (sdtype) {
		case 0x02000000:
			mtype = MEM_DDR;
			break;
		case 0x03000000:
			mtype = MEM_DDR2;
			break;
		case 0x07000000:
			mtype = MEM_DDR3;
			break;
		case 0x05000000:
			mtype = MEM_DDR4;
			break;
		case 0x04000000:
			mtype = MEM_LPDDR4;
			break;
		default:
			mtype = MEM_UNKNOWN;
			break;
		}
	}

	for (index = 0; index < mci->nr_csrows; index++) {
		u32 start;
		u32 end;

		csrow = mci->csrows[index];
		dimm = csrow->channels[0]->dimm;

		cs_bnds = ddr_in32(pdata, FSL_MC_CS_BNDS_0 +
				   (index * FSL_MC_CS_BNDS_OFS));

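		/*
		 * The CS_BNDS fields give the chip-select start and end
		 * addresses in 2^24 byte (16 MiB) units; convert them to
		 * page numbers for the EDAC core.
		 */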
		start = (cs_bnds & 0xffff0000) >> 16;
		end   = (cs_bnds & 0x0000ffff);

		if (start == end)
			continue;	/* not populated */

		start <<= (24 - PAGE_SHIFT);
		end   <<= (24 - PAGE_SHIFT);
		end    |= (1 << (24 - PAGE_SHIFT)) - 1;

		csrow->first_page = start;
		csrow->last_page = end;

		dimm->nr_pages = end + 1 - start;
		dimm->grain = 8;
		dimm->mtype = mtype;
		dimm->dtype = DEV_UNKNOWN;
		if (pdata->flag == TYPE_IMX9)
			dimm->dtype = DEV_X16;
		else if (sdram_ctl & DSC_X32_EN)
			dimm->dtype = DEV_X32;
		dimm->edac_mode = EDAC_SECDED;
	}
}

int fsl_mc_err_probe(struct platform_device *op)
{
	struct mem_ctl_info *mci;
	struct edac_mc_layer layers[2];
	struct fsl_mc_pdata *pdata;
	struct resource r;
	u32 ecc_en_mask;
	u32 sdram_ctl;
	int res;

	if (!devres_open_group(&op->dev, fsl_mc_err_probe, GFP_KERNEL))
		return -ENOMEM;

	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
	layers[0].size = 4;
	layers[0].is_virt_csrow = true;
	layers[1].type = EDAC_MC_LAYER_CHANNEL;
	layers[1].size = 1;
	layers[1].is_virt_csrow = false;
	mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), layers,
			    sizeof(*pdata));
	if (!mci) {
		devres_release_group(&op->dev, fsl_mc_err_probe);
		return -ENOMEM;
	}

	pdata = mci->pvt_info;
	pdata->name = "fsl_mc_err";
	mci->pdev = &op->dev;
	pdata->edac_idx = edac_mc_idx++;
	dev_set_drvdata(mci->pdev, mci);
	mci->ctl_name = pdata->name;
	mci->dev_name = pdata->name;

	pdata->flag = (unsigned long)device_get_match_data(&op->dev);

	/*
	 * Get the endianness of DDR controller registers.
	 * Default is big endian.
	 */
	pdata->little_endian = of_property_read_bool(op->dev.of_node, "little-endian");

	res = of_address_to_resource(op->dev.of_node, 0, &r);
	if (res) {
		pr_err("%s: Unable to get resource for MC err regs\n",
		       __func__);
		goto err;
	}

	if (!devm_request_mem_region(&op->dev, r.start, resource_size(&r),
				     pdata->name)) {
		pr_err("%s: Error while requesting mem region\n",
		       __func__);
		res = -EBUSY;
		goto err;
	}

	pdata->mc_vbase = devm_ioremap(&op->dev, r.start, resource_size(&r));
	if (!pdata->mc_vbase) {
		pr_err("%s: Unable to setup MC err regs\n", __func__);
		res = -ENOMEM;
		goto err;
	}

	if (pdata->flag == TYPE_IMX9) {
		pdata->inject_vbase = devm_platform_ioremap_resource_byname(op, "inject");
		if (IS_ERR(pdata->inject_vbase)) {
			res = -ENOMEM;
			goto err;
		}
	}

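	/*
	 * Bail out early when ECC is not enabled.  On i.MX9 this is signalled
	 * by ERR_ECC_EN and ERR_INLINE_ECC in the dedicated error-enable
	 * register; the other controllers report it via DSC_ECC_EN in
	 * DDR_SDRAM_CFG.
	 */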
	if (pdata->flag == TYPE_IMX9) {
		sdram_ctl = ddr_in32(pdata, IMX9_MC_ERR_EN);
		ecc_en_mask = ERR_ECC_EN | ERR_INLINE_ECC;
	} else {
		sdram_ctl = ddr_in32(pdata, FSL_MC_DDR_SDRAM_CFG);
		ecc_en_mask = DSC_ECC_EN;
	}

	if ((sdram_ctl & ecc_en_mask) != ecc_en_mask) {
		/* no ECC */
		pr_warn("%s: No ECC DIMMs discovered\n", __func__);
		res = -ENODEV;
		goto err;
	}

	edac_dbg(3, "init mci\n");
	mci->mtype_cap = MEM_FLAG_DDR | MEM_FLAG_RDDR |
			 MEM_FLAG_DDR2 | MEM_FLAG_RDDR2 |
			 MEM_FLAG_DDR3 | MEM_FLAG_RDDR3 |
			 MEM_FLAG_DDR4 | MEM_FLAG_RDDR4 |
			 MEM_FLAG_LPDDR4;
	mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
	mci->edac_cap = EDAC_FLAG_SECDED;
	mci->mod_name = EDAC_MOD_STR;

	if (edac_op_state == EDAC_OPSTATE_POLL)
		mci->edac_check = fsl_mc_check;

	mci->ctl_page_to_phys = NULL;

	mci->scrub_mode = SCRUB_SW_SRC;

	fsl_ddr_init_csrows(mci);

	/* store the original error disable bits */
	pdata->orig_ddr_err_disable = ddr_in32(pdata, FSL_MC_ERR_DISABLE);
	ddr_out32(pdata, FSL_MC_ERR_DISABLE, 0);

	/* clear all error bits */
	ddr_out32(pdata, FSL_MC_ERR_DETECT, ~0);

	res = edac_mc_add_mc_with_groups(mci, fsl_ddr_dev_groups);
	if (res) {
		edac_dbg(3, "failed edac_mc_add_mc()\n");
		goto err;
	}

	if (edac_op_state == EDAC_OPSTATE_INT) {
		ddr_out32(pdata, FSL_MC_ERR_INT_EN,
			  DDR_EIE_MBEE | DDR_EIE_SBEE);

		/* store the original error management threshold */
		pdata->orig_ddr_err_sbe = ddr_in32(pdata,
						   FSL_MC_ERR_SBE) & 0xff0000;

		/* set threshold to 1 error per interrupt */
		ddr_out32(pdata, FSL_MC_ERR_SBE, 0x10000);

		/* register interrupts */
		pdata->irq = platform_get_irq(op, 0);
		res = devm_request_irq(&op->dev, pdata->irq,
				       fsl_mc_isr,
				       IRQF_SHARED,
				       "[EDAC] MC err", mci);
		if (res < 0) {
			pr_err("%s: Unable to request irq %d for FSL DDR DRAM ERR\n",
			       __func__, pdata->irq);
			res = -ENODEV;
			goto err2;
		}

		pr_info(EDAC_MOD_STR " acquired irq %d for MC\n",
		       pdata->irq);
	}

	devres_remove_group(&op->dev, fsl_mc_err_probe);
	edac_dbg(3, "success\n");
	pr_info(EDAC_MOD_STR " MC err registered\n");

	return 0;

err2:
	edac_mc_del_mc(&op->dev);
err:
	devres_release_group(&op->dev, fsl_mc_err_probe);
	edac_mc_free(mci);
	return res;
}

void fsl_mc_err_remove(struct platform_device *op)
{
	struct mem_ctl_info *mci = dev_get_drvdata(&op->dev);
	struct fsl_mc_pdata *pdata = mci->pvt_info;

	edac_dbg(0, "\n");

	if (edac_op_state == EDAC_OPSTATE_INT) {
		ddr_out32(pdata, FSL_MC_ERR_INT_EN, 0);
	}

	ddr_out32(pdata, FSL_MC_ERR_DISABLE,
		  pdata->orig_ddr_err_disable);
	ddr_out32(pdata, FSL_MC_ERR_SBE, pdata->orig_ddr_err_sbe);

	edac_mc_del_mc(&op->dev);
	edac_mc_free(mci);
}