fsl_ddr_edac.c (Linux v6.13.7)
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Freescale Memory Controller kernel module
  4 *
  5 * Support Power-based SoCs including MPC85xx, MPC86xx, MPC83xx and
  6 * ARM-based Layerscape SoCs including LS2xxx and LS1021A. Originally
  7 * split out from mpc85xx_edac EDAC driver.
  8 *
  9 * Parts Copyrighted (c) 2013 by Freescale Semiconductor, Inc.
 10 *
 11 * Author: Dave Jiang <djiang@mvista.com>
 12 *
 13 * 2006-2007 (c) MontaVista Software, Inc.
 14 */
 15#include <linux/module.h>
 16#include <linux/init.h>
 17#include <linux/interrupt.h>
 18#include <linux/ctype.h>
 19#include <linux/io.h>
 20#include <linux/mod_devicetable.h>
 21#include <linux/edac.h>
 22#include <linux/smp.h>
 23#include <linux/gfp.h>
 24
 25#include <linux/of.h>
 26#include <linux/of_address.h>
 27#include "edac_module.h"
 28#include "fsl_ddr_edac.h"
 29
 30#define EDAC_MOD_STR	"fsl_ddr_edac"
 31
 32static int edac_mc_idx;
 33
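/*
 * Register-address helper: on i.MX9 parts the controller registers are
 * split across two MMIO regions.  Offsets from FSL_MC_DATA_ERR_INJECT_HI
 * through FSL_MC_ERR_SBE are redirected into the separate "inject" region
 * at IMX9_MC_DATA_ERR_INJECT_OFF, offsets from IMX9_MC_ERR_EN upwards map
 * from the start of that same region, and all other offsets use the
 * regular mc_vbase mapping shared with the older Power/Layerscape parts.
 */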
 34static inline void __iomem *ddr_reg_addr(struct fsl_mc_pdata *pdata, unsigned int off)
 35{
 36	if (pdata->flag == TYPE_IMX9 && off >= FSL_MC_DATA_ERR_INJECT_HI && off <= FSL_MC_ERR_SBE)
 37		return pdata->inject_vbase + off - FSL_MC_DATA_ERR_INJECT_HI
 38		       + IMX9_MC_DATA_ERR_INJECT_OFF;
 39
 40	if (pdata->flag == TYPE_IMX9 && off >= IMX9_MC_ERR_EN)
 41		return pdata->inject_vbase + off - IMX9_MC_ERR_EN;
 42
 43	return pdata->mc_vbase + off;
 44}
 45
 46static inline u32 ddr_in32(struct fsl_mc_pdata *pdata, unsigned int off)
 47{
 48	void __iomem *addr = ddr_reg_addr(pdata, off);
 49
 50	return pdata->little_endian ? ioread32(addr) : ioread32be(addr);
 51}
 52
 53static inline void ddr_out32(struct fsl_mc_pdata *pdata, unsigned int off, u32 value)
 54{
 55	void __iomem *addr = ddr_reg_addr(pdata, off);
 56
 57	if (pdata->little_endian)
 58		iowrite32(value, addr);
 59	else
 60		iowrite32be(value, addr);
 61}
 62
 63#ifdef CONFIG_EDAC_DEBUG
 64/************************ MC SYSFS parts ***********************************/
 65
 66#define to_mci(k) container_of(k, struct mem_ctl_info, dev)
 67
 68static ssize_t fsl_mc_inject_data_hi_show(struct device *dev,
 69					  struct device_attribute *mattr,
 70					  char *data)
 71{
 72	struct mem_ctl_info *mci = to_mci(dev);
 73	struct fsl_mc_pdata *pdata = mci->pvt_info;
 74	return sprintf(data, "0x%08x",
 75		       ddr_in32(pdata, FSL_MC_DATA_ERR_INJECT_HI));
 76}
 77
 78static ssize_t fsl_mc_inject_data_lo_show(struct device *dev,
 79					  struct device_attribute *mattr,
 80					      char *data)
 81{
 82	struct mem_ctl_info *mci = to_mci(dev);
 83	struct fsl_mc_pdata *pdata = mci->pvt_info;
 84	return sprintf(data, "0x%08x",
 85		       ddr_in32(pdata, FSL_MC_DATA_ERR_INJECT_LO));
 86}
 87
 88static ssize_t fsl_mc_inject_ctrl_show(struct device *dev,
 89				       struct device_attribute *mattr,
 90					   char *data)
 91{
 92	struct mem_ctl_info *mci = to_mci(dev);
 93	struct fsl_mc_pdata *pdata = mci->pvt_info;
 94	return sprintf(data, "0x%08x",
 95		       ddr_in32(pdata, FSL_MC_ECC_ERR_INJECT));
 96}
 97
 98static ssize_t fsl_mc_inject_data_hi_store(struct device *dev,
 99					   struct device_attribute *mattr,
100					       const char *data, size_t count)
101{
102	struct mem_ctl_info *mci = to_mci(dev);
103	struct fsl_mc_pdata *pdata = mci->pvt_info;
104	unsigned long val;
105	int rc;
106
107	if (isdigit(*data)) {
108		rc = kstrtoul(data, 0, &val);
109		if (rc)
110			return rc;
111
112		ddr_out32(pdata, FSL_MC_DATA_ERR_INJECT_HI, val);
113		return count;
114	}
115	return 0;
116}
117
118static ssize_t fsl_mc_inject_data_lo_store(struct device *dev,
119					   struct device_attribute *mattr,
120					       const char *data, size_t count)
121{
122	struct mem_ctl_info *mci = to_mci(dev);
123	struct fsl_mc_pdata *pdata = mci->pvt_info;
124	unsigned long val;
125	int rc;
126
127	if (isdigit(*data)) {
128		rc = kstrtoul(data, 0, &val);
129		if (rc)
130			return rc;
131
132		ddr_out32(pdata, FSL_MC_DATA_ERR_INJECT_LO, val);
133		return count;
134	}
135	return 0;
136}
137
138static ssize_t fsl_mc_inject_ctrl_store(struct device *dev,
139					struct device_attribute *mattr,
140					       const char *data, size_t count)
141{
142	struct mem_ctl_info *mci = to_mci(dev);
143	struct fsl_mc_pdata *pdata = mci->pvt_info;
144	unsigned long val;
145	int rc;
146
147	if (isdigit(*data)) {
148		rc = kstrtoul(data, 0, &val);
149		if (rc)
150			return rc;
151
152		ddr_out32(pdata, FSL_MC_ECC_ERR_INJECT, val);
153		return count;
154	}
155	return 0;
156}
157
158static DEVICE_ATTR(inject_data_hi, S_IRUGO | S_IWUSR,
159		   fsl_mc_inject_data_hi_show, fsl_mc_inject_data_hi_store);
160static DEVICE_ATTR(inject_data_lo, S_IRUGO | S_IWUSR,
161		   fsl_mc_inject_data_lo_show, fsl_mc_inject_data_lo_store);
162static DEVICE_ATTR(inject_ctrl, S_IRUGO | S_IWUSR,
163		   fsl_mc_inject_ctrl_show, fsl_mc_inject_ctrl_store);
164#endif /* CONFIG_EDAC_DEBUG */
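/*
 * The inject_* attributes above exist only with CONFIG_EDAC_DEBUG.  They
 * are ordinary device attributes on the mem_ctl_info device, so on a
 * typical EDAC setup they would appear under
 * /sys/devices/system/edac/mc/mc<N>/ (the exact path depends on the sysfs
 * layout) and can be written to seed FSL_MC_DATA_ERR_INJECT_{HI,LO} and
 * FSL_MC_ECC_ERR_INJECT for error-injection testing.
 */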
165
166static struct attribute *fsl_ddr_dev_attrs[] = {
167#ifdef CONFIG_EDAC_DEBUG
168	&dev_attr_inject_data_hi.attr,
169	&dev_attr_inject_data_lo.attr,
170	&dev_attr_inject_ctrl.attr,
171#endif
172	NULL
173};
174
175ATTRIBUTE_GROUPS(fsl_ddr_dev);
176
177/**************************** MC Err device ***************************/
178
179/*
180 * Taken from table 8-55 in the MPC8641 User's Manual and/or 9-61 in the
181 * MPC8572 User's Manual.  Each line represents a syndrome bit column as a
182 * 64-bit value, but split into an upper and lower 32-bit chunk.  The labels
183 * below correspond to Freescale's manuals.
184 */
185static unsigned int ecc_table[16] = {
186	/* MSB           LSB */
187	/* [0:31]    [32:63] */
188	0xf00fe11e, 0xc33c0ff7,	/* Syndrome bit 7 */
189	0x00ff00ff, 0x00fff0ff,
190	0x0f0f0f0f, 0x0f0fff00,
191	0x11113333, 0x7777000f,
192	0x22224444, 0x8888222f,
193	0x44448888, 0xffff4441,
194	0x8888ffff, 0x11118882,
195	0xffff1111, 0x22221114,	/* Syndrome bit 0 */
196};
197
198/*
199 * Calculate the correct ECC value for a 64-bit value specified by high:low
200 */
201static u8 calculate_ecc(u32 high, u32 low)
202{
203	u32 mask_low;
204	u32 mask_high;
205	int bit_cnt;
206	u8 ecc = 0;
207	int i;
208	int j;
209
210	for (i = 0; i < 8; i++) {
211		mask_high = ecc_table[i * 2];
212		mask_low = ecc_table[i * 2 + 1];
213		bit_cnt = 0;
214
215		for (j = 0; j < 32; j++) {
216			if ((mask_high >> j) & 1)
217				bit_cnt ^= (high >> j) & 1;
218			if ((mask_low >> j) & 1)
219				bit_cnt ^= (low >> j) & 1;
220		}
221
222		ecc |= bit_cnt << i;
223	}
224
225	return ecc;
226}
227
228/*
229 * Create the syndrome code which is generated if the data line specified by
 230 * 'bit' failed.  E.g. generate the 8-bit code seen in Table 8-55 in the MPC8641
231 * User's Manual and 9-61 in the MPC8572 User's Manual.
232 */
233static u8 syndrome_from_bit(unsigned int bit) {
234	int i;
235	u8 syndrome = 0;
236
237	/*
238	 * Cycle through the upper or lower 32-bit portion of each value in
239	 * ecc_table depending on if 'bit' is in the upper or lower half of
240	 * 64-bit data.
241	 */
242	for (i = bit < 32; i < 16; i += 2)
243		syndrome |= ((ecc_table[i] >> (bit % 32)) & 1) << (i / 2);
244
245	return syndrome;
246}
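/*
 * Illustrative note (derived from ecc_table above): the ECC is a linear
 * parity code, so flipping a single data bit XORs that bit's column of
 * ecc_table into the check bits.  The syndrome built in sbe_ecc_decode()
 * below (recalculated ECC ^ captured ECC) therefore equals
 * syndrome_from_bit() of the flipped bit; e.g. a flip of bit 0 of the
 * captured low word should yield syndrome 0x3b, the least-significant
 * column of the odd-indexed table entries.
 */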
247
248/*
249 * Decode data and ecc syndrome to determine what went wrong
250 * Note: This can only decode single-bit errors
251 */
252static void sbe_ecc_decode(u32 cap_high, u32 cap_low, u32 cap_ecc,
253		       int *bad_data_bit, int *bad_ecc_bit)
254{
255	int i;
256	u8 syndrome;
257
258	*bad_data_bit = -1;
259	*bad_ecc_bit = -1;
260
261	/*
262	 * Calculate the ECC of the captured data and XOR it with the captured
263	 * ECC to find an ECC syndrome value we can search for
264	 */
265	syndrome = calculate_ecc(cap_high, cap_low) ^ cap_ecc;
266
267	/* Check if a data line is stuck... */
268	for (i = 0; i < 64; i++) {
269		if (syndrome == syndrome_from_bit(i)) {
270			*bad_data_bit = i;
271			return;
272		}
273	}
274
275	/* If data is correct, check ECC bits for errors... */
276	for (i = 0; i < 8; i++) {
277		if ((syndrome >> i) & 0x1) {
278			*bad_ecc_bit = i;
279			return;
280		}
281	}
282}
283
284#define make64(high, low) (((u64)(high) << 32) | (low))
285
286static void fsl_mc_check(struct mem_ctl_info *mci)
287{
288	struct fsl_mc_pdata *pdata = mci->pvt_info;
289	struct csrow_info *csrow;
290	u32 bus_width;
291	u32 err_detect;
292	u32 syndrome;
293	u64 err_addr;
294	u32 pfn;
295	int row_index;
296	u32 cap_high;
297	u32 cap_low;
298	int bad_data_bit;
299	int bad_ecc_bit;
300
301	err_detect = ddr_in32(pdata, FSL_MC_ERR_DETECT);
302	if (!err_detect)
303		return;
304
305	fsl_mc_printk(mci, KERN_ERR, "Err Detect Register: %#8.8x\n",
306		      err_detect);
307
308	/* no more processing if not ECC bit errors */
309	if (!(err_detect & (DDR_EDE_SBE | DDR_EDE_MBE))) {
310		ddr_out32(pdata, FSL_MC_ERR_DETECT, err_detect);
311		return;
312	}
313
314	syndrome = ddr_in32(pdata, FSL_MC_CAPTURE_ECC);
315
316	/* Mask off appropriate bits of syndrome based on bus width */
317	bus_width = (ddr_in32(pdata, FSL_MC_DDR_SDRAM_CFG) &
318		     DSC_DBW_MASK) ? 32 : 64;
319	if (bus_width == 64)
320		syndrome &= 0xff;
321	else
322		syndrome &= 0xffff;
323
324	err_addr = make64(
325		ddr_in32(pdata, FSL_MC_CAPTURE_EXT_ADDRESS),
326		ddr_in32(pdata, FSL_MC_CAPTURE_ADDRESS));
327	pfn = err_addr >> PAGE_SHIFT;
328
329	for (row_index = 0; row_index < mci->nr_csrows; row_index++) {
330		csrow = mci->csrows[row_index];
331		if ((pfn >= csrow->first_page) && (pfn <= csrow->last_page))
332			break;
333	}
334
335	cap_high = ddr_in32(pdata, FSL_MC_CAPTURE_DATA_HI);
336	cap_low = ddr_in32(pdata, FSL_MC_CAPTURE_DATA_LO);
337
338	/*
339	 * Analyze single-bit errors on 64-bit wide buses
340	 * TODO: Add support for 32-bit wide buses
341	 */
342	if ((err_detect & DDR_EDE_SBE) && (bus_width == 64)) {
343		u64 cap = (u64)cap_high << 32 | cap_low;
344		u32 s = syndrome;
345
346		sbe_ecc_decode(cap_high, cap_low, syndrome,
347				&bad_data_bit, &bad_ecc_bit);
348
349		if (bad_data_bit >= 0) {
350			fsl_mc_printk(mci, KERN_ERR, "Faulty Data bit: %d\n", bad_data_bit);
351			cap ^= 1ULL << bad_data_bit;
352		}
353
354		if (bad_ecc_bit >= 0) {
355			fsl_mc_printk(mci, KERN_ERR, "Faulty ECC bit: %d\n", bad_ecc_bit);
356			s ^= 1 << bad_ecc_bit;
357		}
358
359		fsl_mc_printk(mci, KERN_ERR,
360			"Expected Data / ECC:\t%#8.8x_%08x / %#2.2x\n",
361			upper_32_bits(cap), lower_32_bits(cap), s);
362	}
363
364	fsl_mc_printk(mci, KERN_ERR,
365			"Captured Data / ECC:\t%#8.8x_%08x / %#2.2x\n",
366			cap_high, cap_low, syndrome);
367	fsl_mc_printk(mci, KERN_ERR, "Err addr: %#8.8llx\n", err_addr);
368	fsl_mc_printk(mci, KERN_ERR, "PFN: %#8.8x\n", pfn);
369
370	/* we are out of range */
371	if (row_index == mci->nr_csrows)
372		fsl_mc_printk(mci, KERN_ERR, "PFN out of range!\n");
373
374	if (err_detect & DDR_EDE_SBE)
375		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
376				     pfn, err_addr & ~PAGE_MASK, syndrome,
377				     row_index, 0, -1,
378				     mci->ctl_name, "");
379
380	if (err_detect & DDR_EDE_MBE)
381		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
382				     pfn, err_addr & ~PAGE_MASK, syndrome,
383				     row_index, 0, -1,
384				     mci->ctl_name, "");
385
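	/*
	 * Writing the captured status bits back to FSL_MC_ERR_DETECT
	 * acknowledges them; the register appears to be write-one-to-clear,
	 * which is also how the probe path clears it by writing ~0.
	 */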
386	ddr_out32(pdata, FSL_MC_ERR_DETECT, err_detect);
387}
388
389static irqreturn_t fsl_mc_isr(int irq, void *dev_id)
390{
391	struct mem_ctl_info *mci = dev_id;
392	struct fsl_mc_pdata *pdata = mci->pvt_info;
393	u32 err_detect;
394
395	err_detect = ddr_in32(pdata, FSL_MC_ERR_DETECT);
396	if (!err_detect)
397		return IRQ_NONE;
398
399	fsl_mc_check(mci);
400
401	return IRQ_HANDLED;
402}
403
404static void fsl_ddr_init_csrows(struct mem_ctl_info *mci)
405{
406	struct fsl_mc_pdata *pdata = mci->pvt_info;
407	struct csrow_info *csrow;
408	struct dimm_info *dimm;
409	u32 sdram_ctl;
410	u32 sdtype;
411	enum mem_type mtype;
412	u32 cs_bnds;
413	int index;
414
415	sdram_ctl = ddr_in32(pdata, FSL_MC_DDR_SDRAM_CFG);
416
417	sdtype = sdram_ctl & DSC_SDTYPE_MASK;
418	if (sdram_ctl & DSC_RD_EN) {
419		switch (sdtype) {
420		case 0x02000000:
421			mtype = MEM_RDDR;
422			break;
423		case 0x03000000:
424			mtype = MEM_RDDR2;
425			break;
426		case 0x07000000:
427			mtype = MEM_RDDR3;
428			break;
429		case 0x05000000:
430			mtype = MEM_RDDR4;
431			break;
432		default:
433			mtype = MEM_UNKNOWN;
434			break;
435		}
436	} else {
437		switch (sdtype) {
438		case 0x02000000:
439			mtype = MEM_DDR;
440			break;
441		case 0x03000000:
442			mtype = MEM_DDR2;
443			break;
444		case 0x07000000:
445			mtype = MEM_DDR3;
446			break;
447		case 0x05000000:
448			mtype = MEM_DDR4;
449			break;
450		case 0x04000000:
451			mtype = MEM_LPDDR4;
452			break;
453		default:
454			mtype = MEM_UNKNOWN;
455			break;
456		}
457	}
458
459	for (index = 0; index < mci->nr_csrows; index++) {
460		u32 start;
461		u32 end;
462
463		csrow = mci->csrows[index];
464		dimm = csrow->channels[0]->dimm;
465
466		cs_bnds = ddr_in32(pdata, FSL_MC_CS_BNDS_0 +
467				   (index * FSL_MC_CS_BNDS_OFS));
468
469		start = (cs_bnds & 0xffff0000) >> 16;
470		end   = (cs_bnds & 0x0000ffff);
471
472		if (start == end)
473			continue;	/* not populated */
474
475		start <<= (24 - PAGE_SHIFT);
476		end   <<= (24 - PAGE_SHIFT);
477		end    |= (1 << (24 - PAGE_SHIFT)) - 1;
478
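		/*
		 * CS_BNDS holds the chip-select start/end addresses as
		 * 16-bit fields in 16 MiB (1 << 24) units; the shifts above
		 * convert those bounds to page numbers.  E.g. with 4 KiB
		 * pages, a bounds pair of 0x0000/0x0003 (0..64 MiB) becomes
		 * pages 0 through 0x3fff, i.e. 16384 pages.
		 */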
479		csrow->first_page = start;
480		csrow->last_page = end;
481
482		dimm->nr_pages = end + 1 - start;
483		dimm->grain = 8;
484		dimm->mtype = mtype;
485		dimm->dtype = DEV_UNKNOWN;
486		if (pdata->flag == TYPE_IMX9)
487			dimm->dtype = DEV_X16;
488		else if (sdram_ctl & DSC_X32_EN)
489			dimm->dtype = DEV_X32;
490		dimm->edac_mode = EDAC_SECDED;
491	}
492}
493
494int fsl_mc_err_probe(struct platform_device *op)
495{
496	struct mem_ctl_info *mci;
497	struct edac_mc_layer layers[2];
498	struct fsl_mc_pdata *pdata;
499	struct resource r;
500	u32 ecc_en_mask;
501	u32 sdram_ctl;
502	int res;
503
504	if (!devres_open_group(&op->dev, fsl_mc_err_probe, GFP_KERNEL))
505		return -ENOMEM;
506
507	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
508	layers[0].size = 4;
509	layers[0].is_virt_csrow = true;
510	layers[1].type = EDAC_MC_LAYER_CHANNEL;
511	layers[1].size = 1;
512	layers[1].is_virt_csrow = false;
513	mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), layers,
514			    sizeof(*pdata));
515	if (!mci) {
516		devres_release_group(&op->dev, fsl_mc_err_probe);
517		return -ENOMEM;
518	}
519
520	pdata = mci->pvt_info;
521	pdata->name = "fsl_mc_err";
522	mci->pdev = &op->dev;
523	pdata->edac_idx = edac_mc_idx++;
524	dev_set_drvdata(mci->pdev, mci);
525	mci->ctl_name = pdata->name;
526	mci->dev_name = pdata->name;
527
528	pdata->flag = (unsigned long)device_get_match_data(&op->dev);
529
530	/*
531	 * Get the endianness of DDR controller registers.
532	 * Default is big endian.
533	 */
534	pdata->little_endian = of_property_read_bool(op->dev.of_node, "little-endian");
535
536	res = of_address_to_resource(op->dev.of_node, 0, &r);
537	if (res) {
538		pr_err("%s: Unable to get resource for MC err regs\n",
539		       __func__);
540		goto err;
541	}
542
543	if (!devm_request_mem_region(&op->dev, r.start, resource_size(&r),
544				     pdata->name)) {
545		pr_err("%s: Error while requesting mem region\n",
546		       __func__);
547		res = -EBUSY;
548		goto err;
549	}
550
551	pdata->mc_vbase = devm_ioremap(&op->dev, r.start, resource_size(&r));
552	if (!pdata->mc_vbase) {
553		pr_err("%s: Unable to setup MC err regs\n", __func__);
554		res = -ENOMEM;
555		goto err;
556	}
557
558	if (pdata->flag == TYPE_IMX9) {
559		pdata->inject_vbase = devm_platform_ioremap_resource_byname(op, "inject");
560		if (IS_ERR(pdata->inject_vbase)) {
561			res = -ENOMEM;
562			goto err;
563		}
564	}
565
566	if (pdata->flag == TYPE_IMX9) {
567		sdram_ctl = ddr_in32(pdata, IMX9_MC_ERR_EN);
568		ecc_en_mask = ERR_ECC_EN | ERR_INLINE_ECC;
569	} else {
570		sdram_ctl = ddr_in32(pdata, FSL_MC_DDR_SDRAM_CFG);
571		ecc_en_mask = DSC_ECC_EN;
572	}
573
574	if ((sdram_ctl & ecc_en_mask) != ecc_en_mask) {
575		/* no ECC */
576		pr_warn("%s: No ECC DIMMs discovered\n", __func__);
577		res = -ENODEV;
578		goto err;
579	}
580
581	edac_dbg(3, "init mci\n");
582	mci->mtype_cap = MEM_FLAG_DDR | MEM_FLAG_RDDR |
583			 MEM_FLAG_DDR2 | MEM_FLAG_RDDR2 |
584			 MEM_FLAG_DDR3 | MEM_FLAG_RDDR3 |
585			 MEM_FLAG_DDR4 | MEM_FLAG_RDDR4 |
586			 MEM_FLAG_LPDDR4;
587	mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
588	mci->edac_cap = EDAC_FLAG_SECDED;
589	mci->mod_name = EDAC_MOD_STR;
590
591	if (edac_op_state == EDAC_OPSTATE_POLL)
592		mci->edac_check = fsl_mc_check;
593
594	mci->ctl_page_to_phys = NULL;
595
596	mci->scrub_mode = SCRUB_SW_SRC;
597
598	fsl_ddr_init_csrows(mci);
599
600	/* store the original error disable bits */
601	pdata->orig_ddr_err_disable = ddr_in32(pdata, FSL_MC_ERR_DISABLE);
602	ddr_out32(pdata, FSL_MC_ERR_DISABLE, 0);
603
604	/* clear all error bits */
605	ddr_out32(pdata, FSL_MC_ERR_DETECT, ~0);
606
607	res = edac_mc_add_mc_with_groups(mci, fsl_ddr_dev_groups);
608	if (res) {
609		edac_dbg(3, "failed edac_mc_add_mc()\n");
610		goto err;
611	}
612
613	if (edac_op_state == EDAC_OPSTATE_INT) {
614		ddr_out32(pdata, FSL_MC_ERR_INT_EN,
615			  DDR_EIE_MBEE | DDR_EIE_SBEE);
616
617		/* store the original error management threshold */
618		pdata->orig_ddr_err_sbe = ddr_in32(pdata,
619						   FSL_MC_ERR_SBE) & 0xff0000;
620
621		/* set threshold to 1 error per interrupt */
622		ddr_out32(pdata, FSL_MC_ERR_SBE, 0x10000);
623
624		/* register interrupts */
625		pdata->irq = platform_get_irq(op, 0);
626		res = devm_request_irq(&op->dev, pdata->irq,
627				       fsl_mc_isr,
628				       IRQF_SHARED,
629				       "[EDAC] MC err", mci);
630		if (res < 0) {
631			pr_err("%s: Unable to request irq %d for FSL DDR DRAM ERR\n",
632			       __func__, pdata->irq);
633			res = -ENODEV;
634			goto err2;
635		}
636
637		pr_info(EDAC_MOD_STR " acquired irq %d for MC\n",
638		       pdata->irq);
639	}
640
641	devres_remove_group(&op->dev, fsl_mc_err_probe);
642	edac_dbg(3, "success\n");
643	pr_info(EDAC_MOD_STR " MC err registered\n");
644
645	return 0;
646
647err2:
648	edac_mc_del_mc(&op->dev);
649err:
650	devres_release_group(&op->dev, fsl_mc_err_probe);
651	edac_mc_free(mci);
652	return res;
653}
654
655void fsl_mc_err_remove(struct platform_device *op)
656{
657	struct mem_ctl_info *mci = dev_get_drvdata(&op->dev);
658	struct fsl_mc_pdata *pdata = mci->pvt_info;
659
660	edac_dbg(0, "\n");
661
662	if (edac_op_state == EDAC_OPSTATE_INT) {
663		ddr_out32(pdata, FSL_MC_ERR_INT_EN, 0);
664	}
665
666	ddr_out32(pdata, FSL_MC_ERR_DISABLE,
667		  pdata->orig_ddr_err_disable);
668	ddr_out32(pdata, FSL_MC_ERR_SBE, pdata->orig_ddr_err_sbe);
669
670
671	edac_mc_del_mc(&op->dev);
672	edac_mc_free(mci);
673}
fsl_ddr_edac.c (Linux v4.17)
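(For comparison, the earlier v4.17 revision of the same driver follows. It predates the i.MX9 support, keeps the register endianness flag and the saved error-control values in file-scope globals rather than in struct fsl_mc_pdata, and its ddr_in32()/ddr_out32() helpers take a raw register address instead of the pdata/offset pair used above.)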
 
  1/*
  2 * Freescale Memory Controller kernel module
  3 *
  4 * Support Power-based SoCs including MPC85xx, MPC86xx, MPC83xx and
  5 * ARM-based Layerscape SoCs including LS2xxx. Originally split
  6 * out from mpc85xx_edac EDAC driver.
  7 *
  8 * Parts Copyrighted (c) 2013 by Freescale Semiconductor, Inc.
  9 *
 10 * Author: Dave Jiang <djiang@mvista.com>
 11 *
 12 * 2006-2007 (c) MontaVista Software, Inc. This file is licensed under
 13 * the terms of the GNU General Public License version 2. This program
 14 * is licensed "as is" without any warranty of any kind, whether express
 15 * or implied.
 16 */
 17#include <linux/module.h>
 18#include <linux/init.h>
 19#include <linux/interrupt.h>
 20#include <linux/ctype.h>
 21#include <linux/io.h>
 22#include <linux/mod_devicetable.h>
 23#include <linux/edac.h>
 24#include <linux/smp.h>
 25#include <linux/gfp.h>
 26
 27#include <linux/of_platform.h>
 28#include <linux/of_device.h>
 29#include <linux/of_address.h>
 30#include "edac_module.h"
 31#include "fsl_ddr_edac.h"
 32
 33#define EDAC_MOD_STR	"fsl_ddr_edac"
 34
 35static int edac_mc_idx;
 36
 37static u32 orig_ddr_err_disable;
 38static u32 orig_ddr_err_sbe;
 39static bool little_endian;
 40
 41static inline u32 ddr_in32(void __iomem *addr)
 42{
 43	return little_endian ? ioread32(addr) : ioread32be(addr);
 44}
 45
 46static inline void ddr_out32(void __iomem *addr, u32 value)
 47{
 48	if (little_endian)
 49		iowrite32(value, addr);
 50	else
 51		iowrite32be(value, addr);
 52}
 53
 54/************************ MC SYSFS parts ***********************************/
 55
 56#define to_mci(k) container_of(k, struct mem_ctl_info, dev)
 57
 58static ssize_t fsl_mc_inject_data_hi_show(struct device *dev,
 59					  struct device_attribute *mattr,
 60					  char *data)
 61{
 62	struct mem_ctl_info *mci = to_mci(dev);
 63	struct fsl_mc_pdata *pdata = mci->pvt_info;
 64	return sprintf(data, "0x%08x",
 65		       ddr_in32(pdata->mc_vbase + FSL_MC_DATA_ERR_INJECT_HI));
 66}
 67
 68static ssize_t fsl_mc_inject_data_lo_show(struct device *dev,
 69					  struct device_attribute *mattr,
 70					      char *data)
 71{
 72	struct mem_ctl_info *mci = to_mci(dev);
 73	struct fsl_mc_pdata *pdata = mci->pvt_info;
 74	return sprintf(data, "0x%08x",
 75		       ddr_in32(pdata->mc_vbase + FSL_MC_DATA_ERR_INJECT_LO));
 76}
 77
 78static ssize_t fsl_mc_inject_ctrl_show(struct device *dev,
 79				       struct device_attribute *mattr,
 80					   char *data)
 81{
 82	struct mem_ctl_info *mci = to_mci(dev);
 83	struct fsl_mc_pdata *pdata = mci->pvt_info;
 84	return sprintf(data, "0x%08x",
 85		       ddr_in32(pdata->mc_vbase + FSL_MC_ECC_ERR_INJECT));
 86}
 87
 88static ssize_t fsl_mc_inject_data_hi_store(struct device *dev,
 89					   struct device_attribute *mattr,
 90					       const char *data, size_t count)
 91{
 92	struct mem_ctl_info *mci = to_mci(dev);
 93	struct fsl_mc_pdata *pdata = mci->pvt_info;
 94	unsigned long val;
 95	int rc;
 96
 97	if (isdigit(*data)) {
 98		rc = kstrtoul(data, 0, &val);
 99		if (rc)
100			return rc;
101
102		ddr_out32(pdata->mc_vbase + FSL_MC_DATA_ERR_INJECT_HI, val);
103		return count;
104	}
105	return 0;
106}
107
108static ssize_t fsl_mc_inject_data_lo_store(struct device *dev,
109					   struct device_attribute *mattr,
110					       const char *data, size_t count)
111{
112	struct mem_ctl_info *mci = to_mci(dev);
113	struct fsl_mc_pdata *pdata = mci->pvt_info;
114	unsigned long val;
115	int rc;
116
117	if (isdigit(*data)) {
118		rc = kstrtoul(data, 0, &val);
119		if (rc)
120			return rc;
121
122		ddr_out32(pdata->mc_vbase + FSL_MC_DATA_ERR_INJECT_LO, val);
123		return count;
124	}
125	return 0;
126}
127
128static ssize_t fsl_mc_inject_ctrl_store(struct device *dev,
129					struct device_attribute *mattr,
130					       const char *data, size_t count)
131{
132	struct mem_ctl_info *mci = to_mci(dev);
133	struct fsl_mc_pdata *pdata = mci->pvt_info;
134	unsigned long val;
135	int rc;
136
137	if (isdigit(*data)) {
138		rc = kstrtoul(data, 0, &val);
139		if (rc)
140			return rc;
141
142		ddr_out32(pdata->mc_vbase + FSL_MC_ECC_ERR_INJECT, val);
143		return count;
144	}
145	return 0;
146}
147
148static DEVICE_ATTR(inject_data_hi, S_IRUGO | S_IWUSR,
149		   fsl_mc_inject_data_hi_show, fsl_mc_inject_data_hi_store);
150static DEVICE_ATTR(inject_data_lo, S_IRUGO | S_IWUSR,
151		   fsl_mc_inject_data_lo_show, fsl_mc_inject_data_lo_store);
152static DEVICE_ATTR(inject_ctrl, S_IRUGO | S_IWUSR,
153		   fsl_mc_inject_ctrl_show, fsl_mc_inject_ctrl_store);
154
155static struct attribute *fsl_ddr_dev_attrs[] = {
156	&dev_attr_inject_data_hi.attr,
157	&dev_attr_inject_data_lo.attr,
158	&dev_attr_inject_ctrl.attr,
159	NULL
160};
161
162ATTRIBUTE_GROUPS(fsl_ddr_dev);
163
164/**************************** MC Err device ***************************/
165
166/*
167 * Taken from table 8-55 in the MPC8641 User's Manual and/or 9-61 in the
168 * MPC8572 User's Manual.  Each line represents a syndrome bit column as a
169 * 64-bit value, but split into an upper and lower 32-bit chunk.  The labels
170 * below correspond to Freescale's manuals.
171 */
172static unsigned int ecc_table[16] = {
173	/* MSB           LSB */
174	/* [0:31]    [32:63] */
175	0xf00fe11e, 0xc33c0ff7,	/* Syndrome bit 7 */
176	0x00ff00ff, 0x00fff0ff,
177	0x0f0f0f0f, 0x0f0fff00,
178	0x11113333, 0x7777000f,
179	0x22224444, 0x8888222f,
180	0x44448888, 0xffff4441,
181	0x8888ffff, 0x11118882,
182	0xffff1111, 0x22221114,	/* Syndrome bit 0 */
183};
184
185/*
186 * Calculate the correct ECC value for a 64-bit value specified by high:low
187 */
188static u8 calculate_ecc(u32 high, u32 low)
189{
190	u32 mask_low;
191	u32 mask_high;
192	int bit_cnt;
193	u8 ecc = 0;
194	int i;
195	int j;
196
197	for (i = 0; i < 8; i++) {
198		mask_high = ecc_table[i * 2];
199		mask_low = ecc_table[i * 2 + 1];
200		bit_cnt = 0;
201
202		for (j = 0; j < 32; j++) {
203			if ((mask_high >> j) & 1)
204				bit_cnt ^= (high >> j) & 1;
205			if ((mask_low >> j) & 1)
206				bit_cnt ^= (low >> j) & 1;
207		}
208
209		ecc |= bit_cnt << i;
210	}
211
212	return ecc;
213}
214
215/*
216 * Create the syndrome code which is generated if the data line specified by
 217 * 'bit' failed.  E.g. generate the 8-bit code seen in Table 8-55 in the MPC8641
218 * User's Manual and 9-61 in the MPC8572 User's Manual.
219 */
220static u8 syndrome_from_bit(unsigned int bit) {
221	int i;
222	u8 syndrome = 0;
223
224	/*
225	 * Cycle through the upper or lower 32-bit portion of each value in
226	 * ecc_table depending on if 'bit' is in the upper or lower half of
227	 * 64-bit data.
228	 */
229	for (i = bit < 32; i < 16; i += 2)
230		syndrome |= ((ecc_table[i] >> (bit % 32)) & 1) << (i / 2);
231
232	return syndrome;
233}
234
235/*
236 * Decode data and ecc syndrome to determine what went wrong
237 * Note: This can only decode single-bit errors
238 */
239static void sbe_ecc_decode(u32 cap_high, u32 cap_low, u32 cap_ecc,
240		       int *bad_data_bit, int *bad_ecc_bit)
241{
242	int i;
243	u8 syndrome;
244
245	*bad_data_bit = -1;
246	*bad_ecc_bit = -1;
247
248	/*
249	 * Calculate the ECC of the captured data and XOR it with the captured
250	 * ECC to find an ECC syndrome value we can search for
251	 */
252	syndrome = calculate_ecc(cap_high, cap_low) ^ cap_ecc;
253
254	/* Check if a data line is stuck... */
255	for (i = 0; i < 64; i++) {
256		if (syndrome == syndrome_from_bit(i)) {
257			*bad_data_bit = i;
258			return;
259		}
260	}
261
262	/* If data is correct, check ECC bits for errors... */
263	for (i = 0; i < 8; i++) {
264		if ((syndrome >> i) & 0x1) {
265			*bad_ecc_bit = i;
266			return;
267		}
268	}
269}
270
271#define make64(high, low) (((u64)(high) << 32) | (low))
272
273static void fsl_mc_check(struct mem_ctl_info *mci)
274{
275	struct fsl_mc_pdata *pdata = mci->pvt_info;
276	struct csrow_info *csrow;
277	u32 bus_width;
278	u32 err_detect;
279	u32 syndrome;
280	u64 err_addr;
281	u32 pfn;
282	int row_index;
283	u32 cap_high;
284	u32 cap_low;
285	int bad_data_bit;
286	int bad_ecc_bit;
287
288	err_detect = ddr_in32(pdata->mc_vbase + FSL_MC_ERR_DETECT);
289	if (!err_detect)
290		return;
291
292	fsl_mc_printk(mci, KERN_ERR, "Err Detect Register: %#8.8x\n",
293		      err_detect);
294
295	/* no more processing if not ECC bit errors */
296	if (!(err_detect & (DDR_EDE_SBE | DDR_EDE_MBE))) {
297		ddr_out32(pdata->mc_vbase + FSL_MC_ERR_DETECT, err_detect);
298		return;
299	}
300
301	syndrome = ddr_in32(pdata->mc_vbase + FSL_MC_CAPTURE_ECC);
302
303	/* Mask off appropriate bits of syndrome based on bus width */
304	bus_width = (ddr_in32(pdata->mc_vbase + FSL_MC_DDR_SDRAM_CFG) &
305		     DSC_DBW_MASK) ? 32 : 64;
306	if (bus_width == 64)
307		syndrome &= 0xff;
308	else
309		syndrome &= 0xffff;
310
311	err_addr = make64(
312		ddr_in32(pdata->mc_vbase + FSL_MC_CAPTURE_EXT_ADDRESS),
313		ddr_in32(pdata->mc_vbase + FSL_MC_CAPTURE_ADDRESS));
314	pfn = err_addr >> PAGE_SHIFT;
315
316	for (row_index = 0; row_index < mci->nr_csrows; row_index++) {
317		csrow = mci->csrows[row_index];
318		if ((pfn >= csrow->first_page) && (pfn <= csrow->last_page))
319			break;
320	}
321
322	cap_high = ddr_in32(pdata->mc_vbase + FSL_MC_CAPTURE_DATA_HI);
323	cap_low = ddr_in32(pdata->mc_vbase + FSL_MC_CAPTURE_DATA_LO);
324
325	/*
326	 * Analyze single-bit errors on 64-bit wide buses
327	 * TODO: Add support for 32-bit wide buses
328	 */
329	if ((err_detect & DDR_EDE_SBE) && (bus_width == 64)) {
330		sbe_ecc_decode(cap_high, cap_low, syndrome,
331				&bad_data_bit, &bad_ecc_bit);
332
333		if (bad_data_bit != -1)
334			fsl_mc_printk(mci, KERN_ERR,
335				"Faulty Data bit: %d\n", bad_data_bit);
336		if (bad_ecc_bit != -1)
337			fsl_mc_printk(mci, KERN_ERR,
338				"Faulty ECC bit: %d\n", bad_ecc_bit);
339
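		/*
		 * Note: this older revision shifts by (bad_data_bit - 32)
		 * and by bad_ecc_bit unconditionally, which is undefined
		 * when the decode found no bad data bit (bad_data_bit ==
		 * -1), found one in the low word (bad_data_bit < 32), or
		 * found no bad ECC bit (bad_ecc_bit == -1).  The v6.13.7
		 * version above guards each correction with a >= 0 check
		 * and flips the bit in a 64-bit copy of the captured data.
		 */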
340		fsl_mc_printk(mci, KERN_ERR,
341			"Expected Data / ECC:\t%#8.8x_%08x / %#2.2x\n",
342			cap_high ^ (1 << (bad_data_bit - 32)),
343			cap_low ^ (1 << bad_data_bit),
344			syndrome ^ (1 << bad_ecc_bit));
345	}
346
347	fsl_mc_printk(mci, KERN_ERR,
348			"Captured Data / ECC:\t%#8.8x_%08x / %#2.2x\n",
349			cap_high, cap_low, syndrome);
350	fsl_mc_printk(mci, KERN_ERR, "Err addr: %#8.8llx\n", err_addr);
351	fsl_mc_printk(mci, KERN_ERR, "PFN: %#8.8x\n", pfn);
352
353	/* we are out of range */
354	if (row_index == mci->nr_csrows)
355		fsl_mc_printk(mci, KERN_ERR, "PFN out of range!\n");
356
357	if (err_detect & DDR_EDE_SBE)
358		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
359				     pfn, err_addr & ~PAGE_MASK, syndrome,
360				     row_index, 0, -1,
361				     mci->ctl_name, "");
362
363	if (err_detect & DDR_EDE_MBE)
364		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
365				     pfn, err_addr & ~PAGE_MASK, syndrome,
366				     row_index, 0, -1,
367				     mci->ctl_name, "");
368
369	ddr_out32(pdata->mc_vbase + FSL_MC_ERR_DETECT, err_detect);
370}
371
372static irqreturn_t fsl_mc_isr(int irq, void *dev_id)
373{
374	struct mem_ctl_info *mci = dev_id;
375	struct fsl_mc_pdata *pdata = mci->pvt_info;
376	u32 err_detect;
377
378	err_detect = ddr_in32(pdata->mc_vbase + FSL_MC_ERR_DETECT);
379	if (!err_detect)
380		return IRQ_NONE;
381
382	fsl_mc_check(mci);
383
384	return IRQ_HANDLED;
385}
386
387static void fsl_ddr_init_csrows(struct mem_ctl_info *mci)
388{
389	struct fsl_mc_pdata *pdata = mci->pvt_info;
390	struct csrow_info *csrow;
391	struct dimm_info *dimm;
392	u32 sdram_ctl;
393	u32 sdtype;
394	enum mem_type mtype;
395	u32 cs_bnds;
396	int index;
397
398	sdram_ctl = ddr_in32(pdata->mc_vbase + FSL_MC_DDR_SDRAM_CFG);
399
400	sdtype = sdram_ctl & DSC_SDTYPE_MASK;
401	if (sdram_ctl & DSC_RD_EN) {
402		switch (sdtype) {
403		case 0x02000000:
404			mtype = MEM_RDDR;
405			break;
406		case 0x03000000:
407			mtype = MEM_RDDR2;
408			break;
409		case 0x07000000:
410			mtype = MEM_RDDR3;
411			break;
412		case 0x05000000:
413			mtype = MEM_RDDR4;
414			break;
415		default:
416			mtype = MEM_UNKNOWN;
417			break;
418		}
419	} else {
420		switch (sdtype) {
421		case 0x02000000:
422			mtype = MEM_DDR;
423			break;
424		case 0x03000000:
425			mtype = MEM_DDR2;
426			break;
427		case 0x07000000:
428			mtype = MEM_DDR3;
429			break;
430		case 0x05000000:
431			mtype = MEM_DDR4;
432			break;
433		default:
434			mtype = MEM_UNKNOWN;
435			break;
436		}
437	}
438
439	for (index = 0; index < mci->nr_csrows; index++) {
440		u32 start;
441		u32 end;
442
443		csrow = mci->csrows[index];
444		dimm = csrow->channels[0]->dimm;
445
446		cs_bnds = ddr_in32(pdata->mc_vbase + FSL_MC_CS_BNDS_0 +
447				   (index * FSL_MC_CS_BNDS_OFS));
448
449		start = (cs_bnds & 0xffff0000) >> 16;
450		end   = (cs_bnds & 0x0000ffff);
451
452		if (start == end)
453			continue;	/* not populated */
454
455		start <<= (24 - PAGE_SHIFT);
456		end   <<= (24 - PAGE_SHIFT);
457		end    |= (1 << (24 - PAGE_SHIFT)) - 1;
458
459		csrow->first_page = start;
460		csrow->last_page = end;
461
462		dimm->nr_pages = end + 1 - start;
463		dimm->grain = 8;
464		dimm->mtype = mtype;
465		dimm->dtype = DEV_UNKNOWN;
466		if (sdram_ctl & DSC_X32_EN)
467			dimm->dtype = DEV_X32;
468		dimm->edac_mode = EDAC_SECDED;
469	}
470}
471
472int fsl_mc_err_probe(struct platform_device *op)
473{
474	struct mem_ctl_info *mci;
475	struct edac_mc_layer layers[2];
476	struct fsl_mc_pdata *pdata;
477	struct resource r;
478	u32 sdram_ctl;
479	int res;
480
481	if (!devres_open_group(&op->dev, fsl_mc_err_probe, GFP_KERNEL))
482		return -ENOMEM;
483
484	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
485	layers[0].size = 4;
486	layers[0].is_virt_csrow = true;
487	layers[1].type = EDAC_MC_LAYER_CHANNEL;
488	layers[1].size = 1;
489	layers[1].is_virt_csrow = false;
490	mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), layers,
491			    sizeof(*pdata));
492	if (!mci) {
493		devres_release_group(&op->dev, fsl_mc_err_probe);
494		return -ENOMEM;
495	}
496
497	pdata = mci->pvt_info;
498	pdata->name = "fsl_mc_err";
499	mci->pdev = &op->dev;
500	pdata->edac_idx = edac_mc_idx++;
501	dev_set_drvdata(mci->pdev, mci);
502	mci->ctl_name = pdata->name;
503	mci->dev_name = pdata->name;
504
505	/*
506	 * Get the endianness of DDR controller registers.
507	 * Default is big endian.
508	 */
509	little_endian = of_property_read_bool(op->dev.of_node, "little-endian");
510
511	res = of_address_to_resource(op->dev.of_node, 0, &r);
512	if (res) {
513		pr_err("%s: Unable to get resource for MC err regs\n",
514		       __func__);
515		goto err;
516	}
517
518	if (!devm_request_mem_region(&op->dev, r.start, resource_size(&r),
519				     pdata->name)) {
520		pr_err("%s: Error while requesting mem region\n",
521		       __func__);
522		res = -EBUSY;
523		goto err;
524	}
525
526	pdata->mc_vbase = devm_ioremap(&op->dev, r.start, resource_size(&r));
527	if (!pdata->mc_vbase) {
528		pr_err("%s: Unable to setup MC err regs\n", __func__);
529		res = -ENOMEM;
530		goto err;
531	}
532
533	sdram_ctl = ddr_in32(pdata->mc_vbase + FSL_MC_DDR_SDRAM_CFG);
534	if (!(sdram_ctl & DSC_ECC_EN)) {
535		/* no ECC */
536		pr_warn("%s: No ECC DIMMs discovered\n", __func__);
537		res = -ENODEV;
538		goto err;
539	}
540
541	edac_dbg(3, "init mci\n");
542	mci->mtype_cap = MEM_FLAG_DDR | MEM_FLAG_RDDR |
543			 MEM_FLAG_DDR2 | MEM_FLAG_RDDR2 |
544			 MEM_FLAG_DDR3 | MEM_FLAG_RDDR3 |
545			 MEM_FLAG_DDR4 | MEM_FLAG_RDDR4;
546	mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
547	mci->edac_cap = EDAC_FLAG_SECDED;
548	mci->mod_name = EDAC_MOD_STR;
549
550	if (edac_op_state == EDAC_OPSTATE_POLL)
551		mci->edac_check = fsl_mc_check;
552
553	mci->ctl_page_to_phys = NULL;
554
555	mci->scrub_mode = SCRUB_SW_SRC;
556
557	fsl_ddr_init_csrows(mci);
558
559	/* store the original error disable bits */
560	orig_ddr_err_disable = ddr_in32(pdata->mc_vbase + FSL_MC_ERR_DISABLE);
561	ddr_out32(pdata->mc_vbase + FSL_MC_ERR_DISABLE, 0);
562
563	/* clear all error bits */
564	ddr_out32(pdata->mc_vbase + FSL_MC_ERR_DETECT, ~0);
565
566	res = edac_mc_add_mc_with_groups(mci, fsl_ddr_dev_groups);
567	if (res) {
568		edac_dbg(3, "failed edac_mc_add_mc()\n");
569		goto err;
570	}
571
572	if (edac_op_state == EDAC_OPSTATE_INT) {
573		ddr_out32(pdata->mc_vbase + FSL_MC_ERR_INT_EN,
574			  DDR_EIE_MBEE | DDR_EIE_SBEE);
575
576		/* store the original error management threshold */
577		orig_ddr_err_sbe = ddr_in32(pdata->mc_vbase +
578					    FSL_MC_ERR_SBE) & 0xff0000;
579
580		/* set threshold to 1 error per interrupt */
581		ddr_out32(pdata->mc_vbase + FSL_MC_ERR_SBE, 0x10000);
582
583		/* register interrupts */
584		pdata->irq = platform_get_irq(op, 0);
585		res = devm_request_irq(&op->dev, pdata->irq,
586				       fsl_mc_isr,
587				       IRQF_SHARED,
588				       "[EDAC] MC err", mci);
589		if (res < 0) {
590			pr_err("%s: Unable to request irq %d for FSL DDR DRAM ERR\n",
591			       __func__, pdata->irq);
592			res = -ENODEV;
593			goto err2;
594		}
595
596		pr_info(EDAC_MOD_STR " acquired irq %d for MC\n",
597		       pdata->irq);
598	}
599
600	devres_remove_group(&op->dev, fsl_mc_err_probe);
601	edac_dbg(3, "success\n");
602	pr_info(EDAC_MOD_STR " MC err registered\n");
603
604	return 0;
605
606err2:
607	edac_mc_del_mc(&op->dev);
608err:
609	devres_release_group(&op->dev, fsl_mc_err_probe);
610	edac_mc_free(mci);
611	return res;
612}
613
614int fsl_mc_err_remove(struct platform_device *op)
615{
616	struct mem_ctl_info *mci = dev_get_drvdata(&op->dev);
617	struct fsl_mc_pdata *pdata = mci->pvt_info;
618
619	edac_dbg(0, "\n");
620
621	if (edac_op_state == EDAC_OPSTATE_INT) {
622		ddr_out32(pdata->mc_vbase + FSL_MC_ERR_INT_EN, 0);
623	}
624
625	ddr_out32(pdata->mc_vbase + FSL_MC_ERR_DISABLE,
626		  orig_ddr_err_disable);
627	ddr_out32(pdata->mc_vbase + FSL_MC_ERR_SBE, orig_ddr_err_sbe);
628
629	edac_mc_del_mc(&op->dev);
630	edac_mc_free(mci);
631	return 0;
632}