v6.8
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Freescale Memory Controller kernel module
  4 *
  5 * Support Power-based SoCs including MPC85xx, MPC86xx, MPC83xx and
  6 * ARM-based Layerscape SoCs including LS2xxx and LS1021A. Originally
  7 * split out from mpc85xx_edac EDAC driver.
  8 *
  9 * Parts Copyrighted (c) 2013 by Freescale Semiconductor, Inc.
 10 *
 11 * Author: Dave Jiang <djiang@mvista.com>
 12 *
 13 * 2006-2007 (c) MontaVista Software, Inc.
 14 */
 15#include <linux/module.h>
 16#include <linux/init.h>
 17#include <linux/interrupt.h>
 18#include <linux/ctype.h>
 19#include <linux/io.h>
 20#include <linux/mod_devicetable.h>
 21#include <linux/edac.h>
 22#include <linux/smp.h>
 23#include <linux/gfp.h>
 24
 25#include <linux/of.h>
 26#include <linux/of_address.h>
 27#include "edac_module.h"
 28#include "fsl_ddr_edac.h"
 29
 30#define EDAC_MOD_STR	"fsl_ddr_edac"
 31
 32static int edac_mc_idx;
 33
 34static u32 orig_ddr_err_disable;
 35static u32 orig_ddr_err_sbe;
 36static bool little_endian;
 37
 38static inline u32 ddr_in32(void __iomem *addr)
 39{
 40	return little_endian ? ioread32(addr) : ioread32be(addr);
 41}
 42
 43static inline void ddr_out32(void __iomem *addr, u32 value)
 44{
 45	if (little_endian)
 46		iowrite32(value, addr);
 47	else
 48		iowrite32be(value, addr);
 49}
 50
 51#ifdef CONFIG_EDAC_DEBUG
 52/************************ MC SYSFS parts ***********************************/
 53
 54#define to_mci(k) container_of(k, struct mem_ctl_info, dev)
 55
 56static ssize_t fsl_mc_inject_data_hi_show(struct device *dev,
 57					  struct device_attribute *mattr,
 58					  char *data)
 59{
 60	struct mem_ctl_info *mci = to_mci(dev);
 61	struct fsl_mc_pdata *pdata = mci->pvt_info;
 62	return sprintf(data, "0x%08x",
 63		       ddr_in32(pdata->mc_vbase + FSL_MC_DATA_ERR_INJECT_HI));
 64}
 65
 66static ssize_t fsl_mc_inject_data_lo_show(struct device *dev,
 67					  struct device_attribute *mattr,
 68					      char *data)
 69{
 70	struct mem_ctl_info *mci = to_mci(dev);
 71	struct fsl_mc_pdata *pdata = mci->pvt_info;
 72	return sprintf(data, "0x%08x",
 73		       ddr_in32(pdata->mc_vbase + FSL_MC_DATA_ERR_INJECT_LO));
 74}
 75
 76static ssize_t fsl_mc_inject_ctrl_show(struct device *dev,
 77				       struct device_attribute *mattr,
 78					   char *data)
 79{
 80	struct mem_ctl_info *mci = to_mci(dev);
 81	struct fsl_mc_pdata *pdata = mci->pvt_info;
 82	return sprintf(data, "0x%08x",
 83		       ddr_in32(pdata->mc_vbase + FSL_MC_ECC_ERR_INJECT));
 84}
 85
 86static ssize_t fsl_mc_inject_data_hi_store(struct device *dev,
 87					   struct device_attribute *mattr,
 88					       const char *data, size_t count)
 89{
 90	struct mem_ctl_info *mci = to_mci(dev);
 91	struct fsl_mc_pdata *pdata = mci->pvt_info;
 92	unsigned long val;
 93	int rc;
 94
 95	if (isdigit(*data)) {
 96		rc = kstrtoul(data, 0, &val);
 97		if (rc)
 98			return rc;
 99
100		ddr_out32(pdata->mc_vbase + FSL_MC_DATA_ERR_INJECT_HI, val);
101		return count;
102	}
103	return 0;
104}
105
106static ssize_t fsl_mc_inject_data_lo_store(struct device *dev,
107					   struct device_attribute *mattr,
108					       const char *data, size_t count)
109{
110	struct mem_ctl_info *mci = to_mci(dev);
111	struct fsl_mc_pdata *pdata = mci->pvt_info;
112	unsigned long val;
113	int rc;
114
115	if (isdigit(*data)) {
116		rc = kstrtoul(data, 0, &val);
117		if (rc)
118			return rc;
119
120		ddr_out32(pdata->mc_vbase + FSL_MC_DATA_ERR_INJECT_LO, val);
121		return count;
122	}
123	return 0;
124}
125
126static ssize_t fsl_mc_inject_ctrl_store(struct device *dev,
127					struct device_attribute *mattr,
128					       const char *data, size_t count)
129{
130	struct mem_ctl_info *mci = to_mci(dev);
131	struct fsl_mc_pdata *pdata = mci->pvt_info;
132	unsigned long val;
133	int rc;
134
135	if (isdigit(*data)) {
136		rc = kstrtoul(data, 0, &val);
137		if (rc)
138			return rc;
139
140		ddr_out32(pdata->mc_vbase + FSL_MC_ECC_ERR_INJECT, val);
141		return count;
142	}
143	return 0;
144}
145
146static DEVICE_ATTR(inject_data_hi, S_IRUGO | S_IWUSR,
147		   fsl_mc_inject_data_hi_show, fsl_mc_inject_data_hi_store);
148static DEVICE_ATTR(inject_data_lo, S_IRUGO | S_IWUSR,
149		   fsl_mc_inject_data_lo_show, fsl_mc_inject_data_lo_store);
150static DEVICE_ATTR(inject_ctrl, S_IRUGO | S_IWUSR,
151		   fsl_mc_inject_ctrl_show, fsl_mc_inject_ctrl_store);
152#endif /* CONFIG_EDAC_DEBUG */
153
154static struct attribute *fsl_ddr_dev_attrs[] = {
155#ifdef CONFIG_EDAC_DEBUG
156	&dev_attr_inject_data_hi.attr,
157	&dev_attr_inject_data_lo.attr,
158	&dev_attr_inject_ctrl.attr,
159#endif
160	NULL
161};
162
163ATTRIBUTE_GROUPS(fsl_ddr_dev);
164
165/**************************** MC Err device ***************************/
166
167/*
168 * Taken from table 8-55 in the MPC8641 User's Manual and/or 9-61 in the
169 * MPC8572 User's Manual.  Each line represents a syndrome bit column as a
170 * 64-bit value, but split into an upper and lower 32-bit chunk.  The labels
171 * below correspond to Freescale's manuals.
172 */
173static unsigned int ecc_table[16] = {
174	/* MSB           LSB */
175	/* [0:31]    [32:63] */
176	0xf00fe11e, 0xc33c0ff7,	/* Syndrome bit 7 */
177	0x00ff00ff, 0x00fff0ff,
178	0x0f0f0f0f, 0x0f0fff00,
179	0x11113333, 0x7777000f,
180	0x22224444, 0x8888222f,
181	0x44448888, 0xffff4441,
182	0x8888ffff, 0x11118882,
183	0xffff1111, 0x22221114,	/* Syndrome bit 0 */
184};
185
186/*
187 * Calculate the correct ECC value for a 64-bit value specified by high:low
188 */
189static u8 calculate_ecc(u32 high, u32 low)
190{
191	u32 mask_low;
192	u32 mask_high;
193	int bit_cnt;
194	u8 ecc = 0;
195	int i;
196	int j;
197
198	for (i = 0; i < 8; i++) {
199		mask_high = ecc_table[i * 2];
200		mask_low = ecc_table[i * 2 + 1];
201		bit_cnt = 0;
202
203		for (j = 0; j < 32; j++) {
204			if ((mask_high >> j) & 1)
205				bit_cnt ^= (high >> j) & 1;
206			if ((mask_low >> j) & 1)
207				bit_cnt ^= (low >> j) & 1;
208		}
209
210		ecc |= bit_cnt << i;
211	}
212
213	return ecc;
214}
215
216/*
217 * Create the syndrome code which is generated if the data line specified by
218 * 'bit' failed.  E.g. generate the 8-bit codes seen in Table 8-55 in the MPC8641
219 * User's Manual and 9-61 in the MPC8572 User's Manual.
220 */
221static u8 syndrome_from_bit(unsigned int bit) {
222	int i;
223	u8 syndrome = 0;
224
225	/*
226	 * Cycle through the upper or lower 32-bit portion of each value in
227	 * ecc_table depending on if 'bit' is in the upper or lower half of
228	 * 64-bit data.
229	 */
230	for (i = bit < 32; i < 16; i += 2)
231		syndrome |= ((ecc_table[i] >> (bit % 32)) & 1) << (i / 2);
232
233	return syndrome;
234}
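/*
 * Editorial worked example (not part of the original driver): suppose the
 * stored 64-bit word was all zeroes, so its stored ECC is
 * calculate_ecc(0, 0) == 0x00.  If data bit 0 (the LSB of the low capture
 * word) reads back flipped, the captured data is high = 0x00000000,
 * low = 0x00000001, and calculate_ecc(0, 1) == 0x3b.  XORing with the
 * captured ECC of 0x00 gives a syndrome of 0x3b, which matches
 * syndrome_from_bit(0), so sbe_ecc_decode() below reports bad_data_bit == 0.
 */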
235
236/*
237 * Decode data and ecc syndrome to determine what went wrong
238 * Note: This can only decode single-bit errors
239 */
240static void sbe_ecc_decode(u32 cap_high, u32 cap_low, u32 cap_ecc,
241		       int *bad_data_bit, int *bad_ecc_bit)
242{
243	int i;
244	u8 syndrome;
245
246	*bad_data_bit = -1;
247	*bad_ecc_bit = -1;
248
249	/*
250	 * Calculate the ECC of the captured data and XOR it with the captured
251	 * ECC to find an ECC syndrome value we can search for
252	 */
253	syndrome = calculate_ecc(cap_high, cap_low) ^ cap_ecc;
254
255	/* Check if a data line is stuck... */
256	for (i = 0; i < 64; i++) {
257		if (syndrome == syndrome_from_bit(i)) {
258			*bad_data_bit = i;
259			return;
260		}
261	}
262
263	/* If data is correct, check ECC bits for errors... */
264	for (i = 0; i < 8; i++) {
265		if ((syndrome >> i) & 0x1) {
266			*bad_ecc_bit = i;
267			return;
268		}
269	}
270}
271
272#define make64(high, low) (((u64)(high) << 32) | (low))
273
274static void fsl_mc_check(struct mem_ctl_info *mci)
275{
276	struct fsl_mc_pdata *pdata = mci->pvt_info;
277	struct csrow_info *csrow;
278	u32 bus_width;
279	u32 err_detect;
280	u32 syndrome;
281	u64 err_addr;
282	u32 pfn;
283	int row_index;
284	u32 cap_high;
285	u32 cap_low;
286	int bad_data_bit;
287	int bad_ecc_bit;
288
289	err_detect = ddr_in32(pdata->mc_vbase + FSL_MC_ERR_DETECT);
290	if (!err_detect)
291		return;
292
293	fsl_mc_printk(mci, KERN_ERR, "Err Detect Register: %#8.8x\n",
294		      err_detect);
295
296	/* no more processing if not ECC bit errors */
297	if (!(err_detect & (DDR_EDE_SBE | DDR_EDE_MBE))) {
298		ddr_out32(pdata->mc_vbase + FSL_MC_ERR_DETECT, err_detect);
299		return;
300	}
301
302	syndrome = ddr_in32(pdata->mc_vbase + FSL_MC_CAPTURE_ECC);
303
304	/* Mask off appropriate bits of syndrome based on bus width */
305	bus_width = (ddr_in32(pdata->mc_vbase + FSL_MC_DDR_SDRAM_CFG) &
306		     DSC_DBW_MASK) ? 32 : 64;
307	if (bus_width == 64)
308		syndrome &= 0xff;
309	else
310		syndrome &= 0xffff;
311
312	err_addr = make64(
313		ddr_in32(pdata->mc_vbase + FSL_MC_CAPTURE_EXT_ADDRESS),
314		ddr_in32(pdata->mc_vbase + FSL_MC_CAPTURE_ADDRESS));
315	pfn = err_addr >> PAGE_SHIFT;
316
317	for (row_index = 0; row_index < mci->nr_csrows; row_index++) {
318		csrow = mci->csrows[row_index];
319		if ((pfn >= csrow->first_page) && (pfn <= csrow->last_page))
320			break;
321	}
322
323	cap_high = ddr_in32(pdata->mc_vbase + FSL_MC_CAPTURE_DATA_HI);
324	cap_low = ddr_in32(pdata->mc_vbase + FSL_MC_CAPTURE_DATA_LO);
325
326	/*
327	 * Analyze single-bit errors on 64-bit wide buses
328	 * TODO: Add support for 32-bit wide buses
329	 */
330	if ((err_detect & DDR_EDE_SBE) && (bus_width == 64)) {
331		sbe_ecc_decode(cap_high, cap_low, syndrome,
332				&bad_data_bit, &bad_ecc_bit);
333
334		if (bad_data_bit != -1)
335			fsl_mc_printk(mci, KERN_ERR,
336				"Faulty Data bit: %d\n", bad_data_bit);
337		if (bad_ecc_bit != -1)
338			fsl_mc_printk(mci, KERN_ERR,
339				"Faulty ECC bit: %d\n", bad_ecc_bit);
340
341		fsl_mc_printk(mci, KERN_ERR,
342			"Expected Data / ECC:\t%#8.8x_%08x / %#2.2x\n",
343			cap_high ^ (bad_data_bit >= 32 ? 1U << (bad_data_bit - 32) : 0),
344			cap_low ^ (bad_data_bit >= 0 && bad_data_bit < 32 ? 1U << bad_data_bit : 0),
345			syndrome ^ (bad_ecc_bit >= 0 ? 1U << bad_ecc_bit : 0));
346	}
347
348	fsl_mc_printk(mci, KERN_ERR,
349			"Captured Data / ECC:\t%#8.8x_%08x / %#2.2x\n",
350			cap_high, cap_low, syndrome);
351	fsl_mc_printk(mci, KERN_ERR, "Err addr: %#8.8llx\n", err_addr);
352	fsl_mc_printk(mci, KERN_ERR, "PFN: %#8.8x\n", pfn);
353
354	/* we are out of range */
355	if (row_index == mci->nr_csrows)
356		fsl_mc_printk(mci, KERN_ERR, "PFN out of range!\n");
357
358	if (err_detect & DDR_EDE_SBE)
359		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
360				     pfn, err_addr & ~PAGE_MASK, syndrome,
361				     row_index, 0, -1,
362				     mci->ctl_name, "");
363
364	if (err_detect & DDR_EDE_MBE)
365		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
366				     pfn, err_addr & ~PAGE_MASK, syndrome,
367				     row_index, 0, -1,
368				     mci->ctl_name, "");
369
370	ddr_out32(pdata->mc_vbase + FSL_MC_ERR_DETECT, err_detect);
371}
372
373static irqreturn_t fsl_mc_isr(int irq, void *dev_id)
374{
375	struct mem_ctl_info *mci = dev_id;
376	struct fsl_mc_pdata *pdata = mci->pvt_info;
377	u32 err_detect;
378
379	err_detect = ddr_in32(pdata->mc_vbase + FSL_MC_ERR_DETECT);
380	if (!err_detect)
381		return IRQ_NONE;
382
383	fsl_mc_check(mci);
384
385	return IRQ_HANDLED;
386}
387
388static void fsl_ddr_init_csrows(struct mem_ctl_info *mci)
389{
390	struct fsl_mc_pdata *pdata = mci->pvt_info;
391	struct csrow_info *csrow;
392	struct dimm_info *dimm;
393	u32 sdram_ctl;
394	u32 sdtype;
395	enum mem_type mtype;
396	u32 cs_bnds;
397	int index;
398
399	sdram_ctl = ddr_in32(pdata->mc_vbase + FSL_MC_DDR_SDRAM_CFG);
400
401	sdtype = sdram_ctl & DSC_SDTYPE_MASK;
402	if (sdram_ctl & DSC_RD_EN) {
403		switch (sdtype) {
404		case 0x02000000:
405			mtype = MEM_RDDR;
406			break;
407		case 0x03000000:
408			mtype = MEM_RDDR2;
409			break;
410		case 0x07000000:
411			mtype = MEM_RDDR3;
412			break;
413		case 0x05000000:
414			mtype = MEM_RDDR4;
415			break;
416		default:
417			mtype = MEM_UNKNOWN;
418			break;
419		}
420	} else {
421		switch (sdtype) {
422		case 0x02000000:
423			mtype = MEM_DDR;
424			break;
425		case 0x03000000:
426			mtype = MEM_DDR2;
427			break;
428		case 0x07000000:
429			mtype = MEM_DDR3;
430			break;
431		case 0x05000000:
432			mtype = MEM_DDR4;
433			break;
434		default:
435			mtype = MEM_UNKNOWN;
436			break;
437		}
438	}
439
440	for (index = 0; index < mci->nr_csrows; index++) {
441		u32 start;
442		u32 end;
443
444		csrow = mci->csrows[index];
445		dimm = csrow->channels[0]->dimm;
446
447		cs_bnds = ddr_in32(pdata->mc_vbase + FSL_MC_CS_BNDS_0 +
448				   (index * FSL_MC_CS_BNDS_OFS));
449
450		start = (cs_bnds & 0xffff0000) >> 16;
451		end   = (cs_bnds & 0x0000ffff);
452
453		if (start == end)
454			continue;	/* not populated */
455
456		start <<= (24 - PAGE_SHIFT);
457		end   <<= (24 - PAGE_SHIFT);
458		end    |= (1 << (24 - PAGE_SHIFT)) - 1;
459
460		csrow->first_page = start;
461		csrow->last_page = end;
462
463		dimm->nr_pages = end + 1 - start;
464		dimm->grain = 8;
465		dimm->mtype = mtype;
466		dimm->dtype = DEV_UNKNOWN;
467		if (sdram_ctl & DSC_X32_EN)
468			dimm->dtype = DEV_X32;
469		dimm->edac_mode = EDAC_SECDED;
470	}
471}
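/*
 * Editorial worked example (not part of the original driver), assuming 4 KiB
 * pages (PAGE_SHIFT == 12): the CS_BNDS start/end fields are in units of
 * 2^24 bytes (16 MiB).  A bounds register of 0x0000003f decodes to
 * start == 0x0000 and end == 0x003f, giving first_page == 0,
 * last_page == 0x3ffff and nr_pages == 0x40000, i.e. this chip select spans
 * physical addresses 0x0 through 0x3fffffff (1 GiB).
 */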
472
473int fsl_mc_err_probe(struct platform_device *op)
474{
475	struct mem_ctl_info *mci;
476	struct edac_mc_layer layers[2];
477	struct fsl_mc_pdata *pdata;
478	struct resource r;
479	u32 sdram_ctl;
480	int res;
481
482	if (!devres_open_group(&op->dev, fsl_mc_err_probe, GFP_KERNEL))
483		return -ENOMEM;
484
485	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
486	layers[0].size = 4;
487	layers[0].is_virt_csrow = true;
488	layers[1].type = EDAC_MC_LAYER_CHANNEL;
489	layers[1].size = 1;
490	layers[1].is_virt_csrow = false;
491	mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), layers,
492			    sizeof(*pdata));
493	if (!mci) {
494		devres_release_group(&op->dev, fsl_mc_err_probe);
495		return -ENOMEM;
496	}
497
498	pdata = mci->pvt_info;
499	pdata->name = "fsl_mc_err";
500	mci->pdev = &op->dev;
501	pdata->edac_idx = edac_mc_idx++;
502	dev_set_drvdata(mci->pdev, mci);
503	mci->ctl_name = pdata->name;
504	mci->dev_name = pdata->name;
505
506	/*
507	 * Get the endianness of DDR controller registers.
508	 * Default is big endian.
509	 */
510	little_endian = of_property_read_bool(op->dev.of_node, "little-endian");
511
512	res = of_address_to_resource(op->dev.of_node, 0, &r);
513	if (res) {
514		pr_err("%s: Unable to get resource for MC err regs\n",
515		       __func__);
516		goto err;
517	}
518
519	if (!devm_request_mem_region(&op->dev, r.start, resource_size(&r),
520				     pdata->name)) {
521		pr_err("%s: Error while requesting mem region\n",
522		       __func__);
523		res = -EBUSY;
524		goto err;
525	}
526
527	pdata->mc_vbase = devm_ioremap(&op->dev, r.start, resource_size(&r));
528	if (!pdata->mc_vbase) {
529		pr_err("%s: Unable to setup MC err regs\n", __func__);
530		res = -ENOMEM;
531		goto err;
532	}
533
534	sdram_ctl = ddr_in32(pdata->mc_vbase + FSL_MC_DDR_SDRAM_CFG);
535	if (!(sdram_ctl & DSC_ECC_EN)) {
536		/* no ECC */
537		pr_warn("%s: No ECC DIMMs discovered\n", __func__);
538		res = -ENODEV;
539		goto err;
540	}
541
542	edac_dbg(3, "init mci\n");
543	mci->mtype_cap = MEM_FLAG_DDR | MEM_FLAG_RDDR |
544			 MEM_FLAG_DDR2 | MEM_FLAG_RDDR2 |
545			 MEM_FLAG_DDR3 | MEM_FLAG_RDDR3 |
546			 MEM_FLAG_DDR4 | MEM_FLAG_RDDR4;
547	mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
548	mci->edac_cap = EDAC_FLAG_SECDED;
549	mci->mod_name = EDAC_MOD_STR;
550
551	if (edac_op_state == EDAC_OPSTATE_POLL)
552		mci->edac_check = fsl_mc_check;
553
554	mci->ctl_page_to_phys = NULL;
555
556	mci->scrub_mode = SCRUB_SW_SRC;
557
558	fsl_ddr_init_csrows(mci);
559
560	/* store the original error disable bits */
561	orig_ddr_err_disable = ddr_in32(pdata->mc_vbase + FSL_MC_ERR_DISABLE);
562	ddr_out32(pdata->mc_vbase + FSL_MC_ERR_DISABLE, 0);
563
564	/* clear all error bits */
565	ddr_out32(pdata->mc_vbase + FSL_MC_ERR_DETECT, ~0);
566
567	res = edac_mc_add_mc_with_groups(mci, fsl_ddr_dev_groups);
568	if (res) {
569		edac_dbg(3, "failed edac_mc_add_mc()\n");
570		goto err;
571	}
572
573	if (edac_op_state == EDAC_OPSTATE_INT) {
574		ddr_out32(pdata->mc_vbase + FSL_MC_ERR_INT_EN,
575			  DDR_EIE_MBEE | DDR_EIE_SBEE);
576
577		/* store the original error management threshold */
578		orig_ddr_err_sbe = ddr_in32(pdata->mc_vbase +
579					    FSL_MC_ERR_SBE) & 0xff0000;
580
581		/* set threshold to 1 error per interrupt */
582		ddr_out32(pdata->mc_vbase + FSL_MC_ERR_SBE, 0x10000);
583
584		/* register interrupts */
585		pdata->irq = platform_get_irq(op, 0);
586		res = devm_request_irq(&op->dev, pdata->irq,
587				       fsl_mc_isr,
588				       IRQF_SHARED,
589				       "[EDAC] MC err", mci);
590		if (res < 0) {
591			pr_err("%s: Unable to request irq %d for FSL DDR DRAM ERR\n",
592			       __func__, pdata->irq);
593			res = -ENODEV;
594			goto err2;
595		}
596
597		pr_info(EDAC_MOD_STR " acquired irq %d for MC\n",
598		       pdata->irq);
599	}
600
601	devres_remove_group(&op->dev, fsl_mc_err_probe);
602	edac_dbg(3, "success\n");
603	pr_info(EDAC_MOD_STR " MC err registered\n");
604
605	return 0;
606
607err2:
608	edac_mc_del_mc(&op->dev);
609err:
610	devres_release_group(&op->dev, fsl_mc_err_probe);
611	edac_mc_free(mci);
612	return res;
613}
614
615void fsl_mc_err_remove(struct platform_device *op)
616{
617	struct mem_ctl_info *mci = dev_get_drvdata(&op->dev);
618	struct fsl_mc_pdata *pdata = mci->pvt_info;
619
620	edac_dbg(0, "\n");
621
622	if (edac_op_state == EDAC_OPSTATE_INT) {
623		ddr_out32(pdata->mc_vbase + FSL_MC_ERR_INT_EN, 0);
624	}
625
626	ddr_out32(pdata->mc_vbase + FSL_MC_ERR_DISABLE,
627		  orig_ddr_err_disable);
628	ddr_out32(pdata->mc_vbase + FSL_MC_ERR_SBE, orig_ddr_err_sbe);
629
630	edac_mc_del_mc(&op->dev);
631	edac_mc_free(mci);
632}
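
This file only provides the shared probe/remove helpers; a platform driver supplies the devicetree match table and registers fsl_mc_err_probe() and fsl_mc_err_remove() with the platform bus. The snippet below is a minimal sketch of that wiring, loosely modeled on drivers/edac/layerscape_edac.c: the compatible string, the driver name and the EDAC_OPSTATE_INT default are illustrative assumptions, not a verbatim copy of any in-tree driver.

#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/edac.h>
#include "edac_module.h"
#include "fsl_ddr_edac.h"

/* Assumed devicetree binding; the real compatible string depends on the SoC. */
static const struct of_device_id fsl_ddr_mc_err_of_match[] = {
	{ .compatible = "fsl,qoriq-memory-controller" },
	{},
};
MODULE_DEVICE_TABLE(of, fsl_ddr_mc_err_of_match);

static struct platform_driver fsl_ddr_mc_err_driver = {
	.probe = fsl_mc_err_probe,
	/* fsl_mc_err_remove() returns void here, so use the void-returning hook */
	.remove_new = fsl_mc_err_remove,
	.driver = {
		.name = "fsl_ddr_mc_err",
		.of_match_table = fsl_ddr_mc_err_of_match,
	},
};

static int __init fsl_ddr_mc_err_init(void)
{
	/* Default to interrupt mode so fsl_mc_isr() gets requested in probe. */
	if (edac_op_state != EDAC_OPSTATE_POLL &&
	    edac_op_state != EDAC_OPSTATE_INT)
		edac_op_state = EDAC_OPSTATE_INT;

	return platform_driver_register(&fsl_ddr_mc_err_driver);
}
module_init(fsl_ddr_mc_err_init);

static void __exit fsl_ddr_mc_err_exit(void)
{
	platform_driver_unregister(&fsl_ddr_mc_err_driver);
}
module_exit(fsl_ddr_mc_err_exit);

MODULE_LICENSE("GPL");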