Linux Audio

Check our new training course

Loading...
v6.8
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Synopsys DDR ECC Driver
   4 * This driver is based on ppc4xx_edac.c drivers
   5 *
   6 * Copyright (C) 2012 - 2014 Xilinx, Inc.
   7 */
   8
   9#include <linux/edac.h>
  10#include <linux/module.h>
  11#include <linux/platform_device.h>
 
  12#include <linux/interrupt.h>
  13#include <linux/of.h>
  14
  15#include "edac_module.h"
  16
  17/* Number of cs_rows needed per memory controller */
  18#define SYNPS_EDAC_NR_CSROWS		1
  19
  20/* Number of channels per memory controller */
  21#define SYNPS_EDAC_NR_CHANS		1
  22
  23/* Granularity of reported error in bytes */
  24#define SYNPS_EDAC_ERR_GRAIN		1
  25
  26#define SYNPS_EDAC_MSG_SIZE		256
  27
  28#define SYNPS_EDAC_MOD_STRING		"synps_edac"
  29#define SYNPS_EDAC_MOD_VER		"1"
  30
  31/* Synopsys DDR memory controller registers that are relevant to ECC */
  32#define CTRL_OFST			0x0
  33#define T_ZQ_OFST			0xA4
  34
  35/* ECC control register */
  36#define ECC_CTRL_OFST			0xC4
  37/* ECC log register */
  38#define CE_LOG_OFST			0xC8
  39/* ECC address register */
  40#define CE_ADDR_OFST			0xCC
  41/* ECC data[31:0] register */
  42#define CE_DATA_31_0_OFST		0xD0
  43
  44/* Uncorrectable error info registers */
  45#define UE_LOG_OFST			0xDC
  46#define UE_ADDR_OFST			0xE0
  47#define UE_DATA_31_0_OFST		0xE4
  48
  49#define STAT_OFST			0xF0
  50#define SCRUB_OFST			0xF4
  51
  52/* Control register bit field definitions */
  53#define CTRL_BW_MASK			0xC
  54#define CTRL_BW_SHIFT			2
  55
  56#define DDRCTL_WDTH_16			1
  57#define DDRCTL_WDTH_32			0
  58
  59/* ZQ register bit field definitions */
  60#define T_ZQ_DDRMODE_MASK		0x2
  61
  62/* ECC control register bit field definitions */
  63#define ECC_CTRL_CLR_CE_ERR		0x2
  64#define ECC_CTRL_CLR_UE_ERR		0x1
  65
  66/* ECC correctable/uncorrectable error log register definitions */
  67#define LOG_VALID			0x1
  68#define CE_LOG_BITPOS_MASK		0xFE
  69#define CE_LOG_BITPOS_SHIFT		1
  70
  71/* ECC correctable/uncorrectable error address register definitions */
  72#define ADDR_COL_MASK			0xFFF
  73#define ADDR_ROW_MASK			0xFFFF000
  74#define ADDR_ROW_SHIFT			12
  75#define ADDR_BANK_MASK			0x70000000
  76#define ADDR_BANK_SHIFT			28
  77
  78/* ECC statistic register definitions */
  79#define STAT_UECNT_MASK			0xFF
  80#define STAT_CECNT_MASK			0xFF00
  81#define STAT_CECNT_SHIFT		8
  82
  83/* ECC scrub register definitions */
  84#define SCRUB_MODE_MASK			0x7
  85#define SCRUB_MODE_SECDED		0x4
  86
  87/* DDR ECC Quirks */
  88#define DDR_ECC_INTR_SUPPORT		BIT(0)
  89#define DDR_ECC_DATA_POISON_SUPPORT	BIT(1)
  90#define DDR_ECC_INTR_SELF_CLEAR		BIT(2)
  91
  92/* ZynqMP Enhanced DDR memory controller registers that are relevant to ECC */
  93/* ECC Configuration Registers */
  94#define ECC_CFG0_OFST			0x70
  95#define ECC_CFG1_OFST			0x74
  96
  97/* ECC Status Register */
  98#define ECC_STAT_OFST			0x78
  99
 100/* ECC Clear Register */
 101#define ECC_CLR_OFST			0x7C
 102
 103/* ECC Error count Register */
 104#define ECC_ERRCNT_OFST			0x80
 105
 106/* ECC Corrected Error Address Register */
 107#define ECC_CEADDR0_OFST		0x84
 108#define ECC_CEADDR1_OFST		0x88
 109
 110/* ECC Syndrome Registers */
 111#define ECC_CSYND0_OFST			0x8C
 112#define ECC_CSYND1_OFST			0x90
 113#define ECC_CSYND2_OFST			0x94
 114
 115/* ECC Bit Mask0 Address Register */
 116#define ECC_BITMASK0_OFST		0x98
 117#define ECC_BITMASK1_OFST		0x9C
 118#define ECC_BITMASK2_OFST		0xA0
 119
 120/* ECC UnCorrected Error Address Register */
 121#define ECC_UEADDR0_OFST		0xA4
 122#define ECC_UEADDR1_OFST		0xA8
 123
 124/* ECC Syndrome Registers */
 125#define ECC_UESYND0_OFST		0xAC
 126#define ECC_UESYND1_OFST		0xB0
 127#define ECC_UESYND2_OFST		0xB4
 128
 129/* ECC Poison Address Reg */
 130#define ECC_POISON0_OFST		0xB8
 131#define ECC_POISON1_OFST		0xBC
 132
 133#define ECC_ADDRMAP0_OFFSET		0x200
 134
 135/* Control register bitfield definitions */
 136#define ECC_CTRL_BUSWIDTH_MASK		0x3000
 137#define ECC_CTRL_BUSWIDTH_SHIFT		12
 138#define ECC_CTRL_CLR_CE_ERRCNT		BIT(2)
 139#define ECC_CTRL_CLR_UE_ERRCNT		BIT(3)
 140
 141/* DDR Control Register width definitions  */
 142#define DDRCTL_EWDTH_16			2
 143#define DDRCTL_EWDTH_32			1
 144#define DDRCTL_EWDTH_64			0
 145
 146/* ECC status register definitions */
 147#define ECC_STAT_UECNT_MASK		0xF0000
 148#define ECC_STAT_UECNT_SHIFT		16
 149#define ECC_STAT_CECNT_MASK		0xF00
 150#define ECC_STAT_CECNT_SHIFT		8
 151#define ECC_STAT_BITNUM_MASK		0x7F
 152
 153/* ECC error count register definitions */
 154#define ECC_ERRCNT_UECNT_MASK		0xFFFF0000
 155#define ECC_ERRCNT_UECNT_SHIFT		16
 156#define ECC_ERRCNT_CECNT_MASK		0xFFFF
 157
 158/* DDR QOS Interrupt register definitions */
 159#define DDR_QOS_IRQ_STAT_OFST		0x20200
 160#define DDR_QOSUE_MASK			0x4
 161#define	DDR_QOSCE_MASK			0x2
 162#define	ECC_CE_UE_INTR_MASK		0x6
 163#define DDR_QOS_IRQ_EN_OFST		0x20208
 164#define DDR_QOS_IRQ_DB_OFST		0x2020C
 165
 166/* DDR QOS Interrupt register definitions */
 167#define DDR_UE_MASK			BIT(9)
 168#define DDR_CE_MASK			BIT(8)
 169
 170/* ECC Corrected Error Register Mask and Shifts*/
 171#define ECC_CEADDR0_RW_MASK		0x3FFFF
 172#define ECC_CEADDR0_RNK_MASK		BIT(24)
 173#define ECC_CEADDR1_BNKGRP_MASK		0x3000000
 174#define ECC_CEADDR1_BNKNR_MASK		0x70000
 175#define ECC_CEADDR1_BLKNR_MASK		0xFFF
 176#define ECC_CEADDR1_BNKGRP_SHIFT	24
 177#define ECC_CEADDR1_BNKNR_SHIFT		16
 178
 179/* ECC Poison register shifts */
 180#define ECC_POISON0_RANK_SHIFT		24
 181#define ECC_POISON0_RANK_MASK		BIT(24)
 182#define ECC_POISON0_COLUMN_SHIFT	0
 183#define ECC_POISON0_COLUMN_MASK		0xFFF
 184#define ECC_POISON1_BG_SHIFT		28
 185#define ECC_POISON1_BG_MASK		0x30000000
 186#define ECC_POISON1_BANKNR_SHIFT	24
 187#define ECC_POISON1_BANKNR_MASK		0x7000000
 188#define ECC_POISON1_ROW_SHIFT		0
 189#define ECC_POISON1_ROW_MASK		0x3FFFF
 190
 191/* DDR Memory type defines */
 192#define MEM_TYPE_DDR3			0x1
 193#define MEM_TYPE_LPDDR3			0x8
 194#define MEM_TYPE_DDR2			0x4
 195#define MEM_TYPE_DDR4			0x10
 196#define MEM_TYPE_LPDDR4			0x20
 197
 198/* DDRC Software control register */
 199#define DDRC_SWCTL			0x320
 200
 201/* DDRC ECC CE & UE poison mask */
 202#define ECC_CEPOISON_MASK		0x3
 203#define ECC_UEPOISON_MASK		0x1
 204
 205/* DDRC Device config masks */
 206#define DDRC_MSTR_CFG_MASK		0xC0000000
 207#define DDRC_MSTR_CFG_SHIFT		30
 208#define DDRC_MSTR_CFG_X4_MASK		0x0
 209#define DDRC_MSTR_CFG_X8_MASK		0x1
 210#define DDRC_MSTR_CFG_X16_MASK		0x2
 211#define DDRC_MSTR_CFG_X32_MASK		0x3
 212
 213#define DDR_MAX_ROW_SHIFT		18
 214#define DDR_MAX_COL_SHIFT		14
 215#define DDR_MAX_BANK_SHIFT		3
 216#define DDR_MAX_BANKGRP_SHIFT		2
 217
 218#define ROW_MAX_VAL_MASK		0xF
 219#define COL_MAX_VAL_MASK		0xF
 220#define BANK_MAX_VAL_MASK		0x1F
 221#define BANKGRP_MAX_VAL_MASK		0x1F
 222#define RANK_MAX_VAL_MASK		0x1F
 223
 224#define ROW_B0_BASE			6
 225#define ROW_B1_BASE			7
 226#define ROW_B2_BASE			8
 227#define ROW_B3_BASE			9
 228#define ROW_B4_BASE			10
 229#define ROW_B5_BASE			11
 230#define ROW_B6_BASE			12
 231#define ROW_B7_BASE			13
 232#define ROW_B8_BASE			14
 233#define ROW_B9_BASE			15
 234#define ROW_B10_BASE			16
 235#define ROW_B11_BASE			17
 236#define ROW_B12_BASE			18
 237#define ROW_B13_BASE			19
 238#define ROW_B14_BASE			20
 239#define ROW_B15_BASE			21
 240#define ROW_B16_BASE			22
 241#define ROW_B17_BASE			23
 242
 243#define COL_B2_BASE			2
 244#define COL_B3_BASE			3
 245#define COL_B4_BASE			4
 246#define COL_B5_BASE			5
 247#define COL_B6_BASE			6
 248#define COL_B7_BASE			7
 249#define COL_B8_BASE			8
 250#define COL_B9_BASE			9
 251#define COL_B10_BASE			10
 252#define COL_B11_BASE			11
 253#define COL_B12_BASE			12
 254#define COL_B13_BASE			13
 255
 256#define BANK_B0_BASE			2
 257#define BANK_B1_BASE			3
 258#define BANK_B2_BASE			4
 259
 260#define BANKGRP_B0_BASE			2
 261#define BANKGRP_B1_BASE			3
 262
 263#define RANK_B0_BASE			6
 264
/**
 * struct ecc_error_info - ECC error log information.
 * @row:	Row number.
 * @col:	Column number.
 * @bank:	Bank number.
 * @bitpos:	Bit position.
 * @data:	Data causing the error.
 * @bankgrpnr:	Bank group number.
 * @blknr:	Block number.
 *
 * One decoded ECC error log entry (either correctable or uncorrectable),
 * filled in by the platform get_error_info() hook from the controller's
 * address/syndrome registers. @bankgrpnr and @blknr are only populated by
 * the ZynqMP/Synopsys-v3 decode path; the Zynq path fills @col instead.
 */
struct ecc_error_info {
	u32 row;
	u32 col;
	u32 bank;
	u32 bitpos;
	u32 data;
	u32 bankgrpnr;
	u32 blknr;
};
 284
/**
 * struct synps_ecc_status - ECC status information to report.
 * @ce_cnt:	Correctable error count.
 * @ue_cnt:	Uncorrectable error count.
 * @ceinfo:	Correctable error log information.
 * @ueinfo:	Uncorrectable error log information.
 *
 * Snapshot of the controller's error state for one polling/interrupt pass;
 * consumed (and then zeroed) by handle_error().
 */
struct synps_ecc_status {
	u32 ce_cnt;
	u32 ue_cnt;
	struct ecc_error_info ceinfo;
	struct ecc_error_info ueinfo;
};
 298
/**
 * struct synps_edac_priv - DDR memory controller private instance data.
 * @baseaddr:		Base address of the DDR controller.
 * @message:		Buffer for framing the event specific info.
 * @stat:		ECC status information.
 * @p_data:		Platform data.
 * @ce_cnt:		Correctable Error count.
 * @ue_cnt:		Uncorrectable Error count.
 * @poison_addr:	Data poison address.
 * @row_shift:		Bit shifts for row bit.
 * @col_shift:		Bit shifts for column bit.
 * @bank_shift:		Bit shifts for bank bit.
 * @bankgrp_shift:	Bit shifts for bank group bit.
 * @rank_shift:		Bit shifts for rank bit.
 *
 * @ce_cnt/@ue_cnt accumulate totals across the instance lifetime, while
 * @stat holds only the most recent hardware snapshot. The fields from
 * @poison_addr onward exist only when CONFIG_EDAC_DEBUG is enabled and
 * back the error-injection sysfs attributes.
 */
struct synps_edac_priv {
	void __iomem *baseaddr;
	char message[SYNPS_EDAC_MSG_SIZE];
	struct synps_ecc_status stat;
	const struct synps_platform_data *p_data;
	u32 ce_cnt;
	u32 ue_cnt;
#ifdef CONFIG_EDAC_DEBUG
	ulong poison_addr;
	u32 row_shift[18];
	u32 col_shift[14];
	u32 bank_shift[3];
	u32 bankgrp_shift[2];
	u32 rank_shift[1];
#endif
};
 330
/**
 * struct synps_platform_data -  synps platform data structure.
 * @get_error_info:	Get EDAC error info.
 * @get_mtype:		Get mtype.
 * @get_dtype:		Get dtype.
 * @get_ecc_state:	Get ECC state.
 * @quirks:		To differentiate IPs.
 *
 * Per-compatible operations table selected via the OF match data; the
 * DDR_ECC_* quirk bits steer interrupt vs. polling mode and the debug
 * poison support.
 */
struct synps_platform_data {
	int (*get_error_info)(struct synps_edac_priv *priv);
	enum mem_type (*get_mtype)(const void __iomem *base);
	enum dev_type (*get_dtype)(const void __iomem *base);
	bool (*get_ecc_state)(void __iomem *base);
	int quirks;
};
 346
/**
 * zynq_get_error_info - Get the current ECC error info.
 * @priv:	DDR memory controller private instance data.
 *
 * Read the Zynq ECC status and log registers into priv->stat and clear
 * the latched errors in hardware.
 *
 * Return: one if there is no error, otherwise zero.
 */
static int zynq_get_error_info(struct synps_edac_priv *priv)
{
	struct synps_ecc_status *p;
	u32 regval, clearval = 0;
	void __iomem *base;

	base = priv->baseaddr;
	p = &priv->stat;

	/* STAT packs both counts; zero means nothing is pending. */
	regval = readl(base + STAT_OFST);
	if (!regval)
		return 1;

	p->ce_cnt = (regval & STAT_CECNT_MASK) >> STAT_CECNT_SHIFT;
	p->ue_cnt = regval & STAT_UECNT_MASK;

	/* Decode the CE log only when a CE occurred and the log is valid. */
	regval = readl(base + CE_LOG_OFST);
	if (!(p->ce_cnt && (regval & LOG_VALID)))
		goto ue_err;

	p->ceinfo.bitpos = (regval & CE_LOG_BITPOS_MASK) >> CE_LOG_BITPOS_SHIFT;
	regval = readl(base + CE_ADDR_OFST);
	p->ceinfo.row = (regval & ADDR_ROW_MASK) >> ADDR_ROW_SHIFT;
	p->ceinfo.col = regval & ADDR_COL_MASK;
	p->ceinfo.bank = (regval & ADDR_BANK_MASK) >> ADDR_BANK_SHIFT;
	p->ceinfo.data = readl(base + CE_DATA_31_0_OFST);
	edac_dbg(3, "CE bit position: %d data: %d\n", p->ceinfo.bitpos,
		 p->ceinfo.data);
	clearval = ECC_CTRL_CLR_CE_ERR;

ue_err:
	/* Same validity gate for the UE log. */
	regval = readl(base + UE_LOG_OFST);
	if (!(p->ue_cnt && (regval & LOG_VALID)))
		goto out;

	regval = readl(base + UE_ADDR_OFST);
	p->ueinfo.row = (regval & ADDR_ROW_MASK) >> ADDR_ROW_SHIFT;
	p->ueinfo.col = regval & ADDR_COL_MASK;
	p->ueinfo.bank = (regval & ADDR_BANK_MASK) >> ADDR_BANK_SHIFT;
	p->ueinfo.data = readl(base + UE_DATA_31_0_OFST);
	clearval |= ECC_CTRL_CLR_UE_ERR;

out:
	/* Pulse the clear bits, then release them so new errors can latch. */
	writel(clearval, base + ECC_CTRL_OFST);
	writel(0x0, base + ECC_CTRL_OFST);

	return 0;
}
 401
/**
 * zynqmp_get_error_info - Get the current ECC error info.
 * @priv:	DDR memory controller private instance data.
 *
 * Read the ZynqMP ECC error-count, address and syndrome registers into
 * priv->stat, then clear the hardware error state and counters.
 *
 * Return: one if there is no error otherwise returns zero.
 */
static int zynqmp_get_error_info(struct synps_edac_priv *priv)
{
	struct synps_ecc_status *p;
	u32 regval, clearval = 0;
	void __iomem *base;

	base = priv->baseaddr;
	p = &priv->stat;

	/* ERRCNT packs the CE count (low half) and UE count (high half). */
	regval = readl(base + ECC_ERRCNT_OFST);
	p->ce_cnt = regval & ECC_ERRCNT_CECNT_MASK;
	p->ue_cnt = (regval & ECC_ERRCNT_UECNT_MASK) >> ECC_ERRCNT_UECNT_SHIFT;
	if (!p->ce_cnt)
		goto ue_err;

	/* An all-zero status with a non-zero CE count: treat as no error. */
	regval = readl(base + ECC_STAT_OFST);
	if (!regval)
		return 1;

	p->ceinfo.bitpos = (regval & ECC_STAT_BITNUM_MASK);

	regval = readl(base + ECC_CEADDR0_OFST);
	p->ceinfo.row = (regval & ECC_CEADDR0_RW_MASK);
	regval = readl(base + ECC_CEADDR1_OFST);
	p->ceinfo.bank = (regval & ECC_CEADDR1_BNKNR_MASK) >>
					ECC_CEADDR1_BNKNR_SHIFT;
	p->ceinfo.bankgrpnr = (regval &	ECC_CEADDR1_BNKGRP_MASK) >>
					ECC_CEADDR1_BNKGRP_SHIFT;
	p->ceinfo.blknr = (regval & ECC_CEADDR1_BLKNR_MASK);
	p->ceinfo.data = readl(base + ECC_CSYND0_OFST);
	edac_dbg(2, "ECCCSYN0: 0x%08X ECCCSYN1: 0x%08X ECCCSYN2: 0x%08X\n",
		 readl(base + ECC_CSYND0_OFST), readl(base + ECC_CSYND1_OFST),
		 readl(base + ECC_CSYND2_OFST));
ue_err:
	if (!p->ue_cnt)
		goto out;

	regval = readl(base + ECC_UEADDR0_OFST);
	p->ueinfo.row = (regval & ECC_CEADDR0_RW_MASK);
	regval = readl(base + ECC_UEADDR1_OFST);
	p->ueinfo.bankgrpnr = (regval & ECC_CEADDR1_BNKGRP_MASK) >>
					ECC_CEADDR1_BNKGRP_SHIFT;
	p->ueinfo.bank = (regval & ECC_CEADDR1_BNKNR_MASK) >>
					ECC_CEADDR1_BNKNR_SHIFT;
	p->ueinfo.blknr = (regval & ECC_CEADDR1_BLKNR_MASK);
	p->ueinfo.data = readl(base + ECC_UESYND0_OFST);
out:
	/* Clear both the latched errors and the error counters. */
	clearval = ECC_CTRL_CLR_CE_ERR | ECC_CTRL_CLR_CE_ERRCNT;
	clearval |= ECC_CTRL_CLR_UE_ERR | ECC_CTRL_CLR_UE_ERRCNT;
	writel(clearval, base + ECC_CLR_OFST);
	writel(0x0, base + ECC_CLR_OFST);

	return 0;
}
 462
/**
 * handle_error - Handle Correctable and Uncorrectable errors.
 * @mci:	EDAC memory controller instance.
 * @p:		Synopsys ECC status structure.
 *
 * Handles ECC correctable and uncorrectable errors. Formats a message per
 * error class into the shared priv->message buffer and forwards the counts
 * to the EDAC core, then resets the snapshot in @p.
 *
 * NOTE(review): priv->message is a single shared buffer; if this can run
 * concurrently (interrupt vs. polling) the formatting is racy — confirm
 * callers serialize access.
 */
static void handle_error(struct mem_ctl_info *mci, struct synps_ecc_status *p)
{
	struct synps_edac_priv *priv = mci->pvt_info;
	struct ecc_error_info *pinf;

	if (p->ce_cnt) {
		pinf = &p->ceinfo;
		/* Interrupt-capable IP logs bank-group/block, not column. */
		if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT) {
			snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
				 "DDR ECC error type:%s Row %d Bank %d BankGroup Number %d Block Number %d Bit Position: %d Data: 0x%08x",
				 "CE", pinf->row, pinf->bank,
				 pinf->bankgrpnr, pinf->blknr,
				 pinf->bitpos, pinf->data);
		} else {
			snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
				 "DDR ECC error type:%s Row %d Bank %d Col %d Bit Position: %d Data: 0x%08x",
				 "CE", pinf->row, pinf->bank, pinf->col,
				 pinf->bitpos, pinf->data);
		}

		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
				     p->ce_cnt, 0, 0, 0, 0, 0, -1,
				     priv->message, "");
	}

	if (p->ue_cnt) {
		pinf = &p->ueinfo;
		if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT) {
			snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
				 "DDR ECC error type :%s Row %d Bank %d BankGroup Number %d Block Number %d",
				 "UE", pinf->row, pinf->bank,
				 pinf->bankgrpnr, pinf->blknr);
		} else {
			snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
				 "DDR ECC error type :%s Row %d Bank %d Col %d ",
				 "UE", pinf->row, pinf->bank, pinf->col);
		}

		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
				     p->ue_cnt, 0, 0, 0, 0, 0, -1,
				     priv->message, "");
	}

	/* Consume the snapshot so the next pass starts clean. */
	memset(p, 0, sizeof(*p));
}
 515
 516static void enable_intr(struct synps_edac_priv *priv)
 517{
 
 
 518	/* Enable UE/CE Interrupts */
 519	if (priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)
 520		writel(DDR_UE_MASK | DDR_CE_MASK,
 521		       priv->baseaddr + ECC_CLR_OFST);
 522	else
 523		writel(DDR_QOSUE_MASK | DDR_QOSCE_MASK,
 524		       priv->baseaddr + DDR_QOS_IRQ_EN_OFST);
 525
 
 
 
 
 
 
 
 
 
 526}
 527
 528static void disable_intr(struct synps_edac_priv *priv)
 529{
 
 
 530	/* Disable UE/CE Interrupts */
 531	if (priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)
 532		writel(0x0, priv->baseaddr + ECC_CLR_OFST);
 533	else
 534		writel(DDR_QOSUE_MASK | DDR_QOSCE_MASK,
 535		       priv->baseaddr + DDR_QOS_IRQ_DB_OFST);
 
 
 
 
 
 
 
 
 
 536}
 537
/**
 * intr_handler - Interrupt Handler for ECC interrupts.
 * @irq:        IRQ number.
 * @dev_id:     Device ID.
 *
 * Return: IRQ_NONE, if interrupt not set or IRQ_HANDLED otherwise.
 */
static irqreturn_t intr_handler(int irq, void *dev_id)
{
	const struct synps_platform_data *p_data;
	struct mem_ctl_info *mci = dev_id;
	struct synps_edac_priv *priv;
	int status, regval;

	priv = mci->pvt_info;
	p_data = priv->p_data;

	/*
	 * v3.0 of the controller has the ce/ue bits cleared automatically,
	 * so this condition does not apply.
	 */
	if (!(priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)) {
		regval = readl(priv->baseaddr + DDR_QOS_IRQ_STAT_OFST);
		regval &= (DDR_QOSCE_MASK | DDR_QOSUE_MASK);
		if (!(regval & ECC_CE_UE_INTR_MASK))
			return IRQ_NONE;
	}

	/* Non-zero status means no error was latched: not our interrupt. */
	status = p_data->get_error_info(priv);
	if (status)
		return IRQ_NONE;

	priv->ce_cnt += priv->stat.ce_cnt;
	priv->ue_cnt += priv->stat.ue_cnt;
	handle_error(mci, &priv->stat);

	edac_dbg(3, "Total error count CE %d UE %d\n",
		 priv->ce_cnt, priv->ue_cnt);
	/*
	 * v3.0 of the controller does not have this register.
	 * regval is only used here in the non-self-clear branch, where it
	 * was initialized by the readl above.
	 */
	if (!(priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR))
		writel(regval, priv->baseaddr + DDR_QOS_IRQ_STAT_OFST);
	else
		enable_intr(priv);

	return IRQ_HANDLED;
}
 584
 585/**
 586 * check_errors - Check controller for ECC errors.
 587 * @mci:	EDAC memory controller instance.
 588 *
 589 * Check and post ECC errors. Called by the polling thread.
 590 */
 591static void check_errors(struct mem_ctl_info *mci)
 592{
 593	const struct synps_platform_data *p_data;
 594	struct synps_edac_priv *priv;
 595	int status;
 596
 597	priv = mci->pvt_info;
 598	p_data = priv->p_data;
 599
 600	status = p_data->get_error_info(priv);
 601	if (status)
 602		return;
 603
 604	priv->ce_cnt += priv->stat.ce_cnt;
 605	priv->ue_cnt += priv->stat.ue_cnt;
 606	handle_error(mci, &priv->stat);
 607
 608	edac_dbg(3, "Total error count CE %d UE %d\n",
 609		 priv->ce_cnt, priv->ue_cnt);
 610}
 611
 612/**
 613 * zynq_get_dtype - Return the controller memory width.
 614 * @base:	DDR memory controller base address.
 615 *
 616 * Get the EDAC device type width appropriate for the current controller
 617 * configuration.
 618 *
 619 * Return: a device type width enumeration.
 620 */
 621static enum dev_type zynq_get_dtype(const void __iomem *base)
 622{
 623	enum dev_type dt;
 624	u32 width;
 625
 626	width = readl(base + CTRL_OFST);
 627	width = (width & CTRL_BW_MASK) >> CTRL_BW_SHIFT;
 628
 629	switch (width) {
 630	case DDRCTL_WDTH_16:
 631		dt = DEV_X2;
 632		break;
 633	case DDRCTL_WDTH_32:
 634		dt = DEV_X4;
 635		break;
 636	default:
 637		dt = DEV_UNKNOWN;
 638	}
 639
 640	return dt;
 641}
 642
 643/**
 644 * zynqmp_get_dtype - Return the controller memory width.
 645 * @base:	DDR memory controller base address.
 646 *
 647 * Get the EDAC device type width appropriate for the current controller
 648 * configuration.
 649 *
 650 * Return: a device type width enumeration.
 651 */
 652static enum dev_type zynqmp_get_dtype(const void __iomem *base)
 653{
 654	enum dev_type dt;
 655	u32 width;
 656
 657	width = readl(base + CTRL_OFST);
 658	width = (width & ECC_CTRL_BUSWIDTH_MASK) >> ECC_CTRL_BUSWIDTH_SHIFT;
 659	switch (width) {
 660	case DDRCTL_EWDTH_16:
 661		dt = DEV_X2;
 662		break;
 663	case DDRCTL_EWDTH_32:
 664		dt = DEV_X4;
 665		break;
 666	case DDRCTL_EWDTH_64:
 667		dt = DEV_X8;
 668		break;
 669	default:
 670		dt = DEV_UNKNOWN;
 671	}
 672
 673	return dt;
 674}
 675
 676/**
 677 * zynq_get_ecc_state - Return the controller ECC enable/disable status.
 678 * @base:	DDR memory controller base address.
 679 *
 680 * Get the ECC enable/disable status of the controller.
 681 *
 682 * Return: true if enabled, otherwise false.
 683 */
 684static bool zynq_get_ecc_state(void __iomem *base)
 685{
 686	enum dev_type dt;
 687	u32 ecctype;
 688
 689	dt = zynq_get_dtype(base);
 690	if (dt == DEV_UNKNOWN)
 691		return false;
 692
 693	ecctype = readl(base + SCRUB_OFST) & SCRUB_MODE_MASK;
 694	if ((ecctype == SCRUB_MODE_SECDED) && (dt == DEV_X2))
 695		return true;
 696
 697	return false;
 698}
 699
 700/**
 701 * zynqmp_get_ecc_state - Return the controller ECC enable/disable status.
 702 * @base:	DDR memory controller base address.
 703 *
 704 * Get the ECC enable/disable status for the controller.
 705 *
 706 * Return: a ECC status boolean i.e true/false - enabled/disabled.
 707 */
 708static bool zynqmp_get_ecc_state(void __iomem *base)
 709{
 710	enum dev_type dt;
 711	u32 ecctype;
 712
 713	dt = zynqmp_get_dtype(base);
 714	if (dt == DEV_UNKNOWN)
 715		return false;
 716
 717	ecctype = readl(base + ECC_CFG0_OFST) & SCRUB_MODE_MASK;
 718	if ((ecctype == SCRUB_MODE_SECDED) &&
 719	    ((dt == DEV_X2) || (dt == DEV_X4) || (dt == DEV_X8)))
 720		return true;
 721
 722	return false;
 723}
 724
/**
 * get_memsize - Read the size of the attached memory device.
 *
 * Return: the memory size in bytes.
 */
static u32 get_memsize(void)
{
	struct sysinfo inf;

	si_meminfo(&inf);

	/*
	 * NOTE(review): totalram (pages) times mem_unit can exceed 32 bits
	 * on systems with >= 4 GiB of RAM, so the u32 return truncates —
	 * confirm whether a wider return type is needed.
	 */
	return inf.totalram * inf.mem_unit;
}
 738
 739/**
 740 * zynq_get_mtype - Return the controller memory type.
 741 * @base:	Synopsys ECC status structure.
 742 *
 743 * Get the EDAC memory type appropriate for the current controller
 744 * configuration.
 745 *
 746 * Return: a memory type enumeration.
 747 */
 748static enum mem_type zynq_get_mtype(const void __iomem *base)
 749{
 750	enum mem_type mt;
 751	u32 memtype;
 752
 753	memtype = readl(base + T_ZQ_OFST);
 754
 755	if (memtype & T_ZQ_DDRMODE_MASK)
 756		mt = MEM_DDR3;
 757	else
 758		mt = MEM_DDR2;
 759
 760	return mt;
 761}
 762
 763/**
 764 * zynqmp_get_mtype - Returns controller memory type.
 765 * @base:	Synopsys ECC status structure.
 766 *
 767 * Get the EDAC memory type appropriate for the current controller
 768 * configuration.
 769 *
 770 * Return: a memory type enumeration.
 771 */
 772static enum mem_type zynqmp_get_mtype(const void __iomem *base)
 773{
 774	enum mem_type mt;
 775	u32 memtype;
 776
 777	memtype = readl(base + CTRL_OFST);
 778
 779	if ((memtype & MEM_TYPE_DDR3) || (memtype & MEM_TYPE_LPDDR3))
 780		mt = MEM_DDR3;
 781	else if (memtype & MEM_TYPE_DDR2)
 782		mt = MEM_RDDR2;
 783	else if ((memtype & MEM_TYPE_LPDDR4) || (memtype & MEM_TYPE_DDR4))
 784		mt = MEM_DDR4;
 785	else
 786		mt = MEM_EMPTY;
 787
 788	return mt;
 789}
 790
 791/**
 792 * init_csrows - Initialize the csrow data.
 793 * @mci:	EDAC memory controller instance.
 794 *
 795 * Initialize the chip select rows associated with the EDAC memory
 796 * controller instance.
 797 */
 798static void init_csrows(struct mem_ctl_info *mci)
 799{
 800	struct synps_edac_priv *priv = mci->pvt_info;
 801	const struct synps_platform_data *p_data;
 802	struct csrow_info *csi;
 803	struct dimm_info *dimm;
 804	u32 size, row;
 805	int j;
 806
 807	p_data = priv->p_data;
 808
 809	for (row = 0; row < mci->nr_csrows; row++) {
 810		csi = mci->csrows[row];
 811		size = get_memsize();
 812
 813		for (j = 0; j < csi->nr_channels; j++) {
 814			dimm		= csi->channels[j]->dimm;
 815			dimm->edac_mode	= EDAC_SECDED;
 816			dimm->mtype	= p_data->get_mtype(priv->baseaddr);
 817			dimm->nr_pages	= (size >> PAGE_SHIFT) / csi->nr_channels;
 818			dimm->grain	= SYNPS_EDAC_ERR_GRAIN;
 819			dimm->dtype	= p_data->get_dtype(priv->baseaddr);
 820		}
 821	}
 822}
 823
/**
 * mc_init - Initialize one driver instance.
 * @mci:	EDAC memory controller instance.
 * @pdev:	platform device.
 *
 * Perform initialization of the EDAC memory controller instance and
 * related driver-private data associated with the memory controller the
 * instance is bound to.
 */
static void mc_init(struct mem_ctl_info *mci, struct platform_device *pdev)
{
	struct synps_edac_priv *priv;

	mci->pdev = &pdev->dev;
	priv = mci->pvt_info;
	platform_set_drvdata(pdev, mci);

	/*
	 * Initialize controller capabilities and configuration.
	 * NOTE(review): mtype_cap omits DDR4 even though zynqmp_get_mtype()
	 * can report MEM_DDR4 — confirm whether that is intentional.
	 */
	mci->mtype_cap = MEM_FLAG_DDR3 | MEM_FLAG_DDR2;
	mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
	mci->scrub_cap = SCRUB_HW_SRC;
	mci->scrub_mode = SCRUB_NONE;

	mci->edac_cap = EDAC_FLAG_SECDED;
	mci->ctl_name = "synps_ddr_controller";
	mci->dev_name = SYNPS_EDAC_MOD_STRING;
	mci->mod_name = SYNPS_EDAC_MOD_VER;

	/* Interrupt-capable IP runs in interrupt mode; otherwise poll. */
	if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT) {
		edac_op_state = EDAC_OPSTATE_INT;
	} else {
		edac_op_state = EDAC_OPSTATE_POLL;
		mci->edac_check = check_errors;
	}

	mci->ctl_page_to_phys = NULL;

	init_csrows(mci);
}
 863
 864static int setup_irq(struct mem_ctl_info *mci,
 865		     struct platform_device *pdev)
 866{
 867	struct synps_edac_priv *priv = mci->pvt_info;
 868	int ret, irq;
 869
 870	irq = platform_get_irq(pdev, 0);
 871	if (irq < 0) {
 872		edac_printk(KERN_ERR, EDAC_MC,
 873			    "No IRQ %d in DT\n", irq);
 874		return irq;
 875	}
 876
 877	ret = devm_request_irq(&pdev->dev, irq, intr_handler,
 878			       0, dev_name(&pdev->dev), mci);
 879	if (ret < 0) {
 880		edac_printk(KERN_ERR, EDAC_MC, "Failed to request IRQ\n");
 881		return ret;
 882	}
 883
 884	enable_intr(priv);
 885
 886	return 0;
 887}
 888
/* Zynq (pre-interrupt IP): polling mode only, no quirks. */
static const struct synps_platform_data zynq_edac_def = {
	.get_error_info	= zynq_get_error_info,
	.get_mtype	= zynq_get_mtype,
	.get_dtype	= zynq_get_dtype,
	.get_ecc_state	= zynq_get_ecc_state,
	.quirks		= 0,
};
 896
/* ZynqMP DDRC: interrupt-driven; data poisoning only in debug builds. */
static const struct synps_platform_data zynqmp_edac_def = {
	.get_error_info	= zynqmp_get_error_info,
	.get_mtype	= zynqmp_get_mtype,
	.get_dtype	= zynqmp_get_dtype,
	.get_ecc_state	= zynqmp_get_ecc_state,
	.quirks         = (DDR_ECC_INTR_SUPPORT
#ifdef CONFIG_EDAC_DEBUG
			  | DDR_ECC_DATA_POISON_SUPPORT
#endif
			  ),
};
 908
/* Synopsys DDRC v3.x: interrupts self-clear, shares the ZynqMP hooks. */
static const struct synps_platform_data synopsys_edac_def = {
	.get_error_info	= zynqmp_get_error_info,
	.get_mtype	= zynqmp_get_mtype,
	.get_dtype	= zynqmp_get_dtype,
	.get_ecc_state	= zynqmp_get_ecc_state,
	.quirks         = (DDR_ECC_INTR_SUPPORT | DDR_ECC_INTR_SELF_CLEAR
#ifdef CONFIG_EDAC_DEBUG
			  | DDR_ECC_DATA_POISON_SUPPORT
#endif
			  ),
};
 920
 921
/* Device-tree match table; .data selects the per-IP operations above. */
static const struct of_device_id synps_edac_match[] = {
	{
		.compatible = "xlnx,zynq-ddrc-a05",
		.data = (void *)&zynq_edac_def
	},
	{
		.compatible = "xlnx,zynqmp-ddrc-2.40a",
		.data = (void *)&zynqmp_edac_def
	},
	{
		.compatible = "snps,ddrc-3.80a",
		.data = (void *)&synopsys_edac_def
	},
	{
		/* end of table */
	}
};

MODULE_DEVICE_TABLE(of, synps_edac_match);
 941
 942#ifdef CONFIG_EDAC_DEBUG
 943#define to_mci(k) container_of(k, struct mem_ctl_info, dev)
 944
/**
 * ddr_poison_setup -	Update poison registers.
 * @priv:		DDR memory controller private instance data.
 *
 * Translate priv->poison_addr into rank/bank-group/bank/row/column
 * coordinates using the address-map shifts captured at probe time, and
 * program them into the two ECC poison registers.
 * Return: none.
 */
static void ddr_poison_setup(struct synps_edac_priv *priv)
{
	int col = 0, row = 0, bank = 0, bankgrp = 0, rank = 0, regval;
	int index;
	ulong hif_addr = 0;

	/*
	 * Drop the low 3 address bits before applying the per-bit shifts.
	 * NOTE(review): presumably this converts the byte address to a HIF
	 * address (8 bytes per HIF beat) — confirm against the DDRC spec.
	 */
	hif_addr = priv->poison_addr >> 3;

	/* A zero shift marks the end of the mapped row bits. */
	for (index = 0; index < DDR_MAX_ROW_SHIFT; index++) {
		if (priv->row_shift[index])
			row |= (((hif_addr >> priv->row_shift[index]) &
						BIT(0)) << index);
		else
			break;
	}

	/* Column bits 0-2 are always taken, even if their shift is zero. */
	for (index = 0; index < DDR_MAX_COL_SHIFT; index++) {
		if (priv->col_shift[index] || index < 3)
			col |= (((hif_addr >> priv->col_shift[index]) &
						BIT(0)) << index);
		else
			break;
	}

	for (index = 0; index < DDR_MAX_BANK_SHIFT; index++) {
		if (priv->bank_shift[index])
			bank |= (((hif_addr >> priv->bank_shift[index]) &
						BIT(0)) << index);
		else
			break;
	}

	for (index = 0; index < DDR_MAX_BANKGRP_SHIFT; index++) {
		if (priv->bankgrp_shift[index])
			bankgrp |= (((hif_addr >> priv->bankgrp_shift[index])
						& BIT(0)) << index);
		else
			break;
	}

	if (priv->rank_shift[0])
		rank = (hif_addr >> priv->rank_shift[0]) & BIT(0);

	/* POISON0: rank + column; POISON1: bank group + bank + row. */
	regval = (rank << ECC_POISON0_RANK_SHIFT) & ECC_POISON0_RANK_MASK;
	regval |= (col << ECC_POISON0_COLUMN_SHIFT) & ECC_POISON0_COLUMN_MASK;
	writel(regval, priv->baseaddr + ECC_POISON0_OFST);

	regval = (bankgrp << ECC_POISON1_BG_SHIFT) & ECC_POISON1_BG_MASK;
	regval |= (bank << ECC_POISON1_BANKNR_SHIFT) & ECC_POISON1_BANKNR_MASK;
	regval |= (row << ECC_POISON1_ROW_SHIFT) & ECC_POISON1_ROW_MASK;
	writel(regval, priv->baseaddr + ECC_POISON1_OFST);
}
1004
/* sysfs show: report the programmed poison registers and target address. */
static ssize_t inject_data_error_show(struct device *dev,
				      struct device_attribute *mattr,
				      char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct synps_edac_priv *priv = mci->pvt_info;

	return sprintf(data, "Poison0 Addr: 0x%08x\n\rPoison1 Addr: 0x%08x\n\r"
			"Error injection Address: 0x%lx\n\r",
			readl(priv->baseaddr + ECC_POISON0_OFST),
			readl(priv->baseaddr + ECC_POISON1_OFST),
			priv->poison_addr);
}
1018
/*
 * sysfs store: parse the poison target address and program the poison
 * registers accordingly. Returns -EINVAL on a malformed number.
 */
static ssize_t inject_data_error_store(struct device *dev,
				       struct device_attribute *mattr,
				       const char *data, size_t count)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct synps_edac_priv *priv = mci->pvt_info;

	if (kstrtoul(data, 0, &priv->poison_addr))
		return -EINVAL;

	ddr_poison_setup(priv);

	return count;
}
1033
/*
 * sysfs show: report the configured poison type. ECC_CFG1 low bits 0x3
 * select correctable-error injection (see the store handler below).
 */
static ssize_t inject_data_poison_show(struct device *dev,
				       struct device_attribute *mattr,
				       char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct synps_edac_priv *priv = mci->pvt_info;

	return sprintf(data, "Data Poisoning: %s\n\r",
			(((readl(priv->baseaddr + ECC_CFG1_OFST)) & 0x3) == 0x3)
			? ("Correctable Error") : ("UnCorrectable Error"));
}
1045
/*
 * sysfs store: select CE or UE poisoning. The write to ECC_CFG1 is
 * bracketed by SWCTL 0/1 because the field is quasi-dynamic and may only
 * be changed while software programming is enabled.
 */
static ssize_t inject_data_poison_store(struct device *dev,
					struct device_attribute *mattr,
					const char *data, size_t count)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct synps_edac_priv *priv = mci->pvt_info;

	writel(0, priv->baseaddr + DDRC_SWCTL);
	if (strncmp(data, "CE", 2) == 0)
		writel(ECC_CEPOISON_MASK, priv->baseaddr + ECC_CFG1_OFST);
	else
		writel(ECC_UEPOISON_MASK, priv->baseaddr + ECC_CFG1_OFST);
	writel(1, priv->baseaddr + DDRC_SWCTL);

	return count;
}
1062
/* Debug knobs: poison target address and CE/UE poison type selection. */
static DEVICE_ATTR_RW(inject_data_error);
static DEVICE_ATTR_RW(inject_data_poison);
1065
1066static int edac_create_sysfs_attributes(struct mem_ctl_info *mci)
1067{
1068	int rc;
1069
1070	rc = device_create_file(&mci->dev, &dev_attr_inject_data_error);
1071	if (rc < 0)
1072		return rc;
1073	rc = device_create_file(&mci->dev, &dev_attr_inject_data_poison);
1074	if (rc < 0)
1075		return rc;
1076	return 0;
1077}
1078
1079static void edac_remove_sysfs_attributes(struct mem_ctl_info *mci)
1080{
1081	device_remove_file(&mci->dev, &dev_attr_inject_data_error);
1082	device_remove_file(&mci->dev, &dev_attr_inject_data_poison);
1083}
1084
1085static void setup_row_address_map(struct synps_edac_priv *priv, u32 *addrmap)
1086{
1087	u32 addrmap_row_b2_10;
1088	int index;
1089
1090	priv->row_shift[0] = (addrmap[5] & ROW_MAX_VAL_MASK) + ROW_B0_BASE;
1091	priv->row_shift[1] = ((addrmap[5] >> 8) &
1092			ROW_MAX_VAL_MASK) + ROW_B1_BASE;
1093
1094	addrmap_row_b2_10 = (addrmap[5] >> 16) & ROW_MAX_VAL_MASK;
1095	if (addrmap_row_b2_10 != ROW_MAX_VAL_MASK) {
1096		for (index = 2; index < 11; index++)
1097			priv->row_shift[index] = addrmap_row_b2_10 +
1098				index + ROW_B0_BASE;
1099
1100	} else {
1101		priv->row_shift[2] = (addrmap[9] &
1102				ROW_MAX_VAL_MASK) + ROW_B2_BASE;
1103		priv->row_shift[3] = ((addrmap[9] >> 8) &
1104				ROW_MAX_VAL_MASK) + ROW_B3_BASE;
1105		priv->row_shift[4] = ((addrmap[9] >> 16) &
1106				ROW_MAX_VAL_MASK) + ROW_B4_BASE;
1107		priv->row_shift[5] = ((addrmap[9] >> 24) &
1108				ROW_MAX_VAL_MASK) + ROW_B5_BASE;
1109		priv->row_shift[6] = (addrmap[10] &
1110				ROW_MAX_VAL_MASK) + ROW_B6_BASE;
1111		priv->row_shift[7] = ((addrmap[10] >> 8) &
1112				ROW_MAX_VAL_MASK) + ROW_B7_BASE;
1113		priv->row_shift[8] = ((addrmap[10] >> 16) &
1114				ROW_MAX_VAL_MASK) + ROW_B8_BASE;
1115		priv->row_shift[9] = ((addrmap[10] >> 24) &
1116				ROW_MAX_VAL_MASK) + ROW_B9_BASE;
1117		priv->row_shift[10] = (addrmap[11] &
1118				ROW_MAX_VAL_MASK) + ROW_B10_BASE;
1119	}
1120
1121	priv->row_shift[11] = (((addrmap[5] >> 24) & ROW_MAX_VAL_MASK) ==
1122				ROW_MAX_VAL_MASK) ? 0 : (((addrmap[5] >> 24) &
1123				ROW_MAX_VAL_MASK) + ROW_B11_BASE);
1124	priv->row_shift[12] = ((addrmap[6] & ROW_MAX_VAL_MASK) ==
1125				ROW_MAX_VAL_MASK) ? 0 : ((addrmap[6] &
1126				ROW_MAX_VAL_MASK) + ROW_B12_BASE);
1127	priv->row_shift[13] = (((addrmap[6] >> 8) & ROW_MAX_VAL_MASK) ==
1128				ROW_MAX_VAL_MASK) ? 0 : (((addrmap[6] >> 8) &
1129				ROW_MAX_VAL_MASK) + ROW_B13_BASE);
1130	priv->row_shift[14] = (((addrmap[6] >> 16) & ROW_MAX_VAL_MASK) ==
1131				ROW_MAX_VAL_MASK) ? 0 : (((addrmap[6] >> 16) &
1132				ROW_MAX_VAL_MASK) + ROW_B14_BASE);
1133	priv->row_shift[15] = (((addrmap[6] >> 24) & ROW_MAX_VAL_MASK) ==
1134				ROW_MAX_VAL_MASK) ? 0 : (((addrmap[6] >> 24) &
1135				ROW_MAX_VAL_MASK) + ROW_B15_BASE);
1136	priv->row_shift[16] = ((addrmap[7] & ROW_MAX_VAL_MASK) ==
1137				ROW_MAX_VAL_MASK) ? 0 : ((addrmap[7] &
1138				ROW_MAX_VAL_MASK) + ROW_B16_BASE);
1139	priv->row_shift[17] = (((addrmap[7] >> 8) & ROW_MAX_VAL_MASK) ==
1140				ROW_MAX_VAL_MASK) ? 0 : (((addrmap[7] >> 8) &
1141				ROW_MAX_VAL_MASK) + ROW_B17_BASE);
1142}
1143
/*
 * Fill priv->col_shift[] from ADDRMAP2..4, adjusted by the controller's
 * bus width (read from CTRL_OFST) and whether the memory is LPDDR3.
 * An all-ones ADDRMAP field means the column bit is unused (shift = 0).
 */
static void setup_column_address_map(struct synps_edac_priv *priv, u32 *addrmap)
{
	u32 width, memtype;
	int index;

	memtype = readl(priv->baseaddr + CTRL_OFST);
	width = (memtype & ECC_CTRL_BUSWIDTH_MASK) >> ECC_CTRL_BUSWIDTH_SHIFT;

	/* Column bits 0 and 1 always map straight through. */
	priv->col_shift[0] = 0;
	priv->col_shift[1] = 1;
	priv->col_shift[2] = (addrmap[2] & COL_MAX_VAL_MASK) + COL_B2_BASE;
	priv->col_shift[3] = ((addrmap[2] >> 8) &
			COL_MAX_VAL_MASK) + COL_B3_BASE;
	priv->col_shift[4] = (((addrmap[2] >> 16) & COL_MAX_VAL_MASK) ==
			COL_MAX_VAL_MASK) ? 0 : (((addrmap[2] >> 16) &
					COL_MAX_VAL_MASK) + COL_B4_BASE);
	priv->col_shift[5] = (((addrmap[2] >> 24) & COL_MAX_VAL_MASK) ==
			COL_MAX_VAL_MASK) ? 0 : (((addrmap[2] >> 24) &
					COL_MAX_VAL_MASK) + COL_B5_BASE);
	priv->col_shift[6] = ((addrmap[3] & COL_MAX_VAL_MASK) ==
			COL_MAX_VAL_MASK) ? 0 : ((addrmap[3] &
					COL_MAX_VAL_MASK) + COL_B6_BASE);
	priv->col_shift[7] = (((addrmap[3] >> 8) & COL_MAX_VAL_MASK) ==
			COL_MAX_VAL_MASK) ? 0 : (((addrmap[3] >> 8) &
					COL_MAX_VAL_MASK) + COL_B7_BASE);
	priv->col_shift[8] = (((addrmap[3] >> 16) & COL_MAX_VAL_MASK) ==
			COL_MAX_VAL_MASK) ? 0 : (((addrmap[3] >> 16) &
					COL_MAX_VAL_MASK) + COL_B8_BASE);
	priv->col_shift[9] = (((addrmap[3] >> 24) & COL_MAX_VAL_MASK) ==
			COL_MAX_VAL_MASK) ? 0 : (((addrmap[3] >> 24) &
					COL_MAX_VAL_MASK) + COL_B9_BASE);
	/*
	 * The high column bits come from different ADDRMAP fields depending
	 * on bus width and memory type.
	 * NOTE(review): the non-LPDDR3 branches skip col_shift[12] and write
	 * col_shift[13] instead of [11]/[12] — this looks intentional (DDR4
	 * column numbering) but should be confirmed against the controller's
	 * address-map documentation.
	 */
	if (width == DDRCTL_EWDTH_64) {
		if (memtype & MEM_TYPE_LPDDR3) {
			priv->col_shift[10] = ((addrmap[4] &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				((addrmap[4] & COL_MAX_VAL_MASK) +
				 COL_B10_BASE);
			priv->col_shift[11] = (((addrmap[4] >> 8) &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				(((addrmap[4] >> 8) & COL_MAX_VAL_MASK) +
				 COL_B11_BASE);
		} else {
			priv->col_shift[11] = ((addrmap[4] &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				((addrmap[4] & COL_MAX_VAL_MASK) +
				 COL_B10_BASE);
			priv->col_shift[13] = (((addrmap[4] >> 8) &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				(((addrmap[4] >> 8) & COL_MAX_VAL_MASK) +
				 COL_B11_BASE);
		}
	} else if (width == DDRCTL_EWDTH_32) {
		if (memtype & MEM_TYPE_LPDDR3) {
			priv->col_shift[10] = (((addrmap[3] >> 24) &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				(((addrmap[3] >> 24) & COL_MAX_VAL_MASK) +
				 COL_B9_BASE);
			priv->col_shift[11] = ((addrmap[4] &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				((addrmap[4] & COL_MAX_VAL_MASK) +
				 COL_B10_BASE);
		} else {
			priv->col_shift[11] = (((addrmap[3] >> 24) &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				(((addrmap[3] >> 24) & COL_MAX_VAL_MASK) +
				 COL_B9_BASE);
			priv->col_shift[13] = ((addrmap[4] &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				((addrmap[4] & COL_MAX_VAL_MASK) +
				 COL_B10_BASE);
		}
	} else {
		/* 16-bit bus (DDRCTL_EWDTH_16). */
		if (memtype & MEM_TYPE_LPDDR3) {
			priv->col_shift[10] = (((addrmap[3] >> 16) &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				(((addrmap[3] >> 16) & COL_MAX_VAL_MASK) +
				 COL_B8_BASE);
			priv->col_shift[11] = (((addrmap[3] >> 24) &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				(((addrmap[3] >> 24) & COL_MAX_VAL_MASK) +
				 COL_B9_BASE);
			priv->col_shift[13] = ((addrmap[4] &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				((addrmap[4] & COL_MAX_VAL_MASK) +
				 COL_B10_BASE);
		} else {
			priv->col_shift[11] = (((addrmap[3] >> 16) &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				(((addrmap[3] >> 16) & COL_MAX_VAL_MASK) +
				 COL_B8_BASE);
			priv->col_shift[13] = (((addrmap[3] >> 24) &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				(((addrmap[3] >> 24) & COL_MAX_VAL_MASK) +
				 COL_B9_BASE);
		}
	}

	/*
	 * For narrower buses (width != 0) move the decoded shifts down by
	 * the width code, zeroing the vacated low entries.
	 * NOTE(review): assumes the width code equals the number of column
	 * bits absorbed by the bus — confirm against the mapping spec.
	 */
	if (width) {
		for (index = 9; index > width; index--) {
			priv->col_shift[index] = priv->col_shift[index - width];
			priv->col_shift[index - width] = 0;
		}
	}

}
1249
1250static void setup_bank_address_map(struct synps_edac_priv *priv, u32 *addrmap)
1251{
1252	priv->bank_shift[0] = (addrmap[1] & BANK_MAX_VAL_MASK) + BANK_B0_BASE;
1253	priv->bank_shift[1] = ((addrmap[1] >> 8) &
1254				BANK_MAX_VAL_MASK) + BANK_B1_BASE;
1255	priv->bank_shift[2] = (((addrmap[1] >> 16) &
1256				BANK_MAX_VAL_MASK) == BANK_MAX_VAL_MASK) ? 0 :
1257				(((addrmap[1] >> 16) & BANK_MAX_VAL_MASK) +
1258				 BANK_B2_BASE);
1259
1260}
1261
1262static void setup_bg_address_map(struct synps_edac_priv *priv, u32 *addrmap)
1263{
1264	priv->bankgrp_shift[0] = (addrmap[8] &
1265				BANKGRP_MAX_VAL_MASK) + BANKGRP_B0_BASE;
1266	priv->bankgrp_shift[1] = (((addrmap[8] >> 8) & BANKGRP_MAX_VAL_MASK) ==
1267				BANKGRP_MAX_VAL_MASK) ? 0 : (((addrmap[8] >> 8)
1268				& BANKGRP_MAX_VAL_MASK) + BANKGRP_B1_BASE);
1269
1270}
1271
1272static void setup_rank_address_map(struct synps_edac_priv *priv, u32 *addrmap)
1273{
1274	priv->rank_shift[0] = ((addrmap[0] & RANK_MAX_VAL_MASK) ==
1275				RANK_MAX_VAL_MASK) ? 0 : ((addrmap[0] &
1276				RANK_MAX_VAL_MASK) + RANK_B0_BASE);
1277}
1278
1279/**
1280 * setup_address_map -	Set Address Map by querying ADDRMAP registers.
1281 * @priv:		DDR memory controller private instance data.
1282 *
1283 * Set Address Map by querying ADDRMAP registers.
1284 *
1285 * Return: none.
1286 */
1287static void setup_address_map(struct synps_edac_priv *priv)
1288{
1289	u32 addrmap[12];
1290	int index;
1291
1292	for (index = 0; index < 12; index++) {
1293		u32 addrmap_offset;
1294
1295		addrmap_offset = ECC_ADDRMAP0_OFFSET + (index * 4);
1296		addrmap[index] = readl(priv->baseaddr + addrmap_offset);
1297	}
1298
1299	setup_row_address_map(priv, addrmap);
1300
1301	setup_column_address_map(priv, addrmap);
1302
1303	setup_bank_address_map(priv, addrmap);
1304
1305	setup_bg_address_map(priv, addrmap);
1306
1307	setup_rank_address_map(priv, addrmap);
1308}
1309#endif /* CONFIG_EDAC_DEBUG */
1310
1311/**
1312 * mc_probe - Check controller and bind driver.
1313 * @pdev:	platform device.
1314 *
1315 * Probe a specific controller instance for binding with the driver.
1316 *
1317 * Return: 0 if the controller instance was successfully bound to the
1318 * driver; otherwise, < 0 on error.
1319 */
1320static int mc_probe(struct platform_device *pdev)
1321{
1322	const struct synps_platform_data *p_data;
1323	struct edac_mc_layer layers[2];
1324	struct synps_edac_priv *priv;
1325	struct mem_ctl_info *mci;
1326	void __iomem *baseaddr;
1327	struct resource *res;
1328	int rc;
1329
1330	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1331	baseaddr = devm_ioremap_resource(&pdev->dev, res);
1332	if (IS_ERR(baseaddr))
1333		return PTR_ERR(baseaddr);
1334
1335	p_data = of_device_get_match_data(&pdev->dev);
1336	if (!p_data)
1337		return -ENODEV;
1338
1339	if (!p_data->get_ecc_state(baseaddr)) {
1340		edac_printk(KERN_INFO, EDAC_MC, "ECC not enabled\n");
1341		return -ENXIO;
1342	}
1343
1344	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
1345	layers[0].size = SYNPS_EDAC_NR_CSROWS;
1346	layers[0].is_virt_csrow = true;
1347	layers[1].type = EDAC_MC_LAYER_CHANNEL;
1348	layers[1].size = SYNPS_EDAC_NR_CHANS;
1349	layers[1].is_virt_csrow = false;
1350
1351	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
1352			    sizeof(struct synps_edac_priv));
1353	if (!mci) {
1354		edac_printk(KERN_ERR, EDAC_MC,
1355			    "Failed memory allocation for mc instance\n");
1356		return -ENOMEM;
1357	}
1358
1359	priv = mci->pvt_info;
1360	priv->baseaddr = baseaddr;
1361	priv->p_data = p_data;
 
1362
1363	mc_init(mci, pdev);
1364
1365	if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT) {
1366		rc = setup_irq(mci, pdev);
1367		if (rc)
1368			goto free_edac_mc;
1369	}
1370
1371	rc = edac_mc_add_mc(mci);
1372	if (rc) {
1373		edac_printk(KERN_ERR, EDAC_MC,
1374			    "Failed to register with EDAC core\n");
1375		goto free_edac_mc;
1376	}
1377
1378#ifdef CONFIG_EDAC_DEBUG
1379	if (priv->p_data->quirks & DDR_ECC_DATA_POISON_SUPPORT) {
1380		rc = edac_create_sysfs_attributes(mci);
1381		if (rc) {
1382			edac_printk(KERN_ERR, EDAC_MC,
1383					"Failed to create sysfs entries\n");
1384			goto free_edac_mc;
1385		}
1386	}
1387
1388	if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT)
1389		setup_address_map(priv);
1390#endif
1391
1392	/*
1393	 * Start capturing the correctable and uncorrectable errors. A write of
1394	 * 0 starts the counters.
1395	 */
1396	if (!(priv->p_data->quirks & DDR_ECC_INTR_SUPPORT))
1397		writel(0x0, baseaddr + ECC_CTRL_OFST);
1398
1399	return rc;
1400
1401free_edac_mc:
1402	edac_mc_free(mci);
1403
1404	return rc;
1405}
1406
1407/**
1408 * mc_remove - Unbind driver from controller.
1409 * @pdev:	Platform device.
1410 *
1411 * Return: Unconditionally 0
1412 */
1413static void mc_remove(struct platform_device *pdev)
1414{
1415	struct mem_ctl_info *mci = platform_get_drvdata(pdev);
1416	struct synps_edac_priv *priv = mci->pvt_info;
1417
1418	if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT)
1419		disable_intr(priv);
1420
1421#ifdef CONFIG_EDAC_DEBUG
1422	if (priv->p_data->quirks & DDR_ECC_DATA_POISON_SUPPORT)
1423		edac_remove_sysfs_attributes(mci);
1424#endif
1425
1426	edac_mc_del_mc(&pdev->dev);
1427	edac_mc_free(mci);
1428}
1429
/* Platform driver glue; instances are matched via the synps_edac_match table. */
static struct platform_driver synps_edac_mc_driver = {
	.driver = {
		   .name = "synopsys-edac",
		   .of_match_table = synps_edac_match,
		   },
	.probe = mc_probe,
	.remove_new = mc_remove,
};

module_platform_driver(synps_edac_mc_driver);

MODULE_AUTHOR("Xilinx Inc");
MODULE_DESCRIPTION("Synopsys DDR ECC driver");
MODULE_LICENSE("GPL v2");
v6.9.4
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Synopsys DDR ECC Driver
   4 * This driver is based on ppc4xx_edac.c drivers
   5 *
   6 * Copyright (C) 2012 - 2014 Xilinx, Inc.
   7 */
   8
   9#include <linux/edac.h>
  10#include <linux/module.h>
  11#include <linux/platform_device.h>
  12#include <linux/spinlock.h>
  13#include <linux/interrupt.h>
  14#include <linux/of.h>
  15
  16#include "edac_module.h"
  17
  18/* Number of cs_rows needed per memory controller */
  19#define SYNPS_EDAC_NR_CSROWS		1
  20
  21/* Number of channels per memory controller */
  22#define SYNPS_EDAC_NR_CHANS		1
  23
  24/* Granularity of reported error in bytes */
  25#define SYNPS_EDAC_ERR_GRAIN		1
  26
  27#define SYNPS_EDAC_MSG_SIZE		256
  28
  29#define SYNPS_EDAC_MOD_STRING		"synps_edac"
  30#define SYNPS_EDAC_MOD_VER		"1"
  31
  32/* Synopsys DDR memory controller registers that are relevant to ECC */
  33#define CTRL_OFST			0x0
  34#define T_ZQ_OFST			0xA4
  35
  36/* ECC control register */
  37#define ECC_CTRL_OFST			0xC4
  38/* ECC log register */
  39#define CE_LOG_OFST			0xC8
  40/* ECC address register */
  41#define CE_ADDR_OFST			0xCC
  42/* ECC data[31:0] register */
  43#define CE_DATA_31_0_OFST		0xD0
  44
  45/* Uncorrectable error info registers */
  46#define UE_LOG_OFST			0xDC
  47#define UE_ADDR_OFST			0xE0
  48#define UE_DATA_31_0_OFST		0xE4
  49
  50#define STAT_OFST			0xF0
  51#define SCRUB_OFST			0xF4
  52
  53/* Control register bit field definitions */
  54#define CTRL_BW_MASK			0xC
  55#define CTRL_BW_SHIFT			2
  56
  57#define DDRCTL_WDTH_16			1
  58#define DDRCTL_WDTH_32			0
  59
  60/* ZQ register bit field definitions */
  61#define T_ZQ_DDRMODE_MASK		0x2
  62
  63/* ECC control register bit field definitions */
  64#define ECC_CTRL_CLR_CE_ERR		0x2
  65#define ECC_CTRL_CLR_UE_ERR		0x1
  66
  67/* ECC correctable/uncorrectable error log register definitions */
  68#define LOG_VALID			0x1
  69#define CE_LOG_BITPOS_MASK		0xFE
  70#define CE_LOG_BITPOS_SHIFT		1
  71
  72/* ECC correctable/uncorrectable error address register definitions */
  73#define ADDR_COL_MASK			0xFFF
  74#define ADDR_ROW_MASK			0xFFFF000
  75#define ADDR_ROW_SHIFT			12
  76#define ADDR_BANK_MASK			0x70000000
  77#define ADDR_BANK_SHIFT			28
  78
  79/* ECC statistic register definitions */
  80#define STAT_UECNT_MASK			0xFF
  81#define STAT_CECNT_MASK			0xFF00
  82#define STAT_CECNT_SHIFT		8
  83
  84/* ECC scrub register definitions */
  85#define SCRUB_MODE_MASK			0x7
  86#define SCRUB_MODE_SECDED		0x4
  87
  88/* DDR ECC Quirks */
  89#define DDR_ECC_INTR_SUPPORT		BIT(0)
  90#define DDR_ECC_DATA_POISON_SUPPORT	BIT(1)
  91#define DDR_ECC_INTR_SELF_CLEAR		BIT(2)
  92
  93/* ZynqMP Enhanced DDR memory controller registers that are relevant to ECC */
  94/* ECC Configuration Registers */
  95#define ECC_CFG0_OFST			0x70
  96#define ECC_CFG1_OFST			0x74
  97
  98/* ECC Status Register */
  99#define ECC_STAT_OFST			0x78
 100
 101/* ECC Clear Register */
 102#define ECC_CLR_OFST			0x7C
 103
 104/* ECC Error count Register */
 105#define ECC_ERRCNT_OFST			0x80
 106
 107/* ECC Corrected Error Address Register */
 108#define ECC_CEADDR0_OFST		0x84
 109#define ECC_CEADDR1_OFST		0x88
 110
 111/* ECC Syndrome Registers */
 112#define ECC_CSYND0_OFST			0x8C
 113#define ECC_CSYND1_OFST			0x90
 114#define ECC_CSYND2_OFST			0x94
 115
 116/* ECC Bit Mask0 Address Register */
 117#define ECC_BITMASK0_OFST		0x98
 118#define ECC_BITMASK1_OFST		0x9C
 119#define ECC_BITMASK2_OFST		0xA0
 120
 121/* ECC UnCorrected Error Address Register */
 122#define ECC_UEADDR0_OFST		0xA4
 123#define ECC_UEADDR1_OFST		0xA8
 124
 125/* ECC Syndrome Registers */
 126#define ECC_UESYND0_OFST		0xAC
 127#define ECC_UESYND1_OFST		0xB0
 128#define ECC_UESYND2_OFST		0xB4
 129
 130/* ECC Poison Address Reg */
 131#define ECC_POISON0_OFST		0xB8
 132#define ECC_POISON1_OFST		0xBC
 133
 134#define ECC_ADDRMAP0_OFFSET		0x200
 135
 136/* Control register bitfield definitions */
 137#define ECC_CTRL_BUSWIDTH_MASK		0x3000
 138#define ECC_CTRL_BUSWIDTH_SHIFT		12
 139#define ECC_CTRL_CLR_CE_ERRCNT		BIT(2)
 140#define ECC_CTRL_CLR_UE_ERRCNT		BIT(3)
 141
 142/* DDR Control Register width definitions  */
 143#define DDRCTL_EWDTH_16			2
 144#define DDRCTL_EWDTH_32			1
 145#define DDRCTL_EWDTH_64			0
 146
 147/* ECC status register definitions */
 148#define ECC_STAT_UECNT_MASK		0xF0000
 149#define ECC_STAT_UECNT_SHIFT		16
 150#define ECC_STAT_CECNT_MASK		0xF00
 151#define ECC_STAT_CECNT_SHIFT		8
 152#define ECC_STAT_BITNUM_MASK		0x7F
 153
 154/* ECC error count register definitions */
 155#define ECC_ERRCNT_UECNT_MASK		0xFFFF0000
 156#define ECC_ERRCNT_UECNT_SHIFT		16
 157#define ECC_ERRCNT_CECNT_MASK		0xFFFF
 158
 159/* DDR QOS Interrupt register definitions */
 160#define DDR_QOS_IRQ_STAT_OFST		0x20200
 161#define DDR_QOSUE_MASK			0x4
 162#define	DDR_QOSCE_MASK			0x2
 163#define	ECC_CE_UE_INTR_MASK		0x6
 164#define DDR_QOS_IRQ_EN_OFST		0x20208
 165#define DDR_QOS_IRQ_DB_OFST		0x2020C
 166
 167/* DDR QOS Interrupt register definitions */
 168#define DDR_UE_MASK			BIT(9)
 169#define DDR_CE_MASK			BIT(8)
 170
 171/* ECC Corrected Error Register Mask and Shifts*/
 172#define ECC_CEADDR0_RW_MASK		0x3FFFF
 173#define ECC_CEADDR0_RNK_MASK		BIT(24)
 174#define ECC_CEADDR1_BNKGRP_MASK		0x3000000
 175#define ECC_CEADDR1_BNKNR_MASK		0x70000
 176#define ECC_CEADDR1_BLKNR_MASK		0xFFF
 177#define ECC_CEADDR1_BNKGRP_SHIFT	24
 178#define ECC_CEADDR1_BNKNR_SHIFT		16
 179
 180/* ECC Poison register shifts */
 181#define ECC_POISON0_RANK_SHIFT		24
 182#define ECC_POISON0_RANK_MASK		BIT(24)
 183#define ECC_POISON0_COLUMN_SHIFT	0
 184#define ECC_POISON0_COLUMN_MASK		0xFFF
 185#define ECC_POISON1_BG_SHIFT		28
 186#define ECC_POISON1_BG_MASK		0x30000000
 187#define ECC_POISON1_BANKNR_SHIFT	24
 188#define ECC_POISON1_BANKNR_MASK		0x7000000
 189#define ECC_POISON1_ROW_SHIFT		0
 190#define ECC_POISON1_ROW_MASK		0x3FFFF
 191
 192/* DDR Memory type defines */
 193#define MEM_TYPE_DDR3			0x1
 194#define MEM_TYPE_LPDDR3			0x8
 195#define MEM_TYPE_DDR2			0x4
 196#define MEM_TYPE_DDR4			0x10
 197#define MEM_TYPE_LPDDR4			0x20
 198
 199/* DDRC Software control register */
 200#define DDRC_SWCTL			0x320
 201
 202/* DDRC ECC CE & UE poison mask */
 203#define ECC_CEPOISON_MASK		0x3
 204#define ECC_UEPOISON_MASK		0x1
 205
 206/* DDRC Device config masks */
 207#define DDRC_MSTR_CFG_MASK		0xC0000000
 208#define DDRC_MSTR_CFG_SHIFT		30
 209#define DDRC_MSTR_CFG_X4_MASK		0x0
 210#define DDRC_MSTR_CFG_X8_MASK		0x1
 211#define DDRC_MSTR_CFG_X16_MASK		0x2
 212#define DDRC_MSTR_CFG_X32_MASK		0x3
 213
 214#define DDR_MAX_ROW_SHIFT		18
 215#define DDR_MAX_COL_SHIFT		14
 216#define DDR_MAX_BANK_SHIFT		3
 217#define DDR_MAX_BANKGRP_SHIFT		2
 218
 219#define ROW_MAX_VAL_MASK		0xF
 220#define COL_MAX_VAL_MASK		0xF
 221#define BANK_MAX_VAL_MASK		0x1F
 222#define BANKGRP_MAX_VAL_MASK		0x1F
 223#define RANK_MAX_VAL_MASK		0x1F
 224
 225#define ROW_B0_BASE			6
 226#define ROW_B1_BASE			7
 227#define ROW_B2_BASE			8
 228#define ROW_B3_BASE			9
 229#define ROW_B4_BASE			10
 230#define ROW_B5_BASE			11
 231#define ROW_B6_BASE			12
 232#define ROW_B7_BASE			13
 233#define ROW_B8_BASE			14
 234#define ROW_B9_BASE			15
 235#define ROW_B10_BASE			16
 236#define ROW_B11_BASE			17
 237#define ROW_B12_BASE			18
 238#define ROW_B13_BASE			19
 239#define ROW_B14_BASE			20
 240#define ROW_B15_BASE			21
 241#define ROW_B16_BASE			22
 242#define ROW_B17_BASE			23
 243
 244#define COL_B2_BASE			2
 245#define COL_B3_BASE			3
 246#define COL_B4_BASE			4
 247#define COL_B5_BASE			5
 248#define COL_B6_BASE			6
 249#define COL_B7_BASE			7
 250#define COL_B8_BASE			8
 251#define COL_B9_BASE			9
 252#define COL_B10_BASE			10
 253#define COL_B11_BASE			11
 254#define COL_B12_BASE			12
 255#define COL_B13_BASE			13
 256
 257#define BANK_B0_BASE			2
 258#define BANK_B1_BASE			3
 259#define BANK_B2_BASE			4
 260
 261#define BANKGRP_B0_BASE			2
 262#define BANKGRP_B1_BASE			3
 263
 264#define RANK_B0_BASE			6
 265
/**
 * struct ecc_error_info - ECC error log information.
 * @row:	Row number.
 * @col:	Column number.
 * @bank:	Bank number.
 * @bitpos:	Bit position.
 * @data:	Data causing the error.
 * @bankgrpnr:	Bank group number.
 * @blknr:	Block number.
 *
 * One decoded error record, filled from the controller's CE/UE log
 * registers by the get_error_info() platform callbacks.
 */
struct ecc_error_info {
	u32 row;
	u32 col;
	u32 bank;
	u32 bitpos;
	u32 data;
	u32 bankgrpnr;
	u32 blknr;
};
 285
/**
 * struct synps_ecc_status - ECC status information to report.
 * @ce_cnt:	Correctable error count.
 * @ue_cnt:	Uncorrectable error count.
 * @ceinfo:	Correctable error log information.
 * @ueinfo:	Uncorrectable error log information.
 *
 * Snapshot consumed by handle_error(); the log fields are only
 * meaningful while the corresponding count is non-zero.
 */
struct synps_ecc_status {
	u32 ce_cnt;
	u32 ue_cnt;
	struct ecc_error_info ceinfo;
	struct ecc_error_info ueinfo;
};
 299
/**
 * struct synps_edac_priv - DDR memory controller private instance data.
 * @baseaddr:		Base address of the DDR controller.
 * @reglock:		Concurrent CSRs access lock (serializes the ECC_CLR
 *			read-modify-write between the error path and the
 *			interrupt enable/disable paths).
 * @message:		Buffer for framing the event specific info.
 * @stat:		ECC status information.
 * @p_data:		Platform data.
 * @ce_cnt:		Correctable Error count.
 * @ue_cnt:		Uncorrectable Error count.
 * @poison_addr:	Data poison address (CONFIG_EDAC_DEBUG only).
 * @row_shift:		Bit shifts for row bit (CONFIG_EDAC_DEBUG only).
 * @col_shift:		Bit shifts for column bit (CONFIG_EDAC_DEBUG only).
 * @bank_shift:		Bit shifts for bank bit (CONFIG_EDAC_DEBUG only).
 * @bankgrp_shift:	Bit shifts for bank group bit (CONFIG_EDAC_DEBUG only).
 * @rank_shift:		Bit shifts for rank bit (CONFIG_EDAC_DEBUG only).
 */
struct synps_edac_priv {
	void __iomem *baseaddr;
	spinlock_t reglock;
	char message[SYNPS_EDAC_MSG_SIZE];
	struct synps_ecc_status stat;
	const struct synps_platform_data *p_data;
	u32 ce_cnt;
	u32 ue_cnt;
#ifdef CONFIG_EDAC_DEBUG
	ulong poison_addr;
	u32 row_shift[18];
	u32 col_shift[14];
	u32 bank_shift[3];
	u32 bankgrp_shift[2];
	u32 rank_shift[1];
#endif
};
 333
/**
 * struct synps_platform_data -  synps platform data structure.
 * @get_error_info:	Get EDAC error info; returns 0 when a new error was
 *			captured into priv->stat, non-zero when there is
 *			nothing to report.
 * @get_mtype:		Get mtype.
 * @get_dtype:		Get dtype.
 * @get_ecc_state:	Get ECC state.
 * @quirks:		To differentiate IPs (DDR_ECC_* flags).
 */
struct synps_platform_data {
	int (*get_error_info)(struct synps_edac_priv *priv);
	enum mem_type (*get_mtype)(const void __iomem *base);
	enum dev_type (*get_dtype)(const void __iomem *base);
	bool (*get_ecc_state)(void __iomem *base);
	int quirks;
};
 349
/**
 * zynq_get_error_info - Get the current ECC error info.
 * @priv:	DDR memory controller private instance data.
 *
 * Return: one if there is no error, otherwise zero.
 */
static int zynq_get_error_info(struct synps_edac_priv *priv)
{
	struct synps_ecc_status *p;
	u32 regval, clearval = 0;
	void __iomem *base;

	base = priv->baseaddr;
	p = &priv->stat;

	/* STAT holds both error counters; zero means nothing pending. */
	regval = readl(base + STAT_OFST);
	if (!regval)
		return 1;

	p->ce_cnt = (regval & STAT_CECNT_MASK) >> STAT_CECNT_SHIFT;
	p->ue_cnt = regval & STAT_UECNT_MASK;

	/* Decode the CE log only when both the count and valid bit agree. */
	regval = readl(base + CE_LOG_OFST);
	if (!(p->ce_cnt && (regval & LOG_VALID)))
		goto ue_err;

	p->ceinfo.bitpos = (regval & CE_LOG_BITPOS_MASK) >> CE_LOG_BITPOS_SHIFT;
	regval = readl(base + CE_ADDR_OFST);
	p->ceinfo.row = (regval & ADDR_ROW_MASK) >> ADDR_ROW_SHIFT;
	p->ceinfo.col = regval & ADDR_COL_MASK;
	p->ceinfo.bank = (regval & ADDR_BANK_MASK) >> ADDR_BANK_SHIFT;
	p->ceinfo.data = readl(base + CE_DATA_31_0_OFST);
	edac_dbg(3, "CE bit position: %d data: %d\n", p->ceinfo.bitpos,
		 p->ceinfo.data);
	clearval = ECC_CTRL_CLR_CE_ERR;

ue_err:
	regval = readl(base + UE_LOG_OFST);
	if (!(p->ue_cnt && (regval & LOG_VALID)))
		goto out;

	regval = readl(base + UE_ADDR_OFST);
	p->ueinfo.row = (regval & ADDR_ROW_MASK) >> ADDR_ROW_SHIFT;
	p->ueinfo.col = regval & ADDR_COL_MASK;
	p->ueinfo.bank = (regval & ADDR_BANK_MASK) >> ADDR_BANK_SHIFT;
	p->ueinfo.data = readl(base + UE_DATA_31_0_OFST);
	clearval |= ECC_CTRL_CLR_UE_ERR;

out:
	/* Clear only the logs we consumed, then re-arm the counters. */
	writel(clearval, base + ECC_CTRL_OFST);
	writel(0x0, base + ECC_CTRL_OFST);

	return 0;
}
 404
/**
 * zynqmp_get_error_info - Get the current ECC error info.
 * @priv:	DDR memory controller private instance data.
 *
 * Return: one if there is no error otherwise returns zero.
 */
static int zynqmp_get_error_info(struct synps_edac_priv *priv)
{
	struct synps_ecc_status *p;
	u32 regval, clearval;
	unsigned long flags;
	void __iomem *base;

	base = priv->baseaddr;
	p = &priv->stat;

	regval = readl(base + ECC_ERRCNT_OFST);
	p->ce_cnt = regval & ECC_ERRCNT_CECNT_MASK;
	p->ue_cnt = (regval & ECC_ERRCNT_UECNT_MASK) >> ECC_ERRCNT_UECNT_SHIFT;
	if (!p->ce_cnt)
		goto ue_err;

	/*
	 * NOTE(review): if ce_cnt is non-zero but ECC_STAT reads zero we
	 * return early without reporting a pending UE or clearing anything —
	 * confirm this combination cannot occur on real hardware.
	 */
	regval = readl(base + ECC_STAT_OFST);
	if (!regval)
		return 1;

	p->ceinfo.bitpos = (regval & ECC_STAT_BITNUM_MASK);

	/* Decode the corrected-error address/syndrome registers. */
	regval = readl(base + ECC_CEADDR0_OFST);
	p->ceinfo.row = (regval & ECC_CEADDR0_RW_MASK);
	regval = readl(base + ECC_CEADDR1_OFST);
	p->ceinfo.bank = (regval & ECC_CEADDR1_BNKNR_MASK) >>
					ECC_CEADDR1_BNKNR_SHIFT;
	p->ceinfo.bankgrpnr = (regval &	ECC_CEADDR1_BNKGRP_MASK) >>
					ECC_CEADDR1_BNKGRP_SHIFT;
	p->ceinfo.blknr = (regval & ECC_CEADDR1_BLKNR_MASK);
	p->ceinfo.data = readl(base + ECC_CSYND0_OFST);
	edac_dbg(2, "ECCCSYN0: 0x%08X ECCCSYN1: 0x%08X ECCCSYN2: 0x%08X\n",
		 readl(base + ECC_CSYND0_OFST), readl(base + ECC_CSYND1_OFST),
		 readl(base + ECC_CSYND2_OFST));
ue_err:
	if (!p->ue_cnt)
		goto out;

	/* Decode the uncorrected-error address/syndrome registers. */
	regval = readl(base + ECC_UEADDR0_OFST);
	p->ueinfo.row = (regval & ECC_CEADDR0_RW_MASK);
	regval = readl(base + ECC_UEADDR1_OFST);
	p->ueinfo.bankgrpnr = (regval & ECC_CEADDR1_BNKGRP_MASK) >>
					ECC_CEADDR1_BNKGRP_SHIFT;
	p->ueinfo.bank = (regval & ECC_CEADDR1_BNKNR_MASK) >>
					ECC_CEADDR1_BNKNR_SHIFT;
	p->ueinfo.blknr = (regval & ECC_CEADDR1_BLKNR_MASK);
	p->ueinfo.data = readl(base + ECC_UESYND0_OFST);
out:
	/*
	 * ECC_CLR also carries the interrupt-enable bits on this IP, so the
	 * read-modify-write must be serialized against enable/disable_intr().
	 */
	spin_lock_irqsave(&priv->reglock, flags);

	clearval = readl(base + ECC_CLR_OFST) |
		   ECC_CTRL_CLR_CE_ERR | ECC_CTRL_CLR_CE_ERRCNT |
		   ECC_CTRL_CLR_UE_ERR | ECC_CTRL_CLR_UE_ERRCNT;
	writel(clearval, base + ECC_CLR_OFST);

	spin_unlock_irqrestore(&priv->reglock, flags);

	return 0;
}
 470
 471/**
 472 * handle_error - Handle Correctable and Uncorrectable errors.
 473 * @mci:	EDAC memory controller instance.
 474 * @p:		Synopsys ECC status structure.
 475 *
 476 * Handles ECC correctable and uncorrectable errors.
 477 */
 478static void handle_error(struct mem_ctl_info *mci, struct synps_ecc_status *p)
 479{
 480	struct synps_edac_priv *priv = mci->pvt_info;
 481	struct ecc_error_info *pinf;
 482
 483	if (p->ce_cnt) {
 484		pinf = &p->ceinfo;
 485		if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT) {
 486			snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
 487				 "DDR ECC error type:%s Row %d Bank %d BankGroup Number %d Block Number %d Bit Position: %d Data: 0x%08x",
 488				 "CE", pinf->row, pinf->bank,
 489				 pinf->bankgrpnr, pinf->blknr,
 490				 pinf->bitpos, pinf->data);
 491		} else {
 492			snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
 493				 "DDR ECC error type:%s Row %d Bank %d Col %d Bit Position: %d Data: 0x%08x",
 494				 "CE", pinf->row, pinf->bank, pinf->col,
 495				 pinf->bitpos, pinf->data);
 496		}
 497
 498		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
 499				     p->ce_cnt, 0, 0, 0, 0, 0, -1,
 500				     priv->message, "");
 501	}
 502
 503	if (p->ue_cnt) {
 504		pinf = &p->ueinfo;
 505		if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT) {
 506			snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
 507				 "DDR ECC error type :%s Row %d Bank %d BankGroup Number %d Block Number %d",
 508				 "UE", pinf->row, pinf->bank,
 509				 pinf->bankgrpnr, pinf->blknr);
 510		} else {
 511			snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
 512				 "DDR ECC error type :%s Row %d Bank %d Col %d ",
 513				 "UE", pinf->row, pinf->bank, pinf->col);
 514		}
 515
 516		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
 517				     p->ue_cnt, 0, 0, 0, 0, 0, -1,
 518				     priv->message, "");
 519	}
 520
 521	memset(p, 0, sizeof(*p));
 522}
 523
/* Enable CE/UE interrupt delivery for this controller instance. */
static void enable_intr(struct synps_edac_priv *priv)
{
	unsigned long flags;

	/* Enable UE/CE Interrupts */
	if (!(priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)) {
		/* Older IPs have a dedicated QOS interrupt-enable register. */
		writel(DDR_QOSUE_MASK | DDR_QOSCE_MASK,
		       priv->baseaddr + DDR_QOS_IRQ_EN_OFST);

		return;
	}

	/*
	 * On self-clear IPs the enable bits live in ECC_CLR_OFST, which
	 * zynqmp_get_error_info() also read-modify-writes; reglock
	 * serializes the two paths.
	 */
	spin_lock_irqsave(&priv->reglock, flags);

	writel(DDR_UE_MASK | DDR_CE_MASK,
	       priv->baseaddr + ECC_CLR_OFST);

	spin_unlock_irqrestore(&priv->reglock, flags);
}
 543
/* Disable CE/UE interrupt delivery for this controller instance. */
static void disable_intr(struct synps_edac_priv *priv)
{
	unsigned long flags;

	/* Disable UE/CE Interrupts */
	if (!(priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)) {
		/* Older IPs disable via the QOS interrupt-disable register. */
		writel(DDR_QOSUE_MASK | DDR_QOSCE_MASK,
		       priv->baseaddr + DDR_QOS_IRQ_DB_OFST);

		return;
	}

	/* Clearing ECC_CLR drops the enable bits; serialize with the
	 * error-clear path via reglock.
	 */
	spin_lock_irqsave(&priv->reglock, flags);

	writel(0, priv->baseaddr + ECC_CLR_OFST);

	spin_unlock_irqrestore(&priv->reglock, flags);
}
 562
/**
 * intr_handler - Interrupt Handler for ECC interrupts.
 * @irq:        IRQ number.
 * @dev_id:     Device ID.
 *
 * Return: IRQ_NONE, if interrupt not set or IRQ_HANDLED otherwise.
 */
static irqreturn_t intr_handler(int irq, void *dev_id)
{
	const struct synps_platform_data *p_data;
	struct mem_ctl_info *mci = dev_id;
	struct synps_edac_priv *priv;
	int status, regval;

	priv = mci->pvt_info;
	p_data = priv->p_data;

	/*
	 * v3.0 of the controller has the ce/ue bits cleared automatically,
	 * so this condition does not apply.
	 */
	if (!(priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)) {
		regval = readl(priv->baseaddr + DDR_QOS_IRQ_STAT_OFST);
		regval &= (DDR_QOSCE_MASK | DDR_QOSUE_MASK);
		if (!(regval & ECC_CE_UE_INTR_MASK))
			return IRQ_NONE;
	}

	status = p_data->get_error_info(priv);
	if (status)
		return IRQ_NONE;

	/* Accumulate lifetime totals, then report this event to EDAC. */
	priv->ce_cnt += priv->stat.ce_cnt;
	priv->ue_cnt += priv->stat.ue_cnt;
	handle_error(mci, &priv->stat);

	edac_dbg(3, "Total error count CE %d UE %d\n",
		 priv->ce_cnt, priv->ue_cnt);
	/*
	 * v3.0 of the controller does not have this register.
	 * regval is only read here on the same !SELF_CLEAR path that
	 * initialized it above, so it is never used uninitialized.
	 */
	if (!(priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR))
		writel(regval, priv->baseaddr + DDR_QOS_IRQ_STAT_OFST);

	return IRQ_HANDLED;
}
 607
 608/**
 609 * check_errors - Check controller for ECC errors.
 610 * @mci:	EDAC memory controller instance.
 611 *
 612 * Check and post ECC errors. Called by the polling thread.
 613 */
 614static void check_errors(struct mem_ctl_info *mci)
 615{
 616	const struct synps_platform_data *p_data;
 617	struct synps_edac_priv *priv;
 618	int status;
 619
 620	priv = mci->pvt_info;
 621	p_data = priv->p_data;
 622
 623	status = p_data->get_error_info(priv);
 624	if (status)
 625		return;
 626
 627	priv->ce_cnt += priv->stat.ce_cnt;
 628	priv->ue_cnt += priv->stat.ue_cnt;
 629	handle_error(mci, &priv->stat);
 630
 631	edac_dbg(3, "Total error count CE %d UE %d\n",
 632		 priv->ce_cnt, priv->ue_cnt);
 633}
 634
 635/**
 636 * zynq_get_dtype - Return the controller memory width.
 637 * @base:	DDR memory controller base address.
 638 *
 639 * Get the EDAC device type width appropriate for the current controller
 640 * configuration.
 641 *
 642 * Return: a device type width enumeration.
 643 */
 644static enum dev_type zynq_get_dtype(const void __iomem *base)
 645{
 646	enum dev_type dt;
 647	u32 width;
 648
 649	width = readl(base + CTRL_OFST);
 650	width = (width & CTRL_BW_MASK) >> CTRL_BW_SHIFT;
 651
 652	switch (width) {
 653	case DDRCTL_WDTH_16:
 654		dt = DEV_X2;
 655		break;
 656	case DDRCTL_WDTH_32:
 657		dt = DEV_X4;
 658		break;
 659	default:
 660		dt = DEV_UNKNOWN;
 661	}
 662
 663	return dt;
 664}
 665
 666/**
 667 * zynqmp_get_dtype - Return the controller memory width.
 668 * @base:	DDR memory controller base address.
 669 *
 670 * Get the EDAC device type width appropriate for the current controller
 671 * configuration.
 672 *
 673 * Return: a device type width enumeration.
 674 */
 675static enum dev_type zynqmp_get_dtype(const void __iomem *base)
 676{
 677	enum dev_type dt;
 678	u32 width;
 679
 680	width = readl(base + CTRL_OFST);
 681	width = (width & ECC_CTRL_BUSWIDTH_MASK) >> ECC_CTRL_BUSWIDTH_SHIFT;
 682	switch (width) {
 683	case DDRCTL_EWDTH_16:
 684		dt = DEV_X2;
 685		break;
 686	case DDRCTL_EWDTH_32:
 687		dt = DEV_X4;
 688		break;
 689	case DDRCTL_EWDTH_64:
 690		dt = DEV_X8;
 691		break;
 692	default:
 693		dt = DEV_UNKNOWN;
 694	}
 695
 696	return dt;
 697}
 698
 699/**
 700 * zynq_get_ecc_state - Return the controller ECC enable/disable status.
 701 * @base:	DDR memory controller base address.
 702 *
 703 * Get the ECC enable/disable status of the controller.
 704 *
 705 * Return: true if enabled, otherwise false.
 706 */
 707static bool zynq_get_ecc_state(void __iomem *base)
 708{
 709	enum dev_type dt;
 710	u32 ecctype;
 711
 712	dt = zynq_get_dtype(base);
 713	if (dt == DEV_UNKNOWN)
 714		return false;
 715
 716	ecctype = readl(base + SCRUB_OFST) & SCRUB_MODE_MASK;
 717	if ((ecctype == SCRUB_MODE_SECDED) && (dt == DEV_X2))
 718		return true;
 719
 720	return false;
 721}
 722
 723/**
 724 * zynqmp_get_ecc_state - Return the controller ECC enable/disable status.
 725 * @base:	DDR memory controller base address.
 726 *
 727 * Get the ECC enable/disable status for the controller.
 728 *
 729 * Return: a ECC status boolean i.e true/false - enabled/disabled.
 730 */
 731static bool zynqmp_get_ecc_state(void __iomem *base)
 732{
 733	enum dev_type dt;
 734	u32 ecctype;
 735
 736	dt = zynqmp_get_dtype(base);
 737	if (dt == DEV_UNKNOWN)
 738		return false;
 739
 740	ecctype = readl(base + ECC_CFG0_OFST) & SCRUB_MODE_MASK;
 741	if ((ecctype == SCRUB_MODE_SECDED) &&
 742	    ((dt == DEV_X2) || (dt == DEV_X4) || (dt == DEV_X8)))
 743		return true;
 744
 745	return false;
 746}
 747
 748/**
 749 * get_memsize - Read the size of the attached memory device.
 750 *
 751 * Return: the memory size in bytes.
 752 */
 753static u32 get_memsize(void)
 754{
 755	struct sysinfo inf;
 756
 757	si_meminfo(&inf);
 758
 759	return inf.totalram * inf.mem_unit;
 760}
 761
 762/**
 763 * zynq_get_mtype - Return the controller memory type.
 764 * @base:	Synopsys ECC status structure.
 765 *
 766 * Get the EDAC memory type appropriate for the current controller
 767 * configuration.
 768 *
 769 * Return: a memory type enumeration.
 770 */
 771static enum mem_type zynq_get_mtype(const void __iomem *base)
 772{
 773	enum mem_type mt;
 774	u32 memtype;
 775
 776	memtype = readl(base + T_ZQ_OFST);
 777
 778	if (memtype & T_ZQ_DDRMODE_MASK)
 779		mt = MEM_DDR3;
 780	else
 781		mt = MEM_DDR2;
 782
 783	return mt;
 784}
 785
 786/**
 787 * zynqmp_get_mtype - Returns controller memory type.
 788 * @base:	Synopsys ECC status structure.
 789 *
 790 * Get the EDAC memory type appropriate for the current controller
 791 * configuration.
 792 *
 793 * Return: a memory type enumeration.
 794 */
 795static enum mem_type zynqmp_get_mtype(const void __iomem *base)
 796{
 797	enum mem_type mt;
 798	u32 memtype;
 799
 800	memtype = readl(base + CTRL_OFST);
 801
 802	if ((memtype & MEM_TYPE_DDR3) || (memtype & MEM_TYPE_LPDDR3))
 803		mt = MEM_DDR3;
 804	else if (memtype & MEM_TYPE_DDR2)
 805		mt = MEM_RDDR2;
 806	else if ((memtype & MEM_TYPE_LPDDR4) || (memtype & MEM_TYPE_DDR4))
 807		mt = MEM_DDR4;
 808	else
 809		mt = MEM_EMPTY;
 810
 811	return mt;
 812}
 813
 814/**
 815 * init_csrows - Initialize the csrow data.
 816 * @mci:	EDAC memory controller instance.
 817 *
 818 * Initialize the chip select rows associated with the EDAC memory
 819 * controller instance.
 820 */
 821static void init_csrows(struct mem_ctl_info *mci)
 822{
 823	struct synps_edac_priv *priv = mci->pvt_info;
 824	const struct synps_platform_data *p_data;
 825	struct csrow_info *csi;
 826	struct dimm_info *dimm;
 827	u32 size, row;
 828	int j;
 829
 830	p_data = priv->p_data;
 831
 832	for (row = 0; row < mci->nr_csrows; row++) {
 833		csi = mci->csrows[row];
 834		size = get_memsize();
 835
 836		for (j = 0; j < csi->nr_channels; j++) {
 837			dimm		= csi->channels[j]->dimm;
 838			dimm->edac_mode	= EDAC_SECDED;
 839			dimm->mtype	= p_data->get_mtype(priv->baseaddr);
 840			dimm->nr_pages	= (size >> PAGE_SHIFT) / csi->nr_channels;
 841			dimm->grain	= SYNPS_EDAC_ERR_GRAIN;
 842			dimm->dtype	= p_data->get_dtype(priv->baseaddr);
 843		}
 844	}
 845}
 846
 847/**
 848 * mc_init - Initialize one driver instance.
 849 * @mci:	EDAC memory controller instance.
 850 * @pdev:	platform device.
 851 *
 852 * Perform initialization of the EDAC memory controller instance and
 853 * related driver-private data associated with the memory controller the
 854 * instance is bound to.
 855 */
 856static void mc_init(struct mem_ctl_info *mci, struct platform_device *pdev)
 857{
 858	struct synps_edac_priv *priv;
 859
 860	mci->pdev = &pdev->dev;
 861	priv = mci->pvt_info;
 862	platform_set_drvdata(pdev, mci);
 863
 864	/* Initialize controller capabilities and configuration */
 865	mci->mtype_cap = MEM_FLAG_DDR3 | MEM_FLAG_DDR2;
 866	mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
 867	mci->scrub_cap = SCRUB_HW_SRC;
 868	mci->scrub_mode = SCRUB_NONE;
 869
 870	mci->edac_cap = EDAC_FLAG_SECDED;
 871	mci->ctl_name = "synps_ddr_controller";
 872	mci->dev_name = SYNPS_EDAC_MOD_STRING;
 873	mci->mod_name = SYNPS_EDAC_MOD_VER;
 874
 875	if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT) {
 876		edac_op_state = EDAC_OPSTATE_INT;
 877	} else {
 878		edac_op_state = EDAC_OPSTATE_POLL;
 879		mci->edac_check = check_errors;
 880	}
 881
 882	mci->ctl_page_to_phys = NULL;
 883
 884	init_csrows(mci);
 885}
 886
 887static int setup_irq(struct mem_ctl_info *mci,
 888		     struct platform_device *pdev)
 889{
 890	struct synps_edac_priv *priv = mci->pvt_info;
 891	int ret, irq;
 892
 893	irq = platform_get_irq(pdev, 0);
 894	if (irq < 0) {
 895		edac_printk(KERN_ERR, EDAC_MC,
 896			    "No IRQ %d in DT\n", irq);
 897		return irq;
 898	}
 899
 900	ret = devm_request_irq(&pdev->dev, irq, intr_handler,
 901			       0, dev_name(&pdev->dev), mci);
 902	if (ret < 0) {
 903		edac_printk(KERN_ERR, EDAC_MC, "Failed to request IRQ\n");
 904		return ret;
 905	}
 906
 907	enable_intr(priv);
 908
 909	return 0;
 910}
 911
/* Zynq-7000 DDRC hooks: polled operation only, no quirks. */
static const struct synps_platform_data zynq_edac_def = {
	.get_error_info	= zynq_get_error_info,
	.get_mtype	= zynq_get_mtype,
	.get_dtype	= zynq_get_dtype,
	.get_ecc_state	= zynq_get_ecc_state,
	.quirks		= 0,
};
 919
/*
 * ZynqMP DDRC hooks: interrupt-driven error reporting; data poisoning
 * (error injection) is additionally available in EDAC debug builds.
 */
static const struct synps_platform_data zynqmp_edac_def = {
	.get_error_info	= zynqmp_get_error_info,
	.get_mtype	= zynqmp_get_mtype,
	.get_dtype	= zynqmp_get_dtype,
	.get_ecc_state	= zynqmp_get_ecc_state,
	.quirks         = (DDR_ECC_INTR_SUPPORT
#ifdef CONFIG_EDAC_DEBUG
			  | DDR_ECC_DATA_POISON_SUPPORT
#endif
			  ),
};
 931
/*
 * Generic Synopsys DDRC (v3.80a) hooks: same decode helpers as ZynqMP,
 * but the CE/UE interrupt bits clear themselves (DDR_ECC_INTR_SELF_CLEAR).
 */
static const struct synps_platform_data synopsys_edac_def = {
	.get_error_info	= zynqmp_get_error_info,
	.get_mtype	= zynqmp_get_mtype,
	.get_dtype	= zynqmp_get_dtype,
	.get_ecc_state	= zynqmp_get_ecc_state,
	.quirks         = (DDR_ECC_INTR_SUPPORT | DDR_ECC_INTR_SELF_CLEAR
#ifdef CONFIG_EDAC_DEBUG
			  | DDR_ECC_DATA_POISON_SUPPORT
#endif
			  ),
};
 943
 944
/* Device-tree match table; .data selects the per-SoC platform hooks. */
static const struct of_device_id synps_edac_match[] = {
	{
		.compatible = "xlnx,zynq-ddrc-a05",
		.data = (void *)&zynq_edac_def
	},
	{
		.compatible = "xlnx,zynqmp-ddrc-2.40a",
		.data = (void *)&zynqmp_edac_def
	},
	{
		.compatible = "snps,ddrc-3.80a",
		.data = (void *)&synopsys_edac_def
	},
	{
		/* end of table */
	}
};
 962
 963MODULE_DEVICE_TABLE(of, synps_edac_match);
 964
 965#ifdef CONFIG_EDAC_DEBUG
 966#define to_mci(k) container_of(k, struct mem_ctl_info, dev)
 967
 968/**
 969 * ddr_poison_setup -	Update poison registers.
 970 * @priv:		DDR memory controller private instance data.
 971 *
 972 * Update poison registers as per DDR mapping.
 973 * Return: none.
 974 */
 975static void ddr_poison_setup(struct synps_edac_priv *priv)
 976{
 977	int col = 0, row = 0, bank = 0, bankgrp = 0, rank = 0, regval;
 978	int index;
 979	ulong hif_addr = 0;
 980
 981	hif_addr = priv->poison_addr >> 3;
 982
 983	for (index = 0; index < DDR_MAX_ROW_SHIFT; index++) {
 984		if (priv->row_shift[index])
 985			row |= (((hif_addr >> priv->row_shift[index]) &
 986						BIT(0)) << index);
 987		else
 988			break;
 989	}
 990
 991	for (index = 0; index < DDR_MAX_COL_SHIFT; index++) {
 992		if (priv->col_shift[index] || index < 3)
 993			col |= (((hif_addr >> priv->col_shift[index]) &
 994						BIT(0)) << index);
 995		else
 996			break;
 997	}
 998
 999	for (index = 0; index < DDR_MAX_BANK_SHIFT; index++) {
1000		if (priv->bank_shift[index])
1001			bank |= (((hif_addr >> priv->bank_shift[index]) &
1002						BIT(0)) << index);
1003		else
1004			break;
1005	}
1006
1007	for (index = 0; index < DDR_MAX_BANKGRP_SHIFT; index++) {
1008		if (priv->bankgrp_shift[index])
1009			bankgrp |= (((hif_addr >> priv->bankgrp_shift[index])
1010						& BIT(0)) << index);
1011		else
1012			break;
1013	}
1014
1015	if (priv->rank_shift[0])
1016		rank = (hif_addr >> priv->rank_shift[0]) & BIT(0);
1017
1018	regval = (rank << ECC_POISON0_RANK_SHIFT) & ECC_POISON0_RANK_MASK;
1019	regval |= (col << ECC_POISON0_COLUMN_SHIFT) & ECC_POISON0_COLUMN_MASK;
1020	writel(regval, priv->baseaddr + ECC_POISON0_OFST);
1021
1022	regval = (bankgrp << ECC_POISON1_BG_SHIFT) & ECC_POISON1_BG_MASK;
1023	regval |= (bank << ECC_POISON1_BANKNR_SHIFT) & ECC_POISON1_BANKNR_MASK;
1024	regval |= (row << ECC_POISON1_ROW_SHIFT) & ECC_POISON1_ROW_MASK;
1025	writel(regval, priv->baseaddr + ECC_POISON1_OFST);
1026}
1027
1028static ssize_t inject_data_error_show(struct device *dev,
1029				      struct device_attribute *mattr,
1030				      char *data)
1031{
1032	struct mem_ctl_info *mci = to_mci(dev);
1033	struct synps_edac_priv *priv = mci->pvt_info;
1034
1035	return sprintf(data, "Poison0 Addr: 0x%08x\n\rPoison1 Addr: 0x%08x\n\r"
1036			"Error injection Address: 0x%lx\n\r",
1037			readl(priv->baseaddr + ECC_POISON0_OFST),
1038			readl(priv->baseaddr + ECC_POISON1_OFST),
1039			priv->poison_addr);
1040}
1041
1042static ssize_t inject_data_error_store(struct device *dev,
1043				       struct device_attribute *mattr,
1044				       const char *data, size_t count)
1045{
1046	struct mem_ctl_info *mci = to_mci(dev);
1047	struct synps_edac_priv *priv = mci->pvt_info;
1048
1049	if (kstrtoul(data, 0, &priv->poison_addr))
1050		return -EINVAL;
1051
1052	ddr_poison_setup(priv);
1053
1054	return count;
1055}
1056
1057static ssize_t inject_data_poison_show(struct device *dev,
1058				       struct device_attribute *mattr,
1059				       char *data)
1060{
1061	struct mem_ctl_info *mci = to_mci(dev);
1062	struct synps_edac_priv *priv = mci->pvt_info;
1063
1064	return sprintf(data, "Data Poisoning: %s\n\r",
1065			(((readl(priv->baseaddr + ECC_CFG1_OFST)) & 0x3) == 0x3)
1066			? ("Correctable Error") : ("UnCorrectable Error"));
1067}
1068
1069static ssize_t inject_data_poison_store(struct device *dev,
1070					struct device_attribute *mattr,
1071					const char *data, size_t count)
1072{
1073	struct mem_ctl_info *mci = to_mci(dev);
1074	struct synps_edac_priv *priv = mci->pvt_info;
1075
1076	writel(0, priv->baseaddr + DDRC_SWCTL);
1077	if (strncmp(data, "CE", 2) == 0)
1078		writel(ECC_CEPOISON_MASK, priv->baseaddr + ECC_CFG1_OFST);
1079	else
1080		writel(ECC_UEPOISON_MASK, priv->baseaddr + ECC_CFG1_OFST);
1081	writel(1, priv->baseaddr + DDRC_SWCTL);
1082
1083	return count;
1084}
1085
1086static DEVICE_ATTR_RW(inject_data_error);
1087static DEVICE_ATTR_RW(inject_data_poison);
1088
1089static int edac_create_sysfs_attributes(struct mem_ctl_info *mci)
1090{
1091	int rc;
1092
1093	rc = device_create_file(&mci->dev, &dev_attr_inject_data_error);
1094	if (rc < 0)
1095		return rc;
1096	rc = device_create_file(&mci->dev, &dev_attr_inject_data_poison);
1097	if (rc < 0)
1098		return rc;
1099	return 0;
1100}
1101
1102static void edac_remove_sysfs_attributes(struct mem_ctl_info *mci)
1103{
1104	device_remove_file(&mci->dev, &dev_attr_inject_data_error);
1105	device_remove_file(&mci->dev, &dev_attr_inject_data_poison);
1106}
1107
1108static void setup_row_address_map(struct synps_edac_priv *priv, u32 *addrmap)
1109{
1110	u32 addrmap_row_b2_10;
1111	int index;
1112
1113	priv->row_shift[0] = (addrmap[5] & ROW_MAX_VAL_MASK) + ROW_B0_BASE;
1114	priv->row_shift[1] = ((addrmap[5] >> 8) &
1115			ROW_MAX_VAL_MASK) + ROW_B1_BASE;
1116
1117	addrmap_row_b2_10 = (addrmap[5] >> 16) & ROW_MAX_VAL_MASK;
1118	if (addrmap_row_b2_10 != ROW_MAX_VAL_MASK) {
1119		for (index = 2; index < 11; index++)
1120			priv->row_shift[index] = addrmap_row_b2_10 +
1121				index + ROW_B0_BASE;
1122
1123	} else {
1124		priv->row_shift[2] = (addrmap[9] &
1125				ROW_MAX_VAL_MASK) + ROW_B2_BASE;
1126		priv->row_shift[3] = ((addrmap[9] >> 8) &
1127				ROW_MAX_VAL_MASK) + ROW_B3_BASE;
1128		priv->row_shift[4] = ((addrmap[9] >> 16) &
1129				ROW_MAX_VAL_MASK) + ROW_B4_BASE;
1130		priv->row_shift[5] = ((addrmap[9] >> 24) &
1131				ROW_MAX_VAL_MASK) + ROW_B5_BASE;
1132		priv->row_shift[6] = (addrmap[10] &
1133				ROW_MAX_VAL_MASK) + ROW_B6_BASE;
1134		priv->row_shift[7] = ((addrmap[10] >> 8) &
1135				ROW_MAX_VAL_MASK) + ROW_B7_BASE;
1136		priv->row_shift[8] = ((addrmap[10] >> 16) &
1137				ROW_MAX_VAL_MASK) + ROW_B8_BASE;
1138		priv->row_shift[9] = ((addrmap[10] >> 24) &
1139				ROW_MAX_VAL_MASK) + ROW_B9_BASE;
1140		priv->row_shift[10] = (addrmap[11] &
1141				ROW_MAX_VAL_MASK) + ROW_B10_BASE;
1142	}
1143
1144	priv->row_shift[11] = (((addrmap[5] >> 24) & ROW_MAX_VAL_MASK) ==
1145				ROW_MAX_VAL_MASK) ? 0 : (((addrmap[5] >> 24) &
1146				ROW_MAX_VAL_MASK) + ROW_B11_BASE);
1147	priv->row_shift[12] = ((addrmap[6] & ROW_MAX_VAL_MASK) ==
1148				ROW_MAX_VAL_MASK) ? 0 : ((addrmap[6] &
1149				ROW_MAX_VAL_MASK) + ROW_B12_BASE);
1150	priv->row_shift[13] = (((addrmap[6] >> 8) & ROW_MAX_VAL_MASK) ==
1151				ROW_MAX_VAL_MASK) ? 0 : (((addrmap[6] >> 8) &
1152				ROW_MAX_VAL_MASK) + ROW_B13_BASE);
1153	priv->row_shift[14] = (((addrmap[6] >> 16) & ROW_MAX_VAL_MASK) ==
1154				ROW_MAX_VAL_MASK) ? 0 : (((addrmap[6] >> 16) &
1155				ROW_MAX_VAL_MASK) + ROW_B14_BASE);
1156	priv->row_shift[15] = (((addrmap[6] >> 24) & ROW_MAX_VAL_MASK) ==
1157				ROW_MAX_VAL_MASK) ? 0 : (((addrmap[6] >> 24) &
1158				ROW_MAX_VAL_MASK) + ROW_B15_BASE);
1159	priv->row_shift[16] = ((addrmap[7] & ROW_MAX_VAL_MASK) ==
1160				ROW_MAX_VAL_MASK) ? 0 : ((addrmap[7] &
1161				ROW_MAX_VAL_MASK) + ROW_B16_BASE);
1162	priv->row_shift[17] = (((addrmap[7] >> 8) & ROW_MAX_VAL_MASK) ==
1163				ROW_MAX_VAL_MASK) ? 0 : (((addrmap[7] >> 8) &
1164				ROW_MAX_VAL_MASK) + ROW_B17_BASE);
1165}
1166
1167static void setup_column_address_map(struct synps_edac_priv *priv, u32 *addrmap)
1168{
1169	u32 width, memtype;
1170	int index;
1171
1172	memtype = readl(priv->baseaddr + CTRL_OFST);
1173	width = (memtype & ECC_CTRL_BUSWIDTH_MASK) >> ECC_CTRL_BUSWIDTH_SHIFT;
1174
1175	priv->col_shift[0] = 0;
1176	priv->col_shift[1] = 1;
1177	priv->col_shift[2] = (addrmap[2] & COL_MAX_VAL_MASK) + COL_B2_BASE;
1178	priv->col_shift[3] = ((addrmap[2] >> 8) &
1179			COL_MAX_VAL_MASK) + COL_B3_BASE;
1180	priv->col_shift[4] = (((addrmap[2] >> 16) & COL_MAX_VAL_MASK) ==
1181			COL_MAX_VAL_MASK) ? 0 : (((addrmap[2] >> 16) &
1182					COL_MAX_VAL_MASK) + COL_B4_BASE);
1183	priv->col_shift[5] = (((addrmap[2] >> 24) & COL_MAX_VAL_MASK) ==
1184			COL_MAX_VAL_MASK) ? 0 : (((addrmap[2] >> 24) &
1185					COL_MAX_VAL_MASK) + COL_B5_BASE);
1186	priv->col_shift[6] = ((addrmap[3] & COL_MAX_VAL_MASK) ==
1187			COL_MAX_VAL_MASK) ? 0 : ((addrmap[3] &
1188					COL_MAX_VAL_MASK) + COL_B6_BASE);
1189	priv->col_shift[7] = (((addrmap[3] >> 8) & COL_MAX_VAL_MASK) ==
1190			COL_MAX_VAL_MASK) ? 0 : (((addrmap[3] >> 8) &
1191					COL_MAX_VAL_MASK) + COL_B7_BASE);
1192	priv->col_shift[8] = (((addrmap[3] >> 16) & COL_MAX_VAL_MASK) ==
1193			COL_MAX_VAL_MASK) ? 0 : (((addrmap[3] >> 16) &
1194					COL_MAX_VAL_MASK) + COL_B8_BASE);
1195	priv->col_shift[9] = (((addrmap[3] >> 24) & COL_MAX_VAL_MASK) ==
1196			COL_MAX_VAL_MASK) ? 0 : (((addrmap[3] >> 24) &
1197					COL_MAX_VAL_MASK) + COL_B9_BASE);
1198	if (width == DDRCTL_EWDTH_64) {
1199		if (memtype & MEM_TYPE_LPDDR3) {
1200			priv->col_shift[10] = ((addrmap[4] &
1201				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
1202				((addrmap[4] & COL_MAX_VAL_MASK) +
1203				 COL_B10_BASE);
1204			priv->col_shift[11] = (((addrmap[4] >> 8) &
1205				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
1206				(((addrmap[4] >> 8) & COL_MAX_VAL_MASK) +
1207				 COL_B11_BASE);
1208		} else {
1209			priv->col_shift[11] = ((addrmap[4] &
1210				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
1211				((addrmap[4] & COL_MAX_VAL_MASK) +
1212				 COL_B10_BASE);
1213			priv->col_shift[13] = (((addrmap[4] >> 8) &
1214				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
1215				(((addrmap[4] >> 8) & COL_MAX_VAL_MASK) +
1216				 COL_B11_BASE);
1217		}
1218	} else if (width == DDRCTL_EWDTH_32) {
1219		if (memtype & MEM_TYPE_LPDDR3) {
1220			priv->col_shift[10] = (((addrmap[3] >> 24) &
1221				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
1222				(((addrmap[3] >> 24) & COL_MAX_VAL_MASK) +
1223				 COL_B9_BASE);
1224			priv->col_shift[11] = ((addrmap[4] &
1225				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
1226				((addrmap[4] & COL_MAX_VAL_MASK) +
1227				 COL_B10_BASE);
1228		} else {
1229			priv->col_shift[11] = (((addrmap[3] >> 24) &
1230				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
1231				(((addrmap[3] >> 24) & COL_MAX_VAL_MASK) +
1232				 COL_B9_BASE);
1233			priv->col_shift[13] = ((addrmap[4] &
1234				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
1235				((addrmap[4] & COL_MAX_VAL_MASK) +
1236				 COL_B10_BASE);
1237		}
1238	} else {
1239		if (memtype & MEM_TYPE_LPDDR3) {
1240			priv->col_shift[10] = (((addrmap[3] >> 16) &
1241				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
1242				(((addrmap[3] >> 16) & COL_MAX_VAL_MASK) +
1243				 COL_B8_BASE);
1244			priv->col_shift[11] = (((addrmap[3] >> 24) &
1245				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
1246				(((addrmap[3] >> 24) & COL_MAX_VAL_MASK) +
1247				 COL_B9_BASE);
1248			priv->col_shift[13] = ((addrmap[4] &
1249				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
1250				((addrmap[4] & COL_MAX_VAL_MASK) +
1251				 COL_B10_BASE);
1252		} else {
1253			priv->col_shift[11] = (((addrmap[3] >> 16) &
1254				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
1255				(((addrmap[3] >> 16) & COL_MAX_VAL_MASK) +
1256				 COL_B8_BASE);
1257			priv->col_shift[13] = (((addrmap[3] >> 24) &
1258				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
1259				(((addrmap[3] >> 24) & COL_MAX_VAL_MASK) +
1260				 COL_B9_BASE);
1261		}
1262	}
1263
1264	if (width) {
1265		for (index = 9; index > width; index--) {
1266			priv->col_shift[index] = priv->col_shift[index - width];
1267			priv->col_shift[index - width] = 0;
1268		}
1269	}
1270
1271}
1272
1273static void setup_bank_address_map(struct synps_edac_priv *priv, u32 *addrmap)
1274{
1275	priv->bank_shift[0] = (addrmap[1] & BANK_MAX_VAL_MASK) + BANK_B0_BASE;
1276	priv->bank_shift[1] = ((addrmap[1] >> 8) &
1277				BANK_MAX_VAL_MASK) + BANK_B1_BASE;
1278	priv->bank_shift[2] = (((addrmap[1] >> 16) &
1279				BANK_MAX_VAL_MASK) == BANK_MAX_VAL_MASK) ? 0 :
1280				(((addrmap[1] >> 16) & BANK_MAX_VAL_MASK) +
1281				 BANK_B2_BASE);
1282
1283}
1284
1285static void setup_bg_address_map(struct synps_edac_priv *priv, u32 *addrmap)
1286{
1287	priv->bankgrp_shift[0] = (addrmap[8] &
1288				BANKGRP_MAX_VAL_MASK) + BANKGRP_B0_BASE;
1289	priv->bankgrp_shift[1] = (((addrmap[8] >> 8) & BANKGRP_MAX_VAL_MASK) ==
1290				BANKGRP_MAX_VAL_MASK) ? 0 : (((addrmap[8] >> 8)
1291				& BANKGRP_MAX_VAL_MASK) + BANKGRP_B1_BASE);
1292
1293}
1294
1295static void setup_rank_address_map(struct synps_edac_priv *priv, u32 *addrmap)
1296{
1297	priv->rank_shift[0] = ((addrmap[0] & RANK_MAX_VAL_MASK) ==
1298				RANK_MAX_VAL_MASK) ? 0 : ((addrmap[0] &
1299				RANK_MAX_VAL_MASK) + RANK_B0_BASE);
1300}
1301
1302/**
1303 * setup_address_map -	Set Address Map by querying ADDRMAP registers.
1304 * @priv:		DDR memory controller private instance data.
1305 *
1306 * Set Address Map by querying ADDRMAP registers.
1307 *
1308 * Return: none.
1309 */
1310static void setup_address_map(struct synps_edac_priv *priv)
1311{
1312	u32 addrmap[12];
1313	int index;
1314
1315	for (index = 0; index < 12; index++) {
1316		u32 addrmap_offset;
1317
1318		addrmap_offset = ECC_ADDRMAP0_OFFSET + (index * 4);
1319		addrmap[index] = readl(priv->baseaddr + addrmap_offset);
1320	}
1321
1322	setup_row_address_map(priv, addrmap);
1323
1324	setup_column_address_map(priv, addrmap);
1325
1326	setup_bank_address_map(priv, addrmap);
1327
1328	setup_bg_address_map(priv, addrmap);
1329
1330	setup_rank_address_map(priv, addrmap);
1331}
1332#endif /* CONFIG_EDAC_DEBUG */
1333
1334/**
1335 * mc_probe - Check controller and bind driver.
1336 * @pdev:	platform device.
1337 *
1338 * Probe a specific controller instance for binding with the driver.
1339 *
1340 * Return: 0 if the controller instance was successfully bound to the
1341 * driver; otherwise, < 0 on error.
1342 */
1343static int mc_probe(struct platform_device *pdev)
1344{
1345	const struct synps_platform_data *p_data;
1346	struct edac_mc_layer layers[2];
1347	struct synps_edac_priv *priv;
1348	struct mem_ctl_info *mci;
1349	void __iomem *baseaddr;
 
1350	int rc;
1351
1352	baseaddr = devm_platform_ioremap_resource(pdev, 0);
 
1353	if (IS_ERR(baseaddr))
1354		return PTR_ERR(baseaddr);
1355
1356	p_data = of_device_get_match_data(&pdev->dev);
1357	if (!p_data)
1358		return -ENODEV;
1359
1360	if (!p_data->get_ecc_state(baseaddr)) {
1361		edac_printk(KERN_INFO, EDAC_MC, "ECC not enabled\n");
1362		return -ENXIO;
1363	}
1364
1365	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
1366	layers[0].size = SYNPS_EDAC_NR_CSROWS;
1367	layers[0].is_virt_csrow = true;
1368	layers[1].type = EDAC_MC_LAYER_CHANNEL;
1369	layers[1].size = SYNPS_EDAC_NR_CHANS;
1370	layers[1].is_virt_csrow = false;
1371
1372	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
1373			    sizeof(struct synps_edac_priv));
1374	if (!mci) {
1375		edac_printk(KERN_ERR, EDAC_MC,
1376			    "Failed memory allocation for mc instance\n");
1377		return -ENOMEM;
1378	}
1379
1380	priv = mci->pvt_info;
1381	priv->baseaddr = baseaddr;
1382	priv->p_data = p_data;
1383	spin_lock_init(&priv->reglock);
1384
1385	mc_init(mci, pdev);
1386
1387	if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT) {
1388		rc = setup_irq(mci, pdev);
1389		if (rc)
1390			goto free_edac_mc;
1391	}
1392
1393	rc = edac_mc_add_mc(mci);
1394	if (rc) {
1395		edac_printk(KERN_ERR, EDAC_MC,
1396			    "Failed to register with EDAC core\n");
1397		goto free_edac_mc;
1398	}
1399
1400#ifdef CONFIG_EDAC_DEBUG
1401	if (priv->p_data->quirks & DDR_ECC_DATA_POISON_SUPPORT) {
1402		rc = edac_create_sysfs_attributes(mci);
1403		if (rc) {
1404			edac_printk(KERN_ERR, EDAC_MC,
1405					"Failed to create sysfs entries\n");
1406			goto free_edac_mc;
1407		}
1408	}
1409
1410	if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT)
1411		setup_address_map(priv);
1412#endif
1413
1414	/*
1415	 * Start capturing the correctable and uncorrectable errors. A write of
1416	 * 0 starts the counters.
1417	 */
1418	if (!(priv->p_data->quirks & DDR_ECC_INTR_SUPPORT))
1419		writel(0x0, baseaddr + ECC_CTRL_OFST);
1420
1421	return rc;
1422
1423free_edac_mc:
1424	edac_mc_free(mci);
1425
1426	return rc;
1427}
1428
1429/**
1430 * mc_remove - Unbind driver from controller.
1431 * @pdev:	Platform device.
1432 *
1433 * Return: Unconditionally 0
1434 */
1435static void mc_remove(struct platform_device *pdev)
1436{
1437	struct mem_ctl_info *mci = platform_get_drvdata(pdev);
1438	struct synps_edac_priv *priv = mci->pvt_info;
1439
1440	if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT)
1441		disable_intr(priv);
1442
1443#ifdef CONFIG_EDAC_DEBUG
1444	if (priv->p_data->quirks & DDR_ECC_DATA_POISON_SUPPORT)
1445		edac_remove_sysfs_attributes(mci);
1446#endif
1447
1448	edac_mc_del_mc(&pdev->dev);
1449	edac_mc_free(mci);
1450}
1451
/* Platform driver glue; instances are bound via the OF match table. */
static struct platform_driver synps_edac_mc_driver = {
	.driver = {
		   .name = "synopsys-edac",
		   .of_match_table = synps_edac_match,
		   },
	.probe = mc_probe,
	.remove_new = mc_remove,
};
1460
1461module_platform_driver(synps_edac_mc_driver);
1462
1463MODULE_AUTHOR("Xilinx Inc");
1464MODULE_DESCRIPTION("Synopsys DDR ECC driver");
1465MODULE_LICENSE("GPL v2");