// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for Intel(R) 10nm server memory controller.
 * Copyright (c) 2019, Intel Corporation.
 *
 */

#include <linux/kernel.h>
#include <linux/io.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/mce.h>
#include "edac_module.h"
#include "skx_common.h"

#define I10NM_REVISION	"v0.0.6"
#define EDAC_MOD_STR	"i10nm_edac"

/* Debug macros */
#define i10nm_printk(level, fmt, arg...)	\
	edac_printk(level, "i10nm", fmt, ##arg)

#define I10NM_GET_SCK_BAR(d, reg)	\
	pci_read_config_dword((d)->uracu, 0xd0, &(reg))
#define I10NM_GET_IMC_BAR(d, i, reg)		\
	pci_read_config_dword((d)->uracu,	\
	(res_cfg->type == GNR ? 0xd4 : 0xd8) + (i) * 4, &(reg))
#define I10NM_GET_SAD(d, offset, i, reg)\
	pci_read_config_dword((d)->sad_all, (offset) + (i) * \
	(res_cfg->type == GNR ? 12 : 8), &(reg))
#define I10NM_GET_HBM_IMC_BAR(d, reg)	\
	pci_read_config_dword((d)->uracu, 0xd4, &(reg))
#define I10NM_GET_CAPID3_CFG(d, reg)	\
	pci_read_config_dword((d)->pcu_cr3,	\
	res_cfg->type == GNR ? 0x290 : 0x90, &(reg))
#define I10NM_GET_CAPID5_CFG(d, reg)	\
	pci_read_config_dword((d)->pcu_cr3,	\
	res_cfg->type == GNR ? 0x298 : 0x98, &(reg))
#define I10NM_GET_DIMMMTR(m, i, j)	\
	readl((m)->mbase + ((m)->hbm_mc ? 0x80c :	\
	(res_cfg->type == GNR ? 0xc0c : 0x2080c)) +	\
	(i) * (m)->chan_mmio_sz + (j) * 4)
#define I10NM_GET_MCDDRTCFG(m, i)	\
	readl((m)->mbase + ((m)->hbm_mc ? 0x970 : 0x20970) + \
	(i) * (m)->chan_mmio_sz)
#define I10NM_GET_MCMTR(m, i)		\
	readl((m)->mbase + ((m)->hbm_mc ? 0xef8 :	\
	(res_cfg->type == GNR ? 0xaf8 : 0x20ef8)) +	\
	(i) * (m)->chan_mmio_sz)
#define I10NM_GET_REG32(m, i, offset)	\
	readl((m)->mbase + (i) * (m)->chan_mmio_sz + (offset))
#define I10NM_GET_REG64(m, i, offset)	\
	readq((m)->mbase + (i) * (m)->chan_mmio_sz + (offset))
#define I10NM_SET_REG32(m, i, offset, v)	\
	writel(v, (m)->mbase + (i) * (m)->chan_mmio_sz + (offset))
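
/*
 * Note on addressing (added for clarity): every channel of a memory
 * controller owns a fixed-size MMIO window, so the accessors above
 * locate a register at mbase + chan * chan_mmio_sz + offset, with
 * chan_mmio_sz taken from the active res_config.
 */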

#define I10NM_GET_SCK_MMIO_BASE(reg)	(GET_BITFIELD(reg, 0, 28) << 23)
#define I10NM_GET_IMC_MMIO_OFFSET(reg)	(GET_BITFIELD(reg, 0, 10) << 12)
#define I10NM_GET_IMC_MMIO_SIZE(reg)	((GET_BITFIELD(reg, 13, 23) - \
					 GET_BITFIELD(reg, 0, 10) + 1) << 12)
#define I10NM_GET_HBM_IMC_MMIO_OFFSET(reg)	\
	((GET_BITFIELD(reg, 0, 10) << 12) + 0x140000)
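
/*
 * Worked example (illustrative values only): if a BAR register reports
 * bits [10:0] = 0x100 (first 4KB page) and bits [23:13] = 0x13f (last
 * 4KB page), then I10NM_GET_IMC_MMIO_OFFSET() yields 0x100 << 12 =
 * 0x100000 and I10NM_GET_IMC_MMIO_SIZE() yields
 * (0x13f - 0x100 + 1) << 12 = 0x40000, i.e. the window is described as
 * an inclusive range of 4KB pages.
 */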

#define I10NM_GNR_IMC_MMIO_OFFSET	0x24c000
#define I10NM_GNR_IMC_MMIO_SIZE		0x4000
#define I10NM_HBM_IMC_MMIO_SIZE		0x9000
#define I10NM_DDR_IMC_CH_CNT(reg)	GET_BITFIELD(reg, 21, 24)
#define I10NM_IS_HBM_PRESENT(reg)	GET_BITFIELD(reg, 27, 30)
#define I10NM_IS_HBM_IMC(reg)		GET_BITFIELD(reg, 29, 29)
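
/*
 * I10NM_DDR_IMC_CH_CNT() and I10NM_IS_HBM_PRESENT() decode the PCU
 * CAPID5/CAPID3 registers fetched by the *_CAPID*_CFG macros above;
 * see i10nm_get_imc_num() and i10nm_check_hbm_imc() below.
 */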

#define I10NM_MAX_SAD			16
#define I10NM_SAD_ENABLE(reg)		GET_BITFIELD(reg, 0, 0)
#define I10NM_SAD_NM_CACHEABLE(reg)	GET_BITFIELD(reg, 5, 5)

#define RETRY_RD_ERR_LOG_UC		BIT(1)
#define RETRY_RD_ERR_LOG_NOOVER		BIT(14)
#define RETRY_RD_ERR_LOG_EN		BIT(15)
#define RETRY_RD_ERR_LOG_NOOVER_UC	(BIT(14) | BIT(1))
#define RETRY_RD_ERR_LOG_OVER_UC_V	(BIT(2) | BIT(1) | BIT(0))
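
/*
 * As used below: EN turns retry-read error logging on, NOOVER keeps a
 * logged error from being overwritten, and UC selects logging of
 * uncorrectable errors. OVER_UC_V names the overflow/UC/valid status
 * bits that show_retry_rd_err_log() clears when retry_rd_err_log=2.
 */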

static struct list_head *i10nm_edac_list;

static struct res_config *res_cfg;
static int retry_rd_err_log;
static int decoding_via_mca;
static bool mem_cfg_2lm;

static u32 offsets_scrub_icx[]  = {0x22c60, 0x22c54, 0x22c5c, 0x22c58, 0x22c28, 0x20ed8};
static u32 offsets_scrub_spr[]  = {0x22c60, 0x22c54, 0x22f08, 0x22c58, 0x22c28, 0x20ed8};
static u32 offsets_scrub_spr_hbm0[]  = {0x2860, 0x2854, 0x2b08, 0x2858, 0x2828, 0x0ed8};
static u32 offsets_scrub_spr_hbm1[]  = {0x2c60, 0x2c54, 0x2f08, 0x2c58, 0x2c28, 0x0fa8};
static u32 offsets_demand_icx[] = {0x22e54, 0x22e60, 0x22e64, 0x22e58, 0x22e5c, 0x20ee0};
static u32 offsets_demand_spr[] = {0x22e54, 0x22e60, 0x22f10, 0x22e58, 0x22e5c, 0x20ee0};
static u32 offsets_demand2_spr[] = {0x22c70, 0x22d80, 0x22f18, 0x22d58, 0x22c64, 0x20f10};
static u32 offsets_demand_spr_hbm0[] = {0x2a54, 0x2a60, 0x2b10, 0x2a58, 0x2a5c, 0x0ee0};
static u32 offsets_demand_spr_hbm1[] = {0x2e54, 0x2e60, 0x2f10, 0x2e58, 0x2e5c, 0x0fb0};
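
/*
 * Each array above holds six per-channel register offsets for one
 * retry_rd_err_log instance: entry [0] is the control/status word
 * rewritten by __enable_retry_rd_err_log(), entry [5] is read as a
 * 64-bit register, entry [2] is 64-bit on SPR but 32-bit on ICX, and
 * the rest are 32-bit log words dumped by show_retry_rd_err_log().
 */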

static void __enable_retry_rd_err_log(struct skx_imc *imc, int chan, bool enable,
				      u32 *offsets_scrub, u32 *offsets_demand,
				      u32 *offsets_demand2)
{
	u32 s, d, d2;

	s = I10NM_GET_REG32(imc, chan, offsets_scrub[0]);
	d = I10NM_GET_REG32(imc, chan, offsets_demand[0]);
	if (offsets_demand2)
		d2 = I10NM_GET_REG32(imc, chan, offsets_demand2[0]);

	if (enable) {
		/* Save default configurations */
		imc->chan[chan].retry_rd_err_log_s = s;
		imc->chan[chan].retry_rd_err_log_d = d;
		if (offsets_demand2)
			imc->chan[chan].retry_rd_err_log_d2 = d2;

		s &= ~RETRY_RD_ERR_LOG_NOOVER_UC;
		s |=  RETRY_RD_ERR_LOG_EN;
		d &= ~RETRY_RD_ERR_LOG_NOOVER_UC;
		d |=  RETRY_RD_ERR_LOG_EN;

		if (offsets_demand2) {
			d2 &= ~RETRY_RD_ERR_LOG_UC;
			d2 |=  RETRY_RD_ERR_LOG_NOOVER;
			d2 |=  RETRY_RD_ERR_LOG_EN;
		}
	} else {
		/* Restore default configurations */
		if (imc->chan[chan].retry_rd_err_log_s & RETRY_RD_ERR_LOG_UC)
			s |=  RETRY_RD_ERR_LOG_UC;
		if (imc->chan[chan].retry_rd_err_log_s & RETRY_RD_ERR_LOG_NOOVER)
			s |=  RETRY_RD_ERR_LOG_NOOVER;
		if (!(imc->chan[chan].retry_rd_err_log_s & RETRY_RD_ERR_LOG_EN))
			s &= ~RETRY_RD_ERR_LOG_EN;
		if (imc->chan[chan].retry_rd_err_log_d & RETRY_RD_ERR_LOG_UC)
			d |=  RETRY_RD_ERR_LOG_UC;
		if (imc->chan[chan].retry_rd_err_log_d & RETRY_RD_ERR_LOG_NOOVER)
			d |=  RETRY_RD_ERR_LOG_NOOVER;
		if (!(imc->chan[chan].retry_rd_err_log_d & RETRY_RD_ERR_LOG_EN))
			d &= ~RETRY_RD_ERR_LOG_EN;

		if (offsets_demand2) {
			if (imc->chan[chan].retry_rd_err_log_d2 & RETRY_RD_ERR_LOG_UC)
				d2 |=  RETRY_RD_ERR_LOG_UC;
			if (!(imc->chan[chan].retry_rd_err_log_d2 & RETRY_RD_ERR_LOG_NOOVER))
				d2 &=  ~RETRY_RD_ERR_LOG_NOOVER;
			if (!(imc->chan[chan].retry_rd_err_log_d2 & RETRY_RD_ERR_LOG_EN))
				d2 &= ~RETRY_RD_ERR_LOG_EN;
		}
	}

	I10NM_SET_REG32(imc, chan, offsets_scrub[0], s);
	I10NM_SET_REG32(imc, chan, offsets_demand[0], d);
	if (offsets_demand2)
		I10NM_SET_REG32(imc, chan, offsets_demand2[0], d2);
}

static void enable_retry_rd_err_log(bool enable)
{
	int i, j, imc_num, chan_num;
	struct skx_imc *imc;
	struct skx_dev *d;

	edac_dbg(2, "\n");

	list_for_each_entry(d, i10nm_edac_list, list) {
		imc_num  = res_cfg->ddr_imc_num;
		chan_num = res_cfg->ddr_chan_num;

		for (i = 0; i < imc_num; i++) {
			imc = &d->imc[i];
			if (!imc->mbase)
				continue;

			for (j = 0; j < chan_num; j++)
				__enable_retry_rd_err_log(imc, j, enable,
							  res_cfg->offsets_scrub,
							  res_cfg->offsets_demand,
							  res_cfg->offsets_demand2);
		}

		imc_num += res_cfg->hbm_imc_num;
		chan_num = res_cfg->hbm_chan_num;

		for (; i < imc_num; i++) {
			imc = &d->imc[i];
			if (!imc->mbase || !imc->hbm_mc)
				continue;

			for (j = 0; j < chan_num; j++) {
				__enable_retry_rd_err_log(imc, j, enable,
							  res_cfg->offsets_scrub_hbm0,
							  res_cfg->offsets_demand_hbm0,
							  NULL);
				__enable_retry_rd_err_log(imc, j, enable,
							  res_cfg->offsets_scrub_hbm1,
							  res_cfg->offsets_demand_hbm1,
							  NULL);
			}
		}
	}
}

static void show_retry_rd_err_log(struct decoded_addr *res, char *msg,
				  int len, bool scrub_err)
{
	struct skx_imc *imc = &res->dev->imc[res->imc];
	u32 log0, log1, log2, log3, log4;
	u32 corr0, corr1, corr2, corr3;
	u32 lxg0, lxg1, lxg3, lxg4;
	u32 *xffsets = NULL;
	u64 log2a, log5;
	u64 lxg2a, lxg5;
	u32 *offsets;
	int n, pch;

	if (!imc->mbase)
		return;

	if (imc->hbm_mc) {
		pch = res->cs & 1;

		if (pch)
			offsets = scrub_err ? res_cfg->offsets_scrub_hbm1 :
					      res_cfg->offsets_demand_hbm1;
		else
			offsets = scrub_err ? res_cfg->offsets_scrub_hbm0 :
					      res_cfg->offsets_demand_hbm0;
	} else {
		if (scrub_err) {
			offsets = res_cfg->offsets_scrub;
		} else {
			offsets = res_cfg->offsets_demand;
			xffsets = res_cfg->offsets_demand2;
		}
	}

	log0 = I10NM_GET_REG32(imc, res->channel, offsets[0]);
	log1 = I10NM_GET_REG32(imc, res->channel, offsets[1]);
	log3 = I10NM_GET_REG32(imc, res->channel, offsets[3]);
	log4 = I10NM_GET_REG32(imc, res->channel, offsets[4]);
	log5 = I10NM_GET_REG64(imc, res->channel, offsets[5]);

	if (xffsets) {
		lxg0 = I10NM_GET_REG32(imc, res->channel, xffsets[0]);
		lxg1 = I10NM_GET_REG32(imc, res->channel, xffsets[1]);
		lxg3 = I10NM_GET_REG32(imc, res->channel, xffsets[3]);
		lxg4 = I10NM_GET_REG32(imc, res->channel, xffsets[4]);
		lxg5 = I10NM_GET_REG64(imc, res->channel, xffsets[5]);
	}

	if (res_cfg->type == SPR) {
		log2a = I10NM_GET_REG64(imc, res->channel, offsets[2]);
		n = snprintf(msg, len, " retry_rd_err_log[%.8x %.8x %.16llx %.8x %.8x %.16llx",
			     log0, log1, log2a, log3, log4, log5);

		if (len - n > 0) {
			if (xffsets) {
				lxg2a = I10NM_GET_REG64(imc, res->channel, xffsets[2]);
				n += snprintf(msg + n, len - n, " %.8x %.8x %.16llx %.8x %.8x %.16llx]",
					     lxg0, lxg1, lxg2a, lxg3, lxg4, lxg5);
			} else {
				n += snprintf(msg + n, len - n, "]");
			}
		}
	} else {
		log2 = I10NM_GET_REG32(imc, res->channel, offsets[2]);
		n = snprintf(msg, len, " retry_rd_err_log[%.8x %.8x %.8x %.8x %.8x %.16llx]",
			     log0, log1, log2, log3, log4, log5);
	}

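	/*
	 * Correctable-error counters: each 32-bit correrrcnt register
	 * packs two 16-bit counts, split out by the mask/shift pairs
	 * when the message is formatted below.
	 */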
	if (imc->hbm_mc) {
		if (pch) {
			corr0 = I10NM_GET_REG32(imc, res->channel, 0x2c18);
			corr1 = I10NM_GET_REG32(imc, res->channel, 0x2c1c);
			corr2 = I10NM_GET_REG32(imc, res->channel, 0x2c20);
			corr3 = I10NM_GET_REG32(imc, res->channel, 0x2c24);
		} else {
			corr0 = I10NM_GET_REG32(imc, res->channel, 0x2818);
			corr1 = I10NM_GET_REG32(imc, res->channel, 0x281c);
			corr2 = I10NM_GET_REG32(imc, res->channel, 0x2820);
			corr3 = I10NM_GET_REG32(imc, res->channel, 0x2824);
		}
	} else {
		corr0 = I10NM_GET_REG32(imc, res->channel, 0x22c18);
		corr1 = I10NM_GET_REG32(imc, res->channel, 0x22c1c);
		corr2 = I10NM_GET_REG32(imc, res->channel, 0x22c20);
		corr3 = I10NM_GET_REG32(imc, res->channel, 0x22c24);
	}

	if (len - n > 0)
		snprintf(msg + n, len - n,
			 " correrrcnt[%.4x %.4x %.4x %.4x %.4x %.4x %.4x %.4x]",
			 corr0 & 0xffff, corr0 >> 16,
			 corr1 & 0xffff, corr1 >> 16,
			 corr2 & 0xffff, corr2 >> 16,
			 corr3 & 0xffff, corr3 >> 16);

	/* Clear status bits */
	if (retry_rd_err_log == 2) {
		if (log0 & RETRY_RD_ERR_LOG_OVER_UC_V) {
			log0 &= ~RETRY_RD_ERR_LOG_OVER_UC_V;
			I10NM_SET_REG32(imc, res->channel, offsets[0], log0);
		}

		if (xffsets && (lxg0 & RETRY_RD_ERR_LOG_OVER_UC_V)) {
			lxg0 &= ~RETRY_RD_ERR_LOG_OVER_UC_V;
			I10NM_SET_REG32(imc, res->channel, xffsets[0], lxg0);
		}
	}
}

static struct pci_dev *pci_get_dev_wrapper(int dom, unsigned int bus,
					   unsigned int dev, unsigned int fun)
{
	struct pci_dev *pdev;

	pdev = pci_get_domain_bus_and_slot(dom, bus, PCI_DEVFN(dev, fun));
	if (!pdev) {
		edac_dbg(2, "No device %02x:%02x.%x\n",
			 bus, dev, fun);
		return NULL;
	}

	if (unlikely(pci_enable_device(pdev) < 0)) {
		edac_dbg(2, "Failed to enable device %02x:%02x.%x\n",
			 bus, dev, fun);
		pci_dev_put(pdev);
		return NULL;
	}

	return pdev;
}

/**
 * i10nm_get_imc_num() - Get the number of present DDR memory controllers.
 *
 * @cfg : The pointer to the structure of EDAC resource configurations.
 *
 * For Granite Rapids CPUs, the number of present DDR memory controllers read
 * at runtime overwrites the value statically configured in @cfg->ddr_imc_num.
 * For other CPUs, the number of present DDR memory controllers is statically
 * configured in @cfg->ddr_imc_num.
 *
 * RETURNS : 0 on success, < 0 on failure.
 */
static int i10nm_get_imc_num(struct res_config *cfg)
{
	int n, imc_num, chan_num = 0;
	struct skx_dev *d;
	u32 reg;

	list_for_each_entry(d, i10nm_edac_list, list) {
		d->pcu_cr3 = pci_get_dev_wrapper(d->seg, d->bus[res_cfg->pcu_cr3_bdf.bus],
						 res_cfg->pcu_cr3_bdf.dev,
						 res_cfg->pcu_cr3_bdf.fun);
		if (!d->pcu_cr3)
			continue;

		if (I10NM_GET_CAPID5_CFG(d, reg))
			continue;

		n = I10NM_DDR_IMC_CH_CNT(reg);

		if (!chan_num) {
			chan_num = n;
			edac_dbg(2, "Get DDR CH number: %d\n", chan_num);
		} else if (chan_num != n) {
			i10nm_printk(KERN_NOTICE, "Get DDR CH numbers: %d, %d\n", chan_num, n);
		}
	}

	switch (cfg->type) {
	case GNR:
		/*
		 * One channel per DDR memory controller for Granite Rapids CPUs.
		 */
		imc_num = chan_num;

		if (!imc_num) {
			i10nm_printk(KERN_ERR, "Invalid DDR MC number\n");
			return -ENODEV;
		}

		if (imc_num > I10NM_NUM_DDR_IMC) {
			i10nm_printk(KERN_ERR, "Need to make I10NM_NUM_DDR_IMC >= %d\n", imc_num);
			return -EINVAL;
		}

		if (cfg->ddr_imc_num != imc_num) {
			/*
			 * Store the number of present DDR memory controllers.
			 */
			cfg->ddr_imc_num = imc_num;
			edac_dbg(2, "Set DDR MC number: %d", imc_num);
		}

		return 0;
	default:
		/*
		 * For other CPUs, the number of present DDR memory controllers
		 * is statically pre-configured in cfg->ddr_imc_num.
		 */
		return 0;
	}
}

static bool i10nm_check_2lm(struct res_config *cfg)
{
	struct skx_dev *d;
	u32 reg;
	int i;

	list_for_each_entry(d, i10nm_edac_list, list) {
		d->sad_all = pci_get_dev_wrapper(d->seg, d->bus[res_cfg->sad_all_bdf.bus],
						 res_cfg->sad_all_bdf.dev,
						 res_cfg->sad_all_bdf.fun);
		if (!d->sad_all)
			continue;

		for (i = 0; i < I10NM_MAX_SAD; i++) {
			I10NM_GET_SAD(d, cfg->sad_all_offset, i, reg);
			if (I10NM_SAD_ENABLE(reg) && I10NM_SAD_NM_CACHEABLE(reg)) {
				edac_dbg(2, "2-level memory configuration.\n");
				return true;
			}
		}
	}

	return false;
}

/*
 * Check whether the error comes from DDRT by ICX/Tremont/SPR model specific error code.
 * Refer to SDM vol3B 17.11.3/17.13.2 Intel IMC MC error codes for IA32_MCi_STATUS.
 */
static bool i10nm_mscod_is_ddrt(u32 mscod)
{
	switch (res_cfg->type) {
	case I10NM:
		switch (mscod) {
		case 0x0106: case 0x0107:
		case 0x0800: case 0x0804:
		case 0x0806 ... 0x0808:
		case 0x080a ... 0x080e:
		case 0x0810: case 0x0811:
		case 0x0816: case 0x081e:
		case 0x081f:
			return true;
		}

		break;
	case SPR:
		switch (mscod) {
		case 0x0800: case 0x0804:
		case 0x0806 ... 0x0808:
		case 0x080a ... 0x080e:
		case 0x0810: case 0x0811:
		case 0x0816: case 0x081e:
		case 0x081f:
			return true;
		}

		break;
	default:
		return false;
	}

	return false;
}

static bool i10nm_mc_decode_available(struct mce *mce)
{
#define ICX_IMCx_CHy		0x06666000
	u8 bank;

	if (!decoding_via_mca || mem_cfg_2lm)
		return false;

	if ((mce->status & (MCI_STATUS_MISCV | MCI_STATUS_ADDRV))
			!= (MCI_STATUS_MISCV | MCI_STATUS_ADDRV))
		return false;

	bank = mce->bank;

	switch (res_cfg->type) {
	case I10NM:
		/* Check whether the bank is one of {13,14,17,18,21,22,25,26} */
		if (!(ICX_IMCx_CHy & (1 << bank)))
			return false;
		break;
	case SPR:
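		/* MC banks 13-20 are the memory controller banks on SPR */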
		if (bank < 13 || bank > 20)
			return false;
		break;
	default:
		return false;
	}

	/* DDRT errors can't be decoded from MCA bank registers */
	if (MCI_MISC_ECC_MODE(mce->misc) == MCI_MISC_ECC_DDRT)
		return false;

	if (i10nm_mscod_is_ddrt(MCI_STATUS_MSCOD(mce->status)))
		return false;

	return true;
}

static bool i10nm_mc_decode(struct decoded_addr *res)
{
	struct mce *m = res->mce;
	struct skx_dev *d;
	u8 bank;

	if (!i10nm_mc_decode_available(m))
		return false;

	list_for_each_entry(d, i10nm_edac_list, list) {
		if (d->imc[0].src_id == m->socketid) {
			res->socket = m->socketid;
			res->dev = d;
			break;
		}
	}

	switch (res_cfg->type) {
	case I10NM:
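		/*
		 * ICX MC banks come in groups of four per IMC starting
		 * at bank 13, with the low two banks of each group being
		 * the two DDR channels - hence the /4 and %2 below.
		 */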
		bank              = m->bank - 13;
		res->imc          = bank / 4;
		res->channel      = bank % 2;
		res->column       = GET_BITFIELD(m->misc, 9, 18) << 2;
		res->row          = GET_BITFIELD(m->misc, 19, 39);
		res->bank_group   = GET_BITFIELD(m->misc, 40, 41);
		res->bank_address = GET_BITFIELD(m->misc, 42, 43);
		res->bank_group  |= GET_BITFIELD(m->misc, 44, 44) << 2;
		res->rank         = GET_BITFIELD(m->misc, 56, 58);
		res->dimm         = res->rank >> 2;
		res->rank         = res->rank % 4;
		break;
	case SPR:
		bank              = m->bank - 13;
		res->imc          = bank / 2;
		res->channel      = bank % 2;
		res->column       = GET_BITFIELD(m->misc, 9, 18) << 2;
		res->row          = GET_BITFIELD(m->misc, 19, 36);
		res->bank_group   = GET_BITFIELD(m->misc, 37, 38);
		res->bank_address = GET_BITFIELD(m->misc, 39, 40);
		res->bank_group  |= GET_BITFIELD(m->misc, 41, 41) << 2;
		res->rank         = GET_BITFIELD(m->misc, 57, 57);
		res->dimm         = GET_BITFIELD(m->misc, 58, 58);
		break;
	default:
		return false;
	}

	if (!res->dev) {
		skx_printk(KERN_ERR, "No device for src_id %d imc %d\n",
			   m->socketid, res->imc);
		return false;
	}

	return true;
}

/**
 * get_gnr_mdev() - Get the PCI device of the @logical_idx-th DDR memory controller.
 *
 * @d            : The pointer to the structure of CPU socket EDAC device.
 * @logical_idx  : The logical index of the present memory controller (0 ~ max present MC# - 1).
 * @physical_idx : To store the corresponding physical index of @logical_idx.
 *
 * RETURNS       : The PCI device of the @logical_idx-th DDR memory controller, NULL on failure.
 */
static struct pci_dev *get_gnr_mdev(struct skx_dev *d, int logical_idx, int *physical_idx)
{
#define GNR_MAX_IMC_PCI_CNT	28

	struct pci_dev *mdev;
	int i, logical = 0;

	/*
	 * Detect present memory controllers from { PCI device: 8-5, function 7-1 }
	 */
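	/* 28 candidates: 4 PCI devices x 7 functions, walked as dev + i/7, fun + i%7 */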
	for (i = 0; i < GNR_MAX_IMC_PCI_CNT; i++) {
		mdev = pci_get_dev_wrapper(d->seg,
					   d->bus[res_cfg->ddr_mdev_bdf.bus],
					   res_cfg->ddr_mdev_bdf.dev + i / 7,
					   res_cfg->ddr_mdev_bdf.fun + i % 7);

		if (mdev) {
			if (logical == logical_idx) {
				*physical_idx = i;
				return mdev;
			}

			pci_dev_put(mdev);
			logical++;
		}
	}

	return NULL;
}

/**
 * get_ddr_munit() - Get the resource of the i-th DDR memory controller.
 *
 * @d      : The pointer to the structure of CPU socket EDAC device.
 * @i      : The socket-relative index of the DDR memory controller.
 * @offset : To store the MMIO offset of the i-th DDR memory controller.
 * @size   : To store the MMIO size of the i-th DDR memory controller.
 *
 * RETURNS : The PCI device of the i-th DDR memory controller, NULL on failure.
 */
static struct pci_dev *get_ddr_munit(struct skx_dev *d, int i, u32 *offset, unsigned long *size)
{
	struct pci_dev *mdev;
	int physical_idx;
	u32 reg;

	switch (res_cfg->type) {
	case GNR:
		if (I10NM_GET_IMC_BAR(d, 0, reg)) {
			i10nm_printk(KERN_ERR, "Failed to get mc0 bar\n");
			return NULL;
		}

		mdev = get_gnr_mdev(d, i, &physical_idx);
		if (!mdev)
			return NULL;

		*offset = I10NM_GET_IMC_MMIO_OFFSET(reg) +
			  I10NM_GNR_IMC_MMIO_OFFSET +
			  physical_idx * I10NM_GNR_IMC_MMIO_SIZE;
		*size   = I10NM_GNR_IMC_MMIO_SIZE;

		break;
	default:
		if (I10NM_GET_IMC_BAR(d, i, reg)) {
			i10nm_printk(KERN_ERR, "Failed to get mc%d bar\n", i);
			return NULL;
		}

		mdev = pci_get_dev_wrapper(d->seg,
					   d->bus[res_cfg->ddr_mdev_bdf.bus],
					   res_cfg->ddr_mdev_bdf.dev + i,
					   res_cfg->ddr_mdev_bdf.fun);
		if (!mdev)
			return NULL;

		*offset  = I10NM_GET_IMC_MMIO_OFFSET(reg);
		*size    = I10NM_GET_IMC_MMIO_SIZE(reg);
	}

	return mdev;
}

/**
 * i10nm_imc_absent() - Check whether the memory controller @imc is absent
 *
 * @imc    : The pointer to the structure of memory controller EDAC device.
 *
 * RETURNS : true if the memory controller EDAC device is absent, false otherwise.
 */
static bool i10nm_imc_absent(struct skx_imc *imc)
{
	u32 mcmtr;
	int i;

	switch (res_cfg->type) {
	case SPR:
		for (i = 0; i < res_cfg->ddr_chan_num; i++) {
			mcmtr = I10NM_GET_MCMTR(imc, i);
			edac_dbg(1, "ch%d mcmtr reg %x\n", i, mcmtr);
			if (mcmtr != ~0)
				return false;
		}

		/*
		 * On some workstations, absent memory controllers still
		 * appear as PCIe devices, misleading the EDAC driver. The
		 * MMIO registers of these absent memory controllers are
		 * observed to consistently hold the value of ~0, so we
		 * identify a memory controller as absent by checking
		 * whether its MMIO register "mcmtr" reads ~0 in all of
		 * its channels.
		 */
		return true;
	default:
		return false;
	}
}

static int i10nm_get_ddr_munits(void)
{
	struct pci_dev *mdev;
	void __iomem *mbase;
	unsigned long size;
	struct skx_dev *d;
	int i, lmc, j = 0;
	u32 reg, off;
	u64 base;

	list_for_each_entry(d, i10nm_edac_list, list) {
		d->util_all = pci_get_dev_wrapper(d->seg, d->bus[res_cfg->util_all_bdf.bus],
						  res_cfg->util_all_bdf.dev,
						  res_cfg->util_all_bdf.fun);
		if (!d->util_all)
			return -ENODEV;

		d->uracu = pci_get_dev_wrapper(d->seg, d->bus[res_cfg->uracu_bdf.bus],
					       res_cfg->uracu_bdf.dev,
					       res_cfg->uracu_bdf.fun);
		if (!d->uracu)
			return -ENODEV;

		if (I10NM_GET_SCK_BAR(d, reg)) {
			i10nm_printk(KERN_ERR, "Failed to get socket bar\n");
			return -ENODEV;
		}

		base = I10NM_GET_SCK_MMIO_BASE(reg);
		edac_dbg(2, "socket%d mmio base 0x%llx (reg 0x%x)\n",
			 j++, base, reg);

		for (lmc = 0, i = 0; i < res_cfg->ddr_imc_num; i++) {
			mdev = get_ddr_munit(d, i, &off, &size);

			if (i == 0 && !mdev) {
				i10nm_printk(KERN_ERR, "No IMC found\n");
				return -ENODEV;
			}
			if (!mdev)
				continue;

			edac_dbg(2, "mc%d mmio base 0x%llx size 0x%lx (reg 0x%x)\n",
				 i, base + off, size, reg);

			mbase = ioremap(base + off, size);
			if (!mbase) {
				i10nm_printk(KERN_ERR, "Failed to ioremap 0x%llx\n",
					     base + off);
				return -ENODEV;
			}

			d->imc[lmc].mbase = mbase;
			if (i10nm_imc_absent(&d->imc[lmc])) {
				pci_dev_put(mdev);
				iounmap(mbase);
				d->imc[lmc].mbase = NULL;
				edac_dbg(2, "Skip absent mc%d\n", i);
				continue;
			} else {
				d->imc[lmc].mdev = mdev;
				lmc++;
			}
		}
	}

	return 0;
}

static bool i10nm_check_hbm_imc(struct skx_dev *d)
{
	u32 reg;

	if (I10NM_GET_CAPID3_CFG(d, reg)) {
		i10nm_printk(KERN_ERR, "Failed to get capid3_cfg\n");
		return false;
	}

	return I10NM_IS_HBM_PRESENT(reg) != 0;
}

static int i10nm_get_hbm_munits(void)
{
	struct pci_dev *mdev;
	void __iomem *mbase;
	u32 reg, off, mcmtr;
	struct skx_dev *d;
	int i, lmc;
	u64 base;

	list_for_each_entry(d, i10nm_edac_list, list) {
		if (!d->pcu_cr3)
			return -ENODEV;

		if (!i10nm_check_hbm_imc(d)) {
			i10nm_printk(KERN_DEBUG, "No hbm memory\n");
			return -ENODEV;
		}

		if (I10NM_GET_SCK_BAR(d, reg)) {
			i10nm_printk(KERN_ERR, "Failed to get socket bar\n");
			return -ENODEV;
		}
		base = I10NM_GET_SCK_MMIO_BASE(reg);

		if (I10NM_GET_HBM_IMC_BAR(d, reg)) {
			i10nm_printk(KERN_ERR, "Failed to get hbm mc bar\n");
			return -ENODEV;
		}
		base += I10NM_GET_HBM_IMC_MMIO_OFFSET(reg);

		lmc = res_cfg->ddr_imc_num;

		for (i = 0; i < res_cfg->hbm_imc_num; i++) {
			mdev = pci_get_dev_wrapper(d->seg, d->bus[res_cfg->hbm_mdev_bdf.bus],
						   res_cfg->hbm_mdev_bdf.dev + i / 4,
						   res_cfg->hbm_mdev_bdf.fun + i % 4);

			if (i == 0 && !mdev) {
				i10nm_printk(KERN_ERR, "No hbm mc found\n");
				return -ENODEV;
			}
			if (!mdev)
				continue;

			d->imc[lmc].mdev = mdev;
			off = i * I10NM_HBM_IMC_MMIO_SIZE;

			edac_dbg(2, "hbm mc%d mmio base 0x%llx size 0x%x\n",
				 lmc, base + off, I10NM_HBM_IMC_MMIO_SIZE);

			mbase = ioremap(base + off, I10NM_HBM_IMC_MMIO_SIZE);
			if (!mbase) {
				pci_dev_put(d->imc[lmc].mdev);
				d->imc[lmc].mdev = NULL;

				i10nm_printk(KERN_ERR, "Failed to ioremap for hbm mc 0x%llx\n",
					     base + off);
				return -ENOMEM;
			}

			d->imc[lmc].mbase = mbase;
			d->imc[lmc].hbm_mc = true;

			mcmtr = I10NM_GET_MCMTR(&d->imc[lmc], 0);
			if (!I10NM_IS_HBM_IMC(mcmtr)) {
				iounmap(d->imc[lmc].mbase);
				d->imc[lmc].mbase = NULL;
				d->imc[lmc].hbm_mc = false;
				pci_dev_put(d->imc[lmc].mdev);
				d->imc[lmc].mdev = NULL;

				i10nm_printk(KERN_ERR, "This isn't an hbm mc!\n");
				return -ENODEV;
			}

			lmc++;
		}
	}

	return 0;
}

static struct res_config i10nm_cfg0 = {
	.type			= I10NM,
	.decs_did		= 0x3452,
	.busno_cfg_offset	= 0xcc,
	.ddr_imc_num		= 4,
	.ddr_chan_num		= 2,
	.ddr_dimm_num		= 2,
	.ddr_chan_mmio_sz	= 0x4000,
	.sad_all_bdf		= {1, 29, 0},
	.pcu_cr3_bdf		= {1, 30, 3},
	.util_all_bdf		= {1, 29, 1},
	.uracu_bdf		= {0, 0, 1},
	.ddr_mdev_bdf		= {0, 12, 0},
	.hbm_mdev_bdf		= {0, 12, 1},
	.sad_all_offset		= 0x108,
	.offsets_scrub		= offsets_scrub_icx,
	.offsets_demand		= offsets_demand_icx,
};

static struct res_config i10nm_cfg1 = {
	.type			= I10NM,
	.decs_did		= 0x3452,
	.busno_cfg_offset	= 0xd0,
	.ddr_imc_num		= 4,
	.ddr_chan_num		= 2,
	.ddr_dimm_num		= 2,
	.ddr_chan_mmio_sz	= 0x4000,
	.sad_all_bdf		= {1, 29, 0},
	.pcu_cr3_bdf		= {1, 30, 3},
	.util_all_bdf		= {1, 29, 1},
	.uracu_bdf		= {0, 0, 1},
	.ddr_mdev_bdf		= {0, 12, 0},
	.hbm_mdev_bdf		= {0, 12, 1},
	.sad_all_offset		= 0x108,
	.offsets_scrub		= offsets_scrub_icx,
	.offsets_demand		= offsets_demand_icx,
};

static struct res_config spr_cfg = {
	.type			= SPR,
	.decs_did		= 0x3252,
	.busno_cfg_offset	= 0xd0,
	.ddr_imc_num		= 4,
	.ddr_chan_num		= 2,
	.ddr_dimm_num		= 2,
	.hbm_imc_num		= 16,
	.hbm_chan_num		= 2,
	.hbm_dimm_num		= 1,
	.ddr_chan_mmio_sz	= 0x8000,
	.hbm_chan_mmio_sz	= 0x4000,
	.support_ddr5		= true,
	.sad_all_bdf		= {1, 10, 0},
	.pcu_cr3_bdf		= {1, 30, 3},
	.util_all_bdf		= {1, 29, 1},
	.uracu_bdf		= {0, 0, 1},
	.ddr_mdev_bdf		= {0, 12, 0},
	.hbm_mdev_bdf		= {0, 12, 1},
	.sad_all_offset		= 0x300,
	.offsets_scrub		= offsets_scrub_spr,
	.offsets_scrub_hbm0	= offsets_scrub_spr_hbm0,
	.offsets_scrub_hbm1	= offsets_scrub_spr_hbm1,
	.offsets_demand		= offsets_demand_spr,
	.offsets_demand2	= offsets_demand2_spr,
	.offsets_demand_hbm0	= offsets_demand_spr_hbm0,
	.offsets_demand_hbm1	= offsets_demand_spr_hbm1,
};

static struct res_config gnr_cfg = {
	.type			= GNR,
	.decs_did		= 0x3252,
	.busno_cfg_offset	= 0xd0,
	.ddr_imc_num		= 12,
	.ddr_chan_num		= 1,
	.ddr_dimm_num		= 2,
	.ddr_chan_mmio_sz	= 0x4000,
	.support_ddr5		= true,
	.sad_all_bdf		= {0, 13, 0},
	.pcu_cr3_bdf		= {0, 5, 0},
	.util_all_bdf		= {0, 13, 1},
	.uracu_bdf		= {0, 0, 1},
	.ddr_mdev_bdf		= {0, 5, 1},
	.sad_all_offset		= 0x300,
};

static const struct x86_cpu_id i10nm_cpuids[] = {
	X86_MATCH_VFM_STEPPINGS(INTEL_ATOM_TREMONT_D,	X86_STEPPINGS(0x0, 0x3), &i10nm_cfg0),
	X86_MATCH_VFM_STEPPINGS(INTEL_ATOM_TREMONT_D,	X86_STEPPINGS(0x4, 0xf), &i10nm_cfg1),
	X86_MATCH_VFM_STEPPINGS(INTEL_ICELAKE_X,	X86_STEPPINGS(0x0, 0x3), &i10nm_cfg0),
	X86_MATCH_VFM_STEPPINGS(INTEL_ICELAKE_X,	X86_STEPPINGS(0x4, 0xf), &i10nm_cfg1),
	X86_MATCH_VFM_STEPPINGS(INTEL_ICELAKE_D,	X86_STEPPINGS(0x0, 0xf), &i10nm_cfg1),
	X86_MATCH_VFM_STEPPINGS(INTEL_SAPPHIRERAPIDS_X,	X86_STEPPINGS(0x0, 0xf), &spr_cfg),
	X86_MATCH_VFM_STEPPINGS(INTEL_EMERALDRAPIDS_X,	X86_STEPPINGS(0x0, 0xf), &spr_cfg),
	X86_MATCH_VFM_STEPPINGS(INTEL_GRANITERAPIDS_X,	X86_STEPPINGS(0x0, 0xf), &gnr_cfg),
	X86_MATCH_VFM_STEPPINGS(INTEL_ATOM_CRESTMONT_X,	X86_STEPPINGS(0x0, 0xf), &gnr_cfg),
	X86_MATCH_VFM_STEPPINGS(INTEL_ATOM_CRESTMONT,	X86_STEPPINGS(0x0, 0xf), &gnr_cfg),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, i10nm_cpuids);

static bool i10nm_check_ecc(struct skx_imc *imc, int chan)
{
	u32 mcmtr;

	mcmtr = I10NM_GET_MCMTR(imc, chan);
	edac_dbg(1, "ch%d mcmtr reg %x\n", chan, mcmtr);

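	/* Bit 2 of mcmtr is the per-channel ECC-enable flag, as in skx_edac */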
	return !!GET_BITFIELD(mcmtr, 2, 2);
}

static int i10nm_get_dimm_config(struct mem_ctl_info *mci,
				 struct res_config *cfg)
{
	struct skx_pvt *pvt = mci->pvt_info;
	struct skx_imc *imc = pvt->imc;
	u32 mtr, mcddrtcfg = 0;
	struct dimm_info *dimm;
	int i, j, ndimms;

	for (i = 0; i < imc->num_channels; i++) {
		if (!imc->mbase)
			continue;

		ndimms = 0;

		if (res_cfg->type != GNR)
			mcddrtcfg = I10NM_GET_MCDDRTCFG(imc, i);

		for (j = 0; j < imc->num_dimms; j++) {
			dimm = edac_get_dimm(mci, i, j, 0);
			mtr = I10NM_GET_DIMMMTR(imc, i, j);
			edac_dbg(1, "dimmmtr 0x%x mcddrtcfg 0x%x (mc%d ch%d dimm%d)\n",
				 mtr, mcddrtcfg, imc->mc, i, j);

			if (IS_DIMM_PRESENT(mtr))
				ndimms += skx_get_dimm_info(mtr, 0, 0, dimm,
							    imc, i, j, cfg);
			else if (IS_NVDIMM_PRESENT(mcddrtcfg, j))
				ndimms += skx_get_nvdimm_info(dimm, imc, i, j,
							      EDAC_MOD_STR);
		}
		if (ndimms && !i10nm_check_ecc(imc, i)) {
			i10nm_printk(KERN_ERR, "ECC is disabled on imc %d channel %d\n",
				     imc->mc, i);
			return -ENODEV;
		}
	}

	return 0;
}

static struct notifier_block i10nm_mce_dec = {
	.notifier_call	= skx_mce_check_error,
	.priority	= MCE_PRIO_EDAC,
};

static int __init i10nm_init(void)
{
	u8 mc = 0, src_id = 0, node_id = 0;
	const struct x86_cpu_id *id;
	struct res_config *cfg;
	const char *owner;
	struct skx_dev *d;
	int rc, i, off[3] = {0xd0, 0xc8, 0xcc};
	u64 tolm, tohm;
	int imc_num;

	edac_dbg(2, "\n");

	if (ghes_get_devices())
		return -EBUSY;

	owner = edac_get_owner();
	if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
		return -EBUSY;

	if (cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
		return -ENODEV;

	id = x86_match_cpu(i10nm_cpuids);
	if (!id)
		return -ENODEV;

	cfg = (struct res_config *)id->driver_data;
	skx_set_res_cfg(cfg);
	res_cfg = cfg;

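	/*
	 * 0x09a2 is the PCI device ID of the function that exposes the
	 * TOLM/TOHM registers; off[] gives their config-space offsets
	 * for skx_get_hi_lo() (shared helper in skx_common).
	 */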
	rc = skx_get_hi_lo(0x09a2, off, &tolm, &tohm);
	if (rc)
		return rc;

	rc = skx_get_all_bus_mappings(cfg, &i10nm_edac_list);
	if (rc < 0)
		goto fail;
	if (rc == 0) {
		i10nm_printk(KERN_ERR, "No memory controllers found\n");
		return -ENODEV;
	}

	rc = i10nm_get_imc_num(cfg);
	if (rc < 0)
		goto fail;

	mem_cfg_2lm = i10nm_check_2lm(cfg);
	skx_set_mem_cfg(mem_cfg_2lm);

	rc = i10nm_get_ddr_munits();

	if (i10nm_get_hbm_munits() && rc)
		goto fail;

	imc_num = res_cfg->ddr_imc_num + res_cfg->hbm_imc_num;

	list_for_each_entry(d, i10nm_edac_list, list) {
		rc = skx_get_src_id(d, 0xf8, &src_id);
		if (rc < 0)
			goto fail;

		rc = skx_get_node_id(d, &node_id);
		if (rc < 0)
			goto fail;

		edac_dbg(2, "src_id = %d node_id = %d\n", src_id, node_id);
		for (i = 0; i < imc_num; i++) {
			if (!d->imc[i].mdev)
				continue;

			d->imc[i].mc  = mc++;
			d->imc[i].lmc = i;
			d->imc[i].src_id  = src_id;
			d->imc[i].node_id = node_id;
			if (d->imc[i].hbm_mc) {
				d->imc[i].chan_mmio_sz = cfg->hbm_chan_mmio_sz;
				d->imc[i].num_channels = cfg->hbm_chan_num;
				d->imc[i].num_dimms    = cfg->hbm_dimm_num;
			} else {
				d->imc[i].chan_mmio_sz = cfg->ddr_chan_mmio_sz;
				d->imc[i].num_channels = cfg->ddr_chan_num;
				d->imc[i].num_dimms    = cfg->ddr_dimm_num;
			}

			rc = skx_register_mci(&d->imc[i], d->imc[i].mdev,
					      "Intel_10nm Socket", EDAC_MOD_STR,
					      i10nm_get_dimm_config, cfg);
			if (rc < 0)
				goto fail;
		}
	}

	rc = skx_adxl_get();
	if (rc)
		goto fail;

	opstate_init();
	mce_register_decode_chain(&i10nm_mce_dec);
	skx_setup_debug("i10nm_test");

	if (retry_rd_err_log && res_cfg->offsets_scrub && res_cfg->offsets_demand) {
		skx_set_decode(i10nm_mc_decode, show_retry_rd_err_log);
		if (retry_rd_err_log == 2)
			enable_retry_rd_err_log(true);
	} else {
		skx_set_decode(i10nm_mc_decode, NULL);
	}

	i10nm_printk(KERN_INFO, "%s\n", I10NM_REVISION);

	return 0;
fail:
	skx_remove();
	return rc;
}

static void __exit i10nm_exit(void)
{
	edac_dbg(2, "\n");

	if (retry_rd_err_log && res_cfg->offsets_scrub && res_cfg->offsets_demand) {
		skx_set_decode(NULL, NULL);
		if (retry_rd_err_log == 2)
			enable_retry_rd_err_log(false);
	}

	skx_teardown_debug();
	mce_unregister_decode_chain(&i10nm_mce_dec);
	skx_adxl_put();
	skx_remove();
}

module_init(i10nm_init);
module_exit(i10nm_exit);

static int set_decoding_via_mca(const char *buf, const struct kernel_param *kp)
{
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 0, &val);

	if (ret || val > 1)
		return -EINVAL;

	if (val && mem_cfg_2lm) {
		i10nm_printk(KERN_NOTICE, "Decoding errors via MCA banks for 2LM isn't supported yet\n");
		return -EIO;
	}

	ret = param_set_int(buf, kp);

	return ret;
}

static const struct kernel_param_ops decoding_via_mca_param_ops = {
	.set = set_decoding_via_mca,
	.get = param_get_int,
};

module_param_cb(decoding_via_mca, &decoding_via_mca_param_ops, &decoding_via_mca, 0644);
MODULE_PARM_DESC(decoding_via_mca, "decoding_via_mca: 0=off(default), 1=enable");

module_param(retry_rd_err_log, int, 0444);
MODULE_PARM_DESC(retry_rd_err_log, "retry_rd_err_log: 0=off(default), 1=bios(Linux doesn't reset any control bits, but just reports values.), 2=linux(Linux tries to take control and resets mode bits, clears valid/UC bits after reading.)");

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MC Driver for Intel 10nm server processors");