   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Driver for Pondicherry2 memory controller.
   4 *
   5 * Copyright (c) 2016, Intel Corporation.
   6 *
   7 * [Derived from sb_edac.c]
   8 *
   9 * Translation of system physical addresses to DIMM addresses
  10 * is a two stage process:
  11 *
  12 * First the Pondicherry 2 memory controller handles slice and channel interleaving
   13 * in "sys2pmi()". This is (almost) completely common between platforms.
  14 *
  15 * Then a platform specific dunit (DIMM unit) completes the process to provide DIMM,
  16 * rank, bank, row and column using the appropriate "dunit_ops" functions/parameters.
  17 */
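/*
 * Decode flow implemented below (see get_memory_error_data()):
 *   MCE address -> sys2pmi() -> (pmiaddr, pmiidx)
 *                -> ops->pmi2mem() -> {chan, dimm, rank, bank, row, col}
 */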
  18
  19#include <linux/bitmap.h>
  20#include <linux/delay.h>
  21#include <linux/edac.h>
  22#include <linux/init.h>
  23#include <linux/math64.h>
  24#include <linux/mmzone.h>
  25#include <linux/mod_devicetable.h>
  26#include <linux/module.h>
  27#include <linux/pci.h>
  28#include <linux/pci_ids.h>
  29#include <linux/sizes.h>
  30#include <linux/slab.h>
  31#include <linux/smp.h>
  32
  33#include <linux/platform_data/x86/p2sb.h>
  34
  35#include <asm/cpu_device_id.h>
  36#include <asm/intel-family.h>
  37#include <asm/processor.h>
  38#include <asm/mce.h>
  39
  40#include "edac_mc.h"
  41#include "edac_module.h"
  42#include "pnd2_edac.h"
  43
  44#define EDAC_MOD_STR		"pnd2_edac"
  45
  46#define APL_NUM_CHANNELS	4
  47#define DNV_NUM_CHANNELS	2
  48#define DNV_MAX_DIMMS		2 /* Max DIMMs per channel */
  49
  50enum type {
  51	APL,
  52	DNV, /* All requests go to PMI CH0 on each slice (CH1 disabled) */
  53};
  54
  55struct dram_addr {
  56	int chan;
  57	int dimm;
  58	int rank;
  59	int bank;
  60	int row;
  61	int col;
  62};
  63
  64struct pnd2_pvt {
  65	int dimm_geom[APL_NUM_CHANNELS];
  66	u64 tolm, tohm;
  67};
  68
  69/*
  70 * System address space is divided into multiple regions with
  71 * different interleave rules in each. The as0/as1 regions
  72 * have no interleaving at all. The as2 region is interleaved
  73 * between two channels. The mot region is magic and may overlap
  74 * other regions, with its interleave rules taking precedence.
  75 * Addresses not in any of these regions are interleaved across
  76 * all four channels.
  77 */
  78static struct region {
  79	u64	base;
  80	u64	limit;
  81	u8	enabled;
  82} mot, as0, as1, as2;
  83
  84static struct dunit_ops {
  85	char *name;
  86	enum type type;
  87	int pmiaddr_shift;
  88	int pmiidx_shift;
  89	int channels;
  90	int dimms_per_channel;
  91	int (*rd_reg)(int port, int off, int op, void *data, size_t sz, char *name);
  92	int (*get_registers)(void);
  93	int (*check_ecc)(void);
  94	void (*mk_region)(char *name, struct region *rp, void *asym);
  95	void (*get_dimm_config)(struct mem_ctl_info *mci);
  96	int (*pmi2mem)(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
  97				   struct dram_addr *daddr, char *msg);
  98} *ops;
  99
 100static struct mem_ctl_info *pnd2_mci;
 101
 102#define PND2_MSG_SIZE	256
 103
 104/* Debug macros */
 105#define pnd2_printk(level, fmt, arg...)			\
 106	edac_printk(level, "pnd2", fmt, ##arg)
 107
 108#define pnd2_mc_printk(mci, level, fmt, arg...)	\
 109	edac_mc_chipset_printk(mci, level, "pnd2", fmt, ##arg)
 110
 111#define MOT_CHAN_INTLV_BIT_1SLC_2CH 12
 112#define MOT_CHAN_INTLV_BIT_2SLC_2CH 13
 113#define SELECTOR_DISABLED (-1)
 114
 115#define PMI_ADDRESS_WIDTH	31
 116#define PND_MAX_PHYS_BIT	39
 117
 118#define APL_ASYMSHIFT		28
 119#define DNV_ASYMSHIFT		31
 120#define CH_HASH_MASK_LSB	6
 121#define SLICE_HASH_MASK_LSB	6
 122#define MOT_SLC_INTLV_BIT	12
 123#define LOG2_PMI_ADDR_GRANULARITY	5
 124#define MOT_SHIFT	24
 125
 126#define GET_BITFIELD(v, lo, hi)	(((v) & GENMASK_ULL(hi, lo)) >> (lo))
 127#define U64_LSHIFT(val, s)	((u64)(val) << (s))
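/* e.g. GET_BITFIELD(0xabcd, 4, 7) == 0xc; U64_LSHIFT(1, 32) == 0x100000000ull */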
 128
 129/*
 130 * On Apollo Lake we access memory controller registers via a
 131 * side-band mailbox style interface in a hidden PCI device
 132 * configuration space.
 133 */
 134static struct pci_bus	*p2sb_bus;
 135#define P2SB_DEVFN	PCI_DEVFN(0xd, 0)
 136#define P2SB_ADDR_OFF	0xd0
 137#define P2SB_DATA_OFF	0xd4
 138#define P2SB_STAT_OFF	0xd8
 139#define P2SB_ROUT_OFF	0xda
 140#define P2SB_EADD_OFF	0xdc
 141#define P2SB_HIDE_OFF	0xe1
 142
 143#define P2SB_BUSY	1
 144
 145#define P2SB_READ(size, off, ptr) \
 146	pci_bus_read_config_##size(p2sb_bus, P2SB_DEVFN, off, ptr)
 147#define P2SB_WRITE(size, off, val) \
 148	pci_bus_write_config_##size(p2sb_bus, P2SB_DEVFN, off, val)
 149
 150static bool p2sb_is_busy(u16 *status)
 151{
 152	P2SB_READ(word, P2SB_STAT_OFF, status);
 153
 154	return !!(*status & P2SB_BUSY);
 155}
 156
 157static int _apl_rd_reg(int port, int off, int op, u32 *data)
 158{
 159	int retries = 0xff, ret;
 160	u16 status;
 161	u8 hidden;
 162
 163	/* Unhide the P2SB device, if it's hidden */
 164	P2SB_READ(byte, P2SB_HIDE_OFF, &hidden);
 165	if (hidden)
 166		P2SB_WRITE(byte, P2SB_HIDE_OFF, 0);
 167
 168	if (p2sb_is_busy(&status)) {
 169		ret = -EAGAIN;
 170		goto out;
 171	}
 172
 173	P2SB_WRITE(dword, P2SB_ADDR_OFF, (port << 24) | off);
 174	P2SB_WRITE(dword, P2SB_DATA_OFF, 0);
 175	P2SB_WRITE(dword, P2SB_EADD_OFF, 0);
 176	P2SB_WRITE(word, P2SB_ROUT_OFF, 0);
 177	P2SB_WRITE(word, P2SB_STAT_OFF, (op << 8) | P2SB_BUSY);
 178
 179	while (p2sb_is_busy(&status)) {
 180		if (retries-- == 0) {
 181			ret = -EBUSY;
 182			goto out;
 183		}
 184	}
 185
 186	P2SB_READ(dword, P2SB_DATA_OFF, data);
 187	ret = (status >> 1) & GENMASK(1, 0);
 188out:
 189	/* Hide the P2SB device, if it was hidden before */
 190	if (hidden)
 191		P2SB_WRITE(byte, P2SB_HIDE_OFF, hidden);
 192
 193	return ret;
 194}
 195
 196static int apl_rd_reg(int port, int off, int op, void *data, size_t sz, char *name)
 197{
 198	int ret = 0;
 199
 200	edac_dbg(2, "Read %s port=%x off=%x op=%x\n", name, port, off, op);
 201	switch (sz) {
 202	case 8:
 203		ret = _apl_rd_reg(port, off + 4, op, (u32 *)(data + 4));
 204		fallthrough;
 205	case 4:
 206		ret |= _apl_rd_reg(port, off, op, (u32 *)data);
 207		pnd2_printk(KERN_DEBUG, "%s=%x%08x ret=%d\n", name,
 208					sz == 8 ? *((u32 *)(data + 4)) : 0, *((u32 *)data), ret);
 209		break;
 210	}
 211
 212	return ret;
 213}
 214
 215static u64 get_mem_ctrl_hub_base_addr(void)
 216{
 217	struct b_cr_mchbar_lo_pci lo;
 218	struct b_cr_mchbar_hi_pci hi;
 219	struct pci_dev *pdev;
 220
 221	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL);
 222	if (pdev) {
 223		pci_read_config_dword(pdev, 0x48, (u32 *)&lo);
 224		pci_read_config_dword(pdev, 0x4c, (u32 *)&hi);
 225		pci_dev_put(pdev);
 226	} else {
 227		return 0;
 228	}
 229
 230	if (!lo.enable) {
 231		edac_dbg(2, "MMIO via memory controller hub base address is disabled!\n");
 232		return 0;
 233	}
 234
 235	return U64_LSHIFT(hi.base, 32) | U64_LSHIFT(lo.base, 15);
 236}
 237
 238#define DNV_MCHBAR_SIZE  0x8000
 239#define DNV_SB_PORT_SIZE 0x10000
 240static int dnv_rd_reg(int port, int off, int op, void *data, size_t sz, char *name)
 241{
 242	struct pci_dev *pdev;
 243	void __iomem *base;
 244	struct resource r;
 245	int ret;
 246
 247	if (op == 4) {
 248		pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL);
 249		if (!pdev)
 250			return -ENODEV;
 251
 252		pci_read_config_dword(pdev, off, data);
 253		pci_dev_put(pdev);
 254	} else {
 255		/* MMIO via memory controller hub base address */
 256		if (op == 0 && port == 0x4c) {
 257			memset(&r, 0, sizeof(r));
 258
 259			r.start = get_mem_ctrl_hub_base_addr();
 260			if (!r.start)
 261				return -ENODEV;
 262			r.end = r.start + DNV_MCHBAR_SIZE - 1;
 263		} else {
 264			/* MMIO via sideband register base address */
 265			ret = p2sb_bar(NULL, 0, &r);
 266			if (ret)
 267				return ret;
 268
 269			r.start += (port << 16);
 270			r.end = r.start + DNV_SB_PORT_SIZE - 1;
 271		}
 272
 273		base = ioremap(r.start, resource_size(&r));
 274		if (!base)
 275			return -ENODEV;
 276
 277		if (sz == 8)
 278			*(u64 *)data = readq(base + off);
 279		else
 280			*(u32 *)data = readl(base + off);
 281
 282		iounmap(base);
 283	}
 284
 285	edac_dbg(2, "Read %s=%.8x_%.8x\n", name,
 286			(sz == 8) ? *(u32 *)(data + 4) : 0, *(u32 *)data);
 287
 288	return 0;
 289}
 290
 291#define RD_REGP(regp, regname, port)	\
 292	ops->rd_reg(port,					\
 293		regname##_offset,				\
 294		regname##_r_opcode,				\
 295		regp, sizeof(struct regname),	\
 296		#regname)
 297
 298#define RD_REG(regp, regname)			\
 299	ops->rd_reg(regname ## _port,		\
 300		regname##_offset,				\
 301		regname##_r_opcode,				\
 302		regp, sizeof(struct regname),	\
 303		#regname)
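/*
 * Illustrative expansion: RD_REG(&tolud, b_cr_tolud_pci) becomes
 * ops->rd_reg(b_cr_tolud_pci_port, b_cr_tolud_pci_offset,
 *	       b_cr_tolud_pci_r_opcode, &tolud,
 *	       sizeof(struct b_cr_tolud_pci), "b_cr_tolud_pci")
 */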
 304
 305static u64 top_lm, top_hm;
 306static bool two_slices;
 307static bool two_channels; /* Both PMI channels in one slice enabled */
 308
 309static u8 sym_chan_mask;
 310static u8 asym_chan_mask;
 311static unsigned long chan_mask;
 312
 313static int slice_selector = -1;
 314static int chan_selector = -1;
 315static u64 slice_hash_mask;
 316static u64 chan_hash_mask;
 317
 318static void mk_region(char *name, struct region *rp, u64 base, u64 limit)
 319{
 320	rp->enabled = 1;
 321	rp->base = base;
 322	rp->limit = limit;
 323	edac_dbg(2, "Region:%s [%llx, %llx]\n", name, base, limit);
 324}
 325
 326static void mk_region_mask(char *name, struct region *rp, u64 base, u64 mask)
 327{
 328	if (mask == 0) {
 329		pr_info(FW_BUG "MOT mask cannot be zero\n");
 330		return;
 331	}
 332	if (mask != GENMASK_ULL(PND_MAX_PHYS_BIT, __ffs(mask))) {
 333		pr_info(FW_BUG "MOT mask is invalid\n");
 334		return;
 335	}
 336	if (base & ~mask) {
 337		pr_info(FW_BUG "MOT region base/mask alignment error\n");
 338		return;
 339	}
 340	rp->base = base;
 341	rp->limit = (base | ~mask) & GENMASK_ULL(PND_MAX_PHYS_BIT, 0);
 342	rp->enabled = 1;
 343	edac_dbg(2, "Region:%s [%llx, %llx]\n", name, base, rp->limit);
 344}
 345
 346static bool in_region(struct region *rp, u64 addr)
 347{
 348	if (!rp->enabled)
 349		return false;
 350
 351	return rp->base <= addr && addr <= rp->limit;
 352}
 353
 354static int gen_sym_mask(struct b_cr_slice_channel_hash *p)
 355{
 356	int mask = 0;
 357
 358	if (!p->slice_0_mem_disabled)
 359		mask |= p->sym_slice0_channel_enabled;
 360
 361	if (!p->slice_1_disabled)
 362		mask |= p->sym_slice1_channel_enabled << 2;
 363
 364	if (p->ch_1_disabled || p->enable_pmi_dual_data_mode)
 365		mask &= 0x5;
 366
 367	return mask;
 368}
 369
 370static int gen_asym_mask(struct b_cr_slice_channel_hash *p,
 371			 struct b_cr_asym_mem_region0_mchbar *as0,
 372			 struct b_cr_asym_mem_region1_mchbar *as1,
 373			 struct b_cr_asym_2way_mem_region_mchbar *as2way)
 374{
 375	const int intlv[] = { 0x5, 0xA, 0x3, 0xC };
 376	int mask = 0;
 377
 378	if (as2way->asym_2way_interleave_enable)
 379		mask = intlv[as2way->asym_2way_intlv_mode];
 380	if (as0->slice0_asym_enable)
 381		mask |= (1 << as0->slice0_asym_channel_select);
 382	if (as1->slice1_asym_enable)
 383		mask |= (4 << as1->slice1_asym_channel_select);
 384	if (p->slice_0_mem_disabled)
 385		mask &= 0xc;
 386	if (p->slice_1_disabled)
 387		mask &= 0x3;
 388	if (p->ch_1_disabled || p->enable_pmi_dual_data_mode)
 389		mask &= 0x5;
 390
 391	return mask;
 392}
 393
 394static struct b_cr_tolud_pci tolud;
 395static struct b_cr_touud_lo_pci touud_lo;
 396static struct b_cr_touud_hi_pci touud_hi;
 397static struct b_cr_asym_mem_region0_mchbar asym0;
 398static struct b_cr_asym_mem_region1_mchbar asym1;
 399static struct b_cr_asym_2way_mem_region_mchbar asym_2way;
 400static struct b_cr_mot_out_base_mchbar mot_base;
 401static struct b_cr_mot_out_mask_mchbar mot_mask;
 402static struct b_cr_slice_channel_hash chash;
 403
 404/* Apollo Lake dunit */
 405/*
  406 * Validated on a board with just two DIMMs in the [0] and [2]
  407 * positions in this array. The other port numbers match the
  408 * documentation, but caution is advised.
 409 */
 410static const int apl_dports[APL_NUM_CHANNELS] = { 0x18, 0x10, 0x11, 0x19 };
 411static struct d_cr_drp0 drp0[APL_NUM_CHANNELS];
 412
 413/* Denverton dunit */
 414static const int dnv_dports[DNV_NUM_CHANNELS] = { 0x10, 0x12 };
 415static struct d_cr_dsch dsch;
 416static struct d_cr_ecc_ctrl ecc_ctrl[DNV_NUM_CHANNELS];
 417static struct d_cr_drp drp[DNV_NUM_CHANNELS];
 418static struct d_cr_dmap dmap[DNV_NUM_CHANNELS];
 419static struct d_cr_dmap1 dmap1[DNV_NUM_CHANNELS];
 420static struct d_cr_dmap2 dmap2[DNV_NUM_CHANNELS];
 421static struct d_cr_dmap3 dmap3[DNV_NUM_CHANNELS];
 422static struct d_cr_dmap4 dmap4[DNV_NUM_CHANNELS];
 423static struct d_cr_dmap5 dmap5[DNV_NUM_CHANNELS];
 424
 425static void apl_mk_region(char *name, struct region *rp, void *asym)
 426{
 427	struct b_cr_asym_mem_region0_mchbar *a = asym;
 428
 429	mk_region(name, rp,
 430			  U64_LSHIFT(a->slice0_asym_base, APL_ASYMSHIFT),
 431			  U64_LSHIFT(a->slice0_asym_limit, APL_ASYMSHIFT) +
 432			  GENMASK_ULL(APL_ASYMSHIFT - 1, 0));
 433}
 434
 435static void dnv_mk_region(char *name, struct region *rp, void *asym)
 436{
 437	struct b_cr_asym_mem_region_denverton *a = asym;
 438
 439	mk_region(name, rp,
 440			  U64_LSHIFT(a->slice_asym_base, DNV_ASYMSHIFT),
 441			  U64_LSHIFT(a->slice_asym_limit, DNV_ASYMSHIFT) +
 442			  GENMASK_ULL(DNV_ASYMSHIFT - 1, 0));
 443}
 444
 445static int apl_get_registers(void)
 446{
 447	int ret = -ENODEV;
 448	int i;
 449
 450	if (RD_REG(&asym_2way, b_cr_asym_2way_mem_region_mchbar))
 451		return -ENODEV;
 452
 453	/*
 454	 * RD_REGP() will fail for unpopulated or non-existent
 455	 * DIMM slots. Return success if we find at least one DIMM.
 456	 */
 457	for (i = 0; i < APL_NUM_CHANNELS; i++)
 458		if (!RD_REGP(&drp0[i], d_cr_drp0, apl_dports[i]))
 459			ret = 0;
 460
 461	return ret;
 462}
 463
 464static int dnv_get_registers(void)
 465{
 466	int i;
 467
 468	if (RD_REG(&dsch, d_cr_dsch))
 469		return -ENODEV;
 470
 471	for (i = 0; i < DNV_NUM_CHANNELS; i++)
 472		if (RD_REGP(&ecc_ctrl[i], d_cr_ecc_ctrl, dnv_dports[i]) ||
 473			RD_REGP(&drp[i], d_cr_drp, dnv_dports[i]) ||
 474			RD_REGP(&dmap[i], d_cr_dmap, dnv_dports[i]) ||
 475			RD_REGP(&dmap1[i], d_cr_dmap1, dnv_dports[i]) ||
 476			RD_REGP(&dmap2[i], d_cr_dmap2, dnv_dports[i]) ||
 477			RD_REGP(&dmap3[i], d_cr_dmap3, dnv_dports[i]) ||
 478			RD_REGP(&dmap4[i], d_cr_dmap4, dnv_dports[i]) ||
 479			RD_REGP(&dmap5[i], d_cr_dmap5, dnv_dports[i]))
 480			return -ENODEV;
 481
 482	return 0;
 483}
 484
 485/*
 486 * Read all the h/w config registers once here (they don't
  487 * change at run time). Figure out which address ranges have
 488 * which interleave characteristics.
 489 */
 490static int get_registers(void)
 491{
 492	const int intlv[] = { 10, 11, 12, 12 };
 493
 494	if (RD_REG(&tolud, b_cr_tolud_pci) ||
 495		RD_REG(&touud_lo, b_cr_touud_lo_pci) ||
 496		RD_REG(&touud_hi, b_cr_touud_hi_pci) ||
 497		RD_REG(&asym0, b_cr_asym_mem_region0_mchbar) ||
 498		RD_REG(&asym1, b_cr_asym_mem_region1_mchbar) ||
 499		RD_REG(&mot_base, b_cr_mot_out_base_mchbar) ||
 500		RD_REG(&mot_mask, b_cr_mot_out_mask_mchbar) ||
 501		RD_REG(&chash, b_cr_slice_channel_hash))
 502		return -ENODEV;
 503
 504	if (ops->get_registers())
 505		return -ENODEV;
 506
 507	if (ops->type == DNV) {
 508		/* PMI channel idx (always 0) for asymmetric region */
 509		asym0.slice0_asym_channel_select = 0;
 510		asym1.slice1_asym_channel_select = 0;
 511		/* PMI channel bitmap (always 1) for symmetric region */
 512		chash.sym_slice0_channel_enabled = 0x1;
 513		chash.sym_slice1_channel_enabled = 0x1;
 514	}
 515
 516	if (asym0.slice0_asym_enable)
 517		ops->mk_region("as0", &as0, &asym0);
 518
 519	if (asym1.slice1_asym_enable)
 520		ops->mk_region("as1", &as1, &asym1);
 521
 522	if (asym_2way.asym_2way_interleave_enable) {
 523		mk_region("as2way", &as2,
 524				  U64_LSHIFT(asym_2way.asym_2way_base, APL_ASYMSHIFT),
 525				  U64_LSHIFT(asym_2way.asym_2way_limit, APL_ASYMSHIFT) +
 526				  GENMASK_ULL(APL_ASYMSHIFT - 1, 0));
 527	}
 528
 529	if (mot_base.imr_en) {
 530		mk_region_mask("mot", &mot,
 531					   U64_LSHIFT(mot_base.mot_out_base, MOT_SHIFT),
 532					   U64_LSHIFT(mot_mask.mot_out_mask, MOT_SHIFT));
 533	}
 534
 535	top_lm = U64_LSHIFT(tolud.tolud, 20);
 536	top_hm = U64_LSHIFT(touud_hi.touud, 32) | U64_LSHIFT(touud_lo.touud, 20);
 537
 538	two_slices = !chash.slice_1_disabled &&
 539				 !chash.slice_0_mem_disabled &&
 540				 (chash.sym_slice0_channel_enabled != 0) &&
 541				 (chash.sym_slice1_channel_enabled != 0);
 542	two_channels = !chash.ch_1_disabled &&
 543				 !chash.enable_pmi_dual_data_mode &&
 544				 ((chash.sym_slice0_channel_enabled == 3) ||
 545				 (chash.sym_slice1_channel_enabled == 3));
 546
 547	sym_chan_mask = gen_sym_mask(&chash);
 548	asym_chan_mask = gen_asym_mask(&chash, &asym0, &asym1, &asym_2way);
 549	chan_mask = sym_chan_mask | asym_chan_mask;
 550
 551	if (two_slices && !two_channels) {
 552		if (chash.hvm_mode)
 553			slice_selector = 29;
 554		else
 555			slice_selector = intlv[chash.interleave_mode];
 556	} else if (!two_slices && two_channels) {
 557		if (chash.hvm_mode)
 558			chan_selector = 29;
 559		else
 560			chan_selector = intlv[chash.interleave_mode];
 561	} else if (two_slices && two_channels) {
 562		if (chash.hvm_mode) {
 563			slice_selector = 29;
 564			chan_selector = 30;
 565		} else {
 566			slice_selector = intlv[chash.interleave_mode];
 567			chan_selector = intlv[chash.interleave_mode] + 1;
 568		}
 569	}
 570
 571	if (two_slices) {
 572		if (!chash.hvm_mode)
 573			slice_hash_mask = chash.slice_hash_mask << SLICE_HASH_MASK_LSB;
 574		if (!two_channels)
 575			slice_hash_mask |= BIT_ULL(slice_selector);
 576	}
 577
 578	if (two_channels) {
 579		if (!chash.hvm_mode)
 580			chan_hash_mask = chash.ch_hash_mask << CH_HASH_MASK_LSB;
 581		if (!two_slices)
 582			chan_hash_mask |= BIT_ULL(chan_selector);
 583	}
 584
 585	return 0;
 586}
 587
 588/* Get a contiguous memory address (remove the MMIO gap) */
 589static u64 remove_mmio_gap(u64 sys)
 590{
 591	return (sys < SZ_4G) ? sys : sys - (SZ_4G - top_lm);
 592}
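/*
 * Illustrative example: with top_lm = 2GB, a system address of 5GB sits
 * above the MMIO gap and maps to 5GB - (4GB - 2GB) = 3GB of contiguous DRAM.
 */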
 593
 594/* Squeeze out one address bit, shift upper part down to fill gap */
 595static void remove_addr_bit(u64 *addr, int bitidx)
 596{
 597	u64	mask;
 598
 599	if (bitidx == -1)
 600		return;
 601
 602	mask = BIT_ULL(bitidx) - 1;
 603	*addr = ((*addr >> 1) & ~mask) | (*addr & mask);
 604}
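/*
 * Illustrative example: removing bit 2 from 0b101101 keeps bits [1:0]
 * (0b01), shifts bits [5:3] (0b101) down one place, and yields 0b10101.
 */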
 605
  606/* XOR together the bits of addr selected by mask */
 607static int hash_by_mask(u64 addr, u64 mask)
 608{
 609	u64 result = addr & mask;
 610
 611	result = (result >> 32) ^ result;
 612	result = (result >> 16) ^ result;
 613	result = (result >> 8) ^ result;
 614	result = (result >> 4) ^ result;
 615	result = (result >> 2) ^ result;
 616	result = (result >> 1) ^ result;
 617
 618	return (int)result & 1;
 619}
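/*
 * Illustrative example: hash_by_mask(0xa, 0xf) folds 0b1010, which has an
 * even number of set bits, so the result is 0.
 */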
 620
 621/*
 622 * First stage decode. Take the system address and figure out which
 623 * second stage will deal with it based on interleave modes.
 624 */
 625static int sys2pmi(const u64 addr, u32 *pmiidx, u64 *pmiaddr, char *msg)
 626{
 627	u64 contig_addr, contig_base, contig_offset, contig_base_adj;
 628	int mot_intlv_bit = two_slices ? MOT_CHAN_INTLV_BIT_2SLC_2CH :
 629						MOT_CHAN_INTLV_BIT_1SLC_2CH;
 630	int slice_intlv_bit_rm = SELECTOR_DISABLED;
 631	int chan_intlv_bit_rm = SELECTOR_DISABLED;
 632	/* Determine if address is in the MOT region. */
 633	bool mot_hit = in_region(&mot, addr);
  634	/* Calculate the number of symmetric channels enabled. */
 635	int sym_channels = hweight8(sym_chan_mask);
 636
 637	/*
 638	 * The amount we need to shift the asym base can be determined by the
 639	 * number of enabled symmetric channels.
 640	 * NOTE: This can only work because symmetric memory is not supposed
 641	 * to do a 3-way interleave.
 642	 */
 643	int sym_chan_shift = sym_channels >> 1;
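	/* e.g. 4 symmetric channels -> shift by 2; 2 channels -> shift by 1 */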
 644
 645	/* Give up if address is out of range, or in MMIO gap */
 646	if (addr >= BIT(PND_MAX_PHYS_BIT) ||
 647	   (addr >= top_lm && addr < SZ_4G) || addr >= top_hm) {
 648		snprintf(msg, PND2_MSG_SIZE, "Error address 0x%llx is not DRAM", addr);
 649		return -EINVAL;
 650	}
 651
 652	/* Get a contiguous memory address (remove the MMIO gap) */
 653	contig_addr = remove_mmio_gap(addr);
 654
 655	if (in_region(&as0, addr)) {
 656		*pmiidx = asym0.slice0_asym_channel_select;
 657
 658		contig_base = remove_mmio_gap(as0.base);
 659		contig_offset = contig_addr - contig_base;
 660		contig_base_adj = (contig_base >> sym_chan_shift) *
 661						  ((chash.sym_slice0_channel_enabled >> (*pmiidx & 1)) & 1);
 662		contig_addr = contig_offset + ((sym_channels > 0) ? contig_base_adj : 0ull);
 663	} else if (in_region(&as1, addr)) {
 664		*pmiidx = 2u + asym1.slice1_asym_channel_select;
 665
 666		contig_base = remove_mmio_gap(as1.base);
 667		contig_offset = contig_addr - contig_base;
 668		contig_base_adj = (contig_base >> sym_chan_shift) *
 669						  ((chash.sym_slice1_channel_enabled >> (*pmiidx & 1)) & 1);
 670		contig_addr = contig_offset + ((sym_channels > 0) ? contig_base_adj : 0ull);
 671	} else if (in_region(&as2, addr) && (asym_2way.asym_2way_intlv_mode == 0x3ul)) {
 672		bool channel1;
 673
 674		mot_intlv_bit = MOT_CHAN_INTLV_BIT_1SLC_2CH;
 675		*pmiidx = (asym_2way.asym_2way_intlv_mode & 1) << 1;
 676		channel1 = mot_hit ? ((bool)((addr >> mot_intlv_bit) & 1)) :
 677			hash_by_mask(contig_addr, chan_hash_mask);
 678		*pmiidx |= (u32)channel1;
 679
 680		contig_base = remove_mmio_gap(as2.base);
 681		chan_intlv_bit_rm = mot_hit ? mot_intlv_bit : chan_selector;
 682		contig_offset = contig_addr - contig_base;
 683		remove_addr_bit(&contig_offset, chan_intlv_bit_rm);
 684		contig_addr = (contig_base >> sym_chan_shift) + contig_offset;
 685	} else {
 686		/* Otherwise we're in normal, boring symmetric mode. */
 687		*pmiidx = 0u;
 688
 689		if (two_slices) {
 690			bool slice1;
 691
 692			if (mot_hit) {
 693				slice_intlv_bit_rm = MOT_SLC_INTLV_BIT;
 694				slice1 = (addr >> MOT_SLC_INTLV_BIT) & 1;
 695			} else {
 696				slice_intlv_bit_rm = slice_selector;
 697				slice1 = hash_by_mask(addr, slice_hash_mask);
 698			}
 699
 700			*pmiidx = (u32)slice1 << 1;
 701		}
 702
 703		if (two_channels) {
 704			bool channel1;
 705
 706			mot_intlv_bit = two_slices ? MOT_CHAN_INTLV_BIT_2SLC_2CH :
 707							MOT_CHAN_INTLV_BIT_1SLC_2CH;
 708
 709			if (mot_hit) {
 710				chan_intlv_bit_rm = mot_intlv_bit;
 711				channel1 = (addr >> mot_intlv_bit) & 1;
 712			} else {
 713				chan_intlv_bit_rm = chan_selector;
 714				channel1 = hash_by_mask(contig_addr, chan_hash_mask);
 715			}
 716
 717			*pmiidx |= (u32)channel1;
 718		}
 719	}
 720
 721	/* Remove the chan_selector bit first */
 722	remove_addr_bit(&contig_addr, chan_intlv_bit_rm);
  723	/* Remove the slice bit (we remove it second because it must be lower) */
 724	remove_addr_bit(&contig_addr, slice_intlv_bit_rm);
 725	*pmiaddr = contig_addr;
 726
 727	return 0;
 728}
 729
 730/* Translate PMI address to memory (rank, row, bank, column) */
 731#define C(n) (BIT(4) | (n))	/* column */
 732#define B(n) (BIT(5) | (n))	/* bank */
 733#define R(n) (BIT(6) | (n))	/* row */
 734#define RS   (BIT(7))		/* rank */
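/*
 * Illustrative example: an entry of C(9) in "bits[]" below means that PMI
 * address bit maps to column address bit 9. apl_pmi2mem() splits each
 * entry into a type (bits 4-7) and an index (bits 0-3).
 */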
 735
 736/* addrdec values */
 737#define AMAP_1KB	0
 738#define AMAP_2KB	1
 739#define AMAP_4KB	2
 740#define AMAP_RSVD	3
 741
 742/* dden values */
 743#define DEN_4Gb		0
 744#define DEN_8Gb		2
 745
 746/* dwid values */
 747#define X8		0
 748#define X16		1
 749
 750static struct dimm_geometry {
 751	u8	addrdec;
 752	u8	dden;
 753	u8	dwid;
 754	u8	rowbits, colbits;
 755	u16	bits[PMI_ADDRESS_WIDTH];
 756} dimms[] = {
 757	{
 758		.addrdec = AMAP_1KB, .dden = DEN_4Gb, .dwid = X16,
 759		.rowbits = 15, .colbits = 10,
 760		.bits = {
 761			C(2),  C(3),  C(4),  C(5),  C(6),  B(0),  B(1),  B(2),  R(0),
 762			R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),  R(9),
 763			R(10), C(7),  C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
 764			0,     0,     0,     0
 765		}
 766	},
 767	{
 768		.addrdec = AMAP_1KB, .dden = DEN_4Gb, .dwid = X8,
 769		.rowbits = 16, .colbits = 10,
 770		.bits = {
 771			C(2),  C(3),  C(4),  C(5),  C(6),  B(0),  B(1),  B(2),  R(0),
 772			R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),  R(9),
 773			R(10), C(7),  C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
 774			R(15), 0,     0,     0
 775		}
 776	},
 777	{
 778		.addrdec = AMAP_1KB, .dden = DEN_8Gb, .dwid = X16,
 779		.rowbits = 16, .colbits = 10,
 780		.bits = {
 781			C(2),  C(3),  C(4),  C(5),  C(6),  B(0),  B(1),  B(2),  R(0),
 782			R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),  R(9),
 783			R(10), C(7),  C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
 784			R(15), 0,     0,     0
 785		}
 786	},
 787	{
 788		.addrdec = AMAP_1KB, .dden = DEN_8Gb, .dwid = X8,
 789		.rowbits = 16, .colbits = 11,
 790		.bits = {
 791			C(2),  C(3),  C(4),  C(5),  C(6),  B(0),  B(1),  B(2),  R(0),
 792			R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),  R(9),
 793			R(10), C(7),  C(8),  C(9),  R(11), RS,    C(11), R(12), R(13),
 794			R(14), R(15), 0,     0
 795		}
 796	},
 797	{
 798		.addrdec = AMAP_2KB, .dden = DEN_4Gb, .dwid = X16,
 799		.rowbits = 15, .colbits = 10,
 800		.bits = {
 801			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  B(0),  B(1),  B(2),
 802			R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),
 803			R(9),  R(10), C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
 804			0,     0,     0,     0
 805		}
 806	},
 807	{
 808		.addrdec = AMAP_2KB, .dden = DEN_4Gb, .dwid = X8,
 809		.rowbits = 16, .colbits = 10,
 810		.bits = {
 811			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  B(0),  B(1),  B(2),
 812			R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),
 813			R(9),  R(10), C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
 814			R(15), 0,     0,     0
 815		}
 816	},
 817	{
 818		.addrdec = AMAP_2KB, .dden = DEN_8Gb, .dwid = X16,
 819		.rowbits = 16, .colbits = 10,
 820		.bits = {
 821			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  B(0),  B(1),  B(2),
 822			R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),
 823			R(9),  R(10), C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
 824			R(15), 0,     0,     0
 825		}
 826	},
 827	{
 828		.addrdec = AMAP_2KB, .dden = DEN_8Gb, .dwid = X8,
 829		.rowbits = 16, .colbits = 11,
 830		.bits = {
 831			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  B(0),  B(1),  B(2),
 832			R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),
 833			R(9),  R(10), C(8),  C(9),  R(11), RS,    C(11), R(12), R(13),
 834			R(14), R(15), 0,     0
 835		}
 836	},
 837	{
 838		.addrdec = AMAP_4KB, .dden = DEN_4Gb, .dwid = X16,
 839		.rowbits = 15, .colbits = 10,
 840		.bits = {
 841			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  C(8),  B(0),  B(1),
 842			B(2),  R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
 843			R(8),  R(9),  R(10), C(9),  R(11), RS,    R(12), R(13), R(14),
 844			0,     0,     0,     0
 845		}
 846	},
 847	{
 848		.addrdec = AMAP_4KB, .dden = DEN_4Gb, .dwid = X8,
 849		.rowbits = 16, .colbits = 10,
 850		.bits = {
 851			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  C(8),  B(0),  B(1),
 852			B(2),  R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
 853			R(8),  R(9),  R(10), C(9),  R(11), RS,    R(12), R(13), R(14),
 854			R(15), 0,     0,     0
 855		}
 856	},
 857	{
 858		.addrdec = AMAP_4KB, .dden = DEN_8Gb, .dwid = X16,
 859		.rowbits = 16, .colbits = 10,
 860		.bits = {
 861			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  C(8),  B(0),  B(1),
 862			B(2),  R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
 863			R(8),  R(9),  R(10), C(9),  R(11), RS,    R(12), R(13), R(14),
 864			R(15), 0,     0,     0
 865		}
 866	},
 867	{
 868		.addrdec = AMAP_4KB, .dden = DEN_8Gb, .dwid = X8,
 869		.rowbits = 16, .colbits = 11,
 870		.bits = {
 871			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  C(8),  B(0),  B(1),
 872			B(2),  R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
 873			R(8),  R(9),  R(10), C(9),  R(11), RS,    C(11), R(12), R(13),
 874			R(14), R(15), 0,     0
 875		}
 876	}
 877};
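/*
 * apl_get_dimm_config() selects an entry from this table by matching the
 * (addrdec, dden, dwid) triple read from each channel's d_cr_drp0 register.
 */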
 878
 879static int bank_hash(u64 pmiaddr, int idx, int shft)
 880{
 881	int bhash = 0;
 882
 883	switch (idx) {
 884	case 0:
 885		bhash ^= ((pmiaddr >> (12 + shft)) ^ (pmiaddr >> (9 + shft))) & 1;
 886		break;
 887	case 1:
 888		bhash ^= (((pmiaddr >> (10 + shft)) ^ (pmiaddr >> (8 + shft))) & 1) << 1;
 889		bhash ^= ((pmiaddr >> 22) & 1) << 1;
 890		break;
 891	case 2:
 892		bhash ^= (((pmiaddr >> (13 + shft)) ^ (pmiaddr >> (11 + shft))) & 1) << 2;
 893		break;
 894	}
 895
 896	return bhash;
 897}
 898
 899static int rank_hash(u64 pmiaddr)
 900{
 901	return ((pmiaddr >> 16) ^ (pmiaddr >> 10)) & 1;
 902}
 903
 904/* Second stage decode. Compute rank, bank, row & column. */
 905static int apl_pmi2mem(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
 906		       struct dram_addr *daddr, char *msg)
 907{
 908	struct d_cr_drp0 *cr_drp0 = &drp0[pmiidx];
 909	struct pnd2_pvt *pvt = mci->pvt_info;
 910	int g = pvt->dimm_geom[pmiidx];
 911	struct dimm_geometry *d = &dimms[g];
 912	int column = 0, bank = 0, row = 0, rank = 0;
 913	int i, idx, type, skiprs = 0;
 914
 915	for (i = 0; i < PMI_ADDRESS_WIDTH; i++) {
 916		int	bit = (pmiaddr >> i) & 1;
 917
 918		if (i + skiprs >= PMI_ADDRESS_WIDTH) {
 919			snprintf(msg, PND2_MSG_SIZE, "Bad dimm_geometry[] table\n");
 920			return -EINVAL;
 921		}
 922
 923		type = d->bits[i + skiprs] & ~0xf;
 924		idx = d->bits[i + skiprs] & 0xf;
 925
 926		/*
 927		 * On single rank DIMMs ignore the rank select bit
 928		 * and shift remainder of "bits[]" down one place.
 929		 */
 930		if (type == RS && (cr_drp0->rken0 + cr_drp0->rken1) == 1) {
 931			skiprs = 1;
 932			type = d->bits[i + skiprs] & ~0xf;
 933			idx = d->bits[i + skiprs] & 0xf;
 934		}
 935
 936		switch (type) {
 937		case C(0):
 938			column |= (bit << idx);
 939			break;
 940		case B(0):
 941			bank |= (bit << idx);
 942			if (cr_drp0->bahen)
 943				bank ^= bank_hash(pmiaddr, idx, d->addrdec);
 944			break;
 945		case R(0):
 946			row |= (bit << idx);
 947			break;
 948		case RS:
 949			rank = bit;
 950			if (cr_drp0->rsien)
 951				rank ^= rank_hash(pmiaddr);
 952			break;
 953		default:
 954			if (bit) {
 955				snprintf(msg, PND2_MSG_SIZE, "Bad translation\n");
 956				return -EINVAL;
 957			}
 958			goto done;
 959		}
 960	}
 961
 962done:
 963	daddr->col = column;
 964	daddr->bank = bank;
 965	daddr->row = row;
 966	daddr->rank = rank;
 967	daddr->dimm = 0;
 968
 969	return 0;
 970}
 971
 972/* Pluck bit "in" from pmiaddr and return value shifted to bit "out" */
 973#define dnv_get_bit(pmi, in, out) ((int)(((pmi) >> (in)) & 1u) << (out))
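/* e.g. dnv_get_bit(0x4, 2, 5) == 0x20: bit 2 of pmi, returned at bit 5 */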
 974
 975static int dnv_pmi2mem(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
 976					   struct dram_addr *daddr, char *msg)
 977{
 978	/* Rank 0 or 1 */
 979	daddr->rank = dnv_get_bit(pmiaddr, dmap[pmiidx].rs0 + 13, 0);
 980	/* Rank 2 or 3 */
 981	daddr->rank |= dnv_get_bit(pmiaddr, dmap[pmiidx].rs1 + 13, 1);
 982
 983	/*
 984	 * Normally ranks 0,1 are DIMM0, and 2,3 are DIMM1, but we
 985	 * flip them if DIMM1 is larger than DIMM0.
 986	 */
 987	daddr->dimm = (daddr->rank >= 2) ^ drp[pmiidx].dimmflip;
 988
 989	daddr->bank = dnv_get_bit(pmiaddr, dmap[pmiidx].ba0 + 6, 0);
 990	daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].ba1 + 6, 1);
 991	daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].bg0 + 6, 2);
 992	if (dsch.ddr4en)
 993		daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].bg1 + 6, 3);
 994	if (dmap1[pmiidx].bxor) {
 995		if (dsch.ddr4en) {
 996			daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 0);
 997			daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row7 + 6, 1);
 998			if (dsch.chan_width == 0)
 999				/* 64/72 bit dram channel width */
1000				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 2);
1001			else
1002				/* 32/40 bit dram channel width */
1003				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 2);
1004			daddr->bank ^= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 3);
1005		} else {
1006			daddr->bank ^= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 0);
1007			daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 1);
1008			if (dsch.chan_width == 0)
1009				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 2);
1010			else
1011				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 2);
1012		}
1013	}
1014
1015	daddr->row = dnv_get_bit(pmiaddr, dmap2[pmiidx].row0 + 6, 0);
1016	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row1 + 6, 1);
1017	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 2);
1018	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row3 + 6, 3);
1019	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row4 + 6, 4);
1020	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row5 + 6, 5);
1021	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 6);
1022	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row7 + 6, 7);
1023	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row8 + 6, 8);
1024	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row9 + 6, 9);
1025	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row10 + 6, 10);
1026	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row11 + 6, 11);
1027	daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row12 + 6, 12);
1028	daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row13 + 6, 13);
1029	if (dmap4[pmiidx].row14 != 31)
1030		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row14 + 6, 14);
1031	if (dmap4[pmiidx].row15 != 31)
1032		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row15 + 6, 15);
1033	if (dmap4[pmiidx].row16 != 31)
1034		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row16 + 6, 16);
1035	if (dmap4[pmiidx].row17 != 31)
1036		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row17 + 6, 17);
1037
1038	daddr->col = dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 3);
1039	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 4);
1040	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca5 + 6, 5);
1041	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca6 + 6, 6);
1042	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca7 + 6, 7);
1043	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca8 + 6, 8);
1044	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca9 + 6, 9);
1045	if (!dsch.ddr4en && dmap1[pmiidx].ca11 != 0x3f)
1046		daddr->col |= dnv_get_bit(pmiaddr, dmap1[pmiidx].ca11 + 13, 11);
1047
1048	return 0;
1049}
1050
1051static int check_channel(int ch)
1052{
1053	if (drp0[ch].dramtype != 0) {
1054		pnd2_printk(KERN_INFO, "Unsupported DIMM in channel %d\n", ch);
1055		return 1;
1056	} else if (drp0[ch].eccen == 0) {
1057		pnd2_printk(KERN_INFO, "ECC disabled on channel %d\n", ch);
1058		return 1;
1059	}
1060	return 0;
1061}
1062
1063static int apl_check_ecc_active(void)
1064{
1065	int	i, ret = 0;
1066
1067	/* Check dramtype and ECC mode for each present DIMM */
1068	for_each_set_bit(i, &chan_mask, APL_NUM_CHANNELS)
1069		ret += check_channel(i);
1070
1071	return ret ? -EINVAL : 0;
1072}
1073
1074#define DIMMS_PRESENT(d) ((d)->rken0 + (d)->rken1 + (d)->rken2 + (d)->rken3)
1075
1076static int check_unit(int ch)
1077{
1078	struct d_cr_drp *d = &drp[ch];
1079
1080	if (DIMMS_PRESENT(d) && !ecc_ctrl[ch].eccen) {
1081		pnd2_printk(KERN_INFO, "ECC disabled on channel %d\n", ch);
1082		return 1;
1083	}
1084	return 0;
1085}
1086
1087static int dnv_check_ecc_active(void)
1088{
1089	int	i, ret = 0;
1090
1091	for (i = 0; i < DNV_NUM_CHANNELS; i++)
1092		ret += check_unit(i);
1093	return ret ? -EINVAL : 0;
1094}
1095
1096static int get_memory_error_data(struct mem_ctl_info *mci, u64 addr,
1097								 struct dram_addr *daddr, char *msg)
1098{
1099	u64	pmiaddr;
1100	u32	pmiidx;
1101	int	ret;
1102
1103	ret = sys2pmi(addr, &pmiidx, &pmiaddr, msg);
1104	if (ret)
1105		return ret;
1106
1107	pmiaddr >>= ops->pmiaddr_shift;
1108	/* pmi channel idx to dimm channel idx */
1109	pmiidx >>= ops->pmiidx_shift;
1110	daddr->chan = pmiidx;
1111
1112	ret = ops->pmi2mem(mci, pmiaddr, pmiidx, daddr, msg);
1113	if (ret)
1114		return ret;
1115
1116	edac_dbg(0, "SysAddr=%llx PmiAddr=%llx Channel=%d DIMM=%d Rank=%d Bank=%d Row=%d Column=%d\n",
1117			 addr, pmiaddr, daddr->chan, daddr->dimm, daddr->rank, daddr->bank, daddr->row, daddr->col);
1118
1119	return 0;
1120}
1121
1122static void pnd2_mce_output_error(struct mem_ctl_info *mci, const struct mce *m,
1123				  struct dram_addr *daddr)
1124{
1125	enum hw_event_mc_err_type tp_event;
1126	char *optype, msg[PND2_MSG_SIZE];
1127	bool ripv = m->mcgstatus & MCG_STATUS_RIPV;
1128	bool overflow = m->status & MCI_STATUS_OVER;
1129	bool uc_err = m->status & MCI_STATUS_UC;
1130	bool recov = m->status & MCI_STATUS_S;
1131	u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52);
1132	u32 mscod = GET_BITFIELD(m->status, 16, 31);
1133	u32 errcode = GET_BITFIELD(m->status, 0, 15);
1134	u32 optypenum = GET_BITFIELD(m->status, 4, 6);
1135	int rc;
1136
1137	tp_event = uc_err ? (ripv ? HW_EVENT_ERR_UNCORRECTED : HW_EVENT_ERR_FATAL) :
1138						 HW_EVENT_ERR_CORRECTED;
1139
1140	/*
 1141	 * According to Table 15-9 of the Intel Architecture spec, vol 3A,
1142	 * memory errors should fit in this mask:
1143	 *	000f 0000 1mmm cccc (binary)
1144	 * where:
1145	 *	f = Correction Report Filtering Bit. If 1, subsequent errors
1146	 *	    won't be shown
1147	 *	mmm = error type
1148	 *	cccc = channel
1149	 * If the mask doesn't match, report an error to the parsing logic
1150	 */
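	/*
	 * Illustrative example: errcode 0x0091 passes the mask check and
	 * decodes as a memory read error (mmm = 001) on channel 1.
	 */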
1151	if (!((errcode & 0xef80) == 0x80)) {
1152		optype = "Can't parse: it is not a mem";
1153	} else {
1154		switch (optypenum) {
1155		case 0:
1156			optype = "generic undef request error";
1157			break;
1158		case 1:
1159			optype = "memory read error";
1160			break;
1161		case 2:
1162			optype = "memory write error";
1163			break;
1164		case 3:
1165			optype = "addr/cmd error";
1166			break;
1167		case 4:
1168			optype = "memory scrubbing error";
1169			break;
1170		default:
1171			optype = "reserved";
1172			break;
1173		}
1174	}
1175
 1176	/* Only decode errors with a valid address (ADDRV) */
1177	if (!(m->status & MCI_STATUS_ADDRV))
1178		return;
1179
1180	rc = get_memory_error_data(mci, m->addr, daddr, msg);
1181	if (rc)
1182		goto address_error;
1183
1184	snprintf(msg, sizeof(msg),
1185		 "%s%s err_code:%04x:%04x channel:%d DIMM:%d rank:%d row:%d bank:%d col:%d",
1186		 overflow ? " OVERFLOW" : "", (uc_err && recov) ? " recoverable" : "", mscod,
1187		 errcode, daddr->chan, daddr->dimm, daddr->rank, daddr->row, daddr->bank, daddr->col);
1188
1189	edac_dbg(0, "%s\n", msg);
1190
1191	/* Call the helper to output message */
1192	edac_mc_handle_error(tp_event, mci, core_err_cnt, m->addr >> PAGE_SHIFT,
1193						 m->addr & ~PAGE_MASK, 0, daddr->chan, daddr->dimm, -1, optype, msg);
1194
1195	return;
1196
1197address_error:
1198	edac_mc_handle_error(tp_event, mci, core_err_cnt, 0, 0, 0, -1, -1, -1, msg, "");
1199}
1200
1201static void apl_get_dimm_config(struct mem_ctl_info *mci)
1202{
1203	struct pnd2_pvt	*pvt = mci->pvt_info;
1204	struct dimm_info *dimm;
1205	struct d_cr_drp0 *d;
1206	u64	capacity;
1207	int	i, g;
1208
1209	for_each_set_bit(i, &chan_mask, APL_NUM_CHANNELS) {
1210		dimm = edac_get_dimm(mci, i, 0, 0);
1211		if (!dimm) {
1212			edac_dbg(0, "No allocated DIMM for channel %d\n", i);
1213			continue;
1214		}
1215
1216		d = &drp0[i];
1217		for (g = 0; g < ARRAY_SIZE(dimms); g++)
1218			if (dimms[g].addrdec == d->addrdec &&
1219			    dimms[g].dden == d->dden &&
1220			    dimms[g].dwid == d->dwid)
1221				break;
1222
1223		if (g == ARRAY_SIZE(dimms)) {
1224			edac_dbg(0, "Channel %d: unrecognized DIMM\n", i);
1225			continue;
1226		}
1227
1228		pvt->dimm_geom[i] = g;
1229		capacity = (d->rken0 + d->rken1) * 8 * BIT(dimms[g].rowbits + dimms[g].colbits);
1230		edac_dbg(0, "Channel %d: %lld MByte DIMM\n", i, capacity >> (20 - 3));
1231		dimm->nr_pages = MiB_TO_PAGES(capacity >> (20 - 3));
1232		dimm->grain = 32;
1233		dimm->dtype = (d->dwid == 0) ? DEV_X8 : DEV_X16;
1234		dimm->mtype = MEM_DDR3;
1235		dimm->edac_mode = EDAC_SECDED;
1236		snprintf(dimm->label, sizeof(dimm->label), "Slice#%d_Chan#%d", i / 2, i % 2);
1237	}
1238}
1239
1240static const int dnv_dtypes[] = {
1241	DEV_X8, DEV_X4, DEV_X16, DEV_UNKNOWN
1242};
1243
1244static void dnv_get_dimm_config(struct mem_ctl_info *mci)
1245{
1246	int	i, j, ranks_of_dimm[DNV_MAX_DIMMS], banks, rowbits, colbits, memtype;
1247	struct dimm_info *dimm;
1248	struct d_cr_drp *d;
1249	u64	capacity;
1250
1251	if (dsch.ddr4en) {
1252		memtype = MEM_DDR4;
1253		banks = 16;
1254		colbits = 10;
1255	} else {
1256		memtype = MEM_DDR3;
1257		banks = 8;
1258	}
1259
1260	for (i = 0; i < DNV_NUM_CHANNELS; i++) {
1261		if (dmap4[i].row14 == 31)
1262			rowbits = 14;
1263		else if (dmap4[i].row15 == 31)
1264			rowbits = 15;
1265		else if (dmap4[i].row16 == 31)
1266			rowbits = 16;
1267		else if (dmap4[i].row17 == 31)
1268			rowbits = 17;
1269		else
1270			rowbits = 18;
1271
1272		if (memtype == MEM_DDR3) {
1273			if (dmap1[i].ca11 != 0x3f)
1274				colbits = 12;
1275			else
1276				colbits = 10;
1277		}
1278
1279		d = &drp[i];
1280		/* DIMM0 is present if rank0 and/or rank1 is enabled */
1281		ranks_of_dimm[0] = d->rken0 + d->rken1;
1282		/* DIMM1 is present if rank2 and/or rank3 is enabled */
1283		ranks_of_dimm[1] = d->rken2 + d->rken3;
1284
1285		for (j = 0; j < DNV_MAX_DIMMS; j++) {
1286			if (!ranks_of_dimm[j])
1287				continue;
1288
1289			dimm = edac_get_dimm(mci, i, j, 0);
1290			if (!dimm) {
1291				edac_dbg(0, "No allocated DIMM for channel %d DIMM %d\n", i, j);
1292				continue;
1293			}
1294
1295			capacity = ranks_of_dimm[j] * banks * BIT(rowbits + colbits);
1296			edac_dbg(0, "Channel %d DIMM %d: %lld MByte DIMM\n", i, j, capacity >> (20 - 3));
1297			dimm->nr_pages = MiB_TO_PAGES(capacity >> (20 - 3));
1298			dimm->grain = 32;
1299			dimm->dtype = dnv_dtypes[j ? d->dimmdwid0 : d->dimmdwid1];
1300			dimm->mtype = memtype;
1301			dimm->edac_mode = EDAC_SECDED;
1302			snprintf(dimm->label, sizeof(dimm->label), "Chan#%d_DIMM#%d", i, j);
1303		}
1304	}
1305}
1306
1307static int pnd2_register_mci(struct mem_ctl_info **ppmci)
1308{
1309	struct edac_mc_layer layers[2];
1310	struct mem_ctl_info *mci;
1311	struct pnd2_pvt *pvt;
1312	int rc;
1313
1314	rc = ops->check_ecc();
1315	if (rc < 0)
1316		return rc;
1317
1318	/* Allocate a new MC control structure */
1319	layers[0].type = EDAC_MC_LAYER_CHANNEL;
1320	layers[0].size = ops->channels;
1321	layers[0].is_virt_csrow = false;
1322	layers[1].type = EDAC_MC_LAYER_SLOT;
1323	layers[1].size = ops->dimms_per_channel;
1324	layers[1].is_virt_csrow = true;
1325	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
1326	if (!mci)
1327		return -ENOMEM;
1328
1329	pvt = mci->pvt_info;
1330	memset(pvt, 0, sizeof(*pvt));
1331
1332	mci->mod_name = EDAC_MOD_STR;
1333	mci->dev_name = ops->name;
1334	mci->ctl_name = "Pondicherry2";
1335
1336	/* Get dimm basic config and the memory layout */
1337	ops->get_dimm_config(mci);
1338
1339	if (edac_mc_add_mc(mci)) {
1340		edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
1341		edac_mc_free(mci);
1342		return -EINVAL;
1343	}
1344
1345	*ppmci = mci;
1346
1347	return 0;
1348}
1349
1350static void pnd2_unregister_mci(struct mem_ctl_info *mci)
1351{
1352	if (unlikely(!mci || !mci->pvt_info)) {
1353		pnd2_printk(KERN_ERR, "Couldn't find mci handler\n");
1354		return;
1355	}
1356
1357	/* Remove MC sysfs nodes */
1358	edac_mc_del_mc(NULL);
1359	edac_dbg(1, "%s: free mci struct\n", mci->ctl_name);
1360	edac_mc_free(mci);
1361}
1362
1363/*
1364 * Callback function registered with core kernel mce code.
1365 * Called once for each logged error.
1366 */
1367static int pnd2_mce_check_error(struct notifier_block *nb, unsigned long val, void *data)
1368{
1369	struct mce *mce = (struct mce *)data;
1370	struct mem_ctl_info *mci;
1371	struct dram_addr daddr;
1372	char *type;
1373
1374	mci = pnd2_mci;
1375	if (!mci || (mce->kflags & MCE_HANDLED_CEC))
1376		return NOTIFY_DONE;
1377
1378	/*
1379	 * Just let mcelog handle it if the error is
1380	 * outside the memory controller. A memory error
 1381	 * is indicated by bit 7 = 1 and bits 8-11, 13-15 = 0;
 1382	 * bit 12 has a special meaning.
1383	 */
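	/* e.g. status 0x0090: (0x0090 & 0xefff) >> 7 == 1, a memory error */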
1384	if ((mce->status & 0xefff) >> 7 != 1)
1385		return NOTIFY_DONE;
1386
1387	if (mce->mcgstatus & MCG_STATUS_MCIP)
1388		type = "Exception";
1389	else
1390		type = "Event";
1391
1392	pnd2_mc_printk(mci, KERN_INFO, "HANDLING MCE MEMORY ERROR\n");
1393	pnd2_mc_printk(mci, KERN_INFO, "CPU %u: Machine Check %s: %llx Bank %u: %llx\n",
1394				   mce->extcpu, type, mce->mcgstatus, mce->bank, mce->status);
1395	pnd2_mc_printk(mci, KERN_INFO, "TSC %llx ", mce->tsc);
1396	pnd2_mc_printk(mci, KERN_INFO, "ADDR %llx ", mce->addr);
1397	pnd2_mc_printk(mci, KERN_INFO, "MISC %llx ", mce->misc);
1398	pnd2_mc_printk(mci, KERN_INFO, "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x\n",
1399				   mce->cpuvendor, mce->cpuid, mce->time, mce->socketid, mce->apicid);
1400
1401	pnd2_mce_output_error(mci, mce, &daddr);
1402
 1403	/* Advise mcelog that the error was handled */
1404	mce->kflags |= MCE_HANDLED_EDAC;
1405	return NOTIFY_OK;
1406}
1407
1408static struct notifier_block pnd2_mce_dec = {
1409	.notifier_call	= pnd2_mce_check_error,
1410	.priority	= MCE_PRIO_EDAC,
1411};
1412
1413#ifdef CONFIG_EDAC_DEBUG
1414/*
1415 * Write an address to this file to exercise the address decode
1416 * logic in this driver.
1417 */
1418static u64 pnd2_fake_addr;
1419#define PND2_BLOB_SIZE 1024
1420static char pnd2_result[PND2_BLOB_SIZE];
1421static struct dentry *pnd2_test;
1422static struct debugfs_blob_wrapper pnd2_blob = {
1423	.data = pnd2_result,
1424	.size = 0
1425};
1426
1427static int debugfs_u64_set(void *data, u64 val)
1428{
1429	struct dram_addr daddr;
1430	struct mce m;
1431
1432	*(u64 *)data = val;
1433	m.mcgstatus = 0;
1434	/* ADDRV + MemRd + Unknown channel */
1435	m.status = MCI_STATUS_ADDRV + 0x9f;
1436	m.addr = val;
1437	pnd2_mce_output_error(pnd2_mci, &m, &daddr);
1438	snprintf(pnd2_blob.data, PND2_BLOB_SIZE,
1439			 "SysAddr=%llx Channel=%d DIMM=%d Rank=%d Bank=%d Row=%d Column=%d\n",
1440			 m.addr, daddr.chan, daddr.dimm, daddr.rank, daddr.bank, daddr.row, daddr.col);
1441	pnd2_blob.size = strlen(pnd2_blob.data);
1442
1443	return 0;
1444}
1445DEFINE_DEBUGFS_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n");
1446
1447static void setup_pnd2_debug(void)
1448{
1449	pnd2_test = edac_debugfs_create_dir("pnd2_test");
1450	edac_debugfs_create_file("pnd2_debug_addr", 0200, pnd2_test,
1451							 &pnd2_fake_addr, &fops_u64_wo);
1452	debugfs_create_blob("pnd2_debug_results", 0400, pnd2_test, &pnd2_blob);
1453}
1454
1455static void teardown_pnd2_debug(void)
1456{
1457	debugfs_remove_recursive(pnd2_test);
1458}
1459#else
1460static void setup_pnd2_debug(void)	{}
1461static void teardown_pnd2_debug(void)	{}
1462#endif /* CONFIG_EDAC_DEBUG */
1463
1464
1465static int pnd2_probe(void)
1466{
1467	int rc;
1468
1469	edac_dbg(2, "\n");
1470	rc = get_registers();
1471	if (rc)
1472		return rc;
1473
1474	return pnd2_register_mci(&pnd2_mci);
1475}
1476
1477static void pnd2_remove(void)
1478{
1479	edac_dbg(0, "\n");
1480	pnd2_unregister_mci(pnd2_mci);
1481}
1482
1483static struct dunit_ops apl_ops = {
1484		.name			= "pnd2/apl",
1485		.type			= APL,
1486		.pmiaddr_shift		= LOG2_PMI_ADDR_GRANULARITY,
1487		.pmiidx_shift		= 0,
1488		.channels		= APL_NUM_CHANNELS,
1489		.dimms_per_channel	= 1,
1490		.rd_reg			= apl_rd_reg,
1491		.get_registers		= apl_get_registers,
1492		.check_ecc		= apl_check_ecc_active,
1493		.mk_region		= apl_mk_region,
1494		.get_dimm_config	= apl_get_dimm_config,
1495		.pmi2mem		= apl_pmi2mem,
1496};
1497
1498static struct dunit_ops dnv_ops = {
1499		.name			= "pnd2/dnv",
1500		.type			= DNV,
1501		.pmiaddr_shift		= 0,
1502		.pmiidx_shift		= 1,
1503		.channels		= DNV_NUM_CHANNELS,
1504		.dimms_per_channel	= 2,
1505		.rd_reg			= dnv_rd_reg,
1506		.get_registers		= dnv_get_registers,
1507		.check_ecc		= dnv_check_ecc_active,
1508		.mk_region		= dnv_mk_region,
1509		.get_dimm_config	= dnv_get_dimm_config,
1510		.pmi2mem		= dnv_pmi2mem,
1511};
1512
1513static const struct x86_cpu_id pnd2_cpuids[] = {
1514	X86_MATCH_VFM(INTEL_ATOM_GOLDMONT,	&apl_ops),
1515	X86_MATCH_VFM(INTEL_ATOM_GOLDMONT_D,	&dnv_ops),
1516	{ }
1517};
1518MODULE_DEVICE_TABLE(x86cpu, pnd2_cpuids);
1519
1520static int __init pnd2_init(void)
1521{
1522	const struct x86_cpu_id *id;
1523	const char *owner;
1524	int rc;
1525
1526	edac_dbg(2, "\n");
1527
1528	if (ghes_get_devices())
1529		return -EBUSY;
1530
1531	owner = edac_get_owner();
1532	if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
1533		return -EBUSY;
1534
1535	if (cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
1536		return -ENODEV;
1537
1538	id = x86_match_cpu(pnd2_cpuids);
1539	if (!id)
1540		return -ENODEV;
1541
1542	ops = (struct dunit_ops *)id->driver_data;
1543
1544	if (ops->type == APL) {
1545		p2sb_bus = pci_find_bus(0, 0);
1546		if (!p2sb_bus)
1547			return -ENODEV;
1548	}
1549
1550	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
1551	opstate_init();
1552
1553	rc = pnd2_probe();
1554	if (rc < 0) {
1555		pnd2_printk(KERN_ERR, "Failed to register device with error %d.\n", rc);
1556		return rc;
1557	}
1558
1559	if (!pnd2_mci)
1560		return -ENODEV;
1561
1562	mce_register_decode_chain(&pnd2_mce_dec);
1563	setup_pnd2_debug();
1564
1565	return 0;
1566}
1567
1568static void __exit pnd2_exit(void)
1569{
1570	edac_dbg(2, "\n");
1571	teardown_pnd2_debug();
1572	mce_unregister_decode_chain(&pnd2_mce_dec);
1573	pnd2_remove();
1574}
1575
1576module_init(pnd2_init);
1577module_exit(pnd2_exit);
1578
1579module_param(edac_op_state, int, 0444);
1580MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
1581
1582MODULE_LICENSE("GPL v2");
1583MODULE_AUTHOR("Tony Luck");
1584MODULE_DESCRIPTION("MC Driver for Intel SoC using Pondicherry memory controller");
v5.4
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Driver for Pondicherry2 memory controller.
   4 *
   5 * Copyright (c) 2016, Intel Corporation.
   6 *
   7 * [Derived from sb_edac.c]
   8 *
   9 * Translation of system physical addresses to DIMM addresses
  10 * is a two stage process:
  11 *
  12 * First the Pondicherry 2 memory controller handles slice and channel interleaving
  13 * in "sys2pmi()". This is (almost) completley common between platforms.
  14 *
  15 * Then a platform specific dunit (DIMM unit) completes the process to provide DIMM,
  16 * rank, bank, row and column using the appropriate "dunit_ops" functions/parameters.
  17 */
  18
 
 
 
 
 
 
 
  19#include <linux/module.h>
  20#include <linux/init.h>
  21#include <linux/pci.h>
  22#include <linux/pci_ids.h>
 
  23#include <linux/slab.h>
  24#include <linux/delay.h>
  25#include <linux/edac.h>
  26#include <linux/mmzone.h>
  27#include <linux/smp.h>
  28#include <linux/bitmap.h>
  29#include <linux/math64.h>
  30#include <linux/mod_devicetable.h>
  31#include <asm/cpu_device_id.h>
  32#include <asm/intel-family.h>
  33#include <asm/processor.h>
  34#include <asm/mce.h>
  35
  36#include "edac_mc.h"
  37#include "edac_module.h"
  38#include "pnd2_edac.h"
  39
  40#define EDAC_MOD_STR		"pnd2_edac"
  41
  42#define APL_NUM_CHANNELS	4
  43#define DNV_NUM_CHANNELS	2
  44#define DNV_MAX_DIMMS		2 /* Max DIMMs per channel */
  45
  46enum type {
  47	APL,
  48	DNV, /* All requests go to PMI CH0 on each slice (CH1 disabled) */
  49};
  50
  51struct dram_addr {
  52	int chan;
  53	int dimm;
  54	int rank;
  55	int bank;
  56	int row;
  57	int col;
  58};
  59
  60struct pnd2_pvt {
  61	int dimm_geom[APL_NUM_CHANNELS];
  62	u64 tolm, tohm;
  63};
  64
  65/*
  66 * System address space is divided into multiple regions with
  67 * different interleave rules in each. The as0/as1 regions
  68 * have no interleaving at all. The as2 region is interleaved
  69 * between two channels. The mot region is magic and may overlap
  70 * other regions, with its interleave rules taking precedence.
  71 * Addresses not in any of these regions are interleaved across
  72 * all four channels.
  73 */
  74static struct region {
  75	u64	base;
  76	u64	limit;
  77	u8	enabled;
  78} mot, as0, as1, as2;
  79
  80static struct dunit_ops {
  81	char *name;
  82	enum type type;
  83	int pmiaddr_shift;
  84	int pmiidx_shift;
  85	int channels;
  86	int dimms_per_channel;
  87	int (*rd_reg)(int port, int off, int op, void *data, size_t sz, char *name);
  88	int (*get_registers)(void);
  89	int (*check_ecc)(void);
  90	void (*mk_region)(char *name, struct region *rp, void *asym);
  91	void (*get_dimm_config)(struct mem_ctl_info *mci);
  92	int (*pmi2mem)(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
  93				   struct dram_addr *daddr, char *msg);
  94} *ops;
  95
  96static struct mem_ctl_info *pnd2_mci;
  97
  98#define PND2_MSG_SIZE	256
  99
 100/* Debug macros */
 101#define pnd2_printk(level, fmt, arg...)			\
 102	edac_printk(level, "pnd2", fmt, ##arg)
 103
 104#define pnd2_mc_printk(mci, level, fmt, arg...)	\
 105	edac_mc_chipset_printk(mci, level, "pnd2", fmt, ##arg)
 106
 107#define MOT_CHAN_INTLV_BIT_1SLC_2CH 12
 108#define MOT_CHAN_INTLV_BIT_2SLC_2CH 13
 109#define SELECTOR_DISABLED (-1)
 110#define _4GB (1ul << 32)
 111
 112#define PMI_ADDRESS_WIDTH	31
 113#define PND_MAX_PHYS_BIT	39
 114
 115#define APL_ASYMSHIFT		28
 116#define DNV_ASYMSHIFT		31
 117#define CH_HASH_MASK_LSB	6
 118#define SLICE_HASH_MASK_LSB	6
 119#define MOT_SLC_INTLV_BIT	12
 120#define LOG2_PMI_ADDR_GRANULARITY	5
 121#define MOT_SHIFT	24
 122
 123#define GET_BITFIELD(v, lo, hi)	(((v) & GENMASK_ULL(hi, lo)) >> (lo))
 124#define U64_LSHIFT(val, s)	((u64)(val) << (s))
 125
 126/*
 127 * On Apollo Lake we access memory controller registers via a
 128 * side-band mailbox style interface in a hidden PCI device
 129 * configuration space.
 130 */
 131static struct pci_bus	*p2sb_bus;
 132#define P2SB_DEVFN	PCI_DEVFN(0xd, 0)
 133#define P2SB_ADDR_OFF	0xd0
 134#define P2SB_DATA_OFF	0xd4
 135#define P2SB_STAT_OFF	0xd8
 136#define P2SB_ROUT_OFF	0xda
 137#define P2SB_EADD_OFF	0xdc
 138#define P2SB_HIDE_OFF	0xe1
 139
 140#define P2SB_BUSY	1
 141
 142#define P2SB_READ(size, off, ptr) \
 143	pci_bus_read_config_##size(p2sb_bus, P2SB_DEVFN, off, ptr)
 144#define P2SB_WRITE(size, off, val) \
 145	pci_bus_write_config_##size(p2sb_bus, P2SB_DEVFN, off, val)
 146
 147static bool p2sb_is_busy(u16 *status)
 148{
 149	P2SB_READ(word, P2SB_STAT_OFF, status);
 150
 151	return !!(*status & P2SB_BUSY);
 152}
 153
 154static int _apl_rd_reg(int port, int off, int op, u32 *data)
 155{
 156	int retries = 0xff, ret;
 157	u16 status;
 158	u8 hidden;
 159
 160	/* Unhide the P2SB device, if it's hidden */
 161	P2SB_READ(byte, P2SB_HIDE_OFF, &hidden);
 162	if (hidden)
 163		P2SB_WRITE(byte, P2SB_HIDE_OFF, 0);
 164
 165	if (p2sb_is_busy(&status)) {
 166		ret = -EAGAIN;
 167		goto out;
 168	}
 169
 170	P2SB_WRITE(dword, P2SB_ADDR_OFF, (port << 24) | off);
 171	P2SB_WRITE(dword, P2SB_DATA_OFF, 0);
 172	P2SB_WRITE(dword, P2SB_EADD_OFF, 0);
 173	P2SB_WRITE(word, P2SB_ROUT_OFF, 0);
 174	P2SB_WRITE(word, P2SB_STAT_OFF, (op << 8) | P2SB_BUSY);
 175
 176	while (p2sb_is_busy(&status)) {
 177		if (retries-- == 0) {
 178			ret = -EBUSY;
 179			goto out;
 180		}
 181	}
 182
 183	P2SB_READ(dword, P2SB_DATA_OFF, data);
 184	ret = (status >> 1) & 0x3;
 185out:
 186	/* Hide the P2SB device, if it was hidden before */
 187	if (hidden)
 188		P2SB_WRITE(byte, P2SB_HIDE_OFF, hidden);
 189
 190	return ret;
 191}
 192
 193static int apl_rd_reg(int port, int off, int op, void *data, size_t sz, char *name)
 194{
 195	int ret = 0;
 196
 197	edac_dbg(2, "Read %s port=%x off=%x op=%x\n", name, port, off, op);
 198	switch (sz) {
 199	case 8:
 200		ret = _apl_rd_reg(port, off + 4, op, (u32 *)(data + 4));
 201		/* fall through */
 202	case 4:
 203		ret |= _apl_rd_reg(port, off, op, (u32 *)data);
 204		pnd2_printk(KERN_DEBUG, "%s=%x%08x ret=%d\n", name,
 205					sz == 8 ? *((u32 *)(data + 4)) : 0, *((u32 *)data), ret);
 206		break;
 207	}
 208
 209	return ret;
 210}
 211
 212static u64 get_mem_ctrl_hub_base_addr(void)
 213{
 214	struct b_cr_mchbar_lo_pci lo;
 215	struct b_cr_mchbar_hi_pci hi;
 216	struct pci_dev *pdev;
 217
 218	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL);
 219	if (pdev) {
 220		pci_read_config_dword(pdev, 0x48, (u32 *)&lo);
 221		pci_read_config_dword(pdev, 0x4c, (u32 *)&hi);
 222		pci_dev_put(pdev);
 223	} else {
 224		return 0;
 225	}
 226
 227	if (!lo.enable) {
 228		edac_dbg(2, "MMIO via memory controller hub base address is disabled!\n");
 229		return 0;
 230	}
 231
 232	return U64_LSHIFT(hi.base, 32) | U64_LSHIFT(lo.base, 15);
 233}
 234
 235static u64 get_sideband_reg_base_addr(void)
 236{
 237	struct pci_dev *pdev;
 238	u32 hi, lo;
 239	u8 hidden;
 240
 241	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x19dd, NULL);
 242	if (pdev) {
 243		/* Unhide the P2SB device, if it's hidden */
 244		pci_read_config_byte(pdev, 0xe1, &hidden);
 245		if (hidden)
 246			pci_write_config_byte(pdev, 0xe1, 0);
 247
 248		pci_read_config_dword(pdev, 0x10, &lo);
 249		pci_read_config_dword(pdev, 0x14, &hi);
 250		lo &= 0xfffffff0;
 251
 252		/* Hide the P2SB device, if it was hidden before */
 253		if (hidden)
 254			pci_write_config_byte(pdev, 0xe1, hidden);
 255
 256		pci_dev_put(pdev);
 257		return (U64_LSHIFT(hi, 32) | U64_LSHIFT(lo, 0));
 258	} else {
 259		return 0xfd000000;
 260	}
 261}
 262
 263#define DNV_MCHBAR_SIZE  0x8000
 264#define DNV_SB_PORT_SIZE 0x10000
 265static int dnv_rd_reg(int port, int off, int op, void *data, size_t sz, char *name)
 266{
 267	struct pci_dev *pdev;
 268	char *base;
 269	u64 addr;
 270	unsigned long size;
 271
 272	if (op == 4) {
 273		pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL);
 274		if (!pdev)
 275			return -ENODEV;
 276
 277		pci_read_config_dword(pdev, off, data);
 278		pci_dev_put(pdev);
 279	} else {
 280		/* MMIO via memory controller hub base address */
 281		if (op == 0 && port == 0x4c) {
 282			addr = get_mem_ctrl_hub_base_addr();
 283			if (!addr)
 284				return -ENODEV;
 285			size = DNV_MCHBAR_SIZE;
 286		} else {
 287			/* MMIO via sideband register base address */
 288			addr = get_sideband_reg_base_addr();
 289			if (!addr)
 290				return -ENODEV;
 291			addr += (port << 16);
 292			size = DNV_SB_PORT_SIZE;
 293		}
 294
 295		base = ioremap((resource_size_t)addr, size);
 296		if (!base)
 297			return -ENODEV;
 298
 299		if (sz == 8)
 300			*(u32 *)(data + 4) = *(u32 *)(base + off + 4);
 301		*(u32 *)data = *(u32 *)(base + off);
 302
 303		iounmap(base);
 304	}
 305
 306	edac_dbg(2, "Read %s=%.8x_%.8x\n", name,
 307			(sz == 8) ? *(u32 *)(data + 4) : 0, *(u32 *)data);
 308
 309	return 0;
 310}
 311
 312#define RD_REGP(regp, regname, port)	\
 313	ops->rd_reg(port,					\
 314		regname##_offset,				\
 315		regname##_r_opcode,				\
 316		regp, sizeof(struct regname),	\
 317		#regname)
 318
 319#define RD_REG(regp, regname)			\
 320	ops->rd_reg(regname ## _port,		\
 321		regname##_offset,				\
 322		regname##_r_opcode,				\
 323		regp, sizeof(struct regname),	\
 324		#regname)
 325
 326static u64 top_lm, top_hm;
 327static bool two_slices;
 328static bool two_channels; /* Both PMI channels in one slice enabled */
 329
 330static u8 sym_chan_mask;
 331static u8 asym_chan_mask;
 332static u8 chan_mask;
 333
 334static int slice_selector = -1;
 335static int chan_selector = -1;
 336static u64 slice_hash_mask;
 337static u64 chan_hash_mask;
 338
 339static void mk_region(char *name, struct region *rp, u64 base, u64 limit)
 340{
 341	rp->enabled = 1;
 342	rp->base = base;
 343	rp->limit = limit;
 344	edac_dbg(2, "Region:%s [%llx, %llx]\n", name, base, limit);
 345}
 346
 347static void mk_region_mask(char *name, struct region *rp, u64 base, u64 mask)
 348{
 349	if (mask == 0) {
 350		pr_info(FW_BUG "MOT mask cannot be zero\n");
 351		return;
 352	}
 353	if (mask != GENMASK_ULL(PND_MAX_PHYS_BIT, __ffs(mask))) {
 354		pr_info(FW_BUG "MOT mask not power of two\n");
 355		return;
 356	}
 357	if (base & ~mask) {
 358		pr_info(FW_BUG "MOT region base/mask alignment error\n");
 359		return;
 360	}
 361	rp->base = base;
 362	rp->limit = (base | ~mask) & GENMASK_ULL(PND_MAX_PHYS_BIT, 0);
 363	rp->enabled = 1;
 364	edac_dbg(2, "Region:%s [%llx, %llx]\n", name, base, rp->limit);
 365}
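/*
 * Worked example: a MOT base of 0x78000000 with a mask covering bits
 * PND_MAX_PHYS_BIT..27 describes a 128 MiB (1 << 27 byte) region, so
 * rp->limit = 0x78000000 | 0x07ffffff = 0x7fffffff. The mask must be
 * a contiguous run of high-order bits and the base must be aligned to
 * it, hence the three FW_BUG checks above.
 */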
 366
 367static bool in_region(struct region *rp, u64 addr)
 368{
 369	if (!rp->enabled)
 370		return false;
 371
 372	return rp->base <= addr && addr <= rp->limit;
 373}
 374
 375static int gen_sym_mask(struct b_cr_slice_channel_hash *p)
 376{
 377	int mask = 0;
 378
 379	if (!p->slice_0_mem_disabled)
 380		mask |= p->sym_slice0_channel_enabled;
 381
 382	if (!p->slice_1_disabled)
 383		mask |= p->sym_slice1_channel_enabled << 2;
 384
 385	if (p->ch_1_disabled || p->enable_pmi_dual_data_mode)
 386		mask &= 0x5;
 387
 388	return mask;
 389}
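/*
 * Example: with both slices populated and sym_slice{0,1}_channel_enabled
 * both 0x3, the symmetric mask is 0xf (PMI channels 0-3). If PMI channel
 * 1 is disabled, or dual data mode is enabled, the "& 0x5" keeps only
 * the even channels 0 and 2.
 */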
 390
 391static int gen_asym_mask(struct b_cr_slice_channel_hash *p,
 392			 struct b_cr_asym_mem_region0_mchbar *as0,
 393			 struct b_cr_asym_mem_region1_mchbar *as1,
 394			 struct b_cr_asym_2way_mem_region_mchbar *as2way)
 395{
 396	const int intlv[] = { 0x5, 0xA, 0x3, 0xC };
 397	int mask = 0;
 398
 399	if (as2way->asym_2way_interleave_enable)
 400		mask = intlv[as2way->asym_2way_intlv_mode];
 401	if (as0->slice0_asym_enable)
 402		mask |= (1 << as0->slice0_asym_channel_select);
 403	if (as1->slice1_asym_enable)
 404		mask |= (4 << as1->slice1_asym_channel_select);
 405	if (p->slice_0_mem_disabled)
 406		mask &= 0xc;
 407	if (p->slice_1_disabled)
 408		mask &= 0x3;
 409	if (p->ch_1_disabled || p->enable_pmi_dual_data_mode)
 410		mask &= 0x5;
 411
 412	return mask;
 413}
 414
 415static struct b_cr_tolud_pci tolud;
 416static struct b_cr_touud_lo_pci touud_lo;
 417static struct b_cr_touud_hi_pci touud_hi;
 418static struct b_cr_asym_mem_region0_mchbar asym0;
 419static struct b_cr_asym_mem_region1_mchbar asym1;
 420static struct b_cr_asym_2way_mem_region_mchbar asym_2way;
 421static struct b_cr_mot_out_base_mchbar mot_base;
 422static struct b_cr_mot_out_mask_mchbar mot_mask;
 423static struct b_cr_slice_channel_hash chash;
 424
 425/* Apollo Lake dunit */
 426/*
 427 * Validated on board with just two DIMMs in the [0] and [2] positions
 428 * in this array. The other port numbers match the documentation,
 429 * but caution is advised.
 430 */
 431static const int apl_dports[APL_NUM_CHANNELS] = { 0x18, 0x10, 0x11, 0x19 };
 432static struct d_cr_drp0 drp0[APL_NUM_CHANNELS];
 433
 434/* Denverton dunit */
 435static const int dnv_dports[DNV_NUM_CHANNELS] = { 0x10, 0x12 };
 436static struct d_cr_dsch dsch;
 437static struct d_cr_ecc_ctrl ecc_ctrl[DNV_NUM_CHANNELS];
 438static struct d_cr_drp drp[DNV_NUM_CHANNELS];
 439static struct d_cr_dmap dmap[DNV_NUM_CHANNELS];
 440static struct d_cr_dmap1 dmap1[DNV_NUM_CHANNELS];
 441static struct d_cr_dmap2 dmap2[DNV_NUM_CHANNELS];
 442static struct d_cr_dmap3 dmap3[DNV_NUM_CHANNELS];
 443static struct d_cr_dmap4 dmap4[DNV_NUM_CHANNELS];
 444static struct d_cr_dmap5 dmap5[DNV_NUM_CHANNELS];
 445
 446static void apl_mk_region(char *name, struct region *rp, void *asym)
 447{
 448	struct b_cr_asym_mem_region0_mchbar *a = asym;
 449
 450	mk_region(name, rp,
 451			  U64_LSHIFT(a->slice0_asym_base, APL_ASYMSHIFT),
 452			  U64_LSHIFT(a->slice0_asym_limit, APL_ASYMSHIFT) +
 453			  GENMASK_ULL(APL_ASYMSHIFT - 1, 0));
 454}
 455
 456static void dnv_mk_region(char *name, struct region *rp, void *asym)
 457{
 458	struct b_cr_asym_mem_region_denverton *a = asym;
 459
 460	mk_region(name, rp,
 461			  U64_LSHIFT(a->slice_asym_base, DNV_ASYMSHIFT),
 462			  U64_LSHIFT(a->slice_asym_limit, DNV_ASYMSHIFT) +
 463			  GENMASK_ULL(DNV_ASYMSHIFT - 1, 0));
 464}
 465
 466static int apl_get_registers(void)
 467{
 468	int ret = -ENODEV;
 469	int i;
 470
 471	if (RD_REG(&asym_2way, b_cr_asym_2way_mem_region_mchbar))
 472		return -ENODEV;
 473
 474	/*
 475	 * RD_REGP() will fail for unpopulated or non-existent
 476	 * DIMM slots. Return success if we find at least one DIMM.
 477	 */
 478	for (i = 0; i < APL_NUM_CHANNELS; i++)
 479		if (!RD_REGP(&drp0[i], d_cr_drp0, apl_dports[i]))
 480			ret = 0;
 481
 482	return ret;
 483}
 484
 485static int dnv_get_registers(void)
 486{
 487	int i;
 488
 489	if (RD_REG(&dsch, d_cr_dsch))
 490		return -ENODEV;
 491
 492	for (i = 0; i < DNV_NUM_CHANNELS; i++)
 493		if (RD_REGP(&ecc_ctrl[i], d_cr_ecc_ctrl, dnv_dports[i]) ||
 494			RD_REGP(&drp[i], d_cr_drp, dnv_dports[i]) ||
 495			RD_REGP(&dmap[i], d_cr_dmap, dnv_dports[i]) ||
 496			RD_REGP(&dmap1[i], d_cr_dmap1, dnv_dports[i]) ||
 497			RD_REGP(&dmap2[i], d_cr_dmap2, dnv_dports[i]) ||
 498			RD_REGP(&dmap3[i], d_cr_dmap3, dnv_dports[i]) ||
 499			RD_REGP(&dmap4[i], d_cr_dmap4, dnv_dports[i]) ||
 500			RD_REGP(&dmap5[i], d_cr_dmap5, dnv_dports[i]))
 501			return -ENODEV;
 502
 503	return 0;
 504}
 505
 506/*
 507 * Read all the h/w config registers once here (they don't
 508 * change at run time). Figure out which address ranges have
 509 * which interleave characteristics.
 510 */
 511static int get_registers(void)
 512{
 513	const int intlv[] = { 10, 11, 12, 12 };
 514
 515	if (RD_REG(&tolud, b_cr_tolud_pci) ||
 516		RD_REG(&touud_lo, b_cr_touud_lo_pci) ||
 517		RD_REG(&touud_hi, b_cr_touud_hi_pci) ||
 518		RD_REG(&asym0, b_cr_asym_mem_region0_mchbar) ||
 519		RD_REG(&asym1, b_cr_asym_mem_region1_mchbar) ||
 520		RD_REG(&mot_base, b_cr_mot_out_base_mchbar) ||
 521		RD_REG(&mot_mask, b_cr_mot_out_mask_mchbar) ||
 522		RD_REG(&chash, b_cr_slice_channel_hash))
 523		return -ENODEV;
 524
 525	if (ops->get_registers())
 526		return -ENODEV;
 527
 528	if (ops->type == DNV) {
 529		/* PMI channel idx (always 0) for asymmetric region */
 530		asym0.slice0_asym_channel_select = 0;
 531		asym1.slice1_asym_channel_select = 0;
 532		/* PMI channel bitmap (always 1) for symmetric region */
 533		chash.sym_slice0_channel_enabled = 0x1;
 534		chash.sym_slice1_channel_enabled = 0x1;
 535	}
 536
 537	if (asym0.slice0_asym_enable)
 538		ops->mk_region("as0", &as0, &asym0);
 539
 540	if (asym1.slice1_asym_enable)
 541		ops->mk_region("as1", &as1, &asym1);
 542
 543	if (asym_2way.asym_2way_interleave_enable) {
 544		mk_region("as2way", &as2,
 545				  U64_LSHIFT(asym_2way.asym_2way_base, APL_ASYMSHIFT),
 546				  U64_LSHIFT(asym_2way.asym_2way_limit, APL_ASYMSHIFT) +
 547				  GENMASK_ULL(APL_ASYMSHIFT - 1, 0));
 548	}
 549
 550	if (mot_base.imr_en) {
 551		mk_region_mask("mot", &mot,
 552					   U64_LSHIFT(mot_base.mot_out_base, MOT_SHIFT),
 553					   U64_LSHIFT(mot_mask.mot_out_mask, MOT_SHIFT));
 554	}
 555
 556	top_lm = U64_LSHIFT(tolud.tolud, 20);
 557	top_hm = U64_LSHIFT(touud_hi.touud, 32) | U64_LSHIFT(touud_lo.touud, 20);
 558
 559	two_slices = !chash.slice_1_disabled &&
 560				 !chash.slice_0_mem_disabled &&
 561				 (chash.sym_slice0_channel_enabled != 0) &&
 562				 (chash.sym_slice1_channel_enabled != 0);
 563	two_channels = !chash.ch_1_disabled &&
 564				 !chash.enable_pmi_dual_data_mode &&
 565				 ((chash.sym_slice0_channel_enabled == 3) ||
 566				 (chash.sym_slice1_channel_enabled == 3));
 567
 568	sym_chan_mask = gen_sym_mask(&chash);
 569	asym_chan_mask = gen_asym_mask(&chash, &asym0, &asym1, &asym_2way);
 570	chan_mask = sym_chan_mask | asym_chan_mask;
 571
 572	if (two_slices && !two_channels) {
 573		if (chash.hvm_mode)
 574			slice_selector = 29;
 575		else
 576			slice_selector = intlv[chash.interleave_mode];
 577	} else if (!two_slices && two_channels) {
 578		if (chash.hvm_mode)
 579			chan_selector = 29;
 580		else
 581			chan_selector = intlv[chash.interleave_mode];
 582	} else if (two_slices && two_channels) {
 583		if (chash.hvm_mode) {
 584			slice_selector = 29;
 585			chan_selector = 30;
 586		} else {
 587			slice_selector = intlv[chash.interleave_mode];
 588			chan_selector = intlv[chash.interleave_mode] + 1;
 589		}
 590	}
 591
 592	if (two_slices) {
 593		if (!chash.hvm_mode)
 594			slice_hash_mask = chash.slice_hash_mask << SLICE_HASH_MASK_LSB;
 595		if (!two_channels)
 596			slice_hash_mask |= BIT_ULL(slice_selector);
 597	}
 598
 599	if (two_channels) {
 600		if (!chash.hvm_mode)
 601			chan_hash_mask = chash.ch_hash_mask << CH_HASH_MASK_LSB;
 602		if (!two_slices)
 603			chan_hash_mask |= BIT_ULL(chan_selector);
 604	}
 605
 606	return 0;
 607}
 608
 609/* Get a contiguous memory address (remove the MMIO gap) */
 610static u64 remove_mmio_gap(u64 sys)
 611{
 612	return (sys < _4GB) ? sys : sys - (_4GB - top_lm);
 613}
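/*
 * Worked example: with top_lm = 2 GiB, DRAM resumes at 4 GiB, so a
 * system address of 5 GiB (0x140000000) maps to the contiguous
 * address 5 GiB - (4 GiB - 2 GiB) = 3 GiB (0xc0000000).
 */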
 614
 615/* Squeeze out one address bit, shift upper part down to fill gap */
 616static void remove_addr_bit(u64 *addr, int bitidx)
 617{
 618	u64	mask;
 619
 620	if (bitidx == -1)
 621		return;
 622
 623	mask = (1ull << bitidx) - 1;
 624	*addr = ((*addr >> 1) & ~mask) | (*addr & mask);
 625}
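/*
 * Worked example: remove_addr_bit(&a, 3) with a = 0b10110110 keeps
 * bits 2..0 (0b110), drops bit 3, and shifts bits 7..4 (0b1011) down
 * one place, giving 0b01011110.
 */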
 626
 627/* XOR all the bits from addr specified in mask */
 628static int hash_by_mask(u64 addr, u64 mask)
 629{
 630	u64 result = addr & mask;
 631
 632	result = (result >> 32) ^ result;
 633	result = (result >> 16) ^ result;
 634	result = (result >> 8) ^ result;
 635	result = (result >> 4) ^ result;
 636	result = (result >> 2) ^ result;
 637	result = (result >> 1) ^ result;
 638
 639	return (int)result & 1;
 640}
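/*
 * The shift/XOR ladder folds all 64 bits of (addr & mask) into bit 0,
 * so the return value is the parity of the selected bits. Example:
 * hash_by_mask(0x1234, 0x0f00) selects 0x0200 (a single set bit) and
 * returns 1.
 */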
 641
 642/*
 643 * First stage decode. Take the system address and figure out which
 644 * second stage will deal with it based on interleave modes.
 645 */
 646static int sys2pmi(const u64 addr, u32 *pmiidx, u64 *pmiaddr, char *msg)
 647{
 648	u64 contig_addr, contig_base, contig_offset, contig_base_adj;
 649	int mot_intlv_bit = two_slices ? MOT_CHAN_INTLV_BIT_2SLC_2CH :
 650						MOT_CHAN_INTLV_BIT_1SLC_2CH;
 651	int slice_intlv_bit_rm = SELECTOR_DISABLED;
 652	int chan_intlv_bit_rm = SELECTOR_DISABLED;
 653	/* Determine if address is in the MOT region. */
 654	bool mot_hit = in_region(&mot, addr);
 655	/* Calculate the number of symmetric channels enabled. */
 656	int sym_channels = hweight8(sym_chan_mask);
 657
 658	/*
 659	 * The amount we need to shift the asym base can be determined by the
 660	 * number of enabled symmetric channels.
 661	 * NOTE: This can only work because symmetric memory is not supposed
 662	 * to do a 3-way interleave.
 663	 */
 664	int sym_chan_shift = sym_channels >> 1;
 665
 666	/* Give up if address is out of range, or in MMIO gap */
 667	if (addr >= (1ul << PND_MAX_PHYS_BIT) ||
 668	   (addr >= top_lm && addr < _4GB) || addr >= top_hm) {
 669		snprintf(msg, PND2_MSG_SIZE, "Error address 0x%llx is not DRAM", addr);
 670		return -EINVAL;
 671	}
 672
 673	/* Get a contiguous memory address (remove the MMIO gap) */
 674	contig_addr = remove_mmio_gap(addr);
 675
 676	if (in_region(&as0, addr)) {
 677		*pmiidx = asym0.slice0_asym_channel_select;
 678
 679		contig_base = remove_mmio_gap(as0.base);
 680		contig_offset = contig_addr - contig_base;
 681		contig_base_adj = (contig_base >> sym_chan_shift) *
 682						  ((chash.sym_slice0_channel_enabled >> (*pmiidx & 1)) & 1);
 683		contig_addr = contig_offset + ((sym_channels > 0) ? contig_base_adj : 0ull);
 684	} else if (in_region(&as1, addr)) {
 685		*pmiidx = 2u + asym1.slice1_asym_channel_select;
 686
 687		contig_base = remove_mmio_gap(as1.base);
 688		contig_offset = contig_addr - contig_base;
 689		contig_base_adj = (contig_base >> sym_chan_shift) *
 690						  ((chash.sym_slice1_channel_enabled >> (*pmiidx & 1)) & 1);
 691		contig_addr = contig_offset + ((sym_channels > 0) ? contig_base_adj : 0ull);
 692	} else if (in_region(&as2, addr) && (asym_2way.asym_2way_intlv_mode == 0x3ul)) {
 693		bool channel1;
 694
 695		mot_intlv_bit = MOT_CHAN_INTLV_BIT_1SLC_2CH;
 696		*pmiidx = (asym_2way.asym_2way_intlv_mode & 1) << 1;
 697		channel1 = mot_hit ? ((bool)((addr >> mot_intlv_bit) & 1)) :
 698			hash_by_mask(contig_addr, chan_hash_mask);
 699		*pmiidx |= (u32)channel1;
 700
 701		contig_base = remove_mmio_gap(as2.base);
 702		chan_intlv_bit_rm = mot_hit ? mot_intlv_bit : chan_selector;
 703		contig_offset = contig_addr - contig_base;
 704		remove_addr_bit(&contig_offset, chan_intlv_bit_rm);
 705		contig_addr = (contig_base >> sym_chan_shift) + contig_offset;
 706	} else {
 707		/* Otherwise we're in normal, boring symmetric mode. */
 708		*pmiidx = 0u;
 709
 710		if (two_slices) {
 711			bool slice1;
 712
 713			if (mot_hit) {
 714				slice_intlv_bit_rm = MOT_SLC_INTLV_BIT;
 715				slice1 = (addr >> MOT_SLC_INTLV_BIT) & 1;
 716			} else {
 717				slice_intlv_bit_rm = slice_selector;
 718				slice1 = hash_by_mask(addr, slice_hash_mask);
 719			}
 720
 721			*pmiidx = (u32)slice1 << 1;
 722		}
 723
 724		if (two_channels) {
 725			bool channel1;
 726
 727			mot_intlv_bit = two_slices ? MOT_CHAN_INTLV_BIT_2SLC_2CH :
 728							MOT_CHAN_INTLV_BIT_1SLC_2CH;
 729
 730			if (mot_hit) {
 731				chan_intlv_bit_rm = mot_intlv_bit;
 732				channel1 = (addr >> mot_intlv_bit) & 1;
 733			} else {
 734				chan_intlv_bit_rm = chan_selector;
 735				channel1 = hash_by_mask(contig_addr, chan_hash_mask);
 736			}
 737
 738			*pmiidx |= (u32)channel1;
 739		}
 740	}
 741
 742	/* Remove the chan_selector bit first */
 743	remove_addr_bit(&contig_addr, chan_intlv_bit_rm);
 744	/* Remove the slice bit (we remove it second because it must be lower) */
 745	remove_addr_bit(&contig_addr, slice_intlv_bit_rm);
 746	*pmiaddr = contig_addr;
 747
 748	return 0;
 749}
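/*
 * Illustrative walk-through (symmetric case, no MOT hit): with two
 * slices, two channels and hvm_mode off, the slice selector is
 * intlv[interleave_mode] and the channel selector is the next bit up.
 * The two hash decisions build pmiidx = (slice << 1) | channel, then
 * both selector bits are squeezed out of the address, leaving the PMI
 * address seen by the dunit.
 */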
 750
 751/* Translate PMI address to memory (rank, row, bank, column) */
 752#define C(n) (0x10 | (n))	/* column */
 753#define B(n) (0x20 | (n))	/* bank */
 754#define R(n) (0x40 | (n))	/* row */
 755#define RS   (0x80)			/* rank */
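/*
 * Each dimm_geometry::bits[] entry below encodes a destination field
 * in its high nibble and a bit index in its low nibble: e.g. C(7) ==
 * 0x17 routes that PMI address bit to column bit 7, R(12) == 0x4c to
 * row bit 12. A zero entry ends the mapping; a set PMI bit landing
 * there is reported as a bad translation.
 */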
 756
 757/* addrdec values */
 758#define AMAP_1KB	0
 759#define AMAP_2KB	1
 760#define AMAP_4KB	2
 761#define AMAP_RSVD	3
 762
 763/* dden values */
 764#define DEN_4Gb		0
 765#define DEN_8Gb		2
 766
 767/* dwid values */
 768#define X8		0
 769#define X16		1
 770
 771static struct dimm_geometry {
 772	u8	addrdec;
 773	u8	dden;
 774	u8	dwid;
 775	u8	rowbits, colbits;
 776	u16	bits[PMI_ADDRESS_WIDTH];
 777} dimms[] = {
 778	{
 779		.addrdec = AMAP_1KB, .dden = DEN_4Gb, .dwid = X16,
 780		.rowbits = 15, .colbits = 10,
 781		.bits = {
 782			C(2),  C(3),  C(4),  C(5),  C(6),  B(0),  B(1),  B(2),  R(0),
 783			R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),  R(9),
 784			R(10), C(7),  C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
 785			0,     0,     0,     0
 786		}
 787	},
 788	{
 789		.addrdec = AMAP_1KB, .dden = DEN_4Gb, .dwid = X8,
 790		.rowbits = 16, .colbits = 10,
 791		.bits = {
 792			C(2),  C(3),  C(4),  C(5),  C(6),  B(0),  B(1),  B(2),  R(0),
 793			R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),  R(9),
 794			R(10), C(7),  C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
 795			R(15), 0,     0,     0
 796		}
 797	},
 798	{
 799		.addrdec = AMAP_1KB, .dden = DEN_8Gb, .dwid = X16,
 800		.rowbits = 16, .colbits = 10,
 801		.bits = {
 802			C(2),  C(3),  C(4),  C(5),  C(6),  B(0),  B(1),  B(2),  R(0),
 803			R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),  R(9),
 804			R(10), C(7),  C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
 805			R(15), 0,     0,     0
 806		}
 807	},
 808	{
 809		.addrdec = AMAP_1KB, .dden = DEN_8Gb, .dwid = X8,
 810		.rowbits = 16, .colbits = 11,
 811		.bits = {
 812			C(2),  C(3),  C(4),  C(5),  C(6),  B(0),  B(1),  B(2),  R(0),
 813			R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),  R(9),
 814			R(10), C(7),  C(8),  C(9),  R(11), RS,    C(11), R(12), R(13),
 815			R(14), R(15), 0,     0
 816		}
 817	},
 818	{
 819		.addrdec = AMAP_2KB, .dden = DEN_4Gb, .dwid = X16,
 820		.rowbits = 15, .colbits = 10,
 821		.bits = {
 822			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  B(0),  B(1),  B(2),
 823			R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),
 824			R(9),  R(10), C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
 825			0,     0,     0,     0
 826		}
 827	},
 828	{
 829		.addrdec = AMAP_2KB, .dden = DEN_4Gb, .dwid = X8,
 830		.rowbits = 16, .colbits = 10,
 831		.bits = {
 832			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  B(0),  B(1),  B(2),
 833			R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),
 834			R(9),  R(10), C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
 835			R(15), 0,     0,     0
 836		}
 837	},
 838	{
 839		.addrdec = AMAP_2KB, .dden = DEN_8Gb, .dwid = X16,
 840		.rowbits = 16, .colbits = 10,
 841		.bits = {
 842			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  B(0),  B(1),  B(2),
 843			R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),
 844			R(9),  R(10), C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
 845			R(15), 0,     0,     0
 846		}
 847	},
 848	{
 849		.addrdec = AMAP_2KB, .dden = DEN_8Gb, .dwid = X8,
 850		.rowbits = 16, .colbits = 11,
 851		.bits = {
 852			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  B(0),  B(1),  B(2),
 853			R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),
 854			R(9),  R(10), C(8),  C(9),  R(11), RS,    C(11), R(12), R(13),
 855			R(14), R(15), 0,     0
 856		}
 857	},
 858	{
 859		.addrdec = AMAP_4KB, .dden = DEN_4Gb, .dwid = X16,
 860		.rowbits = 15, .colbits = 10,
 861		.bits = {
 862			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  C(8),  B(0),  B(1),
 863			B(2),  R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
 864			R(8),  R(9),  R(10), C(9),  R(11), RS,    R(12), R(13), R(14),
 865			0,     0,     0,     0
 866		}
 867	},
 868	{
 869		.addrdec = AMAP_4KB, .dden = DEN_4Gb, .dwid = X8,
 870		.rowbits = 16, .colbits = 10,
 871		.bits = {
 872			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  C(8),  B(0),  B(1),
 873			B(2),  R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
 874			R(8),  R(9),  R(10), C(9),  R(11), RS,    R(12), R(13), R(14),
 875			R(15), 0,     0,     0
 876		}
 877	},
 878	{
 879		.addrdec = AMAP_4KB, .dden = DEN_8Gb, .dwid = X16,
 880		.rowbits = 16, .colbits = 10,
 881		.bits = {
 882			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  C(8),  B(0),  B(1),
 883			B(2),  R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
 884			R(8),  R(9),  R(10), C(9),  R(11), RS,    R(12), R(13), R(14),
 885			R(15), 0,     0,     0
 886		}
 887	},
 888	{
 889		.addrdec = AMAP_4KB, .dden = DEN_8Gb, .dwid = X8,
 890		.rowbits = 16, .colbits = 11,
 891		.bits = {
 892			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  C(8),  B(0),  B(1),
 893			B(2),  R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
 894			R(8),  R(9),  R(10), C(9),  R(11), RS,    C(11), R(12), R(13),
 895			R(14), R(15), 0,     0
 896		}
 897	}
 898};
 899
 900static int bank_hash(u64 pmiaddr, int idx, int shft)
 901{
 902	int bhash = 0;
 903
 904	switch (idx) {
 905	case 0:
 906		bhash ^= ((pmiaddr >> (12 + shft)) ^ (pmiaddr >> (9 + shft))) & 1;
 907		break;
 908	case 1:
 909		bhash ^= (((pmiaddr >> (10 + shft)) ^ (pmiaddr >> (8 + shft))) & 1) << 1;
 910		bhash ^= ((pmiaddr >> 22) & 1) << 1;
 911		break;
 912	case 2:
 913		bhash ^= (((pmiaddr >> (13 + shft)) ^ (pmiaddr >> (11 + shft))) & 1) << 2;
 914		break;
 915	}
 916
 917	return bhash;
 918}
 919
 920static int rank_hash(u64 pmiaddr)
 921{
 922	return ((pmiaddr >> 16) ^ (pmiaddr >> 10)) & 1;
 923}
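/*
 * When hashing is enabled (bahen/rsien in d_cr_drp0), extra address
 * bits are XORed into the decoded value: e.g. bank bit 0 flips on the
 * parity of PMI bits (12 + shft) and (9 + shft), where shft is the
 * addrdec field (0/1/2 for the 1 KB/2 KB/4 KB address maps), and the
 * rank select bit flips on the parity of PMI bits 16 and 10.
 */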
 924
 925/* Second stage decode. Compute rank, bank, row & column. */
 926static int apl_pmi2mem(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
 927		       struct dram_addr *daddr, char *msg)
 928{
 929	struct d_cr_drp0 *cr_drp0 = &drp0[pmiidx];
 930	struct pnd2_pvt *pvt = mci->pvt_info;
 931	int g = pvt->dimm_geom[pmiidx];
 932	struct dimm_geometry *d = &dimms[g];
 933	int column = 0, bank = 0, row = 0, rank = 0;
 934	int i, idx, type, skiprs = 0;
 935
 936	for (i = 0; i < PMI_ADDRESS_WIDTH; i++) {
 937		int	bit = (pmiaddr >> i) & 1;
 938
 939		if (i + skiprs >= PMI_ADDRESS_WIDTH) {
 940			snprintf(msg, PND2_MSG_SIZE, "Bad dimm_geometry[] table\n");
 941			return -EINVAL;
 942		}
 943
 944		type = d->bits[i + skiprs] & ~0xf;
 945		idx = d->bits[i + skiprs] & 0xf;
 946
 947		/*
 948		 * On single rank DIMMs ignore the rank select bit
 949		 * and shift remainder of "bits[]" down one place.
 950		 */
 951		if (type == RS && (cr_drp0->rken0 + cr_drp0->rken1) == 1) {
 952			skiprs = 1;
 953			type = d->bits[i + skiprs] & ~0xf;
 954			idx = d->bits[i + skiprs] & 0xf;
 955		}
 956
 957		switch (type) {
 958		case C(0):
 959			column |= (bit << idx);
 960			break;
 961		case B(0):
 962			bank |= (bit << idx);
 963			if (cr_drp0->bahen)
 964				bank ^= bank_hash(pmiaddr, idx, d->addrdec);
 965			break;
 966		case R(0):
 967			row |= (bit << idx);
 968			break;
 969		case RS:
 970			rank = bit;
 971			if (cr_drp0->rsien)
 972				rank ^= rank_hash(pmiaddr);
 973			break;
 974		default:
 975			if (bit) {
 976				snprintf(msg, PND2_MSG_SIZE, "Bad translation\n");
 977				return -EINVAL;
 978			}
 979			goto done;
 980		}
 981	}
 982
 983done:
 984	daddr->col = column;
 985	daddr->bank = bank;
 986	daddr->row = row;
 987	daddr->rank = rank;
 988	daddr->dimm = 0;
 989
 990	return 0;
 991}
 992
 993/* Pluck bit "in" from pmiaddr and return value shifted to bit "out" */
 994#define dnv_get_bit(pmi, in, out) ((int)(((pmi) >> (in)) & 1u) << (out))
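/*
 * Example: dnv_get_bit(0x40, 6, 2) picks bit 6 of the PMI address
 * (here 1) and returns it shifted to bit position 2, i.e. 4. The
 * dmap* register fields below supply the "in" positions for each
 * rank/bank/row/column bit.
 */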
 995
 996static int dnv_pmi2mem(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
 997					   struct dram_addr *daddr, char *msg)
 998{
 999	/* Rank 0 or 1 */
1000	daddr->rank = dnv_get_bit(pmiaddr, dmap[pmiidx].rs0 + 13, 0);
1001	/* Rank 2 or 3 */
1002	daddr->rank |= dnv_get_bit(pmiaddr, dmap[pmiidx].rs1 + 13, 1);
1003
1004	/*
1005	 * Normally ranks 0,1 are DIMM0, and 2,3 are DIMM1, but we
1006	 * flip them if DIMM1 is larger than DIMM0.
1007	 */
1008	daddr->dimm = (daddr->rank >= 2) ^ drp[pmiidx].dimmflip;
1009
1010	daddr->bank = dnv_get_bit(pmiaddr, dmap[pmiidx].ba0 + 6, 0);
1011	daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].ba1 + 6, 1);
1012	daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].bg0 + 6, 2);
1013	if (dsch.ddr4en)
1014		daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].bg1 + 6, 3);
1015	if (dmap1[pmiidx].bxor) {
1016		if (dsch.ddr4en) {
1017			daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 0);
1018			daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row7 + 6, 1);
1019			if (dsch.chan_width == 0)
1020				/* 64/72 bit dram channel width */
1021				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 2);
1022			else
1023				/* 32/40 bit dram channel width */
1024				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 2);
1025			daddr->bank ^= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 3);
1026		} else {
1027			daddr->bank ^= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 0);
1028			daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 1);
1029			if (dsch.chan_width == 0)
1030				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 2);
1031			else
1032				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 2);
1033		}
1034	}
1035
1036	daddr->row = dnv_get_bit(pmiaddr, dmap2[pmiidx].row0 + 6, 0);
1037	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row1 + 6, 1);
1038	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 2);
1039	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row3 + 6, 3);
1040	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row4 + 6, 4);
1041	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row5 + 6, 5);
1042	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 6);
1043	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row7 + 6, 7);
1044	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row8 + 6, 8);
1045	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row9 + 6, 9);
1046	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row10 + 6, 10);
1047	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row11 + 6, 11);
1048	daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row12 + 6, 12);
1049	daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row13 + 6, 13);
1050	if (dmap4[pmiidx].row14 != 31)
1051		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row14 + 6, 14);
1052	if (dmap4[pmiidx].row15 != 31)
1053		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row15 + 6, 15);
1054	if (dmap4[pmiidx].row16 != 31)
1055		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row16 + 6, 16);
1056	if (dmap4[pmiidx].row17 != 31)
1057		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row17 + 6, 17);
1058
1059	daddr->col = dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 3);
1060	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 4);
1061	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca5 + 6, 5);
1062	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca6 + 6, 6);
1063	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca7 + 6, 7);
1064	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca8 + 6, 8);
1065	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca9 + 6, 9);
1066	if (!dsch.ddr4en && dmap1[pmiidx].ca11 != 0x3f)
1067		daddr->col |= dnv_get_bit(pmiaddr, dmap1[pmiidx].ca11 + 13, 11);
1068
1069	return 0;
1070}
1071
1072static int check_channel(int ch)
1073{
1074	if (drp0[ch].dramtype != 0) {
1075		pnd2_printk(KERN_INFO, "Unsupported DIMM in channel %d\n", ch);
1076		return 1;
1077	} else if (drp0[ch].eccen == 0) {
1078		pnd2_printk(KERN_INFO, "ECC disabled on channel %d\n", ch);
1079		return 1;
1080	}
1081	return 0;
1082}
1083
1084static int apl_check_ecc_active(void)
1085{
1086	int	i, ret = 0;
1087
1088	/* Check dramtype and ECC mode for each present DIMM */
1089	for (i = 0; i < APL_NUM_CHANNELS; i++)
1090		if (chan_mask & BIT(i))
1091			ret += check_channel(i);
1092	return ret ? -EINVAL : 0;
1093}
1094
1095#define DIMMS_PRESENT(d) ((d)->rken0 + (d)->rken1 + (d)->rken2 + (d)->rken3)
1096
1097static int check_unit(int ch)
1098{
1099	struct d_cr_drp *d = &drp[ch];
1100
1101	if (DIMMS_PRESENT(d) && !ecc_ctrl[ch].eccen) {
1102		pnd2_printk(KERN_INFO, "ECC disabled on channel %d\n", ch);
1103		return 1;
1104	}
1105	return 0;
1106}
1107
1108static int dnv_check_ecc_active(void)
1109{
1110	int	i, ret = 0;
1111
1112	for (i = 0; i < DNV_NUM_CHANNELS; i++)
1113		ret += check_unit(i);
1114	return ret ? -EINVAL : 0;
1115}
1116
1117static int get_memory_error_data(struct mem_ctl_info *mci, u64 addr,
1118								 struct dram_addr *daddr, char *msg)
1119{
1120	u64	pmiaddr;
1121	u32	pmiidx;
1122	int	ret;
1123
1124	ret = sys2pmi(addr, &pmiidx, &pmiaddr, msg);
1125	if (ret)
1126		return ret;
1127
1128	pmiaddr >>= ops->pmiaddr_shift;
1129	/* pmi channel idx to dimm channel idx */
1130	pmiidx >>= ops->pmiidx_shift;
1131	daddr->chan = pmiidx;
1132
1133	ret = ops->pmi2mem(mci, pmiaddr, pmiidx, daddr, msg);
1134	if (ret)
1135		return ret;
1136
1137	edac_dbg(0, "SysAddr=%llx PmiAddr=%llx Channel=%d DIMM=%d Rank=%d Bank=%d Row=%d Column=%d\n",
1138			 addr, pmiaddr, daddr->chan, daddr->dimm, daddr->rank, daddr->bank, daddr->row, daddr->col);
1139
1140	return 0;
1141}
1142
1143static void pnd2_mce_output_error(struct mem_ctl_info *mci, const struct mce *m,
1144				  struct dram_addr *daddr)
1145{
1146	enum hw_event_mc_err_type tp_event;
1147	char *optype, msg[PND2_MSG_SIZE];
1148	bool ripv = m->mcgstatus & MCG_STATUS_RIPV;
1149	bool overflow = m->status & MCI_STATUS_OVER;
1150	bool uc_err = m->status & MCI_STATUS_UC;
1151	bool recov = m->status & MCI_STATUS_S;
1152	u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52);
1153	u32 mscod = GET_BITFIELD(m->status, 16, 31);
1154	u32 errcode = GET_BITFIELD(m->status, 0, 15);
1155	u32 optypenum = GET_BITFIELD(m->status, 4, 6);
1156	int rc;
1157
1158	tp_event = uc_err ? (ripv ? HW_EVENT_ERR_FATAL : HW_EVENT_ERR_UNCORRECTED) :
1159						 HW_EVENT_ERR_CORRECTED;
1160
1161	/*
1162	 * According to Table 15-9 of the Intel Architecture spec vol 3A,
1163	 * memory errors should fit in this mask:
1164	 *	000f 0000 1mmm cccc (binary)
1165	 * where:
1166	 *	f = Correction Report Filtering Bit. If 1, subsequent errors
1167	 *	    won't be shown
1168	 *	mmm = error type
1169	 *	cccc = channel
1170	 * If the mask doesn't match, report an error to the parsing logic
1171	 */
1172	if (!((errcode & 0xef80) == 0x80)) {
1173		optype = "Can't parse: it is not a mem";
1174	} else {
1175		switch (optypenum) {
1176		case 0:
1177			optype = "generic undef request error";
1178			break;
1179		case 1:
1180			optype = "memory read error";
1181			break;
1182		case 2:
1183			optype = "memory write error";
1184			break;
1185		case 3:
1186			optype = "addr/cmd error";
1187			break;
1188		case 4:
1189			optype = "memory scrubbing error";
1190			break;
1191		default:
1192			optype = "reserved";
1193			break;
1194		}
1195	}
1196
1197	/* Only decode errors with a valid address (ADDRV) */
1198	if (!(m->status & MCI_STATUS_ADDRV))
1199		return;
1200
1201	rc = get_memory_error_data(mci, m->addr, daddr, msg);
1202	if (rc)
1203		goto address_error;
1204
1205	snprintf(msg, sizeof(msg),
1206		 "%s%s err_code:%04x:%04x channel:%d DIMM:%d rank:%d row:%d bank:%d col:%d",
1207		 overflow ? " OVERFLOW" : "", (uc_err && recov) ? " recoverable" : "", mscod,
1208		 errcode, daddr->chan, daddr->dimm, daddr->rank, daddr->row, daddr->bank, daddr->col);
1209
1210	edac_dbg(0, "%s\n", msg);
1211
1212	/* Call the helper to output message */
1213	edac_mc_handle_error(tp_event, mci, core_err_cnt, m->addr >> PAGE_SHIFT,
1214						 m->addr & ~PAGE_MASK, 0, daddr->chan, daddr->dimm, -1, optype, msg);
1215
1216	return;
1217
1218address_error:
1219	edac_mc_handle_error(tp_event, mci, core_err_cnt, 0, 0, 0, -1, -1, -1, msg, "");
1220}
1221
1222static void apl_get_dimm_config(struct mem_ctl_info *mci)
1223{
1224	struct pnd2_pvt	*pvt = mci->pvt_info;
1225	struct dimm_info *dimm;
1226	struct d_cr_drp0 *d;
1227	u64	capacity;
1228	int	i, g;
1229
1230	for (i = 0; i < APL_NUM_CHANNELS; i++) {
1231		if (!(chan_mask & BIT(i)))
1232			continue;
1233
1234		dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, i, 0, 0);
1235		if (!dimm) {
1236			edac_dbg(0, "No allocated DIMM for channel %d\n", i);
1237			continue;
1238		}
1239
1240		d = &drp0[i];
1241		for (g = 0; g < ARRAY_SIZE(dimms); g++)
1242			if (dimms[g].addrdec == d->addrdec &&
1243			    dimms[g].dden == d->dden &&
1244			    dimms[g].dwid == d->dwid)
1245				break;
1246
1247		if (g == ARRAY_SIZE(dimms)) {
1248			edac_dbg(0, "Channel %d: unrecognized DIMM\n", i);
1249			continue;
1250		}
1251
1252		pvt->dimm_geom[i] = g;
1253		capacity = (d->rken0 + d->rken1) * 8 * (1ul << dimms[g].rowbits) *
1254				   (1ul << dimms[g].colbits);
1255		edac_dbg(0, "Channel %d: %lld MByte DIMM\n", i, capacity >> (20 - 3));
1256		dimm->nr_pages = MiB_TO_PAGES(capacity >> (20 - 3));
1257		dimm->grain = 32;
1258		dimm->dtype = (d->dwid == 0) ? DEV_X8 : DEV_X16;
1259		dimm->mtype = MEM_DDR3;
1260		dimm->edac_mode = EDAC_SECDED;
1261		snprintf(dimm->label, sizeof(dimm->label), "Slice#%d_Chan#%d", i / 2, i % 2);
1262	}
1263}
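/*
 * The capacity computed above is in 8-byte units (presumably one
 * 64-bit channel word each): ranks * 8 banks * 2^rowbits rows *
 * 2^colbits columns, with ">> (20 - 3)" converting to MiB. Example:
 * 2 ranks with rowbits = 15 and colbits = 10 give 2 * 8 * 32768 *
 * 1024 = 2^29 units, i.e. 4 GiB.
 */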
1264
1265static const int dnv_dtypes[] = {
1266	DEV_X8, DEV_X4, DEV_X16, DEV_UNKNOWN
1267};
1268
1269static void dnv_get_dimm_config(struct mem_ctl_info *mci)
1270{
1271	int	i, j, ranks_of_dimm[DNV_MAX_DIMMS], banks, rowbits, colbits, memtype;
1272	struct dimm_info *dimm;
1273	struct d_cr_drp *d;
1274	u64	capacity;
1275
1276	if (dsch.ddr4en) {
1277		memtype = MEM_DDR4;
1278		banks = 16;
1279		colbits = 10;
1280	} else {
1281		memtype = MEM_DDR3;
1282		banks = 8;
1283	}
1284
1285	for (i = 0; i < DNV_NUM_CHANNELS; i++) {
1286		if (dmap4[i].row14 == 31)
1287			rowbits = 14;
1288		else if (dmap4[i].row15 == 31)
1289			rowbits = 15;
1290		else if (dmap4[i].row16 == 31)
1291			rowbits = 16;
1292		else if (dmap4[i].row17 == 31)
1293			rowbits = 17;
1294		else
1295			rowbits = 18;
1296
1297		if (memtype == MEM_DDR3) {
1298			if (dmap1[i].ca11 != 0x3f)
1299				colbits = 12;
1300			else
1301				colbits = 10;
1302		}
1303
1304		d = &drp[i];
1305		/* DIMM0 is present if rank0 and/or rank1 is enabled */
1306		ranks_of_dimm[0] = d->rken0 + d->rken1;
1307		/* DIMM1 is present if rank2 and/or rank3 is enabled */
1308		ranks_of_dimm[1] = d->rken2 + d->rken3;
1309
1310		for (j = 0; j < DNV_MAX_DIMMS; j++) {
1311			if (!ranks_of_dimm[j])
1312				continue;
1313
1314			dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, i, j, 0);
1315			if (!dimm) {
1316				edac_dbg(0, "No allocated DIMM for channel %d DIMM %d\n", i, j);
1317				continue;
1318			}
1319
1320			capacity = ranks_of_dimm[j] * banks * (1ul << rowbits) * (1ul << colbits);
1321			edac_dbg(0, "Channel %d DIMM %d: %lld MByte DIMM\n", i, j, capacity >> (20 - 3));
1322			dimm->nr_pages = MiB_TO_PAGES(capacity >> (20 - 3));
1323			dimm->grain = 32;
1324			dimm->dtype = dnv_dtypes[j ? d->dimmdwid0 : d->dimmdwid1];
1325			dimm->mtype = memtype;
1326			dimm->edac_mode = EDAC_SECDED;
1327			snprintf(dimm->label, sizeof(dimm->label), "Chan#%d_DIMM#%d", i, j);
1328		}
1329	}
1330}
1331
1332static int pnd2_register_mci(struct mem_ctl_info **ppmci)
1333{
1334	struct edac_mc_layer layers[2];
1335	struct mem_ctl_info *mci;
1336	struct pnd2_pvt *pvt;
1337	int rc;
1338
1339	rc = ops->check_ecc();
1340	if (rc < 0)
1341		return rc;
1342
1343	/* Allocate a new MC control structure */
1344	layers[0].type = EDAC_MC_LAYER_CHANNEL;
1345	layers[0].size = ops->channels;
1346	layers[0].is_virt_csrow = false;
1347	layers[1].type = EDAC_MC_LAYER_SLOT;
1348	layers[1].size = ops->dimms_per_channel;
1349	layers[1].is_virt_csrow = true;
1350	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
1351	if (!mci)
1352		return -ENOMEM;
1353
1354	pvt = mci->pvt_info;
1355	memset(pvt, 0, sizeof(*pvt));
1356
1357	mci->mod_name = EDAC_MOD_STR;
1358	mci->dev_name = ops->name;
1359	mci->ctl_name = "Pondicherry2";
1360
1361	/* Get dimm basic config and the memory layout */
1362	ops->get_dimm_config(mci);
1363
1364	if (edac_mc_add_mc(mci)) {
1365		edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
1366		edac_mc_free(mci);
1367		return -EINVAL;
1368	}
1369
1370	*ppmci = mci;
1371
1372	return 0;
1373}
1374
1375static void pnd2_unregister_mci(struct mem_ctl_info *mci)
1376{
1377	if (unlikely(!mci || !mci->pvt_info)) {
1378		pnd2_printk(KERN_ERR, "Couldn't find mci handler\n");
1379		return;
1380	}
1381
1382	/* Remove MC sysfs nodes */
1383	edac_mc_del_mc(NULL);
1384	edac_dbg(1, "%s: free mci struct\n", mci->ctl_name);
1385	edac_mc_free(mci);
1386}
1387
1388/*
1389 * Callback function registered with core kernel mce code.
1390 * Called once for each logged error.
1391 */
1392static int pnd2_mce_check_error(struct notifier_block *nb, unsigned long val, void *data)
1393{
1394	struct mce *mce = (struct mce *)data;
1395	struct mem_ctl_info *mci;
1396	struct dram_addr daddr;
1397	char *type;
1398
1399	if (edac_get_report_status() == EDAC_REPORTING_DISABLED)
1400		return NOTIFY_DONE;
1401
1402	mci = pnd2_mci;
1403	if (!mci)
1404		return NOTIFY_DONE;
1405
1406	/*
1407	 * Just let mcelog handle it if the error is
1408	 * outside the memory controller. A memory error
1409	 * is indicated by bit 7 = 1 and bits 8-11,13-15 = 0.
1410	 * Bit 12 has a special meaning.
1411	 */
1412	if ((mce->status & 0xefff) >> 7 != 1)
1413		return NOTIFY_DONE;
1414
1415	if (mce->mcgstatus & MCG_STATUS_MCIP)
1416		type = "Exception";
1417	else
1418		type = "Event";
1419
1420	pnd2_mc_printk(mci, KERN_INFO, "HANDLING MCE MEMORY ERROR\n");
1421	pnd2_mc_printk(mci, KERN_INFO, "CPU %u: Machine Check %s: %llx Bank %u: %llx\n",
1422				   mce->extcpu, type, mce->mcgstatus, mce->bank, mce->status);
1423	pnd2_mc_printk(mci, KERN_INFO, "TSC %llx ", mce->tsc);
1424	pnd2_mc_printk(mci, KERN_INFO, "ADDR %llx ", mce->addr);
1425	pnd2_mc_printk(mci, KERN_INFO, "MISC %llx ", mce->misc);
1426	pnd2_mc_printk(mci, KERN_INFO, "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x\n",
1427				   mce->cpuvendor, mce->cpuid, mce->time, mce->socketid, mce->apicid);
1428
1429	pnd2_mce_output_error(mci, mce, &daddr);
1430
1431	/* Advise mcelog that the error was handled */
1432	return NOTIFY_STOP;
1433}
1434
1435static struct notifier_block pnd2_mce_dec = {
1436	.notifier_call	= pnd2_mce_check_error,
1437};
1438
1439#ifdef CONFIG_EDAC_DEBUG
1440/*
1441 * Write an address to this file to exercise the address decode
1442 * logic in this driver.
1443 */
1444static u64 pnd2_fake_addr;
1445#define PND2_BLOB_SIZE 1024
1446static char pnd2_result[PND2_BLOB_SIZE];
1447static struct dentry *pnd2_test;
1448static struct debugfs_blob_wrapper pnd2_blob = {
1449	.data = pnd2_result,
1450	.size = 0
1451};
1452
1453static int debugfs_u64_set(void *data, u64 val)
1454{
1455	struct dram_addr daddr;
1456	struct mce m;
1457
1458	*(u64 *)data = val;
1459	m.mcgstatus = 0;
1460	/* ADDRV + MemRd + Unknown channel */
1461	m.status = MCI_STATUS_ADDRV + 0x9f;
1462	m.addr = val;
1463	pnd2_mce_output_error(pnd2_mci, &m, &daddr);
1464	snprintf(pnd2_blob.data, PND2_BLOB_SIZE,
1465			 "SysAddr=%llx Channel=%d DIMM=%d Rank=%d Bank=%d Row=%d Column=%d\n",
1466			 m.addr, daddr.chan, daddr.dimm, daddr.rank, daddr.bank, daddr.row, daddr.col);
1467	pnd2_blob.size = strlen(pnd2_blob.data);
1468
1469	return 0;
1470}
1471DEFINE_DEBUGFS_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n");
1472
1473static void setup_pnd2_debug(void)
1474{
1475	pnd2_test = edac_debugfs_create_dir("pnd2_test");
1476	edac_debugfs_create_file("pnd2_debug_addr", 0200, pnd2_test,
1477							 &pnd2_fake_addr, &fops_u64_wo);
1478	debugfs_create_blob("pnd2_debug_results", 0400, pnd2_test, &pnd2_blob);
1479}
1480
1481static void teardown_pnd2_debug(void)
1482{
1483	debugfs_remove_recursive(pnd2_test);
1484}
1485#else
1486static void setup_pnd2_debug(void)	{}
1487static void teardown_pnd2_debug(void)	{}
1488#endif /* CONFIG_EDAC_DEBUG */
1489
1490
1491static int pnd2_probe(void)
1492{
1493	int rc;
1494
1495	edac_dbg(2, "\n");
1496	rc = get_registers();
1497	if (rc)
1498		return rc;
1499
1500	return pnd2_register_mci(&pnd2_mci);
1501}
1502
1503static void pnd2_remove(void)
1504{
1505	edac_dbg(0, "\n");
1506	pnd2_unregister_mci(pnd2_mci);
1507}
1508
1509static struct dunit_ops apl_ops = {
1510		.name			= "pnd2/apl",
1511		.type			= APL,
1512		.pmiaddr_shift		= LOG2_PMI_ADDR_GRANULARITY,
1513		.pmiidx_shift		= 0,
1514		.channels		= APL_NUM_CHANNELS,
1515		.dimms_per_channel	= 1,
1516		.rd_reg			= apl_rd_reg,
1517		.get_registers		= apl_get_registers,
1518		.check_ecc		= apl_check_ecc_active,
1519		.mk_region		= apl_mk_region,
1520		.get_dimm_config	= apl_get_dimm_config,
1521		.pmi2mem		= apl_pmi2mem,
1522};
1523
1524static struct dunit_ops dnv_ops = {
1525		.name			= "pnd2/dnv",
1526		.type			= DNV,
1527		.pmiaddr_shift		= 0,
1528		.pmiidx_shift		= 1,
1529		.channels		= DNV_NUM_CHANNELS,
1530		.dimms_per_channel	= 2,
1531		.rd_reg			= dnv_rd_reg,
1532		.get_registers		= dnv_get_registers,
1533		.check_ecc		= dnv_check_ecc_active,
1534		.mk_region		= dnv_mk_region,
1535		.get_dimm_config	= dnv_get_dimm_config,
1536		.pmi2mem		= dnv_pmi2mem,
1537};
1538
1539static const struct x86_cpu_id pnd2_cpuids[] = {
1540	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT, 0, (kernel_ulong_t)&apl_ops },
1541	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT_D, 0, (kernel_ulong_t)&dnv_ops },
1542	{ }
1543};
1544MODULE_DEVICE_TABLE(x86cpu, pnd2_cpuids);
1545
1546static int __init pnd2_init(void)
1547{
1548	const struct x86_cpu_id *id;
1549	const char *owner;
1550	int rc;
1551
1552	edac_dbg(2, "\n");
1553
1554	owner = edac_get_owner();
1555	if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
1556		return -EBUSY;
1557
1558	id = x86_match_cpu(pnd2_cpuids);
1559	if (!id)
1560		return -ENODEV;
1561
1562	ops = (struct dunit_ops *)id->driver_data;
1563
1564	if (ops->type == APL) {
1565		p2sb_bus = pci_find_bus(0, 0);
1566		if (!p2sb_bus)
1567			return -ENODEV;
1568	}
1569
1570	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
1571	opstate_init();
1572
1573	rc = pnd2_probe();
1574	if (rc < 0) {
1575		pnd2_printk(KERN_ERR, "Failed to register device with error %d.\n", rc);
1576		return rc;
1577	}
1578
1579	if (!pnd2_mci)
1580		return -ENODEV;
1581
1582	mce_register_decode_chain(&pnd2_mce_dec);
1583	setup_pnd2_debug();
1584
1585	return 0;
1586}
1587
1588static void __exit pnd2_exit(void)
1589{
1590	edac_dbg(2, "\n");
1591	teardown_pnd2_debug();
1592	mce_unregister_decode_chain(&pnd2_mce_dec);
1593	pnd2_remove();
1594}
1595
1596module_init(pnd2_init);
1597module_exit(pnd2_exit);
1598
1599module_param(edac_op_state, int, 0444);
1600MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
1601
1602MODULE_LICENSE("GPL v2");
1603MODULE_AUTHOR("Tony Luck");
1604MODULE_DESCRIPTION("MC Driver for Intel SoC using Pondicherry memory controller");