   1// SPDX-License-Identifier: GPL-2.0-only
   2/* Intel Sandy Bridge -EN/-EP/-EX Memory Controller kernel module
   3 *
   4 * This driver supports the memory controllers found on the Intel
   5 * processor family Sandy Bridge.
   6 *
   7 * Copyright (c) 2011 by:
   8 *	 Mauro Carvalho Chehab
   9 */
  10
  11#include <linux/module.h>
  12#include <linux/init.h>
  13#include <linux/pci.h>
  14#include <linux/pci_ids.h>
  15#include <linux/slab.h>
  16#include <linux/delay.h>
  17#include <linux/edac.h>
  18#include <linux/mmzone.h>
  19#include <linux/smp.h>
  20#include <linux/bitmap.h>
  21#include <linux/math64.h>
  22#include <linux/mod_devicetable.h>
  23#include <asm/cpu_device_id.h>
  24#include <asm/intel-family.h>
  25#include <asm/processor.h>
  26#include <asm/mce.h>
  27
  28#include "edac_module.h"
  29
  30/* Static vars */
  31static LIST_HEAD(sbridge_edac_list);
  32static char sb_msg[256];
  33static char sb_msg_full[512];
  34
  35/*
  36 * Alter this version for the module when modifications are made
  37 */
  38#define SBRIDGE_REVISION    " Ver: 1.1.2 "
  39#define EDAC_MOD_STR	    "sb_edac"
  40
  41/*
  42 * Debug macros
  43 */
  44#define sbridge_printk(level, fmt, arg...)			\
  45	edac_printk(level, "sbridge", fmt, ##arg)
  46
  47#define sbridge_mc_printk(mci, level, fmt, arg...)		\
  48	edac_mc_chipset_printk(mci, level, "sbridge", fmt, ##arg)
  49
  50/*
  51 * Get a bit field at register value <v>, from bit <lo> to bit <hi>
  52 */
  53#define GET_BITFIELD(v, lo, hi)	\
  54	(((v) & GENMASK_ULL(hi, lo)) >> (lo))
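
/*
 * Illustrative example (arbitrary value, not from any register dump):
 * for v = 0x12345678,
 *	GET_BITFIELD(v, 8, 15) == (0x12345678 & GENMASK_ULL(15, 8)) >> 8
 *			       == 0x5600 >> 8 == 0x56
 * i.e. bits hi..lo are extracted and right-justified.
 */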
  55
  56/* Devices 12 Function 6, Offsets 0x80 to 0xcc */
  57static const u32 sbridge_dram_rule[] = {
  58	0x80, 0x88, 0x90, 0x98, 0xa0,
  59	0xa8, 0xb0, 0xb8, 0xc0, 0xc8,
  60};
  61
  62static const u32 ibridge_dram_rule[] = {
  63	0x60, 0x68, 0x70, 0x78, 0x80,
  64	0x88, 0x90, 0x98, 0xa0,	0xa8,
  65	0xb0, 0xb8, 0xc0, 0xc8, 0xd0,
  66	0xd8, 0xe0, 0xe8, 0xf0, 0xf8,
  67};
  68
  69static const u32 knl_dram_rule[] = {
  70	0x60, 0x68, 0x70, 0x78, 0x80, /* 0-4 */
  71	0x88, 0x90, 0x98, 0xa0, 0xa8, /* 5-9 */
  72	0xb0, 0xb8, 0xc0, 0xc8, 0xd0, /* 10-14 */
  73	0xd8, 0xe0, 0xe8, 0xf0, 0xf8, /* 15-19 */
  74	0x100, 0x108, 0x110, 0x118,   /* 20-23 */
  75};
  76
  77#define DRAM_RULE_ENABLE(reg)	GET_BITFIELD(reg, 0,  0)
  78#define A7MODE(reg)		GET_BITFIELD(reg, 26, 26)
  79
  80static char *show_dram_attr(u32 attr)
  81{
  82	switch (attr) {
  83		case 0:
  84			return "DRAM";
  85		case 1:
  86			return "MMCFG";
  87		case 2:
  88			return "NXM";
  89		default:
  90			return "unknown";
  91	}
  92}
  93
  94static const u32 sbridge_interleave_list[] = {
  95	0x84, 0x8c, 0x94, 0x9c, 0xa4,
  96	0xac, 0xb4, 0xbc, 0xc4, 0xcc,
  97};
  98
  99static const u32 ibridge_interleave_list[] = {
 100	0x64, 0x6c, 0x74, 0x7c, 0x84,
 101	0x8c, 0x94, 0x9c, 0xa4, 0xac,
 102	0xb4, 0xbc, 0xc4, 0xcc, 0xd4,
 103	0xdc, 0xe4, 0xec, 0xf4, 0xfc,
 104};
 105
 106static const u32 knl_interleave_list[] = {
 107	0x64, 0x6c, 0x74, 0x7c, 0x84, /* 0-4 */
 108	0x8c, 0x94, 0x9c, 0xa4, 0xac, /* 5-9 */
 109	0xb4, 0xbc, 0xc4, 0xcc, 0xd4, /* 10-14 */
 110	0xdc, 0xe4, 0xec, 0xf4, 0xfc, /* 15-19 */
 111	0x104, 0x10c, 0x114, 0x11c,   /* 20-23 */
 112};
 113#define MAX_INTERLEAVE							\
 114	(MAX_T(unsigned int, ARRAY_SIZE(sbridge_interleave_list),	\
 115	       MAX_T(unsigned int, ARRAY_SIZE(ibridge_interleave_list),	\
 116		     ARRAY_SIZE(knl_interleave_list))))
 117
 118struct interleave_pkg {
 119	unsigned char start;
 120	unsigned char end;
 121};
 122
 123static const struct interleave_pkg sbridge_interleave_pkg[] = {
 124	{ 0, 2 },
 125	{ 3, 5 },
 126	{ 8, 10 },
 127	{ 11, 13 },
 128	{ 16, 18 },
 129	{ 19, 21 },
 130	{ 24, 26 },
 131	{ 27, 29 },
 132};
 133
 134static const struct interleave_pkg ibridge_interleave_pkg[] = {
 135	{ 0, 3 },
 136	{ 4, 7 },
 137	{ 8, 11 },
 138	{ 12, 15 },
 139	{ 16, 19 },
 140	{ 20, 23 },
 141	{ 24, 27 },
 142	{ 28, 31 },
 143};
 144
 145static inline int sad_pkg(const struct interleave_pkg *table, u32 reg,
 146			  int interleave)
 147{
 148	return GET_BITFIELD(reg, table[interleave].start,
 149			    table[interleave].end);
 150}
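
/*
 * Illustrative example (arbitrary register value): with the Ivy Bridge
 * table above, interleave target 1 occupies bits 7:4, so
 *	sad_pkg(ibridge_interleave_pkg, 0x00000050, 1)
 *		== GET_BITFIELD(0x50, 4, 7) == 0x5
 */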
 151
 152/* Devices 12 Function 7 */
 153
 154#define TOLM		0x80
 155#define TOHM		0x84
 156#define HASWELL_TOLM	0xd0
 157#define HASWELL_TOHM_0	0xd4
 158#define HASWELL_TOHM_1	0xd8
 159#define KNL_TOLM	0xd0
 160#define KNL_TOHM_0	0xd4
 161#define KNL_TOHM_1	0xd8
 162
 163#define GET_TOLM(reg)		((GET_BITFIELD(reg, 0,  3) << 28) | 0x3ffffff)
 164#define GET_TOHM(reg)		((GET_BITFIELD(reg, 0, 20) << 25) | 0x3ffffff)
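
/*
 * Illustrative example (arbitrary register value): if the low nibble of
 * the TOLM register reads 0x8, then
 *	GET_TOLM(0x8) == (0x8 << 28) | 0x3ffffff == 0x83ffffff
 * i.e. the field is placed at bit 28 and the low 26 bits are filled with
 * 1s to form the limit value used by the driver.
 */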
 165
 166/* Device 13 Function 6 */
 167
 168#define SAD_TARGET	0xf0
 169
 170#define SOURCE_ID(reg)		GET_BITFIELD(reg, 9, 11)
 171
 172#define SOURCE_ID_KNL(reg)	GET_BITFIELD(reg, 12, 14)
 173
 174#define SAD_CONTROL	0xf4
 175
 176/* Device 14 function 0 */
 177
 178static const u32 tad_dram_rule[] = {
 179	0x40, 0x44, 0x48, 0x4c,
 180	0x50, 0x54, 0x58, 0x5c,
 181	0x60, 0x64, 0x68, 0x6c,
 182};
 183#define MAX_TAD	ARRAY_SIZE(tad_dram_rule)
 184
 185#define TAD_LIMIT(reg)		((GET_BITFIELD(reg, 12, 31) << 26) | 0x3ffffff)
 186#define TAD_SOCK(reg)		GET_BITFIELD(reg, 10, 11)
 187#define TAD_CH(reg)		GET_BITFIELD(reg,  8,  9)
 188#define TAD_TGT3(reg)		GET_BITFIELD(reg,  6,  7)
 189#define TAD_TGT2(reg)		GET_BITFIELD(reg,  4,  5)
 190#define TAD_TGT1(reg)		GET_BITFIELD(reg,  2,  3)
 191#define TAD_TGT0(reg)		GET_BITFIELD(reg,  0,  1)
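
/*
 * Illustrative example (arbitrary register value): if the limit field in
 * bits 31:12 of a TAD rule reads 0x4, then
 *	TAD_LIMIT(reg) == (0x4 << 26) | 0x3ffffff == 0x13ffffff
 * i.e. the rule covers addresses up to 320 MiB - 1 (64 MiB granularity).
 */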
 192
 193/* Device 15, function 0 */
 194
 195#define MCMTR			0x7c
 196#define KNL_MCMTR		0x624
 197
 198#define IS_ECC_ENABLED(mcmtr)		GET_BITFIELD(mcmtr, 2, 2)
 199#define IS_LOCKSTEP_ENABLED(mcmtr)	GET_BITFIELD(mcmtr, 1, 1)
 200#define IS_CLOSE_PG(mcmtr)		GET_BITFIELD(mcmtr, 0, 0)
 201
 202/* Device 15, function 1 */
 203
 204#define RASENABLES		0xac
 205#define IS_MIRROR_ENABLED(reg)		GET_BITFIELD(reg, 0, 0)
 206
 207/* Device 15, functions 2-5 */
 208
 209static const int mtr_regs[] = {
 210	0x80, 0x84, 0x88,
 211};
 212
 213static const int knl_mtr_reg = 0xb60;
 214
 215#define RANK_DISABLE(mtr)		GET_BITFIELD(mtr, 16, 19)
 216#define IS_DIMM_PRESENT(mtr)		GET_BITFIELD(mtr, 14, 14)
 217#define RANK_CNT_BITS(mtr)		GET_BITFIELD(mtr, 12, 13)
 218#define RANK_WIDTH_BITS(mtr)		GET_BITFIELD(mtr, 2, 4)
 219#define COL_WIDTH_BITS(mtr)		GET_BITFIELD(mtr, 0, 1)
 220
 221static const u32 tad_ch_nilv_offset[] = {
 222	0x90, 0x94, 0x98, 0x9c,
 223	0xa0, 0xa4, 0xa8, 0xac,
 224	0xb0, 0xb4, 0xb8, 0xbc,
 225};
 226#define CHN_IDX_OFFSET(reg)		GET_BITFIELD(reg, 28, 29)
 227#define TAD_OFFSET(reg)			(GET_BITFIELD(reg,  6, 25) << 26)
 228
 229static const u32 rir_way_limit[] = {
 230	0x108, 0x10c, 0x110, 0x114, 0x118,
 231};
 232#define MAX_RIR_RANGES ARRAY_SIZE(rir_way_limit)
 233
 234#define IS_RIR_VALID(reg)	GET_BITFIELD(reg, 31, 31)
 235#define RIR_WAY(reg)		GET_BITFIELD(reg, 28, 29)
 236
 237#define MAX_RIR_WAY	8
 238
 239static const u32 rir_offset[MAX_RIR_RANGES][MAX_RIR_WAY] = {
 240	{ 0x120, 0x124, 0x128, 0x12c, 0x130, 0x134, 0x138, 0x13c },
 241	{ 0x140, 0x144, 0x148, 0x14c, 0x150, 0x154, 0x158, 0x15c },
 242	{ 0x160, 0x164, 0x168, 0x16c, 0x170, 0x174, 0x178, 0x17c },
 243	{ 0x180, 0x184, 0x188, 0x18c, 0x190, 0x194, 0x198, 0x19c },
 244	{ 0x1a0, 0x1a4, 0x1a8, 0x1ac, 0x1b0, 0x1b4, 0x1b8, 0x1bc },
 245};
 246
 247#define RIR_RNK_TGT(type, reg) (((type) == BROADWELL) ? \
 248	GET_BITFIELD(reg, 20, 23) : GET_BITFIELD(reg, 16, 19))
 249
 250#define RIR_OFFSET(type, reg) (((type) == HASWELL || (type) == BROADWELL) ? \
 251	GET_BITFIELD(reg,  2, 15) : GET_BITFIELD(reg,  2, 14))
 252
 253/* Device 16, functions 2-7 */
 254
 255/*
 256 * FIXME: Implement the error count reads directly
 257 */
 258
 259#define RANK_ODD_OV(reg)		GET_BITFIELD(reg, 31, 31)
 260#define RANK_ODD_ERR_CNT(reg)		GET_BITFIELD(reg, 16, 30)
 261#define RANK_EVEN_OV(reg)		GET_BITFIELD(reg, 15, 15)
 262#define RANK_EVEN_ERR_CNT(reg)		GET_BITFIELD(reg,  0, 14)
 263
  264#if 0 /* Currently unused */
 265static const u32 correrrcnt[] = {
 266	0x104, 0x108, 0x10c, 0x110,
 267};
 268
 269static const u32 correrrthrsld[] = {
 270	0x11c, 0x120, 0x124, 0x128,
 271};
 272#endif
 273
 274#define RANK_ODD_ERR_THRSLD(reg)	GET_BITFIELD(reg, 16, 30)
 275#define RANK_EVEN_ERR_THRSLD(reg)	GET_BITFIELD(reg,  0, 14)
 276
 277
 278/* Device 17, function 0 */
 279
 280#define SB_RANK_CFG_A		0x0328
 281
 282#define IB_RANK_CFG_A		0x0320
 283
 284/*
 285 * sbridge structs
 286 */
 287
 288#define NUM_CHANNELS		6	/* Max channels per MC */
 289#define MAX_DIMMS		3	/* Max DIMMS per channel */
 290#define KNL_MAX_CHAS		38	/* KNL max num. of Cache Home Agents */
 291#define KNL_MAX_CHANNELS	6	/* KNL max num. of PCI channels */
 292#define KNL_MAX_EDCS		8	/* Embedded DRAM controllers */
 293#define CHANNEL_UNSPECIFIED	0xf	/* Intel IA32 SDM 15-14 */
 294
 295enum type {
 296	SANDY_BRIDGE,
 297	IVY_BRIDGE,
 298	HASWELL,
 299	BROADWELL,
 300	KNIGHTS_LANDING,
 301};
 302
 303enum domain {
 304	IMC0 = 0,
 305	IMC1,
 306	SOCK,
 307};
 308
 309enum mirroring_mode {
 310	NON_MIRRORING,
 311	ADDR_RANGE_MIRRORING,
 312	FULL_MIRRORING,
 313};
 314
 315struct sbridge_pvt;
 316struct sbridge_info {
 317	enum type	type;
 318	u32		mcmtr;
 319	u32		rankcfgr;
 320	u64		(*get_tolm)(struct sbridge_pvt *pvt);
 321	u64		(*get_tohm)(struct sbridge_pvt *pvt);
 322	u64		(*rir_limit)(u32 reg);
 323	u64		(*sad_limit)(u32 reg);
 324	u32		(*interleave_mode)(u32 reg);
 325	u32		(*dram_attr)(u32 reg);
 326	const u32	*dram_rule;
 327	const u32	*interleave_list;
 328	const struct interleave_pkg *interleave_pkg;
 329	u8		max_sad;
 330	u8		(*get_node_id)(struct sbridge_pvt *pvt);
 331	u8		(*get_ha)(u8 bank);
 332	enum mem_type	(*get_memory_type)(struct sbridge_pvt *pvt);
 333	enum dev_type	(*get_width)(struct sbridge_pvt *pvt, u32 mtr);
 334	struct pci_dev	*pci_vtd;
 335};
 336
 337struct sbridge_channel {
 338	u32		ranks;
 339	u32		dimms;
 340	struct dimm {
 341		u32 rowbits;
 342		u32 colbits;
 343		u32 bank_xor_enable;
 344		u32 amap_fine;
 345	} dimm[MAX_DIMMS];
 346};
 347
 348struct pci_id_descr {
 349	int			dev_id;
 350	int			optional;
 351	enum domain		dom;
 352};
 353
 354struct pci_id_table {
 355	const struct pci_id_descr	*descr;
 356	int				n_devs_per_imc;
 357	int				n_devs_per_sock;
 358	int				n_imcs_per_sock;
 359	enum type			type;
 360};
 361
 362struct sbridge_dev {
 363	struct list_head	list;
 364	int			seg;
 365	u8			bus, mc;
 366	u8			node_id, source_id;
 367	struct pci_dev		**pdev;
 368	enum domain		dom;
 369	int			n_devs;
 370	int			i_devs;
 371	struct mem_ctl_info	*mci;
 372};
 373
 374struct knl_pvt {
 375	struct pci_dev          *pci_cha[KNL_MAX_CHAS];
 376	struct pci_dev          *pci_channel[KNL_MAX_CHANNELS];
 377	struct pci_dev          *pci_mc0;
 378	struct pci_dev          *pci_mc1;
 379	struct pci_dev          *pci_mc0_misc;
 380	struct pci_dev          *pci_mc1_misc;
 381	struct pci_dev          *pci_mc_info; /* tolm, tohm */
 382};
 383
 384struct sbridge_pvt {
 385	/* Devices per socket */
 386	struct pci_dev		*pci_ddrio;
 387	struct pci_dev		*pci_sad0, *pci_sad1;
 388	struct pci_dev		*pci_br0, *pci_br1;
 389	/* Devices per memory controller */
 390	struct pci_dev		*pci_ha, *pci_ta, *pci_ras;
 391	struct pci_dev		*pci_tad[NUM_CHANNELS];
 392
 393	struct sbridge_dev	*sbridge_dev;
 394
 395	struct sbridge_info	info;
 396	struct sbridge_channel	channel[NUM_CHANNELS];
 397
 398	/* Memory type detection */
 399	bool			is_cur_addr_mirrored, is_lockstep, is_close_pg;
 400	bool			is_chan_hash;
 401	enum mirroring_mode	mirror_mode;
 402
 403	/* Memory description */
 404	u64			tolm, tohm;
 405	struct knl_pvt knl;
 406};
 407
 408#define PCI_DESCR(device_id, opt, domain)	\
 409	.dev_id = (device_id),		\
 410	.optional = opt,	\
 411	.dom = domain
 412
 413static const struct pci_id_descr pci_dev_descr_sbridge[] = {
 414		/* Processor Home Agent */
 415	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0,   0, IMC0) },
 416
 417		/* Memory controller */
 418	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA,    0, IMC0) },
 419	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS,   0, IMC0) },
 420	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0,  0, IMC0) },
 421	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1,  0, IMC0) },
 422	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2,  0, IMC0) },
 423	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3,  0, IMC0) },
 424	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO, 1, SOCK) },
 425
 426		/* System Address Decoder */
 427	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0,      0, SOCK) },
 428	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1,      0, SOCK) },
 429
 430		/* Broadcast Registers */
 431	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_BR,        0, SOCK) },
 432};
 433
 434#define PCI_ID_TABLE_ENTRY(A, N, M, T) {	\
 435	.descr = A,			\
 436	.n_devs_per_imc = N,	\
 437	.n_devs_per_sock = ARRAY_SIZE(A),	\
 438	.n_imcs_per_sock = M,	\
 439	.type = T			\
 440}
 441
 442static const struct pci_id_table pci_dev_descr_sbridge_table[] = {
 443	PCI_ID_TABLE_ENTRY(pci_dev_descr_sbridge, ARRAY_SIZE(pci_dev_descr_sbridge), 1, SANDY_BRIDGE),
 444	{ NULL, }
 445};
 446
  447/* This changes depending on whether the part has 1HA or 2HA:
 448 * 1HA:
 449 *	0x0eb8 (17.0) is DDRIO0
 450 * 2HA:
 451 *	0x0ebc (17.4) is DDRIO0
 452 */
 453#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0	0x0eb8
 454#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0	0x0ebc
 455
 456/* pci ids */
 457#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0		0x0ea0
 458#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA		0x0ea8
 459#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS		0x0e71
 460#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0	0x0eaa
 461#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1	0x0eab
 462#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2	0x0eac
 463#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3	0x0ead
 464#define PCI_DEVICE_ID_INTEL_IBRIDGE_SAD			0x0ec8
 465#define PCI_DEVICE_ID_INTEL_IBRIDGE_BR0			0x0ec9
 466#define PCI_DEVICE_ID_INTEL_IBRIDGE_BR1			0x0eca
 467#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1		0x0e60
 468#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA		0x0e68
 469#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS		0x0e79
 470#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0	0x0e6a
 471#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1	0x0e6b
 472#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD2	0x0e6c
 473#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD3	0x0e6d
 474
 475static const struct pci_id_descr pci_dev_descr_ibridge[] = {
 476		/* Processor Home Agent */
 477	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0,        0, IMC0) },
 478	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1,        1, IMC1) },
 479
 480		/* Memory controller */
 481	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA,     0, IMC0) },
 482	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS,    0, IMC0) },
 483	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0,   0, IMC0) },
 484	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1,   0, IMC0) },
 485	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2,   0, IMC0) },
 486	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3,   0, IMC0) },
 487
 488		/* Optional, mode 2HA */
 489	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA,     1, IMC1) },
 490	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS,    1, IMC1) },
 491	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0,   1, IMC1) },
 492	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1,   1, IMC1) },
 493	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD2,   1, IMC1) },
 494	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD3,   1, IMC1) },
 495
 496	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0, 1, SOCK) },
 497	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0, 1, SOCK) },
 498
 499		/* System Address Decoder */
 500	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_SAD,            0, SOCK) },
 501
 502		/* Broadcast Registers */
 503	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_BR0,            1, SOCK) },
 504	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_BR1,            0, SOCK) },
 505
 506};
 507
 508static const struct pci_id_table pci_dev_descr_ibridge_table[] = {
 509	PCI_ID_TABLE_ENTRY(pci_dev_descr_ibridge, 12, 2, IVY_BRIDGE),
 510	{ NULL, }
 511};
 512
 513/* Haswell support */
 514/* EN processor:
 515 *	- 1 IMC
 516 *	- 3 DDR3 channels, 2 DPC per channel
 517 * EP processor:
 518 *	- 1 or 2 IMC
 519 *	- 4 DDR4 channels, 3 DPC per channel
 520 * EP 4S processor:
 521 *	- 2 IMC
 522 *	- 4 DDR4 channels, 3 DPC per channel
 523 * EX processor:
 524 *	- 2 IMC
 525 *	- each IMC interfaces with a SMI 2 channel
 526 *	- each SMI channel interfaces with a scalable memory buffer
 527 *	- each scalable memory buffer supports 4 DDR3/DDR4 channels, 3 DPC
 528 */
 529#define HASWELL_DDRCRCLKCONTROLS 0xa10 /* Ditto on Broadwell */
 530#define HASWELL_HASYSDEFEATURE2 0x84
 531#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_VTD_MISC 0x2f28
 532#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0	0x2fa0
 533#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1	0x2f60
 534#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA	0x2fa8
 535#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TM	0x2f71
 536#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA	0x2f68
 537#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TM	0x2f79
 538#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD0 0x2ffc
 539#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD1 0x2ffd
 540#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0 0x2faa
 541#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD1 0x2fab
 542#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD2 0x2fac
 543#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3 0x2fad
 544#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0 0x2f6a
 545#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD1 0x2f6b
 546#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD2 0x2f6c
 547#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD3 0x2f6d
 548#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0 0x2fbd
 549#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO1 0x2fbf
 550#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO2 0x2fb9
 551#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO3 0x2fbb
 552static const struct pci_id_descr pci_dev_descr_haswell[] = {
 553	/* first item must be the HA */
 554	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0,      0, IMC0) },
 555	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1,      1, IMC1) },
 556
 557	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA,   0, IMC0) },
 558	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TM,   0, IMC0) },
 559	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0, 0, IMC0) },
 560	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD1, 0, IMC0) },
 561	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD2, 1, IMC0) },
 562	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3, 1, IMC0) },
 563
 564	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA,   1, IMC1) },
 565	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TM,   1, IMC1) },
 566	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0, 1, IMC1) },
 567	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD1, 1, IMC1) },
 568	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD2, 1, IMC1) },
 569	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD3, 1, IMC1) },
 570
 571	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD0, 0, SOCK) },
 572	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD1, 0, SOCK) },
 573	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0,   1, SOCK) },
 574	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO1,   1, SOCK) },
 575	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO2,   1, SOCK) },
 576	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO3,   1, SOCK) },
 577};
 578
 579static const struct pci_id_table pci_dev_descr_haswell_table[] = {
 580	PCI_ID_TABLE_ENTRY(pci_dev_descr_haswell, 13, 2, HASWELL),
 581	{ NULL, }
 582};
 583
 584/* Knight's Landing Support */
 585/*
 586 * KNL's memory channels are swizzled between memory controllers.
 587 * MC0 is mapped to CH3,4,5 and MC1 is mapped to CH0,1,2
 588 */
 589#define knl_channel_remap(mc, chan) ((mc) ? (chan) : (chan) + 3)
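
/*
 * Illustrative example of the swizzle above:
 *	knl_channel_remap(0, 1) == 4	(MC0, second channel -> CH4)
 *	knl_channel_remap(1, 1) == 1	(MC1, second channel -> CH1)
 */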
 590
 591/* Memory controller, TAD tables, error injection - 2-8-0, 2-9-0 (2 of these) */
 592#define PCI_DEVICE_ID_INTEL_KNL_IMC_MC       0x7840
 593/* DRAM channel stuff; bank addrs, dimmmtr, etc.. 2-8-2 - 2-9-4 (6 of these) */
 594#define PCI_DEVICE_ID_INTEL_KNL_IMC_CHAN     0x7843
 595/* kdrwdbu TAD limits/offsets, MCMTR - 2-10-1, 2-11-1 (2 of these) */
 596#define PCI_DEVICE_ID_INTEL_KNL_IMC_TA       0x7844
 597/* CHA broadcast registers, dram rules - 1-29-0 (1 of these) */
 598#define PCI_DEVICE_ID_INTEL_KNL_IMC_SAD0     0x782a
 599/* SAD target - 1-29-1 (1 of these) */
 600#define PCI_DEVICE_ID_INTEL_KNL_IMC_SAD1     0x782b
 601/* Caching / Home Agent */
 602#define PCI_DEVICE_ID_INTEL_KNL_IMC_CHA      0x782c
 603/* Device with TOLM and TOHM, 0-5-0 (1 of these) */
 604#define PCI_DEVICE_ID_INTEL_KNL_IMC_TOLHM    0x7810
 605
 606/*
 607 * KNL differs from SB, IB, and Haswell in that it has multiple
 608 * instances of the same device with the same device ID, so we handle that
 609 * by creating as many copies in the table as we expect to find.
  610 * (Entries with the same device ID must be grouped together.)
 611 */
 612
 613static const struct pci_id_descr pci_dev_descr_knl[] = {
 614	[0 ... 1]   = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_MC,    0, IMC0)},
 615	[2 ... 7]   = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_CHAN,  0, IMC0) },
 616	[8]	    = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_TA,    0, IMC0) },
 617	[9]	    = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_TOLHM, 0, IMC0) },
 618	[10]	    = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_SAD0,  0, SOCK) },
 619	[11]	    = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_SAD1,  0, SOCK) },
 620	[12 ... 49] = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_CHA,   0, SOCK) },
 621};
 622
 623static const struct pci_id_table pci_dev_descr_knl_table[] = {
 624	PCI_ID_TABLE_ENTRY(pci_dev_descr_knl, ARRAY_SIZE(pci_dev_descr_knl), 1, KNIGHTS_LANDING),
 625	{ NULL, }
 626};
 627
 628/*
 629 * Broadwell support
 630 *
 631 * DE processor:
 632 *	- 1 IMC
 633 *	- 2 DDR3 channels, 2 DPC per channel
 634 * EP processor:
 635 *	- 1 or 2 IMC
 636 *	- 4 DDR4 channels, 3 DPC per channel
 637 * EP 4S processor:
 638 *	- 2 IMC
 639 *	- 4 DDR4 channels, 3 DPC per channel
 640 * EX processor:
 641 *	- 2 IMC
 642 *	- each IMC interfaces with a SMI 2 channel
 643 *	- each SMI channel interfaces with a scalable memory buffer
 644 *	- each scalable memory buffer supports 4 DDR3/DDR4 channels, 3 DPC
 645 */
 646#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_VTD_MISC 0x6f28
 647#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0	0x6fa0
 648#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1	0x6f60
 649#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA	0x6fa8
 650#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TM	0x6f71
 651#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TA	0x6f68
 652#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TM	0x6f79
 653#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD0 0x6ffc
 654#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD1 0x6ffd
 655#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD0 0x6faa
 656#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD1 0x6fab
 657#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD2 0x6fac
 658#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD3 0x6fad
 659#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD0 0x6f6a
 660#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD1 0x6f6b
 661#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD2 0x6f6c
 662#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD3 0x6f6d
 663#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_DDRIO0 0x6faf
 664
 665static const struct pci_id_descr pci_dev_descr_broadwell[] = {
 666	/* first item must be the HA */
 667	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0,      0, IMC0) },
 668	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1,      1, IMC1) },
 669
 670	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA,   0, IMC0) },
 671	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TM,   0, IMC0) },
 672	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD0, 0, IMC0) },
 673	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD1, 0, IMC0) },
 674	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD2, 1, IMC0) },
 675	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD3, 1, IMC0) },
 676
 677	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TA,   1, IMC1) },
 678	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TM,   1, IMC1) },
 679	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD0, 1, IMC1) },
 680	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD1, 1, IMC1) },
 681	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD2, 1, IMC1) },
 682	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD3, 1, IMC1) },
 683
 684	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD0, 0, SOCK) },
 685	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD1, 0, SOCK) },
 686	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_DDRIO0,   1, SOCK) },
 687};
 688
 689static const struct pci_id_table pci_dev_descr_broadwell_table[] = {
 690	PCI_ID_TABLE_ENTRY(pci_dev_descr_broadwell, 10, 2, BROADWELL),
 691	{ NULL, }
 692};
 693
 694
 695/****************************************************************************
 696			Ancillary status routines
 697 ****************************************************************************/
 698
 699static inline int numrank(enum type type, u32 mtr)
 700{
 701	int ranks = (1 << RANK_CNT_BITS(mtr));
 702	int max = 4;
 703
 704	if (type == HASWELL || type == BROADWELL || type == KNIGHTS_LANDING)
 705		max = 8;
 706
 707	if (ranks > max) {
 708		edac_dbg(0, "Invalid number of ranks: %d (max = %i) raw value = %x (%04x)\n",
 709			 ranks, max, (unsigned int)RANK_CNT_BITS(mtr), mtr);
 710		return -EINVAL;
 711	}
 712
 713	return ranks;
 714}
 715
 716static inline int numrow(u32 mtr)
 717{
 718	int rows = (RANK_WIDTH_BITS(mtr) + 12);
 719
 720	if (rows < 13 || rows > 18) {
  721		edac_dbg(0, "Invalid number of rows: %d (should be between 13 and 18) raw value = %x (%04x)\n",
 722			 rows, (unsigned int)RANK_WIDTH_BITS(mtr), mtr);
 723		return -EINVAL;
 724	}
 725
 726	return 1 << rows;
 727}
 728
 729static inline int numcol(u32 mtr)
 730{
 731	int cols = (COL_WIDTH_BITS(mtr) + 10);
 732
 733	if (cols > 12) {
  734		edac_dbg(0, "Invalid number of cols: %d (max = 12) raw value = %x (%04x)\n",
 735			 cols, (unsigned int)COL_WIDTH_BITS(mtr), mtr);
 736		return -EINVAL;
 737	}
 738
 739	return 1 << cols;
 740}
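
/*
 * Illustrative decode of an MTR value (hypothetical raw field values):
 *	RANK_WIDTH_BITS == 2  ->  numrow(): 1 << (2 + 12) == 16384 rows
 *	COL_WIDTH_BITS  == 0  ->  numcol(): 1 << (0 + 10) == 1024 columns
 */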
 741
 742static struct sbridge_dev *get_sbridge_dev(int seg, u8 bus, enum domain dom,
 743					   int multi_bus,
 744					   struct sbridge_dev *prev)
 745{
 746	struct sbridge_dev *sbridge_dev;
 747
 748	/*
 749	 * If we have devices scattered across several busses that pertain
 750	 * to the same memory controller, we'll lump them all together.
 751	 */
 752	if (multi_bus) {
 753		return list_first_entry_or_null(&sbridge_edac_list,
 754				struct sbridge_dev, list);
 755	}
 756
 757	sbridge_dev = list_entry(prev ? prev->list.next
 758				      : sbridge_edac_list.next, struct sbridge_dev, list);
 759
 760	list_for_each_entry_from(sbridge_dev, &sbridge_edac_list, list) {
 761		if ((sbridge_dev->seg == seg) && (sbridge_dev->bus == bus) &&
 762				(dom == SOCK || dom == sbridge_dev->dom))
 763			return sbridge_dev;
 764	}
 765
 766	return NULL;
 767}
 768
 769static struct sbridge_dev *alloc_sbridge_dev(int seg, u8 bus, enum domain dom,
 770					     const struct pci_id_table *table)
 771{
 772	struct sbridge_dev *sbridge_dev;
 773
 774	sbridge_dev = kzalloc(sizeof(*sbridge_dev), GFP_KERNEL);
 775	if (!sbridge_dev)
 776		return NULL;
 777
 778	sbridge_dev->pdev = kcalloc(table->n_devs_per_imc,
 779				    sizeof(*sbridge_dev->pdev),
 780				    GFP_KERNEL);
 781	if (!sbridge_dev->pdev) {
 782		kfree(sbridge_dev);
 783		return NULL;
 784	}
 785
 786	sbridge_dev->seg = seg;
 787	sbridge_dev->bus = bus;
 788	sbridge_dev->dom = dom;
 789	sbridge_dev->n_devs = table->n_devs_per_imc;
 790	list_add_tail(&sbridge_dev->list, &sbridge_edac_list);
 791
 792	return sbridge_dev;
 793}
 794
 795static void free_sbridge_dev(struct sbridge_dev *sbridge_dev)
 796{
 797	list_del(&sbridge_dev->list);
 798	kfree(sbridge_dev->pdev);
 799	kfree(sbridge_dev);
 800}
 801
 802static u64 sbridge_get_tolm(struct sbridge_pvt *pvt)
 803{
 804	u32 reg;
 805
 806	/* Address range is 32:28 */
 807	pci_read_config_dword(pvt->pci_sad1, TOLM, &reg);
 808	return GET_TOLM(reg);
 809}
 810
 811static u64 sbridge_get_tohm(struct sbridge_pvt *pvt)
 812{
 813	u32 reg;
 814
 815	pci_read_config_dword(pvt->pci_sad1, TOHM, &reg);
 816	return GET_TOHM(reg);
 817}
 818
 819static u64 ibridge_get_tolm(struct sbridge_pvt *pvt)
 820{
 821	u32 reg;
 822
 823	pci_read_config_dword(pvt->pci_br1, TOLM, &reg);
 824
 825	return GET_TOLM(reg);
 826}
 827
 828static u64 ibridge_get_tohm(struct sbridge_pvt *pvt)
 829{
 830	u32 reg;
 831
 832	pci_read_config_dword(pvt->pci_br1, TOHM, &reg);
 833
 834	return GET_TOHM(reg);
 835}
 836
 837static u64 rir_limit(u32 reg)
 838{
 839	return ((u64)GET_BITFIELD(reg,  1, 10) << 29) | 0x1fffffff;
 840}
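
/*
 * Illustrative example (arbitrary register value): a RIR limit field of 1
 * gives rir_limit(reg) == (1ULL << 29) | 0x1fffffff == 0x3fffffff,
 * i.e. the rank interleave range ends at 1 GiB - 1 (512 MiB granularity).
 */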
 841
 842static u64 sad_limit(u32 reg)
 843{
 844	return (GET_BITFIELD(reg, 6, 25) << 26) | 0x3ffffff;
 845}
 846
 847static u32 interleave_mode(u32 reg)
 848{
 849	return GET_BITFIELD(reg, 1, 1);
 850}
 851
 852static u32 dram_attr(u32 reg)
 853{
 854	return GET_BITFIELD(reg, 2, 3);
 855}
 856
 857static u64 knl_sad_limit(u32 reg)
 858{
 859	return (GET_BITFIELD(reg, 7, 26) << 26) | 0x3ffffff;
 860}
 861
 862static u32 knl_interleave_mode(u32 reg)
 863{
 864	return GET_BITFIELD(reg, 1, 2);
 865}
 866
 867static const char * const knl_intlv_mode[] = {
 868	"[8:6]", "[10:8]", "[14:12]", "[32:30]"
 869};
 870
 871static const char *get_intlv_mode_str(u32 reg, enum type t)
 872{
 873	if (t == KNIGHTS_LANDING)
 874		return knl_intlv_mode[knl_interleave_mode(reg)];
 875	else
 876		return interleave_mode(reg) ? "[8:6]" : "[8:6]XOR[18:16]";
 877}
 878
 879static u32 dram_attr_knl(u32 reg)
 880{
 881	return GET_BITFIELD(reg, 3, 4);
 882}
 883
 884
 885static enum mem_type get_memory_type(struct sbridge_pvt *pvt)
 886{
 887	u32 reg;
 888	enum mem_type mtype;
 889
 890	if (pvt->pci_ddrio) {
 891		pci_read_config_dword(pvt->pci_ddrio, pvt->info.rankcfgr,
 892				      &reg);
 893		if (GET_BITFIELD(reg, 11, 11))
 894			/* FIXME: Can also be LRDIMM */
 895			mtype = MEM_RDDR3;
 896		else
 897			mtype = MEM_DDR3;
 898	} else
 899		mtype = MEM_UNKNOWN;
 900
 901	return mtype;
 902}
 903
 904static enum mem_type haswell_get_memory_type(struct sbridge_pvt *pvt)
 905{
 906	u32 reg;
 907	bool registered = false;
 908	enum mem_type mtype = MEM_UNKNOWN;
 909
 910	if (!pvt->pci_ddrio)
 911		goto out;
 912
 913	pci_read_config_dword(pvt->pci_ddrio,
 914			      HASWELL_DDRCRCLKCONTROLS, &reg);
 915	/* Is_Rdimm */
 916	if (GET_BITFIELD(reg, 16, 16))
 917		registered = true;
 918
 919	pci_read_config_dword(pvt->pci_ta, MCMTR, &reg);
 920	if (GET_BITFIELD(reg, 14, 14)) {
 921		if (registered)
 922			mtype = MEM_RDDR4;
 923		else
 924			mtype = MEM_DDR4;
 925	} else {
 926		if (registered)
 927			mtype = MEM_RDDR3;
 928		else
 929			mtype = MEM_DDR3;
 930	}
 931
 932out:
 933	return mtype;
 934}
 935
 936static enum dev_type knl_get_width(struct sbridge_pvt *pvt, u32 mtr)
 937{
 938	/* for KNL value is fixed */
 939	return DEV_X16;
 940}
 941
 942static enum dev_type sbridge_get_width(struct sbridge_pvt *pvt, u32 mtr)
 943{
 944	/* there's no way to figure out */
 945	return DEV_UNKNOWN;
 946}
 947
 948static enum dev_type __ibridge_get_width(u32 mtr)
 949{
 950	enum dev_type type = DEV_UNKNOWN;
 951
 952	switch (mtr) {
 953	case 2:
 954		type = DEV_X16;
 955		break;
 956	case 1:
 957		type = DEV_X8;
 958		break;
 959	case 0:
 960		type = DEV_X4;
 961		break;
 962	}
 963
 964	return type;
 965}
 966
 967static enum dev_type ibridge_get_width(struct sbridge_pvt *pvt, u32 mtr)
 968{
 969	/*
  970	 * ddr3_width in the documentation, but also valid for DDR4 on
 971	 * Haswell
 972	 */
 973	return __ibridge_get_width(GET_BITFIELD(mtr, 7, 8));
 974}
 975
 976static enum dev_type broadwell_get_width(struct sbridge_pvt *pvt, u32 mtr)
 977{
  978	/* ddr3_width in the documentation, but also valid for DDR4 */
 979	return __ibridge_get_width(GET_BITFIELD(mtr, 8, 9));
 980}
 981
 982static enum mem_type knl_get_memory_type(struct sbridge_pvt *pvt)
 983{
 984	/* DDR4 RDIMMS and LRDIMMS are supported */
 985	return MEM_RDDR4;
 986}
 987
 988static u8 get_node_id(struct sbridge_pvt *pvt)
 989{
 990	u32 reg;
 991	pci_read_config_dword(pvt->pci_br0, SAD_CONTROL, &reg);
 992	return GET_BITFIELD(reg, 0, 2);
 993}
 994
 995static u8 haswell_get_node_id(struct sbridge_pvt *pvt)
 996{
 997	u32 reg;
 998
 999	pci_read_config_dword(pvt->pci_sad1, SAD_CONTROL, &reg);
1000	return GET_BITFIELD(reg, 0, 3);
1001}
1002
1003static u8 knl_get_node_id(struct sbridge_pvt *pvt)
1004{
1005	u32 reg;
1006
1007	pci_read_config_dword(pvt->pci_sad1, SAD_CONTROL, &reg);
1008	return GET_BITFIELD(reg, 0, 2);
1009}
1010
1011/*
1012 * Use the reporting bank number to determine which memory
1013 * controller (also known as "ha" for "home agent"). Sandy
1014 * Bridge only has one memory controller per socket, so the
1015 * answer is always zero.
1016 */
1017static u8 sbridge_get_ha(u8 bank)
1018{
1019	return 0;
1020}
1021
1022/*
1023 * On Ivy Bridge, Haswell and Broadwell the error may be in a
1024 * home agent bank (7, 8), or one of the per-channel memory
1025 * controller banks (9 .. 16).
1026 */
1027static u8 ibridge_get_ha(u8 bank)
1028{
1029	switch (bank) {
1030	case 7 ... 8:
1031		return bank - 7;
1032	case 9 ... 16:
1033		return (bank - 9) / 4;
1034	default:
1035		return 0xff;
1036	}
1037}
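
/*
 * Illustrative examples of the bank -> home agent mapping above:
 *	ibridge_get_ha(7)  == 0,  ibridge_get_ha(8)  == 1
 *	ibridge_get_ha(10) == (10 - 9) / 4 == 0
 *	ibridge_get_ha(15) == (15 - 9) / 4 == 1
 */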
1038
1039/* Not used, but included for safety/symmetry */
1040static u8 knl_get_ha(u8 bank)
1041{
1042	return 0xff;
1043}
1044
1045static u64 haswell_get_tolm(struct sbridge_pvt *pvt)
1046{
1047	u32 reg;
1048
1049	pci_read_config_dword(pvt->info.pci_vtd, HASWELL_TOLM, &reg);
1050	return (GET_BITFIELD(reg, 26, 31) << 26) | 0x3ffffff;
1051}
1052
1053static u64 haswell_get_tohm(struct sbridge_pvt *pvt)
1054{
1055	u64 rc;
1056	u32 reg;
1057
1058	pci_read_config_dword(pvt->info.pci_vtd, HASWELL_TOHM_0, &reg);
1059	rc = GET_BITFIELD(reg, 26, 31);
1060	pci_read_config_dword(pvt->info.pci_vtd, HASWELL_TOHM_1, &reg);
1061	rc = ((reg << 6) | rc) << 26;
1062
1063	return rc | 0x3ffffff;
1064}
1065
1066static u64 knl_get_tolm(struct sbridge_pvt *pvt)
1067{
1068	u32 reg;
1069
1070	pci_read_config_dword(pvt->knl.pci_mc_info, KNL_TOLM, &reg);
1071	return (GET_BITFIELD(reg, 26, 31) << 26) | 0x3ffffff;
1072}
1073
1074static u64 knl_get_tohm(struct sbridge_pvt *pvt)
1075{
1076	u64 rc;
1077	u32 reg_lo, reg_hi;
1078
1079	pci_read_config_dword(pvt->knl.pci_mc_info, KNL_TOHM_0, &reg_lo);
1080	pci_read_config_dword(pvt->knl.pci_mc_info, KNL_TOHM_1, &reg_hi);
1081	rc = ((u64)reg_hi << 32) | reg_lo;
1082	return rc | 0x3ffffff;
1083}
1084
1085
1086static u64 haswell_rir_limit(u32 reg)
1087{
1088	return (((u64)GET_BITFIELD(reg,  1, 11) + 1) << 29) - 1;
1089}
1090
1091static inline u8 sad_pkg_socket(u8 pkg)
1092{
1093	/* on Ivy Bridge, nodeID is SASS, where A is HA and S is node id */
1094	return ((pkg >> 3) << 2) | (pkg & 0x3);
1095}
1096
1097static inline u8 sad_pkg_ha(u8 pkg)
1098{
1099	return (pkg >> 2) & 0x1;
1100}
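
/*
 * Illustrative example (hypothetical package value): for pkg = 0xe
 * (binary 1110, nodeID format "SASS" with the HA bit in bit 2):
 *	sad_pkg_socket(0xe) == ((0xe >> 3) << 2) | (0xe & 0x3) == 6
 *	sad_pkg_ha(0xe)     == (0xe >> 2) & 0x1 == 1
 */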
1101
1102static int haswell_chan_hash(int idx, u64 addr)
1103{
1104	int i;
1105
1106	/*
1107	 * XOR even bits from 12:26 to bit0 of idx,
1108	 *     odd bits from 13:27 to bit1
1109	 */
1110	for (i = 12; i < 28; i += 2)
1111		idx ^= (addr >> i) & 3;
1112
1113	return idx;
1114}
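
/*
 * Illustrative example (arbitrary addresses): for idx = 0,
 *	addr = 0x1000 (only bit 12 set)  ->  haswell_chan_hash() == 1
 *	addr = 0x2000 (only bit 13 set)  ->  haswell_chan_hash() == 2
 * Each pair of address bits 13:12, 15:14, ..., 27:26 is XORed into idx.
 */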
1115
1116/* Low bits of TAD limit, and some metadata. */
1117static const u32 knl_tad_dram_limit_lo[] = {
1118	0x400, 0x500, 0x600, 0x700,
1119	0x800, 0x900, 0xa00, 0xb00,
1120};
1121
1122/* Low bits of TAD offset. */
1123static const u32 knl_tad_dram_offset_lo[] = {
1124	0x404, 0x504, 0x604, 0x704,
1125	0x804, 0x904, 0xa04, 0xb04,
1126};
1127
1128/* High 16 bits of TAD limit and offset. */
1129static const u32 knl_tad_dram_hi[] = {
1130	0x408, 0x508, 0x608, 0x708,
1131	0x808, 0x908, 0xa08, 0xb08,
1132};
1133
1134/* Number of ways a tad entry is interleaved. */
1135static const u32 knl_tad_ways[] = {
1136	8, 6, 4, 3, 2, 1,
1137};
1138
1139/*
1140 * Retrieve the n'th Target Address Decode table entry
1141 * from the memory controller's TAD table.
1142 *
1143 * @pvt:	driver private data
1144 * @entry:	which entry you want to retrieve
1145 * @mc:		which memory controller (0 or 1)
1146 * @offset:	output tad range offset
1147 * @limit:	output address of first byte above tad range
1148 * @ways:	output number of interleave ways
1149 *
1150 * The offset value has curious semantics.  It's a sort of running total
1151 * of the sizes of all the memory regions that aren't mapped in this
1152 * tad table.
1153 */
1154static int knl_get_tad(const struct sbridge_pvt *pvt,
1155		const int entry,
1156		const int mc,
1157		u64 *offset,
1158		u64 *limit,
1159		int *ways)
1160{
1161	u32 reg_limit_lo, reg_offset_lo, reg_hi;
1162	struct pci_dev *pci_mc;
1163	int way_id;
1164
1165	switch (mc) {
1166	case 0:
1167		pci_mc = pvt->knl.pci_mc0;
1168		break;
1169	case 1:
1170		pci_mc = pvt->knl.pci_mc1;
1171		break;
1172	default:
1173		WARN_ON(1);
1174		return -EINVAL;
1175	}
1176
1177	pci_read_config_dword(pci_mc,
1178			knl_tad_dram_limit_lo[entry], &reg_limit_lo);
1179	pci_read_config_dword(pci_mc,
1180			knl_tad_dram_offset_lo[entry], &reg_offset_lo);
1181	pci_read_config_dword(pci_mc,
1182			knl_tad_dram_hi[entry], &reg_hi);
1183
1184	/* Is this TAD entry enabled? */
1185	if (!GET_BITFIELD(reg_limit_lo, 0, 0))
1186		return -ENODEV;
1187
1188	way_id = GET_BITFIELD(reg_limit_lo, 3, 5);
1189
1190	if (way_id < ARRAY_SIZE(knl_tad_ways)) {
1191		*ways = knl_tad_ways[way_id];
1192	} else {
1193		*ways = 0;
1194		sbridge_printk(KERN_ERR,
1195				"Unexpected value %d in mc_tad_limit_lo wayness field\n",
1196				way_id);
1197		return -ENODEV;
1198	}
1199
1200	/*
1201	 * The least significant 6 bits of base and limit are truncated.
1202	 * For limit, we fill the missing bits with 1s.
1203	 */
1204	*offset = ((u64) GET_BITFIELD(reg_offset_lo, 6, 31) << 6) |
1205				((u64) GET_BITFIELD(reg_hi, 0,  15) << 32);
1206	*limit = ((u64) GET_BITFIELD(reg_limit_lo,  6, 31) << 6) | 63 |
1207				((u64) GET_BITFIELD(reg_hi, 16, 31) << 32);
1208
1209	return 0;
1210}
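
/*
 * Illustrative reconstruction (hypothetical register values): with
 * reg_hi == 0 and the limit field (bits 31:6 of reg_limit_lo) == 0xffffff,
 *	*limit == (0xffffff << 6) | 63 == 0x3fffffff
 * i.e. the TAD entry ends at 1 GiB - 1; the truncated low 6 bits are
 * filled with 1s as described above.
 */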
1211
1212/* Determine which memory controller is responsible for a given channel. */
1213static int knl_channel_mc(int channel)
1214{
1215	WARN_ON(channel < 0 || channel >= 6);
1216
1217	return channel < 3 ? 1 : 0;
1218}
1219
1220/*
1221 * Get the Nth entry from EDC_ROUTE_TABLE register.
1222 * (This is the per-tile mapping of logical interleave targets to
1223 *  physical EDC modules.)
1224 *
1225 * entry 0: 0:2
1226 *       1: 3:5
1227 *       2: 6:8
1228 *       3: 9:11
1229 *       4: 12:14
1230 *       5: 15:17
1231 *       6: 18:20
1232 *       7: 21:23
1233 * reserved: 24:31
1234 */
1235static u32 knl_get_edc_route(int entry, u32 reg)
1236{
1237	WARN_ON(entry >= KNL_MAX_EDCS);
1238	return GET_BITFIELD(reg, entry*3, (entry*3)+2);
1239}
1240
1241/*
1242 * Get the Nth entry from MC_ROUTE_TABLE register.
1243 * (This is the per-tile mapping of logical interleave targets to
 1244 *  physical DRAM channels.)
1245 *
1246 * entry 0: mc 0:2   channel 18:19
1247 *       1: mc 3:5   channel 20:21
1248 *       2: mc 6:8   channel 22:23
1249 *       3: mc 9:11  channel 24:25
1250 *       4: mc 12:14 channel 26:27
1251 *       5: mc 15:17 channel 28:29
1252 * reserved: 30:31
1253 *
1254 * Though we have 3 bits to identify the MC, we should only see
1255 * the values 0 or 1.
1256 */
1257
1258static u32 knl_get_mc_route(int entry, u32 reg)
1259{
1260	int mc, chan;
1261
1262	WARN_ON(entry >= KNL_MAX_CHANNELS);
1263
1264	mc = GET_BITFIELD(reg, entry*3, (entry*3)+2);
1265	chan = GET_BITFIELD(reg, (entry*2) + 18, (entry*2) + 18 + 1);
1266
1267	return knl_channel_remap(mc, chan);
1268}
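
/*
 * Illustrative example (hypothetical register value): for entry 0 with
 * reg = 0x80001 (mc field == 1, channel field == 2):
 *	knl_get_mc_route(0, 0x80001) == knl_channel_remap(1, 2) == 2
 */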
1269
1270/*
1271 * Render the EDC_ROUTE register in human-readable form.
1272 * Output string s should be at least KNL_MAX_EDCS*2 bytes.
1273 */
1274static void knl_show_edc_route(u32 reg, char *s)
1275{
1276	int i;
1277
1278	for (i = 0; i < KNL_MAX_EDCS; i++) {
1279		s[i*2] = knl_get_edc_route(i, reg) + '0';
1280		s[i*2+1] = '-';
1281	}
1282
1283	s[KNL_MAX_EDCS*2 - 1] = '\0';
1284}
1285
1286/*
1287 * Render the MC_ROUTE register in human-readable form.
1288 * Output string s should be at least KNL_MAX_CHANNELS*2 bytes.
1289 */
1290static void knl_show_mc_route(u32 reg, char *s)
1291{
1292	int i;
1293
1294	for (i = 0; i < KNL_MAX_CHANNELS; i++) {
1295		s[i*2] = knl_get_mc_route(i, reg) + '0';
1296		s[i*2+1] = '-';
1297	}
1298
1299	s[KNL_MAX_CHANNELS*2 - 1] = '\0';
1300}
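
/*
 * Illustrative output: if the six MC route entries decode to 3, 4, 5,
 * 0, 1, 2, the rendered string is "3-4-5-0-1-2" (and likewise
 * "0-1-2-3-4-5-6-7" for an EDC route whose entries decode to 0..7).
 */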
1301
1302#define KNL_EDC_ROUTE 0xb8
1303#define KNL_MC_ROUTE 0xb4
1304
1305/* Is this dram rule backed by regular DRAM in flat mode? */
1306#define KNL_EDRAM(reg) GET_BITFIELD(reg, 29, 29)
1307
1308/* Is this dram rule cached? */
1309#define KNL_CACHEABLE(reg) GET_BITFIELD(reg, 28, 28)
1310
1311/* Is this rule backed by edc ? */
1312#define KNL_EDRAM_ONLY(reg) GET_BITFIELD(reg, 29, 29)
1313
1314/* Is this rule backed by DRAM, cacheable in EDRAM? */
1315#define KNL_CACHEABLE(reg) GET_BITFIELD(reg, 28, 28)
1316
1317/* Is this rule mod3? */
1318#define KNL_MOD3(reg) GET_BITFIELD(reg, 27, 27)
1319
1320/*
1321 * Figure out how big our RAM modules are.
1322 *
1323 * The DIMMMTR register in KNL doesn't tell us the size of the DIMMs, so we
1324 * have to figure this out from the SAD rules, interleave lists, route tables,
1325 * and TAD rules.
1326 *
1327 * SAD rules can have holes in them (e.g. the 3G-4G hole), so we have to
1328 * inspect the TAD rules to figure out how large the SAD regions really are.
1329 *
1330 * When we know the real size of a SAD region and how many ways it's
1331 * interleaved, we know the individual contribution of each channel to
1332 * TAD is size/ways.
1333 *
1334 * Finally, we have to check whether each channel participates in each SAD
1335 * region.
1336 *
1337 * Fortunately, KNL only supports one DIMM per channel, so once we know how
1338 * much memory the channel uses, we know the DIMM is at least that large.
 1339 * (The BIOS might choose not to map all available memory, in which
1340 * case we will underreport the size of the DIMM.)
1341 *
1342 * In theory, we could try to determine the EDC sizes as well, but that would
1343 * only work in flat mode, not in cache mode.
1344 *
1345 * @mc_sizes: Output sizes of channels (must have space for KNL_MAX_CHANNELS
1346 *            elements)
1347 */
1348static int knl_get_dimm_capacity(struct sbridge_pvt *pvt, u64 *mc_sizes)
1349{
1350	u64 sad_base, sad_limit = 0;
1351	u64 tad_base, tad_size, tad_limit, tad_deadspace, tad_livespace;
1352	int sad_rule = 0;
1353	int tad_rule = 0;
1354	int intrlv_ways, tad_ways;
1355	u32 first_pkg, pkg;
1356	int i;
1357	u64 sad_actual_size[2]; /* sad size accounting for holes, per mc */
1358	u32 dram_rule, interleave_reg;
1359	u32 mc_route_reg[KNL_MAX_CHAS];
1360	u32 edc_route_reg[KNL_MAX_CHAS];
1361	int edram_only;
1362	char edc_route_string[KNL_MAX_EDCS*2];
1363	char mc_route_string[KNL_MAX_CHANNELS*2];
1364	int cur_reg_start;
1365	int mc;
1366	int channel;
1367	int participants[KNL_MAX_CHANNELS];
1368
1369	for (i = 0; i < KNL_MAX_CHANNELS; i++)
1370		mc_sizes[i] = 0;
1371
1372	/* Read the EDC route table in each CHA. */
1373	cur_reg_start = 0;
1374	for (i = 0; i < KNL_MAX_CHAS; i++) {
1375		pci_read_config_dword(pvt->knl.pci_cha[i],
1376				KNL_EDC_ROUTE, &edc_route_reg[i]);
1377
1378		if (i > 0 && edc_route_reg[i] != edc_route_reg[i-1]) {
1379			knl_show_edc_route(edc_route_reg[i-1],
1380					edc_route_string);
1381			if (cur_reg_start == i-1)
1382				edac_dbg(0, "edc route table for CHA %d: %s\n",
1383					cur_reg_start, edc_route_string);
1384			else
1385				edac_dbg(0, "edc route table for CHA %d-%d: %s\n",
1386					cur_reg_start, i-1, edc_route_string);
1387			cur_reg_start = i;
1388		}
1389	}
1390	knl_show_edc_route(edc_route_reg[i-1], edc_route_string);
1391	if (cur_reg_start == i-1)
1392		edac_dbg(0, "edc route table for CHA %d: %s\n",
1393			cur_reg_start, edc_route_string);
1394	else
1395		edac_dbg(0, "edc route table for CHA %d-%d: %s\n",
1396			cur_reg_start, i-1, edc_route_string);
1397
1398	/* Read the MC route table in each CHA. */
1399	cur_reg_start = 0;
1400	for (i = 0; i < KNL_MAX_CHAS; i++) {
1401		pci_read_config_dword(pvt->knl.pci_cha[i],
1402			KNL_MC_ROUTE, &mc_route_reg[i]);
1403
1404		if (i > 0 && mc_route_reg[i] != mc_route_reg[i-1]) {
1405			knl_show_mc_route(mc_route_reg[i-1], mc_route_string);
1406			if (cur_reg_start == i-1)
1407				edac_dbg(0, "mc route table for CHA %d: %s\n",
1408					cur_reg_start, mc_route_string);
1409			else
1410				edac_dbg(0, "mc route table for CHA %d-%d: %s\n",
1411					cur_reg_start, i-1, mc_route_string);
1412			cur_reg_start = i;
1413		}
1414	}
1415	knl_show_mc_route(mc_route_reg[i-1], mc_route_string);
1416	if (cur_reg_start == i-1)
1417		edac_dbg(0, "mc route table for CHA %d: %s\n",
1418			cur_reg_start, mc_route_string);
1419	else
1420		edac_dbg(0, "mc route table for CHA %d-%d: %s\n",
1421			cur_reg_start, i-1, mc_route_string);
1422
1423	/* Process DRAM rules */
1424	for (sad_rule = 0; sad_rule < pvt->info.max_sad; sad_rule++) {
1425		/* previous limit becomes the new base */
1426		sad_base = sad_limit;
1427
1428		pci_read_config_dword(pvt->pci_sad0,
1429			pvt->info.dram_rule[sad_rule], &dram_rule);
1430
1431		if (!DRAM_RULE_ENABLE(dram_rule))
1432			break;
1433
1434		edram_only = KNL_EDRAM_ONLY(dram_rule);
1435
1436		sad_limit = pvt->info.sad_limit(dram_rule)+1;
1437
1438		pci_read_config_dword(pvt->pci_sad0,
1439			pvt->info.interleave_list[sad_rule], &interleave_reg);
1440
1441		/*
1442		 * Find out how many ways this dram rule is interleaved.
1443		 * We stop when we see the first channel again.
1444		 */
1445		first_pkg = sad_pkg(pvt->info.interleave_pkg,
1446						interleave_reg, 0);
1447		for (intrlv_ways = 1; intrlv_ways < 8; intrlv_ways++) {
1448			pkg = sad_pkg(pvt->info.interleave_pkg,
1449						interleave_reg, intrlv_ways);
1450
1451			if ((pkg & 0x8) == 0) {
1452				/*
1453				 * 0 bit means memory is non-local,
1454				 * which KNL doesn't support
1455				 */
1456				edac_dbg(0, "Unexpected interleave target %d\n",
1457					pkg);
1458				return -1;
1459			}
1460
1461			if (pkg == first_pkg)
1462				break;
1463		}
1464		if (KNL_MOD3(dram_rule))
1465			intrlv_ways *= 3;
1466
1467		edac_dbg(3, "dram rule %d (base 0x%llx, limit 0x%llx), %d way interleave%s\n",
1468			sad_rule,
1469			sad_base,
1470			sad_limit,
1471			intrlv_ways,
1472			edram_only ? ", EDRAM" : "");
1473
1474		/*
1475		 * Find out how big the SAD region really is by iterating
1476		 * over TAD tables (SAD regions may contain holes).
1477		 * Each memory controller might have a different TAD table, so
1478		 * we have to look at both.
1479		 *
1480		 * Livespace is the memory that's mapped in this TAD table,
1481		 * deadspace is the holes (this could be the MMIO hole, or it
1482		 * could be memory that's mapped by the other TAD table but
1483		 * not this one).
1484		 */
1485		for (mc = 0; mc < 2; mc++) {
1486			sad_actual_size[mc] = 0;
1487			tad_livespace = 0;
1488			for (tad_rule = 0;
1489					tad_rule < ARRAY_SIZE(
1490						knl_tad_dram_limit_lo);
1491					tad_rule++) {
1492				if (knl_get_tad(pvt,
1493						tad_rule,
1494						mc,
1495						&tad_deadspace,
1496						&tad_limit,
1497						&tad_ways))
1498					break;
1499
1500				tad_size = (tad_limit+1) -
1501					(tad_livespace + tad_deadspace);
1502				tad_livespace += tad_size;
1503				tad_base = (tad_limit+1) - tad_size;
1504
1505				if (tad_base < sad_base) {
1506					if (tad_limit > sad_base)
1507						edac_dbg(0, "TAD region overlaps lower SAD boundary -- TAD tables may be configured incorrectly.\n");
1508				} else if (tad_base < sad_limit) {
1509					if (tad_limit+1 > sad_limit) {
1510						edac_dbg(0, "TAD region overlaps upper SAD boundary -- TAD tables may be configured incorrectly.\n");
1511					} else {
1512						/* TAD region is completely inside SAD region */
1513						edac_dbg(3, "TAD region %d 0x%llx - 0x%llx (%lld bytes) table%d\n",
1514							tad_rule, tad_base,
1515							tad_limit, tad_size,
1516							mc);
1517						sad_actual_size[mc] += tad_size;
1518					}
1519				}
1520			}
1521		}
1522
1523		for (mc = 0; mc < 2; mc++) {
1524			edac_dbg(3, " total TAD DRAM footprint in table%d : 0x%llx (%lld bytes)\n",
1525				mc, sad_actual_size[mc], sad_actual_size[mc]);
1526		}
1527
1528		/* Ignore EDRAM rule */
1529		if (edram_only)
1530			continue;
1531
1532		/* Figure out which channels participate in interleave. */
1533		for (channel = 0; channel < KNL_MAX_CHANNELS; channel++)
1534			participants[channel] = 0;
1535
1536		/* For each channel, does at least one CHA have
1537		 * this channel mapped to the given target?
1538		 */
1539		for (channel = 0; channel < KNL_MAX_CHANNELS; channel++) {
1540			int target;
1541			int cha;
1542
1543			for (target = 0; target < KNL_MAX_CHANNELS; target++) {
1544				for (cha = 0; cha < KNL_MAX_CHAS; cha++) {
1545					if (knl_get_mc_route(target,
1546						mc_route_reg[cha]) == channel
1547						&& !participants[channel]) {
1548						participants[channel] = 1;
1549						break;
1550					}
1551				}
1552			}
1553		}
1554
1555		for (channel = 0; channel < KNL_MAX_CHANNELS; channel++) {
1556			mc = knl_channel_mc(channel);
1557			if (participants[channel]) {
1558				edac_dbg(4, "mc channel %d contributes %lld bytes via sad entry %d\n",
1559					channel,
1560					sad_actual_size[mc]/intrlv_ways,
1561					sad_rule);
1562				mc_sizes[channel] +=
1563					sad_actual_size[mc]/intrlv_ways;
1564			}
1565		}
1566	}
1567
1568	return 0;
1569}
1570
1571static void get_source_id(struct mem_ctl_info *mci)
1572{
1573	struct sbridge_pvt *pvt = mci->pvt_info;
1574	u32 reg;
1575
1576	if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL ||
1577	    pvt->info.type == KNIGHTS_LANDING)
1578		pci_read_config_dword(pvt->pci_sad1, SAD_TARGET, &reg);
1579	else
1580		pci_read_config_dword(pvt->pci_br0, SAD_TARGET, &reg);
1581
1582	if (pvt->info.type == KNIGHTS_LANDING)
1583		pvt->sbridge_dev->source_id = SOURCE_ID_KNL(reg);
1584	else
1585		pvt->sbridge_dev->source_id = SOURCE_ID(reg);
1586}
1587
1588static int __populate_dimms(struct mem_ctl_info *mci,
1589			    u64 knl_mc_sizes[KNL_MAX_CHANNELS],
1590			    enum edac_type mode)
1591{
1592	struct sbridge_pvt *pvt = mci->pvt_info;
1593	int channels = pvt->info.type == KNIGHTS_LANDING ? KNL_MAX_CHANNELS
1594							 : NUM_CHANNELS;
1595	unsigned int i, j, banks, ranks, rows, cols, npages;
1596	struct dimm_info *dimm;
1597	enum mem_type mtype;
1598	u64 size;
1599
1600	mtype = pvt->info.get_memory_type(pvt);
1601	if (mtype == MEM_RDDR3 || mtype == MEM_RDDR4)
1602		edac_dbg(0, "Memory is registered\n");
1603	else if (mtype == MEM_UNKNOWN)
1604		edac_dbg(0, "Cannot determine memory type\n");
1605	else
1606		edac_dbg(0, "Memory is unregistered\n");
1607
1608	if (mtype == MEM_DDR4 || mtype == MEM_RDDR4)
1609		banks = 16;
1610	else
1611		banks = 8;
1612
1613	for (i = 0; i < channels; i++) {
1614		u32 mtr, amap = 0;
1615
1616		int max_dimms_per_channel;
1617
1618		if (pvt->info.type == KNIGHTS_LANDING) {
1619			max_dimms_per_channel = 1;
1620			if (!pvt->knl.pci_channel[i])
1621				continue;
1622		} else {
1623			max_dimms_per_channel = ARRAY_SIZE(mtr_regs);
1624			if (!pvt->pci_tad[i])
1625				continue;
1626			pci_read_config_dword(pvt->pci_tad[i], 0x8c, &amap);
1627		}
1628
1629		for (j = 0; j < max_dimms_per_channel; j++) {
1630			dimm = edac_get_dimm(mci, i, j, 0);
1631			if (pvt->info.type == KNIGHTS_LANDING) {
1632				pci_read_config_dword(pvt->knl.pci_channel[i],
1633					knl_mtr_reg, &mtr);
1634			} else {
1635				pci_read_config_dword(pvt->pci_tad[i],
1636					mtr_regs[j], &mtr);
1637			}
1638			edac_dbg(4, "Channel #%d  MTR%d = %x\n", i, j, mtr);
1639
1640			if (IS_DIMM_PRESENT(mtr)) {
1641				if (!IS_ECC_ENABLED(pvt->info.mcmtr)) {
1642					sbridge_printk(KERN_ERR, "CPU SrcID #%d, Ha #%d, Channel #%d has DIMMs, but ECC is disabled\n",
1643						       pvt->sbridge_dev->source_id,
1644						       pvt->sbridge_dev->dom, i);
1645					return -ENODEV;
1646				}
1647				pvt->channel[i].dimms++;
1648
1649				ranks = numrank(pvt->info.type, mtr);
1650
1651				if (pvt->info.type == KNIGHTS_LANDING) {
1652					/* For DDR4, this is fixed. */
1653					cols = 1 << 10;
1654					rows = knl_mc_sizes[i] /
1655						((u64) cols * ranks * banks * 8);
1656				} else {
1657					rows = numrow(mtr);
1658					cols = numcol(mtr);
1659				}
1660
1661				size = ((u64)rows * cols * banks * ranks) >> (20 - 3);
1662				npages = MiB_TO_PAGES(size);
1663
1664				edac_dbg(0, "mc#%d: ha %d channel %d, dimm %d, %lld MiB (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n",
1665					 pvt->sbridge_dev->mc, pvt->sbridge_dev->dom, i, j,
1666					 size, npages,
1667					 banks, ranks, rows, cols);
1668
1669				dimm->nr_pages = npages;
1670				dimm->grain = 32;
1671				dimm->dtype = pvt->info.get_width(pvt, mtr);
1672				dimm->mtype = mtype;
1673				dimm->edac_mode = mode;
1674				pvt->channel[i].dimm[j].rowbits = order_base_2(rows);
1675				pvt->channel[i].dimm[j].colbits = order_base_2(cols);
1676				pvt->channel[i].dimm[j].bank_xor_enable =
1677						GET_BITFIELD(pvt->info.mcmtr, 9, 9);
1678				pvt->channel[i].dimm[j].amap_fine = GET_BITFIELD(amap, 0, 0);
1679				snprintf(dimm->label, sizeof(dimm->label),
1680						 "CPU_SrcID#%u_Ha#%u_Chan#%u_DIMM#%u",
1681						 pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom, i, j);
1682			}
1683		}
1684	}
1685
1686	return 0;
1687}
1688
1689static int get_dimm_config(struct mem_ctl_info *mci)
1690{
1691	struct sbridge_pvt *pvt = mci->pvt_info;
1692	u64 knl_mc_sizes[KNL_MAX_CHANNELS];
1693	enum edac_type mode;
1694	u32 reg;
1695
1696	pvt->sbridge_dev->node_id = pvt->info.get_node_id(pvt);
1697	edac_dbg(0, "mc#%d: Node ID: %d, source ID: %d\n",
1698		 pvt->sbridge_dev->mc,
1699		 pvt->sbridge_dev->node_id,
1700		 pvt->sbridge_dev->source_id);
1701
1702	/* KNL doesn't support mirroring or lockstep,
1703	 * and is always closed page
1704	 */
1705	if (pvt->info.type == KNIGHTS_LANDING) {
1706		mode = EDAC_S4ECD4ED;
1707		pvt->mirror_mode = NON_MIRRORING;
1708		pvt->is_cur_addr_mirrored = false;
1709
1710		if (knl_get_dimm_capacity(pvt, knl_mc_sizes) != 0)
1711			return -1;
1712		if (pci_read_config_dword(pvt->pci_ta, KNL_MCMTR, &pvt->info.mcmtr)) {
1713			edac_dbg(0, "Failed to read KNL_MCMTR register\n");
1714			return -ENODEV;
1715		}
1716	} else {
1717		if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL) {
1718			if (pci_read_config_dword(pvt->pci_ha, HASWELL_HASYSDEFEATURE2, &reg)) {
1719				edac_dbg(0, "Failed to read HASWELL_HASYSDEFEATURE2 register\n");
1720				return -ENODEV;
1721			}
1722			pvt->is_chan_hash = GET_BITFIELD(reg, 21, 21);
1723			if (GET_BITFIELD(reg, 28, 28)) {
1724				pvt->mirror_mode = ADDR_RANGE_MIRRORING;
1725				edac_dbg(0, "Address range partial memory mirroring is enabled\n");
1726				goto next;
1727			}
1728		}
1729		if (pci_read_config_dword(pvt->pci_ras, RASENABLES, &reg)) {
1730			edac_dbg(0, "Failed to read RASENABLES register\n");
1731			return -ENODEV;
1732		}
1733		if (IS_MIRROR_ENABLED(reg)) {
1734			pvt->mirror_mode = FULL_MIRRORING;
1735			edac_dbg(0, "Full memory mirroring is enabled\n");
1736		} else {
1737			pvt->mirror_mode = NON_MIRRORING;
1738			edac_dbg(0, "Memory mirroring is disabled\n");
1739		}
1740
1741next:
1742		if (pci_read_config_dword(pvt->pci_ta, MCMTR, &pvt->info.mcmtr)) {
1743			edac_dbg(0, "Failed to read MCMTR register\n");
1744			return -ENODEV;
1745		}
1746		if (IS_LOCKSTEP_ENABLED(pvt->info.mcmtr)) {
1747			edac_dbg(0, "Lockstep is enabled\n");
1748			mode = EDAC_S8ECD8ED;
1749			pvt->is_lockstep = true;
1750		} else {
1751			edac_dbg(0, "Lockstep is disabled\n");
1752			mode = EDAC_S4ECD4ED;
1753			pvt->is_lockstep = false;
1754		}
1755		if (IS_CLOSE_PG(pvt->info.mcmtr)) {
1756			edac_dbg(0, "address map is on closed page mode\n");
1757			pvt->is_close_pg = true;
1758		} else {
1759			edac_dbg(0, "address map is on open page mode\n");
1760			pvt->is_close_pg = false;
1761		}
1762	}
1763
1764	return __populate_dimms(mci, knl_mc_sizes, mode);
1765}
1766
1767static void get_memory_layout(const struct mem_ctl_info *mci)
1768{
1769	struct sbridge_pvt *pvt = mci->pvt_info;
1770	int i, j, k, n_sads, n_tads, sad_interl;
1771	u32 reg;
1772	u64 limit, prv = 0;
1773	u64 tmp_mb;
1774	u32 gb, mb;
1775	u32 rir_way;
1776
1777	/*
1778	 * Step 1) Get TOLM/TOHM ranges
1779	 */
1780
1781	pvt->tolm = pvt->info.get_tolm(pvt);
1782	tmp_mb = (1 + pvt->tolm) >> 20;
1783
1784	gb = div_u64_rem(tmp_mb, 1024, &mb);
1785	edac_dbg(0, "TOLM: %u.%03u GB (0x%016Lx)\n",
1786		gb, (mb*1000)/1024, (u64)pvt->tolm);
1787
1788	/* Address range is already 45:25 */
1789	pvt->tohm = pvt->info.get_tohm(pvt);
1790	tmp_mb = (1 + pvt->tohm) >> 20;
1791
1792	gb = div_u64_rem(tmp_mb, 1024, &mb);
1793	edac_dbg(0, "TOHM: %u.%03u GB (0x%016Lx)\n",
1794		gb, (mb*1000)/1024, (u64)pvt->tohm);
1795
1796	/*
1797	 * Step 2) Get SAD range and SAD Interleave list
1798	 * TAD registers contain the interleave wayness. However, it
1799	 * seems simpler to just discover it indirectly, with the
1800	 * algorithm below.
1801	 */
1802	prv = 0;
1803	for (n_sads = 0; n_sads < pvt->info.max_sad; n_sads++) {
1804		/* SAD_LIMIT Address range is 45:26 */
1805		pci_read_config_dword(pvt->pci_sad0, pvt->info.dram_rule[n_sads],
1806				      &reg);
1807		limit = pvt->info.sad_limit(reg);
1808
1809		if (!DRAM_RULE_ENABLE(reg))
1810			continue;
1811
1812		if (limit <= prv)
1813			break;
1814
1815		tmp_mb = (limit + 1) >> 20;
1816		gb = div_u64_rem(tmp_mb, 1024, &mb);
1817		edac_dbg(0, "SAD#%d %s up to %u.%03u GB (0x%016Lx) Interleave: %s reg=0x%08x\n",
1818			 n_sads,
1819			 show_dram_attr(pvt->info.dram_attr(reg)),
1820			 gb, (mb*1000)/1024,
1821			 ((u64)tmp_mb) << 20L,
1822			 get_intlv_mode_str(reg, pvt->info.type),
1823			 reg);
1824		prv = limit;
1825
1826		pci_read_config_dword(pvt->pci_sad0, pvt->info.interleave_list[n_sads],
1827				      &reg);
1828		sad_interl = sad_pkg(pvt->info.interleave_pkg, reg, 0);
1829		for (j = 0; j < 8; j++) {
1830			u32 pkg = sad_pkg(pvt->info.interleave_pkg, reg, j);
1831			if (j > 0 && sad_interl == pkg)
1832				break;
1833
1834			edac_dbg(0, "SAD#%d, interleave #%d: %d\n",
1835				 n_sads, j, pkg);
1836		}
1837	}
1838
1839	if (pvt->info.type == KNIGHTS_LANDING)
1840		return;
1841
1842	/*
1843	 * Step 3) Get TAD range
1844	 */
1845	prv = 0;
1846	for (n_tads = 0; n_tads < MAX_TAD; n_tads++) {
1847		pci_read_config_dword(pvt->pci_ha, tad_dram_rule[n_tads], &reg);
1848		limit = TAD_LIMIT(reg);
1849		if (limit <= prv)
1850			break;
1851		tmp_mb = (limit + 1) >> 20;
1852
1853		gb = div_u64_rem(tmp_mb, 1024, &mb);
1854		edac_dbg(0, "TAD#%d: up to %u.%03u GB (0x%016Lx), socket interleave %d, memory interleave %d, TGT: %d, %d, %d, %d, reg=0x%08x\n",
1855			 n_tads, gb, (mb*1000)/1024,
1856			 ((u64)tmp_mb) << 20L,
1857			 (u32)(1 << TAD_SOCK(reg)),
1858			 (u32)TAD_CH(reg) + 1,
1859			 (u32)TAD_TGT0(reg),
1860			 (u32)TAD_TGT1(reg),
1861			 (u32)TAD_TGT2(reg),
1862			 (u32)TAD_TGT3(reg),
1863			 reg);
1864		prv = limit;
1865	}
1866
1867	/*
1868	 * Step 4) Get TAD offsets, for each channel
1869	 */
1870	for (i = 0; i < NUM_CHANNELS; i++) {
1871		if (!pvt->channel[i].dimms)
1872			continue;
1873		for (j = 0; j < n_tads; j++) {
1874			pci_read_config_dword(pvt->pci_tad[i],
1875					      tad_ch_nilv_offset[j],
1876					      &reg);
1877			tmp_mb = TAD_OFFSET(reg) >> 20;
1878			gb = div_u64_rem(tmp_mb, 1024, &mb);
1879			edac_dbg(0, "TAD CH#%d, offset #%d: %u.%03u GB (0x%016Lx), reg=0x%08x\n",
1880				 i, j,
1881				 gb, (mb*1000)/1024,
1882				 ((u64)tmp_mb) << 20L,
1883				 reg);
1884		}
1885	}
1886
1887	/*
1888	 * Step 6) Get RIR Wayness/Limit, for each channel
1889	 */
1890	for (i = 0; i < NUM_CHANNELS; i++) {
1891		if (!pvt->channel[i].dimms)
1892			continue;
1893		for (j = 0; j < MAX_RIR_RANGES; j++) {
1894			pci_read_config_dword(pvt->pci_tad[i],
1895					      rir_way_limit[j],
1896					      &reg);
1897
1898			if (!IS_RIR_VALID(reg))
1899				continue;
1900
1901			tmp_mb = pvt->info.rir_limit(reg) >> 20;
1902			rir_way = 1 << RIR_WAY(reg);
1903			gb = div_u64_rem(tmp_mb, 1024, &mb);
1904			edac_dbg(0, "CH#%d RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d, reg=0x%08x\n",
1905				 i, j,
1906				 gb, (mb*1000)/1024,
1907				 ((u64)tmp_mb) << 20L,
1908				 rir_way,
1909				 reg);
1910
1911			for (k = 0; k < rir_way; k++) {
1912				pci_read_config_dword(pvt->pci_tad[i],
1913						      rir_offset[j][k],
1914						      &reg);
1915				tmp_mb = RIR_OFFSET(pvt->info.type, reg) << 6;
1916
1917				gb = div_u64_rem(tmp_mb, 1024, &mb);
1918				edac_dbg(0, "CH#%d RIR#%d INTL#%d, offset %u.%03u GB (0x%016Lx), tgt: %d, reg=0x%08x\n",
1919					 i, j, k,
1920					 gb, (mb*1000)/1024,
1921					 ((u64)tmp_mb) << 20L,
1922					 (u32)RIR_RNK_TGT(pvt->info.type, reg),
1923					 reg);
1924			}
1925		}
1926	}
1927}
1928
1929static struct mem_ctl_info *get_mci_for_node_id(u8 node_id, u8 ha)
1930{
1931	struct sbridge_dev *sbridge_dev;
1932
1933	list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) {
1934		if (sbridge_dev->node_id == node_id && sbridge_dev->dom == ha)
1935			return sbridge_dev->mci;
1936	}
1937	return NULL;
1938}
1939
1940static u8 sb_close_row[] = {
1941	15, 16, 17, 18, 20, 21, 22, 28, 10, 11, 12, 13, 29, 30, 31, 32, 33
1942};
1943
1944static u8 sb_close_column[] = {
1945	3, 4, 5, 14, 19, 23, 24, 25, 26, 27
1946};
1947
1948static u8 sb_open_row[] = {
1949	14, 15, 16, 20, 28, 21, 22, 23, 24, 25, 26, 27, 29, 30, 31, 32, 33
1950};
1951
1952static u8 sb_open_column[] = {
1953	3, 4, 5, 6, 7, 8, 9, 10, 11, 12
1954};
1955
1956static u8 sb_open_fine_column[] = {
1957	3, 4, 5, 7, 8, 9, 10, 11, 12, 13
1958};
1959
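/*
 * Gather the address bits listed in bits[] into a contiguous value:
 * bit i of the result is taken from bit bits[i] of addr.
 */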
1960static int sb_bits(u64 addr, int nbits, u8 *bits)
1961{
1962	int i, res = 0;
1963
1964	for (i = 0; i < nbits; i++)
1965		res |= ((addr >> bits[i]) & 1) << i;
1966	return res;
1967}
1968
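/*
 * Build a 2-bit bank field from address bits b0 and b1, optionally
 * XOR-folded with address bits x0 and x1.
 */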
1969static int sb_bank_bits(u64 addr, int b0, int b1, int do_xor, int x0, int x1)
1970{
1971	int ret = GET_BITFIELD(addr, b0, b0) | (GET_BITFIELD(addr, b1, b1) << 1);
1972
1973	if (do_xor)
1974		ret ^= GET_BITFIELD(addr, x0, x0) | (GET_BITFIELD(addr, x1, x1) << 1);
1975
1976	return ret;
1977}
1978
1979static bool sb_decode_ddr4(struct mem_ctl_info *mci, int ch, u8 rank,
1980			   u64 rank_addr, char *msg)
1981{
1982	int dimmno = 0;
1983	int row, col, bank_address, bank_group;
1984	struct sbridge_pvt *pvt;
1985	u32 bg0 = 0, rowbits = 0, colbits = 0;
1986	u32 amap_fine = 0, bank_xor_enable = 0;
1987
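	/* Four ranks per DIMM slot; ranks 12 and above map to the third slot */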
1988	dimmno = (rank < 12) ? rank / 4 : 2;
1989	pvt = mci->pvt_info;
1990	amap_fine =  pvt->channel[ch].dimm[dimmno].amap_fine;
1991	bg0 = amap_fine ? 6 : 13;
1992	rowbits = pvt->channel[ch].dimm[dimmno].rowbits;
1993	colbits = pvt->channel[ch].dimm[dimmno].colbits;
1994	bank_xor_enable = pvt->channel[ch].dimm[dimmno].bank_xor_enable;
1995
1996	if (pvt->is_lockstep) {
1997		pr_warn_once("LockStep row/column decode is not supported yet!\n");
1998		msg[0] = '\0';
1999		return false;
2000	}
2001
2002	if (pvt->is_close_pg) {
2003		row = sb_bits(rank_addr, rowbits, sb_close_row);
2004		col = sb_bits(rank_addr, colbits, sb_close_column);
2005		col |= 0x400; /* C10 is autoprecharge, always set */
2006		bank_address = sb_bank_bits(rank_addr, 8, 9, bank_xor_enable, 22, 28);
2007		bank_group = sb_bank_bits(rank_addr, 6, 7, bank_xor_enable, 20, 21);
2008	} else {
2009		row = sb_bits(rank_addr, rowbits, sb_open_row);
2010		if (amap_fine)
2011			col = sb_bits(rank_addr, colbits, sb_open_fine_column);
2012		else
2013			col = sb_bits(rank_addr, colbits, sb_open_column);
2014		bank_address = sb_bank_bits(rank_addr, 18, 19, bank_xor_enable, 22, 23);
2015		bank_group = sb_bank_bits(rank_addr, bg0, 17, bank_xor_enable, 20, 21);
2016	}
2017
2018	row &= (1u << rowbits) - 1;
2019
2020	sprintf(msg, "row:0x%x col:0x%x bank_addr:%d bank_group:%d",
2021		row, col, bank_address, bank_group);
2022	return true;
2023}
2024
2025static bool sb_decode_ddr3(struct mem_ctl_info *mci, int ch, u8 rank,
2026			   u64 rank_addr, char *msg)
2027{
2028	pr_warn_once("DDR3 row/column decode not supported yet!\n");
2029	msg[0] = '\0';
2030	return false;
2031}
2032
2033static int get_memory_error_data(struct mem_ctl_info *mci,
2034				 u64 addr,
2035				 u8 *socket, u8 *ha,
2036				 long *channel_mask,
2037				 u8 *rank,
2038				 char **area_type, char *msg)
2039{
2040	struct mem_ctl_info	*new_mci;
2041	struct sbridge_pvt *pvt = mci->pvt_info;
2042	struct pci_dev		*pci_ha;
2043	int			n_rir, n_sads, n_tads, sad_way, sck_xch;
2044	int			sad_interl, idx, base_ch;
2045	int			interleave_mode, shiftup = 0;
2046	unsigned int		sad_interleave[MAX_INTERLEAVE];
2047	u32			reg, dram_rule;
2048	u8			ch_way, sck_way, pkg, sad_ha = 0, rankid = 0;
2049	u32			tad_offset;
2050	u32			rir_way;
2051	u32			mb, gb;
2052	u64			ch_addr, offset, limit = 0, prv = 0;
2053	u64			rank_addr;
2054	enum mem_type		mtype;
2055
2056	/*
2057	 * Step 0) Check if the address is in a special memory range
2058	 * The check below is probably enough to cover all cases where
2059	 * the error is not inside memory, except for the legacy
2060	 * range (e.g. VGA addresses). It is unlikely, however, that the
2061	 * memory controller would generate an error on that range.
2062	 */
2063	if ((addr > (u64) pvt->tolm) && (addr < (1LL << 32))) {
2064		sprintf(msg, "Error at TOLM area, on addr 0x%08Lx", addr);
2065		return -EINVAL;
2066	}
2067	if (addr >= (u64)pvt->tohm) {
2068		sprintf(msg, "Error at MMIOH area, on addr 0x%016Lx", addr);
2069		return -EINVAL;
2070	}
2071
2072	/*
2073	 * Step 1) Get socket
2074	 */
2075	for (n_sads = 0; n_sads < pvt->info.max_sad; n_sads++) {
2076		pci_read_config_dword(pvt->pci_sad0, pvt->info.dram_rule[n_sads],
2077				      &reg);
2078
2079		if (!DRAM_RULE_ENABLE(reg))
2080			continue;
2081
2082		limit = pvt->info.sad_limit(reg);
2083		if (limit <= prv) {
2084			sprintf(msg, "Can't discover the memory socket");
2085			return -EINVAL;
2086		}
2087		if  (addr <= limit)
2088			break;
2089		prv = limit;
2090	}
2091	if (n_sads == pvt->info.max_sad) {
2092		sprintf(msg, "Can't discover the memory socket");
2093		return -EINVAL;
2094	}
2095	dram_rule = reg;
2096	*area_type = show_dram_attr(pvt->info.dram_attr(dram_rule));
2097	interleave_mode = pvt->info.interleave_mode(dram_rule);
2098
2099	pci_read_config_dword(pvt->pci_sad0, pvt->info.interleave_list[n_sads],
2100			      &reg);
2101
2102	if (pvt->info.type == SANDY_BRIDGE) {
2103		sad_interl = sad_pkg(pvt->info.interleave_pkg, reg, 0);
2104		for (sad_way = 0; sad_way < 8; sad_way++) {
2105			u32 pkg = sad_pkg(pvt->info.interleave_pkg, reg, sad_way);
2106			if (sad_way > 0 && sad_interl == pkg)
2107				break;
2108			sad_interleave[sad_way] = pkg;
2109			edac_dbg(0, "SAD interleave #%d: %d\n",
2110				 sad_way, sad_interleave[sad_way]);
2111		}
2112		edac_dbg(0, "mc#%d: Error detected on SAD#%d: address 0x%016Lx < 0x%016Lx, Interleave [%d:6]%s\n",
2113			 pvt->sbridge_dev->mc,
2114			 n_sads,
2115			 addr,
2116			 limit,
2117			 sad_way + 7,
2118			 !interleave_mode ? "" : "XOR[18:16]");
2119		if (interleave_mode)
2120			idx = ((addr >> 6) ^ (addr >> 16)) & 7;
2121		else
2122			idx = (addr >> 6) & 7;
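		/* Narrow the 3-bit interleave index down to the SAD wayness below */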
2123		switch (sad_way) {
2124		case 1:
2125			idx = 0;
2126			break;
2127		case 2:
2128			idx = idx & 1;
2129			break;
2130		case 4:
2131			idx = idx & 3;
2132			break;
2133		case 8:
2134			break;
2135		default:
2136			sprintf(msg, "Can't discover socket interleave");
2137			return -EINVAL;
2138		}
2139		*socket = sad_interleave[idx];
2140		edac_dbg(0, "SAD interleave index: %d (wayness %d) = CPU socket %d\n",
2141			 idx, sad_way, *socket);
2142	} else if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL) {
2143		int bits, a7mode = A7MODE(dram_rule);
2144
2145		if (a7mode) {
2146			/* A7 mode swaps P9 with P6 */
2147			bits = GET_BITFIELD(addr, 7, 8) << 1;
2148			bits |= GET_BITFIELD(addr, 9, 9);
2149		} else
2150			bits = GET_BITFIELD(addr, 6, 8);
2151
2152		if (interleave_mode == 0) {
2153			/* interleave mode will XOR {8,7,6} with {18,17,16} */
2154			idx = GET_BITFIELD(addr, 16, 18);
2155			idx ^= bits;
2156		} else
2157			idx = bits;
2158
2159		pkg = sad_pkg(pvt->info.interleave_pkg, reg, idx);
2160		*socket = sad_pkg_socket(pkg);
2161		sad_ha = sad_pkg_ha(pkg);
2162
2163		if (a7mode) {
2164			/* MCChanShiftUpEnable */
2165			pci_read_config_dword(pvt->pci_ha, HASWELL_HASYSDEFEATURE2, &reg);
2166			shiftup = GET_BITFIELD(reg, 22, 22);
2167		}
2168
2169		edac_dbg(0, "SAD interleave package: %d = CPU socket %d, HA %i, shiftup: %i\n",
2170			 idx, *socket, sad_ha, shiftup);
2171	} else {
2172		/* Ivy Bridge's SAD mode doesn't support XOR interleave mode */
2173		idx = (addr >> 6) & 7;
2174		pkg = sad_pkg(pvt->info.interleave_pkg, reg, idx);
2175		*socket = sad_pkg_socket(pkg);
2176		sad_ha = sad_pkg_ha(pkg);
2177		edac_dbg(0, "SAD interleave package: %d = CPU socket %d, HA %d\n",
2178			 idx, *socket, sad_ha);
2179	}
2180
2181	*ha = sad_ha;
2182
2183	/*
2184	 * Move to the proper node structure, in order to access the
2185	 * right PCI registers
2186	 */
2187	new_mci = get_mci_for_node_id(*socket, sad_ha);
2188	if (!new_mci) {
2189		sprintf(msg, "Struct for socket #%u wasn't initialized",
2190			*socket);
2191		return -EINVAL;
2192	}
2193	mci = new_mci;
2194	pvt = mci->pvt_info;
2195
2196	/*
2197	 * Step 2) Get memory channel
2198	 */
2199	prv = 0;
2200	pci_ha = pvt->pci_ha;
2201	for (n_tads = 0; n_tads < MAX_TAD; n_tads++) {
2202		pci_read_config_dword(pci_ha, tad_dram_rule[n_tads], &reg);
2203		limit = TAD_LIMIT(reg);
2204		if (limit <= prv) {
2205			sprintf(msg, "Can't discover the memory channel");
2206			return -EINVAL;
2207		}
2208		if  (addr <= limit)
2209			break;
2210		prv = limit;
2211	}
2212	if (n_tads == MAX_TAD) {
2213		sprintf(msg, "Can't discover the memory channel");
2214		return -EINVAL;
2215	}
2216
2217	ch_way = TAD_CH(reg) + 1;
2218	sck_way = TAD_SOCK(reg);
2219
2220	if (ch_way == 3)
2221		idx = addr >> 6;
2222	else {
2223		idx = (addr >> (6 + sck_way + shiftup)) & 0x3;
2224		if (pvt->is_chan_hash)
2225			idx = haswell_chan_hash(idx, addr);
2226	}
2227	idx = idx % ch_way;
2228
2229	/*
2230	 * FIXME: Shouldn't we use CHN_IDX_OFFSET() here, when ch_way == 3 ???
2231	 */
2232	switch (idx) {
2233	case 0:
2234		base_ch = TAD_TGT0(reg);
2235		break;
2236	case 1:
2237		base_ch = TAD_TGT1(reg);
2238		break;
2239	case 2:
2240		base_ch = TAD_TGT2(reg);
2241		break;
2242	case 3:
2243		base_ch = TAD_TGT3(reg);
2244		break;
2245	default:
2246		sprintf(msg, "Can't discover the TAD target");
2247		return -EINVAL;
2248	}
2249	*channel_mask = 1 << base_ch;
2250
2251	pci_read_config_dword(pvt->pci_tad[base_ch], tad_ch_nilv_offset[n_tads], &tad_offset);
2252
2253	if (pvt->mirror_mode == FULL_MIRRORING ||
2254	    (pvt->mirror_mode == ADDR_RANGE_MIRRORING && n_tads == 0)) {
2255		*channel_mask |= 1 << ((base_ch + 2) % 4);
2256		switch(ch_way) {
2257		case 2:
2258		case 4:
2259			sck_xch = (1 << sck_way) * (ch_way >> 1);
2260			break;
2261		default:
2262			sprintf(msg, "Invalid mirror set. Can't decode addr");
2263			return -EINVAL;
2264		}
2265
2266		pvt->is_cur_addr_mirrored = true;
2267	} else {
2268		sck_xch = (1 << sck_way) * ch_way;
2269		pvt->is_cur_addr_mirrored = false;
2270	}
2271
2272	if (pvt->is_lockstep)
2273		*channel_mask |= 1 << ((base_ch + 1) % 4);
2274
2275	offset = TAD_OFFSET(tad_offset);
2276
2277	edac_dbg(0, "TAD#%d: address 0x%016Lx < 0x%016Lx, socket interleave %d, channel interleave %d (offset 0x%08Lx), index %d, base ch: %d, ch mask: 0x%02lx\n",
2278		 n_tads,
2279		 addr,
2280		 limit,
2281		 sck_way,
2282		 ch_way,
2283		 offset,
2284		 idx,
2285		 base_ch,
2286		 *channel_mask);
2287
2288	/* Calculate channel address */
2289	/* Remove the TAD offset */
2290
2291	if (offset > addr) {
2292		sprintf(msg, "Can't calculate ch addr: TAD offset 0x%08Lx is too high for addr 0x%08Lx!",
2293			offset, addr);
2294		return -EINVAL;
2295	}
2296
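	/*
	 * Strip the interleave from the system address: drop the low
	 * 6 + shiftup bits, divide out the combined socket/channel ways
	 * (sck_xch), then put the low bits back.
	 */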
2297	ch_addr = addr - offset;
2298	ch_addr >>= (6 + shiftup);
2299	ch_addr /= sck_xch;
2300	ch_addr <<= (6 + shiftup);
2301	ch_addr |= addr & ((1 << (6 + shiftup)) - 1);
2302
2303	/*
2304	 * Step 3) Decode rank
2305	 */
2306	for (n_rir = 0; n_rir < MAX_RIR_RANGES; n_rir++) {
2307		pci_read_config_dword(pvt->pci_tad[base_ch], rir_way_limit[n_rir], &reg);
2308
2309		if (!IS_RIR_VALID(reg))
2310			continue;
2311
2312		limit = pvt->info.rir_limit(reg);
2313		gb = div_u64_rem(limit >> 20, 1024, &mb);
2314		edac_dbg(0, "RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d\n",
2315			 n_rir,
2316			 gb, (mb*1000)/1024,
2317			 limit,
2318			 1 << RIR_WAY(reg));
2319		if  (ch_addr <= limit)
2320			break;
2321	}
2322	if (n_rir == MAX_RIR_RANGES) {
2323		sprintf(msg, "Can't discover the memory rank for ch addr 0x%08Lx",
2324			ch_addr);
2325		return -EINVAL;
2326	}
2327	rir_way = RIR_WAY(reg);
2328
2329	if (pvt->is_close_pg)
2330		idx = (ch_addr >> 6);
2331	else
2332		idx = (ch_addr >> 13);	/* FIXME: Datasheet says to shift by 15 */
2333	idx %= 1 << rir_way;
2334
2335	pci_read_config_dword(pvt->pci_tad[base_ch], rir_offset[n_rir][idx], &reg);
2336	*rank = RIR_RNK_TGT(pvt->info.type, reg);
2337
2338	if (pvt->info.type == BROADWELL) {
2339		if (pvt->is_close_pg)
2340			shiftup = 6;
2341		else
2342			shiftup = 13;
2343
2344		rank_addr = ch_addr >> shiftup;
2345		rank_addr /= (1 << rir_way);
2346		rank_addr <<= shiftup;
2347		rank_addr |= ch_addr & GENMASK_ULL(shiftup - 1, 0);
2348		rank_addr -= RIR_OFFSET(pvt->info.type, reg);
2349
2350		mtype = pvt->info.get_memory_type(pvt);
2351		rankid = *rank;
2352		if (mtype == MEM_DDR4 || mtype == MEM_RDDR4)
2353			sb_decode_ddr4(mci, base_ch, rankid, rank_addr, msg);
2354		else
2355			sb_decode_ddr3(mci, base_ch, rankid, rank_addr, msg);
2356	} else {
2357		msg[0] = '\0';
2358	}
2359
2360	edac_dbg(0, "RIR#%d: channel address 0x%08Lx < 0x%08Lx, RIR interleave %d, index %d\n",
2361		 n_rir,
2362		 ch_addr,
2363		 limit,
2364		 rir_way,
2365		 idx);
2366
2367	return 0;
2368}
2369
2370static int get_memory_error_data_from_mce(struct mem_ctl_info *mci,
2371					  const struct mce *m, u8 *socket,
2372					  u8 *ha, long *channel_mask,
2373					  char *msg)
2374{
2375	u32 reg, channel = GET_BITFIELD(m->status, 0, 3);
2376	struct mem_ctl_info *new_mci;
2377	struct sbridge_pvt *pvt;
2378	struct pci_dev *pci_ha;
2379	bool tad0;
2380
2381	if (channel >= NUM_CHANNELS) {
2382		sprintf(msg, "Invalid channel 0x%x", channel);
2383		return -EINVAL;
2384	}
2385
2386	pvt = mci->pvt_info;
2387	if (!pvt->info.get_ha) {
2388		sprintf(msg, "No get_ha()");
2389		return -EINVAL;
2390	}
2391	*ha = pvt->info.get_ha(m->bank);
2392	if (*ha != 0 && *ha != 1) {
2393		sprintf(msg, "Impossible bank %d", m->bank);
2394		return -EINVAL;
2395	}
2396
2397	*socket = m->socketid;
2398	new_mci = get_mci_for_node_id(*socket, *ha);
2399	if (!new_mci) {
2400		strcpy(msg, "mci socket got corrupted!");
2401		return -EINVAL;
2402	}
2403
2404	pvt = new_mci->pvt_info;
2405	pci_ha = pvt->pci_ha;
2406	pci_read_config_dword(pci_ha, tad_dram_rule[0], &reg);
2407	tad0 = m->addr <= TAD_LIMIT(reg);
2408
2409	*channel_mask = 1 << channel;
2410	if (pvt->mirror_mode == FULL_MIRRORING ||
2411	    (pvt->mirror_mode == ADDR_RANGE_MIRRORING && tad0)) {
2412		*channel_mask |= 1 << ((channel + 2) % 4);
2413		pvt->is_cur_addr_mirrored = true;
2414	} else {
2415		pvt->is_cur_addr_mirrored = false;
2416	}
2417
2418	if (pvt->is_lockstep)
2419		*channel_mask |= 1 << ((channel + 1) % 4);
2420
2421	return 0;
2422}
2423
2424/****************************************************************************
2425	Device initialization routines: put/get, init/exit
2426 ****************************************************************************/
2427
2428/*
2429 *	sbridge_put_all_devices	'put' all the devices that we have
2430 *				reserved via 'get'
2431 */
2432static void sbridge_put_devices(struct sbridge_dev *sbridge_dev)
2433{
2434	int i;
2435
2436	edac_dbg(0, "\n");
2437	for (i = 0; i < sbridge_dev->n_devs; i++) {
2438		struct pci_dev *pdev = sbridge_dev->pdev[i];
2439		if (!pdev)
2440			continue;
2441		edac_dbg(0, "Removing dev %02x:%02x.%d\n",
2442			 pdev->bus->number,
2443			 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
2444		pci_dev_put(pdev);
2445	}
2446}
2447
2448static void sbridge_put_all_devices(void)
2449{
2450	struct sbridge_dev *sbridge_dev, *tmp;
2451
2452	list_for_each_entry_safe(sbridge_dev, tmp, &sbridge_edac_list, list) {
2453		sbridge_put_devices(sbridge_dev);
2454		free_sbridge_dev(sbridge_dev);
2455	}
2456}
2457
2458static int sbridge_get_onedevice(struct pci_dev **prev,
2459				 u8 *num_mc,
2460				 const struct pci_id_table *table,
2461				 const unsigned devno,
2462				 const int multi_bus)
2463{
2464	struct sbridge_dev *sbridge_dev = NULL;
2465	const struct pci_id_descr *dev_descr = &table->descr[devno];
2466	struct pci_dev *pdev = NULL;
2467	int seg = 0;
2468	u8 bus = 0;
2469	int i = 0;
2470
2471	sbridge_printk(KERN_DEBUG,
2472		"Seeking for: PCI ID %04x:%04x\n",
2473		PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
2474
2475	pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
2476			      dev_descr->dev_id, *prev);
2477
2478	if (!pdev) {
2479		if (*prev) {
2480			*prev = pdev;
2481			return 0;
2482		}
2483
2484		if (dev_descr->optional)
2485			return 0;
2486
2487		/* if the HA wasn't found */
2488		if (devno == 0)
2489			return -ENODEV;
2490
2491		sbridge_printk(KERN_INFO,
2492			"Device not found: %04x:%04x\n",
2493			PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
2494
2495		/* End of list, leave */
2496		return -ENODEV;
2497	}
2498	seg = pci_domain_nr(pdev->bus);
2499	bus = pdev->bus->number;
2500
2501next_imc:
2502	sbridge_dev = get_sbridge_dev(seg, bus, dev_descr->dom,
2503				      multi_bus, sbridge_dev);
2504	if (!sbridge_dev) {
2505		/* If HA1 wasn't found, don't create a second EDAC memory controller */
2506		if (dev_descr->dom == IMC1 && devno != 1) {
2507			edac_dbg(0, "Skip IMC1: %04x:%04x (since HA1 was absent)\n",
2508				 PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
2509			pci_dev_put(pdev);
2510			return 0;
2511		}
2512
2513		if (dev_descr->dom == SOCK)
2514			goto out_imc;
2515
2516		sbridge_dev = alloc_sbridge_dev(seg, bus, dev_descr->dom, table);
2517		if (!sbridge_dev) {
2518			pci_dev_put(pdev);
2519			return -ENOMEM;
2520		}
2521		(*num_mc)++;
2522	}
2523
2524	if (sbridge_dev->pdev[sbridge_dev->i_devs]) {
2525		sbridge_printk(KERN_ERR,
2526			"Duplicated device for %04x:%04x\n",
2527			PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
2528		pci_dev_put(pdev);
2529		return -ENODEV;
2530	}
2531
2532	sbridge_dev->pdev[sbridge_dev->i_devs++] = pdev;
2533
2534	/* pdev belongs to more than one IMC, do extra gets */
2535	if (++i > 1)
2536		pci_dev_get(pdev);
2537
2538	if (dev_descr->dom == SOCK && i < table->n_imcs_per_sock)
2539		goto next_imc;
2540
2541out_imc:
2542	/* Be sure that the device is enabled */
2543	if (unlikely(pci_enable_device(pdev) < 0)) {
2544		sbridge_printk(KERN_ERR,
2545			"Couldn't enable %04x:%04x\n",
2546			PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
2547		return -ENODEV;
2548	}
2549
2550	edac_dbg(0, "Detected %04x:%04x\n",
2551		 PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
2552
2553	/*
2554	 * As stated in drivers/pci/search.c, the reference count for
2555	 * @from is always decremented if it is not %NULL. Since we need
2556	 * to keep walking the list until NULL, take an extra reference on the device
2557	 */
2558	pci_dev_get(pdev);
2559
2560	*prev = pdev;
2561
2562	return 0;
2563}
2564
2565/*
2566 * sbridge_get_all_devices - Find and perform 'get' operation on the MCH's
2567 *			     devices we want to reference for this driver.
2568 * @num_mc: pointer to the memory controllers count, to be incremented in case
2569 *	    of success.
2570 * @table: model specific table
2571 *
2572 * returns 0 on success or a negative error code
2573 */
2574static int sbridge_get_all_devices(u8 *num_mc,
2575					const struct pci_id_table *table)
2576{
2577	int i, rc;
2578	struct pci_dev *pdev = NULL;
2579	int allow_dups = 0;
2580	int multi_bus = 0;
2581
2582	if (table->type == KNIGHTS_LANDING)
2583		allow_dups = multi_bus = 1;
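	/* KNL exposes the same device ID several times per socket, so keep
	 * scanning for further matches instead of stopping at the first one.
	 */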
2584	while (table && table->descr) {
2585		for (i = 0; i < table->n_devs_per_sock; i++) {
2586			if (!allow_dups || i == 0 ||
2587					table->descr[i].dev_id !=
2588						table->descr[i-1].dev_id) {
2589				pdev = NULL;
2590			}
2591			do {
2592				rc = sbridge_get_onedevice(&pdev, num_mc,
2593							   table, i, multi_bus);
2594				if (rc < 0) {
2595					if (i == 0) {
2596						i = table->n_devs_per_sock;
2597						break;
2598					}
2599					sbridge_put_all_devices();
2600					return -ENODEV;
2601				}
2602			} while (pdev && !allow_dups);
2603		}
2604		table++;
2605	}
2606
2607	return 0;
2608}
2609
2610/*
2611 * Device IDs for {SBRIDGE,IBRIDGE,HASWELL,BROADWELL}_IMC_HA0_TAD0 are in
2612 * the format: XXXa. So we can convert from a device to the corresponding
2613 * channel like this
2614 */
2615#define TAD_DEV_TO_CHAN(dev) (((dev) & 0xf) - 0xa)
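/* e.g. a TAD device ID ending in 0xa maps to channel 0, 0xb to channel 1, etc. */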
2616
2617static int sbridge_mci_bind_devs(struct mem_ctl_info *mci,
2618				 struct sbridge_dev *sbridge_dev)
2619{
2620	struct sbridge_pvt *pvt = mci->pvt_info;
2621	struct pci_dev *pdev;
2622	u8 saw_chan_mask = 0;
2623	int i;
2624
2625	for (i = 0; i < sbridge_dev->n_devs; i++) {
2626		pdev = sbridge_dev->pdev[i];
2627		if (!pdev)
2628			continue;
2629
2630		switch (pdev->device) {
2631		case PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0:
2632			pvt->pci_sad0 = pdev;
2633			break;
2634		case PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1:
2635			pvt->pci_sad1 = pdev;
2636			break;
2637		case PCI_DEVICE_ID_INTEL_SBRIDGE_BR:
2638			pvt->pci_br0 = pdev;
2639			break;
2640		case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0:
2641			pvt->pci_ha = pdev;
2642			break;
2643		case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA:
2644			pvt->pci_ta = pdev;
2645			break;
2646		case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS:
2647			pvt->pci_ras = pdev;
2648			break;
2649		case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0:
2650		case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1:
2651		case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2:
2652		case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3:
2653		{
2654			int id = TAD_DEV_TO_CHAN(pdev->device);
2655			pvt->pci_tad[id] = pdev;
2656			saw_chan_mask |= 1 << id;
2657		}
2658			break;
2659		case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO:
2660			pvt->pci_ddrio = pdev;
2661			break;
2662		default:
2663			goto error;
2664		}
2665
2666		edac_dbg(0, "Associated PCI %02x:%02x, bus %d with dev = %p\n",
2667			 pdev->vendor, pdev->device,
2668			 sbridge_dev->bus,
2669			 pdev);
2670	}
2671
2672	/* Check if everything was registered */
2673	if (!pvt->pci_sad0 || !pvt->pci_sad1 || !pvt->pci_ha ||
2674	    !pvt->pci_ras || !pvt->pci_ta)
2675		goto enodev;
2676
2677	if (saw_chan_mask != 0x0f)
2678		goto enodev;
2679	return 0;
2680
2681enodev:
2682	sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
2683	return -ENODEV;
2684
2685error:
2686	sbridge_printk(KERN_ERR, "Unexpected device %02x:%02x\n",
2687		       PCI_VENDOR_ID_INTEL, pdev->device);
2688	return -EINVAL;
2689}
2690
2691static int ibridge_mci_bind_devs(struct mem_ctl_info *mci,
2692				 struct sbridge_dev *sbridge_dev)
2693{
2694	struct sbridge_pvt *pvt = mci->pvt_info;
2695	struct pci_dev *pdev;
2696	u8 saw_chan_mask = 0;
2697	int i;
2698
2699	for (i = 0; i < sbridge_dev->n_devs; i++) {
2700		pdev = sbridge_dev->pdev[i];
2701		if (!pdev)
2702			continue;
2703
2704		switch (pdev->device) {
2705		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0:
2706		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1:
2707			pvt->pci_ha = pdev;
2708			break;
2709		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA:
2710		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA:
2711			pvt->pci_ta = pdev;
2712			break;
2713		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS:
2714		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS:
2715			pvt->pci_ras = pdev;
2716			break;
2717		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0:
2718		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1:
2719		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2:
2720		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3:
2721		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0:
2722		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1:
2723		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD2:
2724		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD3:
2725		{
2726			int id = TAD_DEV_TO_CHAN(pdev->device);
2727			pvt->pci_tad[id] = pdev;
2728			saw_chan_mask |= 1 << id;
2729		}
2730			break;
2731		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0:
2732			pvt->pci_ddrio = pdev;
2733			break;
2734		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0:
2735			pvt->pci_ddrio = pdev;
2736			break;
2737		case PCI_DEVICE_ID_INTEL_IBRIDGE_SAD:
2738			pvt->pci_sad0 = pdev;
2739			break;
2740		case PCI_DEVICE_ID_INTEL_IBRIDGE_BR0:
2741			pvt->pci_br0 = pdev;
2742			break;
2743		case PCI_DEVICE_ID_INTEL_IBRIDGE_BR1:
2744			pvt->pci_br1 = pdev;
2745			break;
2746		default:
2747			goto error;
2748		}
2749
2750		edac_dbg(0, "Associated PCI %02x.%02d.%d with dev = %p\n",
2751			 sbridge_dev->bus,
2752			 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
2753			 pdev);
2754	}
2755
2756	/* Check if everything was registered */
2757	if (!pvt->pci_sad0 || !pvt->pci_ha || !pvt->pci_br0 ||
2758	    !pvt->pci_br1 || !pvt->pci_ras || !pvt->pci_ta)
2759		goto enodev;
2760
2761	if (saw_chan_mask != 0x0f && /* -EN/-EX */
2762	    saw_chan_mask != 0x03)   /* -EP */
2763		goto enodev;
2764	return 0;
2765
2766enodev:
2767	sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
2768	return -ENODEV;
2769
2770error:
2771	sbridge_printk(KERN_ERR,
2772		       "Unexpected device %02x:%02x\n", PCI_VENDOR_ID_INTEL,
2773			pdev->device);
2774	return -EINVAL;
2775}
2776
2777static int haswell_mci_bind_devs(struct mem_ctl_info *mci,
2778				 struct sbridge_dev *sbridge_dev)
2779{
2780	struct sbridge_pvt *pvt = mci->pvt_info;
2781	struct pci_dev *pdev;
2782	u8 saw_chan_mask = 0;
2783	int i;
2784
2785	/* there's only one device per system; not tied to any bus */
2786	if (pvt->info.pci_vtd == NULL)
2787		/* result will be checked later */
2788		pvt->info.pci_vtd = pci_get_device(PCI_VENDOR_ID_INTEL,
2789						   PCI_DEVICE_ID_INTEL_HASWELL_IMC_VTD_MISC,
2790						   NULL);
2791
2792	for (i = 0; i < sbridge_dev->n_devs; i++) {
2793		pdev = sbridge_dev->pdev[i];
2794		if (!pdev)
2795			continue;
2796
2797		switch (pdev->device) {
2798		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD0:
2799			pvt->pci_sad0 = pdev;
2800			break;
2801		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD1:
2802			pvt->pci_sad1 = pdev;
2803			break;
2804		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0:
2805		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1:
2806			pvt->pci_ha = pdev;
2807			break;
2808		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA:
2809		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA:
2810			pvt->pci_ta = pdev;
2811			break;
2812		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TM:
2813		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TM:
2814			pvt->pci_ras = pdev;
2815			break;
2816		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0:
2817		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD1:
2818		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD2:
2819		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3:
2820		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0:
2821		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD1:
2822		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD2:
2823		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD3:
2824		{
2825			int id = TAD_DEV_TO_CHAN(pdev->device);
2826			pvt->pci_tad[id] = pdev;
2827			saw_chan_mask |= 1 << id;
2828		}
2829			break;
2830		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0:
2831		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO1:
2832		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO2:
2833		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO3:
2834			if (!pvt->pci_ddrio)
2835				pvt->pci_ddrio = pdev;
2836			break;
2837		default:
2838			break;
2839		}
2840
2841		edac_dbg(0, "Associated PCI %02x.%02d.%d with dev = %p\n",
2842			 sbridge_dev->bus,
2843			 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
2844			 pdev);
2845	}
2846
2847	/* Check if everything was registered */
2848	if (!pvt->pci_sad0 || !pvt->pci_ha || !pvt->pci_sad1 ||
2849	    !pvt->pci_ras  || !pvt->pci_ta || !pvt->info.pci_vtd)
2850		goto enodev;
2851
2852	if (saw_chan_mask != 0x0f && /* -EN/-EX */
2853	    saw_chan_mask != 0x03)   /* -EP */
2854		goto enodev;
2855	return 0;
2856
2857enodev:
2858	sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
2859	return -ENODEV;
2860}
2861
2862static int broadwell_mci_bind_devs(struct mem_ctl_info *mci,
2863				 struct sbridge_dev *sbridge_dev)
2864{
2865	struct sbridge_pvt *pvt = mci->pvt_info;
2866	struct pci_dev *pdev;
2867	u8 saw_chan_mask = 0;
2868	int i;
2869
2870	/* there's only one device per system; not tied to any bus */
2871	if (pvt->info.pci_vtd == NULL)
2872		/* result will be checked later */
2873		pvt->info.pci_vtd = pci_get_device(PCI_VENDOR_ID_INTEL,
2874						   PCI_DEVICE_ID_INTEL_BROADWELL_IMC_VTD_MISC,
2875						   NULL);
2876
2877	for (i = 0; i < sbridge_dev->n_devs; i++) {
2878		pdev = sbridge_dev->pdev[i];
2879		if (!pdev)
2880			continue;
2881
2882		switch (pdev->device) {
2883		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD0:
2884			pvt->pci_sad0 = pdev;
2885			break;
2886		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD1:
2887			pvt->pci_sad1 = pdev;
2888			break;
2889		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0:
2890		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1:
2891			pvt->pci_ha = pdev;
2892			break;
2893		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA:
2894		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TA:
2895			pvt->pci_ta = pdev;
2896			break;
2897		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TM:
2898		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TM:
2899			pvt->pci_ras = pdev;
2900			break;
2901		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD0:
2902		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD1:
2903		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD2:
2904		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD3:
2905		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD0:
2906		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD1:
2907		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD2:
2908		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD3:
2909		{
2910			int id = TAD_DEV_TO_CHAN(pdev->device);
2911			pvt->pci_tad[id] = pdev;
2912			saw_chan_mask |= 1 << id;
2913		}
2914			break;
2915		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_DDRIO0:
2916			pvt->pci_ddrio = pdev;
2917			break;
2918		default:
2919			break;
2920		}
2921
2922		edac_dbg(0, "Associated PCI %02x.%02d.%d with dev = %p\n",
2923			 sbridge_dev->bus,
2924			 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
2925			 pdev);
2926	}
2927
2928	/* Check if everything was registered */
2929	if (!pvt->pci_sad0 || !pvt->pci_ha || !pvt->pci_sad1 ||
2930	    !pvt->pci_ras  || !pvt->pci_ta || !pvt->info.pci_vtd)
2931		goto enodev;
2932
2933	if (saw_chan_mask != 0x0f && /* -EN/-EX */
2934	    saw_chan_mask != 0x03)   /* -EP */
2935		goto enodev;
2936	return 0;
2937
2938enodev:
2939	sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
2940	return -ENODEV;
2941}
2942
2943static int knl_mci_bind_devs(struct mem_ctl_info *mci,
2944			struct sbridge_dev *sbridge_dev)
2945{
2946	struct sbridge_pvt *pvt = mci->pvt_info;
2947	struct pci_dev *pdev;
2948	int dev, func;
2949
2950	int i;
2951	int devidx;
2952
2953	for (i = 0; i < sbridge_dev->n_devs; i++) {
2954		pdev = sbridge_dev->pdev[i];
2955		if (!pdev)
2956			continue;
2957
2958		/* Extract PCI device and function. */
2959		dev = (pdev->devfn >> 3) & 0x1f;
2960		func = pdev->devfn & 0x7;
2961
2962		switch (pdev->device) {
2963		case PCI_DEVICE_ID_INTEL_KNL_IMC_MC:
2964			if (dev == 8)
2965				pvt->knl.pci_mc0 = pdev;
2966			else if (dev == 9)
2967				pvt->knl.pci_mc1 = pdev;
2968			else {
2969				sbridge_printk(KERN_ERR,
2970					"Memory controller in unexpected place! (dev %d, fn %d)\n",
2971					dev, func);
2972				continue;
2973			}
2974			break;
2975
2976		case PCI_DEVICE_ID_INTEL_KNL_IMC_SAD0:
2977			pvt->pci_sad0 = pdev;
2978			break;
2979
2980		case PCI_DEVICE_ID_INTEL_KNL_IMC_SAD1:
2981			pvt->pci_sad1 = pdev;
2982			break;
2983
2984		case PCI_DEVICE_ID_INTEL_KNL_IMC_CHA:
2985			/* There is one of these per tile, ranging from
2986			 * 1.14.0 to 1.18.5.
2987			 */
2988			devidx = ((dev-14)*8)+func;
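			/* e.g. dev 14 fn 0 is CHA 0, dev 18 fn 5 is CHA 37 */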
2989
2990			if (devidx < 0 || devidx >= KNL_MAX_CHAS) {
2991				sbridge_printk(KERN_ERR,
2992					"Caching and Home Agent in unexpected place! (dev %d, fn %d)\n",
2993					dev, func);
2994				continue;
2995			}
2996
2997			WARN_ON(pvt->knl.pci_cha[devidx] != NULL);
2998
2999			pvt->knl.pci_cha[devidx] = pdev;
3000			break;
3001
3002		case PCI_DEVICE_ID_INTEL_KNL_IMC_CHAN:
3003			devidx = -1;
3004
3005			/*
3006			 *  MC0 channels 0-2 are device 9 function 2-4,
3007			 *  MC1 channels 3-5 are device 8 function 2-4.
3008			 */
3009
3010			if (dev == 9)
3011				devidx = func-2;
3012			else if (dev == 8)
3013				devidx = 3 + (func-2);
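			/* e.g. dev 9 fn 2 is channel 0, dev 8 fn 4 is channel 5 */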
3014
3015			if (devidx < 0 || devidx >= KNL_MAX_CHANNELS) {
3016				sbridge_printk(KERN_ERR,
3017					"DRAM Channel Registers in unexpected place! (dev %d, fn %d)\n",
3018					dev, func);
3019				continue;
3020			}
3021
3022			WARN_ON(pvt->knl.pci_channel[devidx] != NULL);
3023			pvt->knl.pci_channel[devidx] = pdev;
3024			break;
3025
3026		case PCI_DEVICE_ID_INTEL_KNL_IMC_TOLHM:
3027			pvt->knl.pci_mc_info = pdev;
3028			break;
3029
3030		case PCI_DEVICE_ID_INTEL_KNL_IMC_TA:
3031			pvt->pci_ta = pdev;
3032			break;
3033
3034		default:
3035			sbridge_printk(KERN_ERR, "Unexpected device %d\n",
3036				pdev->device);
3037			break;
3038		}
3039	}
3040
3041	if (!pvt->knl.pci_mc0  || !pvt->knl.pci_mc1 ||
3042	    !pvt->pci_sad0     || !pvt->pci_sad1    ||
3043	    !pvt->pci_ta) {
3044		goto enodev;
3045	}
3046
3047	for (i = 0; i < KNL_MAX_CHANNELS; i++) {
3048		if (!pvt->knl.pci_channel[i]) {
3049			sbridge_printk(KERN_ERR, "Missing channel %d\n", i);
3050			goto enodev;
3051		}
3052	}
3053
3054	for (i = 0; i < KNL_MAX_CHAS; i++) {
3055		if (!pvt->knl.pci_cha[i]) {
3056			sbridge_printk(KERN_ERR, "Missing CHA %d\n", i);
3057			goto enodev;
3058		}
3059	}
3060
3061	return 0;
3062
3063enodev:
3064	sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
3065	return -ENODEV;
3066}
3067
3068/****************************************************************************
3069			Error check routines
3070 ****************************************************************************/
3071
3072/*
3073 * While Sandy Bridge has error count registers, the SMI BIOS reads and
3074 * resets those counters, so they are not reliable for the OS to read.
3075 * We have no option but to trust whatever the MCE is telling us
3076 * about the errors.
3077 */
3078static void sbridge_mce_output_error(struct mem_ctl_info *mci,
3079				    const struct mce *m)
3080{
3081	struct mem_ctl_info *new_mci;
3082	struct sbridge_pvt *pvt = mci->pvt_info;
3083	enum hw_event_mc_err_type tp_event;
3084	bool ripv = GET_BITFIELD(m->mcgstatus, 0, 0);
3085	bool overflow = GET_BITFIELD(m->status, 62, 62);
3086	bool uncorrected_error = GET_BITFIELD(m->status, 61, 61);
3087	bool recoverable;
3088	u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52);
3089	u32 mscod = GET_BITFIELD(m->status, 16, 31);
3090	u32 errcode = GET_BITFIELD(m->status, 0, 15);
3091	u32 channel = GET_BITFIELD(m->status, 0, 3);
3092	u32 optypenum = GET_BITFIELD(m->status, 4, 6);
3093	/*
3094	 * Bits 5-0 of MCi_MISC give the least significant bit that is valid.
3095	 * A value of 6 means a cache-line-aligned address; a value of 12 means a
3096	 * page-aligned address reported by the patrol scrubber.
3097	 */
3098	u32 lsb = GET_BITFIELD(m->misc, 0, 5);
3099	char *optype, *area_type = "DRAM";
3100	long channel_mask, first_channel;
3101	u8  rank = 0xff, socket, ha;
3102	int rc, dimm;
3103
3104	if (pvt->info.type != SANDY_BRIDGE)
3105		recoverable = true;
3106	else
3107		recoverable = GET_BITFIELD(m->status, 56, 56);
3108
3109	if (uncorrected_error) {
3110		core_err_cnt = 1;
3111		if (ripv) {
3112			tp_event = HW_EVENT_ERR_UNCORRECTED;
3113		} else {
3114			tp_event = HW_EVENT_ERR_FATAL;
3115		}
3116	} else {
3117		tp_event = HW_EVENT_ERR_CORRECTED;
3118	}
3119
3120	/*
3121	 * According to Table 15-9 of the Intel Architecture spec vol 3A,
3122	 * memory errors should fit in this mask:
3123	 *	000f 0000 1mmm cccc (binary)
3124	 * where:
3125	 *	f = Correction Report Filtering Bit. If 1, subsequent errors
3126	 *	    won't be shown
3127	 *	mmm = error type
3128	 *	cccc = channel
3129	 * If the mask doesn't match, report an error to the parsing logic
3130	 */
3131	switch (optypenum) {
3132	case 0:
3133		optype = "generic undef request error";
3134		break;
3135	case 1:
3136		optype = "memory read error";
3137		break;
3138	case 2:
3139		optype = "memory write error";
3140		break;
3141	case 3:
3142		optype = "addr/cmd error";
3143		break;
3144	case 4:
3145		optype = "memory scrubbing error";
3146		break;
3147	default:
3148		optype = "reserved";
3149		break;
3150	}
3151
3152	if (pvt->info.type == KNIGHTS_LANDING) {
3153		if (channel == 14) {
3154			edac_dbg(0, "%s%s err_code:%04x:%04x EDRAM bank %d\n",
3155				overflow ? " OVERFLOW" : "",
3156				(uncorrected_error && recoverable)
3157				? " recoverable" : "",
3158				mscod, errcode,
3159				m->bank);
3160		} else {
3161			char A = *("A");
3162
3163			/*
3164			 * Reported channel is in range 0-2, so we can't map it
3165			 * back to mc. To figure out the mc we check the machine check
3166			 * bank register that reported this error.
3167			 * Bank 15 means mc0 and bank 16 means mc1.
3168			 */
3169			channel = knl_channel_remap(m->bank == 16, channel);
3170			channel_mask = 1 << channel;
3171
3172			snprintf(sb_msg, sizeof(sb_msg),
3173				 "%s%s err_code:%04x:%04x channel:%d (DIMM_%c)",
3174				 overflow ? " OVERFLOW" : "",
3175				 (uncorrected_error && recoverable)
3176				 ? " recoverable" : " ",
3177				 mscod, errcode, channel, A + channel);
3178			edac_mc_handle_error(tp_event, mci, core_err_cnt,
3179				m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0,
3180				channel, 0, -1,
3181				optype, sb_msg);
3182		}
3183		return;
3184	} else if (lsb < 12) {
3185		rc = get_memory_error_data(mci, m->addr, &socket, &ha,
3186					   &channel_mask, &rank,
3187					   &area_type, sb_msg);
3188	} else {
3189		rc = get_memory_error_data_from_mce(mci, m, &socket, &ha,
3190						    &channel_mask, sb_msg);
3191	}
3192
3193	if (rc < 0)
3194		goto err_parsing;
3195	new_mci = get_mci_for_node_id(socket, ha);
3196	if (!new_mci) {
3197		strscpy(sb_msg, "Error: socket got corrupted!");
3198		goto err_parsing;
3199	}
3200	mci = new_mci;
3201	pvt = mci->pvt_info;
3202
3203	first_channel = find_first_bit(&channel_mask, NUM_CHANNELS);
3204
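	/* Four ranks per DIMM slot: 0-3 is DIMM 0, 4-7 is DIMM 1, 8+ is DIMM 2 */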
3205	if (rank == 0xff)
3206		dimm = -1;
3207	else if (rank < 4)
3208		dimm = 0;
3209	else if (rank < 8)
3210		dimm = 1;
3211	else
3212		dimm = 2;
3213
3214	/*
3215	 * FIXME: On some memory configurations (mirror, lockstep), the
3216	 * Memory Controller can't point the error to a single DIMM. The
3217	 * EDAC core should be handling the channel mask, in order to point
3218	 * to the group of dimm's where the error may be happening.
3219	 */
3220	if (!pvt->is_lockstep && !pvt->is_cur_addr_mirrored && !pvt->is_close_pg)
3221		channel = first_channel;
3222	snprintf(sb_msg_full, sizeof(sb_msg_full),
3223		 "%s%s area:%s err_code:%04x:%04x socket:%d ha:%d channel_mask:%ld rank:%d %s",
3224		 overflow ? " OVERFLOW" : "",
3225		 (uncorrected_error && recoverable) ? " recoverable" : "",
3226		 area_type,
3227		 mscod, errcode,
3228		 socket, ha,
3229		 channel_mask,
3230		 rank, sb_msg);
3231
3232	edac_dbg(0, "%s\n", sb_msg_full);
3233
3234	/* FIXME: need support for channel mask */
3235
3236	if (channel == CHANNEL_UNSPECIFIED)
3237		channel = -1;
3238
3239	/* Call the helper to output message */
3240	edac_mc_handle_error(tp_event, mci, core_err_cnt,
3241			     m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0,
3242			     channel, dimm, -1,
3243			     optype, sb_msg_full);
3244	return;
3245err_parsing:
3246	edac_mc_handle_error(tp_event, mci, core_err_cnt, 0, 0, 0,
3247			     -1, -1, -1,
3248			     sb_msg, "");
3249
3250}
3251
3252/*
3253 * Check that logging is enabled and that this is the right type
3254 * of error for us to handle.
3255 */
3256static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val,
3257				   void *data)
3258{
3259	struct mce *mce = (struct mce *)data;
3260	struct mem_ctl_info *mci;
3261	char *type;
3262
3263	if (mce->kflags & MCE_HANDLED_CEC)
3264		return NOTIFY_DONE;
3265
3266	/*
3267	 * Just let mcelog handle it if the error is
3268	 * outside the memory controller. A memory error
3269	 * is indicated by bit 7 = 1 and bits 8-11,13-15 = 0.
3270	 * Bit 12 has a special meaning.
3271	 */
3272	if ((mce->status & 0xefff) >> 7 != 1)
3273		return NOTIFY_DONE;
3274
3275	/* Check ADDRV bit in STATUS */
3276	if (!GET_BITFIELD(mce->status, 58, 58))
3277		return NOTIFY_DONE;
3278
3279	/* Check MISCV bit in STATUS */
3280	if (!GET_BITFIELD(mce->status, 59, 59))
3281		return NOTIFY_DONE;
3282
3283	/* Check address type in MISC (physical address only) */
3284	if (GET_BITFIELD(mce->misc, 6, 8) != 2)
3285		return NOTIFY_DONE;
3286
3287	mci = get_mci_for_node_id(mce->socketid, IMC0);
3288	if (!mci)
3289		return NOTIFY_DONE;
3290
3291	if (mce->mcgstatus & MCG_STATUS_MCIP)
3292		type = "Exception";
3293	else
3294		type = "Event";
3295
3296	sbridge_mc_printk(mci, KERN_DEBUG, "HANDLING MCE MEMORY ERROR\n");
3297
3298	sbridge_mc_printk(mci, KERN_DEBUG, "CPU %d: Machine Check %s: %Lx "
3299			  "Bank %d: %016Lx\n", mce->extcpu, type,
3300			  mce->mcgstatus, mce->bank, mce->status);
3301	sbridge_mc_printk(mci, KERN_DEBUG, "TSC %llx ", mce->tsc);
3302	sbridge_mc_printk(mci, KERN_DEBUG, "ADDR %llx ", mce->addr);
3303	sbridge_mc_printk(mci, KERN_DEBUG, "MISC %llx ", mce->misc);
3304
3305	sbridge_mc_printk(mci, KERN_DEBUG, "PROCESSOR %u:%x TIME %llu SOCKET "
3306			  "%u APIC %x\n", mce->cpuvendor, mce->cpuid,
3307			  mce->time, mce->socketid, mce->apicid);
3308
3309	sbridge_mce_output_error(mci, mce);
3310
3311	/* Advise mcelog that the error was handled */
3312	mce->kflags |= MCE_HANDLED_EDAC;
3313	return NOTIFY_OK;
3314}
3315
3316static struct notifier_block sbridge_mce_dec = {
3317	.notifier_call	= sbridge_mce_check_error,
3318	.priority	= MCE_PRIO_EDAC,
3319};
3320
3321/****************************************************************************
3322			EDAC register/unregister logic
3323 ****************************************************************************/
3324
3325static void sbridge_unregister_mci(struct sbridge_dev *sbridge_dev)
3326{
3327	struct mem_ctl_info *mci = sbridge_dev->mci;
3328
3329	if (unlikely(!mci || !mci->pvt_info)) {
3330		edac_dbg(0, "MC: dev = %p\n", &sbridge_dev->pdev[0]->dev);
3331
3332		sbridge_printk(KERN_ERR, "Couldn't find mci handler\n");
3333		return;
3334	}
3335
3336	edac_dbg(0, "MC: mci = %p, dev = %p\n",
3337		 mci, &sbridge_dev->pdev[0]->dev);
3338
3339	/* Remove MC sysfs nodes */
3340	edac_mc_del_mc(mci->pdev);
3341
3342	edac_dbg(1, "%s: free mci struct\n", mci->ctl_name);
3343	kfree(mci->ctl_name);
3344	edac_mc_free(mci);
3345	sbridge_dev->mci = NULL;
3346}
3347
3348static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
3349{
3350	struct mem_ctl_info *mci;
3351	struct edac_mc_layer layers[2];
3352	struct sbridge_pvt *pvt;
3353	struct pci_dev *pdev = sbridge_dev->pdev[0];
3354	int rc;
3355
3356	/* allocate a new MC control structure */
3357	layers[0].type = EDAC_MC_LAYER_CHANNEL;
3358	layers[0].size = type == KNIGHTS_LANDING ?
3359		KNL_MAX_CHANNELS : NUM_CHANNELS;
3360	layers[0].is_virt_csrow = false;
3361	layers[1].type = EDAC_MC_LAYER_SLOT;
3362	layers[1].size = type == KNIGHTS_LANDING ? 1 : MAX_DIMMS;
3363	layers[1].is_virt_csrow = true;
3364	mci = edac_mc_alloc(sbridge_dev->mc, ARRAY_SIZE(layers), layers,
3365			    sizeof(*pvt));
3366
3367	if (unlikely(!mci))
3368		return -ENOMEM;
3369
3370	edac_dbg(0, "MC: mci = %p, dev = %p\n",
3371		 mci, &pdev->dev);
3372
3373	pvt = mci->pvt_info;
3374	memset(pvt, 0, sizeof(*pvt));
3375
3376	/* Associate sbridge_dev and mci for future usage */
3377	pvt->sbridge_dev = sbridge_dev;
3378	sbridge_dev->mci = mci;
3379
3380	mci->mtype_cap = type == KNIGHTS_LANDING ?
3381		MEM_FLAG_DDR4 : MEM_FLAG_DDR3;
3382	mci->edac_ctl_cap = EDAC_FLAG_NONE;
3383	mci->edac_cap = EDAC_FLAG_NONE;
3384	mci->mod_name = EDAC_MOD_STR;
3385	mci->dev_name = pci_name(pdev);
3386	mci->ctl_page_to_phys = NULL;
3387
3388	pvt->info.type = type;
3389	switch (type) {
3390	case IVY_BRIDGE:
3391		pvt->info.rankcfgr = IB_RANK_CFG_A;
3392		pvt->info.get_tolm = ibridge_get_tolm;
3393		pvt->info.get_tohm = ibridge_get_tohm;
3394		pvt->info.dram_rule = ibridge_dram_rule;
3395		pvt->info.get_memory_type = get_memory_type;
3396		pvt->info.get_node_id = get_node_id;
3397		pvt->info.get_ha = ibridge_get_ha;
3398		pvt->info.rir_limit = rir_limit;
3399		pvt->info.sad_limit = sad_limit;
3400		pvt->info.interleave_mode = interleave_mode;
3401		pvt->info.dram_attr = dram_attr;
3402		pvt->info.max_sad = ARRAY_SIZE(ibridge_dram_rule);
3403		pvt->info.interleave_list = ibridge_interleave_list;
3404		pvt->info.interleave_pkg = ibridge_interleave_pkg;
3405		pvt->info.get_width = ibridge_get_width;
3406
3407		/* Store pci devices at mci for faster access */
3408		rc = ibridge_mci_bind_devs(mci, sbridge_dev);
3409		if (unlikely(rc < 0))
3410			goto fail0;
3411		get_source_id(mci);
3412		mci->ctl_name = kasprintf(GFP_KERNEL, "Ivy Bridge SrcID#%d_Ha#%d",
3413			pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom);
3414		break;
3415	case SANDY_BRIDGE:
3416		pvt->info.rankcfgr = SB_RANK_CFG_A;
3417		pvt->info.get_tolm = sbridge_get_tolm;
3418		pvt->info.get_tohm = sbridge_get_tohm;
3419		pvt->info.dram_rule = sbridge_dram_rule;
3420		pvt->info.get_memory_type = get_memory_type;
3421		pvt->info.get_node_id = get_node_id;
3422		pvt->info.get_ha = sbridge_get_ha;
3423		pvt->info.rir_limit = rir_limit;
3424		pvt->info.sad_limit = sad_limit;
3425		pvt->info.interleave_mode = interleave_mode;
3426		pvt->info.dram_attr = dram_attr;
3427		pvt->info.max_sad = ARRAY_SIZE(sbridge_dram_rule);
3428		pvt->info.interleave_list = sbridge_interleave_list;
3429		pvt->info.interleave_pkg = sbridge_interleave_pkg;
3430		pvt->info.get_width = sbridge_get_width;
3431
3432		/* Store pci devices at mci for faster access */
3433		rc = sbridge_mci_bind_devs(mci, sbridge_dev);
3434		if (unlikely(rc < 0))
3435			goto fail0;
3436		get_source_id(mci);
3437		mci->ctl_name = kasprintf(GFP_KERNEL, "Sandy Bridge SrcID#%d_Ha#%d",
3438			pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom);
3439		break;
3440	case HASWELL:
3441		/* rankcfgr isn't used */
3442		pvt->info.get_tolm = haswell_get_tolm;
3443		pvt->info.get_tohm = haswell_get_tohm;
3444		pvt->info.dram_rule = ibridge_dram_rule;
3445		pvt->info.get_memory_type = haswell_get_memory_type;
3446		pvt->info.get_node_id = haswell_get_node_id;
3447		pvt->info.get_ha = ibridge_get_ha;
3448		pvt->info.rir_limit = haswell_rir_limit;
3449		pvt->info.sad_limit = sad_limit;
3450		pvt->info.interleave_mode = interleave_mode;
3451		pvt->info.dram_attr = dram_attr;
3452		pvt->info.max_sad = ARRAY_SIZE(ibridge_dram_rule);
3453		pvt->info.interleave_list = ibridge_interleave_list;
3454		pvt->info.interleave_pkg = ibridge_interleave_pkg;
3455		pvt->info.get_width = ibridge_get_width;
3456
3457		/* Store pci devices at mci for faster access */
3458		rc = haswell_mci_bind_devs(mci, sbridge_dev);
3459		if (unlikely(rc < 0))
3460			goto fail0;
3461		get_source_id(mci);
3462		mci->ctl_name = kasprintf(GFP_KERNEL, "Haswell SrcID#%d_Ha#%d",
3463			pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom);
3464		break;
3465	case BROADWELL:
3466		/* rankcfgr isn't used */
3467		pvt->info.get_tolm = haswell_get_tolm;
3468		pvt->info.get_tohm = haswell_get_tohm;
3469		pvt->info.dram_rule = ibridge_dram_rule;
3470		pvt->info.get_memory_type = haswell_get_memory_type;
3471		pvt->info.get_node_id = haswell_get_node_id;
3472		pvt->info.get_ha = ibridge_get_ha;
3473		pvt->info.rir_limit = haswell_rir_limit;
3474		pvt->info.sad_limit = sad_limit;
3475		pvt->info.interleave_mode = interleave_mode;
3476		pvt->info.dram_attr = dram_attr;
3477		pvt->info.max_sad = ARRAY_SIZE(ibridge_dram_rule);
3478		pvt->info.interleave_list = ibridge_interleave_list;
3479		pvt->info.interleave_pkg = ibridge_interleave_pkg;
3480		pvt->info.get_width = broadwell_get_width;
3481
3482		/* Store pci devices at mci for faster access */
3483		rc = broadwell_mci_bind_devs(mci, sbridge_dev);
3484		if (unlikely(rc < 0))
3485			goto fail0;
3486		get_source_id(mci);
3487		mci->ctl_name = kasprintf(GFP_KERNEL, "Broadwell SrcID#%d_Ha#%d",
3488			pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom);
3489		break;
3490	case KNIGHTS_LANDING:
3491		/* pvt->info.rankcfgr == ??? */
3492		pvt->info.get_tolm = knl_get_tolm;
3493		pvt->info.get_tohm = knl_get_tohm;
3494		pvt->info.dram_rule = knl_dram_rule;
3495		pvt->info.get_memory_type = knl_get_memory_type;
3496		pvt->info.get_node_id = knl_get_node_id;
3497		pvt->info.get_ha = knl_get_ha;
3498		pvt->info.rir_limit = NULL;
3499		pvt->info.sad_limit = knl_sad_limit;
3500		pvt->info.interleave_mode = knl_interleave_mode;
3501		pvt->info.dram_attr = dram_attr_knl;
3502		pvt->info.max_sad = ARRAY_SIZE(knl_dram_rule);
3503		pvt->info.interleave_list = knl_interleave_list;
3504		pvt->info.interleave_pkg = ibridge_interleave_pkg;
3505		pvt->info.get_width = knl_get_width;
3506
3507		rc = knl_mci_bind_devs(mci, sbridge_dev);
3508		if (unlikely(rc < 0))
3509			goto fail0;
3510		get_source_id(mci);
3511		mci->ctl_name = kasprintf(GFP_KERNEL, "Knights Landing SrcID#%d_Ha#%d",
3512			pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom);
3513		break;
3514	}
3515
3516	if (!mci->ctl_name) {
3517		rc = -ENOMEM;
3518		goto fail0;
3519	}
3520
3521	/* Get dimm basic config and the memory layout */
3522	rc = get_dimm_config(mci);
3523	if (rc < 0) {
3524		edac_dbg(0, "MC: failed to get_dimm_config()\n");
3525		goto fail;
3526	}
3527	get_memory_layout(mci);
3528
3529	/* record ptr to the generic device */
3530	mci->pdev = &pdev->dev;
3531
3532	/* add this new MC control structure to EDAC's list of MCs */
3533	if (unlikely(edac_mc_add_mc(mci))) {
3534		edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
3535		rc = -EINVAL;
3536		goto fail;
3537	}
3538
3539	return 0;
3540
3541fail:
3542	kfree(mci->ctl_name);
3543fail0:
3544	edac_mc_free(mci);
3545	sbridge_dev->mci = NULL;
3546	return rc;
3547}
3548
3549static const struct x86_cpu_id sbridge_cpuids[] = {
3550	X86_MATCH_VFM(INTEL_SANDYBRIDGE_X,	&pci_dev_descr_sbridge_table),
3551	X86_MATCH_VFM(INTEL_IVYBRIDGE_X,	&pci_dev_descr_ibridge_table),
3552	X86_MATCH_VFM(INTEL_HASWELL_X,		&pci_dev_descr_haswell_table),
3553	X86_MATCH_VFM(INTEL_BROADWELL_X,	&pci_dev_descr_broadwell_table),
3554	X86_MATCH_VFM(INTEL_BROADWELL_D,	&pci_dev_descr_broadwell_table),
3555	X86_MATCH_VFM(INTEL_XEON_PHI_KNL,	&pci_dev_descr_knl_table),
3556	X86_MATCH_VFM(INTEL_XEON_PHI_KNM,	&pci_dev_descr_knl_table),
3557	{ }
3558};
3559MODULE_DEVICE_TABLE(x86cpu, sbridge_cpuids);
3560
3561/*
3562 *	sbridge_probe	Get all devices and register memory controllers
3563 *			present.
3564 *	return:
3565 *		0 if a device was found
3566 *		< 0 for an error code
3567 */
3568
3569static int sbridge_probe(const struct x86_cpu_id *id)
3570{
3571	int rc;
3572	u8 mc, num_mc = 0;
3573	struct sbridge_dev *sbridge_dev;
3574	struct pci_id_table *ptable = (struct pci_id_table *)id->driver_data;
3575
3576	/* get the pci devices we want to reserve for our use */
3577	rc = sbridge_get_all_devices(&num_mc, ptable);
3578
3579	if (unlikely(rc < 0)) {
3580		edac_dbg(0, "couldn't get all devices\n");
3581		goto fail0;
3582	}
3583
3584	mc = 0;
3585
3586	list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) {
3587		edac_dbg(0, "Registering MC#%d (%d of %d)\n",
3588			 mc, mc + 1, num_mc);
3589
3590		sbridge_dev->mc = mc++;
3591		rc = sbridge_register_mci(sbridge_dev, ptable->type);
3592		if (unlikely(rc < 0))
3593			goto fail1;
3594	}
3595
3596	sbridge_printk(KERN_INFO, "%s\n", SBRIDGE_REVISION);
3597
3598	return 0;
3599
3600fail1:
3601	list_for_each_entry(sbridge_dev, &sbridge_edac_list, list)
3602		sbridge_unregister_mci(sbridge_dev);
3603
3604	sbridge_put_all_devices();
3605fail0:
3606	return rc;
3607}
3608
3609/*
3610 *	sbridge_remove	cleanup
3611 *
3612 */
3613static void sbridge_remove(void)
3614{
3615	struct sbridge_dev *sbridge_dev;
3616
3617	edac_dbg(0, "\n");
3618
3619	list_for_each_entry(sbridge_dev, &sbridge_edac_list, list)
3620		sbridge_unregister_mci(sbridge_dev);
3621
3622	/* Release PCI resources */
3623	sbridge_put_all_devices();
3624}
3625
3626/*
3627 *	sbridge_init		Module entry function
3628 *			Try to initialize this module for its devices
3629 */
3630static int __init sbridge_init(void)
3631{
3632	const struct x86_cpu_id *id;
3633	const char *owner;
3634	int rc;
3635
3636	edac_dbg(2, "\n");
3637
3638	if (ghes_get_devices())
3639		return -EBUSY;
3640
3641	owner = edac_get_owner();
3642	if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
3643		return -EBUSY;
3644
3645	if (cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
3646		return -ENODEV;
3647
3648	id = x86_match_cpu(sbridge_cpuids);
3649	if (!id)
3650		return -ENODEV;
3651
3652	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
3653	opstate_init();
3654
3655	rc = sbridge_probe(id);
3656
3657	if (rc >= 0) {
3658		mce_register_decode_chain(&sbridge_mce_dec);
3659		return 0;
3660	}
3661
3662	sbridge_printk(KERN_ERR, "Failed to register device with error %d.\n",
3663		      rc);
3664
3665	return rc;
3666}
3667
3668/*
3669 *	sbridge_exit()	Module exit function
3670 *			Unregister the driver
3671 */
3672static void __exit sbridge_exit(void)
3673{
3674	edac_dbg(2, "\n");
3675	sbridge_remove();
3676	mce_unregister_decode_chain(&sbridge_mce_dec);
3677}
3678
3679module_init(sbridge_init);
3680module_exit(sbridge_exit);
3681
3682module_param(edac_op_state, int, 0444);
3683MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
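/*
 * Usage example (illustrative only): loading the module with
 * "modprobe sb_edac edac_op_state=1" requests NMI reporting, while
 * edac_op_state=0 requests polling, per the parameter description above.
 */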
3684
3685MODULE_LICENSE("GPL");
3686MODULE_AUTHOR("Mauro Carvalho Chehab");
3687MODULE_AUTHOR("Red Hat Inc. (https://www.redhat.com)");
3688MODULE_DESCRIPTION("MC Driver for Intel Sandy Bridge and Ivy Bridge memory controllers - "
3689		   SBRIDGE_REVISION);
v5.4
   1// SPDX-License-Identifier: GPL-2.0-only
   2/* Intel Sandy Bridge -EN/-EP/-EX Memory Controller kernel module
   3 *
   4 * This driver supports the memory controllers found on the Intel
   5 * processor family Sandy Bridge.
   6 *
   7 * Copyright (c) 2011 by:
   8 *	 Mauro Carvalho Chehab
   9 */
  10
  11#include <linux/module.h>
  12#include <linux/init.h>
  13#include <linux/pci.h>
  14#include <linux/pci_ids.h>
  15#include <linux/slab.h>
  16#include <linux/delay.h>
  17#include <linux/edac.h>
  18#include <linux/mmzone.h>
  19#include <linux/smp.h>
  20#include <linux/bitmap.h>
  21#include <linux/math64.h>
  22#include <linux/mod_devicetable.h>
  23#include <asm/cpu_device_id.h>
  24#include <asm/intel-family.h>
  25#include <asm/processor.h>
  26#include <asm/mce.h>
  27
  28#include "edac_module.h"
  29
  30/* Static vars */
  31static LIST_HEAD(sbridge_edac_list);
  32
  33/*
  34 * Alter this version for the module when modifications are made
  35 */
  36#define SBRIDGE_REVISION    " Ver: 1.1.2 "
  37#define EDAC_MOD_STR	    "sb_edac"
  38
  39/*
  40 * Debug macros
  41 */
  42#define sbridge_printk(level, fmt, arg...)			\
  43	edac_printk(level, "sbridge", fmt, ##arg)
  44
  45#define sbridge_mc_printk(mci, level, fmt, arg...)		\
  46	edac_mc_chipset_printk(mci, level, "sbridge", fmt, ##arg)
  47
  48/*
  49 * Get a bit field at register value <v>, from bit <lo> to bit <hi>
  50 */
  51#define GET_BITFIELD(v, lo, hi)	\
  52	(((v) & GENMASK_ULL(hi, lo)) >> (lo))
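/*
 * Worked example: GET_BITFIELD(0x12345678, 8, 15) masks the value with
 * GENMASK_ULL(15, 8) == 0xff00, giving 0x5600, and shifts it right by 8,
 * yielding 0x56.
 */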
  53
  54/* Devices 12 Function 6, Offsets 0x80 to 0xcc */
  55static const u32 sbridge_dram_rule[] = {
  56	0x80, 0x88, 0x90, 0x98, 0xa0,
  57	0xa8, 0xb0, 0xb8, 0xc0, 0xc8,
  58};
  59
  60static const u32 ibridge_dram_rule[] = {
  61	0x60, 0x68, 0x70, 0x78, 0x80,
  62	0x88, 0x90, 0x98, 0xa0,	0xa8,
  63	0xb0, 0xb8, 0xc0, 0xc8, 0xd0,
  64	0xd8, 0xe0, 0xe8, 0xf0, 0xf8,
  65};
  66
  67static const u32 knl_dram_rule[] = {
  68	0x60, 0x68, 0x70, 0x78, 0x80, /* 0-4 */
  69	0x88, 0x90, 0x98, 0xa0, 0xa8, /* 5-9 */
  70	0xb0, 0xb8, 0xc0, 0xc8, 0xd0, /* 10-14 */
  71	0xd8, 0xe0, 0xe8, 0xf0, 0xf8, /* 15-19 */
  72	0x100, 0x108, 0x110, 0x118,   /* 20-23 */
  73};
  74
  75#define DRAM_RULE_ENABLE(reg)	GET_BITFIELD(reg, 0,  0)
  76#define A7MODE(reg)		GET_BITFIELD(reg, 26, 26)
  77
  78static char *show_dram_attr(u32 attr)
  79{
  80	switch (attr) {
  81		case 0:
  82			return "DRAM";
  83		case 1:
  84			return "MMCFG";
  85		case 2:
  86			return "NXM";
  87		default:
  88			return "unknown";
  89	}
  90}
  91
  92static const u32 sbridge_interleave_list[] = {
  93	0x84, 0x8c, 0x94, 0x9c, 0xa4,
  94	0xac, 0xb4, 0xbc, 0xc4, 0xcc,
  95};
  96
  97static const u32 ibridge_interleave_list[] = {
  98	0x64, 0x6c, 0x74, 0x7c, 0x84,
  99	0x8c, 0x94, 0x9c, 0xa4, 0xac,
 100	0xb4, 0xbc, 0xc4, 0xcc, 0xd4,
 101	0xdc, 0xe4, 0xec, 0xf4, 0xfc,
 102};
 103
 104static const u32 knl_interleave_list[] = {
 105	0x64, 0x6c, 0x74, 0x7c, 0x84, /* 0-4 */
 106	0x8c, 0x94, 0x9c, 0xa4, 0xac, /* 5-9 */
 107	0xb4, 0xbc, 0xc4, 0xcc, 0xd4, /* 10-14 */
 108	0xdc, 0xe4, 0xec, 0xf4, 0xfc, /* 15-19 */
 109	0x104, 0x10c, 0x114, 0x11c,   /* 20-23 */
 110};
 111#define MAX_INTERLEAVE							\
 112	(max_t(unsigned int, ARRAY_SIZE(sbridge_interleave_list),	\
 113	       max_t(unsigned int, ARRAY_SIZE(ibridge_interleave_list),	\
 114		     ARRAY_SIZE(knl_interleave_list))))
 115
 116struct interleave_pkg {
 117	unsigned char start;
 118	unsigned char end;
 119};
 120
 121static const struct interleave_pkg sbridge_interleave_pkg[] = {
 122	{ 0, 2 },
 123	{ 3, 5 },
 124	{ 8, 10 },
 125	{ 11, 13 },
 126	{ 16, 18 },
 127	{ 19, 21 },
 128	{ 24, 26 },
 129	{ 27, 29 },
 130};
 131
 132static const struct interleave_pkg ibridge_interleave_pkg[] = {
 133	{ 0, 3 },
 134	{ 4, 7 },
 135	{ 8, 11 },
 136	{ 12, 15 },
 137	{ 16, 19 },
 138	{ 20, 23 },
 139	{ 24, 27 },
 140	{ 28, 31 },
 141};
 142
 143static inline int sad_pkg(const struct interleave_pkg *table, u32 reg,
 144			  int interleave)
 145{
 146	return GET_BITFIELD(reg, table[interleave].start,
 147			    table[interleave].end);
 148}
 149
 150/* Devices 12 Function 7 */
 151
 152#define TOLM		0x80
 153#define TOHM		0x84
 154#define HASWELL_TOLM	0xd0
 155#define HASWELL_TOHM_0	0xd4
 156#define HASWELL_TOHM_1	0xd8
 157#define KNL_TOLM	0xd0
 158#define KNL_TOHM_0	0xd4
 159#define KNL_TOHM_1	0xd8
 160
 161#define GET_TOLM(reg)		((GET_BITFIELD(reg, 0,  3) << 28) | 0x3ffffff)
 162#define GET_TOHM(reg)		((GET_BITFIELD(reg, 0, 20) << 25) | 0x3ffffff)
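/*
 * Worked example (hypothetical register value): if the low nibble of the
 * TOLM register reads 0x4, GET_TOLM() yields (0x4 << 28) | 0x3ffffff =
 * 0x43ffffff, an inclusive limit just below 1088 MiB; the low bits are
 * filled with 1s because the register only holds the upper address bits.
 */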
 163
 164/* Device 13 Function 6 */
 165
 166#define SAD_TARGET	0xf0
 167
 168#define SOURCE_ID(reg)		GET_BITFIELD(reg, 9, 11)
 169
 170#define SOURCE_ID_KNL(reg)	GET_BITFIELD(reg, 12, 14)
 171
 172#define SAD_CONTROL	0xf4
 173
 174/* Device 14 function 0 */
 175
 176static const u32 tad_dram_rule[] = {
 177	0x40, 0x44, 0x48, 0x4c,
 178	0x50, 0x54, 0x58, 0x5c,
 179	0x60, 0x64, 0x68, 0x6c,
 180};
 181#define MAX_TAD	ARRAY_SIZE(tad_dram_rule)
 182
 183#define TAD_LIMIT(reg)		((GET_BITFIELD(reg, 12, 31) << 26) | 0x3ffffff)
 184#define TAD_SOCK(reg)		GET_BITFIELD(reg, 10, 11)
 185#define TAD_CH(reg)		GET_BITFIELD(reg,  8,  9)
 186#define TAD_TGT3(reg)		GET_BITFIELD(reg,  6,  7)
 187#define TAD_TGT2(reg)		GET_BITFIELD(reg,  4,  5)
 188#define TAD_TGT1(reg)		GET_BITFIELD(reg,  2,  3)
 189#define TAD_TGT0(reg)		GET_BITFIELD(reg,  0,  1)
 190
 191/* Device 15, function 0 */
 192
 193#define MCMTR			0x7c
 194#define KNL_MCMTR		0x624
 195
 196#define IS_ECC_ENABLED(mcmtr)		GET_BITFIELD(mcmtr, 2, 2)
 197#define IS_LOCKSTEP_ENABLED(mcmtr)	GET_BITFIELD(mcmtr, 1, 1)
 198#define IS_CLOSE_PG(mcmtr)		GET_BITFIELD(mcmtr, 0, 0)
 199
 200/* Device 15, function 1 */
 201
 202#define RASENABLES		0xac
 203#define IS_MIRROR_ENABLED(reg)		GET_BITFIELD(reg, 0, 0)
 204
 205/* Device 15, functions 2-5 */
 206
 207static const int mtr_regs[] = {
 208	0x80, 0x84, 0x88,
 209};
 210
 211static const int knl_mtr_reg = 0xb60;
 212
 213#define RANK_DISABLE(mtr)		GET_BITFIELD(mtr, 16, 19)
 214#define IS_DIMM_PRESENT(mtr)		GET_BITFIELD(mtr, 14, 14)
 215#define RANK_CNT_BITS(mtr)		GET_BITFIELD(mtr, 12, 13)
 216#define RANK_WIDTH_BITS(mtr)		GET_BITFIELD(mtr, 2, 4)
 217#define COL_WIDTH_BITS(mtr)		GET_BITFIELD(mtr, 0, 1)
 218
 219static const u32 tad_ch_nilv_offset[] = {
 220	0x90, 0x94, 0x98, 0x9c,
 221	0xa0, 0xa4, 0xa8, 0xac,
 222	0xb0, 0xb4, 0xb8, 0xbc,
 223};
 224#define CHN_IDX_OFFSET(reg)		GET_BITFIELD(reg, 28, 29)
 225#define TAD_OFFSET(reg)			(GET_BITFIELD(reg,  6, 25) << 26)
 226
 227static const u32 rir_way_limit[] = {
 228	0x108, 0x10c, 0x110, 0x114, 0x118,
 229};
 230#define MAX_RIR_RANGES ARRAY_SIZE(rir_way_limit)
 231
 232#define IS_RIR_VALID(reg)	GET_BITFIELD(reg, 31, 31)
 233#define RIR_WAY(reg)		GET_BITFIELD(reg, 28, 29)
 234
 235#define MAX_RIR_WAY	8
 236
 237static const u32 rir_offset[MAX_RIR_RANGES][MAX_RIR_WAY] = {
 238	{ 0x120, 0x124, 0x128, 0x12c, 0x130, 0x134, 0x138, 0x13c },
 239	{ 0x140, 0x144, 0x148, 0x14c, 0x150, 0x154, 0x158, 0x15c },
 240	{ 0x160, 0x164, 0x168, 0x16c, 0x170, 0x174, 0x178, 0x17c },
 241	{ 0x180, 0x184, 0x188, 0x18c, 0x190, 0x194, 0x198, 0x19c },
 242	{ 0x1a0, 0x1a4, 0x1a8, 0x1ac, 0x1b0, 0x1b4, 0x1b8, 0x1bc },
 243};
 244
 245#define RIR_RNK_TGT(type, reg) (((type) == BROADWELL) ? \
 246	GET_BITFIELD(reg, 20, 23) : GET_BITFIELD(reg, 16, 19))
 247
 248#define RIR_OFFSET(type, reg) (((type) == HASWELL || (type) == BROADWELL) ? \
 249	GET_BITFIELD(reg,  2, 15) : GET_BITFIELD(reg,  2, 14))
 250
 251/* Device 16, functions 2-7 */
 252
 253/*
 254 * FIXME: Implement the error count reads directly
 255 */
 256
 257static const u32 correrrcnt[] = {
 258	0x104, 0x108, 0x10c, 0x110,
 259};
 260
 261#define RANK_ODD_OV(reg)		GET_BITFIELD(reg, 31, 31)
 262#define RANK_ODD_ERR_CNT(reg)		GET_BITFIELD(reg, 16, 30)
 263#define RANK_EVEN_OV(reg)		GET_BITFIELD(reg, 15, 15)
 264#define RANK_EVEN_ERR_CNT(reg)		GET_BITFIELD(reg,  0, 14)
 265
 266static const u32 correrrthrsld[] = {
 267	0x11c, 0x120, 0x124, 0x128,
 268};
 269
 270#define RANK_ODD_ERR_THRSLD(reg)	GET_BITFIELD(reg, 16, 30)
 271#define RANK_EVEN_ERR_THRSLD(reg)	GET_BITFIELD(reg,  0, 14)
 272
 273
 274/* Device 17, function 0 */
 275
 276#define SB_RANK_CFG_A		0x0328
 277
 278#define IB_RANK_CFG_A		0x0320
 279
 280/*
 281 * sbridge structs
 282 */
 283
 284#define NUM_CHANNELS		6	/* Max channels per MC */
 285#define MAX_DIMMS		3	/* Max DIMMS per channel */
 286#define KNL_MAX_CHAS		38	/* KNL max num. of Cache Home Agents */
 287#define KNL_MAX_CHANNELS	6	/* KNL max num. of PCI channels */
 288#define KNL_MAX_EDCS		8	/* Embedded DRAM controllers */
 289#define CHANNEL_UNSPECIFIED	0xf	/* Intel IA32 SDM 15-14 */
 290
 291enum type {
 292	SANDY_BRIDGE,
 293	IVY_BRIDGE,
 294	HASWELL,
 295	BROADWELL,
 296	KNIGHTS_LANDING,
 297};
 298
 299enum domain {
 300	IMC0 = 0,
 301	IMC1,
 302	SOCK,
 303};
 304
 305enum mirroring_mode {
 306	NON_MIRRORING,
 307	ADDR_RANGE_MIRRORING,
 308	FULL_MIRRORING,
 309};
 310
 311struct sbridge_pvt;
 312struct sbridge_info {
 313	enum type	type;
 314	u32		mcmtr;
 315	u32		rankcfgr;
 316	u64		(*get_tolm)(struct sbridge_pvt *pvt);
 317	u64		(*get_tohm)(struct sbridge_pvt *pvt);
 318	u64		(*rir_limit)(u32 reg);
 319	u64		(*sad_limit)(u32 reg);
 320	u32		(*interleave_mode)(u32 reg);
 321	u32		(*dram_attr)(u32 reg);
 322	const u32	*dram_rule;
 323	const u32	*interleave_list;
 324	const struct interleave_pkg *interleave_pkg;
 325	u8		max_sad;
 326	u8		(*get_node_id)(struct sbridge_pvt *pvt);
 327	u8		(*get_ha)(u8 bank);
 328	enum mem_type	(*get_memory_type)(struct sbridge_pvt *pvt);
 329	enum dev_type	(*get_width)(struct sbridge_pvt *pvt, u32 mtr);
 330	struct pci_dev	*pci_vtd;
 331};
 332
 333struct sbridge_channel {
 334	u32		ranks;
 335	u32		dimms;
 336};
 337
 338struct pci_id_descr {
 339	int			dev_id;
 340	int			optional;
 341	enum domain		dom;
 342};
 343
 344struct pci_id_table {
 345	const struct pci_id_descr	*descr;
 346	int				n_devs_per_imc;
 347	int				n_devs_per_sock;
 348	int				n_imcs_per_sock;
 349	enum type			type;
 350};
 351
 352struct sbridge_dev {
 353	struct list_head	list;
 354	int			seg;
 355	u8			bus, mc;
 356	u8			node_id, source_id;
 357	struct pci_dev		**pdev;
 358	enum domain		dom;
 359	int			n_devs;
 360	int			i_devs;
 361	struct mem_ctl_info	*mci;
 362};
 363
 364struct knl_pvt {
 365	struct pci_dev          *pci_cha[KNL_MAX_CHAS];
 366	struct pci_dev          *pci_channel[KNL_MAX_CHANNELS];
 367	struct pci_dev          *pci_mc0;
 368	struct pci_dev          *pci_mc1;
 369	struct pci_dev          *pci_mc0_misc;
 370	struct pci_dev          *pci_mc1_misc;
 371	struct pci_dev          *pci_mc_info; /* tolm, tohm */
 372};
 373
 374struct sbridge_pvt {
 375	/* Devices per socket */
 376	struct pci_dev		*pci_ddrio;
 377	struct pci_dev		*pci_sad0, *pci_sad1;
 378	struct pci_dev		*pci_br0, *pci_br1;
 379	/* Devices per memory controller */
 380	struct pci_dev		*pci_ha, *pci_ta, *pci_ras;
 381	struct pci_dev		*pci_tad[NUM_CHANNELS];
 382
 383	struct sbridge_dev	*sbridge_dev;
 384
 385	struct sbridge_info	info;
 386	struct sbridge_channel	channel[NUM_CHANNELS];
 387
 388	/* Memory type detection */
 389	bool			is_cur_addr_mirrored, is_lockstep, is_close_pg;
 390	bool			is_chan_hash;
 391	enum mirroring_mode	mirror_mode;
 392
 393	/* Memory description */
 394	u64			tolm, tohm;
 395	struct knl_pvt knl;
 396};
 397
 398#define PCI_DESCR(device_id, opt, domain)	\
 399	.dev_id = (device_id),		\
 400	.optional = opt,	\
 401	.dom = domain
 402
 403static const struct pci_id_descr pci_dev_descr_sbridge[] = {
 404		/* Processor Home Agent */
 405	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0,   0, IMC0) },
 406
 407		/* Memory controller */
 408	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA,    0, IMC0) },
 409	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS,   0, IMC0) },
 410	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0,  0, IMC0) },
 411	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1,  0, IMC0) },
 412	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2,  0, IMC0) },
 413	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3,  0, IMC0) },
 414	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO, 1, SOCK) },
 415
 416		/* System Address Decoder */
 417	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0,      0, SOCK) },
 418	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1,      0, SOCK) },
 419
 420		/* Broadcast Registers */
 421	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_BR,        0, SOCK) },
 422};
 423
 424#define PCI_ID_TABLE_ENTRY(A, N, M, T) {	\
 425	.descr = A,			\
 426	.n_devs_per_imc = N,	\
 427	.n_devs_per_sock = ARRAY_SIZE(A),	\
 428	.n_imcs_per_sock = M,	\
 429	.type = T			\
 430}
 431
 432static const struct pci_id_table pci_dev_descr_sbridge_table[] = {
 433	PCI_ID_TABLE_ENTRY(pci_dev_descr_sbridge, ARRAY_SIZE(pci_dev_descr_sbridge), 1, SANDY_BRIDGE),
 434	{0,}			/* 0 terminated list. */
 435};
 436
 437/* This changes depending on whether there is 1HA or 2HA:
 438 * 1HA:
 439 *	0x0eb8 (17.0) is DDRIO0
 440 * 2HA:
 441 *	0x0ebc (17.4) is DDRIO0
 442 */
 443#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0	0x0eb8
 444#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0	0x0ebc
 445
 446/* pci ids */
 447#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0		0x0ea0
 448#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA		0x0ea8
 449#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS		0x0e71
 450#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0	0x0eaa
 451#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1	0x0eab
 452#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2	0x0eac
 453#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3	0x0ead
 454#define PCI_DEVICE_ID_INTEL_IBRIDGE_SAD			0x0ec8
 455#define PCI_DEVICE_ID_INTEL_IBRIDGE_BR0			0x0ec9
 456#define PCI_DEVICE_ID_INTEL_IBRIDGE_BR1			0x0eca
 457#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1		0x0e60
 458#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA		0x0e68
 459#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS		0x0e79
 460#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0	0x0e6a
 461#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1	0x0e6b
 462#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD2	0x0e6c
 463#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD3	0x0e6d
 464
 465static const struct pci_id_descr pci_dev_descr_ibridge[] = {
 466		/* Processor Home Agent */
 467	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0,        0, IMC0) },
 468	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1,        1, IMC1) },
 469
 470		/* Memory controller */
 471	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA,     0, IMC0) },
 472	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS,    0, IMC0) },
 473	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0,   0, IMC0) },
 474	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1,   0, IMC0) },
 475	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2,   0, IMC0) },
 476	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3,   0, IMC0) },
 477
 478		/* Optional, mode 2HA */
 479	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA,     1, IMC1) },
 480	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS,    1, IMC1) },
 481	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0,   1, IMC1) },
 482	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1,   1, IMC1) },
 483	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD2,   1, IMC1) },
 484	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD3,   1, IMC1) },
 485
 486	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0, 1, SOCK) },
 487	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0, 1, SOCK) },
 488
 489		/* System Address Decoder */
 490	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_SAD,            0, SOCK) },
 491
 492		/* Broadcast Registers */
 493	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_BR0,            1, SOCK) },
 494	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_BR1,            0, SOCK) },
 495
 496};
 497
 498static const struct pci_id_table pci_dev_descr_ibridge_table[] = {
 499	PCI_ID_TABLE_ENTRY(pci_dev_descr_ibridge, 12, 2, IVY_BRIDGE),
 500	{0,}			/* 0 terminated list. */
 501};
 502
 503/* Haswell support */
 504/* EN processor:
 505 *	- 1 IMC
 506 *	- 3 DDR3 channels, 2 DPC per channel
 507 * EP processor:
 508 *	- 1 or 2 IMC
 509 *	- 4 DDR4 channels, 3 DPC per channel
 510 * EP 4S processor:
 511 *	- 2 IMC
 512 *	- 4 DDR4 channels, 3 DPC per channel
 513 * EX processor:
 514 *	- 2 IMC
 515 *	- each IMC interfaces with a SMI 2 channel
 516 *	- each SMI channel interfaces with a scalable memory buffer
 517 *	- each scalable memory buffer supports 4 DDR3/DDR4 channels, 3 DPC
 518 */
 519#define HASWELL_DDRCRCLKCONTROLS 0xa10 /* Ditto on Broadwell */
 520#define HASWELL_HASYSDEFEATURE2 0x84
 521#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_VTD_MISC 0x2f28
 522#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0	0x2fa0
 523#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1	0x2f60
 524#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA	0x2fa8
 525#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TM	0x2f71
 526#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA	0x2f68
 527#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TM	0x2f79
 528#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD0 0x2ffc
 529#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD1 0x2ffd
 530#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0 0x2faa
 531#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD1 0x2fab
 532#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD2 0x2fac
 533#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3 0x2fad
 534#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0 0x2f6a
 535#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD1 0x2f6b
 536#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD2 0x2f6c
 537#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD3 0x2f6d
 538#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0 0x2fbd
 539#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO1 0x2fbf
 540#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO2 0x2fb9
 541#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO3 0x2fbb
 542static const struct pci_id_descr pci_dev_descr_haswell[] = {
 543	/* first item must be the HA */
 544	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0,      0, IMC0) },
 545	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1,      1, IMC1) },
 546
 547	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA,   0, IMC0) },
 548	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TM,   0, IMC0) },
 549	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0, 0, IMC0) },
 550	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD1, 0, IMC0) },
 551	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD2, 1, IMC0) },
 552	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3, 1, IMC0) },
 553
 554	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA,   1, IMC1) },
 555	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TM,   1, IMC1) },
 556	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0, 1, IMC1) },
 557	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD1, 1, IMC1) },
 558	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD2, 1, IMC1) },
 559	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD3, 1, IMC1) },
 560
 561	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD0, 0, SOCK) },
 562	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD1, 0, SOCK) },
 563	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0,   1, SOCK) },
 564	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO1,   1, SOCK) },
 565	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO2,   1, SOCK) },
 566	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO3,   1, SOCK) },
 567};
 568
 569static const struct pci_id_table pci_dev_descr_haswell_table[] = {
 570	PCI_ID_TABLE_ENTRY(pci_dev_descr_haswell, 13, 2, HASWELL),
 571	{0,}			/* 0 terminated list. */
 572};
 573
 574/* Knights Landing Support */
 575/*
 576 * KNL's memory channels are swizzled between memory controllers.
 577 * MC0 is mapped to CH3,4,5 and MC1 is mapped to CH0,1,2
 578 */
 579#define knl_channel_remap(mc, chan) ((mc) ? (chan) : (chan) + 3)
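/*
 * For example, knl_channel_remap(0, 0) == 3 and knl_channel_remap(0, 2) == 5
 * (MC0 targets land on CH3-5), while knl_channel_remap(1, 0) == 0 and
 * knl_channel_remap(1, 2) == 2 (MC1 targets land on CH0-2).
 */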
 580
 581/* Memory controller, TAD tables, error injection - 2-8-0, 2-9-0 (2 of these) */
 582#define PCI_DEVICE_ID_INTEL_KNL_IMC_MC       0x7840
 583/* DRAM channel registers; bank addrs, dimmmtr, etc. 2-8-2 - 2-9-4 (6 of these) */
 584#define PCI_DEVICE_ID_INTEL_KNL_IMC_CHAN     0x7843
 585/* kdrwdbu TAD limits/offsets, MCMTR - 2-10-1, 2-11-1 (2 of these) */
 586#define PCI_DEVICE_ID_INTEL_KNL_IMC_TA       0x7844
 587/* CHA broadcast registers, dram rules - 1-29-0 (1 of these) */
 588#define PCI_DEVICE_ID_INTEL_KNL_IMC_SAD0     0x782a
 589/* SAD target - 1-29-1 (1 of these) */
 590#define PCI_DEVICE_ID_INTEL_KNL_IMC_SAD1     0x782b
 591/* Caching / Home Agent */
 592#define PCI_DEVICE_ID_INTEL_KNL_IMC_CHA      0x782c
 593/* Device with TOLM and TOHM, 0-5-0 (1 of these) */
 594#define PCI_DEVICE_ID_INTEL_KNL_IMC_TOLHM    0x7810
 595
 596/*
 597 * KNL differs from SB, IB, and Haswell in that it has multiple
 598 * instances of the same device with the same device ID, so we handle that
 599 * by creating as many copies in the table as we expect to find.
 600 * (Entries with the same device ID must be grouped together.)
 601 */
 602
 603static const struct pci_id_descr pci_dev_descr_knl[] = {
 604	[0 ... 1]   = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_MC,    0, IMC0)},
 605	[2 ... 7]   = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_CHAN,  0, IMC0) },
 606	[8]	    = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_TA,    0, IMC0) },
 607	[9]	    = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_TOLHM, 0, IMC0) },
 608	[10]	    = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_SAD0,  0, SOCK) },
 609	[11]	    = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_SAD1,  0, SOCK) },
 610	[12 ... 49] = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_CHA,   0, SOCK) },
 611};
 612
 613static const struct pci_id_table pci_dev_descr_knl_table[] = {
 614	PCI_ID_TABLE_ENTRY(pci_dev_descr_knl, ARRAY_SIZE(pci_dev_descr_knl), 1, KNIGHTS_LANDING),
 615	{0,}
 616};
 617
 618/*
 619 * Broadwell support
 620 *
 621 * DE processor:
 622 *	- 1 IMC
 623 *	- 2 DDR3 channels, 2 DPC per channel
 624 * EP processor:
 625 *	- 1 or 2 IMC
 626 *	- 4 DDR4 channels, 3 DPC per channel
 627 * EP 4S processor:
 628 *	- 2 IMC
 629 *	- 4 DDR4 channels, 3 DPC per channel
 630 * EX processor:
 631 *	- 2 IMC
 632 *	- each IMC interfaces with a SMI 2 channel
 633 *	- each SMI channel interfaces with a scalable memory buffer
 634 *	- each scalable memory buffer supports 4 DDR3/DDR4 channels, 3 DPC
 635 */
 636#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_VTD_MISC 0x6f28
 637#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0	0x6fa0
 638#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1	0x6f60
 639#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA	0x6fa8
 640#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TM	0x6f71
 641#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TA	0x6f68
 642#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TM	0x6f79
 643#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD0 0x6ffc
 644#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD1 0x6ffd
 645#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD0 0x6faa
 646#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD1 0x6fab
 647#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD2 0x6fac
 648#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD3 0x6fad
 649#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD0 0x6f6a
 650#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD1 0x6f6b
 651#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD2 0x6f6c
 652#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD3 0x6f6d
 653#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_DDRIO0 0x6faf
 654
 655static const struct pci_id_descr pci_dev_descr_broadwell[] = {
 656	/* first item must be the HA */
 657	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0,      0, IMC0) },
 658	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1,      1, IMC1) },
 659
 660	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA,   0, IMC0) },
 661	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TM,   0, IMC0) },
 662	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD0, 0, IMC0) },
 663	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD1, 0, IMC0) },
 664	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD2, 1, IMC0) },
 665	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD3, 1, IMC0) },
 666
 667	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TA,   1, IMC1) },
 668	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TM,   1, IMC1) },
 669	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD0, 1, IMC1) },
 670	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD1, 1, IMC1) },
 671	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD2, 1, IMC1) },
 672	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD3, 1, IMC1) },
 673
 674	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD0, 0, SOCK) },
 675	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD1, 0, SOCK) },
 676	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_DDRIO0,   1, SOCK) },
 677};
 678
 679static const struct pci_id_table pci_dev_descr_broadwell_table[] = {
 680	PCI_ID_TABLE_ENTRY(pci_dev_descr_broadwell, 10, 2, BROADWELL),
 681	{0,}			/* 0 terminated list. */
 682};
 683
 684
 685/****************************************************************************
 686			Ancillary status routines
 687 ****************************************************************************/
 688
 689static inline int numrank(enum type type, u32 mtr)
 690{
 691	int ranks = (1 << RANK_CNT_BITS(mtr));
 692	int max = 4;
 693
 694	if (type == HASWELL || type == BROADWELL || type == KNIGHTS_LANDING)
 695		max = 8;
 696
 697	if (ranks > max) {
 698		edac_dbg(0, "Invalid number of ranks: %d (max = %i) raw value = %x (%04x)\n",
 699			 ranks, max, (unsigned int)RANK_CNT_BITS(mtr), mtr);
 700		return -EINVAL;
 701	}
 702
 703	return ranks;
 704}
 705
 706static inline int numrow(u32 mtr)
 707{
 708	int rows = (RANK_WIDTH_BITS(mtr) + 12);
 709
 710	if (rows < 13 || rows > 18) {
 711		edac_dbg(0, "Invalid number of rows: %d (should be between 13 and 18) raw value = %x (%04x)\n",
 712			 rows, (unsigned int)RANK_WIDTH_BITS(mtr), mtr);
 713		return -EINVAL;
 714	}
 715
 716	return 1 << rows;
 717}
 718
 719static inline int numcol(u32 mtr)
 720{
 721	int cols = (COL_WIDTH_BITS(mtr) + 10);
 722
 723	if (cols > 12) {
 724		edac_dbg(0, "Invalid number of cols: %d (max = 12) raw value = %x (%04x)\n",
 725			 cols, (unsigned int)COL_WIDTH_BITS(mtr), mtr);
 726		return -EINVAL;
 727	}
 728
 729	return 1 << cols;
 730}
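/*
 * Worked example (hypothetical MTR value): a register whose fields decode
 * to RANK_CNT_BITS == 1, RANK_WIDTH_BITS == 3 and COL_WIDTH_BITS == 0
 * describes 2 ranks, 1 << (3 + 12) == 32768 rows and 1 << (0 + 10) == 1024
 * columns.
 */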
 731
 732static struct sbridge_dev *get_sbridge_dev(int seg, u8 bus, enum domain dom,
 733					   int multi_bus,
 734					   struct sbridge_dev *prev)
 735{
 736	struct sbridge_dev *sbridge_dev;
 737
 738	/*
 739	 * If we have devices scattered across several busses that pertain
 740	 * to the same memory controller, we'll lump them all together.
 741	 */
 742	if (multi_bus) {
 743		return list_first_entry_or_null(&sbridge_edac_list,
 744				struct sbridge_dev, list);
 745	}
 746
 747	sbridge_dev = list_entry(prev ? prev->list.next
 748				      : sbridge_edac_list.next, struct sbridge_dev, list);
 749
 750	list_for_each_entry_from(sbridge_dev, &sbridge_edac_list, list) {
 751		if ((sbridge_dev->seg == seg) && (sbridge_dev->bus == bus) &&
 752				(dom == SOCK || dom == sbridge_dev->dom))
 753			return sbridge_dev;
 754	}
 755
 756	return NULL;
 757}
 758
 759static struct sbridge_dev *alloc_sbridge_dev(int seg, u8 bus, enum domain dom,
 760					     const struct pci_id_table *table)
 761{
 762	struct sbridge_dev *sbridge_dev;
 763
 764	sbridge_dev = kzalloc(sizeof(*sbridge_dev), GFP_KERNEL);
 765	if (!sbridge_dev)
 766		return NULL;
 767
 768	sbridge_dev->pdev = kcalloc(table->n_devs_per_imc,
 769				    sizeof(*sbridge_dev->pdev),
 770				    GFP_KERNEL);
 771	if (!sbridge_dev->pdev) {
 772		kfree(sbridge_dev);
 773		return NULL;
 774	}
 775
 776	sbridge_dev->seg = seg;
 777	sbridge_dev->bus = bus;
 778	sbridge_dev->dom = dom;
 779	sbridge_dev->n_devs = table->n_devs_per_imc;
 780	list_add_tail(&sbridge_dev->list, &sbridge_edac_list);
 781
 782	return sbridge_dev;
 783}
 784
 785static void free_sbridge_dev(struct sbridge_dev *sbridge_dev)
 786{
 787	list_del(&sbridge_dev->list);
 788	kfree(sbridge_dev->pdev);
 789	kfree(sbridge_dev);
 790}
 791
 792static u64 sbridge_get_tolm(struct sbridge_pvt *pvt)
 793{
 794	u32 reg;
 795
 796	/* Address range is 32:28 */
 797	pci_read_config_dword(pvt->pci_sad1, TOLM, &reg);
 798	return GET_TOLM(reg);
 799}
 800
 801static u64 sbridge_get_tohm(struct sbridge_pvt *pvt)
 802{
 803	u32 reg;
 804
 805	pci_read_config_dword(pvt->pci_sad1, TOHM, &reg);
 806	return GET_TOHM(reg);
 807}
 808
 809static u64 ibridge_get_tolm(struct sbridge_pvt *pvt)
 810{
 811	u32 reg;
 812
 813	pci_read_config_dword(pvt->pci_br1, TOLM, &reg);
 814
 815	return GET_TOLM(reg);
 816}
 817
 818static u64 ibridge_get_tohm(struct sbridge_pvt *pvt)
 819{
 820	u32 reg;
 821
 822	pci_read_config_dword(pvt->pci_br1, TOHM, &reg);
 823
 824	return GET_TOHM(reg);
 825}
 826
 827static u64 rir_limit(u32 reg)
 828{
 829	return ((u64)GET_BITFIELD(reg,  1, 10) << 29) | 0x1fffffff;
 830}
 831
 832static u64 sad_limit(u32 reg)
 833{
 834	return (GET_BITFIELD(reg, 6, 25) << 26) | 0x3ffffff;
 835}
 836
 837static u32 interleave_mode(u32 reg)
 838{
 839	return GET_BITFIELD(reg, 1, 1);
 840}
 841
 842static u32 dram_attr(u32 reg)
 843{
 844	return GET_BITFIELD(reg, 2, 3);
 845}
 846
 847static u64 knl_sad_limit(u32 reg)
 848{
 849	return (GET_BITFIELD(reg, 7, 26) << 26) | 0x3ffffff;
 850}
 851
 852static u32 knl_interleave_mode(u32 reg)
 853{
 854	return GET_BITFIELD(reg, 1, 2);
 855}
 856
 857static const char * const knl_intlv_mode[] = {
 858	"[8:6]", "[10:8]", "[14:12]", "[32:30]"
 859};
 860
 861static const char *get_intlv_mode_str(u32 reg, enum type t)
 862{
 863	if (t == KNIGHTS_LANDING)
 864		return knl_intlv_mode[knl_interleave_mode(reg)];
 865	else
 866		return interleave_mode(reg) ? "[8:6]" : "[8:6]XOR[18:16]";
 867}
 868
 869static u32 dram_attr_knl(u32 reg)
 870{
 871	return GET_BITFIELD(reg, 3, 4);
 872}
 873
 874
 875static enum mem_type get_memory_type(struct sbridge_pvt *pvt)
 876{
 877	u32 reg;
 878	enum mem_type mtype;
 879
 880	if (pvt->pci_ddrio) {
 881		pci_read_config_dword(pvt->pci_ddrio, pvt->info.rankcfgr,
 882				      &reg);
 883		if (GET_BITFIELD(reg, 11, 11))
 884			/* FIXME: Can also be LRDIMM */
 885			mtype = MEM_RDDR3;
 886		else
 887			mtype = MEM_DDR3;
 888	} else
 889		mtype = MEM_UNKNOWN;
 890
 891	return mtype;
 892}
 893
 894static enum mem_type haswell_get_memory_type(struct sbridge_pvt *pvt)
 895{
 896	u32 reg;
 897	bool registered = false;
 898	enum mem_type mtype = MEM_UNKNOWN;
 899
 900	if (!pvt->pci_ddrio)
 901		goto out;
 902
 903	pci_read_config_dword(pvt->pci_ddrio,
 904			      HASWELL_DDRCRCLKCONTROLS, &reg);
 905	/* Is_Rdimm */
 906	if (GET_BITFIELD(reg, 16, 16))
 907		registered = true;
 908
 909	pci_read_config_dword(pvt->pci_ta, MCMTR, &reg);
 910	if (GET_BITFIELD(reg, 14, 14)) {
 911		if (registered)
 912			mtype = MEM_RDDR4;
 913		else
 914			mtype = MEM_DDR4;
 915	} else {
 916		if (registered)
 917			mtype = MEM_RDDR3;
 918		else
 919			mtype = MEM_DDR3;
 920	}
 921
 922out:
 923	return mtype;
 924}
 925
 926static enum dev_type knl_get_width(struct sbridge_pvt *pvt, u32 mtr)
 927{
 928	/* for KNL value is fixed */
 929	return DEV_X16;
 930}
 931
 932static enum dev_type sbridge_get_width(struct sbridge_pvt *pvt, u32 mtr)
 933{
 934	/* there's no way to figure out the device width */
 935	return DEV_UNKNOWN;
 936}
 937
 938static enum dev_type __ibridge_get_width(u32 mtr)
 939{
 940	enum dev_type type;
 941
 942	switch (mtr) {
 943	case 3:
 944		type = DEV_UNKNOWN;
 945		break;
 946	case 2:
 947		type = DEV_X16;
 948		break;
 949	case 1:
 950		type = DEV_X8;
 951		break;
 952	case 0:
 953		type = DEV_X4;
 954		break;
 955	}
 956
 957	return type;
 958}
 959
 960static enum dev_type ibridge_get_width(struct sbridge_pvt *pvt, u32 mtr)
 961{
 962	/*
 963	 * Called ddr3_width in the documentation, but also valid for
 964	 * DDR4 on Haswell
 965	 */
 966	return __ibridge_get_width(GET_BITFIELD(mtr, 7, 8));
 967}
 968
 969static enum dev_type broadwell_get_width(struct sbridge_pvt *pvt, u32 mtr)
 970{
 971	/* Called ddr3_width in the documentation, but also valid for DDR4 */
 972	return __ibridge_get_width(GET_BITFIELD(mtr, 8, 9));
 973}
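/*
 * For example, a raw width field of 0 decodes to DEV_X4, 1 to DEV_X8 and
 * 2 to DEV_X16; only the position of that field differs between
 * Ivy Bridge/Haswell (bits 8:7) and Broadwell (bits 9:8).
 */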
 974
 975static enum mem_type knl_get_memory_type(struct sbridge_pvt *pvt)
 976{
 977	/* DDR4 RDIMMS and LRDIMMS are supported */
 978	return MEM_RDDR4;
 979}
 980
 981static u8 get_node_id(struct sbridge_pvt *pvt)
 982{
 983	u32 reg;
 984	pci_read_config_dword(pvt->pci_br0, SAD_CONTROL, &reg);
 985	return GET_BITFIELD(reg, 0, 2);
 986}
 987
 988static u8 haswell_get_node_id(struct sbridge_pvt *pvt)
 989{
 990	u32 reg;
 991
 992	pci_read_config_dword(pvt->pci_sad1, SAD_CONTROL, &reg);
 993	return GET_BITFIELD(reg, 0, 3);
 994}
 995
 996static u8 knl_get_node_id(struct sbridge_pvt *pvt)
 997{
 998	u32 reg;
 999
1000	pci_read_config_dword(pvt->pci_sad1, SAD_CONTROL, &reg);
1001	return GET_BITFIELD(reg, 0, 2);
1002}
1003
1004/*
1005 * Use the reporting bank number to determine which memory
1006 * controller (also known as "ha" for "home agent"). Sandy
1007 * Bridge only has one memory controller per socket, so the
1008 * answer is always zero.
1009 */
1010static u8 sbridge_get_ha(u8 bank)
1011{
1012	return 0;
1013}
1014
1015/*
1016 * On Ivy Bridge, Haswell and Broadwell the error may be in a
1017 * home agent bank (7, 8), or one of the per-channel memory
1018 * controller banks (9 .. 16).
1019 */
1020static u8 ibridge_get_ha(u8 bank)
1021{
1022	switch (bank) {
1023	case 7 ... 8:
1024		return bank - 7;
1025	case 9 ... 16:
1026		return (bank - 9) / 4;
1027	default:
1028		return 0xff;
1029	}
1030}
1031
1032/* Not used, but included for safety/symmetry */
1033static u8 knl_get_ha(u8 bank)
1034{
1035	return 0xff;
1036}
1037
1038static u64 haswell_get_tolm(struct sbridge_pvt *pvt)
1039{
1040	u32 reg;
1041
1042	pci_read_config_dword(pvt->info.pci_vtd, HASWELL_TOLM, &reg);
1043	return (GET_BITFIELD(reg, 26, 31) << 26) | 0x3ffffff;
1044}
1045
1046static u64 haswell_get_tohm(struct sbridge_pvt *pvt)
1047{
1048	u64 rc;
1049	u32 reg;
1050
1051	pci_read_config_dword(pvt->info.pci_vtd, HASWELL_TOHM_0, &reg);
1052	rc = GET_BITFIELD(reg, 26, 31);
1053	pci_read_config_dword(pvt->info.pci_vtd, HASWELL_TOHM_1, &reg);
1054	rc = ((reg << 6) | rc) << 26;
1055
1056	return rc | 0x1ffffff;
1057}
1058
1059static u64 knl_get_tolm(struct sbridge_pvt *pvt)
1060{
1061	u32 reg;
1062
1063	pci_read_config_dword(pvt->knl.pci_mc_info, KNL_TOLM, &reg);
1064	return (GET_BITFIELD(reg, 26, 31) << 26) | 0x3ffffff;
1065}
1066
1067static u64 knl_get_tohm(struct sbridge_pvt *pvt)
1068{
1069	u64 rc;
1070	u32 reg_lo, reg_hi;
1071
1072	pci_read_config_dword(pvt->knl.pci_mc_info, KNL_TOHM_0, &reg_lo);
1073	pci_read_config_dword(pvt->knl.pci_mc_info, KNL_TOHM_1, &reg_hi);
1074	rc = ((u64)reg_hi << 32) | reg_lo;
1075	return rc | 0x3ffffff;
1076}
1077
1078
1079static u64 haswell_rir_limit(u32 reg)
1080{
1081	return (((u64)GET_BITFIELD(reg,  1, 11) + 1) << 29) - 1;
1082}
1083
1084static inline u8 sad_pkg_socket(u8 pkg)
1085{
1086	/* on Ivy Bridge, nodeID is SASS, where A is HA and S is node id */
1087	return ((pkg >> 3) << 2) | (pkg & 0x3);
1088}
1089
1090static inline u8 sad_pkg_ha(u8 pkg)
1091{
1092	return (pkg >> 2) & 0x1;
1093}
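/*
 * Worked example (hypothetical package field): pkg == 0b1101 has S == 1,
 * A == 1 and SS == 01, so sad_pkg_socket() returns (1 << 2) | 0b01 == 5
 * and sad_pkg_ha() returns 1.
 */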
1094
1095static int haswell_chan_hash(int idx, u64 addr)
1096{
1097	int i;
1098
1099	/*
1100	 * XOR even bits from 12:26 to bit0 of idx,
1101	 *     odd bits from 13:27 to bit1
1102	 */
1103	for (i = 12; i < 28; i += 2)
1104		idx ^= (addr >> i) & 3;
1105
1106	return idx;
1107}
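/*
 * Worked example (hypothetical address): for addr == 0x3000 only bits 12
 * and 13 are set, so the single non-zero term (addr >> 12) & 3 == 3 is
 * XORed in and haswell_chan_hash(0, 0x3000) returns 3.
 */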
1108
1109/* Low bits of TAD limit, and some metadata. */
1110static const u32 knl_tad_dram_limit_lo[] = {
1111	0x400, 0x500, 0x600, 0x700,
1112	0x800, 0x900, 0xa00, 0xb00,
1113};
1114
1115/* Low bits of TAD offset. */
1116static const u32 knl_tad_dram_offset_lo[] = {
1117	0x404, 0x504, 0x604, 0x704,
1118	0x804, 0x904, 0xa04, 0xb04,
1119};
1120
1121/* High 16 bits of TAD limit and offset. */
1122static const u32 knl_tad_dram_hi[] = {
1123	0x408, 0x508, 0x608, 0x708,
1124	0x808, 0x908, 0xa08, 0xb08,
1125};
1126
1127/* Number of ways a tad entry is interleaved. */
1128static const u32 knl_tad_ways[] = {
1129	8, 6, 4, 3, 2, 1,
1130};
1131
1132/*
1133 * Retrieve the n'th Target Address Decode table entry
1134 * from the memory controller's TAD table.
1135 *
1136 * @pvt:	driver private data
1137 * @entry:	which entry you want to retrieve
1138 * @mc:		which memory controller (0 or 1)
1139 * @offset:	output tad range offset
1140 * @limit:	output address of first byte above tad range
1141 * @ways:	output number of interleave ways
1142 *
1143 * The offset value has curious semantics.  It's a sort of running total
1144 * of the sizes of all the memory regions that aren't mapped in this
1145 * tad table.
1146 */
1147static int knl_get_tad(const struct sbridge_pvt *pvt,
1148		const int entry,
1149		const int mc,
1150		u64 *offset,
1151		u64 *limit,
1152		int *ways)
1153{
1154	u32 reg_limit_lo, reg_offset_lo, reg_hi;
1155	struct pci_dev *pci_mc;
1156	int way_id;
1157
1158	switch (mc) {
1159	case 0:
1160		pci_mc = pvt->knl.pci_mc0;
1161		break;
1162	case 1:
1163		pci_mc = pvt->knl.pci_mc1;
1164		break;
1165	default:
1166		WARN_ON(1);
1167		return -EINVAL;
1168	}
1169
1170	pci_read_config_dword(pci_mc,
1171			knl_tad_dram_limit_lo[entry], &reg_limit_lo);
1172	pci_read_config_dword(pci_mc,
1173			knl_tad_dram_offset_lo[entry], &reg_offset_lo);
1174	pci_read_config_dword(pci_mc,
1175			knl_tad_dram_hi[entry], &reg_hi);
1176
1177	/* Is this TAD entry enabled? */
1178	if (!GET_BITFIELD(reg_limit_lo, 0, 0))
1179		return -ENODEV;
1180
1181	way_id = GET_BITFIELD(reg_limit_lo, 3, 5);
1182
1183	if (way_id < ARRAY_SIZE(knl_tad_ways)) {
1184		*ways = knl_tad_ways[way_id];
1185	} else {
1186		*ways = 0;
1187		sbridge_printk(KERN_ERR,
1188				"Unexpected value %d in mc_tad_limit_lo wayness field\n",
1189				way_id);
1190		return -ENODEV;
1191	}
1192
1193	/*
1194	 * The least significant 6 bits of base and limit are truncated.
1195	 * For limit, we fill the missing bits with 1s.
1196	 */
1197	*offset = ((u64) GET_BITFIELD(reg_offset_lo, 6, 31) << 6) |
1198				((u64) GET_BITFIELD(reg_hi, 0,  15) << 32);
1199	*limit = ((u64) GET_BITFIELD(reg_limit_lo,  6, 31) << 6) | 63 |
1200				((u64) GET_BITFIELD(reg_hi, 16, 31) << 32);
1201
1202	return 0;
1203}
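/*
 * Worked example of the offset semantics (hypothetical layout): if TAD
 * entry 0 maps [0, 2G) and TAD entry 1 maps [3G, 5G) with a 1G hole in
 * between, entry 0 reports offset 0 and entry 1 reports offset 1G (the
 * space below its range that this table does not map). The caller can then
 * recover each entry's size as (limit + 1) - (livespace so far + offset).
 */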
1204
1205/* Determine which memory controller is responsible for a given channel. */
1206static int knl_channel_mc(int channel)
1207{
1208	WARN_ON(channel < 0 || channel >= 6);
1209
1210	return channel < 3 ? 1 : 0;
1211}
1212
1213/*
1214 * Get the Nth entry from EDC_ROUTE_TABLE register.
1215 * (This is the per-tile mapping of logical interleave targets to
1216 *  physical EDC modules.)
1217 *
1218 * entry 0: 0:2
1219 *       1: 3:5
1220 *       2: 6:8
1221 *       3: 9:11
1222 *       4: 12:14
1223 *       5: 15:17
1224 *       6: 18:20
1225 *       7: 21:23
1226 * reserved: 24:31
1227 */
1228static u32 knl_get_edc_route(int entry, u32 reg)
1229{
1230	WARN_ON(entry >= KNL_MAX_EDCS);
1231	return GET_BITFIELD(reg, entry*3, (entry*3)+2);
1232}
1233
1234/*
1235 * Get the Nth entry from MC_ROUTE_TABLE register.
1236 * (This is the per-tile mapping of logical interleave targets to
1237 *  physical DRAM channels.)
1238 *
1239 * entry 0: mc 0:2   channel 18:19
1240 *       1: mc 3:5   channel 20:21
1241 *       2: mc 6:8   channel 22:23
1242 *       3: mc 9:11  channel 24:25
1243 *       4: mc 12:14 channel 26:27
1244 *       5: mc 15:17 channel 28:29
1245 * reserved: 30:31
1246 *
1247 * Though we have 3 bits to identify the MC, we should only see
1248 * the values 0 or 1.
1249 */
1250
1251static u32 knl_get_mc_route(int entry, u32 reg)
1252{
1253	int mc, chan;
1254
1255	WARN_ON(entry >= KNL_MAX_CHANNELS);
1256
1257	mc = GET_BITFIELD(reg, entry*3, (entry*3)+2);
1258	chan = GET_BITFIELD(reg, (entry*2) + 18, (entry*2) + 18 + 1);
1259
1260	return knl_channel_remap(mc, chan);
1261}
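/*
 * Worked example (hypothetical route register): for entry 0, an mc field
 * of 1 (bits 2:0) and a channel field of 2 (bits 19:18) give
 * knl_channel_remap(1, 2) == 2, i.e. a channel belonging to MC1.
 */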
1262
1263/*
1264 * Render the EDC_ROUTE register in human-readable form.
1265 * Output string s should be at least KNL_MAX_EDCS*2 bytes.
1266 */
1267static void knl_show_edc_route(u32 reg, char *s)
1268{
1269	int i;
1270
1271	for (i = 0; i < KNL_MAX_EDCS; i++) {
1272		s[i*2] = knl_get_edc_route(i, reg) + '0';
1273		s[i*2+1] = '-';
1274	}
1275
1276	s[KNL_MAX_EDCS*2 - 1] = '\0';
1277}
1278
1279/*
1280 * Render the MC_ROUTE register in human-readable form.
1281 * Output string s should be at least KNL_MAX_CHANNELS*2 bytes.
1282 */
1283static void knl_show_mc_route(u32 reg, char *s)
1284{
1285	int i;
1286
1287	for (i = 0; i < KNL_MAX_CHANNELS; i++) {
1288		s[i*2] = knl_get_mc_route(i, reg) + '0';
1289		s[i*2+1] = '-';
1290	}
1291
1292	s[KNL_MAX_CHANNELS*2 - 1] = '\0';
1293}
1294
1295#define KNL_EDC_ROUTE 0xb8
1296#define KNL_MC_ROUTE 0xb4
1297
1298/* Is this dram rule backed by regular DRAM in flat mode? */
1299#define KNL_EDRAM(reg) GET_BITFIELD(reg, 29, 29)
1300
1301/* Is this dram rule cached? */
1302#define KNL_CACHEABLE(reg) GET_BITFIELD(reg, 28, 28)
1303
1304/* Is this rule backed by EDC only? */
1305#define KNL_EDRAM_ONLY(reg) GET_BITFIELD(reg, 29, 29)
1306
1307/* Is this rule backed by DRAM, cacheable in EDRAM? */
1308#define KNL_CACHEABLE(reg) GET_BITFIELD(reg, 28, 28)
1309
1310/* Is this rule mod3? */
1311#define KNL_MOD3(reg) GET_BITFIELD(reg, 27, 27)
1312
1313/*
1314 * Figure out how big our RAM modules are.
1315 *
1316 * The DIMMMTR register in KNL doesn't tell us the size of the DIMMs, so we
1317 * have to figure this out from the SAD rules, interleave lists, route tables,
1318 * and TAD rules.
1319 *
1320 * SAD rules can have holes in them (e.g. the 3G-4G hole), so we have to
1321 * inspect the TAD rules to figure out how large the SAD regions really are.
1322 *
1323 * When we know the real size of a SAD region and how many ways it's
1324 * interleaved, we know the individual contribution of each channel to
1325 * TAD is size/ways.
1326 *
1327 * Finally, we have to check whether each channel participates in each SAD
1328 * region.
1329 *
1330 * Fortunately, KNL only supports one DIMM per channel, so once we know how
1331 * much memory the channel uses, we know the DIMM is at least that large.
1332 * (The BIOS might possibly choose not to map all available memory, in which
1333 * case we will underreport the size of the DIMM.)
1334 *
1335 * In theory, we could try to determine the EDC sizes as well, but that would
1336 * only work in flat mode, not in cache mode.
1337 *
1338 * @mc_sizes: Output sizes of channels (must have space for KNL_MAX_CHANNELS
1339 *            elements)
1340 */
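/*
 * Worked example (hypothetical configuration): if one memory controller's
 * TAD tables account for 32 GiB of a SAD region that is interleaved 8
 * ways, each of that controller's channels participating in the region is
 * credited with 32 GiB / 8 = 4 GiB.
 */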
1341static int knl_get_dimm_capacity(struct sbridge_pvt *pvt, u64 *mc_sizes)
1342{
1343	u64 sad_base, sad_size, sad_limit = 0;
1344	u64 tad_base, tad_size, tad_limit, tad_deadspace, tad_livespace;
1345	int sad_rule = 0;
1346	int tad_rule = 0;
1347	int intrlv_ways, tad_ways;
1348	u32 first_pkg, pkg;
1349	int i;
1350	u64 sad_actual_size[2]; /* sad size accounting for holes, per mc */
1351	u32 dram_rule, interleave_reg;
1352	u32 mc_route_reg[KNL_MAX_CHAS];
1353	u32 edc_route_reg[KNL_MAX_CHAS];
1354	int edram_only;
1355	char edc_route_string[KNL_MAX_EDCS*2];
1356	char mc_route_string[KNL_MAX_CHANNELS*2];
1357	int cur_reg_start;
1358	int mc;
1359	int channel;
1360	int participants[KNL_MAX_CHANNELS];
1361
1362	for (i = 0; i < KNL_MAX_CHANNELS; i++)
1363		mc_sizes[i] = 0;
1364
1365	/* Read the EDC route table in each CHA. */
1366	cur_reg_start = 0;
1367	for (i = 0; i < KNL_MAX_CHAS; i++) {
1368		pci_read_config_dword(pvt->knl.pci_cha[i],
1369				KNL_EDC_ROUTE, &edc_route_reg[i]);
1370
1371		if (i > 0 && edc_route_reg[i] != edc_route_reg[i-1]) {
1372			knl_show_edc_route(edc_route_reg[i-1],
1373					edc_route_string);
1374			if (cur_reg_start == i-1)
1375				edac_dbg(0, "edc route table for CHA %d: %s\n",
1376					cur_reg_start, edc_route_string);
1377			else
1378				edac_dbg(0, "edc route table for CHA %d-%d: %s\n",
1379					cur_reg_start, i-1, edc_route_string);
1380			cur_reg_start = i;
1381		}
1382	}
1383	knl_show_edc_route(edc_route_reg[i-1], edc_route_string);
1384	if (cur_reg_start == i-1)
1385		edac_dbg(0, "edc route table for CHA %d: %s\n",
1386			cur_reg_start, edc_route_string);
1387	else
1388		edac_dbg(0, "edc route table for CHA %d-%d: %s\n",
1389			cur_reg_start, i-1, edc_route_string);
1390
1391	/* Read the MC route table in each CHA. */
1392	cur_reg_start = 0;
1393	for (i = 0; i < KNL_MAX_CHAS; i++) {
1394		pci_read_config_dword(pvt->knl.pci_cha[i],
1395			KNL_MC_ROUTE, &mc_route_reg[i]);
1396
1397		if (i > 0 && mc_route_reg[i] != mc_route_reg[i-1]) {
1398			knl_show_mc_route(mc_route_reg[i-1], mc_route_string);
1399			if (cur_reg_start == i-1)
1400				edac_dbg(0, "mc route table for CHA %d: %s\n",
1401					cur_reg_start, mc_route_string);
1402			else
1403				edac_dbg(0, "mc route table for CHA %d-%d: %s\n",
1404					cur_reg_start, i-1, mc_route_string);
1405			cur_reg_start = i;
1406		}
1407	}
1408	knl_show_mc_route(mc_route_reg[i-1], mc_route_string);
1409	if (cur_reg_start == i-1)
1410		edac_dbg(0, "mc route table for CHA %d: %s\n",
1411			cur_reg_start, mc_route_string);
1412	else
1413		edac_dbg(0, "mc route table for CHA %d-%d: %s\n",
1414			cur_reg_start, i-1, mc_route_string);
1415
1416	/* Process DRAM rules */
1417	for (sad_rule = 0; sad_rule < pvt->info.max_sad; sad_rule++) {
1418		/* previous limit becomes the new base */
1419		sad_base = sad_limit;
1420
1421		pci_read_config_dword(pvt->pci_sad0,
1422			pvt->info.dram_rule[sad_rule], &dram_rule);
1423
1424		if (!DRAM_RULE_ENABLE(dram_rule))
1425			break;
1426
1427		edram_only = KNL_EDRAM_ONLY(dram_rule);
1428
1429		sad_limit = pvt->info.sad_limit(dram_rule)+1;
1430		sad_size = sad_limit - sad_base;
1431
1432		pci_read_config_dword(pvt->pci_sad0,
1433			pvt->info.interleave_list[sad_rule], &interleave_reg);
1434
1435		/*
1436		 * Find out how many ways this dram rule is interleaved.
1437		 * We stop when we see the first channel again.
1438		 */
1439		first_pkg = sad_pkg(pvt->info.interleave_pkg,
1440						interleave_reg, 0);
1441		for (intrlv_ways = 1; intrlv_ways < 8; intrlv_ways++) {
1442			pkg = sad_pkg(pvt->info.interleave_pkg,
1443						interleave_reg, intrlv_ways);
1444
1445			if ((pkg & 0x8) == 0) {
1446				/*
1447				 * 0 bit means memory is non-local,
1448				 * which KNL doesn't support
1449				 */
1450				edac_dbg(0, "Unexpected interleave target %d\n",
1451					pkg);
1452				return -1;
1453			}
1454
1455			if (pkg == first_pkg)
1456				break;
1457		}
1458		if (KNL_MOD3(dram_rule))
1459			intrlv_ways *= 3;
1460
1461		edac_dbg(3, "dram rule %d (base 0x%llx, limit 0x%llx), %d way interleave%s\n",
1462			sad_rule,
1463			sad_base,
1464			sad_limit,
1465			intrlv_ways,
1466			edram_only ? ", EDRAM" : "");
1467
1468		/*
1469		 * Find out how big the SAD region really is by iterating
1470		 * over TAD tables (SAD regions may contain holes).
1471		 * Each memory controller might have a different TAD table, so
1472		 * we have to look at both.
1473		 *
1474		 * Livespace is the memory that's mapped in this TAD table,
1475		 * deadspace is the holes (this could be the MMIO hole, or it
1476		 * could be memory that's mapped by the other TAD table but
1477		 * not this one).
1478		 */
1479		for (mc = 0; mc < 2; mc++) {
1480			sad_actual_size[mc] = 0;
1481			tad_livespace = 0;
1482			for (tad_rule = 0;
1483					tad_rule < ARRAY_SIZE(
1484						knl_tad_dram_limit_lo);
1485					tad_rule++) {
1486				if (knl_get_tad(pvt,
1487						tad_rule,
1488						mc,
1489						&tad_deadspace,
1490						&tad_limit,
1491						&tad_ways))
1492					break;
1493
1494				tad_size = (tad_limit+1) -
1495					(tad_livespace + tad_deadspace);
1496				tad_livespace += tad_size;
1497				tad_base = (tad_limit+1) - tad_size;
1498
1499				if (tad_base < sad_base) {
1500					if (tad_limit > sad_base)
1501						edac_dbg(0, "TAD region overlaps lower SAD boundary -- TAD tables may be configured incorrectly.\n");
1502				} else if (tad_base < sad_limit) {
1503					if (tad_limit+1 > sad_limit) {
1504						edac_dbg(0, "TAD region overlaps upper SAD boundary -- TAD tables may be configured incorrectly.\n");
1505					} else {
1506						/* TAD region is completely inside SAD region */
1507						edac_dbg(3, "TAD region %d 0x%llx - 0x%llx (%lld bytes) table%d\n",
1508							tad_rule, tad_base,
1509							tad_limit, tad_size,
1510							mc);
1511						sad_actual_size[mc] += tad_size;
1512					}
1513				}
1514			}
1515		}
1516
1517		for (mc = 0; mc < 2; mc++) {
1518			edac_dbg(3, " total TAD DRAM footprint in table%d : 0x%llx (%lld bytes)\n",
1519				mc, sad_actual_size[mc], sad_actual_size[mc]);
1520		}
1521
1522		/* Ignore EDRAM rule */
1523		if (edram_only)
1524			continue;
1525
1526		/* Figure out which channels participate in interleave. */
1527		for (channel = 0; channel < KNL_MAX_CHANNELS; channel++)
1528			participants[channel] = 0;
1529
1530		/* For each channel, check whether at least one CHA
1531		 * routes some interleave target to that channel.
1532		 */
1533		for (channel = 0; channel < KNL_MAX_CHANNELS; channel++) {
1534			int target;
1535			int cha;
1536
1537			for (target = 0; target < KNL_MAX_CHANNELS; target++) {
1538				for (cha = 0; cha < KNL_MAX_CHAS; cha++) {
1539					if (knl_get_mc_route(target,
1540						mc_route_reg[cha]) == channel
1541						&& !participants[channel]) {
1542						participants[channel] = 1;
1543						break;
1544					}
1545				}
1546			}
1547		}
1548
1549		for (channel = 0; channel < KNL_MAX_CHANNELS; channel++) {
1550			mc = knl_channel_mc(channel);
1551			if (participants[channel]) {
1552				edac_dbg(4, "mc channel %d contributes %lld bytes via sad entry %d\n",
1553					channel,
1554					sad_actual_size[mc]/intrlv_ways,
1555					sad_rule);
1556				mc_sizes[channel] +=
1557					sad_actual_size[mc]/intrlv_ways;
1558			}
1559		}
1560	}
1561
1562	return 0;
1563}
1564
1565static void get_source_id(struct mem_ctl_info *mci)
1566{
1567	struct sbridge_pvt *pvt = mci->pvt_info;
1568	u32 reg;
1569
1570	if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL ||
1571	    pvt->info.type == KNIGHTS_LANDING)
1572		pci_read_config_dword(pvt->pci_sad1, SAD_TARGET, &reg);
1573	else
1574		pci_read_config_dword(pvt->pci_br0, SAD_TARGET, &reg);
1575
1576	if (pvt->info.type == KNIGHTS_LANDING)
1577		pvt->sbridge_dev->source_id = SOURCE_ID_KNL(reg);
1578	else
1579		pvt->sbridge_dev->source_id = SOURCE_ID(reg);
1580}
1581
1582static int __populate_dimms(struct mem_ctl_info *mci,
1583			    u64 knl_mc_sizes[KNL_MAX_CHANNELS],
1584			    enum edac_type mode)
1585{
1586	struct sbridge_pvt *pvt = mci->pvt_info;
1587	int channels = pvt->info.type == KNIGHTS_LANDING ? KNL_MAX_CHANNELS
1588							 : NUM_CHANNELS;
1589	unsigned int i, j, banks, ranks, rows, cols, npages;
1590	struct dimm_info *dimm;
1591	enum mem_type mtype;
1592	u64 size;
1593
1594	mtype = pvt->info.get_memory_type(pvt);
1595	if (mtype == MEM_RDDR3 || mtype == MEM_RDDR4)
1596		edac_dbg(0, "Memory is registered\n");
1597	else if (mtype == MEM_UNKNOWN)
1598		edac_dbg(0, "Cannot determine memory type\n");
1599	else
1600		edac_dbg(0, "Memory is unregistered\n");
1601
1602	if (mtype == MEM_DDR4 || mtype == MEM_RDDR4)
1603		banks = 16;
1604	else
1605		banks = 8;
1606
1607	for (i = 0; i < channels; i++) {
1608		u32 mtr;
1609
1610		int max_dimms_per_channel;
1611
1612		if (pvt->info.type == KNIGHTS_LANDING) {
1613			max_dimms_per_channel = 1;
1614			if (!pvt->knl.pci_channel[i])
1615				continue;
1616		} else {
1617			max_dimms_per_channel = ARRAY_SIZE(mtr_regs);
1618			if (!pvt->pci_tad[i])
1619				continue;
1620		}
1621
1622		for (j = 0; j < max_dimms_per_channel; j++) {
1623			dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, i, j, 0);
1624			if (pvt->info.type == KNIGHTS_LANDING) {
1625				pci_read_config_dword(pvt->knl.pci_channel[i],
1626					knl_mtr_reg, &mtr);
1627			} else {
1628				pci_read_config_dword(pvt->pci_tad[i],
1629					mtr_regs[j], &mtr);
1630			}
1631			edac_dbg(4, "Channel #%d  MTR%d = %x\n", i, j, mtr);
1632			if (IS_DIMM_PRESENT(mtr)) {
1633				if (!IS_ECC_ENABLED(pvt->info.mcmtr)) {
1634					sbridge_printk(KERN_ERR, "CPU SrcID #%d, Ha #%d, Channel #%d has DIMMs, but ECC is disabled\n",
1635						       pvt->sbridge_dev->source_id,
1636						       pvt->sbridge_dev->dom, i);
1637					return -ENODEV;
1638				}
1639				pvt->channel[i].dimms++;
1640
1641				ranks = numrank(pvt->info.type, mtr);
1642
1643				if (pvt->info.type == KNIGHTS_LANDING) {
1644					/* For DDR4, this is fixed. */
1645					cols = 1 << 10;
1646					rows = knl_mc_sizes[i] /
1647						((u64) cols * ranks * banks * 8);
1648				} else {
1649					rows = numrow(mtr);
1650					cols = numcol(mtr);
1651				}
1652
1653				size = ((u64)rows * cols * banks * ranks) >> (20 - 3);
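				/*
				 * rows * cols * banks * ranks effectively
				 * counts 64-bit words, so shifting by
				 * (20 - 3) converts to MiB. Worked example
				 * (hypothetical DIMM): 32768 rows * 1024
				 * cols * 8 banks * 2 ranks == 2^29 words,
				 * and 2^29 >> 17 == 4096 MiB.
				 */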
1654				npages = MiB_TO_PAGES(size);
1655
1656				edac_dbg(0, "mc#%d: ha %d channel %d, dimm %d, %lld MiB (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n",
1657					 pvt->sbridge_dev->mc, pvt->sbridge_dev->dom, i, j,
1658					 size, npages,
1659					 banks, ranks, rows, cols);
1660
1661				dimm->nr_pages = npages;
1662				dimm->grain = 32;
1663				dimm->dtype = pvt->info.get_width(pvt, mtr);
1664				dimm->mtype = mtype;
1665				dimm->edac_mode = mode;
1666				snprintf(dimm->label, sizeof(dimm->label),
1667						 "CPU_SrcID#%u_Ha#%u_Chan#%u_DIMM#%u",
1668						 pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom, i, j);
1669			}
1670		}
1671	}
1672
1673	return 0;
1674}
1675
1676static int get_dimm_config(struct mem_ctl_info *mci)
1677{
1678	struct sbridge_pvt *pvt = mci->pvt_info;
1679	u64 knl_mc_sizes[KNL_MAX_CHANNELS];
1680	enum edac_type mode;
1681	u32 reg;
1682
1683	pvt->sbridge_dev->node_id = pvt->info.get_node_id(pvt);
1684	edac_dbg(0, "mc#%d: Node ID: %d, source ID: %d\n",
1685		 pvt->sbridge_dev->mc,
1686		 pvt->sbridge_dev->node_id,
1687		 pvt->sbridge_dev->source_id);
1688
1689	/* KNL doesn't support mirroring or lockstep,
1690	 * and is always closed page
1691	 */
1692	if (pvt->info.type == KNIGHTS_LANDING) {
1693		mode = EDAC_S4ECD4ED;
1694		pvt->mirror_mode = NON_MIRRORING;
1695		pvt->is_cur_addr_mirrored = false;
1696
1697		if (knl_get_dimm_capacity(pvt, knl_mc_sizes) != 0)
1698			return -1;
1699		if (pci_read_config_dword(pvt->pci_ta, KNL_MCMTR, &pvt->info.mcmtr)) {
1700			edac_dbg(0, "Failed to read KNL_MCMTR register\n");
1701			return -ENODEV;
1702		}
1703	} else {
1704		if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL) {
1705			if (pci_read_config_dword(pvt->pci_ha, HASWELL_HASYSDEFEATURE2, &reg)) {
1706				edac_dbg(0, "Failed to read HASWELL_HASYSDEFEATURE2 register\n");
1707				return -ENODEV;
1708			}
1709			pvt->is_chan_hash = GET_BITFIELD(reg, 21, 21);
1710			if (GET_BITFIELD(reg, 28, 28)) {
1711				pvt->mirror_mode = ADDR_RANGE_MIRRORING;
1712				edac_dbg(0, "Address range partial memory mirroring is enabled\n");
1713				goto next;
1714			}
1715		}
1716		if (pci_read_config_dword(pvt->pci_ras, RASENABLES, &reg)) {
1717			edac_dbg(0, "Failed to read RASENABLES register\n");
1718			return -ENODEV;
1719		}
1720		if (IS_MIRROR_ENABLED(reg)) {
1721			pvt->mirror_mode = FULL_MIRRORING;
1722			edac_dbg(0, "Full memory mirroring is enabled\n");
1723		} else {
1724			pvt->mirror_mode = NON_MIRRORING;
1725			edac_dbg(0, "Memory mirroring is disabled\n");
1726		}
1727
1728next:
1729		if (pci_read_config_dword(pvt->pci_ta, MCMTR, &pvt->info.mcmtr)) {
1730			edac_dbg(0, "Failed to read MCMTR register\n");
1731			return -ENODEV;
1732		}
1733		if (IS_LOCKSTEP_ENABLED(pvt->info.mcmtr)) {
1734			edac_dbg(0, "Lockstep is enabled\n");
1735			mode = EDAC_S8ECD8ED;
1736			pvt->is_lockstep = true;
1737		} else {
1738			edac_dbg(0, "Lockstep is disabled\n");
1739			mode = EDAC_S4ECD4ED;
1740			pvt->is_lockstep = false;
1741		}
1742		if (IS_CLOSE_PG(pvt->info.mcmtr)) {
1743			edac_dbg(0, "address map is on closed page mode\n");
1744			pvt->is_close_pg = true;
1745		} else {
1746			edac_dbg(0, "address map is on open page mode\n");
1747			pvt->is_close_pg = false;
1748		}
1749	}
1750
1751	return __populate_dimms(mci, knl_mc_sizes, mode);
1752}
1753
1754static void get_memory_layout(const struct mem_ctl_info *mci)
1755{
1756	struct sbridge_pvt *pvt = mci->pvt_info;
1757	int i, j, k, n_sads, n_tads, sad_interl;
1758	u32 reg;
1759	u64 limit, prv = 0;
1760	u64 tmp_mb;
1761	u32 gb, mb;
1762	u32 rir_way;
1763
1764	/*
1765	 * Step 1) Get TOLM/TOHM ranges
1766	 */
1767
1768	pvt->tolm = pvt->info.get_tolm(pvt);
1769	tmp_mb = (1 + pvt->tolm) >> 20;
1770
1771	gb = div_u64_rem(tmp_mb, 1024, &mb);
1772	edac_dbg(0, "TOLM: %u.%03u GB (0x%016Lx)\n",
1773		gb, (mb*1000)/1024, (u64)pvt->tolm);
1774
1775	/* Address range is already 45:25 */
1776	pvt->tohm = pvt->info.get_tohm(pvt);
1777	tmp_mb = (1 + pvt->tohm) >> 20;
1778
1779	gb = div_u64_rem(tmp_mb, 1024, &mb);
1780	edac_dbg(0, "TOHM: %u.%03u GB (0x%016Lx)\n",
1781		gb, (mb*1000)/1024, (u64)pvt->tohm);
1782
1783	/*
1784	 * Step 2) Get SAD range and SAD Interleave list
1785	 * TAD registers contain the interleave wayness. However, it
1786	 * seems simpler to just discover it indirectly, with the
1787	 * algorithm below.
1788	 */
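	/*
	 * The loop below scans the interleave list until the first target
	 * value repeats; the number of distinct entries seen up to that
	 * point gives the SAD interleave wayness.
	 */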
1789	prv = 0;
1790	for (n_sads = 0; n_sads < pvt->info.max_sad; n_sads++) {
1791		/* SAD_LIMIT Address range is 45:26 */
1792		pci_read_config_dword(pvt->pci_sad0, pvt->info.dram_rule[n_sads],
1793				      &reg);
1794		limit = pvt->info.sad_limit(reg);
1795
1796		if (!DRAM_RULE_ENABLE(reg))
1797			continue;
1798
1799		if (limit <= prv)
1800			break;
1801
1802		tmp_mb = (limit + 1) >> 20;
1803		gb = div_u64_rem(tmp_mb, 1024, &mb);
1804		edac_dbg(0, "SAD#%d %s up to %u.%03u GB (0x%016Lx) Interleave: %s reg=0x%08x\n",
1805			 n_sads,
1806			 show_dram_attr(pvt->info.dram_attr(reg)),
1807			 gb, (mb*1000)/1024,
1808			 ((u64)tmp_mb) << 20L,
1809			 get_intlv_mode_str(reg, pvt->info.type),
1810			 reg);
1811		prv = limit;
1812
1813		pci_read_config_dword(pvt->pci_sad0, pvt->info.interleave_list[n_sads],
1814				      &reg);
1815		sad_interl = sad_pkg(pvt->info.interleave_pkg, reg, 0);
1816		for (j = 0; j < 8; j++) {
1817			u32 pkg = sad_pkg(pvt->info.interleave_pkg, reg, j);
1818			if (j > 0 && sad_interl == pkg)
1819				break;
1820
1821			edac_dbg(0, "SAD#%d, interleave #%d: %d\n",
1822				 n_sads, j, pkg);
1823		}
1824	}
1825
1826	if (pvt->info.type == KNIGHTS_LANDING)
1827		return;
1828
1829	/*
1830	 * Step 3) Get TAD range
1831	 */
1832	prv = 0;
1833	for (n_tads = 0; n_tads < MAX_TAD; n_tads++) {
1834		pci_read_config_dword(pvt->pci_ha, tad_dram_rule[n_tads], &reg);
1835		limit = TAD_LIMIT(reg);
1836		if (limit <= prv)
1837			break;
1838		tmp_mb = (limit + 1) >> 20;
1839
1840		gb = div_u64_rem(tmp_mb, 1024, &mb);
1841		edac_dbg(0, "TAD#%d: up to %u.%03u GB (0x%016Lx), socket interleave %d, memory interleave %d, TGT: %d, %d, %d, %d, reg=0x%08x\n",
1842			 n_tads, gb, (mb*1000)/1024,
1843			 ((u64)tmp_mb) << 20L,
1844			 (u32)(1 << TAD_SOCK(reg)),
1845			 (u32)TAD_CH(reg) + 1,
1846			 (u32)TAD_TGT0(reg),
1847			 (u32)TAD_TGT1(reg),
1848			 (u32)TAD_TGT2(reg),
1849			 (u32)TAD_TGT3(reg),
1850			 reg);
1851		prv = limit;
1852	}
1853
1854	/*
1855	 * Step 4) Get TAD offsets, for each channel
1856	 */
1857	for (i = 0; i < NUM_CHANNELS; i++) {
1858		if (!pvt->channel[i].dimms)
1859			continue;
1860		for (j = 0; j < n_tads; j++) {
1861			pci_read_config_dword(pvt->pci_tad[i],
1862					      tad_ch_nilv_offset[j],
1863					      &reg);
1864			tmp_mb = TAD_OFFSET(reg) >> 20;
1865			gb = div_u64_rem(tmp_mb, 1024, &mb);
1866			edac_dbg(0, "TAD CH#%d, offset #%d: %u.%03u GB (0x%016Lx), reg=0x%08x\n",
1867				 i, j,
1868				 gb, (mb*1000)/1024,
1869				 ((u64)tmp_mb) << 20L,
1870				 reg);
1871		}
1872	}
1873
1874	/*
1875	 * Step 5) Get RIR Wayness/Limit, for each channel
1876	 */
1877	for (i = 0; i < NUM_CHANNELS; i++) {
1878		if (!pvt->channel[i].dimms)
1879			continue;
1880		for (j = 0; j < MAX_RIR_RANGES; j++) {
1881			pci_read_config_dword(pvt->pci_tad[i],
1882					      rir_way_limit[j],
1883					      &reg);
1884
1885			if (!IS_RIR_VALID(reg))
1886				continue;
1887
1888			tmp_mb = pvt->info.rir_limit(reg) >> 20;
1889			rir_way = 1 << RIR_WAY(reg);
1890			gb = div_u64_rem(tmp_mb, 1024, &mb);
1891			edac_dbg(0, "CH#%d RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d, reg=0x%08x\n",
1892				 i, j,
1893				 gb, (mb*1000)/1024,
1894				 ((u64)tmp_mb) << 20L,
1895				 rir_way,
1896				 reg);
1897
1898			for (k = 0; k < rir_way; k++) {
1899				pci_read_config_dword(pvt->pci_tad[i],
1900						      rir_offset[j][k],
1901						      &reg);
1902				tmp_mb = RIR_OFFSET(pvt->info.type, reg) << 6;
1903
1904				gb = div_u64_rem(tmp_mb, 1024, &mb);
1905				edac_dbg(0, "CH#%d RIR#%d INTL#%d, offset %u.%03u GB (0x%016Lx), tgt: %d, reg=0x%08x\n",
1906					 i, j, k,
1907					 gb, (mb*1000)/1024,
1908					 ((u64)tmp_mb) << 20L,
1909					 (u32)RIR_RNK_TGT(pvt->info.type, reg),
1910					 reg);
1911			}
1912		}
1913	}
1914}
1915
1916static struct mem_ctl_info *get_mci_for_node_id(u8 node_id, u8 ha)
1917{
1918	struct sbridge_dev *sbridge_dev;
1919
1920	list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) {
1921		if (sbridge_dev->node_id == node_id && sbridge_dev->dom == ha)
1922			return sbridge_dev->mci;
1923	}
1924	return NULL;
1925}
1926
1927static int get_memory_error_data(struct mem_ctl_info *mci,
1928				 u64 addr,
1929				 u8 *socket, u8 *ha,
1930				 long *channel_mask,
1931				 u8 *rank,
1932				 char **area_type, char *msg)
1933{
1934	struct mem_ctl_info	*new_mci;
1935	struct sbridge_pvt *pvt = mci->pvt_info;
1936	struct pci_dev		*pci_ha;
1937	int			n_rir, n_sads, n_tads, sad_way, sck_xch;
1938	int			sad_interl, idx, base_ch;
1939	int			interleave_mode, shiftup = 0;
1940	unsigned int		sad_interleave[MAX_INTERLEAVE];
1941	u32			reg, dram_rule;
1942	u8			ch_way, sck_way, pkg, sad_ha = 0;
1943	u32			tad_offset;
1944	u32			rir_way;
1945	u32			mb, gb;
1946	u64			ch_addr, offset, limit = 0, prv = 0;
1947
1948
1949	/*
1950	 * Step 0) Check if the address is at special memory ranges
1951	 * The check below is probably enough to cover all cases where
1952	 * the error is not inside memory, except for the legacy
1953	 * range (e.g. VGA addresses). It is unlikely, however, that the
1954	 * memory controller would generate an error on that range.
1955	 */
1956	if ((addr > (u64) pvt->tolm) && (addr < (1LL << 32))) {
1957		sprintf(msg, "Error at TOLM area, on addr 0x%08Lx", addr);
1958		return -EINVAL;
1959	}
1960	if (addr >= (u64)pvt->tohm) {
1961		sprintf(msg, "Error at MMIOH area, on addr 0x%016Lx", addr);
1962		return -EINVAL;
1963	}
1964
1965	/*
1966	 * Step 1) Get socket
1967	 */
1968	for (n_sads = 0; n_sads < pvt->info.max_sad; n_sads++) {
1969		pci_read_config_dword(pvt->pci_sad0, pvt->info.dram_rule[n_sads],
1970				      &reg);
1971
1972		if (!DRAM_RULE_ENABLE(reg))
1973			continue;
1974
1975		limit = pvt->info.sad_limit(reg);
1976		if (limit <= prv) {
1977			sprintf(msg, "Can't discover the memory socket");
1978			return -EINVAL;
1979		}
1980		if  (addr <= limit)
1981			break;
1982		prv = limit;
1983	}
1984	if (n_sads == pvt->info.max_sad) {
1985		sprintf(msg, "Can't discover the memory socket");
1986		return -EINVAL;
1987	}
1988	dram_rule = reg;
1989	*area_type = show_dram_attr(pvt->info.dram_attr(dram_rule));
1990	interleave_mode = pvt->info.interleave_mode(dram_rule);
1991
1992	pci_read_config_dword(pvt->pci_sad0, pvt->info.interleave_list[n_sads],
1993			      &reg);
1994
1995	if (pvt->info.type == SANDY_BRIDGE) {
1996		sad_interl = sad_pkg(pvt->info.interleave_pkg, reg, 0);
1997		for (sad_way = 0; sad_way < 8; sad_way++) {
1998			u32 pkg = sad_pkg(pvt->info.interleave_pkg, reg, sad_way);
1999			if (sad_way > 0 && sad_interl == pkg)
2000				break;
2001			sad_interleave[sad_way] = pkg;
2002			edac_dbg(0, "SAD interleave #%d: %d\n",
2003				 sad_way, sad_interleave[sad_way]);
2004		}
2005		edac_dbg(0, "mc#%d: Error detected on SAD#%d: address 0x%016Lx < 0x%016Lx, Interleave [%d:6]%s\n",
2006			 pvt->sbridge_dev->mc,
2007			 n_sads,
2008			 addr,
2009			 limit,
2010			 sad_way + 7,
2011			 !interleave_mode ? "" : "XOR[18:16]");
2012		if (interleave_mode)
2013			idx = ((addr >> 6) ^ (addr >> 16)) & 7;
2014		else
2015			idx = (addr >> 6) & 7;
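		/* Reduce the 3-bit interleave index to the discovered
		 * wayness, which must be 1, 2, 4 or 8.
		 */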
2016		switch (sad_way) {
2017		case 1:
2018			idx = 0;
2019			break;
2020		case 2:
2021			idx = idx & 1;
2022			break;
2023		case 4:
2024			idx = idx & 3;
2025			break;
2026		case 8:
2027			break;
2028		default:
2029			sprintf(msg, "Can't discover socket interleave");
2030			return -EINVAL;
2031		}
2032		*socket = sad_interleave[idx];
2033		edac_dbg(0, "SAD interleave index: %d (wayness %d) = CPU socket %d\n",
2034			 idx, sad_way, *socket);
2035	} else if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL) {
2036		int bits, a7mode = A7MODE(dram_rule);
2037
2038		if (a7mode) {
2039			/* A7 mode swaps P9 with P6 */
2040			bits = GET_BITFIELD(addr, 7, 8) << 1;
2041			bits |= GET_BITFIELD(addr, 9, 9);
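			/* i.e. the index is built from address bits {8,7,9}
			 * instead of the usual {8,7,6}.
			 */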
2042		} else
2043			bits = GET_BITFIELD(addr, 6, 8);
2044
2045		if (interleave_mode == 0) {
2046			/* interleave mode will XOR {8,7,6} with {18,17,16} */
2047			idx = GET_BITFIELD(addr, 16, 18);
2048			idx ^= bits;
2049		} else
2050			idx = bits;
2051
2052		pkg = sad_pkg(pvt->info.interleave_pkg, reg, idx);
2053		*socket = sad_pkg_socket(pkg);
2054		sad_ha = sad_pkg_ha(pkg);
2055
2056		if (a7mode) {
2057			/* MCChanShiftUpEnable */
2058			pci_read_config_dword(pvt->pci_ha, HASWELL_HASYSDEFEATURE2, &reg);
2059			shiftup = GET_BITFIELD(reg, 22, 22);
2060		}
2061
2062		edac_dbg(0, "SAD interleave package: %d = CPU socket %d, HA %i, shiftup: %i\n",
2063			 idx, *socket, sad_ha, shiftup);
2064	} else {
2065		/* Ivy Bridge's SAD mode doesn't support XOR interleave mode */
2066		idx = (addr >> 6) & 7;
2067		pkg = sad_pkg(pvt->info.interleave_pkg, reg, idx);
2068		*socket = sad_pkg_socket(pkg);
2069		sad_ha = sad_pkg_ha(pkg);
2070		edac_dbg(0, "SAD interleave package: %d = CPU socket %d, HA %d\n",
2071			 idx, *socket, sad_ha);
2072	}
2073
2074	*ha = sad_ha;
2075
2076	/*
2077	 * Move to the proper node structure, in order to access the
2078	 * right PCI registers
2079	 */
2080	new_mci = get_mci_for_node_id(*socket, sad_ha);
2081	if (!new_mci) {
2082		sprintf(msg, "Struct for socket #%u wasn't initialized",
2083			*socket);
2084		return -EINVAL;
2085	}
2086	mci = new_mci;
2087	pvt = mci->pvt_info;
2088
2089	/*
2090	 * Step 2) Get memory channel
2091	 */
2092	prv = 0;
2093	pci_ha = pvt->pci_ha;
2094	for (n_tads = 0; n_tads < MAX_TAD; n_tads++) {
2095		pci_read_config_dword(pci_ha, tad_dram_rule[n_tads], &reg);
2096		limit = TAD_LIMIT(reg);
2097		if (limit <= prv) {
2098			sprintf(msg, "Can't discover the memory channel");
2099			return -EINVAL;
2100		}
2101		if  (addr <= limit)
2102			break;
2103		prv = limit;
2104	}
2105	if (n_tads == MAX_TAD) {
2106		sprintf(msg, "Can't discover the memory channel");
2107		return -EINVAL;
2108	}
2109
2110	ch_way = TAD_CH(reg) + 1;
2111	sck_way = TAD_SOCK(reg);
2112
2113	if (ch_way == 3)
2114		idx = addr >> 6;
2115	else {
2116		idx = (addr >> (6 + sck_way + shiftup)) & 0x3;
2117		if (pvt->is_chan_hash)
2118			idx = haswell_chan_hash(idx, addr);
2119	}
2120	idx = idx % ch_way;
2121
2122	/*
2123	 * FIXME: Shouldn't we use CHN_IDX_OFFSET() here, when ch_way == 3 ???
2124	 */
2125	switch (idx) {
2126	case 0:
2127		base_ch = TAD_TGT0(reg);
2128		break;
2129	case 1:
2130		base_ch = TAD_TGT1(reg);
2131		break;
2132	case 2:
2133		base_ch = TAD_TGT2(reg);
2134		break;
2135	case 3:
2136		base_ch = TAD_TGT3(reg);
2137		break;
2138	default:
2139		sprintf(msg, "Can't discover the TAD target");
2140		return -EINVAL;
2141	}
2142	*channel_mask = 1 << base_ch;
2143
2144	pci_read_config_dword(pvt->pci_tad[base_ch], tad_ch_nilv_offset[n_tads], &tad_offset);
2145
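	/*
	 * When the address is mirrored, the data also lives on the partner
	 * channel two positions away, so both channels are reported in the
	 * channel mask.
	 */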
2146	if (pvt->mirror_mode == FULL_MIRRORING ||
2147	    (pvt->mirror_mode == ADDR_RANGE_MIRRORING && n_tads == 0)) {
2148		*channel_mask |= 1 << ((base_ch + 2) % 4);
2149		switch(ch_way) {
2150		case 2:
2151		case 4:
2152			sck_xch = (1 << sck_way) * (ch_way >> 1);
2153			break;
2154		default:
2155			sprintf(msg, "Invalid mirror set. Can't decode addr");
2156			return -EINVAL;
2157		}
2158
2159		pvt->is_cur_addr_mirrored = true;
2160	} else {
2161		sck_xch = (1 << sck_way) * ch_way;
2162		pvt->is_cur_addr_mirrored = false;
2163	}
2164
2165	if (pvt->is_lockstep)
2166		*channel_mask |= 1 << ((base_ch + 1) % 4);
2167
2168	offset = TAD_OFFSET(tad_offset);
2169
2170	edac_dbg(0, "TAD#%d: address 0x%016Lx < 0x%016Lx, socket interleave %d, channel interleave %d (offset 0x%08Lx), index %d, base ch: %d, ch mask: 0x%02lx\n",
2171		 n_tads,
2172		 addr,
2173		 limit,
2174		 sck_way,
2175		 ch_way,
2176		 offset,
2177		 idx,
2178		 base_ch,
2179		 *channel_mask);
2180
2181	/* Calculate channel address */
2182	/* Remove the TAD offset */
2183
2184	if (offset > addr) {
2185		sprintf(msg, "Can't calculate ch addr: TAD offset 0x%08Lx is too high for addr 0x%08Lx!",
2186			offset, addr);
2187		return -EINVAL;
2188	}
2189
2190	ch_addr = addr - offset;
2191	ch_addr >>= (6 + shiftup);
2192	ch_addr /= sck_xch;
2193	ch_addr <<= (6 + shiftup);
2194	ch_addr |= addr & ((1 << (6 + shiftup)) - 1);
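	/*
	 * The low (6 + shiftup) address bits are kept as-is; the bits above
	 * them are divided by the socket x channel interleave ways (sck_xch)
	 * to form the channel address.
	 */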
2195
2196	/*
2197	 * Step 3) Decode rank
2198	 */
2199	for (n_rir = 0; n_rir < MAX_RIR_RANGES; n_rir++) {
2200		pci_read_config_dword(pvt->pci_tad[base_ch], rir_way_limit[n_rir], &reg);
2201
2202		if (!IS_RIR_VALID(reg))
2203			continue;
2204
2205		limit = pvt->info.rir_limit(reg);
2206		gb = div_u64_rem(limit >> 20, 1024, &mb);
2207		edac_dbg(0, "RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d\n",
2208			 n_rir,
2209			 gb, (mb*1000)/1024,
2210			 limit,
2211			 1 << RIR_WAY(reg));
2212		if  (ch_addr <= limit)
2213			break;
2214	}
2215	if (n_rir == MAX_RIR_RANGES) {
2216		sprintf(msg, "Can't discover the memory rank for ch addr 0x%08Lx",
2217			ch_addr);
2218		return -EINVAL;
2219	}
2220	rir_way = RIR_WAY(reg);
2221
2222	if (pvt->is_close_pg)
2223		idx = (ch_addr >> 6);
2224	else
2225		idx = (ch_addr >> 13);	/* FIXME: Datasheet says to shift by 15 */
2226	idx %= 1 << rir_way;
2227
2228	pci_read_config_dword(pvt->pci_tad[base_ch], rir_offset[n_rir][idx], &reg);
2229	*rank = RIR_RNK_TGT(pvt->info.type, reg);
2230
2231	edac_dbg(0, "RIR#%d: channel address 0x%08Lx < 0x%08Lx, RIR interleave %d, index %d\n",
2232		 n_rir,
2233		 ch_addr,
2234		 limit,
2235		 rir_way,
2236		 idx);
2237
2238	return 0;
2239}
2240
2241static int get_memory_error_data_from_mce(struct mem_ctl_info *mci,
2242					  const struct mce *m, u8 *socket,
2243					  u8 *ha, long *channel_mask,
2244					  char *msg)
2245{
2246	u32 reg, channel = GET_BITFIELD(m->status, 0, 3);
2247	struct mem_ctl_info *new_mci;
2248	struct sbridge_pvt *pvt;
2249	struct pci_dev *pci_ha;
2250	bool tad0;
2251
2252	if (channel >= NUM_CHANNELS) {
2253		sprintf(msg, "Invalid channel 0x%x", channel);
2254		return -EINVAL;
2255	}
2256
2257	pvt = mci->pvt_info;
2258	if (!pvt->info.get_ha) {
2259		sprintf(msg, "No get_ha()");
2260		return -EINVAL;
2261	}
2262	*ha = pvt->info.get_ha(m->bank);
2263	if (*ha != 0 && *ha != 1) {
2264		sprintf(msg, "Impossible bank %d", m->bank);
2265		return -EINVAL;
2266	}
2267
2268	*socket = m->socketid;
2269	new_mci = get_mci_for_node_id(*socket, *ha);
2270	if (!new_mci) {
2271		strcpy(msg, "mci socket got corrupted!");
2272		return -EINVAL;
2273	}
2274
2275	pvt = new_mci->pvt_info;
2276	pci_ha = pvt->pci_ha;
2277	pci_read_config_dword(pci_ha, tad_dram_rule[0], &reg);
2278	tad0 = m->addr <= TAD_LIMIT(reg);
2279
2280	*channel_mask = 1 << channel;
2281	if (pvt->mirror_mode == FULL_MIRRORING ||
2282	    (pvt->mirror_mode == ADDR_RANGE_MIRRORING && tad0)) {
2283		*channel_mask |= 1 << ((channel + 2) % 4);
2284		pvt->is_cur_addr_mirrored = true;
2285	} else {
2286		pvt->is_cur_addr_mirrored = false;
2287	}
2288
2289	if (pvt->is_lockstep)
2290		*channel_mask |= 1 << ((channel + 1) % 4);
2291
2292	return 0;
2293}
2294
2295/****************************************************************************
2296	Device initialization routines: put/get, init/exit
2297 ****************************************************************************/
2298
2299/*
2300 *	sbridge_put_all_devices	'put' all the devices that we have
2301 *				reserved via 'get'
2302 */
2303static void sbridge_put_devices(struct sbridge_dev *sbridge_dev)
2304{
2305	int i;
2306
2307	edac_dbg(0, "\n");
2308	for (i = 0; i < sbridge_dev->n_devs; i++) {
2309		struct pci_dev *pdev = sbridge_dev->pdev[i];
2310		if (!pdev)
2311			continue;
2312		edac_dbg(0, "Removing dev %02x:%02x.%d\n",
2313			 pdev->bus->number,
2314			 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
2315		pci_dev_put(pdev);
2316	}
2317}
2318
2319static void sbridge_put_all_devices(void)
2320{
2321	struct sbridge_dev *sbridge_dev, *tmp;
2322
2323	list_for_each_entry_safe(sbridge_dev, tmp, &sbridge_edac_list, list) {
2324		sbridge_put_devices(sbridge_dev);
2325		free_sbridge_dev(sbridge_dev);
2326	}
2327}
2328
2329static int sbridge_get_onedevice(struct pci_dev **prev,
2330				 u8 *num_mc,
2331				 const struct pci_id_table *table,
2332				 const unsigned devno,
2333				 const int multi_bus)
2334{
2335	struct sbridge_dev *sbridge_dev = NULL;
2336	const struct pci_id_descr *dev_descr = &table->descr[devno];
2337	struct pci_dev *pdev = NULL;
2338	int seg = 0;
2339	u8 bus = 0;
2340	int i = 0;
2341
2342	sbridge_printk(KERN_DEBUG,
2343		"Looking for PCI ID %04x:%04x\n",
2344		PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
2345
2346	pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
2347			      dev_descr->dev_id, *prev);
2348
2349	if (!pdev) {
2350		if (*prev) {
2351			*prev = pdev;
2352			return 0;
2353		}
2354
2355		if (dev_descr->optional)
2356			return 0;
2357
2358		/* if the HA wasn't found */
2359		if (devno == 0)
2360			return -ENODEV;
2361
2362		sbridge_printk(KERN_INFO,
2363			"Device not found: %04x:%04x\n",
2364			PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
2365
2366		/* End of list, leave */
2367		return -ENODEV;
2368	}
2369	seg = pci_domain_nr(pdev->bus);
2370	bus = pdev->bus->number;
2371
2372next_imc:
2373	sbridge_dev = get_sbridge_dev(seg, bus, dev_descr->dom,
2374				      multi_bus, sbridge_dev);
2375	if (!sbridge_dev) {
2376		/* If HA1 wasn't found, don't create a second EDAC memory controller */
2377		if (dev_descr->dom == IMC1 && devno != 1) {
2378			edac_dbg(0, "Skip IMC1: %04x:%04x (since HA1 was absent)\n",
2379				 PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
2380			pci_dev_put(pdev);
2381			return 0;
2382		}
2383
2384		if (dev_descr->dom == SOCK)
2385			goto out_imc;
2386
2387		sbridge_dev = alloc_sbridge_dev(seg, bus, dev_descr->dom, table);
2388		if (!sbridge_dev) {
2389			pci_dev_put(pdev);
2390			return -ENOMEM;
2391		}
2392		(*num_mc)++;
2393	}
2394
2395	if (sbridge_dev->pdev[sbridge_dev->i_devs]) {
2396		sbridge_printk(KERN_ERR,
2397			"Duplicated device for %04x:%04x\n",
2398			PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
2399		pci_dev_put(pdev);
2400		return -ENODEV;
2401	}
2402
2403	sbridge_dev->pdev[sbridge_dev->i_devs++] = pdev;
2404
2405	/* pdev belongs to more than one IMC, do extra gets */
2406	if (++i > 1)
2407		pci_dev_get(pdev);
2408
2409	if (dev_descr->dom == SOCK && i < table->n_imcs_per_sock)
2410		goto next_imc;
2411
2412out_imc:
2413	/* Be sure that the device is enabled */
2414	if (unlikely(pci_enable_device(pdev) < 0)) {
2415		sbridge_printk(KERN_ERR,
2416			"Couldn't enable %04x:%04x\n",
2417			PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
2418		return -ENODEV;
2419	}
2420
2421	edac_dbg(0, "Detected %04x:%04x\n",
2422		 PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
2423
2424	/*
2425	 * As stated in drivers/pci/search.c, the reference count for
2426	 * @from is always decremented if it is not %NULL. So, since we need
2427	 * to walk all devices up to NULL, we need an extra get for this device
2428	 */
2429	pci_dev_get(pdev);
2430
2431	*prev = pdev;
2432
2433	return 0;
2434}
2435
2436/*
2437 * sbridge_get_all_devices - Find and perform 'get' operation on the MCH's
2438 *			     devices we want to reference for this driver.
2439 * @num_mc: pointer to the memory controllers count, to be incremented in case
2440 *	    of success.
2441 * @table: model specific table
2442 *
2443 * returns 0 in case of success, or an error code otherwise
2444 */
2445static int sbridge_get_all_devices(u8 *num_mc,
2446					const struct pci_id_table *table)
2447{
2448	int i, rc;
2449	struct pci_dev *pdev = NULL;
2450	int allow_dups = 0;
2451	int multi_bus = 0;
2452
2453	if (table->type == KNIGHTS_LANDING)
2454		allow_dups = multi_bus = 1;
2455	while (table && table->descr) {
2456		for (i = 0; i < table->n_devs_per_sock; i++) {
2457			if (!allow_dups || i == 0 ||
2458					table->descr[i].dev_id !=
2459						table->descr[i-1].dev_id) {
2460				pdev = NULL;
2461			}
2462			do {
2463				rc = sbridge_get_onedevice(&pdev, num_mc,
2464							   table, i, multi_bus);
2465				if (rc < 0) {
2466					if (i == 0) {
2467						i = table->n_devs_per_sock;
2468						break;
2469					}
2470					sbridge_put_all_devices();
2471					return -ENODEV;
2472				}
2473			} while (pdev && !allow_dups);
2474		}
2475		table++;
2476	}
2477
2478	return 0;
2479}
2480
2481/*
2482 * Device IDs for {SBRIDGE,IBRIDGE,HASWELL,BROADWELL}_IMC_HA0_TAD0 are in
2483 * the format: XXXa. So we can convert from a device to the corresponding
2484 * channel like this
2485 */
2486#define TAD_DEV_TO_CHAN(dev) (((dev) & 0xf) - 0xa)
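/*
 * For example, a *_TAD1 device ID ends in 0xb, so TAD_DEV_TO_CHAN()
 * yields (0xb - 0xa) = channel 1.
 */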
2487
2488static int sbridge_mci_bind_devs(struct mem_ctl_info *mci,
2489				 struct sbridge_dev *sbridge_dev)
2490{
2491	struct sbridge_pvt *pvt = mci->pvt_info;
2492	struct pci_dev *pdev;
2493	u8 saw_chan_mask = 0;
2494	int i;
2495
2496	for (i = 0; i < sbridge_dev->n_devs; i++) {
2497		pdev = sbridge_dev->pdev[i];
2498		if (!pdev)
2499			continue;
2500
2501		switch (pdev->device) {
2502		case PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0:
2503			pvt->pci_sad0 = pdev;
2504			break;
2505		case PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1:
2506			pvt->pci_sad1 = pdev;
2507			break;
2508		case PCI_DEVICE_ID_INTEL_SBRIDGE_BR:
2509			pvt->pci_br0 = pdev;
2510			break;
2511		case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0:
2512			pvt->pci_ha = pdev;
2513			break;
2514		case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA:
2515			pvt->pci_ta = pdev;
2516			break;
2517		case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS:
2518			pvt->pci_ras = pdev;
2519			break;
2520		case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0:
2521		case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1:
2522		case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2:
2523		case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3:
2524		{
2525			int id = TAD_DEV_TO_CHAN(pdev->device);
2526			pvt->pci_tad[id] = pdev;
2527			saw_chan_mask |= 1 << id;
2528		}
2529			break;
2530		case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO:
2531			pvt->pci_ddrio = pdev;
2532			break;
2533		default:
2534			goto error;
2535		}
2536
2537		edac_dbg(0, "Associated PCI %02x:%02x, bus %d with dev = %p\n",
2538			 pdev->vendor, pdev->device,
2539			 sbridge_dev->bus,
2540			 pdev);
2541	}
2542
2543	/* Check if everything was registered */
2544	if (!pvt->pci_sad0 || !pvt->pci_sad1 || !pvt->pci_ha ||
2545	    !pvt->pci_ras || !pvt->pci_ta)
2546		goto enodev;
2547
2548	if (saw_chan_mask != 0x0f)
2549		goto enodev;
2550	return 0;
2551
2552enodev:
2553	sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
2554	return -ENODEV;
2555
2556error:
2557	sbridge_printk(KERN_ERR, "Unexpected device %02x:%02x\n",
2558		       PCI_VENDOR_ID_INTEL, pdev->device);
2559	return -EINVAL;
2560}
2561
2562static int ibridge_mci_bind_devs(struct mem_ctl_info *mci,
2563				 struct sbridge_dev *sbridge_dev)
2564{
2565	struct sbridge_pvt *pvt = mci->pvt_info;
2566	struct pci_dev *pdev;
2567	u8 saw_chan_mask = 0;
2568	int i;
2569
2570	for (i = 0; i < sbridge_dev->n_devs; i++) {
2571		pdev = sbridge_dev->pdev[i];
2572		if (!pdev)
2573			continue;
2574
2575		switch (pdev->device) {
2576		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0:
2577		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1:
2578			pvt->pci_ha = pdev;
2579			break;
2580		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA:
2581		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA:
2582			pvt->pci_ta = pdev;
2583			break;
2584		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS:
2585		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS:
2586			pvt->pci_ras = pdev;
2587			break;
2588		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0:
2589		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1:
2590		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2:
2591		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3:
2592		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0:
2593		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1:
2594		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD2:
2595		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD3:
2596		{
2597			int id = TAD_DEV_TO_CHAN(pdev->device);
2598			pvt->pci_tad[id] = pdev;
2599			saw_chan_mask |= 1 << id;
2600		}
2601			break;
2602		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0:
2603			pvt->pci_ddrio = pdev;
2604			break;
2605		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0:
2606			pvt->pci_ddrio = pdev;
2607			break;
2608		case PCI_DEVICE_ID_INTEL_IBRIDGE_SAD:
2609			pvt->pci_sad0 = pdev;
2610			break;
2611		case PCI_DEVICE_ID_INTEL_IBRIDGE_BR0:
2612			pvt->pci_br0 = pdev;
2613			break;
2614		case PCI_DEVICE_ID_INTEL_IBRIDGE_BR1:
2615			pvt->pci_br1 = pdev;
2616			break;
2617		default:
2618			goto error;
2619		}
2620
2621		edac_dbg(0, "Associated PCI %02x.%02d.%d with dev = %p\n",
2622			 sbridge_dev->bus,
2623			 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
2624			 pdev);
2625	}
2626
2627	/* Check if everything was registered */
2628	if (!pvt->pci_sad0 || !pvt->pci_ha || !pvt->pci_br0 ||
2629	    !pvt->pci_br1 || !pvt->pci_ras || !pvt->pci_ta)
2630		goto enodev;
2631
2632	if (saw_chan_mask != 0x0f && /* -EN/-EX */
2633	    saw_chan_mask != 0x03)   /* -EP */
2634		goto enodev;
2635	return 0;
2636
2637enodev:
2638	sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
2639	return -ENODEV;
2640
2641error:
2642	sbridge_printk(KERN_ERR,
2643		       "Unexpected device %02x:%02x\n", PCI_VENDOR_ID_INTEL,
2644			pdev->device);
2645	return -EINVAL;
2646}
2647
2648static int haswell_mci_bind_devs(struct mem_ctl_info *mci,
2649				 struct sbridge_dev *sbridge_dev)
2650{
2651	struct sbridge_pvt *pvt = mci->pvt_info;
2652	struct pci_dev *pdev;
2653	u8 saw_chan_mask = 0;
2654	int i;
2655
2656	/* there's only one device per system; not tied to any bus */
2657	if (pvt->info.pci_vtd == NULL)
2658		/* result will be checked later */
2659		pvt->info.pci_vtd = pci_get_device(PCI_VENDOR_ID_INTEL,
2660						   PCI_DEVICE_ID_INTEL_HASWELL_IMC_VTD_MISC,
2661						   NULL);
2662
2663	for (i = 0; i < sbridge_dev->n_devs; i++) {
2664		pdev = sbridge_dev->pdev[i];
2665		if (!pdev)
2666			continue;
2667
2668		switch (pdev->device) {
2669		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD0:
2670			pvt->pci_sad0 = pdev;
2671			break;
2672		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD1:
2673			pvt->pci_sad1 = pdev;
2674			break;
2675		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0:
2676		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1:
2677			pvt->pci_ha = pdev;
2678			break;
2679		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA:
2680		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA:
2681			pvt->pci_ta = pdev;
2682			break;
2683		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TM:
2684		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TM:
2685			pvt->pci_ras = pdev;
2686			break;
2687		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0:
2688		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD1:
2689		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD2:
2690		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3:
2691		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0:
2692		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD1:
2693		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD2:
2694		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD3:
2695		{
2696			int id = TAD_DEV_TO_CHAN(pdev->device);
2697			pvt->pci_tad[id] = pdev;
2698			saw_chan_mask |= 1 << id;
2699		}
2700			break;
2701		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0:
2702		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO1:
2703		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO2:
2704		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO3:
2705			if (!pvt->pci_ddrio)
2706				pvt->pci_ddrio = pdev;
2707			break;
2708		default:
2709			break;
2710		}
2711
2712		edac_dbg(0, "Associated PCI %02x.%02d.%d with dev = %p\n",
2713			 sbridge_dev->bus,
2714			 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
2715			 pdev);
2716	}
2717
2718	/* Check if everything was registered */
2719	if (!pvt->pci_sad0 || !pvt->pci_ha || !pvt->pci_sad1 ||
2720	    !pvt->pci_ras  || !pvt->pci_ta || !pvt->info.pci_vtd)
2721		goto enodev;
2722
2723	if (saw_chan_mask != 0x0f && /* -EN/-EX */
2724	    saw_chan_mask != 0x03)   /* -EP */
2725		goto enodev;
2726	return 0;
2727
2728enodev:
2729	sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
2730	return -ENODEV;
2731}
2732
2733static int broadwell_mci_bind_devs(struct mem_ctl_info *mci,
2734				 struct sbridge_dev *sbridge_dev)
2735{
2736	struct sbridge_pvt *pvt = mci->pvt_info;
2737	struct pci_dev *pdev;
2738	u8 saw_chan_mask = 0;
2739	int i;
2740
2741	/* there's only one device per system; not tied to any bus */
2742	if (pvt->info.pci_vtd == NULL)
2743		/* result will be checked later */
2744		pvt->info.pci_vtd = pci_get_device(PCI_VENDOR_ID_INTEL,
2745						   PCI_DEVICE_ID_INTEL_BROADWELL_IMC_VTD_MISC,
2746						   NULL);
2747
2748	for (i = 0; i < sbridge_dev->n_devs; i++) {
2749		pdev = sbridge_dev->pdev[i];
2750		if (!pdev)
2751			continue;
2752
2753		switch (pdev->device) {
2754		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD0:
2755			pvt->pci_sad0 = pdev;
2756			break;
2757		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD1:
2758			pvt->pci_sad1 = pdev;
2759			break;
2760		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0:
2761		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1:
2762			pvt->pci_ha = pdev;
2763			break;
2764		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA:
2765		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TA:
2766			pvt->pci_ta = pdev;
2767			break;
2768		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TM:
2769		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TM:
2770			pvt->pci_ras = pdev;
2771			break;
2772		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD0:
2773		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD1:
2774		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD2:
2775		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD3:
2776		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD0:
2777		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD1:
2778		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD2:
2779		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD3:
2780		{
2781			int id = TAD_DEV_TO_CHAN(pdev->device);
2782			pvt->pci_tad[id] = pdev;
2783			saw_chan_mask |= 1 << id;
2784		}
2785			break;
2786		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_DDRIO0:
2787			pvt->pci_ddrio = pdev;
2788			break;
2789		default:
2790			break;
2791		}
2792
2793		edac_dbg(0, "Associated PCI %02x.%02d.%d with dev = %p\n",
2794			 sbridge_dev->bus,
2795			 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
2796			 pdev);
2797	}
2798
2799	/* Check if everything was registered */
2800	if (!pvt->pci_sad0 || !pvt->pci_ha || !pvt->pci_sad1 ||
2801	    !pvt->pci_ras  || !pvt->pci_ta || !pvt->info.pci_vtd)
2802		goto enodev;
2803
2804	if (saw_chan_mask != 0x0f && /* -EN/-EX */
2805	    saw_chan_mask != 0x03)   /* -EP */
2806		goto enodev;
2807	return 0;
2808
2809enodev:
2810	sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
2811	return -ENODEV;
2812}
2813
2814static int knl_mci_bind_devs(struct mem_ctl_info *mci,
2815			struct sbridge_dev *sbridge_dev)
2816{
2817	struct sbridge_pvt *pvt = mci->pvt_info;
2818	struct pci_dev *pdev;
2819	int dev, func;
2820
2821	int i;
2822	int devidx;
2823
2824	for (i = 0; i < sbridge_dev->n_devs; i++) {
2825		pdev = sbridge_dev->pdev[i];
2826		if (!pdev)
2827			continue;
2828
2829		/* Extract PCI device and function. */
2830		dev = (pdev->devfn >> 3) & 0x1f;
2831		func = pdev->devfn & 0x7;
2832
2833		switch (pdev->device) {
2834		case PCI_DEVICE_ID_INTEL_KNL_IMC_MC:
2835			if (dev == 8)
2836				pvt->knl.pci_mc0 = pdev;
2837			else if (dev == 9)
2838				pvt->knl.pci_mc1 = pdev;
2839			else {
2840				sbridge_printk(KERN_ERR,
2841					"Memory controller in unexpected place! (dev %d, fn %d)\n",
2842					dev, func);
2843				continue;
2844			}
2845			break;
2846
2847		case PCI_DEVICE_ID_INTEL_KNL_IMC_SAD0:
2848			pvt->pci_sad0 = pdev;
2849			break;
2850
2851		case PCI_DEVICE_ID_INTEL_KNL_IMC_SAD1:
2852			pvt->pci_sad1 = pdev;
2853			break;
2854
2855		case PCI_DEVICE_ID_INTEL_KNL_IMC_CHA:
2856			/* There is one of these per tile, and they range
2857			 * from 1.14.0 to 1.18.5.
2858			 */
2859			devidx = ((dev-14)*8)+func;
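			/* e.g. device 15 function 3 maps to
			 * devidx = (15 - 14) * 8 + 3 = 11.
			 */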
2860
2861			if (devidx < 0 || devidx >= KNL_MAX_CHAS) {
2862				sbridge_printk(KERN_ERR,
2863					"Caching and Home Agent in unexpected place! (dev %d, fn %d)\n",
2864					dev, func);
2865				continue;
2866			}
2867
2868			WARN_ON(pvt->knl.pci_cha[devidx] != NULL);
2869
2870			pvt->knl.pci_cha[devidx] = pdev;
2871			break;
2872
2873		case PCI_DEVICE_ID_INTEL_KNL_IMC_CHAN:
2874			devidx = -1;
2875
2876			/*
2877			 *  MC0 channels 0-2 are device 9 function 2-4,
2878			 *  MC1 channels 3-5 are device 8 function 2-4.
2879			 */
2880
2881			if (dev == 9)
2882				devidx = func-2;
2883			else if (dev == 8)
2884				devidx = 3 + (func-2);
2885
2886			if (devidx < 0 || devidx >= KNL_MAX_CHANNELS) {
2887				sbridge_printk(KERN_ERR,
2888					"DRAM Channel Registers in unexpected place! (dev %d, fn %d)\n",
2889					dev, func);
2890				continue;
2891			}
2892
2893			WARN_ON(pvt->knl.pci_channel[devidx] != NULL);
2894			pvt->knl.pci_channel[devidx] = pdev;
2895			break;
2896
2897		case PCI_DEVICE_ID_INTEL_KNL_IMC_TOLHM:
2898			pvt->knl.pci_mc_info = pdev;
2899			break;
2900
2901		case PCI_DEVICE_ID_INTEL_KNL_IMC_TA:
2902			pvt->pci_ta = pdev;
2903			break;
2904
2905		default:
2906			sbridge_printk(KERN_ERR, "Unexpected device %d\n",
2907				pdev->device);
2908			break;
2909		}
2910	}
2911
2912	if (!pvt->knl.pci_mc0  || !pvt->knl.pci_mc1 ||
2913	    !pvt->pci_sad0     || !pvt->pci_sad1    ||
2914	    !pvt->pci_ta) {
2915		goto enodev;
2916	}
2917
2918	for (i = 0; i < KNL_MAX_CHANNELS; i++) {
2919		if (!pvt->knl.pci_channel[i]) {
2920			sbridge_printk(KERN_ERR, "Missing channel %d\n", i);
2921			goto enodev;
2922		}
2923	}
2924
2925	for (i = 0; i < KNL_MAX_CHAS; i++) {
2926		if (!pvt->knl.pci_cha[i]) {
2927			sbridge_printk(KERN_ERR, "Missing CHA %d\n", i);
2928			goto enodev;
2929		}
2930	}
2931
2932	return 0;
2933
2934enodev:
2935	sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
2936	return -ENODEV;
2937}
2938
2939/****************************************************************************
2940			Error check routines
2941 ****************************************************************************/
2942
2943/*
2944 * While Sandy Bridge has error count registers, the SMI BIOS reads values
2945 * from them and resets the counters, so they are not reliable for the OS
2946 * to read. We have no option but to trust whatever the MCE is telling us
2947 * about the errors.
2948 */
2949static void sbridge_mce_output_error(struct mem_ctl_info *mci,
2950				    const struct mce *m)
2951{
2952	struct mem_ctl_info *new_mci;
2953	struct sbridge_pvt *pvt = mci->pvt_info;
2954	enum hw_event_mc_err_type tp_event;
2955	char *type, *optype, msg[256];
2956	bool ripv = GET_BITFIELD(m->mcgstatus, 0, 0);
2957	bool overflow = GET_BITFIELD(m->status, 62, 62);
2958	bool uncorrected_error = GET_BITFIELD(m->status, 61, 61);
2959	bool recoverable;
2960	u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52);
2961	u32 mscod = GET_BITFIELD(m->status, 16, 31);
2962	u32 errcode = GET_BITFIELD(m->status, 0, 15);
2963	u32 channel = GET_BITFIELD(m->status, 0, 3);
2964	u32 optypenum = GET_BITFIELD(m->status, 4, 6);
2965	/*
2966	 * Bits 5-0 of MCi_MISC give the least significant bit that is valid.
2967	 * A value of 6 means a cache line aligned address, a value of 12 means
2968	 * a page aligned address reported by the patrol scrubber.
2969	 */
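	/*
	 * lsb < 12 means the reported address is precise enough for the
	 * full SAD/TAD/RIR decode below; otherwise only the 4 KiB page is
	 * known and the MCE bank based decode is used instead.
	 */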
2970	u32 lsb = GET_BITFIELD(m->misc, 0, 5);
2971	long channel_mask, first_channel;
2972	u8  rank = 0xff, socket, ha;
2973	int rc, dimm;
2974	char *area_type = "DRAM";
2975
2976	if (pvt->info.type != SANDY_BRIDGE)
2977		recoverable = true;
2978	else
2979		recoverable = GET_BITFIELD(m->status, 56, 56);
2980
2981	if (uncorrected_error) {
2982		core_err_cnt = 1;
2983		if (ripv) {
2984			type = "FATAL";
2985			tp_event = HW_EVENT_ERR_FATAL;
2986		} else {
2987			type = "NON_FATAL";
2988			tp_event = HW_EVENT_ERR_UNCORRECTED;
2989		}
2990	} else {
2991		type = "CORRECTED";
2992		tp_event = HW_EVENT_ERR_CORRECTED;
2993	}
2994
2995	/*
2996	 * According to Table 15-9 of the Intel Architecture spec vol 3A,
2997	 * memory errors should fit in this mask:
2998	 *	000f 0000 1mmm cccc (binary)
2999	 * where:
3000	 *	f = Correction Report Filtering Bit. If 1, subsequent errors
3001	 *	    won't be shown
3002	 *	mmm = error type
3003	 *	cccc = channel
3004	 * If the mask doesn't match, report an error to the parsing logic
3005	 */
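	/*
	 * For example, errcode 0x0091 (1001 0001b) decodes as a memory
	 * read error (mmm = 001) on channel 1 (cccc = 0001).
	 */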
3006	switch (optypenum) {
3007	case 0:
3008		optype = "generic undef request error";
3009		break;
3010	case 1:
3011		optype = "memory read error";
3012		break;
3013	case 2:
3014		optype = "memory write error";
3015		break;
3016	case 3:
3017		optype = "addr/cmd error";
3018		break;
3019	case 4:
3020		optype = "memory scrubbing error";
3021		break;
3022	default:
3023		optype = "reserved";
3024		break;
3025	}
3026
3027	if (pvt->info.type == KNIGHTS_LANDING) {
3028		if (channel == 14) {
3029			edac_dbg(0, "%s%s err_code:%04x:%04x EDRAM bank %d\n",
3030				overflow ? " OVERFLOW" : "",
3031				(uncorrected_error && recoverable)
3032				? " recoverable" : "",
3033				mscod, errcode,
3034				m->bank);
3035		} else {
3036			char A = 'A';
3037
3038			/*
3039			 * The reported channel is in the range 0-2, so we can't map
3040			 * it back to the MC directly. To figure out the MC, check the
3041			 * machine check bank register that reported this error:
3042			 * bank 15 means mc0 and bank 16 means mc1.
3043			 */
3044			channel = knl_channel_remap(m->bank == 16, channel);
3045			channel_mask = 1 << channel;
3046
3047			snprintf(msg, sizeof(msg),
3048				"%s%s err_code:%04x:%04x channel:%d (DIMM_%c)",
3049				overflow ? " OVERFLOW" : "",
3050				(uncorrected_error && recoverable)
3051				? " recoverable" : " ",
3052				mscod, errcode, channel, A + channel);
3053			edac_mc_handle_error(tp_event, mci, core_err_cnt,
3054				m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0,
3055				channel, 0, -1,
3056				optype, msg);
3057		}
3058		return;
3059	} else if (lsb < 12) {
3060		rc = get_memory_error_data(mci, m->addr, &socket, &ha,
3061					   &channel_mask, &rank,
3062					   &area_type, msg);
3063	} else {
3064		rc = get_memory_error_data_from_mce(mci, m, &socket, &ha,
3065						    &channel_mask, msg);
3066	}
3067
3068	if (rc < 0)
3069		goto err_parsing;
3070	new_mci = get_mci_for_node_id(socket, ha);
3071	if (!new_mci) {
3072		strcpy(msg, "Error: socket got corrupted!");
3073		goto err_parsing;
3074	}
3075	mci = new_mci;
3076	pvt = mci->pvt_info;
3077
3078	first_channel = find_first_bit(&channel_mask, NUM_CHANNELS);
3079
3080	if (rank == 0xff)
3081		dimm = -1;
3082	else if (rank < 4)
3083		dimm = 0;
3084	else if (rank < 8)
3085		dimm = 1;
3086	else
3087		dimm = 2;
3088
3089	/*
3090	 * FIXME: On some memory configurations (mirror, lockstep), the
3091	 * Memory Controller can't point the error to a single DIMM. The
3092	 * EDAC core should handle the channel mask, in order to point
3093	 * to the group of DIMMs where the error may be happening.
3094	 */
3095	if (!pvt->is_lockstep && !pvt->is_cur_addr_mirrored && !pvt->is_close_pg)
3096		channel = first_channel;
3097
3098	snprintf(msg, sizeof(msg),
3099		 "%s%s area:%s err_code:%04x:%04x socket:%d ha:%d channel_mask:%ld rank:%d",
3100		 overflow ? " OVERFLOW" : "",
3101		 (uncorrected_error && recoverable) ? " recoverable" : "",
3102		 area_type,
3103		 mscod, errcode,
3104		 socket, ha,
3105		 channel_mask,
3106		 rank);
3107
3108	edac_dbg(0, "%s\n", msg);
3109
3110	/* FIXME: need support for channel mask */
3111
3112	if (channel == CHANNEL_UNSPECIFIED)
3113		channel = -1;
3114
3115	/* Call the helper to output message */
3116	edac_mc_handle_error(tp_event, mci, core_err_cnt,
3117			     m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0,
3118			     channel, dimm, -1,
3119			     optype, msg);
3120	return;
3121err_parsing:
3122	edac_mc_handle_error(tp_event, mci, core_err_cnt, 0, 0, 0,
3123			     -1, -1, -1,
3124			     msg, "");
3125
3126}
3127
3128/*
3129 * Check that logging is enabled and that this is the right type
3130 * of error for us to handle.
3131 */
3132static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val,
3133				   void *data)
3134{
3135	struct mce *mce = (struct mce *)data;
3136	struct mem_ctl_info *mci;
3137	char *type;
3138
3139	if (edac_get_report_status() == EDAC_REPORTING_DISABLED)
3140		return NOTIFY_DONE;
3141
3142	/*
3143	 * Just let mcelog handle it if the error is
3144	 * outside the memory controller. A memory error
3145	 * is indicated by bit 7 = 1 and bits 8-11,13-15 = 0.
3146	 * Bit 12 has a special meaning.
3147	 */
3148	if ((mce->status & 0xefff) >> 7 != 1)
3149		return NOTIFY_DONE;
3150
3151	/* Check ADDRV bit in STATUS */
3152	if (!GET_BITFIELD(mce->status, 58, 58))
3153		return NOTIFY_DONE;
3154
3155	/* Check MISCV bit in STATUS */
3156	if (!GET_BITFIELD(mce->status, 59, 59))
3157		return NOTIFY_DONE;
3158
3159	/* Check address type in MISC (physical address only) */
3160	if (GET_BITFIELD(mce->misc, 6, 8) != 2)
3161		return NOTIFY_DONE;
3162
3163	mci = get_mci_for_node_id(mce->socketid, IMC0);
3164	if (!mci)
3165		return NOTIFY_DONE;
3166
3167	if (mce->mcgstatus & MCG_STATUS_MCIP)
3168		type = "Exception";
3169	else
3170		type = "Event";
3171
3172	sbridge_mc_printk(mci, KERN_DEBUG, "HANDLING MCE MEMORY ERROR\n");
3173
3174	sbridge_mc_printk(mci, KERN_DEBUG, "CPU %d: Machine Check %s: %Lx "
3175			  "Bank %d: %016Lx\n", mce->extcpu, type,
3176			  mce->mcgstatus, mce->bank, mce->status);
3177	sbridge_mc_printk(mci, KERN_DEBUG, "TSC %llx ", mce->tsc);
3178	sbridge_mc_printk(mci, KERN_DEBUG, "ADDR %llx ", mce->addr);
3179	sbridge_mc_printk(mci, KERN_DEBUG, "MISC %llx ", mce->misc);
3180
3181	sbridge_mc_printk(mci, KERN_DEBUG, "PROCESSOR %u:%x TIME %llu SOCKET "
3182			  "%u APIC %x\n", mce->cpuvendor, mce->cpuid,
3183			  mce->time, mce->socketid, mce->apicid);
3184
3185	sbridge_mce_output_error(mci, mce);
3186
3187	/* Advise mcelog that the error was handled */
3188	return NOTIFY_STOP;
3189}
3190
3191static struct notifier_block sbridge_mce_dec = {
3192	.notifier_call	= sbridge_mce_check_error,
3193	.priority	= MCE_PRIO_EDAC,
3194};
3195
3196/****************************************************************************
3197			EDAC register/unregister logic
3198 ****************************************************************************/
3199
3200static void sbridge_unregister_mci(struct sbridge_dev *sbridge_dev)
3201{
3202	struct mem_ctl_info *mci = sbridge_dev->mci;
3203	struct sbridge_pvt *pvt;
3204
3205	if (unlikely(!mci || !mci->pvt_info)) {
3206		edac_dbg(0, "MC: dev = %p\n", &sbridge_dev->pdev[0]->dev);
3207
3208		sbridge_printk(KERN_ERR, "Couldn't find mci handler\n");
3209		return;
3210	}
3211
3212	pvt = mci->pvt_info;
3213
3214	edac_dbg(0, "MC: mci = %p, dev = %p\n",
3215		 mci, &sbridge_dev->pdev[0]->dev);
3216
3217	/* Remove MC sysfs nodes */
3218	edac_mc_del_mc(mci->pdev);
3219
3220	edac_dbg(1, "%s: free mci struct\n", mci->ctl_name);
3221	kfree(mci->ctl_name);
3222	edac_mc_free(mci);
3223	sbridge_dev->mci = NULL;
3224}
3225
3226static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
3227{
3228	struct mem_ctl_info *mci;
3229	struct edac_mc_layer layers[2];
3230	struct sbridge_pvt *pvt;
3231	struct pci_dev *pdev = sbridge_dev->pdev[0];
3232	int rc;
3233
3234	/* allocate a new MC control structure */
3235	layers[0].type = EDAC_MC_LAYER_CHANNEL;
3236	layers[0].size = type == KNIGHTS_LANDING ?
3237		KNL_MAX_CHANNELS : NUM_CHANNELS;
3238	layers[0].is_virt_csrow = false;
3239	layers[1].type = EDAC_MC_LAYER_SLOT;
3240	layers[1].size = type == KNIGHTS_LANDING ? 1 : MAX_DIMMS;
3241	layers[1].is_virt_csrow = true;
3242	mci = edac_mc_alloc(sbridge_dev->mc, ARRAY_SIZE(layers), layers,
3243			    sizeof(*pvt));
3244
3245	if (unlikely(!mci))
3246		return -ENOMEM;
3247
3248	edac_dbg(0, "MC: mci = %p, dev = %p\n",
3249		 mci, &pdev->dev);
3250
3251	pvt = mci->pvt_info;
3252	memset(pvt, 0, sizeof(*pvt));
3253
3254	/* Associate sbridge_dev and mci for future usage */
3255	pvt->sbridge_dev = sbridge_dev;
3256	sbridge_dev->mci = mci;
3257
3258	mci->mtype_cap = type == KNIGHTS_LANDING ?
3259		MEM_FLAG_DDR4 : MEM_FLAG_DDR3;
3260	mci->edac_ctl_cap = EDAC_FLAG_NONE;
3261	mci->edac_cap = EDAC_FLAG_NONE;
3262	mci->mod_name = EDAC_MOD_STR;
3263	mci->dev_name = pci_name(pdev);
3264	mci->ctl_page_to_phys = NULL;
3265
3266	pvt->info.type = type;
3267	switch (type) {
3268	case IVY_BRIDGE:
3269		pvt->info.rankcfgr = IB_RANK_CFG_A;
3270		pvt->info.get_tolm = ibridge_get_tolm;
3271		pvt->info.get_tohm = ibridge_get_tohm;
3272		pvt->info.dram_rule = ibridge_dram_rule;
3273		pvt->info.get_memory_type = get_memory_type;
3274		pvt->info.get_node_id = get_node_id;
3275		pvt->info.get_ha = ibridge_get_ha;
3276		pvt->info.rir_limit = rir_limit;
3277		pvt->info.sad_limit = sad_limit;
3278		pvt->info.interleave_mode = interleave_mode;
3279		pvt->info.dram_attr = dram_attr;
3280		pvt->info.max_sad = ARRAY_SIZE(ibridge_dram_rule);
3281		pvt->info.interleave_list = ibridge_interleave_list;
3282		pvt->info.interleave_pkg = ibridge_interleave_pkg;
3283		pvt->info.get_width = ibridge_get_width;
3284
3285		/* Store pci devices at mci for faster access */
3286		rc = ibridge_mci_bind_devs(mci, sbridge_dev);
3287		if (unlikely(rc < 0))
3288			goto fail0;
3289		get_source_id(mci);
3290		mci->ctl_name = kasprintf(GFP_KERNEL, "Ivy Bridge SrcID#%d_Ha#%d",
3291			pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom);
3292		break;
3293	case SANDY_BRIDGE:
3294		pvt->info.rankcfgr = SB_RANK_CFG_A;
3295		pvt->info.get_tolm = sbridge_get_tolm;
3296		pvt->info.get_tohm = sbridge_get_tohm;
3297		pvt->info.dram_rule = sbridge_dram_rule;
3298		pvt->info.get_memory_type = get_memory_type;
3299		pvt->info.get_node_id = get_node_id;
3300		pvt->info.get_ha = sbridge_get_ha;
3301		pvt->info.rir_limit = rir_limit;
3302		pvt->info.sad_limit = sad_limit;
3303		pvt->info.interleave_mode = interleave_mode;
3304		pvt->info.dram_attr = dram_attr;
3305		pvt->info.max_sad = ARRAY_SIZE(sbridge_dram_rule);
3306		pvt->info.interleave_list = sbridge_interleave_list;
3307		pvt->info.interleave_pkg = sbridge_interleave_pkg;
3308		pvt->info.get_width = sbridge_get_width;
3309
3310		/* Store pci devices at mci for faster access */
3311		rc = sbridge_mci_bind_devs(mci, sbridge_dev);
3312		if (unlikely(rc < 0))
3313			goto fail0;
3314		get_source_id(mci);
3315		mci->ctl_name = kasprintf(GFP_KERNEL, "Sandy Bridge SrcID#%d_Ha#%d",
3316			pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom);
3317		break;
3318	case HASWELL:
3319		/* rankcfgr isn't used */
3320		pvt->info.get_tolm = haswell_get_tolm;
3321		pvt->info.get_tohm = haswell_get_tohm;
3322		pvt->info.dram_rule = ibridge_dram_rule;
3323		pvt->info.get_memory_type = haswell_get_memory_type;
3324		pvt->info.get_node_id = haswell_get_node_id;
3325		pvt->info.get_ha = ibridge_get_ha;
3326		pvt->info.rir_limit = haswell_rir_limit;
3327		pvt->info.sad_limit = sad_limit;
3328		pvt->info.interleave_mode = interleave_mode;
3329		pvt->info.dram_attr = dram_attr;
3330		pvt->info.max_sad = ARRAY_SIZE(ibridge_dram_rule);
3331		pvt->info.interleave_list = ibridge_interleave_list;
3332		pvt->info.interleave_pkg = ibridge_interleave_pkg;
3333		pvt->info.get_width = ibridge_get_width;
3334
3335		/* Store pci devices at mci for faster access */
3336		rc = haswell_mci_bind_devs(mci, sbridge_dev);
3337		if (unlikely(rc < 0))
3338			goto fail0;
3339		get_source_id(mci);
3340		mci->ctl_name = kasprintf(GFP_KERNEL, "Haswell SrcID#%d_Ha#%d",
3341			pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom);
3342		break;
3343	case BROADWELL:
3344		/* rankcfgr isn't used */
3345		pvt->info.get_tolm = haswell_get_tolm;
3346		pvt->info.get_tohm = haswell_get_tohm;
3347		pvt->info.dram_rule = ibridge_dram_rule;
3348		pvt->info.get_memory_type = haswell_get_memory_type;
3349		pvt->info.get_node_id = haswell_get_node_id;
3350		pvt->info.get_ha = ibridge_get_ha;
3351		pvt->info.rir_limit = haswell_rir_limit;
3352		pvt->info.sad_limit = sad_limit;
3353		pvt->info.interleave_mode = interleave_mode;
3354		pvt->info.dram_attr = dram_attr;
3355		pvt->info.max_sad = ARRAY_SIZE(ibridge_dram_rule);
3356		pvt->info.interleave_list = ibridge_interleave_list;
3357		pvt->info.interleave_pkg = ibridge_interleave_pkg;
3358		pvt->info.get_width = broadwell_get_width;
3359
3360		/* Store pci devices at mci for faster access */
3361		rc = broadwell_mci_bind_devs(mci, sbridge_dev);
3362		if (unlikely(rc < 0))
3363			goto fail0;
3364		get_source_id(mci);
3365		mci->ctl_name = kasprintf(GFP_KERNEL, "Broadwell SrcID#%d_Ha#%d",
3366			pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom);
3367		break;
3368	case KNIGHTS_LANDING:
3369		/* pvt->info.rankcfgr == ??? */
3370		pvt->info.get_tolm = knl_get_tolm;
3371		pvt->info.get_tohm = knl_get_tohm;
3372		pvt->info.dram_rule = knl_dram_rule;
3373		pvt->info.get_memory_type = knl_get_memory_type;
3374		pvt->info.get_node_id = knl_get_node_id;
3375		pvt->info.get_ha = knl_get_ha;
3376		pvt->info.rir_limit = NULL;
3377		pvt->info.sad_limit = knl_sad_limit;
3378		pvt->info.interleave_mode = knl_interleave_mode;
3379		pvt->info.dram_attr = dram_attr_knl;
3380		pvt->info.max_sad = ARRAY_SIZE(knl_dram_rule);
3381		pvt->info.interleave_list = knl_interleave_list;
3382		pvt->info.interleave_pkg = ibridge_interleave_pkg;
3383		pvt->info.get_width = knl_get_width;
3384
3385		rc = knl_mci_bind_devs(mci, sbridge_dev);
3386		if (unlikely(rc < 0))
3387			goto fail0;
3388		get_source_id(mci);
3389		mci->ctl_name = kasprintf(GFP_KERNEL, "Knights Landing SrcID#%d_Ha#%d",
3390			pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom);
3391		break;
3392	}
3393
3394	if (!mci->ctl_name) {
3395		rc = -ENOMEM;
3396		goto fail0;
3397	}
3398
3399	/* Get dimm basic config and the memory layout */
3400	rc = get_dimm_config(mci);
3401	if (rc < 0) {
3402		edac_dbg(0, "MC: failed to get_dimm_config()\n");
3403		goto fail;
3404	}
3405	get_memory_layout(mci);
3406
3407	/* record ptr to the generic device */
3408	mci->pdev = &pdev->dev;
3409
3410	/* add this new MC control structure to EDAC's list of MCs */
3411	if (unlikely(edac_mc_add_mc(mci))) {
3412		edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
3413		rc = -EINVAL;
3414		goto fail;
3415	}
3416
3417	return 0;
3418
3419fail:
3420	kfree(mci->ctl_name);
3421fail0:
3422	edac_mc_free(mci);
3423	sbridge_dev->mci = NULL;
3424	return rc;
3425}
3426
3427static const struct x86_cpu_id sbridge_cpuids[] = {
3428	INTEL_CPU_FAM6(SANDYBRIDGE_X,	  pci_dev_descr_sbridge_table),
3429	INTEL_CPU_FAM6(IVYBRIDGE_X,	  pci_dev_descr_ibridge_table),
3430	INTEL_CPU_FAM6(HASWELL_X,	  pci_dev_descr_haswell_table),
3431	INTEL_CPU_FAM6(BROADWELL_X,	  pci_dev_descr_broadwell_table),
3432	INTEL_CPU_FAM6(BROADWELL_D,	  pci_dev_descr_broadwell_table),
3433	INTEL_CPU_FAM6(XEON_PHI_KNL,	  pci_dev_descr_knl_table),
3434	INTEL_CPU_FAM6(XEON_PHI_KNM,	  pci_dev_descr_knl_table),
3435	{ }
3436};
3437MODULE_DEVICE_TABLE(x86cpu, sbridge_cpuids);
3438
3439/*
3440 *	sbridge_probe	Get all devices and register memory controllers
3441 *			present.
3442 *	return:
3443 *		0 when a device was found
3444 *		< 0 on error
3445 */
3446
3447static int sbridge_probe(const struct x86_cpu_id *id)
3448{
3449	int rc = -ENODEV;
3450	u8 mc, num_mc = 0;
3451	struct sbridge_dev *sbridge_dev;
3452	struct pci_id_table *ptable = (struct pci_id_table *)id->driver_data;
3453
3454	/* get the pci devices we want to reserve for our use */
3455	rc = sbridge_get_all_devices(&num_mc, ptable);
3456
3457	if (unlikely(rc < 0)) {
3458		edac_dbg(0, "couldn't get all devices\n");
3459		goto fail0;
3460	}
3461
3462	mc = 0;
3463
3464	list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) {
3465		edac_dbg(0, "Registering MC#%d (%d of %d)\n",
3466			 mc, mc + 1, num_mc);
3467
3468		sbridge_dev->mc = mc++;
3469		rc = sbridge_register_mci(sbridge_dev, ptable->type);
3470		if (unlikely(rc < 0))
3471			goto fail1;
3472	}
3473
3474	sbridge_printk(KERN_INFO, "%s\n", SBRIDGE_REVISION);
3475
3476	return 0;
3477
3478fail1:
3479	list_for_each_entry(sbridge_dev, &sbridge_edac_list, list)
3480		sbridge_unregister_mci(sbridge_dev);
3481
3482	sbridge_put_all_devices();
3483fail0:
3484	return rc;
3485}
3486
3487/*
3488 *	sbridge_remove	cleanup
3489 *
3490 */
3491static void sbridge_remove(void)
3492{
3493	struct sbridge_dev *sbridge_dev;
3494
3495	edac_dbg(0, "\n");
3496
3497	list_for_each_entry(sbridge_dev, &sbridge_edac_list, list)
3498		sbridge_unregister_mci(sbridge_dev);
3499
3500	/* Release PCI resources */
3501	sbridge_put_all_devices();
3502}
3503
3504/*
3505 *	sbridge_init		Module entry function
3506 *			Try to initialize this module for its devices
3507 */
3508static int __init sbridge_init(void)
3509{
3510	const struct x86_cpu_id *id;
3511	const char *owner;
3512	int rc;
3513
3514	edac_dbg(2, "\n");
3515
3516	owner = edac_get_owner();
3517	if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
3518		return -EBUSY;
3519
3520	id = x86_match_cpu(sbridge_cpuids);
3521	if (!id)
3522		return -ENODEV;
3523
3524	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
3525	opstate_init();
3526
3527	rc = sbridge_probe(id);
3528
3529	if (rc >= 0) {
3530		mce_register_decode_chain(&sbridge_mce_dec);
3531		if (edac_get_report_status() == EDAC_REPORTING_DISABLED)
3532			sbridge_printk(KERN_WARNING, "Loading driver, error reporting disabled.\n");
3533		return 0;
3534	}
3535
3536	sbridge_printk(KERN_ERR, "Failed to register device with error %d.\n",
3537		      rc);
3538
3539	return rc;
3540}
3541
3542/*
3543 *	sbridge_exit()	Module exit function
3544 *			Unregister the driver
3545 */
3546static void __exit sbridge_exit(void)
3547{
3548	edac_dbg(2, "\n");
3549	sbridge_remove();
3550	mce_unregister_decode_chain(&sbridge_mce_dec);
3551}
3552
3553module_init(sbridge_init);
3554module_exit(sbridge_exit);
3555
3556module_param(edac_op_state, int, 0444);
3557MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
3558
3559MODULE_LICENSE("GPL");
3560MODULE_AUTHOR("Mauro Carvalho Chehab");
3561MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)");
3562MODULE_DESCRIPTION("MC Driver for Intel Sandy Bridge and Ivy Bridge memory controllers - "
3563		   SBRIDGE_REVISION);