   1// SPDX-License-Identifier: GPL-2.0-only
   2/* Intel Sandy Bridge -EN/-EP/-EX Memory Controller kernel module
   3 *
   4 * This driver supports the memory controllers found on the Intel
   5 * Sandy Bridge processor family.
   6 *
   7 * Copyright (c) 2011 by:
   8 *	 Mauro Carvalho Chehab
   9 */
  10
  11#include <linux/module.h>
  12#include <linux/init.h>
  13#include <linux/pci.h>
  14#include <linux/pci_ids.h>
  15#include <linux/slab.h>
  16#include <linux/delay.h>
  17#include <linux/edac.h>
  18#include <linux/mmzone.h>
  19#include <linux/smp.h>
  20#include <linux/bitmap.h>
  21#include <linux/math64.h>
  22#include <linux/mod_devicetable.h>
  23#include <asm/cpu_device_id.h>
  24#include <asm/intel-family.h>
  25#include <asm/processor.h>
  26#include <asm/mce.h>
  27
  28#include "edac_module.h"
  29
  30/* Static vars */
  31static LIST_HEAD(sbridge_edac_list);
  32static char sb_msg[256];
  33static char sb_msg_full[512];
  34
  35/*
  36 * Alter this version for the module when modifications are made
  37 */
  38#define SBRIDGE_REVISION    " Ver: 1.1.2 "
  39#define EDAC_MOD_STR	    "sb_edac"
  40
  41/*
  42 * Debug macros
  43 */
  44#define sbridge_printk(level, fmt, arg...)			\
  45	edac_printk(level, "sbridge", fmt, ##arg)
  46
  47#define sbridge_mc_printk(mci, level, fmt, arg...)		\
  48	edac_mc_chipset_printk(mci, level, "sbridge", fmt, ##arg)
  49
  50/*
  51 * Get a bit field at register value <v>, from bit <lo> to bit <hi>
  52 */
  53#define GET_BITFIELD(v, lo, hi)	\
  54	(((v) & GENMASK_ULL(hi, lo)) >> (lo))
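    /*
     * Illustrative example: GET_BITFIELD(0x12345678, 8, 15) masks bits
     * 15..8 (giving 0x5600) and shifts right by 8, yielding 0x56.
     */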
  55
  56/* Devices 12 Function 6, Offsets 0x80 to 0xcc */
  57static const u32 sbridge_dram_rule[] = {
  58	0x80, 0x88, 0x90, 0x98, 0xa0,
  59	0xa8, 0xb0, 0xb8, 0xc0, 0xc8,
  60};
  61
  62static const u32 ibridge_dram_rule[] = {
  63	0x60, 0x68, 0x70, 0x78, 0x80,
  64	0x88, 0x90, 0x98, 0xa0,	0xa8,
  65	0xb0, 0xb8, 0xc0, 0xc8, 0xd0,
  66	0xd8, 0xe0, 0xe8, 0xf0, 0xf8,
  67};
  68
  69static const u32 knl_dram_rule[] = {
  70	0x60, 0x68, 0x70, 0x78, 0x80, /* 0-4 */
  71	0x88, 0x90, 0x98, 0xa0, 0xa8, /* 5-9 */
  72	0xb0, 0xb8, 0xc0, 0xc8, 0xd0, /* 10-14 */
  73	0xd8, 0xe0, 0xe8, 0xf0, 0xf8, /* 15-19 */
  74	0x100, 0x108, 0x110, 0x118,   /* 20-23 */
  75};
  76
  77#define DRAM_RULE_ENABLE(reg)	GET_BITFIELD(reg, 0,  0)
  78#define A7MODE(reg)		GET_BITFIELD(reg, 26, 26)
  79
  80static char *show_dram_attr(u32 attr)
  81{
  82	switch (attr) {
  83		case 0:
  84			return "DRAM";
  85		case 1:
  86			return "MMCFG";
  87		case 2:
  88			return "NXM";
  89		default:
  90			return "unknown";
  91	}
  92}
  93
  94static const u32 sbridge_interleave_list[] = {
  95	0x84, 0x8c, 0x94, 0x9c, 0xa4,
  96	0xac, 0xb4, 0xbc, 0xc4, 0xcc,
  97};
  98
  99static const u32 ibridge_interleave_list[] = {
 100	0x64, 0x6c, 0x74, 0x7c, 0x84,
 101	0x8c, 0x94, 0x9c, 0xa4, 0xac,
 102	0xb4, 0xbc, 0xc4, 0xcc, 0xd4,
 103	0xdc, 0xe4, 0xec, 0xf4, 0xfc,
 104};
 105
 106static const u32 knl_interleave_list[] = {
 107	0x64, 0x6c, 0x74, 0x7c, 0x84, /* 0-4 */
 108	0x8c, 0x94, 0x9c, 0xa4, 0xac, /* 5-9 */
 109	0xb4, 0xbc, 0xc4, 0xcc, 0xd4, /* 10-14 */
 110	0xdc, 0xe4, 0xec, 0xf4, 0xfc, /* 15-19 */
 111	0x104, 0x10c, 0x114, 0x11c,   /* 20-23 */
 112};
 113#define MAX_INTERLEAVE							\
 114	(MAX_T(unsigned int, ARRAY_SIZE(sbridge_interleave_list),	\
 115	       MAX_T(unsigned int, ARRAY_SIZE(ibridge_interleave_list),	\
 116		     ARRAY_SIZE(knl_interleave_list))))
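    /*
     * With the interleave lists above, MAX_INTERLEAVE evaluates to 24,
     * the number of entries in knl_interleave_list.
     */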
 117
 118struct interleave_pkg {
 119	unsigned char start;
 120	unsigned char end;
 121};
 122
 123static const struct interleave_pkg sbridge_interleave_pkg[] = {
 124	{ 0, 2 },
 125	{ 3, 5 },
 126	{ 8, 10 },
 127	{ 11, 13 },
 128	{ 16, 18 },
 129	{ 19, 21 },
 130	{ 24, 26 },
 131	{ 27, 29 },
 132};
 133
 134static const struct interleave_pkg ibridge_interleave_pkg[] = {
 135	{ 0, 3 },
 136	{ 4, 7 },
 137	{ 8, 11 },
 138	{ 12, 15 },
 139	{ 16, 19 },
 140	{ 20, 23 },
 141	{ 24, 27 },
 142	{ 28, 31 },
 143};
 144
 145static inline int sad_pkg(const struct interleave_pkg *table, u32 reg,
 146			  int interleave)
 147{
 148	return GET_BITFIELD(reg, table[interleave].start,
 149			    table[interleave].end);
 150}
 151
 152/* Devices 12 Function 7 */
 153
 154#define TOLM		0x80
 155#define TOHM		0x84
 156#define HASWELL_TOLM	0xd0
 157#define HASWELL_TOHM_0	0xd4
 158#define HASWELL_TOHM_1	0xd8
 159#define KNL_TOLM	0xd0
 160#define KNL_TOHM_0	0xd4
 161#define KNL_TOHM_1	0xd8
 162
 163#define GET_TOLM(reg)		((GET_BITFIELD(reg, 0,  3) << 28) | 0x3ffffff)
 164#define GET_TOHM(reg)		((GET_BITFIELD(reg, 0, 20) << 25) | 0x3ffffff)
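    /*
     * Example: GET_TOLM(0x4) == (0x4 << 28) | 0x3ffffff == 0x43ffffff;
     * the low bits are filled with 1s so the value can be used directly
     * as an inclusive address limit (see get_memory_layout()).
     */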
 165
 166/* Device 13 Function 6 */
 167
 168#define SAD_TARGET	0xf0
 169
 170#define SOURCE_ID(reg)		GET_BITFIELD(reg, 9, 11)
 171
 172#define SOURCE_ID_KNL(reg)	GET_BITFIELD(reg, 12, 14)
 173
 174#define SAD_CONTROL	0xf4
 175
 176/* Device 14 function 0 */
 177
 178static const u32 tad_dram_rule[] = {
 179	0x40, 0x44, 0x48, 0x4c,
 180	0x50, 0x54, 0x58, 0x5c,
 181	0x60, 0x64, 0x68, 0x6c,
 182};
 183#define MAX_TAD	ARRAY_SIZE(tad_dram_rule)
 184
 185#define TAD_LIMIT(reg)		((GET_BITFIELD(reg, 12, 31) << 26) | 0x3ffffff)
 186#define TAD_SOCK(reg)		GET_BITFIELD(reg, 10, 11)
 187#define TAD_CH(reg)		GET_BITFIELD(reg,  8,  9)
 188#define TAD_TGT3(reg)		GET_BITFIELD(reg,  6,  7)
 189#define TAD_TGT2(reg)		GET_BITFIELD(reg,  4,  5)
 190#define TAD_TGT1(reg)		GET_BITFIELD(reg,  2,  3)
 191#define TAD_TGT0(reg)		GET_BITFIELD(reg,  0,  1)
 192
 193/* Device 15, function 0 */
 194
 195#define MCMTR			0x7c
 196#define KNL_MCMTR		0x624
 197
 198#define IS_ECC_ENABLED(mcmtr)		GET_BITFIELD(mcmtr, 2, 2)
 199#define IS_LOCKSTEP_ENABLED(mcmtr)	GET_BITFIELD(mcmtr, 1, 1)
 200#define IS_CLOSE_PG(mcmtr)		GET_BITFIELD(mcmtr, 0, 0)
 201
 202/* Device 15, function 1 */
 203
 204#define RASENABLES		0xac
 205#define IS_MIRROR_ENABLED(reg)		GET_BITFIELD(reg, 0, 0)
 206
 207/* Device 15, functions 2-5 */
 208
 209static const int mtr_regs[] = {
 210	0x80, 0x84, 0x88,
 211};
 212
 213static const int knl_mtr_reg = 0xb60;
 214
 215#define RANK_DISABLE(mtr)		GET_BITFIELD(mtr, 16, 19)
 216#define IS_DIMM_PRESENT(mtr)		GET_BITFIELD(mtr, 14, 14)
 217#define RANK_CNT_BITS(mtr)		GET_BITFIELD(mtr, 12, 13)
 218#define RANK_WIDTH_BITS(mtr)		GET_BITFIELD(mtr, 2, 4)
 219#define COL_WIDTH_BITS(mtr)		GET_BITFIELD(mtr, 0, 1)
 220
 221static const u32 tad_ch_nilv_offset[] = {
 222	0x90, 0x94, 0x98, 0x9c,
 223	0xa0, 0xa4, 0xa8, 0xac,
 224	0xb0, 0xb4, 0xb8, 0xbc,
 225};
 226#define CHN_IDX_OFFSET(reg)		GET_BITFIELD(reg, 28, 29)
 227#define TAD_OFFSET(reg)			(GET_BITFIELD(reg,  6, 25) << 26)
 228
 229static const u32 rir_way_limit[] = {
 230	0x108, 0x10c, 0x110, 0x114, 0x118,
 231};
 232#define MAX_RIR_RANGES ARRAY_SIZE(rir_way_limit)
 233
 234#define IS_RIR_VALID(reg)	GET_BITFIELD(reg, 31, 31)
 235#define RIR_WAY(reg)		GET_BITFIELD(reg, 28, 29)
 236
 237#define MAX_RIR_WAY	8
 238
 239static const u32 rir_offset[MAX_RIR_RANGES][MAX_RIR_WAY] = {
 240	{ 0x120, 0x124, 0x128, 0x12c, 0x130, 0x134, 0x138, 0x13c },
 241	{ 0x140, 0x144, 0x148, 0x14c, 0x150, 0x154, 0x158, 0x15c },
 242	{ 0x160, 0x164, 0x168, 0x16c, 0x170, 0x174, 0x178, 0x17c },
 243	{ 0x180, 0x184, 0x188, 0x18c, 0x190, 0x194, 0x198, 0x19c },
 244	{ 0x1a0, 0x1a4, 0x1a8, 0x1ac, 0x1b0, 0x1b4, 0x1b8, 0x1bc },
 245};
 246
 247#define RIR_RNK_TGT(type, reg) (((type) == BROADWELL) ? \
 248	GET_BITFIELD(reg, 20, 23) : GET_BITFIELD(reg, 16, 19))
 249
 250#define RIR_OFFSET(type, reg) (((type) == HASWELL || (type) == BROADWELL) ? \
 251	GET_BITFIELD(reg,  2, 15) : GET_BITFIELD(reg,  2, 14))
 252
 253/* Device 16, functions 2-7 */
 254
 255/*
 256 * FIXME: Implement the error count reads directly
 257 */
 258
 259#define RANK_ODD_OV(reg)		GET_BITFIELD(reg, 31, 31)
 260#define RANK_ODD_ERR_CNT(reg)		GET_BITFIELD(reg, 16, 30)
 261#define RANK_EVEN_OV(reg)		GET_BITFIELD(reg, 15, 15)
 262#define RANK_EVEN_ERR_CNT(reg)		GET_BITFIELD(reg,  0, 14)
 263
 264#if 0 /* Currently unused */
 265static const u32 correrrcnt[] = {
 266	0x104, 0x108, 0x10c, 0x110,
 267};
 268
 269static const u32 correrrthrsld[] = {
 270	0x11c, 0x120, 0x124, 0x128,
 271};
 272#endif
 273
 274#define RANK_ODD_ERR_THRSLD(reg)	GET_BITFIELD(reg, 16, 30)
 275#define RANK_EVEN_ERR_THRSLD(reg)	GET_BITFIELD(reg,  0, 14)
 276
 277
 278/* Device 17, function 0 */
 279
 280#define SB_RANK_CFG_A		0x0328
 281
 282#define IB_RANK_CFG_A		0x0320
 283
 284/*
 285 * sbridge structs
 286 */
 287
 288#define NUM_CHANNELS		6	/* Max channels per MC */
 289#define MAX_DIMMS		3	/* Max DIMMS per channel */
 290#define KNL_MAX_CHAS		38	/* KNL max num. of Cache Home Agents */
 291#define KNL_MAX_CHANNELS	6	/* KNL max num. of PCI channels */
 292#define KNL_MAX_EDCS		8	/* Embedded DRAM controllers */
 293#define CHANNEL_UNSPECIFIED	0xf	/* Intel IA32 SDM 15-14 */
 294
 295enum type {
 296	SANDY_BRIDGE,
 297	IVY_BRIDGE,
 298	HASWELL,
 299	BROADWELL,
 300	KNIGHTS_LANDING,
 301};
 302
 303enum domain {
 304	IMC0 = 0,
 305	IMC1,
 306	SOCK,
 307};
 308
 309enum mirroring_mode {
 310	NON_MIRRORING,
 311	ADDR_RANGE_MIRRORING,
 312	FULL_MIRRORING,
 313};
 314
 315struct sbridge_pvt;
 316struct sbridge_info {
 317	enum type	type;
 318	u32		mcmtr;
 319	u32		rankcfgr;
 320	u64		(*get_tolm)(struct sbridge_pvt *pvt);
 321	u64		(*get_tohm)(struct sbridge_pvt *pvt);
 322	u64		(*rir_limit)(u32 reg);
 323	u64		(*sad_limit)(u32 reg);
 324	u32		(*interleave_mode)(u32 reg);
 325	u32		(*dram_attr)(u32 reg);
 326	const u32	*dram_rule;
 327	const u32	*interleave_list;
 328	const struct interleave_pkg *interleave_pkg;
 329	u8		max_sad;
 330	u8		(*get_node_id)(struct sbridge_pvt *pvt);
 331	u8		(*get_ha)(u8 bank);
 332	enum mem_type	(*get_memory_type)(struct sbridge_pvt *pvt);
 333	enum dev_type	(*get_width)(struct sbridge_pvt *pvt, u32 mtr);
 334	struct pci_dev	*pci_vtd;
 335};
 336
 337struct sbridge_channel {
 338	u32		ranks;
 339	u32		dimms;
 340	struct dimm {
 341		u32 rowbits;
 342		u32 colbits;
 343		u32 bank_xor_enable;
 344		u32 amap_fine;
 345	} dimm[MAX_DIMMS];
 346};
 347
 348struct pci_id_descr {
 349	int			dev_id;
 350	int			optional;
 351	enum domain		dom;
 352};
 353
 354struct pci_id_table {
 355	const struct pci_id_descr	*descr;
 356	int				n_devs_per_imc;
 357	int				n_devs_per_sock;
 358	int				n_imcs_per_sock;
 359	enum type			type;
 360};
 361
 362struct sbridge_dev {
 363	struct list_head	list;
 364	int			seg;
 365	u8			bus, mc;
 366	u8			node_id, source_id;
 367	struct pci_dev		**pdev;
 368	enum domain		dom;
 369	int			n_devs;
 370	int			i_devs;
 371	struct mem_ctl_info	*mci;
 372};
 373
 374struct knl_pvt {
 375	struct pci_dev          *pci_cha[KNL_MAX_CHAS];
 376	struct pci_dev          *pci_channel[KNL_MAX_CHANNELS];
 377	struct pci_dev          *pci_mc0;
 378	struct pci_dev          *pci_mc1;
 379	struct pci_dev          *pci_mc0_misc;
 380	struct pci_dev          *pci_mc1_misc;
 381	struct pci_dev          *pci_mc_info; /* tolm, tohm */
 382};
 383
 384struct sbridge_pvt {
 385	/* Devices per socket */
 386	struct pci_dev		*pci_ddrio;
 387	struct pci_dev		*pci_sad0, *pci_sad1;
 388	struct pci_dev		*pci_br0, *pci_br1;
 389	/* Devices per memory controller */
 390	struct pci_dev		*pci_ha, *pci_ta, *pci_ras;
 391	struct pci_dev		*pci_tad[NUM_CHANNELS];
 392
 393	struct sbridge_dev	*sbridge_dev;
 394
 395	struct sbridge_info	info;
 396	struct sbridge_channel	channel[NUM_CHANNELS];
 397
 398	/* Memory type detection */
 399	bool			is_cur_addr_mirrored, is_lockstep, is_close_pg;
 400	bool			is_chan_hash;
 401	enum mirroring_mode	mirror_mode;
 402
 403	/* Memory description */
 404	u64			tolm, tohm;
 405	struct knl_pvt knl;
 406};
 407
 408#define PCI_DESCR(device_id, opt, domain)	\
 409	.dev_id = (device_id),		\
 410	.optional = opt,	\
 411	.dom = domain
 412
 413static const struct pci_id_descr pci_dev_descr_sbridge[] = {
 414		/* Processor Home Agent */
 415	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0,   0, IMC0) },
 416
 417		/* Memory controller */
 418	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA,    0, IMC0) },
 419	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS,   0, IMC0) },
 420	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0,  0, IMC0) },
 421	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1,  0, IMC0) },
 422	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2,  0, IMC0) },
 423	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3,  0, IMC0) },
 424	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO, 1, SOCK) },
 425
 426		/* System Address Decoder */
 427	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0,      0, SOCK) },
 428	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1,      0, SOCK) },
 429
 430		/* Broadcast Registers */
 431	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_BR,        0, SOCK) },
 432};
 433
 434#define PCI_ID_TABLE_ENTRY(A, N, M, T) {	\
 435	.descr = A,			\
 436	.n_devs_per_imc = N,	\
 437	.n_devs_per_sock = ARRAY_SIZE(A),	\
 438	.n_imcs_per_sock = M,	\
 439	.type = T			\
 440}
 441
 442static const struct pci_id_table pci_dev_descr_sbridge_table[] = {
 443	PCI_ID_TABLE_ENTRY(pci_dev_descr_sbridge, ARRAY_SIZE(pci_dev_descr_sbridge), 1, SANDY_BRIDGE),
 444	{ NULL, }
 445};
 446
 447/* This changes depending on whether the part has 1HA or 2HA:
 448 * 1HA:
 449 *	0x0eb8 (17.0) is DDRIO0
 450 * 2HA:
 451 *	0x0ebc (17.4) is DDRIO0
 452 */
 453#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0	0x0eb8
 454#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0	0x0ebc
 455
 456/* pci ids */
 457#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0		0x0ea0
 458#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA		0x0ea8
 459#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS		0x0e71
 460#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0	0x0eaa
 461#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1	0x0eab
 462#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2	0x0eac
 463#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3	0x0ead
 464#define PCI_DEVICE_ID_INTEL_IBRIDGE_SAD			0x0ec8
 465#define PCI_DEVICE_ID_INTEL_IBRIDGE_BR0			0x0ec9
 466#define PCI_DEVICE_ID_INTEL_IBRIDGE_BR1			0x0eca
 467#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1		0x0e60
 468#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA		0x0e68
 469#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS		0x0e79
 470#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0	0x0e6a
 471#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1	0x0e6b
 472#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD2	0x0e6c
 473#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD3	0x0e6d
 474
 475static const struct pci_id_descr pci_dev_descr_ibridge[] = {
 476		/* Processor Home Agent */
 477	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0,        0, IMC0) },
 478	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1,        1, IMC1) },
 479
 480		/* Memory controller */
 481	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA,     0, IMC0) },
 482	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS,    0, IMC0) },
 483	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0,   0, IMC0) },
 484	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1,   0, IMC0) },
 485	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2,   0, IMC0) },
 486	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3,   0, IMC0) },
 487
 488		/* Optional, mode 2HA */
 489	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA,     1, IMC1) },
 490	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS,    1, IMC1) },
 491	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0,   1, IMC1) },
 492	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1,   1, IMC1) },
 493	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD2,   1, IMC1) },
 494	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD3,   1, IMC1) },
 495
 496	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0, 1, SOCK) },
 497	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0, 1, SOCK) },
 498
 499		/* System Address Decoder */
 500	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_SAD,            0, SOCK) },
 501
 502		/* Broadcast Registers */
 503	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_BR0,            1, SOCK) },
 504	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_BR1,            0, SOCK) },
 505
 506};
 507
 508static const struct pci_id_table pci_dev_descr_ibridge_table[] = {
 509	PCI_ID_TABLE_ENTRY(pci_dev_descr_ibridge, 12, 2, IVY_BRIDGE),
 510	{ NULL, }
 511};
 512
 513/* Haswell support */
 514/* EN processor:
 515 *	- 1 IMC
 516 *	- 3 DDR3 channels, 2 DPC per channel
 517 * EP processor:
 518 *	- 1 or 2 IMC
 519 *	- 4 DDR4 channels, 3 DPC per channel
 520 * EP 4S processor:
 521 *	- 2 IMC
 522 *	- 4 DDR4 channels, 3 DPC per channel
 523 * EX processor:
 524 *	- 2 IMC
 525 *	- each IMC interfaces with a SMI 2 channel
 526 *	- each SMI channel interfaces with a scalable memory buffer
 527 *	- each scalable memory buffer supports 4 DDR3/DDR4 channels, 3 DPC
 528 */
 529#define HASWELL_DDRCRCLKCONTROLS 0xa10 /* Ditto on Broadwell */
 530#define HASWELL_HASYSDEFEATURE2 0x84
 531#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_VTD_MISC 0x2f28
 532#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0	0x2fa0
 533#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1	0x2f60
 534#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA	0x2fa8
 535#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TM	0x2f71
 536#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA	0x2f68
 537#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TM	0x2f79
 538#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD0 0x2ffc
 539#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD1 0x2ffd
 540#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0 0x2faa
 541#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD1 0x2fab
 542#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD2 0x2fac
 543#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3 0x2fad
 544#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0 0x2f6a
 545#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD1 0x2f6b
 546#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD2 0x2f6c
 547#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD3 0x2f6d
 548#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0 0x2fbd
 549#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO1 0x2fbf
 550#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO2 0x2fb9
 551#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO3 0x2fbb
 552static const struct pci_id_descr pci_dev_descr_haswell[] = {
 553	/* first item must be the HA */
 554	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0,      0, IMC0) },
 555	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1,      1, IMC1) },
 556
 557	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA,   0, IMC0) },
 558	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TM,   0, IMC0) },
 559	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0, 0, IMC0) },
 560	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD1, 0, IMC0) },
 561	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD2, 1, IMC0) },
 562	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3, 1, IMC0) },
 563
 564	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA,   1, IMC1) },
 565	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TM,   1, IMC1) },
 566	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0, 1, IMC1) },
 567	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD1, 1, IMC1) },
 568	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD2, 1, IMC1) },
 569	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD3, 1, IMC1) },
 570
 571	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD0, 0, SOCK) },
 572	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD1, 0, SOCK) },
 573	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0,   1, SOCK) },
 574	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO1,   1, SOCK) },
 575	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO2,   1, SOCK) },
 576	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO3,   1, SOCK) },
 577};
 578
 579static const struct pci_id_table pci_dev_descr_haswell_table[] = {
 580	PCI_ID_TABLE_ENTRY(pci_dev_descr_haswell, 13, 2, HASWELL),
 581	{ NULL, }
 582};
 583
 584/* Knight's Landing Support */
 585/*
 586 * KNL's memory channels are swizzled between memory controllers.
 587 * MC0 is mapped to CH3,4,5 and MC1 is mapped to CH0,1,2
 588 */
 589#define knl_channel_remap(mc, chan) ((mc) ? (chan) : (chan) + 3)
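    /*
     * For example, knl_channel_remap(0, 0) == 3 and knl_channel_remap(1, 0) == 0:
     * MC0's first channel is CH3 while MC1's first channel is CH0.
     */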
 590
 591/* Memory controller, TAD tables, error injection - 2-8-0, 2-9-0 (2 of these) */
 592#define PCI_DEVICE_ID_INTEL_KNL_IMC_MC       0x7840
 593/* DRAM channel stuff; bank addrs, dimmmtr, etc. 2-8-2 - 2-9-4 (6 of these) */
 594#define PCI_DEVICE_ID_INTEL_KNL_IMC_CHAN     0x7843
 595/* kdrwdbu TAD limits/offsets, MCMTR - 2-10-1, 2-11-1 (2 of these) */
 596#define PCI_DEVICE_ID_INTEL_KNL_IMC_TA       0x7844
 597/* CHA broadcast registers, dram rules - 1-29-0 (1 of these) */
 598#define PCI_DEVICE_ID_INTEL_KNL_IMC_SAD0     0x782a
 599/* SAD target - 1-29-1 (1 of these) */
 600#define PCI_DEVICE_ID_INTEL_KNL_IMC_SAD1     0x782b
 601/* Caching / Home Agent */
 602#define PCI_DEVICE_ID_INTEL_KNL_IMC_CHA      0x782c
 603/* Device with TOLM and TOHM, 0-5-0 (1 of these) */
 604#define PCI_DEVICE_ID_INTEL_KNL_IMC_TOLHM    0x7810
 605
 606/*
 607 * KNL differs from SB, IB, and Haswell in that it has multiple
 608 * instances of the same device with the same device ID, so we handle that
 609 * by creating as many copies in the table as we expect to find.
 610 * (Entries with the same device ID must be grouped together.)
 611 */
 612
 613static const struct pci_id_descr pci_dev_descr_knl[] = {
 614	[0 ... 1]   = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_MC,    0, IMC0)},
 615	[2 ... 7]   = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_CHAN,  0, IMC0) },
 616	[8]	    = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_TA,    0, IMC0) },
 617	[9]	    = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_TOLHM, 0, IMC0) },
 618	[10]	    = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_SAD0,  0, SOCK) },
 619	[11]	    = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_SAD1,  0, SOCK) },
 620	[12 ... 49] = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_CHA,   0, SOCK) },
 621};
 622
 623static const struct pci_id_table pci_dev_descr_knl_table[] = {
 624	PCI_ID_TABLE_ENTRY(pci_dev_descr_knl, ARRAY_SIZE(pci_dev_descr_knl), 1, KNIGHTS_LANDING),
 625	{ NULL, }
 626};
 627
 628/*
 629 * Broadwell support
 630 *
 631 * DE processor:
 632 *	- 1 IMC
 633 *	- 2 DDR3 channels, 2 DPC per channel
 634 * EP processor:
 635 *	- 1 or 2 IMC
 636 *	- 4 DDR4 channels, 3 DPC per channel
 637 * EP 4S processor:
 638 *	- 2 IMC
 639 *	- 4 DDR4 channels, 3 DPC per channel
 640 * EX processor:
 641 *	- 2 IMC
 642 *	- each IMC interfaces with a SMI 2 channel
 643 *	- each SMI channel interfaces with a scalable memory buffer
 644 *	- each scalable memory buffer supports 4 DDR3/DDR4 channels, 3 DPC
 645 */
 646#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_VTD_MISC 0x6f28
 647#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0	0x6fa0
 648#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1	0x6f60
 649#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA	0x6fa8
 650#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TM	0x6f71
 651#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TA	0x6f68
 652#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TM	0x6f79
 653#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD0 0x6ffc
 654#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD1 0x6ffd
 655#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD0 0x6faa
 656#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD1 0x6fab
 657#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD2 0x6fac
 658#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD3 0x6fad
 659#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD0 0x6f6a
 660#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD1 0x6f6b
 661#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD2 0x6f6c
 662#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD3 0x6f6d
 663#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_DDRIO0 0x6faf
 664
 665static const struct pci_id_descr pci_dev_descr_broadwell[] = {
 666	/* first item must be the HA */
 667	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0,      0, IMC0) },
 668	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1,      1, IMC1) },
 669
 670	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA,   0, IMC0) },
 671	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TM,   0, IMC0) },
 672	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD0, 0, IMC0) },
 673	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD1, 0, IMC0) },
 674	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD2, 1, IMC0) },
 675	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD3, 1, IMC0) },
 676
 677	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TA,   1, IMC1) },
 678	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TM,   1, IMC1) },
 679	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD0, 1, IMC1) },
 680	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD1, 1, IMC1) },
 681	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD2, 1, IMC1) },
 682	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD3, 1, IMC1) },
 683
 684	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD0, 0, SOCK) },
 685	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD1, 0, SOCK) },
 686	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_DDRIO0,   1, SOCK) },
 687};
 688
 689static const struct pci_id_table pci_dev_descr_broadwell_table[] = {
 690	PCI_ID_TABLE_ENTRY(pci_dev_descr_broadwell, 10, 2, BROADWELL),
 691	{ NULL, }
 692};
 693
 694
 695/****************************************************************************
 696			Ancillary status routines
 697 ****************************************************************************/
 698
 699static inline int numrank(enum type type, u32 mtr)
 700{
 701	int ranks = (1 << RANK_CNT_BITS(mtr));
 702	int max = 4;
 703
 704	if (type == HASWELL || type == BROADWELL || type == KNIGHTS_LANDING)
 705		max = 8;
 706
 707	if (ranks > max) {
 708		edac_dbg(0, "Invalid number of ranks: %d (max = %i) raw value = %x (%04x)\n",
 709			 ranks, max, (unsigned int)RANK_CNT_BITS(mtr), mtr);
 710		return -EINVAL;
 711	}
 712
 713	return ranks;
 714}
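    /*
     * Example: a RANK_CNT_BITS field of 2 decodes to 1 << 2 = 4 ranks, which
     * is accepted on every supported type; a field of 3 (8 ranks) is only
     * accepted on Haswell, Broadwell and Knights Landing.
     */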
 715
 716static inline int numrow(u32 mtr)
 717{
 718	int rows = (RANK_WIDTH_BITS(mtr) + 12);
 719
 720	if (rows < 13 || rows > 18) {
 721		edac_dbg(0, "Invalid number of rows: %d (should be between 13 and 18) raw value = %x (%04x)\n",
 722			 rows, (unsigned int)RANK_WIDTH_BITS(mtr), mtr);
 723		return -EINVAL;
 724	}
 725
 726	return 1 << rows;
 727}
 728
 729static inline int numcol(u32 mtr)
 730{
 731	int cols = (COL_WIDTH_BITS(mtr) + 10);
 732
 733	if (cols > 12) {
 734		edac_dbg(0, "Invalid number of cols: %d (max = 12) raw value = %x (%04x)\n",
 735			 cols, (unsigned int)COL_WIDTH_BITS(mtr), mtr);
 736		return -EINVAL;
 737	}
 738
 739	return 1 << cols;
 740}
 741
 742static struct sbridge_dev *get_sbridge_dev(int seg, u8 bus, enum domain dom,
 743					   int multi_bus,
 744					   struct sbridge_dev *prev)
 745{
 746	struct sbridge_dev *sbridge_dev;
 747
 748	/*
 749	 * If we have devices scattered across several busses that pertain
 750	 * to the same memory controller, we'll lump them all together.
 751	 */
 752	if (multi_bus) {
 753		return list_first_entry_or_null(&sbridge_edac_list,
 754				struct sbridge_dev, list);
 755	}
 756
 757	sbridge_dev = list_entry(prev ? prev->list.next
 758				      : sbridge_edac_list.next, struct sbridge_dev, list);
 759
 760	list_for_each_entry_from(sbridge_dev, &sbridge_edac_list, list) {
 761		if ((sbridge_dev->seg == seg) && (sbridge_dev->bus == bus) &&
 762				(dom == SOCK || dom == sbridge_dev->dom))
 763			return sbridge_dev;
 764	}
 765
 766	return NULL;
 767}
 768
 769static struct sbridge_dev *alloc_sbridge_dev(int seg, u8 bus, enum domain dom,
 770					     const struct pci_id_table *table)
 771{
 772	struct sbridge_dev *sbridge_dev;
 773
 774	sbridge_dev = kzalloc(sizeof(*sbridge_dev), GFP_KERNEL);
 775	if (!sbridge_dev)
 776		return NULL;
 777
 778	sbridge_dev->pdev = kcalloc(table->n_devs_per_imc,
 779				    sizeof(*sbridge_dev->pdev),
 780				    GFP_KERNEL);
 781	if (!sbridge_dev->pdev) {
 782		kfree(sbridge_dev);
 783		return NULL;
 784	}
 785
 786	sbridge_dev->seg = seg;
 787	sbridge_dev->bus = bus;
 788	sbridge_dev->dom = dom;
 789	sbridge_dev->n_devs = table->n_devs_per_imc;
 790	list_add_tail(&sbridge_dev->list, &sbridge_edac_list);
 791
 792	return sbridge_dev;
 793}
 794
 795static void free_sbridge_dev(struct sbridge_dev *sbridge_dev)
 796{
 797	list_del(&sbridge_dev->list);
 798	kfree(sbridge_dev->pdev);
 799	kfree(sbridge_dev);
 800}
 801
 802static u64 sbridge_get_tolm(struct sbridge_pvt *pvt)
 803{
 804	u32 reg;
 805
 806	/* Address range is 32:28 */
 807	pci_read_config_dword(pvt->pci_sad1, TOLM, &reg);
 808	return GET_TOLM(reg);
 809}
 810
 811static u64 sbridge_get_tohm(struct sbridge_pvt *pvt)
 812{
 813	u32 reg;
 814
 815	pci_read_config_dword(pvt->pci_sad1, TOHM, &reg);
 816	return GET_TOHM(reg);
 817}
 818
 819static u64 ibridge_get_tolm(struct sbridge_pvt *pvt)
 820{
 821	u32 reg;
 822
 823	pci_read_config_dword(pvt->pci_br1, TOLM, &reg);
 824
 825	return GET_TOLM(reg);
 826}
 827
 828static u64 ibridge_get_tohm(struct sbridge_pvt *pvt)
 829{
 830	u32 reg;
 831
 832	pci_read_config_dword(pvt->pci_br1, TOHM, &reg);
 833
 834	return GET_TOHM(reg);
 835}
 836
 837static u64 rir_limit(u32 reg)
 838{
 839	return ((u64)GET_BITFIELD(reg,  1, 10) << 29) | 0x1fffffff;
 840}
 841
 842static u64 sad_limit(u32 reg)
 843{
 844	return (GET_BITFIELD(reg, 6, 25) << 26) | 0x3ffffff;
 845}
 846
 847static u32 interleave_mode(u32 reg)
 848{
 849	return GET_BITFIELD(reg, 1, 1);
 850}
 851
 852static u32 dram_attr(u32 reg)
 853{
 854	return GET_BITFIELD(reg, 2, 3);
 855}
 856
 857static u64 knl_sad_limit(u32 reg)
 858{
 859	return (GET_BITFIELD(reg, 7, 26) << 26) | 0x3ffffff;
 860}
 861
 862static u32 knl_interleave_mode(u32 reg)
 863{
 864	return GET_BITFIELD(reg, 1, 2);
 865}
 866
 867static const char * const knl_intlv_mode[] = {
 868	"[8:6]", "[10:8]", "[14:12]", "[32:30]"
 869};
 870
 871static const char *get_intlv_mode_str(u32 reg, enum type t)
 872{
 873	if (t == KNIGHTS_LANDING)
 874		return knl_intlv_mode[knl_interleave_mode(reg)];
 875	else
 876		return interleave_mode(reg) ? "[8:6]" : "[8:6]XOR[18:16]";
 877}
 878
 879static u32 dram_attr_knl(u32 reg)
 880{
 881	return GET_BITFIELD(reg, 3, 4);
 882}
 883
 884
 885static enum mem_type get_memory_type(struct sbridge_pvt *pvt)
 886{
 887	u32 reg;
 888	enum mem_type mtype;
 889
 890	if (pvt->pci_ddrio) {
 891		pci_read_config_dword(pvt->pci_ddrio, pvt->info.rankcfgr,
 892				      &reg);
 893		if (GET_BITFIELD(reg, 11, 11))
 894			/* FIXME: Can also be LRDIMM */
 895			mtype = MEM_RDDR3;
 896		else
 897			mtype = MEM_DDR3;
 898	} else
 899		mtype = MEM_UNKNOWN;
 900
 901	return mtype;
 902}
 903
 904static enum mem_type haswell_get_memory_type(struct sbridge_pvt *pvt)
 905{
 906	u32 reg;
 907	bool registered = false;
 908	enum mem_type mtype = MEM_UNKNOWN;
 909
 910	if (!pvt->pci_ddrio)
 911		goto out;
 912
 913	pci_read_config_dword(pvt->pci_ddrio,
 914			      HASWELL_DDRCRCLKCONTROLS, &reg);
 915	/* Is_Rdimm */
 916	if (GET_BITFIELD(reg, 16, 16))
 917		registered = true;
 918
 919	pci_read_config_dword(pvt->pci_ta, MCMTR, &reg);
 920	if (GET_BITFIELD(reg, 14, 14)) {
 921		if (registered)
 922			mtype = MEM_RDDR4;
 923		else
 924			mtype = MEM_DDR4;
 925	} else {
 926		if (registered)
 927			mtype = MEM_RDDR3;
 928		else
 929			mtype = MEM_DDR3;
 930	}
 931
 932out:
 933	return mtype;
 934}
 935
 936static enum dev_type knl_get_width(struct sbridge_pvt *pvt, u32 mtr)
 937{
 938	/* for KNL value is fixed */
 939	return DEV_X16;
 940}
 941
 942static enum dev_type sbridge_get_width(struct sbridge_pvt *pvt, u32 mtr)
 943{
 944	/* there's no way to figure out */
 945	return DEV_UNKNOWN;
 946}
 947
 948static enum dev_type __ibridge_get_width(u32 mtr)
 949{
 950	enum dev_type type = DEV_UNKNOWN;
 951
 952	switch (mtr) {
 953	case 2:
 954		type = DEV_X16;
 955		break;
 956	case 1:
 957		type = DEV_X8;
 958		break;
 959	case 0:
 960		type = DEV_X4;
 961		break;
 962	}
 963
 964	return type;
 965}
 966
 967static enum dev_type ibridge_get_width(struct sbridge_pvt *pvt, u32 mtr)
 968{
 969	/*
 970	 * ddr3_width in the documentation, but also valid for DDR4 on
 971	 * Haswell
 972	 */
 973	return __ibridge_get_width(GET_BITFIELD(mtr, 7, 8));
 974}
 975
 976static enum dev_type broadwell_get_width(struct sbridge_pvt *pvt, u32 mtr)
 977{
 978	/* ddr3_width in the documentation, but also valid for DDR4 */
 979	return __ibridge_get_width(GET_BITFIELD(mtr, 8, 9));
 980}
 981
 982static enum mem_type knl_get_memory_type(struct sbridge_pvt *pvt)
 983{
 984	/* DDR4 RDIMMS and LRDIMMS are supported */
 985	return MEM_RDDR4;
 986}
 987
 988static u8 get_node_id(struct sbridge_pvt *pvt)
 989{
 990	u32 reg;
 991	pci_read_config_dword(pvt->pci_br0, SAD_CONTROL, &reg);
 992	return GET_BITFIELD(reg, 0, 2);
 993}
 994
 995static u8 haswell_get_node_id(struct sbridge_pvt *pvt)
 996{
 997	u32 reg;
 998
 999	pci_read_config_dword(pvt->pci_sad1, SAD_CONTROL, &reg);
1000	return GET_BITFIELD(reg, 0, 3);
1001}
1002
1003static u8 knl_get_node_id(struct sbridge_pvt *pvt)
1004{
1005	u32 reg;
1006
1007	pci_read_config_dword(pvt->pci_sad1, SAD_CONTROL, &reg);
1008	return GET_BITFIELD(reg, 0, 2);
1009}
1010
1011/*
1012 * Use the reporting bank number to determine which memory
1013 * controller (also known as "ha" for "home agent"). Sandy
1014 * Bridge only has one memory controller per socket, so the
1015 * answer is always zero.
1016 */
1017static u8 sbridge_get_ha(u8 bank)
1018{
1019	return 0;
1020}
1021
1022/*
1023 * On Ivy Bridge, Haswell and Broadwell the error may be in a
1024 * home agent bank (7, 8), or one of the per-channel memory
1025 * controller banks (9 .. 16).
1026 */
1027static u8 ibridge_get_ha(u8 bank)
1028{
1029	switch (bank) {
1030	case 7 ... 8:
1031		return bank - 7;
1032	case 9 ... 16:
1033		return (bank - 9) / 4;
1034	default:
1035		return 0xff;
1036	}
1037}
1038
1039/* Not used, but included for safety/symmetry */
1040static u8 knl_get_ha(u8 bank)
1041{
1042	return 0xff;
1043}
1044
1045static u64 haswell_get_tolm(struct sbridge_pvt *pvt)
1046{
1047	u32 reg;
1048
1049	pci_read_config_dword(pvt->info.pci_vtd, HASWELL_TOLM, &reg);
1050	return (GET_BITFIELD(reg, 26, 31) << 26) | 0x3ffffff;
1051}
1052
1053static u64 haswell_get_tohm(struct sbridge_pvt *pvt)
1054{
1055	u64 rc;
1056	u32 reg;
1057
1058	pci_read_config_dword(pvt->info.pci_vtd, HASWELL_TOHM_0, &reg);
1059	rc = GET_BITFIELD(reg, 26, 31);
1060	pci_read_config_dword(pvt->info.pci_vtd, HASWELL_TOHM_1, &reg);
1061	rc = ((reg << 6) | rc) << 26;
1062
1063	return rc | 0x3ffffff;
1064}
1065
1066static u64 knl_get_tolm(struct sbridge_pvt *pvt)
1067{
1068	u32 reg;
1069
1070	pci_read_config_dword(pvt->knl.pci_mc_info, KNL_TOLM, &reg);
1071	return (GET_BITFIELD(reg, 26, 31) << 26) | 0x3ffffff;
1072}
1073
1074static u64 knl_get_tohm(struct sbridge_pvt *pvt)
1075{
1076	u64 rc;
1077	u32 reg_lo, reg_hi;
1078
1079	pci_read_config_dword(pvt->knl.pci_mc_info, KNL_TOHM_0, &reg_lo);
1080	pci_read_config_dword(pvt->knl.pci_mc_info, KNL_TOHM_1, &reg_hi);
1081	rc = ((u64)reg_hi << 32) | reg_lo;
1082	return rc | 0x3ffffff;
1083}
1084
1085
1086static u64 haswell_rir_limit(u32 reg)
1087{
1088	return (((u64)GET_BITFIELD(reg,  1, 11) + 1) << 29) - 1;
1089}
1090
1091static inline u8 sad_pkg_socket(u8 pkg)
1092{
1093	/* on Ivy Bridge, nodeID is SASS, where A is HA and S is node id */
1094	return ((pkg >> 3) << 2) | (pkg & 0x3);
1095}
1096
1097static inline u8 sad_pkg_ha(u8 pkg)
1098{
1099	return (pkg >> 2) & 0x1;
1100}
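    /*
     * Example of the SASS layout used by the two helpers above: for
     * pkg = 0b1101, sad_pkg_socket() returns 0b101 (node id 5) and
     * sad_pkg_ha() returns 1 (home agent 1).
     */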
1101
1102static int haswell_chan_hash(int idx, u64 addr)
1103{
1104	int i;
1105
1106	/*
1107	 * XOR even bits from 12:26 to bit0 of idx,
1108	 *     odd bits from 13:27 to bit1
1109	 */
1110	for (i = 12; i < 28; i += 2)
1111		idx ^= (addr >> i) & 3;
1112
1113	return idx;
1114}
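    /*
     * Example: for idx = 0 and addr = 0x1000 (only bit 12 set), the loop
     * XORs in (addr >> 12) & 3 == 1 and nothing else, so the hashed
     * channel index becomes 1.
     */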
1115
1116/* Low bits of TAD limit, and some metadata. */
1117static const u32 knl_tad_dram_limit_lo[] = {
1118	0x400, 0x500, 0x600, 0x700,
1119	0x800, 0x900, 0xa00, 0xb00,
1120};
1121
1122/* Low bits of TAD offset. */
1123static const u32 knl_tad_dram_offset_lo[] = {
1124	0x404, 0x504, 0x604, 0x704,
1125	0x804, 0x904, 0xa04, 0xb04,
1126};
1127
1128/* High 16 bits of TAD limit and offset. */
1129static const u32 knl_tad_dram_hi[] = {
1130	0x408, 0x508, 0x608, 0x708,
1131	0x808, 0x908, 0xa08, 0xb08,
1132};
1133
1134/* Number of ways a tad entry is interleaved. */
1135static const u32 knl_tad_ways[] = {
1136	8, 6, 4, 3, 2, 1,
1137};
1138
1139/*
1140 * Retrieve the n'th Target Address Decode table entry
1141 * from the memory controller's TAD table.
1142 *
1143 * @pvt:	driver private data
1144 * @entry:	which entry you want to retrieve
1145 * @mc:		which memory controller (0 or 1)
1146 * @offset:	output tad range offset
1147 * @limit:	output address of first byte above tad range
1148 * @ways:	output number of interleave ways
1149 *
1150 * The offset value has curious semantics.  It's a sort of running total
1151 * of the sizes of all the memory regions that aren't mapped in this
1152 * tad table.
1153 */
1154static int knl_get_tad(const struct sbridge_pvt *pvt,
1155		const int entry,
1156		const int mc,
1157		u64 *offset,
1158		u64 *limit,
1159		int *ways)
1160{
1161	u32 reg_limit_lo, reg_offset_lo, reg_hi;
1162	struct pci_dev *pci_mc;
1163	int way_id;
1164
1165	switch (mc) {
1166	case 0:
1167		pci_mc = pvt->knl.pci_mc0;
1168		break;
1169	case 1:
1170		pci_mc = pvt->knl.pci_mc1;
1171		break;
1172	default:
1173		WARN_ON(1);
1174		return -EINVAL;
1175	}
1176
1177	pci_read_config_dword(pci_mc,
1178			knl_tad_dram_limit_lo[entry], &reg_limit_lo);
1179	pci_read_config_dword(pci_mc,
1180			knl_tad_dram_offset_lo[entry], &reg_offset_lo);
1181	pci_read_config_dword(pci_mc,
1182			knl_tad_dram_hi[entry], &reg_hi);
1183
1184	/* Is this TAD entry enabled? */
1185	if (!GET_BITFIELD(reg_limit_lo, 0, 0))
1186		return -ENODEV;
1187
1188	way_id = GET_BITFIELD(reg_limit_lo, 3, 5);
1189
1190	if (way_id < ARRAY_SIZE(knl_tad_ways)) {
1191		*ways = knl_tad_ways[way_id];
1192	} else {
1193		*ways = 0;
1194		sbridge_printk(KERN_ERR,
1195				"Unexpected value %d in mc_tad_limit_lo wayness field\n",
1196				way_id);
1197		return -ENODEV;
1198	}
1199
1200	/*
1201	 * The least significant 6 bits of base and limit are truncated.
1202	 * For limit, we fill the missing bits with 1s.
1203	 */
1204	*offset = ((u64) GET_BITFIELD(reg_offset_lo, 6, 31) << 6) |
1205				((u64) GET_BITFIELD(reg_hi, 0,  15) << 32);
1206	*limit = ((u64) GET_BITFIELD(reg_limit_lo,  6, 31) << 6) | 63 |
1207				((u64) GET_BITFIELD(reg_hi, 16, 31) << 32);
1208
1209	return 0;
1210}
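    /*
     * Illustrative example of the offset ("deadspace") semantics described
     * above: if this table maps 2 GiB starting at 0, followed by a 1 GiB
     * hole, the next entry reports an offset of 1 GiB -- the total amount
     * of address space below its limit that is not mapped by this table.
     */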
1211
1212/* Determine which memory controller is responsible for a given channel. */
1213static int knl_channel_mc(int channel)
1214{
1215	WARN_ON(channel < 0 || channel >= 6);
1216
1217	return channel < 3 ? 1 : 0;
1218}
1219
1220/*
1221 * Get the Nth entry from EDC_ROUTE_TABLE register.
1222 * (This is the per-tile mapping of logical interleave targets to
1223 *  physical EDC modules.)
1224 *
1225 * entry 0: 0:2
1226 *       1: 3:5
1227 *       2: 6:8
1228 *       3: 9:11
1229 *       4: 12:14
1230 *       5: 15:17
1231 *       6: 18:20
1232 *       7: 21:23
1233 * reserved: 24:31
1234 */
1235static u32 knl_get_edc_route(int entry, u32 reg)
1236{
1237	WARN_ON(entry >= KNL_MAX_EDCS);
1238	return GET_BITFIELD(reg, entry*3, (entry*3)+2);
1239}
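    /*
     * Example: entry 2 occupies bits 6:8 of the register, so
     * knl_get_edc_route(2, reg) is GET_BITFIELD(reg, 6, 8).
     */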
1240
1241/*
1242 * Get the Nth entry from MC_ROUTE_TABLE register.
1243 * (This is the per-tile mapping of logical interleave targets to
1244 *  physical DRAM channels.)
1245 *
1246 * entry 0: mc 0:2   channel 18:19
1247 *       1: mc 3:5   channel 20:21
1248 *       2: mc 6:8   channel 22:23
1249 *       3: mc 9:11  channel 24:25
1250 *       4: mc 12:14 channel 26:27
1251 *       5: mc 15:17 channel 28:29
1252 * reserved: 30:31
1253 *
1254 * Though we have 3 bits to identify the MC, we should only see
1255 * the values 0 or 1.
1256 */
1257
1258static u32 knl_get_mc_route(int entry, u32 reg)
1259{
1260	int mc, chan;
1261
1262	WARN_ON(entry >= KNL_MAX_CHANNELS);
1263
1264	mc = GET_BITFIELD(reg, entry*3, (entry*3)+2);
1265	chan = GET_BITFIELD(reg, (entry*2) + 18, (entry*2) + 18 + 1);
1266
1267	return knl_channel_remap(mc, chan);
1268}
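    /*
     * Example: for entry 1 the mc target is read from bits 3:5 and the
     * channel from bits 20:21, matching the table above, before the pair
     * is passed through knl_channel_remap().
     */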
1269
1270/*
1271 * Render the EDC_ROUTE register in human-readable form.
1272 * Output string s should be at least KNL_MAX_EDCS*2 bytes.
1273 */
1274static void knl_show_edc_route(u32 reg, char *s)
1275{
1276	int i;
1277
1278	for (i = 0; i < KNL_MAX_EDCS; i++) {
1279		s[i*2] = knl_get_edc_route(i, reg) + '0';
1280		s[i*2+1] = '-';
1281	}
1282
1283	s[KNL_MAX_EDCS*2 - 1] = '\0';
1284}
1285
1286/*
1287 * Render the MC_ROUTE register in human-readable form.
1288 * Output string s should be at least KNL_MAX_CHANNELS*2 bytes.
1289 */
1290static void knl_show_mc_route(u32 reg, char *s)
1291{
1292	int i;
1293
1294	for (i = 0; i < KNL_MAX_CHANNELS; i++) {
1295		s[i*2] = knl_get_mc_route(i, reg) + '0';
1296		s[i*2+1] = '-';
1297	}
1298
1299	s[KNL_MAX_CHANNELS*2 - 1] = '\0';
1300}
1301
1302#define KNL_EDC_ROUTE 0xb8
1303#define KNL_MC_ROUTE 0xb4
1304
1305/* Is this dram rule backed by regular DRAM in flat mode? */
1306#define KNL_EDRAM(reg) GET_BITFIELD(reg, 29, 29)
1307
1308/* Is this dram rule cached? */
1309#define KNL_CACHEABLE(reg) GET_BITFIELD(reg, 28, 28)
1310
1311/* Is this rule backed by edc ? */
1312#define KNL_EDRAM_ONLY(reg) GET_BITFIELD(reg, 29, 29)
1313
1314/* Is this rule backed by DRAM, cacheable in EDRAM? */
1315#define KNL_CACHEABLE(reg) GET_BITFIELD(reg, 28, 28)
1316
1317/* Is this rule mod3? */
1318#define KNL_MOD3(reg) GET_BITFIELD(reg, 27, 27)
1319
1320/*
1321 * Figure out how big our RAM modules are.
1322 *
1323 * The DIMMMTR register in KNL doesn't tell us the size of the DIMMs, so we
1324 * have to figure this out from the SAD rules, interleave lists, route tables,
1325 * and TAD rules.
1326 *
1327 * SAD rules can have holes in them (e.g. the 3G-4G hole), so we have to
1328 * inspect the TAD rules to figure out how large the SAD regions really are.
1329 *
1330 * When we know the real size of a SAD region and how many ways it's
1331 * interleaved, we know the individual contribution of each channel to
1332 * TAD is size/ways.
1333 *
1334 * Finally, we have to check whether each channel participates in each SAD
1335 * region.
1336 *
1337 * Fortunately, KNL only supports one DIMM per channel, so once we know how
1338 * much memory the channel uses, we know the DIMM is at least that large.
1339 * (The BIOS might possibly choose not to map all available memory, in which
1340 * case we will underreport the size of the DIMM.)
1341 *
1342 * In theory, we could try to determine the EDC sizes as well, but that would
1343 * only work in flat mode, not in cache mode.
1344 *
1345 * @mc_sizes: Output sizes of channels (must have space for KNL_MAX_CHANNELS
1346 *            elements)
1347 */
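    /*
     * Worked example of the arithmetic below: if the TAD tables of one
     * memory controller account for 8 GiB of real memory inside a SAD
     * region that is interleaved 4 ways, each participating channel on
     * that controller is credited with 8 GiB / 4 = 2 GiB.
     */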
1348static int knl_get_dimm_capacity(struct sbridge_pvt *pvt, u64 *mc_sizes)
1349{
1350	u64 sad_base, sad_limit = 0;
1351	u64 tad_base, tad_size, tad_limit, tad_deadspace, tad_livespace;
1352	int sad_rule = 0;
1353	int tad_rule = 0;
1354	int intrlv_ways, tad_ways;
1355	u32 first_pkg, pkg;
1356	int i;
1357	u64 sad_actual_size[2]; /* sad size accounting for holes, per mc */
1358	u32 dram_rule, interleave_reg;
1359	u32 mc_route_reg[KNL_MAX_CHAS];
1360	u32 edc_route_reg[KNL_MAX_CHAS];
1361	int edram_only;
1362	char edc_route_string[KNL_MAX_EDCS*2];
1363	char mc_route_string[KNL_MAX_CHANNELS*2];
1364	int cur_reg_start;
1365	int mc;
1366	int channel;
1367	int participants[KNL_MAX_CHANNELS];
1368
1369	for (i = 0; i < KNL_MAX_CHANNELS; i++)
1370		mc_sizes[i] = 0;
1371
1372	/* Read the EDC route table in each CHA. */
1373	cur_reg_start = 0;
1374	for (i = 0; i < KNL_MAX_CHAS; i++) {
1375		pci_read_config_dword(pvt->knl.pci_cha[i],
1376				KNL_EDC_ROUTE, &edc_route_reg[i]);
1377
1378		if (i > 0 && edc_route_reg[i] != edc_route_reg[i-1]) {
1379			knl_show_edc_route(edc_route_reg[i-1],
1380					edc_route_string);
1381			if (cur_reg_start == i-1)
1382				edac_dbg(0, "edc route table for CHA %d: %s\n",
1383					cur_reg_start, edc_route_string);
1384			else
1385				edac_dbg(0, "edc route table for CHA %d-%d: %s\n",
1386					cur_reg_start, i-1, edc_route_string);
1387			cur_reg_start = i;
1388		}
1389	}
1390	knl_show_edc_route(edc_route_reg[i-1], edc_route_string);
1391	if (cur_reg_start == i-1)
1392		edac_dbg(0, "edc route table for CHA %d: %s\n",
1393			cur_reg_start, edc_route_string);
1394	else
1395		edac_dbg(0, "edc route table for CHA %d-%d: %s\n",
1396			cur_reg_start, i-1, edc_route_string);
1397
1398	/* Read the MC route table in each CHA. */
1399	cur_reg_start = 0;
1400	for (i = 0; i < KNL_MAX_CHAS; i++) {
1401		pci_read_config_dword(pvt->knl.pci_cha[i],
1402			KNL_MC_ROUTE, &mc_route_reg[i]);
1403
1404		if (i > 0 && mc_route_reg[i] != mc_route_reg[i-1]) {
1405			knl_show_mc_route(mc_route_reg[i-1], mc_route_string);
1406			if (cur_reg_start == i-1)
1407				edac_dbg(0, "mc route table for CHA %d: %s\n",
1408					cur_reg_start, mc_route_string);
1409			else
1410				edac_dbg(0, "mc route table for CHA %d-%d: %s\n",
1411					cur_reg_start, i-1, mc_route_string);
1412			cur_reg_start = i;
1413		}
1414	}
1415	knl_show_mc_route(mc_route_reg[i-1], mc_route_string);
1416	if (cur_reg_start == i-1)
1417		edac_dbg(0, "mc route table for CHA %d: %s\n",
1418			cur_reg_start, mc_route_string);
1419	else
1420		edac_dbg(0, "mc route table for CHA %d-%d: %s\n",
1421			cur_reg_start, i-1, mc_route_string);
1422
1423	/* Process DRAM rules */
1424	for (sad_rule = 0; sad_rule < pvt->info.max_sad; sad_rule++) {
1425		/* previous limit becomes the new base */
1426		sad_base = sad_limit;
1427
1428		pci_read_config_dword(pvt->pci_sad0,
1429			pvt->info.dram_rule[sad_rule], &dram_rule);
1430
1431		if (!DRAM_RULE_ENABLE(dram_rule))
1432			break;
1433
1434		edram_only = KNL_EDRAM_ONLY(dram_rule);
1435
1436		sad_limit = pvt->info.sad_limit(dram_rule)+1;
1437
1438		pci_read_config_dword(pvt->pci_sad0,
1439			pvt->info.interleave_list[sad_rule], &interleave_reg);
1440
1441		/*
1442		 * Find out how many ways this dram rule is interleaved.
1443		 * We stop when we see the first channel again.
1444		 */
1445		first_pkg = sad_pkg(pvt->info.interleave_pkg,
1446						interleave_reg, 0);
1447		for (intrlv_ways = 1; intrlv_ways < 8; intrlv_ways++) {
1448			pkg = sad_pkg(pvt->info.interleave_pkg,
1449						interleave_reg, intrlv_ways);
1450
1451			if ((pkg & 0x8) == 0) {
1452				/*
1453				 * 0 bit means memory is non-local,
1454				 * which KNL doesn't support
1455				 */
1456				edac_dbg(0, "Unexpected interleave target %d\n",
1457					pkg);
1458				return -1;
1459			}
1460
1461			if (pkg == first_pkg)
1462				break;
1463		}
1464		if (KNL_MOD3(dram_rule))
1465			intrlv_ways *= 3;
1466
1467		edac_dbg(3, "dram rule %d (base 0x%llx, limit 0x%llx), %d way interleave%s\n",
1468			sad_rule,
1469			sad_base,
1470			sad_limit,
1471			intrlv_ways,
1472			edram_only ? ", EDRAM" : "");
1473
1474		/*
1475		 * Find out how big the SAD region really is by iterating
1476		 * over TAD tables (SAD regions may contain holes).
1477		 * Each memory controller might have a different TAD table, so
1478		 * we have to look at both.
1479		 *
1480		 * Livespace is the memory that's mapped in this TAD table,
1481		 * deadspace is the holes (this could be the MMIO hole, or it
1482		 * could be memory that's mapped by the other TAD table but
1483		 * not this one).
1484		 */
1485		for (mc = 0; mc < 2; mc++) {
1486			sad_actual_size[mc] = 0;
1487			tad_livespace = 0;
1488			for (tad_rule = 0;
1489					tad_rule < ARRAY_SIZE(
1490						knl_tad_dram_limit_lo);
1491					tad_rule++) {
1492				if (knl_get_tad(pvt,
1493						tad_rule,
1494						mc,
1495						&tad_deadspace,
1496						&tad_limit,
1497						&tad_ways))
1498					break;
1499
1500				tad_size = (tad_limit+1) -
1501					(tad_livespace + tad_deadspace);
1502				tad_livespace += tad_size;
1503				tad_base = (tad_limit+1) - tad_size;
1504
1505				if (tad_base < sad_base) {
1506					if (tad_limit > sad_base)
1507						edac_dbg(0, "TAD region overlaps lower SAD boundary -- TAD tables may be configured incorrectly.\n");
1508				} else if (tad_base < sad_limit) {
1509					if (tad_limit+1 > sad_limit) {
1510						edac_dbg(0, "TAD region overlaps upper SAD boundary -- TAD tables may be configured incorrectly.\n");
1511					} else {
1512						/* TAD region is completely inside SAD region */
1513						edac_dbg(3, "TAD region %d 0x%llx - 0x%llx (%lld bytes) table%d\n",
1514							tad_rule, tad_base,
1515							tad_limit, tad_size,
1516							mc);
1517						sad_actual_size[mc] += tad_size;
1518					}
1519				}
1520			}
1521		}
1522
1523		for (mc = 0; mc < 2; mc++) {
1524			edac_dbg(3, " total TAD DRAM footprint in table%d : 0x%llx (%lld bytes)\n",
1525				mc, sad_actual_size[mc], sad_actual_size[mc]);
1526		}
1527
1528		/* Ignore EDRAM rule */
1529		if (edram_only)
1530			continue;
1531
1532		/* Figure out which channels participate in interleave. */
1533		for (channel = 0; channel < KNL_MAX_CHANNELS; channel++)
1534			participants[channel] = 0;
1535
1536		/* For each channel, does at least one CHA have
1537		 * this channel mapped to the given target?
1538		 */
1539		for (channel = 0; channel < KNL_MAX_CHANNELS; channel++) {
1540			int target;
1541			int cha;
1542
1543			for (target = 0; target < KNL_MAX_CHANNELS; target++) {
1544				for (cha = 0; cha < KNL_MAX_CHAS; cha++) {
1545					if (knl_get_mc_route(target,
1546						mc_route_reg[cha]) == channel
1547						&& !participants[channel]) {
1548						participants[channel] = 1;
1549						break;
1550					}
1551				}
1552			}
1553		}
1554
1555		for (channel = 0; channel < KNL_MAX_CHANNELS; channel++) {
1556			mc = knl_channel_mc(channel);
1557			if (participants[channel]) {
1558				edac_dbg(4, "mc channel %d contributes %lld bytes via sad entry %d\n",
1559					channel,
1560					sad_actual_size[mc]/intrlv_ways,
1561					sad_rule);
1562				mc_sizes[channel] +=
1563					sad_actual_size[mc]/intrlv_ways;
1564			}
1565		}
1566	}
1567
1568	return 0;
1569}
1570
1571static void get_source_id(struct mem_ctl_info *mci)
1572{
1573	struct sbridge_pvt *pvt = mci->pvt_info;
1574	u32 reg;
1575
1576	if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL ||
1577	    pvt->info.type == KNIGHTS_LANDING)
1578		pci_read_config_dword(pvt->pci_sad1, SAD_TARGET, &reg);
1579	else
1580		pci_read_config_dword(pvt->pci_br0, SAD_TARGET, &reg);
1581
1582	if (pvt->info.type == KNIGHTS_LANDING)
1583		pvt->sbridge_dev->source_id = SOURCE_ID_KNL(reg);
1584	else
1585		pvt->sbridge_dev->source_id = SOURCE_ID(reg);
1586}
1587
1588static int __populate_dimms(struct mem_ctl_info *mci,
1589			    u64 knl_mc_sizes[KNL_MAX_CHANNELS],
1590			    enum edac_type mode)
1591{
1592	struct sbridge_pvt *pvt = mci->pvt_info;
1593	int channels = pvt->info.type == KNIGHTS_LANDING ? KNL_MAX_CHANNELS
1594							 : NUM_CHANNELS;
1595	unsigned int i, j, banks, ranks, rows, cols, npages;
1596	struct dimm_info *dimm;
1597	enum mem_type mtype;
1598	u64 size;
1599
1600	mtype = pvt->info.get_memory_type(pvt);
1601	if (mtype == MEM_RDDR3 || mtype == MEM_RDDR4)
1602		edac_dbg(0, "Memory is registered\n");
1603	else if (mtype == MEM_UNKNOWN)
1604		edac_dbg(0, "Cannot determine memory type\n");
1605	else
1606		edac_dbg(0, "Memory is unregistered\n");
1607
1608	if (mtype == MEM_DDR4 || mtype == MEM_RDDR4)
1609		banks = 16;
1610	else
1611		banks = 8;
1612
1613	for (i = 0; i < channels; i++) {
1614		u32 mtr, amap = 0;
1615
1616		int max_dimms_per_channel;
1617
1618		if (pvt->info.type == KNIGHTS_LANDING) {
1619			max_dimms_per_channel = 1;
1620			if (!pvt->knl.pci_channel[i])
1621				continue;
1622		} else {
1623			max_dimms_per_channel = ARRAY_SIZE(mtr_regs);
1624			if (!pvt->pci_tad[i])
1625				continue;
1626			pci_read_config_dword(pvt->pci_tad[i], 0x8c, &amap);
1627		}
1628
1629		for (j = 0; j < max_dimms_per_channel; j++) {
1630			dimm = edac_get_dimm(mci, i, j, 0);
1631			if (pvt->info.type == KNIGHTS_LANDING) {
1632				pci_read_config_dword(pvt->knl.pci_channel[i],
1633					knl_mtr_reg, &mtr);
1634			} else {
1635				pci_read_config_dword(pvt->pci_tad[i],
1636					mtr_regs[j], &mtr);
1637			}
1638			edac_dbg(4, "Channel #%d  MTR%d = %x\n", i, j, mtr);
1639
1640			if (IS_DIMM_PRESENT(mtr)) {
1641				if (!IS_ECC_ENABLED(pvt->info.mcmtr)) {
1642					sbridge_printk(KERN_ERR, "CPU SrcID #%d, Ha #%d, Channel #%d has DIMMs, but ECC is disabled\n",
1643						       pvt->sbridge_dev->source_id,
1644						       pvt->sbridge_dev->dom, i);
1645					return -ENODEV;
1646				}
1647				pvt->channel[i].dimms++;
1648
1649				ranks = numrank(pvt->info.type, mtr);
1650
1651				if (pvt->info.type == KNIGHTS_LANDING) {
1652					/* For DDR4, this is fixed. */
1653					cols = 1 << 10;
1654					rows = knl_mc_sizes[i] /
1655						((u64) cols * ranks * banks * 8);
1656				} else {
1657					rows = numrow(mtr);
1658					cols = numcol(mtr);
1659				}
1660
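    				/*
    				 * rows * cols * banks * ranks counts 64-bit
    				 * (8-byte) wide locations; the single shift by
    				 * (20 - 3) folds the "* 8 bytes" and "/ 2^20"
    				 * together, giving the size in MiB.
    				 */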
1661				size = ((u64)rows * cols * banks * ranks) >> (20 - 3);
1662				npages = MiB_TO_PAGES(size);
1663
1664				edac_dbg(0, "mc#%d: ha %d channel %d, dimm %d, %lld MiB (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n",
1665					 pvt->sbridge_dev->mc, pvt->sbridge_dev->dom, i, j,
1666					 size, npages,
1667					 banks, ranks, rows, cols);
1668
1669				dimm->nr_pages = npages;
1670				dimm->grain = 32;
1671				dimm->dtype = pvt->info.get_width(pvt, mtr);
1672				dimm->mtype = mtype;
1673				dimm->edac_mode = mode;
1674				pvt->channel[i].dimm[j].rowbits = order_base_2(rows);
1675				pvt->channel[i].dimm[j].colbits = order_base_2(cols);
1676				pvt->channel[i].dimm[j].bank_xor_enable =
1677						GET_BITFIELD(pvt->info.mcmtr, 9, 9);
1678				pvt->channel[i].dimm[j].amap_fine = GET_BITFIELD(amap, 0, 0);
1679				snprintf(dimm->label, sizeof(dimm->label),
1680						 "CPU_SrcID#%u_Ha#%u_Chan#%u_DIMM#%u",
1681						 pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom, i, j);
1682			}
1683		}
1684	}
1685
1686	return 0;
1687}
1688
1689static int get_dimm_config(struct mem_ctl_info *mci)
1690{
1691	struct sbridge_pvt *pvt = mci->pvt_info;
1692	u64 knl_mc_sizes[KNL_MAX_CHANNELS];
1693	enum edac_type mode;
1694	u32 reg;
1695
1696	pvt->sbridge_dev->node_id = pvt->info.get_node_id(pvt);
1697	edac_dbg(0, "mc#%d: Node ID: %d, source ID: %d\n",
1698		 pvt->sbridge_dev->mc,
1699		 pvt->sbridge_dev->node_id,
1700		 pvt->sbridge_dev->source_id);
1701
1702	/* KNL doesn't support mirroring or lockstep,
1703	 * and is always closed page
1704	 */
1705	if (pvt->info.type == KNIGHTS_LANDING) {
1706		mode = EDAC_S4ECD4ED;
1707		pvt->mirror_mode = NON_MIRRORING;
1708		pvt->is_cur_addr_mirrored = false;
1709
1710		if (knl_get_dimm_capacity(pvt, knl_mc_sizes) != 0)
1711			return -1;
1712		if (pci_read_config_dword(pvt->pci_ta, KNL_MCMTR, &pvt->info.mcmtr)) {
1713			edac_dbg(0, "Failed to read KNL_MCMTR register\n");
1714			return -ENODEV;
1715		}
1716	} else {
1717		if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL) {
1718			if (pci_read_config_dword(pvt->pci_ha, HASWELL_HASYSDEFEATURE2, &reg)) {
1719				edac_dbg(0, "Failed to read HASWELL_HASYSDEFEATURE2 register\n");
1720				return -ENODEV;
1721			}
1722			pvt->is_chan_hash = GET_BITFIELD(reg, 21, 21);
1723			if (GET_BITFIELD(reg, 28, 28)) {
1724				pvt->mirror_mode = ADDR_RANGE_MIRRORING;
1725				edac_dbg(0, "Address range partial memory mirroring is enabled\n");
1726				goto next;
1727			}
1728		}
1729		if (pci_read_config_dword(pvt->pci_ras, RASENABLES, &reg)) {
1730			edac_dbg(0, "Failed to read RASENABLES register\n");
1731			return -ENODEV;
1732		}
1733		if (IS_MIRROR_ENABLED(reg)) {
1734			pvt->mirror_mode = FULL_MIRRORING;
1735			edac_dbg(0, "Full memory mirroring is enabled\n");
1736		} else {
1737			pvt->mirror_mode = NON_MIRRORING;
1738			edac_dbg(0, "Memory mirroring is disabled\n");
1739		}
1740
1741next:
1742		if (pci_read_config_dword(pvt->pci_ta, MCMTR, &pvt->info.mcmtr)) {
1743			edac_dbg(0, "Failed to read MCMTR register\n");
1744			return -ENODEV;
1745		}
1746		if (IS_LOCKSTEP_ENABLED(pvt->info.mcmtr)) {
1747			edac_dbg(0, "Lockstep is enabled\n");
1748			mode = EDAC_S8ECD8ED;
1749			pvt->is_lockstep = true;
1750		} else {
1751			edac_dbg(0, "Lockstep is disabled\n");
1752			mode = EDAC_S4ECD4ED;
1753			pvt->is_lockstep = false;
1754		}
1755		if (IS_CLOSE_PG(pvt->info.mcmtr)) {
1756			edac_dbg(0, "address map is on closed page mode\n");
1757			pvt->is_close_pg = true;
1758		} else {
1759			edac_dbg(0, "address map is on open page mode\n");
1760			pvt->is_close_pg = false;
1761		}
1762	}
1763
1764	return __populate_dimms(mci, knl_mc_sizes, mode);
1765}
1766
1767static void get_memory_layout(const struct mem_ctl_info *mci)
1768{
1769	struct sbridge_pvt *pvt = mci->pvt_info;
1770	int i, j, k, n_sads, n_tads, sad_interl;
1771	u32 reg;
1772	u64 limit, prv = 0;
1773	u64 tmp_mb;
1774	u32 gb, mb;
1775	u32 rir_way;
1776
1777	/*
1778	 * Step 1) Get TOLM/TOHM ranges
1779	 */
1780
1781	pvt->tolm = pvt->info.get_tolm(pvt);
1782	tmp_mb = (1 + pvt->tolm) >> 20;
1783
1784	gb = div_u64_rem(tmp_mb, 1024, &mb);
1785	edac_dbg(0, "TOLM: %u.%03u GB (0x%016Lx)\n",
1786		gb, (mb*1000)/1024, (u64)pvt->tolm);
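	/*
	 * Illustrative example (value assumed, not read from hardware): a TOLM
	 * of 0x9fffffff covers 2.5 GB, so tmp_mb = 2560, div_u64_rem() yields
	 * gb = 2 and mb = 512, and (512 * 1000) / 1024 = 500, which is printed
	 * as "2.500 GB".
	 */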
1787
1788	/* Address range is already 45:25 */
1789	pvt->tohm = pvt->info.get_tohm(pvt);
1790	tmp_mb = (1 + pvt->tohm) >> 20;
1791
1792	gb = div_u64_rem(tmp_mb, 1024, &mb);
1793	edac_dbg(0, "TOHM: %u.%03u GB (0x%016Lx)\n",
1794		gb, (mb*1000)/1024, (u64)pvt->tohm);
1795
1796	/*
1797	 * Step 2) Get SAD range and SAD Interleave list
1798	 * TAD registers contain the interleave wayness. However, it
1799	 * seems simpler to just discover it indirectly, with the
1800	 * algorithm below.
1801	 */
1802	prv = 0;
1803	for (n_sads = 0; n_sads < pvt->info.max_sad; n_sads++) {
1804		/* SAD_LIMIT Address range is 45:26 */
1805		pci_read_config_dword(pvt->pci_sad0, pvt->info.dram_rule[n_sads],
1806				      &reg);
1807		limit = pvt->info.sad_limit(reg);
1808
1809		if (!DRAM_RULE_ENABLE(reg))
1810			continue;
1811
1812		if (limit <= prv)
1813			break;
1814
1815		tmp_mb = (limit + 1) >> 20;
1816		gb = div_u64_rem(tmp_mb, 1024, &mb);
1817		edac_dbg(0, "SAD#%d %s up to %u.%03u GB (0x%016Lx) Interleave: %s reg=0x%08x\n",
1818			 n_sads,
1819			 show_dram_attr(pvt->info.dram_attr(reg)),
1820			 gb, (mb*1000)/1024,
1821			 ((u64)tmp_mb) << 20L,
1822			 get_intlv_mode_str(reg, pvt->info.type),
1823			 reg);
1824		prv = limit;
1825
1826		pci_read_config_dword(pvt->pci_sad0, pvt->info.interleave_list[n_sads],
1827				      &reg);
1828		sad_interl = sad_pkg(pvt->info.interleave_pkg, reg, 0);
1829		for (j = 0; j < 8; j++) {
1830			u32 pkg = sad_pkg(pvt->info.interleave_pkg, reg, j);
1831			if (j > 0 && sad_interl == pkg)
1832				break;
1833
1834			edac_dbg(0, "SAD#%d, interleave #%d: %d\n",
1835				 n_sads, j, pkg);
1836		}
1837	}
1838
1839	if (pvt->info.type == KNIGHTS_LANDING)
1840		return;
1841
1842	/*
1843	 * Step 3) Get TAD range
1844	 */
1845	prv = 0;
1846	for (n_tads = 0; n_tads < MAX_TAD; n_tads++) {
1847		pci_read_config_dword(pvt->pci_ha, tad_dram_rule[n_tads], &reg);
1848		limit = TAD_LIMIT(reg);
1849		if (limit <= prv)
1850			break;
1851		tmp_mb = (limit + 1) >> 20;
1852
1853		gb = div_u64_rem(tmp_mb, 1024, &mb);
1854		edac_dbg(0, "TAD#%d: up to %u.%03u GB (0x%016Lx), socket interleave %d, memory interleave %d, TGT: %d, %d, %d, %d, reg=0x%08x\n",
1855			 n_tads, gb, (mb*1000)/1024,
1856			 ((u64)tmp_mb) << 20L,
1857			 (u32)(1 << TAD_SOCK(reg)),
1858			 (u32)TAD_CH(reg) + 1,
1859			 (u32)TAD_TGT0(reg),
1860			 (u32)TAD_TGT1(reg),
1861			 (u32)TAD_TGT2(reg),
1862			 (u32)TAD_TGT3(reg),
1863			 reg);
1864		prv = limit;
1865	}
1866
1867	/*
1868	 * Step 4) Get TAD offsets, per each channel
1869	 */
1870	for (i = 0; i < NUM_CHANNELS; i++) {
1871		if (!pvt->channel[i].dimms)
1872			continue;
1873		for (j = 0; j < n_tads; j++) {
1874			pci_read_config_dword(pvt->pci_tad[i],
1875					      tad_ch_nilv_offset[j],
1876					      &reg);
1877			tmp_mb = TAD_OFFSET(reg) >> 20;
1878			gb = div_u64_rem(tmp_mb, 1024, &mb);
1879			edac_dbg(0, "TAD CH#%d, offset #%d: %u.%03u GB (0x%016Lx), reg=0x%08x\n",
1880				 i, j,
1881				 gb, (mb*1000)/1024,
1882				 ((u64)tmp_mb) << 20L,
1883				 reg);
1884		}
1885	}
1886
1887	/*
1888	 * Step 6) Get RIR Wayness/Limit, per each channel
1889	 */
1890	for (i = 0; i < NUM_CHANNELS; i++) {
1891		if (!pvt->channel[i].dimms)
1892			continue;
1893		for (j = 0; j < MAX_RIR_RANGES; j++) {
1894			pci_read_config_dword(pvt->pci_tad[i],
1895					      rir_way_limit[j],
1896					      &reg);
1897
1898			if (!IS_RIR_VALID(reg))
1899				continue;
1900
1901			tmp_mb = pvt->info.rir_limit(reg) >> 20;
1902			rir_way = 1 << RIR_WAY(reg);
1903			gb = div_u64_rem(tmp_mb, 1024, &mb);
1904			edac_dbg(0, "CH#%d RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d, reg=0x%08x\n",
1905				 i, j,
1906				 gb, (mb*1000)/1024,
1907				 ((u64)tmp_mb) << 20L,
1908				 rir_way,
1909				 reg);
1910
1911			for (k = 0; k < rir_way; k++) {
1912				pci_read_config_dword(pvt->pci_tad[i],
1913						      rir_offset[j][k],
1914						      &reg);
1915				tmp_mb = RIR_OFFSET(pvt->info.type, reg) << 6;
1916
1917				gb = div_u64_rem(tmp_mb, 1024, &mb);
1918				edac_dbg(0, "CH#%d RIR#%d INTL#%d, offset %u.%03u GB (0x%016Lx), tgt: %d, reg=0x%08x\n",
1919					 i, j, k,
1920					 gb, (mb*1000)/1024,
1921					 ((u64)tmp_mb) << 20L,
1922					 (u32)RIR_RNK_TGT(pvt->info.type, reg),
1923					 reg);
1924			}
1925		}
1926	}
1927}
1928
1929static struct mem_ctl_info *get_mci_for_node_id(u8 node_id, u8 ha)
1930{
1931	struct sbridge_dev *sbridge_dev;
1932
1933	list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) {
1934		if (sbridge_dev->node_id == node_id && sbridge_dev->dom == ha)
1935			return sbridge_dev->mci;
1936	}
1937	return NULL;
1938}
1939
1940static u8 sb_close_row[] = {
1941	15, 16, 17, 18, 20, 21, 22, 28, 10, 11, 12, 13, 29, 30, 31, 32, 33
1942};
1943
1944static u8 sb_close_column[] = {
1945	3, 4, 5, 14, 19, 23, 24, 25, 26, 27
1946};
1947
1948static u8 sb_open_row[] = {
1949	14, 15, 16, 20, 28, 21, 22, 23, 24, 25, 26, 27, 29, 30, 31, 32, 33
1950};
1951
1952static u8 sb_open_column[] = {
1953	3, 4, 5, 6, 7, 8, 9, 10, 11, 12
1954};
1955
1956static u8 sb_open_fine_column[] = {
1957	3, 4, 5, 7, 8, 9, 10, 11, 12, 13
1958};
1959
1960static int sb_bits(u64 addr, int nbits, u8 *bits)
1961{
1962	int i, res = 0;
1963
1964	for (i = 0; i < nbits; i++)
1965		res |= ((addr >> bits[i]) & 1) << i;
1966	return res;
1967}
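/*
 * Illustrative example for sb_bits() (assumed input, not a real decode): with
 * bits[] = {3, 4, 5} and addr = 0x28 (bits 3 and 5 set), the result is
 * 0b101 = 5, i.e. the scattered address bits are packed into a contiguous
 * value, least significant entry first.
 */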
1968
1969static int sb_bank_bits(u64 addr, int b0, int b1, int do_xor, int x0, int x1)
1970{
1971	int ret = GET_BITFIELD(addr, b0, b0) | (GET_BITFIELD(addr, b1, b1) << 1);
1972
1973	if (do_xor)
1974		ret ^= GET_BITFIELD(addr, x0, x0) | (GET_BITFIELD(addr, x1, x1) << 1);
1975
1976	return ret;
1977}
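/*
 * Illustrative example for sb_bank_bits() (assumed input): with b0 = 18,
 * b1 = 19 and an address whose bit 19 is set but bit 18 clear, the result is
 * 0b10 = 2; when do_xor is set, that value is additionally XORed with the
 * two-bit field taken from address bits x0 and x1.
 */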
1978
1979static bool sb_decode_ddr4(struct mem_ctl_info *mci, int ch, u8 rank,
1980			   u64 rank_addr, char *msg)
1981{
1982	int dimmno = 0;
1983	int row, col, bank_address, bank_group;
1984	struct sbridge_pvt *pvt;
1985	u32 bg0 = 0, rowbits = 0, colbits = 0;
1986	u32 amap_fine = 0, bank_xor_enable = 0;
1987
1988	dimmno = (rank < 12) ? rank / 4 : 2;
1989	pvt = mci->pvt_info;
1990	amap_fine =  pvt->channel[ch].dimm[dimmno].amap_fine;
1991	bg0 = amap_fine ? 6 : 13;
1992	rowbits = pvt->channel[ch].dimm[dimmno].rowbits;
1993	colbits = pvt->channel[ch].dimm[dimmno].colbits;
1994	bank_xor_enable = pvt->channel[ch].dimm[dimmno].bank_xor_enable;
1995
1996	if (pvt->is_lockstep) {
1997		pr_warn_once("LockStep row/column decode is not supported yet!\n");
1998		msg[0] = '\0';
1999		return false;
2000	}
2001
2002	if (pvt->is_close_pg) {
2003		row = sb_bits(rank_addr, rowbits, sb_close_row);
2004		col = sb_bits(rank_addr, colbits, sb_close_column);
2005		col |= 0x400; /* C10 is autoprecharge, always set */
2006		bank_address = sb_bank_bits(rank_addr, 8, 9, bank_xor_enable, 22, 28);
2007		bank_group = sb_bank_bits(rank_addr, 6, 7, bank_xor_enable, 20, 21);
2008	} else {
2009		row = sb_bits(rank_addr, rowbits, sb_open_row);
2010		if (amap_fine)
2011			col = sb_bits(rank_addr, colbits, sb_open_fine_column);
2012		else
2013			col = sb_bits(rank_addr, colbits, sb_open_column);
2014		bank_address = sb_bank_bits(rank_addr, 18, 19, bank_xor_enable, 22, 23);
2015		bank_group = sb_bank_bits(rank_addr, bg0, 17, bank_xor_enable, 20, 21);
2016	}
2017
2018	row &= (1u << rowbits) - 1;
2019
2020	sprintf(msg, "row:0x%x col:0x%x bank_addr:%d bank_group:%d",
2021		row, col, bank_address, bank_group);
2022	return true;
2023}
2024
2025static bool sb_decode_ddr3(struct mem_ctl_info *mci, int ch, u8 rank,
2026			   u64 rank_addr, char *msg)
2027{
2028	pr_warn_once("DDR3 row/column decode is not supported yet!\n");
2029	msg[0] = '\0';
2030	return false;
2031}
2032
2033static int get_memory_error_data(struct mem_ctl_info *mci,
2034				 u64 addr,
2035				 u8 *socket, u8 *ha,
2036				 long *channel_mask,
2037				 u8 *rank,
2038				 char **area_type, char *msg)
2039{
2040	struct mem_ctl_info	*new_mci;
2041	struct sbridge_pvt *pvt = mci->pvt_info;
2042	struct pci_dev		*pci_ha;
2043	int			n_rir, n_sads, n_tads, sad_way, sck_xch;
2044	int			sad_interl, idx, base_ch;
2045	int			interleave_mode, shiftup = 0;
2046	unsigned int		sad_interleave[MAX_INTERLEAVE];
2047	u32			reg, dram_rule;
2048	u8			ch_way, sck_way, pkg, sad_ha = 0, rankid = 0;
2049	u32			tad_offset;
2050	u32			rir_way;
2051	u32			mb, gb;
2052	u64			ch_addr, offset, limit = 0, prv = 0;
2053	u64			rank_addr;
2054	enum mem_type		mtype;
2055
2056	/*
2057	 * Step 0) Check if the address is at special memory ranges
2058	 * The check below is probably enough to cover all cases where
2059	 * the error is not inside memory, except for the legacy
2060	 * range (e.g. VGA addresses). It is unlikely, however, that the
2061	 * memory controller would generate an error on that range.
2062	 */
2063	if ((addr > (u64) pvt->tolm) && (addr < (1LL << 32))) {
2064		sprintf(msg, "Error at TOLM area, on addr 0x%08Lx", addr);
2065		return -EINVAL;
2066	}
2067	if (addr >= (u64)pvt->tohm) {
2068		sprintf(msg, "Error at MMIOH area, on addr 0x%016Lx", addr);
2069		return -EINVAL;
2070	}
2071
2072	/*
2073	 * Step 1) Get socket
2074	 */
2075	for (n_sads = 0; n_sads < pvt->info.max_sad; n_sads++) {
2076		pci_read_config_dword(pvt->pci_sad0, pvt->info.dram_rule[n_sads],
2077				      &reg);
2078
2079		if (!DRAM_RULE_ENABLE(reg))
2080			continue;
2081
2082		limit = pvt->info.sad_limit(reg);
2083		if (limit <= prv) {
2084			sprintf(msg, "Can't discover the memory socket");
2085			return -EINVAL;
2086		}
2087		if  (addr <= limit)
2088			break;
2089		prv = limit;
2090	}
2091	if (n_sads == pvt->info.max_sad) {
2092		sprintf(msg, "Can't discover the memory socket");
2093		return -EINVAL;
2094	}
2095	dram_rule = reg;
2096	*area_type = show_dram_attr(pvt->info.dram_attr(dram_rule));
2097	interleave_mode = pvt->info.interleave_mode(dram_rule);
2098
2099	pci_read_config_dword(pvt->pci_sad0, pvt->info.interleave_list[n_sads],
2100			      &reg);
2101
2102	if (pvt->info.type == SANDY_BRIDGE) {
2103		sad_interl = sad_pkg(pvt->info.interleave_pkg, reg, 0);
2104		for (sad_way = 0; sad_way < 8; sad_way++) {
2105			u32 pkg = sad_pkg(pvt->info.interleave_pkg, reg, sad_way);
2106			if (sad_way > 0 && sad_interl == pkg)
2107				break;
2108			sad_interleave[sad_way] = pkg;
2109			edac_dbg(0, "SAD interleave #%d: %d\n",
2110				 sad_way, sad_interleave[sad_way]);
2111		}
2112		edac_dbg(0, "mc#%d: Error detected on SAD#%d: address 0x%016Lx < 0x%016Lx, Interleave [%d:6]%s\n",
2113			 pvt->sbridge_dev->mc,
2114			 n_sads,
2115			 addr,
2116			 limit,
2117			 sad_way + 7,
2118			 !interleave_mode ? "" : "XOR[18:16]");
2119		if (interleave_mode)
2120			idx = ((addr >> 6) ^ (addr >> 16)) & 7;
2121		else
2122			idx = (addr >> 6) & 7;
2123		switch (sad_way) {
2124		case 1:
2125			idx = 0;
2126			break;
2127		case 2:
2128			idx = idx & 1;
2129			break;
2130		case 4:
2131			idx = idx & 3;
2132			break;
2133		case 8:
2134			break;
2135		default:
2136			sprintf(msg, "Can't discover socket interleave");
2137			return -EINVAL;
2138		}
2139		*socket = sad_interleave[idx];
2140		edac_dbg(0, "SAD interleave index: %d (wayness %d) = CPU socket %d\n",
2141			 idx, sad_way, *socket);
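		/*
		 * Illustrative example (assumed address, no XOR mode): an
		 * address of 0x1c0 gives idx = (0x1c0 >> 6) & 7 = 7; with a
		 * 2-way interleave (sad_way == 2) this reduces to idx & 1 = 1,
		 * selecting the second entry in sad_interleave[].
		 */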
2142	} else if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL) {
2143		int bits, a7mode = A7MODE(dram_rule);
2144
2145		if (a7mode) {
2146			/* A7 mode swaps P9 with P6 */
2147			bits = GET_BITFIELD(addr, 7, 8) << 1;
2148			bits |= GET_BITFIELD(addr, 9, 9);
2149		} else
2150			bits = GET_BITFIELD(addr, 6, 8);
2151
2152		if (interleave_mode == 0) {
2153			/* interleave mode will XOR {8,7,6} with {18,17,16} */
2154			idx = GET_BITFIELD(addr, 16, 18);
2155			idx ^= bits;
2156		} else
2157			idx = bits;
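		/*
		 * Illustrative example (assumed address): in A7 mode the
		 * 3-bit index is built from address bits 8 and 7 (upper two
		 * bits) and bit 9 (lowest bit), so an address with only bit 9
		 * set yields bits = 0b001, whereas with A7 mode off the same
		 * address yields bits = GET_BITFIELD(addr, 6, 8) = 0.
		 */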
2158
2159		pkg = sad_pkg(pvt->info.interleave_pkg, reg, idx);
2160		*socket = sad_pkg_socket(pkg);
2161		sad_ha = sad_pkg_ha(pkg);
2162
2163		if (a7mode) {
2164			/* MCChanShiftUpEnable */
2165			pci_read_config_dword(pvt->pci_ha, HASWELL_HASYSDEFEATURE2, &reg);
2166			shiftup = GET_BITFIELD(reg, 22, 22);
2167		}
2168
2169		edac_dbg(0, "SAD interleave package: %d = CPU socket %d, HA %i, shiftup: %i\n",
2170			 idx, *socket, sad_ha, shiftup);
2171	} else {
2172		/* Ivy Bridge's SAD mode doesn't support XOR interleave mode */
2173		idx = (addr >> 6) & 7;
2174		pkg = sad_pkg(pvt->info.interleave_pkg, reg, idx);
2175		*socket = sad_pkg_socket(pkg);
2176		sad_ha = sad_pkg_ha(pkg);
2177		edac_dbg(0, "SAD interleave package: %d = CPU socket %d, HA %d\n",
2178			 idx, *socket, sad_ha);
2179	}
2180
2181	*ha = sad_ha;
2182
2183	/*
2184	 * Move to the proper node structure, in order to access the
2185	 * right PCI registers
2186	 */
2187	new_mci = get_mci_for_node_id(*socket, sad_ha);
2188	if (!new_mci) {
2189		sprintf(msg, "Struct for socket #%u wasn't initialized",
2190			*socket);
2191		return -EINVAL;
2192	}
2193	mci = new_mci;
2194	pvt = mci->pvt_info;
2195
2196	/*
2197	 * Step 2) Get memory channel
2198	 */
2199	prv = 0;
2200	pci_ha = pvt->pci_ha;
2201	for (n_tads = 0; n_tads < MAX_TAD; n_tads++) {
2202		pci_read_config_dword(pci_ha, tad_dram_rule[n_tads], &reg);
2203		limit = TAD_LIMIT(reg);
2204		if (limit <= prv) {
2205			sprintf(msg, "Can't discover the memory channel");
2206			return -EINVAL;
2207		}
2208		if  (addr <= limit)
2209			break;
2210		prv = limit;
2211	}
2212	if (n_tads == MAX_TAD) {
2213		sprintf(msg, "Can't discover the memory channel");
2214		return -EINVAL;
2215	}
2216
2217	ch_way = TAD_CH(reg) + 1;
2218	sck_way = TAD_SOCK(reg);
2219
2220	if (ch_way == 3)
2221		idx = addr >> 6;
2222	else {
2223		idx = (addr >> (6 + sck_way + shiftup)) & 0x3;
2224		if (pvt->is_chan_hash)
2225			idx = haswell_chan_hash(idx, addr);
2226	}
2227	idx = idx % ch_way;
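	/*
	 * Illustrative example (assumed TAD values): with ch_way == 2,
	 * sck_way == 0, shiftup == 0 and channel hashing disabled, an address
	 * of 0x40 gives idx = (0x40 >> 6) & 0x3 = 1, then idx % 2 = 1, which
	 * selects TAD_TGT1 in the switch below.
	 */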
2228
2229	/*
2230	 * FIXME: Shouldn't we use CHN_IDX_OFFSET() here, when ch_way == 3 ???
2231	 */
2232	switch (idx) {
2233	case 0:
2234		base_ch = TAD_TGT0(reg);
2235		break;
2236	case 1:
2237		base_ch = TAD_TGT1(reg);
2238		break;
2239	case 2:
2240		base_ch = TAD_TGT2(reg);
2241		break;
2242	case 3:
2243		base_ch = TAD_TGT3(reg);
2244		break;
2245	default:
2246		sprintf(msg, "Can't discover the TAD target");
2247		return -EINVAL;
2248	}
2249	*channel_mask = 1 << base_ch;
2250
2251	pci_read_config_dword(pvt->pci_tad[base_ch], tad_ch_nilv_offset[n_tads], &tad_offset);
2252
2253	if (pvt->mirror_mode == FULL_MIRRORING ||
2254	    (pvt->mirror_mode == ADDR_RANGE_MIRRORING && n_tads == 0)) {
2255		*channel_mask |= 1 << ((base_ch + 2) % 4);
2256		switch(ch_way) {
2257		case 2:
2258		case 4:
2259			sck_xch = (1 << sck_way) * (ch_way >> 1);
2260			break;
2261		default:
2262			sprintf(msg, "Invalid mirror set. Can't decode addr");
2263			return -EINVAL;
2264		}
2265
2266		pvt->is_cur_addr_mirrored = true;
2267	} else {
2268		sck_xch = (1 << sck_way) * ch_way;
2269		pvt->is_cur_addr_mirrored = false;
2270	}
2271
2272	if (pvt->is_lockstep)
2273		*channel_mask |= 1 << ((base_ch + 1) % 4);
2274
2275	offset = TAD_OFFSET(tad_offset);
2276
2277	edac_dbg(0, "TAD#%d: address 0x%016Lx < 0x%016Lx, socket interleave %d, channel interleave %d (offset 0x%08Lx), index %d, base ch: %d, ch mask: 0x%02lx\n",
2278		 n_tads,
2279		 addr,
2280		 limit,
2281		 sck_way,
2282		 ch_way,
2283		 offset,
2284		 idx,
2285		 base_ch,
2286		 *channel_mask);
2287
2288	/* Calculate channel address */
2289	/* Remove the TAD offset */
2290
2291	if (offset > addr) {
2292		sprintf(msg, "Can't calculate ch addr: TAD offset 0x%08Lx is too high for addr 0x%08Lx!",
2293			offset, addr);
2294		return -EINVAL;
2295	}
2296
2297	ch_addr = addr - offset;
2298	ch_addr >>= (6 + shiftup);
2299	ch_addr /= sck_xch;
2300	ch_addr <<= (6 + shiftup);
2301	ch_addr |= addr & ((1 << (6 + shiftup)) - 1);
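	/*
	 * Roughly: the socket/channel interleave is divided out of the bits
	 * above bit (6 + shiftup), while the low bits (the cache-line offset)
	 * are carried over from the original address. E.g. with shiftup == 0
	 * and sck_xch == 2, every second cache line of the system address
	 * space maps onto consecutive cache lines of this channel.
	 */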
2302
2303	/*
2304	 * Step 3) Decode rank
2305	 */
2306	for (n_rir = 0; n_rir < MAX_RIR_RANGES; n_rir++) {
2307		pci_read_config_dword(pvt->pci_tad[base_ch], rir_way_limit[n_rir], &reg);
2308
2309		if (!IS_RIR_VALID(reg))
2310			continue;
2311
2312		limit = pvt->info.rir_limit(reg);
2313		gb = div_u64_rem(limit >> 20, 1024, &mb);
2314		edac_dbg(0, "RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d\n",
2315			 n_rir,
2316			 gb, (mb*1000)/1024,
2317			 limit,
2318			 1 << RIR_WAY(reg));
2319		if  (ch_addr <= limit)
2320			break;
2321	}
2322	if (n_rir == MAX_RIR_RANGES) {
2323		sprintf(msg, "Can't discover the memory rank for ch addr 0x%08Lx",
2324			ch_addr);
2325		return -EINVAL;
2326	}
2327	rir_way = RIR_WAY(reg);
2328
2329	if (pvt->is_close_pg)
2330		idx = (ch_addr >> 6);
2331	else
2332		idx = (ch_addr >> 13);	/* FIXME: Datasheet says to shift by 15 */
2333	idx %= 1 << rir_way;
2334
2335	pci_read_config_dword(pvt->pci_tad[base_ch], rir_offset[n_rir][idx], &reg);
2336	*rank = RIR_RNK_TGT(pvt->info.type, reg);
2337
2338	if (pvt->info.type == BROADWELL) {
2339		if (pvt->is_close_pg)
2340			shiftup = 6;
2341		else
2342			shiftup = 13;
2343
2344		rank_addr = ch_addr >> shiftup;
2345		rank_addr /= (1 << rir_way);
2346		rank_addr <<= shiftup;
2347		rank_addr |= ch_addr & GENMASK_ULL(shiftup - 1, 0);
2348		rank_addr -= RIR_OFFSET(pvt->info.type, reg);
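		/*
		 * Same pattern as the channel-address math above: the RIR
		 * interleave ways are divided out of the bits above 'shiftup'
		 * (6 for closed page, 13 for open page), the low bits are
		 * preserved, and the per-rank RIR offset is subtracted to get
		 * a rank-local address for the row/column decode below.
		 */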
2349
2350		mtype = pvt->info.get_memory_type(pvt);
2351		rankid = *rank;
2352		if (mtype == MEM_DDR4 || mtype == MEM_RDDR4)
2353			sb_decode_ddr4(mci, base_ch, rankid, rank_addr, msg);
2354		else
2355			sb_decode_ddr3(mci, base_ch, rankid, rank_addr, msg);
2356	} else {
2357		msg[0] = '\0';
2358	}
2359
2360	edac_dbg(0, "RIR#%d: channel address 0x%08Lx < 0x%08Lx, RIR interleave %d, index %d\n",
2361		 n_rir,
2362		 ch_addr,
2363		 limit,
2364		 rir_way,
2365		 idx);
2366
2367	return 0;
2368}
2369
2370static int get_memory_error_data_from_mce(struct mem_ctl_info *mci,
2371					  const struct mce *m, u8 *socket,
2372					  u8 *ha, long *channel_mask,
2373					  char *msg)
2374{
2375	u32 reg, channel = GET_BITFIELD(m->status, 0, 3);
2376	struct mem_ctl_info *new_mci;
2377	struct sbridge_pvt *pvt;
2378	struct pci_dev *pci_ha;
2379	bool tad0;
2380
2381	if (channel >= NUM_CHANNELS) {
2382		sprintf(msg, "Invalid channel 0x%x", channel);
2383		return -EINVAL;
2384	}
2385
2386	pvt = mci->pvt_info;
2387	if (!pvt->info.get_ha) {
2388		sprintf(msg, "No get_ha()");
2389		return -EINVAL;
2390	}
2391	*ha = pvt->info.get_ha(m->bank);
2392	if (*ha != 0 && *ha != 1) {
2393		sprintf(msg, "Impossible bank %d", m->bank);
2394		return -EINVAL;
2395	}
2396
2397	*socket = m->socketid;
2398	new_mci = get_mci_for_node_id(*socket, *ha);
2399	if (!new_mci) {
2400		strcpy(msg, "mci socket got corrupted!");
2401		return -EINVAL;
2402	}
2403
2404	pvt = new_mci->pvt_info;
2405	pci_ha = pvt->pci_ha;
2406	pci_read_config_dword(pci_ha, tad_dram_rule[0], &reg);
2407	tad0 = m->addr <= TAD_LIMIT(reg);
2408
2409	*channel_mask = 1 << channel;
2410	if (pvt->mirror_mode == FULL_MIRRORING ||
2411	    (pvt->mirror_mode == ADDR_RANGE_MIRRORING && tad0)) {
2412		*channel_mask |= 1 << ((channel + 2) % 4);
2413		pvt->is_cur_addr_mirrored = true;
2414	} else {
2415		pvt->is_cur_addr_mirrored = false;
2416	}
2417
2418	if (pvt->is_lockstep)
2419		*channel_mask |= 1 << ((channel + 1) % 4);
2420
2421	return 0;
2422}
2423
2424/****************************************************************************
2425	Device initialization routines: put/get, init/exit
2426 ****************************************************************************/
2427
2428/*
2429 *	sbridge_put_all_devices	'put' all the devices that we have
2430 *				reserved via 'get'
2431 */
2432static void sbridge_put_devices(struct sbridge_dev *sbridge_dev)
2433{
2434	int i;
2435
2436	edac_dbg(0, "\n");
2437	for (i = 0; i < sbridge_dev->n_devs; i++) {
2438		struct pci_dev *pdev = sbridge_dev->pdev[i];
2439		if (!pdev)
2440			continue;
2441		edac_dbg(0, "Removing dev %02x:%02x.%d\n",
2442			 pdev->bus->number,
2443			 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
2444		pci_dev_put(pdev);
2445	}
2446}
2447
2448static void sbridge_put_all_devices(void)
2449{
2450	struct sbridge_dev *sbridge_dev, *tmp;
2451
2452	list_for_each_entry_safe(sbridge_dev, tmp, &sbridge_edac_list, list) {
2453		sbridge_put_devices(sbridge_dev);
2454		free_sbridge_dev(sbridge_dev);
2455	}
2456}
2457
2458static int sbridge_get_onedevice(struct pci_dev **prev,
2459				 u8 *num_mc,
2460				 const struct pci_id_table *table,
2461				 const unsigned devno,
2462				 const int multi_bus)
2463{
2464	struct sbridge_dev *sbridge_dev = NULL;
2465	const struct pci_id_descr *dev_descr = &table->descr[devno];
2466	struct pci_dev *pdev = NULL;
2467	int seg = 0;
2468	u8 bus = 0;
2469	int i = 0;
2470
2471	sbridge_printk(KERN_DEBUG,
2472		"Seeking for: PCI ID %04x:%04x\n",
2473		PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
2474
2475	pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
2476			      dev_descr->dev_id, *prev);
2477
2478	if (!pdev) {
2479		if (*prev) {
2480			*prev = pdev;
2481			return 0;
2482		}
2483
2484		if (dev_descr->optional)
2485			return 0;
2486
2487		/* if the HA wasn't found */
2488		if (devno == 0)
2489			return -ENODEV;
2490
2491		sbridge_printk(KERN_INFO,
2492			"Device not found: %04x:%04x\n",
2493			PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
2494
2495		/* End of list, leave */
2496		return -ENODEV;
2497	}
2498	seg = pci_domain_nr(pdev->bus);
2499	bus = pdev->bus->number;
2500
2501next_imc:
2502	sbridge_dev = get_sbridge_dev(seg, bus, dev_descr->dom,
2503				      multi_bus, sbridge_dev);
2504	if (!sbridge_dev) {
2505		/* If HA1 wasn't found, don't create a second EDAC memory controller */
2506		if (dev_descr->dom == IMC1 && devno != 1) {
2507			edac_dbg(0, "Skip IMC1: %04x:%04x (since HA1 was absent)\n",
2508				 PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
2509			pci_dev_put(pdev);
2510			return 0;
2511		}
2512
2513		if (dev_descr->dom == SOCK)
2514			goto out_imc;
2515
2516		sbridge_dev = alloc_sbridge_dev(seg, bus, dev_descr->dom, table);
2517		if (!sbridge_dev) {
2518			pci_dev_put(pdev);
2519			return -ENOMEM;
2520		}
2521		(*num_mc)++;
2522	}
2523
2524	if (sbridge_dev->pdev[sbridge_dev->i_devs]) {
2525		sbridge_printk(KERN_ERR,
2526			"Duplicated device for %04x:%04x\n",
2527			PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
2528		pci_dev_put(pdev);
2529		return -ENODEV;
2530	}
2531
2532	sbridge_dev->pdev[sbridge_dev->i_devs++] = pdev;
2533
2534	/* pdev belongs to more than one IMC, do extra gets */
2535	if (++i > 1)
2536		pci_dev_get(pdev);
2537
2538	if (dev_descr->dom == SOCK && i < table->n_imcs_per_sock)
2539		goto next_imc;
2540
2541out_imc:
2542	/* Be sure that the device is enabled */
2543	if (unlikely(pci_enable_device(pdev) < 0)) {
2544		sbridge_printk(KERN_ERR,
2545			"Couldn't enable %04x:%04x\n",
2546			PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
2547		return -ENODEV;
2548	}
2549
2550	edac_dbg(0, "Detected %04x:%04x\n",
2551		 PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
2552
2553	/*
2554	 * As stated in drivers/pci/search.c, the reference count for
2555	 * @from is always decremented if it is not %NULL. So, as we need
2556	 * to get all devices up to NULL, we need to do a get for the device
2557	 */
2558	pci_dev_get(pdev);
2559
2560	*prev = pdev;
2561
2562	return 0;
2563}
2564
2565/*
2566 * sbridge_get_all_devices - Find and perform 'get' operation on the MCH's
2567 *			     devices we want to reference for this driver.
2568 * @num_mc: pointer to the memory controllers count, to be incremented in case
2569 *	    of success.
2570 * @table: model specific table
2571 *
2572 * returns 0 in case of success or error code
2573 */
2574static int sbridge_get_all_devices(u8 *num_mc,
2575					const struct pci_id_table *table)
2576{
2577	int i, rc;
2578	struct pci_dev *pdev = NULL;
2579	int allow_dups = 0;
2580	int multi_bus = 0;
2581
2582	if (table->type == KNIGHTS_LANDING)
2583		allow_dups = multi_bus = 1;
2584	while (table && table->descr) {
2585		for (i = 0; i < table->n_devs_per_sock; i++) {
2586			if (!allow_dups || i == 0 ||
2587					table->descr[i].dev_id !=
2588						table->descr[i-1].dev_id) {
2589				pdev = NULL;
2590			}
2591			do {
2592				rc = sbridge_get_onedevice(&pdev, num_mc,
2593							   table, i, multi_bus);
2594				if (rc < 0) {
2595					if (i == 0) {
2596						i = table->n_devs_per_sock;
2597						break;
2598					}
2599					sbridge_put_all_devices();
2600					return -ENODEV;
2601				}
2602			} while (pdev && !allow_dups);
2603		}
2604		table++;
2605	}
2606
2607	return 0;
2608}
2609
2610/*
2611 * Device IDs for {SBRIDGE,IBRIDGE,HASWELL,BROADWELL}_IMC_HA0_TAD0 are in
2612 * the format: XXXa. So we can convert from a device to the corresponding
2613 * channel like this
2614 */
2615#define TAD_DEV_TO_CHAN(dev) (((dev) & 0xf) - 0xa)
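/*
 * For example, a TAD device ID ending in 0xa maps to channel 0
 * ((0xa & 0xf) - 0xa == 0) and one ending in 0xd maps to channel 3.
 */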
2616
2617static int sbridge_mci_bind_devs(struct mem_ctl_info *mci,
2618				 struct sbridge_dev *sbridge_dev)
2619{
2620	struct sbridge_pvt *pvt = mci->pvt_info;
2621	struct pci_dev *pdev;
2622	u8 saw_chan_mask = 0;
2623	int i;
2624
2625	for (i = 0; i < sbridge_dev->n_devs; i++) {
2626		pdev = sbridge_dev->pdev[i];
2627		if (!pdev)
2628			continue;
2629
2630		switch (pdev->device) {
2631		case PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0:
2632			pvt->pci_sad0 = pdev;
2633			break;
2634		case PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1:
2635			pvt->pci_sad1 = pdev;
2636			break;
2637		case PCI_DEVICE_ID_INTEL_SBRIDGE_BR:
2638			pvt->pci_br0 = pdev;
2639			break;
2640		case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0:
2641			pvt->pci_ha = pdev;
2642			break;
2643		case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA:
2644			pvt->pci_ta = pdev;
2645			break;
2646		case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS:
2647			pvt->pci_ras = pdev;
2648			break;
2649		case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0:
2650		case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1:
2651		case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2:
2652		case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3:
2653		{
2654			int id = TAD_DEV_TO_CHAN(pdev->device);
2655			pvt->pci_tad[id] = pdev;
2656			saw_chan_mask |= 1 << id;
2657		}
2658			break;
2659		case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO:
2660			pvt->pci_ddrio = pdev;
2661			break;
2662		default:
2663			goto error;
2664		}
2665
2666		edac_dbg(0, "Associated PCI %02x:%02x, bus %d with dev = %p\n",
2667			 pdev->vendor, pdev->device,
2668			 sbridge_dev->bus,
2669			 pdev);
2670	}
2671
2672	/* Check if everything was registered */
2673	if (!pvt->pci_sad0 || !pvt->pci_sad1 || !pvt->pci_ha ||
2674	    !pvt->pci_ras || !pvt->pci_ta)
2675		goto enodev;
2676
2677	if (saw_chan_mask != 0x0f)
2678		goto enodev;
2679	return 0;
2680
2681enodev:
2682	sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
2683	return -ENODEV;
2684
2685error:
2686	sbridge_printk(KERN_ERR, "Unexpected device %02x:%02x\n",
2687		       PCI_VENDOR_ID_INTEL, pdev->device);
2688	return -EINVAL;
2689}
2690
2691static int ibridge_mci_bind_devs(struct mem_ctl_info *mci,
2692				 struct sbridge_dev *sbridge_dev)
2693{
2694	struct sbridge_pvt *pvt = mci->pvt_info;
2695	struct pci_dev *pdev;
2696	u8 saw_chan_mask = 0;
2697	int i;
2698
2699	for (i = 0; i < sbridge_dev->n_devs; i++) {
2700		pdev = sbridge_dev->pdev[i];
2701		if (!pdev)
2702			continue;
2703
2704		switch (pdev->device) {
2705		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0:
2706		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1:
2707			pvt->pci_ha = pdev;
2708			break;
2709		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA:
2710		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA:
2711			pvt->pci_ta = pdev;
2712			break;
2713		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS:
2714		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS:
2715			pvt->pci_ras = pdev;
2716			break;
2717		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0:
2718		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1:
2719		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2:
2720		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3:
2721		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0:
2722		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1:
2723		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD2:
2724		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD3:
2725		{
2726			int id = TAD_DEV_TO_CHAN(pdev->device);
2727			pvt->pci_tad[id] = pdev;
2728			saw_chan_mask |= 1 << id;
2729		}
2730			break;
2731		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0:
2732			pvt->pci_ddrio = pdev;
2733			break;
2734		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0:
2735			pvt->pci_ddrio = pdev;
2736			break;
2737		case PCI_DEVICE_ID_INTEL_IBRIDGE_SAD:
2738			pvt->pci_sad0 = pdev;
2739			break;
2740		case PCI_DEVICE_ID_INTEL_IBRIDGE_BR0:
2741			pvt->pci_br0 = pdev;
2742			break;
2743		case PCI_DEVICE_ID_INTEL_IBRIDGE_BR1:
2744			pvt->pci_br1 = pdev;
2745			break;
2746		default:
2747			goto error;
2748		}
2749
2750		edac_dbg(0, "Associated PCI %02x.%02d.%d with dev = %p\n",
2751			 sbridge_dev->bus,
2752			 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
2753			 pdev);
2754	}
2755
2756	/* Check if everything was registered */
2757	if (!pvt->pci_sad0 || !pvt->pci_ha || !pvt->pci_br0 ||
2758	    !pvt->pci_br1 || !pvt->pci_ras || !pvt->pci_ta)
2759		goto enodev;
2760
2761	if (saw_chan_mask != 0x0f && /* -EN/-EX */
2762	    saw_chan_mask != 0x03)   /* -EP */
2763		goto enodev;
2764	return 0;
2765
2766enodev:
2767	sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
2768	return -ENODEV;
2769
2770error:
2771	sbridge_printk(KERN_ERR,
2772		       "Unexpected device %02x:%02x\n", PCI_VENDOR_ID_INTEL,
2773			pdev->device);
2774	return -EINVAL;
2775}
2776
2777static int haswell_mci_bind_devs(struct mem_ctl_info *mci,
2778				 struct sbridge_dev *sbridge_dev)
2779{
2780	struct sbridge_pvt *pvt = mci->pvt_info;
2781	struct pci_dev *pdev;
2782	u8 saw_chan_mask = 0;
2783	int i;
2784
2785	/* there's only one device per system; not tied to any bus */
2786	if (pvt->info.pci_vtd == NULL)
2787		/* result will be checked later */
2788		pvt->info.pci_vtd = pci_get_device(PCI_VENDOR_ID_INTEL,
2789						   PCI_DEVICE_ID_INTEL_HASWELL_IMC_VTD_MISC,
2790						   NULL);
2791
2792	for (i = 0; i < sbridge_dev->n_devs; i++) {
2793		pdev = sbridge_dev->pdev[i];
2794		if (!pdev)
2795			continue;
2796
2797		switch (pdev->device) {
2798		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD0:
2799			pvt->pci_sad0 = pdev;
2800			break;
2801		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD1:
2802			pvt->pci_sad1 = pdev;
2803			break;
2804		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0:
2805		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1:
2806			pvt->pci_ha = pdev;
2807			break;
2808		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA:
2809		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA:
2810			pvt->pci_ta = pdev;
2811			break;
2812		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TM:
2813		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TM:
2814			pvt->pci_ras = pdev;
2815			break;
2816		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0:
2817		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD1:
2818		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD2:
2819		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3:
2820		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0:
2821		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD1:
2822		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD2:
2823		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD3:
2824		{
2825			int id = TAD_DEV_TO_CHAN(pdev->device);
2826			pvt->pci_tad[id] = pdev;
2827			saw_chan_mask |= 1 << id;
2828		}
2829			break;
2830		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0:
2831		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO1:
2832		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO2:
2833		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO3:
2834			if (!pvt->pci_ddrio)
2835				pvt->pci_ddrio = pdev;
2836			break;
2837		default:
2838			break;
2839		}
2840
2841		edac_dbg(0, "Associated PCI %02x.%02d.%d with dev = %p\n",
2842			 sbridge_dev->bus,
2843			 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
2844			 pdev);
2845	}
2846
2847	/* Check if everything was registered */
2848	if (!pvt->pci_sad0 || !pvt->pci_ha || !pvt->pci_sad1 ||
2849	    !pvt->pci_ras  || !pvt->pci_ta || !pvt->info.pci_vtd)
2850		goto enodev;
2851
2852	if (saw_chan_mask != 0x0f && /* -EN/-EX */
2853	    saw_chan_mask != 0x03)   /* -EP */
2854		goto enodev;
2855	return 0;
2856
2857enodev:
2858	sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
2859	return -ENODEV;
2860}
2861
2862static int broadwell_mci_bind_devs(struct mem_ctl_info *mci,
2863				 struct sbridge_dev *sbridge_dev)
2864{
2865	struct sbridge_pvt *pvt = mci->pvt_info;
2866	struct pci_dev *pdev;
2867	u8 saw_chan_mask = 0;
2868	int i;
2869
2870	/* there's only one device per system; not tied to any bus */
2871	if (pvt->info.pci_vtd == NULL)
2872		/* result will be checked later */
2873		pvt->info.pci_vtd = pci_get_device(PCI_VENDOR_ID_INTEL,
2874						   PCI_DEVICE_ID_INTEL_BROADWELL_IMC_VTD_MISC,
2875						   NULL);
2876
2877	for (i = 0; i < sbridge_dev->n_devs; i++) {
2878		pdev = sbridge_dev->pdev[i];
2879		if (!pdev)
2880			continue;
2881
2882		switch (pdev->device) {
2883		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD0:
2884			pvt->pci_sad0 = pdev;
2885			break;
2886		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD1:
2887			pvt->pci_sad1 = pdev;
2888			break;
2889		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0:
2890		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1:
2891			pvt->pci_ha = pdev;
2892			break;
2893		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA:
2894		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TA:
2895			pvt->pci_ta = pdev;
2896			break;
2897		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TM:
2898		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TM:
2899			pvt->pci_ras = pdev;
2900			break;
2901		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD0:
2902		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD1:
2903		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD2:
2904		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD3:
2905		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD0:
2906		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD1:
2907		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD2:
2908		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD3:
2909		{
2910			int id = TAD_DEV_TO_CHAN(pdev->device);
2911			pvt->pci_tad[id] = pdev;
2912			saw_chan_mask |= 1 << id;
2913		}
2914			break;
2915		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_DDRIO0:
2916			pvt->pci_ddrio = pdev;
2917			break;
2918		default:
2919			break;
2920		}
2921
2922		edac_dbg(0, "Associated PCI %02x.%02d.%d with dev = %p\n",
2923			 sbridge_dev->bus,
2924			 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
2925			 pdev);
2926	}
2927
2928	/* Check if everything was registered */
2929	if (!pvt->pci_sad0 || !pvt->pci_ha || !pvt->pci_sad1 ||
2930	    !pvt->pci_ras  || !pvt->pci_ta || !pvt->info.pci_vtd)
2931		goto enodev;
2932
2933	if (saw_chan_mask != 0x0f && /* -EN/-EX */
2934	    saw_chan_mask != 0x03)   /* -EP */
2935		goto enodev;
2936	return 0;
2937
2938enodev:
2939	sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
2940	return -ENODEV;
2941}
2942
2943static int knl_mci_bind_devs(struct mem_ctl_info *mci,
2944			struct sbridge_dev *sbridge_dev)
2945{
2946	struct sbridge_pvt *pvt = mci->pvt_info;
2947	struct pci_dev *pdev;
2948	int dev, func;
2949
2950	int i;
2951	int devidx;
2952
2953	for (i = 0; i < sbridge_dev->n_devs; i++) {
2954		pdev = sbridge_dev->pdev[i];
2955		if (!pdev)
2956			continue;
2957
2958		/* Extract PCI device and function. */
2959		dev = (pdev->devfn >> 3) & 0x1f;
2960		func = pdev->devfn & 0x7;
2961
2962		switch (pdev->device) {
2963		case PCI_DEVICE_ID_INTEL_KNL_IMC_MC:
2964			if (dev == 8)
2965				pvt->knl.pci_mc0 = pdev;
2966			else if (dev == 9)
2967				pvt->knl.pci_mc1 = pdev;
2968			else {
2969				sbridge_printk(KERN_ERR,
2970					"Memory controller in unexpected place! (dev %d, fn %d)\n",
2971					dev, func);
2972				continue;
2973			}
2974			break;
2975
2976		case PCI_DEVICE_ID_INTEL_KNL_IMC_SAD0:
2977			pvt->pci_sad0 = pdev;
2978			break;
2979
2980		case PCI_DEVICE_ID_INTEL_KNL_IMC_SAD1:
2981			pvt->pci_sad1 = pdev;
2982			break;
2983
2984		case PCI_DEVICE_ID_INTEL_KNL_IMC_CHA:
2985			/* There is one of these per tile, and they range from
2986			 * 1.14.0 to 1.18.5.
2987			 */
2988			devidx = ((dev-14)*8)+func;
2989
2990			if (devidx < 0 || devidx >= KNL_MAX_CHAS) {
2991				sbridge_printk(KERN_ERR,
2992					"Caching and Home Agent in unexpected place! (dev %d, fn %d)\n",
2993					dev, func);
2994				continue;
2995			}
2996
2997			WARN_ON(pvt->knl.pci_cha[devidx] != NULL);
2998
2999			pvt->knl.pci_cha[devidx] = pdev;
3000			break;
3001
3002		case PCI_DEVICE_ID_INTEL_KNL_IMC_CHAN:
3003			devidx = -1;
3004
3005			/*
3006			 *  MC0 channels 0-2 are device 9 function 2-4,
3007			 *  MC1 channels 3-5 are device 8 function 2-4.
3008			 */
3009
3010			if (dev == 9)
3011				devidx = func-2;
3012			else if (dev == 8)
3013				devidx = 3 + (func-2);
3014
3015			if (devidx < 0 || devidx >= KNL_MAX_CHANNELS) {
3016				sbridge_printk(KERN_ERR,
3017					"DRAM Channel Registers in unexpected place! (dev %d, fn %d)\n",
3018					dev, func);
3019				continue;
3020			}
3021
3022			WARN_ON(pvt->knl.pci_channel[devidx] != NULL);
3023			pvt->knl.pci_channel[devidx] = pdev;
3024			break;
3025
3026		case PCI_DEVICE_ID_INTEL_KNL_IMC_TOLHM:
3027			pvt->knl.pci_mc_info = pdev;
3028			break;
3029
3030		case PCI_DEVICE_ID_INTEL_KNL_IMC_TA:
3031			pvt->pci_ta = pdev;
3032			break;
3033
3034		default:
3035			sbridge_printk(KERN_ERR, "Unexpected device %d\n",
3036				pdev->device);
3037			break;
3038		}
3039	}
3040
3041	if (!pvt->knl.pci_mc0  || !pvt->knl.pci_mc1 ||
3042	    !pvt->pci_sad0     || !pvt->pci_sad1    ||
3043	    !pvt->pci_ta) {
3044		goto enodev;
3045	}
3046
3047	for (i = 0; i < KNL_MAX_CHANNELS; i++) {
3048		if (!pvt->knl.pci_channel[i]) {
3049			sbridge_printk(KERN_ERR, "Missing channel %d\n", i);
3050			goto enodev;
3051		}
3052	}
3053
3054	for (i = 0; i < KNL_MAX_CHAS; i++) {
3055		if (!pvt->knl.pci_cha[i]) {
3056			sbridge_printk(KERN_ERR, "Missing CHA %d\n", i);
3057			goto enodev;
3058		}
3059	}
3060
3061	return 0;
3062
3063enodev:
3064	sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
3065	return -ENODEV;
3066}
3067
3068/****************************************************************************
3069			Error check routines
3070 ****************************************************************************/
3071
3072/*
3073 * While Sandy Bridge has error count registers, the SMI BIOS reads values
3074 * from them and resets the counters, so they are not reliable for the OS
3075 * to read. We have no option but to trust whatever the MCE is telling us
3076 * about the errors.
3077 */
3078static void sbridge_mce_output_error(struct mem_ctl_info *mci,
3079				    const struct mce *m)
3080{
3081	struct mem_ctl_info *new_mci;
3082	struct sbridge_pvt *pvt = mci->pvt_info;
3083	enum hw_event_mc_err_type tp_event;
3084	bool ripv = GET_BITFIELD(m->mcgstatus, 0, 0);
3085	bool overflow = GET_BITFIELD(m->status, 62, 62);
3086	bool uncorrected_error = GET_BITFIELD(m->status, 61, 61);
3087	bool recoverable;
3088	u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52);
3089	u32 mscod = GET_BITFIELD(m->status, 16, 31);
3090	u32 errcode = GET_BITFIELD(m->status, 0, 15);
3091	u32 channel = GET_BITFIELD(m->status, 0, 3);
3092	u32 optypenum = GET_BITFIELD(m->status, 4, 6);
3093	/*
3094	 * Bits 5-0 of MCi_MISC give the least significant bit that is valid.
3095	 * A value 6 is for cache line aligned address, a value 12 is for page
3096	 * aligned address reported by patrol scrubber.
3097	 */
3098	u32 lsb = GET_BITFIELD(m->misc, 0, 5);
3099	char *optype, *area_type = "DRAM";
3100	long channel_mask, first_channel;
3101	u8  rank = 0xff, socket, ha;
3102	int rc, dimm;
3103
3104	if (pvt->info.type != SANDY_BRIDGE)
3105		recoverable = true;
3106	else
3107		recoverable = GET_BITFIELD(m->status, 56, 56);
3108
3109	if (uncorrected_error) {
3110		core_err_cnt = 1;
3111		if (ripv) {
3112			tp_event = HW_EVENT_ERR_UNCORRECTED;
3113		} else {
3114			tp_event = HW_EVENT_ERR_FATAL;
3115		}
3116	} else {
3117		tp_event = HW_EVENT_ERR_CORRECTED;
3118	}
3119
3120	/*
3121	 * According with Table 15-9 of the Intel Architecture spec vol 3A,
3122	 * memory errors should fit in this mask:
3123	 *	000f 0000 1mmm cccc (binary)
3124	 * where:
3125	 *	f = Correction Report Filtering Bit. If 1, subsequent errors
3126	 *	    won't be shown
3127	 *	mmm = error type
3128	 *	cccc = channel
3129	 * If the mask doesn't match, report an error to the parsing logic
3130	 */
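	/*
	 * For instance, an error code of 0x0091 fits the mask above: bit 7 is
	 * set, mmm = 001 selects "memory read error" below and cccc = 0001
	 * points at channel 1 (illustrative value, not a captured MCE).
	 */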
3131	switch (optypenum) {
3132	case 0:
3133		optype = "generic undef request error";
3134		break;
3135	case 1:
3136		optype = "memory read error";
3137		break;
3138	case 2:
3139		optype = "memory write error";
3140		break;
3141	case 3:
3142		optype = "addr/cmd error";
3143		break;
3144	case 4:
3145		optype = "memory scrubbing error";
3146		break;
3147	default:
3148		optype = "reserved";
3149		break;
3150	}
3151
3152	if (pvt->info.type == KNIGHTS_LANDING) {
3153		if (channel == 14) {
3154			edac_dbg(0, "%s%s err_code:%04x:%04x EDRAM bank %d\n",
3155				overflow ? " OVERFLOW" : "",
3156				(uncorrected_error && recoverable)
3157				? " recoverable" : "",
3158				mscod, errcode,
3159				m->bank);
3160		} else {
3161			char A = 'A';
3162
3163			/*
3164			 * The reported channel is in the range 0-2, so we can't
3165			 * map it back to the mc directly. To figure out the mc we
3166			 * check the machine check bank that reported this error:
3167			 * bank 15 means mc0 and bank 16 means mc1.
3168			 */
3169			channel = knl_channel_remap(m->bank == 16, channel);
3170			channel_mask = 1 << channel;
3171
3172			snprintf(sb_msg, sizeof(sb_msg),
3173				 "%s%s err_code:%04x:%04x channel:%d (DIMM_%c)",
3174				 overflow ? " OVERFLOW" : "",
3175				 (uncorrected_error && recoverable)
3176				 ? " recoverable" : " ",
3177				 mscod, errcode, channel, A + channel);
3178			edac_mc_handle_error(tp_event, mci, core_err_cnt,
3179				m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0,
3180				channel, 0, -1,
3181				optype, sb_msg);
3182		}
3183		return;
3184	} else if (lsb < 12) {
3185		rc = get_memory_error_data(mci, m->addr, &socket, &ha,
3186					   &channel_mask, &rank,
3187					   &area_type, sb_msg);
3188	} else {
3189		rc = get_memory_error_data_from_mce(mci, m, &socket, &ha,
3190						    &channel_mask, sb_msg);
3191	}
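	/*
	 * Note: per the MCi_MISC comment above, lsb < 12 means the reported
	 * address is at least cache-line granular, so the full SAD/TAD/RIR
	 * walk in get_memory_error_data() can be used; page-granular patrol
	 * scrub reports fall back to get_memory_error_data_from_mce(), which
	 * relies on the MCE bank/channel fields rather than address decoding.
	 */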
3192
3193	if (rc < 0)
3194		goto err_parsing;
3195	new_mci = get_mci_for_node_id(socket, ha);
3196	if (!new_mci) {
3197		strscpy(sb_msg, "Error: socket got corrupted!");
3198		goto err_parsing;
3199	}
3200	mci = new_mci;
3201	pvt = mci->pvt_info;
3202
3203	first_channel = find_first_bit(&channel_mask, NUM_CHANNELS);
3204
3205	if (rank == 0xff)
3206		dimm = -1;
3207	else if (rank < 4)
3208		dimm = 0;
3209	else if (rank < 8)
3210		dimm = 1;
3211	else
3212		dimm = 2;
3213
3214	/*
3215	 * FIXME: On some memory configurations (mirror, lockstep), the
3216	 * Memory Controller can't point the error to a single DIMM. The
3217	 * EDAC core should be handling the channel mask, in order to point
3218	 * to the group of dimm's where the error may be happening.
3219	 */
3220	if (!pvt->is_lockstep && !pvt->is_cur_addr_mirrored && !pvt->is_close_pg)
3221		channel = first_channel;
3222	snprintf(sb_msg_full, sizeof(sb_msg_full),
3223		 "%s%s area:%s err_code:%04x:%04x socket:%d ha:%d channel_mask:%ld rank:%d %s",
3224		 overflow ? " OVERFLOW" : "",
3225		 (uncorrected_error && recoverable) ? " recoverable" : "",
3226		 area_type,
3227		 mscod, errcode,
3228		 socket, ha,
3229		 channel_mask,
3230		 rank, sb_msg);
3231
3232	edac_dbg(0, "%s\n", sb_msg_full);
3233
3234	/* FIXME: need support for channel mask */
3235
3236	if (channel == CHANNEL_UNSPECIFIED)
3237		channel = -1;
3238
3239	/* Call the helper to output message */
3240	edac_mc_handle_error(tp_event, mci, core_err_cnt,
3241			     m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0,
3242			     channel, dimm, -1,
3243			     optype, sb_msg_full);
3244	return;
3245err_parsing:
3246	edac_mc_handle_error(tp_event, mci, core_err_cnt, 0, 0, 0,
3247			     -1, -1, -1,
3248			     sb_msg, "");
3249
3250}
3251
3252/*
3253 * Check that logging is enabled and that this is the right type
3254 * of error for us to handle.
3255 */
3256static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val,
3257				   void *data)
3258{
3259	struct mce *mce = (struct mce *)data;
3260	struct mem_ctl_info *mci;
3261	char *type;
3262
3263	if (mce->kflags & MCE_HANDLED_CEC)
3264		return NOTIFY_DONE;
3265
3266	/*
3267	 * Just let mcelog handle it if the error is
3268	 * outside the memory controller. A memory error
3269	 * is indicated by bit 7 = 1 and bits 8-11,13-15 = 0.
3270	 * Bit 12 has a special meaning.
3271	 */
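	/*
	 * Example (illustrative status value): an error code of 0x0090 passes
	 * this filter since 0x0090 & 0xefff == 0x0090 and 0x0090 >> 7 == 1,
	 * while a value such as 0x0110, which has bit 8 set, is rejected.
	 */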
3272	if ((mce->status & 0xefff) >> 7 != 1)
3273		return NOTIFY_DONE;
3274
3275	/* Check ADDRV bit in STATUS */
3276	if (!GET_BITFIELD(mce->status, 58, 58))
3277		return NOTIFY_DONE;
3278
3279	/* Check MISCV bit in STATUS */
3280	if (!GET_BITFIELD(mce->status, 59, 59))
3281		return NOTIFY_DONE;
3282
3283	/* Check address type in MISC (physical address only) */
3284	if (GET_BITFIELD(mce->misc, 6, 8) != 2)
3285		return NOTIFY_DONE;
3286
3287	mci = get_mci_for_node_id(mce->socketid, IMC0);
3288	if (!mci)
3289		return NOTIFY_DONE;
3290
3291	if (mce->mcgstatus & MCG_STATUS_MCIP)
3292		type = "Exception";
3293	else
3294		type = "Event";
3295
3296	sbridge_mc_printk(mci, KERN_DEBUG, "HANDLING MCE MEMORY ERROR\n");
3297
3298	sbridge_mc_printk(mci, KERN_DEBUG, "CPU %d: Machine Check %s: %Lx "
3299			  "Bank %d: %016Lx\n", mce->extcpu, type,
3300			  mce->mcgstatus, mce->bank, mce->status);
3301	sbridge_mc_printk(mci, KERN_DEBUG, "TSC %llx ", mce->tsc);
3302	sbridge_mc_printk(mci, KERN_DEBUG, "ADDR %llx ", mce->addr);
3303	sbridge_mc_printk(mci, KERN_DEBUG, "MISC %llx ", mce->misc);
3304
3305	sbridge_mc_printk(mci, KERN_DEBUG, "PROCESSOR %u:%x TIME %llu SOCKET "
3306			  "%u APIC %x\n", mce->cpuvendor, mce->cpuid,
3307			  mce->time, mce->socketid, mce->apicid);
3308
3309	sbridge_mce_output_error(mci, mce);
3310
3311	/* Advise mcelog that the error was handled */
3312	mce->kflags |= MCE_HANDLED_EDAC;
3313	return NOTIFY_OK;
3314}
3315
3316static struct notifier_block sbridge_mce_dec = {
3317	.notifier_call	= sbridge_mce_check_error,
3318	.priority	= MCE_PRIO_EDAC,
3319};
3320
3321/****************************************************************************
3322			EDAC register/unregister logic
3323 ****************************************************************************/
3324
3325static void sbridge_unregister_mci(struct sbridge_dev *sbridge_dev)
3326{
3327	struct mem_ctl_info *mci = sbridge_dev->mci;
3328
3329	if (unlikely(!mci || !mci->pvt_info)) {
3330		edac_dbg(0, "MC: dev = %p\n", &sbridge_dev->pdev[0]->dev);
3331
3332		sbridge_printk(KERN_ERR, "Couldn't find mci handler\n");
3333		return;
3334	}
3335
3336	edac_dbg(0, "MC: mci = %p, dev = %p\n",
3337		 mci, &sbridge_dev->pdev[0]->dev);
3338
3339	/* Remove MC sysfs nodes */
3340	edac_mc_del_mc(mci->pdev);
3341
3342	edac_dbg(1, "%s: free mci struct\n", mci->ctl_name);
3343	kfree(mci->ctl_name);
3344	edac_mc_free(mci);
3345	sbridge_dev->mci = NULL;
3346}
3347
3348static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
3349{
3350	struct mem_ctl_info *mci;
3351	struct edac_mc_layer layers[2];
3352	struct sbridge_pvt *pvt;
3353	struct pci_dev *pdev = sbridge_dev->pdev[0];
3354	int rc;
3355
3356	/* allocate a new MC control structure */
3357	layers[0].type = EDAC_MC_LAYER_CHANNEL;
3358	layers[0].size = type == KNIGHTS_LANDING ?
3359		KNL_MAX_CHANNELS : NUM_CHANNELS;
3360	layers[0].is_virt_csrow = false;
3361	layers[1].type = EDAC_MC_LAYER_SLOT;
3362	layers[1].size = type == KNIGHTS_LANDING ? 1 : MAX_DIMMS;
3363	layers[1].is_virt_csrow = true;
3364	mci = edac_mc_alloc(sbridge_dev->mc, ARRAY_SIZE(layers), layers,
3365			    sizeof(*pvt));
3366
3367	if (unlikely(!mci))
3368		return -ENOMEM;
3369
3370	edac_dbg(0, "MC: mci = %p, dev = %p\n",
3371		 mci, &pdev->dev);
3372
3373	pvt = mci->pvt_info;
3374	memset(pvt, 0, sizeof(*pvt));
3375
3376	/* Associate sbridge_dev and mci for future usage */
3377	pvt->sbridge_dev = sbridge_dev;
3378	sbridge_dev->mci = mci;
3379
3380	mci->mtype_cap = type == KNIGHTS_LANDING ?
3381		MEM_FLAG_DDR4 : MEM_FLAG_DDR3;
3382	mci->edac_ctl_cap = EDAC_FLAG_NONE;
3383	mci->edac_cap = EDAC_FLAG_NONE;
3384	mci->mod_name = EDAC_MOD_STR;
3385	mci->dev_name = pci_name(pdev);
3386	mci->ctl_page_to_phys = NULL;
3387
3388	pvt->info.type = type;
3389	switch (type) {
3390	case IVY_BRIDGE:
3391		pvt->info.rankcfgr = IB_RANK_CFG_A;
3392		pvt->info.get_tolm = ibridge_get_tolm;
3393		pvt->info.get_tohm = ibridge_get_tohm;
3394		pvt->info.dram_rule = ibridge_dram_rule;
3395		pvt->info.get_memory_type = get_memory_type;
3396		pvt->info.get_node_id = get_node_id;
3397		pvt->info.get_ha = ibridge_get_ha;
3398		pvt->info.rir_limit = rir_limit;
3399		pvt->info.sad_limit = sad_limit;
3400		pvt->info.interleave_mode = interleave_mode;
3401		pvt->info.dram_attr = dram_attr;
3402		pvt->info.max_sad = ARRAY_SIZE(ibridge_dram_rule);
3403		pvt->info.interleave_list = ibridge_interleave_list;
3404		pvt->info.interleave_pkg = ibridge_interleave_pkg;
3405		pvt->info.get_width = ibridge_get_width;
3406
3407		/* Store pci devices at mci for faster access */
3408		rc = ibridge_mci_bind_devs(mci, sbridge_dev);
3409		if (unlikely(rc < 0))
3410			goto fail0;
3411		get_source_id(mci);
3412		mci->ctl_name = kasprintf(GFP_KERNEL, "Ivy Bridge SrcID#%d_Ha#%d",
3413			pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom);
3414		break;
3415	case SANDY_BRIDGE:
3416		pvt->info.rankcfgr = SB_RANK_CFG_A;
3417		pvt->info.get_tolm = sbridge_get_tolm;
3418		pvt->info.get_tohm = sbridge_get_tohm;
3419		pvt->info.dram_rule = sbridge_dram_rule;
3420		pvt->info.get_memory_type = get_memory_type;
3421		pvt->info.get_node_id = get_node_id;
3422		pvt->info.get_ha = sbridge_get_ha;
3423		pvt->info.rir_limit = rir_limit;
3424		pvt->info.sad_limit = sad_limit;
3425		pvt->info.interleave_mode = interleave_mode;
3426		pvt->info.dram_attr = dram_attr;
3427		pvt->info.max_sad = ARRAY_SIZE(sbridge_dram_rule);
3428		pvt->info.interleave_list = sbridge_interleave_list;
3429		pvt->info.interleave_pkg = sbridge_interleave_pkg;
3430		pvt->info.get_width = sbridge_get_width;
3431
3432		/* Store pci devices at mci for faster access */
3433		rc = sbridge_mci_bind_devs(mci, sbridge_dev);
3434		if (unlikely(rc < 0))
3435			goto fail0;
3436		get_source_id(mci);
3437		mci->ctl_name = kasprintf(GFP_KERNEL, "Sandy Bridge SrcID#%d_Ha#%d",
3438			pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom);
3439		break;
3440	case HASWELL:
3441		/* rankcfgr isn't used */
3442		pvt->info.get_tolm = haswell_get_tolm;
3443		pvt->info.get_tohm = haswell_get_tohm;
3444		pvt->info.dram_rule = ibridge_dram_rule;
3445		pvt->info.get_memory_type = haswell_get_memory_type;
3446		pvt->info.get_node_id = haswell_get_node_id;
3447		pvt->info.get_ha = ibridge_get_ha;
3448		pvt->info.rir_limit = haswell_rir_limit;
3449		pvt->info.sad_limit = sad_limit;
3450		pvt->info.interleave_mode = interleave_mode;
3451		pvt->info.dram_attr = dram_attr;
3452		pvt->info.max_sad = ARRAY_SIZE(ibridge_dram_rule);
3453		pvt->info.interleave_list = ibridge_interleave_list;
3454		pvt->info.interleave_pkg = ibridge_interleave_pkg;
3455		pvt->info.get_width = ibridge_get_width;
3456
3457		/* Store pci devices at mci for faster access */
3458		rc = haswell_mci_bind_devs(mci, sbridge_dev);
3459		if (unlikely(rc < 0))
3460			goto fail0;
3461		get_source_id(mci);
3462		mci->ctl_name = kasprintf(GFP_KERNEL, "Haswell SrcID#%d_Ha#%d",
3463			pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom);
3464		break;
3465	case BROADWELL:
3466		/* rankcfgr isn't used */
3467		pvt->info.get_tolm = haswell_get_tolm;
3468		pvt->info.get_tohm = haswell_get_tohm;
3469		pvt->info.dram_rule = ibridge_dram_rule;
3470		pvt->info.get_memory_type = haswell_get_memory_type;
3471		pvt->info.get_node_id = haswell_get_node_id;
3472		pvt->info.get_ha = ibridge_get_ha;
3473		pvt->info.rir_limit = haswell_rir_limit;
3474		pvt->info.sad_limit = sad_limit;
3475		pvt->info.interleave_mode = interleave_mode;
3476		pvt->info.dram_attr = dram_attr;
3477		pvt->info.max_sad = ARRAY_SIZE(ibridge_dram_rule);
3478		pvt->info.interleave_list = ibridge_interleave_list;
3479		pvt->info.interleave_pkg = ibridge_interleave_pkg;
3480		pvt->info.get_width = broadwell_get_width;
3481
3482		/* Store pci devices at mci for faster access */
3483		rc = broadwell_mci_bind_devs(mci, sbridge_dev);
3484		if (unlikely(rc < 0))
3485			goto fail0;
3486		get_source_id(mci);
3487		mci->ctl_name = kasprintf(GFP_KERNEL, "Broadwell SrcID#%d_Ha#%d",
3488			pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom);
3489		break;
3490	case KNIGHTS_LANDING:
3491		/* pvt->info.rankcfgr == ??? */
3492		pvt->info.get_tolm = knl_get_tolm;
3493		pvt->info.get_tohm = knl_get_tohm;
3494		pvt->info.dram_rule = knl_dram_rule;
3495		pvt->info.get_memory_type = knl_get_memory_type;
3496		pvt->info.get_node_id = knl_get_node_id;
3497		pvt->info.get_ha = knl_get_ha;
3498		pvt->info.rir_limit = NULL;
3499		pvt->info.sad_limit = knl_sad_limit;
3500		pvt->info.interleave_mode = knl_interleave_mode;
3501		pvt->info.dram_attr = dram_attr_knl;
3502		pvt->info.max_sad = ARRAY_SIZE(knl_dram_rule);
3503		pvt->info.interleave_list = knl_interleave_list;
3504		pvt->info.interleave_pkg = ibridge_interleave_pkg;
3505		pvt->info.get_width = knl_get_width;
3506
3507		rc = knl_mci_bind_devs(mci, sbridge_dev);
3508		if (unlikely(rc < 0))
3509			goto fail0;
3510		get_source_id(mci);
3511		mci->ctl_name = kasprintf(GFP_KERNEL, "Knights Landing SrcID#%d_Ha#%d",
3512			pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom);
3513		break;
3514	}
3515
3516	if (!mci->ctl_name) {
3517		rc = -ENOMEM;
3518		goto fail0;
3519	}
3520
3521	/* Get dimm basic config and the memory layout */
3522	rc = get_dimm_config(mci);
3523	if (rc < 0) {
3524		edac_dbg(0, "MC: failed to get_dimm_config()\n");
3525		goto fail;
3526	}
3527	get_memory_layout(mci);
3528
3529	/* record ptr to the generic device */
3530	mci->pdev = &pdev->dev;
3531
3532	/* add this new MC control structure to EDAC's list of MCs */
3533	if (unlikely(edac_mc_add_mc(mci))) {
3534		edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
3535		rc = -EINVAL;
3536		goto fail;
3537	}
3538
3539	return 0;
3540
3541fail:
3542	kfree(mci->ctl_name);
3543fail0:
3544	edac_mc_free(mci);
3545	sbridge_dev->mci = NULL;
3546	return rc;
3547}
3548
3549static const struct x86_cpu_id sbridge_cpuids[] = {
3550	X86_MATCH_VFM(INTEL_SANDYBRIDGE_X,	&pci_dev_descr_sbridge_table),
3551	X86_MATCH_VFM(INTEL_IVYBRIDGE_X,	&pci_dev_descr_ibridge_table),
3552	X86_MATCH_VFM(INTEL_HASWELL_X,		&pci_dev_descr_haswell_table),
3553	X86_MATCH_VFM(INTEL_BROADWELL_X,	&pci_dev_descr_broadwell_table),
3554	X86_MATCH_VFM(INTEL_BROADWELL_D,	&pci_dev_descr_broadwell_table),
3555	X86_MATCH_VFM(INTEL_XEON_PHI_KNL,	&pci_dev_descr_knl_table),
3556	X86_MATCH_VFM(INTEL_XEON_PHI_KNM,	&pci_dev_descr_knl_table),
3557	{ }
3558};
3559MODULE_DEVICE_TABLE(x86cpu, sbridge_cpuids);
3560
3561/*
3562 *	sbridge_probe	Get all devices and register memory controllers
3563 *			present.
3564 *	return:
3565 *		0 when a device is found
3566 *		< 0 for error code
3567 */
3568
3569static int sbridge_probe(const struct x86_cpu_id *id)
3570{
3571	int rc;
3572	u8 mc, num_mc = 0;
3573	struct sbridge_dev *sbridge_dev;
3574	struct pci_id_table *ptable = (struct pci_id_table *)id->driver_data;
3575
3576	/* get the pci devices we want to reserve for our use */
3577	rc = sbridge_get_all_devices(&num_mc, ptable);
3578
3579	if (unlikely(rc < 0)) {
3580		edac_dbg(0, "couldn't get all devices\n");
3581		goto fail0;
3582	}
3583
3584	mc = 0;
3585
3586	list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) {
3587		edac_dbg(0, "Registering MC#%d (%d of %d)\n",
3588			 mc, mc + 1, num_mc);
3589
3590		sbridge_dev->mc = mc++;
3591		rc = sbridge_register_mci(sbridge_dev, ptable->type);
3592		if (unlikely(rc < 0))
3593			goto fail1;
3594	}
3595
3596	sbridge_printk(KERN_INFO, "%s\n", SBRIDGE_REVISION);
3597
3598	return 0;
3599
3600fail1:
3601	list_for_each_entry(sbridge_dev, &sbridge_edac_list, list)
3602		sbridge_unregister_mci(sbridge_dev);
3603
3604	sbridge_put_all_devices();
3605fail0:
3606	return rc;
3607}
3608
3609/*
3610 *	sbridge_remove	cleanup
3611 *
3612 */
3613static void sbridge_remove(void)
3614{
3615	struct sbridge_dev *sbridge_dev;
3616
3617	edac_dbg(0, "\n");
3618
3619	list_for_each_entry(sbridge_dev, &sbridge_edac_list, list)
3620		sbridge_unregister_mci(sbridge_dev);
3621
3622	/* Release PCI resources */
3623	sbridge_put_all_devices();
3624}
3625
3626/*
3627 *	sbridge_init		Module entry function
3628 *			Try to initialize this module for its devices
3629 */
3630static int __init sbridge_init(void)
3631{
3632	const struct x86_cpu_id *id;
3633	const char *owner;
3634	int rc;
3635
3636	edac_dbg(2, "\n");
3637
3638	if (ghes_get_devices())
3639		return -EBUSY;
3640
3641	owner = edac_get_owner();
3642	if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
3643		return -EBUSY;
3644
3645	if (cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
3646		return -ENODEV;
3647
3648	id = x86_match_cpu(sbridge_cpuids);
3649	if (!id)
3650		return -ENODEV;
3651
3652	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
3653	opstate_init();
3654
3655	rc = sbridge_probe(id);
3656
3657	if (rc >= 0) {
3658		mce_register_decode_chain(&sbridge_mce_dec);
3659		return 0;
3660	}
3661
3662	sbridge_printk(KERN_ERR, "Failed to register device with error %d.\n",
3663		      rc);
3664
3665	return rc;
3666}
3667
3668/*
3669 *	sbridge_exit()	Module exit function
3670 *			Unregister the driver
3671 */
3672static void __exit sbridge_exit(void)
3673{
3674	edac_dbg(2, "\n");
3675	sbridge_remove();
3676	mce_unregister_decode_chain(&sbridge_mce_dec);
3677}
3678
3679module_init(sbridge_init);
3680module_exit(sbridge_exit);
3681
3682module_param(edac_op_state, int, 0444);
3683MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
3684
3685MODULE_LICENSE("GPL");
3686MODULE_AUTHOR("Mauro Carvalho Chehab");
3687MODULE_AUTHOR("Red Hat Inc. (https://www.redhat.com)");
3688MODULE_DESCRIPTION("MC Driver for Intel Sandy Bridge and Ivy Bridge memory controllers - "
3689		   SBRIDGE_REVISION);
v4.17
 
   1/* Intel Sandy Bridge -EN/-EP/-EX Memory Controller kernel module
   2 *
   3 * This driver supports the memory controllers found on the Intel
   4 * processor family Sandy Bridge.
   5 *
   6 * This file may be distributed under the terms of the
   7 * GNU General Public License version 2 only.
   8 *
   9 * Copyright (c) 2011 by:
  10 *	 Mauro Carvalho Chehab
  11 */
  12
  13#include <linux/module.h>
  14#include <linux/init.h>
  15#include <linux/pci.h>
  16#include <linux/pci_ids.h>
  17#include <linux/slab.h>
  18#include <linux/delay.h>
  19#include <linux/edac.h>
  20#include <linux/mmzone.h>
  21#include <linux/smp.h>
  22#include <linux/bitmap.h>
  23#include <linux/math64.h>
  24#include <linux/mod_devicetable.h>
  25#include <asm/cpu_device_id.h>
  26#include <asm/intel-family.h>
  27#include <asm/processor.h>
  28#include <asm/mce.h>
  29
  30#include "edac_module.h"
  31
  32/* Static vars */
  33static LIST_HEAD(sbridge_edac_list);
  34
  35/*
  36 * Alter this version for the module when modifications are made
  37 */
  38#define SBRIDGE_REVISION    " Ver: 1.1.2 "
  39#define EDAC_MOD_STR	    "sb_edac"
  40
  41/*
  42 * Debug macros
  43 */
  44#define sbridge_printk(level, fmt, arg...)			\
  45	edac_printk(level, "sbridge", fmt, ##arg)
  46
  47#define sbridge_mc_printk(mci, level, fmt, arg...)		\
  48	edac_mc_chipset_printk(mci, level, "sbridge", fmt, ##arg)
  49
  50/*
  51 * Get a bit field at register value <v>, from bit <lo> to bit <hi>
  52 */
  53#define GET_BITFIELD(v, lo, hi)	\
  54	(((v) & GENMASK_ULL(hi, lo)) >> (lo))
  55
  56/* Devices 12 Function 6, Offsets 0x80 to 0xcc */
  57static const u32 sbridge_dram_rule[] = {
  58	0x80, 0x88, 0x90, 0x98, 0xa0,
  59	0xa8, 0xb0, 0xb8, 0xc0, 0xc8,
  60};
  61
  62static const u32 ibridge_dram_rule[] = {
  63	0x60, 0x68, 0x70, 0x78, 0x80,
  64	0x88, 0x90, 0x98, 0xa0,	0xa8,
  65	0xb0, 0xb8, 0xc0, 0xc8, 0xd0,
  66	0xd8, 0xe0, 0xe8, 0xf0, 0xf8,
  67};
  68
  69static const u32 knl_dram_rule[] = {
  70	0x60, 0x68, 0x70, 0x78, 0x80, /* 0-4 */
  71	0x88, 0x90, 0x98, 0xa0, 0xa8, /* 5-9 */
  72	0xb0, 0xb8, 0xc0, 0xc8, 0xd0, /* 10-14 */
  73	0xd8, 0xe0, 0xe8, 0xf0, 0xf8, /* 15-19 */
  74	0x100, 0x108, 0x110, 0x118,   /* 20-23 */
  75};
  76
  77#define DRAM_RULE_ENABLE(reg)	GET_BITFIELD(reg, 0,  0)
  78#define A7MODE(reg)		GET_BITFIELD(reg, 26, 26)
  79
  80static char *show_dram_attr(u32 attr)
  81{
  82	switch (attr) {
  83		case 0:
  84			return "DRAM";
  85		case 1:
  86			return "MMCFG";
  87		case 2:
  88			return "NXM";
  89		default:
  90			return "unknown";
  91	}
  92}
  93
  94static const u32 sbridge_interleave_list[] = {
  95	0x84, 0x8c, 0x94, 0x9c, 0xa4,
  96	0xac, 0xb4, 0xbc, 0xc4, 0xcc,
  97};
  98
  99static const u32 ibridge_interleave_list[] = {
 100	0x64, 0x6c, 0x74, 0x7c, 0x84,
 101	0x8c, 0x94, 0x9c, 0xa4, 0xac,
 102	0xb4, 0xbc, 0xc4, 0xcc, 0xd4,
 103	0xdc, 0xe4, 0xec, 0xf4, 0xfc,
 104};
 105
 106static const u32 knl_interleave_list[] = {
 107	0x64, 0x6c, 0x74, 0x7c, 0x84, /* 0-4 */
 108	0x8c, 0x94, 0x9c, 0xa4, 0xac, /* 5-9 */
 109	0xb4, 0xbc, 0xc4, 0xcc, 0xd4, /* 10-14 */
 110	0xdc, 0xe4, 0xec, 0xf4, 0xfc, /* 15-19 */
 111	0x104, 0x10c, 0x114, 0x11c,   /* 20-23 */
 112};
 113#define MAX_INTERLEAVE							\
 114	(max_t(unsigned int, ARRAY_SIZE(sbridge_interleave_list),	\
 115	       max_t(unsigned int, ARRAY_SIZE(ibridge_interleave_list),	\
 116		     ARRAY_SIZE(knl_interleave_list))))
 117
 118struct interleave_pkg {
 119	unsigned char start;
 120	unsigned char end;
 121};
 122
 123static const struct interleave_pkg sbridge_interleave_pkg[] = {
 124	{ 0, 2 },
 125	{ 3, 5 },
 126	{ 8, 10 },
 127	{ 11, 13 },
 128	{ 16, 18 },
 129	{ 19, 21 },
 130	{ 24, 26 },
 131	{ 27, 29 },
 132};
 133
 134static const struct interleave_pkg ibridge_interleave_pkg[] = {
 135	{ 0, 3 },
 136	{ 4, 7 },
 137	{ 8, 11 },
 138	{ 12, 15 },
 139	{ 16, 19 },
 140	{ 20, 23 },
 141	{ 24, 27 },
 142	{ 28, 31 },
 143};
 144
 145static inline int sad_pkg(const struct interleave_pkg *table, u32 reg,
 146			  int interleave)
 147{
 148	return GET_BITFIELD(reg, table[interleave].start,
 149			    table[interleave].end);
 150}
 151
 152/* Devices 12 Function 7 */
 153
 154#define TOLM		0x80
 155#define TOHM		0x84
 156#define HASWELL_TOLM	0xd0
 157#define HASWELL_TOHM_0	0xd4
 158#define HASWELL_TOHM_1	0xd8
 159#define KNL_TOLM	0xd0
 160#define KNL_TOHM_0	0xd4
 161#define KNL_TOHM_1	0xd8
 162
 163#define GET_TOLM(reg)		((GET_BITFIELD(reg, 0,  3) << 28) | 0x3ffffff)
 164#define GET_TOHM(reg)		((GET_BITFIELD(reg, 0, 20) << 25) | 0x3ffffff)
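/*
 * Illustrative decode (editor's note, hypothetical register values): a TOLM
 * register whose bits 3:0 read 0xc gives
 *	GET_TOLM(reg) = (0xc << 28) | 0x3ffffff = 0xc3ffffff
 * i.e. low memory ends at 3 GiB + 64 MiB - 1.  A TOHM register whose
 * bits 20:0 read 0x100 gives
 *	GET_TOHM(reg) = (0x100 << 25) | 0x3ffffff = 0x203ffffff
 * i.e. the high-memory range tops out at 8 GiB + 64 MiB - 1.
 */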
 165
 166/* Device 13 Function 6 */
 167
 168#define SAD_TARGET	0xf0
 169
 170#define SOURCE_ID(reg)		GET_BITFIELD(reg, 9, 11)
 171
 172#define SOURCE_ID_KNL(reg)	GET_BITFIELD(reg, 12, 14)
 173
 174#define SAD_CONTROL	0xf4
 175
 176/* Device 14 function 0 */
 177
 178static const u32 tad_dram_rule[] = {
 179	0x40, 0x44, 0x48, 0x4c,
 180	0x50, 0x54, 0x58, 0x5c,
 181	0x60, 0x64, 0x68, 0x6c,
 182};
 183#define MAX_TAD	ARRAY_SIZE(tad_dram_rule)
 184
 185#define TAD_LIMIT(reg)		((GET_BITFIELD(reg, 12, 31) << 26) | 0x3ffffff)
 186#define TAD_SOCK(reg)		GET_BITFIELD(reg, 10, 11)
 187#define TAD_CH(reg)		GET_BITFIELD(reg,  8,  9)
 188#define TAD_TGT3(reg)		GET_BITFIELD(reg,  6,  7)
 189#define TAD_TGT2(reg)		GET_BITFIELD(reg,  4,  5)
 190#define TAD_TGT1(reg)		GET_BITFIELD(reg,  2,  3)
 191#define TAD_TGT0(reg)		GET_BITFIELD(reg,  0,  1)
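/*
 * Worked example (editor's note, hypothetical value): a TAD rule register of
 * 0x0007f144 decodes with the macros above as
 *	TAD_LIMIT = (0x7f << 26) | 0x3ffffff = 0x1ffffffff  (8 GiB - 1)
 *	TAD_SOCK  = 0  (1-way socket interleave)
 *	TAD_CH    = 1  (2-way channel interleave)
 *	TAD_TGT0..TAD_TGT3 = 0, 1, 0, 1
 * which is how get_memory_layout() later prints these rules.
 */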
 192
 193/* Device 15, function 0 */
 194
 195#define MCMTR			0x7c
 196#define KNL_MCMTR		0x624
 197
 198#define IS_ECC_ENABLED(mcmtr)		GET_BITFIELD(mcmtr, 2, 2)
 199#define IS_LOCKSTEP_ENABLED(mcmtr)	GET_BITFIELD(mcmtr, 1, 1)
 200#define IS_CLOSE_PG(mcmtr)		GET_BITFIELD(mcmtr, 0, 0)
 201
 202/* Device 15, function 1 */
 203
 204#define RASENABLES		0xac
 205#define IS_MIRROR_ENABLED(reg)		GET_BITFIELD(reg, 0, 0)
 206
 207/* Device 15, functions 2-5 */
 208
 209static const int mtr_regs[] = {
 210	0x80, 0x84, 0x88,
 211};
 212
 213static const int knl_mtr_reg = 0xb60;
 214
 215#define RANK_DISABLE(mtr)		GET_BITFIELD(mtr, 16, 19)
 216#define IS_DIMM_PRESENT(mtr)		GET_BITFIELD(mtr, 14, 14)
 217#define RANK_CNT_BITS(mtr)		GET_BITFIELD(mtr, 12, 13)
 218#define RANK_WIDTH_BITS(mtr)		GET_BITFIELD(mtr, 2, 4)
 219#define COL_WIDTH_BITS(mtr)		GET_BITFIELD(mtr, 0, 1)
 220
 221static const u32 tad_ch_nilv_offset[] = {
 222	0x90, 0x94, 0x98, 0x9c,
 223	0xa0, 0xa4, 0xa8, 0xac,
 224	0xb0, 0xb4, 0xb8, 0xbc,
 225};
 226#define CHN_IDX_OFFSET(reg)		GET_BITFIELD(reg, 28, 29)
 227#define TAD_OFFSET(reg)			(GET_BITFIELD(reg,  6, 25) << 26)
 228
 229static const u32 rir_way_limit[] = {
 230	0x108, 0x10c, 0x110, 0x114, 0x118,
 231};
 232#define MAX_RIR_RANGES ARRAY_SIZE(rir_way_limit)
 233
 234#define IS_RIR_VALID(reg)	GET_BITFIELD(reg, 31, 31)
 235#define RIR_WAY(reg)		GET_BITFIELD(reg, 28, 29)
 236
 237#define MAX_RIR_WAY	8
 238
 239static const u32 rir_offset[MAX_RIR_RANGES][MAX_RIR_WAY] = {
 240	{ 0x120, 0x124, 0x128, 0x12c, 0x130, 0x134, 0x138, 0x13c },
 241	{ 0x140, 0x144, 0x148, 0x14c, 0x150, 0x154, 0x158, 0x15c },
 242	{ 0x160, 0x164, 0x168, 0x16c, 0x170, 0x174, 0x178, 0x17c },
 243	{ 0x180, 0x184, 0x188, 0x18c, 0x190, 0x194, 0x198, 0x19c },
 244	{ 0x1a0, 0x1a4, 0x1a8, 0x1ac, 0x1b0, 0x1b4, 0x1b8, 0x1bc },
 245};
 246
 247#define RIR_RNK_TGT(type, reg) (((type) == BROADWELL) ? \
 248	GET_BITFIELD(reg, 20, 23) : GET_BITFIELD(reg, 16, 19))
 249
 250#define RIR_OFFSET(type, reg) (((type) == HASWELL || (type) == BROADWELL) ? \
 251	GET_BITFIELD(reg,  2, 15) : GET_BITFIELD(reg,  2, 14))
 252
 253/* Device 16, functions 2-7 */
 254
 255/*
 256 * FIXME: Implement the error count reads directly
 257 */
 258
 259static const u32 correrrcnt[] = {
 260	0x104, 0x108, 0x10c, 0x110,
 261};
 262
 263#define RANK_ODD_OV(reg)		GET_BITFIELD(reg, 31, 31)
 264#define RANK_ODD_ERR_CNT(reg)		GET_BITFIELD(reg, 16, 30)
 265#define RANK_EVEN_OV(reg)		GET_BITFIELD(reg, 15, 15)
 266#define RANK_EVEN_ERR_CNT(reg)		GET_BITFIELD(reg,  0, 14)
 267
 268static const u32 correrrthrsld[] = {
 269	0x11c, 0x120, 0x124, 0x128,
 270};
 271
 272#define RANK_ODD_ERR_THRSLD(reg)	GET_BITFIELD(reg, 16, 30)
 273#define RANK_EVEN_ERR_THRSLD(reg)	GET_BITFIELD(reg,  0, 14)
 274
 275
 276/* Device 17, function 0 */
 277
 278#define SB_RANK_CFG_A		0x0328
 279
 280#define IB_RANK_CFG_A		0x0320
 281
 282/*
 283 * sbridge structs
 284 */
 285
 286#define NUM_CHANNELS		6	/* Max channels per MC */
 287#define MAX_DIMMS		3	/* Max DIMMS per channel */
 288#define KNL_MAX_CHAS		38	/* KNL max num. of Cache Home Agents */
 289#define KNL_MAX_CHANNELS	6	/* KNL max num. of PCI channels */
 290#define KNL_MAX_EDCS		8	/* Embedded DRAM controllers */
 291#define CHANNEL_UNSPECIFIED	0xf	/* Intel IA32 SDM 15-14 */
 292
 293enum type {
 294	SANDY_BRIDGE,
 295	IVY_BRIDGE,
 296	HASWELL,
 297	BROADWELL,
 298	KNIGHTS_LANDING,
 299};
 300
 301enum domain {
 302	IMC0 = 0,
 303	IMC1,
 304	SOCK,
 305};
 306
 307enum mirroring_mode {
 308	NON_MIRRORING,
 309	ADDR_RANGE_MIRRORING,
 310	FULL_MIRRORING,
 311};
 312
 313struct sbridge_pvt;
 314struct sbridge_info {
 315	enum type	type;
 316	u32		mcmtr;
 317	u32		rankcfgr;
 318	u64		(*get_tolm)(struct sbridge_pvt *pvt);
 319	u64		(*get_tohm)(struct sbridge_pvt *pvt);
 320	u64		(*rir_limit)(u32 reg);
 321	u64		(*sad_limit)(u32 reg);
 322	u32		(*interleave_mode)(u32 reg);
 323	u32		(*dram_attr)(u32 reg);
 324	const u32	*dram_rule;
 325	const u32	*interleave_list;
 326	const struct interleave_pkg *interleave_pkg;
 327	u8		max_sad;
 328	u8		(*get_node_id)(struct sbridge_pvt *pvt);
 329	enum mem_type	(*get_memory_type)(struct sbridge_pvt *pvt);
 330	enum dev_type	(*get_width)(struct sbridge_pvt *pvt, u32 mtr);
 331	struct pci_dev	*pci_vtd;
 332};
 333
 334struct sbridge_channel {
 335	u32		ranks;
 336	u32		dimms;
 337};
 338
 339struct pci_id_descr {
 340	int			dev_id;
 341	int			optional;
 342	enum domain		dom;
 343};
 344
 345struct pci_id_table {
 346	const struct pci_id_descr	*descr;
 347	int				n_devs_per_imc;
 348	int				n_devs_per_sock;
 349	int				n_imcs_per_sock;
 350	enum type			type;
 351};
 352
 353struct sbridge_dev {
 354	struct list_head	list;
 355	u8			bus, mc;
 356	u8			node_id, source_id;
 357	struct pci_dev		**pdev;
 358	enum domain		dom;
 359	int			n_devs;
 360	int			i_devs;
 361	struct mem_ctl_info	*mci;
 362};
 363
 364struct knl_pvt {
 365	struct pci_dev          *pci_cha[KNL_MAX_CHAS];
 366	struct pci_dev          *pci_channel[KNL_MAX_CHANNELS];
 367	struct pci_dev          *pci_mc0;
 368	struct pci_dev          *pci_mc1;
 369	struct pci_dev          *pci_mc0_misc;
 370	struct pci_dev          *pci_mc1_misc;
 371	struct pci_dev          *pci_mc_info; /* tolm, tohm */
 372};
 373
 374struct sbridge_pvt {
 375	/* Devices per socket */
 376	struct pci_dev		*pci_ddrio;
 377	struct pci_dev		*pci_sad0, *pci_sad1;
 378	struct pci_dev		*pci_br0, *pci_br1;
 379	/* Devices per memory controller */
 380	struct pci_dev		*pci_ha, *pci_ta, *pci_ras;
 381	struct pci_dev		*pci_tad[NUM_CHANNELS];
 382
 383	struct sbridge_dev	*sbridge_dev;
 384
 385	struct sbridge_info	info;
 386	struct sbridge_channel	channel[NUM_CHANNELS];
 387
 388	/* Memory type detection */
 389	bool			is_cur_addr_mirrored, is_lockstep, is_close_pg;
 390	bool			is_chan_hash;
 391	enum mirroring_mode	mirror_mode;
 392
 393	/* Memory description */
 394	u64			tolm, tohm;
 395	struct knl_pvt knl;
 396};
 397
 398#define PCI_DESCR(device_id, opt, domain)	\
 399	.dev_id = (device_id),		\
 400	.optional = opt,	\
 401	.dom = domain
 402
 403static const struct pci_id_descr pci_dev_descr_sbridge[] = {
 404		/* Processor Home Agent */
 405	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0,   0, IMC0) },
 406
 407		/* Memory controller */
 408	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA,    0, IMC0) },
 409	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS,   0, IMC0) },
 410	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0,  0, IMC0) },
 411	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1,  0, IMC0) },
 412	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2,  0, IMC0) },
 413	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3,  0, IMC0) },
 414	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO, 1, SOCK) },
 415
 416		/* System Address Decoder */
 417	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0,      0, SOCK) },
 418	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1,      0, SOCK) },
 419
 420		/* Broadcast Registers */
 421	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_BR,        0, SOCK) },
 422};
 423
 424#define PCI_ID_TABLE_ENTRY(A, N, M, T) {	\
 425	.descr = A,			\
 426	.n_devs_per_imc = N,	\
 427	.n_devs_per_sock = ARRAY_SIZE(A),	\
 428	.n_imcs_per_sock = M,	\
 429	.type = T			\
 430}
 431
 432static const struct pci_id_table pci_dev_descr_sbridge_table[] = {
 433	PCI_ID_TABLE_ENTRY(pci_dev_descr_sbridge, ARRAY_SIZE(pci_dev_descr_sbridge), 1, SANDY_BRIDGE),
 434	{0,}			/* 0 terminated list. */
 435};
 436
  437/* This changes depending on whether 1HA or 2HA:
 438 * 1HA:
 439 *	0x0eb8 (17.0) is DDRIO0
 440 * 2HA:
 441 *	0x0ebc (17.4) is DDRIO0
 442 */
 443#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0	0x0eb8
 444#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0	0x0ebc
 445
 446/* pci ids */
 447#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0		0x0ea0
 448#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA		0x0ea8
 449#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS		0x0e71
 450#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0	0x0eaa
 451#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1	0x0eab
 452#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2	0x0eac
 453#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3	0x0ead
 454#define PCI_DEVICE_ID_INTEL_IBRIDGE_SAD			0x0ec8
 455#define PCI_DEVICE_ID_INTEL_IBRIDGE_BR0			0x0ec9
 456#define PCI_DEVICE_ID_INTEL_IBRIDGE_BR1			0x0eca
 457#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1		0x0e60
 458#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA		0x0e68
 459#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS		0x0e79
 460#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0	0x0e6a
 461#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1	0x0e6b
 462#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD2	0x0e6c
 463#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD3	0x0e6d
 464
 465static const struct pci_id_descr pci_dev_descr_ibridge[] = {
 466		/* Processor Home Agent */
 467	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0,        0, IMC0) },
 468	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1,        1, IMC1) },
 469
 470		/* Memory controller */
 471	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA,     0, IMC0) },
 472	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS,    0, IMC0) },
 473	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0,   0, IMC0) },
 474	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1,   0, IMC0) },
 475	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2,   0, IMC0) },
 476	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3,   0, IMC0) },
 477
 478		/* Optional, mode 2HA */
 479	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA,     1, IMC1) },
 480	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS,    1, IMC1) },
 481	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0,   1, IMC1) },
 482	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1,   1, IMC1) },
 483	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD2,   1, IMC1) },
 484	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD3,   1, IMC1) },
 485
 486	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0, 1, SOCK) },
 487	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0, 1, SOCK) },
 488
 489		/* System Address Decoder */
 490	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_SAD,            0, SOCK) },
 491
 492		/* Broadcast Registers */
 493	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_BR0,            1, SOCK) },
 494	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_BR1,            0, SOCK) },
 495
 496};
 497
 498static const struct pci_id_table pci_dev_descr_ibridge_table[] = {
 499	PCI_ID_TABLE_ENTRY(pci_dev_descr_ibridge, 12, 2, IVY_BRIDGE),
 500	{0,}			/* 0 terminated list. */
 501};
 502
 503/* Haswell support */
 504/* EN processor:
 505 *	- 1 IMC
 506 *	- 3 DDR3 channels, 2 DPC per channel
 507 * EP processor:
 508 *	- 1 or 2 IMC
 509 *	- 4 DDR4 channels, 3 DPC per channel
 510 * EP 4S processor:
 511 *	- 2 IMC
 512 *	- 4 DDR4 channels, 3 DPC per channel
 513 * EX processor:
 514 *	- 2 IMC
 515 *	- each IMC interfaces with a SMI 2 channel
 516 *	- each SMI channel interfaces with a scalable memory buffer
 517 *	- each scalable memory buffer supports 4 DDR3/DDR4 channels, 3 DPC
 518 */
 519#define HASWELL_DDRCRCLKCONTROLS 0xa10 /* Ditto on Broadwell */
 520#define HASWELL_HASYSDEFEATURE2 0x84
 521#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_VTD_MISC 0x2f28
 522#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0	0x2fa0
 523#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1	0x2f60
 524#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA	0x2fa8
 525#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TM	0x2f71
 526#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA	0x2f68
 527#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TM	0x2f79
 528#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD0 0x2ffc
 529#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD1 0x2ffd
 530#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0 0x2faa
 531#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD1 0x2fab
 532#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD2 0x2fac
 533#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3 0x2fad
 534#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0 0x2f6a
 535#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD1 0x2f6b
 536#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD2 0x2f6c
 537#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD3 0x2f6d
 538#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0 0x2fbd
 539#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO1 0x2fbf
 540#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO2 0x2fb9
 541#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO3 0x2fbb
 542static const struct pci_id_descr pci_dev_descr_haswell[] = {
 543	/* first item must be the HA */
 544	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0,      0, IMC0) },
 545	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1,      1, IMC1) },
 546
 547	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA,   0, IMC0) },
 548	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TM,   0, IMC0) },
 549	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0, 0, IMC0) },
 550	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD1, 0, IMC0) },
 551	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD2, 1, IMC0) },
 552	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3, 1, IMC0) },
 553
 554	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA,   1, IMC1) },
 555	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TM,   1, IMC1) },
 556	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0, 1, IMC1) },
 557	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD1, 1, IMC1) },
 558	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD2, 1, IMC1) },
 559	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD3, 1, IMC1) },
 560
 561	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD0, 0, SOCK) },
 562	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD1, 0, SOCK) },
 563	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0,   1, SOCK) },
 564	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO1,   1, SOCK) },
 565	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO2,   1, SOCK) },
 566	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO3,   1, SOCK) },
 567};
 568
 569static const struct pci_id_table pci_dev_descr_haswell_table[] = {
 570	PCI_ID_TABLE_ENTRY(pci_dev_descr_haswell, 13, 2, HASWELL),
 571	{0,}			/* 0 terminated list. */
 572};
 573
 574/* Knight's Landing Support */
 575/*
 576 * KNL's memory channels are swizzled between memory controllers.
 577 * MC0 is mapped to CH3,4,5 and MC1 is mapped to CH0,1,2
 578 */
 579#define knl_channel_remap(mc, chan) ((mc) ? (chan) : (chan) + 3)
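/*
 * Editor's note, illustrative use of the remap above:
 *	knl_channel_remap(0, 1) == 4	(MC0, logical channel 1 -> physical CH4)
 *	knl_channel_remap(1, 1) == 1	(MC1, logical channel 1 -> physical CH1)
 */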
 580
 581/* Memory controller, TAD tables, error injection - 2-8-0, 2-9-0 (2 of these) */
 582#define PCI_DEVICE_ID_INTEL_KNL_IMC_MC       0x7840
 583/* DRAM channel stuff; bank addrs, dimmmtr, etc.. 2-8-2 - 2-9-4 (6 of these) */
 584#define PCI_DEVICE_ID_INTEL_KNL_IMC_CHAN     0x7843
 585/* kdrwdbu TAD limits/offsets, MCMTR - 2-10-1, 2-11-1 (2 of these) */
 586#define PCI_DEVICE_ID_INTEL_KNL_IMC_TA       0x7844
 587/* CHA broadcast registers, dram rules - 1-29-0 (1 of these) */
 588#define PCI_DEVICE_ID_INTEL_KNL_IMC_SAD0     0x782a
 589/* SAD target - 1-29-1 (1 of these) */
 590#define PCI_DEVICE_ID_INTEL_KNL_IMC_SAD1     0x782b
 591/* Caching / Home Agent */
 592#define PCI_DEVICE_ID_INTEL_KNL_IMC_CHA      0x782c
 593/* Device with TOLM and TOHM, 0-5-0 (1 of these) */
 594#define PCI_DEVICE_ID_INTEL_KNL_IMC_TOLHM    0x7810
 595
 596/*
 597 * KNL differs from SB, IB, and Haswell in that it has multiple
 598 * instances of the same device with the same device ID, so we handle that
 599 * by creating as many copies in the table as we expect to find.
 600 * (Like device IDs must be grouped together.)
 601 */
 602
 603static const struct pci_id_descr pci_dev_descr_knl[] = {
 604	[0 ... 1]   = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_MC,    0, IMC0)},
 605	[2 ... 7]   = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_CHAN,  0, IMC0) },
 606	[8]	    = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_TA,    0, IMC0) },
 607	[9]	    = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_TOLHM, 0, IMC0) },
 608	[10]	    = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_SAD0,  0, SOCK) },
 609	[11]	    = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_SAD1,  0, SOCK) },
 610	[12 ... 49] = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_CHA,   0, SOCK) },
 611};
 612
 613static const struct pci_id_table pci_dev_descr_knl_table[] = {
 614	PCI_ID_TABLE_ENTRY(pci_dev_descr_knl, ARRAY_SIZE(pci_dev_descr_knl), 1, KNIGHTS_LANDING),
 615	{0,}
 616};
 617
 618/*
 619 * Broadwell support
 620 *
 621 * DE processor:
 622 *	- 1 IMC
 623 *	- 2 DDR3 channels, 2 DPC per channel
 624 * EP processor:
 625 *	- 1 or 2 IMC
 626 *	- 4 DDR4 channels, 3 DPC per channel
 627 * EP 4S processor:
 628 *	- 2 IMC
 629 *	- 4 DDR4 channels, 3 DPC per channel
 630 * EX processor:
 631 *	- 2 IMC
 632 *	- each IMC interfaces with a SMI 2 channel
 633 *	- each SMI channel interfaces with a scalable memory buffer
 634 *	- each scalable memory buffer supports 4 DDR3/DDR4 channels, 3 DPC
 635 */
 636#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_VTD_MISC 0x6f28
 637#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0	0x6fa0
 638#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1	0x6f60
 639#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA	0x6fa8
 640#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TM	0x6f71
 641#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TA	0x6f68
 642#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TM	0x6f79
 643#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD0 0x6ffc
 644#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD1 0x6ffd
 645#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD0 0x6faa
 646#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD1 0x6fab
 647#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD2 0x6fac
 648#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD3 0x6fad
 649#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD0 0x6f6a
 650#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD1 0x6f6b
 651#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD2 0x6f6c
 652#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD3 0x6f6d
 653#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_DDRIO0 0x6faf
 654
 655static const struct pci_id_descr pci_dev_descr_broadwell[] = {
 656	/* first item must be the HA */
 657	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0,      0, IMC0) },
 658	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1,      1, IMC1) },
 659
 660	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA,   0, IMC0) },
 661	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TM,   0, IMC0) },
 662	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD0, 0, IMC0) },
 663	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD1, 0, IMC0) },
 664	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD2, 1, IMC0) },
 665	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD3, 1, IMC0) },
 666
 667	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TA,   1, IMC1) },
 668	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TM,   1, IMC1) },
 669	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD0, 1, IMC1) },
 670	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD1, 1, IMC1) },
 671	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD2, 1, IMC1) },
 672	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD3, 1, IMC1) },
 673
 674	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD0, 0, SOCK) },
 675	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD1, 0, SOCK) },
 676	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_DDRIO0,   1, SOCK) },
 677};
 678
 679static const struct pci_id_table pci_dev_descr_broadwell_table[] = {
 680	PCI_ID_TABLE_ENTRY(pci_dev_descr_broadwell, 10, 2, BROADWELL),
 681	{0,}			/* 0 terminated list. */
 682};
 683
 684
 685/****************************************************************************
 686			Ancillary status routines
 687 ****************************************************************************/
 688
 689static inline int numrank(enum type type, u32 mtr)
 690{
 691	int ranks = (1 << RANK_CNT_BITS(mtr));
 692	int max = 4;
 693
 694	if (type == HASWELL || type == BROADWELL || type == KNIGHTS_LANDING)
 695		max = 8;
 696
 697	if (ranks > max) {
 698		edac_dbg(0, "Invalid number of ranks: %d (max = %i) raw value = %x (%04x)\n",
 699			 ranks, max, (unsigned int)RANK_CNT_BITS(mtr), mtr);
 700		return -EINVAL;
 701	}
 702
 703	return ranks;
 704}
 705
 706static inline int numrow(u32 mtr)
 707{
 708	int rows = (RANK_WIDTH_BITS(mtr) + 12);
 709
 710	if (rows < 13 || rows > 18) {
 711		edac_dbg(0, "Invalid number of rows: %d (should be between 13 and 18) raw value = %x (%04x)\n",
 712			 rows, (unsigned int)RANK_WIDTH_BITS(mtr), mtr);
 713		return -EINVAL;
 714	}
 715
 716	return 1 << rows;
 717}
 718
 719static inline int numcol(u32 mtr)
 720{
 721	int cols = (COL_WIDTH_BITS(mtr) + 10);
 722
 723	if (cols > 12) {
 724		edac_dbg(0, "Invalid number of cols: %d (max = 12) raw value = %x (%04x)\n",
 725			 cols, (unsigned int)COL_WIDTH_BITS(mtr), mtr);
 726		return -EINVAL;
 727	}
 728
 729	return 1 << cols;
 730}
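/*
 * Worked example (editor's note, hypothetical MTR value): mtr = 0x500c has
 * bit 14 set (DIMM present), RANK_CNT_BITS = 1, RANK_WIDTH_BITS = 3 and
 * COL_WIDTH_BITS = 0, so the helpers above return
 *	numrank() = 2, numrow() = 32768, numcol() = 1024
 * With 8 banks (DDR3), __populate_dimms() later computes
 *	size = (32768 * 1024 * 8 * 2) >> (20 - 3) = 4096 MiB
 * i.e. a 4 GB dual-rank DIMM.
 */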
 731
 732static struct sbridge_dev *get_sbridge_dev(u8 bus, enum domain dom, int multi_bus,
 733					   struct sbridge_dev *prev)
 734{
 735	struct sbridge_dev *sbridge_dev;
 736
 737	/*
 738	 * If we have devices scattered across several busses that pertain
 739	 * to the same memory controller, we'll lump them all together.
 740	 */
 741	if (multi_bus) {
 742		return list_first_entry_or_null(&sbridge_edac_list,
 743				struct sbridge_dev, list);
 744	}
 745
 746	sbridge_dev = list_entry(prev ? prev->list.next
 747				      : sbridge_edac_list.next, struct sbridge_dev, list);
 748
 749	list_for_each_entry_from(sbridge_dev, &sbridge_edac_list, list) {
 750		if (sbridge_dev->bus == bus && (dom == SOCK || dom == sbridge_dev->dom))
 751			return sbridge_dev;
 752	}
 753
 754	return NULL;
 755}
 756
 757static struct sbridge_dev *alloc_sbridge_dev(u8 bus, enum domain dom,
 758					     const struct pci_id_table *table)
 759{
 760	struct sbridge_dev *sbridge_dev;
 761
 762	sbridge_dev = kzalloc(sizeof(*sbridge_dev), GFP_KERNEL);
 763	if (!sbridge_dev)
 764		return NULL;
 765
 766	sbridge_dev->pdev = kcalloc(table->n_devs_per_imc,
 767				    sizeof(*sbridge_dev->pdev),
 768				    GFP_KERNEL);
 769	if (!sbridge_dev->pdev) {
 770		kfree(sbridge_dev);
 771		return NULL;
 772	}
 773
 774	sbridge_dev->bus = bus;
 775	sbridge_dev->dom = dom;
 776	sbridge_dev->n_devs = table->n_devs_per_imc;
 777	list_add_tail(&sbridge_dev->list, &sbridge_edac_list);
 778
 779	return sbridge_dev;
 780}
 781
 782static void free_sbridge_dev(struct sbridge_dev *sbridge_dev)
 783{
 784	list_del(&sbridge_dev->list);
 785	kfree(sbridge_dev->pdev);
 786	kfree(sbridge_dev);
 787}
 788
 789static u64 sbridge_get_tolm(struct sbridge_pvt *pvt)
 790{
 791	u32 reg;
 792
 793	/* Address range is 32:28 */
 794	pci_read_config_dword(pvt->pci_sad1, TOLM, &reg);
 795	return GET_TOLM(reg);
 796}
 797
 798static u64 sbridge_get_tohm(struct sbridge_pvt *pvt)
 799{
 800	u32 reg;
 801
 802	pci_read_config_dword(pvt->pci_sad1, TOHM, &reg);
 803	return GET_TOHM(reg);
 804}
 805
 806static u64 ibridge_get_tolm(struct sbridge_pvt *pvt)
 807{
 808	u32 reg;
 809
 810	pci_read_config_dword(pvt->pci_br1, TOLM, &reg);
 811
 812	return GET_TOLM(reg);
 813}
 814
 815static u64 ibridge_get_tohm(struct sbridge_pvt *pvt)
 816{
 817	u32 reg;
 818
 819	pci_read_config_dword(pvt->pci_br1, TOHM, &reg);
 820
 821	return GET_TOHM(reg);
 822}
 823
 824static u64 rir_limit(u32 reg)
 825{
 826	return ((u64)GET_BITFIELD(reg,  1, 10) << 29) | 0x1fffffff;
 827}
 828
 829static u64 sad_limit(u32 reg)
 830{
 831	return (GET_BITFIELD(reg, 6, 25) << 26) | 0x3ffffff;
 832}
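/*
 * Editor's note, illustrative decode of the two limit helpers above
 * (hypothetical register values): a RIR register whose bits 10:1 read 0x7
 * gives rir_limit() = (7 << 29) | 0x1fffffff = 0xffffffff, and a DRAM rule
 * whose bits 25:6 read 0x3f gives sad_limit() = (0x3f << 26) | 0x3ffffff =
 * 0xffffffff; both describe ranges ending at 4 GiB - 1 (RIR limits are in
 * 512 MiB steps, SAD limits in 64 MiB steps).
 */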
 833
 834static u32 interleave_mode(u32 reg)
 835{
 836	return GET_BITFIELD(reg, 1, 1);
 837}
 838
 839static u32 dram_attr(u32 reg)
 840{
 841	return GET_BITFIELD(reg, 2, 3);
 842}
 843
 844static u64 knl_sad_limit(u32 reg)
 845{
 846	return (GET_BITFIELD(reg, 7, 26) << 26) | 0x3ffffff;
 847}
 848
 849static u32 knl_interleave_mode(u32 reg)
 850{
 851	return GET_BITFIELD(reg, 1, 2);
 852}
 853
 854static const char * const knl_intlv_mode[] = {
 855	"[8:6]", "[10:8]", "[14:12]", "[32:30]"
 856};
 857
 858static const char *get_intlv_mode_str(u32 reg, enum type t)
 859{
 860	if (t == KNIGHTS_LANDING)
 861		return knl_intlv_mode[knl_interleave_mode(reg)];
 862	else
 863		return interleave_mode(reg) ? "[8:6]" : "[8:6]XOR[18:16]";
 864}
 865
 866static u32 dram_attr_knl(u32 reg)
 867{
 868	return GET_BITFIELD(reg, 3, 4);
 869}
 870
 871
 872static enum mem_type get_memory_type(struct sbridge_pvt *pvt)
 873{
 874	u32 reg;
 875	enum mem_type mtype;
 876
 877	if (pvt->pci_ddrio) {
 878		pci_read_config_dword(pvt->pci_ddrio, pvt->info.rankcfgr,
 879				      &reg);
 880		if (GET_BITFIELD(reg, 11, 11))
 881			/* FIXME: Can also be LRDIMM */
 882			mtype = MEM_RDDR3;
 883		else
 884			mtype = MEM_DDR3;
 885	} else
 886		mtype = MEM_UNKNOWN;
 887
 888	return mtype;
 889}
 890
 891static enum mem_type haswell_get_memory_type(struct sbridge_pvt *pvt)
 892{
 893	u32 reg;
 894	bool registered = false;
 895	enum mem_type mtype = MEM_UNKNOWN;
 896
 897	if (!pvt->pci_ddrio)
 898		goto out;
 899
 900	pci_read_config_dword(pvt->pci_ddrio,
 901			      HASWELL_DDRCRCLKCONTROLS, &reg);
 902	/* Is_Rdimm */
 903	if (GET_BITFIELD(reg, 16, 16))
 904		registered = true;
 905
 906	pci_read_config_dword(pvt->pci_ta, MCMTR, &reg);
 907	if (GET_BITFIELD(reg, 14, 14)) {
 908		if (registered)
 909			mtype = MEM_RDDR4;
 910		else
 911			mtype = MEM_DDR4;
 912	} else {
 913		if (registered)
 914			mtype = MEM_RDDR3;
 915		else
 916			mtype = MEM_DDR3;
 917	}
 918
 919out:
 920	return mtype;
 921}
 922
 923static enum dev_type knl_get_width(struct sbridge_pvt *pvt, u32 mtr)
 924{
 925	/* for KNL value is fixed */
 926	return DEV_X16;
 927}
 928
 929static enum dev_type sbridge_get_width(struct sbridge_pvt *pvt, u32 mtr)
 930{
 931	/* there's no way to figure out */
 932	return DEV_UNKNOWN;
 933}
 934
 935static enum dev_type __ibridge_get_width(u32 mtr)
 936{
 937	enum dev_type type;
 938
 939	switch (mtr) {
 940	case 3:
 941		type = DEV_UNKNOWN;
 942		break;
 943	case 2:
 944		type = DEV_X16;
 945		break;
 946	case 1:
 947		type = DEV_X8;
 948		break;
 949	case 0:
 950		type = DEV_X4;
 951		break;
 952	}
 953
 954	return type;
 955}
 956
 957static enum dev_type ibridge_get_width(struct sbridge_pvt *pvt, u32 mtr)
 958{
 959	/*
 960	 * ddr3_width in the documentation, but also valid for DDR4 on
 961	 * Haswell
 962	 */
 963	return __ibridge_get_width(GET_BITFIELD(mtr, 7, 8));
 964}
 965
 966static enum dev_type broadwell_get_width(struct sbridge_pvt *pvt, u32 mtr)
 967{
 968	/* ddr3_width in the documentation, but also valid for DDR4 */
 969	return __ibridge_get_width(GET_BITFIELD(mtr, 8, 9));
 970}
 971
 972static enum mem_type knl_get_memory_type(struct sbridge_pvt *pvt)
 973{
 974	/* DDR4 RDIMMS and LRDIMMS are supported */
 975	return MEM_RDDR4;
 976}
 977
 978static u8 get_node_id(struct sbridge_pvt *pvt)
 979{
 980	u32 reg;
 981	pci_read_config_dword(pvt->pci_br0, SAD_CONTROL, &reg);
 982	return GET_BITFIELD(reg, 0, 2);
 983}
 984
 985static u8 haswell_get_node_id(struct sbridge_pvt *pvt)
 986{
 987	u32 reg;
 988
 989	pci_read_config_dword(pvt->pci_sad1, SAD_CONTROL, &reg);
 990	return GET_BITFIELD(reg, 0, 3);
 991}
 992
 993static u8 knl_get_node_id(struct sbridge_pvt *pvt)
 994{
 995	u32 reg;
 996
 997	pci_read_config_dword(pvt->pci_sad1, SAD_CONTROL, &reg);
 998	return GET_BITFIELD(reg, 0, 2);
 999}
1000
1001
1002static u64 haswell_get_tolm(struct sbridge_pvt *pvt)
1003{
1004	u32 reg;
1005
1006	pci_read_config_dword(pvt->info.pci_vtd, HASWELL_TOLM, &reg);
1007	return (GET_BITFIELD(reg, 26, 31) << 26) | 0x3ffffff;
1008}
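/*
 * Editor's note, illustrative decode (hypothetical register value): a
 * HASWELL_TOLM register whose bits 31:26 read 0x2f gives
 *	(0x2f << 26) | 0x3ffffff = 0xbfffffff
 * i.e. low memory ends at 3 GiB - 1.
 */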
1009
1010static u64 haswell_get_tohm(struct sbridge_pvt *pvt)
1011{
1012	u64 rc;
1013	u32 reg;
1014
1015	pci_read_config_dword(pvt->info.pci_vtd, HASWELL_TOHM_0, &reg);
1016	rc = GET_BITFIELD(reg, 26, 31);
1017	pci_read_config_dword(pvt->info.pci_vtd, HASWELL_TOHM_1, &reg);
1018	rc = ((reg << 6) | rc) << 26;
1019
1020	return rc | 0x1ffffff;
1021}
1022
1023static u64 knl_get_tolm(struct sbridge_pvt *pvt)
1024{
1025	u32 reg;
1026
1027	pci_read_config_dword(pvt->knl.pci_mc_info, KNL_TOLM, &reg);
1028	return (GET_BITFIELD(reg, 26, 31) << 26) | 0x3ffffff;
1029}
1030
1031static u64 knl_get_tohm(struct sbridge_pvt *pvt)
1032{
1033	u64 rc;
1034	u32 reg_lo, reg_hi;
1035
1036	pci_read_config_dword(pvt->knl.pci_mc_info, KNL_TOHM_0, &reg_lo);
1037	pci_read_config_dword(pvt->knl.pci_mc_info, KNL_TOHM_1, &reg_hi);
1038	rc = ((u64)reg_hi << 32) | reg_lo;
1039	return rc | 0x3ffffff;
1040}
1041
1042
1043static u64 haswell_rir_limit(u32 reg)
1044{
1045	return (((u64)GET_BITFIELD(reg,  1, 11) + 1) << 29) - 1;
1046}
1047
1048static inline u8 sad_pkg_socket(u8 pkg)
1049{
1050	/* on Ivy Bridge, nodeID is SASS, where A is HA and S is node id */
1051	return ((pkg >> 3) << 2) | (pkg & 0x3);
1052}
1053
1054static inline u8 sad_pkg_ha(u8 pkg)
1055{
1056	return (pkg >> 2) & 0x1;
1057}
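/*
 * Editor's note, worked example of the "SASS" decode above (hypothetical
 * interleave target): pkg = 0b0110 gives sad_pkg_ha() = 1 and
 * sad_pkg_socket() = ((0 << 2) | 2) = 2, i.e. home agent 1 on socket 2.
 */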
1058
1059static int haswell_chan_hash(int idx, u64 addr)
1060{
1061	int i;
1062
1063	/*
1064	 * XOR even bits from 12:26 to bit0 of idx,
1065	 *     odd bits from 13:27 to bit1
1066	 */
1067	for (i = 12; i < 28; i += 2)
1068		idx ^= (addr >> i) & 3;
1069
1070	return idx;
1071}
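/*
 * Editor's note, illustrative hash (hypothetical address): for addr = 0x3000
 * only the i = 12 step contributes, (0x3000 >> 12) & 3 = 3, so
 * haswell_chan_hash(0, 0x3000) returns 0 ^ 3 = 3.
 */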
1072
1073/* Low bits of TAD limit, and some metadata. */
1074static const u32 knl_tad_dram_limit_lo[] = {
1075	0x400, 0x500, 0x600, 0x700,
1076	0x800, 0x900, 0xa00, 0xb00,
1077};
1078
1079/* Low bits of TAD offset. */
1080static const u32 knl_tad_dram_offset_lo[] = {
1081	0x404, 0x504, 0x604, 0x704,
1082	0x804, 0x904, 0xa04, 0xb04,
1083};
1084
1085/* High 16 bits of TAD limit and offset. */
1086static const u32 knl_tad_dram_hi[] = {
1087	0x408, 0x508, 0x608, 0x708,
1088	0x808, 0x908, 0xa08, 0xb08,
1089};
1090
1091/* Number of ways a tad entry is interleaved. */
1092static const u32 knl_tad_ways[] = {
1093	8, 6, 4, 3, 2, 1,
1094};
1095
1096/*
1097 * Retrieve the n'th Target Address Decode table entry
1098 * from the memory controller's TAD table.
1099 *
1100 * @pvt:	driver private data
1101 * @entry:	which entry you want to retrieve
1102 * @mc:		which memory controller (0 or 1)
1103 * @offset:	output tad range offset
1104 * @limit:	output address of first byte above tad range
1105 * @ways:	output number of interleave ways
1106 *
1107 * The offset value has curious semantics.  It's a sort of running total
1108 * of the sizes of all the memory regions that aren't mapped in this
1109 * tad table.
1110 */
1111static int knl_get_tad(const struct sbridge_pvt *pvt,
1112		const int entry,
1113		const int mc,
1114		u64 *offset,
1115		u64 *limit,
1116		int *ways)
1117{
1118	u32 reg_limit_lo, reg_offset_lo, reg_hi;
1119	struct pci_dev *pci_mc;
1120	int way_id;
1121
1122	switch (mc) {
1123	case 0:
1124		pci_mc = pvt->knl.pci_mc0;
1125		break;
1126	case 1:
1127		pci_mc = pvt->knl.pci_mc1;
1128		break;
1129	default:
1130		WARN_ON(1);
1131		return -EINVAL;
1132	}
1133
1134	pci_read_config_dword(pci_mc,
1135			knl_tad_dram_limit_lo[entry], &reg_limit_lo);
1136	pci_read_config_dword(pci_mc,
1137			knl_tad_dram_offset_lo[entry], &reg_offset_lo);
1138	pci_read_config_dword(pci_mc,
1139			knl_tad_dram_hi[entry], &reg_hi);
1140
1141	/* Is this TAD entry enabled? */
1142	if (!GET_BITFIELD(reg_limit_lo, 0, 0))
1143		return -ENODEV;
1144
1145	way_id = GET_BITFIELD(reg_limit_lo, 3, 5);
1146
1147	if (way_id < ARRAY_SIZE(knl_tad_ways)) {
1148		*ways = knl_tad_ways[way_id];
1149	} else {
1150		*ways = 0;
1151		sbridge_printk(KERN_ERR,
1152				"Unexpected value %d in mc_tad_limit_lo wayness field\n",
1153				way_id);
1154		return -ENODEV;
1155	}
1156
1157	/*
1158	 * The least significant 6 bits of base and limit are truncated.
1159	 * For limit, we fill the missing bits with 1s.
1160	 */
1161	*offset = ((u64) GET_BITFIELD(reg_offset_lo, 6, 31) << 6) |
1162				((u64) GET_BITFIELD(reg_hi, 0,  15) << 32);
1163	*limit = ((u64) GET_BITFIELD(reg_limit_lo,  6, 31) << 6) | 63 |
1164				((u64) GET_BITFIELD(reg_hi, 16, 31) << 32);
1165
1166	return 0;
1167}
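/*
 * Editor's note, illustrative decode (hypothetical register contents): an
 * enabled TAD entry whose limit_lo bits 31:6 read 0x1ffffff, whose way field
 * (bits 5:3) reads 2 and whose hi register reads 0 decodes to
 * *limit = 0x7fffffff (2 GiB - 1) and *ways = knl_tad_ways[2] = 4.
 */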
1168
1169/* Determine which memory controller is responsible for a given channel. */
1170static int knl_channel_mc(int channel)
1171{
1172	WARN_ON(channel < 0 || channel >= 6);
1173
1174	return channel < 3 ? 1 : 0;
1175}
1176
1177/*
1178 * Get the Nth entry from EDC_ROUTE_TABLE register.
1179 * (This is the per-tile mapping of logical interleave targets to
1180 *  physical EDC modules.)
1181 *
1182 * entry 0: 0:2
1183 *       1: 3:5
1184 *       2: 6:8
1185 *       3: 9:11
1186 *       4: 12:14
1187 *       5: 15:17
1188 *       6: 18:20
1189 *       7: 21:23
1190 * reserved: 24:31
1191 */
1192static u32 knl_get_edc_route(int entry, u32 reg)
1193{
1194	WARN_ON(entry >= KNL_MAX_EDCS);
1195	return GET_BITFIELD(reg, entry*3, (entry*3)+2);
1196}
1197
1198/*
1199 * Get the Nth entry from MC_ROUTE_TABLE register.
1200 * (This is the per-tile mapping of logical interleave targets to
1201 *  physical DRAM channels.)
1202 *
1203 * entry 0: mc 0:2   channel 18:19
1204 *       1: mc 3:5   channel 20:21
1205 *       2: mc 6:8   channel 22:23
1206 *       3: mc 9:11  channel 24:25
1207 *       4: mc 12:14 channel 26:27
1208 *       5: mc 15:17 channel 28:29
1209 * reserved: 30:31
1210 *
1211 * Though we have 3 bits to identify the MC, we should only see
1212 * the values 0 or 1.
1213 */
1214
1215static u32 knl_get_mc_route(int entry, u32 reg)
1216{
1217	int mc, chan;
1218
1219	WARN_ON(entry >= KNL_MAX_CHANNELS);
1220
1221	mc = GET_BITFIELD(reg, entry*3, (entry*3)+2);
1222	chan = GET_BITFIELD(reg, (entry*2) + 18, (entry*2) + 18 + 1);
1223
1224	return knl_channel_remap(mc, chan);
1225}
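/*
 * Editor's note, worked example (hypothetical register value): for entry 0,
 * a MC_ROUTE register with bits 2:0 = 1 and bits 19:18 = 2 (reg = 0x80001)
 * yields mc = 1, chan = 2, so knl_get_mc_route(0, reg) returns
 * knl_channel_remap(1, 2) = 2; had mc been 0 it would return 2 + 3 = 5.
 */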
1226
1227/*
1228 * Render the EDC_ROUTE register in human-readable form.
1229 * Output string s should be at least KNL_MAX_EDCS*2 bytes.
1230 */
1231static void knl_show_edc_route(u32 reg, char *s)
1232{
1233	int i;
1234
1235	for (i = 0; i < KNL_MAX_EDCS; i++) {
1236		s[i*2] = knl_get_edc_route(i, reg) + '0';
1237		s[i*2+1] = '-';
1238	}
1239
1240	s[KNL_MAX_EDCS*2 - 1] = '\0';
1241}
1242
1243/*
1244 * Render the MC_ROUTE register in human-readable form.
1245 * Output string s should be at least KNL_MAX_CHANNELS*2 bytes.
1246 */
1247static void knl_show_mc_route(u32 reg, char *s)
1248{
1249	int i;
1250
1251	for (i = 0; i < KNL_MAX_CHANNELS; i++) {
1252		s[i*2] = knl_get_mc_route(i, reg) + '0';
1253		s[i*2+1] = '-';
1254	}
1255
1256	s[KNL_MAX_CHANNELS*2 - 1] = '\0';
1257}
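/*
 * Editor's note: with the helpers above, a route register that maps the six
 * interleave targets to channels 3, 4, 5, 0, 1 and 2 is rendered as the
 * string "3-4-5-0-1-2" in the debug output (hypothetical example).
 */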
1258
1259#define KNL_EDC_ROUTE 0xb8
1260#define KNL_MC_ROUTE 0xb4
1261
1262/* Is this dram rule backed by regular DRAM in flat mode? */
1263#define KNL_EDRAM(reg) GET_BITFIELD(reg, 29, 29)
1264
1265/* Is this dram rule cached? */
1266#define KNL_CACHEABLE(reg) GET_BITFIELD(reg, 28, 28)
1267
1268/* Is this rule backed by edc ? */
1269#define KNL_EDRAM_ONLY(reg) GET_BITFIELD(reg, 29, 29)
1270
1271/* Is this rule backed by DRAM, cacheable in EDRAM? */
1272#define KNL_CACHEABLE(reg) GET_BITFIELD(reg, 28, 28)
1273
1274/* Is this rule mod3? */
1275#define KNL_MOD3(reg) GET_BITFIELD(reg, 27, 27)
1276
1277/*
1278 * Figure out how big our RAM modules are.
1279 *
1280 * The DIMMMTR register in KNL doesn't tell us the size of the DIMMs, so we
1281 * have to figure this out from the SAD rules, interleave lists, route tables,
1282 * and TAD rules.
1283 *
1284 * SAD rules can have holes in them (e.g. the 3G-4G hole), so we have to
1285 * inspect the TAD rules to figure out how large the SAD regions really are.
1286 *
1287 * When we know the real size of a SAD region and how many ways it's
1288 * interleaved, we know the individual contribution of each channel to
1289 * TAD is size/ways.
1290 *
1291 * Finally, we have to check whether each channel participates in each SAD
1292 * region.
1293 *
1294 * Fortunately, KNL only supports one DIMM per channel, so once we know how
1295 * much memory the channel uses, we know the DIMM is at least that large.
1296 * (The BIOS might possibly choose not to map all available memory, in which
1297 * case we will underreport the size of the DIMM.)
1298 *
1299 * In theory, we could try to determine the EDC sizes as well, but that would
1300 * only work in flat mode, not in cache mode.
1301 *
1302 * @mc_sizes: Output sizes of channels (must have space for KNL_MAX_CHANNELS
1303 *            elements)
1304 */
1305static int knl_get_dimm_capacity(struct sbridge_pvt *pvt, u64 *mc_sizes)
1306{
1307	u64 sad_base, sad_size, sad_limit = 0;
1308	u64 tad_base, tad_size, tad_limit, tad_deadspace, tad_livespace;
1309	int sad_rule = 0;
1310	int tad_rule = 0;
1311	int intrlv_ways, tad_ways;
1312	u32 first_pkg, pkg;
1313	int i;
1314	u64 sad_actual_size[2]; /* sad size accounting for holes, per mc */
1315	u32 dram_rule, interleave_reg;
1316	u32 mc_route_reg[KNL_MAX_CHAS];
1317	u32 edc_route_reg[KNL_MAX_CHAS];
1318	int edram_only;
1319	char edc_route_string[KNL_MAX_EDCS*2];
1320	char mc_route_string[KNL_MAX_CHANNELS*2];
1321	int cur_reg_start;
1322	int mc;
1323	int channel;
1324	int participants[KNL_MAX_CHANNELS];
1325
1326	for (i = 0; i < KNL_MAX_CHANNELS; i++)
1327		mc_sizes[i] = 0;
1328
1329	/* Read the EDC route table in each CHA. */
1330	cur_reg_start = 0;
1331	for (i = 0; i < KNL_MAX_CHAS; i++) {
1332		pci_read_config_dword(pvt->knl.pci_cha[i],
1333				KNL_EDC_ROUTE, &edc_route_reg[i]);
1334
1335		if (i > 0 && edc_route_reg[i] != edc_route_reg[i-1]) {
1336			knl_show_edc_route(edc_route_reg[i-1],
1337					edc_route_string);
1338			if (cur_reg_start == i-1)
1339				edac_dbg(0, "edc route table for CHA %d: %s\n",
1340					cur_reg_start, edc_route_string);
1341			else
1342				edac_dbg(0, "edc route table for CHA %d-%d: %s\n",
1343					cur_reg_start, i-1, edc_route_string);
1344			cur_reg_start = i;
1345		}
1346	}
1347	knl_show_edc_route(edc_route_reg[i-1], edc_route_string);
1348	if (cur_reg_start == i-1)
1349		edac_dbg(0, "edc route table for CHA %d: %s\n",
1350			cur_reg_start, edc_route_string);
1351	else
1352		edac_dbg(0, "edc route table for CHA %d-%d: %s\n",
1353			cur_reg_start, i-1, edc_route_string);
1354
1355	/* Read the MC route table in each CHA. */
1356	cur_reg_start = 0;
1357	for (i = 0; i < KNL_MAX_CHAS; i++) {
1358		pci_read_config_dword(pvt->knl.pci_cha[i],
1359			KNL_MC_ROUTE, &mc_route_reg[i]);
1360
1361		if (i > 0 && mc_route_reg[i] != mc_route_reg[i-1]) {
1362			knl_show_mc_route(mc_route_reg[i-1], mc_route_string);
1363			if (cur_reg_start == i-1)
1364				edac_dbg(0, "mc route table for CHA %d: %s\n",
1365					cur_reg_start, mc_route_string);
1366			else
1367				edac_dbg(0, "mc route table for CHA %d-%d: %s\n",
1368					cur_reg_start, i-1, mc_route_string);
1369			cur_reg_start = i;
1370		}
1371	}
1372	knl_show_mc_route(mc_route_reg[i-1], mc_route_string);
1373	if (cur_reg_start == i-1)
1374		edac_dbg(0, "mc route table for CHA %d: %s\n",
1375			cur_reg_start, mc_route_string);
1376	else
1377		edac_dbg(0, "mc route table for CHA %d-%d: %s\n",
1378			cur_reg_start, i-1, mc_route_string);
1379
1380	/* Process DRAM rules */
1381	for (sad_rule = 0; sad_rule < pvt->info.max_sad; sad_rule++) {
1382		/* previous limit becomes the new base */
1383		sad_base = sad_limit;
1384
1385		pci_read_config_dword(pvt->pci_sad0,
1386			pvt->info.dram_rule[sad_rule], &dram_rule);
1387
1388		if (!DRAM_RULE_ENABLE(dram_rule))
1389			break;
1390
1391		edram_only = KNL_EDRAM_ONLY(dram_rule);
1392
1393		sad_limit = pvt->info.sad_limit(dram_rule)+1;
1394		sad_size = sad_limit - sad_base;
1395
1396		pci_read_config_dword(pvt->pci_sad0,
1397			pvt->info.interleave_list[sad_rule], &interleave_reg);
1398
1399		/*
1400		 * Find out how many ways this dram rule is interleaved.
1401		 * We stop when we see the first channel again.
1402		 */
1403		first_pkg = sad_pkg(pvt->info.interleave_pkg,
1404						interleave_reg, 0);
1405		for (intrlv_ways = 1; intrlv_ways < 8; intrlv_ways++) {
1406			pkg = sad_pkg(pvt->info.interleave_pkg,
1407						interleave_reg, intrlv_ways);
1408
1409			if ((pkg & 0x8) == 0) {
1410				/*
1411				 * 0 bit means memory is non-local,
1412				 * which KNL doesn't support
1413				 */
1414				edac_dbg(0, "Unexpected interleave target %d\n",
1415					pkg);
1416				return -1;
1417			}
1418
1419			if (pkg == first_pkg)
1420				break;
1421		}
1422		if (KNL_MOD3(dram_rule))
1423			intrlv_ways *= 3;
1424
1425		edac_dbg(3, "dram rule %d (base 0x%llx, limit 0x%llx), %d way interleave%s\n",
1426			sad_rule,
1427			sad_base,
1428			sad_limit,
1429			intrlv_ways,
1430			edram_only ? ", EDRAM" : "");
1431
1432		/*
1433		 * Find out how big the SAD region really is by iterating
1434		 * over TAD tables (SAD regions may contain holes).
1435		 * Each memory controller might have a different TAD table, so
1436		 * we have to look at both.
1437		 *
1438		 * Livespace is the memory that's mapped in this TAD table,
1439		 * deadspace is the holes (this could be the MMIO hole, or it
1440		 * could be memory that's mapped by the other TAD table but
1441		 * not this one).
1442		 */
1443		for (mc = 0; mc < 2; mc++) {
1444			sad_actual_size[mc] = 0;
1445			tad_livespace = 0;
1446			for (tad_rule = 0;
1447					tad_rule < ARRAY_SIZE(
1448						knl_tad_dram_limit_lo);
1449					tad_rule++) {
1450				if (knl_get_tad(pvt,
1451						tad_rule,
1452						mc,
1453						&tad_deadspace,
1454						&tad_limit,
1455						&tad_ways))
1456					break;
1457
1458				tad_size = (tad_limit+1) -
1459					(tad_livespace + tad_deadspace);
1460				tad_livespace += tad_size;
1461				tad_base = (tad_limit+1) - tad_size;
1462
1463				if (tad_base < sad_base) {
1464					if (tad_limit > sad_base)
1465						edac_dbg(0, "TAD region overlaps lower SAD boundary -- TAD tables may be configured incorrectly.\n");
1466				} else if (tad_base < sad_limit) {
1467					if (tad_limit+1 > sad_limit) {
1468						edac_dbg(0, "TAD region overlaps upper SAD boundary -- TAD tables may be configured incorrectly.\n");
1469					} else {
1470						/* TAD region is completely inside SAD region */
1471						edac_dbg(3, "TAD region %d 0x%llx - 0x%llx (%lld bytes) table%d\n",
1472							tad_rule, tad_base,
1473							tad_limit, tad_size,
1474							mc);
1475						sad_actual_size[mc] += tad_size;
1476					}
1477				}
1478				tad_base = tad_limit+1;
1479			}
1480		}
1481
1482		for (mc = 0; mc < 2; mc++) {
1483			edac_dbg(3, " total TAD DRAM footprint in table%d : 0x%llx (%lld bytes)\n",
1484				mc, sad_actual_size[mc], sad_actual_size[mc]);
1485		}
1486
1487		/* Ignore EDRAM rule */
1488		if (edram_only)
1489			continue;
1490
1491		/* Figure out which channels participate in interleave. */
1492		for (channel = 0; channel < KNL_MAX_CHANNELS; channel++)
1493			participants[channel] = 0;
1494
1495		/* For each channel, does at least one CHA have
1496		 * this channel mapped to the given target?
1497		 */
1498		for (channel = 0; channel < KNL_MAX_CHANNELS; channel++) {
1499			int target;
1500			int cha;
1501
1502			for (target = 0; target < KNL_MAX_CHANNELS; target++) {
1503				for (cha = 0; cha < KNL_MAX_CHAS; cha++) {
1504					if (knl_get_mc_route(target,
1505						mc_route_reg[cha]) == channel
1506						&& !participants[channel]) {
1507						participants[channel] = 1;
1508						break;
1509					}
1510				}
1511			}
1512		}
1513
1514		for (channel = 0; channel < KNL_MAX_CHANNELS; channel++) {
1515			mc = knl_channel_mc(channel);
1516			if (participants[channel]) {
1517				edac_dbg(4, "mc channel %d contributes %lld bytes via sad entry %d\n",
1518					channel,
1519					sad_actual_size[mc]/intrlv_ways,
1520					sad_rule);
1521				mc_sizes[channel] +=
1522					sad_actual_size[mc]/intrlv_ways;
1523			}
1524		}
1525	}
1526
1527	return 0;
1528}
1529
1530static void get_source_id(struct mem_ctl_info *mci)
1531{
1532	struct sbridge_pvt *pvt = mci->pvt_info;
1533	u32 reg;
1534
1535	if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL ||
1536	    pvt->info.type == KNIGHTS_LANDING)
1537		pci_read_config_dword(pvt->pci_sad1, SAD_TARGET, &reg);
1538	else
1539		pci_read_config_dword(pvt->pci_br0, SAD_TARGET, &reg);
1540
1541	if (pvt->info.type == KNIGHTS_LANDING)
1542		pvt->sbridge_dev->source_id = SOURCE_ID_KNL(reg);
1543	else
1544		pvt->sbridge_dev->source_id = SOURCE_ID(reg);
1545}
1546
1547static int __populate_dimms(struct mem_ctl_info *mci,
1548			    u64 knl_mc_sizes[KNL_MAX_CHANNELS],
1549			    enum edac_type mode)
1550{
1551	struct sbridge_pvt *pvt = mci->pvt_info;
1552	int channels = pvt->info.type == KNIGHTS_LANDING ? KNL_MAX_CHANNELS
1553							 : NUM_CHANNELS;
1554	unsigned int i, j, banks, ranks, rows, cols, npages;
1555	struct dimm_info *dimm;
1556	enum mem_type mtype;
1557	u64 size;
1558
1559	mtype = pvt->info.get_memory_type(pvt);
1560	if (mtype == MEM_RDDR3 || mtype == MEM_RDDR4)
1561		edac_dbg(0, "Memory is registered\n");
1562	else if (mtype == MEM_UNKNOWN)
1563		edac_dbg(0, "Cannot determine memory type\n");
1564	else
1565		edac_dbg(0, "Memory is unregistered\n");
1566
1567	if (mtype == MEM_DDR4 || mtype == MEM_RDDR4)
1568		banks = 16;
1569	else
1570		banks = 8;
1571
1572	for (i = 0; i < channels; i++) {
1573		u32 mtr;
1574
1575		int max_dimms_per_channel;
1576
1577		if (pvt->info.type == KNIGHTS_LANDING) {
1578			max_dimms_per_channel = 1;
1579			if (!pvt->knl.pci_channel[i])
1580				continue;
1581		} else {
1582			max_dimms_per_channel = ARRAY_SIZE(mtr_regs);
1583			if (!pvt->pci_tad[i])
1584				continue;
1585		}
1586
1587		for (j = 0; j < max_dimms_per_channel; j++) {
1588			dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, i, j, 0);
1589			if (pvt->info.type == KNIGHTS_LANDING) {
1590				pci_read_config_dword(pvt->knl.pci_channel[i],
1591					knl_mtr_reg, &mtr);
1592			} else {
1593				pci_read_config_dword(pvt->pci_tad[i],
1594					mtr_regs[j], &mtr);
1595			}
1596			edac_dbg(4, "Channel #%d  MTR%d = %x\n", i, j, mtr);
1597			if (IS_DIMM_PRESENT(mtr)) {
1598				if (!IS_ECC_ENABLED(pvt->info.mcmtr)) {
1599					sbridge_printk(KERN_ERR, "CPU SrcID #%d, Ha #%d, Channel #%d has DIMMs, but ECC is disabled\n",
1600						       pvt->sbridge_dev->source_id,
1601						       pvt->sbridge_dev->dom, i);
1602					return -ENODEV;
1603				}
1604				pvt->channel[i].dimms++;
1605
1606				ranks = numrank(pvt->info.type, mtr);
1607
1608				if (pvt->info.type == KNIGHTS_LANDING) {
1609					/* For DDR4, this is fixed. */
1610					cols = 1 << 10;
1611					rows = knl_mc_sizes[i] /
1612						((u64) cols * ranks * banks * 8);
1613				} else {
1614					rows = numrow(mtr);
1615					cols = numcol(mtr);
1616				}
1617
1618				size = ((u64)rows * cols * banks * ranks) >> (20 - 3);
1619				npages = MiB_TO_PAGES(size);
1620
1621				edac_dbg(0, "mc#%d: ha %d channel %d, dimm %d, %lld Mb (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n",
1622					 pvt->sbridge_dev->mc, pvt->sbridge_dev->dom, i, j,
1623					 size, npages,
1624					 banks, ranks, rows, cols);
1625
1626				dimm->nr_pages = npages;
1627				dimm->grain = 32;
1628				dimm->dtype = pvt->info.get_width(pvt, mtr);
1629				dimm->mtype = mtype;
1630				dimm->edac_mode = mode;
1631				snprintf(dimm->label, sizeof(dimm->label),
1632						 "CPU_SrcID#%u_Ha#%u_Chan#%u_DIMM#%u",
1633						 pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom, i, j);
1634			}
1635		}
1636	}
1637
1638	return 0;
1639}
1640
1641static int get_dimm_config(struct mem_ctl_info *mci)
1642{
1643	struct sbridge_pvt *pvt = mci->pvt_info;
1644	u64 knl_mc_sizes[KNL_MAX_CHANNELS];
1645	enum edac_type mode;
1646	u32 reg;
1647
1648	pvt->sbridge_dev->node_id = pvt->info.get_node_id(pvt);
1649	edac_dbg(0, "mc#%d: Node ID: %d, source ID: %d\n",
1650		 pvt->sbridge_dev->mc,
1651		 pvt->sbridge_dev->node_id,
1652		 pvt->sbridge_dev->source_id);
1653
1654	/* KNL doesn't support mirroring or lockstep,
1655	 * and is always closed page
1656	 */
1657	if (pvt->info.type == KNIGHTS_LANDING) {
1658		mode = EDAC_S4ECD4ED;
1659		pvt->mirror_mode = NON_MIRRORING;
1660		pvt->is_cur_addr_mirrored = false;
1661
1662		if (knl_get_dimm_capacity(pvt, knl_mc_sizes) != 0)
1663			return -1;
1664		if (pci_read_config_dword(pvt->pci_ta, KNL_MCMTR, &pvt->info.mcmtr)) {
1665			edac_dbg(0, "Failed to read KNL_MCMTR register\n");
1666			return -ENODEV;
1667		}
1668	} else {
1669		if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL) {
1670			if (pci_read_config_dword(pvt->pci_ha, HASWELL_HASYSDEFEATURE2, &reg)) {
1671				edac_dbg(0, "Failed to read HASWELL_HASYSDEFEATURE2 register\n");
1672				return -ENODEV;
1673			}
1674			pvt->is_chan_hash = GET_BITFIELD(reg, 21, 21);
1675			if (GET_BITFIELD(reg, 28, 28)) {
1676				pvt->mirror_mode = ADDR_RANGE_MIRRORING;
1677				edac_dbg(0, "Address range partial memory mirroring is enabled\n");
1678				goto next;
1679			}
1680		}
1681		if (pci_read_config_dword(pvt->pci_ras, RASENABLES, &reg)) {
1682			edac_dbg(0, "Failed to read RASENABLES register\n");
1683			return -ENODEV;
1684		}
1685		if (IS_MIRROR_ENABLED(reg)) {
1686			pvt->mirror_mode = FULL_MIRRORING;
1687			edac_dbg(0, "Full memory mirroring is enabled\n");
1688		} else {
1689			pvt->mirror_mode = NON_MIRRORING;
1690			edac_dbg(0, "Memory mirroring is disabled\n");
1691		}
1692
1693next:
1694		if (pci_read_config_dword(pvt->pci_ta, MCMTR, &pvt->info.mcmtr)) {
1695			edac_dbg(0, "Failed to read MCMTR register\n");
1696			return -ENODEV;
1697		}
1698		if (IS_LOCKSTEP_ENABLED(pvt->info.mcmtr)) {
1699			edac_dbg(0, "Lockstep is enabled\n");
1700			mode = EDAC_S8ECD8ED;
1701			pvt->is_lockstep = true;
1702		} else {
1703			edac_dbg(0, "Lockstep is disabled\n");
1704			mode = EDAC_S4ECD4ED;
1705			pvt->is_lockstep = false;
1706		}
1707		if (IS_CLOSE_PG(pvt->info.mcmtr)) {
1708			edac_dbg(0, "address map is on closed page mode\n");
1709			pvt->is_close_pg = true;
1710		} else {
1711			edac_dbg(0, "address map is on open page mode\n");
1712			pvt->is_close_pg = false;
1713		}
1714	}
1715
1716	return __populate_dimms(mci, knl_mc_sizes, mode);
1717}
1718
1719static void get_memory_layout(const struct mem_ctl_info *mci)
1720{
1721	struct sbridge_pvt *pvt = mci->pvt_info;
1722	int i, j, k, n_sads, n_tads, sad_interl;
1723	u32 reg;
1724	u64 limit, prv = 0;
1725	u64 tmp_mb;
1726	u32 gb, mb;
1727	u32 rir_way;
1728
1729	/*
1730	 * Step 1) Get TOLM/TOHM ranges
1731	 */
1732
1733	pvt->tolm = pvt->info.get_tolm(pvt);
1734	tmp_mb = (1 + pvt->tolm) >> 20;
1735
1736	gb = div_u64_rem(tmp_mb, 1024, &mb);
1737	edac_dbg(0, "TOLM: %u.%03u GB (0x%016Lx)\n",
1738		gb, (mb*1000)/1024, (u64)pvt->tolm);
1739
1740	/* Address range is already 45:25 */
1741	pvt->tohm = pvt->info.get_tohm(pvt);
1742	tmp_mb = (1 + pvt->tohm) >> 20;
1743
1744	gb = div_u64_rem(tmp_mb, 1024, &mb);
1745	edac_dbg(0, "TOHM: %u.%03u GB (0x%016Lx)\n",
1746		gb, (mb*1000)/1024, (u64)pvt->tohm);
1747
1748	/*
1749	 * Step 2) Get SAD range and SAD Interleave list
1750	 * TAD registers contain the interleave wayness. However, it
1751	 * seems simpler to just discover it indirectly, with the
1752	 * algorithm below.
1753	 */
1754	prv = 0;
1755	for (n_sads = 0; n_sads < pvt->info.max_sad; n_sads++) {
1756		/* SAD_LIMIT Address range is 45:26 */
1757		pci_read_config_dword(pvt->pci_sad0, pvt->info.dram_rule[n_sads],
1758				      &reg);
1759		limit = pvt->info.sad_limit(reg);
1760
1761		if (!DRAM_RULE_ENABLE(reg))
1762			continue;
1763
1764		if (limit <= prv)
1765			break;
1766
1767		tmp_mb = (limit + 1) >> 20;
1768		gb = div_u64_rem(tmp_mb, 1024, &mb);
1769		edac_dbg(0, "SAD#%d %s up to %u.%03u GB (0x%016Lx) Interleave: %s reg=0x%08x\n",
1770			 n_sads,
1771			 show_dram_attr(pvt->info.dram_attr(reg)),
1772			 gb, (mb*1000)/1024,
1773			 ((u64)tmp_mb) << 20L,
1774			 get_intlv_mode_str(reg, pvt->info.type),
1775			 reg);
1776		prv = limit;
1777
1778		pci_read_config_dword(pvt->pci_sad0, pvt->info.interleave_list[n_sads],
1779				      &reg);
1780		sad_interl = sad_pkg(pvt->info.interleave_pkg, reg, 0);
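		/*
		 * The loop below walks the interleave targets until the
		 * value read for entry 0 repeats, which yields the wayness
		 * indirectly.  For example (illustrative values), if the
		 * packages read back as 0, 1, 0, 1, ... the rule is 2-way
		 * interleaved and only entries 0 and 1 get printed.
		 */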
1781		for (j = 0; j < 8; j++) {
1782			u32 pkg = sad_pkg(pvt->info.interleave_pkg, reg, j);
1783			if (j > 0 && sad_interl == pkg)
1784				break;
1785
1786			edac_dbg(0, "SAD#%d, interleave #%d: %d\n",
1787				 n_sads, j, pkg);
1788		}
1789	}
1790
1791	if (pvt->info.type == KNIGHTS_LANDING)
1792		return;
1793
1794	/*
1795	 * Step 3) Get TAD range
1796	 */
1797	prv = 0;
1798	for (n_tads = 0; n_tads < MAX_TAD; n_tads++) {
1799		pci_read_config_dword(pvt->pci_ha, tad_dram_rule[n_tads], &reg);
1800		limit = TAD_LIMIT(reg);
1801		if (limit <= prv)
1802			break;
1803		tmp_mb = (limit + 1) >> 20;
1804
1805		gb = div_u64_rem(tmp_mb, 1024, &mb);
1806		edac_dbg(0, "TAD#%d: up to %u.%03u GB (0x%016Lx), socket interleave %d, memory interleave %d, TGT: %d, %d, %d, %d, reg=0x%08x\n",
1807			 n_tads, gb, (mb*1000)/1024,
1808			 ((u64)tmp_mb) << 20L,
1809			 (u32)(1 << TAD_SOCK(reg)),
1810			 (u32)TAD_CH(reg) + 1,
1811			 (u32)TAD_TGT0(reg),
1812			 (u32)TAD_TGT1(reg),
1813			 (u32)TAD_TGT2(reg),
1814			 (u32)TAD_TGT3(reg),
1815			 reg);
1816		prv = limit;
1817	}
1818
1819	/*
1820	 * Step 4) Get TAD offsets, for each channel
1821	 */
1822	for (i = 0; i < NUM_CHANNELS; i++) {
1823		if (!pvt->channel[i].dimms)
1824			continue;
1825		for (j = 0; j < n_tads; j++) {
1826			pci_read_config_dword(pvt->pci_tad[i],
1827					      tad_ch_nilv_offset[j],
1828					      &reg);
1829			tmp_mb = TAD_OFFSET(reg) >> 20;
1830			gb = div_u64_rem(tmp_mb, 1024, &mb);
1831			edac_dbg(0, "TAD CH#%d, offset #%d: %u.%03u GB (0x%016Lx), reg=0x%08x\n",
1832				 i, j,
1833				 gb, (mb*1000)/1024,
1834				 ((u64)tmp_mb) << 20L,
1835				 reg);
1836		}
1837	}
1838
1839	/*
1840	 * Step 5) Get RIR Wayness/Limit, for each channel
1841	 */
1842	for (i = 0; i < NUM_CHANNELS; i++) {
1843		if (!pvt->channel[i].dimms)
1844			continue;
1845		for (j = 0; j < MAX_RIR_RANGES; j++) {
1846			pci_read_config_dword(pvt->pci_tad[i],
1847					      rir_way_limit[j],
1848					      &reg);
1849
1850			if (!IS_RIR_VALID(reg))
1851				continue;
1852
1853			tmp_mb = pvt->info.rir_limit(reg) >> 20;
1854			rir_way = 1 << RIR_WAY(reg);
1855			gb = div_u64_rem(tmp_mb, 1024, &mb);
1856			edac_dbg(0, "CH#%d RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d, reg=0x%08x\n",
1857				 i, j,
1858				 gb, (mb*1000)/1024,
1859				 ((u64)tmp_mb) << 20L,
1860				 rir_way,
1861				 reg);
1862
1863			for (k = 0; k < rir_way; k++) {
1864				pci_read_config_dword(pvt->pci_tad[i],
1865						      rir_offset[j][k],
1866						      &reg);
1867				tmp_mb = RIR_OFFSET(pvt->info.type, reg) << 6;
1868
1869				gb = div_u64_rem(tmp_mb, 1024, &mb);
1870				edac_dbg(0, "CH#%d RIR#%d INTL#%d, offset %u.%03u GB (0x%016Lx), tgt: %d, reg=0x%08x\n",
1871					 i, j, k,
1872					 gb, (mb*1000)/1024,
1873					 ((u64)tmp_mb) << 20L,
1874					 (u32)RIR_RNK_TGT(pvt->info.type, reg),
1875					 reg);
1876			}
1877		}
1878	}
1879}
1880
1881static struct mem_ctl_info *get_mci_for_node_id(u8 node_id, u8 ha)
1882{
1883	struct sbridge_dev *sbridge_dev;
1884
1885	list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) {
1886		if (sbridge_dev->node_id == node_id && sbridge_dev->dom == ha)
1887			return sbridge_dev->mci;
1888	}
1889	return NULL;
1890}
1891
1892static int get_memory_error_data(struct mem_ctl_info *mci,
1893				 u64 addr,
1894				 u8 *socket, u8 *ha,
1895				 long *channel_mask,
1896				 u8 *rank,
1897				 char **area_type, char *msg)
1898{
1899	struct mem_ctl_info	*new_mci;
1900	struct sbridge_pvt *pvt = mci->pvt_info;
1901	struct pci_dev		*pci_ha;
1902	int			n_rir, n_sads, n_tads, sad_way, sck_xch;
1903	int			sad_interl, idx, base_ch;
1904	int			interleave_mode, shiftup = 0;
1905	unsigned int		sad_interleave[MAX_INTERLEAVE];
1906	u32			reg, dram_rule;
1907	u8			ch_way, sck_way, pkg, sad_ha = 0;
1908	u32			tad_offset;
1909	u32			rir_way;
1910	u32			mb, gb;
1911	u64			ch_addr, offset, limit = 0, prv = 0;
1912
1913
1914	/*
1915	 * Step 0) Check if the address is at special memory ranges
1916	 * The check below is probably enough to cover all cases where
1917	 * the error is not inside memory, except for the legacy
1918	 * range (e.g. VGA addresses). It is unlikely, however, that the
1919	 * memory controller would generate an error on that range.
1920	 */
1921	if ((addr > (u64) pvt->tolm) && (addr < (1LL << 32))) {
1922		sprintf(msg, "Error at TOLM area, on addr 0x%08Lx", addr);
1923		return -EINVAL;
1924	}
1925	if (addr >= (u64)pvt->tohm) {
1926		sprintf(msg, "Error at MMIOH area, on addr 0x%016Lx", addr);
1927		return -EINVAL;
1928	}
1929
1930	/*
1931	 * Step 1) Get socket
1932	 */
1933	for (n_sads = 0; n_sads < pvt->info.max_sad; n_sads++) {
1934		pci_read_config_dword(pvt->pci_sad0, pvt->info.dram_rule[n_sads],
1935				      &reg);
1936
1937		if (!DRAM_RULE_ENABLE(reg))
1938			continue;
1939
1940		limit = pvt->info.sad_limit(reg);
1941		if (limit <= prv) {
1942			sprintf(msg, "Can't discover the memory socket");
1943			return -EINVAL;
1944		}
1945		if  (addr <= limit)
1946			break;
1947		prv = limit;
1948	}
1949	if (n_sads == pvt->info.max_sad) {
1950		sprintf(msg, "Can't discover the memory socket");
1951		return -EINVAL;
1952	}
1953	dram_rule = reg;
1954	*area_type = show_dram_attr(pvt->info.dram_attr(dram_rule));
1955	interleave_mode = pvt->info.interleave_mode(dram_rule);
1956
1957	pci_read_config_dword(pvt->pci_sad0, pvt->info.interleave_list[n_sads],
1958			      &reg);
1959
1960	if (pvt->info.type == SANDY_BRIDGE) {
1961		sad_interl = sad_pkg(pvt->info.interleave_pkg, reg, 0);
1962		for (sad_way = 0; sad_way < 8; sad_way++) {
1963			u32 pkg = sad_pkg(pvt->info.interleave_pkg, reg, sad_way);
1964			if (sad_way > 0 && sad_interl == pkg)
1965				break;
1966			sad_interleave[sad_way] = pkg;
1967			edac_dbg(0, "SAD interleave #%d: %d\n",
1968				 sad_way, sad_interleave[sad_way]);
1969		}
1970		edac_dbg(0, "mc#%d: Error detected on SAD#%d: address 0x%016Lx < 0x%016Lx, Interleave [%d:6]%s\n",
1971			 pvt->sbridge_dev->mc,
1972			 n_sads,
1973			 addr,
1974			 limit,
1975			 sad_way + 7,
1976			 !interleave_mode ? "" : "XOR[18:16]");
1977		if (interleave_mode)
1978			idx = ((addr >> 6) ^ (addr >> 16)) & 7;
1979		else
1980			idx = (addr >> 6) & 7;
1981		switch (sad_way) {
1982		case 1:
1983			idx = 0;
1984			break;
1985		case 2:
1986			idx = idx & 1;
1987			break;
1988		case 4:
1989			idx = idx & 3;
1990			break;
1991		case 8:
1992			break;
1993		default:
1994			sprintf(msg, "Can't discover socket interleave");
1995			return -EINVAL;
1996		}
1997		*socket = sad_interleave[idx];
1998		edac_dbg(0, "SAD interleave index: %d (wayness %d) = CPU socket %d\n",
1999			 idx, sad_way, *socket);
2000	} else if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL) {
2001		int bits, a7mode = A7MODE(dram_rule);
2002
2003		if (a7mode) {
2004			/* A7 mode swaps P9 with P6 */
2005			bits = GET_BITFIELD(addr, 7, 8) << 1;
2006			bits |= GET_BITFIELD(addr, 9, 9);
2007		} else
2008			bits = GET_BITFIELD(addr, 6, 8);
2009
2010		if (interleave_mode == 0) {
2011			/* interleave mode will XOR {8,7,6} with {18,17,16} */
2012			idx = GET_BITFIELD(addr, 16, 18);
2013			idx ^= bits;
2014		} else
2015			idx = bits;
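		/*
		 * Worked example (illustrative address bits): without A7
		 * mode, bits = addr[8:6]; with A7 mode the low bit comes
		 * from addr[9] instead of addr[6].  If interleave_mode == 0
		 * and, say, addr[8:6] = 0b011 while addr[18:16] = 0b101,
		 * then idx = 0b101 ^ 0b011 = 0b110 = 6.
		 */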
2016
2017		pkg = sad_pkg(pvt->info.interleave_pkg, reg, idx);
2018		*socket = sad_pkg_socket(pkg);
2019		sad_ha = sad_pkg_ha(pkg);
2020
2021		if (a7mode) {
2022			/* MCChanShiftUpEnable */
2023			pci_read_config_dword(pvt->pci_ha, HASWELL_HASYSDEFEATURE2, &reg);
2024			shiftup = GET_BITFIELD(reg, 22, 22);
2025		}
2026
2027		edac_dbg(0, "SAD interleave package: %d = CPU socket %d, HA %i, shiftup: %i\n",
2028			 idx, *socket, sad_ha, shiftup);
2029	} else {
2030		/* Ivy Bridge's SAD mode doesn't support XOR interleave mode */
2031		idx = (addr >> 6) & 7;
2032		pkg = sad_pkg(pvt->info.interleave_pkg, reg, idx);
2033		*socket = sad_pkg_socket(pkg);
2034		sad_ha = sad_pkg_ha(pkg);
2035		edac_dbg(0, "SAD interleave package: %d = CPU socket %d, HA %d\n",
2036			 idx, *socket, sad_ha);
2037	}
2038
2039	*ha = sad_ha;
2040
2041	/*
2042	 * Move to the proper node structure, in order to access the
2043	 * right PCI registers
2044	 */
2045	new_mci = get_mci_for_node_id(*socket, sad_ha);
2046	if (!new_mci) {
2047		sprintf(msg, "Struct for socket #%u wasn't initialized",
2048			*socket);
2049		return -EINVAL;
2050	}
2051	mci = new_mci;
2052	pvt = mci->pvt_info;
2053
2054	/*
2055	 * Step 2) Get memory channel
2056	 */
2057	prv = 0;
2058	pci_ha = pvt->pci_ha;
2059	for (n_tads = 0; n_tads < MAX_TAD; n_tads++) {
2060		pci_read_config_dword(pci_ha, tad_dram_rule[n_tads], &reg);
2061		limit = TAD_LIMIT(reg);
2062		if (limit <= prv) {
2063			sprintf(msg, "Can't discover the memory channel");
2064			return -EINVAL;
2065		}
2066		if  (addr <= limit)
2067			break;
2068		prv = limit;
2069	}
2070	if (n_tads == MAX_TAD) {
2071		sprintf(msg, "Can't discover the memory channel");
2072		return -EINVAL;
2073	}
2074
2075	ch_way = TAD_CH(reg) + 1;
2076	sck_way = TAD_SOCK(reg);
2077
2078	if (ch_way == 3)
2079		idx = addr >> 6;
2080	else {
2081		idx = (addr >> (6 + sck_way + shiftup)) & 0x3;
2082		if (pvt->is_chan_hash)
2083			idx = haswell_chan_hash(idx, addr);
2084	}
2085	idx = idx % ch_way;
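	/*
	 * Worked example (illustrative values): with channel hashing off,
	 * sck_way = 1, shiftup = 0 and ch_way = 2, idx = (addr >> 7) & 0x3
	 * and then idx %= 2, so address bit 7 alone selects between TGT0
	 * and TGT1 below.
	 */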
2086
2087	/*
2088	 * FIXME: Shouldn't we use CHN_IDX_OFFSET() here, when ch_way == 3 ???
2089	 */
2090	switch (idx) {
2091	case 0:
2092		base_ch = TAD_TGT0(reg);
2093		break;
2094	case 1:
2095		base_ch = TAD_TGT1(reg);
2096		break;
2097	case 2:
2098		base_ch = TAD_TGT2(reg);
2099		break;
2100	case 3:
2101		base_ch = TAD_TGT3(reg);
2102		break;
2103	default:
2104		sprintf(msg, "Can't discover the TAD target");
2105		return -EINVAL;
2106	}
2107	*channel_mask = 1 << base_ch;
2108
2109	pci_read_config_dword(pvt->pci_tad[base_ch], tad_ch_nilv_offset[n_tads], &tad_offset);
2110
2111	if (pvt->mirror_mode == FULL_MIRRORING ||
2112	    (pvt->mirror_mode == ADDR_RANGE_MIRRORING && n_tads == 0)) {
2113		*channel_mask |= 1 << ((base_ch + 2) % 4);
2114		switch(ch_way) {
2115		case 2:
2116		case 4:
2117			sck_xch = (1 << sck_way) * (ch_way >> 1);
2118			break;
2119		default:
2120			sprintf(msg, "Invalid mirror set. Can't decode addr");
2121			return -EINVAL;
2122		}
2123
2124		pvt->is_cur_addr_mirrored = true;
2125	} else {
2126		sck_xch = (1 << sck_way) * ch_way;
2127		pvt->is_cur_addr_mirrored = false;
2128	}
2129
2130	if (pvt->is_lockstep)
2131		*channel_mask |= 1 << ((base_ch + 1) % 4);
2132
2133	offset = TAD_OFFSET(tad_offset);
2134
2135	edac_dbg(0, "TAD#%d: address 0x%016Lx < 0x%016Lx, socket interleave %d, channel interleave %d (offset 0x%08Lx), index %d, base ch: %d, ch mask: 0x%02lx\n",
2136		 n_tads,
2137		 addr,
2138		 limit,
2139		 sck_way,
2140		 ch_way,
2141		 offset,
2142		 idx,
2143		 base_ch,
2144		 *channel_mask);
2145
2146	/* Calculate channel address */
2147	/* Remove the TAD offset */
2148
2149	if (offset > addr) {
2150		sprintf(msg, "Can't calculate ch addr: TAD offset 0x%08Lx is too high for addr 0x%08Lx!",
2151			offset, addr);
2152		return -EINVAL;
2153	}
2154
2155	ch_addr = addr - offset;
2156	ch_addr >>= (6 + shiftup);
2157	ch_addr /= sck_xch;
2158	ch_addr <<= (6 + shiftup);
2159	ch_addr |= addr & ((1 << (6 + shiftup)) - 1);
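	/*
	 * Worked example (illustrative values): addr = 0x100000000 (4 GB),
	 * offset = 0, shiftup = 0 and sck_xch = 4 (2 sockets x 2 channels)
	 * gives ch_addr = ((0x100000000 >> 6) / 4) << 6 = 0x40000000, i.e.
	 * the 4 GB system address becomes 1 GB within the channel.
	 */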
2160
2161	/*
2162	 * Step 3) Decode rank
2163	 */
2164	for (n_rir = 0; n_rir < MAX_RIR_RANGES; n_rir++) {
2165		pci_read_config_dword(pvt->pci_tad[base_ch], rir_way_limit[n_rir], &reg);
2166
2167		if (!IS_RIR_VALID(reg))
2168			continue;
2169
2170		limit = pvt->info.rir_limit(reg);
2171		gb = div_u64_rem(limit >> 20, 1024, &mb);
2172		edac_dbg(0, "RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d\n",
2173			 n_rir,
2174			 gb, (mb*1000)/1024,
2175			 limit,
2176			 1 << RIR_WAY(reg));
2177		if  (ch_addr <= limit)
2178			break;
2179	}
2180	if (n_rir == MAX_RIR_RANGES) {
2181		sprintf(msg, "Can't discover the memory rank for ch addr 0x%08Lx",
2182			ch_addr);
2183		return -EINVAL;
2184	}
2185	rir_way = RIR_WAY(reg);
2186
2187	if (pvt->is_close_pg)
2188		idx = (ch_addr >> 6);
2189	else
2190		idx = (ch_addr >> 13);	/* FIXME: Datasheet says to shift by 15 */
2191	idx %= 1 << rir_way;
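	/*
	 * Worked example (illustrative values): in closed page mode with
	 * rir_way = 2 (4-way rank interleave) and ch_addr = 0x1040,
	 * idx = (0x1040 >> 6) % 4 = 65 % 4 = 1, so RIR offset/target
	 * entry 1 is read below.
	 */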
2192
2193	pci_read_config_dword(pvt->pci_tad[base_ch], rir_offset[n_rir][idx], &reg);
2194	*rank = RIR_RNK_TGT(pvt->info.type, reg);
2195
2196	edac_dbg(0, "RIR#%d: channel address 0x%08Lx < 0x%08Lx, RIR interleave %d, index %d\n",
2197		 n_rir,
2198		 ch_addr,
2199		 limit,
2200		 rir_way,
2201		 idx);
2202
2203	return 0;
2204}
2205
2206/****************************************************************************
2207	Device initialization routines: put/get, init/exit
2208 ****************************************************************************/
2209
2210/*
2211 *	sbridge_put_devices	'put' all the devices that we have
2212 *				reserved via 'get' for this sbridge_dev
2213 */
2214static void sbridge_put_devices(struct sbridge_dev *sbridge_dev)
2215{
2216	int i;
2217
2218	edac_dbg(0, "\n");
2219	for (i = 0; i < sbridge_dev->n_devs; i++) {
2220		struct pci_dev *pdev = sbridge_dev->pdev[i];
2221		if (!pdev)
2222			continue;
2223		edac_dbg(0, "Removing dev %02x:%02x.%d\n",
2224			 pdev->bus->number,
2225			 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
2226		pci_dev_put(pdev);
2227	}
2228}
2229
2230static void sbridge_put_all_devices(void)
2231{
2232	struct sbridge_dev *sbridge_dev, *tmp;
2233
2234	list_for_each_entry_safe(sbridge_dev, tmp, &sbridge_edac_list, list) {
2235		sbridge_put_devices(sbridge_dev);
2236		free_sbridge_dev(sbridge_dev);
2237	}
2238}
2239
2240static int sbridge_get_onedevice(struct pci_dev **prev,
2241				 u8 *num_mc,
2242				 const struct pci_id_table *table,
2243				 const unsigned devno,
2244				 const int multi_bus)
2245{
2246	struct sbridge_dev *sbridge_dev = NULL;
2247	const struct pci_id_descr *dev_descr = &table->descr[devno];
2248	struct pci_dev *pdev = NULL;
2249	u8 bus = 0;
2250	int i = 0;
2251
2252	sbridge_printk(KERN_DEBUG,
2253		"Seeking for: PCI ID %04x:%04x\n",
2254		PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
2255
2256	pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
2257			      dev_descr->dev_id, *prev);
2258
2259	if (!pdev) {
2260		if (*prev) {
2261			*prev = pdev;
2262			return 0;
2263		}
2264
2265		if (dev_descr->optional)
2266			return 0;
2267
2268		/* if the HA wasn't found */
2269		if (devno == 0)
2270			return -ENODEV;
2271
2272		sbridge_printk(KERN_INFO,
2273			"Device not found: %04x:%04x\n",
2274			PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
2275
2276		/* End of list, leave */
2277		return -ENODEV;
2278	}
2279	bus = pdev->bus->number;
2280
2281next_imc:
2282	sbridge_dev = get_sbridge_dev(bus, dev_descr->dom, multi_bus, sbridge_dev);
2283	if (!sbridge_dev) {
2284		/* If HA1 wasn't found, don't create a second EDAC memory controller */
2285		if (dev_descr->dom == IMC1 && devno != 1) {
2286			edac_dbg(0, "Skip IMC1: %04x:%04x (since HA1 was absent)\n",
2287				 PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
2288			pci_dev_put(pdev);
2289			return 0;
2290		}
2291
2292		if (dev_descr->dom == SOCK)
2293			goto out_imc;
2294
2295		sbridge_dev = alloc_sbridge_dev(bus, dev_descr->dom, table);
2296		if (!sbridge_dev) {
2297			pci_dev_put(pdev);
2298			return -ENOMEM;
2299		}
2300		(*num_mc)++;
2301	}
2302
2303	if (sbridge_dev->pdev[sbridge_dev->i_devs]) {
2304		sbridge_printk(KERN_ERR,
2305			"Duplicated device for %04x:%04x\n",
2306			PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
2307		pci_dev_put(pdev);
2308		return -ENODEV;
2309	}
2310
2311	sbridge_dev->pdev[sbridge_dev->i_devs++] = pdev;
2312
2313	/* pdev belongs to more than one IMC, do extra gets */
2314	if (++i > 1)
2315		pci_dev_get(pdev);
2316
2317	if (dev_descr->dom == SOCK && i < table->n_imcs_per_sock)
2318		goto next_imc;
2319
2320out_imc:
2321	/* Be sure that the device is enabled */
2322	if (unlikely(pci_enable_device(pdev) < 0)) {
2323		sbridge_printk(KERN_ERR,
2324			"Couldn't enable %04x:%04x\n",
2325			PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
2326		return -ENODEV;
2327	}
2328
2329	edac_dbg(0, "Detected %04x:%04x\n",
2330		 PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
2331
2332	/*
2333	 * As stated in drivers/pci/search.c, the reference count for
2334	 * @from is always decremented if it is not %NULL. So, since we need
2335	 * to walk all devices until NULL, we must do an extra get on each one.
2336	 */
2337	pci_dev_get(pdev);
2338
2339	*prev = pdev;
2340
2341	return 0;
2342}
2343
2344/*
2345 * sbridge_get_all_devices - Find and perform 'get' operation on the MCH's
2346 *			     devices we want to reference for this driver.
2347 * @num_mc: pointer to the memory controllers count, to be incremented in case
2348 *	    of success.
2349 * @table: model specific table
2350 *
2351 * Returns 0 on success or a negative error code.
2352 */
2353static int sbridge_get_all_devices(u8 *num_mc,
2354					const struct pci_id_table *table)
2355{
2356	int i, rc;
2357	struct pci_dev *pdev = NULL;
2358	int allow_dups = 0;
2359	int multi_bus = 0;
2360
2361	if (table->type == KNIGHTS_LANDING)
2362		allow_dups = multi_bus = 1;
2363	while (table && table->descr) {
2364		for (i = 0; i < table->n_devs_per_sock; i++) {
2365			if (!allow_dups || i == 0 ||
2366					table->descr[i].dev_id !=
2367						table->descr[i-1].dev_id) {
2368				pdev = NULL;
2369			}
2370			do {
2371				rc = sbridge_get_onedevice(&pdev, num_mc,
2372							   table, i, multi_bus);
2373				if (rc < 0) {
2374					if (i == 0) {
2375						i = table->n_devs_per_sock;
2376						break;
2377					}
2378					sbridge_put_all_devices();
2379					return -ENODEV;
2380				}
2381			} while (pdev && !allow_dups);
2382		}
2383		table++;
2384	}
2385
2386	return 0;
2387}
2388
2389/*
2390 * Device IDs for {SBRIDGE,IBRIDGE,HASWELL,BROADWELL}_IMC_HA0_TAD0 are in
2391 * the format: XXXa. So we can convert from a device to the corresponding
2392 * channel like this
2393 */
2394#define TAD_DEV_TO_CHAN(dev) (((dev) & 0xf) - 0xa)
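/*
 * For example, if the TAD0 device ID ends in 0xa it maps to channel 0,
 * and a TAD3 ID ending in 0xd would map to channel 3.
 */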
2395
2396static int sbridge_mci_bind_devs(struct mem_ctl_info *mci,
2397				 struct sbridge_dev *sbridge_dev)
2398{
2399	struct sbridge_pvt *pvt = mci->pvt_info;
2400	struct pci_dev *pdev;
2401	u8 saw_chan_mask = 0;
2402	int i;
2403
2404	for (i = 0; i < sbridge_dev->n_devs; i++) {
2405		pdev = sbridge_dev->pdev[i];
2406		if (!pdev)
2407			continue;
2408
2409		switch (pdev->device) {
2410		case PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0:
2411			pvt->pci_sad0 = pdev;
2412			break;
2413		case PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1:
2414			pvt->pci_sad1 = pdev;
2415			break;
2416		case PCI_DEVICE_ID_INTEL_SBRIDGE_BR:
2417			pvt->pci_br0 = pdev;
2418			break;
2419		case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0:
2420			pvt->pci_ha = pdev;
2421			break;
2422		case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA:
2423			pvt->pci_ta = pdev;
2424			break;
2425		case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS:
2426			pvt->pci_ras = pdev;
2427			break;
2428		case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0:
2429		case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1:
2430		case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2:
2431		case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3:
2432		{
2433			int id = TAD_DEV_TO_CHAN(pdev->device);
2434			pvt->pci_tad[id] = pdev;
2435			saw_chan_mask |= 1 << id;
2436		}
2437			break;
2438		case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO:
2439			pvt->pci_ddrio = pdev;
2440			break;
2441		default:
2442			goto error;
2443		}
2444
2445		edac_dbg(0, "Associated PCI %02x:%02x, bus %d with dev = %p\n",
2446			 pdev->vendor, pdev->device,
2447			 sbridge_dev->bus,
2448			 pdev);
2449	}
2450
2451	/* Check if everything was registered */
2452	if (!pvt->pci_sad0 || !pvt->pci_sad1 || !pvt->pci_ha ||
2453	    !pvt->pci_ras || !pvt->pci_ta)
2454		goto enodev;
2455
2456	if (saw_chan_mask != 0x0f)
2457		goto enodev;
2458	return 0;
2459
2460enodev:
2461	sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
2462	return -ENODEV;
2463
2464error:
2465	sbridge_printk(KERN_ERR, "Unexpected device %02x:%02x\n",
2466		       PCI_VENDOR_ID_INTEL, pdev->device);
2467	return -EINVAL;
2468}
2469
2470static int ibridge_mci_bind_devs(struct mem_ctl_info *mci,
2471				 struct sbridge_dev *sbridge_dev)
2472{
2473	struct sbridge_pvt *pvt = mci->pvt_info;
2474	struct pci_dev *pdev;
2475	u8 saw_chan_mask = 0;
2476	int i;
2477
2478	for (i = 0; i < sbridge_dev->n_devs; i++) {
2479		pdev = sbridge_dev->pdev[i];
2480		if (!pdev)
2481			continue;
2482
2483		switch (pdev->device) {
2484		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0:
2485		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1:
2486			pvt->pci_ha = pdev;
2487			break;
2488		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA:
2489		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA:
2490			pvt->pci_ta = pdev;
2491			break;
2492		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS:
2493		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS:
2494			pvt->pci_ras = pdev;
2495			break;
2496		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0:
2497		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1:
2498		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2:
2499		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3:
2500		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0:
2501		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1:
2502		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD2:
2503		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD3:
2504		{
2505			int id = TAD_DEV_TO_CHAN(pdev->device);
2506			pvt->pci_tad[id] = pdev;
2507			saw_chan_mask |= 1 << id;
2508		}
2509			break;
2510		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0:
2511			pvt->pci_ddrio = pdev;
2512			break;
2513		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0:
2514			pvt->pci_ddrio = pdev;
2515			break;
2516		case PCI_DEVICE_ID_INTEL_IBRIDGE_SAD:
2517			pvt->pci_sad0 = pdev;
2518			break;
2519		case PCI_DEVICE_ID_INTEL_IBRIDGE_BR0:
2520			pvt->pci_br0 = pdev;
2521			break;
2522		case PCI_DEVICE_ID_INTEL_IBRIDGE_BR1:
2523			pvt->pci_br1 = pdev;
2524			break;
2525		default:
2526			goto error;
2527		}
2528
2529		edac_dbg(0, "Associated PCI %02x.%02d.%d with dev = %p\n",
2530			 sbridge_dev->bus,
2531			 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
2532			 pdev);
2533	}
2534
2535	/* Check if everything was registered */
2536	if (!pvt->pci_sad0 || !pvt->pci_ha || !pvt->pci_br0 ||
2537	    !pvt->pci_br1 || !pvt->pci_ras || !pvt->pci_ta)
2538		goto enodev;
2539
2540	if (saw_chan_mask != 0x0f && /* -EN/-EX */
2541	    saw_chan_mask != 0x03)   /* -EP */
2542		goto enodev;
2543	return 0;
2544
2545enodev:
2546	sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
2547	return -ENODEV;
2548
2549error:
2550	sbridge_printk(KERN_ERR,
2551		       "Unexpected device %02x:%02x\n", PCI_VENDOR_ID_INTEL,
2552			pdev->device);
2553	return -EINVAL;
2554}
2555
2556static int haswell_mci_bind_devs(struct mem_ctl_info *mci,
2557				 struct sbridge_dev *sbridge_dev)
2558{
2559	struct sbridge_pvt *pvt = mci->pvt_info;
2560	struct pci_dev *pdev;
2561	u8 saw_chan_mask = 0;
2562	int i;
2563
2564	/* there's only one device per system; not tied to any bus */
2565	if (pvt->info.pci_vtd == NULL)
2566		/* result will be checked later */
2567		pvt->info.pci_vtd = pci_get_device(PCI_VENDOR_ID_INTEL,
2568						   PCI_DEVICE_ID_INTEL_HASWELL_IMC_VTD_MISC,
2569						   NULL);
2570
2571	for (i = 0; i < sbridge_dev->n_devs; i++) {
2572		pdev = sbridge_dev->pdev[i];
2573		if (!pdev)
2574			continue;
2575
2576		switch (pdev->device) {
2577		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD0:
2578			pvt->pci_sad0 = pdev;
2579			break;
2580		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD1:
2581			pvt->pci_sad1 = pdev;
2582			break;
2583		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0:
2584		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1:
2585			pvt->pci_ha = pdev;
2586			break;
2587		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA:
2588		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA:
2589			pvt->pci_ta = pdev;
2590			break;
2591		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TM:
2592		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TM:
2593			pvt->pci_ras = pdev;
2594			break;
2595		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0:
2596		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD1:
2597		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD2:
2598		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3:
2599		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0:
2600		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD1:
2601		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD2:
2602		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD3:
2603		{
2604			int id = TAD_DEV_TO_CHAN(pdev->device);
2605			pvt->pci_tad[id] = pdev;
2606			saw_chan_mask |= 1 << id;
2607		}
2608			break;
2609		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0:
2610		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO1:
2611		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO2:
2612		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO3:
2613			if (!pvt->pci_ddrio)
2614				pvt->pci_ddrio = pdev;
2615			break;
2616		default:
2617			break;
2618		}
2619
2620		edac_dbg(0, "Associated PCI %02x.%02d.%d with dev = %p\n",
2621			 sbridge_dev->bus,
2622			 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
2623			 pdev);
2624	}
2625
2626	/* Check if everything was registered */
2627	if (!pvt->pci_sad0 || !pvt->pci_ha || !pvt->pci_sad1 ||
2628	    !pvt->pci_ras  || !pvt->pci_ta || !pvt->info.pci_vtd)
2629		goto enodev;
2630
2631	if (saw_chan_mask != 0x0f && /* -EN/-EX */
2632	    saw_chan_mask != 0x03)   /* -EP */
2633		goto enodev;
2634	return 0;
2635
2636enodev:
2637	sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
2638	return -ENODEV;
2639}
2640
2641static int broadwell_mci_bind_devs(struct mem_ctl_info *mci,
2642				 struct sbridge_dev *sbridge_dev)
2643{
2644	struct sbridge_pvt *pvt = mci->pvt_info;
2645	struct pci_dev *pdev;
2646	u8 saw_chan_mask = 0;
2647	int i;
2648
2649	/* there's only one device per system; not tied to any bus */
2650	if (pvt->info.pci_vtd == NULL)
2651		/* result will be checked later */
2652		pvt->info.pci_vtd = pci_get_device(PCI_VENDOR_ID_INTEL,
2653						   PCI_DEVICE_ID_INTEL_BROADWELL_IMC_VTD_MISC,
2654						   NULL);
2655
2656	for (i = 0; i < sbridge_dev->n_devs; i++) {
2657		pdev = sbridge_dev->pdev[i];
2658		if (!pdev)
2659			continue;
2660
2661		switch (pdev->device) {
2662		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD0:
2663			pvt->pci_sad0 = pdev;
2664			break;
2665		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD1:
2666			pvt->pci_sad1 = pdev;
2667			break;
2668		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0:
2669		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1:
2670			pvt->pci_ha = pdev;
2671			break;
2672		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA:
2673		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TA:
2674			pvt->pci_ta = pdev;
2675			break;
2676		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TM:
2677		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TM:
2678			pvt->pci_ras = pdev;
2679			break;
2680		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD0:
2681		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD1:
2682		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD2:
2683		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD3:
2684		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD0:
2685		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD1:
2686		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD2:
2687		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD3:
2688		{
2689			int id = TAD_DEV_TO_CHAN(pdev->device);
2690			pvt->pci_tad[id] = pdev;
2691			saw_chan_mask |= 1 << id;
2692		}
2693			break;
2694		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_DDRIO0:
2695			pvt->pci_ddrio = pdev;
2696			break;
2697		default:
2698			break;
2699		}
2700
2701		edac_dbg(0, "Associated PCI %02x.%02d.%d with dev = %p\n",
2702			 sbridge_dev->bus,
2703			 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
2704			 pdev);
2705	}
2706
2707	/* Check if everything was registered */
2708	if (!pvt->pci_sad0 || !pvt->pci_ha || !pvt->pci_sad1 ||
2709	    !pvt->pci_ras  || !pvt->pci_ta || !pvt->info.pci_vtd)
2710		goto enodev;
2711
2712	if (saw_chan_mask != 0x0f && /* -EN/-EX */
2713	    saw_chan_mask != 0x03)   /* -EP */
2714		goto enodev;
2715	return 0;
2716
2717enodev:
2718	sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
2719	return -ENODEV;
2720}
2721
2722static int knl_mci_bind_devs(struct mem_ctl_info *mci,
2723			struct sbridge_dev *sbridge_dev)
2724{
2725	struct sbridge_pvt *pvt = mci->pvt_info;
2726	struct pci_dev *pdev;
2727	int dev, func;
2728
2729	int i;
2730	int devidx;
2731
2732	for (i = 0; i < sbridge_dev->n_devs; i++) {
2733		pdev = sbridge_dev->pdev[i];
2734		if (!pdev)
2735			continue;
2736
2737		/* Extract PCI device and function. */
2738		dev = (pdev->devfn >> 3) & 0x1f;
2739		func = pdev->devfn & 0x7;
2740
2741		switch (pdev->device) {
2742		case PCI_DEVICE_ID_INTEL_KNL_IMC_MC:
2743			if (dev == 8)
2744				pvt->knl.pci_mc0 = pdev;
2745			else if (dev == 9)
2746				pvt->knl.pci_mc1 = pdev;
2747			else {
2748				sbridge_printk(KERN_ERR,
2749					"Memory controller in unexpected place! (dev %d, fn %d)\n",
2750					dev, func);
2751				continue;
2752			}
2753			break;
2754
2755		case PCI_DEVICE_ID_INTEL_KNL_IMC_SAD0:
2756			pvt->pci_sad0 = pdev;
2757			break;
2758
2759		case PCI_DEVICE_ID_INTEL_KNL_IMC_SAD1:
2760			pvt->pci_sad1 = pdev;
2761			break;
2762
2763		case PCI_DEVICE_ID_INTEL_KNL_IMC_CHA:
2764			/* There is one of these per tile, and they range from
2765			 * 1.14.0 to 1.18.5.
2766			 */
2767			devidx = ((dev-14)*8)+func;
2768
2769			if (devidx < 0 || devidx >= KNL_MAX_CHAS) {
2770				sbridge_printk(KERN_ERR,
2771					"Caching and Home Agent in unexpected place! (dev %d, fn %d)\n",
2772					dev, func);
2773				continue;
2774			}
2775
2776			WARN_ON(pvt->knl.pci_cha[devidx] != NULL);
2777
2778			pvt->knl.pci_cha[devidx] = pdev;
2779			break;
2780
2781		case PCI_DEVICE_ID_INTEL_KNL_IMC_CHAN:
2782			devidx = -1;
2783
2784			/*
2785			 *  MC0 channels 0-2 are device 9 function 2-4,
2786			 *  MC1 channels 3-5 are device 8 function 2-4.
2787			 */
2788
2789			if (dev == 9)
2790				devidx = func-2;
2791			else if (dev == 8)
2792				devidx = 3 + (func-2);
2793
2794			if (devidx < 0 || devidx >= KNL_MAX_CHANNELS) {
2795				sbridge_printk(KERN_ERR,
2796					"DRAM Channel Registers in unexpected place! (dev %d, fn %d)\n",
2797					dev, func);
2798				continue;
2799			}
2800
2801			WARN_ON(pvt->knl.pci_channel[devidx] != NULL);
2802			pvt->knl.pci_channel[devidx] = pdev;
2803			break;
2804
2805		case PCI_DEVICE_ID_INTEL_KNL_IMC_TOLHM:
2806			pvt->knl.pci_mc_info = pdev;
2807			break;
2808
2809		case PCI_DEVICE_ID_INTEL_KNL_IMC_TA:
2810			pvt->pci_ta = pdev;
2811			break;
2812
2813		default:
2814			sbridge_printk(KERN_ERR, "Unexpected device %d\n",
2815				pdev->device);
2816			break;
2817		}
2818	}
2819
2820	if (!pvt->knl.pci_mc0  || !pvt->knl.pci_mc1 ||
2821	    !pvt->pci_sad0     || !pvt->pci_sad1    ||
2822	    !pvt->pci_ta) {
2823		goto enodev;
2824	}
2825
2826	for (i = 0; i < KNL_MAX_CHANNELS; i++) {
2827		if (!pvt->knl.pci_channel[i]) {
2828			sbridge_printk(KERN_ERR, "Missing channel %d\n", i);
2829			goto enodev;
2830		}
2831	}
2832
2833	for (i = 0; i < KNL_MAX_CHAS; i++) {
2834		if (!pvt->knl.pci_cha[i]) {
2835			sbridge_printk(KERN_ERR, "Missing CHA %d\n", i);
2836			goto enodev;
2837		}
2838	}
2839
2840	return 0;
2841
2842enodev:
2843	sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
2844	return -ENODEV;
2845}
2846
2847/****************************************************************************
2848			Error check routines
2849 ****************************************************************************/
2850
2851/*
2852 * While Sandy Bridge has error count registers, the SMI BIOS reads
2853 * values from them and resets the counters, so they are not reliable
2854 * for the OS to read. We have no option but to trust whatever the
2855 * MCE is telling us about the errors.
2856 */
2857static void sbridge_mce_output_error(struct mem_ctl_info *mci,
2858				    const struct mce *m)
2859{
2860	struct mem_ctl_info *new_mci;
2861	struct sbridge_pvt *pvt = mci->pvt_info;
2862	enum hw_event_mc_err_type tp_event;
2863	char *type, *optype, msg[256];
2864	bool ripv = GET_BITFIELD(m->mcgstatus, 0, 0);
2865	bool overflow = GET_BITFIELD(m->status, 62, 62);
2866	bool uncorrected_error = GET_BITFIELD(m->status, 61, 61);
2867	bool recoverable;
2868	u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52);
2869	u32 mscod = GET_BITFIELD(m->status, 16, 31);
2870	u32 errcode = GET_BITFIELD(m->status, 0, 15);
2871	u32 channel = GET_BITFIELD(m->status, 0, 3);
2872	u32 optypenum = GET_BITFIELD(m->status, 4, 6);
2873	long channel_mask, first_channel;
2874	u8  rank, socket, ha;
2875	int rc, dimm;
2876	char *area_type = NULL;
2877
2878	if (pvt->info.type != SANDY_BRIDGE)
2879		recoverable = true;
2880	else
2881		recoverable = GET_BITFIELD(m->status, 56, 56);
2882
2883	if (uncorrected_error) {
2884		if (ripv) {
2885			type = "FATAL";
2886			tp_event = HW_EVENT_ERR_FATAL;
2887		} else {
2888			type = "NON_FATAL";
2889			tp_event = HW_EVENT_ERR_UNCORRECTED;
2890		}
2891	} else {
2892		type = "CORRECTED";
2893		tp_event = HW_EVENT_ERR_CORRECTED;
2894	}
2895
2896	/*
2897	 * According to Table 15-9 of the Intel Architecture spec vol 3A,
2898	 * memory errors should fit in this mask:
2899	 *	000f 0000 1mmm cccc (binary)
2900	 * where:
2901	 *	f = Correction Report Filtering Bit. If 1, subsequent errors
2902	 *	    won't be shown
2903	 *	mmm = error type
2904	 *	cccc = channel
2905	 * If the mask doesn't match, report an error to the parsing logic
2906	 */
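	/*
	 * Worked example (illustrative value): errcode = 0x0091 passes the
	 * mask check below (0x0091 & 0xef80 == 0x80); its bits [6:4] = 1
	 * select "memory read error" and its bits [3:0] = 1 name channel 1.
	 */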
2907	if ((errcode & 0xef80) != 0x80) {
2908		optype = "Can't parse: it is not a mem";
2909	} else {
2910		switch (optypenum) {
2911		case 0:
2912			optype = "generic undef request error";
2913			break;
2914		case 1:
2915			optype = "memory read error";
2916			break;
2917		case 2:
2918			optype = "memory write error";
2919			break;
2920		case 3:
2921			optype = "addr/cmd error";
2922			break;
2923		case 4:
2924			optype = "memory scrubbing error";
2925			break;
2926		default:
2927			optype = "reserved";
2928			break;
2929		}
2930	}
2931
2932	/* Only decode errors with a valid address (ADDRV) */
2933	if (!GET_BITFIELD(m->status, 58, 58))
2934		return;
2935
2936	if (pvt->info.type == KNIGHTS_LANDING) {
2937		if (channel == 14) {
2938			edac_dbg(0, "%s%s err_code:%04x:%04x EDRAM bank %d\n",
2939				overflow ? " OVERFLOW" : "",
2940				(uncorrected_error && recoverable)
2941				? " recoverable" : "",
2942				mscod, errcode,
2943				m->bank);
2944		} else {
2945			char A = 'A';
2946
2947			/*
2948			 * Reported channel is in range 0-2, so we can't map it
2949			 * back to the mc. To figure out the mc we check the
2950			 * machine check bank register that reported this error.
2951			 * Bank 15 means mc0 and bank 16 means mc1.
2952			 */
2953			channel = knl_channel_remap(m->bank == 16, channel);
2954			channel_mask = 1 << channel;
2955
2956			snprintf(msg, sizeof(msg),
2957				"%s%s err_code:%04x:%04x channel:%d (DIMM_%c)",
2958				overflow ? " OVERFLOW" : "",
2959				(uncorrected_error && recoverable)
2960				? " recoverable" : " ",
2961				mscod, errcode, channel, A + channel);
2962			edac_mc_handle_error(tp_event, mci, core_err_cnt,
2963				m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0,
2964				channel, 0, -1,
2965				optype, msg);
2966		}
2967		return;
2968	} else {
2969		rc = get_memory_error_data(mci, m->addr, &socket, &ha,
2970				&channel_mask, &rank, &area_type, msg);
2971	}
2972
2973	if (rc < 0)
2974		goto err_parsing;
2975	new_mci = get_mci_for_node_id(socket, ha);
2976	if (!new_mci) {
2977		strcpy(msg, "Error: socket got corrupted!");
2978		goto err_parsing;
2979	}
2980	mci = new_mci;
2981	pvt = mci->pvt_info;
2982
2983	first_channel = find_first_bit(&channel_mask, NUM_CHANNELS);
2984
2985	if (rank < 4)
2986		dimm = 0;
2987	else if (rank < 8)
2988		dimm = 1;
2989	else
2990		dimm = 2;
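	/*
	 * The mapping above assumes up to four ranks per DIMM slot: ranks
	 * 0-3 belong to DIMM 0, ranks 4-7 to DIMM 1, and anything higher
	 * to DIMM 2.
	 */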
2991
2992
2993	/*
2994	 * FIXME: On some memory configurations (mirror, lockstep), the
2995	 * Memory Controller can't point the error to a single DIMM. The
2996	 * EDAC core should be handling the channel mask, in order to point
2997	 * to the group of DIMMs where the error may be happening.
2998	 */
2999	if (!pvt->is_lockstep && !pvt->is_cur_addr_mirrored && !pvt->is_close_pg)
3000		channel = first_channel;
3001
3002	snprintf(msg, sizeof(msg),
3003		 "%s%s area:%s err_code:%04x:%04x socket:%d ha:%d channel_mask:%ld rank:%d",
3004		 overflow ? " OVERFLOW" : "",
3005		 (uncorrected_error && recoverable) ? " recoverable" : "",
3006		 area_type,
3007		 mscod, errcode,
3008		 socket, ha,
3009		 channel_mask,
3010		 rank);
3011
3012	edac_dbg(0, "%s\n", msg);
3013
3014	/* FIXME: need support for channel mask */
3015
3016	if (channel == CHANNEL_UNSPECIFIED)
3017		channel = -1;
3018
3019	/* Call the helper to output message */
3020	edac_mc_handle_error(tp_event, mci, core_err_cnt,
3021			     m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0,
3022			     channel, dimm, -1,
3023			     optype, msg);
3024	return;
3025err_parsing:
3026	edac_mc_handle_error(tp_event, mci, core_err_cnt, 0, 0, 0,
3027			     -1, -1, -1,
3028			     msg, "");
3029
3030}
3031
3032/*
3033 * Check that logging is enabled and that this is the right type
3034 * of error for us to handle.
3035 */
3036static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val,
3037				   void *data)
3038{
3039	struct mce *mce = (struct mce *)data;
3040	struct mem_ctl_info *mci;
3041	struct sbridge_pvt *pvt;
3042	char *type;
3043
3044	if (edac_get_report_status() == EDAC_REPORTING_DISABLED)
3045		return NOTIFY_DONE;
3046
3047	mci = get_mci_for_node_id(mce->socketid, IMC0);
3048	if (!mci)
3049		return NOTIFY_DONE;
3050	pvt = mci->pvt_info;
3051
3052	/*
3053	 * Just let mcelog handle it if the error is
3054	 * outside the memory controller. A memory error
3055	 * is indicated by bit 7 = 1 and bits 8-11,13-15 = 0.
3056	 * Bit 12 has a special meaning.
3057	 */
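	/*
	 * For example (illustrative values): a status word whose low 16 bits
	 * are 0x0090 gives (0x0090 & 0xefff) >> 7 == 1 and is decoded here,
	 * while 0x0110 gives 2 and is left to the rest of the MCE chain.
	 */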
3058	if ((mce->status & 0xefff) >> 7 != 1)
3059		return NOTIFY_DONE;
3060
3061	if (mce->mcgstatus & MCG_STATUS_MCIP)
3062		type = "Exception";
3063	else
3064		type = "Event";
3065
3066	sbridge_mc_printk(mci, KERN_DEBUG, "HANDLING MCE MEMORY ERROR\n");
3067
3068	sbridge_mc_printk(mci, KERN_DEBUG, "CPU %d: Machine Check %s: %Lx "
3069			  "Bank %d: %016Lx\n", mce->extcpu, type,
3070			  mce->mcgstatus, mce->bank, mce->status);
3071	sbridge_mc_printk(mci, KERN_DEBUG, "TSC %llx ", mce->tsc);
3072	sbridge_mc_printk(mci, KERN_DEBUG, "ADDR %llx ", mce->addr);
3073	sbridge_mc_printk(mci, KERN_DEBUG, "MISC %llx ", mce->misc);
3074
3075	sbridge_mc_printk(mci, KERN_DEBUG, "PROCESSOR %u:%x TIME %llu SOCKET "
3076			  "%u APIC %x\n", mce->cpuvendor, mce->cpuid,
3077			  mce->time, mce->socketid, mce->apicid);
3078
3079	sbridge_mce_output_error(mci, mce);
3080
3081	/* Advise mcelog that the error was handled */
3082	return NOTIFY_STOP;
3083}
3084
3085static struct notifier_block sbridge_mce_dec = {
3086	.notifier_call	= sbridge_mce_check_error,
3087	.priority	= MCE_PRIO_EDAC,
3088};
3089
3090/****************************************************************************
3091			EDAC register/unregister logic
3092 ****************************************************************************/
3093
3094static void sbridge_unregister_mci(struct sbridge_dev *sbridge_dev)
3095{
3096	struct mem_ctl_info *mci = sbridge_dev->mci;
3097	struct sbridge_pvt *pvt;
3098
3099	if (unlikely(!mci || !mci->pvt_info)) {
3100		edac_dbg(0, "MC: dev = %p\n", &sbridge_dev->pdev[0]->dev);
3101
3102		sbridge_printk(KERN_ERR, "Couldn't find mci handler\n");
3103		return;
3104	}
3105
3106	pvt = mci->pvt_info;
3107
3108	edac_dbg(0, "MC: mci = %p, dev = %p\n",
3109		 mci, &sbridge_dev->pdev[0]->dev);
3110
3111	/* Remove MC sysfs nodes */
3112	edac_mc_del_mc(mci->pdev);
3113
3114	edac_dbg(1, "%s: free mci struct\n", mci->ctl_name);
3115	kfree(mci->ctl_name);
3116	edac_mc_free(mci);
3117	sbridge_dev->mci = NULL;
3118}
3119
3120static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
3121{
3122	struct mem_ctl_info *mci;
3123	struct edac_mc_layer layers[2];
3124	struct sbridge_pvt *pvt;
3125	struct pci_dev *pdev = sbridge_dev->pdev[0];
3126	int rc;
3127
3128	/* allocate a new MC control structure */
3129	layers[0].type = EDAC_MC_LAYER_CHANNEL;
3130	layers[0].size = type == KNIGHTS_LANDING ?
3131		KNL_MAX_CHANNELS : NUM_CHANNELS;
3132	layers[0].is_virt_csrow = false;
3133	layers[1].type = EDAC_MC_LAYER_SLOT;
3134	layers[1].size = type == KNIGHTS_LANDING ? 1 : MAX_DIMMS;
3135	layers[1].is_virt_csrow = true;
3136	mci = edac_mc_alloc(sbridge_dev->mc, ARRAY_SIZE(layers), layers,
3137			    sizeof(*pvt));
3138
3139	if (unlikely(!mci))
3140		return -ENOMEM;
3141
3142	edac_dbg(0, "MC: mci = %p, dev = %p\n",
3143		 mci, &pdev->dev);
3144
3145	pvt = mci->pvt_info;
3146	memset(pvt, 0, sizeof(*pvt));
3147
3148	/* Associate sbridge_dev and mci for future usage */
3149	pvt->sbridge_dev = sbridge_dev;
3150	sbridge_dev->mci = mci;
3151
3152	mci->mtype_cap = type == KNIGHTS_LANDING ?
3153		MEM_FLAG_DDR4 : MEM_FLAG_DDR3;
3154	mci->edac_ctl_cap = EDAC_FLAG_NONE;
3155	mci->edac_cap = EDAC_FLAG_NONE;
3156	mci->mod_name = EDAC_MOD_STR;
3157	mci->dev_name = pci_name(pdev);
3158	mci->ctl_page_to_phys = NULL;
3159
3160	pvt->info.type = type;
3161	switch (type) {
3162	case IVY_BRIDGE:
3163		pvt->info.rankcfgr = IB_RANK_CFG_A;
3164		pvt->info.get_tolm = ibridge_get_tolm;
3165		pvt->info.get_tohm = ibridge_get_tohm;
3166		pvt->info.dram_rule = ibridge_dram_rule;
3167		pvt->info.get_memory_type = get_memory_type;
3168		pvt->info.get_node_id = get_node_id;
3169		pvt->info.rir_limit = rir_limit;
3170		pvt->info.sad_limit = sad_limit;
3171		pvt->info.interleave_mode = interleave_mode;
3172		pvt->info.dram_attr = dram_attr;
3173		pvt->info.max_sad = ARRAY_SIZE(ibridge_dram_rule);
3174		pvt->info.interleave_list = ibridge_interleave_list;
3175		pvt->info.interleave_pkg = ibridge_interleave_pkg;
3176		pvt->info.get_width = ibridge_get_width;
3177
3178		/* Store pci devices at mci for faster access */
3179		rc = ibridge_mci_bind_devs(mci, sbridge_dev);
3180		if (unlikely(rc < 0))
3181			goto fail0;
3182		get_source_id(mci);
3183		mci->ctl_name = kasprintf(GFP_KERNEL, "Ivy Bridge SrcID#%d_Ha#%d",
3184			pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom);
3185		break;
3186	case SANDY_BRIDGE:
3187		pvt->info.rankcfgr = SB_RANK_CFG_A;
3188		pvt->info.get_tolm = sbridge_get_tolm;
3189		pvt->info.get_tohm = sbridge_get_tohm;
3190		pvt->info.dram_rule = sbridge_dram_rule;
3191		pvt->info.get_memory_type = get_memory_type;
3192		pvt->info.get_node_id = get_node_id;
3193		pvt->info.rir_limit = rir_limit;
3194		pvt->info.sad_limit = sad_limit;
3195		pvt->info.interleave_mode = interleave_mode;
3196		pvt->info.dram_attr = dram_attr;
3197		pvt->info.max_sad = ARRAY_SIZE(sbridge_dram_rule);
3198		pvt->info.interleave_list = sbridge_interleave_list;
3199		pvt->info.interleave_pkg = sbridge_interleave_pkg;
3200		pvt->info.get_width = sbridge_get_width;
3201
3202		/* Store pci devices at mci for faster access */
3203		rc = sbridge_mci_bind_devs(mci, sbridge_dev);
3204		if (unlikely(rc < 0))
3205			goto fail0;
3206		get_source_id(mci);
3207		mci->ctl_name = kasprintf(GFP_KERNEL, "Sandy Bridge SrcID#%d_Ha#%d",
3208			pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom);
3209		break;
3210	case HASWELL:
3211		/* rankcfgr isn't used */
3212		pvt->info.get_tolm = haswell_get_tolm;
3213		pvt->info.get_tohm = haswell_get_tohm;
3214		pvt->info.dram_rule = ibridge_dram_rule;
3215		pvt->info.get_memory_type = haswell_get_memory_type;
3216		pvt->info.get_node_id = haswell_get_node_id;
3217		pvt->info.rir_limit = haswell_rir_limit;
3218		pvt->info.sad_limit = sad_limit;
3219		pvt->info.interleave_mode = interleave_mode;
3220		pvt->info.dram_attr = dram_attr;
3221		pvt->info.max_sad = ARRAY_SIZE(ibridge_dram_rule);
3222		pvt->info.interleave_list = ibridge_interleave_list;
3223		pvt->info.interleave_pkg = ibridge_interleave_pkg;
3224		pvt->info.get_width = ibridge_get_width;
3225
3226		/* Store pci devices at mci for faster access */
3227		rc = haswell_mci_bind_devs(mci, sbridge_dev);
3228		if (unlikely(rc < 0))
3229			goto fail0;
3230		get_source_id(mci);
3231		mci->ctl_name = kasprintf(GFP_KERNEL, "Haswell SrcID#%d_Ha#%d",
3232			pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom);
3233		break;
3234	case BROADWELL:
3235		/* rankcfgr isn't used */
3236		pvt->info.get_tolm = haswell_get_tolm;
3237		pvt->info.get_tohm = haswell_get_tohm;
3238		pvt->info.dram_rule = ibridge_dram_rule;
3239		pvt->info.get_memory_type = haswell_get_memory_type;
3240		pvt->info.get_node_id = haswell_get_node_id;
3241		pvt->info.rir_limit = haswell_rir_limit;
3242		pvt->info.sad_limit = sad_limit;
3243		pvt->info.interleave_mode = interleave_mode;
3244		pvt->info.dram_attr = dram_attr;
3245		pvt->info.max_sad = ARRAY_SIZE(ibridge_dram_rule);
3246		pvt->info.interleave_list = ibridge_interleave_list;
3247		pvt->info.interleave_pkg = ibridge_interleave_pkg;
3248		pvt->info.get_width = broadwell_get_width;
3249
3250		/* Store pci devices at mci for faster access */
3251		rc = broadwell_mci_bind_devs(mci, sbridge_dev);
3252		if (unlikely(rc < 0))
3253			goto fail0;
3254		get_source_id(mci);
3255		mci->ctl_name = kasprintf(GFP_KERNEL, "Broadwell SrcID#%d_Ha#%d",
3256			pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom);
3257		break;
3258	case KNIGHTS_LANDING:
3259		/* pvt->info.rankcfgr == ??? */
3260		pvt->info.get_tolm = knl_get_tolm;
3261		pvt->info.get_tohm = knl_get_tohm;
3262		pvt->info.dram_rule = knl_dram_rule;
3263		pvt->info.get_memory_type = knl_get_memory_type;
3264		pvt->info.get_node_id = knl_get_node_id;
3265		pvt->info.rir_limit = NULL;
3266		pvt->info.sad_limit = knl_sad_limit;
3267		pvt->info.interleave_mode = knl_interleave_mode;
3268		pvt->info.dram_attr = dram_attr_knl;
3269		pvt->info.max_sad = ARRAY_SIZE(knl_dram_rule);
3270		pvt->info.interleave_list = knl_interleave_list;
3271		pvt->info.interleave_pkg = ibridge_interleave_pkg;
3272		pvt->info.get_width = knl_get_width;
3273
3274		rc = knl_mci_bind_devs(mci, sbridge_dev);
3275		if (unlikely(rc < 0))
3276			goto fail0;
3277		get_source_id(mci);
3278		mci->ctl_name = kasprintf(GFP_KERNEL, "Knights Landing SrcID#%d_Ha#%d",
3279			pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom);
3280		break;
3281	}
3282
3283	if (!mci->ctl_name) {
3284		rc = -ENOMEM;
3285		goto fail0;
3286	}
3287
3288	/* Get dimm basic config and the memory layout */
3289	rc = get_dimm_config(mci);
3290	if (rc < 0) {
3291		edac_dbg(0, "MC: failed to get_dimm_config()\n");
3292		goto fail;
3293	}
3294	get_memory_layout(mci);
3295
3296	/* record ptr to the generic device */
3297	mci->pdev = &pdev->dev;
3298
3299	/* add this new MC control structure to EDAC's list of MCs */
3300	if (unlikely(edac_mc_add_mc(mci))) {
3301		edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
3302		rc = -EINVAL;
3303		goto fail;
3304	}
3305
3306	return 0;
3307
3308fail:
3309	kfree(mci->ctl_name);
3310fail0:
3311	edac_mc_free(mci);
3312	sbridge_dev->mci = NULL;
3313	return rc;
3314}
3315
3316#define ICPU(model, table) \
3317	{ X86_VENDOR_INTEL, 6, model, 0, (unsigned long)&table }
3318
3319static const struct x86_cpu_id sbridge_cpuids[] = {
3320	ICPU(INTEL_FAM6_SANDYBRIDGE_X,	  pci_dev_descr_sbridge_table),
3321	ICPU(INTEL_FAM6_IVYBRIDGE_X,	  pci_dev_descr_ibridge_table),
3322	ICPU(INTEL_FAM6_HASWELL_X,	  pci_dev_descr_haswell_table),
3323	ICPU(INTEL_FAM6_BROADWELL_X,	  pci_dev_descr_broadwell_table),
3324	ICPU(INTEL_FAM6_BROADWELL_XEON_D, pci_dev_descr_broadwell_table),
3325	ICPU(INTEL_FAM6_XEON_PHI_KNL,	  pci_dev_descr_knl_table),
3326	ICPU(INTEL_FAM6_XEON_PHI_KNM,	  pci_dev_descr_knl_table),
3327	{ }
3328};
3329MODULE_DEVICE_TABLE(x86cpu, sbridge_cpuids);
3330
3331/*
3332 *	sbridge_probe	Get all devices and register memory controllers
3333 *			present.
3334 *	return:
3335 *		0 if a device was found
3336 *		< 0 for error code
3337 */
3338
3339static int sbridge_probe(const struct x86_cpu_id *id)
3340{
3341	int rc = -ENODEV;
3342	u8 mc, num_mc = 0;
3343	struct sbridge_dev *sbridge_dev;
3344	struct pci_id_table *ptable = (struct pci_id_table *)id->driver_data;
3345
3346	/* get the pci devices we want to reserve for our use */
3347	rc = sbridge_get_all_devices(&num_mc, ptable);
3348
3349	if (unlikely(rc < 0)) {
3350		edac_dbg(0, "couldn't get all devices\n");
3351		goto fail0;
3352	}
3353
3354	mc = 0;
3355
3356	list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) {
3357		edac_dbg(0, "Registering MC#%d (%d of %d)\n",
3358			 mc, mc + 1, num_mc);
3359
3360		sbridge_dev->mc = mc++;
3361		rc = sbridge_register_mci(sbridge_dev, ptable->type);
3362		if (unlikely(rc < 0))
3363			goto fail1;
3364	}
3365
3366	sbridge_printk(KERN_INFO, "%s\n", SBRIDGE_REVISION);
3367
3368	return 0;
3369
3370fail1:
3371	list_for_each_entry(sbridge_dev, &sbridge_edac_list, list)
3372		sbridge_unregister_mci(sbridge_dev);
3373
3374	sbridge_put_all_devices();
3375fail0:
3376	return rc;
3377}
3378
3379/*
3380 *	sbridge_remove	cleanup
3381 *
3382 */
3383static void sbridge_remove(void)
3384{
3385	struct sbridge_dev *sbridge_dev;
3386
3387	edac_dbg(0, "\n");
3388
3389	list_for_each_entry(sbridge_dev, &sbridge_edac_list, list)
3390		sbridge_unregister_mci(sbridge_dev);
3391
3392	/* Release PCI resources */
3393	sbridge_put_all_devices();
3394}
3395
3396/*
3397 *	sbridge_init		Module entry function
3398 *			Try to initialize this module for its devices
3399 */
3400static int __init sbridge_init(void)
3401{
3402	const struct x86_cpu_id *id;
3403	const char *owner;
3404	int rc;
3405
3406	edac_dbg(2, "\n");
3407
3408	owner = edac_get_owner();
3409	if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
3410		return -EBUSY;
3411
3412	id = x86_match_cpu(sbridge_cpuids);
3413	if (!id)
3414		return -ENODEV;
3415
3416	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
3417	opstate_init();
3418
3419	rc = sbridge_probe(id);
3420
3421	if (rc >= 0) {
3422		mce_register_decode_chain(&sbridge_mce_dec);
3423		if (edac_get_report_status() == EDAC_REPORTING_DISABLED)
3424			sbridge_printk(KERN_WARNING, "Loading driver, error reporting disabled.\n");
3425		return 0;
3426	}
3427
3428	sbridge_printk(KERN_ERR, "Failed to register device with error %d.\n",
3429		      rc);
3430
3431	return rc;
3432}
3433
3434/*
3435 *	sbridge_exit()	Module exit function
3436 *			Unregister the driver
3437 */
3438static void __exit sbridge_exit(void)
3439{
3440	edac_dbg(2, "\n");
3441	sbridge_remove();
3442	mce_unregister_decode_chain(&sbridge_mce_dec);
3443}
3444
3445module_init(sbridge_init);
3446module_exit(sbridge_exit);
3447
3448module_param(edac_op_state, int, 0444);
3449MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
3450
3451MODULE_LICENSE("GPL");
3452MODULE_AUTHOR("Mauro Carvalho Chehab");
3453MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)");
3454MODULE_DESCRIPTION("MC Driver for Intel Sandy Bridge and Ivy Bridge memory controllers - "
3455		   SBRIDGE_REVISION);