   1/*
   2 * Intel 7300 class Memory Controllers kernel module (Clarksboro)
   3 *
   4 * This file may be distributed under the terms of the
   5 * GNU General Public License version 2 only.
   6 *
   7 * Copyright (c) 2010 by:
   8 *	 Mauro Carvalho Chehab
   9 *
  10 * Red Hat Inc. http://www.redhat.com
  11 *
  12 * Intel 7300 Chipset Memory Controller Hub (MCH) - Datasheet
  13 *	http://www.intel.com/Assets/PDF/datasheet/318082.pdf
  14 *
   15 * TODO: The chipset also allows checking for PCI Express errors. Currently,
   16 *	 the driver covers only memory errors.
  17 *
  18 * This driver uses "csrows" EDAC attribute to represent DIMM slot#
  19 */
  20
  21#include <linux/module.h>
  22#include <linux/init.h>
  23#include <linux/pci.h>
  24#include <linux/pci_ids.h>
  25#include <linux/slab.h>
  26#include <linux/edac.h>
  27#include <linux/mmzone.h>
  28
  29#include "edac_core.h"
  30
  31/*
  32 * Alter this version for the I7300 module when modifications are made
  33 */
  34#define I7300_REVISION    " Ver: 1.0.0"
  35
  36#define EDAC_MOD_STR      "i7300_edac"
  37
  38#define i7300_printk(level, fmt, arg...) \
  39	edac_printk(level, "i7300", fmt, ##arg)
  40
  41#define i7300_mc_printk(mci, level, fmt, arg...) \
  42	edac_mc_chipset_printk(mci, level, "i7300", fmt, ##arg)
  43
  44/***********************************************
  45 * i7300 Limit constants Structs and static vars
  46 ***********************************************/
  47
  48/*
  49 * Memory topology is organized as:
  50 *	Branch 0 - 2 channels: channels 0 and 1 (FDB0 PCI dev 21.0)
  51 *	Branch 1 - 2 channels: channels 2 and 3 (FDB1 PCI dev 22.0)
   52 * Each channel can have up to 8 DIMM sets (called SLOTS)
   53 * Slots should generally be filled in pairs
   54 *	Except in Single Channel mode of operation:
   55 *		only slot 0/channel 0 is filled in this mode
   56 *	In normal operation mode, the two channels on a branch should be
   57 *		filled together for the same SLOT#
   58 * When in mirrored mode, Branch 1 replicates the memory of Branch 0, so the
   59 *		four channels on both branches should be filled
  60 */
  61
  62/* Limits for i7300 */
  63#define MAX_SLOTS		8
  64#define MAX_BRANCHES		2
  65#define MAX_CH_PER_BRANCH	2
  66#define MAX_CHANNELS		(MAX_CH_PER_BRANCH * MAX_BRANCHES)
  67#define MAX_MIR			3
  68
  69#define to_channel(ch, branch)	((((branch)) << 1) | (ch))
  70
  71#define to_csrow(slot, ch, branch)					\
  72		(to_channel(ch, branch) | ((slot) << 2))
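/*
 * Illustrative examples of the mapping above (not from the datasheet, just
 * the arithmetic of the two macros): to_channel(1, 1) = (1 << 1) | 1 = 3,
 * i.e. branch 1/channel 1 is global channel 3; to_csrow(2, 0, 1) =
 * to_channel(0, 1) | (2 << 2) = 2 | 8 = 10, i.e. slot 2 on branch 1/channel 0
 * maps to csrow 10.
 */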
  73
  74/* Device name and register DID (Device ID) */
  75struct i7300_dev_info {
  76	const char *ctl_name;	/* name for this device */
  77	u16 fsb_mapping_errors;	/* DID for the branchmap,control */
  78};
  79
  80/* Table of devices attributes supported by this driver */
  81static const struct i7300_dev_info i7300_devs[] = {
  82	{
  83		.ctl_name = "I7300",
  84		.fsb_mapping_errors = PCI_DEVICE_ID_INTEL_I7300_MCH_ERR,
  85	},
  86};
  87
  88struct i7300_dimm_info {
  89	int megabytes;		/* size, 0 means not present  */
  90};
  91
  92/* driver private data structure */
  93struct i7300_pvt {
  94	struct pci_dev *pci_dev_16_0_fsb_ctlr;		/* 16.0 */
  95	struct pci_dev *pci_dev_16_1_fsb_addr_map;	/* 16.1 */
  96	struct pci_dev *pci_dev_16_2_fsb_err_regs;	/* 16.2 */
  97	struct pci_dev *pci_dev_2x_0_fbd_branch[MAX_BRANCHES];	/* 21.0  and 22.0 */
  98
  99	u16 tolm;				/* top of low memory */
 100	u64 ambase;				/* AMB BAR */
 101
 102	u32 mc_settings;			/* Report several settings */
 103	u32 mc_settings_a;
 104
 105	u16 mir[MAX_MIR];			/* Memory Interleave Reg*/
 106
  107	u16 mtr[MAX_SLOTS][MAX_BRANCHES];	/* Memory Technology Reg */
 108	u16 ambpresent[MAX_CHANNELS];		/* AMB present regs */
 109
 110	/* DIMM information matrix, allocating architecture maximums */
 111	struct i7300_dimm_info dimm_info[MAX_SLOTS][MAX_CHANNELS];
 112
 113	/* Temporary buffer for use when preparing error messages */
 114	char *tmp_prt_buffer;
 115};
 116
 117/* FIXME: Why do we need to have this static? */
 118static struct edac_pci_ctl_info *i7300_pci;
 119
 120/***************************************************
 121 * i7300 Register definitions for memory enumeration
 122 ***************************************************/
 123
 124/*
 125 * Device 16,
 126 * Function 0: System Address (not documented)
 127 * Function 1: Memory Branch Map, Control, Errors Register
 128 */
 129
 130	/* OFFSETS for Function 0 */
 131#define AMBASE			0x48 /* AMB Mem Mapped Reg Region Base */
 132#define MAXCH			0x56 /* Max Channel Number */
 133#define MAXDIMMPERCH		0x57 /* Max DIMM PER Channel Number */
 134
 135	/* OFFSETS for Function 1 */
 136#define MC_SETTINGS		0x40
 137  #define IS_MIRRORED(mc)		((mc) & (1 << 16))
 138  #define IS_ECC_ENABLED(mc)		((mc) & (1 << 5))
 139  #define IS_RETRY_ENABLED(mc)		((mc) & (1 << 31))
 140  #define IS_SCRBALGO_ENHANCED(mc)	((mc) & (1 << 8))
 141
 142#define MC_SETTINGS_A		0x58
 143  #define IS_SINGLE_MODE(mca)		((mca) & (1 << 14))
 144
 145#define TOLM			0x6C
 146
 147#define MIR0			0x80
 148#define MIR1			0x84
 149#define MIR2			0x88
 150
 151/*
  152 * Note: Other Intel EDAC drivers use AMBPRESENT to identify the available
  153 * memory. From datasheet item 7.3.1 (FB-DIMM technology & organization), it
  154 * seems that we cannot use this information directly for the same purpose.
  155 * Each memory slot may have up to 2 AMB interfaces, one for the incoming and
  156 * another for the outgoing interface to the next slot.
  157 * For now, the driver just stores the AMB present registers, but relies only
  158 * on the MTR info to detect memory.
  159 * The datasheet is also not clear about how to map each AMBPRESENT register
  160 * to one of the 4 available channels.
 161 */
 162#define AMBPRESENT_0	0x64
 163#define AMBPRESENT_1	0x66
 164
 165static const u16 mtr_regs[MAX_SLOTS] = {
 166	0x80, 0x84, 0x88, 0x8c,
 167	0x82, 0x86, 0x8a, 0x8e
 168};
 169
 170/*
  171 * Defines to extract the various fields from the
 172 *	MTRx - Memory Technology Registers
 173 */
 174#define MTR_DIMMS_PRESENT(mtr)		((mtr) & (1 << 8))
 175#define MTR_DIMMS_ETHROTTLE(mtr)	((mtr) & (1 << 7))
 176#define MTR_DRAM_WIDTH(mtr)		(((mtr) & (1 << 6)) ? 8 : 4)
 177#define MTR_DRAM_BANKS(mtr)		(((mtr) & (1 << 5)) ? 8 : 4)
 178#define MTR_DIMM_RANKS(mtr)		(((mtr) & (1 << 4)) ? 1 : 0)
 179#define MTR_DIMM_ROWS(mtr)		(((mtr) >> 2) & 0x3)
 180#define MTR_DRAM_BANKS_ADDR_BITS	2
 181#define MTR_DIMM_ROWS_ADDR_BITS(mtr)	(MTR_DIMM_ROWS(mtr) + 13)
 182#define MTR_DIMM_COLS(mtr)		((mtr) & 0x3)
 183#define MTR_DIMM_COLS_ADDR_BITS(mtr)	(MTR_DIMM_COLS(mtr) + 10)
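/*
 * Worked example (hypothetical register value, just to show how the macros
 * above combine): for mtr = 0x0135, bit 8 is set so a DIMM is present,
 * MTR_DRAM_WIDTH() = x4, MTR_DRAM_BANKS() = 8, MTR_DIMM_RANKS() = 1 (dual),
 * MTR_DIMM_ROWS() = 1 (14 row address bits) and MTR_DIMM_COLS() = 1
 * (11 column address bits). decode_mtr() below turns this into a size:
 * 2 (banks) + 14 (rows) + 11 (cols) + 1 (rank) + 6 (64-bit width) = 34
 * address bits, minus 20 (MiB) and 3 (bits->bytes) gives 2^11 = 2048 MB.
 */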
 184
 185/************************************************
 186 * i7300 Register definitions for error detection
 187 ************************************************/
 188
 189/*
 190 * Device 16.1: FBD Error Registers
 191 */
 192#define FERR_FAT_FBD	0x98
 193static const char *ferr_fat_fbd_name[] = {
 194	[22] = "Non-Redundant Fast Reset Timeout",
 195	[2]  = ">Tmid Thermal event with intelligent throttling disabled",
 196	[1]  = "Memory or FBD configuration CRC read error",
 197	[0]  = "Memory Write error on non-redundant retry or "
 198	       "FBD configuration Write error on retry",
 199};
 200#define GET_FBD_FAT_IDX(fbderr)	(((fbderr) >> 28) & 3)
 201#define FERR_FAT_FBD_ERR_MASK ((1 << 0) | (1 << 1) | (1 << 2) | (1 << 22))
 202
 203#define FERR_NF_FBD	0xa0
 204static const char *ferr_nf_fbd_name[] = {
 205	[24] = "DIMM-Spare Copy Completed",
 206	[23] = "DIMM-Spare Copy Initiated",
 207	[22] = "Redundant Fast Reset Timeout",
 208	[21] = "Memory Write error on redundant retry",
 209	[18] = "SPD protocol Error",
 210	[17] = "FBD Northbound parity error on FBD Sync Status",
 211	[16] = "Correctable Patrol Data ECC",
 212	[15] = "Correctable Resilver- or Spare-Copy Data ECC",
 213	[14] = "Correctable Mirrored Demand Data ECC",
 214	[13] = "Correctable Non-Mirrored Demand Data ECC",
 215	[11] = "Memory or FBD configuration CRC read error",
 216	[10] = "FBD Configuration Write error on first attempt",
 217	[9]  = "Memory Write error on first attempt",
 218	[8]  = "Non-Aliased Uncorrectable Patrol Data ECC",
 219	[7]  = "Non-Aliased Uncorrectable Resilver- or Spare-Copy Data ECC",
 220	[6]  = "Non-Aliased Uncorrectable Mirrored Demand Data ECC",
 221	[5]  = "Non-Aliased Uncorrectable Non-Mirrored Demand Data ECC",
 222	[4]  = "Aliased Uncorrectable Patrol Data ECC",
 223	[3]  = "Aliased Uncorrectable Resilver- or Spare-Copy Data ECC",
 224	[2]  = "Aliased Uncorrectable Mirrored Demand Data ECC",
 225	[1]  = "Aliased Uncorrectable Non-Mirrored Demand Data ECC",
 226	[0]  = "Uncorrectable Data ECC on Replay",
 227};
 228#define GET_FBD_NF_IDX(fbderr)	(((fbderr) >> 28) & 3)
 229#define FERR_NF_FBD_ERR_MASK ((1 << 24) | (1 << 23) | (1 << 22) | (1 << 21) |\
 230			      (1 << 18) | (1 << 17) | (1 << 16) | (1 << 15) |\
 231			      (1 << 14) | (1 << 13) | (1 << 11) | (1 << 10) |\
 232			      (1 << 9)  | (1 << 8)  | (1 << 7)  | (1 << 6)  |\
 233			      (1 << 5)  | (1 << 4)  | (1 << 3)  | (1 << 2)  |\
 234			      (1 << 1)  | (1 << 0))
 235
 236#define EMASK_FBD	0xa8
 237#define EMASK_FBD_ERR_MASK ((1 << 27) | (1 << 26) | (1 << 25) | (1 << 24) |\
 238			    (1 << 22) | (1 << 21) | (1 << 20) | (1 << 19) |\
 239			    (1 << 18) | (1 << 17) | (1 << 16) | (1 << 14) |\
 240			    (1 << 13) | (1 << 12) | (1 << 11) | (1 << 10) |\
 241			    (1 << 9)  | (1 << 8)  | (1 << 7)  | (1 << 6)  |\
 242			    (1 << 5)  | (1 << 4)  | (1 << 3)  | (1 << 2)  |\
 243			    (1 << 1)  | (1 << 0))
 244
 245/*
 246 * Device 16.2: Global Error Registers
 247 */
 248
 249#define FERR_GLOBAL_HI	0x48
 250static const char *ferr_global_hi_name[] = {
 251	[3] = "FSB 3 Fatal Error",
 252	[2] = "FSB 2 Fatal Error",
 253	[1] = "FSB 1 Fatal Error",
 254	[0] = "FSB 0 Fatal Error",
 255};
 256#define ferr_global_hi_is_fatal(errno)	1
 257
 258#define FERR_GLOBAL_LO	0x40
 259static const char *ferr_global_lo_name[] = {
 260	[31] = "Internal MCH Fatal Error",
 261	[30] = "Intel QuickData Technology Device Fatal Error",
 262	[29] = "FSB1 Fatal Error",
 263	[28] = "FSB0 Fatal Error",
 264	[27] = "FBD Channel 3 Fatal Error",
 265	[26] = "FBD Channel 2 Fatal Error",
 266	[25] = "FBD Channel 1 Fatal Error",
 267	[24] = "FBD Channel 0 Fatal Error",
  268	[23] = "PCI Express Device 7 Fatal Error",
 269	[22] = "PCI Express Device 6 Fatal Error",
 270	[21] = "PCI Express Device 5 Fatal Error",
 271	[20] = "PCI Express Device 4 Fatal Error",
 272	[19] = "PCI Express Device 3 Fatal Error",
 273	[18] = "PCI Express Device 2 Fatal Error",
 274	[17] = "PCI Express Device 1 Fatal Error",
 275	[16] = "ESI Fatal Error",
 276	[15] = "Internal MCH Non-Fatal Error",
 277	[14] = "Intel QuickData Technology Device Non Fatal Error",
 278	[13] = "FSB1 Non-Fatal Error",
 279	[12] = "FSB 0 Non-Fatal Error",
 280	[11] = "FBD Channel 3 Non-Fatal Error",
 281	[10] = "FBD Channel 2 Non-Fatal Error",
 282	[9]  = "FBD Channel 1 Non-Fatal Error",
 283	[8]  = "FBD Channel 0 Non-Fatal Error",
 284	[7]  = "PCI Express Device 7 Non-Fatal Error",
 285	[6]  = "PCI Express Device 6 Non-Fatal Error",
 286	[5]  = "PCI Express Device 5 Non-Fatal Error",
 287	[4]  = "PCI Express Device 4 Non-Fatal Error",
 288	[3]  = "PCI Express Device 3 Non-Fatal Error",
 289	[2]  = "PCI Express Device 2 Non-Fatal Error",
 290	[1]  = "PCI Express Device 1 Non-Fatal Error",
 291	[0]  = "ESI Non-Fatal Error",
 292};
 293#define ferr_global_lo_is_fatal(errno)	((errno < 16) ? 0 : 1)
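/*
 * In FERR_GLOBAL_LO, bits 16..31 report fatal errors and bits 0..15 report
 * non-fatal ones, so the bit number alone gives the severity; e.g. bit 24
 * ("FBD Channel 0 Fatal Error") is fatal, while bit 8 ("FBD Channel 0
 * Non-Fatal Error") is not.
 */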
 294
 295#define NRECMEMA	0xbe
 296  #define NRECMEMA_BANK(v)	(((v) >> 12) & 7)
 297  #define NRECMEMA_RANK(v)	(((v) >> 8) & 15)
 298
 299#define NRECMEMB	0xc0
 300  #define NRECMEMB_IS_WR(v)	((v) & (1 << 31))
 301  #define NRECMEMB_CAS(v)	(((v) >> 16) & 0x1fff)
 302  #define NRECMEMB_RAS(v)	((v) & 0xffff)
 303
 304#define REDMEMA		0xdc
 305
 306#define REDMEMB		0x7c
  307  #define IS_SECOND_CH(v)	((v) & (1 << 17))
 308
 309#define RECMEMA		0xe0
 310  #define RECMEMA_BANK(v)	(((v) >> 12) & 7)
 311  #define RECMEMA_RANK(v)	(((v) >> 8) & 15)
 312
 313#define RECMEMB		0xe4
 314  #define RECMEMB_IS_WR(v)	((v) & (1 << 31))
 315  #define RECMEMB_CAS(v)	(((v) >> 16) & 0x1fff)
 316  #define RECMEMB_RAS(v)	((v) & 0xffff)
 317
 318/********************************************
 319 * i7300 Functions related to error detection
 320 ********************************************/
 321
 322/**
 323 * get_err_from_table() - Gets the error message from a table
 324 * @table:	table name (array of char *)
 325 * @size:	number of elements at the table
 326 * @pos:	position of the element to be returned
 327 *
 328 * This is a small routine that gets the pos-th element of a table. If the
  329 * element doesn't exist (or it is empty), it returns "Reserved".
  330 * Instead of calling it directly, it is better to call it via the
  331 * GET_ERR_FROM_TABLE() macro, which automatically checks the table size
  332 * with the ARRAY_SIZE() macro.
 333 */
 334static const char *get_err_from_table(const char *table[], int size, int pos)
 335{
 336	if (unlikely(pos >= size))
 337		return "Reserved";
 338
 339	if (unlikely(!table[pos]))
 340		return "Reserved";
 341
 342	return table[pos];
 343}
 344
 345#define GET_ERR_FROM_TABLE(table, pos)				\
 346	get_err_from_table(table, ARRAY_SIZE(table), pos)
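/*
 * Usage sketch: GET_ERR_FROM_TABLE(ferr_fat_fbd_name, 22) returns
 * "Non-Redundant Fast Reset Timeout", while an unnamed index such as
 * GET_ERR_FROM_TABLE(ferr_fat_fbd_name, 5) returns "Reserved".
 */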
 347
 348/**
  349 * i7300_process_error_global() - Retrieves the hardware error information
  350 *				  from the global error registers and sends
  351 *				  it to dmesg
 352 * @mci: struct mem_ctl_info pointer
 353 */
 354static void i7300_process_error_global(struct mem_ctl_info *mci)
 355{
 356	struct i7300_pvt *pvt;
 357	u32 errnum, error_reg;
 358	unsigned long errors;
 359	const char *specific;
 360	bool is_fatal;
 361
 362	pvt = mci->pvt_info;
 363
 364	/* read in the 1st FATAL error register */
 365	pci_read_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
 366			      FERR_GLOBAL_HI, &error_reg);
 367	if (unlikely(error_reg)) {
 368		errors = error_reg;
 369		errnum = find_first_bit(&errors,
 370					ARRAY_SIZE(ferr_global_hi_name));
 371		specific = GET_ERR_FROM_TABLE(ferr_global_hi_name, errnum);
 372		is_fatal = ferr_global_hi_is_fatal(errnum);
 373
 374		/* Clear the error bit */
 375		pci_write_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
 376				       FERR_GLOBAL_HI, error_reg);
 377
 378		goto error_global;
 379	}
 380
 381	pci_read_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
 382			      FERR_GLOBAL_LO, &error_reg);
 383	if (unlikely(error_reg)) {
 384		errors = error_reg;
 385		errnum = find_first_bit(&errors,
 386					ARRAY_SIZE(ferr_global_lo_name));
 387		specific = GET_ERR_FROM_TABLE(ferr_global_lo_name, errnum);
 388		is_fatal = ferr_global_lo_is_fatal(errnum);
 389
 390		/* Clear the error bit */
 391		pci_write_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
 392				       FERR_GLOBAL_LO, error_reg);
 393
 394		goto error_global;
 395	}
 396	return;
 397
 398error_global:
 399	i7300_mc_printk(mci, KERN_EMERG, "%s misc error: %s\n",
 400			is_fatal ? "Fatal" : "NOT fatal", specific);
 401}
 402
 403/**
  404 * i7300_process_fbd_error() - Retrieves the hardware error information from
  405 *			       the FBD error registers and reports it via
  406 *			       EDAC error API calls
 407 * @mci: struct mem_ctl_info pointer
 408 */
 409static void i7300_process_fbd_error(struct mem_ctl_info *mci)
 410{
 411	struct i7300_pvt *pvt;
 412	u32 errnum, value, error_reg;
 413	u16 val16;
 414	unsigned branch, channel, bank, rank, cas, ras;
 415	u32 syndrome;
 416
 417	unsigned long errors;
 418	const char *specific;
 419	bool is_wr;
 420
 421	pvt = mci->pvt_info;
 422
 423	/* read in the 1st FATAL error register */
 424	pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
 425			      FERR_FAT_FBD, &error_reg);
 426	if (unlikely(error_reg & FERR_FAT_FBD_ERR_MASK)) {
  427		errors = error_reg & FERR_FAT_FBD_ERR_MASK;
 428		errnum = find_first_bit(&errors,
 429					ARRAY_SIZE(ferr_fat_fbd_name));
 430		specific = GET_ERR_FROM_TABLE(ferr_fat_fbd_name, errnum);
 431		branch = (GET_FBD_FAT_IDX(error_reg) == 2) ? 1 : 0;
 432
 433		pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map,
 434				     NRECMEMA, &val16);
 435		bank = NRECMEMA_BANK(val16);
 436		rank = NRECMEMA_RANK(val16);
 437
 438		pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
 439				NRECMEMB, &value);
 440		is_wr = NRECMEMB_IS_WR(value);
 441		cas = NRECMEMB_CAS(value);
 442		ras = NRECMEMB_RAS(value);
 443
 444		/* Clean the error register */
 445		pci_write_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
 446				FERR_FAT_FBD, error_reg);
 447
 448		snprintf(pvt->tmp_prt_buffer, PAGE_SIZE,
  449			 "Bank=%d RAS=%d CAS=%d Err=0x%lx (%s)",
 450			 bank, ras, cas, errors, specific);
 451
 452		edac_mc_handle_error(HW_EVENT_ERR_FATAL, mci, 1, 0, 0, 0,
 453				     branch, -1, rank,
 454				     is_wr ? "Write error" : "Read error",
 455				     pvt->tmp_prt_buffer);
 456
 457	}
 458
 459	/* read in the 1st NON-FATAL error register */
 460	pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
 461			      FERR_NF_FBD, &error_reg);
 462	if (unlikely(error_reg & FERR_NF_FBD_ERR_MASK)) {
 463		errors = error_reg & FERR_NF_FBD_ERR_MASK;
 464		errnum = find_first_bit(&errors,
 465					ARRAY_SIZE(ferr_nf_fbd_name));
 466		specific = GET_ERR_FROM_TABLE(ferr_nf_fbd_name, errnum);
 467		branch = (GET_FBD_NF_IDX(error_reg) == 2) ? 1 : 0;
 468
 469		pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
 470			REDMEMA, &syndrome);
 471
 472		pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map,
 473				     RECMEMA, &val16);
 474		bank = RECMEMA_BANK(val16);
 475		rank = RECMEMA_RANK(val16);
 476
 477		pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
 478				RECMEMB, &value);
 479		is_wr = RECMEMB_IS_WR(value);
 480		cas = RECMEMB_CAS(value);
 481		ras = RECMEMB_RAS(value);
 482
 483		pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
 484				     REDMEMB, &value);
 485		channel = (branch << 1);
 486		if (IS_SECOND_CH(value))
 487			channel++;
 488
 489		/* Clear the error bit */
 490		pci_write_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
 491				FERR_NF_FBD, error_reg);
 492
 493		/* Form out message */
 494		snprintf(pvt->tmp_prt_buffer, PAGE_SIZE,
  495			 "DRAM-Bank=%d RAS=%d CAS=%d, Err=0x%lx (%s)",
 496			 bank, ras, cas, errors, specific);
 497
 498		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, 0, 0,
 499				     syndrome,
 500				     branch >> 1, channel % 2, rank,
 501				     is_wr ? "Write error" : "Read error",
 502				     pvt->tmp_prt_buffer);
 503	}
 504	return;
 505}
 506
 507/**
 508 * i7300_check_error() - Calls the error checking subroutines
 509 * @mci: struct mem_ctl_info pointer
 510 */
 511static void i7300_check_error(struct mem_ctl_info *mci)
 512{
 513	i7300_process_error_global(mci);
 514	i7300_process_fbd_error(mci);
  515}
 516
 517/**
 518 * i7300_clear_error() - Clears the error registers
 519 * @mci: struct mem_ctl_info pointer
 520 */
 521static void i7300_clear_error(struct mem_ctl_info *mci)
 522{
 523	struct i7300_pvt *pvt = mci->pvt_info;
 524	u32 value;
 525	/*
 526	 * All error values are RWC - we need to read and write 1 to the
 527	 * bit that we want to cleanup
 528	 */
 529
 530	/* Clear global error registers */
 531	pci_read_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
 532			      FERR_GLOBAL_HI, &value);
 533	pci_write_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
 534			      FERR_GLOBAL_HI, value);
 535
 536	pci_read_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
 537			      FERR_GLOBAL_LO, &value);
 538	pci_write_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
 539			      FERR_GLOBAL_LO, value);
 540
 541	/* Clear FBD error registers */
 542	pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
 543			      FERR_FAT_FBD, &value);
 544	pci_write_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
 545			      FERR_FAT_FBD, value);
 546
 547	pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
 548			      FERR_NF_FBD, &value);
 549	pci_write_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
 550			      FERR_NF_FBD, value);
 551}
 552
 553/**
 554 * i7300_enable_error_reporting() - Enable the memory reporting logic at the
 555 *				    hardware
 556 * @mci: struct mem_ctl_info pointer
 557 */
 558static void i7300_enable_error_reporting(struct mem_ctl_info *mci)
 559{
 560	struct i7300_pvt *pvt = mci->pvt_info;
 561	u32 fbd_error_mask;
 562
 563	/* Read the FBD Error Mask Register */
 564	pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
 565			      EMASK_FBD, &fbd_error_mask);
 566
 567	/* Enable with a '0' */
 568	fbd_error_mask &= ~(EMASK_FBD_ERR_MASK);
 569
 570	pci_write_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
 571			       EMASK_FBD, fbd_error_mask);
 572}
 573
 574/************************************************
  575 * i7300 Functions related to memory enumeration
 576 ************************************************/
 577
 578/**
 579 * decode_mtr() - Decodes the MTR descriptor, filling the edac structs
 580 * @pvt: pointer to the private data struct used by i7300 driver
 581 * @slot: DIMM slot (0 to 7)
 582 * @ch: Channel number within the branch (0 or 1)
 583 * @branch: Branch number (0 or 1)
 584 * @dinfo: Pointer to DIMM info where dimm size is stored
  585 * @dimm: Pointer to the struct dimm_info that corresponds to that element
 586 */
 587static int decode_mtr(struct i7300_pvt *pvt,
 588		      int slot, int ch, int branch,
 589		      struct i7300_dimm_info *dinfo,
 590		      struct dimm_info *dimm)
 591{
 592	int mtr, ans, addrBits, channel;
 593
 594	channel = to_channel(ch, branch);
 595
 596	mtr = pvt->mtr[slot][branch];
 597	ans = MTR_DIMMS_PRESENT(mtr) ? 1 : 0;
 598
 599	edac_dbg(2, "\tMTR%d CH%d: DIMMs are %sPresent (mtr)\n",
 600		 slot, channel, ans ? "" : "NOT ");
 601
 602	/* Determine if there is a DIMM present in this DIMM slot */
 603	if (!ans)
 604		return 0;
 605
  606	/* Start with the number of bits for a Bank
  607	 * on the DRAM */
  608	addrBits = MTR_DRAM_BANKS_ADDR_BITS;
  609	/* Add the number of ROW bits */
  610	addrBits += MTR_DIMM_ROWS_ADDR_BITS(mtr);
  611	/* Add the number of COLUMN bits */
  612	addrBits += MTR_DIMM_COLS_ADDR_BITS(mtr);
  613	/* Add the number of RANK bits */
  614	addrBits += MTR_DIMM_RANKS(mtr);
  615
  616	addrBits += 6;	/* add the 64 bits (8 bytes) of data width */
  617	addrBits -= 20;	/* divide by 2^20 to get MiB */
  618	addrBits -= 3;	/* 8 bits per byte */
 619
 620	dinfo->megabytes = 1 << addrBits;
 621
 622	edac_dbg(2, "\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr));
 623
 624	edac_dbg(2, "\t\tELECTRICAL THROTTLING is %s\n",
 625		 MTR_DIMMS_ETHROTTLE(mtr) ? "enabled" : "disabled");
 626
 627	edac_dbg(2, "\t\tNUMBANK: %d bank(s)\n", MTR_DRAM_BANKS(mtr));
 628	edac_dbg(2, "\t\tNUMRANK: %s\n",
 629		 MTR_DIMM_RANKS(mtr) ? "double" : "single");
 630	edac_dbg(2, "\t\tNUMROW: %s\n",
 631		 MTR_DIMM_ROWS(mtr) == 0 ? "8,192 - 13 rows" :
 632		 MTR_DIMM_ROWS(mtr) == 1 ? "16,384 - 14 rows" :
 633		 MTR_DIMM_ROWS(mtr) == 2 ? "32,768 - 15 rows" :
 634		 "65,536 - 16 rows");
 635	edac_dbg(2, "\t\tNUMCOL: %s\n",
 636		 MTR_DIMM_COLS(mtr) == 0 ? "1,024 - 10 columns" :
 637		 MTR_DIMM_COLS(mtr) == 1 ? "2,048 - 11 columns" :
 638		 MTR_DIMM_COLS(mtr) == 2 ? "4,096 - 12 columns" :
 639		 "reserved");
 640	edac_dbg(2, "\t\tSIZE: %d MB\n", dinfo->megabytes);
 641
 642	/*
  643	 * The type of error detection actually depends on the
  644	 * mode of operation. With just a single memory chip, at
  645	 * socket 0, channel 0, it uses the 8-byte-over-32-byte SECDED+ code.
  646	 * In normal or mirrored mode, it uses Lockstep mode,
  647	 * with the possibility of using an extended algorithm for x8 memories.
  648	 * See datasheet Sections 7.3.6 to 7.3.8.
 649	 */
 650
 651	dimm->nr_pages = MiB_TO_PAGES(dinfo->megabytes);
 652	dimm->grain = 8;
 653	dimm->mtype = MEM_FB_DDR2;
 654	if (IS_SINGLE_MODE(pvt->mc_settings_a)) {
 655		dimm->edac_mode = EDAC_SECDED;
 656		edac_dbg(2, "\t\tECC code is 8-byte-over-32-byte SECDED+ code\n");
 657	} else {
 658		edac_dbg(2, "\t\tECC code is on Lockstep mode\n");
 659		if (MTR_DRAM_WIDTH(mtr) == 8)
 660			dimm->edac_mode = EDAC_S8ECD8ED;
 661		else
 662			dimm->edac_mode = EDAC_S4ECD4ED;
 663	}
 664
 665	/* ask what device type on this row */
 666	if (MTR_DRAM_WIDTH(mtr) == 8) {
 667		edac_dbg(2, "\t\tScrub algorithm for x8 is on %s mode\n",
 668			 IS_SCRBALGO_ENHANCED(pvt->mc_settings) ?
 669			 "enhanced" : "normal");
 670
 671		dimm->dtype = DEV_X8;
 672	} else
 673		dimm->dtype = DEV_X4;
 674
 675	return mtr;
 676}
 677
 678/**
 679 * print_dimm_size() - Prints dump of the memory organization
 680 * @pvt: pointer to the private data struct used by i7300 driver
 681 *
  682 * Useful for debug. If debug is disabled, this routine does nothing.
 683 */
 684static void print_dimm_size(struct i7300_pvt *pvt)
 685{
 686#ifdef CONFIG_EDAC_DEBUG
 687	struct i7300_dimm_info *dinfo;
 688	char *p;
 689	int space, n;
 690	int channel, slot;
 691
 692	space = PAGE_SIZE;
 693	p = pvt->tmp_prt_buffer;
 694
 695	n = snprintf(p, space, "              ");
 696	p += n;
 697	space -= n;
 698	for (channel = 0; channel < MAX_CHANNELS; channel++) {
 699		n = snprintf(p, space, "channel %d | ", channel);
 700		p += n;
 701		space -= n;
 702	}
 703	edac_dbg(2, "%s\n", pvt->tmp_prt_buffer);
 704	p = pvt->tmp_prt_buffer;
 705	space = PAGE_SIZE;
 706	n = snprintf(p, space, "-------------------------------"
 707			       "------------------------------");
 708	p += n;
 709	space -= n;
 710	edac_dbg(2, "%s\n", pvt->tmp_prt_buffer);
 711	p = pvt->tmp_prt_buffer;
 712	space = PAGE_SIZE;
 713
 714	for (slot = 0; slot < MAX_SLOTS; slot++) {
 715		n = snprintf(p, space, "csrow/SLOT %d  ", slot);
 716		p += n;
 717		space -= n;
 718
 719		for (channel = 0; channel < MAX_CHANNELS; channel++) {
 720			dinfo = &pvt->dimm_info[slot][channel];
 721			n = snprintf(p, space, "%4d MB   | ", dinfo->megabytes);
 722			p += n;
 723			space -= n;
 724		}
 725
 726		edac_dbg(2, "%s\n", pvt->tmp_prt_buffer);
 727		p = pvt->tmp_prt_buffer;
 728		space = PAGE_SIZE;
 729	}
 730
 731	n = snprintf(p, space, "-------------------------------"
 732			       "------------------------------");
 733	p += n;
 734	space -= n;
 735	edac_dbg(2, "%s\n", pvt->tmp_prt_buffer);
 736	p = pvt->tmp_prt_buffer;
 737	space = PAGE_SIZE;
 738#endif
 739}
 740
 741/**
 742 * i7300_init_csrows() - Initialize the 'csrows' table within
 743 *			 the mci control structure with the
 744 *			 addressing of memory.
 745 * @mci: struct mem_ctl_info pointer
 746 */
 747static int i7300_init_csrows(struct mem_ctl_info *mci)
 748{
 749	struct i7300_pvt *pvt;
 750	struct i7300_dimm_info *dinfo;
 751	int rc = -ENODEV;
 752	int mtr;
 753	int ch, branch, slot, channel, max_channel, max_branch;
 754	struct dimm_info *dimm;
 755
 756	pvt = mci->pvt_info;
 757
 758	edac_dbg(2, "Memory Technology Registers:\n");
 759
 760	if (IS_SINGLE_MODE(pvt->mc_settings_a)) {
 761		max_branch = 1;
 762		max_channel = 1;
 763	} else {
 764		max_branch = MAX_BRANCHES;
 765		max_channel = MAX_CH_PER_BRANCH;
 766	}
 767
 768	/* Get the AMB present registers for the four channels */
 769	for (branch = 0; branch < max_branch; branch++) {
  770		/* Read the AMB present registers for this branch */
 771		channel = to_channel(0, branch);
 772		pci_read_config_word(pvt->pci_dev_2x_0_fbd_branch[branch],
 773				     AMBPRESENT_0,
 774				&pvt->ambpresent[channel]);
 775		edac_dbg(2, "\t\tAMB-present CH%d = 0x%x:\n",
 776			 channel, pvt->ambpresent[channel]);
 777
 778		if (max_channel == 1)
 779			continue;
 780
 781		channel = to_channel(1, branch);
 782		pci_read_config_word(pvt->pci_dev_2x_0_fbd_branch[branch],
 783				     AMBPRESENT_1,
 784				&pvt->ambpresent[channel]);
 785		edac_dbg(2, "\t\tAMB-present CH%d = 0x%x:\n",
 786			 channel, pvt->ambpresent[channel]);
 787	}
 788
 789	/* Get the set of MTR[0-7] regs by each branch */
 790	for (slot = 0; slot < MAX_SLOTS; slot++) {
 791		int where = mtr_regs[slot];
 792		for (branch = 0; branch < max_branch; branch++) {
 793			pci_read_config_word(pvt->pci_dev_2x_0_fbd_branch[branch],
 794					where,
 795					&pvt->mtr[slot][branch]);
 796			for (ch = 0; ch < max_channel; ch++) {
 797				int channel = to_channel(ch, branch);
 798
 799				dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms,
 800					       mci->n_layers, branch, ch, slot);
 801
 802				dinfo = &pvt->dimm_info[slot][channel];
 803
 804				mtr = decode_mtr(pvt, slot, ch, branch,
 805						 dinfo, dimm);
 806
 807				/* if no DIMMS on this row, continue */
 808				if (!MTR_DIMMS_PRESENT(mtr))
 809					continue;
 810
 811				rc = 0;
 812
 813			}
 814		}
 815	}
 816
 817	return rc;
 818}
 819
 820/**
 821 * decode_mir() - Decodes Memory Interleave Register (MIR) info
  822 * @mir_no: number of the MIR register to decode
 823 * @mir: array with the MIR data cached on the driver
 824 */
 825static void decode_mir(int mir_no, u16 mir[MAX_MIR])
 826{
 827	if (mir[mir_no] & 3)
 828		edac_dbg(2, "MIR%d: limit= 0x%x Branch(es) that participate: %s %s\n",
 829			 mir_no,
 830			 (mir[mir_no] >> 4) & 0xfff,
 831			 (mir[mir_no] & 1) ? "B0" : "",
 832			 (mir[mir_no] & 2) ? "B1" : "");
 833}
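/*
 * Illustrative example (hypothetical register value): if mir[0] reads 0x0103,
 * bits 0 and 1 are both set, so both branches participate in the interleave
 * and decode_mir() would log "MIR0: limit= 0x10 Branch(es) that participate:
 * B0 B1" (the limit field being bits 15:4 of the register).
 */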
 834
 835/**
 836 * i7300_get_mc_regs() - Get the contents of the MC enumeration registers
 837 * @mci: struct mem_ctl_info pointer
 838 *
  839 * The data read is cached internally for later use.
 840 */
 841static int i7300_get_mc_regs(struct mem_ctl_info *mci)
 842{
 843	struct i7300_pvt *pvt;
 844	u32 actual_tolm;
 845	int i, rc;
 846
 847	pvt = mci->pvt_info;
 848
 849	pci_read_config_dword(pvt->pci_dev_16_0_fsb_ctlr, AMBASE,
 850			(u32 *) &pvt->ambase);
 851
 852	edac_dbg(2, "AMBASE= 0x%lx\n", (long unsigned int)pvt->ambase);
 853
 854	/* Get the Branch Map regs */
 855	pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, TOLM, &pvt->tolm);
 856	pvt->tolm >>= 12;
 857	edac_dbg(2, "TOLM (number of 256M regions) =%u (0x%x)\n",
 858		 pvt->tolm, pvt->tolm);
 859
 860	actual_tolm = (u32) ((1000l * pvt->tolm) >> (30 - 28));
 861	edac_dbg(2, "Actual TOLM byte addr=%u.%03u GB (0x%x)\n",
 862		 actual_tolm/1000, actual_tolm % 1000, pvt->tolm << 28);
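	/*
	 * Worked example (hypothetical value): a raw TOLM register of 0x8000
	 * shifted right by 12 gives pvt->tolm = 8, i.e. 8 regions of 256 MB.
	 * actual_tolm = (1000 * 8) >> 2 = 2000, so the debug output above
	 * would report "Actual TOLM byte addr=2.000 GB (0x80000000)".
	 */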
 863
 864	/* Get memory controller settings */
 865	pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, MC_SETTINGS,
 866			     &pvt->mc_settings);
 867	pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, MC_SETTINGS_A,
 868			     &pvt->mc_settings_a);
 869
 870	if (IS_SINGLE_MODE(pvt->mc_settings_a))
 871		edac_dbg(0, "Memory controller operating on single mode\n");
 872	else
 873		edac_dbg(0, "Memory controller operating on %smirrored mode\n",
 874			 IS_MIRRORED(pvt->mc_settings) ? "" : "non-");
 875
 876	edac_dbg(0, "Error detection is %s\n",
 877		 IS_ECC_ENABLED(pvt->mc_settings) ? "enabled" : "disabled");
 878	edac_dbg(0, "Retry is %s\n",
 879		 IS_RETRY_ENABLED(pvt->mc_settings) ? "enabled" : "disabled");
 880
 881	/* Get Memory Interleave Range registers */
 882	pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, MIR0,
 883			     &pvt->mir[0]);
 884	pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, MIR1,
 885			     &pvt->mir[1]);
 886	pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, MIR2,
 887			     &pvt->mir[2]);
 888
 889	/* Decode the MIR regs */
 890	for (i = 0; i < MAX_MIR; i++)
 891		decode_mir(i, pvt->mir);
 892
 893	rc = i7300_init_csrows(mci);
 894	if (rc < 0)
 895		return rc;
 896
  897	/* Dump the sizes of the DIMMs, arranged in an
  898	 * orderly matrix */
 899	print_dimm_size(pvt);
 900
 901	return 0;
 902}
 903
 904/*************************************************
 905 * i7300 Functions related to device probe/release
 906 *************************************************/
 907
 908/**
 909 * i7300_put_devices() - Release the PCI devices
 910 * @mci: struct mem_ctl_info pointer
 911 */
 912static void i7300_put_devices(struct mem_ctl_info *mci)
 913{
 914	struct i7300_pvt *pvt;
 915	int branch;
 916
 917	pvt = mci->pvt_info;
 918
 919	/* Decrement usage count for devices */
 920	for (branch = 0; branch < MAX_CH_PER_BRANCH; branch++)
 921		pci_dev_put(pvt->pci_dev_2x_0_fbd_branch[branch]);
 922	pci_dev_put(pvt->pci_dev_16_2_fsb_err_regs);
 923	pci_dev_put(pvt->pci_dev_16_1_fsb_addr_map);
 924}
 925
 926/**
 927 * i7300_get_devices() - Find and perform 'get' operation on the MCH's
 928 *			 device/functions we want to reference for this driver
 929 * @mci: struct mem_ctl_info pointer
 930 *
 931 * Access and prepare the several devices for usage:
 932 * I7300 devices used by this driver:
 933 *    Device 16, functions 0,1 and 2:	PCI_DEVICE_ID_INTEL_I7300_MCH_ERR
 934 *    Device 21 function 0:		PCI_DEVICE_ID_INTEL_I7300_MCH_FB0
 935 *    Device 22 function 0:		PCI_DEVICE_ID_INTEL_I7300_MCH_FB1
 936 */
 937static int i7300_get_devices(struct mem_ctl_info *mci)
 938{
 939	struct i7300_pvt *pvt;
 940	struct pci_dev *pdev;
 941
 942	pvt = mci->pvt_info;
 943
 944	/* Attempt to 'get' the MCH register we want */
 945	pdev = NULL;
 946	while ((pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
 947				      PCI_DEVICE_ID_INTEL_I7300_MCH_ERR,
 948				      pdev))) {
 949		/* Store device 16 funcs 1 and 2 */
 950		switch (PCI_FUNC(pdev->devfn)) {
 951		case 1:
 952			if (!pvt->pci_dev_16_1_fsb_addr_map)
 953				pvt->pci_dev_16_1_fsb_addr_map =
 954							pci_dev_get(pdev);
 955			break;
 956		case 2:
 957			if (!pvt->pci_dev_16_2_fsb_err_regs)
 958				pvt->pci_dev_16_2_fsb_err_regs =
 959							pci_dev_get(pdev);
 960			break;
 961		}
 962	}
 963
 964	if (!pvt->pci_dev_16_1_fsb_addr_map ||
 965	    !pvt->pci_dev_16_2_fsb_err_regs) {
 966		/* At least one device was not found */
 967		i7300_printk(KERN_ERR,
 968			"'system address,Process Bus' device not found:"
 969			"vendor 0x%x device 0x%x ERR funcs (broken BIOS?)\n",
 970			PCI_VENDOR_ID_INTEL,
 971			PCI_DEVICE_ID_INTEL_I7300_MCH_ERR);
 972		goto error;
 973	}
 974
 975	edac_dbg(1, "System Address, processor bus- PCI Bus ID: %s  %x:%x\n",
 976		 pci_name(pvt->pci_dev_16_0_fsb_ctlr),
 977		 pvt->pci_dev_16_0_fsb_ctlr->vendor,
 978		 pvt->pci_dev_16_0_fsb_ctlr->device);
 979	edac_dbg(1, "Branchmap, control and errors - PCI Bus ID: %s  %x:%x\n",
 980		 pci_name(pvt->pci_dev_16_1_fsb_addr_map),
 981		 pvt->pci_dev_16_1_fsb_addr_map->vendor,
 982		 pvt->pci_dev_16_1_fsb_addr_map->device);
 983	edac_dbg(1, "FSB Error Regs - PCI Bus ID: %s  %x:%x\n",
 984		 pci_name(pvt->pci_dev_16_2_fsb_err_regs),
 985		 pvt->pci_dev_16_2_fsb_err_regs->vendor,
 986		 pvt->pci_dev_16_2_fsb_err_regs->device);
 987
 988	pvt->pci_dev_2x_0_fbd_branch[0] = pci_get_device(PCI_VENDOR_ID_INTEL,
 989					    PCI_DEVICE_ID_INTEL_I7300_MCH_FB0,
 990					    NULL);
 991	if (!pvt->pci_dev_2x_0_fbd_branch[0]) {
 992		i7300_printk(KERN_ERR,
 993			"MC: 'BRANCH 0' device not found:"
 994			"vendor 0x%x device 0x%x Func 0 (broken BIOS?)\n",
 995			PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_FB0);
 996		goto error;
 997	}
 998
 999	pvt->pci_dev_2x_0_fbd_branch[1] = pci_get_device(PCI_VENDOR_ID_INTEL,
1000					    PCI_DEVICE_ID_INTEL_I7300_MCH_FB1,
1001					    NULL);
1002	if (!pvt->pci_dev_2x_0_fbd_branch[1]) {
1003		i7300_printk(KERN_ERR,
1004			"MC: 'BRANCH 1' device not found:"
1005			"vendor 0x%x device 0x%x Func 0 "
1006			"(broken BIOS?)\n",
1007			PCI_VENDOR_ID_INTEL,
1008			PCI_DEVICE_ID_INTEL_I7300_MCH_FB1);
1009		goto error;
1010	}
1011
1012	return 0;
1013
1014error:
1015	i7300_put_devices(mci);
1016	return -ENODEV;
1017}
1018
1019/**
1020 * i7300_init_one() - Probe for one instance of the device
1021 * @pdev: struct pci_dev pointer
1022 * @id: struct pci_device_id pointer - currently unused
1023 */
1024static int i7300_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
1025{
1026	struct mem_ctl_info *mci;
1027	struct edac_mc_layer layers[3];
1028	struct i7300_pvt *pvt;
1029	int rc;
1030
1031	/* wake up device */
1032	rc = pci_enable_device(pdev);
1033	if (rc == -EIO)
1034		return rc;
1035
1036	edac_dbg(0, "MC: pdev bus %u dev=0x%x fn=0x%x\n",
1037		 pdev->bus->number,
1038		 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
1039
 1040	/* We are only looking for func 0 of the set */
1041	if (PCI_FUNC(pdev->devfn) != 0)
1042		return -ENODEV;
1043
1044	/* allocate a new MC control structure */
1045	layers[0].type = EDAC_MC_LAYER_BRANCH;
1046	layers[0].size = MAX_BRANCHES;
1047	layers[0].is_virt_csrow = false;
1048	layers[1].type = EDAC_MC_LAYER_CHANNEL;
1049	layers[1].size = MAX_CH_PER_BRANCH;
1050	layers[1].is_virt_csrow = true;
1051	layers[2].type = EDAC_MC_LAYER_SLOT;
1052	layers[2].size = MAX_SLOTS;
1053	layers[2].is_virt_csrow = true;
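	/*
	 * The three EDAC layers mirror the topology described at the top of
	 * this file: layer 0 is the branch (2), layer 1 the channel within
	 * the branch (2) and layer 2 the DIMM slot (8), so up to
	 * 2 * 2 * 8 = 32 DIMMs can be represented per memory controller.
	 */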
1054	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
1055	if (mci == NULL)
1056		return -ENOMEM;
1057
1058	edac_dbg(0, "MC: mci = %p\n", mci);
1059
1060	mci->pdev = &pdev->dev;	/* record ptr  to the generic device */
1061
1062	pvt = mci->pvt_info;
 1063	pvt->pci_dev_16_0_fsb_ctlr = pdev;	/* Record this device in our private data */
1064
1065	pvt->tmp_prt_buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
1066	if (!pvt->tmp_prt_buffer) {
1067		edac_mc_free(mci);
1068		return -ENOMEM;
1069	}
1070
1071	/* 'get' the pci devices we want to reserve for our use */
1072	if (i7300_get_devices(mci))
1073		goto fail0;
1074
1075	mci->mc_idx = 0;
1076	mci->mtype_cap = MEM_FLAG_FB_DDR2;
1077	mci->edac_ctl_cap = EDAC_FLAG_NONE;
1078	mci->edac_cap = EDAC_FLAG_NONE;
1079	mci->mod_name = "i7300_edac.c";
1080	mci->mod_ver = I7300_REVISION;
1081	mci->ctl_name = i7300_devs[0].ctl_name;
1082	mci->dev_name = pci_name(pdev);
1083	mci->ctl_page_to_phys = NULL;
1084
1085	/* Set the function pointer to an actual operation function */
1086	mci->edac_check = i7300_check_error;
1087
1088	/* initialize the MC control structure 'csrows' table
1089	 * with the mapping and control information */
1090	if (i7300_get_mc_regs(mci)) {
1091		edac_dbg(0, "MC: Setting mci->edac_cap to EDAC_FLAG_NONE because i7300_init_csrows() returned nonzero value\n");
1092		mci->edac_cap = EDAC_FLAG_NONE;	/* no csrows found */
1093	} else {
1094		edac_dbg(1, "MC: Enable error reporting now\n");
1095		i7300_enable_error_reporting(mci);
1096	}
1097
1098	/* add this new MC control structure to EDAC's list of MCs */
1099	if (edac_mc_add_mc(mci)) {
1100		edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
1101		/* FIXME: perhaps some code should go here that disables error
1102		 * reporting if we just enabled it
1103		 */
1104		goto fail1;
1105	}
1106
1107	i7300_clear_error(mci);
1108
1109	/* allocating generic PCI control info */
1110	i7300_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
1111	if (!i7300_pci) {
1112		printk(KERN_WARNING
1113			"%s(): Unable to create PCI control\n",
1114			__func__);
1115		printk(KERN_WARNING
1116			"%s(): PCI error report via EDAC not setup\n",
1117			__func__);
1118	}
1119
1120	return 0;
1121
1122	/* Error exit unwinding stack */
1123fail1:
1124
1125	i7300_put_devices(mci);
1126
1127fail0:
1128	kfree(pvt->tmp_prt_buffer);
1129	edac_mc_free(mci);
1130	return -ENODEV;
1131}
1132
1133/**
1134 * i7300_remove_one() - Remove the driver
1135 * @pdev: struct pci_dev pointer
1136 */
1137static void i7300_remove_one(struct pci_dev *pdev)
1138{
1139	struct mem_ctl_info *mci;
1140	char *tmp;
1141
1142	edac_dbg(0, "\n");
1143
1144	if (i7300_pci)
1145		edac_pci_release_generic_ctl(i7300_pci);
1146
1147	mci = edac_mc_del_mc(&pdev->dev);
1148	if (!mci)
1149		return;
1150
1151	tmp = ((struct i7300_pvt *)mci->pvt_info)->tmp_prt_buffer;
1152
1153	/* retrieve references to resources, and free those resources */
1154	i7300_put_devices(mci);
1155
1156	kfree(tmp);
1157	edac_mc_free(mci);
1158}
1159
1160/*
1161 * pci_device_id: table for which devices we are looking for
1162 *
1163 * Has only 8086:360c PCI ID
1164 */
1165static const struct pci_device_id i7300_pci_tbl[] = {
1166	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_ERR)},
1167	{0,}			/* 0 terminated list. */
1168};
1169
1170MODULE_DEVICE_TABLE(pci, i7300_pci_tbl);
1171
1172/*
1173 * i7300_driver: pci_driver structure for this module
1174 */
1175static struct pci_driver i7300_driver = {
1176	.name = "i7300_edac",
1177	.probe = i7300_init_one,
1178	.remove = i7300_remove_one,
1179	.id_table = i7300_pci_tbl,
1180};
1181
1182/**
1183 * i7300_init() - Registers the driver
1184 */
1185static int __init i7300_init(void)
1186{
1187	int pci_rc;
1188
1189	edac_dbg(2, "\n");
1190
1191	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
1192	opstate_init();
1193
1194	pci_rc = pci_register_driver(&i7300_driver);
1195
1196	return (pci_rc < 0) ? pci_rc : 0;
1197}
1198
1199/**
 1200 * i7300_exit() - Unregisters the driver
1201 */
1202static void __exit i7300_exit(void)
1203{
1204	edac_dbg(2, "\n");
1205	pci_unregister_driver(&i7300_driver);
1206}
1207
1208module_init(i7300_init);
1209module_exit(i7300_exit);
1210
1211MODULE_LICENSE("GPL");
1212MODULE_AUTHOR("Mauro Carvalho Chehab");
1213MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)");
1214MODULE_DESCRIPTION("MC Driver for Intel I7300 memory controllers - "
1215		   I7300_REVISION);
1216
1217module_param(edac_op_state, int, 0444);
1218MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Intel 7300 class Memory Controllers kernel module (Clarksboro)
   4 *
   5 * Copyright (c) 2010 by:
   6 *	 Mauro Carvalho Chehab
   7 *
   8 * Red Hat Inc. https://www.redhat.com
   9 *
  10 * Intel 7300 Chipset Memory Controller Hub (MCH) - Datasheet
  11 *	http://www.intel.com/Assets/PDF/datasheet/318082.pdf
  12 *
  13 * TODO: The chipset allow checking for PCI Express errors also. Currently,
  14 *	 the driver covers only memory error errors
  15 *
  16 * This driver uses "csrows" EDAC attribute to represent DIMM slot#
  17 */
  18
  19#include <linux/module.h>
  20#include <linux/init.h>
  21#include <linux/pci.h>
  22#include <linux/pci_ids.h>
  23#include <linux/slab.h>
  24#include <linux/edac.h>
  25#include <linux/mmzone.h>
  26
  27#include "edac_module.h"
  28
  29/*
  30 * Alter this version for the I7300 module when modifications are made
  31 */
  32#define I7300_REVISION    " Ver: 1.0.0"
  33
  34#define EDAC_MOD_STR      "i7300_edac"
  35
  36#define i7300_printk(level, fmt, arg...) \
  37	edac_printk(level, "i7300", fmt, ##arg)
  38
  39#define i7300_mc_printk(mci, level, fmt, arg...) \
  40	edac_mc_chipset_printk(mci, level, "i7300", fmt, ##arg)
  41
  42/***********************************************
  43 * i7300 Limit constants Structs and static vars
  44 ***********************************************/
  45
  46/*
  47 * Memory topology is organized as:
  48 *	Branch 0 - 2 channels: channels 0 and 1 (FDB0 PCI dev 21.0)
  49 *	Branch 1 - 2 channels: channels 2 and 3 (FDB1 PCI dev 22.0)
  50 * Each channel can have to 8 DIMM sets (called as SLOTS)
  51 * Slots should generally be filled in pairs
  52 *	Except on Single Channel mode of operation
  53 *		just slot 0/channel0 filled on this mode
  54 *	On normal operation mode, the two channels on a branch should be
  55 *		filled together for the same SLOT#
  56 * When in mirrored mode, Branch 1 replicate memory at Branch 0, so, the four
  57 *		channels on both branches should be filled
  58 */
  59
  60/* Limits for i7300 */
  61#define MAX_SLOTS		8
  62#define MAX_BRANCHES		2
  63#define MAX_CH_PER_BRANCH	2
  64#define MAX_CHANNELS		(MAX_CH_PER_BRANCH * MAX_BRANCHES)
  65#define MAX_MIR			3
  66
  67#define to_channel(ch, branch)	((((branch)) << 1) | (ch))
  68
  69#define to_csrow(slot, ch, branch)					\
  70		(to_channel(ch, branch) | ((slot) << 2))
  71
  72/* Device name and register DID (Device ID) */
  73struct i7300_dev_info {
  74	const char *ctl_name;	/* name for this device */
  75	u16 fsb_mapping_errors;	/* DID for the branchmap,control */
  76};
  77
  78/* Table of devices attributes supported by this driver */
  79static const struct i7300_dev_info i7300_devs[] = {
  80	{
  81		.ctl_name = "I7300",
  82		.fsb_mapping_errors = PCI_DEVICE_ID_INTEL_I7300_MCH_ERR,
  83	},
  84};
  85
  86struct i7300_dimm_info {
  87	int megabytes;		/* size, 0 means not present  */
  88};
  89
  90/* driver private data structure */
  91struct i7300_pvt {
  92	struct pci_dev *pci_dev_16_0_fsb_ctlr;		/* 16.0 */
  93	struct pci_dev *pci_dev_16_1_fsb_addr_map;	/* 16.1 */
  94	struct pci_dev *pci_dev_16_2_fsb_err_regs;	/* 16.2 */
  95	struct pci_dev *pci_dev_2x_0_fbd_branch[MAX_BRANCHES];	/* 21.0  and 22.0 */
  96
  97	u16 tolm;				/* top of low memory */
  98	u64 ambase;				/* AMB BAR */
  99
 100	u32 mc_settings;			/* Report several settings */
 101	u32 mc_settings_a;
 102
 103	u16 mir[MAX_MIR];			/* Memory Interleave Reg*/
 104
 105	u16 mtr[MAX_SLOTS][MAX_BRANCHES];	/* Memory Technlogy Reg */
 106	u16 ambpresent[MAX_CHANNELS];		/* AMB present regs */
 107
 108	/* DIMM information matrix, allocating architecture maximums */
 109	struct i7300_dimm_info dimm_info[MAX_SLOTS][MAX_CHANNELS];
 110
 111	/* Temporary buffer for use when preparing error messages */
 112	char *tmp_prt_buffer;
 113};
 114
 115/* FIXME: Why do we need to have this static? */
 116static struct edac_pci_ctl_info *i7300_pci;
 117
 118/***************************************************
 119 * i7300 Register definitions for memory enumeration
 120 ***************************************************/
 121
 122/*
 123 * Device 16,
 124 * Function 0: System Address (not documented)
 125 * Function 1: Memory Branch Map, Control, Errors Register
 126 */
 127
 128	/* OFFSETS for Function 0 */
 129#define AMBASE			0x48 /* AMB Mem Mapped Reg Region Base */
 130#define MAXCH			0x56 /* Max Channel Number */
 131#define MAXDIMMPERCH		0x57 /* Max DIMM PER Channel Number */
 132
 133	/* OFFSETS for Function 1 */
 134#define MC_SETTINGS		0x40
 135  #define IS_MIRRORED(mc)		((mc) & (1 << 16))
 136  #define IS_ECC_ENABLED(mc)		((mc) & (1 << 5))
 137  #define IS_RETRY_ENABLED(mc)		((mc) & (1 << 31))
 138  #define IS_SCRBALGO_ENHANCED(mc)	((mc) & (1 << 8))
 139
 140#define MC_SETTINGS_A		0x58
 141  #define IS_SINGLE_MODE(mca)		((mca) & (1 << 14))
 142
 143#define TOLM			0x6C
 144
 145#define MIR0			0x80
 146#define MIR1			0x84
 147#define MIR2			0x88
 148
 149/*
 150 * Note: Other Intel EDAC drivers use AMBPRESENT to identify if the available
 151 * memory. From datasheet item 7.3.1 (FB-DIMM technology & organization), it
 152 * seems that we cannot use this information directly for the same usage.
 153 * Each memory slot may have up to 2 AMB interfaces, one for income and another
 154 * for outcome interface to the next slot.
 155 * For now, the driver just stores the AMB present registers, but rely only at
 156 * the MTR info to detect memory.
 157 * Datasheet is also not clear about how to map each AMBPRESENT registers to
 158 * one of the 4 available channels.
 159 */
 160#define AMBPRESENT_0	0x64
 161#define AMBPRESENT_1	0x66
 162
 163static const u16 mtr_regs[MAX_SLOTS] = {
 164	0x80, 0x84, 0x88, 0x8c,
 165	0x82, 0x86, 0x8a, 0x8e
 166};
 167
 168/*
 169 * Defines to extract the vaious fields from the
 170 *	MTRx - Memory Technology Registers
 171 */
 172#define MTR_DIMMS_PRESENT(mtr)		((mtr) & (1 << 8))
 173#define MTR_DIMMS_ETHROTTLE(mtr)	((mtr) & (1 << 7))
 174#define MTR_DRAM_WIDTH(mtr)		(((mtr) & (1 << 6)) ? 8 : 4)
 175#define MTR_DRAM_BANKS(mtr)		(((mtr) & (1 << 5)) ? 8 : 4)
 176#define MTR_DIMM_RANKS(mtr)		(((mtr) & (1 << 4)) ? 1 : 0)
 177#define MTR_DIMM_ROWS(mtr)		(((mtr) >> 2) & 0x3)
 178#define MTR_DRAM_BANKS_ADDR_BITS	2
 179#define MTR_DIMM_ROWS_ADDR_BITS(mtr)	(MTR_DIMM_ROWS(mtr) + 13)
 180#define MTR_DIMM_COLS(mtr)		((mtr) & 0x3)
 181#define MTR_DIMM_COLS_ADDR_BITS(mtr)	(MTR_DIMM_COLS(mtr) + 10)
 182
 183/************************************************
 184 * i7300 Register definitions for error detection
 185 ************************************************/
 186
 187/*
 188 * Device 16.1: FBD Error Registers
 189 */
 190#define FERR_FAT_FBD	0x98
 191static const char *ferr_fat_fbd_name[] = {
 192	[22] = "Non-Redundant Fast Reset Timeout",
 193	[2]  = ">Tmid Thermal event with intelligent throttling disabled",
 194	[1]  = "Memory or FBD configuration CRC read error",
 195	[0]  = "Memory Write error on non-redundant retry or "
 196	       "FBD configuration Write error on retry",
 197};
 198#define GET_FBD_FAT_IDX(fbderr)	(((fbderr) >> 28) & 3)
 199#define FERR_FAT_FBD_ERR_MASK ((1 << 0) | (1 << 1) | (1 << 2) | (1 << 22))
 200
 201#define FERR_NF_FBD	0xa0
 202static const char *ferr_nf_fbd_name[] = {
 203	[24] = "DIMM-Spare Copy Completed",
 204	[23] = "DIMM-Spare Copy Initiated",
 205	[22] = "Redundant Fast Reset Timeout",
 206	[21] = "Memory Write error on redundant retry",
 207	[18] = "SPD protocol Error",
 208	[17] = "FBD Northbound parity error on FBD Sync Status",
 209	[16] = "Correctable Patrol Data ECC",
 210	[15] = "Correctable Resilver- or Spare-Copy Data ECC",
 211	[14] = "Correctable Mirrored Demand Data ECC",
 212	[13] = "Correctable Non-Mirrored Demand Data ECC",
 213	[11] = "Memory or FBD configuration CRC read error",
 214	[10] = "FBD Configuration Write error on first attempt",
 215	[9]  = "Memory Write error on first attempt",
 216	[8]  = "Non-Aliased Uncorrectable Patrol Data ECC",
 217	[7]  = "Non-Aliased Uncorrectable Resilver- or Spare-Copy Data ECC",
 218	[6]  = "Non-Aliased Uncorrectable Mirrored Demand Data ECC",
 219	[5]  = "Non-Aliased Uncorrectable Non-Mirrored Demand Data ECC",
 220	[4]  = "Aliased Uncorrectable Patrol Data ECC",
 221	[3]  = "Aliased Uncorrectable Resilver- or Spare-Copy Data ECC",
 222	[2]  = "Aliased Uncorrectable Mirrored Demand Data ECC",
 223	[1]  = "Aliased Uncorrectable Non-Mirrored Demand Data ECC",
 224	[0]  = "Uncorrectable Data ECC on Replay",
 225};
 226#define GET_FBD_NF_IDX(fbderr)	(((fbderr) >> 28) & 3)
 227#define FERR_NF_FBD_ERR_MASK ((1 << 24) | (1 << 23) | (1 << 22) | (1 << 21) |\
 228			      (1 << 18) | (1 << 17) | (1 << 16) | (1 << 15) |\
 229			      (1 << 14) | (1 << 13) | (1 << 11) | (1 << 10) |\
 230			      (1 << 9)  | (1 << 8)  | (1 << 7)  | (1 << 6)  |\
 231			      (1 << 5)  | (1 << 4)  | (1 << 3)  | (1 << 2)  |\
 232			      (1 << 1)  | (1 << 0))
 233
 234#define EMASK_FBD	0xa8
 235#define EMASK_FBD_ERR_MASK ((1 << 27) | (1 << 26) | (1 << 25) | (1 << 24) |\
 236			    (1 << 22) | (1 << 21) | (1 << 20) | (1 << 19) |\
 237			    (1 << 18) | (1 << 17) | (1 << 16) | (1 << 14) |\
 238			    (1 << 13) | (1 << 12) | (1 << 11) | (1 << 10) |\
 239			    (1 << 9)  | (1 << 8)  | (1 << 7)  | (1 << 6)  |\
 240			    (1 << 5)  | (1 << 4)  | (1 << 3)  | (1 << 2)  |\
 241			    (1 << 1)  | (1 << 0))
 242
 243/*
 244 * Device 16.2: Global Error Registers
 245 */
 246
 247#define FERR_GLOBAL_HI	0x48
 248static const char *ferr_global_hi_name[] = {
 249	[3] = "FSB 3 Fatal Error",
 250	[2] = "FSB 2 Fatal Error",
 251	[1] = "FSB 1 Fatal Error",
 252	[0] = "FSB 0 Fatal Error",
 253};
 254#define ferr_global_hi_is_fatal(errno)	1
 255
 256#define FERR_GLOBAL_LO	0x40
 257static const char *ferr_global_lo_name[] = {
 258	[31] = "Internal MCH Fatal Error",
 259	[30] = "Intel QuickData Technology Device Fatal Error",
 260	[29] = "FSB1 Fatal Error",
 261	[28] = "FSB0 Fatal Error",
 262	[27] = "FBD Channel 3 Fatal Error",
 263	[26] = "FBD Channel 2 Fatal Error",
 264	[25] = "FBD Channel 1 Fatal Error",
 265	[24] = "FBD Channel 0 Fatal Error",
 266	[23] = "PCI Express Device 7Fatal Error",
 267	[22] = "PCI Express Device 6 Fatal Error",
 268	[21] = "PCI Express Device 5 Fatal Error",
 269	[20] = "PCI Express Device 4 Fatal Error",
 270	[19] = "PCI Express Device 3 Fatal Error",
 271	[18] = "PCI Express Device 2 Fatal Error",
 272	[17] = "PCI Express Device 1 Fatal Error",
 273	[16] = "ESI Fatal Error",
 274	[15] = "Internal MCH Non-Fatal Error",
 275	[14] = "Intel QuickData Technology Device Non Fatal Error",
 276	[13] = "FSB1 Non-Fatal Error",
 277	[12] = "FSB 0 Non-Fatal Error",
 278	[11] = "FBD Channel 3 Non-Fatal Error",
 279	[10] = "FBD Channel 2 Non-Fatal Error",
 280	[9]  = "FBD Channel 1 Non-Fatal Error",
 281	[8]  = "FBD Channel 0 Non-Fatal Error",
 282	[7]  = "PCI Express Device 7 Non-Fatal Error",
 283	[6]  = "PCI Express Device 6 Non-Fatal Error",
 284	[5]  = "PCI Express Device 5 Non-Fatal Error",
 285	[4]  = "PCI Express Device 4 Non-Fatal Error",
 286	[3]  = "PCI Express Device 3 Non-Fatal Error",
 287	[2]  = "PCI Express Device 2 Non-Fatal Error",
 288	[1]  = "PCI Express Device 1 Non-Fatal Error",
 289	[0]  = "ESI Non-Fatal Error",
 290};
 291#define ferr_global_lo_is_fatal(errno)	((errno < 16) ? 0 : 1)
 292
 293#define NRECMEMA	0xbe
 294  #define NRECMEMA_BANK(v)	(((v) >> 12) & 7)
 295  #define NRECMEMA_RANK(v)	(((v) >> 8) & 15)
 296
 297#define NRECMEMB	0xc0
 298  #define NRECMEMB_IS_WR(v)	((v) & (1 << 31))
 299  #define NRECMEMB_CAS(v)	(((v) >> 16) & 0x1fff)
 300  #define NRECMEMB_RAS(v)	((v) & 0xffff)
 301
 302#define REDMEMA		0xdc
 303
 304#define REDMEMB		0x7c
 305
 306#define RECMEMA		0xe0
 307  #define RECMEMA_BANK(v)	(((v) >> 12) & 7)
 308  #define RECMEMA_RANK(v)	(((v) >> 8) & 15)
 309
 310#define RECMEMB		0xe4
 311  #define RECMEMB_IS_WR(v)	((v) & (1 << 31))
 312  #define RECMEMB_CAS(v)	(((v) >> 16) & 0x1fff)
 313  #define RECMEMB_RAS(v)	((v) & 0xffff)
 314
 315/********************************************
 316 * i7300 Functions related to error detection
 317 ********************************************/
 318
 319/**
 320 * get_err_from_table() - Gets the error message from a table
 321 * @table:	table name (array of char *)
 322 * @size:	number of elements at the table
 323 * @pos:	position of the element to be returned
 324 *
 325 * This is a small routine that gets the pos-th element of a table. If the
 326 * element doesn't exist (or it is empty), it returns "reserved".
 327 * Instead of calling it directly, the better is to call via the macro
 328 * GET_ERR_FROM_TABLE(), that automatically checks the table size via
 329 * ARRAY_SIZE() macro
 330 */
 331static const char *get_err_from_table(const char *table[], int size, int pos)
 332{
 333	if (unlikely(pos >= size))
 334		return "Reserved";
 335
 336	if (unlikely(!table[pos]))
 337		return "Reserved";
 338
 339	return table[pos];
 340}
 341
 342#define GET_ERR_FROM_TABLE(table, pos)				\
 343	get_err_from_table(table, ARRAY_SIZE(table), pos)
 344
 345/**
 346 * i7300_process_error_global() - Retrieve the hardware error information from
 347 *				  the hardware global error registers and
 348 *				  sends it to dmesg
 349 * @mci: struct mem_ctl_info pointer
 350 */
 351static void i7300_process_error_global(struct mem_ctl_info *mci)
 352{
 353	struct i7300_pvt *pvt;
 354	u32 errnum, error_reg;
 355	unsigned long errors;
 356	const char *specific;
 357	bool is_fatal;
 358
 359	pvt = mci->pvt_info;
 360
 361	/* read in the 1st FATAL error register */
 362	pci_read_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
 363			      FERR_GLOBAL_HI, &error_reg);
 364	if (unlikely(error_reg)) {
 365		errors = error_reg;
 366		errnum = find_first_bit(&errors,
 367					ARRAY_SIZE(ferr_global_hi_name));
 368		specific = GET_ERR_FROM_TABLE(ferr_global_hi_name, errnum);
 369		is_fatal = ferr_global_hi_is_fatal(errnum);
 370
 371		/* Clear the error bit */
 372		pci_write_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
 373				       FERR_GLOBAL_HI, error_reg);
 374
 375		goto error_global;
 376	}
 377
 378	pci_read_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
 379			      FERR_GLOBAL_LO, &error_reg);
 380	if (unlikely(error_reg)) {
 381		errors = error_reg;
 382		errnum = find_first_bit(&errors,
 383					ARRAY_SIZE(ferr_global_lo_name));
 384		specific = GET_ERR_FROM_TABLE(ferr_global_lo_name, errnum);
 385		is_fatal = ferr_global_lo_is_fatal(errnum);
 386
 387		/* Clear the error bit */
 388		pci_write_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
 389				       FERR_GLOBAL_LO, error_reg);
 390
 391		goto error_global;
 392	}
 393	return;
 394
 395error_global:
 396	i7300_mc_printk(mci, KERN_EMERG, "%s misc error: %s\n",
 397			is_fatal ? "Fatal" : "NOT fatal", specific);
 398}
 399
 400/**
 401 * i7300_process_fbd_error() - Retrieve the hardware error information from
 402 *			       the FBD error registers and sends it via
 403 *			       EDAC error API calls
 404 * @mci: struct mem_ctl_info pointer
 405 */
 406static void i7300_process_fbd_error(struct mem_ctl_info *mci)
 407{
 408	struct i7300_pvt *pvt;
 409	u32 errnum, value, error_reg;
 410	u16 val16;
 411	unsigned branch, channel, bank, rank, cas, ras;
 412	u32 syndrome;
 413
 414	unsigned long errors;
 415	const char *specific;
 416	bool is_wr;
 417
 418	pvt = mci->pvt_info;
 419
 420	/* read in the 1st FATAL error register */
 421	pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
 422			      FERR_FAT_FBD, &error_reg);
 423	if (unlikely(error_reg & FERR_FAT_FBD_ERR_MASK)) {
 424		errors = error_reg & FERR_FAT_FBD_ERR_MASK ;
 425		errnum = find_first_bit(&errors,
 426					ARRAY_SIZE(ferr_fat_fbd_name));
 427		specific = GET_ERR_FROM_TABLE(ferr_fat_fbd_name, errnum);
 428		branch = (GET_FBD_FAT_IDX(error_reg) == 2) ? 1 : 0;
 429
 430		pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map,
 431				     NRECMEMA, &val16);
 432		bank = NRECMEMA_BANK(val16);
 433		rank = NRECMEMA_RANK(val16);
 434
 435		pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
 436				NRECMEMB, &value);
 437		is_wr = NRECMEMB_IS_WR(value);
 438		cas = NRECMEMB_CAS(value);
 439		ras = NRECMEMB_RAS(value);
 440
 441		/* Clean the error register */
 442		pci_write_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
 443				FERR_FAT_FBD, error_reg);
 444
 445		snprintf(pvt->tmp_prt_buffer, PAGE_SIZE,
  446			 "Bank=%d RAS=%d CAS=%d Err=0x%lx (%s)",
 447			 bank, ras, cas, errors, specific);
 448
 449		edac_mc_handle_error(HW_EVENT_ERR_FATAL, mci, 1, 0, 0, 0,
 450				     branch, -1, rank,
 451				     is_wr ? "Write error" : "Read error",
 452				     pvt->tmp_prt_buffer);
 453
 454	}
 455
 456	/* read in the 1st NON-FATAL error register */
 457	pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
 458			      FERR_NF_FBD, &error_reg);
 459	if (unlikely(error_reg & FERR_NF_FBD_ERR_MASK)) {
 460		errors = error_reg & FERR_NF_FBD_ERR_MASK;
 461		errnum = find_first_bit(&errors,
 462					ARRAY_SIZE(ferr_nf_fbd_name));
 463		specific = GET_ERR_FROM_TABLE(ferr_nf_fbd_name, errnum);
 464		branch = (GET_FBD_NF_IDX(error_reg) == 2) ? 1 : 0;
 465
 466		pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
 467			REDMEMA, &syndrome);
 468
 469		pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map,
 470				     RECMEMA, &val16);
 471		bank = RECMEMA_BANK(val16);
 472		rank = RECMEMA_RANK(val16);
 473
 474		pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
 475				RECMEMB, &value);
 476		is_wr = RECMEMB_IS_WR(value);
 477		cas = RECMEMB_CAS(value);
 478		ras = RECMEMB_RAS(value);
 479
 480		pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
 481				     REDMEMB, &value);
 482		channel = (branch << 1);
 483
  484		/* Second channel? */
 485		channel += !!(value & BIT(17));
 486
 487		/* Clear the error bit */
 488		pci_write_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
 489				FERR_NF_FBD, error_reg);
 490
 491		/* Form out message */
 492		snprintf(pvt->tmp_prt_buffer, PAGE_SIZE,
  493			 "DRAM-Bank=%d RAS=%d CAS=%d, Err=0x%lx (%s)",
 494			 bank, ras, cas, errors, specific);
 495
 496		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, 0, 0,
 497				     syndrome,
 498				     branch >> 1, channel % 2, rank,
 499				     is_wr ? "Write error" : "Read error",
 500				     pvt->tmp_prt_buffer);
 501	}
 502	return;
 503}
 504
 505/**
 506 * i7300_check_error() - Calls the error checking subroutines
 507 * @mci: struct mem_ctl_info pointer
 508 */
 509static void i7300_check_error(struct mem_ctl_info *mci)
 510{
 511	i7300_process_error_global(mci);
 512	i7300_process_fbd_error(mci);
  513}
 514
 515/**
 516 * i7300_clear_error() - Clears the error registers
 517 * @mci: struct mem_ctl_info pointer
 518 */
 519static void i7300_clear_error(struct mem_ctl_info *mci)
 520{
 521	struct i7300_pvt *pvt = mci->pvt_info;
 522	u32 value;
  523	/*
  524	 * All error bits are RWC (read, write-1-to-clear): reading a register
  525	 * and writing the value back clears exactly the bits that were set
  526	 */
 527
 528	/* Clear global error registers */
 529	pci_read_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
 530			      FERR_GLOBAL_HI, &value);
 531	pci_write_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
 532			      FERR_GLOBAL_HI, value);
 533
 534	pci_read_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
 535			      FERR_GLOBAL_LO, &value);
 536	pci_write_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
 537			      FERR_GLOBAL_LO, value);
 538
 539	/* Clear FBD error registers */
 540	pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
 541			      FERR_FAT_FBD, &value);
 542	pci_write_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
 543			      FERR_FAT_FBD, value);
 544
 545	pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
 546			      FERR_NF_FBD, &value);
 547	pci_write_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
 548			      FERR_NF_FBD, value);
 549}
 550
 551/**
  552 * i7300_enable_error_reporting() - Enable the memory error reporting
  553 *				    logic in the hardware
 554 * @mci: struct mem_ctl_info pointer
 555 */
 556static void i7300_enable_error_reporting(struct mem_ctl_info *mci)
 557{
 558	struct i7300_pvt *pvt = mci->pvt_info;
 559	u32 fbd_error_mask;
 560
 561	/* Read the FBD Error Mask Register */
 562	pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
 563			      EMASK_FBD, &fbd_error_mask);
 564
  565	/* A '0' in a mask bit enables reporting of the corresponding error */
 566	fbd_error_mask &= ~(EMASK_FBD_ERR_MASK);
 567
 568	pci_write_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
 569			       EMASK_FBD, fbd_error_mask);
 570}
 571
 572/************************************************
  573 * i7300 Functions related to memory enumeration
 574 ************************************************/
 575
 576/**
 577 * decode_mtr() - Decodes the MTR descriptor, filling the edac structs
 578 * @pvt: pointer to the private data struct used by i7300 driver
 579 * @slot: DIMM slot (0 to 7)
 580 * @ch: Channel number within the branch (0 or 1)
 581 * @branch: Branch number (0 or 1)
 582 * @dinfo: Pointer to DIMM info where dimm size is stored
 583 * @dimm: Pointer to the struct dimm_info that corresponds to that element
 584 */
 585static int decode_mtr(struct i7300_pvt *pvt,
 586		      int slot, int ch, int branch,
 587		      struct i7300_dimm_info *dinfo,
 588		      struct dimm_info *dimm)
 589{
 590	int mtr, ans, addrBits, channel;
 591
 592	channel = to_channel(ch, branch);
 593
 594	mtr = pvt->mtr[slot][branch];
 595	ans = MTR_DIMMS_PRESENT(mtr) ? 1 : 0;
 596
 597	edac_dbg(2, "\tMTR%d CH%d: DIMMs are %sPresent (mtr)\n",
 598		 slot, channel, ans ? "" : "NOT ");
 599
 600	/* Determine if there is a DIMM present in this DIMM slot */
 601	if (!ans)
 602		return 0;
 603
  604	/* Start with the number of bits for a bank
  605	 * on the DRAM */
  606	addrBits = MTR_DRAM_BANKS_ADDR_BITS;
  607	/* Add the number of ROW bits */
  608	addrBits += MTR_DIMM_ROWS_ADDR_BITS(mtr);
  609	/* Add the number of COLUMN bits */
  610	addrBits += MTR_DIMM_COLS_ADDR_BITS(mtr);
  611	/* Add the number of RANK bits */
  612	addrBits += MTR_DIMM_RANKS(mtr);
  613
  614	addrBits += 6;	/* each addressed location is 64 bits (2^6) wide */
  615	addrBits -= 20;	/* divide by 2^20 to get megabytes */
  616	addrBits -= 3;	/* 8 bits per byte */
 617
 618	dinfo->megabytes = 1 << addrBits;
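	/*
	 * Worked example with hypothetical register values, for
	 * illustration only: 2 bank bits + 14 row bits + 11 column bits
	 * + 1 rank bit gives addrBits = 28 + 6 - 20 - 3 = 11,
	 * i.e. dinfo->megabytes = 1 << 11 = 2048 MB.
	 */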
 619
 620	edac_dbg(2, "\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr));
 621
 622	edac_dbg(2, "\t\tELECTRICAL THROTTLING is %s\n",
 623		 MTR_DIMMS_ETHROTTLE(mtr) ? "enabled" : "disabled");
 624
 625	edac_dbg(2, "\t\tNUMBANK: %d bank(s)\n", MTR_DRAM_BANKS(mtr));
 626	edac_dbg(2, "\t\tNUMRANK: %s\n",
 627		 MTR_DIMM_RANKS(mtr) ? "double" : "single");
 628	edac_dbg(2, "\t\tNUMROW: %s\n",
 629		 MTR_DIMM_ROWS(mtr) == 0 ? "8,192 - 13 rows" :
 630		 MTR_DIMM_ROWS(mtr) == 1 ? "16,384 - 14 rows" :
 631		 MTR_DIMM_ROWS(mtr) == 2 ? "32,768 - 15 rows" :
 632		 "65,536 - 16 rows");
 633	edac_dbg(2, "\t\tNUMCOL: %s\n",
 634		 MTR_DIMM_COLS(mtr) == 0 ? "1,024 - 10 columns" :
 635		 MTR_DIMM_COLS(mtr) == 1 ? "2,048 - 11 columns" :
 636		 MTR_DIMM_COLS(mtr) == 2 ? "4,096 - 12 columns" :
 637		 "reserved");
 638	edac_dbg(2, "\t\tSIZE: %d MB\n", dinfo->megabytes);
 639
 640	/*
  641	 * The type of error detection actually depends on the
  642	 * mode of operation. With a single DIMM populated at
  643	 * slot 0, channel 0, it uses an 8-byte-over-32-byte SECDED+ code.
  644	 * In normal or mirrored mode, it uses Lockstep mode,
  645	 * with the possibility of using an extended algorithm for x8 memories.
  646	 * See datasheet Sections 7.3.6 to 7.3.8.
 647	 */
 648
 649	dimm->nr_pages = MiB_TO_PAGES(dinfo->megabytes);
 650	dimm->grain = 8;
 651	dimm->mtype = MEM_FB_DDR2;
 652	if (IS_SINGLE_MODE(pvt->mc_settings_a)) {
 653		dimm->edac_mode = EDAC_SECDED;
 654		edac_dbg(2, "\t\tECC code is 8-byte-over-32-byte SECDED+ code\n");
 655	} else {
 656		edac_dbg(2, "\t\tECC code is on Lockstep mode\n");
 657		if (MTR_DRAM_WIDTH(mtr) == 8)
 658			dimm->edac_mode = EDAC_S8ECD8ED;
 659		else
 660			dimm->edac_mode = EDAC_S4ECD4ED;
 661	}
 662
 663	/* ask what device type on this row */
 664	if (MTR_DRAM_WIDTH(mtr) == 8) {
 665		edac_dbg(2, "\t\tScrub algorithm for x8 is on %s mode\n",
 666			 IS_SCRBALGO_ENHANCED(pvt->mc_settings) ?
 667			 "enhanced" : "normal");
 668
 669		dimm->dtype = DEV_X8;
 670	} else
 671		dimm->dtype = DEV_X4;
 672
 673	return mtr;
 674}
 675
 676/**
  677 * print_dimm_size() - Prints a dump of the memory organization
 678 * @pvt: pointer to the private data struct used by i7300 driver
 679 *
  680 * Useful for debugging. If debug is disabled, this routine does nothing.
 681 */
 682static void print_dimm_size(struct i7300_pvt *pvt)
 683{
 684#ifdef CONFIG_EDAC_DEBUG
 685	struct i7300_dimm_info *dinfo;
 686	char *p;
 687	int space, n;
 688	int channel, slot;
 689
 690	space = PAGE_SIZE;
 691	p = pvt->tmp_prt_buffer;
 692
 693	n = snprintf(p, space, "              ");
 694	p += n;
 695	space -= n;
 696	for (channel = 0; channel < MAX_CHANNELS; channel++) {
 697		n = snprintf(p, space, "channel %d | ", channel);
 698		p += n;
 699		space -= n;
 700	}
 701	edac_dbg(2, "%s\n", pvt->tmp_prt_buffer);
 702	p = pvt->tmp_prt_buffer;
 703	space = PAGE_SIZE;
 704	n = snprintf(p, space, "-------------------------------"
 705			       "------------------------------");
 706	p += n;
 707	space -= n;
 708	edac_dbg(2, "%s\n", pvt->tmp_prt_buffer);
 709	p = pvt->tmp_prt_buffer;
 710	space = PAGE_SIZE;
 711
 712	for (slot = 0; slot < MAX_SLOTS; slot++) {
 713		n = snprintf(p, space, "csrow/SLOT %d  ", slot);
 714		p += n;
 715		space -= n;
 716
 717		for (channel = 0; channel < MAX_CHANNELS; channel++) {
 718			dinfo = &pvt->dimm_info[slot][channel];
 719			n = snprintf(p, space, "%4d MB   | ", dinfo->megabytes);
 720			p += n;
 721			space -= n;
 722		}
 723
 724		edac_dbg(2, "%s\n", pvt->tmp_prt_buffer);
 725		p = pvt->tmp_prt_buffer;
 726		space = PAGE_SIZE;
 727	}
 728
 729	n = snprintf(p, space, "-------------------------------"
 730			       "------------------------------");
 731	p += n;
 732	space -= n;
 733	edac_dbg(2, "%s\n", pvt->tmp_prt_buffer);
 734	p = pvt->tmp_prt_buffer;
 735	space = PAGE_SIZE;
 736#endif
 737}
 738
 739/**
 740 * i7300_init_csrows() - Initialize the 'csrows' table within
 741 *			 the mci control structure with the
  742 *			 memory addressing information.
 743 * @mci: struct mem_ctl_info pointer
 744 */
 745static int i7300_init_csrows(struct mem_ctl_info *mci)
 746{
 747	struct i7300_pvt *pvt;
 748	struct i7300_dimm_info *dinfo;
 749	int rc = -ENODEV;
 750	int mtr;
 751	int ch, branch, slot, channel, max_channel, max_branch;
 752	struct dimm_info *dimm;
 753
 754	pvt = mci->pvt_info;
 755
 756	edac_dbg(2, "Memory Technology Registers:\n");
 757
 758	if (IS_SINGLE_MODE(pvt->mc_settings_a)) {
 759		max_branch = 1;
 760		max_channel = 1;
 761	} else {
 762		max_branch = MAX_BRANCHES;
 763		max_channel = MAX_CH_PER_BRANCH;
 764	}
 765
 766	/* Get the AMB present registers for the four channels */
 767	for (branch = 0; branch < max_branch; branch++) {
  768		/* Read the AMB-present register for the branch's first channel */
 769		channel = to_channel(0, branch);
 770		pci_read_config_word(pvt->pci_dev_2x_0_fbd_branch[branch],
 771				     AMBPRESENT_0,
 772				&pvt->ambpresent[channel]);
 773		edac_dbg(2, "\t\tAMB-present CH%d = 0x%x:\n",
 774			 channel, pvt->ambpresent[channel]);
 775
 776		if (max_channel == 1)
 777			continue;
 778
 779		channel = to_channel(1, branch);
 780		pci_read_config_word(pvt->pci_dev_2x_0_fbd_branch[branch],
 781				     AMBPRESENT_1,
 782				&pvt->ambpresent[channel]);
 783		edac_dbg(2, "\t\tAMB-present CH%d = 0x%x:\n",
 784			 channel, pvt->ambpresent[channel]);
 785	}
 786
 787	/* Get the set of MTR[0-7] regs by each branch */
 788	for (slot = 0; slot < MAX_SLOTS; slot++) {
 789		int where = mtr_regs[slot];
 790		for (branch = 0; branch < max_branch; branch++) {
 791			pci_read_config_word(pvt->pci_dev_2x_0_fbd_branch[branch],
 792					where,
 793					&pvt->mtr[slot][branch]);
 794			for (ch = 0; ch < max_channel; ch++) {
 795				int channel = to_channel(ch, branch);
 796
 797				dimm = edac_get_dimm(mci, branch, ch, slot);
 798
 799				dinfo = &pvt->dimm_info[slot][channel];
 800
 801				mtr = decode_mtr(pvt, slot, ch, branch,
 802						 dinfo, dimm);
 803
  804				/* if no DIMMs on this row, continue */
 805				if (!MTR_DIMMS_PRESENT(mtr))
 806					continue;
 807
 808				rc = 0;
 809
 810			}
 811		}
 812	}
 813
 814	return rc;
 815}
 816
 817/**
 818 * decode_mir() - Decodes Memory Interleave Register (MIR) info
 819 * @mir_no: number of the MIR register to decode
 820 * @mir: array with the MIR data cached on the driver
 821 */
 822static void decode_mir(int mir_no, u16 mir[MAX_MIR])
 823{
 824	if (mir[mir_no] & 3)
 825		edac_dbg(2, "MIR%d: limit= 0x%x Branch(es) that participate: %s %s\n",
 826			 mir_no,
 827			 (mir[mir_no] >> 4) & 0xfff,
 828			 (mir[mir_no] & 1) ? "B0" : "",
 829			 (mir[mir_no] & 2) ? "B1" : "");
 830}
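
/*
 * Illustration only (hypothetical register value): mir[0] = 0x0103
 * would be logged as
 * "MIR0: limit= 0x10 Branch(es) that participate: B0 B1".
 */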
 831
 832/**
 833 * i7300_get_mc_regs() - Get the contents of the MC enumeration registers
 834 * @mci: struct mem_ctl_info pointer
 835 *
  836 * The data read is cached internally for later use.
 837 */
 838static int i7300_get_mc_regs(struct mem_ctl_info *mci)
 839{
 840	struct i7300_pvt *pvt;
 841	u32 actual_tolm;
 842	int i, rc;
 843
 844	pvt = mci->pvt_info;
 845
 846	pci_read_config_dword(pvt->pci_dev_16_0_fsb_ctlr, AMBASE,
 847			(u32 *) &pvt->ambase);
 848
  849	edac_dbg(2, "AMBASE= 0x%lx\n", (unsigned long)pvt->ambase);
 850
 851	/* Get the Branch Map regs */
 852	pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, TOLM, &pvt->tolm);
 853	pvt->tolm >>= 12;
 854	edac_dbg(2, "TOLM (number of 256M regions) =%u (0x%x)\n",
 855		 pvt->tolm, pvt->tolm);
 856
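	/*
	 * Each TOLM unit represents a 256 MB region (1/4 GB), so
	 * (1000 * tolm) >> 2 expresses the limit in thousandths of a GB
	 * for the debug printout below; the low 28 address bits are
	 * implied, hence the "tolm << 28" byte address.
	 */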
 857	actual_tolm = (u32) ((1000l * pvt->tolm) >> (30 - 28));
 858	edac_dbg(2, "Actual TOLM byte addr=%u.%03u GB (0x%x)\n",
 859		 actual_tolm/1000, actual_tolm % 1000, pvt->tolm << 28);
 860
 861	/* Get memory controller settings */
 862	pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, MC_SETTINGS,
 863			     &pvt->mc_settings);
 864	pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, MC_SETTINGS_A,
 865			     &pvt->mc_settings_a);
 866
 867	if (IS_SINGLE_MODE(pvt->mc_settings_a))
  868		edac_dbg(0, "Memory controller operating in single mode\n");
 869	else
  870		edac_dbg(0, "Memory controller operating in %smirrored mode\n",
 871			 IS_MIRRORED(pvt->mc_settings) ? "" : "non-");
 872
 873	edac_dbg(0, "Error detection is %s\n",
 874		 IS_ECC_ENABLED(pvt->mc_settings) ? "enabled" : "disabled");
 875	edac_dbg(0, "Retry is %s\n",
 876		 IS_RETRY_ENABLED(pvt->mc_settings) ? "enabled" : "disabled");
 877
 878	/* Get Memory Interleave Range registers */
 879	pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, MIR0,
 880			     &pvt->mir[0]);
 881	pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, MIR1,
 882			     &pvt->mir[1]);
 883	pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, MIR2,
 884			     &pvt->mir[2]);
 885
 886	/* Decode the MIR regs */
 887	for (i = 0; i < MAX_MIR; i++)
 888		decode_mir(i, pvt->mir);
 889
 890	rc = i7300_init_csrows(mci);
 891	if (rc < 0)
 892		return rc;
 893
  894	/* Dump the size of each DIMM, arranged as an
  895	 * orderly matrix, for debugging */
 896	print_dimm_size(pvt);
 897
 898	return 0;
 899}
 900
 901/*************************************************
 902 * i7300 Functions related to device probe/release
 903 *************************************************/
 904
 905/**
 906 * i7300_put_devices() - Release the PCI devices
 907 * @mci: struct mem_ctl_info pointer
 908 */
 909static void i7300_put_devices(struct mem_ctl_info *mci)
 910{
 911	struct i7300_pvt *pvt;
 912	int branch;
 913
 914	pvt = mci->pvt_info;
 915
 916	/* Decrement usage count for devices */
  917	for (branch = 0; branch < MAX_BRANCHES; branch++)
 918		pci_dev_put(pvt->pci_dev_2x_0_fbd_branch[branch]);
 919	pci_dev_put(pvt->pci_dev_16_2_fsb_err_regs);
 920	pci_dev_put(pvt->pci_dev_16_1_fsb_addr_map);
 921}
 922
 923/**
 924 * i7300_get_devices() - Find and perform 'get' operation on the MCH's
 925 *			 device/functions we want to reference for this driver
 926 * @mci: struct mem_ctl_info pointer
 927 *
  928 * Access and prepare the several devices for use:
 929 * I7300 devices used by this driver:
 930 *    Device 16, functions 0,1 and 2:	PCI_DEVICE_ID_INTEL_I7300_MCH_ERR
 931 *    Device 21 function 0:		PCI_DEVICE_ID_INTEL_I7300_MCH_FB0
 932 *    Device 22 function 0:		PCI_DEVICE_ID_INTEL_I7300_MCH_FB1
 933 */
 934static int i7300_get_devices(struct mem_ctl_info *mci)
 935{
 936	struct i7300_pvt *pvt;
 937	struct pci_dev *pdev;
 938
 939	pvt = mci->pvt_info;
 940
  941	/* Attempt to 'get' the MCH device functions we want */
 942	pdev = NULL;
 943	while ((pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
 944				      PCI_DEVICE_ID_INTEL_I7300_MCH_ERR,
 945				      pdev))) {
 946		/* Store device 16 funcs 1 and 2 */
 947		switch (PCI_FUNC(pdev->devfn)) {
 948		case 1:
 949			if (!pvt->pci_dev_16_1_fsb_addr_map)
 950				pvt->pci_dev_16_1_fsb_addr_map =
 951							pci_dev_get(pdev);
 952			break;
 953		case 2:
 954			if (!pvt->pci_dev_16_2_fsb_err_regs)
 955				pvt->pci_dev_16_2_fsb_err_regs =
 956							pci_dev_get(pdev);
 957			break;
 958		}
 959	}
 960
 961	if (!pvt->pci_dev_16_1_fsb_addr_map ||
 962	    !pvt->pci_dev_16_2_fsb_err_regs) {
 963		/* At least one device was not found */
 964		i7300_printk(KERN_ERR,
  965			"'system address, Process Bus' device not found: "
 966			"vendor 0x%x device 0x%x ERR funcs (broken BIOS?)\n",
 967			PCI_VENDOR_ID_INTEL,
 968			PCI_DEVICE_ID_INTEL_I7300_MCH_ERR);
 969		goto error;
 970	}
 971
 972	edac_dbg(1, "System Address, processor bus- PCI Bus ID: %s  %x:%x\n",
 973		 pci_name(pvt->pci_dev_16_0_fsb_ctlr),
 974		 pvt->pci_dev_16_0_fsb_ctlr->vendor,
 975		 pvt->pci_dev_16_0_fsb_ctlr->device);
 976	edac_dbg(1, "Branchmap, control and errors - PCI Bus ID: %s  %x:%x\n",
 977		 pci_name(pvt->pci_dev_16_1_fsb_addr_map),
 978		 pvt->pci_dev_16_1_fsb_addr_map->vendor,
 979		 pvt->pci_dev_16_1_fsb_addr_map->device);
 980	edac_dbg(1, "FSB Error Regs - PCI Bus ID: %s  %x:%x\n",
 981		 pci_name(pvt->pci_dev_16_2_fsb_err_regs),
 982		 pvt->pci_dev_16_2_fsb_err_regs->vendor,
 983		 pvt->pci_dev_16_2_fsb_err_regs->device);
 984
 985	pvt->pci_dev_2x_0_fbd_branch[0] = pci_get_device(PCI_VENDOR_ID_INTEL,
 986					    PCI_DEVICE_ID_INTEL_I7300_MCH_FB0,
 987					    NULL);
 988	if (!pvt->pci_dev_2x_0_fbd_branch[0]) {
 989		i7300_printk(KERN_ERR,
  990			"MC: 'BRANCH 0' device not found: "
 991			"vendor 0x%x device 0x%x Func 0 (broken BIOS?)\n",
 992			PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_FB0);
 993		goto error;
 994	}
 995
 996	pvt->pci_dev_2x_0_fbd_branch[1] = pci_get_device(PCI_VENDOR_ID_INTEL,
 997					    PCI_DEVICE_ID_INTEL_I7300_MCH_FB1,
 998					    NULL);
 999	if (!pvt->pci_dev_2x_0_fbd_branch[1]) {
1000		i7300_printk(KERN_ERR,
 1001			"MC: 'BRANCH 1' device not found: "
1002			"vendor 0x%x device 0x%x Func 0 "
1003			"(broken BIOS?)\n",
1004			PCI_VENDOR_ID_INTEL,
1005			PCI_DEVICE_ID_INTEL_I7300_MCH_FB1);
1006		goto error;
1007	}
1008
1009	return 0;
1010
1011error:
1012	i7300_put_devices(mci);
1013	return -ENODEV;
1014}
1015
1016/**
1017 * i7300_init_one() - Probe for one instance of the device
1018 * @pdev: struct pci_dev pointer
1019 * @id: struct pci_device_id pointer - currently unused
1020 */
1021static int i7300_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
1022{
1023	struct mem_ctl_info *mci;
1024	struct edac_mc_layer layers[3];
1025	struct i7300_pvt *pvt;
1026	int rc;
1027
1028	/* wake up device */
1029	rc = pci_enable_device(pdev);
1030	if (rc == -EIO)
1031		return rc;
1032
1033	edac_dbg(0, "MC: pdev bus %u dev=0x%x fn=0x%x\n",
1034		 pdev->bus->number,
1035		 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
1036
 1037	/* We are only looking for func 0 of the set */
1038	if (PCI_FUNC(pdev->devfn) != 0)
1039		return -ENODEV;
1040
1041	/* allocate a new MC control structure */
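	/*
	 * The three layers describe the topology documented at the top of
	 * this file: 2 branches, 2 channels per branch and 8 DIMM slots.
	 * Layers flagged as is_virt_csrow are the ones folded into the
	 * legacy 'csrow' sysfs view (see the EDAC core for the exact
	 * mapping).
	 */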
1042	layers[0].type = EDAC_MC_LAYER_BRANCH;
1043	layers[0].size = MAX_BRANCHES;
1044	layers[0].is_virt_csrow = false;
1045	layers[1].type = EDAC_MC_LAYER_CHANNEL;
1046	layers[1].size = MAX_CH_PER_BRANCH;
1047	layers[1].is_virt_csrow = true;
1048	layers[2].type = EDAC_MC_LAYER_SLOT;
1049	layers[2].size = MAX_SLOTS;
1050	layers[2].is_virt_csrow = true;
1051	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
1052	if (mci == NULL)
1053		return -ENOMEM;
1054
1055	edac_dbg(0, "MC: mci = %p\n", mci);
1056
1057	mci->pdev = &pdev->dev;	/* record ptr  to the generic device */
1058
1059	pvt = mci->pvt_info;
 1060	pvt->pci_dev_16_0_fsb_ctlr = pdev;	/* Record this device in our private data */
1061
1062	pvt->tmp_prt_buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
1063	if (!pvt->tmp_prt_buffer) {
1064		edac_mc_free(mci);
1065		return -ENOMEM;
1066	}
1067
1068	/* 'get' the pci devices we want to reserve for our use */
1069	if (i7300_get_devices(mci))
1070		goto fail0;
1071
1072	mci->mc_idx = 0;
1073	mci->mtype_cap = MEM_FLAG_FB_DDR2;
1074	mci->edac_ctl_cap = EDAC_FLAG_NONE;
1075	mci->edac_cap = EDAC_FLAG_NONE;
1076	mci->mod_name = "i7300_edac.c";
1077	mci->ctl_name = i7300_devs[0].ctl_name;
1078	mci->dev_name = pci_name(pdev);
1079	mci->ctl_page_to_phys = NULL;
1080
 1081	/* Set the function pointer to our error-checking routine */
1082	mci->edac_check = i7300_check_error;
1083
1084	/* initialize the MC control structure 'csrows' table
1085	 * with the mapping and control information */
1086	if (i7300_get_mc_regs(mci)) {
1087		edac_dbg(0, "MC: Setting mci->edac_cap to EDAC_FLAG_NONE because i7300_init_csrows() returned nonzero value\n");
1088		mci->edac_cap = EDAC_FLAG_NONE;	/* no csrows found */
1089	} else {
1090		edac_dbg(1, "MC: Enable error reporting now\n");
1091		i7300_enable_error_reporting(mci);
1092	}
1093
1094	/* add this new MC control structure to EDAC's list of MCs */
1095	if (edac_mc_add_mc(mci)) {
1096		edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
1097		/* FIXME: perhaps some code should go here that disables error
1098		 * reporting if we just enabled it
1099		 */
1100		goto fail1;
1101	}
1102
1103	i7300_clear_error(mci);
1104
 1105	/* allocate generic PCI control info */
1106	i7300_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
1107	if (!i7300_pci) {
1108		printk(KERN_WARNING
1109			"%s(): Unable to create PCI control\n",
1110			__func__);
1111		printk(KERN_WARNING
1112			"%s(): PCI error report via EDAC not setup\n",
1113			__func__);
1114	}
1115
1116	return 0;
1117
1118	/* Error exit unwinding stack */
1119fail1:
1120
1121	i7300_put_devices(mci);
1122
1123fail0:
1124	kfree(pvt->tmp_prt_buffer);
1125	edac_mc_free(mci);
1126	return -ENODEV;
1127}
1128
1129/**
 1130 * i7300_remove_one() - Remove one instance of the device
1131 * @pdev: struct pci_dev pointer
1132 */
1133static void i7300_remove_one(struct pci_dev *pdev)
1134{
1135	struct mem_ctl_info *mci;
1136	char *tmp;
1137
1138	edac_dbg(0, "\n");
1139
1140	if (i7300_pci)
1141		edac_pci_release_generic_ctl(i7300_pci);
1142
1143	mci = edac_mc_del_mc(&pdev->dev);
1144	if (!mci)
1145		return;
1146
1147	tmp = ((struct i7300_pvt *)mci->pvt_info)->tmp_prt_buffer;
1148
1149	/* retrieve references to resources, and free those resources */
1150	i7300_put_devices(mci);
1151
1152	kfree(tmp);
1153	edac_mc_free(mci);
1154}
1155
1156/*
 1157 * pci_device_id: table of the devices this driver binds to
 1158 *
 1159 * Contains only the 8086:360c PCI ID
1160 */
1161static const struct pci_device_id i7300_pci_tbl[] = {
1162	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_ERR)},
1163	{0,}			/* 0 terminated list. */
1164};
1165
1166MODULE_DEVICE_TABLE(pci, i7300_pci_tbl);
1167
1168/*
1169 * i7300_driver: pci_driver structure for this module
1170 */
1171static struct pci_driver i7300_driver = {
1172	.name = "i7300_edac",
1173	.probe = i7300_init_one,
1174	.remove = i7300_remove_one,
1175	.id_table = i7300_pci_tbl,
1176};
1177
1178/**
1179 * i7300_init() - Registers the driver
1180 */
1181static int __init i7300_init(void)
1182{
1183	int pci_rc;
1184
1185	edac_dbg(2, "\n");
1186
1187	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
1188	opstate_init();
1189
1190	pci_rc = pci_register_driver(&i7300_driver);
1191
1192	return (pci_rc < 0) ? pci_rc : 0;
1193}
1194
1195/**
1196 * i7300_exit() - Unregisters the driver
1197 */
1198static void __exit i7300_exit(void)
1199{
1200	edac_dbg(2, "\n");
1201	pci_unregister_driver(&i7300_driver);
1202}
1203
1204module_init(i7300_init);
1205module_exit(i7300_exit);
1206
1207MODULE_LICENSE("GPL");
1208MODULE_AUTHOR("Mauro Carvalho Chehab");
1209MODULE_AUTHOR("Red Hat Inc. (https://www.redhat.com)");
1210MODULE_DESCRIPTION("MC Driver for Intel I7300 memory controllers - "
1211		   I7300_REVISION);
1212
1213module_param(edac_op_state, int, 0444);
1214MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
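
/*
 * Example (illustrative): to load the driver with NMI-based error
 * reporting instead of the default polling, one could use:
 *
 *	modprobe i7300_edac edac_op_state=1
 */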