Linux Audio

Check our new training course

Loading...
  1/*
  2 * Intel e7xxx Memory Controller kernel module
  3 * (C) 2003 Linux Networx (http://lnxi.com)
  4 * This file may be distributed under the terms of the
  5 * GNU General Public License.
  6 *
  7 * See "enum e7xxx_chips" below for supported chipsets
  8 *
  9 * Written by Thayne Harbaugh
 10 * Based on work by Dan Hollis <goemon at anime dot net> and others.
 11 *	http://www.anime.net/~goemon/linux-ecc/
 12 *
 13 * Datasheet:
 14 *	http://www.intel.com/content/www/us/en/chipsets/e7501-chipset-memory-controller-hub-datasheet.html
 15 *
 16 * Contributors:
 17 *	Eric Biederman (Linux Networx)
 18 *	Tom Zimmerman (Linux Networx)
 19 *	Jim Garlick (Lawrence Livermore National Labs)
 20 *	Dave Peterson (Lawrence Livermore National Labs)
 21 *	That One Guy (Some other place)
 22 *	Wang Zhenyu (intel.com)
 23 *
 24 * $Id: edac_e7xxx.c,v 1.5.2.9 2005/10/05 00:43:44 dsp_llnl Exp $
 25 *
 26 */
 27
 28#include <linux/module.h>
 29#include <linux/init.h>
 30#include <linux/pci.h>
 31#include <linux/pci_ids.h>
 32#include <linux/edac.h>
 33#include "edac_core.h"
 34
 35#define	E7XXX_REVISION " Ver: 2.0.2"
 36#define	EDAC_MOD_STR	"e7xxx_edac"
 37
 38#define e7xxx_printk(level, fmt, arg...) \
 39	edac_printk(level, "e7xxx", fmt, ##arg)
 40
 41#define e7xxx_mc_printk(mci, level, fmt, arg...) \
 42	edac_mc_chipset_printk(mci, level, "e7xxx", fmt, ##arg)
 43
 44#ifndef PCI_DEVICE_ID_INTEL_7205_0
 45#define PCI_DEVICE_ID_INTEL_7205_0	0x255d
 46#endif				/* PCI_DEVICE_ID_INTEL_7205_0 */
 47
 48#ifndef PCI_DEVICE_ID_INTEL_7205_1_ERR
 49#define PCI_DEVICE_ID_INTEL_7205_1_ERR	0x2551
 50#endif				/* PCI_DEVICE_ID_INTEL_7205_1_ERR */
 51
 52#ifndef PCI_DEVICE_ID_INTEL_7500_0
 53#define PCI_DEVICE_ID_INTEL_7500_0	0x2540
 54#endif				/* PCI_DEVICE_ID_INTEL_7500_0 */
 55
 56#ifndef PCI_DEVICE_ID_INTEL_7500_1_ERR
 57#define PCI_DEVICE_ID_INTEL_7500_1_ERR	0x2541
 58#endif				/* PCI_DEVICE_ID_INTEL_7500_1_ERR */
 59
 60#ifndef PCI_DEVICE_ID_INTEL_7501_0
 61#define PCI_DEVICE_ID_INTEL_7501_0	0x254c
 62#endif				/* PCI_DEVICE_ID_INTEL_7501_0 */
 63
 64#ifndef PCI_DEVICE_ID_INTEL_7501_1_ERR
 65#define PCI_DEVICE_ID_INTEL_7501_1_ERR	0x2541
 66#endif				/* PCI_DEVICE_ID_INTEL_7501_1_ERR */
 67
 68#ifndef PCI_DEVICE_ID_INTEL_7505_0
 69#define PCI_DEVICE_ID_INTEL_7505_0	0x2550
 70#endif				/* PCI_DEVICE_ID_INTEL_7505_0 */
 71
 72#ifndef PCI_DEVICE_ID_INTEL_7505_1_ERR
 73#define PCI_DEVICE_ID_INTEL_7505_1_ERR	0x2551
 74#endif				/* PCI_DEVICE_ID_INTEL_7505_1_ERR */
 75
 76#define E7XXX_NR_CSROWS		8	/* number of csrows */
 77#define E7XXX_NR_DIMMS		8	/* 2 channels, 4 dimms/channel */
 78
 79/* E7XXX register addresses - device 0 function 0 */
 80#define E7XXX_DRB		0x60	/* DRAM row boundary register (8b) */
 81#define E7XXX_DRA		0x70	/* DRAM row attribute register (8b) */
 82					/*
 83					 * 31   Device width row 7 0=x8 1=x4
 84					 * 27   Device width row 6
 85					 * 23   Device width row 5
 86					 * 19   Device width row 4
 87					 * 15   Device width row 3
 88					 * 11   Device width row 2
 89					 *  7   Device width row 1
 90					 *  3   Device width row 0
 91					 */
 92#define E7XXX_DRC		0x7C	/* DRAM controller mode reg (32b) */
 93					/*
 94					 * 22    Number channels 0=1,1=2
 95					 * 19:18 DRB Granularity 32/64MB
 96					 */
 97#define E7XXX_TOLM		0xC4	/* DRAM top of low memory reg (16b) */
 98#define E7XXX_REMAPBASE		0xC6	/* DRAM remap base address reg (16b) */
 99#define E7XXX_REMAPLIMIT	0xC8	/* DRAM remap limit address reg (16b) */
100
101/* E7XXX register addresses - device 0 function 1 */
102#define E7XXX_DRAM_FERR		0x80	/* DRAM first error register (8b) */
103#define E7XXX_DRAM_NERR		0x82	/* DRAM next error register (8b) */
104#define E7XXX_DRAM_CELOG_ADD	0xA0	/* DRAM first correctable memory */
105					/*     error address register (32b) */
106					/*
107					 * 31:28 Reserved
108					 * 27:6  CE address (4k block 33:12)
109					 *  5:0  Reserved
110					 */
111#define E7XXX_DRAM_UELOG_ADD	0xB0	/* DRAM first uncorrectable memory */
112					/*     error address register (32b) */
113					/*
114					 * 31:28 Reserved
115					 * 27:6  CE address (4k block 33:12)
116					 *  5:0  Reserved
117					 */
118#define E7XXX_DRAM_CELOG_SYNDROME 0xD0	/* DRAM first correctable memory */
119					/*     error syndrome register (16b) */
120
/* Supported chipsets; values index e7xxx_devs[] and are stored in
 * e7xxx_pci_tbl[].driver_data. */
enum e7xxx_chips {
	E7500 = 0,
	E7501,
	E7505,
	E7205,
};
127
/* Per-controller private state, stored in mci->pvt_info. */
struct e7xxx_pvt {
	struct pci_dev *bridge_ck;	/* dev 0 fn 1 error-reporting device */
	u32 tolm;		/* top of low memory; same units as the pages
				 * compared against it in ctl_page_to_phys() */
	u32 remapbase;		/* remap window base (see ctl_page_to_phys) */
	u32 remaplimit;		/* remap window limit (exclusive) */
	const struct e7xxx_dev_info *dev_info;	/* chip-specific constants */
};
135
/* Chip-specific constants, one entry per enum e7xxx_chips value. */
struct e7xxx_dev_info {
	u16 err_dev;		/* PCI device id of the dev 0 fn 1 error device */
	const char *ctl_name;	/* human-readable name (assigned to mci->ctl_name) */
};
140
/* Snapshot of the chipset's DRAM error registers, filled in by
 * e7xxx_get_error_info().  The CE/UE log fields are only valid when the
 * corresponding status bit is set in dram_ferr/dram_nerr. */
struct e7xxx_error_info {
	u8 dram_ferr;		/* DRAM first-error status register */
	u8 dram_nerr;		/* DRAM next-error status register */
	u32 dram_celog_add;	/* CE log: error address */
	u16 dram_celog_syndrome;	/* CE log: syndrome */
	u32 dram_uelog_add;	/* UE log: error address */
};
148
/* Generic EDAC PCI control handle, created in e7xxx_probe1() and released
 * in e7xxx_remove_one(); may be NULL if creation failed. */
static struct edac_pci_ctl_info *e7xxx_pci;

/* Chip-specific constants, indexed by enum e7xxx_chips. */
static const struct e7xxx_dev_info e7xxx_devs[] = {
	[E7500] = {
		.err_dev = PCI_DEVICE_ID_INTEL_7500_1_ERR,
		.ctl_name = "E7500"},
	[E7501] = {
		.err_dev = PCI_DEVICE_ID_INTEL_7501_1_ERR,
		.ctl_name = "E7501"},
	[E7505] = {
		.err_dev = PCI_DEVICE_ID_INTEL_7505_1_ERR,
		.ctl_name = "E7505"},
	[E7205] = {
		.err_dev = PCI_DEVICE_ID_INTEL_7205_1_ERR,
		.ctl_name = "E7205"},
};
165
166/* FIXME - is this valid for both SECDED and S4ECD4ED? */
167static inline int e7xxx_find_channel(u16 syndrome)
168{
169	debugf3("%s()\n", __func__);
170
171	if ((syndrome & 0xff00) == 0)
172		return 0;
173
174	if ((syndrome & 0x00ff) == 0)
175		return 1;
176
177	if ((syndrome & 0xf000) == 0 || (syndrome & 0x0f00) == 0)
178		return 0;
179
180	return 1;
181}
182
183static unsigned long ctl_page_to_phys(struct mem_ctl_info *mci,
184				unsigned long page)
185{
186	u32 remap;
187	struct e7xxx_pvt *pvt = (struct e7xxx_pvt *)mci->pvt_info;
188
189	debugf3("%s()\n", __func__);
190
191	if ((page < pvt->tolm) ||
192		((page >= 0x100000) && (page < pvt->remapbase)))
193		return page;
194
195	remap = (page - pvt->tolm) + pvt->remapbase;
196
197	if (remap < pvt->remaplimit)
198		return remap;
199
200	e7xxx_printk(KERN_ERR, "Invalid page %lx - out of range\n", page);
201	return pvt->tolm - 1;
202}
203
204static void process_ce(struct mem_ctl_info *mci, struct e7xxx_error_info *info)
205{
206	u32 error_1b, page;
207	u16 syndrome;
208	int row;
209	int channel;
210
211	debugf3("%s()\n", __func__);
212	/* read the error address */
213	error_1b = info->dram_celog_add;
214	/* FIXME - should use PAGE_SHIFT */
215	page = error_1b >> 6;	/* convert the address to 4k page */
216	/* read the syndrome */
217	syndrome = info->dram_celog_syndrome;
218	/* FIXME - check for -1 */
219	row = edac_mc_find_csrow_by_page(mci, page);
220	/* convert syndrome to channel */
221	channel = e7xxx_find_channel(syndrome);
222	edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, page, 0, syndrome,
223			     row, channel, -1, "e7xxx CE", "", NULL);
224}
225
226static void process_ce_no_info(struct mem_ctl_info *mci)
227{
228	debugf3("%s()\n", __func__);
229	edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0, -1, -1, -1,
230			     "e7xxx CE log register overflow", "", NULL);
231}
232
233static void process_ue(struct mem_ctl_info *mci, struct e7xxx_error_info *info)
234{
235	u32 error_2b, block_page;
236	int row;
237
238	debugf3("%s()\n", __func__);
239	/* read the error address */
240	error_2b = info->dram_uelog_add;
241	/* FIXME - should use PAGE_SHIFT */
242	block_page = error_2b >> 6;	/* convert to 4k address */
243	row = edac_mc_find_csrow_by_page(mci, block_page);
244
245	edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, block_page, 0, 0,
246			     row, -1, -1, "e7xxx UE", "", NULL);
247}
248
/*
 * Report an uncorrectable error for which no log data is available (the
 * UE log register already held an earlier, unprocessed error).
 */
static void process_ue_no_info(struct mem_ctl_info *mci)
{
	debugf3("%s()\n", __func__);

	edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0, -1, -1, -1,
			     "e7xxx UE log register overflow", "", NULL);
}
256
/*
 * Snapshot and clear the chipset error registers into *info.
 *
 * The CE/UE log registers are read only when the corresponding status
 * bit (0 = CE, 1 = UE) is set in FERR or NERR; otherwise those info
 * fields are left uninitialized, and callers must only consume fields
 * guarded by the same status bits.  Handled status bits are written
 * back afterwards so the hardware can latch the next error — the
 * ordering (read logs first, then clear status) matters.
 */
static void e7xxx_get_error_info(struct mem_ctl_info *mci,
				 struct e7xxx_error_info *info)
{
	struct e7xxx_pvt *pvt;

	pvt = (struct e7xxx_pvt *)mci->pvt_info;
	/* FERR latches the first error, NERR any subsequent one */
	pci_read_config_byte(pvt->bridge_ck, E7XXX_DRAM_FERR, &info->dram_ferr);
	pci_read_config_byte(pvt->bridge_ck, E7XXX_DRAM_NERR, &info->dram_nerr);

	/* bit 0: correctable error — fetch CE address and syndrome */
	if ((info->dram_ferr & 1) || (info->dram_nerr & 1)) {
		pci_read_config_dword(pvt->bridge_ck, E7XXX_DRAM_CELOG_ADD,
				&info->dram_celog_add);
		pci_read_config_word(pvt->bridge_ck,
				E7XXX_DRAM_CELOG_SYNDROME,
				&info->dram_celog_syndrome);
	}

	/* bit 1: uncorrectable error — fetch UE address */
	if ((info->dram_ferr & 2) || (info->dram_nerr & 2))
		pci_read_config_dword(pvt->bridge_ck, E7XXX_DRAM_UELOG_ADD,
				&info->dram_uelog_add);

	/* clear the handled CE/UE bits (presumably write-1-to-clear per
	 * the datasheet — TODO confirm) */
	if (info->dram_ferr & 3)
		pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_FERR, 0x03, 0x03);

	if (info->dram_nerr & 3)
		pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_NERR, 0x03, 0x03);
}
284
285static int e7xxx_process_error_info(struct mem_ctl_info *mci,
286				struct e7xxx_error_info *info,
287				int handle_errors)
288{
289	int error_found;
290
291	error_found = 0;
292
293	/* decode and report errors */
294	if (info->dram_ferr & 1) {	/* check first error correctable */
295		error_found = 1;
296
297		if (handle_errors)
298			process_ce(mci, info);
299	}
300
301	if (info->dram_ferr & 2) {	/* check first error uncorrectable */
302		error_found = 1;
303
304		if (handle_errors)
305			process_ue(mci, info);
306	}
307
308	if (info->dram_nerr & 1) {	/* check next error correctable */
309		error_found = 1;
310
311		if (handle_errors) {
312			if (info->dram_ferr & 1)
313				process_ce_no_info(mci);
314			else
315				process_ce(mci, info);
316		}
317	}
318
319	if (info->dram_nerr & 2) {	/* check next error uncorrectable */
320		error_found = 1;
321
322		if (handle_errors) {
323			if (info->dram_ferr & 2)
324				process_ue_no_info(mci);
325			else
326				process_ue(mci, info);
327		}
328	}
329
330	return error_found;
331}
332
/* Polling entry point (installed as mci->edac_check): snapshot the
 * hardware error registers and report anything found. */
static void e7xxx_check(struct mem_ctl_info *mci)
{
	struct e7xxx_error_info info;

	debugf3("%s()\n", __func__);
	e7xxx_get_error_info(mci, &info);
	e7xxx_process_error_info(mci, &info, 1);
}
341
342/* Return 1 if dual channel mode is active.  Else return 0. */
343static inline int dual_channel_active(u32 drc, int dev_idx)
344{
345	return (dev_idx == E7501) ? ((drc >> 22) & 0x1) : 1;
346}
347
348/* Return DRB granularity (0=32mb, 1=64mb). */
349static inline int drb_granularity(u32 drc, int dev_idx)
350{
351	/* only e7501 can be single channel */
352	return (dev_idx == E7501) ? ((drc >> 18) & 0x3) : 1;
353}
354
/*
 * Populate mci's csrow/dimm tables from the chipset's DRAM row boundary
 * (DRB) and row attribute (DRA) registers.
 */
static void e7xxx_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
			int dev_idx, u32 drc)
{
	unsigned long last_cumul_size;
	int index, j;
	u8 value;
	u32 dra, cumul_size, nr_pages;
	int drc_chan, drc_drbg, drc_ddim, mem_dev;
	struct csrow_info *csrow;
	struct dimm_info *dimm;

	pci_read_config_dword(pdev, E7XXX_DRA, &dra);
	drc_chan = dual_channel_active(drc, dev_idx);	/* 0=single, 1=dual */
	drc_drbg = drb_granularity(drc, dev_idx);	/* 0=32MB, 1=64MB */
	drc_ddim = (drc >> 20) & 0x3;	/* nonzero enables the ECC modes below */
	last_cumul_size = 0;

	/* The dram row boundary (DRB) reg values are boundary address
	 * for each DRAM row with a granularity of 32 or 64MB (single/dual
	 * channel operation).  DRB regs are cumulative; therefore DRB7 will
	 * contain the total memory contained in all eight rows.
	 */
	for (index = 0; index < mci->nr_csrows; index++) {
		/* mem_dev 0=x8, 1=x4 */
		mem_dev = (dra >> (index * 4 + 3)) & 0x1;
		csrow = &mci->csrows[index];

		pci_read_config_byte(pdev, E7XXX_DRB + index, &value);
		/* convert a 64 or 32 MiB DRB to a page size. */
		cumul_size = value << (25 + drc_drbg - PAGE_SHIFT);
		debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
			cumul_size);
		if (cumul_size == last_cumul_size)
			continue;	/* not populated */

		/* boundaries are cumulative: this row spans the gap between
		 * the previous boundary and this one */
		csrow->first_page = last_cumul_size;
		csrow->last_page = cumul_size - 1;
		nr_pages = cumul_size - last_cumul_size;
		last_cumul_size = cumul_size;

		/* split the row's pages evenly over the active channels */
		for (j = 0; j < drc_chan + 1; j++) {
			dimm = csrow->channels[j].dimm;

			dimm->nr_pages = nr_pages / (drc_chan + 1);
			dimm->grain = 1 << 12;	/* 4KiB - resolution of CELOG */
			dimm->mtype = MEM_RDDR;	/* only one type supported */
			dimm->dtype = mem_dev ? DEV_X4 : DEV_X8;

			/*
			* if single channel or x8 devices then SECDED
			* if dual channel and x4 then S4ECD4ED
			*/
			if (drc_ddim) {
				if (drc_chan && mem_dev) {
					dimm->edac_mode = EDAC_S4ECD4ED;
					mci->edac_cap |= EDAC_FLAG_S4ECD4ED;
				} else {
					dimm->edac_mode = EDAC_SECDED;
					mci->edac_cap |= EDAC_FLAG_SECDED;
				}
			} else
				dimm->edac_mode = EDAC_NONE;
		}
	}
}
420
/*
 * Probe and register one e7xxx memory controller.
 *
 * @pdev:    device 0 function 0 of the chipset
 * @dev_idx: index into e7xxx_devs[] (enum e7xxx_chips)
 *
 * Returns 0 on success, -ENOMEM if the mci cannot be allocated, or
 * -ENODEV on any later failure (error device missing, registration
 * failed); the goto-cleanup labels undo acquisitions in reverse order.
 */
static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
{
	u16 pci_data;
	struct mem_ctl_info *mci = NULL;
	struct edac_mc_layer layers[2];
	struct e7xxx_pvt *pvt = NULL;
	u32 drc;
	int drc_chan;
	struct e7xxx_error_info discard;

	debugf0("%s(): mci\n", __func__);

	pci_read_config_dword(pdev, E7XXX_DRC, &drc);

	drc_chan = dual_channel_active(drc, dev_idx);
	/*
	 * According with the datasheet, this device has a maximum of
	 * 4 DIMMS per channel, either single-rank or dual-rank. So, the
	 * total amount of dimms is 8 (E7XXX_NR_DIMMS).
	 * That means that the DIMM is mapped as CSROWs, and the channel
	 * will map the rank. So, an error to either channel should be
	 * attributed to the same dimm.
	 */
	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
	layers[0].size = E7XXX_NR_CSROWS;
	layers[0].is_virt_csrow = true;
	layers[1].type = EDAC_MC_LAYER_CHANNEL;
	layers[1].size = drc_chan + 1;	/* one or two channels */
	layers[1].is_virt_csrow = false;
	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
	if (mci == NULL)
		return -ENOMEM;

	debugf3("%s(): init mci\n", __func__);
	mci->mtype_cap = MEM_FLAG_RDDR;
	mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED |
		EDAC_FLAG_S4ECD4ED;
	/* FIXME - what if different memory types are in different csrows? */
	mci->mod_name = EDAC_MOD_STR;
	mci->mod_ver = E7XXX_REVISION;
	mci->dev = &pdev->dev;
	debugf3("%s(): init pvt\n", __func__);
	pvt = (struct e7xxx_pvt *)mci->pvt_info;
	pvt->dev_info = &e7xxx_devs[dev_idx];
	/* the error-reporting registers live on a separate PCI function;
	 * this takes a reference that is dropped in e7xxx_remove_one() */
	pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL,
					pvt->dev_info->err_dev, pvt->bridge_ck);

	if (!pvt->bridge_ck) {
		e7xxx_printk(KERN_ERR, "error reporting device not found:"
			"vendor %x device 0x%x (broken BIOS?)\n",
			PCI_VENDOR_ID_INTEL, e7xxx_devs[dev_idx].err_dev);
		goto fail0;
	}

	debugf3("%s(): more mci init\n", __func__);
	mci->ctl_name = pvt->dev_info->ctl_name;
	mci->dev_name = pci_name(pdev);
	mci->edac_check = e7xxx_check;
	mci->ctl_page_to_phys = ctl_page_to_phys;
	e7xxx_init_csrows(mci, pdev, dev_idx, drc);
	mci->edac_cap |= EDAC_FLAG_NONE;
	debugf3("%s(): tolm, remapbase, remaplimit\n", __func__);
	/* load the top of low memory, remap base, and remap limit vars */
	pci_read_config_word(pdev, E7XXX_TOLM, &pci_data);
	pvt->tolm = ((u32) pci_data) << 4;
	pci_read_config_word(pdev, E7XXX_REMAPBASE, &pci_data);
	pvt->remapbase = ((u32) pci_data) << 14;
	pci_read_config_word(pdev, E7XXX_REMAPLIMIT, &pci_data);
	pvt->remaplimit = ((u32) pci_data) << 14;
	e7xxx_printk(KERN_INFO,
		"tolm = %x, remapbase = %x, remaplimit = %x\n", pvt->tolm,
		pvt->remapbase, pvt->remaplimit);

	/* clear any pending errors, or initial state bits */
	e7xxx_get_error_info(mci, &discard);

	/* Here we assume that we will never see multiple instances of this
	 * type of memory controller.  The ID is therefore hardcoded to 0.
	 */
	if (edac_mc_add_mc(mci)) {
		debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
		goto fail1;
	}

	/* allocating generic PCI control info */
	e7xxx_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
	if (!e7xxx_pci) {
		/* non-fatal: MC reporting still works without PCI control */
		printk(KERN_WARNING
			"%s(): Unable to create PCI control\n",
			__func__);
		printk(KERN_WARNING
			"%s(): PCI error report via EDAC not setup\n",
			__func__);
	}

	/* get this far and it's successful */
	debugf3("%s(): success\n", __func__);
	return 0;

fail1:
	pci_dev_put(pvt->bridge_ck);

fail0:
	edac_mc_free(mci);

	return -ENODEV;
}
528
529/* returns count (>= 0), or negative on error */
530static int __devinit e7xxx_init_one(struct pci_dev *pdev,
531				const struct pci_device_id *ent)
532{
533	debugf0("%s()\n", __func__);
534
535	/* wake up and enable device */
536	return pci_enable_device(pdev) ?
537		-EIO : e7xxx_probe1(pdev, ent->driver_data);
538}
539
540static void __devexit e7xxx_remove_one(struct pci_dev *pdev)
541{
542	struct mem_ctl_info *mci;
543	struct e7xxx_pvt *pvt;
544
545	debugf0("%s()\n", __func__);
546
547	if (e7xxx_pci)
548		edac_pci_release_generic_ctl(e7xxx_pci);
549
550	if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL)
551		return;
552
553	pvt = (struct e7xxx_pvt *)mci->pvt_info;
554	pci_dev_put(pvt->bridge_ck);
555	edac_mc_free(mci);
556}
557
/* PCI IDs claimed by this driver; the trailing field (driver_data) is
 * the enum e7xxx_chips index passed through to e7xxx_probe1(). */
static DEFINE_PCI_DEVICE_TABLE(e7xxx_pci_tbl) = {
	{
	 PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	 E7205},
	{
	 PCI_VEND_DEV(INTEL, 7500_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	 E7500},
	{
	 PCI_VEND_DEV(INTEL, 7501_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	 E7501},
	{
	 PCI_VEND_DEV(INTEL, 7505_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	 E7505},
	{
	 0,
	 }			/* 0 terminated list. */
};

MODULE_DEVICE_TABLE(pci, e7xxx_pci_tbl);

static struct pci_driver e7xxx_driver = {
	.name = EDAC_MOD_STR,
	.probe = e7xxx_init_one,
	.remove = __devexit_p(e7xxx_remove_one),
	.id_table = e7xxx_pci_tbl,
};
584
585static int __init e7xxx_init(void)
586{
587       /* Ensure that the OPSTATE is set correctly for POLL or NMI */
588       opstate_init();
589
590	return pci_register_driver(&e7xxx_driver);
591}
592
/* Module exit point: undo e7xxx_init() by unregistering the driver. */
static void __exit e7xxx_exit(void)
{
	pci_unregister_driver(&e7xxx_driver);
}
597
598module_init(e7xxx_init);
599module_exit(e7xxx_exit);
600
601MODULE_LICENSE("GPL");
602MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh et al\n"
603		"Based on.work by Dan Hollis et al");
604MODULE_DESCRIPTION("MC support for Intel e7xxx memory controllers");
605module_param(edac_op_state, int, 0444);
606MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");