drivers/edac/edac_mc_sysfs.c (v6.8)
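Before the listing itself, a minimal sketch of how this sysfs code is normally reached. This is not part of the file: a platform EDAC driver allocates a mem_ctl_info with edac_mc_alloc() and registers it with edac_mc_add_mc(), which in turn calls edac_create_sysfs_mci_device() defined below. The driver name, probe function, layer sizes, and "example_mc" label are hypothetical; only the EDAC core calls are real.

#include <linux/edac.h>
#include <linux/module.h>
#include <linux/platform_device.h>

/* Hypothetical probe, shown only to illustrate the call path into this file. */
static int my_edac_probe(struct platform_device *pdev)
{
	struct edac_mc_layer layers[2];
	struct mem_ctl_info *mci;

	layers[0].type = EDAC_MC_LAYER_CHANNEL;
	layers[0].size = 2;
	layers[0].is_virt_csrow = false;
	layers[1].type = EDAC_MC_LAYER_SLOT;
	layers[1].size = 2;
	layers[1].is_virt_csrow = true;

	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);
	if (!mci)
		return -ENOMEM;

	mci->pdev = &pdev->dev;
	mci->ctl_name = "example_mc";
	mci->dev_name = dev_name(&pdev->dev);

	/*
	 * edac_mc_add_mc() ends up in edac_create_sysfs_mci_device() below,
	 * creating /sys/devices/system/edac/mc/mc0 plus its dimm/rank (and,
	 * with CONFIG_EDAC_LEGACY_SYSFS, csrow) child devices.
	 */
	if (edac_mc_add_mc(mci)) {
		edac_mc_free(mci);
		return -ENXIO;
	}

	return 0;
}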
   1/*
   2 * edac_mc kernel module
   3 * (C) 2005-2007 Linux Networx (http://lnxi.com)
   4 *
   5 * This file may be distributed under the terms of the
   6 * GNU General Public License.
   7 *
   8 * Written by Doug Thompson <norsk5@xmission.com> www.softwarebitmaker.com
   9 *
  10 * (c) 2012-2013 - Mauro Carvalho Chehab
  11 *	The entire API was re-written and ported to use struct device
  12 *
  13 */
  14
  15#include <linux/ctype.h>
  16#include <linux/slab.h>
  17#include <linux/edac.h>
  18#include <linux/bug.h>
  19#include <linux/pm_runtime.h>
  20#include <linux/uaccess.h>
  21
  22#include "edac_mc.h"
  23#include "edac_module.h"
  24
  25/* MC EDAC Controls, settable by module parameter and sysfs */
  26static int edac_mc_log_ue = 1;
  27static int edac_mc_log_ce = 1;
  28static int edac_mc_panic_on_ue;
  29static unsigned int edac_mc_poll_msec = 1000;
  30
  31/* Getter functions for above */
  32int edac_mc_get_log_ue(void)
  33{
  34	return edac_mc_log_ue;
  35}
  36
  37int edac_mc_get_log_ce(void)
  38{
  39	return edac_mc_log_ce;
  40}
  41
  42int edac_mc_get_panic_on_ue(void)
  43{
  44	return edac_mc_panic_on_ue;
  45}
  46
  47/* this is temporary */
  48unsigned int edac_mc_get_poll_msec(void)
  49{
  50	return edac_mc_poll_msec;
  51}
  52
  53static int edac_set_poll_msec(const char *val, const struct kernel_param *kp)
  54{
  55	unsigned int i;
  56	int ret;
  57
  58	if (!val)
  59		return -EINVAL;
  60
  61	ret = kstrtouint(val, 0, &i);
  62	if (ret)
  63		return ret;
  64
  65	if (i < 1000)
  66		return -EINVAL;
  67
  68	*((unsigned int *)kp->arg) = i;
  69
  70	/* notify edac_mc engine to reset the poll period */
  71	edac_mc_reset_delay_period(i);
  72
  73	return 0;
  74}
  75
  76/* Parameter declarations for above */
  77module_param(edac_mc_panic_on_ue, int, 0644);
  78MODULE_PARM_DESC(edac_mc_panic_on_ue, "Panic on uncorrected error: 0=off 1=on");
  79module_param(edac_mc_log_ue, int, 0644);
  80MODULE_PARM_DESC(edac_mc_log_ue,
  81		 "Log uncorrectable error to console: 0=off 1=on");
  82module_param(edac_mc_log_ce, int, 0644);
  83MODULE_PARM_DESC(edac_mc_log_ce,
  84		 "Log correctable error to console: 0=off 1=on");
  85module_param_call(edac_mc_poll_msec, edac_set_poll_msec, param_get_uint,
  86		  &edac_mc_poll_msec, 0644);
  87MODULE_PARM_DESC(edac_mc_poll_msec, "Polling period in milliseconds");
  88
  89static struct device *mci_pdev;
  90
  91/*
  92 * various constants for Memory Controllers
  93 */
  94static const char * const dev_types[] = {
  95	[DEV_UNKNOWN] = "Unknown",
  96	[DEV_X1] = "x1",
  97	[DEV_X2] = "x2",
  98	[DEV_X4] = "x4",
  99	[DEV_X8] = "x8",
 100	[DEV_X16] = "x16",
 101	[DEV_X32] = "x32",
 102	[DEV_X64] = "x64"
 103};
 104
 105static const char * const edac_caps[] = {
 106	[EDAC_UNKNOWN] = "Unknown",
 107	[EDAC_NONE] = "None",
 108	[EDAC_RESERVED] = "Reserved",
 109	[EDAC_PARITY] = "PARITY",
 110	[EDAC_EC] = "EC",
 111	[EDAC_SECDED] = "SECDED",
 112	[EDAC_S2ECD2ED] = "S2ECD2ED",
 113	[EDAC_S4ECD4ED] = "S4ECD4ED",
 114	[EDAC_S8ECD8ED] = "S8ECD8ED",
 115	[EDAC_S16ECD16ED] = "S16ECD16ED"
 116};
 117
 118#ifdef CONFIG_EDAC_LEGACY_SYSFS
 119/*
 120 * EDAC sysfs CSROW data structures and methods
 121 */
 122
 123#define to_csrow(k) container_of(k, struct csrow_info, dev)
 124
 125/*
 126 * We need it to avoid namespace conflicts between the legacy API
 127 * and the per-dimm/per-rank one
 128 */
 129#define DEVICE_ATTR_LEGACY(_name, _mode, _show, _store) \
 130	static struct device_attribute dev_attr_legacy_##_name = __ATTR(_name, _mode, _show, _store)
 131
 132struct dev_ch_attribute {
 133	struct device_attribute attr;
 134	unsigned int channel;
 135};
 136
 137#define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \
 138	static struct dev_ch_attribute dev_attr_legacy_##_name = \
 139		{ __ATTR(_name, _mode, _show, _store), (_var) }
 140
 141#define to_channel(k) (container_of(k, struct dev_ch_attribute, attr)->channel)
 142
 143/* Set of more default csrow<id> attribute show/store functions */
 144static ssize_t csrow_ue_count_show(struct device *dev,
 145				   struct device_attribute *mattr, char *data)
 146{
 147	struct csrow_info *csrow = to_csrow(dev);
 148
 149	return sprintf(data, "%u\n", csrow->ue_count);
 150}
 151
 152static ssize_t csrow_ce_count_show(struct device *dev,
 153				   struct device_attribute *mattr, char *data)
 154{
 155	struct csrow_info *csrow = to_csrow(dev);
 156
 157	return sprintf(data, "%u\n", csrow->ce_count);
 158}
 159
 160static ssize_t csrow_size_show(struct device *dev,
 161			       struct device_attribute *mattr, char *data)
 162{
 163	struct csrow_info *csrow = to_csrow(dev);
 164	int i;
 165	u32 nr_pages = 0;
 166
 167	for (i = 0; i < csrow->nr_channels; i++)
 168		nr_pages += csrow->channels[i]->dimm->nr_pages;
 169	return sprintf(data, "%u\n", PAGES_TO_MiB(nr_pages));
 170}
 171
 172static ssize_t csrow_mem_type_show(struct device *dev,
 173				   struct device_attribute *mattr, char *data)
 174{
 175	struct csrow_info *csrow = to_csrow(dev);
 176
 177	return sprintf(data, "%s\n", edac_mem_types[csrow->channels[0]->dimm->mtype]);
 178}
 179
 180static ssize_t csrow_dev_type_show(struct device *dev,
 181				   struct device_attribute *mattr, char *data)
 182{
 183	struct csrow_info *csrow = to_csrow(dev);
 184
 185	return sprintf(data, "%s\n", dev_types[csrow->channels[0]->dimm->dtype]);
 186}
 187
 188static ssize_t csrow_edac_mode_show(struct device *dev,
 189				    struct device_attribute *mattr,
 190				    char *data)
 191{
 192	struct csrow_info *csrow = to_csrow(dev);
 193
 194	return sprintf(data, "%s\n", edac_caps[csrow->channels[0]->dimm->edac_mode]);
 195}
 196
 197/* show/store functions for DIMM Label attributes */
 198static ssize_t channel_dimm_label_show(struct device *dev,
 199				       struct device_attribute *mattr,
 200				       char *data)
 201{
 202	struct csrow_info *csrow = to_csrow(dev);
 203	unsigned int chan = to_channel(mattr);
 204	struct rank_info *rank = csrow->channels[chan];
 205
 206	/* if field has not been initialized, there is nothing to send */
 207	if (!rank->dimm->label[0])
 208		return 0;
 209
 210	return snprintf(data, sizeof(rank->dimm->label) + 1, "%s\n",
 211			rank->dimm->label);
 212}
 213
 214static ssize_t channel_dimm_label_store(struct device *dev,
 215					struct device_attribute *mattr,
 216					const char *data, size_t count)
 217{
 218	struct csrow_info *csrow = to_csrow(dev);
 219	unsigned int chan = to_channel(mattr);
 220	struct rank_info *rank = csrow->channels[chan];
 221	size_t copy_count = count;
 222
 223	if (count == 0)
 224		return -EINVAL;
 225
 226	if (data[count - 1] == '\0' || data[count - 1] == '\n')
 227		copy_count -= 1;
 228
 229	if (copy_count == 0 || copy_count >= sizeof(rank->dimm->label))
 230		return -EINVAL;
 231
 232	memcpy(rank->dimm->label, data, copy_count);
 233	rank->dimm->label[copy_count] = '\0';
 234
 235	return count;
 236}
 237
 238/* show function for dynamic chX_ce_count attribute */
 239static ssize_t channel_ce_count_show(struct device *dev,
 240				     struct device_attribute *mattr, char *data)
 241{
 242	struct csrow_info *csrow = to_csrow(dev);
 243	unsigned int chan = to_channel(mattr);
 244	struct rank_info *rank = csrow->channels[chan];
 245
 246	return sprintf(data, "%u\n", rank->ce_count);
 247}
 248
 249/* csrow<id> attribute files */
 250DEVICE_ATTR_LEGACY(size_mb, S_IRUGO, csrow_size_show, NULL);
 251DEVICE_ATTR_LEGACY(dev_type, S_IRUGO, csrow_dev_type_show, NULL);
 252DEVICE_ATTR_LEGACY(mem_type, S_IRUGO, csrow_mem_type_show, NULL);
 253DEVICE_ATTR_LEGACY(edac_mode, S_IRUGO, csrow_edac_mode_show, NULL);
 254DEVICE_ATTR_LEGACY(ue_count, S_IRUGO, csrow_ue_count_show, NULL);
 255DEVICE_ATTR_LEGACY(ce_count, S_IRUGO, csrow_ce_count_show, NULL);
 256
 257/* default attributes of the CSROW<id> object */
 258static struct attribute *csrow_attrs[] = {
 259	&dev_attr_legacy_dev_type.attr,
 260	&dev_attr_legacy_mem_type.attr,
 261	&dev_attr_legacy_edac_mode.attr,
 262	&dev_attr_legacy_size_mb.attr,
 263	&dev_attr_legacy_ue_count.attr,
 264	&dev_attr_legacy_ce_count.attr,
 265	NULL,
 266};
 267
 268static const struct attribute_group csrow_attr_grp = {
 269	.attrs	= csrow_attrs,
 270};
 271
 272static const struct attribute_group *csrow_attr_groups[] = {
 273	&csrow_attr_grp,
 274	NULL
 275};
 276
 277static const struct device_type csrow_attr_type = {
 278	.groups		= csrow_attr_groups,
 279};
 280
 281/*
 282 * possible dynamic channel DIMM Label attribute files
 283 *
 284 */
 285DEVICE_CHANNEL(ch0_dimm_label, S_IRUGO | S_IWUSR,
 286	channel_dimm_label_show, channel_dimm_label_store, 0);
 287DEVICE_CHANNEL(ch1_dimm_label, S_IRUGO | S_IWUSR,
 288	channel_dimm_label_show, channel_dimm_label_store, 1);
 289DEVICE_CHANNEL(ch2_dimm_label, S_IRUGO | S_IWUSR,
 290	channel_dimm_label_show, channel_dimm_label_store, 2);
 291DEVICE_CHANNEL(ch3_dimm_label, S_IRUGO | S_IWUSR,
 292	channel_dimm_label_show, channel_dimm_label_store, 3);
 293DEVICE_CHANNEL(ch4_dimm_label, S_IRUGO | S_IWUSR,
 294	channel_dimm_label_show, channel_dimm_label_store, 4);
 295DEVICE_CHANNEL(ch5_dimm_label, S_IRUGO | S_IWUSR,
 296	channel_dimm_label_show, channel_dimm_label_store, 5);
 297DEVICE_CHANNEL(ch6_dimm_label, S_IRUGO | S_IWUSR,
 298	channel_dimm_label_show, channel_dimm_label_store, 6);
 299DEVICE_CHANNEL(ch7_dimm_label, S_IRUGO | S_IWUSR,
 300	channel_dimm_label_show, channel_dimm_label_store, 7);
 301DEVICE_CHANNEL(ch8_dimm_label, S_IRUGO | S_IWUSR,
 302	channel_dimm_label_show, channel_dimm_label_store, 8);
 303DEVICE_CHANNEL(ch9_dimm_label, S_IRUGO | S_IWUSR,
 304	channel_dimm_label_show, channel_dimm_label_store, 9);
 305DEVICE_CHANNEL(ch10_dimm_label, S_IRUGO | S_IWUSR,
 306	channel_dimm_label_show, channel_dimm_label_store, 10);
 307DEVICE_CHANNEL(ch11_dimm_label, S_IRUGO | S_IWUSR,
 308	channel_dimm_label_show, channel_dimm_label_store, 11);
 309
 310/* Total possible dynamic DIMM Label attribute file table */
 311static struct attribute *dynamic_csrow_dimm_attr[] = {
 312	&dev_attr_legacy_ch0_dimm_label.attr.attr,
 313	&dev_attr_legacy_ch1_dimm_label.attr.attr,
 314	&dev_attr_legacy_ch2_dimm_label.attr.attr,
 315	&dev_attr_legacy_ch3_dimm_label.attr.attr,
 316	&dev_attr_legacy_ch4_dimm_label.attr.attr,
 317	&dev_attr_legacy_ch5_dimm_label.attr.attr,
 318	&dev_attr_legacy_ch6_dimm_label.attr.attr,
 319	&dev_attr_legacy_ch7_dimm_label.attr.attr,
 320	&dev_attr_legacy_ch8_dimm_label.attr.attr,
 321	&dev_attr_legacy_ch9_dimm_label.attr.attr,
 322	&dev_attr_legacy_ch10_dimm_label.attr.attr,
 323	&dev_attr_legacy_ch11_dimm_label.attr.attr,
 324	NULL
 325};
 326
 327/* possible dynamic channel ce_count attribute files */
 328DEVICE_CHANNEL(ch0_ce_count, S_IRUGO,
 329		   channel_ce_count_show, NULL, 0);
 330DEVICE_CHANNEL(ch1_ce_count, S_IRUGO,
 331		   channel_ce_count_show, NULL, 1);
 332DEVICE_CHANNEL(ch2_ce_count, S_IRUGO,
 333		   channel_ce_count_show, NULL, 2);
 334DEVICE_CHANNEL(ch3_ce_count, S_IRUGO,
 335		   channel_ce_count_show, NULL, 3);
 336DEVICE_CHANNEL(ch4_ce_count, S_IRUGO,
 337		   channel_ce_count_show, NULL, 4);
 338DEVICE_CHANNEL(ch5_ce_count, S_IRUGO,
 339		   channel_ce_count_show, NULL, 5);
 340DEVICE_CHANNEL(ch6_ce_count, S_IRUGO,
 341		   channel_ce_count_show, NULL, 6);
 342DEVICE_CHANNEL(ch7_ce_count, S_IRUGO,
 343		   channel_ce_count_show, NULL, 7);
 344DEVICE_CHANNEL(ch8_ce_count, S_IRUGO,
 345		   channel_ce_count_show, NULL, 8);
 346DEVICE_CHANNEL(ch9_ce_count, S_IRUGO,
 347		   channel_ce_count_show, NULL, 9);
 348DEVICE_CHANNEL(ch10_ce_count, S_IRUGO,
 349		   channel_ce_count_show, NULL, 10);
 350DEVICE_CHANNEL(ch11_ce_count, S_IRUGO,
 351		   channel_ce_count_show, NULL, 11);
 352
 353/* Total possible dynamic ce_count attribute file table */
 354static struct attribute *dynamic_csrow_ce_count_attr[] = {
 355	&dev_attr_legacy_ch0_ce_count.attr.attr,
 356	&dev_attr_legacy_ch1_ce_count.attr.attr,
 357	&dev_attr_legacy_ch2_ce_count.attr.attr,
 358	&dev_attr_legacy_ch3_ce_count.attr.attr,
 359	&dev_attr_legacy_ch4_ce_count.attr.attr,
 360	&dev_attr_legacy_ch5_ce_count.attr.attr,
 361	&dev_attr_legacy_ch6_ce_count.attr.attr,
 362	&dev_attr_legacy_ch7_ce_count.attr.attr,
 363	&dev_attr_legacy_ch8_ce_count.attr.attr,
 364	&dev_attr_legacy_ch9_ce_count.attr.attr,
 365	&dev_attr_legacy_ch10_ce_count.attr.attr,
 366	&dev_attr_legacy_ch11_ce_count.attr.attr,
 367	NULL
 368};
 369
 370static umode_t csrow_dev_is_visible(struct kobject *kobj,
 371				    struct attribute *attr, int idx)
 372{
 373	struct device *dev = kobj_to_dev(kobj);
 374	struct csrow_info *csrow = container_of(dev, struct csrow_info, dev);
 375
 376	if (idx >= csrow->nr_channels)
 377		return 0;
 378
 379	if (idx >= ARRAY_SIZE(dynamic_csrow_ce_count_attr) - 1) {
 380		WARN_ONCE(1, "idx: %d\n", idx);
 381		return 0;
 382	}
 383
 384	/* Only expose populated DIMMs */
 385	if (!csrow->channels[idx]->dimm->nr_pages)
 386		return 0;
 387
 388	return attr->mode;
 389}
 390
 391
 392static const struct attribute_group csrow_dev_dimm_group = {
 393	.attrs = dynamic_csrow_dimm_attr,
 394	.is_visible = csrow_dev_is_visible,
 395};
 396
 397static const struct attribute_group csrow_dev_ce_count_group = {
 398	.attrs = dynamic_csrow_ce_count_attr,
 399	.is_visible = csrow_dev_is_visible,
 400};
 401
 402static const struct attribute_group *csrow_dev_groups[] = {
 403	&csrow_dev_dimm_group,
 404	&csrow_dev_ce_count_group,
 405	NULL
 406};
 407
 408static void csrow_release(struct device *dev)
 409{
 410	/*
 411	 * Nothing to do, just unregister sysfs here. The mci
 412	 * device owns the data and will also release it.
 413	 */
 414}
 415
 416static inline int nr_pages_per_csrow(struct csrow_info *csrow)
 417{
 418	int chan, nr_pages = 0;
 419
 420	for (chan = 0; chan < csrow->nr_channels; chan++)
 421		nr_pages += csrow->channels[chan]->dimm->nr_pages;
 422
 423	return nr_pages;
 424}
 425
 426/* Create a CSROW object under the specified edac_mc_device */
 427static int edac_create_csrow_object(struct mem_ctl_info *mci,
 428				    struct csrow_info *csrow, int index)
 429{
 430	int err;
 431
 432	csrow->dev.type = &csrow_attr_type;
 433	csrow->dev.groups = csrow_dev_groups;
 434	csrow->dev.release = csrow_release;
 435	device_initialize(&csrow->dev);
 436	csrow->dev.parent = &mci->dev;
 437	csrow->mci = mci;
 438	dev_set_name(&csrow->dev, "csrow%d", index);
 439	dev_set_drvdata(&csrow->dev, csrow);
 440
 441	err = device_add(&csrow->dev);
 442	if (err) {
 443		edac_dbg(1, "failure: create device %s\n", dev_name(&csrow->dev));
 444		put_device(&csrow->dev);
 445		return err;
 446	}
 447
 448	edac_dbg(0, "device %s created\n", dev_name(&csrow->dev));
 449
 450	return 0;
 451}
 452
 453/* Create CSROW objects under the specified edac_mc_device */
 454static int edac_create_csrow_objects(struct mem_ctl_info *mci)
 455{
 456	int err, i;
 457	struct csrow_info *csrow;
 458
 459	for (i = 0; i < mci->nr_csrows; i++) {
 460		csrow = mci->csrows[i];
 461		if (!nr_pages_per_csrow(csrow))
 462			continue;
 463		err = edac_create_csrow_object(mci, mci->csrows[i], i);
 464		if (err < 0)
 465			goto error;
 466	}
 467	return 0;
 468
 469error:
 470	for (--i; i >= 0; i--) {
 471		if (device_is_registered(&mci->csrows[i]->dev))
 472			device_unregister(&mci->csrows[i]->dev);
 473	}
 474
 475	return err;
 476}
 477
 478static void edac_delete_csrow_objects(struct mem_ctl_info *mci)
 479{
 480	int i;
 481
 482	for (i = 0; i < mci->nr_csrows; i++) {
 483		if (device_is_registered(&mci->csrows[i]->dev))
 484			device_unregister(&mci->csrows[i]->dev);
 485	}
 486}
 487
 488#endif
 489
 490/*
 491 * Per-dimm (or per-rank) devices
 492 */
 493
 494#define to_dimm(k) container_of(k, struct dimm_info, dev)
 495
 496/* show/store functions for DIMM Label attributes */
 497static ssize_t dimmdev_location_show(struct device *dev,
 498				     struct device_attribute *mattr, char *data)
 499{
 500	struct dimm_info *dimm = to_dimm(dev);
 501	ssize_t count;
 502
 503	count = edac_dimm_info_location(dimm, data, PAGE_SIZE);
 504	count += scnprintf(data + count, PAGE_SIZE - count, "\n");
 505
 506	return count;
 507}
 508
 509static ssize_t dimmdev_label_show(struct device *dev,
 510				  struct device_attribute *mattr, char *data)
 511{
 512	struct dimm_info *dimm = to_dimm(dev);
 513
 514	/* if field has not been initialized, there is nothing to send */
 515	if (!dimm->label[0])
 516		return 0;
 517
 518	return snprintf(data, sizeof(dimm->label) + 1, "%s\n", dimm->label);
 519}
 520
 521static ssize_t dimmdev_label_store(struct device *dev,
 522				   struct device_attribute *mattr,
 523				   const char *data,
 524				   size_t count)
 525{
 526	struct dimm_info *dimm = to_dimm(dev);
 527	size_t copy_count = count;
 528
 529	if (count == 0)
 530		return -EINVAL;
 531
 532	if (data[count - 1] == '\0' || data[count - 1] == '\n')
 533		copy_count -= 1;
 534
 535	if (copy_count == 0 || copy_count >= sizeof(dimm->label))
 536		return -EINVAL;
 537
 538	memcpy(dimm->label, data, copy_count);
 539	dimm->label[copy_count] = '\0';
 540
 541	return count;
 542}
 543
 544static ssize_t dimmdev_size_show(struct device *dev,
 545				 struct device_attribute *mattr, char *data)
 546{
 547	struct dimm_info *dimm = to_dimm(dev);
 548
 549	return sprintf(data, "%u\n", PAGES_TO_MiB(dimm->nr_pages));
 550}
 551
 552static ssize_t dimmdev_mem_type_show(struct device *dev,
 553				     struct device_attribute *mattr, char *data)
 554{
 555	struct dimm_info *dimm = to_dimm(dev);
 556
 557	return sprintf(data, "%s\n", edac_mem_types[dimm->mtype]);
 558}
 559
 560static ssize_t dimmdev_dev_type_show(struct device *dev,
 561				     struct device_attribute *mattr, char *data)
 562{
 563	struct dimm_info *dimm = to_dimm(dev);
 564
 565	return sprintf(data, "%s\n", dev_types[dimm->dtype]);
 566}
 567
 568static ssize_t dimmdev_edac_mode_show(struct device *dev,
 569				      struct device_attribute *mattr,
 570				      char *data)
 571{
 572	struct dimm_info *dimm = to_dimm(dev);
 573
 574	return sprintf(data, "%s\n", edac_caps[dimm->edac_mode]);
 575}
 576
 577static ssize_t dimmdev_ce_count_show(struct device *dev,
 578				      struct device_attribute *mattr,
 579				      char *data)
 580{
 581	struct dimm_info *dimm = to_dimm(dev);
 582
 583	return sprintf(data, "%u\n", dimm->ce_count);
 584}
 585
 586static ssize_t dimmdev_ue_count_show(struct device *dev,
 587				      struct device_attribute *mattr,
 588				      char *data)
 589{
 590	struct dimm_info *dimm = to_dimm(dev);
 591
 592	return sprintf(data, "%u\n", dimm->ue_count);
 593}
 594
 595/* dimm/rank attribute files */
 596static DEVICE_ATTR(dimm_label, S_IRUGO | S_IWUSR,
 597		   dimmdev_label_show, dimmdev_label_store);
 598static DEVICE_ATTR(dimm_location, S_IRUGO, dimmdev_location_show, NULL);
 599static DEVICE_ATTR(size, S_IRUGO, dimmdev_size_show, NULL);
 600static DEVICE_ATTR(dimm_mem_type, S_IRUGO, dimmdev_mem_type_show, NULL);
 601static DEVICE_ATTR(dimm_dev_type, S_IRUGO, dimmdev_dev_type_show, NULL);
 602static DEVICE_ATTR(dimm_edac_mode, S_IRUGO, dimmdev_edac_mode_show, NULL);
 603static DEVICE_ATTR(dimm_ce_count, S_IRUGO, dimmdev_ce_count_show, NULL);
 604static DEVICE_ATTR(dimm_ue_count, S_IRUGO, dimmdev_ue_count_show, NULL);
 605
 606/* attributes of the dimm<id>/rank<id> object */
 607static struct attribute *dimm_attrs[] = {
 608	&dev_attr_dimm_label.attr,
 609	&dev_attr_dimm_location.attr,
 610	&dev_attr_size.attr,
 611	&dev_attr_dimm_mem_type.attr,
 612	&dev_attr_dimm_dev_type.attr,
 613	&dev_attr_dimm_edac_mode.attr,
 614	&dev_attr_dimm_ce_count.attr,
 615	&dev_attr_dimm_ue_count.attr,
 616	NULL,
 617};
 618
 619static const struct attribute_group dimm_attr_grp = {
 620	.attrs	= dimm_attrs,
 621};
 622
 623static const struct attribute_group *dimm_attr_groups[] = {
 624	&dimm_attr_grp,
 625	NULL
 626};
 627
 628static const struct device_type dimm_attr_type = {
 629	.groups		= dimm_attr_groups,
 630};
 631
 632static void dimm_release(struct device *dev)
 633{
 634	/*
 635	 * Nothing to do, just unregister sysfs here. The mci
 636	 * device owns the data and will also release it.
 637	 */
 638}
 639
 640/* Create a DIMM object under the specified memory controller device */
 641static int edac_create_dimm_object(struct mem_ctl_info *mci,
 642				   struct dimm_info *dimm)
 643{
 644	int err;
 645	dimm->mci = mci;
 646
 647	dimm->dev.type = &dimm_attr_type;
 648	dimm->dev.release = dimm_release;
 649	device_initialize(&dimm->dev);
 650
 651	dimm->dev.parent = &mci->dev;
 652	if (mci->csbased)
 653		dev_set_name(&dimm->dev, "rank%d", dimm->idx);
 654	else
 655		dev_set_name(&dimm->dev, "dimm%d", dimm->idx);
 656	dev_set_drvdata(&dimm->dev, dimm);
 657	pm_runtime_forbid(&mci->dev);
 658
 659	err = device_add(&dimm->dev);
 660	if (err) {
 661		edac_dbg(1, "failure: create device %s\n", dev_name(&dimm->dev));
 662		put_device(&dimm->dev);
 663		return err;
 664	}
 665
 666	if (IS_ENABLED(CONFIG_EDAC_DEBUG)) {
 667		char location[80];
 668
 669		edac_dimm_info_location(dimm, location, sizeof(location));
 670		edac_dbg(0, "device %s created at location %s\n",
 671			dev_name(&dimm->dev), location);
 672	}
 673
 674	return 0;
 675}
 676
 677/*
 678 * Memory controller device
 679 */
 680
 681#define to_mci(k) container_of(k, struct mem_ctl_info, dev)
 682
 683static ssize_t mci_reset_counters_store(struct device *dev,
 684					struct device_attribute *mattr,
 685					const char *data, size_t count)
 686{
 687	struct mem_ctl_info *mci = to_mci(dev);
 688	struct dimm_info *dimm;
 689	int row, chan;
 690
 691	mci->ue_mc = 0;
 692	mci->ce_mc = 0;
 693	mci->ue_noinfo_count = 0;
 694	mci->ce_noinfo_count = 0;
 695
 696	for (row = 0; row < mci->nr_csrows; row++) {
 697		struct csrow_info *ri = mci->csrows[row];
 698
 699		ri->ue_count = 0;
 700		ri->ce_count = 0;
 701
 702		for (chan = 0; chan < ri->nr_channels; chan++)
 703			ri->channels[chan]->ce_count = 0;
 704	}
 705
 706	mci_for_each_dimm(mci, dimm) {
 707		dimm->ue_count = 0;
 708		dimm->ce_count = 0;
 709	}
 710
 711	mci->start_time = jiffies;
 712	return count;
 713}
 714
 715/* Memory scrubbing interface:
 716 *
 717 * A MC driver can limit the scrubbing bandwidth based on the CPU type.
 718 * Therefore, ->set_sdram_scrub_rate should be made to return the actual
 719 * bandwidth that is accepted or 0 when scrubbing is to be disabled.
 720 *
 721 * Negative value still means that an error has occurred while setting
 722 * the scrub rate.
 723 */
 724static ssize_t mci_sdram_scrub_rate_store(struct device *dev,
 725					  struct device_attribute *mattr,
 726					  const char *data, size_t count)
 727{
 728	struct mem_ctl_info *mci = to_mci(dev);
 729	unsigned long bandwidth = 0;
 730	int new_bw = 0;
 731
 732	if (kstrtoul(data, 10, &bandwidth) < 0)
 733		return -EINVAL;
 734
 735	new_bw = mci->set_sdram_scrub_rate(mci, bandwidth);
 736	if (new_bw < 0) {
 737		edac_printk(KERN_WARNING, EDAC_MC,
 738			    "Error setting scrub rate to: %lu\n", bandwidth);
 739		return -EINVAL;
 740	}
 741
 742	return count;
 743}
 744
 745/*
 746 * ->get_sdram_scrub_rate() return value semantics same as above.
 747 */
 748static ssize_t mci_sdram_scrub_rate_show(struct device *dev,
 749					 struct device_attribute *mattr,
 750					 char *data)
 751{
 752	struct mem_ctl_info *mci = to_mci(dev);
 753	int bandwidth = 0;
 754
 755	bandwidth = mci->get_sdram_scrub_rate(mci);
 756	if (bandwidth < 0) {
 757		edac_printk(KERN_DEBUG, EDAC_MC, "Error reading scrub rate\n");
 758		return bandwidth;
 759	}
 760
 761	return sprintf(data, "%d\n", bandwidth);
 762}
 763
 764/* default attribute files for the MCI object */
 765static ssize_t mci_ue_count_show(struct device *dev,
 766				 struct device_attribute *mattr,
 767				 char *data)
 768{
 769	struct mem_ctl_info *mci = to_mci(dev);
 770
 771	return sprintf(data, "%u\n", mci->ue_mc);
 772}
 773
 774static ssize_t mci_ce_count_show(struct device *dev,
 775				 struct device_attribute *mattr,
 776				 char *data)
 777{
 778	struct mem_ctl_info *mci = to_mci(dev);
 779
 780	return sprintf(data, "%u\n", mci->ce_mc);
 781}
 782
 783static ssize_t mci_ce_noinfo_show(struct device *dev,
 784				  struct device_attribute *mattr,
 785				  char *data)
 786{
 787	struct mem_ctl_info *mci = to_mci(dev);
 788
 789	return sprintf(data, "%u\n", mci->ce_noinfo_count);
 790}
 791
 792static ssize_t mci_ue_noinfo_show(struct device *dev,
 793				  struct device_attribute *mattr,
 794				  char *data)
 795{
 796	struct mem_ctl_info *mci = to_mci(dev);
 797
 798	return sprintf(data, "%u\n", mci->ue_noinfo_count);
 799}
 800
 801static ssize_t mci_seconds_show(struct device *dev,
 802				struct device_attribute *mattr,
 803				char *data)
 804{
 805	struct mem_ctl_info *mci = to_mci(dev);
 806
 807	return sprintf(data, "%ld\n", (jiffies - mci->start_time) / HZ);
 808}
 809
 810static ssize_t mci_ctl_name_show(struct device *dev,
 811				 struct device_attribute *mattr,
 812				 char *data)
 813{
 814	struct mem_ctl_info *mci = to_mci(dev);
 815
 816	return sprintf(data, "%s\n", mci->ctl_name);
 817}
 818
 819static ssize_t mci_size_mb_show(struct device *dev,
 820				struct device_attribute *mattr,
 821				char *data)
 822{
 823	struct mem_ctl_info *mci = to_mci(dev);
 824	int total_pages = 0, csrow_idx, j;
 825
 826	for (csrow_idx = 0; csrow_idx < mci->nr_csrows; csrow_idx++) {
 827		struct csrow_info *csrow = mci->csrows[csrow_idx];
 828
 829		for (j = 0; j < csrow->nr_channels; j++) {
 830			struct dimm_info *dimm = csrow->channels[j]->dimm;
 831
 832			total_pages += dimm->nr_pages;
 833		}
 834	}
 835
 836	return sprintf(data, "%u\n", PAGES_TO_MiB(total_pages));
 837}
 838
 839static ssize_t mci_max_location_show(struct device *dev,
 840				     struct device_attribute *mattr,
 841				     char *data)
 842{
 843	struct mem_ctl_info *mci = to_mci(dev);
 844	int len = PAGE_SIZE;
 845	char *p = data;
 846	int i, n;
 847
 848	for (i = 0; i < mci->n_layers; i++) {
 849		n = scnprintf(p, len, "%s %d ",
 850			      edac_layer_name[mci->layers[i].type],
 851			      mci->layers[i].size - 1);
 852		len -= n;
 853		if (len <= 0)
 854			goto out;
 855
 856		p += n;
 857	}
 858
 859	p += scnprintf(p, len, "\n");
 860out:
 861	return p - data;
 862}
 863
 864/* default Control file */
 865static DEVICE_ATTR(reset_counters, S_IWUSR, NULL, mci_reset_counters_store);
 866
 867/* default Attribute files */
 868static DEVICE_ATTR(mc_name, S_IRUGO, mci_ctl_name_show, NULL);
 869static DEVICE_ATTR(size_mb, S_IRUGO, mci_size_mb_show, NULL);
 870static DEVICE_ATTR(seconds_since_reset, S_IRUGO, mci_seconds_show, NULL);
 871static DEVICE_ATTR(ue_noinfo_count, S_IRUGO, mci_ue_noinfo_show, NULL);
 872static DEVICE_ATTR(ce_noinfo_count, S_IRUGO, mci_ce_noinfo_show, NULL);
 873static DEVICE_ATTR(ue_count, S_IRUGO, mci_ue_count_show, NULL);
 874static DEVICE_ATTR(ce_count, S_IRUGO, mci_ce_count_show, NULL);
 875static DEVICE_ATTR(max_location, S_IRUGO, mci_max_location_show, NULL);
 876
 877/* memory scrubber attribute file */
 878static DEVICE_ATTR(sdram_scrub_rate, 0, mci_sdram_scrub_rate_show,
 879	    mci_sdram_scrub_rate_store); /* umode set later in is_visible */
 880
 881static struct attribute *mci_attrs[] = {
 882	&dev_attr_reset_counters.attr,
 883	&dev_attr_mc_name.attr,
 884	&dev_attr_size_mb.attr,
 885	&dev_attr_seconds_since_reset.attr,
 886	&dev_attr_ue_noinfo_count.attr,
 887	&dev_attr_ce_noinfo_count.attr,
 888	&dev_attr_ue_count.attr,
 889	&dev_attr_ce_count.attr,
 890	&dev_attr_max_location.attr,
 891	&dev_attr_sdram_scrub_rate.attr,
 892	NULL
 893};
 894
 895static umode_t mci_attr_is_visible(struct kobject *kobj,
 896				   struct attribute *attr, int idx)
 897{
 898	struct device *dev = kobj_to_dev(kobj);
 899	struct mem_ctl_info *mci = to_mci(dev);
 900	umode_t mode = 0;
 901
 902	if (attr != &dev_attr_sdram_scrub_rate.attr)
 903		return attr->mode;
 904	if (mci->get_sdram_scrub_rate)
 905		mode |= S_IRUGO;
 906	if (mci->set_sdram_scrub_rate)
 907		mode |= S_IWUSR;
 908	return mode;
 909}
 910
 911static const struct attribute_group mci_attr_grp = {
 912	.attrs	= mci_attrs,
 913	.is_visible = mci_attr_is_visible,
 914};
 915
 916static const struct attribute_group *mci_attr_groups[] = {
 917	&mci_attr_grp,
 918	NULL
 919};
 920
 921static const struct device_type mci_attr_type = {
 922	.groups		= mci_attr_groups,
 923};
 924
 925/*
 926 * Create a new Memory Controller kobject instance,
 927 *	mc<id> under the 'mc' directory
 928 *
 929 * Return:
 930 *	0	Success
 931 *	!0	Failure
 932 */
 933int edac_create_sysfs_mci_device(struct mem_ctl_info *mci,
 934				 const struct attribute_group **groups)
 935{
 936	struct dimm_info *dimm;
 937	int err;
 938
 939	/* get the /sys/devices/system/edac subsys reference */
 940	mci->dev.type = &mci_attr_type;
 941	mci->dev.parent = mci_pdev;
 942	mci->dev.groups = groups;
 943	dev_set_name(&mci->dev, "mc%d", mci->mc_idx);
 944	dev_set_drvdata(&mci->dev, mci);
 945	pm_runtime_forbid(&mci->dev);
 946
 947	err = device_add(&mci->dev);
 948	if (err < 0) {
 949		edac_dbg(1, "failure: create device %s\n", dev_name(&mci->dev));
 950		/* no put_device() here, free mci with _edac_mc_free() */
 951		return err;
 952	}
 953
 954	edac_dbg(0, "device %s created\n", dev_name(&mci->dev));
 955
 956	/*
 957	 * Create the dimm/rank devices
 958	 */
 959	mci_for_each_dimm(mci, dimm) {
 960		/* Only expose populated DIMMs */
 961		if (!dimm->nr_pages)
 962			continue;
 963
 964		err = edac_create_dimm_object(mci, dimm);
 965		if (err)
 966			goto fail;
 967	}
 968
 969#ifdef CONFIG_EDAC_LEGACY_SYSFS
 970	err = edac_create_csrow_objects(mci);
 971	if (err < 0)
 972		goto fail;
 973#endif
 974
 975	edac_create_debugfs_nodes(mci);
 976	return 0;
 977
 978fail:
 979	edac_remove_sysfs_mci_device(mci);
 980
 981	return err;
 982}
 983
 984/*
 985 * remove a Memory Controller instance
 986 */
 987void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
 988{
 989	struct dimm_info *dimm;
 990
 991	if (!device_is_registered(&mci->dev))
 992		return;
 993
 994	edac_dbg(0, "\n");
 995
 996#ifdef CONFIG_EDAC_DEBUG
 997	edac_debugfs_remove_recursive(mci->debugfs);
 998#endif
 999#ifdef CONFIG_EDAC_LEGACY_SYSFS
1000	edac_delete_csrow_objects(mci);
1001#endif
1002
1003	mci_for_each_dimm(mci, dimm) {
1004		if (!device_is_registered(&dimm->dev))
1005			continue;
1006		edac_dbg(1, "unregistering device %s\n", dev_name(&dimm->dev));
1007		device_unregister(&dimm->dev);
1008	}
1009
1010	/* only remove the device, but keep mci */
1011	device_del(&mci->dev);
1012}
1013
1014static void mc_attr_release(struct device *dev)
1015{
1016	/*
1017	 * There's no container structure here, as this is just the mci
1018	 * parent device, used to create the /sys/devices/system/edac/mc sysfs node.
1019	 * So, there are no attributes on it.
1020	 */
1021	edac_dbg(1, "device %s released\n", dev_name(dev));
1022	kfree(dev);
1023}
1024
1025/*
1026 * Init/exit code for the module. Basically, creates/removes the /sys/devices/system/edac/mc sysfs node
1027 */
1028int __init edac_mc_sysfs_init(void)
1029{
1030	int err;
1031
1032	mci_pdev = kzalloc(sizeof(*mci_pdev), GFP_KERNEL);
1033	if (!mci_pdev)
1034		return -ENOMEM;
1035
1036	mci_pdev->bus = edac_get_sysfs_subsys();
1037	mci_pdev->release = mc_attr_release;
1038	mci_pdev->init_name = "mc";
1039
1040	err = device_register(mci_pdev);
1041	if (err < 0) {
1042		edac_dbg(1, "failure: create device %s\n", dev_name(mci_pdev));
1043		put_device(mci_pdev);
1044		return err;
1045	}
1046
1047	edac_dbg(0, "device %s created\n", dev_name(mci_pdev));
1048
1049	return 0;
1050}
1051
1052void edac_mc_sysfs_exit(void)
1053{
1054	device_unregister(mci_pdev);
1055}
drivers/edac/edac_mc_sysfs.c (v4.6)
   1/*
   2 * edac_mc kernel module
   3 * (C) 2005-2007 Linux Networx (http://lnxi.com)
   4 *
   5 * This file may be distributed under the terms of the
   6 * GNU General Public License.
   7 *
   8 * Written by Doug Thompson <norsk5@xmission.com> www.softwarebitmaker.com
   9 *
  10 * (c) 2012-2013 - Mauro Carvalho Chehab
  11 *	The entire API was re-written and ported to use struct device
  12 *
  13 */
  14
  15#include <linux/ctype.h>
  16#include <linux/slab.h>
  17#include <linux/edac.h>
  18#include <linux/bug.h>
  19#include <linux/pm_runtime.h>
  20#include <linux/uaccess.h>
  21
  22#include "edac_core.h"
  23#include "edac_module.h"
  24
  25/* MC EDAC Controls, settable by module parameter and sysfs */
  26static int edac_mc_log_ue = 1;
  27static int edac_mc_log_ce = 1;
  28static int edac_mc_panic_on_ue;
  29static int edac_mc_poll_msec = 1000;
  30
  31/* Getter functions for above */
  32int edac_mc_get_log_ue(void)
  33{
  34	return edac_mc_log_ue;
  35}
  36
  37int edac_mc_get_log_ce(void)
  38{
  39	return edac_mc_log_ce;
  40}
  41
  42int edac_mc_get_panic_on_ue(void)
  43{
  44	return edac_mc_panic_on_ue;
  45}
  46
  47/* this is temporary */
  48int edac_mc_get_poll_msec(void)
  49{
  50	return edac_mc_poll_msec;
  51}
  52
  53static int edac_set_poll_msec(const char *val, struct kernel_param *kp)
  54{
  55	unsigned long l;
  56	int ret;
  57
  58	if (!val)
  59		return -EINVAL;
  60
  61	ret = kstrtoul(val, 0, &l);
  62	if (ret)
  63		return ret;
  64
  65	if (l < 1000)
  66		return -EINVAL;
  67
  68	*((unsigned long *)kp->arg) = l;
  69
  70	/* notify edac_mc engine to reset the poll period */
  71	edac_mc_reset_delay_period(l);
  72
  73	return 0;
  74}
  75
  76/* Parameter declarations for above */
  77module_param(edac_mc_panic_on_ue, int, 0644);
  78MODULE_PARM_DESC(edac_mc_panic_on_ue, "Panic on uncorrected error: 0=off 1=on");
  79module_param(edac_mc_log_ue, int, 0644);
  80MODULE_PARM_DESC(edac_mc_log_ue,
  81		 "Log uncorrectable error to console: 0=off 1=on");
  82module_param(edac_mc_log_ce, int, 0644);
  83MODULE_PARM_DESC(edac_mc_log_ce,
  84		 "Log correctable error to console: 0=off 1=on");
  85module_param_call(edac_mc_poll_msec, edac_set_poll_msec, param_get_int,
  86		  &edac_mc_poll_msec, 0644);
  87MODULE_PARM_DESC(edac_mc_poll_msec, "Polling period in milliseconds");
  88
  89static struct device *mci_pdev;
  90
  91/*
  92 * various constants for Memory Controllers
  93 */
  94static const char * const mem_types[] = {
  95	[MEM_EMPTY] = "Empty",
  96	[MEM_RESERVED] = "Reserved",
  97	[MEM_UNKNOWN] = "Unknown",
  98	[MEM_FPM] = "FPM",
  99	[MEM_EDO] = "EDO",
 100	[MEM_BEDO] = "BEDO",
 101	[MEM_SDR] = "Unbuffered-SDR",
 102	[MEM_RDR] = "Registered-SDR",
 103	[MEM_DDR] = "Unbuffered-DDR",
 104	[MEM_RDDR] = "Registered-DDR",
 105	[MEM_RMBS] = "RMBS",
 106	[MEM_DDR2] = "Unbuffered-DDR2",
 107	[MEM_FB_DDR2] = "FullyBuffered-DDR2",
 108	[MEM_RDDR2] = "Registered-DDR2",
 109	[MEM_XDR] = "XDR",
 110	[MEM_DDR3] = "Unbuffered-DDR3",
 111	[MEM_RDDR3] = "Registered-DDR3",
 112	[MEM_DDR4] = "Unbuffered-DDR4",
 113	[MEM_RDDR4] = "Registered-DDR4"
 114};
 115
 116static const char * const dev_types[] = {
 117	[DEV_UNKNOWN] = "Unknown",
 118	[DEV_X1] = "x1",
 119	[DEV_X2] = "x2",
 120	[DEV_X4] = "x4",
 121	[DEV_X8] = "x8",
 122	[DEV_X16] = "x16",
 123	[DEV_X32] = "x32",
 124	[DEV_X64] = "x64"
 125};
 126
 127static const char * const edac_caps[] = {
 128	[EDAC_UNKNOWN] = "Unknown",
 129	[EDAC_NONE] = "None",
 130	[EDAC_RESERVED] = "Reserved",
 131	[EDAC_PARITY] = "PARITY",
 132	[EDAC_EC] = "EC",
 133	[EDAC_SECDED] = "SECDED",
 134	[EDAC_S2ECD2ED] = "S2ECD2ED",
 135	[EDAC_S4ECD4ED] = "S4ECD4ED",
 136	[EDAC_S8ECD8ED] = "S8ECD8ED",
 137	[EDAC_S16ECD16ED] = "S16ECD16ED"
 138};
 139
 140#ifdef CONFIG_EDAC_LEGACY_SYSFS
 141/*
 142 * EDAC sysfs CSROW data structures and methods
 143 */
 144
 145#define to_csrow(k) container_of(k, struct csrow_info, dev)
 146
 147/*
 148 * We need it to avoid namespace conflicts between the legacy API
 149 * and the per-dimm/per-rank one
 150 */
 151#define DEVICE_ATTR_LEGACY(_name, _mode, _show, _store) \
 152	static struct device_attribute dev_attr_legacy_##_name = __ATTR(_name, _mode, _show, _store)
 153
 154struct dev_ch_attribute {
 155	struct device_attribute attr;
 156	int channel;
 157};
 158
 159#define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \
 160	static struct dev_ch_attribute dev_attr_legacy_##_name = \
 161		{ __ATTR(_name, _mode, _show, _store), (_var) }
 162
 163#define to_channel(k) (container_of(k, struct dev_ch_attribute, attr)->channel)
 164
 165/* Set of more default csrow<id> attribute show/store functions */
 166static ssize_t csrow_ue_count_show(struct device *dev,
 167				   struct device_attribute *mattr, char *data)
 168{
 169	struct csrow_info *csrow = to_csrow(dev);
 170
 171	return sprintf(data, "%u\n", csrow->ue_count);
 172}
 173
 174static ssize_t csrow_ce_count_show(struct device *dev,
 175				   struct device_attribute *mattr, char *data)
 176{
 177	struct csrow_info *csrow = to_csrow(dev);
 178
 179	return sprintf(data, "%u\n", csrow->ce_count);
 180}
 181
 182static ssize_t csrow_size_show(struct device *dev,
 183			       struct device_attribute *mattr, char *data)
 184{
 185	struct csrow_info *csrow = to_csrow(dev);
 186	int i;
 187	u32 nr_pages = 0;
 188
 189	for (i = 0; i < csrow->nr_channels; i++)
 190		nr_pages += csrow->channels[i]->dimm->nr_pages;
 191	return sprintf(data, "%u\n", PAGES_TO_MiB(nr_pages));
 192}
 193
 194static ssize_t csrow_mem_type_show(struct device *dev,
 195				   struct device_attribute *mattr, char *data)
 196{
 197	struct csrow_info *csrow = to_csrow(dev);
 198
 199	return sprintf(data, "%s\n", mem_types[csrow->channels[0]->dimm->mtype]);
 200}
 201
 202static ssize_t csrow_dev_type_show(struct device *dev,
 203				   struct device_attribute *mattr, char *data)
 204{
 205	struct csrow_info *csrow = to_csrow(dev);
 206
 207	return sprintf(data, "%s\n", dev_types[csrow->channels[0]->dimm->dtype]);
 208}
 209
 210static ssize_t csrow_edac_mode_show(struct device *dev,
 211				    struct device_attribute *mattr,
 212				    char *data)
 213{
 214	struct csrow_info *csrow = to_csrow(dev);
 215
 216	return sprintf(data, "%s\n", edac_caps[csrow->channels[0]->dimm->edac_mode]);
 217}
 218
 219/* show/store functions for DIMM Label attributes */
 220static ssize_t channel_dimm_label_show(struct device *dev,
 221				       struct device_attribute *mattr,
 222				       char *data)
 223{
 224	struct csrow_info *csrow = to_csrow(dev);
 225	unsigned chan = to_channel(mattr);
 226	struct rank_info *rank = csrow->channels[chan];
 227
 228	/* if field has not been initialized, there is nothing to send */
 229	if (!rank->dimm->label[0])
 230		return 0;
 231
 232	return snprintf(data, sizeof(rank->dimm->label) + 1, "%s\n",
 233			rank->dimm->label);
 234}
 235
 236static ssize_t channel_dimm_label_store(struct device *dev,
 237					struct device_attribute *mattr,
 238					const char *data, size_t count)
 239{
 240	struct csrow_info *csrow = to_csrow(dev);
 241	unsigned chan = to_channel(mattr);
 242	struct rank_info *rank = csrow->channels[chan];
 243	size_t copy_count = count;
 244
 245	if (count == 0)
 246		return -EINVAL;
 247
 248	if (data[count - 1] == '\0' || data[count - 1] == '\n')
 249		copy_count -= 1;
 250
 251	if (copy_count == 0 || copy_count >= sizeof(rank->dimm->label))
 252		return -EINVAL;
 253
 254	strncpy(rank->dimm->label, data, copy_count);
 255	rank->dimm->label[copy_count] = '\0';
 256
 257	return count;
 258}
 259
 260/* show function for dynamic chX_ce_count attribute */
 261static ssize_t channel_ce_count_show(struct device *dev,
 262				     struct device_attribute *mattr, char *data)
 263{
 264	struct csrow_info *csrow = to_csrow(dev);
 265	unsigned chan = to_channel(mattr);
 266	struct rank_info *rank = csrow->channels[chan];
 267
 268	return sprintf(data, "%u\n", rank->ce_count);
 269}
 270
 271/* csrow<id> attribute files */
 272DEVICE_ATTR_LEGACY(size_mb, S_IRUGO, csrow_size_show, NULL);
 273DEVICE_ATTR_LEGACY(dev_type, S_IRUGO, csrow_dev_type_show, NULL);
 274DEVICE_ATTR_LEGACY(mem_type, S_IRUGO, csrow_mem_type_show, NULL);
 275DEVICE_ATTR_LEGACY(edac_mode, S_IRUGO, csrow_edac_mode_show, NULL);
 276DEVICE_ATTR_LEGACY(ue_count, S_IRUGO, csrow_ue_count_show, NULL);
 277DEVICE_ATTR_LEGACY(ce_count, S_IRUGO, csrow_ce_count_show, NULL);
 278
 279/* default attributes of the CSROW<id> object */
 280static struct attribute *csrow_attrs[] = {
 281	&dev_attr_legacy_dev_type.attr,
 282	&dev_attr_legacy_mem_type.attr,
 283	&dev_attr_legacy_edac_mode.attr,
 284	&dev_attr_legacy_size_mb.attr,
 285	&dev_attr_legacy_ue_count.attr,
 286	&dev_attr_legacy_ce_count.attr,
 287	NULL,
 288};
 289
 290static struct attribute_group csrow_attr_grp = {
 291	.attrs	= csrow_attrs,
 292};
 293
 294static const struct attribute_group *csrow_attr_groups[] = {
 295	&csrow_attr_grp,
 296	NULL
 297};
 298
 299static void csrow_attr_release(struct device *dev)
 300{
 301	struct csrow_info *csrow = container_of(dev, struct csrow_info, dev);
 302
 303	edac_dbg(1, "Releasing csrow device %s\n", dev_name(dev));
 304	kfree(csrow);
 305}
 306
 307static struct device_type csrow_attr_type = {
 308	.groups		= csrow_attr_groups,
 309	.release	= csrow_attr_release,
 310};
 311
 312/*
 313 * possible dynamic channel DIMM Label attribute files
 314 *
 315 */
 316
 317DEVICE_CHANNEL(ch0_dimm_label, S_IRUGO | S_IWUSR,
 318	channel_dimm_label_show, channel_dimm_label_store, 0);
 319DEVICE_CHANNEL(ch1_dimm_label, S_IRUGO | S_IWUSR,
 320	channel_dimm_label_show, channel_dimm_label_store, 1);
 321DEVICE_CHANNEL(ch2_dimm_label, S_IRUGO | S_IWUSR,
 322	channel_dimm_label_show, channel_dimm_label_store, 2);
 323DEVICE_CHANNEL(ch3_dimm_label, S_IRUGO | S_IWUSR,
 324	channel_dimm_label_show, channel_dimm_label_store, 3);
 325DEVICE_CHANNEL(ch4_dimm_label, S_IRUGO | S_IWUSR,
 326	channel_dimm_label_show, channel_dimm_label_store, 4);
 327DEVICE_CHANNEL(ch5_dimm_label, S_IRUGO | S_IWUSR,
 328	channel_dimm_label_show, channel_dimm_label_store, 5);
 329
 330/* Total possible dynamic DIMM Label attribute file table */
 331static struct attribute *dynamic_csrow_dimm_attr[] = {
 332	&dev_attr_legacy_ch0_dimm_label.attr.attr,
 333	&dev_attr_legacy_ch1_dimm_label.attr.attr,
 334	&dev_attr_legacy_ch2_dimm_label.attr.attr,
 335	&dev_attr_legacy_ch3_dimm_label.attr.attr,
 336	&dev_attr_legacy_ch4_dimm_label.attr.attr,
 337	&dev_attr_legacy_ch5_dimm_label.attr.attr,
 338	NULL
 339};
 340
 341/* possible dynamic channel ce_count attribute files */
 342DEVICE_CHANNEL(ch0_ce_count, S_IRUGO,
 343		   channel_ce_count_show, NULL, 0);
 344DEVICE_CHANNEL(ch1_ce_count, S_IRUGO,
 345		   channel_ce_count_show, NULL, 1);
 346DEVICE_CHANNEL(ch2_ce_count, S_IRUGO,
 347		   channel_ce_count_show, NULL, 2);
 348DEVICE_CHANNEL(ch3_ce_count, S_IRUGO,
 349		   channel_ce_count_show, NULL, 3);
 350DEVICE_CHANNEL(ch4_ce_count, S_IRUGO,
 351		   channel_ce_count_show, NULL, 4);
 352DEVICE_CHANNEL(ch5_ce_count, S_IRUGO,
 353		   channel_ce_count_show, NULL, 5);
 354
 355/* Total possible dynamic ce_count attribute file table */
 356static struct attribute *dynamic_csrow_ce_count_attr[] = {
 357	&dev_attr_legacy_ch0_ce_count.attr.attr,
 358	&dev_attr_legacy_ch1_ce_count.attr.attr,
 359	&dev_attr_legacy_ch2_ce_count.attr.attr,
 360	&dev_attr_legacy_ch3_ce_count.attr.attr,
 361	&dev_attr_legacy_ch4_ce_count.attr.attr,
 362	&dev_attr_legacy_ch5_ce_count.attr.attr,
 363	NULL
 364};
 365
 366static umode_t csrow_dev_is_visible(struct kobject *kobj,
 367				    struct attribute *attr, int idx)
 368{
 369	struct device *dev = kobj_to_dev(kobj);
 370	struct csrow_info *csrow = container_of(dev, struct csrow_info, dev);
 371
 372	if (idx >= csrow->nr_channels)
 373		return 0;
 374	/* Only expose populated DIMMs */
 375	if (!csrow->channels[idx]->dimm->nr_pages)
 376		return 0;
 377	return attr->mode;
 378}
 379
 380
 381static const struct attribute_group csrow_dev_dimm_group = {
 382	.attrs = dynamic_csrow_dimm_attr,
 383	.is_visible = csrow_dev_is_visible,
 384};
 385
 386static const struct attribute_group csrow_dev_ce_count_group = {
 387	.attrs = dynamic_csrow_ce_count_attr,
 388	.is_visible = csrow_dev_is_visible,
 389};
 390
 391static const struct attribute_group *csrow_dev_groups[] = {
 392	&csrow_dev_dimm_group,
 393	&csrow_dev_ce_count_group,
 394	NULL
 395};
 396
 397static inline int nr_pages_per_csrow(struct csrow_info *csrow)
 398{
 399	int chan, nr_pages = 0;
 400
 401	for (chan = 0; chan < csrow->nr_channels; chan++)
 402		nr_pages += csrow->channels[chan]->dimm->nr_pages;
 403
 404	return nr_pages;
 405}
 406
 407/* Create a CSROW object under the specified edac_mc_device */
 408static int edac_create_csrow_object(struct mem_ctl_info *mci,
 409				    struct csrow_info *csrow, int index)
 410{
 411	csrow->dev.type = &csrow_attr_type;
 412	csrow->dev.bus = mci->bus;
 413	csrow->dev.groups = csrow_dev_groups;
 414	device_initialize(&csrow->dev);
 415	csrow->dev.parent = &mci->dev;
 416	csrow->mci = mci;
 417	dev_set_name(&csrow->dev, "csrow%d", index);
 418	dev_set_drvdata(&csrow->dev, csrow);
 419
 420	edac_dbg(0, "creating (virtual) csrow node %s\n",
 421		 dev_name(&csrow->dev));
 422
 423	return device_add(&csrow->dev);
 424}
 425
 426/* Create CSROW objects under the specified edac_mc_device */
 427static int edac_create_csrow_objects(struct mem_ctl_info *mci)
 428{
 429	int err, i;
 430	struct csrow_info *csrow;
 431
 432	for (i = 0; i < mci->nr_csrows; i++) {
 433		csrow = mci->csrows[i];
 434		if (!nr_pages_per_csrow(csrow))
 435			continue;
 436		err = edac_create_csrow_object(mci, mci->csrows[i], i);
 437		if (err < 0) {
 438			edac_dbg(1,
 439				 "failure: create csrow objects for csrow %d\n",
 440				 i);
 441			goto error;
 442		}
 443	}
 444	return 0;
 445
 446error:
 447	for (--i; i >= 0; i--) {
 448		csrow = mci->csrows[i];
 449		if (!nr_pages_per_csrow(csrow))
 450			continue;
 451		put_device(&mci->csrows[i]->dev);
 452	}
 453
 454	return err;
 455}
 456
 457static void edac_delete_csrow_objects(struct mem_ctl_info *mci)
 458{
 459	int i;
 460	struct csrow_info *csrow;
 461
 462	for (i = mci->nr_csrows - 1; i >= 0; i--) {
 463		csrow = mci->csrows[i];
 464		if (!nr_pages_per_csrow(csrow))
 465			continue;
 466		device_unregister(&mci->csrows[i]->dev);
 467	}
 468}
 469#endif
 470
 471/*
 472 * Per-dimm (or per-rank) devices
 473 */
 474
 475#define to_dimm(k) container_of(k, struct dimm_info, dev)
 476
 477/* show/store functions for DIMM Label attributes */
 478static ssize_t dimmdev_location_show(struct device *dev,
 479				     struct device_attribute *mattr, char *data)
 480{
 481	struct dimm_info *dimm = to_dimm(dev);
 482
 483	return edac_dimm_info_location(dimm, data, PAGE_SIZE);
 484}
 485
 486static ssize_t dimmdev_label_show(struct device *dev,
 487				  struct device_attribute *mattr, char *data)
 488{
 489	struct dimm_info *dimm = to_dimm(dev);
 490
 491	/* if field has not been initialized, there is nothing to send */
 492	if (!dimm->label[0])
 493		return 0;
 494
 495	return snprintf(data, sizeof(dimm->label) + 1, "%s\n", dimm->label);
 496}
 497
 498static ssize_t dimmdev_label_store(struct device *dev,
 499				   struct device_attribute *mattr,
 500				   const char *data,
 501				   size_t count)
 502{
 503	struct dimm_info *dimm = to_dimm(dev);
 504	size_t copy_count = count;
 505
 506	if (count == 0)
 507		return -EINVAL;
 508
 509	if (data[count - 1] == '\0' || data[count - 1] == '\n')
 510		copy_count -= 1;
 511
 512	if (copy_count == 0 || copy_count >= sizeof(dimm->label))
 513		return -EINVAL;
 514
 515	strncpy(dimm->label, data, copy_count);
 516	dimm->label[copy_count] = '\0';
 517
 518	return count;
 519}
 520
 521static ssize_t dimmdev_size_show(struct device *dev,
 522				 struct device_attribute *mattr, char *data)
 523{
 524	struct dimm_info *dimm = to_dimm(dev);
 525
 526	return sprintf(data, "%u\n", PAGES_TO_MiB(dimm->nr_pages));
 527}
 528
 529static ssize_t dimmdev_mem_type_show(struct device *dev,
 530				     struct device_attribute *mattr, char *data)
 531{
 532	struct dimm_info *dimm = to_dimm(dev);
 533
 534	return sprintf(data, "%s\n", mem_types[dimm->mtype]);
 535}
 536
 537static ssize_t dimmdev_dev_type_show(struct device *dev,
 538				     struct device_attribute *mattr, char *data)
 539{
 540	struct dimm_info *dimm = to_dimm(dev);
 541
 542	return sprintf(data, "%s\n", dev_types[dimm->dtype]);
 543}
 544
 545static ssize_t dimmdev_edac_mode_show(struct device *dev,
 546				      struct device_attribute *mattr,
 547				      char *data)
 548{
 549	struct dimm_info *dimm = to_dimm(dev);
 550
 551	return sprintf(data, "%s\n", edac_caps[dimm->edac_mode]);
 552}
 553
 554/* dimm/rank attribute files */
 555static DEVICE_ATTR(dimm_label, S_IRUGO | S_IWUSR,
 556		   dimmdev_label_show, dimmdev_label_store);
 557static DEVICE_ATTR(dimm_location, S_IRUGO, dimmdev_location_show, NULL);
 558static DEVICE_ATTR(size, S_IRUGO, dimmdev_size_show, NULL);
 559static DEVICE_ATTR(dimm_mem_type, S_IRUGO, dimmdev_mem_type_show, NULL);
 560static DEVICE_ATTR(dimm_dev_type, S_IRUGO, dimmdev_dev_type_show, NULL);
 561static DEVICE_ATTR(dimm_edac_mode, S_IRUGO, dimmdev_edac_mode_show, NULL);
 562
 563/* attributes of the dimm<id>/rank<id> object */
 564static struct attribute *dimm_attrs[] = {
 565	&dev_attr_dimm_label.attr,
 566	&dev_attr_dimm_location.attr,
 567	&dev_attr_size.attr,
 568	&dev_attr_dimm_mem_type.attr,
 569	&dev_attr_dimm_dev_type.attr,
 570	&dev_attr_dimm_edac_mode.attr,
 571	NULL,
 572};
 573
 574static struct attribute_group dimm_attr_grp = {
 575	.attrs	= dimm_attrs,
 576};
 577
 578static const struct attribute_group *dimm_attr_groups[] = {
 579	&dimm_attr_grp,
 580	NULL
 581};
 582
 583static void dimm_attr_release(struct device *dev)
 584{
 585	struct dimm_info *dimm = container_of(dev, struct dimm_info, dev);
 586
 587	edac_dbg(1, "Releasing dimm device %s\n", dev_name(dev));
 588	kfree(dimm);
 589}
 590
 591static struct device_type dimm_attr_type = {
 592	.groups		= dimm_attr_groups,
 593	.release	= dimm_attr_release,
 594};
 595
 596/* Create a DIMM object under the specified memory controller device */
 597static int edac_create_dimm_object(struct mem_ctl_info *mci,
 598				   struct dimm_info *dimm,
 599				   int index)
 600{
 601	int err;
 602	dimm->mci = mci;
 603
 604	dimm->dev.type = &dimm_attr_type;
 605	dimm->dev.bus = mci->bus;
 606	device_initialize(&dimm->dev);
 607
 608	dimm->dev.parent = &mci->dev;
 609	if (mci->csbased)
 610		dev_set_name(&dimm->dev, "rank%d", index);
 611	else
 612		dev_set_name(&dimm->dev, "dimm%d", index);
 613	dev_set_drvdata(&dimm->dev, dimm);
 614	pm_runtime_forbid(&mci->dev);
 615
 616	err =  device_add(&dimm->dev);
 617
 618	edac_dbg(0, "creating rank/dimm device %s\n", dev_name(&dimm->dev));
 619
 620	return err;
 621}
 622
 623/*
 624 * Memory controller device
 625 */
 626
 627#define to_mci(k) container_of(k, struct mem_ctl_info, dev)
 628
 629static ssize_t mci_reset_counters_store(struct device *dev,
 630					struct device_attribute *mattr,
 631					const char *data, size_t count)
 632{
 633	struct mem_ctl_info *mci = to_mci(dev);
 634	int cnt, row, chan, i;
 635	mci->ue_mc = 0;
 636	mci->ce_mc = 0;
 637	mci->ue_noinfo_count = 0;
 638	mci->ce_noinfo_count = 0;
 639
 640	for (row = 0; row < mci->nr_csrows; row++) {
 641		struct csrow_info *ri = mci->csrows[row];
 642
 643		ri->ue_count = 0;
 644		ri->ce_count = 0;
 645
 646		for (chan = 0; chan < ri->nr_channels; chan++)
 647			ri->channels[chan]->ce_count = 0;
 648	}
 649
 650	cnt = 1;
 651	for (i = 0; i < mci->n_layers; i++) {
 652		cnt *= mci->layers[i].size;
 653		memset(mci->ce_per_layer[i], 0, cnt * sizeof(u32));
 654		memset(mci->ue_per_layer[i], 0, cnt * sizeof(u32));
 655	}
 656
 657	mci->start_time = jiffies;
 658	return count;
 659}
 660
 661/* Memory scrubbing interface:
 662 *
 663 * A MC driver can limit the scrubbing bandwidth based on the CPU type.
 664 * Therefore, ->set_sdram_scrub_rate should be made to return the actual
 665 * bandwidth that is accepted or 0 when scrubbing is to be disabled.
 666 *
 667 * Negative value still means that an error has occurred while setting
 668 * the scrub rate.
 669 */
 670static ssize_t mci_sdram_scrub_rate_store(struct device *dev,
 671					  struct device_attribute *mattr,
 672					  const char *data, size_t count)
 673{
 674	struct mem_ctl_info *mci = to_mci(dev);
 675	unsigned long bandwidth = 0;
 676	int new_bw = 0;
 677
 678	if (kstrtoul(data, 10, &bandwidth) < 0)
 679		return -EINVAL;
 680
 681	new_bw = mci->set_sdram_scrub_rate(mci, bandwidth);
 682	if (new_bw < 0) {
 683		edac_printk(KERN_WARNING, EDAC_MC,
 684			    "Error setting scrub rate to: %lu\n", bandwidth);
 685		return -EINVAL;
 686	}
 687
 688	return count;
 689}
 690
 691/*
 692 * ->get_sdram_scrub_rate() return value semantics same as above.
 693 */
 694static ssize_t mci_sdram_scrub_rate_show(struct device *dev,
 695					 struct device_attribute *mattr,
 696					 char *data)
 697{
 698	struct mem_ctl_info *mci = to_mci(dev);
 699	int bandwidth = 0;
 700
 701	bandwidth = mci->get_sdram_scrub_rate(mci);
 702	if (bandwidth < 0) {
 703		edac_printk(KERN_DEBUG, EDAC_MC, "Error reading scrub rate\n");
 704		return bandwidth;
 705	}
 706
 707	return sprintf(data, "%d\n", bandwidth);
 708}
 709
 710/* default attribute files for the MCI object */
 711static ssize_t mci_ue_count_show(struct device *dev,
 712				 struct device_attribute *mattr,
 713				 char *data)
 714{
 715	struct mem_ctl_info *mci = to_mci(dev);
 716
 717	return sprintf(data, "%d\n", mci->ue_mc);
 718}
 719
 720static ssize_t mci_ce_count_show(struct device *dev,
 721				 struct device_attribute *mattr,
 722				 char *data)
 723{
 724	struct mem_ctl_info *mci = to_mci(dev);
 725
 726	return sprintf(data, "%d\n", mci->ce_mc);
 727}
 728
 729static ssize_t mci_ce_noinfo_show(struct device *dev,
 730				  struct device_attribute *mattr,
 731				  char *data)
 732{
 733	struct mem_ctl_info *mci = to_mci(dev);
 734
 735	return sprintf(data, "%d\n", mci->ce_noinfo_count);
 736}
 737
 738static ssize_t mci_ue_noinfo_show(struct device *dev,
 739				  struct device_attribute *mattr,
 740				  char *data)
 741{
 742	struct mem_ctl_info *mci = to_mci(dev);
 743
 744	return sprintf(data, "%d\n", mci->ue_noinfo_count);
 745}
 746
 747static ssize_t mci_seconds_show(struct device *dev,
 748				struct device_attribute *mattr,
 749				char *data)
 750{
 751	struct mem_ctl_info *mci = to_mci(dev);
 752
 753	return sprintf(data, "%ld\n", (jiffies - mci->start_time) / HZ);
 754}
 755
 756static ssize_t mci_ctl_name_show(struct device *dev,
 757				 struct device_attribute *mattr,
 758				 char *data)
 759{
 760	struct mem_ctl_info *mci = to_mci(dev);
 761
 762	return sprintf(data, "%s\n", mci->ctl_name);
 763}
 764
 765static ssize_t mci_size_mb_show(struct device *dev,
 766				struct device_attribute *mattr,
 767				char *data)
 768{
 769	struct mem_ctl_info *mci = to_mci(dev);
 770	int total_pages = 0, csrow_idx, j;
 771
 772	for (csrow_idx = 0; csrow_idx < mci->nr_csrows; csrow_idx++) {
 773		struct csrow_info *csrow = mci->csrows[csrow_idx];
 774
 775		for (j = 0; j < csrow->nr_channels; j++) {
 776			struct dimm_info *dimm = csrow->channels[j]->dimm;
 777
 778			total_pages += dimm->nr_pages;
 779		}
 780	}
 781
 782	return sprintf(data, "%u\n", PAGES_TO_MiB(total_pages));
 783}
 784
 785static ssize_t mci_max_location_show(struct device *dev,
 786				     struct device_attribute *mattr,
 787				     char *data)
 788{
 789	struct mem_ctl_info *mci = to_mci(dev);
 790	int i;
 791	char *p = data;
 792
 793	for (i = 0; i < mci->n_layers; i++) {
 794		p += sprintf(p, "%s %d ",
 795			     edac_layer_name[mci->layers[i].type],
 796			     mci->layers[i].size - 1);
 797	}
 798
 799	return p - data;
 800}
 801
 802/* default Control file */
 803static DEVICE_ATTR(reset_counters, S_IWUSR, NULL, mci_reset_counters_store);
 804
 805/* default Attribute files */
 806static DEVICE_ATTR(mc_name, S_IRUGO, mci_ctl_name_show, NULL);
 807static DEVICE_ATTR(size_mb, S_IRUGO, mci_size_mb_show, NULL);
 808static DEVICE_ATTR(seconds_since_reset, S_IRUGO, mci_seconds_show, NULL);
 809static DEVICE_ATTR(ue_noinfo_count, S_IRUGO, mci_ue_noinfo_show, NULL);
 810static DEVICE_ATTR(ce_noinfo_count, S_IRUGO, mci_ce_noinfo_show, NULL);
 811static DEVICE_ATTR(ue_count, S_IRUGO, mci_ue_count_show, NULL);
 812static DEVICE_ATTR(ce_count, S_IRUGO, mci_ce_count_show, NULL);
 813static DEVICE_ATTR(max_location, S_IRUGO, mci_max_location_show, NULL);
 814
 815/* memory scrubber attribute file */
 816DEVICE_ATTR(sdram_scrub_rate, 0, mci_sdram_scrub_rate_show,
 817	    mci_sdram_scrub_rate_store); /* umode set later in is_visible */
 818
 819static struct attribute *mci_attrs[] = {
 820	&dev_attr_reset_counters.attr,
 821	&dev_attr_mc_name.attr,
 822	&dev_attr_size_mb.attr,
 823	&dev_attr_seconds_since_reset.attr,
 824	&dev_attr_ue_noinfo_count.attr,
 825	&dev_attr_ce_noinfo_count.attr,
 826	&dev_attr_ue_count.attr,
 827	&dev_attr_ce_count.attr,
 828	&dev_attr_max_location.attr,
 829	&dev_attr_sdram_scrub_rate.attr,
 830	NULL
 831};
 832
 833static umode_t mci_attr_is_visible(struct kobject *kobj,
 834				   struct attribute *attr, int idx)
 835{
 836	struct device *dev = kobj_to_dev(kobj);
 837	struct mem_ctl_info *mci = to_mci(dev);
 838	umode_t mode = 0;
 839
 840	if (attr != &dev_attr_sdram_scrub_rate.attr)
 841		return attr->mode;
 842	if (mci->get_sdram_scrub_rate)
 843		mode |= S_IRUGO;
 844	if (mci->set_sdram_scrub_rate)
 845		mode |= S_IWUSR;
 846	return mode;
 847}
 848
 849static struct attribute_group mci_attr_grp = {
 850	.attrs	= mci_attrs,
 851	.is_visible = mci_attr_is_visible,
 852};
 853
 854static const struct attribute_group *mci_attr_groups[] = {
 855	&mci_attr_grp,
 856	NULL
 857};
 858
 859static void mci_attr_release(struct device *dev)
 860{
 861	struct mem_ctl_info *mci = container_of(dev, struct mem_ctl_info, dev);
 862
 863	edac_dbg(1, "Releasing mci device %s\n", dev_name(dev));
 864	kfree(mci);
 865}
 866
 867static struct device_type mci_attr_type = {
 868	.groups		= mci_attr_groups,
 869	.release	= mci_attr_release,
 870};
 871
 872/*
 873 * Create a new Memory Controller kobject instance,
 874 *	mc<id> under the 'mc' directory
 875 *
 876 * Return:
 877 *	0	Success
 878 *	!0	Failure
 879 */
 880int edac_create_sysfs_mci_device(struct mem_ctl_info *mci,
 881				 const struct attribute_group **groups)
 882{
 883	char *name;
 884	int i, err;
 885
 886	/*
 887	 * The memory controller needs its own bus, in order to avoid
 888	 * namespace conflicts at /sys/bus/edac.
 889	 */
 890	name = kasprintf(GFP_KERNEL, "mc%d", mci->mc_idx);
 891	if (!name)
 892		return -ENOMEM;
 893
 894	mci->bus->name = name;
 895
 896	edac_dbg(0, "creating bus %s\n", mci->bus->name);
 897
 898	err = bus_register(mci->bus);
 899	if (err < 0) {
 900		kfree(name);
 901		return err;
 902	}
 903
 904	/* get the /sys/devices/system/edac subsys reference */
 905	mci->dev.type = &mci_attr_type;
 906	device_initialize(&mci->dev);
 907
 908	mci->dev.parent = mci_pdev;
 909	mci->dev.bus = mci->bus;
 910	mci->dev.groups = groups;
 911	dev_set_name(&mci->dev, "mc%d", mci->mc_idx);
 912	dev_set_drvdata(&mci->dev, mci);
 913	pm_runtime_forbid(&mci->dev);
 914
 915	edac_dbg(0, "creating device %s\n", dev_name(&mci->dev));
 916	err = device_add(&mci->dev);
 917	if (err < 0) {
 918		edac_dbg(1, "failure: create device %s\n", dev_name(&mci->dev));
 919		goto fail_unregister_bus;
 920	}
 921
 922	/*
 923	 * Create the dimm/rank devices
 924	 */
 925	for (i = 0; i < mci->tot_dimms; i++) {
 926		struct dimm_info *dimm = mci->dimms[i];
 927		/* Only expose populated DIMMs */
 928		if (!dimm->nr_pages)
 929			continue;
 930
 931#ifdef CONFIG_EDAC_DEBUG
 932		edac_dbg(1, "creating dimm%d, located at ", i);
 933		if (edac_debug_level >= 1) {
 934			int lay;
 935			for (lay = 0; lay < mci->n_layers; lay++)
 936				printk(KERN_CONT "%s %d ",
 937					edac_layer_name[mci->layers[lay].type],
 938					dimm->location[lay]);
 939			printk(KERN_CONT "\n");
 940		}
 941#endif
 942		err = edac_create_dimm_object(mci, dimm, i);
 943		if (err) {
 944			edac_dbg(1, "failure: create dimm %d obj\n", i);
 945			goto fail_unregister_dimm;
 946		}
 947	}
 948
 949#ifdef CONFIG_EDAC_LEGACY_SYSFS
 950	err = edac_create_csrow_objects(mci);
 951	if (err < 0)
 952		goto fail_unregister_dimm;
 953#endif
 954
 955	edac_create_debugfs_nodes(mci);
 956	return 0;
 957
 958fail_unregister_dimm:
 959	for (i--; i >= 0; i--) {
 960		struct dimm_info *dimm = mci->dimms[i];
 961		if (!dimm->nr_pages)
 962			continue;
 963
 964		device_unregister(&dimm->dev);
 965	}
 966	device_unregister(&mci->dev);
 967fail_unregister_bus:
 968	bus_unregister(mci->bus);
 969	kfree(name);
 970
 971	return err;
 972}
 973
 974/*
 975 * remove a Memory Controller instance
 976 */
 977void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
 978{
 979	int i;
 980
 981	edac_dbg(0, "\n");
 982
 983#ifdef CONFIG_EDAC_DEBUG
 984	edac_debugfs_remove_recursive(mci->debugfs);
 985#endif
 986#ifdef CONFIG_EDAC_LEGACY_SYSFS
 987	edac_delete_csrow_objects(mci);
 988#endif
 989
 990	for (i = 0; i < mci->tot_dimms; i++) {
 991		struct dimm_info *dimm = mci->dimms[i];
 992		if (dimm->nr_pages == 0)
 993			continue;
 994		edac_dbg(0, "removing device %s\n", dev_name(&dimm->dev));
 995		device_unregister(&dimm->dev);
 996	}
 997}
 998
 999void edac_unregister_sysfs(struct mem_ctl_info *mci)
1000{
1001	const char *name = mci->bus->name;
1002
1003	edac_dbg(1, "Unregistering device %s\n", dev_name(&mci->dev));
1004	device_unregister(&mci->dev);
1005	bus_unregister(mci->bus);
1006	kfree(name);
1007}
1008
1009static void mc_attr_release(struct device *dev)
1010{
1011	/*
1012	 * There's no container structure here, as this is just the mci
1013	 * parent device, used to create the /sys/devices/mc sysfs node.
1014	 * So, there are no attributes on it.
1015	 */
1016	edac_dbg(1, "Releasing device %s\n", dev_name(dev));
1017	kfree(dev);
1018}
1019
1020static struct device_type mc_attr_type = {
1021	.release	= mc_attr_release,
1022};
1023/*
1024 * Init/exit code for the module. Basically, creates/removes the /sys/devices/system/edac/mc sysfs node
1025 */
1026int __init edac_mc_sysfs_init(void)
1027{
1028	int err;
1029
1030	mci_pdev = kzalloc(sizeof(*mci_pdev), GFP_KERNEL);
1031	if (!mci_pdev) {
1032		err = -ENOMEM;
1033		goto out;
1034	}
1035
1036	mci_pdev->bus = edac_get_sysfs_subsys();
1037	mci_pdev->type = &mc_attr_type;
1038	device_initialize(mci_pdev);
1039	dev_set_name(mci_pdev, "mc");
1040
1041	err = device_add(mci_pdev);
1042	if (err < 0)
1043		goto out_dev_free;
1044
1045	edac_dbg(0, "device %s created\n", dev_name(mci_pdev));
1046
1047	return 0;
1048
1049 out_dev_free:
1050	kfree(mci_pdev);
1051 out:
1052	return err;
1053}
1054
1055void edac_mc_sysfs_exit(void)
1056{
1057	device_unregister(mci_pdev);
1058}
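For completeness, a small userspace sketch (not from the kernel tree) showing how the attributes created by either version above are consumed. It assumes a single registered controller, so the counters live under /sys/devices/system/edac/mc/mc0/; the program name and buffer size are arbitrary.

#include <stdio.h>

int main(void)
{
	char buf[32];
	/* ce_count is created by mci_ce_count_show() in the listing above. */
	FILE *f = fopen("/sys/devices/system/edac/mc/mc0/ce_count", "r");

	if (!f) {
		perror("ce_count");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("corrected errors on mc0: %s", buf);
	fclose(f);
	return 0;
}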