v6.13.7
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * sysfs.c - ACPI sysfs interface to userspace.
   4 */
   5
   6#define pr_fmt(fmt) "ACPI: " fmt
   7
   8#include <linux/acpi.h>
   9#include <linux/bitmap.h>
  10#include <linux/init.h>
  11#include <linux/kernel.h>
  12#include <linux/kstrtox.h>
  13#include <linux/moduleparam.h>
  14
  15#include "internal.h"
  16
  17#ifdef CONFIG_ACPI_DEBUG
  18/*
  19 * ACPI debug sysfs I/F, including:
   20 * /sys/module/acpi/parameters/debug_layer
   21 * /sys/module/acpi/parameters/debug_level
   22 * /sys/module/acpi/parameters/trace_method_name
   23 * /sys/module/acpi/parameters/trace_state
   24 * /sys/module/acpi/parameters/trace_debug_layer
   25 * /sys/module/acpi/parameters/trace_debug_level
  26 */
  27
  28struct acpi_dlayer {
  29	const char *name;
  30	unsigned long value;
  31};
  32struct acpi_dlevel {
  33	const char *name;
  34	unsigned long value;
  35};
  36#define ACPI_DEBUG_INIT(v)	{ .name = #v, .value = v }
  37
  38static const struct acpi_dlayer acpi_debug_layers[] = {
  39	ACPI_DEBUG_INIT(ACPI_UTILITIES),
  40	ACPI_DEBUG_INIT(ACPI_HARDWARE),
  41	ACPI_DEBUG_INIT(ACPI_EVENTS),
  42	ACPI_DEBUG_INIT(ACPI_TABLES),
  43	ACPI_DEBUG_INIT(ACPI_NAMESPACE),
  44	ACPI_DEBUG_INIT(ACPI_PARSER),
  45	ACPI_DEBUG_INIT(ACPI_DISPATCHER),
  46	ACPI_DEBUG_INIT(ACPI_EXECUTER),
  47	ACPI_DEBUG_INIT(ACPI_RESOURCES),
  48	ACPI_DEBUG_INIT(ACPI_CA_DEBUGGER),
  49	ACPI_DEBUG_INIT(ACPI_OS_SERVICES),
  50	ACPI_DEBUG_INIT(ACPI_CA_DISASSEMBLER),
  51	ACPI_DEBUG_INIT(ACPI_COMPILER),
  52	ACPI_DEBUG_INIT(ACPI_TOOLS),
  53};
  54
  55static const struct acpi_dlevel acpi_debug_levels[] = {
  56	ACPI_DEBUG_INIT(ACPI_LV_INIT),
  57	ACPI_DEBUG_INIT(ACPI_LV_DEBUG_OBJECT),
  58	ACPI_DEBUG_INIT(ACPI_LV_INFO),
  59	ACPI_DEBUG_INIT(ACPI_LV_REPAIR),
  60	ACPI_DEBUG_INIT(ACPI_LV_TRACE_POINT),
  61
  62	ACPI_DEBUG_INIT(ACPI_LV_INIT_NAMES),
  63	ACPI_DEBUG_INIT(ACPI_LV_PARSE),
  64	ACPI_DEBUG_INIT(ACPI_LV_LOAD),
  65	ACPI_DEBUG_INIT(ACPI_LV_DISPATCH),
  66	ACPI_DEBUG_INIT(ACPI_LV_EXEC),
  67	ACPI_DEBUG_INIT(ACPI_LV_NAMES),
  68	ACPI_DEBUG_INIT(ACPI_LV_OPREGION),
  69	ACPI_DEBUG_INIT(ACPI_LV_BFIELD),
  70	ACPI_DEBUG_INIT(ACPI_LV_TABLES),
  71	ACPI_DEBUG_INIT(ACPI_LV_VALUES),
  72	ACPI_DEBUG_INIT(ACPI_LV_OBJECTS),
  73	ACPI_DEBUG_INIT(ACPI_LV_RESOURCES),
  74	ACPI_DEBUG_INIT(ACPI_LV_USER_REQUESTS),
  75	ACPI_DEBUG_INIT(ACPI_LV_PACKAGE),
  76
  77	ACPI_DEBUG_INIT(ACPI_LV_ALLOCATIONS),
  78	ACPI_DEBUG_INIT(ACPI_LV_FUNCTIONS),
  79	ACPI_DEBUG_INIT(ACPI_LV_OPTIMIZATIONS),
  80
  81	ACPI_DEBUG_INIT(ACPI_LV_MUTEX),
  82	ACPI_DEBUG_INIT(ACPI_LV_THREADS),
  83	ACPI_DEBUG_INIT(ACPI_LV_IO),
  84	ACPI_DEBUG_INIT(ACPI_LV_INTERRUPTS),
  85
  86	ACPI_DEBUG_INIT(ACPI_LV_AML_DISASSEMBLE),
  87	ACPI_DEBUG_INIT(ACPI_LV_VERBOSE_INFO),
  88	ACPI_DEBUG_INIT(ACPI_LV_FULL_TABLES),
  89	ACPI_DEBUG_INIT(ACPI_LV_EVENTS),
  90};
  91
  92static int param_get_debug_layer(char *buffer, const struct kernel_param *kp)
  93{
  94	int result = 0;
  95	int i;
  96
  97	result = sprintf(buffer, "%-25s\tHex        SET\n", "Description");
  98
  99	for (i = 0; i < ARRAY_SIZE(acpi_debug_layers); i++) {
 100		result += sprintf(buffer + result, "%-25s\t0x%08lX [%c]\n",
 101				  acpi_debug_layers[i].name,
 102				  acpi_debug_layers[i].value,
 103				  (acpi_dbg_layer & acpi_debug_layers[i].value)
 104				  ? '*' : ' ');
 105	}
 106	result +=
 107	    sprintf(buffer + result, "%-25s\t0x%08X [%c]\n", "ACPI_ALL_DRIVERS",
 108		    ACPI_ALL_DRIVERS,
 109		    (acpi_dbg_layer & ACPI_ALL_DRIVERS) ==
 110		    ACPI_ALL_DRIVERS ? '*' : (acpi_dbg_layer & ACPI_ALL_DRIVERS)
 111		    == 0 ? ' ' : '-');
 112	result +=
 113	    sprintf(buffer + result,
 114		    "--\ndebug_layer = 0x%08X ( * = enabled)\n",
 115		    acpi_dbg_layer);
 116
 117	return result;
 118}
 119
 120static int param_get_debug_level(char *buffer, const struct kernel_param *kp)
 121{
 122	int result = 0;
 123	int i;
 124
 125	result = sprintf(buffer, "%-25s\tHex        SET\n", "Description");
 126
 127	for (i = 0; i < ARRAY_SIZE(acpi_debug_levels); i++) {
 128		result += sprintf(buffer + result, "%-25s\t0x%08lX [%c]\n",
 129				  acpi_debug_levels[i].name,
 130				  acpi_debug_levels[i].value,
 131				  (acpi_dbg_level & acpi_debug_levels[i].value)
 132				  ? '*' : ' ');
 133	}
 134	result +=
 135	    sprintf(buffer + result, "--\ndebug_level = 0x%08X (* = enabled)\n",
 136		    acpi_dbg_level);
 137
 138	return result;
 139}
 140
 141static const struct kernel_param_ops param_ops_debug_layer = {
 142	.set = param_set_uint,
 143	.get = param_get_debug_layer,
 144};
 145
 146static const struct kernel_param_ops param_ops_debug_level = {
 147	.set = param_set_uint,
 148	.get = param_get_debug_level,
 149};
 150
 151module_param_cb(debug_layer, &param_ops_debug_layer, &acpi_dbg_layer, 0644);
 152module_param_cb(debug_level, &param_ops_debug_level, &acpi_dbg_level, 0644);
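/*
 * Editorial usage note (illustrative, not part of the original source):
 * reading either parameter prints the mask tables above together with the
 * currently enabled bits; writing takes a hex mask built by ORing the
 * listed values, e.g. from a shell:
 *
 *   # cat /sys/module/acpi/parameters/debug_layer
 *   # echo 0x<layer mask> > /sys/module/acpi/parameters/debug_layer
 *   # echo 0x<level mask> > /sys/module/acpi/parameters/debug_level
 */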
 153
 154static char trace_method_name[1024];
 155
 156static int param_set_trace_method_name(const char *val,
 157				       const struct kernel_param *kp)
 158{
 159	u32 saved_flags = 0;
 160	bool is_abs_path = true;
 161
 162	if (*val != '\\')
 163		is_abs_path = false;
 164
 165	if ((is_abs_path && strlen(val) > 1023) ||
 166	    (!is_abs_path && strlen(val) > 1022)) {
 167		pr_err("%s: string parameter too long\n", kp->name);
 168		return -ENOSPC;
 169	}
 170
 171	/*
 172	 * It's not safe to update acpi_gbl_trace_method_name without
 173	 * having the tracer stopped, so we save the original tracer
 174	 * state and disable it.
 175	 */
 176	saved_flags = acpi_gbl_trace_flags;
 177	(void)acpi_debug_trace(NULL,
 178			       acpi_gbl_trace_dbg_level,
 179			       acpi_gbl_trace_dbg_layer,
 180			       0);
 181
 182	/* This is a hack.  We can't kmalloc in early boot. */
 183	if (is_abs_path)
 184		strcpy(trace_method_name, val);
 185	else {
 186		trace_method_name[0] = '\\';
 187		strcpy(trace_method_name+1, val);
 188	}
 189
 190	/* Restore the original tracer state */
 191	(void)acpi_debug_trace(trace_method_name,
 192			       acpi_gbl_trace_dbg_level,
 193			       acpi_gbl_trace_dbg_layer,
 194			       saved_flags);
 195
 196	return 0;
 197}
 198
 199static int param_get_trace_method_name(char *buffer, const struct kernel_param *kp)
 200{
 201	return sysfs_emit(buffer, "%s\n", acpi_gbl_trace_method_name);
 202}
 203
 204static const struct kernel_param_ops param_ops_trace_method = {
 205	.set = param_set_trace_method_name,
 206	.get = param_get_trace_method_name,
 207};
 208
 209static const struct kernel_param_ops param_ops_trace_attrib = {
 210	.set = param_set_uint,
 211	.get = param_get_uint,
 212};
 213
 214module_param_cb(trace_method_name, &param_ops_trace_method, &trace_method_name, 0644);
 215module_param_cb(trace_debug_layer, &param_ops_trace_attrib, &acpi_gbl_trace_dbg_layer, 0644);
 216module_param_cb(trace_debug_level, &param_ops_trace_attrib, &acpi_gbl_trace_dbg_level, 0644);
 217
 218static int param_set_trace_state(const char *val,
 219				 const struct kernel_param *kp)
 220{
 221	acpi_status status;
 222	const char *method = trace_method_name;
 223	u32 flags = 0;
 224
 225/* The "xxx-once" comparisons must be made before the plain "xxx" ones */
 226#define acpi_compare_param(val, key)	\
 227	strncmp((val), (key), sizeof(key) - 1)
 228
 229	if (!acpi_compare_param(val, "enable")) {
 230		method = NULL;
 231		flags = ACPI_TRACE_ENABLED;
 232	} else if (!acpi_compare_param(val, "disable"))
 233		method = NULL;
 234	else if (!acpi_compare_param(val, "method-once"))
 235		flags = ACPI_TRACE_ENABLED | ACPI_TRACE_ONESHOT;
 236	else if (!acpi_compare_param(val, "method"))
 237		flags = ACPI_TRACE_ENABLED;
 238	else if (!acpi_compare_param(val, "opcode-once"))
 239		flags = ACPI_TRACE_ENABLED | ACPI_TRACE_ONESHOT | ACPI_TRACE_OPCODE;
 240	else if (!acpi_compare_param(val, "opcode"))
 241		flags = ACPI_TRACE_ENABLED | ACPI_TRACE_OPCODE;
 242	else
 243		return -EINVAL;
 244
 245	status = acpi_debug_trace(method,
 246				  acpi_gbl_trace_dbg_level,
 247				  acpi_gbl_trace_dbg_layer,
 248				  flags);
 249	if (ACPI_FAILURE(status))
 250		return -EBUSY;
 251
 252	return 0;
 253}
 254
 255static int param_get_trace_state(char *buffer, const struct kernel_param *kp)
 256{
 257	if (!(acpi_gbl_trace_flags & ACPI_TRACE_ENABLED))
 258		return sprintf(buffer, "disable\n");
 259	if (!acpi_gbl_trace_method_name)
 260		return sprintf(buffer, "enable\n");
 261	if (acpi_gbl_trace_flags & ACPI_TRACE_ONESHOT)
 262		return sprintf(buffer, "method-once\n");
 263	else
 264		return sprintf(buffer, "method\n");
 265}
 266
 267module_param_call(trace_state, param_set_trace_state, param_get_trace_state,
 268		  NULL, 0644);
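/*
 * Editorial usage note (illustrative, not part of the original source):
 * a typical AML method-tracing session sets the trace attributes first and
 * then arms the tracer through trace_state, e.g.:
 *
 *   # echo 0x<layer mask> > /sys/module/acpi/parameters/trace_debug_layer
 *   # echo 0x<level mask> > /sys/module/acpi/parameters/trace_debug_level
 *   # echo '\_SB.PCI0._INI' > /sys/module/acpi/parameters/trace_method_name
 *   # echo method-once > /sys/module/acpi/parameters/trace_state
 *
 * "method"/"method-once" trace the named method (the latter only its first
 * invocation), "enable" traces unconditionally and "disable" stops tracing,
 * mirroring param_set_trace_state() above.  The method path is only an
 * example.
 */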
 269#endif /* CONFIG_ACPI_DEBUG */
 270
 271
 272/* /sys/module/acpi/parameters/aml_debug_output */
 273
 274module_param_named(aml_debug_output, acpi_gbl_enable_aml_debug_object,
 275		   byte, 0644);
 276MODULE_PARM_DESC(aml_debug_output,
 277		 "To enable/disable the ACPI Debug Object output.");
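/*
 * Editorial note (illustrative, not part of the original source): when this
 * parameter is non-zero, values stored to the AML Debug object are printed
 * to the kernel log, e.g.:
 *
 *   # echo 1 > /sys/module/acpi/parameters/aml_debug_output
 */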
 278
 279/* /sys/module/acpi/parameters/acpica_version */
 280static int param_get_acpica_version(char *buffer,
 281				    const struct kernel_param *kp)
 282{
 283	int result;
 284
 285	result = sprintf(buffer, "%x\n", ACPI_CA_VERSION);
 286
 287	return result;
 288}
 289
 290module_param_call(acpica_version, NULL, param_get_acpica_version, NULL, 0444);
 291
 292/*
 293 * ACPI table sysfs I/F:
 294 * /sys/firmware/acpi/tables/
 295 * /sys/firmware/acpi/tables/data/
 296 * /sys/firmware/acpi/tables/dynamic/
 297 */
 298
 299static LIST_HEAD(acpi_table_attr_list);
 300static struct kobject *tables_kobj;
 301static struct kobject *tables_data_kobj;
 302static struct kobject *dynamic_tables_kobj;
 303static struct kobject *hotplug_kobj;
 304
 305#define ACPI_MAX_TABLE_INSTANCES	999
 306#define ACPI_INST_SIZE			4 /* including trailing 0 */
 307
 308struct acpi_table_attr {
 309	struct bin_attribute attr;
 310	char name[ACPI_NAMESEG_SIZE];
 311	int instance;
 312	char filename[ACPI_NAMESEG_SIZE+ACPI_INST_SIZE];
 313	struct list_head node;
 314};
 315
 316struct acpi_data_attr {
 317	struct bin_attribute attr;
 318	u64	addr;
 319};
 320
 321static ssize_t acpi_table_show(struct file *filp, struct kobject *kobj,
 322			       struct bin_attribute *bin_attr, char *buf,
 323			       loff_t offset, size_t count)
 324{
 325	struct acpi_table_attr *table_attr =
 326	    container_of(bin_attr, struct acpi_table_attr, attr);
 327	struct acpi_table_header *table_header = NULL;
 328	acpi_status status;
 329	ssize_t rc;
 330
 331	status = acpi_get_table(table_attr->name, table_attr->instance,
 332				&table_header);
 333	if (ACPI_FAILURE(status))
 334		return -ENODEV;
 335
 336	rc = memory_read_from_buffer(buf, count, &offset, table_header,
 337			table_header->length);
 338	acpi_put_table(table_header);
 339	return rc;
 340}
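/*
 * Editorial usage note (illustrative, not part of the original source):
 * every table is exposed as a read-only binary file, so a raw copy can be
 * taken with ordinary tools and disassembled offline, e.g.:
 *
 *   # cat /sys/firmware/acpi/tables/DSDT > dsdt.dat
 *   # iasl -d dsdt.dat
 */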
 341
 342static int acpi_table_attr_init(struct kobject *tables_obj,
 343				struct acpi_table_attr *table_attr,
 344				struct acpi_table_header *table_header)
 345{
 346	struct acpi_table_header *header = NULL;
 347	struct acpi_table_attr *attr = NULL;
 348	char instance_str[ACPI_INST_SIZE];
 349
 350	sysfs_attr_init(&table_attr->attr.attr);
 351	ACPI_COPY_NAMESEG(table_attr->name, table_header->signature);
 352
 353	list_for_each_entry(attr, &acpi_table_attr_list, node) {
 354		if (ACPI_COMPARE_NAMESEG(table_attr->name, attr->name))
 355			if (table_attr->instance < attr->instance)
 356				table_attr->instance = attr->instance;
 357	}
 358	table_attr->instance++;
 359	if (table_attr->instance > ACPI_MAX_TABLE_INSTANCES) {
 360		pr_warn("%4.4s: too many table instances\n", table_attr->name);
 361		return -ERANGE;
 362	}
 363
 364	ACPI_COPY_NAMESEG(table_attr->filename, table_header->signature);
 365	table_attr->filename[ACPI_NAMESEG_SIZE] = '\0';
 366	if (table_attr->instance > 1 || (table_attr->instance == 1 &&
 367					 !acpi_get_table
 368					 (table_header->signature, 2, &header))) {
 369		snprintf(instance_str, sizeof(instance_str), "%u",
 370			 table_attr->instance);
 371		strcat(table_attr->filename, instance_str);
 372	}
 373
 374	table_attr->attr.size = table_header->length;
 375	table_attr->attr.read = acpi_table_show;
 376	table_attr->attr.attr.name = table_attr->filename;
 377	table_attr->attr.attr.mode = 0400;
 378
 379	return sysfs_create_bin_file(tables_obj, &table_attr->attr);
 380}
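/*
 * Editorial note (clarifying the code above, not part of the original
 * source): a signature that occurs only once keeps its bare name (e.g.
 * "FACP"), while duplicated signatures are numbered from 1 ("SSDT1",
 * "SSDT2", ...); anything beyond ACPI_MAX_TABLE_INSTANCES is rejected.
 */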
 381
 382acpi_status acpi_sysfs_table_handler(u32 event, void *table, void *context)
 383{
 384	struct acpi_table_attr *table_attr;
 385
 386	switch (event) {
 387	case ACPI_TABLE_EVENT_INSTALL:
 388		table_attr = kzalloc(sizeof(*table_attr), GFP_KERNEL);
 389		if (!table_attr)
 390			return AE_NO_MEMORY;
 391
 392		if (acpi_table_attr_init(dynamic_tables_kobj,
 393					 table_attr, table)) {
 394			kfree(table_attr);
 395			return AE_ERROR;
 396		}
 397		list_add_tail(&table_attr->node, &acpi_table_attr_list);
 398		break;
 399	case ACPI_TABLE_EVENT_LOAD:
 400	case ACPI_TABLE_EVENT_UNLOAD:
 401	case ACPI_TABLE_EVENT_UNINSTALL:
 402		/*
 403		 * we do not need to do anything right now
 404		 * because the table is not deleted from the
 405		 * global table list when unloading it.
 406		 */
 407		break;
 408	default:
 409		return AE_BAD_PARAMETER;
 410	}
 411	return AE_OK;
 412}
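/*
 * Editorial note (not part of the original source): tables installed after
 * boot are reported through this handler and appear under
 * /sys/firmware/acpi/tables/dynamic/, while the firmware-provided tables
 * enumerated at init time stay directly under /sys/firmware/acpi/tables/.
 */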
 413
 414static ssize_t acpi_data_show(struct file *filp, struct kobject *kobj,
 415			      struct bin_attribute *bin_attr, char *buf,
 416			      loff_t offset, size_t count)
 417{
 418	struct acpi_data_attr *data_attr;
 419	void __iomem *base;
 420	ssize_t size;
 421
 422	data_attr = container_of(bin_attr, struct acpi_data_attr, attr);
 423	size = data_attr->attr.size;
 424
 425	if (offset < 0)
 426		return -EINVAL;
 427
 428	if (offset >= size)
 429		return 0;
 430
 431	if (count > size - offset)
 432		count = size - offset;
 433
 434	base = acpi_os_map_iomem(data_attr->addr, size);
 435	if (!base)
 436		return -ENOMEM;
 437
 438	memcpy_fromio(buf, base + offset, count);
 439
 440	acpi_os_unmap_iomem(base, size);
 441
 442	return count;
 443}
 444
 445static int acpi_bert_data_init(void *th, struct acpi_data_attr *data_attr)
 446{
 447	struct acpi_table_bert *bert = th;
 448
 449	if (bert->header.length < sizeof(struct acpi_table_bert) ||
 450	    bert->region_length < sizeof(struct acpi_hest_generic_status)) {
 451		kfree(data_attr);
 452		return -EINVAL;
 453	}
 454	data_attr->addr = bert->address;
 455	data_attr->attr.size = bert->region_length;
 456	data_attr->attr.attr.name = "BERT";
 457
 458	return sysfs_create_bin_file(tables_data_kobj, &data_attr->attr);
 459}
 460
 461static int acpi_ccel_data_init(void *th, struct acpi_data_attr *data_attr)
 462{
 463	struct acpi_table_ccel *ccel = th;
 464
 465	if (ccel->header.length < sizeof(struct acpi_table_ccel) ||
 466	    !ccel->log_area_start_address || !ccel->log_area_minimum_length) {
 467		kfree(data_attr);
 468		return -EINVAL;
 469	}
 470	data_attr->addr = ccel->log_area_start_address;
 471	data_attr->attr.size = ccel->log_area_minimum_length;
 472	data_attr->attr.attr.name = "CCEL";
 473
 474	return sysfs_create_bin_file(tables_data_kobj, &data_attr->attr);
 475}
 476
 477static struct acpi_data_obj {
 478	char *name;
 479	int (*fn)(void *, struct acpi_data_attr *);
 480} acpi_data_objs[] = {
 481	{ ACPI_SIG_BERT, acpi_bert_data_init },
 482	{ ACPI_SIG_CCEL, acpi_ccel_data_init },
 483};
 484
 485#define NUM_ACPI_DATA_OBJS ARRAY_SIZE(acpi_data_objs)
 486
 487static int acpi_table_data_init(struct acpi_table_header *th)
 488{
 489	struct acpi_data_attr *data_attr;
 490	int i;
 491
 492	for (i = 0; i < NUM_ACPI_DATA_OBJS; i++) {
 493		if (ACPI_COMPARE_NAMESEG(th->signature, acpi_data_objs[i].name)) {
 494			data_attr = kzalloc(sizeof(*data_attr), GFP_KERNEL);
 495			if (!data_attr)
 496				return -ENOMEM;
 497			sysfs_attr_init(&data_attr->attr.attr);
 498			data_attr->attr.read = acpi_data_show;
 499			data_attr->attr.attr.mode = 0400;
 500			return acpi_data_objs[i].fn(th, data_attr);
 501		}
 502	}
 503	return 0;
 504}
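/*
 * Editorial note (not part of the original source): for the signatures in
 * acpi_data_objs the memory region described by the table (rather than the
 * table itself) is exported under /sys/firmware/acpi/tables/data/, e.g.
 * "data/BERT" for the boot error region and "data/CCEL" for the CC event
 * log area.
 */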
 505
 506static int acpi_tables_sysfs_init(void)
 507{
 508	struct acpi_table_attr *table_attr;
 509	struct acpi_table_header *table_header = NULL;
 510	int table_index;
 511	acpi_status status;
 512	int ret;
 513
 514	tables_kobj = kobject_create_and_add("tables", acpi_kobj);
 515	if (!tables_kobj)
 516		goto err;
 517
 518	tables_data_kobj = kobject_create_and_add("data", tables_kobj);
 519	if (!tables_data_kobj)
 520		goto err_tables_data;
 521
 522	dynamic_tables_kobj = kobject_create_and_add("dynamic", tables_kobj);
 523	if (!dynamic_tables_kobj)
 524		goto err_dynamic_tables;
 525
 526	for (table_index = 0;; table_index++) {
 527		status = acpi_get_table_by_index(table_index, &table_header);
 528
 529		if (status == AE_BAD_PARAMETER)
 530			break;
 531
 532		if (ACPI_FAILURE(status))
 533			continue;
 534
 535		table_attr = kzalloc(sizeof(*table_attr), GFP_KERNEL);
 536		if (!table_attr)
 537			return -ENOMEM;
 538
 539		ret = acpi_table_attr_init(tables_kobj,
 540					   table_attr, table_header);
 541		if (ret) {
 542			kfree(table_attr);
 543			return ret;
 544		}
 545		list_add_tail(&table_attr->node, &acpi_table_attr_list);
 546		acpi_table_data_init(table_header);
 547	}
 548
 549	kobject_uevent(tables_kobj, KOBJ_ADD);
 550	kobject_uevent(tables_data_kobj, KOBJ_ADD);
 551	kobject_uevent(dynamic_tables_kobj, KOBJ_ADD);
 552
 553	return 0;
 554err_dynamic_tables:
 555	kobject_put(tables_data_kobj);
 556err_tables_data:
 557	kobject_put(tables_kobj);
 558err:
 559	return -ENOMEM;
 560}
 561
 562/*
 563 * Detailed ACPI IRQ counters:
 564 * /sys/firmware/acpi/interrupts/
 565 */
 566
 567u32 acpi_irq_handled;
 568u32 acpi_irq_not_handled;
 569
 570#define COUNT_GPE 0
 571#define COUNT_SCI 1		/* acpi_irq_handled */
 572#define COUNT_SCI_NOT 2		/* acpi_irq_not_handled */
 573#define COUNT_ERROR 3		/* other */
 574#define NUM_COUNTERS_EXTRA 4
 575
 576struct event_counter {
 577	u32 count;
 578	u32 flags;
 579};
 580
 581static struct event_counter *all_counters;
 582static u32 num_gpes;
 583static u32 num_counters;
 584static struct attribute **all_attrs;
 585static u32 acpi_gpe_count;
 586
 587static struct attribute_group interrupt_stats_attr_group = {
 588	.name = "interrupts",
 589};
 590
 591static struct kobj_attribute *counter_attrs;
 592
 593static void delete_gpe_attr_array(void)
 594{
 595	struct event_counter *tmp = all_counters;
 596
 597	all_counters = NULL;
 598	kfree(tmp);
 599
 600	if (counter_attrs) {
 601		int i;
 602
 603		for (i = 0; i < num_gpes; i++)
 604			kfree(counter_attrs[i].attr.name);
 605
 606		kfree(counter_attrs);
 607	}
 608	kfree(all_attrs);
 609}
 610
 611static void gpe_count(u32 gpe_number)
 612{
 613	acpi_gpe_count++;
 614
 615	if (!all_counters)
 616		return;
 617
 618	if (gpe_number < num_gpes)
 619		all_counters[gpe_number].count++;
 620	else
 621		all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS +
 622			     COUNT_ERROR].count++;
 623}
 624
 625static void fixed_event_count(u32 event_number)
 626{
 627	if (!all_counters)
 628		return;
 629
 630	if (event_number < ACPI_NUM_FIXED_EVENTS)
 631		all_counters[num_gpes + event_number].count++;
 632	else
 633		all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS +
 634			     COUNT_ERROR].count++;
 635}
 636
 637static void acpi_global_event_handler(u32 event_type, acpi_handle device,
 638	u32 event_number, void *context)
 639{
 640	if (event_type == ACPI_EVENT_TYPE_GPE) {
 641		gpe_count(event_number);
 642		pr_debug("GPE event 0x%02x\n", event_number);
 643	} else if (event_type == ACPI_EVENT_TYPE_FIXED) {
 644		fixed_event_count(event_number);
 645		pr_debug("Fixed event 0x%02x\n", event_number);
 646	} else {
 647		pr_debug("Other event 0x%02x\n", event_number);
 648	}
 649}
 650
 651static int get_status(u32 index, acpi_event_status *ret,
 652		      acpi_handle *handle)
 653{
 654	acpi_status status;
 655
 656	if (index >= num_gpes + ACPI_NUM_FIXED_EVENTS)
 657		return -EINVAL;
 658
 659	if (index < num_gpes) {
 660		status = acpi_get_gpe_device(index, handle);
 661		if (ACPI_FAILURE(status)) {
 662			pr_warn("Invalid GPE 0x%x", index);
 663			return -ENXIO;
 664		}
 665		status = acpi_get_gpe_status(*handle, index, ret);
 666	} else {
 667		status = acpi_get_event_status(index - num_gpes, ret);
 668	}
 669	if (ACPI_FAILURE(status))
 670		return -EIO;
 671
 672	return 0;
 673}
 674
 675static ssize_t counter_show(struct kobject *kobj,
 676			    struct kobj_attribute *attr, char *buf)
 677{
 678	int index = attr - counter_attrs;
 679	int size;
 680	acpi_handle handle;
 681	acpi_event_status status;
 682	int result = 0;
 683
 684	all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI].count =
 685	    acpi_irq_handled;
 686	all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI_NOT].count =
 687	    acpi_irq_not_handled;
 688	all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_GPE].count =
 689	    acpi_gpe_count;
 690	size = sprintf(buf, "%8u", all_counters[index].count);
 691
 692	/* "gpe_all" or "sci" */
 693	if (index >= num_gpes + ACPI_NUM_FIXED_EVENTS)
 694		goto end;
 695
 696	result = get_status(index, &status, &handle);
 697	if (result)
 698		goto end;
 699
 700	if (status & ACPI_EVENT_FLAG_ENABLE_SET)
 701		size += sprintf(buf + size, "  EN");
 702	else
 703		size += sprintf(buf + size, "    ");
 704	if (status & ACPI_EVENT_FLAG_STATUS_SET)
 705		size += sprintf(buf + size, " STS");
 706	else
 707		size += sprintf(buf + size, "    ");
 708
 709	if (!(status & ACPI_EVENT_FLAG_HAS_HANDLER))
 710		size += sprintf(buf + size, " invalid     ");
 711	else if (status & ACPI_EVENT_FLAG_ENABLED)
 712		size += sprintf(buf + size, " enabled     ");
 713	else if (status & ACPI_EVENT_FLAG_WAKE_ENABLED)
 714		size += sprintf(buf + size, " wake_enabled");
 715	else
 716		size += sprintf(buf + size, " disabled    ");
 717	if (status & ACPI_EVENT_FLAG_MASKED)
 718		size += sprintf(buf + size, " masked  ");
 719	else
 720		size += sprintf(buf + size, " unmasked");
 721
 722end:
 723	size += sprintf(buf + size, "\n");
 724	return result ? result : size;
 725}
 726
 727/*
 728 * counter_set() sets the specified counter.
 729 * Writing any value to the aggregate "sci" file clears all counters.
 730 * It also lets user space enable, disable or clear a GPE/fixed event.
 731 */
 732static ssize_t counter_set(struct kobject *kobj,
 733			   struct kobj_attribute *attr, const char *buf,
 734			   size_t size)
 735{
 736	int index = attr - counter_attrs;
 737	acpi_event_status status;
 738	acpi_handle handle;
 739	int result = 0;
 740	unsigned long tmp;
 741
 742	if (index == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI) {
 743		int i;
 744		for (i = 0; i < num_counters; ++i)
 745			all_counters[i].count = 0;
 746		acpi_gpe_count = 0;
 747		acpi_irq_handled = 0;
 748		acpi_irq_not_handled = 0;
 749		goto end;
 750	}
 751
 752	/* show the event status for both GPEs and Fixed Events */
 753	result = get_status(index, &status, &handle);
 754	if (result)
 755		goto end;
 756
 757	if (!(status & ACPI_EVENT_FLAG_HAS_HANDLER)) {
 758		pr_warn("Can not change Invalid GPE/Fixed Event status\n");
 759		return -EINVAL;
 760	}
 761
 762	if (index < num_gpes) {
 763		if (!strcmp(buf, "disable\n") &&
 764		    (status & ACPI_EVENT_FLAG_ENABLED))
 765			result = acpi_disable_gpe(handle, index);
 766		else if (!strcmp(buf, "enable\n") &&
 767			 !(status & ACPI_EVENT_FLAG_ENABLED))
 768			result = acpi_enable_gpe(handle, index);
 769		else if (!strcmp(buf, "clear\n") &&
 770			 (status & ACPI_EVENT_FLAG_STATUS_SET))
 771			result = acpi_clear_gpe(handle, index);
 772		else if (!strcmp(buf, "mask\n"))
 773			result = acpi_mask_gpe(handle, index, TRUE);
 774		else if (!strcmp(buf, "unmask\n"))
 775			result = acpi_mask_gpe(handle, index, FALSE);
 776		else if (!kstrtoul(buf, 0, &tmp))
 777			all_counters[index].count = tmp;
 778		else
 779			result = -EINVAL;
 780	} else if (index < num_gpes + ACPI_NUM_FIXED_EVENTS) {
 781		int event = index - num_gpes;
 782		if (!strcmp(buf, "disable\n") &&
 783		    (status & ACPI_EVENT_FLAG_ENABLE_SET))
 784			result = acpi_disable_event(event, ACPI_NOT_ISR);
 785		else if (!strcmp(buf, "enable\n") &&
 786			 !(status & ACPI_EVENT_FLAG_ENABLE_SET))
 787			result = acpi_enable_event(event, ACPI_NOT_ISR);
 788		else if (!strcmp(buf, "clear\n") &&
 789			 (status & ACPI_EVENT_FLAG_STATUS_SET))
 790			result = acpi_clear_event(event);
 791		else if (!kstrtoul(buf, 0, &tmp))
 792			all_counters[index].count = tmp;
 793		else
 794			result = -EINVAL;
 795	} else
 796		all_counters[index].count = strtoul(buf, NULL, 0);
 797
 798	if (ACPI_FAILURE(result))
 799		result = -EINVAL;
 800end:
 801	return result ? result : size;
 802}
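/*
 * Editorial usage note (illustrative, not part of the original source):
 *
 *   # cat /sys/firmware/acpi/interrupts/gpe10            (count and status)
 *   # echo disable > /sys/firmware/acpi/interrupts/gpe10
 *   # echo clear > /sys/firmware/acpi/interrupts/ff_pwr_btn
 *   # echo 0 > /sys/firmware/acpi/interrupts/sci          (clears all counters)
 *
 * Writing a plain number to an individual counter file simply overwrites
 * that counter, as handled by counter_set() above.
 */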
 803
 804/*
 805 * A Quirk Mechanism for GPE Flooding Prevention:
 806 *
 807 * Quirks may be needed to prevent GPE flooding on a specific GPE. The
 808 * flooding typically cannot be detected and automatically prevented by
 809 * ACPI_GPE_DISPATCH_NONE check because there is a _Lxx/_Exx prepared in
 810 * the AML tables. This normally indicates a feature gap in Linux, thus
 811 * instead of providing endless quirk tables, we provide a boot parameter
 812 * for those who want this quirk. For example, if the users want to prevent
 813 * the GPE flooding for GPE 00, they need to specify the following boot
 814 * parameter:
 815 *   acpi_mask_gpe=0x00
 816 * Note, the parameter can be a list (see bitmap_parselist() for the details).
 817 * The masking status can be modified by the following runtime controlling
 818 * interface:
 819 *   echo unmask > /sys/firmware/acpi/interrupts/gpe00
 820 */
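/*
 * Editorial note (illustrative, not part of the original source): thanks to
 * bitmap_parselist(), the parameter also accepts comma-separated lists and
 * ranges, e.g. "acpi_mask_gpe=32-47,64" (list entries in decimal), in
 * addition to the single hex value shown above.
 */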
 821#define ACPI_MASKABLE_GPE_MAX	0x100
 822static DECLARE_BITMAP(acpi_masked_gpes_map, ACPI_MASKABLE_GPE_MAX) __initdata;
 823
 824static int __init acpi_gpe_set_masked_gpes(char *val)
 825{
 826	int ret;
 827	u8 gpe;
 828
 829	ret = kstrtou8(val, 0, &gpe);
 830	if (ret) {
 831		ret = bitmap_parselist(val, acpi_masked_gpes_map, ACPI_MASKABLE_GPE_MAX);
 832		if (ret)
 833			return ret;
 834	} else
 835		set_bit(gpe, acpi_masked_gpes_map);
 836
 837	return 1;
 838}
 839__setup("acpi_mask_gpe=", acpi_gpe_set_masked_gpes);
 840
 841void __init acpi_gpe_apply_masked_gpes(void)
 842{
 843	acpi_handle handle;
 844	acpi_status status;
 845	u16 gpe;
 846
 847	for_each_set_bit(gpe, acpi_masked_gpes_map, ACPI_MASKABLE_GPE_MAX) {
 848		status = acpi_get_gpe_device(gpe, &handle);
 849		if (ACPI_SUCCESS(status)) {
 850			pr_info("Masking GPE 0x%x.\n", gpe);
 851			(void)acpi_mask_gpe(handle, gpe, TRUE);
 852		}
 853	}
 854}
 855
 856void acpi_irq_stats_init(void)
 857{
 858	acpi_status status;
 859	int i;
 860
 861	if (all_counters)
 862		return;
 863
 864	num_gpes = acpi_current_gpe_count;
 865	num_counters = num_gpes + ACPI_NUM_FIXED_EVENTS + NUM_COUNTERS_EXTRA;
 866
 867	all_attrs = kcalloc(num_counters + 1, sizeof(*all_attrs), GFP_KERNEL);
 868	if (all_attrs == NULL)
 869		return;
 870
 871	all_counters = kcalloc(num_counters, sizeof(*all_counters), GFP_KERNEL);
 872	if (all_counters == NULL)
 873		goto fail;
 874
 875	status = acpi_install_global_event_handler(acpi_global_event_handler, NULL);
 876	if (ACPI_FAILURE(status))
 877		goto fail;
 878
 879	counter_attrs = kcalloc(num_counters, sizeof(*counter_attrs), GFP_KERNEL);
 880	if (counter_attrs == NULL)
 881		goto fail;
 882
 883	for (i = 0; i < num_counters; ++i) {
 884		char buffer[12];
 885		char *name;
 886
 887		if (i < num_gpes)
 888			sprintf(buffer, "gpe%02X", i);
 889		else if (i == num_gpes + ACPI_EVENT_PMTIMER)
 890			sprintf(buffer, "ff_pmtimer");
 891		else if (i == num_gpes + ACPI_EVENT_GLOBAL)
 892			sprintf(buffer, "ff_gbl_lock");
 893		else if (i == num_gpes + ACPI_EVENT_POWER_BUTTON)
 894			sprintf(buffer, "ff_pwr_btn");
 895		else if (i == num_gpes + ACPI_EVENT_SLEEP_BUTTON)
 896			sprintf(buffer, "ff_slp_btn");
 897		else if (i == num_gpes + ACPI_EVENT_RTC)
 898			sprintf(buffer, "ff_rt_clk");
 899		else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_GPE)
 900			sprintf(buffer, "gpe_all");
 901		else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI)
 902			sprintf(buffer, "sci");
 903		else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI_NOT)
 904			sprintf(buffer, "sci_not");
 905		else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_ERROR)
 906			sprintf(buffer, "error");
 907		else
 908			sprintf(buffer, "bug%02X", i);
 909
 910		name = kstrdup(buffer, GFP_KERNEL);
 911		if (name == NULL)
 912			goto fail;
 913
 914		sysfs_attr_init(&counter_attrs[i].attr);
 915		counter_attrs[i].attr.name = name;
 916		counter_attrs[i].attr.mode = 0644;
 917		counter_attrs[i].show = counter_show;
 918		counter_attrs[i].store = counter_set;
 919
 920		all_attrs[i] = &counter_attrs[i].attr;
 921	}
 922
 923	interrupt_stats_attr_group.attrs = all_attrs;
 924	if (!sysfs_create_group(acpi_kobj, &interrupt_stats_attr_group))
 925		return;
 926
 927fail:
 928	delete_gpe_attr_array();
 929}
 930
 931static void __exit interrupt_stats_exit(void)
 932{
 933	sysfs_remove_group(acpi_kobj, &interrupt_stats_attr_group);
 934
 935	delete_gpe_attr_array();
 936}
 937
 938static ssize_t pm_profile_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
 939{
 940	return sprintf(buf, "%d\n", acpi_gbl_FADT.preferred_profile);
 941}
 942
 943static const struct kobj_attribute pm_profile_attr = __ATTR_RO(pm_profile);
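/*
 * Editorial note (not part of the original source): the value is the raw
 * FADT Preferred_PM_Profile field from the ACPI specification (0 =
 * unspecified, 1 = desktop, 2 = mobile, 3 = workstation, 4 = enterprise
 * server, ...).
 */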
 944
 945static ssize_t enabled_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
 946{
 947	struct acpi_hotplug_profile *hotplug = to_acpi_hotplug_profile(kobj);
 948
 949	return sprintf(buf, "%d\n", hotplug->enabled);
 950}
 951
 952static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
 953			     const char *buf, size_t size)
 954{
 955	struct acpi_hotplug_profile *hotplug = to_acpi_hotplug_profile(kobj);
 956	unsigned int val;
 957
 958	if (kstrtouint(buf, 10, &val) || val > 1)
 959		return -EINVAL;
 960
 961	acpi_scan_hotplug_enabled(hotplug, val);
 962	return size;
 963}
 964
 965static struct kobj_attribute hotplug_enabled_attr = __ATTR_RW(enabled);
 966
 967static struct attribute *hotplug_profile_attrs[] = {
 968	&hotplug_enabled_attr.attr,
 969	NULL
 970};
 971ATTRIBUTE_GROUPS(hotplug_profile);
 972
 973static const struct kobj_type acpi_hotplug_profile_ktype = {
 974	.sysfs_ops = &kobj_sysfs_ops,
 975	.default_groups = hotplug_profile_groups,
 976};
 977
 978void acpi_sysfs_add_hotplug_profile(struct acpi_hotplug_profile *hotplug,
 979				    const char *name)
 980{
 981	int error;
 982
 983	if (!hotplug_kobj)
 984		goto err_out;
 985
 986	error = kobject_init_and_add(&hotplug->kobj,
 987		&acpi_hotplug_profile_ktype, hotplug_kobj, "%s", name);
 988	if (error) {
 989		kobject_put(&hotplug->kobj);
 990		goto err_out;
 991	}
 992
 993	kobject_uevent(&hotplug->kobj, KOBJ_ADD);
 994	return;
 995
 996 err_out:
 997	pr_err("Unable to add hotplug profile '%s'\n", name);
 998}
 999
1000static ssize_t force_remove_show(struct kobject *kobj,
1001				 struct kobj_attribute *attr, char *buf)
1002{
1003	return sprintf(buf, "%d\n", 0);
1004}
1005
1006static ssize_t force_remove_store(struct kobject *kobj,
1007				  struct kobj_attribute *attr,
1008				  const char *buf, size_t size)
1009{
1010	bool val;
1011	int ret;
1012
1013	ret = kstrtobool(buf, &val);
1014	if (ret < 0)
1015		return ret;
1016
1017	if (val) {
1018		pr_err("Enabling force_remove is not supported anymore. Please report to linux-acpi@vger.kernel.org if you depend on this functionality\n");
1019		return -EINVAL;
1020	}
1021	return size;
1022}
1023
1024static const struct kobj_attribute force_remove_attr = __ATTR_RW(force_remove);
1025
1026int __init acpi_sysfs_init(void)
1027{
1028	int result;
1029
1030	result = acpi_tables_sysfs_init();
1031	if (result)
1032		return result;
1033
1034	hotplug_kobj = kobject_create_and_add("hotplug", acpi_kobj);
1035	if (!hotplug_kobj)
1036		return -ENOMEM;
1037
1038	result = sysfs_create_file(hotplug_kobj, &force_remove_attr.attr);
1039	if (result)
1040		return result;
1041
1042	result = sysfs_create_file(acpi_kobj, &pm_profile_attr.attr);
1043	return result;
1044}
v4.10.11
 
  1/*
  2 * sysfs.c - ACPI sysfs interface to userspace.
  3 */
  4
  5#include <linux/init.h>
  6#include <linux/kernel.h>
  7#include <linux/moduleparam.h>
  8#include <linux/acpi.h>
  9
 10#include "internal.h"
 11
 12#define _COMPONENT		ACPI_SYSTEM_COMPONENT
 13ACPI_MODULE_NAME("sysfs");
 14
 15#ifdef CONFIG_ACPI_DEBUG
 16/*
 17 * ACPI debug sysfs I/F, including:
 18 * /sys/module/acpi/parameters/debug_layer
 19 * /sys/module/acpi/parameters/debug_level
 20 * /sys/module/acpi/parameters/trace_method_name
 21 * /sys/module/acpi/parameters/trace_state
 22 * /sys/module/acpi/parameters/trace_debug_layer
 23 * /sys/module/acpi/parameters/trace_debug_level
 24 */
 25
 26struct acpi_dlayer {
 27	const char *name;
 28	unsigned long value;
 29};
 30struct acpi_dlevel {
 31	const char *name;
 32	unsigned long value;
 33};
 34#define ACPI_DEBUG_INIT(v)	{ .name = #v, .value = v }
 35
 36static const struct acpi_dlayer acpi_debug_layers[] = {
 37	ACPI_DEBUG_INIT(ACPI_UTILITIES),
 38	ACPI_DEBUG_INIT(ACPI_HARDWARE),
 39	ACPI_DEBUG_INIT(ACPI_EVENTS),
 40	ACPI_DEBUG_INIT(ACPI_TABLES),
 41	ACPI_DEBUG_INIT(ACPI_NAMESPACE),
 42	ACPI_DEBUG_INIT(ACPI_PARSER),
 43	ACPI_DEBUG_INIT(ACPI_DISPATCHER),
 44	ACPI_DEBUG_INIT(ACPI_EXECUTER),
 45	ACPI_DEBUG_INIT(ACPI_RESOURCES),
 46	ACPI_DEBUG_INIT(ACPI_CA_DEBUGGER),
 47	ACPI_DEBUG_INIT(ACPI_OS_SERVICES),
 48	ACPI_DEBUG_INIT(ACPI_CA_DISASSEMBLER),
 49	ACPI_DEBUG_INIT(ACPI_COMPILER),
 50	ACPI_DEBUG_INIT(ACPI_TOOLS),
 51
 52	ACPI_DEBUG_INIT(ACPI_BUS_COMPONENT),
 53	ACPI_DEBUG_INIT(ACPI_AC_COMPONENT),
 54	ACPI_DEBUG_INIT(ACPI_BATTERY_COMPONENT),
 55	ACPI_DEBUG_INIT(ACPI_BUTTON_COMPONENT),
 56	ACPI_DEBUG_INIT(ACPI_SBS_COMPONENT),
 57	ACPI_DEBUG_INIT(ACPI_FAN_COMPONENT),
 58	ACPI_DEBUG_INIT(ACPI_PCI_COMPONENT),
 59	ACPI_DEBUG_INIT(ACPI_POWER_COMPONENT),
 60	ACPI_DEBUG_INIT(ACPI_CONTAINER_COMPONENT),
 61	ACPI_DEBUG_INIT(ACPI_SYSTEM_COMPONENT),
 62	ACPI_DEBUG_INIT(ACPI_THERMAL_COMPONENT),
 63	ACPI_DEBUG_INIT(ACPI_MEMORY_DEVICE_COMPONENT),
 64	ACPI_DEBUG_INIT(ACPI_VIDEO_COMPONENT),
 65	ACPI_DEBUG_INIT(ACPI_PROCESSOR_COMPONENT),
 66};
 67
 68static const struct acpi_dlevel acpi_debug_levels[] = {
 69	ACPI_DEBUG_INIT(ACPI_LV_INIT),
 70	ACPI_DEBUG_INIT(ACPI_LV_DEBUG_OBJECT),
 71	ACPI_DEBUG_INIT(ACPI_LV_INFO),
 72	ACPI_DEBUG_INIT(ACPI_LV_REPAIR),
 73	ACPI_DEBUG_INIT(ACPI_LV_TRACE_POINT),
 74
 75	ACPI_DEBUG_INIT(ACPI_LV_INIT_NAMES),
 76	ACPI_DEBUG_INIT(ACPI_LV_PARSE),
 77	ACPI_DEBUG_INIT(ACPI_LV_LOAD),
 78	ACPI_DEBUG_INIT(ACPI_LV_DISPATCH),
 79	ACPI_DEBUG_INIT(ACPI_LV_EXEC),
 80	ACPI_DEBUG_INIT(ACPI_LV_NAMES),
 81	ACPI_DEBUG_INIT(ACPI_LV_OPREGION),
 82	ACPI_DEBUG_INIT(ACPI_LV_BFIELD),
 83	ACPI_DEBUG_INIT(ACPI_LV_TABLES),
 84	ACPI_DEBUG_INIT(ACPI_LV_VALUES),
 85	ACPI_DEBUG_INIT(ACPI_LV_OBJECTS),
 86	ACPI_DEBUG_INIT(ACPI_LV_RESOURCES),
 87	ACPI_DEBUG_INIT(ACPI_LV_USER_REQUESTS),
 88	ACPI_DEBUG_INIT(ACPI_LV_PACKAGE),
 89
 90	ACPI_DEBUG_INIT(ACPI_LV_ALLOCATIONS),
 91	ACPI_DEBUG_INIT(ACPI_LV_FUNCTIONS),
 92	ACPI_DEBUG_INIT(ACPI_LV_OPTIMIZATIONS),
 93
 94	ACPI_DEBUG_INIT(ACPI_LV_MUTEX),
 95	ACPI_DEBUG_INIT(ACPI_LV_THREADS),
 96	ACPI_DEBUG_INIT(ACPI_LV_IO),
 97	ACPI_DEBUG_INIT(ACPI_LV_INTERRUPTS),
 98
 99	ACPI_DEBUG_INIT(ACPI_LV_AML_DISASSEMBLE),
100	ACPI_DEBUG_INIT(ACPI_LV_VERBOSE_INFO),
101	ACPI_DEBUG_INIT(ACPI_LV_FULL_TABLES),
102	ACPI_DEBUG_INIT(ACPI_LV_EVENTS),
103};
104
105static int param_get_debug_layer(char *buffer, const struct kernel_param *kp)
106{
107	int result = 0;
108	int i;
109
110	result = sprintf(buffer, "%-25s\tHex        SET\n", "Description");
111
112	for (i = 0; i < ARRAY_SIZE(acpi_debug_layers); i++) {
113		result += sprintf(buffer + result, "%-25s\t0x%08lX [%c]\n",
114				  acpi_debug_layers[i].name,
115				  acpi_debug_layers[i].value,
116				  (acpi_dbg_layer & acpi_debug_layers[i].value)
117				  ? '*' : ' ');
118	}
119	result +=
120	    sprintf(buffer + result, "%-25s\t0x%08X [%c]\n", "ACPI_ALL_DRIVERS",
121		    ACPI_ALL_DRIVERS,
122		    (acpi_dbg_layer & ACPI_ALL_DRIVERS) ==
123		    ACPI_ALL_DRIVERS ? '*' : (acpi_dbg_layer & ACPI_ALL_DRIVERS)
124		    == 0 ? ' ' : '-');
125	result +=
126	    sprintf(buffer + result,
127		    "--\ndebug_layer = 0x%08X ( * = enabled)\n",
128		    acpi_dbg_layer);
129
130	return result;
131}
132
133static int param_get_debug_level(char *buffer, const struct kernel_param *kp)
134{
135	int result = 0;
136	int i;
137
138	result = sprintf(buffer, "%-25s\tHex        SET\n", "Description");
139
140	for (i = 0; i < ARRAY_SIZE(acpi_debug_levels); i++) {
141		result += sprintf(buffer + result, "%-25s\t0x%08lX [%c]\n",
142				  acpi_debug_levels[i].name,
143				  acpi_debug_levels[i].value,
144				  (acpi_dbg_level & acpi_debug_levels[i].value)
145				  ? '*' : ' ');
146	}
147	result +=
148	    sprintf(buffer + result, "--\ndebug_level = 0x%08X (* = enabled)\n",
149		    acpi_dbg_level);
150
151	return result;
152}
153
154static const struct kernel_param_ops param_ops_debug_layer = {
155	.set = param_set_uint,
156	.get = param_get_debug_layer,
157};
158
159static const struct kernel_param_ops param_ops_debug_level = {
160	.set = param_set_uint,
161	.get = param_get_debug_level,
162};
163
164module_param_cb(debug_layer, &param_ops_debug_layer, &acpi_dbg_layer, 0644);
165module_param_cb(debug_level, &param_ops_debug_level, &acpi_dbg_level, 0644);
166
167static char trace_method_name[1024];
168
169int param_set_trace_method_name(const char *val, const struct kernel_param *kp)
170{
171	u32 saved_flags = 0;
172	bool is_abs_path = true;
173
174	if (*val != '\\')
175		is_abs_path = false;
176
177	if ((is_abs_path && strlen(val) > 1023) ||
178	    (!is_abs_path && strlen(val) > 1022)) {
179		pr_err("%s: string parameter too long\n", kp->name);
180		return -ENOSPC;
181	}
182
183	/*
184	 * It's not safe to update acpi_gbl_trace_method_name without
185	 * having the tracer stopped, so we save the original tracer
186	 * state and disable it.
187	 */
188	saved_flags = acpi_gbl_trace_flags;
189	(void)acpi_debug_trace(NULL,
190			       acpi_gbl_trace_dbg_level,
191			       acpi_gbl_trace_dbg_layer,
192			       0);
193
194	/* This is a hack.  We can't kmalloc in early boot. */
195	if (is_abs_path)
196		strcpy(trace_method_name, val);
197	else {
198		trace_method_name[0] = '\\';
199		strcpy(trace_method_name+1, val);
200	}
201
202	/* Restore the original tracer state */
203	(void)acpi_debug_trace(trace_method_name,
204			       acpi_gbl_trace_dbg_level,
205			       acpi_gbl_trace_dbg_layer,
206			       saved_flags);
207
208	return 0;
209}
210
211static int param_get_trace_method_name(char *buffer, const struct kernel_param *kp)
212{
213	return scnprintf(buffer, PAGE_SIZE, "%s", acpi_gbl_trace_method_name);
214}
215
216static const struct kernel_param_ops param_ops_trace_method = {
217	.set = param_set_trace_method_name,
218	.get = param_get_trace_method_name,
219};
220
221static const struct kernel_param_ops param_ops_trace_attrib = {
222	.set = param_set_uint,
223	.get = param_get_uint,
224};
225
226module_param_cb(trace_method_name, &param_ops_trace_method, &trace_method_name, 0644);
227module_param_cb(trace_debug_layer, &param_ops_trace_attrib, &acpi_gbl_trace_dbg_layer, 0644);
228module_param_cb(trace_debug_level, &param_ops_trace_attrib, &acpi_gbl_trace_dbg_level, 0644);
229
230static int param_set_trace_state(const char *val, struct kernel_param *kp)
231{
232	acpi_status status;
233	const char *method = trace_method_name;
234	u32 flags = 0;
235
236/* The "xxx-once" comparisons must be made before the plain "xxx" ones */
237#define acpi_compare_param(val, key)	\
238	strncmp((val), (key), sizeof(key) - 1)
239
240	if (!acpi_compare_param(val, "enable")) {
241		method = NULL;
242		flags = ACPI_TRACE_ENABLED;
243	} else if (!acpi_compare_param(val, "disable"))
244		method = NULL;
245	else if (!acpi_compare_param(val, "method-once"))
246		flags = ACPI_TRACE_ENABLED | ACPI_TRACE_ONESHOT;
247	else if (!acpi_compare_param(val, "method"))
248		flags = ACPI_TRACE_ENABLED;
249	else if (!acpi_compare_param(val, "opcode-once"))
250		flags = ACPI_TRACE_ENABLED | ACPI_TRACE_ONESHOT | ACPI_TRACE_OPCODE;
251	else if (!acpi_compare_param(val, "opcode"))
252		flags = ACPI_TRACE_ENABLED | ACPI_TRACE_OPCODE;
253	else
254		return -EINVAL;
255
256	status = acpi_debug_trace(method,
257				  acpi_gbl_trace_dbg_level,
258				  acpi_gbl_trace_dbg_layer,
259				  flags);
260	if (ACPI_FAILURE(status))
261		return -EBUSY;
262
263	return 0;
264}
265
266static int param_get_trace_state(char *buffer, struct kernel_param *kp)
267{
268	if (!(acpi_gbl_trace_flags & ACPI_TRACE_ENABLED))
269		return sprintf(buffer, "disable");
270	else {
271		if (acpi_gbl_trace_method_name) {
272			if (acpi_gbl_trace_flags & ACPI_TRACE_ONESHOT)
273				return sprintf(buffer, "method-once");
274			else
275				return sprintf(buffer, "method");
276		} else
277			return sprintf(buffer, "enable");
278	}
279	return 0;
280}
281
282module_param_call(trace_state, param_set_trace_state, param_get_trace_state,
283		  NULL, 0644);
284#endif /* CONFIG_ACPI_DEBUG */
285
286
287/* /sys/module/acpi/parameters/aml_debug_output */
288
289module_param_named(aml_debug_output, acpi_gbl_enable_aml_debug_object,
290		   byte, 0644);
291MODULE_PARM_DESC(aml_debug_output,
292		 "To enable/disable the ACPI Debug Object output.");
293
294/* /sys/module/acpi/parameters/acpica_version */
295static int param_get_acpica_version(char *buffer, struct kernel_param *kp)
296{
297	int result;
298
299	result = sprintf(buffer, "%x", ACPI_CA_VERSION);
300
301	return result;
302}
303
304module_param_call(acpica_version, NULL, param_get_acpica_version, NULL, 0444);
305
306/*
307 * ACPI table sysfs I/F:
308 * /sys/firmware/acpi/tables/
309 * /sys/firmware/acpi/tables/dynamic/
310 */
311
312static LIST_HEAD(acpi_table_attr_list);
313static struct kobject *tables_kobj;
314static struct kobject *dynamic_tables_kobj;
315static struct kobject *hotplug_kobj;
316
317#define ACPI_MAX_TABLE_INSTANCES	999
318#define ACPI_INST_SIZE			4 /* including trailing 0 */
319
320struct acpi_table_attr {
321	struct bin_attribute attr;
322	char name[ACPI_NAME_SIZE];
323	int instance;
324	char filename[ACPI_NAME_SIZE+ACPI_INST_SIZE];
325	struct list_head node;
326};
327
328static ssize_t acpi_table_show(struct file *filp, struct kobject *kobj,
329			       struct bin_attribute *bin_attr, char *buf,
330			       loff_t offset, size_t count)
331{
332	struct acpi_table_attr *table_attr =
333	    container_of(bin_attr, struct acpi_table_attr, attr);
334	struct acpi_table_header *table_header = NULL;
335	acpi_status status;
336
337	status = acpi_get_table(table_attr->name, table_attr->instance,
338				&table_header);
339	if (ACPI_FAILURE(status))
340		return -ENODEV;
341
342	return memory_read_from_buffer(buf, count, &offset,
343				       table_header, table_header->length);
344}
345
346static int acpi_table_attr_init(struct kobject *tables_obj,
347				struct acpi_table_attr *table_attr,
348				struct acpi_table_header *table_header)
349{
350	struct acpi_table_header *header = NULL;
351	struct acpi_table_attr *attr = NULL;
352	char instance_str[ACPI_INST_SIZE];
353
354	sysfs_attr_init(&table_attr->attr.attr);
355	ACPI_MOVE_NAME(table_attr->name, table_header->signature);
356
357	list_for_each_entry(attr, &acpi_table_attr_list, node) {
358		if (ACPI_COMPARE_NAME(table_attr->name, attr->name))
359			if (table_attr->instance < attr->instance)
360				table_attr->instance = attr->instance;
361	}
362	table_attr->instance++;
363	if (table_attr->instance > ACPI_MAX_TABLE_INSTANCES) {
364		pr_warn("%4.4s: too many table instances\n",
365			table_attr->name);
366		return -ERANGE;
367	}
368
369	ACPI_MOVE_NAME(table_attr->filename, table_header->signature);
370	table_attr->filename[ACPI_NAME_SIZE] = '\0';
371	if (table_attr->instance > 1 || (table_attr->instance == 1 &&
372					 !acpi_get_table
373					 (table_header->signature, 2, &header))) {
374		snprintf(instance_str, sizeof(instance_str), "%u",
375			 table_attr->instance);
376		strcat(table_attr->filename, instance_str);
377	}
378
379	table_attr->attr.size = table_header->length;
380	table_attr->attr.read = acpi_table_show;
381	table_attr->attr.attr.name = table_attr->filename;
382	table_attr->attr.attr.mode = 0400;
383
384	return sysfs_create_bin_file(tables_obj, &table_attr->attr);
385}
386
387acpi_status acpi_sysfs_table_handler(u32 event, void *table, void *context)
388{
389	struct acpi_table_attr *table_attr;
390
391	switch (event) {
392	case ACPI_TABLE_EVENT_INSTALL:
393		table_attr =
394		    kzalloc(sizeof(struct acpi_table_attr), GFP_KERNEL);
395		if (!table_attr)
396			return AE_NO_MEMORY;
397
398		if (acpi_table_attr_init(dynamic_tables_kobj,
399					 table_attr, table)) {
400			kfree(table_attr);
401			return AE_ERROR;
402		}
403		list_add_tail(&table_attr->node, &acpi_table_attr_list);
404		break;
405	case ACPI_TABLE_EVENT_LOAD:
406	case ACPI_TABLE_EVENT_UNLOAD:
407	case ACPI_TABLE_EVENT_UNINSTALL:
408		/*
409		 * we do not need to do anything right now
410		 * because the table is not deleted from the
411		 * global table list when unloading it.
412		 */
413		break;
414	default:
415		return AE_BAD_PARAMETER;
416	}
417	return AE_OK;
418}
419
420static int acpi_tables_sysfs_init(void)
421{
422	struct acpi_table_attr *table_attr;
423	struct acpi_table_header *table_header = NULL;
424	int table_index;
425	acpi_status status;
426	int ret;
427
428	tables_kobj = kobject_create_and_add("tables", acpi_kobj);
429	if (!tables_kobj)
430		goto err;
431
432	dynamic_tables_kobj = kobject_create_and_add("dynamic", tables_kobj);
433	if (!dynamic_tables_kobj)
434		goto err_dynamic_tables;
435
436	for (table_index = 0;; table_index++) {
437		status = acpi_get_table_by_index(table_index, &table_header);
438
439		if (status == AE_BAD_PARAMETER)
440			break;
441
442		if (ACPI_FAILURE(status))
443			continue;
444
445		table_attr = kzalloc(sizeof(*table_attr), GFP_KERNEL);
446		if (!table_attr)
447			return -ENOMEM;
448
449		ret = acpi_table_attr_init(tables_kobj,
450					   table_attr, table_header);
451		if (ret) {
452			kfree(table_attr);
453			return ret;
454		}
455		list_add_tail(&table_attr->node, &acpi_table_attr_list);
456	}
457
458	kobject_uevent(tables_kobj, KOBJ_ADD);
459	kobject_uevent(dynamic_tables_kobj, KOBJ_ADD);
460
461	return 0;
462err_dynamic_tables:
463	kobject_put(tables_kobj);
464err:
465	return -ENOMEM;
466}
467
468/*
469 * Detailed ACPI IRQ counters:
470 * /sys/firmware/acpi/interrupts/
471 */
472
473u32 acpi_irq_handled;
474u32 acpi_irq_not_handled;
475
476#define COUNT_GPE 0
477#define COUNT_SCI 1		/* acpi_irq_handled */
478#define COUNT_SCI_NOT 2		/* acpi_irq_not_handled */
479#define COUNT_ERROR 3		/* other */
480#define NUM_COUNTERS_EXTRA 4
481
482struct event_counter {
483	u32 count;
484	u32 flags;
485};
486
487static struct event_counter *all_counters;
488static u32 num_gpes;
489static u32 num_counters;
490static struct attribute **all_attrs;
491static u32 acpi_gpe_count;
492
493static struct attribute_group interrupt_stats_attr_group = {
494	.name = "interrupts",
495};
496
497static struct kobj_attribute *counter_attrs;
498
499static void delete_gpe_attr_array(void)
500{
501	struct event_counter *tmp = all_counters;
502
503	all_counters = NULL;
504	kfree(tmp);
505
506	if (counter_attrs) {
507		int i;
508
509		for (i = 0; i < num_gpes; i++)
510			kfree(counter_attrs[i].attr.name);
511
512		kfree(counter_attrs);
513	}
514	kfree(all_attrs);
515
516	return;
517}
518
519static void gpe_count(u32 gpe_number)
520{
521	acpi_gpe_count++;
522
523	if (!all_counters)
524		return;
525
526	if (gpe_number < num_gpes)
527		all_counters[gpe_number].count++;
528	else
529		all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS +
530			     COUNT_ERROR].count++;
531
532	return;
533}
534
535static void fixed_event_count(u32 event_number)
536{
537	if (!all_counters)
538		return;
539
540	if (event_number < ACPI_NUM_FIXED_EVENTS)
541		all_counters[num_gpes + event_number].count++;
542	else
543		all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS +
544			     COUNT_ERROR].count++;
545
546	return;
547}
548
549static void acpi_global_event_handler(u32 event_type, acpi_handle device,
550	u32 event_number, void *context)
551{
552	if (event_type == ACPI_EVENT_TYPE_GPE)
553		gpe_count(event_number);
554
555	if (event_type == ACPI_EVENT_TYPE_FIXED)
556		fixed_event_count(event_number);
557}
558
559static int get_status(u32 index, acpi_event_status *status,
560		      acpi_handle *handle)
561{
562	int result;
563
564	if (index >= num_gpes + ACPI_NUM_FIXED_EVENTS)
565		return -EINVAL;
566
567	if (index < num_gpes) {
568		result = acpi_get_gpe_device(index, handle);
569		if (result) {
570			ACPI_EXCEPTION((AE_INFO, AE_NOT_FOUND,
571					"Invalid GPE 0x%x", index));
572			return result;
573		}
574		result = acpi_get_gpe_status(*handle, index, status);
575	} else if (index < (num_gpes + ACPI_NUM_FIXED_EVENTS))
576		result = acpi_get_event_status(index - num_gpes, status);
577
578	return result;
579}
580
581static ssize_t counter_show(struct kobject *kobj,
582			    struct kobj_attribute *attr, char *buf)
583{
584	int index = attr - counter_attrs;
585	int size;
586	acpi_handle handle;
587	acpi_event_status status;
588	int result = 0;
589
590	all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI].count =
591	    acpi_irq_handled;
592	all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI_NOT].count =
593	    acpi_irq_not_handled;
594	all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_GPE].count =
595	    acpi_gpe_count;
596	size = sprintf(buf, "%8u", all_counters[index].count);
597
598	/* "gpe_all" or "sci" */
599	if (index >= num_gpes + ACPI_NUM_FIXED_EVENTS)
600		goto end;
601
602	result = get_status(index, &status, &handle);
603	if (result)
604		goto end;
605
606	if (status & ACPI_EVENT_FLAG_ENABLE_SET)
607		size += sprintf(buf + size, "  EN");
608	else
609		size += sprintf(buf + size, "    ");
610	if (status & ACPI_EVENT_FLAG_STATUS_SET)
611		size += sprintf(buf + size, " STS");
612	else
613		size += sprintf(buf + size, "    ");
614
615	if (!(status & ACPI_EVENT_FLAG_HAS_HANDLER))
616		size += sprintf(buf + size, " invalid     ");
617	else if (status & ACPI_EVENT_FLAG_ENABLED)
618		size += sprintf(buf + size, " enabled     ");
619	else if (status & ACPI_EVENT_FLAG_WAKE_ENABLED)
620		size += sprintf(buf + size, " wake_enabled");
621	else
622		size += sprintf(buf + size, " disabled    ");
623	if (status & ACPI_EVENT_FLAG_MASKED)
624		size += sprintf(buf + size, " masked  ");
625	else
626		size += sprintf(buf + size, " unmasked");
627
628end:
629	size += sprintf(buf + size, "\n");
630	return result ? result : size;
631}
632
633/*
634 * counter_set() sets the specified counter.
635 * Writing any value to the aggregate "sci" file clears all counters.
636 * It also lets user space enable, disable or clear a GPE/fixed event.
637 */
638static ssize_t counter_set(struct kobject *kobj,
639			   struct kobj_attribute *attr, const char *buf,
640			   size_t size)
641{
642	int index = attr - counter_attrs;
643	acpi_event_status status;
644	acpi_handle handle;
645	int result = 0;
646	unsigned long tmp;
647
648	if (index == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI) {
649		int i;
650		for (i = 0; i < num_counters; ++i)
651			all_counters[i].count = 0;
652		acpi_gpe_count = 0;
653		acpi_irq_handled = 0;
654		acpi_irq_not_handled = 0;
655		goto end;
656	}
657
658	/* show the event status for both GPEs and Fixed Events */
659	result = get_status(index, &status, &handle);
660	if (result)
661		goto end;
662
663	if (!(status & ACPI_EVENT_FLAG_HAS_HANDLER)) {
664		printk(KERN_WARNING PREFIX
665		       "Can not change Invalid GPE/Fixed Event status\n");
666		return -EINVAL;
667	}
668
669	if (index < num_gpes) {
670		if (!strcmp(buf, "disable\n") &&
671		    (status & ACPI_EVENT_FLAG_ENABLED))
672			result = acpi_disable_gpe(handle, index);
673		else if (!strcmp(buf, "enable\n") &&
674			 !(status & ACPI_EVENT_FLAG_ENABLED))
675			result = acpi_enable_gpe(handle, index);
676		else if (!strcmp(buf, "clear\n") &&
677			 (status & ACPI_EVENT_FLAG_STATUS_SET))
678			result = acpi_clear_gpe(handle, index);
679		else if (!strcmp(buf, "mask\n"))
680			result = acpi_mask_gpe(handle, index, TRUE);
681		else if (!strcmp(buf, "unmask\n"))
682			result = acpi_mask_gpe(handle, index, FALSE);
683		else if (!kstrtoul(buf, 0, &tmp))
684			all_counters[index].count = tmp;
685		else
686			result = -EINVAL;
687	} else if (index < num_gpes + ACPI_NUM_FIXED_EVENTS) {
688		int event = index - num_gpes;
689		if (!strcmp(buf, "disable\n") &&
690		    (status & ACPI_EVENT_FLAG_ENABLE_SET))
691			result = acpi_disable_event(event, ACPI_NOT_ISR);
692		else if (!strcmp(buf, "enable\n") &&
693			 !(status & ACPI_EVENT_FLAG_ENABLE_SET))
694			result = acpi_enable_event(event, ACPI_NOT_ISR);
695		else if (!strcmp(buf, "clear\n") &&
696			 (status & ACPI_EVENT_FLAG_STATUS_SET))
697			result = acpi_clear_event(event);
698		else if (!kstrtoul(buf, 0, &tmp))
699			all_counters[index].count = tmp;
700		else
701			result = -EINVAL;
702	} else
703		all_counters[index].count = strtoul(buf, NULL, 0);
704
705	if (ACPI_FAILURE(result))
706		result = -EINVAL;
707end:
708	return result ? result : size;
709}
710
711/*
712 * A Quirk Mechanism for GPE Flooding Prevention:
713 *
714 * Quirks may be needed to prevent GPE flooding on a specific GPE. The
715 * flooding typically cannot be detected and automatically prevented by
716 * ACPI_GPE_DISPATCH_NONE check because there is a _Lxx/_Exx prepared in
717 * the AML tables. This normally indicates a feature gap in Linux, thus
718 * instead of providing endless quirk tables, we provide a boot parameter
719 * for those who want this quirk. For example, if the users want to prevent
720 * the GPE flooding for GPE 00, they need to specify the following boot
721 * parameter:
722 *   acpi_mask_gpe=0x00
723 * The masking status can be modified by the following runtime controlling
724 * interface:
725 *   echo unmask > /sys/firmware/acpi/interrupts/gpe00
726 */
727
728/*
729 * Currently, the GPE flooding prevention only supports masking GPEs
730 * numbered from 00 to 7f.
731 */
732#define ACPI_MASKABLE_GPE_MAX	0x80
733
734static u64 __initdata acpi_masked_gpes;
735
736static int __init acpi_gpe_set_masked_gpes(char *val)
737{
738	u8 gpe;
739
740	if (kstrtou8(val, 0, &gpe) || gpe > ACPI_MASKABLE_GPE_MAX)
741		return -EINVAL;
742	acpi_masked_gpes |= ((u64)1<<gpe);
743
744	return 1;
745}
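/*
 * Editorial note (not part of the original source): unlike the bitmap-based
 * v6.x code above, this version keeps the mask in a single u64, so although
 * ACPI_MASKABLE_GPE_MAX is 0x80 only GPEs 0x00-0x3f can actually be
 * recorded, and each acpi_mask_gpe= occurrence names exactly one GPE rather
 * than a list; later kernels switched to a bitmap to lift both limits.
 */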
746__setup("acpi_mask_gpe=", acpi_gpe_set_masked_gpes);
747
748void __init acpi_gpe_apply_masked_gpes(void)
749{
750	acpi_handle handle;
751	acpi_status status;
752	u8 gpe;
753
754	for (gpe = 0;
755	     gpe < min_t(u8, ACPI_MASKABLE_GPE_MAX, acpi_current_gpe_count);
756	     gpe++) {
757		if (acpi_masked_gpes & ((u64)1<<gpe)) {
758			status = acpi_get_gpe_device(gpe, &handle);
759			if (ACPI_SUCCESS(status)) {
760				pr_info("Masking GPE 0x%x.\n", gpe);
761				(void)acpi_mask_gpe(handle, gpe, TRUE);
762			}
763		}
764	}
765}
766
767void acpi_irq_stats_init(void)
768{
769	acpi_status status;
770	int i;
771
772	if (all_counters)
773		return;
774
775	num_gpes = acpi_current_gpe_count;
776	num_counters = num_gpes + ACPI_NUM_FIXED_EVENTS + NUM_COUNTERS_EXTRA;
777
778	all_attrs = kzalloc(sizeof(struct attribute *) * (num_counters + 1),
779			    GFP_KERNEL);
780	if (all_attrs == NULL)
781		return;
782
783	all_counters = kzalloc(sizeof(struct event_counter) * (num_counters),
784			       GFP_KERNEL);
785	if (all_counters == NULL)
786		goto fail;
787
788	status = acpi_install_global_event_handler(acpi_global_event_handler, NULL);
789	if (ACPI_FAILURE(status))
790		goto fail;
791
792	counter_attrs = kzalloc(sizeof(struct kobj_attribute) * (num_counters),
793				GFP_KERNEL);
794	if (counter_attrs == NULL)
795		goto fail;
796
797	for (i = 0; i < num_counters; ++i) {
798		char buffer[12];
799		char *name;
800
801		if (i < num_gpes)
802			sprintf(buffer, "gpe%02X", i);
803		else if (i == num_gpes + ACPI_EVENT_PMTIMER)
804			sprintf(buffer, "ff_pmtimer");
805		else if (i == num_gpes + ACPI_EVENT_GLOBAL)
806			sprintf(buffer, "ff_gbl_lock");
807		else if (i == num_gpes + ACPI_EVENT_POWER_BUTTON)
808			sprintf(buffer, "ff_pwr_btn");
809		else if (i == num_gpes + ACPI_EVENT_SLEEP_BUTTON)
810			sprintf(buffer, "ff_slp_btn");
811		else if (i == num_gpes + ACPI_EVENT_RTC)
812			sprintf(buffer, "ff_rt_clk");
813		else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_GPE)
814			sprintf(buffer, "gpe_all");
815		else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI)
816			sprintf(buffer, "sci");
817		else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI_NOT)
818			sprintf(buffer, "sci_not");
819		else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_ERROR)
820			sprintf(buffer, "error");
821		else
822			sprintf(buffer, "bug%02X", i);
823
824		name = kstrdup(buffer, GFP_KERNEL);
825		if (name == NULL)
826			goto fail;
827
828		sysfs_attr_init(&counter_attrs[i].attr);
829		counter_attrs[i].attr.name = name;
830		counter_attrs[i].attr.mode = 0644;
831		counter_attrs[i].show = counter_show;
832		counter_attrs[i].store = counter_set;
833
834		all_attrs[i] = &counter_attrs[i].attr;
835	}
836
837	interrupt_stats_attr_group.attrs = all_attrs;
838	if (!sysfs_create_group(acpi_kobj, &interrupt_stats_attr_group))
839		return;
840
841fail:
842	delete_gpe_attr_array();
843	return;
844}
845
846static void __exit interrupt_stats_exit(void)
847{
848	sysfs_remove_group(acpi_kobj, &interrupt_stats_attr_group);
849
850	delete_gpe_attr_array();
851
852	return;
853}
854
855static ssize_t
856acpi_show_profile(struct device *dev, struct device_attribute *attr,
857		  char *buf)
858{
859	return sprintf(buf, "%d\n", acpi_gbl_FADT.preferred_profile);
860}
861
862static const struct device_attribute pm_profile_attr =
863	__ATTR(pm_profile, S_IRUGO, acpi_show_profile, NULL);
864
865static ssize_t hotplug_enabled_show(struct kobject *kobj,
866				    struct kobj_attribute *attr, char *buf)
867{
868	struct acpi_hotplug_profile *hotplug = to_acpi_hotplug_profile(kobj);
869
870	return sprintf(buf, "%d\n", hotplug->enabled);
871}
872
873static ssize_t hotplug_enabled_store(struct kobject *kobj,
874				     struct kobj_attribute *attr,
875				     const char *buf, size_t size)
876{
877	struct acpi_hotplug_profile *hotplug = to_acpi_hotplug_profile(kobj);
878	unsigned int val;
879
880	if (kstrtouint(buf, 10, &val) || val > 1)
881		return -EINVAL;
882
883	acpi_scan_hotplug_enabled(hotplug, val);
884	return size;
885}
886
887static struct kobj_attribute hotplug_enabled_attr =
888	__ATTR(enabled, S_IRUGO | S_IWUSR, hotplug_enabled_show,
889		hotplug_enabled_store);
890
891static struct attribute *hotplug_profile_attrs[] = {
892	&hotplug_enabled_attr.attr,
893	NULL
894};
895
896static struct kobj_type acpi_hotplug_profile_ktype = {
897	.sysfs_ops = &kobj_sysfs_ops,
898	.default_attrs = hotplug_profile_attrs,
899};
900
901void acpi_sysfs_add_hotplug_profile(struct acpi_hotplug_profile *hotplug,
902				    const char *name)
903{
904	int error;
905
906	if (!hotplug_kobj)
907		goto err_out;
908
909	error = kobject_init_and_add(&hotplug->kobj,
910		&acpi_hotplug_profile_ktype, hotplug_kobj, "%s", name);
911	if (error)
912		goto err_out;
913
914	kobject_uevent(&hotplug->kobj, KOBJ_ADD);
915	return;
916
917 err_out:
918	pr_err(PREFIX "Unable to add hotplug profile '%s'\n", name);
919}
920
921static ssize_t force_remove_show(struct kobject *kobj,
922				 struct kobj_attribute *attr, char *buf)
923{
924	return sprintf(buf, "%d\n", !!acpi_force_hot_remove);
925}
926
927static ssize_t force_remove_store(struct kobject *kobj,
928				  struct kobj_attribute *attr,
929				  const char *buf, size_t size)
930{
931	bool val;
932	int ret;
933
934	ret = strtobool(buf, &val);
935	if (ret < 0)
936		return ret;
937
938	lock_device_hotplug();
939	acpi_force_hot_remove = val;
940	unlock_device_hotplug();
941	return size;
942}
943
944static const struct kobj_attribute force_remove_attr =
945	__ATTR(force_remove, S_IRUGO | S_IWUSR, force_remove_show,
946	       force_remove_store);
947
948int __init acpi_sysfs_init(void)
949{
950	int result;
951
952	result = acpi_tables_sysfs_init();
953	if (result)
954		return result;
955
956	hotplug_kobj = kobject_create_and_add("hotplug", acpi_kobj);
957	if (!hotplug_kobj)
958		return -ENOMEM;
959
960	result = sysfs_create_file(hotplug_kobj, &force_remove_attr.attr);
961	if (result)
962		return result;
963
964	result = sysfs_create_file(acpi_kobj, &pm_profile_attr.attr);
965	return result;
966}