drivers/acpi/sysfs.c, v5.9
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * sysfs.c - ACPI sysfs interface to userspace.
   4 */
   5
   6#define pr_fmt(fmt) "ACPI: " fmt
   7
   8#include <linux/init.h>
   9#include <linux/kernel.h>
  10#include <linux/moduleparam.h>
  11#include <linux/acpi.h>
  12
  13#include "internal.h"
  14
  15#define _COMPONENT		ACPI_SYSTEM_COMPONENT
  16ACPI_MODULE_NAME("sysfs");
  17
  18#ifdef CONFIG_ACPI_DEBUG
  19/*
  20 * ACPI debug sysfs I/F, including:
  21 * /sys/module/acpi/parameters/debug_layer
  22 * /sys/module/acpi/parameters/debug_level
  23 * /sys/module/acpi/parameters/trace_method_name
  24 * /sys/module/acpi/parameters/trace_state
  25 * /sys/module/acpi/parameters/trace_debug_layer
  26 * /sys/module/acpi/parameters/trace_debug_level
  27 */
  28
  29struct acpi_dlayer {
  30	const char *name;
  31	unsigned long value;
  32};
  33struct acpi_dlevel {
  34	const char *name;
  35	unsigned long value;
  36};
  37#define ACPI_DEBUG_INIT(v)	{ .name = #v, .value = v }
  38
  39static const struct acpi_dlayer acpi_debug_layers[] = {
  40	ACPI_DEBUG_INIT(ACPI_UTILITIES),
  41	ACPI_DEBUG_INIT(ACPI_HARDWARE),
  42	ACPI_DEBUG_INIT(ACPI_EVENTS),
  43	ACPI_DEBUG_INIT(ACPI_TABLES),
  44	ACPI_DEBUG_INIT(ACPI_NAMESPACE),
  45	ACPI_DEBUG_INIT(ACPI_PARSER),
  46	ACPI_DEBUG_INIT(ACPI_DISPATCHER),
  47	ACPI_DEBUG_INIT(ACPI_EXECUTER),
  48	ACPI_DEBUG_INIT(ACPI_RESOURCES),
  49	ACPI_DEBUG_INIT(ACPI_CA_DEBUGGER),
  50	ACPI_DEBUG_INIT(ACPI_OS_SERVICES),
  51	ACPI_DEBUG_INIT(ACPI_CA_DISASSEMBLER),
  52	ACPI_DEBUG_INIT(ACPI_COMPILER),
  53	ACPI_DEBUG_INIT(ACPI_TOOLS),
  54
  55	ACPI_DEBUG_INIT(ACPI_BUS_COMPONENT),
  56	ACPI_DEBUG_INIT(ACPI_AC_COMPONENT),
  57	ACPI_DEBUG_INIT(ACPI_BATTERY_COMPONENT),
  58	ACPI_DEBUG_INIT(ACPI_BUTTON_COMPONENT),
  59	ACPI_DEBUG_INIT(ACPI_SBS_COMPONENT),
  60	ACPI_DEBUG_INIT(ACPI_FAN_COMPONENT),
  61	ACPI_DEBUG_INIT(ACPI_PCI_COMPONENT),
  62	ACPI_DEBUG_INIT(ACPI_POWER_COMPONENT),
  63	ACPI_DEBUG_INIT(ACPI_CONTAINER_COMPONENT),
  64	ACPI_DEBUG_INIT(ACPI_SYSTEM_COMPONENT),
  65	ACPI_DEBUG_INIT(ACPI_THERMAL_COMPONENT),
  66	ACPI_DEBUG_INIT(ACPI_MEMORY_DEVICE_COMPONENT),
  67	ACPI_DEBUG_INIT(ACPI_VIDEO_COMPONENT),
  68	ACPI_DEBUG_INIT(ACPI_PROCESSOR_COMPONENT),
  69};
  70
  71static const struct acpi_dlevel acpi_debug_levels[] = {
  72	ACPI_DEBUG_INIT(ACPI_LV_INIT),
  73	ACPI_DEBUG_INIT(ACPI_LV_DEBUG_OBJECT),
  74	ACPI_DEBUG_INIT(ACPI_LV_INFO),
  75	ACPI_DEBUG_INIT(ACPI_LV_REPAIR),
  76	ACPI_DEBUG_INIT(ACPI_LV_TRACE_POINT),
  77
  78	ACPI_DEBUG_INIT(ACPI_LV_INIT_NAMES),
  79	ACPI_DEBUG_INIT(ACPI_LV_PARSE),
  80	ACPI_DEBUG_INIT(ACPI_LV_LOAD),
  81	ACPI_DEBUG_INIT(ACPI_LV_DISPATCH),
  82	ACPI_DEBUG_INIT(ACPI_LV_EXEC),
  83	ACPI_DEBUG_INIT(ACPI_LV_NAMES),
  84	ACPI_DEBUG_INIT(ACPI_LV_OPREGION),
  85	ACPI_DEBUG_INIT(ACPI_LV_BFIELD),
  86	ACPI_DEBUG_INIT(ACPI_LV_TABLES),
  87	ACPI_DEBUG_INIT(ACPI_LV_VALUES),
  88	ACPI_DEBUG_INIT(ACPI_LV_OBJECTS),
  89	ACPI_DEBUG_INIT(ACPI_LV_RESOURCES),
  90	ACPI_DEBUG_INIT(ACPI_LV_USER_REQUESTS),
  91	ACPI_DEBUG_INIT(ACPI_LV_PACKAGE),
  92
  93	ACPI_DEBUG_INIT(ACPI_LV_ALLOCATIONS),
  94	ACPI_DEBUG_INIT(ACPI_LV_FUNCTIONS),
  95	ACPI_DEBUG_INIT(ACPI_LV_OPTIMIZATIONS),
  96
  97	ACPI_DEBUG_INIT(ACPI_LV_MUTEX),
  98	ACPI_DEBUG_INIT(ACPI_LV_THREADS),
  99	ACPI_DEBUG_INIT(ACPI_LV_IO),
 100	ACPI_DEBUG_INIT(ACPI_LV_INTERRUPTS),
 101
 102	ACPI_DEBUG_INIT(ACPI_LV_AML_DISASSEMBLE),
 103	ACPI_DEBUG_INIT(ACPI_LV_VERBOSE_INFO),
 104	ACPI_DEBUG_INIT(ACPI_LV_FULL_TABLES),
 105	ACPI_DEBUG_INIT(ACPI_LV_EVENTS),
 106};
 107
 108static int param_get_debug_layer(char *buffer, const struct kernel_param *kp)
 109{
 110	int result = 0;
 111	int i;
 112
 113	result = sprintf(buffer, "%-25s\tHex        SET\n", "Description");
 114
 115	for (i = 0; i < ARRAY_SIZE(acpi_debug_layers); i++) {
 116		result += sprintf(buffer + result, "%-25s\t0x%08lX [%c]\n",
 117				  acpi_debug_layers[i].name,
 118				  acpi_debug_layers[i].value,
 119				  (acpi_dbg_layer & acpi_debug_layers[i].value)
 120				  ? '*' : ' ');
 121	}
 122	result +=
 123	    sprintf(buffer + result, "%-25s\t0x%08X [%c]\n", "ACPI_ALL_DRIVERS",
 124		    ACPI_ALL_DRIVERS,
 125		    (acpi_dbg_layer & ACPI_ALL_DRIVERS) ==
 126		    ACPI_ALL_DRIVERS ? '*' : (acpi_dbg_layer & ACPI_ALL_DRIVERS)
 127		    == 0 ? ' ' : '-');
 128	result +=
 129	    sprintf(buffer + result,
 130		    "--\ndebug_layer = 0x%08X ( * = enabled)\n",
 131		    acpi_dbg_layer);
 132
 133	return result;
 134}
 135
 136static int param_get_debug_level(char *buffer, const struct kernel_param *kp)
 137{
 138	int result = 0;
 139	int i;
 140
 141	result = sprintf(buffer, "%-25s\tHex        SET\n", "Description");
 142
 143	for (i = 0; i < ARRAY_SIZE(acpi_debug_levels); i++) {
 144		result += sprintf(buffer + result, "%-25s\t0x%08lX [%c]\n",
 145				  acpi_debug_levels[i].name,
 146				  acpi_debug_levels[i].value,
 147				  (acpi_dbg_level & acpi_debug_levels[i].value)
 148				  ? '*' : ' ');
 149	}
 150	result +=
 151	    sprintf(buffer + result, "--\ndebug_level = 0x%08X (* = enabled)\n",
 152		    acpi_dbg_level);
 153
 154	return result;
 155}
 156
 157static const struct kernel_param_ops param_ops_debug_layer = {
 158	.set = param_set_uint,
 159	.get = param_get_debug_layer,
 160};
 161
 162static const struct kernel_param_ops param_ops_debug_level = {
 163	.set = param_set_uint,
 164	.get = param_get_debug_level,
 165};
 166
 167module_param_cb(debug_layer, &param_ops_debug_layer, &acpi_dbg_layer, 0644);
 168module_param_cb(debug_level, &param_ops_debug_level, &acpi_dbg_level, 0644);
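/*
 * Usage sketch for the two parameters registered above (assuming a kernel
 * built with CONFIG_ACPI_DEBUG and sysfs mounted at /sys; the mask value
 * shown is only an illustration):
 *
 *   # cat /sys/module/acpi/parameters/debug_layer
 *   # echo 0x4 > /sys/module/acpi/parameters/debug_level
 *
 * Reads go through the getters above, which print one line per known bit;
 * writes go through param_set_uint(), so decimal or 0x-prefixed hex works.
 */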
 169
 170static char trace_method_name[1024];
 171
 172static int param_set_trace_method_name(const char *val,
 173				       const struct kernel_param *kp)
 174{
 175	u32 saved_flags = 0;
 176	bool is_abs_path = true;
 177
 178	if (*val != '\\')
 179		is_abs_path = false;
 180
 181	if ((is_abs_path && strlen(val) > 1023) ||
 182	    (!is_abs_path && strlen(val) > 1022)) {
 183		pr_err("%s: string parameter too long\n", kp->name);
 184		return -ENOSPC;
 185	}
 186
 187	/*
 188	 * It's not safe to update acpi_gbl_trace_method_name without
 189	 * having the tracer stopped, so we save the original tracer
 190	 * state and disable it.
 191	 */
 192	saved_flags = acpi_gbl_trace_flags;
 193	(void)acpi_debug_trace(NULL,
 194			       acpi_gbl_trace_dbg_level,
 195			       acpi_gbl_trace_dbg_layer,
 196			       0);
 197
 198	/* This is a hack.  We can't kmalloc in early boot. */
 199	if (is_abs_path)
 200		strcpy(trace_method_name, val);
 201	else {
 202		trace_method_name[0] = '\\';
 203		strcpy(trace_method_name+1, val);
 204	}
 205
 206	/* Restore the original tracer state */
 207	(void)acpi_debug_trace(trace_method_name,
 208			       acpi_gbl_trace_dbg_level,
 209			       acpi_gbl_trace_dbg_layer,
 210			       saved_flags);
 211
 212	return 0;
 213}
 214
 215static int param_get_trace_method_name(char *buffer, const struct kernel_param *kp)
 216{
 217	return scnprintf(buffer, PAGE_SIZE, "%s\n", acpi_gbl_trace_method_name);
 218}
 219
 220static const struct kernel_param_ops param_ops_trace_method = {
 221	.set = param_set_trace_method_name,
 222	.get = param_get_trace_method_name,
 223};
 224
 225static const struct kernel_param_ops param_ops_trace_attrib = {
 226	.set = param_set_uint,
 227	.get = param_get_uint,
 228};
 229
 230module_param_cb(trace_method_name, &param_ops_trace_method, &trace_method_name, 0644);
 231module_param_cb(trace_debug_layer, &param_ops_trace_attrib, &acpi_gbl_trace_dbg_layer, 0644);
 232module_param_cb(trace_debug_level, &param_ops_trace_attrib, &acpi_gbl_trace_dbg_level, 0644);
 233
 234static int param_set_trace_state(const char *val,
 235				 const struct kernel_param *kp)
 236{
 237	acpi_status status;
 238	const char *method = trace_method_name;
 239	u32 flags = 0;
 240
 241/* The "xxx-once" comparisons must be checked before the plain "xxx" ones */
 242#define acpi_compare_param(val, key)	\
 243	strncmp((val), (key), sizeof(key) - 1)
 244
 245	if (!acpi_compare_param(val, "enable")) {
 246		method = NULL;
 247		flags = ACPI_TRACE_ENABLED;
 248	} else if (!acpi_compare_param(val, "disable"))
 249		method = NULL;
 250	else if (!acpi_compare_param(val, "method-once"))
 251		flags = ACPI_TRACE_ENABLED | ACPI_TRACE_ONESHOT;
 252	else if (!acpi_compare_param(val, "method"))
 253		flags = ACPI_TRACE_ENABLED;
 254	else if (!acpi_compare_param(val, "opcode-once"))
 255		flags = ACPI_TRACE_ENABLED | ACPI_TRACE_ONESHOT | ACPI_TRACE_OPCODE;
 256	else if (!acpi_compare_param(val, "opcode"))
 257		flags = ACPI_TRACE_ENABLED | ACPI_TRACE_OPCODE;
 258	else
 259		return -EINVAL;
 260
 261	status = acpi_debug_trace(method,
 262				  acpi_gbl_trace_dbg_level,
 263				  acpi_gbl_trace_dbg_layer,
 264				  flags);
 265	if (ACPI_FAILURE(status))
 266		return -EBUSY;
 267
 268	return 0;
 269}
 270
 271static int param_get_trace_state(char *buffer, const struct kernel_param *kp)
 272{
 273	if (!(acpi_gbl_trace_flags & ACPI_TRACE_ENABLED))
 274		return sprintf(buffer, "disable\n");
 275	else {
 276		if (acpi_gbl_trace_method_name) {
 277			if (acpi_gbl_trace_flags & ACPI_TRACE_ONESHOT)
 278				return sprintf(buffer, "method-once\n");
 279			else
 280				return sprintf(buffer, "method\n");
 281		} else
 282			return sprintf(buffer, "enable\n");
 283	}
 284	return 0;
 285}
 286
 287module_param_call(trace_state, param_set_trace_state, param_get_trace_state,
 288		  NULL, 0644);
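/*
 * Usage sketch for the tracing parameters (CONFIG_ACPI_DEBUG kernels; the
 * method path is only an example and must exist in the machine's namespace):
 *
 *   # echo -n '\_SB.PCI0._INI' > /sys/module/acpi/parameters/trace_method_name
 *   # echo method-once > /sys/module/acpi/parameters/trace_state
 *
 * trace_state accepts "enable", "disable", "method", "method-once",
 * "opcode" and "opcode-once", matching param_set_trace_state() above; a
 * missing leading '\' on the method name is added automatically.
 */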
 289#endif /* CONFIG_ACPI_DEBUG */
 290
 291
 292/* /sys/module/acpi/parameters/aml_debug_output */
 293
 294module_param_named(aml_debug_output, acpi_gbl_enable_aml_debug_object,
 295		   byte, 0644);
 296MODULE_PARM_DESC(aml_debug_output,
 297		 "To enable/disable the ACPI Debug Object output.");
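/*
 * Sketch: writing 1 here at run time turns on printing of AML stores to the
 * Debug object (the messages land in the kernel log), writing 0 turns it off:
 *
 *   # echo 1 > /sys/module/acpi/parameters/aml_debug_output
 */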
 298
 299/* /sys/module/acpi/parameters/acpica_version */
 300static int param_get_acpica_version(char *buffer,
 301				    const struct kernel_param *kp)
 302{
 303	int result;
 304
 305	result = sprintf(buffer, "%x\n", ACPI_CA_VERSION);
 306
 307	return result;
 308}
 309
 310module_param_call(acpica_version, NULL, param_get_acpica_version, NULL, 0444);
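/*
 * Reading the parameter simply returns ACPI_CA_VERSION formatted as hex,
 * i.e. the ACPICA release bundled with this kernel:
 *
 *   $ cat /sys/module/acpi/parameters/acpica_version
 */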
 311
 312/*
 313 * ACPI table sysfs I/F:
 314 * /sys/firmware/acpi/tables/
 315 * /sys/firmware/acpi/tables/data/
 316 * /sys/firmware/acpi/tables/dynamic/
 317 */
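/*
 * Sketch of the resulting layout: each installed table is exposed as a
 * read-only binary file named after its signature (with an instance suffix
 * when several tables share one, see acpi_table_attr_init() below), e.g.:
 *
 *   # cat /sys/firmware/acpi/tables/DSDT > /tmp/dsdt.dat
 *   # ls /sys/firmware/acpi/tables/dynamic/
 */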
 318
 319static LIST_HEAD(acpi_table_attr_list);
 320static struct kobject *tables_kobj;
 321static struct kobject *tables_data_kobj;
 322static struct kobject *dynamic_tables_kobj;
 323static struct kobject *hotplug_kobj;
 324
 325#define ACPI_MAX_TABLE_INSTANCES	999
 326#define ACPI_INST_SIZE			4 /* including trailing 0 */
 327
 328struct acpi_table_attr {
 329	struct bin_attribute attr;
 330	char name[ACPI_NAMESEG_SIZE];
 331	int instance;
 332	char filename[ACPI_NAMESEG_SIZE+ACPI_INST_SIZE];
 333	struct list_head node;
 334};
 335
 336struct acpi_data_attr {
 337	struct bin_attribute attr;
 338	u64	addr;
 339};
 340
 341static ssize_t acpi_table_show(struct file *filp, struct kobject *kobj,
 342			       struct bin_attribute *bin_attr, char *buf,
 343			       loff_t offset, size_t count)
 344{
 345	struct acpi_table_attr *table_attr =
 346	    container_of(bin_attr, struct acpi_table_attr, attr);
 347	struct acpi_table_header *table_header = NULL;
 348	acpi_status status;
 349	ssize_t rc;
 350
 351	status = acpi_get_table(table_attr->name, table_attr->instance,
 352				&table_header);
 353	if (ACPI_FAILURE(status))
 354		return -ENODEV;
 355
 356	rc = memory_read_from_buffer(buf, count, &offset, table_header,
 357			table_header->length);
 358	acpi_put_table(table_header);
 359	return rc;
 360}
 361
 362static int acpi_table_attr_init(struct kobject *tables_obj,
 363				struct acpi_table_attr *table_attr,
 364				struct acpi_table_header *table_header)
 365{
 366	struct acpi_table_header *header = NULL;
 367	struct acpi_table_attr *attr = NULL;
 368	char instance_str[ACPI_INST_SIZE];
 369
 370	sysfs_attr_init(&table_attr->attr.attr);
 371	ACPI_COPY_NAMESEG(table_attr->name, table_header->signature);
 372
 373	list_for_each_entry(attr, &acpi_table_attr_list, node) {
 374		if (ACPI_COMPARE_NAMESEG(table_attr->name, attr->name))
 375			if (table_attr->instance < attr->instance)
 376				table_attr->instance = attr->instance;
 377	}
 378	table_attr->instance++;
 379	if (table_attr->instance > ACPI_MAX_TABLE_INSTANCES) {
 380		pr_warn("%4.4s: too many table instances\n",
 381			table_attr->name);
 382		return -ERANGE;
 383	}
 384
 385	ACPI_COPY_NAMESEG(table_attr->filename, table_header->signature);
 386	table_attr->filename[ACPI_NAMESEG_SIZE] = '\0';
 387	if (table_attr->instance > 1 || (table_attr->instance == 1 &&
 388					 !acpi_get_table
 389					 (table_header->signature, 2, &header))) {
 390		snprintf(instance_str, sizeof(instance_str), "%u",
 391			 table_attr->instance);
 392		strcat(table_attr->filename, instance_str);
 393	}
 394
 395	table_attr->attr.size = table_header->length;
 396	table_attr->attr.read = acpi_table_show;
 397	table_attr->attr.attr.name = table_attr->filename;
 398	table_attr->attr.attr.mode = 0400;
 399
 400	return sysfs_create_bin_file(tables_obj, &table_attr->attr);
 401}
 402
 403acpi_status acpi_sysfs_table_handler(u32 event, void *table, void *context)
 404{
 405	struct acpi_table_attr *table_attr;
 406
 407	switch (event) {
 408	case ACPI_TABLE_EVENT_INSTALL:
 409		table_attr =
 410		    kzalloc(sizeof(struct acpi_table_attr), GFP_KERNEL);
 411		if (!table_attr)
 412			return AE_NO_MEMORY;
 413
 414		if (acpi_table_attr_init(dynamic_tables_kobj,
 415					 table_attr, table)) {
 416			kfree(table_attr);
 417			return AE_ERROR;
 418		}
 419		list_add_tail(&table_attr->node, &acpi_table_attr_list);
 420		break;
 421	case ACPI_TABLE_EVENT_LOAD:
 422	case ACPI_TABLE_EVENT_UNLOAD:
 423	case ACPI_TABLE_EVENT_UNINSTALL:
 424		/*
 425		 * we do not need to do anything right now
 426		 * because the table is not deleted from the
 427		 * global table list when unloading it.
 428		 */
 429		break;
 430	default:
 431		return AE_BAD_PARAMETER;
 432	}
 433	return AE_OK;
 434}
 435
 436static ssize_t acpi_data_show(struct file *filp, struct kobject *kobj,
 437			      struct bin_attribute *bin_attr, char *buf,
 438			      loff_t offset, size_t count)
 439{
 440	struct acpi_data_attr *data_attr;
 441	void __iomem *base;
 442	ssize_t rc;
 443
 444	data_attr = container_of(bin_attr, struct acpi_data_attr, attr);
 445
 446	base = acpi_os_map_memory(data_attr->addr, data_attr->attr.size);
 447	if (!base)
 448		return -ENOMEM;
 449	rc = memory_read_from_buffer(buf, count, &offset, base,
 450				     data_attr->attr.size);
 451	acpi_os_unmap_memory(base, data_attr->attr.size);
 452
 453	return rc;
 454}
 455
 456static int acpi_bert_data_init(void *th, struct acpi_data_attr *data_attr)
 457{
 458	struct acpi_table_bert *bert = th;
 459
 460	if (bert->header.length < sizeof(struct acpi_table_bert) ||
 461	    bert->region_length < sizeof(struct acpi_hest_generic_status)) {
 462		kfree(data_attr);
 463		return -EINVAL;
 464	}
 465	data_attr->addr = bert->address;
 466	data_attr->attr.size = bert->region_length;
 467	data_attr->attr.attr.name = "BERT";
 468
 469	return sysfs_create_bin_file(tables_data_kobj, &data_attr->attr);
 470}
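/*
 * Sketch: on systems with a BERT, the boot error region it describes becomes
 * readable (by root, mode 0400) as a binary file, e.g.:
 *
 *   # hexdump -C /sys/firmware/acpi/tables/data/BERT | head
 */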
 471
 472static struct acpi_data_obj {
 473	char *name;
 474	int (*fn)(void *, struct acpi_data_attr *);
 475} acpi_data_objs[] = {
 476	{ ACPI_SIG_BERT, acpi_bert_data_init },
 477};
 478
 479#define NUM_ACPI_DATA_OBJS ARRAY_SIZE(acpi_data_objs)
 480
 481static int acpi_table_data_init(struct acpi_table_header *th)
 482{
 483	struct acpi_data_attr *data_attr;
 484	int i;
 485
 486	for (i = 0; i < NUM_ACPI_DATA_OBJS; i++) {
 487		if (ACPI_COMPARE_NAMESEG(th->signature, acpi_data_objs[i].name)) {
 488			data_attr = kzalloc(sizeof(*data_attr), GFP_KERNEL);
 489			if (!data_attr)
 490				return -ENOMEM;
 491			sysfs_attr_init(&data_attr->attr.attr);
 492			data_attr->attr.read = acpi_data_show;
 493			data_attr->attr.attr.mode = 0400;
 494			return acpi_data_objs[i].fn(th, data_attr);
 495		}
 496	}
 497	return 0;
 498}
 499
 500static int acpi_tables_sysfs_init(void)
 501{
 502	struct acpi_table_attr *table_attr;
 503	struct acpi_table_header *table_header = NULL;
 504	int table_index;
 505	acpi_status status;
 506	int ret;
 507
 508	tables_kobj = kobject_create_and_add("tables", acpi_kobj);
 509	if (!tables_kobj)
 510		goto err;
 511
 512	tables_data_kobj = kobject_create_and_add("data", tables_kobj);
 513	if (!tables_data_kobj)
 514		goto err_tables_data;
 515
 516	dynamic_tables_kobj = kobject_create_and_add("dynamic", tables_kobj);
 517	if (!dynamic_tables_kobj)
 518		goto err_dynamic_tables;
 519
 520	for (table_index = 0;; table_index++) {
 521		status = acpi_get_table_by_index(table_index, &table_header);
 522
 523		if (status == AE_BAD_PARAMETER)
 524			break;
 525
 526		if (ACPI_FAILURE(status))
 527			continue;
 528
 529		table_attr = kzalloc(sizeof(*table_attr), GFP_KERNEL);
 530		if (!table_attr)
 531			return -ENOMEM;
 532
 533		ret = acpi_table_attr_init(tables_kobj,
 534					   table_attr, table_header);
 535		if (ret) {
 536			kfree(table_attr);
 537			return ret;
 538		}
 539		list_add_tail(&table_attr->node, &acpi_table_attr_list);
 540		acpi_table_data_init(table_header);
 541	}
 542
 543	kobject_uevent(tables_kobj, KOBJ_ADD);
 544	kobject_uevent(tables_data_kobj, KOBJ_ADD);
 545	kobject_uevent(dynamic_tables_kobj, KOBJ_ADD);
 546
 547	return 0;
 548err_dynamic_tables:
 549	kobject_put(tables_data_kobj);
 550err_tables_data:
 551	kobject_put(tables_kobj);
 552err:
 553	return -ENOMEM;
 554}
 555
 556/*
 557 * Detailed ACPI IRQ counters:
 558 * /sys/firmware/acpi/interrupts/
 559 */
 560
 561u32 acpi_irq_handled;
 562u32 acpi_irq_not_handled;
 563
 564#define COUNT_GPE 0
 565#define COUNT_SCI 1		/* acpi_irq_handled */
 566#define COUNT_SCI_NOT 2		/* acpi_irq_not_handled */
 567#define COUNT_ERROR 3		/* other */
 568#define NUM_COUNTERS_EXTRA 4
 569
 570struct event_counter {
 571	u32 count;
 572	u32 flags;
 573};
 574
 575static struct event_counter *all_counters;
 576static u32 num_gpes;
 577static u32 num_counters;
 578static struct attribute **all_attrs;
 579static u32 acpi_gpe_count;
 580
 581static struct attribute_group interrupt_stats_attr_group = {
 582	.name = "interrupts",
 583};
 584
 585static struct kobj_attribute *counter_attrs;
 586
 587static void delete_gpe_attr_array(void)
 588{
 589	struct event_counter *tmp = all_counters;
 590
 591	all_counters = NULL;
 592	kfree(tmp);
 593
 594	if (counter_attrs) {
 595		int i;
 596
 597		for (i = 0; i < num_gpes; i++)
 598			kfree(counter_attrs[i].attr.name);
 599
 600		kfree(counter_attrs);
 601	}
 602	kfree(all_attrs);
 603
 604	return;
 605}
 606
 607static void gpe_count(u32 gpe_number)
 608{
 609	acpi_gpe_count++;
 610
 611	if (!all_counters)
 612		return;
 613
 614	if (gpe_number < num_gpes)
 615		all_counters[gpe_number].count++;
 616	else
 617		all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS +
 618			     COUNT_ERROR].count++;
 619
 620	return;
 621}
 622
 623static void fixed_event_count(u32 event_number)
 624{
 625	if (!all_counters)
 626		return;
 627
 628	if (event_number < ACPI_NUM_FIXED_EVENTS)
 629		all_counters[num_gpes + event_number].count++;
 630	else
 631		all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS +
 632			     COUNT_ERROR].count++;
 633
 634	return;
 635}
 636
 637static void acpi_global_event_handler(u32 event_type, acpi_handle device,
 638	u32 event_number, void *context)
 639{
 640	if (event_type == ACPI_EVENT_TYPE_GPE) {
 641		gpe_count(event_number);
 642		pr_debug("GPE event 0x%02x\n", event_number);
 643	} else if (event_type == ACPI_EVENT_TYPE_FIXED) {
 644		fixed_event_count(event_number);
 645		pr_debug("Fixed event 0x%02x\n", event_number);
 646	} else {
 647		pr_debug("Other event 0x%02x\n", event_number);
 648	}
 649}
 650
 651static int get_status(u32 index, acpi_event_status *ret,
 652		      acpi_handle *handle)
 653{
 654	acpi_status status;
 655
 656	if (index >= num_gpes + ACPI_NUM_FIXED_EVENTS)
 657		return -EINVAL;
 658
 659	if (index < num_gpes) {
 660		status = acpi_get_gpe_device(index, handle);
 661		if (ACPI_FAILURE(status)) {
 662			ACPI_EXCEPTION((AE_INFO, AE_NOT_FOUND,
 663					"Invalid GPE 0x%x", index));
 664			return -ENXIO;
 665		}
 666		status = acpi_get_gpe_status(*handle, index, ret);
 667	} else {
 668		status = acpi_get_event_status(index - num_gpes, ret);
 669	}
 670	if (ACPI_FAILURE(status))
 671		return -EIO;
 672
 673	return 0;
 674}
 675
 676static ssize_t counter_show(struct kobject *kobj,
 677			    struct kobj_attribute *attr, char *buf)
 678{
 679	int index = attr - counter_attrs;
 680	int size;
 681	acpi_handle handle;
 682	acpi_event_status status;
 683	int result = 0;
 684
 685	all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI].count =
 686	    acpi_irq_handled;
 687	all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI_NOT].count =
 688	    acpi_irq_not_handled;
 689	all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_GPE].count =
 690	    acpi_gpe_count;
 691	size = sprintf(buf, "%8u", all_counters[index].count);
 692
 693	/* "gpe_all" or "sci" */
 694	if (index >= num_gpes + ACPI_NUM_FIXED_EVENTS)
 695		goto end;
 696
 697	result = get_status(index, &status, &handle);
 698	if (result)
 699		goto end;
 700
 701	if (status & ACPI_EVENT_FLAG_ENABLE_SET)
 702		size += sprintf(buf + size, "  EN");
 703	else
 704		size += sprintf(buf + size, "    ");
 705	if (status & ACPI_EVENT_FLAG_STATUS_SET)
 706		size += sprintf(buf + size, " STS");
 707	else
 708		size += sprintf(buf + size, "    ");
 709
 710	if (!(status & ACPI_EVENT_FLAG_HAS_HANDLER))
 711		size += sprintf(buf + size, " invalid     ");
 712	else if (status & ACPI_EVENT_FLAG_ENABLED)
 713		size += sprintf(buf + size, " enabled     ");
 714	else if (status & ACPI_EVENT_FLAG_WAKE_ENABLED)
 715		size += sprintf(buf + size, " wake_enabled");
 716	else
 717		size += sprintf(buf + size, " disabled    ");
 718	if (status & ACPI_EVENT_FLAG_MASKED)
 719		size += sprintf(buf + size, " masked  ");
 720	else
 721		size += sprintf(buf + size, " unmasked");
 722
 723end:
 724	size += sprintf(buf + size, "\n");
 725	return result ? result : size;
 726}
 727
 728/*
 729 * counter_set() sets the specified counter.
 730 * Setting the total "sci" file to any value clears all counters.
 731 * Writes can also enable/disable/clear a GPE/fixed event from user space.
 732 */
 733static ssize_t counter_set(struct kobject *kobj,
 734			   struct kobj_attribute *attr, const char *buf,
 735			   size_t size)
 736{
 737	int index = attr - counter_attrs;
 738	acpi_event_status status;
 739	acpi_handle handle;
 740	int result = 0;
 741	unsigned long tmp;
 742
 743	if (index == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI) {
 744		int i;
 745		for (i = 0; i < num_counters; ++i)
 746			all_counters[i].count = 0;
 747		acpi_gpe_count = 0;
 748		acpi_irq_handled = 0;
 749		acpi_irq_not_handled = 0;
 750		goto end;
 751	}
 752
 753	/* show the event status for both GPEs and Fixed Events */
 754	result = get_status(index, &status, &handle);
 755	if (result)
 756		goto end;
 757
 758	if (!(status & ACPI_EVENT_FLAG_HAS_HANDLER)) {
 759		printk(KERN_WARNING PREFIX
 760		       "Can not change Invalid GPE/Fixed Event status\n");
 761		return -EINVAL;
 762	}
 763
 764	if (index < num_gpes) {
 765		if (!strcmp(buf, "disable\n") &&
 766		    (status & ACPI_EVENT_FLAG_ENABLED))
 767			result = acpi_disable_gpe(handle, index);
 768		else if (!strcmp(buf, "enable\n") &&
 769			 !(status & ACPI_EVENT_FLAG_ENABLED))
 770			result = acpi_enable_gpe(handle, index);
 771		else if (!strcmp(buf, "clear\n") &&
 772			 (status & ACPI_EVENT_FLAG_STATUS_SET))
 773			result = acpi_clear_gpe(handle, index);
 774		else if (!strcmp(buf, "mask\n"))
 775			result = acpi_mask_gpe(handle, index, TRUE);
 776		else if (!strcmp(buf, "unmask\n"))
 777			result = acpi_mask_gpe(handle, index, FALSE);
 778		else if (!kstrtoul(buf, 0, &tmp))
 779			all_counters[index].count = tmp;
 780		else
 781			result = -EINVAL;
 782	} else if (index < num_gpes + ACPI_NUM_FIXED_EVENTS) {
 783		int event = index - num_gpes;
 784		if (!strcmp(buf, "disable\n") &&
 785		    (status & ACPI_EVENT_FLAG_ENABLE_SET))
 786			result = acpi_disable_event(event, ACPI_NOT_ISR);
 787		else if (!strcmp(buf, "enable\n") &&
 788			 !(status & ACPI_EVENT_FLAG_ENABLE_SET))
 789			result = acpi_enable_event(event, ACPI_NOT_ISR);
 790		else if (!strcmp(buf, "clear\n") &&
 791			 (status & ACPI_EVENT_FLAG_STATUS_SET))
 792			result = acpi_clear_event(event);
 793		else if (!kstrtoul(buf, 0, &tmp))
 794			all_counters[index].count = tmp;
 795		else
 796			result = -EINVAL;
 797	} else
 798		all_counters[index].count = strtoul(buf, NULL, 0);
 799
 800	if (ACPI_FAILURE(result))
 801		result = -EINVAL;
 802end:
 803	return result ? result : size;
 804}
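/*
 * Usage sketch for the counter files created by acpi_irq_stats_init() below
 * (the GPE number is only an example; use one that exists on the machine):
 *
 *   # cat /sys/firmware/acpi/interrupts/sci
 *   # echo disable > /sys/firmware/acpi/interrupts/gpe1A
 *   # echo clear > /sys/firmware/acpi/interrupts/gpe1A
 *   # echo 0 > /sys/firmware/acpi/interrupts/sci     # resets all counters
 */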
 805
 806/*
 807 * A Quirk Mechanism for GPE Flooding Prevention:
 808 *
 809 * Quirks may be needed to prevent GPE flooding on a specific GPE. The
 810 * flooding typically cannot be detected and automatically prevented by
 811 * ACPI_GPE_DISPATCH_NONE check because there is a _Lxx/_Exx prepared in
 812 * the AML tables. This normally indicates a feature gap in Linux, thus
 813 * instead of providing endless quirk tables, we provide a boot parameter
 814 * for those who want this quirk. For example, if the users want to prevent
 815 * the GPE flooding for GPE 00, they need to specify the following boot
 816 * parameter:
 817 *   acpi_mask_gpe=0x00
 818 * The masking status can be modified by the following runtime controlling
 819 * interface:
 820 *   echo unmask > /sys/firmware/acpi/interrupts/gpe00
 821 */
 822#define ACPI_MASKABLE_GPE_MAX	0x100
 823static DECLARE_BITMAP(acpi_masked_gpes_map, ACPI_MASKABLE_GPE_MAX) __initdata;
 824
 825static int __init acpi_gpe_set_masked_gpes(char *val)
 826{
 827	u8 gpe;
 828
 829	if (kstrtou8(val, 0, &gpe))
 830		return -EINVAL;
 831	set_bit(gpe, acpi_masked_gpes_map);
 832
 833	return 1;
 834}
 835__setup("acpi_mask_gpe=", acpi_gpe_set_masked_gpes);
 836
 837void __init acpi_gpe_apply_masked_gpes(void)
 838{
 839	acpi_handle handle;
 840	acpi_status status;
 841	u16 gpe;
 842
 843	for_each_set_bit(gpe, acpi_masked_gpes_map, ACPI_MASKABLE_GPE_MAX) {
 844		status = acpi_get_gpe_device(gpe, &handle);
 845		if (ACPI_SUCCESS(status)) {
 846			pr_info("Masking GPE 0x%x.\n", gpe);
 847			(void)acpi_mask_gpe(handle, gpe, TRUE);
 848		}
 849	}
 850}
 851
 852void acpi_irq_stats_init(void)
 853{
 854	acpi_status status;
 855	int i;
 856
 857	if (all_counters)
 858		return;
 859
 860	num_gpes = acpi_current_gpe_count;
 861	num_counters = num_gpes + ACPI_NUM_FIXED_EVENTS + NUM_COUNTERS_EXTRA;
 862
 863	all_attrs = kcalloc(num_counters + 1, sizeof(struct attribute *),
 864			    GFP_KERNEL);
 865	if (all_attrs == NULL)
 866		return;
 867
 868	all_counters = kcalloc(num_counters, sizeof(struct event_counter),
 869			       GFP_KERNEL);
 870	if (all_counters == NULL)
 871		goto fail;
 872
 873	status = acpi_install_global_event_handler(acpi_global_event_handler, NULL);
 874	if (ACPI_FAILURE(status))
 875		goto fail;
 876
 877	counter_attrs = kcalloc(num_counters, sizeof(struct kobj_attribute),
 878				GFP_KERNEL);
 879	if (counter_attrs == NULL)
 880		goto fail;
 881
 882	for (i = 0; i < num_counters; ++i) {
 883		char buffer[12];
 884		char *name;
 885
 886		if (i < num_gpes)
 887			sprintf(buffer, "gpe%02X", i);
 888		else if (i == num_gpes + ACPI_EVENT_PMTIMER)
 889			sprintf(buffer, "ff_pmtimer");
 890		else if (i == num_gpes + ACPI_EVENT_GLOBAL)
 891			sprintf(buffer, "ff_gbl_lock");
 892		else if (i == num_gpes + ACPI_EVENT_POWER_BUTTON)
 893			sprintf(buffer, "ff_pwr_btn");
 894		else if (i == num_gpes + ACPI_EVENT_SLEEP_BUTTON)
 895			sprintf(buffer, "ff_slp_btn");
 896		else if (i == num_gpes + ACPI_EVENT_RTC)
 897			sprintf(buffer, "ff_rt_clk");
 898		else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_GPE)
 899			sprintf(buffer, "gpe_all");
 900		else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI)
 901			sprintf(buffer, "sci");
 902		else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI_NOT)
 903			sprintf(buffer, "sci_not");
 904		else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_ERROR)
 905			sprintf(buffer, "error");
 906		else
 907			sprintf(buffer, "bug%02X", i);
 908
 909		name = kstrdup(buffer, GFP_KERNEL);
 910		if (name == NULL)
 911			goto fail;
 912
 913		sysfs_attr_init(&counter_attrs[i].attr);
 914		counter_attrs[i].attr.name = name;
 915		counter_attrs[i].attr.mode = 0644;
 916		counter_attrs[i].show = counter_show;
 917		counter_attrs[i].store = counter_set;
 918
 919		all_attrs[i] = &counter_attrs[i].attr;
 920	}
 921
 922	interrupt_stats_attr_group.attrs = all_attrs;
 923	if (!sysfs_create_group(acpi_kobj, &interrupt_stats_attr_group))
 924		return;
 925
 926fail:
 927	delete_gpe_attr_array();
 928	return;
 929}
 930
 931static void __exit interrupt_stats_exit(void)
 932{
 933	sysfs_remove_group(acpi_kobj, &interrupt_stats_attr_group);
 934
 935	delete_gpe_attr_array();
 936
 937	return;
 938}
 939
 940static ssize_t
 941acpi_show_profile(struct kobject *kobj, struct kobj_attribute *attr,
 942		  char *buf)
 943{
 944	return sprintf(buf, "%d\n", acpi_gbl_FADT.preferred_profile);
 945}
 946
 947static const struct kobj_attribute pm_profile_attr =
 948	__ATTR(pm_profile, S_IRUGO, acpi_show_profile, NULL);
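/*
 * Sketch: the attribute mirrors acpi_gbl_FADT.preferred_profile (the FADT
 * preferred PM profile) as a plain integer:
 *
 *   $ cat /sys/firmware/acpi/pm_profile
 */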
 949
 950static ssize_t hotplug_enabled_show(struct kobject *kobj,
 951				    struct kobj_attribute *attr, char *buf)
 952{
 953	struct acpi_hotplug_profile *hotplug = to_acpi_hotplug_profile(kobj);
 954
 955	return sprintf(buf, "%d\n", hotplug->enabled);
 956}
 957
 958static ssize_t hotplug_enabled_store(struct kobject *kobj,
 959				     struct kobj_attribute *attr,
 960				     const char *buf, size_t size)
 961{
 962	struct acpi_hotplug_profile *hotplug = to_acpi_hotplug_profile(kobj);
 963	unsigned int val;
 964
 965	if (kstrtouint(buf, 10, &val) || val > 1)
 966		return -EINVAL;
 967
 968	acpi_scan_hotplug_enabled(hotplug, val);
 969	return size;
 970}
 971
 972static struct kobj_attribute hotplug_enabled_attr =
 973	__ATTR(enabled, S_IRUGO | S_IWUSR, hotplug_enabled_show,
 974		hotplug_enabled_store);
 975
 976static struct attribute *hotplug_profile_attrs[] = {
 977	&hotplug_enabled_attr.attr,
 978	NULL
 979};
 980
 981static struct kobj_type acpi_hotplug_profile_ktype = {
 982	.sysfs_ops = &kobj_sysfs_ops,
 983	.default_attrs = hotplug_profile_attrs,
 984};
 985
 986void acpi_sysfs_add_hotplug_profile(struct acpi_hotplug_profile *hotplug,
 987				    const char *name)
 988{
 989	int error;
 990
 991	if (!hotplug_kobj)
 992		goto err_out;
 993
 994	error = kobject_init_and_add(&hotplug->kobj,
 995		&acpi_hotplug_profile_ktype, hotplug_kobj, "%s", name);
 996	if (error) {
 997		kobject_put(&hotplug->kobj);
 998		goto err_out;
 999	}
1000
1001	kobject_uevent(&hotplug->kobj, KOBJ_ADD);
1002	return;
1003
1004 err_out:
1005	pr_err(PREFIX "Unable to add hotplug profile '%s'\n", name);
1006}
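/*
 * Sketch: each registered profile shows up as
 * /sys/firmware/acpi/hotplug/<name>/enabled (the available names depend on
 * which scan handlers register a profile on the running system):
 *
 *   # echo 0 > /sys/firmware/acpi/hotplug/<name>/enabled
 */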
1007
1008static ssize_t force_remove_show(struct kobject *kobj,
1009				 struct kobj_attribute *attr, char *buf)
1010{
1011	return sprintf(buf, "%d\n", 0);
1012}
1013
1014static ssize_t force_remove_store(struct kobject *kobj,
1015				  struct kobj_attribute *attr,
1016				  const char *buf, size_t size)
1017{
1018	bool val;
1019	int ret;
1020
1021	ret = strtobool(buf, &val);
1022	if (ret < 0)
1023		return ret;
1024
1025	if (val) {
1026		pr_err("Enabling force_remove is not supported anymore. Please report to linux-acpi@vger.kernel.org if you depend on this functionality\n");
1027		return -EINVAL;
1028	}
1029	return size;
1030}
1031
1032static const struct kobj_attribute force_remove_attr =
1033	__ATTR(force_remove, S_IRUGO | S_IWUSR, force_remove_show,
1034	       force_remove_store);
1035
1036int __init acpi_sysfs_init(void)
1037{
1038	int result;
1039
1040	result = acpi_tables_sysfs_init();
1041	if (result)
1042		return result;
1043
1044	hotplug_kobj = kobject_create_and_add("hotplug", acpi_kobj);
1045	if (!hotplug_kobj)
1046		return -ENOMEM;
1047
1048	result = sysfs_create_file(hotplug_kobj, &force_remove_attr.attr);
1049	if (result)
1050		return result;
1051
1052	result = sysfs_create_file(acpi_kobj, &pm_profile_attr.attr);
1053	return result;
1054}
drivers/acpi/sysfs.c, v6.8
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * sysfs.c - ACPI sysfs interface to userspace.
   4 */
   5
   6#define pr_fmt(fmt) "ACPI: " fmt
   7
   8#include <linux/acpi.h>
   9#include <linux/bitmap.h>
  10#include <linux/init.h>
  11#include <linux/kernel.h>
  12#include <linux/kstrtox.h>
  13#include <linux/moduleparam.h>
  14
  15#include "internal.h"
  16
  17#ifdef CONFIG_ACPI_DEBUG
  18/*
  19 * ACPI debug sysfs I/F, including:
  20 * /sys/module/acpi/parameters/debug_layer
  21 * /sys/module/acpi/parameters/debug_level
  22 * /sys/module/acpi/parameters/trace_method_name
  23 * /sys/module/acpi/parameters/trace_state
  24 * /sys/module/acpi/parameters/trace_debug_layer
  25 * /sys/module/acpi/parameters/trace_debug_level
  26 */
  27
  28struct acpi_dlayer {
  29	const char *name;
  30	unsigned long value;
  31};
  32struct acpi_dlevel {
  33	const char *name;
  34	unsigned long value;
  35};
  36#define ACPI_DEBUG_INIT(v)	{ .name = #v, .value = v }
  37
  38static const struct acpi_dlayer acpi_debug_layers[] = {
  39	ACPI_DEBUG_INIT(ACPI_UTILITIES),
  40	ACPI_DEBUG_INIT(ACPI_HARDWARE),
  41	ACPI_DEBUG_INIT(ACPI_EVENTS),
  42	ACPI_DEBUG_INIT(ACPI_TABLES),
  43	ACPI_DEBUG_INIT(ACPI_NAMESPACE),
  44	ACPI_DEBUG_INIT(ACPI_PARSER),
  45	ACPI_DEBUG_INIT(ACPI_DISPATCHER),
  46	ACPI_DEBUG_INIT(ACPI_EXECUTER),
  47	ACPI_DEBUG_INIT(ACPI_RESOURCES),
  48	ACPI_DEBUG_INIT(ACPI_CA_DEBUGGER),
  49	ACPI_DEBUG_INIT(ACPI_OS_SERVICES),
  50	ACPI_DEBUG_INIT(ACPI_CA_DISASSEMBLER),
  51	ACPI_DEBUG_INIT(ACPI_COMPILER),
  52	ACPI_DEBUG_INIT(ACPI_TOOLS),
  53};
  54
  55static const struct acpi_dlevel acpi_debug_levels[] = {
  56	ACPI_DEBUG_INIT(ACPI_LV_INIT),
  57	ACPI_DEBUG_INIT(ACPI_LV_DEBUG_OBJECT),
  58	ACPI_DEBUG_INIT(ACPI_LV_INFO),
  59	ACPI_DEBUG_INIT(ACPI_LV_REPAIR),
  60	ACPI_DEBUG_INIT(ACPI_LV_TRACE_POINT),
  61
  62	ACPI_DEBUG_INIT(ACPI_LV_INIT_NAMES),
  63	ACPI_DEBUG_INIT(ACPI_LV_PARSE),
  64	ACPI_DEBUG_INIT(ACPI_LV_LOAD),
  65	ACPI_DEBUG_INIT(ACPI_LV_DISPATCH),
  66	ACPI_DEBUG_INIT(ACPI_LV_EXEC),
  67	ACPI_DEBUG_INIT(ACPI_LV_NAMES),
  68	ACPI_DEBUG_INIT(ACPI_LV_OPREGION),
  69	ACPI_DEBUG_INIT(ACPI_LV_BFIELD),
  70	ACPI_DEBUG_INIT(ACPI_LV_TABLES),
  71	ACPI_DEBUG_INIT(ACPI_LV_VALUES),
  72	ACPI_DEBUG_INIT(ACPI_LV_OBJECTS),
  73	ACPI_DEBUG_INIT(ACPI_LV_RESOURCES),
  74	ACPI_DEBUG_INIT(ACPI_LV_USER_REQUESTS),
  75	ACPI_DEBUG_INIT(ACPI_LV_PACKAGE),
  76
  77	ACPI_DEBUG_INIT(ACPI_LV_ALLOCATIONS),
  78	ACPI_DEBUG_INIT(ACPI_LV_FUNCTIONS),
  79	ACPI_DEBUG_INIT(ACPI_LV_OPTIMIZATIONS),
  80
  81	ACPI_DEBUG_INIT(ACPI_LV_MUTEX),
  82	ACPI_DEBUG_INIT(ACPI_LV_THREADS),
  83	ACPI_DEBUG_INIT(ACPI_LV_IO),
  84	ACPI_DEBUG_INIT(ACPI_LV_INTERRUPTS),
  85
  86	ACPI_DEBUG_INIT(ACPI_LV_AML_DISASSEMBLE),
  87	ACPI_DEBUG_INIT(ACPI_LV_VERBOSE_INFO),
  88	ACPI_DEBUG_INIT(ACPI_LV_FULL_TABLES),
  89	ACPI_DEBUG_INIT(ACPI_LV_EVENTS),
  90};
  91
  92static int param_get_debug_layer(char *buffer, const struct kernel_param *kp)
  93{
  94	int result = 0;
  95	int i;
  96
  97	result = sprintf(buffer, "%-25s\tHex        SET\n", "Description");
  98
  99	for (i = 0; i < ARRAY_SIZE(acpi_debug_layers); i++) {
 100		result += sprintf(buffer + result, "%-25s\t0x%08lX [%c]\n",
 101				  acpi_debug_layers[i].name,
 102				  acpi_debug_layers[i].value,
 103				  (acpi_dbg_layer & acpi_debug_layers[i].value)
 104				  ? '*' : ' ');
 105	}
 106	result +=
 107	    sprintf(buffer + result, "%-25s\t0x%08X [%c]\n", "ACPI_ALL_DRIVERS",
 108		    ACPI_ALL_DRIVERS,
 109		    (acpi_dbg_layer & ACPI_ALL_DRIVERS) ==
 110		    ACPI_ALL_DRIVERS ? '*' : (acpi_dbg_layer & ACPI_ALL_DRIVERS)
 111		    == 0 ? ' ' : '-');
 112	result +=
 113	    sprintf(buffer + result,
 114		    "--\ndebug_layer = 0x%08X ( * = enabled)\n",
 115		    acpi_dbg_layer);
 116
 117	return result;
 118}
 119
 120static int param_get_debug_level(char *buffer, const struct kernel_param *kp)
 121{
 122	int result = 0;
 123	int i;
 124
 125	result = sprintf(buffer, "%-25s\tHex        SET\n", "Description");
 126
 127	for (i = 0; i < ARRAY_SIZE(acpi_debug_levels); i++) {
 128		result += sprintf(buffer + result, "%-25s\t0x%08lX [%c]\n",
 129				  acpi_debug_levels[i].name,
 130				  acpi_debug_levels[i].value,
 131				  (acpi_dbg_level & acpi_debug_levels[i].value)
 132				  ? '*' : ' ');
 133	}
 134	result +=
 135	    sprintf(buffer + result, "--\ndebug_level = 0x%08X (* = enabled)\n",
 136		    acpi_dbg_level);
 137
 138	return result;
 139}
 140
 141static const struct kernel_param_ops param_ops_debug_layer = {
 142	.set = param_set_uint,
 143	.get = param_get_debug_layer,
 144};
 145
 146static const struct kernel_param_ops param_ops_debug_level = {
 147	.set = param_set_uint,
 148	.get = param_get_debug_level,
 149};
 150
 151module_param_cb(debug_layer, &param_ops_debug_layer, &acpi_dbg_layer, 0644);
 152module_param_cb(debug_level, &param_ops_debug_level, &acpi_dbg_level, 0644);
 153
 154static char trace_method_name[1024];
 155
 156static int param_set_trace_method_name(const char *val,
 157				       const struct kernel_param *kp)
 158{
 159	u32 saved_flags = 0;
 160	bool is_abs_path = true;
 161
 162	if (*val != '\\')
 163		is_abs_path = false;
 164
 165	if ((is_abs_path && strlen(val) > 1023) ||
 166	    (!is_abs_path && strlen(val) > 1022)) {
 167		pr_err("%s: string parameter too long\n", kp->name);
 168		return -ENOSPC;
 169	}
 170
 171	/*
 172	 * It's not safe to update acpi_gbl_trace_method_name without
 173	 * having the tracer stopped, so we save the original tracer
 174	 * state and disable it.
 175	 */
 176	saved_flags = acpi_gbl_trace_flags;
 177	(void)acpi_debug_trace(NULL,
 178			       acpi_gbl_trace_dbg_level,
 179			       acpi_gbl_trace_dbg_layer,
 180			       0);
 181
 182	/* This is a hack.  We can't kmalloc in early boot. */
 183	if (is_abs_path)
 184		strcpy(trace_method_name, val);
 185	else {
 186		trace_method_name[0] = '\\';
 187		strcpy(trace_method_name+1, val);
 188	}
 189
 190	/* Restore the original tracer state */
 191	(void)acpi_debug_trace(trace_method_name,
 192			       acpi_gbl_trace_dbg_level,
 193			       acpi_gbl_trace_dbg_layer,
 194			       saved_flags);
 195
 196	return 0;
 197}
 198
 199static int param_get_trace_method_name(char *buffer, const struct kernel_param *kp)
 200{
 201	return sysfs_emit(buffer, "%s\n", acpi_gbl_trace_method_name);
 202}
 203
 204static const struct kernel_param_ops param_ops_trace_method = {
 205	.set = param_set_trace_method_name,
 206	.get = param_get_trace_method_name,
 207};
 208
 209static const struct kernel_param_ops param_ops_trace_attrib = {
 210	.set = param_set_uint,
 211	.get = param_get_uint,
 212};
 213
 214module_param_cb(trace_method_name, &param_ops_trace_method, &trace_method_name, 0644);
 215module_param_cb(trace_debug_layer, &param_ops_trace_attrib, &acpi_gbl_trace_dbg_layer, 0644);
 216module_param_cb(trace_debug_level, &param_ops_trace_attrib, &acpi_gbl_trace_dbg_level, 0644);
 217
 218static int param_set_trace_state(const char *val,
 219				 const struct kernel_param *kp)
 220{
 221	acpi_status status;
 222	const char *method = trace_method_name;
 223	u32 flags = 0;
 224
 225/* The "xxx-once" comparisons must be checked before the plain "xxx" ones */
 226#define acpi_compare_param(val, key)	\
 227	strncmp((val), (key), sizeof(key) - 1)
 228
 229	if (!acpi_compare_param(val, "enable")) {
 230		method = NULL;
 231		flags = ACPI_TRACE_ENABLED;
 232	} else if (!acpi_compare_param(val, "disable"))
 233		method = NULL;
 234	else if (!acpi_compare_param(val, "method-once"))
 235		flags = ACPI_TRACE_ENABLED | ACPI_TRACE_ONESHOT;
 236	else if (!acpi_compare_param(val, "method"))
 237		flags = ACPI_TRACE_ENABLED;
 238	else if (!acpi_compare_param(val, "opcode-once"))
 239		flags = ACPI_TRACE_ENABLED | ACPI_TRACE_ONESHOT | ACPI_TRACE_OPCODE;
 240	else if (!acpi_compare_param(val, "opcode"))
 241		flags = ACPI_TRACE_ENABLED | ACPI_TRACE_OPCODE;
 242	else
 243		return -EINVAL;
 244
 245	status = acpi_debug_trace(method,
 246				  acpi_gbl_trace_dbg_level,
 247				  acpi_gbl_trace_dbg_layer,
 248				  flags);
 249	if (ACPI_FAILURE(status))
 250		return -EBUSY;
 251
 252	return 0;
 253}
 254
 255static int param_get_trace_state(char *buffer, const struct kernel_param *kp)
 256{
 257	if (!(acpi_gbl_trace_flags & ACPI_TRACE_ENABLED))
 258		return sprintf(buffer, "disable\n");
 259	if (!acpi_gbl_trace_method_name)
 260		return sprintf(buffer, "enable\n");
 261	if (acpi_gbl_trace_flags & ACPI_TRACE_ONESHOT)
 262		return sprintf(buffer, "method-once\n");
 263	else
 264		return sprintf(buffer, "method\n");
 265}
 266
 267module_param_call(trace_state, param_set_trace_state, param_get_trace_state,
 268		  NULL, 0644);
 269#endif /* CONFIG_ACPI_DEBUG */
 270
 271
 272/* /sys/module/acpi/parameters/aml_debug_output */
 273
 274module_param_named(aml_debug_output, acpi_gbl_enable_aml_debug_object,
 275		   byte, 0644);
 276MODULE_PARM_DESC(aml_debug_output,
 277		 "To enable/disable the ACPI Debug Object output.");
 278
 279/* /sys/module/acpi/parameters/acpica_version */
 280static int param_get_acpica_version(char *buffer,
 281				    const struct kernel_param *kp)
 282{
 283	int result;
 284
 285	result = sprintf(buffer, "%x\n", ACPI_CA_VERSION);
 286
 287	return result;
 288}
 289
 290module_param_call(acpica_version, NULL, param_get_acpica_version, NULL, 0444);
 291
 292/*
 293 * ACPI table sysfs I/F:
 294 * /sys/firmware/acpi/tables/
 295 * /sys/firmware/acpi/tables/data/
 296 * /sys/firmware/acpi/tables/dynamic/
 297 */
 298
 299static LIST_HEAD(acpi_table_attr_list);
 300static struct kobject *tables_kobj;
 301static struct kobject *tables_data_kobj;
 302static struct kobject *dynamic_tables_kobj;
 303static struct kobject *hotplug_kobj;
 304
 305#define ACPI_MAX_TABLE_INSTANCES	999
 306#define ACPI_INST_SIZE			4 /* including trailing 0 */
 307
 308struct acpi_table_attr {
 309	struct bin_attribute attr;
 310	char name[ACPI_NAMESEG_SIZE];
 311	int instance;
 312	char filename[ACPI_NAMESEG_SIZE+ACPI_INST_SIZE];
 313	struct list_head node;
 314};
 315
 316struct acpi_data_attr {
 317	struct bin_attribute attr;
 318	u64	addr;
 319};
 320
 321static ssize_t acpi_table_show(struct file *filp, struct kobject *kobj,
 322			       struct bin_attribute *bin_attr, char *buf,
 323			       loff_t offset, size_t count)
 324{
 325	struct acpi_table_attr *table_attr =
 326	    container_of(bin_attr, struct acpi_table_attr, attr);
 327	struct acpi_table_header *table_header = NULL;
 328	acpi_status status;
 329	ssize_t rc;
 330
 331	status = acpi_get_table(table_attr->name, table_attr->instance,
 332				&table_header);
 333	if (ACPI_FAILURE(status))
 334		return -ENODEV;
 335
 336	rc = memory_read_from_buffer(buf, count, &offset, table_header,
 337			table_header->length);
 338	acpi_put_table(table_header);
 339	return rc;
 340}
 341
 342static int acpi_table_attr_init(struct kobject *tables_obj,
 343				struct acpi_table_attr *table_attr,
 344				struct acpi_table_header *table_header)
 345{
 346	struct acpi_table_header *header = NULL;
 347	struct acpi_table_attr *attr = NULL;
 348	char instance_str[ACPI_INST_SIZE];
 349
 350	sysfs_attr_init(&table_attr->attr.attr);
 351	ACPI_COPY_NAMESEG(table_attr->name, table_header->signature);
 352
 353	list_for_each_entry(attr, &acpi_table_attr_list, node) {
 354		if (ACPI_COMPARE_NAMESEG(table_attr->name, attr->name))
 355			if (table_attr->instance < attr->instance)
 356				table_attr->instance = attr->instance;
 357	}
 358	table_attr->instance++;
 359	if (table_attr->instance > ACPI_MAX_TABLE_INSTANCES) {
 360		pr_warn("%4.4s: too many table instances\n", table_attr->name);
 361		return -ERANGE;
 362	}
 363
 364	ACPI_COPY_NAMESEG(table_attr->filename, table_header->signature);
 365	table_attr->filename[ACPI_NAMESEG_SIZE] = '\0';
 366	if (table_attr->instance > 1 || (table_attr->instance == 1 &&
 367					 !acpi_get_table
 368					 (table_header->signature, 2, &header))) {
 369		snprintf(instance_str, sizeof(instance_str), "%u",
 370			 table_attr->instance);
 371		strcat(table_attr->filename, instance_str);
 372	}
 373
 374	table_attr->attr.size = table_header->length;
 375	table_attr->attr.read = acpi_table_show;
 376	table_attr->attr.attr.name = table_attr->filename;
 377	table_attr->attr.attr.mode = 0400;
 378
 379	return sysfs_create_bin_file(tables_obj, &table_attr->attr);
 380}
 381
 382acpi_status acpi_sysfs_table_handler(u32 event, void *table, void *context)
 383{
 384	struct acpi_table_attr *table_attr;
 385
 386	switch (event) {
 387	case ACPI_TABLE_EVENT_INSTALL:
 388		table_attr = kzalloc(sizeof(*table_attr), GFP_KERNEL);
 389		if (!table_attr)
 390			return AE_NO_MEMORY;
 391
 392		if (acpi_table_attr_init(dynamic_tables_kobj,
 393					 table_attr, table)) {
 394			kfree(table_attr);
 395			return AE_ERROR;
 396		}
 397		list_add_tail(&table_attr->node, &acpi_table_attr_list);
 398		break;
 399	case ACPI_TABLE_EVENT_LOAD:
 400	case ACPI_TABLE_EVENT_UNLOAD:
 401	case ACPI_TABLE_EVENT_UNINSTALL:
 402		/*
 403		 * we do not need to do anything right now
 404		 * because the table is not deleted from the
 405		 * global table list when unloading it.
 406		 */
 407		break;
 408	default:
 409		return AE_BAD_PARAMETER;
 410	}
 411	return AE_OK;
 412}
 413
 414static ssize_t acpi_data_show(struct file *filp, struct kobject *kobj,
 415			      struct bin_attribute *bin_attr, char *buf,
 416			      loff_t offset, size_t count)
 417{
 418	struct acpi_data_attr *data_attr;
 419	void __iomem *base;
 420	ssize_t size;
 421
 422	data_attr = container_of(bin_attr, struct acpi_data_attr, attr);
 423	size = data_attr->attr.size;
 424
 425	if (offset < 0)
 426		return -EINVAL;
 427
 428	if (offset >= size)
 429		return 0;
 430
 431	if (count > size - offset)
 432		count = size - offset;
 433
 434	base = acpi_os_map_iomem(data_attr->addr, size);
 435	if (!base)
 436		return -ENOMEM;
 437
 438	memcpy_fromio(buf, base + offset, count);
 439
 440	acpi_os_unmap_iomem(base, size);
 441
 442	return count;
 443}
 444
 445static int acpi_bert_data_init(void *th, struct acpi_data_attr *data_attr)
 446{
 447	struct acpi_table_bert *bert = th;
 448
 449	if (bert->header.length < sizeof(struct acpi_table_bert) ||
 450	    bert->region_length < sizeof(struct acpi_hest_generic_status)) {
 451		kfree(data_attr);
 452		return -EINVAL;
 453	}
 454	data_attr->addr = bert->address;
 455	data_attr->attr.size = bert->region_length;
 456	data_attr->attr.attr.name = "BERT";
 457
 458	return sysfs_create_bin_file(tables_data_kobj, &data_attr->attr);
 459}
 460
 461static int acpi_ccel_data_init(void *th, struct acpi_data_attr *data_attr)
 462{
 463	struct acpi_table_ccel *ccel = th;
 464
 465	if (ccel->header.length < sizeof(struct acpi_table_ccel) ||
 466	    !ccel->log_area_start_address || !ccel->log_area_minimum_length) {
 467		kfree(data_attr);
 468		return -EINVAL;
 469	}
 470	data_attr->addr = ccel->log_area_start_address;
 471	data_attr->attr.size = ccel->log_area_minimum_length;
 472	data_attr->attr.attr.name = "CCEL";
 473
 474	return sysfs_create_bin_file(tables_data_kobj, &data_attr->attr);
 475}
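/*
 * Sketch: analogous to BERT above, a valid CCEL table makes its event log
 * area readable as a binary file:
 *
 *   # hexdump -C /sys/firmware/acpi/tables/data/CCEL | head
 */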
 476
 477static struct acpi_data_obj {
 478	char *name;
 479	int (*fn)(void *, struct acpi_data_attr *);
 480} acpi_data_objs[] = {
 481	{ ACPI_SIG_BERT, acpi_bert_data_init },
 482	{ ACPI_SIG_CCEL, acpi_ccel_data_init },
 483};
 484
 485#define NUM_ACPI_DATA_OBJS ARRAY_SIZE(acpi_data_objs)
 486
 487static int acpi_table_data_init(struct acpi_table_header *th)
 488{
 489	struct acpi_data_attr *data_attr;
 490	int i;
 491
 492	for (i = 0; i < NUM_ACPI_DATA_OBJS; i++) {
 493		if (ACPI_COMPARE_NAMESEG(th->signature, acpi_data_objs[i].name)) {
 494			data_attr = kzalloc(sizeof(*data_attr), GFP_KERNEL);
 495			if (!data_attr)
 496				return -ENOMEM;
 497			sysfs_attr_init(&data_attr->attr.attr);
 498			data_attr->attr.read = acpi_data_show;
 499			data_attr->attr.attr.mode = 0400;
 500			return acpi_data_objs[i].fn(th, data_attr);
 501		}
 502	}
 503	return 0;
 504}
 505
 506static int acpi_tables_sysfs_init(void)
 507{
 508	struct acpi_table_attr *table_attr;
 509	struct acpi_table_header *table_header = NULL;
 510	int table_index;
 511	acpi_status status;
 512	int ret;
 513
 514	tables_kobj = kobject_create_and_add("tables", acpi_kobj);
 515	if (!tables_kobj)
 516		goto err;
 517
 518	tables_data_kobj = kobject_create_and_add("data", tables_kobj);
 519	if (!tables_data_kobj)
 520		goto err_tables_data;
 521
 522	dynamic_tables_kobj = kobject_create_and_add("dynamic", tables_kobj);
 523	if (!dynamic_tables_kobj)
 524		goto err_dynamic_tables;
 525
 526	for (table_index = 0;; table_index++) {
 527		status = acpi_get_table_by_index(table_index, &table_header);
 528
 529		if (status == AE_BAD_PARAMETER)
 530			break;
 531
 532		if (ACPI_FAILURE(status))
 533			continue;
 534
 535		table_attr = kzalloc(sizeof(*table_attr), GFP_KERNEL);
 536		if (!table_attr)
 537			return -ENOMEM;
 538
 539		ret = acpi_table_attr_init(tables_kobj,
 540					   table_attr, table_header);
 541		if (ret) {
 542			kfree(table_attr);
 543			return ret;
 544		}
 545		list_add_tail(&table_attr->node, &acpi_table_attr_list);
 546		acpi_table_data_init(table_header);
 547	}
 548
 549	kobject_uevent(tables_kobj, KOBJ_ADD);
 550	kobject_uevent(tables_data_kobj, KOBJ_ADD);
 551	kobject_uevent(dynamic_tables_kobj, KOBJ_ADD);
 552
 553	return 0;
 554err_dynamic_tables:
 555	kobject_put(tables_data_kobj);
 556err_tables_data:
 557	kobject_put(tables_kobj);
 558err:
 559	return -ENOMEM;
 560}
 561
 562/*
 563 * Detailed ACPI IRQ counters:
 564 * /sys/firmware/acpi/interrupts/
 565 */
 566
 567u32 acpi_irq_handled;
 568u32 acpi_irq_not_handled;
 569
 570#define COUNT_GPE 0
 571#define COUNT_SCI 1		/* acpi_irq_handled */
 572#define COUNT_SCI_NOT 2		/* acpi_irq_not_handled */
 573#define COUNT_ERROR 3		/* other */
 574#define NUM_COUNTERS_EXTRA 4
 575
 576struct event_counter {
 577	u32 count;
 578	u32 flags;
 579};
 580
 581static struct event_counter *all_counters;
 582static u32 num_gpes;
 583static u32 num_counters;
 584static struct attribute **all_attrs;
 585static u32 acpi_gpe_count;
 586
 587static struct attribute_group interrupt_stats_attr_group = {
 588	.name = "interrupts",
 589};
 590
 591static struct kobj_attribute *counter_attrs;
 592
 593static void delete_gpe_attr_array(void)
 594{
 595	struct event_counter *tmp = all_counters;
 596
 597	all_counters = NULL;
 598	kfree(tmp);
 599
 600	if (counter_attrs) {
 601		int i;
 602
 603		for (i = 0; i < num_gpes; i++)
 604			kfree(counter_attrs[i].attr.name);
 605
 606		kfree(counter_attrs);
 607	}
 608	kfree(all_attrs);
 609}
 610
 611static void gpe_count(u32 gpe_number)
 612{
 613	acpi_gpe_count++;
 614
 615	if (!all_counters)
 616		return;
 617
 618	if (gpe_number < num_gpes)
 619		all_counters[gpe_number].count++;
 620	else
 621		all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS +
 622			     COUNT_ERROR].count++;
 623}
 624
 625static void fixed_event_count(u32 event_number)
 626{
 627	if (!all_counters)
 628		return;
 629
 630	if (event_number < ACPI_NUM_FIXED_EVENTS)
 631		all_counters[num_gpes + event_number].count++;
 632	else
 633		all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS +
 634			     COUNT_ERROR].count++;
 635}
 636
 637static void acpi_global_event_handler(u32 event_type, acpi_handle device,
 638	u32 event_number, void *context)
 639{
 640	if (event_type == ACPI_EVENT_TYPE_GPE) {
 641		gpe_count(event_number);
 642		pr_debug("GPE event 0x%02x\n", event_number);
 643	} else if (event_type == ACPI_EVENT_TYPE_FIXED) {
 644		fixed_event_count(event_number);
 645		pr_debug("Fixed event 0x%02x\n", event_number);
 646	} else {
 647		pr_debug("Other event 0x%02x\n", event_number);
 648	}
 649}
 650
 651static int get_status(u32 index, acpi_event_status *ret,
 652		      acpi_handle *handle)
 653{
 654	acpi_status status;
 655
 656	if (index >= num_gpes + ACPI_NUM_FIXED_EVENTS)
 657		return -EINVAL;
 658
 659	if (index < num_gpes) {
 660		status = acpi_get_gpe_device(index, handle);
 661		if (ACPI_FAILURE(status)) {
 662			pr_warn("Invalid GPE 0x%x", index);
 663			return -ENXIO;
 664		}
 665		status = acpi_get_gpe_status(*handle, index, ret);
 666	} else {
 667		status = acpi_get_event_status(index - num_gpes, ret);
 668	}
 669	if (ACPI_FAILURE(status))
 670		return -EIO;
 671
 672	return 0;
 673}
 674
 675static ssize_t counter_show(struct kobject *kobj,
 676			    struct kobj_attribute *attr, char *buf)
 677{
 678	int index = attr - counter_attrs;
 679	int size;
 680	acpi_handle handle;
 681	acpi_event_status status;
 682	int result = 0;
 683
 684	all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI].count =
 685	    acpi_irq_handled;
 686	all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI_NOT].count =
 687	    acpi_irq_not_handled;
 688	all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_GPE].count =
 689	    acpi_gpe_count;
 690	size = sprintf(buf, "%8u", all_counters[index].count);
 691
 692	/* "gpe_all" or "sci" */
 693	if (index >= num_gpes + ACPI_NUM_FIXED_EVENTS)
 694		goto end;
 695
 696	result = get_status(index, &status, &handle);
 697	if (result)
 698		goto end;
 699
 700	if (status & ACPI_EVENT_FLAG_ENABLE_SET)
 701		size += sprintf(buf + size, "  EN");
 702	else
 703		size += sprintf(buf + size, "    ");
 704	if (status & ACPI_EVENT_FLAG_STATUS_SET)
 705		size += sprintf(buf + size, " STS");
 706	else
 707		size += sprintf(buf + size, "    ");
 708
 709	if (!(status & ACPI_EVENT_FLAG_HAS_HANDLER))
 710		size += sprintf(buf + size, " invalid     ");
 711	else if (status & ACPI_EVENT_FLAG_ENABLED)
 712		size += sprintf(buf + size, " enabled     ");
 713	else if (status & ACPI_EVENT_FLAG_WAKE_ENABLED)
 714		size += sprintf(buf + size, " wake_enabled");
 715	else
 716		size += sprintf(buf + size, " disabled    ");
 717	if (status & ACPI_EVENT_FLAG_MASKED)
 718		size += sprintf(buf + size, " masked  ");
 719	else
 720		size += sprintf(buf + size, " unmasked");
 721
 722end:
 723	size += sprintf(buf + size, "\n");
 724	return result ? result : size;
 725}
 726
 727/*
 728 * counter_set() sets the specified counter.
 729 * Setting the total "sci" file to any value clears all counters.
 730 * Writes can also enable/disable/clear a GPE/fixed event from user space.
 731 */
 732static ssize_t counter_set(struct kobject *kobj,
 733			   struct kobj_attribute *attr, const char *buf,
 734			   size_t size)
 735{
 736	int index = attr - counter_attrs;
 737	acpi_event_status status;
 738	acpi_handle handle;
 739	int result = 0;
 740	unsigned long tmp;
 741
 742	if (index == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI) {
 743		int i;
 744		for (i = 0; i < num_counters; ++i)
 745			all_counters[i].count = 0;
 746		acpi_gpe_count = 0;
 747		acpi_irq_handled = 0;
 748		acpi_irq_not_handled = 0;
 749		goto end;
 750	}
 751
 752	/* show the event status for both GPEs and Fixed Events */
 753	result = get_status(index, &status, &handle);
 754	if (result)
 755		goto end;
 756
 757	if (!(status & ACPI_EVENT_FLAG_HAS_HANDLER)) {
 758		pr_warn("Can not change Invalid GPE/Fixed Event status\n");
 759		return -EINVAL;
 760	}
 761
 762	if (index < num_gpes) {
 763		if (!strcmp(buf, "disable\n") &&
 764		    (status & ACPI_EVENT_FLAG_ENABLED))
 765			result = acpi_disable_gpe(handle, index);
 766		else if (!strcmp(buf, "enable\n") &&
 767			 !(status & ACPI_EVENT_FLAG_ENABLED))
 768			result = acpi_enable_gpe(handle, index);
 769		else if (!strcmp(buf, "clear\n") &&
 770			 (status & ACPI_EVENT_FLAG_STATUS_SET))
 771			result = acpi_clear_gpe(handle, index);
 772		else if (!strcmp(buf, "mask\n"))
 773			result = acpi_mask_gpe(handle, index, TRUE);
 774		else if (!strcmp(buf, "unmask\n"))
 775			result = acpi_mask_gpe(handle, index, FALSE);
 776		else if (!kstrtoul(buf, 0, &tmp))
 777			all_counters[index].count = tmp;
 778		else
 779			result = -EINVAL;
 780	} else if (index < num_gpes + ACPI_NUM_FIXED_EVENTS) {
 781		int event = index - num_gpes;
 782		if (!strcmp(buf, "disable\n") &&
 783		    (status & ACPI_EVENT_FLAG_ENABLE_SET))
 784			result = acpi_disable_event(event, ACPI_NOT_ISR);
 785		else if (!strcmp(buf, "enable\n") &&
 786			 !(status & ACPI_EVENT_FLAG_ENABLE_SET))
 787			result = acpi_enable_event(event, ACPI_NOT_ISR);
 788		else if (!strcmp(buf, "clear\n") &&
 789			 (status & ACPI_EVENT_FLAG_STATUS_SET))
 790			result = acpi_clear_event(event);
 791		else if (!kstrtoul(buf, 0, &tmp))
 792			all_counters[index].count = tmp;
 793		else
 794			result = -EINVAL;
 795	} else
 796		all_counters[index].count = strtoul(buf, NULL, 0);
 797
 798	if (ACPI_FAILURE(result))
 799		result = -EINVAL;
 800end:
 801	return result ? result : size;
 802}
 803
 804/*
 805 * A Quirk Mechanism for GPE Flooding Prevention:
 806 *
 807 * Quirks may be needed to prevent GPE flooding on a specific GPE. The
 808 * flooding typically cannot be detected and prevented automatically by
 809 * the ACPI_GPE_DISPATCH_NONE check, because a _Lxx/_Exx handler is
 810 * present in the AML tables. This normally indicates a feature gap in
 811 * Linux, so instead of maintaining endless quirk tables, we provide a
 812 * boot parameter for those who need this quirk. For example, to
 813 * prevent GPE flooding on GPE 0x00, specify the following boot
 814 * parameter:
 815 *   acpi_mask_gpe=0x00
 816 * Note that the parameter can also be a list (see bitmap_parselist()
 817 * for the details). The masking status can be changed at run time via
 818 * the following interface:
 819 *   echo unmask > /sys/firmware/acpi/interrupts/gpe00
 820 */
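/*
 * Example parameter forms (a sketch inferred from the parsing in
 * acpi_gpe_set_masked_gpes() below): a single GPE may be given in hex,
 * e.g. acpi_mask_gpe=0x10, because it is parsed with kstrtou8(..., 0, ...);
 * a list falls through to bitmap_parselist(), which expects decimal
 * numbers and ranges, e.g. acpi_mask_gpe=16,20-21.
 */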
 821#define ACPI_MASKABLE_GPE_MAX	0x100
 822static DECLARE_BITMAP(acpi_masked_gpes_map, ACPI_MASKABLE_GPE_MAX) __initdata;
 823
 824static int __init acpi_gpe_set_masked_gpes(char *val)
 825{
 826	int ret;
 827	u8 gpe;
 828
 829	ret = kstrtou8(val, 0, &gpe);
 830	if (ret) {
 831		ret = bitmap_parselist(val, acpi_masked_gpes_map, ACPI_MASKABLE_GPE_MAX);
 832		if (ret)
 833			return ret;
 834	} else
 835		set_bit(gpe, acpi_masked_gpes_map);
 836
 837	return 1;
 838}
 839__setup("acpi_mask_gpe=", acpi_gpe_set_masked_gpes);
 840
 841void __init acpi_gpe_apply_masked_gpes(void)
 842{
 843	acpi_handle handle;
 844	acpi_status status;
 845	u16 gpe;
 846
 847	for_each_set_bit(gpe, acpi_masked_gpes_map, ACPI_MASKABLE_GPE_MAX) {
 848		status = acpi_get_gpe_device(gpe, &handle);
 849		if (ACPI_SUCCESS(status)) {
 850			pr_info("Masking GPE 0x%x.\n", gpe);
 851			(void)acpi_mask_gpe(handle, gpe, TRUE);
 852		}
 853	}
 854}
 855
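/*
 * Note on the layout built below (descriptive summary of the code that
 * follows): one counter per GPE ("gpeXX"), one per fixed event ("ff_*"),
 * then the NUM_COUNTERS_EXTRA totals "gpe_all", "sci", "sci_not" and
 * "error". Each entry becomes a 0644 sysfs attribute backed by
 * counter_show() and counter_set().
 */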
 856void acpi_irq_stats_init(void)
 857{
 858	acpi_status status;
 859	int i;
 860
 861	if (all_counters)
 862		return;
 863
 864	num_gpes = acpi_current_gpe_count;
 865	num_counters = num_gpes + ACPI_NUM_FIXED_EVENTS + NUM_COUNTERS_EXTRA;
 866
 867	all_attrs = kcalloc(num_counters + 1, sizeof(*all_attrs), GFP_KERNEL);
 868	if (all_attrs == NULL)
 869		return;
 870
 871	all_counters = kcalloc(num_counters, sizeof(*all_counters), GFP_KERNEL);
 872	if (all_counters == NULL)
 873		goto fail;
 874
 875	status = acpi_install_global_event_handler(acpi_global_event_handler, NULL);
 876	if (ACPI_FAILURE(status))
 877		goto fail;
 878
 879	counter_attrs = kcalloc(num_counters, sizeof(*counter_attrs), GFP_KERNEL);
 880	if (counter_attrs == NULL)
 881		goto fail;
 882
 883	for (i = 0; i < num_counters; ++i) {
 884		char buffer[12];
 885		char *name;
 886
 887		if (i < num_gpes)
 888			sprintf(buffer, "gpe%02X", i);
 889		else if (i == num_gpes + ACPI_EVENT_PMTIMER)
 890			sprintf(buffer, "ff_pmtimer");
 891		else if (i == num_gpes + ACPI_EVENT_GLOBAL)
 892			sprintf(buffer, "ff_gbl_lock");
 893		else if (i == num_gpes + ACPI_EVENT_POWER_BUTTON)
 894			sprintf(buffer, "ff_pwr_btn");
 895		else if (i == num_gpes + ACPI_EVENT_SLEEP_BUTTON)
 896			sprintf(buffer, "ff_slp_btn");
 897		else if (i == num_gpes + ACPI_EVENT_RTC)
 898			sprintf(buffer, "ff_rt_clk");
 899		else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_GPE)
 900			sprintf(buffer, "gpe_all");
 901		else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI)
 902			sprintf(buffer, "sci");
 903		else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI_NOT)
 904			sprintf(buffer, "sci_not");
 905		else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_ERROR)
 906			sprintf(buffer, "error");
 907		else
 908			sprintf(buffer, "bug%02X", i);
 909
 910		name = kstrdup(buffer, GFP_KERNEL);
 911		if (name == NULL)
 912			goto fail;
 913
 914		sysfs_attr_init(&counter_attrs[i].attr);
 915		counter_attrs[i].attr.name = name;
 916		counter_attrs[i].attr.mode = 0644;
 917		counter_attrs[i].show = counter_show;
 918		counter_attrs[i].store = counter_set;
 919
 920		all_attrs[i] = &counter_attrs[i].attr;
 921	}
 922
 923	interrupt_stats_attr_group.attrs = all_attrs;
 924	if (!sysfs_create_group(acpi_kobj, &interrupt_stats_attr_group))
 925		return;
 926
 927fail:
 928	delete_gpe_attr_array();
 929}
 930
 931static void __exit interrupt_stats_exit(void)
 932{
 933	sysfs_remove_group(acpi_kobj, &interrupt_stats_attr_group);
 934
 935	delete_gpe_attr_array();
 936}
 937
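/*
 * pm_profile exposes the FADT preferred_profile field as a bare integer.
 * For reference, the values defined by the ACPI specification (later
 * revisions may add more) are: 0 Unspecified, 1 Desktop, 2 Mobile,
 * 3 Workstation, 4 Enterprise Server, 5 SOHO Server, 6 Appliance PC,
 * 7 Performance Server, 8 Tablet.
 */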
 938static ssize_t pm_profile_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
 939{
 940	return sprintf(buf, "%d\n", acpi_gbl_FADT.preferred_profile);
 941}
 942
 943static const struct kobj_attribute pm_profile_attr = __ATTR_RO(pm_profile);
 944
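/*
 * Per-profile "enabled" attribute. A usage sketch, assuming a profile
 * registered via acpi_sysfs_add_hotplug_profile() below, which places it
 * under the "hotplug" kobject created in acpi_sysfs_init():
 *
 *   echo 0 > /sys/firmware/acpi/hotplug/<profile>/enabled   # disable hotplug
 *   echo 1 > /sys/firmware/acpi/hotplug/<profile>/enabled   # re-enable it
 *
 * Only the values 0 and 1 are accepted; anything else yields -EINVAL.
 */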
 945static ssize_t enabled_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
 946{
 947	struct acpi_hotplug_profile *hotplug = to_acpi_hotplug_profile(kobj);
 948
 949	return sprintf(buf, "%d\n", hotplug->enabled);
 950}
 951
 952static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
 953			     const char *buf, size_t size)
 954{
 955	struct acpi_hotplug_profile *hotplug = to_acpi_hotplug_profile(kobj);
 956	unsigned int val;
 957
 958	if (kstrtouint(buf, 10, &val) || val > 1)
 959		return -EINVAL;
 960
 961	acpi_scan_hotplug_enabled(hotplug, val);
 962	return size;
 963}
 964
 965static struct kobj_attribute hotplug_enabled_attr = __ATTR_RW(enabled);
 966
 967static struct attribute *hotplug_profile_attrs[] = {
 968	&hotplug_enabled_attr.attr,
 969	NULL
 970};
 971ATTRIBUTE_GROUPS(hotplug_profile);
 972
 973static const struct kobj_type acpi_hotplug_profile_ktype = {
 974	.sysfs_ops = &kobj_sysfs_ops,
 975	.default_groups = hotplug_profile_groups,
 976};
 977
 978void acpi_sysfs_add_hotplug_profile(struct acpi_hotplug_profile *hotplug,
 979				    const char *name)
 980{
 981	int error;
 982
 983	if (!hotplug_kobj)
 984		goto err_out;
 985
 986	error = kobject_init_and_add(&hotplug->kobj,
 987		&acpi_hotplug_profile_ktype, hotplug_kobj, "%s", name);
 988	if (error) {
 989		kobject_put(&hotplug->kobj);
 990		goto err_out;
 991	}
 992
 993	kobject_uevent(&hotplug->kobj, KOBJ_ADD);
 994	return;
 995
 996 err_out:
 997	pr_err("Unable to add hotplug profile '%s'\n", name);
 998}
 999
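/*
 * force_remove is retained only as an ABI placeholder (see the two
 * handlers below): reads always report 0, and attempts to enable it are
 * rejected with -EINVAL and a log message.
 */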
1000static ssize_t force_remove_show(struct kobject *kobj,
1001				 struct kobj_attribute *attr, char *buf)
1002{
1003	return sprintf(buf, "%d\n", 0);
1004}
1005
1006static ssize_t force_remove_store(struct kobject *kobj,
1007				  struct kobj_attribute *attr,
1008				  const char *buf, size_t size)
1009{
1010	bool val;
1011	int ret;
1012
1013	ret = kstrtobool(buf, &val);
1014	if (ret < 0)
1015		return ret;
1016
1017	if (val) {
1018		pr_err("Enabling force_remove is not supported anymore. Please report to linux-acpi@vger.kernel.org if you depend on this functionality\n");
1019		return -EINVAL;
1020	}
1021	return size;
1022}
1023
1024static const struct kobj_attribute force_remove_attr = __ATTR_RW(force_remove);
1025
1026int __init acpi_sysfs_init(void)
1027{
1028	int result;
1029
1030	result = acpi_tables_sysfs_init();
1031	if (result)
1032		return result;
1033
1034	hotplug_kobj = kobject_create_and_add("hotplug", acpi_kobj);
1035	if (!hotplug_kobj)
1036		return -ENOMEM;
1037
1038	result = sysfs_create_file(hotplug_kobj, &force_remove_attr.attr);
1039	if (result)
1040		return result;
1041
1042	result = sysfs_create_file(acpi_kobj, &pm_profile_attr.attr);
1043	return result;
1044}