v6.13.7
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * sysfs.c - ACPI sysfs interface to userspace.
   4 */
   5
   6#define pr_fmt(fmt) "ACPI: " fmt
   7
   8#include <linux/acpi.h>
   9#include <linux/bitmap.h>
  10#include <linux/init.h>
  11#include <linux/kernel.h>
  12#include <linux/kstrtox.h>
  13#include <linux/moduleparam.h>
  14
  15#include "internal.h"
  16
  17#ifdef CONFIG_ACPI_DEBUG
  18/*
  19 * ACPI debug sysfs I/F, including:
   20 * /sys/module/acpi/parameters/debug_layer
   21 * /sys/module/acpi/parameters/debug_level
   22 * /sys/module/acpi/parameters/trace_method_name
   23 * /sys/module/acpi/parameters/trace_state
   24 * /sys/module/acpi/parameters/trace_debug_layer
   25 * /sys/module/acpi/parameters/trace_debug_level
  26 */
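/*
 * A minimal usage sketch (illustrative values, assuming CONFIG_ACPI_DEBUG=y);
 * both masks can be inspected and updated from a shell:
 *
 *   cat /sys/module/acpi/parameters/debug_layer    # '*' marks enabled layers
 *   cat /sys/module/acpi/parameters/debug_level    # '*' marks enabled levels
 *   echo 0x00000004 > /sys/module/acpi/parameters/debug_level
 *   echo 0xFFFFFFFF > /sys/module/acpi/parameters/debug_layer
 *
 * Writes go through param_set_uint(), so any unsigned bitmask is accepted;
 * the hex values above are examples only.
 */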
  27
  28struct acpi_dlayer {
  29	const char *name;
  30	unsigned long value;
  31};
  32struct acpi_dlevel {
  33	const char *name;
  34	unsigned long value;
  35};
  36#define ACPI_DEBUG_INIT(v)	{ .name = #v, .value = v }
  37
  38static const struct acpi_dlayer acpi_debug_layers[] = {
  39	ACPI_DEBUG_INIT(ACPI_UTILITIES),
  40	ACPI_DEBUG_INIT(ACPI_HARDWARE),
  41	ACPI_DEBUG_INIT(ACPI_EVENTS),
  42	ACPI_DEBUG_INIT(ACPI_TABLES),
  43	ACPI_DEBUG_INIT(ACPI_NAMESPACE),
  44	ACPI_DEBUG_INIT(ACPI_PARSER),
  45	ACPI_DEBUG_INIT(ACPI_DISPATCHER),
  46	ACPI_DEBUG_INIT(ACPI_EXECUTER),
  47	ACPI_DEBUG_INIT(ACPI_RESOURCES),
  48	ACPI_DEBUG_INIT(ACPI_CA_DEBUGGER),
  49	ACPI_DEBUG_INIT(ACPI_OS_SERVICES),
  50	ACPI_DEBUG_INIT(ACPI_CA_DISASSEMBLER),
  51	ACPI_DEBUG_INIT(ACPI_COMPILER),
  52	ACPI_DEBUG_INIT(ACPI_TOOLS),
  53};
  54
  55static const struct acpi_dlevel acpi_debug_levels[] = {
  56	ACPI_DEBUG_INIT(ACPI_LV_INIT),
  57	ACPI_DEBUG_INIT(ACPI_LV_DEBUG_OBJECT),
  58	ACPI_DEBUG_INIT(ACPI_LV_INFO),
  59	ACPI_DEBUG_INIT(ACPI_LV_REPAIR),
  60	ACPI_DEBUG_INIT(ACPI_LV_TRACE_POINT),
  61
  62	ACPI_DEBUG_INIT(ACPI_LV_INIT_NAMES),
  63	ACPI_DEBUG_INIT(ACPI_LV_PARSE),
  64	ACPI_DEBUG_INIT(ACPI_LV_LOAD),
  65	ACPI_DEBUG_INIT(ACPI_LV_DISPATCH),
  66	ACPI_DEBUG_INIT(ACPI_LV_EXEC),
  67	ACPI_DEBUG_INIT(ACPI_LV_NAMES),
  68	ACPI_DEBUG_INIT(ACPI_LV_OPREGION),
  69	ACPI_DEBUG_INIT(ACPI_LV_BFIELD),
  70	ACPI_DEBUG_INIT(ACPI_LV_TABLES),
  71	ACPI_DEBUG_INIT(ACPI_LV_VALUES),
  72	ACPI_DEBUG_INIT(ACPI_LV_OBJECTS),
  73	ACPI_DEBUG_INIT(ACPI_LV_RESOURCES),
  74	ACPI_DEBUG_INIT(ACPI_LV_USER_REQUESTS),
  75	ACPI_DEBUG_INIT(ACPI_LV_PACKAGE),
  76
  77	ACPI_DEBUG_INIT(ACPI_LV_ALLOCATIONS),
  78	ACPI_DEBUG_INIT(ACPI_LV_FUNCTIONS),
  79	ACPI_DEBUG_INIT(ACPI_LV_OPTIMIZATIONS),
  80
  81	ACPI_DEBUG_INIT(ACPI_LV_MUTEX),
  82	ACPI_DEBUG_INIT(ACPI_LV_THREADS),
  83	ACPI_DEBUG_INIT(ACPI_LV_IO),
  84	ACPI_DEBUG_INIT(ACPI_LV_INTERRUPTS),
  85
  86	ACPI_DEBUG_INIT(ACPI_LV_AML_DISASSEMBLE),
  87	ACPI_DEBUG_INIT(ACPI_LV_VERBOSE_INFO),
  88	ACPI_DEBUG_INIT(ACPI_LV_FULL_TABLES),
  89	ACPI_DEBUG_INIT(ACPI_LV_EVENTS),
  90};
  91
  92static int param_get_debug_layer(char *buffer, const struct kernel_param *kp)
  93{
  94	int result = 0;
  95	int i;
  96
  97	result = sprintf(buffer, "%-25s\tHex        SET\n", "Description");
  98
  99	for (i = 0; i < ARRAY_SIZE(acpi_debug_layers); i++) {
 100		result += sprintf(buffer + result, "%-25s\t0x%08lX [%c]\n",
 101				  acpi_debug_layers[i].name,
 102				  acpi_debug_layers[i].value,
 103				  (acpi_dbg_layer & acpi_debug_layers[i].value)
 104				  ? '*' : ' ');
 105	}
 106	result +=
 107	    sprintf(buffer + result, "%-25s\t0x%08X [%c]\n", "ACPI_ALL_DRIVERS",
 108		    ACPI_ALL_DRIVERS,
 109		    (acpi_dbg_layer & ACPI_ALL_DRIVERS) ==
 110		    ACPI_ALL_DRIVERS ? '*' : (acpi_dbg_layer & ACPI_ALL_DRIVERS)
 111		    == 0 ? ' ' : '-');
 112	result +=
 113	    sprintf(buffer + result,
 114		    "--\ndebug_layer = 0x%08X ( * = enabled)\n",
 115		    acpi_dbg_layer);
 116
 117	return result;
 118}
 119
 120static int param_get_debug_level(char *buffer, const struct kernel_param *kp)
 121{
 122	int result = 0;
 123	int i;
 124
 125	result = sprintf(buffer, "%-25s\tHex        SET\n", "Description");
 126
 127	for (i = 0; i < ARRAY_SIZE(acpi_debug_levels); i++) {
 128		result += sprintf(buffer + result, "%-25s\t0x%08lX [%c]\n",
 129				  acpi_debug_levels[i].name,
 130				  acpi_debug_levels[i].value,
 131				  (acpi_dbg_level & acpi_debug_levels[i].value)
 132				  ? '*' : ' ');
 133	}
 134	result +=
 135	    sprintf(buffer + result, "--\ndebug_level = 0x%08X (* = enabled)\n",
 136		    acpi_dbg_level);
 137
 138	return result;
 139}
 140
 141static const struct kernel_param_ops param_ops_debug_layer = {
 142	.set = param_set_uint,
 143	.get = param_get_debug_layer,
 144};
 145
 146static const struct kernel_param_ops param_ops_debug_level = {
 147	.set = param_set_uint,
 148	.get = param_get_debug_level,
 149};
 150
 151module_param_cb(debug_layer, &param_ops_debug_layer, &acpi_dbg_layer, 0644);
 152module_param_cb(debug_level, &param_ops_debug_level, &acpi_dbg_level, 0644);
 153
 154static char trace_method_name[1024];
 155
 156static int param_set_trace_method_name(const char *val,
 157				       const struct kernel_param *kp)
 158{
 159	u32 saved_flags = 0;
 160	bool is_abs_path = true;
 161
 162	if (*val != '\\')
 163		is_abs_path = false;
 164
 165	if ((is_abs_path && strlen(val) > 1023) ||
 166	    (!is_abs_path && strlen(val) > 1022)) {
 167		pr_err("%s: string parameter too long\n", kp->name);
 168		return -ENOSPC;
 169	}
 170
 171	/*
 172	 * It's not safe to update acpi_gbl_trace_method_name without
 173	 * having the tracer stopped, so we save the original tracer
 174	 * state and disable it.
 175	 */
 176	saved_flags = acpi_gbl_trace_flags;
 177	(void)acpi_debug_trace(NULL,
 178			       acpi_gbl_trace_dbg_level,
 179			       acpi_gbl_trace_dbg_layer,
 180			       0);
 181
 182	/* This is a hack.  We can't kmalloc in early boot. */
 183	if (is_abs_path)
 184		strcpy(trace_method_name, val);
 185	else {
 186		trace_method_name[0] = '\\';
 187		strcpy(trace_method_name+1, val);
 188	}
 189
 190	/* Restore the original tracer state */
 191	(void)acpi_debug_trace(trace_method_name,
 192			       acpi_gbl_trace_dbg_level,
 193			       acpi_gbl_trace_dbg_layer,
 194			       saved_flags);
 195
 196	return 0;
 197}
 198
 199static int param_get_trace_method_name(char *buffer, const struct kernel_param *kp)
 200{
 201	return sysfs_emit(buffer, "%s\n", acpi_gbl_trace_method_name);
 202}
 203
 204static const struct kernel_param_ops param_ops_trace_method = {
 205	.set = param_set_trace_method_name,
 206	.get = param_get_trace_method_name,
 207};
 208
 209static const struct kernel_param_ops param_ops_trace_attrib = {
 210	.set = param_set_uint,
 211	.get = param_get_uint,
 212};
 213
 214module_param_cb(trace_method_name, &param_ops_trace_method, &trace_method_name, 0644);
 215module_param_cb(trace_debug_layer, &param_ops_trace_attrib, &acpi_gbl_trace_dbg_layer, 0644);
 216module_param_cb(trace_debug_level, &param_ops_trace_attrib, &acpi_gbl_trace_dbg_level, 0644);
 217
 218static int param_set_trace_state(const char *val,
 219				 const struct kernel_param *kp)
 220{
 221	acpi_status status;
 222	const char *method = trace_method_name;
 223	u32 flags = 0;
 224
  225/* Prefix comparison: the "xxx-once" checks must come before the plain "xxx" checks */
 226#define acpi_compare_param(val, key)	\
 227	strncmp((val), (key), sizeof(key) - 1)
 228
 229	if (!acpi_compare_param(val, "enable")) {
 230		method = NULL;
 231		flags = ACPI_TRACE_ENABLED;
 232	} else if (!acpi_compare_param(val, "disable"))
 233		method = NULL;
 234	else if (!acpi_compare_param(val, "method-once"))
 235		flags = ACPI_TRACE_ENABLED | ACPI_TRACE_ONESHOT;
 236	else if (!acpi_compare_param(val, "method"))
 237		flags = ACPI_TRACE_ENABLED;
 238	else if (!acpi_compare_param(val, "opcode-once"))
 239		flags = ACPI_TRACE_ENABLED | ACPI_TRACE_ONESHOT | ACPI_TRACE_OPCODE;
 240	else if (!acpi_compare_param(val, "opcode"))
 241		flags = ACPI_TRACE_ENABLED | ACPI_TRACE_OPCODE;
 242	else
 243		return -EINVAL;
 244
 245	status = acpi_debug_trace(method,
 246				  acpi_gbl_trace_dbg_level,
 247				  acpi_gbl_trace_dbg_layer,
 248				  flags);
 249	if (ACPI_FAILURE(status))
 250		return -EBUSY;
 251
 252	return 0;
 253}
 254
 255static int param_get_trace_state(char *buffer, const struct kernel_param *kp)
 256{
 257	if (!(acpi_gbl_trace_flags & ACPI_TRACE_ENABLED))
 258		return sprintf(buffer, "disable\n");
 259	if (!acpi_gbl_trace_method_name)
 260		return sprintf(buffer, "enable\n");
 261	if (acpi_gbl_trace_flags & ACPI_TRACE_ONESHOT)
 262		return sprintf(buffer, "method-once\n");
 263	else
 264		return sprintf(buffer, "method\n");
 265}
 266
 267module_param_call(trace_state, param_set_trace_state, param_get_trace_state,
 268		  NULL, 0644);
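/*
 * A rough end-to-end example (the method path and mask values are
 * hypothetical): write an absolute namespace path to trace_method_name,
 * choose the masks to apply while that method runs, then arm the tracer
 * through trace_state:
 *
 *   echo '\_SB.PCI0._INI' > /sys/module/acpi/parameters/trace_method_name
 *   echo 0x00000004 > /sys/module/acpi/parameters/trace_debug_level
 *   echo 0xFFFFFFFF > /sys/module/acpi/parameters/trace_debug_layer
 *   echo method-once > /sys/module/acpi/parameters/trace_state
 *   echo disable > /sys/module/acpi/parameters/trace_state
 *
 * "enable" turns tracing on without binding it to a method, "disable" turns
 * it off, and the "-once" variants (ACPI_TRACE_ONESHOT) stop after the first
 * matching invocation, as implemented in param_set_trace_state() above.
 */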
 269#endif /* CONFIG_ACPI_DEBUG */
 270
 271
  272/* /sys/module/acpi/parameters/aml_debug_output */
 273
 274module_param_named(aml_debug_output, acpi_gbl_enable_aml_debug_object,
 275		   byte, 0644);
 276MODULE_PARM_DESC(aml_debug_output,
 277		 "To enable/disable the ACPI Debug Object output.");
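/*
 * For example (illustrative): "echo 1 > /sys/module/acpi/parameters/aml_debug_output"
 * makes AML stores to the Debug object appear in the kernel log, and
 * "echo 0" silences them again.
 */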
 278
 279/* /sys/module/acpi/parameters/acpica_version */
 280static int param_get_acpica_version(char *buffer,
 281				    const struct kernel_param *kp)
 282{
 283	int result;
 284
 285	result = sprintf(buffer, "%x\n", ACPI_CA_VERSION);
 286
 287	return result;
 288}
 289
 290module_param_call(acpica_version, NULL, param_get_acpica_version, NULL, 0444);
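/*
 * Reading it returns the date-coded ACPICA release built into the kernel,
 * e.g. "cat /sys/module/acpi/parameters/acpica_version" might print
 * 20240827; the exact value depends on the kernel's ACPICA version.
 */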
 291
 292/*
 293 * ACPI table sysfs I/F:
 294 * /sys/firmware/acpi/tables/
 295 * /sys/firmware/acpi/tables/data/
 296 * /sys/firmware/acpi/tables/dynamic/
 297 */
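/*
 * A short usage sketch (the dump file name is arbitrary): every firmware
 * table is exposed as a read-only binary file, with a numeric suffix when
 * several instances of one signature exist (e.g. SSDT1, SSDT2, ...):
 *
 *   cat /sys/firmware/acpi/tables/DSDT > dsdt.dat
 *   ls /sys/firmware/acpi/tables/data      # raw regions referenced by
 *                                          # tables (e.g. BERT), see below
 *   ls /sys/firmware/acpi/tables/dynamic   # tables installed at runtime
 */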
 298
 299static LIST_HEAD(acpi_table_attr_list);
 300static struct kobject *tables_kobj;
 301static struct kobject *tables_data_kobj;
 302static struct kobject *dynamic_tables_kobj;
 303static struct kobject *hotplug_kobj;
 304
 305#define ACPI_MAX_TABLE_INSTANCES	999
 306#define ACPI_INST_SIZE			4 /* including trailing 0 */
 307
 308struct acpi_table_attr {
 309	struct bin_attribute attr;
 310	char name[ACPI_NAMESEG_SIZE];
 311	int instance;
 312	char filename[ACPI_NAMESEG_SIZE+ACPI_INST_SIZE];
 313	struct list_head node;
 314};
 315
 316struct acpi_data_attr {
 317	struct bin_attribute attr;
 318	u64	addr;
 319};
 320
 321static ssize_t acpi_table_show(struct file *filp, struct kobject *kobj,
 322			       struct bin_attribute *bin_attr, char *buf,
 323			       loff_t offset, size_t count)
 324{
 325	struct acpi_table_attr *table_attr =
 326	    container_of(bin_attr, struct acpi_table_attr, attr);
 327	struct acpi_table_header *table_header = NULL;
 328	acpi_status status;
 329	ssize_t rc;
 330
 331	status = acpi_get_table(table_attr->name, table_attr->instance,
 332				&table_header);
 333	if (ACPI_FAILURE(status))
 334		return -ENODEV;
 335
 336	rc = memory_read_from_buffer(buf, count, &offset, table_header,
 337			table_header->length);
 338	acpi_put_table(table_header);
 339	return rc;
 340}
 341
 342static int acpi_table_attr_init(struct kobject *tables_obj,
 343				struct acpi_table_attr *table_attr,
 344				struct acpi_table_header *table_header)
 345{
 346	struct acpi_table_header *header = NULL;
 347	struct acpi_table_attr *attr = NULL;
 348	char instance_str[ACPI_INST_SIZE];
 349
 350	sysfs_attr_init(&table_attr->attr.attr);
 351	ACPI_COPY_NAMESEG(table_attr->name, table_header->signature);
 352
 353	list_for_each_entry(attr, &acpi_table_attr_list, node) {
 354		if (ACPI_COMPARE_NAMESEG(table_attr->name, attr->name))
 355			if (table_attr->instance < attr->instance)
 356				table_attr->instance = attr->instance;
 357	}
 358	table_attr->instance++;
 359	if (table_attr->instance > ACPI_MAX_TABLE_INSTANCES) {
 360		pr_warn("%4.4s: too many table instances\n", table_attr->name);
 361		return -ERANGE;
 362	}
 363
 364	ACPI_COPY_NAMESEG(table_attr->filename, table_header->signature);
 365	table_attr->filename[ACPI_NAMESEG_SIZE] = '\0';
 366	if (table_attr->instance > 1 || (table_attr->instance == 1 &&
 367					 !acpi_get_table
 368					 (table_header->signature, 2, &header))) {
 369		snprintf(instance_str, sizeof(instance_str), "%u",
 370			 table_attr->instance);
 371		strcat(table_attr->filename, instance_str);
 372	}
 373
 374	table_attr->attr.size = table_header->length;
 375	table_attr->attr.read = acpi_table_show;
 376	table_attr->attr.attr.name = table_attr->filename;
 377	table_attr->attr.attr.mode = 0400;
 378
 379	return sysfs_create_bin_file(tables_obj, &table_attr->attr);
 380}
 381
 382acpi_status acpi_sysfs_table_handler(u32 event, void *table, void *context)
 383{
 384	struct acpi_table_attr *table_attr;
 385
 386	switch (event) {
 387	case ACPI_TABLE_EVENT_INSTALL:
 388		table_attr = kzalloc(sizeof(*table_attr), GFP_KERNEL);
 389		if (!table_attr)
 390			return AE_NO_MEMORY;
 391
 392		if (acpi_table_attr_init(dynamic_tables_kobj,
 393					 table_attr, table)) {
 394			kfree(table_attr);
 395			return AE_ERROR;
 396		}
 397		list_add_tail(&table_attr->node, &acpi_table_attr_list);
 398		break;
 399	case ACPI_TABLE_EVENT_LOAD:
 400	case ACPI_TABLE_EVENT_UNLOAD:
 401	case ACPI_TABLE_EVENT_UNINSTALL:
 402		/*
 403		 * we do not need to do anything right now
 404		 * because the table is not deleted from the
 405		 * global table list when unloading it.
 406		 */
 407		break;
 408	default:
 409		return AE_BAD_PARAMETER;
 410	}
 411	return AE_OK;
 412}
 413
 414static ssize_t acpi_data_show(struct file *filp, struct kobject *kobj,
 415			      struct bin_attribute *bin_attr, char *buf,
 416			      loff_t offset, size_t count)
 417{
 418	struct acpi_data_attr *data_attr;
 419	void __iomem *base;
 420	ssize_t size;
 421
 422	data_attr = container_of(bin_attr, struct acpi_data_attr, attr);
 423	size = data_attr->attr.size;
 424
 425	if (offset < 0)
 426		return -EINVAL;
 427
 428	if (offset >= size)
 429		return 0;
 430
 431	if (count > size - offset)
 432		count = size - offset;
 433
 434	base = acpi_os_map_iomem(data_attr->addr, size);
 435	if (!base)
  436		return -ENOMEM;
  437
 438	memcpy_fromio(buf, base + offset, count);
 439
 440	acpi_os_unmap_iomem(base, size);
 441
 442	return count;
 443}
 444
 445static int acpi_bert_data_init(void *th, struct acpi_data_attr *data_attr)
 446{
 447	struct acpi_table_bert *bert = th;
 448
 449	if (bert->header.length < sizeof(struct acpi_table_bert) ||
 450	    bert->region_length < sizeof(struct acpi_hest_generic_status)) {
 451		kfree(data_attr);
 452		return -EINVAL;
 453	}
 454	data_attr->addr = bert->address;
 455	data_attr->attr.size = bert->region_length;
 456	data_attr->attr.attr.name = "BERT";
 457
 458	return sysfs_create_bin_file(tables_data_kobj, &data_attr->attr);
 459}
 460
 461static int acpi_ccel_data_init(void *th, struct acpi_data_attr *data_attr)
 462{
 463	struct acpi_table_ccel *ccel = th;
 464
 465	if (ccel->header.length < sizeof(struct acpi_table_ccel) ||
 466	    !ccel->log_area_start_address || !ccel->log_area_minimum_length) {
 467		kfree(data_attr);
 468		return -EINVAL;
 469	}
 470	data_attr->addr = ccel->log_area_start_address;
 471	data_attr->attr.size = ccel->log_area_minimum_length;
 472	data_attr->attr.attr.name = "CCEL";
 473
 474	return sysfs_create_bin_file(tables_data_kobj, &data_attr->attr);
 475}
 476
 477static struct acpi_data_obj {
 478	char *name;
 479	int (*fn)(void *, struct acpi_data_attr *);
 480} acpi_data_objs[] = {
 481	{ ACPI_SIG_BERT, acpi_bert_data_init },
 482	{ ACPI_SIG_CCEL, acpi_ccel_data_init },
 483};
 484
 485#define NUM_ACPI_DATA_OBJS ARRAY_SIZE(acpi_data_objs)
 486
 487static int acpi_table_data_init(struct acpi_table_header *th)
 488{
 489	struct acpi_data_attr *data_attr;
 490	int i;
 491
 492	for (i = 0; i < NUM_ACPI_DATA_OBJS; i++) {
 493		if (ACPI_COMPARE_NAMESEG(th->signature, acpi_data_objs[i].name)) {
 494			data_attr = kzalloc(sizeof(*data_attr), GFP_KERNEL);
 495			if (!data_attr)
 496				return -ENOMEM;
 497			sysfs_attr_init(&data_attr->attr.attr);
 498			data_attr->attr.read = acpi_data_show;
 499			data_attr->attr.attr.mode = 0400;
 500			return acpi_data_objs[i].fn(th, data_attr);
 501		}
 502	}
 503	return 0;
 504}
 505
 506static int acpi_tables_sysfs_init(void)
 507{
 508	struct acpi_table_attr *table_attr;
 509	struct acpi_table_header *table_header = NULL;
 510	int table_index;
 511	acpi_status status;
 512	int ret;
 513
 514	tables_kobj = kobject_create_and_add("tables", acpi_kobj);
 515	if (!tables_kobj)
 516		goto err;
 517
 518	tables_data_kobj = kobject_create_and_add("data", tables_kobj);
 519	if (!tables_data_kobj)
 520		goto err_tables_data;
 521
 522	dynamic_tables_kobj = kobject_create_and_add("dynamic", tables_kobj);
 523	if (!dynamic_tables_kobj)
 524		goto err_dynamic_tables;
 525
 526	for (table_index = 0;; table_index++) {
 527		status = acpi_get_table_by_index(table_index, &table_header);
 528
 529		if (status == AE_BAD_PARAMETER)
 530			break;
 531
 532		if (ACPI_FAILURE(status))
 533			continue;
 534
 535		table_attr = kzalloc(sizeof(*table_attr), GFP_KERNEL);
 536		if (!table_attr)
 537			return -ENOMEM;
 538
 539		ret = acpi_table_attr_init(tables_kobj,
 540					   table_attr, table_header);
 541		if (ret) {
 542			kfree(table_attr);
 543			return ret;
 544		}
 545		list_add_tail(&table_attr->node, &acpi_table_attr_list);
 546		acpi_table_data_init(table_header);
 547	}
 548
 549	kobject_uevent(tables_kobj, KOBJ_ADD);
 550	kobject_uevent(tables_data_kobj, KOBJ_ADD);
 551	kobject_uevent(dynamic_tables_kobj, KOBJ_ADD);
 552
 553	return 0;
 554err_dynamic_tables:
 555	kobject_put(tables_data_kobj);
 556err_tables_data:
 557	kobject_put(tables_kobj);
 558err:
 559	return -ENOMEM;
 560}
 561
 562/*
 563 * Detailed ACPI IRQ counters:
 564 * /sys/firmware/acpi/interrupts/
 565 */
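/*
 * For example, "grep . /sys/firmware/acpi/interrupts/*" dumps one counter
 * per GPE (gpeXX) and per fixed event (ff_*), plus the aggregate gpe_all,
 * sci, sci_not and error files created below.
 */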
 566
 567u32 acpi_irq_handled;
 568u32 acpi_irq_not_handled;
 569
 570#define COUNT_GPE 0
 571#define COUNT_SCI 1		/* acpi_irq_handled */
 572#define COUNT_SCI_NOT 2		/* acpi_irq_not_handled */
 573#define COUNT_ERROR 3		/* other */
 574#define NUM_COUNTERS_EXTRA 4
 575
 576struct event_counter {
 577	u32 count;
 578	u32 flags;
 579};
 580
 581static struct event_counter *all_counters;
 582static u32 num_gpes;
 583static u32 num_counters;
 584static struct attribute **all_attrs;
 585static u32 acpi_gpe_count;
 586
 587static struct attribute_group interrupt_stats_attr_group = {
 588	.name = "interrupts",
 589};
 590
 591static struct kobj_attribute *counter_attrs;
 592
 593static void delete_gpe_attr_array(void)
 594{
 595	struct event_counter *tmp = all_counters;
 596
 597	all_counters = NULL;
 598	kfree(tmp);
 599
 600	if (counter_attrs) {
 601		int i;
 602
 603		for (i = 0; i < num_gpes; i++)
 604			kfree(counter_attrs[i].attr.name);
 605
 606		kfree(counter_attrs);
 607	}
 608	kfree(all_attrs);
 609}
 610
 611static void gpe_count(u32 gpe_number)
 612{
 613	acpi_gpe_count++;
 614
 615	if (!all_counters)
 616		return;
 617
 618	if (gpe_number < num_gpes)
 619		all_counters[gpe_number].count++;
 620	else
 621		all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS +
 622			     COUNT_ERROR].count++;
 623}
 624
 625static void fixed_event_count(u32 event_number)
 626{
 627	if (!all_counters)
 628		return;
 629
 630	if (event_number < ACPI_NUM_FIXED_EVENTS)
 631		all_counters[num_gpes + event_number].count++;
 632	else
 633		all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS +
 634			     COUNT_ERROR].count++;
 635}
 636
 637static void acpi_global_event_handler(u32 event_type, acpi_handle device,
 638	u32 event_number, void *context)
 639{
 640	if (event_type == ACPI_EVENT_TYPE_GPE) {
 641		gpe_count(event_number);
 642		pr_debug("GPE event 0x%02x\n", event_number);
 643	} else if (event_type == ACPI_EVENT_TYPE_FIXED) {
 644		fixed_event_count(event_number);
 645		pr_debug("Fixed event 0x%02x\n", event_number);
 646	} else {
 647		pr_debug("Other event 0x%02x\n", event_number);
 648	}
 649}
 650
 651static int get_status(u32 index, acpi_event_status *ret,
 652		      acpi_handle *handle)
 653{
 654	acpi_status status;
 655
 656	if (index >= num_gpes + ACPI_NUM_FIXED_EVENTS)
 657		return -EINVAL;
 658
 659	if (index < num_gpes) {
 660		status = acpi_get_gpe_device(index, handle);
 661		if (ACPI_FAILURE(status)) {
 662			pr_warn("Invalid GPE 0x%x", index);
 663			return -ENXIO;
 664		}
 665		status = acpi_get_gpe_status(*handle, index, ret);
 666	} else {
 667		status = acpi_get_event_status(index - num_gpes, ret);
 668	}
 669	if (ACPI_FAILURE(status))
 670		return -EIO;
 671
 672	return 0;
 673}
 674
 675static ssize_t counter_show(struct kobject *kobj,
 676			    struct kobj_attribute *attr, char *buf)
 677{
 678	int index = attr - counter_attrs;
 679	int size;
 680	acpi_handle handle;
 681	acpi_event_status status;
 682	int result = 0;
 683
 684	all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI].count =
 685	    acpi_irq_handled;
 686	all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI_NOT].count =
 687	    acpi_irq_not_handled;
 688	all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_GPE].count =
 689	    acpi_gpe_count;
 690	size = sprintf(buf, "%8u", all_counters[index].count);
 691
 692	/* "gpe_all" or "sci" */
 693	if (index >= num_gpes + ACPI_NUM_FIXED_EVENTS)
 694		goto end;
 695
 696	result = get_status(index, &status, &handle);
 697	if (result)
 698		goto end;
 699
 700	if (status & ACPI_EVENT_FLAG_ENABLE_SET)
 701		size += sprintf(buf + size, "  EN");
 702	else
 703		size += sprintf(buf + size, "    ");
 704	if (status & ACPI_EVENT_FLAG_STATUS_SET)
 705		size += sprintf(buf + size, " STS");
 706	else
 707		size += sprintf(buf + size, "    ");
 708
 709	if (!(status & ACPI_EVENT_FLAG_HAS_HANDLER))
 710		size += sprintf(buf + size, " invalid     ");
 711	else if (status & ACPI_EVENT_FLAG_ENABLED)
 712		size += sprintf(buf + size, " enabled     ");
 713	else if (status & ACPI_EVENT_FLAG_WAKE_ENABLED)
 714		size += sprintf(buf + size, " wake_enabled");
 715	else
 716		size += sprintf(buf + size, " disabled    ");
 717	if (status & ACPI_EVENT_FLAG_MASKED)
 718		size += sprintf(buf + size, " masked  ");
 719	else
 720		size += sprintf(buf + size, " unmasked");
 721
 722end:
 723	size += sprintf(buf + size, "\n");
 724	return result ? result : size;
 725}
 726
  727/*
  728 * counter_set() sets the specified counter.
  729 * Writing any value to the aggregate "sci" file clears all counters.
  730 * Writing enable/disable/clear controls a GPE or fixed event from user space.
  731 */
 732static ssize_t counter_set(struct kobject *kobj,
 733			   struct kobj_attribute *attr, const char *buf,
 734			   size_t size)
 735{
 736	int index = attr - counter_attrs;
 737	acpi_event_status status;
 738	acpi_handle handle;
 739	int result = 0;
 740	unsigned long tmp;
 741
 742	if (index == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI) {
 743		int i;
 744		for (i = 0; i < num_counters; ++i)
 745			all_counters[i].count = 0;
 746		acpi_gpe_count = 0;
 747		acpi_irq_handled = 0;
 748		acpi_irq_not_handled = 0;
 749		goto end;
 750	}
 751
 752	/* show the event status for both GPEs and Fixed Events */
 753	result = get_status(index, &status, &handle);
 754	if (result)
 755		goto end;
 756
 757	if (!(status & ACPI_EVENT_FLAG_HAS_HANDLER)) {
 758		pr_warn("Can not change Invalid GPE/Fixed Event status\n");
 759		return -EINVAL;
 760	}
 761
 762	if (index < num_gpes) {
 763		if (!strcmp(buf, "disable\n") &&
 764		    (status & ACPI_EVENT_FLAG_ENABLED))
 765			result = acpi_disable_gpe(handle, index);
 766		else if (!strcmp(buf, "enable\n") &&
 767			 !(status & ACPI_EVENT_FLAG_ENABLED))
 768			result = acpi_enable_gpe(handle, index);
 769		else if (!strcmp(buf, "clear\n") &&
 770			 (status & ACPI_EVENT_FLAG_STATUS_SET))
 771			result = acpi_clear_gpe(handle, index);
 772		else if (!strcmp(buf, "mask\n"))
 773			result = acpi_mask_gpe(handle, index, TRUE);
 774		else if (!strcmp(buf, "unmask\n"))
 775			result = acpi_mask_gpe(handle, index, FALSE);
 776		else if (!kstrtoul(buf, 0, &tmp))
 777			all_counters[index].count = tmp;
 778		else
 779			result = -EINVAL;
 780	} else if (index < num_gpes + ACPI_NUM_FIXED_EVENTS) {
 781		int event = index - num_gpes;
 782		if (!strcmp(buf, "disable\n") &&
 783		    (status & ACPI_EVENT_FLAG_ENABLE_SET))
 784			result = acpi_disable_event(event, ACPI_NOT_ISR);
 785		else if (!strcmp(buf, "enable\n") &&
 786			 !(status & ACPI_EVENT_FLAG_ENABLE_SET))
 787			result = acpi_enable_event(event, ACPI_NOT_ISR);
 788		else if (!strcmp(buf, "clear\n") &&
 789			 (status & ACPI_EVENT_FLAG_STATUS_SET))
 790			result = acpi_clear_event(event);
 791		else if (!kstrtoul(buf, 0, &tmp))
 792			all_counters[index].count = tmp;
 793		else
 794			result = -EINVAL;
 795	} else
 796		all_counters[index].count = strtoul(buf, NULL, 0);
 797
 798	if (ACPI_FAILURE(result))
 799		result = -EINVAL;
 800end:
 801	return result ? result : size;
 802}
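/*
 * Illustrative writes accepted by counter_set() above (the GPE number is
 * hypothetical):
 *
 *   echo disable > /sys/firmware/acpi/interrupts/gpe10
 *   echo enable  > /sys/firmware/acpi/interrupts/gpe10
 *   echo clear   > /sys/firmware/acpi/interrupts/gpe10
 *   echo mask    > /sys/firmware/acpi/interrupts/gpe10
 *   echo unmask  > /sys/firmware/acpi/interrupts/gpe10
 *   echo 0       > /sys/firmware/acpi/interrupts/gpe10   # set the counter
 *   echo 0       > /sys/firmware/acpi/interrupts/sci     # clear all counters
 */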
 803
 804/*
 805 * A Quirk Mechanism for GPE Flooding Prevention:
 806 *
 807 * Quirks may be needed to prevent GPE flooding on a specific GPE. The
 808 * flooding typically cannot be detected and automatically prevented by
 809 * ACPI_GPE_DISPATCH_NONE check because there is a _Lxx/_Exx prepared in
 810 * the AML tables. This normally indicates a feature gap in Linux, thus
 811 * instead of providing endless quirk tables, we provide a boot parameter
 812 * for those who want this quirk. For example, if the users want to prevent
 813 * the GPE flooding for GPE 00, they need to specify the following boot
 814 * parameter:
 815 *   acpi_mask_gpe=0x00
 816 * Note, the parameter can be a list (see bitmap_parselist() for the details).
 817 * The masking status can be modified by the following runtime controlling
 818 * interface:
 819 *   echo unmask > /sys/firmware/acpi/interrupts/gpe00
 820 */
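/*
 * Hypothetical boot-parameter examples: either a single GPE number, or a
 * bitmap_parselist()-style list of decimal ranges:
 *
 *   acpi_mask_gpe=0x10
 *   acpi_mask_gpe=16,32-47
 */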
 821#define ACPI_MASKABLE_GPE_MAX	0x100
 822static DECLARE_BITMAP(acpi_masked_gpes_map, ACPI_MASKABLE_GPE_MAX) __initdata;
 823
 824static int __init acpi_gpe_set_masked_gpes(char *val)
 825{
 826	int ret;
 827	u8 gpe;
 828
 829	ret = kstrtou8(val, 0, &gpe);
 830	if (ret) {
 831		ret = bitmap_parselist(val, acpi_masked_gpes_map, ACPI_MASKABLE_GPE_MAX);
 832		if (ret)
 833			return ret;
 834	} else
 835		set_bit(gpe, acpi_masked_gpes_map);
 836
 837	return 1;
 838}
 839__setup("acpi_mask_gpe=", acpi_gpe_set_masked_gpes);
 840
 841void __init acpi_gpe_apply_masked_gpes(void)
 842{
 843	acpi_handle handle;
 844	acpi_status status;
 845	u16 gpe;
 846
 847	for_each_set_bit(gpe, acpi_masked_gpes_map, ACPI_MASKABLE_GPE_MAX) {
 848		status = acpi_get_gpe_device(gpe, &handle);
 849		if (ACPI_SUCCESS(status)) {
 850			pr_info("Masking GPE 0x%x.\n", gpe);
 851			(void)acpi_mask_gpe(handle, gpe, TRUE);
 852		}
 853	}
 854}
 855
 856void acpi_irq_stats_init(void)
 857{
 858	acpi_status status;
 859	int i;
 860
 861	if (all_counters)
 862		return;
 863
 864	num_gpes = acpi_current_gpe_count;
 865	num_counters = num_gpes + ACPI_NUM_FIXED_EVENTS + NUM_COUNTERS_EXTRA;
 866
 867	all_attrs = kcalloc(num_counters + 1, sizeof(*all_attrs), GFP_KERNEL);
 868	if (all_attrs == NULL)
 869		return;
 870
 871	all_counters = kcalloc(num_counters, sizeof(*all_counters), GFP_KERNEL);
 872	if (all_counters == NULL)
 873		goto fail;
 874
 875	status = acpi_install_global_event_handler(acpi_global_event_handler, NULL);
 876	if (ACPI_FAILURE(status))
 877		goto fail;
 878
 879	counter_attrs = kcalloc(num_counters, sizeof(*counter_attrs), GFP_KERNEL);
 880	if (counter_attrs == NULL)
 881		goto fail;
 882
 883	for (i = 0; i < num_counters; ++i) {
 884		char buffer[12];
 885		char *name;
 886
 887		if (i < num_gpes)
 888			sprintf(buffer, "gpe%02X", i);
 889		else if (i == num_gpes + ACPI_EVENT_PMTIMER)
 890			sprintf(buffer, "ff_pmtimer");
 891		else if (i == num_gpes + ACPI_EVENT_GLOBAL)
 892			sprintf(buffer, "ff_gbl_lock");
 893		else if (i == num_gpes + ACPI_EVENT_POWER_BUTTON)
 894			sprintf(buffer, "ff_pwr_btn");
 895		else if (i == num_gpes + ACPI_EVENT_SLEEP_BUTTON)
 896			sprintf(buffer, "ff_slp_btn");
 897		else if (i == num_gpes + ACPI_EVENT_RTC)
 898			sprintf(buffer, "ff_rt_clk");
 899		else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_GPE)
 900			sprintf(buffer, "gpe_all");
 901		else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI)
 902			sprintf(buffer, "sci");
 903		else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI_NOT)
 904			sprintf(buffer, "sci_not");
 905		else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_ERROR)
 906			sprintf(buffer, "error");
 907		else
 908			sprintf(buffer, "bug%02X", i);
 909
 910		name = kstrdup(buffer, GFP_KERNEL);
 911		if (name == NULL)
 912			goto fail;
 913
 914		sysfs_attr_init(&counter_attrs[i].attr);
 915		counter_attrs[i].attr.name = name;
 916		counter_attrs[i].attr.mode = 0644;
 917		counter_attrs[i].show = counter_show;
 918		counter_attrs[i].store = counter_set;
 919
 920		all_attrs[i] = &counter_attrs[i].attr;
 921	}
 922
 923	interrupt_stats_attr_group.attrs = all_attrs;
 924	if (!sysfs_create_group(acpi_kobj, &interrupt_stats_attr_group))
 925		return;
 926
 927fail:
 928	delete_gpe_attr_array();
 929}
 930
 931static void __exit interrupt_stats_exit(void)
 932{
 933	sysfs_remove_group(acpi_kobj, &interrupt_stats_attr_group);
 934
 935	delete_gpe_attr_array();
 936}
 937
 938static ssize_t pm_profile_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
 939{
 940	return sprintf(buf, "%d\n", acpi_gbl_FADT.preferred_profile);
 941}
 942
 943static const struct kobj_attribute pm_profile_attr = __ATTR_RO(pm_profile);
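/*
 * pm_profile reports the FADT Preferred_PM_Profile field as a plain decimal
 * number (per the ACPI spec, e.g. 1 = Desktop, 2 = Mobile).
 */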
 944
 945static ssize_t enabled_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
 946{
 947	struct acpi_hotplug_profile *hotplug = to_acpi_hotplug_profile(kobj);
 948
 949	return sprintf(buf, "%d\n", hotplug->enabled);
 950}
 951
 952static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
 953			     const char *buf, size_t size)
 954{
 955	struct acpi_hotplug_profile *hotplug = to_acpi_hotplug_profile(kobj);
 956	unsigned int val;
 957
 958	if (kstrtouint(buf, 10, &val) || val > 1)
 959		return -EINVAL;
 960
 961	acpi_scan_hotplug_enabled(hotplug, val);
 962	return size;
 963}
 964
 965static struct kobj_attribute hotplug_enabled_attr = __ATTR_RW(enabled);
 966
 967static struct attribute *hotplug_profile_attrs[] = {
 968	&hotplug_enabled_attr.attr,
 969	NULL
 970};
 971ATTRIBUTE_GROUPS(hotplug_profile);
 972
 973static const struct kobj_type acpi_hotplug_profile_ktype = {
 974	.sysfs_ops = &kobj_sysfs_ops,
 975	.default_groups = hotplug_profile_groups,
 976};
 977
 978void acpi_sysfs_add_hotplug_profile(struct acpi_hotplug_profile *hotplug,
 979				    const char *name)
 980{
 981	int error;
 982
 983	if (!hotplug_kobj)
 984		goto err_out;
 985
 986	error = kobject_init_and_add(&hotplug->kobj,
 987		&acpi_hotplug_profile_ktype, hotplug_kobj, "%s", name);
 988	if (error) {
 989		kobject_put(&hotplug->kobj);
 990		goto err_out;
 991	}
 992
 993	kobject_uevent(&hotplug->kobj, KOBJ_ADD);
 994	return;
 995
 996 err_out:
 997	pr_err("Unable to add hotplug profile '%s'\n", name);
 998}
 999
1000static ssize_t force_remove_show(struct kobject *kobj,
1001				 struct kobj_attribute *attr, char *buf)
1002{
1003	return sprintf(buf, "%d\n", 0);
1004}
1005
1006static ssize_t force_remove_store(struct kobject *kobj,
1007				  struct kobj_attribute *attr,
1008				  const char *buf, size_t size)
1009{
1010	bool val;
1011	int ret;
1012
1013	ret = kstrtobool(buf, &val);
1014	if (ret < 0)
1015		return ret;
1016
1017	if (val) {
1018		pr_err("Enabling force_remove is not supported anymore. Please report to linux-acpi@vger.kernel.org if you depend on this functionality\n");
1019		return -EINVAL;
1020	}
1021	return size;
1022}
1023
1024static const struct kobj_attribute force_remove_attr = __ATTR_RW(force_remove);
1025
1026int __init acpi_sysfs_init(void)
1027{
1028	int result;
1029
1030	result = acpi_tables_sysfs_init();
1031	if (result)
1032		return result;
1033
1034	hotplug_kobj = kobject_create_and_add("hotplug", acpi_kobj);
1035	if (!hotplug_kobj)
1036		return -ENOMEM;
1037
1038	result = sysfs_create_file(hotplug_kobj, &force_remove_attr.attr);
1039	if (result)
1040		return result;
1041
1042	result = sysfs_create_file(acpi_kobj, &pm_profile_attr.attr);
1043	return result;
1044}
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * sysfs.c - ACPI sysfs interface to userspace.
   4 */
   5
   6#define pr_fmt(fmt) "ACPI: " fmt
   7
   8#include <linux/acpi.h>
   9#include <linux/bitmap.h>
  10#include <linux/init.h>
   11#include <linux/kernel.h>
   12#include <linux/moduleparam.h>
  13
  14#include "internal.h"
  15
  16#ifdef CONFIG_ACPI_DEBUG
  17/*
  18 * ACPI debug sysfs I/F, including:
   19 * /sys/module/acpi/parameters/debug_layer
   20 * /sys/module/acpi/parameters/debug_level
   21 * /sys/module/acpi/parameters/trace_method_name
   22 * /sys/module/acpi/parameters/trace_state
   23 * /sys/module/acpi/parameters/trace_debug_layer
   24 * /sys/module/acpi/parameters/trace_debug_level
  25 */
  26
  27struct acpi_dlayer {
  28	const char *name;
  29	unsigned long value;
  30};
  31struct acpi_dlevel {
  32	const char *name;
  33	unsigned long value;
  34};
  35#define ACPI_DEBUG_INIT(v)	{ .name = #v, .value = v }
  36
  37static const struct acpi_dlayer acpi_debug_layers[] = {
  38	ACPI_DEBUG_INIT(ACPI_UTILITIES),
  39	ACPI_DEBUG_INIT(ACPI_HARDWARE),
  40	ACPI_DEBUG_INIT(ACPI_EVENTS),
  41	ACPI_DEBUG_INIT(ACPI_TABLES),
  42	ACPI_DEBUG_INIT(ACPI_NAMESPACE),
  43	ACPI_DEBUG_INIT(ACPI_PARSER),
  44	ACPI_DEBUG_INIT(ACPI_DISPATCHER),
  45	ACPI_DEBUG_INIT(ACPI_EXECUTER),
  46	ACPI_DEBUG_INIT(ACPI_RESOURCES),
  47	ACPI_DEBUG_INIT(ACPI_CA_DEBUGGER),
  48	ACPI_DEBUG_INIT(ACPI_OS_SERVICES),
  49	ACPI_DEBUG_INIT(ACPI_CA_DISASSEMBLER),
  50	ACPI_DEBUG_INIT(ACPI_COMPILER),
  51	ACPI_DEBUG_INIT(ACPI_TOOLS),
  52};
  53
  54static const struct acpi_dlevel acpi_debug_levels[] = {
  55	ACPI_DEBUG_INIT(ACPI_LV_INIT),
  56	ACPI_DEBUG_INIT(ACPI_LV_DEBUG_OBJECT),
  57	ACPI_DEBUG_INIT(ACPI_LV_INFO),
  58	ACPI_DEBUG_INIT(ACPI_LV_REPAIR),
  59	ACPI_DEBUG_INIT(ACPI_LV_TRACE_POINT),
  60
  61	ACPI_DEBUG_INIT(ACPI_LV_INIT_NAMES),
  62	ACPI_DEBUG_INIT(ACPI_LV_PARSE),
  63	ACPI_DEBUG_INIT(ACPI_LV_LOAD),
  64	ACPI_DEBUG_INIT(ACPI_LV_DISPATCH),
  65	ACPI_DEBUG_INIT(ACPI_LV_EXEC),
  66	ACPI_DEBUG_INIT(ACPI_LV_NAMES),
  67	ACPI_DEBUG_INIT(ACPI_LV_OPREGION),
  68	ACPI_DEBUG_INIT(ACPI_LV_BFIELD),
  69	ACPI_DEBUG_INIT(ACPI_LV_TABLES),
  70	ACPI_DEBUG_INIT(ACPI_LV_VALUES),
  71	ACPI_DEBUG_INIT(ACPI_LV_OBJECTS),
  72	ACPI_DEBUG_INIT(ACPI_LV_RESOURCES),
  73	ACPI_DEBUG_INIT(ACPI_LV_USER_REQUESTS),
  74	ACPI_DEBUG_INIT(ACPI_LV_PACKAGE),
  75
  76	ACPI_DEBUG_INIT(ACPI_LV_ALLOCATIONS),
  77	ACPI_DEBUG_INIT(ACPI_LV_FUNCTIONS),
  78	ACPI_DEBUG_INIT(ACPI_LV_OPTIMIZATIONS),
  79
  80	ACPI_DEBUG_INIT(ACPI_LV_MUTEX),
  81	ACPI_DEBUG_INIT(ACPI_LV_THREADS),
  82	ACPI_DEBUG_INIT(ACPI_LV_IO),
  83	ACPI_DEBUG_INIT(ACPI_LV_INTERRUPTS),
  84
  85	ACPI_DEBUG_INIT(ACPI_LV_AML_DISASSEMBLE),
  86	ACPI_DEBUG_INIT(ACPI_LV_VERBOSE_INFO),
  87	ACPI_DEBUG_INIT(ACPI_LV_FULL_TABLES),
  88	ACPI_DEBUG_INIT(ACPI_LV_EVENTS),
  89};
  90
  91static int param_get_debug_layer(char *buffer, const struct kernel_param *kp)
  92{
  93	int result = 0;
  94	int i;
  95
  96	result = sprintf(buffer, "%-25s\tHex        SET\n", "Description");
  97
  98	for (i = 0; i < ARRAY_SIZE(acpi_debug_layers); i++) {
  99		result += sprintf(buffer + result, "%-25s\t0x%08lX [%c]\n",
 100				  acpi_debug_layers[i].name,
 101				  acpi_debug_layers[i].value,
 102				  (acpi_dbg_layer & acpi_debug_layers[i].value)
 103				  ? '*' : ' ');
 104	}
 105	result +=
 106	    sprintf(buffer + result, "%-25s\t0x%08X [%c]\n", "ACPI_ALL_DRIVERS",
 107		    ACPI_ALL_DRIVERS,
 108		    (acpi_dbg_layer & ACPI_ALL_DRIVERS) ==
 109		    ACPI_ALL_DRIVERS ? '*' : (acpi_dbg_layer & ACPI_ALL_DRIVERS)
 110		    == 0 ? ' ' : '-');
 111	result +=
 112	    sprintf(buffer + result,
 113		    "--\ndebug_layer = 0x%08X ( * = enabled)\n",
 114		    acpi_dbg_layer);
 115
 116	return result;
 117}
 118
 119static int param_get_debug_level(char *buffer, const struct kernel_param *kp)
 120{
 121	int result = 0;
 122	int i;
 123
 124	result = sprintf(buffer, "%-25s\tHex        SET\n", "Description");
 125
 126	for (i = 0; i < ARRAY_SIZE(acpi_debug_levels); i++) {
 127		result += sprintf(buffer + result, "%-25s\t0x%08lX [%c]\n",
 128				  acpi_debug_levels[i].name,
 129				  acpi_debug_levels[i].value,
 130				  (acpi_dbg_level & acpi_debug_levels[i].value)
 131				  ? '*' : ' ');
 132	}
 133	result +=
 134	    sprintf(buffer + result, "--\ndebug_level = 0x%08X (* = enabled)\n",
 135		    acpi_dbg_level);
 136
 137	return result;
 138}
 139
 140static const struct kernel_param_ops param_ops_debug_layer = {
 141	.set = param_set_uint,
 142	.get = param_get_debug_layer,
 143};
 144
 145static const struct kernel_param_ops param_ops_debug_level = {
 146	.set = param_set_uint,
 147	.get = param_get_debug_level,
 148};
 149
 150module_param_cb(debug_layer, &param_ops_debug_layer, &acpi_dbg_layer, 0644);
 151module_param_cb(debug_level, &param_ops_debug_level, &acpi_dbg_level, 0644);
 152
 153static char trace_method_name[1024];
 154
 155static int param_set_trace_method_name(const char *val,
 156				       const struct kernel_param *kp)
 157{
 158	u32 saved_flags = 0;
 159	bool is_abs_path = true;
 160
 161	if (*val != '\\')
 162		is_abs_path = false;
 163
 164	if ((is_abs_path && strlen(val) > 1023) ||
 165	    (!is_abs_path && strlen(val) > 1022)) {
 166		pr_err("%s: string parameter too long\n", kp->name);
 167		return -ENOSPC;
 168	}
 169
 170	/*
 171	 * It's not safe to update acpi_gbl_trace_method_name without
 172	 * having the tracer stopped, so we save the original tracer
 173	 * state and disable it.
 174	 */
 175	saved_flags = acpi_gbl_trace_flags;
 176	(void)acpi_debug_trace(NULL,
 177			       acpi_gbl_trace_dbg_level,
 178			       acpi_gbl_trace_dbg_layer,
 179			       0);
 180
 181	/* This is a hack.  We can't kmalloc in early boot. */
 182	if (is_abs_path)
 183		strcpy(trace_method_name, val);
 184	else {
 185		trace_method_name[0] = '\\';
 186		strcpy(trace_method_name+1, val);
 187	}
 188
 189	/* Restore the original tracer state */
 190	(void)acpi_debug_trace(trace_method_name,
 191			       acpi_gbl_trace_dbg_level,
 192			       acpi_gbl_trace_dbg_layer,
 193			       saved_flags);
 194
 195	return 0;
 196}
 197
 198static int param_get_trace_method_name(char *buffer, const struct kernel_param *kp)
 199{
 200	return scnprintf(buffer, PAGE_SIZE, "%s\n", acpi_gbl_trace_method_name);
 201}
 202
 203static const struct kernel_param_ops param_ops_trace_method = {
 204	.set = param_set_trace_method_name,
 205	.get = param_get_trace_method_name,
 206};
 207
 208static const struct kernel_param_ops param_ops_trace_attrib = {
 209	.set = param_set_uint,
 210	.get = param_get_uint,
 211};
 212
 213module_param_cb(trace_method_name, &param_ops_trace_method, &trace_method_name, 0644);
 214module_param_cb(trace_debug_layer, &param_ops_trace_attrib, &acpi_gbl_trace_dbg_layer, 0644);
 215module_param_cb(trace_debug_level, &param_ops_trace_attrib, &acpi_gbl_trace_dbg_level, 0644);
 216
 217static int param_set_trace_state(const char *val,
 218				 const struct kernel_param *kp)
 219{
 220	acpi_status status;
 221	const char *method = trace_method_name;
 222	u32 flags = 0;
 223
  224/* Prefix comparison: the "xxx-once" checks must come before the plain "xxx" checks */
 225#define acpi_compare_param(val, key)	\
 226	strncmp((val), (key), sizeof(key) - 1)
 227
 228	if (!acpi_compare_param(val, "enable")) {
 229		method = NULL;
 230		flags = ACPI_TRACE_ENABLED;
 231	} else if (!acpi_compare_param(val, "disable"))
 232		method = NULL;
 233	else if (!acpi_compare_param(val, "method-once"))
 234		flags = ACPI_TRACE_ENABLED | ACPI_TRACE_ONESHOT;
 235	else if (!acpi_compare_param(val, "method"))
 236		flags = ACPI_TRACE_ENABLED;
 237	else if (!acpi_compare_param(val, "opcode-once"))
 238		flags = ACPI_TRACE_ENABLED | ACPI_TRACE_ONESHOT | ACPI_TRACE_OPCODE;
 239	else if (!acpi_compare_param(val, "opcode"))
 240		flags = ACPI_TRACE_ENABLED | ACPI_TRACE_OPCODE;
 241	else
 242		return -EINVAL;
 243
 244	status = acpi_debug_trace(method,
 245				  acpi_gbl_trace_dbg_level,
 246				  acpi_gbl_trace_dbg_layer,
 247				  flags);
 248	if (ACPI_FAILURE(status))
 249		return -EBUSY;
 250
 251	return 0;
 252}
 253
 254static int param_get_trace_state(char *buffer, const struct kernel_param *kp)
 255{
 256	if (!(acpi_gbl_trace_flags & ACPI_TRACE_ENABLED))
 257		return sprintf(buffer, "disable\n");
 258	if (!acpi_gbl_trace_method_name)
 259		return sprintf(buffer, "enable\n");
 260	if (acpi_gbl_trace_flags & ACPI_TRACE_ONESHOT)
 261		return sprintf(buffer, "method-once\n");
 262	else
 263		return sprintf(buffer, "method\n");
 264}
 265
 266module_param_call(trace_state, param_set_trace_state, param_get_trace_state,
 267		  NULL, 0644);
 268#endif /* CONFIG_ACPI_DEBUG */
 269
 270
  271/* /sys/module/acpi/parameters/aml_debug_output */
 272
 273module_param_named(aml_debug_output, acpi_gbl_enable_aml_debug_object,
 274		   byte, 0644);
 275MODULE_PARM_DESC(aml_debug_output,
 276		 "To enable/disable the ACPI Debug Object output.");
 277
 278/* /sys/module/acpi/parameters/acpica_version */
 279static int param_get_acpica_version(char *buffer,
 280				    const struct kernel_param *kp)
 281{
 282	int result;
 283
 284	result = sprintf(buffer, "%x\n", ACPI_CA_VERSION);
 285
 286	return result;
 287}
 288
 289module_param_call(acpica_version, NULL, param_get_acpica_version, NULL, 0444);
 290
 291/*
 292 * ACPI table sysfs I/F:
 293 * /sys/firmware/acpi/tables/
 294 * /sys/firmware/acpi/tables/data/
 295 * /sys/firmware/acpi/tables/dynamic/
 296 */
 297
 298static LIST_HEAD(acpi_table_attr_list);
 299static struct kobject *tables_kobj;
 300static struct kobject *tables_data_kobj;
 301static struct kobject *dynamic_tables_kobj;
 302static struct kobject *hotplug_kobj;
 303
 304#define ACPI_MAX_TABLE_INSTANCES	999
 305#define ACPI_INST_SIZE			4 /* including trailing 0 */
 306
 307struct acpi_table_attr {
 308	struct bin_attribute attr;
 309	char name[ACPI_NAMESEG_SIZE];
 310	int instance;
 311	char filename[ACPI_NAMESEG_SIZE+ACPI_INST_SIZE];
 312	struct list_head node;
 313};
 314
 315struct acpi_data_attr {
 316	struct bin_attribute attr;
 317	u64	addr;
 318};
 319
 320static ssize_t acpi_table_show(struct file *filp, struct kobject *kobj,
 321			       struct bin_attribute *bin_attr, char *buf,
 322			       loff_t offset, size_t count)
 323{
 324	struct acpi_table_attr *table_attr =
 325	    container_of(bin_attr, struct acpi_table_attr, attr);
 326	struct acpi_table_header *table_header = NULL;
 327	acpi_status status;
 328	ssize_t rc;
 329
 330	status = acpi_get_table(table_attr->name, table_attr->instance,
 331				&table_header);
 332	if (ACPI_FAILURE(status))
 333		return -ENODEV;
 334
 335	rc = memory_read_from_buffer(buf, count, &offset, table_header,
 336			table_header->length);
 337	acpi_put_table(table_header);
 338	return rc;
 339}
 340
 341static int acpi_table_attr_init(struct kobject *tables_obj,
 342				struct acpi_table_attr *table_attr,
 343				struct acpi_table_header *table_header)
 344{
 345	struct acpi_table_header *header = NULL;
 346	struct acpi_table_attr *attr = NULL;
 347	char instance_str[ACPI_INST_SIZE];
 348
 349	sysfs_attr_init(&table_attr->attr.attr);
 350	ACPI_COPY_NAMESEG(table_attr->name, table_header->signature);
 351
 352	list_for_each_entry(attr, &acpi_table_attr_list, node) {
 353		if (ACPI_COMPARE_NAMESEG(table_attr->name, attr->name))
 354			if (table_attr->instance < attr->instance)
 355				table_attr->instance = attr->instance;
 356	}
 357	table_attr->instance++;
 358	if (table_attr->instance > ACPI_MAX_TABLE_INSTANCES) {
 359		pr_warn("%4.4s: too many table instances\n", table_attr->name);
 360		return -ERANGE;
 361	}
 362
 363	ACPI_COPY_NAMESEG(table_attr->filename, table_header->signature);
 364	table_attr->filename[ACPI_NAMESEG_SIZE] = '\0';
 365	if (table_attr->instance > 1 || (table_attr->instance == 1 &&
 366					 !acpi_get_table
 367					 (table_header->signature, 2, &header))) {
 368		snprintf(instance_str, sizeof(instance_str), "%u",
 369			 table_attr->instance);
 370		strcat(table_attr->filename, instance_str);
 371	}
 372
 373	table_attr->attr.size = table_header->length;
 374	table_attr->attr.read = acpi_table_show;
 375	table_attr->attr.attr.name = table_attr->filename;
 376	table_attr->attr.attr.mode = 0400;
 377
 378	return sysfs_create_bin_file(tables_obj, &table_attr->attr);
 379}
 380
 381acpi_status acpi_sysfs_table_handler(u32 event, void *table, void *context)
 382{
 383	struct acpi_table_attr *table_attr;
 384
 385	switch (event) {
 386	case ACPI_TABLE_EVENT_INSTALL:
 387		table_attr = kzalloc(sizeof(*table_attr), GFP_KERNEL);
 388		if (!table_attr)
 389			return AE_NO_MEMORY;
 390
 391		if (acpi_table_attr_init(dynamic_tables_kobj,
 392					 table_attr, table)) {
 393			kfree(table_attr);
 394			return AE_ERROR;
 395		}
 396		list_add_tail(&table_attr->node, &acpi_table_attr_list);
 397		break;
 398	case ACPI_TABLE_EVENT_LOAD:
 399	case ACPI_TABLE_EVENT_UNLOAD:
 400	case ACPI_TABLE_EVENT_UNINSTALL:
 401		/*
 402		 * we do not need to do anything right now
 403		 * because the table is not deleted from the
 404		 * global table list when unloading it.
 405		 */
 406		break;
 407	default:
 408		return AE_BAD_PARAMETER;
 409	}
 410	return AE_OK;
 411}
 412
 413static ssize_t acpi_data_show(struct file *filp, struct kobject *kobj,
 414			      struct bin_attribute *bin_attr, char *buf,
 415			      loff_t offset, size_t count)
 416{
 417	struct acpi_data_attr *data_attr;
 418	void *base;
 419	ssize_t rc;
 420
  421	data_attr = container_of(bin_attr, struct acpi_data_attr, attr);
  422
 423	base = acpi_os_map_memory(data_attr->addr, data_attr->attr.size);
 424	if (!base)
 425		return -ENOMEM;
 426	rc = memory_read_from_buffer(buf, count, &offset, base,
 427				     data_attr->attr.size);
 428	acpi_os_unmap_memory(base, data_attr->attr.size);
 429
  430	return rc;
  431}
 432
 433static int acpi_bert_data_init(void *th, struct acpi_data_attr *data_attr)
 434{
 435	struct acpi_table_bert *bert = th;
 436
 437	if (bert->header.length < sizeof(struct acpi_table_bert) ||
 438	    bert->region_length < sizeof(struct acpi_hest_generic_status)) {
 439		kfree(data_attr);
 440		return -EINVAL;
 441	}
 442	data_attr->addr = bert->address;
 443	data_attr->attr.size = bert->region_length;
 444	data_attr->attr.attr.name = "BERT";
 445
 446	return sysfs_create_bin_file(tables_data_kobj, &data_attr->attr);
 447}
  448
  449static struct acpi_data_obj {
 450	char *name;
 451	int (*fn)(void *, struct acpi_data_attr *);
 452} acpi_data_objs[] = {
  453	{ ACPI_SIG_BERT, acpi_bert_data_init },
  454};
 455
 456#define NUM_ACPI_DATA_OBJS ARRAY_SIZE(acpi_data_objs)
 457
 458static int acpi_table_data_init(struct acpi_table_header *th)
 459{
 460	struct acpi_data_attr *data_attr;
 461	int i;
 462
 463	for (i = 0; i < NUM_ACPI_DATA_OBJS; i++) {
 464		if (ACPI_COMPARE_NAMESEG(th->signature, acpi_data_objs[i].name)) {
 465			data_attr = kzalloc(sizeof(*data_attr), GFP_KERNEL);
 466			if (!data_attr)
 467				return -ENOMEM;
 468			sysfs_attr_init(&data_attr->attr.attr);
 469			data_attr->attr.read = acpi_data_show;
 470			data_attr->attr.attr.mode = 0400;
 471			return acpi_data_objs[i].fn(th, data_attr);
 472		}
 473	}
 474	return 0;
 475}
 476
 477static int acpi_tables_sysfs_init(void)
 478{
 479	struct acpi_table_attr *table_attr;
 480	struct acpi_table_header *table_header = NULL;
 481	int table_index;
 482	acpi_status status;
 483	int ret;
 484
 485	tables_kobj = kobject_create_and_add("tables", acpi_kobj);
 486	if (!tables_kobj)
 487		goto err;
 488
 489	tables_data_kobj = kobject_create_and_add("data", tables_kobj);
 490	if (!tables_data_kobj)
 491		goto err_tables_data;
 492
 493	dynamic_tables_kobj = kobject_create_and_add("dynamic", tables_kobj);
 494	if (!dynamic_tables_kobj)
 495		goto err_dynamic_tables;
 496
 497	for (table_index = 0;; table_index++) {
 498		status = acpi_get_table_by_index(table_index, &table_header);
 499
 500		if (status == AE_BAD_PARAMETER)
 501			break;
 502
 503		if (ACPI_FAILURE(status))
 504			continue;
 505
 506		table_attr = kzalloc(sizeof(*table_attr), GFP_KERNEL);
 507		if (!table_attr)
 508			return -ENOMEM;
 509
 510		ret = acpi_table_attr_init(tables_kobj,
 511					   table_attr, table_header);
 512		if (ret) {
 513			kfree(table_attr);
 514			return ret;
 515		}
 516		list_add_tail(&table_attr->node, &acpi_table_attr_list);
 517		acpi_table_data_init(table_header);
 518	}
 519
 520	kobject_uevent(tables_kobj, KOBJ_ADD);
 521	kobject_uevent(tables_data_kobj, KOBJ_ADD);
 522	kobject_uevent(dynamic_tables_kobj, KOBJ_ADD);
 523
 524	return 0;
 525err_dynamic_tables:
 526	kobject_put(tables_data_kobj);
 527err_tables_data:
 528	kobject_put(tables_kobj);
 529err:
 530	return -ENOMEM;
 531}
 532
 533/*
 534 * Detailed ACPI IRQ counters:
 535 * /sys/firmware/acpi/interrupts/
 536 */
 537
 538u32 acpi_irq_handled;
 539u32 acpi_irq_not_handled;
 540
 541#define COUNT_GPE 0
 542#define COUNT_SCI 1		/* acpi_irq_handled */
 543#define COUNT_SCI_NOT 2		/* acpi_irq_not_handled */
 544#define COUNT_ERROR 3		/* other */
 545#define NUM_COUNTERS_EXTRA 4
 546
 547struct event_counter {
 548	u32 count;
 549	u32 flags;
 550};
 551
 552static struct event_counter *all_counters;
 553static u32 num_gpes;
 554static u32 num_counters;
 555static struct attribute **all_attrs;
 556static u32 acpi_gpe_count;
 557
 558static struct attribute_group interrupt_stats_attr_group = {
 559	.name = "interrupts",
 560};
 561
 562static struct kobj_attribute *counter_attrs;
 563
 564static void delete_gpe_attr_array(void)
 565{
 566	struct event_counter *tmp = all_counters;
 567
 568	all_counters = NULL;
 569	kfree(tmp);
 570
 571	if (counter_attrs) {
 572		int i;
 573
 574		for (i = 0; i < num_gpes; i++)
 575			kfree(counter_attrs[i].attr.name);
 576
 577		kfree(counter_attrs);
 578	}
 579	kfree(all_attrs);
 580}
 581
 582static void gpe_count(u32 gpe_number)
 583{
 584	acpi_gpe_count++;
 585
 586	if (!all_counters)
 587		return;
 588
 589	if (gpe_number < num_gpes)
 590		all_counters[gpe_number].count++;
 591	else
 592		all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS +
 593			     COUNT_ERROR].count++;
 594}
 595
 596static void fixed_event_count(u32 event_number)
 597{
 598	if (!all_counters)
 599		return;
 600
 601	if (event_number < ACPI_NUM_FIXED_EVENTS)
 602		all_counters[num_gpes + event_number].count++;
 603	else
 604		all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS +
 605			     COUNT_ERROR].count++;
 606}
 607
 608static void acpi_global_event_handler(u32 event_type, acpi_handle device,
 609	u32 event_number, void *context)
 610{
 611	if (event_type == ACPI_EVENT_TYPE_GPE) {
 612		gpe_count(event_number);
 613		pr_debug("GPE event 0x%02x\n", event_number);
 614	} else if (event_type == ACPI_EVENT_TYPE_FIXED) {
 615		fixed_event_count(event_number);
 616		pr_debug("Fixed event 0x%02x\n", event_number);
 617	} else {
 618		pr_debug("Other event 0x%02x\n", event_number);
 619	}
 620}
 621
 622static int get_status(u32 index, acpi_event_status *ret,
 623		      acpi_handle *handle)
 624{
 625	acpi_status status;
 626
 627	if (index >= num_gpes + ACPI_NUM_FIXED_EVENTS)
 628		return -EINVAL;
 629
 630	if (index < num_gpes) {
 631		status = acpi_get_gpe_device(index, handle);
 632		if (ACPI_FAILURE(status)) {
 633			pr_warn("Invalid GPE 0x%x", index);
 634			return -ENXIO;
 635		}
 636		status = acpi_get_gpe_status(*handle, index, ret);
 637	} else {
 638		status = acpi_get_event_status(index - num_gpes, ret);
 639	}
 640	if (ACPI_FAILURE(status))
 641		return -EIO;
 642
 643	return 0;
 644}
 645
 646static ssize_t counter_show(struct kobject *kobj,
 647			    struct kobj_attribute *attr, char *buf)
 648{
 649	int index = attr - counter_attrs;
 650	int size;
 651	acpi_handle handle;
 652	acpi_event_status status;
 653	int result = 0;
 654
 655	all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI].count =
 656	    acpi_irq_handled;
 657	all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI_NOT].count =
 658	    acpi_irq_not_handled;
 659	all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_GPE].count =
 660	    acpi_gpe_count;
 661	size = sprintf(buf, "%8u", all_counters[index].count);
 662
 663	/* "gpe_all" or "sci" */
 664	if (index >= num_gpes + ACPI_NUM_FIXED_EVENTS)
 665		goto end;
 666
 667	result = get_status(index, &status, &handle);
 668	if (result)
 669		goto end;
 670
 671	if (status & ACPI_EVENT_FLAG_ENABLE_SET)
 672		size += sprintf(buf + size, "  EN");
 673	else
 674		size += sprintf(buf + size, "    ");
 675	if (status & ACPI_EVENT_FLAG_STATUS_SET)
 676		size += sprintf(buf + size, " STS");
 677	else
 678		size += sprintf(buf + size, "    ");
 679
 680	if (!(status & ACPI_EVENT_FLAG_HAS_HANDLER))
 681		size += sprintf(buf + size, " invalid     ");
 682	else if (status & ACPI_EVENT_FLAG_ENABLED)
 683		size += sprintf(buf + size, " enabled     ");
 684	else if (status & ACPI_EVENT_FLAG_WAKE_ENABLED)
 685		size += sprintf(buf + size, " wake_enabled");
 686	else
 687		size += sprintf(buf + size, " disabled    ");
 688	if (status & ACPI_EVENT_FLAG_MASKED)
 689		size += sprintf(buf + size, " masked  ");
 690	else
 691		size += sprintf(buf + size, " unmasked");
 692
 693end:
 694	size += sprintf(buf + size, "\n");
 695	return result ? result : size;
 696}
 697
  698/*
  699 * counter_set() sets the specified counter.
  700 * Writing any value to the aggregate "sci" file clears all counters.
  701 * Writing enable/disable/clear controls a GPE or fixed event from user space.
  702 */
 703static ssize_t counter_set(struct kobject *kobj,
 704			   struct kobj_attribute *attr, const char *buf,
 705			   size_t size)
 706{
 707	int index = attr - counter_attrs;
 708	acpi_event_status status;
 709	acpi_handle handle;
 710	int result = 0;
 711	unsigned long tmp;
 712
 713	if (index == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI) {
 714		int i;
 715		for (i = 0; i < num_counters; ++i)
 716			all_counters[i].count = 0;
 717		acpi_gpe_count = 0;
 718		acpi_irq_handled = 0;
 719		acpi_irq_not_handled = 0;
 720		goto end;
 721	}
 722
 723	/* show the event status for both GPEs and Fixed Events */
 724	result = get_status(index, &status, &handle);
 725	if (result)
 726		goto end;
 727
 728	if (!(status & ACPI_EVENT_FLAG_HAS_HANDLER)) {
 729		pr_warn("Can not change Invalid GPE/Fixed Event status\n");
 730		return -EINVAL;
 731	}
 732
 733	if (index < num_gpes) {
 734		if (!strcmp(buf, "disable\n") &&
 735		    (status & ACPI_EVENT_FLAG_ENABLED))
 736			result = acpi_disable_gpe(handle, index);
 737		else if (!strcmp(buf, "enable\n") &&
 738			 !(status & ACPI_EVENT_FLAG_ENABLED))
 739			result = acpi_enable_gpe(handle, index);
 740		else if (!strcmp(buf, "clear\n") &&
 741			 (status & ACPI_EVENT_FLAG_STATUS_SET))
 742			result = acpi_clear_gpe(handle, index);
 743		else if (!strcmp(buf, "mask\n"))
 744			result = acpi_mask_gpe(handle, index, TRUE);
 745		else if (!strcmp(buf, "unmask\n"))
 746			result = acpi_mask_gpe(handle, index, FALSE);
 747		else if (!kstrtoul(buf, 0, &tmp))
 748			all_counters[index].count = tmp;
 749		else
 750			result = -EINVAL;
 751	} else if (index < num_gpes + ACPI_NUM_FIXED_EVENTS) {
 752		int event = index - num_gpes;
 753		if (!strcmp(buf, "disable\n") &&
 754		    (status & ACPI_EVENT_FLAG_ENABLE_SET))
 755			result = acpi_disable_event(event, ACPI_NOT_ISR);
 756		else if (!strcmp(buf, "enable\n") &&
 757			 !(status & ACPI_EVENT_FLAG_ENABLE_SET))
 758			result = acpi_enable_event(event, ACPI_NOT_ISR);
 759		else if (!strcmp(buf, "clear\n") &&
 760			 (status & ACPI_EVENT_FLAG_STATUS_SET))
 761			result = acpi_clear_event(event);
 762		else if (!kstrtoul(buf, 0, &tmp))
 763			all_counters[index].count = tmp;
 764		else
 765			result = -EINVAL;
 766	} else
 767		all_counters[index].count = strtoul(buf, NULL, 0);
 768
 769	if (ACPI_FAILURE(result))
 770		result = -EINVAL;
 771end:
 772	return result ? result : size;
 773}
 774
 775/*
 776 * A Quirk Mechanism for GPE Flooding Prevention:
 777 *
 778 * Quirks may be needed to prevent GPE flooding on a specific GPE. The
 779 * flooding typically cannot be detected and automatically prevented by
 780 * ACPI_GPE_DISPATCH_NONE check because there is a _Lxx/_Exx prepared in
 781 * the AML tables. This normally indicates a feature gap in Linux, thus
 782 * instead of providing endless quirk tables, we provide a boot parameter
 783 * for those who want this quirk. For example, if the users want to prevent
 784 * the GPE flooding for GPE 00, they need to specify the following boot
 785 * parameter:
 786 *   acpi_mask_gpe=0x00
 787 * Note, the parameter can be a list (see bitmap_parselist() for the details).
 788 * The masking status can be modified by the following runtime controlling
 789 * interface:
 790 *   echo unmask > /sys/firmware/acpi/interrupts/gpe00
 791 */
 792#define ACPI_MASKABLE_GPE_MAX	0x100
 793static DECLARE_BITMAP(acpi_masked_gpes_map, ACPI_MASKABLE_GPE_MAX) __initdata;
 794
 795static int __init acpi_gpe_set_masked_gpes(char *val)
 796{
 797	int ret;
 798	u8 gpe;
 799
 800	ret = kstrtou8(val, 0, &gpe);
 801	if (ret) {
 802		ret = bitmap_parselist(val, acpi_masked_gpes_map, ACPI_MASKABLE_GPE_MAX);
 803		if (ret)
 804			return ret;
 805	} else
 806		set_bit(gpe, acpi_masked_gpes_map);
 807
 808	return 1;
 809}
 810__setup("acpi_mask_gpe=", acpi_gpe_set_masked_gpes);
 811
 812void __init acpi_gpe_apply_masked_gpes(void)
 813{
 814	acpi_handle handle;
 815	acpi_status status;
 816	u16 gpe;
 817
 818	for_each_set_bit(gpe, acpi_masked_gpes_map, ACPI_MASKABLE_GPE_MAX) {
 819		status = acpi_get_gpe_device(gpe, &handle);
 820		if (ACPI_SUCCESS(status)) {
 821			pr_info("Masking GPE 0x%x.\n", gpe);
 822			(void)acpi_mask_gpe(handle, gpe, TRUE);
 823		}
 824	}
 825}
 826
 827void acpi_irq_stats_init(void)
 828{
 829	acpi_status status;
 830	int i;
 831
 832	if (all_counters)
 833		return;
 834
 835	num_gpes = acpi_current_gpe_count;
 836	num_counters = num_gpes + ACPI_NUM_FIXED_EVENTS + NUM_COUNTERS_EXTRA;
 837
 838	all_attrs = kcalloc(num_counters + 1, sizeof(*all_attrs), GFP_KERNEL);
 839	if (all_attrs == NULL)
 840		return;
 841
 842	all_counters = kcalloc(num_counters, sizeof(*all_counters), GFP_KERNEL);
 843	if (all_counters == NULL)
 844		goto fail;
 845
 846	status = acpi_install_global_event_handler(acpi_global_event_handler, NULL);
 847	if (ACPI_FAILURE(status))
 848		goto fail;
 849
 850	counter_attrs = kcalloc(num_counters, sizeof(*counter_attrs), GFP_KERNEL);
 851	if (counter_attrs == NULL)
 852		goto fail;
 853
 854	for (i = 0; i < num_counters; ++i) {
 855		char buffer[12];
 856		char *name;
 857
 858		if (i < num_gpes)
 859			sprintf(buffer, "gpe%02X", i);
 860		else if (i == num_gpes + ACPI_EVENT_PMTIMER)
 861			sprintf(buffer, "ff_pmtimer");
 862		else if (i == num_gpes + ACPI_EVENT_GLOBAL)
 863			sprintf(buffer, "ff_gbl_lock");
 864		else if (i == num_gpes + ACPI_EVENT_POWER_BUTTON)
 865			sprintf(buffer, "ff_pwr_btn");
 866		else if (i == num_gpes + ACPI_EVENT_SLEEP_BUTTON)
 867			sprintf(buffer, "ff_slp_btn");
 868		else if (i == num_gpes + ACPI_EVENT_RTC)
 869			sprintf(buffer, "ff_rt_clk");
 870		else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_GPE)
 871			sprintf(buffer, "gpe_all");
 872		else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI)
 873			sprintf(buffer, "sci");
 874		else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI_NOT)
 875			sprintf(buffer, "sci_not");
 876		else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_ERROR)
 877			sprintf(buffer, "error");
 878		else
 879			sprintf(buffer, "bug%02X", i);
 880
 881		name = kstrdup(buffer, GFP_KERNEL);
 882		if (name == NULL)
 883			goto fail;
 884
 885		sysfs_attr_init(&counter_attrs[i].attr);
 886		counter_attrs[i].attr.name = name;
 887		counter_attrs[i].attr.mode = 0644;
 888		counter_attrs[i].show = counter_show;
 889		counter_attrs[i].store = counter_set;
 890
 891		all_attrs[i] = &counter_attrs[i].attr;
 892	}
 893
 894	interrupt_stats_attr_group.attrs = all_attrs;
 895	if (!sysfs_create_group(acpi_kobj, &interrupt_stats_attr_group))
 896		return;
 897
 898fail:
 899	delete_gpe_attr_array();
 900}
 901
 902static void __exit interrupt_stats_exit(void)
 903{
 904	sysfs_remove_group(acpi_kobj, &interrupt_stats_attr_group);
 905
 906	delete_gpe_attr_array();
 907}
 908
 909static ssize_t pm_profile_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
 910{
 911	return sprintf(buf, "%d\n", acpi_gbl_FADT.preferred_profile);
 912}
 913
 914static const struct kobj_attribute pm_profile_attr = __ATTR_RO(pm_profile);
 915
 916static ssize_t enabled_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
 917{
 918	struct acpi_hotplug_profile *hotplug = to_acpi_hotplug_profile(kobj);
 919
 920	return sprintf(buf, "%d\n", hotplug->enabled);
 921}
 922
 923static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
 924			     const char *buf, size_t size)
 925{
 926	struct acpi_hotplug_profile *hotplug = to_acpi_hotplug_profile(kobj);
 927	unsigned int val;
 928
 929	if (kstrtouint(buf, 10, &val) || val > 1)
 930		return -EINVAL;
 931
 932	acpi_scan_hotplug_enabled(hotplug, val);
 933	return size;
 934}
 935
 936static struct kobj_attribute hotplug_enabled_attr = __ATTR_RW(enabled);
 937
 938static struct attribute *hotplug_profile_attrs[] = {
 939	&hotplug_enabled_attr.attr,
 940	NULL
  941};
  942
 943static struct kobj_type acpi_hotplug_profile_ktype = {
 944	.sysfs_ops = &kobj_sysfs_ops,
 945	.default_attrs = hotplug_profile_attrs,
 946};
 947
 948void acpi_sysfs_add_hotplug_profile(struct acpi_hotplug_profile *hotplug,
 949				    const char *name)
 950{
 951	int error;
 952
 953	if (!hotplug_kobj)
 954		goto err_out;
 955
 956	error = kobject_init_and_add(&hotplug->kobj,
 957		&acpi_hotplug_profile_ktype, hotplug_kobj, "%s", name);
 958	if (error) {
 959		kobject_put(&hotplug->kobj);
 960		goto err_out;
 961	}
 962
 963	kobject_uevent(&hotplug->kobj, KOBJ_ADD);
 964	return;
 965
 966 err_out:
 967	pr_err("Unable to add hotplug profile '%s'\n", name);
 968}
 969
 970static ssize_t force_remove_show(struct kobject *kobj,
 971				 struct kobj_attribute *attr, char *buf)
 972{
 973	return sprintf(buf, "%d\n", 0);
 974}
 975
 976static ssize_t force_remove_store(struct kobject *kobj,
 977				  struct kobj_attribute *attr,
 978				  const char *buf, size_t size)
 979{
 980	bool val;
 981	int ret;
 982
 983	ret = strtobool(buf, &val);
 984	if (ret < 0)
 985		return ret;
 986
 987	if (val) {
 988		pr_err("Enabling force_remove is not supported anymore. Please report to linux-acpi@vger.kernel.org if you depend on this functionality\n");
 989		return -EINVAL;
 990	}
 991	return size;
 992}
 993
 994static const struct kobj_attribute force_remove_attr = __ATTR_RW(force_remove);
 995
 996int __init acpi_sysfs_init(void)
 997{
 998	int result;
 999
1000	result = acpi_tables_sysfs_init();
1001	if (result)
1002		return result;
1003
1004	hotplug_kobj = kobject_create_and_add("hotplug", acpi_kobj);
1005	if (!hotplug_kobj)
1006		return -ENOMEM;
1007
1008	result = sysfs_create_file(hotplug_kobj, &force_remove_attr.attr);
1009	if (result)
1010		return result;
1011
1012	result = sysfs_create_file(acpi_kobj, &pm_profile_attr.attr);
1013	return result;
1014}