Loading...
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * sysfs.c - ACPI sysfs interface to userspace.
4 */
5
6#define pr_fmt(fmt) "ACPI: " fmt
7
8#include <linux/init.h>
9#include <linux/kernel.h>
10#include <linux/moduleparam.h>
11#include <linux/acpi.h>
12
13#include "internal.h"
14
15#define _COMPONENT ACPI_SYSTEM_COMPONENT
16ACPI_MODULE_NAME("sysfs");
17
18#ifdef CONFIG_ACPI_DEBUG
/*
 * ACPI debug sysfs I/F, including:
 * /sys/modules/acpi/parameters/debug_layer
 * /sys/modules/acpi/parameters/debug_level
 * /sys/modules/acpi/parameters/trace_method_name
 * /sys/modules/acpi/parameters/trace_state
 * /sys/modules/acpi/parameters/trace_debug_layer
 * /sys/modules/acpi/parameters/trace_debug_level
 */

/* Pairs an ACPICA debug-layer bit mask with its printable name. */
struct acpi_dlayer {
	const char *name;
	unsigned long value;
};
/* Pairs an ACPICA debug-level bit mask with its printable name. */
struct acpi_dlevel {
	const char *name;
	unsigned long value;
};
/* Expands a mask macro into a { "NAME", value } table entry. */
#define ACPI_DEBUG_INIT(v)	{ .name = #v, .value = v }
38
39static const struct acpi_dlayer acpi_debug_layers[] = {
40 ACPI_DEBUG_INIT(ACPI_UTILITIES),
41 ACPI_DEBUG_INIT(ACPI_HARDWARE),
42 ACPI_DEBUG_INIT(ACPI_EVENTS),
43 ACPI_DEBUG_INIT(ACPI_TABLES),
44 ACPI_DEBUG_INIT(ACPI_NAMESPACE),
45 ACPI_DEBUG_INIT(ACPI_PARSER),
46 ACPI_DEBUG_INIT(ACPI_DISPATCHER),
47 ACPI_DEBUG_INIT(ACPI_EXECUTER),
48 ACPI_DEBUG_INIT(ACPI_RESOURCES),
49 ACPI_DEBUG_INIT(ACPI_CA_DEBUGGER),
50 ACPI_DEBUG_INIT(ACPI_OS_SERVICES),
51 ACPI_DEBUG_INIT(ACPI_CA_DISASSEMBLER),
52 ACPI_DEBUG_INIT(ACPI_COMPILER),
53 ACPI_DEBUG_INIT(ACPI_TOOLS),
54
55 ACPI_DEBUG_INIT(ACPI_BUS_COMPONENT),
56 ACPI_DEBUG_INIT(ACPI_AC_COMPONENT),
57 ACPI_DEBUG_INIT(ACPI_BATTERY_COMPONENT),
58 ACPI_DEBUG_INIT(ACPI_BUTTON_COMPONENT),
59 ACPI_DEBUG_INIT(ACPI_SBS_COMPONENT),
60 ACPI_DEBUG_INIT(ACPI_FAN_COMPONENT),
61 ACPI_DEBUG_INIT(ACPI_PCI_COMPONENT),
62 ACPI_DEBUG_INIT(ACPI_POWER_COMPONENT),
63 ACPI_DEBUG_INIT(ACPI_CONTAINER_COMPONENT),
64 ACPI_DEBUG_INIT(ACPI_SYSTEM_COMPONENT),
65 ACPI_DEBUG_INIT(ACPI_THERMAL_COMPONENT),
66 ACPI_DEBUG_INIT(ACPI_MEMORY_DEVICE_COMPONENT),
67 ACPI_DEBUG_INIT(ACPI_VIDEO_COMPONENT),
68 ACPI_DEBUG_INIT(ACPI_PROCESSOR_COMPONENT),
69};
70
71static const struct acpi_dlevel acpi_debug_levels[] = {
72 ACPI_DEBUG_INIT(ACPI_LV_INIT),
73 ACPI_DEBUG_INIT(ACPI_LV_DEBUG_OBJECT),
74 ACPI_DEBUG_INIT(ACPI_LV_INFO),
75 ACPI_DEBUG_INIT(ACPI_LV_REPAIR),
76 ACPI_DEBUG_INIT(ACPI_LV_TRACE_POINT),
77
78 ACPI_DEBUG_INIT(ACPI_LV_INIT_NAMES),
79 ACPI_DEBUG_INIT(ACPI_LV_PARSE),
80 ACPI_DEBUG_INIT(ACPI_LV_LOAD),
81 ACPI_DEBUG_INIT(ACPI_LV_DISPATCH),
82 ACPI_DEBUG_INIT(ACPI_LV_EXEC),
83 ACPI_DEBUG_INIT(ACPI_LV_NAMES),
84 ACPI_DEBUG_INIT(ACPI_LV_OPREGION),
85 ACPI_DEBUG_INIT(ACPI_LV_BFIELD),
86 ACPI_DEBUG_INIT(ACPI_LV_TABLES),
87 ACPI_DEBUG_INIT(ACPI_LV_VALUES),
88 ACPI_DEBUG_INIT(ACPI_LV_OBJECTS),
89 ACPI_DEBUG_INIT(ACPI_LV_RESOURCES),
90 ACPI_DEBUG_INIT(ACPI_LV_USER_REQUESTS),
91 ACPI_DEBUG_INIT(ACPI_LV_PACKAGE),
92
93 ACPI_DEBUG_INIT(ACPI_LV_ALLOCATIONS),
94 ACPI_DEBUG_INIT(ACPI_LV_FUNCTIONS),
95 ACPI_DEBUG_INIT(ACPI_LV_OPTIMIZATIONS),
96
97 ACPI_DEBUG_INIT(ACPI_LV_MUTEX),
98 ACPI_DEBUG_INIT(ACPI_LV_THREADS),
99 ACPI_DEBUG_INIT(ACPI_LV_IO),
100 ACPI_DEBUG_INIT(ACPI_LV_INTERRUPTS),
101
102 ACPI_DEBUG_INIT(ACPI_LV_AML_DISASSEMBLE),
103 ACPI_DEBUG_INIT(ACPI_LV_VERBOSE_INFO),
104 ACPI_DEBUG_INIT(ACPI_LV_FULL_TABLES),
105 ACPI_DEBUG_INIT(ACPI_LV_EVENTS),
106};
107
108static int param_get_debug_layer(char *buffer, const struct kernel_param *kp)
109{
110 int result = 0;
111 int i;
112
113 result = sprintf(buffer, "%-25s\tHex SET\n", "Description");
114
115 for (i = 0; i < ARRAY_SIZE(acpi_debug_layers); i++) {
116 result += sprintf(buffer + result, "%-25s\t0x%08lX [%c]\n",
117 acpi_debug_layers[i].name,
118 acpi_debug_layers[i].value,
119 (acpi_dbg_layer & acpi_debug_layers[i].value)
120 ? '*' : ' ');
121 }
122 result +=
123 sprintf(buffer + result, "%-25s\t0x%08X [%c]\n", "ACPI_ALL_DRIVERS",
124 ACPI_ALL_DRIVERS,
125 (acpi_dbg_layer & ACPI_ALL_DRIVERS) ==
126 ACPI_ALL_DRIVERS ? '*' : (acpi_dbg_layer & ACPI_ALL_DRIVERS)
127 == 0 ? ' ' : '-');
128 result +=
129 sprintf(buffer + result,
130 "--\ndebug_layer = 0x%08X ( * = enabled)\n",
131 acpi_dbg_layer);
132
133 return result;
134}
135
136static int param_get_debug_level(char *buffer, const struct kernel_param *kp)
137{
138 int result = 0;
139 int i;
140
141 result = sprintf(buffer, "%-25s\tHex SET\n", "Description");
142
143 for (i = 0; i < ARRAY_SIZE(acpi_debug_levels); i++) {
144 result += sprintf(buffer + result, "%-25s\t0x%08lX [%c]\n",
145 acpi_debug_levels[i].name,
146 acpi_debug_levels[i].value,
147 (acpi_dbg_level & acpi_debug_levels[i].value)
148 ? '*' : ' ');
149 }
150 result +=
151 sprintf(buffer + result, "--\ndebug_level = 0x%08X (* = enabled)\n",
152 acpi_dbg_level);
153
154 return result;
155}
156
157static const struct kernel_param_ops param_ops_debug_layer = {
158 .set = param_set_uint,
159 .get = param_get_debug_layer,
160};
161
162static const struct kernel_param_ops param_ops_debug_level = {
163 .set = param_set_uint,
164 .get = param_get_debug_level,
165};
166
167module_param_cb(debug_layer, ¶m_ops_debug_layer, &acpi_dbg_layer, 0644);
168module_param_cb(debug_level, ¶m_ops_debug_level, &acpi_dbg_level, 0644);
169
170static char trace_method_name[1024];
171
172static int param_set_trace_method_name(const char *val,
173 const struct kernel_param *kp)
174{
175 u32 saved_flags = 0;
176 bool is_abs_path = true;
177
178 if (*val != '\\')
179 is_abs_path = false;
180
181 if ((is_abs_path && strlen(val) > 1023) ||
182 (!is_abs_path && strlen(val) > 1022)) {
183 pr_err("%s: string parameter too long\n", kp->name);
184 return -ENOSPC;
185 }
186
187 /*
188 * It's not safe to update acpi_gbl_trace_method_name without
189 * having the tracer stopped, so we save the original tracer
190 * state and disable it.
191 */
192 saved_flags = acpi_gbl_trace_flags;
193 (void)acpi_debug_trace(NULL,
194 acpi_gbl_trace_dbg_level,
195 acpi_gbl_trace_dbg_layer,
196 0);
197
198 /* This is a hack. We can't kmalloc in early boot. */
199 if (is_abs_path)
200 strcpy(trace_method_name, val);
201 else {
202 trace_method_name[0] = '\\';
203 strcpy(trace_method_name+1, val);
204 }
205
206 /* Restore the original tracer state */
207 (void)acpi_debug_trace(trace_method_name,
208 acpi_gbl_trace_dbg_level,
209 acpi_gbl_trace_dbg_layer,
210 saved_flags);
211
212 return 0;
213}
214
215static int param_get_trace_method_name(char *buffer, const struct kernel_param *kp)
216{
217 return scnprintf(buffer, PAGE_SIZE, "%s\n", acpi_gbl_trace_method_name);
218}
219
220static const struct kernel_param_ops param_ops_trace_method = {
221 .set = param_set_trace_method_name,
222 .get = param_get_trace_method_name,
223};
224
225static const struct kernel_param_ops param_ops_trace_attrib = {
226 .set = param_set_uint,
227 .get = param_get_uint,
228};
229
230module_param_cb(trace_method_name, ¶m_ops_trace_method, &trace_method_name, 0644);
231module_param_cb(trace_debug_layer, ¶m_ops_trace_attrib, &acpi_gbl_trace_dbg_layer, 0644);
232module_param_cb(trace_debug_level, ¶m_ops_trace_attrib, &acpi_gbl_trace_dbg_level, 0644);
233
234static int param_set_trace_state(const char *val,
235 const struct kernel_param *kp)
236{
237 acpi_status status;
238 const char *method = trace_method_name;
239 u32 flags = 0;
240
241/* So "xxx-once" comparison should go prior than "xxx" comparison */
242#define acpi_compare_param(val, key) \
243 strncmp((val), (key), sizeof(key) - 1)
244
245 if (!acpi_compare_param(val, "enable")) {
246 method = NULL;
247 flags = ACPI_TRACE_ENABLED;
248 } else if (!acpi_compare_param(val, "disable"))
249 method = NULL;
250 else if (!acpi_compare_param(val, "method-once"))
251 flags = ACPI_TRACE_ENABLED | ACPI_TRACE_ONESHOT;
252 else if (!acpi_compare_param(val, "method"))
253 flags = ACPI_TRACE_ENABLED;
254 else if (!acpi_compare_param(val, "opcode-once"))
255 flags = ACPI_TRACE_ENABLED | ACPI_TRACE_ONESHOT | ACPI_TRACE_OPCODE;
256 else if (!acpi_compare_param(val, "opcode"))
257 flags = ACPI_TRACE_ENABLED | ACPI_TRACE_OPCODE;
258 else
259 return -EINVAL;
260
261 status = acpi_debug_trace(method,
262 acpi_gbl_trace_dbg_level,
263 acpi_gbl_trace_dbg_layer,
264 flags);
265 if (ACPI_FAILURE(status))
266 return -EBUSY;
267
268 return 0;
269}
270
271static int param_get_trace_state(char *buffer, const struct kernel_param *kp)
272{
273 if (!(acpi_gbl_trace_flags & ACPI_TRACE_ENABLED))
274 return sprintf(buffer, "disable\n");
275 else {
276 if (acpi_gbl_trace_method_name) {
277 if (acpi_gbl_trace_flags & ACPI_TRACE_ONESHOT)
278 return sprintf(buffer, "method-once\n");
279 else
280 return sprintf(buffer, "method\n");
281 } else
282 return sprintf(buffer, "enable\n");
283 }
284 return 0;
285}
286
287module_param_call(trace_state, param_set_trace_state, param_get_trace_state,
288 NULL, 0644);
289#endif /* CONFIG_ACPI_DEBUG */
290
291
292/* /sys/modules/acpi/parameters/aml_debug_output */
293
294module_param_named(aml_debug_output, acpi_gbl_enable_aml_debug_object,
295 byte, 0644);
296MODULE_PARM_DESC(aml_debug_output,
297 "To enable/disable the ACPI Debug Object output.");
298
299/* /sys/module/acpi/parameters/acpica_version */
300static int param_get_acpica_version(char *buffer,
301 const struct kernel_param *kp)
302{
303 int result;
304
305 result = sprintf(buffer, "%x\n", ACPI_CA_VERSION);
306
307 return result;
308}
309
310module_param_call(acpica_version, NULL, param_get_acpica_version, NULL, 0444);
311
312/*
313 * ACPI table sysfs I/F:
314 * /sys/firmware/acpi/tables/
315 * /sys/firmware/acpi/tables/data/
316 * /sys/firmware/acpi/tables/dynamic/
317 */
318
319static LIST_HEAD(acpi_table_attr_list);
320static struct kobject *tables_kobj;
321static struct kobject *tables_data_kobj;
322static struct kobject *dynamic_tables_kobj;
323static struct kobject *hotplug_kobj;
324
325#define ACPI_MAX_TABLE_INSTANCES 999
326#define ACPI_INST_SIZE 4 /* including trailing 0 */
327
328struct acpi_table_attr {
329 struct bin_attribute attr;
330 char name[ACPI_NAMESEG_SIZE];
331 int instance;
332 char filename[ACPI_NAMESEG_SIZE+ACPI_INST_SIZE];
333 struct list_head node;
334};
335
336struct acpi_data_attr {
337 struct bin_attribute attr;
338 u64 addr;
339};
340
341static ssize_t acpi_table_show(struct file *filp, struct kobject *kobj,
342 struct bin_attribute *bin_attr, char *buf,
343 loff_t offset, size_t count)
344{
345 struct acpi_table_attr *table_attr =
346 container_of(bin_attr, struct acpi_table_attr, attr);
347 struct acpi_table_header *table_header = NULL;
348 acpi_status status;
349 ssize_t rc;
350
351 status = acpi_get_table(table_attr->name, table_attr->instance,
352 &table_header);
353 if (ACPI_FAILURE(status))
354 return -ENODEV;
355
356 rc = memory_read_from_buffer(buf, count, &offset, table_header,
357 table_header->length);
358 acpi_put_table(table_header);
359 return rc;
360}
361
362static int acpi_table_attr_init(struct kobject *tables_obj,
363 struct acpi_table_attr *table_attr,
364 struct acpi_table_header *table_header)
365{
366 struct acpi_table_header *header = NULL;
367 struct acpi_table_attr *attr = NULL;
368 char instance_str[ACPI_INST_SIZE];
369
370 sysfs_attr_init(&table_attr->attr.attr);
371 ACPI_COPY_NAMESEG(table_attr->name, table_header->signature);
372
373 list_for_each_entry(attr, &acpi_table_attr_list, node) {
374 if (ACPI_COMPARE_NAMESEG(table_attr->name, attr->name))
375 if (table_attr->instance < attr->instance)
376 table_attr->instance = attr->instance;
377 }
378 table_attr->instance++;
379 if (table_attr->instance > ACPI_MAX_TABLE_INSTANCES) {
380 pr_warn("%4.4s: too many table instances\n",
381 table_attr->name);
382 return -ERANGE;
383 }
384
385 ACPI_COPY_NAMESEG(table_attr->filename, table_header->signature);
386 table_attr->filename[ACPI_NAMESEG_SIZE] = '\0';
387 if (table_attr->instance > 1 || (table_attr->instance == 1 &&
388 !acpi_get_table
389 (table_header->signature, 2, &header))) {
390 snprintf(instance_str, sizeof(instance_str), "%u",
391 table_attr->instance);
392 strcat(table_attr->filename, instance_str);
393 }
394
395 table_attr->attr.size = table_header->length;
396 table_attr->attr.read = acpi_table_show;
397 table_attr->attr.attr.name = table_attr->filename;
398 table_attr->attr.attr.mode = 0400;
399
400 return sysfs_create_bin_file(tables_obj, &table_attr->attr);
401}
402
403acpi_status acpi_sysfs_table_handler(u32 event, void *table, void *context)
404{
405 struct acpi_table_attr *table_attr;
406
407 switch (event) {
408 case ACPI_TABLE_EVENT_INSTALL:
409 table_attr =
410 kzalloc(sizeof(struct acpi_table_attr), GFP_KERNEL);
411 if (!table_attr)
412 return AE_NO_MEMORY;
413
414 if (acpi_table_attr_init(dynamic_tables_kobj,
415 table_attr, table)) {
416 kfree(table_attr);
417 return AE_ERROR;
418 }
419 list_add_tail(&table_attr->node, &acpi_table_attr_list);
420 break;
421 case ACPI_TABLE_EVENT_LOAD:
422 case ACPI_TABLE_EVENT_UNLOAD:
423 case ACPI_TABLE_EVENT_UNINSTALL:
424 /*
425 * we do not need to do anything right now
426 * because the table is not deleted from the
427 * global table list when unloading it.
428 */
429 break;
430 default:
431 return AE_BAD_PARAMETER;
432 }
433 return AE_OK;
434}
435
436static ssize_t acpi_data_show(struct file *filp, struct kobject *kobj,
437 struct bin_attribute *bin_attr, char *buf,
438 loff_t offset, size_t count)
439{
440 struct acpi_data_attr *data_attr;
441 void __iomem *base;
442 ssize_t rc;
443
444 data_attr = container_of(bin_attr, struct acpi_data_attr, attr);
445
446 base = acpi_os_map_memory(data_attr->addr, data_attr->attr.size);
447 if (!base)
448 return -ENOMEM;
449 rc = memory_read_from_buffer(buf, count, &offset, base,
450 data_attr->attr.size);
451 acpi_os_unmap_memory(base, data_attr->attr.size);
452
453 return rc;
454}
455
456static int acpi_bert_data_init(void *th, struct acpi_data_attr *data_attr)
457{
458 struct acpi_table_bert *bert = th;
459
460 if (bert->header.length < sizeof(struct acpi_table_bert) ||
461 bert->region_length < sizeof(struct acpi_hest_generic_status)) {
462 kfree(data_attr);
463 return -EINVAL;
464 }
465 data_attr->addr = bert->address;
466 data_attr->attr.size = bert->region_length;
467 data_attr->attr.attr.name = "BERT";
468
469 return sysfs_create_bin_file(tables_data_kobj, &data_attr->attr);
470}
471
472static struct acpi_data_obj {
473 char *name;
474 int (*fn)(void *, struct acpi_data_attr *);
475} acpi_data_objs[] = {
476 { ACPI_SIG_BERT, acpi_bert_data_init },
477};
478
479#define NUM_ACPI_DATA_OBJS ARRAY_SIZE(acpi_data_objs)
480
481static int acpi_table_data_init(struct acpi_table_header *th)
482{
483 struct acpi_data_attr *data_attr;
484 int i;
485
486 for (i = 0; i < NUM_ACPI_DATA_OBJS; i++) {
487 if (ACPI_COMPARE_NAMESEG(th->signature, acpi_data_objs[i].name)) {
488 data_attr = kzalloc(sizeof(*data_attr), GFP_KERNEL);
489 if (!data_attr)
490 return -ENOMEM;
491 sysfs_attr_init(&data_attr->attr.attr);
492 data_attr->attr.read = acpi_data_show;
493 data_attr->attr.attr.mode = 0400;
494 return acpi_data_objs[i].fn(th, data_attr);
495 }
496 }
497 return 0;
498}
499
500static int acpi_tables_sysfs_init(void)
501{
502 struct acpi_table_attr *table_attr;
503 struct acpi_table_header *table_header = NULL;
504 int table_index;
505 acpi_status status;
506 int ret;
507
508 tables_kobj = kobject_create_and_add("tables", acpi_kobj);
509 if (!tables_kobj)
510 goto err;
511
512 tables_data_kobj = kobject_create_and_add("data", tables_kobj);
513 if (!tables_data_kobj)
514 goto err_tables_data;
515
516 dynamic_tables_kobj = kobject_create_and_add("dynamic", tables_kobj);
517 if (!dynamic_tables_kobj)
518 goto err_dynamic_tables;
519
520 for (table_index = 0;; table_index++) {
521 status = acpi_get_table_by_index(table_index, &table_header);
522
523 if (status == AE_BAD_PARAMETER)
524 break;
525
526 if (ACPI_FAILURE(status))
527 continue;
528
529 table_attr = kzalloc(sizeof(*table_attr), GFP_KERNEL);
530 if (!table_attr)
531 return -ENOMEM;
532
533 ret = acpi_table_attr_init(tables_kobj,
534 table_attr, table_header);
535 if (ret) {
536 kfree(table_attr);
537 return ret;
538 }
539 list_add_tail(&table_attr->node, &acpi_table_attr_list);
540 acpi_table_data_init(table_header);
541 }
542
543 kobject_uevent(tables_kobj, KOBJ_ADD);
544 kobject_uevent(tables_data_kobj, KOBJ_ADD);
545 kobject_uevent(dynamic_tables_kobj, KOBJ_ADD);
546
547 return 0;
548err_dynamic_tables:
549 kobject_put(tables_data_kobj);
550err_tables_data:
551 kobject_put(tables_kobj);
552err:
553 return -ENOMEM;
554}
555
556/*
557 * Detailed ACPI IRQ counters:
558 * /sys/firmware/acpi/interrupts/
559 */
560
561u32 acpi_irq_handled;
562u32 acpi_irq_not_handled;
563
564#define COUNT_GPE 0
565#define COUNT_SCI 1 /* acpi_irq_handled */
566#define COUNT_SCI_NOT 2 /* acpi_irq_not_handled */
567#define COUNT_ERROR 3 /* other */
568#define NUM_COUNTERS_EXTRA 4
569
570struct event_counter {
571 u32 count;
572 u32 flags;
573};
574
575static struct event_counter *all_counters;
576static u32 num_gpes;
577static u32 num_counters;
578static struct attribute **all_attrs;
579static u32 acpi_gpe_count;
580
581static struct attribute_group interrupt_stats_attr_group = {
582 .name = "interrupts",
583};
584
585static struct kobj_attribute *counter_attrs;
586
587static void delete_gpe_attr_array(void)
588{
589 struct event_counter *tmp = all_counters;
590
591 all_counters = NULL;
592 kfree(tmp);
593
594 if (counter_attrs) {
595 int i;
596
597 for (i = 0; i < num_gpes; i++)
598 kfree(counter_attrs[i].attr.name);
599
600 kfree(counter_attrs);
601 }
602 kfree(all_attrs);
603
604 return;
605}
606
607static void gpe_count(u32 gpe_number)
608{
609 acpi_gpe_count++;
610
611 if (!all_counters)
612 return;
613
614 if (gpe_number < num_gpes)
615 all_counters[gpe_number].count++;
616 else
617 all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS +
618 COUNT_ERROR].count++;
619
620 return;
621}
622
623static void fixed_event_count(u32 event_number)
624{
625 if (!all_counters)
626 return;
627
628 if (event_number < ACPI_NUM_FIXED_EVENTS)
629 all_counters[num_gpes + event_number].count++;
630 else
631 all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS +
632 COUNT_ERROR].count++;
633
634 return;
635}
636
637static void acpi_global_event_handler(u32 event_type, acpi_handle device,
638 u32 event_number, void *context)
639{
640 if (event_type == ACPI_EVENT_TYPE_GPE) {
641 gpe_count(event_number);
642 pr_debug("GPE event 0x%02x\n", event_number);
643 } else if (event_type == ACPI_EVENT_TYPE_FIXED) {
644 fixed_event_count(event_number);
645 pr_debug("Fixed event 0x%02x\n", event_number);
646 } else {
647 pr_debug("Other event 0x%02x\n", event_number);
648 }
649}
650
651static int get_status(u32 index, acpi_event_status *ret,
652 acpi_handle *handle)
653{
654 acpi_status status;
655
656 if (index >= num_gpes + ACPI_NUM_FIXED_EVENTS)
657 return -EINVAL;
658
659 if (index < num_gpes) {
660 status = acpi_get_gpe_device(index, handle);
661 if (ACPI_FAILURE(status)) {
662 ACPI_EXCEPTION((AE_INFO, AE_NOT_FOUND,
663 "Invalid GPE 0x%x", index));
664 return -ENXIO;
665 }
666 status = acpi_get_gpe_status(*handle, index, ret);
667 } else {
668 status = acpi_get_event_status(index - num_gpes, ret);
669 }
670 if (ACPI_FAILURE(status))
671 return -EIO;
672
673 return 0;
674}
675
676static ssize_t counter_show(struct kobject *kobj,
677 struct kobj_attribute *attr, char *buf)
678{
679 int index = attr - counter_attrs;
680 int size;
681 acpi_handle handle;
682 acpi_event_status status;
683 int result = 0;
684
685 all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI].count =
686 acpi_irq_handled;
687 all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI_NOT].count =
688 acpi_irq_not_handled;
689 all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_GPE].count =
690 acpi_gpe_count;
691 size = sprintf(buf, "%8u", all_counters[index].count);
692
693 /* "gpe_all" or "sci" */
694 if (index >= num_gpes + ACPI_NUM_FIXED_EVENTS)
695 goto end;
696
697 result = get_status(index, &status, &handle);
698 if (result)
699 goto end;
700
701 if (status & ACPI_EVENT_FLAG_ENABLE_SET)
702 size += sprintf(buf + size, " EN");
703 else
704 size += sprintf(buf + size, " ");
705 if (status & ACPI_EVENT_FLAG_STATUS_SET)
706 size += sprintf(buf + size, " STS");
707 else
708 size += sprintf(buf + size, " ");
709
710 if (!(status & ACPI_EVENT_FLAG_HAS_HANDLER))
711 size += sprintf(buf + size, " invalid ");
712 else if (status & ACPI_EVENT_FLAG_ENABLED)
713 size += sprintf(buf + size, " enabled ");
714 else if (status & ACPI_EVENT_FLAG_WAKE_ENABLED)
715 size += sprintf(buf + size, " wake_enabled");
716 else
717 size += sprintf(buf + size, " disabled ");
718 if (status & ACPI_EVENT_FLAG_MASKED)
719 size += sprintf(buf + size, " masked ");
720 else
721 size += sprintf(buf + size, " unmasked");
722
723end:
724 size += sprintf(buf + size, "\n");
725 return result ? result : size;
726}
727
728/*
729 * counter_set() sets the specified counter.
730 * setting the total "sci" file to any value clears all counters.
731 * enable/disable/clear a gpe/fixed event in user space.
732 */
733static ssize_t counter_set(struct kobject *kobj,
734 struct kobj_attribute *attr, const char *buf,
735 size_t size)
736{
737 int index = attr - counter_attrs;
738 acpi_event_status status;
739 acpi_handle handle;
740 int result = 0;
741 unsigned long tmp;
742
743 if (index == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI) {
744 int i;
745 for (i = 0; i < num_counters; ++i)
746 all_counters[i].count = 0;
747 acpi_gpe_count = 0;
748 acpi_irq_handled = 0;
749 acpi_irq_not_handled = 0;
750 goto end;
751 }
752
753 /* show the event status for both GPEs and Fixed Events */
754 result = get_status(index, &status, &handle);
755 if (result)
756 goto end;
757
758 if (!(status & ACPI_EVENT_FLAG_HAS_HANDLER)) {
759 printk(KERN_WARNING PREFIX
760 "Can not change Invalid GPE/Fixed Event status\n");
761 return -EINVAL;
762 }
763
764 if (index < num_gpes) {
765 if (!strcmp(buf, "disable\n") &&
766 (status & ACPI_EVENT_FLAG_ENABLED))
767 result = acpi_disable_gpe(handle, index);
768 else if (!strcmp(buf, "enable\n") &&
769 !(status & ACPI_EVENT_FLAG_ENABLED))
770 result = acpi_enable_gpe(handle, index);
771 else if (!strcmp(buf, "clear\n") &&
772 (status & ACPI_EVENT_FLAG_STATUS_SET))
773 result = acpi_clear_gpe(handle, index);
774 else if (!strcmp(buf, "mask\n"))
775 result = acpi_mask_gpe(handle, index, TRUE);
776 else if (!strcmp(buf, "unmask\n"))
777 result = acpi_mask_gpe(handle, index, FALSE);
778 else if (!kstrtoul(buf, 0, &tmp))
779 all_counters[index].count = tmp;
780 else
781 result = -EINVAL;
782 } else if (index < num_gpes + ACPI_NUM_FIXED_EVENTS) {
783 int event = index - num_gpes;
784 if (!strcmp(buf, "disable\n") &&
785 (status & ACPI_EVENT_FLAG_ENABLE_SET))
786 result = acpi_disable_event(event, ACPI_NOT_ISR);
787 else if (!strcmp(buf, "enable\n") &&
788 !(status & ACPI_EVENT_FLAG_ENABLE_SET))
789 result = acpi_enable_event(event, ACPI_NOT_ISR);
790 else if (!strcmp(buf, "clear\n") &&
791 (status & ACPI_EVENT_FLAG_STATUS_SET))
792 result = acpi_clear_event(event);
793 else if (!kstrtoul(buf, 0, &tmp))
794 all_counters[index].count = tmp;
795 else
796 result = -EINVAL;
797 } else
798 all_counters[index].count = strtoul(buf, NULL, 0);
799
800 if (ACPI_FAILURE(result))
801 result = -EINVAL;
802end:
803 return result ? result : size;
804}
805
806/*
807 * A Quirk Mechanism for GPE Flooding Prevention:
808 *
809 * Quirks may be needed to prevent GPE flooding on a specific GPE. The
810 * flooding typically cannot be detected and automatically prevented by
811 * ACPI_GPE_DISPATCH_NONE check because there is a _Lxx/_Exx prepared in
812 * the AML tables. This normally indicates a feature gap in Linux, thus
813 * instead of providing endless quirk tables, we provide a boot parameter
814 * for those who want this quirk. For example, if the users want to prevent
815 * the GPE flooding for GPE 00, they need to specify the following boot
816 * parameter:
817 * acpi_mask_gpe=0x00
818 * The masking status can be modified by the following runtime controlling
819 * interface:
820 * echo unmask > /sys/firmware/acpi/interrupts/gpe00
821 */
822#define ACPI_MASKABLE_GPE_MAX 0x100
823static DECLARE_BITMAP(acpi_masked_gpes_map, ACPI_MASKABLE_GPE_MAX) __initdata;
824
825static int __init acpi_gpe_set_masked_gpes(char *val)
826{
827 u8 gpe;
828
829 if (kstrtou8(val, 0, &gpe))
830 return -EINVAL;
831 set_bit(gpe, acpi_masked_gpes_map);
832
833 return 1;
834}
835__setup("acpi_mask_gpe=", acpi_gpe_set_masked_gpes);
836
837void __init acpi_gpe_apply_masked_gpes(void)
838{
839 acpi_handle handle;
840 acpi_status status;
841 u16 gpe;
842
843 for_each_set_bit(gpe, acpi_masked_gpes_map, ACPI_MASKABLE_GPE_MAX) {
844 status = acpi_get_gpe_device(gpe, &handle);
845 if (ACPI_SUCCESS(status)) {
846 pr_info("Masking GPE 0x%x.\n", gpe);
847 (void)acpi_mask_gpe(handle, gpe, TRUE);
848 }
849 }
850}
851
852void acpi_irq_stats_init(void)
853{
854 acpi_status status;
855 int i;
856
857 if (all_counters)
858 return;
859
860 num_gpes = acpi_current_gpe_count;
861 num_counters = num_gpes + ACPI_NUM_FIXED_EVENTS + NUM_COUNTERS_EXTRA;
862
863 all_attrs = kcalloc(num_counters + 1, sizeof(struct attribute *),
864 GFP_KERNEL);
865 if (all_attrs == NULL)
866 return;
867
868 all_counters = kcalloc(num_counters, sizeof(struct event_counter),
869 GFP_KERNEL);
870 if (all_counters == NULL)
871 goto fail;
872
873 status = acpi_install_global_event_handler(acpi_global_event_handler, NULL);
874 if (ACPI_FAILURE(status))
875 goto fail;
876
877 counter_attrs = kcalloc(num_counters, sizeof(struct kobj_attribute),
878 GFP_KERNEL);
879 if (counter_attrs == NULL)
880 goto fail;
881
882 for (i = 0; i < num_counters; ++i) {
883 char buffer[12];
884 char *name;
885
886 if (i < num_gpes)
887 sprintf(buffer, "gpe%02X", i);
888 else if (i == num_gpes + ACPI_EVENT_PMTIMER)
889 sprintf(buffer, "ff_pmtimer");
890 else if (i == num_gpes + ACPI_EVENT_GLOBAL)
891 sprintf(buffer, "ff_gbl_lock");
892 else if (i == num_gpes + ACPI_EVENT_POWER_BUTTON)
893 sprintf(buffer, "ff_pwr_btn");
894 else if (i == num_gpes + ACPI_EVENT_SLEEP_BUTTON)
895 sprintf(buffer, "ff_slp_btn");
896 else if (i == num_gpes + ACPI_EVENT_RTC)
897 sprintf(buffer, "ff_rt_clk");
898 else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_GPE)
899 sprintf(buffer, "gpe_all");
900 else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI)
901 sprintf(buffer, "sci");
902 else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI_NOT)
903 sprintf(buffer, "sci_not");
904 else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_ERROR)
905 sprintf(buffer, "error");
906 else
907 sprintf(buffer, "bug%02X", i);
908
909 name = kstrdup(buffer, GFP_KERNEL);
910 if (name == NULL)
911 goto fail;
912
913 sysfs_attr_init(&counter_attrs[i].attr);
914 counter_attrs[i].attr.name = name;
915 counter_attrs[i].attr.mode = 0644;
916 counter_attrs[i].show = counter_show;
917 counter_attrs[i].store = counter_set;
918
919 all_attrs[i] = &counter_attrs[i].attr;
920 }
921
922 interrupt_stats_attr_group.attrs = all_attrs;
923 if (!sysfs_create_group(acpi_kobj, &interrupt_stats_attr_group))
924 return;
925
926fail:
927 delete_gpe_attr_array();
928 return;
929}
930
931static void __exit interrupt_stats_exit(void)
932{
933 sysfs_remove_group(acpi_kobj, &interrupt_stats_attr_group);
934
935 delete_gpe_attr_array();
936
937 return;
938}
939
940static ssize_t
941acpi_show_profile(struct kobject *kobj, struct kobj_attribute *attr,
942 char *buf)
943{
944 return sprintf(buf, "%d\n", acpi_gbl_FADT.preferred_profile);
945}
946
947static const struct kobj_attribute pm_profile_attr =
948 __ATTR(pm_profile, S_IRUGO, acpi_show_profile, NULL);
949
950static ssize_t hotplug_enabled_show(struct kobject *kobj,
951 struct kobj_attribute *attr, char *buf)
952{
953 struct acpi_hotplug_profile *hotplug = to_acpi_hotplug_profile(kobj);
954
955 return sprintf(buf, "%d\n", hotplug->enabled);
956}
957
958static ssize_t hotplug_enabled_store(struct kobject *kobj,
959 struct kobj_attribute *attr,
960 const char *buf, size_t size)
961{
962 struct acpi_hotplug_profile *hotplug = to_acpi_hotplug_profile(kobj);
963 unsigned int val;
964
965 if (kstrtouint(buf, 10, &val) || val > 1)
966 return -EINVAL;
967
968 acpi_scan_hotplug_enabled(hotplug, val);
969 return size;
970}
971
972static struct kobj_attribute hotplug_enabled_attr =
973 __ATTR(enabled, S_IRUGO | S_IWUSR, hotplug_enabled_show,
974 hotplug_enabled_store);
975
976static struct attribute *hotplug_profile_attrs[] = {
977 &hotplug_enabled_attr.attr,
978 NULL
979};
980
981static struct kobj_type acpi_hotplug_profile_ktype = {
982 .sysfs_ops = &kobj_sysfs_ops,
983 .default_attrs = hotplug_profile_attrs,
984};
985
986void acpi_sysfs_add_hotplug_profile(struct acpi_hotplug_profile *hotplug,
987 const char *name)
988{
989 int error;
990
991 if (!hotplug_kobj)
992 goto err_out;
993
994 error = kobject_init_and_add(&hotplug->kobj,
995 &acpi_hotplug_profile_ktype, hotplug_kobj, "%s", name);
996 if (error) {
997 kobject_put(&hotplug->kobj);
998 goto err_out;
999 }
1000
1001 kobject_uevent(&hotplug->kobj, KOBJ_ADD);
1002 return;
1003
1004 err_out:
1005 pr_err(PREFIX "Unable to add hotplug profile '%s'\n", name);
1006}
1007
1008static ssize_t force_remove_show(struct kobject *kobj,
1009 struct kobj_attribute *attr, char *buf)
1010{
1011 return sprintf(buf, "%d\n", 0);
1012}
1013
1014static ssize_t force_remove_store(struct kobject *kobj,
1015 struct kobj_attribute *attr,
1016 const char *buf, size_t size)
1017{
1018 bool val;
1019 int ret;
1020
1021 ret = strtobool(buf, &val);
1022 if (ret < 0)
1023 return ret;
1024
1025 if (val) {
1026 pr_err("Enabling force_remove is not supported anymore. Please report to linux-acpi@vger.kernel.org if you depend on this functionality\n");
1027 return -EINVAL;
1028 }
1029 return size;
1030}
1031
1032static const struct kobj_attribute force_remove_attr =
1033 __ATTR(force_remove, S_IRUGO | S_IWUSR, force_remove_show,
1034 force_remove_store);
1035
1036int __init acpi_sysfs_init(void)
1037{
1038 int result;
1039
1040 result = acpi_tables_sysfs_init();
1041 if (result)
1042 return result;
1043
1044 hotplug_kobj = kobject_create_and_add("hotplug", acpi_kobj);
1045 if (!hotplug_kobj)
1046 return -ENOMEM;
1047
1048 result = sysfs_create_file(hotplug_kobj, &force_remove_attr.attr);
1049 if (result)
1050 return result;
1051
1052 result = sysfs_create_file(acpi_kobj, &pm_profile_attr.attr);
1053 return result;
1054}
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * sysfs.c - ACPI sysfs interface to userspace.
4 */
5
6#define pr_fmt(fmt) "ACPI: " fmt
7
8#include <linux/acpi.h>
9#include <linux/bitmap.h>
10#include <linux/init.h>
11#include <linux/kernel.h>
12#include <linux/kstrtox.h>
13#include <linux/moduleparam.h>
14
15#include "internal.h"
16
17#ifdef CONFIG_ACPI_DEBUG
/*
 * ACPI debug sysfs I/F, including:
 * /sys/modules/acpi/parameters/debug_layer
 * /sys/modules/acpi/parameters/debug_level
 * /sys/modules/acpi/parameters/trace_method_name
 * /sys/modules/acpi/parameters/trace_state
 * /sys/modules/acpi/parameters/trace_debug_layer
 * /sys/modules/acpi/parameters/trace_debug_level
 */

/* Pairs an ACPICA debug-layer bit mask with its printable name. */
struct acpi_dlayer {
	const char *name;
	unsigned long value;
};
/* Pairs an ACPICA debug-level bit mask with its printable name. */
struct acpi_dlevel {
	const char *name;
	unsigned long value;
};
/* Expands a mask macro into a { "NAME", value } table entry. */
#define ACPI_DEBUG_INIT(v)	{ .name = #v, .value = v }
37
/* Known ACPICA debug layers, shown by the debug_layer parameter getter. */
static const struct acpi_dlayer acpi_debug_layers[] = {
	ACPI_DEBUG_INIT(ACPI_UTILITIES),
	ACPI_DEBUG_INIT(ACPI_HARDWARE),
	ACPI_DEBUG_INIT(ACPI_EVENTS),
	ACPI_DEBUG_INIT(ACPI_TABLES),
	ACPI_DEBUG_INIT(ACPI_NAMESPACE),
	ACPI_DEBUG_INIT(ACPI_PARSER),
	ACPI_DEBUG_INIT(ACPI_DISPATCHER),
	ACPI_DEBUG_INIT(ACPI_EXECUTER),
	ACPI_DEBUG_INIT(ACPI_RESOURCES),
	ACPI_DEBUG_INIT(ACPI_CA_DEBUGGER),
	ACPI_DEBUG_INIT(ACPI_OS_SERVICES),
	ACPI_DEBUG_INIT(ACPI_CA_DISASSEMBLER),
	ACPI_DEBUG_INIT(ACPI_COMPILER),
	ACPI_DEBUG_INIT(ACPI_TOOLS),
};

/* Known ACPICA debug levels, shown by the debug_level parameter getter. */
static const struct acpi_dlevel acpi_debug_levels[] = {
	ACPI_DEBUG_INIT(ACPI_LV_INIT),
	ACPI_DEBUG_INIT(ACPI_LV_DEBUG_OBJECT),
	ACPI_DEBUG_INIT(ACPI_LV_INFO),
	ACPI_DEBUG_INIT(ACPI_LV_REPAIR),
	ACPI_DEBUG_INIT(ACPI_LV_TRACE_POINT),

	ACPI_DEBUG_INIT(ACPI_LV_INIT_NAMES),
	ACPI_DEBUG_INIT(ACPI_LV_PARSE),
	ACPI_DEBUG_INIT(ACPI_LV_LOAD),
	ACPI_DEBUG_INIT(ACPI_LV_DISPATCH),
	ACPI_DEBUG_INIT(ACPI_LV_EXEC),
	ACPI_DEBUG_INIT(ACPI_LV_NAMES),
	ACPI_DEBUG_INIT(ACPI_LV_OPREGION),
	ACPI_DEBUG_INIT(ACPI_LV_BFIELD),
	ACPI_DEBUG_INIT(ACPI_LV_TABLES),
	ACPI_DEBUG_INIT(ACPI_LV_VALUES),
	ACPI_DEBUG_INIT(ACPI_LV_OBJECTS),
	ACPI_DEBUG_INIT(ACPI_LV_RESOURCES),
	ACPI_DEBUG_INIT(ACPI_LV_USER_REQUESTS),
	ACPI_DEBUG_INIT(ACPI_LV_PACKAGE),

	ACPI_DEBUG_INIT(ACPI_LV_ALLOCATIONS),
	ACPI_DEBUG_INIT(ACPI_LV_FUNCTIONS),
	ACPI_DEBUG_INIT(ACPI_LV_OPTIMIZATIONS),

	ACPI_DEBUG_INIT(ACPI_LV_MUTEX),
	ACPI_DEBUG_INIT(ACPI_LV_THREADS),
	ACPI_DEBUG_INIT(ACPI_LV_IO),
	ACPI_DEBUG_INIT(ACPI_LV_INTERRUPTS),

	ACPI_DEBUG_INIT(ACPI_LV_AML_DISASSEMBLE),
	ACPI_DEBUG_INIT(ACPI_LV_VERBOSE_INFO),
	ACPI_DEBUG_INIT(ACPI_LV_FULL_TABLES),
	ACPI_DEBUG_INIT(ACPI_LV_EVENTS),
};
91
92static int param_get_debug_layer(char *buffer, const struct kernel_param *kp)
93{
94 int result = 0;
95 int i;
96
97 result = sprintf(buffer, "%-25s\tHex SET\n", "Description");
98
99 for (i = 0; i < ARRAY_SIZE(acpi_debug_layers); i++) {
100 result += sprintf(buffer + result, "%-25s\t0x%08lX [%c]\n",
101 acpi_debug_layers[i].name,
102 acpi_debug_layers[i].value,
103 (acpi_dbg_layer & acpi_debug_layers[i].value)
104 ? '*' : ' ');
105 }
106 result +=
107 sprintf(buffer + result, "%-25s\t0x%08X [%c]\n", "ACPI_ALL_DRIVERS",
108 ACPI_ALL_DRIVERS,
109 (acpi_dbg_layer & ACPI_ALL_DRIVERS) ==
110 ACPI_ALL_DRIVERS ? '*' : (acpi_dbg_layer & ACPI_ALL_DRIVERS)
111 == 0 ? ' ' : '-');
112 result +=
113 sprintf(buffer + result,
114 "--\ndebug_layer = 0x%08X ( * = enabled)\n",
115 acpi_dbg_layer);
116
117 return result;
118}
119
120static int param_get_debug_level(char *buffer, const struct kernel_param *kp)
121{
122 int result = 0;
123 int i;
124
125 result = sprintf(buffer, "%-25s\tHex SET\n", "Description");
126
127 for (i = 0; i < ARRAY_SIZE(acpi_debug_levels); i++) {
128 result += sprintf(buffer + result, "%-25s\t0x%08lX [%c]\n",
129 acpi_debug_levels[i].name,
130 acpi_debug_levels[i].value,
131 (acpi_dbg_level & acpi_debug_levels[i].value)
132 ? '*' : ' ');
133 }
134 result +=
135 sprintf(buffer + result, "--\ndebug_level = 0x%08X (* = enabled)\n",
136 acpi_dbg_level);
137
138 return result;
139}
140
141static const struct kernel_param_ops param_ops_debug_layer = {
142 .set = param_set_uint,
143 .get = param_get_debug_layer,
144};
145
146static const struct kernel_param_ops param_ops_debug_level = {
147 .set = param_set_uint,
148 .get = param_get_debug_level,
149};
150
151module_param_cb(debug_layer, ¶m_ops_debug_layer, &acpi_dbg_layer, 0644);
152module_param_cb(debug_level, ¶m_ops_debug_level, &acpi_dbg_level, 0644);
153
/* Backing storage for the trace_method_name parameter (absolute AML path). */
static char trace_method_name[1024];

/*
 * param_set_trace_method_name - update the traced AML method path.
 *
 * The value is stored in trace_method_name, with a leading '\' prepended
 * when the caller supplied a relative path.  The ACPICA tracer is stopped
 * around the update and then restarted with its previous flags.
 *
 * Returns 0 on success or -ENOSPC if the string does not fit the buffer.
 */
static int param_set_trace_method_name(const char *val,
				       const struct kernel_param *kp)
{
	u32 saved_flags = 0;
	bool is_abs_path = true;

	if (*val != '\\')
		is_abs_path = false;

	/* 1023 leaves room for the NUL; 1022 also for the prepended '\'. */
	if ((is_abs_path && strlen(val) > 1023) ||
	    (!is_abs_path && strlen(val) > 1022)) {
		pr_err("%s: string parameter too long\n", kp->name);
		return -ENOSPC;
	}

	/*
	 * It's not safe to update acpi_gbl_trace_method_name without
	 * having the tracer stopped, so we save the original tracer
	 * state and disable it.
	 */
	saved_flags = acpi_gbl_trace_flags;
	(void)acpi_debug_trace(NULL,
			       acpi_gbl_trace_dbg_level,
			       acpi_gbl_trace_dbg_layer,
			       0);

	/* This is a hack. We can't kmalloc in early boot. */
	if (is_abs_path)
		strcpy(trace_method_name, val);
	else {
		trace_method_name[0] = '\\';
		strcpy(trace_method_name+1, val);
	}

	/* Restore the original tracer state */
	(void)acpi_debug_trace(trace_method_name,
			       acpi_gbl_trace_dbg_level,
			       acpi_gbl_trace_dbg_layer,
			       saved_flags);

	return 0;
}
198
199static int param_get_trace_method_name(char *buffer, const struct kernel_param *kp)
200{
201 return sysfs_emit(buffer, "%s\n", acpi_gbl_trace_method_name);
202}
203
204static const struct kernel_param_ops param_ops_trace_method = {
205 .set = param_set_trace_method_name,
206 .get = param_get_trace_method_name,
207};
208
209static const struct kernel_param_ops param_ops_trace_attrib = {
210 .set = param_set_uint,
211 .get = param_get_uint,
212};
213
214module_param_cb(trace_method_name, ¶m_ops_trace_method, &trace_method_name, 0644);
215module_param_cb(trace_debug_layer, ¶m_ops_trace_attrib, &acpi_gbl_trace_dbg_layer, 0644);
216module_param_cb(trace_debug_level, ¶m_ops_trace_attrib, &acpi_gbl_trace_dbg_level, 0644);
217
/*
 * param_set_trace_state - switch the ACPICA tracer mode from userspace.
 *
 * Accepted values: "enable", "disable", "method", "method-once",
 * "opcode", "opcode-once".  "enable"/"disable" trace globally (no method
 * filter); the others trace the method set via trace_method_name.
 *
 * Returns 0 on success, -EINVAL for an unknown keyword, -EBUSY when
 * acpi_debug_trace() rejects the request.
 */
static int param_set_trace_state(const char *val,
				 const struct kernel_param *kp)
{
	acpi_status status;
	const char *method = trace_method_name;
	u32 flags = 0;

/* So "xxx-once" comparison should go prior than "xxx" comparison */
#define acpi_compare_param(val, key)	\
	strncmp((val), (key), sizeof(key) - 1)

	/* Prefix match: the "-once" variants must be tested first. */
	if (!acpi_compare_param(val, "enable")) {
		method = NULL;
		flags = ACPI_TRACE_ENABLED;
	} else if (!acpi_compare_param(val, "disable"))
		method = NULL;
	else if (!acpi_compare_param(val, "method-once"))
		flags = ACPI_TRACE_ENABLED | ACPI_TRACE_ONESHOT;
	else if (!acpi_compare_param(val, "method"))
		flags = ACPI_TRACE_ENABLED;
	else if (!acpi_compare_param(val, "opcode-once"))
		flags = ACPI_TRACE_ENABLED | ACPI_TRACE_ONESHOT | ACPI_TRACE_OPCODE;
	else if (!acpi_compare_param(val, "opcode"))
		flags = ACPI_TRACE_ENABLED | ACPI_TRACE_OPCODE;
	else
		return -EINVAL;

	status = acpi_debug_trace(method,
				  acpi_gbl_trace_dbg_level,
				  acpi_gbl_trace_dbg_layer,
				  flags);
	if (ACPI_FAILURE(status))
		return -EBUSY;

	return 0;
}
254
255static int param_get_trace_state(char *buffer, const struct kernel_param *kp)
256{
257 if (!(acpi_gbl_trace_flags & ACPI_TRACE_ENABLED))
258 return sprintf(buffer, "disable\n");
259 if (!acpi_gbl_trace_method_name)
260 return sprintf(buffer, "enable\n");
261 if (acpi_gbl_trace_flags & ACPI_TRACE_ONESHOT)
262 return sprintf(buffer, "method-once\n");
263 else
264 return sprintf(buffer, "method\n");
265}
266
module_param_call(trace_state, param_set_trace_state, param_get_trace_state,
		  NULL, 0644);
#endif /* CONFIG_ACPI_DEBUG */


/* /sys/modules/acpi/parameters/aml_debug_output */

module_param_named(aml_debug_output, acpi_gbl_enable_aml_debug_object,
		   byte, 0644);
MODULE_PARM_DESC(aml_debug_output,
		 "To enable/disable the ACPI Debug Object output.");

/* /sys/module/acpi/parameters/acpica_version */
/* Read-only parameter exposing the compiled-in ACPICA version in hex. */
static int param_get_acpica_version(char *buffer,
				    const struct kernel_param *kp)
{
	int result;

	result = sprintf(buffer, "%x\n", ACPI_CA_VERSION);

	return result;
}

module_param_call(acpica_version, NULL, param_get_acpica_version, NULL, 0444);
291
292/*
293 * ACPI table sysfs I/F:
294 * /sys/firmware/acpi/tables/
295 * /sys/firmware/acpi/tables/data/
296 * /sys/firmware/acpi/tables/dynamic/
297 */
298
299static LIST_HEAD(acpi_table_attr_list);
300static struct kobject *tables_kobj;
301static struct kobject *tables_data_kobj;
302static struct kobject *dynamic_tables_kobj;
303static struct kobject *hotplug_kobj;
304
305#define ACPI_MAX_TABLE_INSTANCES 999
306#define ACPI_INST_SIZE 4 /* including trailing 0 */
307
308struct acpi_table_attr {
309 struct bin_attribute attr;
310 char name[ACPI_NAMESEG_SIZE];
311 int instance;
312 char filename[ACPI_NAMESEG_SIZE+ACPI_INST_SIZE];
313 struct list_head node;
314};
315
316struct acpi_data_attr {
317 struct bin_attribute attr;
318 u64 addr;
319};
320
321static ssize_t acpi_table_show(struct file *filp, struct kobject *kobj,
322 struct bin_attribute *bin_attr, char *buf,
323 loff_t offset, size_t count)
324{
325 struct acpi_table_attr *table_attr =
326 container_of(bin_attr, struct acpi_table_attr, attr);
327 struct acpi_table_header *table_header = NULL;
328 acpi_status status;
329 ssize_t rc;
330
331 status = acpi_get_table(table_attr->name, table_attr->instance,
332 &table_header);
333 if (ACPI_FAILURE(status))
334 return -ENODEV;
335
336 rc = memory_read_from_buffer(buf, count, &offset, table_header,
337 table_header->length);
338 acpi_put_table(table_header);
339 return rc;
340}
341
/*
 * acpi_table_attr_init - set up and register a sysfs bin file for a table.
 *
 * Assigns the next free instance number for this signature (by scanning
 * the already-registered attributes) and builds the file name: the bare
 * signature for a unique table, or "SIGn" when multiple instances exist.
 *
 * Returns 0 on success, -ERANGE if too many instances, or the
 * sysfs_create_bin_file() error.
 */
static int acpi_table_attr_init(struct kobject *tables_obj,
				struct acpi_table_attr *table_attr,
				struct acpi_table_header *table_header)
{
	struct acpi_table_header *header = NULL;
	struct acpi_table_attr *attr = NULL;
	char instance_str[ACPI_INST_SIZE];

	sysfs_attr_init(&table_attr->attr.attr);
	ACPI_COPY_NAMESEG(table_attr->name, table_header->signature);

	/* Find the highest instance already used for this signature. */
	list_for_each_entry(attr, &acpi_table_attr_list, node) {
		if (ACPI_COMPARE_NAMESEG(table_attr->name, attr->name))
			if (table_attr->instance < attr->instance)
				table_attr->instance = attr->instance;
	}
	table_attr->instance++;
	if (table_attr->instance > ACPI_MAX_TABLE_INSTANCES) {
		pr_warn("%4.4s: too many table instances\n", table_attr->name);
		return -ERANGE;
	}

	ACPI_COPY_NAMESEG(table_attr->filename, table_header->signature);
	table_attr->filename[ACPI_NAMESEG_SIZE] = '\0';
	/*
	 * Append the instance number when this is not the only instance:
	 * either instance > 1, or instance == 1 but a second instance of
	 * the same signature exists in the firmware.
	 */
	if (table_attr->instance > 1 || (table_attr->instance == 1 &&
					 !acpi_get_table
					 (table_header->signature, 2, &header))) {
		snprintf(instance_str, sizeof(instance_str), "%u",
			 table_attr->instance);
		strcat(table_attr->filename, instance_str);
	}

	table_attr->attr.size = table_header->length;
	table_attr->attr.read = acpi_table_show;
	table_attr->attr.attr.name = table_attr->filename;
	table_attr->attr.attr.mode = 0400;

	return sysfs_create_bin_file(tables_obj, &table_attr->attr);
}
381
/*
 * acpi_sysfs_table_handler - ACPICA table-event callback.
 *
 * On ACPI_TABLE_EVENT_INSTALL, creates a sysfs file for the new table
 * under /sys/firmware/acpi/tables/dynamic/ and tracks it in
 * acpi_table_attr_list.  Ownership of the allocated attribute stays
 * here; it is freed only on init failure (entries are never removed).
 */
acpi_status acpi_sysfs_table_handler(u32 event, void *table, void *context)
{
	struct acpi_table_attr *table_attr;

	switch (event) {
	case ACPI_TABLE_EVENT_INSTALL:
		table_attr = kzalloc(sizeof(*table_attr), GFP_KERNEL);
		if (!table_attr)
			return AE_NO_MEMORY;

		if (acpi_table_attr_init(dynamic_tables_kobj,
					 table_attr, table)) {
			kfree(table_attr);
			return AE_ERROR;
		}
		list_add_tail(&table_attr->node, &acpi_table_attr_list);
		break;
	case ACPI_TABLE_EVENT_LOAD:
	case ACPI_TABLE_EVENT_UNLOAD:
	case ACPI_TABLE_EVENT_UNINSTALL:
		/*
		 * we do not need to do anything right now
		 * because the table is not deleted from the
		 * global table list when unloading it.
		 */
		break;
	default:
		return AE_BAD_PARAMETER;
	}
	return AE_OK;
}
413
414static ssize_t acpi_data_show(struct file *filp, struct kobject *kobj,
415 struct bin_attribute *bin_attr, char *buf,
416 loff_t offset, size_t count)
417{
418 struct acpi_data_attr *data_attr;
419 void __iomem *base;
420 ssize_t size;
421
422 data_attr = container_of(bin_attr, struct acpi_data_attr, attr);
423 size = data_attr->attr.size;
424
425 if (offset < 0)
426 return -EINVAL;
427
428 if (offset >= size)
429 return 0;
430
431 if (count > size - offset)
432 count = size - offset;
433
434 base = acpi_os_map_iomem(data_attr->addr, size);
435 if (!base)
436 return -ENOMEM;
437
438 memcpy_fromio(buf, base + offset, count);
439
440 acpi_os_unmap_iomem(base, size);
441
442 return count;
443}
444
/*
 * acpi_bert_data_init - expose the BERT error region as a "BERT" file.
 *
 * Validates the table and region sizes, fills in the acpi_data_attr and
 * registers it under /sys/firmware/acpi/tables/data/.  On validation
 * failure the attribute is freed here (the caller transfers ownership).
 */
static int acpi_bert_data_init(void *th, struct acpi_data_attr *data_attr)
{
	struct acpi_table_bert *bert = th;

	if (bert->header.length < sizeof(struct acpi_table_bert) ||
	    bert->region_length < sizeof(struct acpi_hest_generic_status)) {
		kfree(data_attr);
		return -EINVAL;
	}
	data_attr->addr = bert->address;
	data_attr->attr.size = bert->region_length;
	data_attr->attr.attr.name = "BERT";

	return sysfs_create_bin_file(tables_data_kobj, &data_attr->attr);
}

/* Tables with an associated memory region to expose under tables/data/. */
static struct acpi_data_obj {
	char *name;	/* 4-char table signature */
	int (*fn)(void *, struct acpi_data_attr *);	/* takes ownership of the attr */
} acpi_data_objs[] = {
	{ ACPI_SIG_BERT, acpi_bert_data_init },
};

#define NUM_ACPI_DATA_OBJS ARRAY_SIZE(acpi_data_objs)
469
470static int acpi_table_data_init(struct acpi_table_header *th)
471{
472 struct acpi_data_attr *data_attr;
473 int i;
474
475 for (i = 0; i < NUM_ACPI_DATA_OBJS; i++) {
476 if (ACPI_COMPARE_NAMESEG(th->signature, acpi_data_objs[i].name)) {
477 data_attr = kzalloc(sizeof(*data_attr), GFP_KERNEL);
478 if (!data_attr)
479 return -ENOMEM;
480 sysfs_attr_init(&data_attr->attr.attr);
481 data_attr->attr.read = acpi_data_show;
482 data_attr->attr.attr.mode = 0400;
483 return acpi_data_objs[i].fn(th, data_attr);
484 }
485 }
486 return 0;
487}
488
/*
 * acpi_tables_sysfs_init - populate /sys/firmware/acpi/tables/.
 *
 * Creates the tables/, tables/data/ and tables/dynamic/ kobjects, then
 * walks the firmware table index creating one bin file per table and,
 * where applicable, a data-region file.
 *
 * NOTE(review): on kzalloc/attr-init failure inside the loop the three
 * kobjects are not released before returning — presumably acceptable at
 * boot, but confirm against the intended error handling.
 */
static int acpi_tables_sysfs_init(void)
{
	struct acpi_table_attr *table_attr;
	struct acpi_table_header *table_header = NULL;
	int table_index;
	acpi_status status;
	int ret;

	tables_kobj = kobject_create_and_add("tables", acpi_kobj);
	if (!tables_kobj)
		goto err;

	tables_data_kobj = kobject_create_and_add("data", tables_kobj);
	if (!tables_data_kobj)
		goto err_tables_data;

	dynamic_tables_kobj = kobject_create_and_add("dynamic", tables_kobj);
	if (!dynamic_tables_kobj)
		goto err_dynamic_tables;

	for (table_index = 0;; table_index++) {
		status = acpi_get_table_by_index(table_index, &table_header);

		/* AE_BAD_PARAMETER marks the end of the table index. */
		if (status == AE_BAD_PARAMETER)
			break;

		if (ACPI_FAILURE(status))
			continue;

		table_attr = kzalloc(sizeof(*table_attr), GFP_KERNEL);
		if (!table_attr)
			return -ENOMEM;

		ret = acpi_table_attr_init(tables_kobj,
					   table_attr, table_header);
		if (ret) {
			kfree(table_attr);
			return ret;
		}
		list_add_tail(&table_attr->node, &acpi_table_attr_list);
		acpi_table_data_init(table_header);
	}

	kobject_uevent(tables_kobj, KOBJ_ADD);
	kobject_uevent(tables_data_kobj, KOBJ_ADD);
	kobject_uevent(dynamic_tables_kobj, KOBJ_ADD);

	return 0;
err_dynamic_tables:
	kobject_put(tables_data_kobj);
err_tables_data:
	kobject_put(tables_kobj);
err:
	return -ENOMEM;
}
544
545/*
546 * Detailed ACPI IRQ counters:
547 * /sys/firmware/acpi/interrupts/
548 */
549
550u32 acpi_irq_handled;
551u32 acpi_irq_not_handled;
552
553#define COUNT_GPE 0
554#define COUNT_SCI 1 /* acpi_irq_handled */
555#define COUNT_SCI_NOT 2 /* acpi_irq_not_handled */
556#define COUNT_ERROR 3 /* other */
557#define NUM_COUNTERS_EXTRA 4
558
559struct event_counter {
560 u32 count;
561 u32 flags;
562};
563
564static struct event_counter *all_counters;
565static u32 num_gpes;
566static u32 num_counters;
567static struct attribute **all_attrs;
568static u32 acpi_gpe_count;
569
570static struct attribute_group interrupt_stats_attr_group = {
571 .name = "interrupts",
572};
573
574static struct kobj_attribute *counter_attrs;
575
/*
 * delete_gpe_attr_array - free everything acpi_irq_stats_init() built.
 *
 * all_counters is cleared before being freed so that gpe_count()/
 * fixed_event_count() callers observing the pointer see NULL rather
 * than freed memory — preserve this ordering.
 */
static void delete_gpe_attr_array(void)
{
	struct event_counter *tmp = all_counters;

	all_counters = NULL;
	kfree(tmp);

	if (counter_attrs) {
		int i;

		/* Only the first num_gpes names were kstrdup()'d... */
		/* NOTE(review): names for the fixed/extra slots are also
		 * kstrdup()'d in acpi_irq_stats_init() but only the GPE
		 * slots are freed here — confirm whether that is intended. */
		for (i = 0; i < num_gpes; i++)
			kfree(counter_attrs[i].attr.name);

		kfree(counter_attrs);
	}
	kfree(all_attrs);
}
593
594static void gpe_count(u32 gpe_number)
595{
596 acpi_gpe_count++;
597
598 if (!all_counters)
599 return;
600
601 if (gpe_number < num_gpes)
602 all_counters[gpe_number].count++;
603 else
604 all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS +
605 COUNT_ERROR].count++;
606}
607
608static void fixed_event_count(u32 event_number)
609{
610 if (!all_counters)
611 return;
612
613 if (event_number < ACPI_NUM_FIXED_EVENTS)
614 all_counters[num_gpes + event_number].count++;
615 else
616 all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS +
617 COUNT_ERROR].count++;
618}
619
620static void acpi_global_event_handler(u32 event_type, acpi_handle device,
621 u32 event_number, void *context)
622{
623 if (event_type == ACPI_EVENT_TYPE_GPE) {
624 gpe_count(event_number);
625 pr_debug("GPE event 0x%02x\n", event_number);
626 } else if (event_type == ACPI_EVENT_TYPE_FIXED) {
627 fixed_event_count(event_number);
628 pr_debug("Fixed event 0x%02x\n", event_number);
629 } else {
630 pr_debug("Other event 0x%02x\n", event_number);
631 }
632}
633
634static int get_status(u32 index, acpi_event_status *ret,
635 acpi_handle *handle)
636{
637 acpi_status status;
638
639 if (index >= num_gpes + ACPI_NUM_FIXED_EVENTS)
640 return -EINVAL;
641
642 if (index < num_gpes) {
643 status = acpi_get_gpe_device(index, handle);
644 if (ACPI_FAILURE(status)) {
645 pr_warn("Invalid GPE 0x%x", index);
646 return -ENXIO;
647 }
648 status = acpi_get_gpe_status(*handle, index, ret);
649 } else {
650 status = acpi_get_event_status(index - num_gpes, ret);
651 }
652 if (ACPI_FAILURE(status))
653 return -EIO;
654
655 return 0;
656}
657
/*
 * counter_show - sysfs read for one interrupt counter file.
 *
 * Refreshes the synthetic sci/sci_not/gpe_all slots from the live
 * globals, prints the count and, for real GPE/fixed-event entries,
 * appends the EN/STS flags and enable/mask state.
 */
static ssize_t counter_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	/* Which file was read: derived from the attribute's array slot. */
	int index = attr - counter_attrs;
	int size;
	acpi_handle handle;
	acpi_event_status status;
	int result = 0;

	all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI].count =
	    acpi_irq_handled;
	all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI_NOT].count =
	    acpi_irq_not_handled;
	all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_GPE].count =
	    acpi_gpe_count;
	size = sprintf(buf, "%8u", all_counters[index].count);

	/* "gpe_all" or "sci" */
	if (index >= num_gpes + ACPI_NUM_FIXED_EVENTS)
		goto end;

	result = get_status(index, &status, &handle);
	if (result)
		goto end;

	if (status & ACPI_EVENT_FLAG_ENABLE_SET)
		size += sprintf(buf + size, " EN");
	else
		size += sprintf(buf + size, " ");
	if (status & ACPI_EVENT_FLAG_STATUS_SET)
		size += sprintf(buf + size, " STS");
	else
		size += sprintf(buf + size, " ");

	if (!(status & ACPI_EVENT_FLAG_HAS_HANDLER))
		size += sprintf(buf + size, " invalid ");
	else if (status & ACPI_EVENT_FLAG_ENABLED)
		size += sprintf(buf + size, " enabled ");
	else if (status & ACPI_EVENT_FLAG_WAKE_ENABLED)
		size += sprintf(buf + size, " wake_enabled");
	else
		size += sprintf(buf + size, " disabled ");
	if (status & ACPI_EVENT_FLAG_MASKED)
		size += sprintf(buf + size, " masked ");
	else
		size += sprintf(buf + size, " unmasked");

end:
	size += sprintf(buf + size, "\n");
	return result ? result : size;
}
709
710/*
711 * counter_set() sets the specified counter.
712 * setting the total "sci" file to any value clears all counters.
713 * enable/disable/clear a gpe/fixed event in user space.
714 */
715static ssize_t counter_set(struct kobject *kobj,
716 struct kobj_attribute *attr, const char *buf,
717 size_t size)
718{
719 int index = attr - counter_attrs;
720 acpi_event_status status;
721 acpi_handle handle;
722 int result = 0;
723 unsigned long tmp;
724
725 if (index == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI) {
726 int i;
727 for (i = 0; i < num_counters; ++i)
728 all_counters[i].count = 0;
729 acpi_gpe_count = 0;
730 acpi_irq_handled = 0;
731 acpi_irq_not_handled = 0;
732 goto end;
733 }
734
735 /* show the event status for both GPEs and Fixed Events */
736 result = get_status(index, &status, &handle);
737 if (result)
738 goto end;
739
740 if (!(status & ACPI_EVENT_FLAG_HAS_HANDLER)) {
741 pr_warn("Can not change Invalid GPE/Fixed Event status\n");
742 return -EINVAL;
743 }
744
745 if (index < num_gpes) {
746 if (!strcmp(buf, "disable\n") &&
747 (status & ACPI_EVENT_FLAG_ENABLED))
748 result = acpi_disable_gpe(handle, index);
749 else if (!strcmp(buf, "enable\n") &&
750 !(status & ACPI_EVENT_FLAG_ENABLED))
751 result = acpi_enable_gpe(handle, index);
752 else if (!strcmp(buf, "clear\n") &&
753 (status & ACPI_EVENT_FLAG_STATUS_SET))
754 result = acpi_clear_gpe(handle, index);
755 else if (!strcmp(buf, "mask\n"))
756 result = acpi_mask_gpe(handle, index, TRUE);
757 else if (!strcmp(buf, "unmask\n"))
758 result = acpi_mask_gpe(handle, index, FALSE);
759 else if (!kstrtoul(buf, 0, &tmp))
760 all_counters[index].count = tmp;
761 else
762 result = -EINVAL;
763 } else if (index < num_gpes + ACPI_NUM_FIXED_EVENTS) {
764 int event = index - num_gpes;
765 if (!strcmp(buf, "disable\n") &&
766 (status & ACPI_EVENT_FLAG_ENABLE_SET))
767 result = acpi_disable_event(event, ACPI_NOT_ISR);
768 else if (!strcmp(buf, "enable\n") &&
769 !(status & ACPI_EVENT_FLAG_ENABLE_SET))
770 result = acpi_enable_event(event, ACPI_NOT_ISR);
771 else if (!strcmp(buf, "clear\n") &&
772 (status & ACPI_EVENT_FLAG_STATUS_SET))
773 result = acpi_clear_event(event);
774 else if (!kstrtoul(buf, 0, &tmp))
775 all_counters[index].count = tmp;
776 else
777 result = -EINVAL;
778 } else
779 all_counters[index].count = strtoul(buf, NULL, 0);
780
781 if (ACPI_FAILURE(result))
782 result = -EINVAL;
783end:
784 return result ? result : size;
785}
786
787/*
788 * A Quirk Mechanism for GPE Flooding Prevention:
789 *
790 * Quirks may be needed to prevent GPE flooding on a specific GPE. The
791 * flooding typically cannot be detected and automatically prevented by
792 * ACPI_GPE_DISPATCH_NONE check because there is a _Lxx/_Exx prepared in
793 * the AML tables. This normally indicates a feature gap in Linux, thus
794 * instead of providing endless quirk tables, we provide a boot parameter
795 * for those who want this quirk. For example, if the users want to prevent
796 * the GPE flooding for GPE 00, they need to specify the following boot
797 * parameter:
798 * acpi_mask_gpe=0x00
799 * Note, the parameter can be a list (see bitmap_parselist() for the details).
800 * The masking status can be modified by the following runtime controlling
801 * interface:
802 * echo unmask > /sys/firmware/acpi/interrupts/gpe00
803 */
804#define ACPI_MASKABLE_GPE_MAX 0x100
805static DECLARE_BITMAP(acpi_masked_gpes_map, ACPI_MASKABLE_GPE_MAX) __initdata;
806
807static int __init acpi_gpe_set_masked_gpes(char *val)
808{
809 int ret;
810 u8 gpe;
811
812 ret = kstrtou8(val, 0, &gpe);
813 if (ret) {
814 ret = bitmap_parselist(val, acpi_masked_gpes_map, ACPI_MASKABLE_GPE_MAX);
815 if (ret)
816 return ret;
817 } else
818 set_bit(gpe, acpi_masked_gpes_map);
819
820 return 1;
821}
822__setup("acpi_mask_gpe=", acpi_gpe_set_masked_gpes);
823
824void __init acpi_gpe_apply_masked_gpes(void)
825{
826 acpi_handle handle;
827 acpi_status status;
828 u16 gpe;
829
830 for_each_set_bit(gpe, acpi_masked_gpes_map, ACPI_MASKABLE_GPE_MAX) {
831 status = acpi_get_gpe_device(gpe, &handle);
832 if (ACPI_SUCCESS(status)) {
833 pr_info("Masking GPE 0x%x.\n", gpe);
834 (void)acpi_mask_gpe(handle, gpe, TRUE);
835 }
836 }
837}
838
/*
 * acpi_irq_stats_init - build /sys/firmware/acpi/interrupts/.
 *
 * Allocates the counter array and one kobj_attribute per counter,
 * installs the ACPICA global event handler, and registers the sysfs
 * group.  Any failure falls through to delete_gpe_attr_array(), which
 * frees whatever was allocated so far.  Idempotent: returns immediately
 * if already initialized.
 */
void acpi_irq_stats_init(void)
{
	acpi_status status;
	int i;

	if (all_counters)
		return;

	num_gpes = acpi_current_gpe_count;
	num_counters = num_gpes + ACPI_NUM_FIXED_EVENTS + NUM_COUNTERS_EXTRA;

	/* +1 for the NULL terminator required by attribute groups. */
	all_attrs = kcalloc(num_counters + 1, sizeof(*all_attrs), GFP_KERNEL);
	if (all_attrs == NULL)
		return;

	all_counters = kcalloc(num_counters, sizeof(*all_counters), GFP_KERNEL);
	if (all_counters == NULL)
		goto fail;

	status = acpi_install_global_event_handler(acpi_global_event_handler, NULL);
	if (ACPI_FAILURE(status))
		goto fail;

	counter_attrs = kcalloc(num_counters, sizeof(*counter_attrs), GFP_KERNEL);
	if (counter_attrs == NULL)
		goto fail;

	for (i = 0; i < num_counters; ++i) {
		char buffer[12];	/* longest name: "ff_gbl_lock" + NUL */
		char *name;

		if (i < num_gpes)
			sprintf(buffer, "gpe%02X", i);
		else if (i == num_gpes + ACPI_EVENT_PMTIMER)
			sprintf(buffer, "ff_pmtimer");
		else if (i == num_gpes + ACPI_EVENT_GLOBAL)
			sprintf(buffer, "ff_gbl_lock");
		else if (i == num_gpes + ACPI_EVENT_POWER_BUTTON)
			sprintf(buffer, "ff_pwr_btn");
		else if (i == num_gpes + ACPI_EVENT_SLEEP_BUTTON)
			sprintf(buffer, "ff_slp_btn");
		else if (i == num_gpes + ACPI_EVENT_RTC)
			sprintf(buffer, "ff_rt_clk");
		else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_GPE)
			sprintf(buffer, "gpe_all");
		else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI)
			sprintf(buffer, "sci");
		else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI_NOT)
			sprintf(buffer, "sci_not");
		else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_ERROR)
			sprintf(buffer, "error");
		else
			sprintf(buffer, "bug%02X", i);	/* should be unreachable */

		name = kstrdup(buffer, GFP_KERNEL);
		if (name == NULL)
			goto fail;

		sysfs_attr_init(&counter_attrs[i].attr);
		counter_attrs[i].attr.name = name;
		counter_attrs[i].attr.mode = 0644;
		counter_attrs[i].show = counter_show;
		counter_attrs[i].store = counter_set;

		all_attrs[i] = &counter_attrs[i].attr;
	}

	interrupt_stats_attr_group.attrs = all_attrs;
	/* sysfs_create_group() returns 0 on success. */
	if (!sysfs_create_group(acpi_kobj, &interrupt_stats_attr_group))
		return;

fail:
	delete_gpe_attr_array();
}
913
/* Tear down the interrupts/ sysfs group and free the counter arrays. */
static void __exit interrupt_stats_exit(void)
{
	sysfs_remove_group(acpi_kobj, &interrupt_stats_attr_group);

	delete_gpe_attr_array();
}

/* /sys/firmware/acpi/pm_profile: FADT preferred power-management profile. */
static ssize_t pm_profile_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", acpi_gbl_FADT.preferred_profile);
}

static const struct kobj_attribute pm_profile_attr = __ATTR_RO(pm_profile);
927
/* Show whether this hotplug profile is enabled (0 or 1). */
static ssize_t enabled_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct acpi_hotplug_profile *hotplug = to_acpi_hotplug_profile(kobj);

	return sprintf(buf, "%d\n", hotplug->enabled);
}

/* Accept only "0" or "1"; forwards the new state to the scan core. */
static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
			     const char *buf, size_t size)
{
	struct acpi_hotplug_profile *hotplug = to_acpi_hotplug_profile(kobj);
	unsigned int val;

	if (kstrtouint(buf, 10, &val) || val > 1)
		return -EINVAL;

	acpi_scan_hotplug_enabled(hotplug, val);
	return size;
}

static struct kobj_attribute hotplug_enabled_attr = __ATTR_RW(enabled);

/* Default attributes of every hotplug-profile kobject. */
static struct attribute *hotplug_profile_attrs[] = {
	&hotplug_enabled_attr.attr,
	NULL
};
ATTRIBUTE_GROUPS(hotplug_profile);

static struct kobj_type acpi_hotplug_profile_ktype = {
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = hotplug_profile_groups,
};
960
/*
 * acpi_sysfs_add_hotplug_profile - register a profile kobject under
 * /sys/firmware/acpi/hotplug/<name>.
 *
 * Failure is logged but not propagated; the embedded kobject reference
 * is dropped if kobject_init_and_add() fails.
 */
void acpi_sysfs_add_hotplug_profile(struct acpi_hotplug_profile *hotplug,
				    const char *name)
{
	int error;

	if (!hotplug_kobj)
		goto err_out;

	error = kobject_init_and_add(&hotplug->kobj,
		&acpi_hotplug_profile_ktype, hotplug_kobj, "%s", name);
	if (error) {
		kobject_put(&hotplug->kobj);
		goto err_out;
	}

	kobject_uevent(&hotplug->kobj, KOBJ_ADD);
	return;

 err_out:
	pr_err("Unable to add hotplug profile '%s'\n", name);
}
982
/* force_remove is permanently disabled; always reads as 0. */
static ssize_t force_remove_show(struct kobject *kobj,
				 struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", 0);
}

/*
 * Accept writes of a false value for backward compatibility; enabling
 * the feature is rejected with an explanatory message.
 */
static ssize_t force_remove_store(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  const char *buf, size_t size)
{
	bool val;
	int ret;

	ret = kstrtobool(buf, &val);
	if (ret < 0)
		return ret;

	if (val) {
		pr_err("Enabling force_remove is not supported anymore. Please report to linux-acpi@vger.kernel.org if you depend on this functionality\n");
		return -EINVAL;
	}
	return size;
}

static const struct kobj_attribute force_remove_attr = __ATTR_RW(force_remove);
1008
/*
 * acpi_sysfs_init - create the ACPI sysfs hierarchy at boot.
 *
 * Sets up /sys/firmware/acpi/tables/*, the hotplug/ directory with its
 * force_remove file, and the pm_profile attribute.  Returns 0 on
 * success or a negative errno from the first failing step.
 */
int __init acpi_sysfs_init(void)
{
	int result;

	result = acpi_tables_sysfs_init();
	if (result)
		return result;

	hotplug_kobj = kobject_create_and_add("hotplug", acpi_kobj);
	if (!hotplug_kobj)
		return -ENOMEM;

	result = sysfs_create_file(hotplug_kobj, &force_remove_attr.attr);
	if (result)
		return result;

	result = sysfs_create_file(acpi_kobj, &pm_profile_attr.attr);
	return result;
}