Loading...
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * ACPI AML interfacing support
4 *
5 * Copyright (C) 2015, Intel Corporation
6 * Authors: Lv Zheng <lv.zheng@intel.com>
7 */
8
9/* #define DEBUG */
10#define pr_fmt(fmt) "ACPI: AML: " fmt
11
12#include <linux/kernel.h>
13#include <linux/module.h>
14#include <linux/wait.h>
15#include <linux/poll.h>
16#include <linux/sched.h>
17#include <linux/kthread.h>
18#include <linux/proc_fs.h>
19#include <linux/debugfs.h>
20#include <linux/circ_buf.h>
21#include <linux/acpi.h>
22#include "internal.h"
23
24#define ACPI_AML_BUF_ALIGN (sizeof (acpi_size))
25#define ACPI_AML_BUF_SIZE PAGE_SIZE
26
27#define circ_count(circ) \
28 (CIRC_CNT((circ)->head, (circ)->tail, ACPI_AML_BUF_SIZE))
29#define circ_count_to_end(circ) \
30 (CIRC_CNT_TO_END((circ)->head, (circ)->tail, ACPI_AML_BUF_SIZE))
31#define circ_space(circ) \
32 (CIRC_SPACE((circ)->head, (circ)->tail, ACPI_AML_BUF_SIZE))
33#define circ_space_to_end(circ) \
34 (CIRC_SPACE_TO_END((circ)->head, (circ)->tail, ACPI_AML_BUF_SIZE))
35
36#define ACPI_AML_OPENED 0x0001
37#define ACPI_AML_CLOSED 0x0002
38#define ACPI_AML_IN_USER 0x0004 /* user space is writing cmd */
39#define ACPI_AML_IN_KERN 0x0008 /* kernel space is reading cmd */
40#define ACPI_AML_OUT_USER 0x0010 /* user space is reading log */
41#define ACPI_AML_OUT_KERN 0x0020 /* kernel space is writing log */
42#define ACPI_AML_USER (ACPI_AML_IN_USER | ACPI_AML_OUT_USER)
43#define ACPI_AML_KERN (ACPI_AML_IN_KERN | ACPI_AML_OUT_KERN)
44#define ACPI_AML_BUSY (ACPI_AML_USER | ACPI_AML_KERN)
45#define ACPI_AML_OPEN (ACPI_AML_OPENED | ACPI_AML_CLOSED)
46
47struct acpi_aml_io {
48 wait_queue_head_t wait;
49 unsigned long flags;
50 unsigned long users;
51 struct mutex lock;
52 struct task_struct *thread;
53 char out_buf[ACPI_AML_BUF_SIZE] __aligned(ACPI_AML_BUF_ALIGN);
54 struct circ_buf out_crc;
55 char in_buf[ACPI_AML_BUF_SIZE] __aligned(ACPI_AML_BUF_ALIGN);
56 struct circ_buf in_crc;
57 acpi_osd_exec_callback function;
58 void *context;
59 unsigned long usages;
60};
61
62static struct acpi_aml_io acpi_aml_io;
63static bool acpi_aml_initialized;
64static struct file *acpi_aml_active_reader;
65static struct dentry *acpi_aml_dentry;
66
67static inline bool __acpi_aml_running(void)
68{
69 return acpi_aml_io.thread ? true : false;
70}
71
72static inline bool __acpi_aml_access_ok(unsigned long flag)
73{
74 /*
75 * The debugger interface is in opened state (OPENED && !CLOSED),
76 * then it is allowed to access the debugger buffers from either
77 * user space or the kernel space.
78 * In addition, for the kernel space, only the debugger thread
79 * (thread ID matched) is allowed to access.
80 */
81 if (!(acpi_aml_io.flags & ACPI_AML_OPENED) ||
82 (acpi_aml_io.flags & ACPI_AML_CLOSED) ||
83 !__acpi_aml_running())
84 return false;
85 if ((flag & ACPI_AML_KERN) &&
86 current != acpi_aml_io.thread)
87 return false;
88 return true;
89}
90
91static inline bool __acpi_aml_readable(struct circ_buf *circ, unsigned long flag)
92{
93 /*
94 * Another read is not in progress and there is data in buffer
95 * available for read.
96 */
97 if (!(acpi_aml_io.flags & flag) && circ_count(circ))
98 return true;
99 return false;
100}
101
102static inline bool __acpi_aml_writable(struct circ_buf *circ, unsigned long flag)
103{
104 /*
105 * Another write is not in progress and there is buffer space
106 * available for write.
107 */
108 if (!(acpi_aml_io.flags & flag) && circ_space(circ))
109 return true;
110 return false;
111}
112
113static inline bool __acpi_aml_busy(void)
114{
115 if (acpi_aml_io.flags & ACPI_AML_BUSY)
116 return true;
117 return false;
118}
119
120static inline bool __acpi_aml_opened(void)
121{
122 if (acpi_aml_io.flags & ACPI_AML_OPEN)
123 return true;
124 return false;
125}
126
127static inline bool __acpi_aml_used(void)
128{
129 return acpi_aml_io.usages ? true : false;
130}
131
132static inline bool acpi_aml_running(void)
133{
134 bool ret;
135
136 mutex_lock(&acpi_aml_io.lock);
137 ret = __acpi_aml_running();
138 mutex_unlock(&acpi_aml_io.lock);
139 return ret;
140}
141
142static bool acpi_aml_busy(void)
143{
144 bool ret;
145
146 mutex_lock(&acpi_aml_io.lock);
147 ret = __acpi_aml_busy();
148 mutex_unlock(&acpi_aml_io.lock);
149 return ret;
150}
151
152static bool acpi_aml_used(void)
153{
154 bool ret;
155
156 /*
157 * The usage count is prepared to avoid race conditions between the
158 * starts and the stops of the debugger thread.
159 */
160 mutex_lock(&acpi_aml_io.lock);
161 ret = __acpi_aml_used();
162 mutex_unlock(&acpi_aml_io.lock);
163 return ret;
164}
165
166static bool acpi_aml_kern_readable(void)
167{
168 bool ret;
169
170 mutex_lock(&acpi_aml_io.lock);
171 ret = !__acpi_aml_access_ok(ACPI_AML_IN_KERN) ||
172 __acpi_aml_readable(&acpi_aml_io.in_crc, ACPI_AML_IN_KERN);
173 mutex_unlock(&acpi_aml_io.lock);
174 return ret;
175}
176
177static bool acpi_aml_kern_writable(void)
178{
179 bool ret;
180
181 mutex_lock(&acpi_aml_io.lock);
182 ret = !__acpi_aml_access_ok(ACPI_AML_OUT_KERN) ||
183 __acpi_aml_writable(&acpi_aml_io.out_crc, ACPI_AML_OUT_KERN);
184 mutex_unlock(&acpi_aml_io.lock);
185 return ret;
186}
187
188static bool acpi_aml_user_readable(void)
189{
190 bool ret;
191
192 mutex_lock(&acpi_aml_io.lock);
193 ret = !__acpi_aml_access_ok(ACPI_AML_OUT_USER) ||
194 __acpi_aml_readable(&acpi_aml_io.out_crc, ACPI_AML_OUT_USER);
195 mutex_unlock(&acpi_aml_io.lock);
196 return ret;
197}
198
199static bool acpi_aml_user_writable(void)
200{
201 bool ret;
202
203 mutex_lock(&acpi_aml_io.lock);
204 ret = !__acpi_aml_access_ok(ACPI_AML_IN_USER) ||
205 __acpi_aml_writable(&acpi_aml_io.in_crc, ACPI_AML_IN_USER);
206 mutex_unlock(&acpi_aml_io.lock);
207 return ret;
208}
209
210static int acpi_aml_lock_write(struct circ_buf *circ, unsigned long flag)
211{
212 int ret = 0;
213
214 mutex_lock(&acpi_aml_io.lock);
215 if (!__acpi_aml_access_ok(flag)) {
216 ret = -EFAULT;
217 goto out;
218 }
219 if (!__acpi_aml_writable(circ, flag)) {
220 ret = -EAGAIN;
221 goto out;
222 }
223 acpi_aml_io.flags |= flag;
224out:
225 mutex_unlock(&acpi_aml_io.lock);
226 return ret;
227}
228
229static int acpi_aml_lock_read(struct circ_buf *circ, unsigned long flag)
230{
231 int ret = 0;
232
233 mutex_lock(&acpi_aml_io.lock);
234 if (!__acpi_aml_access_ok(flag)) {
235 ret = -EFAULT;
236 goto out;
237 }
238 if (!__acpi_aml_readable(circ, flag)) {
239 ret = -EAGAIN;
240 goto out;
241 }
242 acpi_aml_io.flags |= flag;
243out:
244 mutex_unlock(&acpi_aml_io.lock);
245 return ret;
246}
247
/*
 * Release FIFO access claimed by acpi_aml_lock_read()/acpi_aml_lock_write().
 * @wakeup: when true, wake waiters blocked on the opposite side of the
 *          FIFO (done under the lock so the flag clear and the wakeup
 *          are observed together).
 */
static void acpi_aml_unlock_fifo(unsigned long flag, bool wakeup)
{
	mutex_lock(&acpi_aml_io.lock);
	acpi_aml_io.flags &= ~flag;
	if (wakeup)
		wake_up_interruptible(&acpi_aml_io.wait);
	mutex_unlock(&acpi_aml_io.lock);
}
256
/*
 * Copy at most @len bytes of debugger log text into the "out" FIFO.
 * Runs in the debugger thread only (enforced via ACPI_AML_OUT_KERN in
 * __acpi_aml_access_ok()).  Returns the number of bytes stored — possibly
 * a partial copy limited by the contiguous space up to the ring's end —
 * or a negative errno from acpi_aml_lock_write().
 */
static int acpi_aml_write_kern(const char *buf, int len)
{
	int ret;
	struct circ_buf *crc = &acpi_aml_io.out_crc;
	int n;
	char *p;

	ret = acpi_aml_lock_write(crc, ACPI_AML_OUT_KERN);
	if (ret < 0)
		return ret;
	/* sync tail before inserting logs */
	smp_mb();
	p = &crc->buf[crc->head];
	n = min(len, circ_space_to_end(crc));
	memcpy(p, buf, n);
	/* sync head after inserting logs */
	smp_wmb();
	crc->head = (crc->head + n) & (ACPI_AML_BUF_SIZE - 1);
	/* wake the user-space reader waiting for log data */
	acpi_aml_unlock_fifo(ACPI_AML_OUT_KERN, true);
	return n;
}
278
/*
 * Pop a single command byte from the "in" FIFO.  Runs in the debugger
 * thread only.  Returns the byte value (as an int) on success or a
 * negative errno from acpi_aml_lock_read() (-EAGAIN when empty).
 */
static int acpi_aml_readb_kern(void)
{
	int ret;
	struct circ_buf *crc = &acpi_aml_io.in_crc;
	char *p;

	ret = acpi_aml_lock_read(crc, ACPI_AML_IN_KERN);
	if (ret < 0)
		return ret;
	/* sync head before removing cmds */
	smp_rmb();
	p = &crc->buf[crc->tail];
	ret = (int)*p;
	/* sync tail before inserting cmds */
	smp_mb();
	crc->tail = (crc->tail + 1) & (ACPI_AML_BUF_SIZE - 1);
	/* wake the user-space writer waiting for FIFO space */
	acpi_aml_unlock_fifo(ACPI_AML_IN_KERN, true);
	return ret;
}
298
/*
 * acpi_aml_write_log() - Capture debugger output
 * @msg: the debugger output
 *
 * This function should be used to implement acpi_os_printf() to filter out
 * the debugger output and store the output into the debugger interface
 * buffer. Return the size of stored logs or errno.
 */
static ssize_t acpi_aml_write_log(const char *msg)
{
	int ret = 0;
	int count = 0, size = 0;

	if (!acpi_aml_initialized)
		return -ENODEV;
	if (msg)
		count = strlen(msg);
	/* Loop until the whole message is stored in the out FIFO. */
	while (count > 0) {
again:
		ret = acpi_aml_write_kern(msg + size, count);
		if (ret == -EAGAIN) {
			/* FIFO full: block until space or interface close. */
			ret = wait_event_interruptible(acpi_aml_io.wait,
				acpi_aml_kern_writable());
			/*
			 * We need to retry when the condition
			 * becomes true.
			 */
			if (ret == 0)
				goto again;
			/* interrupted by a signal: give up */
			break;
		}
		if (ret < 0)
			break;
		size += ret;
		count -= ret;
	}
	/* Report progress if any bytes were stored, else the last error. */
	return size > 0 ? size : ret;
}
337
/*
 * acpi_aml_read_cmd() - Capture debugger input
 * @msg: the debugger input
 * @count: the size of the debugger input buffer
 *
 * This function should be used to implement acpi_os_get_line() to capture
 * the debugger input commands and store the input commands into the
 * debugger interface buffer. Return the size of stored commands or errno.
 */
static ssize_t acpi_aml_read_cmd(char *msg, size_t count)
{
	int ret = 0;
	int size = 0;

	/*
	 * This is ensured by the running fact of the debugger thread
	 * unless a bug is introduced.
	 */
	BUG_ON(!acpi_aml_initialized);
	/* Accumulate bytes until the buffer fills or a newline arrives. */
	while (count > 0) {
again:
		/*
		 * Check each input byte to find the end of the command.
		 */
		ret = acpi_aml_readb_kern();
		if (ret == -EAGAIN) {
			/* FIFO empty: block until a byte arrives or close. */
			ret = wait_event_interruptible(acpi_aml_io.wait,
				acpi_aml_kern_readable());
			/*
			 * We need to retry when the condition becomes
			 * true.
			 */
			if (ret == 0)
				goto again;
		}
		if (ret < 0)
			break;
		*(msg + size) = (char)ret;
		size++;
		count--;
		if (ret == '\n') {
			/*
			 * acpi_os_get_line() requires a zero terminated command
			 * string.
			 */
			*(msg + size - 1) = '\0';
			break;
		}
	}
	/* Report stored size if any, else the last error code. */
	return size > 0 ? size : ret;
}
389
/*
 * Body of the AML debugger kthread.  Snapshots the callback registered by
 * acpi_aml_create_thread() under the lock, runs it, then drops the usage
 * count; the last user clears acpi_aml_io.thread and wakes waiters in
 * acpi_aml_release() that are blocked on acpi_aml_used().
 */
static int acpi_aml_thread(void *unused)
{
	acpi_osd_exec_callback function = NULL;
	void *context;

	mutex_lock(&acpi_aml_io.lock);
	if (acpi_aml_io.function) {
		/* bump the usage count so release waits for us */
		acpi_aml_io.usages++;
		function = acpi_aml_io.function;
		context = acpi_aml_io.context;
	}
	mutex_unlock(&acpi_aml_io.lock);

	/* Run the ACPICA debugger command loop outside the lock. */
	if (function)
		function(context);

	mutex_lock(&acpi_aml_io.lock);
	acpi_aml_io.usages--;
	if (!__acpi_aml_used()) {
		/* thread exiting: let acpi_aml_release() proceed */
		acpi_aml_io.thread = NULL;
		wake_up(&acpi_aml_io.wait);
	}
	mutex_unlock(&acpi_aml_io.lock);

	return 0;
}
416
/*
 * acpi_aml_create_thread() - Create AML debugger thread
 * @function: the debugger thread callback
 * @context: the context to be passed to the debugger thread
 *
 * This function should be used to implement acpi_os_execute() which is
 * used by the ACPICA debugger to create the debugger thread.
 * Returns 0 on success or the PTR_ERR() of kthread_create() on failure.
 */
static int acpi_aml_create_thread(acpi_osd_exec_callback function, void *context)
{
	struct task_struct *t;

	/* Publish the callback before the thread can start running. */
	mutex_lock(&acpi_aml_io.lock);
	acpi_aml_io.function = function;
	acpi_aml_io.context = context;
	mutex_unlock(&acpi_aml_io.lock);

	t = kthread_create(acpi_aml_thread, NULL, "aml");
	if (IS_ERR(t)) {
		pr_err("Failed to create AML debugger thread.\n");
		return PTR_ERR(t);
	}

	/*
	 * Record the thread and hand its id to ACPICA before waking it,
	 * so kernel-side FIFO access checks recognize the thread at once.
	 */
	mutex_lock(&acpi_aml_io.lock);
	acpi_aml_io.thread = t;
	acpi_set_debugger_thread_id((acpi_thread_id)(unsigned long)t);
	wake_up_process(t);
	mutex_unlock(&acpi_aml_io.lock);
	return 0;
}
447
448static int acpi_aml_wait_command_ready(bool single_step,
449 char *buffer, size_t length)
450{
451 acpi_status status;
452
453 if (single_step)
454 acpi_os_printf("\n%1c ", ACPI_DEBUGGER_EXECUTE_PROMPT);
455 else
456 acpi_os_printf("\n%1c ", ACPI_DEBUGGER_COMMAND_PROMPT);
457
458 status = acpi_os_get_line(buffer, length, NULL);
459 if (ACPI_FAILURE(status))
460 return -EINVAL;
461 return 0;
462}
463
/* No completion signalling is needed for this interface; always succeed. */
static int acpi_aml_notify_command_complete(void)
{
	return 0;
}
468
/*
 * open() handler for the "acpidbg" debugfs file.
 * The first (and only) reader initializes the ACPICA debugger and owns
 * the interface; writers are admitted only once the interface is OPENED.
 * Note the lock is dropped around acpi_initialize_debugger() because the
 * debugger start path calls back into this module and takes the lock.
 */
static int acpi_aml_open(struct inode *inode, struct file *file)
{
	int ret = 0;
	acpi_status status;

	mutex_lock(&acpi_aml_io.lock);
	/*
	 * The debugger interface is being closed, no new user is allowed
	 * during this period.
	 */
	if (acpi_aml_io.flags & ACPI_AML_CLOSED) {
		ret = -EBUSY;
		goto err_lock;
	}
	if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
		/*
		 * Only one reader is allowed to initiate the debugger
		 * thread.
		 */
		if (acpi_aml_active_reader) {
			ret = -EBUSY;
			goto err_lock;
		} else {
			pr_debug("Opening debugger reader.\n");
			acpi_aml_active_reader = file;
		}
	} else {
		/*
		 * No writer is allowed unless the debugger thread is
		 * ready.
		 */
		if (!(acpi_aml_io.flags & ACPI_AML_OPENED)) {
			ret = -ENODEV;
			goto err_lock;
		}
	}
	if (acpi_aml_active_reader == file) {
		pr_debug("Opening debugger interface.\n");
		mutex_unlock(&acpi_aml_io.lock);

		pr_debug("Initializing debugger thread.\n");
		status = acpi_initialize_debugger();
		if (ACPI_FAILURE(status)) {
			pr_err("Failed to initialize debugger.\n");
			ret = -EINVAL;
			goto err_exit;
		}
		pr_debug("Debugger thread initialized.\n");

		/* Re-acquire the lock to publish the OPENED state. */
		mutex_lock(&acpi_aml_io.lock);
		acpi_aml_io.flags |= ACPI_AML_OPENED;
		acpi_aml_io.out_crc.head = acpi_aml_io.out_crc.tail = 0;
		acpi_aml_io.in_crc.head = acpi_aml_io.in_crc.tail = 0;
		pr_debug("Debugger interface opened.\n");
	}
	acpi_aml_io.users++;
err_lock:
	if (ret < 0) {
		/* roll back the reader claim made above */
		if (acpi_aml_active_reader == file)
			acpi_aml_active_reader = NULL;
	}
	mutex_unlock(&acpi_aml_io.lock);
err_exit:
	return ret;
}
534
/*
 * release() handler for the "acpidbg" debugfs file.
 * When the owning reader closes, the interface transitions to CLOSED,
 * all blocked readers/writers are kicked out, and the ACPICA debugger
 * thread is terminated before OPENED is cleared.  The CLOSED flag is
 * only dropped once the last user has gone, so no new opens can sneak
 * in during teardown.
 */
static int acpi_aml_release(struct inode *inode, struct file *file)
{
	mutex_lock(&acpi_aml_io.lock);
	acpi_aml_io.users--;
	if (file == acpi_aml_active_reader) {
		pr_debug("Closing debugger reader.\n");
		acpi_aml_active_reader = NULL;

		pr_debug("Closing debugger interface.\n");
		acpi_aml_io.flags |= ACPI_AML_CLOSED;

		/*
		 * Wake up all user space/kernel space blocked
		 * readers/writers.
		 */
		wake_up_interruptible(&acpi_aml_io.wait);
		mutex_unlock(&acpi_aml_io.lock);
		/*
		 * Wait all user space/kernel space readers/writers to
		 * stop so that ACPICA command loop of the debugger thread
		 * should fail all its command line reads after this point.
		 */
		wait_event(acpi_aml_io.wait, !acpi_aml_busy());

		/*
		 * Then we try to terminate the debugger thread if it is
		 * not terminated.
		 */
		pr_debug("Terminating debugger thread.\n");
		acpi_terminate_debugger();
		wait_event(acpi_aml_io.wait, !acpi_aml_used());
		pr_debug("Debugger thread terminated.\n");

		mutex_lock(&acpi_aml_io.lock);
		acpi_aml_io.flags &= ~ACPI_AML_OPENED;
	}
	if (acpi_aml_io.users == 0) {
		/* last user gone: allow the interface to be reopened */
		pr_debug("Debugger interface closed.\n");
		acpi_aml_io.flags &= ~ACPI_AML_CLOSED;
	}
	mutex_unlock(&acpi_aml_io.lock);
	return 0;
}
578
/*
 * Copy at most @len bytes of buffered log text from the "out" FIFO to
 * user space.  Returns the number of bytes copied (limited to the
 * contiguous run up to the ring's end), or a negative errno.  The
 * debugger thread is only woken (to refill the FIFO) when the copy
 * succeeded, hence the "ret >= 0" wakeup condition.
 */
static int acpi_aml_read_user(char __user *buf, int len)
{
	int ret;
	struct circ_buf *crc = &acpi_aml_io.out_crc;
	int n;
	char *p;

	ret = acpi_aml_lock_read(crc, ACPI_AML_OUT_USER);
	if (ret < 0)
		return ret;
	/* sync head before removing logs */
	smp_rmb();
	p = &crc->buf[crc->tail];
	n = min(len, circ_count_to_end(crc));
	if (copy_to_user(buf, p, n)) {
		ret = -EFAULT;
		goto out;
	}
	/* sync tail after removing logs */
	smp_mb();
	crc->tail = (crc->tail + n) & (ACPI_AML_BUF_SIZE - 1);
	ret = n;
out:
	acpi_aml_unlock_fifo(ACPI_AML_OUT_USER, ret >= 0);
	return ret;
}
605
/*
 * read() handler for the "acpidbg" debugfs file.  Blocks (unless
 * O_NONBLOCK) until log data is available, then returns the first
 * contiguous chunk; it deliberately does not loop to fill the whole
 * user buffer.  A failure after the debugger thread has exited is
 * reported as EOF (0) rather than an error.
 */
static ssize_t acpi_aml_read(struct file *file, char __user *buf,
			     size_t count, loff_t *ppos)
{
	int ret = 0;
	int size = 0;

	if (!count)
		return 0;
	if (!access_ok(buf, count))
		return -EFAULT;

	while (count > 0) {
again:
		ret = acpi_aml_read_user(buf + size, count);
		if (ret == -EAGAIN) {
			if (file->f_flags & O_NONBLOCK)
				break;
			else {
				ret = wait_event_interruptible(acpi_aml_io.wait,
					acpi_aml_user_readable());
				/*
				 * We need to retry when the condition
				 * becomes true.
				 */
				if (ret == 0)
					goto again;
			}
		}
		if (ret < 0) {
			/* debugger gone: report EOF instead of an error */
			if (!acpi_aml_running())
				ret = 0;
			break;
		}
		if (ret) {
			size += ret;
			count -= ret;
			*ppos += ret;
			/* return the first chunk immediately */
			break;
		}
	}
	return size > 0 ? size : ret;
}
648
649static int acpi_aml_write_user(const char __user *buf, int len)
650{
651 int ret;
652 struct circ_buf *crc = &acpi_aml_io.in_crc;
653 int n;
654 char *p;
655
656 ret = acpi_aml_lock_write(crc, ACPI_AML_IN_USER);
657 if (ret < 0)
658 return ret;
659 /* sync tail before inserting cmds */
660 smp_mb();
661 p = &crc->buf[crc->head];
662 n = min(len, circ_space_to_end(crc));
663 if (copy_from_user(p, buf, n)) {
664 ret = -EFAULT;
665 goto out;
666 }
667 /* sync head after inserting cmds */
668 smp_wmb();
669 crc->head = (crc->head + n) & (ACPI_AML_BUF_SIZE - 1);
670 ret = n;
671out:
672 acpi_aml_unlock_fifo(ACPI_AML_IN_USER, ret >= 0);
673 return n;
674}
675
/*
 * write() handler for the "acpidbg" debugfs file.  Blocks (unless
 * O_NONBLOCK) while the command FIFO is full and, unlike acpi_aml_read(),
 * loops until the entire user buffer has been stored.  A failure after
 * the debugger thread has exited is reported as 0 rather than an error.
 */
static ssize_t acpi_aml_write(struct file *file, const char __user *buf,
			      size_t count, loff_t *ppos)
{
	int ret = 0;
	int size = 0;

	if (!count)
		return 0;
	if (!access_ok(buf, count))
		return -EFAULT;

	while (count > 0) {
again:
		ret = acpi_aml_write_user(buf + size, count);
		if (ret == -EAGAIN) {
			if (file->f_flags & O_NONBLOCK)
				break;
			else {
				ret = wait_event_interruptible(acpi_aml_io.wait,
					acpi_aml_user_writable());
				/*
				 * We need to retry when the condition
				 * becomes true.
				 */
				if (ret == 0)
					goto again;
			}
		}
		if (ret < 0) {
			/* debugger gone: report no progress, not an error */
			if (!acpi_aml_running())
				ret = 0;
			break;
		}
		if (ret) {
			size += ret;
			count -= ret;
			*ppos += ret;
		}
	}
	return size > 0 ? size : ret;
}
717
718static __poll_t acpi_aml_poll(struct file *file, poll_table *wait)
719{
720 __poll_t masks = 0;
721
722 poll_wait(file, &acpi_aml_io.wait, wait);
723 if (acpi_aml_user_readable())
724 masks |= EPOLLIN | EPOLLRDNORM;
725 if (acpi_aml_user_writable())
726 masks |= EPOLLOUT | EPOLLWRNORM;
727
728 return masks;
729}
730
/* File hooks backing the <debugfs>/acpi/acpidbg interface. */
static const struct file_operations acpi_aml_operations = {
	.read		= acpi_aml_read,
	.write		= acpi_aml_write,
	.poll		= acpi_aml_poll,
	.open		= acpi_aml_open,
	.release	= acpi_aml_release,
	.llseek		= generic_file_llseek,
};
739
/* Callbacks registered with the ACPICA debugger core. */
static const struct acpi_debugger_ops acpi_aml_debugger = {
	.create_thread		 = acpi_aml_create_thread,
	.read_cmd		 = acpi_aml_read_cmd,
	.write_log		 = acpi_aml_write_log,
	.wait_command_ready	 = acpi_aml_wait_command_ready,
	.notify_command_complete = acpi_aml_notify_command_complete,
};
747
/*
 * Module init: set up the shared IO state, create the "acpidbg" debugfs
 * file, and register the debugger callbacks with ACPICA.  The debugfs
 * file is removed again if registration fails.
 */
static int __init acpi_aml_init(void)
{
	int ret;

	/* Initialize AML IO interface */
	mutex_init(&acpi_aml_io.lock);
	init_waitqueue_head(&acpi_aml_io.wait);
	acpi_aml_io.out_crc.buf = acpi_aml_io.out_buf;
	acpi_aml_io.in_crc.buf = acpi_aml_io.in_buf;

	acpi_aml_dentry = debugfs_create_file("acpidbg",
					      S_IFREG | S_IRUGO | S_IWUSR,
					      acpi_debugfs_dir, NULL,
					      &acpi_aml_operations);

	ret = acpi_register_debugger(THIS_MODULE, &acpi_aml_debugger);
	if (ret) {
		/* undo the debugfs file on registration failure */
		debugfs_remove(acpi_aml_dentry);
		acpi_aml_dentry = NULL;
		return ret;
	}

	/* gate acpi_aml_write_log() until everything is in place */
	acpi_aml_initialized = true;
	return 0;
}
773
774static void __exit acpi_aml_exit(void)
775{
776 if (acpi_aml_initialized) {
777 acpi_unregister_debugger(&acpi_aml_debugger);
778 debugfs_remove(acpi_aml_dentry);
779 acpi_aml_dentry = NULL;
780 acpi_aml_initialized = false;
781 }
782}
783
784module_init(acpi_aml_init);
785module_exit(acpi_aml_exit);
786
787MODULE_AUTHOR("Lv Zheng");
788MODULE_DESCRIPTION("ACPI debugger userspace IO driver");
789MODULE_LICENSE("GPL");
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * ACPI AML interfacing support
4 *
5 * Copyright (C) 2015, Intel Corporation
6 * Authors: Lv Zheng <lv.zheng@intel.com>
7 */
8
9/* #define DEBUG */
10#define pr_fmt(fmt) "ACPI: AML: " fmt
11
12#include <linux/kernel.h>
13#include <linux/module.h>
14#include <linux/wait.h>
15#include <linux/poll.h>
16#include <linux/sched.h>
17#include <linux/kthread.h>
18#include <linux/proc_fs.h>
19#include <linux/debugfs.h>
20#include <linux/circ_buf.h>
21#include <linux/acpi.h>
22#include "internal.h"
23
24#define ACPI_AML_BUF_ALIGN (sizeof (acpi_size))
25#define ACPI_AML_BUF_SIZE PAGE_SIZE
26
27#define circ_count(circ) \
28 (CIRC_CNT((circ)->head, (circ)->tail, ACPI_AML_BUF_SIZE))
29#define circ_count_to_end(circ) \
30 (CIRC_CNT_TO_END((circ)->head, (circ)->tail, ACPI_AML_BUF_SIZE))
31#define circ_space(circ) \
32 (CIRC_SPACE((circ)->head, (circ)->tail, ACPI_AML_BUF_SIZE))
33#define circ_space_to_end(circ) \
34 (CIRC_SPACE_TO_END((circ)->head, (circ)->tail, ACPI_AML_BUF_SIZE))
35
36#define ACPI_AML_OPENED 0x0001
37#define ACPI_AML_CLOSED 0x0002
38#define ACPI_AML_IN_USER 0x0004 /* user space is writing cmd */
39#define ACPI_AML_IN_KERN 0x0008 /* kernel space is reading cmd */
40#define ACPI_AML_OUT_USER 0x0010 /* user space is reading log */
41#define ACPI_AML_OUT_KERN 0x0020 /* kernel space is writing log */
42#define ACPI_AML_USER (ACPI_AML_IN_USER | ACPI_AML_OUT_USER)
43#define ACPI_AML_KERN (ACPI_AML_IN_KERN | ACPI_AML_OUT_KERN)
44#define ACPI_AML_BUSY (ACPI_AML_USER | ACPI_AML_KERN)
45#define ACPI_AML_OPEN (ACPI_AML_OPENED | ACPI_AML_CLOSED)
46
47struct acpi_aml_io {
48 wait_queue_head_t wait;
49 unsigned long flags;
50 unsigned long users;
51 struct mutex lock;
52 struct task_struct *thread;
53 char out_buf[ACPI_AML_BUF_SIZE] __aligned(ACPI_AML_BUF_ALIGN);
54 struct circ_buf out_crc;
55 char in_buf[ACPI_AML_BUF_SIZE] __aligned(ACPI_AML_BUF_ALIGN);
56 struct circ_buf in_crc;
57 acpi_osd_exec_callback function;
58 void *context;
59 unsigned long usages;
60};
61
62static struct acpi_aml_io acpi_aml_io;
63static bool acpi_aml_initialized;
64static struct file *acpi_aml_active_reader;
65static struct dentry *acpi_aml_dentry;
66
67static inline bool __acpi_aml_running(void)
68{
69 return acpi_aml_io.thread ? true : false;
70}
71
72static inline bool __acpi_aml_access_ok(unsigned long flag)
73{
74 /*
75 * The debugger interface is in opened state (OPENED && !CLOSED),
76 * then it is allowed to access the debugger buffers from either
77 * user space or the kernel space.
78 * In addition, for the kernel space, only the debugger thread
79 * (thread ID matched) is allowed to access.
80 */
81 if (!(acpi_aml_io.flags & ACPI_AML_OPENED) ||
82 (acpi_aml_io.flags & ACPI_AML_CLOSED) ||
83 !__acpi_aml_running())
84 return false;
85 if ((flag & ACPI_AML_KERN) &&
86 current != acpi_aml_io.thread)
87 return false;
88 return true;
89}
90
91static inline bool __acpi_aml_readable(struct circ_buf *circ, unsigned long flag)
92{
93 /*
94 * Another read is not in progress and there is data in buffer
95 * available for read.
96 */
97 if (!(acpi_aml_io.flags & flag) && circ_count(circ))
98 return true;
99 return false;
100}
101
102static inline bool __acpi_aml_writable(struct circ_buf *circ, unsigned long flag)
103{
104 /*
105 * Another write is not in progress and there is buffer space
106 * available for write.
107 */
108 if (!(acpi_aml_io.flags & flag) && circ_space(circ))
109 return true;
110 return false;
111}
112
113static inline bool __acpi_aml_busy(void)
114{
115 if (acpi_aml_io.flags & ACPI_AML_BUSY)
116 return true;
117 return false;
118}
119
120static inline bool __acpi_aml_used(void)
121{
122 return acpi_aml_io.usages ? true : false;
123}
124
125static inline bool acpi_aml_running(void)
126{
127 bool ret;
128
129 mutex_lock(&acpi_aml_io.lock);
130 ret = __acpi_aml_running();
131 mutex_unlock(&acpi_aml_io.lock);
132 return ret;
133}
134
135static bool acpi_aml_busy(void)
136{
137 bool ret;
138
139 mutex_lock(&acpi_aml_io.lock);
140 ret = __acpi_aml_busy();
141 mutex_unlock(&acpi_aml_io.lock);
142 return ret;
143}
144
145static bool acpi_aml_used(void)
146{
147 bool ret;
148
149 /*
150 * The usage count is prepared to avoid race conditions between the
151 * starts and the stops of the debugger thread.
152 */
153 mutex_lock(&acpi_aml_io.lock);
154 ret = __acpi_aml_used();
155 mutex_unlock(&acpi_aml_io.lock);
156 return ret;
157}
158
159static bool acpi_aml_kern_readable(void)
160{
161 bool ret;
162
163 mutex_lock(&acpi_aml_io.lock);
164 ret = !__acpi_aml_access_ok(ACPI_AML_IN_KERN) ||
165 __acpi_aml_readable(&acpi_aml_io.in_crc, ACPI_AML_IN_KERN);
166 mutex_unlock(&acpi_aml_io.lock);
167 return ret;
168}
169
170static bool acpi_aml_kern_writable(void)
171{
172 bool ret;
173
174 mutex_lock(&acpi_aml_io.lock);
175 ret = !__acpi_aml_access_ok(ACPI_AML_OUT_KERN) ||
176 __acpi_aml_writable(&acpi_aml_io.out_crc, ACPI_AML_OUT_KERN);
177 mutex_unlock(&acpi_aml_io.lock);
178 return ret;
179}
180
181static bool acpi_aml_user_readable(void)
182{
183 bool ret;
184
185 mutex_lock(&acpi_aml_io.lock);
186 ret = !__acpi_aml_access_ok(ACPI_AML_OUT_USER) ||
187 __acpi_aml_readable(&acpi_aml_io.out_crc, ACPI_AML_OUT_USER);
188 mutex_unlock(&acpi_aml_io.lock);
189 return ret;
190}
191
192static bool acpi_aml_user_writable(void)
193{
194 bool ret;
195
196 mutex_lock(&acpi_aml_io.lock);
197 ret = !__acpi_aml_access_ok(ACPI_AML_IN_USER) ||
198 __acpi_aml_writable(&acpi_aml_io.in_crc, ACPI_AML_IN_USER);
199 mutex_unlock(&acpi_aml_io.lock);
200 return ret;
201}
202
203static int acpi_aml_lock_write(struct circ_buf *circ, unsigned long flag)
204{
205 int ret = 0;
206
207 mutex_lock(&acpi_aml_io.lock);
208 if (!__acpi_aml_access_ok(flag)) {
209 ret = -EFAULT;
210 goto out;
211 }
212 if (!__acpi_aml_writable(circ, flag)) {
213 ret = -EAGAIN;
214 goto out;
215 }
216 acpi_aml_io.flags |= flag;
217out:
218 mutex_unlock(&acpi_aml_io.lock);
219 return ret;
220}
221
222static int acpi_aml_lock_read(struct circ_buf *circ, unsigned long flag)
223{
224 int ret = 0;
225
226 mutex_lock(&acpi_aml_io.lock);
227 if (!__acpi_aml_access_ok(flag)) {
228 ret = -EFAULT;
229 goto out;
230 }
231 if (!__acpi_aml_readable(circ, flag)) {
232 ret = -EAGAIN;
233 goto out;
234 }
235 acpi_aml_io.flags |= flag;
236out:
237 mutex_unlock(&acpi_aml_io.lock);
238 return ret;
239}
240
241static void acpi_aml_unlock_fifo(unsigned long flag, bool wakeup)
242{
243 mutex_lock(&acpi_aml_io.lock);
244 acpi_aml_io.flags &= ~flag;
245 if (wakeup)
246 wake_up_interruptible(&acpi_aml_io.wait);
247 mutex_unlock(&acpi_aml_io.lock);
248}
249
250static int acpi_aml_write_kern(const char *buf, int len)
251{
252 int ret;
253 struct circ_buf *crc = &acpi_aml_io.out_crc;
254 int n;
255 char *p;
256
257 ret = acpi_aml_lock_write(crc, ACPI_AML_OUT_KERN);
258 if (ret < 0)
259 return ret;
260 /* sync tail before inserting logs */
261 smp_mb();
262 p = &crc->buf[crc->head];
263 n = min(len, circ_space_to_end(crc));
264 memcpy(p, buf, n);
265 /* sync head after inserting logs */
266 smp_wmb();
267 crc->head = (crc->head + n) & (ACPI_AML_BUF_SIZE - 1);
268 acpi_aml_unlock_fifo(ACPI_AML_OUT_KERN, true);
269 return n;
270}
271
272static int acpi_aml_readb_kern(void)
273{
274 int ret;
275 struct circ_buf *crc = &acpi_aml_io.in_crc;
276 char *p;
277
278 ret = acpi_aml_lock_read(crc, ACPI_AML_IN_KERN);
279 if (ret < 0)
280 return ret;
281 /* sync head before removing cmds */
282 smp_rmb();
283 p = &crc->buf[crc->tail];
284 ret = (int)*p;
285 /* sync tail before inserting cmds */
286 smp_mb();
287 crc->tail = (crc->tail + 1) & (ACPI_AML_BUF_SIZE - 1);
288 acpi_aml_unlock_fifo(ACPI_AML_IN_KERN, true);
289 return ret;
290}
291
292/*
293 * acpi_aml_write_log() - Capture debugger output
294 * @msg: the debugger output
295 *
296 * This function should be used to implement acpi_os_printf() to filter out
297 * the debugger output and store the output into the debugger interface
298 * buffer. Return the size of stored logs or errno.
299 */
300static ssize_t acpi_aml_write_log(const char *msg)
301{
302 int ret = 0;
303 int count = 0, size = 0;
304
305 if (!acpi_aml_initialized)
306 return -ENODEV;
307 if (msg)
308 count = strlen(msg);
309 while (count > 0) {
310again:
311 ret = acpi_aml_write_kern(msg + size, count);
312 if (ret == -EAGAIN) {
313 ret = wait_event_interruptible(acpi_aml_io.wait,
314 acpi_aml_kern_writable());
315 /*
316 * We need to retry when the condition
317 * becomes true.
318 */
319 if (ret == 0)
320 goto again;
321 break;
322 }
323 if (ret < 0)
324 break;
325 size += ret;
326 count -= ret;
327 }
328 return size > 0 ? size : ret;
329}
330
331/*
332 * acpi_aml_read_cmd() - Capture debugger input
333 * @msg: the debugger input
334 * @size: the size of the debugger input
335 *
336 * This function should be used to implement acpi_os_get_line() to capture
337 * the debugger input commands and store the input commands into the
338 * debugger interface buffer. Return the size of stored commands or errno.
339 */
340static ssize_t acpi_aml_read_cmd(char *msg, size_t count)
341{
342 int ret = 0;
343 int size = 0;
344
345 /*
346 * This is ensured by the running fact of the debugger thread
347 * unless a bug is introduced.
348 */
349 BUG_ON(!acpi_aml_initialized);
350 while (count > 0) {
351again:
352 /*
353 * Check each input byte to find the end of the command.
354 */
355 ret = acpi_aml_readb_kern();
356 if (ret == -EAGAIN) {
357 ret = wait_event_interruptible(acpi_aml_io.wait,
358 acpi_aml_kern_readable());
359 /*
360 * We need to retry when the condition becomes
361 * true.
362 */
363 if (ret == 0)
364 goto again;
365 }
366 if (ret < 0)
367 break;
368 *(msg + size) = (char)ret;
369 size++;
370 count--;
371 if (ret == '\n') {
372 /*
373 * acpi_os_get_line() requires a zero terminated command
374 * string.
375 */
376 *(msg + size - 1) = '\0';
377 break;
378 }
379 }
380 return size > 0 ? size : ret;
381}
382
383static int acpi_aml_thread(void *unused)
384{
385 acpi_osd_exec_callback function = NULL;
386 void *context;
387
388 mutex_lock(&acpi_aml_io.lock);
389 if (acpi_aml_io.function) {
390 acpi_aml_io.usages++;
391 function = acpi_aml_io.function;
392 context = acpi_aml_io.context;
393 }
394 mutex_unlock(&acpi_aml_io.lock);
395
396 if (function)
397 function(context);
398
399 mutex_lock(&acpi_aml_io.lock);
400 acpi_aml_io.usages--;
401 if (!__acpi_aml_used()) {
402 acpi_aml_io.thread = NULL;
403 wake_up(&acpi_aml_io.wait);
404 }
405 mutex_unlock(&acpi_aml_io.lock);
406
407 return 0;
408}
409
410/*
411 * acpi_aml_create_thread() - Create AML debugger thread
412 * @function: the debugger thread callback
413 * @context: the context to be passed to the debugger thread
414 *
415 * This function should be used to implement acpi_os_execute() which is
416 * used by the ACPICA debugger to create the debugger thread.
417 */
418static int acpi_aml_create_thread(acpi_osd_exec_callback function, void *context)
419{
420 struct task_struct *t;
421
422 mutex_lock(&acpi_aml_io.lock);
423 acpi_aml_io.function = function;
424 acpi_aml_io.context = context;
425 mutex_unlock(&acpi_aml_io.lock);
426
427 t = kthread_create(acpi_aml_thread, NULL, "aml");
428 if (IS_ERR(t)) {
429 pr_err("Failed to create AML debugger thread.\n");
430 return PTR_ERR(t);
431 }
432
433 mutex_lock(&acpi_aml_io.lock);
434 acpi_aml_io.thread = t;
435 acpi_set_debugger_thread_id((acpi_thread_id)(unsigned long)t);
436 wake_up_process(t);
437 mutex_unlock(&acpi_aml_io.lock);
438 return 0;
439}
440
441static int acpi_aml_wait_command_ready(bool single_step,
442 char *buffer, size_t length)
443{
444 acpi_status status;
445
446 if (single_step)
447 acpi_os_printf("\n%1c ", ACPI_DEBUGGER_EXECUTE_PROMPT);
448 else
449 acpi_os_printf("\n%1c ", ACPI_DEBUGGER_COMMAND_PROMPT);
450
451 status = acpi_os_get_line(buffer, length, NULL);
452 if (ACPI_FAILURE(status))
453 return -EINVAL;
454 return 0;
455}
456
/*
 * acpi_aml_notify_command_complete() - ACPICA "command complete" hook.
 *
 * Intentionally a no-op; it only exists to satisfy the
 * struct acpi_debugger_ops interface this driver registers.
 */
static int acpi_aml_notify_command_complete(void)
{
	return 0;
}
461
462static int acpi_aml_open(struct inode *inode, struct file *file)
463{
464 int ret = 0;
465 acpi_status status;
466
467 mutex_lock(&acpi_aml_io.lock);
468 /*
469 * The debugger interface is being closed, no new user is allowed
470 * during this period.
471 */
472 if (acpi_aml_io.flags & ACPI_AML_CLOSED) {
473 ret = -EBUSY;
474 goto err_lock;
475 }
476 if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
477 /*
478 * Only one reader is allowed to initiate the debugger
479 * thread.
480 */
481 if (acpi_aml_active_reader) {
482 ret = -EBUSY;
483 goto err_lock;
484 } else {
485 pr_debug("Opening debugger reader.\n");
486 acpi_aml_active_reader = file;
487 }
488 } else {
489 /*
490 * No writer is allowed unless the debugger thread is
491 * ready.
492 */
493 if (!(acpi_aml_io.flags & ACPI_AML_OPENED)) {
494 ret = -ENODEV;
495 goto err_lock;
496 }
497 }
498 if (acpi_aml_active_reader == file) {
499 pr_debug("Opening debugger interface.\n");
500 mutex_unlock(&acpi_aml_io.lock);
501
502 pr_debug("Initializing debugger thread.\n");
503 status = acpi_initialize_debugger();
504 if (ACPI_FAILURE(status)) {
505 pr_err("Failed to initialize debugger.\n");
506 ret = -EINVAL;
507 goto err_exit;
508 }
509 pr_debug("Debugger thread initialized.\n");
510
511 mutex_lock(&acpi_aml_io.lock);
512 acpi_aml_io.flags |= ACPI_AML_OPENED;
513 acpi_aml_io.out_crc.head = acpi_aml_io.out_crc.tail = 0;
514 acpi_aml_io.in_crc.head = acpi_aml_io.in_crc.tail = 0;
515 pr_debug("Debugger interface opened.\n");
516 }
517 acpi_aml_io.users++;
518err_lock:
519 if (ret < 0) {
520 if (acpi_aml_active_reader == file)
521 acpi_aml_active_reader = NULL;
522 }
523 mutex_unlock(&acpi_aml_io.lock);
524err_exit:
525 return ret;
526}
527
/*
 * acpi_aml_release() - Release the "acpidbg" debugfs file.
 * @inode: inode of the debugfs file
 * @file: file being closed
 *
 * When the active reader closes, the whole session is torn down:
 * ACPI_AML_CLOSED is set to keep new users out, blocked readers and
 * writers are woken and drained, the ACPICA debugger is terminated and
 * the debugger thread is waited for.  The last user to drop its
 * reference clears ACPI_AML_CLOSED so the interface can be reopened.
 */
static int acpi_aml_release(struct inode *inode, struct file *file)
{
	mutex_lock(&acpi_aml_io.lock);
	acpi_aml_io.users--;
	if (file == acpi_aml_active_reader) {
		pr_debug("Closing debugger reader.\n");
		acpi_aml_active_reader = NULL;

		pr_debug("Closing debugger interface.\n");
		acpi_aml_io.flags |= ACPI_AML_CLOSED;

		/*
		 * Wake up all user space/kernel space blocked
		 * readers/writers.
		 */
		wake_up_interruptible(&acpi_aml_io.wait);
		mutex_unlock(&acpi_aml_io.lock);
		/*
		 * Wait all user space/kernel space readers/writers to
		 * stop so that ACPICA command loop of the debugger thread
		 * should fail all its command line reads after this point.
		 */
		wait_event(acpi_aml_io.wait, !acpi_aml_busy());

		/*
		 * Then we try to terminate the debugger thread if it is
		 * not terminated.
		 */
		pr_debug("Terminating debugger thread.\n");
		acpi_terminate_debugger();
		wait_event(acpi_aml_io.wait, !acpi_aml_used());
		pr_debug("Debugger thread terminated.\n");

		mutex_lock(&acpi_aml_io.lock);
		acpi_aml_io.flags &= ~ACPI_AML_OPENED;
	}
	if (acpi_aml_io.users == 0) {
		/* Last user gone: allow the interface to be reopened. */
		pr_debug("Debugger interface closed.\n");
		acpi_aml_io.flags &= ~ACPI_AML_CLOSED;
	}
	mutex_unlock(&acpi_aml_io.lock);
	return 0;
}
571
/*
 * acpi_aml_read_user() - Copy up to @len log bytes to user space.
 * @buf: user buffer to fill
 * @len: maximum number of bytes to copy
 *
 * Pulls data from the kernel->user log circular buffer (out_crc).
 * Only the contiguous run up to the end of the buffer is copied, so the
 * caller may receive fewer bytes than are available and must loop.
 *
 * Returns the number of bytes copied (>= 0), -EFAULT on a failed user
 * copy, or a negative error from acpi_aml_lock_read() (callers treat
 * -EAGAIN as "no data yet" and retry).
 */
static int acpi_aml_read_user(char __user *buf, int len)
{
	int ret;
	struct circ_buf *crc = &acpi_aml_io.out_crc;
	int n;
	char *p;

	ret = acpi_aml_lock_read(crc, ACPI_AML_OUT_USER);
	if (ret < 0)
		return ret;
	/* sync head before removing logs */
	smp_rmb();
	p = &crc->buf[crc->tail];
	n = min(len, circ_count_to_end(crc));
	if (copy_to_user(buf, p, n)) {
		ret = -EFAULT;
		goto out;
	}
	/* sync tail after removing logs */
	smp_mb();
	crc->tail = (crc->tail + n) & (ACPI_AML_BUF_SIZE - 1);
	ret = n;
out:
	acpi_aml_unlock_fifo(ACPI_AML_OUT_USER, ret >= 0);
	return ret;
}
598
599static ssize_t acpi_aml_read(struct file *file, char __user *buf,
600 size_t count, loff_t *ppos)
601{
602 int ret = 0;
603 int size = 0;
604
605 if (!count)
606 return 0;
607 if (!access_ok(buf, count))
608 return -EFAULT;
609
610 while (count > 0) {
611again:
612 ret = acpi_aml_read_user(buf + size, count);
613 if (ret == -EAGAIN) {
614 if (file->f_flags & O_NONBLOCK)
615 break;
616 else {
617 ret = wait_event_interruptible(acpi_aml_io.wait,
618 acpi_aml_user_readable());
619 /*
620 * We need to retry when the condition
621 * becomes true.
622 */
623 if (ret == 0)
624 goto again;
625 }
626 }
627 if (ret < 0) {
628 if (!acpi_aml_running())
629 ret = 0;
630 break;
631 }
632 if (ret) {
633 size += ret;
634 count -= ret;
635 *ppos += ret;
636 break;
637 }
638 }
639 return size > 0 ? size : ret;
640}
641
642static int acpi_aml_write_user(const char __user *buf, int len)
643{
644 int ret;
645 struct circ_buf *crc = &acpi_aml_io.in_crc;
646 int n;
647 char *p;
648
649 ret = acpi_aml_lock_write(crc, ACPI_AML_IN_USER);
650 if (ret < 0)
651 return ret;
652 /* sync tail before inserting cmds */
653 smp_mb();
654 p = &crc->buf[crc->head];
655 n = min(len, circ_space_to_end(crc));
656 if (copy_from_user(p, buf, n)) {
657 ret = -EFAULT;
658 goto out;
659 }
660 /* sync head after inserting cmds */
661 smp_wmb();
662 crc->head = (crc->head + n) & (ACPI_AML_BUF_SIZE - 1);
663 ret = n;
664out:
665 acpi_aml_unlock_fifo(ACPI_AML_IN_USER, ret >= 0);
666 return n;
667}
668
669static ssize_t acpi_aml_write(struct file *file, const char __user *buf,
670 size_t count, loff_t *ppos)
671{
672 int ret = 0;
673 int size = 0;
674
675 if (!count)
676 return 0;
677 if (!access_ok(buf, count))
678 return -EFAULT;
679
680 while (count > 0) {
681again:
682 ret = acpi_aml_write_user(buf + size, count);
683 if (ret == -EAGAIN) {
684 if (file->f_flags & O_NONBLOCK)
685 break;
686 else {
687 ret = wait_event_interruptible(acpi_aml_io.wait,
688 acpi_aml_user_writable());
689 /*
690 * We need to retry when the condition
691 * becomes true.
692 */
693 if (ret == 0)
694 goto again;
695 }
696 }
697 if (ret < 0) {
698 if (!acpi_aml_running())
699 ret = 0;
700 break;
701 }
702 if (ret) {
703 size += ret;
704 count -= ret;
705 *ppos += ret;
706 }
707 }
708 return size > 0 ? size : ret;
709}
710
711static __poll_t acpi_aml_poll(struct file *file, poll_table *wait)
712{
713 __poll_t masks = 0;
714
715 poll_wait(file, &acpi_aml_io.wait, wait);
716 if (acpi_aml_user_readable())
717 masks |= EPOLLIN | EPOLLRDNORM;
718 if (acpi_aml_user_writable())
719 masks |= EPOLLOUT | EPOLLWRNORM;
720
721 return masks;
722}
723
/* File operations backing the "acpidbg" debugfs entry. */
static const struct file_operations acpi_aml_operations = {
	.read = acpi_aml_read,
	.write = acpi_aml_write,
	.poll = acpi_aml_poll,
	.open = acpi_aml_open,
	.release = acpi_aml_release,
	.llseek = generic_file_llseek,
};
732
/* Debugger callbacks registered with the ACPI core via acpi_register_debugger(). */
static const struct acpi_debugger_ops acpi_aml_debugger = {
	.create_thread = acpi_aml_create_thread,
	.read_cmd = acpi_aml_read_cmd,
	.write_log = acpi_aml_write_log,
	.wait_command_ready = acpi_aml_wait_command_ready,
	.notify_command_complete = acpi_aml_notify_command_complete,
};
740
741static int __init acpi_aml_init(void)
742{
743 int ret;
744
745 if (acpi_disabled)
746 return -ENODEV;
747
748 /* Initialize AML IO interface */
749 mutex_init(&acpi_aml_io.lock);
750 init_waitqueue_head(&acpi_aml_io.wait);
751 acpi_aml_io.out_crc.buf = acpi_aml_io.out_buf;
752 acpi_aml_io.in_crc.buf = acpi_aml_io.in_buf;
753
754 acpi_aml_dentry = debugfs_create_file("acpidbg",
755 S_IFREG | S_IRUGO | S_IWUSR,
756 acpi_debugfs_dir, NULL,
757 &acpi_aml_operations);
758
759 ret = acpi_register_debugger(THIS_MODULE, &acpi_aml_debugger);
760 if (ret) {
761 debugfs_remove(acpi_aml_dentry);
762 acpi_aml_dentry = NULL;
763 return ret;
764 }
765
766 acpi_aml_initialized = true;
767 return 0;
768}
769
770static void __exit acpi_aml_exit(void)
771{
772 if (acpi_aml_initialized) {
773 acpi_unregister_debugger(&acpi_aml_debugger);
774 debugfs_remove(acpi_aml_dentry);
775 acpi_aml_dentry = NULL;
776 acpi_aml_initialized = false;
777 }
778}
779
/* Module entry/exit hooks and metadata. */
module_init(acpi_aml_init);
module_exit(acpi_aml_exit);

MODULE_AUTHOR("Lv Zheng");
MODULE_DESCRIPTION("ACPI debugger userspace IO driver");
MODULE_LICENSE("GPL");