/*
 * bios-less APM driver for ARM Linux
 * Jamey Hicks <jamey@crl.dec.com>
 * adapted from the APM BIOS driver for Linux by Stephen Rothwell (sfr@linuxcare.com)
 *
 * APM 1.2 Reference:
 *   Intel Corporation, Microsoft Corporation. Advanced Power Management
 *   (APM) BIOS Interface Specification, Revision 1.2, February 1996.
 *
 * This document is available from Microsoft at:
 *   http://www.microsoft.com/whdc/archive/amp_12.mspx
 */
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/miscdevice.h>
#include <linux/apm_bios.h>
#include <linux/capability.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/apm-emulation.h>
#include <linux/freezer.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/kthread.h>
#include <linux/delay.h>


/*
 * The apm_bios device is one of the misc char devices.
 * This is its minor number.
 */
#define APM_MINOR_DEV 134

/*
 * One option can be changed at boot time as follows:
 *   apm=on/off        enable/disable APM
 */

/*
 * Maximum number of events stored
 */
#define APM_MAX_EVENTS 16

struct apm_queue {
        unsigned int event_head;
        unsigned int event_tail;
        apm_event_t events[APM_MAX_EVENTS];
};

/*
 * thread states (for threads using a writable /dev/apm_bios fd):
 *
 * SUSPEND_NONE:    nothing happening
 * SUSPEND_PENDING: suspend event queued for thread and pending to be read
 * SUSPEND_READ:    suspend event read, pending acknowledgement
 * SUSPEND_ACKED:   acknowledgement received from thread (via ioctl),
 *                  waiting for resume
 * SUSPEND_ACKTO:   acknowledgement timeout
 * SUSPEND_DONE:    thread has acked suspend and is now notified of
 *                  resume
 *
 * SUSPEND_WAIT:    this thread invoked suspend and is waiting for resume
 *
 * A thread migrates in one of three paths:
 *   NONE -1-> PENDING -2-> READ -3-> ACKED -4-> DONE -5-> NONE
 *                               -6-> ACKTO -7-> NONE
 *   NONE -8-> WAIT -9-> NONE
 *
 * While in PENDING or READ, the thread is accounted for in the
 * suspend_acks_pending counter.
 *
 * The transitions are invoked as follows:
 *   1: suspend event is signalled from the core PM code
 *   2: the suspend event is read from the fd by the userspace thread
 *   3: userspace thread issues the APM_IOC_SUSPEND ioctl (as ack)
 *   4: core PM code signals that we have resumed
 *   5: APM_IOC_SUSPEND ioctl returns
 *
 *   6: the notifier invoked from the core PM code timed out waiting
 *      for all relevant threads to enter ACKED state and puts those
 *      that haven't into ACKTO
 *   7: those threads issue APM_IOC_SUSPEND ioctl too late,
 *      get an error
 *
 *   8: userspace thread issues the APM_IOC_SUSPEND ioctl (to suspend),
 *      ioctl code invokes pm_suspend()
 *   9: pm_suspend() returns indicating resume
 */
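
/*
 * For illustration only (not compiled here): a minimal sketch of the
 * userspace side of the ack protocol above, assuming a process that has
 * opened /dev/apm_bios read-write with CAP_SYS_ADMIN and uses the event
 * and ioctl definitions from <linux/apm_bios.h>:
 *
 *   int fd = open("/dev/apm_bios", O_RDWR);
 *   apm_event_t ev;
 *
 *   while (read(fd, &ev, sizeof(ev)) == sizeof(ev)) {
 *           if (ev == APM_USER_SUSPEND || ev == APM_SYS_SUSPEND)
 *                   ioctl(fd, APM_IOC_SUSPEND, 0);  // ack; returns on resume
 *   }
 *
 * Issuing the same ioctl without a pending suspend event instead asks the
 * kernel to suspend (paths 8/9 above).
 */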
enum apm_suspend_state {
        SUSPEND_NONE,
        SUSPEND_PENDING,
        SUSPEND_READ,
        SUSPEND_ACKED,
        SUSPEND_ACKTO,
        SUSPEND_WAIT,
        SUSPEND_DONE,
};

/*
 * The per-file APM data
 */
struct apm_user {
        struct list_head list;

        unsigned int suser: 1;
        unsigned int writer: 1;
        unsigned int reader: 1;

        int suspend_result;
        enum apm_suspend_state suspend_state;

        struct apm_queue queue;
};

/*
 * Local variables
 */
static atomic_t suspend_acks_pending = ATOMIC_INIT(0);
static atomic_t userspace_notification_inhibit = ATOMIC_INIT(0);
static int apm_disabled;
static struct task_struct *kapmd_tsk;

static DECLARE_WAIT_QUEUE_HEAD(apm_waitqueue);
static DECLARE_WAIT_QUEUE_HEAD(apm_suspend_waitqueue);

/*
 * This is a list of everyone who has opened /dev/apm_bios
 */
static DECLARE_RWSEM(user_list_lock);
static LIST_HEAD(apm_user_list);

/*
 * kapmd info. kapmd provides us a process context to handle
 * "APM" events within - specifically necessary if we're going
 * to be suspending the system.
 */
static DECLARE_WAIT_QUEUE_HEAD(kapmd_wait);
static DEFINE_SPINLOCK(kapmd_queue_lock);
static struct apm_queue kapmd_queue;

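/*
 * Protects the suspend_state and suspend_result fields of every
 * apm_user, i.e. the state machine described above.
 */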
static DEFINE_MUTEX(state_lock);

static const char driver_version[] = "1.13"; /* no spaces */



/*
 * Compatibility cruft until the IPAQ people move over to the new
 * interface.
 */
static void __apm_get_power_status(struct apm_power_info *info)
{
}

/*
 * This allows machines to provide their own "apm get power status" function.
 */
void (*apm_get_power_status)(struct apm_power_info *) = __apm_get_power_status;
EXPORT_SYMBOL(apm_get_power_status);


/*
 * APM event queue management.
 */
static inline int queue_empty(struct apm_queue *q)
{
        return q->event_head == q->event_tail;
}

static inline apm_event_t queue_get_event(struct apm_queue *q)
{
        q->event_tail = (q->event_tail + 1) % APM_MAX_EVENTS;
        return q->events[q->event_tail];
}

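/*
 * Append an event to a queue. The ring buffer deliberately overwrites
 * the oldest entry when it is full: the tail is advanced past it and a
 * one-time warning is printed.
 */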
static void queue_add_event(struct apm_queue *q, apm_event_t event)
{
        q->event_head = (q->event_head + 1) % APM_MAX_EVENTS;
        if (q->event_head == q->event_tail) {
                static int notified;

                if (notified++ == 0)
                        printk(KERN_ERR "apm: an event queue overflowed\n");
                q->event_tail = (q->event_tail + 1) % APM_MAX_EVENTS;
        }
        q->events[q->event_head] = event;
}

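/*
 * Queue an event for every opener of /dev/apm_bios that can read, and
 * wake up any readers sleeping in apm_read() or poll().
 */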
static void queue_event(apm_event_t event)
{
        struct apm_user *as;

        down_read(&user_list_lock);
        list_for_each_entry(as, &apm_user_list, list) {
                if (as->reader)
                        queue_add_event(&as->queue, event);
        }
        up_read(&user_list_lock);
        wake_up_interruptible(&apm_waitqueue);
}

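/*
 * Hand queued events to userspace. Unless O_NONBLOCK is set this blocks
 * until at least one event is available, then copies out as many whole
 * events as the buffer can hold. Reading a suspend event moves this
 * opener from SUSPEND_PENDING to SUSPEND_READ.
 */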
static ssize_t apm_read(struct file *fp, char __user *buf, size_t count, loff_t *ppos)
{
        struct apm_user *as = fp->private_data;
        apm_event_t event;
        int i = count, ret = 0;

        if (count < sizeof(apm_event_t))
                return -EINVAL;

        if (queue_empty(&as->queue) && fp->f_flags & O_NONBLOCK)
                return -EAGAIN;

        wait_event_interruptible(apm_waitqueue, !queue_empty(&as->queue));

        while ((i >= sizeof(event)) && !queue_empty(&as->queue)) {
                event = queue_get_event(&as->queue);

                ret = -EFAULT;
                if (copy_to_user(buf, &event, sizeof(event)))
                        break;

                mutex_lock(&state_lock);
                if (as->suspend_state == SUSPEND_PENDING &&
                    (event == APM_SYS_SUSPEND || event == APM_USER_SUSPEND))
                        as->suspend_state = SUSPEND_READ;
                mutex_unlock(&state_lock);

                buf += sizeof(event);
                i -= sizeof(event);
        }

        if (i < count)
                ret = count - i;

        return ret;
}

static unsigned int apm_poll(struct file *fp, poll_table * wait)
{
        struct apm_user *as = fp->private_data;

        poll_wait(fp, &apm_waitqueue, wait);
        return queue_empty(&as->queue) ? 0 : POLLIN | POLLRDNORM;
}

/*
 * apm_ioctl - handle APM ioctl
 *
 * APM_IOC_SUSPEND
 *   This IOCTL is overloaded, and performs two functions. It is used to:
 *     - initiate a suspend
 *     - acknowledge a suspend read from /dev/apm_bios.
 *   Only when everyone who has opened /dev/apm_bios with write permission
 *   has acknowledged does the actual suspend happen.
 */
static long
apm_ioctl(struct file *filp, u_int cmd, u_long arg)
{
        struct apm_user *as = filp->private_data;
        int err = -EINVAL;

        if (!as->suser || !as->writer)
                return -EPERM;

        switch (cmd) {
        case APM_IOC_SUSPEND:
                mutex_lock(&state_lock);

                as->suspend_result = -EINTR;

                switch (as->suspend_state) {
                case SUSPEND_READ:
                        /*
                         * If we read a suspend command from /dev/apm_bios,
                         * then the corresponding APM_IOC_SUSPEND ioctl is
                         * interpreted as an acknowledge.
                         */
                        as->suspend_state = SUSPEND_ACKED;
                        atomic_dec(&suspend_acks_pending);
                        mutex_unlock(&state_lock);

                        /*
                         * suspend_acks_pending changed, the notifier needs to
                         * be woken up for this
                         */
                        wake_up(&apm_suspend_waitqueue);

                        /*
                         * Wait for the suspend/resume to complete. If there
                         * are pending acknowledges, we wait here for them.
                         * wait_event_freezable() is interruptible and a
                         * pending signal can cause busy looping. We aren't
                         * doing anything critical, so chill a bit on each
                         * iteration.
                         */
                        while (wait_event_freezable(apm_suspend_waitqueue,
                                        as->suspend_state != SUSPEND_ACKED))
                                msleep(10);
                        break;
                case SUSPEND_ACKTO:
                        as->suspend_result = -ETIMEDOUT;
                        mutex_unlock(&state_lock);
                        break;
                default:
                        as->suspend_state = SUSPEND_WAIT;
                        mutex_unlock(&state_lock);

                        /*
                         * Otherwise it is a request to suspend the system.
                         * Just invoke pm_suspend(), we'll handle it from
                         * there via the notifier.
                         */
                        as->suspend_result = pm_suspend(PM_SUSPEND_MEM);
                }

                mutex_lock(&state_lock);
                err = as->suspend_result;
                as->suspend_state = SUSPEND_NONE;
                mutex_unlock(&state_lock);
                break;
        }

        return err;
}

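/*
 * Final close of a /dev/apm_bios file. If this opener still owed a
 * suspend acknowledgement, drop it so the notifier does not keep waiting
 * for a process that has gone away.
 */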
static int apm_release(struct inode * inode, struct file * filp)
{
        struct apm_user *as = filp->private_data;

        filp->private_data = NULL;

        down_write(&user_list_lock);
        list_del(&as->list);
        up_write(&user_list_lock);

        /*
         * We are now unhooked from the chain. As far as new
         * events are concerned, we no longer exist.
         */
        mutex_lock(&state_lock);
        if (as->suspend_state == SUSPEND_PENDING ||
            as->suspend_state == SUSPEND_READ)
                atomic_dec(&suspend_acks_pending);
        mutex_unlock(&state_lock);

        wake_up(&apm_suspend_waitqueue);

        kfree(as);
        return 0;
}

static int apm_open(struct inode * inode, struct file * filp)
{
        struct apm_user *as;

        as = kzalloc(sizeof(*as), GFP_KERNEL);
        if (as) {
                /*
                 * XXX - this is a tiny bit broken, when we consider BSD
                 * process accounting. If the device is opened by root, we
                 * instantly flag that we used superuser privs. Who knows,
                 * we might close the device immediately without doing a
                 * privileged operation -- cevans
                 */
                as->suser = capable(CAP_SYS_ADMIN);
                as->writer = (filp->f_mode & FMODE_WRITE) == FMODE_WRITE;
                as->reader = (filp->f_mode & FMODE_READ) == FMODE_READ;

                down_write(&user_list_lock);
                list_add(&as->list, &apm_user_list);
                up_write(&user_list_lock);

                filp->private_data = as;
        }

        return as ? 0 : -ENOMEM;
}

static const struct file_operations apm_bios_fops = {
        .owner = THIS_MODULE,
        .read = apm_read,
        .poll = apm_poll,
        .unlocked_ioctl = apm_ioctl,
        .open = apm_open,
        .release = apm_release,
        .llseek = noop_llseek,
};

static struct miscdevice apm_device = {
        .minor = APM_MINOR_DEV,
        .name = "apm_bios",
        .fops = &apm_bios_fops
};


#ifdef CONFIG_PROC_FS
/*
 * Arguments, with symbols from linux/apm_bios.h.
 *
 *   0) Linux driver version (this will change if format changes)
 *   1) APM BIOS Version. Usually 1.0, 1.1 or 1.2.
 *   2) APM flags from APM Installation Check (0x00):
 *      bit 0: APM_16_BIT_SUPPORT
 *      bit 1: APM_32_BIT_SUPPORT
 *      bit 2: APM_IDLE_SLOWS_CLOCK
 *      bit 3: APM_BIOS_DISABLED
 *      bit 4: APM_BIOS_DISENGAGED
 *   3) AC line status
 *      0x00: Off-line
 *      0x01: On-line
 *      0x02: On backup power (BIOS >= 1.1 only)
 *      0xff: Unknown
 *   4) Battery status
 *      0x00: High
 *      0x01: Low
 *      0x02: Critical
 *      0x03: Charging
 *      0x04: Selected battery not present (BIOS >= 1.2 only)
 *      0xff: Unknown
 *   5) Battery flag
 *      bit 0: High
 *      bit 1: Low
 *      bit 2: Critical
 *      bit 3: Charging
 *      bit 7: No system battery
 *      0xff: Unknown
 *   6) Remaining battery life (percentage of charge):
 *      0-100: valid
 *      -1: Unknown
 *   7) Remaining battery life (time units):
 *      Number of remaining minutes or seconds
 *      -1: Unknown
 *   8) min = minutes; sec = seconds
 */
static int proc_apm_show(struct seq_file *m, void *v)
{
        struct apm_power_info info;
        char *units;

        info.ac_line_status = 0xff;
        info.battery_status = 0xff;
        info.battery_flag = 0xff;
        info.battery_life = -1;
        info.time = -1;
        info.units = -1;

        if (apm_get_power_status)
                apm_get_power_status(&info);

        switch (info.units) {
        default: units = "?"; break;
        case 0: units = "min"; break;
        case 1: units = "sec"; break;
        }

        seq_printf(m, "%s 1.2 0x%02x 0x%02x 0x%02x 0x%02x %d%% %d %s\n",
                   driver_version, APM_32_BIT_SUPPORT,
                   info.ac_line_status, info.battery_status,
                   info.battery_flag, info.battery_life,
                   info.time, units);

        return 0;
}

static int proc_apm_open(struct inode *inode, struct file *file)
{
        return single_open(file, proc_apm_show, NULL);
}

static const struct file_operations apm_proc_fops = {
        .owner = THIS_MODULE,
        .open = proc_apm_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};
#endif

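/*
 * kapmd thread: drains events queued by apm_queue_event() (possibly from
 * atomic context) and handles them in process context, either forwarding
 * them to userspace readers or suspending the system directly.
 */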
static int kapmd(void *arg)
{
        do {
                apm_event_t event;

                wait_event_interruptible(kapmd_wait,
                        !queue_empty(&kapmd_queue) || kthread_should_stop());

                if (kthread_should_stop())
                        break;

                spin_lock_irq(&kapmd_queue_lock);
                event = 0;
                if (!queue_empty(&kapmd_queue))
                        event = queue_get_event(&kapmd_queue);
                spin_unlock_irq(&kapmd_queue_lock);

                switch (event) {
                case 0:
                        break;

                case APM_LOW_BATTERY:
                case APM_POWER_STATUS_CHANGE:
                        queue_event(event);
                        break;

                case APM_USER_SUSPEND:
                case APM_SYS_SUSPEND:
                        pm_suspend(PM_SUSPEND_MEM);
                        break;

                case APM_CRITICAL_SUSPEND:
                        atomic_inc(&userspace_notification_inhibit);
                        pm_suspend(PM_SUSPEND_MEM);
                        atomic_dec(&userspace_notification_inhibit);
                        break;
                }
        } while (1);

        return 0;
}

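/*
 * PM notifier. On PM_SUSPEND_PREPARE/PM_HIBERNATION_PREPARE, queue a
 * suspend event to every privileged reader/writer and wait up to five
 * seconds for each of them to acknowledge via APM_IOC_SUSPEND; on
 * PM_POST_SUSPEND/PM_POST_HIBERNATION, queue a resume event and release
 * anyone still blocked in the ioctl.
 */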
static int apm_suspend_notifier(struct notifier_block *nb,
                                unsigned long event,
                                void *dummy)
{
        struct apm_user *as;
        int err;
        unsigned long apm_event;

        /* short-cut emergency suspends */
        if (atomic_read(&userspace_notification_inhibit))
                return NOTIFY_DONE;

        switch (event) {
        case PM_SUSPEND_PREPARE:
        case PM_HIBERNATION_PREPARE:
                apm_event = (event == PM_SUSPEND_PREPARE) ?
                        APM_USER_SUSPEND : APM_USER_HIBERNATION;
                /*
                 * Queue an event to all "writer" users that we want
                 * to suspend and need their ack.
                 */
                mutex_lock(&state_lock);
                down_read(&user_list_lock);

                list_for_each_entry(as, &apm_user_list, list) {
                        if (as->suspend_state != SUSPEND_WAIT && as->reader &&
                            as->writer && as->suser) {
                                as->suspend_state = SUSPEND_PENDING;
                                atomic_inc(&suspend_acks_pending);
                                queue_add_event(&as->queue, apm_event);
                        }
                }

                up_read(&user_list_lock);
                mutex_unlock(&state_lock);
                wake_up_interruptible(&apm_waitqueue);

                /*
                 * Wait for the suspend_acks_pending variable to drop to
                 * zero, meaning everybody acked the suspend event (or the
                 * process was killed.)
                 *
                 * If the app won't answer within a short while we assume it
                 * locked up and ignore it.
                 */
                err = wait_event_interruptible_timeout(
                        apm_suspend_waitqueue,
                        atomic_read(&suspend_acks_pending) == 0,
                        5*HZ);

                /* timed out */
                if (err == 0) {
                        /*
                         * Move anybody who timed out to "ack timeout" state.
                         *
                         * Userspace could still do the ACK right after we
                         * time out but before we enter the locked section
                         * here, but that's fine.
                         */
                        mutex_lock(&state_lock);
                        down_read(&user_list_lock);
                        list_for_each_entry(as, &apm_user_list, list) {
                                if (as->suspend_state == SUSPEND_PENDING ||
                                    as->suspend_state == SUSPEND_READ) {
                                        as->suspend_state = SUSPEND_ACKTO;
                                        atomic_dec(&suspend_acks_pending);
                                }
                        }
                        up_read(&user_list_lock);
                        mutex_unlock(&state_lock);
                }

                /* let suspend proceed */
                if (err >= 0)
                        return NOTIFY_OK;

                /* interrupted by signal */
                return notifier_from_errno(err);

        case PM_POST_SUSPEND:
        case PM_POST_HIBERNATION:
                apm_event = (event == PM_POST_SUSPEND) ?
                        APM_NORMAL_RESUME : APM_HIBERNATION_RESUME;
                /*
                 * Anyone on the APM queues will think we're still suspended.
                 * Send a message so everyone knows we're now awake again.
                 */
                queue_event(apm_event);

                /*
                 * Finally, wake up anyone who is sleeping on the suspend.
                 */
                mutex_lock(&state_lock);
                down_read(&user_list_lock);
                list_for_each_entry(as, &apm_user_list, list) {
                        if (as->suspend_state == SUSPEND_ACKED) {
                                /*
                                 * TODO: maybe grab error code, needs core
                                 * changes to push the error to the notifier
                                 * chain (could use the second parameter if
                                 * implemented)
                                 */
                                as->suspend_result = 0;
                                as->suspend_state = SUSPEND_DONE;
                        }
                }
                up_read(&user_list_lock);
                mutex_unlock(&state_lock);

                wake_up(&apm_suspend_waitqueue);
                return NOTIFY_OK;

        default:
                return NOTIFY_DONE;
        }
}

static struct notifier_block apm_notif_block = {
        .notifier_call = apm_suspend_notifier,
};

static int __init apm_init(void)
{
        int ret;

        if (apm_disabled) {
                printk(KERN_NOTICE "apm: disabled on user request.\n");
                return -ENODEV;
        }

        kapmd_tsk = kthread_create(kapmd, NULL, "kapmd");
        if (IS_ERR(kapmd_tsk)) {
                ret = PTR_ERR(kapmd_tsk);
                kapmd_tsk = NULL;
                goto out;
        }
        wake_up_process(kapmd_tsk);

#ifdef CONFIG_PROC_FS
        proc_create("apm", 0, NULL, &apm_proc_fops);
#endif

        ret = misc_register(&apm_device);
        if (ret)
                goto out_stop;

        ret = register_pm_notifier(&apm_notif_block);
        if (ret)
                goto out_unregister;

        return 0;

 out_unregister:
        misc_deregister(&apm_device);
 out_stop:
        remove_proc_entry("apm", NULL);
        kthread_stop(kapmd_tsk);
 out:
        return ret;
}

static void __exit apm_exit(void)
{
        unregister_pm_notifier(&apm_notif_block);
        misc_deregister(&apm_device);
        remove_proc_entry("apm", NULL);

        kthread_stop(kapmd_tsk);
}

module_init(apm_init);
module_exit(apm_exit);

MODULE_AUTHOR("Stephen Rothwell");
MODULE_DESCRIPTION("Advanced Power Management");
MODULE_LICENSE("GPL");

#ifndef MODULE
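/*
 * Parse the "apm=" boot option documented near the top of this file:
 * "off" disables the driver (apm_init() then refuses to load), "on"
 * re-enables it.
 */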
static int __init apm_setup(char *str)
{
        while ((str != NULL) && (*str != '\0')) {
                if (strncmp(str, "off", 3) == 0)
                        apm_disabled = 1;
                if (strncmp(str, "on", 2) == 0)
                        apm_disabled = 0;
                str = strchr(str, ',');
                if (str != NULL)
                        str += strspn(str, ", \t");
        }
        return 1;
}

__setup("apm=", apm_setup);
#endif

/**
 * apm_queue_event - queue an APM event for kapmd
 * @event: APM event
 *
 * Queue an APM event for kapmd to process and ultimately take the
 * appropriate action. Only a subset of events are handled:
 *   %APM_LOW_BATTERY
 *   %APM_POWER_STATUS_CHANGE
 *   %APM_USER_SUSPEND
 *   %APM_SYS_SUSPEND
 *   %APM_CRITICAL_SUSPEND
 */
void apm_queue_event(apm_event_t event)
{
        unsigned long flags;

        spin_lock_irqsave(&kapmd_queue_lock, flags);
        queue_add_event(&kapmd_queue, event);
        spin_unlock_irqrestore(&kapmd_queue_lock, flags);

        wake_up_interruptible(&kapmd_wait);
}
EXPORT_SYMBOL(apm_queue_event);