// SPDX-License-Identifier: GPL-2.0-only
/*
 * sleep.c - ACPI sleep support.
 *
 * Copyright (c) 2005 Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>
 * Copyright (c) 2004 David Shaohua Li <shaohua.li@intel.com>
 * Copyright (c) 2000-2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 */

#define pr_fmt(fmt) "ACPI: PM: " fmt

#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/dmi.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/suspend.h>
#include <linux/reboot.h>
#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/syscore_ops.h>
#include <asm/io.h>
#include <trace/events/power.h>

#include "internal.h"
#include "sleep.h"

/*
 * Some HW-full platforms do not have _S5, so they may need to rely on
 * EFI power off for a shutdown.
 */
bool acpi_no_s5;
static u8 sleep_states[ACPI_S_STATE_COUNT];

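/*
 * _TTS (Transition To State) tells the platform which sleep state the
 * system is about to enter. It is evaluated again with S0 as the argument
 * once the transition back to the working state is complete, and with S5
 * on reboot via the notifier registered below.
 */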
static void acpi_sleep_tts_switch(u32 acpi_state)
{
	acpi_status status;

	status = acpi_execute_simple_method(NULL, "\\_TTS", acpi_state);
	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
		/*
		 * The OS can't evaluate the _TTS object correctly. Print a
		 * warning message, but don't treat this as fatal.
		 */
		pr_notice("Failure in evaluating _TTS object\n");
	}
}

static int tts_notify_reboot(struct notifier_block *this,
			     unsigned long code, void *x)
{
	acpi_sleep_tts_switch(ACPI_STATE_S5);
	return NOTIFY_DONE;
}

static struct notifier_block tts_notifier = {
	.notifier_call = tts_notify_reboot,
	.next = NULL,
	.priority = 0,
};

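/*
 * Architectures may define acpi_skip_set_wakeup_address() to opt out of
 * programming the firmware waking vector; the default below never skips it.
 */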
#ifndef acpi_skip_set_wakeup_address
#define acpi_skip_set_wakeup_address() false
#endif

static int acpi_sleep_prepare(u32 acpi_state)
{
#ifdef CONFIG_ACPI_SLEEP
	unsigned long acpi_wakeup_address;

	/* do we have a wakeup address for S2 and S3? */
	if (acpi_state == ACPI_STATE_S3 && !acpi_skip_set_wakeup_address()) {
		acpi_wakeup_address = acpi_get_wakeup_address();
		if (!acpi_wakeup_address)
			return -EFAULT;
		acpi_set_waking_vector(acpi_wakeup_address);
	}
#endif
	pr_info("Preparing to enter system sleep state S%d\n", acpi_state);
	acpi_enable_wakeup_devices(acpi_state);
	acpi_enter_sleep_state_prep(acpi_state);
	return 0;
}

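/*
 * A sleep state is considered supported if its _Sx type data can be
 * retrieved and, on hardware-reduced platforms, the FADT also provides
 * the sleep control and sleep status registers needed to enter it.
 */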
bool acpi_sleep_state_supported(u8 sleep_state)
{
	acpi_status status;
	u8 type_a, type_b;

	status = acpi_get_sleep_type_data(sleep_state, &type_a, &type_b);
	return ACPI_SUCCESS(status) && (!acpi_gbl_reduced_hardware
		|| (acpi_gbl_FADT.sleep_control.address
			&& acpi_gbl_FADT.sleep_status.address));
}

#ifdef CONFIG_ACPI_SLEEP
static u32 acpi_target_sleep_state = ACPI_STATE_S0;

u32 acpi_target_system_state(void)
{
	return acpi_target_sleep_state;
}
EXPORT_SYMBOL_GPL(acpi_target_system_state);

static bool pwr_btn_event_pending;

/*
 * The ACPI specification wants us to save NVS memory regions during hibernation
 * and to restore them during the subsequent resume. Windows does that also for
 * suspend to RAM. However, it is known that this mechanism does not work on
 * all machines, so we allow the user to disable it with the help of the
 * 'acpi_sleep=nonvs' kernel command line option.
 */
static bool nvs_nosave;

void __init acpi_nvs_nosave(void)
{
	nvs_nosave = true;
}

/*
 * The ACPI specification wants us to save NVS memory regions during hibernation
 * but says nothing about saving NVS during S3. Not all versions of Windows
 * save NVS on S3 suspend either, and it is clear that not all systems need
 * NVS to be saved at S3 time. To improve suspend/resume time, allow the
 * user to disable saving NVS on S3 if their system does not require it, but
 * continue to save/restore NVS for S4 as specified.
 */
static bool nvs_nosave_s3;

void __init acpi_nvs_nosave_s3(void)
{
	nvs_nosave_s3 = true;
}

static int __init init_nvs_save_s3(const struct dmi_system_id *d)
{
	nvs_nosave_s3 = false;
	return 0;
}

/*
 * ACPI 1.0 wants us to execute _PTS before suspending devices, so we allow the
 * user to request that behavior by using the 'acpi_old_suspend_ordering'
 * kernel command line option that causes the following variable to be set.
 */
static bool old_suspend_ordering;

void __init acpi_old_suspend_ordering(void)
{
	old_suspend_ordering = true;
}

static int __init init_old_suspend_ordering(const struct dmi_system_id *d)
{
	acpi_old_suspend_ordering();
	return 0;
}

static int __init init_nvs_nosave(const struct dmi_system_id *d)
{
	acpi_nvs_nosave();
	return 0;
}

bool acpi_sleep_default_s3;

static int __init init_default_s3(const struct dmi_system_id *d)
{
	acpi_sleep_default_s3 = true;
	return 0;
}

static const struct dmi_system_id acpisleep_dmi_table[] __initconst = {
	{
		.callback = init_old_suspend_ordering,
		.ident = "Abit KN9 (nForce4 variant)",
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "http://www.abit.com.tw/"),
			DMI_MATCH(DMI_BOARD_NAME, "KN9 Series(NF-CK804)"),
		},
	},
	{
		.callback = init_old_suspend_ordering,
		.ident = "HP xw4600 Workstation",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
			DMI_MATCH(DMI_PRODUCT_NAME, "HP xw4600 Workstation"),
		},
	},
	{
		.callback = init_old_suspend_ordering,
		.ident = "Asus Pundit P1-AH2 (M2N8L motherboard)",
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTek Computer INC."),
			DMI_MATCH(DMI_BOARD_NAME, "M2N8L"),
		},
	},
	{
		.callback = init_old_suspend_ordering,
		.ident = "Panasonic CF51-2L",
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR,
				  "Matsushita Electric Industrial Co.,Ltd."),
			DMI_MATCH(DMI_BOARD_NAME, "CF51-2L"),
		},
	},
	{
		.callback = init_nvs_nosave,
		.ident = "Sony Vaio VGN-FW41E_H",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
			DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW41E_H"),
		},
	},
	{
		.callback = init_nvs_nosave,
		.ident = "Sony Vaio VGN-FW21E",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
			DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW21E"),
		},
	},
	{
		.callback = init_nvs_nosave,
		.ident = "Sony Vaio VGN-FW21M",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
			DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW21M"),
		},
	},
	{
		.callback = init_nvs_nosave,
		.ident = "Sony Vaio VPCEB17FX",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
			DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB17FX"),
		},
	},
	{
		.callback = init_nvs_nosave,
		.ident = "Sony Vaio VGN-SR11M",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
			DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR11M"),
		},
	},
	{
		.callback = init_nvs_nosave,
		.ident = "Everex StepNote Series",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Everex Systems, Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "Everex StepNote Series"),
		},
	},
	{
		.callback = init_nvs_nosave,
		.ident = "Sony Vaio VPCEB1Z1E",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
			DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB1Z1E"),
		},
	},
	{
		.callback = init_nvs_nosave,
		.ident = "Sony Vaio VGN-NW130D",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
			DMI_MATCH(DMI_PRODUCT_NAME, "VGN-NW130D"),
		},
	},
	{
		.callback = init_nvs_nosave,
		.ident = "Sony Vaio VPCCW29FX",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
			DMI_MATCH(DMI_PRODUCT_NAME, "VPCCW29FX"),
		},
	},
	{
		.callback = init_nvs_nosave,
		.ident = "Averatec AV1020-ED2",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "AVERATEC"),
			DMI_MATCH(DMI_PRODUCT_NAME, "1000 Series"),
		},
	},
	{
		.callback = init_old_suspend_ordering,
		.ident = "Asus A8N-SLI DELUXE",
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
			DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI DELUXE"),
		},
	},
	{
		.callback = init_old_suspend_ordering,
		.ident = "Asus A8N-SLI Premium",
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
			DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI Premium"),
		},
	},
	{
		.callback = init_nvs_nosave,
		.ident = "Sony Vaio VGN-SR26GN_P",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
			DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR26GN_P"),
		},
	},
	{
		.callback = init_nvs_nosave,
		.ident = "Sony Vaio VPCEB1S1E",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
			DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB1S1E"),
		},
	},
	{
		.callback = init_nvs_nosave,
		.ident = "Sony Vaio VGN-FW520F",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
			DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW520F"),
		},
	},
	{
		.callback = init_nvs_nosave,
		.ident = "Asus K54C",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "K54C"),
		},
	},
	{
		.callback = init_nvs_nosave,
		.ident = "Asus K54HR",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "K54HR"),
		},
	},
	{
		.callback = init_nvs_save_s3,
		.ident = "Asus 1025C",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
			DMI_MATCH(DMI_PRODUCT_NAME, "1025C"),
		},
	},
	/*
	 * The ASUS ROG M16 from 2023 has many events which wake it from
	 * s2idle, resulting in excessive battery drain and a risk of the
	 * laptop overheating. These events can be caused by the MMC or by
	 * the AniMe display, if installed. The match is valid for all of
	 * the GU604V<x> range.
	 */
	{
		.callback = init_default_s3,
		.ident = "ASUS ROG Zephyrus M16 (2023)",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
			DMI_MATCH(DMI_PRODUCT_NAME, "ROG Zephyrus M16 GU604V"),
		},
	},
	/*
	 * https://bugzilla.kernel.org/show_bug.cgi?id=189431
	 * Lenovo G50-45 is a platform later than 2012, but needs nvs memory
	 * saving during S3.
	 */
	{
		.callback = init_nvs_save_s3,
		.ident = "Lenovo G50-45",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
			DMI_MATCH(DMI_PRODUCT_NAME, "80E3"),
		},
	},
	{
		.callback = init_nvs_save_s3,
		.ident = "Lenovo G40-45",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
			DMI_MATCH(DMI_PRODUCT_NAME, "80E1"),
		},
	},
	/*
	 * ThinkPad X1 Tablet(2016) cannot do suspend-to-idle using
	 * the Low Power S0 Idle firmware interface (see
	 * https://bugzilla.kernel.org/show_bug.cgi?id=199057).
	 */
	{
		.callback = init_default_s3,
		.ident = "ThinkPad X1 Tablet(2016)",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
			DMI_MATCH(DMI_PRODUCT_NAME, "20GGA00L00"),
		},
	},
	{},
};

static bool ignore_blacklist;

void __init acpi_sleep_no_blacklist(void)
{
	ignore_blacklist = true;
}

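/*
 * Machines with a BIOS year of 2012 or later are assumed not to need NVS
 * saving across S3, which shortens suspend/resume noticeably; the DMI table
 * above re-enables it (init_nvs_save_s3) or applies other quirks for systems
 * known to deviate from that assumption.
 */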
static void __init acpi_sleep_dmi_check(void)
{
	if (ignore_blacklist)
		return;

	if (dmi_get_bios_year() >= 2012)
		acpi_nvs_nosave_s3();

	dmi_check_system(acpisleep_dmi_table);
}

/**
 * acpi_pm_freeze - Disable the GPEs and suspend EC transactions.
 */
static int acpi_pm_freeze(void)
{
	acpi_disable_all_gpes();
	acpi_os_wait_events_complete();
	acpi_ec_block_transactions();
	return 0;
}

/**
 * acpi_pm_pre_suspend - Enable wakeup devices, "freeze" EC and save NVS.
 */
static int acpi_pm_pre_suspend(void)
{
	acpi_pm_freeze();
	return suspend_nvs_save();
}

/**
 * __acpi_pm_prepare - Prepare the platform to enter the target state.
 *
 * If necessary, set the firmware waking vector and do arch-specific
 * nastiness to get the wakeup code to the waking vector.
 */
static int __acpi_pm_prepare(void)
{
	int error = acpi_sleep_prepare(acpi_target_sleep_state);
	if (error)
		acpi_target_sleep_state = ACPI_STATE_S0;

	return error;
}

/**
 * acpi_pm_prepare - Prepare the platform to enter the target sleep
 *	state and disable the GPEs.
 */
static int acpi_pm_prepare(void)
{
	int error = __acpi_pm_prepare();
	if (!error)
		error = acpi_pm_pre_suspend();

	return error;
}

/**
 * acpi_pm_finish - Instruct the platform to leave a sleep state.
 *
 * This is called after we wake back up (or if entering the sleep state
 * failed).
 */
static void acpi_pm_finish(void)
{
	struct acpi_device *pwr_btn_adev;
	u32 acpi_state = acpi_target_sleep_state;

	acpi_ec_unblock_transactions();
	suspend_nvs_free();

	if (acpi_state == ACPI_STATE_S0)
		return;

	pr_info("Waking up from system sleep state S%d\n", acpi_state);
	acpi_disable_wakeup_devices(acpi_state);
	acpi_leave_sleep_state(acpi_state);

	/* reset firmware waking vector */
	acpi_set_waking_vector(0);

	acpi_target_sleep_state = ACPI_STATE_S0;

	acpi_resume_power_resources();

	/*
	 * If we were woken with the fixed power button, provide a small
	 * hint to userspace in the form of a wakeup event on the fixed power
	 * button device (if it can be found).
	 *
	 * We delay the event generation until now, as the PM layer requires
	 * timekeeping to be running before we generate events.
	 */
	if (!pwr_btn_event_pending)
		return;

	pwr_btn_event_pending = false;
	pwr_btn_adev = acpi_dev_get_first_match_dev(ACPI_BUTTON_HID_POWERF,
						    NULL, -1);
	if (pwr_btn_adev) {
		pm_wakeup_event(&pwr_btn_adev->dev, 0);
		acpi_dev_put(pwr_btn_adev);
	}
}

/**
 * acpi_pm_start - Start system PM transition.
 * @acpi_state: The target ACPI power state to transition to.
 */
static void acpi_pm_start(u32 acpi_state)
{
	acpi_target_sleep_state = acpi_state;
	acpi_sleep_tts_switch(acpi_target_sleep_state);
	acpi_scan_lock_acquire();
}

/**
 * acpi_pm_end - Finish up system PM transition.
 */
static void acpi_pm_end(void)
{
	acpi_turn_off_unused_power_resources();
	acpi_scan_lock_release();
	/*
	 * This is necessary in case acpi_pm_finish() is not called during a
	 * failing transition to a sleep state.
	 */
	acpi_target_sleep_state = ACPI_STATE_S0;
	acpi_sleep_tts_switch(acpi_target_sleep_state);
}
#else /* !CONFIG_ACPI_SLEEP */
#define sleep_no_lps0	(1)
#define acpi_target_sleep_state	ACPI_STATE_S0
#define acpi_sleep_default_s3	(1)
static inline void acpi_sleep_dmi_check(void) {}
#endif /* CONFIG_ACPI_SLEEP */

#ifdef CONFIG_SUSPEND
static u32 acpi_suspend_states[] = {
	[PM_SUSPEND_ON] = ACPI_STATE_S0,
	[PM_SUSPEND_STANDBY] = ACPI_STATE_S1,
	[PM_SUSPEND_MEM] = ACPI_STATE_S3,
	[PM_SUSPEND_MAX] = ACPI_STATE_S5
};
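/*
 * PM_SUSPEND_TO_IDLE has no explicit entry above; its slot defaults to
 * ACPI_STATE_S0 (0), and suspend-to-idle is handled by the s2idle
 * callbacks further down instead of these suspend ops.
 */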

/**
 * acpi_suspend_begin - Set the target system sleep state to the state
 *	associated with given @pm_state, if supported.
 * @pm_state: The target system power management state.
 */
static int acpi_suspend_begin(suspend_state_t pm_state)
{
	u32 acpi_state = acpi_suspend_states[pm_state];
	int error;

	error = (nvs_nosave || nvs_nosave_s3) ? 0 : suspend_nvs_alloc();
	if (error)
		return error;

	if (!sleep_states[acpi_state]) {
		pr_err("ACPI does not support sleep state S%u\n", acpi_state);
		return -ENOSYS;
	}
	if (acpi_state > ACPI_STATE_S1)
		pm_set_suspend_via_firmware();

	acpi_pm_start(acpi_state);
	return 0;
}

/**
 * acpi_suspend_enter - Actually enter a sleep state.
 * @pm_state: ignored
 *
 * Flush caches and go to sleep. For STR we have to call arch-specific
 * assembly, which in turn calls acpi_enter_sleep_state().
 * It's unfortunate, but it works. Please fix if you're feeling frisky.
 */
static int acpi_suspend_enter(suspend_state_t pm_state)
{
	acpi_status status = AE_OK;
	u32 acpi_state = acpi_target_sleep_state;
	int error;

	trace_suspend_resume(TPS("acpi_suspend"), acpi_state, true);
	switch (acpi_state) {
	case ACPI_STATE_S1:
		barrier();
		status = acpi_enter_sleep_state(acpi_state);
		break;

	case ACPI_STATE_S3:
		if (!acpi_suspend_lowlevel)
			return -ENOSYS;
		error = acpi_suspend_lowlevel();
		if (error)
			return error;
		pr_info("Low-level resume complete\n");
		pm_set_resume_via_firmware();
		break;
	}
	trace_suspend_resume(TPS("acpi_suspend"), acpi_state, false);

	/* This violates the spec but is required for bug compatibility. */
	acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1);

	/* Reprogram control registers */
	acpi_leave_sleep_state_prep(acpi_state);

	/*
	 * The ACPI 3.0 spec (p. 62) says that it is the responsibility of
	 * the OSPM to clear the status bit [implying that the POWER_BUTTON
	 * event should not reach userspace].
	 *
	 * However, we do generate a small hint for userspace in the form of
	 * a wakeup event. We flag this condition for now and generate the
	 * event later, as we're currently too early in resume to be able to
	 * generate wakeup events.
	 */
	if (ACPI_SUCCESS(status) && (acpi_state == ACPI_STATE_S3)) {
		acpi_event_status pwr_btn_status = ACPI_EVENT_FLAG_DISABLED;

		acpi_get_event_status(ACPI_EVENT_POWER_BUTTON, &pwr_btn_status);

		if (pwr_btn_status & ACPI_EVENT_FLAG_STATUS_SET) {
			acpi_clear_event(ACPI_EVENT_POWER_BUTTON);
			/* Flag for later */
			pwr_btn_event_pending = true;
		}
	}

	/*
	 * Disable all GPEs and clear their status bits before interrupts are
	 * enabled. Some GPEs (like wakeup GPEs) have no handlers and this can
	 * prevent them from producing spurious interrupts.
	 *
	 * acpi_leave_sleep_state() will reenable specific GPEs later.
	 *
	 * Because this code runs on one CPU with disabled interrupts (all of
	 * the other CPUs are offline at this time), it need not acquire any
	 * sleeping locks which may trigger an implicit preemption point even
	 * if there is no contention, so avoid doing that by using a low-level
	 * library routine here.
	 */
	acpi_hw_disable_all_gpes();
	/* Allow EC transactions to happen. */
	acpi_ec_unblock_transactions();

	suspend_nvs_restore();

	return ACPI_SUCCESS(status) ? 0 : -EFAULT;
}

static int acpi_suspend_state_valid(suspend_state_t pm_state)
{
	u32 acpi_state;

	switch (pm_state) {
	case PM_SUSPEND_ON:
	case PM_SUSPEND_STANDBY:
	case PM_SUSPEND_MEM:
		acpi_state = acpi_suspend_states[pm_state];

		return sleep_states[acpi_state];
	default:
		return 0;
	}
}

static const struct platform_suspend_ops acpi_suspend_ops = {
	.valid = acpi_suspend_state_valid,
	.begin = acpi_suspend_begin,
	.prepare_late = acpi_pm_prepare,
	.enter = acpi_suspend_enter,
	.wake = acpi_pm_finish,
	.end = acpi_pm_end,
};

/**
 * acpi_suspend_begin_old - Set the target system sleep state to the
 *	state associated with given @pm_state, if supported, and
 *	execute the _PTS control method. This function is used if the
 *	pre-ACPI 2.0 suspend ordering has been requested.
 * @pm_state: The target suspend state for the system.
 */
static int acpi_suspend_begin_old(suspend_state_t pm_state)
{
	int error = acpi_suspend_begin(pm_state);
	if (!error)
		error = __acpi_pm_prepare();

	return error;
}

/*
 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
 * been requested.
 */
static const struct platform_suspend_ops acpi_suspend_ops_old = {
	.valid = acpi_suspend_state_valid,
	.begin = acpi_suspend_begin_old,
	.prepare_late = acpi_pm_pre_suspend,
	.enter = acpi_suspend_enter,
	.wake = acpi_pm_finish,
	.end = acpi_pm_end,
	.recover = acpi_pm_finish,
};

static bool s2idle_wakeup;

int acpi_s2idle_begin(void)
{
	acpi_scan_lock_acquire();
	return 0;
}

int acpi_s2idle_prepare(void)
{
	if (acpi_sci_irq_valid()) {
		int error;

		error = enable_irq_wake(acpi_sci_irq);
		if (error)
			pr_warn("Warning: Failed to enable wakeup from IRQ %d: %d\n",
				acpi_sci_irq, error);

		acpi_ec_set_gpe_wake_mask(ACPI_GPE_ENABLE);
	}

	acpi_enable_wakeup_devices(ACPI_STATE_S0);

	/* Change the configuration of GPEs to avoid spurious wakeup. */
	acpi_enable_all_wakeup_gpes();
	acpi_os_wait_events_complete();

	s2idle_wakeup = true;
	return 0;
}

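/*
 * acpi_s2idle_wake - Check whether a wakeup during suspend-to-idle is genuine.
 *
 * Return true if a real wakeup source was found, or false if all of the SCI
 * activity turned out to be spurious, in which case the SCI wake IRQ is
 * rearmed and the caller goes back to sleep.
 */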
bool acpi_s2idle_wake(void)
{
	if (!acpi_sci_irq_valid())
		return pm_wakeup_pending();

	while (pm_wakeup_pending()) {
		/*
		 * If IRQD_WAKEUP_ARMED is set for the SCI at this point, the
		 * SCI has not triggered while suspended, so bail out (the
		 * wakeup is pending anyway and the SCI is not the source of
		 * it).
		 */
		if (irqd_is_wakeup_armed(irq_get_irq_data(acpi_sci_irq))) {
			pm_pr_dbg("Wakeup unrelated to ACPI SCI\n");
			return true;
		}

		/*
		 * If the status bit of any enabled fixed event is set, the
		 * wakeup is regarded as valid.
		 */
		if (acpi_any_fixed_event_status_set()) {
			pm_pr_dbg("ACPI fixed event wakeup\n");
			return true;
		}

		/* Check wakeups from drivers sharing the SCI. */
		if (acpi_check_wakeup_handlers()) {
			pm_pr_dbg("ACPI custom handler wakeup\n");
			return true;
		}

		/*
		 * Check non-EC GPE wakeups and if there are none, cancel the
		 * SCI-related wakeup and dispatch the EC GPE.
		 */
		if (acpi_ec_dispatch_gpe()) {
			pm_pr_dbg("ACPI non-EC GPE wakeup\n");
			return true;
		}

		acpi_os_wait_events_complete();

		/*
		 * The SCI is in the "suspended" state now and it cannot produce
		 * new wakeup events till the rearming below, so if any of them
		 * are pending here, they must be resulting from the processing
		 * of EC events above or coming from somewhere else.
		 */
		if (pm_wakeup_pending()) {
			pm_pr_dbg("Wakeup after ACPI Notify sync\n");
			return true;
		}

		pm_pr_dbg("Rearming ACPI SCI for wakeup\n");

		pm_wakeup_clear(acpi_sci_irq);
		rearm_wake_irq(acpi_sci_irq);
	}

	return false;
}

void acpi_s2idle_restore(void)
{
	/*
	 * Drain pending events before restoring the working-state configuration
	 * of GPEs.
	 */
	acpi_os_wait_events_complete(); /* synchronize GPE processing */
	acpi_ec_flush_work(); /* flush the EC driver's workqueues */
	acpi_os_wait_events_complete(); /* synchronize Notify handling */

	s2idle_wakeup = false;

	acpi_enable_all_runtime_gpes();

	acpi_disable_wakeup_devices(ACPI_STATE_S0);

	if (acpi_sci_irq_valid()) {
		acpi_ec_set_gpe_wake_mask(ACPI_GPE_DISABLE);
		disable_irq_wake(acpi_sci_irq);
	}
}

void acpi_s2idle_end(void)
{
	acpi_scan_lock_release();
}

static const struct platform_s2idle_ops acpi_s2idle_ops = {
	.begin = acpi_s2idle_begin,
	.prepare = acpi_s2idle_prepare,
	.wake = acpi_s2idle_wake,
	.restore = acpi_s2idle_restore,
	.end = acpi_s2idle_end,
};

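/*
 * The __weak implementation below only registers the generic s2idle
 * callbacks; architectures providing a Low Power S0 Idle firmware interface
 * can override it with their own platform-specific setup.
 */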
void __weak acpi_s2idle_setup(void)
{
	if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)
		pr_info("Efficient low-power S0 idle declared\n");

	s2idle_set_ops(&acpi_s2idle_ops);
}

static void __init acpi_sleep_suspend_setup(void)
{
	bool suspend_ops_needed = false;
	int i;

	for (i = ACPI_STATE_S1; i < ACPI_STATE_S4; i++)
		if (acpi_sleep_state_supported(i)) {
			sleep_states[i] = 1;
			suspend_ops_needed = true;
		}

	if (suspend_ops_needed)
		suspend_set_ops(old_suspend_ordering ?
				&acpi_suspend_ops_old : &acpi_suspend_ops);

	acpi_s2idle_setup();
}

#else /* !CONFIG_SUSPEND */
#define s2idle_wakeup	(false)
static inline void acpi_sleep_suspend_setup(void) {}
#endif /* !CONFIG_SUSPEND */

bool acpi_s2idle_wakeup(void)
{
	return s2idle_wakeup;
}

#ifdef CONFIG_PM_SLEEP
static u32 saved_bm_rld;

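/*
 * BM_RLD controls whether bus-master requests take the CPU out of C3.
 * Its value may not survive a trip through S3/S4, so snapshot it when the
 * system suspends and write it back on resume if it has changed.
 */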
static int acpi_save_bm_rld(void)
{
	acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &saved_bm_rld);
	return 0;
}

static void acpi_restore_bm_rld(void)
{
	u32 resumed_bm_rld = 0;

	acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &resumed_bm_rld);
	if (resumed_bm_rld == saved_bm_rld)
		return;

	acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, saved_bm_rld);
}

static struct syscore_ops acpi_sleep_syscore_ops = {
	.suspend = acpi_save_bm_rld,
	.resume = acpi_restore_bm_rld,
};

static void acpi_sleep_syscore_init(void)
{
	register_syscore_ops(&acpi_sleep_syscore_ops);
}
#else
static inline void acpi_sleep_syscore_init(void) {}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_HIBERNATION
static unsigned long s4_hardware_signature;
static struct acpi_table_facs *facs;
int acpi_check_s4_hw_signature = -1; /* Default behaviour is just to warn */

static int acpi_hibernation_begin(pm_message_t stage)
{
	if (!nvs_nosave) {
		int error = suspend_nvs_alloc();
		if (error)
			return error;
	}

	if (stage.event == PM_EVENT_HIBERNATE)
		pm_set_suspend_via_firmware();

	acpi_pm_start(ACPI_STATE_S4);
	return 0;
}

static int acpi_hibernation_enter(void)
{
	acpi_status status = AE_OK;

	/* This shouldn't return. If it returns, we have a problem */
	status = acpi_enter_sleep_state(ACPI_STATE_S4);
	/* Reprogram control registers */
	acpi_leave_sleep_state_prep(ACPI_STATE_S4);

	return ACPI_SUCCESS(status) ? 0 : -EFAULT;
}

static void acpi_hibernation_leave(void)
{
	pm_set_resume_via_firmware();
	/*
	 * If ACPI is not enabled by the BIOS and the boot kernel, we need to
	 * enable it here.
	 */
	acpi_enable();
	/* Reprogram control registers */
	acpi_leave_sleep_state_prep(ACPI_STATE_S4);
	/* Check the hardware signature */
	if (facs && s4_hardware_signature != facs->hardware_signature)
		pr_crit("Hardware changed while hibernated, success doubtful!\n");
	/* Restore the NVS memory area */
	suspend_nvs_restore();
	/* Allow EC transactions to happen. */
	acpi_ec_unblock_transactions();
}

static void acpi_pm_thaw(void)
{
	acpi_ec_unblock_transactions();
	acpi_enable_all_runtime_gpes();
}

static const struct platform_hibernation_ops acpi_hibernation_ops = {
	.begin = acpi_hibernation_begin,
	.end = acpi_pm_end,
	.pre_snapshot = acpi_pm_prepare,
	.finish = acpi_pm_finish,
	.prepare = acpi_pm_prepare,
	.enter = acpi_hibernation_enter,
	.leave = acpi_hibernation_leave,
	.pre_restore = acpi_pm_freeze,
	.restore_cleanup = acpi_pm_thaw,
};

/**
 * acpi_hibernation_begin_old - Set the target system sleep state to
 *	ACPI_STATE_S4 and execute the _PTS control method. This
 *	function is used if the pre-ACPI 2.0 suspend ordering has been
 *	requested.
 * @stage: The power management event message.
 */
static int acpi_hibernation_begin_old(pm_message_t stage)
{
	int error;
	/*
	 * The _TTS object should always be evaluated before the _PTS object.
	 * When old_suspend_ordering is true, the _PTS object is evaluated
	 * in acpi_sleep_prepare().
	 */
	acpi_sleep_tts_switch(ACPI_STATE_S4);

	error = acpi_sleep_prepare(ACPI_STATE_S4);
	if (error)
		return error;

	if (!nvs_nosave) {
		error = suspend_nvs_alloc();
		if (error)
			return error;
	}

	if (stage.event == PM_EVENT_HIBERNATE)
		pm_set_suspend_via_firmware();

	acpi_target_sleep_state = ACPI_STATE_S4;
	acpi_scan_lock_acquire();
	return 0;
}

/*
 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
 * been requested.
 */
static const struct platform_hibernation_ops acpi_hibernation_ops_old = {
	.begin = acpi_hibernation_begin_old,
	.end = acpi_pm_end,
	.pre_snapshot = acpi_pm_pre_suspend,
	.prepare = acpi_pm_freeze,
	.finish = acpi_pm_finish,
	.enter = acpi_hibernation_enter,
	.leave = acpi_hibernation_leave,
	.pre_restore = acpi_pm_freeze,
	.restore_cleanup = acpi_pm_thaw,
	.recover = acpi_pm_finish,
};

static void acpi_sleep_hibernate_setup(void)
{
	if (!acpi_sleep_state_supported(ACPI_STATE_S4))
		return;

	hibernation_set_ops(old_suspend_ordering ?
			    &acpi_hibernation_ops_old : &acpi_hibernation_ops);
	sleep_states[ACPI_STATE_S4] = 1;
	if (!acpi_check_s4_hw_signature)
		return;

	acpi_get_table(ACPI_SIG_FACS, 1, (struct acpi_table_header **)&facs);
	if (facs) {
		/*
		 * s4_hardware_signature is only used to warn about a
		 * hardware signature mismatch after we attempt to resume
		 * (in violation of the ACPI specification).
		 */
		s4_hardware_signature = facs->hardware_signature;

		if (acpi_check_s4_hw_signature > 0) {
			/*
			 * If we're actually obeying the ACPI specification
			 * then the signature is written out as part of the
			 * swsusp header, in order to allow the boot kernel
			 * to gracefully decline to resume.
			 */
			swsusp_hardware_signature = facs->hardware_signature;
		}
	}
}
#else /* !CONFIG_HIBERNATION */
static inline void acpi_sleep_hibernate_setup(void) {}
#endif /* !CONFIG_HIBERNATION */

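/*
 * Powering off is split in two steps: acpi_power_off_prepare() runs early,
 * with interrupts still enabled, to evaluate the firmware's sleep preparation
 * methods via acpi_sleep_prepare(ACPI_STATE_S5) and to quiesce GPEs, while
 * acpi_power_off() performs the final S5 entry with interrupts disabled.
 * The prepare handler is also registered for restart, since some firmware
 * expects the same S5 preparation before a reboot.
 */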
static int acpi_power_off_prepare(struct sys_off_data *data)
{
	/* Prepare to power off the system */
	acpi_sleep_prepare(ACPI_STATE_S5);
	acpi_disable_all_gpes();
	acpi_os_wait_events_complete();
	return NOTIFY_DONE;
}

static int acpi_power_off(struct sys_off_data *data)
{
	/* acpi_sleep_prepare(ACPI_STATE_S5) should have already been called */
	pr_debug("%s called\n", __func__);
	local_irq_disable();
	acpi_enter_sleep_state(ACPI_STATE_S5);
	return NOTIFY_DONE;
}

int __init acpi_sleep_init(void)
{
	char supported[ACPI_S_STATE_COUNT * 3 + 1];
	char *pos = supported;
	int i;

	acpi_sleep_dmi_check();

	sleep_states[ACPI_STATE_S0] = 1;

	acpi_sleep_syscore_init();
	acpi_sleep_suspend_setup();
	acpi_sleep_hibernate_setup();

	if (acpi_sleep_state_supported(ACPI_STATE_S5)) {
		sleep_states[ACPI_STATE_S5] = 1;

		register_sys_off_handler(SYS_OFF_MODE_POWER_OFF_PREPARE,
					 SYS_OFF_PRIO_FIRMWARE,
					 acpi_power_off_prepare, NULL);

		register_sys_off_handler(SYS_OFF_MODE_POWER_OFF,
					 SYS_OFF_PRIO_FIRMWARE,
					 acpi_power_off, NULL);

		/*
		 * Windows uses S5 for reboot, so some BIOSes depend on it to
		 * perform proper reboot.
		 */
		register_sys_off_handler(SYS_OFF_MODE_RESTART_PREPARE,
					 SYS_OFF_PRIO_FIRMWARE,
					 acpi_power_off_prepare, NULL);
	} else {
		acpi_no_s5 = true;
	}

	supported[0] = 0;
	for (i = 0; i < ACPI_S_STATE_COUNT; i++) {
		if (sleep_states[i])
			pos += sprintf(pos, " S%d", i);
	}
	pr_info("(supports%s)\n", supported);

	/*
	 * Register tts_notifier on the reboot notifier list so that the _TTS
	 * object can also be evaluated when the system enters S5.
	 */
	register_reboot_notifier(&tts_notifier);
	return 0;
}
1/*
2 * sleep.c - ACPI sleep support.
3 *
4 * Copyright (c) 2005 Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>
5 * Copyright (c) 2004 David Shaohua Li <shaohua.li@intel.com>
6 * Copyright (c) 2000-2003 Patrick Mochel
7 * Copyright (c) 2003 Open Source Development Lab
8 *
9 * This file is released under the GPLv2.
10 *
11 */
12
13#include <linux/delay.h>
14#include <linux/irq.h>
15#include <linux/dmi.h>
16#include <linux/device.h>
17#include <linux/interrupt.h>
18#include <linux/suspend.h>
19#include <linux/reboot.h>
20#include <linux/acpi.h>
21#include <linux/module.h>
22#include <linux/syscore_ops.h>
23#include <asm/io.h>
24#include <trace/events/power.h>
25
26#include "internal.h"
27#include "sleep.h"
28
29/*
30 * Some HW-full platforms do not have _S5, so they may need
31 * to leverage efi power off for a shutdown.
32 */
33bool acpi_no_s5;
34static u8 sleep_states[ACPI_S_STATE_COUNT];
35
36static void acpi_sleep_tts_switch(u32 acpi_state)
37{
38 acpi_status status;
39
40 status = acpi_execute_simple_method(NULL, "\\_TTS", acpi_state);
41 if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
42 /*
43 * OS can't evaluate the _TTS object correctly. Some warning
44 * message will be printed. But it won't break anything.
45 */
46 printk(KERN_NOTICE "Failure in evaluating _TTS object\n");
47 }
48}
49
50static int tts_notify_reboot(struct notifier_block *this,
51 unsigned long code, void *x)
52{
53 acpi_sleep_tts_switch(ACPI_STATE_S5);
54 return NOTIFY_DONE;
55}
56
57static struct notifier_block tts_notifier = {
58 .notifier_call = tts_notify_reboot,
59 .next = NULL,
60 .priority = 0,
61};
62
63static int acpi_sleep_prepare(u32 acpi_state)
64{
65#ifdef CONFIG_ACPI_SLEEP
66 /* do we have a wakeup address for S2 and S3? */
67 if (acpi_state == ACPI_STATE_S3) {
68 if (!acpi_wakeup_address)
69 return -EFAULT;
70 acpi_set_waking_vector(acpi_wakeup_address);
71
72 }
73 ACPI_FLUSH_CPU_CACHE();
74#endif
75 printk(KERN_INFO PREFIX "Preparing to enter system sleep state S%d\n",
76 acpi_state);
77 acpi_enable_wakeup_devices(acpi_state);
78 acpi_enter_sleep_state_prep(acpi_state);
79 return 0;
80}
81
82static bool acpi_sleep_state_supported(u8 sleep_state)
83{
84 acpi_status status;
85 u8 type_a, type_b;
86
87 status = acpi_get_sleep_type_data(sleep_state, &type_a, &type_b);
88 return ACPI_SUCCESS(status) && (!acpi_gbl_reduced_hardware
89 || (acpi_gbl_FADT.sleep_control.address
90 && acpi_gbl_FADT.sleep_status.address));
91}
92
93#ifdef CONFIG_ACPI_SLEEP
94static u32 acpi_target_sleep_state = ACPI_STATE_S0;
95
96u32 acpi_target_system_state(void)
97{
98 return acpi_target_sleep_state;
99}
100EXPORT_SYMBOL_GPL(acpi_target_system_state);
101
102static bool pwr_btn_event_pending;
103
104/*
105 * The ACPI specification wants us to save NVS memory regions during hibernation
106 * and to restore them during the subsequent resume. Windows does that also for
107 * suspend to RAM. However, it is known that this mechanism does not work on
108 * all machines, so we allow the user to disable it with the help of the
109 * 'acpi_sleep=nonvs' kernel command line option.
110 */
111static bool nvs_nosave;
112
113void __init acpi_nvs_nosave(void)
114{
115 nvs_nosave = true;
116}
117
118/*
119 * The ACPI specification wants us to save NVS memory regions during hibernation
120 * but says nothing about saving NVS during S3. Not all versions of Windows
121 * save NVS on S3 suspend either, and it is clear that not all systems need
122 * NVS to be saved at S3 time. To improve suspend/resume time, allow the
123 * user to disable saving NVS on S3 if their system does not require it, but
124 * continue to save/restore NVS for S4 as specified.
125 */
126static bool nvs_nosave_s3;
127
128void __init acpi_nvs_nosave_s3(void)
129{
130 nvs_nosave_s3 = true;
131}
132
133static int __init init_nvs_save_s3(const struct dmi_system_id *d)
134{
135 nvs_nosave_s3 = false;
136 return 0;
137}
138
139/*
140 * ACPI 1.0 wants us to execute _PTS before suspending devices, so we allow the
141 * user to request that behavior by using the 'acpi_old_suspend_ordering'
142 * kernel command line option that causes the following variable to be set.
143 */
144static bool old_suspend_ordering;
145
146void __init acpi_old_suspend_ordering(void)
147{
148 old_suspend_ordering = true;
149}
150
151static int __init init_old_suspend_ordering(const struct dmi_system_id *d)
152{
153 acpi_old_suspend_ordering();
154 return 0;
155}
156
157static int __init init_nvs_nosave(const struct dmi_system_id *d)
158{
159 acpi_nvs_nosave();
160 return 0;
161}
162
163static bool acpi_sleep_no_lps0;
164
165static int __init init_no_lps0(const struct dmi_system_id *d)
166{
167 acpi_sleep_no_lps0 = true;
168 return 0;
169}
170
171static const struct dmi_system_id acpisleep_dmi_table[] __initconst = {
172 {
173 .callback = init_old_suspend_ordering,
174 .ident = "Abit KN9 (nForce4 variant)",
175 .matches = {
176 DMI_MATCH(DMI_BOARD_VENDOR, "http://www.abit.com.tw/"),
177 DMI_MATCH(DMI_BOARD_NAME, "KN9 Series(NF-CK804)"),
178 },
179 },
180 {
181 .callback = init_old_suspend_ordering,
182 .ident = "HP xw4600 Workstation",
183 .matches = {
184 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
185 DMI_MATCH(DMI_PRODUCT_NAME, "HP xw4600 Workstation"),
186 },
187 },
188 {
189 .callback = init_old_suspend_ordering,
190 .ident = "Asus Pundit P1-AH2 (M2N8L motherboard)",
191 .matches = {
192 DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTek Computer INC."),
193 DMI_MATCH(DMI_BOARD_NAME, "M2N8L"),
194 },
195 },
196 {
197 .callback = init_old_suspend_ordering,
198 .ident = "Panasonic CF51-2L",
199 .matches = {
200 DMI_MATCH(DMI_BOARD_VENDOR,
201 "Matsushita Electric Industrial Co.,Ltd."),
202 DMI_MATCH(DMI_BOARD_NAME, "CF51-2L"),
203 },
204 },
205 {
206 .callback = init_nvs_nosave,
207 .ident = "Sony Vaio VGN-FW41E_H",
208 .matches = {
209 DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
210 DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW41E_H"),
211 },
212 },
213 {
214 .callback = init_nvs_nosave,
215 .ident = "Sony Vaio VGN-FW21E",
216 .matches = {
217 DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
218 DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW21E"),
219 },
220 },
221 {
222 .callback = init_nvs_nosave,
223 .ident = "Sony Vaio VGN-FW21M",
224 .matches = {
225 DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
226 DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW21M"),
227 },
228 },
229 {
230 .callback = init_nvs_nosave,
231 .ident = "Sony Vaio VPCEB17FX",
232 .matches = {
233 DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
234 DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB17FX"),
235 },
236 },
237 {
238 .callback = init_nvs_nosave,
239 .ident = "Sony Vaio VGN-SR11M",
240 .matches = {
241 DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
242 DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR11M"),
243 },
244 },
245 {
246 .callback = init_nvs_nosave,
247 .ident = "Everex StepNote Series",
248 .matches = {
249 DMI_MATCH(DMI_SYS_VENDOR, "Everex Systems, Inc."),
250 DMI_MATCH(DMI_PRODUCT_NAME, "Everex StepNote Series"),
251 },
252 },
253 {
254 .callback = init_nvs_nosave,
255 .ident = "Sony Vaio VPCEB1Z1E",
256 .matches = {
257 DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
258 DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB1Z1E"),
259 },
260 },
261 {
262 .callback = init_nvs_nosave,
263 .ident = "Sony Vaio VGN-NW130D",
264 .matches = {
265 DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
266 DMI_MATCH(DMI_PRODUCT_NAME, "VGN-NW130D"),
267 },
268 },
269 {
270 .callback = init_nvs_nosave,
271 .ident = "Sony Vaio VPCCW29FX",
272 .matches = {
273 DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
274 DMI_MATCH(DMI_PRODUCT_NAME, "VPCCW29FX"),
275 },
276 },
277 {
278 .callback = init_nvs_nosave,
279 .ident = "Averatec AV1020-ED2",
280 .matches = {
281 DMI_MATCH(DMI_SYS_VENDOR, "AVERATEC"),
282 DMI_MATCH(DMI_PRODUCT_NAME, "1000 Series"),
283 },
284 },
285 {
286 .callback = init_old_suspend_ordering,
287 .ident = "Asus A8N-SLI DELUXE",
288 .matches = {
289 DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
290 DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI DELUXE"),
291 },
292 },
293 {
294 .callback = init_old_suspend_ordering,
295 .ident = "Asus A8N-SLI Premium",
296 .matches = {
297 DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
298 DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI Premium"),
299 },
300 },
301 {
302 .callback = init_nvs_nosave,
303 .ident = "Sony Vaio VGN-SR26GN_P",
304 .matches = {
305 DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
306 DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR26GN_P"),
307 },
308 },
309 {
310 .callback = init_nvs_nosave,
311 .ident = "Sony Vaio VPCEB1S1E",
312 .matches = {
313 DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
314 DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB1S1E"),
315 },
316 },
317 {
318 .callback = init_nvs_nosave,
319 .ident = "Sony Vaio VGN-FW520F",
320 .matches = {
321 DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
322 DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW520F"),
323 },
324 },
325 {
326 .callback = init_nvs_nosave,
327 .ident = "Asus K54C",
328 .matches = {
329 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
330 DMI_MATCH(DMI_PRODUCT_NAME, "K54C"),
331 },
332 },
333 {
334 .callback = init_nvs_nosave,
335 .ident = "Asus K54HR",
336 .matches = {
337 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
338 DMI_MATCH(DMI_PRODUCT_NAME, "K54HR"),
339 },
340 },
341 /*
342 * https://bugzilla.kernel.org/show_bug.cgi?id=189431
343 * Lenovo G50-45 is a platform later than 2012, but needs nvs memory
344 * saving during S3.
345 */
346 {
347 .callback = init_nvs_save_s3,
348 .ident = "Lenovo G50-45",
349 .matches = {
350 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
351 DMI_MATCH(DMI_PRODUCT_NAME, "80E3"),
352 },
353 },
354 /*
355 * https://bugzilla.kernel.org/show_bug.cgi?id=196907
356 * Some Dell XPS13 9360 cannot do suspend-to-idle using the Low Power
357 * S0 Idle firmware interface.
358 */
359 {
360 .callback = init_no_lps0,
361 .ident = "Dell XPS13 9360",
362 .matches = {
363 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
364 DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9360"),
365 },
366 },
367 /*
368 * ThinkPad X1 Tablet(2016) cannot do suspend-to-idle using
369 * the Low Power S0 Idle firmware interface (see
370 * https://bugzilla.kernel.org/show_bug.cgi?id=199057).
371 */
372 {
373 .callback = init_no_lps0,
374 .ident = "ThinkPad X1 Tablet(2016)",
375 .matches = {
376 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
377 DMI_MATCH(DMI_PRODUCT_NAME, "20GGA00L00"),
378 },
379 },
380 {},
381};
382
383static bool ignore_blacklist;
384
385void __init acpi_sleep_no_blacklist(void)
386{
387 ignore_blacklist = true;
388}
389
390static void __init acpi_sleep_dmi_check(void)
391{
392 if (ignore_blacklist)
393 return;
394
395 if (dmi_get_bios_year() >= 2012)
396 acpi_nvs_nosave_s3();
397
398 dmi_check_system(acpisleep_dmi_table);
399}
400
401/**
402 * acpi_pm_freeze - Disable the GPEs and suspend EC transactions.
403 */
404static int acpi_pm_freeze(void)
405{
406 acpi_disable_all_gpes();
407 acpi_os_wait_events_complete();
408 acpi_ec_block_transactions();
409 return 0;
410}
411
412/**
413 * acpi_pre_suspend - Enable wakeup devices, "freeze" EC and save NVS.
414 */
415static int acpi_pm_pre_suspend(void)
416{
417 acpi_pm_freeze();
418 return suspend_nvs_save();
419}
420
421/**
422 * __acpi_pm_prepare - Prepare the platform to enter the target state.
423 *
424 * If necessary, set the firmware waking vector and do arch-specific
425 * nastiness to get the wakeup code to the waking vector.
426 */
427static int __acpi_pm_prepare(void)
428{
429 int error = acpi_sleep_prepare(acpi_target_sleep_state);
430 if (error)
431 acpi_target_sleep_state = ACPI_STATE_S0;
432
433 return error;
434}
435
436/**
437 * acpi_pm_prepare - Prepare the platform to enter the target sleep
438 * state and disable the GPEs.
439 */
440static int acpi_pm_prepare(void)
441{
442 int error = __acpi_pm_prepare();
443 if (!error)
444 error = acpi_pm_pre_suspend();
445
446 return error;
447}
448
449static int find_powerf_dev(struct device *dev, void *data)
450{
451 struct acpi_device *device = to_acpi_device(dev);
452 const char *hid = acpi_device_hid(device);
453
454 return !strcmp(hid, ACPI_BUTTON_HID_POWERF);
455}
456
457/**
458 * acpi_pm_finish - Instruct the platform to leave a sleep state.
459 *
460 * This is called after we wake back up (or if entering the sleep state
461 * failed).
462 */
463static void acpi_pm_finish(void)
464{
465 struct device *pwr_btn_dev;
466 u32 acpi_state = acpi_target_sleep_state;
467
468 acpi_ec_unblock_transactions();
469 suspend_nvs_free();
470
471 if (acpi_state == ACPI_STATE_S0)
472 return;
473
474 printk(KERN_INFO PREFIX "Waking up from system sleep state S%d\n",
475 acpi_state);
476 acpi_disable_wakeup_devices(acpi_state);
477 acpi_leave_sleep_state(acpi_state);
478
479 /* reset firmware waking vector */
480 acpi_set_waking_vector(0);
481
482 acpi_target_sleep_state = ACPI_STATE_S0;
483
484 acpi_resume_power_resources();
485
486 /* If we were woken with the fixed power button, provide a small
487 * hint to userspace in the form of a wakeup event on the fixed power
488 * button device (if it can be found).
489 *
490 * We delay the event generation til now, as the PM layer requires
491 * timekeeping to be running before we generate events. */
492 if (!pwr_btn_event_pending)
493 return;
494
495 pwr_btn_event_pending = false;
496 pwr_btn_dev = bus_find_device(&acpi_bus_type, NULL, NULL,
497 find_powerf_dev);
498 if (pwr_btn_dev) {
499 pm_wakeup_event(pwr_btn_dev, 0);
500 put_device(pwr_btn_dev);
501 }
502}
503
504/**
505 * acpi_pm_start - Start system PM transition.
506 */
507static void acpi_pm_start(u32 acpi_state)
508{
509 acpi_target_sleep_state = acpi_state;
510 acpi_sleep_tts_switch(acpi_target_sleep_state);
511 acpi_scan_lock_acquire();
512}
513
514/**
515 * acpi_pm_end - Finish up system PM transition.
516 */
517static void acpi_pm_end(void)
518{
519 acpi_turn_off_unused_power_resources();
520 acpi_scan_lock_release();
521 /*
522 * This is necessary in case acpi_pm_finish() is not called during a
523 * failing transition to a sleep state.
524 */
525 acpi_target_sleep_state = ACPI_STATE_S0;
526 acpi_sleep_tts_switch(acpi_target_sleep_state);
527}
528#else /* !CONFIG_ACPI_SLEEP */
529#define acpi_target_sleep_state ACPI_STATE_S0
530#define acpi_sleep_no_lps0 (false)
531static inline void acpi_sleep_dmi_check(void) {}
532#endif /* CONFIG_ACPI_SLEEP */
533
534#ifdef CONFIG_SUSPEND
535static u32 acpi_suspend_states[] = {
536 [PM_SUSPEND_ON] = ACPI_STATE_S0,
537 [PM_SUSPEND_STANDBY] = ACPI_STATE_S1,
538 [PM_SUSPEND_MEM] = ACPI_STATE_S3,
539 [PM_SUSPEND_MAX] = ACPI_STATE_S5
540};
541
542/**
543 * acpi_suspend_begin - Set the target system sleep state to the state
544 * associated with given @pm_state, if supported.
545 */
546static int acpi_suspend_begin(suspend_state_t pm_state)
547{
548 u32 acpi_state = acpi_suspend_states[pm_state];
549 int error;
550
551 error = (nvs_nosave || nvs_nosave_s3) ? 0 : suspend_nvs_alloc();
552 if (error)
553 return error;
554
555 if (!sleep_states[acpi_state]) {
556 pr_err("ACPI does not support sleep state S%u\n", acpi_state);
557 return -ENOSYS;
558 }
559 if (acpi_state > ACPI_STATE_S1)
560 pm_set_suspend_via_firmware();
561
562 acpi_pm_start(acpi_state);
563 return 0;
564}
565
566/**
567 * acpi_suspend_enter - Actually enter a sleep state.
568 * @pm_state: ignored
569 *
570 * Flush caches and go to sleep. For STR we have to call arch-specific
571 * assembly, which in turn call acpi_enter_sleep_state().
572 * It's unfortunate, but it works. Please fix if you're feeling frisky.
573 */
574static int acpi_suspend_enter(suspend_state_t pm_state)
575{
576 acpi_status status = AE_OK;
577 u32 acpi_state = acpi_target_sleep_state;
578 int error;
579
580 ACPI_FLUSH_CPU_CACHE();
581
582 trace_suspend_resume(TPS("acpi_suspend"), acpi_state, true);
583 switch (acpi_state) {
584 case ACPI_STATE_S1:
585 barrier();
586 status = acpi_enter_sleep_state(acpi_state);
587 break;
588
589 case ACPI_STATE_S3:
590 if (!acpi_suspend_lowlevel)
591 return -ENOSYS;
592 error = acpi_suspend_lowlevel();
593 if (error)
594 return error;
595 pr_info(PREFIX "Low-level resume complete\n");
596 pm_set_resume_via_firmware();
597 break;
598 }
599 trace_suspend_resume(TPS("acpi_suspend"), acpi_state, false);
600
601 /* This violates the spec but is required for bug compatibility. */
602 acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1);
603
604 /* Reprogram control registers */
605 acpi_leave_sleep_state_prep(acpi_state);
606
607 /* ACPI 3.0 specs (P62) says that it's the responsibility
608 * of the OSPM to clear the status bit [ implying that the
609 * POWER_BUTTON event should not reach userspace ]
610 *
611 * However, we do generate a small hint for userspace in the form of
612 * a wakeup event. We flag this condition for now and generate the
613 * event later, as we're currently too early in resume to be able to
614 * generate wakeup events.
615 */
616 if (ACPI_SUCCESS(status) && (acpi_state == ACPI_STATE_S3)) {
617 acpi_event_status pwr_btn_status = ACPI_EVENT_FLAG_DISABLED;
618
619 acpi_get_event_status(ACPI_EVENT_POWER_BUTTON, &pwr_btn_status);
620
621 if (pwr_btn_status & ACPI_EVENT_FLAG_STATUS_SET) {
622 acpi_clear_event(ACPI_EVENT_POWER_BUTTON);
623 /* Flag for later */
624 pwr_btn_event_pending = true;
625 }
626 }
627
628 /*
629 * Disable and clear GPE status before interrupt is enabled. Some GPEs
630 * (like wakeup GPE) haven't handler, this can avoid such GPE misfire.
631 * acpi_leave_sleep_state will reenable specific GPEs later
632 */
633 acpi_disable_all_gpes();
634 /* Allow EC transactions to happen. */
635 acpi_ec_unblock_transactions();
636
637 suspend_nvs_restore();
638
639 return ACPI_SUCCESS(status) ? 0 : -EFAULT;
640}
641
642static int acpi_suspend_state_valid(suspend_state_t pm_state)
643{
644 u32 acpi_state;
645
646 switch (pm_state) {
647 case PM_SUSPEND_ON:
648 case PM_SUSPEND_STANDBY:
649 case PM_SUSPEND_MEM:
650 acpi_state = acpi_suspend_states[pm_state];
651
652 return sleep_states[acpi_state];
653 default:
654 return 0;
655 }
656}
657
658static const struct platform_suspend_ops acpi_suspend_ops = {
659 .valid = acpi_suspend_state_valid,
660 .begin = acpi_suspend_begin,
661 .prepare_late = acpi_pm_prepare,
662 .enter = acpi_suspend_enter,
663 .wake = acpi_pm_finish,
664 .end = acpi_pm_end,
665};
666
667/**
668 * acpi_suspend_begin_old - Set the target system sleep state to the
669 * state associated with given @pm_state, if supported, and
670 * execute the _PTS control method. This function is used if the
671 * pre-ACPI 2.0 suspend ordering has been requested.
672 */
673static int acpi_suspend_begin_old(suspend_state_t pm_state)
674{
675 int error = acpi_suspend_begin(pm_state);
676 if (!error)
677 error = __acpi_pm_prepare();
678
679 return error;
680}
681
682/*
683 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
684 * been requested.
685 */
686static const struct platform_suspend_ops acpi_suspend_ops_old = {
687 .valid = acpi_suspend_state_valid,
688 .begin = acpi_suspend_begin_old,
689 .prepare_late = acpi_pm_pre_suspend,
690 .enter = acpi_suspend_enter,
691 .wake = acpi_pm_finish,
692 .end = acpi_pm_end,
693 .recover = acpi_pm_finish,
694};
695
696static bool s2idle_in_progress;
697static bool s2idle_wakeup;
698
699/*
700 * On platforms supporting the Low Power S0 Idle interface there is an ACPI
701 * device object with the PNP0D80 compatible device ID (System Power Management
702 * Controller) and a specific _DSM method under it. That method, if present,
703 * can be used to indicate to the platform that the OS is transitioning into a
704 * low-power state in which certain types of activity are not desirable or that
705 * it is leaving such a state, which allows the platform to adjust its operation
706 * mode accordingly.
707 */
708static const struct acpi_device_id lps0_device_ids[] = {
709 {"PNP0D80", },
710 {"", },
711};
712
713#define ACPI_LPS0_DSM_UUID "c4eb40a0-6cd2-11e2-bcfd-0800200c9a66"
714
715#define ACPI_LPS0_GET_DEVICE_CONSTRAINTS 1
716#define ACPI_LPS0_SCREEN_OFF 3
717#define ACPI_LPS0_SCREEN_ON 4
718#define ACPI_LPS0_ENTRY 5
719#define ACPI_LPS0_EXIT 6
720
721#define ACPI_LPS0_SCREEN_MASK ((1 << ACPI_LPS0_SCREEN_OFF) | (1 << ACPI_LPS0_SCREEN_ON))
722#define ACPI_LPS0_PLATFORM_MASK ((1 << ACPI_LPS0_ENTRY) | (1 << ACPI_LPS0_EXIT))
723
724static acpi_handle lps0_device_handle;
725static guid_t lps0_dsm_guid;
726static char lps0_dsm_func_mask;
727
728/* Device constraint entry structure */
729struct lpi_device_info {
730 char *name;
731 int enabled;
732 union acpi_object *package;
733};
734
735/* Constraint package structure */
736struct lpi_device_constraint {
737 int uid;
738 int min_dstate;
739 int function_states;
740};
741
742struct lpi_constraints {
743 acpi_handle handle;
744 int min_dstate;
745};
746
747static struct lpi_constraints *lpi_constraints_table;
748static int lpi_constraints_table_size;
749
750static void lpi_device_get_constraints(void)
751{
752 union acpi_object *out_obj;
753 int i;
754
755 out_obj = acpi_evaluate_dsm_typed(lps0_device_handle, &lps0_dsm_guid,
756 1, ACPI_LPS0_GET_DEVICE_CONSTRAINTS,
757 NULL, ACPI_TYPE_PACKAGE);
758
759 acpi_handle_debug(lps0_device_handle, "_DSM function 1 eval %s\n",
760 out_obj ? "successful" : "failed");
761
762 if (!out_obj)
763 return;
764
765 lpi_constraints_table = kcalloc(out_obj->package.count,
766 sizeof(*lpi_constraints_table),
767 GFP_KERNEL);
768 if (!lpi_constraints_table)
769 goto free_acpi_buffer;
770
771 acpi_handle_debug(lps0_device_handle, "LPI: constraints list begin:\n");
772
773 for (i = 0; i < out_obj->package.count; i++) {
774 struct lpi_constraints *constraint;
775 acpi_status status;
776 union acpi_object *package = &out_obj->package.elements[i];
777 struct lpi_device_info info = { };
778 int package_count = 0, j;
779
780 if (!package)
781 continue;
782
783 for (j = 0; j < package->package.count; ++j) {
784 union acpi_object *element =
785 &(package->package.elements[j]);
786
787 switch (element->type) {
788 case ACPI_TYPE_INTEGER:
789 info.enabled = element->integer.value;
790 break;
791 case ACPI_TYPE_STRING:
792 info.name = element->string.pointer;
793 break;
794 case ACPI_TYPE_PACKAGE:
795 package_count = element->package.count;
796 info.package = element->package.elements;
797 break;
798 }
799 }
800
801 if (!info.enabled || !info.package || !info.name)
802 continue;
803
804 constraint = &lpi_constraints_table[lpi_constraints_table_size];
805
806 status = acpi_get_handle(NULL, info.name, &constraint->handle);
807 if (ACPI_FAILURE(status))
808 continue;
809
810 acpi_handle_debug(lps0_device_handle,
811 "index:%d Name:%s\n", i, info.name);
812
813 constraint->min_dstate = -1;
814
815 for (j = 0; j < package_count; ++j) {
816 union acpi_object *info_obj = &info.package[j];
817 union acpi_object *cnstr_pkg;
818 union acpi_object *obj;
819 struct lpi_device_constraint dev_info;
820
821 switch (info_obj->type) {
822 case ACPI_TYPE_INTEGER:
823 /* version */
824 break;
825 case ACPI_TYPE_PACKAGE:
826 if (info_obj->package.count < 2)
827 break;
828
829 cnstr_pkg = info_obj->package.elements;
830 obj = &cnstr_pkg[0];
831 dev_info.uid = obj->integer.value;
832 obj = &cnstr_pkg[1];
833 dev_info.min_dstate = obj->integer.value;
834
835 acpi_handle_debug(lps0_device_handle,
836 "uid:%d min_dstate:%s\n",
837 dev_info.uid,
838 acpi_power_state_string(dev_info.min_dstate));
839
840 constraint->min_dstate = dev_info.min_dstate;
841 break;
842 }
843 }
844
845 if (constraint->min_dstate < 0) {
846 acpi_handle_debug(lps0_device_handle,
847 "Incomplete constraint defined\n");
848 continue;
849 }
850
851 lpi_constraints_table_size++;
852 }
853
854 acpi_handle_debug(lps0_device_handle, "LPI: constraints list end\n");
855
856free_acpi_buffer:
857 ACPI_FREE(out_obj);
858}
859
860static void lpi_check_constraints(void)
861{
862 int i;
863
864 for (i = 0; i < lpi_constraints_table_size; ++i) {
865 acpi_handle handle = lpi_constraints_table[i].handle;
866 struct acpi_device *adev;
867
868 if (!handle || acpi_bus_get_device(handle, &adev))
869 continue;
870
871 acpi_handle_debug(handle,
872 "LPI: required min power state:%s current power state:%s\n",
873 acpi_power_state_string(lpi_constraints_table[i].min_dstate),
874 acpi_power_state_string(adev->power.state));
875
876 if (!adev->flags.power_manageable) {
877 acpi_handle_info(handle, "LPI: Device not power manageable\n");
878 lpi_constraints_table[i].handle = NULL;
879 continue;
880 }
881
882 if (adev->power.state < lpi_constraints_table[i].min_dstate)
883 acpi_handle_info(handle,
884 "LPI: Constraint not met; min power state:%s current power state:%s\n",
885 acpi_power_state_string(lpi_constraints_table[i].min_dstate),
886 acpi_power_state_string(adev->power.state));
887 }
888}
889
890static void acpi_sleep_run_lps0_dsm(unsigned int func)
891{
892 union acpi_object *out_obj;
893
894 if (!(lps0_dsm_func_mask & (1 << func)))
895 return;
896
897 out_obj = acpi_evaluate_dsm(lps0_device_handle, &lps0_dsm_guid, 1, func, NULL);
898 ACPI_FREE(out_obj);
899
900 acpi_handle_debug(lps0_device_handle, "_DSM function %u evaluation %s\n",
901 func, out_obj ? "successful" : "failed");
902}
903
static int lps0_device_attach(struct acpi_device *adev,
                              const struct acpi_device_id *not_used)
{
        union acpi_object *out_obj;

        if (lps0_device_handle)
                return 0;

        if (acpi_sleep_no_lps0) {
                acpi_handle_info(adev->handle,
                                 "Low Power S0 Idle interface disabled\n");
                return 0;
        }

        if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0))
                return 0;

        guid_parse(ACPI_LPS0_DSM_UUID, &lps0_dsm_guid);
        /* Check if the _DSM is present and as expected. */
        out_obj = acpi_evaluate_dsm(adev->handle, &lps0_dsm_guid, 1, 0, NULL);
        if (out_obj && out_obj->type == ACPI_TYPE_BUFFER) {
                char bitmask = *(char *)out_obj->buffer.pointer;

                if ((bitmask & ACPI_LPS0_PLATFORM_MASK) == ACPI_LPS0_PLATFORM_MASK ||
                    (bitmask & ACPI_LPS0_SCREEN_MASK) == ACPI_LPS0_SCREEN_MASK) {
                        lps0_dsm_func_mask = bitmask;
                        lps0_device_handle = adev->handle;
                        /*
                         * Use suspend-to-idle by default if the default
                         * suspend mode was not set from the command line.
                         */
                        if (mem_sleep_default > PM_SUSPEND_MEM)
                                mem_sleep_current = PM_SUSPEND_TO_IDLE;
                }

                acpi_handle_debug(adev->handle, "_DSM function mask: 0x%x\n",
                                  bitmask);
        } else {
                acpi_handle_debug(adev->handle,
                                  "_DSM function 0 evaluation failed\n");
        }
        ACPI_FREE(out_obj);

        lpi_device_get_constraints();

        return 0;
}

static struct acpi_scan_handler lps0_handler = {
        .ids = lps0_device_ids,
        .attach = lps0_device_attach,
};

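/*
 * Note that acpi_s2idle_begin() and acpi_s2idle_end() below bracket the
 * whole suspend-to-idle cycle with the ACPI scan lock, which keeps ACPI
 * hotplug from running concurrently with the transition.
 */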
static int acpi_s2idle_begin(void)
{
        acpi_scan_lock_acquire();
        s2idle_in_progress = true;
        return 0;
}

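/*
 * Tell the platform that suspend-to-idle is starting: run the SCREEN_OFF and
 * ENTRY _DSM functions if an LPS0 device is present, and arm the SCI so that
 * it can wake the system up from the idle state.
 */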
static int acpi_s2idle_prepare(void)
{
        if (lps0_device_handle) {
                acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF);
                acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY);
        }

        if (acpi_sci_irq_valid())
                enable_irq_wake(acpi_sci_irq);

        return 0;
}

static void acpi_s2idle_wake(void)
{
        if (pm_debug_messages_on)
                lpi_check_constraints();

        /*
         * If IRQD_WAKEUP_ARMED is not set for the SCI at this point, it means
         * that the SCI has triggered while suspended, so cancel the wakeup in
         * case it has not been a wakeup event (the GPEs will be checked later).
         */
        if (acpi_sci_irq_valid() &&
            !irqd_is_wakeup_armed(irq_get_irq_data(acpi_sci_irq))) {
                pm_system_cancel_wakeup();
                s2idle_wakeup = true;
        }
}

static void acpi_s2idle_sync(void)
{
        /*
         * Process all pending events in case there are any wakeup ones.
         *
         * The EC driver uses the system workqueue and an additional special
         * one, so those need to be flushed too.
         */
        acpi_os_wait_events_complete(); /* synchronize SCI IRQ handling */
        acpi_ec_flush_work();
        acpi_os_wait_events_complete(); /* synchronize Notify handling */
        s2idle_wakeup = false;
}

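/*
 * Undo acpi_s2idle_prepare(): disarm the SCI and run the EXIT and SCREEN_ON
 * _DSM functions to tell the platform that suspend-to-idle is over.
 */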
static void acpi_s2idle_restore(void)
{
        if (acpi_sci_irq_valid())
                disable_irq_wake(acpi_sci_irq);

        if (lps0_device_handle) {
                acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT);
                acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON);
        }
}

static void acpi_s2idle_end(void)
{
        s2idle_in_progress = false;
        acpi_scan_lock_release();
}

static const struct platform_s2idle_ops acpi_s2idle_ops = {
        .begin = acpi_s2idle_begin,
        .prepare = acpi_s2idle_prepare,
        .wake = acpi_s2idle_wake,
        .sync = acpi_s2idle_sync,
        .restore = acpi_s2idle_restore,
        .end = acpi_s2idle_end,
};

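/*
 * Mark the S1-S3 states supported by the platform, install the suspend
 * operations (the variant preserving the pre-ACPI 2.0 ordering if that has
 * been requested), and register the LPS0 scan handler together with the
 * suspend-to-idle callbacks.
 */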
static void acpi_sleep_suspend_setup(void)
{
        int i;

        for (i = ACPI_STATE_S1; i < ACPI_STATE_S4; i++)
                if (acpi_sleep_state_supported(i))
                        sleep_states[i] = 1;

        suspend_set_ops(old_suspend_ordering ?
                        &acpi_suspend_ops_old : &acpi_suspend_ops);

        acpi_scan_add_handler(&lps0_handler);
        s2idle_set_ops(&acpi_s2idle_ops);
}

#else /* !CONFIG_SUSPEND */
#define s2idle_in_progress (false)
#define s2idle_wakeup (false)
#define lps0_device_handle (NULL)
static inline void acpi_sleep_suspend_setup(void) {}
#endif /* !CONFIG_SUSPEND */

bool acpi_s2idle_wakeup(void)
{
        return s2idle_wakeup;
}

bool acpi_sleep_no_ec_events(void)
{
        return !s2idle_in_progress || !lps0_device_handle;
}

#ifdef CONFIG_PM_SLEEP
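/*
 * The BM_RLD (bus master reload) bit is saved across system sleep through
 * syscore ops and written back on resume if its value has changed, in case
 * the firmware does not preserve it.
 */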
static u32 saved_bm_rld;

static int acpi_save_bm_rld(void)
{
        acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &saved_bm_rld);
        return 0;
}

static void acpi_restore_bm_rld(void)
{
        u32 resumed_bm_rld = 0;

        acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &resumed_bm_rld);
        if (resumed_bm_rld == saved_bm_rld)
                return;

        acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, saved_bm_rld);
}

static struct syscore_ops acpi_sleep_syscore_ops = {
        .suspend = acpi_save_bm_rld,
        .resume = acpi_restore_bm_rld,
};

static void acpi_sleep_syscore_init(void)
{
        register_syscore_ops(&acpi_sleep_syscore_ops);
}
#else
static inline void acpi_sleep_syscore_init(void) {}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_HIBERNATION
static unsigned long s4_hardware_signature;
static struct acpi_table_facs *facs;
static bool nosigcheck;

void __init acpi_no_s4_hw_signature(void)
{
        nosigcheck = true;
}

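/*
 * Start the transition into hibernation: allocate a buffer for saving the
 * NVS memory area (unless NVS saving has been disabled) and make
 * ACPI_STATE_S4 the target sleep state via acpi_pm_start().
 */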
static int acpi_hibernation_begin(void)
{
        int error;

        error = nvs_nosave ? 0 : suspend_nvs_alloc();
        if (!error)
                acpi_pm_start(ACPI_STATE_S4);

        return error;
}

static int acpi_hibernation_enter(void)
{
        acpi_status status = AE_OK;

        ACPI_FLUSH_CPU_CACHE();

        /* This shouldn't return. If it returns, we have a problem */
        status = acpi_enter_sleep_state(ACPI_STATE_S4);
        /* Reprogram control registers */
        acpi_leave_sleep_state_prep(ACPI_STATE_S4);

        return ACPI_SUCCESS(status) ? 0 : -EFAULT;
}

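/*
 * Runs on the way out of S4, possibly with ACPI still disabled by the
 * firmware: re-enable ACPI if needed, reprogram the sleep control registers,
 * compare the FACS hardware signature with the value saved before
 * hibernation, restore the NVS area and let EC transactions run again.
 */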
static void acpi_hibernation_leave(void)
{
        pm_set_resume_via_firmware();
        /*
         * If ACPI is not enabled by the BIOS and the boot kernel, we need to
         * enable it here.
         */
        acpi_enable();
        /* Reprogram control registers */
        acpi_leave_sleep_state_prep(ACPI_STATE_S4);
        /* Check the hardware signature */
        if (facs && s4_hardware_signature != facs->hardware_signature)
                pr_crit("Hardware changed while hibernated, success doubtful!\n");
        /* Restore the NVS memory area */
        suspend_nvs_restore();
        /* Allow EC transactions to happen. */
        acpi_ec_unblock_transactions();
}

static void acpi_pm_thaw(void)
{
        acpi_ec_unblock_transactions();
        acpi_enable_all_runtime_gpes();
}

static const struct platform_hibernation_ops acpi_hibernation_ops = {
        .begin = acpi_hibernation_begin,
        .end = acpi_pm_end,
        .pre_snapshot = acpi_pm_prepare,
        .finish = acpi_pm_finish,
        .prepare = acpi_pm_prepare,
        .enter = acpi_hibernation_enter,
        .leave = acpi_hibernation_leave,
        .pre_restore = acpi_pm_freeze,
        .restore_cleanup = acpi_pm_thaw,
};

1172/**
1173 * acpi_hibernation_begin_old - Set the target system sleep state to
1174 * ACPI_STATE_S4 and execute the _PTS control method. This
1175 * function is used if the pre-ACPI 2.0 suspend ordering has been
1176 * requested.
1177 */
1178static int acpi_hibernation_begin_old(void)
1179{
1180 int error;
1181 /*
1182 * The _TTS object should always be evaluated before the _PTS object.
1183 * When the old_suspended_ordering is true, the _PTS object is
1184 * evaluated in the acpi_sleep_prepare.
1185 */
1186 acpi_sleep_tts_switch(ACPI_STATE_S4);
1187
1188 error = acpi_sleep_prepare(ACPI_STATE_S4);
1189
1190 if (!error) {
1191 if (!nvs_nosave)
1192 error = suspend_nvs_alloc();
1193 if (!error) {
1194 acpi_target_sleep_state = ACPI_STATE_S4;
1195 acpi_scan_lock_acquire();
1196 }
1197 }
1198 return error;
1199}
1200
1201/*
1202 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
1203 * been requested.
1204 */
1205static const struct platform_hibernation_ops acpi_hibernation_ops_old = {
1206 .begin = acpi_hibernation_begin_old,
1207 .end = acpi_pm_end,
1208 .pre_snapshot = acpi_pm_pre_suspend,
1209 .prepare = acpi_pm_freeze,
1210 .finish = acpi_pm_finish,
1211 .enter = acpi_hibernation_enter,
1212 .leave = acpi_hibernation_leave,
1213 .pre_restore = acpi_pm_freeze,
1214 .restore_cleanup = acpi_pm_thaw,
1215 .recover = acpi_pm_finish,
1216};
1217
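/*
 * If S4 is supported, install the hibernation callbacks (the legacy-ordering
 * variant if that has been requested) and, unless the hardware signature
 * check has been disabled via acpi_no_s4_hw_signature(), record the FACS
 * hardware signature so acpi_hibernation_leave() can detect hardware changes
 * across hibernation.
 */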
static void acpi_sleep_hibernate_setup(void)
{
        if (!acpi_sleep_state_supported(ACPI_STATE_S4))
                return;

        hibernation_set_ops(old_suspend_ordering ?
                            &acpi_hibernation_ops_old : &acpi_hibernation_ops);
        sleep_states[ACPI_STATE_S4] = 1;
        if (nosigcheck)
                return;

        acpi_get_table(ACPI_SIG_FACS, 1, (struct acpi_table_header **)&facs);
        if (facs)
                s4_hardware_signature = facs->hardware_signature;
}
#else /* !CONFIG_HIBERNATION */
static inline void acpi_sleep_hibernate_setup(void) {}
#endif /* !CONFIG_HIBERNATION */

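/*
 * S5 (power off) support: acpi_power_off_prepare() runs the regular sleep
 * preparation for S5 and masks all GPEs, and acpi_power_off() then enters
 * S5 with interrupts disabled.  Both are installed from acpi_sleep_init()
 * when the platform reports S5 support.
 */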
static void acpi_power_off_prepare(void)
{
        /* Prepare to power off the system */
        acpi_sleep_prepare(ACPI_STATE_S5);
        acpi_disable_all_gpes();
        acpi_os_wait_events_complete();
}

static void acpi_power_off(void)
{
        /* acpi_sleep_prepare(ACPI_STATE_S5) should have already been called */
        printk(KERN_DEBUG "%s called\n", __func__);
        local_irq_disable();
        acpi_enter_sleep_state(ACPI_STATE_S5);
}

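/*
 * Initialize ACPI sleep support: apply DMI quirks, set up the syscore,
 * suspend, hibernation and power-off handlers according to what the
 * platform supports, log the resulting list of sleep states, and register
 * the reboot notifier that evaluates _TTS on the way to S5.
 */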
int __init acpi_sleep_init(void)
{
        char supported[ACPI_S_STATE_COUNT * 3 + 1];
        char *pos = supported;
        int i;

        acpi_sleep_dmi_check();

        sleep_states[ACPI_STATE_S0] = 1;

        acpi_sleep_syscore_init();
        acpi_sleep_suspend_setup();
        acpi_sleep_hibernate_setup();

        if (acpi_sleep_state_supported(ACPI_STATE_S5)) {
                sleep_states[ACPI_STATE_S5] = 1;
                pm_power_off_prepare = acpi_power_off_prepare;
                pm_power_off = acpi_power_off;
        } else {
                acpi_no_s5 = true;
        }

        supported[0] = 0;
        for (i = 0; i < ACPI_S_STATE_COUNT; i++) {
                if (sleep_states[i])
                        pos += sprintf(pos, " S%d", i);
        }
        pr_info("(supports%s)\n", supported);

        /*
         * Register tts_notifier on the reboot notifier list so that the _TTS
         * object can also be evaluated when the system enters S5.
         */
        register_reboot_notifier(&tts_notifier);
        return 0;
}