v5.9
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * sleep.c - ACPI sleep support.
   4 *
   5 * Copyright (c) 2005 Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>
   6 * Copyright (c) 2004 David Shaohua Li <shaohua.li@intel.com>
   7 * Copyright (c) 2000-2003 Patrick Mochel
   8 * Copyright (c) 2003 Open Source Development Lab
   9 */
  10
  11#include <linux/delay.h>
  12#include <linux/irq.h>
  13#include <linux/dmi.h>
  14#include <linux/device.h>
  15#include <linux/interrupt.h>
  16#include <linux/suspend.h>
  17#include <linux/reboot.h>
  18#include <linux/acpi.h>
  19#include <linux/module.h>
  20#include <linux/syscore_ops.h>
  21#include <asm/io.h>
  22#include <trace/events/power.h>
  23
  24#include "internal.h"
  25#include "sleep.h"
  26
  27/*
  28 * Some HW-full platforms do not have _S5, so they may need
  29 * to leverage efi power off for a shutdown.
  30 */
  31bool acpi_no_s5;
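/*
 * Per-state support flags: a nonzero sleep_states[ACPI_STATE_Sx] means the
 * platform supports that S-state (filled in by acpi_sleep_init() and the
 * *_setup() helpers below).
 */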
  32static u8 sleep_states[ACPI_S_STATE_COUNT];
  33
  34static void acpi_sleep_tts_switch(u32 acpi_state)
  35{
  36	acpi_status status;
  37
  38	status = acpi_execute_simple_method(NULL, "\\_TTS", acpi_state);
  39	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
  40		/*
  41		 * OS can't evaluate the _TTS object correctly. Some warning
  42		 * message will be printed. But it won't break anything.
  43		 */
  44		printk(KERN_NOTICE "Failure in evaluating _TTS object\n");
  45	}
  46}
  47
  48static int tts_notify_reboot(struct notifier_block *this,
  49			unsigned long code, void *x)
  50{
  51	acpi_sleep_tts_switch(ACPI_STATE_S5);
  52	return NOTIFY_DONE;
  53}
  54
  55static struct notifier_block tts_notifier = {
  56	.notifier_call	= tts_notify_reboot,
  57	.next		= NULL,
  58	.priority	= 0,
  59};
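/*
 * The notifier is registered in acpi_sleep_init() below, so _TTS is also
 * evaluated (with ACPI_STATE_S5) when the system reboots or shuts down.
 */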
  60
  61static int acpi_sleep_prepare(u32 acpi_state)
  62{
  63#ifdef CONFIG_ACPI_SLEEP
  64	unsigned long acpi_wakeup_address;
  65
  66	/* do we have a wakeup address for S2 and S3? */
  67	if (acpi_state == ACPI_STATE_S3) {
  68		acpi_wakeup_address = acpi_get_wakeup_address();
  69		if (!acpi_wakeup_address)
  70			return -EFAULT;
  71		acpi_set_waking_vector(acpi_wakeup_address);
  72
  73	}
  74	ACPI_FLUSH_CPU_CACHE();
  75#endif
  76	printk(KERN_INFO PREFIX "Preparing to enter system sleep state S%d\n",
  77		acpi_state);
  78	acpi_enable_wakeup_devices(acpi_state);
  79	acpi_enter_sleep_state_prep(acpi_state);
  80	return 0;
  81}
  82
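/*
 * A sleep state is usable when the firmware provides its sleep type data
 * (the _Sx package) and, on hardware-reduced ACPI platforms, the FADT sleep
 * control and sleep status registers as well.
 */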
  83bool acpi_sleep_state_supported(u8 sleep_state)
  84{
  85	acpi_status status;
  86	u8 type_a, type_b;
  87
  88	status = acpi_get_sleep_type_data(sleep_state, &type_a, &type_b);
  89	return ACPI_SUCCESS(status) && (!acpi_gbl_reduced_hardware
  90		|| (acpi_gbl_FADT.sleep_control.address
  91			&& acpi_gbl_FADT.sleep_status.address));
  92}
  93
  94#ifdef CONFIG_ACPI_SLEEP
  95static bool sleep_no_lps0 __read_mostly;
  96module_param(sleep_no_lps0, bool, 0644);
  97MODULE_PARM_DESC(sleep_no_lps0, "Do not use the special LPS0 device interface");
  98
  99static u32 acpi_target_sleep_state = ACPI_STATE_S0;
 100
 101u32 acpi_target_system_state(void)
 102{
 103	return acpi_target_sleep_state;
 104}
 105EXPORT_SYMBOL_GPL(acpi_target_system_state);
 106
 107static bool pwr_btn_event_pending;
 108
 109/*
 110 * The ACPI specification wants us to save NVS memory regions during hibernation
 111 * and to restore them during the subsequent resume.  Windows does that also for
 112 * suspend to RAM.  However, it is known that this mechanism does not work on
 113 * all machines, so we allow the user to disable it with the help of the
 114 * 'acpi_sleep=nonvs' kernel command line option.
 115 */
 116static bool nvs_nosave;
 117
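/*
 * Invoked for the "acpi_sleep=nonvs" kernel command line option mentioned
 * above (the arch code that parses acpi_sleep= is expected to call this
 * helper).
 */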
 118void __init acpi_nvs_nosave(void)
 119{
 120	nvs_nosave = true;
 121}
 122
 123/*
 124 * The ACPI specification wants us to save NVS memory regions during hibernation
 125 * but says nothing about saving NVS during S3.  Not all versions of Windows
 126 * save NVS on S3 suspend either, and it is clear that not all systems need
 127 * NVS to be saved at S3 time.  To improve suspend/resume time, allow the
 128 * user to disable saving NVS on S3 if their system does not require it, but
 129 * continue to save/restore NVS for S4 as specified.
 130 */
 131static bool nvs_nosave_s3;
 132
 133void __init acpi_nvs_nosave_s3(void)
 134{
 135	nvs_nosave_s3 = true;
 136}
 137
 138static int __init init_nvs_save_s3(const struct dmi_system_id *d)
 139{
 140	nvs_nosave_s3 = false;
 141	return 0;
 142}
 143
 144/*
 145 * ACPI 1.0 wants us to execute _PTS before suspending devices, so we allow the
 146 * user to request that behavior by using the 'acpi_old_suspend_ordering'
 147 * kernel command line option that causes the following variable to be set.
 148 */
 149static bool old_suspend_ordering;
 150
 151void __init acpi_old_suspend_ordering(void)
 152{
 153	old_suspend_ordering = true;
 154}
 155
 156static int __init init_old_suspend_ordering(const struct dmi_system_id *d)
 157{
 158	acpi_old_suspend_ordering();
 159	return 0;
 160}
 161
 162static int __init init_nvs_nosave(const struct dmi_system_id *d)
 163{
 164	acpi_nvs_nosave();
 165	return 0;
 166}
 167
 168static bool acpi_sleep_default_s3;
 169
 170static int __init init_default_s3(const struct dmi_system_id *d)
 171{
 172	acpi_sleep_default_s3 = true;
 173	return 0;
 174}
 175
 176static const struct dmi_system_id acpisleep_dmi_table[] __initconst = {
 177	{
 178	.callback = init_old_suspend_ordering,
 179	.ident = "Abit KN9 (nForce4 variant)",
 180	.matches = {
 181		DMI_MATCH(DMI_BOARD_VENDOR, "http://www.abit.com.tw/"),
 182		DMI_MATCH(DMI_BOARD_NAME, "KN9 Series(NF-CK804)"),
 183		},
 184	},
 185	{
 186	.callback = init_old_suspend_ordering,
 187	.ident = "HP xw4600 Workstation",
 188	.matches = {
 189		DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
 190		DMI_MATCH(DMI_PRODUCT_NAME, "HP xw4600 Workstation"),
 191		},
 192	},
 193	{
 194	.callback = init_old_suspend_ordering,
 195	.ident = "Asus Pundit P1-AH2 (M2N8L motherboard)",
 196	.matches = {
 197		DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTek Computer INC."),
 198		DMI_MATCH(DMI_BOARD_NAME, "M2N8L"),
 199		},
 200	},
 201	{
 202	.callback = init_old_suspend_ordering,
 203	.ident = "Panasonic CF51-2L",
 204	.matches = {
 205		DMI_MATCH(DMI_BOARD_VENDOR,
 206				"Matsushita Electric Industrial Co.,Ltd."),
 207		DMI_MATCH(DMI_BOARD_NAME, "CF51-2L"),
 208		},
 209	},
 210	{
 211	.callback = init_nvs_nosave,
 212	.ident = "Sony Vaio VGN-FW41E_H",
 213	.matches = {
 214		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
 215		DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW41E_H"),
 216		},
 217	},
 218	{
 219	.callback = init_nvs_nosave,
 220	.ident = "Sony Vaio VGN-FW21E",
 221	.matches = {
 222		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
 223		DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW21E"),
 224		},
 225	},
 226	{
 227	.callback = init_nvs_nosave,
 228	.ident = "Sony Vaio VGN-FW21M",
 229	.matches = {
 230		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
 231		DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW21M"),
 232		},
 233	},
 234	{
 235	.callback = init_nvs_nosave,
 236	.ident = "Sony Vaio VPCEB17FX",
 237	.matches = {
 238		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
 239		DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB17FX"),
 240		},
 241	},
 242	{
 243	.callback = init_nvs_nosave,
 244	.ident = "Sony Vaio VGN-SR11M",
 245	.matches = {
 246		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
 247		DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR11M"),
 248		},
 249	},
 250	{
 251	.callback = init_nvs_nosave,
 252	.ident = "Everex StepNote Series",
 253	.matches = {
 254		DMI_MATCH(DMI_SYS_VENDOR, "Everex Systems, Inc."),
 255		DMI_MATCH(DMI_PRODUCT_NAME, "Everex StepNote Series"),
 256		},
 257	},
 258	{
 259	.callback = init_nvs_nosave,
 260	.ident = "Sony Vaio VPCEB1Z1E",
 261	.matches = {
 262		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
 263		DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB1Z1E"),
 264		},
 265	},
 266	{
 267	.callback = init_nvs_nosave,
 268	.ident = "Sony Vaio VGN-NW130D",
 269	.matches = {
 270		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
 271		DMI_MATCH(DMI_PRODUCT_NAME, "VGN-NW130D"),
 272		},
 273	},
 274	{
 275	.callback = init_nvs_nosave,
 276	.ident = "Sony Vaio VPCCW29FX",
 277	.matches = {
 278		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
 279		DMI_MATCH(DMI_PRODUCT_NAME, "VPCCW29FX"),
 280		},
 281	},
 282	{
 283	.callback = init_nvs_nosave,
 284	.ident = "Averatec AV1020-ED2",
 285	.matches = {
 286		DMI_MATCH(DMI_SYS_VENDOR, "AVERATEC"),
 287		DMI_MATCH(DMI_PRODUCT_NAME, "1000 Series"),
 288		},
 289	},
 290	{
 291	.callback = init_old_suspend_ordering,
 292	.ident = "Asus A8N-SLI DELUXE",
 293	.matches = {
 294		DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
 295		DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI DELUXE"),
 296		},
 297	},
 298	{
 299	.callback = init_old_suspend_ordering,
 300	.ident = "Asus A8N-SLI Premium",
 301	.matches = {
 302		DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
 303		DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI Premium"),
 304		},
 305	},
 306	{
 307	.callback = init_nvs_nosave,
 308	.ident = "Sony Vaio VGN-SR26GN_P",
 309	.matches = {
 310		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
 311		DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR26GN_P"),
 312		},
 313	},
 314	{
 315	.callback = init_nvs_nosave,
 316	.ident = "Sony Vaio VPCEB1S1E",
 317	.matches = {
 318		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
 319		DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB1S1E"),
 320		},
 321	},
 322	{
 323	.callback = init_nvs_nosave,
 324	.ident = "Sony Vaio VGN-FW520F",
 325	.matches = {
 326		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
 327		DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW520F"),
 328		},
 329	},
 330	{
 331	.callback = init_nvs_nosave,
 332	.ident = "Asus K54C",
 333	.matches = {
 334		DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
 335		DMI_MATCH(DMI_PRODUCT_NAME, "K54C"),
 336		},
 337	},
 338	{
 339	.callback = init_nvs_nosave,
 340	.ident = "Asus K54HR",
 341	.matches = {
 342		DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
 343		DMI_MATCH(DMI_PRODUCT_NAME, "K54HR"),
 344		},
 345	},
 346	{
 347	.callback = init_nvs_save_s3,
 348	.ident = "Asus 1025C",
 349	.matches = {
 350		DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
 351		DMI_MATCH(DMI_PRODUCT_NAME, "1025C"),
 352		},
 353	},
 354	/*
 355	 * https://bugzilla.kernel.org/show_bug.cgi?id=189431
 356	 * Lenovo G50-45 is a platform later than 2012, but needs nvs memory
 357	 * saving during S3.
 358	 */
 359	{
 360	.callback = init_nvs_save_s3,
 361	.ident = "Lenovo G50-45",
 362	.matches = {
 363		DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
 364		DMI_MATCH(DMI_PRODUCT_NAME, "80E3"),
 365		},
 366	},
 367	/*
 368	 * ThinkPad X1 Tablet(2016) cannot do suspend-to-idle using
 369	 * the Low Power S0 Idle firmware interface (see
 370	 * https://bugzilla.kernel.org/show_bug.cgi?id=199057).
 371	 */
 372	{
 373	.callback = init_default_s3,
 374	.ident = "ThinkPad X1 Tablet(2016)",
 375	.matches = {
 376		DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
 377		DMI_MATCH(DMI_PRODUCT_NAME, "20GGA00L00"),
 378		},
 379	},
 380	{},
 381};
 382
 383static bool ignore_blacklist;
 384
 385void __init acpi_sleep_no_blacklist(void)
 386{
 387	ignore_blacklist = true;
 388}
 389
 390static void __init acpi_sleep_dmi_check(void)
 391{
 392	if (ignore_blacklist)
 393		return;
 394
 395	if (dmi_get_bios_year() >= 2012)
 396		acpi_nvs_nosave_s3();
 397
 398	dmi_check_system(acpisleep_dmi_table);
 399}
 400
 401/**
 402 * acpi_pm_freeze - Disable the GPEs and suspend EC transactions.
 403 */
 404static int acpi_pm_freeze(void)
 405{
 406	acpi_disable_all_gpes();
 407	acpi_os_wait_events_complete();
 408	acpi_ec_block_transactions();
 409	return 0;
 410}
 411
 412/**
  413 * acpi_pm_pre_suspend - Enable wakeup devices, "freeze" EC and save NVS.
 414 */
 415static int acpi_pm_pre_suspend(void)
 416{
 417	acpi_pm_freeze();
 418	return suspend_nvs_save();
 419}
 420
 421/**
 422 *	__acpi_pm_prepare - Prepare the platform to enter the target state.
 423 *
 424 *	If necessary, set the firmware waking vector and do arch-specific
 425 *	nastiness to get the wakeup code to the waking vector.
 426 */
 427static int __acpi_pm_prepare(void)
 428{
 429	int error = acpi_sleep_prepare(acpi_target_sleep_state);
 430	if (error)
 431		acpi_target_sleep_state = ACPI_STATE_S0;
 432
 433	return error;
 434}
 435
 436/**
 437 *	acpi_pm_prepare - Prepare the platform to enter the target sleep
 438 *		state and disable the GPEs.
 439 */
 440static int acpi_pm_prepare(void)
 441{
 442	int error = __acpi_pm_prepare();
 443	if (!error)
 444		error = acpi_pm_pre_suspend();
 445
 446	return error;
 447}
 448
 449/**
 450 *	acpi_pm_finish - Instruct the platform to leave a sleep state.
 451 *
 452 *	This is called after we wake back up (or if entering the sleep state
 453 *	failed).
 454 */
 455static void acpi_pm_finish(void)
 456{
 457	struct acpi_device *pwr_btn_adev;
 458	u32 acpi_state = acpi_target_sleep_state;
 459
 460	acpi_ec_unblock_transactions();
 461	suspend_nvs_free();
 462
 463	if (acpi_state == ACPI_STATE_S0)
 464		return;
 465
 466	printk(KERN_INFO PREFIX "Waking up from system sleep state S%d\n",
 467		acpi_state);
 468	acpi_disable_wakeup_devices(acpi_state);
 469	acpi_leave_sleep_state(acpi_state);
 470
 471	/* reset firmware waking vector */
 472	acpi_set_waking_vector(0);
 473
 474	acpi_target_sleep_state = ACPI_STATE_S0;
 475
 476	acpi_resume_power_resources();
 477
 478	/* If we were woken with the fixed power button, provide a small
 479	 * hint to userspace in the form of a wakeup event on the fixed power
 480	 * button device (if it can be found).
 481	 *
 482	 * We delay the event generation til now, as the PM layer requires
 483	 * timekeeping to be running before we generate events. */
 484	if (!pwr_btn_event_pending)
 485		return;
 486
 487	pwr_btn_event_pending = false;
 488	pwr_btn_adev = acpi_dev_get_first_match_dev(ACPI_BUTTON_HID_POWERF,
 489						    NULL, -1);
 490	if (pwr_btn_adev) {
 491		pm_wakeup_event(&pwr_btn_adev->dev, 0);
 492		acpi_dev_put(pwr_btn_adev);
 493	}
 494}
 495
 496/**
 497 * acpi_pm_start - Start system PM transition.
 498 */
 499static void acpi_pm_start(u32 acpi_state)
 500{
 501	acpi_target_sleep_state = acpi_state;
 502	acpi_sleep_tts_switch(acpi_target_sleep_state);
 503	acpi_scan_lock_acquire();
 504}
 505
 506/**
 507 * acpi_pm_end - Finish up system PM transition.
 508 */
 509static void acpi_pm_end(void)
 510{
 511	acpi_turn_off_unused_power_resources();
 512	acpi_scan_lock_release();
 513	/*
 514	 * This is necessary in case acpi_pm_finish() is not called during a
 515	 * failing transition to a sleep state.
 516	 */
 517	acpi_target_sleep_state = ACPI_STATE_S0;
 518	acpi_sleep_tts_switch(acpi_target_sleep_state);
 519}
 520#else /* !CONFIG_ACPI_SLEEP */
 521#define sleep_no_lps0	(1)
 522#define acpi_target_sleep_state	ACPI_STATE_S0
 523#define acpi_sleep_default_s3	(1)
 524static inline void acpi_sleep_dmi_check(void) {}
 525#endif /* CONFIG_ACPI_SLEEP */
 526
 527#ifdef CONFIG_SUSPEND
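/*
 * Maps suspend_state_t values to the corresponding ACPI S-states; indexed
 * by pm_state in acpi_suspend_begin() and acpi_suspend_state_valid() below.
 */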
 528static u32 acpi_suspend_states[] = {
 529	[PM_SUSPEND_ON] = ACPI_STATE_S0,
 530	[PM_SUSPEND_STANDBY] = ACPI_STATE_S1,
 531	[PM_SUSPEND_MEM] = ACPI_STATE_S3,
 532	[PM_SUSPEND_MAX] = ACPI_STATE_S5
 533};
 534
 535/**
 536 *	acpi_suspend_begin - Set the target system sleep state to the state
 537 *		associated with given @pm_state, if supported.
 538 */
 539static int acpi_suspend_begin(suspend_state_t pm_state)
 540{
 541	u32 acpi_state = acpi_suspend_states[pm_state];
 542	int error;
 543
 544	error = (nvs_nosave || nvs_nosave_s3) ? 0 : suspend_nvs_alloc();
 545	if (error)
 546		return error;
 547
 548	if (!sleep_states[acpi_state]) {
 549		pr_err("ACPI does not support sleep state S%u\n", acpi_state);
 550		return -ENOSYS;
 551	}
 552	if (acpi_state > ACPI_STATE_S1)
 553		pm_set_suspend_via_firmware();
 554
 555	acpi_pm_start(acpi_state);
 556	return 0;
 557}
 558
 559/**
 560 *	acpi_suspend_enter - Actually enter a sleep state.
 561 *	@pm_state: ignored
 562 *
 563 *	Flush caches and go to sleep. For STR we have to call arch-specific
  564 *	assembly, which in turn calls acpi_enter_sleep_state().
 565 *	It's unfortunate, but it works. Please fix if you're feeling frisky.
 566 */
 567static int acpi_suspend_enter(suspend_state_t pm_state)
 568{
 569	acpi_status status = AE_OK;
 570	u32 acpi_state = acpi_target_sleep_state;
 571	int error;
 572
 573	ACPI_FLUSH_CPU_CACHE();
 574
 575	trace_suspend_resume(TPS("acpi_suspend"), acpi_state, true);
 576	switch (acpi_state) {
 577	case ACPI_STATE_S1:
 578		barrier();
 579		status = acpi_enter_sleep_state(acpi_state);
 580		break;
 581
 582	case ACPI_STATE_S3:
 583		if (!acpi_suspend_lowlevel)
 584			return -ENOSYS;
 585		error = acpi_suspend_lowlevel();
 586		if (error)
 587			return error;
 588		pr_info(PREFIX "Low-level resume complete\n");
 589		pm_set_resume_via_firmware();
 590		break;
 591	}
 592	trace_suspend_resume(TPS("acpi_suspend"), acpi_state, false);
 593
 594	/* This violates the spec but is required for bug compatibility. */
 595	acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1);
 596
 597	/* Reprogram control registers */
 598	acpi_leave_sleep_state_prep(acpi_state);
 599
  600	/* The ACPI 3.0 spec (p. 62) says that it is the responsibility
 601	 * of the OSPM to clear the status bit [ implying that the
 602	 * POWER_BUTTON event should not reach userspace ]
 603	 *
 604	 * However, we do generate a small hint for userspace in the form of
 605	 * a wakeup event. We flag this condition for now and generate the
 606	 * event later, as we're currently too early in resume to be able to
 607	 * generate wakeup events.
 608	 */
 609	if (ACPI_SUCCESS(status) && (acpi_state == ACPI_STATE_S3)) {
 610		acpi_event_status pwr_btn_status = ACPI_EVENT_FLAG_DISABLED;
 611
 612		acpi_get_event_status(ACPI_EVENT_POWER_BUTTON, &pwr_btn_status);
 613
 614		if (pwr_btn_status & ACPI_EVENT_FLAG_STATUS_SET) {
 615			acpi_clear_event(ACPI_EVENT_POWER_BUTTON);
 616			/* Flag for later */
 617			pwr_btn_event_pending = true;
 618		}
 619	}
 620
 621	/*
  622	 * Disable and clear GPE status before interrupts are enabled. Some GPEs
  623	 * (like wakeup GPEs) have no handler, so this avoids such GPEs misfiring.
  624	 * acpi_leave_sleep_state() will reenable specific GPEs later.
 625	 */
 626	acpi_disable_all_gpes();
 627	/* Allow EC transactions to happen. */
 628	acpi_ec_unblock_transactions();
 629
 630	suspend_nvs_restore();
 631
 632	return ACPI_SUCCESS(status) ? 0 : -EFAULT;
 633}
 634
 635static int acpi_suspend_state_valid(suspend_state_t pm_state)
 636{
 637	u32 acpi_state;
 638
 639	switch (pm_state) {
 640	case PM_SUSPEND_ON:
 641	case PM_SUSPEND_STANDBY:
 642	case PM_SUSPEND_MEM:
 643		acpi_state = acpi_suspend_states[pm_state];
 644
 645		return sleep_states[acpi_state];
 646	default:
 647		return 0;
 648	}
 649}
 650
 651static const struct platform_suspend_ops acpi_suspend_ops = {
 652	.valid = acpi_suspend_state_valid,
 653	.begin = acpi_suspend_begin,
 654	.prepare_late = acpi_pm_prepare,
 655	.enter = acpi_suspend_enter,
 656	.wake = acpi_pm_finish,
 657	.end = acpi_pm_end,
 658};
 659
 660/**
 661 *	acpi_suspend_begin_old - Set the target system sleep state to the
 662 *		state associated with given @pm_state, if supported, and
 663 *		execute the _PTS control method.  This function is used if the
 664 *		pre-ACPI 2.0 suspend ordering has been requested.
 665 */
 666static int acpi_suspend_begin_old(suspend_state_t pm_state)
 667{
 668	int error = acpi_suspend_begin(pm_state);
 669	if (!error)
 670		error = __acpi_pm_prepare();
 671
 672	return error;
 673}
 674
 675/*
 676 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
 677 * been requested.
 678 */
 679static const struct platform_suspend_ops acpi_suspend_ops_old = {
 680	.valid = acpi_suspend_state_valid,
 681	.begin = acpi_suspend_begin_old,
 682	.prepare_late = acpi_pm_pre_suspend,
 683	.enter = acpi_suspend_enter,
 684	.wake = acpi_pm_finish,
 685	.end = acpi_pm_end,
 686	.recover = acpi_pm_finish,
 687};
 688
 689static bool s2idle_wakeup;
 690
 691/*
 692 * On platforms supporting the Low Power S0 Idle interface there is an ACPI
 693 * device object with the PNP0D80 compatible device ID (System Power Management
 694 * Controller) and a specific _DSM method under it.  That method, if present,
 695 * can be used to indicate to the platform that the OS is transitioning into a
 696 * low-power state in which certain types of activity are not desirable or that
 697 * it is leaving such a state, which allows the platform to adjust its operation
 698 * mode accordingly.
 699 */
 700static const struct acpi_device_id lps0_device_ids[] = {
 701	{"PNP0D80", },
 702	{"", },
 703};
 704
 705#define ACPI_LPS0_DSM_UUID	"c4eb40a0-6cd2-11e2-bcfd-0800200c9a66"
 706
 707#define ACPI_LPS0_GET_DEVICE_CONSTRAINTS	1
 708#define ACPI_LPS0_SCREEN_OFF	3
 709#define ACPI_LPS0_SCREEN_ON	4
 710#define ACPI_LPS0_ENTRY		5
 711#define ACPI_LPS0_EXIT		6
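/*
 * Ordering used below: SCREEN_OFF followed by ENTRY when entering
 * suspend-to-idle (acpi_s2idle_prepare_late()), and EXIT followed by
 * SCREEN_ON on the way out (acpi_s2idle_restore_early()).
 */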
 712
 713static acpi_handle lps0_device_handle;
 714static guid_t lps0_dsm_guid;
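/*
 * Bitmask of supported _DSM functions, as returned by _DSM function 0 in
 * lps0_device_attach(): bit N set means function N may be evaluated (see
 * acpi_sleep_run_lps0_dsm()).
 */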
 715static char lps0_dsm_func_mask;
 716
 717/* Device constraint entry structure */
 718struct lpi_device_info {
 719	char *name;
 720	int enabled;
 721	union acpi_object *package;
 722};
 723
 724/* Constraint package structure */
 725struct lpi_device_constraint {
 726	int uid;
 727	int min_dstate;
 728	int function_states;
 729};
 730
 731struct lpi_constraints {
 732	acpi_handle handle;
 733	int min_dstate;
 734};
 735
 736static struct lpi_constraints *lpi_constraints_table;
 737static int lpi_constraints_table_size;
 738
 739static void lpi_device_get_constraints(void)
 740{
 741	union acpi_object *out_obj;
 742	int i;
 743
 744	out_obj = acpi_evaluate_dsm_typed(lps0_device_handle, &lps0_dsm_guid,
 745					  1, ACPI_LPS0_GET_DEVICE_CONSTRAINTS,
 746					  NULL, ACPI_TYPE_PACKAGE);
 747
 748	acpi_handle_debug(lps0_device_handle, "_DSM function 1 eval %s\n",
 749			  out_obj ? "successful" : "failed");
 750
 751	if (!out_obj)
 752		return;
 753
 754	lpi_constraints_table = kcalloc(out_obj->package.count,
 755					sizeof(*lpi_constraints_table),
 756					GFP_KERNEL);
 757	if (!lpi_constraints_table)
 758		goto free_acpi_buffer;
 759
 760	acpi_handle_debug(lps0_device_handle, "LPI: constraints list begin:\n");
 761
 762	for (i = 0; i < out_obj->package.count; i++) {
 763		struct lpi_constraints *constraint;
 764		acpi_status status;
 765		union acpi_object *package = &out_obj->package.elements[i];
 766		struct lpi_device_info info = { };
 767		int package_count = 0, j;
 768
 769		if (!package)
 770			continue;
 771
 772		for (j = 0; j < package->package.count; ++j) {
 773			union acpi_object *element =
 774					&(package->package.elements[j]);
 775
 776			switch (element->type) {
 777			case ACPI_TYPE_INTEGER:
 778				info.enabled = element->integer.value;
 779				break;
 780			case ACPI_TYPE_STRING:
 781				info.name = element->string.pointer;
 782				break;
 783			case ACPI_TYPE_PACKAGE:
 784				package_count = element->package.count;
 785				info.package = element->package.elements;
 786				break;
 787			}
 788		}
 789
 790		if (!info.enabled || !info.package || !info.name)
 791			continue;
 792
 793		constraint = &lpi_constraints_table[lpi_constraints_table_size];
 794
 795		status = acpi_get_handle(NULL, info.name, &constraint->handle);
 796		if (ACPI_FAILURE(status))
 797			continue;
 798
 799		acpi_handle_debug(lps0_device_handle,
 800				  "index:%d Name:%s\n", i, info.name);
 801
 802		constraint->min_dstate = -1;
 803
 804		for (j = 0; j < package_count; ++j) {
 805			union acpi_object *info_obj = &info.package[j];
 806			union acpi_object *cnstr_pkg;
 807			union acpi_object *obj;
 808			struct lpi_device_constraint dev_info;
 809
 810			switch (info_obj->type) {
 811			case ACPI_TYPE_INTEGER:
 812				/* version */
 813				break;
 814			case ACPI_TYPE_PACKAGE:
 815				if (info_obj->package.count < 2)
 816					break;
 817
 818				cnstr_pkg = info_obj->package.elements;
 819				obj = &cnstr_pkg[0];
 820				dev_info.uid = obj->integer.value;
 821				obj = &cnstr_pkg[1];
 822				dev_info.min_dstate = obj->integer.value;
 823
 824				acpi_handle_debug(lps0_device_handle,
 825					"uid:%d min_dstate:%s\n",
 826					dev_info.uid,
 827					acpi_power_state_string(dev_info.min_dstate));
 828
 829				constraint->min_dstate = dev_info.min_dstate;
 830				break;
 831			}
 832		}
 833
 834		if (constraint->min_dstate < 0) {
 835			acpi_handle_debug(lps0_device_handle,
 836					  "Incomplete constraint defined\n");
 837			continue;
 838		}
 839
 840		lpi_constraints_table_size++;
 841	}
 842
 843	acpi_handle_debug(lps0_device_handle, "LPI: constraints list end\n");
 844
 845free_acpi_buffer:
 846	ACPI_FREE(out_obj);
 847}
 848
 849static void lpi_check_constraints(void)
 850{
 851	int i;
 852
 853	for (i = 0; i < lpi_constraints_table_size; ++i) {
 854		acpi_handle handle = lpi_constraints_table[i].handle;
 855		struct acpi_device *adev;
 856
 857		if (!handle || acpi_bus_get_device(handle, &adev))
 858			continue;
 859
 860		acpi_handle_debug(handle,
 861			"LPI: required min power state:%s current power state:%s\n",
 862			acpi_power_state_string(lpi_constraints_table[i].min_dstate),
 863			acpi_power_state_string(adev->power.state));
 864
 865		if (!adev->flags.power_manageable) {
 866			acpi_handle_info(handle, "LPI: Device not power manageable\n");
 867			lpi_constraints_table[i].handle = NULL;
 868			continue;
 869		}
 870
 871		if (adev->power.state < lpi_constraints_table[i].min_dstate)
 872			acpi_handle_info(handle,
 873				"LPI: Constraint not met; min power state:%s current power state:%s\n",
 874				acpi_power_state_string(lpi_constraints_table[i].min_dstate),
 875				acpi_power_state_string(adev->power.state));
 876	}
 877}
 878
 879static void acpi_sleep_run_lps0_dsm(unsigned int func)
 880{
 881	union acpi_object *out_obj;
 882
 883	if (!(lps0_dsm_func_mask & (1 << func)))
 884		return;
 885
 886	out_obj = acpi_evaluate_dsm(lps0_device_handle, &lps0_dsm_guid, 1, func, NULL);
 887	ACPI_FREE(out_obj);
 888
 889	acpi_handle_debug(lps0_device_handle, "_DSM function %u evaluation %s\n",
 890			  func, out_obj ? "successful" : "failed");
 891}
 892
 893static int lps0_device_attach(struct acpi_device *adev,
 894			      const struct acpi_device_id *not_used)
 895{
 896	union acpi_object *out_obj;
 897
 898	if (lps0_device_handle)
 899		return 0;
 900
 901	if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0))
 902		return 0;
 903
 904	guid_parse(ACPI_LPS0_DSM_UUID, &lps0_dsm_guid);
 905	/* Check if the _DSM is present and as expected. */
 906	out_obj = acpi_evaluate_dsm(adev->handle, &lps0_dsm_guid, 1, 0, NULL);
 907	if (!out_obj || out_obj->type != ACPI_TYPE_BUFFER) {
 908		acpi_handle_debug(adev->handle,
 909				  "_DSM function 0 evaluation failed\n");
 910		return 0;
 911	}
 912
 913	lps0_dsm_func_mask = *(char *)out_obj->buffer.pointer;
 914
 915	ACPI_FREE(out_obj);
 916
 917	acpi_handle_debug(adev->handle, "_DSM function mask: 0x%x\n",
 918			  lps0_dsm_func_mask);
 919
 920	lps0_device_handle = adev->handle;
 921
 922	lpi_device_get_constraints();
 923
 924	/*
 925	 * Use suspend-to-idle by default if the default suspend mode was not
 926	 * set from the command line.
 927	 */
 928	if (mem_sleep_default > PM_SUSPEND_MEM && !acpi_sleep_default_s3)
 929		mem_sleep_current = PM_SUSPEND_TO_IDLE;
 930
 931	/*
 932	 * Some LPS0 systems, like ASUS Zenbook UX430UNR/i7-8550U, require the
 933	 * EC GPE to be enabled while suspended for certain wakeup devices to
 934	 * work, so mark it as wakeup-capable.
 935	 */
 936	acpi_ec_mark_gpe_for_wake();
 937
 938	return 0;
 939}
 940
 941static struct acpi_scan_handler lps0_handler = {
 942	.ids = lps0_device_ids,
 943	.attach = lps0_device_attach,
 944};
 945
 946static int acpi_s2idle_begin(void)
 947{
 948	acpi_scan_lock_acquire();
 949	return 0;
 950}
 951
 952static int acpi_s2idle_prepare(void)
 953{
 954	if (acpi_sci_irq_valid()) {
 955		enable_irq_wake(acpi_sci_irq);
 956		acpi_ec_set_gpe_wake_mask(ACPI_GPE_ENABLE);
 957	}
 958
 959	acpi_enable_wakeup_devices(ACPI_STATE_S0);
 960
 961	/* Change the configuration of GPEs to avoid spurious wakeup. */
 962	acpi_enable_all_wakeup_gpes();
 963	acpi_os_wait_events_complete();
 964
 965	s2idle_wakeup = true;
 966	return 0;
 967}
 968
 969static int acpi_s2idle_prepare_late(void)
 970{
 971	if (!lps0_device_handle || sleep_no_lps0)
 972		return 0;
 973
 974	if (pm_debug_messages_on)
 975		lpi_check_constraints();
 976
 977	acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF);
 978	acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY);
 979
 980	return 0;
 981}
 982
 983static bool acpi_s2idle_wake(void)
 984{
 985	if (!acpi_sci_irq_valid())
 986		return pm_wakeup_pending();
 987
 988	while (pm_wakeup_pending()) {
 989		/*
 990		 * If IRQD_WAKEUP_ARMED is set for the SCI at this point, the
 991		 * SCI has not triggered while suspended, so bail out (the
 992		 * wakeup is pending anyway and the SCI is not the source of
 993		 * it).
 994		 */
 995		if (irqd_is_wakeup_armed(irq_get_irq_data(acpi_sci_irq))) {
 996			pm_pr_dbg("Wakeup unrelated to ACPI SCI\n");
 997			return true;
 998		}
 999
1000		/*
1001		 * If the status bit of any enabled fixed event is set, the
1002		 * wakeup is regarded as valid.
1003		 */
1004		if (acpi_any_fixed_event_status_set()) {
1005			pm_pr_dbg("ACPI fixed event wakeup\n");
1006			return true;
1007		}
1008
1009		/* Check wakeups from drivers sharing the SCI. */
1010		if (acpi_check_wakeup_handlers()) {
1011			pm_pr_dbg("ACPI custom handler wakeup\n");
1012			return true;
1013		}
1014
1015		/* Check non-EC GPE wakeups and dispatch the EC GPE. */
1016		if (acpi_ec_dispatch_gpe()) {
1017			pm_pr_dbg("ACPI non-EC GPE wakeup\n");
1018			return true;
1019		}
1020
1021		/*
1022		 * Cancel the SCI wakeup and process all pending events in case
1023		 * there are any wakeup ones in there.
1024		 *
1025		 * Note that if any non-EC GPEs are active at this point, the
1026		 * SCI will retrigger after the rearming below, so no events
1027		 * should be missed by canceling the wakeup here.
1028		 */
1029		pm_system_cancel_wakeup();
1030		acpi_os_wait_events_complete();
1031
1032		/*
1033		 * The SCI is in the "suspended" state now and it cannot produce
1034		 * new wakeup events till the rearming below, so if any of them
1035		 * are pending here, they must be resulting from the processing
1036		 * of EC events above or coming from somewhere else.
1037		 */
1038		if (pm_wakeup_pending()) {
1039			pm_pr_dbg("Wakeup after ACPI Notify sync\n");
1040			return true;
1041		}
1042
1043		rearm_wake_irq(acpi_sci_irq);
1044	}
1045
1046	return false;
1047}
1048
1049static void acpi_s2idle_restore_early(void)
1050{
1051	if (!lps0_device_handle || sleep_no_lps0)
1052		return;
1053
1054	acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT);
1055	acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON);
1056}
1057
1058static void acpi_s2idle_restore(void)
1059{
1060	/*
1061	 * Drain pending events before restoring the working-state configuration
1062	 * of GPEs.
1063	 */
1064	acpi_os_wait_events_complete(); /* synchronize GPE processing */
1065	acpi_ec_flush_work(); /* flush the EC driver's workqueues */
1066	acpi_os_wait_events_complete(); /* synchronize Notify handling */
1067
1068	s2idle_wakeup = false;
1069
1070	acpi_enable_all_runtime_gpes();
1071
1072	acpi_disable_wakeup_devices(ACPI_STATE_S0);
1073
1074	if (acpi_sci_irq_valid()) {
1075		acpi_ec_set_gpe_wake_mask(ACPI_GPE_DISABLE);
1076		disable_irq_wake(acpi_sci_irq);
1077	}
1078}
1079
1080static void acpi_s2idle_end(void)
1081{
1082	acpi_scan_lock_release();
1083}
1084
1085static const struct platform_s2idle_ops acpi_s2idle_ops = {
1086	.begin = acpi_s2idle_begin,
1087	.prepare = acpi_s2idle_prepare,
1088	.prepare_late = acpi_s2idle_prepare_late,
1089	.wake = acpi_s2idle_wake,
1090	.restore_early = acpi_s2idle_restore_early,
1091	.restore = acpi_s2idle_restore,
1092	.end = acpi_s2idle_end,
1093};
1094
1095static void acpi_sleep_suspend_setup(void)
1096{
1097	int i;
1098
1099	for (i = ACPI_STATE_S1; i < ACPI_STATE_S4; i++)
1100		if (acpi_sleep_state_supported(i))
1101			sleep_states[i] = 1;
1102
1103	suspend_set_ops(old_suspend_ordering ?
1104		&acpi_suspend_ops_old : &acpi_suspend_ops);
1105
1106	acpi_scan_add_handler(&lps0_handler);
1107	s2idle_set_ops(&acpi_s2idle_ops);
1108}
1109
1110#else /* !CONFIG_SUSPEND */
1111#define s2idle_wakeup		(false)
1112#define lps0_device_handle	(NULL)
1113static inline void acpi_sleep_suspend_setup(void) {}
1114#endif /* !CONFIG_SUSPEND */
1115
1116bool acpi_s2idle_wakeup(void)
1117{
1118	return s2idle_wakeup;
1119}
1120
1121#ifdef CONFIG_PM_SLEEP
1122static u32 saved_bm_rld;
1123
1124static int  acpi_save_bm_rld(void)
1125{
1126	acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &saved_bm_rld);
1127	return 0;
1128}
1129
1130static void  acpi_restore_bm_rld(void)
1131{
1132	u32 resumed_bm_rld = 0;
1133
1134	acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &resumed_bm_rld);
1135	if (resumed_bm_rld == saved_bm_rld)
1136		return;
1137
1138	acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, saved_bm_rld);
1139}
1140
1141static struct syscore_ops acpi_sleep_syscore_ops = {
1142	.suspend = acpi_save_bm_rld,
1143	.resume = acpi_restore_bm_rld,
1144};
1145
1146static void acpi_sleep_syscore_init(void)
1147{
1148	register_syscore_ops(&acpi_sleep_syscore_ops);
1149}
1150#else
1151static inline void acpi_sleep_syscore_init(void) {}
1152#endif /* CONFIG_PM_SLEEP */
1153
1154#ifdef CONFIG_HIBERNATION
1155static unsigned long s4_hardware_signature;
1156static struct acpi_table_facs *facs;
1157static bool nosigcheck;
1158
1159void __init acpi_no_s4_hw_signature(void)
1160{
1161	nosigcheck = true;
1162}
1163
1164static int acpi_hibernation_begin(pm_message_t stage)
1165{
1166	if (!nvs_nosave) {
1167		int error = suspend_nvs_alloc();
1168		if (error)
1169			return error;
1170	}
1171
1172	if (stage.event == PM_EVENT_HIBERNATE)
1173		pm_set_suspend_via_firmware();
1174
1175	acpi_pm_start(ACPI_STATE_S4);
1176	return 0;
1177}
1178
1179static int acpi_hibernation_enter(void)
1180{
1181	acpi_status status = AE_OK;
1182
1183	ACPI_FLUSH_CPU_CACHE();
1184
1185	/* This shouldn't return.  If it returns, we have a problem */
1186	status = acpi_enter_sleep_state(ACPI_STATE_S4);
1187	/* Reprogram control registers */
1188	acpi_leave_sleep_state_prep(ACPI_STATE_S4);
1189
1190	return ACPI_SUCCESS(status) ? 0 : -EFAULT;
1191}
1192
1193static void acpi_hibernation_leave(void)
1194{
1195	pm_set_resume_via_firmware();
1196	/*
1197	 * If ACPI is not enabled by the BIOS and the boot kernel, we need to
1198	 * enable it here.
1199	 */
1200	acpi_enable();
1201	/* Reprogram control registers */
1202	acpi_leave_sleep_state_prep(ACPI_STATE_S4);
1203	/* Check the hardware signature */
1204	if (facs && s4_hardware_signature != facs->hardware_signature)
1205		pr_crit("ACPI: Hardware changed while hibernated, success doubtful!\n");
1206	/* Restore the NVS memory area */
1207	suspend_nvs_restore();
1208	/* Allow EC transactions to happen. */
1209	acpi_ec_unblock_transactions();
1210}
1211
1212static void acpi_pm_thaw(void)
1213{
1214	acpi_ec_unblock_transactions();
1215	acpi_enable_all_runtime_gpes();
1216}
1217
1218static const struct platform_hibernation_ops acpi_hibernation_ops = {
1219	.begin = acpi_hibernation_begin,
1220	.end = acpi_pm_end,
1221	.pre_snapshot = acpi_pm_prepare,
1222	.finish = acpi_pm_finish,
1223	.prepare = acpi_pm_prepare,
1224	.enter = acpi_hibernation_enter,
1225	.leave = acpi_hibernation_leave,
1226	.pre_restore = acpi_pm_freeze,
1227	.restore_cleanup = acpi_pm_thaw,
1228};
1229
1230/**
1231 *	acpi_hibernation_begin_old - Set the target system sleep state to
1232 *		ACPI_STATE_S4 and execute the _PTS control method.  This
1233 *		function is used if the pre-ACPI 2.0 suspend ordering has been
1234 *		requested.
1235 */
1236static int acpi_hibernation_begin_old(pm_message_t stage)
1237{
1238	int error;
1239	/*
1240	 * The _TTS object should always be evaluated before the _PTS object.
 1241	 * When old_suspend_ordering is true, the _PTS object is
 1242	 * evaluated in acpi_sleep_prepare().
1243	 */
1244	acpi_sleep_tts_switch(ACPI_STATE_S4);
1245
1246	error = acpi_sleep_prepare(ACPI_STATE_S4);
1247	if (error)
1248		return error;
1249
1250	if (!nvs_nosave) {
1251		error = suspend_nvs_alloc();
1252		if (error)
1253			return error;
1254	}
1255
1256	if (stage.event == PM_EVENT_HIBERNATE)
1257		pm_set_suspend_via_firmware();
1258
1259	acpi_target_sleep_state = ACPI_STATE_S4;
1260	acpi_scan_lock_acquire();
1261	return 0;
1262}
1263
1264/*
1265 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
1266 * been requested.
1267 */
1268static const struct platform_hibernation_ops acpi_hibernation_ops_old = {
1269	.begin = acpi_hibernation_begin_old,
1270	.end = acpi_pm_end,
1271	.pre_snapshot = acpi_pm_pre_suspend,
1272	.prepare = acpi_pm_freeze,
1273	.finish = acpi_pm_finish,
1274	.enter = acpi_hibernation_enter,
1275	.leave = acpi_hibernation_leave,
1276	.pre_restore = acpi_pm_freeze,
1277	.restore_cleanup = acpi_pm_thaw,
1278	.recover = acpi_pm_finish,
1279};
1280
1281static void acpi_sleep_hibernate_setup(void)
1282{
1283	if (!acpi_sleep_state_supported(ACPI_STATE_S4))
1284		return;
1285
1286	hibernation_set_ops(old_suspend_ordering ?
1287			&acpi_hibernation_ops_old : &acpi_hibernation_ops);
1288	sleep_states[ACPI_STATE_S4] = 1;
1289	if (nosigcheck)
1290		return;
1291
1292	acpi_get_table(ACPI_SIG_FACS, 1, (struct acpi_table_header **)&facs);
1293	if (facs) {
1294		s4_hardware_signature = facs->hardware_signature;
1295		acpi_put_table((struct acpi_table_header *)facs);
1296	}
1297}
1298#else /* !CONFIG_HIBERNATION */
1299static inline void acpi_sleep_hibernate_setup(void) {}
1300#endif /* !CONFIG_HIBERNATION */
1301
1302static void acpi_power_off_prepare(void)
1303{
1304	/* Prepare to power off the system */
1305	acpi_sleep_prepare(ACPI_STATE_S5);
1306	acpi_disable_all_gpes();
1307	acpi_os_wait_events_complete();
1308}
1309
1310static void acpi_power_off(void)
1311{
1312	/* acpi_sleep_prepare(ACPI_STATE_S5) should have already been called */
1313	printk(KERN_DEBUG "%s called\n", __func__);
1314	local_irq_disable();
1315	acpi_enter_sleep_state(ACPI_STATE_S5);
1316}
1317
1318int __init acpi_sleep_init(void)
1319{
1320	char supported[ACPI_S_STATE_COUNT * 3 + 1];
1321	char *pos = supported;
1322	int i;
1323
1324	acpi_sleep_dmi_check();
1325
1326	sleep_states[ACPI_STATE_S0] = 1;
1327
1328	acpi_sleep_syscore_init();
1329	acpi_sleep_suspend_setup();
1330	acpi_sleep_hibernate_setup();
1331
1332	if (acpi_sleep_state_supported(ACPI_STATE_S5)) {
1333		sleep_states[ACPI_STATE_S5] = 1;
1334		pm_power_off_prepare = acpi_power_off_prepare;
1335		pm_power_off = acpi_power_off;
1336	} else {
1337		acpi_no_s5 = true;
1338	}
1339
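	/*
	 * Each supported state appends " S%d" (three characters), which is why
	 * the supported[] buffer above is sized ACPI_S_STATE_COUNT * 3 + 1.
	 */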
1340	supported[0] = 0;
1341	for (i = 0; i < ACPI_S_STATE_COUNT; i++) {
1342		if (sleep_states[i])
1343			pos += sprintf(pos, " S%d", i);
1344	}
1345	pr_info(PREFIX "(supports%s)\n", supported);
1346
1347	/*
1348	 * Register the tts_notifier to reboot notifier list so that the _TTS
1349	 * object can also be evaluated when the system enters S5.
1350	 */
1351	register_reboot_notifier(&tts_notifier);
1352	return 0;
1353}
v6.9.4
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * sleep.c - ACPI sleep support.
   4 *
   5 * Copyright (c) 2005 Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>
   6 * Copyright (c) 2004 David Shaohua Li <shaohua.li@intel.com>
   7 * Copyright (c) 2000-2003 Patrick Mochel
   8 * Copyright (c) 2003 Open Source Development Lab
   9 */
  10
  11#define pr_fmt(fmt) "ACPI: PM: " fmt
  12
  13#include <linux/delay.h>
  14#include <linux/irq.h>
  15#include <linux/dmi.h>
  16#include <linux/device.h>
  17#include <linux/interrupt.h>
  18#include <linux/suspend.h>
  19#include <linux/reboot.h>
  20#include <linux/acpi.h>
  21#include <linux/module.h>
  22#include <linux/syscore_ops.h>
  23#include <asm/io.h>
  24#include <trace/events/power.h>
  25
  26#include "internal.h"
  27#include "sleep.h"
  28
  29/*
  30 * Some HW-full platforms do not have _S5, so they may need
  31 * to leverage efi power off for a shutdown.
  32 */
  33bool acpi_no_s5;
  34static u8 sleep_states[ACPI_S_STATE_COUNT];
  35
  36static void acpi_sleep_tts_switch(u32 acpi_state)
  37{
  38	acpi_status status;
  39
  40	status = acpi_execute_simple_method(NULL, "\\_TTS", acpi_state);
  41	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
  42		/*
  43		 * OS can't evaluate the _TTS object correctly. Some warning
  44		 * message will be printed. But it won't break anything.
  45		 */
  46		pr_notice("Failure in evaluating _TTS object\n");
  47	}
  48}
  49
  50static int tts_notify_reboot(struct notifier_block *this,
  51			unsigned long code, void *x)
  52{
  53	acpi_sleep_tts_switch(ACPI_STATE_S5);
  54	return NOTIFY_DONE;
  55}
  56
  57static struct notifier_block tts_notifier = {
  58	.notifier_call	= tts_notify_reboot,
  59	.next		= NULL,
  60	.priority	= 0,
  61};
  62
  63#ifndef acpi_skip_set_wakeup_address
  64#define acpi_skip_set_wakeup_address() false
  65#endif
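/*
 * Architectures may provide their own acpi_skip_set_wakeup_address() to
 * skip setting the firmware waking vector; the default above is false.
 */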
  66
  67static int acpi_sleep_prepare(u32 acpi_state)
  68{
  69#ifdef CONFIG_ACPI_SLEEP
  70	unsigned long acpi_wakeup_address;
  71
  72	/* do we have a wakeup address for S2 and S3? */
  73	if (acpi_state == ACPI_STATE_S3 && !acpi_skip_set_wakeup_address()) {
  74		acpi_wakeup_address = acpi_get_wakeup_address();
  75		if (!acpi_wakeup_address)
  76			return -EFAULT;
  77		acpi_set_waking_vector(acpi_wakeup_address);
  78
  79	}
  80#endif
  81	pr_info("Preparing to enter system sleep state S%d\n", acpi_state);
  82	acpi_enable_wakeup_devices(acpi_state);
  83	acpi_enter_sleep_state_prep(acpi_state);
  84	return 0;
  85}
  86
  87bool acpi_sleep_state_supported(u8 sleep_state)
  88{
  89	acpi_status status;
  90	u8 type_a, type_b;
  91
  92	status = acpi_get_sleep_type_data(sleep_state, &type_a, &type_b);
  93	return ACPI_SUCCESS(status) && (!acpi_gbl_reduced_hardware
  94		|| (acpi_gbl_FADT.sleep_control.address
  95			&& acpi_gbl_FADT.sleep_status.address));
  96}
  97
  98#ifdef CONFIG_ACPI_SLEEP
  99static u32 acpi_target_sleep_state = ACPI_STATE_S0;
 100
 101u32 acpi_target_system_state(void)
 102{
 103	return acpi_target_sleep_state;
 104}
 105EXPORT_SYMBOL_GPL(acpi_target_system_state);
 106
 107static bool pwr_btn_event_pending;
 108
 109/*
 110 * The ACPI specification wants us to save NVS memory regions during hibernation
 111 * and to restore them during the subsequent resume.  Windows does that also for
 112 * suspend to RAM.  However, it is known that this mechanism does not work on
 113 * all machines, so we allow the user to disable it with the help of the
 114 * 'acpi_sleep=nonvs' kernel command line option.
 115 */
 116static bool nvs_nosave;
 117
 118void __init acpi_nvs_nosave(void)
 119{
 120	nvs_nosave = true;
 121}
 122
 123/*
 124 * The ACPI specification wants us to save NVS memory regions during hibernation
 125 * but says nothing about saving NVS during S3.  Not all versions of Windows
 126 * save NVS on S3 suspend either, and it is clear that not all systems need
 127 * NVS to be saved at S3 time.  To improve suspend/resume time, allow the
 128 * user to disable saving NVS on S3 if their system does not require it, but
 129 * continue to save/restore NVS for S4 as specified.
 130 */
 131static bool nvs_nosave_s3;
 132
 133void __init acpi_nvs_nosave_s3(void)
 134{
 135	nvs_nosave_s3 = true;
 136}
 137
 138static int __init init_nvs_save_s3(const struct dmi_system_id *d)
 139{
 140	nvs_nosave_s3 = false;
 141	return 0;
 142}
 143
 144/*
 145 * ACPI 1.0 wants us to execute _PTS before suspending devices, so we allow the
 146 * user to request that behavior by using the 'acpi_old_suspend_ordering'
 147 * kernel command line option that causes the following variable to be set.
 148 */
 149static bool old_suspend_ordering;
 150
 151void __init acpi_old_suspend_ordering(void)
 152{
 153	old_suspend_ordering = true;
 154}
 155
 156static int __init init_old_suspend_ordering(const struct dmi_system_id *d)
 157{
 158	acpi_old_suspend_ordering();
 159	return 0;
 160}
 161
 162static int __init init_nvs_nosave(const struct dmi_system_id *d)
 163{
 164	acpi_nvs_nosave();
 165	return 0;
 166}
 167
 168bool acpi_sleep_default_s3;
 169
 170static int __init init_default_s3(const struct dmi_system_id *d)
 171{
 172	acpi_sleep_default_s3 = true;
 173	return 0;
 174}
 175
 176static const struct dmi_system_id acpisleep_dmi_table[] __initconst = {
 177	{
 178	.callback = init_old_suspend_ordering,
 179	.ident = "Abit KN9 (nForce4 variant)",
 180	.matches = {
 181		DMI_MATCH(DMI_BOARD_VENDOR, "http://www.abit.com.tw/"),
 182		DMI_MATCH(DMI_BOARD_NAME, "KN9 Series(NF-CK804)"),
 183		},
 184	},
 185	{
 186	.callback = init_old_suspend_ordering,
 187	.ident = "HP xw4600 Workstation",
 188	.matches = {
 189		DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
 190		DMI_MATCH(DMI_PRODUCT_NAME, "HP xw4600 Workstation"),
 191		},
 192	},
 193	{
 194	.callback = init_old_suspend_ordering,
 195	.ident = "Asus Pundit P1-AH2 (M2N8L motherboard)",
 196	.matches = {
 197		DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTek Computer INC."),
 198		DMI_MATCH(DMI_BOARD_NAME, "M2N8L"),
 199		},
 200	},
 201	{
 202	.callback = init_old_suspend_ordering,
 203	.ident = "Panasonic CF51-2L",
 204	.matches = {
 205		DMI_MATCH(DMI_BOARD_VENDOR,
 206				"Matsushita Electric Industrial Co.,Ltd."),
 207		DMI_MATCH(DMI_BOARD_NAME, "CF51-2L"),
 208		},
 209	},
 210	{
 211	.callback = init_nvs_nosave,
 212	.ident = "Sony Vaio VGN-FW41E_H",
 213	.matches = {
 214		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
 215		DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW41E_H"),
 216		},
 217	},
 218	{
 219	.callback = init_nvs_nosave,
 220	.ident = "Sony Vaio VGN-FW21E",
 221	.matches = {
 222		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
 223		DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW21E"),
 224		},
 225	},
 226	{
 227	.callback = init_nvs_nosave,
 228	.ident = "Sony Vaio VGN-FW21M",
 229	.matches = {
 230		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
 231		DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW21M"),
 232		},
 233	},
 234	{
 235	.callback = init_nvs_nosave,
 236	.ident = "Sony Vaio VPCEB17FX",
 237	.matches = {
 238		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
 239		DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB17FX"),
 240		},
 241	},
 242	{
 243	.callback = init_nvs_nosave,
 244	.ident = "Sony Vaio VGN-SR11M",
 245	.matches = {
 246		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
 247		DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR11M"),
 248		},
 249	},
 250	{
 251	.callback = init_nvs_nosave,
 252	.ident = "Everex StepNote Series",
 253	.matches = {
 254		DMI_MATCH(DMI_SYS_VENDOR, "Everex Systems, Inc."),
 255		DMI_MATCH(DMI_PRODUCT_NAME, "Everex StepNote Series"),
 256		},
 257	},
 258	{
 259	.callback = init_nvs_nosave,
 260	.ident = "Sony Vaio VPCEB1Z1E",
 261	.matches = {
 262		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
 263		DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB1Z1E"),
 264		},
 265	},
 266	{
 267	.callback = init_nvs_nosave,
 268	.ident = "Sony Vaio VGN-NW130D",
 269	.matches = {
 270		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
 271		DMI_MATCH(DMI_PRODUCT_NAME, "VGN-NW130D"),
 272		},
 273	},
 274	{
 275	.callback = init_nvs_nosave,
 276	.ident = "Sony Vaio VPCCW29FX",
 277	.matches = {
 278		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
 279		DMI_MATCH(DMI_PRODUCT_NAME, "VPCCW29FX"),
 280		},
 281	},
 282	{
 283	.callback = init_nvs_nosave,
 284	.ident = "Averatec AV1020-ED2",
 285	.matches = {
 286		DMI_MATCH(DMI_SYS_VENDOR, "AVERATEC"),
 287		DMI_MATCH(DMI_PRODUCT_NAME, "1000 Series"),
 288		},
 289	},
 290	{
 291	.callback = init_old_suspend_ordering,
 292	.ident = "Asus A8N-SLI DELUXE",
 293	.matches = {
 294		DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
 295		DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI DELUXE"),
 296		},
 297	},
 298	{
 299	.callback = init_old_suspend_ordering,
 300	.ident = "Asus A8N-SLI Premium",
 301	.matches = {
 302		DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
 303		DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI Premium"),
 304		},
 305	},
 306	{
 307	.callback = init_nvs_nosave,
 308	.ident = "Sony Vaio VGN-SR26GN_P",
 309	.matches = {
 310		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
 311		DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR26GN_P"),
 312		},
 313	},
 314	{
 315	.callback = init_nvs_nosave,
 316	.ident = "Sony Vaio VPCEB1S1E",
 317	.matches = {
 318		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
 319		DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB1S1E"),
 320		},
 321	},
 322	{
 323	.callback = init_nvs_nosave,
 324	.ident = "Sony Vaio VGN-FW520F",
 325	.matches = {
 326		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
 327		DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW520F"),
 328		},
 329	},
 330	{
 331	.callback = init_nvs_nosave,
 332	.ident = "Asus K54C",
 333	.matches = {
 334		DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
 335		DMI_MATCH(DMI_PRODUCT_NAME, "K54C"),
 336		},
 337	},
 338	{
 339	.callback = init_nvs_nosave,
 340	.ident = "Asus K54HR",
 341	.matches = {
 342		DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
 343		DMI_MATCH(DMI_PRODUCT_NAME, "K54HR"),
 344		},
 345	},
 346	{
 347	.callback = init_nvs_save_s3,
 348	.ident = "Asus 1025C",
 349	.matches = {
 350		DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
 351		DMI_MATCH(DMI_PRODUCT_NAME, "1025C"),
 352		},
 353	},
 354	/*
 355	 * https://bugzilla.kernel.org/show_bug.cgi?id=189431
 356	 * Lenovo G50-45 is a platform later than 2012, but needs nvs memory
 357	 * saving during S3.
 358	 */
 359	{
 360	.callback = init_nvs_save_s3,
 361	.ident = "Lenovo G50-45",
 362	.matches = {
 363		DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
 364		DMI_MATCH(DMI_PRODUCT_NAME, "80E3"),
 365		},
 366	},
 367	{
 368	.callback = init_nvs_save_s3,
 369	.ident = "Lenovo G40-45",
 370	.matches = {
 371		DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
 372		DMI_MATCH(DMI_PRODUCT_NAME, "80E1"),
 373		},
 374	},
 375	/*
 376	 * ThinkPad X1 Tablet(2016) cannot do suspend-to-idle using
 377	 * the Low Power S0 Idle firmware interface (see
 378	 * https://bugzilla.kernel.org/show_bug.cgi?id=199057).
 379	 */
 380	{
 381	.callback = init_default_s3,
 382	.ident = "ThinkPad X1 Tablet(2016)",
 383	.matches = {
 384		DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
 385		DMI_MATCH(DMI_PRODUCT_NAME, "20GGA00L00"),
 386		},
 387	},
 388	{},
 389};
 390
 391static bool ignore_blacklist;
 392
 393void __init acpi_sleep_no_blacklist(void)
 394{
 395	ignore_blacklist = true;
 396}
 397
 398static void __init acpi_sleep_dmi_check(void)
 399{
 400	if (ignore_blacklist)
 401		return;
 402
 403	if (dmi_get_bios_year() >= 2012)
 404		acpi_nvs_nosave_s3();
 405
 406	dmi_check_system(acpisleep_dmi_table);
 407}
 408
 409/**
 410 * acpi_pm_freeze - Disable the GPEs and suspend EC transactions.
 411 */
 412static int acpi_pm_freeze(void)
 413{
 414	acpi_disable_all_gpes();
 415	acpi_os_wait_events_complete();
 416	acpi_ec_block_transactions();
 417	return 0;
 418}
 419
 420/**
 421 * acpi_pm_pre_suspend - Enable wakeup devices, "freeze" EC and save NVS.
 422 */
 423static int acpi_pm_pre_suspend(void)
 424{
 425	acpi_pm_freeze();
 426	return suspend_nvs_save();
 427}
 428
 429/**
 430 *	__acpi_pm_prepare - Prepare the platform to enter the target state.
 431 *
 432 *	If necessary, set the firmware waking vector and do arch-specific
 433 *	nastiness to get the wakeup code to the waking vector.
 434 */
 435static int __acpi_pm_prepare(void)
 436{
 437	int error = acpi_sleep_prepare(acpi_target_sleep_state);
 438	if (error)
 439		acpi_target_sleep_state = ACPI_STATE_S0;
 440
 441	return error;
 442}
 443
 444/**
 445 *	acpi_pm_prepare - Prepare the platform to enter the target sleep
 446 *		state and disable the GPEs.
 447 */
 448static int acpi_pm_prepare(void)
 449{
 450	int error = __acpi_pm_prepare();
 451	if (!error)
 452		error = acpi_pm_pre_suspend();
 453
 454	return error;
 455}
 456
 457/**
 458 *	acpi_pm_finish - Instruct the platform to leave a sleep state.
 459 *
 460 *	This is called after we wake back up (or if entering the sleep state
 461 *	failed).
 462 */
 463static void acpi_pm_finish(void)
 464{
 465	struct acpi_device *pwr_btn_adev;
 466	u32 acpi_state = acpi_target_sleep_state;
 467
 468	acpi_ec_unblock_transactions();
 469	suspend_nvs_free();
 470
 471	if (acpi_state == ACPI_STATE_S0)
 472		return;
 473
 474	pr_info("Waking up from system sleep state S%d\n", acpi_state);
 475	acpi_disable_wakeup_devices(acpi_state);
 476	acpi_leave_sleep_state(acpi_state);
 477
 478	/* reset firmware waking vector */
 479	acpi_set_waking_vector(0);
 480
 481	acpi_target_sleep_state = ACPI_STATE_S0;
 482
 483	acpi_resume_power_resources();
 484
 485	/* If we were woken with the fixed power button, provide a small
 486	 * hint to userspace in the form of a wakeup event on the fixed power
 487	 * button device (if it can be found).
 488	 *
 489	 * We delay the event generation til now, as the PM layer requires
 490	 * timekeeping to be running before we generate events. */
 491	if (!pwr_btn_event_pending)
 492		return;
 493
 494	pwr_btn_event_pending = false;
 495	pwr_btn_adev = acpi_dev_get_first_match_dev(ACPI_BUTTON_HID_POWERF,
 496						    NULL, -1);
 497	if (pwr_btn_adev) {
 498		pm_wakeup_event(&pwr_btn_adev->dev, 0);
 499		acpi_dev_put(pwr_btn_adev);
 500	}
 501}
 502
 503/**
 504 * acpi_pm_start - Start system PM transition.
 505 * @acpi_state: The target ACPI power state to transition to.
 506 */
 507static void acpi_pm_start(u32 acpi_state)
 508{
 509	acpi_target_sleep_state = acpi_state;
 510	acpi_sleep_tts_switch(acpi_target_sleep_state);
 511	acpi_scan_lock_acquire();
 512}
 513
 514/**
 515 * acpi_pm_end - Finish up system PM transition.
 516 */
 517static void acpi_pm_end(void)
 518{
 519	acpi_turn_off_unused_power_resources();
 520	acpi_scan_lock_release();
 521	/*
 522	 * This is necessary in case acpi_pm_finish() is not called during a
 523	 * failing transition to a sleep state.
 524	 */
 525	acpi_target_sleep_state = ACPI_STATE_S0;
 526	acpi_sleep_tts_switch(acpi_target_sleep_state);
 527}
 528#else /* !CONFIG_ACPI_SLEEP */
 529#define sleep_no_lps0	(1)
 530#define acpi_target_sleep_state	ACPI_STATE_S0
 531#define acpi_sleep_default_s3	(1)
 532static inline void acpi_sleep_dmi_check(void) {}
 533#endif /* CONFIG_ACPI_SLEEP */
 534
 535#ifdef CONFIG_SUSPEND
 536static u32 acpi_suspend_states[] = {
 537	[PM_SUSPEND_ON] = ACPI_STATE_S0,
 538	[PM_SUSPEND_STANDBY] = ACPI_STATE_S1,
 539	[PM_SUSPEND_MEM] = ACPI_STATE_S3,
 540	[PM_SUSPEND_MAX] = ACPI_STATE_S5
 541};
 542
 543/**
 544 * acpi_suspend_begin - Set the target system sleep state to the state
 545 *	associated with given @pm_state, if supported.
 546 * @pm_state: The target system power management state.
 547 */
 548static int acpi_suspend_begin(suspend_state_t pm_state)
 549{
 550	u32 acpi_state = acpi_suspend_states[pm_state];
 551	int error;
 552
 553	error = (nvs_nosave || nvs_nosave_s3) ? 0 : suspend_nvs_alloc();
 554	if (error)
 555		return error;
 556
 557	if (!sleep_states[acpi_state]) {
 558		pr_err("ACPI does not support sleep state S%u\n", acpi_state);
 559		return -ENOSYS;
 560	}
 561	if (acpi_state > ACPI_STATE_S1)
 562		pm_set_suspend_via_firmware();
 563
 564	acpi_pm_start(acpi_state);
 565	return 0;
 566}
 567
 568/**
 569 *	acpi_suspend_enter - Actually enter a sleep state.
 570 *	@pm_state: ignored
 571 *
 572 *	Flush caches and go to sleep. For STR we have to call arch-specific
  573 *	assembly, which in turn calls acpi_enter_sleep_state().
 574 *	It's unfortunate, but it works. Please fix if you're feeling frisky.
 575 */
 576static int acpi_suspend_enter(suspend_state_t pm_state)
 577{
 578	acpi_status status = AE_OK;
 579	u32 acpi_state = acpi_target_sleep_state;
 580	int error;
 581
 582	trace_suspend_resume(TPS("acpi_suspend"), acpi_state, true);
 583	switch (acpi_state) {
 584	case ACPI_STATE_S1:
 585		barrier();
 586		status = acpi_enter_sleep_state(acpi_state);
 587		break;
 588
 589	case ACPI_STATE_S3:
 590		if (!acpi_suspend_lowlevel)
 591			return -ENOSYS;
 592		error = acpi_suspend_lowlevel();
 593		if (error)
 594			return error;
 595		pr_info("Low-level resume complete\n");
 596		pm_set_resume_via_firmware();
 597		break;
 598	}
 599	trace_suspend_resume(TPS("acpi_suspend"), acpi_state, false);
 600
 601	/* This violates the spec but is required for bug compatibility. */
 602	acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1);
 603
 604	/* Reprogram control registers */
 605	acpi_leave_sleep_state_prep(acpi_state);
 606
  607	/* The ACPI 3.0 spec (p. 62) says that it is the responsibility
 608	 * of the OSPM to clear the status bit [ implying that the
 609	 * POWER_BUTTON event should not reach userspace ]
 610	 *
 611	 * However, we do generate a small hint for userspace in the form of
 612	 * a wakeup event. We flag this condition for now and generate the
 613	 * event later, as we're currently too early in resume to be able to
 614	 * generate wakeup events.
 615	 */
 616	if (ACPI_SUCCESS(status) && (acpi_state == ACPI_STATE_S3)) {
 617		acpi_event_status pwr_btn_status = ACPI_EVENT_FLAG_DISABLED;
 618
 619		acpi_get_event_status(ACPI_EVENT_POWER_BUTTON, &pwr_btn_status);
 620
 621		if (pwr_btn_status & ACPI_EVENT_FLAG_STATUS_SET) {
 622			acpi_clear_event(ACPI_EVENT_POWER_BUTTON);
 623			/* Flag for later */
 624			pwr_btn_event_pending = true;
 625		}
 626	}
 627
 628	/*
 629	 * Disable all GPE and clear their status bits before interrupts are
 630	 * enabled. Some GPEs (like wakeup GPEs) have no handlers and this can
  631	 * prevent them from producing spurious interrupts.
 632	 *
 633	 * acpi_leave_sleep_state() will reenable specific GPEs later.
 634	 *
 635	 * Because this code runs on one CPU with disabled interrupts (all of
 636	 * the other CPUs are offline at this time), it need not acquire any
 637	 * sleeping locks which may trigger an implicit preemption point even
 638	 * if there is no contention, so avoid doing that by using a low-level
 639	 * library routine here.
 640	 */
 641	acpi_hw_disable_all_gpes();
 642	/* Allow EC transactions to happen. */
 643	acpi_ec_unblock_transactions();
 644
 645	suspend_nvs_restore();
 646
 647	return ACPI_SUCCESS(status) ? 0 : -EFAULT;
 648}
 649
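/*
 * .valid() callback for the suspend ops below: report whether the ACPI
 * sleep state corresponding to @pm_state is supported by the platform.
 */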
 650static int acpi_suspend_state_valid(suspend_state_t pm_state)
 651{
 652	u32 acpi_state;
 653
 654	switch (pm_state) {
 655	case PM_SUSPEND_ON:
 656	case PM_SUSPEND_STANDBY:
 657	case PM_SUSPEND_MEM:
 658		acpi_state = acpi_suspend_states[pm_state];
 659
 660		return sleep_states[acpi_state];
 661	default:
 662		return 0;
 663	}
 664}
 665
 666static const struct platform_suspend_ops acpi_suspend_ops = {
 667	.valid = acpi_suspend_state_valid,
 668	.begin = acpi_suspend_begin,
 669	.prepare_late = acpi_pm_prepare,
 670	.enter = acpi_suspend_enter,
 671	.wake = acpi_pm_finish,
 672	.end = acpi_pm_end,
 673};
 674
 675/**
 676 * acpi_suspend_begin_old - Set the target system sleep state to the
  677	 *	state associated with the given @pm_state, if supported, and
 678 *	execute the _PTS control method.  This function is used if the
 679 *	pre-ACPI 2.0 suspend ordering has been requested.
 680 * @pm_state: The target suspend state for the system.
 681 */
 682static int acpi_suspend_begin_old(suspend_state_t pm_state)
 683{
 684	int error = acpi_suspend_begin(pm_state);
 685	if (!error)
 686		error = __acpi_pm_prepare();
 687
 688	return error;
 689}
 690
 691/*
 692 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
 693 * been requested.
 694 */
 695static const struct platform_suspend_ops acpi_suspend_ops_old = {
 696	.valid = acpi_suspend_state_valid,
 697	.begin = acpi_suspend_begin_old,
 698	.prepare_late = acpi_pm_pre_suspend,
 699	.enter = acpi_suspend_enter,
 700	.wake = acpi_pm_finish,
 701	.end = acpi_pm_end,
 702	.recover = acpi_pm_finish,
 703};
 704
 705static bool s2idle_wakeup;
 706
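/*
 * Suspend-to-idle (s2idle) callbacks.  acpi_s2idle_begin() takes the ACPI
 * scan lock and acpi_s2idle_end() releases it, so the callbacks in between
 * run with that lock held.
 */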
 707int acpi_s2idle_begin(void)
 708{
 709	acpi_scan_lock_acquire();
 710	return 0;
 711}
 712
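/*
 * Arm the platform for suspend-to-idle: make the SCI a wakeup IRQ, let the
 * EC GPE wake the system, enable the ACPI wakeup devices for S0 and switch
 * to the wakeup-only GPE configuration.
 */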
 713int acpi_s2idle_prepare(void)
 714{
 715	if (acpi_sci_irq_valid()) {
 716		int error;
 717
 718		error = enable_irq_wake(acpi_sci_irq);
 719		if (error)
 720			pr_warn("Warning: Failed to enable wakeup from IRQ %d: %d\n",
 721				acpi_sci_irq, error);
 722
 723		acpi_ec_set_gpe_wake_mask(ACPI_GPE_ENABLE);
 724	}
 725
 726	acpi_enable_wakeup_devices(ACPI_STATE_S0);
 727
 728	/* Change the configuration of GPEs to avoid spurious wakeup. */
 729	acpi_enable_all_wakeup_gpes();
 730	acpi_os_wait_events_complete();
 731
 732	s2idle_wakeup = true;
 733	return 0;
 734}
 735
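/*
 * .wake() callback: decide whether a wakeup signaled through the SCI is
 * genuine.  Returning true aborts suspend-to-idle; returning false
 * dismisses the event as spurious so the system can go back to sleep.
 */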
 736bool acpi_s2idle_wake(void)
 737{
 738	if (!acpi_sci_irq_valid())
 739		return pm_wakeup_pending();
 740
 741	while (pm_wakeup_pending()) {
 742		/*
 743		 * If IRQD_WAKEUP_ARMED is set for the SCI at this point, the
 744		 * SCI has not triggered while suspended, so bail out (the
 745		 * wakeup is pending anyway and the SCI is not the source of
 746		 * it).
 747		 */
 748		if (irqd_is_wakeup_armed(irq_get_irq_data(acpi_sci_irq))) {
 749			pm_pr_dbg("Wakeup unrelated to ACPI SCI\n");
 750			return true;
 751		}
 752
 753		/*
 754		 * If the status bit of any enabled fixed event is set, the
 755		 * wakeup is regarded as valid.
 756		 */
 757		if (acpi_any_fixed_event_status_set()) {
 758			pm_pr_dbg("ACPI fixed event wakeup\n");
 759			return true;
 760		}
 761
 762		/* Check wakeups from drivers sharing the SCI. */
 763		if (acpi_check_wakeup_handlers()) {
 764			pm_pr_dbg("ACPI custom handler wakeup\n");
 765			return true;
 766		}
 767
 768		/*
 769		 * Check non-EC GPE wakeups and if there are none, cancel the
 770		 * SCI-related wakeup and dispatch the EC GPE.
 771		 */
 772		if (acpi_ec_dispatch_gpe()) {
 773			pm_pr_dbg("ACPI non-EC GPE wakeup\n");
 774			return true;
 775		}
 776
 777		acpi_os_wait_events_complete();
 778
 779		/*
 780		 * The SCI is in the "suspended" state now and it cannot produce
 781		 * new wakeup events till the rearming below, so if any of them
 782		 * are pending here, they must be resulting from the processing
 783		 * of EC events above or coming from somewhere else.
 784		 */
 785		if (pm_wakeup_pending()) {
 786			pm_pr_dbg("Wakeup after ACPI Notify sync\n");
 787			return true;
 788		}
 789
 790		pm_pr_dbg("Rearming ACPI SCI for wakeup\n");
 791
 792		pm_wakeup_clear(acpi_sci_irq);
 793		rearm_wake_irq(acpi_sci_irq);
 794	}
 795
 796	return false;
 797}
 798
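/*
 * Undo acpi_s2idle_prepare(): drain the events still in flight, then
 * restore the runtime GPE configuration, the wakeup devices and the SCI
 * wakeup settings.
 */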
 799void acpi_s2idle_restore(void)
 800{
 801	/*
 802	 * Drain pending events before restoring the working-state configuration
 803	 * of GPEs.
 804	 */
 805	acpi_os_wait_events_complete(); /* synchronize GPE processing */
 806	acpi_ec_flush_work(); /* flush the EC driver's workqueues */
 807	acpi_os_wait_events_complete(); /* synchronize Notify handling */
 808
 809	s2idle_wakeup = false;
 810
 811	acpi_enable_all_runtime_gpes();
 812
 813	acpi_disable_wakeup_devices(ACPI_STATE_S0);
 814
 815	if (acpi_sci_irq_valid()) {
 816		acpi_ec_set_gpe_wake_mask(ACPI_GPE_DISABLE);
 817		disable_irq_wake(acpi_sci_irq);
 818	}
 819}
 820
 821void acpi_s2idle_end(void)
 822{
 823	acpi_scan_lock_release();
 824}
 825
 826static const struct platform_s2idle_ops acpi_s2idle_ops = {
 827	.begin = acpi_s2idle_begin,
 828	.prepare = acpi_s2idle_prepare,
 829	.wake = acpi_s2idle_wake,
 830	.restore = acpi_s2idle_restore,
 831	.end = acpi_s2idle_end,
 832};
 833
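/*
 * Weak default used when no platform-specific s2idle setup is provided;
 * it may be overridden to plug in an enhanced implementation.
 */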
 834void __weak acpi_s2idle_setup(void)
 835{
 836	if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)
 837		pr_info("Efficient low-power S0 idle declared\n");
 838
 839	s2idle_set_ops(&acpi_s2idle_ops);
 840}
 841
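/*
 * Detect which of S1-S3 the firmware supports and register the matching
 * suspend callbacks; suspend-to-idle support is set up unconditionally.
 */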
 842static void __init acpi_sleep_suspend_setup(void)
 843{
 844	bool suspend_ops_needed = false;
 845	int i;
 846
 847	for (i = ACPI_STATE_S1; i < ACPI_STATE_S4; i++)
 848		if (acpi_sleep_state_supported(i)) {
 849			sleep_states[i] = 1;
 850			suspend_ops_needed = true;
 851		}
 852
 853	if (suspend_ops_needed)
 854		suspend_set_ops(old_suspend_ordering ?
 855				&acpi_suspend_ops_old : &acpi_suspend_ops);
 856
 857	acpi_s2idle_setup();
 858}
 859
 860#else /* !CONFIG_SUSPEND */
 861#define s2idle_wakeup		(false)
 862static inline void acpi_sleep_suspend_setup(void) {}
 863#endif /* !CONFIG_SUSPEND */
 864
 865bool acpi_s2idle_wakeup(void)
 866{
 867	return s2idle_wakeup;
 868}
 869
 870#ifdef CONFIG_PM_SLEEP
 871static u32 saved_bm_rld;
 872
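/*
 * Preserve the BM_RLD bit across system sleep: snapshot it at syscore
 * suspend time and write the saved value back on resume if it has changed.
 */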
  873	static int acpi_save_bm_rld(void)
 874{
 875	acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &saved_bm_rld);
 876	return 0;
 877}
 878
  879	static void acpi_restore_bm_rld(void)
 880{
 881	u32 resumed_bm_rld = 0;
 882
 883	acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &resumed_bm_rld);
 884	if (resumed_bm_rld == saved_bm_rld)
 885		return;
 886
 887	acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, saved_bm_rld);
 888}
 889
 890static struct syscore_ops acpi_sleep_syscore_ops = {
 891	.suspend = acpi_save_bm_rld,
 892	.resume = acpi_restore_bm_rld,
 893};
 894
 895static void acpi_sleep_syscore_init(void)
 896{
 897	register_syscore_ops(&acpi_sleep_syscore_ops);
 898}
 899#else
 900static inline void acpi_sleep_syscore_init(void) {}
 901#endif /* CONFIG_PM_SLEEP */
 902
 903#ifdef CONFIG_HIBERNATION
 904static unsigned long s4_hardware_signature;
 905static struct acpi_table_facs *facs;
 906int acpi_check_s4_hw_signature = -1; /* Default behaviour is just to warn */
 907
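/*
 * Hibernation .begin() callback: allocate the NVS save area unless that is
 * disabled, note that firmware will be used for the transition proper
 * (PM_EVENT_HIBERNATE) and set the target sleep state to S4.
 */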
 908static int acpi_hibernation_begin(pm_message_t stage)
 909{
 910	if (!nvs_nosave) {
 911		int error = suspend_nvs_alloc();
 912		if (error)
 913			return error;
 914	}
 915
 916	if (stage.event == PM_EVENT_HIBERNATE)
 917		pm_set_suspend_via_firmware();
 918
 919	acpi_pm_start(ACPI_STATE_S4);
 920	return 0;
 921}
 922
 923static int acpi_hibernation_enter(void)
 924{
 925	acpi_status status = AE_OK;
 926
 927	/* This shouldn't return.  If it returns, we have a problem */
 928	status = acpi_enter_sleep_state(ACPI_STATE_S4);
 929	/* Reprogram control registers */
 930	acpi_leave_sleep_state_prep(ACPI_STATE_S4);
 931
 932	return ACPI_SUCCESS(status) ? 0 : -EFAULT;
 933}
 934
 935static void acpi_hibernation_leave(void)
 936{
 937	pm_set_resume_via_firmware();
 938	/*
 939	 * If ACPI is not enabled by the BIOS and the boot kernel, we need to
 940	 * enable it here.
 941	 */
 942	acpi_enable();
 943	/* Reprogram control registers */
 944	acpi_leave_sleep_state_prep(ACPI_STATE_S4);
 945	/* Check the hardware signature */
 946	if (facs && s4_hardware_signature != facs->hardware_signature)
 947		pr_crit("Hardware changed while hibernated, success doubtful!\n");
 948	/* Restore the NVS memory area */
 949	suspend_nvs_restore();
 950	/* Allow EC transactions to happen. */
 951	acpi_ec_unblock_transactions();
 952}
 953
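/*
 * restore_cleanup callback: unblock EC transactions and re-enable the
 * runtime GPEs.
 */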
 954static void acpi_pm_thaw(void)
 955{
 956	acpi_ec_unblock_transactions();
 957	acpi_enable_all_runtime_gpes();
 958}
 959
 960static const struct platform_hibernation_ops acpi_hibernation_ops = {
 961	.begin = acpi_hibernation_begin,
 962	.end = acpi_pm_end,
 963	.pre_snapshot = acpi_pm_prepare,
 964	.finish = acpi_pm_finish,
 965	.prepare = acpi_pm_prepare,
 966	.enter = acpi_hibernation_enter,
 967	.leave = acpi_hibernation_leave,
 968	.pre_restore = acpi_pm_freeze,
 969	.restore_cleanup = acpi_pm_thaw,
 970};
 971
 972/**
 973 * acpi_hibernation_begin_old - Set the target system sleep state to
 974 *	ACPI_STATE_S4 and execute the _PTS control method.  This
 975 *	function is used if the pre-ACPI 2.0 suspend ordering has been
 976 *	requested.
 977 * @stage: The power management event message.
 978 */
 979static int acpi_hibernation_begin_old(pm_message_t stage)
 980{
 981	int error;
 982	/*
 983	 * The _TTS object should always be evaluated before the _PTS object.
  984	 * When old_suspend_ordering is true, the _PTS object is
  985	 * evaluated in acpi_sleep_prepare().
 986	 */
 987	acpi_sleep_tts_switch(ACPI_STATE_S4);
 988
 989	error = acpi_sleep_prepare(ACPI_STATE_S4);
 990	if (error)
 991		return error;
 992
 993	if (!nvs_nosave) {
 994		error = suspend_nvs_alloc();
 995		if (error)
 996			return error;
 997	}
 998
 999	if (stage.event == PM_EVENT_HIBERNATE)
1000		pm_set_suspend_via_firmware();
1001
1002	acpi_target_sleep_state = ACPI_STATE_S4;
1003	acpi_scan_lock_acquire();
1004	return 0;
1005}
1006
1007/*
1008 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
1009 * been requested.
1010 */
1011static const struct platform_hibernation_ops acpi_hibernation_ops_old = {
1012	.begin = acpi_hibernation_begin_old,
1013	.end = acpi_pm_end,
1014	.pre_snapshot = acpi_pm_pre_suspend,
1015	.prepare = acpi_pm_freeze,
1016	.finish = acpi_pm_finish,
1017	.enter = acpi_hibernation_enter,
1018	.leave = acpi_hibernation_leave,
1019	.pre_restore = acpi_pm_freeze,
1020	.restore_cleanup = acpi_pm_thaw,
1021	.recover = acpi_pm_finish,
1022};
1023
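/*
 * Register the hibernation callbacks if S4 is supported and, depending on
 * acpi_check_s4_hw_signature, save the FACS hardware signature so that a
 * platform change can be detected across hibernation.
 */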
1024static void acpi_sleep_hibernate_setup(void)
1025{
1026	if (!acpi_sleep_state_supported(ACPI_STATE_S4))
1027		return;
1028
1029	hibernation_set_ops(old_suspend_ordering ?
1030			&acpi_hibernation_ops_old : &acpi_hibernation_ops);
1031	sleep_states[ACPI_STATE_S4] = 1;
1032	if (!acpi_check_s4_hw_signature)
1033		return;
1034
1035	acpi_get_table(ACPI_SIG_FACS, 1, (struct acpi_table_header **)&facs);
1036	if (facs) {
1037		/*
 1038		 * s4_hardware_signature is a local copy that is only used to
 1039		 * warn about a mismatch after we have attempted to resume
 1040		 * (in violation of the ACPI specification).
1041		 */
1042		s4_hardware_signature = facs->hardware_signature;
1043
1044		if (acpi_check_s4_hw_signature > 0) {
1045			/*
1046			 * If we're actually obeying the ACPI specification
1047			 * then the signature is written out as part of the
1048			 * swsusp header, in order to allow the boot kernel
1049			 * to gracefully decline to resume.
1050			 */
1051			swsusp_hardware_signature = facs->hardware_signature;
1052		}
1053	}
1054}
1055#else /* !CONFIG_HIBERNATION */
1056static inline void acpi_sleep_hibernate_setup(void) {}
1057#endif /* !CONFIG_HIBERNATION */
1058
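/*
 * sys-off handlers: acpi_power_off_prepare() runs the S5 sleep preparation
 * and quiesces GPEs, and acpi_power_off() then enters S5 with interrupts
 * disabled.
 */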
1059static int acpi_power_off_prepare(struct sys_off_data *data)
1060{
1061	/* Prepare to power off the system */
1062	acpi_sleep_prepare(ACPI_STATE_S5);
1063	acpi_disable_all_gpes();
1064	acpi_os_wait_events_complete();
1065	return NOTIFY_DONE;
1066}
1067
1068static int acpi_power_off(struct sys_off_data *data)
1069{
1070	/* acpi_sleep_prepare(ACPI_STATE_S5) should have already been called */
1071	pr_debug("%s called\n", __func__);
1072	local_irq_disable();
1073	acpi_enter_sleep_state(ACPI_STATE_S5);
1074	return NOTIFY_DONE;
1075}
1076
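/*
 * Determine which sleep states the platform supports, register the
 * corresponding suspend, hibernation and power-off handlers, and hook _TTS
 * evaluation into the reboot path.
 */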
1077int __init acpi_sleep_init(void)
1078{
1079	char supported[ACPI_S_STATE_COUNT * 3 + 1];
1080	char *pos = supported;
1081	int i;
1082
1083	acpi_sleep_dmi_check();
1084
1085	sleep_states[ACPI_STATE_S0] = 1;
1086
1087	acpi_sleep_syscore_init();
1088	acpi_sleep_suspend_setup();
1089	acpi_sleep_hibernate_setup();
1090
1091	if (acpi_sleep_state_supported(ACPI_STATE_S5)) {
1092		sleep_states[ACPI_STATE_S5] = 1;
1093
1094		register_sys_off_handler(SYS_OFF_MODE_POWER_OFF_PREPARE,
1095					 SYS_OFF_PRIO_FIRMWARE,
1096					 acpi_power_off_prepare, NULL);
1097
1098		register_sys_off_handler(SYS_OFF_MODE_POWER_OFF,
1099					 SYS_OFF_PRIO_FIRMWARE,
1100					 acpi_power_off, NULL);
1101
1102		/*
1103		 * Windows uses S5 for reboot, so some BIOSes depend on it to
 1104		 * perform a proper reboot.
1105		 */
1106		register_sys_off_handler(SYS_OFF_MODE_RESTART_PREPARE,
1107					 SYS_OFF_PRIO_FIRMWARE,
1108					 acpi_power_off_prepare, NULL);
1109	} else {
1110		acpi_no_s5 = true;
1111	}
1112
1113	supported[0] = 0;
1114	for (i = 0; i < ACPI_S_STATE_COUNT; i++) {
1115		if (sleep_states[i])
1116			pos += sprintf(pos, " S%d", i);
1117	}
1118	pr_info("(supports%s)\n", supported);
1119
1120	/*
 1121	 * Register tts_notifier on the reboot notifier list so that the _TTS
1122	 * object can also be evaluated when the system enters S5.
1123	 */
1124	register_reboot_notifier(&tts_notifier);
1125	return 0;
1126}