Linux v5.9: drivers/acpi/processor_idle.c
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * processor_idle - idle state submodule to the ACPI processor driver
   4 *
   5 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
   6 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
   7 *  Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de>
   8 *  Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
   9 *  			- Added processor hotplug support
  10 *  Copyright (C) 2005  Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
  11 *  			- Added support for C3 on SMP
  12 */
  13#define pr_fmt(fmt) "ACPI: " fmt
  14
  15#include <linux/module.h>
  16#include <linux/acpi.h>
  17#include <linux/dmi.h>
  18#include <linux/sched.h>       /* need_resched() */
  19#include <linux/tick.h>
  20#include <linux/cpuidle.h>
  21#include <linux/cpu.h>
  22#include <acpi/processor.h>
  23
  24/*
  25 * Include the apic definitions for x86 to have the APIC timer related defines
  26 * available also for UP (on SMP it gets magically included via linux/smp.h).
  27 * asm/acpi.h is not an option, as it would require more include magic. Also
  28 * creating an empty asm-ia64/apic.h would just trade pest vs. cholera.
  29 */
  30#ifdef CONFIG_X86
  31#include <asm/apic.h>
  32#endif
  33
  34#define ACPI_PROCESSOR_CLASS            "processor"
  35#define _COMPONENT              ACPI_PROCESSOR_COMPONENT
  36ACPI_MODULE_NAME("processor_idle");
  37
  38#define ACPI_IDLE_STATE_START	(IS_ENABLED(CONFIG_ARCH_HAS_CPU_RELAX) ? 1 : 0)
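/*
 * When CONFIG_ARCH_HAS_CPU_RELAX is enabled, cpuidle state 0 is the
 * architectural polling state installed by cpuidle_poll_state_init()
 * in acpi_processor_setup_cstates() below, so the ACPI C-states are
 * registered starting at index 1; otherwise they start at index 0.
 */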
  39
  40static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
  41module_param(max_cstate, uint, 0000);
  42static unsigned int nocst __read_mostly;
  43module_param(nocst, uint, 0000);
  44static int bm_check_disable __read_mostly;
  45module_param(bm_check_disable, uint, 0000);
  46
  47static unsigned int latency_factor __read_mostly = 2;
  48module_param(latency_factor, uint, 0644);
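/*
 * The four knobs above are module parameters of the "processor" module:
 * booting with "processor.max_cstate=1" caps idle at C1, and
 * "processor.nocst=1" makes the driver ignore _CST and fall back to the
 * FADT P_BLK states.  latency_factor (mode 0644) can also be changed at
 * run time, e.g. via /sys/module/processor/parameters/latency_factor,
 * and scales each state's target_residency in
 * acpi_processor_setup_cstates() below.
 */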
  49
  50static DEFINE_PER_CPU(struct cpuidle_device *, acpi_cpuidle_device);
  51
  52struct cpuidle_driver acpi_idle_driver = {
  53	.name =		"acpi_idle",
  54	.owner =	THIS_MODULE,
  55};
  56
  57#ifdef CONFIG_ACPI_PROCESSOR_CSTATE
  58static
  59DEFINE_PER_CPU(struct acpi_processor_cx * [CPUIDLE_STATE_MAX], acpi_cstate);
  60
  61static int disabled_by_idle_boot_param(void)
  62{
  63	return boot_option_idle_override == IDLE_POLL ||
  64		boot_option_idle_override == IDLE_HALT;
  65}
  66
  67/*
  68 * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
  69 * For now disable this. Probably a bug somewhere else.
  70 *
  71 * To skip this limit, boot/load with a large max_cstate limit.
  72 */
  73static int set_max_cstate(const struct dmi_system_id *id)
  74{
  75	if (max_cstate > ACPI_PROCESSOR_MAX_POWER)
  76		return 0;
  77
  78	pr_notice("%s detected - limiting to C%ld max_cstate."
  79		  " Override with \"processor.max_cstate=%d\"\n", id->ident,
  80		  (long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1);
  81
  82	max_cstate = (long)id->driver_data;
  83
  84	return 0;
  85}
  86
  87static const struct dmi_system_id processor_power_dmi_table[] = {
  88	{ set_max_cstate, "Clevo 5600D", {
  89	  DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
  90	  DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")},
  91	 (void *)2},
  92	{ set_max_cstate, "Pavilion zv5000", {
  93	  DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
  94	  DMI_MATCH(DMI_PRODUCT_NAME,"Pavilion zv5000 (DS502A#ABA)")},
  95	 (void *)1},
  96	{ set_max_cstate, "Asus L8400B", {
  97	  DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
  98	  DMI_MATCH(DMI_PRODUCT_NAME,"L8400B series Notebook PC")},
  99	 (void *)1},
 100	{},
 101};
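/*
 * Example: the Clevo 5600D entry above limits max_cstate to C2 via its
 * driver_data of (void *)2.  The "processor.max_cstate=9" override that
 * set_max_cstate() suggests works because the quirk is skipped whenever
 * the user-supplied max_cstate exceeds ACPI_PROCESSOR_MAX_POWER
 * (assumed here to be 8, its usual value).
 */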
 102
 103
 104/*
 105 * Callers should disable interrupts before the call and enable
 106 * interrupts after return.
 107 */
 108static void __cpuidle acpi_safe_halt(void)
 109{
 110	if (!tif_need_resched()) {
 111		safe_halt();
 112		local_irq_disable();
 113	}
 114}
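/*
 * safe_halt() enables interrupts and halts atomically (sti; hlt on
 * x86), so a wakeup arriving after the tif_need_resched() check cannot
 * be lost; interrupts are then disabled again to honor the contract
 * stated in the comment above.
 */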
 115
 116#ifdef ARCH_APICTIMER_STOPS_ON_C3
 117
 118/*
 119 * Some BIOS implementations switch to C3 in the published C2 state.
 120 * This seems to be a common problem on AMD boxen, but other vendors
 121 * are affected too. We pick the most conservative approach: we assume
 122 * that the local APIC stops in both C2 and C3.
 123 */
 124static void lapic_timer_check_state(int state, struct acpi_processor *pr,
 125				   struct acpi_processor_cx *cx)
 126{
 127	struct acpi_processor_power *pwr = &pr->power;
 128	u8 type = local_apic_timer_c2_ok ? ACPI_STATE_C3 : ACPI_STATE_C2;
 129
 130	if (cpu_has(&cpu_data(pr->id), X86_FEATURE_ARAT))
 131		return;
 132
 133	if (boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E))
 134		type = ACPI_STATE_C1;
 135
 136	/*
 137	 * Check, if one of the previous states already marked the lapic
 138	 * unstable
 139	 */
 140	if (pwr->timer_broadcast_on_state < state)
 141		return;
 142
 143	if (cx->type >= type)
 144		pr->power.timer_broadcast_on_state = state;
 145}
 146
 147static void __lapic_timer_propagate_broadcast(void *arg)
 148{
 149	struct acpi_processor *pr = (struct acpi_processor *) arg;
 150
 151	if (pr->power.timer_broadcast_on_state < INT_MAX)
 152		tick_broadcast_enable();
 153	else
 154		tick_broadcast_disable();
 155}
 156
 157static void lapic_timer_propagate_broadcast(struct acpi_processor *pr)
 158{
 159	smp_call_function_single(pr->id, __lapic_timer_propagate_broadcast,
 160				 (void *)pr, 1);
 161}
 162
 163/* Power(C) State timer broadcast control */
 164static bool lapic_timer_needs_broadcast(struct acpi_processor *pr,
 165					struct acpi_processor_cx *cx)
 166{
 167	return cx - pr->power.states >= pr->power.timer_broadcast_on_state;
 168}
 169
 170#else
 171
 172static void lapic_timer_check_state(int state, struct acpi_processor *pr,
 173				   struct acpi_processor_cx *cstate) { }
 174static void lapic_timer_propagate_broadcast(struct acpi_processor *pr) { }
 175
 176static bool lapic_timer_needs_broadcast(struct acpi_processor *pr,
 177					struct acpi_processor_cx *cx)
 178{
 179	return false;
 180}
 181
 182#endif
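/*
 * Net effect of the machinery above: any C-state at or beyond
 * timer_broadcast_on_state cannot rely on the local APIC timer, so
 * acpi_processor_setup_cpuidle_cx() below tags it with
 * CPUIDLE_FLAG_TIMER_STOP and the periodic tick is handed to a
 * broadcast clockevent device while the CPU idles in such a state.
 */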
 183
 184#if defined(CONFIG_X86)
 185static void tsc_check_state(int state)
 186{
 187	switch (boot_cpu_data.x86_vendor) {
 188	case X86_VENDOR_HYGON:
 189	case X86_VENDOR_AMD:
 190	case X86_VENDOR_INTEL:
 191	case X86_VENDOR_CENTAUR:
 192	case X86_VENDOR_ZHAOXIN:
 193		/*
 194		 * AMD Fam10h TSC will tick in all
 195		 * C/P/S0/S1 states when this bit is set.
 196		 */
 197		if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
 198			return;
 199		fallthrough;
 200	default:
 201		/* TSC could halt in idle, so notify users */
 202		if (state > ACPI_STATE_C1)
 203			mark_tsc_unstable("TSC halts in idle");
 204	}
 205}
 206#else
 207static void tsc_check_state(int state) { return; }
 208#endif
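/*
 * Marking the TSC unstable here makes the timekeeping core fall back
 * to another clocksource (e.g. HPET or the ACPI PM timer) on CPUs
 * whose TSC stops in C2/C3, instead of letting wall time drift.
 */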
 209
 210static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
 211{
 212
 213	if (!pr->pblk)
 214		return -ENODEV;
 215
 216	/* if info is obtained from pblk/fadt, type equals state */
 217	pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
 218	pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;
 219
 220#ifndef CONFIG_HOTPLUG_CPU
 221	/*
 222	 * Check for P_LVL2_UP flag before entering C2 and above on
 223	 * an SMP system.
 224	 */
 225	if ((num_online_cpus() > 1) &&
 226	    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
 227		return -ENODEV;
 228#endif
 229
 230	/* determine C2 and C3 address from pblk */
 231	pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4;
 232	pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;
 233
 234	/* determine latencies from FADT */
 235	pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.c2_latency;
 236	pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.c3_latency;
 237
 238	/*
 239	 * FADT specified C2 latency must be less than or equal to
 240	 * 100 microseconds.
 241	 */
 242	if (acpi_gbl_FADT.c2_latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
 243		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
 244			"C2 latency too large [%d]\n", acpi_gbl_FADT.c2_latency));
 245		/* invalidate C2 */
 246		pr->power.states[ACPI_STATE_C2].address = 0;
 247	}
 248
 249	/*
 250	 * FADT supplied C3 latency must be less than or equal to
 251	 * 1000 microseconds.
 252	 */
 253	if (acpi_gbl_FADT.c3_latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
 254		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
 255			"C3 latency too large [%d]\n", acpi_gbl_FADT.c3_latency));
 256		/* invalidate C3 */
 257		pr->power.states[ACPI_STATE_C3].address = 0;
 258	}
 259
 260	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
 261			  "lvl2[0x%08x] lvl3[0x%08x]\n",
 262			  pr->power.states[ACPI_STATE_C2].address,
 263			  pr->power.states[ACPI_STATE_C3].address));
 264
 265	snprintf(pr->power.states[ACPI_STATE_C2].desc,
 266			 ACPI_CX_DESC_LEN, "ACPI P_LVL2 IOPORT 0x%x",
 267			 pr->power.states[ACPI_STATE_C2].address);
 268	snprintf(pr->power.states[ACPI_STATE_C3].desc,
 269			 ACPI_CX_DESC_LEN, "ACPI P_LVL3 IOPORT 0x%x",
 270			 pr->power.states[ACPI_STATE_C3].address);
 271
 272	return 0;
 273}
 274
 275static int acpi_processor_get_power_info_default(struct acpi_processor *pr)
 276{
 277	if (!pr->power.states[ACPI_STATE_C1].valid) {
 278		/* set the first C-State to C1 */
 279		/* all processors need to support C1 */
 280		pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
 281		pr->power.states[ACPI_STATE_C1].valid = 1;
 282		pr->power.states[ACPI_STATE_C1].entry_method = ACPI_CSTATE_HALT;
 283
 284		snprintf(pr->power.states[ACPI_STATE_C1].desc,
 285			 ACPI_CX_DESC_LEN, "ACPI HLT");
 286	}
 287	/* the C0 state only exists as a filler in our array */
 288	pr->power.states[ACPI_STATE_C0].valid = 1;
 289	return 0;
 290}
 291
 292static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
 293{
 294	int ret;
 295
 296	if (nocst)
 297		return -ENODEV;
 298
 299	ret = acpi_processor_evaluate_cst(pr->handle, pr->id, &pr->power);
 300	if (ret)
 301		return ret;
 302
 303	if (!pr->power.count)
 304		return -EFAULT;
 305
 306	pr->flags.has_cst = 1;
 307	return 0;
 308}
 309
 310static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
 311					   struct acpi_processor_cx *cx)
 312{
 313	static int bm_check_flag = -1;
 314	static int bm_control_flag = -1;
 315
 316
 317	if (!cx->address)
 318		return;
 319
 320	/*
 321	 * PIIX4 Erratum #18: We don't support C3 when Type-F (fast)
 322	 * DMA transfers are used by any ISA device to avoid livelock.
 323	 * Note that we could disable Type-F DMA (as recommended by
 324	 * the erratum), but this is known to disrupt certain ISA
 325	 * devices thus we take the conservative approach.
 326	 */
 327	else if (errata.piix4.fdma) {
 328		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
 329				  "C3 not supported on PIIX4 with Type-F DMA\n"));
 330		return;
 331	}
 332
 333	/* All the logic here assumes flags.bm_check is same across all CPUs */
 334	if (bm_check_flag == -1) {
 335		/* Determine whether bm_check is needed based on CPU  */
 336		acpi_processor_power_init_bm_check(&(pr->flags), pr->id);
 337		bm_check_flag = pr->flags.bm_check;
 338		bm_control_flag = pr->flags.bm_control;
 339	} else {
 340		pr->flags.bm_check = bm_check_flag;
 341		pr->flags.bm_control = bm_control_flag;
 342	}
 343
 344	if (pr->flags.bm_check) {
 345		if (!pr->flags.bm_control) {
 346			if (pr->flags.has_cst != 1) {
 347				/* bus mastering control is necessary */
 348				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
 349					"C3 support requires BM control\n"));
 350				return;
 351			} else {
 352				/* Here we enter C3 without bus mastering */
 353				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
 354					"C3 support without BM control\n"));
 355			}
 356		}
 357	} else {
 358		/*
 359		 * WBINVD should be set in the FADT for the C3 state to
 360		 * be supported when bm_check is not required.
 361		 */
 362		if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) {
 363			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
 364					  "Cache invalidation should work properly"
 365					  " for C3 to be enabled on SMP systems\n"));
 366			return;
 367		}
 368	}
 369
 370	/*
 371	 * Otherwise we've met all of our C3 requirements.
 372	 * Normalize the C3 latency to expedite policy.  Enable
 373	 * checking of bus mastering status (bm_check) so we can
 374	 * use this in our C3 policy
 375	 */
 376	cx->valid = 1;
 377
 378	/*
 379	 * On older chipsets, BM_RLD needs to be set
 380	 * in order for Bus Master activity to wake the
 381	 * system from C3.  Newer chipsets handle DMA
 382	 * during C3 automatically and BM_RLD is a NOP.
 383	 * In either case, the proper way to
 384	 * handle BM_RLD is to set it and leave it set.
 385	 */
 386	acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
 387
 388	return;
 389}
 390
 391static int acpi_processor_power_verify(struct acpi_processor *pr)
 392{
 393	unsigned int i;
 394	unsigned int working = 0;
 395
 396	pr->power.timer_broadcast_on_state = INT_MAX;
 397
 398	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
 399		struct acpi_processor_cx *cx = &pr->power.states[i];
 400
 401		switch (cx->type) {
 402		case ACPI_STATE_C1:
 403			cx->valid = 1;
 404			break;
 405
 406		case ACPI_STATE_C2:
 407			if (!cx->address)
 408				break;
 409			cx->valid = 1;
 410			break;
 411
 412		case ACPI_STATE_C3:
 413			acpi_processor_power_verify_c3(pr, cx);
 414			break;
 415		}
 416		if (!cx->valid)
 417			continue;
 418
 419		lapic_timer_check_state(i, pr, cx);
 420		tsc_check_state(cx->type);
 421		working++;
 422	}
 423
 424	lapic_timer_propagate_broadcast(pr);
 425
 426	return (working);
 427}
 428
 429static int acpi_processor_get_cstate_info(struct acpi_processor *pr)
 430{
 431	unsigned int i;
 432	int result;
 433
 434
 435	/* NOTE: the idle thread may not be running while calling
 436	 * this function */
 437
 438	/* Zero initialize all the C-states info. */
 439	memset(pr->power.states, 0, sizeof(pr->power.states));
 440
 441	result = acpi_processor_get_power_info_cst(pr);
 442	if (result == -ENODEV)
 443		result = acpi_processor_get_power_info_fadt(pr);
 444
 445	if (result)
 446		return result;
 447
 448	acpi_processor_get_power_info_default(pr);
 449
 450	pr->power.count = acpi_processor_power_verify(pr);
 451
 452	/*
 453	 * If at least one C-state is valid, mark this CPU as
 454	 * being "idle manageable".
 455	 */
 456	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
 457		if (pr->power.states[i].valid) {
 458			pr->power.count = i;
 459			pr->flags.power = 1;
 460		}
 461	}
 462
 463	return 0;
 464}
 465
 466/**
 467 * acpi_idle_bm_check - checks if bus master activity was detected
 468 */
 469static int acpi_idle_bm_check(void)
 470{
 471	u32 bm_status = 0;
 472
 473	if (bm_check_disable)
 474		return 0;
 475
 476	acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
 477	if (bm_status)
 478		acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
 479	/*
 480	 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
 481	 * the true state of bus mastering activity; forcing us to
 482	 * manually check the BMIDEA bit of each IDE channel.
 483	 */
 484	else if (errata.piix4.bmisx) {
 485		if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
 486		    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
 487			bm_status = 1;
 488	}
 489	return bm_status;
 490}
 491
 492static void wait_for_freeze(void)
 493{
 494#ifdef	CONFIG_X86
 495	/* No delay is needed if we are in guest */
 496	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
 497		return;
 498#endif
 499	/* Dummy wait op - must do something useless after P_LVL2 read
 500	   because chipsets cannot guarantee that STPCLK# signal
 501	   gets asserted in time to freeze execution properly. */
 502	inl(acpi_gbl_FADT.xpm_timer_block.address);
 503}
 504
 505/**
 506 * acpi_idle_do_entry - enter idle state using the appropriate method
 507 * @cx: cstate data
 508 *
 509 * Caller disables interrupt before call and enables interrupt after return.
 510 */
 511static void __cpuidle acpi_idle_do_entry(struct acpi_processor_cx *cx)
 512{
 513	if (cx->entry_method == ACPI_CSTATE_FFH) {
 514		/* Call into architectural FFH based C-state */
 515		acpi_processor_ffh_cstate_enter(cx);
 516	} else if (cx->entry_method == ACPI_CSTATE_HALT) {
 517		acpi_safe_halt();
 518	} else {
 519		/* IO port based C-state */
 520		inb(cx->address);
 521		wait_for_freeze();
 522	}
 523}
 524
 525/**
 526 * acpi_idle_play_dead - enters an ACPI state for long-term idle (i.e. off-lining)
 527 * @dev: the target CPU
 528 * @index: the index of the suggested state
 529 */
 530static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
 531{
 532	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
 533
 534	ACPI_FLUSH_CPU_CACHE();
 535
 536	while (1) {
 537
 538		if (cx->entry_method == ACPI_CSTATE_HALT)
 539			safe_halt();
 540		else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) {
 541			inb(cx->address);
 542			wait_for_freeze();
 543		} else
 544			return -ENODEV;
 545	}
 546
 547	/* Never reached */
 548	return 0;
 549}
 550
 551static bool acpi_idle_fallback_to_c1(struct acpi_processor *pr)
 552{
 553	return IS_ENABLED(CONFIG_HOTPLUG_CPU) && !pr->flags.has_cst &&
 554		!(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED);
 555}
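/*
 * Rationale for the C1 fallback above: with CONFIG_HOTPLUG_CPU the
 * P_LVL2_UP check in acpi_processor_get_power_info_fadt() is compiled
 * out, so on a BIOS that provides no _CST and does not set
 * ACPI_FADT_C2_MP_SUPPORTED the FADT C2/C3 states are only safe on a
 * single CPU; acpi_idle_enter() therefore demotes to C1 while
 * num_online_cpus() > 1.
 */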
 556
 557static int c3_cpu_count;
 558static DEFINE_RAW_SPINLOCK(c3_lock);
 559
 560/**
 561 * acpi_idle_enter_bm - enters C3 with proper BM handling
 562 * @drv: cpuidle driver
 563 * @pr: Target processor
 564 * @cx: Target state context
 565 * @index: index of target state
 566 */
 567static int acpi_idle_enter_bm(struct cpuidle_driver *drv,
 568			       struct acpi_processor *pr,
 569			       struct acpi_processor_cx *cx,
 570			       int index)
 571{
 572	static struct acpi_processor_cx safe_cx = {
 573		.entry_method = ACPI_CSTATE_HALT,
 574	};
 575
 576	/*
 577	 * disable bus master
 578	 * bm_check implies we need ARB_DIS
 579	 * bm_control indicates whether we can do ARB_DIS
 580	 *
 581	 * That leaves a case where bm_check is set and bm_control is not set.
 582	 * In that case we cannot do much, we enter C3 without doing anything.
 583	 */
 584	bool dis_bm = pr->flags.bm_control;
 585
 586	/* If we can skip BM, demote to a safe state. */
 587	if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
 588		dis_bm = false;
 589		index = drv->safe_state_index;
 590		if (index >= 0) {
 591			cx = this_cpu_read(acpi_cstate[index]);
 592		} else {
 593			cx = &safe_cx;
 594			index = -EBUSY;
 595		}
 596	}
 597
 598	if (dis_bm) {
 599		raw_spin_lock(&c3_lock);
 600		c3_cpu_count++;
 601		/* Disable bus master arbitration when all CPUs are in C3 */
 602		if (c3_cpu_count == num_online_cpus())
 603			acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
 604		raw_spin_unlock(&c3_lock);
 605	}
 606
 607	rcu_idle_enter();
 608
 609	acpi_idle_do_entry(cx);
 610
 611	rcu_idle_exit();
 612
 613	/* Re-enable bus master arbitration */
 614	if (dis_bm) {
 615		raw_spin_lock(&c3_lock);
 616		acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
 617		c3_cpu_count--;
 618		raw_spin_unlock(&c3_lock);
 619	}
 620
 621	return index;
 622}
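/*
 * ARB_DIS protocol used above: the last CPU to enter C3 disables
 * bus-master arbitration and the first CPU to leave re-enables it;
 * c3_lock and c3_cpu_count serialize that count across concurrent
 * idle entry and exit.
 */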
 623
 624static int acpi_idle_enter(struct cpuidle_device *dev,
 625			   struct cpuidle_driver *drv, int index)
 626{
 627	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
 628	struct acpi_processor *pr;
 629
 630	pr = __this_cpu_read(processors);
 631	if (unlikely(!pr))
 632		return -EINVAL;
 633
 634	if (cx->type != ACPI_STATE_C1) {
 635		if (cx->type == ACPI_STATE_C3 && pr->flags.bm_check)
 636			return acpi_idle_enter_bm(drv, pr, cx, index);
 637
 638		/* C2 to C1 demotion. */
 639		if (acpi_idle_fallback_to_c1(pr) && num_online_cpus() > 1) {
 640			index = ACPI_IDLE_STATE_START;
 641			cx = per_cpu(acpi_cstate[index], dev->cpu);
 642		}
 643	}
 644
 645	if (cx->type == ACPI_STATE_C3)
 646		ACPI_FLUSH_CPU_CACHE();
 647
 648	acpi_idle_do_entry(cx);
 649
 650	return index;
 651}
 652
 653static int acpi_idle_enter_s2idle(struct cpuidle_device *dev,
 654				  struct cpuidle_driver *drv, int index)
 655{
 656	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
 657
 658	if (cx->type == ACPI_STATE_C3) {
 659		struct acpi_processor *pr = __this_cpu_read(processors);
 660
 661		if (unlikely(!pr))
 662			return 0;
 663
 664		if (pr->flags.bm_check) {
 665			u8 bm_sts_skip = cx->bm_sts_skip;
 666
 667			/* Don't check BM_STS, do an unconditional ARB_DIS for S2IDLE */
 668			cx->bm_sts_skip = 1;
 669			acpi_idle_enter_bm(drv, pr, cx, index);
 670			cx->bm_sts_skip = bm_sts_skip;
 671
 672			return 0;
 673		} else {
 674			ACPI_FLUSH_CPU_CACHE();
 675		}
 676	}
 677	acpi_idle_do_entry(cx);
 678
 679	return 0;
 680}
 681
 682static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
 683					   struct cpuidle_device *dev)
 684{
 685	int i, count = ACPI_IDLE_STATE_START;
 686	struct acpi_processor_cx *cx;
 687	struct cpuidle_state *state;
 688
 689	if (max_cstate == 0)
 690		max_cstate = 1;
 691
 692	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
 693		state = &acpi_idle_driver.states[count];
 694		cx = &pr->power.states[i];
 695
 696		if (!cx->valid)
 697			continue;
 698
 699		per_cpu(acpi_cstate[count], dev->cpu) = cx;
 700
 701		if (lapic_timer_needs_broadcast(pr, cx))
 702			state->flags |= CPUIDLE_FLAG_TIMER_STOP;
 703
 704		if (cx->type == ACPI_STATE_C3) {
 705			state->flags |= CPUIDLE_FLAG_TLB_FLUSHED;
 706			if (pr->flags.bm_check)
 707				state->flags |= CPUIDLE_FLAG_RCU_IDLE;
 708		}
 709
 710		count++;
 711		if (count == CPUIDLE_STATE_MAX)
 712			break;
 713	}
 714
 715	if (!count)
 716		return -EINVAL;
 717
 718	return 0;
 719}
 720
 721static int acpi_processor_setup_cstates(struct acpi_processor *pr)
 722{
 723	int i, count;
 724	struct acpi_processor_cx *cx;
 725	struct cpuidle_state *state;
 726	struct cpuidle_driver *drv = &acpi_idle_driver;
 727
 728	if (max_cstate == 0)
 729		max_cstate = 1;
 730
 731	if (IS_ENABLED(CONFIG_ARCH_HAS_CPU_RELAX)) {
 732		cpuidle_poll_state_init(drv);
 733		count = 1;
 734	} else {
 735		count = 0;
 736	}
 737
 738	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
 739		cx = &pr->power.states[i];
 740
 741		if (!cx->valid)
 742			continue;
 743
 744		state = &drv->states[count];
 745		snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
 746		strlcpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
 747		state->exit_latency = cx->latency;
 748		state->target_residency = cx->latency * latency_factor;
 749		state->enter = acpi_idle_enter;
 750
 751		state->flags = 0;
 752		if (cx->type == ACPI_STATE_C1 || cx->type == ACPI_STATE_C2) {
 753			state->enter_dead = acpi_idle_play_dead;
 754			drv->safe_state_index = count;
 755		}
 756		/*
 757		 * Halt-induced C1 is not good for ->enter_s2idle, because it
 758		 * re-enables interrupts on exit.  Moreover, C1 is generally not
 759		 * particularly interesting from the suspend-to-idle angle, so
 760		 * avoid C1 and the situations in which we may need to fall back
 761		 * to it altogether.
 762		 */
 763		if (cx->type != ACPI_STATE_C1 && !acpi_idle_fallback_to_c1(pr))
 764			state->enter_s2idle = acpi_idle_enter_s2idle;
 765
 766		count++;
 767		if (count == CPUIDLE_STATE_MAX)
 768			break;
 769	}
 770
 771	drv->state_count = count;
 772
 773	if (!count)
 774		return -EINVAL;
 775
 776	return 0;
 777}
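/*
 * Worked example for the residency heuristic above: a C2 state with a
 * _CST latency of 100 us gets exit_latency = 100 us and, with the
 * default latency_factor of 2, target_residency = 200 us, so a cpuidle
 * governor will normally pick it only when the predicted idle period
 * is at least 200 us.
 */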
 778
 779static inline void acpi_processor_cstate_first_run_checks(void)
 780{
 781	static int first_run;
 782
 783	if (first_run)
 784		return;
 785	dmi_check_system(processor_power_dmi_table);
 786	max_cstate = acpi_processor_cstate_check(max_cstate);
 787	if (max_cstate < ACPI_C_STATES_MAX)
 788		pr_notice("processor limited to max C-state %d\n",
 789			  max_cstate);
 790	first_run++;
 791
 792	if (nocst)
 793		return;
 794
 795	acpi_processor_claim_cst_control();
 796}
 797#else
 798
 799static inline int disabled_by_idle_boot_param(void) { return 0; }
 800static inline void acpi_processor_cstate_first_run_checks(void) { }
 801static int acpi_processor_get_cstate_info(struct acpi_processor *pr)
 802{
 803	return -ENODEV;
 804}
 805
 806static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
 807					   struct cpuidle_device *dev)
 808{
 809	return -EINVAL;
 810}
 811
 812static int acpi_processor_setup_cstates(struct acpi_processor *pr)
 813{
 814	return -EINVAL;
 815}
 816
 817#endif /* CONFIG_ACPI_PROCESSOR_CSTATE */
 818
 819struct acpi_lpi_states_array {
 820	unsigned int size;
 821	unsigned int composite_states_size;
 822	struct acpi_lpi_state *entries;
 823	struct acpi_lpi_state *composite_states[ACPI_PROCESSOR_MAX_POWER];
 824};
 825
 826static int obj_get_integer(union acpi_object *obj, u32 *value)
 827{
 828	if (obj->type != ACPI_TYPE_INTEGER)
 829		return -EINVAL;
 830
 831	*value = obj->integer.value;
 832	return 0;
 833}
 834
 835static int acpi_processor_evaluate_lpi(acpi_handle handle,
 836				       struct acpi_lpi_states_array *info)
 837{
 838	acpi_status status;
 839	int ret = 0;
 840	int pkg_count, state_idx = 1, loop;
 841	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
 842	union acpi_object *lpi_data;
 843	struct acpi_lpi_state *lpi_state;
 844
 845	status = acpi_evaluate_object(handle, "_LPI", NULL, &buffer);
 846	if (ACPI_FAILURE(status)) {
 847		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _LPI, giving up\n"));
 848		return -ENODEV;
 849	}
 850
 851	lpi_data = buffer.pointer;
 852
 853	/* There must be at least 4 elements = 3 elements + 1 package */
 854	if (!lpi_data || lpi_data->type != ACPI_TYPE_PACKAGE ||
 855	    lpi_data->package.count < 4) {
 856		pr_debug("not enough elements in _LPI\n");
 857		ret = -ENODATA;
 858		goto end;
 859	}
 860
 861	pkg_count = lpi_data->package.elements[2].integer.value;
 862
 863	/* Validate number of power states. */
 864	if (pkg_count < 1 || pkg_count != lpi_data->package.count - 3) {
 865		pr_debug("count given by _LPI is not valid\n");
 866		ret = -ENODATA;
 867		goto end;
 868	}
 869
 870	lpi_state = kcalloc(pkg_count, sizeof(*lpi_state), GFP_KERNEL);
 871	if (!lpi_state) {
 872		ret = -ENOMEM;
 873		goto end;
 874	}
 875
 876	info->size = pkg_count;
 877	info->entries = lpi_state;
 878
 879	/* LPI States start at index 3 */
 880	for (loop = 3; state_idx <= pkg_count; loop++, state_idx++, lpi_state++) {
 881		union acpi_object *element, *pkg_elem, *obj;
 882
 883		element = &lpi_data->package.elements[loop];
 884		if (element->type != ACPI_TYPE_PACKAGE || element->package.count < 7)
 885			continue;
 886
 887		pkg_elem = element->package.elements;
 888
 889		obj = pkg_elem + 6;
 890		if (obj->type == ACPI_TYPE_BUFFER) {
 891			struct acpi_power_register *reg;
 892
 893			reg = (struct acpi_power_register *)obj->buffer.pointer;
 894			if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
 895			    reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)
 896				continue;
 897
 898			lpi_state->address = reg->address;
 899			lpi_state->entry_method =
 900				reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE ?
 901				ACPI_CSTATE_FFH : ACPI_CSTATE_SYSTEMIO;
 902		} else if (obj->type == ACPI_TYPE_INTEGER) {
 903			lpi_state->entry_method = ACPI_CSTATE_INTEGER;
 904			lpi_state->address = obj->integer.value;
 905		} else {
 906			continue;
 907		}
 908
 909	/* elements[7,8] skipped for now, i.e. Residency/Usage counters */
 910
 911		obj = pkg_elem + 9;
 912		if (obj->type == ACPI_TYPE_STRING)
 913			strlcpy(lpi_state->desc, obj->string.pointer,
 914				ACPI_CX_DESC_LEN);
 915
 916		lpi_state->index = state_idx;
 917		if (obj_get_integer(pkg_elem + 0, &lpi_state->min_residency)) {
 918			pr_debug("No min. residency found, assuming 10 us\n");
 919			lpi_state->min_residency = 10;
 920		}
 921
 922		if (obj_get_integer(pkg_elem + 1, &lpi_state->wake_latency)) {
 923			pr_debug("No wake latency found, assuming 10 us\n");
 924			lpi_state->wake_latency = 10;
 925		}
 926
 927		if (obj_get_integer(pkg_elem + 2, &lpi_state->flags))
 928			lpi_state->flags = 0;
 929
 930		if (obj_get_integer(pkg_elem + 3, &lpi_state->arch_flags))
 931			lpi_state->arch_flags = 0;
 932
 933		if (obj_get_integer(pkg_elem + 4, &lpi_state->res_cnt_freq))
 934			lpi_state->res_cnt_freq = 1;
 935
 936		if (obj_get_integer(pkg_elem + 5, &lpi_state->enable_parent_state))
 937			lpi_state->enable_parent_state = 0;
 938	}
 939
 940	acpi_handle_debug(handle, "Found %d power states\n", state_idx);
 941end:
 942	kfree(buffer.pointer);
 943	return ret;
 944}
 945
 946/*
 947 * flat_state_cnt - the number of composite LPI states after the process of flattening
 948 */
 949static int flat_state_cnt;
 950
 951/**
 952 * combine_lpi_states - combine local and parent LPI states to form a composite LPI state
 953 *
 954 * @local: local LPI state
 955 * @parent: parent LPI state
 956 * @result: composite LPI state
 957 */
 958static bool combine_lpi_states(struct acpi_lpi_state *local,
 959			       struct acpi_lpi_state *parent,
 960			       struct acpi_lpi_state *result)
 961{
 962	if (parent->entry_method == ACPI_CSTATE_INTEGER) {
 963		if (!parent->address) /* 0 means autopromotable */
 964			return false;
 965		result->address = local->address + parent->address;
 966	} else {
 967		result->address = parent->address;
 968	}
 969
 970	result->min_residency = max(local->min_residency, parent->min_residency);
 971	result->wake_latency = local->wake_latency + parent->wake_latency;
 972	result->enable_parent_state = parent->enable_parent_state;
 973	result->entry_method = local->entry_method;
 974
 975	result->flags = parent->flags;
 976	result->arch_flags = parent->arch_flags;
 977	result->index = parent->index;
 978
 979	strlcpy(result->desc, local->desc, ACPI_CX_DESC_LEN);
 980	strlcat(result->desc, "+", ACPI_CX_DESC_LEN);
 981	strlcat(result->desc, parent->desc, ACPI_CX_DESC_LEN);
 982	return true;
 983}
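/*
 * Example of the combination above, with hypothetical states for
 * illustration: a local state (min_residency 100, wake_latency 10,
 * desc "CORE-OFF") combined with a parent state (min_residency 500,
 * wake_latency 50, desc "CLUSTER-OFF") yields min_residency 500,
 * wake_latency 60 and desc "CORE-OFF+CLUSTER-OFF".
 */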
 984
 985#define ACPI_LPI_STATE_FLAGS_ENABLED			BIT(0)
 986
 987static void stash_composite_state(struct acpi_lpi_states_array *curr_level,
 988				  struct acpi_lpi_state *t)
 989{
 990	curr_level->composite_states[curr_level->composite_states_size++] = t;
 991}
 992
 993static int flatten_lpi_states(struct acpi_processor *pr,
 994			      struct acpi_lpi_states_array *curr_level,
 995			      struct acpi_lpi_states_array *prev_level)
 996{
 997	int i, j, state_count = curr_level->size;
 998	struct acpi_lpi_state *p, *t = curr_level->entries;
 999
1000	curr_level->composite_states_size = 0;
1001	for (j = 0; j < state_count; j++, t++) {
1002		struct acpi_lpi_state *flpi;
1003
1004		if (!(t->flags & ACPI_LPI_STATE_FLAGS_ENABLED))
1005			continue;
1006
1007		if (flat_state_cnt >= ACPI_PROCESSOR_MAX_POWER) {
1008			pr_warn("Limiting number of LPI states to max (%d)\n",
1009				ACPI_PROCESSOR_MAX_POWER);
1010			pr_warn("Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
1011			break;
1012		}
1013
1014		flpi = &pr->power.lpi_states[flat_state_cnt];
1015
1016		if (!prev_level) { /* leaf/processor node */
1017			memcpy(flpi, t, sizeof(*t));
1018			stash_composite_state(curr_level, flpi);
1019			flat_state_cnt++;
1020			continue;
1021		}
1022
1023		for (i = 0; i < prev_level->composite_states_size; i++) {
1024			p = prev_level->composite_states[i];
1025			if (t->index <= p->enable_parent_state &&
1026			    combine_lpi_states(p, t, flpi)) {
1027				stash_composite_state(curr_level, flpi);
1028				flat_state_cnt++;
1029				flpi++;
1030			}
1031		}
1032	}
1033
1034	kfree(curr_level->entries);
1035	return 0;
1036}
1037
1038static int acpi_processor_get_lpi_info(struct acpi_processor *pr)
1039{
1040	int ret, i;
1041	acpi_status status;
1042	acpi_handle handle = pr->handle, pr_ahandle;
1043	struct acpi_device *d = NULL;
1044	struct acpi_lpi_states_array info[2], *tmp, *prev, *curr;
1045
1046	if (!osc_pc_lpi_support_confirmed)
1047		return -EOPNOTSUPP;
1048
1049	if (!acpi_has_method(handle, "_LPI"))
1050		return -EINVAL;
1051
1052	flat_state_cnt = 0;
1053	prev = &info[0];
1054	curr = &info[1];
1055	handle = pr->handle;
1056	ret = acpi_processor_evaluate_lpi(handle, prev);
1057	if (ret)
1058		return ret;
1059	flatten_lpi_states(pr, prev, NULL);
1060
1061	status = acpi_get_parent(handle, &pr_ahandle);
1062	while (ACPI_SUCCESS(status)) {
1063		acpi_bus_get_device(pr_ahandle, &d);
1064		handle = pr_ahandle;
1065
1066		if (strcmp(acpi_device_hid(d), ACPI_PROCESSOR_CONTAINER_HID))
1067			break;
1068
1069		/* can be optional ? */
1070		if (!acpi_has_method(handle, "_LPI"))
1071			break;
1072
1073		ret = acpi_processor_evaluate_lpi(handle, curr);
1074		if (ret)
1075			break;
1076
1077		/* flatten all the LPI states in this level of hierarchy */
1078		flatten_lpi_states(pr, curr, prev);
1079
1080		tmp = prev, prev = curr, curr = tmp;
1081
1082		status = acpi_get_parent(handle, &pr_ahandle);
1083	}
1084
1085	pr->power.count = flat_state_cnt;
1086	/* reset the index after flattening */
1087	for (i = 0; i < pr->power.count; i++)
1088		pr->power.lpi_states[i].index = i;
1089
1090	/* Tell driver that _LPI is supported. */
1091	pr->flags.has_lpi = 1;
1092	pr->flags.power = 1;
1093
1094	return 0;
1095}
1096
1097int __weak acpi_processor_ffh_lpi_probe(unsigned int cpu)
1098{
1099	return -ENODEV;
1100}
1101
1102int __weak acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi)
1103{
1104	return -ENODEV;
1105}
1106
1107/**
1108 * acpi_idle_lpi_enter - enters any ACPI LPI state
1109 * @dev: the target CPU
1110 * @drv: cpuidle driver containing cpuidle state info
1111 * @index: index of target state
1112 *
1113 * Return: 0 for success or negative value for error
1114 */
1115static int acpi_idle_lpi_enter(struct cpuidle_device *dev,
1116			       struct cpuidle_driver *drv, int index)
1117{
1118	struct acpi_processor *pr;
1119	struct acpi_lpi_state *lpi;
1120
1121	pr = __this_cpu_read(processors);
1122
1123	if (unlikely(!pr))
1124		return -EINVAL;
1125
1126	lpi = &pr->power.lpi_states[index];
1127	if (lpi->entry_method == ACPI_CSTATE_FFH)
1128		return acpi_processor_ffh_lpi_enter(lpi);
1129
1130	return -EINVAL;
1131}
1132
1133static int acpi_processor_setup_lpi_states(struct acpi_processor *pr)
1134{
1135	int i;
1136	struct acpi_lpi_state *lpi;
1137	struct cpuidle_state *state;
1138	struct cpuidle_driver *drv = &acpi_idle_driver;
1139
1140	if (!pr->flags.has_lpi)
1141		return -EOPNOTSUPP;
1142
1143	for (i = 0; i < pr->power.count && i < CPUIDLE_STATE_MAX; i++) {
1144		lpi = &pr->power.lpi_states[i];
1145
1146		state = &drv->states[i];
1147		snprintf(state->name, CPUIDLE_NAME_LEN, "LPI-%d", i);
1148		strlcpy(state->desc, lpi->desc, CPUIDLE_DESC_LEN);
1149		state->exit_latency = lpi->wake_latency;
1150		state->target_residency = lpi->min_residency;
1151		if (lpi->arch_flags)
1152			state->flags |= CPUIDLE_FLAG_TIMER_STOP;
1153		state->enter = acpi_idle_lpi_enter;
1154		drv->safe_state_index = i;
1155	}
1156
1157	drv->state_count = i;
1158
1159	return 0;
1160}
1161
1162/**
1163 * acpi_processor_setup_cpuidle_states - prepares and configures cpuidle
1164 * global state data i.e. idle routines
1165 *
1166 * @pr: the ACPI processor
1167 */
1168static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
1169{
1170	int i;
1171	struct cpuidle_driver *drv = &acpi_idle_driver;
1172
1173	if (!pr->flags.power_setup_done || !pr->flags.power)
1174		return -EINVAL;
1175
1176	drv->safe_state_index = -1;
1177	for (i = ACPI_IDLE_STATE_START; i < CPUIDLE_STATE_MAX; i++) {
1178		drv->states[i].name[0] = '\0';
1179		drv->states[i].desc[0] = '\0';
1180	}
1181
1182	if (pr->flags.has_lpi)
1183		return acpi_processor_setup_lpi_states(pr);
1184
1185	return acpi_processor_setup_cstates(pr);
1186}
1187
1188/**
1189 * acpi_processor_setup_cpuidle_dev - prepares and configures CPUIDLE
1190 * device i.e. per-cpu data
1191 *
1192 * @pr: the ACPI processor
1193 * @dev : the cpuidle device
1194 */
1195static int acpi_processor_setup_cpuidle_dev(struct acpi_processor *pr,
1196					    struct cpuidle_device *dev)
1197{
1198	if (!pr->flags.power_setup_done || !pr->flags.power || !dev)
1199		return -EINVAL;
1200
1201	dev->cpu = pr->id;
1202	if (pr->flags.has_lpi)
1203		return acpi_processor_ffh_lpi_probe(pr->id);
1204
1205	return acpi_processor_setup_cpuidle_cx(pr, dev);
1206}
1207
1208static int acpi_processor_get_power_info(struct acpi_processor *pr)
1209{
1210	int ret;
1211
1212	ret = acpi_processor_get_lpi_info(pr);
1213	if (ret)
1214		ret = acpi_processor_get_cstate_info(pr);
1215
1216	return ret;
1217}
1218
1219int acpi_processor_hotplug(struct acpi_processor *pr)
1220{
1221	int ret = 0;
1222	struct cpuidle_device *dev;
1223
1224	if (disabled_by_idle_boot_param())
1225		return 0;
1226
1227	if (!pr->flags.power_setup_done)
1228		return -ENODEV;
1229
1230	dev = per_cpu(acpi_cpuidle_device, pr->id);
1231	cpuidle_pause_and_lock();
1232	cpuidle_disable_device(dev);
1233	ret = acpi_processor_get_power_info(pr);
1234	if (!ret && pr->flags.power) {
1235		acpi_processor_setup_cpuidle_dev(pr, dev);
1236		ret = cpuidle_enable_device(dev);
1237	}
1238	cpuidle_resume_and_unlock();
1239
1240	return ret;
1241}
1242
1243int acpi_processor_power_state_has_changed(struct acpi_processor *pr)
1244{
1245	int cpu;
1246	struct acpi_processor *_pr;
1247	struct cpuidle_device *dev;
1248
1249	if (disabled_by_idle_boot_param())
1250		return 0;
1251
1252	if (!pr->flags.power_setup_done)
1253		return -ENODEV;
1254
1255	/*
1256	 * FIXME:  Design the ACPI notification to make it once per
1257	 * system instead of once per-cpu.  This condition is a hack
1258	 * to make the code that updates C-States be called once.
1259	 */
1260
1261	if (pr->id == 0 && cpuidle_get_driver() == &acpi_idle_driver) {
1262
1263		/* Protect against cpu-hotplug */
1264		get_online_cpus();
1265		cpuidle_pause_and_lock();
1266
1267		/* Disable all cpuidle devices */
1268		for_each_online_cpu(cpu) {
1269			_pr = per_cpu(processors, cpu);
1270			if (!_pr || !_pr->flags.power_setup_done)
1271				continue;
1272			dev = per_cpu(acpi_cpuidle_device, cpu);
1273			cpuidle_disable_device(dev);
1274		}
1275
1276		/* Populate Updated C-state information */
1277		acpi_processor_get_power_info(pr);
1278		acpi_processor_setup_cpuidle_states(pr);
1279
1280		/* Enable all cpuidle devices */
1281		for_each_online_cpu(cpu) {
1282			_pr = per_cpu(processors, cpu);
1283			if (!_pr || !_pr->flags.power_setup_done)
1284				continue;
1285			acpi_processor_get_power_info(_pr);
1286			if (_pr->flags.power) {
1287				dev = per_cpu(acpi_cpuidle_device, cpu);
1288				acpi_processor_setup_cpuidle_dev(_pr, dev);
1289				cpuidle_enable_device(dev);
1290			}
1291		}
1292		cpuidle_resume_and_unlock();
1293		put_online_cpus();
1294	}
1295
1296	return 0;
1297}
1298
1299static int acpi_processor_registered;
1300
1301int acpi_processor_power_init(struct acpi_processor *pr)
1302{
1303	int retval;
1304	struct cpuidle_device *dev;
1305
1306	if (disabled_by_idle_boot_param())
1307		return 0;
1308
1309	acpi_processor_cstate_first_run_checks();
1310
1311	if (!acpi_processor_get_power_info(pr))
1312		pr->flags.power_setup_done = 1;
1313
1314	/*
1315	 * Install the idle handler if processor power management is supported.
1316	 * Note that the previously set idle handler will be used
1317	 * on platforms that only support C1.
1318	 */
1319	if (pr->flags.power) {
1320		/* Register acpi_idle_driver if not already registered */
1321		if (!acpi_processor_registered) {
1322			acpi_processor_setup_cpuidle_states(pr);
1323			retval = cpuidle_register_driver(&acpi_idle_driver);
1324			if (retval)
1325				return retval;
1326			pr_debug("%s registered with cpuidle\n",
1327				 acpi_idle_driver.name);
1328		}
1329
1330		dev = kzalloc(sizeof(*dev), GFP_KERNEL);
1331		if (!dev)
1332			return -ENOMEM;
1333		per_cpu(acpi_cpuidle_device, pr->id) = dev;
1334
1335		acpi_processor_setup_cpuidle_dev(pr, dev);
1336
1337		/* Register per-cpu cpuidle_device. Cpuidle driver
1338		 * must already be registered before registering device
1339		 */
1340		retval = cpuidle_register_device(dev);
1341		if (retval) {
1342			if (acpi_processor_registered == 0)
1343				cpuidle_unregister_driver(&acpi_idle_driver);
1344			return retval;
1345		}
1346		acpi_processor_registered++;
1347	}
1348	return 0;
1349}
1350
1351int acpi_processor_power_exit(struct acpi_processor *pr)
1352{
1353	struct cpuidle_device *dev = per_cpu(acpi_cpuidle_device, pr->id);
1354
1355	if (disabled_by_idle_boot_param())
1356		return 0;
1357
1358	if (pr->flags.power) {
1359		cpuidle_unregister_device(dev);
1360		acpi_processor_registered--;
1361		if (acpi_processor_registered == 0)
1362			cpuidle_unregister_driver(&acpi_idle_driver);
1363	}
1364
1365	pr->flags.power_setup_done = 0;
1366	return 0;
1367}
v3.1
 
   1/*
   2 * processor_idle - idle state submodule to the ACPI processor driver
   3 *
   4 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
   5 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
   6 *  Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de>
   7 *  Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
   8 *  			- Added processor hotplug support
   9 *  Copyright (C) 2005  Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
  10 *  			- Added support for C3 on SMP
  11 *
  12 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  13 *
  14 *  This program is free software; you can redistribute it and/or modify
  15 *  it under the terms of the GNU General Public License as published by
  16 *  the Free Software Foundation; either version 2 of the License, or (at
  17 *  your option) any later version.
  18 *
  19 *  This program is distributed in the hope that it will be useful, but
  20 *  WITHOUT ANY WARRANTY; without even the implied warranty of
  21 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  22 *  General Public License for more details.
  23 *
  24 *  You should have received a copy of the GNU General Public License along
  25 *  with this program; if not, write to the Free Software Foundation, Inc.,
  26 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
  27 *
  28 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  29 */
 
  30
  31#include <linux/kernel.h>
  32#include <linux/module.h>
  33#include <linux/init.h>
  34#include <linux/cpufreq.h>
  35#include <linux/slab.h>
  36#include <linux/acpi.h>
  37#include <linux/dmi.h>
  38#include <linux/moduleparam.h>
  39#include <linux/sched.h>	/* need_resched() */
  40#include <linux/pm_qos_params.h>
  41#include <linux/clockchips.h>
  42#include <linux/cpuidle.h>
  43#include <linux/irqflags.h>
 
  44
  45/*
  46 * Include the apic definitions for x86 to have the APIC timer related defines
  47 * available also for UP (on SMP it gets magically included via linux/smp.h).
  48 * asm/acpi.h is not an option, as it would require more include magic. Also
  49 * creating an empty asm-ia64/apic.h would just trade pest vs. cholera.
  50 */
  51#ifdef CONFIG_X86
  52#include <asm/apic.h>
  53#endif
  54
  55#include <asm/io.h>
  56#include <asm/uaccess.h>
  57
  58#include <acpi/acpi_bus.h>
  59#include <acpi/processor.h>
  60#include <asm/processor.h>
  61
  62#define PREFIX "ACPI: "
  63
  64#define ACPI_PROCESSOR_CLASS            "processor"
  65#define _COMPONENT              ACPI_PROCESSOR_COMPONENT
  66ACPI_MODULE_NAME("processor_idle");
  67#define PM_TIMER_TICK_NS		(1000000000ULL/PM_TIMER_FREQUENCY)
  68#define C2_OVERHEAD			1	/* 1us */
  69#define C3_OVERHEAD			1	/* 1us */
  70#define PM_TIMER_TICKS_TO_US(p)		(((p) * 1000)/(PM_TIMER_FREQUENCY/1000))
  71
  72static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
  73module_param(max_cstate, uint, 0000);
  74static unsigned int nocst __read_mostly;
  75module_param(nocst, uint, 0000);
  76static int bm_check_disable __read_mostly;
  77module_param(bm_check_disable, uint, 0000);
  78
  79static unsigned int latency_factor __read_mostly = 2;
  80module_param(latency_factor, uint, 0644);
  81
 
 
 
 
 
 
 
 
 
 
 
  82static int disabled_by_idle_boot_param(void)
  83{
  84	return boot_option_idle_override == IDLE_POLL ||
  85		boot_option_idle_override == IDLE_FORCE_MWAIT ||
  86		boot_option_idle_override == IDLE_HALT;
  87}
  88
  89/*
  90 * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
  91 * For now disable this. Probably a bug somewhere else.
  92 *
  93 * To skip this limit, boot/load with a large max_cstate limit.
  94 */
  95static int set_max_cstate(const struct dmi_system_id *id)
  96{
  97	if (max_cstate > ACPI_PROCESSOR_MAX_POWER)
  98		return 0;
  99
 100	printk(KERN_NOTICE PREFIX "%s detected - limiting to C%ld max_cstate."
 101	       " Override with \"processor.max_cstate=%d\"\n", id->ident,
 102	       (long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1);
 103
 104	max_cstate = (long)id->driver_data;
 105
 106	return 0;
 107}
 108
 109/* Actually this shouldn't be __cpuinitdata, would be better to fix the
 110   callers to only run once -AK */
 111static struct dmi_system_id __cpuinitdata processor_power_dmi_table[] = {
 112	{ set_max_cstate, "Clevo 5600D", {
 113	  DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
 114	  DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")},
 115	 (void *)2},
 116	{ set_max_cstate, "Pavilion zv5000", {
 117	  DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
 118	  DMI_MATCH(DMI_PRODUCT_NAME,"Pavilion zv5000 (DS502A#ABA)")},
 119	 (void *)1},
 120	{ set_max_cstate, "Asus L8400B", {
 121	  DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
 122	  DMI_MATCH(DMI_PRODUCT_NAME,"L8400B series Notebook PC")},
 123	 (void *)1},
 124	{},
 125};
 126
 127
 128/*
 129 * Callers should disable interrupts before the call and enable
 130 * interrupts after return.
 131 */
 132static void acpi_safe_halt(void)
 133{
 134	current_thread_info()->status &= ~TS_POLLING;
 135	/*
 136	 * TS_POLLING-cleared state must be visible before we
 137	 * test NEED_RESCHED:
 138	 */
 139	smp_mb();
 140	if (!need_resched()) {
 141		safe_halt();
 142		local_irq_disable();
 143	}
 144	current_thread_info()->status |= TS_POLLING;
 145}
 146
 147#ifdef ARCH_APICTIMER_STOPS_ON_C3
 148
 149/*
 150 * Some BIOS implementations switch to C3 in the published C2 state.
 151 * This seems to be a common problem on AMD boxen, but other vendors
 152 * are affected too. We pick the most conservative approach: we assume
 153 * that the local APIC stops in both C2 and C3.
 154 */
 155static void lapic_timer_check_state(int state, struct acpi_processor *pr,
 156				   struct acpi_processor_cx *cx)
 157{
 158	struct acpi_processor_power *pwr = &pr->power;
 159	u8 type = local_apic_timer_c2_ok ? ACPI_STATE_C3 : ACPI_STATE_C2;
 160
 161	if (cpu_has(&cpu_data(pr->id), X86_FEATURE_ARAT))
 162		return;
 163
 164	if (amd_e400_c1e_detected)
 165		type = ACPI_STATE_C1;
 166
 167	/*
 168	 * Check, if one of the previous states already marked the lapic
 169	 * unstable
 170	 */
 171	if (pwr->timer_broadcast_on_state < state)
 172		return;
 173
 174	if (cx->type >= type)
 175		pr->power.timer_broadcast_on_state = state;
 176}
 177
 178static void __lapic_timer_propagate_broadcast(void *arg)
 179{
 180	struct acpi_processor *pr = (struct acpi_processor *) arg;
 181	unsigned long reason;
 182
 183	reason = pr->power.timer_broadcast_on_state < INT_MAX ?
 184		CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF;
 185
 186	clockevents_notify(reason, &pr->id);
 187}
 188
 189static void lapic_timer_propagate_broadcast(struct acpi_processor *pr)
 190{
 191	smp_call_function_single(pr->id, __lapic_timer_propagate_broadcast,
 192				 (void *)pr, 1);
 193}
 194
 195/* Power(C) State timer broadcast control */
 196static void lapic_timer_state_broadcast(struct acpi_processor *pr,
 197				       struct acpi_processor_cx *cx,
 198				       int broadcast)
 199{
 200	int state = cx - pr->power.states;
 201
 202	if (state >= pr->power.timer_broadcast_on_state) {
 203		unsigned long reason;
 204
 205		reason = broadcast ?  CLOCK_EVT_NOTIFY_BROADCAST_ENTER :
 206			CLOCK_EVT_NOTIFY_BROADCAST_EXIT;
 207		clockevents_notify(reason, &pr->id);
 208	}
 209}
 210
 211#else
 212
 213static void lapic_timer_check_state(int state, struct acpi_processor *pr,
 214				   struct acpi_processor_cx *cstate) { }
 215static void lapic_timer_propagate_broadcast(struct acpi_processor *pr) { }
 216static void lapic_timer_state_broadcast(struct acpi_processor *pr,
 217				       struct acpi_processor_cx *cx,
 218				       int broadcast)
 219{
 
 220}
 221
 222#endif
 223
 224/*
 225 * Suspend / resume control
 226 */
 227static int acpi_idle_suspend;
 228static u32 saved_bm_rld;
 229
 230static void acpi_idle_bm_rld_save(void)
 231{
 232	acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &saved_bm_rld);
 233}
 234static void acpi_idle_bm_rld_restore(void)
 235{
 236	u32 resumed_bm_rld;
 237
 238	acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &resumed_bm_rld);
 239
 240	if (resumed_bm_rld != saved_bm_rld)
 241		acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, saved_bm_rld);
 242}
 243
 244int acpi_processor_suspend(struct acpi_device * device, pm_message_t state)
 245{
 246	if (acpi_idle_suspend == 1)
 247		return 0;
 248
 249	acpi_idle_bm_rld_save();
 250	acpi_idle_suspend = 1;
 251	return 0;
 252}
 253
 254int acpi_processor_resume(struct acpi_device * device)
 255{
 256	if (acpi_idle_suspend == 0)
 257		return 0;
 258
 259	acpi_idle_bm_rld_restore();
 260	acpi_idle_suspend = 0;
 261	return 0;
 262}
 263
 264#if defined(CONFIG_X86)
 265static void tsc_check_state(int state)
 266{
 267	switch (boot_cpu_data.x86_vendor) {
 
 268	case X86_VENDOR_AMD:
 269	case X86_VENDOR_INTEL:
 
 
 270		/*
 271		 * AMD Fam10h TSC will tick in all
 272		 * C/P/S0/S1 states when this bit is set.
 273		 */
 274		if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
 275			return;
 276
 277		/*FALL THROUGH*/
 278	default:
 279		/* TSC could halt in idle, so notify users */
 280		if (state > ACPI_STATE_C1)
 281			mark_tsc_unstable("TSC halts in idle");
 282	}
 283}
 284#else
 285static void tsc_check_state(int state) { return; }
 286#endif
 287
 288static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
 289{
 290
 291	if (!pr)
 292		return -EINVAL;
 293
 294	if (!pr->pblk)
 295		return -ENODEV;
 296
 297	/* if info is obtained from pblk/fadt, type equals state */
 298	pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
 299	pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;
 300
 301#ifndef CONFIG_HOTPLUG_CPU
 302	/*
 303	 * Check for P_LVL2_UP flag before entering C2 and above on
 304	 * an SMP system.
 305	 */
 306	if ((num_online_cpus() > 1) &&
 307	    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
 308		return -ENODEV;
 309#endif
 310
 311	/* determine C2 and C3 address from pblk */
 312	pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4;
 313	pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;
 314
 315	/* determine latencies from FADT */
 316	pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.C2latency;
 317	pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.C3latency;
 318
 319	/*
 320	 * FADT specified C2 latency must be less than or equal to
 321	 * 100 microseconds.
 322	 */
 323	if (acpi_gbl_FADT.C2latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
 324		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
 325			"C2 latency too large [%d]\n", acpi_gbl_FADT.C2latency));
 326		/* invalidate C2 */
 327		pr->power.states[ACPI_STATE_C2].address = 0;
 328	}
 329
 330	/*
 331	 * FADT supplied C3 latency must be less than or equal to
 332	 * 1000 microseconds.
 333	 */
 334	if (acpi_gbl_FADT.C3latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
 335		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
 336			"C3 latency too large [%d]\n", acpi_gbl_FADT.C3latency));
 337		/* invalidate C3 */
 338		pr->power.states[ACPI_STATE_C3].address = 0;
 339	}
 340
 341	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
 342			  "lvl2[0x%08x] lvl3[0x%08x]\n",
 343			  pr->power.states[ACPI_STATE_C2].address,
 344			  pr->power.states[ACPI_STATE_C3].address));
 345
 
 
 
 
 
 
 
 346	return 0;
 347}
 348
 349static int acpi_processor_get_power_info_default(struct acpi_processor *pr)
 350{
 351	if (!pr->power.states[ACPI_STATE_C1].valid) {
 352		/* set the first C-State to C1 */
 353		/* all processors need to support C1 */
 354		pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
 355		pr->power.states[ACPI_STATE_C1].valid = 1;
 356		pr->power.states[ACPI_STATE_C1].entry_method = ACPI_CSTATE_HALT;
 
 
 
 357	}
 358	/* the C0 state only exists as a filler in our array */
 359	pr->power.states[ACPI_STATE_C0].valid = 1;
 360	return 0;
 361}
 362
 363static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
 364{
 365	acpi_status status = 0;
 366	u64 count;
 367	int current_count;
 368	int i;
 369	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
 370	union acpi_object *cst;
 371
 372
 373	if (nocst)
 374		return -ENODEV;
 375
 376	current_count = 0;
 377
 378	status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer);
 379	if (ACPI_FAILURE(status)) {
 380		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n"));
 381		return -ENODEV;
 382	}
 383
 384	cst = buffer.pointer;
 385
 386	/* There must be at least 2 elements */
 387	if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) {
 388		printk(KERN_ERR PREFIX "not enough elements in _CST\n");
 389		status = -EFAULT;
 390		goto end;
 391	}
 392
 393	count = cst->package.elements[0].integer.value;
 394
 395	/* Validate number of power states. */
 396	if (count < 1 || count != cst->package.count - 1) {
 397		printk(KERN_ERR PREFIX "count given by _CST is not valid\n");
 398		status = -EFAULT;
 399		goto end;
 400	}
 401
 402	/* Tell driver that at least _CST is supported. */
 403	pr->flags.has_cst = 1;
 404
 405	for (i = 1; i <= count; i++) {
 406		union acpi_object *element;
 407		union acpi_object *obj;
 408		struct acpi_power_register *reg;
 409		struct acpi_processor_cx cx;
 410
 411		memset(&cx, 0, sizeof(cx));
 412
 413		element = &(cst->package.elements[i]);
 414		if (element->type != ACPI_TYPE_PACKAGE)
 415			continue;
 416
 417		if (element->package.count != 4)
 418			continue;
 419
 420		obj = &(element->package.elements[0]);
 421
 422		if (obj->type != ACPI_TYPE_BUFFER)
 423			continue;
 424
 425		reg = (struct acpi_power_register *)obj->buffer.pointer;
 426
 427		if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
 428		    (reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE))
 429			continue;
 430
 431		/* There should be an easy way to extract an integer... */
 432		obj = &(element->package.elements[1]);
 433		if (obj->type != ACPI_TYPE_INTEGER)
 434			continue;
 435
 436		cx.type = obj->integer.value;
 437		/*
 438		 * Some buggy BIOSes won't list C1 in _CST -
 439		 * Let acpi_processor_get_power_info_default() handle them later
 440		 */
 441		if (i == 1 && cx.type != ACPI_STATE_C1)
 442			current_count++;
 443
 444		cx.address = reg->address;
 445		cx.index = current_count + 1;
 446
 447		cx.entry_method = ACPI_CSTATE_SYSTEMIO;
 448		if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
 449			if (acpi_processor_ffh_cstate_probe
 450					(pr->id, &cx, reg) == 0) {
 451				cx.entry_method = ACPI_CSTATE_FFH;
 452			} else if (cx.type == ACPI_STATE_C1) {
 453				/*
 454				 * C1 is a special case where FIXED_HARDWARE
 455				 * can be handled in non-MWAIT way as well.
 456				 * In that case, save this _CST entry info.
 457				 * Otherwise, ignore this info and continue.
 458				 */
 459				cx.entry_method = ACPI_CSTATE_HALT;
 460				snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
 461			} else {
 462				continue;
 463			}
 464			if (cx.type == ACPI_STATE_C1 &&
 465			    (boot_option_idle_override == IDLE_NOMWAIT)) {
 466				/*
 467				 * In most cases the C1 space_id obtained from
 468				 * _CST object is FIXED_HARDWARE access mode.
 469				 * But when the option of idle=halt is added,
 470				 * the entry_method type should be changed from
 471				 * CSTATE_FFH to CSTATE_HALT.
 472				 * When the option of idle=nomwait is added,
 473				 * the C1 entry_method type should be
 474				 * CSTATE_HALT.
 475				 */
 476				cx.entry_method = ACPI_CSTATE_HALT;
 477				snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
 478			}
 479		} else {
 480			snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x",
 481				 cx.address);
 482		}
 483
  484		if (cx.type == ACPI_STATE_C1)
  485			cx.valid = 1;
 487
 488		obj = &(element->package.elements[2]);
 489		if (obj->type != ACPI_TYPE_INTEGER)
 490			continue;
 491
 492		cx.latency = obj->integer.value;
 493
 494		obj = &(element->package.elements[3]);
 495		if (obj->type != ACPI_TYPE_INTEGER)
 496			continue;
 497
 498		cx.power = obj->integer.value;
 499
 500		current_count++;
 501		memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx));
 502
  503		/*
  504		 * We support a total of ACPI_PROCESSOR_MAX_POWER - 1 states
  505		 * (indices 1 through ACPI_PROCESSOR_MAX_POWER - 1).
  506		 */
 507		if (current_count >= (ACPI_PROCESSOR_MAX_POWER - 1)) {
 508			printk(KERN_WARNING
 509			       "Limiting number of power states to max (%d)\n",
 510			       ACPI_PROCESSOR_MAX_POWER);
 511			printk(KERN_WARNING
 512			       "Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
 513			break;
 514		}
 515	}
 516
 517	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d power states\n",
 518			  current_count));
 519
 520	/* Validate number of power states discovered */
 521	if (current_count < 2)
 522		status = -EFAULT;
 523
  524end:
 525	kfree(buffer.pointer);
 526
 527	return status;
 528}
 529
 530static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
 531					   struct acpi_processor_cx *cx)
 532{
 533	static int bm_check_flag = -1;
 534	static int bm_control_flag = -1;
  535
 537	if (!cx->address)
 538		return;
 539
  540	/*
  541	 * PIIX4 Erratum #18: We don't support C3 when Type-F (fast)
  542	 * DMA transfers are used by any ISA device, to avoid livelock.
  543	 * Note that we could disable Type-F DMA (as recommended by
  544	 * the erratum), but this is known to disrupt certain ISA
  545	 * devices, so we take the conservative approach.
  546	 */
  547	if (errata.piix4.fdma) {
  548		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
  549				  "C3 not supported on PIIX4 with Type-F DMA\n"));
  550		return;
  551	}
 552
 553	/* All the logic here assumes flags.bm_check is same across all CPUs */
 554	if (bm_check_flag == -1) {
 555		/* Determine whether bm_check is needed based on CPU  */
 556		acpi_processor_power_init_bm_check(&(pr->flags), pr->id);
 557		bm_check_flag = pr->flags.bm_check;
 558		bm_control_flag = pr->flags.bm_control;
 559	} else {
 560		pr->flags.bm_check = bm_check_flag;
 561		pr->flags.bm_control = bm_control_flag;
 562	}
 563
 564	if (pr->flags.bm_check) {
 565		if (!pr->flags.bm_control) {
 566			if (pr->flags.has_cst != 1) {
 567				/* bus mastering control is necessary */
 568				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
 569					"C3 support requires BM control\n"));
 570				return;
 571			} else {
 572				/* Here we enter C3 without bus mastering */
 573				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
 574					"C3 support without BM control\n"));
 575			}
 576		}
 577	} else {
  578		/*
  579		 * The WBINVD flag must be set in the FADT for C3 to be
  580		 * supported when bm_check is not required.
  581		 */
 582		if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) {
 583			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
 584					  "Cache invalidation should work properly"
 585					  " for C3 to be enabled on SMP systems\n"));
 586			return;
 587		}
 588	}
 589
 590	/*
 591	 * Otherwise we've met all of our C3 requirements.
  592	 * Normalize the C3 latency to expedite policy.  Enable
  593	 * checking of bus mastering status (bm_check) so we can
  594	 * use this in our C3 policy.
 595	 */
 596	cx->valid = 1;
 597
 598	cx->latency_ticks = cx->latency;
 599	/*
 600	 * On older chipsets, BM_RLD needs to be set
 601	 * in order for Bus Master activity to wake the
 602	 * system from C3.  Newer chipsets handle DMA
 603	 * during C3 automatically and BM_RLD is a NOP.
 604	 * In either case, the proper way to
 605	 * handle BM_RLD is to set it and leave it set.
 606	 */
 607	acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
 610}
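/*
 * Summary of the C3 admission logic above, derived from the code
 * rather than from any spec table ("-" means don't care):
 *
 *	bm_check  bm_control  has_cst  FADT WBINVD   C3 usable?
 *	    1         1          -          -         yes (ARB_DIS path)
 *	    1         0          1          -         yes (no BM control)
 *	    1         0          0          -         no
 *	    0         -          -          1         yes (cache flush path)
 *	    0         -          -          0         no
 */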
 611
 612static int acpi_processor_power_verify(struct acpi_processor *pr)
 613{
 614	unsigned int i;
 615	unsigned int working = 0;
 616
 617	pr->power.timer_broadcast_on_state = INT_MAX;
 618
 619	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
 620		struct acpi_processor_cx *cx = &pr->power.states[i];
 621
 622		switch (cx->type) {
 623		case ACPI_STATE_C1:
 624			cx->valid = 1;
 625			break;
 626
 627		case ACPI_STATE_C2:
 628			if (!cx->address)
 629				break;
  630			cx->valid = 1;
 631			cx->latency_ticks = cx->latency; /* Normalize latency */
 632			break;
 633
 634		case ACPI_STATE_C3:
 635			acpi_processor_power_verify_c3(pr, cx);
 636			break;
 637		}
 638		if (!cx->valid)
 639			continue;
 640
 641		lapic_timer_check_state(i, pr, cx);
 642		tsc_check_state(cx->type);
 643		working++;
 644	}
 645
 646	lapic_timer_propagate_broadcast(pr);
 647
  648	return working;
 649}
 650
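/*
 * C-state discovery, as implemented below, tries _CST first and falls
 * back to the FADT P_LVL2/P_LVL3 registers when no _CST is present;
 * acpi_processor_get_power_info_default() then fills in a default C1
 * entry if the firmware did not describe one.
 */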
 651static int acpi_processor_get_power_info(struct acpi_processor *pr)
 652{
 653	unsigned int i;
 654	int result;
  655
 657	/* NOTE: the idle thread may not be running while calling
 658	 * this function */
 659
 660	/* Zero initialize all the C-states info. */
 661	memset(pr->power.states, 0, sizeof(pr->power.states));
 662
 663	result = acpi_processor_get_power_info_cst(pr);
 664	if (result == -ENODEV)
 665		result = acpi_processor_get_power_info_fadt(pr);
 666
 667	if (result)
 668		return result;
 669
 670	acpi_processor_get_power_info_default(pr);
 671
 672	pr->power.count = acpi_processor_power_verify(pr);
 673
 674	/*
  675	 * If at least one state of type C2 or C3 is available, mark
  676	 * this CPU as being "idle manageable".
 677	 */
 678	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
 679		if (pr->power.states[i].valid) {
 680			pr->power.count = i;
 681			if (pr->power.states[i].type >= ACPI_STATE_C2)
 682				pr->flags.power = 1;
 683		}
 684	}
 685
 686	return 0;
 687}
 688
 689/**
 690 * acpi_idle_bm_check - checks if bus master activity was detected
 691 */
 692static int acpi_idle_bm_check(void)
 693{
 694	u32 bm_status = 0;
 695
 696	if (bm_check_disable)
 697		return 0;
 698
 699	acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
 700	if (bm_status)
 701		acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
 702	/*
 703	 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
  704	 * the true state of bus mastering activity, which forces us to
 705	 * manually check the BMIDEA bit of each IDE channel.
 706	 */
 707	else if (errata.piix4.bmisx) {
 708		if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
 709		    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
 710			bm_status = 1;
 711	}
 712	return bm_status;
 713}
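/*
 * Note: BM_STS is a write-one-to-clear status bit, which is why a
 * detected event is acknowledged above by writing 1 back to it.
 */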
 714
 715/**
 716 * acpi_idle_do_entry - a helper function that does C2 and C3 type entry
 717 * @cx: cstate data
 718 *
 719 * Caller disables interrupt before call and enables interrupt after return.
 720 */
 721static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
 722{
 723	/* Don't trace irqs off for idle */
 724	stop_critical_timings();
 725	if (cx->entry_method == ACPI_CSTATE_FFH) {
 726		/* Call into architectural FFH based C-state */
 727		acpi_processor_ffh_cstate_enter(cx);
 728	} else if (cx->entry_method == ACPI_CSTATE_HALT) {
 729		acpi_safe_halt();
 730	} else {
 731		/* IO port based C-state */
 732		inb(cx->address);
  733		/* Dummy wait op - must do something useless after P_LVL2 read
  734		 * because chipsets cannot guarantee that the STPCLK# signal
  735		 * gets asserted in time to freeze execution properly. */
 736		inl(acpi_gbl_FADT.xpm_timer_block.address);
 737	}
 738	start_critical_timings();
 739}
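/*
 * A minimal usage sketch; as noted above, interrupt handling is the
 * caller's responsibility:
 *
 *	local_irq_disable();
 *	acpi_idle_do_entry(cx);
 *	local_irq_enable();
 */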
 740
 741/**
  742 * acpi_idle_enter_c1 - enters an ACPI C1 state
  743 * @dev: the target CPU
  744 * @state: the state data
  745 *
  746 * This is equivalent to the HALT instruction (or MWAIT via FFH).
 747 */
 748static int acpi_idle_enter_c1(struct cpuidle_device *dev,
 749			      struct cpuidle_state *state)
 750{
 751	ktime_t  kt1, kt2;
 752	s64 idle_time;
 753	struct acpi_processor *pr;
 754	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
 755
 756	pr = __this_cpu_read(processors);
 757
 758	if (unlikely(!pr))
 759		return 0;
 760
 761	local_irq_disable();
 762
 763	/* Do not access any ACPI IO ports in suspend path */
 764	if (acpi_idle_suspend) {
 765		local_irq_enable();
 766		cpu_relax();
 767		return 0;
 768	}
 769
 770	lapic_timer_state_broadcast(pr, cx, 1);
 771	kt1 = ktime_get_real();
 772	acpi_idle_do_entry(cx);
 773	kt2 = ktime_get_real();
  774	idle_time = ktime_to_us(ktime_sub(kt2, kt1));
 775
 776	local_irq_enable();
 777	cx->usage++;
 778	lapic_timer_state_broadcast(pr, cx, 0);
 779
 780	return idle_time;
 781}
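/*
 * Note: like the other enter handlers below, the value returned is the
 * time spent in the state, in microseconds. cpuidle only trusts this
 * figure for states flagged CPUIDLE_FLAG_TIME_VALID in
 * acpi_processor_setup_cpuidle().
 */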
 782
 783/**
 784 * acpi_idle_enter_simple - enters an ACPI state without BM handling
 785 * @dev: the target CPU
 786 * @state: the state data
 787 */
 788static int acpi_idle_enter_simple(struct cpuidle_device *dev,
 789				  struct cpuidle_state *state)
 790{
 791	struct acpi_processor *pr;
 792	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
 793	ktime_t  kt1, kt2;
 794	s64 idle_time_ns;
 795	s64 idle_time;
 796
 797	pr = __this_cpu_read(processors);
 798
 799	if (unlikely(!pr))
 800		return 0;
 801
 802	if (acpi_idle_suspend)
  803		return acpi_idle_enter_c1(dev, state);
 804
 805	local_irq_disable();
 806
 807	if (cx->entry_method != ACPI_CSTATE_FFH) {
 808		current_thread_info()->status &= ~TS_POLLING;
 809		/*
  810		 * TS_POLLING-cleared state must be visible before we test
  811		 * NEED_RESCHED (so that wakers send a reschedule IPI):
 812		 */
 813		smp_mb();
 814
 815		if (unlikely(need_resched())) {
 816			current_thread_info()->status |= TS_POLLING;
 817			local_irq_enable();
 818			return 0;
 819		}
 820	}
 821
 822	/*
 823	 * Must be done before busmaster disable as we might need to
 824	 * access HPET !
 825	 */
 826	lapic_timer_state_broadcast(pr, cx, 1);
 827
 828	if (cx->type == ACPI_STATE_C3)
 829		ACPI_FLUSH_CPU_CACHE();
 830
 831	kt1 = ktime_get_real();
 832	/* Tell the scheduler that we are going deep-idle: */
 833	sched_clock_idle_sleep_event();
 834	acpi_idle_do_entry(cx);
 835	kt2 = ktime_get_real();
 836	idle_time_ns = ktime_to_ns(ktime_sub(kt2, kt1));
 837	idle_time = idle_time_ns;
 838	do_div(idle_time, NSEC_PER_USEC);
 839
 840	/* Tell the scheduler how much we idled: */
 841	sched_clock_idle_wakeup_event(idle_time_ns);
 842
 843	local_irq_enable();
 844	if (cx->entry_method != ACPI_CSTATE_FFH)
 845		current_thread_info()->status |= TS_POLLING;
 846
 847	cx->usage++;
 848
 849	lapic_timer_state_broadcast(pr, cx, 0);
 850	cx->time += idle_time;
 851	return idle_time;
 852}
 853
 854static int c3_cpu_count;
 855static DEFINE_SPINLOCK(c3_lock);
 856
 857/**
 858 * acpi_idle_enter_bm - enters C3 with proper BM handling
 859 * @dev: the target CPU
 860 * @state: the state data
 861 *
 862 * If BM is detected, the deepest non-C3 idle state is entered instead.
 863 */
 864static int acpi_idle_enter_bm(struct cpuidle_device *dev,
 865			      struct cpuidle_state *state)
 866{
 867	struct acpi_processor *pr;
 868	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
 869	ktime_t  kt1, kt2;
 870	s64 idle_time_ns;
 871	s64 idle_time;
 872
 873
 874	pr = __this_cpu_read(processors);
 875
 876	if (unlikely(!pr))
 877		return 0;
 878
 879	if (acpi_idle_suspend)
  880		return acpi_idle_enter_c1(dev, state);
 881
 882	if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
 883		if (dev->safe_state) {
 884			dev->last_state = dev->safe_state;
 885			return dev->safe_state->enter(dev, dev->safe_state);
 886		} else {
 887			local_irq_disable();
 888			acpi_safe_halt();
 889			local_irq_enable();
 890			return 0;
 891		}
 892	}
 893
 894	local_irq_disable();
 895
 896	if (cx->entry_method != ACPI_CSTATE_FFH) {
 897		current_thread_info()->status &= ~TS_POLLING;
 898		/*
  899		 * TS_POLLING-cleared state must be visible before we test
  900		 * NEED_RESCHED (so that wakers send a reschedule IPI):
 901		 */
 902		smp_mb();
 903
 904		if (unlikely(need_resched())) {
 905			current_thread_info()->status |= TS_POLLING;
 906			local_irq_enable();
 907			return 0;
 908		}
 909	}
 910
 911	acpi_unlazy_tlb(smp_processor_id());
 912
 913	/* Tell the scheduler that we are going deep-idle: */
 914	sched_clock_idle_sleep_event();
 915	/*
 916	 * Must be done before busmaster disable as we might need to
 917	 * access HPET !
 918	 */
 919	lapic_timer_state_broadcast(pr, cx, 1);
 920
 921	kt1 = ktime_get_real();
  922	/*
  923	 * Disable bus master arbitration:
  924	 * bm_check set implies we need ARB_DIS;
  925	 * bm_check clear implies we need a cache flush;
  926	 * bm_control indicates whether we can actually do ARB_DIS.
  927	 *
  928	 * That leaves the case where bm_check is set but bm_control is
  929	 * not. In that case we cannot do much, so we enter C3
  930	 * without doing anything further.
  931	 */
 932	if (pr->flags.bm_check && pr->flags.bm_control) {
 933		spin_lock(&c3_lock);
 934		c3_cpu_count++;
 935		/* Disable bus master arbitration when all CPUs are in C3 */
 936		if (c3_cpu_count == num_online_cpus())
 937			acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
 938		spin_unlock(&c3_lock);
 939	} else if (!pr->flags.bm_check) {
 940		ACPI_FLUSH_CPU_CACHE();
 941	}
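	/*
	 * Sketch of the protocol implied by c3_cpu_count: the last CPU
	 * to enter C3 disables bus master arbitration, and the first
	 * CPU to wake (further below) re-enables it, so arbitration is
	 * only cut off while every CPU is in C3.
	 */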
 942
 943	acpi_idle_do_entry(cx);
 944
 945	/* Re-enable bus master arbitration */
 946	if (pr->flags.bm_check && pr->flags.bm_control) {
 947		spin_lock(&c3_lock);
 948		acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
 949		c3_cpu_count--;
 950		spin_unlock(&c3_lock);
 951	}
 952	kt2 = ktime_get_real();
 953	idle_time_ns = ktime_to_ns(ktime_sub(kt2, kt1));
 954	idle_time = idle_time_ns;
 955	do_div(idle_time, NSEC_PER_USEC);
 956
 957	/* Tell the scheduler how much we idled: */
 958	sched_clock_idle_wakeup_event(idle_time_ns);
 959
 960	local_irq_enable();
 961	if (cx->entry_method != ACPI_CSTATE_FFH)
 962		current_thread_info()->status |= TS_POLLING;
 963
 964	cx->usage++;
 965
 966	lapic_timer_state_broadcast(pr, cx, 0);
 967	cx->time += idle_time;
 968	return idle_time;
 969}
 975
 976/**
 977 * acpi_processor_setup_cpuidle - prepares and configures CPUIDLE
 978 * @pr: the ACPI processor
 979 */
 980static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
 981{
 982	int i, count = CPUIDLE_DRIVER_STATE_START;
 983	struct acpi_processor_cx *cx;
 984	struct cpuidle_state *state;
 985	struct cpuidle_device *dev = &pr->power.dev;
 986
 987	if (!pr->flags.power_setup_done)
 988		return -EINVAL;
 989
  990	if (pr->flags.power == 0)
  991		return -EINVAL;
 993
 994	dev->cpu = pr->id;
 995	for (i = 0; i < CPUIDLE_STATE_MAX; i++) {
 996		dev->states[i].name[0] = '\0';
 997		dev->states[i].desc[0] = '\0';
 998	}
 999
1000	if (max_cstate == 0)
1001		max_cstate = 1;
1002
1003	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
1004		cx = &pr->power.states[i];
1005		state = &dev->states[count];
1006
1007		if (!cx->valid)
1008			continue;
1009
1010#ifdef CONFIG_HOTPLUG_CPU
1011		if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
1012		    !pr->flags.has_cst &&
1013		    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
1014			continue;
1015#endif
1016		cpuidle_set_statedata(state, cx);
1017
1018		snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
1019		strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
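		/*
		 * Residency heuristic: require the CPU to stay idle for
		 * at least latency_factor (a module parameter, default 2)
		 * times the exit latency before this state pays off.
		 */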
1020		state->exit_latency = cx->latency;
1021		state->target_residency = cx->latency * latency_factor;
1022
1023		state->flags = 0;
1024		switch (cx->type) {
 1025		case ACPI_STATE_C1:
1026			if (cx->entry_method == ACPI_CSTATE_FFH)
1027				state->flags |= CPUIDLE_FLAG_TIME_VALID;
1028
1029			state->enter = acpi_idle_enter_c1;
1030			dev->safe_state = state;
1031			break;
1032
 1033		case ACPI_STATE_C2:
1034			state->flags |= CPUIDLE_FLAG_TIME_VALID;
1035			state->enter = acpi_idle_enter_simple;
1036			dev->safe_state = state;
1037			break;
1038
 1039		case ACPI_STATE_C3:
1040			state->flags |= CPUIDLE_FLAG_TIME_VALID;
1041			state->enter = pr->flags.bm_check ?
1042					acpi_idle_enter_bm :
1043					acpi_idle_enter_simple;
1044			break;
1045		}
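		/*
		 * Note: dev->safe_state ends up pointing at the deepest
		 * C1/C2 state, which acpi_idle_enter_bm() falls back to
		 * when bus master activity makes C3 unsafe.
		 */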
1046
1047		count++;
1048		if (count == CPUIDLE_STATE_MAX)
1049			break;
1050	}
1051
1052	dev->state_count = count;
1053
1054	if (!count)
1055		return -EINVAL;
1056
1057	return 0;
1058}
1059
1060int acpi_processor_cst_has_changed(struct acpi_processor *pr)
1061{
1062	int ret = 0;
1063
1064	if (disabled_by_idle_boot_param())
1065		return 0;
1066
1067	if (!pr)
1068		return -EINVAL;
1069
 1070	if (nocst)
 1071		return -ENODEV;
1073
1074	if (!pr->flags.power_setup_done)
1075		return -ENODEV;
1076
1077	cpuidle_pause_and_lock();
1078	cpuidle_disable_device(&pr->power.dev);
1079	acpi_processor_get_power_info(pr);
1080	if (pr->flags.power) {
1081		acpi_processor_setup_cpuidle(pr);
1082		ret = cpuidle_enable_device(&pr->power.dev);
1083	}
1084	cpuidle_resume_and_unlock();
1085
1086	return ret;
1087}
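/*
 * acpi_processor_cst_has_changed() is typically invoked from the ACPI
 * notify handler when the firmware signals a _CST change (for example
 * on an AC/battery transition); the pause/re-enable sequence above
 * keeps cpuidle consistent while the C-states are rebuilt.
 */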
1088
1089int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
1090			      struct acpi_device *device)
1091{
1092	acpi_status status = 0;
1093	static int first_run;
1094
1095	if (disabled_by_idle_boot_param())
1096		return 0;
1097
1098	if (!first_run) {
1099		dmi_check_system(processor_power_dmi_table);
1100		max_cstate = acpi_processor_cstate_check(max_cstate);
1101		if (max_cstate < ACPI_C_STATES_MAX)
1102			printk(KERN_NOTICE
1103			       "ACPI: processor limited to max C-state %d\n",
1104			       max_cstate);
1105		first_run++;
1106	}
1107
1108	if (!pr)
1109		return -EINVAL;
1110
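	/*
	 * FADT handshake: writing the CST_CNT value to the SMI command
	 * port tells the firmware that the OS supports _CST, so it can
	 * expose the full set of C-states (see the ACPI FADT definition
	 * of SMI_CMD and CST_CNT).
	 */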
1111	if (acpi_gbl_FADT.cst_control && !nocst) {
 1112		status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
 1113					    acpi_gbl_FADT.cst_control, 8);
1114		if (ACPI_FAILURE(status)) {
1115			ACPI_EXCEPTION((AE_INFO, status,
1116					"Notifying BIOS of _CST ability failed"));
1117		}
1118	}
1119
1120	acpi_processor_get_power_info(pr);
1121	pr->flags.power_setup_done = 1;
1122
1123	/*
1124	 * Install the idle handler if processor power management is supported.
 1125	 * Note that the previously set idle handler will still be used on
1126	 * platforms that only support C1.
1127	 */
1128	if (pr->flags.power) {
1129		acpi_processor_setup_cpuidle(pr);
1130		if (cpuidle_register_device(&pr->power.dev))
1131			return -EIO;
1132	}
1133	return 0;
1134}
1135
1136int acpi_processor_power_exit(struct acpi_processor *pr,
1137			      struct acpi_device *device)
1138{
1139	if (disabled_by_idle_boot_param())
1140		return 0;
1141
1142	cpuidle_unregister_device(&pr->power.dev);
1143	pr->flags.power_setup_done = 0;
1144
1145	return 0;
1146}