v4.6
 
   1/*
   2 * processor_idle - idle state submodule to the ACPI processor driver
   3 *
   4 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
   5 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
   6 *  Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de>
   7 *  Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
   8 *  			- Added processor hotplug support
   9 *  Copyright (C) 2005  Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
  10 *  			- Added support for C3 on SMP
  11 *
  12 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  13 *
  14 *  This program is free software; you can redistribute it and/or modify
  15 *  it under the terms of the GNU General Public License as published by
  16 *  the Free Software Foundation; either version 2 of the License, or (at
  17 *  your option) any later version.
  18 *
  19 *  This program is distributed in the hope that it will be useful, but
  20 *  WITHOUT ANY WARRANTY; without even the implied warranty of
  21 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  22 *  General Public License for more details.
  23 *
  24 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  25 */
  26#define pr_fmt(fmt) "ACPI: " fmt
  27
  28#include <linux/module.h>
  29#include <linux/acpi.h>
  30#include <linux/dmi.h>
  31#include <linux/sched.h>       /* need_resched() */
  32#include <linux/tick.h>
  33#include <linux/cpuidle.h>
  34#include <acpi/processor.h>
  35
  36/*
  37 * Include the apic definitions for x86 to have the APIC timer related defines
  38 * available also for UP (on SMP it gets magically included via linux/smp.h).
  39 * asm/acpi.h is not an option, as it would require more include magic. Also
  40 * creating an empty asm-ia64/apic.h would just trade pest vs. cholera.
  41 */
  42#ifdef CONFIG_X86
  43#include <asm/apic.h>
  44#endif
  45
  46#define ACPI_PROCESSOR_CLASS            "processor"
  47#define _COMPONENT              ACPI_PROCESSOR_COMPONENT
  48ACPI_MODULE_NAME("processor_idle");
  49
  50static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
  51module_param(max_cstate, uint, 0000);
  52static unsigned int nocst __read_mostly;
  53module_param(nocst, uint, 0000);
  54static int bm_check_disable __read_mostly;
  55module_param(bm_check_disable, uint, 0000);
  56
  57static unsigned int latency_factor __read_mostly = 2;
  58module_param(latency_factor, uint, 0644);
  59
  60static DEFINE_PER_CPU(struct cpuidle_device *, acpi_cpuidle_device);
  61
  62static
  63DEFINE_PER_CPU(struct acpi_processor_cx * [CPUIDLE_STATE_MAX], acpi_cstate);
  64
  65static int disabled_by_idle_boot_param(void)
  66{
  67	return boot_option_idle_override == IDLE_POLL ||
  68		boot_option_idle_override == IDLE_HALT;
  69}
  70
  71/*
  72 * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
  73 * For now disable this. Probably a bug somewhere else.
  74 *
  75 * To skip this limit, boot/load with a large max_cstate limit.
  76 */
  77static int set_max_cstate(const struct dmi_system_id *id)
  78{
  79	if (max_cstate > ACPI_PROCESSOR_MAX_POWER)
  80		return 0;
  81
  82	pr_notice("%s detected - limiting to C%ld max_cstate."
  83		  " Override with \"processor.max_cstate=%d\"\n", id->ident,
  84		  (long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1);
  85
  86	max_cstate = (long)id->driver_data;
  87
  88	return 0;
  89}
  90
  91static const struct dmi_system_id processor_power_dmi_table[] = {
  92	{ set_max_cstate, "Clevo 5600D", {
  93	  DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
  94	  DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")},
  95	 (void *)2},
  96	{ set_max_cstate, "Pavilion zv5000", {
  97	  DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
  98	  DMI_MATCH(DMI_PRODUCT_NAME,"Pavilion zv5000 (DS502A#ABA)")},
  99	 (void *)1},
 100	{ set_max_cstate, "Asus L8400B", {
 101	  DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
 102	  DMI_MATCH(DMI_PRODUCT_NAME,"L8400B series Notebook PC")},
 103	 (void *)1},
 104	{},
 105};
 106
 107
 108/*
 109 * Callers should disable interrupts before the call and enable
 110 * interrupts after return.
 111 */
 112static void acpi_safe_halt(void)
 113{
 114	if (!tif_need_resched()) {
 115		safe_halt();
 116		local_irq_disable();
 117	}
 118}
 119
 120#ifdef ARCH_APICTIMER_STOPS_ON_C3
 121
 122/*
 123 * Some BIOS implementations switch to C3 in the published C2 state.
 124 * This seems to be a common problem on AMD boxen, but other vendors
 125 * are affected too. We pick the most conservative approach: we assume
 126 * that the local APIC stops in both C2 and C3.
 127 */
 128static void lapic_timer_check_state(int state, struct acpi_processor *pr,
 129				   struct acpi_processor_cx *cx)
 130{
 131	struct acpi_processor_power *pwr = &pr->power;
 132	u8 type = local_apic_timer_c2_ok ? ACPI_STATE_C3 : ACPI_STATE_C2;
 133
 134	if (cpu_has(&cpu_data(pr->id), X86_FEATURE_ARAT))
 135		return;
 136
 137	if (amd_e400_c1e_detected)
 138		type = ACPI_STATE_C1;
 139
 140	/*
 141	 * Check, if one of the previous states already marked the lapic
 142	 * unstable
 143	 */
 144	if (pwr->timer_broadcast_on_state < state)
 145		return;
 146
 147	if (cx->type >= type)
 148		pr->power.timer_broadcast_on_state = state;
 149}
 150
 151static void __lapic_timer_propagate_broadcast(void *arg)
 152{
 153	struct acpi_processor *pr = (struct acpi_processor *) arg;
 154
 155	if (pr->power.timer_broadcast_on_state < INT_MAX)
 156		tick_broadcast_enable();
 157	else
 158		tick_broadcast_disable();
 159}
 160
 161static void lapic_timer_propagate_broadcast(struct acpi_processor *pr)
 162{
 163	smp_call_function_single(pr->id, __lapic_timer_propagate_broadcast,
 164				 (void *)pr, 1);
 165}
 166
 167/* Power(C) State timer broadcast control */
 168static void lapic_timer_state_broadcast(struct acpi_processor *pr,
 169				       struct acpi_processor_cx *cx,
 170				       int broadcast)
 171{
 172	int state = cx - pr->power.states;
 173
 174	if (state >= pr->power.timer_broadcast_on_state) {
 175		if (broadcast)
 176			tick_broadcast_enter();
 177		else
 178			tick_broadcast_exit();
 179	}
 180}
 181
 182#else
 183
 184static void lapic_timer_check_state(int state, struct acpi_processor *pr,
 185				   struct acpi_processor_cx *cstate) { }
 186static void lapic_timer_propagate_broadcast(struct acpi_processor *pr) { }
 187static void lapic_timer_state_broadcast(struct acpi_processor *pr,
 188				       struct acpi_processor_cx *cx,
 189				       int broadcast)
 190{
 191}
 192
 193#endif
 194
 195#if defined(CONFIG_X86)
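     /*
      * Check whether the TSC keeps ticking in the given C-state: CPUs
      * without a nonstop TSC halt it in anything deeper than C1, so the
      * TSC is marked unstable in that case.
      */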
 196static void tsc_check_state(int state)
 197{
 198	switch (boot_cpu_data.x86_vendor) {
 199	case X86_VENDOR_AMD:
 200	case X86_VENDOR_INTEL:
 201		/*
 202		 * AMD Fam10h TSC will tick in all
 203		 * C/P/S0/S1 states when this bit is set.
 204		 */
 205		if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
 206			return;
 207
 208		/*FALL THROUGH*/
 209	default:
 210		/* TSC could halt in idle, so notify users */
 211		if (state > ACPI_STATE_C1)
 212			mark_tsc_unstable("TSC halts in idle");
 213	}
 214}
 215#else
 216static void tsc_check_state(int state) { return; }
 217#endif
 218
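     /*
      * Derive C2/C3 data from the FADT: state addresses come from the
      * processor's P_BLK, latencies from the FADT fields, and any state
      * whose latency exceeds the ACPI maximum is invalidated.
      */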
 219static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
 220{
 221
 222	if (!pr->pblk)
 223		return -ENODEV;
 224
 225	/* if info is obtained from pblk/fadt, type equals state */
 226	pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
 227	pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;
 228
 229#ifndef CONFIG_HOTPLUG_CPU
 230	/*
 231	 * Check for P_LVL2_UP flag before entering C2 and above on
 232	 * an SMP system.
 233	 */
 234	if ((num_online_cpus() > 1) &&
 235	    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
 236		return -ENODEV;
 237#endif
 238
 239	/* determine C2 and C3 address from pblk */
 240	pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4;
 241	pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;
 242
 243	/* determine latencies from FADT */
 244	pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.c2_latency;
 245	pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.c3_latency;
 246
 247	/*
 248	 * FADT specified C2 latency must be less than or equal to
 249	 * 100 microseconds.
 250	 */
 251	if (acpi_gbl_FADT.c2_latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
 252		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
 253			"C2 latency too large [%d]\n", acpi_gbl_FADT.c2_latency));
 254		/* invalidate C2 */
 255		pr->power.states[ACPI_STATE_C2].address = 0;
 256	}
 257
 258	/*
 259	 * FADT supplied C3 latency must be less than or equal to
 260	 * 1000 microseconds.
 261	 */
 262	if (acpi_gbl_FADT.c3_latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
 263		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
 264			"C3 latency too large [%d]\n", acpi_gbl_FADT.c3_latency));
 265		/* invalidate C3 */
 266		pr->power.states[ACPI_STATE_C3].address = 0;
 267	}
 268
 269	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
 270			  "lvl2[0x%08x] lvl3[0x%08x]\n",
 271			  pr->power.states[ACPI_STATE_C2].address,
 272			  pr->power.states[ACPI_STATE_C3].address));
 273
 274	return 0;
 275}
 276
 277static int acpi_processor_get_power_info_default(struct acpi_processor *pr)
 278{
 279	if (!pr->power.states[ACPI_STATE_C1].valid) {
 280		/* set the first C-State to C1 */
 281		/* all processors need to support C1 */
 282		pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
 283		pr->power.states[ACPI_STATE_C1].valid = 1;
 284		pr->power.states[ACPI_STATE_C1].entry_method = ACPI_CSTATE_HALT;
 285	}
 286	/* the C0 state only exists as a filler in our array */
 287	pr->power.states[ACPI_STATE_C0].valid = 1;
 288	return 0;
 289}
 290
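     /*
      * Evaluate _CST and walk the returned package, validating each entry
      * and copying it into pr->power.states[]; returns -ENODEV when _CST
      * is absent or disabled via the "nocst" module parameter.
      */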
 291static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
 292{
 293	acpi_status status;
 294	u64 count;
 295	int current_count;
 296	int i, ret = 0;
 297	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
 298	union acpi_object *cst;
 299
 300
 301	if (nocst)
 302		return -ENODEV;
 303
 304	current_count = 0;
 305
 306	status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer);
 307	if (ACPI_FAILURE(status)) {
 308		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n"));
 309		return -ENODEV;
 310	}
 311
 312	cst = buffer.pointer;
 313
 314	/* There must be at least 2 elements */
 315	if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) {
 316		pr_err("not enough elements in _CST\n");
 317		ret = -EFAULT;
 318		goto end;
 319	}
 320
 321	count = cst->package.elements[0].integer.value;
 322
 323	/* Validate number of power states. */
 324	if (count < 1 || count != cst->package.count - 1) {
 325		pr_err("count given by _CST is not valid\n");
 326		ret = -EFAULT;
 327		goto end;
 328	}
 329
 330	/* Tell driver that at least _CST is supported. */
 331	pr->flags.has_cst = 1;
 332
 333	for (i = 1; i <= count; i++) {
 334		union acpi_object *element;
 335		union acpi_object *obj;
 336		struct acpi_power_register *reg;
 337		struct acpi_processor_cx cx;
 338
 339		memset(&cx, 0, sizeof(cx));
 340
 341		element = &(cst->package.elements[i]);
 342		if (element->type != ACPI_TYPE_PACKAGE)
 343			continue;
 344
 345		if (element->package.count != 4)
 346			continue;
 347
 348		obj = &(element->package.elements[0]);
 349
 350		if (obj->type != ACPI_TYPE_BUFFER)
 351			continue;
 352
 353		reg = (struct acpi_power_register *)obj->buffer.pointer;
 354
 355		if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
 356		    (reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE))
 357			continue;
 358
 359		/* There should be an easy way to extract an integer... */
 360		obj = &(element->package.elements[1]);
 361		if (obj->type != ACPI_TYPE_INTEGER)
 362			continue;
 363
 364		cx.type = obj->integer.value;
 365		/*
 366		 * Some buggy BIOSes won't list C1 in _CST -
 367		 * Let acpi_processor_get_power_info_default() handle them later
 368		 */
 369		if (i == 1 && cx.type != ACPI_STATE_C1)
 370			current_count++;
 371
 372		cx.address = reg->address;
 373		cx.index = current_count + 1;
 374
 375		cx.entry_method = ACPI_CSTATE_SYSTEMIO;
 376		if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
 377			if (acpi_processor_ffh_cstate_probe
 378					(pr->id, &cx, reg) == 0) {
 379				cx.entry_method = ACPI_CSTATE_FFH;
 380			} else if (cx.type == ACPI_STATE_C1) {
 381				/*
 382				 * C1 is a special case where FIXED_HARDWARE
 383				 * can be handled in non-MWAIT way as well.
 384				 * In that case, save this _CST entry info.
 385				 * Otherwise, ignore this info and continue.
 386				 */
 387				cx.entry_method = ACPI_CSTATE_HALT;
 388				snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
 389			} else {
 390				continue;
 391			}
 392			if (cx.type == ACPI_STATE_C1 &&
 393			    (boot_option_idle_override == IDLE_NOMWAIT)) {
 394				/*
 395				 * In most cases the C1 space_id obtained from
 396				 * _CST object is FIXED_HARDWARE access mode.
 397				 * But when the option of idle=halt is added,
 398				 * the entry_method type should be changed from
 399				 * CSTATE_FFH to CSTATE_HALT.
 400				 * When the option of idle=nomwait is added,
 401				 * the C1 entry_method type should be
 402				 * CSTATE_HALT.
 403				 */
 404				cx.entry_method = ACPI_CSTATE_HALT;
 405				snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
 406			}
 407		} else {
 408			snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x",
 409				 cx.address);
 410		}
 411
 412		if (cx.type == ACPI_STATE_C1) {
 413			cx.valid = 1;
 414		}
 415
 416		obj = &(element->package.elements[2]);
 417		if (obj->type != ACPI_TYPE_INTEGER)
 418			continue;
 419
 420		cx.latency = obj->integer.value;
 421
 422		obj = &(element->package.elements[3]);
 423		if (obj->type != ACPI_TYPE_INTEGER)
 424			continue;
 425
 426		current_count++;
 427		memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx));
 428
 429		/*
  430		 * We support a total of ACPI_PROCESSOR_MAX_POWER - 1 states
  431		 * (from 1 through ACPI_PROCESSOR_MAX_POWER - 1).
 432		 */
 433		if (current_count >= (ACPI_PROCESSOR_MAX_POWER - 1)) {
 434			pr_warn("Limiting number of power states to max (%d)\n",
 435				ACPI_PROCESSOR_MAX_POWER);
 436			pr_warn("Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
 437			break;
 438		}
 439	}
 440
 441	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d power states\n",
 442			  current_count));
 443
 444	/* Validate number of power states discovered */
 445	if (current_count < 2)
 446		ret = -EFAULT;
 447
 448      end:
 449	kfree(buffer.pointer);
 450
 451	return ret;
 452}
 453
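     /*
      * Decide whether this C3 state is usable, honoring chipset errata
      * and the bus-master (bm_check/bm_control) requirements.
      */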
 454static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
 455					   struct acpi_processor_cx *cx)
 456{
 457	static int bm_check_flag = -1;
 458	static int bm_control_flag = -1;
 459
 460
 461	if (!cx->address)
 462		return;
 463
 464	/*
 465	 * PIIX4 Erratum #18: We don't support C3 when Type-F (fast)
 466	 * DMA transfers are used by any ISA device to avoid livelock.
 467	 * Note that we could disable Type-F DMA (as recommended by
 468	 * the erratum), but this is known to disrupt certain ISA
 469	 * devices thus we take the conservative approach.
 470	 */
 471	else if (errata.piix4.fdma) {
 472		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
 473				  "C3 not supported on PIIX4 with Type-F DMA\n"));
 474		return;
 475	}
 476
  477	/* All the logic here assumes flags.bm_check is the same across all CPUs */
 478	if (bm_check_flag == -1) {
 479		/* Determine whether bm_check is needed based on CPU  */
 480		acpi_processor_power_init_bm_check(&(pr->flags), pr->id);
 481		bm_check_flag = pr->flags.bm_check;
 482		bm_control_flag = pr->flags.bm_control;
 483	} else {
 484		pr->flags.bm_check = bm_check_flag;
 485		pr->flags.bm_control = bm_control_flag;
 486	}
 487
 488	if (pr->flags.bm_check) {
 489		if (!pr->flags.bm_control) {
 490			if (pr->flags.has_cst != 1) {
 491				/* bus mastering control is necessary */
 492				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
 493					"C3 support requires BM control\n"));
 494				return;
 495			} else {
 496				/* Here we enter C3 without bus mastering */
 497				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
 498					"C3 support without BM control\n"));
 499			}
 500		}
 501	} else {
 502		/*
  503		 * WBINVD should be set in the FADT for the C3 state to be
  504		 * supported when bm_check is not required.
 505		 */
 506		if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) {
 507			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
 508					  "Cache invalidation should work properly"
 509					  " for C3 to be enabled on SMP systems\n"));
 510			return;
 511		}
 512	}
 513
 514	/*
 515	 * Otherwise we've met all of our C3 requirements.
  516	 * Normalize the C3 latency to expedite policy.  Enable
 517	 * checking of bus mastering status (bm_check) so we can
 518	 * use this in our C3 policy
 519	 */
 520	cx->valid = 1;
 521
 522	/*
 523	 * On older chipsets, BM_RLD needs to be set
 524	 * in order for Bus Master activity to wake the
 525	 * system from C3.  Newer chipsets handle DMA
 526	 * during C3 automatically and BM_RLD is a NOP.
 527	 * In either case, the proper way to
 528	 * handle BM_RLD is to set it and leave it set.
 529	 */
 530	acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
 531
 532	return;
 533}
 534
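     /*
      * Validate each discovered C-state, record where LAPIC timer
      * broadcast must kick in, and return the number of usable states.
      */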
 535static int acpi_processor_power_verify(struct acpi_processor *pr)
 536{
 537	unsigned int i;
 538	unsigned int working = 0;
 539
 540	pr->power.timer_broadcast_on_state = INT_MAX;
 541
 542	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
 543		struct acpi_processor_cx *cx = &pr->power.states[i];
 544
 545		switch (cx->type) {
 546		case ACPI_STATE_C1:
 547			cx->valid = 1;
 548			break;
 549
 550		case ACPI_STATE_C2:
 551			if (!cx->address)
 552				break;
 553			cx->valid = 1;
 554			break;
 555
 556		case ACPI_STATE_C3:
 557			acpi_processor_power_verify_c3(pr, cx);
 558			break;
 559		}
 560		if (!cx->valid)
 561			continue;
 562
 563		lapic_timer_check_state(i, pr, cx);
 564		tsc_check_state(cx->type);
 565		working++;
 566	}
 567
 568	lapic_timer_propagate_broadcast(pr);
 569
 570	return (working);
 571}
 572
 573static int acpi_processor_get_power_info(struct acpi_processor *pr)
 574{
 575	unsigned int i;
 576	int result;
 577
 578
 579	/* NOTE: the idle thread may not be running while calling
 580	 * this function */
 581
 582	/* Zero initialize all the C-states info. */
 583	memset(pr->power.states, 0, sizeof(pr->power.states));
 584
 585	result = acpi_processor_get_power_info_cst(pr);
 586	if (result == -ENODEV)
 587		result = acpi_processor_get_power_info_fadt(pr);
 588
 589	if (result)
 590		return result;
 591
 592	acpi_processor_get_power_info_default(pr);
 593
 594	pr->power.count = acpi_processor_power_verify(pr);
 595
 596	/*
 597	 * if one state of type C2 or C3 is available, mark this
 598	 * CPU as being "idle manageable"
 599	 */
 600	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
 601		if (pr->power.states[i].valid) {
 602			pr->power.count = i;
 603			if (pr->power.states[i].type >= ACPI_STATE_C2)
 604				pr->flags.power = 1;
 605		}
 606	}
 607
 608	return 0;
 609}
 610
 611/**
 612 * acpi_idle_bm_check - checks if bus master activity was detected
 613 */
 614static int acpi_idle_bm_check(void)
 615{
 616	u32 bm_status = 0;
 617
 618	if (bm_check_disable)
 619		return 0;
 620
 621	acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
 622	if (bm_status)
 623		acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
 624	/*
 625	 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
 626	 * the true state of bus mastering activity; forcing us to
 627	 * manually check the BMIDEA bit of each IDE channel.
 628	 */
 629	else if (errata.piix4.bmisx) {
 630		if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
 631		    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
 632			bm_status = 1;
 633	}
 634	return bm_status;
 635}
 636
 637/**
 638 * acpi_idle_do_entry - enter idle state using the appropriate method
 639 * @cx: cstate data
 640 *
 641 * Caller disables interrupt before call and enables interrupt after return.
 642 */
 643static void acpi_idle_do_entry(struct acpi_processor_cx *cx)
 644{
 645	if (cx->entry_method == ACPI_CSTATE_FFH) {
 646		/* Call into architectural FFH based C-state */
 647		acpi_processor_ffh_cstate_enter(cx);
 648	} else if (cx->entry_method == ACPI_CSTATE_HALT) {
 649		acpi_safe_halt();
 650	} else {
 651		/* IO port based C-state */
 652		inb(cx->address);
 653		/* Dummy wait op - must do something useless after P_LVL2 read
 654		   because chipsets cannot guarantee that STPCLK# signal
 655		   gets asserted in time to freeze execution properly. */
 656		inl(acpi_gbl_FADT.xpm_timer_block.address);
 657	}
 658}
 659
 660/**
 661 * acpi_idle_play_dead - enters an ACPI state for long-term idle (i.e. off-lining)
 662 * @dev: the target CPU
 663 * @index: the index of suggested state
 664 */
 665static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
 666{
 667	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
 668
 669	ACPI_FLUSH_CPU_CACHE();
 670
 671	while (1) {
 672
 673		if (cx->entry_method == ACPI_CSTATE_HALT)
 674			safe_halt();
 675		else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) {
 676			inb(cx->address);
 677			/* See comment in acpi_idle_do_entry() */
 678			inl(acpi_gbl_FADT.xpm_timer_block.address);
 679		} else
 680			return -ENODEV;
 681	}
 682
 683	/* Never reached */
 684	return 0;
 685}
 686
 687static bool acpi_idle_fallback_to_c1(struct acpi_processor *pr)
 688{
 689	return IS_ENABLED(CONFIG_HOTPLUG_CPU) && !pr->flags.has_cst &&
 690		!(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED);
 691}
 692
 693static int c3_cpu_count;
 694static DEFINE_RAW_SPINLOCK(c3_lock);
 695
 696/**
 697 * acpi_idle_enter_bm - enters C3 with proper BM handling
 698 * @pr: Target processor
 699 * @cx: Target state context
 700 * @timer_bc: Whether or not to change timer mode to broadcast
 701 */
 702static void acpi_idle_enter_bm(struct acpi_processor *pr,
 703			       struct acpi_processor_cx *cx, bool timer_bc)
 704{
 705	acpi_unlazy_tlb(smp_processor_id());
 706
 707	/*
 708	 * Must be done before busmaster disable as we might need to
 709	 * access HPET !
 710	 */
 711	if (timer_bc)
 712		lapic_timer_state_broadcast(pr, cx, 1);
 713
 714	/*
 715	 * disable bus master
 716	 * bm_check implies we need ARB_DIS
 717	 * bm_control implies whether we can do ARB_DIS
 718	 *
 719	 * That leaves a case where bm_check is set and bm_control is
 720	 * not set. In that case we cannot do much, we enter C3
 721	 * without doing anything.
 722	 */
 723	if (pr->flags.bm_control) {
 724		raw_spin_lock(&c3_lock);
 725		c3_cpu_count++;
 726		/* Disable bus master arbitration when all CPUs are in C3 */
 727		if (c3_cpu_count == num_online_cpus())
 728			acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
 729		raw_spin_unlock(&c3_lock);
 730	}
 731
 732	acpi_idle_do_entry(cx);
 733
 734	/* Re-enable bus master arbitration */
 735	if (pr->flags.bm_control) {
 736		raw_spin_lock(&c3_lock);
 737		acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
 738		c3_cpu_count--;
 739		raw_spin_unlock(&c3_lock);
 740	}
 741
 742	if (timer_bc)
 743		lapic_timer_state_broadcast(pr, cx, 0);
 744}
 745
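     /*
      * cpuidle ->enter callback: demote to C1 or the safe state where
      * necessary, handle bus-master checks for C3, then enter the state.
      */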
 746static int acpi_idle_enter(struct cpuidle_device *dev,
 747			   struct cpuidle_driver *drv, int index)
 748{
 749	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
 750	struct acpi_processor *pr;
 751
 752	pr = __this_cpu_read(processors);
 753	if (unlikely(!pr))
 754		return -EINVAL;
 755
 756	if (cx->type != ACPI_STATE_C1) {
 757		if (acpi_idle_fallback_to_c1(pr) && num_online_cpus() > 1) {
 758			index = CPUIDLE_DRIVER_STATE_START;
 759			cx = per_cpu(acpi_cstate[index], dev->cpu);
 760		} else if (cx->type == ACPI_STATE_C3 && pr->flags.bm_check) {
 761			if (cx->bm_sts_skip || !acpi_idle_bm_check()) {
 762				acpi_idle_enter_bm(pr, cx, true);
 763				return index;
 764			} else if (drv->safe_state_index >= 0) {
 765				index = drv->safe_state_index;
 766				cx = per_cpu(acpi_cstate[index], dev->cpu);
 767			} else {
 768				acpi_safe_halt();
 769				return -EBUSY;
 770			}
 771		}
 772	}
 773
 774	lapic_timer_state_broadcast(pr, cx, 1);
 775
 776	if (cx->type == ACPI_STATE_C3)
 777		ACPI_FLUSH_CPU_CACHE();
 778
 779	acpi_idle_do_entry(cx);
 780
 781	lapic_timer_state_broadcast(pr, cx, 0);
 782
 783	return index;
 784}
 785
 786static void acpi_idle_enter_freeze(struct cpuidle_device *dev,
 787				   struct cpuidle_driver *drv, int index)
 788{
 789	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
 790
 791	if (cx->type == ACPI_STATE_C3) {
 792		struct acpi_processor *pr = __this_cpu_read(processors);
 793
 794		if (unlikely(!pr))
 795			return;
 796
 797		if (pr->flags.bm_check) {
 798			acpi_idle_enter_bm(pr, cx, false);
 799			return;
 800		} else {
 801			ACPI_FLUSH_CPU_CACHE();
 802		}
 803	}
 804	acpi_idle_do_entry(cx);
 805}
 806
 807struct cpuidle_driver acpi_idle_driver = {
 808	.name =		"acpi_idle",
 809	.owner =	THIS_MODULE,
 810};
 811
 812/**
 813 * acpi_processor_setup_cpuidle_cx - prepares and configures CPUIDLE
 814 * device i.e. per-cpu data
 815 *
 816 * @pr: the ACPI processor
  817 * @dev: the cpuidle device
 818 */
 819static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
 820					   struct cpuidle_device *dev)
 821{
 822	int i, count = CPUIDLE_DRIVER_STATE_START;
 823	struct acpi_processor_cx *cx;
 824
 825	if (!pr->flags.power_setup_done)
 826		return -EINVAL;
 827
 828	if (pr->flags.power == 0) {
 829		return -EINVAL;
 830	}
 831
 832	if (!dev)
 833		return -EINVAL;
 834
 835	dev->cpu = pr->id;
 836
 837	if (max_cstate == 0)
 838		max_cstate = 1;
 839
 840	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
 841		cx = &pr->power.states[i];
 842
 843		if (!cx->valid)
 844			continue;
 845
 846		per_cpu(acpi_cstate[count], dev->cpu) = cx;
 847
 848		count++;
 849		if (count == CPUIDLE_STATE_MAX)
 850			break;
 851	}
 852
 853	if (!count)
 854		return -EINVAL;
 855
 856	return 0;
 857}
 858
 859/**
  860 * acpi_processor_setup_cpuidle_states - prepares and configures cpuidle
 861 * global state data i.e. idle routines
 862 *
 863 * @pr: the ACPI processor
 864 */
 865static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
 866{
 867	int i, count = CPUIDLE_DRIVER_STATE_START;
 868	struct acpi_processor_cx *cx;
 869	struct cpuidle_state *state;
 870	struct cpuidle_driver *drv = &acpi_idle_driver;
 871
 872	if (!pr->flags.power_setup_done)
 873		return -EINVAL;
 874
 875	if (pr->flags.power == 0)
 876		return -EINVAL;
 877
 878	drv->safe_state_index = -1;
 879	for (i = CPUIDLE_DRIVER_STATE_START; i < CPUIDLE_STATE_MAX; i++) {
 880		drv->states[i].name[0] = '\0';
 881		drv->states[i].desc[0] = '\0';
 882	}
 883
 884	if (max_cstate == 0)
 885		max_cstate = 1;
 886
 887	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
 888		cx = &pr->power.states[i];
 889
 890		if (!cx->valid)
 891			continue;
 892
 893		state = &drv->states[count];
 894		snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
 895		strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
 896		state->exit_latency = cx->latency;
 897		state->target_residency = cx->latency * latency_factor;
 898		state->enter = acpi_idle_enter;
 899
 900		state->flags = 0;
 901		if (cx->type == ACPI_STATE_C1 || cx->type == ACPI_STATE_C2) {
 902			state->enter_dead = acpi_idle_play_dead;
 903			drv->safe_state_index = count;
 904		}
 905		/*
 906		 * Halt-induced C1 is not good for ->enter_freeze, because it
 907		 * re-enables interrupts on exit.  Moreover, C1 is generally not
 908		 * particularly interesting from the suspend-to-idle angle, so
 909		 * avoid C1 and the situations in which we may need to fall back
 910		 * to it altogether.
 911		 */
 912		if (cx->type != ACPI_STATE_C1 && !acpi_idle_fallback_to_c1(pr))
 913			state->enter_freeze = acpi_idle_enter_freeze;
 914
 915		count++;
 916		if (count == CPUIDLE_STATE_MAX)
 917			break;
 918	}
 919
 920	drv->state_count = count;
 921
 922	if (!count)
 923		return -EINVAL;
 924
 925	return 0;
 926}
 927
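     /*
      * Re-evaluate the C-state tables after a processor hotplug event and
      * re-enable the corresponding cpuidle device.
      */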
 928int acpi_processor_hotplug(struct acpi_processor *pr)
 929{
 930	int ret = 0;
 931	struct cpuidle_device *dev;
 932
 933	if (disabled_by_idle_boot_param())
 934		return 0;
 935
 936	if (nocst)
 937		return -ENODEV;
 938
 939	if (!pr->flags.power_setup_done)
 940		return -ENODEV;
 941
 942	dev = per_cpu(acpi_cpuidle_device, pr->id);
 943	cpuidle_pause_and_lock();
 944	cpuidle_disable_device(dev);
 945	acpi_processor_get_power_info(pr);
 946	if (pr->flags.power) {
 947		acpi_processor_setup_cpuidle_cx(pr, dev);
 948		ret = cpuidle_enable_device(dev);
 949	}
 950	cpuidle_resume_and_unlock();
 951
 952	return ret;
 953}
 954
 955int acpi_processor_cst_has_changed(struct acpi_processor *pr)
 956{
 957	int cpu;
 958	struct acpi_processor *_pr;
 959	struct cpuidle_device *dev;
 960
 961	if (disabled_by_idle_boot_param())
 962		return 0;
 963
 964	if (nocst)
 965		return -ENODEV;
 966
 967	if (!pr->flags.power_setup_done)
 968		return -ENODEV;
 969
 970	/*
 971	 * FIXME:  Design the ACPI notification to make it once per
 972	 * system instead of once per-cpu.  This condition is a hack
 973	 * to make the code that updates C-States be called once.
 974	 */
 975
 976	if (pr->id == 0 && cpuidle_get_driver() == &acpi_idle_driver) {
 977
 978		/* Protect against cpu-hotplug */
 979		get_online_cpus();
 980		cpuidle_pause_and_lock();
 981
 982		/* Disable all cpuidle devices */
 983		for_each_online_cpu(cpu) {
 984			_pr = per_cpu(processors, cpu);
 985			if (!_pr || !_pr->flags.power_setup_done)
 986				continue;
 987			dev = per_cpu(acpi_cpuidle_device, cpu);
 988			cpuidle_disable_device(dev);
 989		}
 990
 991		/* Populate Updated C-state information */
 992		acpi_processor_get_power_info(pr);
 993		acpi_processor_setup_cpuidle_states(pr);
 994
 995		/* Enable all cpuidle devices */
 996		for_each_online_cpu(cpu) {
 997			_pr = per_cpu(processors, cpu);
 998			if (!_pr || !_pr->flags.power_setup_done)
 999				continue;
1000			acpi_processor_get_power_info(_pr);
1001			if (_pr->flags.power) {
1002				dev = per_cpu(acpi_cpuidle_device, cpu);
1003				acpi_processor_setup_cpuidle_cx(_pr, dev);
1004				cpuidle_enable_device(dev);
1005			}
1006		}
1007		cpuidle_resume_and_unlock();
1008		put_online_cpus();
1009	}
1010
1011	return 0;
1012}
1013
1014static int acpi_processor_registered;
1015
1016int acpi_processor_power_init(struct acpi_processor *pr)
1017{
1018	acpi_status status;
1019	int retval;
1020	struct cpuidle_device *dev;
1021	static int first_run;
1022
1023	if (disabled_by_idle_boot_param())
1024		return 0;
1025
1026	if (!first_run) {
1027		dmi_check_system(processor_power_dmi_table);
1028		max_cstate = acpi_processor_cstate_check(max_cstate);
1029		if (max_cstate < ACPI_C_STATES_MAX)
1030			printk(KERN_NOTICE
1031			       "ACPI: processor limited to max C-state %d\n",
1032			       max_cstate);
1033		first_run++;
1034	}
1035
1036	if (acpi_gbl_FADT.cst_control && !nocst) {
1037		status =
1038		    acpi_os_write_port(acpi_gbl_FADT.smi_command, acpi_gbl_FADT.cst_control, 8);
1039		if (ACPI_FAILURE(status)) {
1040			ACPI_EXCEPTION((AE_INFO, status,
1041					"Notifying BIOS of _CST ability failed"));
1042		}
1043	}
1044
1045	acpi_processor_get_power_info(pr);
1046	pr->flags.power_setup_done = 1;
1047
1048	/*
1049	 * Install the idle handler if processor power management is supported.
 1050	 * Note that the previously set idle handler will be used on
 1051	 * platforms that only support C1.
1052	 */
1053	if (pr->flags.power) {
1054		/* Register acpi_idle_driver if not already registered */
1055		if (!acpi_processor_registered) {
1056			acpi_processor_setup_cpuidle_states(pr);
1057			retval = cpuidle_register_driver(&acpi_idle_driver);
1058			if (retval)
1059				return retval;
1060			pr_debug("%s registered with cpuidle\n",
1061				 acpi_idle_driver.name);
1062		}
1063
1064		dev = kzalloc(sizeof(*dev), GFP_KERNEL);
1065		if (!dev)
1066			return -ENOMEM;
1067		per_cpu(acpi_cpuidle_device, pr->id) = dev;
1068
1069		acpi_processor_setup_cpuidle_cx(pr, dev);
1070
1071		/* Register per-cpu cpuidle_device. Cpuidle driver
1072		 * must already be registered before registering device
1073		 */
1074		retval = cpuidle_register_device(dev);
1075		if (retval) {
1076			if (acpi_processor_registered == 0)
1077				cpuidle_unregister_driver(&acpi_idle_driver);
1078			return retval;
1079		}
1080		acpi_processor_registered++;
1081	}
1082	return 0;
1083}
1084
1085int acpi_processor_power_exit(struct acpi_processor *pr)
1086{
1087	struct cpuidle_device *dev = per_cpu(acpi_cpuidle_device, pr->id);
1088
1089	if (disabled_by_idle_boot_param())
1090		return 0;
1091
1092	if (pr->flags.power) {
1093		cpuidle_unregister_device(dev);
1094		acpi_processor_registered--;
1095		if (acpi_processor_registered == 0)
1096			cpuidle_unregister_driver(&acpi_idle_driver);
1097	}
1098
1099	pr->flags.power_setup_done = 0;
1100	return 0;
1101}
v6.9.4
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * processor_idle - idle state submodule to the ACPI processor driver
   4 *
   5 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
   6 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
   7 *  Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de>
   8 *  Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
   9 *  			- Added processor hotplug support
  10 *  Copyright (C) 2005  Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
  11 *  			- Added support for C3 on SMP
  12 */
  13#define pr_fmt(fmt) "ACPI: " fmt
  14
  15#include <linux/module.h>
  16#include <linux/acpi.h>
  17#include <linux/dmi.h>
  18#include <linux/sched.h>       /* need_resched() */
  19#include <linux/sort.h>
  20#include <linux/tick.h>
  21#include <linux/cpuidle.h>
  22#include <linux/cpu.h>
  23#include <linux/minmax.h>
  24#include <linux/perf_event.h>
  25#include <acpi/processor.h>
  26#include <linux/context_tracking.h>
  27
  28/*
  29 * Include the apic definitions for x86 to have the APIC timer related defines
  30 * available also for UP (on SMP it gets magically included via linux/smp.h).
  31 * asm/acpi.h is not an option, as it would require more include magic. Also
  32 * creating an empty asm-ia64/apic.h would just trade pest vs. cholera.
  33 */
  34#ifdef CONFIG_X86
  35#include <asm/apic.h>
  36#include <asm/cpu.h>
  37#endif
  38
  39#define ACPI_IDLE_STATE_START	(IS_ENABLED(CONFIG_ARCH_HAS_CPU_RELAX) ? 1 : 0)
  40
  41static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
  42module_param(max_cstate, uint, 0400);
  43static bool nocst __read_mostly;
  44module_param(nocst, bool, 0400);
  45static bool bm_check_disable __read_mostly;
  46module_param(bm_check_disable, bool, 0400);
  47
  48static unsigned int latency_factor __read_mostly = 2;
  49module_param(latency_factor, uint, 0644);
  50
  51static DEFINE_PER_CPU(struct cpuidle_device *, acpi_cpuidle_device);
  52
  53struct cpuidle_driver acpi_idle_driver = {
  54	.name =		"acpi_idle",
  55	.owner =	THIS_MODULE,
  56};
  57
  58#ifdef CONFIG_ACPI_PROCESSOR_CSTATE
  59static
  60DEFINE_PER_CPU(struct acpi_processor_cx * [CPUIDLE_STATE_MAX], acpi_cstate);
  61
  62static int disabled_by_idle_boot_param(void)
  63{
  64	return boot_option_idle_override == IDLE_POLL ||
  65		boot_option_idle_override == IDLE_HALT;
  66}
  67
  68/*
  69 * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
  70 * For now disable this. Probably a bug somewhere else.
  71 *
  72 * To skip this limit, boot/load with a large max_cstate limit.
  73 */
  74static int set_max_cstate(const struct dmi_system_id *id)
  75{
  76	if (max_cstate > ACPI_PROCESSOR_MAX_POWER)
  77		return 0;
  78
  79	pr_notice("%s detected - limiting to C%ld max_cstate."
  80		  " Override with \"processor.max_cstate=%d\"\n", id->ident,
  81		  (long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1);
  82
  83	max_cstate = (long)id->driver_data;
  84
  85	return 0;
  86}
  87
  88static const struct dmi_system_id processor_power_dmi_table[] = {
  89	{ set_max_cstate, "Clevo 5600D", {
  90	  DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
  91	  DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")},
  92	 (void *)2},
  93	{ set_max_cstate, "Pavilion zv5000", {
  94	  DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
  95	  DMI_MATCH(DMI_PRODUCT_NAME,"Pavilion zv5000 (DS502A#ABA)")},
  96	 (void *)1},
  97	{ set_max_cstate, "Asus L8400B", {
  98	  DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
  99	  DMI_MATCH(DMI_PRODUCT_NAME,"L8400B series Notebook PC")},
 100	 (void *)1},
 101	{},
 102};
 103
 104
 105/*
 106 * Callers should disable interrupts before the call and enable
 107 * interrupts after return.
 108 */
 109static void __cpuidle acpi_safe_halt(void)
 110{
 111	if (!tif_need_resched()) {
 112		raw_safe_halt();
 113		raw_local_irq_disable();
 114	}
 115}
 116
 117#ifdef ARCH_APICTIMER_STOPS_ON_C3
 118
 119/*
 120 * Some BIOS implementations switch to C3 in the published C2 state.
 121 * This seems to be a common problem on AMD boxen, but other vendors
 122 * are affected too. We pick the most conservative approach: we assume
 123 * that the local APIC stops in both C2 and C3.
 124 */
 125static void lapic_timer_check_state(int state, struct acpi_processor *pr,
 126				   struct acpi_processor_cx *cx)
 127{
 128	struct acpi_processor_power *pwr = &pr->power;
 129	u8 type = local_apic_timer_c2_ok ? ACPI_STATE_C3 : ACPI_STATE_C2;
 130
 131	if (cpu_has(&cpu_data(pr->id), X86_FEATURE_ARAT))
 132		return;
 133
 134	if (boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E))
 135		type = ACPI_STATE_C1;
 136
 137	/*
 138	 * Check, if one of the previous states already marked the lapic
 139	 * unstable
 140	 */
 141	if (pwr->timer_broadcast_on_state < state)
 142		return;
 143
 144	if (cx->type >= type)
 145		pr->power.timer_broadcast_on_state = state;
 146}
 147
 148static void __lapic_timer_propagate_broadcast(void *arg)
 149{
 150	struct acpi_processor *pr = arg;
 151
 152	if (pr->power.timer_broadcast_on_state < INT_MAX)
 153		tick_broadcast_enable();
 154	else
 155		tick_broadcast_disable();
 156}
 157
 158static void lapic_timer_propagate_broadcast(struct acpi_processor *pr)
 159{
 160	smp_call_function_single(pr->id, __lapic_timer_propagate_broadcast,
 161				 (void *)pr, 1);
 162}
 163
 164/* Power(C) State timer broadcast control */
 165static bool lapic_timer_needs_broadcast(struct acpi_processor *pr,
 166					struct acpi_processor_cx *cx)
 167{
 168	return cx - pr->power.states >= pr->power.timer_broadcast_on_state;
 169}
 170
 171#else
 172
 173static void lapic_timer_check_state(int state, struct acpi_processor *pr,
 174				   struct acpi_processor_cx *cstate) { }
 175static void lapic_timer_propagate_broadcast(struct acpi_processor *pr) { }
 176
 177static bool lapic_timer_needs_broadcast(struct acpi_processor *pr,
 178					struct acpi_processor_cx *cx)
 179{
 180	return false;
 181}
 182
 183#endif
 184
 185#if defined(CONFIG_X86)
 186static void tsc_check_state(int state)
 187{
 188	switch (boot_cpu_data.x86_vendor) {
 189	case X86_VENDOR_HYGON:
 190	case X86_VENDOR_AMD:
 191	case X86_VENDOR_INTEL:
 192	case X86_VENDOR_CENTAUR:
 193	case X86_VENDOR_ZHAOXIN:
 194		/*
 195		 * AMD Fam10h TSC will tick in all
 196		 * C/P/S0/S1 states when this bit is set.
 197		 */
 198		if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
 199			return;
 200		fallthrough;
 201	default:
 202		/* TSC could halt in idle, so notify users */
 203		if (state > ACPI_STATE_C1)
 204			mark_tsc_unstable("TSC halts in idle");
 205	}
 206}
 207#else
 208static void tsc_check_state(int state) { return; }
 209#endif
 210
 211static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
 212{
 213
 214	if (!pr->pblk)
 215		return -ENODEV;
 216
 217	/* if info is obtained from pblk/fadt, type equals state */
 218	pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
 219	pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;
 220
 221#ifndef CONFIG_HOTPLUG_CPU
 222	/*
 223	 * Check for P_LVL2_UP flag before entering C2 and above on
 224	 * an SMP system.
 225	 */
 226	if ((num_online_cpus() > 1) &&
 227	    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
 228		return -ENODEV;
 229#endif
 230
 231	/* determine C2 and C3 address from pblk */
 232	pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4;
 233	pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;
 234
 235	/* determine latencies from FADT */
 236	pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.c2_latency;
 237	pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.c3_latency;
 238
 239	/*
 240	 * FADT specified C2 latency must be less than or equal to
 241	 * 100 microseconds.
 242	 */
 243	if (acpi_gbl_FADT.c2_latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
 244		acpi_handle_debug(pr->handle, "C2 latency too large [%d]\n",
 245				  acpi_gbl_FADT.c2_latency);
 246		/* invalidate C2 */
 247		pr->power.states[ACPI_STATE_C2].address = 0;
 248	}
 249
 250	/*
 251	 * FADT supplied C3 latency must be less than or equal to
 252	 * 1000 microseconds.
 253	 */
 254	if (acpi_gbl_FADT.c3_latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
 255		acpi_handle_debug(pr->handle, "C3 latency too large [%d]\n",
 256				  acpi_gbl_FADT.c3_latency);
 257		/* invalidate C3 */
 258		pr->power.states[ACPI_STATE_C3].address = 0;
 259	}
 260
 261	acpi_handle_debug(pr->handle, "lvl2[0x%08x] lvl3[0x%08x]\n",
 262			  pr->power.states[ACPI_STATE_C2].address,
 263			  pr->power.states[ACPI_STATE_C3].address);
 264
 265	snprintf(pr->power.states[ACPI_STATE_C2].desc,
 266			 ACPI_CX_DESC_LEN, "ACPI P_LVL2 IOPORT 0x%x",
 267			 pr->power.states[ACPI_STATE_C2].address);
 268	snprintf(pr->power.states[ACPI_STATE_C3].desc,
 269			 ACPI_CX_DESC_LEN, "ACPI P_LVL3 IOPORT 0x%x",
 270			 pr->power.states[ACPI_STATE_C3].address);
 271
 272	return 0;
 273}
 274
 275static int acpi_processor_get_power_info_default(struct acpi_processor *pr)
 276{
 277	if (!pr->power.states[ACPI_STATE_C1].valid) {
 278		/* set the first C-State to C1 */
 279		/* all processors need to support C1 */
 280		pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
 281		pr->power.states[ACPI_STATE_C1].valid = 1;
 282		pr->power.states[ACPI_STATE_C1].entry_method = ACPI_CSTATE_HALT;
 283
 284		snprintf(pr->power.states[ACPI_STATE_C1].desc,
 285			 ACPI_CX_DESC_LEN, "ACPI HLT");
 286	}
 287	/* the C0 state only exists as a filler in our array */
 288	pr->power.states[ACPI_STATE_C0].valid = 1;
 289	return 0;
 290}
 291
 292static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
 293{
 294	int ret;
 295
 296	if (nocst)
 297		return -ENODEV;
 298
 299	ret = acpi_processor_evaluate_cst(pr->handle, pr->id, &pr->power);
 300	if (ret)
 301		return ret;
 302
 303	if (!pr->power.count)
 304		return -EFAULT;
 305
 306	pr->flags.has_cst = 1;
 307	return 0;
 308}
 309
 310static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
 311					   struct acpi_processor_cx *cx)
 312{
 313	static int bm_check_flag = -1;
 314	static int bm_control_flag = -1;
 315
 316
 317	if (!cx->address)
 318		return;
 319
 320	/*
 321	 * PIIX4 Erratum #18: We don't support C3 when Type-F (fast)
 322	 * DMA transfers are used by any ISA device to avoid livelock.
 323	 * Note that we could disable Type-F DMA (as recommended by
 324	 * the erratum), but this is known to disrupt certain ISA
 325	 * devices thus we take the conservative approach.
 326	 */
 327	if (errata.piix4.fdma) {
 328		acpi_handle_debug(pr->handle,
 329				  "C3 not supported on PIIX4 with Type-F DMA\n");
 330		return;
 331	}
 332
  333	/* All the logic here assumes flags.bm_check is the same across all CPUs */
 334	if (bm_check_flag == -1) {
 335		/* Determine whether bm_check is needed based on CPU  */
 336		acpi_processor_power_init_bm_check(&(pr->flags), pr->id);
 337		bm_check_flag = pr->flags.bm_check;
 338		bm_control_flag = pr->flags.bm_control;
 339	} else {
 340		pr->flags.bm_check = bm_check_flag;
 341		pr->flags.bm_control = bm_control_flag;
 342	}
 343
 344	if (pr->flags.bm_check) {
 345		if (!pr->flags.bm_control) {
 346			if (pr->flags.has_cst != 1) {
 347				/* bus mastering control is necessary */
 348				acpi_handle_debug(pr->handle,
 349						  "C3 support requires BM control\n");
 350				return;
 351			} else {
 352				/* Here we enter C3 without bus mastering */
 353				acpi_handle_debug(pr->handle,
 354						  "C3 support without BM control\n");
 355			}
 356		}
 357	} else {
 358		/*
  359		 * WBINVD should be set in the FADT for the C3 state to be
  360		 * supported when bm_check is not required.
 361		 */
 362		if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) {
 363			acpi_handle_debug(pr->handle,
 364					  "Cache invalidation should work properly"
 365					  " for C3 to be enabled on SMP systems\n");
 366			return;
 367		}
 368	}
 369
 370	/*
 371	 * Otherwise we've met all of our C3 requirements.
  372	 * Normalize the C3 latency to expedite policy.  Enable
 373	 * checking of bus mastering status (bm_check) so we can
 374	 * use this in our C3 policy
 375	 */
 376	cx->valid = 1;
 377
 378	/*
 379	 * On older chipsets, BM_RLD needs to be set
 380	 * in order for Bus Master activity to wake the
 381	 * system from C3.  Newer chipsets handle DMA
 382	 * during C3 automatically and BM_RLD is a NOP.
 383	 * In either case, the proper way to
 384	 * handle BM_RLD is to set it and leave it set.
 385	 */
 386	acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
 387}
 388
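     /*
      * sort() callbacks used to reorder C-states whose firmware-reported
      * latencies are not monotonically increasing (see the buggy_latency
      * workaround in acpi_processor_power_verify()).
      */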
 389static int acpi_cst_latency_cmp(const void *a, const void *b)
 390{
 391	const struct acpi_processor_cx *x = a, *y = b;
 392
 393	if (!(x->valid && y->valid))
 394		return 0;
 395	if (x->latency > y->latency)
 396		return 1;
 397	if (x->latency < y->latency)
 398		return -1;
 399	return 0;
 400}
 401static void acpi_cst_latency_swap(void *a, void *b, int n)
 402{
 403	struct acpi_processor_cx *x = a, *y = b;
 404
 405	if (!(x->valid && y->valid))
 406		return;
 407	swap(x->latency, y->latency);
 408}
 409
 410static int acpi_processor_power_verify(struct acpi_processor *pr)
 411{
 412	unsigned int i;
 413	unsigned int working = 0;
 414	unsigned int last_latency = 0;
 415	unsigned int last_type = 0;
 416	bool buggy_latency = false;
 417
 418	pr->power.timer_broadcast_on_state = INT_MAX;
 419
 420	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
 421		struct acpi_processor_cx *cx = &pr->power.states[i];
 422
 423		switch (cx->type) {
 424		case ACPI_STATE_C1:
 425			cx->valid = 1;
 426			break;
 427
 428		case ACPI_STATE_C2:
 429			if (!cx->address)
 430				break;
 431			cx->valid = 1;
 432			break;
 433
 434		case ACPI_STATE_C3:
 435			acpi_processor_power_verify_c3(pr, cx);
 436			break;
 437		}
 438		if (!cx->valid)
 439			continue;
 440		if (cx->type >= last_type && cx->latency < last_latency)
 441			buggy_latency = true;
 442		last_latency = cx->latency;
 443		last_type = cx->type;
 444
 445		lapic_timer_check_state(i, pr, cx);
 446		tsc_check_state(cx->type);
 447		working++;
 448	}
 449
 450	if (buggy_latency) {
 451		pr_notice("FW issue: working around C-state latencies out of order\n");
 452		sort(&pr->power.states[1], max_cstate,
 453		     sizeof(struct acpi_processor_cx),
 454		     acpi_cst_latency_cmp,
 455		     acpi_cst_latency_swap);
 456	}
 457
 458	lapic_timer_propagate_broadcast(pr);
 459
 460	return working;
 461}
 462
 463static int acpi_processor_get_cstate_info(struct acpi_processor *pr)
 464{
 465	unsigned int i;
 466	int result;
 467
 468
 469	/* NOTE: the idle thread may not be running while calling
 470	 * this function */
 471
 472	/* Zero initialize all the C-states info. */
 473	memset(pr->power.states, 0, sizeof(pr->power.states));
 474
 475	result = acpi_processor_get_power_info_cst(pr);
 476	if (result == -ENODEV)
 477		result = acpi_processor_get_power_info_fadt(pr);
 478
 479	if (result)
 480		return result;
 481
 482	acpi_processor_get_power_info_default(pr);
 483
 484	pr->power.count = acpi_processor_power_verify(pr);
 485
 486	/*
 487	 * if one state of type C2 or C3 is available, mark this
 488	 * CPU as being "idle manageable"
 489	 */
 490	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
 491		if (pr->power.states[i].valid) {
 492			pr->power.count = i;
 493			pr->flags.power = 1;
 494		}
 495	}
 496
 497	return 0;
 498}
 499
 500/**
 501 * acpi_idle_bm_check - checks if bus master activity was detected
 502 */
 503static int acpi_idle_bm_check(void)
 504{
 505	u32 bm_status = 0;
 506
 507	if (bm_check_disable)
 508		return 0;
 509
 510	acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
 511	if (bm_status)
 512		acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
 513	/*
 514	 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
 515	 * the true state of bus mastering activity; forcing us to
 516	 * manually check the BMIDEA bit of each IDE channel.
 517	 */
 518	else if (errata.piix4.bmisx) {
 519		if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
 520		    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
 521			bm_status = 1;
 522	}
 523	return bm_status;
 524}
 525
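     /*
      * Enter an I/O-port based C-state by reading its address; the dummy
      * PM-timer read that may follow works around chipsets that assert
      * STPCLK# too slowly.
      */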
 526static __cpuidle void io_idle(unsigned long addr)
 527{
 528	/* IO port based C-state */
 529	inb(addr);
 530
 531#ifdef	CONFIG_X86
 532	/* No delay is needed if we are in guest */
 533	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
 534		return;
 535	/*
 536	 * Modern (>=Nehalem) Intel systems use ACPI via intel_idle,
 537	 * not this code.  Assume that any Intel systems using this
 538	 * are ancient and may need the dummy wait.  This also assumes
 539	 * that the motivating chipset issue was Intel-only.
 540	 */
 541	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
 542		return;
 543#endif
 544	/*
 545	 * Dummy wait op - must do something useless after P_LVL2 read
 546	 * because chipsets cannot guarantee that STPCLK# signal gets
 547	 * asserted in time to freeze execution properly
 548	 *
 549	 * This workaround has been in place since the original ACPI
 550	 * implementation was merged, circa 2002.
 551	 *
 552	 * If a profile is pointing to this instruction, please first
 553	 * consider moving your system to a more modern idle
 554	 * mechanism.
 555	 */
 556	inl(acpi_gbl_FADT.xpm_timer_block.address);
 557}
 558
 559/**
 560 * acpi_idle_do_entry - enter idle state using the appropriate method
 561 * @cx: cstate data
 562 *
 563 * Caller disables interrupt before call and enables interrupt after return.
 564 */
 565static void __cpuidle acpi_idle_do_entry(struct acpi_processor_cx *cx)
 566{
 567	perf_lopwr_cb(true);
 568
 569	if (cx->entry_method == ACPI_CSTATE_FFH) {
 570		/* Call into architectural FFH based C-state */
 571		acpi_processor_ffh_cstate_enter(cx);
 572	} else if (cx->entry_method == ACPI_CSTATE_HALT) {
 573		acpi_safe_halt();
 574	} else {
 575		io_idle(cx->address);
 576	}
 577
 578	perf_lopwr_cb(false);
 579}
 580
 581/**
 582 * acpi_idle_play_dead - enters an ACPI state for long-term idle (i.e. off-lining)
 583 * @dev: the target CPU
 584 * @index: the index of suggested state
 585 */
 586static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
 587{
 588	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
 589
 590	ACPI_FLUSH_CPU_CACHE();
 591
 592	while (1) {
 593
 594		if (cx->entry_method == ACPI_CSTATE_HALT)
 595			raw_safe_halt();
 596		else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) {
 597			io_idle(cx->address);
 598		} else
 599			return -ENODEV;
 600	}
 601
 602	/* Never reached */
 603	return 0;
 604}
 605
 606static __always_inline bool acpi_idle_fallback_to_c1(struct acpi_processor *pr)
 607{
 608	return IS_ENABLED(CONFIG_HOTPLUG_CPU) && !pr->flags.has_cst &&
 609		!(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED);
 610}
 611
 612static int c3_cpu_count;
 613static DEFINE_RAW_SPINLOCK(c3_lock);
 614
 615/**
 616 * acpi_idle_enter_bm - enters C3 with proper BM handling
 617 * @drv: cpuidle driver
 618 * @pr: Target processor
 619 * @cx: Target state context
 620 * @index: index of target state
 621 */
 622static int __cpuidle acpi_idle_enter_bm(struct cpuidle_driver *drv,
 623			       struct acpi_processor *pr,
 624			       struct acpi_processor_cx *cx,
 625			       int index)
 626{
 627	static struct acpi_processor_cx safe_cx = {
 628		.entry_method = ACPI_CSTATE_HALT,
 629	};
 630
 631	/*
 632	 * disable bus master
 633	 * bm_check implies we need ARB_DIS
 634	 * bm_control implies whether we can do ARB_DIS
 635	 *
 636	 * That leaves a case where bm_check is set and bm_control is not set.
 637	 * In that case we cannot do much, we enter C3 without doing anything.
 638	 */
 639	bool dis_bm = pr->flags.bm_control;
 640
 641	instrumentation_begin();
 642
 643	/* If we can skip BM, demote to a safe state. */
 644	if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
 645		dis_bm = false;
 646		index = drv->safe_state_index;
 647		if (index >= 0) {
 648			cx = this_cpu_read(acpi_cstate[index]);
 649		} else {
 650			cx = &safe_cx;
 651			index = -EBUSY;
 652		}
 653	}
 654
 655	if (dis_bm) {
 656		raw_spin_lock(&c3_lock);
 657		c3_cpu_count++;
 658		/* Disable bus master arbitration when all CPUs are in C3 */
 659		if (c3_cpu_count == num_online_cpus())
 660			acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
 661		raw_spin_unlock(&c3_lock);
 662	}
 663
 664	ct_cpuidle_enter();
 665
 666	acpi_idle_do_entry(cx);
 667
 668	ct_cpuidle_exit();
 669
 670	/* Re-enable bus master arbitration */
 671	if (dis_bm) {
 672		raw_spin_lock(&c3_lock);
 673		acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
 674		c3_cpu_count--;
 675		raw_spin_unlock(&c3_lock);
 676	}
 677
 678	instrumentation_end();
 679
 680	return index;
 681}
 682
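     /*
      * cpuidle ->enter callback: route C3 with bus-master checking through
      * acpi_idle_enter_bm(), demote C2 to C1 where required, then enter.
      */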
 683static int __cpuidle acpi_idle_enter(struct cpuidle_device *dev,
 684			   struct cpuidle_driver *drv, int index)
 685{
 686	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
 687	struct acpi_processor *pr;
 688
 689	pr = __this_cpu_read(processors);
 690	if (unlikely(!pr))
 691		return -EINVAL;
 692
 693	if (cx->type != ACPI_STATE_C1) {
 694		if (cx->type == ACPI_STATE_C3 && pr->flags.bm_check)
 695			return acpi_idle_enter_bm(drv, pr, cx, index);
 696
 697		/* C2 to C1 demotion. */
 698		if (acpi_idle_fallback_to_c1(pr) && num_online_cpus() > 1) {
 699			index = ACPI_IDLE_STATE_START;
 700			cx = per_cpu(acpi_cstate[index], dev->cpu);
 701		}
 702	}
 703
 704	if (cx->type == ACPI_STATE_C3)
 705		ACPI_FLUSH_CPU_CACHE();
 706
 707	acpi_idle_do_entry(cx);
 708
 709	return index;
 710}
 711
 712static int __cpuidle acpi_idle_enter_s2idle(struct cpuidle_device *dev,
 713				  struct cpuidle_driver *drv, int index)
 714{
 715	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
 716
 717	if (cx->type == ACPI_STATE_C3) {
 718		struct acpi_processor *pr = __this_cpu_read(processors);
 719
 720		if (unlikely(!pr))
 721			return 0;
 722
 723		if (pr->flags.bm_check) {
 724			u8 bm_sts_skip = cx->bm_sts_skip;
 725
 726			/* Don't check BM_STS, do an unconditional ARB_DIS for S2IDLE */
 727			cx->bm_sts_skip = 1;
 728			acpi_idle_enter_bm(drv, pr, cx, index);
 729			cx->bm_sts_skip = bm_sts_skip;
 730
 731			return 0;
 732		} else {
 733			ACPI_FLUSH_CPU_CACHE();
 734		}
 735	}
 736	acpi_idle_do_entry(cx);
 737
 738	return 0;
 739}
 740
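/*
 * Populate the per-CPU acpi_cstate[] table from this processor's valid
 * C-states and adjust the cpuidle state flags (timer broadcast, TLB flush,
 * RCU-idle) that follow from the properties of those C-states.
 */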
 741static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
 742					   struct cpuidle_device *dev)
 743{
 744	int i, count = ACPI_IDLE_STATE_START;
 745	struct acpi_processor_cx *cx;
 746	struct cpuidle_state *state;
 747
 748	if (max_cstate == 0)
 749		max_cstate = 1;
 750
 751	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
 752		state = &acpi_idle_driver.states[count];
 753		cx = &pr->power.states[i];
 754
 755		if (!cx->valid)
 756			continue;
 757
 758		per_cpu(acpi_cstate[count], dev->cpu) = cx;
 759
 760		if (lapic_timer_needs_broadcast(pr, cx))
 761			state->flags |= CPUIDLE_FLAG_TIMER_STOP;
 762
 763		if (cx->type == ACPI_STATE_C3) {
 764			state->flags |= CPUIDLE_FLAG_TLB_FLUSHED;
 765			if (pr->flags.bm_check)
 766				state->flags |= CPUIDLE_FLAG_RCU_IDLE;
 767		}
 768
 769		count++;
 770		if (count == CPUIDLE_STATE_MAX)
 771			break;
 772	}
 773
 774	if (!count)
 775		return -EINVAL;
 776
 777	return 0;
 778}
 779
 780static int acpi_processor_setup_cstates(struct acpi_processor *pr)
 781{
 782	int i, count;
 783	struct acpi_processor_cx *cx;
 784	struct cpuidle_state *state;
 785	struct cpuidle_driver *drv = &acpi_idle_driver;
 786
 787	if (max_cstate == 0)
 788		max_cstate = 1;
 789
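	/* State 0 is a polling state when the architecture supports cpu_relax(). */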
 790	if (IS_ENABLED(CONFIG_ARCH_HAS_CPU_RELAX)) {
 791		cpuidle_poll_state_init(drv);
 792		count = 1;
 793	} else {
 794		count = 0;
 795	}
 796
 797	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
 798		cx = &pr->power.states[i];
 799
 800		if (!cx->valid)
 801			continue;
 802
 803		state = &drv->states[count];
 804		snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
 805		strscpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
 806		state->exit_latency = cx->latency;
 807		state->target_residency = cx->latency * latency_factor;
 808		state->enter = acpi_idle_enter;
 809
 810		state->flags = 0;
 811		if (cx->type == ACPI_STATE_C1 || cx->type == ACPI_STATE_C2 ||
 812		    cx->type == ACPI_STATE_C3) {
 813			state->enter_dead = acpi_idle_play_dead;
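			/*
			 * The safe state ends up being the deepest C1/C2
			 * state, i.e. one needing no bus-master handling;
			 * acpi_idle_enter_bm() demotes to it when bus-master
			 * activity is detected.
			 */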
 814			if (cx->type != ACPI_STATE_C3)
 815				drv->safe_state_index = count;
 816		}
 817		/*
 818		 * Halt-induced C1 is not good for ->enter_s2idle, because it
 819		 * re-enables interrupts on exit.  Moreover, C1 is generally not
 820		 * particularly interesting from the suspend-to-idle angle, so
 821		 * avoid C1 and the situations in which we may need to fall back
 822		 * to it altogether.
 823		 */
 824		if (cx->type != ACPI_STATE_C1 && !acpi_idle_fallback_to_c1(pr))
 825			state->enter_s2idle = acpi_idle_enter_s2idle;
 826
 827		count++;
 828		if (count == CPUIDLE_STATE_MAX)
 829			break;
 830	}
 831
 832	drv->state_count = count;
 833
 834	if (!count)
 835		return -EINVAL;
 836
 837	return 0;
 838}
 839
 840static inline void acpi_processor_cstate_first_run_checks(void)
 841{
 842	static int first_run;
 843
 844	if (first_run)
 845		return;
 846	dmi_check_system(processor_power_dmi_table);
 847	max_cstate = acpi_processor_cstate_check(max_cstate);
 848	if (max_cstate < ACPI_C_STATES_MAX)
 849		pr_notice("processor limited to max C-state %d\n", max_cstate);
 850
 851	first_run++;
 852
 853	if (nocst)
 854		return;
 855
 856	acpi_processor_claim_cst_control();
 857}
 858#else
 859
 860static inline int disabled_by_idle_boot_param(void) { return 0; }
 861static inline void acpi_processor_cstate_first_run_checks(void) { }
 862static int acpi_processor_get_cstate_info(struct acpi_processor *pr)
 863{
 864	return -ENODEV;
 865}
 866
 867static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
 868					   struct cpuidle_device *dev)
 869{
 870	return -EINVAL;
 871}
 872
 873static int acpi_processor_setup_cstates(struct acpi_processor *pr)
 874{
 875	return -EINVAL;
 876}
 877
 878#endif /* CONFIG_ACPI_PROCESSOR_CSTATE */
 879
 880struct acpi_lpi_states_array {
 881	unsigned int size;
 882	unsigned int composite_states_size;
 883	struct acpi_lpi_state *entries;
 884	struct acpi_lpi_state *composite_states[ACPI_PROCESSOR_MAX_POWER];
 885};
 886
 887static int obj_get_integer(union acpi_object *obj, u32 *value)
 888{
 889	if (obj->type != ACPI_TYPE_INTEGER)
 890		return -EINVAL;
 891
 892	*value = obj->integer.value;
 893	return 0;
 894}
 895
 896static int acpi_processor_evaluate_lpi(acpi_handle handle,
 897				       struct acpi_lpi_states_array *info)
 898{
 899	acpi_status status;
 900	int ret = 0;
 901	int pkg_count, state_idx = 1, loop;
 902	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
 903	union acpi_object *lpi_data;
 904	struct acpi_lpi_state *lpi_state;
 905
 906	status = acpi_evaluate_object(handle, "_LPI", NULL, &buffer);
 907	if (ACPI_FAILURE(status)) {
 908		acpi_handle_debug(handle, "No _LPI, giving up\n");
 909		return -ENODEV;
 910	}
 911
 912	lpi_data = buffer.pointer;
 913
 914	/* There must be at least 4 elements = 3 elements + 1 package */
 915	if (!lpi_data || lpi_data->type != ACPI_TYPE_PACKAGE ||
 916	    lpi_data->package.count < 4) {
 917		pr_debug("not enough elements in _LPI\n");
 918		ret = -ENODATA;
 919		goto end;
 920	}
 921
 922	pkg_count = lpi_data->package.elements[2].integer.value;
 923
 924	/* Validate number of power states. */
 925	if (pkg_count < 1 || pkg_count != lpi_data->package.count - 3) {
 926		pr_debug("count given by _LPI is not valid\n");
 927		ret = -ENODATA;
 928		goto end;
 929	}
 930
 931	lpi_state = kcalloc(pkg_count, sizeof(*lpi_state), GFP_KERNEL);
 932	if (!lpi_state) {
 933		ret = -ENOMEM;
 934		goto end;
 935	}
 936
 937	info->size = pkg_count;
 938	info->entries = lpi_state;
 939
 940	/* LPI States start at index 3 */
 941	for (loop = 3; state_idx <= pkg_count; loop++, state_idx++, lpi_state++) {
 942		union acpi_object *element, *pkg_elem, *obj;
 943
 944		element = &lpi_data->package.elements[loop];
 945		if (element->type != ACPI_TYPE_PACKAGE || element->package.count < 7)
 946			continue;
 947
 948		pkg_elem = element->package.elements;
 949
 950		obj = pkg_elem + 6;
 951		if (obj->type == ACPI_TYPE_BUFFER) {
 952			struct acpi_power_register *reg;
 953
 954			reg = (struct acpi_power_register *)obj->buffer.pointer;
 955			if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
 956			    reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)
 957				continue;
 958
 959			lpi_state->address = reg->address;
 960			lpi_state->entry_method =
 961				reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE ?
 962				ACPI_CSTATE_FFH : ACPI_CSTATE_SYSTEMIO;
 963		} else if (obj->type == ACPI_TYPE_INTEGER) {
 964			lpi_state->entry_method = ACPI_CSTATE_INTEGER;
 965			lpi_state->address = obj->integer.value;
 966		} else {
 967			continue;
 968		}
 969
 970		/* elements[7,8] (Residency/Usage counters) are skipped for now */
 971
 972		obj = pkg_elem + 9;
 973		if (obj->type == ACPI_TYPE_STRING)
 974			strscpy(lpi_state->desc, obj->string.pointer,
 975				ACPI_CX_DESC_LEN);
 976
 977		lpi_state->index = state_idx;
 978		if (obj_get_integer(pkg_elem + 0, &lpi_state->min_residency)) {
 979			pr_debug("No min. residency found, assuming 10 us\n");
 980			lpi_state->min_residency = 10;
 981		}
 982
 983		if (obj_get_integer(pkg_elem + 1, &lpi_state->wake_latency)) {
 984			pr_debug("No wake latency found, assuming 10 us\n");
 985			lpi_state->wake_latency = 10;
 986		}
 987
 988		if (obj_get_integer(pkg_elem + 2, &lpi_state->flags))
 989			lpi_state->flags = 0;
 990
 991		if (obj_get_integer(pkg_elem + 3, &lpi_state->arch_flags))
 992			lpi_state->arch_flags = 0;
 993
 994		if (obj_get_integer(pkg_elem + 4, &lpi_state->res_cnt_freq))
 995			lpi_state->res_cnt_freq = 1;
 996
 997		if (obj_get_integer(pkg_elem + 5, &lpi_state->enable_parent_state))
 998			lpi_state->enable_parent_state = 0;
 999	}
1000
1001	acpi_handle_debug(handle, "Found %d power states\n", state_idx - 1);
1002end:
1003	kfree(buffer.pointer);
1004	return ret;
1005}
1006
1007/*
1008 * flat_state_cnt - the number of composite LPI states after flattening
1009 */
1010static int flat_state_cnt;
1011
1012/**
1013 * combine_lpi_states - combine local and parent LPI states to form a composite LPI state
1014 *
1015 * @local: local LPI state
1016 * @parent: parent LPI state
1017 * @result: composite LPI state
1018 */
1019static bool combine_lpi_states(struct acpi_lpi_state *local,
1020			       struct acpi_lpi_state *parent,
1021			       struct acpi_lpi_state *result)
1022{
1023	if (parent->entry_method == ACPI_CSTATE_INTEGER) {
1024		if (!parent->address) /* 0 means autopromotable */
1025			return false;
1026		result->address = local->address + parent->address;
1027	} else {
1028		result->address = parent->address;
1029	}
1030
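	/*
	 * A composite state is only as shallow as its deepest component:
	 * residency requirements combine as a maximum, while wake latencies
	 * accumulate along the path out of the hierarchy.
	 */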
1031	result->min_residency = max(local->min_residency, parent->min_residency);
1032	result->wake_latency = local->wake_latency + parent->wake_latency;
1033	result->enable_parent_state = parent->enable_parent_state;
1034	result->entry_method = local->entry_method;
1035
1036	result->flags = parent->flags;
1037	result->arch_flags = parent->arch_flags;
1038	result->index = parent->index;
1039
1040	strscpy(result->desc, local->desc, ACPI_CX_DESC_LEN);
1041	strlcat(result->desc, "+", ACPI_CX_DESC_LEN);
1042	strlcat(result->desc, parent->desc, ACPI_CX_DESC_LEN);
1043	return true;
1044}
1045
1046#define ACPI_LPI_STATE_FLAGS_ENABLED			BIT(0)
1047
1048static void stash_composite_state(struct acpi_lpi_states_array *curr_level,
1049				  struct acpi_lpi_state *t)
1050{
1051	curr_level->composite_states[curr_level->composite_states_size++] = t;
1052}
1053
1054static int flatten_lpi_states(struct acpi_processor *pr,
1055			      struct acpi_lpi_states_array *curr_level,
1056			      struct acpi_lpi_states_array *prev_level)
1057{
1058	int i, j, state_count = curr_level->size;
1059	struct acpi_lpi_state *p, *t = curr_level->entries;
1060
1061	curr_level->composite_states_size = 0;
1062	for (j = 0; j < state_count; j++, t++) {
1063		struct acpi_lpi_state *flpi;
1064
1065		if (!(t->flags & ACPI_LPI_STATE_FLAGS_ENABLED))
1066			continue;
1067
1068		if (flat_state_cnt >= ACPI_PROCESSOR_MAX_POWER) {
1069			pr_warn("Limiting number of LPI states to max (%d)\n",
1070				ACPI_PROCESSOR_MAX_POWER);
1071			pr_warn("Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
1072			break;
1073		}
1074
1075		flpi = &pr->power.lpi_states[flat_state_cnt];
1076
1077		if (!prev_level) { /* leaf/processor node */
1078			memcpy(flpi, t, sizeof(*t));
1079			stash_composite_state(curr_level, flpi);
1080			flat_state_cnt++;
1081			continue;
1082		}
1083
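		/*
		 * Combine this level's state with every composite state from
		 * the previous (child) level whose enable_parent_state allows
		 * promotion into it.
		 */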
1084		for (i = 0; i < prev_level->composite_states_size; i++) {
1085			p = prev_level->composite_states[i];
1086			if (t->index <= p->enable_parent_state &&
1087			    combine_lpi_states(p, t, flpi)) {
1088				stash_composite_state(curr_level, flpi);
1089				flat_state_cnt++;
1090				flpi++;
1091			}
1092		}
1093	}
1094
1095	kfree(curr_level->entries);
1096	return 0;
1097}
1098
1099int __weak acpi_processor_ffh_lpi_probe(unsigned int cpu)
1100{
1101	return -EOPNOTSUPP;
1102}
1103
1104static int acpi_processor_get_lpi_info(struct acpi_processor *pr)
1105{
1106	int ret, i;
1107	acpi_status status;
1108	acpi_handle handle = pr->handle, pr_ahandle;
1109	struct acpi_device *d = NULL;
1110	struct acpi_lpi_states_array info[2], *tmp, *prev, *curr;
1111
1112	/* make sure our architecture has support */
1113	ret = acpi_processor_ffh_lpi_probe(pr->id);
1114	if (ret == -EOPNOTSUPP)
1115		return ret;
1116
1117	if (!osc_pc_lpi_support_confirmed)
1118		return -EOPNOTSUPP;
1119
1120	if (!acpi_has_method(handle, "_LPI"))
1121		return -EINVAL;
1122
1123	flat_state_cnt = 0;
1124	prev = &info[0];
1125	curr = &info[1];
1126	handle = pr->handle;
1127	ret = acpi_processor_evaluate_lpi(handle, prev);
1128	if (ret)
1129		return ret;
1130	flatten_lpi_states(pr, prev, NULL);
1131
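	/*
	 * Walk up the processor-container hierarchy, flattening each
	 * ancestor's _LPI states into the composite set, until a node
	 * without the processor-container HID or without _LPI is reached.
	 */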
1132	status = acpi_get_parent(handle, &pr_ahandle);
1133	while (ACPI_SUCCESS(status)) {
1134		d = acpi_fetch_acpi_dev(pr_ahandle);
1135		if (!d)
1136			break;
1137
1138		handle = pr_ahandle;
1139
1140		if (strcmp(acpi_device_hid(d), ACPI_PROCESSOR_CONTAINER_HID))
1141			break;
1142
1143		/* _LPI may be optional here? */
1144		if (!acpi_has_method(handle, "_LPI"))
1145			break;
1146
1147		ret = acpi_processor_evaluate_lpi(handle, curr);
1148		if (ret)
1149			break;
1150
1151		/* flatten all the LPI states at this level of the hierarchy */
1152		flatten_lpi_states(pr, curr, prev);
1153
1154		tmp = prev, prev = curr, curr = tmp;
1155
1156		status = acpi_get_parent(handle, &pr_ahandle);
1157	}
1158
1159	pr->power.count = flat_state_cnt;
1160	/* reset the index after flattening */
1161	for (i = 0; i < pr->power.count; i++)
1162		pr->power.lpi_states[i].index = i;
1163
1164	/* Tell driver that _LPI is supported. */
1165	pr->flags.has_lpi = 1;
1166	pr->flags.power = 1;
1167
1168	return 0;
1169}
1170
1171int __weak acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi)
1172{
1173	return -ENODEV;
1174}
1175
1176/**
1177 * acpi_idle_lpi_enter - enter an ACPI LPI state
1178 * @dev: the target CPU
1179 * @drv: cpuidle driver containing cpuidle state info
1180 * @index: index of target state
1181 *
1182 * Return: 0 for success or negative value for error
1183 */
1184static int acpi_idle_lpi_enter(struct cpuidle_device *dev,
1185			       struct cpuidle_driver *drv, int index)
1186{
1187	struct acpi_processor *pr;
1188	struct acpi_lpi_state *lpi;
1189
1190	pr = __this_cpu_read(processors);
1191
1192	if (unlikely(!pr))
1193		return -EINVAL;
1194
1195	lpi = &pr->power.lpi_states[index];
1196	if (lpi->entry_method == ACPI_CSTATE_FFH)
1197		return acpi_processor_ffh_lpi_enter(lpi);
1198
1199	return -EINVAL;
1200}
1201
1202static int acpi_processor_setup_lpi_states(struct acpi_processor *pr)
1203{
1204	int i;
1205	struct acpi_lpi_state *lpi;
1206	struct cpuidle_state *state;
1207	struct cpuidle_driver *drv = &acpi_idle_driver;
1208
1209	if (!pr->flags.has_lpi)
1210		return -EOPNOTSUPP;
1211
1212	for (i = 0; i < pr->power.count && i < CPUIDLE_STATE_MAX; i++) {
1213		lpi = &pr->power.lpi_states[i];
1214
1215		state = &drv->states[i];
1216		snprintf(state->name, CPUIDLE_NAME_LEN, "LPI-%d", i);
1217		strscpy(state->desc, lpi->desc, CPUIDLE_DESC_LEN);
1218		state->exit_latency = lpi->wake_latency;
1219		state->target_residency = lpi->min_residency;
1220		state->flags |= arch_get_idle_state_flags(lpi->arch_flags);
1221		if (i != 0 && lpi->entry_method == ACPI_CSTATE_FFH)
1222			state->flags |= CPUIDLE_FLAG_RCU_IDLE;
1223		state->enter = acpi_idle_lpi_enter;
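		/* Overwritten each iteration, so it ends at the deepest LPI state. */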
1224		drv->safe_state_index = i;
1225	}
1226
1227	drv->state_count = i;
1228
1229	return 0;
1230}
1231
1232/**
1233 * acpi_processor_setup_cpuidle_states - prepares and configures cpuidle
1234 * global state data, i.e. the idle routines
1235 *
1236 * @pr: the ACPI processor
1237 */
1238static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
1239{
1240	int i;
1241	struct cpuidle_driver *drv = &acpi_idle_driver;
1242
1243	if (!pr->flags.power_setup_done || !pr->flags.power)
1244		return -EINVAL;
1245
1246	drv->safe_state_index = -1;
1247	for (i = ACPI_IDLE_STATE_START; i < CPUIDLE_STATE_MAX; i++) {
1248		drv->states[i].name[0] = '\0';
1249		drv->states[i].desc[0] = '\0';
1250	}
1251
1252	if (pr->flags.has_lpi)
1253		return acpi_processor_setup_lpi_states(pr);
1254
1255	return acpi_processor_setup_cstates(pr);
1256}
1257
1258/**
1259 * acpi_processor_setup_cpuidle_dev - prepares and configures CPUIDLE
1260 * device, i.e. per-CPU data
1261 *
1262 * @pr: the ACPI processor
1263 * @dev: the cpuidle device
1264 */
1265static int acpi_processor_setup_cpuidle_dev(struct acpi_processor *pr,
1266					    struct cpuidle_device *dev)
1267{
1268	if (!pr->flags.power_setup_done || !pr->flags.power || !dev)
1269		return -EINVAL;
1270
1271	dev->cpu = pr->id;
1272	if (pr->flags.has_lpi)
1273		return acpi_processor_ffh_lpi_probe(pr->id);
1274
1275	return acpi_processor_setup_cpuidle_cx(pr, dev);
1276}
1277
1278static int acpi_processor_get_power_info(struct acpi_processor *pr)
1279{
1280	int ret;
1281
1282	ret = acpi_processor_get_lpi_info(pr);
1283	if (ret)
1284		ret = acpi_processor_get_cstate_info(pr);
1285
1286	return ret;
1287}
1288
1289int acpi_processor_hotplug(struct acpi_processor *pr)
1290{
1291	int ret = 0;
1292	struct cpuidle_device *dev;
1293
1294	if (disabled_by_idle_boot_param())
1295		return 0;
1296
1297	if (!pr->flags.power_setup_done)
1298		return -ENODEV;
1299
1300	dev = per_cpu(acpi_cpuidle_device, pr->id);
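	/* Quiesce cpuidle while this CPU's C-state table is rebuilt. */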
1301	cpuidle_pause_and_lock();
1302	cpuidle_disable_device(dev);
1303	ret = acpi_processor_get_power_info(pr);
1304	if (!ret && pr->flags.power) {
1305		acpi_processor_setup_cpuidle_dev(pr, dev);
1306		ret = cpuidle_enable_device(dev);
1307	}
1308	cpuidle_resume_and_unlock();
1309
1310	return ret;
1311}
1312
1313int acpi_processor_power_state_has_changed(struct acpi_processor *pr)
1314{
1315	int cpu;
1316	struct acpi_processor *_pr;
1317	struct cpuidle_device *dev;
1318
1319	if (disabled_by_idle_boot_param())
1320		return 0;
1321
1322	if (!pr->flags.power_setup_done)
1323		return -ENODEV;
1324
1325	/*
1326	 * FIXME: Design the ACPI notification to make it once per
1327	 * system instead of once per CPU.  This condition is a hack
1328	 * to make the code that updates C-states be called only once.
1329	 */
1330
1331	if (pr->id == 0 && cpuidle_get_driver() == &acpi_idle_driver) {
1332
1333		/* Protect against cpu-hotplug */
1334		cpus_read_lock();
1335		cpuidle_pause_and_lock();
1336
1337		/* Disable all cpuidle devices */
1338		for_each_online_cpu(cpu) {
1339			_pr = per_cpu(processors, cpu);
1340			if (!_pr || !_pr->flags.power_setup_done)
1341				continue;
1342			dev = per_cpu(acpi_cpuidle_device, cpu);
1343			cpuidle_disable_device(dev);
1344		}
1345
1346		/* Populate Updated C-state information */
1347		acpi_processor_get_power_info(pr);
1348		acpi_processor_setup_cpuidle_states(pr);
1349
1350		/* Enable all cpuidle devices */
1351		for_each_online_cpu(cpu) {
1352			_pr = per_cpu(processors, cpu);
1353			if (!_pr || !_pr->flags.power_setup_done)
1354				continue;
1355			acpi_processor_get_power_info(_pr);
1356			if (_pr->flags.power) {
1357				dev = per_cpu(acpi_cpuidle_device, cpu);
1358				acpi_processor_setup_cpuidle_dev(_pr, dev);
1359				cpuidle_enable_device(dev);
1360			}
1361		}
1362		cpuidle_resume_and_unlock();
1363		cpus_read_unlock();
1364	}
1365
1366	return 0;
1367}
1368
1369static int acpi_processor_registered;
1370
1371int acpi_processor_power_init(struct acpi_processor *pr)
1372{
1373	int retval;
1374	struct cpuidle_device *dev;
1375
1376	if (disabled_by_idle_boot_param())
1377		return 0;
1378
1379	acpi_processor_cstate_first_run_checks();
1380
1381	if (!acpi_processor_get_power_info(pr))
1382		pr->flags.power_setup_done = 1;
1383
1384	/*
1385	 * Install the idle handler if processor power management is supported.
1386	 * Note that the previously set idle handler will be used on platforms
1387	 * that only support C1.
1388	 */
1389	if (pr->flags.power) {
1390		/* Register acpi_idle_driver if not already registered */
1391		if (!acpi_processor_registered) {
1392			acpi_processor_setup_cpuidle_states(pr);
1393			retval = cpuidle_register_driver(&acpi_idle_driver);
1394			if (retval)
1395				return retval;
1396			pr_debug("%s registered with cpuidle\n",
1397				 acpi_idle_driver.name);
1398		}
1399
1400		dev = kzalloc(sizeof(*dev), GFP_KERNEL);
1401		if (!dev)
1402			return -ENOMEM;
1403		per_cpu(acpi_cpuidle_device, pr->id) = dev;
1404
1405		acpi_processor_setup_cpuidle_dev(pr, dev);
1406
1407		/* Register the per-CPU cpuidle_device; the cpuidle driver
1408		 * must already be registered before registering the device.
1409		 */
1410		retval = cpuidle_register_device(dev);
1411		if (retval) {
1412			if (acpi_processor_registered == 0)
1413				cpuidle_unregister_driver(&acpi_idle_driver);
1414			return retval;
1415		}
1416		acpi_processor_registered++;
1417	}
1418	return 0;
1419}
1420
1421int acpi_processor_power_exit(struct acpi_processor *pr)
1422{
1423	struct cpuidle_device *dev = per_cpu(acpi_cpuidle_device, pr->id);
1424
1425	if (disabled_by_idle_boot_param())
1426		return 0;
1427
1428	if (pr->flags.power) {
1429		cpuidle_unregister_device(dev);
1430		acpi_processor_registered--;
1431		if (acpi_processor_registered == 0)
1432			cpuidle_unregister_driver(&acpi_idle_driver);
1433
1434		kfree(dev);
1435	}
1436
1437	pr->flags.power_setup_done = 0;
1438	return 0;
1439}