Linux Audio

Check our new training course

Linux debugging, profiling, tracing and performance analysis training

Apr 14-17, 2025
Register
Loading...
v4.6
   1/*
   2 * intel_idle.c - native hardware idle loop for modern Intel processors
   3 *
   4 * Copyright (c) 2013, Intel Corporation.
   5 * Len Brown <len.brown@intel.com>
   6 *
   7 * This program is free software; you can redistribute it and/or modify it
   8 * under the terms and conditions of the GNU General Public License,
   9 * version 2, as published by the Free Software Foundation.
  10 *
  11 * This program is distributed in the hope it will be useful, but WITHOUT
  12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  14 * more details.
  15 *
  16 * You should have received a copy of the GNU General Public License along with
  17 * this program; if not, write to the Free Software Foundation, Inc.,
  18 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  19 */
  20
  21/*
  22 * intel_idle is a cpuidle driver that loads on specific Intel processors
  23 * in lieu of the legacy ACPI processor_idle driver.  The intent is to
  24 * make Linux more efficient on these processors, as intel_idle knows
  25 * more than ACPI, as well as make Linux more immune to ACPI BIOS bugs.
  26 */
  27
  28/*
  29 * Design Assumptions
  30 *
  31 * All CPUs have same idle states as boot CPU
  32 *
  33 * Chipset BM_STS (bus master status) bit is a NOP
   34 *	for preventing entry into deep C-states
  35 */
  36
  37/*
  38 * Known limitations
  39 *
  40 * The driver currently initializes for_each_online_cpu() upon modprobe.
   41 * It is unaware of subsequent processors hot-added to the system.
  42 * This means that if you boot with maxcpus=n and later online
  43 * processors above n, those processors will use C1 only.
  44 *
   45 * ACPI has a .suspend hack to turn off deep c-states during suspend
  46 * to avoid complications with the lapic timer workaround.
  47 * Have not seen issues with suspend, but may need same workaround here.
  48 *
  49 * There is currently no kernel-based automatic probing/loading mechanism
  50 * if the driver is built as a module.
  51 */
  52
  53/* un-comment DEBUG to enable pr_debug() statements */
  54#define DEBUG
  55
  56#include <linux/kernel.h>
  57#include <linux/cpuidle.h>
  58#include <linux/tick.h>
 
  59#include <trace/events/power.h>
  60#include <linux/sched.h>
  61#include <linux/notifier.h>
  62#include <linux/cpu.h>
  63#include <linux/module.h>
  64#include <asm/cpu_device_id.h>
  65#include <asm/mwait.h>
  66#include <asm/msr.h>
  67
  68#define INTEL_IDLE_VERSION "0.4.1"
  69#define PREFIX "intel_idle: "
  70
/* The cpuidle driver object registered with the cpuidle core. */
static struct cpuidle_driver intel_idle_driver = {
	.name = "intel_idle",
	.owner = THIS_MODULE,
};
/* intel_idle.max_cstate=0 disables driver */
static int max_cstate = CPUIDLE_STATE_MAX - 1;

/* Raw CPUID.MWAIT leaf sub-state data, filled in by intel_idle_probe(). */
static unsigned int mwait_substates;

#define LAPIC_TIMER_ALWAYS_RELIABLE 0xFFFFFFFF
/* Reliable LAPIC Timer States, bit 1 for C1 etc.  */
static unsigned int lapic_timer_reliable_states = (1 << 1);	 /* Default to only C1 */
  83
/*
 * Per-CPU-model description: which C-state table to use and which
 * model-specific MSR quirks to apply at init time.
 */
struct idle_cpu {
	/* NULL-enter-terminated table of supported C-states */
	struct cpuidle_state *state_table;

	/*
	 * Hardware C-state auto-demotion may not always be optimal.
	 * Indicate which enable bits to clear here.
	 */
	unsigned long auto_demotion_disable_flags;
	/* BYT/CHT-specific auto-demotion disable (handled separately) */
	bool byt_auto_demotion_disable_flag;
	/* clear the C1E promotion bit in MSR_IA32_POWER_CTL if set */
	bool disable_promotion_to_c1e;
};
  95
/* Description of the CPU model we matched at probe time. */
static const struct idle_cpu *icpu;
/* Per-CPU cpuidle device objects, allocated at module init. */
static struct cpuidle_device __percpu *intel_idle_cpuidle_devices;
static int intel_idle(struct cpuidle_device *dev,
			struct cpuidle_driver *drv, int index);
static void intel_idle_freeze(struct cpuidle_device *dev,
			      struct cpuidle_driver *drv, int index);
static int intel_idle_cpu_init(int cpu);

/* Active C-state table; selected in probe, possibly retuned later. */
static struct cpuidle_state *cpuidle_state_table;
 105
 106/*
 107 * Set this flag for states where the HW flushes the TLB for us
 108 * and so we don't need cross-calls to keep it consistent.
 109 * If this flag is set, SW flushes the TLB, so even if the
 110 * HW doesn't do the flushing, this flag is safe to use.
 111 */
 112#define CPUIDLE_FLAG_TLB_FLUSHED	0x10000
 113
 114/*
 115 * MWAIT takes an 8-bit "hint" in EAX "suggesting"
 116 * the C-state (top nibble) and sub-state (bottom nibble)
 117 * 0x00 means "MWAIT(C1)", 0x10 means "MWAIT(C2)" etc.
 118 *
 119 * We store the hint at the top of our "flags" for each state.
 120 */
 121#define flg2MWAIT(flags) (((flags) >> 24) & 0xFF)
 122#define MWAIT2flg(eax) ((eax & 0xFF) << 24)
 123
 124/*
 125 * States are indexed by the cstate number,
 126 * which is also the index into the MWAIT hint array.
 127 * Thus C0 is a dummy.
 128 */
/* Nehalem/Westmere C-states; table terminated by .enter == NULL entry. */
static struct cpuidle_state nehalem_cstates[] = {
	{
		.name = "C1-NHM",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 3,
		.target_residency = 6,
		.enter = &intel_idle,
		.enter_freeze = intel_idle_freeze, },
	{
		.name = "C1E-NHM",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01),
		.exit_latency = 10,
		.target_residency = 20,
		.enter = &intel_idle,
		.enter_freeze = intel_idle_freeze, },
	{
		.name = "C3-NHM",
		.desc = "MWAIT 0x10",
		.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 20,
		.target_residency = 80,
		.enter = &intel_idle,
		.enter_freeze = intel_idle_freeze, },
	{
		.name = "C6-NHM",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 200,
		.target_residency = 800,
		.enter = &intel_idle,
		.enter_freeze = intel_idle_freeze, },
	{
		.enter = NULL }
};
 165
/* Sandy Bridge C-states; terminated by .enter == NULL entry. */
static struct cpuidle_state snb_cstates[] = {
	{
		.name = "C1-SNB",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 2,
		.target_residency = 2,
		.enter = &intel_idle,
		.enter_freeze = intel_idle_freeze, },
	{
		.name = "C1E-SNB",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01),
		.exit_latency = 10,
		.target_residency = 20,
		.enter = &intel_idle,
		.enter_freeze = intel_idle_freeze, },
	{
		.name = "C3-SNB",
		.desc = "MWAIT 0x10",
		.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 80,
		.target_residency = 211,
		.enter = &intel_idle,
		.enter_freeze = intel_idle_freeze, },
	{
		.name = "C6-SNB",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 104,
		.target_residency = 345,
		.enter = &intel_idle,
		.enter_freeze = intel_idle_freeze, },
	{
		.name = "C7-SNB",
		.desc = "MWAIT 0x30",
		.flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 109,
		.target_residency = 345,
		.enter = &intel_idle,
		.enter_freeze = intel_idle_freeze, },
	{
		.enter = NULL }
};
 210
/* Bay Trail C-states; terminated by .enter == NULL entry. */
static struct cpuidle_state byt_cstates[] = {
	{
		.name = "C1-BYT",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 1,
		.target_residency = 1,
		.enter = &intel_idle,
		.enter_freeze = intel_idle_freeze, },
	{
		.name = "C6N-BYT",
		.desc = "MWAIT 0x58",
		.flags = MWAIT2flg(0x58) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 300,
		.target_residency = 275,
		.enter = &intel_idle,
		.enter_freeze = intel_idle_freeze, },
	{
		.name = "C6S-BYT",
		.desc = "MWAIT 0x52",
		.flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 500,
		.target_residency = 560,
		.enter = &intel_idle,
		.enter_freeze = intel_idle_freeze, },
	{
		.name = "C7-BYT",
		.desc = "MWAIT 0x60",
		.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 1200,
		.target_residency = 4000,
		.enter = &intel_idle,
		.enter_freeze = intel_idle_freeze, },
	{
		.name = "C7S-BYT",
		.desc = "MWAIT 0x64",
		.flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 10000,
		.target_residency = 20000,
		.enter = &intel_idle,
		.enter_freeze = intel_idle_freeze, },
	{
		.enter = NULL }
};
 255
/* Cherry Trail C-states; terminated by .enter == NULL entry. */
static struct cpuidle_state cht_cstates[] = {
	{
		.name = "C1-CHT",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 1,
		.target_residency = 1,
		.enter = &intel_idle,
		.enter_freeze = intel_idle_freeze, },
	{
		.name = "C6N-CHT",
		.desc = "MWAIT 0x58",
		.flags = MWAIT2flg(0x58) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 80,
		.target_residency = 275,
		.enter = &intel_idle,
		.enter_freeze = intel_idle_freeze, },
	{
		.name = "C6S-CHT",
		.desc = "MWAIT 0x52",
		.flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 200,
		.target_residency = 560,
		.enter = &intel_idle,
		.enter_freeze = intel_idle_freeze, },
	{
		.name = "C7-CHT",
		.desc = "MWAIT 0x60",
		.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 1200,
		.target_residency = 4000,
		.enter = &intel_idle,
		.enter_freeze = intel_idle_freeze, },
	{
		.name = "C7S-CHT",
		.desc = "MWAIT 0x64",
		.flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 10000,
		.target_residency = 20000,
		.enter = &intel_idle,
		.enter_freeze = intel_idle_freeze, },
	{
		.enter = NULL }
};
 300
/* Ivy Bridge (client) C-states; terminated by .enter == NULL entry. */
static struct cpuidle_state ivb_cstates[] = {
	{
		.name = "C1-IVB",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 1,
		.target_residency = 1,
		.enter = &intel_idle,
		.enter_freeze = intel_idle_freeze, },
	{
		.name = "C1E-IVB",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01),
		.exit_latency = 10,
		.target_residency = 20,
		.enter = &intel_idle,
		.enter_freeze = intel_idle_freeze, },
	{
		.name = "C3-IVB",
		.desc = "MWAIT 0x10",
		.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 59,
		.target_residency = 156,
		.enter = &intel_idle,
		.enter_freeze = intel_idle_freeze, },
	{
		.name = "C6-IVB",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 80,
		.target_residency = 300,
		.enter = &intel_idle,
		.enter_freeze = intel_idle_freeze, },
	{
		.name = "C7-IVB",
		.desc = "MWAIT 0x30",
		.flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 87,
		.target_residency = 300,
		.enter = &intel_idle,
		.enter_freeze = intel_idle_freeze, },
	{
		.enter = NULL }
};
 345
/* Ivy Town (1-2 socket) C-states; terminated by .enter == NULL entry. */
static struct cpuidle_state ivt_cstates[] = {
	{
		.name = "C1-IVT",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 1,
		.target_residency = 1,
		.enter = &intel_idle,
		.enter_freeze = intel_idle_freeze, },
	{
		.name = "C1E-IVT",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01),
		.exit_latency = 10,
		.target_residency = 80,
		.enter = &intel_idle,
		.enter_freeze = intel_idle_freeze, },
	{
		.name = "C3-IVT",
		.desc = "MWAIT 0x10",
		.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 59,
		.target_residency = 156,
		.enter = &intel_idle,
		.enter_freeze = intel_idle_freeze, },
	{
		.name = "C6-IVT",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 82,
		.target_residency = 300,
		.enter = &intel_idle,
		.enter_freeze = intel_idle_freeze, },
	{
		.enter = NULL }
};
 382
/* Ivy Town 3-4 socket variant (longer residencies); see ivt_idle_state_table_update(). */
static struct cpuidle_state ivt_cstates_4s[] = {
	{
		.name = "C1-IVT-4S",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 1,
		.target_residency = 1,
		.enter = &intel_idle,
		.enter_freeze = intel_idle_freeze, },
	{
		.name = "C1E-IVT-4S",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01),
		.exit_latency = 10,
		.target_residency = 250,
		.enter = &intel_idle,
		.enter_freeze = intel_idle_freeze, },
	{
		.name = "C3-IVT-4S",
		.desc = "MWAIT 0x10",
		.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 59,
		.target_residency = 300,
		.enter = &intel_idle,
		.enter_freeze = intel_idle_freeze, },
	{
		.name = "C6-IVT-4S",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 84,
		.target_residency = 400,
		.enter = &intel_idle,
		.enter_freeze = intel_idle_freeze, },
	{
		.enter = NULL }
};
 419
/* Ivy Town >4 socket variant (longest residencies); see ivt_idle_state_table_update(). */
static struct cpuidle_state ivt_cstates_8s[] = {
	{
		.name = "C1-IVT-8S",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 1,
		.target_residency = 1,
		.enter = &intel_idle,
		.enter_freeze = intel_idle_freeze, },
	{
		.name = "C1E-IVT-8S",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01),
		.exit_latency = 10,
		.target_residency = 500,
		.enter = &intel_idle,
		.enter_freeze = intel_idle_freeze, },
	{
		.name = "C3-IVT-8S",
		.desc = "MWAIT 0x10",
		.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 59,
		.target_residency = 600,
		.enter = &intel_idle,
		.enter_freeze = intel_idle_freeze, },
	{
		.name = "C6-IVT-8S",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 88,
		.target_residency = 700,
		.enter = &intel_idle,
		.enter_freeze = intel_idle_freeze, },
	{
		.enter = NULL }
};
 456
/* Haswell C-states; terminated by .enter == NULL entry. */
static struct cpuidle_state hsw_cstates[] = {
	{
		.name = "C1-HSW",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 2,
		.target_residency = 2,
		.enter = &intel_idle,
		.enter_freeze = intel_idle_freeze, },
	{
		.name = "C1E-HSW",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01),
		.exit_latency = 10,
		.target_residency = 20,
		.enter = &intel_idle,
		.enter_freeze = intel_idle_freeze, },
	{
		.name = "C3-HSW",
		.desc = "MWAIT 0x10",
		.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 33,
		.target_residency = 100,
		.enter = &intel_idle,
		.enter_freeze = intel_idle_freeze, },
	{
		.name = "C6-HSW",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 133,
		.target_residency = 400,
		.enter = &intel_idle,
		.enter_freeze = intel_idle_freeze, },
	{
		.name = "C7s-HSW",
		.desc = "MWAIT 0x32",
		.flags = MWAIT2flg(0x32) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 166,
		.target_residency = 500,
		.enter = &intel_idle,
		.enter_freeze = intel_idle_freeze, },
	{
		.name = "C8-HSW",
		.desc = "MWAIT 0x40",
		.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 300,
		.target_residency = 900,
		.enter = &intel_idle,
		.enter_freeze = intel_idle_freeze, },
	{
		.name = "C9-HSW",
		.desc = "MWAIT 0x50",
		.flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 600,
		.target_residency = 1800,
		.enter = &intel_idle,
		.enter_freeze = intel_idle_freeze, },
	{
		.name = "C10-HSW",
		.desc = "MWAIT 0x60",
		.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 2600,
		.target_residency = 7700,
		.enter = &intel_idle,
		.enter_freeze = intel_idle_freeze, },
	{
		.enter = NULL }
};
/* Broadwell C-states; terminated by .enter == NULL entry. */
static struct cpuidle_state bdw_cstates[] = {
	{
		.name = "C1-BDW",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 2,
		.target_residency = 2,
		.enter = &intel_idle,
		.enter_freeze = intel_idle_freeze, },
	{
		.name = "C1E-BDW",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01),
		.exit_latency = 10,
		.target_residency = 20,
		.enter = &intel_idle,
		.enter_freeze = intel_idle_freeze, },
	{
		.name = "C3-BDW",
		.desc = "MWAIT 0x10",
		.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 40,
		.target_residency = 100,
		.enter = &intel_idle,
		.enter_freeze = intel_idle_freeze, },
	{
		.name = "C6-BDW",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 133,
		.target_residency = 400,
		.enter = &intel_idle,
		.enter_freeze = intel_idle_freeze, },
	{
		.name = "C7s-BDW",
		.desc = "MWAIT 0x32",
		.flags = MWAIT2flg(0x32) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 166,
		.target_residency = 500,
		.enter = &intel_idle,
		.enter_freeze = intel_idle_freeze, },
	{
		.name = "C8-BDW",
		.desc = "MWAIT 0x40",
		.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 300,
		.target_residency = 900,
		.enter = &intel_idle,
		.enter_freeze = intel_idle_freeze, },
	{
		.name = "C9-BDW",
		.desc = "MWAIT 0x50",
		.flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 600,
		.target_residency = 1800,
		.enter = &intel_idle,
		.enter_freeze = intel_idle_freeze, },
	{
		.name = "C10-BDW",
		.desc = "MWAIT 0x60",
		.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 2600,
		.target_residency = 7700,
		.enter = &intel_idle,
		.enter_freeze = intel_idle_freeze, },
	{
		.enter = NULL }
};
 593
/*
 * Skylake (client) C-states; terminated by .enter == NULL entry.
 * Note: entries [5] and [6] (C8/C9) may be disabled at runtime by
 * sklh_idle_state_table_update().
 */
static struct cpuidle_state skl_cstates[] = {
	{
		.name = "C1-SKL",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 2,
		.target_residency = 2,
		.enter = &intel_idle,
		.enter_freeze = intel_idle_freeze, },
	{
		.name = "C1E-SKL",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01),
		.exit_latency = 10,
		.target_residency = 20,
		.enter = &intel_idle,
		.enter_freeze = intel_idle_freeze, },
	{
		.name = "C3-SKL",
		.desc = "MWAIT 0x10",
		.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 70,
		.target_residency = 100,
		.enter = &intel_idle,
		.enter_freeze = intel_idle_freeze, },
	{
		.name = "C6-SKL",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 85,
		.target_residency = 200,
		.enter = &intel_idle,
		.enter_freeze = intel_idle_freeze, },
	{
		.name = "C7s-SKL",
		.desc = "MWAIT 0x33",
		.flags = MWAIT2flg(0x33) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 124,
		.target_residency = 800,
		.enter = &intel_idle,
		.enter_freeze = intel_idle_freeze, },
	{
		.name = "C8-SKL",
		.desc = "MWAIT 0x40",
		.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 200,
		.target_residency = 800,
		.enter = &intel_idle,
		.enter_freeze = intel_idle_freeze, },
	{
		.name = "C9-SKL",
		.desc = "MWAIT 0x50",
		.flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 480,
		.target_residency = 5000,
		.enter = &intel_idle,
		.enter_freeze = intel_idle_freeze, },
	{
		.name = "C10-SKL",
		.desc = "MWAIT 0x60",
		.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 890,
		.target_residency = 5000,
		.enter = &intel_idle,
		.enter_freeze = intel_idle_freeze, },
	{
		.enter = NULL }
};
 662
/* Skylake-X (server) C-states; terminated by .enter == NULL entry. */
static struct cpuidle_state skx_cstates[] = {
	{
		.name = "C1-SKX",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 2,
		.target_residency = 2,
		.enter = &intel_idle,
		.enter_freeze = intel_idle_freeze, },
	{
		.name = "C1E-SKX",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01),
		.exit_latency = 10,
		.target_residency = 20,
		.enter = &intel_idle,
		.enter_freeze = intel_idle_freeze, },
	{
		.name = "C6-SKX",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 133,
		.target_residency = 600,
		.enter = &intel_idle,
		.enter_freeze = intel_idle_freeze, },
	{
		.enter = NULL }
};
 691
/* Atom C-states; terminated by .enter == NULL entry. */
static struct cpuidle_state atom_cstates[] = {
	{
		.name = "C1E-ATM",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 10,
		.target_residency = 20,
		.enter = &intel_idle,
		.enter_freeze = intel_idle_freeze, },
	{
		.name = "C2-ATM",
		.desc = "MWAIT 0x10",
		.flags = MWAIT2flg(0x10),
		.exit_latency = 20,
		.target_residency = 80,
		.enter = &intel_idle,
		.enter_freeze = intel_idle_freeze, },
	{
		.name = "C4-ATM",
		.desc = "MWAIT 0x30",
		.flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 100,
		.target_residency = 400,
		.enter = &intel_idle,
		.enter_freeze = intel_idle_freeze, },
	{
		.name = "C6-ATM",
		.desc = "MWAIT 0x52",
		.flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 140,
		.target_residency = 560,
		.enter = &intel_idle,
		.enter_freeze = intel_idle_freeze, },
	{
		.enter = NULL }
};
/* Avoton/Rangeley C-states; terminated by .enter == NULL entry. */
static struct cpuidle_state avn_cstates[] = {
	{
		.name = "C1-AVN",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 2,
		.target_residency = 2,
		.enter = &intel_idle,
		.enter_freeze = intel_idle_freeze, },
	{
		.name = "C6-AVN",
		.desc = "MWAIT 0x51",
		.flags = MWAIT2flg(0x51) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 15,
		.target_residency = 45,
		.enter = &intel_idle,
		.enter_freeze = intel_idle_freeze, },
	{
		.enter = NULL }
};
/* Knights Landing C-states; terminated by .enter == NULL entry. */
static struct cpuidle_state knl_cstates[] = {
	{
		.name = "C1-KNL",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 1,
		.target_residency = 2,
		.enter = &intel_idle,
		.enter_freeze = intel_idle_freeze },
	{
		.name = "C6-KNL",
		.desc = "MWAIT 0x10",
		.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 120,
		.target_residency = 500,
		.enter = &intel_idle,
		.enter_freeze = intel_idle_freeze },
	{
		.enter = NULL }
};
 768
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 769/**
 770 * intel_idle
 771 * @dev: cpuidle_device
 772 * @drv: cpuidle driver
 773 * @index: index of cpuidle state
 774 *
 775 * Must be called under local_irq_disable().
 776 */
 777static int intel_idle(struct cpuidle_device *dev,
 778		struct cpuidle_driver *drv, int index)
 779{
 780	unsigned long ecx = 1; /* break on interrupt flag */
 781	struct cpuidle_state *state = &drv->states[index];
 782	unsigned long eax = flg2MWAIT(state->flags);
 
 783	unsigned int cstate;
 
 
 784	int cpu = smp_processor_id();
 785
 786	cstate = (((eax) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1;
 787
 788	/*
 789	 * leave_mm() to avoid costly and often unnecessary wakeups
 790	 * for flushing the user TLB's associated with the active mm.
 791	 */
 792	if (state->flags & CPUIDLE_FLAG_TLB_FLUSHED)
 793		leave_mm(cpu);
 794
 795	if (!(lapic_timer_reliable_states & (1 << (cstate))))
 796		tick_broadcast_enter();
 797
 798	mwait_idle_with_hints(eax, ecx);
 799
 800	if (!(lapic_timer_reliable_states & (1 << (cstate))))
 801		tick_broadcast_exit();
 802
 803	return index;
 804}
 
 
 
 805
 806/**
 807 * intel_idle_freeze - simplified "enter" callback routine for suspend-to-idle
 808 * @dev: cpuidle_device
 809 * @drv: cpuidle driver
 810 * @index: state index
 811 */
 812static void intel_idle_freeze(struct cpuidle_device *dev,
 813			     struct cpuidle_driver *drv, int index)
 814{
 815	unsigned long ecx = 1; /* break on interrupt flag */
 816	unsigned long eax = flg2MWAIT(drv->states[index].flags);
 817
 818	mwait_idle_with_hints(eax, ecx);
 
 
 
 
 
 
 
 
 819}
 820
 821static void __setup_broadcast_timer(void *arg)
 822{
 823	unsigned long on = (unsigned long)arg;
 
 
 
 
 824
 825	if (on)
 826		tick_broadcast_enable();
 827	else
 828		tick_broadcast_disable();
 829}
 830
/*
 * CPU hotplug callback: when a CPU comes online, re-arm its broadcast
 * timer if needed and register a cpuidle device for it if it never had one.
 */
static int cpu_hotplug_notify(struct notifier_block *n,
			      unsigned long action, void *hcpu)
{
	int hotcpu = (unsigned long)hcpu;
	struct cpuidle_device *dev;

	/* mask off CPU_TASKS_FROZEN so suspend/resume transitions match too */
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:

		if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE)
			smp_call_function_single(hotcpu, __setup_broadcast_timer,
						 (void *)true, 1);

		/*
		 * Some systems can hotplug a cpu at runtime after
		 * the kernel has booted, we have to initialize the
		 * driver in this case
		 */
		dev = per_cpu_ptr(intel_idle_cpuidle_devices, hotcpu);
		if (dev->registered)
			break;

		if (intel_idle_cpu_init(hotcpu))
			return NOTIFY_BAD;

		break;
	}
	return NOTIFY_OK;
}
 860
/* Notifier block registered for CPU hotplug events. */
static struct notifier_block cpu_hotplug_notifier = {
	.notifier_call = cpu_hotplug_notify,
};
 864
 865static void auto_demotion_disable(void *dummy)
 866{
 867	unsigned long long msr_bits;
 868
 869	rdmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits);
 870	msr_bits &= ~(icpu->auto_demotion_disable_flags);
 871	wrmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits);
 872}
 873static void c1e_promotion_disable(void *dummy)
 874{
 875	unsigned long long msr_bits;
 876
 877	rdmsrl(MSR_IA32_POWER_CTL, msr_bits);
 878	msr_bits &= ~0x2;
 879	wrmsrl(MSR_IA32_POWER_CTL, msr_bits);
 880}
 881
/*
 * Per-model idle_cpu descriptors: which C-state table to use and which
 * MSR quirks (auto-demotion, C1E promotion) to apply at init.
 */
static const struct idle_cpu idle_cpu_nehalem = {
	.state_table = nehalem_cstates,
	.auto_demotion_disable_flags = NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE,
	.disable_promotion_to_c1e = true,
};

static const struct idle_cpu idle_cpu_atom = {
	.state_table = atom_cstates,
};

static const struct idle_cpu idle_cpu_lincroft = {
	.state_table = atom_cstates,
	.auto_demotion_disable_flags = ATM_LNC_C6_AUTO_DEMOTE,
};

static const struct idle_cpu idle_cpu_snb = {
	.state_table = snb_cstates,
	.disable_promotion_to_c1e = true,
};

static const struct idle_cpu idle_cpu_byt = {
	.state_table = byt_cstates,
	.disable_promotion_to_c1e = true,
	.byt_auto_demotion_disable_flag = true,
};

static const struct idle_cpu idle_cpu_cht = {
	.state_table = cht_cstates,
	.disable_promotion_to_c1e = true,
	.byt_auto_demotion_disable_flag = true,
};

static const struct idle_cpu idle_cpu_ivb = {
	.state_table = ivb_cstates,
	.disable_promotion_to_c1e = true,
};

static const struct idle_cpu idle_cpu_ivt = {
	.state_table = ivt_cstates,
	.disable_promotion_to_c1e = true,
};

static const struct idle_cpu idle_cpu_hsw = {
	.state_table = hsw_cstates,
	.disable_promotion_to_c1e = true,
};

static const struct idle_cpu idle_cpu_bdw = {
	.state_table = bdw_cstates,
	.disable_promotion_to_c1e = true,
};

static const struct idle_cpu idle_cpu_skl = {
	.state_table = skl_cstates,
	.disable_promotion_to_c1e = true,
};

static const struct idle_cpu idle_cpu_skx = {
	.state_table = skx_cstates,
	.disable_promotion_to_c1e = true,
};

static const struct idle_cpu idle_cpu_avn = {
	.state_table = avn_cstates,
	.disable_promotion_to_c1e = true,
};

static const struct idle_cpu idle_cpu_knl = {
	.state_table = knl_cstates,
};
 952
/* Match helper: Intel family-6 CPU with MWAIT, keyed by model number. */
#define ICPU(model, cpu) \
	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT, (unsigned long)&cpu }

/*
 * Model-number to idle_cpu descriptor table, consumed by x86_match_cpu()
 * in intel_idle_probe().  Terminated by an empty entry.
 */
static const struct x86_cpu_id intel_idle_ids[] __initconst = {
	ICPU(0x1a, idle_cpu_nehalem),
	ICPU(0x1e, idle_cpu_nehalem),
	ICPU(0x1f, idle_cpu_nehalem),
	ICPU(0x25, idle_cpu_nehalem),
	ICPU(0x2c, idle_cpu_nehalem),
	ICPU(0x2e, idle_cpu_nehalem),
	ICPU(0x1c, idle_cpu_atom),
	ICPU(0x26, idle_cpu_lincroft),
	ICPU(0x2f, idle_cpu_nehalem),
	ICPU(0x2a, idle_cpu_snb),
	ICPU(0x2d, idle_cpu_snb),
	ICPU(0x36, idle_cpu_atom),
	ICPU(0x37, idle_cpu_byt),
	ICPU(0x4c, idle_cpu_cht),
	ICPU(0x3a, idle_cpu_ivb),
	ICPU(0x3e, idle_cpu_ivt),
	ICPU(0x3c, idle_cpu_hsw),
	ICPU(0x3f, idle_cpu_hsw),
	ICPU(0x45, idle_cpu_hsw),
	ICPU(0x46, idle_cpu_hsw),
	ICPU(0x4d, idle_cpu_avn),
	ICPU(0x3d, idle_cpu_bdw),
	ICPU(0x47, idle_cpu_bdw),
	ICPU(0x4f, idle_cpu_bdw),
	ICPU(0x56, idle_cpu_bdw),
	ICPU(0x4e, idle_cpu_skl),
	ICPU(0x5e, idle_cpu_skl),
	ICPU(0x8e, idle_cpu_skl),
	ICPU(0x9e, idle_cpu_skl),
	ICPU(0x55, idle_cpu_skx),
	ICPU(0x57, idle_cpu_knl),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids);
 991
/*
 * intel_idle_probe()
 *
 * Decide whether this driver can run on the boot CPU: match the CPU
 * model against intel_idle_ids, verify the CPUID MWAIT leaf advertises
 * the interrupt-break extension, and select the model's C-state table.
 * Returns 0 on success, -EPERM if disabled via max_cstate=0, -ENODEV
 * if the CPU is unsupported.
 */
static int __init intel_idle_probe(void)
{
	unsigned int eax, ebx, ecx;
	const struct x86_cpu_id *id;

	/* intel_idle.max_cstate=0 means the driver is administratively off */
	if (max_cstate == 0) {
		pr_debug(PREFIX "disabled\n");
		return -EPERM;
	}

	id = x86_match_cpu(intel_idle_ids);
	if (!id) {
		if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
		    boot_cpu_data.x86 == 6)
			pr_debug(PREFIX "does not run on family %d model %d\n",
				boot_cpu_data.x86, boot_cpu_data.x86_model);
		return -ENODEV;
	}

	if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
		return -ENODEV;

	/* EDX of the MWAIT leaf holds the per-C-state sub-state counts */
	cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &mwait_substates);

	/* require MWAIT extensions and break-on-interrupt support */
	if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
	    !(ecx & CPUID5_ECX_INTERRUPT_BREAK) ||
	    !mwait_substates)
		return -ENODEV;

	pr_debug(PREFIX "MWAIT substates: 0x%x\n", mwait_substates);

	icpu = (const struct idle_cpu *)id->driver_data;
	cpuidle_state_table = icpu->state_table;

	/*
	 * NOTE(review): mainline sets lapic_timer_reliable_states based on
	 * X86_FEATURE_ARAT around here; those lines appear to have been
	 * dropped by extraction — confirm against the upstream file.
	 */
	pr_debug(PREFIX "v" INTEL_IDLE_VERSION
		" model 0x%X\n", boot_cpu_data.x86_model);

	return 0;
}
1034
1035/*
1036 * intel_idle_cpuidle_devices_uninit()
1037 * Unregisters the cpuidle devices.
1038 */
1039static void intel_idle_cpuidle_devices_uninit(void)
1040{
1041	int i;
1042	struct cpuidle_device *dev;
1043
1044	for_each_online_cpu(i) {
1045		dev = per_cpu_ptr(intel_idle_cpuidle_devices, i);
1046		cpuidle_unregister_device(dev);
1047	}
1048}
1049
1050/*
1051 * ivt_idle_state_table_update(void)
1052 *
1053 * Tune IVT multi-socket targets
1054 * Assumption: num_sockets == (max_package_num + 1)
1055 */
1056static void ivt_idle_state_table_update(void)
1057{
1058	/* IVT uses a different table for 1-2, 3-4, and > 4 sockets */
1059	int cpu, package_num, num_sockets = 1;
1060
1061	for_each_online_cpu(cpu) {
1062		package_num = topology_physical_package_id(cpu);
1063		if (package_num + 1 > num_sockets) {
1064			num_sockets = package_num + 1;
1065
1066			if (num_sockets > 4) {
1067				cpuidle_state_table = ivt_cstates_8s;
1068				return;
1069			}
1070		}
1071	}
1072
1073	if (num_sockets > 2)
1074		cpuidle_state_table = ivt_cstates_4s;
1075
1076	/* else, 1 and 2 socket systems use default ivt_cstates */
1077}
/*
 * sklh_idle_state_table_update(void)
 *
 * On SKL-H (model 0x5e) disable C8 and C9 if:
 * C10 is enabled and SGX disabled
 */
static void sklh_idle_state_table_update(void)
{
	unsigned long long msr;
	unsigned int eax, ebx, ecx, edx;


	/* if PC10 disabled via cmdline intel_idle.max_cstate=7 or shallower */
	if (max_cstate <= 7)
		return;

	/* if PC10 not present in CPUID.MWAIT.EDX (top nibble of substates) */
	if ((mwait_substates & (0xF << 28)) == 0)
		return;

	rdmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr);

	/* PC10 is not enabled in PKG C-state limit (low nibble must be 8) */
	if ((msr & 0xF) != 8)
		return;

	ecx = 0;
	cpuid(7, &eax, &ebx, &ecx, &edx);

	/* if SGX is present (CPUID.7:EBX bit 2) */
	if (ebx & (1 << 2)) {

		rdmsrl(MSR_IA32_FEATURE_CONTROL, msr);

		/* if SGX is enabled (feature-control bit 18) */
		if (msr & (1 << 18))
			return;
	}

	/* all checks passed: drop C8/C9 so the governor prefers C10 */
	skl_cstates[5].disabled = 1;	/* C8-SKL */
	skl_cstates[6].disabled = 1;	/* C9-SKL */
}
1120/*
1121 * intel_idle_state_table_update()
1122 *
1123 * Update the default state_table for this CPU-id
1124 */
1125
1126static void intel_idle_state_table_update(void)
1127{
1128	switch (boot_cpu_data.x86_model) {
1129
1130	case 0x3e: /* IVT */
1131		ivt_idle_state_table_update();
1132		break;
1133	case 0x5e: /* SKL-H */
1134		sklh_idle_state_table_update();
1135		break;
1136	}
1137}
1138
1139/*
1140 * intel_idle_cpuidle_driver_init()
1141 * allocate, initialize cpuidle_states
1142 */
1143static void __init intel_idle_cpuidle_driver_init(void)
1144{
1145	int cstate;
1146	struct cpuidle_driver *drv = &intel_idle_driver;
1147
1148	intel_idle_state_table_update();
1149
1150	drv->state_count = 1;
1151
1152	for (cstate = 0; cstate < CPUIDLE_STATE_MAX; ++cstate) {
1153		int num_substates, mwait_hint, mwait_cstate;
1154
1155		if ((cpuidle_state_table[cstate].enter == NULL) &&
1156		    (cpuidle_state_table[cstate].enter_freeze == NULL))
1157			break;
1158
1159		if (cstate + 1 > max_cstate) {
1160			printk(PREFIX "max_cstate %d reached\n",
1161				max_cstate);
1162			break;
1163		}
1164
1165		mwait_hint = flg2MWAIT(cpuidle_state_table[cstate].flags);
1166		mwait_cstate = MWAIT_HINT2CSTATE(mwait_hint);
1167
1168		/* number of sub-states for this state in CPUID.MWAIT */
1169		num_substates = (mwait_substates >> ((mwait_cstate + 1) * 4))
1170					& MWAIT_SUBSTATE_MASK;
1171
1172		/* if NO sub-states for this state in CPUID, skip it */
1173		if (num_substates == 0)
1174			continue;
1175
1176		/* if state marked as disabled, skip it */
1177		if (cpuidle_state_table[cstate].disabled != 0) {
1178			pr_debug(PREFIX "state %s is disabled",
1179				cpuidle_state_table[cstate].name);
 
 
 
1180			continue;
1181		}
1182
1183
1184		if (((mwait_cstate + 1) > 2) &&
1185			!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
1186			mark_tsc_unstable("TSC halts in idle"
1187					" states deeper than C2");
1188
1189		drv->states[drv->state_count] =	/* structure copy */
1190			cpuidle_state_table[cstate];
1191
1192		drv->state_count += 1;
1193	}
1194
1195	if (icpu->byt_auto_demotion_disable_flag) {
1196		wrmsrl(MSR_CC6_DEMOTION_POLICY_CONFIG, 0);
1197		wrmsrl(MSR_MC6_DEMOTION_POLICY_CONFIG, 0);
1198	}
1199}
1200
1201
/*
 * intel_idle_cpu_init()
 * allocate, initialize, register cpuidle_devices
 * @cpu: cpu/core to initialize
 *
 * Returns 0 on success, -EIO if device registration fails.
 */
static int intel_idle_cpu_init(int cpu)
{
	struct cpuidle_device *dev;

	dev = per_cpu_ptr(intel_idle_cpuidle_devices, cpu);

	dev->cpu = cpu;

	if (cpuidle_register_device(dev)) {
		pr_debug(PREFIX "cpuidle_register_device %d failed!\n", cpu);
		return -EIO;
	}

	/* apply per-model MSR quirks on the target CPU itself */
	if (icpu->auto_demotion_disable_flags)
		smp_call_function_single(cpu, auto_demotion_disable, NULL, 1);

	if (icpu->disable_promotion_to_c1e)
		smp_call_function_single(cpu, c1e_promotion_disable, NULL, 1);

	return 0;
}
 
1228
1229static int __init intel_idle_init(void)
1230{
1231	int retval, i;
1232
1233	/* Do not load intel_idle at all for now if idle= is passed */
1234	if (boot_option_idle_override != IDLE_NO_OVERRIDE)
1235		return -ENODEV;
1236
1237	retval = intel_idle_probe();
1238	if (retval)
1239		return retval;
1240
1241	intel_idle_cpuidle_devices = alloc_percpu(struct cpuidle_device);
1242	if (intel_idle_cpuidle_devices == NULL)
1243		return -ENOMEM;
1244
1245	intel_idle_cpuidle_driver_init();
1246	retval = cpuidle_register_driver(&intel_idle_driver);
1247	if (retval) {
1248		struct cpuidle_driver *drv = cpuidle_get_driver();
1249		printk(KERN_DEBUG PREFIX "intel_idle yielding to %s",
1250			drv ? drv->name : "none");
1251		free_percpu(intel_idle_cpuidle_devices);
1252		return retval;
1253	}
1254
1255	cpu_notifier_register_begin();
 
 
1256
1257	for_each_online_cpu(i) {
1258		retval = intel_idle_cpu_init(i);
1259		if (retval) {
1260			intel_idle_cpuidle_devices_uninit();
1261			cpu_notifier_register_done();
1262			cpuidle_unregister_driver(&intel_idle_driver);
1263			free_percpu(intel_idle_cpuidle_devices);
1264			return retval;
1265		}
1266	}
1267	__register_cpu_notifier(&cpu_hotplug_notifier);
1268
1269	if (boot_cpu_has(X86_FEATURE_ARAT))	/* Always Reliable APIC Timer */
1270		lapic_timer_reliable_states = LAPIC_TIMER_ALWAYS_RELIABLE;
1271	else
1272		on_each_cpu(__setup_broadcast_timer, (void *)true, 1);
1273
1274	cpu_notifier_register_done();
1275
1276	pr_debug(PREFIX "lapic_timer_reliable_states 0x%x\n",
1277		lapic_timer_reliable_states);
1278
1279	return 0;
1280}
1281
/*
 * Module unload: undo broadcast-timer setup, drop the hotplug
 * notifier, unregister every per-CPU device, then the driver.
 */
static void __exit intel_idle_exit(void)
{
	struct cpuidle_device *dev;
	int i;

	cpu_notifier_register_begin();

	/* restore per-CPU broadcast timer state set up at init */
	if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE)
		on_each_cpu(__setup_broadcast_timer, (void *)false, 1);
	__unregister_cpu_notifier(&cpu_hotplug_notifier);

	/*
	 * NOTE(review): init registers devices for online CPUs only but
	 * teardown walks possible CPUs -- unregistering a never-registered
	 * device is presumed safe; confirm against the cpuidle core.
	 */
	for_each_possible_cpu(i) {
		dev = per_cpu_ptr(intel_idle_cpuidle_devices, i);
		cpuidle_unregister_device(dev);
	}

	cpu_notifier_register_done();

	cpuidle_unregister_driver(&intel_idle_driver);
	free_percpu(intel_idle_cpuidle_devices);
}
1303
module_init(intel_idle_init);
module_exit(intel_idle_exit);

/* read-only (0444) parameter; set at load time via intel_idle.max_cstate= */
module_param(max_cstate, int, 0444);

MODULE_AUTHOR("Len Brown <len.brown@intel.com>");
MODULE_DESCRIPTION("Cpuidle driver for Intel Hardware v" INTEL_IDLE_VERSION);
MODULE_LICENSE("GPL");
v3.5.6
  1/*
  2 * intel_idle.c - native hardware idle loop for modern Intel processors
  3 *
  4 * Copyright (c) 2010, Intel Corporation.
  5 * Len Brown <len.brown@intel.com>
  6 *
  7 * This program is free software; you can redistribute it and/or modify it
  8 * under the terms and conditions of the GNU General Public License,
  9 * version 2, as published by the Free Software Foundation.
 10 *
 11 * This program is distributed in the hope it will be useful, but WITHOUT
 12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 13 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 14 * more details.
 15 *
 16 * You should have received a copy of the GNU General Public License along with
 17 * this program; if not, write to the Free Software Foundation, Inc.,
 18 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 19 */
 20
 21/*
 22 * intel_idle is a cpuidle driver that loads on specific Intel processors
 23 * in lieu of the legacy ACPI processor_idle driver.  The intent is to
 24 * make Linux more efficient on these processors, as intel_idle knows
 25 * more than ACPI, as well as make Linux more immune to ACPI BIOS bugs.
 26 */
 27
 28/*
 29 * Design Assumptions
 30 *
 31 * All CPUs have same idle states as boot CPU
 32 *
 33 * Chipset BM_STS (bus master status) bit is a NOP
 *	for preventing entry into deep C-states
 35 */
 36
 37/*
 38 * Known limitations
 39 *
 40 * The driver currently initializes for_each_online_cpu() upon modprobe.
 * It is unaware of subsequent processors hot-added to the system.
 42 * This means that if you boot with maxcpus=n and later online
 43 * processors above n, those processors will use C1 only.
 44 *
 * ACPI has a .suspend hack to turn off deep c-states during suspend
 46 * to avoid complications with the lapic timer workaround.
 47 * Have not seen issues with suspend, but may need same workaround here.
 48 *
 49 * There is currently no kernel-based automatic probing/loading mechanism
 50 * if the driver is built as a module.
 51 */
 52
 53/* un-comment DEBUG to enable pr_debug() statements */
 54#define DEBUG
 55
 56#include <linux/kernel.h>
 57#include <linux/cpuidle.h>
 58#include <linux/clockchips.h>
 59#include <linux/hrtimer.h>	/* ktime_get_real() */
 60#include <trace/events/power.h>
 61#include <linux/sched.h>
 62#include <linux/notifier.h>
 63#include <linux/cpu.h>
 64#include <linux/module.h>
 65#include <asm/cpu_device_id.h>
 66#include <asm/mwait.h>
 67#include <asm/msr.h>
 68
 69#define INTEL_IDLE_VERSION "0.4"
 70#define PREFIX "intel_idle: "
 71
 72static struct cpuidle_driver intel_idle_driver = {
 73	.name = "intel_idle",
 74	.owner = THIS_MODULE,
 75};
 76/* intel_idle.max_cstate=0 disables driver */
 77static int max_cstate = MWAIT_MAX_NUM_CSTATES - 1;
 78
 79static unsigned int mwait_substates;
 80
 81#define LAPIC_TIMER_ALWAYS_RELIABLE 0xFFFFFFFF
 82/* Reliable LAPIC Timer States, bit 1 for C1 etc.  */
 83static unsigned int lapic_timer_reliable_states = (1 << 1);	 /* Default to only C1 */
 84
 85struct idle_cpu {
 86	struct cpuidle_state *state_table;
 87
 88	/*
 89	 * Hardware C-state auto-demotion may not always be optimal.
 90	 * Indicate which enable bits to clear here.
 91	 */
 92	unsigned long auto_demotion_disable_flags;
 
 
 93};
 94
 95static const struct idle_cpu *icpu;
 96static struct cpuidle_device __percpu *intel_idle_cpuidle_devices;
 97static int intel_idle(struct cpuidle_device *dev,
 98			struct cpuidle_driver *drv, int index);
 
 
 
 99
100static struct cpuidle_state *cpuidle_state_table;
101
102/*
103 * Set this flag for states where the HW flushes the TLB for us
104 * and so we don't need cross-calls to keep it consistent.
105 * If this flag is set, SW flushes the TLB, so even if the
106 * HW doesn't do the flushing, this flag is safe to use.
107 */
108#define CPUIDLE_FLAG_TLB_FLUSHED	0x10000
109
110/*
 
 
 
 
 
 
 
 
 
 
111 * States are indexed by the cstate number,
112 * which is also the index into the MWAIT hint array.
113 * Thus C0 is a dummy.
114 */
115static struct cpuidle_state nehalem_cstates[MWAIT_MAX_NUM_CSTATES] = {
116	{ /* MWAIT C0 */ },
117	{ /* MWAIT C1 */
118		.name = "C1-NHM",
119		.desc = "MWAIT 0x00",
120		.flags = CPUIDLE_FLAG_TIME_VALID,
121		.exit_latency = 3,
122		.target_residency = 6,
123		.enter = &intel_idle },
124	{ /* MWAIT C2 */
 
 
 
 
 
 
 
 
 
125		.name = "C3-NHM",
126		.desc = "MWAIT 0x10",
127		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
128		.exit_latency = 20,
129		.target_residency = 80,
130		.enter = &intel_idle },
131	{ /* MWAIT C3 */
 
132		.name = "C6-NHM",
133		.desc = "MWAIT 0x20",
134		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
135		.exit_latency = 200,
136		.target_residency = 800,
137		.enter = &intel_idle },
 
 
 
138};
139
140static struct cpuidle_state snb_cstates[MWAIT_MAX_NUM_CSTATES] = {
141	{ /* MWAIT C0 */ },
142	{ /* MWAIT C1 */
143		.name = "C1-SNB",
144		.desc = "MWAIT 0x00",
145		.flags = CPUIDLE_FLAG_TIME_VALID,
146		.exit_latency = 1,
147		.target_residency = 1,
148		.enter = &intel_idle },
149	{ /* MWAIT C2 */
 
 
 
 
 
 
 
 
 
150		.name = "C3-SNB",
151		.desc = "MWAIT 0x10",
152		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
153		.exit_latency = 80,
154		.target_residency = 211,
155		.enter = &intel_idle },
156	{ /* MWAIT C3 */
 
157		.name = "C6-SNB",
158		.desc = "MWAIT 0x20",
159		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
160		.exit_latency = 104,
161		.target_residency = 345,
162		.enter = &intel_idle },
163	{ /* MWAIT C4 */
 
164		.name = "C7-SNB",
165		.desc = "MWAIT 0x30",
166		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
167		.exit_latency = 109,
168		.target_residency = 345,
169		.enter = &intel_idle },
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
170};
171
172static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = {
173	{ /* MWAIT C0 */ },
174	{ /* MWAIT C1 */
175		.name = "C1-ATM",
176		.desc = "MWAIT 0x00",
177		.flags = CPUIDLE_FLAG_TIME_VALID,
178		.exit_latency = 1,
179		.target_residency = 4,
180		.enter = &intel_idle },
181	{ /* MWAIT C2 */
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
182		.name = "C2-ATM",
183		.desc = "MWAIT 0x10",
184		.flags = CPUIDLE_FLAG_TIME_VALID,
185		.exit_latency = 20,
186		.target_residency = 80,
187		.enter = &intel_idle },
188	{ /* MWAIT C3 */ },
189	{ /* MWAIT C4 */
190		.name = "C4-ATM",
191		.desc = "MWAIT 0x30",
192		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
193		.exit_latency = 100,
194		.target_residency = 400,
195		.enter = &intel_idle },
196	{ /* MWAIT C5 */ },
197	{ /* MWAIT C6 */
198		.name = "C6-ATM",
199		.desc = "MWAIT 0x52",
200		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
201		.exit_latency = 140,
202		.target_residency = 560,
203		.enter = &intel_idle },
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
204};
205
/*
 * Map a C-state index to its MWAIT hint value.
 * Unknown indices (including 0) fall back to the C1 hint, 0x00.
 */
static long get_driver_data(int cstate)
{
	static const long mwait_hints[] = {
		[1] = 0x00,	/* MWAIT C1 */
		[2] = 0x10,	/* MWAIT C2 */
		[3] = 0x20,	/* MWAIT C3 */
		[4] = 0x30,	/* MWAIT C4 */
		[5] = 0x40,	/* MWAIT C5 */
		[6] = 0x52,	/* MWAIT C6 */
	};

	if (cstate >= 1 && cstate <= 6)
		return mwait_hints[cstate];

	return 0x00;
}
234
/**
 * intel_idle
 * @dev: cpuidle_device
 * @drv: cpuidle driver
 * @index: index of cpuidle state
 *
 * Must be called under local_irq_disable().
 * Returns @index; residency is reported via dev->last_residency.
 */
static int intel_idle(struct cpuidle_device *dev,
		struct cpuidle_driver *drv, int index)
{
	unsigned long ecx = 1; /* break on interrupt flag */
	struct cpuidle_state *state = &drv->states[index];
	struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
	/* driver_data holds the MWAIT hint stashed by intel_idle_cpu_init() */
	unsigned long eax = (unsigned long)cpuidle_get_statedata(state_usage);
	unsigned int cstate;
	ktime_t kt_before, kt_after;
	s64 usec_delta;
	int cpu = smp_processor_id();

	/* recover the C-state number from the MWAIT hint */
	cstate = (((eax) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1;

	/*
	 * leave_mm() to avoid costly and often unnecessary wakeups
	 * for flushing the user TLB's associated with the active mm.
	 */
	if (state->flags & CPUIDLE_FLAG_TLB_FLUSHED)
		leave_mm(cpu);

	/* LAPIC timer may stop in this C-state: switch to broadcast */
	if (!(lapic_timer_reliable_states & (1 << (cstate))))
		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);

	kt_before = ktime_get_real();

	stop_critical_timings();
	if (!need_resched()) {

		/* MONITOR the task flags so a wakeup IPI exits MWAIT */
		__monitor((void *)&current_thread_info()->flags, 0, 0);
		smp_mb();
		/* re-check after arming MONITOR to close the race window */
		if (!need_resched())
			__mwait(eax, ecx);
	}

	start_critical_timings();

	kt_after = ktime_get_real();
	usec_delta = ktime_to_us(ktime_sub(kt_after, kt_before));

	local_irq_enable();

	if (!(lapic_timer_reliable_states & (1 << (cstate))))
		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);

	/* Update cpuidle counters */
	dev->last_residency = (int)usec_delta;

	return index;
}
293
294static void __setup_broadcast_timer(void *arg)
295{
296	unsigned long reason = (unsigned long)arg;
297	int cpu = smp_processor_id();
298
299	reason = reason ?
300		CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF;
301
302	clockevents_notify(reason, &cpu);
 
 
 
303}
304
305static int setup_broadcast_cpuhp_notify(struct notifier_block *n,
306		unsigned long action, void *hcpu)
307{
308	int hotcpu = (unsigned long)hcpu;
 
309
310	switch (action & 0xf) {
311	case CPU_ONLINE:
312		smp_call_function_single(hotcpu, __setup_broadcast_timer,
313			(void *)true, 1);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
314		break;
315	}
316	return NOTIFY_OK;
317}
318
319static struct notifier_block setup_broadcast_notifier = {
320	.notifier_call = setup_broadcast_cpuhp_notify,
321};
322
/*
 * Clear the model-specific C-state auto-demotion enable bits on the
 * calling CPU (bits listed in icpu->auto_demotion_disable_flags).
 * Runs per-CPU via on_each_cpu()/smp_call_function_single().
 */
static void auto_demotion_disable(void *dummy)
{
	unsigned long long msr_bits;

	rdmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits);
	msr_bits &= ~(icpu->auto_demotion_disable_flags);
	wrmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits);
}
 
 
 
 
 
 
 
 
331
332static const struct idle_cpu idle_cpu_nehalem = {
333	.state_table = nehalem_cstates,
334	.auto_demotion_disable_flags = NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE,
 
335};
336
337static const struct idle_cpu idle_cpu_atom = {
338	.state_table = atom_cstates,
339};
340
341static const struct idle_cpu idle_cpu_lincroft = {
342	.state_table = atom_cstates,
343	.auto_demotion_disable_flags = ATM_LNC_C6_AUTO_DEMOTE,
344};
345
346static const struct idle_cpu idle_cpu_snb = {
347	.state_table = snb_cstates,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
348};
349
350#define ICPU(model, cpu) \
351	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT, (unsigned long)&cpu }
352
353static const struct x86_cpu_id intel_idle_ids[] = {
354	ICPU(0x1a, idle_cpu_nehalem),
355	ICPU(0x1e, idle_cpu_nehalem),
356	ICPU(0x1f, idle_cpu_nehalem),
357	ICPU(0x25, idle_cpu_nehalem),
358	ICPU(0x2c, idle_cpu_nehalem),
359	ICPU(0x2e, idle_cpu_nehalem),
360	ICPU(0x1c, idle_cpu_atom),
361	ICPU(0x26, idle_cpu_lincroft),
362	ICPU(0x2f, idle_cpu_nehalem),
363	ICPU(0x2a, idle_cpu_snb),
364	ICPU(0x2d, idle_cpu_snb),
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
365	{}
366};
367MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids);
368
/*
 * intel_idle_probe()
 * Match the boot CPU against the supported-model table and read the
 * MWAIT sub-state mask from CPUID.  On success, selects the per-model
 * state table and arranges broadcast-timer handling for CPUs whose
 * LAPIC timer stops in deep C-states.
 */
static int intel_idle_probe(void)
{
	unsigned int eax, ebx, ecx;
	const struct x86_cpu_id *id;

	/* intel_idle.max_cstate=0 disables the driver entirely */
	if (max_cstate == 0) {
		pr_debug(PREFIX "disabled\n");
		return -EPERM;
	}

	id = x86_match_cpu(intel_idle_ids);
	if (!id) {
		if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
		    boot_cpu_data.x86 == 6)
			pr_debug(PREFIX "does not run on family %d model %d\n",
				boot_cpu_data.x86, boot_cpu_data.x86_model);
		return -ENODEV;
	}

	if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
		return -ENODEV;

	/* the EDX output (sub-state mask) lands directly in mwait_substates */
	cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &mwait_substates);

	/* MWAIT extensions with interrupt-break are required */
	if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
	    !(ecx & CPUID5_ECX_INTERRUPT_BREAK) ||
	    !mwait_substates)
			return -ENODEV;

	pr_debug(PREFIX "MWAIT substates: 0x%x\n", mwait_substates);

	icpu = (const struct idle_cpu *)id->driver_data;
	cpuidle_state_table = icpu->state_table;

	if (boot_cpu_has(X86_FEATURE_ARAT))	/* Always Reliable APIC Timer */
		lapic_timer_reliable_states = LAPIC_TIMER_ALWAYS_RELIABLE;
	else {
		/*
		 * NOTE(review): these side effects persist even if module
		 * init later fails before the driver registers -- confirm
		 * the unwind path covers that case.
		 */
		on_each_cpu(__setup_broadcast_timer, (void *)true, 1);
		register_cpu_notifier(&setup_broadcast_notifier);
	}

	pr_debug(PREFIX "v" INTEL_IDLE_VERSION
		" model 0x%X\n", boot_cpu_data.x86_model);

	pr_debug(PREFIX "lapic_timer_reliable_states 0x%x\n",
		lapic_timer_reliable_states);
	return 0;
}
420
421/*
422 * intel_idle_cpuidle_devices_uninit()
423 * unregister, free cpuidle_devices
424 */
425static void intel_idle_cpuidle_devices_uninit(void)
426{
427	int i;
428	struct cpuidle_device *dev;
429
430	for_each_online_cpu(i) {
431		dev = per_cpu_ptr(intel_idle_cpuidle_devices, i);
432		cpuidle_unregister_device(dev);
433	}
 
434
435	free_percpu(intel_idle_cpuidle_devices);
436	return;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
437}
 
438/*
439 * intel_idle_cpuidle_driver_init()
440 * allocate, initialize cpuidle_states
441 */
442static int intel_idle_cpuidle_driver_init(void)
443{
444	int cstate;
445	struct cpuidle_driver *drv = &intel_idle_driver;
446
 
 
447	drv->state_count = 1;
448
449	for (cstate = 1; cstate < MWAIT_MAX_NUM_CSTATES; ++cstate) {
450		int num_substates;
 
 
 
 
451
452		if (cstate > max_cstate) {
453			printk(PREFIX "max_cstate %d reached\n",
454				max_cstate);
455			break;
456		}
457
458		/* does the state exist in CPUID.MWAIT? */
459		num_substates = (mwait_substates >> ((cstate) * 4))
 
 
 
460					& MWAIT_SUBSTATE_MASK;
 
 
461		if (num_substates == 0)
462			continue;
463		/* is the state not enabled? */
464		if (cpuidle_state_table[cstate].enter == NULL) {
465			/* does the driver not know about the state? */
466			if (*cpuidle_state_table[cstate].name == '\0')
467				pr_debug(PREFIX "unaware of model 0x%x"
468					" MWAIT %d please"
469					" contact lenb@kernel.org",
470				boot_cpu_data.x86_model, cstate);
471			continue;
472		}
473
474		if ((cstate > 2) &&
 
475			!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
476			mark_tsc_unstable("TSC halts in idle"
477					" states deeper than C2");
478
479		drv->states[drv->state_count] =	/* structure copy */
480			cpuidle_state_table[cstate];
481
482		drv->state_count += 1;
483	}
484
485	if (icpu->auto_demotion_disable_flags)
486		on_each_cpu(auto_demotion_disable, NULL, 1);
487
488	return 0;
489}
490
491
/*
 * intel_idle_cpu_init()
 * allocate, initialize, register cpuidle_devices
 * @cpu: cpu/core to initialize
 *
 * Returns 0 on success, -EIO if device registration fails.  On that
 * failure, intel_idle_cpuidle_devices_uninit() tears down ALL
 * previously registered devices and frees the percpu storage.
 */
int intel_idle_cpu_init(int cpu)
{
	int cstate;
	struct cpuidle_device *dev;

	dev = per_cpu_ptr(intel_idle_cpuidle_devices, cpu);

	dev->state_count = 1;

	for (cstate = 1; cstate < MWAIT_MAX_NUM_CSTATES; ++cstate) {
		int num_substates;

		if (cstate > max_cstate) {
			printk(PREFIX "max_cstate %d reached\n", max_cstate);
			break;
		}

		/* does the state exist in CPUID.MWAIT? */
		num_substates = (mwait_substates >> ((cstate) * 4))
			& MWAIT_SUBSTATE_MASK;
		if (num_substates == 0)
			continue;
		/* is the state not enabled? */
		if (cpuidle_state_table[cstate].enter == NULL)
			continue;

		/* stash the MWAIT hint for this state; read back in intel_idle() */
		dev->states_usage[dev->state_count].driver_data =
			(void *)get_driver_data(cstate);

		dev->state_count += 1;
	}

	dev->cpu = cpu;

	if (cpuidle_register_device(dev)) {
		pr_debug(PREFIX "cpuidle_register_device %d failed!\n", cpu);
		intel_idle_cpuidle_devices_uninit();
		return -EIO;
	}

	if (icpu->auto_demotion_disable_flags)
		smp_call_function_single(cpu, auto_demotion_disable, NULL, 1);

	return 0;
}
EXPORT_SYMBOL_GPL(intel_idle_cpu_init);
543
544static int __init intel_idle_init(void)
545{
546	int retval, i;
547
548	/* Do not load intel_idle at all for now if idle= is passed */
549	if (boot_option_idle_override != IDLE_NO_OVERRIDE)
550		return -ENODEV;
551
552	retval = intel_idle_probe();
553	if (retval)
554		return retval;
555
 
 
 
 
556	intel_idle_cpuidle_driver_init();
557	retval = cpuidle_register_driver(&intel_idle_driver);
558	if (retval) {
 
559		printk(KERN_DEBUG PREFIX "intel_idle yielding to %s",
560			cpuidle_get_driver()->name);
 
561		return retval;
562	}
563
564	intel_idle_cpuidle_devices = alloc_percpu(struct cpuidle_device);
565	if (intel_idle_cpuidle_devices == NULL)
566		return -ENOMEM;
567
568	for_each_online_cpu(i) {
569		retval = intel_idle_cpu_init(i);
570		if (retval) {
 
 
571			cpuidle_unregister_driver(&intel_idle_driver);
 
572			return retval;
573		}
574	}
 
 
 
 
 
 
 
 
 
 
 
575
576	return 0;
577}
578
579static void __exit intel_idle_exit(void)
580{
581	intel_idle_cpuidle_devices_uninit();
582	cpuidle_unregister_driver(&intel_idle_driver);
 
 
583
584	if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE) {
585		on_each_cpu(__setup_broadcast_timer, (void *)false, 1);
586		unregister_cpu_notifier(&setup_broadcast_notifier);
 
 
 
 
587	}
588
589	return;
 
 
 
590}
591
592module_init(intel_idle_init);
593module_exit(intel_idle_exit);
594
595module_param(max_cstate, int, 0444);
596
597MODULE_AUTHOR("Len Brown <len.brown@intel.com>");
598MODULE_DESCRIPTION("Cpuidle driver for Intel Hardware v" INTEL_IDLE_VERSION);
599MODULE_LICENSE("GPL");