// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * processor_idle - idle state submodule to the ACPI processor driver
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *			- Added processor hotplug support
 * Copyright (C) 2005 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *			- Added support for C3 on SMP
 */
#define pr_fmt(fmt) "ACPI: " fmt

#include <linux/module.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/sched.h>	/* need_resched() */
#include <linux/tick.h>
#include <linux/cpuidle.h>
#include <linux/cpu.h>
#include <acpi/processor.h>

/*
 * Include the apic definitions for x86 to have the APIC timer related defines
 * available also for UP (on SMP it gets magically included via linux/smp.h).
 * asm/acpi.h is not an option, as it would require more include magic. Also
 * creating an empty asm-ia64/apic.h would just trade pest vs. cholera.
 */
#ifdef CONFIG_X86
#include <asm/apic.h>
#endif

#define ACPI_PROCESSOR_CLASS	"processor"
#define _COMPONENT		ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_idle");

#define ACPI_IDLE_STATE_START	(IS_ENABLED(CONFIG_ARCH_HAS_CPU_RELAX) ? 1 : 0)

static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
module_param(max_cstate, uint, 0000);
static unsigned int nocst __read_mostly;
module_param(nocst, uint, 0000);
static int bm_check_disable __read_mostly;
module_param(bm_check_disable, uint, 0000);

static unsigned int latency_factor __read_mostly = 2;
module_param(latency_factor, uint, 0644);
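
/*
 * When this driver is built in, the parameters above can also be set on the
 * kernel command line, e.g. "processor.max_cstate=1" - the override that
 * set_max_cstate() below suggests in its notice.
 */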

static DEFINE_PER_CPU(struct cpuidle_device *, acpi_cpuidle_device);

struct cpuidle_driver acpi_idle_driver = {
	.name =		"acpi_idle",
	.owner =	THIS_MODULE,
};

#ifdef CONFIG_ACPI_PROCESSOR_CSTATE
static
DEFINE_PER_CPU(struct acpi_processor_cx * [CPUIDLE_STATE_MAX], acpi_cstate);

static int disabled_by_idle_boot_param(void)
{
	return boot_option_idle_override == IDLE_POLL ||
		boot_option_idle_override == IDLE_HALT;
}

/*
 * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
 * For now disable this. Probably a bug somewhere else.
 *
 * To skip this limit, boot/load with a large max_cstate limit.
 */
static int set_max_cstate(const struct dmi_system_id *id)
{
	if (max_cstate > ACPI_PROCESSOR_MAX_POWER)
		return 0;

	pr_notice("%s detected - limiting to C%ld max_cstate. Override with \"processor.max_cstate=%d\"\n",
		  id->ident, (long)id->driver_data,
		  ACPI_PROCESSOR_MAX_POWER + 1);

	max_cstate = (long)id->driver_data;

	return 0;
}

static const struct dmi_system_id processor_power_dmi_table[] = {
	{ set_max_cstate, "Clevo 5600D", {
	  DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
	  DMI_MATCH(DMI_BIOS_VERSION, "SHE845M0.86C.0013.D.0302131307")},
	 (void *)2},
	{ set_max_cstate, "Pavilion zv5000", {
	  DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
	  DMI_MATCH(DMI_PRODUCT_NAME, "Pavilion zv5000 (DS502A#ABA)")},
	 (void *)1},
	{ set_max_cstate, "Asus L8400B", {
	  DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
	  DMI_MATCH(DMI_PRODUCT_NAME, "L8400B series Notebook PC")},
	 (void *)1},
	{},
};

/*
 * Callers should disable interrupts before the call and enable
 * interrupts after return.
 */
static void __cpuidle acpi_safe_halt(void)
{
	if (!tif_need_resched()) {
		safe_halt();
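		/*
		 * safe_halt() enables interrupts before halting (STI; HLT
		 * on x86), so they are enabled again on wakeup; disable
		 * them here to restore the state the caller expects.
		 */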
		local_irq_disable();
	}
}

#ifdef ARCH_APICTIMER_STOPS_ON_C3

/*
 * Some BIOS implementations switch to C3 in the published C2 state.
 * This seems to be a common problem on AMD boxen, but other vendors
 * are affected too. We pick the most conservative approach: we assume
 * that the local APIC stops in both C2 and C3.
 */
static void lapic_timer_check_state(int state, struct acpi_processor *pr,
				    struct acpi_processor_cx *cx)
{
	struct acpi_processor_power *pwr = &pr->power;
	u8 type = local_apic_timer_c2_ok ? ACPI_STATE_C3 : ACPI_STATE_C2;

	if (cpu_has(&cpu_data(pr->id), X86_FEATURE_ARAT))
		return;

	if (boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E))
		type = ACPI_STATE_C1;

	/*
	 * Check if one of the previous states already marked the lapic
	 * unstable.
	 */
	if (pwr->timer_broadcast_on_state < state)
		return;

	if (cx->type >= type)
		pr->power.timer_broadcast_on_state = state;
}

static void __lapic_timer_propagate_broadcast(void *arg)
{
	struct acpi_processor *pr = (struct acpi_processor *)arg;

	if (pr->power.timer_broadcast_on_state < INT_MAX)
		tick_broadcast_enable();
	else
		tick_broadcast_disable();
}

static void lapic_timer_propagate_broadcast(struct acpi_processor *pr)
{
	smp_call_function_single(pr->id, __lapic_timer_propagate_broadcast,
				 (void *)pr, 1);
}

/* Power(C) State timer broadcast control */
static bool lapic_timer_needs_broadcast(struct acpi_processor *pr,
					struct acpi_processor_cx *cx)
{
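	/* cx - pr->power.states is the index of @cx in the states array. */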
	return cx - pr->power.states >= pr->power.timer_broadcast_on_state;
}

#else

static void lapic_timer_check_state(int state, struct acpi_processor *pr,
				    struct acpi_processor_cx *cstate) { }
static void lapic_timer_propagate_broadcast(struct acpi_processor *pr) { }

static bool lapic_timer_needs_broadcast(struct acpi_processor *pr,
					struct acpi_processor_cx *cx)
{
	return false;
}

#endif

#if defined(CONFIG_X86)
static void tsc_check_state(int state)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_HYGON:
	case X86_VENDOR_AMD:
	case X86_VENDOR_INTEL:
	case X86_VENDOR_CENTAUR:
	case X86_VENDOR_ZHAOXIN:
		/*
		 * AMD Fam10h TSC will tick in all
		 * C/P/S0/S1 states when this bit is set.
		 */
		if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
			return;
		fallthrough;
	default:
		/* TSC could halt in idle, so notify users */
		if (state > ACPI_STATE_C1)
			mark_tsc_unstable("TSC halts in idle");
	}
}
#else
static void tsc_check_state(int state) { }
#endif

static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
{
	if (!pr->pblk)
		return -ENODEV;

	/* if info is obtained from pblk/fadt, type equals state */
	pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
	pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;

#ifndef CONFIG_HOTPLUG_CPU
	/*
	 * Check for P_LVL2_UP flag before entering C2 and above on
	 * an SMP system.
	 */
	if ((num_online_cpus() > 1) &&
	    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
		return -ENODEV;
#endif

	/* determine C2 and C3 address from pblk */
	pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4;
	pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;

	/* determine latencies from FADT */
	pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.c2_latency;
	pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.c3_latency;

	/*
	 * FADT specified C2 latency must be less than or equal to
	 * 100 microseconds.
	 */
	if (acpi_gbl_FADT.c2_latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "C2 latency too large [%d]\n", acpi_gbl_FADT.c2_latency));
		/* invalidate C2 */
		pr->power.states[ACPI_STATE_C2].address = 0;
	}

	/*
	 * FADT supplied C3 latency must be less than or equal to
	 * 1000 microseconds.
	 */
	if (acpi_gbl_FADT.c3_latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "C3 latency too large [%d]\n", acpi_gbl_FADT.c3_latency));
		/* invalidate C3 */
		pr->power.states[ACPI_STATE_C3].address = 0;
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "lvl2[0x%08x] lvl3[0x%08x]\n",
			  pr->power.states[ACPI_STATE_C2].address,
			  pr->power.states[ACPI_STATE_C3].address));

	snprintf(pr->power.states[ACPI_STATE_C2].desc,
		 ACPI_CX_DESC_LEN, "ACPI P_LVL2 IOPORT 0x%x",
		 pr->power.states[ACPI_STATE_C2].address);
	snprintf(pr->power.states[ACPI_STATE_C3].desc,
		 ACPI_CX_DESC_LEN, "ACPI P_LVL3 IOPORT 0x%x",
		 pr->power.states[ACPI_STATE_C3].address);

	return 0;
}

static int acpi_processor_get_power_info_default(struct acpi_processor *pr)
{
	if (!pr->power.states[ACPI_STATE_C1].valid) {
		/* set the first C-State to C1 */
		/* all processors need to support C1 */
		pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
		pr->power.states[ACPI_STATE_C1].valid = 1;
		pr->power.states[ACPI_STATE_C1].entry_method = ACPI_CSTATE_HALT;

		snprintf(pr->power.states[ACPI_STATE_C1].desc,
			 ACPI_CX_DESC_LEN, "ACPI HLT");
	}
	/* the C0 state only exists as a filler in our array */
	pr->power.states[ACPI_STATE_C0].valid = 1;
	return 0;
}

static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
{
	int ret;

	if (nocst)
		return -ENODEV;

	ret = acpi_processor_evaluate_cst(pr->handle, pr->id, &pr->power);
	if (ret)
		return ret;

	if (!pr->power.count)
		return -EFAULT;

	pr->flags.has_cst = 1;
	return 0;
}

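/*
 * acpi_processor_power_verify_c3 - check chipset and FADT constraints and
 * mark @cx valid only if C3 can be used safely on this system.
 */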
static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
					   struct acpi_processor_cx *cx)
{
	static int bm_check_flag = -1;
	static int bm_control_flag = -1;

	if (!cx->address)
		return;

	/*
	 * PIIX4 Erratum #18: We don't support C3 when Type-F (fast)
	 * DMA transfers are used by any ISA device to avoid livelock.
	 * Note that we could disable Type-F DMA (as recommended by
	 * the erratum), but this is known to disrupt certain ISA
	 * devices thus we take the conservative approach.
	 */
	if (errata.piix4.fdma) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "C3 not supported on PIIX4 with Type-F DMA\n"));
		return;
	}

	/* All the logic here assumes flags.bm_check is same across all CPUs */
	if (bm_check_flag == -1) {
		/* Determine whether bm_check is needed based on CPU */
		acpi_processor_power_init_bm_check(&(pr->flags), pr->id);
		bm_check_flag = pr->flags.bm_check;
		bm_control_flag = pr->flags.bm_control;
	} else {
		pr->flags.bm_check = bm_check_flag;
		pr->flags.bm_control = bm_control_flag;
	}

	if (pr->flags.bm_check) {
		if (!pr->flags.bm_control) {
			if (pr->flags.has_cst != 1) {
				/* bus mastering control is necessary */
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
						  "C3 support requires BM control\n"));
				return;
			} else {
				/* Here we enter C3 without bus mastering */
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
						  "C3 support without BM control\n"));
			}
		}
	} else {
		/*
		 * WBINVD should be set in the FADT for the C3 state to be
		 * supported when bm_check is not required.
		 */
		if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) {
			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					  "Cache invalidation should work properly"
					  " for C3 to be enabled on SMP systems\n"));
			return;
		}
	}

	/*
	 * Otherwise we've met all of our C3 requirements.
	 * Normalize the C3 latency to expedite policy.  Enable
	 * checking of bus mastering status (bm_check) so we can
	 * use this in our C3 policy.
	 */
	cx->valid = 1;

	/*
	 * On older chipsets, BM_RLD needs to be set
	 * in order for Bus Master activity to wake the
	 * system from C3. Newer chipsets handle DMA
	 * during C3 automatically and BM_RLD is a NOP.
	 * In either case, the proper way to
	 * handle BM_RLD is to set it and leave it set.
	 */
	acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
}

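/*
 * acpi_processor_power_verify - validate each C-state in pr->power.states[]
 * up to max_cstate, work out which states need timer broadcast, and return
 * the number of usable ("working") states.
 */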
static int acpi_processor_power_verify(struct acpi_processor *pr)
{
	unsigned int i;
	unsigned int working = 0;

	pr->power.timer_broadcast_on_state = INT_MAX;

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		struct acpi_processor_cx *cx = &pr->power.states[i];

		switch (cx->type) {
		case ACPI_STATE_C1:
			cx->valid = 1;
			break;

		case ACPI_STATE_C2:
			if (!cx->address)
				break;
			cx->valid = 1;
			break;

		case ACPI_STATE_C3:
			acpi_processor_power_verify_c3(pr, cx);
			break;
		}
		if (!cx->valid)
			continue;

		lapic_timer_check_state(i, pr, cx);
		tsc_check_state(cx->type);
		working++;
	}

	lapic_timer_propagate_broadcast(pr);

	return working;
}

static int acpi_processor_get_cstate_info(struct acpi_processor *pr)
{
	unsigned int i;
	int result;

	/*
	 * NOTE: the idle thread may not be running while calling
	 * this function.
	 */

	/* Zero initialize all the C-states info. */
	memset(pr->power.states, 0, sizeof(pr->power.states));

	result = acpi_processor_get_power_info_cst(pr);
	if (result == -ENODEV)
		result = acpi_processor_get_power_info_fadt(pr);

	if (result)
		return result;

	acpi_processor_get_power_info_default(pr);

	pr->power.count = acpi_processor_power_verify(pr);

	/*
	 * If at least one valid C-state is available, mark this
	 * CPU as being "idle manageable".
	 */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		if (pr->power.states[i].valid) {
			pr->power.count = i;
			pr->flags.power = 1;
		}
	}

	return 0;
}

/**
 * acpi_idle_bm_check - checks if bus master activity was detected
 */
static int acpi_idle_bm_check(void)
{
	u32 bm_status = 0;

	if (bm_check_disable)
		return 0;

	acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
	if (bm_status) {
		acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
	} else if (errata.piix4.bmisx) {
		/*
		 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
		 * the true state of bus mastering activity; forcing us to
		 * manually check the BMIDEA bit of each IDE channel.
		 */
		if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01) ||
		    (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
			bm_status = 1;
	}
	return bm_status;
}

static void wait_for_freeze(void)
{
#ifdef CONFIG_X86
	/* No delay is needed if we are in guest */
	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return;
#endif
	/*
	 * Dummy wait op - must do something useless after P_LVL2 read
	 * because chipsets cannot guarantee that STPCLK# signal
	 * gets asserted in time to freeze execution properly.
	 */
	inl(acpi_gbl_FADT.xpm_timer_block.address);
}

/**
 * acpi_idle_do_entry - enter idle state using the appropriate method
 * @cx: cstate data
 *
 * Caller disables interrupts before call and enables interrupts after return.
 */
static void __cpuidle acpi_idle_do_entry(struct acpi_processor_cx *cx)
{
	if (cx->entry_method == ACPI_CSTATE_FFH) {
		/* Call into architectural FFH based C-state */
		acpi_processor_ffh_cstate_enter(cx);
	} else if (cx->entry_method == ACPI_CSTATE_HALT) {
		acpi_safe_halt();
	} else {
		/* IO port based C-state */
		inb(cx->address);
		wait_for_freeze();
	}
}

/**
 * acpi_idle_play_dead - enters an ACPI state for long-term idle (i.e. off-lining)
 * @dev: the target CPU
 * @index: the index of suggested state
 */
static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
{
	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);

	ACPI_FLUSH_CPU_CACHE();

	while (1) {
		if (cx->entry_method == ACPI_CSTATE_HALT)
			safe_halt();
		else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) {
			inb(cx->address);
			wait_for_freeze();
		} else
			return -ENODEV;
	}

	/* Never reached */
	return 0;
}

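/*
 * With CONFIG_HOTPLUG_CPU, FADT-derived C2/C3 states are retained even on
 * SMP systems.  When there is no _CST and the FADT does not advertise
 * ACPI_FADT_C2_MP_SUPPORTED, those states are only safe with one CPU online,
 * so acpi_idle_enter() demotes to C1 while additional CPUs are online.
 */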
static bool acpi_idle_fallback_to_c1(struct acpi_processor *pr)
{
	return IS_ENABLED(CONFIG_HOTPLUG_CPU) && !pr->flags.has_cst &&
		!(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED);
}

static int c3_cpu_count;
static DEFINE_RAW_SPINLOCK(c3_lock);

/**
 * acpi_idle_enter_bm - enters C3 with proper BM handling
 * @drv: cpuidle driver
 * @pr: Target processor
 * @cx: Target state context
 * @index: index of target state
 */
static int acpi_idle_enter_bm(struct cpuidle_driver *drv,
			      struct acpi_processor *pr,
			      struct acpi_processor_cx *cx,
			      int index)
{
	static struct acpi_processor_cx safe_cx = {
		.entry_method = ACPI_CSTATE_HALT,
	};

	/*
	 * disable bus master
	 * bm_check implies we need ARB_DIS
	 * bm_control implies whether we can do ARB_DIS
	 *
	 * That leaves a case where bm_check is set and bm_control is not set.
	 * In that case we cannot do much, we enter C3 without doing anything.
	 */
	bool dis_bm = pr->flags.bm_control;

	/* If we can skip BM, demote to a safe state. */
	if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
		dis_bm = false;
		index = drv->safe_state_index;
		if (index >= 0) {
			cx = this_cpu_read(acpi_cstate[index]);
		} else {
			cx = &safe_cx;
			index = -EBUSY;
		}
	}

	if (dis_bm) {
		raw_spin_lock(&c3_lock);
		c3_cpu_count++;
		/* Disable bus master arbitration when all CPUs are in C3 */
		if (c3_cpu_count == num_online_cpus())
			acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
		raw_spin_unlock(&c3_lock);
	}

	rcu_idle_enter();

	acpi_idle_do_entry(cx);

	rcu_idle_exit();

	/* Re-enable bus master arbitration */
	if (dis_bm) {
		raw_spin_lock(&c3_lock);
		acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
		c3_cpu_count--;
		raw_spin_unlock(&c3_lock);
	}

	return index;
}

static int acpi_idle_enter(struct cpuidle_device *dev,
			   struct cpuidle_driver *drv, int index)
{
	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
	struct acpi_processor *pr;

	pr = __this_cpu_read(processors);
	if (unlikely(!pr))
		return -EINVAL;

	if (cx->type != ACPI_STATE_C1) {
		if (cx->type == ACPI_STATE_C3 && pr->flags.bm_check)
			return acpi_idle_enter_bm(drv, pr, cx, index);

		/* C2 to C1 demotion. */
		if (acpi_idle_fallback_to_c1(pr) && num_online_cpus() > 1) {
			index = ACPI_IDLE_STATE_START;
			cx = per_cpu(acpi_cstate[index], dev->cpu);
		}
	}

	if (cx->type == ACPI_STATE_C3)
		ACPI_FLUSH_CPU_CACHE();

	acpi_idle_do_entry(cx);

	return index;
}

static int acpi_idle_enter_s2idle(struct cpuidle_device *dev,
				  struct cpuidle_driver *drv, int index)
{
	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);

	if (cx->type == ACPI_STATE_C3) {
		struct acpi_processor *pr = __this_cpu_read(processors);

		if (unlikely(!pr))
			return 0;

		if (pr->flags.bm_check) {
			u8 bm_sts_skip = cx->bm_sts_skip;

			/* Don't check BM_STS, do an unconditional ARB_DIS for S2IDLE */
			cx->bm_sts_skip = 1;
			acpi_idle_enter_bm(drv, pr, cx, index);
			cx->bm_sts_skip = bm_sts_skip;

			return 0;
		} else {
			ACPI_FLUSH_CPU_CACHE();
		}
	}
	acpi_idle_do_entry(cx);

	return 0;
}

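/*
 * acpi_processor_setup_cpuidle_cx - populate the per-CPU acpi_cstate[]
 * pointers for @dev and set per-state cpuidle flags (timer broadcast,
 * TLB flushed, RCU idle) from the validated C-states of @pr.
 */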
static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
					   struct cpuidle_device *dev)
{
	int i, count = ACPI_IDLE_STATE_START;
	struct acpi_processor_cx *cx;
	struct cpuidle_state *state;

	if (max_cstate == 0)
		max_cstate = 1;

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		state = &acpi_idle_driver.states[count];
		cx = &pr->power.states[i];

		if (!cx->valid)
			continue;

		per_cpu(acpi_cstate[count], dev->cpu) = cx;

		if (lapic_timer_needs_broadcast(pr, cx))
			state->flags |= CPUIDLE_FLAG_TIMER_STOP;

		if (cx->type == ACPI_STATE_C3) {
			state->flags |= CPUIDLE_FLAG_TLB_FLUSHED;
			if (pr->flags.bm_check)
				state->flags |= CPUIDLE_FLAG_RCU_IDLE;
		}

		count++;
		if (count == CPUIDLE_STATE_MAX)
			break;
	}

	if (!count)
		return -EINVAL;

	return 0;
}

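/*
 * acpi_processor_setup_cstates - fill in the global acpi_idle_driver state
 * table (names, descriptions, latencies, target residencies and entry
 * callbacks) from the validated C-states of @pr.
 */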
static int acpi_processor_setup_cstates(struct acpi_processor *pr)
{
	int i, count;
	struct acpi_processor_cx *cx;
	struct cpuidle_state *state;
	struct cpuidle_driver *drv = &acpi_idle_driver;

	if (max_cstate == 0)
		max_cstate = 1;

	if (IS_ENABLED(CONFIG_ARCH_HAS_CPU_RELAX)) {
		cpuidle_poll_state_init(drv);
		count = 1;
	} else {
		count = 0;
	}

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		cx = &pr->power.states[i];

		if (!cx->valid)
			continue;

		state = &drv->states[count];
		snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
		strlcpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
		state->exit_latency = cx->latency;
		state->target_residency = cx->latency * latency_factor;
		state->enter = acpi_idle_enter;

		state->flags = 0;
		if (cx->type == ACPI_STATE_C1 || cx->type == ACPI_STATE_C2) {
			state->enter_dead = acpi_idle_play_dead;
			drv->safe_state_index = count;
		}
		/*
		 * Halt-induced C1 is not good for ->enter_s2idle, because it
		 * re-enables interrupts on exit. Moreover, C1 is generally not
		 * particularly interesting from the suspend-to-idle angle, so
		 * avoid C1 and the situations in which we may need to fall back
		 * to it altogether.
		 */
		if (cx->type != ACPI_STATE_C1 && !acpi_idle_fallback_to_c1(pr))
			state->enter_s2idle = acpi_idle_enter_s2idle;

		count++;
		if (count == CPUIDLE_STATE_MAX)
			break;
	}

	drv->state_count = count;

	if (!count)
		return -EINVAL;

	return 0;
}

static inline void acpi_processor_cstate_first_run_checks(void)
{
	static int first_run;

	if (first_run)
		return;
	dmi_check_system(processor_power_dmi_table);
	max_cstate = acpi_processor_cstate_check(max_cstate);
	if (max_cstate < ACPI_C_STATES_MAX)
		pr_notice("processor limited to max C-state %d\n", max_cstate);
	first_run++;

	if (nocst)
		return;

	acpi_processor_claim_cst_control();
}
#else

static inline int disabled_by_idle_boot_param(void) { return 0; }
static inline void acpi_processor_cstate_first_run_checks(void) { }
static int acpi_processor_get_cstate_info(struct acpi_processor *pr)
{
	return -ENODEV;
}

static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
					   struct cpuidle_device *dev)
{
	return -EINVAL;
}

static int acpi_processor_setup_cstates(struct acpi_processor *pr)
{
	return -EINVAL;
}

#endif /* CONFIG_ACPI_PROCESSOR_CSTATE */

struct acpi_lpi_states_array {
	unsigned int size;
	unsigned int composite_states_size;
	struct acpi_lpi_state *entries;
	struct acpi_lpi_state *composite_states[ACPI_PROCESSOR_MAX_POWER];
};

static int obj_get_integer(union acpi_object *obj, u32 *value)
{
	if (obj->type != ACPI_TYPE_INTEGER)
		return -EINVAL;

	*value = obj->integer.value;
	return 0;
}

static int acpi_processor_evaluate_lpi(acpi_handle handle,
				       struct acpi_lpi_states_array *info)
{
	acpi_status status;
	int ret = 0;
	int pkg_count, state_idx = 1, loop;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *lpi_data;
	struct acpi_lpi_state *lpi_state;

	status = acpi_evaluate_object(handle, "_LPI", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _LPI, giving up\n"));
		return -ENODEV;
	}

	lpi_data = buffer.pointer;

	/* There must be at least 4 elements = 3 elements + 1 package */
	if (!lpi_data || lpi_data->type != ACPI_TYPE_PACKAGE ||
	    lpi_data->package.count < 4) {
		pr_debug("not enough elements in _LPI\n");
		ret = -ENODATA;
		goto end;
	}

	pkg_count = lpi_data->package.elements[2].integer.value;

	/* Validate number of power states. */
	if (pkg_count < 1 || pkg_count != lpi_data->package.count - 3) {
		pr_debug("count given by _LPI is not valid\n");
		ret = -ENODATA;
		goto end;
	}

	lpi_state = kcalloc(pkg_count, sizeof(*lpi_state), GFP_KERNEL);
	if (!lpi_state) {
		ret = -ENOMEM;
		goto end;
	}

	info->size = pkg_count;
	info->entries = lpi_state;

	/* LPI States start at index 3 */
	for (loop = 3; state_idx <= pkg_count; loop++, state_idx++, lpi_state++) {
		union acpi_object *element, *pkg_elem, *obj;

		element = &lpi_data->package.elements[loop];
		if (element->type != ACPI_TYPE_PACKAGE || element->package.count < 7)
			continue;

		pkg_elem = element->package.elements;

		obj = pkg_elem + 6;
		if (obj->type == ACPI_TYPE_BUFFER) {
			struct acpi_power_register *reg;

			reg = (struct acpi_power_register *)obj->buffer.pointer;
			if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
			    reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)
				continue;

			lpi_state->address = reg->address;
			lpi_state->entry_method =
				reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE ?
				ACPI_CSTATE_FFH : ACPI_CSTATE_SYSTEMIO;
		} else if (obj->type == ACPI_TYPE_INTEGER) {
			lpi_state->entry_method = ACPI_CSTATE_INTEGER;
			lpi_state->address = obj->integer.value;
		} else {
			continue;
		}

		/* elements[7,8] skipped for now, i.e. Residency/Usage counter */

		obj = pkg_elem + 9;
		if (obj->type == ACPI_TYPE_STRING)
			strlcpy(lpi_state->desc, obj->string.pointer,
				ACPI_CX_DESC_LEN);

		lpi_state->index = state_idx;
		if (obj_get_integer(pkg_elem + 0, &lpi_state->min_residency)) {
			pr_debug("No min. residency found, assuming 10 us\n");
			lpi_state->min_residency = 10;
		}

		if (obj_get_integer(pkg_elem + 1, &lpi_state->wake_latency)) {
			pr_debug("No wake latency found, assuming 10 us\n");
			lpi_state->wake_latency = 10;
		}

		if (obj_get_integer(pkg_elem + 2, &lpi_state->flags))
			lpi_state->flags = 0;

		if (obj_get_integer(pkg_elem + 3, &lpi_state->arch_flags))
			lpi_state->arch_flags = 0;

		if (obj_get_integer(pkg_elem + 4, &lpi_state->res_cnt_freq))
			lpi_state->res_cnt_freq = 1;

		if (obj_get_integer(pkg_elem + 5, &lpi_state->enable_parent_state))
			lpi_state->enable_parent_state = 0;
	}

	acpi_handle_debug(handle, "Found %d power states\n", state_idx - 1);
end:
	kfree(buffer.pointer);
	return ret;
}

/*
 * flat_state_cnt - the number of composite LPI states after the process of flattening
 */
static int flat_state_cnt;

/**
 * combine_lpi_states - combine local and parent LPI states to form a composite LPI state
 *
 * @local: local LPI state
 * @parent: parent LPI state
 * @result: composite LPI state
 */
static bool combine_lpi_states(struct acpi_lpi_state *local,
			       struct acpi_lpi_state *parent,
			       struct acpi_lpi_state *result)
{
	if (parent->entry_method == ACPI_CSTATE_INTEGER) {
		if (!parent->address) /* 0 means autopromotable */
			return false;
		result->address = local->address + parent->address;
	} else {
		result->address = parent->address;
	}

	result->min_residency = max(local->min_residency, parent->min_residency);
	result->wake_latency = local->wake_latency + parent->wake_latency;
	result->enable_parent_state = parent->enable_parent_state;
	result->entry_method = local->entry_method;

	result->flags = parent->flags;
	result->arch_flags = parent->arch_flags;
	result->index = parent->index;

	strlcpy(result->desc, local->desc, ACPI_CX_DESC_LEN);
	strlcat(result->desc, "+", ACPI_CX_DESC_LEN);
	strlcat(result->desc, parent->desc, ACPI_CX_DESC_LEN);
	return true;
}

#define ACPI_LPI_STATE_FLAGS_ENABLED			BIT(0)

static void stash_composite_state(struct acpi_lpi_states_array *curr_level,
				  struct acpi_lpi_state *t)
{
	curr_level->composite_states[curr_level->composite_states_size++] = t;
}

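/*
 * flatten_lpi_states - combine the enabled LPI states of @curr_level with
 * the composite states of @prev_level (NULL at the leaf processor node) and
 * append the results to pr->power.lpi_states[].
 */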
static int flatten_lpi_states(struct acpi_processor *pr,
			      struct acpi_lpi_states_array *curr_level,
			      struct acpi_lpi_states_array *prev_level)
{
	int i, j, state_count = curr_level->size;
	struct acpi_lpi_state *p, *t = curr_level->entries;

	curr_level->composite_states_size = 0;
	for (j = 0; j < state_count; j++, t++) {
		struct acpi_lpi_state *flpi;

		if (!(t->flags & ACPI_LPI_STATE_FLAGS_ENABLED))
			continue;

		if (flat_state_cnt >= ACPI_PROCESSOR_MAX_POWER) {
			pr_warn("Limiting number of LPI states to max (%d)\n",
				ACPI_PROCESSOR_MAX_POWER);
			pr_warn("Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
			break;
		}

		flpi = &pr->power.lpi_states[flat_state_cnt];

		if (!prev_level) { /* leaf/processor node */
			memcpy(flpi, t, sizeof(*t));
			stash_composite_state(curr_level, flpi);
			flat_state_cnt++;
			continue;
		}

		for (i = 0; i < prev_level->composite_states_size; i++) {
			p = prev_level->composite_states[i];
			if (t->index <= p->enable_parent_state &&
			    combine_lpi_states(p, t, flpi)) {
				stash_composite_state(curr_level, flpi);
				flat_state_cnt++;
				flpi++;
			}
		}
	}

	kfree(curr_level->entries);
	return 0;
}

static int acpi_processor_get_lpi_info(struct acpi_processor *pr)
{
	int ret, i;
	acpi_status status;
	acpi_handle handle = pr->handle, pr_ahandle;
	struct acpi_device *d = NULL;
	struct acpi_lpi_states_array info[2], *tmp, *prev, *curr;

	if (!osc_pc_lpi_support_confirmed)
		return -EOPNOTSUPP;

	if (!acpi_has_method(handle, "_LPI"))
		return -EINVAL;

	flat_state_cnt = 0;
	prev = &info[0];
	curr = &info[1];
	handle = pr->handle;
	ret = acpi_processor_evaluate_lpi(handle, prev);
	if (ret)
		return ret;
	flatten_lpi_states(pr, prev, NULL);

	status = acpi_get_parent(handle, &pr_ahandle);
	while (ACPI_SUCCESS(status)) {
		acpi_bus_get_device(pr_ahandle, &d);
		handle = pr_ahandle;

		if (strcmp(acpi_device_hid(d), ACPI_PROCESSOR_CONTAINER_HID))
			break;

		/* can be optional ? */
		if (!acpi_has_method(handle, "_LPI"))
			break;

		ret = acpi_processor_evaluate_lpi(handle, curr);
		if (ret)
			break;

		/* flatten all the LPI states in this level of hierarchy */
		flatten_lpi_states(pr, curr, prev);

		tmp = prev;
		prev = curr;
		curr = tmp;

		status = acpi_get_parent(handle, &pr_ahandle);
	}

	pr->power.count = flat_state_cnt;
	/* reset the index after flattening */
	for (i = 0; i < pr->power.count; i++)
		pr->power.lpi_states[i].index = i;

	/* Tell driver that _LPI is supported. */
	pr->flags.has_lpi = 1;
	pr->flags.power = 1;

	return 0;
}

int __weak acpi_processor_ffh_lpi_probe(unsigned int cpu)
{
	return -ENODEV;
}

int __weak acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi)
{
	return -ENODEV;
}

/**
 * acpi_idle_lpi_enter - enters an ACPI LPI state
 * @dev: the target CPU
 * @drv: cpuidle driver containing cpuidle state info
 * @index: index of target state
 *
 * Return: 0 for success or negative value for error
 */
static int acpi_idle_lpi_enter(struct cpuidle_device *dev,
			       struct cpuidle_driver *drv, int index)
{
	struct acpi_processor *pr;
	struct acpi_lpi_state *lpi;

	pr = __this_cpu_read(processors);

	if (unlikely(!pr))
		return -EINVAL;

	lpi = &pr->power.lpi_states[index];
	if (lpi->entry_method == ACPI_CSTATE_FFH)
		return acpi_processor_ffh_lpi_enter(lpi);

	return -EINVAL;
}

static int acpi_processor_setup_lpi_states(struct acpi_processor *pr)
{
	int i;
	struct acpi_lpi_state *lpi;
	struct cpuidle_state *state;
	struct cpuidle_driver *drv = &acpi_idle_driver;

	if (!pr->flags.has_lpi)
		return -EOPNOTSUPP;

	for (i = 0; i < pr->power.count && i < CPUIDLE_STATE_MAX; i++) {
		lpi = &pr->power.lpi_states[i];

		state = &drv->states[i];
		snprintf(state->name, CPUIDLE_NAME_LEN, "LPI-%d", i);
		strlcpy(state->desc, lpi->desc, CPUIDLE_DESC_LEN);
		state->exit_latency = lpi->wake_latency;
		state->target_residency = lpi->min_residency;
		if (lpi->arch_flags)
			state->flags |= CPUIDLE_FLAG_TIMER_STOP;
		state->enter = acpi_idle_lpi_enter;
		drv->safe_state_index = i;
	}

	drv->state_count = i;

	return 0;
}

/**
 * acpi_processor_setup_cpuidle_states - prepares and configures cpuidle
 * global state data i.e. idle routines
 *
 * @pr: the ACPI processor
 */
static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
{
	int i;
	struct cpuidle_driver *drv = &acpi_idle_driver;

	if (!pr->flags.power_setup_done || !pr->flags.power)
		return -EINVAL;

	drv->safe_state_index = -1;
	for (i = ACPI_IDLE_STATE_START; i < CPUIDLE_STATE_MAX; i++) {
		drv->states[i].name[0] = '\0';
		drv->states[i].desc[0] = '\0';
	}

	if (pr->flags.has_lpi)
		return acpi_processor_setup_lpi_states(pr);

	return acpi_processor_setup_cstates(pr);
}

/**
 * acpi_processor_setup_cpuidle_dev - prepares and configures CPUIDLE
 * device i.e. per-cpu data
 *
 * @pr: the ACPI processor
 * @dev: the cpuidle device
 */
static int acpi_processor_setup_cpuidle_dev(struct acpi_processor *pr,
					    struct cpuidle_device *dev)
{
	if (!pr->flags.power_setup_done || !pr->flags.power || !dev)
		return -EINVAL;

	dev->cpu = pr->id;
	if (pr->flags.has_lpi)
		return acpi_processor_ffh_lpi_probe(pr->id);

	return acpi_processor_setup_cpuidle_cx(pr, dev);
}

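/* Prefer _LPI; fall back to C-state enumeration (_CST or FADT) on failure. */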
static int acpi_processor_get_power_info(struct acpi_processor *pr)
{
	int ret;

	ret = acpi_processor_get_lpi_info(pr);
	if (ret)
		ret = acpi_processor_get_cstate_info(pr);

	return ret;
}

int acpi_processor_hotplug(struct acpi_processor *pr)
{
	int ret = 0;
	struct cpuidle_device *dev;

	if (disabled_by_idle_boot_param())
		return 0;

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	dev = per_cpu(acpi_cpuidle_device, pr->id);
	cpuidle_pause_and_lock();
	cpuidle_disable_device(dev);
	ret = acpi_processor_get_power_info(pr);
	if (!ret && pr->flags.power) {
		acpi_processor_setup_cpuidle_dev(pr, dev);
		ret = cpuidle_enable_device(dev);
	}
	cpuidle_resume_and_unlock();

	return ret;
}

int acpi_processor_power_state_has_changed(struct acpi_processor *pr)
{
	int cpu;
	struct acpi_processor *_pr;
	struct cpuidle_device *dev;

	if (disabled_by_idle_boot_param())
		return 0;

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	/*
	 * FIXME: Design the ACPI notification to make it once per
	 * system instead of once per-cpu.  This condition is a hack
	 * to make the code that updates C-States be called once.
	 */

	if (pr->id == 0 && cpuidle_get_driver() == &acpi_idle_driver) {
		/* Protect against cpu-hotplug */
		get_online_cpus();
		cpuidle_pause_and_lock();

		/* Disable all cpuidle devices */
		for_each_online_cpu(cpu) {
			_pr = per_cpu(processors, cpu);
			if (!_pr || !_pr->flags.power_setup_done)
				continue;
			dev = per_cpu(acpi_cpuidle_device, cpu);
			cpuidle_disable_device(dev);
		}

		/* Populate updated C-state information */
		acpi_processor_get_power_info(pr);
		acpi_processor_setup_cpuidle_states(pr);

		/* Enable all cpuidle devices */
		for_each_online_cpu(cpu) {
			_pr = per_cpu(processors, cpu);
			if (!_pr || !_pr->flags.power_setup_done)
				continue;
			acpi_processor_get_power_info(_pr);
			if (_pr->flags.power) {
				dev = per_cpu(acpi_cpuidle_device, cpu);
				acpi_processor_setup_cpuidle_dev(_pr, dev);
				cpuidle_enable_device(dev);
			}
		}
		cpuidle_resume_and_unlock();
		put_online_cpus();
	}

	return 0;
}

static int acpi_processor_registered;

int acpi_processor_power_init(struct acpi_processor *pr)
{
	int retval;
	struct cpuidle_device *dev;

	if (disabled_by_idle_boot_param())
		return 0;

	acpi_processor_cstate_first_run_checks();

	if (!acpi_processor_get_power_info(pr))
		pr->flags.power_setup_done = 1;

	/*
	 * Install the idle handler if processor power management is supported.
	 * Note that the previously set idle handler will be used on platforms
	 * that only support C1.
	 */
	if (pr->flags.power) {
		/* Register acpi_idle_driver if not already registered */
		if (!acpi_processor_registered) {
			acpi_processor_setup_cpuidle_states(pr);
			retval = cpuidle_register_driver(&acpi_idle_driver);
			if (retval)
				return retval;
			pr_debug("%s registered with cpuidle\n",
				 acpi_idle_driver.name);
		}

		dev = kzalloc(sizeof(*dev), GFP_KERNEL);
		if (!dev)
			return -ENOMEM;
		per_cpu(acpi_cpuidle_device, pr->id) = dev;

		acpi_processor_setup_cpuidle_dev(pr, dev);

		/*
		 * Register the per-CPU cpuidle_device.  The cpuidle driver
		 * must already be registered before registering the device.
		 */
		retval = cpuidle_register_device(dev);
		if (retval) {
			if (acpi_processor_registered == 0)
				cpuidle_unregister_driver(&acpi_idle_driver);
			return retval;
		}
		acpi_processor_registered++;
	}
	return 0;
}

int acpi_processor_power_exit(struct acpi_processor *pr)
{
	struct cpuidle_device *dev = per_cpu(acpi_cpuidle_device, pr->id);

	if (disabled_by_idle_boot_param())
		return 0;

	if (pr->flags.power) {
		cpuidle_unregister_device(dev);
		acpi_processor_registered--;
		if (acpi_processor_registered == 0)
			cpuidle_unregister_driver(&acpi_idle_driver);
	}

	pr->flags.power_setup_done = 0;
	return 0;
}
1/*
2 * processor_idle - idle state submodule to the ACPI processor driver
3 *
4 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
5 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
6 * Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de>
7 * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
8 * - Added processor hotplug support
9 * Copyright (C) 2005 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
10 * - Added support for C3 on SMP
11 *
12 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2 of the License, or (at
17 * your option) any later version.
18 *
19 * This program is distributed in the hope that it will be useful, but
20 * WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
22 * General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License along
25 * with this program; if not, write to the Free Software Foundation, Inc.,
26 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
27 *
28 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
29 */
30
31#include <linux/kernel.h>
32#include <linux/module.h>
33#include <linux/init.h>
34#include <linux/cpufreq.h>
35#include <linux/slab.h>
36#include <linux/acpi.h>
37#include <linux/dmi.h>
38#include <linux/moduleparam.h>
39#include <linux/sched.h> /* need_resched() */
40#include <linux/pm_qos.h>
41#include <linux/clockchips.h>
42#include <linux/cpuidle.h>
43#include <linux/irqflags.h>
44
45/*
46 * Include the apic definitions for x86 to have the APIC timer related defines
47 * available also for UP (on SMP it gets magically included via linux/smp.h).
48 * asm/acpi.h is not an option, as it would require more include magic. Also
49 * creating an empty asm-ia64/apic.h would just trade pest vs. cholera.
50 */
51#ifdef CONFIG_X86
52#include <asm/apic.h>
53#endif
54
55#include <asm/io.h>
56#include <asm/uaccess.h>
57
58#include <acpi/acpi_bus.h>
59#include <acpi/processor.h>
60#include <asm/processor.h>
61
62#define PREFIX "ACPI: "
63
64#define ACPI_PROCESSOR_CLASS "processor"
65#define _COMPONENT ACPI_PROCESSOR_COMPONENT
66ACPI_MODULE_NAME("processor_idle");
67#define PM_TIMER_TICK_NS (1000000000ULL/PM_TIMER_FREQUENCY)
68#define C2_OVERHEAD 1 /* 1us */
69#define C3_OVERHEAD 1 /* 1us */
70#define PM_TIMER_TICKS_TO_US(p) (((p) * 1000)/(PM_TIMER_FREQUENCY/1000))
71
72static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
73module_param(max_cstate, uint, 0000);
74static unsigned int nocst __read_mostly;
75module_param(nocst, uint, 0000);
76static int bm_check_disable __read_mostly;
77module_param(bm_check_disable, uint, 0000);
78
79static unsigned int latency_factor __read_mostly = 2;
80module_param(latency_factor, uint, 0644);
81
82static int disabled_by_idle_boot_param(void)
83{
84 return boot_option_idle_override == IDLE_POLL ||
85 boot_option_idle_override == IDLE_FORCE_MWAIT ||
86 boot_option_idle_override == IDLE_HALT;
87}
88
89/*
90 * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
91 * For now disable this. Probably a bug somewhere else.
92 *
93 * To skip this limit, boot/load with a large max_cstate limit.
94 */
95static int set_max_cstate(const struct dmi_system_id *id)
96{
97 if (max_cstate > ACPI_PROCESSOR_MAX_POWER)
98 return 0;
99
100 printk(KERN_NOTICE PREFIX "%s detected - limiting to C%ld max_cstate."
101 " Override with \"processor.max_cstate=%d\"\n", id->ident,
102 (long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1);
103
104 max_cstate = (long)id->driver_data;
105
106 return 0;
107}
108
109/* Actually this shouldn't be __cpuinitdata, would be better to fix the
110 callers to only run once -AK */
111static struct dmi_system_id __cpuinitdata processor_power_dmi_table[] = {
112 { set_max_cstate, "Clevo 5600D", {
113 DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
114 DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")},
115 (void *)2},
116 { set_max_cstate, "Pavilion zv5000", {
117 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
118 DMI_MATCH(DMI_PRODUCT_NAME,"Pavilion zv5000 (DS502A#ABA)")},
119 (void *)1},
120 { set_max_cstate, "Asus L8400B", {
121 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
122 DMI_MATCH(DMI_PRODUCT_NAME,"L8400B series Notebook PC")},
123 (void *)1},
124 {},
125};
126
127
128/*
129 * Callers should disable interrupts before the call and enable
130 * interrupts after return.
131 */
132static void acpi_safe_halt(void)
133{
134 current_thread_info()->status &= ~TS_POLLING;
135 /*
136 * TS_POLLING-cleared state must be visible before we
137 * test NEED_RESCHED:
138 */
139 smp_mb();
140 if (!need_resched()) {
141 safe_halt();
142 local_irq_disable();
143 }
144 current_thread_info()->status |= TS_POLLING;
145}
146
147#ifdef ARCH_APICTIMER_STOPS_ON_C3
148
149/*
150 * Some BIOS implementations switch to C3 in the published C2 state.
151 * This seems to be a common problem on AMD boxen, but other vendors
152 * are affected too. We pick the most conservative approach: we assume
153 * that the local APIC stops in both C2 and C3.
154 */
155static void lapic_timer_check_state(int state, struct acpi_processor *pr,
156 struct acpi_processor_cx *cx)
157{
158 struct acpi_processor_power *pwr = &pr->power;
159 u8 type = local_apic_timer_c2_ok ? ACPI_STATE_C3 : ACPI_STATE_C2;
160
161 if (cpu_has(&cpu_data(pr->id), X86_FEATURE_ARAT))
162 return;
163
164 if (amd_e400_c1e_detected)
165 type = ACPI_STATE_C1;
166
167 /*
168 * Check, if one of the previous states already marked the lapic
169 * unstable
170 */
171 if (pwr->timer_broadcast_on_state < state)
172 return;
173
174 if (cx->type >= type)
175 pr->power.timer_broadcast_on_state = state;
176}
177
178static void __lapic_timer_propagate_broadcast(void *arg)
179{
180 struct acpi_processor *pr = (struct acpi_processor *) arg;
181 unsigned long reason;
182
183 reason = pr->power.timer_broadcast_on_state < INT_MAX ?
184 CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF;
185
186 clockevents_notify(reason, &pr->id);
187}
188
189static void lapic_timer_propagate_broadcast(struct acpi_processor *pr)
190{
191 smp_call_function_single(pr->id, __lapic_timer_propagate_broadcast,
192 (void *)pr, 1);
193}
194
195/* Power(C) State timer broadcast control */
196static void lapic_timer_state_broadcast(struct acpi_processor *pr,
197 struct acpi_processor_cx *cx,
198 int broadcast)
199{
200 int state = cx - pr->power.states;
201
202 if (state >= pr->power.timer_broadcast_on_state) {
203 unsigned long reason;
204
205 reason = broadcast ? CLOCK_EVT_NOTIFY_BROADCAST_ENTER :
206 CLOCK_EVT_NOTIFY_BROADCAST_EXIT;
207 clockevents_notify(reason, &pr->id);
208 }
209}
210
211#else
212
213static void lapic_timer_check_state(int state, struct acpi_processor *pr,
214 struct acpi_processor_cx *cstate) { }
215static void lapic_timer_propagate_broadcast(struct acpi_processor *pr) { }
216static void lapic_timer_state_broadcast(struct acpi_processor *pr,
217 struct acpi_processor_cx *cx,
218 int broadcast)
219{
220}
221
222#endif
223
224/*
225 * Suspend / resume control
226 */
227static int acpi_idle_suspend;
228static u32 saved_bm_rld;
229
230static void acpi_idle_bm_rld_save(void)
231{
232 acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &saved_bm_rld);
233}
234static void acpi_idle_bm_rld_restore(void)
235{
236 u32 resumed_bm_rld;
237
238 acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &resumed_bm_rld);
239
240 if (resumed_bm_rld != saved_bm_rld)
241 acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, saved_bm_rld);
242}
243
244int acpi_processor_suspend(struct acpi_device * device, pm_message_t state)
245{
246 if (acpi_idle_suspend == 1)
247 return 0;
248
249 acpi_idle_bm_rld_save();
250 acpi_idle_suspend = 1;
251 return 0;
252}
253
254int acpi_processor_resume(struct acpi_device * device)
255{
256 if (acpi_idle_suspend == 0)
257 return 0;
258
259 acpi_idle_bm_rld_restore();
260 acpi_idle_suspend = 0;
261 return 0;
262}
263
264#if defined(CONFIG_X86)
265static void tsc_check_state(int state)
266{
267 switch (boot_cpu_data.x86_vendor) {
268 case X86_VENDOR_AMD:
269 case X86_VENDOR_INTEL:
270 /*
271 * AMD Fam10h TSC will tick in all
272 * C/P/S0/S1 states when this bit is set.
273 */
274 if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
275 return;
276
277 /*FALL THROUGH*/
278 default:
279 /* TSC could halt in idle, so notify users */
280 if (state > ACPI_STATE_C1)
281 mark_tsc_unstable("TSC halts in idle");
282 }
283}
284#else
285static void tsc_check_state(int state) { return; }
286#endif
287
288static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
289{
290
291 if (!pr)
292 return -EINVAL;
293
294 if (!pr->pblk)
295 return -ENODEV;
296
297 /* if info is obtained from pblk/fadt, type equals state */
298 pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
299 pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;
300
301#ifndef CONFIG_HOTPLUG_CPU
302 /*
303 * Check for P_LVL2_UP flag before entering C2 and above on
304 * an SMP system.
305 */
306 if ((num_online_cpus() > 1) &&
307 !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
308 return -ENODEV;
309#endif
310
311 /* determine C2 and C3 address from pblk */
312 pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4;
313 pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;
314
315 /* determine latencies from FADT */
316 pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.C2latency;
317 pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.C3latency;
318
319 /*
320 * FADT specified C2 latency must be less than or equal to
321 * 100 microseconds.
322 */
323 if (acpi_gbl_FADT.C2latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
324 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
325 "C2 latency too large [%d]\n", acpi_gbl_FADT.C2latency));
326 /* invalidate C2 */
327 pr->power.states[ACPI_STATE_C2].address = 0;
328 }
329
330 /*
331 * FADT supplied C3 latency must be less than or equal to
332 * 1000 microseconds.
333 */
334 if (acpi_gbl_FADT.C3latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
335 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
336 "C3 latency too large [%d]\n", acpi_gbl_FADT.C3latency));
337 /* invalidate C3 */
338 pr->power.states[ACPI_STATE_C3].address = 0;
339 }
340
341 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
342 "lvl2[0x%08x] lvl3[0x%08x]\n",
343 pr->power.states[ACPI_STATE_C2].address,
344 pr->power.states[ACPI_STATE_C3].address));
345
346 return 0;
347}
348
349static int acpi_processor_get_power_info_default(struct acpi_processor *pr)
350{
351 if (!pr->power.states[ACPI_STATE_C1].valid) {
352 /* set the first C-State to C1 */
353 /* all processors need to support C1 */
354 pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
355 pr->power.states[ACPI_STATE_C1].valid = 1;
356 pr->power.states[ACPI_STATE_C1].entry_method = ACPI_CSTATE_HALT;
357 }
358 /* the C0 state only exists as a filler in our array */
359 pr->power.states[ACPI_STATE_C0].valid = 1;
360 return 0;
361}
362
363static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
364{
365 acpi_status status = 0;
366 u64 count;
367 int current_count;
368 int i;
369 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
370 union acpi_object *cst;
371
372
373 if (nocst)
374 return -ENODEV;
375
376 current_count = 0;
377
378 status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer);
379 if (ACPI_FAILURE(status)) {
380 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n"));
381 return -ENODEV;
382 }
383
384 cst = buffer.pointer;
385
386 /* There must be at least 2 elements */
387 if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) {
388 printk(KERN_ERR PREFIX "not enough elements in _CST\n");
389 status = -EFAULT;
390 goto end;
391 }
392
393 count = cst->package.elements[0].integer.value;
394
395 /* Validate number of power states. */
396 if (count < 1 || count != cst->package.count - 1) {
397 printk(KERN_ERR PREFIX "count given by _CST is not valid\n");
398 status = -EFAULT;
399 goto end;
400 }
401
402 /* Tell driver that at least _CST is supported. */
403 pr->flags.has_cst = 1;
404
405 for (i = 1; i <= count; i++) {
406 union acpi_object *element;
407 union acpi_object *obj;
408 struct acpi_power_register *reg;
409 struct acpi_processor_cx cx;
410
411 memset(&cx, 0, sizeof(cx));
412
413 element = &(cst->package.elements[i]);
414 if (element->type != ACPI_TYPE_PACKAGE)
415 continue;
416
417 if (element->package.count != 4)
418 continue;
419
420 obj = &(element->package.elements[0]);
421
422 if (obj->type != ACPI_TYPE_BUFFER)
423 continue;
424
425 reg = (struct acpi_power_register *)obj->buffer.pointer;
426
427 if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
428 (reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE))
429 continue;
430
431 /* There should be an easy way to extract an integer... */
432 obj = &(element->package.elements[1]);
433 if (obj->type != ACPI_TYPE_INTEGER)
434 continue;
435
436 cx.type = obj->integer.value;
437 /*
438 * Some buggy BIOSes won't list C1 in _CST -
439 * Let acpi_processor_get_power_info_default() handle them later
440 */
441 if (i == 1 && cx.type != ACPI_STATE_C1)
442 current_count++;
443
444 cx.address = reg->address;
445 cx.index = current_count + 1;
446
447 cx.entry_method = ACPI_CSTATE_SYSTEMIO;
448 if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
449 if (acpi_processor_ffh_cstate_probe
450 (pr->id, &cx, reg) == 0) {
451 cx.entry_method = ACPI_CSTATE_FFH;
452 } else if (cx.type == ACPI_STATE_C1) {
453 /*
454 * C1 is a special case where FIXED_HARDWARE
455 * can be handled in non-MWAIT way as well.
456 * In that case, save this _CST entry info.
457 * Otherwise, ignore this info and continue.
458 */
459 cx.entry_method = ACPI_CSTATE_HALT;
460 snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
461 } else {
462 continue;
463 }
464 if (cx.type == ACPI_STATE_C1 &&
465 (boot_option_idle_override == IDLE_NOMWAIT)) {
466 /*
467 * In most cases the C1 space_id obtained from
468 * _CST object is FIXED_HARDWARE access mode.
469 * But when the option of idle=halt is added,
470 * the entry_method type should be changed from
471 * CSTATE_FFH to CSTATE_HALT.
472 * When the option of idle=nomwait is added,
473 * the C1 entry_method type should be
474 * CSTATE_HALT.
475 */
476 cx.entry_method = ACPI_CSTATE_HALT;
477 snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
478 }
479 } else {
480 snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x",
481 cx.address);
482 }
483
484 if (cx.type == ACPI_STATE_C1) {
485 cx.valid = 1;
486 }
487
488 obj = &(element->package.elements[2]);
489 if (obj->type != ACPI_TYPE_INTEGER)
490 continue;
491
492 cx.latency = obj->integer.value;
493
494 obj = &(element->package.elements[3]);
495 if (obj->type != ACPI_TYPE_INTEGER)
496 continue;
497
498 cx.power = obj->integer.value;
499
500 current_count++;
501 memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx));
502
503 /*
504 * We support total ACPI_PROCESSOR_MAX_POWER - 1
505 * (From 1 through ACPI_PROCESSOR_MAX_POWER - 1)
506 */
507 if (current_count >= (ACPI_PROCESSOR_MAX_POWER - 1)) {
508 printk(KERN_WARNING
509 "Limiting number of power states to max (%d)\n",
510 ACPI_PROCESSOR_MAX_POWER);
511 printk(KERN_WARNING
512 "Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
513 break;
514 }
515 }
516
517 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d power states\n",
518 current_count));
519
520 /* Validate number of power states discovered */
521 if (current_count < 2)
522 status = -EFAULT;
523
524 end:
525 kfree(buffer.pointer);
526
527 return status;
528}
529
530static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
531 struct acpi_processor_cx *cx)
532{
533 static int bm_check_flag = -1;
534 static int bm_control_flag = -1;
535
536
537 if (!cx->address)
538 return;
539
540 /*
541 * PIIX4 Erratum #18: We don't support C3 when Type-F (fast)
542 * DMA transfers are used by any ISA device to avoid livelock.
543 * Note that we could disable Type-F DMA (as recommended by
544 * the erratum), but this is known to disrupt certain ISA
545 * devices thus we take the conservative approach.
546 */
547 else if (errata.piix4.fdma) {
548 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
549 "C3 not supported on PIIX4 with Type-F DMA\n"));
550 return;
551 }
552
553 /* All the logic here assumes flags.bm_check is same across all CPUs */
554 if (bm_check_flag == -1) {
555 /* Determine whether bm_check is needed based on CPU */
556 acpi_processor_power_init_bm_check(&(pr->flags), pr->id);
557 bm_check_flag = pr->flags.bm_check;
558 bm_control_flag = pr->flags.bm_control;
559 } else {
560 pr->flags.bm_check = bm_check_flag;
561 pr->flags.bm_control = bm_control_flag;
562 }
563
564 if (pr->flags.bm_check) {
565 if (!pr->flags.bm_control) {
566 if (pr->flags.has_cst != 1) {
567 /* bus mastering control is necessary */
568 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
569 "C3 support requires BM control\n"));
570 return;
571 } else {
572 /* Here we enter C3 without bus mastering */
573 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
574 "C3 support without BM control\n"));
575 }
576 }
577 } else {
578 /*
579 * WBINVD should be set in fadt, for C3 state to be
580 * supported on when bm_check is not required.
581 */
582 if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) {
583 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
584 "Cache invalidation should work properly"
585 " for C3 to be enabled on SMP systems\n"));
586 return;
587 }
588 }
589
590 /*
591 * Otherwise we've met all of our C3 requirements.
592 * Normalize the C3 latency to expidite policy. Enable
593 * checking of bus mastering status (bm_check) so we can
594 * use this in our C3 policy
595 */
596 cx->valid = 1;
597
598 cx->latency_ticks = cx->latency;
599 /*
600 * On older chipsets, BM_RLD needs to be set
601 * in order for Bus Master activity to wake the
602 * system from C3. Newer chipsets handle DMA
603 * during C3 automatically and BM_RLD is a NOP.
604 * In either case, the proper way to
605 * handle BM_RLD is to set it and leave it set.
606 */
607 acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
608
609 return;
610}
611
static int acpi_processor_power_verify(struct acpi_processor *pr)
{
	unsigned int i;
	unsigned int working = 0;

	pr->power.timer_broadcast_on_state = INT_MAX;

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		struct acpi_processor_cx *cx = &pr->power.states[i];

		switch (cx->type) {
		case ACPI_STATE_C1:
			cx->valid = 1;
			break;

		case ACPI_STATE_C2:
			if (!cx->address)
				break;
			cx->valid = 1;
			cx->latency_ticks = cx->latency; /* Normalize latency */
			break;

		case ACPI_STATE_C3:
			acpi_processor_power_verify_c3(pr, cx);
			break;
		}
		if (!cx->valid)
			continue;

		lapic_timer_check_state(i, pr, cx);
		tsc_check_state(cx->type);
		working++;
	}

	lapic_timer_propagate_broadcast(pr);

	return working;
}

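/*
 * Gather C-state data for this processor: prefer _CST, fall back to the
 * FADT (P_BLK) values when _CST is absent, then apply defaults and
 * validate what was found.
 */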
static int acpi_processor_get_power_info(struct acpi_processor *pr)
{
	unsigned int i;
	int result;

	/*
	 * NOTE: the idle thread may not be running while calling
	 * this function.
	 */

	/* Zero-initialize all the C-state info. */
	memset(pr->power.states, 0, sizeof(pr->power.states));

	result = acpi_processor_get_power_info_cst(pr);
	if (result == -ENODEV)
		result = acpi_processor_get_power_info_fadt(pr);

	if (result)
		return result;

	acpi_processor_get_power_info_default(pr);

	pr->power.count = acpi_processor_power_verify(pr);

	/*
	 * If at least one state of type C2 or C3 is available, mark this
	 * CPU as being "idle manageable".
	 */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		if (pr->power.states[i].valid) {
			pr->power.count = i;
			if (pr->power.states[i].type >= ACPI_STATE_C2)
				pr->flags.power = 1;
		}
	}

	return 0;
}
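
/*
 * acpi_processor_get_power_info() leaves pr->power.count holding the
 * index of the deepest valid state: the final loop overwrites the
 * working-state count returned by acpi_processor_power_verify().
 */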

/**
 * acpi_idle_bm_check - checks if bus master activity was detected
 *
 * Returns nonzero if bus master activity was detected, 0 otherwise.
 */
static int acpi_idle_bm_check(void)
{
	u32 bm_status = 0;

	if (bm_check_disable)
		return 0;

	acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
	if (bm_status)
		acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
	/*
	 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
	 * the true state of bus mastering activity, forcing us to
	 * manually check the BMIDEA bit of each IDE channel.
	 */
	else if (errata.piix4.bmisx) {
		if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
		    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
			bm_status = 1;
	}
	return bm_status;
}
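
/*
 * BM_STS is a write-1-to-clear status bit, hence the
 * acpi_write_bit_register(..., 1) above: writing 1 acknowledges and
 * clears the pending bus-master event.
 */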

/**
 * acpi_idle_do_entry - a helper function that does C2 and C3 type entry
 * @cx: cstate data
 *
 * Caller disables interrupts before the call and enables them after
 * return.
 */
static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
{
	/* Don't trace irqs off for idle */
	stop_critical_timings();
	if (cx->entry_method == ACPI_CSTATE_FFH) {
		/* Call into architectural FFH based C-state */
		acpi_processor_ffh_cstate_enter(cx);
	} else if (cx->entry_method == ACPI_CSTATE_HALT) {
		acpi_safe_halt();
	} else {
		/* IO port based C-state */
		inb(cx->address);
		/*
		 * Dummy wait op - must do something useless after P_LVL2
		 * read because chipsets cannot guarantee that the STPCLK#
		 * signal gets asserted in time to freeze execution properly.
		 */
		inl(acpi_gbl_FADT.xpm_timer_block.address);
	}
	start_critical_timings();
}
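
/*
 * Three entry methods are used above: FFH (Function Fixed Hardware,
 * typically MWAIT on x86), a plain HLT for C1, and a read from the
 * P_LVLx I/O port for SYSTEMIO-described states.  The PM timer read
 * that follows the port read is only a delay; its value is discarded.
 */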

/**
 * acpi_idle_enter_c1 - enters an ACPI C1 state-type
 * @dev: the target CPU
 * @drv: cpuidle driver containing cpuidle state info
 * @index: index of target state
 *
 * This is equivalent to the HALT instruction.
 */
static int acpi_idle_enter_c1(struct cpuidle_device *dev,
			      struct cpuidle_driver *drv, int index)
{
	ktime_t kt1, kt2;
	s64 idle_time;
	struct acpi_processor *pr;
	struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);

	pr = __this_cpu_read(processors);
	dev->last_residency = 0;

	if (unlikely(!pr))
		return -EINVAL;

	local_irq_disable();

	if (acpi_idle_suspend) {
		local_irq_enable();
		cpu_relax();
		return -EBUSY;
	}

	lapic_timer_state_broadcast(pr, cx, 1);
	kt1 = ktime_get_real();
	acpi_idle_do_entry(cx);
	kt2 = ktime_get_real();
	idle_time = ktime_to_us(ktime_sub(kt2, kt1));

	/* Update device last_residency */
	dev->last_residency = (int)idle_time;

	local_irq_enable();
	cx->usage++;
	lapic_timer_state_broadcast(pr, cx, 0);

	return index;
}
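
/*
 * The C1 path above deliberately skips the bus-master and cache-flush
 * handling of the deeper states: C1 keeps caches coherent and is always
 * safe to enter, so only the LAPIC timer broadcast needs bookkeeping.
 */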

/**
 * acpi_idle_play_dead - enters an ACPI state for long-term idle (i.e. off-lining)
 * @dev: the target CPU
 * @index: the index of suggested state
 */
static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
{
	struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);

	ACPI_FLUSH_CPU_CACHE();

	while (1) {
		if (cx->entry_method == ACPI_CSTATE_HALT)
			safe_halt();
		else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) {
			inb(cx->address);
			/* See comment in acpi_idle_do_entry() */
			inl(acpi_gbl_FADT.xpm_timer_block.address);
		} else
			return -ENODEV;
	}

	/* Never reached */
	return 0;
}
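
/*
 * play_dead never returns for supported entry methods: the offlined CPU
 * sits in the loop above until it is reset.  FFH-based states are not
 * handled here, hence the -ENODEV fallback.
 */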

/**
 * acpi_idle_enter_simple - enters an ACPI state without BM handling
 * @dev: the target CPU
 * @drv: cpuidle driver with cpuidle state information
 * @index: the index of suggested state
 */
static int acpi_idle_enter_simple(struct cpuidle_device *dev,
				  struct cpuidle_driver *drv, int index)
{
	struct acpi_processor *pr;
	struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);
	ktime_t kt1, kt2;
	s64 idle_time_ns;
	s64 idle_time;

	pr = __this_cpu_read(processors);
	dev->last_residency = 0;

	if (unlikely(!pr))
		return -EINVAL;

	local_irq_disable();

	if (acpi_idle_suspend) {
		local_irq_enable();
		cpu_relax();
		return -EBUSY;
	}

	if (cx->entry_method != ACPI_CSTATE_FFH) {
		current_thread_info()->status &= ~TS_POLLING;
		/*
		 * TS_POLLING-cleared state must be visible before we test
		 * NEED_RESCHED:
		 */
		smp_mb();

		if (unlikely(need_resched())) {
			current_thread_info()->status |= TS_POLLING;
			local_irq_enable();
			return -EINVAL;
		}
	}

	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 */
	lapic_timer_state_broadcast(pr, cx, 1);

	if (cx->type == ACPI_STATE_C3)
		ACPI_FLUSH_CPU_CACHE();

	kt1 = ktime_get_real();
	/* Tell the scheduler that we are going deep-idle: */
	sched_clock_idle_sleep_event();
	acpi_idle_do_entry(cx);
	kt2 = ktime_get_real();
	idle_time_ns = ktime_to_ns(ktime_sub(kt2, kt1));
	idle_time = idle_time_ns;
	do_div(idle_time, NSEC_PER_USEC);

	/* Update device last_residency */
	dev->last_residency = (int)idle_time;

	/* Tell the scheduler how much we idled: */
	sched_clock_idle_wakeup_event(idle_time_ns);

	local_irq_enable();
	if (cx->entry_method != ACPI_CSTATE_FFH)
		current_thread_info()->status |= TS_POLLING;

	cx->usage++;

	lapic_timer_state_broadcast(pr, cx, 0);
	cx->time += idle_time;
	return index;
}

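/*
 * c3_cpu_count tracks how many CPUs are currently inside a C3 state so
 * that bus master arbitration is only disabled (ARB_DIS) once the last
 * online CPU enters C3, and re-enabled as soon as any CPU leaves it;
 * c3_lock serializes the count and the register writes.
 */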
static int c3_cpu_count;
static DEFINE_RAW_SPINLOCK(c3_lock);

/**
 * acpi_idle_enter_bm - enters C3 with proper BM handling
 * @dev: the target CPU
 * @drv: cpuidle driver containing state data
 * @index: the index of suggested state
 *
 * If BM is detected, the deepest non-C3 idle state is entered instead.
 */
static int acpi_idle_enter_bm(struct cpuidle_device *dev,
			      struct cpuidle_driver *drv, int index)
{
	struct acpi_processor *pr;
	struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);
	ktime_t kt1, kt2;
	s64 idle_time_ns;
	s64 idle_time;

	pr = __this_cpu_read(processors);
	dev->last_residency = 0;

	if (unlikely(!pr))
		return -EINVAL;

	if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
		if (drv->safe_state_index >= 0) {
			return drv->states[drv->safe_state_index].enter(dev,
						drv, drv->safe_state_index);
		} else {
			local_irq_disable();
			if (!acpi_idle_suspend)
				acpi_safe_halt();
			local_irq_enable();
			return -EBUSY;
		}
	}

	local_irq_disable();

	if (acpi_idle_suspend) {
		local_irq_enable();
		cpu_relax();
		return -EBUSY;
	}

	if (cx->entry_method != ACPI_CSTATE_FFH) {
		current_thread_info()->status &= ~TS_POLLING;
		/*
		 * TS_POLLING-cleared state must be visible before we test
		 * NEED_RESCHED:
		 */
		smp_mb();

		if (unlikely(need_resched())) {
			current_thread_info()->status |= TS_POLLING;
			local_irq_enable();
			return -EINVAL;
		}
	}

	acpi_unlazy_tlb(smp_processor_id());

	/* Tell the scheduler that we are going deep-idle: */
	sched_clock_idle_sleep_event();
	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 */
	lapic_timer_state_broadcast(pr, cx, 1);

	kt1 = ktime_get_real();
	/*
	 * Disable bus master:
	 * bm_check implies we need ARB_DIS
	 * !bm_check implies we need cache flush
	 * bm_control implies whether we can do ARB_DIS
	 *
	 * That leaves a case where bm_check is set and bm_control is not
	 * set.  In that case we cannot do much, so we enter C3 without
	 * doing anything.
	 */
	if (pr->flags.bm_check && pr->flags.bm_control) {
		raw_spin_lock(&c3_lock);
		c3_cpu_count++;
		/* Disable bus master arbitration when all CPUs are in C3 */
		if (c3_cpu_count == num_online_cpus())
			acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
		raw_spin_unlock(&c3_lock);
	} else if (!pr->flags.bm_check) {
		ACPI_FLUSH_CPU_CACHE();
	}

	acpi_idle_do_entry(cx);

	/* Re-enable bus master arbitration */
	if (pr->flags.bm_check && pr->flags.bm_control) {
		raw_spin_lock(&c3_lock);
		acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
		c3_cpu_count--;
		raw_spin_unlock(&c3_lock);
	}
	kt2 = ktime_get_real();
	idle_time_ns = ktime_to_ns(ktime_sub(kt2, kt1));
	idle_time = idle_time_ns;
	do_div(idle_time, NSEC_PER_USEC);

	/* Update device last_residency */
	dev->last_residency = (int)idle_time;

	/* Tell the scheduler how much we idled: */
	sched_clock_idle_wakeup_event(idle_time_ns);

	local_irq_enable();
	if (cx->entry_method != ACPI_CSTATE_FFH)
		current_thread_info()->status |= TS_POLLING;

	cx->usage++;

	lapic_timer_state_broadcast(pr, cx, 0);
	cx->time += idle_time;
	return index;
}

/**
 * acpi_processor_setup_cpuidle_cx - prepares and configures the per-CPU
 * cpuidle device
 * @pr: the ACPI processor
 */
static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr)
{
	int i, count = CPUIDLE_DRIVER_STATE_START;
	struct acpi_processor_cx *cx;
	struct cpuidle_state_usage *state_usage;
	struct cpuidle_device *dev = &pr->power.dev;

	if (!pr->flags.power_setup_done)
		return -EINVAL;

	if (pr->flags.power == 0)
		return -EINVAL;

	dev->cpu = pr->id;

	if (max_cstate == 0)
		max_cstate = 1;

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		cx = &pr->power.states[i];
		state_usage = &dev->states_usage[count];

		if (!cx->valid)
			continue;

#ifdef CONFIG_HOTPLUG_CPU
		if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
		    !pr->flags.has_cst &&
		    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
			continue;
#endif

		cpuidle_set_statedata(state_usage, cx);

		count++;
		if (count == CPUIDLE_STATE_MAX)
			break;
	}

	dev->state_count = count;

	if (!count)
		return -EINVAL;

	return 0;
}
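
/*
 * Both setup routines start counting at CPUIDLE_DRIVER_STATE_START
 * because, on architectures with CONFIG_ARCH_HAS_CPU_RELAX, the cpuidle
 * core reserves state 0 for a polling idle loop; the ACPI C-states are
 * installed after it.
 */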

/**
 * acpi_processor_setup_cpuidle_states - prepares and configures the
 * driver-global cpuidle state data, i.e. the idle routines
 * @pr: the ACPI processor
 */
static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
{
	int i, count = CPUIDLE_DRIVER_STATE_START;
	struct acpi_processor_cx *cx;
	struct cpuidle_state *state;
	struct cpuidle_driver *drv = &acpi_idle_driver;

	if (!pr->flags.power_setup_done)
		return -EINVAL;

	if (pr->flags.power == 0)
		return -EINVAL;

	drv->safe_state_index = -1;
	for (i = 0; i < CPUIDLE_STATE_MAX; i++) {
		drv->states[i].name[0] = '\0';
		drv->states[i].desc[0] = '\0';
	}

	if (max_cstate == 0)
		max_cstate = 1;

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		cx = &pr->power.states[i];

		if (!cx->valid)
			continue;

#ifdef CONFIG_HOTPLUG_CPU
		if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
		    !pr->flags.has_cst &&
		    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
			continue;
#endif

		state = &drv->states[count];
		snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
		strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
		state->exit_latency = cx->latency;
		state->target_residency = cx->latency * latency_factor;

		state->flags = 0;
		switch (cx->type) {
		case ACPI_STATE_C1:
			if (cx->entry_method == ACPI_CSTATE_FFH)
				state->flags |= CPUIDLE_FLAG_TIME_VALID;

			state->enter = acpi_idle_enter_c1;
			state->enter_dead = acpi_idle_play_dead;
			drv->safe_state_index = count;
			break;

		case ACPI_STATE_C2:
			state->flags |= CPUIDLE_FLAG_TIME_VALID;
			state->enter = acpi_idle_enter_simple;
			state->enter_dead = acpi_idle_play_dead;
			drv->safe_state_index = count;
			break;

		case ACPI_STATE_C3:
			state->flags |= CPUIDLE_FLAG_TIME_VALID;
			state->enter = pr->flags.bm_check ?
					acpi_idle_enter_bm :
					acpi_idle_enter_simple;
			break;
		}

		count++;
		if (count == CPUIDLE_STATE_MAX)
			break;
	}

	drv->state_count = count;

	if (!count)
		return -EINVAL;

	return 0;
}
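
/*
 * Note the split: acpi_processor_setup_cpuidle_states() fills in the
 * driver-wide state table (names, latencies, enter callbacks) once,
 * while acpi_processor_setup_cpuidle_cx() only wires each CPU's
 * states_usage entries to its own acpi_processor_cx data.  C1 and C2
 * record themselves as safe_state_index, the fallback used by
 * acpi_idle_enter_bm() when bus master activity is detected.
 */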

int acpi_processor_hotplug(struct acpi_processor *pr)
{
	int ret = 0;

	if (disabled_by_idle_boot_param())
		return 0;

	if (!pr)
		return -EINVAL;

	if (nocst)
		return -ENODEV;

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	cpuidle_pause_and_lock();
	cpuidle_disable_device(&pr->power.dev);
	acpi_processor_get_power_info(pr);
	if (pr->flags.power) {
		acpi_processor_setup_cpuidle_cx(pr);
		ret = cpuidle_enable_device(&pr->power.dev);
	}
	cpuidle_resume_and_unlock();

	return ret;
}

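/*
 * Handler for _CST change notifications: rebuild the driver state table
 * once (from the boot CPU) and refresh every online CPU's device while
 * cpuidle is paused.
 */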
int acpi_processor_cst_has_changed(struct acpi_processor *pr)
{
	int cpu;
	struct acpi_processor *_pr;

	if (disabled_by_idle_boot_param())
		return 0;

	if (!pr)
		return -EINVAL;

	if (nocst)
		return -ENODEV;

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	/*
	 * FIXME: Design the ACPI notification to make it once per
	 * system instead of once per CPU.  This condition is a hack
	 * to make sure that the C-state update code is called only once.
	 */
	if (pr->id == 0 && cpuidle_get_driver() == &acpi_idle_driver) {

		cpuidle_pause_and_lock();
		/* Protect against cpu-hotplug */
		get_online_cpus();

		/* Disable all cpuidle devices */
		for_each_online_cpu(cpu) {
			_pr = per_cpu(processors, cpu);
			if (!_pr || !_pr->flags.power_setup_done)
				continue;
			cpuidle_disable_device(&_pr->power.dev);
		}

		/* Populate updated C-state information */
		acpi_processor_setup_cpuidle_states(pr);

		/* Enable all cpuidle devices */
		for_each_online_cpu(cpu) {
			_pr = per_cpu(processors, cpu);
			if (!_pr || !_pr->flags.power_setup_done)
				continue;
			acpi_processor_get_power_info(_pr);
			if (_pr->flags.power) {
				acpi_processor_setup_cpuidle_cx(_pr);
				cpuidle_enable_device(&_pr->power.dev);
			}
		}
		put_online_cpus();
		cpuidle_resume_and_unlock();
	}

	return 0;
}

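/* Number of CPUs that have registered a cpuidle device with acpi_idle. */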
static int acpi_processor_registered;

int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
					struct acpi_device *device)
{
	acpi_status status = 0;
	int retval;
	static int first_run;

	if (disabled_by_idle_boot_param())
		return 0;

	if (!first_run) {
		dmi_check_system(processor_power_dmi_table);
		max_cstate = acpi_processor_cstate_check(max_cstate);
		if (max_cstate < ACPI_C_STATES_MAX)
			printk(KERN_NOTICE
			       "ACPI: processor limited to max C-state %d\n",
			       max_cstate);
		first_run++;
	}

	if (!pr)
		return -EINVAL;

	if (acpi_gbl_FADT.cst_control && !nocst) {
		status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
					    acpi_gbl_FADT.cst_control, 8);
		if (ACPI_FAILURE(status))
			ACPI_EXCEPTION((AE_INFO, status,
					"Notifying BIOS of _CST ability failed"));
	}

	acpi_processor_get_power_info(pr);
	pr->flags.power_setup_done = 1;

	/*
	 * Install the idle handler if processor power management is
	 * supported.  Note that the previously set idle handler will
	 * be used on platforms that only support C1.
	 */
	if (pr->flags.power) {
		/* Register acpi_idle_driver if not already registered */
		if (!acpi_processor_registered) {
			acpi_processor_setup_cpuidle_states(pr);
			retval = cpuidle_register_driver(&acpi_idle_driver);
			if (retval)
				return retval;
			printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n",
			       acpi_idle_driver.name);
		}
		/*
		 * Register the per-CPU cpuidle_device.  The cpuidle driver
		 * must already be registered before registering the device.
		 */
		acpi_processor_setup_cpuidle_cx(pr);
		retval = cpuidle_register_device(&pr->power.dev);
		if (retval) {
			if (acpi_processor_registered == 0)
				cpuidle_unregister_driver(&acpi_idle_driver);
			return retval;
		}
		acpi_processor_registered++;
	}
	return 0;
}

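/*
 * Tear-down counterpart of acpi_processor_power_init(): drop this CPU's
 * cpuidle device and unregister the driver when the last device goes.
 */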
int acpi_processor_power_exit(struct acpi_processor *pr,
			      struct acpi_device *device)
{
	if (disabled_by_idle_boot_param())
		return 0;

	if (pr->flags.power) {
		cpuidle_unregister_device(&pr->power.dev);
		acpi_processor_registered--;
		if (acpi_processor_registered == 0)
			cpuidle_unregister_driver(&acpi_idle_driver);
	}

	pr->flags.power_setup_done = 0;
	return 0;
}