// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * processor_idle - idle state submodule to the ACPI processor driver
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *			- Added processor hotplug support
 * Copyright (C) 2005 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *			- Added support for C3 on SMP
 */
#define pr_fmt(fmt) "ACPI: " fmt

#include <linux/module.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/sched.h>	/* need_resched() */
#include <linux/tick.h>
#include <linux/cpuidle.h>
#include <linux/cpu.h>
#include <linux/minmax.h>
#include <linux/perf_event.h>
#include <acpi/processor.h>
#include <linux/context_tracking.h>

/*
 * Include the apic definitions for x86 to have the APIC timer related defines
 * available also for UP (on SMP it gets magically included via linux/smp.h).
 * asm/acpi.h is not an option, as it would require more include magic. Also
 * creating an empty asm-ia64/apic.h would just trade pest vs. cholera.
 */
#ifdef CONFIG_X86
#include <asm/apic.h>
#include <asm/cpu.h>
#endif

#define ACPI_IDLE_STATE_START	(IS_ENABLED(CONFIG_ARCH_HAS_CPU_RELAX) ? 1 : 0)

static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
module_param(max_cstate, uint, 0400);
static bool nocst __read_mostly;
module_param(nocst, bool, 0400);
static bool bm_check_disable __read_mostly;
module_param(bm_check_disable, bool, 0400);

static unsigned int latency_factor __read_mostly = 2;
module_param(latency_factor, uint, 0644);

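/*
 * The knobs above are module parameters of the processor driver, so they
 * can, for example, be set from the kernel command line:
 * "processor.max_cstate=1" caps the C-state depth (the same syntax the
 * DMI notice below suggests) and "processor.nocst=1" skips _CST
 * evaluation. Boot-time syntax shown for illustration only.
 */
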
static DEFINE_PER_CPU(struct cpuidle_device *, acpi_cpuidle_device);

struct cpuidle_driver acpi_idle_driver = {
	.name =		"acpi_idle",
	.owner =	THIS_MODULE,
};

#ifdef CONFIG_ACPI_PROCESSOR_CSTATE
static
DEFINE_PER_CPU(struct acpi_processor_cx * [CPUIDLE_STATE_MAX], acpi_cstate);

static int disabled_by_idle_boot_param(void)
{
	return boot_option_idle_override == IDLE_POLL ||
	       boot_option_idle_override == IDLE_HALT;
}

/*
 * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
 * For now disable this. Probably a bug somewhere else.
 *
 * To skip this limit, boot/load with a large max_cstate limit.
 */
static int set_max_cstate(const struct dmi_system_id *id)
{
	if (max_cstate > ACPI_PROCESSOR_MAX_POWER)
		return 0;

	pr_notice("%s detected - limiting to C%ld max_cstate."
		  " Override with \"processor.max_cstate=%d\"\n", id->ident,
		  (long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1);

	max_cstate = (long)id->driver_data;

	return 0;
}

static const struct dmi_system_id processor_power_dmi_table[] = {
	{ set_max_cstate, "Clevo 5600D", {
	  DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
	  DMI_MATCH(DMI_BIOS_VERSION, "SHE845M0.86C.0013.D.0302131307")},
	 (void *)2},
	{ set_max_cstate, "Pavilion zv5000", {
	  DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
	  DMI_MATCH(DMI_PRODUCT_NAME, "Pavilion zv5000 (DS502A#ABA)")},
	 (void *)1},
	{ set_max_cstate, "Asus L8400B", {
	  DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
	  DMI_MATCH(DMI_PRODUCT_NAME, "L8400B series Notebook PC")},
	 (void *)1},
	{},
};

/*
 * Callers should disable interrupts before the call and enable
 * interrupts after return.
 */
static void __cpuidle acpi_safe_halt(void)
{
	if (!tif_need_resched()) {
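		/*
		 * raw_safe_halt() returns with interrupts enabled, so
		 * disable them again afterwards to honor the caller
		 * contract described above.
		 */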
		raw_safe_halt();
		raw_local_irq_disable();
	}
}

#ifdef ARCH_APICTIMER_STOPS_ON_C3

/*
 * Some BIOS implementations switch to C3 in the published C2 state.
 * This seems to be a common problem on AMD boxen, but other vendors
 * are affected too. We pick the most conservative approach: we assume
 * that the local APIC stops in both C2 and C3.
 */
static void lapic_timer_check_state(int state, struct acpi_processor *pr,
				    struct acpi_processor_cx *cx)
{
	struct acpi_processor_power *pwr = &pr->power;
	u8 type = local_apic_timer_c2_ok ? ACPI_STATE_C3 : ACPI_STATE_C2;

	if (cpu_has(&cpu_data(pr->id), X86_FEATURE_ARAT))
		return;

	if (boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E))
		type = ACPI_STATE_C1;

	/*
	 * Check whether one of the previous states already marked the
	 * lapic unstable.
	 */
	if (pwr->timer_broadcast_on_state < state)
		return;

	if (cx->type >= type)
		pr->power.timer_broadcast_on_state = state;
}

static void __lapic_timer_propagate_broadcast(void *arg)
{
	struct acpi_processor *pr = arg;

	if (pr->power.timer_broadcast_on_state < INT_MAX)
		tick_broadcast_enable();
	else
		tick_broadcast_disable();
}

static void lapic_timer_propagate_broadcast(struct acpi_processor *pr)
{
	smp_call_function_single(pr->id, __lapic_timer_propagate_broadcast,
				 (void *)pr, 1);
}

/* Power(C) State timer broadcast control */
static bool lapic_timer_needs_broadcast(struct acpi_processor *pr,
					struct acpi_processor_cx *cx)
{
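	/*
	 * cx - pr->power.states is the index of @cx in the states array;
	 * broadcast is needed from the first state in which the local
	 * APIC timer is known to stop, onwards.
	 */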
	return cx - pr->power.states >= pr->power.timer_broadcast_on_state;
}

#else

static void lapic_timer_check_state(int state, struct acpi_processor *pr,
				    struct acpi_processor_cx *cstate) { }
static void lapic_timer_propagate_broadcast(struct acpi_processor *pr) { }

static bool lapic_timer_needs_broadcast(struct acpi_processor *pr,
					struct acpi_processor_cx *cx)
{
	return false;
}

#endif

#if defined(CONFIG_X86)
static void tsc_check_state(int state)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_HYGON:
	case X86_VENDOR_AMD:
	case X86_VENDOR_INTEL:
	case X86_VENDOR_CENTAUR:
	case X86_VENDOR_ZHAOXIN:
		/*
		 * AMD Fam10h TSC will tick in all
		 * C/P/S0/S1 states when this bit is set.
		 */
		if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
			return;
		fallthrough;
	default:
		/* TSC could halt in idle, so notify users */
		if (state > ACPI_STATE_C1)
			mark_tsc_unstable("TSC halts in idle");
	}
}
#else
static void tsc_check_state(int state) { return; }
#endif

static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
{
	if (!pr->pblk)
		return -ENODEV;

	/* if info is obtained from pblk/fadt, type equals state */
	pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
	pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;

#ifndef CONFIG_HOTPLUG_CPU
	/*
	 * Check for P_LVL2_UP flag before entering C2 and above on
	 * an SMP system.
	 */
	if ((num_online_cpus() > 1) &&
	    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
		return -ENODEV;
#endif

	/* determine C2 and C3 address from pblk */
	pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4;
	pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;

	/* determine latencies from FADT */
	pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.c2_latency;
	pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.c3_latency;

	/*
	 * FADT specified C2 latency must be less than or equal to
	 * 100 microseconds.
	 */
	if (acpi_gbl_FADT.c2_latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
		acpi_handle_debug(pr->handle, "C2 latency too large [%d]\n",
				  acpi_gbl_FADT.c2_latency);
		/* invalidate C2 */
		pr->power.states[ACPI_STATE_C2].address = 0;
	}

	/*
	 * FADT supplied C3 latency must be less than or equal to
	 * 1000 microseconds.
	 */
	if (acpi_gbl_FADT.c3_latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
		acpi_handle_debug(pr->handle, "C3 latency too large [%d]\n",
				  acpi_gbl_FADT.c3_latency);
		/* invalidate C3 */
		pr->power.states[ACPI_STATE_C3].address = 0;
	}

	acpi_handle_debug(pr->handle, "lvl2[0x%08x] lvl3[0x%08x]\n",
			  pr->power.states[ACPI_STATE_C2].address,
			  pr->power.states[ACPI_STATE_C3].address);

	snprintf(pr->power.states[ACPI_STATE_C2].desc,
		 ACPI_CX_DESC_LEN, "ACPI P_LVL2 IOPORT 0x%x",
		 pr->power.states[ACPI_STATE_C2].address);
	snprintf(pr->power.states[ACPI_STATE_C3].desc,
		 ACPI_CX_DESC_LEN, "ACPI P_LVL3 IOPORT 0x%x",
		 pr->power.states[ACPI_STATE_C3].address);

	return 0;
}

static int acpi_processor_get_power_info_default(struct acpi_processor *pr)
{
	if (!pr->power.states[ACPI_STATE_C1].valid) {
		/* set the first C-State to C1 */
		/* all processors need to support C1 */
		pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
		pr->power.states[ACPI_STATE_C1].valid = 1;
		pr->power.states[ACPI_STATE_C1].entry_method = ACPI_CSTATE_HALT;

		snprintf(pr->power.states[ACPI_STATE_C1].desc,
			 ACPI_CX_DESC_LEN, "ACPI HLT");
	}
	/* the C0 state only exists as a filler in our array */
	pr->power.states[ACPI_STATE_C0].valid = 1;
	return 0;
}

static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
{
	int ret;

	if (nocst)
		return -ENODEV;

	ret = acpi_processor_evaluate_cst(pr->handle, pr->id, &pr->power);
	if (ret)
		return ret;

	if (!pr->power.count)
		return -EFAULT;

	pr->flags.has_cst = 1;
	return 0;
}

static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
					   struct acpi_processor_cx *cx)
{
	static int bm_check_flag = -1;
	static int bm_control_flag = -1;

	if (!cx->address)
		return;

	/*
	 * PIIX4 Erratum #18: We don't support C3 when Type-F (fast)
	 * DMA transfers are used by any ISA device to avoid livelock.
	 * Note that we could disable Type-F DMA (as recommended by
	 * the erratum), but this is known to disrupt certain ISA
	 * devices thus we take the conservative approach.
	 */
	if (errata.piix4.fdma) {
		acpi_handle_debug(pr->handle,
				  "C3 not supported on PIIX4 with Type-F DMA\n");
		return;
	}

	/* All the logic here assumes flags.bm_check is same across all CPUs */
	if (bm_check_flag == -1) {
		/* Determine whether bm_check is needed based on CPU */
		acpi_processor_power_init_bm_check(&(pr->flags), pr->id);
		bm_check_flag = pr->flags.bm_check;
		bm_control_flag = pr->flags.bm_control;
	} else {
		pr->flags.bm_check = bm_check_flag;
		pr->flags.bm_control = bm_control_flag;
	}

	if (pr->flags.bm_check) {
		if (!pr->flags.bm_control) {
			if (pr->flags.has_cst != 1) {
				/* bus mastering control is necessary */
				acpi_handle_debug(pr->handle,
						  "C3 support requires BM control\n");
				return;
			} else {
				/* Here we enter C3 without bus mastering */
				acpi_handle_debug(pr->handle,
						  "C3 support without BM control\n");
			}
		}
	} else {
		/*
		 * WBINVD should be set in the FADT for the C3 state to be
		 * supported when bm_check is not required.
		 */
		if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) {
			acpi_handle_debug(pr->handle,
					  "Cache invalidation should work properly"
					  " for C3 to be enabled on SMP systems\n");
			return;
		}
	}

	/*
	 * Otherwise we've met all of our C3 requirements.
	 * Normalize the C3 latency to expedite policy. Enable
	 * checking of bus mastering status (bm_check) so we can
	 * use this in our C3 policy.
	 */
	cx->valid = 1;

	/*
	 * On older chipsets, BM_RLD needs to be set
	 * in order for Bus Master activity to wake the
	 * system from C3. Newer chipsets handle DMA
	 * during C3 automatically and BM_RLD is a NOP.
	 * In either case, the proper way to
	 * handle BM_RLD is to set it and leave it set.
	 */
	acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
}

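/*
 * Insertion-sort the latency values of the C-states in place: only the
 * .latency fields are swapped, so every state keeps its type, address and
 * entry method, and invalid entries are skipped rather than moved.
 */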
static void acpi_cst_latency_sort(struct acpi_processor_cx *states, size_t length)
{
	int i, j, k;

	for (i = 1; i < length; i++) {
		if (!states[i].valid)
			continue;

		for (j = i - 1, k = i; j >= 0; j--) {
			if (!states[j].valid)
				continue;

			if (states[j].latency > states[k].latency)
				swap(states[j].latency, states[k].latency);

			k = j;
		}
	}
}

static int acpi_processor_power_verify(struct acpi_processor *pr)
{
	unsigned int i;
	unsigned int working = 0;
	unsigned int last_latency = 0;
	unsigned int last_type = 0;
	bool buggy_latency = false;

	pr->power.timer_broadcast_on_state = INT_MAX;

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		struct acpi_processor_cx *cx = &pr->power.states[i];

		switch (cx->type) {
		case ACPI_STATE_C1:
			cx->valid = 1;
			break;

		case ACPI_STATE_C2:
			if (!cx->address)
				break;
			cx->valid = 1;
			break;

		case ACPI_STATE_C3:
			acpi_processor_power_verify_c3(pr, cx);
			break;
		}
		if (!cx->valid)
			continue;
		if (cx->type >= last_type && cx->latency < last_latency)
			buggy_latency = true;
		last_latency = cx->latency;
		last_type = cx->type;

		lapic_timer_check_state(i, pr, cx);
		tsc_check_state(cx->type);
		working++;
	}

	if (buggy_latency) {
		pr_notice("FW issue: working around C-state latencies out of order\n");
		acpi_cst_latency_sort(&pr->power.states[1], max_cstate);
	}

	lapic_timer_propagate_broadcast(pr);

	return working;
}

static int acpi_processor_get_cstate_info(struct acpi_processor *pr)
{
	unsigned int i;
	int result;

	/* NOTE: the idle thread may not be running while calling
	 * this function */

	/* Zero initialize all the C-states info. */
	memset(pr->power.states, 0, sizeof(pr->power.states));

	result = acpi_processor_get_power_info_cst(pr);
	if (result == -ENODEV)
		result = acpi_processor_get_power_info_fadt(pr);

	if (result)
		return result;

	acpi_processor_get_power_info_default(pr);

	pr->power.count = acpi_processor_power_verify(pr);

	/*
	 * if one state of type C2 or C3 is available, mark this
	 * CPU as being "idle manageable"
	 */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		if (pr->power.states[i].valid) {
			pr->power.count = i;
			pr->flags.power = 1;
		}
	}

	return 0;
}

/**
 * acpi_idle_bm_check - checks if bus master activity was detected
 */
static int acpi_idle_bm_check(void)
{
	u32 bm_status = 0;

	if (bm_check_disable)
		return 0;

	acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
	if (bm_status)
		acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
	/*
	 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
	 * the true state of bus mastering activity; forcing us to
	 * manually check the BMIDEA bit of each IDE channel.
	 */
	else if (errata.piix4.bmisx) {
		if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
		    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
			bm_status = 1;
	}
	return bm_status;
}

static __cpuidle void io_idle(unsigned long addr)
{
	/* IO port based C-state */
	inb(addr);

#ifdef CONFIG_X86
	/* No delay is needed if we are in guest */
	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return;
	/*
	 * Modern (>=Nehalem) Intel systems use ACPI via intel_idle,
	 * not this code.  Assume that any Intel systems using this
	 * are ancient and may need the dummy wait.  This also assumes
	 * that the motivating chipset issue was Intel-only.
	 */
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return;
#endif
	/*
	 * Dummy wait op - must do something useless after P_LVL2 read
	 * because chipsets cannot guarantee that STPCLK# signal gets
	 * asserted in time to freeze execution properly.
	 *
	 * This workaround has been in place since the original ACPI
	 * implementation was merged, circa 2002.
	 *
	 * If a profile is pointing to this instruction, please first
	 * consider moving your system to a more modern idle
	 * mechanism.
	 */
	inl(acpi_gbl_FADT.xpm_timer_block.address);
}

/**
 * acpi_idle_do_entry - enter idle state using the appropriate method
 * @cx: cstate data
 *
 * Caller disables interrupt before call and enables interrupt after return.
 */
static void __cpuidle acpi_idle_do_entry(struct acpi_processor_cx *cx)
{
	perf_lopwr_cb(true);

	if (cx->entry_method == ACPI_CSTATE_FFH) {
		/* Call into architectural FFH based C-state */
		acpi_processor_ffh_cstate_enter(cx);
	} else if (cx->entry_method == ACPI_CSTATE_HALT) {
		acpi_safe_halt();
	} else {
		io_idle(cx->address);
	}

	perf_lopwr_cb(false);
}

/**
 * acpi_idle_play_dead - enters an ACPI state for long-term idle (i.e. off-lining)
 * @dev: the target CPU
 * @index: the index of suggested state
 */
static void acpi_idle_play_dead(struct cpuidle_device *dev, int index)
{
	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);

	ACPI_FLUSH_CPU_CACHE();

	while (1) {
		if (cx->entry_method == ACPI_CSTATE_HALT)
			raw_safe_halt();
		else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO)
			io_idle(cx->address);
		else
			return;
	}
}

static __always_inline bool acpi_idle_fallback_to_c1(struct acpi_processor *pr)
{
	return IS_ENABLED(CONFIG_HOTPLUG_CPU) && !pr->flags.has_cst &&
	       !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED);
}

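/*
 * c3_cpu_count tracks how many CPUs are in C3 with bus-master arbitration
 * disablement pending: ARB_DIS is set only once every online CPU has
 * entered, and is cleared as soon as any CPU leaves, all under c3_lock.
 */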
static int c3_cpu_count;
static DEFINE_RAW_SPINLOCK(c3_lock);

/**
 * acpi_idle_enter_bm - enters C3 with proper BM handling
 * @drv: cpuidle driver
 * @pr: Target processor
 * @cx: Target state context
 * @index: index of target state
 */
static int __cpuidle acpi_idle_enter_bm(struct cpuidle_driver *drv,
					struct acpi_processor *pr,
					struct acpi_processor_cx *cx,
					int index)
{
	static struct acpi_processor_cx safe_cx = {
		.entry_method = ACPI_CSTATE_HALT,
	};

	/*
	 * disable bus master
	 * bm_check implies we need ARB_DIS
	 * bm_control implies whether we can do ARB_DIS
	 *
	 * That leaves a case where bm_check is set and bm_control is not set.
	 * In that case we cannot do much, we enter C3 without doing anything.
	 */
	bool dis_bm = pr->flags.bm_control;

	instrumentation_begin();

	/* If we can skip BM, demote to a safe state. */
	if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
		dis_bm = false;
		index = drv->safe_state_index;
		if (index >= 0) {
			cx = this_cpu_read(acpi_cstate[index]);
		} else {
			cx = &safe_cx;
			index = -EBUSY;
		}
	}

	if (dis_bm) {
		raw_spin_lock(&c3_lock);
		c3_cpu_count++;
		/* Disable bus master arbitration when all CPUs are in C3 */
		if (c3_cpu_count == num_online_cpus())
			acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
		raw_spin_unlock(&c3_lock);
	}

	ct_cpuidle_enter();

	acpi_idle_do_entry(cx);

	ct_cpuidle_exit();

	/* Re-enable bus master arbitration */
	if (dis_bm) {
		raw_spin_lock(&c3_lock);
		acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
		c3_cpu_count--;
		raw_spin_unlock(&c3_lock);
	}

	instrumentation_end();

	return index;
}

static int __cpuidle acpi_idle_enter(struct cpuidle_device *dev,
				     struct cpuidle_driver *drv, int index)
{
	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
	struct acpi_processor *pr;

	pr = __this_cpu_read(processors);
	if (unlikely(!pr))
		return -EINVAL;

	if (cx->type != ACPI_STATE_C1) {
		if (cx->type == ACPI_STATE_C3 && pr->flags.bm_check)
			return acpi_idle_enter_bm(drv, pr, cx, index);

		/* C2 to C1 demotion. */
		if (acpi_idle_fallback_to_c1(pr) && num_online_cpus() > 1) {
			index = ACPI_IDLE_STATE_START;
			cx = per_cpu(acpi_cstate[index], dev->cpu);
		}
	}

	if (cx->type == ACPI_STATE_C3)
		ACPI_FLUSH_CPU_CACHE();

	acpi_idle_do_entry(cx);

	return index;
}

static int __cpuidle acpi_idle_enter_s2idle(struct cpuidle_device *dev,
					    struct cpuidle_driver *drv, int index)
{
	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);

	if (cx->type == ACPI_STATE_C3) {
		struct acpi_processor *pr = __this_cpu_read(processors);

		if (unlikely(!pr))
			return 0;

		if (pr->flags.bm_check) {
			u8 bm_sts_skip = cx->bm_sts_skip;

			/* Don't check BM_STS, do an unconditional ARB_DIS for S2IDLE */
			cx->bm_sts_skip = 1;
			acpi_idle_enter_bm(drv, pr, cx, index);
			cx->bm_sts_skip = bm_sts_skip;

			return 0;
		} else {
			ACPI_FLUSH_CPU_CACHE();
		}
	}
	acpi_idle_do_entry(cx);

	return 0;
}

static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
					   struct cpuidle_device *dev)
{
	int i, count = ACPI_IDLE_STATE_START;
	struct acpi_processor_cx *cx;
	struct cpuidle_state *state;

	if (max_cstate == 0)
		max_cstate = 1;

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		state = &acpi_idle_driver.states[count];
		cx = &pr->power.states[i];

		if (!cx->valid)
			continue;

		per_cpu(acpi_cstate[count], dev->cpu) = cx;

		if (lapic_timer_needs_broadcast(pr, cx))
			state->flags |= CPUIDLE_FLAG_TIMER_STOP;

		if (cx->type == ACPI_STATE_C3) {
			state->flags |= CPUIDLE_FLAG_TLB_FLUSHED;
			if (pr->flags.bm_check)
				state->flags |= CPUIDLE_FLAG_RCU_IDLE;
		}

		count++;
		if (count == CPUIDLE_STATE_MAX)
			break;
	}

	if (!count)
		return -EINVAL;

	return 0;
}

static int acpi_processor_setup_cstates(struct acpi_processor *pr)
{
	int i, count;
	struct acpi_processor_cx *cx;
	struct cpuidle_state *state;
	struct cpuidle_driver *drv = &acpi_idle_driver;

	if (max_cstate == 0)
		max_cstate = 1;

	if (IS_ENABLED(CONFIG_ARCH_HAS_CPU_RELAX)) {
		cpuidle_poll_state_init(drv);
		count = 1;
	} else {
		count = 0;
	}

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		cx = &pr->power.states[i];

		if (!cx->valid)
			continue;

		state = &drv->states[count];
		snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
		strscpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
		state->exit_latency = cx->latency;
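		/*
		 * Require that the state be resident for at least
		 * latency_factor times its exit latency (2x by default,
		 * tunable via the latency_factor module parameter).
		 */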
		state->target_residency = cx->latency * latency_factor;
		state->enter = acpi_idle_enter;

		state->flags = 0;

		state->enter_dead = acpi_idle_play_dead;

		if (cx->type == ACPI_STATE_C1 || cx->type == ACPI_STATE_C2)
			drv->safe_state_index = count;

		/*
		 * Halt-induced C1 is not good for ->enter_s2idle, because it
		 * re-enables interrupts on exit. Moreover, C1 is generally not
		 * particularly interesting from the suspend-to-idle angle, so
		 * avoid C1 and the situations in which we may need to fall back
		 * to it altogether.
		 */
		if (cx->type != ACPI_STATE_C1 && !acpi_idle_fallback_to_c1(pr))
			state->enter_s2idle = acpi_idle_enter_s2idle;

		count++;
		if (count == CPUIDLE_STATE_MAX)
			break;
	}

	drv->state_count = count;

	if (!count)
		return -EINVAL;

	return 0;
}

static inline void acpi_processor_cstate_first_run_checks(void)
{
	static int first_run;

	if (first_run)
		return;
	dmi_check_system(processor_power_dmi_table);
	max_cstate = acpi_processor_cstate_check(max_cstate);
	if (max_cstate < ACPI_C_STATES_MAX)
		pr_notice("processor limited to max C-state %d\n", max_cstate);

	first_run++;

	if (nocst)
		return;

	acpi_processor_claim_cst_control();
}
#else

static inline int disabled_by_idle_boot_param(void) { return 0; }
static inline void acpi_processor_cstate_first_run_checks(void) { }
static int acpi_processor_get_cstate_info(struct acpi_processor *pr)
{
	return -ENODEV;
}

static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
					   struct cpuidle_device *dev)
{
	return -EINVAL;
}

static int acpi_processor_setup_cstates(struct acpi_processor *pr)
{
	return -EINVAL;
}

#endif /* CONFIG_ACPI_PROCESSOR_CSTATE */

struct acpi_lpi_states_array {
	unsigned int size;
	unsigned int composite_states_size;
	struct acpi_lpi_state *entries;
	struct acpi_lpi_state *composite_states[ACPI_PROCESSOR_MAX_POWER];
};

static int obj_get_integer(union acpi_object *obj, u32 *value)
{
	if (obj->type != ACPI_TYPE_INTEGER)
		return -EINVAL;

	*value = obj->integer.value;
	return 0;
}

static int acpi_processor_evaluate_lpi(acpi_handle handle,
				       struct acpi_lpi_states_array *info)
{
	acpi_status status;
	int ret = 0;
	int pkg_count, state_idx = 1, loop;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *lpi_data;
	struct acpi_lpi_state *lpi_state;

	status = acpi_evaluate_object(handle, "_LPI", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		acpi_handle_debug(handle, "No _LPI, giving up\n");
		return -ENODEV;
	}

	lpi_data = buffer.pointer;

	/* There must be at least 4 elements = 3 elements + 1 package */
	if (!lpi_data || lpi_data->type != ACPI_TYPE_PACKAGE ||
	    lpi_data->package.count < 4) {
		pr_debug("not enough elements in _LPI\n");
		ret = -ENODATA;
		goto end;
	}

	pkg_count = lpi_data->package.elements[2].integer.value;

	/* Validate number of power states. */
	if (pkg_count < 1 || pkg_count != lpi_data->package.count - 3) {
		pr_debug("count given by _LPI is not valid\n");
		ret = -ENODATA;
		goto end;
	}

	lpi_state = kcalloc(pkg_count, sizeof(*lpi_state), GFP_KERNEL);
	if (!lpi_state) {
		ret = -ENOMEM;
		goto end;
	}

	info->size = pkg_count;
	info->entries = lpi_state;

	/* LPI States start at index 3 */
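	/*
	 * Each state is itself a package; per the _LPI layout assumed by the
	 * parsing below: [0] min residency, [1] worst-case wakeup latency,
	 * [2] flags, [3] arch context-lost flags, [4] residency counter
	 * frequency, [5] enabled parent state, [6] entry method (buffer or
	 * integer), [7]/[8] residency/usage counters, [9] state name string.
	 */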
	for (loop = 3; state_idx <= pkg_count; loop++, state_idx++, lpi_state++) {
		union acpi_object *element, *pkg_elem, *obj;

		element = &lpi_data->package.elements[loop];
		if (element->type != ACPI_TYPE_PACKAGE || element->package.count < 7)
			continue;

		pkg_elem = element->package.elements;

		obj = pkg_elem + 6;
		if (obj->type == ACPI_TYPE_BUFFER) {
			struct acpi_power_register *reg;

			reg = (struct acpi_power_register *)obj->buffer.pointer;
			if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
			    reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)
				continue;

			lpi_state->address = reg->address;
			lpi_state->entry_method =
				reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE ?
				ACPI_CSTATE_FFH : ACPI_CSTATE_SYSTEMIO;
		} else if (obj->type == ACPI_TYPE_INTEGER) {
			lpi_state->entry_method = ACPI_CSTATE_INTEGER;
			lpi_state->address = obj->integer.value;
		} else {
			continue;
		}

		/* elements[7,8] skipped for now i.e. Residency/Usage counter */

		obj = pkg_elem + 9;
		if (obj->type == ACPI_TYPE_STRING)
			strscpy(lpi_state->desc, obj->string.pointer,
				ACPI_CX_DESC_LEN);

		lpi_state->index = state_idx;
		if (obj_get_integer(pkg_elem + 0, &lpi_state->min_residency)) {
			pr_debug("No min. residency found, assuming 10 us\n");
			lpi_state->min_residency = 10;
		}

		if (obj_get_integer(pkg_elem + 1, &lpi_state->wake_latency)) {
			pr_debug("No wakeup residency found, assuming 10 us\n");
			lpi_state->wake_latency = 10;
		}

		if (obj_get_integer(pkg_elem + 2, &lpi_state->flags))
			lpi_state->flags = 0;

		if (obj_get_integer(pkg_elem + 3, &lpi_state->arch_flags))
			lpi_state->arch_flags = 0;

		if (obj_get_integer(pkg_elem + 4, &lpi_state->res_cnt_freq))
			lpi_state->res_cnt_freq = 1;

		if (obj_get_integer(pkg_elem + 5, &lpi_state->enable_parent_state))
			lpi_state->enable_parent_state = 0;
	}

	acpi_handle_debug(handle, "Found %d power states\n", state_idx);
end:
	kfree(buffer.pointer);
	return ret;
}

/*
 * flat_state_cnt - the number of composite LPI states after the process of flattening
 */
static int flat_state_cnt;

/**
 * combine_lpi_states - combine local and parent LPI states to form a composite LPI state
 *
 * @local: local LPI state
 * @parent: parent LPI state
 * @result: composite LPI state
 */
static bool combine_lpi_states(struct acpi_lpi_state *local,
			       struct acpi_lpi_state *parent,
			       struct acpi_lpi_state *result)
{
	if (parent->entry_method == ACPI_CSTATE_INTEGER) {
		if (!parent->address) /* 0 means autopromotable */
			return false;
		result->address = local->address + parent->address;
	} else {
		result->address = parent->address;
	}

	result->min_residency = max(local->min_residency, parent->min_residency);
	result->wake_latency = local->wake_latency + parent->wake_latency;
	result->enable_parent_state = parent->enable_parent_state;
	result->entry_method = local->entry_method;

	result->flags = parent->flags;
	result->arch_flags = parent->arch_flags;
	result->index = parent->index;

	strscpy(result->desc, local->desc, ACPI_CX_DESC_LEN);
	strlcat(result->desc, "+", ACPI_CX_DESC_LEN);
	strlcat(result->desc, parent->desc, ACPI_CX_DESC_LEN);
	return true;
}

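/* Bit 0 of the _LPI state flags: the state is enabled for use. */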
#define ACPI_LPI_STATE_FLAGS_ENABLED			BIT(0)

static void stash_composite_state(struct acpi_lpi_states_array *curr_level,
				  struct acpi_lpi_state *t)
{
	curr_level->composite_states[curr_level->composite_states_size++] = t;
}

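/*
 * Flatten one level of the _LPI hierarchy: each enabled state at the current
 * level is either taken as-is (leaf/processor level, prev_level == NULL) or
 * combined with every composite state of the previous level that it may
 * promote from (t->index <= p->enable_parent_state), appending the results
 * to pr->power.lpi_states[].
 */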
static int flatten_lpi_states(struct acpi_processor *pr,
			      struct acpi_lpi_states_array *curr_level,
			      struct acpi_lpi_states_array *prev_level)
{
	int i, j, state_count = curr_level->size;
	struct acpi_lpi_state *p, *t = curr_level->entries;

	curr_level->composite_states_size = 0;
	for (j = 0; j < state_count; j++, t++) {
		struct acpi_lpi_state *flpi;

		if (!(t->flags & ACPI_LPI_STATE_FLAGS_ENABLED))
			continue;

		if (flat_state_cnt >= ACPI_PROCESSOR_MAX_POWER) {
			pr_warn("Limiting number of LPI states to max (%d)\n",
				ACPI_PROCESSOR_MAX_POWER);
			pr_warn("Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
			break;
		}

		flpi = &pr->power.lpi_states[flat_state_cnt];

		if (!prev_level) { /* leaf/processor node */
			memcpy(flpi, t, sizeof(*t));
			stash_composite_state(curr_level, flpi);
			flat_state_cnt++;
			continue;
		}

		for (i = 0; i < prev_level->composite_states_size; i++) {
			p = prev_level->composite_states[i];
			if (t->index <= p->enable_parent_state &&
			    combine_lpi_states(p, t, flpi)) {
				stash_composite_state(curr_level, flpi);
				flat_state_cnt++;
				flpi++;
			}
		}
	}

	kfree(curr_level->entries);
	return 0;
}

int __weak acpi_processor_ffh_lpi_probe(unsigned int cpu)
{
	return -EOPNOTSUPP;
}

static int acpi_processor_get_lpi_info(struct acpi_processor *pr)
{
	int ret, i;
	acpi_status status;
	acpi_handle handle = pr->handle, pr_ahandle;
	struct acpi_device *d = NULL;
	struct acpi_lpi_states_array info[2], *tmp, *prev, *curr;

	/* make sure our architecture has support */
	ret = acpi_processor_ffh_lpi_probe(pr->id);
	if (ret == -EOPNOTSUPP)
		return ret;

	if (!osc_pc_lpi_support_confirmed)
		return -EOPNOTSUPP;

	if (!acpi_has_method(handle, "_LPI"))
		return -EINVAL;

	flat_state_cnt = 0;
	prev = &info[0];
	curr = &info[1];
	handle = pr->handle;
	ret = acpi_processor_evaluate_lpi(handle, prev);
	if (ret)
		return ret;
	flatten_lpi_states(pr, prev, NULL);

	status = acpi_get_parent(handle, &pr_ahandle);
	while (ACPI_SUCCESS(status)) {
		d = acpi_fetch_acpi_dev(pr_ahandle);
		if (!d)
			break;

		handle = pr_ahandle;

		if (strcmp(acpi_device_hid(d), ACPI_PROCESSOR_CONTAINER_HID))
			break;

		/* _LPI can be optional at this level */
		if (!acpi_has_method(handle, "_LPI"))
			break;

		ret = acpi_processor_evaluate_lpi(handle, curr);
		if (ret)
			break;

		/* flatten all the LPI states in this level of hierarchy */
		flatten_lpi_states(pr, curr, prev);

		tmp = prev, prev = curr, curr = tmp;

		status = acpi_get_parent(handle, &pr_ahandle);
	}

	pr->power.count = flat_state_cnt;
	/* reset the index after flattening */
	for (i = 0; i < pr->power.count; i++)
		pr->power.lpi_states[i].index = i;

	/* Tell driver that _LPI is supported. */
	pr->flags.has_lpi = 1;
	pr->flags.power = 1;

	return 0;
}

int __weak acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi)
{
	return -ENODEV;
}

/**
 * acpi_idle_lpi_enter - enters any ACPI LPI state
 * @dev: the target CPU
 * @drv: cpuidle driver containing cpuidle state info
 * @index: index of target state
 *
 * Return: 0 for success or negative value for error
 */
static int acpi_idle_lpi_enter(struct cpuidle_device *dev,
			       struct cpuidle_driver *drv, int index)
{
	struct acpi_processor *pr;
	struct acpi_lpi_state *lpi;

	pr = __this_cpu_read(processors);

	if (unlikely(!pr))
		return -EINVAL;

	lpi = &pr->power.lpi_states[index];
	if (lpi->entry_method == ACPI_CSTATE_FFH)
		return acpi_processor_ffh_lpi_enter(lpi);

	return -EINVAL;
}

static int acpi_processor_setup_lpi_states(struct acpi_processor *pr)
{
	int i;
	struct acpi_lpi_state *lpi;
	struct cpuidle_state *state;
	struct cpuidle_driver *drv = &acpi_idle_driver;

	if (!pr->flags.has_lpi)
		return -EOPNOTSUPP;

	for (i = 0; i < pr->power.count && i < CPUIDLE_STATE_MAX; i++) {
		lpi = &pr->power.lpi_states[i];

		state = &drv->states[i];
		snprintf(state->name, CPUIDLE_NAME_LEN, "LPI-%d", i);
		strscpy(state->desc, lpi->desc, CPUIDLE_DESC_LEN);
		state->exit_latency = lpi->wake_latency;
		state->target_residency = lpi->min_residency;
		state->flags |= arch_get_idle_state_flags(lpi->arch_flags);
		if (i != 0 && lpi->entry_method == ACPI_CSTATE_FFH)
			state->flags |= CPUIDLE_FLAG_RCU_IDLE;
		state->enter = acpi_idle_lpi_enter;
		drv->safe_state_index = i;
	}

	drv->state_count = i;

	return 0;
}

/**
 * acpi_processor_setup_cpuidle_states - prepares and configures cpuidle
 * global state data i.e. idle routines
 *
 * @pr: the ACPI processor
 */
static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
{
	int i;
	struct cpuidle_driver *drv = &acpi_idle_driver;

	if (!pr->flags.power_setup_done || !pr->flags.power)
		return -EINVAL;

	drv->safe_state_index = -1;
	for (i = ACPI_IDLE_STATE_START; i < CPUIDLE_STATE_MAX; i++) {
		drv->states[i].name[0] = '\0';
		drv->states[i].desc[0] = '\0';
	}

	if (pr->flags.has_lpi)
		return acpi_processor_setup_lpi_states(pr);

	return acpi_processor_setup_cstates(pr);
}

/**
 * acpi_processor_setup_cpuidle_dev - prepares and configures CPUIDLE
 * device i.e. per-cpu data
 *
 * @pr: the ACPI processor
 * @dev: the cpuidle device
 */
static int acpi_processor_setup_cpuidle_dev(struct acpi_processor *pr,
					    struct cpuidle_device *dev)
{
	if (!pr->flags.power_setup_done || !pr->flags.power || !dev)
		return -EINVAL;

	dev->cpu = pr->id;
	if (pr->flags.has_lpi)
		return acpi_processor_ffh_lpi_probe(pr->id);

	return acpi_processor_setup_cpuidle_cx(pr, dev);
}

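/*
 * Prefer _LPI-described low-power states; fall back to the traditional
 * C-state enumeration (_CST/FADT) when LPI information is unavailable.
 */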
static int acpi_processor_get_power_info(struct acpi_processor *pr)
{
	int ret;

	ret = acpi_processor_get_lpi_info(pr);
	if (ret)
		ret = acpi_processor_get_cstate_info(pr);

	return ret;
}

int acpi_processor_hotplug(struct acpi_processor *pr)
{
	int ret = 0;
	struct cpuidle_device *dev;

	if (disabled_by_idle_boot_param())
		return 0;

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	dev = per_cpu(acpi_cpuidle_device, pr->id);
	cpuidle_pause_and_lock();
	cpuidle_disable_device(dev);
	ret = acpi_processor_get_power_info(pr);
	if (!ret && pr->flags.power) {
		acpi_processor_setup_cpuidle_dev(pr, dev);
		ret = cpuidle_enable_device(dev);
	}
	cpuidle_resume_and_unlock();

	return ret;
}

int acpi_processor_power_state_has_changed(struct acpi_processor *pr)
{
	int cpu;
	struct acpi_processor *_pr;
	struct cpuidle_device *dev;

	if (disabled_by_idle_boot_param())
		return 0;

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	/*
	 * FIXME: Design the ACPI notification to make it once per
	 * system instead of once per-cpu.  This condition is a hack
	 * to make the code that updates C-States be called once.
	 */

	if (pr->id == 0 && cpuidle_get_driver() == &acpi_idle_driver) {
		/* Protect against cpu-hotplug */
		cpus_read_lock();
		cpuidle_pause_and_lock();

		/* Disable all cpuidle devices */
		for_each_online_cpu(cpu) {
			_pr = per_cpu(processors, cpu);
			if (!_pr || !_pr->flags.power_setup_done)
				continue;
			dev = per_cpu(acpi_cpuidle_device, cpu);
			cpuidle_disable_device(dev);
		}

		/* Populate Updated C-state information */
		acpi_processor_get_power_info(pr);
		acpi_processor_setup_cpuidle_states(pr);

		/* Enable all cpuidle devices */
		for_each_online_cpu(cpu) {
			_pr = per_cpu(processors, cpu);
			if (!_pr || !_pr->flags.power_setup_done)
				continue;
			acpi_processor_get_power_info(_pr);
			if (_pr->flags.power) {
				dev = per_cpu(acpi_cpuidle_device, cpu);
				acpi_processor_setup_cpuidle_dev(_pr, dev);
				cpuidle_enable_device(dev);
			}
		}
		cpuidle_resume_and_unlock();
		cpus_read_unlock();
	}

	return 0;
}

static int acpi_processor_registered;

int acpi_processor_power_init(struct acpi_processor *pr)
{
	int retval;
	struct cpuidle_device *dev;

	if (disabled_by_idle_boot_param())
		return 0;

	acpi_processor_cstate_first_run_checks();

	if (!acpi_processor_get_power_info(pr))
		pr->flags.power_setup_done = 1;

	/*
	 * Install the idle handler if processor power management is supported.
	 * Note that the previously set idle handler will be used on platforms
	 * that only support C1.
	 */
	if (pr->flags.power) {
		/* Register acpi_idle_driver if not already registered */
		if (!acpi_processor_registered) {
			acpi_processor_setup_cpuidle_states(pr);
			retval = cpuidle_register_driver(&acpi_idle_driver);
			if (retval)
				return retval;
			pr_debug("%s registered with cpuidle\n",
				 acpi_idle_driver.name);
		}

		dev = kzalloc(sizeof(*dev), GFP_KERNEL);
		if (!dev)
			return -ENOMEM;
		per_cpu(acpi_cpuidle_device, pr->id) = dev;

		acpi_processor_setup_cpuidle_dev(pr, dev);

		/*
		 * Register the per-CPU cpuidle_device; the cpuidle driver
		 * must already be registered before registering devices.
		 */
		retval = cpuidle_register_device(dev);
		if (retval) {
			if (acpi_processor_registered == 0)
				cpuidle_unregister_driver(&acpi_idle_driver);
			return retval;
		}
		acpi_processor_registered++;
	}
	return 0;
}

int acpi_processor_power_exit(struct acpi_processor *pr)
{
	struct cpuidle_device *dev = per_cpu(acpi_cpuidle_device, pr->id);

	if (disabled_by_idle_boot_param())
		return 0;

	if (pr->flags.power) {
		cpuidle_unregister_device(dev);
		acpi_processor_registered--;
		if (acpi_processor_registered == 0)
			cpuidle_unregister_driver(&acpi_idle_driver);

		kfree(dev);
	}

	pr->flags.power_setup_done = 0;
	return 0;
}
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * processor_idle - idle state submodule to the ACPI processor driver
4 *
5 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
6 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
7 * Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de>
8 * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
9 * - Added processor hotplug support
10 * Copyright (C) 2005 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
11 * - Added support for C3 on SMP
12 */
13#define pr_fmt(fmt) "ACPI: " fmt
14
15#include <linux/module.h>
16#include <linux/acpi.h>
17#include <linux/dmi.h>
18#include <linux/sched.h> /* need_resched() */
19#include <linux/sort.h>
20#include <linux/tick.h>
21#include <linux/cpuidle.h>
22#include <linux/cpu.h>
23#include <acpi/processor.h>
24
25/*
26 * Include the apic definitions for x86 to have the APIC timer related defines
27 * available also for UP (on SMP it gets magically included via linux/smp.h).
28 * asm/acpi.h is not an option, as it would require more include magic. Also
29 * creating an empty asm-ia64/apic.h would just trade pest vs. cholera.
30 */
31#ifdef CONFIG_X86
32#include <asm/apic.h>
33#include <asm/cpu.h>
34#endif
35
36#define ACPI_IDLE_STATE_START (IS_ENABLED(CONFIG_ARCH_HAS_CPU_RELAX) ? 1 : 0)
37
38static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
39module_param(max_cstate, uint, 0000);
40static unsigned int nocst __read_mostly;
41module_param(nocst, uint, 0000);
42static int bm_check_disable __read_mostly;
43module_param(bm_check_disable, uint, 0000);
44
45static unsigned int latency_factor __read_mostly = 2;
46module_param(latency_factor, uint, 0644);
47
48static DEFINE_PER_CPU(struct cpuidle_device *, acpi_cpuidle_device);
49
50struct cpuidle_driver acpi_idle_driver = {
51 .name = "acpi_idle",
52 .owner = THIS_MODULE,
53};
54
55#ifdef CONFIG_ACPI_PROCESSOR_CSTATE
56static
57DEFINE_PER_CPU(struct acpi_processor_cx * [CPUIDLE_STATE_MAX], acpi_cstate);
58
59static int disabled_by_idle_boot_param(void)
60{
61 return boot_option_idle_override == IDLE_POLL ||
62 boot_option_idle_override == IDLE_HALT;
63}
64
65/*
66 * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
67 * For now disable this. Probably a bug somewhere else.
68 *
69 * To skip this limit, boot/load with a large max_cstate limit.
70 */
71static int set_max_cstate(const struct dmi_system_id *id)
72{
73 if (max_cstate > ACPI_PROCESSOR_MAX_POWER)
74 return 0;
75
76 pr_notice("%s detected - limiting to C%ld max_cstate."
77 " Override with \"processor.max_cstate=%d\"\n", id->ident,
78 (long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1);
79
80 max_cstate = (long)id->driver_data;
81
82 return 0;
83}
84
85static const struct dmi_system_id processor_power_dmi_table[] = {
86 { set_max_cstate, "Clevo 5600D", {
87 DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
88 DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")},
89 (void *)2},
90 { set_max_cstate, "Pavilion zv5000", {
91 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
92 DMI_MATCH(DMI_PRODUCT_NAME,"Pavilion zv5000 (DS502A#ABA)")},
93 (void *)1},
94 { set_max_cstate, "Asus L8400B", {
95 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
96 DMI_MATCH(DMI_PRODUCT_NAME,"L8400B series Notebook PC")},
97 (void *)1},
98 {},
99};
100
101
102/*
103 * Callers should disable interrupts before the call and enable
104 * interrupts after return.
105 */
106static void __cpuidle acpi_safe_halt(void)
107{
108 if (!tif_need_resched()) {
109 safe_halt();
110 local_irq_disable();
111 }
112}
113
114#ifdef ARCH_APICTIMER_STOPS_ON_C3
115
116/*
117 * Some BIOS implementations switch to C3 in the published C2 state.
118 * This seems to be a common problem on AMD boxen, but other vendors
119 * are affected too. We pick the most conservative approach: we assume
120 * that the local APIC stops in both C2 and C3.
121 */
122static void lapic_timer_check_state(int state, struct acpi_processor *pr,
123 struct acpi_processor_cx *cx)
124{
125 struct acpi_processor_power *pwr = &pr->power;
126 u8 type = local_apic_timer_c2_ok ? ACPI_STATE_C3 : ACPI_STATE_C2;
127
128 if (cpu_has(&cpu_data(pr->id), X86_FEATURE_ARAT))
129 return;
130
131 if (boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E))
132 type = ACPI_STATE_C1;
133
134 /*
135 * Check, if one of the previous states already marked the lapic
136 * unstable
137 */
138 if (pwr->timer_broadcast_on_state < state)
139 return;
140
141 if (cx->type >= type)
142 pr->power.timer_broadcast_on_state = state;
143}
144
145static void __lapic_timer_propagate_broadcast(void *arg)
146{
147 struct acpi_processor *pr = (struct acpi_processor *) arg;
148
149 if (pr->power.timer_broadcast_on_state < INT_MAX)
150 tick_broadcast_enable();
151 else
152 tick_broadcast_disable();
153}
154
155static void lapic_timer_propagate_broadcast(struct acpi_processor *pr)
156{
157 smp_call_function_single(pr->id, __lapic_timer_propagate_broadcast,
158 (void *)pr, 1);
159}
160
161/* Power(C) State timer broadcast control */
162static bool lapic_timer_needs_broadcast(struct acpi_processor *pr,
163 struct acpi_processor_cx *cx)
164{
165 return cx - pr->power.states >= pr->power.timer_broadcast_on_state;
166}
167
168#else
169
170static void lapic_timer_check_state(int state, struct acpi_processor *pr,
171 struct acpi_processor_cx *cstate) { }
172static void lapic_timer_propagate_broadcast(struct acpi_processor *pr) { }
173
174static bool lapic_timer_needs_broadcast(struct acpi_processor *pr,
175 struct acpi_processor_cx *cx)
176{
177 return false;
178}
179
180#endif
181
182#if defined(CONFIG_X86)
183static void tsc_check_state(int state)
184{
185 switch (boot_cpu_data.x86_vendor) {
186 case X86_VENDOR_HYGON:
187 case X86_VENDOR_AMD:
188 case X86_VENDOR_INTEL:
189 case X86_VENDOR_CENTAUR:
190 case X86_VENDOR_ZHAOXIN:
191 /*
192 * AMD Fam10h TSC will tick in all
193 * C/P/S0/S1 states when this bit is set.
194 */
195 if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
196 return;
197 fallthrough;
198 default:
199 /* TSC could halt in idle, so notify users */
200 if (state > ACPI_STATE_C1)
201 mark_tsc_unstable("TSC halts in idle");
202 }
203}
204#else
205static void tsc_check_state(int state) { return; }
206#endif
207
208static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
209{
210
211 if (!pr->pblk)
212 return -ENODEV;
213
214 /* if info is obtained from pblk/fadt, type equals state */
215 pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
216 pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;
217
218#ifndef CONFIG_HOTPLUG_CPU
219 /*
220 * Check for P_LVL2_UP flag before entering C2 and above on
221 * an SMP system.
222 */
223 if ((num_online_cpus() > 1) &&
224 !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
225 return -ENODEV;
226#endif
227
228 /* determine C2 and C3 address from pblk */
229 pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4;
230 pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;
231
232 /* determine latencies from FADT */
233 pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.c2_latency;
234 pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.c3_latency;
235
236 /*
237 * FADT specified C2 latency must be less than or equal to
238 * 100 microseconds.
239 */
240 if (acpi_gbl_FADT.c2_latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
241 acpi_handle_debug(pr->handle, "C2 latency too large [%d]\n",
242 acpi_gbl_FADT.c2_latency);
243 /* invalidate C2 */
244 pr->power.states[ACPI_STATE_C2].address = 0;
245 }
246
247 /*
248 * FADT supplied C3 latency must be less than or equal to
249 * 1000 microseconds.
250 */
251 if (acpi_gbl_FADT.c3_latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
252 acpi_handle_debug(pr->handle, "C3 latency too large [%d]\n",
253 acpi_gbl_FADT.c3_latency);
254 /* invalidate C3 */
255 pr->power.states[ACPI_STATE_C3].address = 0;
256 }
257
258 acpi_handle_debug(pr->handle, "lvl2[0x%08x] lvl3[0x%08x]\n",
259 pr->power.states[ACPI_STATE_C2].address,
260 pr->power.states[ACPI_STATE_C3].address);
261
262 snprintf(pr->power.states[ACPI_STATE_C2].desc,
263 ACPI_CX_DESC_LEN, "ACPI P_LVL2 IOPORT 0x%x",
264 pr->power.states[ACPI_STATE_C2].address);
265 snprintf(pr->power.states[ACPI_STATE_C3].desc,
266 ACPI_CX_DESC_LEN, "ACPI P_LVL3 IOPORT 0x%x",
267 pr->power.states[ACPI_STATE_C3].address);
268
269 return 0;
270}
271
272static int acpi_processor_get_power_info_default(struct acpi_processor *pr)
273{
274 if (!pr->power.states[ACPI_STATE_C1].valid) {
275 /* set the first C-State to C1 */
276 /* all processors need to support C1 */
277 pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
278 pr->power.states[ACPI_STATE_C1].valid = 1;
279 pr->power.states[ACPI_STATE_C1].entry_method = ACPI_CSTATE_HALT;
280
281 snprintf(pr->power.states[ACPI_STATE_C1].desc,
282 ACPI_CX_DESC_LEN, "ACPI HLT");
283 }
284 /* the C0 state only exists as a filler in our array */
285 pr->power.states[ACPI_STATE_C0].valid = 1;
286 return 0;
287}
288
289static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
290{
291 int ret;
292
293 if (nocst)
294 return -ENODEV;
295
296 ret = acpi_processor_evaluate_cst(pr->handle, pr->id, &pr->power);
297 if (ret)
298 return ret;
299
300 if (!pr->power.count)
301 return -EFAULT;
302
303 pr->flags.has_cst = 1;
304 return 0;
305}
306
307static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
308 struct acpi_processor_cx *cx)
309{
310 static int bm_check_flag = -1;
311 static int bm_control_flag = -1;
312
313
314 if (!cx->address)
315 return;
316
317 /*
318 * PIIX4 Erratum #18: We don't support C3 when Type-F (fast)
319 * DMA transfers are used by any ISA device to avoid livelock.
320 * Note that we could disable Type-F DMA (as recommended by
321 * the erratum), but this is known to disrupt certain ISA
322 * devices thus we take the conservative approach.
323 */
324 else if (errata.piix4.fdma) {
325 acpi_handle_debug(pr->handle,
326 "C3 not supported on PIIX4 with Type-F DMA\n");
327 return;
328 }
329
330 /* All the logic here assumes flags.bm_check is same across all CPUs */
331 if (bm_check_flag == -1) {
332 /* Determine whether bm_check is needed based on CPU */
333 acpi_processor_power_init_bm_check(&(pr->flags), pr->id);
334 bm_check_flag = pr->flags.bm_check;
335 bm_control_flag = pr->flags.bm_control;
336 } else {
337 pr->flags.bm_check = bm_check_flag;
338 pr->flags.bm_control = bm_control_flag;
339 }
340
341 if (pr->flags.bm_check) {
342 if (!pr->flags.bm_control) {
343 if (pr->flags.has_cst != 1) {
344 /* bus mastering control is necessary */
345 acpi_handle_debug(pr->handle,
346 "C3 support requires BM control\n");
347 return;
348 } else {
349 /* Here we enter C3 without bus mastering */
350 acpi_handle_debug(pr->handle,
351 "C3 support without BM control\n");
352 }
353 }
354 } else {
355 /*
356 * WBINVD should be set in fadt, for C3 state to be
357 * supported on when bm_check is not required.
358 */
359 if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) {
360 acpi_handle_debug(pr->handle,
361 "Cache invalidation should work properly"
362 " for C3 to be enabled on SMP systems\n");
363 return;
364 }
365 }
366
367 /*
368 * Otherwise we've met all of our C3 requirements.
369 * Normalize the C3 latency to expidite policy. Enable
370 * checking of bus mastering status (bm_check) so we can
371 * use this in our C3 policy
372 */
373 cx->valid = 1;
374
375 /*
376 * On older chipsets, BM_RLD needs to be set
377 * in order for Bus Master activity to wake the
378 * system from C3. Newer chipsets handle DMA
379 * during C3 automatically and BM_RLD is a NOP.
380 * In either case, the proper way to
381 * handle BM_RLD is to set it and leave it set.
382 */
383 acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
384
385 return;
386}
387
388static int acpi_cst_latency_cmp(const void *a, const void *b)
389{
390 const struct acpi_processor_cx *x = a, *y = b;
391
392 if (!(x->valid && y->valid))
393 return 0;
394 if (x->latency > y->latency)
395 return 1;
396 if (x->latency < y->latency)
397 return -1;
398 return 0;
399}
400static void acpi_cst_latency_swap(void *a, void *b, int n)
401{
402 struct acpi_processor_cx *x = a, *y = b;
403 u32 tmp;
404
405 if (!(x->valid && y->valid))
406 return;
407 tmp = x->latency;
408 x->latency = y->latency;
409 y->latency = tmp;
410}
411
412static int acpi_processor_power_verify(struct acpi_processor *pr)
413{
414 unsigned int i;
415 unsigned int working = 0;
416 unsigned int last_latency = 0;
417 unsigned int last_type = 0;
418 bool buggy_latency = false;
419
420 pr->power.timer_broadcast_on_state = INT_MAX;
421
422 for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
423 struct acpi_processor_cx *cx = &pr->power.states[i];
424
425 switch (cx->type) {
426 case ACPI_STATE_C1:
427 cx->valid = 1;
428 break;
429
430 case ACPI_STATE_C2:
431 if (!cx->address)
432 break;
433 cx->valid = 1;
434 break;
435
436 case ACPI_STATE_C3:
437 acpi_processor_power_verify_c3(pr, cx);
438 break;
439 }
440 if (!cx->valid)
441 continue;
442 if (cx->type >= last_type && cx->latency < last_latency)
443 buggy_latency = true;
444 last_latency = cx->latency;
445 last_type = cx->type;
446
447 lapic_timer_check_state(i, pr, cx);
448 tsc_check_state(cx->type);
449 working++;
450 }
451
452 if (buggy_latency) {
453 pr_notice("FW issue: working around C-state latencies out of order\n");
454 sort(&pr->power.states[1], max_cstate,
455 sizeof(struct acpi_processor_cx),
456 acpi_cst_latency_cmp,
457 acpi_cst_latency_swap);
458 }
459
460 lapic_timer_propagate_broadcast(pr);
461
462 return (working);
463}
464
465static int acpi_processor_get_cstate_info(struct acpi_processor *pr)
466{
467 unsigned int i;
468 int result;
469
470
471 /* NOTE: the idle thread may not be running while calling
472 * this function */
473
474 /* Zero initialize all the C-states info. */
475 memset(pr->power.states, 0, sizeof(pr->power.states));
476
477 result = acpi_processor_get_power_info_cst(pr);
478 if (result == -ENODEV)
479 result = acpi_processor_get_power_info_fadt(pr);
480
481 if (result)
482 return result;
483
484 acpi_processor_get_power_info_default(pr);
485
486 pr->power.count = acpi_processor_power_verify(pr);
487
488 /*
489 * if one state of type C2 or C3 is available, mark this
490 * CPU as being "idle manageable"
491 */
492 for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
493 if (pr->power.states[i].valid) {
494 pr->power.count = i;
495 pr->flags.power = 1;
496 }
497 }
498
499 return 0;
500}
501
502/**
503 * acpi_idle_bm_check - checks if bus master activity was detected
504 */
505static int acpi_idle_bm_check(void)
506{
507 u32 bm_status = 0;
508
509 if (bm_check_disable)
510 return 0;
511
512 acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
513 if (bm_status)
514 acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
515 /*
516 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
517 * the true state of bus mastering activity; forcing us to
518 * manually check the BMIDEA bit of each IDE channel.
519 */
520 else if (errata.piix4.bmisx) {
521 if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
522 || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
523 bm_status = 1;
524 }
525 return bm_status;
526}
527
static void wait_for_freeze(void)
{
#ifdef CONFIG_X86
	/* No delay is needed if we are in guest */
	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return;
#endif
	/*
	 * Dummy wait op - must do something useless after P_LVL2 read
	 * because chipsets cannot guarantee that STPCLK# signal gets
	 * asserted in time to freeze execution properly.
	 */
	inl(acpi_gbl_FADT.xpm_timer_block.address);
}

/**
 * acpi_idle_do_entry - enter idle state using the appropriate method
 * @cx: cstate data
 *
 * Caller disables interrupts before the call and enables them after return.
 */
static void __cpuidle acpi_idle_do_entry(struct acpi_processor_cx *cx)
{
	if (cx->entry_method == ACPI_CSTATE_FFH) {
		/* Call into architectural FFH based C-state */
		acpi_processor_ffh_cstate_enter(cx);
	} else if (cx->entry_method == ACPI_CSTATE_HALT) {
		acpi_safe_halt();
	} else {
		/* IO port based C-state */
		inb(cx->address);
		wait_for_freeze();
	}
}

/**
 * acpi_idle_play_dead - enters an ACPI state for long-term idle (i.e. off-lining)
 * @dev: the target CPU
 * @index: the index of suggested state
 */
static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
{
	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);

	ACPI_FLUSH_CPU_CACHE();

	while (1) {
		if (cx->entry_method == ACPI_CSTATE_HALT)
			safe_halt();
		else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) {
			inb(cx->address);
			wait_for_freeze();
		} else
			return -ENODEV;

#if defined(CONFIG_X86) && defined(CONFIG_HOTPLUG_CPU)
		cond_wakeup_cpu0();
#endif
	}

	/* Never reached */
	return 0;
}

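/*
 * acpi_idle_fallback_to_c1 - check whether C-states deeper than C1 must
 * be avoided: without _CST, and with the FADT not advertising C2 support
 * on MP systems, only C1 is known to be safe while multiple CPUs are
 * online (relevant only when CPU hotplug is enabled).
 */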
static bool acpi_idle_fallback_to_c1(struct acpi_processor *pr)
{
	return IS_ENABLED(CONFIG_HOTPLUG_CPU) && !pr->flags.has_cst &&
		!(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED);
}

static int c3_cpu_count;
static DEFINE_RAW_SPINLOCK(c3_lock);

/**
 * acpi_idle_enter_bm - enters C3 with proper BM handling
 * @drv: cpuidle driver
 * @pr: Target processor
 * @cx: Target state context
 * @index: index of target state
 */
static int acpi_idle_enter_bm(struct cpuidle_driver *drv,
			      struct acpi_processor *pr,
			      struct acpi_processor_cx *cx,
			      int index)
{
	static struct acpi_processor_cx safe_cx = {
		.entry_method = ACPI_CSTATE_HALT,
	};

	/*
	 * Disable bus master arbitration if needed:
	 * bm_check means ARB_DIS is required, and
	 * bm_control indicates whether ARB_DIS is actually available.
	 *
	 * That leaves the case where bm_check is set and bm_control is not.
	 * There is not much we can do then; enter C3 without doing anything.
	 */
	bool dis_bm = pr->flags.bm_control;

	/* If we can skip BM, demote to a safe state. */
	if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
		dis_bm = false;
		index = drv->safe_state_index;
		if (index >= 0) {
			cx = this_cpu_read(acpi_cstate[index]);
		} else {
			cx = &safe_cx;
			index = -EBUSY;
		}
	}

	if (dis_bm) {
		raw_spin_lock(&c3_lock);
		c3_cpu_count++;
		/* Disable bus master arbitration when all CPUs are in C3 */
		if (c3_cpu_count == num_online_cpus())
			acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
		raw_spin_unlock(&c3_lock);
	}

	rcu_idle_enter();

	acpi_idle_do_entry(cx);

	rcu_idle_exit();

	/* Re-enable bus master arbitration */
	if (dis_bm) {
		raw_spin_lock(&c3_lock);
		acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
		c3_cpu_count--;
		raw_spin_unlock(&c3_lock);
	}

	return index;
}

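/*
 * acpi_idle_enter - cpuidle ->enter() callback. C3 with bus-master checks
 * is handled by acpi_idle_enter_bm(); C2/C3 may be demoted to C1 when
 * only C1 is known to be safe on this platform (see
 * acpi_idle_fallback_to_c1()).
 */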
static int acpi_idle_enter(struct cpuidle_device *dev,
			   struct cpuidle_driver *drv, int index)
{
	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
	struct acpi_processor *pr;

	pr = __this_cpu_read(processors);
	if (unlikely(!pr))
		return -EINVAL;

	if (cx->type != ACPI_STATE_C1) {
		if (cx->type == ACPI_STATE_C3 && pr->flags.bm_check)
			return acpi_idle_enter_bm(drv, pr, cx, index);

		/* C2 to C1 demotion. */
		if (acpi_idle_fallback_to_c1(pr) && num_online_cpus() > 1) {
			index = ACPI_IDLE_STATE_START;
			cx = per_cpu(acpi_cstate[index], dev->cpu);
		}
	}

	if (cx->type == ACPI_STATE_C3)
		ACPI_FLUSH_CPU_CACHE();

	acpi_idle_do_entry(cx);

	return index;
}

static int acpi_idle_enter_s2idle(struct cpuidle_device *dev,
				  struct cpuidle_driver *drv, int index)
{
	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);

	if (cx->type == ACPI_STATE_C3) {
		struct acpi_processor *pr = __this_cpu_read(processors);

		if (unlikely(!pr))
			return 0;

		if (pr->flags.bm_check) {
			u8 bm_sts_skip = cx->bm_sts_skip;

			/* Don't check BM_STS, do an unconditional ARB_DIS for S2IDLE */
			cx->bm_sts_skip = 1;
			acpi_idle_enter_bm(drv, pr, cx, index);
			cx->bm_sts_skip = bm_sts_skip;

			return 0;
		} else {
			ACPI_FLUSH_CPU_CACHE();
		}
	}
	acpi_idle_do_entry(cx);

	return 0;
}

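/**
 * acpi_processor_setup_cpuidle_cx - set up this CPU's C-state bookkeeping
 * @pr: the target processor
 * @dev: the per-CPU cpuidle device
 *
 * Records each valid C-state in the per-CPU acpi_cstate[] table and sets
 * the per-state cpuidle flags (timer stop, TLB flushed, RCU idle) on the
 * shared driver states.
 */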
static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
					   struct cpuidle_device *dev)
{
	int i, count = ACPI_IDLE_STATE_START;
	struct acpi_processor_cx *cx;
	struct cpuidle_state *state;

	if (max_cstate == 0)
		max_cstate = 1;

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		state = &acpi_idle_driver.states[count];
		cx = &pr->power.states[i];

		if (!cx->valid)
			continue;

		per_cpu(acpi_cstate[count], dev->cpu) = cx;

		if (lapic_timer_needs_broadcast(pr, cx))
			state->flags |= CPUIDLE_FLAG_TIMER_STOP;

		if (cx->type == ACPI_STATE_C3) {
			state->flags |= CPUIDLE_FLAG_TLB_FLUSHED;
			if (pr->flags.bm_check)
				state->flags |= CPUIDLE_FLAG_RCU_IDLE;
		}

		count++;
		if (count == CPUIDLE_STATE_MAX)
			break;
	}

	if (!count)
		return -EINVAL;

	return 0;
}

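/**
 * acpi_processor_setup_cstates - populate the cpuidle driver's state table
 * @pr: the target processor
 *
 * A polling state is installed at index 0 when the architecture supports
 * it; each valid C-state then becomes one cpuidle state, with the target
 * residency derived from the exit latency via the latency_factor module
 * parameter.
 */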
static int acpi_processor_setup_cstates(struct acpi_processor *pr)
{
	int i, count;
	struct acpi_processor_cx *cx;
	struct cpuidle_state *state;
	struct cpuidle_driver *drv = &acpi_idle_driver;

	if (max_cstate == 0)
		max_cstate = 1;

	if (IS_ENABLED(CONFIG_ARCH_HAS_CPU_RELAX)) {
		cpuidle_poll_state_init(drv);
		count = 1;
	} else {
		count = 0;
	}

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		cx = &pr->power.states[i];

		if (!cx->valid)
			continue;

		state = &drv->states[count];
		snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
		strlcpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
		state->exit_latency = cx->latency;
		state->target_residency = cx->latency * latency_factor;
		state->enter = acpi_idle_enter;

		state->flags = 0;
		if (cx->type == ACPI_STATE_C1 || cx->type == ACPI_STATE_C2) {
			state->enter_dead = acpi_idle_play_dead;
			drv->safe_state_index = count;
		}
		/*
		 * Halt-induced C1 is not good for ->enter_s2idle, because it
		 * re-enables interrupts on exit. Moreover, C1 is generally not
		 * particularly interesting from the suspend-to-idle angle, so
		 * avoid C1 and the situations in which we may need to fall back
		 * to it altogether.
		 */
		if (cx->type != ACPI_STATE_C1 && !acpi_idle_fallback_to_c1(pr))
			state->enter_s2idle = acpi_idle_enter_s2idle;

		count++;
		if (count == CPUIDLE_STATE_MAX)
			break;
	}

	drv->state_count = count;

	if (!count)
		return -EINVAL;

	return 0;
}

static inline void acpi_processor_cstate_first_run_checks(void)
{
	static int first_run;

	if (first_run)
		return;
	dmi_check_system(processor_power_dmi_table);
	max_cstate = acpi_processor_cstate_check(max_cstate);
	if (max_cstate < ACPI_C_STATES_MAX)
		pr_notice("processor limited to max C-state %d\n", max_cstate);

	first_run++;

	if (nocst)
		return;

	acpi_processor_claim_cst_control();
}
#else

static inline int disabled_by_idle_boot_param(void) { return 0; }
static inline void acpi_processor_cstate_first_run_checks(void) { }
static int acpi_processor_get_cstate_info(struct acpi_processor *pr)
{
	return -ENODEV;
}

static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
					   struct cpuidle_device *dev)
{
	return -EINVAL;
}

static int acpi_processor_setup_cstates(struct acpi_processor *pr)
{
	return -EINVAL;
}

#endif /* CONFIG_ACPI_PROCESSOR_CSTATE */

struct acpi_lpi_states_array {
	unsigned int size;
	unsigned int composite_states_size;
	struct acpi_lpi_state *entries;
	struct acpi_lpi_state *composite_states[ACPI_PROCESSOR_MAX_POWER];
};

static int obj_get_integer(union acpi_object *obj, u32 *value)
{
	if (obj->type != ACPI_TYPE_INTEGER)
		return -EINVAL;

	*value = obj->integer.value;
	return 0;
}

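/*
 * A sketch of the _LPI package layout as consumed below (element indices
 * in brackets; see the _LPI definition in the ACPI specification for the
 * authoritative field names):
 *
 *	Package {
 *		Revision,			// [0]
 *		LevelId,			// [1]
 *		Count,				// [2]
 *		Package {			// [3..], one per LPI state
 *			MinResidency,		// [0]
 *			WorstCaseWakeLatency,	// [1]
 *			Flags,			// [2]
 *			ArchContextLostFlags,	// [3]
 *			ResidencyCounterFreq,	// [4]
 *			EnabledParentState,	// [5]
 *			EntryMethod,		// [6] Buffer or Integer
 *			ResidencyCounterReg,	// [7] currently unused here
 *			UsageCounterReg,	// [8] currently unused here
 *			StateName,		// [9] String
 *		},
 *		...
 *	}
 */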
static int acpi_processor_evaluate_lpi(acpi_handle handle,
				       struct acpi_lpi_states_array *info)
{
	acpi_status status;
	int ret = 0;
	int pkg_count, state_idx = 1, loop;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *lpi_data;
	struct acpi_lpi_state *lpi_state;

	status = acpi_evaluate_object(handle, "_LPI", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		acpi_handle_debug(handle, "No _LPI, giving up\n");
		return -ENODEV;
	}

	lpi_data = buffer.pointer;

	/* There must be at least 4 elements = 3 elements + 1 package */
	if (!lpi_data || lpi_data->type != ACPI_TYPE_PACKAGE ||
	    lpi_data->package.count < 4) {
		pr_debug("not enough elements in _LPI\n");
		ret = -ENODATA;
		goto end;
	}

	pkg_count = lpi_data->package.elements[2].integer.value;

	/* Validate number of power states. */
	if (pkg_count < 1 || pkg_count != lpi_data->package.count - 3) {
		pr_debug("count given by _LPI is not valid\n");
		ret = -ENODATA;
		goto end;
	}

	lpi_state = kcalloc(pkg_count, sizeof(*lpi_state), GFP_KERNEL);
	if (!lpi_state) {
		ret = -ENOMEM;
		goto end;
	}

	info->size = pkg_count;
	info->entries = lpi_state;

	/* LPI States start at index 3 */
	for (loop = 3; state_idx <= pkg_count; loop++, state_idx++, lpi_state++) {
		union acpi_object *element, *pkg_elem, *obj;

		element = &lpi_data->package.elements[loop];
		if (element->type != ACPI_TYPE_PACKAGE || element->package.count < 7)
			continue;

		pkg_elem = element->package.elements;

		obj = pkg_elem + 6;
		if (obj->type == ACPI_TYPE_BUFFER) {
			struct acpi_power_register *reg;

			reg = (struct acpi_power_register *)obj->buffer.pointer;
			if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
			    reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)
				continue;

			lpi_state->address = reg->address;
			lpi_state->entry_method =
				reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE ?
				ACPI_CSTATE_FFH : ACPI_CSTATE_SYSTEMIO;
		} else if (obj->type == ACPI_TYPE_INTEGER) {
			lpi_state->entry_method = ACPI_CSTATE_INTEGER;
			lpi_state->address = obj->integer.value;
		} else {
			continue;
		}

		/* elements[7,8] skipped for now, i.e. Residency/Usage counters */

		obj = pkg_elem + 9;
		if (obj->type == ACPI_TYPE_STRING)
			strlcpy(lpi_state->desc, obj->string.pointer,
				ACPI_CX_DESC_LEN);

		lpi_state->index = state_idx;
		if (obj_get_integer(pkg_elem + 0, &lpi_state->min_residency)) {
			pr_debug("No min. residency found, assuming 10 us\n");
			lpi_state->min_residency = 10;
		}

		if (obj_get_integer(pkg_elem + 1, &lpi_state->wake_latency)) {
			pr_debug("No wake latency found, assuming 10 us\n");
			lpi_state->wake_latency = 10;
		}

		if (obj_get_integer(pkg_elem + 2, &lpi_state->flags))
			lpi_state->flags = 0;

		if (obj_get_integer(pkg_elem + 3, &lpi_state->arch_flags))
			lpi_state->arch_flags = 0;

		if (obj_get_integer(pkg_elem + 4, &lpi_state->res_cnt_freq))
			lpi_state->res_cnt_freq = 1;

		if (obj_get_integer(pkg_elem + 5, &lpi_state->enable_parent_state))
			lpi_state->enable_parent_state = 0;
	}

	acpi_handle_debug(handle, "Found %d power states\n", state_idx - 1);
end:
	kfree(buffer.pointer);
	return ret;
}

/*
 * flat_state_cnt - the number of composite LPI states after flattening
 */
static int flat_state_cnt;

/**
 * combine_lpi_states - combine local and parent LPI states to form a composite LPI state
 *
 * @local: local LPI state
 * @parent: parent LPI state
 * @result: composite LPI state
 */
static bool combine_lpi_states(struct acpi_lpi_state *local,
			       struct acpi_lpi_state *parent,
			       struct acpi_lpi_state *result)
{
	if (parent->entry_method == ACPI_CSTATE_INTEGER) {
		if (!parent->address) /* 0 means autopromotable */
			return false;
		result->address = local->address + parent->address;
	} else {
		result->address = parent->address;
	}

	result->min_residency = max(local->min_residency, parent->min_residency);
	result->wake_latency = local->wake_latency + parent->wake_latency;
	result->enable_parent_state = parent->enable_parent_state;
	result->entry_method = local->entry_method;

	result->flags = parent->flags;
	result->arch_flags = parent->arch_flags;
	result->index = parent->index;

	strlcpy(result->desc, local->desc, ACPI_CX_DESC_LEN);
	strlcat(result->desc, "+", ACPI_CX_DESC_LEN);
	strlcat(result->desc, parent->desc, ACPI_CX_DESC_LEN);
	return true;
}

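/*
 * Illustrative example for combine_lpi_states() (names and numbers are
 * hypothetical): combining a local state "LPI-core" (wake_latency 50,
 * min_residency 500) with a parent state "LPI-cluster" (wake_latency 200,
 * min_residency 1000) yields a composite "LPI-core+LPI-cluster" with
 * wake_latency 250 and min_residency 1000.
 */
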
#define ACPI_LPI_STATE_FLAGS_ENABLED			BIT(0)

static void stash_composite_state(struct acpi_lpi_states_array *curr_level,
				  struct acpi_lpi_state *t)
{
	curr_level->composite_states[curr_level->composite_states_size++] = t;
}

static int flatten_lpi_states(struct acpi_processor *pr,
			      struct acpi_lpi_states_array *curr_level,
			      struct acpi_lpi_states_array *prev_level)
{
	int i, j, state_count = curr_level->size;
	struct acpi_lpi_state *p, *t = curr_level->entries;

	curr_level->composite_states_size = 0;
	for (j = 0; j < state_count; j++, t++) {
		struct acpi_lpi_state *flpi;

		if (!(t->flags & ACPI_LPI_STATE_FLAGS_ENABLED))
			continue;

		if (flat_state_cnt >= ACPI_PROCESSOR_MAX_POWER) {
			pr_warn("Limiting number of LPI states to max (%d)\n",
				ACPI_PROCESSOR_MAX_POWER);
			pr_warn("Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
			break;
		}

		flpi = &pr->power.lpi_states[flat_state_cnt];

		if (!prev_level) { /* leaf/processor node */
			memcpy(flpi, t, sizeof(*t));
			stash_composite_state(curr_level, flpi);
			flat_state_cnt++;
			continue;
		}

		for (i = 0; i < prev_level->composite_states_size; i++) {
			p = prev_level->composite_states[i];
			if (t->index <= p->enable_parent_state &&
			    combine_lpi_states(p, t, flpi)) {
				stash_composite_state(curr_level, flpi);
				flat_state_cnt++;
				flpi++;
			}
		}
	}

	kfree(curr_level->entries);
	return 0;
}

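/**
 * acpi_processor_get_lpi_info - build the flattened LPI state table
 * @pr: the target processor
 *
 * Evaluates _LPI on the processor itself, then walks up the hierarchy of
 * processor containers, flattening each level's states into composites,
 * until a parent without _LPI (or a non-container device) is reached.
 */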
static int acpi_processor_get_lpi_info(struct acpi_processor *pr)
{
	int ret, i;
	acpi_status status;
	acpi_handle handle = pr->handle, pr_ahandle;
	struct acpi_device *d = NULL;
	struct acpi_lpi_states_array info[2], *tmp, *prev, *curr;

	if (!osc_pc_lpi_support_confirmed)
		return -EOPNOTSUPP;

	if (!acpi_has_method(handle, "_LPI"))
		return -EINVAL;

	flat_state_cnt = 0;
	prev = &info[0];
	curr = &info[1];
	handle = pr->handle;
	ret = acpi_processor_evaluate_lpi(handle, prev);
	if (ret)
		return ret;
	flatten_lpi_states(pr, prev, NULL);

	status = acpi_get_parent(handle, &pr_ahandle);
	while (ACPI_SUCCESS(status)) {
		acpi_bus_get_device(pr_ahandle, &d);
		handle = pr_ahandle;

		if (strcmp(acpi_device_hid(d), ACPI_PROCESSOR_CONTAINER_HID))
			break;

		/* _LPI may be optional in processor containers */
		if (!acpi_has_method(handle, "_LPI"))
			break;

		ret = acpi_processor_evaluate_lpi(handle, curr);
		if (ret)
			break;

		/* flatten all the LPI states in this level of hierarchy */
		flatten_lpi_states(pr, curr, prev);

		tmp = prev, prev = curr, curr = tmp;

		status = acpi_get_parent(handle, &pr_ahandle);
	}

	pr->power.count = flat_state_cnt;
	/* reset the index after flattening */
	for (i = 0; i < pr->power.count; i++)
		pr->power.lpi_states[i].index = i;

	/* Tell driver that _LPI is supported. */
	pr->flags.has_lpi = 1;
	pr->flags.power = 1;

	return 0;
}

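/*
 * Weak stubs; architectures that support FFH-based LPI entry (e.g. arm64)
 * override these.
 */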
int __weak acpi_processor_ffh_lpi_probe(unsigned int cpu)
{
	return -ENODEV;
}

int __weak acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi)
{
	return -ENODEV;
}

/**
 * acpi_idle_lpi_enter - enters an ACPI LPI state
 * @dev: the target CPU
 * @drv: cpuidle driver containing cpuidle state info
 * @index: index of target state
 *
 * Return: 0 for success or negative value for error
 */
static int acpi_idle_lpi_enter(struct cpuidle_device *dev,
			       struct cpuidle_driver *drv, int index)
{
	struct acpi_processor *pr;
	struct acpi_lpi_state *lpi;

	pr = __this_cpu_read(processors);

	if (unlikely(!pr))
		return -EINVAL;

	lpi = &pr->power.lpi_states[index];
	if (lpi->entry_method == ACPI_CSTATE_FFH)
		return acpi_processor_ffh_lpi_enter(lpi);

	return -EINVAL;
}

static int acpi_processor_setup_lpi_states(struct acpi_processor *pr)
{
	int i;
	struct acpi_lpi_state *lpi;
	struct cpuidle_state *state;
	struct cpuidle_driver *drv = &acpi_idle_driver;

	if (!pr->flags.has_lpi)
		return -EOPNOTSUPP;

	for (i = 0; i < pr->power.count && i < CPUIDLE_STATE_MAX; i++) {
		lpi = &pr->power.lpi_states[i];

		state = &drv->states[i];
		snprintf(state->name, CPUIDLE_NAME_LEN, "LPI-%d", i);
		strlcpy(state->desc, lpi->desc, CPUIDLE_DESC_LEN);
		state->exit_latency = lpi->wake_latency;
		state->target_residency = lpi->min_residency;
		if (lpi->arch_flags)
			state->flags |= CPUIDLE_FLAG_TIMER_STOP;
		state->enter = acpi_idle_lpi_enter;
		drv->safe_state_index = i;
	}

	drv->state_count = i;

	return 0;
}

/**
 * acpi_processor_setup_cpuidle_states - prepares and configures cpuidle
 * global state data i.e. idle routines
 *
 * @pr: the ACPI processor
 */
static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
{
	int i;
	struct cpuidle_driver *drv = &acpi_idle_driver;

	if (!pr->flags.power_setup_done || !pr->flags.power)
		return -EINVAL;

	drv->safe_state_index = -1;
	for (i = ACPI_IDLE_STATE_START; i < CPUIDLE_STATE_MAX; i++) {
		drv->states[i].name[0] = '\0';
		drv->states[i].desc[0] = '\0';
	}

	if (pr->flags.has_lpi)
		return acpi_processor_setup_lpi_states(pr);

	return acpi_processor_setup_cstates(pr);
}

/**
 * acpi_processor_setup_cpuidle_dev - prepares and configures CPUIDLE
 * device i.e. per-cpu data
 *
 * @pr: the ACPI processor
 * @dev: the cpuidle device
 */
static int acpi_processor_setup_cpuidle_dev(struct acpi_processor *pr,
					    struct cpuidle_device *dev)
{
	if (!pr->flags.power_setup_done || !pr->flags.power || !dev)
		return -EINVAL;

	dev->cpu = pr->id;
	if (pr->flags.has_lpi)
		return acpi_processor_ffh_lpi_probe(pr->id);

	return acpi_processor_setup_cpuidle_cx(pr, dev);
}

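/**
 * acpi_processor_get_power_info - gather idle state data for a processor
 * @pr: the target processor
 *
 * Prefer _LPI; fall back to C-state enumeration when LPI is unavailable.
 */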
static int acpi_processor_get_power_info(struct acpi_processor *pr)
{
	int ret;

	ret = acpi_processor_get_lpi_info(pr);
	if (ret)
		ret = acpi_processor_get_cstate_info(pr);

	return ret;
}

int acpi_processor_hotplug(struct acpi_processor *pr)
{
	int ret = 0;
	struct cpuidle_device *dev;

	if (disabled_by_idle_boot_param())
		return 0;

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	dev = per_cpu(acpi_cpuidle_device, pr->id);
	cpuidle_pause_and_lock();
	cpuidle_disable_device(dev);
	ret = acpi_processor_get_power_info(pr);
	if (!ret && pr->flags.power) {
		acpi_processor_setup_cpuidle_dev(pr, dev);
		ret = cpuidle_enable_device(dev);
	}
	cpuidle_resume_and_unlock();

	return ret;
}

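/*
 * acpi_processor_power_state_has_changed - handle an ACPI notification
 * that the available C-states have changed: disable every cpuidle device,
 * rebuild the state tables, then re-enable the devices.
 */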
int acpi_processor_power_state_has_changed(struct acpi_processor *pr)
{
	int cpu;
	struct acpi_processor *_pr;
	struct cpuidle_device *dev;

	if (disabled_by_idle_boot_param())
		return 0;

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	/*
	 * FIXME: Design the ACPI notification to make it once per
	 * system instead of once per-cpu. This condition is a hack
	 * to make the code that updates C-States be called once.
	 */

	if (pr->id == 0 && cpuidle_get_driver() == &acpi_idle_driver) {

		/* Protect against cpu-hotplug */
		get_online_cpus();
		cpuidle_pause_and_lock();

		/* Disable all cpuidle devices */
		for_each_online_cpu(cpu) {
			_pr = per_cpu(processors, cpu);
			if (!_pr || !_pr->flags.power_setup_done)
				continue;
			dev = per_cpu(acpi_cpuidle_device, cpu);
			cpuidle_disable_device(dev);
		}

		/* Populate Updated C-state information */
		acpi_processor_get_power_info(pr);
		acpi_processor_setup_cpuidle_states(pr);

		/* Enable all cpuidle devices */
		for_each_online_cpu(cpu) {
			_pr = per_cpu(processors, cpu);
			if (!_pr || !_pr->flags.power_setup_done)
				continue;
			acpi_processor_get_power_info(_pr);
			if (_pr->flags.power) {
				dev = per_cpu(acpi_cpuidle_device, cpu);
				acpi_processor_setup_cpuidle_dev(_pr, dev);
				cpuidle_enable_device(dev);
			}
		}
		cpuidle_resume_and_unlock();
		put_online_cpus();
	}

	return 0;
}

static int acpi_processor_registered;

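/*
 * acpi_processor_power_init - set up cpuidle for one processor: register
 * the acpi_idle driver on first use, then allocate and register this
 * CPU's cpuidle device.
 */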
int acpi_processor_power_init(struct acpi_processor *pr)
{
	int retval;
	struct cpuidle_device *dev;

	if (disabled_by_idle_boot_param())
		return 0;

	acpi_processor_cstate_first_run_checks();

	if (!acpi_processor_get_power_info(pr))
		pr->flags.power_setup_done = 1;

	/*
	 * Install the idle handler if processor power management is
	 * supported. Note that the previously set idle handler will be
	 * used on platforms that only support C1.
	 */
	if (pr->flags.power) {
		/* Register acpi_idle_driver if not already registered */
		if (!acpi_processor_registered) {
			acpi_processor_setup_cpuidle_states(pr);
			retval = cpuidle_register_driver(&acpi_idle_driver);
			if (retval)
				return retval;
			pr_debug("%s registered with cpuidle\n",
				 acpi_idle_driver.name);
		}

		dev = kzalloc(sizeof(*dev), GFP_KERNEL);
		if (!dev)
			return -ENOMEM;
		per_cpu(acpi_cpuidle_device, pr->id) = dev;

		acpi_processor_setup_cpuidle_dev(pr, dev);

		/*
		 * Register the per-CPU cpuidle_device. The cpuidle driver
		 * must already be registered before registering the device.
		 */
		retval = cpuidle_register_device(dev);
		if (retval) {
			if (acpi_processor_registered == 0)
				cpuidle_unregister_driver(&acpi_idle_driver);
			return retval;
		}
		acpi_processor_registered++;
	}
	return 0;
}

int acpi_processor_power_exit(struct acpi_processor *pr)
{
	struct cpuidle_device *dev = per_cpu(acpi_cpuidle_device, pr->id);

	if (disabled_by_idle_boot_param())
		return 0;

	if (pr->flags.power) {
		cpuidle_unregister_device(dev);
		acpi_processor_registered--;
		if (acpi_processor_registered == 0)
			cpuidle_unregister_driver(&acpi_idle_driver);
	}

	pr->flags.power_setup_done = 0;
	return 0;
}