/*
 * cpuidle.c - core cpuidle infrastructure
 *
 * (C) 2006-2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *               Shaohua Li <shaohua.li@intel.com>
 *               Adam Belay <abelay@novell.com>
 *
 * This code is licenced under the GPL.
 */

#include <linux/clockchips.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/notifier.h>
#include <linux/pm_qos.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/tick.h>
#include <trace/events/power.h>

#include "cpuidle.h"

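/*
 * cpuidle_devices points to the device registered for each CPU, while
 * cpuidle_dev provides per-CPU backing storage for devices registered
 * through cpuidle_register() below.
 */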
DEFINE_PER_CPU(struct cpuidle_device *, cpuidle_devices);
DEFINE_PER_CPU(struct cpuidle_device, cpuidle_dev);

DEFINE_MUTEX(cpuidle_lock);
LIST_HEAD(cpuidle_detected_devices);

static int enabled_devices;
static int off __read_mostly;
static int initialized __read_mostly;

int cpuidle_disabled(void)
{
        return off;
}
void disable_cpuidle(void)
{
        off = 1;
}

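/*
 * Returns true when no idle state can be entered: cpuidle is off or not
 * initialized, or this CPU's driver or device is missing or disabled.
 */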
bool cpuidle_not_available(struct cpuidle_driver *drv,
                           struct cpuidle_device *dev)
{
        return off || !initialized || !drv || !dev || !dev->enabled;
}

/**
 * cpuidle_play_dead - cpu off-lining
 *
 * Returns only in case of an error or if there is no driver or no state
 * with an ->enter_dead() callback.
 */
int cpuidle_play_dead(void)
{
        struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
        struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
        int i;

        if (!drv)
                return -ENODEV;

        /* Find lowest-power state that supports long-term idle */
        for (i = drv->state_count - 1; i >= 0; i--)
                if (drv->states[i].enter_dead)
                        return drv->states[i].enter_dead(dev, i);

        return -ENODEV;
}

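/**
 * find_deepest_state - Find the deepest (highest exit latency) usable state.
 * @drv: cpuidle driver for the given CPU.
 * @dev: cpuidle device for the given CPU.
 * @max_latency: maximum exit latency allowed for the chosen state.
 * @forbidden_flags: state flags that disqualify a candidate state.
 * @freeze: if true, only consider states providing ->enter_freeze.
 *
 * Returns the index of the deepest state satisfying the constraints,
 * or 0 if none of the states beyond the first one does.
 */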
static int find_deepest_state(struct cpuidle_driver *drv,
                              struct cpuidle_device *dev,
                              unsigned int max_latency,
                              unsigned int forbidden_flags,
                              bool freeze)
{
        unsigned int latency_req = 0;
        int i, ret = 0;

        for (i = 1; i < drv->state_count; i++) {
                struct cpuidle_state *s = &drv->states[i];
                struct cpuidle_state_usage *su = &dev->states_usage[i];

                if (s->disabled || su->disable || s->exit_latency <= latency_req
                    || s->exit_latency > max_latency
                    || (s->flags & forbidden_flags)
                    || (freeze && !s->enter_freeze))
                        continue;

                latency_req = s->exit_latency;
                ret = i;
        }
        return ret;
}

/**
 * cpuidle_use_deepest_state - Set/clear governor override flag.
 * @enable: New value of the flag.
 *
 * Set or clear the flag that makes the current CPU use the deepest idle
 * state, overriding the governors from now on while the flag is set.
 */
void cpuidle_use_deepest_state(bool enable)
{
        struct cpuidle_device *dev;

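        /* Disable preemption so the flag is set on this CPU's own device. */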
        preempt_disable();
        dev = cpuidle_get_device();
        dev->use_deepest_state = enable;
        preempt_enable();
}

/**
 * cpuidle_find_deepest_state - Find the deepest available idle state.
 * @drv: cpuidle driver for the given CPU.
 * @dev: cpuidle device for the given CPU.
 */
int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
                               struct cpuidle_device *dev)
{
        return find_deepest_state(drv, dev, UINT_MAX, 0, false);
}

#ifdef CONFIG_SUSPEND
static void enter_freeze_proper(struct cpuidle_driver *drv,
                                struct cpuidle_device *dev, int index)
{
        /*
         * trace_suspend_resume() called by tick_freeze() for the last CPU
         * executing it contains RCU usage regarded as invalid in the idle
         * context, so tell RCU about that.
         */
        RCU_NONIDLE(tick_freeze());
        /*
         * The state used here cannot be a "coupled" one, because the "coupled"
         * cpuidle mechanism enables interrupts and doing that with timekeeping
         * suspended is generally unsafe.
         */
        stop_critical_timings();
        drv->states[index].enter_freeze(dev, drv, index);
        start_critical_timings();
        WARN_ON(!irqs_disabled());
        /*
         * timekeeping_resume() that will be called by tick_unfreeze() for the
         * first CPU executing it calls functions containing RCU read-side
         * critical sections, so tell RCU about that.
         */
        RCU_NONIDLE(tick_unfreeze());
}

/**
 * cpuidle_enter_freeze - Enter an idle state suitable for suspend-to-idle.
 * @drv: cpuidle driver for the given CPU.
 * @dev: cpuidle device for the given CPU.
 *
 * If there are states with the ->enter_freeze callback, find the deepest of
 * them and enter it with frozen tick.
 */
int cpuidle_enter_freeze(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
        int index;

        /*
         * Find the deepest state with ->enter_freeze present, which guarantees
         * that interrupts won't be enabled when it exits and allows the tick to
         * be frozen safely.
         */
        index = find_deepest_state(drv, dev, UINT_MAX, 0, true);
        if (index > 0)
                enter_freeze_proper(drv, dev, index);

        return index;
}
#endif /* CONFIG_SUSPEND */

/**
 * cpuidle_enter_state - enter the state and update stats
 * @dev: cpuidle device for this cpu
 * @drv: cpuidle driver for this cpu
 * @index: index into the states table in @drv of the state to enter
 */
int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
                        int index)
{
        int entered_state;

        struct cpuidle_state *target_state = &drv->states[index];
        bool broadcast = !!(target_state->flags & CPUIDLE_FLAG_TIMER_STOP);
        ktime_t time_start, time_end;
        s64 diff;

        /*
         * Tell the time framework to switch to a broadcast timer because our
         * local timer will be shut down. If a local timer is used from another
         * CPU as a broadcast timer, this call may fail if it is not available.
         */
        if (broadcast && tick_broadcast_enter()) {
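                /*
                 * Fall back to the deepest state that does not need the
                 * tick to be stopped and whose exit latency does not
                 * exceed that of the originally chosen state.
                 */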
                index = find_deepest_state(drv, dev, target_state->exit_latency,
                                           CPUIDLE_FLAG_TIMER_STOP, false);
                if (index < 0) {
                        default_idle_call();
                        return -EBUSY;
                }
                target_state = &drv->states[index];
        }

        /* Take note of the planned idle state. */
        sched_idle_set_state(target_state);

        trace_cpu_idle_rcuidle(index, dev->cpu);
        time_start = ns_to_ktime(local_clock());

        stop_critical_timings();
        entered_state = target_state->enter(dev, drv, index);
        start_critical_timings();

        time_end = ns_to_ktime(local_clock());
        trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);

        /* The cpu is no longer idle or about to enter idle. */
        sched_idle_set_state(NULL);

        if (broadcast) {
                if (WARN_ON_ONCE(!irqs_disabled()))
                        local_irq_disable();

                tick_broadcast_exit();
        }

        if (!cpuidle_state_is_coupled(drv, index))
                local_irq_enable();

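        /* dev->last_residency is an int, so clamp the 64-bit us delta. */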
        diff = ktime_us_delta(time_end, time_start);
        if (diff > INT_MAX)
                diff = INT_MAX;

        dev->last_residency = (int) diff;

        if (entered_state >= 0) {
                /*
                 * Update the cpuidle counters. This could be done in each
                 * driver's enter routine instead, but that would duplicate
                 * the same code in every driver.
                 */
                dev->states_usage[entered_state].time += dev->last_residency;
                dev->states_usage[entered_state].usage++;
        } else {
                dev->last_residency = 0;
        }

        return entered_state;
}

/**
 * cpuidle_select - ask the cpuidle framework to choose an idle state
 *
 * @drv: the cpuidle driver
 * @dev: the cpuidle device
 *
 * Returns the index of the idle state. The return value must not be negative.
 */
int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
        return cpuidle_curr_governor->select(drv, dev);
}

/**
 * cpuidle_enter - enter into the specified idle state
 *
 * @drv:   the cpuidle driver tied with the cpu
 * @dev:   the cpuidle device
 * @index: the index in the idle state table
 *
 * Returns the index of the entered idle state, or a negative error code
 * whose value depends on the backend driver.
 */
int cpuidle_enter(struct cpuidle_driver *drv, struct cpuidle_device *dev,
                  int index)
{
        if (cpuidle_state_is_coupled(drv, index))
                return cpuidle_enter_state_coupled(dev, drv, index);
        return cpuidle_enter_state(dev, drv, index);
}

/**
 * cpuidle_reflect - tell the underlying governor about the state we were in
 *
 * @dev:   the cpuidle device
 * @index: the index in the idle state table
 */
void cpuidle_reflect(struct cpuidle_device *dev, int index)
{
        if (cpuidle_curr_governor->reflect && index >= 0)
                cpuidle_curr_governor->reflect(dev, index);
}
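
/*
 * Typical sequence in the idle loop (sketch):
 *
 *        index = cpuidle_select(drv, dev);
 *        entered = cpuidle_enter(drv, dev, index);
 *        cpuidle_reflect(dev, entered);
 */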

/**
 * cpuidle_install_idle_handler - installs the cpuidle idle loop handler
 */
void cpuidle_install_idle_handler(void)
{
        if (enabled_devices) {
                /* Make sure all changes finished before we switch to new idle */
                smp_wmb();
                initialized = 1;
        }
}

/**
 * cpuidle_uninstall_idle_handler - uninstalls the cpuidle idle loop handler
 */
void cpuidle_uninstall_idle_handler(void)
{
        if (enabled_devices) {
                initialized = 0;
                wake_up_all_idle_cpus();
        }

        /*
         * Make sure external observers (such as the scheduler)
         * are done looking at pointed idle states.
         */
        synchronize_rcu();
}

/**
 * cpuidle_pause_and_lock - temporarily disables CPUIDLE
 */
void cpuidle_pause_and_lock(void)
{
        mutex_lock(&cpuidle_lock);
        cpuidle_uninstall_idle_handler();
}

EXPORT_SYMBOL_GPL(cpuidle_pause_and_lock);

/**
 * cpuidle_resume_and_unlock - resumes CPUIDLE operation
 */
void cpuidle_resume_and_unlock(void)
{
        cpuidle_install_idle_handler();
        mutex_unlock(&cpuidle_lock);
}

EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock);

/* Currently used in suspend/resume path to suspend cpuidle */
void cpuidle_pause(void)
{
        mutex_lock(&cpuidle_lock);
        cpuidle_uninstall_idle_handler();
        mutex_unlock(&cpuidle_lock);
}

/* Currently used in suspend/resume path to resume cpuidle */
void cpuidle_resume(void)
{
        mutex_lock(&cpuidle_lock);
        cpuidle_install_idle_handler();
        mutex_unlock(&cpuidle_lock);
}

/**
 * cpuidle_enable_device - enables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
int cpuidle_enable_device(struct cpuidle_device *dev)
{
        int ret;
        struct cpuidle_driver *drv;

        if (!dev)
                return -EINVAL;

        if (dev->enabled)
                return 0;

        drv = cpuidle_get_cpu_driver(dev);

        if (!drv || !cpuidle_curr_governor)
                return -EIO;

        if (!dev->registered)
                return -EINVAL;

        ret = cpuidle_add_device_sysfs(dev);
        if (ret)
                return ret;

        if (cpuidle_curr_governor->enable &&
            (ret = cpuidle_curr_governor->enable(drv, dev)))
                goto fail_sysfs;

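        /* Order the setup above before the dev->enabled store below. */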
        smp_wmb();

        dev->enabled = 1;

        enabled_devices++;
        return 0;

fail_sysfs:
        cpuidle_remove_device_sysfs(dev);

        return ret;
}

EXPORT_SYMBOL_GPL(cpuidle_enable_device);

/**
 * cpuidle_disable_device - disables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
void cpuidle_disable_device(struct cpuidle_device *dev)
{
        struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

        if (!dev || !dev->enabled)
                return;

        if (!drv || !cpuidle_curr_governor)
                return;

        dev->enabled = 0;

        if (cpuidle_curr_governor->disable)
                cpuidle_curr_governor->disable(drv, dev);

        cpuidle_remove_device_sysfs(dev);
        enabled_devices--;
}

EXPORT_SYMBOL_GPL(cpuidle_disable_device);

static void __cpuidle_unregister_device(struct cpuidle_device *dev)
{
        struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

        list_del(&dev->device_list);
        per_cpu(cpuidle_devices, dev->cpu) = NULL;
        module_put(drv->owner);

        dev->registered = 0;
}

static void __cpuidle_device_init(struct cpuidle_device *dev)
{
        memset(dev->states_usage, 0, sizeof(dev->states_usage));
        dev->last_residency = 0;
}

/**
 * __cpuidle_register_device - internal register function called before register
 * and enable routines
 * @dev: the cpu
 *
 * cpuidle_lock mutex must be held before this is called
 */
static int __cpuidle_register_device(struct cpuidle_device *dev)
{
        int ret;
        struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

        if (!try_module_get(drv->owner))
                return -EINVAL;

        per_cpu(cpuidle_devices, dev->cpu) = dev;
        list_add(&dev->device_list, &cpuidle_detected_devices);

        ret = cpuidle_coupled_register_device(dev);
        if (ret)
                __cpuidle_unregister_device(dev);
        else
                dev->registered = 1;

        return ret;
}

/**
 * cpuidle_register_device - registers a CPU's idle PM feature
 * @dev: the cpu
 */
int cpuidle_register_device(struct cpuidle_device *dev)
{
        int ret = -EBUSY;

        if (!dev)
                return -EINVAL;

        mutex_lock(&cpuidle_lock);

        if (dev->registered)
                goto out_unlock;

        __cpuidle_device_init(dev);

        ret = __cpuidle_register_device(dev);
        if (ret)
                goto out_unlock;

        ret = cpuidle_add_sysfs(dev);
        if (ret)
                goto out_unregister;

        ret = cpuidle_enable_device(dev);
        if (ret)
                goto out_sysfs;

        cpuidle_install_idle_handler();

out_unlock:
        mutex_unlock(&cpuidle_lock);

        return ret;

out_sysfs:
        cpuidle_remove_sysfs(dev);
out_unregister:
        __cpuidle_unregister_device(dev);
        goto out_unlock;
}

EXPORT_SYMBOL_GPL(cpuidle_register_device);

/**
 * cpuidle_unregister_device - unregisters a CPU's idle PM feature
 * @dev: the cpu
 */
void cpuidle_unregister_device(struct cpuidle_device *dev)
{
        if (!dev || dev->registered == 0)
                return;

        cpuidle_pause_and_lock();

        cpuidle_disable_device(dev);

        cpuidle_remove_sysfs(dev);

        __cpuidle_unregister_device(dev);

        cpuidle_coupled_unregister_device(dev);

        cpuidle_resume_and_unlock();
}

EXPORT_SYMBOL_GPL(cpuidle_unregister_device);

/**
 * cpuidle_unregister - unregister a driver and the devices it registered
 * @drv: a valid pointer to a struct cpuidle_driver
 *
 * This function can be used only if the driver has been previously
 * registered through the cpuidle_register() function.
 */
void cpuidle_unregister(struct cpuidle_driver *drv)
{
        int cpu;
        struct cpuidle_device *device;

        for_each_cpu(cpu, drv->cpumask) {
                device = &per_cpu(cpuidle_dev, cpu);
                cpuidle_unregister_device(device);
        }

        cpuidle_unregister_driver(drv);
}
EXPORT_SYMBOL_GPL(cpuidle_unregister);

/**
 * cpuidle_register - register the driver and the CPU devices with the
 * coupled_cpus passed as parameter
 * @drv: a valid pointer to a struct cpuidle_driver
 * @coupled_cpus: a cpumask for the coupled states
 *
 * This function covers the common initialization pattern found in the
 * arch-specific drivers. The per-CPU devices are defined globally in
 * this file.
 *
 * Returns 0 on success, < 0 otherwise
 */
int cpuidle_register(struct cpuidle_driver *drv,
                     const struct cpumask *const coupled_cpus)
{
        int ret, cpu;
        struct cpuidle_device *device;

        ret = cpuidle_register_driver(drv);
        if (ret) {
                pr_err("failed to register cpuidle driver\n");
                return ret;
        }

        for_each_cpu(cpu, drv->cpumask) {
                device = &per_cpu(cpuidle_dev, cpu);
                device->cpu = cpu;

#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
                /*
                 * On ARM multiplatform kernels, the coupled idle states
                 * could be enabled even if the cpuidle driver does not
                 * use them. Note that coupled_cpus is copied by value.
                 */
                if (coupled_cpus)
                        device->coupled_cpus = *coupled_cpus;
#endif
                ret = cpuidle_register_device(device);
                if (!ret)
                        continue;

                pr_err("Failed to register cpuidle device for cpu%d\n", cpu);

                cpuidle_unregister(drv);
                break;
        }

        return ret;
}
EXPORT_SYMBOL_GPL(cpuidle_register);
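
/*
 * Example (a sketch only, with hypothetical names and state values): a
 * minimal driver registering one idle state via cpuidle_register():
 *
 *        static struct cpuidle_driver my_idle_driver = {
 *                .name                = "my_idle",
 *                .owner               = THIS_MODULE,
 *                .states[0] = {
 *                        .name             = "WFI",
 *                        .desc             = "wait for interrupt",
 *                        .exit_latency     = 1,
 *                        .target_residency = 1,
 *                        .enter            = my_enter_wfi,
 *                },
 *                .state_count         = 1,
 *        };
 *
 *        ret = cpuidle_register(&my_idle_driver, NULL);
 */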

#ifdef CONFIG_SMP

/*
 * This function gets called when a part of the kernel has a new latency
 * requirement. This means we need to get all processors out of their C-state,
 * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that
 * wakes them all right up.
 */
static int cpuidle_latency_notify(struct notifier_block *b,
                                  unsigned long l, void *v)
{
        wake_up_all_idle_cpus();
        return NOTIFY_OK;
}

static struct notifier_block cpuidle_latency_notifier = {
        .notifier_call = cpuidle_latency_notify,
};

static inline void latency_notifier_init(struct notifier_block *n)
{
        pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY, n);
}

#else /* CONFIG_SMP */

#define latency_notifier_init(x) do { } while (0)

#endif /* CONFIG_SMP */

/**
 * cpuidle_init - core initializer
 */
static int __init cpuidle_init(void)
{
        int ret;

        if (cpuidle_disabled())
                return -ENODEV;

        ret = cpuidle_add_interface(cpu_subsys.dev_root);
        if (ret)
                return ret;

        latency_notifier_init(&cpuidle_latency_notifier);

        return 0;
}

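/* Booting with "cpuidle.off=1" disables cpuidle; the parameter is read-only. */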
module_param(off, int, 0444);
core_initcall(cpuidle_init);