/*
 * Generic entry point for the idle threads
 */
#include <linux/sched.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/cpuhotplug.h>
#include <linux/tick.h>
#include <linux/mm.h>
#include <linux/stackprotector.h>
#include <linux/suspend.h>

#include <asm/tlb.h>

#include <trace/events/power.h>

#include "sched.h"

/* Linker adds these: start and end of __cpuidle functions */
extern char __cpuidle_text_start[], __cpuidle_text_end[];

/**
 * sched_idle_set_state - Record idle state for the current CPU.
 * @idle_state: State to record.
 */
void sched_idle_set_state(struct cpuidle_state *idle_state)
{
        idle_set_state(this_rq(), idle_state);
}

static int __read_mostly cpu_idle_force_poll;

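/*
 * cpu_idle_poll_ctrl - force the idle loop into (or out of) polling mode.
 *
 * The count above behaves like a reference count: every enable must be
 * paired with a disable, and the idle loop polls instead of entering a
 * low-power state whenever the count is positive.
 */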
void cpu_idle_poll_ctrl(bool enable)
{
        if (enable) {
                cpu_idle_force_poll++;
        } else {
                cpu_idle_force_poll--;
                WARN_ON_ONCE(cpu_idle_force_poll < 0);
        }
}

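/*
 * Command-line handling: booting with "nohlt" forces the polling idle loop
 * from the start, while "hlt" restores the default behaviour of entering
 * low-power idle states.
 */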
#ifdef CONFIG_GENERIC_IDLE_POLL_SETUP
static int __init cpu_idle_poll_setup(char *__unused)
{
        cpu_idle_force_poll = 1;
        return 1;
}
__setup("nohlt", cpu_idle_poll_setup);

static int __init cpu_idle_nopoll_setup(char *__unused)
{
        cpu_idle_force_poll = 0;
        return 1;
}
__setup("hlt", cpu_idle_nopoll_setup);
#endif

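/*
 * Polling idle: spin with interrupts enabled until a reschedule is needed,
 * or until neither forced polling nor an expired tick-broadcast condition
 * keeps us spinning. Marked __cpuidle so its PC falls inside the text range
 * checked by cpu_in_idle().
 */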
static noinline int __cpuidle cpu_idle_poll(void)
{
        rcu_idle_enter();
        trace_cpu_idle_rcuidle(0, smp_processor_id());
        local_irq_enable();
        stop_critical_timings();
        while (!tif_need_resched() &&
               (cpu_idle_force_poll || tick_check_broadcast_expired()))
                cpu_relax();
        start_critical_timings();
        trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
        rcu_idle_exit();
        return 1;
}

/* Weak implementations for optional arch specific functions */
void __weak arch_cpu_idle_prepare(void) { }
void __weak arch_cpu_idle_enter(void) { }
void __weak arch_cpu_idle_exit(void) { }
void __weak arch_cpu_idle_dead(void) { }
void __weak arch_cpu_idle(void)
{
        cpu_idle_force_poll = 1;
        local_irq_enable();
}

/**
 * default_idle_call - Default CPU idle routine.
 *
 * To use when the cpuidle framework cannot be used.
 */
void __cpuidle default_idle_call(void)
{
        if (current_clr_polling_and_test()) {
                local_irq_enable();
        } else {
                stop_critical_timings();
                arch_cpu_idle();
                start_critical_timings();
        }
}

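/*
 * call_cpuidle - final hand-off into the cpuidle framework.
 *
 * Returns the index of the state actually entered, or -EBUSY if a
 * reschedule became pending just before entering idle.
 */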
static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
                        int next_state)
{
        /*
         * If the idle task needs to be rescheduled, entering idle is
         * pointless: record a zero residency and return.
         */
        if (current_clr_polling_and_test()) {
                dev->last_residency = 0;
                local_irq_enable();
                return -EBUSY;
        }

        /*
         * Enter the idle state previously returned by the governor decision.
         * This function will block until an interrupt occurs and will take
         * care of re-enabling the local interrupts.
         */
        return cpuidle_enter(drv, dev, next_state);
}

/**
 * cpuidle_idle_call - the main idle function
 *
 * NOTE: no locks or semaphores should be used here
 *
 * On archs that support TIF_POLLING_NRFLAG, this is called with polling
 * set, and it returns with polling set. If it ever stops polling, it
 * must clear the polling bit.
 */
static void cpuidle_idle_call(void)
{
        struct cpuidle_device *dev = cpuidle_get_device();
        struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
        int next_state, entered_state;

        /*
         * Check if the idle task must be rescheduled. If it is the
         * case, exit the function after re-enabling the local irq.
         */
        if (need_resched()) {
                local_irq_enable();
                return;
        }

        /*
         * Tell the RCU framework we are entering an idle section,
         * so no more rcu read side critical sections and one more
         * step to the grace period.
         */
        rcu_idle_enter();

        if (cpuidle_not_available(drv, dev)) {
                default_idle_call();
                goto exit_idle;
        }

        /*
         * Suspend-to-idle ("freeze") is a system state in which all user space
         * has been frozen, all I/O devices have been suspended and the only
         * activity happens here and in interrupts (if any). In that case bypass
         * the cpuidle governor and go straight for the deepest idle state
         * available. Possibly also suspend the local tick and the entire
         * timekeeping to prevent timer interrupts from kicking us out of idle
         * until a proper wakeup interrupt happens.
         */

        if (idle_should_freeze() || dev->use_deepest_state) {
                if (idle_should_freeze()) {
                        entered_state = cpuidle_enter_freeze(drv, dev);
                        if (entered_state > 0) {
                                local_irq_enable();
                                goto exit_idle;
                        }
                }

                next_state = cpuidle_find_deepest_state(drv, dev);
                call_cpuidle(drv, dev, next_state);
        } else {
                /*
                 * Ask the cpuidle framework to choose a convenient idle state.
                 */
                next_state = cpuidle_select(drv, dev);
                entered_state = call_cpuidle(drv, dev, next_state);
                /*
                 * Give the governor an opportunity to reflect on the outcome.
                 */
                cpuidle_reflect(dev, entered_state);
        }

exit_idle:
        __current_set_polling();

        /*
         * It is up to the idle functions to re-enable local interrupts.
         */
        if (WARN_ON_ONCE(irqs_disabled()))
                local_irq_enable();

        rcu_idle_exit();
}

/*
 * Generic idle loop implementation
 *
 * Called with polling cleared.
 */
static void do_idle(void)
{
        /*
         * If the arch has a polling bit, we maintain an invariant:
         *
         * Our polling bit is clear if we're not scheduled (i.e. if rq->curr !=
         * rq->idle). This means that, if rq->idle has the polling bit set,
         * then setting need_resched is guaranteed to cause the CPU to
         * reschedule.
         */

        __current_set_polling();
        tick_nohz_idle_enter();

        while (!need_resched()) {
                check_pgt_cache();
                rmb();

                if (cpu_is_offline(smp_processor_id())) {
                        cpuhp_report_idle_dead();
                        arch_cpu_idle_dead();
                }

                local_irq_disable();
                arch_cpu_idle_enter();

                /*
                 * In poll mode we reenable interrupts and spin. Also if we
                 * detected in the wakeup from idle path that the tick
                 * broadcast device expired for us, we don't want to go deep
                 * idle as we know that the IPI is going to arrive right away.
                 */
                if (cpu_idle_force_poll || tick_check_broadcast_expired())
                        cpu_idle_poll();
                else
                        cpuidle_idle_call();
                arch_cpu_idle_exit();
        }

        /*
         * Since we fell out of the loop above, we know TIF_NEED_RESCHED must
         * be set, propagate it into PREEMPT_NEED_RESCHED.
         *
         * This is required because for polling idle loops we will not have had
         * an IPI to fold the state for us.
         */
        preempt_set_need_resched();
        tick_nohz_idle_exit();
        __current_clr_polling();

        /*
         * We promise to call sched_ttwu_pending() and reschedule if
         * need_resched() is set while polling is set. That means that clearing
         * polling needs to be visible before doing these things.
         */
        smp_mb__after_atomic();

        sched_ttwu_pending();
        schedule_preempt_disabled();
}

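/*
 * cpu_in_idle - report whether @pc lies within the __cpuidle text section,
 * i.e. whether a CPU whose saved program counter is @pc was executing one of
 * the idle entry functions. Callers such as the NMI backtrace code can use
 * this to recognise idling CPUs.
 */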
bool cpu_in_idle(unsigned long pc)
{
        return pc >= (unsigned long)__cpuidle_text_start &&
                pc < (unsigned long)__cpuidle_text_end;
}

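/*
 * Bookkeeping for the idle-injection hrtimer used by play_idle(): the timer
 * callback flips @done so the injection loop knows when to stop.
 */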
struct idle_timer {
        struct hrtimer timer;
        int done;
};

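/*
 * Hrtimer callback that ends an injected idle period: mark the request as
 * done and set need_resched so do_idle() drops out of its inner loop.
 */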
static enum hrtimer_restart idle_inject_timer_fn(struct hrtimer *timer)
{
        struct idle_timer *it = container_of(timer, struct idle_timer, timer);

        WRITE_ONCE(it->done, 1);
        set_tsk_need_resched(current);

        return HRTIMER_NORESTART;
}

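/**
 * play_idle - inject an idle period on the current CPU.
 * @duration_ms: length of the forced idle period, in milliseconds.
 *
 * Runs the generic idle loop using the deepest available cpuidle state until
 * the pinned hrtimer armed below fires. Must be called from a CPU-bound
 * SCHED_FIFO kernel thread, as the WARN_ON_ONCE() checks below enforce.
 */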
void play_idle(unsigned long duration_ms)
{
        struct idle_timer it;

        /*
         * Only FIFO tasks can disable the tick since they don't need the forced
         * preemption.
         */
        WARN_ON_ONCE(current->policy != SCHED_FIFO);
        WARN_ON_ONCE(current->nr_cpus_allowed != 1);
        WARN_ON_ONCE(!(current->flags & PF_KTHREAD));
        WARN_ON_ONCE(!(current->flags & PF_NO_SETAFFINITY));
        WARN_ON_ONCE(!duration_ms);

        rcu_sleep_check();
        preempt_disable();
        current->flags |= PF_IDLE;
        cpuidle_use_deepest_state(true);

        it.done = 0;
        hrtimer_init_on_stack(&it.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        it.timer.function = idle_inject_timer_fn;
        hrtimer_start(&it.timer, ms_to_ktime(duration_ms), HRTIMER_MODE_REL_PINNED);

        while (!READ_ONCE(it.done))
                do_idle();

        cpuidle_use_deepest_state(false);
        current->flags &= ~PF_IDLE;

        preempt_fold_need_resched();
        preempt_enable();
}
EXPORT_SYMBOL_GPL(play_idle);
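
/*
 * Hedged usage sketch (not part of this file): an idle-injection driver would
 * typically spawn one pinned SCHED_FIFO kthread per CPU and call play_idle()
 * from its thread function, roughly:
 *
 *	static int idle_inject_thread_fn(void *data)	// hypothetical helper
 *	{
 *		while (!kthread_should_stop()) {
 *			play_idle(10);		// force ~10 ms of idle
 *			msleep(40);		// then let the CPU run normally
 *		}
 *		return 0;
 *	}
 *
 * The duty cycle and thread setup shown here are driver-specific assumptions,
 * not something this file mandates.
 */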

void cpu_startup_entry(enum cpuhp_state state)
{
        /*
         * This #ifdef needs to die, but it's too late in the cycle to
         * make this generic (arm and sh have never invoked the canary
         * init for the non boot cpus!). Will be fixed in 3.11
         */
#ifdef CONFIG_X86
        /*
         * If we're the non-boot CPU, nothing set the stack canary up
         * for us. The boot CPU already has it initialized but no harm
         * in doing it again. This is a good place for updating it, as
         * we won't ever return from this function (so the invalid
         * canaries already on the stack won't ever trigger).
         */
        boot_init_stack_canary();
#endif
        arch_cpu_idle_prepare();
        cpuhp_online_idle(state);
        while (1)
                do_idle();
}
/*
 * Generic entry point for the idle threads
 */
#include <linux/sched.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/tick.h>
#include <linux/mm.h>
#include <linux/stackprotector.h>

#include <asm/tlb.h>

#include <trace/events/power.h>

static int __read_mostly cpu_idle_force_poll;

void cpu_idle_poll_ctrl(bool enable)
{
        if (enable) {
                cpu_idle_force_poll++;
        } else {
                cpu_idle_force_poll--;
                WARN_ON_ONCE(cpu_idle_force_poll < 0);
        }
}

#ifdef CONFIG_GENERIC_IDLE_POLL_SETUP
static int __init cpu_idle_poll_setup(char *__unused)
{
        cpu_idle_force_poll = 1;
        return 1;
}
__setup("nohlt", cpu_idle_poll_setup);

static int __init cpu_idle_nopoll_setup(char *__unused)
{
        cpu_idle_force_poll = 0;
        return 1;
}
__setup("hlt", cpu_idle_nopoll_setup);
#endif

static inline int cpu_idle_poll(void)
{
        rcu_idle_enter();
        trace_cpu_idle_rcuidle(0, smp_processor_id());
        local_irq_enable();
        while (!tif_need_resched())
                cpu_relax();
        trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
        rcu_idle_exit();
        return 1;
}

/* Weak implementations for optional arch specific functions */
void __weak arch_cpu_idle_prepare(void) { }
void __weak arch_cpu_idle_enter(void) { }
void __weak arch_cpu_idle_exit(void) { }
void __weak arch_cpu_idle_dead(void) { }
void __weak arch_cpu_idle(void)
{
        cpu_idle_force_poll = 1;
        local_irq_enable();
}

/**
 * cpuidle_idle_call - the main idle function
 *
 * NOTE: no locks or semaphores should be used here
 * return non-zero on failure
 */
static int cpuidle_idle_call(void)
{
        struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
        struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
        int next_state, entered_state, ret;
        bool broadcast;

        /*
         * Check if the idle task must be rescheduled. If it is the
         * case, exit the function after re-enabling the local irq and
         * setting the polling flag again.
         */
        if (current_clr_polling_and_test()) {
                local_irq_enable();
                __current_set_polling();
                return 0;
        }

        /*
         * During the idle period, stop measuring the latency of
         * irqs-disabled critical sections.
         */
        stop_critical_timings();

        /*
         * Tell the RCU framework we are entering an idle section,
         * so no more rcu read side critical sections and one more
         * step to the grace period.
         */
        rcu_idle_enter();

        /*
         * Check if the cpuidle framework is ready, otherwise fall back
         * to the default arch specific idle method.
         */
        ret = cpuidle_enabled(drv, dev);

        if (!ret) {
                /*
                 * Ask the governor to choose an idle state it thinks
                 * is convenient to go to. There is *always* a
                 * convenient idle state.
                 */
                next_state = cpuidle_select(drv, dev);

                /*
                 * If the idle task must be rescheduled, it is pointless to
                 * enter idle: record a zero residency and get out of this
                 * function.
                 */
                if (current_clr_polling_and_test()) {
                        dev->last_residency = 0;
                        entered_state = next_state;
                        local_irq_enable();
                } else {
                        broadcast = !!(drv->states[next_state].flags &
                                       CPUIDLE_FLAG_TIMER_STOP);

                        if (broadcast)
                                /*
                                 * Tell the time framework to switch
                                 * to a broadcast timer because our
                                 * local timer will be shut down. If a
                                 * local timer is used from another
                                 * cpu as a broadcast timer, this call
                                 * may fail if it is not available.
                                 */
                                ret = clockevents_notify(
                                        CLOCK_EVT_NOTIFY_BROADCAST_ENTER,
                                        &dev->cpu);

                        if (!ret) {
                                trace_cpu_idle_rcuidle(next_state, dev->cpu);

                                /*
                                 * Enter the idle state previously
                                 * returned by the governor
                                 * decision. This function will block
                                 * until an interrupt occurs and will
                                 * take care of re-enabling the local
                                 * interrupts.
                                 */
                                entered_state = cpuidle_enter(drv, dev,
                                                              next_state);

                                trace_cpu_idle_rcuidle(PWR_EVENT_EXIT,
                                                       dev->cpu);

                                if (broadcast)
                                        clockevents_notify(
                                                CLOCK_EVT_NOTIFY_BROADCAST_EXIT,
                                                &dev->cpu);

                                /*
                                 * Give the governor an opportunity to reflect
                                 * on the outcome.
                                 */
                                cpuidle_reflect(dev, entered_state);
                        }
                }
        }

        /*
         * We can't use the cpuidle framework, let's use the default
         * idle routine.
         */
        if (ret)
                arch_cpu_idle();

        __current_set_polling();

        /*
         * It is up to the idle functions to re-enable local interrupts.
         */
        if (WARN_ON_ONCE(irqs_disabled()))
                local_irq_enable();

        rcu_idle_exit();
        start_critical_timings();

        return 0;
}

/*
 * Generic idle loop implementation
 */
static void cpu_idle_loop(void)
{
        while (1) {
                tick_nohz_idle_enter();

                while (!need_resched()) {
                        check_pgt_cache();
                        rmb();

                        if (cpu_is_offline(smp_processor_id()))
                                arch_cpu_idle_dead();

                        local_irq_disable();
                        arch_cpu_idle_enter();

                        /*
                         * In poll mode we reenable interrupts and spin.
                         *
                         * Also if we detected in the wakeup from idle
                         * path that the tick broadcast device expired
                         * for us, we don't want to go deep idle as we
                         * know that the IPI is going to arrive right
                         * away.
                         */
                        if (cpu_idle_force_poll || tick_check_broadcast_expired())
                                cpu_idle_poll();
                        else
                                cpuidle_idle_call();

                        arch_cpu_idle_exit();
                }

                /*
                 * Since we fell out of the loop above, we know
                 * TIF_NEED_RESCHED must be set, propagate it into
                 * PREEMPT_NEED_RESCHED.
                 *
                 * This is required because for polling idle loops we will
                 * not have had an IPI to fold the state for us.
                 */
                preempt_set_need_resched();
                tick_nohz_idle_exit();
                schedule_preempt_disabled();
        }
}

void cpu_startup_entry(enum cpuhp_state state)
{
        /*
         * This #ifdef needs to die, but it's too late in the cycle to
         * make this generic (arm and sh have never invoked the canary
         * init for the non boot cpus!). Will be fixed in 3.11
         */
#ifdef CONFIG_X86
        /*
         * If we're the non-boot CPU, nothing set the stack canary up
         * for us. The boot CPU already has it initialized but no harm
         * in doing it again. This is a good place for updating it, as
         * we won't ever return from this function (so the invalid
         * canaries already on the stack won't ever trigger).
         */
        boot_init_stack_canary();
#endif
        __current_set_polling();
        arch_cpu_idle_prepare();
        cpu_idle_loop();
}