v5.14.15
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Generic entry points for the idle threads and
  4 * implementation of the idle task scheduling class.
  5 *
  6 * (NOTE: these are not related to SCHED_IDLE batch scheduled
  7 *        tasks which are handled in sched/fair.c )
  8 */
  9#include "sched.h"
 10
 11#include <trace/events/power.h>
 12
 13/* Linker adds these: start and end of __cpuidle functions */
 14extern char __cpuidle_text_start[], __cpuidle_text_end[];
 15
 16/**
 17 * sched_idle_set_state - Record idle state for the current CPU.
 18 * @idle_state: State to record.
 19 */
 20void sched_idle_set_state(struct cpuidle_state *idle_state)
 21{
 22	idle_set_state(this_rq(), idle_state);
 23}
 24
 25static int __read_mostly cpu_idle_force_poll;
 26
 27void cpu_idle_poll_ctrl(bool enable)
 28{
 29	if (enable) {
 30		cpu_idle_force_poll++;
 31	} else {
 32		cpu_idle_force_poll--;
 33		WARN_ON_ONCE(cpu_idle_force_poll < 0);
 34	}
 35}
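
cpu_idle_poll_ctrl() above is a refcounted on/off switch for cpu_idle_force_poll: every enable call must eventually be paired with a disable call. A minimal sketch of how a caller outside this file might use it around a latency-critical window (the function name below is illustrative, not taken from the kernel):

#include <linux/cpu.h>	/* cpu_idle_poll_ctrl() */

/* Hypothetical example: keep CPUs in the polling idle loop while a short,
 * latency-critical operation runs, then restore normal idle behaviour.
 */
static void example_latency_critical_window(void)
{
	cpu_idle_poll_ctrl(true);	/* take a polling reference */

	/* ... latency-critical work ... */

	cpu_idle_poll_ctrl(false);	/* drop the reference again */
}
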
 36
 37#ifdef CONFIG_GENERIC_IDLE_POLL_SETUP
 38static int __init cpu_idle_poll_setup(char *__unused)
 39{
 40	cpu_idle_force_poll = 1;
 41
 42	return 1;
 43}
 44__setup("nohlt", cpu_idle_poll_setup);
 45
 46static int __init cpu_idle_nopoll_setup(char *__unused)
 47{
 48	cpu_idle_force_poll = 0;
 49
 50	return 1;
 51}
 52__setup("hlt", cpu_idle_nopoll_setup);
 53#endif
 54
 55static noinline int __cpuidle cpu_idle_poll(void)
 56{
 57	trace_cpu_idle(0, smp_processor_id());
 58	stop_critical_timings();
 59	rcu_idle_enter();
 60	local_irq_enable();
 61
 62	while (!tif_need_resched() &&
 63	       (cpu_idle_force_poll || tick_check_broadcast_expired()))
 64		cpu_relax();
 65
 66	rcu_idle_exit();
 67	start_critical_timings();
 68	trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
 69
 70	return 1;
 71}
 72
 73/* Weak implementations for optional arch specific functions */
 74void __weak arch_cpu_idle_prepare(void) { }
 75void __weak arch_cpu_idle_enter(void) { }
 76void __weak arch_cpu_idle_exit(void) { }
 77void __weak arch_cpu_idle_dead(void) { }
 78void __weak arch_cpu_idle(void)
 79{
 80	cpu_idle_force_poll = 1;
 81	raw_local_irq_enable();
 82}
 83
 84/**
 85 * default_idle_call - Default CPU idle routine.
 86 *
 87 * To use when the cpuidle framework cannot be used.
 88 */
 89void __cpuidle default_idle_call(void)
 90{
 91	if (current_clr_polling_and_test()) {
 92		local_irq_enable();
 93	} else {
 94
 95		trace_cpu_idle(1, smp_processor_id());
 96		stop_critical_timings();
 97
 98		/*
 99		 * arch_cpu_idle() is supposed to enable IRQs, however
100		 * we can't do that because of RCU and tracing.
101		 *
102		 * Trace IRQs enable here, then switch off RCU, and have
103		 * arch_cpu_idle() use raw_local_irq_enable(). Note that
104		 * rcu_idle_enter() relies on lockdep IRQ state, so switch that
105		 * last -- this is very similar to the entry code.
106		 */
107		trace_hardirqs_on_prepare();
108		lockdep_hardirqs_on_prepare(_THIS_IP_);
109		rcu_idle_enter();
110		lockdep_hardirqs_on(_THIS_IP_);
111
112		arch_cpu_idle();
113
114		/*
115		 * OK, so IRQs are enabled here, but RCU needs them disabled to
116		 * turn itself back on.. funny thing is that disabling IRQs
117		 * will cause tracing, which needs RCU. Jump through hoops to
118		 * make it 'work'.
119		 */
120		raw_local_irq_disable();
121		lockdep_hardirqs_off(_THIS_IP_);
122		rcu_idle_exit();
123		lockdep_hardirqs_on(_THIS_IP_);
124		raw_local_irq_enable();
125
126		start_critical_timings();
127		trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
128	}
129}
130
131static int call_cpuidle_s2idle(struct cpuidle_driver *drv,
132			       struct cpuidle_device *dev)
133{
134	if (current_clr_polling_and_test())
135		return -EBUSY;
136
137	return cpuidle_enter_s2idle(drv, dev);
138}
139
140static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
141		      int next_state)
142{
143	/*
144	 * The idle task must be scheduled, it is pointless to go to idle, just
145	 * update no idle residency and return.
146	 */
147	if (current_clr_polling_and_test()) {
148		dev->last_residency_ns = 0;
149		local_irq_enable();
150		return -EBUSY;
151	}
152
153	/*
154	 * Enter the idle state previously returned by the governor decision.
155	 * This function will block until an interrupt occurs and will take
156	 * care of re-enabling the local interrupts
157	 */
158	return cpuidle_enter(drv, dev, next_state);
159}
160
161/**
162 * cpuidle_idle_call - the main idle function
163 *
164 * NOTE: no locks or semaphores should be used here
165 *
166 * On architectures that support TIF_POLLING_NRFLAG, is called with polling
167 * set, and it returns with polling set.  If it ever stops polling, it
168 * must clear the polling bit.
169 */
170static void cpuidle_idle_call(void)
171{
172	struct cpuidle_device *dev = cpuidle_get_device();
173	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
174	int next_state, entered_state;
175
176	/*
177	 * Check if the idle task must be rescheduled. If it is the
178	 * case, exit the function after re-enabling the local irq.
179	 */
180	if (need_resched()) {
181		local_irq_enable();
182		return;
183	}
184
185	/*
186	 * The RCU framework needs to be told that we are entering an idle
187	 * section, so no more rcu read side critical sections and one more
188	 * step to the grace period
189	 */
190
191	if (cpuidle_not_available(drv, dev)) {
192		tick_nohz_idle_stop_tick();
193
194		default_idle_call();
195		goto exit_idle;
196	}
197
198	/*
199	 * Suspend-to-idle ("s2idle") is a system state in which all user space
200	 * has been frozen, all I/O devices have been suspended and the only
201	 * activity happens here and in interrupts (if any). In that case bypass
202	 * the cpuidle governor and go straight for the deepest idle state
203	 * available.  Possibly also suspend the local tick and the entire
204	 * timekeeping to prevent timer interrupts from kicking us out of idle
205	 * until a proper wakeup interrupt happens.
206	 */
207
208	if (idle_should_enter_s2idle() || dev->forced_idle_latency_limit_ns) {
209		u64 max_latency_ns;
210
211		if (idle_should_enter_s2idle()) {
212
213			entered_state = call_cpuidle_s2idle(drv, dev);
214			if (entered_state > 0)
215				goto exit_idle;
216
217			max_latency_ns = U64_MAX;
218		} else {
219			max_latency_ns = dev->forced_idle_latency_limit_ns;
220		}
221
222		tick_nohz_idle_stop_tick();
223
224		next_state = cpuidle_find_deepest_state(drv, dev, max_latency_ns);
225		call_cpuidle(drv, dev, next_state);
226	} else {
227		bool stop_tick = true;
228
229		/*
230		 * Ask the cpuidle framework to choose a convenient idle state.
231		 */
232		next_state = cpuidle_select(drv, dev, &stop_tick);
233
234		if (stop_tick || tick_nohz_tick_stopped())
235			tick_nohz_idle_stop_tick();
236		else
237			tick_nohz_idle_retain_tick();
238
239		entered_state = call_cpuidle(drv, dev, next_state);
240		/*
241		 * Give the governor an opportunity to reflect on the outcome
242		 */
243		cpuidle_reflect(dev, entered_state);
244	}
245
246exit_idle:
247	__current_set_polling();
248
249	/*
250	 * It is up to the idle functions to reenable local interrupts
251	 */
252	if (WARN_ON_ONCE(irqs_disabled()))
253		local_irq_enable();
254}
255
256/*
257 * Generic idle loop implementation
258 *
259 * Called with polling cleared.
260 */
261static void do_idle(void)
262{
263	int cpu = smp_processor_id();
264
265	/*
266	 * Check if we need to update blocked load
267	 */
268	nohz_run_idle_balance(cpu);
269
270	/*
271	 * If the arch has a polling bit, we maintain an invariant:
272	 *
273	 * Our polling bit is clear if we're not scheduled (i.e. if rq->curr !=
274	 * rq->idle). This means that, if rq->idle has the polling bit set,
275	 * then setting need_resched is guaranteed to cause the CPU to
276	 * reschedule.
277	 */
278
279	__current_set_polling();
280	tick_nohz_idle_enter();
281
282	while (!need_resched()) {
283		rmb();
284
285		local_irq_disable();
286
287		if (cpu_is_offline(cpu)) {
288			tick_nohz_idle_stop_tick();
289			cpuhp_report_idle_dead();
290			arch_cpu_idle_dead();
291		}
292
293		arch_cpu_idle_enter();
294		rcu_nocb_flush_deferred_wakeup();
295
296		/*
297		 * In poll mode we reenable interrupts and spin. Also if we
298		 * detected in the wakeup from idle path that the tick
299		 * broadcast device expired for us, we don't want to go deep
300		 * idle as we know that the IPI is going to arrive right away.
301		 */
302		if (cpu_idle_force_poll || tick_check_broadcast_expired()) {
303			tick_nohz_idle_restart_tick();
304			cpu_idle_poll();
305		} else {
306			cpuidle_idle_call();
307		}
308		arch_cpu_idle_exit();
309	}
310
311	/*
312	 * Since we fell out of the loop above, we know TIF_NEED_RESCHED must
313	 * be set, propagate it into PREEMPT_NEED_RESCHED.
314	 *
315	 * This is required because for polling idle loops we will not have had
316	 * an IPI to fold the state for us.
317	 */
318	preempt_set_need_resched();
319	tick_nohz_idle_exit();
320	__current_clr_polling();
321
322	/*
323	 * We promise to call sched_ttwu_pending() and reschedule if
324	 * need_resched() is set while polling is set. That means that clearing
325	 * polling needs to be visible before doing these things.
326	 */
327	smp_mb__after_atomic();
328
329	/*
330	 * RCU relies on this call to be done outside of an RCU read-side
331	 * critical section.
332	 */
333	flush_smp_call_function_from_idle();
334	schedule_idle();
335
336	if (unlikely(klp_patch_pending(current)))
337		klp_update_patch_state(current);
338}
339
340bool cpu_in_idle(unsigned long pc)
341{
342	return pc >= (unsigned long)__cpuidle_text_start &&
343		pc < (unsigned long)__cpuidle_text_end;
344}
345
346struct idle_timer {
347	struct hrtimer timer;
348	int done;
349};
350
351static enum hrtimer_restart idle_inject_timer_fn(struct hrtimer *timer)
352{
353	struct idle_timer *it = container_of(timer, struct idle_timer, timer);
354
355	WRITE_ONCE(it->done, 1);
356	set_tsk_need_resched(current);
357
358	return HRTIMER_NORESTART;
359}
360
361void play_idle_precise(u64 duration_ns, u64 latency_ns)
362{
363	struct idle_timer it;
364
365	/*
366	 * Only FIFO tasks can disable the tick since they don't need the forced
367	 * preemption.
368	 */
369	WARN_ON_ONCE(current->policy != SCHED_FIFO);
370	WARN_ON_ONCE(current->nr_cpus_allowed != 1);
371	WARN_ON_ONCE(!(current->flags & PF_KTHREAD));
372	WARN_ON_ONCE(!(current->flags & PF_NO_SETAFFINITY));
373	WARN_ON_ONCE(!duration_ns);
374	WARN_ON_ONCE(current->mm);
375
376	rcu_sleep_check();
377	preempt_disable();
378	current->flags |= PF_IDLE;
379	cpuidle_use_deepest_state(latency_ns);
380
381	it.done = 0;
382	hrtimer_init_on_stack(&it.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
383	it.timer.function = idle_inject_timer_fn;
384	hrtimer_start(&it.timer, ns_to_ktime(duration_ns),
385		      HRTIMER_MODE_REL_PINNED_HARD);
386
387	while (!READ_ONCE(it.done))
388		do_idle();
389
390	cpuidle_use_deepest_state(0);
391	current->flags &= ~PF_IDLE;
392
393	preempt_fold_need_resched();
394	preempt_enable();
395}
396EXPORT_SYMBOL_GPL(play_idle_precise);
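
play_idle_precise() is exported for idle-injection style users; the WARN_ON_ONCE() checks above spell out the contract: the caller must be a per-CPU SCHED_FIFO kthread with no mm and a non-zero duration. A rough, hypothetical sketch of a thread body honouring that contract (names and the 1 ms / 9 ms split are illustrative only):

#include <linux/cpu.h>		/* play_idle_precise() */
#include <linux/delay.h>	/* msleep() */
#include <linux/kthread.h>	/* kthread_should_stop() */
#include <linux/ktime.h>	/* NSEC_PER_MSEC */

/* Hypothetical per-CPU SCHED_FIFO kthread: alternate 1 ms of forced idle
 * with ~9 ms of normal operation, i.e. roughly 10% idle injection.
 */
static int example_idle_inject_fn(void *unused)
{
	while (!kthread_should_stop()) {
		/* Force idle for 1 ms with no extra exit-latency constraint. */
		play_idle_precise(NSEC_PER_MSEC, U64_MAX);
		msleep(9);
	}
	return 0;
}
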
397
398void cpu_startup_entry(enum cpuhp_state state)
399{
400	arch_cpu_idle_prepare();
401	cpuhp_online_idle(state);
402	while (1)
403		do_idle();
404}
405
406/*
407 * idle-task scheduling class.
408 */
409
410#ifdef CONFIG_SMP
411static int
412select_task_rq_idle(struct task_struct *p, int cpu, int flags)
413{
 414	return task_cpu(p); /* IDLE tasks are never migrated */
415}
416
417static int
418balance_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
419{
420	return WARN_ON_ONCE(1);
421}
422#endif
423
424/*
425 * Idle tasks are unconditionally rescheduled:
426 */
427static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int flags)
428{
429	resched_curr(rq);
430}
431
432static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
433{
434}
435
436static void set_next_task_idle(struct rq *rq, struct task_struct *next, bool first)
437{
438	update_idle_core(rq);
439	schedstat_inc(rq->sched_goidle);
440	queue_core_balance(rq);
441}
442
443#ifdef CONFIG_SMP
444static struct task_struct *pick_task_idle(struct rq *rq)
445{
446	return rq->idle;
447}
448#endif
449
450struct task_struct *pick_next_task_idle(struct rq *rq)
451{
452	struct task_struct *next = rq->idle;
453
454	set_next_task_idle(rq, next, true);
455
456	return next;
457}
458
459/*
460 * It is not legal to sleep in the idle task - print a warning
461 * message if some code attempts to do it:
462 */
463static void
464dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
465{
466	raw_spin_rq_unlock_irq(rq);
467	printk(KERN_ERR "bad: scheduling from the idle thread!\n");
468	dump_stack();
469	raw_spin_rq_lock_irq(rq);
470}
471
472/*
473 * scheduler tick hitting a task of our scheduling class.
474 *
475 * NOTE: This function can be called remotely by the tick offload that
476 * goes along full dynticks. Therefore no local assumption can be made
477 * and everything must be accessed through the @rq and @curr passed in
478 * parameters.
479 */
480static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
481{
482}
483
484static void switched_to_idle(struct rq *rq, struct task_struct *p)
485{
486	BUG();
487}
488
489static void
490prio_changed_idle(struct rq *rq, struct task_struct *p, int oldprio)
491{
492	BUG();
493}
494
495static void update_curr_idle(struct rq *rq)
496{
497}
498
499/*
500 * Simple, special scheduling class for the per-CPU idle tasks:
501 */
502DEFINE_SCHED_CLASS(idle) = {
503
504	/* no enqueue/yield_task for idle tasks */
505
506	/* dequeue is not valid, we print a debug message there: */
507	.dequeue_task		= dequeue_task_idle,
508
509	.check_preempt_curr	= check_preempt_curr_idle,
510
511	.pick_next_task		= pick_next_task_idle,
512	.put_prev_task		= put_prev_task_idle,
513	.set_next_task          = set_next_task_idle,
514
515#ifdef CONFIG_SMP
516	.balance		= balance_idle,
517	.pick_task		= pick_task_idle,
518	.select_task_rq		= select_task_rq_idle,
519	.set_cpus_allowed	= set_cpus_allowed_common,
520#endif
521
522	.task_tick		= task_tick_idle,
523
524	.prio_changed		= prio_changed_idle,
525	.switched_to		= switched_to_idle,
526	.update_curr		= update_curr_idle,
527};
v4.17
 
  1/*
  2 * Generic entry points for the idle threads and
  3 * implementation of the idle task scheduling class.
  4 *
  5 * (NOTE: these are not related to SCHED_IDLE batch scheduled
  6 *        tasks which are handled in sched/fair.c )
  7 */
  8#include "sched.h"
  9
 10#include <trace/events/power.h>
 11
 12/* Linker adds these: start and end of __cpuidle functions */
 13extern char __cpuidle_text_start[], __cpuidle_text_end[];
 14
 15/**
 16 * sched_idle_set_state - Record idle state for the current CPU.
 17 * @idle_state: State to record.
 18 */
 19void sched_idle_set_state(struct cpuidle_state *idle_state)
 20{
 21	idle_set_state(this_rq(), idle_state);
 22}
 23
 24static int __read_mostly cpu_idle_force_poll;
 25
 26void cpu_idle_poll_ctrl(bool enable)
 27{
 28	if (enable) {
 29		cpu_idle_force_poll++;
 30	} else {
 31		cpu_idle_force_poll--;
 32		WARN_ON_ONCE(cpu_idle_force_poll < 0);
 33	}
 34}
 35
 36#ifdef CONFIG_GENERIC_IDLE_POLL_SETUP
 37static int __init cpu_idle_poll_setup(char *__unused)
 38{
 39	cpu_idle_force_poll = 1;
 40
 41	return 1;
 42}
 43__setup("nohlt", cpu_idle_poll_setup);
 44
 45static int __init cpu_idle_nopoll_setup(char *__unused)
 46{
 47	cpu_idle_force_poll = 0;
 48
 49	return 1;
 50}
 51__setup("hlt", cpu_idle_nopoll_setup);
 52#endif
 53
 54static noinline int __cpuidle cpu_idle_poll(void)
 55{
 56	rcu_idle_enter();
 57	trace_cpu_idle_rcuidle(0, smp_processor_id());
 58	local_irq_enable();
 59	stop_critical_timings();
 60
 61	while (!tif_need_resched() &&
 62		(cpu_idle_force_poll || tick_check_broadcast_expired()))
 63		cpu_relax();
 64	start_critical_timings();
 65	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
 66	rcu_idle_exit();
 67
 68	return 1;
 69}
 70
 71/* Weak implementations for optional arch specific functions */
 72void __weak arch_cpu_idle_prepare(void) { }
 73void __weak arch_cpu_idle_enter(void) { }
 74void __weak arch_cpu_idle_exit(void) { }
 75void __weak arch_cpu_idle_dead(void) { }
 76void __weak arch_cpu_idle(void)
 77{
 78	cpu_idle_force_poll = 1;
 79	local_irq_enable();
 80}
 81
 82/**
 83 * default_idle_call - Default CPU idle routine.
 84 *
 85 * To use when the cpuidle framework cannot be used.
 86 */
 87void __cpuidle default_idle_call(void)
 88{
 89	if (current_clr_polling_and_test()) {
 90		local_irq_enable();
 91	} else {
 92		stop_critical_timings();
 93		arch_cpu_idle();
 94		start_critical_timings();
 95	}
 96}
 97
 98static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
 99		      int next_state)
100{
101	/*
102	 * The idle task must be scheduled, it is pointless to go to idle, just
103	 * update no idle residency and return.
104	 */
105	if (current_clr_polling_and_test()) {
106		dev->last_residency = 0;
107		local_irq_enable();
108		return -EBUSY;
109	}
110
111	/*
112	 * Enter the idle state previously returned by the governor decision.
113	 * This function will block until an interrupt occurs and will take
114	 * care of re-enabling the local interrupts
115	 */
116	return cpuidle_enter(drv, dev, next_state);
117}
118
119/**
120 * cpuidle_idle_call - the main idle function
121 *
122 * NOTE: no locks or semaphores should be used here
123 *
124 * On archs that support TIF_POLLING_NRFLAG, is called with polling
125 * set, and it returns with polling set.  If it ever stops polling, it
126 * must clear the polling bit.
127 */
128static void cpuidle_idle_call(void)
129{
130	struct cpuidle_device *dev = cpuidle_get_device();
131	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
132	int next_state, entered_state;
133
134	/*
135	 * Check if the idle task must be rescheduled. If it is the
136	 * case, exit the function after re-enabling the local irq.
137	 */
138	if (need_resched()) {
139		local_irq_enable();
140		return;
141	}
142
143	/*
144	 * The RCU framework needs to be told that we are entering an idle
145	 * section, so no more rcu read side critical sections and one more
146	 * step to the grace period
147	 */
148
149	if (cpuidle_not_available(drv, dev)) {
150		tick_nohz_idle_stop_tick();
151		rcu_idle_enter();
152
153		default_idle_call();
154		goto exit_idle;
155	}
156
157	/*
158	 * Suspend-to-idle ("s2idle") is a system state in which all user space
159	 * has been frozen, all I/O devices have been suspended and the only
 160	 * activity happens here and in interrupts (if any).  In that case bypass
 161	 * the cpuidle governor and go straight for the deepest idle state
162	 * available.  Possibly also suspend the local tick and the entire
163	 * timekeeping to prevent timer interrupts from kicking us out of idle
164	 * until a proper wakeup interrupt happens.
165	 */
166
167	if (idle_should_enter_s2idle() || dev->use_deepest_state) {
168		if (idle_should_enter_s2idle()) {
169			rcu_idle_enter();
170
171			entered_state = cpuidle_enter_s2idle(drv, dev);
172			if (entered_state > 0) {
173				local_irq_enable();
174				goto exit_idle;
175			}
176
177			rcu_idle_exit();
178		}
179
180		tick_nohz_idle_stop_tick();
181		rcu_idle_enter();
182
183		next_state = cpuidle_find_deepest_state(drv, dev);
184		call_cpuidle(drv, dev, next_state);
185	} else {
186		bool stop_tick = true;
187
188		/*
189		 * Ask the cpuidle framework to choose a convenient idle state.
190		 */
191		next_state = cpuidle_select(drv, dev, &stop_tick);
192
193		if (stop_tick)
194			tick_nohz_idle_stop_tick();
195		else
196			tick_nohz_idle_retain_tick();
197
198		rcu_idle_enter();
199
200		entered_state = call_cpuidle(drv, dev, next_state);
201		/*
202		 * Give the governor an opportunity to reflect on the outcome
203		 */
204		cpuidle_reflect(dev, entered_state);
205	}
206
207exit_idle:
208	__current_set_polling();
209
210	/*
211	 * It is up to the idle functions to reenable local interrupts
212	 */
213	if (WARN_ON_ONCE(irqs_disabled()))
214		local_irq_enable();
215
216	rcu_idle_exit();
217}
218
219/*
220 * Generic idle loop implementation
221 *
222 * Called with polling cleared.
223 */
224static void do_idle(void)
225{
226	int cpu = smp_processor_id();
227	/*
228	 * If the arch has a polling bit, we maintain an invariant:
229	 *
230	 * Our polling bit is clear if we're not scheduled (i.e. if rq->curr !=
231	 * rq->idle). This means that, if rq->idle has the polling bit set,
232	 * then setting need_resched is guaranteed to cause the CPU to
233	 * reschedule.
234	 */
235
236	__current_set_polling();
237	tick_nohz_idle_enter();
238
239	while (!need_resched()) {
240		check_pgt_cache();
241		rmb();
242
243		if (cpu_is_offline(cpu)) {
244			tick_nohz_idle_stop_tick_protected();
245			cpuhp_report_idle_dead();
246			arch_cpu_idle_dead();
247		}
248
249		local_irq_disable();
250		arch_cpu_idle_enter();
251
252		/*
253		 * In poll mode we reenable interrupts and spin. Also if we
254		 * detected in the wakeup from idle path that the tick
255		 * broadcast device expired for us, we don't want to go deep
256		 * idle as we know that the IPI is going to arrive right away.
257		 */
258		if (cpu_idle_force_poll || tick_check_broadcast_expired()) {
259			tick_nohz_idle_restart_tick();
260			cpu_idle_poll();
261		} else {
262			cpuidle_idle_call();
263		}
264		arch_cpu_idle_exit();
265	}
266
267	/*
268	 * Since we fell out of the loop above, we know TIF_NEED_RESCHED must
269	 * be set, propagate it into PREEMPT_NEED_RESCHED.
270	 *
271	 * This is required because for polling idle loops we will not have had
272	 * an IPI to fold the state for us.
273	 */
274	preempt_set_need_resched();
275	tick_nohz_idle_exit();
276	__current_clr_polling();
277
278	/*
279	 * We promise to call sched_ttwu_pending() and reschedule if
280	 * need_resched() is set while polling is set. That means that clearing
281	 * polling needs to be visible before doing these things.
282	 */
283	smp_mb__after_atomic();
284
285	sched_ttwu_pending();
286	schedule_idle();
287
288	if (unlikely(klp_patch_pending(current)))
289		klp_update_patch_state(current);
290}
291
292bool cpu_in_idle(unsigned long pc)
293{
294	return pc >= (unsigned long)__cpuidle_text_start &&
295		pc < (unsigned long)__cpuidle_text_end;
296}
297
298struct idle_timer {
299	struct hrtimer timer;
300	int done;
301};
302
303static enum hrtimer_restart idle_inject_timer_fn(struct hrtimer *timer)
304{
305	struct idle_timer *it = container_of(timer, struct idle_timer, timer);
306
307	WRITE_ONCE(it->done, 1);
308	set_tsk_need_resched(current);
309
310	return HRTIMER_NORESTART;
311}
312
313void play_idle(unsigned long duration_ms)
314{
315	struct idle_timer it;
316
317	/*
318	 * Only FIFO tasks can disable the tick since they don't need the forced
319	 * preemption.
320	 */
321	WARN_ON_ONCE(current->policy != SCHED_FIFO);
322	WARN_ON_ONCE(current->nr_cpus_allowed != 1);
323	WARN_ON_ONCE(!(current->flags & PF_KTHREAD));
324	WARN_ON_ONCE(!(current->flags & PF_NO_SETAFFINITY));
325	WARN_ON_ONCE(!duration_ms);
326
327	rcu_sleep_check();
328	preempt_disable();
329	current->flags |= PF_IDLE;
330	cpuidle_use_deepest_state(true);
331
332	it.done = 0;
333	hrtimer_init_on_stack(&it.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
334	it.timer.function = idle_inject_timer_fn;
335	hrtimer_start(&it.timer, ms_to_ktime(duration_ms), HRTIMER_MODE_REL_PINNED);
336
337	while (!READ_ONCE(it.done))
338		do_idle();
339
340	cpuidle_use_deepest_state(false);
341	current->flags &= ~PF_IDLE;
342
343	preempt_fold_need_resched();
344	preempt_enable();
345}
346EXPORT_SYMBOL_GPL(play_idle);
347
348void cpu_startup_entry(enum cpuhp_state state)
349{
350	/*
351	 * This #ifdef needs to die, but it's too late in the cycle to
352	 * make this generic (ARM and SH have never invoked the canary
353	 * init for the non boot CPUs!). Will be fixed in 3.11
354	 */
355#ifdef CONFIG_X86
356	/*
357	 * If we're the non-boot CPU, nothing set the stack canary up
358	 * for us. The boot CPU already has it initialized but no harm
359	 * in doing it again. This is a good place for updating it, as
 360	 * we won't ever return from this function (so the invalid
 361	 * canaries already on the stack won't ever trigger).
362	 */
363	boot_init_stack_canary();
364#endif
365	arch_cpu_idle_prepare();
366	cpuhp_online_idle(state);
367	while (1)
368		do_idle();
369}
370
371/*
372 * idle-task scheduling class.
373 */
374
375#ifdef CONFIG_SMP
376static int
377select_task_rq_idle(struct task_struct *p, int cpu, int sd_flag, int flags)
378{
 379	return task_cpu(p); /* IDLE tasks are never migrated */
380}
381#endif
382
383/*
384 * Idle tasks are unconditionally rescheduled:
385 */
386static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int flags)
387{
388	resched_curr(rq);
389}
390
391static struct task_struct *
392pick_next_task_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
393{
394	put_prev_task(rq, prev);
395	update_idle_core(rq);
396	schedstat_inc(rq->sched_goidle);
397
398	return rq->idle;
399}
400
401/*
402 * It is not legal to sleep in the idle task - print a warning
403 * message if some code attempts to do it:
404 */
405static void
406dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
407{
408	raw_spin_unlock_irq(&rq->lock);
409	printk(KERN_ERR "bad: scheduling from the idle thread!\n");
410	dump_stack();
411	raw_spin_lock_irq(&rq->lock);
412}
413
414static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
415{
416}
417
418/*
419 * scheduler tick hitting a task of our scheduling class.
420 *
421 * NOTE: This function can be called remotely by the tick offload that
422 * goes along full dynticks. Therefore no local assumption can be made
423 * and everything must be accessed through the @rq and @curr passed in
424 * parameters.
425 */
426static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
427{
428}
429
430static void set_curr_task_idle(struct rq *rq)
431{
432}
433
434static void switched_to_idle(struct rq *rq, struct task_struct *p)
435{
436	BUG();
437}
438
439static void
440prio_changed_idle(struct rq *rq, struct task_struct *p, int oldprio)
441{
442	BUG();
443}
444
445static unsigned int get_rr_interval_idle(struct rq *rq, struct task_struct *task)
446{
447	return 0;
448}
449
450static void update_curr_idle(struct rq *rq)
451{
452}
453
454/*
455 * Simple, special scheduling class for the per-CPU idle tasks:
456 */
457const struct sched_class idle_sched_class = {
458	/* .next is NULL */
459	/* no enqueue/yield_task for idle tasks */
460
461	/* dequeue is not valid, we print a debug message there: */
462	.dequeue_task		= dequeue_task_idle,
463
464	.check_preempt_curr	= check_preempt_curr_idle,
465
466	.pick_next_task		= pick_next_task_idle,
467	.put_prev_task		= put_prev_task_idle,
468
469#ifdef CONFIG_SMP
470	.select_task_rq		= select_task_rq_idle,
471	.set_cpus_allowed	= set_cpus_allowed_common,
472#endif
473
474	.set_curr_task          = set_curr_task_idle,
475	.task_tick		= task_tick_idle,
476
477	.get_rr_interval	= get_rr_interval_idle,
478
479	.prio_changed		= prio_changed_idle,
480	.switched_to		= switched_to_idle,
481	.update_curr		= update_curr_idle,
482};