v4.6
 
  1/*
  2 * Generic entry point for the idle threads
  3 */
  4#include <linux/sched.h>
  5#include <linux/cpu.h>
  6#include <linux/cpuidle.h>
  7#include <linux/cpuhotplug.h>
  8#include <linux/tick.h>
  9#include <linux/mm.h>
 10#include <linux/stackprotector.h>
 11#include <linux/suspend.h>
 12
 13#include <asm/tlb.h>
 14
 15#include <trace/events/power.h>
 16
 17#include "sched.h"
 18
 19/**
 20 * sched_idle_set_state - Record idle state for the current CPU.
 21 * @idle_state: State to record.
 22 */
 23void sched_idle_set_state(struct cpuidle_state *idle_state)
 24{
 25	idle_set_state(this_rq(), idle_state);
 26}
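
sched_idle_set_state() is how the cpuidle core tells the scheduler which idle state this CPU is about to enter (and, with NULL, that it has left it again). Below is a hedged, simplified sketch of that bracketing, loosely modeled on drivers/cpuidle/cpuidle.c; error handling and statistics are omitted and the function name is made up for illustration.

#include <linux/cpuidle.h>

/* Hypothetical illustration of how the cpuidle core brackets a state entry. */
static int example_enter_state(struct cpuidle_driver *drv,
			       struct cpuidle_device *dev,
			       struct cpuidle_state *target_state, int index)
{
	int entered;

	sched_idle_set_state(target_state);	/* scheduler sees the pending state */
	entered = target_state->enter(dev, drv, index);
	sched_idle_set_state(NULL);		/* CPU counts as busy again */

	return entered;
}
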
 27
 28static int __read_mostly cpu_idle_force_poll;
 29
 30void cpu_idle_poll_ctrl(bool enable)
 31{
 32	if (enable) {
 33		cpu_idle_force_poll++;
 34	} else {
 35		cpu_idle_force_poll--;
 36		WARN_ON_ONCE(cpu_idle_force_poll < 0);
 37	}
 38}
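
cpu_idle_poll_ctrl() keeps a reference count, so every enable must eventually be paired with a disable. As a hedged illustration (cpu_idle_poll_ctrl() is the real interface from <linux/cpu.h>, but the driver hooks below are hypothetical), a platform driver that has to quirk out broken C-states might force the idle loop into polling like this:

#include <linux/cpu.h>	/* cpu_idle_poll_ctrl() */

/* Hypothetical quirk: keep idle CPUs spinning in cpu_idle_poll(). */
static void example_quirk_enable(void)
{
	cpu_idle_poll_ctrl(true);	/* bump cpu_idle_force_poll */
}

/* Hypothetical quirk removal: let cpuidle states be used again. */
static void example_quirk_disable(void)
{
	cpu_idle_poll_ctrl(false);	/* drop the count; WARNs if it goes negative */
}
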
 39
 40#ifdef CONFIG_GENERIC_IDLE_POLL_SETUP
 41static int __init cpu_idle_poll_setup(char *__unused)
 42{
 43	cpu_idle_force_poll = 1;
 44	return 1;
 45}
 46__setup("nohlt", cpu_idle_poll_setup);
 47
 48static int __init cpu_idle_nopoll_setup(char *__unused)
 49{
 50	cpu_idle_force_poll = 0;
 51	return 1;
 52}
 53__setup("hlt", cpu_idle_nopoll_setup);
 54#endif
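
The two handlers above wire the "nohlt" and "hlt" kernel command-line options to the polling flag; returning 1 tells the early-parameter code the option was consumed. As a hedged sketch of the same __setup() pattern (the option name and flag below are invented for illustration):

#include <linux/init.h>

static int example_option;	/* hypothetical flag, illustration only */

/* Handles "exampleopt" on the kernel command line. */
static int __init example_option_setup(char *__unused)
{
	example_option = 1;
	return 1;	/* option handled */
}
__setup("exampleopt", example_option_setup);
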
 55
 56static inline int cpu_idle_poll(void)
 57{
 58	rcu_idle_enter();
 59	trace_cpu_idle_rcuidle(0, smp_processor_id());
 60	local_irq_enable();
 61	stop_critical_timings();
 62	while (!tif_need_resched() &&
 63		(cpu_idle_force_poll || tick_check_broadcast_expired()))
 64		cpu_relax();
 65	start_critical_timings();
 66	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
 67	rcu_idle_exit();
 68	return 1;
 69}
 70
 71/* Weak implementations for optional arch specific functions */
 72void __weak arch_cpu_idle_prepare(void) { }
 73void __weak arch_cpu_idle_enter(void) { }
 74void __weak arch_cpu_idle_exit(void) { }
 75void __weak arch_cpu_idle_dead(void) { }
 76void __weak arch_cpu_idle(void)
 77{
 78	cpu_idle_force_poll = 1;
 79	local_irq_enable();
 80}
 81
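
The __weak definitions above only act as fallbacks; an architecture normally provides its own arch_cpu_idle() that really halts the CPU until the next interrupt. A hedged, x86-flavoured sketch of such an override (real ports go through further indirection, e.g. x86_idle(), so this is illustrative only):

/* Hypothetical architecture override of the weak arch_cpu_idle() above. */
void arch_cpu_idle(void)
{
	/*
	 * Re-enable interrupts and halt in one step ("sti; hlt" on x86),
	 * returning once a wakeup interrupt has been handled.
	 */
	asm volatile("sti; hlt" ::: "memory");
}
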
 82/**
 83 * default_idle_call - Default CPU idle routine.
 84 *
 85 * To use when the cpuidle framework cannot be used.
 86 */
 87void default_idle_call(void)
 88{
 89	if (current_clr_polling_and_test()) {
 90		local_irq_enable();
 91	} else {
 92		stop_critical_timings();
 93		arch_cpu_idle();
 94		start_critical_timings();
 95	}
 96}
 97
 98static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
 99		      int next_state)
100{
101	/*
102	 * The idle task must be scheduled; it is pointless to go to idle, so
103	 * just record a zero idle residency and return.
104	 */
105	if (current_clr_polling_and_test()) {
106		dev->last_residency = 0;
107		local_irq_enable();
108		return -EBUSY;
109	}
110
111	/*
112	 * Enter the idle state previously returned by the governor decision.
113	 * This function will block until an interrupt occurs and will take
114	 * care of re-enabling the local interrupts
115	 */
116	return cpuidle_enter(drv, dev, next_state);
117}
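
cpuidle_enter() ends up in the ->enter() callback of the state the governor picked, and the returned index is what call_cpuidle() hands back for cpuidle_reflect(). A hedged, minimal sketch of a driver-side state table (names and latencies are invented; registration via cpuidle_register() and the real low-power entry are omitted):

#include <linux/cpuidle.h>

/* Hypothetical ->enter() callback reached via cpuidle_enter() above. */
static int example_enter_c1(struct cpuidle_device *dev,
			    struct cpuidle_driver *drv, int index)
{
	/* Arch-specific "wait for interrupt" would go here. */
	return index;	/* index of the state actually entered */
}

static struct cpuidle_driver example_idle_driver = {
	.name		= "example_idle",
	.state_count	= 1,
	.states		= {
		{
			.name			= "C1",
			.desc			= "hypothetical shallow state",
			.exit_latency		= 2,	/* us */
			.target_residency	= 10,	/* us */
			.enter			= example_enter_c1,
		},
	},
};
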
118
119/**
120 * cpuidle_idle_call - the main idle function
121 *
122 * NOTE: no locks or semaphores should be used here
123 *
124 * On archs that support TIF_POLLING_NRFLAG, this function is called
125 * with polling set, and it returns with polling set.  If it ever stops
126 * polling, it must clear the polling bit.
127 */
128static void cpuidle_idle_call(void)
129{
130	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
131	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
132	int next_state, entered_state;
133
134	/*
135	 * Check if the idle task must be rescheduled. If it is the
136	 * case, exit the function after re-enabling the local irq.
137	 */
138	if (need_resched()) {
139		local_irq_enable();
140		return;
141	}
142
143	/*
144	 * Tell the RCU framework we are entering an idle section,
145	 * so no more rcu read side critical sections and one more
146	 * step to the grace period
147	 */
148	rcu_idle_enter();
149
150	if (cpuidle_not_available(drv, dev)) {
151		default_idle_call();
152		goto exit_idle;
153	}
154
155	/*
156	 * Suspend-to-idle ("freeze") is a system state in which all user space
157	 * has been frozen, all I/O devices have been suspended and the only
158	 * activity happens here and in interrupts (if any).  In that case bypass
159	 * the cpuidle governor and go straight for the deepest idle state
160	 * available.  Possibly also suspend the local tick and the entire
161	 * timekeeping to prevent timer interrupts from kicking us out of idle
162	 * until a proper wakeup interrupt happens.
163	 */
164	if (idle_should_freeze()) {
165		entered_state = cpuidle_enter_freeze(drv, dev);
166		if (entered_state > 0) {
167			local_irq_enable();
168			goto exit_idle;
169		}
170
171		next_state = cpuidle_find_deepest_state(drv, dev);
172		call_cpuidle(drv, dev, next_state);
173	} else {
174		/*
175		 * Ask the cpuidle framework to choose a convenient idle state.
176		 */
177		next_state = cpuidle_select(drv, dev);
178		entered_state = call_cpuidle(drv, dev, next_state);
179		/*
180		 * Give the governor an opportunity to reflect on the outcome
181		 */
182		cpuidle_reflect(dev, entered_state);
183	}
184
185exit_idle:
186	__current_set_polling();
187
188	/*
189	 * It is up to the idle functions to reenable local interrupts
190	 */
191	if (WARN_ON_ONCE(irqs_disabled()))
192		local_irq_enable();
193
194	rcu_idle_exit();
195}
196
197/*
198 * Generic idle loop implementation
199 *
200 * Called with polling cleared.
201 */
202static void cpu_idle_loop(void)
203{
204	while (1) {
205		/*
206		 * If the arch has a polling bit, we maintain an invariant:
207		 *
208		 * Our polling bit is clear if we're not scheduled (i.e. if
209		 * rq->curr != rq->idle).  This means that, if rq->idle has
210		 * the polling bit set, then setting need_resched is
211		 * guaranteed to cause the cpu to reschedule.
212		 */
213
214		__current_set_polling();
215		quiet_vmstat();
216		tick_nohz_idle_enter();
217
218		while (!need_resched()) {
219			check_pgt_cache();
220			rmb();
221
222			if (cpu_is_offline(smp_processor_id())) {
223				cpuhp_report_idle_dead();
224				arch_cpu_idle_dead();
225			}
226
227			local_irq_disable();
228			arch_cpu_idle_enter();
229
230			/*
231			 * In poll mode we reenable interrupts and spin.
232			 *
233			 * Also if we detected in the wakeup from idle
234			 * path that the tick broadcast device expired
235			 * for us, we don't want to go deep idle as we
236			 * know that the IPI is going to arrive right
237			 * away
238			 */
239			if (cpu_idle_force_poll || tick_check_broadcast_expired())
240				cpu_idle_poll();
241			else
242				cpuidle_idle_call();
243
244			arch_cpu_idle_exit();
245		}
246
247		/*
248		 * Since we fell out of the loop above, we know
249		 * TIF_NEED_RESCHED must be set, propagate it into
250		 * PREEMPT_NEED_RESCHED.
251		 *
252		 * This is required because for polling idle loops we will
253		 * not have had an IPI to fold the state for us.
254		 */
255		preempt_set_need_resched();
256		tick_nohz_idle_exit();
257		__current_clr_polling();
258
259		/*
260		 * We promise to call sched_ttwu_pending and reschedule
261		 * if need_resched is set while polling is set.  That
262		 * means that clearing polling needs to be visible
263		 * before doing these things.
264		 */
265		smp_mb__after_atomic();
266
267		sched_ttwu_pending();
268		schedule_preempt_disabled();
269	}
270}
271
272void cpu_startup_entry(enum cpuhp_state state)
273{
274	/*
275	 * This #ifdef needs to die, but it's too late in the cycle to
276	 * make this generic (arm and sh have never invoked the canary
277	 * init for the non boot cpus!). Will be fixed in 3.11
278	 */
279#ifdef CONFIG_X86
280	/*
281	 * If we're the non-boot CPU, nothing set the stack canary up
282	 * for us. The boot CPU already has it initialized but no harm
283	 * in doing it again. This is a good place for updating it, as
284	 * we won't ever return from this function (so the invalid
285	 * canaries already on the stack won't ever trigger).
286	 */
287	boot_init_stack_canary();
288#endif
289	arch_cpu_idle_prepare();
290	cpuhp_online_idle(state);
291	cpu_idle_loop();
292}
v5.9
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Generic entry points for the idle threads and
  4 * implementation of the idle task scheduling class.
  5 *
  6 * (NOTE: these are not related to SCHED_IDLE batch scheduled
  7 *        tasks which are handled in sched/fair.c )
  8 */
  9#include "sched.h"
 10
 11#include <trace/events/power.h>
 12
 13/* Linker adds these: start and end of __cpuidle functions */
 14extern char __cpuidle_text_start[], __cpuidle_text_end[];
 15
 16/**
 17 * sched_idle_set_state - Record idle state for the current CPU.
 18 * @idle_state: State to record.
 19 */
 20void sched_idle_set_state(struct cpuidle_state *idle_state)
 21{
 22	idle_set_state(this_rq(), idle_state);
 23}
 24
 25static int __read_mostly cpu_idle_force_poll;
 26
 27void cpu_idle_poll_ctrl(bool enable)
 28{
 29	if (enable) {
 30		cpu_idle_force_poll++;
 31	} else {
 32		cpu_idle_force_poll--;
 33		WARN_ON_ONCE(cpu_idle_force_poll < 0);
 34	}
 35}
 36
 37#ifdef CONFIG_GENERIC_IDLE_POLL_SETUP
 38static int __init cpu_idle_poll_setup(char *__unused)
 39{
 40	cpu_idle_force_poll = 1;
 41
 42	return 1;
 43}
 44__setup("nohlt", cpu_idle_poll_setup);
 45
 46static int __init cpu_idle_nopoll_setup(char *__unused)
 47{
 48	cpu_idle_force_poll = 0;
 49
 50	return 1;
 51}
 52__setup("hlt", cpu_idle_nopoll_setup);
 53#endif
 54
 55static noinline int __cpuidle cpu_idle_poll(void)
 56{
 57	trace_cpu_idle(0, smp_processor_id());
 58	stop_critical_timings();
 59	rcu_idle_enter();
 60	local_irq_enable();
 61
 62	while (!tif_need_resched() &&
 63	       (cpu_idle_force_poll || tick_check_broadcast_expired()))
 64		cpu_relax();
 65
 66	rcu_idle_exit();
 67	start_critical_timings();
 68	trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
 69
 70	return 1;
 71}
 72
 73/* Weak implementations for optional arch specific functions */
 74void __weak arch_cpu_idle_prepare(void) { }
 75void __weak arch_cpu_idle_enter(void) { }
 76void __weak arch_cpu_idle_exit(void) { }
 77void __weak arch_cpu_idle_dead(void) { }
 78void __weak arch_cpu_idle(void)
 79{
 80	cpu_idle_force_poll = 1;
 81	local_irq_enable();
 82}
 83
 84/**
 85 * default_idle_call - Default CPU idle routine.
 86 *
 87 * To use when the cpuidle framework cannot be used.
 88 */
 89void __cpuidle default_idle_call(void)
 90{
 91	if (current_clr_polling_and_test()) {
 92		local_irq_enable();
 93	} else {
 94
 95		trace_cpu_idle(1, smp_processor_id());
 96		stop_critical_timings();
 97		rcu_idle_enter();
 98		arch_cpu_idle();
 99		rcu_idle_exit();
100		start_critical_timings();
101		trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
102	}
103}
104
105static int call_cpuidle_s2idle(struct cpuidle_driver *drv,
106			       struct cpuidle_device *dev)
107{
108	if (current_clr_polling_and_test())
109		return -EBUSY;
110
111	return cpuidle_enter_s2idle(drv, dev);
112}
113
114static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
115		      int next_state)
116{
117	/*
118	 * The idle task must be scheduled; it is pointless to go to idle, so
119	 * just record a zero idle residency and return.
120	 */
121	if (current_clr_polling_and_test()) {
122		dev->last_residency_ns = 0;
123		local_irq_enable();
124		return -EBUSY;
125	}
126
127	/*
128	 * Enter the idle state previously returned by the governor decision.
129	 * This function will block until an interrupt occurs and will take
130	 * care of re-enabling the local interrupts
131	 */
132	return cpuidle_enter(drv, dev, next_state);
133}
134
135/**
136 * cpuidle_idle_call - the main idle function
137 *
138 * NOTE: no locks or semaphores should be used here
139 *
140 * On archs that support TIF_POLLING_NRFLAG, this function is called
141 * with polling set, and it returns with polling set.  If it ever stops
142 * polling, it must clear the polling bit.
143 */
144static void cpuidle_idle_call(void)
145{
146	struct cpuidle_device *dev = cpuidle_get_device();
147	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
148	int next_state, entered_state;
149
150	/*
151	 * Check if the idle task must be rescheduled. If it is the
152	 * case, exit the function after re-enabling the local irq.
153	 */
154	if (need_resched()) {
155		local_irq_enable();
156		return;
157	}
158
159	/*
160	 * The RCU framework needs to be told that we are entering an idle
161	 * section, so no more rcu read side critical sections and one more
162	 * step to the grace period
163	 */
164
165	if (cpuidle_not_available(drv, dev)) {
166		tick_nohz_idle_stop_tick();
167
168		default_idle_call();
169		goto exit_idle;
170	}
171
172	/*
173	 * Suspend-to-idle ("s2idle") is a system state in which all user space
174	 * has been frozen, all I/O devices have been suspended and the only
175	 * activity happens here and in interrupts (if any). In that case bypass
176	 * the cpuidle governor and go straight for the deepest idle state
177	 * available.  Possibly also suspend the local tick and the entire
178	 * timekeeping to prevent timer interrupts from kicking us out of idle
179	 * until a proper wakeup interrupt happens.
180	 */
181
182	if (idle_should_enter_s2idle() || dev->forced_idle_latency_limit_ns) {
183		u64 max_latency_ns;
184
185		if (idle_should_enter_s2idle()) {
186
187			entered_state = call_cpuidle_s2idle(drv, dev);
188			if (entered_state > 0)
189				goto exit_idle;
190
191			max_latency_ns = U64_MAX;
192		} else {
193			max_latency_ns = dev->forced_idle_latency_limit_ns;
194		}
195
196		tick_nohz_idle_stop_tick();
197
198		next_state = cpuidle_find_deepest_state(drv, dev, max_latency_ns);
199		call_cpuidle(drv, dev, next_state);
200	} else {
201		bool stop_tick = true;
202
203		/*
204		 * Ask the cpuidle framework to choose a convenient idle state.
205		 */
206		next_state = cpuidle_select(drv, dev, &stop_tick);
207
208		if (stop_tick || tick_nohz_tick_stopped())
209			tick_nohz_idle_stop_tick();
210		else
211			tick_nohz_idle_retain_tick();
212
213		entered_state = call_cpuidle(drv, dev, next_state);
214		/*
215		 * Give the governor an opportunity to reflect on the outcome
216		 */
217		cpuidle_reflect(dev, entered_state);
218	}
219
220exit_idle:
221	__current_set_polling();
222
223	/*
224	 * It is up to the idle functions to reenable local interrupts
225	 */
226	if (WARN_ON_ONCE(irqs_disabled()))
227		local_irq_enable();
228}
229
230/*
231 * Generic idle loop implementation
232 *
233 * Called with polling cleared.
234 */
235static void do_idle(void)
236{
237	int cpu = smp_processor_id();
238	/*
239	 * If the arch has a polling bit, we maintain an invariant:
240	 *
241	 * Our polling bit is clear if we're not scheduled (i.e. if rq->curr !=
242	 * rq->idle). This means that, if rq->idle has the polling bit set,
243	 * then setting need_resched is guaranteed to cause the CPU to
244	 * reschedule.
245	 */
246
247	__current_set_polling();
248	tick_nohz_idle_enter();
249
250	while (!need_resched()) {
251		rmb();
252
253		local_irq_disable();
254
255		if (cpu_is_offline(cpu)) {
256			tick_nohz_idle_stop_tick();
257			cpuhp_report_idle_dead();
258			arch_cpu_idle_dead();
259		}
260
261		arch_cpu_idle_enter();
262
263		/*
264		 * In poll mode we reenable interrupts and spin. Also if we
265		 * detected in the wakeup from idle path that the tick
266		 * broadcast device expired for us, we don't want to go deep
267		 * idle as we know that the IPI is going to arrive right away.
268		 */
269		if (cpu_idle_force_poll || tick_check_broadcast_expired()) {
270			tick_nohz_idle_restart_tick();
271			cpu_idle_poll();
272		} else {
273			cpuidle_idle_call();
274		}
275		arch_cpu_idle_exit();
276	}
277
278	/*
279	 * Since we fell out of the loop above, we know TIF_NEED_RESCHED must
280	 * be set, propagate it into PREEMPT_NEED_RESCHED.
281	 *
282	 * This is required because for polling idle loops we will not have had
283	 * an IPI to fold the state for us.
284	 */
285	preempt_set_need_resched();
286	tick_nohz_idle_exit();
287	__current_clr_polling();
288
289	/*
290	 * We promise to call sched_ttwu_pending() and reschedule if
291	 * need_resched() is set while polling is set. That means that clearing
292	 * polling needs to be visible before doing these things.
293	 */
294	smp_mb__after_atomic();
295
296	/*
297	 * RCU relies on this call to be done outside of an RCU read-side
298	 * critical section.
299	 */
300	flush_smp_call_function_from_idle();
301	schedule_idle();
302
303	if (unlikely(klp_patch_pending(current)))
304		klp_update_patch_state(current);
305}
306
307bool cpu_in_idle(unsigned long pc)
308{
309	return pc >= (unsigned long)__cpuidle_text_start &&
310		pc < (unsigned long)__cpuidle_text_end;
311}
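
cpu_in_idle() reports whether a saved program counter lies between the linker-provided __cpuidle_text_start/__cpuidle_text_end bounds declared near the top of the file, i.e. inside code annotated __cpuidle such as cpu_idle_poll() and default_idle_call(). A hedged sketch of the kind of check a diagnostic path can build on it (the hook below is hypothetical; lib/nmi_backtrace.c does something similar):

#include <linux/cpu.h>		/* cpu_in_idle() */
#include <linux/printk.h>
#include <linux/sched/debug.h>	/* show_regs() */
#include <asm/ptrace.h>		/* instruction_pointer() */

/* Hypothetical diagnostic hook: keep quiet about CPUs parked in idle. */
static void example_maybe_dump(int cpu, struct pt_regs *regs)
{
	if (cpu_in_idle(instruction_pointer(regs))) {
		pr_info("CPU%d is idle, skipping dump\n", cpu);
		return;
	}
	show_regs(regs);
}
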
312
313struct idle_timer {
314	struct hrtimer timer;
315	int done;
316};
317
318static enum hrtimer_restart idle_inject_timer_fn(struct hrtimer *timer)
319{
320	struct idle_timer *it = container_of(timer, struct idle_timer, timer);
321
322	WRITE_ONCE(it->done, 1);
323	set_tsk_need_resched(current);
324
325	return HRTIMER_NORESTART;
326}
327
328void play_idle_precise(u64 duration_ns, u64 latency_ns)
329{
330	struct idle_timer it;
331
332	/*
333	 * Only FIFO tasks can disable the tick since they don't need the forced
334	 * preemption.
335	 */
336	WARN_ON_ONCE(current->policy != SCHED_FIFO);
337	WARN_ON_ONCE(current->nr_cpus_allowed != 1);
338	WARN_ON_ONCE(!(current->flags & PF_KTHREAD));
339	WARN_ON_ONCE(!(current->flags & PF_NO_SETAFFINITY));
340	WARN_ON_ONCE(!duration_ns);
341
342	rcu_sleep_check();
343	preempt_disable();
344	current->flags |= PF_IDLE;
345	cpuidle_use_deepest_state(latency_ns);
346
347	it.done = 0;
348	hrtimer_init_on_stack(&it.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
349	it.timer.function = idle_inject_timer_fn;
350	hrtimer_start(&it.timer, ns_to_ktime(duration_ns),
351		      HRTIMER_MODE_REL_PINNED);
352
353	while (!READ_ONCE(it.done))
354		do_idle();
355
356	cpuidle_use_deepest_state(0);
357	current->flags &= ~PF_IDLE;
358
359	preempt_fold_need_resched();
360	preempt_enable();
361}
362EXPORT_SYMBOL_GPL(play_idle_precise);
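
play_idle_precise() is what idle-injection users build on: a pinned, per-CPU SCHED_FIFO kthread (PF_KTHREAD, PF_NO_SETAFFINITY, as the WARN_ON_ONCE() checks above demand) calls it to force its CPU idle for a bounded period; the powercap idle_inject framework and intel_powerclamp follow this pattern. A hedged sketch of a single injection step, assuming that kthread setup has already been done (the helper name is invented):

#include <linux/cpu.h>		/* play_idle_precise() */
#include <linux/limits.h>	/* U64_MAX */
#include <linux/time64.h>	/* NSEC_PER_MSEC */

/* Hypothetical injection step: ~1 ms of forced idle, no latency constraint. */
static void example_inject_idle_once(void)
{
	play_idle_precise(1 * NSEC_PER_MSEC, U64_MAX);
}
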
363
364void cpu_startup_entry(enum cpuhp_state state)
365{
366	arch_cpu_idle_prepare();
367	cpuhp_online_idle(state);
368	while (1)
369		do_idle();
370}
371
372/*
373 * idle-task scheduling class.
374 */
375
376#ifdef CONFIG_SMP
377static int
378select_task_rq_idle(struct task_struct *p, int cpu, int sd_flag, int flags)
379{
380	return task_cpu(p); /* IDLE tasks are never migrated */
381}
382
383static int
384balance_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
385{
386	return WARN_ON_ONCE(1);
387}
388#endif
389
390/*
391 * Idle tasks are unconditionally rescheduled:
392 */
393static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int flags)
394{
395	resched_curr(rq);
396}
397
398static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
399{
400}
401
402static void set_next_task_idle(struct rq *rq, struct task_struct *next, bool first)
403{
404	update_idle_core(rq);
405	schedstat_inc(rq->sched_goidle);
406}
407
408struct task_struct *pick_next_task_idle(struct rq *rq)
409{
410	struct task_struct *next = rq->idle;
411
412	set_next_task_idle(rq, next, true);
413
414	return next;
415}
416
417/*
418 * It is not legal to sleep in the idle task - print a warning
419 * message if some code attempts to do it:
420 */
421static void
422dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
423{
424	raw_spin_unlock_irq(&rq->lock);
425	printk(KERN_ERR "bad: scheduling from the idle thread!\n");
426	dump_stack();
427	raw_spin_lock_irq(&rq->lock);
428}
429
430/*
431 * scheduler tick hitting a task of our scheduling class.
432 *
433 * NOTE: This function can be called remotely by the tick offload that
434 * goes along full dynticks. Therefore no local assumption can be made
435 * and everything must be accessed through the @rq and @curr passed in
436 * parameters.
437 */
438static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
439{
440}
441
442static void switched_to_idle(struct rq *rq, struct task_struct *p)
443{
444	BUG();
445}
446
447static void
448prio_changed_idle(struct rq *rq, struct task_struct *p, int oldprio)
449{
450	BUG();
451}
452
453static void update_curr_idle(struct rq *rq)
454{
455}
456
457/*
458 * Simple, special scheduling class for the per-CPU idle tasks:
459 */
460const struct sched_class idle_sched_class
461	__attribute__((section("__idle_sched_class"))) = {
462	/* no enqueue/yield_task for idle tasks */
463
464	/* dequeue is not valid, we print a debug message there: */
465	.dequeue_task		= dequeue_task_idle,
466
467	.check_preempt_curr	= check_preempt_curr_idle,
468
469	.pick_next_task		= pick_next_task_idle,
470	.put_prev_task		= put_prev_task_idle,
471	.set_next_task          = set_next_task_idle,
472
473#ifdef CONFIG_SMP
474	.balance		= balance_idle,
475	.select_task_rq		= select_task_rq_idle,
476	.set_cpus_allowed	= set_cpus_allowed_common,
477#endif
478
479	.task_tick		= task_tick_idle,
480
481	.prio_changed		= prio_changed_idle,
482	.switched_to		= switched_to_idle,
483	.update_curr		= update_curr_idle,
484};