/*
 * kernel/stop_machine.c
 *
 * Copyright (C) 2008, 2005 IBM Corporation.
 * Copyright (C) 2008, 2005 Rusty Russell rusty@rustcorp.com.au
 * Copyright (C) 2010 SUSE Linux Products GmbH
 * Copyright (C) 2010 Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2 and any later version.
 */
#include <linux/completion.h>
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/stop_machine.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/smpboot.h>
#include <linux/atomic.h>
#include <linux/nmi.h>
#include <linux/sched/wake_q.h>

/*
 * Structure to determine completion condition and record errors.  May
 * be shared by works on different cpus.
 */
struct cpu_stop_done {
        atomic_t                nr_todo;        /* nr left to execute */
        int                     ret;            /* collected return value */
        struct completion       completion;     /* fired if nr_todo reaches 0 */
};

/* the actual stopper, one per every possible cpu, enabled on online cpus */
struct cpu_stopper {
        struct task_struct      *thread;

        spinlock_t              lock;
        bool                    enabled;        /* is this stopper enabled? */
        struct list_head        works;          /* list of pending works */

        struct cpu_stop_work    stop_work;      /* for stop_cpus */
};

static DEFINE_PER_CPU(struct cpu_stopper, cpu_stopper);
static bool stop_machine_initialized = false;

/* static data for stop_cpus */
static DEFINE_MUTEX(stop_cpus_mutex);
static bool stop_cpus_in_progress;

static void cpu_stop_init_done(struct cpu_stop_done *done, unsigned int nr_todo)
{
        memset(done, 0, sizeof(*done));
        atomic_set(&done->nr_todo, nr_todo);
        init_completion(&done->completion);
}

/* signal completion; callers skip this when their @done is NULL */
static void cpu_stop_signal_done(struct cpu_stop_done *done)
{
        if (atomic_dec_and_test(&done->nr_todo))
                complete(&done->completion);
}

static void __cpu_stop_queue_work(struct cpu_stopper *stopper,
                                  struct cpu_stop_work *work,
                                  struct wake_q_head *wakeq)
{
        list_add_tail(&work->list, &stopper->works);
        wake_q_add(wakeq, stopper->thread);
}

/* queue @work to @stopper.  if offline, @work is completed immediately */
static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
{
        struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
        DEFINE_WAKE_Q(wakeq);
        unsigned long flags;
        bool enabled;

        spin_lock_irqsave(&stopper->lock, flags);
        enabled = stopper->enabled;
        if (enabled)
                __cpu_stop_queue_work(stopper, work, &wakeq);
        else if (work->done)
                cpu_stop_signal_done(work->done);
        spin_unlock_irqrestore(&stopper->lock, flags);

        wake_up_q(&wakeq);

        return enabled;
}

/**
 * stop_one_cpu - stop a cpu
 * @cpu: cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Execute @fn(@arg) on @cpu.  @fn is run in a process context with
 * the highest priority preempting any task on the cpu and
 * monopolizing it.  This function returns after the execution is
 * complete.
 *
 * This function doesn't guarantee @cpu stays online till @fn
 * completes.  If @cpu goes down in the middle, execution may happen
 * partially or fully on different cpus.  @fn should either be ready
 * for that or the caller should ensure that @cpu stays online until
 * this function completes.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -ENOENT if @fn(@arg) was not executed because @cpu was offline;
 * otherwise, the return value of @fn.
 */
int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
{
        struct cpu_stop_done done;
        struct cpu_stop_work work = { .fn = fn, .arg = arg, .done = &done };

        cpu_stop_init_done(&done, 1);
        if (!cpu_stop_queue_work(cpu, &work))
                return -ENOENT;
        /*
         * In case @cpu == smp_processor_id() we can avoid a sleep+wakeup
         * cycle by doing a preemption:
         */
        cond_resched();
        wait_for_completion(&done.completion);
        return done.ret;
}

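/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * a minimal stop_one_cpu() caller.  example_fn and example_arg are
 * hypothetical names; the callback runs in stopper context and must not
 * sleep.
 *
 *	static int example_fn(void *arg)
 *	{
 *		// Runs on the target CPU with the stopper monopolizing it.
 *		return 0;
 *	}
 *
 *	// Returns example_fn()'s value, or -ENOENT if the CPU was offline.
 *	int err = stop_one_cpu(cpu, example_fn, example_arg);
 */
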
/* This controls the threads on each CPU. */
enum multi_stop_state {
        /* Dummy starting state for thread. */
        MULTI_STOP_NONE,
        /* Awaiting everyone to be scheduled. */
        MULTI_STOP_PREPARE,
        /* Disable interrupts. */
        MULTI_STOP_DISABLE_IRQ,
        /* Run the function */
        MULTI_STOP_RUN,
        /* Exit */
        MULTI_STOP_EXIT,
};

struct multi_stop_data {
        cpu_stop_fn_t           fn;
        void                    *data;
        /* Like num_online_cpus(), but hotplug cpu uses us, so we need this. */
        unsigned int            num_threads;
        const struct cpumask    *active_cpus;

        enum multi_stop_state   state;
        atomic_t                thread_ack;
};

static void set_state(struct multi_stop_data *msdata,
                      enum multi_stop_state newstate)
{
        /* Reset ack counter. */
        atomic_set(&msdata->thread_ack, msdata->num_threads);
        smp_wmb();
        msdata->state = newstate;
}

/* Last one to ack a state moves to the next state. */
static void ack_state(struct multi_stop_data *msdata)
{
        if (atomic_dec_and_test(&msdata->thread_ack))
                set_state(msdata, msdata->state + 1);
}

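/*
 * Illustrative walk-through (editor's sketch, not part of the original
 * file): with num_threads == 3, every participating CPU spins in
 * multi_cpu_stop() below and only the last ack advances the shared state:
 *
 *	state			thread_ack	advances when
 *	MULTI_STOP_PREPARE	3 -> 0		all threads are spinning
 *	MULTI_STOP_DISABLE_IRQ	3 -> 0		all CPUs have irqs off
 *	MULTI_STOP_RUN		3 -> 0		@fn ran on the active CPUs
 *	MULTI_STOP_EXIT		-		every CPU leaves the loop
 *
 * The smp_wmb() in set_state() orders the thread_ack reset before the
 * publication of the new state, pairing with the re-read of ->state in
 * multi_cpu_stop().
 */
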
/* This is the cpu_stop function which stops the CPU. */
static int multi_cpu_stop(void *data)
{
        struct multi_stop_data *msdata = data;
        enum multi_stop_state curstate = MULTI_STOP_NONE;
        int cpu = smp_processor_id(), err = 0;
        unsigned long flags;
        bool is_active;

        /*
         * When called from stop_machine_from_inactive_cpu(), irq might
         * already be disabled.  Save the state and restore it on exit.
         */
        local_save_flags(flags);

        if (!msdata->active_cpus)
                is_active = cpu == cpumask_first(cpu_online_mask);
        else
                is_active = cpumask_test_cpu(cpu, msdata->active_cpus);

        /* Simple state machine */
        do {
                /* Chill out and ensure we re-read multi_stop_state. */
                cpu_relax_yield();
                if (msdata->state != curstate) {
                        curstate = msdata->state;
                        switch (curstate) {
                        case MULTI_STOP_DISABLE_IRQ:
                                local_irq_disable();
                                hard_irq_disable();
                                break;
                        case MULTI_STOP_RUN:
                                if (is_active)
                                        err = msdata->fn(msdata->data);
                                break;
                        default:
                                break;
                        }
                        ack_state(msdata);
                } else if (curstate > MULTI_STOP_PREPARE) {
                        /*
                         * At this stage all other CPUs we depend on must spin
                         * in the same loop.  Any reason for hard-lockup should
                         * be detected and reported on their side.
                         */
                        touch_nmi_watchdog();
                }
        } while (curstate != MULTI_STOP_EXIT);

        local_irq_restore(flags);
        return err;
}

static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
                                    int cpu2, struct cpu_stop_work *work2)
{
        struct cpu_stopper *stopper1 = per_cpu_ptr(&cpu_stopper, cpu1);
        struct cpu_stopper *stopper2 = per_cpu_ptr(&cpu_stopper, cpu2);
        DEFINE_WAKE_Q(wakeq);
        int err;
retry:
        spin_lock_irq(&stopper1->lock);
        spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING);

        err = -ENOENT;
        if (!stopper1->enabled || !stopper2->enabled)
                goto unlock;
        /*
         * Ensure that if we race with __stop_cpus() the stoppers won't get
         * queued up in reverse order leading to system deadlock.
         *
         * We can't miss stop_cpus_in_progress if queue_stop_cpus_work() has
         * queued a work on cpu1 but not on cpu2, we hold both locks.
         *
         * It can be falsely true but it is safe to spin until it is cleared,
         * queue_stop_cpus_work() does everything under preempt_disable().
         */
        err = -EDEADLK;
        if (unlikely(stop_cpus_in_progress))
                goto unlock;

        err = 0;
        __cpu_stop_queue_work(stopper1, work1, &wakeq);
        __cpu_stop_queue_work(stopper2, work2, &wakeq);
unlock:
        spin_unlock(&stopper2->lock);
        spin_unlock_irq(&stopper1->lock);

        if (unlikely(err == -EDEADLK)) {
                while (stop_cpus_in_progress)
                        cpu_relax();
                goto retry;
        }

        wake_up_q(&wakeq);

        return err;
}

/**
 * stop_two_cpus - stops two cpus
 * @cpu1: the cpu to stop
 * @cpu2: the other cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Stops both of the specified CPUs and runs @fn on one of them.
 *
 * Returns when both are completed.
 */
int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *arg)
{
        struct cpu_stop_done done;
        struct cpu_stop_work work1, work2;
        struct multi_stop_data msdata;

        msdata = (struct multi_stop_data){
                .fn = fn,
                .data = arg,
                .num_threads = 2,
                .active_cpus = cpumask_of(cpu1),
        };

        work1 = work2 = (struct cpu_stop_work){
                .fn = multi_cpu_stop,
                .arg = &msdata,
                .done = &done
        };

        cpu_stop_init_done(&done, 2);
        set_state(&msdata, MULTI_STOP_PREPARE);

        if (cpu1 > cpu2)
                swap(cpu1, cpu2);
        if (cpu_stop_queue_two_works(cpu1, &work1, cpu2, &work2))
                return -ENOENT;

        wait_for_completion(&done.completion);
        return done.ret;
}

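/*
 * Illustrative sketch (editor's addition): stop_two_cpus() is the
 * primitive for operations that must see two CPUs quiesced at once,
 * e.g. the scheduler's migrate_swap().  Hypothetical caller:
 *
 *	// Both CPUs spin in multi_cpu_stop() with irqs disabled; only
 *	// the active one (the first argument, src_cpu) executes @fn.
 *	int err = stop_two_cpus(src_cpu, dst_cpu, example_swap_fn, &example_arg);
 */
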
/**
 * stop_one_cpu_nowait - stop a cpu but don't wait for completion
 * @cpu: cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 * @work_buf: pointer to cpu_stop_work structure
 *
 * Similar to stop_one_cpu() but doesn't wait for completion.  The
 * caller is responsible for ensuring @work_buf is currently unused
 * and will remain untouched until stopper starts executing @fn.
 *
 * CONTEXT:
 * Don't care.
 *
 * RETURNS:
 * true if cpu_stop_work was queued successfully and @fn will be called,
 * false otherwise.
 */
bool stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
                         struct cpu_stop_work *work_buf)
{
        *work_buf = (struct cpu_stop_work){ .fn = fn, .arg = arg, };
        return cpu_stop_queue_work(cpu, work_buf);
}

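/*
 * Illustrative sketch (editor's addition): the nowait variant requires
 * caller-owned storage that stays untouched until the stopper runs, so
 * @work_buf is typically embedded in a longer-lived structure (names are
 * hypothetical):
 *
 *	struct example_req {
 *		struct cpu_stop_work stop_work;	// must outlive the queueing
 *	};
 *
 *	// No completion to wait for; @fn itself must report back.
 *	queued = stop_one_cpu_nowait(cpu, example_fn, req, &req->stop_work);
 */
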
static bool queue_stop_cpus_work(const struct cpumask *cpumask,
                                 cpu_stop_fn_t fn, void *arg,
                                 struct cpu_stop_done *done)
{
        struct cpu_stop_work *work;
        unsigned int cpu;
        bool queued = false;

        /*
         * Disable preemption while queueing to avoid getting
         * preempted by a stopper which might wait for other stoppers
         * to enter @fn which can lead to deadlock.
         */
        preempt_disable();
        stop_cpus_in_progress = true;
        for_each_cpu(cpu, cpumask) {
                work = &per_cpu(cpu_stopper.stop_work, cpu);
                work->fn = fn;
                work->arg = arg;
                work->done = done;
                if (cpu_stop_queue_work(cpu, work))
                        queued = true;
        }
        stop_cpus_in_progress = false;
        preempt_enable();

        return queued;
}

static int __stop_cpus(const struct cpumask *cpumask,
                       cpu_stop_fn_t fn, void *arg)
{
        struct cpu_stop_done done;

        cpu_stop_init_done(&done, cpumask_weight(cpumask));
        if (!queue_stop_cpus_work(cpumask, fn, arg, &done))
                return -ENOENT;
        wait_for_completion(&done.completion);
        return done.ret;
}

/**
 * stop_cpus - stop multiple cpus
 * @cpumask: cpus to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Execute @fn(@arg) on online cpus in @cpumask.  On each target cpu,
 * @fn is run in a process context with the highest priority
 * preempting any task on the cpu and monopolizing it.  This function
 * returns after all executions are complete.
 *
 * This function doesn't guarantee the cpus in @cpumask stay online
 * till @fn completes.  If some cpus go down in the middle, execution
 * on the cpu may happen partially or fully on different cpus.  @fn
 * should either be ready for that or the caller should ensure that
 * the cpus stay online until this function completes.
 *
 * All stop_cpus() calls are serialized making it safe for @fn to wait
 * for all cpus to start executing it.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -ENOENT if @fn(@arg) was not executed at all because all cpus in
 * @cpumask were offline; otherwise, 0 if all executions of @fn
 * returned 0, any non zero return value if any returned non zero.
 */
int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
{
        int ret;

        /* static works are used, process one request at a time */
        mutex_lock(&stop_cpus_mutex);
        ret = __stop_cpus(cpumask, fn, arg);
        mutex_unlock(&stop_cpus_mutex);
        return ret;
}

/**
 * try_stop_cpus - try to stop multiple cpus
 * @cpumask: cpus to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Identical to stop_cpus() except that it fails with -EAGAIN if
 * someone else is already using the facility.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -EAGAIN if someone else is already stopping cpus, -ENOENT if
 * @fn(@arg) was not executed at all because all cpus in @cpumask were
 * offline; otherwise, 0 if all executions of @fn returned 0, any non
 * zero return value if any returned non zero.
 */
int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
{
        int ret;

        /* static works are used, process one request at a time */
        if (!mutex_trylock(&stop_cpus_mutex))
                return -EAGAIN;
        ret = __stop_cpus(cpumask, fn, arg);
        mutex_unlock(&stop_cpus_mutex);
        return ret;
}

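/*
 * Illustrative sketch (editor's addition): unlike stop_cpus(), this
 * variant never blocks behind another request, so callers typically
 * propagate or retry on -EAGAIN:
 *
 *	ret = try_stop_cpus(cpu_online_mask, example_fn, example_arg);
 *	if (ret == -EAGAIN)
 *		// someone else holds stop_cpus_mutex; try again later
 *		return ret;
 */
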
static int cpu_stop_should_run(unsigned int cpu)
{
        struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
        unsigned long flags;
        int run;

        spin_lock_irqsave(&stopper->lock, flags);
        run = !list_empty(&stopper->works);
        spin_unlock_irqrestore(&stopper->lock, flags);
        return run;
}

static void cpu_stopper_thread(unsigned int cpu)
{
        struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
        struct cpu_stop_work *work;

repeat:
        work = NULL;
        spin_lock_irq(&stopper->lock);
        if (!list_empty(&stopper->works)) {
                work = list_first_entry(&stopper->works,
                                        struct cpu_stop_work, list);
                list_del_init(&work->list);
        }
        spin_unlock_irq(&stopper->lock);

        if (work) {
                cpu_stop_fn_t fn = work->fn;
                void *arg = work->arg;
                struct cpu_stop_done *done = work->done;
                int ret;

                /* cpu stop callbacks must not sleep, make in_atomic() == T */
                preempt_count_inc();
                ret = fn(arg);
                if (done) {
                        if (ret)
                                done->ret = ret;
                        cpu_stop_signal_done(done);
                }
                preempt_count_dec();
                WARN_ONCE(preempt_count(),
                          "cpu_stop: %pf(%p) leaked preempt count\n", fn, arg);
                goto repeat;
        }
}

void stop_machine_park(int cpu)
{
        struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
        /*
         * Lockless. cpu_stopper_thread() will take stopper->lock and flush
         * the pending works before it parks, until then it is fine to queue
         * the new works.
         */
        stopper->enabled = false;
        kthread_park(stopper->thread);
}

extern void sched_set_stop_task(int cpu, struct task_struct *stop);

static void cpu_stop_create(unsigned int cpu)
{
        sched_set_stop_task(cpu, per_cpu(cpu_stopper.thread, cpu));
}

static void cpu_stop_park(unsigned int cpu)
{
        struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

        WARN_ON(!list_empty(&stopper->works));
}

void stop_machine_unpark(int cpu)
{
        struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

        stopper->enabled = true;
        kthread_unpark(stopper->thread);
}

static struct smp_hotplug_thread cpu_stop_threads = {
        .store                  = &cpu_stopper.thread,
        .thread_should_run      = cpu_stop_should_run,
        .thread_fn              = cpu_stopper_thread,
        .thread_comm            = "migration/%u",
        .create                 = cpu_stop_create,
        .park                   = cpu_stop_park,
        .selfparking            = true,
};

static int __init cpu_stop_init(void)
{
        unsigned int cpu;

        for_each_possible_cpu(cpu) {
                struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

                spin_lock_init(&stopper->lock);
                INIT_LIST_HEAD(&stopper->works);
        }

        BUG_ON(smpboot_register_percpu_thread(&cpu_stop_threads));
        stop_machine_unpark(raw_smp_processor_id());
        stop_machine_initialized = true;
        return 0;
}
early_initcall(cpu_stop_init);

int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data,
                            const struct cpumask *cpus)
{
        struct multi_stop_data msdata = {
                .fn = fn,
                .data = data,
                .num_threads = num_online_cpus(),
                .active_cpus = cpus,
        };

        lockdep_assert_cpus_held();

        if (!stop_machine_initialized) {
                /*
                 * Handle the case where stop_machine() is called
                 * early in boot before stop_machine() has been
                 * initialized.
                 */
                unsigned long flags;
                int ret;

                WARN_ON_ONCE(msdata.num_threads != 1);

                local_irq_save(flags);
                hard_irq_disable();
                ret = (*fn)(data);
                local_irq_restore(flags);

                return ret;
        }

        /* Set the initial state and stop all online cpus. */
        set_state(&msdata, MULTI_STOP_PREPARE);
        return stop_cpus(cpu_online_mask, multi_cpu_stop, &msdata);
}

int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus)
{
        int ret;

        /* No CPUs can come up or down during this. */
        cpus_read_lock();
        ret = stop_machine_cpuslocked(fn, data, cpus);
        cpus_read_unlock();
        return ret;
}
EXPORT_SYMBOL_GPL(stop_machine);

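/*
 * Illustrative sketch (editor's addition): stop_machine() suits updates
 * that must appear atomic to every other CPU, e.g. swapping state that
 * is read without locks.  Names below are hypothetical:
 *
 *	static int example_update(void *arg)
 *	{
 *		// Every other online CPU spins with irqs disabled, so
 *		// this assignment is globally atomic from their view.
 *		example_global_table = arg;
 *		return 0;
 *	}
 *
 *	// NULL @cpus: run example_update() on the first online CPU.
 *	int err = stop_machine(example_update, new_table, NULL);
 */
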
/**
 * stop_machine_from_inactive_cpu - stop_machine() from inactive CPU
 * @fn: the function to run
 * @data: the data ptr for the @fn()
 * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
 *
 * This is identical to stop_machine() but can be called from a CPU which
 * is not active.  The local CPU is in the process of hotplug (so no other
 * CPU hotplug can start) and not marked active and doesn't have enough
 * context to sleep.
 *
 * This function provides stop_machine() functionality for such state by
 * using busy-wait for synchronization and executing @fn directly for local
 * CPU.
 *
 * CONTEXT:
 * Local CPU is inactive.  Temporarily stops all active CPUs.
 *
 * RETURNS:
 * 0 if all executions of @fn returned 0, any non zero return value if any
 * returned non zero.
 */
int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
                                   const struct cpumask *cpus)
{
        struct multi_stop_data msdata = { .fn = fn, .data = data,
                                          .active_cpus = cpus };
        struct cpu_stop_done done;
        int ret;

        /* Local CPU must be inactive and CPU hotplug in progress. */
        BUG_ON(cpu_active(raw_smp_processor_id()));
        msdata.num_threads = num_active_cpus() + 1;     /* +1 for local */

        /* No proper task established and can't sleep - busy wait for lock. */
        while (!mutex_trylock(&stop_cpus_mutex))
                cpu_relax();

        /* Schedule work on other CPUs and execute directly for local CPU */
        set_state(&msdata, MULTI_STOP_PREPARE);
        cpu_stop_init_done(&done, num_active_cpus());
        queue_stop_cpus_work(cpu_active_mask, multi_cpu_stop, &msdata,
                             &done);
        ret = multi_cpu_stop(&msdata);

        /* Busy wait for completion. */
        while (!completion_done(&done.completion))
                cpu_relax();

        mutex_unlock(&stop_cpus_mutex);
        return ret ?: done.ret;
}