kernel/stop_machine.c (v5.4)
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 * kernel/stop_machine.c
  4 *
  5 * Copyright (C) 2008, 2005	IBM Corporation.
  6 * Copyright (C) 2008, 2005	Rusty Russell rusty@rustcorp.com.au
  7 * Copyright (C) 2010		SUSE Linux Products GmbH
  8 * Copyright (C) 2010		Tejun Heo <tj@kernel.org>
  9 */
 10#include <linux/compiler.h>
 11#include <linux/completion.h>
 12#include <linux/cpu.h>
 13#include <linux/init.h>
 14#include <linux/kthread.h>
 15#include <linux/export.h>
 16#include <linux/percpu.h>
 17#include <linux/sched.h>
 18#include <linux/stop_machine.h>
 19#include <linux/interrupt.h>
 20#include <linux/kallsyms.h>
 21#include <linux/smpboot.h>
 22#include <linux/atomic.h>
 23#include <linux/nmi.h>
 24#include <linux/sched/wake_q.h>
 25
 26/*
 27 * Structure to determine completion condition and record errors.  May
 28 * be shared by works on different cpus.
 29 */
 30struct cpu_stop_done {
 31	atomic_t		nr_todo;	/* nr left to execute */
 32	int			ret;		/* collected return value */
 33	struct completion	completion;	/* fired if nr_todo reaches 0 */
 34};
 35
 36/* the actual stopper, one per every possible cpu, enabled on online cpus */
 37struct cpu_stopper {
 38	struct task_struct	*thread;
 39
 40	raw_spinlock_t		lock;
 41	bool			enabled;	/* is this stopper enabled? */
 42	struct list_head	works;		/* list of pending works */
 43
 44	struct cpu_stop_work	stop_work;	/* for stop_cpus */
 45};
 46
 47static DEFINE_PER_CPU(struct cpu_stopper, cpu_stopper);
 48static bool stop_machine_initialized = false;
 49
 50/* static data for stop_cpus */
 51static DEFINE_MUTEX(stop_cpus_mutex);
 52static bool stop_cpus_in_progress;
 53
 54static void cpu_stop_init_done(struct cpu_stop_done *done, unsigned int nr_todo)
 55{
 56	memset(done, 0, sizeof(*done));
 57	atomic_set(&done->nr_todo, nr_todo);
 58	init_completion(&done->completion);
 59}
 60
 61/* signal completion unless @done is NULL */
 62static void cpu_stop_signal_done(struct cpu_stop_done *done)
 63{
 64	if (atomic_dec_and_test(&done->nr_todo))
 65		complete(&done->completion);
 66}
 67
 68static void __cpu_stop_queue_work(struct cpu_stopper *stopper,
 69					struct cpu_stop_work *work,
 70					struct wake_q_head *wakeq)
 71{
 72	list_add_tail(&work->list, &stopper->works);
 73	wake_q_add(wakeq, stopper->thread);
 74}
 75
 76/* queue @work to @stopper.  if offline, @work is completed immediately */
 77static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
 78{
 79	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
 80	DEFINE_WAKE_Q(wakeq);
 81	unsigned long flags;
 82	bool enabled;
 83
 84	preempt_disable();
 85	raw_spin_lock_irqsave(&stopper->lock, flags);
 86	enabled = stopper->enabled;
 87	if (enabled)
 88		__cpu_stop_queue_work(stopper, work, &wakeq);
 89	else if (work->done)
 90		cpu_stop_signal_done(work->done);
 91	raw_spin_unlock_irqrestore(&stopper->lock, flags);
 92
 93	wake_up_q(&wakeq);
 94	preempt_enable();
 95
 96	return enabled;
 97}
 98
 99/**
100 * stop_one_cpu - stop a cpu
101 * @cpu: cpu to stop
102 * @fn: function to execute
103 * @arg: argument to @fn
104 *
105 * Execute @fn(@arg) on @cpu.  @fn is run in a process context with
106 * the highest priority preempting any task on the cpu and
107 * monopolizing it.  This function returns after the execution is
108 * complete.
109 *
110 * This function doesn't guarantee @cpu stays online till @fn
111 * completes.  If @cpu goes down in the middle, execution may happen
112 * partially or fully on different cpus.  @fn should either be ready
113 * for that or the caller should ensure that @cpu stays online until
114 * this function completes.
115 *
116 * CONTEXT:
117 * Might sleep.
118 *
119 * RETURNS:
120 * -ENOENT if @fn(@arg) was not executed because @cpu was offline;
121 * otherwise, the return value of @fn.
122 */
123int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
124{
125	struct cpu_stop_done done;
126	struct cpu_stop_work work = { .fn = fn, .arg = arg, .done = &done };
127
128	cpu_stop_init_done(&done, 1);
129	if (!cpu_stop_queue_work(cpu, &work))
130		return -ENOENT;
131	/*
 132	 * In case @cpu == smp_processor_id() we can avoid a sleep+wakeup
133	 * cycle by doing a preemption:
134	 */
135	cond_resched();
136	wait_for_completion(&done.completion);
137	return done.ret;
138}
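
/*
 * Illustrative sketch, not part of the file above: a minimal caller of
 * stop_one_cpu().  The callback runs on @cpu at stopper priority and must
 * not sleep.  example_poke() and example_stop_one() are hypothetical names.
 */
static int example_poke(void *arg)
{
	unsigned int *flag = arg;	/* runs with the target CPU monopolized */

	*flag = 1;
	return 0;
}

static int example_stop_one(unsigned int cpu)
{
	unsigned int flag = 0;

	/* -ENOENT if @cpu was offline, otherwise example_poke()'s return value */
	return stop_one_cpu(cpu, example_poke, &flag);
}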
139
140/* This controls the threads on each CPU. */
141enum multi_stop_state {
142	/* Dummy starting state for thread. */
143	MULTI_STOP_NONE,
144	/* Awaiting everyone to be scheduled. */
145	MULTI_STOP_PREPARE,
146	/* Disable interrupts. */
147	MULTI_STOP_DISABLE_IRQ,
148	/* Run the function */
149	MULTI_STOP_RUN,
150	/* Exit */
151	MULTI_STOP_EXIT,
152};
153
154struct multi_stop_data {
155	cpu_stop_fn_t		fn;
156	void			*data;
157	/* Like num_online_cpus(), but hotplug cpu uses us, so we need this. */
158	unsigned int		num_threads;
159	const struct cpumask	*active_cpus;
160
161	enum multi_stop_state	state;
162	atomic_t		thread_ack;
163};
164
165static void set_state(struct multi_stop_data *msdata,
166		      enum multi_stop_state newstate)
167{
168	/* Reset ack counter. */
169	atomic_set(&msdata->thread_ack, msdata->num_threads);
170	smp_wmb();
171	WRITE_ONCE(msdata->state, newstate);
172}
173
174/* Last one to ack a state moves to the next state. */
175static void ack_state(struct multi_stop_data *msdata)
176{
177	if (atomic_dec_and_test(&msdata->thread_ack))
178		set_state(msdata, msdata->state + 1);
179}
180
181void __weak stop_machine_yield(const struct cpumask *cpumask)
182{
183	cpu_relax();
184}
185
186/* This is the cpu_stop function which stops the CPU. */
187static int multi_cpu_stop(void *data)
188{
189	struct multi_stop_data *msdata = data;
190	enum multi_stop_state newstate, curstate = MULTI_STOP_NONE;
191	int cpu = smp_processor_id(), err = 0;
192	const struct cpumask *cpumask;
193	unsigned long flags;
194	bool is_active;
195
196	/*
197	 * When called from stop_machine_from_inactive_cpu(), irq might
198	 * already be disabled.  Save the state and restore it on exit.
199	 */
200	local_save_flags(flags);
201
202	if (!msdata->active_cpus) {
203		cpumask = cpu_online_mask;
204		is_active = cpu == cpumask_first(cpumask);
205	} else {
206		cpumask = msdata->active_cpus;
207		is_active = cpumask_test_cpu(cpu, cpumask);
208	}
209
210	/* Simple state machine */
211	do {
212		/* Chill out and ensure we re-read multi_stop_state. */
213		stop_machine_yield(cpumask);
214		newstate = READ_ONCE(msdata->state);
215		if (newstate != curstate) {
216			curstate = newstate;
217			switch (curstate) {
218			case MULTI_STOP_DISABLE_IRQ:
219				local_irq_disable();
220				hard_irq_disable();
221				break;
222			case MULTI_STOP_RUN:
223				if (is_active)
224					err = msdata->fn(msdata->data);
225				break;
226			default:
227				break;
228			}
229			ack_state(msdata);
230		} else if (curstate > MULTI_STOP_PREPARE) {
231			/*
232			 * At this stage all other CPUs we depend on must spin
233			 * in the same loop. Any reason for hard-lockup should
234			 * be detected and reported on their side.
235			 */
236			touch_nmi_watchdog();
237		}
238	} while (curstate != MULTI_STOP_EXIT);
239
240	local_irq_restore(flags);
241	return err;
242}
243
244static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
245				    int cpu2, struct cpu_stop_work *work2)
246{
247	struct cpu_stopper *stopper1 = per_cpu_ptr(&cpu_stopper, cpu1);
248	struct cpu_stopper *stopper2 = per_cpu_ptr(&cpu_stopper, cpu2);
249	DEFINE_WAKE_Q(wakeq);
250	int err;
251
252retry:
253	/*
254	 * The waking up of stopper threads has to happen in the same
255	 * scheduling context as the queueing.  Otherwise, there is a
256	 * possibility of one of the above stoppers being woken up by another
257	 * CPU, and preempting us. This will cause us to not wake up the other
258	 * stopper forever.
259	 */
260	preempt_disable();
261	raw_spin_lock_irq(&stopper1->lock);
262	raw_spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING);
263
264	if (!stopper1->enabled || !stopper2->enabled) {
265		err = -ENOENT;
266		goto unlock;
267	}
268
269	/*
270	 * Ensure that if we race with __stop_cpus() the stoppers won't get
271	 * queued up in reverse order leading to system deadlock.
272	 *
273	 * We can't miss stop_cpus_in_progress if queue_stop_cpus_work() has
274	 * queued a work on cpu1 but not on cpu2, we hold both locks.
275	 *
276	 * It can be falsely true but it is safe to spin until it is cleared,
277	 * queue_stop_cpus_work() does everything under preempt_disable().
278	 */
279	if (unlikely(stop_cpus_in_progress)) {
280		err = -EDEADLK;
281		goto unlock;
282	}
283
284	err = 0;
285	__cpu_stop_queue_work(stopper1, work1, &wakeq);
286	__cpu_stop_queue_work(stopper2, work2, &wakeq);
287
288unlock:
289	raw_spin_unlock(&stopper2->lock);
290	raw_spin_unlock_irq(&stopper1->lock);
291
292	if (unlikely(err == -EDEADLK)) {
293		preempt_enable();
294
295		while (stop_cpus_in_progress)
296			cpu_relax();
297
298		goto retry;
299	}
300
301	wake_up_q(&wakeq);
302	preempt_enable();
303
304	return err;
305}
306/**
307 * stop_two_cpus - stops two cpus
308 * @cpu1: the cpu to stop
309 * @cpu2: the other cpu to stop
310 * @fn: function to execute
311 * @arg: argument to @fn
312 *
313 * Stops both the current and specified CPU and runs @fn on one of them.
314 *
315 * returns when both are completed.
316 */
317int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *arg)
318{
319	struct cpu_stop_done done;
320	struct cpu_stop_work work1, work2;
321	struct multi_stop_data msdata;
322
323	msdata = (struct multi_stop_data){
324		.fn = fn,
325		.data = arg,
326		.num_threads = 2,
327		.active_cpus = cpumask_of(cpu1),
328	};
329
330	work1 = work2 = (struct cpu_stop_work){
331		.fn = multi_cpu_stop,
332		.arg = &msdata,
333		.done = &done
334	};
335
336	cpu_stop_init_done(&done, 2);
337	set_state(&msdata, MULTI_STOP_PREPARE);
338
339	if (cpu1 > cpu2)
340		swap(cpu1, cpu2);
341	if (cpu_stop_queue_two_works(cpu1, &work1, cpu2, &work2))
342		return -ENOENT;
343
344	wait_for_completion(&done.completion);
345	return done.ret;
346}
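
/*
 * Illustrative sketch, not part of the file above: stop_two_cpus() pins
 * both CPUs and runs the callback on one of them (the scheduler's
 * migrate_swap() path is the in-tree user).  Names below are hypothetical.
 */
static int example_pair_fn(void *arg)
{
	/* Executed on one of the two stopped CPUs while both are held. */
	return 0;
}

static int example_stop_pair(unsigned int a, unsigned int b)
{
	return stop_two_cpus(a, b, example_pair_fn, NULL);
}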
347
348/**
349 * stop_one_cpu_nowait - stop a cpu but don't wait for completion
350 * @cpu: cpu to stop
351 * @fn: function to execute
352 * @arg: argument to @fn
353 * @work_buf: pointer to cpu_stop_work structure
354 *
355 * Similar to stop_one_cpu() but doesn't wait for completion.  The
356 * caller is responsible for ensuring @work_buf is currently unused
357 * and will remain untouched until stopper starts executing @fn.
358 *
359 * CONTEXT:
360 * Don't care.
361 *
362 * RETURNS:
363 * true if cpu_stop_work was queued successfully and @fn will be called,
364 * false otherwise.
365 */
366bool stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
367			struct cpu_stop_work *work_buf)
368{
369	*work_buf = (struct cpu_stop_work){ .fn = fn, .arg = arg, };
370	return cpu_stop_queue_work(cpu, work_buf);
371}
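
/*
 * Illustrative sketch, not part of the file above: because
 * stop_one_cpu_nowait() returns before @fn runs, @work_buf must stay valid
 * until the stopper consumes it; a static or per-CPU buffer is the usual
 * answer.  Names below are hypothetical.
 */
static DEFINE_PER_CPU(struct cpu_stop_work, example_stop_work);

static int example_async_fn(void *arg)
{
	return 0;
}

static void example_kick_cpu(unsigned int cpu)
{
	/* Fire and forget; returns false if the stopper is disabled (ignored here). */
	stop_one_cpu_nowait(cpu, example_async_fn, NULL,
			    &per_cpu(example_stop_work, cpu));
}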
372
373static bool queue_stop_cpus_work(const struct cpumask *cpumask,
374				 cpu_stop_fn_t fn, void *arg,
375				 struct cpu_stop_done *done)
376{
377	struct cpu_stop_work *work;
378	unsigned int cpu;
379	bool queued = false;
380
381	/*
382	 * Disable preemption while queueing to avoid getting
383	 * preempted by a stopper which might wait for other stoppers
384	 * to enter @fn which can lead to deadlock.
385	 */
386	preempt_disable();
387	stop_cpus_in_progress = true;
388	barrier();
389	for_each_cpu(cpu, cpumask) {
390		work = &per_cpu(cpu_stopper.stop_work, cpu);
391		work->fn = fn;
392		work->arg = arg;
393		work->done = done;
394		if (cpu_stop_queue_work(cpu, work))
395			queued = true;
396	}
397	barrier();
398	stop_cpus_in_progress = false;
399	preempt_enable();
400
401	return queued;
402}
403
404static int __stop_cpus(const struct cpumask *cpumask,
405		       cpu_stop_fn_t fn, void *arg)
406{
407	struct cpu_stop_done done;
408
409	cpu_stop_init_done(&done, cpumask_weight(cpumask));
410	if (!queue_stop_cpus_work(cpumask, fn, arg, &done))
411		return -ENOENT;
412	wait_for_completion(&done.completion);
413	return done.ret;
414}
415
416/**
417 * stop_cpus - stop multiple cpus
418 * @cpumask: cpus to stop
419 * @fn: function to execute
420 * @arg: argument to @fn
421 *
422 * Execute @fn(@arg) on online cpus in @cpumask.  On each target cpu,
423 * @fn is run in a process context with the highest priority
424 * preempting any task on the cpu and monopolizing it.  This function
425 * returns after all executions are complete.
426 *
427 * This function doesn't guarantee the cpus in @cpumask stay online
428 * till @fn completes.  If some cpus go down in the middle, execution
429 * on the cpu may happen partially or fully on different cpus.  @fn
430 * should either be ready for that or the caller should ensure that
431 * the cpus stay online until this function completes.
432 *
433 * All stop_cpus() calls are serialized making it safe for @fn to wait
434 * for all cpus to start executing it.
435 *
436 * CONTEXT:
437 * Might sleep.
438 *
439 * RETURNS:
440 * -ENOENT if @fn(@arg) was not executed at all because all cpus in
441 * @cpumask were offline; otherwise, 0 if all executions of @fn
442 * returned 0, any non zero return value if any returned non zero.
443 */
444int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
445{
446	int ret;
447
448	/* static works are used, process one request at a time */
449	mutex_lock(&stop_cpus_mutex);
450	ret = __stop_cpus(cpumask, fn, arg);
451	mutex_unlock(&stop_cpus_mutex);
452	return ret;
453}
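
/*
 * Illustrative sketch, not part of the file above: stop_cpus() runs the
 * callback once on every online CPU in the mask and collects a single
 * error code.  example_per_cpu_fn() is a hypothetical callback.
 */
static int example_per_cpu_fn(void *arg)
{
	/* Runs concurrently on each targeted CPU, not just one of them. */
	return 0;
}

static int example_stop_all(void)
{
	return stop_cpus(cpu_online_mask, example_per_cpu_fn, NULL);
}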
454
455/**
456 * try_stop_cpus - try to stop multiple cpus
457 * @cpumask: cpus to stop
458 * @fn: function to execute
459 * @arg: argument to @fn
460 *
461 * Identical to stop_cpus() except that it fails with -EAGAIN if
462 * someone else is already using the facility.
463 *
464 * CONTEXT:
465 * Might sleep.
466 *
467 * RETURNS:
468 * -EAGAIN if someone else is already stopping cpus, -ENOENT if
469 * @fn(@arg) was not executed at all because all cpus in @cpumask were
470 * offline; otherwise, 0 if all executions of @fn returned 0, any non
471 * zero return value if any returned non zero.
472 */
473int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
474{
475	int ret;
476
477	/* static works are used, process one request at a time */
478	if (!mutex_trylock(&stop_cpus_mutex))
479		return -EAGAIN;
480	ret = __stop_cpus(cpumask, fn, arg);
481	mutex_unlock(&stop_cpus_mutex);
482	return ret;
483}
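
/*
 * Illustrative sketch, not part of the file above: try_stop_cpus() lets a
 * caller back off instead of sleeping on stop_cpus_mutex; -EAGAIN means
 * another stop_cpus() request is already in flight.  Reuses the
 * hypothetical example_per_cpu_fn() from the previous sketch.
 */
static int example_try_stop_all(void)
{
	int ret = try_stop_cpus(cpu_online_mask, example_per_cpu_fn, NULL);

	if (ret == -EAGAIN)
		pr_debug("stop_cpus facility busy, not retrying\n");
	return ret;
}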
484
485static int cpu_stop_should_run(unsigned int cpu)
486{
487	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
488	unsigned long flags;
489	int run;
490
491	raw_spin_lock_irqsave(&stopper->lock, flags);
492	run = !list_empty(&stopper->works);
493	raw_spin_unlock_irqrestore(&stopper->lock, flags);
494	return run;
495}
496
497static void cpu_stopper_thread(unsigned int cpu)
498{
499	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
500	struct cpu_stop_work *work;
501
502repeat:
503	work = NULL;
504	raw_spin_lock_irq(&stopper->lock);
505	if (!list_empty(&stopper->works)) {
506		work = list_first_entry(&stopper->works,
507					struct cpu_stop_work, list);
508		list_del_init(&work->list);
509	}
510	raw_spin_unlock_irq(&stopper->lock);
511
512	if (work) {
513		cpu_stop_fn_t fn = work->fn;
514		void *arg = work->arg;
515		struct cpu_stop_done *done = work->done;
516		int ret;
517
518		/* cpu stop callbacks must not sleep, make in_atomic() == T */
519		preempt_count_inc();
520		ret = fn(arg);
521		if (done) {
522			if (ret)
523				done->ret = ret;
524			cpu_stop_signal_done(done);
525		}
526		preempt_count_dec();
527		WARN_ONCE(preempt_count(),
528			  "cpu_stop: %ps(%p) leaked preempt count\n", fn, arg);
529		goto repeat;
530	}
531}
532
533void stop_machine_park(int cpu)
534{
535	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
536	/*
537	 * Lockless. cpu_stopper_thread() will take stopper->lock and flush
538	 * the pending works before it parks, until then it is fine to queue
539	 * the new works.
540	 */
541	stopper->enabled = false;
542	kthread_park(stopper->thread);
543}
544
545extern void sched_set_stop_task(int cpu, struct task_struct *stop);
546
547static void cpu_stop_create(unsigned int cpu)
548{
549	sched_set_stop_task(cpu, per_cpu(cpu_stopper.thread, cpu));
550}
551
552static void cpu_stop_park(unsigned int cpu)
553{
554	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
555
556	WARN_ON(!list_empty(&stopper->works));
557}
558
559void stop_machine_unpark(int cpu)
560{
561	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
562
563	stopper->enabled = true;
564	kthread_unpark(stopper->thread);
565}
566
567static struct smp_hotplug_thread cpu_stop_threads = {
568	.store			= &cpu_stopper.thread,
569	.thread_should_run	= cpu_stop_should_run,
570	.thread_fn		= cpu_stopper_thread,
571	.thread_comm		= "migration/%u",
572	.create			= cpu_stop_create,
573	.park			= cpu_stop_park,
574	.selfparking		= true,
575};
576
577static int __init cpu_stop_init(void)
578{
579	unsigned int cpu;
580
581	for_each_possible_cpu(cpu) {
582		struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
583
584		raw_spin_lock_init(&stopper->lock);
585		INIT_LIST_HEAD(&stopper->works);
586	}
587
588	BUG_ON(smpboot_register_percpu_thread(&cpu_stop_threads));
589	stop_machine_unpark(raw_smp_processor_id());
590	stop_machine_initialized = true;
591	return 0;
592}
593early_initcall(cpu_stop_init);
594
595int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data,
596			    const struct cpumask *cpus)
597{
598	struct multi_stop_data msdata = {
599		.fn = fn,
600		.data = data,
601		.num_threads = num_online_cpus(),
602		.active_cpus = cpus,
603	};
604
605	lockdep_assert_cpus_held();
606
607	if (!stop_machine_initialized) {
608		/*
609		 * Handle the case where stop_machine() is called
610		 * early in boot before stop_machine() has been
611		 * initialized.
612		 */
613		unsigned long flags;
614		int ret;
615
616		WARN_ON_ONCE(msdata.num_threads != 1);
617
618		local_irq_save(flags);
619		hard_irq_disable();
620		ret = (*fn)(data);
621		local_irq_restore(flags);
622
623		return ret;
624	}
625
626	/* Set the initial state and stop all online cpus. */
627	set_state(&msdata, MULTI_STOP_PREPARE);
628	return stop_cpus(cpu_online_mask, multi_cpu_stop, &msdata);
629}
630
631int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus)
632{
633	int ret;
634
635	/* No CPUs can come up or down during this. */
636	cpus_read_lock();
637	ret = stop_machine_cpuslocked(fn, data, cpus);
638	cpus_read_unlock();
639	return ret;
640}
641EXPORT_SYMBOL_GPL(stop_machine);
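
/*
 * Illustrative sketch, not part of the file above: the classic
 * stop_machine() pattern, freezing every online CPU with interrupts off
 * while a short critical update runs.  With @cpus == NULL the callback
 * runs only on the first online CPU.  Names below are hypothetical.
 */
static int example_update(void *arg)
{
	/* All other online CPUs spin in multi_cpu_stop() with IRQs disabled. */
	*(int *)arg = 42;
	return 0;
}

static int example_global_update(void)
{
	static int shared_state;

	return stop_machine(example_update, &shared_state, NULL);
}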
642
643/**
644 * stop_machine_from_inactive_cpu - stop_machine() from inactive CPU
645 * @fn: the function to run
646 * @data: the data ptr for the @fn()
647 * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
648 *
649 * This is identical to stop_machine() but can be called from a CPU which
650 * is not active.  The local CPU is in the process of hotplug (so no other
651 * CPU hotplug can start) and not marked active and doesn't have enough
652 * context to sleep.
653 *
654 * This function provides stop_machine() functionality for such state by
655 * using busy-wait for synchronization and executing @fn directly for local
656 * CPU.
657 *
658 * CONTEXT:
659 * Local CPU is inactive.  Temporarily stops all active CPUs.
660 *
661 * RETURNS:
662 * 0 if all executions of @fn returned 0, any non zero return value if any
663 * returned non zero.
664 */
665int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
666				  const struct cpumask *cpus)
667{
668	struct multi_stop_data msdata = { .fn = fn, .data = data,
669					    .active_cpus = cpus };
670	struct cpu_stop_done done;
671	int ret;
672
673	/* Local CPU must be inactive and CPU hotplug in progress. */
674	BUG_ON(cpu_active(raw_smp_processor_id()));
675	msdata.num_threads = num_active_cpus() + 1;	/* +1 for local */
676
677	/* No proper task established and can't sleep - busy wait for lock. */
678	while (!mutex_trylock(&stop_cpus_mutex))
679		cpu_relax();
680
681	/* Schedule work on other CPUs and execute directly for local CPU */
682	set_state(&msdata, MULTI_STOP_PREPARE);
683	cpu_stop_init_done(&done, num_active_cpus());
684	queue_stop_cpus_work(cpu_active_mask, multi_cpu_stop, &msdata,
685			     &done);
686	ret = multi_cpu_stop(&msdata);
687
688	/* Busy wait for completion. */
689	while (!completion_done(&done.completion))
690		cpu_relax();
691
692	mutex_unlock(&stop_cpus_mutex);
693	return ret ?: done.ret;
694}
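
/*
 * Illustrative sketch, not part of the file above: a CPU-hotplug path on a
 * not-yet-active CPU (the MTRR rendezvous was the historical user) could
 * synchronize against all active CPUs like this.  The callback is
 * hypothetical and must not sleep.
 */
static int example_sync_fn(void *arg)
{
	return 0;
}

static int example_sync_from_inactive(void)
{
	/* Busy-waits internally; only valid while this CPU is still inactive. */
	return stop_machine_from_inactive_cpu(example_sync_fn, NULL, NULL);
}
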
kernel/stop_machine.c (v4.10.11)
 
  1/*
  2 * kernel/stop_machine.c
  3 *
  4 * Copyright (C) 2008, 2005	IBM Corporation.
  5 * Copyright (C) 2008, 2005	Rusty Russell rusty@rustcorp.com.au
  6 * Copyright (C) 2010		SUSE Linux Products GmbH
  7 * Copyright (C) 2010		Tejun Heo <tj@kernel.org>
  8 *
  9 * This file is released under the GPLv2 and any later version.
 10 */
 11#include <linux/completion.h>
 12#include <linux/cpu.h>
 13#include <linux/init.h>
 14#include <linux/kthread.h>
 15#include <linux/export.h>
 16#include <linux/percpu.h>
 17#include <linux/sched.h>
 18#include <linux/stop_machine.h>
 19#include <linux/interrupt.h>
 20#include <linux/kallsyms.h>
 21#include <linux/smpboot.h>
 22#include <linux/atomic.h>
 23#include <linux/nmi.h>
 24
 25/*
 26 * Structure to determine completion condition and record errors.  May
 27 * be shared by works on different cpus.
 28 */
 29struct cpu_stop_done {
 30	atomic_t		nr_todo;	/* nr left to execute */
 31	int			ret;		/* collected return value */
 32	struct completion	completion;	/* fired if nr_todo reaches 0 */
 33};
 34
 35/* the actual stopper, one per every possible cpu, enabled on online cpus */
 36struct cpu_stopper {
 37	struct task_struct	*thread;
 38
 39	spinlock_t		lock;
 40	bool			enabled;	/* is this stopper enabled? */
 41	struct list_head	works;		/* list of pending works */
 42
 43	struct cpu_stop_work	stop_work;	/* for stop_cpus */
 44};
 45
 46static DEFINE_PER_CPU(struct cpu_stopper, cpu_stopper);
 47static bool stop_machine_initialized = false;
 48
 49/* static data for stop_cpus */
 50static DEFINE_MUTEX(stop_cpus_mutex);
 51static bool stop_cpus_in_progress;
 52
 53static void cpu_stop_init_done(struct cpu_stop_done *done, unsigned int nr_todo)
 54{
 55	memset(done, 0, sizeof(*done));
 56	atomic_set(&done->nr_todo, nr_todo);
 57	init_completion(&done->completion);
 58}
 59
 60/* signal completion unless @done is NULL */
 61static void cpu_stop_signal_done(struct cpu_stop_done *done)
 62{
 63	if (atomic_dec_and_test(&done->nr_todo))
 64		complete(&done->completion);
 65}
 66
 67static void __cpu_stop_queue_work(struct cpu_stopper *stopper,
 68					struct cpu_stop_work *work)
 69{
 70	list_add_tail(&work->list, &stopper->works);
 71	wake_up_process(stopper->thread);
 72}
 73
 74/* queue @work to @stopper.  if offline, @work is completed immediately */
 75static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
 76{
 77	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
 78	unsigned long flags;
 79	bool enabled;
 80
 81	spin_lock_irqsave(&stopper->lock, flags);
 82	enabled = stopper->enabled;
 83	if (enabled)
 84		__cpu_stop_queue_work(stopper, work);
 85	else if (work->done)
 86		cpu_stop_signal_done(work->done);
 87	spin_unlock_irqrestore(&stopper->lock, flags);
 88
 89	return enabled;
 90}
 91
 92/**
 93 * stop_one_cpu - stop a cpu
 94 * @cpu: cpu to stop
 95 * @fn: function to execute
 96 * @arg: argument to @fn
 97 *
 98 * Execute @fn(@arg) on @cpu.  @fn is run in a process context with
 99 * the highest priority preempting any task on the cpu and
100 * monopolizing it.  This function returns after the execution is
101 * complete.
102 *
103 * This function doesn't guarantee @cpu stays online till @fn
104 * completes.  If @cpu goes down in the middle, execution may happen
105 * partially or fully on different cpus.  @fn should either be ready
106 * for that or the caller should ensure that @cpu stays online until
107 * this function completes.
108 *
109 * CONTEXT:
110 * Might sleep.
111 *
112 * RETURNS:
113 * -ENOENT if @fn(@arg) was not executed because @cpu was offline;
114 * otherwise, the return value of @fn.
115 */
116int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
117{
118	struct cpu_stop_done done;
119	struct cpu_stop_work work = { .fn = fn, .arg = arg, .done = &done };
120
121	cpu_stop_init_done(&done, 1);
122	if (!cpu_stop_queue_work(cpu, &work))
123		return -ENOENT;
124	/*
 125	 * In case @cpu == smp_processor_id() we can avoid a sleep+wakeup
126	 * cycle by doing a preemption:
127	 */
128	cond_resched();
129	wait_for_completion(&done.completion);
130	return done.ret;
131}
132
133/* This controls the threads on each CPU. */
134enum multi_stop_state {
135	/* Dummy starting state for thread. */
136	MULTI_STOP_NONE,
137	/* Awaiting everyone to be scheduled. */
138	MULTI_STOP_PREPARE,
139	/* Disable interrupts. */
140	MULTI_STOP_DISABLE_IRQ,
141	/* Run the function */
142	MULTI_STOP_RUN,
143	/* Exit */
144	MULTI_STOP_EXIT,
145};
146
147struct multi_stop_data {
148	cpu_stop_fn_t		fn;
149	void			*data;
150	/* Like num_online_cpus(), but hotplug cpu uses us, so we need this. */
151	unsigned int		num_threads;
152	const struct cpumask	*active_cpus;
153
154	enum multi_stop_state	state;
155	atomic_t		thread_ack;
156};
157
158static void set_state(struct multi_stop_data *msdata,
159		      enum multi_stop_state newstate)
160{
161	/* Reset ack counter. */
162	atomic_set(&msdata->thread_ack, msdata->num_threads);
163	smp_wmb();
164	msdata->state = newstate;
165}
166
167/* Last one to ack a state moves to the next state. */
168static void ack_state(struct multi_stop_data *msdata)
169{
170	if (atomic_dec_and_test(&msdata->thread_ack))
171		set_state(msdata, msdata->state + 1);
172}
 173
174/* This is the cpu_stop function which stops the CPU. */
175static int multi_cpu_stop(void *data)
176{
177	struct multi_stop_data *msdata = data;
178	enum multi_stop_state curstate = MULTI_STOP_NONE;
 179	int cpu = smp_processor_id(), err = 0;
180	unsigned long flags;
181	bool is_active;
182
183	/*
184	 * When called from stop_machine_from_inactive_cpu(), irq might
185	 * already be disabled.  Save the state and restore it on exit.
186	 */
187	local_save_flags(flags);
188
189	if (!msdata->active_cpus)
190		is_active = cpu == cpumask_first(cpu_online_mask);
191	else
 192		is_active = cpumask_test_cpu(cpu, msdata->active_cpus);
193
194	/* Simple state machine */
195	do {
196		/* Chill out and ensure we re-read multi_stop_state. */
197		cpu_relax_yield();
198		if (msdata->state != curstate) {
 199			curstate = msdata->state;
200			switch (curstate) {
201			case MULTI_STOP_DISABLE_IRQ:
202				local_irq_disable();
203				hard_irq_disable();
204				break;
205			case MULTI_STOP_RUN:
206				if (is_active)
207					err = msdata->fn(msdata->data);
208				break;
209			default:
210				break;
211			}
212			ack_state(msdata);
213		} else if (curstate > MULTI_STOP_PREPARE) {
214			/*
215			 * At this stage all other CPUs we depend on must spin
216			 * in the same loop. Any reason for hard-lockup should
217			 * be detected and reported on their side.
218			 */
219			touch_nmi_watchdog();
220		}
221	} while (curstate != MULTI_STOP_EXIT);
222
223	local_irq_restore(flags);
224	return err;
225}
226
227static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
228				    int cpu2, struct cpu_stop_work *work2)
229{
230	struct cpu_stopper *stopper1 = per_cpu_ptr(&cpu_stopper, cpu1);
 231	struct cpu_stopper *stopper2 = per_cpu_ptr(&cpu_stopper, cpu2);
 232	int err;
233retry:
234	spin_lock_irq(&stopper1->lock);
 235	spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING);
236
237	err = -ENOENT;
238	if (!stopper1->enabled || !stopper2->enabled)
 239		goto unlock;
240	/*
241	 * Ensure that if we race with __stop_cpus() the stoppers won't get
242	 * queued up in reverse order leading to system deadlock.
243	 *
244	 * We can't miss stop_cpus_in_progress if queue_stop_cpus_work() has
245	 * queued a work on cpu1 but not on cpu2, we hold both locks.
246	 *
247	 * It can be falsely true but it is safe to spin until it is cleared,
248	 * queue_stop_cpus_work() does everything under preempt_disable().
249	 */
250	err = -EDEADLK;
251	if (unlikely(stop_cpus_in_progress))
 252			goto unlock;
253
254	err = 0;
255	__cpu_stop_queue_work(stopper1, work1);
 256	__cpu_stop_queue_work(stopper2, work2);
257unlock:
258	spin_unlock(&stopper2->lock);
259	spin_unlock_irq(&stopper1->lock);
260
 261	if (unlikely(err == -EDEADLK)) {
262		while (stop_cpus_in_progress)
 263			cpu_relax();
264		goto retry;
 265	}
266	return err;
267}
268/**
269 * stop_two_cpus - stops two cpus
270 * @cpu1: the cpu to stop
271 * @cpu2: the other cpu to stop
272 * @fn: function to execute
273 * @arg: argument to @fn
274 *
275 * Stops both the current and specified CPU and runs @fn on one of them.
276 *
277 * returns when both are completed.
278 */
279int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *arg)
280{
281	struct cpu_stop_done done;
282	struct cpu_stop_work work1, work2;
283	struct multi_stop_data msdata;
284
285	msdata = (struct multi_stop_data){
286		.fn = fn,
287		.data = arg,
288		.num_threads = 2,
289		.active_cpus = cpumask_of(cpu1),
290	};
291
292	work1 = work2 = (struct cpu_stop_work){
293		.fn = multi_cpu_stop,
294		.arg = &msdata,
295		.done = &done
296	};
297
298	cpu_stop_init_done(&done, 2);
299	set_state(&msdata, MULTI_STOP_PREPARE);
300
301	if (cpu1 > cpu2)
302		swap(cpu1, cpu2);
303	if (cpu_stop_queue_two_works(cpu1, &work1, cpu2, &work2))
304		return -ENOENT;
305
306	wait_for_completion(&done.completion);
307	return done.ret;
308}
309
310/**
311 * stop_one_cpu_nowait - stop a cpu but don't wait for completion
312 * @cpu: cpu to stop
313 * @fn: function to execute
314 * @arg: argument to @fn
315 * @work_buf: pointer to cpu_stop_work structure
316 *
317 * Similar to stop_one_cpu() but doesn't wait for completion.  The
318 * caller is responsible for ensuring @work_buf is currently unused
319 * and will remain untouched until stopper starts executing @fn.
320 *
321 * CONTEXT:
322 * Don't care.
323 *
324 * RETURNS:
325 * true if cpu_stop_work was queued successfully and @fn will be called,
326 * false otherwise.
327 */
328bool stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
329			struct cpu_stop_work *work_buf)
330{
331	*work_buf = (struct cpu_stop_work){ .fn = fn, .arg = arg, };
332	return cpu_stop_queue_work(cpu, work_buf);
333}
334
335static bool queue_stop_cpus_work(const struct cpumask *cpumask,
336				 cpu_stop_fn_t fn, void *arg,
337				 struct cpu_stop_done *done)
338{
339	struct cpu_stop_work *work;
340	unsigned int cpu;
341	bool queued = false;
342
343	/*
344	 * Disable preemption while queueing to avoid getting
345	 * preempted by a stopper which might wait for other stoppers
346	 * to enter @fn which can lead to deadlock.
347	 */
348	preempt_disable();
 349	stop_cpus_in_progress = true;
350	for_each_cpu(cpu, cpumask) {
351		work = &per_cpu(cpu_stopper.stop_work, cpu);
352		work->fn = fn;
353		work->arg = arg;
354		work->done = done;
355		if (cpu_stop_queue_work(cpu, work))
356			queued = true;
 357	}
358	stop_cpus_in_progress = false;
359	preempt_enable();
360
361	return queued;
362}
363
364static int __stop_cpus(const struct cpumask *cpumask,
365		       cpu_stop_fn_t fn, void *arg)
366{
367	struct cpu_stop_done done;
368
369	cpu_stop_init_done(&done, cpumask_weight(cpumask));
370	if (!queue_stop_cpus_work(cpumask, fn, arg, &done))
371		return -ENOENT;
372	wait_for_completion(&done.completion);
373	return done.ret;
374}
375
376/**
377 * stop_cpus - stop multiple cpus
378 * @cpumask: cpus to stop
379 * @fn: function to execute
380 * @arg: argument to @fn
381 *
382 * Execute @fn(@arg) on online cpus in @cpumask.  On each target cpu,
383 * @fn is run in a process context with the highest priority
384 * preempting any task on the cpu and monopolizing it.  This function
385 * returns after all executions are complete.
386 *
387 * This function doesn't guarantee the cpus in @cpumask stay online
388 * till @fn completes.  If some cpus go down in the middle, execution
389 * on the cpu may happen partially or fully on different cpus.  @fn
390 * should either be ready for that or the caller should ensure that
391 * the cpus stay online until this function completes.
392 *
393 * All stop_cpus() calls are serialized making it safe for @fn to wait
394 * for all cpus to start executing it.
395 *
396 * CONTEXT:
397 * Might sleep.
398 *
399 * RETURNS:
400 * -ENOENT if @fn(@arg) was not executed at all because all cpus in
401 * @cpumask were offline; otherwise, 0 if all executions of @fn
402 * returned 0, any non zero return value if any returned non zero.
403 */
404int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
405{
406	int ret;
407
408	/* static works are used, process one request at a time */
409	mutex_lock(&stop_cpus_mutex);
410	ret = __stop_cpus(cpumask, fn, arg);
411	mutex_unlock(&stop_cpus_mutex);
412	return ret;
413}
414
415/**
416 * try_stop_cpus - try to stop multiple cpus
417 * @cpumask: cpus to stop
418 * @fn: function to execute
419 * @arg: argument to @fn
420 *
421 * Identical to stop_cpus() except that it fails with -EAGAIN if
422 * someone else is already using the facility.
423 *
424 * CONTEXT:
425 * Might sleep.
426 *
427 * RETURNS:
428 * -EAGAIN if someone else is already stopping cpus, -ENOENT if
429 * @fn(@arg) was not executed at all because all cpus in @cpumask were
430 * offline; otherwise, 0 if all executions of @fn returned 0, any non
431 * zero return value if any returned non zero.
432 */
433int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
434{
435	int ret;
436
437	/* static works are used, process one request at a time */
438	if (!mutex_trylock(&stop_cpus_mutex))
439		return -EAGAIN;
440	ret = __stop_cpus(cpumask, fn, arg);
441	mutex_unlock(&stop_cpus_mutex);
442	return ret;
443}
444
445static int cpu_stop_should_run(unsigned int cpu)
446{
447	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
448	unsigned long flags;
449	int run;
450
451	spin_lock_irqsave(&stopper->lock, flags);
452	run = !list_empty(&stopper->works);
453	spin_unlock_irqrestore(&stopper->lock, flags);
454	return run;
455}
456
457static void cpu_stopper_thread(unsigned int cpu)
458{
459	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
460	struct cpu_stop_work *work;
461
462repeat:
463	work = NULL;
464	spin_lock_irq(&stopper->lock);
465	if (!list_empty(&stopper->works)) {
466		work = list_first_entry(&stopper->works,
467					struct cpu_stop_work, list);
468		list_del_init(&work->list);
469	}
470	spin_unlock_irq(&stopper->lock);
471
472	if (work) {
473		cpu_stop_fn_t fn = work->fn;
474		void *arg = work->arg;
475		struct cpu_stop_done *done = work->done;
476		int ret;
477
478		/* cpu stop callbacks must not sleep, make in_atomic() == T */
479		preempt_count_inc();
480		ret = fn(arg);
481		if (done) {
482			if (ret)
483				done->ret = ret;
484			cpu_stop_signal_done(done);
485		}
486		preempt_count_dec();
487		WARN_ONCE(preempt_count(),
488			  "cpu_stop: %pf(%p) leaked preempt count\n", fn, arg);
489		goto repeat;
490	}
491}
492
493void stop_machine_park(int cpu)
494{
495	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
496	/*
497	 * Lockless. cpu_stopper_thread() will take stopper->lock and flush
498	 * the pending works before it parks, until then it is fine to queue
499	 * the new works.
500	 */
501	stopper->enabled = false;
502	kthread_park(stopper->thread);
503}
504
505extern void sched_set_stop_task(int cpu, struct task_struct *stop);
506
507static void cpu_stop_create(unsigned int cpu)
508{
509	sched_set_stop_task(cpu, per_cpu(cpu_stopper.thread, cpu));
510}
511
512static void cpu_stop_park(unsigned int cpu)
513{
514	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
515
516	WARN_ON(!list_empty(&stopper->works));
517}
518
519void stop_machine_unpark(int cpu)
520{
521	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
522
523	stopper->enabled = true;
524	kthread_unpark(stopper->thread);
525}
526
527static struct smp_hotplug_thread cpu_stop_threads = {
528	.store			= &cpu_stopper.thread,
529	.thread_should_run	= cpu_stop_should_run,
530	.thread_fn		= cpu_stopper_thread,
531	.thread_comm		= "migration/%u",
532	.create			= cpu_stop_create,
533	.park			= cpu_stop_park,
534	.selfparking		= true,
535};
536
537static int __init cpu_stop_init(void)
538{
539	unsigned int cpu;
540
541	for_each_possible_cpu(cpu) {
542		struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
543
544		spin_lock_init(&stopper->lock);
545		INIT_LIST_HEAD(&stopper->works);
546	}
547
548	BUG_ON(smpboot_register_percpu_thread(&cpu_stop_threads));
549	stop_machine_unpark(raw_smp_processor_id());
550	stop_machine_initialized = true;
551	return 0;
552}
553early_initcall(cpu_stop_init);
554
 555static int __stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus)
556{
557	struct multi_stop_data msdata = {
558		.fn = fn,
559		.data = data,
560		.num_threads = num_online_cpus(),
561		.active_cpus = cpus,
562	};
 563
564	if (!stop_machine_initialized) {
565		/*
566		 * Handle the case where stop_machine() is called
567		 * early in boot before stop_machine() has been
568		 * initialized.
569		 */
570		unsigned long flags;
571		int ret;
572
573		WARN_ON_ONCE(msdata.num_threads != 1);
574
575		local_irq_save(flags);
576		hard_irq_disable();
577		ret = (*fn)(data);
578		local_irq_restore(flags);
579
580		return ret;
581	}
582
583	/* Set the initial state and stop all online cpus. */
584	set_state(&msdata, MULTI_STOP_PREPARE);
585	return stop_cpus(cpu_online_mask, multi_cpu_stop, &msdata);
586}
587
588int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus)
589{
590	int ret;
591
592	/* No CPUs can come up or down during this. */
593	get_online_cpus();
594	ret = __stop_machine(fn, data, cpus);
595	put_online_cpus();
596	return ret;
597}
598EXPORT_SYMBOL_GPL(stop_machine);
599
600/**
601 * stop_machine_from_inactive_cpu - stop_machine() from inactive CPU
602 * @fn: the function to run
603 * @data: the data ptr for the @fn()
604 * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
605 *
606 * This is identical to stop_machine() but can be called from a CPU which
607 * is not active.  The local CPU is in the process of hotplug (so no other
608 * CPU hotplug can start) and not marked active and doesn't have enough
609 * context to sleep.
610 *
611 * This function provides stop_machine() functionality for such state by
612 * using busy-wait for synchronization and executing @fn directly for local
613 * CPU.
614 *
615 * CONTEXT:
616 * Local CPU is inactive.  Temporarily stops all active CPUs.
617 *
618 * RETURNS:
619 * 0 if all executions of @fn returned 0, any non zero return value if any
620 * returned non zero.
621 */
622int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
623				  const struct cpumask *cpus)
624{
625	struct multi_stop_data msdata = { .fn = fn, .data = data,
626					    .active_cpus = cpus };
627	struct cpu_stop_done done;
628	int ret;
629
630	/* Local CPU must be inactive and CPU hotplug in progress. */
631	BUG_ON(cpu_active(raw_smp_processor_id()));
632	msdata.num_threads = num_active_cpus() + 1;	/* +1 for local */
633
634	/* No proper task established and can't sleep - busy wait for lock. */
635	while (!mutex_trylock(&stop_cpus_mutex))
636		cpu_relax();
637
638	/* Schedule work on other CPUs and execute directly for local CPU */
639	set_state(&msdata, MULTI_STOP_PREPARE);
640	cpu_stop_init_done(&done, num_active_cpus());
641	queue_stop_cpus_work(cpu_active_mask, multi_cpu_stop, &msdata,
642			     &done);
643	ret = multi_cpu_stop(&msdata);
644
645	/* Busy wait for completion. */
646	while (!completion_done(&done.completion))
647		cpu_relax();
648
649	mutex_unlock(&stop_cpus_mutex);
650	return ret ?: done.ret;
651}