v4.6
/*
 * kernel/stop_machine.c
 *
 * Copyright (C) 2008, 2005	IBM Corporation.
 * Copyright (C) 2008, 2005	Rusty Russell rusty@rustcorp.com.au
 * Copyright (C) 2010		SUSE Linux Products GmbH
 * Copyright (C) 2010		Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2 and any later version.
 */
#include <linux/completion.h>
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/stop_machine.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/smpboot.h>
#include <linux/atomic.h>
#include <linux/lglock.h>

/*
 * Structure to determine completion condition and record errors.  May
 * be shared by works on different cpus.
 */
struct cpu_stop_done {
	atomic_t		nr_todo;	/* nr left to execute */
	int			ret;		/* collected return value */
	struct completion	completion;	/* fired if nr_todo reaches 0 */
};

/* the actual stopper, one per every possible cpu, enabled on online cpus */
struct cpu_stopper {
	struct task_struct	*thread;

	spinlock_t		lock;
	bool			enabled;	/* is this stopper enabled? */
	struct list_head	works;		/* list of pending works */

	struct cpu_stop_work	stop_work;	/* for stop_cpus */
};

static DEFINE_PER_CPU(struct cpu_stopper, cpu_stopper);
static bool stop_machine_initialized = false;

/*
 * Avoids a race between stop_two_cpus and global stop_cpus, where
 * the stoppers could get queued up in reverse order, leading to
 * system deadlock. Using an lglock means stop_two_cpus remains
 * relatively cheap.
 */
DEFINE_STATIC_LGLOCK(stop_cpus_lock);

static void cpu_stop_init_done(struct cpu_stop_done *done, unsigned int nr_todo)
{
	memset(done, 0, sizeof(*done));
	atomic_set(&done->nr_todo, nr_todo);
	init_completion(&done->completion);
}

/* signal completion unless @done is NULL */
static void cpu_stop_signal_done(struct cpu_stop_done *done)
{
	if (atomic_dec_and_test(&done->nr_todo))
		complete(&done->completion);
}

static void __cpu_stop_queue_work(struct cpu_stopper *stopper,
					struct cpu_stop_work *work)
{
	list_add_tail(&work->list, &stopper->works);
	wake_up_process(stopper->thread);
}

/* queue @work to @stopper.  if offline, @work is completed immediately */
static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	unsigned long flags;
	bool enabled;

	spin_lock_irqsave(&stopper->lock, flags);
	enabled = stopper->enabled;
	if (enabled)
		__cpu_stop_queue_work(stopper, work);
	else if (work->done)
		cpu_stop_signal_done(work->done);
	spin_unlock_irqrestore(&stopper->lock, flags);

	return enabled;
}

/**
 * stop_one_cpu - stop a cpu
 * @cpu: cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Execute @fn(@arg) on @cpu.  @fn is run in a process context with
 * the highest priority preempting any task on the cpu and
 * monopolizing it.  This function returns after the execution is
 * complete.
 *
 * This function doesn't guarantee @cpu stays online till @fn
 * completes.  If @cpu goes down in the middle, execution may happen
 * partially or fully on different cpus.  @fn should either be ready
 * for that or the caller should ensure that @cpu stays online until
 * this function completes.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -ENOENT if @fn(@arg) was not executed because @cpu was offline;
 * otherwise, the return value of @fn.
 */
int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;
	struct cpu_stop_work work = { .fn = fn, .arg = arg, .done = &done };

	cpu_stop_init_done(&done, 1);
	if (!cpu_stop_queue_work(cpu, &work))
		return -ENOENT;
	wait_for_completion(&done.completion);
	return done.ret;
}

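Usage sketch (editorial, not part of the file): stop_one_cpu() runs a
callback on one cpu with the stopper thread monopolizing it.  The
toggle_feature() helper below is hypothetical, for illustration only.

	#include <linux/stop_machine.h>

	static int set_feature_bit(void *arg)
	{
		unsigned long bit = *(unsigned long *)arg;

		/* Runs on the target cpu; nothing else runs there. */
		toggle_feature(bit);	/* hypothetical helper */
		return 0;
	}

	static int enable_feature_on(unsigned int cpu, unsigned long bit)
	{
		/* -ENOENT if @cpu is offline, else set_feature_bit()'s rc. */
		return stop_one_cpu(cpu, set_feature_bit, &bit);
	}
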
/* This controls the threads on each CPU. */
enum multi_stop_state {
	/* Dummy starting state for thread. */
	MULTI_STOP_NONE,
	/* Awaiting everyone to be scheduled. */
	MULTI_STOP_PREPARE,
	/* Disable interrupts. */
	MULTI_STOP_DISABLE_IRQ,
	/* Run the function */
	MULTI_STOP_RUN,
	/* Exit */
	MULTI_STOP_EXIT,
};

struct multi_stop_data {
	cpu_stop_fn_t		fn;
	void			*data;
	/* Like num_online_cpus(), but hotplug cpu uses us, so we need this. */
	unsigned int		num_threads;
	const struct cpumask	*active_cpus;

	enum multi_stop_state	state;
	atomic_t		thread_ack;
};

static void set_state(struct multi_stop_data *msdata,
		      enum multi_stop_state newstate)
{
	/* Reset ack counter. */
	atomic_set(&msdata->thread_ack, msdata->num_threads);
	smp_wmb();
	msdata->state = newstate;
}

/* Last one to ack a state moves to the next state. */
static void ack_state(struct multi_stop_data *msdata)
{
	if (atomic_dec_and_test(&msdata->thread_ack))
		set_state(msdata, msdata->state + 1);
}

/* This is the cpu_stop function which stops the CPU. */
static int multi_cpu_stop(void *data)
{
	struct multi_stop_data *msdata = data;
	enum multi_stop_state curstate = MULTI_STOP_NONE;
	int cpu = smp_processor_id(), err = 0;
	unsigned long flags;
	bool is_active;

	/*
	 * When called from stop_machine_from_inactive_cpu(), irq might
	 * already be disabled.  Save the state and restore it on exit.
	 */
	local_save_flags(flags);

	if (!msdata->active_cpus)
		is_active = cpu == cpumask_first(cpu_online_mask);
	else
		is_active = cpumask_test_cpu(cpu, msdata->active_cpus);

	/* Simple state machine */
	do {
		/* Chill out and ensure we re-read multi_stop_state. */
		cpu_relax();
		if (msdata->state != curstate) {
			curstate = msdata->state;
			switch (curstate) {
			case MULTI_STOP_DISABLE_IRQ:
				local_irq_disable();
				hard_irq_disable();
				break;
			case MULTI_STOP_RUN:
				if (is_active)
					err = msdata->fn(msdata->data);
				break;
			default:
				break;
			}
			ack_state(msdata);
		}
	} while (curstate != MULTI_STOP_EXIT);

	local_irq_restore(flags);
	return err;
}

static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
				    int cpu2, struct cpu_stop_work *work2)
{
	struct cpu_stopper *stopper1 = per_cpu_ptr(&cpu_stopper, cpu1);
	struct cpu_stopper *stopper2 = per_cpu_ptr(&cpu_stopper, cpu2);
	int err;

	lg_double_lock(&stop_cpus_lock, cpu1, cpu2);
	spin_lock_irq(&stopper1->lock);
	spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING);

	err = -ENOENT;
	if (!stopper1->enabled || !stopper2->enabled)
		goto unlock;

	err = 0;
	__cpu_stop_queue_work(stopper1, work1);
	__cpu_stop_queue_work(stopper2, work2);
unlock:
	spin_unlock(&stopper2->lock);
	spin_unlock_irq(&stopper1->lock);
	lg_double_unlock(&stop_cpus_lock, cpu1, cpu2);

	return err;
}
/**
 * stop_two_cpus - stops two cpus
 * @cpu1: the cpu to stop
 * @cpu2: the other cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Stops both the current and specified CPU and runs @fn on one of them.
 *
 * returns when both are completed.
 */
int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;
	struct cpu_stop_work work1, work2;
	struct multi_stop_data msdata;

	msdata = (struct multi_stop_data){
		.fn = fn,
		.data = arg,
		.num_threads = 2,
		.active_cpus = cpumask_of(cpu1),
	};

	work1 = work2 = (struct cpu_stop_work){
		.fn = multi_cpu_stop,
		.arg = &msdata,
		.done = &done
	};

	cpu_stop_init_done(&done, 2);
	set_state(&msdata, MULTI_STOP_PREPARE);

	if (cpu1 > cpu2)
		swap(cpu1, cpu2);
	if (cpu_stop_queue_two_works(cpu1, &work1, cpu2, &work2))
		return -ENOENT;

	wait_for_completion(&done.completion);
	return done.ret;
}

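Context note (editorial): the scheduler's migrate_swap() is the
best-known caller of stop_two_cpus(), using it to atomically swap two
tasks between two runqueues.  A hedged sketch of the calling pattern;
struct swap_payload and perform_exchange() are illustrative, not
kernel code:

	struct swap_payload {
		int src_cpu, dst_cpu;
	};

	static int do_swap(void *arg)
	{
		/*
		 * Both cpus spin in multi_cpu_stop() with irqs off; only
		 * the cpu passed as @cpu1 (the "active" one) runs this.
		 */
		return perform_exchange(arg);	/* hypothetical */
	}

	static int swap_between(int src, int dst)
	{
		struct swap_payload p = { .src_cpu = src, .dst_cpu = dst };

		return stop_two_cpus(src, dst, do_swap, &p);
	}
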
/**
 * stop_one_cpu_nowait - stop a cpu but don't wait for completion
 * @cpu: cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 * @work_buf: pointer to cpu_stop_work structure
 *
 * Similar to stop_one_cpu() but doesn't wait for completion.  The
 * caller is responsible for ensuring @work_buf is currently unused
 * and will remain untouched until stopper starts executing @fn.
 *
 * CONTEXT:
 * Don't care.
 *
 * RETURNS:
 * true if cpu_stop_work was queued successfully and @fn will be called,
 * false otherwise.
 */
bool stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
			struct cpu_stop_work *work_buf)
{
	*work_buf = (struct cpu_stop_work){ .fn = fn, .arg = arg, };
	return cpu_stop_queue_work(cpu, work_buf);
}

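Usage sketch (editorial, not part of the file): the nowait variant is
what the scheduler's active load balancing uses from atomic context.
The caller owns the work buffer, which must stay untouched until the
stopper has started @fn; the per-cpu buffer here is illustrative.

	static DEFINE_PER_CPU(struct cpu_stop_work, kick_work);

	static int kick_fn(void *arg)
	{
		return 0;	/* illustrative no-op payload */
	}

	/* Never sleeps, so it may be called with irqs disabled. */
	static void kick_cpu(unsigned int cpu)
	{
		stop_one_cpu_nowait(cpu, kick_fn, NULL,
				    &per_cpu(kick_work, cpu));
	}
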
/* static data for stop_cpus */
static DEFINE_MUTEX(stop_cpus_mutex);

static bool queue_stop_cpus_work(const struct cpumask *cpumask,
				 cpu_stop_fn_t fn, void *arg,
				 struct cpu_stop_done *done)
{
	struct cpu_stop_work *work;
	unsigned int cpu;
	bool queued = false;

	/*
	 * Disable preemption while queueing to avoid getting
	 * preempted by a stopper which might wait for other stoppers
	 * to enter @fn which can lead to deadlock.
	 */
	lg_global_lock(&stop_cpus_lock);
	for_each_cpu(cpu, cpumask) {
		work = &per_cpu(cpu_stopper.stop_work, cpu);
		work->fn = fn;
		work->arg = arg;
		work->done = done;
		if (cpu_stop_queue_work(cpu, work))
			queued = true;
	}
	lg_global_unlock(&stop_cpus_lock);

	return queued;
}

static int __stop_cpus(const struct cpumask *cpumask,
		       cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;

	cpu_stop_init_done(&done, cpumask_weight(cpumask));
	if (!queue_stop_cpus_work(cpumask, fn, arg, &done))
		return -ENOENT;
	wait_for_completion(&done.completion);
	return done.ret;
}

/**
 * stop_cpus - stop multiple cpus
 * @cpumask: cpus to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Execute @fn(@arg) on online cpus in @cpumask.  On each target cpu,
 * @fn is run in a process context with the highest priority
 * preempting any task on the cpu and monopolizing it.  This function
 * returns after all executions are complete.
 *
 * This function doesn't guarantee the cpus in @cpumask stay online
 * till @fn completes.  If some cpus go down in the middle, execution
 * on the cpu may happen partially or fully on different cpus.  @fn
 * should either be ready for that or the caller should ensure that
 * the cpus stay online until this function completes.
 *
 * All stop_cpus() calls are serialized making it safe for @fn to wait
 * for all cpus to start executing it.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -ENOENT if @fn(@arg) was not executed at all because all cpus in
 * @cpumask were offline; otherwise, 0 if all executions of @fn
 * returned 0, any non zero return value if any returned non zero.
 */
int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
{
	int ret;

	/* static works are used, process one request at a time */
	mutex_lock(&stop_cpus_mutex);
	ret = __stop_cpus(cpumask, fn, arg);
	mutex_unlock(&stop_cpus_mutex);
	return ret;
}

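Usage sketch (editorial, not part of the file): stop_cpus() takes a
mask, so @fn can be run in lockstep on a subset of cpus; the
sync_counters() payload below is illustrative.

	static int sync_counters(void *arg)
	{
		/* Runs concurrently on every online cpu in the mask. */
		return 0;	/* illustrative */
	}

	static int sync_node(const struct cpumask *node_mask)
	{
		/* -ENOENT only if every cpu in @node_mask was offline. */
		return stop_cpus(node_mask, sync_counters, NULL);
	}
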
/**
 * try_stop_cpus - try to stop multiple cpus
 * @cpumask: cpus to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Identical to stop_cpus() except that it fails with -EAGAIN if
 * someone else is already using the facility.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -EAGAIN if someone else is already stopping cpus, -ENOENT if
 * @fn(@arg) was not executed at all because all cpus in @cpumask were
 * offline; otherwise, 0 if all executions of @fn returned 0, any non
 * zero return value if any returned non zero.
 */
int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
{
	int ret;

	/* static works are used, process one request at a time */
	if (!mutex_trylock(&stop_cpus_mutex))
		return -EAGAIN;
	ret = __stop_cpus(cpumask, fn, arg);
	mutex_unlock(&stop_cpus_mutex);
	return ret;
}

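Usage sketch (editorial, not part of the file): try_stop_cpus() suits
callers that would rather back off than sleep on stop_cpus_mutex.
This reuses the illustrative sync_counters() payload from above.

	static int sync_all_or_backoff(void)
	{
		int ret;

		while ((ret = try_stop_cpus(cpu_online_mask,
					    sync_counters, NULL)) == -EAGAIN)
			cond_resched();	/* facility busy; retry later */

		return ret;
	}
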
static int cpu_stop_should_run(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	unsigned long flags;
	int run;

	spin_lock_irqsave(&stopper->lock, flags);
	run = !list_empty(&stopper->works);
	spin_unlock_irqrestore(&stopper->lock, flags);
	return run;
}

static void cpu_stopper_thread(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	struct cpu_stop_work *work;

repeat:
	work = NULL;
	spin_lock_irq(&stopper->lock);
	if (!list_empty(&stopper->works)) {
		work = list_first_entry(&stopper->works,
					struct cpu_stop_work, list);
		list_del_init(&work->list);
	}
	spin_unlock_irq(&stopper->lock);

	if (work) {
		cpu_stop_fn_t fn = work->fn;
		void *arg = work->arg;
		struct cpu_stop_done *done = work->done;
		int ret;

		/* cpu stop callbacks must not sleep, make in_atomic() == T */
		preempt_count_inc();
		ret = fn(arg);
		if (done) {
			if (ret)
				done->ret = ret;
			cpu_stop_signal_done(done);
		}
		preempt_count_dec();
		WARN_ONCE(preempt_count(),
			  "cpu_stop: %pf(%p) leaked preempt count\n", fn, arg);
		goto repeat;
	}
}

void stop_machine_park(int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	/*
	 * Lockless. cpu_stopper_thread() will take stopper->lock and flush
	 * the pending works before it parks, until then it is fine to queue
	 * the new works.
	 */
	stopper->enabled = false;
	kthread_park(stopper->thread);
}

extern void sched_set_stop_task(int cpu, struct task_struct *stop);

static void cpu_stop_create(unsigned int cpu)
{
	sched_set_stop_task(cpu, per_cpu(cpu_stopper.thread, cpu));
}

static void cpu_stop_park(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

	WARN_ON(!list_empty(&stopper->works));
}

void stop_machine_unpark(int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

	stopper->enabled = true;
	kthread_unpark(stopper->thread);
}

static struct smp_hotplug_thread cpu_stop_threads = {
	.store			= &cpu_stopper.thread,
	.thread_should_run	= cpu_stop_should_run,
	.thread_fn		= cpu_stopper_thread,
	.thread_comm		= "migration/%u",
	.create			= cpu_stop_create,
	.park			= cpu_stop_park,
	.selfparking		= true,
};

static int __init cpu_stop_init(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

		spin_lock_init(&stopper->lock);
		INIT_LIST_HEAD(&stopper->works);
	}

	BUG_ON(smpboot_register_percpu_thread(&cpu_stop_threads));
	stop_machine_unpark(raw_smp_processor_id());
	stop_machine_initialized = true;
	return 0;
}
early_initcall(cpu_stop_init);

static int __stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus)
{
	struct multi_stop_data msdata = {
		.fn = fn,
		.data = data,
		.num_threads = num_online_cpus(),
		.active_cpus = cpus,
	};

	if (!stop_machine_initialized) {
		/*
		 * Handle the case where stop_machine() is called
		 * early in boot before stop_machine() has been
		 * initialized.
		 */
		unsigned long flags;
		int ret;

		WARN_ON_ONCE(msdata.num_threads != 1);

		local_irq_save(flags);
		hard_irq_disable();
		ret = (*fn)(data);
		local_irq_restore(flags);

		return ret;
	}

	/* Set the initial state and stop all online cpus. */
	set_state(&msdata, MULTI_STOP_PREPARE);
	return stop_cpus(cpu_online_mask, multi_cpu_stop, &msdata);
}

int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus)
{
	int ret;

	/* No CPUs can come up or down during this. */
	get_online_cpus();
	ret = __stop_machine(fn, data, cpus);
	put_online_cpus();
	return ret;
}
EXPORT_SYMBOL_GPL(stop_machine);

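Usage sketch (editorial, not part of the file): the classic use of
stop_machine() is patching code or data that no cpu may observe
half-written.  The struct patch and cache-flush details are
illustrative only.

	struct patch { u32 *addr; u32 insn; };

	static int __apply_patch(void *arg)
	{
		struct patch *p = arg;

		/*
		 * Every other online cpu spins in multi_cpu_stop() with
		 * irqs disabled, so no one can fetch a torn instruction.
		 */
		*p->addr = p->insn;	/* real code would also flush caches */
		return 0;
	}

	static int apply_patch(struct patch *p)
	{
		/* NULL mask: @fn runs on the first online cpu. */
		return stop_machine(__apply_patch, p, NULL);
	}
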
/**
 * stop_machine_from_inactive_cpu - stop_machine() from inactive CPU
 * @fn: the function to run
 * @data: the data ptr for the @fn()
 * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
 *
 * This is identical to stop_machine() but can be called from a CPU which
 * is not active.  The local CPU is in the process of hotplug (so no other
 * CPU hotplug can start) and not marked active and doesn't have enough
 * context to sleep.
 *
 * This function provides stop_machine() functionality for such state by
 * using busy-wait for synchronization and executing @fn directly for local
 * CPU.
 *
 * CONTEXT:
 * Local CPU is inactive.  Temporarily stops all active CPUs.
 *
 * RETURNS:
 * 0 if all executions of @fn returned 0, any non zero return value if any
 * returned non zero.
 */
int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
				  const struct cpumask *cpus)
{
	struct multi_stop_data msdata = { .fn = fn, .data = data,
					    .active_cpus = cpus };
	struct cpu_stop_done done;
	int ret;

	/* Local CPU must be inactive and CPU hotplug in progress. */
	BUG_ON(cpu_active(raw_smp_processor_id()));
	msdata.num_threads = num_active_cpus() + 1;	/* +1 for local */

	/* No proper task established and can't sleep - busy wait for lock. */
	while (!mutex_trylock(&stop_cpus_mutex))
		cpu_relax();

	/* Schedule work on other CPUs and execute directly for local CPU */
	set_state(&msdata, MULTI_STOP_PREPARE);
	cpu_stop_init_done(&done, num_active_cpus());
	queue_stop_cpus_work(cpu_active_mask, multi_cpu_stop, &msdata,
			     &done);
	ret = multi_cpu_stop(&msdata);

	/* Busy wait for completion. */
	while (!completion_done(&done.completion))
		cpu_relax();

	mutex_unlock(&stop_cpus_mutex);
	return ret ?: done.ret;
}

v5.9
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * kernel/stop_machine.c
 *
 * Copyright (C) 2008, 2005	IBM Corporation.
 * Copyright (C) 2008, 2005	Rusty Russell rusty@rustcorp.com.au
 * Copyright (C) 2010		SUSE Linux Products GmbH
 * Copyright (C) 2010		Tejun Heo <tj@kernel.org>
 */
#include <linux/compiler.h>
#include <linux/completion.h>
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/stop_machine.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/smpboot.h>
#include <linux/atomic.h>
#include <linux/nmi.h>
#include <linux/sched/wake_q.h>

/*
 * Structure to determine completion condition and record errors.  May
 * be shared by works on different cpus.
 */
struct cpu_stop_done {
	atomic_t		nr_todo;	/* nr left to execute */
	int			ret;		/* collected return value */
	struct completion	completion;	/* fired if nr_todo reaches 0 */
};

/* the actual stopper, one per every possible cpu, enabled on online cpus */
struct cpu_stopper {
	struct task_struct	*thread;

	raw_spinlock_t		lock;
	bool			enabled;	/* is this stopper enabled? */
	struct list_head	works;		/* list of pending works */

	struct cpu_stop_work	stop_work;	/* for stop_cpus */
};

static DEFINE_PER_CPU(struct cpu_stopper, cpu_stopper);
static bool stop_machine_initialized = false;

/* static data for stop_cpus */
static DEFINE_MUTEX(stop_cpus_mutex);
static bool stop_cpus_in_progress;

static void cpu_stop_init_done(struct cpu_stop_done *done, unsigned int nr_todo)
{
	memset(done, 0, sizeof(*done));
	atomic_set(&done->nr_todo, nr_todo);
	init_completion(&done->completion);
}

/* signal completion unless @done is NULL */
static void cpu_stop_signal_done(struct cpu_stop_done *done)
{
	if (atomic_dec_and_test(&done->nr_todo))
		complete(&done->completion);
}

static void __cpu_stop_queue_work(struct cpu_stopper *stopper,
					struct cpu_stop_work *work,
					struct wake_q_head *wakeq)
{
	list_add_tail(&work->list, &stopper->works);
	wake_q_add(wakeq, stopper->thread);
}

/* queue @work to @stopper.  if offline, @work is completed immediately */
static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	DEFINE_WAKE_Q(wakeq);
	unsigned long flags;
	bool enabled;

	preempt_disable();
	raw_spin_lock_irqsave(&stopper->lock, flags);
	enabled = stopper->enabled;
	if (enabled)
		__cpu_stop_queue_work(stopper, work, &wakeq);
	else if (work->done)
		cpu_stop_signal_done(work->done);
	raw_spin_unlock_irqrestore(&stopper->lock, flags);

	wake_up_q(&wakeq);
	preempt_enable();

	return enabled;
}

/**
 * stop_one_cpu - stop a cpu
 * @cpu: cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Execute @fn(@arg) on @cpu.  @fn is run in a process context with
 * the highest priority preempting any task on the cpu and
 * monopolizing it.  This function returns after the execution is
 * complete.
 *
 * This function doesn't guarantee @cpu stays online till @fn
 * completes.  If @cpu goes down in the middle, execution may happen
 * partially or fully on different cpus.  @fn should either be ready
 * for that or the caller should ensure that @cpu stays online until
 * this function completes.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -ENOENT if @fn(@arg) was not executed because @cpu was offline;
 * otherwise, the return value of @fn.
 */
int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;
	struct cpu_stop_work work = { .fn = fn, .arg = arg, .done = &done };

	cpu_stop_init_done(&done, 1);
	if (!cpu_stop_queue_work(cpu, &work))
		return -ENOENT;
	/*
	 * In case @cpu == smp_processor_id() we can avoid a sleep+wakeup
	 * cycle by doing a preemption:
	 */
	cond_resched();
	wait_for_completion(&done.completion);
	return done.ret;
}

/* This controls the threads on each CPU. */
enum multi_stop_state {
	/* Dummy starting state for thread. */
	MULTI_STOP_NONE,
	/* Awaiting everyone to be scheduled. */
	MULTI_STOP_PREPARE,
	/* Disable interrupts. */
	MULTI_STOP_DISABLE_IRQ,
	/* Run the function */
	MULTI_STOP_RUN,
	/* Exit */
	MULTI_STOP_EXIT,
};

struct multi_stop_data {
	cpu_stop_fn_t		fn;
	void			*data;
	/* Like num_online_cpus(), but hotplug cpu uses us, so we need this. */
	unsigned int		num_threads;
	const struct cpumask	*active_cpus;

	enum multi_stop_state	state;
	atomic_t		thread_ack;
};

static void set_state(struct multi_stop_data *msdata,
		      enum multi_stop_state newstate)
{
	/* Reset ack counter. */
	atomic_set(&msdata->thread_ack, msdata->num_threads);
	smp_wmb();
	WRITE_ONCE(msdata->state, newstate);
}

/* Last one to ack a state moves to the next state. */
static void ack_state(struct multi_stop_data *msdata)
{
	if (atomic_dec_and_test(&msdata->thread_ack))
		set_state(msdata, msdata->state + 1);
}

void __weak stop_machine_yield(const struct cpumask *cpumask)
{
	cpu_relax();
}

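Editorial note: stop_machine_yield() is a __weak hook so that an
architecture can replace the bare cpu_relax() spin; s390, for example,
overrides it to donate the timeslice when a vcpu it is waiting on has
been preempted by the hypervisor.  A loose, hedged sketch of such an
override; in_hypervisor_guest() and hypervisor_yield_to() are
hypothetical:

	void stop_machine_yield(const struct cpumask *cpumask)
	{
		/*
		 * Busy-waiting in a guest steals cycles from the very
		 * vcpus we are waiting on; hand them back instead.
		 */
		if (in_hypervisor_guest())		/* hypothetical */
			hypervisor_yield_to(cpumask);	/* hypothetical */
		else
			cpu_relax();
	}
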
/* This is the cpu_stop function which stops the CPU. */
static int multi_cpu_stop(void *data)
{
	struct multi_stop_data *msdata = data;
	enum multi_stop_state newstate, curstate = MULTI_STOP_NONE;
	int cpu = smp_processor_id(), err = 0;
	const struct cpumask *cpumask;
	unsigned long flags;
	bool is_active;

	/*
	 * When called from stop_machine_from_inactive_cpu(), irq might
	 * already be disabled.  Save the state and restore it on exit.
	 */
	local_save_flags(flags);

	if (!msdata->active_cpus) {
		cpumask = cpu_online_mask;
		is_active = cpu == cpumask_first(cpumask);
	} else {
		cpumask = msdata->active_cpus;
		is_active = cpumask_test_cpu(cpu, cpumask);
	}

	/* Simple state machine */
	do {
		/* Chill out and ensure we re-read multi_stop_state. */
		stop_machine_yield(cpumask);
		newstate = READ_ONCE(msdata->state);
		if (newstate != curstate) {
			curstate = newstate;
			switch (curstate) {
			case MULTI_STOP_DISABLE_IRQ:
				local_irq_disable();
				hard_irq_disable();
				break;
			case MULTI_STOP_RUN:
				if (is_active)
					err = msdata->fn(msdata->data);
				break;
			default:
				break;
			}
			ack_state(msdata);
		} else if (curstate > MULTI_STOP_PREPARE) {
			/*
			 * At this stage all other CPUs we depend on must spin
			 * in the same loop. Any reason for hard-lockup should
			 * be detected and reported on their side.
			 */
			touch_nmi_watchdog();
		}
		rcu_momentary_dyntick_idle();
	} while (curstate != MULTI_STOP_EXIT);

	local_irq_restore(flags);
	return err;
}

static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
				    int cpu2, struct cpu_stop_work *work2)
{
	struct cpu_stopper *stopper1 = per_cpu_ptr(&cpu_stopper, cpu1);
	struct cpu_stopper *stopper2 = per_cpu_ptr(&cpu_stopper, cpu2);
	DEFINE_WAKE_Q(wakeq);
	int err;

retry:
	/*
	 * The waking up of stopper threads has to happen in the same
	 * scheduling context as the queueing.  Otherwise, there is a
	 * possibility of one of the above stoppers being woken up by another
	 * CPU, and preempting us. This will cause us to not wake up the other
	 * stopper forever.
	 */
	preempt_disable();
	raw_spin_lock_irq(&stopper1->lock);
	raw_spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING);

	if (!stopper1->enabled || !stopper2->enabled) {
		err = -ENOENT;
		goto unlock;
	}

	/*
	 * Ensure that if we race with __stop_cpus() the stoppers won't get
	 * queued up in reverse order leading to system deadlock.
	 *
	 * We can't miss stop_cpus_in_progress if queue_stop_cpus_work() has
	 * queued a work on cpu1 but not on cpu2, we hold both locks.
	 *
	 * It can be falsely true but it is safe to spin until it is cleared,
	 * queue_stop_cpus_work() does everything under preempt_disable().
	 */
	if (unlikely(stop_cpus_in_progress)) {
		err = -EDEADLK;
		goto unlock;
	}

	err = 0;
	__cpu_stop_queue_work(stopper1, work1, &wakeq);
	__cpu_stop_queue_work(stopper2, work2, &wakeq);

unlock:
	raw_spin_unlock(&stopper2->lock);
	raw_spin_unlock_irq(&stopper1->lock);

	if (unlikely(err == -EDEADLK)) {
		preempt_enable();

		while (stop_cpus_in_progress)
			cpu_relax();

		goto retry;
	}

	wake_up_q(&wakeq);
	preempt_enable();

	return err;
}
/**
 * stop_two_cpus - stops two cpus
 * @cpu1: the cpu to stop
 * @cpu2: the other cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Stops both the current and specified CPU and runs @fn on one of them.
 *
 * returns when both are completed.
 */
int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;
	struct cpu_stop_work work1, work2;
	struct multi_stop_data msdata;

	msdata = (struct multi_stop_data){
		.fn = fn,
		.data = arg,
		.num_threads = 2,
		.active_cpus = cpumask_of(cpu1),
	};

	work1 = work2 = (struct cpu_stop_work){
		.fn = multi_cpu_stop,
		.arg = &msdata,
		.done = &done
	};

	cpu_stop_init_done(&done, 2);
	set_state(&msdata, MULTI_STOP_PREPARE);

	if (cpu1 > cpu2)
		swap(cpu1, cpu2);
	if (cpu_stop_queue_two_works(cpu1, &work1, cpu2, &work2))
		return -ENOENT;

	wait_for_completion(&done.completion);
	return done.ret;
}

/**
 * stop_one_cpu_nowait - stop a cpu but don't wait for completion
 * @cpu: cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 * @work_buf: pointer to cpu_stop_work structure
 *
 * Similar to stop_one_cpu() but doesn't wait for completion.  The
 * caller is responsible for ensuring @work_buf is currently unused
 * and will remain untouched until stopper starts executing @fn.
 *
 * CONTEXT:
 * Don't care.
 *
 * RETURNS:
 * true if cpu_stop_work was queued successfully and @fn will be called,
 * false otherwise.
 */
bool stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
			struct cpu_stop_work *work_buf)
{
	*work_buf = (struct cpu_stop_work){ .fn = fn, .arg = arg, };
	return cpu_stop_queue_work(cpu, work_buf);
}

static bool queue_stop_cpus_work(const struct cpumask *cpumask,
				 cpu_stop_fn_t fn, void *arg,
				 struct cpu_stop_done *done)
{
	struct cpu_stop_work *work;
	unsigned int cpu;
	bool queued = false;

	/*
	 * Disable preemption while queueing to avoid getting
	 * preempted by a stopper which might wait for other stoppers
	 * to enter @fn which can lead to deadlock.
	 */
	preempt_disable();
	stop_cpus_in_progress = true;
	barrier();
	for_each_cpu(cpu, cpumask) {
		work = &per_cpu(cpu_stopper.stop_work, cpu);
		work->fn = fn;
		work->arg = arg;
		work->done = done;
		if (cpu_stop_queue_work(cpu, work))
			queued = true;
	}
	barrier();
	stop_cpus_in_progress = false;
	preempt_enable();

	return queued;
}

static int __stop_cpus(const struct cpumask *cpumask,
		       cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;

	cpu_stop_init_done(&done, cpumask_weight(cpumask));
	if (!queue_stop_cpus_work(cpumask, fn, arg, &done))
		return -ENOENT;
	wait_for_completion(&done.completion);
	return done.ret;
}

/**
 * stop_cpus - stop multiple cpus
 * @cpumask: cpus to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Execute @fn(@arg) on online cpus in @cpumask.  On each target cpu,
 * @fn is run in a process context with the highest priority
 * preempting any task on the cpu and monopolizing it.  This function
 * returns after all executions are complete.
 *
 * This function doesn't guarantee the cpus in @cpumask stay online
 * till @fn completes.  If some cpus go down in the middle, execution
 * on the cpu may happen partially or fully on different cpus.  @fn
 * should either be ready for that or the caller should ensure that
 * the cpus stay online until this function completes.
 *
 * All stop_cpus() calls are serialized making it safe for @fn to wait
 * for all cpus to start executing it.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -ENOENT if @fn(@arg) was not executed at all because all cpus in
 * @cpumask were offline; otherwise, 0 if all executions of @fn
 * returned 0, any non zero return value if any returned non zero.
 */
static int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
{
	int ret;

	/* static works are used, process one request at a time */
	mutex_lock(&stop_cpus_mutex);
	ret = __stop_cpus(cpumask, fn, arg);
	mutex_unlock(&stop_cpus_mutex);
	return ret;
}

static int cpu_stop_should_run(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	unsigned long flags;
	int run;

	raw_spin_lock_irqsave(&stopper->lock, flags);
	run = !list_empty(&stopper->works);
	raw_spin_unlock_irqrestore(&stopper->lock, flags);
	return run;
}

static void cpu_stopper_thread(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	struct cpu_stop_work *work;

repeat:
	work = NULL;
	raw_spin_lock_irq(&stopper->lock);
	if (!list_empty(&stopper->works)) {
		work = list_first_entry(&stopper->works,
					struct cpu_stop_work, list);
		list_del_init(&work->list);
	}
	raw_spin_unlock_irq(&stopper->lock);

	if (work) {
		cpu_stop_fn_t fn = work->fn;
		void *arg = work->arg;
		struct cpu_stop_done *done = work->done;
		int ret;

		/* cpu stop callbacks must not sleep, make in_atomic() == T */
		preempt_count_inc();
		ret = fn(arg);
		if (done) {
			if (ret)
				done->ret = ret;
			cpu_stop_signal_done(done);
		}
		preempt_count_dec();
		WARN_ONCE(preempt_count(),
			  "cpu_stop: %ps(%p) leaked preempt count\n", fn, arg);
		goto repeat;
	}
}

void stop_machine_park(int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	/*
	 * Lockless. cpu_stopper_thread() will take stopper->lock and flush
	 * the pending works before it parks, until then it is fine to queue
	 * the new works.
	 */
	stopper->enabled = false;
	kthread_park(stopper->thread);
}

extern void sched_set_stop_task(int cpu, struct task_struct *stop);

static void cpu_stop_create(unsigned int cpu)
{
	sched_set_stop_task(cpu, per_cpu(cpu_stopper.thread, cpu));
}

static void cpu_stop_park(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

	WARN_ON(!list_empty(&stopper->works));
}

void stop_machine_unpark(int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

	stopper->enabled = true;
	kthread_unpark(stopper->thread);
}

static struct smp_hotplug_thread cpu_stop_threads = {
	.store			= &cpu_stopper.thread,
	.thread_should_run	= cpu_stop_should_run,
	.thread_fn		= cpu_stopper_thread,
	.thread_comm		= "migration/%u",
	.create			= cpu_stop_create,
	.park			= cpu_stop_park,
	.selfparking		= true,
};

static int __init cpu_stop_init(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

		raw_spin_lock_init(&stopper->lock);
		INIT_LIST_HEAD(&stopper->works);
	}

	BUG_ON(smpboot_register_percpu_thread(&cpu_stop_threads));
	stop_machine_unpark(raw_smp_processor_id());
	stop_machine_initialized = true;
	return 0;
}
early_initcall(cpu_stop_init);

int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data,
			    const struct cpumask *cpus)
{
	struct multi_stop_data msdata = {
		.fn = fn,
		.data = data,
		.num_threads = num_online_cpus(),
		.active_cpus = cpus,
	};

	lockdep_assert_cpus_held();

	if (!stop_machine_initialized) {
		/*
		 * Handle the case where stop_machine() is called
		 * early in boot before stop_machine() has been
		 * initialized.
		 */
		unsigned long flags;
		int ret;

		WARN_ON_ONCE(msdata.num_threads != 1);

		local_irq_save(flags);
		hard_irq_disable();
		ret = (*fn)(data);
		local_irq_restore(flags);

		return ret;
	}

	/* Set the initial state and stop all online cpus. */
	set_state(&msdata, MULTI_STOP_PREPARE);
	return stop_cpus(cpu_online_mask, multi_cpu_stop, &msdata);
}

int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus)
{
	int ret;

	/* No CPUs can come up or down during this. */
	cpus_read_lock();
	ret = stop_machine_cpuslocked(fn, data, cpus);
	cpus_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(stop_machine);

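Usage sketch (editorial, not part of the file): the _cpuslocked
variant exists for callers already inside a cpus_read_lock() section,
where plain stop_machine() would take the hotplug lock a second time.
The update_something() payload is illustrative.

	static int update_something(void *arg)
	{
		return 0;	/* illustrative payload */
	}

	static int update_under_hotplug_lock(void)
	{
		int ret;

		cpus_read_lock();
		/* ... work that needs the cpu set to stay stable ... */
		ret = stop_machine_cpuslocked(update_something, NULL, NULL);
		cpus_read_unlock();

		return ret;
	}
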
/**
 * stop_machine_from_inactive_cpu - stop_machine() from inactive CPU
 * @fn: the function to run
 * @data: the data ptr for the @fn()
 * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
 *
 * This is identical to stop_machine() but can be called from a CPU which
 * is not active.  The local CPU is in the process of hotplug (so no other
 * CPU hotplug can start) and not marked active and doesn't have enough
 * context to sleep.
 *
 * This function provides stop_machine() functionality for such state by
 * using busy-wait for synchronization and executing @fn directly for local
 * CPU.
 *
 * CONTEXT:
 * Local CPU is inactive.  Temporarily stops all active CPUs.
 *
 * RETURNS:
 * 0 if all executions of @fn returned 0, any non zero return value if any
 * returned non zero.
 */
int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
				  const struct cpumask *cpus)
{
	struct multi_stop_data msdata = { .fn = fn, .data = data,
					    .active_cpus = cpus };
	struct cpu_stop_done done;
	int ret;

	/* Local CPU must be inactive and CPU hotplug in progress. */
	BUG_ON(cpu_active(raw_smp_processor_id()));
	msdata.num_threads = num_active_cpus() + 1;	/* +1 for local */

	/* No proper task established and can't sleep - busy wait for lock. */
	while (!mutex_trylock(&stop_cpus_mutex))
		cpu_relax();

	/* Schedule work on other CPUs and execute directly for local CPU */
	set_state(&msdata, MULTI_STOP_PREPARE);
	cpu_stop_init_done(&done, num_active_cpus());
	queue_stop_cpus_work(cpu_active_mask, multi_cpu_stop, &msdata,
			     &done);
	ret = multi_cpu_stop(&msdata);

	/* Busy wait for completion. */
	while (!completion_done(&done.completion))
		cpu_relax();

	mutex_unlock(&stop_cpus_mutex);
	return ret ?: done.ret;
}