v3.15 (kernel/kthread.c)
/* Kernel thread helper functions.
 *   Copyright (C) 2004 IBM Corporation, Rusty Russell.
 *
 * Creation is done via kthreadd, so that we get a clean environment
 * even if we're invoked from userspace (think modprobe, hotplug cpu,
 * etc.).
 */
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/cpuset.h>
#include <linux/unistd.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/freezer.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <trace/events/sched.h>

static DEFINE_SPINLOCK(kthread_create_lock);
static LIST_HEAD(kthread_create_list);
struct task_struct *kthreadd_task;

struct kthread_create_info
{
	/* Information passed to kthread() from kthreadd. */
	int (*threadfn)(void *data);
	void *data;
	int node;

	/* Result passed back to kthread_create() from kthreadd. */
	struct task_struct *result;
	struct completion *done;

	struct list_head list;
};

struct kthread {
	unsigned long flags;
	unsigned int cpu;
	void *data;
	struct completion parked;
	struct completion exited;
};

enum KTHREAD_BITS {
	KTHREAD_IS_PER_CPU = 0,
	KTHREAD_SHOULD_STOP,
	KTHREAD_SHOULD_PARK,
	KTHREAD_IS_PARKED,
};

#define __to_kthread(vfork)	\
	container_of(vfork, struct kthread, exited)

static inline struct kthread *to_kthread(struct task_struct *k)
{
	return __to_kthread(k->vfork_done);
}

static struct kthread *to_live_kthread(struct task_struct *k)
{
	struct completion *vfork = ACCESS_ONCE(k->vfork_done);
	if (likely(vfork))
		return __to_kthread(vfork);
	return NULL;
}

/**
 * kthread_should_stop - should this kthread return now?
 *
 * When someone calls kthread_stop() on your kthread, it will be woken
 * and this will return true.  You should then return, and your return
 * value will be passed through to kthread_stop().
 */
bool kthread_should_stop(void)
{
	return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
}
EXPORT_SYMBOL(kthread_should_stop);
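
/*
 * Editor's example (not part of this file): the canonical main loop
 * built on kthread_should_stop().  A sketch only; the counter is a
 * stand-in for real per-iteration work, and the name my_loop_fn is
 * illustrative.
 */
static int my_loop_fn(void *data)
{
	unsigned long *counter = data;		/* caller-supplied state */

	while (!kthread_should_stop()) {
		(*counter)++;			/* stand-in for real work */
		schedule_timeout_interruptible(HZ);
	}
	return 0;				/* handed back to kthread_stop() */
}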

/**
 * kthread_should_park - should this kthread park now?
 *
 * When someone calls kthread_park() on your kthread, it will be woken
 * and this will return true.  You should then do the necessary
 * cleanup and call kthread_parkme().
 *
 * Similar to kthread_should_stop(), but this keeps the thread alive
 * and in a park position. kthread_unpark() "restarts" the thread and
 * calls the thread function again.
 */
bool kthread_should_park(void)
{
	return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(current)->flags);
}

/**
 * kthread_freezable_should_stop - should this freezable kthread return now?
 * @was_frozen: optional out parameter, indicates whether %current was frozen
 *
 * kthread_should_stop() for freezable kthreads, which will enter
 * refrigerator if necessary.  This function is safe from kthread_stop() /
 * freezer deadlock and freezable kthreads should use this function instead
 * of calling try_to_freeze() directly.
 */
bool kthread_freezable_should_stop(bool *was_frozen)
{
	bool frozen = false;

	might_sleep();

	if (unlikely(freezing(current)))
		frozen = __refrigerator(true);

	if (was_frozen)
		*was_frozen = frozen;

	return kthread_should_stop();
}
EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
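
/*
 * Editor's example (not part of this file): the loop shape for a
 * freezable kthread.  kthread_freezable_should_stop() both enters the
 * refrigerator when required and reports a pending kthread_stop();
 * set_freezable() is needed because kthreads are PF_NOFREEZE by default.
 */
static int my_freezable_fn(void *data)
{
	bool was_frozen;

	set_freezable();
	while (!kthread_freezable_should_stop(&was_frozen)) {
		if (was_frozen)
			pr_debug("thawed, revalidating state\n");
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}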

/**
 * kthread_data - return data value specified on kthread creation
 * @task: kthread task in question
 *
 * Return the data value specified when kthread @task was created.
 * The caller is responsible for ensuring the validity of @task when
 * calling this function.
 */
void *kthread_data(struct task_struct *task)
{
	return to_kthread(task)->data;
}
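
/*
 * Editor's example (not part of this file): kthread_data() lets code
 * other than the thread itself recover the pointer passed at creation,
 * much as the workqueue code looks up its worker from a task.  The
 * state structure is hypothetical, and the caller must keep @task valid.
 */
struct my_state {
	unsigned long last_seen_jiffies;
};

static unsigned long my_last_seen(struct task_struct *task)
{
	struct my_state *st = kthread_data(task);

	return st->last_seen_jiffies;
}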

/**
 * probe_kthread_data - speculative version of kthread_data()
 * @task: possible kthread task in question
 *
 * @task could be a kthread task.  Return the data value specified when it
 * was created if accessible.  If @task isn't a kthread task or its data is
 * inaccessible for any reason, %NULL is returned.  This function requires
 * that @task itself is safe to dereference.
 */
void *probe_kthread_data(struct task_struct *task)
{
	struct kthread *kthread = to_kthread(task);
	void *data = NULL;

	probe_kernel_read(&data, &kthread->data, sizeof(data));
	return data;
}

static void __kthread_parkme(struct kthread *self)
{
	__set_current_state(TASK_PARKED);
	while (test_bit(KTHREAD_SHOULD_PARK, &self->flags)) {
		if (!test_and_set_bit(KTHREAD_IS_PARKED, &self->flags))
			complete(&self->parked);
		schedule();
		__set_current_state(TASK_PARKED);
	}
	clear_bit(KTHREAD_IS_PARKED, &self->flags);
	__set_current_state(TASK_RUNNING);
}

void kthread_parkme(void)
{
	__kthread_parkme(to_kthread(current));
}

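/*
 * Editor's example (not part of this file): a parkable loop in the
 * style of the smpboot hotplug threads.  When kthread_park() is called,
 * the loop falls into kthread_parkme() and sleeps in TASK_PARKED until
 * kthread_unpark().  Sketch only.
 */
static int my_parkable_fn(void *data)
{
	while (!kthread_should_stop()) {
		if (kthread_should_park()) {
			kthread_parkme();	/* blocks while parked */
			continue;		/* recheck stop/park */
		}
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}
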
static int kthread(void *_create)
{
	/* Copy data: it's on kthread's stack */
	struct kthread_create_info *create = _create;
	int (*threadfn)(void *data) = create->threadfn;
	void *data = create->data;
	struct completion *done;
	struct kthread self;
	int ret;

	self.flags = 0;
	self.data = data;
	init_completion(&self.exited);
	init_completion(&self.parked);
	current->vfork_done = &self.exited;

	/* If user was SIGKILLed, I release the structure. */
	done = xchg(&create->done, NULL);
	if (!done) {
		kfree(create);
		do_exit(-EINTR);
	}
	/* OK, tell user we're spawned, wait for stop or wakeup */
	__set_current_state(TASK_UNINTERRUPTIBLE);
	create->result = current;
	complete(done);
	schedule();

	ret = -EINTR;

	if (!test_bit(KTHREAD_SHOULD_STOP, &self.flags)) {
		__kthread_parkme(&self);
		ret = threadfn(data);
	}
	/* we can't just return, we must preserve "self" on stack */
	do_exit(ret);
}

/* called from do_fork() to get node information for the task about to be created */
int tsk_fork_get_node(struct task_struct *tsk)
{
#ifdef CONFIG_NUMA
	if (tsk == kthreadd_task)
		return tsk->pref_node_fork;
#endif
	return NUMA_NO_NODE;
}

static void create_kthread(struct kthread_create_info *create)
{
	int pid;

#ifdef CONFIG_NUMA
	current->pref_node_fork = create->node;
#endif
	/* We want our own signal handler (we take no signals by default). */
	pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
	if (pid < 0) {
		/* If user was SIGKILLed, I release the structure. */
		struct completion *done = xchg(&create->done, NULL);

		if (!done) {
			kfree(create);
			return;
		}
		create->result = ERR_PTR(pid);
		complete(done);
	}
}

/**
 * kthread_create_on_node - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @node: memory node number.
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread.  The thread will be stopped: use wake_up_process() to start
 * it.  See also kthread_run().
 *
 * If thread is going to be bound on a particular cpu, give its node
 * in @node, to get NUMA affinity for kthread stack, or else give -1.
 * When woken, the thread will run @threadfn() with @data as its
 * argument. @threadfn() can either call do_exit() directly if it is a
 * standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called).  The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM).
 */
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
					   void *data, int node,
					   const char namefmt[],
					   ...)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct task_struct *task;
	struct kthread_create_info *create = kmalloc(sizeof(*create),
						     GFP_KERNEL);

	if (!create)
		return ERR_PTR(-ENOMEM);
	create->threadfn = threadfn;
	create->data = data;
	create->node = node;
	create->done = &done;

	spin_lock(&kthread_create_lock);
	list_add_tail(&create->list, &kthread_create_list);
	spin_unlock(&kthread_create_lock);

	wake_up_process(kthreadd_task);
	/*
	 * Wait for completion in killable state, for I might be chosen by
	 * the OOM killer while kthreadd is trying to allocate memory for
	 * new kernel thread.
	 */
	if (unlikely(wait_for_completion_killable(&done))) {
		/*
		 * If I was SIGKILLed before kthreadd (or new kernel thread)
		 * calls complete(), leave the cleanup of this structure to
		 * that thread.
		 */
		if (xchg(&create->done, NULL))
			return ERR_PTR(-ENOMEM);
		/*
		 * kthreadd (or new kernel thread) will call complete()
		 * shortly.
		 */
		wait_for_completion(&done);
	}
	task = create->result;
	if (!IS_ERR(task)) {
		static const struct sched_param param = { .sched_priority = 0 };
		va_list args;

		va_start(args, namefmt);
		vsnprintf(task->comm, sizeof(task->comm), namefmt, args);
		va_end(args);
		/*
		 * root may have changed our (kthreadd's) priority or CPU mask.
		 * The kernel thread should not inherit these properties.
		 */
		sched_setscheduler_nocheck(task, SCHED_NORMAL, &param);
		set_cpus_allowed_ptr(task, cpu_all_mask);
	}
	kfree(create);
	return task;
}
EXPORT_SYMBOL(kthread_create_on_node);
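
/*
 * Editor's example (not part of this file): creating and starting a
 * thread by hand.  The kthread_run() macro in <linux/kthread.h> wraps
 * exactly this create-then-wake pair.  my_loop_fn is the sketch shown
 * earlier.
 */
static struct task_struct *my_start_thread(unsigned long *counter)
{
	struct task_struct *task;

	task = kthread_create_on_node(my_loop_fn, counter, NUMA_NO_NODE,
				      "my_loop/%d", 0);
	if (IS_ERR(task))
		return task;		/* ERR_PTR(-ENOMEM) on failure */
	wake_up_process(task);		/* thread starts running threadfn */
	return task;
}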

static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
{
	/* Must have done schedule() in kthread() before we set_task_cpu */
	if (!wait_task_inactive(p, state)) {
		WARN_ON(1);
		return;
	}
	/* It's safe because the task is inactive. */
	do_set_cpus_allowed(p, cpumask_of(cpu));
	p->flags |= PF_NO_SETAFFINITY;
}

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @k to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
	__kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(kthread_bind);
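
/*
 * Editor's example (not part of this file): kthread_bind() must run
 * before the thread is first woken, which is why it pairs with
 * kthread_create() rather than kthread_run().  Sketch only.
 */
static struct task_struct *my_start_on_cpu(int (*fn)(void *), void *data,
					   unsigned int cpu)
{
	struct task_struct *task;

	task = kthread_create(fn, data, "my_cpu_thread/%u", cpu);
	if (!IS_ERR(task)) {
		kthread_bind(task, cpu);	/* cpu must be possible */
		wake_up_process(task);
	}
	return task;
}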

/**
 * kthread_create_on_cpu - Create a cpu bound kthread
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @cpu: The cpu on which the thread should be bound.
 * @namefmt: printf-style name for the thread. Format is restricted
 *	     to "name.*%u". Code fills in cpu number.
 *
 * Description: This helper function creates and names a kernel thread.
 * The thread will be woken and put into park mode.
 */
struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
					  void *data, unsigned int cpu,
					  const char *namefmt)
{
	struct task_struct *p;

	p = kthread_create_on_node(threadfn, data, cpu_to_mem(cpu), namefmt,
				   cpu);
	if (IS_ERR(p))
		return p;
	set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags);
	to_kthread(p)->cpu = cpu;
	/* Park the thread to get it out of TASK_UNINTERRUPTIBLE state */
	kthread_park(p);
	return p;
}

static void __kthread_unpark(struct task_struct *k, struct kthread *kthread)
{
	clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
	/*
	 * We clear the IS_PARKED bit here as we don't wait
	 * until the task has left the park code. So if we'd
	 * park before that happens we'd see the IS_PARKED bit
	 * which might be about to be cleared.
	 */
	if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
		if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
			__kthread_bind(k, kthread->cpu, TASK_PARKED);
		wake_up_state(k, TASK_PARKED);
	}
}

/**
 * kthread_unpark - unpark a thread created by kthread_create().
 * @k:		thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return false, wakes it, and
 * waits for it to return. If the thread is marked percpu then it's
 * bound to the cpu again.
 */
void kthread_unpark(struct task_struct *k)
{
	struct kthread *kthread = to_live_kthread(k);

	if (kthread)
		__kthread_unpark(k, kthread);
}

/**
 * kthread_park - park a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return true, wakes it, and
 * waits for it to return. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will park without
 * calling threadfn().
 *
 * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
 * If called by the kthread itself just the park bit is set.
 */
int kthread_park(struct task_struct *k)
{
	struct kthread *kthread = to_live_kthread(k);
	int ret = -ENOSYS;

	if (kthread) {
		if (!test_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
			set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
			if (k != current) {
				wake_up_process(k);
				wait_for_completion(&kthread->parked);
			}
		}
		ret = 0;
	}
	return ret;
}

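/*
 * Editor's example (not part of this file): the park/unpark pattern
 * used by CPU hotplug.  Parking quiesces the thread without destroying
 * its state; unparking resumes it (and rebinds per-cpu threads).
 */
static void my_quiesce_and_resume(struct task_struct *task)
{
	if (!kthread_park(task)) {
		/* ... reconfigure shared state while the thread sleeps ... */
		kthread_unpark(task);
	}
}
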
/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call do_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
	struct kthread *kthread;
	int ret;

	trace_sched_kthread_stop(k);

	get_task_struct(k);
	kthread = to_live_kthread(k);
	if (kthread) {
		set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
		__kthread_unpark(k, kthread);
		wake_up_process(k);
		wait_for_completion(&kthread->exited);
	}
	ret = k->exit_code;
	put_task_struct(k);

	trace_sched_kthread_stop_ret(ret);
	return ret;
}
EXPORT_SYMBOL(kthread_stop);
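
/*
 * Editor's example (not part of this file): tearing a thread down.
 * kthread_stop() wakes the thread, waits for it to exit, and returns
 * threadfn()'s value, or -EINTR if the thread was never woken.
 */
static int my_stop_thread(struct task_struct *task)
{
	int ret = kthread_stop(task);

	if (ret && ret != -EINTR)
		pr_warn("my thread exited with %d\n", ret);
	return ret;
}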

int kthreadd(void *unused)
{
	struct task_struct *tsk = current;

	/* Setup a clean context for our children to inherit. */
	set_task_comm(tsk, "kthreadd");
	ignore_signals(tsk);
	set_cpus_allowed_ptr(tsk, cpu_all_mask);
	set_mems_allowed(node_states[N_MEMORY]);

	current->flags |= PF_NOFREEZE;

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&kthread_create_list))
			schedule();
		__set_current_state(TASK_RUNNING);

		spin_lock(&kthread_create_lock);
		while (!list_empty(&kthread_create_list)) {
			struct kthread_create_info *create;

			create = list_entry(kthread_create_list.next,
					    struct kthread_create_info, list);
			list_del_init(&create->list);
			spin_unlock(&kthread_create_lock);

			create_kthread(create);

			spin_lock(&kthread_create_lock);
		}
		spin_unlock(&kthread_create_lock);
	}

	return 0;
}

void __init_kthread_worker(struct kthread_worker *worker,
				const char *name,
				struct lock_class_key *key)
{
	spin_lock_init(&worker->lock);
	lockdep_set_class_and_name(&worker->lock, key, name);
	INIT_LIST_HEAD(&worker->work_list);
	worker->task = NULL;
}
EXPORT_SYMBOL_GPL(__init_kthread_worker);

/**
 * kthread_worker_fn - kthread function to process kthread_worker
 * @worker_ptr: pointer to initialized kthread_worker
 *
 * This function can be used as @threadfn to kthread_create() or
 * kthread_run() with @worker_ptr argument pointing to an initialized
 * kthread_worker.  The started kthread will process work_list until
 * it is stopped with kthread_stop().  A kthread can also call
 * this function directly after extra initialization.
 *
 * Different kthreads can be used for the same kthread_worker as long
 * as there's only one kthread attached to it at any given time.  A
 * kthread_worker without an attached kthread simply collects queued
 * kthread_works.
 */
int kthread_worker_fn(void *worker_ptr)
{
	struct kthread_worker *worker = worker_ptr;
	struct kthread_work *work;

	WARN_ON(worker->task);
	worker->task = current;
repeat:
	set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */

	if (kthread_should_stop()) {
		__set_current_state(TASK_RUNNING);
		spin_lock_irq(&worker->lock);
		worker->task = NULL;
		spin_unlock_irq(&worker->lock);
		return 0;
	}

	work = NULL;
	spin_lock_irq(&worker->lock);
	if (!list_empty(&worker->work_list)) {
		work = list_first_entry(&worker->work_list,
					struct kthread_work, node);
		list_del_init(&work->node);
	}
	worker->current_work = work;
	spin_unlock_irq(&worker->lock);

	if (work) {
		__set_current_state(TASK_RUNNING);
		work->func(work);
	} else if (!freezing(current))
		schedule();

	try_to_freeze();
	goto repeat;
}
EXPORT_SYMBOL_GPL(kthread_worker_fn);

/* insert @work before @pos in @worker */
static void insert_kthread_work(struct kthread_worker *worker,
			       struct kthread_work *work,
			       struct list_head *pos)
{
	lockdep_assert_held(&worker->lock);

	list_add_tail(&work->node, pos);
	work->worker = worker;
	if (likely(worker->task))
		wake_up_process(worker->task);
}

/**
 * queue_kthread_work - queue a kthread_work
 * @worker: target kthread_worker
 * @work: kthread_work to queue
 *
 * Queue @work to work processor @task for async execution.  @task
 * must have been created with kthread_worker_create().  Returns %true
 * if @work was successfully queued, %false if it was already pending.
 */
bool queue_kthread_work(struct kthread_worker *worker,
			struct kthread_work *work)
{
	bool ret = false;
	unsigned long flags;

	spin_lock_irqsave(&worker->lock, flags);
	if (list_empty(&work->node)) {
		insert_kthread_work(worker, work, &worker->work_list);
		ret = true;
	}
	spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(queue_kthread_work);
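
/*
 * Editor's example (not part of this file): driving a kthread_worker
 * with this (v3.15) API.  The worker and work item would normally live
 * in long-lived storage; error handling is trimmed to the essentials.
 */
static DEFINE_KTHREAD_WORKER(my_worker);
static struct kthread_work my_work;

static void my_work_fn(struct kthread_work *work)
{
	pr_debug("processed one work item\n");
}

static int my_worker_demo(void)
{
	struct task_struct *task;

	task = kthread_run(kthread_worker_fn, &my_worker, "my_worker");
	if (IS_ERR(task))
		return PTR_ERR(task);

	init_kthread_work(&my_work, my_work_fn);
	queue_kthread_work(&my_worker, &my_work);
	flush_kthread_worker(&my_worker);	/* wait until it has run */
	return kthread_stop(task);
}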

struct kthread_flush_work {
	struct kthread_work	work;
	struct completion	done;
};

static void kthread_flush_work_fn(struct kthread_work *work)
{
	struct kthread_flush_work *fwork =
		container_of(work, struct kthread_flush_work, work);
	complete(&fwork->done);
}

/**
 * flush_kthread_work - flush a kthread_work
 * @work: work to flush
 *
 * If @work is queued or executing, wait for it to finish execution.
 */
void flush_kthread_work(struct kthread_work *work)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};
	struct kthread_worker *worker;
	bool noop = false;

retry:
	worker = work->worker;
	if (!worker)
		return;

	spin_lock_irq(&worker->lock);
	if (work->worker != worker) {
		spin_unlock_irq(&worker->lock);
		goto retry;
	}

	if (!list_empty(&work->node))
		insert_kthread_work(worker, &fwork.work, work->node.next);
	else if (worker->current_work == work)
		insert_kthread_work(worker, &fwork.work, worker->work_list.next);
	else
		noop = true;

	spin_unlock_irq(&worker->lock);

	if (!noop)
		wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(flush_kthread_work);

/**
 * flush_kthread_worker - flush all current works on a kthread_worker
 * @worker: worker to flush
 *
 * Wait until all currently executing or pending works on @worker are
 * finished.
 */
void flush_kthread_worker(struct kthread_worker *worker)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};

	queue_kthread_work(worker, &fwork.work);
	wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(flush_kthread_worker);
v4.10.11 (kernel/kthread.c)
/* Kernel thread helper functions.
 *   Copyright (C) 2004 IBM Corporation, Rusty Russell.
 *
 * Creation is done via kthreadd, so that we get a clean environment
 * even if we're invoked from userspace (think modprobe, hotplug cpu,
 * etc.).
 */
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/cpuset.h>
#include <linux/unistd.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/freezer.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <trace/events/sched.h>

static DEFINE_SPINLOCK(kthread_create_lock);
static LIST_HEAD(kthread_create_list);
struct task_struct *kthreadd_task;

struct kthread_create_info
{
	/* Information passed to kthread() from kthreadd. */
	int (*threadfn)(void *data);
	void *data;
	int node;

	/* Result passed back to kthread_create() from kthreadd. */
	struct task_struct *result;
	struct completion *done;

	struct list_head list;
};

struct kthread {
	unsigned long flags;
	unsigned int cpu;
	void *data;
	struct completion parked;
	struct completion exited;
};

enum KTHREAD_BITS {
	KTHREAD_IS_PER_CPU = 0,
	KTHREAD_SHOULD_STOP,
	KTHREAD_SHOULD_PARK,
	KTHREAD_IS_PARKED,
};

static inline void set_kthread_struct(void *kthread)
{
	/*
	 * We abuse ->set_child_tid to avoid the new member and because it
	 * can't be wrongly copied by copy_process(). We also rely on the
	 * fact that the caller can't exec, so PF_KTHREAD can't be cleared.
	 */
	current->set_child_tid = (__force void __user *)kthread;
}

static inline struct kthread *to_kthread(struct task_struct *k)
{
	WARN_ON(!(k->flags & PF_KTHREAD));
	return (__force void *)k->set_child_tid;
}

void free_kthread_struct(struct task_struct *k)
{
	/*
	 * Can be NULL if this kthread was created by kernel_thread()
	 * or if kmalloc() in kthread() failed.
	 */
	kfree(to_kthread(k));
}

/**
 * kthread_should_stop - should this kthread return now?
 *
 * When someone calls kthread_stop() on your kthread, it will be woken
 * and this will return true.  You should then return, and your return
 * value will be passed through to kthread_stop().
 */
bool kthread_should_stop(void)
{
	return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
}
EXPORT_SYMBOL(kthread_should_stop);

/**
 * kthread_should_park - should this kthread park now?
 *
 * When someone calls kthread_park() on your kthread, it will be woken
 * and this will return true.  You should then do the necessary
 * cleanup and call kthread_parkme()
 *
 * Similar to kthread_should_stop(), but this keeps the thread alive
 * and in a park position. kthread_unpark() "restarts" the thread and
 * calls the thread function again.
 */
bool kthread_should_park(void)
{
	return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(current)->flags);
}
EXPORT_SYMBOL_GPL(kthread_should_park);

/**
 * kthread_freezable_should_stop - should this freezable kthread return now?
 * @was_frozen: optional out parameter, indicates whether %current was frozen
 *
 * kthread_should_stop() for freezable kthreads, which will enter
 * refrigerator if necessary.  This function is safe from kthread_stop() /
 * freezer deadlock and freezable kthreads should use this function instead
 * of calling try_to_freeze() directly.
 */
bool kthread_freezable_should_stop(bool *was_frozen)
{
	bool frozen = false;

	might_sleep();

	if (unlikely(freezing(current)))
		frozen = __refrigerator(true);

	if (was_frozen)
		*was_frozen = frozen;

	return kthread_should_stop();
}
EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);

/**
 * kthread_data - return data value specified on kthread creation
 * @task: kthread task in question
 *
 * Return the data value specified when kthread @task was created.
 * The caller is responsible for ensuring the validity of @task when
 * calling this function.
 */
void *kthread_data(struct task_struct *task)
{
	return to_kthread(task)->data;
}

/**
 * kthread_probe_data - speculative version of kthread_data()
 * @task: possible kthread task in question
 *
 * @task could be a kthread task.  Return the data value specified when it
 * was created if accessible.  If @task isn't a kthread task or its data is
 * inaccessible for any reason, %NULL is returned.  This function requires
 * that @task itself is safe to dereference.
 */
void *kthread_probe_data(struct task_struct *task)
{
	struct kthread *kthread = to_kthread(task);
	void *data = NULL;

	probe_kernel_read(&data, &kthread->data, sizeof(data));
	return data;
}

static void __kthread_parkme(struct kthread *self)
{
	__set_current_state(TASK_PARKED);
	while (test_bit(KTHREAD_SHOULD_PARK, &self->flags)) {
		if (!test_and_set_bit(KTHREAD_IS_PARKED, &self->flags))
			complete(&self->parked);
		schedule();
		__set_current_state(TASK_PARKED);
	}
	clear_bit(KTHREAD_IS_PARKED, &self->flags);
	__set_current_state(TASK_RUNNING);
}

void kthread_parkme(void)
{
	__kthread_parkme(to_kthread(current));
}
EXPORT_SYMBOL_GPL(kthread_parkme);

static int kthread(void *_create)
{
	/* Copy data: it's on kthread's stack */
	struct kthread_create_info *create = _create;
	int (*threadfn)(void *data) = create->threadfn;
	void *data = create->data;
	struct completion *done;
	struct kthread *self;
	int ret;

	self = kmalloc(sizeof(*self), GFP_KERNEL);
	set_kthread_struct(self);

	/* If user was SIGKILLed, I release the structure. */
	done = xchg(&create->done, NULL);
	if (!done) {
		kfree(create);
		do_exit(-EINTR);
	}

	if (!self) {
		create->result = ERR_PTR(-ENOMEM);
		complete(done);
		do_exit(-ENOMEM);
	}

	self->flags = 0;
	self->data = data;
	init_completion(&self->exited);
	init_completion(&self->parked);
	current->vfork_done = &self->exited;

	/* OK, tell user we're spawned, wait for stop or wakeup */
	__set_current_state(TASK_UNINTERRUPTIBLE);
	create->result = current;
	complete(done);
	schedule();

	ret = -EINTR;
	if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
		__kthread_parkme(self);
		ret = threadfn(data);
	}
	do_exit(ret);
}

/* called from do_fork() to get node information for the task about to be created */
int tsk_fork_get_node(struct task_struct *tsk)
{
#ifdef CONFIG_NUMA
	if (tsk == kthreadd_task)
		return tsk->pref_node_fork;
#endif
	return NUMA_NO_NODE;
}

static void create_kthread(struct kthread_create_info *create)
{
	int pid;

#ifdef CONFIG_NUMA
	current->pref_node_fork = create->node;
#endif
	/* We want our own signal handler (we take no signals by default). */
	pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
	if (pid < 0) {
		/* If user was SIGKILLed, I release the structure. */
		struct completion *done = xchg(&create->done, NULL);

		if (!done) {
			kfree(create);
			return;
		}
		create->result = ERR_PTR(pid);
		complete(done);
	}
}

static __printf(4, 0)
struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
						    void *data, int node,
						    const char namefmt[],
						    va_list args)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct task_struct *task;
	struct kthread_create_info *create = kmalloc(sizeof(*create),
						     GFP_KERNEL);

	if (!create)
		return ERR_PTR(-ENOMEM);
	create->threadfn = threadfn;
	create->data = data;
	create->node = node;
	create->done = &done;

	spin_lock(&kthread_create_lock);
	list_add_tail(&create->list, &kthread_create_list);
	spin_unlock(&kthread_create_lock);

	wake_up_process(kthreadd_task);
	/*
	 * Wait for completion in killable state, for I might be chosen by
	 * the OOM killer while kthreadd is trying to allocate memory for
	 * new kernel thread.
	 */
	if (unlikely(wait_for_completion_killable(&done))) {
		/*
		 * If I was SIGKILLed before kthreadd (or new kernel thread)
		 * calls complete(), leave the cleanup of this structure to
		 * that thread.
		 */
		if (xchg(&create->done, NULL))
			return ERR_PTR(-EINTR);
		/*
		 * kthreadd (or new kernel thread) will call complete()
		 * shortly.
		 */
		wait_for_completion(&done);
	}
	task = create->result;
	if (!IS_ERR(task)) {
		static const struct sched_param param = { .sched_priority = 0 };

		vsnprintf(task->comm, sizeof(task->comm), namefmt, args);
		/*
		 * root may have changed our (kthreadd's) priority or CPU mask.
		 * The kernel thread should not inherit these properties.
		 */
		sched_setscheduler_nocheck(task, SCHED_NORMAL, &param);
		set_cpus_allowed_ptr(task, cpu_all_mask);
	}
	kfree(create);
	return task;
}

/**
 * kthread_create_on_node - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @node: task and thread structures for the thread are allocated on this node
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread.  The thread will be stopped: use wake_up_process() to start
 * it.  See also kthread_run().  The new thread has SCHED_NORMAL policy and
 * is affine to all CPUs.
 *
 * If thread is going to be bound on a particular cpu, give its node
 * in @node, to get NUMA affinity for kthread stack, or else give NUMA_NO_NODE.
 * When woken, the thread will run @threadfn() with @data as its
 * argument. @threadfn() can either call do_exit() directly if it is a
 * standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called).  The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
 */
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
					   void *data, int node,
					   const char namefmt[],
					   ...)
{
	struct task_struct *task;
	va_list args;

	va_start(args, namefmt);
	task = __kthread_create_on_node(threadfn, data, node, namefmt, args);
	va_end(args);

	return task;
}
EXPORT_SYMBOL(kthread_create_on_node);

static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, long state)
{
	unsigned long flags;

	if (!wait_task_inactive(p, state)) {
		WARN_ON(1);
		return;
	}

	/* It's safe because the task is inactive. */
	raw_spin_lock_irqsave(&p->pi_lock, flags);
	do_set_cpus_allowed(p, mask);
	p->flags |= PF_NO_SETAFFINITY;
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
}

static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
{
	__kthread_bind_mask(p, cpumask_of(cpu), state);
}

void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
{
	__kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
}

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @k to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
	__kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(kthread_bind);

/**
 * kthread_create_on_cpu - Create a cpu bound kthread
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @cpu: The cpu on which the thread should be bound.
 * @namefmt: printf-style name for the thread. Format is restricted
 *	     to "name.*%u". Code fills in cpu number.
 *
 * Description: This helper function creates and names a kernel thread.
 * The thread will be woken and put into park mode.
 */
struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
					  void *data, unsigned int cpu,
					  const char *namefmt)
{
	struct task_struct *p;

	p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
				   cpu);
	if (IS_ERR(p))
		return p;
	kthread_bind(p, cpu);
	/* CPU hotplug needs to bind once again when unparking the thread. */
	set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags);
	to_kthread(p)->cpu = cpu;
	return p;
}

/**
 * kthread_unpark - unpark a thread created by kthread_create().
 * @k:		thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return false, wakes it, and
 * waits for it to return. If the thread is marked percpu then it's
 * bound to the cpu again.
 */
void kthread_unpark(struct task_struct *k)
{
	struct kthread *kthread = to_kthread(k);

	clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
	/*
	 * We clear the IS_PARKED bit here as we don't wait
	 * until the task has left the park code. So if we'd
	 * park before that happens we'd see the IS_PARKED bit
	 * which might be about to be cleared.
	 */
	if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
		/*
		 * Newly created kthread was parked when the CPU was offline.
		 * The binding was lost and we need to set it again.
		 */
		if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
			__kthread_bind(k, kthread->cpu, TASK_PARKED);
		wake_up_state(k, TASK_PARKED);
	}
}
EXPORT_SYMBOL_GPL(kthread_unpark);

/**
 * kthread_park - park a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return true, wakes it, and
 * waits for it to return. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will park without
 * calling threadfn().
 *
 * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
 * If called by the kthread itself just the park bit is set.
 */
int kthread_park(struct task_struct *k)
{
	struct kthread *kthread = to_kthread(k);

	if (WARN_ON(k->flags & PF_EXITING))
		return -ENOSYS;

	if (!test_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
		set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
		if (k != current) {
			wake_up_process(k);
			wait_for_completion(&kthread->parked);
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(kthread_park);

/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call do_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
	struct kthread *kthread;
	int ret;

	trace_sched_kthread_stop(k);

	get_task_struct(k);
	kthread = to_kthread(k);
	set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
	kthread_unpark(k);
	wake_up_process(k);
	wait_for_completion(&kthread->exited);
	ret = k->exit_code;
	put_task_struct(k);

	trace_sched_kthread_stop_ret(ret);
	return ret;
}
EXPORT_SYMBOL(kthread_stop);

int kthreadd(void *unused)
{
	struct task_struct *tsk = current;

	/* Setup a clean context for our children to inherit. */
	set_task_comm(tsk, "kthreadd");
	ignore_signals(tsk);
	set_cpus_allowed_ptr(tsk, cpu_all_mask);
	set_mems_allowed(node_states[N_MEMORY]);

	current->flags |= PF_NOFREEZE;

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&kthread_create_list))
			schedule();
		__set_current_state(TASK_RUNNING);

		spin_lock(&kthread_create_lock);
		while (!list_empty(&kthread_create_list)) {
			struct kthread_create_info *create;

			create = list_entry(kthread_create_list.next,
					    struct kthread_create_info, list);
			list_del_init(&create->list);
			spin_unlock(&kthread_create_lock);

			create_kthread(create);

			spin_lock(&kthread_create_lock);
		}
		spin_unlock(&kthread_create_lock);
	}

	return 0;
}

void __kthread_init_worker(struct kthread_worker *worker,
				const char *name,
				struct lock_class_key *key)
{
	memset(worker, 0, sizeof(struct kthread_worker));
	spin_lock_init(&worker->lock);
	lockdep_set_class_and_name(&worker->lock, key, name);
	INIT_LIST_HEAD(&worker->work_list);
	INIT_LIST_HEAD(&worker->delayed_work_list);
}
EXPORT_SYMBOL_GPL(__kthread_init_worker);

/**
 * kthread_worker_fn - kthread function to process kthread_worker
 * @worker_ptr: pointer to initialized kthread_worker
 *
 * This function implements the main cycle of kthread worker. It processes
 * work_list until it is stopped with kthread_stop(). It sleeps when the queue
 * is empty.
 *
 * Works must not hold any locks or leave preemption or interrupts disabled
 * when they finish. A safe point for freezing is provided after one work
 * finishes and before a new one is started.
 *
 * Also the works must not be handled by more than one worker at the same time,
 * see also kthread_queue_work().
 */
int kthread_worker_fn(void *worker_ptr)
{
	struct kthread_worker *worker = worker_ptr;
	struct kthread_work *work;

	/*
	 * FIXME: Update the check and remove the assignment when all kthread
	 * worker users are created using kthread_create_worker*() functions.
	 */
	WARN_ON(worker->task && worker->task != current);
	worker->task = current;

	if (worker->flags & KTW_FREEZABLE)
		set_freezable();

repeat:
	set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */

	if (kthread_should_stop()) {
		__set_current_state(TASK_RUNNING);
		spin_lock_irq(&worker->lock);
		worker->task = NULL;
		spin_unlock_irq(&worker->lock);
		return 0;
	}

	work = NULL;
	spin_lock_irq(&worker->lock);
	if (!list_empty(&worker->work_list)) {
		work = list_first_entry(&worker->work_list,
					struct kthread_work, node);
		list_del_init(&work->node);
	}
	worker->current_work = work;
	spin_unlock_irq(&worker->lock);

	if (work) {
		__set_current_state(TASK_RUNNING);
		work->func(work);
	} else if (!freezing(current))
		schedule();

	try_to_freeze();
	goto repeat;
}
EXPORT_SYMBOL_GPL(kthread_worker_fn);

static __printf(3, 0) struct kthread_worker *
__kthread_create_worker(int cpu, unsigned int flags,
			const char namefmt[], va_list args)
{
	struct kthread_worker *worker;
	struct task_struct *task;
	int node = -1;

	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
	if (!worker)
		return ERR_PTR(-ENOMEM);

	kthread_init_worker(worker);

	if (cpu >= 0)
		node = cpu_to_node(cpu);

	task = __kthread_create_on_node(kthread_worker_fn, worker,
						node, namefmt, args);
	if (IS_ERR(task))
		goto fail_task;

	if (cpu >= 0)
		kthread_bind(task, cpu);

	worker->flags = flags;
	worker->task = task;
	wake_up_process(task);
	return worker;

fail_task:
	kfree(worker);
	return ERR_CAST(task);
}

/**
 * kthread_create_worker - create a kthread worker
 * @flags: flags modifying the default behavior of the worker
 * @namefmt: printf-style name for the kthread worker (task).
 *
 * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
 * when the worker was SIGKILLed.
 */
struct kthread_worker *
kthread_create_worker(unsigned int flags, const char namefmt[], ...)
{
	struct kthread_worker *worker;
	va_list args;

	va_start(args, namefmt);
	worker = __kthread_create_worker(-1, flags, namefmt, args);
	va_end(args);

	return worker;
}
EXPORT_SYMBOL(kthread_create_worker);
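
/*
 * Editor's example (not part of this file): the v4.10 worker API folds
 * thread creation and startup into kthread_create_worker(); teardown is
 * kthread_destroy_worker().  Names with the _v2 suffix are illustrative.
 */
static struct kthread_worker *my_worker_v2;
static struct kthread_work my_work_v2;

static void my_work_v2_fn(struct kthread_work *work)
{
	pr_debug("processed one work item\n");
}

static int my_worker_v2_setup(void)
{
	my_worker_v2 = kthread_create_worker(0, "my_worker/%d", 0);
	if (IS_ERR(my_worker_v2))
		return PTR_ERR(my_worker_v2);

	kthread_init_work(&my_work_v2, my_work_v2_fn);
	kthread_queue_work(my_worker_v2, &my_work_v2);
	return 0;
}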

/**
 * kthread_create_worker_on_cpu - create a kthread worker and bind it
 *	to a given CPU and the associated NUMA node.
 * @cpu: CPU number
 * @flags: flags modifying the default behavior of the worker
 * @namefmt: printf-style name for the kthread worker (task).
 *
 * Use a valid CPU number if you want to bind the kthread worker
 * to the given CPU and the associated NUMA node.
 *
 * A good practice is to add the cpu number also into the worker name.
 * For example, use kthread_create_worker_on_cpu(cpu, "helper/%d", cpu).
 *
 * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
 * when the worker was SIGKILLed.
 */
struct kthread_worker *
kthread_create_worker_on_cpu(int cpu, unsigned int flags,
			     const char namefmt[], ...)
{
	struct kthread_worker *worker;
	va_list args;

	va_start(args, namefmt);
	worker = __kthread_create_worker(cpu, flags, namefmt, args);
	va_end(args);

	return worker;
}
EXPORT_SYMBOL(kthread_create_worker_on_cpu);

/*
 * Returns true when the work could not be queued at the moment.
 * It happens when it is already pending in a worker list
 * or when it is being cancelled.
 */
static inline bool queuing_blocked(struct kthread_worker *worker,
				   struct kthread_work *work)
{
	lockdep_assert_held(&worker->lock);

	return !list_empty(&work->node) || work->canceling;
}

static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
					     struct kthread_work *work)
{
	lockdep_assert_held(&worker->lock);
	WARN_ON_ONCE(!list_empty(&work->node));
	/* Do not use a work with >1 worker, see kthread_queue_work() */
	WARN_ON_ONCE(work->worker && work->worker != worker);
}

/* insert @work before @pos in @worker */
static void kthread_insert_work(struct kthread_worker *worker,
				struct kthread_work *work,
				struct list_head *pos)
{
	kthread_insert_work_sanity_check(worker, work);

	list_add_tail(&work->node, pos);
	work->worker = worker;
	if (!worker->current_work && likely(worker->task))
		wake_up_process(worker->task);
}

/**
 * kthread_queue_work - queue a kthread_work
 * @worker: target kthread_worker
 * @work: kthread_work to queue
 *
 * Queue @work to work processor @task for async execution.  @task
 * must have been created with kthread_worker_create().  Returns %true
 * if @work was successfully queued, %false if it was already pending.
 *
 * Reinitialize the work if it needs to be used by another worker.
 * For example, when the worker was stopped and started again.
 */
bool kthread_queue_work(struct kthread_worker *worker,
			struct kthread_work *work)
{
	bool ret = false;
	unsigned long flags;

	spin_lock_irqsave(&worker->lock, flags);
	if (!queuing_blocked(worker, work)) {
		kthread_insert_work(worker, work, &worker->work_list);
		ret = true;
	}
	spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_queue_work);

/**
 * kthread_delayed_work_timer_fn - callback that queues the associated kthread
 *	delayed work when the timer expires.
 * @__data: pointer to the data associated with the timer
 *
 * The format of the function is defined by struct timer_list.
 * It should have been called from irqsafe timer with irq already off.
 */
void kthread_delayed_work_timer_fn(unsigned long __data)
{
	struct kthread_delayed_work *dwork =
		(struct kthread_delayed_work *)__data;
	struct kthread_work *work = &dwork->work;
	struct kthread_worker *worker = work->worker;

	/*
	 * This might happen when a pending work is reinitialized.
	 * It means that the work is being used in a wrong way.
	 */
	if (WARN_ON_ONCE(!worker))
		return;

	spin_lock(&worker->lock);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	/* Move the work from worker->delayed_work_list. */
	WARN_ON_ONCE(list_empty(&work->node));
	list_del_init(&work->node);
	kthread_insert_work(worker, work, &worker->work_list);

	spin_unlock(&worker->lock);
}
EXPORT_SYMBOL(kthread_delayed_work_timer_fn);

void __kthread_queue_delayed_work(struct kthread_worker *worker,
				  struct kthread_delayed_work *dwork,
				  unsigned long delay)
{
	struct timer_list *timer = &dwork->timer;
	struct kthread_work *work = &dwork->work;

	WARN_ON_ONCE(timer->function != kthread_delayed_work_timer_fn ||
		     timer->data != (unsigned long)dwork);

	/*
	 * If @delay is 0, queue @dwork->work immediately.  This is for
	 * both optimization and correctness.  The earliest @timer can
	 * expire is on the closest next tick and delayed_work users depend
	 * on that there's no such delay when @delay is 0.
	 */
	if (!delay) {
		kthread_insert_work(worker, work, &worker->work_list);
		return;
	}

	/* Be paranoid and try to detect possible races already now. */
	kthread_insert_work_sanity_check(worker, work);

	list_add(&work->node, &worker->delayed_work_list);
	work->worker = worker;
	timer_stats_timer_set_start_info(&dwork->timer);
	timer->expires = jiffies + delay;
	add_timer(timer);
}

/**
 * kthread_queue_delayed_work - queue the associated kthread work
 *	after a delay.
 * @worker: target kthread_worker
 * @dwork: kthread_delayed_work to queue
 * @delay: number of jiffies to wait before queuing
 *
 * If the work has not been pending it starts a timer that will queue
 * the work after the given @delay. If @delay is zero, it queues the
 * work immediately.
 *
 * Return: %false if the @work has already been pending. It means that
 * either the timer was running or the work was queued. It returns %true
 * otherwise.
 */
bool kthread_queue_delayed_work(struct kthread_worker *worker,
				struct kthread_delayed_work *dwork,
				unsigned long delay)
{
	struct kthread_work *work = &dwork->work;
	unsigned long flags;
	bool ret = false;

	spin_lock_irqsave(&worker->lock, flags);

	if (!queuing_blocked(worker, work)) {
		__kthread_queue_delayed_work(worker, dwork, delay);
		ret = true;
	}

	spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
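
/*
 * Editor's example (not part of this file): arming a delayed work.
 * kthread_init_delayed_work() wires dwork->timer to
 * kthread_delayed_work_timer_fn(), which the WARN_ON_ONCE in
 * __kthread_queue_delayed_work() above checks for.
 */
static struct kthread_delayed_work my_dwork;

static void my_dwork_fn(struct kthread_work *work)
{
	pr_debug("ran two seconds after queuing\n");
}

static void my_arm_dwork(struct kthread_worker *worker)
{
	kthread_init_delayed_work(&my_dwork, my_dwork_fn);
	kthread_queue_delayed_work(worker, &my_dwork, 2 * HZ);
}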

struct kthread_flush_work {
	struct kthread_work	work;
	struct completion	done;
};

static void kthread_flush_work_fn(struct kthread_work *work)
{
	struct kthread_flush_work *fwork =
		container_of(work, struct kthread_flush_work, work);
	complete(&fwork->done);
}

/**
 * kthread_flush_work - flush a kthread_work
 * @work: work to flush
 *
 * If @work is queued or executing, wait for it to finish execution.
 */
void kthread_flush_work(struct kthread_work *work)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};
	struct kthread_worker *worker;
	bool noop = false;

	worker = work->worker;
	if (!worker)
		return;

	spin_lock_irq(&worker->lock);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	if (!list_empty(&work->node))
		kthread_insert_work(worker, &fwork.work, work->node.next);
	else if (worker->current_work == work)
		kthread_insert_work(worker, &fwork.work,
				    worker->work_list.next);
	else
		noop = true;

	spin_unlock_irq(&worker->lock);

	if (!noop)
		wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(kthread_flush_work);

/*
 * This function removes the work from the worker queue. Also it makes sure
 * that it won't get queued later via the delayed work's timer.
 *
 * The work might still be in use when this function finishes. See the
 * current_work processed by the worker.
 *
 * Return: %true if @work was pending and successfully canceled,
 *	%false if @work was not pending
 */
static bool __kthread_cancel_work(struct kthread_work *work, bool is_dwork,
				  unsigned long *flags)
{
	/* Try to cancel the timer if one exists. */
	if (is_dwork) {
		struct kthread_delayed_work *dwork =
			container_of(work, struct kthread_delayed_work, work);
		struct kthread_worker *worker = work->worker;

		/*
		 * del_timer_sync() must be called to make sure that the timer
		 * callback is not running. The lock must be temporarily
		 * released to avoid a deadlock with the callback. In the
		 * meantime, any queuing is blocked by setting the canceling
		 * counter.
		 */
		work->canceling++;
		spin_unlock_irqrestore(&worker->lock, *flags);
		del_timer_sync(&dwork->timer);
		spin_lock_irqsave(&worker->lock, *flags);
		work->canceling--;
	}

	/*
	 * Try to remove the work from a worker list. It might either
	 * be from worker->work_list or from worker->delayed_work_list.
	 */
	if (!list_empty(&work->node)) {
		list_del_init(&work->node);
		return true;
	}

	return false;
}

/**
 * kthread_mod_delayed_work - modify delay of or queue a kthread delayed work
 * @worker: kthread worker to use
 * @dwork: kthread delayed work to queue
 * @delay: number of jiffies to wait before queuing
 *
 * If @dwork is idle, equivalent to kthread_queue_delayed_work(). Otherwise,
 * modify @dwork's timer so that it expires after @delay. If @delay is zero,
 * @work is guaranteed to be queued immediately.
 *
 * Return: %true if @dwork was pending and its timer was modified,
 * %false otherwise.
 *
 * A special case is when the work is being canceled in parallel.
 * It might be caused either by the real kthread_cancel_delayed_work_sync()
 * or yet another kthread_mod_delayed_work() call. We let the other command
 * win and return %false here. The caller is supposed to synchronize these
 * operations in a reasonable way.
 *
 * This function is safe to call from any context including IRQ handler.
 * See __kthread_cancel_work() and kthread_delayed_work_timer_fn()
 * for details.
 */
bool kthread_mod_delayed_work(struct kthread_worker *worker,
			      struct kthread_delayed_work *dwork,
			      unsigned long delay)
{
	struct kthread_work *work = &dwork->work;
	unsigned long flags;
	int ret = false;

	spin_lock_irqsave(&worker->lock, flags);

	/* Do not bother with canceling when never queued. */
	if (!work->worker)
		goto fast_queue;

	/* Work must not be used with >1 worker, see kthread_queue_work() */
	WARN_ON_ONCE(work->worker != worker);

	/* Do not fight with another command that is canceling this work. */
	if (work->canceling)
		goto out;

	ret = __kthread_cancel_work(work, true, &flags);
fast_queue:
	__kthread_queue_delayed_work(worker, dwork, delay);
out:
	spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);
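
/*
 * Editor's example (not part of this file): the debounce idiom built on
 * kthread_mod_delayed_work().  Every event pushes the deadline back, so
 * my_dwork_fn (from the sketch above) runs only once events have been
 * quiet for a full second.
 */
static void my_note_event(struct kthread_worker *worker)
{
	kthread_mod_delayed_work(worker, &my_dwork, HZ);
}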

static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
{
	struct kthread_worker *worker = work->worker;
	unsigned long flags;
	int ret = false;

	if (!worker)
		goto out;

	spin_lock_irqsave(&worker->lock, flags);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	ret = __kthread_cancel_work(work, is_dwork, &flags);

	if (worker->current_work != work)
		goto out_fast;

	/*
	 * The work is in progress and we need to wait with the lock released.
	 * In the meantime, block any queuing by setting the canceling counter.
	 */
	work->canceling++;
	spin_unlock_irqrestore(&worker->lock, flags);
	kthread_flush_work(work);
	spin_lock_irqsave(&worker->lock, flags);
	work->canceling--;

out_fast:
	spin_unlock_irqrestore(&worker->lock, flags);
out:
	return ret;
}

/**
 * kthread_cancel_work_sync - cancel a kthread work and wait for it to finish
 * @work: the kthread work to cancel
 *
 * Cancel @work and wait for its execution to finish.  This function
 * can be used even if the work re-queues itself. On return from this
 * function, @work is guaranteed to be not pending or executing on any CPU.
 *
 * kthread_cancel_work_sync(&delayed_work->work) must not be used for
 * delayed works.  Use kthread_cancel_delayed_work_sync() instead.
 *
 * The caller must ensure that the worker on which @work was last
 * queued can't be destroyed before this function returns.
 *
 * Return: %true if @work was pending, %false otherwise.
 */
bool kthread_cancel_work_sync(struct kthread_work *work)
{
	return __kthread_cancel_work_sync(work, false);
}
EXPORT_SYMBOL_GPL(kthread_cancel_work_sync);

/**
 * kthread_cancel_delayed_work_sync - cancel a kthread delayed work and
 *	wait for it to finish.
 * @dwork: the kthread delayed work to cancel
 *
 * This is kthread_cancel_work_sync() for delayed works.
 *
 * Return: %true if @dwork was pending, %false otherwise.
 */
bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *dwork)
{
	return __kthread_cancel_work_sync(&dwork->work, true);
}
EXPORT_SYMBOL_GPL(kthread_cancel_delayed_work_sync);

/**
 * kthread_flush_worker - flush all current works on a kthread_worker
 * @worker: worker to flush
 *
 * Wait until all currently executing or pending works on @worker are
 * finished.
 */
void kthread_flush_worker(struct kthread_worker *worker)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};

	kthread_queue_work(worker, &fwork.work);
	wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(kthread_flush_worker);

/**
 * kthread_destroy_worker - destroy a kthread worker
 * @worker: worker to be destroyed
 *
 * Flush and destroy @worker.  The simple flush is enough because the kthread
 * worker API is used only in trivial scenarios.  There are no multi-step state
 * machines needed.
 */
void kthread_destroy_worker(struct kthread_worker *worker)
{
	struct task_struct *task;

	task = worker->task;
	if (WARN_ON(!task))
		return;

	kthread_flush_worker(worker);
	kthread_stop(task);
	WARN_ON(!list_empty(&worker->work_list));
	kfree(worker);
}
EXPORT_SYMBOL(kthread_destroy_worker);