1/* Kernel thread helper functions.
2 * Copyright (C) 2004 IBM Corporation, Rusty Russell.
3 *
4 * Creation is done via kthreadd, so that we get a clean environment
5 * even if we're invoked from userspace (think modprobe, hotplug cpu,
6 * etc.).
7 */
8#include <linux/sched.h>
9#include <linux/kthread.h>
10#include <linux/completion.h>
11#include <linux/err.h>
12#include <linux/cpuset.h>
13#include <linux/unistd.h>
14#include <linux/file.h>
15#include <linux/export.h>
16#include <linux/mutex.h>
17#include <linux/slab.h>
18#include <linux/freezer.h>
19#include <linux/ptrace.h>
20#include <linux/uaccess.h>
21#include <trace/events/sched.h>
22
23static DEFINE_SPINLOCK(kthread_create_lock);
24static LIST_HEAD(kthread_create_list);
25struct task_struct *kthreadd_task;
26
27struct kthread_create_info
28{
29 /* Information passed to kthread() from kthreadd. */
30 int (*threadfn)(void *data);
31 void *data;
32 int node;
33
34 /* Result passed back to kthread_create() from kthreadd. */
35 struct task_struct *result;
36 struct completion *done;
37
38 struct list_head list;
39};
40
41struct kthread {
42 unsigned long flags;
43 unsigned int cpu;
44 void *data;
45 struct completion parked;
46 struct completion exited;
47};
48
49enum KTHREAD_BITS {
50 KTHREAD_IS_PER_CPU = 0,
51 KTHREAD_SHOULD_STOP,
52 KTHREAD_SHOULD_PARK,
53 KTHREAD_IS_PARKED,
54};
55
56static inline void set_kthread_struct(void *kthread)
57{
58 /*
59 * We abuse ->set_child_tid to avoid the new member and because it
60 * can't be wrongly copied by copy_process(). We also rely on the fact
61 * that the caller can't exec, so PF_KTHREAD can't be cleared.
62 */
63 current->set_child_tid = (__force void __user *)kthread;
64}
65
66static inline struct kthread *to_kthread(struct task_struct *k)
67{
68 WARN_ON(!(k->flags & PF_KTHREAD));
69 return (__force void *)k->set_child_tid;
70}
71
72void free_kthread_struct(struct task_struct *k)
73{
74 /*
75 * Can be NULL if this kthread was created by kernel_thread()
76 * or if kmalloc() in kthread() failed.
77 */
78 kfree(to_kthread(k));
79}
80
81/**
82 * kthread_should_stop - should this kthread return now?
83 *
84 * When someone calls kthread_stop() on your kthread, it will be woken
85 * and this will return true. You should then return, and your return
86 * value will be passed through to kthread_stop().
87 */
88bool kthread_should_stop(void)
89{
90 return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
91}
92EXPORT_SYMBOL(kthread_should_stop);
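/*
 * Illustrative sketch, not part of the original file: a typical thread
 * function polls kthread_should_stop() in its main loop and returns when it
 * becomes true; the return value is passed back to kthread_stop(). The names
 * my_thread_fn and do_some_work() are hypothetical.
 *
 *	static int my_thread_fn(void *data)
 *	{
 *		while (!kthread_should_stop()) {
 *			do_some_work(data);
 *			schedule_timeout_interruptible(HZ);
 *		}
 *		return 0;
 *	}
 */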
93
94/**
95 * kthread_should_park - should this kthread park now?
96 *
97 * When someone calls kthread_park() on your kthread, it will be woken
98 * and this will return true. You should then do the necessary
99 * cleanup and call kthread_parkme()
100 *
101 * Similar to kthread_should_stop(), but this keeps the thread alive
102 * and in a park position. kthread_unpark() "restarts" the thread and
103 * calls the thread function again.
104 */
105bool kthread_should_park(void)
106{
107 return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(current)->flags);
108}
109EXPORT_SYMBOL_GPL(kthread_should_park);
110
111/**
112 * kthread_freezable_should_stop - should this freezable kthread return now?
113 * @was_frozen: optional out parameter, indicates whether %current was frozen
114 *
115 * kthread_should_stop() for freezable kthreads, which will enter
116 * refrigerator if necessary. This function is safe from kthread_stop() /
117 * freezer deadlock and freezable kthreads should use this function instead
118 * of calling try_to_freeze() directly.
119 */
120bool kthread_freezable_should_stop(bool *was_frozen)
121{
122 bool frozen = false;
123
124 might_sleep();
125
126 if (unlikely(freezing(current)))
127 frozen = __refrigerator(true);
128
129 if (was_frozen)
130 *was_frozen = frozen;
131
132 return kthread_should_stop();
133}
134EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
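/*
 * Illustrative sketch, an assumption rather than code from this file: a
 * freezable kthread marks itself with set_freezable() and then uses
 * kthread_freezable_should_stop() as its loop condition, so it both enters
 * the refrigerator and stops safely. process_item() is hypothetical.
 *
 *	static int my_freezable_fn(void *data)
 *	{
 *		bool was_frozen;
 *
 *		set_freezable();
 *		while (!kthread_freezable_should_stop(&was_frozen)) {
 *			process_item(data);
 *			schedule_timeout_interruptible(HZ);
 *		}
 *		return 0;
 *	}
 */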
135
136/**
137 * kthread_data - return data value specified on kthread creation
138 * @task: kthread task in question
139 *
140 * Return the data value specified when kthread @task was created.
141 * The caller is responsible for ensuring the validity of @task when
142 * calling this function.
143 */
144void *kthread_data(struct task_struct *task)
145{
146 return to_kthread(task)->data;
147}
148
149/**
150 * kthread_probe_data - speculative version of kthread_data()
151 * @task: possible kthread task in question
152 *
153 * @task could be a kthread task. Return the data value specified when it
154 * was created if accessible. If @task isn't a kthread task or its data is
155 * inaccessible for any reason, %NULL is returned. This function requires
156 * that @task itself is safe to dereference.
157 */
158void *kthread_probe_data(struct task_struct *task)
159{
160 struct kthread *kthread = to_kthread(task);
161 void *data = NULL;
162
163 probe_kernel_read(&data, &kthread->data, sizeof(data));
164 return data;
165}
166
167static void __kthread_parkme(struct kthread *self)
168{
169 __set_current_state(TASK_PARKED);
170 while (test_bit(KTHREAD_SHOULD_PARK, &self->flags)) {
171 if (!test_and_set_bit(KTHREAD_IS_PARKED, &self->flags))
172 complete(&self->parked);
173 schedule();
174 __set_current_state(TASK_PARKED);
175 }
176 clear_bit(KTHREAD_IS_PARKED, &self->flags);
177 __set_current_state(TASK_RUNNING);
178}
179
180void kthread_parkme(void)
181{
182 __kthread_parkme(to_kthread(current));
183}
184EXPORT_SYMBOL_GPL(kthread_parkme);
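/*
 * Illustrative sketch (assumption): a park-aware thread function checks
 * kthread_should_park() alongside kthread_should_stop() and enters
 * TASK_PARKED via kthread_parkme(), much like the smpboot hotplug threads.
 * handle_one() is hypothetical.
 *
 *	static int my_percpu_fn(void *data)
 *	{
 *		while (!kthread_should_stop()) {
 *			if (kthread_should_park())
 *				kthread_parkme();
 *			else
 *				handle_one(data);
 *		}
 *		return 0;
 *	}
 */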
185
186static int kthread(void *_create)
187{
188 /* Copy data: it's on kthread's stack */
189 struct kthread_create_info *create = _create;
190 int (*threadfn)(void *data) = create->threadfn;
191 void *data = create->data;
192 struct completion *done;
193 struct kthread *self;
194 int ret;
195
196 self = kmalloc(sizeof(*self), GFP_KERNEL);
197 set_kthread_struct(self);
198
199 /* If user was SIGKILLed, I release the structure. */
200 done = xchg(&create->done, NULL);
201 if (!done) {
202 kfree(create);
203 do_exit(-EINTR);
204 }
205
206 if (!self) {
207 create->result = ERR_PTR(-ENOMEM);
208 complete(done);
209 do_exit(-ENOMEM);
210 }
211
212 self->flags = 0;
213 self->data = data;
214 init_completion(&self->exited);
215 init_completion(&self->parked);
216 current->vfork_done = &self->exited;
217
218 /* OK, tell user we're spawned, wait for stop or wakeup */
219 __set_current_state(TASK_UNINTERRUPTIBLE);
220 create->result = current;
221 complete(done);
222 schedule();
223
224 ret = -EINTR;
225 if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
226 __kthread_parkme(self);
227 ret = threadfn(data);
228 }
229 do_exit(ret);
230}
231
232/* called from do_fork() to get node information for the task about to be created */
233int tsk_fork_get_node(struct task_struct *tsk)
234{
235#ifdef CONFIG_NUMA
236 if (tsk == kthreadd_task)
237 return tsk->pref_node_fork;
238#endif
239 return NUMA_NO_NODE;
240}
241
242static void create_kthread(struct kthread_create_info *create)
243{
244 int pid;
245
246#ifdef CONFIG_NUMA
247 current->pref_node_fork = create->node;
248#endif
249 /* We want our own signal handler (we take no signals by default). */
250 pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
251 if (pid < 0) {
252 /* If user was SIGKILLed, I release the structure. */
253 struct completion *done = xchg(&create->done, NULL);
254
255 if (!done) {
256 kfree(create);
257 return;
258 }
259 create->result = ERR_PTR(pid);
260 complete(done);
261 }
262}
263
264static __printf(4, 0)
265struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
266 void *data, int node,
267 const char namefmt[],
268 va_list args)
269{
270 DECLARE_COMPLETION_ONSTACK(done);
271 struct task_struct *task;
272 struct kthread_create_info *create = kmalloc(sizeof(*create),
273 GFP_KERNEL);
274
275 if (!create)
276 return ERR_PTR(-ENOMEM);
277 create->threadfn = threadfn;
278 create->data = data;
279 create->node = node;
280 create->done = &done;
281
282 spin_lock(&kthread_create_lock);
283 list_add_tail(&create->list, &kthread_create_list);
284 spin_unlock(&kthread_create_lock);
285
286 wake_up_process(kthreadd_task);
287 /*
288 * Wait for completion in killable state, for I might be chosen by
289 * the OOM killer while kthreadd is trying to allocate memory for
290 * new kernel thread.
291 */
292 if (unlikely(wait_for_completion_killable(&done))) {
293 /*
294 * If I was SIGKILLed before kthreadd (or new kernel thread)
295 * calls complete(), leave the cleanup of this structure to
296 * that thread.
297 */
298 if (xchg(&create->done, NULL))
299 return ERR_PTR(-EINTR);
300 /*
301 * kthreadd (or new kernel thread) will call complete()
302 * shortly.
303 */
304 wait_for_completion(&done);
305 }
306 task = create->result;
307 if (!IS_ERR(task)) {
308 static const struct sched_param param = { .sched_priority = 0 };
309
310 vsnprintf(task->comm, sizeof(task->comm), namefmt, args);
311 /*
312 * root may have changed our (kthreadd's) priority or CPU mask.
313 * The kernel thread should not inherit these properties.
314 */
315 sched_setscheduler_nocheck(task, SCHED_NORMAL, &param);
316 set_cpus_allowed_ptr(task, cpu_all_mask);
317 }
318 kfree(create);
319 return task;
320}
321
322/**
323 * kthread_create_on_node - create a kthread.
324 * @threadfn: the function to run until signal_pending(current).
325 * @data: data ptr for @threadfn.
326 * @node: task and thread structures for the thread are allocated on this node
327 * @namefmt: printf-style name for the thread.
328 *
329 * Description: This helper function creates and names a kernel
330 * thread. The thread will be stopped: use wake_up_process() to start
331 * it. See also kthread_run(). The new thread has SCHED_NORMAL policy and
332 * is affine to all CPUs.
333 *
334 * If the thread is going to be bound to a particular cpu, give its node
335 * in @node to get NUMA affinity for the kthread stack; otherwise give NUMA_NO_NODE.
336 * When woken, the thread will run @threadfn() with @data as its
337 * argument. @threadfn() can either call do_exit() directly if it is a
338 * standalone thread for which no one will call kthread_stop(), or
339 * return when 'kthread_should_stop()' is true (which means
340 * kthread_stop() has been called). The return value should be zero
341 * or a negative error number; it will be passed to kthread_stop().
342 *
343 * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
344 */
345struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
346 void *data, int node,
347 const char namefmt[],
348 ...)
349{
350 struct task_struct *task;
351 va_list args;
352
353 va_start(args, namefmt);
354 task = __kthread_create_on_node(threadfn, data, node, namefmt, args);
355 va_end(args);
356
357 return task;
358}
359EXPORT_SYMBOL(kthread_create_on_node);
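/*
 * Illustrative sketch (assumption): creating a thread with a NUMA hint and
 * starting it explicitly; kthread_run() from <linux/kthread.h> wraps the same
 * call plus wake_up_process(). my_thread_fn, my_data, my_node and id are
 * hypothetical names.
 *
 *	struct task_struct *tsk;
 *
 *	tsk = kthread_create_on_node(my_thread_fn, my_data, my_node,
 *				     "mydrv/%d", id);
 *	if (IS_ERR(tsk))
 *		return PTR_ERR(tsk);
 *	wake_up_process(tsk);
 */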
360
361static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, long state)
362{
363 unsigned long flags;
364
365 if (!wait_task_inactive(p, state)) {
366 WARN_ON(1);
367 return;
368 }
369
370 /* It's safe because the task is inactive. */
371 raw_spin_lock_irqsave(&p->pi_lock, flags);
372 do_set_cpus_allowed(p, mask);
373 p->flags |= PF_NO_SETAFFINITY;
374 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
375}
376
377static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
378{
379 __kthread_bind_mask(p, cpumask_of(cpu), state);
380}
381
382void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
383{
384 __kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
385}
386
387/**
388 * kthread_bind - bind a just-created kthread to a cpu.
389 * @p: thread created by kthread_create().
390 * @cpu: cpu (might not be online, must be possible) for @k to run on.
391 *
392 * Description: This function is equivalent to set_cpus_allowed(),
393 * except that @cpu doesn't need to be online, and the thread must be
394 * stopped (i.e., just returned from kthread_create()).
395 */
396void kthread_bind(struct task_struct *p, unsigned int cpu)
397{
398 __kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
399}
400EXPORT_SYMBOL(kthread_bind);
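/*
 * Illustrative sketch (assumption): the usual sequence is create, bind while
 * the thread is still stopped, then wake. my_cpu_fn is hypothetical; tsk and
 * cpu are assumed to exist in the surrounding code.
 *
 *	tsk = kthread_create(my_cpu_fn, NULL, "mydrv/%u", cpu);
 *	if (!IS_ERR(tsk)) {
 *		kthread_bind(tsk, cpu);
 *		wake_up_process(tsk);
 *	}
 */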
401
402/**
403 * kthread_create_on_cpu - Create a cpu bound kthread
404 * @threadfn: the function to run until signal_pending(current).
405 * @data: data ptr for @threadfn.
406 * @cpu: The cpu on which the thread should be bound,
407 * @namefmt: printf-style name for the thread. Format is restricted
408 * to "name.*%u". Code fills in cpu number.
409 *
410 * Description: This helper function creates and names a kernel thread.
411 * The thread will be woken and put into park mode.
412 */
413struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
414 void *data, unsigned int cpu,
415 const char *namefmt)
416{
417 struct task_struct *p;
418
419 p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
420 cpu);
421 if (IS_ERR(p))
422 return p;
423 kthread_bind(p, cpu);
424 /* CPU hotplug needs to bind once again when unparking the thread. */
425 set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags);
426 to_kthread(p)->cpu = cpu;
427 return p;
428}
429
430/**
431 * kthread_unpark - unpark a thread created by kthread_create().
432 * @k: thread created by kthread_create().
433 *
434 * Sets kthread_should_park() for @k to return false, wakes it, and
435 * waits for it to return. If the thread is marked percpu then it is
436 * bound to the cpu again.
437 */
438void kthread_unpark(struct task_struct *k)
439{
440 struct kthread *kthread = to_kthread(k);
441
442 clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
443 /*
444 * We clear the IS_PARKED bit here as we don't wait
445 * until the task has left the park code. So if we'd
446 * park before that happens we'd see the IS_PARKED bit
447 * which might be about to be cleared.
448 */
449 if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
450 /*
451 * Newly created kthread was parked when the CPU was offline.
452 * The binding was lost and we need to set it again.
453 */
454 if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
455 __kthread_bind(k, kthread->cpu, TASK_PARKED);
456 wake_up_state(k, TASK_PARKED);
457 }
458}
459EXPORT_SYMBOL_GPL(kthread_unpark);
460
461/**
462 * kthread_park - park a thread created by kthread_create().
463 * @k: thread created by kthread_create().
464 *
465 * Sets kthread_should_park() for @k to return true, wakes it, and
466 * waits for it to return. This can also be called after kthread_create()
467 * instead of calling wake_up_process(): the thread will park without
468 * calling threadfn().
469 *
470 * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
471 * If called by the kthread itself, just the park bit is set.
472 */
473int kthread_park(struct task_struct *k)
474{
475 struct kthread *kthread = to_kthread(k);
476
477 if (WARN_ON(k->flags & PF_EXITING))
478 return -ENOSYS;
479
480 if (!test_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
481 set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
482 if (k != current) {
483 wake_up_process(k);
484 wait_for_completion(&kthread->parked);
485 }
486 }
487
488 return 0;
489}
490EXPORT_SYMBOL_GPL(kthread_park);
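/*
 * Illustrative sketch (assumption): a per-cpu thread created with
 * kthread_create_on_cpu() starts out parked; it is unparked to run and can
 * be parked again to quiesce it, e.g. around CPU hotplug. my_percpu_fn is
 * hypothetical.
 *
 *	tsk = kthread_create_on_cpu(my_percpu_fn, NULL, cpu, "mydrv/%u");
 *	if (IS_ERR(tsk))
 *		return PTR_ERR(tsk);
 *	kthread_unpark(tsk);
 *	...
 *	kthread_park(tsk);
 *	kthread_unpark(tsk);
 */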
491
492/**
493 * kthread_stop - stop a thread created by kthread_create().
494 * @k: thread created by kthread_create().
495 *
496 * Sets kthread_should_stop() for @k to return true, wakes it, and
497 * waits for it to exit. This can also be called after kthread_create()
498 * instead of calling wake_up_process(): the thread will exit without
499 * calling threadfn().
500 *
501 * If threadfn() may call do_exit() itself, the caller must ensure
502 * task_struct can't go away.
503 *
504 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
505 * was never called.
506 */
507int kthread_stop(struct task_struct *k)
508{
509 struct kthread *kthread;
510 int ret;
511
512 trace_sched_kthread_stop(k);
513
514 get_task_struct(k);
515 kthread = to_kthread(k);
516 set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
517 kthread_unpark(k);
518 wake_up_process(k);
519 wait_for_completion(&kthread->exited);
520 ret = k->exit_code;
521 put_task_struct(k);
522
523 trace_sched_kthread_stop_ret(ret);
524 return ret;
525}
526EXPORT_SYMBOL(kthread_stop);
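/*
 * Illustrative sketch (assumption): the creator keeps the task_struct around
 * and eventually calls kthread_stop(); the return value is whatever
 * my_thread_fn() returned, or -EINTR if the thread was never woken.
 * my_thread_fn, my_data and id are hypothetical.
 *
 *	tsk = kthread_run(my_thread_fn, my_data, "mydrv/%d", id);
 *	if (IS_ERR(tsk))
 *		return PTR_ERR(tsk);
 *	...
 *	ret = kthread_stop(tsk);
 */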
527
528int kthreadd(void *unused)
529{
530 struct task_struct *tsk = current;
531
532 /* Setup a clean context for our children to inherit. */
533 set_task_comm(tsk, "kthreadd");
534 ignore_signals(tsk);
535 set_cpus_allowed_ptr(tsk, cpu_all_mask);
536 set_mems_allowed(node_states[N_MEMORY]);
537
538 current->flags |= PF_NOFREEZE;
539
540 for (;;) {
541 set_current_state(TASK_INTERRUPTIBLE);
542 if (list_empty(&kthread_create_list))
543 schedule();
544 __set_current_state(TASK_RUNNING);
545
546 spin_lock(&kthread_create_lock);
547 while (!list_empty(&kthread_create_list)) {
548 struct kthread_create_info *create;
549
550 create = list_entry(kthread_create_list.next,
551 struct kthread_create_info, list);
552 list_del_init(&create->list);
553 spin_unlock(&kthread_create_lock);
554
555 create_kthread(create);
556
557 spin_lock(&kthread_create_lock);
558 }
559 spin_unlock(&kthread_create_lock);
560 }
561
562 return 0;
563}
564
565void __kthread_init_worker(struct kthread_worker *worker,
566 const char *name,
567 struct lock_class_key *key)
568{
569 memset(worker, 0, sizeof(struct kthread_worker));
570 spin_lock_init(&worker->lock);
571 lockdep_set_class_and_name(&worker->lock, key, name);
572 INIT_LIST_HEAD(&worker->work_list);
573 INIT_LIST_HEAD(&worker->delayed_work_list);
574}
575EXPORT_SYMBOL_GPL(__kthread_init_worker);
576
577/**
578 * kthread_worker_fn - kthread function to process kthread_worker
579 * @worker_ptr: pointer to initialized kthread_worker
580 *
581 * This function implements the main cycle of kthread worker. It processes
582 * work_list until it is stopped with kthread_stop(). It sleeps when the queue
583 * is empty.
584 *
585 * The works must not hold any locks or leave preemption or interrupts
586 * disabled when they finish. A safe point for freezing is provided after
587 * one work finishes and before the next one is started.
588 *
589 * Also the works must not be handled by more than one worker at the same time,
590 * see also kthread_queue_work().
591 */
592int kthread_worker_fn(void *worker_ptr)
593{
594 struct kthread_worker *worker = worker_ptr;
595 struct kthread_work *work;
596
597 /*
598 * FIXME: Update the check and remove the assignment when all kthread
599 * worker users are created using kthread_create_worker*() functions.
600 */
601 WARN_ON(worker->task && worker->task != current);
602 worker->task = current;
603
604 if (worker->flags & KTW_FREEZABLE)
605 set_freezable();
606
607repeat:
608 set_current_state(TASK_INTERRUPTIBLE); /* mb paired w/ kthread_stop */
609
610 if (kthread_should_stop()) {
611 __set_current_state(TASK_RUNNING);
612 spin_lock_irq(&worker->lock);
613 worker->task = NULL;
614 spin_unlock_irq(&worker->lock);
615 return 0;
616 }
617
618 work = NULL;
619 spin_lock_irq(&worker->lock);
620 if (!list_empty(&worker->work_list)) {
621 work = list_first_entry(&worker->work_list,
622 struct kthread_work, node);
623 list_del_init(&work->node);
624 }
625 worker->current_work = work;
626 spin_unlock_irq(&worker->lock);
627
628 if (work) {
629 __set_current_state(TASK_RUNNING);
630 work->func(work);
631 } else if (!freezing(current))
632 schedule();
633
634 try_to_freeze();
635 goto repeat;
636}
637EXPORT_SYMBOL_GPL(kthread_worker_fn);
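/*
 * Illustrative sketch (assumption): the legacy way of using this function,
 * still covered by the FIXME above, is to initialize a worker yourself and
 * run kthread_worker_fn() in a thread of its own:
 *
 *	DEFINE_KTHREAD_WORKER(my_worker);
 *	struct task_struct *tsk;
 *
 *	tsk = kthread_run(kthread_worker_fn, &my_worker, "my_worker");
 *	...
 *	kthread_stop(tsk);
 */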
638
639static __printf(3, 0) struct kthread_worker *
640__kthread_create_worker(int cpu, unsigned int flags,
641 const char namefmt[], va_list args)
642{
643 struct kthread_worker *worker;
644 struct task_struct *task;
645 int node = -1;
646
647 worker = kzalloc(sizeof(*worker), GFP_KERNEL);
648 if (!worker)
649 return ERR_PTR(-ENOMEM);
650
651 kthread_init_worker(worker);
652
653 if (cpu >= 0)
654 node = cpu_to_node(cpu);
655
656 task = __kthread_create_on_node(kthread_worker_fn, worker,
657 node, namefmt, args);
658 if (IS_ERR(task))
659 goto fail_task;
660
661 if (cpu >= 0)
662 kthread_bind(task, cpu);
663
664 worker->flags = flags;
665 worker->task = task;
666 wake_up_process(task);
667 return worker;
668
669fail_task:
670 kfree(worker);
671 return ERR_CAST(task);
672}
673
674/**
675 * kthread_create_worker - create a kthread worker
676 * @flags: flags modifying the default behavior of the worker
677 * @namefmt: printf-style name for the kthread worker (task).
678 *
679 * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
680 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
681 * when the worker was SIGKILLed.
682 */
683struct kthread_worker *
684kthread_create_worker(unsigned int flags, const char namefmt[], ...)
685{
686 struct kthread_worker *worker;
687 va_list args;
688
689 va_start(args, namefmt);
690 worker = __kthread_create_worker(-1, flags, namefmt, args);
691 va_end(args);
692
693 return worker;
694}
695EXPORT_SYMBOL(kthread_create_worker);
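/*
 * Illustrative sketch (assumption): creating a dedicated worker, queueing a
 * work item on it and tearing everything down again. my_work_fn is
 * hypothetical.
 *
 *	static void my_work_fn(struct kthread_work *work) { ... }
 *
 *	struct kthread_worker *worker;
 *	struct kthread_work my_work;
 *
 *	worker = kthread_create_worker(0, "my_worker");
 *	if (IS_ERR(worker))
 *		return PTR_ERR(worker);
 *	kthread_init_work(&my_work, my_work_fn);
 *	kthread_queue_work(worker, &my_work);
 *	kthread_flush_work(&my_work);
 *	kthread_destroy_worker(worker);
 */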
696
697/**
698 * kthread_create_worker_on_cpu - create a kthread worker and bind it
699 * to a given CPU and the associated NUMA node.
700 * @cpu: CPU number
701 * @flags: flags modifying the default behavior of the worker
702 * @namefmt: printf-style name for the kthread worker (task).
703 *
704 * Use a valid CPU number if you want to bind the kthread worker
705 * to the given CPU and the associated NUMA node.
706 *
707 * A good practice is to add the cpu number also into the worker name.
708 * For example, use kthread_create_worker_on_cpu(cpu, 0, "helper/%d", cpu).
709 *
710 * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
711 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
712 * when the worker was SIGKILLed.
713 */
714struct kthread_worker *
715kthread_create_worker_on_cpu(int cpu, unsigned int flags,
716 const char namefmt[], ...)
717{
718 struct kthread_worker *worker;
719 va_list args;
720
721 va_start(args, namefmt);
722 worker = __kthread_create_worker(cpu, flags, namefmt, args);
723 va_end(args);
724
725 return worker;
726}
727EXPORT_SYMBOL(kthread_create_worker_on_cpu);
728
729/*
730 * Returns true when the work could not be queued at the moment.
731 * It happens when it is already pending in a worker list
732 * or when it is being cancelled.
733 */
734static inline bool queuing_blocked(struct kthread_worker *worker,
735 struct kthread_work *work)
736{
737 lockdep_assert_held(&worker->lock);
738
739 return !list_empty(&work->node) || work->canceling;
740}
741
742static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
743 struct kthread_work *work)
744{
745 lockdep_assert_held(&worker->lock);
746 WARN_ON_ONCE(!list_empty(&work->node));
747 /* Do not use a work with >1 worker, see kthread_queue_work() */
748 WARN_ON_ONCE(work->worker && work->worker != worker);
749}
750
751/* insert @work before @pos in @worker */
752static void kthread_insert_work(struct kthread_worker *worker,
753 struct kthread_work *work,
754 struct list_head *pos)
755{
756 kthread_insert_work_sanity_check(worker, work);
757
758 list_add_tail(&work->node, pos);
759 work->worker = worker;
760 if (!worker->current_work && likely(worker->task))
761 wake_up_process(worker->task);
762}
763
764/**
765 * kthread_queue_work - queue a kthread_work
766 * @worker: target kthread_worker
767 * @work: kthread_work to queue
768 *
769 * Queue @work for async execution on @worker, which must have been created
770 * with kthread_create_worker(). Returns %true if @work was successfully
771 * queued, %false if it was already pending.
772 *
773 * Reinitialize the work if it needs to be used by another worker.
774 * For example, when the worker was stopped and started again.
775 */
776bool kthread_queue_work(struct kthread_worker *worker,
777 struct kthread_work *work)
778{
779 bool ret = false;
780 unsigned long flags;
781
782 spin_lock_irqsave(&worker->lock, flags);
783 if (!queuing_blocked(worker, work)) {
784 kthread_insert_work(worker, work, &worker->work_list);
785 ret = true;
786 }
787 spin_unlock_irqrestore(&worker->lock, flags);
788 return ret;
789}
790EXPORT_SYMBOL_GPL(kthread_queue_work);
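/*
 * Illustrative sketch (assumption): a kthread_work is typically embedded in a
 * larger structure and recovered with container_of() in the callback.
 * struct my_device and its fields are hypothetical.
 *
 *	struct my_device {
 *		struct kthread_worker *worker;
 *		struct kthread_work irq_work;
 *	};
 *
 *	static void my_irq_work_fn(struct kthread_work *work)
 *	{
 *		struct my_device *mydev =
 *			container_of(work, struct my_device, irq_work);
 *		...
 *	}
 *
 *	kthread_init_work(&mydev->irq_work, my_irq_work_fn);
 *	kthread_queue_work(mydev->worker, &mydev->irq_work);
 */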
791
792/**
793 * kthread_delayed_work_timer_fn - callback that queues the associated kthread
794 * delayed work when the timer expires.
795 * @__data: pointer to the data associated with the timer
796 *
797 * The format of the function is defined by struct timer_list.
798 * It is expected to be called from an irqsafe timer with irqs already off.
799 */
800void kthread_delayed_work_timer_fn(unsigned long __data)
801{
802 struct kthread_delayed_work *dwork =
803 (struct kthread_delayed_work *)__data;
804 struct kthread_work *work = &dwork->work;
805 struct kthread_worker *worker = work->worker;
806
807 /*
808 * This might happen when a pending work is reinitialized.
809 * It means that it is being used in a wrong way.
810 */
811 if (WARN_ON_ONCE(!worker))
812 return;
813
814 spin_lock(&worker->lock);
815 /* Work must not be used with >1 worker, see kthread_queue_work(). */
816 WARN_ON_ONCE(work->worker != worker);
817
818 /* Move the work from worker->delayed_work_list. */
819 WARN_ON_ONCE(list_empty(&work->node));
820 list_del_init(&work->node);
821 kthread_insert_work(worker, work, &worker->work_list);
822
823 spin_unlock(&worker->lock);
824}
825EXPORT_SYMBOL(kthread_delayed_work_timer_fn);
826
827void __kthread_queue_delayed_work(struct kthread_worker *worker,
828 struct kthread_delayed_work *dwork,
829 unsigned long delay)
830{
831 struct timer_list *timer = &dwork->timer;
832 struct kthread_work *work = &dwork->work;
833
834 WARN_ON_ONCE(timer->function != kthread_delayed_work_timer_fn ||
835 timer->data != (unsigned long)dwork);
836
837 /*
838 * If @delay is 0, queue @dwork->work immediately. This is for
839 * both optimization and correctness. The earliest @timer can
840 * expire is on the closest next tick and delayed_work users depend
841 * on there being no such delay when @delay is 0.
842 */
843 if (!delay) {
844 kthread_insert_work(worker, work, &worker->work_list);
845 return;
846 }
847
848 /* Be paranoid and try to detect possible races already now. */
849 kthread_insert_work_sanity_check(worker, work);
850
851 list_add(&work->node, &worker->delayed_work_list);
852 work->worker = worker;
853 timer_stats_timer_set_start_info(&dwork->timer);
854 timer->expires = jiffies + delay;
855 add_timer(timer);
856}
857
858/**
859 * kthread_queue_delayed_work - queue the associated kthread work
860 * after a delay.
861 * @worker: target kthread_worker
862 * @dwork: kthread_delayed_work to queue
863 * @delay: number of jiffies to wait before queuing
864 *
865 * If the work is not yet pending, this starts a timer that will queue
866 * the work after the given @delay. If @delay is zero, it queues the
867 * work immediately.
868 *
869 * Return: %false if the @work was already pending, which means that
870 * either the timer was running or the work was queued. It returns %true
871 * otherwise.
872 */
873bool kthread_queue_delayed_work(struct kthread_worker *worker,
874 struct kthread_delayed_work *dwork,
875 unsigned long delay)
876{
877 struct kthread_work *work = &dwork->work;
878 unsigned long flags;
879 bool ret = false;
880
881 spin_lock_irqsave(&worker->lock, flags);
882
883 if (!queuing_blocked(worker, work)) {
884 __kthread_queue_delayed_work(worker, dwork, delay);
885 ret = true;
886 }
887
888 spin_unlock_irqrestore(&worker->lock, flags);
889 return ret;
890}
891EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
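/*
 * Illustrative sketch (assumption): a delayed work bundles a timer with the
 * work item; initialize it with kthread_init_delayed_work() and queue it
 * with a delay in jiffies. poll_fn and POLL_MS are hypothetical.
 *
 *	struct kthread_delayed_work poll_work;
 *
 *	kthread_init_delayed_work(&poll_work, poll_fn);
 *	kthread_queue_delayed_work(worker, &poll_work,
 *				   msecs_to_jiffies(POLL_MS));
 */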
892
893struct kthread_flush_work {
894 struct kthread_work work;
895 struct completion done;
896};
897
898static void kthread_flush_work_fn(struct kthread_work *work)
899{
900 struct kthread_flush_work *fwork =
901 container_of(work, struct kthread_flush_work, work);
902 complete(&fwork->done);
903}
904
905/**
906 * kthread_flush_work - flush a kthread_work
907 * @work: work to flush
908 *
909 * If @work is queued or executing, wait for it to finish execution.
910 */
911void kthread_flush_work(struct kthread_work *work)
912{
913 struct kthread_flush_work fwork = {
914 KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
915 COMPLETION_INITIALIZER_ONSTACK(fwork.done),
916 };
917 struct kthread_worker *worker;
918 bool noop = false;
919
920 worker = work->worker;
921 if (!worker)
922 return;
923
924 spin_lock_irq(&worker->lock);
925 /* Work must not be used with >1 worker, see kthread_queue_work(). */
926 WARN_ON_ONCE(work->worker != worker);
927
928 if (!list_empty(&work->node))
929 kthread_insert_work(worker, &fwork.work, work->node.next);
930 else if (worker->current_work == work)
931 kthread_insert_work(worker, &fwork.work,
932 worker->work_list.next);
933 else
934 noop = true;
935
936 spin_unlock_irq(&worker->lock);
937
938 if (!noop)
939 wait_for_completion(&fwork.done);
940}
941EXPORT_SYMBOL_GPL(kthread_flush_work);
942
943/*
944 * This function removes the work from the worker queue. Also it makes sure
945 * that it won't get queued later via the delayed work's timer.
946 *
947 * The work might still be in use when this function finishes. See the
948 * current_work processed by the worker.
949 *
950 * Return: %true if @work was pending and successfully canceled,
951 * %false if @work was not pending
952 */
953static bool __kthread_cancel_work(struct kthread_work *work, bool is_dwork,
954 unsigned long *flags)
955{
956 /* Try to cancel the timer if exists. */
957 if (is_dwork) {
958 struct kthread_delayed_work *dwork =
959 container_of(work, struct kthread_delayed_work, work);
960 struct kthread_worker *worker = work->worker;
961
962 /*
963 * del_timer_sync() must be called to make sure that the timer
964 * callback is not running. The lock must be temporarily released
965 * to avoid a deadlock with the callback. In the meantime,
966 * any queuing is blocked by setting the canceling counter.
967 */
968 work->canceling++;
969 spin_unlock_irqrestore(&worker->lock, *flags);
970 del_timer_sync(&dwork->timer);
971 spin_lock_irqsave(&worker->lock, *flags);
972 work->canceling--;
973 }
974
975 /*
976 * Try to remove the work from a worker list. It might either
977 * be from worker->work_list or from worker->delayed_work_list.
978 */
979 if (!list_empty(&work->node)) {
980 list_del_init(&work->node);
981 return true;
982 }
983
984 return false;
985}
986
987/**
988 * kthread_mod_delayed_work - modify delay of or queue a kthread delayed work
989 * @worker: kthread worker to use
990 * @dwork: kthread delayed work to queue
991 * @delay: number of jiffies to wait before queuing
992 *
993 * If @dwork is idle, equivalent to kthread_queue_delayed_work(). Otherwise,
994 * modify @dwork's timer so that it expires after @delay. If @delay is zero,
995 * @work is guaranteed to be queued immediately.
996 *
997 * Return: %true if @dwork was pending and its timer was modified,
998 * %false otherwise.
999 *
1000 * A special case is when the work is being canceled in parallel.
1001 * It might be caused either by the real kthread_cancel_delayed_work_sync()
1002 * or yet another kthread_mod_delayed_work() call. We let the other command
1003 * win and return %false here. The caller is supposed to synchronize these
1004 * operations in a reasonable way.
1005 *
1006 * This function is safe to call from any context including IRQ handler.
1007 * See __kthread_cancel_work() and kthread_delayed_work_timer_fn()
1008 * for details.
1009 */
1010bool kthread_mod_delayed_work(struct kthread_worker *worker,
1011 struct kthread_delayed_work *dwork,
1012 unsigned long delay)
1013{
1014 struct kthread_work *work = &dwork->work;
1015 unsigned long flags;
1016 int ret = false;
1017
1018 spin_lock_irqsave(&worker->lock, flags);
1019
1020 /* Do not bother with canceling when never queued. */
1021 if (!work->worker)
1022 goto fast_queue;
1023
1024 /* Work must not be used with >1 worker, see kthread_queue_work() */
1025 WARN_ON_ONCE(work->worker != worker);
1026
1027 /* Do not fight with another command that is canceling this work. */
1028 if (work->canceling)
1029 goto out;
1030
1031 ret = __kthread_cancel_work(work, true, &flags);
1032fast_queue:
1033 __kthread_queue_delayed_work(worker, dwork, delay);
1034out:
1035 spin_unlock_irqrestore(&worker->lock, flags);
1036 return ret;
1037}
1038EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);
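/*
 * Illustrative sketch (assumption): a watchdog-style user re-arms the same
 * delayed work on every ping; kthread_mod_delayed_work() either queues it or
 * pushes the pending timer further out. wd_work and TIMEOUT_MS are
 * hypothetical.
 *
 *	kthread_mod_delayed_work(worker, &wd_work,
 *				 msecs_to_jiffies(TIMEOUT_MS));
 */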
1039
1040static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
1041{
1042 struct kthread_worker *worker = work->worker;
1043 unsigned long flags;
1044 int ret = false;
1045
1046 if (!worker)
1047 goto out;
1048
1049 spin_lock_irqsave(&worker->lock, flags);
1050 /* Work must not be used with >1 worker, see kthread_queue_work(). */
1051 WARN_ON_ONCE(work->worker != worker);
1052
1053 ret = __kthread_cancel_work(work, is_dwork, &flags);
1054
1055 if (worker->current_work != work)
1056 goto out_fast;
1057
1058 /*
1059 * The work is in progress and we need to wait with the lock released.
1060 * In the meantime, block any queuing by setting the canceling counter.
1061 */
1062 work->canceling++;
1063 spin_unlock_irqrestore(&worker->lock, flags);
1064 kthread_flush_work(work);
1065 spin_lock_irqsave(&worker->lock, flags);
1066 work->canceling--;
1067
1068out_fast:
1069 spin_unlock_irqrestore(&worker->lock, flags);
1070out:
1071 return ret;
1072}
1073
1074/**
1075 * kthread_cancel_work_sync - cancel a kthread work and wait for it to finish
1076 * @work: the kthread work to cancel
1077 *
1078 * Cancel @work and wait for its execution to finish. This function
1079 * can be used even if the work re-queues itself. On return from this
1080 * function, @work is guaranteed to be not pending or executing on any CPU.
1081 *
1082 * kthread_cancel_work_sync(&delayed_work->work) must not be used for
1083 * delayed_work's. Use kthread_cancel_delayed_work_sync() instead.
1084 *
1085 * The caller must ensure that the worker on which @work was last
1086 * queued can't be destroyed before this function returns.
1087 *
1088 * Return: %true if @work was pending, %false otherwise.
1089 */
1090bool kthread_cancel_work_sync(struct kthread_work *work)
1091{
1092 return __kthread_cancel_work_sync(work, false);
1093}
1094EXPORT_SYMBOL_GPL(kthread_cancel_work_sync);
1095
1096/**
1097 * kthread_cancel_delayed_work_sync - cancel a kthread delayed work and
1098 * wait for it to finish.
1099 * @dwork: the kthread delayed work to cancel
1100 *
1101 * This is kthread_cancel_work_sync() for delayed works.
1102 *
1103 * Return: %true if @dwork was pending, %false otherwise.
1104 */
1105bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *dwork)
1106{
1107 return __kthread_cancel_work_sync(&dwork->work, true);
1108}
1109EXPORT_SYMBOL_GPL(kthread_cancel_delayed_work_sync);
1110
1111/**
1112 * kthread_flush_worker - flush all current works on a kthread_worker
1113 * @worker: worker to flush
1114 *
1115 * Wait until all currently executing or pending works on @worker are
1116 * finished.
1117 */
1118void kthread_flush_worker(struct kthread_worker *worker)
1119{
1120 struct kthread_flush_work fwork = {
1121 KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
1122 COMPLETION_INITIALIZER_ONSTACK(fwork.done),
1123 };
1124
1125 kthread_queue_work(worker, &fwork.work);
1126 wait_for_completion(&fwork.done);
1127}
1128EXPORT_SYMBOL_GPL(kthread_flush_worker);
1129
1130/**
1131 * kthread_destroy_worker - destroy a kthread worker
1132 * @worker: worker to be destroyed
1133 *
1134 * Flush and destroy @worker. The simple flush is enough because the kthread
1135 * worker API is used only in trivial scenarios. There are no multi-step state
1136 * machines needed.
1137 */
1138void kthread_destroy_worker(struct kthread_worker *worker)
1139{
1140 struct task_struct *task;
1141
1142 task = worker->task;
1143 if (WARN_ON(!task))
1144 return;
1145
1146 kthread_flush_worker(worker);
1147 kthread_stop(task);
1148 WARN_ON(!list_empty(&worker->work_list));
1149 kfree(worker);
1150}
1151EXPORT_SYMBOL(kthread_destroy_worker);
1// SPDX-License-Identifier: GPL-2.0-only
2/* Kernel thread helper functions.
3 * Copyright (C) 2004 IBM Corporation, Rusty Russell.
4 * Copyright (C) 2009 Red Hat, Inc.
5 *
6 * Creation is done via kthreadd, so that we get a clean environment
7 * even if we're invoked from userspace (think modprobe, hotplug cpu,
8 * etc.).
9 */
10#include <uapi/linux/sched/types.h>
11#include <linux/mm.h>
12#include <linux/mmu_context.h>
13#include <linux/sched.h>
14#include <linux/sched/mm.h>
15#include <linux/sched/task.h>
16#include <linux/kthread.h>
17#include <linux/completion.h>
18#include <linux/err.h>
19#include <linux/cgroup.h>
20#include <linux/cpuset.h>
21#include <linux/unistd.h>
22#include <linux/file.h>
23#include <linux/export.h>
24#include <linux/mutex.h>
25#include <linux/slab.h>
26#include <linux/freezer.h>
27#include <linux/ptrace.h>
28#include <linux/uaccess.h>
29#include <linux/numa.h>
30#include <linux/sched/isolation.h>
31#include <trace/events/sched.h>
32
33
34static DEFINE_SPINLOCK(kthread_create_lock);
35static LIST_HEAD(kthread_create_list);
36struct task_struct *kthreadd_task;
37
38struct kthread_create_info
39{
40 /* Information passed to kthread() from kthreadd. */
41 int (*threadfn)(void *data);
42 void *data;
43 int node;
44
45 /* Result passed back to kthread_create() from kthreadd. */
46 struct task_struct *result;
47 struct completion *done;
48
49 struct list_head list;
50};
51
52struct kthread {
53 unsigned long flags;
54 unsigned int cpu;
55 int (*threadfn)(void *);
56 void *data;
57 mm_segment_t oldfs;
58 struct completion parked;
59 struct completion exited;
60#ifdef CONFIG_BLK_CGROUP
61 struct cgroup_subsys_state *blkcg_css;
62#endif
63};
64
65enum KTHREAD_BITS {
66 KTHREAD_IS_PER_CPU = 0,
67 KTHREAD_SHOULD_STOP,
68 KTHREAD_SHOULD_PARK,
69};
70
71static inline struct kthread *to_kthread(struct task_struct *k)
72{
73 WARN_ON(!(k->flags & PF_KTHREAD));
74 return (__force void *)k->set_child_tid;
75}
76
77/*
78 * Variant of to_kthread() that doesn't assume @p is a kthread.
79 *
80 * Per construction; when:
81 *
82 * (p->flags & PF_KTHREAD) && p->set_child_tid
83 *
84 * the task is both a kthread and struct kthread is persistent. However
85 * PF_KTHREAD on its own is not, kernel_thread() can exec() (See umh.c and
86 * begin_new_exec()).
87 */
88static inline struct kthread *__to_kthread(struct task_struct *p)
89{
90 void *kthread = (__force void *)p->set_child_tid;
91 if (kthread && !(p->flags & PF_KTHREAD))
92 kthread = NULL;
93 return kthread;
94}
95
96void set_kthread_struct(struct task_struct *p)
97{
98 struct kthread *kthread;
99
100 if (__to_kthread(p))
101 return;
102
103 kthread = kzalloc(sizeof(*kthread), GFP_KERNEL);
104 /*
105 * We abuse ->set_child_tid to avoid the new member and because it
106 * can't be wrongly copied by copy_process(). We also rely on the fact
107 * that the caller can't exec, so PF_KTHREAD can't be cleared.
108 */
109 p->set_child_tid = (__force void __user *)kthread;
110}
111
112void free_kthread_struct(struct task_struct *k)
113{
114 struct kthread *kthread;
115
116 /*
117 * Can be NULL if this kthread was created by kernel_thread()
118 * or if kmalloc() in kthread() failed.
119 */
120 kthread = to_kthread(k);
121#ifdef CONFIG_BLK_CGROUP
122 WARN_ON_ONCE(kthread && kthread->blkcg_css);
123#endif
124 kfree(kthread);
125}
126
127/**
128 * kthread_should_stop - should this kthread return now?
129 *
130 * When someone calls kthread_stop() on your kthread, it will be woken
131 * and this will return true. You should then return, and your return
132 * value will be passed through to kthread_stop().
133 */
134bool kthread_should_stop(void)
135{
136 return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
137}
138EXPORT_SYMBOL(kthread_should_stop);
139
140bool __kthread_should_park(struct task_struct *k)
141{
142 return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(k)->flags);
143}
144EXPORT_SYMBOL_GPL(__kthread_should_park);
145
146/**
147 * kthread_should_park - should this kthread park now?
148 *
149 * When someone calls kthread_park() on your kthread, it will be woken
150 * and this will return true. You should then do the necessary
151 * cleanup and call kthread_parkme()
152 *
153 * Similar to kthread_should_stop(), but this keeps the thread alive
154 * and in a park position. kthread_unpark() "restarts" the thread and
155 * calls the thread function again.
156 */
157bool kthread_should_park(void)
158{
159 return __kthread_should_park(current);
160}
161EXPORT_SYMBOL_GPL(kthread_should_park);
162
163/**
164 * kthread_freezable_should_stop - should this freezable kthread return now?
165 * @was_frozen: optional out parameter, indicates whether %current was frozen
166 *
167 * kthread_should_stop() for freezable kthreads, which will enter
168 * refrigerator if necessary. This function is safe from kthread_stop() /
169 * freezer deadlock and freezable kthreads should use this function instead
170 * of calling try_to_freeze() directly.
171 */
172bool kthread_freezable_should_stop(bool *was_frozen)
173{
174 bool frozen = false;
175
176 might_sleep();
177
178 if (unlikely(freezing(current)))
179 frozen = __refrigerator(true);
180
181 if (was_frozen)
182 *was_frozen = frozen;
183
184 return kthread_should_stop();
185}
186EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
187
188/**
189 * kthread_func - return the function specified on kthread creation
190 * @task: kthread task in question
191 *
192 * Returns NULL if the task is not a kthread.
193 */
194void *kthread_func(struct task_struct *task)
195{
196 struct kthread *kthread = __to_kthread(task);
197 if (kthread)
198 return kthread->threadfn;
199 return NULL;
200}
201EXPORT_SYMBOL_GPL(kthread_func);
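/*
 * Illustrative sketch (assumption): kthread_func() lets introspection code
 * identify what a kernel thread runs without touching struct kthread
 * directly. my_thread_fn is hypothetical.
 *
 *	if (kthread_func(p) == my_thread_fn)
 *		...this task is one of ours...
 */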
202
203/**
204 * kthread_data - return data value specified on kthread creation
205 * @task: kthread task in question
206 *
207 * Return the data value specified when kthread @task was created.
208 * The caller is responsible for ensuring the validity of @task when
209 * calling this function.
210 */
211void *kthread_data(struct task_struct *task)
212{
213 return to_kthread(task)->data;
214}
215EXPORT_SYMBOL_GPL(kthread_data);
216
217/**
218 * kthread_probe_data - speculative version of kthread_data()
219 * @task: possible kthread task in question
220 *
221 * @task could be a kthread task. Return the data value specified when it
222 * was created if accessible. If @task isn't a kthread task or its data is
223 * inaccessible for any reason, %NULL is returned. This function requires
224 * that @task itself is safe to dereference.
225 */
226void *kthread_probe_data(struct task_struct *task)
227{
228 struct kthread *kthread = __to_kthread(task);
229 void *data = NULL;
230
231 if (kthread)
232 copy_from_kernel_nofault(&data, &kthread->data, sizeof(data));
233 return data;
234}
235
236static void __kthread_parkme(struct kthread *self)
237{
238 for (;;) {
239 /*
240 * TASK_PARKED is a special state; we must serialize against
241 * possible pending wakeups to avoid store-store collisions on
242 * task->state.
243 *
244 * Such a collision might possibly result in the task state
245 * changing from TASK_PARKED and us failing the
246 * wait_task_inactive() in kthread_park().
247 */
248 set_special_state(TASK_PARKED);
249 if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags))
250 break;
251
252 /*
253 * Thread is going to call schedule(), do not preempt it,
254 * or the caller of kthread_park() may spend more time in
255 * wait_task_inactive().
256 */
257 preempt_disable();
258 complete(&self->parked);
259 schedule_preempt_disabled();
260 preempt_enable();
261 }
262 __set_current_state(TASK_RUNNING);
263}
264
265void kthread_parkme(void)
266{
267 __kthread_parkme(to_kthread(current));
268}
269EXPORT_SYMBOL_GPL(kthread_parkme);
270
271static int kthread(void *_create)
272{
273 /* Copy data: it's on kthread's stack */
274 struct kthread_create_info *create = _create;
275 int (*threadfn)(void *data) = create->threadfn;
276 void *data = create->data;
277 struct completion *done;
278 struct kthread *self;
279 int ret;
280
281 set_kthread_struct(current);
282 self = to_kthread(current);
283
284 /* If user was SIGKILLed, I release the structure. */
285 done = xchg(&create->done, NULL);
286 if (!done) {
287 kfree(create);
288 do_exit(-EINTR);
289 }
290
291 if (!self) {
292 create->result = ERR_PTR(-ENOMEM);
293 complete(done);
294 do_exit(-ENOMEM);
295 }
296
297 self->threadfn = threadfn;
298 self->data = data;
299 init_completion(&self->exited);
300 init_completion(&self->parked);
301 current->vfork_done = &self->exited;
302
303 /* OK, tell user we're spawned, wait for stop or wakeup */
304 __set_current_state(TASK_UNINTERRUPTIBLE);
305 create->result = current;
306 /*
307 * Thread is going to call schedule(), do not preempt it,
308 * or the creator may spend more time in wait_task_inactive().
309 */
310 preempt_disable();
311 complete(done);
312 schedule_preempt_disabled();
313 preempt_enable();
314
315 ret = -EINTR;
316 if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
317 cgroup_kthread_ready();
318 __kthread_parkme(self);
319 ret = threadfn(data);
320 }
321 do_exit(ret);
322}
323
324/* called from kernel_clone() to get node information for the task about to be created */
325int tsk_fork_get_node(struct task_struct *tsk)
326{
327#ifdef CONFIG_NUMA
328 if (tsk == kthreadd_task)
329 return tsk->pref_node_fork;
330#endif
331 return NUMA_NO_NODE;
332}
333
334static void create_kthread(struct kthread_create_info *create)
335{
336 int pid;
337
338#ifdef CONFIG_NUMA
339 current->pref_node_fork = create->node;
340#endif
341 /* We want our own signal handler (we take no signals by default). */
342 pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
343 if (pid < 0) {
344 /* If user was SIGKILLed, I release the structure. */
345 struct completion *done = xchg(&create->done, NULL);
346
347 if (!done) {
348 kfree(create);
349 return;
350 }
351 create->result = ERR_PTR(pid);
352 complete(done);
353 }
354}
355
356static __printf(4, 0)
357struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
358 void *data, int node,
359 const char namefmt[],
360 va_list args)
361{
362 DECLARE_COMPLETION_ONSTACK(done);
363 struct task_struct *task;
364 struct kthread_create_info *create = kmalloc(sizeof(*create),
365 GFP_KERNEL);
366
367 if (!create)
368 return ERR_PTR(-ENOMEM);
369 create->threadfn = threadfn;
370 create->data = data;
371 create->node = node;
372 create->done = &done;
373
374 spin_lock(&kthread_create_lock);
375 list_add_tail(&create->list, &kthread_create_list);
376 spin_unlock(&kthread_create_lock);
377
378 wake_up_process(kthreadd_task);
379 /*
380 * Wait for completion in killable state, for I might be chosen by
381 * the OOM killer while kthreadd is trying to allocate memory for
382 * new kernel thread.
383 */
384 if (unlikely(wait_for_completion_killable(&done))) {
385 /*
386 * If I was SIGKILLed before kthreadd (or new kernel thread)
387 * calls complete(), leave the cleanup of this structure to
388 * that thread.
389 */
390 if (xchg(&create->done, NULL))
391 return ERR_PTR(-EINTR);
392 /*
393 * kthreadd (or new kernel thread) will call complete()
394 * shortly.
395 */
396 wait_for_completion(&done);
397 }
398 task = create->result;
399 if (!IS_ERR(task)) {
400 static const struct sched_param param = { .sched_priority = 0 };
401 char name[TASK_COMM_LEN];
402
403 /*
404 * task is already visible to other tasks, so updating
405 * COMM must be protected.
406 */
407 vsnprintf(name, sizeof(name), namefmt, args);
408 set_task_comm(task, name);
409 /*
410 * root may have changed our (kthreadd's) priority or CPU mask.
411 * The kernel thread should not inherit these properties.
412 */
413 sched_setscheduler_nocheck(task, SCHED_NORMAL, &param);
414 set_cpus_allowed_ptr(task,
415 housekeeping_cpumask(HK_FLAG_KTHREAD));
416 }
417 kfree(create);
418 return task;
419}
420
421/**
422 * kthread_create_on_node - create a kthread.
423 * @threadfn: the function to run until signal_pending(current).
424 * @data: data ptr for @threadfn.
425 * @node: task and thread structures for the thread are allocated on this node
426 * @namefmt: printf-style name for the thread.
427 *
428 * Description: This helper function creates and names a kernel
429 * thread. The thread will be stopped: use wake_up_process() to start
430 * it. See also kthread_run(). The new thread has SCHED_NORMAL policy and
431 * is affine to all CPUs.
432 *
433 * If the thread is going to be bound to a particular cpu, give its node
434 * in @node to get NUMA affinity for the kthread stack; otherwise give NUMA_NO_NODE.
435 * When woken, the thread will run @threadfn() with @data as its
436 * argument. @threadfn() can either call do_exit() directly if it is a
437 * standalone thread for which no one will call kthread_stop(), or
438 * return when 'kthread_should_stop()' is true (which means
439 * kthread_stop() has been called). The return value should be zero
440 * or a negative error number; it will be passed to kthread_stop().
441 *
442 * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
443 */
444struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
445 void *data, int node,
446 const char namefmt[],
447 ...)
448{
449 struct task_struct *task;
450 va_list args;
451
452 va_start(args, namefmt);
453 task = __kthread_create_on_node(threadfn, data, node, namefmt, args);
454 va_end(args);
455
456 return task;
457}
458EXPORT_SYMBOL(kthread_create_on_node);
459
460static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, unsigned int state)
461{
462 unsigned long flags;
463
464 if (!wait_task_inactive(p, state)) {
465 WARN_ON(1);
466 return;
467 }
468
469 /* It's safe because the task is inactive. */
470 raw_spin_lock_irqsave(&p->pi_lock, flags);
471 do_set_cpus_allowed(p, mask);
472 p->flags |= PF_NO_SETAFFINITY;
473 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
474}
475
476static void __kthread_bind(struct task_struct *p, unsigned int cpu, unsigned int state)
477{
478 __kthread_bind_mask(p, cpumask_of(cpu), state);
479}
480
481void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
482{
483 __kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
484}
485
486/**
487 * kthread_bind - bind a just-created kthread to a cpu.
488 * @p: thread created by kthread_create().
489 * @cpu: cpu (might not be online, must be possible) for @k to run on.
490 *
491 * Description: This function is equivalent to set_cpus_allowed(),
492 * except that @cpu doesn't need to be online, and the thread must be
493 * stopped (i.e., just returned from kthread_create()).
494 */
495void kthread_bind(struct task_struct *p, unsigned int cpu)
496{
497 __kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
498}
499EXPORT_SYMBOL(kthread_bind);
500
501/**
502 * kthread_create_on_cpu - Create a cpu bound kthread
503 * @threadfn: the function to run until signal_pending(current).
504 * @data: data ptr for @threadfn.
505 * @cpu: The cpu on which the thread should be bound,
506 * @namefmt: printf-style name for the thread. Format is restricted
507 * to "name.*%u". Code fills in cpu number.
508 *
509 * Description: This helper function creates and names a kernel thread
510 */
511struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
512 void *data, unsigned int cpu,
513 const char *namefmt)
514{
515 struct task_struct *p;
516
517 p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
518 cpu);
519 if (IS_ERR(p))
520 return p;
521 kthread_bind(p, cpu);
522 /* CPU hotplug needs to bind once again when unparking the thread. */
523 to_kthread(p)->cpu = cpu;
524 return p;
525}
526
527void kthread_set_per_cpu(struct task_struct *k, int cpu)
528{
529 struct kthread *kthread = to_kthread(k);
530 if (!kthread)
531 return;
532
533 WARN_ON_ONCE(!(k->flags & PF_NO_SETAFFINITY));
534
535 if (cpu < 0) {
536 clear_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
537 return;
538 }
539
540 kthread->cpu = cpu;
541 set_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
542}
543
544bool kthread_is_per_cpu(struct task_struct *p)
545{
546 struct kthread *kthread = __to_kthread(p);
547 if (!kthread)
548 return false;
549
550 return test_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
551}
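/*
 * Illustrative sketch (assumption): hotplug/scheduler code can use this
 * helper to tell per-cpu kthreads apart from ordinary ones, e.g. to leave
 * them parked instead of migrating them when their CPU goes down:
 *
 *	if (kthread_is_per_cpu(p))
 *		...do not migrate, it will be rebound by kthread_unpark()...
 */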
552
553/**
554 * kthread_unpark - unpark a thread created by kthread_create().
555 * @k: thread created by kthread_create().
556 *
557 * Sets kthread_should_park() for @k to return false, wakes it, and
558 * waits for it to return. If the thread is marked percpu then it is
559 * bound to the cpu again.
560 */
561void kthread_unpark(struct task_struct *k)
562{
563 struct kthread *kthread = to_kthread(k);
564
565 /*
566 * Newly created kthread was parked when the CPU was offline.
567 * The binding was lost and we need to set it again.
568 */
569 if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
570 __kthread_bind(k, kthread->cpu, TASK_PARKED);
571
572 clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
573 /*
574 * __kthread_parkme() will either see !SHOULD_PARK or get the wakeup.
575 */
576 wake_up_state(k, TASK_PARKED);
577}
578EXPORT_SYMBOL_GPL(kthread_unpark);
579
580/**
581 * kthread_park - park a thread created by kthread_create().
582 * @k: thread created by kthread_create().
583 *
584 * Sets kthread_should_park() for @k to return true, wakes it, and
585 * waits for it to return. This can also be called after kthread_create()
586 * instead of calling wake_up_process(): the thread will park without
587 * calling threadfn().
588 *
589 * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
590 * If called by the kthread itself, just the park bit is set.
591 */
592int kthread_park(struct task_struct *k)
593{
594 struct kthread *kthread = to_kthread(k);
595
596 if (WARN_ON(k->flags & PF_EXITING))
597 return -ENOSYS;
598
599 if (WARN_ON_ONCE(test_bit(KTHREAD_SHOULD_PARK, &kthread->flags)))
600 return -EBUSY;
601
602 set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
603 if (k != current) {
604 wake_up_process(k);
605 /*
606 * Wait for __kthread_parkme() to complete(), this means we
607 * _will_ have TASK_PARKED and are about to call schedule().
608 */
609 wait_for_completion(&kthread->parked);
610 /*
611 * Now wait for that schedule() to complete and the task to
612 * get scheduled out.
613 */
614 WARN_ON_ONCE(!wait_task_inactive(k, TASK_PARKED));
615 }
616
617 return 0;
618}
619EXPORT_SYMBOL_GPL(kthread_park);
620
621/**
622 * kthread_stop - stop a thread created by kthread_create().
623 * @k: thread created by kthread_create().
624 *
625 * Sets kthread_should_stop() for @k to return true, wakes it, and
626 * waits for it to exit. This can also be called after kthread_create()
627 * instead of calling wake_up_process(): the thread will exit without
628 * calling threadfn().
629 *
630 * If threadfn() may call do_exit() itself, the caller must ensure
631 * task_struct can't go away.
632 *
633 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
634 * was never called.
635 */
636int kthread_stop(struct task_struct *k)
637{
638 struct kthread *kthread;
639 int ret;
640
641 trace_sched_kthread_stop(k);
642
643 get_task_struct(k);
644 kthread = to_kthread(k);
645 set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
646 kthread_unpark(k);
647 wake_up_process(k);
648 wait_for_completion(&kthread->exited);
649 ret = k->exit_code;
650 put_task_struct(k);
651
652 trace_sched_kthread_stop_ret(ret);
653 return ret;
654}
655EXPORT_SYMBOL(kthread_stop);
656
657int kthreadd(void *unused)
658{
659 struct task_struct *tsk = current;
660
661 /* Setup a clean context for our children to inherit. */
662 set_task_comm(tsk, "kthreadd");
663 ignore_signals(tsk);
664 set_cpus_allowed_ptr(tsk, housekeeping_cpumask(HK_FLAG_KTHREAD));
665 set_mems_allowed(node_states[N_MEMORY]);
666
667 current->flags |= PF_NOFREEZE;
668 cgroup_init_kthreadd();
669
670 for (;;) {
671 set_current_state(TASK_INTERRUPTIBLE);
672 if (list_empty(&kthread_create_list))
673 schedule();
674 __set_current_state(TASK_RUNNING);
675
676 spin_lock(&kthread_create_lock);
677 while (!list_empty(&kthread_create_list)) {
678 struct kthread_create_info *create;
679
680 create = list_entry(kthread_create_list.next,
681 struct kthread_create_info, list);
682 list_del_init(&create->list);
683 spin_unlock(&kthread_create_lock);
684
685 create_kthread(create);
686
687 spin_lock(&kthread_create_lock);
688 }
689 spin_unlock(&kthread_create_lock);
690 }
691
692 return 0;
693}
694
695void __kthread_init_worker(struct kthread_worker *worker,
696 const char *name,
697 struct lock_class_key *key)
698{
699 memset(worker, 0, sizeof(struct kthread_worker));
700 raw_spin_lock_init(&worker->lock);
701 lockdep_set_class_and_name(&worker->lock, key, name);
702 INIT_LIST_HEAD(&worker->work_list);
703 INIT_LIST_HEAD(&worker->delayed_work_list);
704}
705EXPORT_SYMBOL_GPL(__kthread_init_worker);
706
707/**
708 * kthread_worker_fn - kthread function to process kthread_worker
709 * @worker_ptr: pointer to initialized kthread_worker
710 *
711 * This function implements the main cycle of kthread worker. It processes
712 * work_list until it is stopped with kthread_stop(). It sleeps when the queue
713 * is empty.
714 *
715 * The works must not hold any locks or leave preemption or interrupts
716 * disabled when they finish. A safe point for freezing is provided after one
717 * work finishes and before the next one is started.
718 *
719 * Also, a work must not be handled by more than one worker at the same time;
720 * see also kthread_queue_work().
721 */
722int kthread_worker_fn(void *worker_ptr)
723{
724 struct kthread_worker *worker = worker_ptr;
725 struct kthread_work *work;
726
727 /*
728 * FIXME: Update the check and remove the assignment when all kthread
729 * worker users are created using kthread_create_worker*() functions.
730 */
731 WARN_ON(worker->task && worker->task != current);
732 worker->task = current;
733
734 if (worker->flags & KTW_FREEZABLE)
735 set_freezable();
736
737repeat:
738 set_current_state(TASK_INTERRUPTIBLE); /* mb paired w/ kthread_stop */
739
740 if (kthread_should_stop()) {
741 __set_current_state(TASK_RUNNING);
742 raw_spin_lock_irq(&worker->lock);
743 worker->task = NULL;
744 raw_spin_unlock_irq(&worker->lock);
745 return 0;
746 }
747
748 work = NULL;
749 raw_spin_lock_irq(&worker->lock);
750 if (!list_empty(&worker->work_list)) {
751 work = list_first_entry(&worker->work_list,
752 struct kthread_work, node);
753 list_del_init(&work->node);
754 }
755 worker->current_work = work;
756 raw_spin_unlock_irq(&worker->lock);
757
758 if (work) {
759 kthread_work_func_t func = work->func;
760 __set_current_state(TASK_RUNNING);
761 trace_sched_kthread_work_execute_start(work);
762 work->func(work);
763 /*
764 * Avoid dereferencing work after this point. The trace
765 * event only cares about the address.
766 */
767 trace_sched_kthread_work_execute_end(work, func);
768 } else if (!freezing(current))
769 schedule();
770
771 try_to_freeze();
772 cond_resched();
773 goto repeat;
774}
775EXPORT_SYMBOL_GPL(kthread_worker_fn);
776
777static __printf(3, 0) struct kthread_worker *
778__kthread_create_worker(int cpu, unsigned int flags,
779 const char namefmt[], va_list args)
780{
781 struct kthread_worker *worker;
782 struct task_struct *task;
783 int node = NUMA_NO_NODE;
784
785 worker = kzalloc(sizeof(*worker), GFP_KERNEL);
786 if (!worker)
787 return ERR_PTR(-ENOMEM);
788
789 kthread_init_worker(worker);
790
791 if (cpu >= 0)
792 node = cpu_to_node(cpu);
793
794 task = __kthread_create_on_node(kthread_worker_fn, worker,
795 node, namefmt, args);
796 if (IS_ERR(task))
797 goto fail_task;
798
799 if (cpu >= 0)
800 kthread_bind(task, cpu);
801
802 worker->flags = flags;
803 worker->task = task;
804 wake_up_process(task);
805 return worker;
806
807fail_task:
808 kfree(worker);
809 return ERR_CAST(task);
810}
811
812/**
813 * kthread_create_worker - create a kthread worker
814 * @flags: flags modifying the default behavior of the worker
815 * @namefmt: printf-style name for the kthread worker (task).
816 *
817 * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
818 * when the needed structures could not be allocated, and ERR_PTR(-EINTR)
819 * when the worker was SIGKILLed.
820 */
821struct kthread_worker *
822kthread_create_worker(unsigned int flags, const char namefmt[], ...)
823{
824 struct kthread_worker *worker;
825 va_list args;
826
827 va_start(args, namefmt);
828 worker = __kthread_create_worker(-1, flags, namefmt, args);
829 va_end(args);
830
831 return worker;
832}
833EXPORT_SYMBOL(kthread_create_worker);
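
/*
 * Illustrative sketch, not part of this file: creating a dedicated worker.
 * example_make_worker() is a hypothetical name.
 */
static struct kthread_worker * __maybe_unused example_make_worker(void)
{
	struct kthread_worker *w;

	w = kthread_create_worker(0, "example_worker");
	if (IS_ERR(w))
		return w;	/* ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR), see above */

	/* queue items with kthread_queue_work(w, ...) from here on */
	return w;
}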
834
835/**
836 * kthread_create_worker_on_cpu - create a kthread worker and bind it
837 * to a given CPU and the associated NUMA node.
838 * @cpu: CPU number
839 * @flags: flags modifying the default behavior of the worker
840 * @namefmt: printf-style name for the kthread worker (task).
841 *
842 * Use a valid CPU number if you want to bind the kthread worker
843 * to the given CPU and the associated NUMA node.
844 *
845 * A good practice is to also include the CPU number in the worker name.
846 * For example, use kthread_create_worker_on_cpu(cpu, 0, "helper/%d", cpu).
847 *
848 * CPU hotplug:
849 * The kthread worker API is simple and generic. It just provides a way
850 * to create, use, and destroy workers.
851 *
852 * It is up to the API user how to handle CPU hotplug. They have to decide
853 * how to handle pending work items, prevent queuing new ones, and
854 * restore the functionality when the CPU goes off and on. There are a
855 * few catches:
856 *
857 * - CPU affinity gets lost when the worker is scheduled on an offline CPU.
858 *
859 * - The worker might not exist if the CPU was offline when the user
860 * created the workers.
861 *
862 * Good practice is to implement two CPU hotplug callbacks and to
863 * destroy/create the worker when the CPU goes down/up.
864 *
865 * Return:
866 * The pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
867 * when the needed structures could not be allocated, and ERR_PTR(-EINTR)
868 * when the worker was SIGKILLed.
869 */
870struct kthread_worker *
871kthread_create_worker_on_cpu(int cpu, unsigned int flags,
872 const char namefmt[], ...)
873{
874 struct kthread_worker *worker;
875 va_list args;
876
877 va_start(args, namefmt);
878 worker = __kthread_create_worker(cpu, flags, namefmt, args);
879 va_end(args);
880
881 return worker;
882}
883EXPORT_SYMBOL(kthread_create_worker_on_cpu);
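
/*
 * Illustrative sketch, not part of this file: a CPU-bound worker whose task
 * name carries the CPU number, as recommended above. "example_helper" is a
 * hypothetical name.
 */
static struct kthread_worker * __maybe_unused example_worker_for_cpu(int cpu)
{
	return kthread_create_worker_on_cpu(cpu, 0, "example_helper/%d", cpu);
}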
884
885/*
886 * Returns true when the work could not be queued at the moment.
887 * It happens when it is already pending in a worker list
888 * or when it is being cancelled.
889 */
890static inline bool queuing_blocked(struct kthread_worker *worker,
891 struct kthread_work *work)
892{
893 lockdep_assert_held(&worker->lock);
894
895 return !list_empty(&work->node) || work->canceling;
896}
897
898static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
899 struct kthread_work *work)
900{
901 lockdep_assert_held(&worker->lock);
902 WARN_ON_ONCE(!list_empty(&work->node));
903 /* Do not use a work with >1 worker, see kthread_queue_work() */
904 WARN_ON_ONCE(work->worker && work->worker != worker);
905}
906
907/* insert @work before @pos in @worker */
908static void kthread_insert_work(struct kthread_worker *worker,
909 struct kthread_work *work,
910 struct list_head *pos)
911{
912 kthread_insert_work_sanity_check(worker, work);
913
914 trace_sched_kthread_work_queue_work(worker, work);
915
916 list_add_tail(&work->node, pos);
917 work->worker = worker;
918 if (!worker->current_work && likely(worker->task))
919 wake_up_process(worker->task);
920}
921
922/**
923 * kthread_queue_work - queue a kthread_work
924 * @worker: target kthread_worker
925 * @work: kthread_work to queue
926 *
927 * Queue @work for async execution on @worker. @worker
928 * must have been created with kthread_create_worker(). Returns %true
929 * if @work was successfully queued, %false if it was already pending.
930 *
931 * Reinitialize the work if it needs to be used by another worker.
932 * For example, when the worker was stopped and started again.
933 */
934bool kthread_queue_work(struct kthread_worker *worker,
935 struct kthread_work *work)
936{
937 bool ret = false;
938 unsigned long flags;
939
940 raw_spin_lock_irqsave(&worker->lock, flags);
941 if (!queuing_blocked(worker, work)) {
942 kthread_insert_work(worker, work, &worker->work_list);
943 ret = true;
944 }
945 raw_spin_unlock_irqrestore(&worker->lock, flags);
946 return ret;
947}
948EXPORT_SYMBOL_GPL(kthread_queue_work);
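
/*
 * Illustrative sketch, not part of this file: embedding a kthread_work in a
 * private structure and queueing it. All example_* names are hypothetical.
 */
struct example_ctx {
	struct kthread_work work;
	int payload;
};

static void example_work_fn(struct kthread_work *work)
{
	struct example_ctx *ctx = container_of(work, struct example_ctx, work);

	/* runs in the worker thread's context */
	(void)ctx->payload;
}

static bool __maybe_unused example_submit_once(struct kthread_worker *worker,
					       struct example_ctx *ctx)
{
	/* Initialize once; never re-init a work that may still be pending. */
	kthread_init_work(&ctx->work, example_work_fn);

	/* %false means the work was already pending on a worker. */
	return kthread_queue_work(worker, &ctx->work);
}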
949
950/**
951 * kthread_delayed_work_timer_fn - callback that queues the associated kthread
952 * delayed work when the timer expires.
953 * @t: pointer to the expired timer
954 *
955 * The signature of this function is dictated by struct timer_list.
956 * It is expected to be called from an irq-safe timer with interrupts already off.
957 */
958void kthread_delayed_work_timer_fn(struct timer_list *t)
959{
960 struct kthread_delayed_work *dwork = from_timer(dwork, t, timer);
961 struct kthread_work *work = &dwork->work;
962 struct kthread_worker *worker = work->worker;
963 unsigned long flags;
964
965 /*
966 * This might happen when a pending work is reinitialized.
967 * It means that the work is being used in a wrong way.
968 */
969 if (WARN_ON_ONCE(!worker))
970 return;
971
972 raw_spin_lock_irqsave(&worker->lock, flags);
973 /* Work must not be used with >1 worker, see kthread_queue_work(). */
974 WARN_ON_ONCE(work->worker != worker);
975
976 /* Move the work from worker->delayed_work_list. */
977 WARN_ON_ONCE(list_empty(&work->node));
978 list_del_init(&work->node);
979 if (!work->canceling)
980 kthread_insert_work(worker, work, &worker->work_list);
981
982 raw_spin_unlock_irqrestore(&worker->lock, flags);
983}
984EXPORT_SYMBOL(kthread_delayed_work_timer_fn);
985
986static void __kthread_queue_delayed_work(struct kthread_worker *worker,
987 struct kthread_delayed_work *dwork,
988 unsigned long delay)
989{
990 struct timer_list *timer = &dwork->timer;
991 struct kthread_work *work = &dwork->work;
992
993 WARN_ON_FUNCTION_MISMATCH(timer->function,
994 kthread_delayed_work_timer_fn);
995
996 /*
997 * If @delay is 0, queue @dwork->work immediately. This is for
998 * both optimization and correctness. The earliest @timer can
999 * expire is on the closest next tick, and delayed_work users depend
1000 * on there being no such delay when @delay is 0.
1001 */
1002 if (!delay) {
1003 kthread_insert_work(worker, work, &worker->work_list);
1004 return;
1005 }
1006
1007 /* Be paranoid and try to detect possible races already now. */
1008 kthread_insert_work_sanity_check(worker, work);
1009
1010 list_add(&work->node, &worker->delayed_work_list);
1011 work->worker = worker;
1012 timer->expires = jiffies + delay;
1013 add_timer(timer);
1014}
1015
1016/**
1017 * kthread_queue_delayed_work - queue the associated kthread work
1018 * after a delay.
1019 * @worker: target kthread_worker
1020 * @dwork: kthread_delayed_work to queue
1021 * @delay: number of jiffies to wait before queuing
1022 *
1023 * If the work is not already pending, it starts a timer that will queue
1024 * the work after the given @delay. If @delay is zero, it queues the
1025 * work immediately.
1026 *
1027 * Return: %false if the @work was already pending, meaning that either
1028 * its timer was running or the work was already queued. It returns %true
1029 * otherwise.
1030 */
1031bool kthread_queue_delayed_work(struct kthread_worker *worker,
1032 struct kthread_delayed_work *dwork,
1033 unsigned long delay)
1034{
1035 struct kthread_work *work = &dwork->work;
1036 unsigned long flags;
1037 bool ret = false;
1038
1039 raw_spin_lock_irqsave(&worker->lock, flags);
1040
1041 if (!queuing_blocked(worker, work)) {
1042 __kthread_queue_delayed_work(worker, dwork, delay);
1043 ret = true;
1044 }
1045
1046 raw_spin_unlock_irqrestore(&worker->lock, flags);
1047 return ret;
1048}
1049EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
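
/*
 * Illustrative sketch, not part of this file: arming a delayed work item.
 * example_dwork and example_arm() are hypothetical.
 */
static struct kthread_delayed_work example_dwork;

static void example_dwork_fn(struct kthread_work *work)
{
	/* runs in the worker once the timer embedded in example_dwork fires */
}

static void __maybe_unused example_arm(struct kthread_worker *worker)
{
	kthread_init_delayed_work(&example_dwork, example_dwork_fn);

	/* queue roughly one second from now; a delay of 0 queues immediately */
	kthread_queue_delayed_work(worker, &example_dwork, HZ);
}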
1050
1051struct kthread_flush_work {
1052 struct kthread_work work;
1053 struct completion done;
1054};
1055
1056static void kthread_flush_work_fn(struct kthread_work *work)
1057{
1058 struct kthread_flush_work *fwork =
1059 container_of(work, struct kthread_flush_work, work);
1060 complete(&fwork->done);
1061}
1062
1063/**
1064 * kthread_flush_work - flush a kthread_work
1065 * @work: work to flush
1066 *
1067 * If @work is queued or executing, wait for it to finish execution.
1068 */
1069void kthread_flush_work(struct kthread_work *work)
1070{
1071 struct kthread_flush_work fwork = {
1072 KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
1073 COMPLETION_INITIALIZER_ONSTACK(fwork.done),
1074 };
1075 struct kthread_worker *worker;
1076 bool noop = false;
1077
1078 worker = work->worker;
1079 if (!worker)
1080 return;
1081
1082 raw_spin_lock_irq(&worker->lock);
1083 /* Work must not be used with >1 worker, see kthread_queue_work(). */
1084 WARN_ON_ONCE(work->worker != worker);
1085
1086 if (!list_empty(&work->node))
1087 kthread_insert_work(worker, &fwork.work, work->node.next);
1088 else if (worker->current_work == work)
1089 kthread_insert_work(worker, &fwork.work,
1090 worker->work_list.next);
1091 else
1092 noop = true;
1093
1094 raw_spin_unlock_irq(&worker->lock);
1095
1096 if (!noop)
1097 wait_for_completion(&fwork.done);
1098}
1099EXPORT_SYMBOL_GPL(kthread_flush_work);
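
/*
 * Illustrative sketch, not part of this file: waiting for a previously
 * queued item to finish, using the hypothetical example_ctx from the
 * queueing sketch above.
 */
static void __maybe_unused example_wait_for_work(struct example_ctx *ctx)
{
	/*
	 * After this returns, example_work_fn() is neither queued nor running,
	 * provided nothing re-queues ctx->work concurrently.
	 */
	kthread_flush_work(&ctx->work);
}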
1100
1101/*
1102 * Make sure that the timer is neither set nor running and could
1103 * not manipulate the work list_head any longer.
1104 *
1105 * The function is called under worker->lock. The lock is temporarily
1106 * released but the timer can't be set again in the meantime.
1107 */
1108static void kthread_cancel_delayed_work_timer(struct kthread_work *work,
1109 unsigned long *flags)
1110{
1111 struct kthread_delayed_work *dwork =
1112 container_of(work, struct kthread_delayed_work, work);
1113 struct kthread_worker *worker = work->worker;
1114
1115 /*
1116 * del_timer_sync() must be called to make sure that the timer
1117 * callback is not running. The lock must be temporarily released
1118 * to avoid a deadlock with the callback. In the meantime,
1119 * any queuing is blocked by setting the canceling counter.
1120 */
1121 work->canceling++;
1122 raw_spin_unlock_irqrestore(&worker->lock, *flags);
1123 del_timer_sync(&dwork->timer);
1124 raw_spin_lock_irqsave(&worker->lock, *flags);
1125 work->canceling--;
1126}
1127
1128/*
1129 * This function removes the work from the worker queue.
1130 *
1131 * It is called under worker->lock. The caller must make sure that
1132 * the timer used by delayed work is not running, e.g. by calling
1133 * kthread_cancel_delayed_work_timer().
1134 *
1135 * The work might still be in use when this function finishes. See the
1136 * current_work processed by the worker.
1137 *
1138 * Return: %true if @work was pending and successfully canceled,
1139 * %false if @work was not pending
1140 */
1141static bool __kthread_cancel_work(struct kthread_work *work)
1142{
1143 /*
1144 * Try to remove the work from a worker list. It might either
1145 * be from worker->work_list or from worker->delayed_work_list.
1146 */
1147 if (!list_empty(&work->node)) {
1148 list_del_init(&work->node);
1149 return true;
1150 }
1151
1152 return false;
1153}
1154
1155/**
1156 * kthread_mod_delayed_work - modify delay of or queue a kthread delayed work
1157 * @worker: kthread worker to use
1158 * @dwork: kthread delayed work to queue
1159 * @delay: number of jiffies to wait before queuing
1160 *
1161 * If @dwork is idle, equivalent to kthread_queue_delayed_work(). Otherwise,
1162 * modify @dwork's timer so that it expires after @delay. If @delay is zero,
1163 * @work is guaranteed to be queued immediately.
1164 *
1165 * Return: %false if @dwork was idle and queued, %true otherwise.
1166 *
1167 * A special case is when the work is being canceled in parallel.
1168 * It might be caused either by a real kthread_cancel_delayed_work_sync()
1169 * or by yet another kthread_mod_delayed_work() call. We let the other operation
1170 * win and return %true here. The return value can be used for reference
1171 * counting, and the number of queued works stays the same. Anyway, the caller
1172 * is supposed to synchronize these operations in a reasonable way.
1173 *
1174 * This function is safe to call from any context including IRQ handler.
1175 * See __kthread_cancel_work() and kthread_delayed_work_timer_fn()
1176 * for details.
1177 */
1178bool kthread_mod_delayed_work(struct kthread_worker *worker,
1179 struct kthread_delayed_work *dwork,
1180 unsigned long delay)
1181{
1182 struct kthread_work *work = &dwork->work;
1183 unsigned long flags;
1184 int ret;
1185
1186 raw_spin_lock_irqsave(&worker->lock, flags);
1187
1188 /* Do not bother with canceling when never queued. */
1189 if (!work->worker) {
1190 ret = false;
1191 goto fast_queue;
1192 }
1193
1194 /* Work must not be used with >1 worker, see kthread_queue_work() */
1195 WARN_ON_ONCE(work->worker != worker);
1196
1197 /*
1198 * Temporarily cancel the work but do not fight with another command
1199 * that is canceling the work as well.
1200 *
1201 * It is a bit tricky because of possible races with another
1202 * mod_delayed_work() and cancel_delayed_work() callers.
1203 *
1204 * The timer must be canceled first because worker->lock is released
1205 * when doing so. But the work can be removed from the queue (list)
1206 * only when it can be queued again so that the return value can
1207 * be used for reference counting.
1208 */
1209 kthread_cancel_delayed_work_timer(work, &flags);
1210 if (work->canceling) {
1211 /* The number of works in the queue does not change. */
1212 ret = true;
1213 goto out;
1214 }
1215 ret = __kthread_cancel_work(work);
1216
1217fast_queue:
1218 __kthread_queue_delayed_work(worker, dwork, delay);
1219out:
1220 raw_spin_unlock_irqrestore(&worker->lock, flags);
1221 return ret;
1222}
1223EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);
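
/*
 * Illustrative sketch, not part of this file: a "push the deadline back"
 * pattern. Each call re-arms the hypothetical example_dwork from the sketch
 * above, so its handler only runs after ~5s without a kick.
 */
static void __maybe_unused example_kick_watchdog(struct kthread_worker *worker)
{
	/* Safe from IRQ context; returns %true if the work was already pending. */
	kthread_mod_delayed_work(worker, &example_dwork, 5 * HZ);
}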
1224
1225static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
1226{
1227 struct kthread_worker *worker = work->worker;
1228 unsigned long flags;
1229 int ret = false;
1230
1231 if (!worker)
1232 goto out;
1233
1234 raw_spin_lock_irqsave(&worker->lock, flags);
1235 /* Work must not be used with >1 worker, see kthread_queue_work(). */
1236 WARN_ON_ONCE(work->worker != worker);
1237
1238 if (is_dwork)
1239 kthread_cancel_delayed_work_timer(work, &flags);
1240
1241 ret = __kthread_cancel_work(work);
1242
1243 if (worker->current_work != work)
1244 goto out_fast;
1245
1246 /*
1247 * The work is in progress and we need to wait with the lock released.
1248 * In the meantime, block any queuing by setting the canceling counter.
1249 */
1250 work->canceling++;
1251 raw_spin_unlock_irqrestore(&worker->lock, flags);
1252 kthread_flush_work(work);
1253 raw_spin_lock_irqsave(&worker->lock, flags);
1254 work->canceling--;
1255
1256out_fast:
1257 raw_spin_unlock_irqrestore(&worker->lock, flags);
1258out:
1259 return ret;
1260}
1261
1262/**
1263 * kthread_cancel_work_sync - cancel a kthread work and wait for it to finish
1264 * @work: the kthread work to cancel
1265 *
1266 * Cancel @work and wait for its execution to finish. This function
1267 * can be used even if the work re-queues itself. On return from this
1268 * function, @work is guaranteed to be not pending or executing on any CPU.
1269 *
1270 * kthread_cancel_work_sync(&delayed_work->work) must not be used for
1271 * delayed works. Use kthread_cancel_delayed_work_sync() instead.
1272 *
1273 * The caller must ensure that the worker on which @work was last
1274 * queued can't be destroyed before this function returns.
1275 *
1276 * Return: %true if @work was pending, %false otherwise.
1277 */
1278bool kthread_cancel_work_sync(struct kthread_work *work)
1279{
1280 return __kthread_cancel_work_sync(work, false);
1281}
1282EXPORT_SYMBOL_GPL(kthread_cancel_work_sync);
1283
1284/**
1285 * kthread_cancel_delayed_work_sync - cancel a kthread delayed work and
1286 * wait for it to finish.
1287 * @dwork: the kthread delayed work to cancel
1288 *
1289 * This is kthread_cancel_work_sync() for delayed works.
1290 *
1291 * Return: %true if @dwork was pending, %false otherwise.
1292 */
1293bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *dwork)
1294{
1295 return __kthread_cancel_work_sync(&dwork->work, true);
1296}
1297EXPORT_SYMBOL_GPL(kthread_cancel_delayed_work_sync);
1298
1299/**
1300 * kthread_flush_worker - flush all current works on a kthread_worker
1301 * @worker: worker to flush
1302 *
1303 * Wait until all currently executing or pending works on @worker are
1304 * finished.
1305 */
1306void kthread_flush_worker(struct kthread_worker *worker)
1307{
1308 struct kthread_flush_work fwork = {
1309 KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
1310 COMPLETION_INITIALIZER_ONSTACK(fwork.done),
1311 };
1312
1313 kthread_queue_work(worker, &fwork.work);
1314 wait_for_completion(&fwork.done);
1315}
1316EXPORT_SYMBOL_GPL(kthread_flush_worker);
1317
1318/**
1319 * kthread_destroy_worker - destroy a kthread worker
1320 * @worker: worker to be destroyed
1321 *
1322 * Flush and destroy @worker. The simple flush is enough because the kthread
1323 * worker API is used only in trivial scenarios. There are no multi-step state
1324 * machines needed.
1325 */
1326void kthread_destroy_worker(struct kthread_worker *worker)
1327{
1328 struct task_struct *task;
1329
1330 task = worker->task;
1331 if (WARN_ON(!task))
1332 return;
1333
1334 kthread_flush_worker(worker);
1335 kthread_stop(task);
1336 WARN_ON(!list_empty(&worker->work_list));
1337 kfree(worker);
1338}
1339EXPORT_SYMBOL(kthread_destroy_worker);
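
/*
 * Illustrative sketch, not part of this file: teardown ordering. Stop
 * producers first, cancel outstanding delayed items, then destroy the
 * worker; kthread_destroy_worker() itself flushes remaining work and stops
 * the task. example_dwork is the hypothetical item from the sketches above.
 */
static void __maybe_unused example_shutdown(struct kthread_worker *worker)
{
	/* No new queueing may happen from this point on. */
	kthread_cancel_delayed_work_sync(&example_dwork);
	kthread_destroy_worker(worker);
}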
1340
1341/**
1342 * kthread_use_mm - make the calling kthread operate on an address space
1343 * @mm: address space to operate on
1344 */
1345void kthread_use_mm(struct mm_struct *mm)
1346{
1347 struct mm_struct *active_mm;
1348 struct task_struct *tsk = current;
1349
1350 WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
1351 WARN_ON_ONCE(tsk->mm);
1352
1353 task_lock(tsk);
1354 /* Hold off tlb flush IPIs while switching mm's */
1355 local_irq_disable();
1356 active_mm = tsk->active_mm;
1357 if (active_mm != mm) {
1358 mmgrab(mm);
1359 tsk->active_mm = mm;
1360 }
1361 tsk->mm = mm;
1362 membarrier_update_current_mm(mm);
1363 switch_mm_irqs_off(active_mm, mm, tsk);
1364 local_irq_enable();
1365 task_unlock(tsk);
1366#ifdef finish_arch_post_lock_switch
1367 finish_arch_post_lock_switch();
1368#endif
1369
1370 /*
1371 * When a kthread starts operating on an address space, the loop
1372 * in membarrier_{private,global}_expedited() may not observe
1373 * the update to tsk->mm and thus not issue an IPI. Membarrier requires a
1374 * memory barrier after storing to tsk->mm, before accessing
1375 * user-space memory. A full memory barrier for membarrier
1376 * {PRIVATE,GLOBAL}_EXPEDITED is implicitly provided by
1377 * mmdrop(), or explicitly with smp_mb().
1378 */
1379 if (active_mm != mm)
1380 mmdrop(active_mm);
1381 else
1382 smp_mb();
1383
1384 to_kthread(tsk)->oldfs = force_uaccess_begin();
1385}
1386EXPORT_SYMBOL_GPL(kthread_use_mm);
1387
1388/**
1389 * kthread_unuse_mm - reverse the effect of kthread_use_mm()
1390 * @mm: address space to operate on
1391 */
1392void kthread_unuse_mm(struct mm_struct *mm)
1393{
1394 struct task_struct *tsk = current;
1395
1396 WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
1397 WARN_ON_ONCE(!tsk->mm);
1398
1399 force_uaccess_end(to_kthread(tsk)->oldfs);
1400
1401 task_lock(tsk);
1402 /*
1403 * When a kthread stops operating on an address space, the loop
1404 * in membarrier_{private,global}_expedited() may not observe
1405 * the update to tsk->mm and thus not issue an IPI. Membarrier requires a
1406 * memory barrier after accessing user-space memory, before
1407 * clearing tsk->mm.
1408 */
1409 smp_mb__after_spinlock();
1410 sync_mm_rss(mm);
1411 local_irq_disable();
1412 tsk->mm = NULL;
1413 membarrier_update_current_mm(NULL);
1414 /* active_mm is still 'mm' */
1415 enter_lazy_tlb(mm, tsk);
1416 local_irq_enable();
1417 task_unlock(tsk);
1418}
1419EXPORT_SYMBOL_GPL(kthread_unuse_mm);
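
/*
 * Illustrative sketch, not part of this file: the required pairing when a
 * kthread temporarily needs a user address space. example_touch_user() and
 * its arguments are hypothetical.
 */
static int __maybe_unused example_touch_user(struct mm_struct *mm,
					     int __user *uaddr, int val)
{
	int ret;

	kthread_use_mm(mm);		/* current->mm now points to @mm */
	ret = copy_to_user(uaddr, &val, sizeof(val)) ? -EFAULT : 0;
	kthread_unuse_mm(mm);		/* must pair with kthread_use_mm() */

	return ret;
}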
1420
1421#ifdef CONFIG_BLK_CGROUP
1422/**
1423 * kthread_associate_blkcg - associate blkcg to current kthread
1424 * @css: the cgroup info
1425 *
1426 * Current thread must be a kthread. The thread is running jobs on behalf of
1427 * other threads. In some cases, we expect the jobs to attach the cgroup info of
1428 * the original threads instead of that of the current thread. This function stores
1429 * the original thread's cgroup info in the current kthread context for later
1430 * retrieval.
1431 */
1432void kthread_associate_blkcg(struct cgroup_subsys_state *css)
1433{
1434 struct kthread *kthread;
1435
1436 if (!(current->flags & PF_KTHREAD))
1437 return;
1438 kthread = to_kthread(current);
1439 if (!kthread)
1440 return;
1441
1442 if (kthread->blkcg_css) {
1443 css_put(kthread->blkcg_css);
1444 kthread->blkcg_css = NULL;
1445 }
1446 if (css) {
1447 css_get(css);
1448 kthread->blkcg_css = css;
1449 }
1450}
1451EXPORT_SYMBOL(kthread_associate_blkcg);
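
/*
 * Illustrative sketch, not part of this file: a kthread doing I/O on behalf
 * of another task temporarily adopts that task's blkcg and drops it when
 * done. example_io_on_behalf_of() is a hypothetical name.
 */
static void __maybe_unused example_io_on_behalf_of(struct cgroup_subsys_state *css)
{
	kthread_associate_blkcg(css);	/* takes its own reference on @css */

	/* ... submit I/O; it can now be attributed to @css via kthread_blkcg() ... */

	kthread_associate_blkcg(NULL);	/* drop the association */
}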
1452
1453/**
1454 * kthread_blkcg - get associated blkcg css of current kthread
1455 *
1456 * Current thread must be a kthread.
1457 */
1458struct cgroup_subsys_state *kthread_blkcg(void)
1459{
1460 struct kthread *kthread;
1461
1462 if (current->flags & PF_KTHREAD) {
1463 kthread = to_kthread(current);
1464 if (kthread)
1465 return kthread->blkcg_css;
1466 }
1467 return NULL;
1468}
1469EXPORT_SYMBOL(kthread_blkcg);
1470#endif