v6.2
// SPDX-License-Identifier: GPL-2.0
#include <linux/spinlock.h>
#include <linux/task_work.h>
#include <linux/resume_user_mode.h>

static struct callback_head work_exited; /* all we need is ->next == NULL */

/**
 * task_work_add - ask the @task to execute @work->func()
 * @task: the task which should run the callback
 * @work: the callback to run
 * @notify: how to notify the targeted task
 *
 * Queue @work for task_work_run() below and notify the @task if @notify
 * is @TWA_RESUME, @TWA_SIGNAL, or @TWA_SIGNAL_NO_IPI.
 *
 * @TWA_SIGNAL works like signals, in that it will interrupt the targeted
 * task and run the task_work, regardless of whether the task is currently
 * running in the kernel or userspace.
 * @TWA_SIGNAL_NO_IPI works like @TWA_SIGNAL, except it doesn't send a
 * reschedule IPI to force the targeted task to reschedule and run task_work.
 * This can be advantageous if there's no strict requirement that the
 * task_work be run as soon as possible, just whenever the task enters the
 * kernel anyway.
 * @TWA_RESUME work is run only when the task exits the kernel and returns to
 * user mode, or before entering guest mode.
 *
 * Fails if the @task is exiting/exited and thus it can't process this @work.
 * Otherwise @work->func() will be called when the @task goes through one of
 * the aforementioned transitions, or exits.
 *
 * If the targeted task is exiting, then an error is returned and the work item
 * is not queued. It's up to the caller to arrange for an alternative mechanism
 * in that case.
 *
 * Note: there is no ordering guarantee on works queued here. The task_work
 * list is LIFO.
 *
 * RETURNS:
 * 0 on success or -ESRCH.
 */
int task_work_add(struct task_struct *task, struct callback_head *work,
		  enum task_work_notify_mode notify)
{
	struct callback_head *head;

	/* record the work call stack in order to print it in KASAN reports */
	kasan_record_aux_stack(work);

	head = READ_ONCE(task->task_works);
	do {
		if (unlikely(head == &work_exited))
			return -ESRCH;
		work->next = head;
	} while (!try_cmpxchg(&task->task_works, &head, work));

	switch (notify) {
	case TWA_NONE:
		break;
	case TWA_RESUME:
		set_notify_resume(task);
		break;
	case TWA_SIGNAL:
		set_notify_signal(task);
		break;
	case TWA_SIGNAL_NO_IPI:
		__set_notify_signal(task);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	return 0;
}

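/*
 * Usage sketch (not part of this file; all my_* names are hypothetical):
 * callers typically embed the callback_head in their own per-request
 * structure and resolve it back with container_of(). Assumes
 * <linux/slab.h> for kzalloc()/kfree().
 */
struct my_req {
	struct callback_head cb;
	int result;
};

static void my_req_complete(struct callback_head *cb)
{
	struct my_req *req = container_of(cb, struct my_req, cb);

	/* Runs in the context of the targeted task, not the queuer. */
	pr_info("request finished: %d\n", req->result);
	kfree(req);
}

static int my_req_queue(struct task_struct *task, int result)
{
	struct my_req *req = kzalloc(sizeof(*req), GFP_KERNEL);

	if (!req)
		return -ENOMEM;
	req->result = result;
	init_task_work(&req->cb, my_req_complete);

	/* Fails once the task has run its final task_work_run(). */
	if (task_work_add(task, &req->cb, TWA_RESUME)) {
		kfree(req);
		return -ESRCH;
	}
	return 0;
}
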
/**
 * task_work_cancel_match - cancel a pending work added by task_work_add()
 * @task: the task which should execute the work
 * @match: match function to call
 * @data: data to be passed in to match function
 *
 * RETURNS:
 * The found work or NULL if not found.
 */
struct callback_head *
task_work_cancel_match(struct task_struct *task,
		       bool (*match)(struct callback_head *, void *data),
		       void *data)
{
	struct callback_head **pprev = &task->task_works;
	struct callback_head *work;
	unsigned long flags;

	if (likely(!task_work_pending(task)))
		return NULL;
	/*
	 * If cmpxchg() fails we continue without updating pprev.
	 * Either we raced with task_work_add() which added the
	 * new entry before this work, we will find it again. Or
	 * we raced with task_work_run(), *pprev == NULL/exited.
	 */
	raw_spin_lock_irqsave(&task->pi_lock, flags);
	work = READ_ONCE(*pprev);
	while (work) {
		if (!match(work, data)) {
			pprev = &work->next;
			work = READ_ONCE(*pprev);
		} else if (try_cmpxchg(pprev, &work, work->next))
			break;
	}
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	return work;
}

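/*
 * Sketch of a custom match function (hypothetical, continuing the
 * my_req example above): cancel only the pending my_req whose result
 * equals *(int *)data.
 */
static bool my_req_match(struct callback_head *cb, void *data)
{
	struct my_req *req;

	if (cb->func != my_req_complete)
		return false;
	req = container_of(cb, struct my_req, cb);
	return req->result == *(int *)data;
}

static void my_req_cancel_result(struct task_struct *task, int result)
{
	struct callback_head *cb;

	/* &result is safe: the match runs synchronously under pi_lock. */
	cb = task_work_cancel_match(task, my_req_match, &result);
	if (cb)
		kfree(container_of(cb, struct my_req, cb));
}
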
static bool task_work_func_match(struct callback_head *cb, void *data)
{
	return cb->func == data;
}

/**
 * task_work_cancel - cancel a pending work added by task_work_add()
 * @task: the task which should execute the work
 * @func: identifies the work to remove
 *
 * Find the last queued pending work with ->func == @func and remove
 * it from queue.
 *
 * RETURNS:
 * The found work or NULL if not found.
 */
struct callback_head *
task_work_cancel(struct task_struct *task, task_work_func_t func)
{
	return task_work_cancel_match(task, task_work_func_match, func);
}

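/*
 * Sketch (hypothetical names as above): remove the most recently
 * queued my_req work, matching purely on the callback pointer.
 */
static void my_req_cancel_last(struct task_struct *task)
{
	struct callback_head *cb = task_work_cancel(task, my_req_complete);

	if (cb)
		kfree(container_of(cb, struct my_req, cb));
}
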
/**
 * task_work_run - execute the works added by task_work_add()
 *
 * Flush the pending works. Should be used by the core kernel code.
 * Called before the task returns to user mode or stops, or when
 * it exits. In the latter case task_work_add() can no longer add the
 * new work after task_work_run() returns.
 */
void task_work_run(void)
{
	struct task_struct *task = current;
	struct callback_head *work, *head, *next;

	for (;;) {
		/*
		 * work->func() can do task_work_add(), do not set
		 * work_exited unless the list is empty.
		 */
		work = READ_ONCE(task->task_works);
		do {
			head = NULL;
			if (!work) {
				if (task->flags & PF_EXITING)
					head = &work_exited;
				else
					break;
			}
		} while (!try_cmpxchg(&task->task_works, &work, head));

		if (!work)
			break;
		/*
		 * Synchronize with task_work_cancel(). It cannot remove
		 * the first entry == work, cmpxchg(task_works) must fail.
		 * But it can remove another entry from the ->next list.
		 */
		raw_spin_lock_irq(&task->pi_lock);
		raw_spin_unlock_irq(&task->pi_lock);

		do {
			next = work->next;
			work->func(work);
			work = next;
			cond_resched();
		} while (work);
	}
}
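/*
 * The add/run pair above is a classic lock-free "push / pop-all"
 * intrusive stack. Below is a minimal userspace analogue using C11
 * atomics (a hypothetical demo, not kernel code); try_cmpxchg() plays
 * the role of atomic_compare_exchange_weak(), which likewise updates
 * the expected value on failure.
 */
#include <stdatomic.h>
#include <stdio.h>

struct node {
	struct node *next;
	int val;
};

static _Atomic(struct node *) stack;

/* Push one node: the same retry loop as task_work_add(). */
static void push(struct node *n)
{
	struct node *head = atomic_load(&stack);

	do {
		n->next = head;
	} while (!atomic_compare_exchange_weak(&stack, &head, n));
}

/* Detach the whole list at once, as task_work_run() does. */
static struct node *pop_all(void)
{
	return atomic_exchange(&stack, NULL);
}

int main(void)
{
	struct node a = { .val = 1 }, b = { .val = 2 };

	push(&a);
	push(&b);
	/* LIFO: prints 2 then 1, hence the "no ordering guarantee" note. */
	for (struct node *n = pop_all(); n; n = n->next)
		printf("%d\n", n->val);
	return 0;
}
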
v4.6
 
#include <linux/spinlock.h>
#include <linux/task_work.h>
#include <linux/tracehook.h>

static struct callback_head work_exited; /* all we need is ->next == NULL */

/**
 * task_work_add - ask the @task to execute @work->func()
 * @task: the task which should run the callback
 * @work: the callback to run
 * @notify: send the notification if true
 *
 * Queue @work for task_work_run() below and notify the @task if @notify.
 * Fails if the @task is exiting/exited and thus it can't process this @work.
 * Otherwise @work->func() will be called when the @task returns from kernel
 * mode or exits.
 *
 * This is like the signal handler which runs in kernel mode, but it doesn't
 * try to wake up the @task.
 *
 * Note: there is no ordering guarantee on works queued here.
 *
 * RETURNS:
 * 0 on success or -ESRCH.
 */
int
task_work_add(struct task_struct *task, struct callback_head *work, bool notify)
{
	struct callback_head *head;

	do {
		head = ACCESS_ONCE(task->task_works);
		if (unlikely(head == &work_exited))
			return -ESRCH;
		work->next = head;
	} while (cmpxchg(&task->task_works, head, work) != head);

	if (notify)
		set_notify_resume(task);
	return 0;
}

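/*
 * Usage sketch for this older bool-notify API (hypothetical, reusing
 * struct my_req and my_req_complete() from the v6.2 sketch above):
 * true makes set_notify_resume() mark the task, false just queues the
 * work until the task next returns from kernel mode or exits.
 */
static int my_req_queue_v4(struct task_struct *task, struct my_req *req)
{
	init_task_work(&req->cb, my_req_complete);
	return task_work_add(task, &req->cb, true);
}
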
/**
 * task_work_cancel - cancel a pending work added by task_work_add()
 * @task: the task which should execute the work
 * @func: identifies the work to remove
 *
 * Find the last queued pending work with ->func == @func and remove
 * it from queue.
 *
 * RETURNS:
 * The found work or NULL if not found.
 */
struct callback_head *
task_work_cancel(struct task_struct *task, task_work_func_t func)
{
	struct callback_head **pprev = &task->task_works;
	struct callback_head *work;
	unsigned long flags;

	/*
	 * If cmpxchg() fails we continue without updating pprev.
	 * Either we raced with task_work_add() which added the
	 * new entry before this work, we will find it again. Or
	 * we raced with task_work_run(), *pprev == NULL/exited.
	 */
	raw_spin_lock_irqsave(&task->pi_lock, flags);
	while ((work = ACCESS_ONCE(*pprev))) {
		smp_read_barrier_depends();
		if (work->func != func)
			pprev = &work->next;
		else if (cmpxchg(pprev, work, work->next) == work)
			break;
	}
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	return work;
}

/**
 * task_work_run - execute the works added by task_work_add()
 *
 * Flush the pending works. Should be used by the core kernel code.
 * Called before the task returns to user mode or stops, or when
 * it exits. In the latter case task_work_add() can no longer add the
 * new work after task_work_run() returns.
 */
void task_work_run(void)
{
	struct task_struct *task = current;
	struct callback_head *work, *head, *next;

	for (;;) {
		/*
		 * work->func() can do task_work_add(), do not set
		 * work_exited unless the list is empty.
		 */
		do {
			work = ACCESS_ONCE(task->task_works);
			head = !work && (task->flags & PF_EXITING) ?
				&work_exited : NULL;
		} while (cmpxchg(&task->task_works, work, head) != work);

		if (!work)
			break;
		/*
		 * Synchronize with task_work_cancel(). It can't remove
		 * the first entry == work, cmpxchg(task_works) should
		 * fail, but it can play with *work and other entries.
		 */
		raw_spin_unlock_wait(&task->pi_lock);
		smp_mb();

		do {
			next = work->next;
			work->func(work);
			work = next;
			cond_resched();
		} while (work);
	}
}