kernel/task_work.c (v3.15)
 
#include <linux/spinlock.h>
#include <linux/task_work.h>
#include <linux/tracehook.h>

static struct callback_head work_exited; /* all we need is ->next == NULL */

/**
 * task_work_add - ask the @task to execute @work->func()
 * @task: the task which should run the callback
 * @work: the callback to run
 * @notify: send the notification if true
 *
 * Queue @work for task_work_run() below and notify the @task if @notify.
 * Fails if the @task is exiting/exited and thus it can't process this @work.
 * Otherwise @work->func() will be called when the @task returns from kernel
 * mode or exits.
 *
 * This is like the signal handler which runs in kernel mode, but it doesn't
 * try to wake up the @task.
 *
 * RETURNS:
 * 0 if succeeds or -ESRCH.
 */
int
task_work_add(struct task_struct *task, struct callback_head *work, bool notify)
{
	struct callback_head *head;

	do {
		head = ACCESS_ONCE(task->task_works);
		if (unlikely(head == &work_exited))
			return -ESRCH;
		work->next = head;
	} while (cmpxchg(&task->task_works, head, work) != head);

	if (notify)
		set_notify_resume(task);
	return 0;
}
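
A minimal usage sketch (not part of the file above), assuming a caller that embeds the struct callback_head in its own object; the names my_deferred_work, my_work_func and my_queue_on_task are hypothetical. It shows the typical pattern: init_task_work() sets ->func, task_work_add() queues the entry, and the object is freed in the callback because it runs asynchronously in the target task's context.

/* Sketch only: would need <linux/task_work.h>, <linux/slab.h>, <linux/sched.h>. */
struct my_deferred_work {
	struct callback_head cb;
	int payload;
};

static void my_work_func(struct callback_head *cb)
{
	struct my_deferred_work *w = container_of(cb, struct my_deferred_work, cb);

	/* Runs in the target task's context, on its way back to user mode or at exit. */
	pr_info("deferred payload = %d\n", w->payload);
	kfree(w);
}

static int my_queue_on_task(struct task_struct *task, int payload)
{
	struct my_deferred_work *w = kmalloc(sizeof(*w), GFP_KERNEL);

	if (!w)
		return -ENOMEM;
	w->payload = payload;
	init_task_work(&w->cb, my_work_func);
	if (task_work_add(task, &w->cb, true)) {	/* notify == true -> set_notify_resume() */
		kfree(w);	/* task is exiting, the work was not queued */
		return -ESRCH;
	}
	return 0;
}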

/**
 * task_work_cancel - cancel a pending work added by task_work_add()
 * @task: the task which should execute the work
 * @func: identifies the work to remove
 *
 * Find the last queued pending work with ->func == @func and remove
 * it from queue.
 *
 * RETURNS:
 * The found work or NULL if not found.
 */
struct callback_head *
task_work_cancel(struct task_struct *task, task_work_func_t func)
{
	struct callback_head **pprev = &task->task_works;
	struct callback_head *work;
	unsigned long flags;
	/*
	 * If cmpxchg() fails we continue without updating pprev.
	 * Either we raced with task_work_add() which added the
	 * new entry before this work, we will find it again. Or
	 * we raced with task_work_run(), *pprev == NULL/exited.
	 */
	raw_spin_lock_irqsave(&task->pi_lock, flags);
	while ((work = ACCESS_ONCE(*pprev))) {
		smp_read_barrier_depends();
		if (work->func != func)
			pprev = &work->next;
		else if (cmpxchg(pprev, work, work->next) == work)
			break;
	}
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	return work;
}
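
A companion sketch for cancellation, using the same hypothetical names as above: task_work_cancel() hands back the removed callback_head (or NULL), so ownership returns to the caller, which must free the containing object itself.

static void my_cancel_on_task(struct task_struct *task)
{
	struct callback_head *cb = task_work_cancel(task, my_work_func);

	if (cb)
		kfree(container_of(cb, struct my_deferred_work, cb));
	/* cb == NULL: the callback already ran (and freed itself) or was never queued. */
}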

/**
 * task_work_run - execute the works added by task_work_add()
 *
 * Flush the pending works. Should be used by the core kernel code.
 * Called before the task returns to the user-mode or stops, or when
 * it exits. In the latter case task_work_add() can no longer add the
 * new work after task_work_run() returns.
 */
void task_work_run(void)
{
	struct task_struct *task = current;
	struct callback_head *work, *head, *next;

	for (;;) {
		/*
		 * work->func() can do task_work_add(), do not set
		 * work_exited unless the list is empty.
		 */
		do {
			work = ACCESS_ONCE(task->task_works);
			head = !work && (task->flags & PF_EXITING) ?
				&work_exited : NULL;
		} while (cmpxchg(&task->task_works, work, head) != work);

		if (!work)
			break;
		/*
		 * Synchronize with task_work_cancel(). It can't remove
		 * the first entry == work, cmpxchg(task_works) should
		 * fail, but it can play with *work and other entries.
		 */
		raw_spin_unlock_wait(&task->pi_lock);
		smp_mb();

		/* Reverse the list to run the works in fifo order */
		head = NULL;
		do {
			next = work->next;
			work->next = head;
			head = work;
			work = next;
		} while (work);

		work = head;
		do {
			next = work->next;
			work->func(work);
			work = next;
			cond_resched();
		} while (work);
	}
}
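
For context, a simplified sketch (not verbatim kernel code) of the two places this era reaches task_work_run(): the return-to-user path, where tracehook_notify_resume() checks current->task_works after TIF_NOTIFY_RESUME is cleared, and the exit path, where do_exit() calls exit_task_work() after setting PF_EXITING, which is what lets the cmpxchg loop above install &work_exited and turn away later task_work_add() calls. The name notify_resume_sketch is hypothetical.

/* Roughly what tracehook_notify_resume() does on return to user mode: */
static inline void notify_resume_sketch(void)
{
	if (unlikely(current->task_works))
		task_work_run();
}

/* The exit-side helper (roughly as in <linux/task_work.h>), called from do_exit(): */
static inline void exit_task_work(struct task_struct *task)
{
	task_work_run();
}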
kernel/task_work.c (v6.8)
// SPDX-License-Identifier: GPL-2.0
#include <linux/spinlock.h>
#include <linux/task_work.h>
#include <linux/resume_user_mode.h>

static struct callback_head work_exited; /* all we need is ->next == NULL */

/**
 * task_work_add - ask the @task to execute @work->func()
 * @task: the task which should run the callback
 * @work: the callback to run
 * @notify: how to notify the targeted task
 *
 * Queue @work for task_work_run() below and notify the @task if @notify
 * is @TWA_RESUME, @TWA_SIGNAL, or @TWA_SIGNAL_NO_IPI.
 *
 * @TWA_SIGNAL works like signals, in that it will interrupt the targeted
 * task and run the task_work, regardless of whether the task is currently
 * running in the kernel or userspace.
 * @TWA_SIGNAL_NO_IPI works like @TWA_SIGNAL, except it doesn't send a
 * reschedule IPI to force the targeted task to reschedule and run task_work.
 * This can be advantageous if there's no strict requirement that the
 * task_work be run as soon as possible, just whenever the task enters the
 * kernel anyway.
 * @TWA_RESUME work is run only when the task exits the kernel and returns to
 * user mode, or before entering guest mode.
 *
 * Fails if the @task is exiting/exited and thus it can't process this @work.
 * Otherwise @work->func() will be called when the @task goes through one of
 * the aforementioned transitions, or exits.
 *
 * If the targeted task is exiting, then an error is returned and the work item
 * is not queued. It's up to the caller to arrange for an alternative mechanism
 * in that case.
 *
 * Note: there is no ordering guarantee on works queued here. The task_work
 * list is LIFO.
 *
 * RETURNS:
 * 0 if succeeds or -ESRCH.
 */
int task_work_add(struct task_struct *task, struct callback_head *work,
		  enum task_work_notify_mode notify)
{
	struct callback_head *head;

	/* record the work call stack in order to print it in KASAN reports */
	kasan_record_aux_stack(work);

	head = READ_ONCE(task->task_works);
	do {
		if (unlikely(head == &work_exited))
			return -ESRCH;
		work->next = head;
	} while (!try_cmpxchg(&task->task_works, &head, work));

	switch (notify) {
	case TWA_NONE:
		break;
	case TWA_RESUME:
		set_notify_resume(task);
		break;
	case TWA_SIGNAL:
		set_notify_signal(task);
		break;
	case TWA_SIGNAL_NO_IPI:
		__set_notify_signal(task);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	return 0;
}
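
A small usage sketch for this newer interface (the helper name my_queue_urgent is hypothetical): the third argument is now an enum task_work_notify_mode rather than a bool. TWA_SIGNAL asks for the work to run as soon as possible, even interrupting user-space execution; TWA_RESUME defers it until the task next returns to user mode; TWA_NONE queues without any notification.

static int my_queue_urgent(struct task_struct *task, struct callback_head *cb,
			   task_work_func_t func, bool urgent)
{
	init_task_work(cb, func);
	/* The caller keeps ownership of *cb on failure (-ESRCH), as above. */
	return task_work_add(task, cb, urgent ? TWA_SIGNAL : TWA_RESUME);
}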

/**
 * task_work_cancel_match - cancel a pending work added by task_work_add()
 * @task: the task which should execute the work
 * @match: match function to call
 * @data: data to be passed in to match function
 *
 * RETURNS:
 * The found work or NULL if not found.
 */
struct callback_head *
task_work_cancel_match(struct task_struct *task,
		       bool (*match)(struct callback_head *, void *data),
		       void *data)
{
	struct callback_head **pprev = &task->task_works;
	struct callback_head *work;
	unsigned long flags;

	if (likely(!task_work_pending(task)))
		return NULL;
	/*
	 * If cmpxchg() fails we continue without updating pprev.
	 * Either we raced with task_work_add() which added the
	 * new entry before this work, we will find it again. Or
	 * we raced with task_work_run(), *pprev == NULL/exited.
	 */
	raw_spin_lock_irqsave(&task->pi_lock, flags);
	work = READ_ONCE(*pprev);
	while (work) {
		if (!match(work, data)) {
			pprev = &work->next;
			work = READ_ONCE(*pprev);
		} else if (try_cmpxchg(pprev, &work, work->next))
			break;
	}
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	return work;
}

static bool task_work_func_match(struct callback_head *cb, void *data)
{
	return cb->func == data;
}
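
task_work_func_match() above is the stock predicate used by task_work_cancel() below, but a caller can pass its own predicate to task_work_cancel_match() to cancel one specific queued entry rather than "any work with this function". A sketch, with the hypothetical names cb_ptr_match and my_cancel_exact:

static bool cb_ptr_match(struct callback_head *cb, void *data)
{
	return cb == data;	/* match the exact queued callback_head */
}

static struct callback_head *
my_cancel_exact(struct task_struct *task, struct callback_head *cb)
{
	return task_work_cancel_match(task, cb_ptr_match, cb);
}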

/**
 * task_work_cancel - cancel a pending work added by task_work_add()
 * @task: the task which should execute the work
 * @func: identifies the work to remove
 *
 * Find the last queued pending work with ->func == @func and remove
 * it from queue.
 *
 * RETURNS:
 * The found work or NULL if not found.
 */
struct callback_head *
task_work_cancel(struct task_struct *task, task_work_func_t func)
{
	return task_work_cancel_match(task, task_work_func_match, func);
}

/**
 * task_work_run - execute the works added by task_work_add()
 *
 * Flush the pending works. Should be used by the core kernel code.
 * Called before the task returns to the user-mode or stops, or when
 * it exits. In the latter case task_work_add() can no longer add the
 * new work after task_work_run() returns.
 */
void task_work_run(void)
{
	struct task_struct *task = current;
	struct callback_head *work, *head, *next;

	for (;;) {
		/*
		 * work->func() can do task_work_add(), do not set
		 * work_exited unless the list is empty.
		 */
		work = READ_ONCE(task->task_works);
		do {
			head = NULL;
			if (!work) {
				if (task->flags & PF_EXITING)
					head = &work_exited;
				else
					break;
			}
		} while (!try_cmpxchg(&task->task_works, &work, head));

		if (!work)
			break;
		/*
		 * Synchronize with task_work_cancel(). It can not remove
		 * the first entry == work, cmpxchg(task_works) must fail.
		 * But it can remove another entry from the ->next list.
		 */
		raw_spin_lock_irq(&task->pi_lock);
		raw_spin_unlock_irq(&task->pi_lock);

		do {
			next = work->next;
			work->func(work);
			work = next;
			cond_resched();
		} while (work);
	}
}
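
Finally, a sketch illustrating the comment inside the loop above ("work->func() can do task_work_add()"): a callback may legally re-queue itself from within task_work_run(); the outer for (;;) loop picks the new entry up on its next pass, and work_exited is only installed once the list is found empty with PF_EXITING set. The name my_requeue_work is hypothetical.

static void my_requeue_work(struct callback_head *cb)
{
	/* Re-arm for the next return to user mode; -ESRCH means the task is exiting. */
	if (task_work_add(current, cb, TWA_RESUME))
		pr_debug("task exiting, not re-queued\n");
}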