v6.2 (kernel/task_work.c)
// SPDX-License-Identifier: GPL-2.0
#include <linux/spinlock.h>
#include <linux/task_work.h>
#include <linux/resume_user_mode.h>

static struct callback_head work_exited; /* all we need is ->next == NULL */

/**
 * task_work_add - ask the @task to execute @work->func()
 * @task: the task which should run the callback
 * @work: the callback to run
 * @notify: how to notify the targeted task
 *
 * Queue @work for task_work_run() below and notify the @task if @notify
 * is @TWA_RESUME, @TWA_SIGNAL, or @TWA_SIGNAL_NO_IPI.
 *
 * @TWA_SIGNAL works like signals, in that it will interrupt the targeted
 * task and run the task_work, regardless of whether the task is currently
 * running in the kernel or userspace.
 * @TWA_SIGNAL_NO_IPI works like @TWA_SIGNAL, except it doesn't send a
 * reschedule IPI to force the targeted task to reschedule and run task_work.
 * This can be advantageous if there's no strict requirement that the
 * task_work be run as soon as possible, just whenever the task enters the
 * kernel anyway.
 * @TWA_RESUME work is run only when the task exits the kernel and returns to
 * user mode, or before entering guest mode.
 *
 * Fails if the @task is exiting/exited and thus it can't process this @work.
 * Otherwise @work->func() will be called when the @task goes through one of
 * the aforementioned transitions, or exits.
 *
 * If the targeted task is exiting, then an error is returned and the work item
 * is not queued. It's up to the caller to arrange for an alternative mechanism
 * in that case.
 *
 * Note: there is no ordering guarantee on works queued here. The task_work
 * list is LIFO.
 *
 * RETURNS:
 * 0 on success, or -ESRCH if @task is exiting/exited.
 */
int task_work_add(struct task_struct *task, struct callback_head *work,
		  enum task_work_notify_mode notify)
{
	struct callback_head *head;

	/* record the work call stack in order to print it in KASAN reports */
	kasan_record_aux_stack(work);

	head = READ_ONCE(task->task_works);
	do {
		if (unlikely(head == &work_exited))
			return -ESRCH;
		work->next = head;
	} while (!try_cmpxchg(&task->task_works, &head, work));

	switch (notify) {
	case TWA_NONE:
		break;
	case TWA_RESUME:
		set_notify_resume(task);
		break;
	case TWA_SIGNAL:
		set_notify_signal(task);
		break;
	case TWA_SIGNAL_NO_IPI:
		__set_notify_signal(task);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	return 0;
}
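
/*
 * Illustrative usage sketch -- not part of the kernel source above. It shows
 * how a caller might queue work on the current task; "example_work",
 * "example_func" and "example_queue" are hypothetical names.
 */
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/task_work.h>

static void example_func(struct callback_head *cb)
{
	/* Runs in task context, e.g. on the way back to user mode. */
	pr_info("task_work ran for %s\n", current->comm);
}

static struct callback_head example_work;

static void example_queue(void)
{
	init_task_work(&example_work, example_func);
	/* TWA_RESUME: run when the task next returns to user mode. */
	if (task_work_add(current, &example_work, TWA_RESUME))
		pr_warn("task is exiting; work was not queued\n");
}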

/**
 * task_work_cancel_match - cancel a pending work added by task_work_add()
 * @task: the task which should execute the work
 * @match: match function to call
 * @data: data to be passed in to match function
 *
 * RETURNS:
 * The found work or NULL if not found.
 */
struct callback_head *
task_work_cancel_match(struct task_struct *task,
		       bool (*match)(struct callback_head *, void *data),
		       void *data)
{
	struct callback_head **pprev = &task->task_works;
	struct callback_head *work;
	unsigned long flags;

	if (likely(!task_work_pending(task)))
		return NULL;
	/*
	 * If cmpxchg() fails we continue without updating pprev.
	 * Either we raced with task_work_add() which added a new
	 * entry before this work, in which case we will find it
	 * again, or we raced with task_work_run() and
	 * *pprev == NULL/exited.
	 */
	raw_spin_lock_irqsave(&task->pi_lock, flags);
	work = READ_ONCE(*pprev);
	while (work) {
		if (!match(work, data)) {
			pprev = &work->next;
			work = READ_ONCE(*pprev);
		} else if (try_cmpxchg(pprev, &work, work->next))
			break;
	}
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	return work;
}

static bool task_work_func_match(struct callback_head *cb, void *data)
{
	return cb->func == data;
}

/**
 * task_work_cancel - cancel a pending work added by task_work_add()
 * @task: the task which should execute the work
 * @func: identifies the work to remove
 *
 * Find the last queued pending work with ->func == @func and remove
 * it from the queue.
 *
 * RETURNS:
 * The found work or NULL if not found.
 */
struct callback_head *
task_work_cancel(struct task_struct *task, task_work_func_t func)
{
	return task_work_cancel_match(task, task_work_func_match, func);
}
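
/*
 * Illustrative sketch -- not part of the kernel source above. In v6.2,
 * task_work_cancel() matches by function pointer; a non-NULL return means
 * the work was dequeued before running, so the caller may reuse or free it.
 * Reuses the hypothetical example_func from the sketch after task_work_add().
 */
static void example_cancel(struct task_struct *task)
{
	struct callback_head *cb;

	cb = task_work_cancel(task, example_func);
	if (cb)
		pr_info("cancelled %p before it could run\n", cb);
}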

/**
 * task_work_run - execute the works added by task_work_add()
 *
 * Flush the pending works. Should be used by the core kernel code.
 * Called before the task returns to user mode or stops, or when
 * it exits. In the latter case task_work_add() can no longer add
 * new work after task_work_run() returns.
 */
void task_work_run(void)
{
	struct task_struct *task = current;
	struct callback_head *work, *head, *next;

	for (;;) {
		/*
		 * work->func() can do task_work_add(), do not set
		 * work_exited unless the list is empty.
		 */
		work = READ_ONCE(task->task_works);
		do {
			head = NULL;
			if (!work) {
				if (task->flags & PF_EXITING)
					head = &work_exited;
				else
					break;
			}
		} while (!try_cmpxchg(&task->task_works, &work, head));

		if (!work)
			break;
		/*
		 * Synchronize with task_work_cancel(). It cannot remove
		 * the first entry == work, since cmpxchg(task_works) must
		 * fail. But it can remove another entry from the ->next
		 * list.
		 */
		raw_spin_lock_irq(&task->pi_lock);
		raw_spin_unlock_irq(&task->pi_lock);

		do {
			next = work->next;
			work->func(work);
			work = next;
			cond_resched();
		} while (work);
	}
}
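
/*
 * Simplified sketch of how the core kernel reaches task_work_run() on the
 * return-to-user path -- modeled on resume_user_mode_work() from
 * <linux/resume_user_mode.h>, not a verbatim copy.
 */
static inline void example_resume_user_mode_work(void)
{
	clear_thread_flag(TIF_NOTIFY_RESUME);
	if (unlikely(task_work_pending(current)))
		task_work_run();
}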
v6.13.7 (kernel/task_work.c)
// SPDX-License-Identifier: GPL-2.0
#include <linux/irq_work.h>
#include <linux/spinlock.h>
#include <linux/task_work.h>
#include <linux/resume_user_mode.h>

static struct callback_head work_exited; /* all we need is ->next == NULL */

#ifdef CONFIG_IRQ_WORK
static void task_work_set_notify_irq(struct irq_work *entry)
{
	test_and_set_tsk_thread_flag(current, TIF_NOTIFY_RESUME);
}
static DEFINE_PER_CPU(struct irq_work, irq_work_NMI_resume) =
	IRQ_WORK_INIT_HARD(task_work_set_notify_irq);
#endif

/**
 * task_work_add - ask the @task to execute @work->func()
 * @task: the task which should run the callback
 * @work: the callback to run
 * @notify: how to notify the targeted task
 *
 * Queue @work for task_work_run() below and notify the @task if @notify
 * is @TWA_RESUME, @TWA_SIGNAL, @TWA_SIGNAL_NO_IPI or @TWA_NMI_CURRENT.
 *
 * @TWA_SIGNAL works like signals, in that it will interrupt the targeted
 * task and run the task_work, regardless of whether the task is currently
 * running in the kernel or userspace.
 * @TWA_SIGNAL_NO_IPI works like @TWA_SIGNAL, except it doesn't send a
 * reschedule IPI to force the targeted task to reschedule and run task_work.
 * This can be advantageous if there's no strict requirement that the
 * task_work be run as soon as possible, just whenever the task enters the
 * kernel anyway.
 * @TWA_RESUME work is run only when the task exits the kernel and returns to
 * user mode, or before entering guest mode.
 * @TWA_NMI_CURRENT works like @TWA_RESUME, except it can only be used for the
 * current @task and if the current context is NMI.
 *
 * Fails if the @task is exiting/exited and thus it can't process this @work.
 * Otherwise @work->func() will be called when the @task goes through one of
 * the aforementioned transitions, or exits.
 *
 * If the targeted task is exiting, then an error is returned and the work item
 * is not queued. It's up to the caller to arrange for an alternative mechanism
 * in that case.
 *
 * Note: there is no ordering guarantee on works queued here. The task_work
 * list is LIFO.
 *
 * RETURNS:
 * 0 on success, -ESRCH if @task is exiting/exited, or -EINVAL on misuse
 * of @TWA_NMI_CURRENT.
 */
int task_work_add(struct task_struct *task, struct callback_head *work,
		  enum task_work_notify_mode notify)
{
	struct callback_head *head;
	int flags = notify & TWA_FLAGS;

	notify &= ~TWA_FLAGS;
	if (notify == TWA_NMI_CURRENT) {
		if (WARN_ON_ONCE(task != current))
			return -EINVAL;
		if (!IS_ENABLED(CONFIG_IRQ_WORK))
			return -EINVAL;
	} else {
		/*
		 * Record the work call stack in order to print it in KASAN
		 * reports.
		 *
		 * Note that stack allocation can fail if the TWAF_NO_ALLOC
		 * flag is set and a new page is needed to expand the stack
		 * buffer.
		 */
		if (flags & TWAF_NO_ALLOC)
			kasan_record_aux_stack_noalloc(work);
		else
			kasan_record_aux_stack(work);
	}

	head = READ_ONCE(task->task_works);
	do {
		if (unlikely(head == &work_exited))
			return -ESRCH;
		work->next = head;
	} while (!try_cmpxchg(&task->task_works, &head, work));

	switch (notify) {
	case TWA_NONE:
		break;
	case TWA_RESUME:
		set_notify_resume(task);
		break;
	case TWA_SIGNAL:
		set_notify_signal(task);
		break;
	case TWA_SIGNAL_NO_IPI:
		__set_notify_signal(task);
		break;
#ifdef CONFIG_IRQ_WORK
	case TWA_NMI_CURRENT:
		irq_work_queue(this_cpu_ptr(&irq_work_NMI_resume));
		break;
#endif
	default:
		WARN_ON_ONCE(1);
		break;
	}

	return 0;
}
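
/*
 * Illustrative sketch -- not part of the kernel source above. TWA_NMI_CURRENT
 * may only target current and only from NMI context; the notification is
 * deferred through the hard irq_work defined at the top of the file.
 * "example_nmi_work" and "example_nmi_handler" are hypothetical names.
 */
static struct callback_head example_nmi_work;

static void example_nmi_handler(void)
{
	/* example_nmi_work must have been set up with init_task_work(). */
	if (task_work_add(current, &example_nmi_work, TWA_NMI_CURRENT))
		; /* -EINVAL on misuse, or -ESRCH if the task is exiting */
}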

/**
 * task_work_cancel_match - cancel a pending work added by task_work_add()
 * @task: the task which should execute the work
 * @match: match function to call
 * @data: data to be passed in to match function
 *
 * RETURNS:
 * The found work or NULL if not found.
 */
struct callback_head *
task_work_cancel_match(struct task_struct *task,
		       bool (*match)(struct callback_head *, void *data),
		       void *data)
{
	struct callback_head **pprev = &task->task_works;
	struct callback_head *work;
	unsigned long flags;

	if (likely(!task_work_pending(task)))
		return NULL;
	/*
	 * If cmpxchg() fails we continue without updating pprev.
	 * Either we raced with task_work_add() which added a new
	 * entry before this work, in which case we will find it
	 * again, or we raced with task_work_run() and
	 * *pprev == NULL/exited.
	 */
	raw_spin_lock_irqsave(&task->pi_lock, flags);
	work = READ_ONCE(*pprev);
	while (work) {
		if (!match(work, data)) {
			pprev = &work->next;
			work = READ_ONCE(*pprev);
		} else if (try_cmpxchg(pprev, &work, work->next))
			break;
	}
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	return work;
}

static bool task_work_func_match(struct callback_head *cb, void *data)
{
	return cb->func == data;
}

/**
 * task_work_cancel_func - cancel a pending work matching a function added by task_work_add()
 * @task: the task which should execute the func's work
 * @func: identifies the func to match with a work to remove
 *
 * Find the last queued pending work with ->func == @func and remove
 * it from the queue.
 *
 * RETURNS:
 * The found work or NULL if not found.
 */
struct callback_head *
task_work_cancel_func(struct task_struct *task, task_work_func_t func)
{
	return task_work_cancel_match(task, task_work_func_match, func);
}

static bool task_work_match(struct callback_head *cb, void *data)
{
	return cb == data;
}

/**
 * task_work_cancel - cancel a pending work added by task_work_add()
 * @task: the task which should execute the work
 * @cb: the callback to remove if queued
 *
 * Remove a callback from a task's queue if queued.
 *
 * RETURNS:
 * True if the callback was queued and got cancelled, false otherwise.
 */
bool task_work_cancel(struct task_struct *task, struct callback_head *cb)
{
	struct callback_head *ret;

	ret = task_work_cancel_match(task, task_work_match, cb);

	return ret == cb;
}
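
/*
 * Illustrative sketch -- not part of the kernel source above. In this version
 * task_work_cancel() matches the callback_head pointer itself and returns a
 * bool, while matching by function moved to task_work_cancel_func().
 * "example_work" is a hypothetical work item initialized elsewhere.
 */
static void example_cancel(struct task_struct *task,
			   struct callback_head *example_work)
{
	if (task_work_cancel(task, example_work))
		pr_info("work dequeued; its callback will not run\n");
}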

/**
 * task_work_run - execute the works added by task_work_add()
 *
 * Flush the pending works. Should be used by the core kernel code.
 * Called before the task returns to user mode or stops, or when
 * it exits. In the latter case task_work_add() can no longer add
 * new work after task_work_run() returns.
 */
void task_work_run(void)
{
	struct task_struct *task = current;
	struct callback_head *work, *head, *next;

	for (;;) {
		/*
		 * work->func() can do task_work_add(), do not set
		 * work_exited unless the list is empty.
		 */
		work = READ_ONCE(task->task_works);
		do {
			head = NULL;
			if (!work) {
				if (task->flags & PF_EXITING)
					head = &work_exited;
				else
					break;
			}
		} while (!try_cmpxchg(&task->task_works, &work, head));

		if (!work)
			break;
		/*
		 * Synchronize with task_work_cancel_match(). It cannot
		 * remove the first entry == work, since cmpxchg(task_works)
		 * must fail. But it can remove another entry from the
		 * ->next list.
		 */
		raw_spin_lock_irq(&task->pi_lock);
		raw_spin_unlock_irq(&task->pi_lock);

		do {
			next = work->next;
			work->func(work);
			work = next;
			cond_resched();
		} while (work);
	}
}