// SPDX-License-Identifier: GPL-2.0
/*
 * Asynchronous refcounty things
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include <linux/closure.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/rcupdate.h>
#include <linux/seq_file.h>
#include <linux/sched/debug.h>

/* Warn if a ref drop leaves guard bits or unexpected flags set */
static inline void closure_put_after_sub_checks(int flags)
{
	int r = flags & CLOSURE_REMAINING_MASK;

	if (WARN(flags & CLOSURE_GUARD_MASK,
		 "closure has guard bits set: %x (%u)",
		 flags & CLOSURE_GUARD_MASK, (unsigned) __fls(r)))
		r &= ~CLOSURE_GUARD_MASK;

	WARN(!r && (flags & ~CLOSURE_DESTRUCTOR),
	     "closure ref hit 0 with incorrect flags set: %x (%u)",
	     flags & ~CLOSURE_DESTRUCTOR, (unsigned) __fls(flags));
}

static inline void closure_put_after_sub(struct closure *cl, int flags)
{
	closure_put_after_sub_checks(flags);

	if (!(flags & CLOSURE_REMAINING_MASK)) {
		smp_acquire__after_ctrl_dep();

		cl->closure_get_happened = false;

		if (cl->fn && !(flags & CLOSURE_DESTRUCTOR)) {
			/* not done yet: reinit the ref and requeue */
			atomic_set(&cl->remaining,
				   CLOSURE_REMAINING_INITIALIZER);
			closure_queue(cl);
		} else {
			/* finished: run the destructor, then drop the parent ref */
			struct closure *parent = cl->parent;
			closure_fn *destructor = cl->fn;

			closure_debug_destroy(cl);

			if (destructor)
				destructor(&cl->work);

			if (parent)
				closure_put(parent);
		}
	}
}

/* For clearing flags with the same atomic op as a put */
void closure_sub(struct closure *cl, int v)
{
	closure_put_after_sub(cl, atomic_sub_return_release(v, &cl->remaining));
}
EXPORT_SYMBOL(closure_sub);

/*
 * closure_put - decrement a closure's refcount
 */
void closure_put(struct closure *cl)
{
	closure_put_after_sub(cl, atomic_dec_return_release(&cl->remaining));
}
EXPORT_SYMBOL(closure_put);
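
/*
 * Usage sketch (illustrative, not part of this file): take a ref with
 * closure_get() before handing work to an async context, and drop it with
 * closure_put() when that work completes. example_submit() and
 * example_io_done() are hypothetical names.
 */
static void example_io_done(struct closure *cl)
{
	closure_put(cl);	/* drops the ref taken in example_submit() */
}

static void example_submit(struct closure *cl)
{
	closure_get(cl);	/* ref owned by the in-flight operation */
	/* ... start async work that eventually calls example_io_done(cl) ... */
}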

/*
 * __closure_wake_up - wake up all closures on a wait list, without a memory
 * barrier; closure_wake_up() is the wrapper that supplies the barrier.
 */
void __closure_wake_up(struct closure_waitlist *wait_list)
{
	struct llist_node *list;
	struct closure *cl, *t;
	struct llist_node *reverse = NULL;

	list = llist_del_all(&wait_list->list);

	/* We first reverse the list to preserve FIFO ordering and fairness */
	reverse = llist_reverse_order(list);

	/* Then do the wakeups */
	llist_for_each_entry_safe(cl, t, reverse, list) {
		closure_set_waiting(cl, 0);
		closure_sub(cl, CLOSURE_WAITING + 1);
	}
}
EXPORT_SYMBOL(__closure_wake_up);

/**
 * closure_wait - add a closure to a waitlist
 * @waitlist: will own a ref on @cl, which will be released when
 *            closure_wake_up() is called on @waitlist.
 * @cl: closure to add
 *
 * Returns true if @cl was added to the waitlist, false if it was already
 * waiting on one.
 */
bool closure_wait(struct closure_waitlist *waitlist, struct closure *cl)
{
	if (atomic_read(&cl->remaining) & CLOSURE_WAITING)
		return false;

	cl->closure_get_happened = true;
	closure_set_waiting(cl, _RET_IP_);
	atomic_add(CLOSURE_WAITING + 1, &cl->remaining);
	llist_add(&cl->list, &waitlist->list);

	return true;
}
EXPORT_SYMBOL(closure_wait);
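
/*
 * Usage sketch (illustrative, not part of this file): parking a closure on a
 * waitlist until a resource becomes available. example_waitlist,
 * example_maybe_wait() and example_resource_freed() are hypothetical names.
 */
static struct closure_waitlist example_waitlist;

static bool example_maybe_wait(struct closure *cl)
{
	/* false means @cl was already parked on some waitlist */
	return closure_wait(&example_waitlist, cl);
}

static void example_resource_freed(void)
{
	/* barriered wrapper around __closure_wake_up() */
	closure_wake_up(&example_waitlist);
}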

struct closure_syncer {
	struct task_struct	*task;
	int			done;
};

/* Final callback for closures being waited on synchronously: wake the waiter */
static CLOSURE_CALLBACK(closure_sync_fn)
{
	struct closure *cl = container_of(ws, struct closure, work);
	struct closure_syncer *s = cl->s;
	struct task_struct *p;

	rcu_read_lock();
	p = READ_ONCE(s->task);
	s->done = 1;
	wake_up_process(p);
	rcu_read_unlock();
}
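
/*
 * Usage sketch (illustrative, not part of this file): chaining closure
 * callbacks with continue_at(). example_read(), example_read_done() and
 * example_wq are hypothetical names.
 */
static struct workqueue_struct *example_wq;

static CLOSURE_CALLBACK(example_read_done)
{
	struct closure *cl = container_of(ws, struct closure, work);

	/* ... process the completed reads ... */
	closure_return(cl);	/* finish: drops our ref, ends the closure */
}

static CLOSURE_CALLBACK(example_read)
{
	struct closure *cl = container_of(ws, struct closure, work);

	/* ... submit async reads, each taking a ref via closure_get(cl) ... */
	continue_at(cl, example_read_done, example_wq);
}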

void __sched __closure_sync(struct closure *cl)
{
	struct closure_syncer s = { .task = current };

	cl->s = &s;
	continue_at(cl, closure_sync_fn, NULL);

	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (s.done)
			break;
		schedule();
	}

	__set_current_state(TASK_RUNNING);
}
EXPORT_SYMBOL(__closure_sync);
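
/*
 * Usage sketch (illustrative, not part of this file): waiting synchronously
 * for all outstanding refs on a stack-allocated closure. Callers normally use
 * the closure_sync() wrapper from <linux/closure.h> rather than calling
 * __closure_sync() directly. example_wait_for_io() is a hypothetical name.
 */
static void example_wait_for_io(void)
{
	struct closure cl;

	closure_init_stack(&cl);
	/* ... submit async work that takes refs via closure_get(&cl) ... */
	closure_sync(&cl);	/* blocks until the refcount drops back to 1 */
}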

/*
 * closure_return_sync - finish running a closure, synchronously (i.e. waiting
 * for outstanding get()s to finish) and returning once closure refcount is 0.
 *
 * Unlike closure_sync() this doesn't reinit the ref to 1; subsequent
 * closure_get_not_zero() calls will fail.
 */
void __sched closure_return_sync(struct closure *cl)
{
	struct closure_syncer s = { .task = current };

	cl->s = &s;
	set_closure_fn(cl, closure_sync_fn, NULL);

	/* atomically drop our ref and CLOSURE_RUNNING, set CLOSURE_DESTRUCTOR */
	unsigned flags = atomic_sub_return_release(1 + CLOSURE_RUNNING - CLOSURE_DESTRUCTOR,
						   &cl->remaining);

	closure_put_after_sub_checks(flags);

	if (unlikely(flags & CLOSURE_REMAINING_MASK)) {
		while (1) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (s.done)
				break;
			schedule();
		}

		__set_current_state(TASK_RUNNING);
	}

	if (cl->parent)
		closure_put(cl->parent);
}
EXPORT_SYMBOL(closure_return_sync);
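
/*
 * Usage sketch (illustrative, not part of this file): tearing down an object
 * whose lifetime is tied to an embedded closure. struct example_obj and
 * example_obj_free() are hypothetical names.
 */
struct example_obj {
	struct closure	cl;
	/* ... other fields ... */
};

static void example_obj_free(struct example_obj *obj)
{
	/* waits for outstanding gets; afterwards the ref stays dead */
	closure_return_sync(&obj->cl);
	/* obj can now be freed safely */
}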

int __sched __closure_sync_timeout(struct closure *cl, unsigned long timeout)
{
	struct closure_syncer s = { .task = current };
	int ret = 0;

	cl->s = &s;
	continue_at(cl, closure_sync_fn, NULL);

	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (s.done)
			break;
		if (!timeout) {
			/*
			 * Carefully undo the continue_at() - but only if it
			 * hasn't completed, i.e. the final closure_put() hasn't
			 * happened yet:
			 */
			unsigned old, new, v = atomic_read(&cl->remaining);
			do {
				old = v;
				if (!old || (old & CLOSURE_RUNNING))
					goto success;

				new = old + CLOSURE_REMAINING_INITIALIZER;
			} while ((v = atomic_cmpxchg(&cl->remaining, old, new)) != old);
			ret = -ETIME;
		}

		timeout = schedule_timeout(timeout);
	}
success:
	__set_current_state(TASK_RUNNING);
	return ret;
}
EXPORT_SYMBOL(__closure_sync_timeout);
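
/*
 * Usage sketch (illustrative, not part of this file): a bounded wait,
 * assuming the closure_sync_timeout() wrapper from <linux/closure.h>.
 * example_wait_with_timeout() is a hypothetical name.
 */
static int example_wait_with_timeout(struct closure *cl)
{
	/* returns -ETIME if refs were still outstanding after ~1s */
	return closure_sync_timeout(cl, HZ);
}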

#ifdef CONFIG_DEBUG_CLOSURES

/* Global list of live closures, exposed through debugfs */
static LIST_HEAD(closure_list);
static DEFINE_SPINLOCK(closure_list_lock);

void closure_debug_create(struct closure *cl)
{
	unsigned long flags;

	BUG_ON(cl->magic == CLOSURE_MAGIC_ALIVE);
	cl->magic = CLOSURE_MAGIC_ALIVE;

	spin_lock_irqsave(&closure_list_lock, flags);
	list_add(&cl->all, &closure_list);
	spin_unlock_irqrestore(&closure_list_lock, flags);
}
EXPORT_SYMBOL(closure_debug_create);

void closure_debug_destroy(struct closure *cl)
{
	unsigned long flags;

	if (cl->magic == CLOSURE_MAGIC_STACK)
		return;

	BUG_ON(cl->magic != CLOSURE_MAGIC_ALIVE);
	cl->magic = CLOSURE_MAGIC_DEAD;

	spin_lock_irqsave(&closure_list_lock, flags);
	list_del(&cl->all);
	spin_unlock_irqrestore(&closure_list_lock, flags);
}
EXPORT_SYMBOL(closure_debug_destroy);

static int debug_show(struct seq_file *f, void *data)
{
	struct closure *cl;

	spin_lock_irq(&closure_list_lock);

	list_for_each_entry(cl, &closure_list, all) {
		int r = atomic_read(&cl->remaining);

		seq_printf(f, "%p: %pS -> %pS p %p r %i ",
			   cl, (void *) cl->ip, cl->fn, cl->parent,
			   r & CLOSURE_REMAINING_MASK);

		seq_printf(f, "%s%s\n",
			   test_bit(WORK_STRUCT_PENDING_BIT,
				    work_data_bits(&cl->work)) ? "Q" : "",
			   r & CLOSURE_RUNNING	? "R" : "");

		if (r & CLOSURE_WAITING)
			seq_printf(f, " W %pS\n",
				   (void *) cl->waiting_on);

		seq_putc(f, '\n');
	}

	spin_unlock_irq(&closure_list_lock);
	return 0;
}

DEFINE_SHOW_ATTRIBUTE(debug);
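
/*
 * With CONFIG_DEBUG_CLOSURES=y the set of live closures can be inspected via
 * the "closures" file at the debugfs root (typically
 * /sys/kernel/debug/closures), one entry per closure with its
 * queued/running/waiting state.
 */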
static int __init closure_debug_init(void)
{
	debugfs_create_file("closures", 0400, NULL, NULL, &debug_fops);
	return 0;
}
late_initcall(closure_debug_init)

#endif