v6.13.7
// SPDX-License-Identifier: GPL-2.0
/*
 * Support for async notification of waitid
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/compat.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "cancel.h"
#include "waitid.h"
#include "../kernel/exit.h"

static void io_waitid_cb(struct io_kiocb *req, struct io_tw_state *ts);

#define IO_WAITID_CANCEL_FLAG	BIT(31)
#define IO_WAITID_REF_MASK	GENMASK(30, 0)

struct io_waitid {
	struct file *file;
	int which;
	pid_t upid;
	int options;
	atomic_t refs;
	struct wait_queue_head *head;
	struct siginfo __user *infop;
	struct waitid_info info;
};

static void io_waitid_free(struct io_kiocb *req)
{
	struct io_waitid_async *iwa = req->async_data;

	put_pid(iwa->wo.wo_pid);
	kfree(req->async_data);
	req->async_data = NULL;
	req->flags &= ~REQ_F_ASYNC_DATA;
}

#ifdef CONFIG_COMPAT
static bool io_waitid_compat_copy_si(struct io_waitid *iw, int signo)
{
	struct compat_siginfo __user *infop;
	bool ret;

	infop = (struct compat_siginfo __user *) iw->infop;

	if (!user_write_access_begin(infop, sizeof(*infop)))
		return false;

	unsafe_put_user(signo, &infop->si_signo, Efault);
	unsafe_put_user(0, &infop->si_errno, Efault);
	unsafe_put_user(iw->info.cause, &infop->si_code, Efault);
	unsafe_put_user(iw->info.pid, &infop->si_pid, Efault);
	unsafe_put_user(iw->info.uid, &infop->si_uid, Efault);
	unsafe_put_user(iw->info.status, &infop->si_status, Efault);
	ret = true;
done:
	user_write_access_end();
	return ret;
Efault:
	ret = false;
	goto done;
}
#endif

static bool io_waitid_copy_si(struct io_kiocb *req, int signo)
{
	struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);
	bool ret;

	if (!iw->infop)
		return true;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		return io_waitid_compat_copy_si(iw, signo);
#endif

	if (!user_write_access_begin(iw->infop, sizeof(*iw->infop)))
		return false;

	unsafe_put_user(signo, &iw->infop->si_signo, Efault);
	unsafe_put_user(0, &iw->infop->si_errno, Efault);
	unsafe_put_user(iw->info.cause, &iw->infop->si_code, Efault);
	unsafe_put_user(iw->info.pid, &iw->infop->si_pid, Efault);
	unsafe_put_user(iw->info.uid, &iw->infop->si_uid, Efault);
	unsafe_put_user(iw->info.status, &iw->infop->si_status, Efault);
	ret = true;
done:
	user_write_access_end();
	return ret;
Efault:
	ret = false;
	goto done;
}

static int io_waitid_finish(struct io_kiocb *req, int ret)
{
	int signo = 0;

	if (ret > 0) {
		signo = SIGCHLD;
		ret = 0;
	}

	if (!io_waitid_copy_si(req, signo))
		ret = -EFAULT;
	io_waitid_free(req);
	return ret;
}

static void io_waitid_complete(struct io_kiocb *req, int ret)
{
	struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);

	/* anyone completing better be holding a reference */
	WARN_ON_ONCE(!(atomic_read(&iw->refs) & IO_WAITID_REF_MASK));

	lockdep_assert_held(&req->ctx->uring_lock);

	hlist_del_init(&req->hash_node);

	ret = io_waitid_finish(req, ret);
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
}

static bool __io_waitid_cancel(struct io_ring_ctx *ctx, struct io_kiocb *req)
{
	struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);
	struct io_waitid_async *iwa = req->async_data;

	/*
	 * Mark us canceled regardless of ownership. This will prevent a
	 * potential retry from a spurious wakeup.
	 */
	atomic_or(IO_WAITID_CANCEL_FLAG, &iw->refs);

	/* claim ownership */
	if (atomic_fetch_inc(&iw->refs) & IO_WAITID_REF_MASK)
		return false;

	spin_lock_irq(&iw->head->lock);
	list_del_init(&iwa->wo.child_wait.entry);
	spin_unlock_irq(&iw->head->lock);
	io_waitid_complete(req, -ECANCELED);
	io_req_queue_tw_complete(req, -ECANCELED);
	return true;
}

int io_waitid_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
		     unsigned int issue_flags)
{
	struct hlist_node *tmp;
	struct io_kiocb *req;
	int nr = 0;

	if (cd->flags & (IORING_ASYNC_CANCEL_FD|IORING_ASYNC_CANCEL_FD_FIXED))
		return -ENOENT;

	io_ring_submit_lock(ctx, issue_flags);
	hlist_for_each_entry_safe(req, tmp, &ctx->waitid_list, hash_node) {
		if (req->cqe.user_data != cd->data &&
		    !(cd->flags & IORING_ASYNC_CANCEL_ANY))
			continue;
		if (__io_waitid_cancel(ctx, req))
			nr++;
		if (!(cd->flags & IORING_ASYNC_CANCEL_ALL))
			break;
	}
	io_ring_submit_unlock(ctx, issue_flags);

	if (nr)
		return nr;

	return -ENOENT;
}

bool io_waitid_remove_all(struct io_ring_ctx *ctx, struct io_uring_task *tctx,
			  bool cancel_all)
{
	struct hlist_node *tmp;
	struct io_kiocb *req;
	bool found = false;

	lockdep_assert_held(&ctx->uring_lock);

	hlist_for_each_entry_safe(req, tmp, &ctx->waitid_list, hash_node) {
		if (!io_match_task_safe(req, tctx, cancel_all))
			continue;
		hlist_del_init(&req->hash_node);
		__io_waitid_cancel(ctx, req);
		found = true;
	}

	return found;
}

static inline bool io_waitid_drop_issue_ref(struct io_kiocb *req)
{
	struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);
	struct io_waitid_async *iwa = req->async_data;

	if (!atomic_sub_return(1, &iw->refs))
		return false;

	/*
	 * A wakeup triggered, racing with us. It was prevented from
	 * completing because of that, so queue up task work to do the
	 * completion.
	 */
	req->io_task_work.func = io_waitid_cb;
	io_req_task_work_add(req);
	remove_wait_queue(iw->head, &iwa->wo.child_wait);
	return true;
}

static void io_waitid_cb(struct io_kiocb *req, struct io_tw_state *ts)
{
	struct io_waitid_async *iwa = req->async_data;
	struct io_ring_ctx *ctx = req->ctx;
	int ret;

	io_tw_lock(ctx, ts);

	ret = __do_wait(&iwa->wo);

	/*
	 * If we get -ERESTARTSYS here, we need to re-arm and check again
	 * to ensure we get another callback. If the retry works, then we can
	 * just remove ourselves from the waitqueue again and finish the
	 * request.
	 */
	if (unlikely(ret == -ERESTARTSYS)) {
		struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);

		/* Don't retry if cancel found it meanwhile */
		ret = -ECANCELED;
		if (!(atomic_read(&iw->refs) & IO_WAITID_CANCEL_FLAG)) {
			iw->head = &current->signal->wait_chldexit;
			add_wait_queue(iw->head, &iwa->wo.child_wait);
			ret = __do_wait(&iwa->wo);
			if (ret == -ERESTARTSYS) {
				/* retry armed, drop our ref */
				io_waitid_drop_issue_ref(req);
				return;
			}

			remove_wait_queue(iw->head, &iwa->wo.child_wait);
		}
	}

	io_waitid_complete(req, ret);
	io_req_task_complete(req, ts);
}

static int io_waitid_wait(struct wait_queue_entry *wait, unsigned mode,
			  int sync, void *key)
{
	struct wait_opts *wo = container_of(wait, struct wait_opts, child_wait);
	struct io_waitid_async *iwa = container_of(wo, struct io_waitid_async, wo);
	struct io_kiocb *req = iwa->req;
	struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);
	struct task_struct *p = key;

	if (!pid_child_should_wake(wo, p))
		return 0;

	/* cancel is in progress */
	if (atomic_fetch_inc(&iw->refs) & IO_WAITID_REF_MASK)
		return 1;

	req->io_task_work.func = io_waitid_cb;
	io_req_task_work_add(req);
	list_del_init(&wait->entry);
	return 1;
}

int io_waitid_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);

	if (sqe->addr || sqe->buf_index || sqe->addr3 || sqe->waitid_flags)
		return -EINVAL;

	iw->which = READ_ONCE(sqe->len);
	iw->upid = READ_ONCE(sqe->fd);
	iw->options = READ_ONCE(sqe->file_index);
	iw->infop = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	return 0;
}

int io_waitid(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_waitid_async *iwa;
	int ret;

	if (io_alloc_async_data(req))
		return -ENOMEM;

	iwa = req->async_data;
	iwa->req = req;

	ret = kernel_waitid_prepare(&iwa->wo, iw->which, iw->upid, &iw->info,
					iw->options, NULL);
	if (ret)
		goto done;

	/*
	 * Mark the request as busy upfront, in case we're racing with the
	 * wakeup. If we are, then we'll notice when we drop this initial
	 * reference again after arming.
	 */
	atomic_set(&iw->refs, 1);

	/*
	 * Cancel must hold the ctx lock, so there's no risk of cancelation
	 * finding us until a) we are on the list, and b) the lock is
	 * dropped. We only need to worry about racing with the wakeup
	 * callback.
	 */
	io_ring_submit_lock(ctx, issue_flags);
	hlist_add_head(&req->hash_node, &ctx->waitid_list);

	init_waitqueue_func_entry(&iwa->wo.child_wait, io_waitid_wait);
	iwa->wo.child_wait.private = req->tctx->task;
	iw->head = &current->signal->wait_chldexit;
	add_wait_queue(iw->head, &iwa->wo.child_wait);

	ret = __do_wait(&iwa->wo);
	if (ret == -ERESTARTSYS) {
		/*
		 * Nobody else grabbed a reference; it'll complete when we get
		 * a waitqueue callback, or if someone cancels it.
		 */
		if (!io_waitid_drop_issue_ref(req)) {
			io_ring_submit_unlock(ctx, issue_flags);
			return IOU_ISSUE_SKIP_COMPLETE;
		}

		/*
		 * A wakeup triggered, racing with us. It was prevented from
		 * completing because of that; the task work queued above will
		 * do the completion.
		 */
		io_ring_submit_unlock(ctx, issue_flags);
		return IOU_ISSUE_SKIP_COMPLETE;
	}

	hlist_del_init(&req->hash_node);
	remove_wait_queue(iw->head, &iwa->wo.child_wait);
	ret = io_waitid_finish(req, ret);

	io_ring_submit_unlock(ctx, issue_flags);
done:
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}
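For reference, io_waitid_prep() above shows how the waitid arguments are packed into the SQE: the idtype in sqe->len ("which"), the id in sqe->fd ("upid"), the options in sqe->file_index, and the user siginfo pointer in sqe->addr2, while sqe->waitid_flags must currently be zero. Below is a minimal userspace sketch, assuming a liburing recent enough to provide io_uring_prep_waitid() (added around liburing 2.5), which performs exactly this packing. On success, cqe->res is 0 and the siginfo is filled in, mirroring io_waitid_finish() above.

/* Sketch only: reap a child via IORING_OP_WAITID. Assumes liburing >= 2.5
 * for io_uring_prep_waitid(); error handling kept minimal.
 */
#include <liburing.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	siginfo_t si;
	pid_t pid;

	if (io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	pid = fork();
	if (pid < 0)
		return 1;
	if (pid == 0)
		_exit(42);			/* child exits immediately */

	sqe = io_uring_get_sqe(&ring);
	/* packs: len=P_PID, fd=pid, file_index=WEXITED, addr2=&si, flags=0 */
	io_uring_prep_waitid(sqe, P_PID, pid, &si, WEXITED, 0);

	io_uring_submit(&ring);
	if (io_uring_wait_cqe(&ring, &cqe) == 0) {
		if (cqe->res == 0)	/* io_waitid_finish() returns 0 on success */
			printf("pid %d exited with status %d\n",
			       si.si_pid, si.si_status);
		else
			fprintf(stderr, "waitid failed: %d\n", cqe->res);
		io_uring_cqe_seen(&ring, cqe);
	}
	io_uring_queue_exit(&ring);
	return 0;
}

The older v6.8 version of the same file follows for comparison.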
v6.8
// SPDX-License-Identifier: GPL-2.0
/*
 * Support for async notification of waitid
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/compat.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "cancel.h"
#include "waitid.h"
#include "../kernel/exit.h"

static void io_waitid_cb(struct io_kiocb *req, struct io_tw_state *ts);

#define IO_WAITID_CANCEL_FLAG	BIT(31)
#define IO_WAITID_REF_MASK	GENMASK(30, 0)

struct io_waitid {
	struct file *file;
	int which;
	pid_t upid;
	int options;
	atomic_t refs;
	struct wait_queue_head *head;
	struct siginfo __user *infop;
	struct waitid_info info;
};

static void io_waitid_free(struct io_kiocb *req)
{
	struct io_waitid_async *iwa = req->async_data;

	put_pid(iwa->wo.wo_pid);
	kfree(req->async_data);
	req->async_data = NULL;
	req->flags &= ~REQ_F_ASYNC_DATA;
}

#ifdef CONFIG_COMPAT
static bool io_waitid_compat_copy_si(struct io_waitid *iw, int signo)
{
	struct compat_siginfo __user *infop;
	bool ret;

	infop = (struct compat_siginfo __user *) iw->infop;

	if (!user_write_access_begin(infop, sizeof(*infop)))
		return false;

	unsafe_put_user(signo, &infop->si_signo, Efault);
	unsafe_put_user(0, &infop->si_errno, Efault);
	unsafe_put_user(iw->info.cause, &infop->si_code, Efault);
	unsafe_put_user(iw->info.pid, &infop->si_pid, Efault);
	unsafe_put_user(iw->info.uid, &infop->si_uid, Efault);
	unsafe_put_user(iw->info.status, &infop->si_status, Efault);
	ret = true;
done:
	user_write_access_end();
	return ret;
Efault:
	ret = false;
	goto done;
}
#endif

static bool io_waitid_copy_si(struct io_kiocb *req, int signo)
{
	struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);
	bool ret;

	if (!iw->infop)
		return true;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		return io_waitid_compat_copy_si(iw, signo);
#endif

	if (!user_write_access_begin(iw->infop, sizeof(*iw->infop)))
		return false;

	unsafe_put_user(signo, &iw->infop->si_signo, Efault);
	unsafe_put_user(0, &iw->infop->si_errno, Efault);
	unsafe_put_user(iw->info.cause, &iw->infop->si_code, Efault);
	unsafe_put_user(iw->info.pid, &iw->infop->si_pid, Efault);
	unsafe_put_user(iw->info.uid, &iw->infop->si_uid, Efault);
	unsafe_put_user(iw->info.status, &iw->infop->si_status, Efault);
	ret = true;
done:
	user_write_access_end();
	return ret;
Efault:
	ret = false;
	goto done;
}

static int io_waitid_finish(struct io_kiocb *req, int ret)
{
	int signo = 0;

	if (ret > 0) {
		signo = SIGCHLD;
		ret = 0;
	}

	if (!io_waitid_copy_si(req, signo))
		ret = -EFAULT;
	io_waitid_free(req);
	return ret;
}

static void io_waitid_complete(struct io_kiocb *req, int ret)
{
	struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);
	struct io_tw_state ts = { .locked = true };

	/* anyone completing better be holding a reference */
	WARN_ON_ONCE(!(atomic_read(&iw->refs) & IO_WAITID_REF_MASK));

	lockdep_assert_held(&req->ctx->uring_lock);

	/*
	 * Did cancel find it meanwhile?
	 */
	if (hlist_unhashed(&req->hash_node))
		return;

	hlist_del_init(&req->hash_node);

	ret = io_waitid_finish(req, ret);
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	io_req_task_complete(req, &ts);
}

static bool __io_waitid_cancel(struct io_ring_ctx *ctx, struct io_kiocb *req)
{
	struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);
	struct io_waitid_async *iwa = req->async_data;

	/*
	 * Mark us canceled regardless of ownership. This will prevent a
	 * potential retry from a spurious wakeup.
	 */
	atomic_or(IO_WAITID_CANCEL_FLAG, &iw->refs);

	/* claim ownership */
	if (atomic_fetch_inc(&iw->refs) & IO_WAITID_REF_MASK)
		return false;

	spin_lock_irq(&iw->head->lock);
	list_del_init(&iwa->wo.child_wait.entry);
	spin_unlock_irq(&iw->head->lock);
	io_waitid_complete(req, -ECANCELED);
	return true;
}

int io_waitid_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
		     unsigned int issue_flags)
{
	struct hlist_node *tmp;
	struct io_kiocb *req;
	int nr = 0;

	if (cd->flags & (IORING_ASYNC_CANCEL_FD|IORING_ASYNC_CANCEL_FD_FIXED))
		return -ENOENT;

	io_ring_submit_lock(ctx, issue_flags);
	hlist_for_each_entry_safe(req, tmp, &ctx->waitid_list, hash_node) {
		if (req->cqe.user_data != cd->data &&
		    !(cd->flags & IORING_ASYNC_CANCEL_ANY))
			continue;
		if (__io_waitid_cancel(ctx, req))
			nr++;
		if (!(cd->flags & IORING_ASYNC_CANCEL_ALL))
			break;
	}
	io_ring_submit_unlock(ctx, issue_flags);

	if (nr)
		return nr;

	return -ENOENT;
}

bool io_waitid_remove_all(struct io_ring_ctx *ctx, struct task_struct *task,
			  bool cancel_all)
{
	struct hlist_node *tmp;
	struct io_kiocb *req;
	bool found = false;

	lockdep_assert_held(&ctx->uring_lock);

	hlist_for_each_entry_safe(req, tmp, &ctx->waitid_list, hash_node) {
		if (!io_match_task_safe(req, task, cancel_all))
			continue;
		__io_waitid_cancel(ctx, req);
		found = true;
	}

	return found;
}

static inline bool io_waitid_drop_issue_ref(struct io_kiocb *req)
{
	struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);
	struct io_waitid_async *iwa = req->async_data;

	if (!atomic_sub_return(1, &iw->refs))
		return false;

	/*
	 * A wakeup triggered, racing with us. It was prevented from
	 * completing because of that, so queue up task work to do the
	 * completion.
	 */
	req->io_task_work.func = io_waitid_cb;
	io_req_task_work_add(req);
	remove_wait_queue(iw->head, &iwa->wo.child_wait);
	return true;
}

static void io_waitid_cb(struct io_kiocb *req, struct io_tw_state *ts)
{
	struct io_waitid_async *iwa = req->async_data;
	struct io_ring_ctx *ctx = req->ctx;
	int ret;

	io_tw_lock(ctx, ts);

	ret = __do_wait(&iwa->wo);

	/*
	 * If we get -ERESTARTSYS here, we need to re-arm and check again
	 * to ensure we get another callback. If the retry works, then we can
	 * just remove ourselves from the waitqueue again and finish the
	 * request.
	 */
	if (unlikely(ret == -ERESTARTSYS)) {
		struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);

		/* Don't retry if cancel found it meanwhile */
		ret = -ECANCELED;
		if (!(atomic_read(&iw->refs) & IO_WAITID_CANCEL_FLAG)) {
			iw->head = &current->signal->wait_chldexit;
			add_wait_queue(iw->head, &iwa->wo.child_wait);
			ret = __do_wait(&iwa->wo);
			if (ret == -ERESTARTSYS) {
				/* retry armed, drop our ref */
				io_waitid_drop_issue_ref(req);
				return;
			}

			remove_wait_queue(iw->head, &iwa->wo.child_wait);
		}
	}

	io_waitid_complete(req, ret);
}

static int io_waitid_wait(struct wait_queue_entry *wait, unsigned mode,
			  int sync, void *key)
{
	struct wait_opts *wo = container_of(wait, struct wait_opts, child_wait);
	struct io_waitid_async *iwa = container_of(wo, struct io_waitid_async, wo);
	struct io_kiocb *req = iwa->req;
	struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);
	struct task_struct *p = key;

	if (!pid_child_should_wake(wo, p))
		return 0;

	/* cancel is in progress */
	if (atomic_fetch_inc(&iw->refs) & IO_WAITID_REF_MASK)
		return 1;

	req->io_task_work.func = io_waitid_cb;
	io_req_task_work_add(req);
	list_del_init(&wait->entry);
	return 1;
}

int io_waitid_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);

	if (sqe->addr || sqe->buf_index || sqe->addr3 || sqe->waitid_flags)
		return -EINVAL;

	iw->which = READ_ONCE(sqe->len);
	iw->upid = READ_ONCE(sqe->fd);
	iw->options = READ_ONCE(sqe->file_index);
	iw->infop = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	return 0;
}

int io_waitid(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_waitid_async *iwa;
	int ret;

	if (io_alloc_async_data(req))
		return -ENOMEM;

	iwa = req->async_data;
	iwa->req = req;

	ret = kernel_waitid_prepare(&iwa->wo, iw->which, iw->upid, &iw->info,
					iw->options, NULL);
	if (ret)
		goto done;

	/*
	 * Mark the request as busy upfront, in case we're racing with the
	 * wakeup. If we are, then we'll notice when we drop this initial
	 * reference again after arming.
	 */
	atomic_set(&iw->refs, 1);

	/*
	 * Cancel must hold the ctx lock, so there's no risk of cancelation
	 * finding us until a) we are on the list, and b) the lock is
	 * dropped. We only need to worry about racing with the wakeup
	 * callback.
	 */
	io_ring_submit_lock(ctx, issue_flags);
	hlist_add_head(&req->hash_node, &ctx->waitid_list);

	init_waitqueue_func_entry(&iwa->wo.child_wait, io_waitid_wait);
	iwa->wo.child_wait.private = req->task;
	iw->head = &current->signal->wait_chldexit;
	add_wait_queue(iw->head, &iwa->wo.child_wait);

	ret = __do_wait(&iwa->wo);
	if (ret == -ERESTARTSYS) {
		/*
		 * Nobody else grabbed a reference; it'll complete when we get
		 * a waitqueue callback, or if someone cancels it.
		 */
		if (!io_waitid_drop_issue_ref(req)) {
			io_ring_submit_unlock(ctx, issue_flags);
			return IOU_ISSUE_SKIP_COMPLETE;
		}

		/*
		 * A wakeup triggered, racing with us. It was prevented from
		 * completing because of that; the task work queued above will
		 * do the completion.
		 */
		io_ring_submit_unlock(ctx, issue_flags);
		return IOU_ISSUE_SKIP_COMPLETE;
	}

	hlist_del_init(&req->hash_node);
	remove_wait_queue(iw->head, &iwa->wo.child_wait);
	ret = io_waitid_finish(req, ret);

	io_ring_submit_unlock(ctx, issue_flags);
done:
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}
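Both versions rely on the same ownership protocol around iw->refs: IO_WAITID_CANCEL_FLAG occupies bit 31, the low 31 bits (IO_WAITID_REF_MASK) count references, and whichever path bumps the count from zero owns completion; a path that loses the race punts completion to task work instead. The standalone sketch below illustrates that idiom using C11 atomics in place of the kernel's atomic_t; all names in it are illustrative, not kernel API.

/* Illustrative sketch of the refcount/ownership idiom above, in userspace
 * C11 atomics. Names here are made up for illustration.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define CANCEL_FLAG	(1u << 31)
#define REF_MASK	(CANCEL_FLAG - 1)	/* bits 30:0, like IO_WAITID_REF_MASK */

struct waitid_state {
	atomic_uint refs;
};

/* Mirrors the "claim ownership" check: true only for the first claimant. */
static bool claim_ownership(struct waitid_state *s)
{
	return !(atomic_fetch_add(&s->refs, 1) & REF_MASK);
}

/* Mirrors atomic_or(IO_WAITID_CANCEL_FLAG, &iw->refs) in __io_waitid_cancel(). */
static void mark_canceled(struct waitid_state *s)
{
	atomic_fetch_or(&s->refs, CANCEL_FLAG);
}

int main(void)
{
	struct waitid_state s;

	/* the issue path marks the request busy upfront, as io_waitid() does */
	atomic_init(&s.refs, 1);

	/* a wakeup racing with issue fails to claim ownership... */
	printf("wakeup owns completion: %d\n", claim_ownership(&s));	/* 0 */

	/*
	 * ...and the issue path sees a nonzero count when dropping its own
	 * reference (as in io_waitid_drop_issue_ref()), telling it a wakeup
	 * raced and completion must be punted to task work.
	 */
	printf("refs left after issue drop: %u\n",
	       atomic_fetch_sub(&s.refs, 1) - 1);			/* 1 */

	/* cancel sets the flag so a spurious retry won't re-arm the wait */
	mark_canceled(&s);
	printf("cancel flag set: %d\n",
	       !!(atomic_load(&s.refs) & CANCEL_FLAG));			/* 1 */
	return 0;
}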