// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/hashtable.h>
#include <linux/io_uring.h>

#include <trace/events/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "refs.h"
#include "opdef.h"
#include "kbuf.h"
#include "poll.h"
#include "cancel.h"

struct io_poll_update {
        struct file *file;
        u64 old_user_data;
        u64 new_user_data;
        __poll_t events;
        bool update_events;
        bool update_user_data;
};

struct io_poll_table {
        struct poll_table_struct pt;
        struct io_kiocb *req;
        int nr_entries;
        int error;
        bool owning;
        /* output value, set only if arm poll returns >0 */
        __poll_t result_mask;
};

#define IO_POLL_CANCEL_FLAG     BIT(31)
#define IO_POLL_RETRY_FLAG      BIT(30)
#define IO_POLL_REF_MASK        GENMASK(29, 0)

/*
 * We usually have 1-2 refs taken, 128 is more than enough and we want to
 * maximise the margin between this amount and the moment when it overflows.
 */
#define IO_POLL_REF_BIAS        128

#define IO_WQE_F_DOUBLE         1
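
/*
 * Illustrative sketch, not part of the original file: ->poll_refs packs
 * two flag bits above a 30-bit ownership refcount. The helper below is
 * hypothetical and only demonstrates at compile time that the masks are
 * disjoint and the slowpath bias sits well below the counter's capacity;
 * it generates no code.
 */
static inline void io_poll_refs_layout_check(void)
{
        /* flag bits must sit outside the refcount mask */
        BUILD_BUG_ON(IO_POLL_CANCEL_FLAG & IO_POLL_REF_MASK);
        BUILD_BUG_ON(IO_POLL_RETRY_FLAG & IO_POLL_REF_MASK);
        /* the slowpath trigger must be far below the refcount's capacity */
        BUILD_BUG_ON(IO_POLL_REF_BIAS >= IO_POLL_REF_MASK);
}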

static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
                        void *key);

static inline struct io_kiocb *wqe_to_req(struct wait_queue_entry *wqe)
{
        unsigned long priv = (unsigned long)wqe->private;

        return (struct io_kiocb *)(priv & ~IO_WQE_F_DOUBLE);
}

static inline bool wqe_is_double(struct wait_queue_entry *wqe)
{
        unsigned long priv = (unsigned long)wqe->private;

        return priv & IO_WQE_F_DOUBLE;
}
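
/*
 * Illustrative sketch, not part of the original file: the private pointer
 * round-trips through the tag in bit 0 because struct io_kiocb is pointer
 * aligned. io_wqe_pack() is a hypothetical counterpart of wqe_to_req() and
 * wqe_is_double() above; the real packing happens inline in
 * __io_queue_proc().
 */
static inline void *io_wqe_pack(struct io_kiocb *req, bool is_double)
{
        unsigned long priv = (unsigned long)req;

        if (is_double)
                priv |= IO_WQE_F_DOUBLE;
        return (void *)priv;
}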

static bool io_poll_get_ownership_slowpath(struct io_kiocb *req)
{
        int v;

        /*
         * poll_refs are already elevated and we don't have much hope for
         * grabbing the ownership. Instead of incrementing, set a retry flag
         * to notify the loop that there might have been some change.
         */
        v = atomic_fetch_or(IO_POLL_RETRY_FLAG, &req->poll_refs);
        if (v & IO_POLL_REF_MASK)
                return false;
        return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
}

/*
 * If the refs part of ->poll_refs (see IO_POLL_REF_MASK) is 0, the request is
 * free and we can bump the count to acquire ownership. Modifying the request
 * while not owning it is disallowed, which prevents races both when enqueueing
 * task_work and between arming poll and wakeups.
 */
static inline bool io_poll_get_ownership(struct io_kiocb *req)
{
        if (unlikely(atomic_read(&req->poll_refs) >= IO_POLL_REF_BIAS))
                return io_poll_get_ownership_slowpath(req);
        return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
}

static void io_poll_mark_cancelled(struct io_kiocb *req)
{
        atomic_or(IO_POLL_CANCEL_FLAG, &req->poll_refs);
}

static struct io_poll *io_poll_get_double(struct io_kiocb *req)
{
        /* pure poll stashes this in ->async_data, poll driven retry elsewhere */
        if (req->opcode == IORING_OP_POLL_ADD)
                return req->async_data;
        return req->apoll->double_poll;
}

static struct io_poll *io_poll_get_single(struct io_kiocb *req)
{
        if (req->opcode == IORING_OP_POLL_ADD)
                return io_kiocb_to_cmd(req, struct io_poll);
        return &req->apoll->poll;
}

static void io_poll_req_insert(struct io_kiocb *req)
{
        struct io_hash_table *table = &req->ctx->cancel_table;
        u32 index = hash_long(req->cqe.user_data, table->hash_bits);
        struct io_hash_bucket *hb = &table->hbs[index];

        spin_lock(&hb->lock);
        hlist_add_head(&req->hash_node, &hb->list);
        spin_unlock(&hb->lock);
}

static void io_poll_req_delete(struct io_kiocb *req, struct io_ring_ctx *ctx)
{
        struct io_hash_table *table = &req->ctx->cancel_table;
        u32 index = hash_long(req->cqe.user_data, table->hash_bits);
        spinlock_t *lock = &table->hbs[index].lock;

        spin_lock(lock);
        hash_del(&req->hash_node);
        spin_unlock(lock);
}

static void io_poll_req_insert_locked(struct io_kiocb *req)
{
        struct io_hash_table *table = &req->ctx->cancel_table_locked;
        u32 index = hash_long(req->cqe.user_data, table->hash_bits);

        lockdep_assert_held(&req->ctx->uring_lock);

        hlist_add_head(&req->hash_node, &table->hbs[index].list);
}

static void io_poll_tw_hash_eject(struct io_kiocb *req, struct io_tw_state *ts)
{
        struct io_ring_ctx *ctx = req->ctx;

        if (req->flags & REQ_F_HASH_LOCKED) {
                /*
                 * ->cancel_table_locked is protected by ->uring_lock in
                 * contrast to per bucket spinlocks. Likely, tctx_task_work()
                 * already grabbed the mutex for us, but there is a chance it
                 * failed.
                 */
                io_tw_lock(ctx, ts);
                hash_del(&req->hash_node);
                req->flags &= ~REQ_F_HASH_LOCKED;
        } else {
                io_poll_req_delete(req, ctx);
        }
}

static void io_init_poll_iocb(struct io_poll *poll, __poll_t events)
{
        poll->head = NULL;
#define IO_POLL_UNMASK  (EPOLLERR|EPOLLHUP|EPOLLNVAL|EPOLLRDHUP)
        /* mask in events that we always want/need */
        poll->events = events | IO_POLL_UNMASK;
        INIT_LIST_HEAD(&poll->wait.entry);
        init_waitqueue_func_entry(&poll->wait, io_poll_wake);
}

static inline void io_poll_remove_entry(struct io_poll *poll)
{
        struct wait_queue_head *head = smp_load_acquire(&poll->head);

        if (head) {
                spin_lock_irq(&head->lock);
                list_del_init(&poll->wait.entry);
                poll->head = NULL;
                spin_unlock_irq(&head->lock);
        }
}

static void io_poll_remove_entries(struct io_kiocb *req)
{
        /*
         * Nothing to do if neither of those flags are set. Avoid dipping
         * into the poll/apoll/double cachelines if we can.
         */
        if (!(req->flags & (REQ_F_SINGLE_POLL | REQ_F_DOUBLE_POLL)))
                return;

        /*
         * While we hold the waitqueue lock and the waitqueue is nonempty,
         * wake_up_pollfree() will wait for us. However, taking the waitqueue
         * lock in the first place can race with the waitqueue being freed.
         *
         * We solve this as eventpoll does: by taking advantage of the fact that
         * all users of wake_up_pollfree() will RCU-delay the actual free. If
         * we enter rcu_read_lock() and see that the pointer to the queue is
         * non-NULL, we can then lock it without the memory being freed out from
         * under us.
         *
         * Keep holding rcu_read_lock() as long as we hold the queue lock, in
         * case the caller deletes the entry from the queue, leaving it empty.
         * In that case, only RCU prevents the queue memory from being freed.
         */
        rcu_read_lock();
        if (req->flags & REQ_F_SINGLE_POLL)
                io_poll_remove_entry(io_poll_get_single(req));
        if (req->flags & REQ_F_DOUBLE_POLL)
                io_poll_remove_entry(io_poll_get_double(req));
        rcu_read_unlock();
}
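
/*
 * Illustrative sketch, not part of the original file: the other half of the
 * contract described above. A driver that tears down a waitqueue early (see
 * the wake_up_pollfree() callers, e.g. signalfd) must RCU-delay the free,
 * roughly:
 *
 *      wake_up_pollfree(&obj->wqh);
 *      kfree_rcu(obj, rcu);    // free only after an RCU grace period
 *
 * which is what makes the rcu_read_lock() + smp_load_acquire() dance in
 * io_poll_remove_entry() safe.
 */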

enum {
        IOU_POLL_DONE = 0,
        IOU_POLL_NO_ACTION = 1,
        IOU_POLL_REMOVE_POLL_USE_RES = 2,
        IOU_POLL_REISSUE = 3,
        IOU_POLL_REQUEUE = 4,
};

static void __io_poll_execute(struct io_kiocb *req, int mask)
{
        unsigned flags = 0;

        io_req_set_res(req, mask, 0);
        req->io_task_work.func = io_poll_task_func;

        trace_io_uring_task_add(req, mask);

        if (!(req->flags & REQ_F_POLL_NO_LAZY))
                flags = IOU_F_TWQ_LAZY_WAKE;
        __io_req_task_work_add(req, flags);
}

static inline void io_poll_execute(struct io_kiocb *req, int res)
{
        if (io_poll_get_ownership(req))
                __io_poll_execute(req, res);
}

/*
 * All poll tw should go through this. Checks for poll events, manages
 * references, does rewait, etc.
 *
 * Returns a negative error on failure. IOU_POLL_NO_ACTION when no action is
 * required, i.e. on a spurious wakeup or when a multishot CQE has already
 * been served. IOU_POLL_DONE when it's done with the request, with the mask
 * stored in req->cqe.res. IOU_POLL_REMOVE_POLL_USE_RES indicates that the
 * multishot poll should be removed and that the result is stored in req->cqe.
 * IOU_POLL_REQUEUE tells the caller to requeue the task_work and run the
 * handler again.
 */
static int io_poll_check_events(struct io_kiocb *req, struct io_tw_state *ts)
{
        int v;

        /* req->task == current here, checking PF_EXITING is safe */
        if (unlikely(req->task->flags & PF_EXITING))
                return -ECANCELED;

        do {
                v = atomic_read(&req->poll_refs);

                if (unlikely(v != 1)) {
                        /* tw should be the owner and so have some refs */
                        if (WARN_ON_ONCE(!(v & IO_POLL_REF_MASK)))
                                return IOU_POLL_NO_ACTION;
                        if (v & IO_POLL_CANCEL_FLAG)
                                return -ECANCELED;
                        /*
                         * cqe.res contains only the events of the first
                         * wakeup; all others would be lost. Redo vfs_poll()
                         * to get an up to date state.
                         */
                        if ((v & IO_POLL_REF_MASK) != 1)
                                req->cqe.res = 0;

                        if (v & IO_POLL_RETRY_FLAG) {
                                req->cqe.res = 0;
                                /*
                                 * We won't find new events that came in between
                                 * vfs_poll and the ref put unless we clear the
                                 * flag in advance.
                                 */
                                atomic_andnot(IO_POLL_RETRY_FLAG, &req->poll_refs);
                                v &= ~IO_POLL_RETRY_FLAG;
                        }
                }

                /* the mask was stashed in __io_poll_execute */
                if (!req->cqe.res) {
                        struct poll_table_struct pt = { ._key = req->apoll_events };
                        req->cqe.res = vfs_poll(req->file, &pt) & req->apoll_events;
                        /*
                         * We got woken with a mask, but someone else got to
                         * it first. The above vfs_poll() doesn't add us back
                         * to the waitqueue, so if we get nothing back, we
                         * should be safe and attempt a reissue.
                         */
                        if (unlikely(!req->cqe.res)) {
                                /* Multishot armed need not reissue */
                                if (!(req->apoll_events & EPOLLONESHOT))
                                        continue;
                                return IOU_POLL_REISSUE;
                        }
                }
                if (req->apoll_events & EPOLLONESHOT)
                        return IOU_POLL_DONE;

                /* multishot, just fill a CQE and proceed */
                if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
                        __poll_t mask = mangle_poll(req->cqe.res &
                                                    req->apoll_events);

                        if (!io_fill_cqe_req_aux(req, ts->locked, mask,
                                                 IORING_CQE_F_MORE)) {
                                io_req_set_res(req, mask, 0);
                                return IOU_POLL_REMOVE_POLL_USE_RES;
                        }
                } else {
                        int ret = io_poll_issue(req, ts);
                        if (ret == IOU_STOP_MULTISHOT)
                                return IOU_POLL_REMOVE_POLL_USE_RES;
                        else if (ret == IOU_REQUEUE)
                                return IOU_POLL_REQUEUE;
                        if (ret < 0)
                                return ret;
                }

                /* force the next iteration to vfs_poll() */
                req->cqe.res = 0;

                /*
                 * Release all references, retry if someone tried to restart
                 * task_work while we were executing it.
                 */
        } while (atomic_sub_return(v & IO_POLL_REF_MASK, &req->poll_refs) &
                                        IO_POLL_REF_MASK);

        return IOU_POLL_NO_ACTION;
}
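
/*
 * Illustrative walk-through, not part of the original file: suppose the tw
 * owner reads v == 3 above (its own ref plus two coalesced wakeups). It
 * consumes the pending events, then atomic_sub_return() drops those 3 refs.
 * If yet another wakeup slipped in after the atomic_read(), the subtraction
 * leaves a non-zero refcount and the loop runs again and re-polls; otherwise
 * the count hits 0 and ownership is released.
 */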

void io_poll_task_func(struct io_kiocb *req, struct io_tw_state *ts)
{
        int ret;

        ret = io_poll_check_events(req, ts);
        if (ret == IOU_POLL_NO_ACTION) {
                return;
        } else if (ret == IOU_POLL_REQUEUE) {
                __io_poll_execute(req, 0);
                return;
        }
        io_poll_remove_entries(req);
        io_poll_tw_hash_eject(req, ts);

        if (req->opcode == IORING_OP_POLL_ADD) {
                if (ret == IOU_POLL_DONE) {
                        struct io_poll *poll;

                        poll = io_kiocb_to_cmd(req, struct io_poll);
                        req->cqe.res = mangle_poll(req->cqe.res & poll->events);
                } else if (ret == IOU_POLL_REISSUE) {
                        io_req_task_submit(req, ts);
                        return;
                } else if (ret != IOU_POLL_REMOVE_POLL_USE_RES) {
                        req->cqe.res = ret;
                        req_set_fail(req);
                }

                io_req_set_res(req, req->cqe.res, 0);
                io_req_task_complete(req, ts);
        } else {
                io_tw_lock(req->ctx, ts);

                if (ret == IOU_POLL_REMOVE_POLL_USE_RES)
                        io_req_task_complete(req, ts);
                else if (ret == IOU_POLL_DONE || ret == IOU_POLL_REISSUE)
                        io_req_task_submit(req, ts);
                else
                        io_req_defer_failed(req, ret);
        }
}

static void io_poll_cancel_req(struct io_kiocb *req)
{
        io_poll_mark_cancelled(req);
        /* kick tw, which should complete the request */
        io_poll_execute(req, 0);
}

#define IO_ASYNC_POLL_COMMON    (EPOLLONESHOT | EPOLLPRI)

static __cold int io_pollfree_wake(struct io_kiocb *req, struct io_poll *poll)
{
        io_poll_mark_cancelled(req);
        /* we have to kick tw in case it's not already */
        io_poll_execute(req, 0);

        /*
         * If the waitqueue is being freed early but someone already holds
         * ownership over it, we have to tear down the request as best we
         * can. That means immediately removing the request from its
         * waitqueue and preventing all further accesses to the waitqueue
         * via the request.
         */
        list_del_init(&poll->wait.entry);

        /*
         * Careful: this *must* be the last step, since as soon as
         * poll->head is NULL'ed out, the request can be completed and
         * freed, since the completion task_work will no longer need to
         * take the waitqueue lock.
         */
        smp_store_release(&poll->head, NULL);
        return 1;
}

static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
                        void *key)
{
        struct io_kiocb *req = wqe_to_req(wait);
        struct io_poll *poll = container_of(wait, struct io_poll, wait);
        __poll_t mask = key_to_poll(key);

        if (unlikely(mask & POLLFREE))
                return io_pollfree_wake(req, poll);

        /* for instances that support it check for an event match first */
        if (mask && !(mask & (poll->events & ~IO_ASYNC_POLL_COMMON)))
                return 0;

        if (io_poll_get_ownership(req)) {
                /*
                 * If we trigger a multishot poll off our own wakeup path,
                 * disable multishot as there is a circular dependency between
                 * CQ posting and triggering the event.
                 */
                if (mask & EPOLL_URING_WAKE)
                        poll->events |= EPOLLONESHOT;

                /* optional, saves extra locking for removal in tw handler */
                if (mask && poll->events & EPOLLONESHOT) {
                        list_del_init(&poll->wait.entry);
                        poll->head = NULL;
                        if (wqe_is_double(wait))
                                req->flags &= ~REQ_F_DOUBLE_POLL;
                        else
                                req->flags &= ~REQ_F_SINGLE_POLL;
                }
                __io_poll_execute(req, mask);
        }
        return 1;
}

/* fails only when polling is already being completed via the first entry */
static bool io_poll_double_prepare(struct io_kiocb *req)
{
        struct wait_queue_head *head;
        struct io_poll *poll = io_poll_get_single(req);

        /* head is RCU protected, see io_poll_remove_entries() comments */
        rcu_read_lock();
        head = smp_load_acquire(&poll->head);
        /*
         * poll arm might not hold ownership and so race for req->flags with
         * io_poll_wake(). There is only one poll entry queued, serialise with
         * it by taking its head lock. As we're still arming, the tw handler
         * is not going to be run, so there are no races with it.
         */
        if (head) {
                spin_lock_irq(&head->lock);
                req->flags |= REQ_F_DOUBLE_POLL;
                if (req->opcode == IORING_OP_POLL_ADD)
                        req->flags |= REQ_F_ASYNC_DATA;
                spin_unlock_irq(&head->lock);
        }
        rcu_read_unlock();
        return !!head;
}

static void __io_queue_proc(struct io_poll *poll, struct io_poll_table *pt,
                            struct wait_queue_head *head,
                            struct io_poll **poll_ptr)
{
        struct io_kiocb *req = pt->req;
        unsigned long wqe_private = (unsigned long) req;

        /*
         * The file being polled uses multiple waitqueues for poll handling
         * (e.g. one for read, one for write). Set up a separate io_poll
         * if this happens.
         */
        if (unlikely(pt->nr_entries)) {
                struct io_poll *first = poll;

                /* double add on the same waitqueue head, ignore */
                if (first->head == head)
                        return;
                /* already have a 2nd entry, fail a third attempt */
                if (*poll_ptr) {
                        if ((*poll_ptr)->head == head)
                                return;
                        pt->error = -EINVAL;
                        return;
                }

                poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
                if (!poll) {
                        pt->error = -ENOMEM;
                        return;
                }

                /* mark as double wq entry */
                wqe_private |= IO_WQE_F_DOUBLE;
                io_init_poll_iocb(poll, first->events);
                if (!io_poll_double_prepare(req)) {
                        /* the request is completing, just back off */
                        kfree(poll);
                        return;
                }
                *poll_ptr = poll;
        } else {
                /* fine to modify, there is no poll queued to race with us */
                req->flags |= REQ_F_SINGLE_POLL;
        }

        pt->nr_entries++;
        poll->head = head;
        poll->wait.private = (void *) wqe_private;

        if (poll->events & EPOLLEXCLUSIVE) {
                /*
                 * Exclusive waits may only wake a limited number of entries
                 * rather than all of them; this may interfere with lazy
                 * wake if someone does wait(events > 1). Ensure we don't do
                 * lazy wake for those, as we need to process each one as they
                 * come in.
                 */
                req->flags |= REQ_F_POLL_NO_LAZY;
                add_wait_queue_exclusive(head, &poll->wait);
        } else {
                add_wait_queue(head, &poll->wait);
        }
}
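
/*
 * Illustrative sketch, not part of the original file: the double entry above
 * exists for files whose ->poll() registers on two waitqueues, e.g. a pipe
 * with separate reader/writer queues. A hypothetical handler of that shape
 * looks roughly like:
 *
 *      static __poll_t demo_poll(struct file *file, poll_table *pt)
 *      {
 *              struct demo_dev *d = file->private_data;
 *
 *              poll_wait(file, &d->rd_wait, pt);
 *              poll_wait(file, &d->wr_wait, pt);
 *              return demo_event_mask(d);
 *      }
 *
 * Each poll_wait() call lands in __io_queue_proc() via pt->_qproc, so the
 * second call takes the pt->nr_entries branch.
 */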

static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
                               struct poll_table_struct *p)
{
        struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
        struct io_poll *poll = io_kiocb_to_cmd(pt->req, struct io_poll);

        __io_queue_proc(poll, pt, head,
                        (struct io_poll **) &pt->req->async_data);
}

static bool io_poll_can_finish_inline(struct io_kiocb *req,
                                      struct io_poll_table *pt)
{
        return pt->owning || io_poll_get_ownership(req);
}

static void io_poll_add_hash(struct io_kiocb *req)
{
        if (req->flags & REQ_F_HASH_LOCKED)
                io_poll_req_insert_locked(req);
        else
                io_poll_req_insert(req);
}

/*
 * Returns 0 when it's handed over for polling. The caller owns the request if
 * it returns non-zero, but otherwise should not touch it. Negative values
 * contain an error code. When the result is >0, the polling has completed
 * inline and ipt.result_mask is set to the mask.
 */
static int __io_arm_poll_handler(struct io_kiocb *req,
                                 struct io_poll *poll,
                                 struct io_poll_table *ipt, __poll_t mask,
                                 unsigned issue_flags)
{
        struct io_ring_ctx *ctx = req->ctx;

        INIT_HLIST_NODE(&req->hash_node);
        req->work.cancel_seq = atomic_read(&ctx->cancel_seq);
        io_init_poll_iocb(poll, mask);
        poll->file = req->file;
        req->apoll_events = poll->events;

        ipt->pt._key = mask;
        ipt->req = req;
        ipt->error = 0;
        ipt->nr_entries = 0;
        /*
         * Polling is either completed here or via task_work, so if we're in the
         * task context we're naturally serialised with tw by merit of running
         * the same task. When it's io-wq, take the ownership to prevent tw
         * from running. However, when we're in the task context, skip taking
         * it as an optimisation.
         *
         * Note: even though the request won't be completed/freed, without
         * ownership we still can race with io_poll_wake().
         * io_poll_can_finish_inline() tries to deal with that.
         */
        ipt->owning = issue_flags & IO_URING_F_UNLOCKED;
        atomic_set(&req->poll_refs, (int)ipt->owning);

        /* io-wq doesn't hold uring_lock */
        if (issue_flags & IO_URING_F_UNLOCKED)
                req->flags &= ~REQ_F_HASH_LOCKED;

        mask = vfs_poll(req->file, &ipt->pt) & poll->events;

        if (unlikely(ipt->error || !ipt->nr_entries)) {
                io_poll_remove_entries(req);

                if (!io_poll_can_finish_inline(req, ipt)) {
                        io_poll_mark_cancelled(req);
                        return 0;
                } else if (mask && (poll->events & EPOLLET)) {
                        ipt->result_mask = mask;
                        return 1;
                }
                return ipt->error ?: -EINVAL;
        }

        if (mask &&
           ((poll->events & (EPOLLET|EPOLLONESHOT)) == (EPOLLET|EPOLLONESHOT))) {
                if (!io_poll_can_finish_inline(req, ipt)) {
                        io_poll_add_hash(req);
                        return 0;
                }
                io_poll_remove_entries(req);
                ipt->result_mask = mask;
                /* no one else has access to the req, forget about the ref */
                return 1;
        }

        io_poll_add_hash(req);

        if (mask && (poll->events & EPOLLET) &&
            io_poll_can_finish_inline(req, ipt)) {
                __io_poll_execute(req, mask);
                return 0;
        }

        if (ipt->owning) {
                /*
                 * Try to release ownership. If we see a change of state, e.g.
                 * the poll was woken up, queue up a tw; it'll deal with it.
                 */
                if (atomic_cmpxchg(&req->poll_refs, 1, 0) != 1)
                        __io_poll_execute(req, 0);
        }
        return 0;
}

static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
                               struct poll_table_struct *p)
{
        struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
        struct async_poll *apoll = pt->req->apoll;

        __io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
}

/*
 * We can't reliably detect loops where a poll trigger repeatedly fires and
 * the subsequent issue keeps failing. Rather than failing these immediately,
 * allow a certain amount of retries before we give up. Given that this
 * condition should _rarely_ trigger even once, we should be fine with a
 * larger value.
 */
#define APOLL_MAX_RETRY 128

static struct async_poll *io_req_alloc_apoll(struct io_kiocb *req,
                                             unsigned issue_flags)
{
        struct io_ring_ctx *ctx = req->ctx;
        struct io_cache_entry *entry;
        struct async_poll *apoll;

        if (req->flags & REQ_F_POLLED) {
                apoll = req->apoll;
                kfree(apoll->double_poll);
        } else if (!(issue_flags & IO_URING_F_UNLOCKED)) {
                entry = io_alloc_cache_get(&ctx->apoll_cache);
                if (entry == NULL)
                        goto alloc_apoll;
                apoll = container_of(entry, struct async_poll, cache);
                apoll->poll.retries = APOLL_MAX_RETRY;
        } else {
alloc_apoll:
                apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
                if (unlikely(!apoll))
                        return NULL;
                apoll->poll.retries = APOLL_MAX_RETRY;
        }
        apoll->double_poll = NULL;
        req->apoll = apoll;
        if (unlikely(!--apoll->poll.retries))
                return NULL;
        return apoll;
}

int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
{
        const struct io_issue_def *def = &io_issue_defs[req->opcode];
        struct async_poll *apoll;
        struct io_poll_table ipt;
        __poll_t mask = POLLPRI | POLLERR | EPOLLET;
        int ret;

        /*
         * apoll requests already grab the mutex to complete in the tw handler,
         * so removal from the mutex-backed hash is free, use it by default.
         */
        req->flags |= REQ_F_HASH_LOCKED;

        if (!def->pollin && !def->pollout)
                return IO_APOLL_ABORTED;
        if (!file_can_poll(req->file))
                return IO_APOLL_ABORTED;
        if (!(req->flags & REQ_F_APOLL_MULTISHOT))
                mask |= EPOLLONESHOT;

        if (def->pollin) {
                mask |= EPOLLIN | EPOLLRDNORM;

                /* If reading from MSG_ERRQUEUE using recvmsg, ignore POLLIN */
                if (req->flags & REQ_F_CLEAR_POLLIN)
                        mask &= ~EPOLLIN;
        } else {
                mask |= EPOLLOUT | EPOLLWRNORM;
        }
        if (def->poll_exclusive)
                mask |= EPOLLEXCLUSIVE;

        apoll = io_req_alloc_apoll(req, issue_flags);
        if (!apoll)
                return IO_APOLL_ABORTED;
        req->flags &= ~(REQ_F_SINGLE_POLL | REQ_F_DOUBLE_POLL);
        req->flags |= REQ_F_POLLED;
        ipt.pt._qproc = io_async_queue_proc;

        io_kbuf_recycle(req, issue_flags);

        ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask, issue_flags);
        if (ret)
                return ret > 0 ? IO_APOLL_READY : IO_APOLL_ABORTED;
        trace_io_uring_poll_arm(req, mask, apoll->poll.events);
        return IO_APOLL_OK;
}

static __cold bool io_poll_remove_all_table(struct task_struct *tsk,
                                            struct io_hash_table *table,
                                            bool cancel_all)
{
        unsigned nr_buckets = 1U << table->hash_bits;
        struct hlist_node *tmp;
        struct io_kiocb *req;
        bool found = false;
        int i;

        for (i = 0; i < nr_buckets; i++) {
                struct io_hash_bucket *hb = &table->hbs[i];

                spin_lock(&hb->lock);
                hlist_for_each_entry_safe(req, tmp, &hb->list, hash_node) {
                        if (io_match_task_safe(req, tsk, cancel_all)) {
                                hlist_del_init(&req->hash_node);
                                io_poll_cancel_req(req);
                                found = true;
                        }
                }
                spin_unlock(&hb->lock);
        }
        return found;
}

/*
 * Returns true if we found and killed one or more poll requests
 */
__cold bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
                               bool cancel_all)
        __must_hold(&ctx->uring_lock)
{
        bool ret;

        ret = io_poll_remove_all_table(tsk, &ctx->cancel_table, cancel_all);
        ret |= io_poll_remove_all_table(tsk, &ctx->cancel_table_locked, cancel_all);
        return ret;
}

static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, bool poll_only,
                                     struct io_cancel_data *cd,
                                     struct io_hash_table *table,
                                     struct io_hash_bucket **out_bucket)
{
        struct io_kiocb *req;
        u32 index = hash_long(cd->data, table->hash_bits);
        struct io_hash_bucket *hb = &table->hbs[index];

        *out_bucket = NULL;

        spin_lock(&hb->lock);
        hlist_for_each_entry(req, &hb->list, hash_node) {
                if (cd->data != req->cqe.user_data)
                        continue;
                if (poll_only && req->opcode != IORING_OP_POLL_ADD)
                        continue;
                if (cd->flags & IORING_ASYNC_CANCEL_ALL) {
                        if (cd->seq == req->work.cancel_seq)
                                continue;
                        req->work.cancel_seq = cd->seq;
                }
                *out_bucket = hb;
                return req;
        }
        spin_unlock(&hb->lock);
        return NULL;
}

static struct io_kiocb *io_poll_file_find(struct io_ring_ctx *ctx,
                                          struct io_cancel_data *cd,
                                          struct io_hash_table *table,
                                          struct io_hash_bucket **out_bucket)
{
        unsigned nr_buckets = 1U << table->hash_bits;
        struct io_kiocb *req;
        int i;

        *out_bucket = NULL;

        for (i = 0; i < nr_buckets; i++) {
                struct io_hash_bucket *hb = &table->hbs[i];

                spin_lock(&hb->lock);
                hlist_for_each_entry(req, &hb->list, hash_node) {
                        if (io_cancel_req_match(req, cd)) {
                                *out_bucket = hb;
                                return req;
                        }
                }
                spin_unlock(&hb->lock);
        }
        return NULL;
}

static int io_poll_disarm(struct io_kiocb *req)
{
        if (!req)
                return -ENOENT;
        if (!io_poll_get_ownership(req))
                return -EALREADY;
        io_poll_remove_entries(req);
        hash_del(&req->hash_node);
        return 0;
}

static int __io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
                            struct io_hash_table *table)
{
        struct io_hash_bucket *bucket;
        struct io_kiocb *req;

        if (cd->flags & (IORING_ASYNC_CANCEL_FD | IORING_ASYNC_CANCEL_OP |
                         IORING_ASYNC_CANCEL_ANY))
                req = io_poll_file_find(ctx, cd, table, &bucket);
        else
                req = io_poll_find(ctx, false, cd, table, &bucket);

        if (req)
                io_poll_cancel_req(req);
        if (bucket)
                spin_unlock(&bucket->lock);
        return req ? 0 : -ENOENT;
}

int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
                   unsigned issue_flags)
{
        int ret;

        ret = __io_poll_cancel(ctx, cd, &ctx->cancel_table);
        if (ret != -ENOENT)
                return ret;

        io_ring_submit_lock(ctx, issue_flags);
        ret = __io_poll_cancel(ctx, cd, &ctx->cancel_table_locked);
        io_ring_submit_unlock(ctx, issue_flags);
        return ret;
}

static __poll_t io_poll_parse_events(const struct io_uring_sqe *sqe,
                                     unsigned int flags)
{
        u32 events;

        events = READ_ONCE(sqe->poll32_events);
#ifdef __BIG_ENDIAN
        events = swahw32(events);
#endif
        if (!(flags & IORING_POLL_ADD_MULTI))
                events |= EPOLLONESHOT;
        if (!(flags & IORING_POLL_ADD_LEVEL))
                events |= EPOLLET;
        return demangle_poll(events) |
                (events & (EPOLLEXCLUSIVE|EPOLLONESHOT|EPOLLET));
}
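
/*
 * Illustrative note, not part of the original file: demangle_poll() and
 * mangle_poll() (from <linux/poll.h>) translate between the userspace
 * poll(2) event encoding and the kernel's arch-independent EPOLL*
 * (__poll_t) encoding. On most architectures this is an identity mapping,
 * but e.g. sparc defines different POLL* values. demangle_poll() only
 * covers the low 16 event bits, which is why the behaviour flags
 * (EPOLLEXCLUSIVE|EPOLLONESHOT|EPOLLET) are OR'ed back in separately above.
 */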

int io_poll_remove_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
        struct io_poll_update *upd = io_kiocb_to_cmd(req, struct io_poll_update);
        u32 flags;

        if (sqe->buf_index || sqe->splice_fd_in)
                return -EINVAL;
        flags = READ_ONCE(sqe->len);
        if (flags & ~(IORING_POLL_UPDATE_EVENTS | IORING_POLL_UPDATE_USER_DATA |
                      IORING_POLL_ADD_MULTI))
                return -EINVAL;
        /* meaningless without update */
        if (flags == IORING_POLL_ADD_MULTI)
                return -EINVAL;

        upd->old_user_data = READ_ONCE(sqe->addr);
        upd->update_events = flags & IORING_POLL_UPDATE_EVENTS;
        upd->update_user_data = flags & IORING_POLL_UPDATE_USER_DATA;

        upd->new_user_data = READ_ONCE(sqe->off);
        if (!upd->update_user_data && upd->new_user_data)
                return -EINVAL;
        if (upd->update_events)
                upd->events = io_poll_parse_events(sqe, flags);
        else if (sqe->poll32_events)
                return -EINVAL;

        return 0;
}

int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
        struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll);
        u32 flags;

        if (sqe->buf_index || sqe->off || sqe->addr)
                return -EINVAL;
        flags = READ_ONCE(sqe->len);
        if (flags & ~IORING_POLL_ADD_MULTI)
                return -EINVAL;
        if ((flags & IORING_POLL_ADD_MULTI) && (req->flags & REQ_F_CQE_SKIP))
                return -EINVAL;

        poll->events = io_poll_parse_events(sqe, flags);
        return 0;
}

int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
{
        struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll);
        struct io_poll_table ipt;
        int ret;

        ipt.pt._qproc = io_poll_queue_proc;

        /*
         * If sqpoll or single issuer, there is no contention for ->uring_lock
         * and we'll end up holding it in tw handlers anyway.
         */
        if (req->ctx->flags & (IORING_SETUP_SQPOLL|IORING_SETUP_SINGLE_ISSUER))
                req->flags |= REQ_F_HASH_LOCKED;

        ret = __io_arm_poll_handler(req, poll, &ipt, poll->events, issue_flags);
        if (ret > 0) {
                io_req_set_res(req, ipt.result_mask, 0);
                return IOU_OK;
        }
        return ret ?: IOU_ISSUE_SKIP_COMPLETE;
}
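
/*
 * Illustrative userspace sketch, not part of the original file: what arms
 * this path from the application side, assuming liburing is available. A
 * single-shot poll completes with the triggered mask in cqe->res; a
 * multishot poll keeps posting CQEs flagged IORING_CQE_F_MORE until it is
 * cancelled or terminates.
 *
 *      struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *      io_uring_prep_poll_add(sqe, fd, POLLIN);        // one-shot
 *      // or: io_uring_prep_poll_multishot(sqe, fd, POLLIN);
 *      sqe->user_data = 0xcafe;
 *      io_uring_submit(&ring);
 */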

int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags)
{
        struct io_poll_update *poll_update = io_kiocb_to_cmd(req, struct io_poll_update);
        struct io_ring_ctx *ctx = req->ctx;
        struct io_cancel_data cd = { .ctx = ctx, .data = poll_update->old_user_data, };
        struct io_hash_bucket *bucket;
        struct io_kiocb *preq;
        int ret2, ret = 0;
        struct io_tw_state ts = { .locked = true };

        io_ring_submit_lock(ctx, issue_flags);
        preq = io_poll_find(ctx, true, &cd, &ctx->cancel_table, &bucket);
        ret2 = io_poll_disarm(preq);
        if (bucket)
                spin_unlock(&bucket->lock);
        if (!ret2)
                goto found;
        if (ret2 != -ENOENT) {
                ret = ret2;
                goto out;
        }

        preq = io_poll_find(ctx, true, &cd, &ctx->cancel_table_locked, &bucket);
        ret2 = io_poll_disarm(preq);
        if (bucket)
                spin_unlock(&bucket->lock);
        if (ret2) {
                ret = ret2;
                goto out;
        }

found:
        if (WARN_ON_ONCE(preq->opcode != IORING_OP_POLL_ADD)) {
                ret = -EFAULT;
                goto out;
        }

        if (poll_update->update_events || poll_update->update_user_data) {
                /* only update the low event mask bits, keep the behavior flags */
                if (poll_update->update_events) {
                        struct io_poll *poll = io_kiocb_to_cmd(preq, struct io_poll);

                        poll->events &= ~0xffff;
                        poll->events |= poll_update->events & 0xffff;
                        poll->events |= IO_POLL_UNMASK;
                }
                if (poll_update->update_user_data)
                        preq->cqe.user_data = poll_update->new_user_data;

                ret2 = io_poll_add(preq, issue_flags & ~IO_URING_F_UNLOCKED);
                /* successfully updated, don't complete poll request */
                if (!ret2 || ret2 == -EIOCBQUEUED)
                        goto out;
        }

        req_set_fail(preq);
        io_req_set_res(preq, -ECANCELED, 0);
        io_req_task_complete(preq, &ts);
out:
        io_ring_submit_unlock(ctx, issue_flags);
        if (ret < 0) {
                req_set_fail(req);
                return ret;
        }
        /* complete update request, we're done with it */
        io_req_set_res(req, ret, 0);
        return IOU_OK;
}
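
/*
 * Illustrative userspace sketch, not part of the original file: the update
 * path above is driven by IORING_OP_POLL_REMOVE with the
 * IORING_POLL_UPDATE_* flags, e.g. via liburing:
 *
 *      // retarget the poll armed with user_data 0xcafe to POLLOUT and
 *      // give it a new user_data
 *      io_uring_prep_poll_update(sqe, 0xcafe, 0xbeef, POLLOUT,
 *                                IORING_POLL_UPDATE_EVENTS |
 *                                IORING_POLL_UPDATE_USER_DATA);
 */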

void io_apoll_cache_free(struct io_cache_entry *entry)
{
        kfree(container_of(entry, struct async_poll, cache));
}