// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/hashtable.h>
#include <linux/io_uring.h>

#include <trace/events/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "alloc_cache.h"
#include "refs.h"
#include "napi.h"
#include "opdef.h"
#include "kbuf.h"
#include "poll.h"
#include "cancel.h"

struct io_poll_update {
	struct file			*file;
	u64				old_user_data;
	u64				new_user_data;
	__poll_t			events;
	bool				update_events;
	bool				update_user_data;
};

struct io_poll_table {
	struct poll_table_struct pt;
	struct io_kiocb *req;
	int nr_entries;
	int error;
	bool owning;
	/* output value, set only if arm poll returns >0 */
	__poll_t result_mask;
};

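/*
 * Layout of ->poll_refs: bit 31 flags the request as cancelled, bit 30 asks
 * the owning task_work to retry, and the low 30 bits form the ownership
 * reference count checked via IO_POLL_REF_MASK.
 */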
#define IO_POLL_CANCEL_FLAG	BIT(31)
#define IO_POLL_RETRY_FLAG	BIT(30)
#define IO_POLL_REF_MASK	GENMASK(29, 0)

/*
 * We usually have 1-2 refs taken, 128 is more than enough and we want to
 * maximise the margin between this amount and the moment when it overflows.
 */
#define IO_POLL_REF_BIAS	128

#define IO_WQE_F_DOUBLE		1

static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
			void *key);

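/*
 * The io_kiocb pointer is stashed in wait->private, with IO_WQE_F_DOUBLE set
 * in its (otherwise unused) low bit when the entry belongs to the double
 * poll, so the wakeup path can recover both the request and which entry
 * fired.
 */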
static inline struct io_kiocb *wqe_to_req(struct wait_queue_entry *wqe)
{
	unsigned long priv = (unsigned long)wqe->private;

	return (struct io_kiocb *)(priv & ~IO_WQE_F_DOUBLE);
}

static inline bool wqe_is_double(struct wait_queue_entry *wqe)
{
	unsigned long priv = (unsigned long)wqe->private;

	return priv & IO_WQE_F_DOUBLE;
}

static bool io_poll_get_ownership_slowpath(struct io_kiocb *req)
{
	int v;

	/*
	 * poll_refs are already elevated and we don't have much hope for
	 * grabbing the ownership. Instead of incrementing, set a retry flag
	 * to notify the loop that there might have been some change.
	 */
	v = atomic_fetch_or(IO_POLL_RETRY_FLAG, &req->poll_refs);
	if (v & IO_POLL_REF_MASK)
		return false;
	return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
}

/*
 * If the refs part of ->poll_refs (see IO_POLL_REF_MASK) is 0, the request is
 * free and we can bump it to acquire ownership. It's disallowed to modify a
 * request while not owning it, which prevents races when enqueueing
 * task_work and between arming poll and wakeups.
 */
static inline bool io_poll_get_ownership(struct io_kiocb *req)
{
	if (unlikely(atomic_read(&req->poll_refs) >= IO_POLL_REF_BIAS))
		return io_poll_get_ownership_slowpath(req);
	return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
}

static void io_poll_mark_cancelled(struct io_kiocb *req)
{
	atomic_or(IO_POLL_CANCEL_FLAG, &req->poll_refs);
}

static struct io_poll *io_poll_get_double(struct io_kiocb *req)
{
	/* pure poll stashes this in ->async_data, poll driven retry elsewhere */
	if (req->opcode == IORING_OP_POLL_ADD)
		return req->async_data;
	return req->apoll->double_poll;
}

static struct io_poll *io_poll_get_single(struct io_kiocb *req)
{
	if (req->opcode == IORING_OP_POLL_ADD)
		return io_kiocb_to_cmd(req, struct io_poll);
	return &req->apoll->poll;
}

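/*
 * Hash the request by its CQE user_data so poll cancel/update can find it
 * later; ->uring_lock serialises access to the table.
 */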
static void io_poll_req_insert(struct io_kiocb *req)
{
	struct io_hash_table *table = &req->ctx->cancel_table;
	u32 index = hash_long(req->cqe.user_data, table->hash_bits);

	lockdep_assert_held(&req->ctx->uring_lock);

	hlist_add_head(&req->hash_node, &table->hbs[index].list);
}

static void io_init_poll_iocb(struct io_poll *poll, __poll_t events)
{
	poll->head = NULL;
#define IO_POLL_UNMASK	(EPOLLERR|EPOLLHUP|EPOLLNVAL|EPOLLRDHUP)
	/* mask in events that we always want/need */
	poll->events = events | IO_POLL_UNMASK;
	INIT_LIST_HEAD(&poll->wait.entry);
	init_waitqueue_func_entry(&poll->wait, io_poll_wake);
}

static inline void io_poll_remove_entry(struct io_poll *poll)
{
	struct wait_queue_head *head = smp_load_acquire(&poll->head);

	if (head) {
		spin_lock_irq(&head->lock);
		list_del_init(&poll->wait.entry);
		poll->head = NULL;
		spin_unlock_irq(&head->lock);
	}
}

static void io_poll_remove_entries(struct io_kiocb *req)
{
	/*
	 * Nothing to do if neither of those flags is set. Avoid dipping
	 * into the poll/apoll/double cachelines if we can.
	 */
	if (!(req->flags & (REQ_F_SINGLE_POLL | REQ_F_DOUBLE_POLL)))
		return;

	/*
	 * While we hold the waitqueue lock and the waitqueue is nonempty,
	 * wake_up_pollfree() will wait for us. However, taking the waitqueue
	 * lock in the first place can race with the waitqueue being freed.
	 *
	 * We solve this as eventpoll does: by taking advantage of the fact that
	 * all users of wake_up_pollfree() will RCU-delay the actual free. If
	 * we enter rcu_read_lock() and see that the pointer to the queue is
	 * non-NULL, we can then lock it without the memory being freed out from
	 * under us.
	 *
	 * Keep holding rcu_read_lock() as long as we hold the queue lock, in
	 * case the caller deletes the entry from the queue, leaving it empty.
	 * In that case, only RCU prevents the queue memory from being freed.
	 */
	rcu_read_lock();
	if (req->flags & REQ_F_SINGLE_POLL)
		io_poll_remove_entry(io_poll_get_single(req));
	if (req->flags & REQ_F_DOUBLE_POLL)
		io_poll_remove_entry(io_poll_get_double(req));
	rcu_read_unlock();
}

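/* return values of io_poll_check_events(), consumed by io_poll_task_func() */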
enum {
	IOU_POLL_DONE = 0,
	IOU_POLL_NO_ACTION = 1,
	IOU_POLL_REMOVE_POLL_USE_RES = 2,
	IOU_POLL_REISSUE = 3,
	IOU_POLL_REQUEUE = 4,
};

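/*
 * Stash the wakeup mask in cqe.res and queue the poll task_work; the caller
 * must already hold poll ownership. Exclusive waits (REQ_F_POLL_NO_LAZY)
 * can't use the lazy task_work wakeup.
 */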
static void __io_poll_execute(struct io_kiocb *req, int mask)
{
	unsigned flags = 0;

	io_req_set_res(req, mask, 0);
	req->io_task_work.func = io_poll_task_func;

	trace_io_uring_task_add(req, mask);

	if (!(req->flags & REQ_F_POLL_NO_LAZY))
		flags = IOU_F_TWQ_LAZY_WAKE;
	__io_req_task_work_add(req, flags);
}

static inline void io_poll_execute(struct io_kiocb *req, int res)
{
	if (io_poll_get_ownership(req))
		__io_poll_execute(req, res);
}

/*
 * All poll tw should go through this. Checks for poll events, manages
 * references, does rewait, etc.
 *
 * Returns a negative error on failure. IOU_POLL_NO_ACTION when no action is
 * required, i.e. the wakeup was spurious or a multishot CQE has been served.
 * IOU_POLL_DONE when it's done with the request, with the mask stored in
 * req->cqe.res. IOU_POLL_REMOVE_POLL_USE_RES indicates that the multishot
 * poll should be removed and that the result is stored in req->cqe.
 */
static int io_poll_check_events(struct io_kiocb *req, struct io_tw_state *ts)
{
	int v;

	if (unlikely(io_should_terminate_tw()))
		return -ECANCELED;

	do {
		v = atomic_read(&req->poll_refs);

		if (unlikely(v != 1)) {
			/* tw should be the owner and so have some refs */
			if (WARN_ON_ONCE(!(v & IO_POLL_REF_MASK)))
				return IOU_POLL_NO_ACTION;
			if (v & IO_POLL_CANCEL_FLAG)
				return -ECANCELED;
			/*
			 * cqe.res contains only the events of the first wake
			 * up; all others would be lost. Redo vfs_poll() to get
			 * the up to date state.
			 */
			if ((v & IO_POLL_REF_MASK) != 1)
				req->cqe.res = 0;

			if (v & IO_POLL_RETRY_FLAG) {
				req->cqe.res = 0;
				/*
				 * We won't find new events that came in between
				 * vfs_poll and the ref put unless we clear the
				 * flag in advance.
				 */
				atomic_andnot(IO_POLL_RETRY_FLAG, &req->poll_refs);
				v &= ~IO_POLL_RETRY_FLAG;
			}
		}

		/* the mask was stashed in __io_poll_execute */
		if (!req->cqe.res) {
			struct poll_table_struct pt = { ._key = req->apoll_events };
			req->cqe.res = vfs_poll(req->file, &pt) & req->apoll_events;
			/*
			 * We got woken with a mask, but someone else got to
			 * it first. The above vfs_poll() doesn't add us back
			 * to the waitqueue, so if we get nothing back, we
			 * should be safe and attempt a reissue.
			 */
			if (unlikely(!req->cqe.res)) {
				/* an armed multishot poll need not reissue */
				if (!(req->apoll_events & EPOLLONESHOT))
					continue;
				return IOU_POLL_REISSUE;
			}
		}
		if (unlikely(req->cqe.res & EPOLLERR))
			req_set_fail(req);
		if (req->apoll_events & EPOLLONESHOT)
			return IOU_POLL_DONE;

		/* multishot, just fill a CQE and proceed */
		if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
			__poll_t mask = mangle_poll(req->cqe.res &
						    req->apoll_events);

			if (!io_req_post_cqe(req, mask, IORING_CQE_F_MORE)) {
				io_req_set_res(req, mask, 0);
				return IOU_POLL_REMOVE_POLL_USE_RES;
			}
		} else {
			int ret = io_poll_issue(req, ts);
			if (ret == IOU_STOP_MULTISHOT)
				return IOU_POLL_REMOVE_POLL_USE_RES;
			else if (ret == IOU_REQUEUE)
				return IOU_POLL_REQUEUE;
			if (ret < 0)
				return ret;
		}

		/* force the next iteration to vfs_poll() */
		req->cqe.res = 0;

		/*
		 * Release all references, retry if someone tried to restart
		 * task_work while we were executing it.
		 */
		v &= IO_POLL_REF_MASK;
	} while (atomic_sub_return(v, &req->poll_refs) & IO_POLL_REF_MASK);

	io_napi_add(req);
	return IOU_POLL_NO_ACTION;
}

void io_poll_task_func(struct io_kiocb *req, struct io_tw_state *ts)
{
	int ret;

	ret = io_poll_check_events(req, ts);
	if (ret == IOU_POLL_NO_ACTION) {
		io_kbuf_recycle(req, 0);
		return;
	} else if (ret == IOU_POLL_REQUEUE) {
		io_kbuf_recycle(req, 0);
		__io_poll_execute(req, 0);
		return;
	}
	io_poll_remove_entries(req);
	/* task_work always has ->uring_lock held */
	hash_del(&req->hash_node);

	if (req->opcode == IORING_OP_POLL_ADD) {
		if (ret == IOU_POLL_DONE) {
			struct io_poll *poll;

			poll = io_kiocb_to_cmd(req, struct io_poll);
			req->cqe.res = mangle_poll(req->cqe.res & poll->events);
		} else if (ret == IOU_POLL_REISSUE) {
			io_req_task_submit(req, ts);
			return;
		} else if (ret != IOU_POLL_REMOVE_POLL_USE_RES) {
			req->cqe.res = ret;
			req_set_fail(req);
		}

		io_req_set_res(req, req->cqe.res, 0);
		io_req_task_complete(req, ts);
	} else {
		io_tw_lock(req->ctx, ts);

		if (ret == IOU_POLL_REMOVE_POLL_USE_RES)
			io_req_task_complete(req, ts);
		else if (ret == IOU_POLL_DONE || ret == IOU_POLL_REISSUE)
			io_req_task_submit(req, ts);
		else
			io_req_defer_failed(req, ret);
	}
}

static void io_poll_cancel_req(struct io_kiocb *req)
{
	io_poll_mark_cancelled(req);
	/* kick tw, which should complete the request */
	io_poll_execute(req, 0);
}

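/*
 * Bits that arming commonly adds on top of the requested events (EPOLLONESHOT,
 * EPOLLPRI); io_poll_wake() ignores them when checking whether a wakeup
 * matches the events the request is actually interested in.
 */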
#define IO_ASYNC_POLL_COMMON	(EPOLLONESHOT | EPOLLPRI)

static __cold int io_pollfree_wake(struct io_kiocb *req, struct io_poll *poll)
{
	io_poll_mark_cancelled(req);
	/* we have to kick tw in case it's not already */
	io_poll_execute(req, 0);

	/*
	 * If the waitqueue is being freed early but someone already holds
	 * ownership over it, we have to tear down the request as best we
	 * can. That means immediately removing the request from its
	 * waitqueue and preventing all further accesses to the waitqueue
	 * via the request.
	 */
	list_del_init(&poll->wait.entry);

	/*
	 * Careful: this *must* be the last step, since as soon
	 * as req->head is NULL'ed out, the request can be
	 * completed and freed, since aio_poll_complete_work()
	 * will no longer need to take the waitqueue lock.
	 */
	smp_store_release(&poll->head, NULL);
	return 1;
}

static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
			void *key)
{
	struct io_kiocb *req = wqe_to_req(wait);
	struct io_poll *poll = container_of(wait, struct io_poll, wait);
	__poll_t mask = key_to_poll(key);

	if (unlikely(mask & POLLFREE))
		return io_pollfree_wake(req, poll);

	/* for instances that support it, check for an event match first */
	if (mask && !(mask & (poll->events & ~IO_ASYNC_POLL_COMMON)))
		return 0;

	if (io_poll_get_ownership(req)) {
		/*
		 * If we trigger a multishot poll off our own wakeup path,
		 * disable multishot as there is a circular dependency between
		 * CQ posting and triggering the event.
		 */
		if (mask & EPOLL_URING_WAKE)
			poll->events |= EPOLLONESHOT;

		/* optional, saves extra locking for removal in tw handler */
		if (mask && poll->events & EPOLLONESHOT) {
			list_del_init(&poll->wait.entry);
			poll->head = NULL;
			if (wqe_is_double(wait))
				req->flags &= ~REQ_F_DOUBLE_POLL;
			else
				req->flags &= ~REQ_F_SINGLE_POLL;
		}
		__io_poll_execute(req, mask);
	}
	return 1;
}

/* fails only when polling is already being completed by the first entry */
static bool io_poll_double_prepare(struct io_kiocb *req)
{
	struct wait_queue_head *head;
	struct io_poll *poll = io_poll_get_single(req);

	/* head is RCU protected, see io_poll_remove_entries() comments */
	rcu_read_lock();
	head = smp_load_acquire(&poll->head);
	/*
	 * poll arm might not hold ownership and so race for req->flags with
	 * io_poll_wake(). There is only one poll entry queued, serialise with
	 * it by taking its head lock. As we're still arming, the tw handler
	 * is not going to be run, so there are no races with it.
	 */
	if (head) {
		spin_lock_irq(&head->lock);
		req->flags |= REQ_F_DOUBLE_POLL;
		if (req->opcode == IORING_OP_POLL_ADD)
			req->flags |= REQ_F_ASYNC_DATA;
		spin_unlock_irq(&head->lock);
	}
	rcu_read_unlock();
	return !!head;
}

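/*
 * poll_table callback: queue the request on the waitqueue vfs_poll() hands
 * us. If the file polls on more than one waitqueue, allocate a second
 * io_poll entry ("double poll") and track it via *poll_ptr.
 */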
static void __io_queue_proc(struct io_poll *poll, struct io_poll_table *pt,
			    struct wait_queue_head *head,
			    struct io_poll **poll_ptr)
{
	struct io_kiocb *req = pt->req;
	unsigned long wqe_private = (unsigned long) req;

	/*
	 * The file being polled uses multiple waitqueues for poll handling
	 * (e.g. one for read, one for write). Set up a separate io_poll
	 * if this happens.
	 */
	if (unlikely(pt->nr_entries)) {
		struct io_poll *first = poll;

		/* double add on the same waitqueue head, ignore */
		if (first->head == head)
			return;
		/* already have a 2nd entry, fail a third attempt */
		if (*poll_ptr) {
			if ((*poll_ptr)->head == head)
				return;
			pt->error = -EINVAL;
			return;
		}

		poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
		if (!poll) {
			pt->error = -ENOMEM;
			return;
		}

		/* mark as double wq entry */
		wqe_private |= IO_WQE_F_DOUBLE;
		io_init_poll_iocb(poll, first->events);
		if (!io_poll_double_prepare(req)) {
			/* the request is completing, just back off */
			kfree(poll);
			return;
		}
		*poll_ptr = poll;
	} else {
		/* fine to modify, there is no poll queued to race with us */
		req->flags |= REQ_F_SINGLE_POLL;
	}

	pt->nr_entries++;
	poll->head = head;
	poll->wait.private = (void *) wqe_private;

	if (poll->events & EPOLLEXCLUSIVE) {
		add_wait_queue_exclusive(head, &poll->wait);
	} else {
		add_wait_queue(head, &poll->wait);
	}
}

static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
			       struct poll_table_struct *p)
{
	struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
	struct io_poll *poll = io_kiocb_to_cmd(pt->req, struct io_poll);

	__io_queue_proc(poll, pt, head,
			(struct io_poll **) &pt->req->async_data);
}

static bool io_poll_can_finish_inline(struct io_kiocb *req,
				      struct io_poll_table *pt)
{
	return pt->owning || io_poll_get_ownership(req);
}

static void io_poll_add_hash(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;

	io_ring_submit_lock(ctx, issue_flags);
	io_poll_req_insert(req);
	io_ring_submit_unlock(ctx, issue_flags);
}

/*
 * Returns 0 when the request has been handed over for polling; the caller
 * owns the request when a non-zero value is returned, but otherwise must not
 * touch it. Negative values contain an error code. When the result is > 0,
 * polling has completed inline and ipt.result_mask is set to the mask.
 */
static int __io_arm_poll_handler(struct io_kiocb *req,
				 struct io_poll *poll,
				 struct io_poll_table *ipt, __poll_t mask,
				 unsigned issue_flags)
{
	INIT_HLIST_NODE(&req->hash_node);
	io_init_poll_iocb(poll, mask);
	poll->file = req->file;
	req->apoll_events = poll->events;

	ipt->pt._key = mask;
	ipt->req = req;
	ipt->error = 0;
	ipt->nr_entries = 0;
	/*
	 * Polling is either completed here or via task_work, so if we're in the
	 * task context we're naturally serialised with tw by virtue of running
	 * the same task. When it's io-wq, take ownership to prevent tw
	 * from running. However, when we're in the task context, skip taking
	 * it as an optimisation.
	 *
	 * Note: even though the request won't be completed/freed, without
	 * ownership we still can race with io_poll_wake().
	 * io_poll_can_finish_inline() tries to deal with that.
	 */
	ipt->owning = issue_flags & IO_URING_F_UNLOCKED;
	atomic_set(&req->poll_refs, (int)ipt->owning);

	/*
	 * Exclusive waits may only wake a limited number of entries
	 * rather than all of them, this may interfere with lazy
	 * wake if someone does wait(events > 1). Ensure we don't do
	 * lazy wake for those, as we need to process each one as they
	 * come in.
	 */
	if (poll->events & EPOLLEXCLUSIVE)
		req->flags |= REQ_F_POLL_NO_LAZY;

	mask = vfs_poll(req->file, &ipt->pt) & poll->events;

	if (unlikely(ipt->error || !ipt->nr_entries)) {
		io_poll_remove_entries(req);

		if (!io_poll_can_finish_inline(req, ipt)) {
			io_poll_mark_cancelled(req);
			return 0;
		} else if (mask && (poll->events & EPOLLET)) {
			ipt->result_mask = mask;
			return 1;
		}
		return ipt->error ?: -EINVAL;
	}

	if (mask &&
	   ((poll->events & (EPOLLET|EPOLLONESHOT)) == (EPOLLET|EPOLLONESHOT))) {
		if (!io_poll_can_finish_inline(req, ipt)) {
			io_poll_add_hash(req, issue_flags);
			return 0;
		}
		io_poll_remove_entries(req);
		ipt->result_mask = mask;
		/* no one else has access to the req, forget about the ref */
		return 1;
	}

	io_poll_add_hash(req, issue_flags);

	if (mask && (poll->events & EPOLLET) &&
	    io_poll_can_finish_inline(req, ipt)) {
		__io_poll_execute(req, mask);
		return 0;
	}
	io_napi_add(req);

	if (ipt->owning) {
		/*
		 * Try to release ownership. If we see a change of state, e.g.
		 * the poll was woken up, queue up a tw; it'll deal with it.
		 */
		if (atomic_cmpxchg(&req->poll_refs, 1, 0) != 1)
			__io_poll_execute(req, 0);
	}
	return 0;
}

static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
				struct poll_table_struct *p)
{
	struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
	struct async_poll *apoll = pt->req->apoll;

	__io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
}

/*
 * We can't reliably detect loops where the poll keeps triggering and the
 * subsequent issue keeps failing. But rather than fail these immediately,
 * allow a certain amount of retries before we give up. Given that this
 * condition should _rarely_ trigger even once, we should be fine with a
 * larger value.
 */
#define APOLL_MAX_RETRY		128

static struct async_poll *io_req_alloc_apoll(struct io_kiocb *req,
					     unsigned issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct async_poll *apoll;

	if (req->flags & REQ_F_POLLED) {
		apoll = req->apoll;
		kfree(apoll->double_poll);
	} else if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		apoll = io_alloc_cache_get(&ctx->apoll_cache);
		if (!apoll)
			goto alloc_apoll;
		apoll->poll.retries = APOLL_MAX_RETRY;
	} else {
alloc_apoll:
		apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
		if (unlikely(!apoll))
			return NULL;
		apoll->poll.retries = APOLL_MAX_RETRY;
	}
	apoll->double_poll = NULL;
	req->apoll = apoll;
	if (unlikely(!--apoll->poll.retries))
		return NULL;
	return apoll;
}

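/*
 * Arm an internal poll handler for a request that couldn't complete
 * non-blockingly. Returns IO_APOLL_OK when the request is now waiting for
 * poll, IO_APOLL_READY when it should be retried right away, and
 * IO_APOLL_ABORTED when async poll isn't possible for it.
 */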
int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
{
	const struct io_issue_def *def = &io_issue_defs[req->opcode];
	struct async_poll *apoll;
	struct io_poll_table ipt;
	__poll_t mask = POLLPRI | POLLERR | EPOLLET;
	int ret;

	if (!def->pollin && !def->pollout)
		return IO_APOLL_ABORTED;
	if (!io_file_can_poll(req))
		return IO_APOLL_ABORTED;
	if (!(req->flags & REQ_F_APOLL_MULTISHOT))
		mask |= EPOLLONESHOT;

	if (def->pollin) {
		mask |= EPOLLIN | EPOLLRDNORM;

		/* If reading from MSG_ERRQUEUE using recvmsg, ignore POLLIN */
		if (req->flags & REQ_F_CLEAR_POLLIN)
			mask &= ~EPOLLIN;
	} else {
		mask |= EPOLLOUT | EPOLLWRNORM;
	}
	if (def->poll_exclusive)
		mask |= EPOLLEXCLUSIVE;

	apoll = io_req_alloc_apoll(req, issue_flags);
	if (!apoll)
		return IO_APOLL_ABORTED;
	req->flags &= ~(REQ_F_SINGLE_POLL | REQ_F_DOUBLE_POLL);
	req->flags |= REQ_F_POLLED;
	ipt.pt._qproc = io_async_queue_proc;

	io_kbuf_recycle(req, issue_flags);

	ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask, issue_flags);
	if (ret)
		return ret > 0 ? IO_APOLL_READY : IO_APOLL_ABORTED;
	trace_io_uring_poll_arm(req, mask, apoll->poll.events);
	return IO_APOLL_OK;
}

/*
 * Returns true if we found and killed one or more poll requests
 */
__cold bool io_poll_remove_all(struct io_ring_ctx *ctx, struct io_uring_task *tctx,
			       bool cancel_all)
{
	unsigned nr_buckets = 1U << ctx->cancel_table.hash_bits;
	struct hlist_node *tmp;
	struct io_kiocb *req;
	bool found = false;
	int i;

	lockdep_assert_held(&ctx->uring_lock);

	for (i = 0; i < nr_buckets; i++) {
		struct io_hash_bucket *hb = &ctx->cancel_table.hbs[i];

		hlist_for_each_entry_safe(req, tmp, &hb->list, hash_node) {
			if (io_match_task_safe(req, tctx, cancel_all)) {
				hlist_del_init(&req->hash_node);
				io_poll_cancel_req(req);
				found = true;
			}
		}
	}
	return found;
}

static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, bool poll_only,
				     struct io_cancel_data *cd)
{
	struct io_kiocb *req;
	u32 index = hash_long(cd->data, ctx->cancel_table.hash_bits);
	struct io_hash_bucket *hb = &ctx->cancel_table.hbs[index];

	hlist_for_each_entry(req, &hb->list, hash_node) {
		if (cd->data != req->cqe.user_data)
			continue;
		if (poll_only && req->opcode != IORING_OP_POLL_ADD)
			continue;
		if (cd->flags & IORING_ASYNC_CANCEL_ALL) {
			if (io_cancel_match_sequence(req, cd->seq))
				continue;
		}
		return req;
	}
	return NULL;
}

static struct io_kiocb *io_poll_file_find(struct io_ring_ctx *ctx,
					  struct io_cancel_data *cd)
{
	unsigned nr_buckets = 1U << ctx->cancel_table.hash_bits;
	struct io_kiocb *req;
	int i;

	for (i = 0; i < nr_buckets; i++) {
		struct io_hash_bucket *hb = &ctx->cancel_table.hbs[i];

		hlist_for_each_entry(req, &hb->list, hash_node) {
			if (io_cancel_req_match(req, cd))
				return req;
		}
	}
	return NULL;
}

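/*
 * Take ownership of the poll request and detach it from its waitqueue(s) and
 * the cancel hash, leaving the caller free to complete or re-arm it.
 */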
static int io_poll_disarm(struct io_kiocb *req)
{
	if (!req)
		return -ENOENT;
	if (!io_poll_get_ownership(req))
		return -EALREADY;
	io_poll_remove_entries(req);
	hash_del(&req->hash_node);
	return 0;
}

static int __io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd)
{
	struct io_kiocb *req;

	if (cd->flags & (IORING_ASYNC_CANCEL_FD | IORING_ASYNC_CANCEL_OP |
			 IORING_ASYNC_CANCEL_ANY))
		req = io_poll_file_find(ctx, cd);
	else
		req = io_poll_find(ctx, false, cd);

	if (req) {
		io_poll_cancel_req(req);
		return 0;
	}
	return -ENOENT;
}

int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
		   unsigned issue_flags)
{
	int ret;

	io_ring_submit_lock(ctx, issue_flags);
	ret = __io_poll_cancel(ctx, cd);
	io_ring_submit_unlock(ctx, issue_flags);
	return ret;
}

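/*
 * Convert the userspace poll mask from the SQE into internal epoll flags,
 * adding EPOLLONESHOT unless multishot was requested and EPOLLET unless
 * level-triggered mode was requested.
 */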
static __poll_t io_poll_parse_events(const struct io_uring_sqe *sqe,
				     unsigned int flags)
{
	u32 events;

	events = READ_ONCE(sqe->poll32_events);
#ifdef __BIG_ENDIAN
	events = swahw32(events);
#endif
	if (!(flags & IORING_POLL_ADD_MULTI))
		events |= EPOLLONESHOT;
	if (!(flags & IORING_POLL_ADD_LEVEL))
		events |= EPOLLET;
	return demangle_poll(events) |
		(events & (EPOLLEXCLUSIVE|EPOLLONESHOT|EPOLLET));
}

int io_poll_remove_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_poll_update *upd = io_kiocb_to_cmd(req, struct io_poll_update);
	u32 flags;

	if (sqe->buf_index || sqe->splice_fd_in)
		return -EINVAL;
	flags = READ_ONCE(sqe->len);
	if (flags & ~(IORING_POLL_UPDATE_EVENTS | IORING_POLL_UPDATE_USER_DATA |
		      IORING_POLL_ADD_MULTI))
		return -EINVAL;
	/* meaningless without update */
	if (flags == IORING_POLL_ADD_MULTI)
		return -EINVAL;

	upd->old_user_data = READ_ONCE(sqe->addr);
	upd->update_events = flags & IORING_POLL_UPDATE_EVENTS;
	upd->update_user_data = flags & IORING_POLL_UPDATE_USER_DATA;

	upd->new_user_data = READ_ONCE(sqe->off);
	if (!upd->update_user_data && upd->new_user_data)
		return -EINVAL;
	if (upd->update_events)
		upd->events = io_poll_parse_events(sqe, flags);
	else if (sqe->poll32_events)
		return -EINVAL;

	return 0;
}

int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll);
	u32 flags;

	if (sqe->buf_index || sqe->off || sqe->addr)
		return -EINVAL;
	flags = READ_ONCE(sqe->len);
	if (flags & ~IORING_POLL_ADD_MULTI)
		return -EINVAL;
	if ((flags & IORING_POLL_ADD_MULTI) && (req->flags & REQ_F_CQE_SKIP))
		return -EINVAL;

	poll->events = io_poll_parse_events(sqe, flags);
	return 0;
}

int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll);
	struct io_poll_table ipt;
	int ret;

	ipt.pt._qproc = io_poll_queue_proc;

	ret = __io_arm_poll_handler(req, poll, &ipt, poll->events, issue_flags);
	if (ret > 0) {
		io_req_set_res(req, ipt.result_mask, 0);
		return IOU_OK;
	}
	return ret ?: IOU_ISSUE_SKIP_COMPLETE;
}

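/*
 * IORING_OP_POLL_REMOVE: find the original poll request by user_data and
 * disarm it, then either update its events/user_data and re-arm it, or
 * cancel it by completing it with -ECANCELED via task_work.
 */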
int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_poll_update *poll_update = io_kiocb_to_cmd(req, struct io_poll_update);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_cancel_data cd = { .ctx = ctx, .data = poll_update->old_user_data, };
	struct io_kiocb *preq;
	int ret2, ret = 0;

	io_ring_submit_lock(ctx, issue_flags);
	preq = io_poll_find(ctx, true, &cd);
	ret2 = io_poll_disarm(preq);
	if (ret2) {
		ret = ret2;
		goto out;
	}
	if (WARN_ON_ONCE(preq->opcode != IORING_OP_POLL_ADD)) {
		ret = -EFAULT;
		goto out;
	}

	if (poll_update->update_events || poll_update->update_user_data) {
		/* only replace the event mask, keep the behavior flags */
		if (poll_update->update_events) {
			struct io_poll *poll = io_kiocb_to_cmd(preq, struct io_poll);

			poll->events &= ~0xffff;
			poll->events |= poll_update->events & 0xffff;
			poll->events |= IO_POLL_UNMASK;
		}
		if (poll_update->update_user_data)
			preq->cqe.user_data = poll_update->new_user_data;

		ret2 = io_poll_add(preq, issue_flags & ~IO_URING_F_UNLOCKED);
		/* successfully updated, don't complete poll request */
		if (!ret2 || ret2 == -EIOCBQUEUED)
			goto out;
	}

	req_set_fail(preq);
	io_req_set_res(preq, -ECANCELED, 0);
	preq->io_task_work.func = io_req_task_complete;
	io_req_task_work_add(preq);
out:
	io_ring_submit_unlock(ctx, issue_flags);
	if (ret < 0) {
		req_set_fail(req);
		return ret;
	}
	/* complete update request, we're done with it */
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}