io_uring/poll.c (v6.8)
   1// SPDX-License-Identifier: GPL-2.0
   2#include <linux/kernel.h>
   3#include <linux/errno.h>
   4#include <linux/fs.h>
   5#include <linux/file.h>
   6#include <linux/mm.h>
   7#include <linux/slab.h>
   8#include <linux/poll.h>
   9#include <linux/hashtable.h>
  10#include <linux/io_uring.h>
  11
  12#include <trace/events/io_uring.h>
  13
  14#include <uapi/linux/io_uring.h>
  15
  16#include "io_uring.h"
  17#include "refs.h"
  18#include "opdef.h"
  19#include "kbuf.h"
  20#include "poll.h"
  21#include "cancel.h"
  22
  23struct io_poll_update {
  24	struct file			*file;
  25	u64				old_user_data;
  26	u64				new_user_data;
  27	__poll_t			events;
  28	bool				update_events;
  29	bool				update_user_data;
  30};
  31
  32struct io_poll_table {
  33	struct poll_table_struct pt;
  34	struct io_kiocb *req;
  35	int nr_entries;
  36	int error;
  37	bool owning;
  38	/* output value, set only if arm poll returns >0 */
  39	__poll_t result_mask;
  40};
  41
  42#define IO_POLL_CANCEL_FLAG	BIT(31)
  43#define IO_POLL_RETRY_FLAG	BIT(30)
  44#define IO_POLL_REF_MASK	GENMASK(29, 0)
  45
  46/*
  47 * We usually have 1-2 refs taken, 128 is more than enough and we want to
  48 * maximise the margin between this amount and the moment when it overflows.
  49 */
  50#define IO_POLL_REF_BIAS	128
  51
  52#define IO_WQE_F_DOUBLE		1
  53
  54static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
  55			void *key);
  56
  57static inline struct io_kiocb *wqe_to_req(struct wait_queue_entry *wqe)
  58{
  59	unsigned long priv = (unsigned long)wqe->private;
  60
  61	return (struct io_kiocb *)(priv & ~IO_WQE_F_DOUBLE);
  62}
  63
  64static inline bool wqe_is_double(struct wait_queue_entry *wqe)
  65{
  66	unsigned long priv = (unsigned long)wqe->private;
  67
  68	return priv & IO_WQE_F_DOUBLE;
  69}
  70
  71static bool io_poll_get_ownership_slowpath(struct io_kiocb *req)
  72{
  73	int v;
  74
  75	/*
  76	 * poll_refs are already elevated and we don't have much hope for
  77	 * grabbing the ownership. Instead of incrementing set a retry flag
  78	 * to notify the loop that there might have been some change.
  79	 */
  80	v = atomic_fetch_or(IO_POLL_RETRY_FLAG, &req->poll_refs);
  81	if (v & IO_POLL_REF_MASK)
  82		return false;
  83	return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
  84}
  85
  86/*
  87 * If refs part of ->poll_refs (see IO_POLL_REF_MASK) is 0, it's free. We can
  88 * bump it and acquire ownership. It's disallowed to modify requests while not
   89 * owning it, which prevents races when enqueueing task_work and between
  90 * arming poll and wakeups.
  91 */
  92static inline bool io_poll_get_ownership(struct io_kiocb *req)
  93{
  94	if (unlikely(atomic_read(&req->poll_refs) >= IO_POLL_REF_BIAS))
  95		return io_poll_get_ownership_slowpath(req);
  96	return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
  97}
  98
  99static void io_poll_mark_cancelled(struct io_kiocb *req)
 100{
 101	atomic_or(IO_POLL_CANCEL_FLAG, &req->poll_refs);
 102}
 103
 104static struct io_poll *io_poll_get_double(struct io_kiocb *req)
 105{
 106	/* pure poll stashes this in ->async_data, poll driven retry elsewhere */
 107	if (req->opcode == IORING_OP_POLL_ADD)
 108		return req->async_data;
 109	return req->apoll->double_poll;
 110}
 111
 112static struct io_poll *io_poll_get_single(struct io_kiocb *req)
 113{
 114	if (req->opcode == IORING_OP_POLL_ADD)
 115		return io_kiocb_to_cmd(req, struct io_poll);
 116	return &req->apoll->poll;
 117}
 118
 119static void io_poll_req_insert(struct io_kiocb *req)
 120{
 121	struct io_hash_table *table = &req->ctx->cancel_table;
 122	u32 index = hash_long(req->cqe.user_data, table->hash_bits);
 123	struct io_hash_bucket *hb = &table->hbs[index];
 124
 125	spin_lock(&hb->lock);
 126	hlist_add_head(&req->hash_node, &hb->list);
 127	spin_unlock(&hb->lock);
 128}
 129
 130static void io_poll_req_delete(struct io_kiocb *req, struct io_ring_ctx *ctx)
 131{
 132	struct io_hash_table *table = &req->ctx->cancel_table;
 133	u32 index = hash_long(req->cqe.user_data, table->hash_bits);
 134	spinlock_t *lock = &table->hbs[index].lock;
 135
 136	spin_lock(lock);
 137	hash_del(&req->hash_node);
 138	spin_unlock(lock);
 139}
 140
 141static void io_poll_req_insert_locked(struct io_kiocb *req)
 142{
 143	struct io_hash_table *table = &req->ctx->cancel_table_locked;
 144	u32 index = hash_long(req->cqe.user_data, table->hash_bits);
 145
 146	lockdep_assert_held(&req->ctx->uring_lock);
 147
 148	hlist_add_head(&req->hash_node, &table->hbs[index].list);
 149}
 150
 151static void io_poll_tw_hash_eject(struct io_kiocb *req, struct io_tw_state *ts)
 152{
 153	struct io_ring_ctx *ctx = req->ctx;
 154
 155	if (req->flags & REQ_F_HASH_LOCKED) {
 156		/*
 157		 * ->cancel_table_locked is protected by ->uring_lock in
 158		 * contrast to per bucket spinlocks. Likely, tctx_task_work()
 159		 * already grabbed the mutex for us, but there is a chance it
 160		 * failed.
 161		 */
 162		io_tw_lock(ctx, ts);
 163		hash_del(&req->hash_node);
 164		req->flags &= ~REQ_F_HASH_LOCKED;
 165	} else {
 166		io_poll_req_delete(req, ctx);
 167	}
 168}
 169
 170static void io_init_poll_iocb(struct io_poll *poll, __poll_t events)
 171{
 172	poll->head = NULL;
 173#define IO_POLL_UNMASK	(EPOLLERR|EPOLLHUP|EPOLLNVAL|EPOLLRDHUP)
 174	/* mask in events that we always want/need */
 175	poll->events = events | IO_POLL_UNMASK;
 176	INIT_LIST_HEAD(&poll->wait.entry);
 177	init_waitqueue_func_entry(&poll->wait, io_poll_wake);
 178}
 179
 180static inline void io_poll_remove_entry(struct io_poll *poll)
 181{
 182	struct wait_queue_head *head = smp_load_acquire(&poll->head);
 183
 184	if (head) {
 185		spin_lock_irq(&head->lock);
 186		list_del_init(&poll->wait.entry);
 187		poll->head = NULL;
 188		spin_unlock_irq(&head->lock);
 189	}
 190}
 191
 192static void io_poll_remove_entries(struct io_kiocb *req)
 193{
 194	/*
 195	 * Nothing to do if neither of those flags are set. Avoid dipping
 196	 * into the poll/apoll/double cachelines if we can.
 197	 */
 198	if (!(req->flags & (REQ_F_SINGLE_POLL | REQ_F_DOUBLE_POLL)))
 199		return;
 200
 201	/*
 202	 * While we hold the waitqueue lock and the waitqueue is nonempty,
 203	 * wake_up_pollfree() will wait for us.  However, taking the waitqueue
 204	 * lock in the first place can race with the waitqueue being freed.
 205	 *
 206	 * We solve this as eventpoll does: by taking advantage of the fact that
 207	 * all users of wake_up_pollfree() will RCU-delay the actual free.  If
 208	 * we enter rcu_read_lock() and see that the pointer to the queue is
 209	 * non-NULL, we can then lock it without the memory being freed out from
 210	 * under us.
 211	 *
 212	 * Keep holding rcu_read_lock() as long as we hold the queue lock, in
 213	 * case the caller deletes the entry from the queue, leaving it empty.
 214	 * In that case, only RCU prevents the queue memory from being freed.
 215	 */
 216	rcu_read_lock();
 217	if (req->flags & REQ_F_SINGLE_POLL)
 218		io_poll_remove_entry(io_poll_get_single(req));
 219	if (req->flags & REQ_F_DOUBLE_POLL)
 220		io_poll_remove_entry(io_poll_get_double(req));
 221	rcu_read_unlock();
 222}
 223
 224enum {
 225	IOU_POLL_DONE = 0,
 226	IOU_POLL_NO_ACTION = 1,
 227	IOU_POLL_REMOVE_POLL_USE_RES = 2,
 228	IOU_POLL_REISSUE = 3,
 229	IOU_POLL_REQUEUE = 4,
 230};
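/*
 * Editor's note, not part of the original file: io_poll_check_events()
 * returns one of the codes above and io_poll_task_func() acts on them.
 * IOU_POLL_DONE: polling finished, the event mask is in req->cqe.res.
 * IOU_POLL_NO_ACTION: spurious wakeup, or a multishot CQE was already
 * posted; nothing more to do. IOU_POLL_REMOVE_POLL_USE_RES: tear down the
 * poll and complete with the result already stored in req->cqe.
 * IOU_POLL_REISSUE: resubmit the underlying request. IOU_POLL_REQUEUE:
 * queue the poll task_work again.
 */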
 231
 232static void __io_poll_execute(struct io_kiocb *req, int mask)
 233{
 234	unsigned flags = 0;
 235
 236	io_req_set_res(req, mask, 0);
 237	req->io_task_work.func = io_poll_task_func;
 238
 239	trace_io_uring_task_add(req, mask);
 240
 241	if (!(req->flags & REQ_F_POLL_NO_LAZY))
 242		flags = IOU_F_TWQ_LAZY_WAKE;
 243	__io_req_task_work_add(req, flags);
 244}
 245
 246static inline void io_poll_execute(struct io_kiocb *req, int res)
 247{
 248	if (io_poll_get_ownership(req))
 249		__io_poll_execute(req, res);
 250}
 251
 252/*
 253 * All poll tw should go through this. Checks for poll events, manages
 254 * references, does rewait, etc.
 255 *
  256 * Returns a negative error on failure. IOU_POLL_NO_ACTION when no action is
  257 * required, which means either a spurious wakeup or a multishot CQE was served.
 258 * IOU_POLL_DONE when it's done with the request, then the mask is stored in
 259 * req->cqe.res. IOU_POLL_REMOVE_POLL_USE_RES indicates to remove multishot
 260 * poll and that the result is stored in req->cqe.
 261 */
 262static int io_poll_check_events(struct io_kiocb *req, struct io_tw_state *ts)
 263{
 264	int v;
 265
 266	/* req->task == current here, checking PF_EXITING is safe */
 267	if (unlikely(req->task->flags & PF_EXITING))
 268		return -ECANCELED;
 269
 270	do {
 271		v = atomic_read(&req->poll_refs);
 272
 273		if (unlikely(v != 1)) {
 274			/* tw should be the owner and so have some refs */
 275			if (WARN_ON_ONCE(!(v & IO_POLL_REF_MASK)))
 276				return IOU_POLL_NO_ACTION;
 277			if (v & IO_POLL_CANCEL_FLAG)
 278				return -ECANCELED;
 279			/*
 280			 * cqe.res contains only events of the first wake up
 281			 * and all others are to be lost. Redo vfs_poll() to get
 282			 * up to date state.
 283			 */
 284			if ((v & IO_POLL_REF_MASK) != 1)
 285				req->cqe.res = 0;
 286
 287			if (v & IO_POLL_RETRY_FLAG) {
 288				req->cqe.res = 0;
 289				/*
 290				 * We won't find new events that came in between
 291				 * vfs_poll and the ref put unless we clear the
 292				 * flag in advance.
 293				 */
 294				atomic_andnot(IO_POLL_RETRY_FLAG, &req->poll_refs);
 295				v &= ~IO_POLL_RETRY_FLAG;
 296			}
 297		}
 298
 299		/* the mask was stashed in __io_poll_execute */
 300		if (!req->cqe.res) {
 301			struct poll_table_struct pt = { ._key = req->apoll_events };
 302			req->cqe.res = vfs_poll(req->file, &pt) & req->apoll_events;
 303			/*
 304			 * We got woken with a mask, but someone else got to
 305			 * it first. The above vfs_poll() doesn't add us back
 306			 * to the waitqueue, so if we get nothing back, we
 307			 * should be safe and attempt a reissue.
 308			 */
 309			if (unlikely(!req->cqe.res)) {
 310				/* Multishot armed need not reissue */
 311				if (!(req->apoll_events & EPOLLONESHOT))
 312					continue;
 313				return IOU_POLL_REISSUE;
 314			}
 315		}
 316		if (req->apoll_events & EPOLLONESHOT)
 317			return IOU_POLL_DONE;
 318
 319		/* multishot, just fill a CQE and proceed */
 320		if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
 321			__poll_t mask = mangle_poll(req->cqe.res &
 322						    req->apoll_events);
 323
 324			if (!io_fill_cqe_req_aux(req, ts->locked, mask,
 325						 IORING_CQE_F_MORE)) {
 326				io_req_set_res(req, mask, 0);
 327				return IOU_POLL_REMOVE_POLL_USE_RES;
 328			}
 329		} else {
 330			int ret = io_poll_issue(req, ts);
 331			if (ret == IOU_STOP_MULTISHOT)
 332				return IOU_POLL_REMOVE_POLL_USE_RES;
 333			else if (ret == IOU_REQUEUE)
 334				return IOU_POLL_REQUEUE;
 335			if (ret < 0)
 336				return ret;
 337		}
 338
 339		/* force the next iteration to vfs_poll() */
 340		req->cqe.res = 0;
 341
 342		/*
 343		 * Release all references, retry if someone tried to restart
 344		 * task_work while we were executing it.
 345		 */
 346	} while (atomic_sub_return(v & IO_POLL_REF_MASK, &req->poll_refs) &
 347					IO_POLL_REF_MASK);
 348
 349	return IOU_POLL_NO_ACTION;
 350}
 351
 352void io_poll_task_func(struct io_kiocb *req, struct io_tw_state *ts)
 353{
 354	int ret;
 355
 356	ret = io_poll_check_events(req, ts);
 357	if (ret == IOU_POLL_NO_ACTION) {
 358		return;
 359	} else if (ret == IOU_POLL_REQUEUE) {
 360		__io_poll_execute(req, 0);
 361		return;
 362	}
 363	io_poll_remove_entries(req);
 364	io_poll_tw_hash_eject(req, ts);
 365
 366	if (req->opcode == IORING_OP_POLL_ADD) {
 367		if (ret == IOU_POLL_DONE) {
 368			struct io_poll *poll;
 369
 370			poll = io_kiocb_to_cmd(req, struct io_poll);
 371			req->cqe.res = mangle_poll(req->cqe.res & poll->events);
 372		} else if (ret == IOU_POLL_REISSUE) {
 373			io_req_task_submit(req, ts);
 374			return;
 375		} else if (ret != IOU_POLL_REMOVE_POLL_USE_RES) {
 376			req->cqe.res = ret;
 377			req_set_fail(req);
 378		}
 379
 380		io_req_set_res(req, req->cqe.res, 0);
 381		io_req_task_complete(req, ts);
 382	} else {
 383		io_tw_lock(req->ctx, ts);
 384
 385		if (ret == IOU_POLL_REMOVE_POLL_USE_RES)
 386			io_req_task_complete(req, ts);
 387		else if (ret == IOU_POLL_DONE || ret == IOU_POLL_REISSUE)
 388			io_req_task_submit(req, ts);
 389		else
 390			io_req_defer_failed(req, ret);
 391	}
 392}
 393
 394static void io_poll_cancel_req(struct io_kiocb *req)
 395{
 396	io_poll_mark_cancelled(req);
 397	/* kick tw, which should complete the request */
 398	io_poll_execute(req, 0);
 399}
 400
 401#define IO_ASYNC_POLL_COMMON	(EPOLLONESHOT | EPOLLPRI)
 402
 403static __cold int io_pollfree_wake(struct io_kiocb *req, struct io_poll *poll)
 404{
 405	io_poll_mark_cancelled(req);
 406	/* we have to kick tw in case it's not already */
 407	io_poll_execute(req, 0);
 408
 409	/*
  410	 * If the waitqueue is being freed early but someone already
 411	 * holds ownership over it, we have to tear down the request as
 412	 * best we can. That means immediately removing the request from
 413	 * its waitqueue and preventing all further accesses to the
 414	 * waitqueue via the request.
 415	 */
 416	list_del_init(&poll->wait.entry);
 417
 418	/*
 419	 * Careful: this *must* be the last step, since as soon
 420	 * as req->head is NULL'ed out, the request can be
 421	 * completed and freed, since aio_poll_complete_work()
 422	 * will no longer need to take the waitqueue lock.
 423	 */
 424	smp_store_release(&poll->head, NULL);
 425	return 1;
 426}
 427
 428static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
 429			void *key)
 430{
 431	struct io_kiocb *req = wqe_to_req(wait);
 432	struct io_poll *poll = container_of(wait, struct io_poll, wait);
 433	__poll_t mask = key_to_poll(key);
 434
 435	if (unlikely(mask & POLLFREE))
 436		return io_pollfree_wake(req, poll);
 437
 438	/* for instances that support it check for an event match first */
 439	if (mask && !(mask & (poll->events & ~IO_ASYNC_POLL_COMMON)))
 440		return 0;
 441
 442	if (io_poll_get_ownership(req)) {
 443		/*
 444		 * If we trigger a multishot poll off our own wakeup path,
 445		 * disable multishot as there is a circular dependency between
 446		 * CQ posting and triggering the event.
 447		 */
 448		if (mask & EPOLL_URING_WAKE)
 449			poll->events |= EPOLLONESHOT;
 450
 451		/* optional, saves extra locking for removal in tw handler */
 452		if (mask && poll->events & EPOLLONESHOT) {
 453			list_del_init(&poll->wait.entry);
 454			poll->head = NULL;
 455			if (wqe_is_double(wait))
 456				req->flags &= ~REQ_F_DOUBLE_POLL;
 457			else
 458				req->flags &= ~REQ_F_SINGLE_POLL;
 459		}
 460		__io_poll_execute(req, mask);
 461	}
 462	return 1;
 463}
 464
 465/* fails only when polling is already completing by the first entry */
 466static bool io_poll_double_prepare(struct io_kiocb *req)
 467{
 468	struct wait_queue_head *head;
 469	struct io_poll *poll = io_poll_get_single(req);
 470
 471	/* head is RCU protected, see io_poll_remove_entries() comments */
 472	rcu_read_lock();
 473	head = smp_load_acquire(&poll->head);
 474	/*
 475	 * poll arm might not hold ownership and so race for req->flags with
 476	 * io_poll_wake(). There is only one poll entry queued, serialise with
  477	 * it by taking its head lock. As we're still arming, the tw handler
 478	 * is not going to be run, so there are no races with it.
 479	 */
 480	if (head) {
 481		spin_lock_irq(&head->lock);
 482		req->flags |= REQ_F_DOUBLE_POLL;
 483		if (req->opcode == IORING_OP_POLL_ADD)
 484			req->flags |= REQ_F_ASYNC_DATA;
 485		spin_unlock_irq(&head->lock);
 486	}
 487	rcu_read_unlock();
 488	return !!head;
 489}
 490
 491static void __io_queue_proc(struct io_poll *poll, struct io_poll_table *pt,
 492			    struct wait_queue_head *head,
 493			    struct io_poll **poll_ptr)
 494{
 495	struct io_kiocb *req = pt->req;
 496	unsigned long wqe_private = (unsigned long) req;
 497
 498	/*
 499	 * The file being polled uses multiple waitqueues for poll handling
 500	 * (e.g. one for read, one for write). Setup a separate io_poll
 501	 * if this happens.
 502	 */
 503	if (unlikely(pt->nr_entries)) {
 504		struct io_poll *first = poll;
 505
 506		/* double add on the same waitqueue head, ignore */
 507		if (first->head == head)
 508			return;
 509		/* already have a 2nd entry, fail a third attempt */
 510		if (*poll_ptr) {
 511			if ((*poll_ptr)->head == head)
 512				return;
 513			pt->error = -EINVAL;
 514			return;
 515		}
 516
 517		poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
 518		if (!poll) {
 519			pt->error = -ENOMEM;
 520			return;
 521		}
 522
 523		/* mark as double wq entry */
 524		wqe_private |= IO_WQE_F_DOUBLE;
 525		io_init_poll_iocb(poll, first->events);
 526		if (!io_poll_double_prepare(req)) {
 527			/* the request is completing, just back off */
 528			kfree(poll);
 529			return;
 530		}
 531		*poll_ptr = poll;
 532	} else {
 533		/* fine to modify, there is no poll queued to race with us */
 534		req->flags |= REQ_F_SINGLE_POLL;
 535	}
 536
 537	pt->nr_entries++;
 538	poll->head = head;
 539	poll->wait.private = (void *) wqe_private;
 540
 541	if (poll->events & EPOLLEXCLUSIVE) {
 542		/*
 543		 * Exclusive waits may only wake a limited amount of entries
 544		 * rather than all of them, this may interfere with lazy
 545		 * wake if someone does wait(events > 1). Ensure we don't do
 546		 * lazy wake for those, as we need to process each one as they
 547		 * come in.
 548		 */
 549		req->flags |= REQ_F_POLL_NO_LAZY;
 550		add_wait_queue_exclusive(head, &poll->wait);
 551	} else {
 552		add_wait_queue(head, &poll->wait);
 553	}
 554}
 555
 556static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
 557			       struct poll_table_struct *p)
 558{
 559	struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
 560	struct io_poll *poll = io_kiocb_to_cmd(pt->req, struct io_poll);
 561
 562	__io_queue_proc(poll, pt, head,
 563			(struct io_poll **) &pt->req->async_data);
 564}
 565
 566static bool io_poll_can_finish_inline(struct io_kiocb *req,
 567				      struct io_poll_table *pt)
 568{
 569	return pt->owning || io_poll_get_ownership(req);
 570}
 571
 572static void io_poll_add_hash(struct io_kiocb *req)
 573{
 574	if (req->flags & REQ_F_HASH_LOCKED)
 575		io_poll_req_insert_locked(req);
 576	else
 577		io_poll_req_insert(req);
 578}
 579
 580/*
  581 * Returns 0 when it's handed over for polling. The caller owns the request if
 582 * it returns non-zero, but otherwise should not touch it. Negative values
 583 * contain an error code. When the result is >0, the polling has completed
 584 * inline and ipt.result_mask is set to the mask.
 585 */
 586static int __io_arm_poll_handler(struct io_kiocb *req,
 587				 struct io_poll *poll,
 588				 struct io_poll_table *ipt, __poll_t mask,
 589				 unsigned issue_flags)
 590{
 591	struct io_ring_ctx *ctx = req->ctx;
 592
 593	INIT_HLIST_NODE(&req->hash_node);
 594	req->work.cancel_seq = atomic_read(&ctx->cancel_seq);
 595	io_init_poll_iocb(poll, mask);
 596	poll->file = req->file;
 597	req->apoll_events = poll->events;
 598
 599	ipt->pt._key = mask;
 600	ipt->req = req;
 601	ipt->error = 0;
 602	ipt->nr_entries = 0;
 603	/*
 604	 * Polling is either completed here or via task_work, so if we're in the
 605	 * task context we're naturally serialised with tw by merit of running
 606	 * the same task. When it's io-wq, take the ownership to prevent tw
 607	 * from running. However, when we're in the task context, skip taking
 608	 * it as an optimisation.
 609	 *
 610	 * Note: even though the request won't be completed/freed, without
 611	 * ownership we still can race with io_poll_wake().
 612	 * io_poll_can_finish_inline() tries to deal with that.
 613	 */
 614	ipt->owning = issue_flags & IO_URING_F_UNLOCKED;
 615	atomic_set(&req->poll_refs, (int)ipt->owning);
 616
 617	/* io-wq doesn't hold uring_lock */
 618	if (issue_flags & IO_URING_F_UNLOCKED)
 619		req->flags &= ~REQ_F_HASH_LOCKED;
 620
 621	mask = vfs_poll(req->file, &ipt->pt) & poll->events;
 622
 623	if (unlikely(ipt->error || !ipt->nr_entries)) {
 624		io_poll_remove_entries(req);
 625
 626		if (!io_poll_can_finish_inline(req, ipt)) {
 627			io_poll_mark_cancelled(req);
 628			return 0;
 629		} else if (mask && (poll->events & EPOLLET)) {
 630			ipt->result_mask = mask;
 631			return 1;
 632		}
 633		return ipt->error ?: -EINVAL;
 634	}
 635
 636	if (mask &&
 637	   ((poll->events & (EPOLLET|EPOLLONESHOT)) == (EPOLLET|EPOLLONESHOT))) {
 638		if (!io_poll_can_finish_inline(req, ipt)) {
 639			io_poll_add_hash(req);
 640			return 0;
 641		}
 642		io_poll_remove_entries(req);
 643		ipt->result_mask = mask;
 644		/* no one else has access to the req, forget about the ref */
 645		return 1;
 646	}
 647
 648	io_poll_add_hash(req);
 649
 650	if (mask && (poll->events & EPOLLET) &&
 651	    io_poll_can_finish_inline(req, ipt)) {
 652		__io_poll_execute(req, mask);
 653		return 0;
 654	}
 655
 656	if (ipt->owning) {
 657		/*
 658		 * Try to release ownership. If we see a change of state, e.g.
  659		 * poll was woken up, queue up a tw, it'll deal with it.
 660		 */
 661		if (atomic_cmpxchg(&req->poll_refs, 1, 0) != 1)
 662			__io_poll_execute(req, 0);
 663	}
 664	return 0;
 665}
 666
 667static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
 668			       struct poll_table_struct *p)
 669{
 670	struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
 671	struct async_poll *apoll = pt->req->apoll;
 672
 673	__io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
 674}
 675
 676/*
 677 * We can't reliably detect loops in repeated poll triggers and issue
 678 * subsequently failing. But rather than fail these immediately, allow a
 679 * certain amount of retries before we give up. Given that this condition
 680 * should _rarely_ trigger even once, we should be fine with a larger value.
 681 */
 682#define APOLL_MAX_RETRY		128
 683
 684static struct async_poll *io_req_alloc_apoll(struct io_kiocb *req,
 685					     unsigned issue_flags)
 686{
 687	struct io_ring_ctx *ctx = req->ctx;
 688	struct io_cache_entry *entry;
 689	struct async_poll *apoll;
 690
 691	if (req->flags & REQ_F_POLLED) {
 692		apoll = req->apoll;
 693		kfree(apoll->double_poll);
 694	} else if (!(issue_flags & IO_URING_F_UNLOCKED)) {
 695		entry = io_alloc_cache_get(&ctx->apoll_cache);
 696		if (entry == NULL)
 697			goto alloc_apoll;
 698		apoll = container_of(entry, struct async_poll, cache);
 699		apoll->poll.retries = APOLL_MAX_RETRY;
 700	} else {
 701alloc_apoll:
 702		apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
 703		if (unlikely(!apoll))
 704			return NULL;
 705		apoll->poll.retries = APOLL_MAX_RETRY;
 706	}
 707	apoll->double_poll = NULL;
 708	req->apoll = apoll;
 709	if (unlikely(!--apoll->poll.retries))
 710		return NULL;
 711	return apoll;
 712}
 713
 714int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
 715{
 716	const struct io_issue_def *def = &io_issue_defs[req->opcode];
 717	struct async_poll *apoll;
 718	struct io_poll_table ipt;
 719	__poll_t mask = POLLPRI | POLLERR | EPOLLET;
 720	int ret;
 721
 722	/*
 723	 * apoll requests already grab the mutex to complete in the tw handler,
 724	 * so removal from the mutex-backed hash is free, use it by default.
 725	 */
 726	req->flags |= REQ_F_HASH_LOCKED;
 727
 728	if (!def->pollin && !def->pollout)
 729		return IO_APOLL_ABORTED;
 730	if (!file_can_poll(req->file))
 731		return IO_APOLL_ABORTED;
 732	if (!(req->flags & REQ_F_APOLL_MULTISHOT))
 733		mask |= EPOLLONESHOT;
 734
 735	if (def->pollin) {
 736		mask |= EPOLLIN | EPOLLRDNORM;
 737
 738		/* If reading from MSG_ERRQUEUE using recvmsg, ignore POLLIN */
 739		if (req->flags & REQ_F_CLEAR_POLLIN)
 740			mask &= ~EPOLLIN;
 741	} else {
 742		mask |= EPOLLOUT | EPOLLWRNORM;
 743	}
 744	if (def->poll_exclusive)
 745		mask |= EPOLLEXCLUSIVE;
 746
 747	apoll = io_req_alloc_apoll(req, issue_flags);
 748	if (!apoll)
 749		return IO_APOLL_ABORTED;
 750	req->flags &= ~(REQ_F_SINGLE_POLL | REQ_F_DOUBLE_POLL);
 751	req->flags |= REQ_F_POLLED;
 752	ipt.pt._qproc = io_async_queue_proc;
 753
 754	io_kbuf_recycle(req, issue_flags);
 755
 756	ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask, issue_flags);
 757	if (ret)
 758		return ret > 0 ? IO_APOLL_READY : IO_APOLL_ABORTED;
 759	trace_io_uring_poll_arm(req, mask, apoll->poll.events);
 760	return IO_APOLL_OK;
 761}
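/*
 * Editor's note, not part of the original file: the return values map to
 * how the core issue path treats the request. IO_APOLL_OK means the poll
 * was armed and the request now waits for a wakeup, IO_APOLL_READY means
 * vfs_poll() reported the requested events right away so the operation
 * should be retried inline, and IO_APOLL_ABORTED means arming was not
 * possible and the request falls back to the io-wq worker path.
 */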
 762
 763static __cold bool io_poll_remove_all_table(struct task_struct *tsk,
 764					    struct io_hash_table *table,
 765					    bool cancel_all)
 766{
 767	unsigned nr_buckets = 1U << table->hash_bits;
 768	struct hlist_node *tmp;
 769	struct io_kiocb *req;
 770	bool found = false;
 771	int i;
 772
 773	for (i = 0; i < nr_buckets; i++) {
 774		struct io_hash_bucket *hb = &table->hbs[i];
 775
 776		spin_lock(&hb->lock);
 777		hlist_for_each_entry_safe(req, tmp, &hb->list, hash_node) {
 778			if (io_match_task_safe(req, tsk, cancel_all)) {
 779				hlist_del_init(&req->hash_node);
 780				io_poll_cancel_req(req);
 781				found = true;
 782			}
 783		}
 784		spin_unlock(&hb->lock);
 785	}
 786	return found;
 787}
 788
 789/*
 790 * Returns true if we found and killed one or more poll requests
 791 */
 792__cold bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
 793			       bool cancel_all)
 794	__must_hold(&ctx->uring_lock)
 795{
 796	bool ret;
 797
 798	ret = io_poll_remove_all_table(tsk, &ctx->cancel_table, cancel_all);
 799	ret |= io_poll_remove_all_table(tsk, &ctx->cancel_table_locked, cancel_all);
 800	return ret;
 801}
 802
 803static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, bool poll_only,
 804				     struct io_cancel_data *cd,
 805				     struct io_hash_table *table,
 806				     struct io_hash_bucket **out_bucket)
 807{
 808	struct io_kiocb *req;
 809	u32 index = hash_long(cd->data, table->hash_bits);
 810	struct io_hash_bucket *hb = &table->hbs[index];
 811
 812	*out_bucket = NULL;
 813
 814	spin_lock(&hb->lock);
 815	hlist_for_each_entry(req, &hb->list, hash_node) {
 816		if (cd->data != req->cqe.user_data)
 817			continue;
 818		if (poll_only && req->opcode != IORING_OP_POLL_ADD)
 819			continue;
 820		if (cd->flags & IORING_ASYNC_CANCEL_ALL) {
 821			if (cd->seq == req->work.cancel_seq)
 822				continue;
 823			req->work.cancel_seq = cd->seq;
 824		}
 825		*out_bucket = hb;
 826		return req;
 827	}
 828	spin_unlock(&hb->lock);
 829	return NULL;
 830}
 831
 832static struct io_kiocb *io_poll_file_find(struct io_ring_ctx *ctx,
 833					  struct io_cancel_data *cd,
 834					  struct io_hash_table *table,
 835					  struct io_hash_bucket **out_bucket)
 836{
 837	unsigned nr_buckets = 1U << table->hash_bits;
 838	struct io_kiocb *req;
 839	int i;
 840
 841	*out_bucket = NULL;
 842
 843	for (i = 0; i < nr_buckets; i++) {
 844		struct io_hash_bucket *hb = &table->hbs[i];
 845
 846		spin_lock(&hb->lock);
 847		hlist_for_each_entry(req, &hb->list, hash_node) {
 848			if (io_cancel_req_match(req, cd)) {
 849				*out_bucket = hb;
 850				return req;
 851			}
 852		}
 853		spin_unlock(&hb->lock);
 854	}
 855	return NULL;
 856}
 857
 858static int io_poll_disarm(struct io_kiocb *req)
 859{
 860	if (!req)
 861		return -ENOENT;
 862	if (!io_poll_get_ownership(req))
 863		return -EALREADY;
 864	io_poll_remove_entries(req);
 865	hash_del(&req->hash_node);
 866	return 0;
 867}
 868
 869static int __io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
 870			    struct io_hash_table *table)
 871{
 872	struct io_hash_bucket *bucket;
 873	struct io_kiocb *req;
 874
 875	if (cd->flags & (IORING_ASYNC_CANCEL_FD | IORING_ASYNC_CANCEL_OP |
 876			 IORING_ASYNC_CANCEL_ANY))
 877		req = io_poll_file_find(ctx, cd, table, &bucket);
 878	else
 879		req = io_poll_find(ctx, false, cd, table, &bucket);
 880
 881	if (req)
 882		io_poll_cancel_req(req);
 883	if (bucket)
 884		spin_unlock(&bucket->lock);
 885	return req ? 0 : -ENOENT;
 886}
 887
 888int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
 889		   unsigned issue_flags)
 890{
 891	int ret;
 892
 893	ret = __io_poll_cancel(ctx, cd, &ctx->cancel_table);
 894	if (ret != -ENOENT)
 895		return ret;
 896
 897	io_ring_submit_lock(ctx, issue_flags);
 898	ret = __io_poll_cancel(ctx, cd, &ctx->cancel_table_locked);
 899	io_ring_submit_unlock(ctx, issue_flags);
 900	return ret;
 901}
 902
 903static __poll_t io_poll_parse_events(const struct io_uring_sqe *sqe,
 904				     unsigned int flags)
 905{
 906	u32 events;
 907
 908	events = READ_ONCE(sqe->poll32_events);
 909#ifdef __BIG_ENDIAN
 910	events = swahw32(events);
 911#endif
 912	if (!(flags & IORING_POLL_ADD_MULTI))
 913		events |= EPOLLONESHOT;
 914	if (!(flags & IORING_POLL_ADD_LEVEL))
 915		events |= EPOLLET;
 916	return demangle_poll(events) |
 917		(events & (EPOLLEXCLUSIVE|EPOLLONESHOT|EPOLLET));
 918}
 919
 920int io_poll_remove_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 921{
 922	struct io_poll_update *upd = io_kiocb_to_cmd(req, struct io_poll_update);
 923	u32 flags;
 924
 925	if (sqe->buf_index || sqe->splice_fd_in)
 926		return -EINVAL;
 927	flags = READ_ONCE(sqe->len);
 928	if (flags & ~(IORING_POLL_UPDATE_EVENTS | IORING_POLL_UPDATE_USER_DATA |
 929		      IORING_POLL_ADD_MULTI))
 930		return -EINVAL;
 931	/* meaningless without update */
 932	if (flags == IORING_POLL_ADD_MULTI)
 933		return -EINVAL;
 934
 935	upd->old_user_data = READ_ONCE(sqe->addr);
 936	upd->update_events = flags & IORING_POLL_UPDATE_EVENTS;
 937	upd->update_user_data = flags & IORING_POLL_UPDATE_USER_DATA;
 938
 939	upd->new_user_data = READ_ONCE(sqe->off);
 940	if (!upd->update_user_data && upd->new_user_data)
 941		return -EINVAL;
 942	if (upd->update_events)
 943		upd->events = io_poll_parse_events(sqe, flags);
 944	else if (sqe->poll32_events)
 945		return -EINVAL;
 946
 947	return 0;
 948}
 949
 950int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 951{
 952	struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll);
 953	u32 flags;
 954
 955	if (sqe->buf_index || sqe->off || sqe->addr)
 956		return -EINVAL;
 957	flags = READ_ONCE(sqe->len);
 958	if (flags & ~IORING_POLL_ADD_MULTI)
 959		return -EINVAL;
 960	if ((flags & IORING_POLL_ADD_MULTI) && (req->flags & REQ_F_CQE_SKIP))
 961		return -EINVAL;
 962
 963	poll->events = io_poll_parse_events(sqe, flags);
 964	return 0;
 965}
 966
 967int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
 968{
 969	struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll);
 970	struct io_poll_table ipt;
 971	int ret;
 972
 973	ipt.pt._qproc = io_poll_queue_proc;
 974
 975	/*
 976	 * If sqpoll or single issuer, there is no contention for ->uring_lock
 977	 * and we'll end up holding it in tw handlers anyway.
 978	 */
 979	if (req->ctx->flags & (IORING_SETUP_SQPOLL|IORING_SETUP_SINGLE_ISSUER))
 980		req->flags |= REQ_F_HASH_LOCKED;
 981
 982	ret = __io_arm_poll_handler(req, poll, &ipt, poll->events, issue_flags);
 983	if (ret > 0) {
 984		io_req_set_res(req, ipt.result_mask, 0);
 985		return IOU_OK;
 986	}
 987	return ret ?: IOU_ISSUE_SKIP_COMPLETE;
 988}
 989
 990int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags)
 991{
 992	struct io_poll_update *poll_update = io_kiocb_to_cmd(req, struct io_poll_update);
 993	struct io_ring_ctx *ctx = req->ctx;
 994	struct io_cancel_data cd = { .ctx = ctx, .data = poll_update->old_user_data, };
 995	struct io_hash_bucket *bucket;
 996	struct io_kiocb *preq;
 997	int ret2, ret = 0;
 998	struct io_tw_state ts = { .locked = true };
 999
1000	io_ring_submit_lock(ctx, issue_flags);
1001	preq = io_poll_find(ctx, true, &cd, &ctx->cancel_table, &bucket);
1002	ret2 = io_poll_disarm(preq);
1003	if (bucket)
1004		spin_unlock(&bucket->lock);
1005	if (!ret2)
1006		goto found;
1007	if (ret2 != -ENOENT) {
1008		ret = ret2;
1009		goto out;
1010	}
1011
1012	preq = io_poll_find(ctx, true, &cd, &ctx->cancel_table_locked, &bucket);
1013	ret2 = io_poll_disarm(preq);
1014	if (bucket)
1015		spin_unlock(&bucket->lock);
1016	if (ret2) {
1017		ret = ret2;
1018		goto out;
1019	}
1020
1021found:
1022	if (WARN_ON_ONCE(preq->opcode != IORING_OP_POLL_ADD)) {
1023		ret = -EFAULT;
1024		goto out;
1025	}
1026
1027	if (poll_update->update_events || poll_update->update_user_data) {
 1028		/* only replace the event mask bits, keep the behavior flags */
1029		if (poll_update->update_events) {
1030			struct io_poll *poll = io_kiocb_to_cmd(preq, struct io_poll);
1031
1032			poll->events &= ~0xffff;
1033			poll->events |= poll_update->events & 0xffff;
1034			poll->events |= IO_POLL_UNMASK;
1035		}
1036		if (poll_update->update_user_data)
1037			preq->cqe.user_data = poll_update->new_user_data;
1038
1039		ret2 = io_poll_add(preq, issue_flags & ~IO_URING_F_UNLOCKED);
1040		/* successfully updated, don't complete poll request */
1041		if (!ret2 || ret2 == -EIOCBQUEUED)
1042			goto out;
1043	}
1044
1045	req_set_fail(preq);
1046	io_req_set_res(preq, -ECANCELED, 0);
1047	io_req_task_complete(preq, &ts);
1048out:
1049	io_ring_submit_unlock(ctx, issue_flags);
1050	if (ret < 0) {
1051		req_set_fail(req);
1052		return ret;
1053	}
1054	/* complete update request, we're done with it */
1055	io_req_set_res(req, ret, 0);
1056	return IOU_OK;
1057}
1058
1059void io_apoll_cache_free(struct io_cache_entry *entry)
1060{
1061	kfree(container_of(entry, struct async_poll, cache));
1062}
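For context, a minimal user-space sketch of the multishot path implemented above (IORING_OP_POLL_ADD with IORING_POLL_ADD_MULTI). This is an editor-added illustration, not part of poll.c; it assumes liburing 2.x, where io_uring_prep_poll_multishot() is available, and trims all error handling.

/* arm a multishot poll on a pipe and reap one completion */
#include <poll.h>
#include <stdio.h>
#include <unistd.h>
#include <liburing.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int fds[2];

	if (pipe(fds) || io_uring_queue_init(8, &ring, 0))
		return 1;

	/* IORING_OP_POLL_ADD with IORING_POLL_ADD_MULTI set by the helper */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_poll_multishot(sqe, fds[0], POLLIN);
	sqe->user_data = 0x1234;
	io_uring_submit(&ring);

	write(fds[1], "x", 1);		/* trigger a readiness event */

	if (!io_uring_wait_cqe(&ring, &cqe)) {
		/* cqe->res carries the poll mask; IORING_CQE_F_MORE means the
		 * poll stays armed, i.e. the multishot branch in
		 * io_poll_check_events() posted this CQE and kept going */
		printf("mask=0x%x more=%d\n", cqe->res,
		       !!(cqe->flags & IORING_CQE_F_MORE));
		io_uring_cqe_seen(&ring, cqe);
	}

	io_uring_queue_exit(&ring);
	return 0;
}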
io_uring/poll.c (v6.13.7)
  1// SPDX-License-Identifier: GPL-2.0
  2#include <linux/kernel.h>
  3#include <linux/errno.h>
  4#include <linux/fs.h>
  5#include <linux/file.h>
  6#include <linux/mm.h>
  7#include <linux/slab.h>
  8#include <linux/poll.h>
  9#include <linux/hashtable.h>
 10#include <linux/io_uring.h>
 11
 12#include <trace/events/io_uring.h>
 13
 14#include <uapi/linux/io_uring.h>
 15
 16#include "io_uring.h"
 17#include "alloc_cache.h"
 18#include "refs.h"
 19#include "napi.h"
 20#include "opdef.h"
 21#include "kbuf.h"
 22#include "poll.h"
 23#include "cancel.h"
 24
 25struct io_poll_update {
 26	struct file			*file;
 27	u64				old_user_data;
 28	u64				new_user_data;
 29	__poll_t			events;
 30	bool				update_events;
 31	bool				update_user_data;
 32};
 33
 34struct io_poll_table {
 35	struct poll_table_struct pt;
 36	struct io_kiocb *req;
 37	int nr_entries;
 38	int error;
 39	bool owning;
 40	/* output value, set only if arm poll returns >0 */
 41	__poll_t result_mask;
 42};
 43
 44#define IO_POLL_CANCEL_FLAG	BIT(31)
 45#define IO_POLL_RETRY_FLAG	BIT(30)
 46#define IO_POLL_REF_MASK	GENMASK(29, 0)
 47
 48/*
 49 * We usually have 1-2 refs taken, 128 is more than enough and we want to
 50 * maximise the margin between this amount and the moment when it overflows.
 51 */
 52#define IO_POLL_REF_BIAS	128
 53
 54#define IO_WQE_F_DOUBLE		1
 55
 56static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
 57			void *key);
 58
 59static inline struct io_kiocb *wqe_to_req(struct wait_queue_entry *wqe)
 60{
 61	unsigned long priv = (unsigned long)wqe->private;
 62
 63	return (struct io_kiocb *)(priv & ~IO_WQE_F_DOUBLE);
 64}
 65
 66static inline bool wqe_is_double(struct wait_queue_entry *wqe)
 67{
 68	unsigned long priv = (unsigned long)wqe->private;
 69
 70	return priv & IO_WQE_F_DOUBLE;
 71}
 72
 73static bool io_poll_get_ownership_slowpath(struct io_kiocb *req)
 74{
 75	int v;
 76
 77	/*
 78	 * poll_refs are already elevated and we don't have much hope for
 79	 * grabbing the ownership. Instead of incrementing set a retry flag
 80	 * to notify the loop that there might have been some change.
 81	 */
 82	v = atomic_fetch_or(IO_POLL_RETRY_FLAG, &req->poll_refs);
 83	if (v & IO_POLL_REF_MASK)
 84		return false;
 85	return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
 86}
 87
 88/*
 89 * If refs part of ->poll_refs (see IO_POLL_REF_MASK) is 0, it's free. We can
 90 * bump it and acquire ownership. It's disallowed to modify requests while not
 91 * owning it, which prevents races when enqueueing task_work and between
 92 * arming poll and wakeups.
 93 */
 94static inline bool io_poll_get_ownership(struct io_kiocb *req)
 95{
 96	if (unlikely(atomic_read(&req->poll_refs) >= IO_POLL_REF_BIAS))
 97		return io_poll_get_ownership_slowpath(req);
 98	return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
 99}
100
101static void io_poll_mark_cancelled(struct io_kiocb *req)
102{
103	atomic_or(IO_POLL_CANCEL_FLAG, &req->poll_refs);
104}
105
106static struct io_poll *io_poll_get_double(struct io_kiocb *req)
107{
108	/* pure poll stashes this in ->async_data, poll driven retry elsewhere */
109	if (req->opcode == IORING_OP_POLL_ADD)
110		return req->async_data;
111	return req->apoll->double_poll;
112}
113
114static struct io_poll *io_poll_get_single(struct io_kiocb *req)
115{
116	if (req->opcode == IORING_OP_POLL_ADD)
117		return io_kiocb_to_cmd(req, struct io_poll);
118	return &req->apoll->poll;
119}
120
121static void io_poll_req_insert(struct io_kiocb *req)
122{
123	struct io_hash_table *table = &req->ctx->cancel_table;
124	u32 index = hash_long(req->cqe.user_data, table->hash_bits);
125
126	lockdep_assert_held(&req->ctx->uring_lock);
127
128	hlist_add_head(&req->hash_node, &table->hbs[index].list);
129}
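/*
 * Editor's note, not part of the original file: compared with the v6.8
 * listing above, this version drops the per-bucket spinlocks and the
 * separate cancel_table_locked. The single cancel_table is protected by
 * ->uring_lock alone, hence the lockdep_assert_held() here and the
 * io_ring_submit_lock() taken around insertion in io_poll_add_hash().
 */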
130
131static void io_init_poll_iocb(struct io_poll *poll, __poll_t events)
132{
133	poll->head = NULL;
134#define IO_POLL_UNMASK	(EPOLLERR|EPOLLHUP|EPOLLNVAL|EPOLLRDHUP)
135	/* mask in events that we always want/need */
136	poll->events = events | IO_POLL_UNMASK;
137	INIT_LIST_HEAD(&poll->wait.entry);
138	init_waitqueue_func_entry(&poll->wait, io_poll_wake);
139}
140
141static inline void io_poll_remove_entry(struct io_poll *poll)
142{
143	struct wait_queue_head *head = smp_load_acquire(&poll->head);
144
145	if (head) {
146		spin_lock_irq(&head->lock);
147		list_del_init(&poll->wait.entry);
148		poll->head = NULL;
149		spin_unlock_irq(&head->lock);
150	}
151}
152
153static void io_poll_remove_entries(struct io_kiocb *req)
154{
155	/*
156	 * Nothing to do if neither of those flags are set. Avoid dipping
157	 * into the poll/apoll/double cachelines if we can.
158	 */
159	if (!(req->flags & (REQ_F_SINGLE_POLL | REQ_F_DOUBLE_POLL)))
160		return;
161
162	/*
163	 * While we hold the waitqueue lock and the waitqueue is nonempty,
164	 * wake_up_pollfree() will wait for us.  However, taking the waitqueue
165	 * lock in the first place can race with the waitqueue being freed.
166	 *
167	 * We solve this as eventpoll does: by taking advantage of the fact that
168	 * all users of wake_up_pollfree() will RCU-delay the actual free.  If
169	 * we enter rcu_read_lock() and see that the pointer to the queue is
170	 * non-NULL, we can then lock it without the memory being freed out from
171	 * under us.
172	 *
173	 * Keep holding rcu_read_lock() as long as we hold the queue lock, in
174	 * case the caller deletes the entry from the queue, leaving it empty.
175	 * In that case, only RCU prevents the queue memory from being freed.
176	 */
177	rcu_read_lock();
178	if (req->flags & REQ_F_SINGLE_POLL)
179		io_poll_remove_entry(io_poll_get_single(req));
180	if (req->flags & REQ_F_DOUBLE_POLL)
181		io_poll_remove_entry(io_poll_get_double(req));
182	rcu_read_unlock();
183}
184
185enum {
186	IOU_POLL_DONE = 0,
187	IOU_POLL_NO_ACTION = 1,
188	IOU_POLL_REMOVE_POLL_USE_RES = 2,
189	IOU_POLL_REISSUE = 3,
190	IOU_POLL_REQUEUE = 4,
191};
192
193static void __io_poll_execute(struct io_kiocb *req, int mask)
194{
195	unsigned flags = 0;
196
197	io_req_set_res(req, mask, 0);
198	req->io_task_work.func = io_poll_task_func;
199
200	trace_io_uring_task_add(req, mask);
201
202	if (!(req->flags & REQ_F_POLL_NO_LAZY))
203		flags = IOU_F_TWQ_LAZY_WAKE;
204	__io_req_task_work_add(req, flags);
205}
206
207static inline void io_poll_execute(struct io_kiocb *req, int res)
208{
209	if (io_poll_get_ownership(req))
210		__io_poll_execute(req, res);
211}
212
213/*
214 * All poll tw should go through this. Checks for poll events, manages
215 * references, does rewait, etc.
216 *
217 * Returns a negative error on failure. IOU_POLL_NO_ACTION when no action is
218 * required, which means either a spurious wakeup or a multishot CQE was served.
219 * IOU_POLL_DONE when it's done with the request, then the mask is stored in
220 * req->cqe.res. IOU_POLL_REMOVE_POLL_USE_RES indicates to remove multishot
221 * poll and that the result is stored in req->cqe.
222 */
223static int io_poll_check_events(struct io_kiocb *req, struct io_tw_state *ts)
224{
225	int v;
226
227	if (unlikely(io_should_terminate_tw()))
228		return -ECANCELED;
229
230	do {
231		v = atomic_read(&req->poll_refs);
232
233		if (unlikely(v != 1)) {
234			/* tw should be the owner and so have some refs */
235			if (WARN_ON_ONCE(!(v & IO_POLL_REF_MASK)))
236				return IOU_POLL_NO_ACTION;
237			if (v & IO_POLL_CANCEL_FLAG)
238				return -ECANCELED;
239			/*
240			 * cqe.res contains only events of the first wake up
241			 * and all others are to be lost. Redo vfs_poll() to get
242			 * up to date state.
243			 */
244			if ((v & IO_POLL_REF_MASK) != 1)
245				req->cqe.res = 0;
246
247			if (v & IO_POLL_RETRY_FLAG) {
248				req->cqe.res = 0;
249				/*
250				 * We won't find new events that came in between
251				 * vfs_poll and the ref put unless we clear the
252				 * flag in advance.
253				 */
254				atomic_andnot(IO_POLL_RETRY_FLAG, &req->poll_refs);
255				v &= ~IO_POLL_RETRY_FLAG;
256			}
257		}
258
259		/* the mask was stashed in __io_poll_execute */
260		if (!req->cqe.res) {
261			struct poll_table_struct pt = { ._key = req->apoll_events };
262			req->cqe.res = vfs_poll(req->file, &pt) & req->apoll_events;
263			/*
264			 * We got woken with a mask, but someone else got to
265			 * it first. The above vfs_poll() doesn't add us back
266			 * to the waitqueue, so if we get nothing back, we
267			 * should be safe and attempt a reissue.
268			 */
269			if (unlikely(!req->cqe.res)) {
270				/* Multishot armed need not reissue */
271				if (!(req->apoll_events & EPOLLONESHOT))
272					continue;
273				return IOU_POLL_REISSUE;
274			}
275		}
276		if (unlikely(req->cqe.res & EPOLLERR))
277			req_set_fail(req);
278		if (req->apoll_events & EPOLLONESHOT)
279			return IOU_POLL_DONE;
280
281		/* multishot, just fill a CQE and proceed */
282		if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
283			__poll_t mask = mangle_poll(req->cqe.res &
284						    req->apoll_events);
285
286			if (!io_req_post_cqe(req, mask, IORING_CQE_F_MORE)) {
287				io_req_set_res(req, mask, 0);
288				return IOU_POLL_REMOVE_POLL_USE_RES;
289			}
290		} else {
291			int ret = io_poll_issue(req, ts);
292			if (ret == IOU_STOP_MULTISHOT)
293				return IOU_POLL_REMOVE_POLL_USE_RES;
294			else if (ret == IOU_REQUEUE)
295				return IOU_POLL_REQUEUE;
296			if (ret < 0)
297				return ret;
298		}
299
300		/* force the next iteration to vfs_poll() */
301		req->cqe.res = 0;
302
303		/*
304		 * Release all references, retry if someone tried to restart
305		 * task_work while we were executing it.
306		 */
307		v &= IO_POLL_REF_MASK;
308	} while (atomic_sub_return(v, &req->poll_refs) & IO_POLL_REF_MASK);
309
310	io_napi_add(req);
311	return IOU_POLL_NO_ACTION;
312}
313
314void io_poll_task_func(struct io_kiocb *req, struct io_tw_state *ts)
315{
316	int ret;
317
318	ret = io_poll_check_events(req, ts);
319	if (ret == IOU_POLL_NO_ACTION) {
320		io_kbuf_recycle(req, 0);
321		return;
322	} else if (ret == IOU_POLL_REQUEUE) {
323		io_kbuf_recycle(req, 0);
324		__io_poll_execute(req, 0);
325		return;
326	}
327	io_poll_remove_entries(req);
328	/* task_work always has ->uring_lock held */
329	hash_del(&req->hash_node);
330
331	if (req->opcode == IORING_OP_POLL_ADD) {
332		if (ret == IOU_POLL_DONE) {
333			struct io_poll *poll;
334
335			poll = io_kiocb_to_cmd(req, struct io_poll);
336			req->cqe.res = mangle_poll(req->cqe.res & poll->events);
337		} else if (ret == IOU_POLL_REISSUE) {
338			io_req_task_submit(req, ts);
339			return;
340		} else if (ret != IOU_POLL_REMOVE_POLL_USE_RES) {
341			req->cqe.res = ret;
342			req_set_fail(req);
343		}
344
345		io_req_set_res(req, req->cqe.res, 0);
346		io_req_task_complete(req, ts);
347	} else {
348		io_tw_lock(req->ctx, ts);
349
350		if (ret == IOU_POLL_REMOVE_POLL_USE_RES)
351			io_req_task_complete(req, ts);
352		else if (ret == IOU_POLL_DONE || ret == IOU_POLL_REISSUE)
353			io_req_task_submit(req, ts);
354		else
355			io_req_defer_failed(req, ret);
356	}
357}
358
359static void io_poll_cancel_req(struct io_kiocb *req)
360{
361	io_poll_mark_cancelled(req);
362	/* kick tw, which should complete the request */
363	io_poll_execute(req, 0);
364}
365
366#define IO_ASYNC_POLL_COMMON	(EPOLLONESHOT | EPOLLPRI)
367
368static __cold int io_pollfree_wake(struct io_kiocb *req, struct io_poll *poll)
369{
370	io_poll_mark_cancelled(req);
371	/* we have to kick tw in case it's not already */
372	io_poll_execute(req, 0);
373
374	/*
375	 * If the waitqueue is being freed early but someone already
376	 * holds ownership over it, we have to tear down the request as
377	 * best we can. That means immediately removing the request from
378	 * its waitqueue and preventing all further accesses to the
379	 * waitqueue via the request.
380	 */
381	list_del_init(&poll->wait.entry);
382
383	/*
384	 * Careful: this *must* be the last step, since as soon
385	 * as req->head is NULL'ed out, the request can be
386	 * completed and freed, since aio_poll_complete_work()
387	 * will no longer need to take the waitqueue lock.
388	 */
389	smp_store_release(&poll->head, NULL);
390	return 1;
391}
392
393static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
394			void *key)
395{
396	struct io_kiocb *req = wqe_to_req(wait);
397	struct io_poll *poll = container_of(wait, struct io_poll, wait);
398	__poll_t mask = key_to_poll(key);
399
400	if (unlikely(mask & POLLFREE))
401		return io_pollfree_wake(req, poll);
402
403	/* for instances that support it check for an event match first */
404	if (mask && !(mask & (poll->events & ~IO_ASYNC_POLL_COMMON)))
405		return 0;
406
407	if (io_poll_get_ownership(req)) {
408		/*
409		 * If we trigger a multishot poll off our own wakeup path,
410		 * disable multishot as there is a circular dependency between
411		 * CQ posting and triggering the event.
412		 */
413		if (mask & EPOLL_URING_WAKE)
414			poll->events |= EPOLLONESHOT;
415
416		/* optional, saves extra locking for removal in tw handler */
417		if (mask && poll->events & EPOLLONESHOT) {
418			list_del_init(&poll->wait.entry);
419			poll->head = NULL;
420			if (wqe_is_double(wait))
421				req->flags &= ~REQ_F_DOUBLE_POLL;
422			else
423				req->flags &= ~REQ_F_SINGLE_POLL;
424		}
425		__io_poll_execute(req, mask);
426	}
427	return 1;
428}
429
430/* fails only when polling is already completing by the first entry */
431static bool io_poll_double_prepare(struct io_kiocb *req)
432{
433	struct wait_queue_head *head;
434	struct io_poll *poll = io_poll_get_single(req);
435
436	/* head is RCU protected, see io_poll_remove_entries() comments */
437	rcu_read_lock();
438	head = smp_load_acquire(&poll->head);
439	/*
440	 * poll arm might not hold ownership and so race for req->flags with
441	 * io_poll_wake(). There is only one poll entry queued, serialise with
442	 * it by taking its head lock. As we're still arming, the tw handler
443	 * is not going to be run, so there are no races with it.
444	 */
445	if (head) {
446		spin_lock_irq(&head->lock);
447		req->flags |= REQ_F_DOUBLE_POLL;
448		if (req->opcode == IORING_OP_POLL_ADD)
449			req->flags |= REQ_F_ASYNC_DATA;
450		spin_unlock_irq(&head->lock);
451	}
452	rcu_read_unlock();
453	return !!head;
454}
455
456static void __io_queue_proc(struct io_poll *poll, struct io_poll_table *pt,
457			    struct wait_queue_head *head,
458			    struct io_poll **poll_ptr)
459{
460	struct io_kiocb *req = pt->req;
461	unsigned long wqe_private = (unsigned long) req;
462
463	/*
464	 * The file being polled uses multiple waitqueues for poll handling
465	 * (e.g. one for read, one for write). Setup a separate io_poll
466	 * if this happens.
467	 */
468	if (unlikely(pt->nr_entries)) {
469		struct io_poll *first = poll;
470
471		/* double add on the same waitqueue head, ignore */
472		if (first->head == head)
473			return;
474		/* already have a 2nd entry, fail a third attempt */
475		if (*poll_ptr) {
476			if ((*poll_ptr)->head == head)
477				return;
478			pt->error = -EINVAL;
479			return;
480		}
481
482		poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
483		if (!poll) {
484			pt->error = -ENOMEM;
485			return;
486		}
487
488		/* mark as double wq entry */
489		wqe_private |= IO_WQE_F_DOUBLE;
490		io_init_poll_iocb(poll, first->events);
491		if (!io_poll_double_prepare(req)) {
492			/* the request is completing, just back off */
493			kfree(poll);
494			return;
495		}
496		*poll_ptr = poll;
497	} else {
498		/* fine to modify, there is no poll queued to race with us */
499		req->flags |= REQ_F_SINGLE_POLL;
500	}
501
502	pt->nr_entries++;
503	poll->head = head;
504	poll->wait.private = (void *) wqe_private;
505
506	if (poll->events & EPOLLEXCLUSIVE) {
507		add_wait_queue_exclusive(head, &poll->wait);
508	} else {
509		add_wait_queue(head, &poll->wait);
510	}
511}
512
513static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
514			       struct poll_table_struct *p)
515{
516	struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
517	struct io_poll *poll = io_kiocb_to_cmd(pt->req, struct io_poll);
518
519	__io_queue_proc(poll, pt, head,
520			(struct io_poll **) &pt->req->async_data);
521}
522
523static bool io_poll_can_finish_inline(struct io_kiocb *req,
524				      struct io_poll_table *pt)
525{
526	return pt->owning || io_poll_get_ownership(req);
527}
528
529static void io_poll_add_hash(struct io_kiocb *req, unsigned int issue_flags)
530{
531	struct io_ring_ctx *ctx = req->ctx;
532
533	io_ring_submit_lock(ctx, issue_flags);
534	io_poll_req_insert(req);
535	io_ring_submit_unlock(ctx, issue_flags);
536}
537
538/*
539 * Returns 0 when it's handed over for polling. The caller owns the request if
540 * it returns non-zero, but otherwise should not touch it. Negative values
541 * contain an error code. When the result is >0, the polling has completed
542 * inline and ipt.result_mask is set to the mask.
543 */
544static int __io_arm_poll_handler(struct io_kiocb *req,
545				 struct io_poll *poll,
546				 struct io_poll_table *ipt, __poll_t mask,
547				 unsigned issue_flags)
548{
549	INIT_HLIST_NODE(&req->hash_node);
550	io_init_poll_iocb(poll, mask);
551	poll->file = req->file;
552	req->apoll_events = poll->events;
553
554	ipt->pt._key = mask;
555	ipt->req = req;
556	ipt->error = 0;
557	ipt->nr_entries = 0;
558	/*
559	 * Polling is either completed here or via task_work, so if we're in the
560	 * task context we're naturally serialised with tw by merit of running
561	 * the same task. When it's io-wq, take the ownership to prevent tw
562	 * from running. However, when we're in the task context, skip taking
563	 * it as an optimisation.
564	 *
565	 * Note: even though the request won't be completed/freed, without
566	 * ownership we still can race with io_poll_wake().
567	 * io_poll_can_finish_inline() tries to deal with that.
568	 */
569	ipt->owning = issue_flags & IO_URING_F_UNLOCKED;
570	atomic_set(&req->poll_refs, (int)ipt->owning);
571
572	/*
573	 * Exclusive waits may only wake a limited amount of entries
574	 * rather than all of them, this may interfere with lazy
575	 * wake if someone does wait(events > 1). Ensure we don't do
576	 * lazy wake for those, as we need to process each one as they
577	 * come in.
578	 */
579	if (poll->events & EPOLLEXCLUSIVE)
580		req->flags |= REQ_F_POLL_NO_LAZY;
581
582	mask = vfs_poll(req->file, &ipt->pt) & poll->events;
583
584	if (unlikely(ipt->error || !ipt->nr_entries)) {
585		io_poll_remove_entries(req);
586
587		if (!io_poll_can_finish_inline(req, ipt)) {
588			io_poll_mark_cancelled(req);
589			return 0;
590		} else if (mask && (poll->events & EPOLLET)) {
591			ipt->result_mask = mask;
592			return 1;
593		}
594		return ipt->error ?: -EINVAL;
595	}
596
597	if (mask &&
598	   ((poll->events & (EPOLLET|EPOLLONESHOT)) == (EPOLLET|EPOLLONESHOT))) {
599		if (!io_poll_can_finish_inline(req, ipt)) {
600			io_poll_add_hash(req, issue_flags);
601			return 0;
602		}
603		io_poll_remove_entries(req);
604		ipt->result_mask = mask;
605		/* no one else has access to the req, forget about the ref */
606		return 1;
607	}
608
609	io_poll_add_hash(req, issue_flags);
610
611	if (mask && (poll->events & EPOLLET) &&
612	    io_poll_can_finish_inline(req, ipt)) {
613		__io_poll_execute(req, mask);
614		return 0;
615	}
616	io_napi_add(req);
617
618	if (ipt->owning) {
619		/*
620		 * Try to release ownership. If we see a change of state, e.g.
621		 * poll was woken up, queue up a tw, it'll deal with it.
622		 */
623		if (atomic_cmpxchg(&req->poll_refs, 1, 0) != 1)
624			__io_poll_execute(req, 0);
625	}
626	return 0;
627}
628
629static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
630			       struct poll_table_struct *p)
631{
632	struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
633	struct async_poll *apoll = pt->req->apoll;
634
635	__io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
636}
637
638/*
639 * We can't reliably detect loops in repeated poll triggers and issue
640 * subsequently failing. But rather than fail these immediately, allow a
641 * certain amount of retries before we give up. Given that this condition
642 * should _rarely_ trigger even once, we should be fine with a larger value.
643 */
644#define APOLL_MAX_RETRY		128
645
646static struct async_poll *io_req_alloc_apoll(struct io_kiocb *req,
647					     unsigned issue_flags)
648{
649	struct io_ring_ctx *ctx = req->ctx;
650	struct async_poll *apoll;
651
652	if (req->flags & REQ_F_POLLED) {
653		apoll = req->apoll;
654		kfree(apoll->double_poll);
655	} else if (!(issue_flags & IO_URING_F_UNLOCKED)) {
656		apoll = io_alloc_cache_get(&ctx->apoll_cache);
657		if (!apoll)
658			goto alloc_apoll;
659		apoll->poll.retries = APOLL_MAX_RETRY;
660	} else {
661alloc_apoll:
662		apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
663		if (unlikely(!apoll))
664			return NULL;
665		apoll->poll.retries = APOLL_MAX_RETRY;
666	}
667	apoll->double_poll = NULL;
668	req->apoll = apoll;
669	if (unlikely(!--apoll->poll.retries))
670		return NULL;
671	return apoll;
672}
673
674int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
675{
676	const struct io_issue_def *def = &io_issue_defs[req->opcode];
677	struct async_poll *apoll;
678	struct io_poll_table ipt;
679	__poll_t mask = POLLPRI | POLLERR | EPOLLET;
680	int ret;
681
682	if (!def->pollin && !def->pollout)
683		return IO_APOLL_ABORTED;
684	if (!io_file_can_poll(req))
685		return IO_APOLL_ABORTED;
686	if (!(req->flags & REQ_F_APOLL_MULTISHOT))
687		mask |= EPOLLONESHOT;
688
689	if (def->pollin) {
690		mask |= EPOLLIN | EPOLLRDNORM;
691
692		/* If reading from MSG_ERRQUEUE using recvmsg, ignore POLLIN */
693		if (req->flags & REQ_F_CLEAR_POLLIN)
694			mask &= ~EPOLLIN;
695	} else {
696		mask |= EPOLLOUT | EPOLLWRNORM;
697	}
698	if (def->poll_exclusive)
699		mask |= EPOLLEXCLUSIVE;
700
701	apoll = io_req_alloc_apoll(req, issue_flags);
702	if (!apoll)
703		return IO_APOLL_ABORTED;
704	req->flags &= ~(REQ_F_SINGLE_POLL | REQ_F_DOUBLE_POLL);
705	req->flags |= REQ_F_POLLED;
706	ipt.pt._qproc = io_async_queue_proc;
707
708	io_kbuf_recycle(req, issue_flags);
709
710	ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask, issue_flags);
711	if (ret)
712		return ret > 0 ? IO_APOLL_READY : IO_APOLL_ABORTED;
713	trace_io_uring_poll_arm(req, mask, apoll->poll.events);
714	return IO_APOLL_OK;
715}
716
717/*
718 * Returns true if we found and killed one or more poll requests
719 */
720__cold bool io_poll_remove_all(struct io_ring_ctx *ctx, struct io_uring_task *tctx,
721			       bool cancel_all)
722{
723	unsigned nr_buckets = 1U << ctx->cancel_table.hash_bits;
724	struct hlist_node *tmp;
725	struct io_kiocb *req;
726	bool found = false;
727	int i;
728
729	lockdep_assert_held(&ctx->uring_lock);
730
731	for (i = 0; i < nr_buckets; i++) {
732		struct io_hash_bucket *hb = &ctx->cancel_table.hbs[i];
733
734		hlist_for_each_entry_safe(req, tmp, &hb->list, hash_node) {
735			if (io_match_task_safe(req, tctx, cancel_all)) {
736				hlist_del_init(&req->hash_node);
737				io_poll_cancel_req(req);
738				found = true;
739			}
740		}
741	}
742	return found;
743}
744
745static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, bool poll_only,
746				     struct io_cancel_data *cd)
747{
748	struct io_kiocb *req;
749	u32 index = hash_long(cd->data, ctx->cancel_table.hash_bits);
750	struct io_hash_bucket *hb = &ctx->cancel_table.hbs[index];
751
752	hlist_for_each_entry(req, &hb->list, hash_node) {
753		if (cd->data != req->cqe.user_data)
754			continue;
755		if (poll_only && req->opcode != IORING_OP_POLL_ADD)
756			continue;
757		if (cd->flags & IORING_ASYNC_CANCEL_ALL) {
758			if (io_cancel_match_sequence(req, cd->seq))
759				continue;
760		}
761		return req;
762	}
763	return NULL;
764}
765
766static struct io_kiocb *io_poll_file_find(struct io_ring_ctx *ctx,
767					  struct io_cancel_data *cd)
768{
769	unsigned nr_buckets = 1U << ctx->cancel_table.hash_bits;
770	struct io_kiocb *req;
771	int i;
772
773	for (i = 0; i < nr_buckets; i++) {
774		struct io_hash_bucket *hb = &ctx->cancel_table.hbs[i];
775
776		hlist_for_each_entry(req, &hb->list, hash_node) {
777			if (io_cancel_req_match(req, cd))
778				return req;
779		}
780	}
781	return NULL;
782}
783
784static int io_poll_disarm(struct io_kiocb *req)
785{
786	if (!req)
787		return -ENOENT;
788	if (!io_poll_get_ownership(req))
789		return -EALREADY;
790	io_poll_remove_entries(req);
791	hash_del(&req->hash_node);
792	return 0;
793}
794
795static int __io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd)
796{
797	struct io_kiocb *req;
798
799	if (cd->flags & (IORING_ASYNC_CANCEL_FD | IORING_ASYNC_CANCEL_OP |
800			 IORING_ASYNC_CANCEL_ANY))
801		req = io_poll_file_find(ctx, cd);
802	else
803		req = io_poll_find(ctx, false, cd);
804
805	if (req) {
806		io_poll_cancel_req(req);
807		return 0;
808	}
809	return -ENOENT;
810}
811
812int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
813		   unsigned issue_flags)
814{
815	int ret;
816
817	io_ring_submit_lock(ctx, issue_flags);
818	ret = __io_poll_cancel(ctx, cd);
819	io_ring_submit_unlock(ctx, issue_flags);
820	return ret;
821}
822
823static __poll_t io_poll_parse_events(const struct io_uring_sqe *sqe,
824				     unsigned int flags)
825{
826	u32 events;
827
828	events = READ_ONCE(sqe->poll32_events);
829#ifdef __BIG_ENDIAN
830	events = swahw32(events);
831#endif
832	if (!(flags & IORING_POLL_ADD_MULTI))
833		events |= EPOLLONESHOT;
834	if (!(flags & IORING_POLL_ADD_LEVEL))
835		events |= EPOLLET;
836	return demangle_poll(events) |
837		(events & (EPOLLEXCLUSIVE|EPOLLONESHOT|EPOLLET));
838}
839
840int io_poll_remove_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
841{
842	struct io_poll_update *upd = io_kiocb_to_cmd(req, struct io_poll_update);
843	u32 flags;
844
845	if (sqe->buf_index || sqe->splice_fd_in)
846		return -EINVAL;
847	flags = READ_ONCE(sqe->len);
848	if (flags & ~(IORING_POLL_UPDATE_EVENTS | IORING_POLL_UPDATE_USER_DATA |
849		      IORING_POLL_ADD_MULTI))
850		return -EINVAL;
851	/* meaningless without update */
852	if (flags == IORING_POLL_ADD_MULTI)
853		return -EINVAL;
854
855	upd->old_user_data = READ_ONCE(sqe->addr);
856	upd->update_events = flags & IORING_POLL_UPDATE_EVENTS;
857	upd->update_user_data = flags & IORING_POLL_UPDATE_USER_DATA;
858
859	upd->new_user_data = READ_ONCE(sqe->off);
860	if (!upd->update_user_data && upd->new_user_data)
861		return -EINVAL;
862	if (upd->update_events)
863		upd->events = io_poll_parse_events(sqe, flags);
864	else if (sqe->poll32_events)
865		return -EINVAL;
866
867	return 0;
868}
869
870int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
871{
872	struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll);
873	u32 flags;
874
875	if (sqe->buf_index || sqe->off || sqe->addr)
876		return -EINVAL;
877	flags = READ_ONCE(sqe->len);
878	if (flags & ~IORING_POLL_ADD_MULTI)
879		return -EINVAL;
880	if ((flags & IORING_POLL_ADD_MULTI) && (req->flags & REQ_F_CQE_SKIP))
881		return -EINVAL;
882
883	poll->events = io_poll_parse_events(sqe, flags);
884	return 0;
885}
886
887int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
888{
889	struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll);
890	struct io_poll_table ipt;
891	int ret;
892
893	ipt.pt._qproc = io_poll_queue_proc;
894
895	ret = __io_arm_poll_handler(req, poll, &ipt, poll->events, issue_flags);
896	if (ret > 0) {
897		io_req_set_res(req, ipt.result_mask, 0);
898		return IOU_OK;
899	}
900	return ret ?: IOU_ISSUE_SKIP_COMPLETE;
901}
902
903int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags)
904{
905	struct io_poll_update *poll_update = io_kiocb_to_cmd(req, struct io_poll_update);
906	struct io_ring_ctx *ctx = req->ctx;
907	struct io_cancel_data cd = { .ctx = ctx, .data = poll_update->old_user_data, };
908	struct io_kiocb *preq;
909	int ret2, ret = 0;
910
911	io_ring_submit_lock(ctx, issue_flags);
912	preq = io_poll_find(ctx, true, &cd);
913	ret2 = io_poll_disarm(preq);
914	if (ret2) {
915		ret = ret2;
916		goto out;
917	}
918	if (WARN_ON_ONCE(preq->opcode != IORING_OP_POLL_ADD)) {
919		ret = -EFAULT;
920		goto out;
921	}
922
923	if (poll_update->update_events || poll_update->update_user_data) {
924		/* only replace the event mask bits, keep the behavior flags */
925		if (poll_update->update_events) {
926			struct io_poll *poll = io_kiocb_to_cmd(preq, struct io_poll);
927
928			poll->events &= ~0xffff;
929			poll->events |= poll_update->events & 0xffff;
930			poll->events |= IO_POLL_UNMASK;
931		}
932		if (poll_update->update_user_data)
933			preq->cqe.user_data = poll_update->new_user_data;
934
935		ret2 = io_poll_add(preq, issue_flags & ~IO_URING_F_UNLOCKED);
936		/* successfully updated, don't complete poll request */
937		if (!ret2 || ret2 == -EIOCBQUEUED)
938			goto out;
939	}
940
941	req_set_fail(preq);
942	io_req_set_res(preq, -ECANCELED, 0);
943	preq->io_task_work.func = io_req_task_complete;
944	io_req_task_work_add(preq);
945out:
946	io_ring_submit_unlock(ctx, issue_flags);
947	if (ret < 0) {
948		req_set_fail(req);
949		return ret;
950	}
951	/* complete update request, we're done with it */
952	io_req_set_res(req, ret, 0);
953	return IOU_OK;
954}
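A companion sketch, also editor-added and not part of poll.c: cancelling a pending poll with IORING_OP_POLL_REMOVE, which exercises io_poll_find() and io_poll_disarm() above. The liburing calls are assumed from liburing 2.x, including io_uring_prep_poll_remove() taking a 64-bit user_data; error handling is trimmed.

/* arm a one-shot poll that never fires, then cancel it by user_data */
#include <poll.h>
#include <stdio.h>
#include <unistd.h>
#include <liburing.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int fds[2], i;

	if (pipe(fds) || io_uring_queue_init(8, &ring, 0))
		return 1;

	/* IORING_OP_POLL_ADD, tagged with user_data 0xbeef */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_poll_add(sqe, fds[0], POLLIN);
	sqe->user_data = 0xbeef;
	io_uring_submit(&ring);

	/* IORING_OP_POLL_REMOVE: find and cancel the poll armed above */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_poll_remove(sqe, 0xbeef);
	sqe->user_data = 0xcafe;
	io_uring_submit(&ring);

	for (i = 0; i < 2; i++) {
		if (io_uring_wait_cqe(&ring, &cqe))
			break;
		/* expect res == -ECANCELED for 0xbeef and 0 for 0xcafe */
		printf("user_data=0x%llx res=%d\n",
		       (unsigned long long)cqe->user_data, cqe->res);
		io_uring_cqe_seen(&ring, cqe);
	}

	io_uring_queue_exit(&ring);
	return 0;
}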