   1// SPDX-License-Identifier: GPL-2.0
   2#include <linux/kernel.h>
   3#include <linux/errno.h>
   4#include <linux/fs.h>
   5#include <linux/file.h>
   6#include <linux/blk-mq.h>
   7#include <linux/mm.h>
   8#include <linux/slab.h>
   9#include <linux/fsnotify.h>
  10#include <linux/poll.h>
  11#include <linux/nospec.h>
  12#include <linux/compat.h>
  13#include <linux/io_uring/cmd.h>
  14
  15#include <uapi/linux/io_uring.h>
  16
  17#include "io_uring.h"
  18#include "opdef.h"
  19#include "kbuf.h"
  20#include "rsrc.h"
  21#include "poll.h"
  22#include "rw.h"
  23
  24struct io_rw {
  25	/* NOTE: kiocb has the file as the first member, so don't do it here */
  26	struct kiocb			kiocb;
  27	u64				addr;
  28	u32				len;
  29	rwf_t				flags;
  30};
  31
  32static inline bool io_file_supports_nowait(struct io_kiocb *req)
  33{
  34	return req->flags & REQ_F_SUPPORT_NOWAIT;
  35}
  36
  37#ifdef CONFIG_COMPAT
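/*
 * Compat (32-bit) flavor of buffer select prep: pull the single compat
 * iovec's length into rw->len so that a suitably sized provided buffer
 * can be picked at issue time.
 */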
  38static int io_iov_compat_buffer_select_prep(struct io_rw *rw)
  39{
  40	struct compat_iovec __user *uiov;
  41	compat_ssize_t clen;
  42
  43	uiov = u64_to_user_ptr(rw->addr);
  44	if (!access_ok(uiov, sizeof(*uiov)))
  45		return -EFAULT;
  46	if (__get_user(clen, &uiov->iov_len))
  47		return -EFAULT;
  48	if (clen < 0)
  49		return -EINVAL;
  50
  51	rw->len = clen;
  52	return 0;
  53}
  54#endif
  55
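/*
 * READV/WRITEV with buffer selection must pass exactly one iovec. Fold
 * that iovec's length into rw->len; the actual buffer address comes from
 * buffer selection at issue time.
 */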
  56static int io_iov_buffer_select_prep(struct io_kiocb *req)
  57{
  58	struct iovec __user *uiov;
  59	struct iovec iov;
  60	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
  61
  62	if (rw->len != 1)
  63		return -EINVAL;
  64
  65#ifdef CONFIG_COMPAT
  66	if (req->ctx->compat)
  67		return io_iov_compat_buffer_select_prep(rw);
  68#endif
  69
  70	uiov = u64_to_user_ptr(rw->addr);
  71	if (copy_from_user(&iov, uiov, sizeof(*uiov)))
  72		return -EFAULT;
  73	rw->len = iov.iov_len;
  74	return 0;
  75}
  76
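/*
 * Common SQE prep for all read/write variants: offset, buffer index,
 * priority, buffer address/length and RWF_* flags.
 */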
  77int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
  78{
  79	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
  80	unsigned ioprio;
  81	int ret;
  82
  83	rw->kiocb.ki_pos = READ_ONCE(sqe->off);
  84	/* used for fixed read/write too - just read unconditionally */
  85	req->buf_index = READ_ONCE(sqe->buf_index);
  86
  87	ioprio = READ_ONCE(sqe->ioprio);
  88	if (ioprio) {
  89		ret = ioprio_check_cap(ioprio);
  90		if (ret)
  91			return ret;
  92
  93		rw->kiocb.ki_ioprio = ioprio;
  94	} else {
  95		rw->kiocb.ki_ioprio = get_current_ioprio();
  96	}
  97	rw->kiocb.dio_complete = NULL;
  98
  99	rw->addr = READ_ONCE(sqe->addr);
 100	rw->len = READ_ONCE(sqe->len);
 101	rw->flags = READ_ONCE(sqe->rw_flags);
 102	return 0;
 103}
 104
 105int io_prep_rwv(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 106{
 107	int ret;
 108
 109	ret = io_prep_rw(req, sqe);
 110	if (unlikely(ret))
 111		return ret;
 112
 113	/*
  114	 * Have to do this validation here, as by the time io_read() runs
  115	 * rw->len might have changed due to buffer selection
 116	 */
 117	if (req->flags & REQ_F_BUFFER_SELECT)
 118		return io_iov_buffer_select_prep(req);
 119
 120	return 0;
 121}
 122
 123int io_prep_rw_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 124{
 125	struct io_ring_ctx *ctx = req->ctx;
 126	u16 index;
 127	int ret;
 128
 129	ret = io_prep_rw(req, sqe);
 130	if (unlikely(ret))
 131		return ret;
 132
 133	if (unlikely(req->buf_index >= ctx->nr_user_bufs))
 134		return -EFAULT;
 135	index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
 136	req->imu = ctx->user_bufs[index];
 137	io_req_set_rsrc_node(req, ctx, 0);
 138	return 0;
 139}
 140
 141/*
  142 * Multishot read is prepared just like a normal read/write request; the
  143 * only difference is that we set the MULTISHOT flag.
 144 */
 145int io_read_mshot_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 146{
 147	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 148	int ret;
 149
 150	/* must be used with provided buffers */
 151	if (!(req->flags & REQ_F_BUFFER_SELECT))
 152		return -EINVAL;
 153
 154	ret = io_prep_rw(req, sqe);
 155	if (unlikely(ret))
 156		return ret;
 157
 158	if (rw->addr || rw->len)
 159		return -EINVAL;
 160
 161	req->flags |= REQ_F_APOLL_MULTISHOT;
 162	return 0;
 163}
 164
 165void io_readv_writev_cleanup(struct io_kiocb *req)
 166{
 167	struct io_async_rw *io = req->async_data;
 168
 169	kfree(io->free_iovec);
 170}
 171
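/*
 * Work out which file position to use: an explicit offset from the SQE,
 * the file's current position if -1 was passed (and the file isn't
 * stream-like), or NULL for stream-like files.
 */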
 172static inline loff_t *io_kiocb_update_pos(struct io_kiocb *req)
 173{
 174	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 175
 176	if (rw->kiocb.ki_pos != -1)
 177		return &rw->kiocb.ki_pos;
 178
 179	if (!(req->file->f_mode & FMODE_STREAM)) {
 180		req->flags |= REQ_F_CUR_POS;
 181		rw->kiocb.ki_pos = req->file->f_pos;
 182		return &rw->kiocb.ki_pos;
 183	}
 184
 185	rw->kiocb.ki_pos = 0;
 186	return NULL;
 187}
 188
 189static void io_req_task_queue_reissue(struct io_kiocb *req)
 190{
 191	req->io_task_work.func = io_queue_iowq;
 192	io_req_task_work_add(req);
 193}
 194
 195#ifdef CONFIG_BLOCK
 196static bool io_resubmit_prep(struct io_kiocb *req)
 197{
 198	struct io_async_rw *io = req->async_data;
 199
 200	if (!req_has_async_data(req))
 201		return !io_req_prep_async(req);
 202	iov_iter_restore(&io->s.iter, &io->s.iter_state);
 203	return true;
 204}
 205
 206static bool io_rw_should_reissue(struct io_kiocb *req)
 207{
 208	umode_t mode = file_inode(req->file)->i_mode;
 209	struct io_ring_ctx *ctx = req->ctx;
 210
 211	if (!S_ISBLK(mode) && !S_ISREG(mode))
 212		return false;
 213	if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() &&
 214	    !(ctx->flags & IORING_SETUP_IOPOLL)))
 215		return false;
 216	/*
 217	 * If ref is dying, we might be running poll reap from the exit work.
 218	 * Don't attempt to reissue from that path, just let it fail with
 219	 * -EAGAIN.
 220	 */
 221	if (percpu_ref_is_dying(&ctx->refs))
 222		return false;
 223	/*
  224	 * Play it safe and assume it's not safe to re-import and reissue if
  225	 * we're not in the original thread group (or not in task context).
 226	 */
 227	if (!same_thread_group(req->task, current) || !in_task())
 228		return false;
 229	return true;
 230}
 231#else
 232static bool io_resubmit_prep(struct io_kiocb *req)
 233{
 234	return false;
 235}
 236static bool io_rw_should_reissue(struct io_kiocb *req)
 237{
 238	return false;
 239}
 240#endif
 241
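/*
 * End the write accounting started via kiocb_start_write(); only regular
 * files have it.
 */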
 242static void io_req_end_write(struct io_kiocb *req)
 243{
 244	if (req->flags & REQ_F_ISREG) {
 245		struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 246
 247		kiocb_end_write(&rw->kiocb);
 248	}
 249}
 250
 251/*
 252 * Trigger the notifications after having done some IO, and finish the write
 253 * accounting, if any.
 254 */
 255static void io_req_io_end(struct io_kiocb *req)
 256{
 257	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 258
 259	if (rw->kiocb.ki_flags & IOCB_WRITE) {
 260		io_req_end_write(req);
 261		fsnotify_modify(req->file);
 262	} else {
 263		fsnotify_access(req->file);
 264	}
 265}
 266
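/*
 * Handle a completion whose result doesn't match what was submitted: flag
 * the request for reissue (returning true) if that's allowed, otherwise
 * mark it failed and record the short result. Returns false when the
 * completion should be posted as-is.
 */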
 267static bool __io_complete_rw_common(struct io_kiocb *req, long res)
 268{
 269	if (unlikely(res != req->cqe.res)) {
 270		if ((res == -EAGAIN || res == -EOPNOTSUPP) &&
 271		    io_rw_should_reissue(req)) {
 272			/*
 273			 * Reissue will start accounting again, finish the
 274			 * current cycle.
 275			 */
 276			io_req_io_end(req);
 277			req->flags |= REQ_F_REISSUE | REQ_F_PARTIAL_IO;
 278			return true;
 279		}
 280		req_set_fail(req);
 281		req->cqe.res = res;
 282	}
 283	return false;
 284}
 285
 286static inline int io_fixup_rw_res(struct io_kiocb *req, long res)
 287{
 288	struct io_async_rw *io = req->async_data;
 289
 290	/* add previously done IO, if any */
 291	if (req_has_async_data(req) && io->bytes_done > 0) {
 292		if (res < 0)
 293			res = io->bytes_done;
 294		else
 295			res += io->bytes_done;
 296	}
 297	return res;
 298}
 299
 300void io_req_rw_complete(struct io_kiocb *req, struct io_tw_state *ts)
 301{
 302	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 303	struct kiocb *kiocb = &rw->kiocb;
 304
 305	if ((kiocb->ki_flags & IOCB_DIO_CALLER_COMP) && kiocb->dio_complete) {
 306		long res = kiocb->dio_complete(rw->kiocb.private);
 307
 308		io_req_set_res(req, io_fixup_rw_res(req, res), 0);
 309	}
 310
 311	io_req_io_end(req);
 312
 313	if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)) {
 314		unsigned issue_flags = ts->locked ? 0 : IO_URING_F_UNLOCKED;
 315
 316		req->cqe.flags |= io_put_kbuf(req, issue_flags);
 317	}
 318	io_req_task_complete(req, ts);
 319}
 320
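/*
 * Completion callback for non-IOPOLL requests: record the result and punt
 * the final CQE posting to task_work.
 */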
 321static void io_complete_rw(struct kiocb *kiocb, long res)
 322{
 323	struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
 324	struct io_kiocb *req = cmd_to_io_kiocb(rw);
 325
 326	if (!kiocb->dio_complete || !(kiocb->ki_flags & IOCB_DIO_CALLER_COMP)) {
 327		if (__io_complete_rw_common(req, res))
 328			return;
 329		io_req_set_res(req, io_fixup_rw_res(req, res), 0);
 330	}
 331	req->io_task_work.func = io_req_rw_complete;
 332	__io_req_task_work_add(req, IOU_F_TWQ_LAZY_WAKE);
 333}
 334
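/*
 * Completion callback for IOPOLL requests: store the result and mark the
 * request completed so io_do_iopoll() can reap it.
 */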
 335static void io_complete_rw_iopoll(struct kiocb *kiocb, long res)
 336{
 337	struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
 338	struct io_kiocb *req = cmd_to_io_kiocb(rw);
 339
 340	if (kiocb->ki_flags & IOCB_WRITE)
 341		io_req_end_write(req);
 342	if (unlikely(res != req->cqe.res)) {
 343		if (res == -EAGAIN && io_rw_should_reissue(req)) {
 344			req->flags |= REQ_F_REISSUE | REQ_F_PARTIAL_IO;
 345			return;
 346		}
 347		req->cqe.res = res;
 348	}
 349
 350	/* order with io_iopoll_complete() checking ->iopoll_completed */
 351	smp_store_release(&req->iopoll_completed, 1);
 352}
 353
 354static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
 355{
 356	/* IO was queued async, completion will happen later */
 357	if (ret == -EIOCBQUEUED)
 358		return;
 359
 360	/* transform internal restart error codes */
 361	if (unlikely(ret < 0)) {
 362		switch (ret) {
 363		case -ERESTARTSYS:
 364		case -ERESTARTNOINTR:
 365		case -ERESTARTNOHAND:
 366		case -ERESTART_RESTARTBLOCK:
 367			/*
 368			 * We can't just restart the syscall, since previously
 369			 * submitted sqes may already be in progress. Just fail
 370			 * this IO with EINTR.
 371			 */
 372			ret = -EINTR;
 373			break;
 374		}
 375	}
 376
 377	INDIRECT_CALL_2(kiocb->ki_complete, io_complete_rw_iopoll,
 378			io_complete_rw, kiocb, ret);
 379}
 380
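/*
 * Finish a request inline if it completed synchronously on the submission
 * path, otherwise hand the result to io_rw_done(). Queues a reissue via
 * task_work if one was flagged.
 */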
 381static int kiocb_done(struct io_kiocb *req, ssize_t ret,
 382		       unsigned int issue_flags)
 383{
 384	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 385	unsigned final_ret = io_fixup_rw_res(req, ret);
 386
 387	if (ret >= 0 && req->flags & REQ_F_CUR_POS)
 388		req->file->f_pos = rw->kiocb.ki_pos;
 389	if (ret >= 0 && (rw->kiocb.ki_complete == io_complete_rw)) {
 390		if (!__io_complete_rw_common(req, ret)) {
 391			/*
  392			 * Safe to call io_req_io_end() from here as we're inline
 393			 * from the submission path.
 394			 */
 395			io_req_io_end(req);
 396			io_req_set_res(req, final_ret,
 397				       io_put_kbuf(req, issue_flags));
 398			return IOU_OK;
 399		}
 400	} else {
 401		io_rw_done(&rw->kiocb, ret);
 402	}
 403
 404	if (req->flags & REQ_F_REISSUE) {
 405		req->flags &= ~REQ_F_REISSUE;
 406		if (io_resubmit_prep(req))
 407			io_req_task_queue_reissue(req);
 408		else
 409			io_req_task_queue_fail(req, final_ret);
 410	}
 411	return IOU_ISSUE_SKIP_COMPLETE;
 412}
 413
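/*
 * Map the user memory for this request into an iov_iter: a registered
 * (fixed) buffer, a single - possibly selected - buffer, or a full iovec
 * array for vectored opcodes. Returns the iovec array to free later if one
 * was allocated, NULL otherwise.
 */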
 414static struct iovec *__io_import_iovec(int ddir, struct io_kiocb *req,
 415				       struct io_rw_state *s,
 416				       unsigned int issue_flags)
 417{
 418	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 419	struct iov_iter *iter = &s->iter;
 420	u8 opcode = req->opcode;
 421	struct iovec *iovec;
 422	void __user *buf;
 423	size_t sqe_len;
 424	ssize_t ret;
 425
 426	if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
 427		ret = io_import_fixed(ddir, iter, req->imu, rw->addr, rw->len);
 428		if (ret)
 429			return ERR_PTR(ret);
 430		return NULL;
 431	}
 432
 433	buf = u64_to_user_ptr(rw->addr);
 434	sqe_len = rw->len;
 435
 436	if (!io_issue_defs[opcode].vectored || req->flags & REQ_F_BUFFER_SELECT) {
 437		if (io_do_buffer_select(req)) {
 438			buf = io_buffer_select(req, &sqe_len, issue_flags);
 439			if (!buf)
 440				return ERR_PTR(-ENOBUFS);
 441			rw->addr = (unsigned long) buf;
 442			rw->len = sqe_len;
 443		}
 444
 445		ret = import_ubuf(ddir, buf, sqe_len, iter);
 446		if (ret)
 447			return ERR_PTR(ret);
 448		return NULL;
 449	}
 450
 451	iovec = s->fast_iov;
 452	ret = __import_iovec(ddir, buf, sqe_len, UIO_FASTIOV, &iovec, iter,
 453			      req->ctx->compat);
 454	if (unlikely(ret < 0))
 455		return ERR_PTR(ret);
 456	return iovec;
 457}
 458
 459static inline int io_import_iovec(int rw, struct io_kiocb *req,
 460				  struct iovec **iovec, struct io_rw_state *s,
 461				  unsigned int issue_flags)
 462{
 463	*iovec = __io_import_iovec(rw, req, s, issue_flags);
 464	if (IS_ERR(*iovec))
 465		return PTR_ERR(*iovec);
 466
 467	iov_iter_save_state(&s->iter, &s->iter_state);
 468	return 0;
 469}
 470
 471static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
 472{
 473	return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? NULL : &kiocb->ki_pos;
 474}
 475
 476/*
 477 * For files that don't have ->read_iter() and ->write_iter(), handle them
 478 * by looping over ->read() or ->write() manually.
 479 */
 480static ssize_t loop_rw_iter(int ddir, struct io_rw *rw, struct iov_iter *iter)
 481{
 482	struct kiocb *kiocb = &rw->kiocb;
 483	struct file *file = kiocb->ki_filp;
 484	ssize_t ret = 0;
 485	loff_t *ppos;
 486
 487	/*
 488	 * Don't support polled IO through this interface, and we can't
 489	 * support non-blocking either. For the latter, this just causes
 490	 * the kiocb to be handled from an async context.
 491	 */
 492	if (kiocb->ki_flags & IOCB_HIPRI)
 493		return -EOPNOTSUPP;
 494	if ((kiocb->ki_flags & IOCB_NOWAIT) &&
 495	    !(kiocb->ki_filp->f_flags & O_NONBLOCK))
 496		return -EAGAIN;
 497
 498	ppos = io_kiocb_ppos(kiocb);
 499
 500	while (iov_iter_count(iter)) {
 501		void __user *addr;
 502		size_t len;
 503		ssize_t nr;
 504
 505		if (iter_is_ubuf(iter)) {
 506			addr = iter->ubuf + iter->iov_offset;
 507			len = iov_iter_count(iter);
 508		} else if (!iov_iter_is_bvec(iter)) {
 509			addr = iter_iov_addr(iter);
 510			len = iter_iov_len(iter);
 511		} else {
 512			addr = u64_to_user_ptr(rw->addr);
 513			len = rw->len;
 514		}
 515
 516		if (ddir == READ)
 517			nr = file->f_op->read(file, addr, len, ppos);
 518		else
 519			nr = file->f_op->write(file, addr, len, ppos);
 520
 521		if (nr < 0) {
 522			if (!ret)
 523				ret = nr;
 524			break;
 525		}
 526		ret += nr;
 527		if (!iov_iter_is_bvec(iter)) {
 528			iov_iter_advance(iter, nr);
 529		} else {
 530			rw->addr += nr;
 531			rw->len -= nr;
 532			if (!rw->len)
 533				break;
 534		}
 535		if (nr != len)
 536			break;
 537	}
 538
 539	return ret;
 540}
 541
 542static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec,
 543			  const struct iovec *fast_iov, struct iov_iter *iter)
 544{
 545	struct io_async_rw *io = req->async_data;
 546
 547	memcpy(&io->s.iter, iter, sizeof(*iter));
 548	io->free_iovec = iovec;
 549	io->bytes_done = 0;
 550	/* can only be fixed buffers, no need to do anything */
 551	if (iov_iter_is_bvec(iter) || iter_is_ubuf(iter))
 552		return;
 553	if (!iovec) {
 554		unsigned iov_off = 0;
 555
 556		io->s.iter.__iov = io->s.fast_iov;
 557		if (iter->__iov != fast_iov) {
 558			iov_off = iter_iov(iter) - fast_iov;
 559			io->s.iter.__iov += iov_off;
 560		}
 561		if (io->s.fast_iov != fast_iov)
 562			memcpy(io->s.fast_iov + iov_off, fast_iov + iov_off,
 563			       sizeof(struct iovec) * iter->nr_segs);
 564	} else {
 565		req->flags |= REQ_F_NEED_CLEANUP;
 566	}
 567}
 568
 569static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
 570			     struct io_rw_state *s, bool force)
 571{
 572	if (!force && !io_cold_defs[req->opcode].prep_async)
 573		return 0;
 574	/* opcode type doesn't need async data */
 575	if (!io_cold_defs[req->opcode].async_size)
 576		return 0;
 577	if (!req_has_async_data(req)) {
 578		struct io_async_rw *iorw;
 579
 580		if (io_alloc_async_data(req)) {
 581			kfree(iovec);
 582			return -ENOMEM;
 583		}
 584
 585		io_req_map_rw(req, iovec, s->fast_iov, &s->iter);
 586		iorw = req->async_data;
 587		/* we've copied and mapped the iter, ensure state is saved */
 588		iov_iter_save_state(&iorw->s.iter, &iorw->s.iter_state);
 589	}
 590	return 0;
 591}
 592
 593static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
 594{
 595	struct io_async_rw *iorw = req->async_data;
 596	struct iovec *iov;
 597	int ret;
 598
 599	iorw->bytes_done = 0;
 600	iorw->free_iovec = NULL;
 601
 602	/* submission path, ->uring_lock should already be taken */
 603	ret = io_import_iovec(rw, req, &iov, &iorw->s, 0);
 604	if (unlikely(ret < 0))
 605		return ret;
 606
 607	if (iov) {
 608		iorw->free_iovec = iov;
 609		req->flags |= REQ_F_NEED_CLEANUP;
 610	}
 611
 612	return 0;
 613}
 614
 615int io_readv_prep_async(struct io_kiocb *req)
 616{
 617	return io_rw_prep_async(req, ITER_DEST);
 618}
 619
 620int io_writev_prep_async(struct io_kiocb *req)
 621{
 622	return io_rw_prep_async(req, ITER_SOURCE);
 623}
 624
 625/*
 626 * This is our waitqueue callback handler, registered through __folio_lock_async()
  627 * when we initially tried to do the IO with the iocb that had our waitqueue armed.
 628 * This gets called when the page is unlocked, and we generally expect that to
 629 * happen when the page IO is completed and the page is now uptodate. This will
 630 * queue a task_work based retry of the operation, attempting to copy the data
 631 * again. If the latter fails because the page was NOT uptodate, then we will
 632 * do a thread based blocking retry of the operation. That's the unexpected
 633 * slow path.
 634 */
 635static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
 636			     int sync, void *arg)
 637{
 638	struct wait_page_queue *wpq;
 639	struct io_kiocb *req = wait->private;
 640	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 641	struct wait_page_key *key = arg;
 642
 643	wpq = container_of(wait, struct wait_page_queue, wait);
 644
 645	if (!wake_page_match(wpq, key))
 646		return 0;
 647
 648	rw->kiocb.ki_flags &= ~IOCB_WAITQ;
 649	list_del_init(&wait->entry);
 650	io_req_task_queue(req);
 651	return 1;
 652}
 653
 654/*
 655 * This controls whether a given IO request should be armed for async page
 656 * based retry. If we return false here, the request is handed to the async
 657 * worker threads for retry. If we're doing buffered reads on a regular file,
 658 * we prepare a private wait_page_queue entry and retry the operation. This
 659 * will either succeed because the page is now uptodate and unlocked, or it
 660 * will register a callback when the page is unlocked at IO completion. Through
 661 * that callback, io_uring uses task_work to setup a retry of the operation.
 662 * That retry will attempt the buffered read again. The retry will generally
 663 * succeed, or in rare cases where it fails, we then fall back to using the
 664 * async worker threads for a blocking retry.
 665 */
 666static bool io_rw_should_retry(struct io_kiocb *req)
 667{
 668	struct io_async_rw *io = req->async_data;
 669	struct wait_page_queue *wait = &io->wpq;
 670	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 671	struct kiocb *kiocb = &rw->kiocb;
 672
 673	/* never retry for NOWAIT, we just complete with -EAGAIN */
 674	if (req->flags & REQ_F_NOWAIT)
 675		return false;
 676
 677	/* Only for buffered IO */
 678	if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI))
 679		return false;
 680
 681	/*
 682	 * just use poll if we can, and don't attempt if the fs doesn't
 683	 * support callback based unlocks
 684	 */
 685	if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC))
 686		return false;
 687
 688	wait->wait.func = io_async_buf_func;
 689	wait->wait.private = req;
 690	wait->wait.flags = 0;
 691	INIT_LIST_HEAD(&wait->wait.entry);
 692	kiocb->ki_flags |= IOCB_WAITQ;
 693	kiocb->ki_flags &= ~IOCB_NOWAIT;
 694	kiocb->ki_waitq = wait;
 695	return true;
 696}
 697
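/*
 * Issue the read via ->read_iter() if the file has it, else fall back to
 * looping over ->read().
 */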
 698static inline int io_iter_do_read(struct io_rw *rw, struct iov_iter *iter)
 699{
 700	struct file *file = rw->kiocb.ki_filp;
 701
 702	if (likely(file->f_op->read_iter))
 703		return call_read_iter(file, &rw->kiocb, iter);
 704	else if (file->f_op->read)
 705		return loop_rw_iter(READ, rw, iter);
 706	else
 707		return -EINVAL;
 708}
 709
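/*
 * Regular files and block devices are expected to transfer the full amount;
 * a short transfer on those is continued rather than returned as-is.
 */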
 710static bool need_complete_io(struct io_kiocb *req)
 711{
 712	return req->flags & REQ_F_ISREG ||
 713		S_ISBLK(file_inode(req->file)->i_mode);
 714}
 715
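/*
 * Per-issue kiocb setup: check the file mode, apply the RWF_* flags and
 * pick the completion handler depending on whether the ring uses IOPOLL.
 */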
 716static int io_rw_init_file(struct io_kiocb *req, fmode_t mode)
 717{
 718	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 719	struct kiocb *kiocb = &rw->kiocb;
 720	struct io_ring_ctx *ctx = req->ctx;
 721	struct file *file = req->file;
 722	int ret;
 723
 724	if (unlikely(!file || !(file->f_mode & mode)))
 725		return -EBADF;
 726
 727	if (!(req->flags & REQ_F_FIXED_FILE))
 728		req->flags |= io_file_get_flags(file);
 729
 730	kiocb->ki_flags = file->f_iocb_flags;
 731	ret = kiocb_set_rw_flags(kiocb, rw->flags);
 732	if (unlikely(ret))
 733		return ret;
 734	kiocb->ki_flags |= IOCB_ALLOC_CACHE;
 735
 736	/*
 737	 * If the file is marked O_NONBLOCK, still allow retry for it if it
 738	 * supports async. Otherwise it's impossible to use O_NONBLOCK files
  739	 * reliably. If not, or if IOCB_NOWAIT is set, don't retry.
 740	 */
 741	if ((kiocb->ki_flags & IOCB_NOWAIT) ||
 742	    ((file->f_flags & O_NONBLOCK) && !io_file_supports_nowait(req)))
 743		req->flags |= REQ_F_NOWAIT;
 744
 745	if (ctx->flags & IORING_SETUP_IOPOLL) {
 746		if (!(kiocb->ki_flags & IOCB_DIRECT) || !file->f_op->iopoll)
 747			return -EOPNOTSUPP;
 748
 749		kiocb->private = NULL;
 750		kiocb->ki_flags |= IOCB_HIPRI;
 751		kiocb->ki_complete = io_complete_rw_iopoll;
 752		req->iopoll_completed = 0;
 753	} else {
 754		if (kiocb->ki_flags & IOCB_HIPRI)
 755			return -EINVAL;
 756		kiocb->ki_complete = io_complete_rw;
 757	}
 758
 759	return 0;
 760}
 761
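/*
 * Core of the read path, shared by the normal and multishot read handlers.
 * Returns bytes read, an error, or IOU_ISSUE_SKIP_COMPLETE if completion
 * will be posted elsewhere.
 */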
 762static int __io_read(struct io_kiocb *req, unsigned int issue_flags)
 763{
 764	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 765	struct io_rw_state __s, *s = &__s;
 766	struct iovec *iovec;
 767	struct kiocb *kiocb = &rw->kiocb;
 768	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
 769	struct io_async_rw *io;
 770	ssize_t ret, ret2;
 771	loff_t *ppos;
 772
 773	if (!req_has_async_data(req)) {
 774		ret = io_import_iovec(ITER_DEST, req, &iovec, s, issue_flags);
 775		if (unlikely(ret < 0))
 776			return ret;
 777	} else {
 778		io = req->async_data;
 779		s = &io->s;
 780
 781		/*
 782		 * Safe and required to re-import if we're using provided
 783		 * buffers, as we dropped the selected one before retry.
 784		 */
 785		if (io_do_buffer_select(req)) {
 786			ret = io_import_iovec(ITER_DEST, req, &iovec, s, issue_flags);
 787			if (unlikely(ret < 0))
 788				return ret;
 789		}
 790
 791		/*
 792		 * We come here from an earlier attempt, restore our state to
 793		 * match in case it doesn't. It's cheap enough that we don't
 794		 * need to make this conditional.
 795		 */
 796		iov_iter_restore(&s->iter, &s->iter_state);
 797		iovec = NULL;
 798	}
 799	ret = io_rw_init_file(req, FMODE_READ);
 800	if (unlikely(ret)) {
 801		kfree(iovec);
 802		return ret;
 803	}
 804	req->cqe.res = iov_iter_count(&s->iter);
 805
 806	if (force_nonblock) {
 807		/* If the file doesn't support async, just async punt */
 808		if (unlikely(!io_file_supports_nowait(req))) {
 809			ret = io_setup_async_rw(req, iovec, s, true);
 810			return ret ?: -EAGAIN;
 811		}
 812		kiocb->ki_flags |= IOCB_NOWAIT;
 813	} else {
 814		/* Ensure we clear previously set non-block flag */
 815		kiocb->ki_flags &= ~IOCB_NOWAIT;
 816	}
 817
 818	ppos = io_kiocb_update_pos(req);
 819
 820	ret = rw_verify_area(READ, req->file, ppos, req->cqe.res);
 821	if (unlikely(ret)) {
 822		kfree(iovec);
 823		return ret;
 824	}
 825
 826	ret = io_iter_do_read(rw, &s->iter);
 827
 828	if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
 829		req->flags &= ~REQ_F_REISSUE;
 830		/*
 831		 * If we can poll, just do that. For a vectored read, we'll
 832		 * need to copy state first.
 833		 */
 834		if (file_can_poll(req->file) && !io_issue_defs[req->opcode].vectored)
 835			return -EAGAIN;
 836		/* IOPOLL retry should happen for io-wq threads */
 837		if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
 838			goto done;
 839		/* no retry on NONBLOCK nor RWF_NOWAIT */
 840		if (req->flags & REQ_F_NOWAIT)
 841			goto done;
 842		ret = 0;
 843	} else if (ret == -EIOCBQUEUED) {
 844		if (iovec)
 845			kfree(iovec);
 846		return IOU_ISSUE_SKIP_COMPLETE;
 847	} else if (ret == req->cqe.res || ret <= 0 || !force_nonblock ||
 848		   (req->flags & REQ_F_NOWAIT) || !need_complete_io(req)) {
 849		/* read all, failed, already did sync or don't want to retry */
 850		goto done;
 851	}
 852
 853	/*
 854	 * Don't depend on the iter state matching what was consumed, or being
 855	 * untouched in case of error. Restore it and we'll advance it
 856	 * manually if we need to.
 857	 */
 858	iov_iter_restore(&s->iter, &s->iter_state);
 859
 860	ret2 = io_setup_async_rw(req, iovec, s, true);
 861	iovec = NULL;
 862	if (ret2) {
 863		ret = ret > 0 ? ret : ret2;
 864		goto done;
 865	}
 866
 867	io = req->async_data;
 868	s = &io->s;
 869	/*
 870	 * Now use our persistent iterator and state, if we aren't already.
 871	 * We've restored and mapped the iter to match.
 872	 */
 873
 874	do {
 875		/*
 876		 * We end up here because of a partial read, either from
 877		 * above or inside this loop. Advance the iter by the bytes
 878		 * that were consumed.
 879		 */
 880		iov_iter_advance(&s->iter, ret);
 881		if (!iov_iter_count(&s->iter))
 882			break;
 883		io->bytes_done += ret;
 884		iov_iter_save_state(&s->iter, &s->iter_state);
 885
 886		/* if we can retry, do so with the callbacks armed */
 887		if (!io_rw_should_retry(req)) {
 888			kiocb->ki_flags &= ~IOCB_WAITQ;
 889			return -EAGAIN;
 890		}
 891
 892		req->cqe.res = iov_iter_count(&s->iter);
 893		/*
 894		 * Now retry read with the IOCB_WAITQ parts set in the iocb. If
 895		 * we get -EIOCBQUEUED, then we'll get a notification when the
 896		 * desired page gets unlocked. We can also get a partial read
 897		 * here, and if we do, then just retry at the new offset.
 898		 */
 899		ret = io_iter_do_read(rw, &s->iter);
 900		if (ret == -EIOCBQUEUED)
 901			return IOU_ISSUE_SKIP_COMPLETE;
 902		/* we got some bytes, but not all. retry. */
 903		kiocb->ki_flags &= ~IOCB_WAITQ;
 904		iov_iter_restore(&s->iter, &s->iter_state);
 905	} while (ret > 0);
 906done:
  907	/* it's faster to check here than to delegate to kfree */
 908	if (iovec)
 909		kfree(iovec);
 910	return ret;
 911}
 912
 913int io_read(struct io_kiocb *req, unsigned int issue_flags)
 914{
 915	int ret;
 916
 917	ret = __io_read(req, issue_flags);
 918	if (ret >= 0)
 919		return kiocb_done(req, ret, issue_flags);
 920
 921	return ret;
 922}
 923
 924int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
 925{
 926	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 927	unsigned int cflags = 0;
 928	int ret;
 929
 930	/*
 931	 * Multishot MUST be used on a pollable file
 932	 */
 933	if (!file_can_poll(req->file))
 934		return -EBADFD;
 935
 936	ret = __io_read(req, issue_flags);
 937
 938	/*
 939	 * If we get -EAGAIN, recycle our buffer and just let normal poll
 940	 * handling arm it.
 941	 */
 942	if (ret == -EAGAIN) {
 943		/*
 944		 * Reset rw->len to 0 again to avoid clamping future mshot
 945		 * reads, in case the buffer size varies.
 946		 */
 947		if (io_kbuf_recycle(req, issue_flags))
 948			rw->len = 0;
 949		return -EAGAIN;
 950	}
 951
 952	/*
 953	 * Any successful return value will keep the multishot read armed.
 954	 */
 955	if (ret > 0) {
 956		/*
 957		 * Put our buffer and post a CQE. If we fail to post a CQE, then
 958		 * jump to the termination path. This request is then done.
 959		 */
 960		cflags = io_put_kbuf(req, issue_flags);
 961		rw->len = 0; /* similarly to above, reset len to 0 */
 962
 963		if (io_fill_cqe_req_aux(req,
 964					issue_flags & IO_URING_F_COMPLETE_DEFER,
 965					ret, cflags | IORING_CQE_F_MORE)) {
 966			if (issue_flags & IO_URING_F_MULTISHOT) {
 967				/*
 968				 * Force retry, as we might have more data to
 969				 * be read and otherwise it won't get retried
 970				 * until (if ever) another poll is triggered.
 971				 */
 972				io_poll_multishot_retry(req);
 973				return IOU_ISSUE_SKIP_COMPLETE;
 974			}
 975			return -EAGAIN;
 976		}
 977	}
 978
 979	/*
 980	 * Either an error, or we've hit overflow posting the CQE. For any
 981	 * multishot request, hitting overflow will terminate it.
 982	 */
 983	io_req_set_res(req, ret, cflags);
 984	if (issue_flags & IO_URING_F_MULTISHOT)
 985		return IOU_STOP_MULTISHOT;
 986	return IOU_OK;
 987}
 988
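/*
 * Write side of the issue path: mirrors __io_read(), but also starts write
 * accounting and arranges for partial writes to be finished from io-wq.
 */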
 989int io_write(struct io_kiocb *req, unsigned int issue_flags)
 990{
 991	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 992	struct io_rw_state __s, *s = &__s;
 993	struct iovec *iovec;
 994	struct kiocb *kiocb = &rw->kiocb;
 995	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
 996	ssize_t ret, ret2;
 997	loff_t *ppos;
 998
 999	if (!req_has_async_data(req)) {
1000		ret = io_import_iovec(ITER_SOURCE, req, &iovec, s, issue_flags);
1001		if (unlikely(ret < 0))
1002			return ret;
1003	} else {
1004		struct io_async_rw *io = req->async_data;
1005
1006		s = &io->s;
1007		iov_iter_restore(&s->iter, &s->iter_state);
1008		iovec = NULL;
1009	}
1010	ret = io_rw_init_file(req, FMODE_WRITE);
1011	if (unlikely(ret)) {
1012		kfree(iovec);
1013		return ret;
1014	}
1015	req->cqe.res = iov_iter_count(&s->iter);
1016
1017	if (force_nonblock) {
1018		/* If the file doesn't support async, just async punt */
1019		if (unlikely(!io_file_supports_nowait(req)))
1020			goto copy_iov;
1021
 1022		/* Regular files need FMODE_BUF_WASYNC for buffered (non-direct) NOWAIT writes. */
1023		if (!(kiocb->ki_flags & IOCB_DIRECT) &&
1024			!(kiocb->ki_filp->f_mode & FMODE_BUF_WASYNC) &&
1025			(req->flags & REQ_F_ISREG))
1026			goto copy_iov;
1027
1028		kiocb->ki_flags |= IOCB_NOWAIT;
1029	} else {
1030		/* Ensure we clear previously set non-block flag */
1031		kiocb->ki_flags &= ~IOCB_NOWAIT;
1032	}
1033
1034	ppos = io_kiocb_update_pos(req);
1035
1036	ret = rw_verify_area(WRITE, req->file, ppos, req->cqe.res);
1037	if (unlikely(ret)) {
1038		kfree(iovec);
1039		return ret;
1040	}
1041
1042	if (req->flags & REQ_F_ISREG)
1043		kiocb_start_write(kiocb);
1044	kiocb->ki_flags |= IOCB_WRITE;
1045
1046	if (likely(req->file->f_op->write_iter))
1047		ret2 = call_write_iter(req->file, kiocb, &s->iter);
1048	else if (req->file->f_op->write)
1049		ret2 = loop_rw_iter(WRITE, rw, &s->iter);
1050	else
1051		ret2 = -EINVAL;
1052
1053	if (req->flags & REQ_F_REISSUE) {
1054		req->flags &= ~REQ_F_REISSUE;
1055		ret2 = -EAGAIN;
1056	}
1057
1058	/*
1059	 * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
1060	 * retry them without IOCB_NOWAIT.
1061	 */
1062	if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
1063		ret2 = -EAGAIN;
1064	/* no retry on NONBLOCK nor RWF_NOWAIT */
1065	if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT))
1066		goto done;
1067	if (!force_nonblock || ret2 != -EAGAIN) {
1068		/* IOPOLL retry should happen for io-wq threads */
1069		if (ret2 == -EAGAIN && (req->ctx->flags & IORING_SETUP_IOPOLL))
1070			goto copy_iov;
1071
1072		if (ret2 != req->cqe.res && ret2 >= 0 && need_complete_io(req)) {
1073			struct io_async_rw *io;
1074
1075			trace_io_uring_short_write(req->ctx, kiocb->ki_pos - ret2,
1076						req->cqe.res, ret2);
1077
1078			/* This is a partial write. The file pos has already been
1079			 * updated, setup the async struct to complete the request
1080			 * in the worker. Also update bytes_done to account for
1081			 * the bytes already written.
1082			 */
1083			iov_iter_save_state(&s->iter, &s->iter_state);
1084			ret = io_setup_async_rw(req, iovec, s, true);
1085
1086			io = req->async_data;
1087			if (io)
1088				io->bytes_done += ret2;
1089
1090			if (kiocb->ki_flags & IOCB_WRITE)
1091				io_req_end_write(req);
1092			return ret ? ret : -EAGAIN;
1093		}
1094done:
1095		ret = kiocb_done(req, ret2, issue_flags);
1096	} else {
1097copy_iov:
1098		iov_iter_restore(&s->iter, &s->iter_state);
1099		ret = io_setup_async_rw(req, iovec, s, false);
1100		if (!ret) {
1101			if (kiocb->ki_flags & IOCB_WRITE)
1102				io_req_end_write(req);
1103			return -EAGAIN;
1104		}
1105		return ret;
1106	}
1107	/* it's reportedly faster than delegating the null check to kfree() */
1108	if (iovec)
1109		kfree(iovec);
1110	return ret;
1111}
1112
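/* Ensure a failed request still reports any bytes that were transferred */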
1113void io_rw_fail(struct io_kiocb *req)
1114{
1115	int res;
1116
1117	res = io_fixup_rw_res(req, req->cqe.res);
1118	io_req_set_res(req, res, req->cqe.flags);
1119}
1120
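/*
 * Reap completions for IOPOLL requests: poll each pending kiocb, then flush
 * CQEs for everything that has completed. Returns the number of events found.
 */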
1121int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
1122{
1123	struct io_wq_work_node *pos, *start, *prev;
1124	unsigned int poll_flags = 0;
1125	DEFINE_IO_COMP_BATCH(iob);
1126	int nr_events = 0;
1127
1128	/*
1129	 * Only spin for completions if we don't have multiple devices hanging
1130	 * off our complete list.
1131	 */
1132	if (ctx->poll_multi_queue || force_nonspin)
1133		poll_flags |= BLK_POLL_ONESHOT;
1134
1135	wq_list_for_each(pos, start, &ctx->iopoll_list) {
1136		struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);
1137		struct file *file = req->file;
1138		int ret;
1139
1140		/*
 1141		 * If we find a request that has already completed, stop
 1142		 * scanning and reap the completed entries first; we can
 1143		 * poll for more afterwards.
1144		 */
1145		if (READ_ONCE(req->iopoll_completed))
1146			break;
1147
1148		if (req->opcode == IORING_OP_URING_CMD) {
1149			struct io_uring_cmd *ioucmd;
1150
1151			ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
1152			ret = file->f_op->uring_cmd_iopoll(ioucmd, &iob,
1153								poll_flags);
1154		} else {
1155			struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
1156
1157			ret = file->f_op->iopoll(&rw->kiocb, &iob, poll_flags);
1158		}
1159		if (unlikely(ret < 0))
1160			return ret;
1161		else if (ret)
1162			poll_flags |= BLK_POLL_ONESHOT;
1163
1164		/* iopoll may have completed current req */
1165		if (!rq_list_empty(iob.req_list) ||
1166		    READ_ONCE(req->iopoll_completed))
1167			break;
1168	}
1169
1170	if (!rq_list_empty(iob.req_list))
1171		iob.complete(&iob);
1172	else if (!pos)
1173		return 0;
1174
1175	prev = start;
1176	wq_list_for_each_resume(pos, prev) {
1177		struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);
1178
1179		/* order with io_complete_rw_iopoll(), e.g. ->result updates */
1180		if (!smp_load_acquire(&req->iopoll_completed))
1181			break;
1182		nr_events++;
1183		req->cqe.flags = io_put_kbuf(req, 0);
1184	}
1185	if (unlikely(!nr_events))
1186		return 0;
1187
1188	pos = start ? start->next : ctx->iopoll_list.first;
1189	wq_list_cut(&ctx->iopoll_list, prev, start);
1190
1191	if (WARN_ON_ONCE(!wq_list_empty(&ctx->submit_state.compl_reqs)))
1192		return 0;
1193	ctx->submit_state.compl_reqs.first = pos;
1194	__io_submit_flush_completions(ctx);
1195	return nr_events;
1196}