v6.2
   1// SPDX-License-Identifier: GPL-2.0
   2#include <linux/kernel.h>
   3#include <linux/errno.h>
   4#include <linux/fs.h>
   5#include <linux/file.h>
   6#include <linux/blk-mq.h>
   7#include <linux/mm.h>
   8#include <linux/slab.h>
   9#include <linux/fsnotify.h>
  10#include <linux/poll.h>
  11#include <linux/nospec.h>
  12#include <linux/compat.h>
  13#include <linux/io_uring.h>
  14
  15#include <uapi/linux/io_uring.h>
  16
  17#include "io_uring.h"
  18#include "opdef.h"
  19#include "kbuf.h"
  20#include "rsrc.h"
  21#include "rw.h"
  22
  23struct io_rw {
  24	/* NOTE: kiocb has the file as the first member, so don't do it here */
  25	struct kiocb			kiocb;
  26	u64				addr;
  27	u32				len;
  28	rwf_t				flags;
  29};
  30
  31static inline bool io_file_supports_nowait(struct io_kiocb *req)
  32{
  33	return req->flags & REQ_F_SUPPORT_NOWAIT;
  34}
  35
  36#ifdef CONFIG_COMPAT
  37static int io_iov_compat_buffer_select_prep(struct io_rw *rw)
  38{
  39	struct compat_iovec __user *uiov;
  40	compat_ssize_t clen;
  41
  42	uiov = u64_to_user_ptr(rw->addr);
  43	if (!access_ok(uiov, sizeof(*uiov)))
  44		return -EFAULT;
  45	if (__get_user(clen, &uiov->iov_len))
  46		return -EFAULT;
  47	if (clen < 0)
  48		return -EINVAL;
  49
  50	rw->len = clen;
  51	return 0;
  52}
  53#endif
  54
  55static int io_iov_buffer_select_prep(struct io_kiocb *req)
  56{
  57	struct iovec __user *uiov;
  58	struct iovec iov;
  59	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
  60
  61	if (rw->len != 1)
  62		return -EINVAL;
  63
  64#ifdef CONFIG_COMPAT
  65	if (req->ctx->compat)
  66		return io_iov_compat_buffer_select_prep(rw);
  67#endif
  68
  69	uiov = u64_to_user_ptr(rw->addr);
  70	if (copy_from_user(&iov, uiov, sizeof(*uiov)))
  71		return -EFAULT;
  72	rw->len = iov.iov_len;
  73	return 0;
  74}
  75
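The prep helper above enforces the userspace contract for vectored reads/writes that use buffer selection: exactly one iovec may be passed, and only its iov_len is taken (as the maximum transfer size); the data buffer itself is picked from the provided-buffer group at issue time. A hedged liburing sketch of that usage, assuming a ring already initialized with io_uring_queue_init() and a provided-buffer group 0 populated earlier (fd, length and group id are illustrative, not part of the kernel source):

#include <errno.h>
#include <liburing.h>

/* READV with buffer selection: exactly one iovec, and only iov_len matters */
static int queue_selected_readv(struct io_uring *ring, int fd)
{
	static struct iovec iov = {
		.iov_base = NULL,	/* unused: the data buffer comes from group 0 */
		.iov_len  = 4096,	/* upper bound for this read */
	};
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	if (!sqe)
		return -EBUSY;
	io_uring_prep_readv(sqe, fd, &iov, 1, 0);
	sqe->flags |= IOSQE_BUFFER_SELECT;	/* becomes REQ_F_BUFFER_SELECT */
	sqe->buf_group = 0;			/* provided-buffer group id */
	return io_uring_submit(ring);
}

Passing more than one iovec here fails the rw->len != 1 check above with -EINVAL.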
  76int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
  77{
  78	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
  79	unsigned ioprio;
  80	int ret;
  81
  82	rw->kiocb.ki_pos = READ_ONCE(sqe->off);
  83	/* used for fixed read/write too - just read unconditionally */
  84	req->buf_index = READ_ONCE(sqe->buf_index);
  85
  86	if (req->opcode == IORING_OP_READ_FIXED ||
  87	    req->opcode == IORING_OP_WRITE_FIXED) {
  88		struct io_ring_ctx *ctx = req->ctx;
  89		u16 index;
  90
  91		if (unlikely(req->buf_index >= ctx->nr_user_bufs))
  92			return -EFAULT;
  93		index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
  94		req->imu = ctx->user_bufs[index];
  95		io_req_set_rsrc_node(req, ctx, 0);
  96	}
  97
  98	ioprio = READ_ONCE(sqe->ioprio);
  99	if (ioprio) {
 100		ret = ioprio_check_cap(ioprio);
 101		if (ret)
 102			return ret;
 103
 104		rw->kiocb.ki_ioprio = ioprio;
 105	} else {
 106		rw->kiocb.ki_ioprio = get_current_ioprio();
 107	}
 108
 109	rw->addr = READ_ONCE(sqe->addr);
 110	rw->len = READ_ONCE(sqe->len);
 111	rw->flags = READ_ONCE(sqe->rw_flags);
 112
  113	/* Have to do this validation here, as rw->len might have changed
  114	 * by the time io_read() runs, due to buffer selection
 115	 */
 116	if (req->opcode == IORING_OP_READV && req->flags & REQ_F_BUFFER_SELECT) {
 117		ret = io_iov_buffer_select_prep(req);
 118		if (ret)
 119			return ret;
 120	}
 121
 122	return 0;
 123}
 124
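io_prep_rw() above is the common prep path for all read/write opcodes: it pulls the offset, buffer address, length, rw_flags, ioprio and (for the fixed variants) the registered-buffer index straight out of the SQE. A hedged liburing sketch of the plain IORING_OP_READ side of that mapping; the ring, fd and sizes are illustrative and error handling is trimmed:

#include <errno.h>
#include <liburing.h>

static int queue_read_at(struct io_uring *ring, int fd, void *buf,
			 unsigned int len, __u64 off)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	if (!sqe)
		return -EBUSY;
	/* io_uring_prep_read() fills sqe->addr, sqe->len and sqe->off,
	 * which are the fields io_prep_rw() reads back out above */
	io_uring_prep_read(sqe, fd, buf, len, off);
	/* sqe->rw_flags and sqe->ioprio could also be set here; they end up
	 * in rw->flags and kiocb.ki_ioprio respectively */
	return io_uring_submit(ring);
}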
 125void io_readv_writev_cleanup(struct io_kiocb *req)
 126{
 127	struct io_async_rw *io = req->async_data;
 128
 129	kfree(io->free_iovec);
 130}
 131
 132static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
 133{
 134	switch (ret) {
 135	case -EIOCBQUEUED:
 136		break;
 137	case -ERESTARTSYS:
 138	case -ERESTARTNOINTR:
 139	case -ERESTARTNOHAND:
 140	case -ERESTART_RESTARTBLOCK:
 141		/*
 142		 * We can't just restart the syscall, since previously
 143		 * submitted sqes may already be in progress. Just fail this
 144		 * IO with EINTR.
 145		 */
 146		ret = -EINTR;
 147		fallthrough;
 148	default:
 149		kiocb->ki_complete(kiocb, ret);
 150	}
 151}
 152
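The restart-error handling above has a visible userspace consequence: a signal that interrupts an already-submitted request never restarts it; the CQE simply carries -EINTR and the application must resubmit if it still wants the data. A hedged completion-side sketch, assuming liburing and a ring with requests already in flight:

#include <errno.h>
#include <liburing.h>

/* reap one completion; -EINTR in cqe->res means the request was interrupted
 * by a signal and has to be prepared and submitted again */
static int reap_one(struct io_uring *ring)
{
	struct io_uring_cqe *cqe;
	int ret = io_uring_wait_cqe(ring, &cqe);

	if (ret < 0)
		return ret;	/* the wait itself failed or was interrupted */
	ret = cqe->res;		/* -EINTR shows up here, never a restart */
	io_uring_cqe_seen(ring, cqe);
	return ret;
}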
 153static inline loff_t *io_kiocb_update_pos(struct io_kiocb *req)
 154{
 155	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 156
 157	if (rw->kiocb.ki_pos != -1)
 158		return &rw->kiocb.ki_pos;
 159
 160	if (!(req->file->f_mode & FMODE_STREAM)) {
 161		req->flags |= REQ_F_CUR_POS;
 162		rw->kiocb.ki_pos = req->file->f_pos;
 163		return &rw->kiocb.ki_pos;
 164	}
 165
 166	rw->kiocb.ki_pos = 0;
 167	return NULL;
 168}
 169
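io_kiocb_update_pos() is what gives io_uring its read(2)-like behaviour: an SQE offset of -1 makes the request use and then advance the file's current position (REQ_F_CUR_POS), while stream files such as pipes and sockets get no position at all. A hedged sketch of two ordered reads relying on that, assuming liburing; fd and buffer sizes are illustrative and SQE exhaustion is not handled:

#include <liburing.h>

/* two linked reads at offset -1: each uses and then advances f_pos,
 * the same way consecutive read(2) calls would */
static void queue_two_sequential_reads(struct io_uring *ring, int fd,
				       char *buf1, char *buf2, unsigned int len)
{
	struct io_uring_sqe *sqe;

	sqe = io_uring_get_sqe(ring);
	io_uring_prep_read(sqe, fd, buf1, len, -1ULL);
	sqe->flags |= IOSQE_IO_LINK;	/* run the second read after the first */

	sqe = io_uring_get_sqe(ring);
	io_uring_prep_read(sqe, fd, buf2, len, -1ULL);

	io_uring_submit(ring);
}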
 170static void io_req_task_queue_reissue(struct io_kiocb *req)
 171{
 172	req->io_task_work.func = io_queue_iowq;
 173	io_req_task_work_add(req);
 174}
 175
 176#ifdef CONFIG_BLOCK
 177static bool io_resubmit_prep(struct io_kiocb *req)
 178{
 179	struct io_async_rw *io = req->async_data;
 180
 181	if (!req_has_async_data(req))
 182		return !io_req_prep_async(req);
 183	iov_iter_restore(&io->s.iter, &io->s.iter_state);
 184	return true;
 185}
 186
 187static bool io_rw_should_reissue(struct io_kiocb *req)
 188{
 189	umode_t mode = file_inode(req->file)->i_mode;
 190	struct io_ring_ctx *ctx = req->ctx;
 191
 192	if (!S_ISBLK(mode) && !S_ISREG(mode))
 193		return false;
 194	if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() &&
 195	    !(ctx->flags & IORING_SETUP_IOPOLL)))
 196		return false;
 197	/*
 198	 * If ref is dying, we might be running poll reap from the exit work.
 199	 * Don't attempt to reissue from that path, just let it fail with
 200	 * -EAGAIN.
 201	 */
 202	if (percpu_ref_is_dying(&ctx->refs))
 203		return false;
 204	/*
 205	 * Play it safe and assume not safe to re-import and reissue if we're
 206	 * not in the original thread group (or in task context).
 207	 */
 208	if (!same_thread_group(req->task, current) || !in_task())
 209		return false;
 210	return true;
 211}
 212#else
 213static bool io_resubmit_prep(struct io_kiocb *req)
 214{
 215	return false;
 216}
 217static bool io_rw_should_reissue(struct io_kiocb *req)
 218{
 219	return false;
 220}
 221#endif
 222
 223static void kiocb_end_write(struct io_kiocb *req)
 224{
 225	/*
 226	 * Tell lockdep we inherited freeze protection from submission
 227	 * thread.
 228	 */
 229	if (req->flags & REQ_F_ISREG) {
 230		struct super_block *sb = file_inode(req->file)->i_sb;
 231
 232		__sb_writers_acquired(sb, SB_FREEZE_WRITE);
 233		sb_end_write(sb);
 234	}
 235}
 236
 237/*
 238 * Trigger the notifications after having done some IO, and finish the write
 239 * accounting, if any.
 240 */
 241static void io_req_io_end(struct io_kiocb *req)
 242{
 243	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 244
 245	if (rw->kiocb.ki_flags & IOCB_WRITE) {
 246		kiocb_end_write(req);
 247		fsnotify_modify(req->file);
 248	} else {
 249		fsnotify_access(req->file);
 250	}
 251}
 252
 253static bool __io_complete_rw_common(struct io_kiocb *req, long res)
 254{
 255	if (unlikely(res != req->cqe.res)) {
 256		if ((res == -EAGAIN || res == -EOPNOTSUPP) &&
 257		    io_rw_should_reissue(req)) {
 258			/*
 259			 * Reissue will start accounting again, finish the
 260			 * current cycle.
 261			 */
 262			io_req_io_end(req);
 263			req->flags |= REQ_F_REISSUE | REQ_F_PARTIAL_IO;
 264			return true;
 265		}
 266		req_set_fail(req);
 267		req->cqe.res = res;
 268	}
 269	return false;
 270}
 271
 272static inline int io_fixup_rw_res(struct io_kiocb *req, long res)
 273{
 274	struct io_async_rw *io = req->async_data;
 275
 276	/* add previously done IO, if any */
 277	if (req_has_async_data(req) && io->bytes_done > 0) {
 278		if (res < 0)
 279			res = io->bytes_done;
 280		else
 281			res += io->bytes_done;
 282	}
 283	return res;
 284}
 285
 286static void io_req_rw_complete(struct io_kiocb *req, bool *locked)
 287{
 288	io_req_io_end(req);
 289
 290	if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)) {
 291		unsigned issue_flags = *locked ? 0 : IO_URING_F_UNLOCKED;
 292
 293		req->cqe.flags |= io_put_kbuf(req, issue_flags);
 294	}
 295	io_req_task_complete(req, locked);
 296}
 297
 298static void io_complete_rw(struct kiocb *kiocb, long res)
 299{
 300	struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
 301	struct io_kiocb *req = cmd_to_io_kiocb(rw);
 302
 303	if (__io_complete_rw_common(req, res))
 304		return;
 305	io_req_set_res(req, io_fixup_rw_res(req, res), 0);
 306	req->io_task_work.func = io_req_rw_complete;
 307	io_req_task_work_add(req);
 308}
 309
 310static void io_complete_rw_iopoll(struct kiocb *kiocb, long res)
 311{
 312	struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
 313	struct io_kiocb *req = cmd_to_io_kiocb(rw);
 314
 315	if (kiocb->ki_flags & IOCB_WRITE)
 316		kiocb_end_write(req);
 317	if (unlikely(res != req->cqe.res)) {
 318		if (res == -EAGAIN && io_rw_should_reissue(req)) {
 319			req->flags |= REQ_F_REISSUE | REQ_F_PARTIAL_IO;
 320			return;
 321		}
 322		req->cqe.res = res;
 323	}
 324
 325	/* order with io_iopoll_complete() checking ->iopoll_completed */
 326	smp_store_release(&req->iopoll_completed, 1);
 327}
 328
 329static int kiocb_done(struct io_kiocb *req, ssize_t ret,
 330		       unsigned int issue_flags)
 331{
 332	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 333	unsigned final_ret = io_fixup_rw_res(req, ret);
 334
 335	if (req->flags & REQ_F_CUR_POS)
 336		req->file->f_pos = rw->kiocb.ki_pos;
 337	if (ret >= 0 && (rw->kiocb.ki_complete == io_complete_rw)) {
 338		if (!__io_complete_rw_common(req, ret)) {
 339			/*
 340			 * Safe to call io_end from here as we're inline
 341			 * from the submission path.
 342			 */
 343			io_req_io_end(req);
 344			io_req_set_res(req, final_ret,
 345				       io_put_kbuf(req, issue_flags));
 346			return IOU_OK;
 347		}
 348	} else {
 349		io_rw_done(&rw->kiocb, ret);
 350	}
 351
 352	if (req->flags & REQ_F_REISSUE) {
 353		req->flags &= ~REQ_F_REISSUE;
 354		if (io_resubmit_prep(req))
 355			io_req_task_queue_reissue(req);
 356		else
 357			io_req_task_queue_fail(req, final_ret);
 358	}
 359	return IOU_ISSUE_SKIP_COMPLETE;
 360}
 361
 362static struct iovec *__io_import_iovec(int ddir, struct io_kiocb *req,
 363				       struct io_rw_state *s,
 364				       unsigned int issue_flags)
 365{
 366	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 367	struct iov_iter *iter = &s->iter;
 368	u8 opcode = req->opcode;
 369	struct iovec *iovec;
 370	void __user *buf;
 371	size_t sqe_len;
 372	ssize_t ret;
 373
 374	if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
 375		ret = io_import_fixed(ddir, iter, req->imu, rw->addr, rw->len);
 376		if (ret)
 377			return ERR_PTR(ret);
 378		return NULL;
 379	}
 380
 381	buf = u64_to_user_ptr(rw->addr);
 382	sqe_len = rw->len;
 383
 384	if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE ||
 385	    (req->flags & REQ_F_BUFFER_SELECT)) {
 386		if (io_do_buffer_select(req)) {
 387			buf = io_buffer_select(req, &sqe_len, issue_flags);
 388			if (!buf)
 389				return ERR_PTR(-ENOBUFS);
 390			rw->addr = (unsigned long) buf;
 391			rw->len = sqe_len;
 392		}
 393
 394		ret = import_single_range(ddir, buf, sqe_len, s->fast_iov, iter);
 395		if (ret)
 396			return ERR_PTR(ret);
 397		return NULL;
 398	}
 399
 400	iovec = s->fast_iov;
 401	ret = __import_iovec(ddir, buf, sqe_len, UIO_FASTIOV, &iovec, iter,
 402			      req->ctx->compat);
 403	if (unlikely(ret < 0))
 404		return ERR_PTR(ret);
 405	return iovec;
 406}
 407
 408static inline int io_import_iovec(int rw, struct io_kiocb *req,
 409				  struct iovec **iovec, struct io_rw_state *s,
 410				  unsigned int issue_flags)
 411{
 412	*iovec = __io_import_iovec(rw, req, s, issue_flags);
 413	if (unlikely(IS_ERR(*iovec)))
 414		return PTR_ERR(*iovec);
 415
 416	iov_iter_save_state(&s->iter, &s->iter_state);
 417	return 0;
 418}
 419
 420static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
 421{
 422	return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? NULL : &kiocb->ki_pos;
 423}
 424
 425/*
 426 * For files that don't have ->read_iter() and ->write_iter(), handle them
 427 * by looping over ->read() or ->write() manually.
 428 */
 429static ssize_t loop_rw_iter(int ddir, struct io_rw *rw, struct iov_iter *iter)
 430{
 431	struct kiocb *kiocb = &rw->kiocb;
 432	struct file *file = kiocb->ki_filp;
 433	ssize_t ret = 0;
 434	loff_t *ppos;
 435
 436	/*
 437	 * Don't support polled IO through this interface, and we can't
 438	 * support non-blocking either. For the latter, this just causes
 439	 * the kiocb to be handled from an async context.
 440	 */
 441	if (kiocb->ki_flags & IOCB_HIPRI)
 442		return -EOPNOTSUPP;
 443	if ((kiocb->ki_flags & IOCB_NOWAIT) &&
 444	    !(kiocb->ki_filp->f_flags & O_NONBLOCK))
 445		return -EAGAIN;
 446
 447	ppos = io_kiocb_ppos(kiocb);
 448
 449	while (iov_iter_count(iter)) {
 450		struct iovec iovec;
 451		ssize_t nr;
 452
 453		if (!iov_iter_is_bvec(iter)) {
 454			iovec = iov_iter_iovec(iter);
 455		} else {
 456			iovec.iov_base = u64_to_user_ptr(rw->addr);
 457			iovec.iov_len = rw->len;
 458		}
 459
 460		if (ddir == READ) {
 461			nr = file->f_op->read(file, iovec.iov_base,
 462					      iovec.iov_len, ppos);
 463		} else {
 464			nr = file->f_op->write(file, iovec.iov_base,
 465					       iovec.iov_len, ppos);
 466		}
 467
 468		if (nr < 0) {
 469			if (!ret)
 470				ret = nr;
 471			break;
 472		}
 473		ret += nr;
 474		if (!iov_iter_is_bvec(iter)) {
 475			iov_iter_advance(iter, nr);
 476		} else {
 477			rw->addr += nr;
 478			rw->len -= nr;
 479			if (!rw->len)
 480				break;
 481		}
 482		if (nr != iovec.iov_len)
 483			break;
 484	}
 485
 486	return ret;
 487}
 488
 489static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec,
 490			  const struct iovec *fast_iov, struct iov_iter *iter)
 491{
 492	struct io_async_rw *io = req->async_data;
 493
 494	memcpy(&io->s.iter, iter, sizeof(*iter));
 495	io->free_iovec = iovec;
 496	io->bytes_done = 0;
 497	/* can only be fixed buffers, no need to do anything */
 498	if (iov_iter_is_bvec(iter))
 499		return;
 500	if (!iovec) {
 501		unsigned iov_off = 0;
 502
 503		io->s.iter.iov = io->s.fast_iov;
 504		if (iter->iov != fast_iov) {
 505			iov_off = iter->iov - fast_iov;
 506			io->s.iter.iov += iov_off;
 507		}
 508		if (io->s.fast_iov != fast_iov)
 509			memcpy(io->s.fast_iov + iov_off, fast_iov + iov_off,
 510			       sizeof(struct iovec) * iter->nr_segs);
 511	} else {
 512		req->flags |= REQ_F_NEED_CLEANUP;
 513	}
 514}
 515
 516static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
 517			     struct io_rw_state *s, bool force)
 518{
 519	if (!force && !io_op_defs[req->opcode].prep_async)
 520		return 0;
 521	if (!req_has_async_data(req)) {
 522		struct io_async_rw *iorw;
 523
 524		if (io_alloc_async_data(req)) {
 525			kfree(iovec);
 526			return -ENOMEM;
 527		}
 528
 529		io_req_map_rw(req, iovec, s->fast_iov, &s->iter);
 530		iorw = req->async_data;
 531		/* we've copied and mapped the iter, ensure state is saved */
 532		iov_iter_save_state(&iorw->s.iter, &iorw->s.iter_state);
 533	}
 534	return 0;
 535}
 536
 537static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
 538{
 539	struct io_async_rw *iorw = req->async_data;
 540	struct iovec *iov;
 541	int ret;
 542
 543	/* submission path, ->uring_lock should already be taken */
 544	ret = io_import_iovec(rw, req, &iov, &iorw->s, 0);
 545	if (unlikely(ret < 0))
 546		return ret;
 547
 548	iorw->bytes_done = 0;
 549	iorw->free_iovec = iov;
 550	if (iov)
 551		req->flags |= REQ_F_NEED_CLEANUP;
 552	return 0;
 553}
 554
 555int io_readv_prep_async(struct io_kiocb *req)
 556{
 557	return io_rw_prep_async(req, ITER_DEST);
 558}
 559
 560int io_writev_prep_async(struct io_kiocb *req)
 561{
 562	return io_rw_prep_async(req, ITER_SOURCE);
 563}
 564
 565/*
 566 * This is our waitqueue callback handler, registered through __folio_lock_async()
  567 * when we initially tried to do the IO with the iocb and armed our waitqueue.
 568 * This gets called when the page is unlocked, and we generally expect that to
 569 * happen when the page IO is completed and the page is now uptodate. This will
 570 * queue a task_work based retry of the operation, attempting to copy the data
 571 * again. If the latter fails because the page was NOT uptodate, then we will
 572 * do a thread based blocking retry of the operation. That's the unexpected
 573 * slow path.
 574 */
 575static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
 576			     int sync, void *arg)
 577{
 578	struct wait_page_queue *wpq;
 579	struct io_kiocb *req = wait->private;
 580	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 581	struct wait_page_key *key = arg;
 582
 583	wpq = container_of(wait, struct wait_page_queue, wait);
 584
 585	if (!wake_page_match(wpq, key))
 586		return 0;
 587
 588	rw->kiocb.ki_flags &= ~IOCB_WAITQ;
 589	list_del_init(&wait->entry);
 590	io_req_task_queue(req);
 591	return 1;
 592}
 593
 594/*
 595 * This controls whether a given IO request should be armed for async page
 596 * based retry. If we return false here, the request is handed to the async
 597 * worker threads for retry. If we're doing buffered reads on a regular file,
 598 * we prepare a private wait_page_queue entry and retry the operation. This
 599 * will either succeed because the page is now uptodate and unlocked, or it
 600 * will register a callback when the page is unlocked at IO completion. Through
 601 * that callback, io_uring uses task_work to setup a retry of the operation.
 602 * That retry will attempt the buffered read again. The retry will generally
 603 * succeed, or in rare cases where it fails, we then fall back to using the
 604 * async worker threads for a blocking retry.
 605 */
 606static bool io_rw_should_retry(struct io_kiocb *req)
 607{
 608	struct io_async_rw *io = req->async_data;
 609	struct wait_page_queue *wait = &io->wpq;
 610	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 611	struct kiocb *kiocb = &rw->kiocb;
 612
 613	/* never retry for NOWAIT, we just complete with -EAGAIN */
 614	if (req->flags & REQ_F_NOWAIT)
 615		return false;
 616
 617	/* Only for buffered IO */
 618	if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI))
 619		return false;
 620
 621	/*
 622	 * just use poll if we can, and don't attempt if the fs doesn't
 623	 * support callback based unlocks
 624	 */
 625	if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC))
 626		return false;
 627
 628	wait->wait.func = io_async_buf_func;
 629	wait->wait.private = req;
 630	wait->wait.flags = 0;
 631	INIT_LIST_HEAD(&wait->wait.entry);
 632	kiocb->ki_flags |= IOCB_WAITQ;
 633	kiocb->ki_flags &= ~IOCB_NOWAIT;
 634	kiocb->ki_waitq = wait;
 635	return true;
 636}
 637
 638static inline int io_iter_do_read(struct io_rw *rw, struct iov_iter *iter)
 639{
 640	struct file *file = rw->kiocb.ki_filp;
 641
 642	if (likely(file->f_op->read_iter))
 643		return call_read_iter(file, &rw->kiocb, iter);
 644	else if (file->f_op->read)
 645		return loop_rw_iter(READ, rw, iter);
 646	else
 647		return -EINVAL;
 648}
 649
 650static bool need_complete_io(struct io_kiocb *req)
 651{
 652	return req->flags & REQ_F_ISREG ||
 653		S_ISBLK(file_inode(req->file)->i_mode);
 654}
 655
 656static int io_rw_init_file(struct io_kiocb *req, fmode_t mode)
 657{
 658	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 659	struct kiocb *kiocb = &rw->kiocb;
 660	struct io_ring_ctx *ctx = req->ctx;
 661	struct file *file = req->file;
 662	int ret;
 663
 664	if (unlikely(!file || !(file->f_mode & mode)))
 665		return -EBADF;
 666
 667	if (!io_req_ffs_set(req))
 668		req->flags |= io_file_get_flags(file) << REQ_F_SUPPORT_NOWAIT_BIT;
 669
 670	kiocb->ki_flags = file->f_iocb_flags;
 671	ret = kiocb_set_rw_flags(kiocb, rw->flags);
 672	if (unlikely(ret))
 673		return ret;
 674	kiocb->ki_flags |= IOCB_ALLOC_CACHE;
 675
 676	/*
 677	 * If the file is marked O_NONBLOCK, still allow retry for it if it
 678	 * supports async. Otherwise it's impossible to use O_NONBLOCK files
  679	 * reliably. If not, or if IOCB_NOWAIT is set, don't retry.
 680	 */
 681	if ((kiocb->ki_flags & IOCB_NOWAIT) ||
 682	    ((file->f_flags & O_NONBLOCK) && !io_file_supports_nowait(req)))
 683		req->flags |= REQ_F_NOWAIT;
 684
 685	if (ctx->flags & IORING_SETUP_IOPOLL) {
 686		if (!(kiocb->ki_flags & IOCB_DIRECT) || !file->f_op->iopoll)
 687			return -EOPNOTSUPP;
 688
 689		kiocb->private = NULL;
 690		kiocb->ki_flags |= IOCB_HIPRI;
 691		kiocb->ki_complete = io_complete_rw_iopoll;
 692		req->iopoll_completed = 0;
 693	} else {
 694		if (kiocb->ki_flags & IOCB_HIPRI)
 695			return -EINVAL;
 696		kiocb->ki_complete = io_complete_rw;
 697	}
 698
 699	return 0;
 700}
 701
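io_rw_init_file() is also where the IOPOLL constraints are enforced: on a ring created with IORING_SETUP_IOPOLL, a read/write is rejected with -EOPNOTSUPP unless the kiocb is doing direct IO on a file whose f_op implements ->iopoll. In userspace terms the ring flag only makes sense together with O_DIRECT files on suitable devices; a hedged sketch, with paths and queue depth purely illustrative:

#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>
#include <errno.h>
#include <liburing.h>

/* an IOPOLL ring only accepts direct IO on files whose f_op has ->iopoll */
static int setup_iopoll_ring(struct io_uring *ring, const char *path, int *fd)
{
	int ret;

	*fd = open(path, O_RDONLY | O_DIRECT);
	if (*fd < 0)
		return -errno;
	ret = io_uring_queue_init(64, ring, IORING_SETUP_IOPOLL);
	if (ret < 0)
		close(*fd);
	return ret;
}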
 702int io_read(struct io_kiocb *req, unsigned int issue_flags)
 703{
 704	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 705	struct io_rw_state __s, *s = &__s;
 706	struct iovec *iovec;
 707	struct kiocb *kiocb = &rw->kiocb;
 708	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
 709	struct io_async_rw *io;
 710	ssize_t ret, ret2;
 711	loff_t *ppos;
 712
 713	if (!req_has_async_data(req)) {
 714		ret = io_import_iovec(ITER_DEST, req, &iovec, s, issue_flags);
 715		if (unlikely(ret < 0))
 716			return ret;
 717	} else {
 718		io = req->async_data;
 719		s = &io->s;
 720
 721		/*
 722		 * Safe and required to re-import if we're using provided
 723		 * buffers, as we dropped the selected one before retry.
 724		 */
 725		if (io_do_buffer_select(req)) {
 726			ret = io_import_iovec(ITER_DEST, req, &iovec, s, issue_flags);
 727			if (unlikely(ret < 0))
 728				return ret;
 729		}
 730
 731		/*
 732		 * We come here from an earlier attempt, restore our state to
 733		 * match in case it doesn't. It's cheap enough that we don't
 734		 * need to make this conditional.
 735		 */
 736		iov_iter_restore(&s->iter, &s->iter_state);
 737		iovec = NULL;
 738	}
 739	ret = io_rw_init_file(req, FMODE_READ);
 740	if (unlikely(ret)) {
 741		kfree(iovec);
 742		return ret;
 743	}
 744	req->cqe.res = iov_iter_count(&s->iter);
 745
 746	if (force_nonblock) {
 747		/* If the file doesn't support async, just async punt */
 748		if (unlikely(!io_file_supports_nowait(req))) {
 749			ret = io_setup_async_rw(req, iovec, s, true);
 750			return ret ?: -EAGAIN;
 751		}
 752		kiocb->ki_flags |= IOCB_NOWAIT;
 753	} else {
 754		/* Ensure we clear previously set non-block flag */
 755		kiocb->ki_flags &= ~IOCB_NOWAIT;
 756	}
 757
 758	ppos = io_kiocb_update_pos(req);
 759
 760	ret = rw_verify_area(READ, req->file, ppos, req->cqe.res);
 761	if (unlikely(ret)) {
 762		kfree(iovec);
 763		return ret;
 764	}
 765
 766	ret = io_iter_do_read(rw, &s->iter);
 767
 768	if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
 769		req->flags &= ~REQ_F_REISSUE;
 770		/* if we can poll, just do that */
 771		if (req->opcode == IORING_OP_READ && file_can_poll(req->file))
 772			return -EAGAIN;
 773		/* IOPOLL retry should happen for io-wq threads */
 774		if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
 775			goto done;
 776		/* no retry on NONBLOCK nor RWF_NOWAIT */
 777		if (req->flags & REQ_F_NOWAIT)
 778			goto done;
 779		ret = 0;
 780	} else if (ret == -EIOCBQUEUED) {
 781		if (iovec)
 782			kfree(iovec);
 783		return IOU_ISSUE_SKIP_COMPLETE;
 784	} else if (ret == req->cqe.res || ret <= 0 || !force_nonblock ||
 785		   (req->flags & REQ_F_NOWAIT) || !need_complete_io(req)) {
 786		/* read all, failed, already did sync or don't want to retry */
 787		goto done;
 788	}
 789
 790	/*
 791	 * Don't depend on the iter state matching what was consumed, or being
 792	 * untouched in case of error. Restore it and we'll advance it
 793	 * manually if we need to.
 794	 */
 795	iov_iter_restore(&s->iter, &s->iter_state);
 796
 797	ret2 = io_setup_async_rw(req, iovec, s, true);
 798	iovec = NULL;
 799	if (ret2) {
 800		ret = ret > 0 ? ret : ret2;
 801		goto done;
 802	}
 803
 804	io = req->async_data;
 805	s = &io->s;
 806	/*
 807	 * Now use our persistent iterator and state, if we aren't already.
 808	 * We've restored and mapped the iter to match.
 809	 */
 810
 811	do {
 812		/*
 813		 * We end up here because of a partial read, either from
 814		 * above or inside this loop. Advance the iter by the bytes
 815		 * that were consumed.
 816		 */
 817		iov_iter_advance(&s->iter, ret);
 818		if (!iov_iter_count(&s->iter))
 819			break;
 820		io->bytes_done += ret;
 821		iov_iter_save_state(&s->iter, &s->iter_state);
 822
 823		/* if we can retry, do so with the callbacks armed */
 824		if (!io_rw_should_retry(req)) {
 825			kiocb->ki_flags &= ~IOCB_WAITQ;
 826			return -EAGAIN;
 827		}
 828
 829		req->cqe.res = iov_iter_count(&s->iter);
 830		/*
 831		 * Now retry read with the IOCB_WAITQ parts set in the iocb. If
 832		 * we get -EIOCBQUEUED, then we'll get a notification when the
 833		 * desired page gets unlocked. We can also get a partial read
 834		 * here, and if we do, then just retry at the new offset.
 835		 */
 836		ret = io_iter_do_read(rw, &s->iter);
 837		if (ret == -EIOCBQUEUED)
 838			return IOU_ISSUE_SKIP_COMPLETE;
 839		/* we got some bytes, but not all. retry. */
 840		kiocb->ki_flags &= ~IOCB_WAITQ;
 841		iov_iter_restore(&s->iter, &s->iter_state);
 842	} while (ret > 0);
 843done:
  844	/* it's faster to check here than to delegate to kfree */
 845	if (iovec)
 846		kfree(iovec);
 847	return kiocb_done(req, ret, issue_flags);
 848}
 849
 850int io_write(struct io_kiocb *req, unsigned int issue_flags)
 851{
 852	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 853	struct io_rw_state __s, *s = &__s;
 854	struct iovec *iovec;
 855	struct kiocb *kiocb = &rw->kiocb;
 856	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
 857	ssize_t ret, ret2;
 858	loff_t *ppos;
 859
 860	if (!req_has_async_data(req)) {
 861		ret = io_import_iovec(ITER_SOURCE, req, &iovec, s, issue_flags);
 862		if (unlikely(ret < 0))
 863			return ret;
 864	} else {
 865		struct io_async_rw *io = req->async_data;
 866
 867		s = &io->s;
 868		iov_iter_restore(&s->iter, &s->iter_state);
 869		iovec = NULL;
 870	}
 871	ret = io_rw_init_file(req, FMODE_WRITE);
 872	if (unlikely(ret)) {
 873		kfree(iovec);
 874		return ret;
 875	}
 876	req->cqe.res = iov_iter_count(&s->iter);
 877
 878	if (force_nonblock) {
 879		/* If the file doesn't support async, just async punt */
 880		if (unlikely(!io_file_supports_nowait(req)))
 881			goto copy_iov;
 882
  883		/* File path supports NOWAIT for non-direct IO only for block devices. */
 884		if (!(kiocb->ki_flags & IOCB_DIRECT) &&
 885			!(kiocb->ki_filp->f_mode & FMODE_BUF_WASYNC) &&
 886			(req->flags & REQ_F_ISREG))
 887			goto copy_iov;
 888
 889		kiocb->ki_flags |= IOCB_NOWAIT;
 890	} else {
 891		/* Ensure we clear previously set non-block flag */
 892		kiocb->ki_flags &= ~IOCB_NOWAIT;
 893	}
 894
 895	ppos = io_kiocb_update_pos(req);
 896
 897	ret = rw_verify_area(WRITE, req->file, ppos, req->cqe.res);
 898	if (unlikely(ret)) {
 899		kfree(iovec);
 900		return ret;
 901	}
 902
 903	/*
 904	 * Open-code file_start_write here to grab freeze protection,
 905	 * which will be released by another thread in
 906	 * io_complete_rw().  Fool lockdep by telling it the lock got
 907	 * released so that it doesn't complain about the held lock when
 908	 * we return to userspace.
 909	 */
 910	if (req->flags & REQ_F_ISREG) {
 911		sb_start_write(file_inode(req->file)->i_sb);
 912		__sb_writers_release(file_inode(req->file)->i_sb,
 913					SB_FREEZE_WRITE);
 914	}
 915	kiocb->ki_flags |= IOCB_WRITE;
 916
 917	if (likely(req->file->f_op->write_iter))
 918		ret2 = call_write_iter(req->file, kiocb, &s->iter);
 919	else if (req->file->f_op->write)
 920		ret2 = loop_rw_iter(WRITE, rw, &s->iter);
 921	else
 922		ret2 = -EINVAL;
 923
 924	if (req->flags & REQ_F_REISSUE) {
 925		req->flags &= ~REQ_F_REISSUE;
 926		ret2 = -EAGAIN;
 927	}
 928
 929	/*
 930	 * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
 931	 * retry them without IOCB_NOWAIT.
 932	 */
 933	if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
 934		ret2 = -EAGAIN;
 935	/* no retry on NONBLOCK nor RWF_NOWAIT */
 936	if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT))
 937		goto done;
 938	if (!force_nonblock || ret2 != -EAGAIN) {
 939		/* IOPOLL retry should happen for io-wq threads */
 940		if (ret2 == -EAGAIN && (req->ctx->flags & IORING_SETUP_IOPOLL))
 941			goto copy_iov;
 942
 943		if (ret2 != req->cqe.res && ret2 >= 0 && need_complete_io(req)) {
 944			struct io_async_rw *io;
 945
 946			trace_io_uring_short_write(req->ctx, kiocb->ki_pos - ret2,
 947						req->cqe.res, ret2);
 948
 949			/* This is a partial write. The file pos has already been
 950			 * updated, setup the async struct to complete the request
 951			 * in the worker. Also update bytes_done to account for
 952			 * the bytes already written.
 953			 */
 954			iov_iter_save_state(&s->iter, &s->iter_state);
 955			ret = io_setup_async_rw(req, iovec, s, true);
 956
 957			io = req->async_data;
 958			if (io)
 959				io->bytes_done += ret2;
 960
 961			if (kiocb->ki_flags & IOCB_WRITE)
 962				kiocb_end_write(req);
 963			return ret ? ret : -EAGAIN;
 964		}
 965done:
 966		ret = kiocb_done(req, ret2, issue_flags);
 967	} else {
 968copy_iov:
 969		iov_iter_restore(&s->iter, &s->iter_state);
 970		ret = io_setup_async_rw(req, iovec, s, false);
 971		if (!ret) {
 972			if (kiocb->ki_flags & IOCB_WRITE)
 973				kiocb_end_write(req);
 974			return -EAGAIN;
 975		}
 976		return ret;
 977	}
 978	/* it's reportedly faster than delegating the null check to kfree() */
 979	if (iovec)
 980		kfree(iovec);
 981	return ret;
 982}
 983
 984static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
 985{
 986	io_commit_cqring_flush(ctx);
 987	if (ctx->flags & IORING_SETUP_SQPOLL)
 988		io_cqring_wake(ctx);
 989}
 990
 991void io_rw_fail(struct io_kiocb *req)
 992{
 993	int res;
 994
 995	res = io_fixup_rw_res(req, req->cqe.res);
 996	io_req_set_res(req, res, req->cqe.flags);
 997}
 998
 999int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
1000{
1001	struct io_wq_work_node *pos, *start, *prev;
1002	unsigned int poll_flags = BLK_POLL_NOSLEEP;
1003	DEFINE_IO_COMP_BATCH(iob);
1004	int nr_events = 0;
1005
1006	/*
1007	 * Only spin for completions if we don't have multiple devices hanging
1008	 * off our complete list.
1009	 */
1010	if (ctx->poll_multi_queue || force_nonspin)
1011		poll_flags |= BLK_POLL_ONESHOT;
1012
1013	wq_list_for_each(pos, start, &ctx->iopoll_list) {
1014		struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);
1015		struct file *file = req->file;
1016		int ret;
1017
1018		/*
1019		 * Move completed and retryable entries to our local lists.
1020		 * If we find a request that requires polling, break out
1021		 * and complete those lists first, if we have entries there.
1022		 */
1023		if (READ_ONCE(req->iopoll_completed))
1024			break;
1025
1026		if (req->opcode == IORING_OP_URING_CMD) {
1027			struct io_uring_cmd *ioucmd;
1028
1029			ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
1030			ret = file->f_op->uring_cmd_iopoll(ioucmd, &iob,
1031								poll_flags);
1032		} else {
1033			struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
1034
1035			ret = file->f_op->iopoll(&rw->kiocb, &iob, poll_flags);
1036		}
1037		if (unlikely(ret < 0))
1038			return ret;
1039		else if (ret)
1040			poll_flags |= BLK_POLL_ONESHOT;
1041
1042		/* iopoll may have completed current req */
1043		if (!rq_list_empty(iob.req_list) ||
1044		    READ_ONCE(req->iopoll_completed))
1045			break;
1046	}
1047
1048	if (!rq_list_empty(iob.req_list))
1049		iob.complete(&iob);
1050	else if (!pos)
1051		return 0;
1052
1053	prev = start;
1054	wq_list_for_each_resume(pos, prev) {
1055		struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);
1056
1057		/* order with io_complete_rw_iopoll(), e.g. ->result updates */
1058		if (!smp_load_acquire(&req->iopoll_completed))
1059			break;
1060		nr_events++;
1061		if (unlikely(req->flags & REQ_F_CQE_SKIP))
1062			continue;
1063
1064		req->cqe.flags = io_put_kbuf(req, 0);
1065		if (unlikely(!__io_fill_cqe_req(ctx, req))) {
1066			spin_lock(&ctx->completion_lock);
1067			io_req_cqe_overflow(req);
1068			spin_unlock(&ctx->completion_lock);
1069		}
1070	}
1071
1072	if (unlikely(!nr_events))
1073		return 0;
1074
1075	io_commit_cqring(ctx);
1076	io_cqring_ev_posted_iopoll(ctx);
1077	pos = start ? start->next : ctx->iopoll_list.first;
1078	wq_list_cut(&ctx->iopoll_list, prev, start);
1079	io_free_batch_list(ctx, pos);
1080	return nr_events;
1081}
v6.8
   1// SPDX-License-Identifier: GPL-2.0
   2#include <linux/kernel.h>
   3#include <linux/errno.h>
   4#include <linux/fs.h>
   5#include <linux/file.h>
   6#include <linux/blk-mq.h>
   7#include <linux/mm.h>
   8#include <linux/slab.h>
   9#include <linux/fsnotify.h>
  10#include <linux/poll.h>
  11#include <linux/nospec.h>
  12#include <linux/compat.h>
  13#include <linux/io_uring/cmd.h>
  14
  15#include <uapi/linux/io_uring.h>
  16
  17#include "io_uring.h"
  18#include "opdef.h"
  19#include "kbuf.h"
  20#include "rsrc.h"
  21#include "poll.h"
  22#include "rw.h"
  23
  24struct io_rw {
  25	/* NOTE: kiocb has the file as the first member, so don't do it here */
  26	struct kiocb			kiocb;
  27	u64				addr;
  28	u32				len;
  29	rwf_t				flags;
  30};
  31
  32static inline bool io_file_supports_nowait(struct io_kiocb *req)
  33{
  34	return req->flags & REQ_F_SUPPORT_NOWAIT;
  35}
  36
  37#ifdef CONFIG_COMPAT
  38static int io_iov_compat_buffer_select_prep(struct io_rw *rw)
  39{
  40	struct compat_iovec __user *uiov;
  41	compat_ssize_t clen;
  42
  43	uiov = u64_to_user_ptr(rw->addr);
  44	if (!access_ok(uiov, sizeof(*uiov)))
  45		return -EFAULT;
  46	if (__get_user(clen, &uiov->iov_len))
  47		return -EFAULT;
  48	if (clen < 0)
  49		return -EINVAL;
  50
  51	rw->len = clen;
  52	return 0;
  53}
  54#endif
  55
  56static int io_iov_buffer_select_prep(struct io_kiocb *req)
  57{
  58	struct iovec __user *uiov;
  59	struct iovec iov;
  60	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
  61
  62	if (rw->len != 1)
  63		return -EINVAL;
  64
  65#ifdef CONFIG_COMPAT
  66	if (req->ctx->compat)
  67		return io_iov_compat_buffer_select_prep(rw);
  68#endif
  69
  70	uiov = u64_to_user_ptr(rw->addr);
  71	if (copy_from_user(&iov, uiov, sizeof(*uiov)))
  72		return -EFAULT;
  73	rw->len = iov.iov_len;
  74	return 0;
  75}
  76
  77int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
  78{
  79	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
  80	unsigned ioprio;
  81	int ret;
  82
  83	rw->kiocb.ki_pos = READ_ONCE(sqe->off);
  84	/* used for fixed read/write too - just read unconditionally */
  85	req->buf_index = READ_ONCE(sqe->buf_index);
  86
  87	ioprio = READ_ONCE(sqe->ioprio);
  88	if (ioprio) {
  89		ret = ioprio_check_cap(ioprio);
  90		if (ret)
  91			return ret;
  92
  93		rw->kiocb.ki_ioprio = ioprio;
  94	} else {
  95		rw->kiocb.ki_ioprio = get_current_ioprio();
  96	}
  97	rw->kiocb.dio_complete = NULL;
  98
  99	rw->addr = READ_ONCE(sqe->addr);
 100	rw->len = READ_ONCE(sqe->len);
 101	rw->flags = READ_ONCE(sqe->rw_flags);
 102	return 0;
 103}
 104
 105int io_prep_rwv(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 106{
 107	int ret;
 108
 109	ret = io_prep_rw(req, sqe);
 110	if (unlikely(ret))
 111		return ret;
 112
 113	/*
  114	 * Have to do this validation here, as rw->len might have changed by
  115	 * the time io_read() runs, due to buffer selection
 116	 */
 117	if (req->flags & REQ_F_BUFFER_SELECT)
 118		return io_iov_buffer_select_prep(req);
 119
 120	return 0;
 121}
 122
 123int io_prep_rw_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 124{
 125	struct io_ring_ctx *ctx = req->ctx;
 126	u16 index;
 127	int ret;
 128
 129	ret = io_prep_rw(req, sqe);
 130	if (unlikely(ret))
 131		return ret;
 132
 133	if (unlikely(req->buf_index >= ctx->nr_user_bufs))
 134		return -EFAULT;
 135	index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
 136	req->imu = ctx->user_bufs[index];
 137	io_req_set_rsrc_node(req, ctx, 0);
 138	return 0;
 139}
 140
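In v6.8 the fixed-buffer lookup has moved into its own prep hook: io_prep_rw_fixed() resolves sqe->buf_index against the table of buffers the application registered up front, so the fixed read/write variants skip the per-request pin/map work. A hedged liburing sketch of the matching userspace side, with a single registered buffer and illustrative sizes:

#include <liburing.h>

/* register one buffer, then read into it via its index rather than its address */
static int queue_fixed_read(struct io_uring *ring, int fd)
{
	static char buf[4096];
	struct iovec reg = { .iov_base = buf, .iov_len = sizeof(buf) };
	struct io_uring_sqe *sqe;
	int ret;

	/* populates ctx->user_bufs[0]; normally done once at ring setup */
	ret = io_uring_register_buffers(ring, &reg, 1);
	if (ret < 0)
		return ret;

	sqe = io_uring_get_sqe(ring);
	/* the final 0 is sqe->buf_index, which io_prep_rw_fixed() looks up */
	io_uring_prep_read_fixed(sqe, fd, buf, sizeof(buf), 0, 0);
	return io_uring_submit(ring);
}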
 141/*
  142 * Multishot read is prepared just like a normal read/write request; the
  143 * only difference is that we set the MULTISHOT flag.
 144 */
 145int io_read_mshot_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 146{
 147	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 148	int ret;
 149
 150	/* must be used with provided buffers */
 151	if (!(req->flags & REQ_F_BUFFER_SELECT))
 152		return -EINVAL;
 153
 154	ret = io_prep_rw(req, sqe);
 155	if (unlikely(ret))
 156		return ret;
 157
 158	if (rw->addr || rw->len)
 159		return -EINVAL;
 160
 161	req->flags |= REQ_F_APOLL_MULTISHOT;
 162	return 0;
 163}
 164
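The multishot prep above demands provided buffers and rejects any addr/len in the SQE, because each completion grabs a fresh buffer from the group and posts its own CQE until the request is cancelled or runs out of buffers. A hedged sketch of arming one, filling the SQE fields directly so only UAPI names are relied on (recent liburing also ships a prep helper for this); it assumes headers new enough to define IORING_OP_READ_MULTISHOT, a pollable fd, and a buffer ring already registered for group 0:

#include <errno.h>
#include <string.h>
#include <liburing.h>

/* arm a multishot read: no addr/len, buffers come from provided-buffer group 0 */
static int queue_multishot_read(struct io_uring *ring, int fd)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	if (!sqe)
		return -EBUSY;
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode	= IORING_OP_READ_MULTISHOT;
	sqe->fd		= fd;
	sqe->flags	= IOSQE_BUFFER_SELECT;	/* mandatory, see the prep above */
	sqe->buf_group	= 0;			/* group the buffer ring was registered for */
	/* addr and len stay 0, matching the rw->addr/rw->len check above */
	return io_uring_submit(ring);
}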
 165void io_readv_writev_cleanup(struct io_kiocb *req)
 166{
 167	struct io_async_rw *io = req->async_data;
 168
 169	kfree(io->free_iovec);
 170}
 171
 172static inline loff_t *io_kiocb_update_pos(struct io_kiocb *req)
 173{
 174	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 175
 176	if (rw->kiocb.ki_pos != -1)
 177		return &rw->kiocb.ki_pos;
 178
 179	if (!(req->file->f_mode & FMODE_STREAM)) {
 180		req->flags |= REQ_F_CUR_POS;
 181		rw->kiocb.ki_pos = req->file->f_pos;
 182		return &rw->kiocb.ki_pos;
 183	}
 184
 185	rw->kiocb.ki_pos = 0;
 186	return NULL;
 187}
 188
 189static void io_req_task_queue_reissue(struct io_kiocb *req)
 190{
 191	req->io_task_work.func = io_queue_iowq;
 192	io_req_task_work_add(req);
 193}
 194
 195#ifdef CONFIG_BLOCK
 196static bool io_resubmit_prep(struct io_kiocb *req)
 197{
 198	struct io_async_rw *io = req->async_data;
 199
 200	if (!req_has_async_data(req))
 201		return !io_req_prep_async(req);
 202	iov_iter_restore(&io->s.iter, &io->s.iter_state);
 203	return true;
 204}
 205
 206static bool io_rw_should_reissue(struct io_kiocb *req)
 207{
 208	umode_t mode = file_inode(req->file)->i_mode;
 209	struct io_ring_ctx *ctx = req->ctx;
 210
 211	if (!S_ISBLK(mode) && !S_ISREG(mode))
 212		return false;
 213	if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() &&
 214	    !(ctx->flags & IORING_SETUP_IOPOLL)))
 215		return false;
 216	/*
 217	 * If ref is dying, we might be running poll reap from the exit work.
 218	 * Don't attempt to reissue from that path, just let it fail with
 219	 * -EAGAIN.
 220	 */
 221	if (percpu_ref_is_dying(&ctx->refs))
 222		return false;
 223	/*
 224	 * Play it safe and assume not safe to re-import and reissue if we're
 225	 * not in the original thread group (or in task context).
 226	 */
 227	if (!same_thread_group(req->task, current) || !in_task())
 228		return false;
 229	return true;
 230}
 231#else
 232static bool io_resubmit_prep(struct io_kiocb *req)
 233{
 234	return false;
 235}
 236static bool io_rw_should_reissue(struct io_kiocb *req)
 237{
 238	return false;
 239}
 240#endif
 241
 242static void io_req_end_write(struct io_kiocb *req)
 243{
 244	if (req->flags & REQ_F_ISREG) {
 245		struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 246
 247		kiocb_end_write(&rw->kiocb);
 248	}
 249}
 250
 251/*
 252 * Trigger the notifications after having done some IO, and finish the write
 253 * accounting, if any.
 254 */
 255static void io_req_io_end(struct io_kiocb *req)
 256{
 257	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 258
 259	if (rw->kiocb.ki_flags & IOCB_WRITE) {
 260		io_req_end_write(req);
 261		fsnotify_modify(req->file);
 262	} else {
 263		fsnotify_access(req->file);
 264	}
 265}
 266
 267static bool __io_complete_rw_common(struct io_kiocb *req, long res)
 268{
 269	if (unlikely(res != req->cqe.res)) {
 270		if ((res == -EAGAIN || res == -EOPNOTSUPP) &&
 271		    io_rw_should_reissue(req)) {
 272			/*
 273			 * Reissue will start accounting again, finish the
 274			 * current cycle.
 275			 */
 276			io_req_io_end(req);
 277			req->flags |= REQ_F_REISSUE | REQ_F_PARTIAL_IO;
 278			return true;
 279		}
 280		req_set_fail(req);
 281		req->cqe.res = res;
 282	}
 283	return false;
 284}
 285
 286static inline int io_fixup_rw_res(struct io_kiocb *req, long res)
 287{
 288	struct io_async_rw *io = req->async_data;
 289
 290	/* add previously done IO, if any */
 291	if (req_has_async_data(req) && io->bytes_done > 0) {
 292		if (res < 0)
 293			res = io->bytes_done;
 294		else
 295			res += io->bytes_done;
 296	}
 297	return res;
 298}
 299
 300void io_req_rw_complete(struct io_kiocb *req, struct io_tw_state *ts)
 301{
 302	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 303	struct kiocb *kiocb = &rw->kiocb;
 304
 305	if ((kiocb->ki_flags & IOCB_DIO_CALLER_COMP) && kiocb->dio_complete) {
 306		long res = kiocb->dio_complete(rw->kiocb.private);
 307
 308		io_req_set_res(req, io_fixup_rw_res(req, res), 0);
 309	}
 310
 311	io_req_io_end(req);
 312
 313	if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)) {
 314		unsigned issue_flags = ts->locked ? 0 : IO_URING_F_UNLOCKED;
 315
 316		req->cqe.flags |= io_put_kbuf(req, issue_flags);
 317	}
 318	io_req_task_complete(req, ts);
 319}
 320
 321static void io_complete_rw(struct kiocb *kiocb, long res)
 322{
 323	struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
 324	struct io_kiocb *req = cmd_to_io_kiocb(rw);
 325
 326	if (!kiocb->dio_complete || !(kiocb->ki_flags & IOCB_DIO_CALLER_COMP)) {
 327		if (__io_complete_rw_common(req, res))
 328			return;
 329		io_req_set_res(req, io_fixup_rw_res(req, res), 0);
 330	}
 331	req->io_task_work.func = io_req_rw_complete;
 332	__io_req_task_work_add(req, IOU_F_TWQ_LAZY_WAKE);
 333}
 334
 335static void io_complete_rw_iopoll(struct kiocb *kiocb, long res)
 336{
 337	struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
 338	struct io_kiocb *req = cmd_to_io_kiocb(rw);
 339
 340	if (kiocb->ki_flags & IOCB_WRITE)
 341		io_req_end_write(req);
 342	if (unlikely(res != req->cqe.res)) {
 343		if (res == -EAGAIN && io_rw_should_reissue(req)) {
 344			req->flags |= REQ_F_REISSUE | REQ_F_PARTIAL_IO;
 345			return;
 346		}
 347		req->cqe.res = res;
 348	}
 349
 350	/* order with io_iopoll_complete() checking ->iopoll_completed */
 351	smp_store_release(&req->iopoll_completed, 1);
 352}
 353
 354static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
 355{
 356	/* IO was queued async, completion will happen later */
 357	if (ret == -EIOCBQUEUED)
 358		return;
 359
 360	/* transform internal restart error codes */
 361	if (unlikely(ret < 0)) {
 362		switch (ret) {
 363		case -ERESTARTSYS:
 364		case -ERESTARTNOINTR:
 365		case -ERESTARTNOHAND:
 366		case -ERESTART_RESTARTBLOCK:
 367			/*
 368			 * We can't just restart the syscall, since previously
 369			 * submitted sqes may already be in progress. Just fail
 370			 * this IO with EINTR.
 371			 */
 372			ret = -EINTR;
 373			break;
 374		}
 375	}
 376
 377	INDIRECT_CALL_2(kiocb->ki_complete, io_complete_rw_iopoll,
 378			io_complete_rw, kiocb, ret);
 379}
 380
 381static int kiocb_done(struct io_kiocb *req, ssize_t ret,
 382		       unsigned int issue_flags)
 383{
 384	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 385	unsigned final_ret = io_fixup_rw_res(req, ret);
 386
 387	if (ret >= 0 && req->flags & REQ_F_CUR_POS)
 388		req->file->f_pos = rw->kiocb.ki_pos;
 389	if (ret >= 0 && (rw->kiocb.ki_complete == io_complete_rw)) {
 390		if (!__io_complete_rw_common(req, ret)) {
 391			/*
 392			 * Safe to call io_end from here as we're inline
 393			 * from the submission path.
 394			 */
 395			io_req_io_end(req);
 396			io_req_set_res(req, final_ret,
 397				       io_put_kbuf(req, issue_flags));
 398			return IOU_OK;
 399		}
 400	} else {
 401		io_rw_done(&rw->kiocb, ret);
 402	}
 403
 404	if (req->flags & REQ_F_REISSUE) {
 405		req->flags &= ~REQ_F_REISSUE;
 406		if (io_resubmit_prep(req))
 407			io_req_task_queue_reissue(req);
 408		else
 409			io_req_task_queue_fail(req, final_ret);
 410	}
 411	return IOU_ISSUE_SKIP_COMPLETE;
 412}
 413
 414static struct iovec *__io_import_iovec(int ddir, struct io_kiocb *req,
 415				       struct io_rw_state *s,
 416				       unsigned int issue_flags)
 417{
 418	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 419	struct iov_iter *iter = &s->iter;
 420	u8 opcode = req->opcode;
 421	struct iovec *iovec;
 422	void __user *buf;
 423	size_t sqe_len;
 424	ssize_t ret;
 425
 426	if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
 427		ret = io_import_fixed(ddir, iter, req->imu, rw->addr, rw->len);
 428		if (ret)
 429			return ERR_PTR(ret);
 430		return NULL;
 431	}
 432
 433	buf = u64_to_user_ptr(rw->addr);
 434	sqe_len = rw->len;
 435
 436	if (!io_issue_defs[opcode].vectored || req->flags & REQ_F_BUFFER_SELECT) {
 437		if (io_do_buffer_select(req)) {
 438			buf = io_buffer_select(req, &sqe_len, issue_flags);
 439			if (!buf)
 440				return ERR_PTR(-ENOBUFS);
 441			rw->addr = (unsigned long) buf;
 442			rw->len = sqe_len;
 443		}
 444
 445		ret = import_ubuf(ddir, buf, sqe_len, iter);
 446		if (ret)
 447			return ERR_PTR(ret);
 448		return NULL;
 449	}
 450
 451	iovec = s->fast_iov;
 452	ret = __import_iovec(ddir, buf, sqe_len, UIO_FASTIOV, &iovec, iter,
 453			      req->ctx->compat);
 454	if (unlikely(ret < 0))
 455		return ERR_PTR(ret);
 456	return iovec;
 457}
 458
 459static inline int io_import_iovec(int rw, struct io_kiocb *req,
 460				  struct iovec **iovec, struct io_rw_state *s,
 461				  unsigned int issue_flags)
 462{
 463	*iovec = __io_import_iovec(rw, req, s, issue_flags);
 464	if (IS_ERR(*iovec))
 465		return PTR_ERR(*iovec);
 466
 467	iov_iter_save_state(&s->iter, &s->iter_state);
 468	return 0;
 469}
 470
 471static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
 472{
 473	return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? NULL : &kiocb->ki_pos;
 474}
 475
 476/*
 477 * For files that don't have ->read_iter() and ->write_iter(), handle them
 478 * by looping over ->read() or ->write() manually.
 479 */
 480static ssize_t loop_rw_iter(int ddir, struct io_rw *rw, struct iov_iter *iter)
 481{
 482	struct kiocb *kiocb = &rw->kiocb;
 483	struct file *file = kiocb->ki_filp;
 484	ssize_t ret = 0;
 485	loff_t *ppos;
 486
 487	/*
 488	 * Don't support polled IO through this interface, and we can't
 489	 * support non-blocking either. For the latter, this just causes
 490	 * the kiocb to be handled from an async context.
 491	 */
 492	if (kiocb->ki_flags & IOCB_HIPRI)
 493		return -EOPNOTSUPP;
 494	if ((kiocb->ki_flags & IOCB_NOWAIT) &&
 495	    !(kiocb->ki_filp->f_flags & O_NONBLOCK))
 496		return -EAGAIN;
 497
 498	ppos = io_kiocb_ppos(kiocb);
 499
 500	while (iov_iter_count(iter)) {
 501		void __user *addr;
 502		size_t len;
 503		ssize_t nr;
 504
 505		if (iter_is_ubuf(iter)) {
 506			addr = iter->ubuf + iter->iov_offset;
 507			len = iov_iter_count(iter);
 508		} else if (!iov_iter_is_bvec(iter)) {
 509			addr = iter_iov_addr(iter);
 510			len = iter_iov_len(iter);
 511		} else {
 512			addr = u64_to_user_ptr(rw->addr);
 513			len = rw->len;
 514		}
 515
 516		if (ddir == READ)
 517			nr = file->f_op->read(file, addr, len, ppos);
 518		else
 519			nr = file->f_op->write(file, addr, len, ppos);
 520
 521		if (nr < 0) {
 522			if (!ret)
 523				ret = nr;
 524			break;
 525		}
 526		ret += nr;
 527		if (!iov_iter_is_bvec(iter)) {
 528			iov_iter_advance(iter, nr);
 529		} else {
 530			rw->addr += nr;
 531			rw->len -= nr;
 532			if (!rw->len)
 533				break;
 534		}
 535		if (nr != len)
 536			break;
 537	}
 538
 539	return ret;
 540}
 541
 542static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec,
 543			  const struct iovec *fast_iov, struct iov_iter *iter)
 544{
 545	struct io_async_rw *io = req->async_data;
 546
 547	memcpy(&io->s.iter, iter, sizeof(*iter));
 548	io->free_iovec = iovec;
 549	io->bytes_done = 0;
 550	/* can only be fixed buffers, no need to do anything */
 551	if (iov_iter_is_bvec(iter) || iter_is_ubuf(iter))
 552		return;
 553	if (!iovec) {
 554		unsigned iov_off = 0;
 555
 556		io->s.iter.__iov = io->s.fast_iov;
 557		if (iter->__iov != fast_iov) {
 558			iov_off = iter_iov(iter) - fast_iov;
 559			io->s.iter.__iov += iov_off;
 560		}
 561		if (io->s.fast_iov != fast_iov)
 562			memcpy(io->s.fast_iov + iov_off, fast_iov + iov_off,
 563			       sizeof(struct iovec) * iter->nr_segs);
 564	} else {
 565		req->flags |= REQ_F_NEED_CLEANUP;
 566	}
 567}
 568
 569static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
 570			     struct io_rw_state *s, bool force)
 571{
 572	if (!force && !io_cold_defs[req->opcode].prep_async)
 573		return 0;
 574	/* opcode type doesn't need async data */
 575	if (!io_cold_defs[req->opcode].async_size)
 576		return 0;
 577	if (!req_has_async_data(req)) {
 578		struct io_async_rw *iorw;
 579
 580		if (io_alloc_async_data(req)) {
 581			kfree(iovec);
 582			return -ENOMEM;
 583		}
 584
 585		io_req_map_rw(req, iovec, s->fast_iov, &s->iter);
 586		iorw = req->async_data;
 587		/* we've copied and mapped the iter, ensure state is saved */
 588		iov_iter_save_state(&iorw->s.iter, &iorw->s.iter_state);
 589	}
 590	return 0;
 591}
 592
 593static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
 594{
 595	struct io_async_rw *iorw = req->async_data;
 596	struct iovec *iov;
 597	int ret;
 598
 599	iorw->bytes_done = 0;
 600	iorw->free_iovec = NULL;
 601
 602	/* submission path, ->uring_lock should already be taken */
 603	ret = io_import_iovec(rw, req, &iov, &iorw->s, 0);
 604	if (unlikely(ret < 0))
 605		return ret;
 606
 607	if (iov) {
 608		iorw->free_iovec = iov;
 609		req->flags |= REQ_F_NEED_CLEANUP;
 610	}
 611
 612	return 0;
 613}
 614
 615int io_readv_prep_async(struct io_kiocb *req)
 616{
 617	return io_rw_prep_async(req, ITER_DEST);
 618}
 619
 620int io_writev_prep_async(struct io_kiocb *req)
 621{
 622	return io_rw_prep_async(req, ITER_SOURCE);
 623}
 624
 625/*
 626 * This is our waitqueue callback handler, registered through __folio_lock_async()
  627 * when we initially tried to do the IO with the iocb and armed our waitqueue.
 628 * This gets called when the page is unlocked, and we generally expect that to
 629 * happen when the page IO is completed and the page is now uptodate. This will
 630 * queue a task_work based retry of the operation, attempting to copy the data
 631 * again. If the latter fails because the page was NOT uptodate, then we will
 632 * do a thread based blocking retry of the operation. That's the unexpected
 633 * slow path.
 634 */
 635static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
 636			     int sync, void *arg)
 637{
 638	struct wait_page_queue *wpq;
 639	struct io_kiocb *req = wait->private;
 640	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 641	struct wait_page_key *key = arg;
 642
 643	wpq = container_of(wait, struct wait_page_queue, wait);
 644
 645	if (!wake_page_match(wpq, key))
 646		return 0;
 647
 648	rw->kiocb.ki_flags &= ~IOCB_WAITQ;
 649	list_del_init(&wait->entry);
 650	io_req_task_queue(req);
 651	return 1;
 652}
 653
 654/*
 655 * This controls whether a given IO request should be armed for async page
 656 * based retry. If we return false here, the request is handed to the async
 657 * worker threads for retry. If we're doing buffered reads on a regular file,
 658 * we prepare a private wait_page_queue entry and retry the operation. This
 659 * will either succeed because the page is now uptodate and unlocked, or it
 660 * will register a callback when the page is unlocked at IO completion. Through
 661 * that callback, io_uring uses task_work to setup a retry of the operation.
 662 * That retry will attempt the buffered read again. The retry will generally
 663 * succeed, or in rare cases where it fails, we then fall back to using the
 664 * async worker threads for a blocking retry.
 665 */
 666static bool io_rw_should_retry(struct io_kiocb *req)
 667{
 668	struct io_async_rw *io = req->async_data;
 669	struct wait_page_queue *wait = &io->wpq;
 670	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 671	struct kiocb *kiocb = &rw->kiocb;
 672
 673	/* never retry for NOWAIT, we just complete with -EAGAIN */
 674	if (req->flags & REQ_F_NOWAIT)
 675		return false;
 676
 677	/* Only for buffered IO */
 678	if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI))
 679		return false;
 680
 681	/*
 682	 * just use poll if we can, and don't attempt if the fs doesn't
 683	 * support callback based unlocks
 684	 */
 685	if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC))
 686		return false;
 687
 688	wait->wait.func = io_async_buf_func;
 689	wait->wait.private = req;
 690	wait->wait.flags = 0;
 691	INIT_LIST_HEAD(&wait->wait.entry);
 692	kiocb->ki_flags |= IOCB_WAITQ;
 693	kiocb->ki_flags &= ~IOCB_NOWAIT;
 694	kiocb->ki_waitq = wait;
 695	return true;
 696}
 697
 698static inline int io_iter_do_read(struct io_rw *rw, struct iov_iter *iter)
 699{
 700	struct file *file = rw->kiocb.ki_filp;
 701
 702	if (likely(file->f_op->read_iter))
 703		return call_read_iter(file, &rw->kiocb, iter);
 704	else if (file->f_op->read)
 705		return loop_rw_iter(READ, rw, iter);
 706	else
 707		return -EINVAL;
 708}
 709
 710static bool need_complete_io(struct io_kiocb *req)
 711{
 712	return req->flags & REQ_F_ISREG ||
 713		S_ISBLK(file_inode(req->file)->i_mode);
 714}
 715
 716static int io_rw_init_file(struct io_kiocb *req, fmode_t mode)
 717{
 718	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 719	struct kiocb *kiocb = &rw->kiocb;
 720	struct io_ring_ctx *ctx = req->ctx;
 721	struct file *file = req->file;
 722	int ret;
 723
 724	if (unlikely(!file || !(file->f_mode & mode)))
 725		return -EBADF;
 726
 727	if (!(req->flags & REQ_F_FIXED_FILE))
 728		req->flags |= io_file_get_flags(file);
 729
 730	kiocb->ki_flags = file->f_iocb_flags;
 731	ret = kiocb_set_rw_flags(kiocb, rw->flags);
 732	if (unlikely(ret))
 733		return ret;
 734	kiocb->ki_flags |= IOCB_ALLOC_CACHE;
 735
 736	/*
 737	 * If the file is marked O_NONBLOCK, still allow retry for it if it
 738	 * supports async. Otherwise it's impossible to use O_NONBLOCK files
  739	 * reliably. If not, or if IOCB_NOWAIT is set, don't retry.
 740	 */
 741	if ((kiocb->ki_flags & IOCB_NOWAIT) ||
 742	    ((file->f_flags & O_NONBLOCK) && !io_file_supports_nowait(req)))
 743		req->flags |= REQ_F_NOWAIT;
 744
 745	if (ctx->flags & IORING_SETUP_IOPOLL) {
 746		if (!(kiocb->ki_flags & IOCB_DIRECT) || !file->f_op->iopoll)
 747			return -EOPNOTSUPP;
 748
 749		kiocb->private = NULL;
 750		kiocb->ki_flags |= IOCB_HIPRI;
 751		kiocb->ki_complete = io_complete_rw_iopoll;
 752		req->iopoll_completed = 0;
 753	} else {
 754		if (kiocb->ki_flags & IOCB_HIPRI)
 755			return -EINVAL;
 756		kiocb->ki_complete = io_complete_rw;
 757	}
 758
 759	return 0;
 760}
 761
 762static int __io_read(struct io_kiocb *req, unsigned int issue_flags)
 763{
 764	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 765	struct io_rw_state __s, *s = &__s;
 766	struct iovec *iovec;
 767	struct kiocb *kiocb = &rw->kiocb;
 768	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
 769	struct io_async_rw *io;
 770	ssize_t ret, ret2;
 771	loff_t *ppos;
 772
 773	if (!req_has_async_data(req)) {
 774		ret = io_import_iovec(ITER_DEST, req, &iovec, s, issue_flags);
 775		if (unlikely(ret < 0))
 776			return ret;
 777	} else {
 778		io = req->async_data;
 779		s = &io->s;
 780
 781		/*
 782		 * Safe and required to re-import if we're using provided
 783		 * buffers, as we dropped the selected one before retry.
 784		 */
 785		if (io_do_buffer_select(req)) {
 786			ret = io_import_iovec(ITER_DEST, req, &iovec, s, issue_flags);
 787			if (unlikely(ret < 0))
 788				return ret;
 789		}
 790
 791		/*
 792		 * We come here from an earlier attempt, restore our state to
 793		 * match in case it doesn't. It's cheap enough that we don't
 794		 * need to make this conditional.
 795		 */
 796		iov_iter_restore(&s->iter, &s->iter_state);
 797		iovec = NULL;
 798	}
 799	ret = io_rw_init_file(req, FMODE_READ);
 800	if (unlikely(ret)) {
 801		kfree(iovec);
 802		return ret;
 803	}
 804	req->cqe.res = iov_iter_count(&s->iter);
 805
 806	if (force_nonblock) {
 807		/* If the file doesn't support async, just async punt */
 808		if (unlikely(!io_file_supports_nowait(req))) {
 809			ret = io_setup_async_rw(req, iovec, s, true);
 810			return ret ?: -EAGAIN;
 811		}
 812		kiocb->ki_flags |= IOCB_NOWAIT;
 813	} else {
 814		/* Ensure we clear previously set non-block flag */
 815		kiocb->ki_flags &= ~IOCB_NOWAIT;
 816	}
 817
 818	ppos = io_kiocb_update_pos(req);
 819
 820	ret = rw_verify_area(READ, req->file, ppos, req->cqe.res);
 821	if (unlikely(ret)) {
 822		kfree(iovec);
 823		return ret;
 824	}
 825
 826	ret = io_iter_do_read(rw, &s->iter);
 827
 828	if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
 829		req->flags &= ~REQ_F_REISSUE;
 830		/*
 831		 * If we can poll, just do that. For a vectored read, we'll
 832		 * need to copy state first.
 833		 */
 834		if (file_can_poll(req->file) && !io_issue_defs[req->opcode].vectored)
 835			return -EAGAIN;
 836		/* IOPOLL retry should happen for io-wq threads */
 837		if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
 838			goto done;
 839		/* no retry on NONBLOCK nor RWF_NOWAIT */
 840		if (req->flags & REQ_F_NOWAIT)
 841			goto done;
 842		ret = 0;
 843	} else if (ret == -EIOCBQUEUED) {
 844		if (iovec)
 845			kfree(iovec);
 846		return IOU_ISSUE_SKIP_COMPLETE;
 847	} else if (ret == req->cqe.res || ret <= 0 || !force_nonblock ||
 848		   (req->flags & REQ_F_NOWAIT) || !need_complete_io(req)) {
 849		/* read all, failed, already did sync or don't want to retry */
 850		goto done;
 851	}
 852
 853	/*
 854	 * Don't depend on the iter state matching what was consumed, or being
 855	 * untouched in case of error. Restore it and we'll advance it
 856	 * manually if we need to.
 857	 */
 858	iov_iter_restore(&s->iter, &s->iter_state);
 859
 860	ret2 = io_setup_async_rw(req, iovec, s, true);
 861	iovec = NULL;
 862	if (ret2) {
 863		ret = ret > 0 ? ret : ret2;
 864		goto done;
 865	}
 866
 867	io = req->async_data;
 868	s = &io->s;
 869	/*
 870	 * Now use our persistent iterator and state, if we aren't using them
 871	 * already. We've restored and mapped the iter to match.
 872	 */
 873
 874	do {
 875		/*
 876		 * We end up here because of a partial read, either from
 877		 * above or inside this loop. Advance the iter by the bytes
 878		 * that were consumed.
 879		 */
 880		iov_iter_advance(&s->iter, ret);
 881		if (!iov_iter_count(&s->iter))
 882			break;
 883		io->bytes_done += ret;
 884		iov_iter_save_state(&s->iter, &s->iter_state);
 885
 886		/* if we can retry, do so with the callbacks armed */
 887		if (!io_rw_should_retry(req)) {
 888			kiocb->ki_flags &= ~IOCB_WAITQ;
 889			return -EAGAIN;
 890		}
 891
 892		req->cqe.res = iov_iter_count(&s->iter);
 893		/*
 894		 * Now retry read with the IOCB_WAITQ parts set in the iocb. If
 895		 * we get -EIOCBQUEUED, then we'll get a notification when the
 896		 * desired page gets unlocked. We can also get a partial read
 897		 * here, and if we do, then just retry at the new offset.
 898		 */
 899		ret = io_iter_do_read(rw, &s->iter);
 900		if (ret == -EIOCBQUEUED)
 901			return IOU_ISSUE_SKIP_COMPLETE;
 902		/* we got some bytes, but not all. retry. */
 903		kiocb->ki_flags &= ~IOCB_WAITQ;
 904		iov_iter_restore(&s->iter, &s->iter_state);
 905	} while (ret > 0);
 906done:
 907	/* it's faster to check here than to delegate to kfree */
 908	if (iovec)
 909		kfree(iovec);
 910	return ret;
 911}
 912
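/*
 * Single-shot read: hand a successful __io_read() result to kiocb_done()
 * to post the completion; errors and punts are passed back to the issue
 * core as-is.
 */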
 913int io_read(struct io_kiocb *req, unsigned int issue_flags)
 914{
 915	int ret;
 916
 917	ret = __io_read(req, issue_flags);
 918	if (ret >= 0)
 919		return kiocb_done(req, ret, issue_flags);
 920
 921	return ret;
 922}
 923
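/*
 * Multishot read: each successful read posts a CQE with IORING_CQE_F_MORE
 * and leaves the request armed via poll; -EAGAIN recycles the provided
 * buffer and re-arms, while an error or CQ overflow terminates the
 * multishot.
 */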
 924int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
 925{
 926	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 927	unsigned int cflags = 0;
 928	int ret;
 929
 930	/*
 931	 * Multishot MUST be used on a pollable file
 932	 */
 933	if (!file_can_poll(req->file))
 934		return -EBADFD;
 935
 936	ret = __io_read(req, issue_flags);
 937
 938	/*
 939	 * If we get -EAGAIN, recycle our buffer and just let normal poll
 940	 * handling arm it.
 941	 */
 942	if (ret == -EAGAIN) {
 943		/*
 944		 * Reset rw->len to 0 again to avoid clamping future mshot
 945		 * reads, in case the buffer size varies.
 946		 */
 947		if (io_kbuf_recycle(req, issue_flags))
 948			rw->len = 0;
 949		return -EAGAIN;
 950	}
 951
 952	/*
 953	 * Any successful return value will keep the multishot read armed.
 954	 */
 955	if (ret > 0) {
 956		/*
 957		 * Put our buffer and post a CQE. If we fail to post a CQE, then
 958		 * jump to the termination path. This request is then done.
 959		 */
 960		cflags = io_put_kbuf(req, issue_flags);
 961		rw->len = 0; /* similarly to above, reset len to 0 */
 962
 963		if (io_fill_cqe_req_aux(req,
 964					issue_flags & IO_URING_F_COMPLETE_DEFER,
 965					ret, cflags | IORING_CQE_F_MORE)) {
 966			if (issue_flags & IO_URING_F_MULTISHOT) {
 967				/*
 968				 * Force retry, as we might have more data to
 969				 * be read and otherwise it won't get retried
 970				 * until (if ever) another poll is triggered.
 971				 */
 972				io_poll_multishot_retry(req);
 973				return IOU_ISSUE_SKIP_COMPLETE;
 974			}
 975			return -EAGAIN;
 976		}
 977	}
 978
 979	/*
 980	 * Either an error, or we've hit overflow posting the CQE. For any
 981	 * multishot request, hitting overflow will terminate it.
 982	 */
 983	io_req_set_res(req, ret, cflags);
 984	if (issue_flags & IO_URING_F_MULTISHOT)
 985		return IOU_STOP_MULTISHOT;
 986	return IOU_OK;
 987}
 988
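/*
 * Write path: mirrors __io_read(), except that a partial write on a file
 * that must complete fully is handed to io-wq with bytes_done tracking the
 * progress, since the file position has already advanced.
 */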
 989int io_write(struct io_kiocb *req, unsigned int issue_flags)
 990{
 991	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 992	struct io_rw_state __s, *s = &__s;
 993	struct iovec *iovec;
 994	struct kiocb *kiocb = &rw->kiocb;
 995	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
 996	ssize_t ret, ret2;
 997	loff_t *ppos;
 998
 999	if (!req_has_async_data(req)) {
1000		ret = io_import_iovec(ITER_SOURCE, req, &iovec, s, issue_flags);
1001		if (unlikely(ret < 0))
1002			return ret;
1003	} else {
1004		struct io_async_rw *io = req->async_data;
1005
1006		s = &io->s;
1007		iov_iter_restore(&s->iter, &s->iter_state);
1008		iovec = NULL;
1009	}
1010	ret = io_rw_init_file(req, FMODE_WRITE);
1011	if (unlikely(ret)) {
1012		kfree(iovec);
1013		return ret;
1014	}
1015	req->cqe.res = iov_iter_count(&s->iter);
1016
1017	if (force_nonblock) {
1018		/* If the file doesn't support async, just async punt */
1019		if (unlikely(!io_file_supports_nowait(req)))
1020			goto copy_iov;
1021
 1022		/* Buffered (non-direct) NOWAIT writes only work for block devices or FMODE_BUF_WASYNC files. */
1023		if (!(kiocb->ki_flags & IOCB_DIRECT) &&
1024			!(kiocb->ki_filp->f_mode & FMODE_BUF_WASYNC) &&
1025			(req->flags & REQ_F_ISREG))
1026			goto copy_iov;
1027
1028		kiocb->ki_flags |= IOCB_NOWAIT;
1029	} else {
1030		/* Ensure we clear previously set non-block flag */
1031		kiocb->ki_flags &= ~IOCB_NOWAIT;
1032	}
1033
1034	ppos = io_kiocb_update_pos(req);
1035
1036	ret = rw_verify_area(WRITE, req->file, ppos, req->cqe.res);
1037	if (unlikely(ret)) {
1038		kfree(iovec);
1039		return ret;
1040	}
1041
1042	if (req->flags & REQ_F_ISREG)
1043		kiocb_start_write(kiocb);
1044	kiocb->ki_flags |= IOCB_WRITE;
1045
1046	if (likely(req->file->f_op->write_iter))
1047		ret2 = call_write_iter(req->file, kiocb, &s->iter);
1048	else if (req->file->f_op->write)
1049		ret2 = loop_rw_iter(WRITE, rw, &s->iter);
1050	else
1051		ret2 = -EINVAL;
1052
1053	if (req->flags & REQ_F_REISSUE) {
1054		req->flags &= ~REQ_F_REISSUE;
1055		ret2 = -EAGAIN;
1056	}
1057
1058	/*
1059	 * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
1060	 * retry them without IOCB_NOWAIT.
1061	 */
1062	if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
1063		ret2 = -EAGAIN;
1064	/* no retry on NONBLOCK nor RWF_NOWAIT */
1065	if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT))
1066		goto done;
1067	if (!force_nonblock || ret2 != -EAGAIN) {
1068		/* IOPOLL retry should happen for io-wq threads */
1069		if (ret2 == -EAGAIN && (req->ctx->flags & IORING_SETUP_IOPOLL))
1070			goto copy_iov;
1071
1072		if (ret2 != req->cqe.res && ret2 >= 0 && need_complete_io(req)) {
1073			struct io_async_rw *io;
1074
1075			trace_io_uring_short_write(req->ctx, kiocb->ki_pos - ret2,
1076						req->cqe.res, ret2);
1077
1078			/* This is a partial write. The file pos has already been
 1079			 * updated; set up the async struct to complete the request
1080			 * in the worker. Also update bytes_done to account for
1081			 * the bytes already written.
1082			 */
1083			iov_iter_save_state(&s->iter, &s->iter_state);
1084			ret = io_setup_async_rw(req, iovec, s, true);
1085
1086			io = req->async_data;
1087			if (io)
1088				io->bytes_done += ret2;
1089
1090			if (kiocb->ki_flags & IOCB_WRITE)
1091				io_req_end_write(req);
1092			return ret ? ret : -EAGAIN;
1093		}
1094done:
1095		ret = kiocb_done(req, ret2, issue_flags);
1096	} else {
1097copy_iov:
1098		iov_iter_restore(&s->iter, &s->iter_state);
1099		ret = io_setup_async_rw(req, iovec, s, false);
1100		if (!ret) {
1101			if (kiocb->ki_flags & IOCB_WRITE)
1102				io_req_end_write(req);
1103			return -EAGAIN;
1104		}
1105		return ret;
1106	}
1107	/* it's reportedly faster than delegating the null check to kfree() */
1108	if (iovec)
1109		kfree(iovec);
1110	return ret;
1111}
1112
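/*
 * Failure hook: report the result via io_fixup_rw_res() so that any bytes
 * already transferred (bytes_done) are accounted for in the posted result.
 */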
1113void io_rw_fail(struct io_kiocb *req)
1114{
1115	int res;
1116
1117	res = io_fixup_rw_res(req, req->cqe.res);
1118	io_req_set_res(req, res, req->cqe.flags);
1119}
1120
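/*
 * Reap IOPOLL completions: poll each file on the iopoll list (spinning
 * unless multiple queues are involved or the caller asked not to spin),
 * then flush every request that has reached iopoll_completed, in
 * submission order.
 */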
1121int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
1122{
1123	struct io_wq_work_node *pos, *start, *prev;
1124	unsigned int poll_flags = 0;
1125	DEFINE_IO_COMP_BATCH(iob);
1126	int nr_events = 0;
1127
1128	/*
1129	 * Only spin for completions if we don't have multiple devices hanging
1130	 * off our complete list.
1131	 */
1132	if (ctx->poll_multi_queue || force_nonspin)
1133		poll_flags |= BLK_POLL_ONESHOT;
1134
1135	wq_list_for_each(pos, start, &ctx->iopoll_list) {
1136		struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);
1137		struct file *file = req->file;
1138		int ret;
1139
1140		/*
1141		 * Move completed and retryable entries to our local lists.
1142		 * If we find a request that requires polling, break out
1143		 * and complete those lists first, if we have entries there.
1144		 */
1145		if (READ_ONCE(req->iopoll_completed))
1146			break;
1147
1148		if (req->opcode == IORING_OP_URING_CMD) {
1149			struct io_uring_cmd *ioucmd;
1150
1151			ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
1152			ret = file->f_op->uring_cmd_iopoll(ioucmd, &iob,
1153								poll_flags);
1154		} else {
1155			struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
1156
1157			ret = file->f_op->iopoll(&rw->kiocb, &iob, poll_flags);
1158		}
1159		if (unlikely(ret < 0))
1160			return ret;
1161		else if (ret)
1162			poll_flags |= BLK_POLL_ONESHOT;
1163
1164		/* iopoll may have completed current req */
1165		if (!rq_list_empty(iob.req_list) ||
1166		    READ_ONCE(req->iopoll_completed))
1167			break;
1168	}
1169
1170	if (!rq_list_empty(iob.req_list))
1171		iob.complete(&iob);
1172	else if (!pos)
1173		return 0;
1174
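	/*
	 * Reap in submission order: stop at the first request that hasn't
	 * been marked completed yet, and flush the reaped ones below.
	 */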
1175	prev = start;
1176	wq_list_for_each_resume(pos, prev) {
1177		struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);
1178
1179		/* order with io_complete_rw_iopoll(), e.g. ->result updates */
1180		if (!smp_load_acquire(&req->iopoll_completed))
1181			break;
1182		nr_events++;
1183		req->cqe.flags = io_put_kbuf(req, 0);
1184	}
1185	if (unlikely(!nr_events))
1186		return 0;
1187
1188	pos = start ? start->next : ctx->iopoll_list.first;
1189	wq_list_cut(&ctx->iopoll_list, prev, start);
1190
1191	if (WARN_ON_ONCE(!wq_list_empty(&ctx->submit_state.compl_reqs)))
1192		return 0;
1193	ctx->submit_state.compl_reqs.first = pos;
1194	__io_submit_flush_completions(ctx);
1195	return nr_events;
1196}