io_uring/rw.c, v6.2
   1// SPDX-License-Identifier: GPL-2.0
   2#include <linux/kernel.h>
   3#include <linux/errno.h>
   4#include <linux/fs.h>
   5#include <linux/file.h>
   6#include <linux/blk-mq.h>
   7#include <linux/mm.h>
   8#include <linux/slab.h>
   9#include <linux/fsnotify.h>
  10#include <linux/poll.h>
  11#include <linux/nospec.h>
  12#include <linux/compat.h>
  13#include <linux/io_uring.h>
  14
  15#include <uapi/linux/io_uring.h>
  16
  17#include "io_uring.h"
  18#include "opdef.h"
  19#include "kbuf.h"
  20#include "rsrc.h"
  21#include "rw.h"
  22
  23struct io_rw {
  24	/* NOTE: kiocb has the file as the first member, so don't do it here */
  25	struct kiocb			kiocb;
  26	u64				addr;
  27	u32				len;
  28	rwf_t				flags;
  29};
  30
  31static inline bool io_file_supports_nowait(struct io_kiocb *req)
  32{
  33	return req->flags & REQ_F_SUPPORT_NOWAIT;
  34}
  35
  36#ifdef CONFIG_COMPAT
  37static int io_iov_compat_buffer_select_prep(struct io_rw *rw)
  38{
  39	struct compat_iovec __user *uiov;
  40	compat_ssize_t clen;
  41
  42	uiov = u64_to_user_ptr(rw->addr);
  43	if (!access_ok(uiov, sizeof(*uiov)))
  44		return -EFAULT;
  45	if (__get_user(clen, &uiov->iov_len))
  46		return -EFAULT;
  47	if (clen < 0)
  48		return -EINVAL;
  49
  50	rw->len = clen;
  51	return 0;
  52}
  53#endif
  54
  55static int io_iov_buffer_select_prep(struct io_kiocb *req)
  56{
  57	struct iovec __user *uiov;
  58	struct iovec iov;
  59	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
  60
  61	if (rw->len != 1)
  62		return -EINVAL;
  63
  64#ifdef CONFIG_COMPAT
  65	if (req->ctx->compat)
  66		return io_iov_compat_buffer_select_prep(rw);
  67#endif
  68
  69	uiov = u64_to_user_ptr(rw->addr);
  70	if (copy_from_user(&iov, uiov, sizeof(*uiov)))
  71		return -EFAULT;
  72	rw->len = iov.iov_len;
  73	return 0;
  74}
  75
  76int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
  77{
  78	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
  79	unsigned ioprio;
  80	int ret;
  81
  82	rw->kiocb.ki_pos = READ_ONCE(sqe->off);
  83	/* used for fixed read/write too - just read unconditionally */
  84	req->buf_index = READ_ONCE(sqe->buf_index);
  85
  86	if (req->opcode == IORING_OP_READ_FIXED ||
  87	    req->opcode == IORING_OP_WRITE_FIXED) {
  88		struct io_ring_ctx *ctx = req->ctx;
  89		u16 index;
  90
  91		if (unlikely(req->buf_index >= ctx->nr_user_bufs))
  92			return -EFAULT;
  93		index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
  94		req->imu = ctx->user_bufs[index];
  95		io_req_set_rsrc_node(req, ctx, 0);
  96	}
  97
  98	ioprio = READ_ONCE(sqe->ioprio);
  99	if (ioprio) {
 100		ret = ioprio_check_cap(ioprio);
 101		if (ret)
 102			return ret;
 103
 104		rw->kiocb.ki_ioprio = ioprio;
 105	} else {
 106		rw->kiocb.ki_ioprio = get_current_ioprio();
 107	}
 108
 109	rw->addr = READ_ONCE(sqe->addr);
 110	rw->len = READ_ONCE(sqe->len);
 111	rw->flags = READ_ONCE(sqe->rw_flags);
 112
  113	/* Have to do this validation here, as rw->len might have changed
  114	 * in io_read() due to buffer selection
  115	 */
 116	if (req->opcode == IORING_OP_READV && req->flags & REQ_F_BUFFER_SELECT) {
 117		ret = io_iov_buffer_select_prep(req);
 118		if (ret)
 119			return ret;
 120	}
 121
 122	return 0;
 123}
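
The validation above exists because a vectored read with buffer selection must describe exactly one iovec: the selected buffer replaces its address and only the length survives. Below is a minimal userspace sketch of that pairing, assuming liburing is available; buffer group 7, the 4 KiB sizes, and the lack of error handling are example choices only.

#include <liburing.h>

static int queue_selected_readv(struct io_uring *ring, int fd)
{
	static char pool[4][4096];
	struct iovec iov = { .iov_base = NULL, .iov_len = 4096 };
	struct io_uring_sqe *sqe;

	/* publish four provided buffers as group 7 */
	sqe = io_uring_get_sqe(ring);
	io_uring_prep_provide_buffers(sqe, pool, 4096, 4, 7, 0);

	/* vectored read with buffer selection: exactly one iovec, base ignored */
	sqe = io_uring_get_sqe(ring);
	io_uring_prep_readv(sqe, fd, &iov, 1, 0);
	sqe->flags |= IOSQE_BUFFER_SELECT;
	sqe->buf_group = 7;

	return io_uring_submit(ring);
}
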
 124
 125void io_readv_writev_cleanup(struct io_kiocb *req)
 126{
 127	struct io_async_rw *io = req->async_data;
 128
 129	kfree(io->free_iovec);
 130}
 131
 132static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
 133{
 134	switch (ret) {
 135	case -EIOCBQUEUED:
 136		break;
 137	case -ERESTARTSYS:
 138	case -ERESTARTNOINTR:
 139	case -ERESTARTNOHAND:
 140	case -ERESTART_RESTARTBLOCK:
 141		/*
 142		 * We can't just restart the syscall, since previously
 143		 * submitted sqes may already be in progress. Just fail this
 144		 * IO with EINTR.
 145		 */
 146		ret = -EINTR;
 147		fallthrough;
 148	default:
 149		kiocb->ki_complete(kiocb, ret);
 150	}
 151}
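
Because the restart codes are folded into -EINTR here, userspace only ever observes -EINTR in the CQE and can simply queue the same request again. A hedged completion-reaping sketch follows (liburing assumed; returning -EAGAIN to mean "retry this one" is just an example convention).

#include <errno.h>
#include <liburing.h>

static int reap_one(struct io_uring *ring)
{
	struct io_uring_cqe *cqe;
	int ret = io_uring_wait_cqe(ring, &cqe);

	if (ret < 0)
		return ret;
	ret = cqe->res;
	io_uring_cqe_seen(ring, cqe);
	/* -EINTR: a signal arrived before the read/write ran; the identical
	 * SQE can safely be submitted again by the caller. */
	return ret == -EINTR ? -EAGAIN : ret;
}
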
 152
 153static inline loff_t *io_kiocb_update_pos(struct io_kiocb *req)
 154{
 155	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 156
 157	if (rw->kiocb.ki_pos != -1)
 158		return &rw->kiocb.ki_pos;
 159
 160	if (!(req->file->f_mode & FMODE_STREAM)) {
 161		req->flags |= REQ_F_CUR_POS;
 162		rw->kiocb.ki_pos = req->file->f_pos;
 163		return &rw->kiocb.ki_pos;
 164	}
 165
 166	rw->kiocb.ki_pos = 0;
 167	return NULL;
 168}
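
In userspace terms, an SQE offset of -1 lands in the f_pos branch above, so linked requests on a seekable file behave like consecutive read(2) calls. A small sketch under that assumption (liburing; buffers and lengths are caller-supplied example values, error handling omitted):

#include <liburing.h>

static void queue_sequential_reads(struct io_uring *ring, int fd,
				   char *a, char *b, unsigned len)
{
	struct io_uring_sqe *sqe;

	sqe = io_uring_get_sqe(ring);
	io_uring_prep_read(sqe, fd, a, len, -1);	/* reads at the file's f_pos */
	sqe->flags |= IOSQE_IO_LINK;			/* keep the pair ordered */

	sqe = io_uring_get_sqe(ring);
	io_uring_prep_read(sqe, fd, b, len, -1);	/* continues where the first ended */

	io_uring_submit(ring);
}
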
 169
 170static void io_req_task_queue_reissue(struct io_kiocb *req)
 171{
 172	req->io_task_work.func = io_queue_iowq;
 173	io_req_task_work_add(req);
 174}
 175
 176#ifdef CONFIG_BLOCK
 177static bool io_resubmit_prep(struct io_kiocb *req)
 178{
 179	struct io_async_rw *io = req->async_data;
 180
 181	if (!req_has_async_data(req))
 182		return !io_req_prep_async(req);
 183	iov_iter_restore(&io->s.iter, &io->s.iter_state);
 184	return true;
 185}
 186
 187static bool io_rw_should_reissue(struct io_kiocb *req)
 188{
 189	umode_t mode = file_inode(req->file)->i_mode;
 190	struct io_ring_ctx *ctx = req->ctx;
 191
 192	if (!S_ISBLK(mode) && !S_ISREG(mode))
 193		return false;
 194	if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() &&
 195	    !(ctx->flags & IORING_SETUP_IOPOLL)))
 196		return false;
 197	/*
 198	 * If ref is dying, we might be running poll reap from the exit work.
 199	 * Don't attempt to reissue from that path, just let it fail with
 200	 * -EAGAIN.
 201	 */
 202	if (percpu_ref_is_dying(&ctx->refs))
 203		return false;
 204	/*
 205	 * Play it safe and assume not safe to re-import and reissue if we're
 206	 * not in the original thread group (or in task context).
 207	 */
 208	if (!same_thread_group(req->task, current) || !in_task())
 209		return false;
 210	return true;
 211}
 212#else
 213static bool io_resubmit_prep(struct io_kiocb *req)
 214{
 215	return false;
 216}
 217static bool io_rw_should_reissue(struct io_kiocb *req)
 218{
 219	return false;
 220}
 221#endif
 222
 223static void kiocb_end_write(struct io_kiocb *req)
 224{
 225	/*
 226	 * Tell lockdep we inherited freeze protection from submission
 227	 * thread.
 228	 */
 229	if (req->flags & REQ_F_ISREG) {
 230		struct super_block *sb = file_inode(req->file)->i_sb;
 231
 232		__sb_writers_acquired(sb, SB_FREEZE_WRITE);
 233		sb_end_write(sb);
 234	}
 235}
 236
 237/*
 238 * Trigger the notifications after having done some IO, and finish the write
 239 * accounting, if any.
 240 */
 241static void io_req_io_end(struct io_kiocb *req)
 242{
 243	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 244
 245	if (rw->kiocb.ki_flags & IOCB_WRITE) {
 246		kiocb_end_write(req);
 247		fsnotify_modify(req->file);
 248	} else {
 249		fsnotify_access(req->file);
 250	}
 251}
 252
 253static bool __io_complete_rw_common(struct io_kiocb *req, long res)
 254{
 255	if (unlikely(res != req->cqe.res)) {
 256		if ((res == -EAGAIN || res == -EOPNOTSUPP) &&
 257		    io_rw_should_reissue(req)) {
 258			/*
 259			 * Reissue will start accounting again, finish the
 260			 * current cycle.
 261			 */
 262			io_req_io_end(req);
 263			req->flags |= REQ_F_REISSUE | REQ_F_PARTIAL_IO;
 264			return true;
 265		}
 266		req_set_fail(req);
 267		req->cqe.res = res;
 268	}
 269	return false;
 270}
 271
 272static inline int io_fixup_rw_res(struct io_kiocb *req, long res)
 273{
 274	struct io_async_rw *io = req->async_data;
 275
 276	/* add previously done IO, if any */
 277	if (req_has_async_data(req) && io->bytes_done > 0) {
 278		if (res < 0)
 279			res = io->bytes_done;
 280		else
 281			res += io->bytes_done;
 282	}
 283	return res;
 284}
 285
 286static void io_req_rw_complete(struct io_kiocb *req, bool *locked)
 287{
 288	io_req_io_end(req);
 289
 290	if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)) {
 291		unsigned issue_flags = *locked ? 0 : IO_URING_F_UNLOCKED;
 292
 293		req->cqe.flags |= io_put_kbuf(req, issue_flags);
 294	}
 295	io_req_task_complete(req, locked);
 296}
 297
 298static void io_complete_rw(struct kiocb *kiocb, long res)
 299{
 300	struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
 301	struct io_kiocb *req = cmd_to_io_kiocb(rw);
 302
 303	if (__io_complete_rw_common(req, res))
 304		return;
 305	io_req_set_res(req, io_fixup_rw_res(req, res), 0);
 306	req->io_task_work.func = io_req_rw_complete;
 307	io_req_task_work_add(req);
 308}
 309
 310static void io_complete_rw_iopoll(struct kiocb *kiocb, long res)
 311{
 312	struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
 313	struct io_kiocb *req = cmd_to_io_kiocb(rw);
 314
 315	if (kiocb->ki_flags & IOCB_WRITE)
 316		kiocb_end_write(req);
 317	if (unlikely(res != req->cqe.res)) {
 318		if (res == -EAGAIN && io_rw_should_reissue(req)) {
 319			req->flags |= REQ_F_REISSUE | REQ_F_PARTIAL_IO;
 320			return;
 321		}
 322		req->cqe.res = res;
 323	}
 324
 325	/* order with io_iopoll_complete() checking ->iopoll_completed */
 326	smp_store_release(&req->iopoll_completed, 1);
 327}
 328
 329static int kiocb_done(struct io_kiocb *req, ssize_t ret,
 330		       unsigned int issue_flags)
 331{
 332	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 333	unsigned final_ret = io_fixup_rw_res(req, ret);
 334
 335	if (req->flags & REQ_F_CUR_POS)
 336		req->file->f_pos = rw->kiocb.ki_pos;
 337	if (ret >= 0 && (rw->kiocb.ki_complete == io_complete_rw)) {
 338		if (!__io_complete_rw_common(req, ret)) {
 339			/*
 340			 * Safe to call io_end from here as we're inline
 341			 * from the submission path.
 342			 */
 343			io_req_io_end(req);
 344			io_req_set_res(req, final_ret,
 345				       io_put_kbuf(req, issue_flags));
 346			return IOU_OK;
 347		}
 348	} else {
 349		io_rw_done(&rw->kiocb, ret);
 350	}
 351
 352	if (req->flags & REQ_F_REISSUE) {
 353		req->flags &= ~REQ_F_REISSUE;
 354		if (io_resubmit_prep(req))
 355			io_req_task_queue_reissue(req);
 356		else
 357			io_req_task_queue_fail(req, final_ret);
 358	}
 359	return IOU_ISSUE_SKIP_COMPLETE;
 360}
 361
 362static struct iovec *__io_import_iovec(int ddir, struct io_kiocb *req,
 363				       struct io_rw_state *s,
 364				       unsigned int issue_flags)
 365{
 366	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 367	struct iov_iter *iter = &s->iter;
 368	u8 opcode = req->opcode;
 369	struct iovec *iovec;
 370	void __user *buf;
 371	size_t sqe_len;
 372	ssize_t ret;
 373
 374	if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
 375		ret = io_import_fixed(ddir, iter, req->imu, rw->addr, rw->len);
 376		if (ret)
 377			return ERR_PTR(ret);
 378		return NULL;
 379	}
 380
 381	buf = u64_to_user_ptr(rw->addr);
 382	sqe_len = rw->len;
 383
 384	if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE ||
 385	    (req->flags & REQ_F_BUFFER_SELECT)) {
 386		if (io_do_buffer_select(req)) {
 387			buf = io_buffer_select(req, &sqe_len, issue_flags);
 388			if (!buf)
 389				return ERR_PTR(-ENOBUFS);
 390			rw->addr = (unsigned long) buf;
 391			rw->len = sqe_len;
 392		}
 393
 394		ret = import_single_range(ddir, buf, sqe_len, s->fast_iov, iter);
 395		if (ret)
 396			return ERR_PTR(ret);
 397		return NULL;
 398	}
 399
 400	iovec = s->fast_iov;
 401	ret = __import_iovec(ddir, buf, sqe_len, UIO_FASTIOV, &iovec, iter,
 402			      req->ctx->compat);
 403	if (unlikely(ret < 0))
 404		return ERR_PTR(ret);
 405	return iovec;
 406}
 407
 408static inline int io_import_iovec(int rw, struct io_kiocb *req,
 409				  struct iovec **iovec, struct io_rw_state *s,
 410				  unsigned int issue_flags)
 411{
 412	*iovec = __io_import_iovec(rw, req, s, issue_flags);
 413	if (unlikely(IS_ERR(*iovec)))
 414		return PTR_ERR(*iovec);
 415
 416	iov_iter_save_state(&s->iter, &s->iter_state);
 417	return 0;
 418}
 419
 420static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
 421{
 422	return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? NULL : &kiocb->ki_pos;
 423}
 424
 425/*
 426 * For files that don't have ->read_iter() and ->write_iter(), handle them
 427 * by looping over ->read() or ->write() manually.
 428 */
 429static ssize_t loop_rw_iter(int ddir, struct io_rw *rw, struct iov_iter *iter)
 430{
 431	struct kiocb *kiocb = &rw->kiocb;
 432	struct file *file = kiocb->ki_filp;
 433	ssize_t ret = 0;
 434	loff_t *ppos;
 435
 436	/*
 437	 * Don't support polled IO through this interface, and we can't
 438	 * support non-blocking either. For the latter, this just causes
 439	 * the kiocb to be handled from an async context.
 440	 */
 441	if (kiocb->ki_flags & IOCB_HIPRI)
 442		return -EOPNOTSUPP;
 443	if ((kiocb->ki_flags & IOCB_NOWAIT) &&
 444	    !(kiocb->ki_filp->f_flags & O_NONBLOCK))
 445		return -EAGAIN;
 446
 447	ppos = io_kiocb_ppos(kiocb);
 448
 449	while (iov_iter_count(iter)) {
 450		struct iovec iovec;
 451		ssize_t nr;
 452
 453		if (!iov_iter_is_bvec(iter)) {
 454			iovec = iov_iter_iovec(iter);
 455		} else {
 456			iovec.iov_base = u64_to_user_ptr(rw->addr);
 457			iovec.iov_len = rw->len;
 458		}
 459
 460		if (ddir == READ) {
 461			nr = file->f_op->read(file, iovec.iov_base,
 462					      iovec.iov_len, ppos);
 463		} else {
 464			nr = file->f_op->write(file, iovec.iov_base,
 465					       iovec.iov_len, ppos);
 466		}
 467
 468		if (nr < 0) {
 469			if (!ret)
 470				ret = nr;
 471			break;
 472		}
 473		ret += nr;
 474		if (!iov_iter_is_bvec(iter)) {
 475			iov_iter_advance(iter, nr);
 476		} else {
 477			rw->addr += nr;
 478			rw->len -= nr;
 479			if (!rw->len)
 480				break;
 481		}
 482		if (nr != iovec.iov_len)
 483			break;
 484	}
 485
 486	return ret;
 487}
 488
 489static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec,
 490			  const struct iovec *fast_iov, struct iov_iter *iter)
 491{
 492	struct io_async_rw *io = req->async_data;
 493
 494	memcpy(&io->s.iter, iter, sizeof(*iter));
 495	io->free_iovec = iovec;
 496	io->bytes_done = 0;
 497	/* can only be fixed buffers, no need to do anything */
 498	if (iov_iter_is_bvec(iter))
 499		return;
 500	if (!iovec) {
 501		unsigned iov_off = 0;
 502
 503		io->s.iter.iov = io->s.fast_iov;
 504		if (iter->iov != fast_iov) {
 505			iov_off = iter->iov - fast_iov;
 506			io->s.iter.iov += iov_off;
 507		}
 508		if (io->s.fast_iov != fast_iov)
 509			memcpy(io->s.fast_iov + iov_off, fast_iov + iov_off,
 510			       sizeof(struct iovec) * iter->nr_segs);
 511	} else {
 512		req->flags |= REQ_F_NEED_CLEANUP;
 513	}
 514}
 515
 516static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
 517			     struct io_rw_state *s, bool force)
 518{
 519	if (!force && !io_op_defs[req->opcode].prep_async)
 520		return 0;
 521	if (!req_has_async_data(req)) {
 522		struct io_async_rw *iorw;
 523
 524		if (io_alloc_async_data(req)) {
 525			kfree(iovec);
 526			return -ENOMEM;
 527		}
 528
 529		io_req_map_rw(req, iovec, s->fast_iov, &s->iter);
 530		iorw = req->async_data;
 531		/* we've copied and mapped the iter, ensure state is saved */
 532		iov_iter_save_state(&iorw->s.iter, &iorw->s.iter_state);
 533	}
 534	return 0;
 535}
 536
 537static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
 538{
 539	struct io_async_rw *iorw = req->async_data;
 540	struct iovec *iov;
 541	int ret;
 542
 543	/* submission path, ->uring_lock should already be taken */
 544	ret = io_import_iovec(rw, req, &iov, &iorw->s, 0);
 545	if (unlikely(ret < 0))
 546		return ret;
 547
 548	iorw->bytes_done = 0;
 549	iorw->free_iovec = iov;
 550	if (iov)
 551		req->flags |= REQ_F_NEED_CLEANUP;
 552	return 0;
 553}
 554
 555int io_readv_prep_async(struct io_kiocb *req)
 556{
 557	return io_rw_prep_async(req, ITER_DEST);
 558}
 559
 560int io_writev_prep_async(struct io_kiocb *req)
 561{
 562	return io_rw_prep_async(req, ITER_SOURCE);
 563}
 564
 565/*
 566 * This is our waitqueue callback handler, registered through __folio_lock_async()
  567 * when we initially tried to do the IO with the iocb armed with our waitqueue.
 568 * This gets called when the page is unlocked, and we generally expect that to
 569 * happen when the page IO is completed and the page is now uptodate. This will
 570 * queue a task_work based retry of the operation, attempting to copy the data
 571 * again. If the latter fails because the page was NOT uptodate, then we will
 572 * do a thread based blocking retry of the operation. That's the unexpected
 573 * slow path.
 574 */
 575static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
 576			     int sync, void *arg)
 577{
 578	struct wait_page_queue *wpq;
 579	struct io_kiocb *req = wait->private;
 580	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 581	struct wait_page_key *key = arg;
 582
 583	wpq = container_of(wait, struct wait_page_queue, wait);
 584
 585	if (!wake_page_match(wpq, key))
 586		return 0;
 587
 588	rw->kiocb.ki_flags &= ~IOCB_WAITQ;
 589	list_del_init(&wait->entry);
 590	io_req_task_queue(req);
 591	return 1;
 592}
 593
 594/*
 595 * This controls whether a given IO request should be armed for async page
 596 * based retry. If we return false here, the request is handed to the async
 597 * worker threads for retry. If we're doing buffered reads on a regular file,
 598 * we prepare a private wait_page_queue entry and retry the operation. This
 599 * will either succeed because the page is now uptodate and unlocked, or it
 600 * will register a callback when the page is unlocked at IO completion. Through
 601 * that callback, io_uring uses task_work to setup a retry of the operation.
 602 * That retry will attempt the buffered read again. The retry will generally
 603 * succeed, or in rare cases where it fails, we then fall back to using the
 604 * async worker threads for a blocking retry.
 605 */
 606static bool io_rw_should_retry(struct io_kiocb *req)
 607{
 608	struct io_async_rw *io = req->async_data;
 609	struct wait_page_queue *wait = &io->wpq;
 610	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 611	struct kiocb *kiocb = &rw->kiocb;
 612
 613	/* never retry for NOWAIT, we just complete with -EAGAIN */
 614	if (req->flags & REQ_F_NOWAIT)
 615		return false;
 616
 617	/* Only for buffered IO */
 618	if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI))
 619		return false;
 620
 621	/*
 622	 * just use poll if we can, and don't attempt if the fs doesn't
 623	 * support callback based unlocks
 624	 */
 625	if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC))
 626		return false;
 627
 628	wait->wait.func = io_async_buf_func;
 629	wait->wait.private = req;
 630	wait->wait.flags = 0;
 631	INIT_LIST_HEAD(&wait->wait.entry);
 632	kiocb->ki_flags |= IOCB_WAITQ;
 633	kiocb->ki_flags &= ~IOCB_NOWAIT;
 634	kiocb->ki_waitq = wait;
 635	return true;
 636}
 637
 638static inline int io_iter_do_read(struct io_rw *rw, struct iov_iter *iter)
 639{
 640	struct file *file = rw->kiocb.ki_filp;
 641
 642	if (likely(file->f_op->read_iter))
 643		return call_read_iter(file, &rw->kiocb, iter);
 644	else if (file->f_op->read)
 645		return loop_rw_iter(READ, rw, iter);
 646	else
 647		return -EINVAL;
 648}
 649
 650static bool need_complete_io(struct io_kiocb *req)
 651{
 652	return req->flags & REQ_F_ISREG ||
 653		S_ISBLK(file_inode(req->file)->i_mode);
 654}
 655
 656static int io_rw_init_file(struct io_kiocb *req, fmode_t mode)
 657{
 658	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 659	struct kiocb *kiocb = &rw->kiocb;
 660	struct io_ring_ctx *ctx = req->ctx;
 661	struct file *file = req->file;
 662	int ret;
 663
 664	if (unlikely(!file || !(file->f_mode & mode)))
 665		return -EBADF;
 666
 667	if (!io_req_ffs_set(req))
 668		req->flags |= io_file_get_flags(file) << REQ_F_SUPPORT_NOWAIT_BIT;
 669
 670	kiocb->ki_flags = file->f_iocb_flags;
 671	ret = kiocb_set_rw_flags(kiocb, rw->flags);
 672	if (unlikely(ret))
 673		return ret;
 674	kiocb->ki_flags |= IOCB_ALLOC_CACHE;
 675
 676	/*
 677	 * If the file is marked O_NONBLOCK, still allow retry for it if it
 678	 * supports async. Otherwise it's impossible to use O_NONBLOCK files
  679	 * reliably. If not, or if IOCB_NOWAIT is set, don't retry.
 680	 */
 681	if ((kiocb->ki_flags & IOCB_NOWAIT) ||
 682	    ((file->f_flags & O_NONBLOCK) && !io_file_supports_nowait(req)))
 683		req->flags |= REQ_F_NOWAIT;
 684
 685	if (ctx->flags & IORING_SETUP_IOPOLL) {
 686		if (!(kiocb->ki_flags & IOCB_DIRECT) || !file->f_op->iopoll)
 687			return -EOPNOTSUPP;
 688
 689		kiocb->private = NULL;
 690		kiocb->ki_flags |= IOCB_HIPRI;
 691		kiocb->ki_complete = io_complete_rw_iopoll;
 692		req->iopoll_completed = 0;
 693	} else {
 694		if (kiocb->ki_flags & IOCB_HIPRI)
 695			return -EINVAL;
 696		kiocb->ki_complete = io_complete_rw;
 697	}
 698
 699	return 0;
 700}
 701
 702int io_read(struct io_kiocb *req, unsigned int issue_flags)
 703{
 704	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 705	struct io_rw_state __s, *s = &__s;
 706	struct iovec *iovec;
 707	struct kiocb *kiocb = &rw->kiocb;
 708	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
 709	struct io_async_rw *io;
 710	ssize_t ret, ret2;
 711	loff_t *ppos;
 712
 713	if (!req_has_async_data(req)) {
 714		ret = io_import_iovec(ITER_DEST, req, &iovec, s, issue_flags);
 715		if (unlikely(ret < 0))
 716			return ret;
 717	} else {
 718		io = req->async_data;
 719		s = &io->s;
 720
 721		/*
 722		 * Safe and required to re-import if we're using provided
 723		 * buffers, as we dropped the selected one before retry.
 724		 */
 725		if (io_do_buffer_select(req)) {
 726			ret = io_import_iovec(ITER_DEST, req, &iovec, s, issue_flags);
 727			if (unlikely(ret < 0))
 728				return ret;
 729		}
 730
 731		/*
 732		 * We come here from an earlier attempt, restore our state to
 733		 * match in case it doesn't. It's cheap enough that we don't
 734		 * need to make this conditional.
 735		 */
 736		iov_iter_restore(&s->iter, &s->iter_state);
 737		iovec = NULL;
 738	}
 739	ret = io_rw_init_file(req, FMODE_READ);
 740	if (unlikely(ret)) {
 741		kfree(iovec);
 742		return ret;
 743	}
 744	req->cqe.res = iov_iter_count(&s->iter);
 745
 746	if (force_nonblock) {
 747		/* If the file doesn't support async, just async punt */
 748		if (unlikely(!io_file_supports_nowait(req))) {
 749			ret = io_setup_async_rw(req, iovec, s, true);
 750			return ret ?: -EAGAIN;
 751		}
 752		kiocb->ki_flags |= IOCB_NOWAIT;
 753	} else {
 754		/* Ensure we clear previously set non-block flag */
 755		kiocb->ki_flags &= ~IOCB_NOWAIT;
 756	}
 757
 758	ppos = io_kiocb_update_pos(req);
 759
 760	ret = rw_verify_area(READ, req->file, ppos, req->cqe.res);
 761	if (unlikely(ret)) {
 762		kfree(iovec);
 763		return ret;
 764	}
 765
 766	ret = io_iter_do_read(rw, &s->iter);
 767
 768	if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
 769		req->flags &= ~REQ_F_REISSUE;
 770		/* if we can poll, just do that */
 771		if (req->opcode == IORING_OP_READ && file_can_poll(req->file))
 772			return -EAGAIN;
 773		/* IOPOLL retry should happen for io-wq threads */
 774		if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
 775			goto done;
 776		/* no retry on NONBLOCK nor RWF_NOWAIT */
 777		if (req->flags & REQ_F_NOWAIT)
 778			goto done;
 779		ret = 0;
 780	} else if (ret == -EIOCBQUEUED) {
 781		if (iovec)
 782			kfree(iovec);
 783		return IOU_ISSUE_SKIP_COMPLETE;
 784	} else if (ret == req->cqe.res || ret <= 0 || !force_nonblock ||
 785		   (req->flags & REQ_F_NOWAIT) || !need_complete_io(req)) {
 786		/* read all, failed, already did sync or don't want to retry */
 787		goto done;
 788	}
 789
 790	/*
 791	 * Don't depend on the iter state matching what was consumed, or being
 792	 * untouched in case of error. Restore it and we'll advance it
 793	 * manually if we need to.
 794	 */
 795	iov_iter_restore(&s->iter, &s->iter_state);
 796
 797	ret2 = io_setup_async_rw(req, iovec, s, true);
 798	iovec = NULL;
 799	if (ret2) {
 800		ret = ret > 0 ? ret : ret2;
 801		goto done;
 802	}
 803
 804	io = req->async_data;
 805	s = &io->s;
 806	/*
 807	 * Now use our persistent iterator and state, if we aren't already.
 808	 * We've restored and mapped the iter to match.
 809	 */
 810
 811	do {
 812		/*
 813		 * We end up here because of a partial read, either from
 814		 * above or inside this loop. Advance the iter by the bytes
 815		 * that were consumed.
 816		 */
 817		iov_iter_advance(&s->iter, ret);
 818		if (!iov_iter_count(&s->iter))
 819			break;
 820		io->bytes_done += ret;
 821		iov_iter_save_state(&s->iter, &s->iter_state);
 822
 823		/* if we can retry, do so with the callbacks armed */
 824		if (!io_rw_should_retry(req)) {
 825			kiocb->ki_flags &= ~IOCB_WAITQ;
 826			return -EAGAIN;
 827		}
 828
 829		req->cqe.res = iov_iter_count(&s->iter);
 830		/*
 831		 * Now retry read with the IOCB_WAITQ parts set in the iocb. If
 832		 * we get -EIOCBQUEUED, then we'll get a notification when the
 833		 * desired page gets unlocked. We can also get a partial read
 834		 * here, and if we do, then just retry at the new offset.
 835		 */
 836		ret = io_iter_do_read(rw, &s->iter);
 837		if (ret == -EIOCBQUEUED)
 838			return IOU_ISSUE_SKIP_COMPLETE;
 839		/* we got some bytes, but not all. retry. */
 840		kiocb->ki_flags &= ~IOCB_WAITQ;
 841		iov_iter_restore(&s->iter, &s->iter_state);
 842	} while (ret > 0);
 843done:
  844	/* it's faster to check here than delegate to kfree */
 845	if (iovec)
 846		kfree(iovec);
 847	return kiocb_done(req, ret, issue_flags);
 848}
 849
 850int io_write(struct io_kiocb *req, unsigned int issue_flags)
 851{
 852	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 853	struct io_rw_state __s, *s = &__s;
 854	struct iovec *iovec;
 855	struct kiocb *kiocb = &rw->kiocb;
 856	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
 857	ssize_t ret, ret2;
 858	loff_t *ppos;
 859
 860	if (!req_has_async_data(req)) {
 861		ret = io_import_iovec(ITER_SOURCE, req, &iovec, s, issue_flags);
 862		if (unlikely(ret < 0))
 863			return ret;
 864	} else {
 865		struct io_async_rw *io = req->async_data;
 866
 867		s = &io->s;
 868		iov_iter_restore(&s->iter, &s->iter_state);
 869		iovec = NULL;
 870	}
 871	ret = io_rw_init_file(req, FMODE_WRITE);
 872	if (unlikely(ret)) {
 873		kfree(iovec);
 874		return ret;
 875	}
 876	req->cqe.res = iov_iter_count(&s->iter);
 877
 878	if (force_nonblock) {
 879		/* If the file doesn't support async, just async punt */
 880		if (unlikely(!io_file_supports_nowait(req)))
 881			goto copy_iov;
 882
 883		/* File path supports NOWAIT for non-direct_IO only for block devices. */
 884		if (!(kiocb->ki_flags & IOCB_DIRECT) &&
 885			!(kiocb->ki_filp->f_mode & FMODE_BUF_WASYNC) &&
 886			(req->flags & REQ_F_ISREG))
 887			goto copy_iov;
 888
 889		kiocb->ki_flags |= IOCB_NOWAIT;
 890	} else {
 891		/* Ensure we clear previously set non-block flag */
 892		kiocb->ki_flags &= ~IOCB_NOWAIT;
 893	}
 894
 895	ppos = io_kiocb_update_pos(req);
 896
 897	ret = rw_verify_area(WRITE, req->file, ppos, req->cqe.res);
 898	if (unlikely(ret)) {
 899		kfree(iovec);
 900		return ret;
 901	}
 902
 903	/*
 904	 * Open-code file_start_write here to grab freeze protection,
 905	 * which will be released by another thread in
 906	 * io_complete_rw().  Fool lockdep by telling it the lock got
 907	 * released so that it doesn't complain about the held lock when
 908	 * we return to userspace.
 909	 */
 910	if (req->flags & REQ_F_ISREG) {
 911		sb_start_write(file_inode(req->file)->i_sb);
 912		__sb_writers_release(file_inode(req->file)->i_sb,
 913					SB_FREEZE_WRITE);
 914	}
 915	kiocb->ki_flags |= IOCB_WRITE;
 916
 917	if (likely(req->file->f_op->write_iter))
 918		ret2 = call_write_iter(req->file, kiocb, &s->iter);
 919	else if (req->file->f_op->write)
 920		ret2 = loop_rw_iter(WRITE, rw, &s->iter);
 921	else
 922		ret2 = -EINVAL;
 923
 924	if (req->flags & REQ_F_REISSUE) {
 925		req->flags &= ~REQ_F_REISSUE;
 926		ret2 = -EAGAIN;
 927	}
 928
 929	/*
 930	 * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
 931	 * retry them without IOCB_NOWAIT.
 932	 */
 933	if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
 934		ret2 = -EAGAIN;
 935	/* no retry on NONBLOCK nor RWF_NOWAIT */
 936	if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT))
 937		goto done;
 938	if (!force_nonblock || ret2 != -EAGAIN) {
 939		/* IOPOLL retry should happen for io-wq threads */
 940		if (ret2 == -EAGAIN && (req->ctx->flags & IORING_SETUP_IOPOLL))
 941			goto copy_iov;
 942
 943		if (ret2 != req->cqe.res && ret2 >= 0 && need_complete_io(req)) {
 944			struct io_async_rw *io;
 945
 946			trace_io_uring_short_write(req->ctx, kiocb->ki_pos - ret2,
 947						req->cqe.res, ret2);
 948
 949			/* This is a partial write. The file pos has already been
 950			 * updated, setup the async struct to complete the request
 951			 * in the worker. Also update bytes_done to account for
 952			 * the bytes already written.
 953			 */
 954			iov_iter_save_state(&s->iter, &s->iter_state);
 955			ret = io_setup_async_rw(req, iovec, s, true);
 956
 957			io = req->async_data;
 958			if (io)
 959				io->bytes_done += ret2;
 960
 961			if (kiocb->ki_flags & IOCB_WRITE)
 962				kiocb_end_write(req);
 963			return ret ? ret : -EAGAIN;
 964		}
 965done:
 966		ret = kiocb_done(req, ret2, issue_flags);
 967	} else {
 968copy_iov:
 969		iov_iter_restore(&s->iter, &s->iter_state);
 970		ret = io_setup_async_rw(req, iovec, s, false);
 971		if (!ret) {
 972			if (kiocb->ki_flags & IOCB_WRITE)
 973				kiocb_end_write(req);
 974			return -EAGAIN;
 975		}
 976		return ret;
 977	}
 978	/* it's reportedly faster than delegating the null check to kfree() */
 979	if (iovec)
 980		kfree(iovec);
 981	return ret;
 982}
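
Even with the partial-write bookkeeping above, a CQE can still carry a short count for files that don't qualify for need_complete_io(), so callers typically compare cqe->res with the requested length and queue the remainder themselves. A hedged, synchronous sketch of that loop (liburing assumed, one SQE in flight at a time):

#include <errno.h>
#include <liburing.h>

static int write_all(struct io_uring *ring, int fd, const char *buf,
		     unsigned len, unsigned long long off)
{
	while (len) {
		struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
		struct io_uring_cqe *cqe;
		int ret;

		io_uring_prep_write(sqe, fd, buf, len, off);
		io_uring_submit(ring);
		ret = io_uring_wait_cqe(ring, &cqe);
		if (ret < 0)
			return ret;
		ret = cqe->res;
		io_uring_cqe_seen(ring, cqe);
		if (ret <= 0)
			return ret ? ret : -EIO;
		/* short write: advance and retry the rest */
		buf += ret;
		off += ret;
		len -= ret;
	}
	return 0;
}
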
 983
 984static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
 985{
 986	io_commit_cqring_flush(ctx);
 987	if (ctx->flags & IORING_SETUP_SQPOLL)
 988		io_cqring_wake(ctx);
 989}
 990
 991void io_rw_fail(struct io_kiocb *req)
 992{
 993	int res;
 994
 995	res = io_fixup_rw_res(req, req->cqe.res);
 996	io_req_set_res(req, res, req->cqe.flags);
 997}
 998
 999int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
1000{
1001	struct io_wq_work_node *pos, *start, *prev;
1002	unsigned int poll_flags = BLK_POLL_NOSLEEP;
1003	DEFINE_IO_COMP_BATCH(iob);
1004	int nr_events = 0;
1005
1006	/*
1007	 * Only spin for completions if we don't have multiple devices hanging
1008	 * off our complete list.
1009	 */
1010	if (ctx->poll_multi_queue || force_nonspin)
1011		poll_flags |= BLK_POLL_ONESHOT;
1012
1013	wq_list_for_each(pos, start, &ctx->iopoll_list) {
1014		struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);
1015		struct file *file = req->file;
1016		int ret;
1017
1018		/*
1019		 * Move completed and retryable entries to our local lists.
1020		 * If we find a request that requires polling, break out
1021		 * and complete those lists first, if we have entries there.
1022		 */
1023		if (READ_ONCE(req->iopoll_completed))
1024			break;
1025
1026		if (req->opcode == IORING_OP_URING_CMD) {
1027			struct io_uring_cmd *ioucmd;
1028
1029			ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
1030			ret = file->f_op->uring_cmd_iopoll(ioucmd, &iob,
1031								poll_flags);
1032		} else {
1033			struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
1034
1035			ret = file->f_op->iopoll(&rw->kiocb, &iob, poll_flags);
1036		}
1037		if (unlikely(ret < 0))
1038			return ret;
1039		else if (ret)
1040			poll_flags |= BLK_POLL_ONESHOT;
1041
1042		/* iopoll may have completed current req */
1043		if (!rq_list_empty(iob.req_list) ||
1044		    READ_ONCE(req->iopoll_completed))
1045			break;
1046	}
1047
1048	if (!rq_list_empty(iob.req_list))
1049		iob.complete(&iob);
1050	else if (!pos)
1051		return 0;
1052
1053	prev = start;
1054	wq_list_for_each_resume(pos, prev) {
1055		struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);
1056
1057		/* order with io_complete_rw_iopoll(), e.g. ->result updates */
1058		if (!smp_load_acquire(&req->iopoll_completed))
1059			break;
1060		nr_events++;
1061		if (unlikely(req->flags & REQ_F_CQE_SKIP))
1062			continue;
1063
1064		req->cqe.flags = io_put_kbuf(req, 0);
1065		if (unlikely(!__io_fill_cqe_req(ctx, req))) {
1066			spin_lock(&ctx->completion_lock);
1067			io_req_cqe_overflow(req);
1068			spin_unlock(&ctx->completion_lock);
1069		}
1070	}
1071
1072	if (unlikely(!nr_events))
1073		return 0;
1074
1075	io_commit_cqring(ctx);
1076	io_cqring_ev_posted_iopoll(ctx);
1077	pos = start ? start->next : ctx->iopoll_list.first;
1078	wq_list_cut(&ctx->iopoll_list, prev, start);
1079	io_free_batch_list(ctx, pos);
1080	return nr_events;
1081}
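
A hedged userspace sketch of the setup this poll loop depends on: the ring is created with IORING_SETUP_IOPOLL and the file is opened O_DIRECT, so io_rw_init_file() accepts the request and the completion is reaped by polling the device rather than by interrupt. The device path and 4 KiB alignment are example values only.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdlib.h>
#include <liburing.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	void *buf;
	int fd;

	if (io_uring_queue_init(8, &ring, IORING_SETUP_IOPOLL))
		return 1;
	fd = open("/dev/nvme0n1", O_RDONLY | O_DIRECT);	/* example device */
	if (fd < 0 || posix_memalign(&buf, 4096, 4096))
		return 1;

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_read(sqe, fd, buf, 4096, 0);
	io_uring_submit(&ring);

	/* reaped by polling the device's ->iopoll() hook, not by interrupt */
	io_uring_wait_cqe(&ring, &cqe);
	io_uring_cqe_seen(&ring, cqe);
	io_uring_queue_exit(&ring);
	return 0;
}
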
io_uring/rw.c, v6.9.4
   1// SPDX-License-Identifier: GPL-2.0
   2#include <linux/kernel.h>
   3#include <linux/errno.h>
   4#include <linux/fs.h>
   5#include <linux/file.h>
   6#include <linux/blk-mq.h>
   7#include <linux/mm.h>
   8#include <linux/slab.h>
   9#include <linux/fsnotify.h>
  10#include <linux/poll.h>
  11#include <linux/nospec.h>
  12#include <linux/compat.h>
  13#include <linux/io_uring/cmd.h>
  14#include <linux/indirect_call_wrapper.h>
  15
  16#include <uapi/linux/io_uring.h>
  17
  18#include "io_uring.h"
  19#include "opdef.h"
  20#include "kbuf.h"
  21#include "rsrc.h"
  22#include "poll.h"
  23#include "rw.h"
  24
  25struct io_rw {
  26	/* NOTE: kiocb has the file as the first member, so don't do it here */
  27	struct kiocb			kiocb;
  28	u64				addr;
  29	u32				len;
  30	rwf_t				flags;
  31};
  32
  33static inline bool io_file_supports_nowait(struct io_kiocb *req)
  34{
  35	return req->flags & REQ_F_SUPPORT_NOWAIT;
  36}
  37
  38#ifdef CONFIG_COMPAT
  39static int io_iov_compat_buffer_select_prep(struct io_rw *rw)
  40{
  41	struct compat_iovec __user *uiov;
  42	compat_ssize_t clen;
  43
  44	uiov = u64_to_user_ptr(rw->addr);
  45	if (!access_ok(uiov, sizeof(*uiov)))
  46		return -EFAULT;
  47	if (__get_user(clen, &uiov->iov_len))
  48		return -EFAULT;
  49	if (clen < 0)
  50		return -EINVAL;
  51
  52	rw->len = clen;
  53	return 0;
  54}
  55#endif
  56
  57static int io_iov_buffer_select_prep(struct io_kiocb *req)
  58{
  59	struct iovec __user *uiov;
  60	struct iovec iov;
  61	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
  62
  63	if (rw->len != 1)
  64		return -EINVAL;
  65
  66#ifdef CONFIG_COMPAT
  67	if (req->ctx->compat)
  68		return io_iov_compat_buffer_select_prep(rw);
  69#endif
  70
  71	uiov = u64_to_user_ptr(rw->addr);
  72	if (copy_from_user(&iov, uiov, sizeof(*uiov)))
  73		return -EFAULT;
  74	rw->len = iov.iov_len;
  75	return 0;
  76}
  77
  78int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
  79{
  80	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
  81	unsigned ioprio;
  82	int ret;
  83
  84	rw->kiocb.ki_pos = READ_ONCE(sqe->off);
  85	/* used for fixed read/write too - just read unconditionally */
  86	req->buf_index = READ_ONCE(sqe->buf_index);
  87
  88	ioprio = READ_ONCE(sqe->ioprio);
  89	if (ioprio) {
  90		ret = ioprio_check_cap(ioprio);
  91		if (ret)
  92			return ret;
  93
  94		rw->kiocb.ki_ioprio = ioprio;
  95	} else {
  96		rw->kiocb.ki_ioprio = get_current_ioprio();
  97	}
  98	rw->kiocb.dio_complete = NULL;
  99
 100	rw->addr = READ_ONCE(sqe->addr);
 101	rw->len = READ_ONCE(sqe->len);
 102	rw->flags = READ_ONCE(sqe->rw_flags);
 103	return 0;
 104}
 105
 106int io_prep_rwv(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 107{
 108	int ret;
 109
 110	ret = io_prep_rw(req, sqe);
 111	if (unlikely(ret))
 112		return ret;
 113
 114	/*
  115	 * Have to do this validation here, as rw->len might have changed
  116	 * in io_read() due to buffer selection
 117	 */
 118	if (req->flags & REQ_F_BUFFER_SELECT)
 119		return io_iov_buffer_select_prep(req);
 120
 121	return 0;
 122}
 123
 124int io_prep_rw_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 125{
 126	struct io_ring_ctx *ctx = req->ctx;
 127	u16 index;
 128	int ret;
 129
 130	ret = io_prep_rw(req, sqe);
 131	if (unlikely(ret))
 132		return ret;
 133
 134	if (unlikely(req->buf_index >= ctx->nr_user_bufs))
 135		return -EFAULT;
 136	index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
 137	req->imu = ctx->user_bufs[index];
 138	io_req_set_rsrc_node(req, ctx, 0);
 139	return 0;
 140}
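
For the fixed variants, buf_index has to name a buffer registered ahead of time. A minimal userspace sketch under that assumption (liburing, one 4 KiB buffer at index 0, error handling kept short):

#include <liburing.h>

static int queue_fixed_read(struct io_uring *ring, int fd)
{
	static char buf[4096];
	struct iovec reg = { .iov_base = buf, .iov_len = sizeof(buf) };
	struct io_uring_sqe *sqe;
	int ret;

	ret = io_uring_register_buffers(ring, &reg, 1);
	if (ret)
		return ret;

	sqe = io_uring_get_sqe(ring);
	/* last argument is buf_index, validated against nr_user_bufs above */
	io_uring_prep_read_fixed(sqe, fd, buf, sizeof(buf), 0, 0);
	return io_uring_submit(ring);
}
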
 141
 142/*
 143 * Multishot read is prepared just like a normal read/write request, only
 144 * difference is that we set the MULTISHOT flag.
 145 */
 146int io_read_mshot_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 147{
 148	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 149	int ret;
 150
 151	/* must be used with provided buffers */
 152	if (!(req->flags & REQ_F_BUFFER_SELECT))
 153		return -EINVAL;
 154
 155	ret = io_prep_rw(req, sqe);
 156	if (unlikely(ret))
 157		return ret;
 158
 159	if (rw->addr || rw->len)
 160		return -EINVAL;
 161
 162	req->flags |= REQ_F_APOLL_MULTISHOT;
 163	return 0;
 164}
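
On the userspace side this maps to a multishot read SQE that names only a provided-buffer group, since addr and len must stay zero per the check above. A sketch assuming a liburing recent enough to ship io_uring_prep_read_multishot(); buffer group 3 is an arbitrary example and must already be populated:

#include <liburing.h>

static int queue_mshot_read(struct io_uring *ring, int fd)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	/* one SQE, many CQEs: each completion picks a buffer from group 3 */
	io_uring_prep_read_multishot(sqe, fd, 0, 0, 3);
	return io_uring_submit(ring);
}
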
 165
 166void io_readv_writev_cleanup(struct io_kiocb *req)
 167{
 168	struct io_async_rw *io = req->async_data;
 169
 170	kfree(io->free_iovec);
 171}
 172
 173static inline loff_t *io_kiocb_update_pos(struct io_kiocb *req)
 174{
 175	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 176
 177	if (rw->kiocb.ki_pos != -1)
 178		return &rw->kiocb.ki_pos;
 179
 180	if (!(req->file->f_mode & FMODE_STREAM)) {
 181		req->flags |= REQ_F_CUR_POS;
 182		rw->kiocb.ki_pos = req->file->f_pos;
 183		return &rw->kiocb.ki_pos;
 184	}
 185
 186	rw->kiocb.ki_pos = 0;
 187	return NULL;
 188}
 189
 190static void io_req_task_queue_reissue(struct io_kiocb *req)
 191{
 192	req->io_task_work.func = io_queue_iowq;
 193	io_req_task_work_add(req);
 194}
 195
 196#ifdef CONFIG_BLOCK
 197static bool io_resubmit_prep(struct io_kiocb *req)
 198{
 199	struct io_async_rw *io = req->async_data;
 200
 201	if (!req_has_async_data(req))
 202		return !io_req_prep_async(req);
 203	iov_iter_restore(&io->s.iter, &io->s.iter_state);
 204	return true;
 205}
 206
 207static bool io_rw_should_reissue(struct io_kiocb *req)
 208{
 209	umode_t mode = file_inode(req->file)->i_mode;
 210	struct io_ring_ctx *ctx = req->ctx;
 211
 212	if (!S_ISBLK(mode) && !S_ISREG(mode))
 213		return false;
 214	if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() &&
 215	    !(ctx->flags & IORING_SETUP_IOPOLL)))
 216		return false;
 217	/*
 218	 * If ref is dying, we might be running poll reap from the exit work.
 219	 * Don't attempt to reissue from that path, just let it fail with
 220	 * -EAGAIN.
 221	 */
 222	if (percpu_ref_is_dying(&ctx->refs))
 223		return false;
 224	/*
 225	 * Play it safe and assume not safe to re-import and reissue if we're
 226	 * not in the original thread group (or in task context).
 227	 */
 228	if (!same_thread_group(req->task, current) || !in_task())
 229		return false;
 230	return true;
 231}
 232#else
 233static bool io_resubmit_prep(struct io_kiocb *req)
 234{
 235	return false;
 236}
 237static bool io_rw_should_reissue(struct io_kiocb *req)
 238{
 239	return false;
 240}
 241#endif
 242
 243static void io_req_end_write(struct io_kiocb *req)
 244{
 245	if (req->flags & REQ_F_ISREG) {
 246		struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 247
 248		kiocb_end_write(&rw->kiocb);
 249	}
 250}
 251
 252/*
 253 * Trigger the notifications after having done some IO, and finish the write
 254 * accounting, if any.
 255 */
 256static void io_req_io_end(struct io_kiocb *req)
 257{
 258	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 259
 260	if (rw->kiocb.ki_flags & IOCB_WRITE) {
 261		io_req_end_write(req);
 262		fsnotify_modify(req->file);
 263	} else {
 264		fsnotify_access(req->file);
 265	}
 266}
 267
 268static bool __io_complete_rw_common(struct io_kiocb *req, long res)
 269{
 270	if (unlikely(res != req->cqe.res)) {
 271		if ((res == -EAGAIN || res == -EOPNOTSUPP) &&
 272		    io_rw_should_reissue(req)) {
 273			/*
 274			 * Reissue will start accounting again, finish the
 275			 * current cycle.
 276			 */
 277			io_req_io_end(req);
 278			req->flags |= REQ_F_REISSUE | REQ_F_BL_NO_RECYCLE;
 279			return true;
 280		}
 281		req_set_fail(req);
 282		req->cqe.res = res;
 283	}
 284	return false;
 285}
 286
 287static inline int io_fixup_rw_res(struct io_kiocb *req, long res)
 288{
 289	struct io_async_rw *io = req->async_data;
 290
 291	/* add previously done IO, if any */
 292	if (req_has_async_data(req) && io->bytes_done > 0) {
 293		if (res < 0)
 294			res = io->bytes_done;
 295		else
 296			res += io->bytes_done;
 297	}
 298	return res;
 299}
 300
 301void io_req_rw_complete(struct io_kiocb *req, struct io_tw_state *ts)
 302{
 303	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 304	struct kiocb *kiocb = &rw->kiocb;
 305
 306	if ((kiocb->ki_flags & IOCB_DIO_CALLER_COMP) && kiocb->dio_complete) {
 307		long res = kiocb->dio_complete(rw->kiocb.private);
 308
 309		io_req_set_res(req, io_fixup_rw_res(req, res), 0);
 310	}
 311
 312	io_req_io_end(req);
 313
 314	if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)) {
 315		unsigned issue_flags = ts->locked ? 0 : IO_URING_F_UNLOCKED;
 316
 317		req->cqe.flags |= io_put_kbuf(req, issue_flags);
 318	}
 319	io_req_task_complete(req, ts);
 320}
 321
 322static void io_complete_rw(struct kiocb *kiocb, long res)
 323{
 324	struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
 325	struct io_kiocb *req = cmd_to_io_kiocb(rw);
 326
 327	if (!kiocb->dio_complete || !(kiocb->ki_flags & IOCB_DIO_CALLER_COMP)) {
 328		if (__io_complete_rw_common(req, res))
 329			return;
 330		io_req_set_res(req, io_fixup_rw_res(req, res), 0);
 331	}
 332	req->io_task_work.func = io_req_rw_complete;
 333	__io_req_task_work_add(req, IOU_F_TWQ_LAZY_WAKE);
 334}
 335
 336static void io_complete_rw_iopoll(struct kiocb *kiocb, long res)
 337{
 338	struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
 339	struct io_kiocb *req = cmd_to_io_kiocb(rw);
 340
 341	if (kiocb->ki_flags & IOCB_WRITE)
 342		io_req_end_write(req);
 343	if (unlikely(res != req->cqe.res)) {
 344		if (res == -EAGAIN && io_rw_should_reissue(req)) {
 345			req->flags |= REQ_F_REISSUE | REQ_F_BL_NO_RECYCLE;
 346			return;
 347		}
 348		req->cqe.res = res;
 349	}
 350
 351	/* order with io_iopoll_complete() checking ->iopoll_completed */
 352	smp_store_release(&req->iopoll_completed, 1);
 353}
 354
 355static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
 356{
 357	/* IO was queued async, completion will happen later */
 358	if (ret == -EIOCBQUEUED)
 359		return;
 360
 361	/* transform internal restart error codes */
 362	if (unlikely(ret < 0)) {
 363		switch (ret) {
 364		case -ERESTARTSYS:
 365		case -ERESTARTNOINTR:
 366		case -ERESTARTNOHAND:
 367		case -ERESTART_RESTARTBLOCK:
 368			/*
 369			 * We can't just restart the syscall, since previously
 370			 * submitted sqes may already be in progress. Just fail
 371			 * this IO with EINTR.
 372			 */
 373			ret = -EINTR;
 374			break;
 375		}
 376	}
 377
 378	INDIRECT_CALL_2(kiocb->ki_complete, io_complete_rw_iopoll,
 379			io_complete_rw, kiocb, ret);
 380}
 381
 382static int kiocb_done(struct io_kiocb *req, ssize_t ret,
 383		       unsigned int issue_flags)
 384{
 385	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 386	unsigned final_ret = io_fixup_rw_res(req, ret);
 387
 388	if (ret >= 0 && req->flags & REQ_F_CUR_POS)
 389		req->file->f_pos = rw->kiocb.ki_pos;
 390	if (ret >= 0 && (rw->kiocb.ki_complete == io_complete_rw)) {
 391		if (!__io_complete_rw_common(req, ret)) {
 392			/*
 393			 * Safe to call io_end from here as we're inline
 394			 * from the submission path.
 395			 */
 396			io_req_io_end(req);
 397			io_req_set_res(req, final_ret,
 398				       io_put_kbuf(req, issue_flags));
 399			return IOU_OK;
 400		}
 401	} else {
 402		io_rw_done(&rw->kiocb, ret);
 403	}
 404
 405	if (req->flags & REQ_F_REISSUE) {
 406		req->flags &= ~REQ_F_REISSUE;
 407		if (io_resubmit_prep(req))
 408			io_req_task_queue_reissue(req);
 409		else
 410			io_req_task_queue_fail(req, final_ret);
 411	}
 412	return IOU_ISSUE_SKIP_COMPLETE;
 413}
 414
 415static struct iovec *__io_import_iovec(int ddir, struct io_kiocb *req,
 416				       struct io_rw_state *s,
 417				       unsigned int issue_flags)
 418{
 419	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 420	struct iov_iter *iter = &s->iter;
 421	u8 opcode = req->opcode;
 422	struct iovec *iovec;
 423	void __user *buf;
 424	size_t sqe_len;
 425	ssize_t ret;
 426
 427	if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
 428		ret = io_import_fixed(ddir, iter, req->imu, rw->addr, rw->len);
 429		if (ret)
 430			return ERR_PTR(ret);
 431		return NULL;
 432	}
 433
 434	buf = u64_to_user_ptr(rw->addr);
 435	sqe_len = rw->len;
 436
 437	if (!io_issue_defs[opcode].vectored || req->flags & REQ_F_BUFFER_SELECT) {
 438		if (io_do_buffer_select(req)) {
 439			buf = io_buffer_select(req, &sqe_len, issue_flags);
 440			if (!buf)
 441				return ERR_PTR(-ENOBUFS);
 442			rw->addr = (unsigned long) buf;
 443			rw->len = sqe_len;
 444		}
 445
 446		ret = import_ubuf(ddir, buf, sqe_len, iter);
 447		if (ret)
 448			return ERR_PTR(ret);
 449		return NULL;
 450	}
 451
 452	iovec = s->fast_iov;
 453	ret = __import_iovec(ddir, buf, sqe_len, UIO_FASTIOV, &iovec, iter,
 454			      req->ctx->compat);
 455	if (unlikely(ret < 0))
 456		return ERR_PTR(ret);
 457	return iovec;
 458}
 459
 460static inline int io_import_iovec(int rw, struct io_kiocb *req,
 461				  struct iovec **iovec, struct io_rw_state *s,
 462				  unsigned int issue_flags)
 463{
 464	*iovec = __io_import_iovec(rw, req, s, issue_flags);
 465	if (IS_ERR(*iovec))
 466		return PTR_ERR(*iovec);
 467
 468	iov_iter_save_state(&s->iter, &s->iter_state);
 469	return 0;
 470}
 471
 472static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
 473{
 474	return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? NULL : &kiocb->ki_pos;
 475}
 476
 477/*
 478 * For files that don't have ->read_iter() and ->write_iter(), handle them
 479 * by looping over ->read() or ->write() manually.
 480 */
 481static ssize_t loop_rw_iter(int ddir, struct io_rw *rw, struct iov_iter *iter)
 482{
 483	struct kiocb *kiocb = &rw->kiocb;
 484	struct file *file = kiocb->ki_filp;
 485	ssize_t ret = 0;
 486	loff_t *ppos;
 487
 488	/*
 489	 * Don't support polled IO through this interface, and we can't
 490	 * support non-blocking either. For the latter, this just causes
 491	 * the kiocb to be handled from an async context.
 492	 */
 493	if (kiocb->ki_flags & IOCB_HIPRI)
 494		return -EOPNOTSUPP;
 495	if ((kiocb->ki_flags & IOCB_NOWAIT) &&
 496	    !(kiocb->ki_filp->f_flags & O_NONBLOCK))
 497		return -EAGAIN;
 498
 499	ppos = io_kiocb_ppos(kiocb);
 500
 501	while (iov_iter_count(iter)) {
 502		void __user *addr;
 503		size_t len;
 504		ssize_t nr;
 505
 506		if (iter_is_ubuf(iter)) {
 507			addr = iter->ubuf + iter->iov_offset;
 508			len = iov_iter_count(iter);
 509		} else if (!iov_iter_is_bvec(iter)) {
 510			addr = iter_iov_addr(iter);
 511			len = iter_iov_len(iter);
 512		} else {
 513			addr = u64_to_user_ptr(rw->addr);
 514			len = rw->len;
 515		}
 516
 517		if (ddir == READ)
 518			nr = file->f_op->read(file, addr, len, ppos);
 519		else
 520			nr = file->f_op->write(file, addr, len, ppos);
 521
 522		if (nr < 0) {
 523			if (!ret)
 524				ret = nr;
 525			break;
 526		}
 527		ret += nr;
 528		if (!iov_iter_is_bvec(iter)) {
 529			iov_iter_advance(iter, nr);
 530		} else {
 531			rw->addr += nr;
 532			rw->len -= nr;
 533			if (!rw->len)
 534				break;
 535		}
 536		if (nr != len)
 537			break;
 538	}
 539
 540	return ret;
 541}
 542
 543static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec,
 544			  const struct iovec *fast_iov, struct iov_iter *iter)
 545{
 546	struct io_async_rw *io = req->async_data;
 547
 548	memcpy(&io->s.iter, iter, sizeof(*iter));
 549	io->free_iovec = iovec;
 550	io->bytes_done = 0;
 551	/* can only be fixed buffers, no need to do anything */
 552	if (iov_iter_is_bvec(iter) || iter_is_ubuf(iter))
 553		return;
 554	if (!iovec) {
 555		unsigned iov_off = 0;
 556
 557		io->s.iter.__iov = io->s.fast_iov;
 558		if (iter->__iov != fast_iov) {
 559			iov_off = iter_iov(iter) - fast_iov;
 560			io->s.iter.__iov += iov_off;
 561		}
 562		if (io->s.fast_iov != fast_iov)
 563			memcpy(io->s.fast_iov + iov_off, fast_iov + iov_off,
 564			       sizeof(struct iovec) * iter->nr_segs);
 565	} else {
 566		req->flags |= REQ_F_NEED_CLEANUP;
 567	}
 568}
 569
 570static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
 571			     struct io_rw_state *s, bool force)
 572{
 573	if (!force && !io_cold_defs[req->opcode].prep_async)
 574		return 0;
 575	/* opcode type doesn't need async data */
 576	if (!io_cold_defs[req->opcode].async_size)
 577		return 0;
 578	if (!req_has_async_data(req)) {
 579		struct io_async_rw *iorw;
 580
 581		if (io_alloc_async_data(req)) {
 582			kfree(iovec);
 583			return -ENOMEM;
 584		}
 585
 586		io_req_map_rw(req, iovec, s->fast_iov, &s->iter);
 587		iorw = req->async_data;
 588		/* we've copied and mapped the iter, ensure state is saved */
 589		iov_iter_save_state(&iorw->s.iter, &iorw->s.iter_state);
 590	}
 591	return 0;
 592}
 593
 594static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
 595{
 596	struct io_async_rw *iorw = req->async_data;
 597	struct iovec *iov;
 598	int ret;
 599
 600	iorw->bytes_done = 0;
 601	iorw->free_iovec = NULL;
 602
 603	/* submission path, ->uring_lock should already be taken */
 604	ret = io_import_iovec(rw, req, &iov, &iorw->s, 0);
 605	if (unlikely(ret < 0))
 606		return ret;
 607
 608	if (iov) {
 609		iorw->free_iovec = iov;
 610		req->flags |= REQ_F_NEED_CLEANUP;
 611	}
 612
 613	return 0;
 614}
 615
 616int io_readv_prep_async(struct io_kiocb *req)
 617{
 618	return io_rw_prep_async(req, ITER_DEST);
 619}
 620
 621int io_writev_prep_async(struct io_kiocb *req)
 622{
 623	return io_rw_prep_async(req, ITER_SOURCE);
 624}
 625
 626/*
 627 * This is our waitqueue callback handler, registered through __folio_lock_async()
  628 * when we initially tried to do the IO with the iocb armed with our waitqueue.
 629 * This gets called when the page is unlocked, and we generally expect that to
 630 * happen when the page IO is completed and the page is now uptodate. This will
 631 * queue a task_work based retry of the operation, attempting to copy the data
 632 * again. If the latter fails because the page was NOT uptodate, then we will
 633 * do a thread based blocking retry of the operation. That's the unexpected
 634 * slow path.
 635 */
 636static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
 637			     int sync, void *arg)
 638{
 639	struct wait_page_queue *wpq;
 640	struct io_kiocb *req = wait->private;
 641	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 642	struct wait_page_key *key = arg;
 643
 644	wpq = container_of(wait, struct wait_page_queue, wait);
 645
 646	if (!wake_page_match(wpq, key))
 647		return 0;
 648
 649	rw->kiocb.ki_flags &= ~IOCB_WAITQ;
 650	list_del_init(&wait->entry);
 651	io_req_task_queue(req);
 652	return 1;
 653}
 654
 655/*
 656 * This controls whether a given IO request should be armed for async page
 657 * based retry. If we return false here, the request is handed to the async
 658 * worker threads for retry. If we're doing buffered reads on a regular file,
 659 * we prepare a private wait_page_queue entry and retry the operation. This
 660 * will either succeed because the page is now uptodate and unlocked, or it
 661 * will register a callback when the page is unlocked at IO completion. Through
 662 * that callback, io_uring uses task_work to setup a retry of the operation.
 663 * That retry will attempt the buffered read again. The retry will generally
 664 * succeed, or in rare cases where it fails, we then fall back to using the
 665 * async worker threads for a blocking retry.
 666 */
 667static bool io_rw_should_retry(struct io_kiocb *req)
 668{
 669	struct io_async_rw *io = req->async_data;
 670	struct wait_page_queue *wait = &io->wpq;
 671	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 672	struct kiocb *kiocb = &rw->kiocb;
 673
 674	/* never retry for NOWAIT, we just complete with -EAGAIN */
 675	if (req->flags & REQ_F_NOWAIT)
 676		return false;
 677
 678	/* Only for buffered IO */
 679	if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI))
 680		return false;
 681
 682	/*
 683	 * just use poll if we can, and don't attempt if the fs doesn't
 684	 * support callback based unlocks
 685	 */
 686	if (io_file_can_poll(req) || !(req->file->f_mode & FMODE_BUF_RASYNC))
 687		return false;
 688
 689	wait->wait.func = io_async_buf_func;
 690	wait->wait.private = req;
 691	wait->wait.flags = 0;
 692	INIT_LIST_HEAD(&wait->wait.entry);
 693	kiocb->ki_flags |= IOCB_WAITQ;
 694	kiocb->ki_flags &= ~IOCB_NOWAIT;
 695	kiocb->ki_waitq = wait;
 696	return true;
 697}
 698
 699static inline int io_iter_do_read(struct io_rw *rw, struct iov_iter *iter)
 700{
 701	struct file *file = rw->kiocb.ki_filp;
 702
 703	if (likely(file->f_op->read_iter))
 704		return call_read_iter(file, &rw->kiocb, iter);
 705	else if (file->f_op->read)
 706		return loop_rw_iter(READ, rw, iter);
 707	else
 708		return -EINVAL;
 709}
 710
 711static bool need_complete_io(struct io_kiocb *req)
 712{
 713	return req->flags & REQ_F_ISREG ||
 714		S_ISBLK(file_inode(req->file)->i_mode);
 715}
 716
 717static int io_rw_init_file(struct io_kiocb *req, fmode_t mode)
 718{
 719	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 720	struct kiocb *kiocb = &rw->kiocb;
 721	struct io_ring_ctx *ctx = req->ctx;
 722	struct file *file = req->file;
 723	int ret;
 724
 725	if (unlikely(!(file->f_mode & mode)))
 726		return -EBADF;
 727
 728	if (!(req->flags & REQ_F_FIXED_FILE))
 729		req->flags |= io_file_get_flags(file);
 730
 731	kiocb->ki_flags = file->f_iocb_flags;
 732	ret = kiocb_set_rw_flags(kiocb, rw->flags);
 733	if (unlikely(ret))
 734		return ret;
 735	kiocb->ki_flags |= IOCB_ALLOC_CACHE;
 736
 737	/*
 738	 * If the file is marked O_NONBLOCK, still allow retry for it if it
 739	 * supports async. Otherwise it's impossible to use O_NONBLOCK files
  740	 * reliably. If not, or if IOCB_NOWAIT is set, don't retry.
 741	 */
 742	if ((kiocb->ki_flags & IOCB_NOWAIT) ||
 743	    ((file->f_flags & O_NONBLOCK) && !io_file_supports_nowait(req)))
 744		req->flags |= REQ_F_NOWAIT;
 745
 746	if (ctx->flags & IORING_SETUP_IOPOLL) {
 747		if (!(kiocb->ki_flags & IOCB_DIRECT) || !file->f_op->iopoll)
 748			return -EOPNOTSUPP;
 749
 750		kiocb->private = NULL;
 751		kiocb->ki_flags |= IOCB_HIPRI;
 752		kiocb->ki_complete = io_complete_rw_iopoll;
 753		req->iopoll_completed = 0;
 754	} else {
 755		if (kiocb->ki_flags & IOCB_HIPRI)
 756			return -EINVAL;
 757		kiocb->ki_complete = io_complete_rw;
 758	}
 759
 760	return 0;
 761}
 762
 763static int __io_read(struct io_kiocb *req, unsigned int issue_flags)
 764{
 765	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 766	struct io_rw_state __s, *s = &__s;
 767	struct iovec *iovec;
 768	struct kiocb *kiocb = &rw->kiocb;
 769	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
 770	struct io_async_rw *io;
 771	ssize_t ret, ret2;
 772	loff_t *ppos;
 773
 774	if (!req_has_async_data(req)) {
 775		ret = io_import_iovec(ITER_DEST, req, &iovec, s, issue_flags);
 776		if (unlikely(ret < 0))
 777			return ret;
 778	} else {
 779		io = req->async_data;
 780		s = &io->s;
 781
 782		/*
 783		 * Safe and required to re-import if we're using provided
 784		 * buffers, as we dropped the selected one before retry.
 785		 */
 786		if (io_do_buffer_select(req)) {
 787			ret = io_import_iovec(ITER_DEST, req, &iovec, s, issue_flags);
 788			if (unlikely(ret < 0))
 789				return ret;
 790		}
 791
 792		/*
 793		 * We come here from an earlier attempt, restore our state to
 794		 * match in case it doesn't. It's cheap enough that we don't
 795		 * need to make this conditional.
 796		 */
 797		iov_iter_restore(&s->iter, &s->iter_state);
 798		iovec = NULL;
 799	}
 800	ret = io_rw_init_file(req, FMODE_READ);
 801	if (unlikely(ret)) {
 802		kfree(iovec);
 803		return ret;
 804	}
 805	req->cqe.res = iov_iter_count(&s->iter);
 806
 807	if (force_nonblock) {
 808		/* If the file doesn't support async, just async punt */
 809		if (unlikely(!io_file_supports_nowait(req))) {
 810			ret = io_setup_async_rw(req, iovec, s, true);
 811			return ret ?: -EAGAIN;
 812		}
 813		kiocb->ki_flags |= IOCB_NOWAIT;
 814	} else {
 815		/* Ensure we clear previously set non-block flag */
 816		kiocb->ki_flags &= ~IOCB_NOWAIT;
 817	}
 818
 819	ppos = io_kiocb_update_pos(req);
 820
 821	ret = rw_verify_area(READ, req->file, ppos, req->cqe.res);
 822	if (unlikely(ret)) {
 823		kfree(iovec);
 824		return ret;
 825	}
 826
 827	ret = io_iter_do_read(rw, &s->iter);
 828
 829	if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
 830		req->flags &= ~REQ_F_REISSUE;
 831		/*
 832		 * If we can poll, just do that. For a vectored read, we'll
 833		 * need to copy state first.
 834		 */
 835		if (io_file_can_poll(req) && !io_issue_defs[req->opcode].vectored)
 836			return -EAGAIN;
 837		/* IOPOLL retry should happen for io-wq threads */
 838		if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
 839			goto done;
 840		/* no retry on NONBLOCK nor RWF_NOWAIT */
 841		if (req->flags & REQ_F_NOWAIT)
 842			goto done;
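		/* will retry below; clear ret so no bytes are accounted for this attempt */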
 843		ret = 0;
 844	} else if (ret == -EIOCBQUEUED) {
 845		if (iovec)
 846			kfree(iovec);
 847		return IOU_ISSUE_SKIP_COMPLETE;
 848	} else if (ret == req->cqe.res || ret <= 0 || !force_nonblock ||
 849		   (req->flags & REQ_F_NOWAIT) || !need_complete_io(req)) {
 850		/* read all, failed, already did sync or don't want to retry */
 851		goto done;
 852	}
 853
 854	/*
 855	 * Don't depend on the iter state matching what was consumed, or being
 856	 * untouched in case of error. Restore it and we'll advance it
 857	 * manually if we need to.
 858	 */
 859	iov_iter_restore(&s->iter, &s->iter_state);
 860
 861	ret2 = io_setup_async_rw(req, iovec, s, true);
 862	iovec = NULL;
 863	if (ret2) {
 864		ret = ret > 0 ? ret : ret2;
 865		goto done;
 866	}
 867
 868	io = req->async_data;
 869	s = &io->s;
 870	/*
 871	 * Now use our persistent iterator and state, if we aren't already.
 872	 * We've restored and mapped the iter to match.
 873	 */
 874
 875	do {
 876		/*
 877		 * We end up here because of a partial read, either from
 878		 * above or inside this loop. Advance the iter by the bytes
 879		 * that were consumed.
 880		 */
 881		iov_iter_advance(&s->iter, ret);
 882		if (!iov_iter_count(&s->iter))
 883			break;
 884		io->bytes_done += ret;
 885		iov_iter_save_state(&s->iter, &s->iter_state);
 886
 887		/* if we can retry, do so with the callbacks armed */
 888		if (!io_rw_should_retry(req)) {
 889			kiocb->ki_flags &= ~IOCB_WAITQ;
 890			return -EAGAIN;
 891		}
 892
 893		req->cqe.res = iov_iter_count(&s->iter);
 894		/*
 895		 * Now retry read with the IOCB_WAITQ parts set in the iocb. If
 896		 * we get -EIOCBQUEUED, then we'll get a notification when the
 897		 * desired page gets unlocked. We can also get a partial read
 898		 * here, and if we do, then just retry at the new offset.
 899		 */
 900		ret = io_iter_do_read(rw, &s->iter);
 901		if (ret == -EIOCBQUEUED)
 902			return IOU_ISSUE_SKIP_COMPLETE;
 903		/* we got some bytes, but not all. retry. */
 904		kiocb->ki_flags &= ~IOCB_WAITQ;
 905		iov_iter_restore(&s->iter, &s->iter_state);
 906	} while (ret > 0);
 907done:
 908	/* it's faster to check here than to delegate to kfree */
 909	if (iovec)
 910		kfree(iovec);
 911	return ret;
 912}
 913
 914int io_read(struct io_kiocb *req, unsigned int issue_flags)
 915{
 916	int ret;
 917
 918	ret = __io_read(req, issue_flags);
 919	if (ret >= 0)
 920		return kiocb_done(req, ret, issue_flags);
 921
 922	return ret;
 923}
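
For the common single-shot case, the request reaching io_read() comes from an ordinary IORING_OP_READ SQE. A minimal liburing sketch follows; the file descriptor, buffer and length are caller-supplied placeholders. Note that for regular files the need_complete_io() handling above means a short read is finished off in the kernel rather than surfaced as a partial completion.

	/* Hypothetical sketch: one single-shot read through io_uring. */
	#include <liburing.h>

	static int read_example(struct io_uring *ring, int fd, void *buf,
				unsigned int len, __u64 offset)
	{
		struct io_uring_sqe *sqe;
		struct io_uring_cqe *cqe;
		int ret;

		sqe = io_uring_get_sqe(ring);
		if (!sqe)
			return -1;		/* SQ ring full: submit and retry */
		io_uring_prep_read(sqe, fd, buf, len, offset);
		io_uring_submit(ring);

		ret = io_uring_wait_cqe(ring, &cqe);
		if (ret < 0)
			return ret;
		ret = cqe->res;			/* bytes read, or -errno */
		io_uring_cqe_seen(ring, cqe);
		return ret;
	}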
 924
 925int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
 926{
 927	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 928	unsigned int cflags = 0;
 929	int ret;
 930
 931	/*
 932	 * Multishot MUST be used on a pollable file
 933	 */
 934	if (!io_file_can_poll(req))
 935		return -EBADFD;
 936
 937	ret = __io_read(req, issue_flags);
 938
 939	/*
 940	 * If the file doesn't support proper NOWAIT, then disable multishot
 941	 * and stay in single shot mode.
 942	 */
 943	if (!io_file_supports_nowait(req))
 944		req->flags &= ~REQ_F_APOLL_MULTISHOT;
 945
 946	/*
 947	 * If we get -EAGAIN, recycle our buffer and just let normal poll
 948	 * handling arm it.
 949	 */
 950	if (ret == -EAGAIN) {
 951		/*
 952		 * Reset rw->len to 0 again to avoid clamping future mshot
 953		 * reads, in case the buffer size varies.
 954		 */
 955		if (io_kbuf_recycle(req, issue_flags))
 956			rw->len = 0;
 957		if (issue_flags & IO_URING_F_MULTISHOT)
 958			return IOU_ISSUE_SKIP_COMPLETE;
 959		return -EAGAIN;
 960	}
 961
 962	/*
 963	 * Any successful return value will keep the multishot read armed.
 964	 */
 965	if (ret > 0 && req->flags & REQ_F_APOLL_MULTISHOT) {
 966		/*
 967		 * Put our buffer and post a CQE. If we fail to post a CQE, then
 968		 * jump to the termination path. This request is then done.
 969		 */
 970		cflags = io_put_kbuf(req, issue_flags);
 971		rw->len = 0; /* similarly to above, reset len to 0 */
 972
 973		if (io_fill_cqe_req_aux(req,
 974					issue_flags & IO_URING_F_COMPLETE_DEFER,
 975					ret, cflags | IORING_CQE_F_MORE)) {
 976			if (issue_flags & IO_URING_F_MULTISHOT) {
 977				/*
 978				 * Force retry, as we might have more data to
 979				 * be read and otherwise it won't get retried
 980				 * until (if ever) another poll is triggered.
 981				 */
 982				io_poll_multishot_retry(req);
 983				return IOU_ISSUE_SKIP_COMPLETE;
 984			}
 985			return -EAGAIN;
 986		}
 987	}
 988
 989	/*
 990	 * Either an error, or we've hit overflow posting the CQE. For any
 991	 * multishot request, hitting overflow will terminate it.
 992	 */
 993	io_req_set_res(req, ret, cflags);
 994	if (issue_flags & IO_URING_F_MULTISHOT)
 995		return IOU_STOP_MULTISHOT;
 996	return IOU_OK;
 997}
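
io_read_mshot() refuses non-pollable files with -EBADFD and relies on provided buffers, so the userspace side pairs a multishot read with a buffer ring and a pollable descriptor such as a pipe or socket. A hedged sketch follows, assuming liburing 2.5+ for io_uring_prep_read_multishot() and io_uring_setup_buf_ring(); the buffer group id, buffer count and sizes are arbitrary choices, and passing 0 as the length assumes the selected buffer's size caps each read (matching the rw->len = 0 resets above).

	/* Hypothetical sketch: multishot read from a pipe with a provided buffer ring. */
	#include <stdlib.h>
	#include <liburing.h>

	#define BGID	1	/* arbitrary buffer group id */
	#define NBUFS	8
	#define BUFSZ	4096

	static int mshot_read_example(struct io_uring *ring, int pipe_rd)
	{
		struct io_uring_buf_ring *br;
		struct io_uring_sqe *sqe;
		struct io_uring_cqe *cqe;
		char *bufs;
		int i, ret;

		br = io_uring_setup_buf_ring(ring, NBUFS, BGID, 0, &ret);
		if (!br)
			return ret;
		bufs = malloc(NBUFS * BUFSZ);
		if (!bufs)
			return -1;
		for (i = 0; i < NBUFS; i++)
			io_uring_buf_ring_add(br, bufs + i * BUFSZ, BUFSZ, i,
					      io_uring_buf_ring_mask(NBUFS), i);
		io_uring_buf_ring_advance(br, NBUFS);

		sqe = io_uring_get_sqe(ring);
		io_uring_prep_read_multishot(sqe, pipe_rd, 0, 0, BGID);
		io_uring_submit(ring);

		for (;;) {
			int res;
			unsigned int flags;

			ret = io_uring_wait_cqe(ring, &cqe);
			if (ret < 0)
				return ret;
			res = cqe->res;
			flags = cqe->flags;
			io_uring_cqe_seen(ring, cqe);
			/* without IORING_CQE_F_MORE the multishot read has terminated */
			if (res < 0 || !(flags & IORING_CQE_F_MORE))
				return res;
			/* res bytes are in buffer (flags >> IORING_CQE_BUFFER_SHIFT);
			 * re-add the buffer to the ring once it has been consumed */
		}
	}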
 998
 999int io_write(struct io_kiocb *req, unsigned int issue_flags)
1000{
1001	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
1002	struct io_rw_state __s, *s = &__s;
1003	struct iovec *iovec;
1004	struct kiocb *kiocb = &rw->kiocb;
1005	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
1006	ssize_t ret, ret2;
1007	loff_t *ppos;
1008
1009	if (!req_has_async_data(req)) {
1010		ret = io_import_iovec(ITER_SOURCE, req, &iovec, s, issue_flags);
1011		if (unlikely(ret < 0))
1012			return ret;
1013	} else {
1014		struct io_async_rw *io = req->async_data;
1015
1016		s = &io->s;
1017		iov_iter_restore(&s->iter, &s->iter_state);
1018		iovec = NULL;
1019	}
1020	ret = io_rw_init_file(req, FMODE_WRITE);
1021	if (unlikely(ret)) {
1022		kfree(iovec);
1023		return ret;
1024	}
1025	req->cqe.res = iov_iter_count(&s->iter);
1026
1027	if (force_nonblock) {
1028		/* If the file doesn't support async, just async punt */
1029		if (unlikely(!io_file_supports_nowait(req)))
1030			goto copy_iov;
1031
 1032		/* Buffered (non-direct) writes only support NOWAIT on block devices or files with FMODE_BUF_WASYNC; punt other regular files. */
1033		if (!(kiocb->ki_flags & IOCB_DIRECT) &&
1034			!(kiocb->ki_filp->f_mode & FMODE_BUF_WASYNC) &&
1035			(req->flags & REQ_F_ISREG))
1036			goto copy_iov;
1037
1038		kiocb->ki_flags |= IOCB_NOWAIT;
1039	} else {
1040		/* Ensure we clear previously set non-block flag */
1041		kiocb->ki_flags &= ~IOCB_NOWAIT;
1042	}
1043
1044	ppos = io_kiocb_update_pos(req);
1045
1046	ret = rw_verify_area(WRITE, req->file, ppos, req->cqe.res);
1047	if (unlikely(ret)) {
1048		kfree(iovec);
1049		return ret;
1050	}
1051
1052	if (req->flags & REQ_F_ISREG)
1053		kiocb_start_write(kiocb);
1054	kiocb->ki_flags |= IOCB_WRITE;
1055
1056	if (likely(req->file->f_op->write_iter))
1057		ret2 = call_write_iter(req->file, kiocb, &s->iter);
1058	else if (req->file->f_op->write)
1059		ret2 = loop_rw_iter(WRITE, rw, &s->iter);
1060	else
1061		ret2 = -EINVAL;
1062
1063	if (req->flags & REQ_F_REISSUE) {
1064		req->flags &= ~REQ_F_REISSUE;
1065		ret2 = -EAGAIN;
1066	}
1067
1068	/*
1069	 * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
1070	 * retry them without IOCB_NOWAIT.
1071	 */
1072	if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
1073		ret2 = -EAGAIN;
1074	/* no retry on NONBLOCK nor RWF_NOWAIT */
1075	if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT))
1076		goto done;
1077	if (!force_nonblock || ret2 != -EAGAIN) {
1078		/* IOPOLL retry should happen for io-wq threads */
1079		if (ret2 == -EAGAIN && (req->ctx->flags & IORING_SETUP_IOPOLL))
1080			goto copy_iov;
1081
1082		if (ret2 != req->cqe.res && ret2 >= 0 && need_complete_io(req)) {
1083			struct io_async_rw *io;
1084
1085			trace_io_uring_short_write(req->ctx, kiocb->ki_pos - ret2,
1086						req->cqe.res, ret2);
1087
1088			/* This is a partial write. The file pos has already been
 1089			 * updated; set up the async struct to complete the request
1090			 * in the worker. Also update bytes_done to account for
1091			 * the bytes already written.
1092			 */
1093			iov_iter_save_state(&s->iter, &s->iter_state);
1094			ret = io_setup_async_rw(req, iovec, s, true);
1095
1096			io = req->async_data;
1097			if (io)
1098				io->bytes_done += ret2;
1099
1100			if (kiocb->ki_flags & IOCB_WRITE)
1101				io_req_end_write(req);
1102			return ret ? ret : -EAGAIN;
1103		}
1104done:
1105		ret = kiocb_done(req, ret2, issue_flags);
1106	} else {
1107copy_iov:
1108		iov_iter_restore(&s->iter, &s->iter_state);
1109		ret = io_setup_async_rw(req, iovec, s, false);
1110		if (!ret) {
1111			if (kiocb->ki_flags & IOCB_WRITE)
1112				io_req_end_write(req);
1113			return -EAGAIN;
1114		}
1115		return ret;
1116	}
1117	/* it's reportedly faster than delegating the null check to kfree() */
1118	if (iovec)
1119		kfree(iovec);
1120	return ret;
1121}
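
From userspace the buffered-write punt above (a regular file without FMODE_BUF_WASYNC) is invisible: the SQE is the same and the request simply completes from an io-wq worker instead of inline. A minimal sketch of a single IORING_OP_WRITE with liburing; the descriptor, buffer and length are placeholders.

	/* Hypothetical sketch: one write through io_uring; short writes on regular
	 * files are completed in the kernel (see need_complete_io() above).
	 */
	#include <liburing.h>

	static int write_example(struct io_uring *ring, int fd, const void *buf,
				 unsigned int len, __u64 offset)
	{
		struct io_uring_sqe *sqe;
		struct io_uring_cqe *cqe;
		int ret;

		sqe = io_uring_get_sqe(ring);
		if (!sqe)
			return -1;		/* SQ ring full: submit and retry */
		io_uring_prep_write(sqe, fd, buf, len, offset);
		io_uring_submit(ring);

		ret = io_uring_wait_cqe(ring, &cqe);
		if (ret < 0)
			return ret;
		ret = cqe->res;			/* bytes written, or -errno */
		io_uring_cqe_seen(ring, cqe);
		return ret;
	}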
1122
1123void io_rw_fail(struct io_kiocb *req)
1124{
1125	int res;
1126
1127	res = io_fixup_rw_res(req, req->cqe.res);
1128	io_req_set_res(req, res, req->cqe.flags);
1129}
1130
1131int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
1132{
1133	struct io_wq_work_node *pos, *start, *prev;
1134	unsigned int poll_flags = 0;
1135	DEFINE_IO_COMP_BATCH(iob);
1136	int nr_events = 0;
1137
1138	/*
1139	 * Only spin for completions if we don't have multiple devices hanging
1140	 * off our complete list.
1141	 */
1142	if (ctx->poll_multi_queue || force_nonspin)
1143		poll_flags |= BLK_POLL_ONESHOT;
1144
1145	wq_list_for_each(pos, start, &ctx->iopoll_list) {
1146		struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);
1147		struct file *file = req->file;
1148		int ret;
1149
1150		/*
1151		 * Move completed and retryable entries to our local lists.
1152		 * If we find a request that requires polling, break out
1153		 * and complete those lists first, if we have entries there.
1154		 */
1155		if (READ_ONCE(req->iopoll_completed))
1156			break;
1157
1158		if (req->opcode == IORING_OP_URING_CMD) {
1159			struct io_uring_cmd *ioucmd;
1160
1161			ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
1162			ret = file->f_op->uring_cmd_iopoll(ioucmd, &iob,
1163								poll_flags);
1164		} else {
1165			struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
1166
1167			ret = file->f_op->iopoll(&rw->kiocb, &iob, poll_flags);
1168		}
1169		if (unlikely(ret < 0))
1170			return ret;
1171		else if (ret)
1172			poll_flags |= BLK_POLL_ONESHOT;
1173
1174		/* iopoll may have completed current req */
1175		if (!rq_list_empty(iob.req_list) ||
1176		    READ_ONCE(req->iopoll_completed))
1177			break;
1178	}
1179
1180	if (!rq_list_empty(iob.req_list))
1181		iob.complete(&iob);
1182	else if (!pos)
1183		return 0;
1184
1185	prev = start;
1186	wq_list_for_each_resume(pos, prev) {
1187		struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);
1188
1189		/* order with io_complete_rw_iopoll(), e.g. ->result updates */
1190		if (!smp_load_acquire(&req->iopoll_completed))
1191			break;
1192		nr_events++;
1193		req->cqe.flags = io_put_kbuf(req, 0);
1194	}
1195	if (unlikely(!nr_events))
1196		return 0;
1197
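	/* detach the completed requests from the iopoll list and flush their CQEs */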
1198	pos = start ? start->next : ctx->iopoll_list.first;
1199	wq_list_cut(&ctx->iopoll_list, prev, start);
1200
1201	if (WARN_ON_ONCE(!wq_list_empty(&ctx->submit_state.compl_reqs)))
1202		return 0;
1203	ctx->submit_state.compl_reqs.first = pos;
1204	__io_submit_flush_completions(ctx);
1205	return nr_events;
1206}
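
io_do_iopoll() runs when userspace asks for completions (io_uring_enter() with IORING_ENTER_GETEVENTS) on an IORING_SETUP_IOPOLL ring, so the reaping side is an ordinary completion loop; the difference is that the kernel busy-polls the device here instead of sleeping on an interrupt-driven completion. A hedged sketch of draining such a ring with liburing, where count is however many requests were submitted earlier:

	/* Hypothetical sketch: reap 'count' completions from an IOPOLL ring.
	 * Peeking/waiting on such a ring may enter the kernel and drive
	 * io_do_iopoll() above to reap device completions.
	 */
	#include <liburing.h>

	static int reap_iopoll_completions(struct io_uring *ring, int count)
	{
		struct io_uring_cqe *cqe;
		int done = 0, ret;

		while (done < count) {
			ret = io_uring_peek_cqe(ring, &cqe);
			if (ret == -EAGAIN)
				continue;	/* nothing reaped yet, poll again */
			if (ret < 0)
				return ret;
			if (cqe->res < 0)
				ret = cqe->res;
			io_uring_cqe_seen(ring, cqe);
			if (ret < 0)
				return ret;	/* surface the first failed request */
			done++;
		}
		return done;
	}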