   1// SPDX-License-Identifier: GPL-2.0
   2#include <linux/kernel.h>
   3#include <linux/errno.h>
   4#include <linux/file.h>
   5#include <linux/slab.h>
   6#include <linux/net.h>
   7#include <linux/compat.h>
   8#include <net/compat.h>
   9#include <linux/io_uring.h>
  10
  11#include <uapi/linux/io_uring.h>
  12
  13#include "io_uring.h"
  14#include "kbuf.h"
  15#include "alloc_cache.h"
  16#include "net.h"
  17#include "notif.h"
  18#include "rsrc.h"
  19
  20#if defined(CONFIG_NET)
  21struct io_shutdown {
  22	struct file			*file;
  23	int				how;
  24};
  25
  26struct io_accept {
  27	struct file			*file;
  28	struct sockaddr __user		*addr;
  29	int __user			*addr_len;
  30	int				flags;
  31	int				iou_flags;
  32	u32				file_slot;
  33	unsigned long			nofile;
  34};
  35
  36struct io_socket {
  37	struct file			*file;
  38	int				domain;
  39	int				type;
  40	int				protocol;
  41	int				flags;
  42	u32				file_slot;
  43	unsigned long			nofile;
  44};
  45
  46struct io_connect {
  47	struct file			*file;
  48	struct sockaddr __user		*addr;
  49	int				addr_len;
  50	bool				in_progress;
  51	bool				seen_econnaborted;
  52};
  53
  54struct io_bind {
  55	struct file			*file;
  56	int				addr_len;
  57};
  58
  59struct io_listen {
  60	struct file			*file;
  61	int				backlog;
  62};
  63
  64struct io_sr_msg {
  65	struct file			*file;
  66	union {
  67		struct compat_msghdr __user	*umsg_compat;
  68		struct user_msghdr __user	*umsg;
  69		void __user			*buf;
  70	};
  71	int				len;
  72	unsigned			done_io;
  73	unsigned			msg_flags;
  74	unsigned			nr_multishot_loops;
  75	u16				flags;
  76	/* initialised and used only by !msg send variants */
  77	u16				buf_group;
  78	u16				buf_index;
  79	void __user			*msg_control;
  80	/* used only for send zerocopy */
  81	struct io_kiocb 		*notif;
  82};
  83
  84/*
  85 * Number of times we'll try and do receives if there's more data. If we
  86 * exceed this limit, then add us to the back of the queue and retry from
   87 * there. This preserves fairness between clients that flood us with data.
  88 */
  89#define MULTISHOT_MAX_RETRY	32
  90
  91int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
  92{
  93	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
  94
  95	if (unlikely(sqe->off || sqe->addr || sqe->rw_flags ||
  96		     sqe->buf_index || sqe->splice_fd_in))
  97		return -EINVAL;
  98
  99	shutdown->how = READ_ONCE(sqe->len);
 100	req->flags |= REQ_F_FORCE_ASYNC;
 101	return 0;
 102}
 103
 104int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
 105{
 106	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
 107	struct socket *sock;
 108	int ret;
 109
 110	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
 111
 112	sock = sock_from_file(req->file);
 113	if (unlikely(!sock))
 114		return -ENOTSOCK;
 115
 116	ret = __sys_shutdown_sock(sock, shutdown->how);
 117	io_req_set_res(req, ret, 0);
 118	return IOU_OK;
 119}
 120
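/*
 * A minimal descriptive note on the helper below: retry a short transfer
 * only if MSG_WAITALL was requested and the socket type can make progress
 * on the remainder (stream or seqpacket).
 */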
 121static bool io_net_retry(struct socket *sock, int flags)
 122{
 123	if (!(flags & MSG_WAITALL))
 124		return false;
 125	return sock->type == SOCK_STREAM || sock->type == SOCK_SEQPACKET;
 126}
 127
 128static void io_netmsg_iovec_free(struct io_async_msghdr *kmsg)
 129{
 130	if (kmsg->free_iov) {
 131		kfree(kmsg->free_iov);
 132		kmsg->free_iov_nr = 0;
 133		kmsg->free_iov = NULL;
 134	}
 135}
 136
 137static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
 138{
 139	struct io_async_msghdr *hdr = req->async_data;
 140	struct iovec *iov;
 141
 142	/* can't recycle, ensure we free the iovec if we have one */
 143	if (unlikely(issue_flags & IO_URING_F_UNLOCKED)) {
 144		io_netmsg_iovec_free(hdr);
 145		return;
 146	}
 147
 148	/* Let normal cleanup path reap it if we fail adding to the cache */
 149	iov = hdr->free_iov;
 150	if (io_alloc_cache_put(&req->ctx->netmsg_cache, hdr)) {
 151		if (iov)
 152			kasan_mempool_poison_object(iov);
 153		req->async_data = NULL;
 154		req->flags &= ~REQ_F_ASYNC_DATA;
 155	}
 156}
 157
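/*
 * Get an io_async_msghdr from the per-ring cache if one is available,
 * otherwise allocate a fresh one. A cached entry may still own an iovec
 * from a previous use, in which case the request is flagged for cleanup
 * so that iovec is freed or recycled later.
 */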
 158static struct io_async_msghdr *io_msg_alloc_async(struct io_kiocb *req)
 159{
 160	struct io_ring_ctx *ctx = req->ctx;
 161	struct io_async_msghdr *hdr;
 162
 163	hdr = io_alloc_cache_get(&ctx->netmsg_cache);
 164	if (hdr) {
 165		if (hdr->free_iov) {
 166			kasan_mempool_unpoison_object(hdr->free_iov,
 167				hdr->free_iov_nr * sizeof(struct iovec));
 168			req->flags |= REQ_F_NEED_CLEANUP;
 169		}
 170		req->flags |= REQ_F_ASYNC_DATA;
 171		req->async_data = hdr;
 172		return hdr;
 173	}
 174
 175	if (!io_alloc_async_data(req)) {
 176		hdr = req->async_data;
 177		hdr->free_iov_nr = 0;
 178		hdr->free_iov = NULL;
 179		return hdr;
 180	}
 181	return NULL;
 182}
 183
 184/* assign new iovec to kmsg, if we need to */
 185static int io_net_vec_assign(struct io_kiocb *req, struct io_async_msghdr *kmsg,
 186			     struct iovec *iov)
 187{
 188	if (iov) {
 189		req->flags |= REQ_F_NEED_CLEANUP;
 190		kmsg->free_iov_nr = kmsg->msg.msg_iter.nr_segs;
 191		if (kmsg->free_iov)
 192			kfree(kmsg->free_iov);
 193		kmsg->free_iov = iov;
 194	}
 195	return 0;
 196}
 197
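/*
 * Reset the request so a multishot retry selects a fresh provided buffer
 * from the original buffer group.
 */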
 198static inline void io_mshot_prep_retry(struct io_kiocb *req,
 199				       struct io_async_msghdr *kmsg)
 200{
 201	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 202
 203	req->flags &= ~REQ_F_BL_EMPTY;
 204	sr->done_io = 0;
 205	sr->len = 0; /* get from the provided buffer */
 206	req->buf_index = sr->buf_group;
 207}
 208
 209#ifdef CONFIG_COMPAT
 210static int io_compat_msg_copy_hdr(struct io_kiocb *req,
 211				  struct io_async_msghdr *iomsg,
 212				  struct compat_msghdr *msg, int ddir)
 213{
 214	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 215	struct compat_iovec __user *uiov;
 216	struct iovec *iov;
 217	int ret, nr_segs;
 218
 219	if (iomsg->free_iov) {
 220		nr_segs = iomsg->free_iov_nr;
 221		iov = iomsg->free_iov;
 222	} else {
 223		iov = &iomsg->fast_iov;
 224		nr_segs = 1;
 225	}
 226
 227	if (copy_from_user(msg, sr->umsg_compat, sizeof(*msg)))
 228		return -EFAULT;
 229
 230	uiov = compat_ptr(msg->msg_iov);
 231	if (req->flags & REQ_F_BUFFER_SELECT) {
 232		compat_ssize_t clen;
 233
 234		if (msg->msg_iovlen == 0) {
 235			sr->len = iov->iov_len = 0;
 236			iov->iov_base = NULL;
 237		} else if (msg->msg_iovlen > 1) {
 238			return -EINVAL;
 239		} else {
 240			if (!access_ok(uiov, sizeof(*uiov)))
 241				return -EFAULT;
 242			if (__get_user(clen, &uiov->iov_len))
 243				return -EFAULT;
 244			if (clen < 0)
 245				return -EINVAL;
 246			sr->len = clen;
 247		}
 248
 249		return 0;
 250	}
 251
 252	ret = __import_iovec(ddir, (struct iovec __user *)uiov, msg->msg_iovlen,
 253				nr_segs, &iov, &iomsg->msg.msg_iter, true);
 254	if (unlikely(ret < 0))
 255		return ret;
 256
 257	return io_net_vec_assign(req, iomsg, iov);
 258}
 259#endif
 260
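/*
 * Copy in the user_msghdr and import its iovec. With provided buffers only
 * the length of the single iovec is needed, as the buffer itself is
 * selected at issue time.
 */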
 261static int io_msg_copy_hdr(struct io_kiocb *req, struct io_async_msghdr *iomsg,
 262			   struct user_msghdr *msg, int ddir)
 263{
 264	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 265	struct user_msghdr __user *umsg = sr->umsg;
 266	struct iovec *iov;
 267	int ret, nr_segs;
 268
 269	if (iomsg->free_iov) {
 270		nr_segs = iomsg->free_iov_nr;
 271		iov = iomsg->free_iov;
 272	} else {
 273		iov = &iomsg->fast_iov;
 274		nr_segs = 1;
 275	}
 276
 277	if (!user_access_begin(umsg, sizeof(*umsg)))
 278		return -EFAULT;
 279
 280	ret = -EFAULT;
 281	unsafe_get_user(msg->msg_name, &umsg->msg_name, ua_end);
 282	unsafe_get_user(msg->msg_namelen, &umsg->msg_namelen, ua_end);
 283	unsafe_get_user(msg->msg_iov, &umsg->msg_iov, ua_end);
 284	unsafe_get_user(msg->msg_iovlen, &umsg->msg_iovlen, ua_end);
 285	unsafe_get_user(msg->msg_control, &umsg->msg_control, ua_end);
 286	unsafe_get_user(msg->msg_controllen, &umsg->msg_controllen, ua_end);
 287	msg->msg_flags = 0;
 288
 289	if (req->flags & REQ_F_BUFFER_SELECT) {
 290		if (msg->msg_iovlen == 0) {
 291			sr->len = iov->iov_len = 0;
 292			iov->iov_base = NULL;
 293		} else if (msg->msg_iovlen > 1) {
 294			ret = -EINVAL;
 295			goto ua_end;
 296		} else {
 297			/* we only need the length for provided buffers */
 298			if (!access_ok(&msg->msg_iov[0].iov_len, sizeof(__kernel_size_t)))
 299				goto ua_end;
 300			unsafe_get_user(iov->iov_len, &msg->msg_iov[0].iov_len,
 301					ua_end);
 302			sr->len = iov->iov_len;
 303		}
 304		ret = 0;
 305ua_end:
 306		user_access_end();
 307		return ret;
 308	}
 309
 310	user_access_end();
 311	ret = __import_iovec(ddir, msg->msg_iov, msg->msg_iovlen, nr_segs,
 312				&iov, &iomsg->msg.msg_iter, false);
 313	if (unlikely(ret < 0))
 314		return ret;
 315
 316	return io_net_vec_assign(req, iomsg, iov);
 317}
 318
 319static int io_sendmsg_copy_hdr(struct io_kiocb *req,
 320			       struct io_async_msghdr *iomsg)
 321{
 322	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 323	struct user_msghdr msg;
 324	int ret;
 325
 326	iomsg->msg.msg_name = &iomsg->addr;
 327	iomsg->msg.msg_iter.nr_segs = 0;
 328
 329#ifdef CONFIG_COMPAT
 330	if (unlikely(req->ctx->compat)) {
 331		struct compat_msghdr cmsg;
 332
 333		ret = io_compat_msg_copy_hdr(req, iomsg, &cmsg, ITER_SOURCE);
 334		if (unlikely(ret))
 335			return ret;
 336
 337		ret = __get_compat_msghdr(&iomsg->msg, &cmsg, NULL);
 338		sr->msg_control = iomsg->msg.msg_control_user;
 339		return ret;
 340	}
 341#endif
 342
 343	ret = io_msg_copy_hdr(req, iomsg, &msg, ITER_SOURCE);
 344	if (unlikely(ret))
 345		return ret;
 346
 347	ret = __copy_msghdr(&iomsg->msg, &msg, NULL);
 348
 349	/* save msg_control as sys_sendmsg() overwrites it */
 350	sr->msg_control = iomsg->msg.msg_control_user;
 351	return ret;
 352}
 353
 354void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req)
 355{
 356	struct io_async_msghdr *io = req->async_data;
 357
 358	io_netmsg_iovec_free(io);
 359}
 360
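/*
 * Prepare a non-msghdr send: clear the kernel msghdr, copy in the optional
 * destination address, and map the user buffer unless a provided buffer
 * will be picked at issue time.
 */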
 361static int io_send_setup(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 362{
 363	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 364	struct io_async_msghdr *kmsg = req->async_data;
 365	void __user *addr;
 366	u16 addr_len;
 367	int ret;
 368
 369	sr->buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
 370
 371	if (READ_ONCE(sqe->__pad3[0]))
 372		return -EINVAL;
 373
 374	kmsg->msg.msg_name = NULL;
 375	kmsg->msg.msg_namelen = 0;
 376	kmsg->msg.msg_control = NULL;
 377	kmsg->msg.msg_controllen = 0;
 378	kmsg->msg.msg_ubuf = NULL;
 379
 380	addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
 381	addr_len = READ_ONCE(sqe->addr_len);
 382	if (addr) {
 383		ret = move_addr_to_kernel(addr, addr_len, &kmsg->addr);
 384		if (unlikely(ret < 0))
 385			return ret;
 386		kmsg->msg.msg_name = &kmsg->addr;
 387		kmsg->msg.msg_namelen = addr_len;
 388	}
 389	if (!io_do_buffer_select(req)) {
 390		ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len,
 391				  &kmsg->msg.msg_iter);
 392		if (unlikely(ret < 0))
 393			return ret;
 394	}
 395	return 0;
 396}
 397
 398static int io_sendmsg_setup(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 399{
 400	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 401	struct io_async_msghdr *kmsg = req->async_data;
 402	int ret;
 403
 404	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
 405
 406	ret = io_sendmsg_copy_hdr(req, kmsg);
 407	if (!ret)
 408		req->flags |= REQ_F_NEED_CLEANUP;
 409	return ret;
 410}
 411
 412#define SENDMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_BUNDLE)
 413
 414int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 415{
 416	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 417
 418	sr->done_io = 0;
 419
 420	if (req->opcode != IORING_OP_SEND) {
 421		if (sqe->addr2 || sqe->file_index)
 422			return -EINVAL;
 423	}
 424
 425	sr->len = READ_ONCE(sqe->len);
 426	sr->flags = READ_ONCE(sqe->ioprio);
 427	if (sr->flags & ~SENDMSG_FLAGS)
 428		return -EINVAL;
 429	sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
 430	if (sr->msg_flags & MSG_DONTWAIT)
 431		req->flags |= REQ_F_NOWAIT;
 432	if (sr->flags & IORING_RECVSEND_BUNDLE) {
 433		if (req->opcode == IORING_OP_SENDMSG)
 434			return -EINVAL;
 435		if (!(req->flags & REQ_F_BUFFER_SELECT))
 436			return -EINVAL;
 437		sr->msg_flags |= MSG_WAITALL;
 438		sr->buf_group = req->buf_index;
 439		req->buf_list = NULL;
 440	}
 441
 442#ifdef CONFIG_COMPAT
 443	if (req->ctx->compat)
 444		sr->msg_flags |= MSG_CMSG_COMPAT;
 445#endif
 446	if (unlikely(!io_msg_alloc_async(req)))
 447		return -ENOMEM;
 448	if (req->opcode != IORING_OP_SENDMSG)
 449		return io_send_setup(req, sqe);
 450	return io_sendmsg_setup(req, sqe);
 451}
 452
 453static void io_req_msg_cleanup(struct io_kiocb *req,
 454			       unsigned int issue_flags)
 455{
 456	req->flags &= ~REQ_F_NEED_CLEANUP;
 457	io_netmsg_recycle(req, issue_flags);
 458}
 459
 460/*
 461 * For bundle completions, we need to figure out how many segments we consumed.
 462 * A bundle could be using a single ITER_UBUF if that's all we mapped, or it
  463 * could be using an ITER_IOVEC. If the latter, and we consumed all of
  464 * the segments, then it's a trivial question to answer. If we have residual
 465 * data in the iter, then loop the segments to figure out how much we
 466 * transferred.
 467 */
 468static int io_bundle_nbufs(struct io_async_msghdr *kmsg, int ret)
 469{
 470	struct iovec *iov;
 471	int nbufs;
 472
 473	/* no data is always zero segments, and a ubuf is always 1 segment */
 474	if (ret <= 0)
 475		return 0;
 476	if (iter_is_ubuf(&kmsg->msg.msg_iter))
 477		return 1;
 478
 479	iov = kmsg->free_iov;
 480	if (!iov)
 481		iov = &kmsg->fast_iov;
 482
 483	/* if all data was transferred, it's basic pointer math */
 484	if (!iov_iter_count(&kmsg->msg.msg_iter))
 485		return iter_iov(&kmsg->msg.msg_iter) - iov;
 486
 487	/* short transfer, count segments */
 488	nbufs = 0;
 489	do {
 490		int this_len = min_t(int, iov[nbufs].iov_len, ret);
 491
 492		nbufs++;
 493		ret -= this_len;
 494	} while (ret);
 495
 496	return nbufs;
 497}
 498
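/*
 * Post the result of a send. For a bundle that still has buffers and data
 * left, post a CQE flagged with IORING_CQE_F_MORE and return false so the
 * caller retries with the next buffer(s); otherwise finish the request.
 */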
 499static inline bool io_send_finish(struct io_kiocb *req, int *ret,
 500				  struct io_async_msghdr *kmsg,
 501				  unsigned issue_flags)
 502{
 503	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 504	bool bundle_finished = *ret <= 0;
 505	unsigned int cflags;
 506
 507	if (!(sr->flags & IORING_RECVSEND_BUNDLE)) {
 508		cflags = io_put_kbuf(req, *ret, issue_flags);
 509		goto finish;
 510	}
 511
 512	cflags = io_put_kbufs(req, *ret, io_bundle_nbufs(kmsg, *ret), issue_flags);
 513
 514	if (bundle_finished || req->flags & REQ_F_BL_EMPTY)
 515		goto finish;
 516
 517	/*
  518	 * Fill CQE for this send and see if we should keep trying to
  519	 * send on this socket.
 520	 */
 521	if (io_req_post_cqe(req, *ret, cflags | IORING_CQE_F_MORE)) {
 522		io_mshot_prep_retry(req, kmsg);
 523		return false;
 524	}
 525
 526	/* Otherwise stop bundle and use the current result. */
 527finish:
 528	io_req_set_res(req, *ret, cflags);
 529	*ret = IOU_OK;
 530	return true;
 531}
 532
 533int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
 534{
 535	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 536	struct io_async_msghdr *kmsg = req->async_data;
 537	struct socket *sock;
 538	unsigned flags;
 539	int min_ret = 0;
 540	int ret;
 541
 542	sock = sock_from_file(req->file);
 543	if (unlikely(!sock))
 544		return -ENOTSOCK;
 545
 546	if (!(req->flags & REQ_F_POLLED) &&
 547	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
 548		return -EAGAIN;
 549
 550	flags = sr->msg_flags;
 551	if (issue_flags & IO_URING_F_NONBLOCK)
 552		flags |= MSG_DONTWAIT;
 553	if (flags & MSG_WAITALL)
 554		min_ret = iov_iter_count(&kmsg->msg.msg_iter);
 555
 556	kmsg->msg.msg_control_user = sr->msg_control;
 557
 558	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
 559
 560	if (ret < min_ret) {
 561		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
 562			return -EAGAIN;
 563		if (ret > 0 && io_net_retry(sock, flags)) {
 564			kmsg->msg.msg_controllen = 0;
 565			kmsg->msg.msg_control = NULL;
 566			sr->done_io += ret;
 567			req->flags |= REQ_F_BL_NO_RECYCLE;
 568			return -EAGAIN;
 569		}
 570		if (ret == -ERESTARTSYS)
 571			ret = -EINTR;
 572		req_set_fail(req);
 573	}
 574	io_req_msg_cleanup(req, issue_flags);
 575	if (ret >= 0)
 576		ret += sr->done_io;
 577	else if (sr->done_io)
 578		ret = sr->done_io;
 579	io_req_set_res(req, ret, 0);
 580	return IOU_OK;
 581}
 582
 583int io_send(struct io_kiocb *req, unsigned int issue_flags)
 584{
 585	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 586	struct io_async_msghdr *kmsg = req->async_data;
 587	struct socket *sock;
 588	unsigned flags;
 589	int min_ret = 0;
 590	int ret;
 591
 592	sock = sock_from_file(req->file);
 593	if (unlikely(!sock))
 594		return -ENOTSOCK;
 595
 596	if (!(req->flags & REQ_F_POLLED) &&
 597	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
 598		return -EAGAIN;
 599
 600	flags = sr->msg_flags;
 601	if (issue_flags & IO_URING_F_NONBLOCK)
 602		flags |= MSG_DONTWAIT;
 603
 604retry_bundle:
 605	if (io_do_buffer_select(req)) {
 606		struct buf_sel_arg arg = {
 607			.iovs = &kmsg->fast_iov,
 608			.max_len = min_not_zero(sr->len, INT_MAX),
 609			.nr_iovs = 1,
 610		};
 611
 612		if (kmsg->free_iov) {
 613			arg.nr_iovs = kmsg->free_iov_nr;
 614			arg.iovs = kmsg->free_iov;
 615			arg.mode = KBUF_MODE_FREE;
 616		}
 617
 618		if (!(sr->flags & IORING_RECVSEND_BUNDLE))
 619			arg.nr_iovs = 1;
 620		else
 621			arg.mode |= KBUF_MODE_EXPAND;
 622
 623		ret = io_buffers_select(req, &arg, issue_flags);
 624		if (unlikely(ret < 0))
 625			return ret;
 626
 627		if (arg.iovs != &kmsg->fast_iov && arg.iovs != kmsg->free_iov) {
 628			kmsg->free_iov_nr = ret;
 629			kmsg->free_iov = arg.iovs;
 630			req->flags |= REQ_F_NEED_CLEANUP;
 631		}
 632		sr->len = arg.out_len;
 633
 634		if (ret == 1) {
 635			sr->buf = arg.iovs[0].iov_base;
 636			ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len,
 637						&kmsg->msg.msg_iter);
 638			if (unlikely(ret))
 639				return ret;
 640		} else {
 641			iov_iter_init(&kmsg->msg.msg_iter, ITER_SOURCE,
 642					arg.iovs, ret, arg.out_len);
 643		}
 644	}
 645
 646	/*
 647	 * If MSG_WAITALL is set, or this is a bundle send, then we need
  648	 * the full amount. If just bundle is set and we do a short send,
  649	 * then we complete the bundle sequence rather than continue on.
 650	 */
 651	if (flags & MSG_WAITALL || sr->flags & IORING_RECVSEND_BUNDLE)
 652		min_ret = iov_iter_count(&kmsg->msg.msg_iter);
 653
 654	flags &= ~MSG_INTERNAL_SENDMSG_FLAGS;
 655	kmsg->msg.msg_flags = flags;
 656	ret = sock_sendmsg(sock, &kmsg->msg);
 657	if (ret < min_ret) {
 658		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
 659			return -EAGAIN;
 660
 661		if (ret > 0 && io_net_retry(sock, flags)) {
 662			sr->len -= ret;
 663			sr->buf += ret;
 664			sr->done_io += ret;
 665			req->flags |= REQ_F_BL_NO_RECYCLE;
 666			return -EAGAIN;
 667		}
 668		if (ret == -ERESTARTSYS)
 669			ret = -EINTR;
 670		req_set_fail(req);
 671	}
 672	if (ret >= 0)
 673		ret += sr->done_io;
 674	else if (sr->done_io)
 675		ret = sr->done_io;
 676
 677	if (!io_send_finish(req, &ret, kmsg, issue_flags))
 678		goto retry_bundle;
 679
 680	io_req_msg_cleanup(req, issue_flags);
 681	return ret;
 682}
 683
 684static int io_recvmsg_mshot_prep(struct io_kiocb *req,
 685				 struct io_async_msghdr *iomsg,
 686				 int namelen, size_t controllen)
 687{
 688	if ((req->flags & (REQ_F_APOLL_MULTISHOT|REQ_F_BUFFER_SELECT)) ==
 689			  (REQ_F_APOLL_MULTISHOT|REQ_F_BUFFER_SELECT)) {
 690		int hdr;
 691
 692		if (unlikely(namelen < 0))
 693			return -EOVERFLOW;
 694		if (check_add_overflow(sizeof(struct io_uring_recvmsg_out),
 695					namelen, &hdr))
 696			return -EOVERFLOW;
 697		if (check_add_overflow(hdr, controllen, &hdr))
 698			return -EOVERFLOW;
 699
 700		iomsg->namelen = namelen;
 701		iomsg->controllen = controllen;
 702		return 0;
 703	}
 704
 705	return 0;
 706}
 707
 708static int io_recvmsg_copy_hdr(struct io_kiocb *req,
 709			       struct io_async_msghdr *iomsg)
 710{
 711	struct user_msghdr msg;
 712	int ret;
 713
 714	iomsg->msg.msg_name = &iomsg->addr;
 715	iomsg->msg.msg_iter.nr_segs = 0;
 716
 717#ifdef CONFIG_COMPAT
 718	if (unlikely(req->ctx->compat)) {
 719		struct compat_msghdr cmsg;
 720
 721		ret = io_compat_msg_copy_hdr(req, iomsg, &cmsg, ITER_DEST);
 722		if (unlikely(ret))
 723			return ret;
 724
 725		ret = __get_compat_msghdr(&iomsg->msg, &cmsg, &iomsg->uaddr);
 726		if (unlikely(ret))
 727			return ret;
 728
 729		return io_recvmsg_mshot_prep(req, iomsg, cmsg.msg_namelen,
 730						cmsg.msg_controllen);
 731	}
 732#endif
 733
 734	ret = io_msg_copy_hdr(req, iomsg, &msg, ITER_DEST);
 735	if (unlikely(ret))
 736		return ret;
 737
 738	ret = __copy_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
 739	if (unlikely(ret))
 740		return ret;
 741
 742	return io_recvmsg_mshot_prep(req, iomsg, msg.msg_namelen,
 743					msg.msg_controllen);
 744}
 745
 746static int io_recvmsg_prep_setup(struct io_kiocb *req)
 747{
 748	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 749	struct io_async_msghdr *kmsg;
 750	int ret;
 751
 752	kmsg = io_msg_alloc_async(req);
 753	if (unlikely(!kmsg))
 754		return -ENOMEM;
 755
 756	if (req->opcode == IORING_OP_RECV) {
 757		kmsg->msg.msg_name = NULL;
 758		kmsg->msg.msg_namelen = 0;
 759		kmsg->msg.msg_inq = 0;
 760		kmsg->msg.msg_control = NULL;
 761		kmsg->msg.msg_get_inq = 1;
 762		kmsg->msg.msg_controllen = 0;
 763		kmsg->msg.msg_iocb = NULL;
 764		kmsg->msg.msg_ubuf = NULL;
 765
 766		if (!io_do_buffer_select(req)) {
 767			ret = import_ubuf(ITER_DEST, sr->buf, sr->len,
 768					  &kmsg->msg.msg_iter);
 769			if (unlikely(ret))
 770				return ret;
 771		}
 772		return 0;
 773	}
 774
 775	ret = io_recvmsg_copy_hdr(req, kmsg);
 776	if (!ret)
 777		req->flags |= REQ_F_NEED_CLEANUP;
 778	return ret;
 779}
 780
 781#define RECVMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECV_MULTISHOT | \
 782			IORING_RECVSEND_BUNDLE)
 783
 784int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 785{
 786	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 787
 788	sr->done_io = 0;
 789
 790	if (unlikely(sqe->file_index || sqe->addr2))
 791		return -EINVAL;
 792
 793	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
 794	sr->len = READ_ONCE(sqe->len);
 795	sr->flags = READ_ONCE(sqe->ioprio);
 796	if (sr->flags & ~RECVMSG_FLAGS)
 797		return -EINVAL;
 798	sr->msg_flags = READ_ONCE(sqe->msg_flags);
 799	if (sr->msg_flags & MSG_DONTWAIT)
 800		req->flags |= REQ_F_NOWAIT;
 801	if (sr->msg_flags & MSG_ERRQUEUE)
 802		req->flags |= REQ_F_CLEAR_POLLIN;
 803	if (req->flags & REQ_F_BUFFER_SELECT) {
 804		/*
 805		 * Store the buffer group for this multishot receive separately,
 806		 * as if we end up doing an io-wq based issue that selects a
 807		 * buffer, it has to be committed immediately and that will
 808		 * clear ->buf_list. This means we lose the link to the buffer
 809		 * list, and the eventual buffer put on completion then cannot
 810		 * restore it.
 811		 */
 812		sr->buf_group = req->buf_index;
 813		req->buf_list = NULL;
 814	}
 815	if (sr->flags & IORING_RECV_MULTISHOT) {
 816		if (!(req->flags & REQ_F_BUFFER_SELECT))
 817			return -EINVAL;
 818		if (sr->msg_flags & MSG_WAITALL)
 819			return -EINVAL;
 820		if (req->opcode == IORING_OP_RECV && sr->len)
 821			return -EINVAL;
 822		req->flags |= REQ_F_APOLL_MULTISHOT;
 823	}
 824	if (sr->flags & IORING_RECVSEND_BUNDLE) {
 825		if (req->opcode == IORING_OP_RECVMSG)
 826			return -EINVAL;
 827	}
 828
 829#ifdef CONFIG_COMPAT
 830	if (req->ctx->compat)
 831		sr->msg_flags |= MSG_CMSG_COMPAT;
 832#endif
 833	sr->nr_multishot_loops = 0;
 834	return io_recvmsg_prep_setup(req);
 835}
 836
 837/*
 838 * Finishes io_recv and io_recvmsg.
 839 *
 840 * Returns true if it is actually finished, or false if it should run
 841 * again (for multishot).
 842 */
 843static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
 844				  struct io_async_msghdr *kmsg,
 845				  bool mshot_finished, unsigned issue_flags)
 846{
 847	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 848	unsigned int cflags = 0;
 849
 850	if (kmsg->msg.msg_inq > 0)
 851		cflags |= IORING_CQE_F_SOCK_NONEMPTY;
 852
 853	if (sr->flags & IORING_RECVSEND_BUNDLE) {
 854		cflags |= io_put_kbufs(req, *ret, io_bundle_nbufs(kmsg, *ret),
 855				      issue_flags);
 856		/* bundle with no more immediate buffers, we're done */
 857		if (req->flags & REQ_F_BL_EMPTY)
 858			goto finish;
 859	} else {
 860		cflags |= io_put_kbuf(req, *ret, issue_flags);
 861	}
 862
 863	/*
 864	 * Fill CQE for this receive and see if we should keep trying to
 865	 * receive from this socket.
 866	 */
 867	if ((req->flags & REQ_F_APOLL_MULTISHOT) && !mshot_finished &&
 868	    io_req_post_cqe(req, *ret, cflags | IORING_CQE_F_MORE)) {
 869		int mshot_retry_ret = IOU_ISSUE_SKIP_COMPLETE;
 870
 871		io_mshot_prep_retry(req, kmsg);
 872		/* Known not-empty or unknown state, retry */
 873		if (cflags & IORING_CQE_F_SOCK_NONEMPTY || kmsg->msg.msg_inq < 0) {
 874			if (sr->nr_multishot_loops++ < MULTISHOT_MAX_RETRY)
 875				return false;
 876			/* mshot retries exceeded, force a requeue */
 877			sr->nr_multishot_loops = 0;
 878			mshot_retry_ret = IOU_REQUEUE;
 879		}
 880		if (issue_flags & IO_URING_F_MULTISHOT)
 881			*ret = mshot_retry_ret;
 882		else
 883			*ret = -EAGAIN;
 884		return true;
 885	}
 886
 887	/* Finish the request / stop multishot. */
 888finish:
 889	io_req_set_res(req, *ret, cflags);
 890
 891	if (issue_flags & IO_URING_F_MULTISHOT)
 892		*ret = IOU_STOP_MULTISHOT;
 893	else
 894		*ret = IOU_OK;
 895	io_req_msg_cleanup(req, issue_flags);
 896	return true;
 897}
 898
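/*
 * For multishot recvmsg the selected buffer must be large enough to hold
 * the io_uring_recvmsg_out header, the source address and the control data
 * in front of the payload; reserve that space and point the data buffer
 * past it.
 */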
 899static int io_recvmsg_prep_multishot(struct io_async_msghdr *kmsg,
 900				     struct io_sr_msg *sr, void __user **buf,
 901				     size_t *len)
 902{
 903	unsigned long ubuf = (unsigned long) *buf;
 904	unsigned long hdr;
 905
 906	hdr = sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
 907		kmsg->controllen;
 908	if (*len < hdr)
 909		return -EFAULT;
 910
 911	if (kmsg->controllen) {
 912		unsigned long control = ubuf + hdr - kmsg->controllen;
 913
 914		kmsg->msg.msg_control_user = (void __user *) control;
 915		kmsg->msg.msg_controllen = kmsg->controllen;
 916	}
 917
 918	sr->buf = *buf; /* stash for later copy */
 919	*buf = (void __user *) (ubuf + hdr);
 920	kmsg->payloadlen = *len = *len - hdr;
 921	return 0;
 922}
 923
 924struct io_recvmsg_multishot_hdr {
 925	struct io_uring_recvmsg_out msg;
 926	struct sockaddr_storage addr;
 927};
 928
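/*
 * Do one receive for a multishot recvmsg and write the io_uring_recvmsg_out
 * header (plus address and control data) to the start of the selected
 * buffer, with the payload following it.
 */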
 929static int io_recvmsg_multishot(struct socket *sock, struct io_sr_msg *io,
 930				struct io_async_msghdr *kmsg,
 931				unsigned int flags, bool *finished)
 932{
 933	int err;
 934	int copy_len;
 935	struct io_recvmsg_multishot_hdr hdr;
 936
 937	if (kmsg->namelen)
 938		kmsg->msg.msg_name = &hdr.addr;
 939	kmsg->msg.msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT);
 940	kmsg->msg.msg_namelen = 0;
 941
 942	if (sock->file->f_flags & O_NONBLOCK)
 943		flags |= MSG_DONTWAIT;
 944
 945	err = sock_recvmsg(sock, &kmsg->msg, flags);
 946	*finished = err <= 0;
 947	if (err < 0)
 948		return err;
 949
 950	hdr.msg = (struct io_uring_recvmsg_out) {
 951		.controllen = kmsg->controllen - kmsg->msg.msg_controllen,
 952		.flags = kmsg->msg.msg_flags & ~MSG_CMSG_COMPAT
 953	};
 954
 955	hdr.msg.payloadlen = err;
 956	if (err > kmsg->payloadlen)
 957		err = kmsg->payloadlen;
 958
 959	copy_len = sizeof(struct io_uring_recvmsg_out);
 960	if (kmsg->msg.msg_namelen > kmsg->namelen)
 961		copy_len += kmsg->namelen;
 962	else
 963		copy_len += kmsg->msg.msg_namelen;
 964
 965	/*
 966	 *      "fromlen shall refer to the value before truncation.."
 967	 *                      1003.1g
 968	 */
 969	hdr.msg.namelen = kmsg->msg.msg_namelen;
 970
 971	/* ensure that there is no gap between hdr and sockaddr_storage */
 972	BUILD_BUG_ON(offsetof(struct io_recvmsg_multishot_hdr, addr) !=
 973		     sizeof(struct io_uring_recvmsg_out));
 974	if (copy_to_user(io->buf, &hdr, copy_len)) {
 975		*finished = true;
 976		return -EFAULT;
 977	}
 978
 979	return sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
 980			kmsg->controllen + err;
 981}
 982
 983int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
 984{
 985	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 986	struct io_async_msghdr *kmsg = req->async_data;
 987	struct socket *sock;
 988	unsigned flags;
 989	int ret, min_ret = 0;
 990	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
 991	bool mshot_finished = true;
 992
 993	sock = sock_from_file(req->file);
 994	if (unlikely(!sock))
 995		return -ENOTSOCK;
 996
 997	if (!(req->flags & REQ_F_POLLED) &&
 998	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
 999		return -EAGAIN;
1000
1001	flags = sr->msg_flags;
1002	if (force_nonblock)
1003		flags |= MSG_DONTWAIT;
1004
1005retry_multishot:
1006	if (io_do_buffer_select(req)) {
1007		void __user *buf;
1008		size_t len = sr->len;
1009
1010		buf = io_buffer_select(req, &len, issue_flags);
1011		if (!buf)
1012			return -ENOBUFS;
1013
1014		if (req->flags & REQ_F_APOLL_MULTISHOT) {
1015			ret = io_recvmsg_prep_multishot(kmsg, sr, &buf, &len);
1016			if (ret) {
1017				io_kbuf_recycle(req, issue_flags);
1018				return ret;
1019			}
1020		}
1021
1022		iov_iter_ubuf(&kmsg->msg.msg_iter, ITER_DEST, buf, len);
1023	}
1024
1025	kmsg->msg.msg_get_inq = 1;
1026	kmsg->msg.msg_inq = -1;
1027	if (req->flags & REQ_F_APOLL_MULTISHOT) {
1028		ret = io_recvmsg_multishot(sock, sr, kmsg, flags,
1029					   &mshot_finished);
1030	} else {
1031		/* disable partial retry for recvmsg with cmsg attached */
1032		if (flags & MSG_WAITALL && !kmsg->msg.msg_controllen)
1033			min_ret = iov_iter_count(&kmsg->msg.msg_iter);
1034
1035		ret = __sys_recvmsg_sock(sock, &kmsg->msg, sr->umsg,
1036					 kmsg->uaddr, flags);
1037	}
1038
1039	if (ret < min_ret) {
1040		if (ret == -EAGAIN && force_nonblock) {
1041			if (issue_flags & IO_URING_F_MULTISHOT) {
1042				io_kbuf_recycle(req, issue_flags);
1043				return IOU_ISSUE_SKIP_COMPLETE;
1044			}
1045			return -EAGAIN;
1046		}
1047		if (ret > 0 && io_net_retry(sock, flags)) {
1048			sr->done_io += ret;
1049			req->flags |= REQ_F_BL_NO_RECYCLE;
1050			return -EAGAIN;
1051		}
1052		if (ret == -ERESTARTSYS)
1053			ret = -EINTR;
1054		req_set_fail(req);
1055	} else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
1056		req_set_fail(req);
1057	}
1058
1059	if (ret > 0)
1060		ret += sr->done_io;
1061	else if (sr->done_io)
1062		ret = sr->done_io;
1063	else
1064		io_kbuf_recycle(req, issue_flags);
1065
1066	if (!io_recv_finish(req, &ret, kmsg, mshot_finished, issue_flags))
1067		goto retry_multishot;
1068
1069	return ret;
1070}
1071
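/*
 * Select the buffer(s) for a plain recv: a bundle maps multiple provided
 * buffers into an iovec, everything else maps a single buffer as an
 * ITER_UBUF.
 */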
1072static int io_recv_buf_select(struct io_kiocb *req, struct io_async_msghdr *kmsg,
1073			      size_t *len, unsigned int issue_flags)
1074{
1075	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
1076	int ret;
1077
1078	/*
1079	 * If the ring isn't locked, then don't use the peek interface
1080	 * to grab multiple buffers as we will lock/unlock between
1081	 * this selection and posting the buffers.
1082	 */
1083	if (!(issue_flags & IO_URING_F_UNLOCKED) &&
1084	    sr->flags & IORING_RECVSEND_BUNDLE) {
1085		struct buf_sel_arg arg = {
1086			.iovs = &kmsg->fast_iov,
1087			.nr_iovs = 1,
1088			.mode = KBUF_MODE_EXPAND,
1089		};
1090
1091		if (kmsg->free_iov) {
1092			arg.nr_iovs = kmsg->free_iov_nr;
1093			arg.iovs = kmsg->free_iov;
1094			arg.mode |= KBUF_MODE_FREE;
1095		}
1096
1097		if (kmsg->msg.msg_inq > 0)
1098			arg.max_len = min_not_zero(sr->len, kmsg->msg.msg_inq);
1099
1100		ret = io_buffers_peek(req, &arg);
1101		if (unlikely(ret < 0))
1102			return ret;
1103
1104		/* special case 1 vec, can be a fast path */
1105		if (ret == 1) {
1106			sr->buf = arg.iovs[0].iov_base;
1107			sr->len = arg.iovs[0].iov_len;
1108			goto map_ubuf;
1109		}
1110		iov_iter_init(&kmsg->msg.msg_iter, ITER_DEST, arg.iovs, ret,
1111				arg.out_len);
1112		if (arg.iovs != &kmsg->fast_iov && arg.iovs != kmsg->free_iov) {
1113			kmsg->free_iov_nr = ret;
1114			kmsg->free_iov = arg.iovs;
1115			req->flags |= REQ_F_NEED_CLEANUP;
1116		}
1117	} else {
1118		void __user *buf;
1119
1120		*len = sr->len;
1121		buf = io_buffer_select(req, len, issue_flags);
1122		if (!buf)
1123			return -ENOBUFS;
1124		sr->buf = buf;
1125		sr->len = *len;
1126map_ubuf:
1127		ret = import_ubuf(ITER_DEST, sr->buf, sr->len,
1128				  &kmsg->msg.msg_iter);
1129		if (unlikely(ret))
1130			return ret;
1131	}
1132
1133	return 0;
1134}
1135
1136int io_recv(struct io_kiocb *req, unsigned int issue_flags)
1137{
1138	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
1139	struct io_async_msghdr *kmsg = req->async_data;
1140	struct socket *sock;
1141	unsigned flags;
1142	int ret, min_ret = 0;
1143	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
1144	size_t len = sr->len;
1145	bool mshot_finished;
1146
1147	if (!(req->flags & REQ_F_POLLED) &&
1148	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
1149		return -EAGAIN;
1150
1151	sock = sock_from_file(req->file);
1152	if (unlikely(!sock))
1153		return -ENOTSOCK;
1154
1155	flags = sr->msg_flags;
1156	if (force_nonblock)
1157		flags |= MSG_DONTWAIT;
1158
1159retry_multishot:
1160	if (io_do_buffer_select(req)) {
1161		ret = io_recv_buf_select(req, kmsg, &len, issue_flags);
1162		if (unlikely(ret)) {
1163			kmsg->msg.msg_inq = -1;
1164			goto out_free;
1165		}
1166		sr->buf = NULL;
1167	}
1168
1169	kmsg->msg.msg_flags = 0;
1170	kmsg->msg.msg_inq = -1;
1171
1172	if (flags & MSG_WAITALL)
1173		min_ret = iov_iter_count(&kmsg->msg.msg_iter);
1174
1175	ret = sock_recvmsg(sock, &kmsg->msg, flags);
1176	if (ret < min_ret) {
1177		if (ret == -EAGAIN && force_nonblock) {
1178			if (issue_flags & IO_URING_F_MULTISHOT) {
1179				io_kbuf_recycle(req, issue_flags);
1180				return IOU_ISSUE_SKIP_COMPLETE;
1181			}
1182
1183			return -EAGAIN;
1184		}
1185		if (ret > 0 && io_net_retry(sock, flags)) {
1186			sr->len -= ret;
1187			sr->buf += ret;
1188			sr->done_io += ret;
1189			req->flags |= REQ_F_BL_NO_RECYCLE;
1190			return -EAGAIN;
1191		}
1192		if (ret == -ERESTARTSYS)
1193			ret = -EINTR;
1194		req_set_fail(req);
1195	} else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
1196out_free:
1197		req_set_fail(req);
1198	}
1199
1200	mshot_finished = ret <= 0;
1201	if (ret > 0)
1202		ret += sr->done_io;
1203	else if (sr->done_io)
1204		ret = sr->done_io;
1205	else
1206		io_kbuf_recycle(req, issue_flags);
1207
1208	if (!io_recv_finish(req, &ret, kmsg, mshot_finished, issue_flags))
1209		goto retry_multishot;
1210
1211	return ret;
1212}
1213
1214void io_send_zc_cleanup(struct io_kiocb *req)
1215{
1216	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
1217	struct io_async_msghdr *io = req->async_data;
1218
1219	if (req_has_async_data(req))
1220		io_netmsg_iovec_free(io);
1221	if (zc->notif) {
1222		io_notif_flush(zc->notif);
1223		zc->notif = NULL;
1224	}
1225}
1226
1227#define IO_ZC_FLAGS_COMMON (IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_FIXED_BUF)
1228#define IO_ZC_FLAGS_VALID  (IO_ZC_FLAGS_COMMON | IORING_SEND_ZC_REPORT_USAGE)
1229
1230int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1231{
1232	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
1233	struct io_ring_ctx *ctx = req->ctx;
1234	struct io_kiocb *notif;
1235
1236	zc->done_io = 0;
1237	req->flags |= REQ_F_POLL_NO_LAZY;
1238
1239	if (unlikely(READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3)))
1240		return -EINVAL;
1241	/* we don't support IOSQE_CQE_SKIP_SUCCESS just yet */
1242	if (req->flags & REQ_F_CQE_SKIP)
1243		return -EINVAL;
1244
1245	notif = zc->notif = io_alloc_notif(ctx);
1246	if (!notif)
1247		return -ENOMEM;
1248	notif->cqe.user_data = req->cqe.user_data;
1249	notif->cqe.res = 0;
1250	notif->cqe.flags = IORING_CQE_F_NOTIF;
1251	req->flags |= REQ_F_NEED_CLEANUP;
1252
1253	zc->flags = READ_ONCE(sqe->ioprio);
1254	if (unlikely(zc->flags & ~IO_ZC_FLAGS_COMMON)) {
1255		if (zc->flags & ~IO_ZC_FLAGS_VALID)
1256			return -EINVAL;
1257		if (zc->flags & IORING_SEND_ZC_REPORT_USAGE) {
1258			struct io_notif_data *nd = io_notif_to_data(notif);
1259
1260			nd->zc_report = true;
1261			nd->zc_used = false;
1262			nd->zc_copied = false;
1263		}
1264	}
1265
1266	if (req->opcode != IORING_OP_SEND_ZC) {
1267		if (unlikely(sqe->addr2 || sqe->file_index))
1268			return -EINVAL;
1269		if (unlikely(zc->flags & IORING_RECVSEND_FIXED_BUF))
1270			return -EINVAL;
1271	}
1272
1273	zc->len = READ_ONCE(sqe->len);
1274	zc->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL | MSG_ZEROCOPY;
1275	zc->buf_index = READ_ONCE(sqe->buf_index);
1276	if (zc->msg_flags & MSG_DONTWAIT)
1277		req->flags |= REQ_F_NOWAIT;
1278
1279#ifdef CONFIG_COMPAT
1280	if (req->ctx->compat)
1281		zc->msg_flags |= MSG_CMSG_COMPAT;
1282#endif
1283	if (unlikely(!io_msg_alloc_async(req)))
1284		return -ENOMEM;
1285	if (req->opcode != IORING_OP_SENDMSG_ZC)
1286		return io_send_setup(req, sqe);
1287	return io_sendmsg_setup(req, sqe);
1288}
1289
1290static int io_sg_from_iter_iovec(struct sk_buff *skb,
1291				 struct iov_iter *from, size_t length)
1292{
1293	skb_zcopy_downgrade_managed(skb);
1294	return zerocopy_fill_skb_from_iter(skb, from, length);
1295}
1296
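/*
 * Append pages from the bvec-backed iterator directly as skb frags without
 * taking page references; fall back to zerocopy_fill_skb_from_iter() if the
 * skb's frags aren't managed by us.
 */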
1297static int io_sg_from_iter(struct sk_buff *skb,
1298			   struct iov_iter *from, size_t length)
1299{
1300	struct skb_shared_info *shinfo = skb_shinfo(skb);
1301	int frag = shinfo->nr_frags;
1302	int ret = 0;
1303	struct bvec_iter bi;
1304	ssize_t copied = 0;
1305	unsigned long truesize = 0;
1306
1307	if (!frag)
1308		shinfo->flags |= SKBFL_MANAGED_FRAG_REFS;
1309	else if (unlikely(!skb_zcopy_managed(skb)))
1310		return zerocopy_fill_skb_from_iter(skb, from, length);
1311
1312	bi.bi_size = min(from->count, length);
1313	bi.bi_bvec_done = from->iov_offset;
1314	bi.bi_idx = 0;
1315
1316	while (bi.bi_size && frag < MAX_SKB_FRAGS) {
1317		struct bio_vec v = mp_bvec_iter_bvec(from->bvec, bi);
1318
1319		copied += v.bv_len;
1320		truesize += PAGE_ALIGN(v.bv_len + v.bv_offset);
1321		__skb_fill_page_desc_noacc(shinfo, frag++, v.bv_page,
1322					   v.bv_offset, v.bv_len);
1323		bvec_iter_advance_single(from->bvec, &bi, v.bv_len);
1324	}
1325	if (bi.bi_size)
1326		ret = -EMSGSIZE;
1327
1328	shinfo->nr_frags = frag;
1329	from->bvec += bi.bi_idx;
1330	from->nr_segs -= bi.bi_idx;
1331	from->count -= copied;
1332	from->iov_offset = bi.bi_bvec_done;
1333
1334	skb->data_len += copied;
1335	skb->len += copied;
1336	skb->truesize += truesize;
1337	return ret;
1338}
1339
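/*
 * Map the memory for a zerocopy send: a fixed buffer is resolved from the
 * ring's registered buffer table and pinned via the notification, while a
 * plain user buffer is imported and accounted against the notification's
 * memory.
 */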
1340static int io_send_zc_import(struct io_kiocb *req, unsigned int issue_flags)
1341{
1342	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
1343	struct io_async_msghdr *kmsg = req->async_data;
1344	int ret;
1345
1346	if (sr->flags & IORING_RECVSEND_FIXED_BUF) {
1347		struct io_ring_ctx *ctx = req->ctx;
1348		struct io_rsrc_node *node;
1349
1350		ret = -EFAULT;
1351		io_ring_submit_lock(ctx, issue_flags);
1352		node = io_rsrc_node_lookup(&ctx->buf_table, sr->buf_index);
1353		if (node) {
1354			io_req_assign_buf_node(sr->notif, node);
1355			ret = 0;
1356		}
1357		io_ring_submit_unlock(ctx, issue_flags);
1358
1359		if (unlikely(ret))
1360			return ret;
1361
1362		ret = io_import_fixed(ITER_SOURCE, &kmsg->msg.msg_iter,
1363					node->buf, (u64)(uintptr_t)sr->buf,
1364					sr->len);
1365		if (unlikely(ret))
1366			return ret;
1367		kmsg->msg.sg_from_iter = io_sg_from_iter;
1368	} else {
1369		ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len, &kmsg->msg.msg_iter);
1370		if (unlikely(ret))
1371			return ret;
1372		ret = io_notif_account_mem(sr->notif, sr->len);
1373		if (unlikely(ret))
1374			return ret;
1375		kmsg->msg.sg_from_iter = io_sg_from_iter_iovec;
1376	}
1377
1378	return ret;
1379}
1380
1381int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
1382{
1383	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
1384	struct io_async_msghdr *kmsg = req->async_data;
1385	struct socket *sock;
1386	unsigned msg_flags;
1387	int ret, min_ret = 0;
1388
1389	sock = sock_from_file(req->file);
1390	if (unlikely(!sock))
1391		return -ENOTSOCK;
1392	if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
1393		return -EOPNOTSUPP;
1394
1395	if (!(req->flags & REQ_F_POLLED) &&
1396	    (zc->flags & IORING_RECVSEND_POLL_FIRST))
1397		return -EAGAIN;
1398
1399	if (!zc->done_io) {
1400		ret = io_send_zc_import(req, issue_flags);
1401		if (unlikely(ret))
1402			return ret;
1403	}
1404
1405	msg_flags = zc->msg_flags;
1406	if (issue_flags & IO_URING_F_NONBLOCK)
1407		msg_flags |= MSG_DONTWAIT;
1408	if (msg_flags & MSG_WAITALL)
1409		min_ret = iov_iter_count(&kmsg->msg.msg_iter);
1410	msg_flags &= ~MSG_INTERNAL_SENDMSG_FLAGS;
1411
1412	kmsg->msg.msg_flags = msg_flags;
1413	kmsg->msg.msg_ubuf = &io_notif_to_data(zc->notif)->uarg;
1414	ret = sock_sendmsg(sock, &kmsg->msg);
1415
1416	if (unlikely(ret < min_ret)) {
1417		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
1418			return -EAGAIN;
1419
1420		if (ret > 0 && io_net_retry(sock, kmsg->msg.msg_flags)) {
1421			zc->len -= ret;
1422			zc->buf += ret;
1423			zc->done_io += ret;
1424			req->flags |= REQ_F_BL_NO_RECYCLE;
1425			return -EAGAIN;
1426		}
1427		if (ret == -ERESTARTSYS)
1428			ret = -EINTR;
1429		req_set_fail(req);
1430	}
1431
1432	if (ret >= 0)
1433		ret += zc->done_io;
1434	else if (zc->done_io)
1435		ret = zc->done_io;
1436
1437	/*
 1438	 * If we're in io-wq we can't rely on tw ordering guarantees; defer
 1439	 * flushing the notif to io_send_zc_cleanup().
1440	 */
1441	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
1442		io_notif_flush(zc->notif);
1443		io_req_msg_cleanup(req, 0);
1444	}
1445	io_req_set_res(req, ret, IORING_CQE_F_MORE);
1446	return IOU_OK;
1447}
1448
1449int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
1450{
1451	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
1452	struct io_async_msghdr *kmsg = req->async_data;
1453	struct socket *sock;
1454	unsigned flags;
1455	int ret, min_ret = 0;
1456
1457	sock = sock_from_file(req->file);
1458	if (unlikely(!sock))
1459		return -ENOTSOCK;
1460	if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
1461		return -EOPNOTSUPP;
1462
1463	if (!(req->flags & REQ_F_POLLED) &&
1464	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
1465		return -EAGAIN;
1466
1467	flags = sr->msg_flags;
1468	if (issue_flags & IO_URING_F_NONBLOCK)
1469		flags |= MSG_DONTWAIT;
1470	if (flags & MSG_WAITALL)
1471		min_ret = iov_iter_count(&kmsg->msg.msg_iter);
1472
1473	kmsg->msg.msg_control_user = sr->msg_control;
1474	kmsg->msg.msg_ubuf = &io_notif_to_data(sr->notif)->uarg;
1475	kmsg->msg.sg_from_iter = io_sg_from_iter_iovec;
1476	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
1477
1478	if (unlikely(ret < min_ret)) {
1479		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
1480			return -EAGAIN;
1481
1482		if (ret > 0 && io_net_retry(sock, flags)) {
1483			sr->done_io += ret;
1484			req->flags |= REQ_F_BL_NO_RECYCLE;
1485			return -EAGAIN;
1486		}
1487		if (ret == -ERESTARTSYS)
1488			ret = -EINTR;
1489		req_set_fail(req);
1490	}
1491
1492	if (ret >= 0)
1493		ret += sr->done_io;
1494	else if (sr->done_io)
1495		ret = sr->done_io;
1496
1497	/*
 1498	 * If we're in io-wq we can't rely on tw ordering guarantees; defer
 1499	 * flushing the notif to io_send_zc_cleanup().
1500	 */
1501	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
1502		io_notif_flush(sr->notif);
1503		io_req_msg_cleanup(req, 0);
1504	}
1505	io_req_set_res(req, ret, IORING_CQE_F_MORE);
1506	return IOU_OK;
1507}
1508
1509void io_sendrecv_fail(struct io_kiocb *req)
1510{
1511	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
1512
1513	if (sr->done_io)
1514		req->cqe.res = sr->done_io;
1515
1516	if ((req->flags & REQ_F_NEED_CLEANUP) &&
1517	    (req->opcode == IORING_OP_SEND_ZC || req->opcode == IORING_OP_SENDMSG_ZC))
1518		req->cqe.flags |= IORING_CQE_F_MORE;
1519}
1520
1521#define ACCEPT_FLAGS	(IORING_ACCEPT_MULTISHOT | IORING_ACCEPT_DONTWAIT | \
1522			 IORING_ACCEPT_POLL_FIRST)
1523
1524int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1525{
1526	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
1527
1528	if (sqe->len || sqe->buf_index)
1529		return -EINVAL;
1530
1531	accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
1532	accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
1533	accept->flags = READ_ONCE(sqe->accept_flags);
1534	accept->nofile = rlimit(RLIMIT_NOFILE);
1535	accept->iou_flags = READ_ONCE(sqe->ioprio);
1536	if (accept->iou_flags & ~ACCEPT_FLAGS)
1537		return -EINVAL;
1538
1539	accept->file_slot = READ_ONCE(sqe->file_index);
1540	if (accept->file_slot) {
1541		if (accept->flags & SOCK_CLOEXEC)
1542			return -EINVAL;
1543		if (accept->iou_flags & IORING_ACCEPT_MULTISHOT &&
1544		    accept->file_slot != IORING_FILE_INDEX_ALLOC)
1545			return -EINVAL;
1546	}
1547	if (accept->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
1548		return -EINVAL;
1549	if (SOCK_NONBLOCK != O_NONBLOCK && (accept->flags & SOCK_NONBLOCK))
1550		accept->flags = (accept->flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
1551	if (accept->iou_flags & IORING_ACCEPT_MULTISHOT)
1552		req->flags |= REQ_F_APOLL_MULTISHOT;
1553	if (accept->iou_flags & IORING_ACCEPT_DONTWAIT)
1554		req->flags |= REQ_F_NOWAIT;
1555	return 0;
1556}
1557
1558int io_accept(struct io_kiocb *req, unsigned int issue_flags)
1559{
1560	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
1561	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
1562	bool fixed = !!accept->file_slot;
1563	struct proto_accept_arg arg = {
1564		.flags = force_nonblock ? O_NONBLOCK : 0,
1565	};
1566	struct file *file;
1567	unsigned cflags;
1568	int ret, fd;
1569
1570	if (!(req->flags & REQ_F_POLLED) &&
1571	    accept->iou_flags & IORING_ACCEPT_POLL_FIRST)
1572		return -EAGAIN;
1573
1574retry:
1575	if (!fixed) {
1576		fd = __get_unused_fd_flags(accept->flags, accept->nofile);
1577		if (unlikely(fd < 0))
1578			return fd;
1579	}
1580	arg.err = 0;
1581	arg.is_empty = -1;
1582	file = do_accept(req->file, &arg, accept->addr, accept->addr_len,
1583			 accept->flags);
1584	if (IS_ERR(file)) {
1585		if (!fixed)
1586			put_unused_fd(fd);
1587		ret = PTR_ERR(file);
1588		if (ret == -EAGAIN && force_nonblock &&
1589		    !(accept->iou_flags & IORING_ACCEPT_DONTWAIT)) {
1590			/*
1591			 * if it's multishot and polled, we don't need to
1592			 * return EAGAIN to arm the poll infra since it
1593			 * has already been done
1594			 */
1595			if (issue_flags & IO_URING_F_MULTISHOT)
1596				return IOU_ISSUE_SKIP_COMPLETE;
1597			return ret;
1598		}
1599		if (ret == -ERESTARTSYS)
1600			ret = -EINTR;
1601		req_set_fail(req);
1602	} else if (!fixed) {
1603		fd_install(fd, file);
1604		ret = fd;
1605	} else {
1606		ret = io_fixed_fd_install(req, issue_flags, file,
1607						accept->file_slot);
1608	}
1609
1610	cflags = 0;
1611	if (!arg.is_empty)
1612		cflags |= IORING_CQE_F_SOCK_NONEMPTY;
1613
1614	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
1615		io_req_set_res(req, ret, cflags);
1616		return IOU_OK;
1617	}
1618
1619	if (ret < 0)
1620		return ret;
1621	if (io_req_post_cqe(req, ret, cflags | IORING_CQE_F_MORE)) {
1622		if (cflags & IORING_CQE_F_SOCK_NONEMPTY || arg.is_empty == -1)
1623			goto retry;
1624		if (issue_flags & IO_URING_F_MULTISHOT)
1625			return IOU_ISSUE_SKIP_COMPLETE;
1626		return -EAGAIN;
1627	}
1628
1629	io_req_set_res(req, ret, cflags);
1630	return IOU_STOP_MULTISHOT;
1631}
1632
1633int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1634{
1635	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);
1636
1637	if (sqe->addr || sqe->rw_flags || sqe->buf_index)
1638		return -EINVAL;
1639
1640	sock->domain = READ_ONCE(sqe->fd);
1641	sock->type = READ_ONCE(sqe->off);
1642	sock->protocol = READ_ONCE(sqe->len);
1643	sock->file_slot = READ_ONCE(sqe->file_index);
1644	sock->nofile = rlimit(RLIMIT_NOFILE);
1645
1646	sock->flags = sock->type & ~SOCK_TYPE_MASK;
1647	if (sock->file_slot && (sock->flags & SOCK_CLOEXEC))
1648		return -EINVAL;
1649	if (sock->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
1650		return -EINVAL;
1651	return 0;
1652}
1653
1654int io_socket(struct io_kiocb *req, unsigned int issue_flags)
1655{
1656	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);
1657	bool fixed = !!sock->file_slot;
1658	struct file *file;
1659	int ret, fd;
1660
1661	if (!fixed) {
1662		fd = __get_unused_fd_flags(sock->flags, sock->nofile);
1663		if (unlikely(fd < 0))
1664			return fd;
1665	}
1666	file = __sys_socket_file(sock->domain, sock->type, sock->protocol);
1667	if (IS_ERR(file)) {
1668		if (!fixed)
1669			put_unused_fd(fd);
1670		ret = PTR_ERR(file);
1671		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
1672			return -EAGAIN;
1673		if (ret == -ERESTARTSYS)
1674			ret = -EINTR;
1675		req_set_fail(req);
1676	} else if (!fixed) {
1677		fd_install(fd, file);
1678		ret = fd;
1679	} else {
1680		ret = io_fixed_fd_install(req, issue_flags, file,
1681					    sock->file_slot);
1682	}
1683	io_req_set_res(req, ret, 0);
1684	return IOU_OK;
1685}
1686
1687int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1688{
1689	struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);
1690	struct io_async_msghdr *io;
1691
1692	if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
1693		return -EINVAL;
1694
1695	conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
1696	conn->addr_len =  READ_ONCE(sqe->addr2);
1697	conn->in_progress = conn->seen_econnaborted = false;
1698
1699	io = io_msg_alloc_async(req);
1700	if (unlikely(!io))
1701		return -ENOMEM;
1702
1703	return move_addr_to_kernel(conn->addr, conn->addr_len, &io->addr);
1704}
1705
1706int io_connect(struct io_kiocb *req, unsigned int issue_flags)
1707{
1708	struct io_connect *connect = io_kiocb_to_cmd(req, struct io_connect);
1709	struct io_async_msghdr *io = req->async_data;
1710	unsigned file_flags;
1711	int ret;
1712	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
1713
1714	if (unlikely(req->flags & REQ_F_FAIL)) {
1715		ret = -ECONNRESET;
1716		goto out;
1717	}
1718
1719	file_flags = force_nonblock ? O_NONBLOCK : 0;
1720
1721	ret = __sys_connect_file(req->file, &io->addr, connect->addr_len,
1722				 file_flags);
1723	if ((ret == -EAGAIN || ret == -EINPROGRESS || ret == -ECONNABORTED)
1724	    && force_nonblock) {
1725		if (ret == -EINPROGRESS) {
1726			connect->in_progress = true;
1727		} else if (ret == -ECONNABORTED) {
1728			if (connect->seen_econnaborted)
1729				goto out;
1730			connect->seen_econnaborted = true;
1731		}
1732		return -EAGAIN;
1733	}
1734	if (connect->in_progress) {
1735		/*
1736		 * At least bluetooth will return -EBADFD on a re-connect
1737		 * attempt, and it's (supposedly) also valid to get -EISCONN
1738		 * which means the previous result is good. For both of these,
1739		 * grab the sock_error() and use that for the completion.
1740		 */
1741		if (ret == -EBADFD || ret == -EISCONN)
1742			ret = sock_error(sock_from_file(req->file)->sk);
1743	}
1744	if (ret == -ERESTARTSYS)
1745		ret = -EINTR;
1746out:
1747	if (ret < 0)
1748		req_set_fail(req);
1749	io_req_msg_cleanup(req, issue_flags);
1750	io_req_set_res(req, ret, 0);
1751	return IOU_OK;
1752}
1753
1754int io_bind_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1755{
1756	struct io_bind *bind = io_kiocb_to_cmd(req, struct io_bind);
1757	struct sockaddr __user *uaddr;
1758	struct io_async_msghdr *io;
1759
1760	if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
1761		return -EINVAL;
1762
1763	uaddr = u64_to_user_ptr(READ_ONCE(sqe->addr));
1764	bind->addr_len =  READ_ONCE(sqe->addr2);
1765
1766	io = io_msg_alloc_async(req);
1767	if (unlikely(!io))
1768		return -ENOMEM;
1769	return move_addr_to_kernel(uaddr, bind->addr_len, &io->addr);
1770}
1771
1772int io_bind(struct io_kiocb *req, unsigned int issue_flags)
1773{
1774	struct io_bind *bind = io_kiocb_to_cmd(req, struct io_bind);
1775	struct io_async_msghdr *io = req->async_data;
1776	struct socket *sock;
1777	int ret;
1778
1779	sock = sock_from_file(req->file);
1780	if (unlikely(!sock))
1781		return -ENOTSOCK;
1782
1783	ret = __sys_bind_socket(sock, &io->addr, bind->addr_len);
1784	if (ret < 0)
1785		req_set_fail(req);
1786	io_req_set_res(req, ret, 0);
1787	return 0;
1788}
1789
1790int io_listen_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1791{
1792	struct io_listen *listen = io_kiocb_to_cmd(req, struct io_listen);
1793
1794	if (sqe->addr || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in || sqe->addr2)
1795		return -EINVAL;
1796
1797	listen->backlog = READ_ONCE(sqe->len);
1798	return 0;
1799}
1800
1801int io_listen(struct io_kiocb *req, unsigned int issue_flags)
1802{
1803	struct io_listen *listen = io_kiocb_to_cmd(req, struct io_listen);
1804	struct socket *sock;
1805	int ret;
1806
1807	sock = sock_from_file(req->file);
1808	if (unlikely(!sock))
1809		return -ENOTSOCK;
1810
1811	ret = __sys_listen_socket(sock, listen->backlog);
1812	if (ret < 0)
1813		req_set_fail(req);
1814	io_req_set_res(req, ret, 0);
1815	return 0;
1816}
1817
1818void io_netmsg_cache_free(const void *entry)
1819{
1820	struct io_async_msghdr *kmsg = (struct io_async_msghdr *) entry;
1821
1822	if (kmsg->free_iov) {
1823		kasan_mempool_unpoison_object(kmsg->free_iov,
1824				kmsg->free_iov_nr * sizeof(struct iovec));
1825		io_netmsg_iovec_free(kmsg);
1826	}
1827	kfree(kmsg);
1828}
1829#endif