io_uring/net.c (Linux v6.8)
   1// SPDX-License-Identifier: GPL-2.0
   2#include <linux/kernel.h>
   3#include <linux/errno.h>
   4#include <linux/file.h>
   5#include <linux/slab.h>
   6#include <linux/net.h>
   7#include <linux/compat.h>
   8#include <net/compat.h>
   9#include <linux/io_uring.h>
  10
  11#include <uapi/linux/io_uring.h>
  12
  13#include "io_uring.h"
  14#include "kbuf.h"
  15#include "alloc_cache.h"
  16#include "net.h"
  17#include "notif.h"
  18#include "rsrc.h"
  19
  20#if defined(CONFIG_NET)
  21struct io_shutdown {
  22	struct file			*file;
  23	int				how;
  24};
  25
  26struct io_accept {
  27	struct file			*file;
  28	struct sockaddr __user		*addr;
  29	int __user			*addr_len;
  30	int				flags;
 
  31	u32				file_slot;
  32	unsigned long			nofile;
  33};
  34
  35struct io_socket {
  36	struct file			*file;
  37	int				domain;
  38	int				type;
  39	int				protocol;
  40	int				flags;
  41	u32				file_slot;
  42	unsigned long			nofile;
  43};
  44
  45struct io_connect {
  46	struct file			*file;
  47	struct sockaddr __user		*addr;
  48	int				addr_len;
  49	bool				in_progress;
  50	bool				seen_econnaborted;
  51};
  52
  53struct io_sr_msg {
  54	struct file			*file;
  55	union {
  56		struct compat_msghdr __user	*umsg_compat;
  57		struct user_msghdr __user	*umsg;
  58		void __user			*buf;
  59	};
  60	unsigned			len;
  61	unsigned			done_io;
  62	unsigned			msg_flags;
  63	unsigned			nr_multishot_loops;
  64	u16				flags;
  65	/* initialised and used only by !msg send variants */
  66	u16				addr_len;
  67	u16				buf_group;
  68	void __user			*addr;
  69	void __user			*msg_control;
  70	/* used only for send zerocopy */
  71	struct io_kiocb 		*notif;
  72};
  73
  74/*
  75 * Number of times we'll try and do receives if there's more data. If we
  76 * exceed this limit, then add us to the back of the queue and retry from
  77 * there. This helps fairness between flooding clients.
  78 */
  79#define MULTISHOT_MAX_RETRY	32
  80
  81static inline bool io_check_multishot(struct io_kiocb *req,
  82				      unsigned int issue_flags)
  83{
  84	/*
  85	 * When ->locked_cq is set we only allow to post CQEs from the original
  86	 * task context. Usual request completions will be handled in other
  87	 * generic paths but multipoll may decide to post extra cqes.
  88	 */
  89	return !(issue_flags & IO_URING_F_IOWQ) ||
  90		!(issue_flags & IO_URING_F_MULTISHOT) ||
  91		!req->ctx->task_complete;
  92}
  93
  94int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
  95{
  96	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
  97
  98	if (unlikely(sqe->off || sqe->addr || sqe->rw_flags ||
  99		     sqe->buf_index || sqe->splice_fd_in))
 100		return -EINVAL;
 101
 102	shutdown->how = READ_ONCE(sqe->len);
 103	req->flags |= REQ_F_FORCE_ASYNC;
 104	return 0;
 105}
 106
 107int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
 108{
 109	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
 110	struct socket *sock;
 111	int ret;
 112
 113	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
 114
 115	sock = sock_from_file(req->file);
 116	if (unlikely(!sock))
 117		return -ENOTSOCK;
 118
 119	ret = __sys_shutdown_sock(sock, shutdown->how);
 120	io_req_set_res(req, ret, 0);
 121	return IOU_OK;
 122}
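
/*
 * Illustrative userspace sketch (not part of this file): how the
 * IORING_OP_SHUTDOWN handler above is typically driven via liburing.
 * Assumes liburing is available; error handling is trimmed.
 */
#include <liburing.h>
#include <sys/socket.h>

static int shutdown_write_side(struct io_uring *ring, int sockfd)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;
	int res;

	/* maps to io_shutdown_prep()/io_shutdown(): 'how' travels in sqe->len */
	io_uring_prep_shutdown(sqe, sockfd, SHUT_WR);
	io_uring_submit(ring);
	io_uring_wait_cqe(ring, &cqe);
	res = cqe->res;		/* 0 on success, -ENOTSOCK/-errno otherwise */
	io_uring_cqe_seen(ring, cqe);
	return res;
}
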
 123
 124static bool io_net_retry(struct socket *sock, int flags)
 125{
 126	if (!(flags & MSG_WAITALL))
 127		return false;
 128	return sock->type == SOCK_STREAM || sock->type == SOCK_SEQPACKET;
 129}
 130
 131static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
 132{
 133	struct io_async_msghdr *hdr = req->async_data;
 
 134
 135	if (!req_has_async_data(req) || issue_flags & IO_URING_F_UNLOCKED)
 136		return;
 
 137
 138	/* Let normal cleanup path reap it if we fail adding to the cache */
 139	if (io_alloc_cache_put(&req->ctx->netmsg_cache, &hdr->cache)) {
 140		req->async_data = NULL;
 141		req->flags &= ~REQ_F_ASYNC_DATA;
 142	}
 143}
 144
 145static struct io_async_msghdr *io_msg_alloc_async(struct io_kiocb *req,
 146						  unsigned int issue_flags)
 147{
 148	struct io_ring_ctx *ctx = req->ctx;
 149	struct io_cache_entry *entry;
 150	struct io_async_msghdr *hdr;
 151
 152	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
 153		entry = io_alloc_cache_get(&ctx->netmsg_cache);
 154		if (entry) {
 155			hdr = container_of(entry, struct io_async_msghdr, cache);
 156			hdr->free_iov = NULL;
 157			req->flags |= REQ_F_ASYNC_DATA;
 158			req->async_data = hdr;
 159			return hdr;
 160		}
 161	}
 162
 163	if (!io_alloc_async_data(req)) {
 164		hdr = req->async_data;
 
 165		hdr->free_iov = NULL;
 166		return hdr;
 167	}
 168	return NULL;
 169}
 170
 171static inline struct io_async_msghdr *io_msg_alloc_async_prep(struct io_kiocb *req)
 172{
 173	/* ->prep_async is always called from the submission context */
 174	return io_msg_alloc_async(req, 0);
 175}
 176
 177static int io_setup_async_msg(struct io_kiocb *req,
 178			      struct io_async_msghdr *kmsg,
 179			      unsigned int issue_flags)
 180{
 181	struct io_async_msghdr *async_msg;
 182
 183	if (req_has_async_data(req))
 184		return -EAGAIN;
 185	async_msg = io_msg_alloc_async(req, issue_flags);
 186	if (!async_msg) {
 187		kfree(kmsg->free_iov);
 188		return -ENOMEM;
 189	}
 190	req->flags |= REQ_F_NEED_CLEANUP;
 191	memcpy(async_msg, kmsg, sizeof(*kmsg));
 192	if (async_msg->msg.msg_name)
 193		async_msg->msg.msg_name = &async_msg->addr;
 194
 195	if ((req->flags & REQ_F_BUFFER_SELECT) && !async_msg->msg.msg_iter.nr_segs)
 196		return -EAGAIN;
 197
  198	/* if we're using fast_iov, set it to the new one */
 199	if (iter_is_iovec(&kmsg->msg.msg_iter) && !kmsg->free_iov) {
 200		size_t fast_idx = iter_iov(&kmsg->msg.msg_iter) - kmsg->fast_iov;
 201		async_msg->msg.msg_iter.__iov = &async_msg->fast_iov[fast_idx];
 202	}
 203
 204	return -EAGAIN;
 205}
 206
 207static int io_sendmsg_copy_hdr(struct io_kiocb *req,
 208			       struct io_async_msghdr *iomsg)
 209{
 210	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 
 211	int ret;
 212
 213	iomsg->msg.msg_name = &iomsg->addr;
 214	iomsg->free_iov = iomsg->fast_iov;
 215	ret = sendmsg_copy_msghdr(&iomsg->msg, sr->umsg, sr->msg_flags,
 216					&iomsg->free_iov);
 217	/* save msg_control as sys_sendmsg() overwrites it */
 218	sr->msg_control = iomsg->msg.msg_control_user;
 219	return ret;
 220}
 221
 222int io_send_prep_async(struct io_kiocb *req)
 223{
 224	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
 225	struct io_async_msghdr *io;
 226	int ret;
 227
 228	if (!zc->addr || req_has_async_data(req))
 229		return 0;
 230	io = io_msg_alloc_async_prep(req);
 231	if (!io)
 232		return -ENOMEM;
 233	ret = move_addr_to_kernel(zc->addr, zc->addr_len, &io->addr);
 234	return ret;
 235}
 236
 237static int io_setup_async_addr(struct io_kiocb *req,
 238			      struct sockaddr_storage *addr_storage,
 239			      unsigned int issue_flags)
 240{
 241	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 242	struct io_async_msghdr *io;
 243
 244	if (!sr->addr || req_has_async_data(req))
 245		return -EAGAIN;
 246	io = io_msg_alloc_async(req, issue_flags);
 247	if (!io)
 248		return -ENOMEM;
 249	memcpy(&io->addr, addr_storage, sizeof(io->addr));
 250	return -EAGAIN;
 251}
 252
 253int io_sendmsg_prep_async(struct io_kiocb *req)
 254{
 255	int ret;
 256
 257	if (!io_msg_alloc_async_prep(req))
 258		return -ENOMEM;
 259	ret = io_sendmsg_copy_hdr(req, req->async_data);
 260	if (!ret)
 261		req->flags |= REQ_F_NEED_CLEANUP;
 262	return ret;
 263}
 264
 265void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req)
 266{
 267	struct io_async_msghdr *io = req->async_data;
 268
 269	kfree(io->free_iov);
 270}
 271
 272int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 273{
 274	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 275
 276	if (req->opcode == IORING_OP_SEND) {
 277		if (READ_ONCE(sqe->__pad3[0]))
 278			return -EINVAL;
 279		sr->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
 280		sr->addr_len = READ_ONCE(sqe->addr_len);
 281	} else if (sqe->addr2 || sqe->file_index) {
 282		return -EINVAL;
 283	}
 284
 285	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
 286	sr->len = READ_ONCE(sqe->len);
 287	sr->flags = READ_ONCE(sqe->ioprio);
 288	if (sr->flags & ~IORING_RECVSEND_POLL_FIRST)
 289		return -EINVAL;
 290	sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
 291	if (sr->msg_flags & MSG_DONTWAIT)
 292		req->flags |= REQ_F_NOWAIT;
 293
 294#ifdef CONFIG_COMPAT
 295	if (req->ctx->compat)
 296		sr->msg_flags |= MSG_CMSG_COMPAT;
 297#endif
 298	sr->done_io = 0;
 299	return 0;
 300}
 301
 302int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
 303{
 304	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 305	struct io_async_msghdr iomsg, *kmsg;
 306	struct socket *sock;
 307	unsigned flags;
 308	int min_ret = 0;
 309	int ret;
 310
 311	sock = sock_from_file(req->file);
 312	if (unlikely(!sock))
 313		return -ENOTSOCK;
 314
 315	if (req_has_async_data(req)) {
 316		kmsg = req->async_data;
 317		kmsg->msg.msg_control_user = sr->msg_control;
 318	} else {
 319		ret = io_sendmsg_copy_hdr(req, &iomsg);
 320		if (ret)
 321			return ret;
 322		kmsg = &iomsg;
 323	}
 324
 325	if (!(req->flags & REQ_F_POLLED) &&
 326	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
 327		return io_setup_async_msg(req, kmsg, issue_flags);
 328
 329	flags = sr->msg_flags;
 330	if (issue_flags & IO_URING_F_NONBLOCK)
 331		flags |= MSG_DONTWAIT;
 332	if (flags & MSG_WAITALL)
 333		min_ret = iov_iter_count(&kmsg->msg.msg_iter);
 334
 335	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
 336
 337	if (ret < min_ret) {
 338		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
 339			return io_setup_async_msg(req, kmsg, issue_flags);
 340		if (ret > 0 && io_net_retry(sock, flags)) {
 341			kmsg->msg.msg_controllen = 0;
 342			kmsg->msg.msg_control = NULL;
 343			sr->done_io += ret;
 344			req->flags |= REQ_F_PARTIAL_IO;
 345			return io_setup_async_msg(req, kmsg, issue_flags);
 346		}
 347		if (ret == -ERESTARTSYS)
 348			ret = -EINTR;
 349		req_set_fail(req);
 350	}
 351	/* fast path, check for non-NULL to avoid function call */
 352	if (kmsg->free_iov)
 353		kfree(kmsg->free_iov);
 354	req->flags &= ~REQ_F_NEED_CLEANUP;
 355	io_netmsg_recycle(req, issue_flags);
 356	if (ret >= 0)
 357		ret += sr->done_io;
 358	else if (sr->done_io)
 359		ret = sr->done_io;
 360	io_req_set_res(req, ret, 0);
 361	return IOU_OK;
 362}
 363
 364int io_send(struct io_kiocb *req, unsigned int issue_flags)
 365{
 366	struct sockaddr_storage __address;
 367	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 368	struct msghdr msg;
 369	struct socket *sock;
 370	unsigned flags;
 371	int min_ret = 0;
 372	int ret;
 373
 374	msg.msg_name = NULL;
 375	msg.msg_control = NULL;
 376	msg.msg_controllen = 0;
 377	msg.msg_namelen = 0;
 378	msg.msg_ubuf = NULL;
 379
 380	if (sr->addr) {
 381		if (req_has_async_data(req)) {
 382			struct io_async_msghdr *io = req->async_data;
 383
 384			msg.msg_name = &io->addr;
 385		} else {
 386			ret = move_addr_to_kernel(sr->addr, sr->addr_len, &__address);
 387			if (unlikely(ret < 0))
 388				return ret;
 389			msg.msg_name = (struct sockaddr *)&__address;
 390		}
 391		msg.msg_namelen = sr->addr_len;
 392	}
 393
 394	if (!(req->flags & REQ_F_POLLED) &&
 395	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
 396		return io_setup_async_addr(req, &__address, issue_flags);
 397
 398	sock = sock_from_file(req->file);
 399	if (unlikely(!sock))
 400		return -ENOTSOCK;
 401
 402	ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len, &msg.msg_iter);
 403	if (unlikely(ret))
 404		return ret;
 405
 406	flags = sr->msg_flags;
 407	if (issue_flags & IO_URING_F_NONBLOCK)
 408		flags |= MSG_DONTWAIT;
 409	if (flags & MSG_WAITALL)
 410		min_ret = iov_iter_count(&msg.msg_iter);
 411
 412	flags &= ~MSG_INTERNAL_SENDMSG_FLAGS;
 413	msg.msg_flags = flags;
 414	ret = sock_sendmsg(sock, &msg);
 415	if (ret < min_ret) {
 416		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
 417			return io_setup_async_addr(req, &__address, issue_flags);
 418
 419		if (ret > 0 && io_net_retry(sock, flags)) {
 420			sr->len -= ret;
 421			sr->buf += ret;
 422			sr->done_io += ret;
 423			req->flags |= REQ_F_PARTIAL_IO;
 424			return io_setup_async_addr(req, &__address, issue_flags);
 425		}
 426		if (ret == -ERESTARTSYS)
 427			ret = -EINTR;
 428		req_set_fail(req);
 429	}
 430	if (ret >= 0)
 431		ret += sr->done_io;
 432	else if (sr->done_io)
 433		ret = sr->done_io;
 434	io_req_set_res(req, ret, 0);
 435	return IOU_OK;
 436}
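
/*
 * Illustrative userspace sketch (not part of this file): a plain
 * IORING_OP_SEND as handled by io_send() above, asking the kernel to poll
 * for space first. IORING_RECVSEND_POLL_FIRST is read from sqe->ioprio in
 * io_sendmsg_prep() above. Assumes liburing; error handling trimmed.
 */
#include <liburing.h>
#include <sys/socket.h>

static int send_poll_first(struct io_uring *ring, int sockfd,
			   const char *buf, size_t len)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;
	int res;

	io_uring_prep_send(sqe, sockfd, buf, len, MSG_NOSIGNAL);
	sqe->ioprio |= IORING_RECVSEND_POLL_FIRST;	/* arm poll before trying to send */
	io_uring_submit(ring);
	io_uring_wait_cqe(ring, &cqe);
	res = cqe->res;		/* bytes sent, or -errno */
	io_uring_cqe_seen(ring, cqe);
	return res;
}
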
 437
 438static bool io_recvmsg_multishot_overflow(struct io_async_msghdr *iomsg)
 439{
 440	int hdr;
 441
 442	if (iomsg->namelen < 0)
 443		return true;
 444	if (check_add_overflow((int)sizeof(struct io_uring_recvmsg_out),
 445			       iomsg->namelen, &hdr))
 446		return true;
 447	if (check_add_overflow(hdr, (int)iomsg->controllen, &hdr))
 448		return true;
 449
 450	return false;
 
 451}
 452
 453static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
 454				 struct io_async_msghdr *iomsg)
 455{
 456	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 457	struct user_msghdr msg;
 458	int ret;
 459
 460	if (copy_from_user(&msg, sr->umsg, sizeof(*sr->umsg)))
 461		return -EFAULT;
 462
 463	ret = __copy_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
 464	if (ret)
 465		return ret;
 466
 467	if (req->flags & REQ_F_BUFFER_SELECT) {
 468		if (msg.msg_iovlen == 0) {
 469			sr->len = iomsg->fast_iov[0].iov_len = 0;
 470			iomsg->fast_iov[0].iov_base = NULL;
 471			iomsg->free_iov = NULL;
 472		} else if (msg.msg_iovlen > 1) {
 473			return -EINVAL;
 474		} else {
 475			if (copy_from_user(iomsg->fast_iov, msg.msg_iov, sizeof(*msg.msg_iov)))
 476				return -EFAULT;
 477			sr->len = iomsg->fast_iov[0].iov_len;
 478			iomsg->free_iov = NULL;
 479		}
 480
 481		if (req->flags & REQ_F_APOLL_MULTISHOT) {
 482			iomsg->namelen = msg.msg_namelen;
 483			iomsg->controllen = msg.msg_controllen;
 484			if (io_recvmsg_multishot_overflow(iomsg))
 485				return -EOVERFLOW;
 486		}
 487	} else {
 488		iomsg->free_iov = iomsg->fast_iov;
 489		ret = __import_iovec(ITER_DEST, msg.msg_iov, msg.msg_iovlen, UIO_FASTIOV,
 490				     &iomsg->free_iov, &iomsg->msg.msg_iter,
 491				     false);
 492		if (ret > 0)
 493			ret = 0;
 494	}
 495
 496	return ret;
 497}
 498
 499#ifdef CONFIG_COMPAT
 500static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
 501					struct io_async_msghdr *iomsg)
 502{
 503	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 504	struct compat_msghdr msg;
 505	struct compat_iovec __user *uiov;
 506	int ret;
 507
 508	if (copy_from_user(&msg, sr->umsg_compat, sizeof(msg)))
 509		return -EFAULT;
 510
 511	ret = __get_compat_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
 512	if (ret)
 513		return ret;
 514
 515	uiov = compat_ptr(msg.msg_iov);
 516	if (req->flags & REQ_F_BUFFER_SELECT) {
 517		compat_ssize_t clen;
 518
 519		iomsg->free_iov = NULL;
 520		if (msg.msg_iovlen == 0) {
 521			sr->len = 0;
 522		} else if (msg.msg_iovlen > 1) {
 523			return -EINVAL;
 524		} else {
 525			if (!access_ok(uiov, sizeof(*uiov)))
 526				return -EFAULT;
 527			if (__get_user(clen, &uiov->iov_len))
 528				return -EFAULT;
 529			if (clen < 0)
 530				return -EINVAL;
 531			sr->len = clen;
 532		}
 533
 534		if (req->flags & REQ_F_APOLL_MULTISHOT) {
 535			iomsg->namelen = msg.msg_namelen;
 536			iomsg->controllen = msg.msg_controllen;
 537			if (io_recvmsg_multishot_overflow(iomsg))
 538				return -EOVERFLOW;
 539		}
 540	} else {
 541		iomsg->free_iov = iomsg->fast_iov;
 542		ret = __import_iovec(ITER_DEST, (struct iovec __user *)uiov, msg.msg_iovlen,
 543				   UIO_FASTIOV, &iomsg->free_iov,
 544				   &iomsg->msg.msg_iter, true);
 545		if (ret < 0)
 546			return ret;
 547	}
 548
 549	return 0;
 550}
 
 551#endif
 552
 553static int io_recvmsg_copy_hdr(struct io_kiocb *req,
 554			       struct io_async_msghdr *iomsg)
 555{
 556	iomsg->msg.msg_name = &iomsg->addr;
 557	iomsg->msg.msg_iter.nr_segs = 0;
 558
 559#ifdef CONFIG_COMPAT
 560	if (req->ctx->compat)
 561		return __io_compat_recvmsg_copy_hdr(req, iomsg);
 562#endif
 563
 564	return __io_recvmsg_copy_hdr(req, iomsg);
 
 565}
 566
 567int io_recvmsg_prep_async(struct io_kiocb *req)
 568{
 569	int ret;
 570
 571	if (!io_msg_alloc_async_prep(req))
 572		return -ENOMEM;
 573	ret = io_recvmsg_copy_hdr(req, req->async_data);
 574	if (!ret)
 575		req->flags |= REQ_F_NEED_CLEANUP;
 576	return ret;
 577}
 578
 579#define RECVMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECV_MULTISHOT)
 
 580
 581int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 582{
 583	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 584
 585	if (unlikely(sqe->file_index || sqe->addr2))
 586		return -EINVAL;
 587
 588	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
 589	sr->len = READ_ONCE(sqe->len);
 590	sr->flags = READ_ONCE(sqe->ioprio);
 591	if (sr->flags & ~(RECVMSG_FLAGS))
 592		return -EINVAL;
 593	sr->msg_flags = READ_ONCE(sqe->msg_flags);
 594	if (sr->msg_flags & MSG_DONTWAIT)
 595		req->flags |= REQ_F_NOWAIT;
 596	if (sr->msg_flags & MSG_ERRQUEUE)
 597		req->flags |= REQ_F_CLEAR_POLLIN;
 598	if (sr->flags & IORING_RECV_MULTISHOT) {
 599		if (!(req->flags & REQ_F_BUFFER_SELECT))
 600			return -EINVAL;
 601		if (sr->msg_flags & MSG_WAITALL)
 602			return -EINVAL;
 603		if (req->opcode == IORING_OP_RECV && sr->len)
 604			return -EINVAL;
 605		req->flags |= REQ_F_APOLL_MULTISHOT;
 606		/*
 607		 * Store the buffer group for this multishot receive separately,
 608		 * as if we end up doing an io-wq based issue that selects a
 609		 * buffer, it has to be committed immediately and that will
 610		 * clear ->buf_list. This means we lose the link to the buffer
 611		 * list, and the eventual buffer put on completion then cannot
 612		 * restore it.
 613		 */
 614		sr->buf_group = req->buf_index;
 615	}
 616
 617#ifdef CONFIG_COMPAT
 618	if (req->ctx->compat)
 619		sr->msg_flags |= MSG_CMSG_COMPAT;
 620#endif
 621	sr->done_io = 0;
 622	sr->nr_multishot_loops = 0;
 623	return 0;
 624}
 625
 626static inline void io_recv_prep_retry(struct io_kiocb *req)
 627{
 628	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 629
 630	sr->done_io = 0;
 631	sr->len = 0; /* get from the provided buffer */
 632	req->buf_index = sr->buf_group;
 633}
 634
 635/*
 636 * Finishes io_recv and io_recvmsg.
 637 *
 638 * Returns true if it is actually finished, or false if it should run
 639 * again (for multishot).
 640 */
 641static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
 642				  struct msghdr *msg, bool mshot_finished,
 643				  unsigned issue_flags)
 644{
 645	unsigned int cflags;
 
 646
 647	cflags = io_put_kbuf(req, issue_flags);
 648	if (msg->msg_inq && msg->msg_inq != -1)
 649		cflags |= IORING_CQE_F_SOCK_NONEMPTY;
 650
 651	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
 652		io_req_set_res(req, *ret, cflags);
 653		*ret = IOU_OK;
 654		return true;
 655	}
 656
 657	if (mshot_finished)
 658		goto finish;
 659
 660	/*
 661	 * Fill CQE for this receive and see if we should keep trying to
 662	 * receive from this socket.
 663	 */
 664	if (io_fill_cqe_req_aux(req, issue_flags & IO_URING_F_COMPLETE_DEFER,
 665				*ret, cflags | IORING_CQE_F_MORE)) {
 666		struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 667		int mshot_retry_ret = IOU_ISSUE_SKIP_COMPLETE;
 668
 669		io_recv_prep_retry(req);
 670		/* Known not-empty or unknown state, retry */
 671		if (cflags & IORING_CQE_F_SOCK_NONEMPTY || msg->msg_inq == -1) {
 672			if (sr->nr_multishot_loops++ < MULTISHOT_MAX_RETRY)
 673				return false;
 674			/* mshot retries exceeded, force a requeue */
 675			sr->nr_multishot_loops = 0;
 676			mshot_retry_ret = IOU_REQUEUE;
 677		}
 678		if (issue_flags & IO_URING_F_MULTISHOT)
 679			*ret = mshot_retry_ret;
 680		else
 681			*ret = -EAGAIN;
 682		return true;
 683	}
 684	/* Otherwise stop multishot but use the current result. */
 
 685finish:
 686	io_req_set_res(req, *ret, cflags);
 687
 688	if (issue_flags & IO_URING_F_MULTISHOT)
 689		*ret = IOU_STOP_MULTISHOT;
 690	else
 691		*ret = IOU_OK;
 
 692	return true;
 693}
 694
 695static int io_recvmsg_prep_multishot(struct io_async_msghdr *kmsg,
 696				     struct io_sr_msg *sr, void __user **buf,
 697				     size_t *len)
 698{
 699	unsigned long ubuf = (unsigned long) *buf;
 700	unsigned long hdr;
 701
 702	hdr = sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
 703		kmsg->controllen;
 704	if (*len < hdr)
 705		return -EFAULT;
 706
 707	if (kmsg->controllen) {
 708		unsigned long control = ubuf + hdr - kmsg->controllen;
 709
 710		kmsg->msg.msg_control_user = (void __user *) control;
 711		kmsg->msg.msg_controllen = kmsg->controllen;
 712	}
 713
 714	sr->buf = *buf; /* stash for later copy */
 715	*buf = (void __user *) (ubuf + hdr);
 716	kmsg->payloadlen = *len = *len - hdr;
 717	return 0;
 718}
 719
 720struct io_recvmsg_multishot_hdr {
 721	struct io_uring_recvmsg_out msg;
 722	struct sockaddr_storage addr;
 723};
 724
 725static int io_recvmsg_multishot(struct socket *sock, struct io_sr_msg *io,
 726				struct io_async_msghdr *kmsg,
 727				unsigned int flags, bool *finished)
 728{
 729	int err;
 730	int copy_len;
 731	struct io_recvmsg_multishot_hdr hdr;
 732
 733	if (kmsg->namelen)
 734		kmsg->msg.msg_name = &hdr.addr;
 735	kmsg->msg.msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT);
 736	kmsg->msg.msg_namelen = 0;
 737
 738	if (sock->file->f_flags & O_NONBLOCK)
 739		flags |= MSG_DONTWAIT;
 740
 741	err = sock_recvmsg(sock, &kmsg->msg, flags);
 742	*finished = err <= 0;
 743	if (err < 0)
 744		return err;
 745
 746	hdr.msg = (struct io_uring_recvmsg_out) {
 747		.controllen = kmsg->controllen - kmsg->msg.msg_controllen,
 748		.flags = kmsg->msg.msg_flags & ~MSG_CMSG_COMPAT
 749	};
 750
 751	hdr.msg.payloadlen = err;
 752	if (err > kmsg->payloadlen)
 753		err = kmsg->payloadlen;
 754
 755	copy_len = sizeof(struct io_uring_recvmsg_out);
 756	if (kmsg->msg.msg_namelen > kmsg->namelen)
 757		copy_len += kmsg->namelen;
 758	else
 759		copy_len += kmsg->msg.msg_namelen;
 760
 761	/*
 762	 *      "fromlen shall refer to the value before truncation.."
 763	 *                      1003.1g
 764	 */
 765	hdr.msg.namelen = kmsg->msg.msg_namelen;
 766
 767	/* ensure that there is no gap between hdr and sockaddr_storage */
 768	BUILD_BUG_ON(offsetof(struct io_recvmsg_multishot_hdr, addr) !=
 769		     sizeof(struct io_uring_recvmsg_out));
 770	if (copy_to_user(io->buf, &hdr, copy_len)) {
 771		*finished = true;
 772		return -EFAULT;
 773	}
 774
 775	return sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
 776			kmsg->controllen + err;
 777}
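
/*
 * Illustrative userspace sketch (not part of this file): decoding one
 * multishot IORING_OP_RECVMSG completion. As io_recvmsg_prep_multishot()
 * and io_recvmsg_multishot() above lay it out, the selected buffer holds a
 * struct io_uring_recvmsg_out header, then the (possibly truncated) source
 * address, then control data, then the payload. Assumes <linux/io_uring.h>;
 * orig_namelen/orig_controllen are the sizes originally requested in the
 * msghdr, not the returned ones.
 */
#include <linux/io_uring.h>
#include <stdio.h>

static void dump_mshot_recvmsg(const void *buf, unsigned orig_namelen,
			       unsigned orig_controllen)
{
	const struct io_uring_recvmsg_out *out = buf;
	const char *payload = (const char *)buf + sizeof(*out) +
			      orig_namelen + orig_controllen;

	printf("addr %u bytes (pre-truncation), cmsg %u bytes, payload %u bytes\n",
	       out->namelen, out->controllen, out->payloadlen);
	(void)payload;	/* real code would consume out->payloadlen bytes here */
}
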
 778
 779int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
 780{
 781	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 782	struct io_async_msghdr iomsg, *kmsg;
 783	struct socket *sock;
 784	unsigned flags;
 785	int ret, min_ret = 0;
 786	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
 787	bool mshot_finished = true;
 788
 789	sock = sock_from_file(req->file);
 790	if (unlikely(!sock))
 791		return -ENOTSOCK;
 792
 793	if (req_has_async_data(req)) {
 794		kmsg = req->async_data;
 795	} else {
 796		ret = io_recvmsg_copy_hdr(req, &iomsg);
 797		if (ret)
 798			return ret;
 799		kmsg = &iomsg;
 800	}
 801
 802	if (!(req->flags & REQ_F_POLLED) &&
 803	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
 804		return io_setup_async_msg(req, kmsg, issue_flags);
 805
 806	if (!io_check_multishot(req, issue_flags))
 807		return io_setup_async_msg(req, kmsg, issue_flags);
 
 808
 809retry_multishot:
 810	if (io_do_buffer_select(req)) {
 811		void __user *buf;
 812		size_t len = sr->len;
 813
 814		buf = io_buffer_select(req, &len, issue_flags);
 815		if (!buf)
 816			return -ENOBUFS;
 817
 818		if (req->flags & REQ_F_APOLL_MULTISHOT) {
 819			ret = io_recvmsg_prep_multishot(kmsg, sr, &buf, &len);
 820			if (ret) {
 821				io_kbuf_recycle(req, issue_flags);
 822				return ret;
 823			}
 824		}
 825
 826		iov_iter_ubuf(&kmsg->msg.msg_iter, ITER_DEST, buf, len);
 827	}
 828
 829	flags = sr->msg_flags;
 830	if (force_nonblock)
 831		flags |= MSG_DONTWAIT;
 832
 833	kmsg->msg.msg_get_inq = 1;
 834	kmsg->msg.msg_inq = -1;
 835	if (req->flags & REQ_F_APOLL_MULTISHOT) {
 836		ret = io_recvmsg_multishot(sock, sr, kmsg, flags,
 837					   &mshot_finished);
 838	} else {
 839		/* disable partial retry for recvmsg with cmsg attached */
 840		if (flags & MSG_WAITALL && !kmsg->msg.msg_controllen)
 841			min_ret = iov_iter_count(&kmsg->msg.msg_iter);
 842
 843		ret = __sys_recvmsg_sock(sock, &kmsg->msg, sr->umsg,
 844					 kmsg->uaddr, flags);
 845	}
 846
 847	if (ret < min_ret) {
 848		if (ret == -EAGAIN && force_nonblock) {
 849			ret = io_setup_async_msg(req, kmsg, issue_flags);
 850			if (ret == -EAGAIN && (issue_flags & IO_URING_F_MULTISHOT)) {
 851				io_kbuf_recycle(req, issue_flags);
 852				return IOU_ISSUE_SKIP_COMPLETE;
 853			}
 854			return ret;
 855		}
 856		if (ret > 0 && io_net_retry(sock, flags)) {
 857			sr->done_io += ret;
 858			req->flags |= REQ_F_PARTIAL_IO;
 859			return io_setup_async_msg(req, kmsg, issue_flags);
 860		}
 861		if (ret == -ERESTARTSYS)
 862			ret = -EINTR;
 863		req_set_fail(req);
 864	} else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
 865		req_set_fail(req);
 866	}
 867
 868	if (ret > 0)
 869		ret += sr->done_io;
 870	else if (sr->done_io)
 871		ret = sr->done_io;
 872	else
 873		io_kbuf_recycle(req, issue_flags);
 874
 875	if (!io_recv_finish(req, &ret, &kmsg->msg, mshot_finished, issue_flags))
 876		goto retry_multishot;
 877
 878	if (mshot_finished) {
 879		/* fast path, check for non-NULL to avoid function call */
 880		if (kmsg->free_iov)
 881			kfree(kmsg->free_iov);
 882		io_netmsg_recycle(req, issue_flags);
 883		req->flags &= ~REQ_F_NEED_CLEANUP;
 884	}
 885
 886	return ret;
 887}
 888
 889int io_recv(struct io_kiocb *req, unsigned int issue_flags)
 890{
 891	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 892	struct msghdr msg;
 893	struct socket *sock;
 894	unsigned flags;
 895	int ret, min_ret = 0;
 896	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
 897	size_t len = sr->len;
 
 898
 899	if (!(req->flags & REQ_F_POLLED) &&
 900	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
 901		return -EAGAIN;
 902
 903	if (!io_check_multishot(req, issue_flags))
 904		return -EAGAIN;
 905
 906	sock = sock_from_file(req->file);
 907	if (unlikely(!sock))
 908		return -ENOTSOCK;
 909
 910	msg.msg_name = NULL;
 911	msg.msg_namelen = 0;
 912	msg.msg_control = NULL;
 913	msg.msg_get_inq = 1;
 914	msg.msg_controllen = 0;
 915	msg.msg_iocb = NULL;
 916	msg.msg_ubuf = NULL;
 917
 918retry_multishot:
 919	if (io_do_buffer_select(req)) {
 920		void __user *buf;
 921
 922		buf = io_buffer_select(req, &len, issue_flags);
 923		if (!buf)
 924			return -ENOBUFS;
 925		sr->buf = buf;
 926		sr->len = len;
 927	}
 928
 929	ret = import_ubuf(ITER_DEST, sr->buf, len, &msg.msg_iter);
 930	if (unlikely(ret))
 931		goto out_free;
 932
 933	msg.msg_inq = -1;
 934	msg.msg_flags = 0;
 935
 936	flags = sr->msg_flags;
 937	if (force_nonblock)
 938		flags |= MSG_DONTWAIT;
 939	if (flags & MSG_WAITALL)
 940		min_ret = iov_iter_count(&msg.msg_iter);
 941
 942	ret = sock_recvmsg(sock, &msg, flags);
 943	if (ret < min_ret) {
 944		if (ret == -EAGAIN && force_nonblock) {
 945			if (issue_flags & IO_URING_F_MULTISHOT) {
 946				io_kbuf_recycle(req, issue_flags);
 947				return IOU_ISSUE_SKIP_COMPLETE;
 948			}
 949
 950			return -EAGAIN;
 951		}
 952		if (ret > 0 && io_net_retry(sock, flags)) {
 953			sr->len -= ret;
 954			sr->buf += ret;
 955			sr->done_io += ret;
 956			req->flags |= REQ_F_PARTIAL_IO;
 957			return -EAGAIN;
 958		}
 959		if (ret == -ERESTARTSYS)
 960			ret = -EINTR;
 961		req_set_fail(req);
 962	} else if ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
 963out_free:
 964		req_set_fail(req);
 965	}
 966
 
 967	if (ret > 0)
 968		ret += sr->done_io;
 969	else if (sr->done_io)
 970		ret = sr->done_io;
 971	else
 972		io_kbuf_recycle(req, issue_flags);
 973
 974	if (!io_recv_finish(req, &ret, &msg, ret <= 0, issue_flags))
 975		goto retry_multishot;
 976
 977	return ret;
 978}
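
/*
 * Illustrative userspace sketch (not part of this file): a multishot
 * IORING_OP_RECV as serviced by io_recv()/io_recv_finish() above. One SQE
 * keeps producing CQEs while IORING_CQE_F_MORE is set; each completion
 * consumes a buffer from provided-buffer group BGID, registered elsewhere
 * (e.g. with io_uring_setup_buf_ring()). BGID is a hypothetical group id.
 * Assumes liburing; error handling trimmed.
 */
#include <liburing.h>

#define BGID	7	/* hypothetical provided-buffer group id */

static void recv_multishot_loop(struct io_uring *ring, int sockfd)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;

	io_uring_prep_recv_multishot(sqe, sockfd, NULL, 0, 0);
	sqe->flags |= IOSQE_BUFFER_SELECT;	/* REQ_F_BUFFER_SELECT is mandatory */
	sqe->buf_group = BGID;
	io_uring_submit(ring);

	for (;;) {
		unsigned bid;
		int more;

		io_uring_wait_cqe(ring, &cqe);
		if (cqe->res <= 0) {		/* error or EOF ends the multishot */
			io_uring_cqe_seen(ring, cqe);
			break;
		}
		bid = cqe->flags >> IORING_CQE_BUFFER_SHIFT;
		more = cqe->flags & IORING_CQE_F_MORE;
		/* ... consume cqe->res bytes from buffer 'bid', then recycle it ... */
		(void)bid;
		io_uring_cqe_seen(ring, cqe);
		if (!more)
			break;	/* kernel stopped the multishot; resubmit if desired */
	}
}
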
 979
 980void io_send_zc_cleanup(struct io_kiocb *req)
 981{
 982	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
 983	struct io_async_msghdr *io;
 984
 985	if (req_has_async_data(req)) {
 986		io = req->async_data;
 987		/* might be ->fast_iov if *msg_copy_hdr failed */
 988		if (io->free_iov != io->fast_iov)
 989			kfree(io->free_iov);
 990	}
 991	if (zc->notif) {
 992		io_notif_flush(zc->notif);
 993		zc->notif = NULL;
 994	}
 995}
 996
 997#define IO_ZC_FLAGS_COMMON (IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_FIXED_BUF)
 998#define IO_ZC_FLAGS_VALID  (IO_ZC_FLAGS_COMMON | IORING_SEND_ZC_REPORT_USAGE)
 999
1000int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1001{
1002	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
1003	struct io_ring_ctx *ctx = req->ctx;
1004	struct io_kiocb *notif;
1005
1006	if (unlikely(READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3)))
1007		return -EINVAL;
1008	/* we don't support IOSQE_CQE_SKIP_SUCCESS just yet */
1009	if (req->flags & REQ_F_CQE_SKIP)
1010		return -EINVAL;
1011
1012	notif = zc->notif = io_alloc_notif(ctx);
1013	if (!notif)
1014		return -ENOMEM;
1015	notif->cqe.user_data = req->cqe.user_data;
1016	notif->cqe.res = 0;
1017	notif->cqe.flags = IORING_CQE_F_NOTIF;
1018	req->flags |= REQ_F_NEED_CLEANUP;
1019
1020	zc->flags = READ_ONCE(sqe->ioprio);
1021	if (unlikely(zc->flags & ~IO_ZC_FLAGS_COMMON)) {
1022		if (zc->flags & ~IO_ZC_FLAGS_VALID)
1023			return -EINVAL;
1024		if (zc->flags & IORING_SEND_ZC_REPORT_USAGE) {
1025			io_notif_set_extended(notif);
1026			io_notif_to_data(notif)->zc_report = true;
1027		}
1028	}
1029
1030	if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
1031		unsigned idx = READ_ONCE(sqe->buf_index);
1032
1033		if (unlikely(idx >= ctx->nr_user_bufs))
1034			return -EFAULT;
1035		idx = array_index_nospec(idx, ctx->nr_user_bufs);
1036		req->imu = READ_ONCE(ctx->user_bufs[idx]);
1037		io_req_set_rsrc_node(notif, ctx, 0);
1038	}
1039
1040	if (req->opcode == IORING_OP_SEND_ZC) {
1041		if (READ_ONCE(sqe->__pad3[0]))
1042			return -EINVAL;
1043		zc->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
1044		zc->addr_len = READ_ONCE(sqe->addr_len);
1045	} else {
1046		if (unlikely(sqe->addr2 || sqe->file_index))
1047			return -EINVAL;
1048		if (unlikely(zc->flags & IORING_RECVSEND_FIXED_BUF))
1049			return -EINVAL;
1050	}
1051
1052	zc->buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
1053	zc->len = READ_ONCE(sqe->len);
1054	zc->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
 
1055	if (zc->msg_flags & MSG_DONTWAIT)
1056		req->flags |= REQ_F_NOWAIT;
1057
1058	zc->done_io = 0;
1059
1060#ifdef CONFIG_COMPAT
1061	if (req->ctx->compat)
1062		zc->msg_flags |= MSG_CMSG_COMPAT;
1063#endif
1064	return 0;
1065}
1066
1067static int io_sg_from_iter_iovec(struct sock *sk, struct sk_buff *skb,
1068				 struct iov_iter *from, size_t length)
1069{
1070	skb_zcopy_downgrade_managed(skb);
1071	return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);
1072}
1073
1074static int io_sg_from_iter(struct sock *sk, struct sk_buff *skb,
1075			   struct iov_iter *from, size_t length)
1076{
1077	struct skb_shared_info *shinfo = skb_shinfo(skb);
1078	int frag = shinfo->nr_frags;
1079	int ret = 0;
1080	struct bvec_iter bi;
1081	ssize_t copied = 0;
1082	unsigned long truesize = 0;
1083
1084	if (!frag)
1085		shinfo->flags |= SKBFL_MANAGED_FRAG_REFS;
1086	else if (unlikely(!skb_zcopy_managed(skb)))
1087		return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);
1088
1089	bi.bi_size = min(from->count, length);
1090	bi.bi_bvec_done = from->iov_offset;
1091	bi.bi_idx = 0;
1092
1093	while (bi.bi_size && frag < MAX_SKB_FRAGS) {
1094		struct bio_vec v = mp_bvec_iter_bvec(from->bvec, bi);
1095
1096		copied += v.bv_len;
1097		truesize += PAGE_ALIGN(v.bv_len + v.bv_offset);
1098		__skb_fill_page_desc_noacc(shinfo, frag++, v.bv_page,
1099					   v.bv_offset, v.bv_len);
1100		bvec_iter_advance_single(from->bvec, &bi, v.bv_len);
1101	}
1102	if (bi.bi_size)
1103		ret = -EMSGSIZE;
1104
1105	shinfo->nr_frags = frag;
1106	from->bvec += bi.bi_idx;
1107	from->nr_segs -= bi.bi_idx;
1108	from->count -= copied;
1109	from->iov_offset = bi.bi_bvec_done;
1110
1111	skb->data_len += copied;
1112	skb->len += copied;
1113	skb->truesize += truesize;
1114
1115	if (sk && sk->sk_type == SOCK_STREAM) {
1116		sk_wmem_queued_add(sk, truesize);
1117		if (!skb_zcopy_pure(skb))
1118			sk_mem_charge(sk, truesize);
1119	} else {
1120		refcount_add(truesize, &skb->sk->sk_wmem_alloc);
1121	}
 
1122	return ret;
1123}
1124
1125int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
1126{
1127	struct sockaddr_storage __address;
1128	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
1129	struct msghdr msg;
1130	struct socket *sock;
1131	unsigned msg_flags;
1132	int ret, min_ret = 0;
1133
1134	sock = sock_from_file(req->file);
1135	if (unlikely(!sock))
1136		return -ENOTSOCK;
1137	if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
1138		return -EOPNOTSUPP;
1139
1140	msg.msg_name = NULL;
1141	msg.msg_control = NULL;
1142	msg.msg_controllen = 0;
1143	msg.msg_namelen = 0;
1144
1145	if (zc->addr) {
1146		if (req_has_async_data(req)) {
1147			struct io_async_msghdr *io = req->async_data;
1148
1149			msg.msg_name = &io->addr;
1150		} else {
1151			ret = move_addr_to_kernel(zc->addr, zc->addr_len, &__address);
1152			if (unlikely(ret < 0))
1153				return ret;
1154			msg.msg_name = (struct sockaddr *)&__address;
1155		}
1156		msg.msg_namelen = zc->addr_len;
1157	}
1158
1159	if (!(req->flags & REQ_F_POLLED) &&
1160	    (zc->flags & IORING_RECVSEND_POLL_FIRST))
1161		return io_setup_async_addr(req, &__address, issue_flags);
1162
1163	if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
1164		ret = io_import_fixed(ITER_SOURCE, &msg.msg_iter, req->imu,
1165					(u64)(uintptr_t)zc->buf, zc->len);
1166		if (unlikely(ret))
1167			return ret;
1168		msg.sg_from_iter = io_sg_from_iter;
1169	} else {
1170		io_notif_set_extended(zc->notif);
1171		ret = import_ubuf(ITER_SOURCE, zc->buf, zc->len, &msg.msg_iter);
1172		if (unlikely(ret))
1173			return ret;
1174		ret = io_notif_account_mem(zc->notif, zc->len);
1175		if (unlikely(ret))
1176			return ret;
1177		msg.sg_from_iter = io_sg_from_iter_iovec;
1178	}
1179
1180	msg_flags = zc->msg_flags | MSG_ZEROCOPY;
1181	if (issue_flags & IO_URING_F_NONBLOCK)
1182		msg_flags |= MSG_DONTWAIT;
1183	if (msg_flags & MSG_WAITALL)
1184		min_ret = iov_iter_count(&msg.msg_iter);
1185	msg_flags &= ~MSG_INTERNAL_SENDMSG_FLAGS;
1186
1187	msg.msg_flags = msg_flags;
1188	msg.msg_ubuf = &io_notif_to_data(zc->notif)->uarg;
1189	ret = sock_sendmsg(sock, &msg);
1190
1191	if (unlikely(ret < min_ret)) {
1192		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
1193			return io_setup_async_addr(req, &__address, issue_flags);
1194
1195		if (ret > 0 && io_net_retry(sock, msg.msg_flags)) {
1196			zc->len -= ret;
1197			zc->buf += ret;
1198			zc->done_io += ret;
1199			req->flags |= REQ_F_PARTIAL_IO;
1200			return io_setup_async_addr(req, &__address, issue_flags);
1201		}
1202		if (ret == -ERESTARTSYS)
1203			ret = -EINTR;
1204		req_set_fail(req);
1205	}
1206
1207	if (ret >= 0)
1208		ret += zc->done_io;
1209	else if (zc->done_io)
1210		ret = zc->done_io;
1211
1212	/*
1213	 * If we're in io-wq we can't rely on tw ordering guarantees, defer
1214	 * flushing notif to io_send_zc_cleanup()
1215	 */
1216	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
1217		io_notif_flush(zc->notif);
1218		req->flags &= ~REQ_F_NEED_CLEANUP;
1219	}
1220	io_req_set_res(req, ret, IORING_CQE_F_MORE);
1221	return IOU_OK;
1222}
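
/*
 * Illustrative userspace sketch (not part of this file): IORING_OP_SEND_ZC
 * as handled by io_send_zc() above. The request posts its result with
 * IORING_CQE_F_MORE set, and a second CQE carrying IORING_CQE_F_NOTIF
 * arrives once the kernel no longer references the user pages, so the
 * buffer must stay stable until then. Assumes liburing; error handling
 * trimmed.
 */
#include <liburing.h>
#include <sys/socket.h>

static int send_zc(struct io_uring *ring, int sockfd, const void *buf, size_t len)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;
	int sent = 0, done = 0;

	io_uring_prep_send_zc(sqe, sockfd, buf, len, MSG_NOSIGNAL, 0);
	io_uring_submit(ring);

	while (!done) {
		io_uring_wait_cqe(ring, &cqe);
		if (cqe->flags & IORING_CQE_F_NOTIF) {
			done = 1;		/* buffer may be reused now */
		} else {
			sent = cqe->res;	/* bytes sent, or -errno */
			if (!(cqe->flags & IORING_CQE_F_MORE))
				done = 1;	/* no notification will follow */
		}
		io_uring_cqe_seen(ring, cqe);
	}
	return sent;
}
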
1223
1224int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
1225{
1226	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
1227	struct io_async_msghdr iomsg, *kmsg;
1228	struct socket *sock;
1229	unsigned flags;
1230	int ret, min_ret = 0;
1231
1232	io_notif_set_extended(sr->notif);
1233
1234	sock = sock_from_file(req->file);
1235	if (unlikely(!sock))
1236		return -ENOTSOCK;
1237	if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
1238		return -EOPNOTSUPP;
1239
1240	if (req_has_async_data(req)) {
1241		kmsg = req->async_data;
1242	} else {
1243		ret = io_sendmsg_copy_hdr(req, &iomsg);
1244		if (ret)
1245			return ret;
1246		kmsg = &iomsg;
1247	}
1248
1249	if (!(req->flags & REQ_F_POLLED) &&
1250	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
1251		return io_setup_async_msg(req, kmsg, issue_flags);
1252
1253	flags = sr->msg_flags | MSG_ZEROCOPY;
1254	if (issue_flags & IO_URING_F_NONBLOCK)
1255		flags |= MSG_DONTWAIT;
1256	if (flags & MSG_WAITALL)
1257		min_ret = iov_iter_count(&kmsg->msg.msg_iter);
1258
 
1259	kmsg->msg.msg_ubuf = &io_notif_to_data(sr->notif)->uarg;
1260	kmsg->msg.sg_from_iter = io_sg_from_iter_iovec;
1261	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
1262
1263	if (unlikely(ret < min_ret)) {
1264		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
1265			return io_setup_async_msg(req, kmsg, issue_flags);
1266
1267		if (ret > 0 && io_net_retry(sock, flags)) {
1268			sr->done_io += ret;
1269			req->flags |= REQ_F_PARTIAL_IO;
1270			return io_setup_async_msg(req, kmsg, issue_flags);
1271		}
1272		if (ret == -ERESTARTSYS)
1273			ret = -EINTR;
1274		req_set_fail(req);
1275	}
1276	/* fast path, check for non-NULL to avoid function call */
1277	if (kmsg->free_iov) {
1278		kfree(kmsg->free_iov);
1279		kmsg->free_iov = NULL;
1280	}
1281
1282	io_netmsg_recycle(req, issue_flags);
1283	if (ret >= 0)
1284		ret += sr->done_io;
1285	else if (sr->done_io)
1286		ret = sr->done_io;
1287
1288	/*
1289	 * If we're in io-wq we can't rely on tw ordering guarantees, defer
1290	 * flushing notif to io_send_zc_cleanup()
1291	 */
1292	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
1293		io_notif_flush(sr->notif);
1294		req->flags &= ~REQ_F_NEED_CLEANUP;
1295	}
1296	io_req_set_res(req, ret, IORING_CQE_F_MORE);
1297	return IOU_OK;
1298}
1299
1300void io_sendrecv_fail(struct io_kiocb *req)
1301{
1302	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
1303
1304	if (req->flags & REQ_F_PARTIAL_IO)
1305		req->cqe.res = sr->done_io;
1306
1307	if ((req->flags & REQ_F_NEED_CLEANUP) &&
1308	    (req->opcode == IORING_OP_SEND_ZC || req->opcode == IORING_OP_SENDMSG_ZC))
1309		req->cqe.flags |= IORING_CQE_F_MORE;
1310}
1311
1312int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1313{
1314	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
1315	unsigned flags;
1316
1317	if (sqe->len || sqe->buf_index)
1318		return -EINVAL;
1319
1320	accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
1321	accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
1322	accept->flags = READ_ONCE(sqe->accept_flags);
1323	accept->nofile = rlimit(RLIMIT_NOFILE);
1324	flags = READ_ONCE(sqe->ioprio);
1325	if (flags & ~IORING_ACCEPT_MULTISHOT)
1326		return -EINVAL;
1327
1328	accept->file_slot = READ_ONCE(sqe->file_index);
1329	if (accept->file_slot) {
1330		if (accept->flags & SOCK_CLOEXEC)
1331			return -EINVAL;
1332		if (flags & IORING_ACCEPT_MULTISHOT &&
1333		    accept->file_slot != IORING_FILE_INDEX_ALLOC)
1334			return -EINVAL;
1335	}
1336	if (accept->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
1337		return -EINVAL;
1338	if (SOCK_NONBLOCK != O_NONBLOCK && (accept->flags & SOCK_NONBLOCK))
1339		accept->flags = (accept->flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
1340	if (flags & IORING_ACCEPT_MULTISHOT)
1341		req->flags |= REQ_F_APOLL_MULTISHOT;
1342	return 0;
1343}
1344
1345int io_accept(struct io_kiocb *req, unsigned int issue_flags)
1346{
1347	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
1348	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
1349	unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
1350	bool fixed = !!accept->file_slot;
1351	struct file *file;
 
1352	int ret, fd;
1353
1354	if (!io_check_multishot(req, issue_flags))
1355		return -EAGAIN;
 
1356retry:
1357	if (!fixed) {
1358		fd = __get_unused_fd_flags(accept->flags, accept->nofile);
1359		if (unlikely(fd < 0))
1360			return fd;
1361	}
1362	file = do_accept(req->file, file_flags, accept->addr, accept->addr_len,
1363			 accept->flags);
1364	if (IS_ERR(file)) {
1365		if (!fixed)
1366			put_unused_fd(fd);
1367		ret = PTR_ERR(file);
1368		if (ret == -EAGAIN && force_nonblock) {
 
1369			/*
1370			 * if it's multishot and polled, we don't need to
1371			 * return EAGAIN to arm the poll infra since it
1372			 * has already been done
1373			 */
1374			if (issue_flags & IO_URING_F_MULTISHOT)
1375				return IOU_ISSUE_SKIP_COMPLETE;
1376			return ret;
1377		}
1378		if (ret == -ERESTARTSYS)
1379			ret = -EINTR;
1380		req_set_fail(req);
1381	} else if (!fixed) {
1382		fd_install(fd, file);
1383		ret = fd;
1384	} else {
1385		ret = io_fixed_fd_install(req, issue_flags, file,
1386						accept->file_slot);
1387	}
1388
1389	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
1390		io_req_set_res(req, ret, 0);
1391		return IOU_OK;
1392	}
1393
1394	if (ret < 0)
1395		return ret;
1396	if (io_fill_cqe_req_aux(req, issue_flags & IO_URING_F_COMPLETE_DEFER,
1397				ret, IORING_CQE_F_MORE))
1398		goto retry;
1399
1400	io_req_set_res(req, ret, 0);
1401	return IOU_STOP_MULTISHOT;
1402}
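
/*
 * Illustrative userspace sketch (not part of this file): multishot
 * IORING_OP_ACCEPT as handled by io_accept() above. A single SQE keeps
 * delivering one CQE per accepted connection while IORING_CQE_F_MORE is
 * set. Assumes liburing; error handling trimmed.
 */
#include <liburing.h>
#include <sys/socket.h>

static void accept_loop(struct io_uring *ring, int listen_fd)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;

	io_uring_prep_multishot_accept(sqe, listen_fd, NULL, NULL, 0);
	io_uring_submit(ring);

	for (;;) {
		int connfd, more;

		io_uring_wait_cqe(ring, &cqe);
		connfd = cqe->res;			/* new fd, or -errno */
		more = cqe->flags & IORING_CQE_F_MORE;
		io_uring_cqe_seen(ring, cqe);
		if (connfd >= 0) {
			/* ... hand connfd to a worker ... */
		}
		if (!more)
			break;	/* kernel stopped the multishot; rearm if desired */
	}
}
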
1403
1404int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1405{
1406	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);
1407
1408	if (sqe->addr || sqe->rw_flags || sqe->buf_index)
1409		return -EINVAL;
1410
1411	sock->domain = READ_ONCE(sqe->fd);
1412	sock->type = READ_ONCE(sqe->off);
1413	sock->protocol = READ_ONCE(sqe->len);
1414	sock->file_slot = READ_ONCE(sqe->file_index);
1415	sock->nofile = rlimit(RLIMIT_NOFILE);
1416
1417	sock->flags = sock->type & ~SOCK_TYPE_MASK;
1418	if (sock->file_slot && (sock->flags & SOCK_CLOEXEC))
1419		return -EINVAL;
1420	if (sock->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
1421		return -EINVAL;
1422	return 0;
1423}
1424
1425int io_socket(struct io_kiocb *req, unsigned int issue_flags)
1426{
1427	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);
1428	bool fixed = !!sock->file_slot;
1429	struct file *file;
1430	int ret, fd;
1431
1432	if (!fixed) {
1433		fd = __get_unused_fd_flags(sock->flags, sock->nofile);
1434		if (unlikely(fd < 0))
1435			return fd;
1436	}
1437	file = __sys_socket_file(sock->domain, sock->type, sock->protocol);
1438	if (IS_ERR(file)) {
1439		if (!fixed)
1440			put_unused_fd(fd);
1441		ret = PTR_ERR(file);
1442		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
1443			return -EAGAIN;
1444		if (ret == -ERESTARTSYS)
1445			ret = -EINTR;
1446		req_set_fail(req);
1447	} else if (!fixed) {
1448		fd_install(fd, file);
1449		ret = fd;
1450	} else {
1451		ret = io_fixed_fd_install(req, issue_flags, file,
1452					    sock->file_slot);
1453	}
1454	io_req_set_res(req, ret, 0);
1455	return IOU_OK;
1456}
1457
1458int io_connect_prep_async(struct io_kiocb *req)
1459{
1460	struct io_async_connect *io = req->async_data;
1461	struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);
1462
1463	return move_addr_to_kernel(conn->addr, conn->addr_len, &io->address);
1464}
1465
1466int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1467{
1468	struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);
 
1469
1470	if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
1471		return -EINVAL;
1472
1473	conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
1474	conn->addr_len =  READ_ONCE(sqe->addr2);
1475	conn->in_progress = conn->seen_econnaborted = false;
1476	return 0;
1477}
1478
1479int io_connect(struct io_kiocb *req, unsigned int issue_flags)
1480{
1481	struct io_connect *connect = io_kiocb_to_cmd(req, struct io_connect);
1482	struct io_async_connect __io, *io;
1483	unsigned file_flags;
1484	int ret;
1485	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
1486
1487	if (req_has_async_data(req)) {
1488		io = req->async_data;
1489	} else {
1490		ret = move_addr_to_kernel(connect->addr,
1491						connect->addr_len,
1492						&__io.address);
1493		if (ret)
1494			goto out;
1495		io = &__io;
1496	}
1497
1498	file_flags = force_nonblock ? O_NONBLOCK : 0;
1499
1500	ret = __sys_connect_file(req->file, &io->address,
1501					connect->addr_len, file_flags);
1502	if ((ret == -EAGAIN || ret == -EINPROGRESS || ret == -ECONNABORTED)
1503	    && force_nonblock) {
1504		if (ret == -EINPROGRESS) {
1505			connect->in_progress = true;
1506		} else if (ret == -ECONNABORTED) {
1507			if (connect->seen_econnaborted)
1508				goto out;
1509			connect->seen_econnaborted = true;
1510		}
1511		if (req_has_async_data(req))
1512			return -EAGAIN;
1513		if (io_alloc_async_data(req)) {
1514			ret = -ENOMEM;
1515			goto out;
1516		}
1517		memcpy(req->async_data, &__io, sizeof(__io));
1518		return -EAGAIN;
1519	}
1520	if (connect->in_progress) {
1521		/*
1522		 * At least bluetooth will return -EBADFD on a re-connect
1523		 * attempt, and it's (supposedly) also valid to get -EISCONN
1524		 * which means the previous result is good. For both of these,
1525		 * grab the sock_error() and use that for the completion.
1526		 */
1527		if (ret == -EBADFD || ret == -EISCONN)
1528			ret = sock_error(sock_from_file(req->file)->sk);
1529	}
1530	if (ret == -ERESTARTSYS)
1531		ret = -EINTR;
1532out:
1533	if (ret < 0)
1534		req_set_fail(req);
 
1535	io_req_set_res(req, ret, 0);
1536	return IOU_OK;
1537}
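
/*
 * Illustrative userspace sketch (not part of this file): creating a socket
 * with IORING_OP_SOCKET (io_socket() above) and connecting it with
 * IORING_OP_CONNECT (io_connect() above). Assumes liburing and an already
 * filled-in sockaddr_in; error handling trimmed.
 */
#include <liburing.h>
#include <sys/socket.h>
#include <netinet/in.h>

static int socket_then_connect(struct io_uring *ring,
			       const struct sockaddr_in *dst)
{
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int fd, ret;

	sqe = io_uring_get_sqe(ring);
	io_uring_prep_socket(sqe, AF_INET, SOCK_STREAM, 0, 0);
	io_uring_submit(ring);
	io_uring_wait_cqe(ring, &cqe);
	fd = cqe->res;				/* new socket fd, or -errno */
	io_uring_cqe_seen(ring, cqe);
	if (fd < 0)
		return fd;

	sqe = io_uring_get_sqe(ring);
	io_uring_prep_connect(sqe, fd, (const struct sockaddr *)dst, sizeof(*dst));
	io_uring_submit(ring);
	io_uring_wait_cqe(ring, &cqe);
	ret = cqe->res;				/* 0 on success, or -errno */
	io_uring_cqe_seen(ring, cqe);
	return ret < 0 ? ret : fd;
}
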
1538
1539void io_netmsg_cache_free(struct io_cache_entry *entry)
1540{
1541	kfree(container_of(entry, struct io_async_msghdr, cache));
1542}
1543#endif
io_uring/net.c (Linux v6.13.7)
   1// SPDX-License-Identifier: GPL-2.0
   2#include <linux/kernel.h>
   3#include <linux/errno.h>
   4#include <linux/file.h>
   5#include <linux/slab.h>
   6#include <linux/net.h>
   7#include <linux/compat.h>
   8#include <net/compat.h>
   9#include <linux/io_uring.h>
  10
  11#include <uapi/linux/io_uring.h>
  12
  13#include "io_uring.h"
  14#include "kbuf.h"
  15#include "alloc_cache.h"
  16#include "net.h"
  17#include "notif.h"
  18#include "rsrc.h"
  19
  20#if defined(CONFIG_NET)
  21struct io_shutdown {
  22	struct file			*file;
  23	int				how;
  24};
  25
  26struct io_accept {
  27	struct file			*file;
  28	struct sockaddr __user		*addr;
  29	int __user			*addr_len;
  30	int				flags;
  31	int				iou_flags;
  32	u32				file_slot;
  33	unsigned long			nofile;
  34};
  35
  36struct io_socket {
  37	struct file			*file;
  38	int				domain;
  39	int				type;
  40	int				protocol;
  41	int				flags;
  42	u32				file_slot;
  43	unsigned long			nofile;
  44};
  45
  46struct io_connect {
  47	struct file			*file;
  48	struct sockaddr __user		*addr;
  49	int				addr_len;
  50	bool				in_progress;
  51	bool				seen_econnaborted;
  52};
  53
  54struct io_bind {
  55	struct file			*file;
  56	int				addr_len;
  57};
  58
  59struct io_listen {
  60	struct file			*file;
  61	int				backlog;
  62};
  63
  64struct io_sr_msg {
  65	struct file			*file;
  66	union {
  67		struct compat_msghdr __user	*umsg_compat;
  68		struct user_msghdr __user	*umsg;
  69		void __user			*buf;
  70	};
  71	int				len;
  72	unsigned			done_io;
  73	unsigned			msg_flags;
  74	unsigned			nr_multishot_loops;
  75	u16				flags;
  76	/* initialised and used only by !msg send variants */
 
  77	u16				buf_group;
  78	u16				buf_index;
  79	void __user			*msg_control;
  80	/* used only for send zerocopy */
  81	struct io_kiocb 		*notif;
  82};
  83
  84/*
  85 * Number of times we'll try and do receives if there's more data. If we
  86 * exceed this limit, then add us to the back of the queue and retry from
  87 * there. This helps fairness between flooding clients.
  88 */
  89#define MULTISHOT_MAX_RETRY	32
  90
  91int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
  92{
  93	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
  94
  95	if (unlikely(sqe->off || sqe->addr || sqe->rw_flags ||
  96		     sqe->buf_index || sqe->splice_fd_in))
  97		return -EINVAL;
  98
  99	shutdown->how = READ_ONCE(sqe->len);
 100	req->flags |= REQ_F_FORCE_ASYNC;
 101	return 0;
 102}
 103
 104int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
 105{
 106	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
 107	struct socket *sock;
 108	int ret;
 109
 110	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
 111
 112	sock = sock_from_file(req->file);
 113	if (unlikely(!sock))
 114		return -ENOTSOCK;
 115
 116	ret = __sys_shutdown_sock(sock, shutdown->how);
 117	io_req_set_res(req, ret, 0);
 118	return IOU_OK;
 119}
 120
 121static bool io_net_retry(struct socket *sock, int flags)
 122{
 123	if (!(flags & MSG_WAITALL))
 124		return false;
 125	return sock->type == SOCK_STREAM || sock->type == SOCK_SEQPACKET;
 126}
 127
 128static void io_netmsg_iovec_free(struct io_async_msghdr *kmsg)
 129{
 130	if (kmsg->free_iov) {
 131		kfree(kmsg->free_iov);
 132		kmsg->free_iov_nr = 0;
 133		kmsg->free_iov = NULL;
 134	}
 135}
 136
 137static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
 138{
 139	struct io_async_msghdr *hdr = req->async_data;
 140	struct iovec *iov;
 141
 142	/* can't recycle, ensure we free the iovec if we have one */
 143	if (unlikely(issue_flags & IO_URING_F_UNLOCKED)) {
 144		io_netmsg_iovec_free(hdr);
 145		return;
 146	}
 147
 148	/* Let normal cleanup path reap it if we fail adding to the cache */
 149	iov = hdr->free_iov;
 150	if (io_alloc_cache_put(&req->ctx->netmsg_cache, hdr)) {
 151		if (iov)
 152			kasan_mempool_poison_object(iov);
 153		req->async_data = NULL;
 154		req->flags &= ~REQ_F_ASYNC_DATA;
 155	}
 156}
 157
 158static struct io_async_msghdr *io_msg_alloc_async(struct io_kiocb *req)
 
 159{
 160	struct io_ring_ctx *ctx = req->ctx;
 
 161	struct io_async_msghdr *hdr;
 162
 163	hdr = io_alloc_cache_get(&ctx->netmsg_cache);
 164	if (hdr) {
 165		if (hdr->free_iov) {
 166			kasan_mempool_unpoison_object(hdr->free_iov,
 167				hdr->free_iov_nr * sizeof(struct iovec));
 168			req->flags |= REQ_F_NEED_CLEANUP;
 169		}
 170		req->flags |= REQ_F_ASYNC_DATA;
 171		req->async_data = hdr;
 172		return hdr;
 173	}
 174
 175	if (!io_alloc_async_data(req)) {
 176		hdr = req->async_data;
 177		hdr->free_iov_nr = 0;
 178		hdr->free_iov = NULL;
 179		return hdr;
 180	}
 181	return NULL;
 182}
 183
 184/* assign new iovec to kmsg, if we need to */
 185static int io_net_vec_assign(struct io_kiocb *req, struct io_async_msghdr *kmsg,
 186			     struct iovec *iov)
 187{
 188	if (iov) {
 189		req->flags |= REQ_F_NEED_CLEANUP;
 190		kmsg->free_iov_nr = kmsg->msg.msg_iter.nr_segs;
 191		if (kmsg->free_iov)
 192			kfree(kmsg->free_iov);
 193		kmsg->free_iov = iov;
 194	}
 195	return 0;
 196}
 197
 198static inline void io_mshot_prep_retry(struct io_kiocb *req,
 199				       struct io_async_msghdr *kmsg)
 
 200{
 201	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 202
 203	req->flags &= ~REQ_F_BL_EMPTY;
 204	sr->done_io = 0;
 205	sr->len = 0; /* get from the provided buffer */
 206	req->buf_index = sr->buf_group;
 207}
 208
 209#ifdef CONFIG_COMPAT
 210static int io_compat_msg_copy_hdr(struct io_kiocb *req,
 211				  struct io_async_msghdr *iomsg,
 212				  struct compat_msghdr *msg, int ddir)
 213{
 214	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 215	struct compat_iovec __user *uiov;
 216	struct iovec *iov;
 217	int ret, nr_segs;
 218
 219	if (iomsg->free_iov) {
 220		nr_segs = iomsg->free_iov_nr;
 221		iov = iomsg->free_iov;
 222	} else {
 223		iov = &iomsg->fast_iov;
 224		nr_segs = 1;
 225	}
 226
 227	if (copy_from_user(msg, sr->umsg_compat, sizeof(*msg)))
 228		return -EFAULT;
 229
 230	uiov = compat_ptr(msg->msg_iov);
 231	if (req->flags & REQ_F_BUFFER_SELECT) {
 232		compat_ssize_t clen;
 233
 234		if (msg->msg_iovlen == 0) {
 235			sr->len = iov->iov_len = 0;
 236			iov->iov_base = NULL;
 237		} else if (msg->msg_iovlen > 1) {
 238			return -EINVAL;
 239		} else {
 240			if (!access_ok(uiov, sizeof(*uiov)))
 241				return -EFAULT;
 242			if (__get_user(clen, &uiov->iov_len))
 243				return -EFAULT;
 244			if (clen < 0)
 245				return -EINVAL;
 246			sr->len = clen;
 247		}
 248
 249		return 0;
 250	}
 251
 252	ret = __import_iovec(ddir, (struct iovec __user *)uiov, msg->msg_iovlen,
 253				nr_segs, &iov, &iomsg->msg.msg_iter, true);
 254	if (unlikely(ret < 0))
 255		return ret;
 256
 257	return io_net_vec_assign(req, iomsg, iov);
 258}
 259#endif
 260
 261static int io_msg_copy_hdr(struct io_kiocb *req, struct io_async_msghdr *iomsg,
 262			   struct user_msghdr *msg, int ddir)
 263{
 264	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 265	struct user_msghdr __user *umsg = sr->umsg;
 266	struct iovec *iov;
 267	int ret, nr_segs;
 268
 269	if (iomsg->free_iov) {
 270		nr_segs = iomsg->free_iov_nr;
 271		iov = iomsg->free_iov;
 272	} else {
 273		iov = &iomsg->fast_iov;
 274		nr_segs = 1;
 275	}
 276
 277	if (!user_access_begin(umsg, sizeof(*umsg)))
 278		return -EFAULT;
 279
 280	ret = -EFAULT;
 281	unsafe_get_user(msg->msg_name, &umsg->msg_name, ua_end);
 282	unsafe_get_user(msg->msg_namelen, &umsg->msg_namelen, ua_end);
 283	unsafe_get_user(msg->msg_iov, &umsg->msg_iov, ua_end);
 284	unsafe_get_user(msg->msg_iovlen, &umsg->msg_iovlen, ua_end);
 285	unsafe_get_user(msg->msg_control, &umsg->msg_control, ua_end);
 286	unsafe_get_user(msg->msg_controllen, &umsg->msg_controllen, ua_end);
 287	msg->msg_flags = 0;
 288
 289	if (req->flags & REQ_F_BUFFER_SELECT) {
 290		if (msg->msg_iovlen == 0) {
 291			sr->len = iov->iov_len = 0;
 292			iov->iov_base = NULL;
 293		} else if (msg->msg_iovlen > 1) {
 294			ret = -EINVAL;
 295			goto ua_end;
 296		} else {
 297			/* we only need the length for provided buffers */
 298			if (!access_ok(&msg->msg_iov[0].iov_len, sizeof(__kernel_size_t)))
 299				goto ua_end;
 300			unsafe_get_user(iov->iov_len, &msg->msg_iov[0].iov_len,
 301					ua_end);
 302			sr->len = iov->iov_len;
 303		}
 304		ret = 0;
 305ua_end:
 306		user_access_end();
 307		return ret;
 308	}
 309
 310	user_access_end();
 311	ret = __import_iovec(ddir, msg->msg_iov, msg->msg_iovlen, nr_segs,
 312				&iov, &iomsg->msg.msg_iter, false);
 313	if (unlikely(ret < 0))
 314		return ret;
 315
 316	return io_net_vec_assign(req, iomsg, iov);
 317}
 318
 319static int io_sendmsg_copy_hdr(struct io_kiocb *req,
 320			       struct io_async_msghdr *iomsg)
 321{
 322	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 323	struct user_msghdr msg;
 324	int ret;
 325
 326	iomsg->msg.msg_name = &iomsg->addr;
 327	iomsg->msg.msg_iter.nr_segs = 0;
 328
 329#ifdef CONFIG_COMPAT
 330	if (unlikely(req->ctx->compat)) {
 331		struct compat_msghdr cmsg;
 332
 333		ret = io_compat_msg_copy_hdr(req, iomsg, &cmsg, ITER_SOURCE);
 334		if (unlikely(ret))
 335			return ret;
 336
 337		ret = __get_compat_msghdr(&iomsg->msg, &cmsg, NULL);
 338		sr->msg_control = iomsg->msg.msg_control_user;
 339		return ret;
 340	}
 341#endif
 342
 343	ret = io_msg_copy_hdr(req, iomsg, &msg, ITER_SOURCE);
 344	if (unlikely(ret))
 345		return ret;
 346
 347	ret = __copy_msghdr(&iomsg->msg, &msg, NULL);
 348
 349	/* save msg_control as sys_sendmsg() overwrites it */
 350	sr->msg_control = iomsg->msg.msg_control_user;
 351	return ret;
 352}
 353
 354void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req)
 355{
 356	struct io_async_msghdr *io = req->async_data;
 357
 358	io_netmsg_iovec_free(io);
 359}
 360
 361static int io_send_setup(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 362{
 363	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 364	struct io_async_msghdr *kmsg = req->async_data;
 365	void __user *addr;
 366	u16 addr_len;
 367	int ret;
 368
 369	sr->buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
 370
 371	if (READ_ONCE(sqe->__pad3[0]))
 372		return -EINVAL;
 373
 374	kmsg->msg.msg_name = NULL;
 375	kmsg->msg.msg_namelen = 0;
 376	kmsg->msg.msg_control = NULL;
 377	kmsg->msg.msg_controllen = 0;
 378	kmsg->msg.msg_ubuf = NULL;
 379
 380	addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
 381	addr_len = READ_ONCE(sqe->addr_len);
 382	if (addr) {
 383		ret = move_addr_to_kernel(addr, addr_len, &kmsg->addr);
 384		if (unlikely(ret < 0))
 385			return ret;
 386		kmsg->msg.msg_name = &kmsg->addr;
 387		kmsg->msg.msg_namelen = addr_len;
 388	}
 389	if (!io_do_buffer_select(req)) {
 390		ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len,
 391				  &kmsg->msg.msg_iter);
 392		if (unlikely(ret < 0))
 393			return ret;
 394	}
 395	return 0;
 396}
 397
 398static int io_sendmsg_setup(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 399{
 400	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 401	struct io_async_msghdr *kmsg = req->async_data;
 402	int ret;
 403
 404	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
 405
 406	ret = io_sendmsg_copy_hdr(req, kmsg);
 407	if (!ret)
 408		req->flags |= REQ_F_NEED_CLEANUP;
 409	return ret;
 410}
 411
 412#define SENDMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_BUNDLE)
 413
 414int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 415{
 416	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 417
 418	sr->done_io = 0;
 419
 420	if (req->opcode != IORING_OP_SEND) {
 421		if (sqe->addr2 || sqe->file_index)
 422			return -EINVAL;
 423	}
 424
 425	sr->len = READ_ONCE(sqe->len);
 426	sr->flags = READ_ONCE(sqe->ioprio);
 427	if (sr->flags & ~SENDMSG_FLAGS)
 428		return -EINVAL;
 429	sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
 430	if (sr->msg_flags & MSG_DONTWAIT)
 431		req->flags |= REQ_F_NOWAIT;
 432	if (sr->flags & IORING_RECVSEND_BUNDLE) {
 433		if (req->opcode == IORING_OP_SENDMSG)
 434			return -EINVAL;
 435		if (!(req->flags & REQ_F_BUFFER_SELECT))
 436			return -EINVAL;
 437		sr->msg_flags |= MSG_WAITALL;
 438		sr->buf_group = req->buf_index;
 439		req->buf_list = NULL;
 440	}
 441
 442#ifdef CONFIG_COMPAT
 443	if (req->ctx->compat)
 444		sr->msg_flags |= MSG_CMSG_COMPAT;
 445#endif
 446	if (unlikely(!io_msg_alloc_async(req)))
 447		return -ENOMEM;
 448	if (req->opcode != IORING_OP_SENDMSG)
 449		return io_send_setup(req, sqe);
 450	return io_sendmsg_setup(req, sqe);
 451}
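/*
 * Illustrative userspace sketch (not part of this file): filling the SQE
 * fields that io_sendmsg_prep() above consumes for a bundle send with
 * provided buffers. The liburing helper is an assumption; BGID is a
 * hypothetical buffer group id.
 *
 *	io_uring_prep_send(sqe, sockfd, NULL, 0, 0);
 *	sqe->ioprio = IORING_RECVSEND_BUNDLE;	/* read into sr->flags */
 *	sqe->flags |= IOSQE_BUFFER_SELECT;	/* required for bundles */
 *	sqe->buf_group = BGID;			/* provided buffer group */
 */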
 452
 453static void io_req_msg_cleanup(struct io_kiocb *req,
 454			       unsigned int issue_flags)
 455{
 456	req->flags &= ~REQ_F_NEED_CLEANUP;
 457	io_netmsg_recycle(req, issue_flags);
 458}
 459
 460/*
 461 * For bundle completions, we need to figure out how many segments we consumed.
 462 * A bundle could be using a single ITER_UBUF if that's all we mapped, or it
 463 * could be using an ITER_IOVEC. If the latter, and we consumed all of
 464 * the segments, then it's a trivial question to answer. If we have residual
 465 * data in the iter, then loop the segments to figure out how much we
 466 * transferred.
 467 */
 468static int io_bundle_nbufs(struct io_async_msghdr *kmsg, int ret)
 469{
 470	struct iovec *iov;
 471	int nbufs;
 472
 473	/* no data is always zero segments, and a ubuf is always 1 segment */
 474	if (ret <= 0)
 475		return 0;
 476	if (iter_is_ubuf(&kmsg->msg.msg_iter))
 477		return 1;
 478
 479	iov = kmsg->free_iov;
 480	if (!iov)
 481		iov = &kmsg->fast_iov;
 482
 483	/* if all data was transferred, it's basic pointer math */
 484	if (!iov_iter_count(&kmsg->msg.msg_iter))
 485		return iter_iov(&kmsg->msg.msg_iter) - iov;
 486
 487	/* short transfer, count segments */
 488	nbufs = 0;
 489	do {
 490		int this_len = min_t(int, iov[nbufs].iov_len, ret);
 491
 492		nbufs++;
 493		ret -= this_len;
 494	} while (ret);
 495
 496	return nbufs;
 497}
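/*
 * Worked example for the short-transfer loop above (illustrative numbers):
 * with selected buffers of 1000, 2000 and 4000 bytes and ret == 2500, the
 * first iovec absorbs 1000 bytes and the second the remaining 1500, so
 * io_bundle_nbufs() returns 2 and two buffers get put.
 */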
 498
 499static inline bool io_send_finish(struct io_kiocb *req, int *ret,
 500				  struct io_async_msghdr *kmsg,
 501				  unsigned issue_flags)
 502{
 503	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 504	bool bundle_finished = *ret <= 0;
 505	unsigned int cflags;
 506
 507	if (!(sr->flags & IORING_RECVSEND_BUNDLE)) {
 508		cflags = io_put_kbuf(req, *ret, issue_flags);
 509		goto finish;
 510	}
 511
 512	cflags = io_put_kbufs(req, *ret, io_bundle_nbufs(kmsg, *ret), issue_flags);
 513
 514	if (bundle_finished || req->flags & REQ_F_BL_EMPTY)
 515		goto finish;
 516
 517	/*
 518	 * Fill CQE for this send and see if we should keep trying to
 519	 * send to this socket.
 520	 */
 521	if (io_req_post_cqe(req, *ret, cflags | IORING_CQE_F_MORE)) {
 522		io_mshot_prep_retry(req, kmsg);
 523		return false;
 524	}
 525
 526	/* Otherwise stop bundle and use the current result. */
 527finish:
 528	io_req_set_res(req, *ret, cflags);
 529	*ret = IOU_OK;
 530	return true;
 531}
 532
 533int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
 534{
 535	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 536	struct io_async_msghdr *kmsg = req->async_data;
 537	struct socket *sock;
 538	unsigned flags;
 539	int min_ret = 0;
 540	int ret;
 541
 542	sock = sock_from_file(req->file);
 543	if (unlikely(!sock))
 544		return -ENOTSOCK;
 545
 546	if (!(req->flags & REQ_F_POLLED) &&
 547	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
 548		return -EAGAIN;
 549
 550	flags = sr->msg_flags;
 551	if (issue_flags & IO_URING_F_NONBLOCK)
 552		flags |= MSG_DONTWAIT;
 553	if (flags & MSG_WAITALL)
 554		min_ret = iov_iter_count(&kmsg->msg.msg_iter);
 555
 556	kmsg->msg.msg_control_user = sr->msg_control;
 557
 558	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
 559
 560	if (ret < min_ret) {
 561		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
 562			return -EAGAIN;
 563		if (ret > 0 && io_net_retry(sock, flags)) {
 564			kmsg->msg.msg_controllen = 0;
 565			kmsg->msg.msg_control = NULL;
 566			sr->done_io += ret;
 567			req->flags |= REQ_F_BL_NO_RECYCLE;
 568			return -EAGAIN;
 569		}
 570		if (ret == -ERESTARTSYS)
 571			ret = -EINTR;
 572		req_set_fail(req);
 573	}
 574	io_req_msg_cleanup(req, issue_flags);
 575	if (ret >= 0)
 576		ret += sr->done_io;
 577	else if (sr->done_io)
 578		ret = sr->done_io;
 579	io_req_set_res(req, ret, 0);
 580	return IOU_OK;
 581}
 582
 583int io_send(struct io_kiocb *req, unsigned int issue_flags)
 584{
 585	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 586	struct io_async_msghdr *kmsg = req->async_data;
 587	struct socket *sock;
 588	unsigned flags;
 589	int min_ret = 0;
 590	int ret;
 591
 592	sock = sock_from_file(req->file);
 593	if (unlikely(!sock))
 594		return -ENOTSOCK;
 595
 596	if (!(req->flags & REQ_F_POLLED) &&
 597	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
 598		return -EAGAIN;
 599
 600	flags = sr->msg_flags;
 601	if (issue_flags & IO_URING_F_NONBLOCK)
 602		flags |= MSG_DONTWAIT;
 603
 604retry_bundle:
 605	if (io_do_buffer_select(req)) {
 606		struct buf_sel_arg arg = {
 607			.iovs = &kmsg->fast_iov,
 608			.max_len = min_not_zero(sr->len, INT_MAX),
 609			.nr_iovs = 1,
 610		};
 611
 612		if (kmsg->free_iov) {
 613			arg.nr_iovs = kmsg->free_iov_nr;
 614			arg.iovs = kmsg->free_iov;
 615			arg.mode = KBUF_MODE_FREE;
 616		}
 617
 618		if (!(sr->flags & IORING_RECVSEND_BUNDLE))
 619			arg.nr_iovs = 1;
 620		else
 621			arg.mode |= KBUF_MODE_EXPAND;
 622
 623		ret = io_buffers_select(req, &arg, issue_flags);
 624		if (unlikely(ret < 0))
 625			return ret;
 626
 627		if (arg.iovs != &kmsg->fast_iov && arg.iovs != kmsg->free_iov) {
 628			kmsg->free_iov_nr = ret;
 629			kmsg->free_iov = arg.iovs;
 630			req->flags |= REQ_F_NEED_CLEANUP;
 631		}
 632		sr->len = arg.out_len;
 633
 634		if (ret == 1) {
 635			sr->buf = arg.iovs[0].iov_base;
 636			ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len,
 637						&kmsg->msg.msg_iter);
 638			if (unlikely(ret))
 639				return ret;
 640		} else {
 641			iov_iter_init(&kmsg->msg.msg_iter, ITER_SOURCE,
 642					arg.iovs, ret, arg.out_len);
 643		}
 644	}
 645
 646	/*
 647	 * If MSG_WAITALL is set, or this is a bundle send, then we need
 648	 * the full amount. If just the bundle flag is set and we do a short
 649	 * send, then we complete the bundle sequence rather than continue on.
 650	 */
 651	if (flags & MSG_WAITALL || sr->flags & IORING_RECVSEND_BUNDLE)
 652		min_ret = iov_iter_count(&kmsg->msg.msg_iter);
 653
 654	flags &= ~MSG_INTERNAL_SENDMSG_FLAGS;
 655	kmsg->msg.msg_flags = flags;
 656	ret = sock_sendmsg(sock, &kmsg->msg);
 657	if (ret < min_ret) {
 658		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
 659			return -EAGAIN;
 660
 661		if (ret > 0 && io_net_retry(sock, flags)) {
 662			sr->len -= ret;
 663			sr->buf += ret;
 664			sr->done_io += ret;
 665			req->flags |= REQ_F_BL_NO_RECYCLE;
 666			return -EAGAIN;
 667		}
 668		if (ret == -ERESTARTSYS)
 669			ret = -EINTR;
 670		req_set_fail(req);
 671	}
 672	if (ret >= 0)
 673		ret += sr->done_io;
 674	else if (sr->done_io)
 675		ret = sr->done_io;
 676
 677	if (!io_send_finish(req, &ret, kmsg, issue_flags))
 678		goto retry_bundle;
 679
 680	io_req_msg_cleanup(req, issue_flags);
 681	return ret;
 682}
 683
 684static int io_recvmsg_mshot_prep(struct io_kiocb *req,
 685				 struct io_async_msghdr *iomsg,
 686				 int namelen, size_t controllen)
 687{
 688	if ((req->flags & (REQ_F_APOLL_MULTISHOT|REQ_F_BUFFER_SELECT)) ==
 689			  (REQ_F_APOLL_MULTISHOT|REQ_F_BUFFER_SELECT)) {
 690		int hdr;
 691
 692		if (unlikely(namelen < 0))
 693			return -EOVERFLOW;
 694		if (check_add_overflow(sizeof(struct io_uring_recvmsg_out),
 695					namelen, &hdr))
 696			return -EOVERFLOW;
 697		if (check_add_overflow(hdr, controllen, &hdr))
 698			return -EOVERFLOW;
 699
 700		iomsg->namelen = namelen;
 701		iomsg->controllen = controllen;
 702		return 0;
 703	}
 704
 705	return 0;
 706}
 707
 708static int io_recvmsg_copy_hdr(struct io_kiocb *req,
 709			       struct io_async_msghdr *iomsg)
 710{
 711	struct user_msghdr msg;
 712	int ret;
 713
 714	iomsg->msg.msg_name = &iomsg->addr;
 715	iomsg->msg.msg_iter.nr_segs = 0;
 716
 717#ifdef CONFIG_COMPAT
 718	if (unlikely(req->ctx->compat)) {
 719		struct compat_msghdr cmsg;
 720
 721		ret = io_compat_msg_copy_hdr(req, iomsg, &cmsg, ITER_DEST);
 722		if (unlikely(ret))
 723			return ret;
 724
 725		ret = __get_compat_msghdr(&iomsg->msg, &cmsg, &iomsg->uaddr);
 726		if (unlikely(ret))
 727			return ret;
 728
 729		return io_recvmsg_mshot_prep(req, iomsg, cmsg.msg_namelen,
 730						cmsg.msg_controllen);
 731	}
 732#endif
 733
 734	ret = io_msg_copy_hdr(req, iomsg, &msg, ITER_DEST);
 735	if (unlikely(ret))
 736		return ret;
 737
 738	ret = __copy_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
 739	if (unlikely(ret))
 740		return ret;
 741
 742	return io_recvmsg_mshot_prep(req, iomsg, msg.msg_namelen,
 743					msg.msg_controllen);
 744}
 745
 746static int io_recvmsg_prep_setup(struct io_kiocb *req)
 747{
 748	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 749	struct io_async_msghdr *kmsg;
 750	int ret;
 751
 752	kmsg = io_msg_alloc_async(req);
 753	if (unlikely(!kmsg))
 754		return -ENOMEM;
 755
 756	if (req->opcode == IORING_OP_RECV) {
 757		kmsg->msg.msg_name = NULL;
 758		kmsg->msg.msg_namelen = 0;
 759		kmsg->msg.msg_inq = 0;
 760		kmsg->msg.msg_control = NULL;
 761		kmsg->msg.msg_get_inq = 1;
 762		kmsg->msg.msg_controllen = 0;
 763		kmsg->msg.msg_iocb = NULL;
 764		kmsg->msg.msg_ubuf = NULL;
 765
 766		if (!io_do_buffer_select(req)) {
 767			ret = import_ubuf(ITER_DEST, sr->buf, sr->len,
 768					  &kmsg->msg.msg_iter);
 769			if (unlikely(ret))
 770				return ret;
 771		}
 772		return 0;
 773	}
 774
 775	ret = io_recvmsg_copy_hdr(req, kmsg);
 776	if (!ret)
 777		req->flags |= REQ_F_NEED_CLEANUP;
 778	return ret;
 779}
 780
 781#define RECVMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECV_MULTISHOT | \
 782			IORING_RECVSEND_BUNDLE)
 783
 784int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 785{
 786	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 787
 788	sr->done_io = 0;
 789
 790	if (unlikely(sqe->file_index || sqe->addr2))
 791		return -EINVAL;
 792
 793	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
 794	sr->len = READ_ONCE(sqe->len);
 795	sr->flags = READ_ONCE(sqe->ioprio);
 796	if (sr->flags & ~RECVMSG_FLAGS)
 797		return -EINVAL;
 798	sr->msg_flags = READ_ONCE(sqe->msg_flags);
 799	if (sr->msg_flags & MSG_DONTWAIT)
 800		req->flags |= REQ_F_NOWAIT;
 801	if (sr->msg_flags & MSG_ERRQUEUE)
 802		req->flags |= REQ_F_CLEAR_POLLIN;
 803	if (req->flags & REQ_F_BUFFER_SELECT) {
 804		/*
 805		 * Store the buffer group for this multishot receive separately,
 806		 * as if we end up doing an io-wq based issue that selects a
 807		 * buffer, it has to be committed immediately and that will
 808		 * clear ->buf_list. This means we lose the link to the buffer
 809		 * list, and the eventual buffer put on completion then cannot
 810		 * restore it.
 811		 */
 812		sr->buf_group = req->buf_index;
 813		req->buf_list = NULL;
 814	}
 815	if (sr->flags & IORING_RECV_MULTISHOT) {
 816		if (!(req->flags & REQ_F_BUFFER_SELECT))
 817			return -EINVAL;
 818		if (sr->msg_flags & MSG_WAITALL)
 819			return -EINVAL;
 820		if (req->opcode == IORING_OP_RECV && sr->len)
 821			return -EINVAL;
 822		req->flags |= REQ_F_APOLL_MULTISHOT;
 823	}
 824	if (sr->flags & IORING_RECVSEND_BUNDLE) {
 825		if (req->opcode == IORING_OP_RECVMSG)
 826			return -EINVAL;
 827	}
 828
 829#ifdef CONFIG_COMPAT
 830	if (req->ctx->compat)
 831		sr->msg_flags |= MSG_CMSG_COMPAT;
 832#endif
 833	sr->nr_multishot_loops = 0;
 834	return io_recvmsg_prep_setup(req);
 835}
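/*
 * Illustrative userspace sketch (not part of this file): a multishot
 * receive as validated by io_recvmsg_prep() above: provided buffers are
 * required, MSG_WAITALL is rejected, and IORING_OP_RECV must pass a zero
 * length. The liburing helper is an assumption; BGID is hypothetical.
 *
 *	io_uring_prep_recv(sqe, sockfd, NULL, 0, 0);
 *	sqe->ioprio = IORING_RECV_MULTISHOT;	/* read into sr->flags */
 *	sqe->flags |= IOSQE_BUFFER_SELECT;
 *	sqe->buf_group = BGID;
 */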
 836
 837/*
 838 * Finishes io_recv and io_recvmsg.
 839 *
 840 * Returns true if it is actually finished, or false if it should run
 841 * again (for multishot).
 842 */
 843static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
 844				  struct io_async_msghdr *kmsg,
 845				  bool mshot_finished, unsigned issue_flags)
 846{
 847	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 848	unsigned int cflags = 0;
 849
 850	if (kmsg->msg.msg_inq > 0)
 851		cflags |= IORING_CQE_F_SOCK_NONEMPTY;
 852
 853	if (sr->flags & IORING_RECVSEND_BUNDLE) {
 854		cflags |= io_put_kbufs(req, *ret, io_bundle_nbufs(kmsg, *ret),
 855				      issue_flags);
 856		/* bundle with no more immediate buffers, we're done */
 857		if (req->flags & REQ_F_BL_EMPTY)
 858			goto finish;
 859	} else {
 860		cflags |= io_put_kbuf(req, *ret, issue_flags);
 861	}
 862
 863	/*
 864	 * Fill CQE for this receive and see if we should keep trying to
 865	 * receive from this socket.
 866	 */
 867	if ((req->flags & REQ_F_APOLL_MULTISHOT) && !mshot_finished &&
 868	    io_req_post_cqe(req, *ret, cflags | IORING_CQE_F_MORE)) {
 869		int mshot_retry_ret = IOU_ISSUE_SKIP_COMPLETE;
 870
 871		io_mshot_prep_retry(req, kmsg);
 872		/* Known not-empty or unknown state, retry */
 873		if (cflags & IORING_CQE_F_SOCK_NONEMPTY || kmsg->msg.msg_inq < 0) {
 874			if (sr->nr_multishot_loops++ < MULTISHOT_MAX_RETRY)
 875				return false;
 876			/* mshot retries exceeded, force a requeue */
 877			sr->nr_multishot_loops = 0;
 878			mshot_retry_ret = IOU_REQUEUE;
 879		}
 880		if (issue_flags & IO_URING_F_MULTISHOT)
 881			*ret = mshot_retry_ret;
 882		else
 883			*ret = -EAGAIN;
 884		return true;
 885	}
 886
 887	/* Finish the request / stop multishot. */
 888finish:
 889	io_req_set_res(req, *ret, cflags);
 890
 891	if (issue_flags & IO_URING_F_MULTISHOT)
 892		*ret = IOU_STOP_MULTISHOT;
 893	else
 894		*ret = IOU_OK;
 895	io_req_msg_cleanup(req, issue_flags);
 896	return true;
 897}
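/*
 * Completion flow for the multishot path above, as implemented by
 * io_recv_finish(): every intermediate receive posts a CQE flagged
 * IORING_CQE_F_MORE and leaves the request armed. When more data is (or
 * may be) available, the receive is retried inline until
 * sr->nr_multishot_loops reaches MULTISHOT_MAX_RETRY, after which the
 * request is requeued via IOU_REQUEUE. The final CQE is posted without
 * IORING_CQE_F_MORE and ends the multishot sequence.
 */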
 898
 899static int io_recvmsg_prep_multishot(struct io_async_msghdr *kmsg,
 900				     struct io_sr_msg *sr, void __user **buf,
 901				     size_t *len)
 902{
 903	unsigned long ubuf = (unsigned long) *buf;
 904	unsigned long hdr;
 905
 906	hdr = sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
 907		kmsg->controllen;
 908	if (*len < hdr)
 909		return -EFAULT;
 910
 911	if (kmsg->controllen) {
 912		unsigned long control = ubuf + hdr - kmsg->controllen;
 913
 914		kmsg->msg.msg_control_user = (void __user *) control;
 915		kmsg->msg.msg_controllen = kmsg->controllen;
 916	}
 917
 918	sr->buf = *buf; /* stash for later copy */
 919	*buf = (void __user *) (ubuf + hdr);
 920	kmsg->payloadlen = *len = *len - hdr;
 921	return 0;
 922}
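/*
 * Resulting provided-buffer layout for multishot recvmsg, with
 * illustrative sizes: for kmsg->namelen == 16, kmsg->controllen == 0 and
 * a 256-byte buffer, the first 16 bytes are reserved for struct
 * io_uring_recvmsg_out, the next 16 for the source address, and the
 * remaining 224 bytes (kmsg->payloadlen) for payload.
 * io_recvmsg_multishot() below fills in the header and returns
 * hdr + name + control + data bytes consumed.
 */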
 923
 924struct io_recvmsg_multishot_hdr {
 925	struct io_uring_recvmsg_out msg;
 926	struct sockaddr_storage addr;
 927};
 928
 929static int io_recvmsg_multishot(struct socket *sock, struct io_sr_msg *io,
 930				struct io_async_msghdr *kmsg,
 931				unsigned int flags, bool *finished)
 932{
 933	int err;
 934	int copy_len;
 935	struct io_recvmsg_multishot_hdr hdr;
 936
 937	if (kmsg->namelen)
 938		kmsg->msg.msg_name = &hdr.addr;
 939	kmsg->msg.msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT);
 940	kmsg->msg.msg_namelen = 0;
 941
 942	if (sock->file->f_flags & O_NONBLOCK)
 943		flags |= MSG_DONTWAIT;
 944
 945	err = sock_recvmsg(sock, &kmsg->msg, flags);
 946	*finished = err <= 0;
 947	if (err < 0)
 948		return err;
 949
 950	hdr.msg = (struct io_uring_recvmsg_out) {
 951		.controllen = kmsg->controllen - kmsg->msg.msg_controllen,
 952		.flags = kmsg->msg.msg_flags & ~MSG_CMSG_COMPAT
 953	};
 954
 955	hdr.msg.payloadlen = err;
 956	if (err > kmsg->payloadlen)
 957		err = kmsg->payloadlen;
 958
 959	copy_len = sizeof(struct io_uring_recvmsg_out);
 960	if (kmsg->msg.msg_namelen > kmsg->namelen)
 961		copy_len += kmsg->namelen;
 962	else
 963		copy_len += kmsg->msg.msg_namelen;
 964
 965	/*
 966	 *      "fromlen shall refer to the value before truncation.."
 967	 *                      1003.1g
 968	 */
 969	hdr.msg.namelen = kmsg->msg.msg_namelen;
 970
 971	/* ensure that there is no gap between hdr and sockaddr_storage */
 972	BUILD_BUG_ON(offsetof(struct io_recvmsg_multishot_hdr, addr) !=
 973		     sizeof(struct io_uring_recvmsg_out));
 974	if (copy_to_user(io->buf, &hdr, copy_len)) {
 975		*finished = true;
 976		return -EFAULT;
 977	}
 978
 979	return sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
 980			kmsg->controllen + err;
 981}
 982
 983int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
 984{
 985	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 986	struct io_async_msghdr *kmsg = req->async_data;
 987	struct socket *sock;
 988	unsigned flags;
 989	int ret, min_ret = 0;
 990	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
 991	bool mshot_finished = true;
 992
 993	sock = sock_from_file(req->file);
 994	if (unlikely(!sock))
 995		return -ENOTSOCK;
 996
 997	if (!(req->flags & REQ_F_POLLED) &&
 998	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
 999		return -EAGAIN;
1000
1001	flags = sr->msg_flags;
1002	if (force_nonblock)
1003		flags |= MSG_DONTWAIT;
1004
1005retry_multishot:
1006	if (io_do_buffer_select(req)) {
1007		void __user *buf;
1008		size_t len = sr->len;
1009
1010		buf = io_buffer_select(req, &len, issue_flags);
1011		if (!buf)
1012			return -ENOBUFS;
1013
1014		if (req->flags & REQ_F_APOLL_MULTISHOT) {
1015			ret = io_recvmsg_prep_multishot(kmsg, sr, &buf, &len);
1016			if (ret) {
1017				io_kbuf_recycle(req, issue_flags);
1018				return ret;
1019			}
1020		}
1021
1022		iov_iter_ubuf(&kmsg->msg.msg_iter, ITER_DEST, buf, len);
1023	}
1024
1025	kmsg->msg.msg_get_inq = 1;
1026	kmsg->msg.msg_inq = -1;
1027	if (req->flags & REQ_F_APOLL_MULTISHOT) {
1028		ret = io_recvmsg_multishot(sock, sr, kmsg, flags,
1029					   &mshot_finished);
1030	} else {
1031		/* disable partial retry for recvmsg with cmsg attached */
1032		if (flags & MSG_WAITALL && !kmsg->msg.msg_controllen)
1033			min_ret = iov_iter_count(&kmsg->msg.msg_iter);
1034
1035		ret = __sys_recvmsg_sock(sock, &kmsg->msg, sr->umsg,
1036					 kmsg->uaddr, flags);
1037	}
1038
1039	if (ret < min_ret) {
1040		if (ret == -EAGAIN && force_nonblock) {
1041			if (issue_flags & IO_URING_F_MULTISHOT) {
1042				io_kbuf_recycle(req, issue_flags);
1043				return IOU_ISSUE_SKIP_COMPLETE;
1044			}
1045			return -EAGAIN;
1046		}
1047		if (ret > 0 && io_net_retry(sock, flags)) {
1048			sr->done_io += ret;
1049			req->flags |= REQ_F_BL_NO_RECYCLE;
1050			return -EAGAIN;
1051		}
1052		if (ret == -ERESTARTSYS)
1053			ret = -EINTR;
1054		req_set_fail(req);
1055	} else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
1056		req_set_fail(req);
1057	}
1058
1059	if (ret > 0)
1060		ret += sr->done_io;
1061	else if (sr->done_io)
1062		ret = sr->done_io;
1063	else
1064		io_kbuf_recycle(req, issue_flags);
1065
1066	if (!io_recv_finish(req, &ret, kmsg, mshot_finished, issue_flags))
1067		goto retry_multishot;
1068
1069	return ret;
1070}
1071
1072static int io_recv_buf_select(struct io_kiocb *req, struct io_async_msghdr *kmsg,
1073			      size_t *len, unsigned int issue_flags)
1074{
1075	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
1076	int ret;
1077
1078	/*
1079	 * If the ring isn't locked, then don't use the peek interface
1080	 * to grab multiple buffers as we will lock/unlock between
1081	 * this selection and posting the buffers.
1082	 */
1083	if (!(issue_flags & IO_URING_F_UNLOCKED) &&
1084	    sr->flags & IORING_RECVSEND_BUNDLE) {
1085		struct buf_sel_arg arg = {
1086			.iovs = &kmsg->fast_iov,
1087			.nr_iovs = 1,
1088			.mode = KBUF_MODE_EXPAND,
1089		};
1090
1091		if (kmsg->free_iov) {
1092			arg.nr_iovs = kmsg->free_iov_nr;
1093			arg.iovs = kmsg->free_iov;
1094			arg.mode |= KBUF_MODE_FREE;
1095		}
1096
1097		if (kmsg->msg.msg_inq > 0)
1098			arg.max_len = min_not_zero(sr->len, kmsg->msg.msg_inq);
1099
1100		ret = io_buffers_peek(req, &arg);
1101		if (unlikely(ret < 0))
1102			return ret;
1103
1104		/* special case 1 vec, can be a fast path */
1105		if (ret == 1) {
1106			sr->buf = arg.iovs[0].iov_base;
1107			sr->len = arg.iovs[0].iov_len;
1108			goto map_ubuf;
1109		}
1110		iov_iter_init(&kmsg->msg.msg_iter, ITER_DEST, arg.iovs, ret,
1111				arg.out_len);
1112		if (arg.iovs != &kmsg->fast_iov && arg.iovs != kmsg->free_iov) {
1113			kmsg->free_iov_nr = ret;
1114			kmsg->free_iov = arg.iovs;
1115			req->flags |= REQ_F_NEED_CLEANUP;
1116		}
1117	} else {
1118		void __user *buf;
1119
1120		*len = sr->len;
1121		buf = io_buffer_select(req, len, issue_flags);
1122		if (!buf)
1123			return -ENOBUFS;
1124		sr->buf = buf;
1125		sr->len = *len;
1126map_ubuf:
1127		ret = import_ubuf(ITER_DEST, sr->buf, sr->len,
1128				  &kmsg->msg.msg_iter);
1129		if (unlikely(ret))
1130			return ret;
1131	}
1132
1133	return 0;
1134}
1135
1136int io_recv(struct io_kiocb *req, unsigned int issue_flags)
1137{
1138	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
1139	struct io_async_msghdr *kmsg = req->async_data;
1140	struct socket *sock;
1141	unsigned flags;
1142	int ret, min_ret = 0;
1143	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
1144	size_t len = sr->len;
1145	bool mshot_finished;
1146
1147	if (!(req->flags & REQ_F_POLLED) &&
1148	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
1149		return -EAGAIN;
1150
1151	sock = sock_from_file(req->file);
1152	if (unlikely(!sock))
1153		return -ENOTSOCK;
1154
1155	flags = sr->msg_flags;
1156	if (force_nonblock)
1157		flags |= MSG_DONTWAIT;
1158
1159retry_multishot:
1160	if (io_do_buffer_select(req)) {
1161		ret = io_recv_buf_select(req, kmsg, &len, issue_flags);
1162		if (unlikely(ret)) {
1163			kmsg->msg.msg_inq = -1;
1164			goto out_free;
1165		}
1166		sr->buf = NULL;
1167	}
1168
1169	kmsg->msg.msg_flags = 0;
1170	kmsg->msg.msg_inq = -1;
1171
1172	if (flags & MSG_WAITALL)
1173		min_ret = iov_iter_count(&kmsg->msg.msg_iter);
1174
1175	ret = sock_recvmsg(sock, &kmsg->msg, flags);
1176	if (ret < min_ret) {
1177		if (ret == -EAGAIN && force_nonblock) {
1178			if (issue_flags & IO_URING_F_MULTISHOT) {
1179				io_kbuf_recycle(req, issue_flags);
1180				return IOU_ISSUE_SKIP_COMPLETE;
1181			}
1182
1183			return -EAGAIN;
1184		}
1185		if (ret > 0 && io_net_retry(sock, flags)) {
1186			sr->len -= ret;
1187			sr->buf += ret;
1188			sr->done_io += ret;
1189			req->flags |= REQ_F_BL_NO_RECYCLE;
1190			return -EAGAIN;
1191		}
1192		if (ret == -ERESTARTSYS)
1193			ret = -EINTR;
1194		req_set_fail(req);
1195	} else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
1196out_free:
1197		req_set_fail(req);
1198	}
1199
1200	mshot_finished = ret <= 0;
1201	if (ret > 0)
1202		ret += sr->done_io;
1203	else if (sr->done_io)
1204		ret = sr->done_io;
1205	else
1206		io_kbuf_recycle(req, issue_flags);
1207
1208	if (!io_recv_finish(req, &ret, kmsg, mshot_finished, issue_flags))
1209		goto retry_multishot;
1210
1211	return ret;
1212}
1213
1214void io_send_zc_cleanup(struct io_kiocb *req)
1215{
1216	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
1217	struct io_async_msghdr *io = req->async_data;
1218
1219	if (req_has_async_data(req))
1220		io_netmsg_iovec_free(io);
1221	if (zc->notif) {
1222		io_notif_flush(zc->notif);
1223		zc->notif = NULL;
1224	}
1225}
1226
1227#define IO_ZC_FLAGS_COMMON (IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_FIXED_BUF)
1228#define IO_ZC_FLAGS_VALID  (IO_ZC_FLAGS_COMMON | IORING_SEND_ZC_REPORT_USAGE)
1229
1230int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1231{
1232	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
1233	struct io_ring_ctx *ctx = req->ctx;
1234	struct io_kiocb *notif;
1235
1236	zc->done_io = 0;
1237	req->flags |= REQ_F_POLL_NO_LAZY;
1238
1239	if (unlikely(READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3)))
1240		return -EINVAL;
1241	/* we don't support IOSQE_CQE_SKIP_SUCCESS just yet */
1242	if (req->flags & REQ_F_CQE_SKIP)
1243		return -EINVAL;
1244
1245	notif = zc->notif = io_alloc_notif(ctx);
1246	if (!notif)
1247		return -ENOMEM;
1248	notif->cqe.user_data = req->cqe.user_data;
1249	notif->cqe.res = 0;
1250	notif->cqe.flags = IORING_CQE_F_NOTIF;
1251	req->flags |= REQ_F_NEED_CLEANUP;
1252
1253	zc->flags = READ_ONCE(sqe->ioprio);
1254	if (unlikely(zc->flags & ~IO_ZC_FLAGS_COMMON)) {
1255		if (zc->flags & ~IO_ZC_FLAGS_VALID)
1256			return -EINVAL;
1257		if (zc->flags & IORING_SEND_ZC_REPORT_USAGE) {
1258			struct io_notif_data *nd = io_notif_to_data(notif);
1259
1260			nd->zc_report = true;
1261			nd->zc_used = false;
1262			nd->zc_copied = false;
1263		}
1264	}
1265
1266	if (req->opcode != IORING_OP_SEND_ZC) {
1267		if (unlikely(sqe->addr2 || sqe->file_index))
1268			return -EINVAL;
1269		if (unlikely(zc->flags & IORING_RECVSEND_FIXED_BUF))
1270			return -EINVAL;
1271	}
1272
1273	zc->len = READ_ONCE(sqe->len);
1274	zc->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL | MSG_ZEROCOPY;
1275	zc->buf_index = READ_ONCE(sqe->buf_index);
1276	if (zc->msg_flags & MSG_DONTWAIT)
1277		req->flags |= REQ_F_NOWAIT;
1278
1279#ifdef CONFIG_COMPAT
1280	if (req->ctx->compat)
1281		zc->msg_flags |= MSG_CMSG_COMPAT;
1282#endif
1283	if (unlikely(!io_msg_alloc_async(req)))
1284		return -ENOMEM;
1285	if (req->opcode != IORING_OP_SENDMSG_ZC)
1286		return io_send_setup(req, sqe);
1287	return io_sendmsg_setup(req, sqe);
1288}
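/*
 * Illustrative userspace sketch (not part of this file): a zerocopy send
 * from a registered buffer, matching the fields read by io_send_zc_prep()
 * above. The liburing helper and its argument order are assumptions;
 * REG_IDX is a hypothetical registered-buffer index.
 *
 *	io_uring_prep_send_zc(sqe, sockfd, buf, len, 0,
 *			      IORING_RECVSEND_FIXED_BUF);
 *	sqe->buf_index = REG_IDX;
 *
 * Two CQEs result: the send completion carrying IORING_CQE_F_MORE, and a
 * later notification CQE carrying IORING_CQE_F_NOTIF once the buffer can
 * be reused.
 */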
1289
1290static int io_sg_from_iter_iovec(struct sk_buff *skb,
1291				 struct iov_iter *from, size_t length)
1292{
1293	skb_zcopy_downgrade_managed(skb);
1294	return zerocopy_fill_skb_from_iter(skb, from, length);
1295}
1296
1297static int io_sg_from_iter(struct sk_buff *skb,
1298			   struct iov_iter *from, size_t length)
1299{
1300	struct skb_shared_info *shinfo = skb_shinfo(skb);
1301	int frag = shinfo->nr_frags;
1302	int ret = 0;
1303	struct bvec_iter bi;
1304	ssize_t copied = 0;
1305	unsigned long truesize = 0;
1306
1307	if (!frag)
1308		shinfo->flags |= SKBFL_MANAGED_FRAG_REFS;
1309	else if (unlikely(!skb_zcopy_managed(skb)))
1310		return zerocopy_fill_skb_from_iter(skb, from, length);
1311
1312	bi.bi_size = min(from->count, length);
1313	bi.bi_bvec_done = from->iov_offset;
1314	bi.bi_idx = 0;
1315
1316	while (bi.bi_size && frag < MAX_SKB_FRAGS) {
1317		struct bio_vec v = mp_bvec_iter_bvec(from->bvec, bi);
1318
1319		copied += v.bv_len;
1320		truesize += PAGE_ALIGN(v.bv_len + v.bv_offset);
1321		__skb_fill_page_desc_noacc(shinfo, frag++, v.bv_page,
1322					   v.bv_offset, v.bv_len);
1323		bvec_iter_advance_single(from->bvec, &bi, v.bv_len);
1324	}
1325	if (bi.bi_size)
1326		ret = -EMSGSIZE;
1327
1328	shinfo->nr_frags = frag;
1329	from->bvec += bi.bi_idx;
1330	from->nr_segs -= bi.bi_idx;
1331	from->count -= copied;
1332	from->iov_offset = bi.bi_bvec_done;
1333
1334	skb->data_len += copied;
1335	skb->len += copied;
1336	skb->truesize += truesize;
1337	return ret;
1338}
1339
1340static int io_send_zc_import(struct io_kiocb *req, unsigned int issue_flags)
1341{
1342	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
1343	struct io_async_msghdr *kmsg = req->async_data;
1344	int ret;
1345
1346	if (sr->flags & IORING_RECVSEND_FIXED_BUF) {
1347		struct io_ring_ctx *ctx = req->ctx;
1348		struct io_rsrc_node *node;
1349
1350		ret = -EFAULT;
1351		io_ring_submit_lock(ctx, issue_flags);
1352		node = io_rsrc_node_lookup(&ctx->buf_table, sr->buf_index);
1353		if (node) {
1354			io_req_assign_buf_node(sr->notif, node);
1355			ret = 0;
1356		}
1357		io_ring_submit_unlock(ctx, issue_flags);
1358
1359		if (unlikely(ret))
1360			return ret;
1361
1362		ret = io_import_fixed(ITER_SOURCE, &kmsg->msg.msg_iter,
1363					node->buf, (u64)(uintptr_t)sr->buf,
1364					sr->len);
1365		if (unlikely(ret))
1366			return ret;
1367		kmsg->msg.sg_from_iter = io_sg_from_iter;
1368	} else {
1369		ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len, &kmsg->msg.msg_iter);
1370		if (unlikely(ret))
1371			return ret;
1372		ret = io_notif_account_mem(sr->notif, sr->len);
1373		if (unlikely(ret))
1374			return ret;
1375		kmsg->msg.sg_from_iter = io_sg_from_iter_iovec;
1376	}
1377
1378	return ret;
1379}
1380
1381int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
1382{
1383	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
1384	struct io_async_msghdr *kmsg = req->async_data;
1385	struct socket *sock;
1386	unsigned msg_flags;
1387	int ret, min_ret = 0;
1388
1389	sock = sock_from_file(req->file);
1390	if (unlikely(!sock))
1391		return -ENOTSOCK;
1392	if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
1393		return -EOPNOTSUPP;
1394
1395	if (!(req->flags & REQ_F_POLLED) &&
1396	    (zc->flags & IORING_RECVSEND_POLL_FIRST))
1397		return -EAGAIN;
1398
1399	if (!zc->done_io) {
1400		ret = io_send_zc_import(req, issue_flags);
1401		if (unlikely(ret))
1402			return ret;
1403	}
1404
1405	msg_flags = zc->msg_flags;
1406	if (issue_flags & IO_URING_F_NONBLOCK)
1407		msg_flags |= MSG_DONTWAIT;
1408	if (msg_flags & MSG_WAITALL)
1409		min_ret = iov_iter_count(&kmsg->msg.msg_iter);
1410	msg_flags &= ~MSG_INTERNAL_SENDMSG_FLAGS;
1411
1412	kmsg->msg.msg_flags = msg_flags;
1413	kmsg->msg.msg_ubuf = &io_notif_to_data(zc->notif)->uarg;
1414	ret = sock_sendmsg(sock, &kmsg->msg);
1415
1416	if (unlikely(ret < min_ret)) {
1417		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
1418			return -EAGAIN;
1419
1420		if (ret > 0 && io_net_retry(sock, kmsg->msg.msg_flags)) {
1421			zc->len -= ret;
1422			zc->buf += ret;
1423			zc->done_io += ret;
1424			req->flags |= REQ_F_BL_NO_RECYCLE;
1425			return -EAGAIN;
1426		}
1427		if (ret == -ERESTARTSYS)
1428			ret = -EINTR;
1429		req_set_fail(req);
1430	}
1431
1432	if (ret >= 0)
1433		ret += zc->done_io;
1434	else if (zc->done_io)
1435		ret = zc->done_io;
1436
1437	/*
1438	 * If we're in io-wq we can't rely on tw ordering guarantees, defer
1439	 * flushing notif to io_send_zc_cleanup()
1440	 */
1441	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
1442		io_notif_flush(zc->notif);
1443		io_req_msg_cleanup(req, 0);
1444	}
1445	io_req_set_res(req, ret, IORING_CQE_F_MORE);
1446	return IOU_OK;
1447}
1448
1449int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
1450{
1451	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
1452	struct io_async_msghdr *kmsg = req->async_data;
1453	struct socket *sock;
1454	unsigned flags;
1455	int ret, min_ret = 0;
1456
1457	sock = sock_from_file(req->file);
1458	if (unlikely(!sock))
1459		return -ENOTSOCK;
1460	if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
1461		return -EOPNOTSUPP;
1462
1463	if (!(req->flags & REQ_F_POLLED) &&
1464	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
1465		return -EAGAIN;
1466
1467	flags = sr->msg_flags;
1468	if (issue_flags & IO_URING_F_NONBLOCK)
1469		flags |= MSG_DONTWAIT;
1470	if (flags & MSG_WAITALL)
1471		min_ret = iov_iter_count(&kmsg->msg.msg_iter);
1472
1473	kmsg->msg.msg_control_user = sr->msg_control;
1474	kmsg->msg.msg_ubuf = &io_notif_to_data(sr->notif)->uarg;
1475	kmsg->msg.sg_from_iter = io_sg_from_iter_iovec;
1476	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
1477
1478	if (unlikely(ret < min_ret)) {
1479		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
1480			return -EAGAIN;
1481
1482		if (ret > 0 && io_net_retry(sock, flags)) {
1483			sr->done_io += ret;
1484			req->flags |= REQ_F_BL_NO_RECYCLE;
1485			return -EAGAIN;
1486		}
1487		if (ret == -ERESTARTSYS)
1488			ret = -EINTR;
1489		req_set_fail(req);
1490	}
1491
1492	if (ret >= 0)
1493		ret += sr->done_io;
1494	else if (sr->done_io)
1495		ret = sr->done_io;
1496
1497	/*
1498	 * If we're in io-wq we can't rely on tw ordering guarantees, defer
1499	 * flushing notif to io_send_zc_cleanup()
1500	 */
1501	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
1502		io_notif_flush(sr->notif);
1503		io_req_msg_cleanup(req, 0);
1504	}
1505	io_req_set_res(req, ret, IORING_CQE_F_MORE);
1506	return IOU_OK;
1507}
1508
1509void io_sendrecv_fail(struct io_kiocb *req)
1510{
1511	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
1512
1513	if (sr->done_io)
1514		req->cqe.res = sr->done_io;
1515
1516	if ((req->flags & REQ_F_NEED_CLEANUP) &&
1517	    (req->opcode == IORING_OP_SEND_ZC || req->opcode == IORING_OP_SENDMSG_ZC))
1518		req->cqe.flags |= IORING_CQE_F_MORE;
1519}
1520
1521#define ACCEPT_FLAGS	(IORING_ACCEPT_MULTISHOT | IORING_ACCEPT_DONTWAIT | \
1522			 IORING_ACCEPT_POLL_FIRST)
1523
1524int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1525{
1526	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
1527
1528	if (sqe->len || sqe->buf_index)
1529		return -EINVAL;
1530
1531	accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
1532	accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
1533	accept->flags = READ_ONCE(sqe->accept_flags);
1534	accept->nofile = rlimit(RLIMIT_NOFILE);
1535	accept->iou_flags = READ_ONCE(sqe->ioprio);
1536	if (accept->iou_flags & ~ACCEPT_FLAGS)
1537		return -EINVAL;
1538
1539	accept->file_slot = READ_ONCE(sqe->file_index);
1540	if (accept->file_slot) {
1541		if (accept->flags & SOCK_CLOEXEC)
1542			return -EINVAL;
1543		if (accept->iou_flags & IORING_ACCEPT_MULTISHOT &&
1544		    accept->file_slot != IORING_FILE_INDEX_ALLOC)
1545			return -EINVAL;
1546	}
1547	if (accept->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
1548		return -EINVAL;
1549	if (SOCK_NONBLOCK != O_NONBLOCK && (accept->flags & SOCK_NONBLOCK))
1550		accept->flags = (accept->flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
1551	if (accept->iou_flags & IORING_ACCEPT_MULTISHOT)
1552		req->flags |= REQ_F_APOLL_MULTISHOT;
1553	if (accept->iou_flags & IORING_ACCEPT_DONTWAIT)
1554		req->flags |= REQ_F_NOWAIT;
1555	return 0;
1556}
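/*
 * SQE field mapping for IORING_OP_ACCEPT as parsed above (summary, not a
 * new interface):
 *
 *	sqe->addr		-> struct sockaddr __user * (may be NULL)
 *	sqe->addr2		-> int __user * address length
 *	sqe->accept_flags	-> SOCK_CLOEXEC / SOCK_NONBLOCK
 *	sqe->ioprio		-> IORING_ACCEPT_* flags (ACCEPT_FLAGS)
 *	sqe->file_index		-> fixed file slot, IORING_FILE_INDEX_ALLOC allowed
 *
 * With IORING_ACCEPT_MULTISHOT, io_accept() below posts one CQE per
 * accepted connection, each flagged IORING_CQE_F_MORE, until it stops.
 */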
1557
1558int io_accept(struct io_kiocb *req, unsigned int issue_flags)
1559{
1560	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
1561	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
1562	bool fixed = !!accept->file_slot;
1563	struct proto_accept_arg arg = {
1564		.flags = force_nonblock ? O_NONBLOCK : 0,
1565	};
1566	struct file *file;
1567	unsigned cflags;
1568	int ret, fd;
1569
1570	if (!(req->flags & REQ_F_POLLED) &&
1571	    accept->iou_flags & IORING_ACCEPT_POLL_FIRST)
1572		return -EAGAIN;
1573
1574retry:
1575	if (!fixed) {
1576		fd = __get_unused_fd_flags(accept->flags, accept->nofile);
1577		if (unlikely(fd < 0))
1578			return fd;
1579	}
1580	arg.err = 0;
1581	arg.is_empty = -1;
1582	file = do_accept(req->file, &arg, accept->addr, accept->addr_len,
1583			 accept->flags);
1584	if (IS_ERR(file)) {
1585		if (!fixed)
1586			put_unused_fd(fd);
1587		ret = PTR_ERR(file);
1588		if (ret == -EAGAIN && force_nonblock &&
1589		    !(accept->iou_flags & IORING_ACCEPT_DONTWAIT)) {
1590			/*
1591			 * if it's multishot and polled, we don't need to
1592			 * return EAGAIN to arm the poll infra since it
1593			 * has already been done
1594			 */
1595			if (issue_flags & IO_URING_F_MULTISHOT)
1596				return IOU_ISSUE_SKIP_COMPLETE;
1597			return ret;
1598		}
1599		if (ret == -ERESTARTSYS)
1600			ret = -EINTR;
1601		req_set_fail(req);
1602	} else if (!fixed) {
1603		fd_install(fd, file);
1604		ret = fd;
1605	} else {
1606		ret = io_fixed_fd_install(req, issue_flags, file,
1607						accept->file_slot);
1608	}
1609
1610	cflags = 0;
1611	if (!arg.is_empty)
1612		cflags |= IORING_CQE_F_SOCK_NONEMPTY;
1613
1614	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
1615		io_req_set_res(req, ret, cflags);
1616		return IOU_OK;
1617	}
1618
1619	if (ret < 0)
1620		return ret;
1621	if (io_req_post_cqe(req, ret, cflags | IORING_CQE_F_MORE)) {
1622		if (cflags & IORING_CQE_F_SOCK_NONEMPTY || arg.is_empty == -1)
1623			goto retry;
1624		if (issue_flags & IO_URING_F_MULTISHOT)
1625			return IOU_ISSUE_SKIP_COMPLETE;
1626		return -EAGAIN;
1627	}
1628
1629	io_req_set_res(req, ret, cflags);
1630	return IOU_STOP_MULTISHOT;
1631}
1632
1633int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1634{
1635	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);
1636
1637	if (sqe->addr || sqe->rw_flags || sqe->buf_index)
1638		return -EINVAL;
1639
1640	sock->domain = READ_ONCE(sqe->fd);
1641	sock->type = READ_ONCE(sqe->off);
1642	sock->protocol = READ_ONCE(sqe->len);
1643	sock->file_slot = READ_ONCE(sqe->file_index);
1644	sock->nofile = rlimit(RLIMIT_NOFILE);
1645
1646	sock->flags = sock->type & ~SOCK_TYPE_MASK;
1647	if (sock->file_slot && (sock->flags & SOCK_CLOEXEC))
1648		return -EINVAL;
1649	if (sock->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
1650		return -EINVAL;
1651	return 0;
1652}
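/*
 * SQE field mapping for IORING_OP_SOCKET as parsed above: the generic
 * fields are repurposed, so sqe->fd carries the address family, sqe->off
 * the socket type (optionally or'd with SOCK_CLOEXEC / SOCK_NONBLOCK),
 * sqe->len the protocol, and sqe->file_index an optional fixed file slot.
 */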
1653
1654int io_socket(struct io_kiocb *req, unsigned int issue_flags)
1655{
1656	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);
1657	bool fixed = !!sock->file_slot;
1658	struct file *file;
1659	int ret, fd;
1660
1661	if (!fixed) {
1662		fd = __get_unused_fd_flags(sock->flags, sock->nofile);
1663		if (unlikely(fd < 0))
1664			return fd;
1665	}
1666	file = __sys_socket_file(sock->domain, sock->type, sock->protocol);
1667	if (IS_ERR(file)) {
1668		if (!fixed)
1669			put_unused_fd(fd);
1670		ret = PTR_ERR(file);
1671		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
1672			return -EAGAIN;
1673		if (ret == -ERESTARTSYS)
1674			ret = -EINTR;
1675		req_set_fail(req);
1676	} else if (!fixed) {
1677		fd_install(fd, file);
1678		ret = fd;
1679	} else {
1680		ret = io_fixed_fd_install(req, issue_flags, file,
1681					    sock->file_slot);
1682	}
1683	io_req_set_res(req, ret, 0);
1684	return IOU_OK;
1685}
1686
1687int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1688{
1689	struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);
1690	struct io_async_msghdr *io;
1691
1692	if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
1693		return -EINVAL;
1694
1695	conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
1696	conn->addr_len =  READ_ONCE(sqe->addr2);
1697	conn->in_progress = conn->seen_econnaborted = false;
1698
1699	io = io_msg_alloc_async(req);
1700	if (unlikely(!io))
1701		return -ENOMEM;
1702
1703	return move_addr_to_kernel(conn->addr, conn->addr_len, &io->addr);
1704}
1705
1706int io_connect(struct io_kiocb *req, unsigned int issue_flags)
1707{
1708	struct io_connect *connect = io_kiocb_to_cmd(req, struct io_connect);
1709	struct io_async_msghdr *io = req->async_data;
1710	unsigned file_flags;
1711	int ret;
1712	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
1713
1714	if (unlikely(req->flags & REQ_F_FAIL)) {
1715		ret = -ECONNRESET;
1716		goto out;
1717	}
1718
1719	file_flags = force_nonblock ? O_NONBLOCK : 0;
1720
1721	ret = __sys_connect_file(req->file, &io->addr, connect->addr_len,
1722				 file_flags);
1723	if ((ret == -EAGAIN || ret == -EINPROGRESS || ret == -ECONNABORTED)
1724	    && force_nonblock) {
1725		if (ret == -EINPROGRESS) {
1726			connect->in_progress = true;
1727		} else if (ret == -ECONNABORTED) {
1728			if (connect->seen_econnaborted)
1729				goto out;
1730			connect->seen_econnaborted = true;
1731		}
1732		return -EAGAIN;
1733	}
1734	if (connect->in_progress) {
1735		/*
1736		 * At least bluetooth will return -EBADFD on a re-connect
1737		 * attempt, and it's (supposedly) also valid to get -EISCONN
1738		 * which means the previous result is good. For both of these,
1739		 * grab the sock_error() and use that for the completion.
1740		 */
1741		if (ret == -EBADFD || ret == -EISCONN)
1742			ret = sock_error(sock_from_file(req->file)->sk);
1743	}
1744	if (ret == -ERESTARTSYS)
1745		ret = -EINTR;
1746out:
1747	if (ret < 0)
1748		req_set_fail(req);
1749	io_req_msg_cleanup(req, issue_flags);
1750	io_req_set_res(req, ret, 0);
1751	return IOU_OK;
1752}
1753
1754int io_bind_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1755{
1756	struct io_bind *bind = io_kiocb_to_cmd(req, struct io_bind);
1757	struct sockaddr __user *uaddr;
1758	struct io_async_msghdr *io;
1759
1760	if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
1761		return -EINVAL;
1762
1763	uaddr = u64_to_user_ptr(READ_ONCE(sqe->addr));
1764	bind->addr_len =  READ_ONCE(sqe->addr2);
1765
1766	io = io_msg_alloc_async(req);
1767	if (unlikely(!io))
1768		return -ENOMEM;
1769	return move_addr_to_kernel(uaddr, bind->addr_len, &io->addr);
1770}
1771
1772int io_bind(struct io_kiocb *req, unsigned int issue_flags)
1773{
1774	struct io_bind *bind = io_kiocb_to_cmd(req, struct io_bind);
1775	struct io_async_msghdr *io = req->async_data;
1776	struct socket *sock;
1777	int ret;
1778
1779	sock = sock_from_file(req->file);
1780	if (unlikely(!sock))
1781		return -ENOTSOCK;
1782
1783	ret = __sys_bind_socket(sock, &io->addr, bind->addr_len);
1784	if (ret < 0)
1785		req_set_fail(req);
1786	io_req_set_res(req, ret, 0);
1787	return 0;
1788}
1789
1790int io_listen_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1791{
1792	struct io_listen *listen = io_kiocb_to_cmd(req, struct io_listen);
1793
1794	if (sqe->addr || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in || sqe->addr2)
1795		return -EINVAL;
1796
1797	listen->backlog = READ_ONCE(sqe->len);
1798	return 0;
1799}
1800
1801int io_listen(struct io_kiocb *req, unsigned int issue_flags)
1802{
1803	struct io_listen *listen = io_kiocb_to_cmd(req, struct io_listen);
1804	struct socket *sock;
1805	int ret;
1806
1807	sock = sock_from_file(req->file);
1808	if (unlikely(!sock))
1809		return -ENOTSOCK;
1810
1811	ret = __sys_listen_socket(sock, listen->backlog);
1812	if (ret < 0)
1813		req_set_fail(req);
1814	io_req_set_res(req, ret, 0);
1815	return 0;
1816}
1817
1818void io_netmsg_cache_free(const void *entry)
1819{
1820	struct io_async_msghdr *kmsg = (struct io_async_msghdr *) entry;
1821
1822	if (kmsg->free_iov) {
1823		kasan_mempool_unpoison_object(kmsg->free_iov,
1824				kmsg->free_iov_nr * sizeof(struct iovec));
1825		io_netmsg_iovec_free(kmsg);
1826	}
1827	kfree(kmsg);
1828}
1829#endif