io_uring/net.c (v6.2)
   1// SPDX-License-Identifier: GPL-2.0
   2#include <linux/kernel.h>
   3#include <linux/errno.h>
   4#include <linux/file.h>
   5#include <linux/slab.h>
   6#include <linux/net.h>
   7#include <linux/compat.h>
   8#include <net/compat.h>
   9#include <linux/io_uring.h>
  10
  11#include <uapi/linux/io_uring.h>
  12
  13#include "io_uring.h"
  14#include "kbuf.h"
  15#include "alloc_cache.h"
  16#include "net.h"
  17#include "notif.h"
  18#include "rsrc.h"
  19
  20#if defined(CONFIG_NET)
  21struct io_shutdown {
  22	struct file			*file;
  23	int				how;
  24};
  25
  26struct io_accept {
  27	struct file			*file;
  28	struct sockaddr __user		*addr;
  29	int __user			*addr_len;
  30	int				flags;
  31	u32				file_slot;
  32	unsigned long			nofile;
  33};
  34
  35struct io_socket {
  36	struct file			*file;
  37	int				domain;
  38	int				type;
  39	int				protocol;
  40	int				flags;
  41	u32				file_slot;
  42	unsigned long			nofile;
  43};
  44
  45struct io_connect {
  46	struct file			*file;
  47	struct sockaddr __user		*addr;
  48	int				addr_len;
  49	bool				in_progress;
  50};
  51
  52struct io_sr_msg {
  53	struct file			*file;
  54	union {
  55		struct compat_msghdr __user	*umsg_compat;
  56		struct user_msghdr __user	*umsg;
  57		void __user			*buf;
  58	};
  59	unsigned			len;
  60	unsigned			done_io;
  61	unsigned			msg_flags;
  62	u16				flags;
  63	/* initialised and used only by !msg send variants */
  64	u16				addr_len;
  65	u16				buf_group;
  66	void __user			*addr;
  67	/* used only for send zerocopy */
  68	struct io_kiocb 		*notif;
  69};
  70
  71static inline bool io_check_multishot(struct io_kiocb *req,
  72				      unsigned int issue_flags)
  73{
  74	/*
  75	 * When ->locked_cq is set we only allow to post CQEs from the original
  76	 * task context. Usual request completions will be handled in other
  77	 * generic paths but multipoll may decide to post extra cqes.
  78	 */
  79	return !(issue_flags & IO_URING_F_IOWQ) ||
  80		!(issue_flags & IO_URING_F_MULTISHOT) ||
  81		!req->ctx->task_complete;
  82}
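/*
 * Illustrative note (not part of the kernel source): ->task_complete is
 * set on rings where only the submitting task may post completions
 * (e.g. IORING_SETUP_DEFER_TASKRUN). If a multishot request is issued
 * from an io-wq worker (IO_URING_F_IOWQ) on such a ring, the worker must
 * not post the extra CQEs itself, so this check fails and the callers
 * below punt with -EAGAIN instead.
 */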
  83
  84int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
  85{
  86	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
  87
  88	if (unlikely(sqe->off || sqe->addr || sqe->rw_flags ||
  89		     sqe->buf_index || sqe->splice_fd_in))
  90		return -EINVAL;
  91
  92	shutdown->how = READ_ONCE(sqe->len);
  93	return 0;
  94}
  95
  96int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
  97{
  98	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
  99	struct socket *sock;
 100	int ret;
 101
 102	if (issue_flags & IO_URING_F_NONBLOCK)
 103		return -EAGAIN;
 104
 105	sock = sock_from_file(req->file);
 106	if (unlikely(!sock))
 107		return -ENOTSOCK;
 108
 109	ret = __sys_shutdown_sock(sock, shutdown->how);
 110	io_req_set_res(req, ret, 0);
 111	return IOU_OK;
 112}
 113
 114static bool io_net_retry(struct socket *sock, int flags)
 115{
 116	if (!(flags & MSG_WAITALL))
 117		return false;
 118	return sock->type == SOCK_STREAM || sock->type == SOCK_SEQPACKET;
 119}
 120
 121static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
 122{
 123	struct io_async_msghdr *hdr = req->async_data;
 124
 125	if (!req_has_async_data(req) || issue_flags & IO_URING_F_UNLOCKED)
 126		return;
 127
 128	/* Let normal cleanup path reap it if we fail adding to the cache */
 129	if (io_alloc_cache_put(&req->ctx->netmsg_cache, &hdr->cache)) {
 130		req->async_data = NULL;
 131		req->flags &= ~REQ_F_ASYNC_DATA;
 132	}
 133}
 134
 135static struct io_async_msghdr *io_msg_alloc_async(struct io_kiocb *req,
 136						  unsigned int issue_flags)
 137{
 138	struct io_ring_ctx *ctx = req->ctx;
 139	struct io_cache_entry *entry;
 140	struct io_async_msghdr *hdr;
 141
 142	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
 143		entry = io_alloc_cache_get(&ctx->netmsg_cache);
 144		if (entry) {
 145			hdr = container_of(entry, struct io_async_msghdr, cache);
 146			hdr->free_iov = NULL;
 147			req->flags |= REQ_F_ASYNC_DATA;
 148			req->async_data = hdr;
 149			return hdr;
 150		}
 151	}
 152
 153	if (!io_alloc_async_data(req)) {
 154		hdr = req->async_data;
 155		hdr->free_iov = NULL;
 156		return hdr;
 157	}
 158	return NULL;
 159}
 160
 161static inline struct io_async_msghdr *io_msg_alloc_async_prep(struct io_kiocb *req)
 162{
 163	/* ->prep_async is always called from the submission context */
 164	return io_msg_alloc_async(req, 0);
 165}
 166
 167static int io_setup_async_msg(struct io_kiocb *req,
 168			      struct io_async_msghdr *kmsg,
 169			      unsigned int issue_flags)
 170{
 171	struct io_async_msghdr *async_msg;
 172
 173	if (req_has_async_data(req))
 174		return -EAGAIN;
 175	async_msg = io_msg_alloc_async(req, issue_flags);
 176	if (!async_msg) {
 177		kfree(kmsg->free_iov);
 178		return -ENOMEM;
 179	}
 180	req->flags |= REQ_F_NEED_CLEANUP;
 181	memcpy(async_msg, kmsg, sizeof(*kmsg));
 182	if (async_msg->msg.msg_name)
 183		async_msg->msg.msg_name = &async_msg->addr;
  184	/* if we're using fast_iov, set it to the new one */
 185	if (!kmsg->free_iov) {
 186		size_t fast_idx = kmsg->msg.msg_iter.iov - kmsg->fast_iov;
 187		async_msg->msg.msg_iter.iov = &async_msg->fast_iov[fast_idx];
 188	}
 189
 190	return -EAGAIN;
 191}
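/*
 * Illustrative sketch (userspace, made-up names): the fast_iov fixup
 * above is the usual "rebase an interior pointer after memcpy" idiom.
 * Copying a struct whose pointer targets its own embedded array leaves
 * the copy pointing at the original, so the pointer is recomputed from
 * the saved index:
 */
#include <string.h>
#include <sys/uio.h>

struct msg_state {
	struct iovec *cur;		/* may point into fast[] below */
	struct iovec fast[8];
};

static void msg_state_clone(struct msg_state *dst, const struct msg_state *src)
{
	memcpy(dst, src, sizeof(*dst));
	/* rebase: keep the index, but point into the copy's own array */
	if (src->cur >= src->fast && src->cur < src->fast + 8)
		dst->cur = &dst->fast[src->cur - src->fast];
}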
 192
 193static int io_sendmsg_copy_hdr(struct io_kiocb *req,
 194			       struct io_async_msghdr *iomsg)
 195{
 196	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 197
 198	iomsg->msg.msg_name = &iomsg->addr;
 199	iomsg->free_iov = iomsg->fast_iov;
 200	return sendmsg_copy_msghdr(&iomsg->msg, sr->umsg, sr->msg_flags,
 201					&iomsg->free_iov);
 202}
 203
 204int io_send_prep_async(struct io_kiocb *req)
 205{
 206	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
 207	struct io_async_msghdr *io;
 208	int ret;
 209
 210	if (!zc->addr || req_has_async_data(req))
 211		return 0;
 212	io = io_msg_alloc_async_prep(req);
 213	if (!io)
 214		return -ENOMEM;
 215	ret = move_addr_to_kernel(zc->addr, zc->addr_len, &io->addr);
 216	return ret;
 217}
 218
 219static int io_setup_async_addr(struct io_kiocb *req,
 220			      struct sockaddr_storage *addr_storage,
 221			      unsigned int issue_flags)
 222{
 223	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 224	struct io_async_msghdr *io;
 225
 226	if (!sr->addr || req_has_async_data(req))
 227		return -EAGAIN;
 228	io = io_msg_alloc_async(req, issue_flags);
 229	if (!io)
 230		return -ENOMEM;
 231	memcpy(&io->addr, addr_storage, sizeof(io->addr));
 232	return -EAGAIN;
 233}
 234
 235int io_sendmsg_prep_async(struct io_kiocb *req)
 236{
 237	int ret;
 238
 239	if (!io_msg_alloc_async_prep(req))
 240		return -ENOMEM;
 241	ret = io_sendmsg_copy_hdr(req, req->async_data);
 242	if (!ret)
 243		req->flags |= REQ_F_NEED_CLEANUP;
 244	return ret;
 245}
 246
 247void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req)
 248{
 249	struct io_async_msghdr *io = req->async_data;
 250
 251	kfree(io->free_iov);
 252}
 253
 254int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 255{
 256	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 257
 258	if (req->opcode == IORING_OP_SEND) {
 259		if (READ_ONCE(sqe->__pad3[0]))
 260			return -EINVAL;
 261		sr->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
 262		sr->addr_len = READ_ONCE(sqe->addr_len);
 263	} else if (sqe->addr2 || sqe->file_index) {
 264		return -EINVAL;
 265	}
 266
 267	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
 268	sr->len = READ_ONCE(sqe->len);
 269	sr->flags = READ_ONCE(sqe->ioprio);
 270	if (sr->flags & ~IORING_RECVSEND_POLL_FIRST)
 271		return -EINVAL;
 272	sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
 273	if (sr->msg_flags & MSG_DONTWAIT)
 274		req->flags |= REQ_F_NOWAIT;
 275
 276#ifdef CONFIG_COMPAT
 277	if (req->ctx->compat)
 278		sr->msg_flags |= MSG_CMSG_COMPAT;
 279#endif
 280	sr->done_io = 0;
 281	return 0;
 282}
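/*
 * Illustrative sketch (userspace, liburing; error handling trimmed):
 * how an application reaches io_sendmsg_prep()/io_sendmsg(). The msghdr
 * pointer lands in sqe->addr and the MSG_* flags in sqe->msg_flags.
 */
#include <liburing.h>
#include <sys/socket.h>

static int send_one(struct io_uring *ring, int fd, struct msghdr *mh)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;
	int res;

	io_uring_prep_sendmsg(sqe, fd, mh, 0);
	io_uring_submit(ring);
	io_uring_wait_cqe(ring, &cqe);
	res = cqe->res;			/* bytes sent, or -errno */
	io_uring_cqe_seen(ring, cqe);
	return res;
}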
 283
 284int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
 285{
 286	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 287	struct io_async_msghdr iomsg, *kmsg;
 288	struct socket *sock;
 289	unsigned flags;
 290	int min_ret = 0;
 291	int ret;
 292
 293	sock = sock_from_file(req->file);
 294	if (unlikely(!sock))
 295		return -ENOTSOCK;
 296
 297	if (req_has_async_data(req)) {
 298		kmsg = req->async_data;
 299	} else {
 300		ret = io_sendmsg_copy_hdr(req, &iomsg);
 301		if (ret)
 302			return ret;
 303		kmsg = &iomsg;
 304	}
 305
 306	if (!(req->flags & REQ_F_POLLED) &&
 307	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
 308		return io_setup_async_msg(req, kmsg, issue_flags);
 309
 310	flags = sr->msg_flags;
 311	if (issue_flags & IO_URING_F_NONBLOCK)
 312		flags |= MSG_DONTWAIT;
 313	if (flags & MSG_WAITALL)
 314		min_ret = iov_iter_count(&kmsg->msg.msg_iter);
 315
 316	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
 317
 318	if (ret < min_ret) {
 319		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
 320			return io_setup_async_msg(req, kmsg, issue_flags);
 321		if (ret > 0 && io_net_retry(sock, flags)) {
 322			sr->done_io += ret;
 323			req->flags |= REQ_F_PARTIAL_IO;
 324			return io_setup_async_msg(req, kmsg, issue_flags);
 325		}
 326		if (ret == -ERESTARTSYS)
 327			ret = -EINTR;
 328		req_set_fail(req);
 329	}
 330	/* fast path, check for non-NULL to avoid function call */
 331	if (kmsg->free_iov)
 332		kfree(kmsg->free_iov);
 333	req->flags &= ~REQ_F_NEED_CLEANUP;
 334	io_netmsg_recycle(req, issue_flags);
 335	if (ret >= 0)
 336		ret += sr->done_io;
 337	else if (sr->done_io)
 338		ret = sr->done_io;
 339	io_req_set_res(req, ret, 0);
 340	return IOU_OK;
 341}
 342
 343int io_send(struct io_kiocb *req, unsigned int issue_flags)
 344{
 345	struct sockaddr_storage __address;
 346	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 347	struct msghdr msg;
 348	struct iovec iov;
 349	struct socket *sock;
 350	unsigned flags;
 351	int min_ret = 0;
 352	int ret;
 353
 354	msg.msg_name = NULL;
 355	msg.msg_control = NULL;
 356	msg.msg_controllen = 0;
 357	msg.msg_namelen = 0;
 358	msg.msg_ubuf = NULL;
 359
 360	if (sr->addr) {
 361		if (req_has_async_data(req)) {
 362			struct io_async_msghdr *io = req->async_data;
 363
 364			msg.msg_name = &io->addr;
 365		} else {
 366			ret = move_addr_to_kernel(sr->addr, sr->addr_len, &__address);
 367			if (unlikely(ret < 0))
 368				return ret;
 369			msg.msg_name = (struct sockaddr *)&__address;
 370		}
 371		msg.msg_namelen = sr->addr_len;
 372	}
 373
 374	if (!(req->flags & REQ_F_POLLED) &&
 375	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
 376		return io_setup_async_addr(req, &__address, issue_flags);
 377
 378	sock = sock_from_file(req->file);
 379	if (unlikely(!sock))
 380		return -ENOTSOCK;
 381
 382	ret = import_single_range(ITER_SOURCE, sr->buf, sr->len, &iov, &msg.msg_iter);
 383	if (unlikely(ret))
 384		return ret;
 385
 386	flags = sr->msg_flags;
 387	if (issue_flags & IO_URING_F_NONBLOCK)
 388		flags |= MSG_DONTWAIT;
 389	if (flags & MSG_WAITALL)
 390		min_ret = iov_iter_count(&msg.msg_iter);
 391
 392	msg.msg_flags = flags;
 393	ret = sock_sendmsg(sock, &msg);
 394	if (ret < min_ret) {
 395		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
 396			return io_setup_async_addr(req, &__address, issue_flags);
 397
 398		if (ret > 0 && io_net_retry(sock, flags)) {
 399			sr->len -= ret;
 400			sr->buf += ret;
 401			sr->done_io += ret;
 402			req->flags |= REQ_F_PARTIAL_IO;
 403			return io_setup_async_addr(req, &__address, issue_flags);
 404		}
 405		if (ret == -ERESTARTSYS)
 406			ret = -EINTR;
 407		req_set_fail(req);
 408	}
 409	if (ret >= 0)
 410		ret += sr->done_io;
 411	else if (sr->done_io)
 412		ret = sr->done_io;
 413	io_req_set_res(req, ret, 0);
 414	return IOU_OK;
 415}
 416
 417static bool io_recvmsg_multishot_overflow(struct io_async_msghdr *iomsg)
 418{
 419	int hdr;
 420
 421	if (iomsg->namelen < 0)
 422		return true;
 423	if (check_add_overflow((int)sizeof(struct io_uring_recvmsg_out),
 424			       iomsg->namelen, &hdr))
 425		return true;
 426	if (check_add_overflow(hdr, (int)iomsg->controllen, &hdr))
 427		return true;
 428
 429	return false;
 430}
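/*
 * Illustrative sketch (userspace analogue): check_add_overflow() is the
 * kernel wrapper around the compiler's checked arithmetic and returns
 * true when the sum wraps. The same shape with __builtin_add_overflow():
 */
#include <stdbool.h>
#include <linux/io_uring.h>

static bool hdr_len_overflows(int namelen, int controllen, int *out)
{
	int hdr;

	if (__builtin_add_overflow((int)sizeof(struct io_uring_recvmsg_out),
				   namelen, &hdr))
		return true;		/* wrapped: reject the request */
	if (__builtin_add_overflow(hdr, controllen, &hdr))
		return true;
	*out = hdr;
	return false;
}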
 431
 432static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
 433				 struct io_async_msghdr *iomsg)
 434{
 435	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 436	struct user_msghdr msg;
 437	int ret;
 438
 439	if (copy_from_user(&msg, sr->umsg, sizeof(*sr->umsg)))
 440		return -EFAULT;
 441
 442	ret = __copy_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
 443	if (ret)
 444		return ret;
 445
 446	if (req->flags & REQ_F_BUFFER_SELECT) {
 447		if (msg.msg_iovlen == 0) {
 448			sr->len = iomsg->fast_iov[0].iov_len = 0;
 449			iomsg->fast_iov[0].iov_base = NULL;
 450			iomsg->free_iov = NULL;
 451		} else if (msg.msg_iovlen > 1) {
 452			return -EINVAL;
 453		} else {
 454			if (copy_from_user(iomsg->fast_iov, msg.msg_iov, sizeof(*msg.msg_iov)))
 455				return -EFAULT;
 456			sr->len = iomsg->fast_iov[0].iov_len;
 457			iomsg->free_iov = NULL;
 458		}
 459
 460		if (req->flags & REQ_F_APOLL_MULTISHOT) {
 461			iomsg->namelen = msg.msg_namelen;
 462			iomsg->controllen = msg.msg_controllen;
 463			if (io_recvmsg_multishot_overflow(iomsg))
 464				return -EOVERFLOW;
 465		}
 466	} else {
 467		iomsg->free_iov = iomsg->fast_iov;
 468		ret = __import_iovec(ITER_DEST, msg.msg_iov, msg.msg_iovlen, UIO_FASTIOV,
 469				     &iomsg->free_iov, &iomsg->msg.msg_iter,
 470				     false);
 471		if (ret > 0)
 472			ret = 0;
 473	}
 474
 475	return ret;
 476}
 477
 478#ifdef CONFIG_COMPAT
 479static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
 480					struct io_async_msghdr *iomsg)
 481{
 482	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 483	struct compat_msghdr msg;
 484	struct compat_iovec __user *uiov;
 485	int ret;
 486
 487	if (copy_from_user(&msg, sr->umsg_compat, sizeof(msg)))
 488		return -EFAULT;
 489
 490	ret = __get_compat_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
 491	if (ret)
 492		return ret;
 493
 494	uiov = compat_ptr(msg.msg_iov);
 495	if (req->flags & REQ_F_BUFFER_SELECT) {
 496		compat_ssize_t clen;
 497
 498		iomsg->free_iov = NULL;
 499		if (msg.msg_iovlen == 0) {
 500			sr->len = 0;
 501		} else if (msg.msg_iovlen > 1) {
 502			return -EINVAL;
 503		} else {
 504			if (!access_ok(uiov, sizeof(*uiov)))
 505				return -EFAULT;
 506			if (__get_user(clen, &uiov->iov_len))
 507				return -EFAULT;
 508			if (clen < 0)
 509				return -EINVAL;
 510			sr->len = clen;
 511		}
 512
 513		if (req->flags & REQ_F_APOLL_MULTISHOT) {
 514			iomsg->namelen = msg.msg_namelen;
 515			iomsg->controllen = msg.msg_controllen;
 516			if (io_recvmsg_multishot_overflow(iomsg))
 517				return -EOVERFLOW;
 518		}
 519	} else {
 520		iomsg->free_iov = iomsg->fast_iov;
 521		ret = __import_iovec(ITER_DEST, (struct iovec __user *)uiov, msg.msg_iovlen,
 522				   UIO_FASTIOV, &iomsg->free_iov,
 523				   &iomsg->msg.msg_iter, true);
 524		if (ret < 0)
 525			return ret;
 526	}
 527
 528	return 0;
 529}
 530#endif
 531
 532static int io_recvmsg_copy_hdr(struct io_kiocb *req,
 533			       struct io_async_msghdr *iomsg)
 534{
 535	iomsg->msg.msg_name = &iomsg->addr;
 536
 537#ifdef CONFIG_COMPAT
 538	if (req->ctx->compat)
 539		return __io_compat_recvmsg_copy_hdr(req, iomsg);
 540#endif
 541
 542	return __io_recvmsg_copy_hdr(req, iomsg);
 543}
 544
 545int io_recvmsg_prep_async(struct io_kiocb *req)
 546{
 547	int ret;
 548
 549	if (!io_msg_alloc_async_prep(req))
 550		return -ENOMEM;
 551	ret = io_recvmsg_copy_hdr(req, req->async_data);
 552	if (!ret)
 553		req->flags |= REQ_F_NEED_CLEANUP;
 554	return ret;
 555}
 556
 557#define RECVMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECV_MULTISHOT)
 558
 559int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 560{
 561	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 562
 563	if (unlikely(sqe->file_index || sqe->addr2))
 564		return -EINVAL;
 565
 566	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
 567	sr->len = READ_ONCE(sqe->len);
 568	sr->flags = READ_ONCE(sqe->ioprio);
 569	if (sr->flags & ~(RECVMSG_FLAGS))
 570		return -EINVAL;
 571	sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
 572	if (sr->msg_flags & MSG_DONTWAIT)
 573		req->flags |= REQ_F_NOWAIT;
 574	if (sr->msg_flags & MSG_ERRQUEUE)
 575		req->flags |= REQ_F_CLEAR_POLLIN;
 576	if (sr->flags & IORING_RECV_MULTISHOT) {
 577		if (!(req->flags & REQ_F_BUFFER_SELECT))
 578			return -EINVAL;
 579		if (sr->msg_flags & MSG_WAITALL)
 580			return -EINVAL;
 581		if (req->opcode == IORING_OP_RECV && sr->len)
 582			return -EINVAL;
 583		req->flags |= REQ_F_APOLL_MULTISHOT;
 584		/*
 585		 * Store the buffer group for this multishot receive separately,
 586		 * as if we end up doing an io-wq based issue that selects a
 587		 * buffer, it has to be committed immediately and that will
 588		 * clear ->buf_list. This means we lose the link to the buffer
 589		 * list, and the eventual buffer put on completion then cannot
 590		 * restore it.
 591		 */
 592		sr->buf_group = req->buf_index;
 593	}
 594
 595#ifdef CONFIG_COMPAT
 596	if (req->ctx->compat)
 597		sr->msg_flags |= MSG_CMSG_COMPAT;
 598#endif
 599	sr->done_io = 0;
 600	return 0;
 601}
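/*
 * Illustrative sketch (userspace, liburing >= 2.3 assumed): arming the
 * multishot receive path prepped above. Multishot requires provided
 * buffers, i.e. IOSQE_BUFFER_SELECT plus a buffer group registered
 * elsewhere; the kernel then posts one CQE per received message until
 * it stops the series.
 */
#include <liburing.h>
#include <sys/socket.h>

static void arm_multishot_recvmsg(struct io_uring *ring, int fd,
				  struct msghdr *mh, unsigned short bgid)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	io_uring_prep_recvmsg_multishot(sqe, fd, mh, 0);
	sqe->flags |= IOSQE_BUFFER_SELECT;	/* sets REQ_F_BUFFER_SELECT */
	sqe->buf_group = bgid;			/* saved as sr->buf_group */
	io_uring_submit(ring);
}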
 602
 603static inline void io_recv_prep_retry(struct io_kiocb *req)
 604{
 605	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 606
 607	sr->done_io = 0;
 608	sr->len = 0; /* get from the provided buffer */
 609	req->buf_index = sr->buf_group;
 610}
 611
 612/*
 613 * Finishes io_recv and io_recvmsg.
 614 *
 615 * Returns true if it is actually finished, or false if it should run
 616 * again (for multishot).
 617 */
 618static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
 619				  unsigned int cflags, bool mshot_finished,
 620				  unsigned issue_flags)
 621{
 622	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
 623		io_req_set_res(req, *ret, cflags);
 624		*ret = IOU_OK;
 625		return true;
 626	}
 627
 628	if (!mshot_finished) {
 629		if (io_aux_cqe(req->ctx, issue_flags & IO_URING_F_COMPLETE_DEFER,
 630			       req->cqe.user_data, *ret, cflags | IORING_CQE_F_MORE, true)) {
 631			io_recv_prep_retry(req);
 632			return false;
 633		}
 634		/* Otherwise stop multishot but use the current result. */
 635	}
 636
 637	io_req_set_res(req, *ret, cflags);
 638
 639	if (issue_flags & IO_URING_F_MULTISHOT)
 640		*ret = IOU_STOP_MULTISHOT;
 641	else
 642		*ret = IOU_OK;
 643	return true;
 644}
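/*
 * Illustrative sketch (userspace): consuming what io_recv_finish()
 * posts. While IORING_CQE_F_MORE is set the request is still armed and
 * more completions will follow; once clear, the series is over and the
 * request must be resubmitted if more data is wanted. handle_result()
 * is a hypothetical consumer.
 */
#include <liburing.h>

void handle_result(int res, unsigned int flags);	/* hypothetical */

static void drain_multishot(struct io_uring *ring)
{
	struct io_uring_cqe *cqe;

	while (io_uring_wait_cqe(ring, &cqe) == 0) {
		int more = cqe->flags & IORING_CQE_F_MORE;

		handle_result(cqe->res, cqe->flags);
		io_uring_cqe_seen(ring, cqe);
		if (!more)
			break;		/* multishot terminated */
	}
}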
 645
 646static int io_recvmsg_prep_multishot(struct io_async_msghdr *kmsg,
 647				     struct io_sr_msg *sr, void __user **buf,
 648				     size_t *len)
 649{
 650	unsigned long ubuf = (unsigned long) *buf;
 651	unsigned long hdr;
 652
 653	hdr = sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
 654		kmsg->controllen;
 655	if (*len < hdr)
 656		return -EFAULT;
 657
 658	if (kmsg->controllen) {
 659		unsigned long control = ubuf + hdr - kmsg->controllen;
 660
 661		kmsg->msg.msg_control_user = (void __user *) control;
 662		kmsg->msg.msg_controllen = kmsg->controllen;
 663	}
 664
 665	sr->buf = *buf; /* stash for later copy */
 666	*buf = (void __user *) (ubuf + hdr);
 667	kmsg->payloadlen = *len = *len - hdr;
 668	return 0;
 669}
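/*
 * Illustrative note (not part of the kernel source): after the carve-up
 * above, the selected buffer is laid out as
 *
 *   ubuf                                            ubuf + hdr
 *   |                                               |
 *   [ struct io_uring_recvmsg_out ][ name ][ control ][ payload ... ]
 *
 * with the control area ending exactly at ubuf + hdr. The header struct
 * itself is filled in afterwards by io_recvmsg_multishot().
 */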
 670
 671struct io_recvmsg_multishot_hdr {
 672	struct io_uring_recvmsg_out msg;
 673	struct sockaddr_storage addr;
 674};
 675
 676static int io_recvmsg_multishot(struct socket *sock, struct io_sr_msg *io,
 677				struct io_async_msghdr *kmsg,
 678				unsigned int flags, bool *finished)
 679{
 680	int err;
 681	int copy_len;
 682	struct io_recvmsg_multishot_hdr hdr;
 683
 684	if (kmsg->namelen)
 685		kmsg->msg.msg_name = &hdr.addr;
 686	kmsg->msg.msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT);
 687	kmsg->msg.msg_namelen = 0;
 688
 689	if (sock->file->f_flags & O_NONBLOCK)
 690		flags |= MSG_DONTWAIT;
 691
 692	err = sock_recvmsg(sock, &kmsg->msg, flags);
 693	*finished = err <= 0;
 694	if (err < 0)
 695		return err;
 696
 697	hdr.msg = (struct io_uring_recvmsg_out) {
 698		.controllen = kmsg->controllen - kmsg->msg.msg_controllen,
 699		.flags = kmsg->msg.msg_flags & ~MSG_CMSG_COMPAT
 700	};
 701
 702	hdr.msg.payloadlen = err;
 703	if (err > kmsg->payloadlen)
 704		err = kmsg->payloadlen;
 705
 706	copy_len = sizeof(struct io_uring_recvmsg_out);
 707	if (kmsg->msg.msg_namelen > kmsg->namelen)
 708		copy_len += kmsg->namelen;
 709	else
 710		copy_len += kmsg->msg.msg_namelen;
 711
 712	/*
 713	 *      "fromlen shall refer to the value before truncation.."
 714	 *                      1003.1g
 715	 */
 716	hdr.msg.namelen = kmsg->msg.msg_namelen;
 717
 718	/* ensure that there is no gap between hdr and sockaddr_storage */
 719	BUILD_BUG_ON(offsetof(struct io_recvmsg_multishot_hdr, addr) !=
 720		     sizeof(struct io_uring_recvmsg_out));
 721	if (copy_to_user(io->buf, &hdr, copy_len)) {
 722		*finished = true;
 723		return -EFAULT;
 724	}
 725
 726	return sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
 727			kmsg->controllen + err;
 728}
 729
 730int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
 731{
 732	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 733	struct io_async_msghdr iomsg, *kmsg;
 734	struct socket *sock;
 735	unsigned int cflags;
 736	unsigned flags;
 737	int ret, min_ret = 0;
 738	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
 739	bool mshot_finished = true;
 740
 741	sock = sock_from_file(req->file);
 742	if (unlikely(!sock))
 743		return -ENOTSOCK;
 744
 745	if (req_has_async_data(req)) {
 746		kmsg = req->async_data;
 747	} else {
 748		ret = io_recvmsg_copy_hdr(req, &iomsg);
 749		if (ret)
 750			return ret;
 751		kmsg = &iomsg;
 752	}
 753
 754	if (!(req->flags & REQ_F_POLLED) &&
 755	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
 756		return io_setup_async_msg(req, kmsg, issue_flags);
 757
 758	if (!io_check_multishot(req, issue_flags))
 759		return io_setup_async_msg(req, kmsg, issue_flags);
 760
 761retry_multishot:
 762	if (io_do_buffer_select(req)) {
 763		void __user *buf;
 764		size_t len = sr->len;
 765
 766		buf = io_buffer_select(req, &len, issue_flags);
 767		if (!buf)
 768			return -ENOBUFS;
 769
 770		if (req->flags & REQ_F_APOLL_MULTISHOT) {
 771			ret = io_recvmsg_prep_multishot(kmsg, sr, &buf, &len);
 772			if (ret) {
 773				io_kbuf_recycle(req, issue_flags);
 774				return ret;
 775			}
 776		}
 777
 778		kmsg->fast_iov[0].iov_base = buf;
 779		kmsg->fast_iov[0].iov_len = len;
 780		iov_iter_init(&kmsg->msg.msg_iter, ITER_DEST, kmsg->fast_iov, 1,
 781				len);
 782	}
 783
 784	flags = sr->msg_flags;
 785	if (force_nonblock)
 786		flags |= MSG_DONTWAIT;
 787	if (flags & MSG_WAITALL)
 788		min_ret = iov_iter_count(&kmsg->msg.msg_iter);
 789
 790	kmsg->msg.msg_get_inq = 1;
 791	if (req->flags & REQ_F_APOLL_MULTISHOT)
 792		ret = io_recvmsg_multishot(sock, sr, kmsg, flags,
 793					   &mshot_finished);
 794	else
 795		ret = __sys_recvmsg_sock(sock, &kmsg->msg, sr->umsg,
 796					 kmsg->uaddr, flags);
 797
 798	if (ret < min_ret) {
 799		if (ret == -EAGAIN && force_nonblock) {
 800			ret = io_setup_async_msg(req, kmsg, issue_flags);
 801			if (ret == -EAGAIN && (issue_flags & IO_URING_F_MULTISHOT)) {
 802				io_kbuf_recycle(req, issue_flags);
 803				return IOU_ISSUE_SKIP_COMPLETE;
 804			}
 805			return ret;
 806		}
 807		if (ret > 0 && io_net_retry(sock, flags)) {
 808			sr->done_io += ret;
 809			req->flags |= REQ_F_PARTIAL_IO;
 810			return io_setup_async_msg(req, kmsg, issue_flags);
 811		}
 812		if (ret == -ERESTARTSYS)
 813			ret = -EINTR;
 814		req_set_fail(req);
 815	} else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
 816		req_set_fail(req);
 817	}
 818
 819	if (ret > 0)
 820		ret += sr->done_io;
 821	else if (sr->done_io)
 822		ret = sr->done_io;
 823	else
 824		io_kbuf_recycle(req, issue_flags);
 825
 826	cflags = io_put_kbuf(req, issue_flags);
 827	if (kmsg->msg.msg_inq)
 828		cflags |= IORING_CQE_F_SOCK_NONEMPTY;
 829
 830	if (!io_recv_finish(req, &ret, cflags, mshot_finished, issue_flags))
 831		goto retry_multishot;
 832
 833	if (mshot_finished) {
 834		/* fast path, check for non-NULL to avoid function call */
 835		if (kmsg->free_iov)
 836			kfree(kmsg->free_iov);
 837		io_netmsg_recycle(req, issue_flags);
 838		req->flags &= ~REQ_F_NEED_CLEANUP;
 839	}
 840
 841	return ret;
 842}
 843
 844int io_recv(struct io_kiocb *req, unsigned int issue_flags)
 845{
 846	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 847	struct msghdr msg;
 848	struct socket *sock;
 849	struct iovec iov;
 850	unsigned int cflags;
 851	unsigned flags;
 852	int ret, min_ret = 0;
 853	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
 854	size_t len = sr->len;
 855
 856	if (!(req->flags & REQ_F_POLLED) &&
 857	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
 858		return -EAGAIN;
 859
 860	if (!io_check_multishot(req, issue_flags))
 861		return -EAGAIN;
 862
 863	sock = sock_from_file(req->file);
 864	if (unlikely(!sock))
 865		return -ENOTSOCK;
 866
 867retry_multishot:
 868	if (io_do_buffer_select(req)) {
 869		void __user *buf;
 870
 871		buf = io_buffer_select(req, &len, issue_flags);
 872		if (!buf)
 873			return -ENOBUFS;
 874		sr->buf = buf;
 875	}
 876
 877	ret = import_single_range(ITER_DEST, sr->buf, len, &iov, &msg.msg_iter);
 878	if (unlikely(ret))
 879		goto out_free;
 880
 881	msg.msg_name = NULL;
 882	msg.msg_namelen = 0;
 883	msg.msg_control = NULL;
 884	msg.msg_get_inq = 1;
 885	msg.msg_flags = 0;
 886	msg.msg_controllen = 0;
 887	msg.msg_iocb = NULL;
 888	msg.msg_ubuf = NULL;
 889
 890	flags = sr->msg_flags;
 891	if (force_nonblock)
 892		flags |= MSG_DONTWAIT;
 893	if (flags & MSG_WAITALL)
 894		min_ret = iov_iter_count(&msg.msg_iter);
 895
 896	ret = sock_recvmsg(sock, &msg, flags);
 897	if (ret < min_ret) {
 898		if (ret == -EAGAIN && force_nonblock) {
 899			if (issue_flags & IO_URING_F_MULTISHOT) {
 900				io_kbuf_recycle(req, issue_flags);
 901				return IOU_ISSUE_SKIP_COMPLETE;
 902			}
 903
 904			return -EAGAIN;
 905		}
 906		if (ret > 0 && io_net_retry(sock, flags)) {
 907			sr->len -= ret;
 908			sr->buf += ret;
 909			sr->done_io += ret;
 910			req->flags |= REQ_F_PARTIAL_IO;
 911			return -EAGAIN;
 912		}
 913		if (ret == -ERESTARTSYS)
 914			ret = -EINTR;
 915		req_set_fail(req);
 916	} else if ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
 917out_free:
 918		req_set_fail(req);
 919	}
 920
 921	if (ret > 0)
 922		ret += sr->done_io;
 923	else if (sr->done_io)
 924		ret = sr->done_io;
 925	else
 926		io_kbuf_recycle(req, issue_flags);
 927
 928	cflags = io_put_kbuf(req, issue_flags);
 929	if (msg.msg_inq)
 930		cflags |= IORING_CQE_F_SOCK_NONEMPTY;
 931
 932	if (!io_recv_finish(req, &ret, cflags, ret <= 0, issue_flags))
 933		goto retry_multishot;
 934
 935	return ret;
 936}
 937
 938void io_send_zc_cleanup(struct io_kiocb *req)
 939{
 940	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
 941	struct io_async_msghdr *io;
 942
 943	if (req_has_async_data(req)) {
 944		io = req->async_data;
 945		/* might be ->fast_iov if *msg_copy_hdr failed */
 946		if (io->free_iov != io->fast_iov)
 947			kfree(io->free_iov);
 948	}
 949	if (zc->notif) {
 950		io_notif_flush(zc->notif);
 951		zc->notif = NULL;
 952	}
 953}
 954
 955#define IO_ZC_FLAGS_COMMON (IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_FIXED_BUF)
 956#define IO_ZC_FLAGS_VALID  (IO_ZC_FLAGS_COMMON | IORING_SEND_ZC_REPORT_USAGE)
 957
 958int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 959{
 960	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
 961	struct io_ring_ctx *ctx = req->ctx;
 962	struct io_kiocb *notif;
 963
 964	if (unlikely(READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3)))
 965		return -EINVAL;
 966	/* we don't support IOSQE_CQE_SKIP_SUCCESS just yet */
 967	if (req->flags & REQ_F_CQE_SKIP)
 968		return -EINVAL;
 969
 970	notif = zc->notif = io_alloc_notif(ctx);
 971	if (!notif)
 972		return -ENOMEM;
 973	notif->cqe.user_data = req->cqe.user_data;
 974	notif->cqe.res = 0;
 975	notif->cqe.flags = IORING_CQE_F_NOTIF;
 976	req->flags |= REQ_F_NEED_CLEANUP;
 977
 978	zc->flags = READ_ONCE(sqe->ioprio);
 979	if (unlikely(zc->flags & ~IO_ZC_FLAGS_COMMON)) {
 980		if (zc->flags & ~IO_ZC_FLAGS_VALID)
 981			return -EINVAL;
 982		if (zc->flags & IORING_SEND_ZC_REPORT_USAGE) {
 983			io_notif_set_extended(notif);
 984			io_notif_to_data(notif)->zc_report = true;
 985		}
 986	}
 987
 988	if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
 989		unsigned idx = READ_ONCE(sqe->buf_index);
 990
 991		if (unlikely(idx >= ctx->nr_user_bufs))
 992			return -EFAULT;
 993		idx = array_index_nospec(idx, ctx->nr_user_bufs);
 994		req->imu = READ_ONCE(ctx->user_bufs[idx]);
 995		io_req_set_rsrc_node(notif, ctx, 0);
 996	}
 997
 998	if (req->opcode == IORING_OP_SEND_ZC) {
 999		if (READ_ONCE(sqe->__pad3[0]))
1000			return -EINVAL;
1001		zc->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
1002		zc->addr_len = READ_ONCE(sqe->addr_len);
1003	} else {
1004		if (unlikely(sqe->addr2 || sqe->file_index))
1005			return -EINVAL;
1006		if (unlikely(zc->flags & IORING_RECVSEND_FIXED_BUF))
1007			return -EINVAL;
1008	}
1009
1010	zc->buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
1011	zc->len = READ_ONCE(sqe->len);
1012	zc->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
1013	if (zc->msg_flags & MSG_DONTWAIT)
1014		req->flags |= REQ_F_NOWAIT;
1015
1016	zc->done_io = 0;
1017
1018#ifdef CONFIG_COMPAT
1019	if (req->ctx->compat)
1020		zc->msg_flags |= MSG_CMSG_COMPAT;
1021#endif
1022	return 0;
1023}
1024
1025static int io_sg_from_iter_iovec(struct sock *sk, struct sk_buff *skb,
1026				 struct iov_iter *from, size_t length)
1027{
1028	skb_zcopy_downgrade_managed(skb);
1029	return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);
1030}
1031
1032static int io_sg_from_iter(struct sock *sk, struct sk_buff *skb,
1033			   struct iov_iter *from, size_t length)
1034{
1035	struct skb_shared_info *shinfo = skb_shinfo(skb);
1036	int frag = shinfo->nr_frags;
1037	int ret = 0;
1038	struct bvec_iter bi;
1039	ssize_t copied = 0;
1040	unsigned long truesize = 0;
1041
1042	if (!frag)
1043		shinfo->flags |= SKBFL_MANAGED_FRAG_REFS;
1044	else if (unlikely(!skb_zcopy_managed(skb)))
1045		return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);
1046
1047	bi.bi_size = min(from->count, length);
1048	bi.bi_bvec_done = from->iov_offset;
1049	bi.bi_idx = 0;
1050
1051	while (bi.bi_size && frag < MAX_SKB_FRAGS) {
1052		struct bio_vec v = mp_bvec_iter_bvec(from->bvec, bi);
1053
1054		copied += v.bv_len;
1055		truesize += PAGE_ALIGN(v.bv_len + v.bv_offset);
1056		__skb_fill_page_desc_noacc(shinfo, frag++, v.bv_page,
1057					   v.bv_offset, v.bv_len);
1058		bvec_iter_advance_single(from->bvec, &bi, v.bv_len);
1059	}
1060	if (bi.bi_size)
1061		ret = -EMSGSIZE;
1062
1063	shinfo->nr_frags = frag;
1064	from->bvec += bi.bi_idx;
1065	from->nr_segs -= bi.bi_idx;
1066	from->count -= copied;
1067	from->iov_offset = bi.bi_bvec_done;
1068
1069	skb->data_len += copied;
1070	skb->len += copied;
1071	skb->truesize += truesize;
1072
1073	if (sk && sk->sk_type == SOCK_STREAM) {
1074		sk_wmem_queued_add(sk, truesize);
1075		if (!skb_zcopy_pure(skb))
1076			sk_mem_charge(sk, truesize);
1077	} else {
1078		refcount_add(truesize, &skb->sk->sk_wmem_alloc);
1079	}
1080	return ret;
1081}
1082
1083int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
1084{
1085	struct sockaddr_storage __address;
1086	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
1087	struct msghdr msg;
1088	struct iovec iov;
1089	struct socket *sock;
1090	unsigned msg_flags;
1091	int ret, min_ret = 0;
1092
1093	sock = sock_from_file(req->file);
1094	if (unlikely(!sock))
1095		return -ENOTSOCK;
1096	if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
1097		return -EOPNOTSUPP;
1098
1099	msg.msg_name = NULL;
1100	msg.msg_control = NULL;
1101	msg.msg_controllen = 0;
1102	msg.msg_namelen = 0;
1103
1104	if (zc->addr) {
1105		if (req_has_async_data(req)) {
1106			struct io_async_msghdr *io = req->async_data;
1107
1108			msg.msg_name = &io->addr;
1109		} else {
1110			ret = move_addr_to_kernel(zc->addr, zc->addr_len, &__address);
1111			if (unlikely(ret < 0))
1112				return ret;
1113			msg.msg_name = (struct sockaddr *)&__address;
1114		}
1115		msg.msg_namelen = zc->addr_len;
1116	}
1117
1118	if (!(req->flags & REQ_F_POLLED) &&
1119	    (zc->flags & IORING_RECVSEND_POLL_FIRST))
1120		return io_setup_async_addr(req, &__address, issue_flags);
1121
1122	if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
1123		ret = io_import_fixed(ITER_SOURCE, &msg.msg_iter, req->imu,
1124					(u64)(uintptr_t)zc->buf, zc->len);
1125		if (unlikely(ret))
1126			return ret;
1127		msg.sg_from_iter = io_sg_from_iter;
1128	} else {
1129		io_notif_set_extended(zc->notif);
1130		ret = import_single_range(ITER_SOURCE, zc->buf, zc->len, &iov,
1131					  &msg.msg_iter);
1132		if (unlikely(ret))
1133			return ret;
1134		ret = io_notif_account_mem(zc->notif, zc->len);
1135		if (unlikely(ret))
1136			return ret;
1137		msg.sg_from_iter = io_sg_from_iter_iovec;
1138	}
1139
1140	msg_flags = zc->msg_flags | MSG_ZEROCOPY;
1141	if (issue_flags & IO_URING_F_NONBLOCK)
1142		msg_flags |= MSG_DONTWAIT;
1143	if (msg_flags & MSG_WAITALL)
1144		min_ret = iov_iter_count(&msg.msg_iter);
1145
1146	msg.msg_flags = msg_flags;
1147	msg.msg_ubuf = &io_notif_to_data(zc->notif)->uarg;
1148	ret = sock_sendmsg(sock, &msg);
1149
1150	if (unlikely(ret < min_ret)) {
1151		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
1152			return io_setup_async_addr(req, &__address, issue_flags);
1153
1154		if (ret > 0 && io_net_retry(sock, msg.msg_flags)) {
1155			zc->len -= ret;
1156			zc->buf += ret;
1157			zc->done_io += ret;
1158			req->flags |= REQ_F_PARTIAL_IO;
1159			return io_setup_async_addr(req, &__address, issue_flags);
1160		}
1161		if (ret == -ERESTARTSYS)
1162			ret = -EINTR;
1163		req_set_fail(req);
1164	}
1165
1166	if (ret >= 0)
1167		ret += zc->done_io;
1168	else if (zc->done_io)
1169		ret = zc->done_io;
1170
1171	/*
1172	 * If we're in io-wq we can't rely on tw ordering guarantees, defer
1173	 * flushing notif to io_send_zc_cleanup()
1174	 */
1175	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
1176		io_notif_flush(zc->notif);
1177		req->flags &= ~REQ_F_NEED_CLEANUP;
1178	}
1179	io_req_set_res(req, ret, IORING_CQE_F_MORE);
1180	return IOU_OK;
1181}
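/*
 * Illustrative sketch (userspace, liburing >= 2.3 assumed): a zero-copy
 * send completes in two steps. The first CQE carries the byte count and
 * IORING_CQE_F_MORE; a later CQE with IORING_CQE_F_NOTIF signals that
 * the kernel has dropped its references and the buffer may be reused.
 */
#include <liburing.h>

static void send_zc_once(struct io_uring *ring, int fd,
			 const void *buf, size_t len)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;
	int seen_notif = 0;

	io_uring_prep_send_zc(sqe, fd, buf, len, 0, 0);
	io_uring_submit(ring);

	while (!seen_notif) {
		io_uring_wait_cqe(ring, &cqe);
		if (cqe->flags & IORING_CQE_F_NOTIF)
			seen_notif = 1;	/* safe to reuse buf after this */
		io_uring_cqe_seen(ring, cqe);
	}
}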
1182
1183int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
1184{
1185	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
1186	struct io_async_msghdr iomsg, *kmsg;
1187	struct socket *sock;
1188	unsigned flags;
1189	int ret, min_ret = 0;
1190
1191	io_notif_set_extended(sr->notif);
1192
1193	sock = sock_from_file(req->file);
1194	if (unlikely(!sock))
1195		return -ENOTSOCK;
1196	if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
1197		return -EOPNOTSUPP;
1198
1199	if (req_has_async_data(req)) {
1200		kmsg = req->async_data;
1201	} else {
1202		ret = io_sendmsg_copy_hdr(req, &iomsg);
1203		if (ret)
1204			return ret;
1205		kmsg = &iomsg;
1206	}
1207
1208	if (!(req->flags & REQ_F_POLLED) &&
1209	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
1210		return io_setup_async_msg(req, kmsg, issue_flags);
1211
1212	flags = sr->msg_flags | MSG_ZEROCOPY;
1213	if (issue_flags & IO_URING_F_NONBLOCK)
1214		flags |= MSG_DONTWAIT;
1215	if (flags & MSG_WAITALL)
1216		min_ret = iov_iter_count(&kmsg->msg.msg_iter);
1217
1218	kmsg->msg.msg_ubuf = &io_notif_to_data(sr->notif)->uarg;
1219	kmsg->msg.sg_from_iter = io_sg_from_iter_iovec;
1220	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
1221
1222	if (unlikely(ret < min_ret)) {
1223		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
1224			return io_setup_async_msg(req, kmsg, issue_flags);
1225
1226		if (ret > 0 && io_net_retry(sock, flags)) {
1227			sr->done_io += ret;
1228			req->flags |= REQ_F_PARTIAL_IO;
1229			return io_setup_async_msg(req, kmsg, issue_flags);
1230		}
1231		if (ret == -ERESTARTSYS)
1232			ret = -EINTR;
1233		req_set_fail(req);
1234	}
1235	/* fast path, check for non-NULL to avoid function call */
1236	if (kmsg->free_iov) {
1237		kfree(kmsg->free_iov);
1238		kmsg->free_iov = NULL;
1239	}
1240
1241	io_netmsg_recycle(req, issue_flags);
1242	if (ret >= 0)
1243		ret += sr->done_io;
1244	else if (sr->done_io)
1245		ret = sr->done_io;
1246
1247	/*
1248	 * If we're in io-wq we can't rely on tw ordering guarantees, defer
1249	 * flushing notif to io_send_zc_cleanup()
1250	 */
1251	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
1252		io_notif_flush(sr->notif);
1253		req->flags &= ~REQ_F_NEED_CLEANUP;
1254	}
1255	io_req_set_res(req, ret, IORING_CQE_F_MORE);
1256	return IOU_OK;
1257}
1258
1259void io_sendrecv_fail(struct io_kiocb *req)
1260{
1261	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
1262
1263	if (req->flags & REQ_F_PARTIAL_IO)
1264		req->cqe.res = sr->done_io;
1265
1266	if ((req->flags & REQ_F_NEED_CLEANUP) &&
1267	    (req->opcode == IORING_OP_SEND_ZC || req->opcode == IORING_OP_SENDMSG_ZC))
1268		req->cqe.flags |= IORING_CQE_F_MORE;
1269}
1270
1271int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1272{
1273	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
1274	unsigned flags;
1275
1276	if (sqe->len || sqe->buf_index)
1277		return -EINVAL;
1278
1279	accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
1280	accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
1281	accept->flags = READ_ONCE(sqe->accept_flags);
1282	accept->nofile = rlimit(RLIMIT_NOFILE);
1283	flags = READ_ONCE(sqe->ioprio);
1284	if (flags & ~IORING_ACCEPT_MULTISHOT)
1285		return -EINVAL;
1286
1287	accept->file_slot = READ_ONCE(sqe->file_index);
1288	if (accept->file_slot) {
1289		if (accept->flags & SOCK_CLOEXEC)
1290			return -EINVAL;
1291		if (flags & IORING_ACCEPT_MULTISHOT &&
1292		    accept->file_slot != IORING_FILE_INDEX_ALLOC)
1293			return -EINVAL;
1294	}
1295	if (accept->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
1296		return -EINVAL;
1297	if (SOCK_NONBLOCK != O_NONBLOCK && (accept->flags & SOCK_NONBLOCK))
1298		accept->flags = (accept->flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
1299	if (flags & IORING_ACCEPT_MULTISHOT)
1300		req->flags |= REQ_F_APOLL_MULTISHOT;
1301	return 0;
1302}
1303
1304int io_accept(struct io_kiocb *req, unsigned int issue_flags)
1305{
1306	struct io_ring_ctx *ctx = req->ctx;
1307	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
1308	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
1309	unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
1310	bool fixed = !!accept->file_slot;
1311	struct file *file;
1312	int ret, fd;
1313
1314	if (!io_check_multishot(req, issue_flags))
1315		return -EAGAIN;
1316retry:
1317	if (!fixed) {
1318		fd = __get_unused_fd_flags(accept->flags, accept->nofile);
1319		if (unlikely(fd < 0))
1320			return fd;
1321	}
1322	file = do_accept(req->file, file_flags, accept->addr, accept->addr_len,
1323			 accept->flags);
1324	if (IS_ERR(file)) {
1325		if (!fixed)
1326			put_unused_fd(fd);
1327		ret = PTR_ERR(file);
1328		if (ret == -EAGAIN && force_nonblock) {
1329			/*
1330			 * if it's multishot and polled, we don't need to
1331			 * return EAGAIN to arm the poll infra since it
1332			 * has already been done
1333			 */
1334			if (issue_flags & IO_URING_F_MULTISHOT)
1335				ret = IOU_ISSUE_SKIP_COMPLETE;
1336			return ret;
1337		}
1338		if (ret == -ERESTARTSYS)
1339			ret = -EINTR;
1340		req_set_fail(req);
1341	} else if (!fixed) {
1342		fd_install(fd, file);
1343		ret = fd;
1344	} else {
1345		ret = io_fixed_fd_install(req, issue_flags, file,
1346						accept->file_slot);
1347	}
1348
1349	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
1350		io_req_set_res(req, ret, 0);
1351		return IOU_OK;
1352	}
1353
1354	if (ret < 0)
1355		return ret;
1356	if (io_aux_cqe(ctx, issue_flags & IO_URING_F_COMPLETE_DEFER,
1357		       req->cqe.user_data, ret, IORING_CQE_F_MORE, true))
1358		goto retry;
1359
1360	return -ECANCELED;
1361}
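/*
 * Illustrative sketch (userspace, liburing): one multishot accept SQE
 * yields a CQE per accepted connection, each carrying the new fd in
 * cqe->res with IORING_CQE_F_MORE set while the request stays armed.
 */
#include <liburing.h>

static void arm_multishot_accept(struct io_uring *ring, int listen_fd)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	io_uring_prep_multishot_accept(sqe, listen_fd, NULL, NULL, 0);
	io_uring_submit(ring);
	/* each completion: cqe->res is an accepted fd, or -errno */
}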
1362
1363int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1364{
1365	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);
1366
1367	if (sqe->addr || sqe->rw_flags || sqe->buf_index)
1368		return -EINVAL;
1369
1370	sock->domain = READ_ONCE(sqe->fd);
1371	sock->type = READ_ONCE(sqe->off);
1372	sock->protocol = READ_ONCE(sqe->len);
1373	sock->file_slot = READ_ONCE(sqe->file_index);
1374	sock->nofile = rlimit(RLIMIT_NOFILE);
1375
1376	sock->flags = sock->type & ~SOCK_TYPE_MASK;
1377	if (sock->file_slot && (sock->flags & SOCK_CLOEXEC))
1378		return -EINVAL;
1379	if (sock->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
1380		return -EINVAL;
1381	return 0;
1382}
1383
1384int io_socket(struct io_kiocb *req, unsigned int issue_flags)
1385{
1386	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);
1387	bool fixed = !!sock->file_slot;
1388	struct file *file;
1389	int ret, fd;
1390
1391	if (!fixed) {
1392		fd = __get_unused_fd_flags(sock->flags, sock->nofile);
1393		if (unlikely(fd < 0))
1394			return fd;
1395	}
1396	file = __sys_socket_file(sock->domain, sock->type, sock->protocol);
1397	if (IS_ERR(file)) {
1398		if (!fixed)
1399			put_unused_fd(fd);
1400		ret = PTR_ERR(file);
1401		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
1402			return -EAGAIN;
1403		if (ret == -ERESTARTSYS)
1404			ret = -EINTR;
1405		req_set_fail(req);
1406	} else if (!fixed) {
1407		fd_install(fd, file);
1408		ret = fd;
1409	} else {
1410		ret = io_fixed_fd_install(req, issue_flags, file,
1411					    sock->file_slot);
1412	}
1413	io_req_set_res(req, ret, 0);
1414	return IOU_OK;
1415}
1416
1417int io_connect_prep_async(struct io_kiocb *req)
1418{
1419	struct io_async_connect *io = req->async_data;
1420	struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);
1421
1422	return move_addr_to_kernel(conn->addr, conn->addr_len, &io->address);
1423}
1424
1425int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1426{
1427	struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);
1428
1429	if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
1430		return -EINVAL;
1431
1432	conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
1433	conn->addr_len =  READ_ONCE(sqe->addr2);
1434	conn->in_progress = false;
1435	return 0;
1436}
1437
1438int io_connect(struct io_kiocb *req, unsigned int issue_flags)
1439{
1440	struct io_connect *connect = io_kiocb_to_cmd(req, struct io_connect);
1441	struct io_async_connect __io, *io;
1442	unsigned file_flags;
1443	int ret;
1444	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
1445
1446	if (connect->in_progress) {
1447		struct socket *socket;
1448
1449		ret = -ENOTSOCK;
1450		socket = sock_from_file(req->file);
1451		if (socket)
1452			ret = sock_error(socket->sk);
1453		goto out;
1454	}
1455
1456	if (req_has_async_data(req)) {
1457		io = req->async_data;
1458	} else {
1459		ret = move_addr_to_kernel(connect->addr,
1460						connect->addr_len,
1461						&__io.address);
1462		if (ret)
1463			goto out;
1464		io = &__io;
1465	}
1466
1467	file_flags = force_nonblock ? O_NONBLOCK : 0;
1468
1469	ret = __sys_connect_file(req->file, &io->address,
1470					connect->addr_len, file_flags);
1471	if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
1472		if (ret == -EINPROGRESS) {
1473			connect->in_progress = true;
1474		} else {
1475			if (req_has_async_data(req))
1476				return -EAGAIN;
1477			if (io_alloc_async_data(req)) {
1478				ret = -ENOMEM;
1479				goto out;
1480			}
1481			memcpy(req->async_data, &__io, sizeof(__io));
1482		}
1483		return -EAGAIN;
1484	}
1485	if (ret == -ERESTARTSYS)
1486		ret = -EINTR;
1487out:
1488	if (ret < 0)
1489		req_set_fail(req);
1490	io_req_set_res(req, ret, 0);
1491	return IOU_OK;
1492}
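/*
 * Illustrative note (not part of the kernel source): the in_progress
 * handling mirrors the classic non-blocking connect() pattern. A first
 * attempt returning -EINPROGRESS arms poll via the -EAGAIN return; once
 * the socket becomes writable the request is re-issued, and the branch
 * at the top reads sock_error() (the SO_ERROR value) instead of calling
 * connect() again, just as select()/poll() based userspace code does.
 */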
1493
1494void io_netmsg_cache_free(struct io_cache_entry *entry)
1495{
1496	kfree(container_of(entry, struct io_async_msghdr, cache));
1497}
1498#endif
io_uring/net.c (v6.9.4)
   1// SPDX-License-Identifier: GPL-2.0
   2#include <linux/kernel.h>
   3#include <linux/errno.h>
   4#include <linux/file.h>
   5#include <linux/slab.h>
   6#include <linux/net.h>
   7#include <linux/compat.h>
   8#include <net/compat.h>
   9#include <linux/io_uring.h>
  10
  11#include <uapi/linux/io_uring.h>
  12
  13#include "io_uring.h"
  14#include "kbuf.h"
  15#include "alloc_cache.h"
  16#include "net.h"
  17#include "notif.h"
  18#include "rsrc.h"
  19
  20#if defined(CONFIG_NET)
  21struct io_shutdown {
  22	struct file			*file;
  23	int				how;
  24};
  25
  26struct io_accept {
  27	struct file			*file;
  28	struct sockaddr __user		*addr;
  29	int __user			*addr_len;
  30	int				flags;
  31	u32				file_slot;
  32	unsigned long			nofile;
  33};
  34
  35struct io_socket {
  36	struct file			*file;
  37	int				domain;
  38	int				type;
  39	int				protocol;
  40	int				flags;
  41	u32				file_slot;
  42	unsigned long			nofile;
  43};
  44
  45struct io_connect {
  46	struct file			*file;
  47	struct sockaddr __user		*addr;
  48	int				addr_len;
  49	bool				in_progress;
  50	bool				seen_econnaborted;
  51};
  52
  53struct io_sr_msg {
  54	struct file			*file;
  55	union {
  56		struct compat_msghdr __user	*umsg_compat;
  57		struct user_msghdr __user	*umsg;
  58		void __user			*buf;
  59	};
  60	unsigned			len;
  61	unsigned			done_io;
  62	unsigned			msg_flags;
  63	unsigned			nr_multishot_loops;
  64	u16				flags;
  65	/* initialised and used only by !msg send variants */
  66	u16				addr_len;
  67	u16				buf_group;
  68	void __user			*addr;
  69	void __user			*msg_control;
  70	/* used only for send zerocopy */
  71	struct io_kiocb 		*notif;
  72};
  73
  74/*
  75 * Number of times we'll try and do receives if there's more data. If we
  76 * exceed this limit, then add us to the back of the queue and retry from
  77 * there. This helps fairness between flooding clients.
  78 */
  79#define MULTISHOT_MAX_RETRY	32
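/*
 * Illustrative note (not part of the kernel source): this cap is new
 * relative to v6.2. Where the older code retried a multishot receive
 * inline for as long as data kept arriving, v6.9.4 gives up after 32
 * consecutive hits and requeues the request, so one flooding socket
 * cannot starve the other work on the same ring.
 */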
  80
  81int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
  82{
  83	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
  84
  85	if (unlikely(sqe->off || sqe->addr || sqe->rw_flags ||
  86		     sqe->buf_index || sqe->splice_fd_in))
  87		return -EINVAL;
  88
  89	shutdown->how = READ_ONCE(sqe->len);
  90	req->flags |= REQ_F_FORCE_ASYNC;
  91	return 0;
  92}
  93
  94int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
  95{
  96	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
  97	struct socket *sock;
  98	int ret;
  99
 100	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
 101
 102	sock = sock_from_file(req->file);
 103	if (unlikely(!sock))
 104		return -ENOTSOCK;
 105
 106	ret = __sys_shutdown_sock(sock, shutdown->how);
 107	io_req_set_res(req, ret, 0);
 108	return IOU_OK;
 109}
 110
 111static bool io_net_retry(struct socket *sock, int flags)
 112{
 113	if (!(flags & MSG_WAITALL))
 114		return false;
 115	return sock->type == SOCK_STREAM || sock->type == SOCK_SEQPACKET;
 116}
 117
 118static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
 119{
 120	struct io_async_msghdr *hdr = req->async_data;
 121
 122	if (!req_has_async_data(req) || issue_flags & IO_URING_F_UNLOCKED)
 123		return;
 124
 125	/* Let normal cleanup path reap it if we fail adding to the cache */
 126	if (io_alloc_cache_put(&req->ctx->netmsg_cache, &hdr->cache)) {
 127		req->async_data = NULL;
 128		req->flags &= ~REQ_F_ASYNC_DATA;
 129	}
 130}
 131
 132static struct io_async_msghdr *io_msg_alloc_async(struct io_kiocb *req,
 133						  unsigned int issue_flags)
 134{
 135	struct io_ring_ctx *ctx = req->ctx;
 136	struct io_cache_entry *entry;
 137	struct io_async_msghdr *hdr;
 138
 139	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
 140		entry = io_alloc_cache_get(&ctx->netmsg_cache);
 141		if (entry) {
 142			hdr = container_of(entry, struct io_async_msghdr, cache);
 143			hdr->free_iov = NULL;
 144			req->flags |= REQ_F_ASYNC_DATA;
 145			req->async_data = hdr;
 146			return hdr;
 147		}
 148	}
 149
 150	if (!io_alloc_async_data(req)) {
 151		hdr = req->async_data;
 152		hdr->free_iov = NULL;
 153		return hdr;
 154	}
 155	return NULL;
 156}
 157
 158static inline struct io_async_msghdr *io_msg_alloc_async_prep(struct io_kiocb *req)
 159{
 160	/* ->prep_async is always called from the submission context */
 161	return io_msg_alloc_async(req, 0);
 162}
 163
 164static int io_setup_async_msg(struct io_kiocb *req,
 165			      struct io_async_msghdr *kmsg,
 166			      unsigned int issue_flags)
 167{
 168	struct io_async_msghdr *async_msg;
 169
 170	if (req_has_async_data(req))
 171		return -EAGAIN;
 172	async_msg = io_msg_alloc_async(req, issue_flags);
 173	if (!async_msg) {
 174		kfree(kmsg->free_iov);
 175		return -ENOMEM;
 176	}
 177	req->flags |= REQ_F_NEED_CLEANUP;
 178	memcpy(async_msg, kmsg, sizeof(*kmsg));
 179	if (async_msg->msg.msg_name)
 180		async_msg->msg.msg_name = &async_msg->addr;
 181
 182	if ((req->flags & REQ_F_BUFFER_SELECT) && !async_msg->msg.msg_iter.nr_segs)
 183		return -EAGAIN;
 184
  185	/* if we're using fast_iov, set it to the new one */
 186	if (iter_is_iovec(&kmsg->msg.msg_iter) && !kmsg->free_iov) {
 187		size_t fast_idx = iter_iov(&kmsg->msg.msg_iter) - kmsg->fast_iov;
 188		async_msg->msg.msg_iter.__iov = &async_msg->fast_iov[fast_idx];
 189	}
 190
 191	return -EAGAIN;
 192}
 193
 194#ifdef CONFIG_COMPAT
 195static int io_compat_msg_copy_hdr(struct io_kiocb *req,
 196				  struct io_async_msghdr *iomsg,
 197				  struct compat_msghdr *msg, int ddir)
 198{
 199	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 200	struct compat_iovec __user *uiov;
 201	int ret;
 202
 203	if (copy_from_user(msg, sr->umsg_compat, sizeof(*msg)))
 204		return -EFAULT;
 205
 206	uiov = compat_ptr(msg->msg_iov);
 207	if (req->flags & REQ_F_BUFFER_SELECT) {
 208		compat_ssize_t clen;
 209
 210		iomsg->free_iov = NULL;
 211		if (msg->msg_iovlen == 0) {
 212			sr->len = 0;
 213		} else if (msg->msg_iovlen > 1) {
 214			return -EINVAL;
 215		} else {
 216			if (!access_ok(uiov, sizeof(*uiov)))
 217				return -EFAULT;
 218			if (__get_user(clen, &uiov->iov_len))
 219				return -EFAULT;
 220			if (clen < 0)
 221				return -EINVAL;
 222			sr->len = clen;
 223		}
 224
 225		return 0;
 226	}
 227
 228	iomsg->free_iov = iomsg->fast_iov;
 229	ret = __import_iovec(ddir, (struct iovec __user *)uiov, msg->msg_iovlen,
 230				UIO_FASTIOV, &iomsg->free_iov,
 231				&iomsg->msg.msg_iter, true);
 232	if (unlikely(ret < 0))
 233		return ret;
 234
 235	return 0;
 236}
 237#endif
 238
 239static int io_msg_copy_hdr(struct io_kiocb *req, struct io_async_msghdr *iomsg,
 240			   struct user_msghdr *msg, int ddir)
 241{
 242	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 243	int ret;
 244
 245	if (!user_access_begin(sr->umsg, sizeof(*sr->umsg)))
 246		return -EFAULT;
 247
 248	ret = -EFAULT;
 249	unsafe_get_user(msg->msg_name, &sr->umsg->msg_name, ua_end);
 250	unsafe_get_user(msg->msg_namelen, &sr->umsg->msg_namelen, ua_end);
 251	unsafe_get_user(msg->msg_iov, &sr->umsg->msg_iov, ua_end);
 252	unsafe_get_user(msg->msg_iovlen, &sr->umsg->msg_iovlen, ua_end);
 253	unsafe_get_user(msg->msg_control, &sr->umsg->msg_control, ua_end);
 254	unsafe_get_user(msg->msg_controllen, &sr->umsg->msg_controllen, ua_end);
 255	msg->msg_flags = 0;
 256
 257	if (req->flags & REQ_F_BUFFER_SELECT) {
 258		if (msg->msg_iovlen == 0) {
 259			sr->len = iomsg->fast_iov[0].iov_len = 0;
 260			iomsg->fast_iov[0].iov_base = NULL;
 261			iomsg->free_iov = NULL;
 262		} else if (msg->msg_iovlen > 1) {
 263			ret = -EINVAL;
 264			goto ua_end;
 265		} else {
 266			/* we only need the length for provided buffers */
 267			if (!access_ok(&msg->msg_iov[0].iov_len, sizeof(__kernel_size_t)))
 268				goto ua_end;
 269			unsafe_get_user(iomsg->fast_iov[0].iov_len,
 270					&msg->msg_iov[0].iov_len, ua_end);
 271			sr->len = iomsg->fast_iov[0].iov_len;
 272			iomsg->free_iov = NULL;
 273		}
 274		ret = 0;
 275ua_end:
 276		user_access_end();
 277		return ret;
 278	}
 279
 280	user_access_end();
 281	iomsg->free_iov = iomsg->fast_iov;
 282	ret = __import_iovec(ddir, msg->msg_iov, msg->msg_iovlen, UIO_FASTIOV,
 283				&iomsg->free_iov, &iomsg->msg.msg_iter, false);
 284	if (unlikely(ret < 0))
 285		return ret;
 286
 287	return 0;
 288}
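/*
 * Illustrative sketch (kernel-style, made-up names): the block above is
 * the standard batched user-copy pattern. One user_access_begin() check
 * covers the whole struct, the unsafe_get_user() calls skip per-access
 * checks, and a fault jumps to the label that closes the window.
 */
static int read_pair(const int __user *uptr, int *a, int *b)
{
	if (!user_access_begin(uptr, 2 * sizeof(int)))
		return -EFAULT;
	unsafe_get_user(*a, &uptr[0], efault);
	unsafe_get_user(*b, &uptr[1], efault);
	user_access_end();
	return 0;
efault:
	user_access_end();
	return -EFAULT;
}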
 289
 290static int io_sendmsg_copy_hdr(struct io_kiocb *req,
 291			       struct io_async_msghdr *iomsg)
 292{
 293	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 294	struct user_msghdr msg;
 295	int ret;
 296
 297	iomsg->msg.msg_name = &iomsg->addr;
 298	iomsg->msg.msg_iter.nr_segs = 0;
 299
 300#ifdef CONFIG_COMPAT
 301	if (unlikely(req->ctx->compat)) {
 302		struct compat_msghdr cmsg;
 303
 304		ret = io_compat_msg_copy_hdr(req, iomsg, &cmsg, ITER_SOURCE);
 305		if (unlikely(ret))
 306			return ret;
 307
 308		return __get_compat_msghdr(&iomsg->msg, &cmsg, NULL);
 309	}
 310#endif
 311
 312	ret = io_msg_copy_hdr(req, iomsg, &msg, ITER_SOURCE);
 313	if (unlikely(ret))
 314		return ret;
 315
 316	ret = __copy_msghdr(&iomsg->msg, &msg, NULL);
 317
 318	/* save msg_control as sys_sendmsg() overwrites it */
 319	sr->msg_control = iomsg->msg.msg_control_user;
 320	return ret;
 321}
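/*
 * Illustrative note (not part of the kernel source): the msg_control
 * save above is also new relative to v6.2. The sendmsg core consumes
 * and rewrites msg_control while building the kernel cmsg buffer, so if
 * the request must be retried, io_sendmsg() restores the saved user
 * pointer from sr->msg_control before reissuing instead of re-parsing a
 * clobbered field.
 */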
 322
 323int io_send_prep_async(struct io_kiocb *req)
 324{
 325	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
 326	struct io_async_msghdr *io;
 327	int ret;
 328
 329	if (req_has_async_data(req))
 330		return 0;
 331	zc->done_io = 0;
 332	if (!zc->addr)
 333		return 0;
 334	io = io_msg_alloc_async_prep(req);
 335	if (!io)
 336		return -ENOMEM;
 337	ret = move_addr_to_kernel(zc->addr, zc->addr_len, &io->addr);
 338	return ret;
 339}
 340
 341static int io_setup_async_addr(struct io_kiocb *req,
 342			      struct sockaddr_storage *addr_storage,
 343			      unsigned int issue_flags)
 344{
 345	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 346	struct io_async_msghdr *io;
 347
 348	if (!sr->addr || req_has_async_data(req))
 349		return -EAGAIN;
 350	io = io_msg_alloc_async(req, issue_flags);
 351	if (!io)
 352		return -ENOMEM;
 353	memcpy(&io->addr, addr_storage, sizeof(io->addr));
 354	return -EAGAIN;
 355}
 356
 357int io_sendmsg_prep_async(struct io_kiocb *req)
 358{
 359	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 360	int ret;
 361
 362	sr->done_io = 0;
 363	if (!io_msg_alloc_async_prep(req))
 364		return -ENOMEM;
 365	ret = io_sendmsg_copy_hdr(req, req->async_data);
 366	if (!ret)
 367		req->flags |= REQ_F_NEED_CLEANUP;
 368	return ret;
 369}
 370
 371void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req)
 372{
 373	struct io_async_msghdr *io = req->async_data;
 374
 375	kfree(io->free_iov);
 376}
 377
 378int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 379{
 380	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 381
 382	sr->done_io = 0;
 383
 384	if (req->opcode == IORING_OP_SEND) {
 385		if (READ_ONCE(sqe->__pad3[0]))
 386			return -EINVAL;
 387		sr->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
 388		sr->addr_len = READ_ONCE(sqe->addr_len);
 389	} else if (sqe->addr2 || sqe->file_index) {
 390		return -EINVAL;
 391	}
 392
 393	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
 394	sr->len = READ_ONCE(sqe->len);
 395	sr->flags = READ_ONCE(sqe->ioprio);
 396	if (sr->flags & ~IORING_RECVSEND_POLL_FIRST)
 397		return -EINVAL;
 398	sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
 399	if (sr->msg_flags & MSG_DONTWAIT)
 400		req->flags |= REQ_F_NOWAIT;
 401
 402#ifdef CONFIG_COMPAT
 403	if (req->ctx->compat)
 404		sr->msg_flags |= MSG_CMSG_COMPAT;
 405#endif
 406	return 0;
 407}
 408
 409static void io_req_msg_cleanup(struct io_kiocb *req,
 410			       struct io_async_msghdr *kmsg,
 411			       unsigned int issue_flags)
 412{
 413	req->flags &= ~REQ_F_NEED_CLEANUP;
 414	/* fast path, check for non-NULL to avoid function call */
 415	if (kmsg->free_iov)
 416		kfree(kmsg->free_iov);
 417	io_netmsg_recycle(req, issue_flags);
 418}
 419
 420int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
 421{
 422	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 423	struct io_async_msghdr iomsg, *kmsg;
 424	struct socket *sock;
 425	unsigned flags;
 426	int min_ret = 0;
 427	int ret;
 428
 429	sock = sock_from_file(req->file);
 430	if (unlikely(!sock))
 431		return -ENOTSOCK;
 432
 433	if (req_has_async_data(req)) {
 434		kmsg = req->async_data;
 435		kmsg->msg.msg_control_user = sr->msg_control;
 436	} else {
 437		ret = io_sendmsg_copy_hdr(req, &iomsg);
 438		if (ret)
 439			return ret;
 440		kmsg = &iomsg;
 441	}
 442
 443	if (!(req->flags & REQ_F_POLLED) &&
 444	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
 445		return io_setup_async_msg(req, kmsg, issue_flags);
 446
 447	flags = sr->msg_flags;
 448	if (issue_flags & IO_URING_F_NONBLOCK)
 449		flags |= MSG_DONTWAIT;
 450	if (flags & MSG_WAITALL)
 451		min_ret = iov_iter_count(&kmsg->msg.msg_iter);
 452
 453	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
 454
 455	if (ret < min_ret) {
 456		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
 457			return io_setup_async_msg(req, kmsg, issue_flags);
 458		if (ret > 0 && io_net_retry(sock, flags)) {
 459			kmsg->msg.msg_controllen = 0;
 460			kmsg->msg.msg_control = NULL;
 461			sr->done_io += ret;
 462			req->flags |= REQ_F_BL_NO_RECYCLE;
 463			return io_setup_async_msg(req, kmsg, issue_flags);
 464		}
 465		if (ret == -ERESTARTSYS)
 466			ret = -EINTR;
 467		req_set_fail(req);
 468	}
 469	io_req_msg_cleanup(req, kmsg, issue_flags);
 470	if (ret >= 0)
 471		ret += sr->done_io;
 472	else if (sr->done_io)
 473		ret = sr->done_io;
 474	io_req_set_res(req, ret, 0);
 475	return IOU_OK;
 476}
 477
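/*
 * Issue IORING_OP_SEND. No user msghdr exists for this opcode, so one
 * is built on the stack around the single user buffer, with an optional
 * destination address for unconnected sockets.
 */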
 478int io_send(struct io_kiocb *req, unsigned int issue_flags)
 479{
 480	struct sockaddr_storage __address;
 481	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 482	struct msghdr msg;
 483	struct socket *sock;
 484	unsigned flags;
 485	int min_ret = 0;
 486	int ret;
 487
 488	msg.msg_name = NULL;
 489	msg.msg_control = NULL;
 490	msg.msg_controllen = 0;
 491	msg.msg_namelen = 0;
 492	msg.msg_ubuf = NULL;
 493
 494	if (sr->addr) {
 495		if (req_has_async_data(req)) {
 496			struct io_async_msghdr *io = req->async_data;
 497
 498			msg.msg_name = &io->addr;
 499		} else {
 500			ret = move_addr_to_kernel(sr->addr, sr->addr_len, &__address);
 501			if (unlikely(ret < 0))
 502				return ret;
 503			msg.msg_name = (struct sockaddr *)&__address;
 504		}
 505		msg.msg_namelen = sr->addr_len;
 506	}
 507
 508	if (!(req->flags & REQ_F_POLLED) &&
 509	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
 510		return io_setup_async_addr(req, &__address, issue_flags);
 511
 512	sock = sock_from_file(req->file);
 513	if (unlikely(!sock))
 514		return -ENOTSOCK;
 515
 516	ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len, &msg.msg_iter);
 517	if (unlikely(ret))
 518		return ret;
 519
 520	flags = sr->msg_flags;
 521	if (issue_flags & IO_URING_F_NONBLOCK)
 522		flags |= MSG_DONTWAIT;
 523	if (flags & MSG_WAITALL)
 524		min_ret = iov_iter_count(&msg.msg_iter);
 525
 526	flags &= ~MSG_INTERNAL_SENDMSG_FLAGS;
 527	msg.msg_flags = flags;
 528	ret = sock_sendmsg(sock, &msg);
 529	if (ret < min_ret) {
 530		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
 531			return io_setup_async_addr(req, &__address, issue_flags);
 532
 533		if (ret > 0 && io_net_retry(sock, flags)) {
 534			sr->len -= ret;
 535			sr->buf += ret;
 536			sr->done_io += ret;
 537			req->flags |= REQ_F_BL_NO_RECYCLE;
 538			return io_setup_async_addr(req, &__address, issue_flags);
 539		}
 540		if (ret == -ERESTARTSYS)
 541			ret = -EINTR;
 542		req_set_fail(req);
 543	}
 544	if (ret >= 0)
 545		ret += sr->done_io;
 546	else if (sr->done_io)
 547		ret = sr->done_io;
 548	io_req_set_res(req, ret, 0);
 549	return IOU_OK;
 550}
 551
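/*
 * Multishot recvmsg with provided buffers prepends a fixed header to
 * every buffer: check that the name and control lengths don't overflow
 * the header size and record them for carving the buffer at issue time.
 * A no-op for the regular single-shot case.
 */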
 552static int io_recvmsg_mshot_prep(struct io_kiocb *req,
 553				 struct io_async_msghdr *iomsg,
 554				 int namelen, size_t controllen)
 555{
 556	if ((req->flags & (REQ_F_APOLL_MULTISHOT|REQ_F_BUFFER_SELECT)) ==
 557			  (REQ_F_APOLL_MULTISHOT|REQ_F_BUFFER_SELECT)) {
 558		int hdr;
 559
 560		if (unlikely(namelen < 0))
 561			return -EOVERFLOW;
 562		if (check_add_overflow(sizeof(struct io_uring_recvmsg_out),
 563					namelen, &hdr))
 564			return -EOVERFLOW;
 565		if (check_add_overflow(hdr, controllen, &hdr))
 566			return -EOVERFLOW;
 567
 568		iomsg->namelen = namelen;
 569		iomsg->controllen = controllen;
 570		return 0;
 571	}
 572
 573	return 0;
 574}
 575
 576static int io_recvmsg_copy_hdr(struct io_kiocb *req,
 577			       struct io_async_msghdr *iomsg)
 578{
 579	struct user_msghdr msg;
 580	int ret;
 581
 582	iomsg->msg.msg_name = &iomsg->addr;
 583	iomsg->msg.msg_iter.nr_segs = 0;
 584
 585#ifdef CONFIG_COMPAT
 586	if (unlikely(req->ctx->compat)) {
 587		struct compat_msghdr cmsg;
 588
 589		ret = io_compat_msg_copy_hdr(req, iomsg, &cmsg, ITER_DEST);
 590		if (unlikely(ret))
 591			return ret;
 592
 593		ret = __get_compat_msghdr(&iomsg->msg, &cmsg, &iomsg->uaddr);
 594		if (unlikely(ret))
 595			return ret;
 596
 597		return io_recvmsg_mshot_prep(req, iomsg, cmsg.msg_namelen,
 598						cmsg.msg_controllen);
 599	}
 600#endif
 601
 602	ret = io_msg_copy_hdr(req, iomsg, &msg, ITER_DEST);
 603	if (unlikely(ret))
 604		return ret;
 605
 606	ret = __copy_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
 607	if (unlikely(ret))
 608		return ret;
 609
 610	return io_recvmsg_mshot_prep(req, iomsg, msg.msg_namelen,
 611					msg.msg_controllen);
 612}
 613
 614int io_recvmsg_prep_async(struct io_kiocb *req)
 615{
 616	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 617	struct io_async_msghdr *iomsg;
 618	int ret;
 619
 620	sr->done_io = 0;
 621	if (!io_msg_alloc_async_prep(req))
 622		return -ENOMEM;
 623	iomsg = req->async_data;
 624	ret = io_recvmsg_copy_hdr(req, iomsg);
 625	if (!ret)
 626		req->flags |= REQ_F_NEED_CLEANUP;
 627	return ret;
 628}
 629
 630#define RECVMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECV_MULTISHOT)
 631
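/*
 * Userspace view, as a rough liburing-flavoured sketch (not part of
 * this file): a multishot receive must use provided buffers and cannot
 * use MSG_WAITALL, e.g.
 *
 *	io_uring_prep_recvmsg_multishot(sqe, fd, &msg, 0);
 *	sqe->flags |= IOSQE_BUFFER_SELECT;
 *	sqe->buf_group = bgid;
 */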
 632int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 633{
 634	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 635
 636	sr->done_io = 0;
 637
 638	if (unlikely(sqe->file_index || sqe->addr2))
 639		return -EINVAL;
 640
 641	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
 642	sr->len = READ_ONCE(sqe->len);
 643	sr->flags = READ_ONCE(sqe->ioprio);
 644	if (sr->flags & ~(RECVMSG_FLAGS))
 645		return -EINVAL;
 646	sr->msg_flags = READ_ONCE(sqe->msg_flags);
 647	if (sr->msg_flags & MSG_DONTWAIT)
 648		req->flags |= REQ_F_NOWAIT;
 649	if (sr->msg_flags & MSG_ERRQUEUE)
 650		req->flags |= REQ_F_CLEAR_POLLIN;
 651	if (sr->flags & IORING_RECV_MULTISHOT) {
 652		if (!(req->flags & REQ_F_BUFFER_SELECT))
 653			return -EINVAL;
 654		if (sr->msg_flags & MSG_WAITALL)
 655			return -EINVAL;
 656		if (req->opcode == IORING_OP_RECV && sr->len)
 657			return -EINVAL;
 658		req->flags |= REQ_F_APOLL_MULTISHOT;
 659		/*
 660		 * Store the buffer group for this multishot receive separately,
 661		 * as if we end up doing an io-wq based issue that selects a
 662		 * buffer, it has to be committed immediately and that will
 663		 * clear ->buf_list. This means we lose the link to the buffer
 664		 * list, and the eventual buffer put on completion then cannot
 665		 * restore it.
 666		 */
 667		sr->buf_group = req->buf_index;
 668	}
 669
 670#ifdef CONFIG_COMPAT
 671	if (req->ctx->compat)
 672		sr->msg_flags |= MSG_CMSG_COMPAT;
 673#endif
 674	sr->nr_multishot_loops = 0;
 675	return 0;
 676}
 677
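/*
 * Reset per-iteration state so the next multishot receive selects a
 * fresh buffer from the stored provided buffer group.
 */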
 678static inline void io_recv_prep_retry(struct io_kiocb *req)
 679{
 680	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 681
 682	req->flags &= ~REQ_F_BL_EMPTY;
 683	sr->done_io = 0;
 684	sr->len = 0; /* get from the provided buffer */
 685	req->buf_index = sr->buf_group;
 686}
 687
 688/*
 689 * Finishes io_recv and io_recvmsg.
 690 *
 691 * Returns true if it is actually finished, or false if it should run
 692 * again (for multishot).
 693 */
 694static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
 695				  struct msghdr *msg, bool mshot_finished,
 696				  unsigned issue_flags)
 697{
 698	unsigned int cflags;
 699
 700	cflags = io_put_kbuf(req, issue_flags);
 701	if (msg->msg_inq > 0)
 702		cflags |= IORING_CQE_F_SOCK_NONEMPTY;
 703
 704	/*
 705	 * Fill CQE for this receive and see if we should keep trying to
 706	 * receive from this socket.
 707	 */
 708	if ((req->flags & REQ_F_APOLL_MULTISHOT) && !mshot_finished &&
 709	    io_fill_cqe_req_aux(req, issue_flags & IO_URING_F_COMPLETE_DEFER,
 710				*ret, cflags | IORING_CQE_F_MORE)) {
 711		struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 712		int mshot_retry_ret = IOU_ISSUE_SKIP_COMPLETE;
 713
 714		io_recv_prep_retry(req);
 715		/* Known not-empty or unknown state, retry */
 716		if (cflags & IORING_CQE_F_SOCK_NONEMPTY || msg->msg_inq < 0) {
 717			if (sr->nr_multishot_loops++ < MULTISHOT_MAX_RETRY)
 718				return false;
 719			/* mshot retries exceeded, force a requeue */
 720			sr->nr_multishot_loops = 0;
 721			mshot_retry_ret = IOU_REQUEUE;
 722		}
 723		if (issue_flags & IO_URING_F_MULTISHOT)
 724			*ret = mshot_retry_ret;
 725		else
 726			*ret = -EAGAIN;
 727		return true;
 728	}
 729
 730	/* Finish the request / stop multishot. */
 731	io_req_set_res(req, *ret, cflags);
 732
 733	if (issue_flags & IO_URING_F_MULTISHOT)
 734		*ret = IOU_STOP_MULTISHOT;
 735	else
 736		*ret = IOU_OK;
 737	return true;
 738}
 739
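/*
 * Carve up the selected provided buffer for multishot recvmsg. Each
 * buffer is laid out as:
 *
 *	struct io_uring_recvmsg_out | name (namelen) | control | payload
 *
 * Reserve the header space up front, point msg_control at its slot and
 * shrink *buf and *len down to the payload area the receive will fill.
 */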
 740static int io_recvmsg_prep_multishot(struct io_async_msghdr *kmsg,
 741				     struct io_sr_msg *sr, void __user **buf,
 742				     size_t *len)
 743{
 744	unsigned long ubuf = (unsigned long) *buf;
 745	unsigned long hdr;
 746
 747	hdr = sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
 748		kmsg->controllen;
 749	if (*len < hdr)
 750		return -EFAULT;
 751
 752	if (kmsg->controllen) {
 753		unsigned long control = ubuf + hdr - kmsg->controllen;
 754
 755		kmsg->msg.msg_control_user = (void __user *) control;
 756		kmsg->msg.msg_controllen = kmsg->controllen;
 757	}
 758
 759	sr->buf = *buf; /* stash for later copy */
 760	*buf = (void __user *) (ubuf + hdr);
 761	kmsg->payloadlen = *len = *len - hdr;
 762	return 0;
 763}
 764
 765struct io_recvmsg_multishot_hdr {
 766	struct io_uring_recvmsg_out msg;
 767	struct sockaddr_storage addr;
 768};
 769
 770static int io_recvmsg_multishot(struct socket *sock, struct io_sr_msg *io,
 771				struct io_async_msghdr *kmsg,
 772				unsigned int flags, bool *finished)
 773{
 774	int err;
 775	int copy_len;
 776	struct io_recvmsg_multishot_hdr hdr;
 777
 778	if (kmsg->namelen)
 779		kmsg->msg.msg_name = &hdr.addr;
 780	kmsg->msg.msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT);
 781	kmsg->msg.msg_namelen = 0;
 782
 783	if (sock->file->f_flags & O_NONBLOCK)
 784		flags |= MSG_DONTWAIT;
 785
 786	err = sock_recvmsg(sock, &kmsg->msg, flags);
 787	*finished = err <= 0;
 788	if (err < 0)
 789		return err;
 790
 791	hdr.msg = (struct io_uring_recvmsg_out) {
 792		.controllen = kmsg->controllen - kmsg->msg.msg_controllen,
 793		.flags = kmsg->msg.msg_flags & ~MSG_CMSG_COMPAT
 794	};
 795
 796	hdr.msg.payloadlen = err;
 797	if (err > kmsg->payloadlen)
 798		err = kmsg->payloadlen;
 799
 800	copy_len = sizeof(struct io_uring_recvmsg_out);
 801	if (kmsg->msg.msg_namelen > kmsg->namelen)
 802		copy_len += kmsg->namelen;
 803	else
 804		copy_len += kmsg->msg.msg_namelen;
 805
 806	/*
 807	 *      "fromlen shall refer to the value before truncation.."
 808	 *                      1003.1g
 809	 */
 810	hdr.msg.namelen = kmsg->msg.msg_namelen;
 811
 812	/* ensure that there is no gap between hdr and sockaddr_storage */
 813	BUILD_BUG_ON(offsetof(struct io_recvmsg_multishot_hdr, addr) !=
 814		     sizeof(struct io_uring_recvmsg_out));
 815	if (copy_to_user(io->buf, &hdr, copy_len)) {
 816		*finished = true;
 817		return -EFAULT;
 818	}
 819
 820	return sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
 821			kmsg->controllen + err;
 822}
 823
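/*
 * Issue IORING_OP_RECVMSG, in both single-shot and multishot mode. For
 * the latter the retry loop keeps posting CQEs until the socket drains,
 * the provided buffers run out, or the retry cap forces a requeue.
 */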
 824int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
 825{
 826	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 827	struct io_async_msghdr iomsg, *kmsg;
 828	struct socket *sock;
 829	unsigned flags;
 830	int ret, min_ret = 0;
 831	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
 832	bool mshot_finished = true;
 833
 834	sock = sock_from_file(req->file);
 835	if (unlikely(!sock))
 836		return -ENOTSOCK;
 837
 838	if (req_has_async_data(req)) {
 839		kmsg = req->async_data;
 840	} else {
 841		ret = io_recvmsg_copy_hdr(req, &iomsg);
 842		if (ret)
 843			return ret;
 844		kmsg = &iomsg;
 845	}
 846
 847	if (!(req->flags & REQ_F_POLLED) &&
 848	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
 849		return io_setup_async_msg(req, kmsg, issue_flags);
 850
 851	flags = sr->msg_flags;
 852	if (force_nonblock)
 853		flags |= MSG_DONTWAIT;
 854
 855retry_multishot:
 856	if (io_do_buffer_select(req)) {
 857		void __user *buf;
 858		size_t len = sr->len;
 859
 860		buf = io_buffer_select(req, &len, issue_flags);
 861		if (!buf)
 862			return -ENOBUFS;
 863
 864		if (req->flags & REQ_F_APOLL_MULTISHOT) {
 865			ret = io_recvmsg_prep_multishot(kmsg, sr, &buf, &len);
 866			if (ret) {
 867				io_kbuf_recycle(req, issue_flags);
 868				return ret;
 869			}
 870		}
 871
 872		iov_iter_ubuf(&kmsg->msg.msg_iter, ITER_DEST, buf, len);
 873	}
 874
 875	kmsg->msg.msg_get_inq = 1;
 876	kmsg->msg.msg_inq = -1;
 877	if (req->flags & REQ_F_APOLL_MULTISHOT) {
 878		ret = io_recvmsg_multishot(sock, sr, kmsg, flags,
 879					   &mshot_finished);
 880	} else {
 881		/* disable partial retry for recvmsg with cmsg attached */
 882		if (flags & MSG_WAITALL && !kmsg->msg.msg_controllen)
 883			min_ret = iov_iter_count(&kmsg->msg.msg_iter);
 884
 885		ret = __sys_recvmsg_sock(sock, &kmsg->msg, sr->umsg,
 886					 kmsg->uaddr, flags);
 887	}
 888
 889	if (ret < min_ret) {
 890		if (ret == -EAGAIN && force_nonblock) {
 891			ret = io_setup_async_msg(req, kmsg, issue_flags);
 892			if (ret == -EAGAIN && (issue_flags & IO_URING_F_MULTISHOT)) {
 893				io_kbuf_recycle(req, issue_flags);
 894				return IOU_ISSUE_SKIP_COMPLETE;
 895			}
 896			return ret;
 897		}
 898		if (ret > 0 && io_net_retry(sock, flags)) {
 899			sr->done_io += ret;
 900			req->flags |= REQ_F_BL_NO_RECYCLE;
 901			return io_setup_async_msg(req, kmsg, issue_flags);
 902		}
 903		if (ret == -ERESTARTSYS)
 904			ret = -EINTR;
 905		req_set_fail(req);
 906	} else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
 907		req_set_fail(req);
 908	}
 909
 910	if (ret > 0)
 911		ret += sr->done_io;
 912	else if (sr->done_io)
 913		ret = sr->done_io;
 914	else
 915		io_kbuf_recycle(req, issue_flags);
 916
 917	if (!io_recv_finish(req, &ret, &kmsg->msg, mshot_finished, issue_flags))
 918		goto retry_multishot;
 919
 920	if (mshot_finished)
 921		io_req_msg_cleanup(req, kmsg, issue_flags);
 922	else if (ret == -EAGAIN)
 923		return io_setup_async_msg(req, kmsg, issue_flags);
 924
 925	return ret;
 926}
 927
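/*
 * Issue IORING_OP_RECV: the plain single-buffer counterpart of recvmsg,
 * sharing the same multishot retry logic and the same partial-receive
 * accounting via sr->done_io.
 */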
 928int io_recv(struct io_kiocb *req, unsigned int issue_flags)
 929{
 930	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 931	struct msghdr msg;
 932	struct socket *sock;
 933	unsigned flags;
 934	int ret, min_ret = 0;
 935	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
 936	size_t len = sr->len;
 937
 938	if (!(req->flags & REQ_F_POLLED) &&
 939	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
 940		return -EAGAIN;
 941
 942	sock = sock_from_file(req->file);
 943	if (unlikely(!sock))
 944		return -ENOTSOCK;
 945
 946	msg.msg_name = NULL;
 947	msg.msg_namelen = 0;
 948	msg.msg_control = NULL;
 949	msg.msg_get_inq = 1;
 950	msg.msg_controllen = 0;
 951	msg.msg_iocb = NULL;
 952	msg.msg_ubuf = NULL;
 953
 954	flags = sr->msg_flags;
 955	if (force_nonblock)
 956		flags |= MSG_DONTWAIT;
 957
 958retry_multishot:
 959	if (io_do_buffer_select(req)) {
 960		void __user *buf;
 961
 962		buf = io_buffer_select(req, &len, issue_flags);
 963		if (!buf)
 964			return -ENOBUFS;
 965		sr->buf = buf;
 966		sr->len = len;
 967	}
 968
 969	ret = import_ubuf(ITER_DEST, sr->buf, len, &msg.msg_iter);
 970	if (unlikely(ret))
 971		goto out_free;
 972
 973	msg.msg_inq = -1;
 974	msg.msg_flags = 0;
 975
 976	if (flags & MSG_WAITALL)
 977		min_ret = iov_iter_count(&msg.msg_iter);
 978
 979	ret = sock_recvmsg(sock, &msg, flags);
 980	if (ret < min_ret) {
 981		if (ret == -EAGAIN && force_nonblock) {
 982			if (issue_flags & IO_URING_F_MULTISHOT) {
 983				io_kbuf_recycle(req, issue_flags);
 984				return IOU_ISSUE_SKIP_COMPLETE;
 985			}
 986
 987			return -EAGAIN;
 988		}
 989		if (ret > 0 && io_net_retry(sock, flags)) {
 990			sr->len -= ret;
 991			sr->buf += ret;
 992			sr->done_io += ret;
 993			req->flags |= REQ_F_BL_NO_RECYCLE;
 994			return -EAGAIN;
 995		}
 996		if (ret == -ERESTARTSYS)
 997			ret = -EINTR;
 998		req_set_fail(req);
 999	} else if ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
1000out_free:
1001		req_set_fail(req);
1002	}
1003
1004	if (ret > 0)
1005		ret += sr->done_io;
1006	else if (sr->done_io)
1007		ret = sr->done_io;
1008	else
1009		io_kbuf_recycle(req, issue_flags);
1010
1011	if (!io_recv_finish(req, &ret, &msg, ret <= 0, issue_flags))
1012		goto retry_multishot;
1013
1014	return ret;
1015}
1016
1017void io_send_zc_cleanup(struct io_kiocb *req)
1018{
1019	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
1020	struct io_async_msghdr *io;
1021
1022	if (req_has_async_data(req)) {
1023		io = req->async_data;
1024		/* might be ->fast_iov if *msg_copy_hdr failed */
1025		if (io->free_iov != io->fast_iov)
1026			kfree(io->free_iov);
1027	}
1028	if (zc->notif) {
1029		io_notif_flush(zc->notif);
1030		zc->notif = NULL;
1031	}
1032}
1033
1034#define IO_ZC_FLAGS_COMMON (IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_FIXED_BUF)
1035#define IO_ZC_FLAGS_VALID  (IO_ZC_FLAGS_COMMON | IORING_SEND_ZC_REPORT_USAGE)
1036
1037int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1038{
1039	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
1040	struct io_ring_ctx *ctx = req->ctx;
1041	struct io_kiocb *notif;
1042
1043	zc->done_io = 0;
1044	req->flags |= REQ_F_POLL_NO_LAZY;
1045
1046	if (unlikely(READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3)))
1047		return -EINVAL;
1048	/* we don't support IOSQE_CQE_SKIP_SUCCESS just yet */
1049	if (req->flags & REQ_F_CQE_SKIP)
1050		return -EINVAL;
1051
1052	notif = zc->notif = io_alloc_notif(ctx);
1053	if (!notif)
1054		return -ENOMEM;
1055	notif->cqe.user_data = req->cqe.user_data;
1056	notif->cqe.res = 0;
1057	notif->cqe.flags = IORING_CQE_F_NOTIF;
1058	req->flags |= REQ_F_NEED_CLEANUP;
1059
1060	zc->flags = READ_ONCE(sqe->ioprio);
1061	if (unlikely(zc->flags & ~IO_ZC_FLAGS_COMMON)) {
1062		if (zc->flags & ~IO_ZC_FLAGS_VALID)
1063			return -EINVAL;
1064		if (zc->flags & IORING_SEND_ZC_REPORT_USAGE) {
1065			io_notif_set_extended(notif);
1066			io_notif_to_data(notif)->zc_report = true;
1067		}
1068	}
1069
1070	if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
1071		unsigned idx = READ_ONCE(sqe->buf_index);
1072
1073		if (unlikely(idx >= ctx->nr_user_bufs))
1074			return -EFAULT;
1075		idx = array_index_nospec(idx, ctx->nr_user_bufs);
1076		req->imu = READ_ONCE(ctx->user_bufs[idx]);
1077		io_req_set_rsrc_node(notif, ctx, 0);
1078	}
1079
1080	if (req->opcode == IORING_OP_SEND_ZC) {
1081		if (READ_ONCE(sqe->__pad3[0]))
1082			return -EINVAL;
1083		zc->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
1084		zc->addr_len = READ_ONCE(sqe->addr_len);
1085	} else {
1086		if (unlikely(sqe->addr2 || sqe->file_index))
1087			return -EINVAL;
1088		if (unlikely(zc->flags & IORING_RECVSEND_FIXED_BUF))
1089			return -EINVAL;
1090	}
1091
1092	zc->buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
1093	zc->len = READ_ONCE(sqe->len);
1094	zc->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
1095	if (zc->msg_flags & MSG_DONTWAIT)
1096		req->flags |= REQ_F_NOWAIT;
1097
1098#ifdef CONFIG_COMPAT
1099	if (req->ctx->compat)
1100		zc->msg_flags |= MSG_CMSG_COMPAT;
1101#endif
1102	return 0;
1103}
1104
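/* Non-fixed zerocopy path: defer to the generic page-pinning helper. */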
1105static int io_sg_from_iter_iovec(struct sock *sk, struct sk_buff *skb,
1106				 struct iov_iter *from, size_t length)
1107{
1108	skb_zcopy_downgrade_managed(skb);
1109	return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);
1110}
1111
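/*
 * Fixed-buffer zerocopy path: splice the bvec pages of the registered
 * buffer straight into the skb frags, without copying and without
 * taking page references, while charging truesize to the socket.
 */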
1112static int io_sg_from_iter(struct sock *sk, struct sk_buff *skb,
1113			   struct iov_iter *from, size_t length)
1114{
1115	struct skb_shared_info *shinfo = skb_shinfo(skb);
1116	int frag = shinfo->nr_frags;
1117	int ret = 0;
1118	struct bvec_iter bi;
1119	ssize_t copied = 0;
1120	unsigned long truesize = 0;
1121
1122	if (!frag)
1123		shinfo->flags |= SKBFL_MANAGED_FRAG_REFS;
1124	else if (unlikely(!skb_zcopy_managed(skb)))
1125		return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);
1126
1127	bi.bi_size = min(from->count, length);
1128	bi.bi_bvec_done = from->iov_offset;
1129	bi.bi_idx = 0;
1130
1131	while (bi.bi_size && frag < MAX_SKB_FRAGS) {
1132		struct bio_vec v = mp_bvec_iter_bvec(from->bvec, bi);
1133
1134		copied += v.bv_len;
1135		truesize += PAGE_ALIGN(v.bv_len + v.bv_offset);
1136		__skb_fill_page_desc_noacc(shinfo, frag++, v.bv_page,
1137					   v.bv_offset, v.bv_len);
1138		bvec_iter_advance_single(from->bvec, &bi, v.bv_len);
1139	}
1140	if (bi.bi_size)
1141		ret = -EMSGSIZE;
1142
1143	shinfo->nr_frags = frag;
1144	from->bvec += bi.bi_idx;
1145	from->nr_segs -= bi.bi_idx;
1146	from->count -= copied;
1147	from->iov_offset = bi.bi_bvec_done;
1148
1149	skb->data_len += copied;
1150	skb->len += copied;
1151	skb->truesize += truesize;
1152
1153	if (sk && sk->sk_type == SOCK_STREAM) {
1154		sk_wmem_queued_add(sk, truesize);
1155		if (!skb_zcopy_pure(skb))
1156			sk_mem_charge(sk, truesize);
1157	} else {
1158		refcount_add(truesize, &skb->sk->sk_wmem_alloc);
1159	}
1160	return ret;
1161}
1162
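/*
 * Issue IORING_OP_SEND_ZC. Payload pages are handed to the network
 * stack by reference rather than copied; the request completes with
 * IORING_CQE_F_MORE set, and the paired notification CQE later signals
 * that the pages may be reused.
 */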
1163int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
1164{
1165	struct sockaddr_storage __address;
1166	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
1167	struct msghdr msg;
1168	struct socket *sock;
1169	unsigned msg_flags;
1170	int ret, min_ret = 0;
1171
1172	sock = sock_from_file(req->file);
1173	if (unlikely(!sock))
1174		return -ENOTSOCK;
1175	if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
1176		return -EOPNOTSUPP;
1177
1178	msg.msg_name = NULL;
1179	msg.msg_control = NULL;
1180	msg.msg_controllen = 0;
1181	msg.msg_namelen = 0;
1182
1183	if (zc->addr) {
1184		if (req_has_async_data(req)) {
1185			struct io_async_msghdr *io = req->async_data;
1186
1187			msg.msg_name = &io->addr;
1188		} else {
1189			ret = move_addr_to_kernel(zc->addr, zc->addr_len, &__address);
1190			if (unlikely(ret < 0))
1191				return ret;
1192			msg.msg_name = (struct sockaddr *)&__address;
1193		}
1194		msg.msg_namelen = zc->addr_len;
1195	}
1196
1197	if (!(req->flags & REQ_F_POLLED) &&
1198	    (zc->flags & IORING_RECVSEND_POLL_FIRST))
1199		return io_setup_async_addr(req, &__address, issue_flags);
1200
1201	if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
1202		ret = io_import_fixed(ITER_SOURCE, &msg.msg_iter, req->imu,
1203					(u64)(uintptr_t)zc->buf, zc->len);
1204		if (unlikely(ret))
1205			return ret;
1206		msg.sg_from_iter = io_sg_from_iter;
1207	} else {
1208		io_notif_set_extended(zc->notif);
1209		ret = import_ubuf(ITER_SOURCE, zc->buf, zc->len, &msg.msg_iter);
1210		if (unlikely(ret))
1211			return ret;
1212		ret = io_notif_account_mem(zc->notif, zc->len);
1213		if (unlikely(ret))
1214			return ret;
1215		msg.sg_from_iter = io_sg_from_iter_iovec;
1216	}
1217
1218	msg_flags = zc->msg_flags | MSG_ZEROCOPY;
1219	if (issue_flags & IO_URING_F_NONBLOCK)
1220		msg_flags |= MSG_DONTWAIT;
1221	if (msg_flags & MSG_WAITALL)
1222		min_ret = iov_iter_count(&msg.msg_iter);
1223	msg_flags &= ~MSG_INTERNAL_SENDMSG_FLAGS;
1224
1225	msg.msg_flags = msg_flags;
1226	msg.msg_ubuf = &io_notif_to_data(zc->notif)->uarg;
1227	ret = sock_sendmsg(sock, &msg);
1228
1229	if (unlikely(ret < min_ret)) {
1230		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
1231			return io_setup_async_addr(req, &__address, issue_flags);
1232
1233		if (ret > 0 && io_net_retry(sock, msg.msg_flags)) {
1234			zc->len -= ret;
1235			zc->buf += ret;
1236			zc->done_io += ret;
1237			req->flags |= REQ_F_BL_NO_RECYCLE;
1238			return io_setup_async_addr(req, &__address, issue_flags);
1239		}
1240		if (ret == -ERESTARTSYS)
1241			ret = -EINTR;
1242		req_set_fail(req);
1243	}
1244
1245	if (ret >= 0)
1246		ret += zc->done_io;
1247	else if (zc->done_io)
1248		ret = zc->done_io;
1249
1250	/*
1251	 * If we're in io-wq we can't rely on tw ordering guarantees, defer
1252	 * flushing notif to io_send_zc_cleanup()
1253	 */
1254	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
1255		io_notif_flush(zc->notif);
1256		req->flags &= ~REQ_F_NEED_CLEANUP;
1257	}
1258	io_req_set_res(req, ret, IORING_CQE_F_MORE);
1259	return IOU_OK;
1260}
1261
1262int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
1263{
1264	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
1265	struct io_async_msghdr iomsg, *kmsg;
1266	struct socket *sock;
1267	unsigned flags;
1268	int ret, min_ret = 0;
1269
1270	io_notif_set_extended(sr->notif);
1271
1272	sock = sock_from_file(req->file);
1273	if (unlikely(!sock))
1274		return -ENOTSOCK;
1275	if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
1276		return -EOPNOTSUPP;
1277
1278	if (req_has_async_data(req)) {
1279		kmsg = req->async_data;
1280		kmsg->msg.msg_control_user = sr->msg_control;
1281	} else {
1282		ret = io_sendmsg_copy_hdr(req, &iomsg);
1283		if (ret)
1284			return ret;
1285		kmsg = &iomsg;
1286	}
1287
1288	if (!(req->flags & REQ_F_POLLED) &&
1289	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
1290		return io_setup_async_msg(req, kmsg, issue_flags);
1291
1292	flags = sr->msg_flags | MSG_ZEROCOPY;
1293	if (issue_flags & IO_URING_F_NONBLOCK)
1294		flags |= MSG_DONTWAIT;
1295	if (flags & MSG_WAITALL)
1296		min_ret = iov_iter_count(&kmsg->msg.msg_iter);
1297
1298	kmsg->msg.msg_ubuf = &io_notif_to_data(sr->notif)->uarg;
1299	kmsg->msg.sg_from_iter = io_sg_from_iter_iovec;
1300	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
1301
1302	if (unlikely(ret < min_ret)) {
1303		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
1304			return io_setup_async_msg(req, kmsg, issue_flags);
1305
1306		if (ret > 0 && io_net_retry(sock, flags)) {
1307			sr->done_io += ret;
1308			req->flags |= REQ_F_BL_NO_RECYCLE;
1309			return io_setup_async_msg(req, kmsg, issue_flags);
1310		}
1311		if (ret == -ERESTARTSYS)
1312			ret = -EINTR;
1313		req_set_fail(req);
1314	}
1315	/* fast path, check for non-NULL to avoid function call */
1316	if (kmsg->free_iov) {
1317		kfree(kmsg->free_iov);
1318		kmsg->free_iov = NULL;
1319	}
1320
1321	io_netmsg_recycle(req, issue_flags);
1322	if (ret >= 0)
1323		ret += sr->done_io;
1324	else if (sr->done_io)
1325		ret = sr->done_io;
1326
1327	/*
1328	 * If we're in io-wq we can't rely on tw ordering guarantees, defer
1329	 * flushing notif to io_send_zc_cleanup()
1330	 */
1331	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
1332		io_notif_flush(sr->notif);
1333		req->flags &= ~REQ_F_NEED_CLEANUP;
1334	}
1335	io_req_set_res(req, ret, IORING_CQE_F_MORE);
1336	return IOU_OK;
1337}
1338
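/*
 * Failure hook for the send/receive opcodes: report partial progress as
 * the result if any was made, and keep IORING_CQE_F_MORE set for
 * zerocopy requests that still owe the caller a notification CQE.
 */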
1339void io_sendrecv_fail(struct io_kiocb *req)
1340{
1341	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
1342
1343	if (sr->done_io)
1344		req->cqe.res = sr->done_io;
1345
1346	if ((req->flags & REQ_F_NEED_CLEANUP) &&
1347	    (req->opcode == IORING_OP_SEND_ZC || req->opcode == IORING_OP_SENDMSG_ZC))
1348		req->cqe.flags |= IORING_CQE_F_MORE;
1349}
1350
1351int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1352{
1353	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
1354	unsigned flags;
1355
1356	if (sqe->len || sqe->buf_index)
1357		return -EINVAL;
1358
1359	accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
1360	accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
1361	accept->flags = READ_ONCE(sqe->accept_flags);
1362	accept->nofile = rlimit(RLIMIT_NOFILE);
1363	flags = READ_ONCE(sqe->ioprio);
1364	if (flags & ~IORING_ACCEPT_MULTISHOT)
1365		return -EINVAL;
1366
1367	accept->file_slot = READ_ONCE(sqe->file_index);
1368	if (accept->file_slot) {
1369		if (accept->flags & SOCK_CLOEXEC)
1370			return -EINVAL;
1371		if (flags & IORING_ACCEPT_MULTISHOT &&
1372		    accept->file_slot != IORING_FILE_INDEX_ALLOC)
1373			return -EINVAL;
1374	}
1375	if (accept->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
1376		return -EINVAL;
1377	if (SOCK_NONBLOCK != O_NONBLOCK && (accept->flags & SOCK_NONBLOCK))
1378		accept->flags = (accept->flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
1379	if (flags & IORING_ACCEPT_MULTISHOT)
1380		req->flags |= REQ_F_APOLL_MULTISHOT;
1381	return 0;
1382}
1383
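/*
 * Issue IORING_OP_ACCEPT. In multishot mode every accepted connection
 * posts a CQE marked IORING_CQE_F_MORE, and the request stays armed
 * until an error or a failed CQE post terminates it.
 */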
1384int io_accept(struct io_kiocb *req, unsigned int issue_flags)
1385{
1386	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
1387	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
1388	unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
1389	bool fixed = !!accept->file_slot;
1390	struct file *file;
1391	int ret, fd;
1392
1393retry:
1394	if (!fixed) {
1395		fd = __get_unused_fd_flags(accept->flags, accept->nofile);
1396		if (unlikely(fd < 0))
1397			return fd;
1398	}
1399	file = do_accept(req->file, file_flags, accept->addr, accept->addr_len,
1400			 accept->flags);
1401	if (IS_ERR(file)) {
1402		if (!fixed)
1403			put_unused_fd(fd);
1404		ret = PTR_ERR(file);
1405		if (ret == -EAGAIN && force_nonblock) {
1406			/*
1407			 * if it's multishot and polled, we don't need to
1408			 * return EAGAIN to arm the poll infra since it
1409			 * has already been done
1410			 */
1411			if (issue_flags & IO_URING_F_MULTISHOT)
1412				return IOU_ISSUE_SKIP_COMPLETE;
1413			return ret;
1414		}
1415		if (ret == -ERESTARTSYS)
1416			ret = -EINTR;
1417		req_set_fail(req);
1418	} else if (!fixed) {
1419		fd_install(fd, file);
1420		ret = fd;
1421	} else {
1422		ret = io_fixed_fd_install(req, issue_flags, file,
1423						accept->file_slot);
1424	}
1425
1426	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
1427		io_req_set_res(req, ret, 0);
1428		return IOU_OK;
1429	}
1430
1431	if (ret < 0)
1432		return ret;
1433	if (io_fill_cqe_req_aux(req, issue_flags & IO_URING_F_COMPLETE_DEFER,
1434				ret, IORING_CQE_F_MORE))
1435		goto retry;
1436
1437	io_req_set_res(req, ret, 0);
1438	return IOU_STOP_MULTISHOT;
1439}
1440
1441int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1442{
1443	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);
1444
1445	if (sqe->addr || sqe->rw_flags || sqe->buf_index)
1446		return -EINVAL;
1447
1448	sock->domain = READ_ONCE(sqe->fd);
1449	sock->type = READ_ONCE(sqe->off);
1450	sock->protocol = READ_ONCE(sqe->len);
1451	sock->file_slot = READ_ONCE(sqe->file_index);
1452	sock->nofile = rlimit(RLIMIT_NOFILE);
1453
1454	sock->flags = sock->type & ~SOCK_TYPE_MASK;
1455	if (sock->file_slot && (sock->flags & SOCK_CLOEXEC))
1456		return -EINVAL;
1457	if (sock->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
1458		return -EINVAL;
1459	return 0;
1460}
1461
1462int io_socket(struct io_kiocb *req, unsigned int issue_flags)
1463{
1464	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);
1465	bool fixed = !!sock->file_slot;
1466	struct file *file;
1467	int ret, fd;
1468
1469	if (!fixed) {
1470		fd = __get_unused_fd_flags(sock->flags, sock->nofile);
1471		if (unlikely(fd < 0))
1472			return fd;
1473	}
1474	file = __sys_socket_file(sock->domain, sock->type, sock->protocol);
1475	if (IS_ERR(file)) {
1476		if (!fixed)
1477			put_unused_fd(fd);
1478		ret = PTR_ERR(file);
1479		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
1480			return -EAGAIN;
1481		if (ret == -ERESTARTSYS)
1482			ret = -EINTR;
1483		req_set_fail(req);
1484	} else if (!fixed) {
1485		fd_install(fd, file);
1486		ret = fd;
1487	} else {
1488		ret = io_fixed_fd_install(req, issue_flags, file,
1489					    sock->file_slot);
1490	}
1491	io_req_set_res(req, ret, 0);
1492	return IOU_OK;
1493}
1494
1495int io_connect_prep_async(struct io_kiocb *req)
1496{
1497	struct io_async_connect *io = req->async_data;
1498	struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);
1499
1500	return move_addr_to_kernel(conn->addr, conn->addr_len, &io->address);
1501}
1502
1503int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1504{
1505	struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);
1506
1507	if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
1508		return -EINVAL;
1509
1510	conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
 1511	conn->addr_len = READ_ONCE(sqe->addr2);
1512	conn->in_progress = conn->seen_econnaborted = false;
1513	return 0;
1514}
1515
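/*
 * Issue IORING_OP_CONNECT. A nonblocking connect returning -EINPROGRESS
 * is parked on poll until the socket settles; a first -ECONNABORTED is
 * retried once before being surfaced to the caller.
 */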
1516int io_connect(struct io_kiocb *req, unsigned int issue_flags)
1517{
1518	struct io_connect *connect = io_kiocb_to_cmd(req, struct io_connect);
1519	struct io_async_connect __io, *io;
1520	unsigned file_flags;
1521	int ret;
1522	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
1523
1524	if (req_has_async_data(req)) {
1525		io = req->async_data;
1526	} else {
1527		ret = move_addr_to_kernel(connect->addr,
1528						connect->addr_len,
1529						&__io.address);
1530		if (ret)
1531			goto out;
1532		io = &__io;
1533	}
1534
1535	file_flags = force_nonblock ? O_NONBLOCK : 0;
1536
1537	ret = __sys_connect_file(req->file, &io->address,
1538					connect->addr_len, file_flags);
1539	if ((ret == -EAGAIN || ret == -EINPROGRESS || ret == -ECONNABORTED)
1540	    && force_nonblock) {
1541		if (ret == -EINPROGRESS) {
1542			connect->in_progress = true;
1543		} else if (ret == -ECONNABORTED) {
1544			if (connect->seen_econnaborted)
1545				goto out;
1546			connect->seen_econnaborted = true;
1547		}
1548		if (req_has_async_data(req))
1549			return -EAGAIN;
1550		if (io_alloc_async_data(req)) {
1551			ret = -ENOMEM;
1552			goto out;
1553		}
1554		memcpy(req->async_data, &__io, sizeof(__io));
1555		return -EAGAIN;
1556	}
1557	if (connect->in_progress) {
1558		/*
1559		 * At least bluetooth will return -EBADFD on a re-connect
1560		 * attempt, and it's (supposedly) also valid to get -EISCONN
1561		 * which means the previous result is good. For both of these,
1562		 * grab the sock_error() and use that for the completion.
1563		 */
1564		if (ret == -EBADFD || ret == -EISCONN)
1565			ret = sock_error(sock_from_file(req->file)->sk);
1566	}
1567	if (ret == -ERESTARTSYS)
1568		ret = -EINTR;
1569out:
1570	if (ret < 0)
1571		req_set_fail(req);
1572	io_req_set_res(req, ret, 0);
1573	return IOU_OK;
1574}
1575
1576void io_netmsg_cache_free(struct io_cache_entry *entry)
1577{
1578	kfree(container_of(entry, struct io_async_msghdr, cache));
1579}
1580#endif