v6.8
   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */
   3
   4#include <linux/skmsg.h>
   5#include <linux/skbuff.h>
   6#include <linux/scatterlist.h>
   7
   8#include <net/sock.h>
   9#include <net/tcp.h>
  10#include <net/tls.h>
  11#include <trace/events/sock.h>
  12
  13static bool sk_msg_try_coalesce_ok(struct sk_msg *msg, int elem_first_coalesce)
  14{
  15	if (msg->sg.end > msg->sg.start &&
  16	    elem_first_coalesce < msg->sg.end)
  17		return true;
  18
  19	if (msg->sg.end < msg->sg.start &&
  20	    (elem_first_coalesce > msg->sg.start ||
  21	     elem_first_coalesce < msg->sg.end))
  22		return true;
  23
  24	return false;
  25}
  26
  27int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len,
  28		 int elem_first_coalesce)
  29{
  30	struct page_frag *pfrag = sk_page_frag(sk);
  31	u32 osize = msg->sg.size;
  32	int ret = 0;
  33
  34	len -= msg->sg.size;
  35	while (len > 0) {
  36		struct scatterlist *sge;
  37		u32 orig_offset;
  38		int use, i;
  39
  40		if (!sk_page_frag_refill(sk, pfrag)) {
  41			ret = -ENOMEM;
  42			goto msg_trim;
  43		}
  44
  45		orig_offset = pfrag->offset;
  46		use = min_t(int, len, pfrag->size - orig_offset);
  47		if (!sk_wmem_schedule(sk, use)) {
  48			ret = -ENOMEM;
  49			goto msg_trim;
  50		}
  51
  52		i = msg->sg.end;
  53		sk_msg_iter_var_prev(i);
  54		sge = &msg->sg.data[i];
  55
  56		if (sk_msg_try_coalesce_ok(msg, elem_first_coalesce) &&
  57		    sg_page(sge) == pfrag->page &&
  58		    sge->offset + sge->length == orig_offset) {
  59			sge->length += use;
  60		} else {
  61			if (sk_msg_full(msg)) {
  62				ret = -ENOSPC;
  63				break;
  64			}
  65
  66			sge = &msg->sg.data[msg->sg.end];
  67			sg_unmark_end(sge);
  68			sg_set_page(sge, pfrag->page, use, orig_offset);
  69			get_page(pfrag->page);
  70			sk_msg_iter_next(msg, end);
  71		}
  72
  73		sk_mem_charge(sk, use);
  74		msg->sg.size += use;
  75		pfrag->offset += use;
  76		len -= use;
  77	}
  78
  79	return ret;
  80
  81msg_trim:
  82	sk_msg_trim(sk, msg, osize);
  83	return ret;
  84}
  85EXPORT_SYMBOL_GPL(sk_msg_alloc);
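
For context: sk_msg_alloc() takes the desired total size rather than a delta (note the len -= msg->sg.size at the top), and elem_first_coalesce bounds which existing element may be extended in place. A minimal caller sketch follows, loosely modeled on the tcp_bpf send path; example_reserve_bytes() and its error handling are illustrative, not kernel code.

/* Hedged sketch: grow @msg by @copy bytes before copying data in.
 * Kernel context with the socket lock held is assumed. On -ENOMEM
 * sk_msg_alloc() has already trimmed back to the old size; on
 * -ENOSPC the scatterlist ring is full and the caller must flush.
 */
static int example_reserve_bytes(struct sock *sk, struct sk_msg *msg, int copy)
{
	return sk_msg_alloc(sk, msg, msg->sg.size + copy, msg->sg.end - 1);
}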
  86
  87int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src,
  88		 u32 off, u32 len)
  89{
  90	int i = src->sg.start;
  91	struct scatterlist *sge = sk_msg_elem(src, i);
  92	struct scatterlist *sgd = NULL;
  93	u32 sge_len, sge_off;
  94
  95	while (off) {
  96		if (sge->length > off)
  97			break;
  98		off -= sge->length;
  99		sk_msg_iter_var_next(i);
 100		if (i == src->sg.end && off)
 101			return -ENOSPC;
 102		sge = sk_msg_elem(src, i);
 103	}
 104
 105	while (len) {
 106		sge_len = sge->length - off;
 107		if (sge_len > len)
 108			sge_len = len;
 109
 110		if (dst->sg.end)
 111			sgd = sk_msg_elem(dst, dst->sg.end - 1);
 112
 113		if (sgd &&
 114		    (sg_page(sge) == sg_page(sgd)) &&
 115		    (sg_virt(sge) + off == sg_virt(sgd) + sgd->length)) {
 116			sgd->length += sge_len;
 117			dst->sg.size += sge_len;
 118		} else if (!sk_msg_full(dst)) {
 119			sge_off = sge->offset + off;
 120			sk_msg_page_add(dst, sg_page(sge), sge_len, sge_off);
 121		} else {
 122			return -ENOSPC;
 123		}
 124
 125		off = 0;
 126		len -= sge_len;
 127		sk_mem_charge(sk, sge_len);
 128		sk_msg_iter_var_next(i);
 129		if (i == src->sg.end && len)
 130			return -ENOSPC;
 131		sge = sk_msg_elem(src, i);
 132	}
 133
 134	return 0;
 135}
 136EXPORT_SYMBOL_GPL(sk_msg_clone);
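
sk_msg_clone() shares pages rather than copying them: the destination either extends its last element when the source range is virtually contiguous with it, or links a new page reference via sk_msg_page_add(), charging the socket for the cloned bytes either way. A hedged sketch; example_clone_head() is an illustrative name:

/* Illustrative: mirror the first @len bytes of @src into a fresh @dst.
 * Both messages reference the same pages afterwards; -ENOSPC means
 * @dst ran out of slots or @src ended before off + len.
 */
static int example_clone_head(struct sock *sk, struct sk_msg *src,
			      struct sk_msg *dst, u32 len)
{
	sk_msg_init(dst);
	return sk_msg_clone(sk, dst, src, 0, len);
}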
 137
 138void sk_msg_return_zero(struct sock *sk, struct sk_msg *msg, int bytes)
 139{
 140	int i = msg->sg.start;
 141
 142	do {
 143		struct scatterlist *sge = sk_msg_elem(msg, i);
 144
 145		if (bytes < sge->length) {
 146			sge->length -= bytes;
 147			sge->offset += bytes;
 148			sk_mem_uncharge(sk, bytes);
 149			break;
 150		}
 151
 152		sk_mem_uncharge(sk, sge->length);
 153		bytes -= sge->length;
 154		sge->length = 0;
 155		sge->offset = 0;
 156		sk_msg_iter_var_next(i);
 157	} while (bytes && i != msg->sg.end);
 158	msg->sg.start = i;
 159}
 160EXPORT_SYMBOL_GPL(sk_msg_return_zero);
 161
 162void sk_msg_return(struct sock *sk, struct sk_msg *msg, int bytes)
 163{
 164	int i = msg->sg.start;
 165
 166	do {
 167		struct scatterlist *sge = &msg->sg.data[i];
 168		int uncharge = (bytes < sge->length) ? bytes : sge->length;
 169
 170		sk_mem_uncharge(sk, uncharge);
 171		bytes -= uncharge;
 172		sk_msg_iter_var_next(i);
 173	} while (i != msg->sg.end);
 174}
 175EXPORT_SYMBOL_GPL(sk_msg_return);
 176
 177static int sk_msg_free_elem(struct sock *sk, struct sk_msg *msg, u32 i,
 178			    bool charge)
 179{
 180	struct scatterlist *sge = sk_msg_elem(msg, i);
 181	u32 len = sge->length;
 182
  183	/* When the skb owns the memory we free it from the consume_skb path. */
 184	if (!msg->skb) {
 185		if (charge)
 186			sk_mem_uncharge(sk, len);
 187		put_page(sg_page(sge));
 188	}
 189	memset(sge, 0, sizeof(*sge));
 190	return len;
 191}
 192
 193static int __sk_msg_free(struct sock *sk, struct sk_msg *msg, u32 i,
 194			 bool charge)
 195{
 196	struct scatterlist *sge = sk_msg_elem(msg, i);
 197	int freed = 0;
 198
 199	while (msg->sg.size) {
 200		msg->sg.size -= sge->length;
 201		freed += sk_msg_free_elem(sk, msg, i, charge);
 202		sk_msg_iter_var_next(i);
 203		sk_msg_check_to_free(msg, i, msg->sg.size);
 204		sge = sk_msg_elem(msg, i);
 205	}
 206	consume_skb(msg->skb);
 207	sk_msg_init(msg);
 208	return freed;
 209}
 210
 211int sk_msg_free_nocharge(struct sock *sk, struct sk_msg *msg)
 212{
 213	return __sk_msg_free(sk, msg, msg->sg.start, false);
 214}
 215EXPORT_SYMBOL_GPL(sk_msg_free_nocharge);
 216
 217int sk_msg_free(struct sock *sk, struct sk_msg *msg)
 218{
 219	return __sk_msg_free(sk, msg, msg->sg.start, true);
 220}
 221EXPORT_SYMBOL_GPL(sk_msg_free);
 222
 223static void __sk_msg_free_partial(struct sock *sk, struct sk_msg *msg,
 224				  u32 bytes, bool charge)
 225{
 226	struct scatterlist *sge;
 227	u32 i = msg->sg.start;
 228
 229	while (bytes) {
 230		sge = sk_msg_elem(msg, i);
 231		if (!sge->length)
 232			break;
 233		if (bytes < sge->length) {
 234			if (charge)
 235				sk_mem_uncharge(sk, bytes);
 236			sge->length -= bytes;
 237			sge->offset += bytes;
 238			msg->sg.size -= bytes;
 239			break;
 240		}
 241
 242		msg->sg.size -= sge->length;
 243		bytes -= sge->length;
 244		sk_msg_free_elem(sk, msg, i, charge);
 245		sk_msg_iter_var_next(i);
 246		sk_msg_check_to_free(msg, i, bytes);
 247	}
 248	msg->sg.start = i;
 249}
 250
 251void sk_msg_free_partial(struct sock *sk, struct sk_msg *msg, u32 bytes)
 252{
 253	__sk_msg_free_partial(sk, msg, bytes, true);
 254}
 255EXPORT_SYMBOL_GPL(sk_msg_free_partial);
 256
 257void sk_msg_free_partial_nocharge(struct sock *sk, struct sk_msg *msg,
 258				  u32 bytes)
 259{
 260	__sk_msg_free_partial(sk, msg, bytes, false);
 261}
 262
 263void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len)
 264{
 265	int trim = msg->sg.size - len;
 266	u32 i = msg->sg.end;
 267
 268	if (trim <= 0) {
 269		WARN_ON(trim < 0);
 270		return;
 271	}
 272
 273	sk_msg_iter_var_prev(i);
 274	msg->sg.size = len;
 275	while (msg->sg.data[i].length &&
 276	       trim >= msg->sg.data[i].length) {
 277		trim -= msg->sg.data[i].length;
 278		sk_msg_free_elem(sk, msg, i, true);
 279		sk_msg_iter_var_prev(i);
 280		if (!trim)
 281			goto out;
 282	}
 283
 284	msg->sg.data[i].length -= trim;
 285	sk_mem_uncharge(sk, trim);
 286	/* Adjust copybreak if it falls into the trimmed part of last buf */
 287	if (msg->sg.curr == i && msg->sg.copybreak > msg->sg.data[i].length)
 288		msg->sg.copybreak = msg->sg.data[i].length;
 289out:
 290	sk_msg_iter_var_next(i);
 291	msg->sg.end = i;
 292
  293	/* If we trim data a full sg elem before the curr pointer, update
  294	 * copybreak and curr so that any future copy operations start at
  295	 * the new copy location.
  296	 * However, trimmed data that has not yet been used in a copy op
  297	 * does not require an update.
  298	 */
 299	if (!msg->sg.size) {
 300		msg->sg.curr = msg->sg.start;
 301		msg->sg.copybreak = 0;
 302	} else if (sk_msg_iter_dist(msg->sg.start, msg->sg.curr) >=
 303		   sk_msg_iter_dist(msg->sg.start, msg->sg.end)) {
 304		sk_msg_iter_var_prev(i);
 305		msg->sg.curr = i;
 306		msg->sg.copybreak = msg->sg.data[i].length;
 307	}
 308}
 309EXPORT_SYMBOL_GPL(sk_msg_trim);
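
sk_msg_trim() shrinks the message from the tail down to len bytes, freeing whole elements first and then carving the remainder off the last one, keeping curr/copybreak consistent. A common pattern, sketched here with illustrative copied/reserved counters, is giving back the unused tail after a short copy:

/* Hedged sketch: @reserved bytes were allocated up front but only
 * @copied arrived, so return the difference to the socket accounting.
 */
if (copied < reserved)
	sk_msg_trim(sk, msg, msg->sg.size - (reserved - copied));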
 310
 311int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
 312			      struct sk_msg *msg, u32 bytes)
 313{
 314	int i, maxpages, ret = 0, num_elems = sk_msg_elem_used(msg);
 315	const int to_max_pages = MAX_MSG_FRAGS;
 316	struct page *pages[MAX_MSG_FRAGS];
 317	ssize_t orig, copied, use, offset;
 318
 319	orig = msg->sg.size;
 320	while (bytes > 0) {
 321		i = 0;
 322		maxpages = to_max_pages - num_elems;
 323		if (maxpages == 0) {
 324			ret = -EFAULT;
 325			goto out;
 326		}
 327
 328		copied = iov_iter_get_pages2(from, pages, bytes, maxpages,
 329					    &offset);
 330		if (copied <= 0) {
 331			ret = -EFAULT;
 332			goto out;
 333		}
 334
 335		bytes -= copied;
 336		msg->sg.size += copied;
 337
 338		while (copied) {
 339			use = min_t(int, copied, PAGE_SIZE - offset);
 340			sg_set_page(&msg->sg.data[msg->sg.end],
 341				    pages[i], use, offset);
 342			sg_unmark_end(&msg->sg.data[msg->sg.end]);
 343			sk_mem_charge(sk, use);
 344
 345			offset = 0;
 346			copied -= use;
 347			sk_msg_iter_next(msg, end);
 348			num_elems++;
 349			i++;
 350		}
  351		/* When zerocopy is mixed with sk_msg_*copy* operations we
  352		 * may have a copybreak set; in that case clear it and prefer
  353		 * the zerocopy remainder when possible.
  354		 */
 355		msg->sg.copybreak = 0;
 356		msg->sg.curr = msg->sg.end;
 357	}
 358out:
  359	/* Revert the iov_iter updates; the msg will need to use 'trim'
  360	 * later if it also needs to be cleared.
 361	 */
 362	if (ret)
 363		iov_iter_revert(from, msg->sg.size - orig);
 364	return ret;
 365}
 366EXPORT_SYMBOL_GPL(sk_msg_zerocopy_from_iter);
 367
 368int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
 369			     struct sk_msg *msg, u32 bytes)
 370{
 371	int ret = -ENOSPC, i = msg->sg.curr;
 372	struct scatterlist *sge;
 373	u32 copy, buf_size;
 374	void *to;
 375
 376	do {
 377		sge = sk_msg_elem(msg, i);
 378		/* This is possible if a trim operation shrunk the buffer */
 379		if (msg->sg.copybreak >= sge->length) {
 380			msg->sg.copybreak = 0;
 381			sk_msg_iter_var_next(i);
 382			if (i == msg->sg.end)
 383				break;
 384			sge = sk_msg_elem(msg, i);
 385		}
 386
 387		buf_size = sge->length - msg->sg.copybreak;
 388		copy = (buf_size > bytes) ? bytes : buf_size;
 389		to = sg_virt(sge) + msg->sg.copybreak;
 390		msg->sg.copybreak += copy;
 391		if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY)
 392			ret = copy_from_iter_nocache(to, copy, from);
 393		else
 394			ret = copy_from_iter(to, copy, from);
 395		if (ret != copy) {
 396			ret = -EFAULT;
 397			goto out;
 398		}
 399		bytes -= copy;
 400		if (!bytes)
 401			break;
 402		msg->sg.copybreak = 0;
 403		sk_msg_iter_var_next(i);
 404	} while (i != msg->sg.end);
 405out:
 406	msg->sg.curr = i;
 407	return ret;
 408}
 409EXPORT_SYMBOL_GPL(sk_msg_memcopy_from_iter);
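
The two fill paths above are typically combined: attempt zerocopy first and fall back to copying through freshly allocated pages on failure. A hedged sketch, not the exact tcp_bpf or TLS code; example_fill_msg() is an illustrative name and assumes the caller may retry from a clean state:

/* On error sk_msg_zerocopy_from_iter() reverts the iov_iter but can
 * leave partially linked pages behind, so trim back to @osize before
 * retrying with a plain copy.
 */
static int example_fill_msg(struct sock *sk, struct iov_iter *from,
			    struct sk_msg *msg, u32 bytes)
{
	u32 osize = msg->sg.size;
	int err;

	err = sk_msg_zerocopy_from_iter(sk, from, msg, bytes);
	if (!err)
		return 0;

	sk_msg_trim(sk, msg, osize);
	err = sk_msg_alloc(sk, msg, osize + bytes, msg->sg.end - 1);
	if (err)
		return err;
	err = sk_msg_memcopy_from_iter(sk, from, msg, bytes);
	return err < 0 ? err : 0;
}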
 410
 411/* Receive sk_msg from psock->ingress_msg to @msg. */
 412int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
 413		   int len, int flags)
 414{
 415	struct iov_iter *iter = &msg->msg_iter;
 416	int peek = flags & MSG_PEEK;
 417	struct sk_msg *msg_rx;
 418	int i, copied = 0;
 419
 420	msg_rx = sk_psock_peek_msg(psock);
 421	while (copied != len) {
 422		struct scatterlist *sge;
 423
 424		if (unlikely(!msg_rx))
 425			break;
 426
 427		i = msg_rx->sg.start;
 428		do {
 429			struct page *page;
 430			int copy;
 431
 432			sge = sk_msg_elem(msg_rx, i);
 433			copy = sge->length;
 434			page = sg_page(sge);
 435			if (copied + copy > len)
 436				copy = len - copied;
 437			copy = copy_page_to_iter(page, sge->offset, copy, iter);
 438			if (!copy) {
 439				copied = copied ? copied : -EFAULT;
 440				goto out;
 441			}
 442
 443			copied += copy;
 444			if (likely(!peek)) {
 445				sge->offset += copy;
 446				sge->length -= copy;
 447				if (!msg_rx->skb)
 448					sk_mem_uncharge(sk, copy);
 449				msg_rx->sg.size -= copy;
 450
 451				if (!sge->length) {
 452					sk_msg_iter_var_next(i);
 453					if (!msg_rx->skb)
 454						put_page(page);
 455				}
 456			} else {
  457				/* Let's not optimize the peek case: if copy_page_to_iter
  458				 * didn't copy the entire length, just break.
 459				 */
 460				if (copy != sge->length)
 461					goto out;
 462				sk_msg_iter_var_next(i);
 463			}
 464
 465			if (copied == len)
 466				break;
 467		} while ((i != msg_rx->sg.end) && !sg_is_last(sge));
 468
 469		if (unlikely(peek)) {
 470			msg_rx = sk_psock_next_msg(psock, msg_rx);
 471			if (!msg_rx)
 472				break;
 473			continue;
 474		}
 475
 476		msg_rx->sg.start = i;
 477		if (!sge->length && (i == msg_rx->sg.end || sg_is_last(sge))) {
 478			msg_rx = sk_psock_dequeue_msg(psock);
 479			kfree_sk_msg(msg_rx);
 480		}
 481		msg_rx = sk_psock_peek_msg(psock);
 482	}
 483out:
 484	return copied;
 485}
 486EXPORT_SYMBOL_GPL(sk_msg_recvmsg);
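
sk_msg_recvmsg() is designed to be called from a protocol's recvmsg hook with the socket lock held; MSG_PEEK walks messages without consuming them. A hedged caller sketch in the style of tcp_bpf_recvmsg(); example_recvmsg() and its fallback policy are illustrative:

/* Drain the psock ingress queue into @msg. A real implementation
 * falls back to the native recvmsg when no psock is attached or the
 * ingress list is empty.
 */
static int example_recvmsg(struct sock *sk, struct msghdr *msg,
			   size_t len, int flags)
{
	struct sk_psock *psock;
	int copied;

	psock = sk_psock_get(sk);
	if (unlikely(!psock))
		return -EINVAL;

	lock_sock(sk);
	copied = sk_msg_recvmsg(sk, psock, msg, len, flags);
	release_sock(sk);
	sk_psock_put(sk, psock);
	return copied;
}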
 487
 488bool sk_msg_is_readable(struct sock *sk)
 489{
 490	struct sk_psock *psock;
 491	bool empty = true;
 492
 493	rcu_read_lock();
 494	psock = sk_psock(sk);
 495	if (likely(psock))
 496		empty = list_empty(&psock->ingress_msg);
 497	rcu_read_unlock();
 498	return !empty;
 499}
 500EXPORT_SYMBOL_GPL(sk_msg_is_readable);
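
This helper exists so that poll() reports readable data sitting on the psock ingress list even when the native receive queue is empty. The BPF protocol variants install it as the ->sock_is_readable callback, roughly as sketched (the assignment context is illustrative):

/* Wiring sketch, as the tcp_bpf proto rebuild effectively does: */
prot->sock_is_readable = sk_msg_is_readable;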
 501
 502static struct sk_msg *alloc_sk_msg(gfp_t gfp)
 503{
 504	struct sk_msg *msg;
 505
 506	msg = kzalloc(sizeof(*msg), gfp | __GFP_NOWARN);
 507	if (unlikely(!msg))
 508		return NULL;
 509	sg_init_marker(msg->sg.data, NR_MSG_FRAG_IDS);
 510	return msg;
 511}
 512
 513static struct sk_msg *sk_psock_create_ingress_msg(struct sock *sk,
 514						  struct sk_buff *skb)
 515{
 516	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
 517		return NULL;
 518
 519	if (!sk_rmem_schedule(sk, skb, skb->truesize))
 520		return NULL;
 521
 522	return alloc_sk_msg(GFP_KERNEL);
 523}
 524
 525static int sk_psock_skb_ingress_enqueue(struct sk_buff *skb,
 526					u32 off, u32 len,
 527					struct sk_psock *psock,
 528					struct sock *sk,
 529					struct sk_msg *msg)
 530{
 531	int num_sge, copied;
 532
 533	num_sge = skb_to_sgvec(skb, msg->sg.data, off, len);
 534	if (num_sge < 0) {
  535		/* skb linearize may fail with ENOMEM, but let's simply try again
  536		 * later if this happens. Under memory pressure we don't want to
  537		 * drop the skb. We need to linearize the skb so that the mapping
  538		 * in skb_to_sgvec cannot error.
 539		 */
 540		if (skb_linearize(skb))
 541			return -EAGAIN;
 542
 543		num_sge = skb_to_sgvec(skb, msg->sg.data, off, len);
 544		if (unlikely(num_sge < 0))
 545			return num_sge;
 546	}
 547
 548	copied = len;
 549	msg->sg.start = 0;
 550	msg->sg.size = copied;
 551	msg->sg.end = num_sge;
 552	msg->skb = skb;
 553
 554	sk_psock_queue_msg(psock, msg);
 555	sk_psock_data_ready(sk, psock);
 556	return copied;
 557}
 558
 559static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb,
 560				     u32 off, u32 len);
 561
 562static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb,
 563				u32 off, u32 len)
 564{
 565	struct sock *sk = psock->sk;
 566	struct sk_msg *msg;
 567	int err;
 568
  569	/* If we are receiving on the same sock, skb->sk is already assigned;
  570	 * skip memory accounting and the owner transition since they are
  571	 * already set correctly.
 572	 */
 573	if (unlikely(skb->sk == sk))
 574		return sk_psock_skb_ingress_self(psock, skb, off, len);
 575	msg = sk_psock_create_ingress_msg(sk, skb);
 576	if (!msg)
 577		return -EAGAIN;
 578
 579	/* This will transition ownership of the data from the socket where
 580	 * the BPF program was run initiating the redirect to the socket
 581	 * we will eventually receive this data on. The data will be released
  582	 * from skb_consume found in __tcp_bpf_recvmsg() after it has been
  583	 * copied into user buffers.
 584	 */
 585	skb_set_owner_r(skb, sk);
 586	err = sk_psock_skb_ingress_enqueue(skb, off, len, psock, sk, msg);
 587	if (err < 0)
 588		kfree(msg);
 589	return err;
 590}
 591
 592/* Puts an skb on the ingress queue of the socket already assigned to the
 593 * skb. In this case we do not need to check memory limits or skb_set_owner_r
 594 * because the skb is already accounted for here.
 595 */
 596static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb,
 597				     u32 off, u32 len)
 598{
 599	struct sk_msg *msg = alloc_sk_msg(GFP_ATOMIC);
 600	struct sock *sk = psock->sk;
 601	int err;
 602
 603	if (unlikely(!msg))
 604		return -EAGAIN;
 605	skb_set_owner_r(skb, sk);
 606	err = sk_psock_skb_ingress_enqueue(skb, off, len, psock, sk, msg);
 607	if (err < 0)
 608		kfree(msg);
 609	return err;
 610}
 611
 612static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb,
 613			       u32 off, u32 len, bool ingress)
 614{
 615	int err = 0;
 616
 617	if (!ingress) {
 618		if (!sock_writeable(psock->sk))
 619			return -EAGAIN;
 620		return skb_send_sock(psock->sk, skb, off, len);
 621	}
 622	skb_get(skb);
 623	err = sk_psock_skb_ingress(psock, skb, off, len);
 624	if (err < 0)
 625		kfree_skb(skb);
 626	return err;
 627}
 628
 629static void sk_psock_skb_state(struct sk_psock *psock,
 630			       struct sk_psock_work_state *state,
 631			       int len, int off)
 632{
 633	spin_lock_bh(&psock->ingress_lock);
 634	if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
 635		state->len = len;
 636		state->off = off;
 637	}
 638	spin_unlock_bh(&psock->ingress_lock);
 639}
 640
 641static void sk_psock_backlog(struct work_struct *work)
 642{
 643	struct delayed_work *dwork = to_delayed_work(work);
 644	struct sk_psock *psock = container_of(dwork, struct sk_psock, work);
 645	struct sk_psock_work_state *state = &psock->work_state;
 646	struct sk_buff *skb = NULL;
 647	u32 len = 0, off = 0;
 648	bool ingress;
 649	int ret;
 650
 651	mutex_lock(&psock->work_mutex);
 652	if (unlikely(state->len)) {
 653		len = state->len;
 654		off = state->off;
 655	}
 656
 657	while ((skb = skb_peek(&psock->ingress_skb))) {
 658		len = skb->len;
 659		off = 0;
 660		if (skb_bpf_strparser(skb)) {
 661			struct strp_msg *stm = strp_msg(skb);
 662
 663			off = stm->offset;
 664			len = stm->full_len;
 665		}
 666		ingress = skb_bpf_ingress(skb);
 667		skb_bpf_redirect_clear(skb);
 668		do {
 669			ret = -EIO;
 670			if (!sock_flag(psock->sk, SOCK_DEAD))
 671				ret = sk_psock_handle_skb(psock, skb, off,
 672							  len, ingress);
 673			if (ret <= 0) {
 674				if (ret == -EAGAIN) {
 675					sk_psock_skb_state(psock, state, len, off);
 676
 677					/* Delay slightly to prioritize any
 678					 * other work that might be here.
 679					 */
 680					if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
 681						schedule_delayed_work(&psock->work, 1);
 682					goto end;
 683				}
 684				/* Hard errors break pipe and stop xmit. */
 685				sk_psock_report_error(psock, ret ? -ret : EPIPE);
 686				sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
 687				goto end;
 688			}
 689			off += ret;
 690			len -= ret;
 691		} while (len);
 692
 693		skb = skb_dequeue(&psock->ingress_skb);
 694		kfree_skb(skb);
 695	}
 696end:
 697	mutex_unlock(&psock->work_mutex);
 698}
 699
 700struct sk_psock *sk_psock_init(struct sock *sk, int node)
 701{
 702	struct sk_psock *psock;
 703	struct proto *prot;
 704
 705	write_lock_bh(&sk->sk_callback_lock);
 706
 707	if (sk_is_inet(sk) && inet_csk_has_ulp(sk)) {
 708		psock = ERR_PTR(-EINVAL);
 709		goto out;
 710	}
 711
 712	if (sk->sk_user_data) {
 713		psock = ERR_PTR(-EBUSY);
 714		goto out;
 715	}
 716
 717	psock = kzalloc_node(sizeof(*psock), GFP_ATOMIC | __GFP_NOWARN, node);
 718	if (!psock) {
 719		psock = ERR_PTR(-ENOMEM);
 720		goto out;
 721	}
 722
 723	prot = READ_ONCE(sk->sk_prot);
 724	psock->sk = sk;
 725	psock->eval = __SK_NONE;
 726	psock->sk_proto = prot;
 727	psock->saved_unhash = prot->unhash;
 728	psock->saved_destroy = prot->destroy;
 729	psock->saved_close = prot->close;
 730	psock->saved_write_space = sk->sk_write_space;
 731
 732	INIT_LIST_HEAD(&psock->link);
 733	spin_lock_init(&psock->link_lock);
 734
 735	INIT_DELAYED_WORK(&psock->work, sk_psock_backlog);
 736	mutex_init(&psock->work_mutex);
 737	INIT_LIST_HEAD(&psock->ingress_msg);
 738	spin_lock_init(&psock->ingress_lock);
 739	skb_queue_head_init(&psock->ingress_skb);
 740
 741	sk_psock_set_state(psock, SK_PSOCK_TX_ENABLED);
 742	refcount_set(&psock->refcnt, 1);
 743
 744	__rcu_assign_sk_user_data_with_flags(sk, psock,
 745					     SK_USER_DATA_NOCOPY |
 746					     SK_USER_DATA_PSOCK);
 747	sock_hold(sk);
 748
 749out:
 750	write_unlock_bh(&sk->sk_callback_lock);
 751	return psock;
 752}
 753EXPORT_SYMBOL_GPL(sk_psock_init);
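
A hedged attach sketch: callers such as the sockmap update path create one psock per socket and must handle the ERR_PTR() returns above (-EINVAL for a ULP socket, -EBUSY when sk_user_data is already taken). The surrounding error-handling context is illustrative:

/* Illustrative attach; a real caller (e.g. sock_map) also links the
 * psock to a map entry and installs parser/verdict programs.
 */
struct sk_psock *psock = sk_psock_init(sk, NUMA_NO_NODE);

if (IS_ERR(psock))
	return PTR_ERR(psock);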
 754
 755struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock)
 756{
 757	struct sk_psock_link *link;
 758
 759	spin_lock_bh(&psock->link_lock);
 760	link = list_first_entry_or_null(&psock->link, struct sk_psock_link,
 761					list);
 762	if (link)
 763		list_del(&link->list);
 764	spin_unlock_bh(&psock->link_lock);
 765	return link;
 766}
 767
 768static void __sk_psock_purge_ingress_msg(struct sk_psock *psock)
 769{
 770	struct sk_msg *msg, *tmp;
 771
 772	list_for_each_entry_safe(msg, tmp, &psock->ingress_msg, list) {
 773		list_del(&msg->list);
 774		sk_msg_free(psock->sk, msg);
 775		kfree(msg);
 776	}
 777}
 778
 779static void __sk_psock_zap_ingress(struct sk_psock *psock)
 780{
 781	struct sk_buff *skb;
 782
 783	while ((skb = skb_dequeue(&psock->ingress_skb)) != NULL) {
 784		skb_bpf_redirect_clear(skb);
 785		sock_drop(psock->sk, skb);
 786	}
 787	__sk_psock_purge_ingress_msg(psock);
 788}
 789
 790static void sk_psock_link_destroy(struct sk_psock *psock)
 791{
 792	struct sk_psock_link *link, *tmp;
 793
 794	list_for_each_entry_safe(link, tmp, &psock->link, list) {
 795		list_del(&link->list);
 796		sk_psock_free_link(link);
 797	}
 798}
 799
 800void sk_psock_stop(struct sk_psock *psock)
 801{
 802	spin_lock_bh(&psock->ingress_lock);
 803	sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
 804	sk_psock_cork_free(psock);
 805	spin_unlock_bh(&psock->ingress_lock);
 806}
 807
 808static void sk_psock_done_strp(struct sk_psock *psock);
 809
 810static void sk_psock_destroy(struct work_struct *work)
 811{
 812	struct sk_psock *psock = container_of(to_rcu_work(work),
 813					      struct sk_psock, rwork);
 814	/* No sk_callback_lock since already detached. */
 815
 816	sk_psock_done_strp(psock);
 817
 818	cancel_delayed_work_sync(&psock->work);
 819	__sk_psock_zap_ingress(psock);
 820	mutex_destroy(&psock->work_mutex);
 821
 822	psock_progs_drop(&psock->progs);
 823
 824	sk_psock_link_destroy(psock);
 825	sk_psock_cork_free(psock);
 826
 827	if (psock->sk_redir)
 828		sock_put(psock->sk_redir);
 829	if (psock->sk_pair)
 830		sock_put(psock->sk_pair);
 831	sock_put(psock->sk);
 832	kfree(psock);
 833}
 834
 835void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
 836{
 837	write_lock_bh(&sk->sk_callback_lock);
 838	sk_psock_restore_proto(sk, psock);
 839	rcu_assign_sk_user_data(sk, NULL);
 840	if (psock->progs.stream_parser)
 841		sk_psock_stop_strp(sk, psock);
 842	else if (psock->progs.stream_verdict || psock->progs.skb_verdict)
 843		sk_psock_stop_verdict(sk, psock);
 844	write_unlock_bh(&sk->sk_callback_lock);
 845
 846	sk_psock_stop(psock);
 847
 848	INIT_RCU_WORK(&psock->rwork, sk_psock_destroy);
 849	queue_rcu_work(system_wq, &psock->rwork);
 850}
 851EXPORT_SYMBOL_GPL(sk_psock_drop);
 852
 853static int sk_psock_map_verd(int verdict, bool redir)
 854{
 855	switch (verdict) {
 856	case SK_PASS:
 857		return redir ? __SK_REDIRECT : __SK_PASS;
 858	case SK_DROP:
 859	default:
 860		break;
 861	}
 862
 863	return __SK_DROP;
 864}
 865
 866int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
 867			 struct sk_msg *msg)
 868{
 869	struct bpf_prog *prog;
 870	int ret;
 871
 872	rcu_read_lock();
 873	prog = READ_ONCE(psock->progs.msg_parser);
 874	if (unlikely(!prog)) {
 875		ret = __SK_PASS;
 876		goto out;
 877	}
 878
 879	sk_msg_compute_data_pointers(msg);
 880	msg->sk = sk;
 881	ret = bpf_prog_run_pin_on_cpu(prog, msg);
 882	ret = sk_psock_map_verd(ret, msg->sk_redir);
 883	psock->apply_bytes = msg->apply_bytes;
 884	if (ret == __SK_REDIRECT) {
 885		if (psock->sk_redir) {
 886			sock_put(psock->sk_redir);
 887			psock->sk_redir = NULL;
 888		}
 889		if (!msg->sk_redir) {
 890			ret = __SK_DROP;
 891			goto out;
 892		}
 893		psock->redir_ingress = sk_msg_to_ingress(msg);
 894		psock->sk_redir = msg->sk_redir;
 895		sock_hold(psock->sk_redir);
 896	}
 897out:
 898	rcu_read_unlock();
 899	return ret;
 900}
 901EXPORT_SYMBOL_GPL(sk_psock_msg_verdict);
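
The program run here is of type BPF_PROG_TYPE_SK_MSG, and sk_psock_map_verd() above turns SK_PASS plus a populated msg->sk_redir into __SK_REDIRECT. A minimal hedged example program; the sock_map map, its key, and the program name are illustrative:

/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_SOCKMAP);
	__uint(max_entries, 2);
	__type(key, __u32);
	__type(value, __u64);
} sock_map SEC(".maps");

SEC("sk_msg")
int msg_redirect(struct sk_msg_md *msg)
{
	__u32 key = 0;

	/* On success this sets msg->sk_redir, which sk_psock_map_verd()
	 * maps to __SK_REDIRECT; on failure it yields SK_DROP.
	 */
	return bpf_msg_redirect_map(msg, &sock_map, key, BPF_F_INGRESS);
}

char _license[] SEC("license") = "GPL";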
 902
 903static int sk_psock_skb_redirect(struct sk_psock *from, struct sk_buff *skb)
 904{
 905	struct sk_psock *psock_other;
 906	struct sock *sk_other;
 907
 908	sk_other = skb_bpf_redirect_fetch(skb);
  909	/* This error indicates a buggy BPF program: it returned a redirect
  910	 * return code but then didn't set a redirect interface.
 911	 */
 912	if (unlikely(!sk_other)) {
 913		skb_bpf_redirect_clear(skb);
 914		sock_drop(from->sk, skb);
 915		return -EIO;
 916	}
 917	psock_other = sk_psock(sk_other);
 918	/* This error indicates the socket is being torn down or had another
 919	 * error that caused the pipe to break. We can't send a packet on
 920	 * a socket that is in this state so we drop the skb.
 921	 */
 922	if (!psock_other || sock_flag(sk_other, SOCK_DEAD)) {
 923		skb_bpf_redirect_clear(skb);
 924		sock_drop(from->sk, skb);
 925		return -EIO;
 926	}
 927	spin_lock_bh(&psock_other->ingress_lock);
 928	if (!sk_psock_test_state(psock_other, SK_PSOCK_TX_ENABLED)) {
 929		spin_unlock_bh(&psock_other->ingress_lock);
 930		skb_bpf_redirect_clear(skb);
 931		sock_drop(from->sk, skb);
 932		return -EIO;
 933	}
 934
 935	skb_queue_tail(&psock_other->ingress_skb, skb);
 936	schedule_delayed_work(&psock_other->work, 0);
 937	spin_unlock_bh(&psock_other->ingress_lock);
 938	return 0;
 939}
 940
 941static void sk_psock_tls_verdict_apply(struct sk_buff *skb,
 942				       struct sk_psock *from, int verdict)
 943{
 944	switch (verdict) {
 945	case __SK_REDIRECT:
 946		sk_psock_skb_redirect(from, skb);
 947		break;
 948	case __SK_PASS:
 949	case __SK_DROP:
 950	default:
 951		break;
 952	}
 953}
 954
 955int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb)
 956{
 957	struct bpf_prog *prog;
 958	int ret = __SK_PASS;
 959
 960	rcu_read_lock();
 961	prog = READ_ONCE(psock->progs.stream_verdict);
 962	if (likely(prog)) {
 963		skb->sk = psock->sk;
 964		skb_dst_drop(skb);
 965		skb_bpf_redirect_clear(skb);
 966		ret = bpf_prog_run_pin_on_cpu(prog, skb);
 967		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
 968		skb->sk = NULL;
 969	}
 970	sk_psock_tls_verdict_apply(skb, psock, ret);
 971	rcu_read_unlock();
 972	return ret;
 973}
 974EXPORT_SYMBOL_GPL(sk_psock_tls_strp_read);
 975
 976static int sk_psock_verdict_apply(struct sk_psock *psock, struct sk_buff *skb,
 977				  int verdict)
 978{
 979	struct sock *sk_other;
 980	int err = 0;
 981	u32 len, off;
 982
 983	switch (verdict) {
 984	case __SK_PASS:
 985		err = -EIO;
 986		sk_other = psock->sk;
 987		if (sock_flag(sk_other, SOCK_DEAD) ||
 988		    !sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
 989			goto out_free;
 990
 991		skb_bpf_set_ingress(skb);
 992
  993		/* If the queue is empty then we can submit directly
  994		 * into the msg queue. If it's not empty we have to
  995		 * queue work, otherwise we may get OOO data. Either way,
  996		 * sk_psock_skb_ingress errors will be handled by
  997		 * retrying later from the workqueue.
 998		 */
 999		if (skb_queue_empty(&psock->ingress_skb)) {
1000			len = skb->len;
1001			off = 0;
1002			if (skb_bpf_strparser(skb)) {
1003				struct strp_msg *stm = strp_msg(skb);
1004
1005				off = stm->offset;
1006				len = stm->full_len;
1007			}
1008			err = sk_psock_skb_ingress_self(psock, skb, off, len);
1009		}
1010		if (err < 0) {
1011			spin_lock_bh(&psock->ingress_lock);
1012			if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
1013				skb_queue_tail(&psock->ingress_skb, skb);
1014				schedule_delayed_work(&psock->work, 0);
1015				err = 0;
1016			}
1017			spin_unlock_bh(&psock->ingress_lock);
1018			if (err < 0)
1019				goto out_free;
1020		}
1021		break;
1022	case __SK_REDIRECT:
1023		tcp_eat_skb(psock->sk, skb);
1024		err = sk_psock_skb_redirect(psock, skb);
1025		break;
1026	case __SK_DROP:
1027	default:
1028out_free:
1029		skb_bpf_redirect_clear(skb);
1030		tcp_eat_skb(psock->sk, skb);
1031		sock_drop(psock->sk, skb);
1032	}
1033
1034	return err;
1035}
1036
1037static void sk_psock_write_space(struct sock *sk)
1038{
1039	struct sk_psock *psock;
1040	void (*write_space)(struct sock *sk) = NULL;
1041
1042	rcu_read_lock();
1043	psock = sk_psock(sk);
1044	if (likely(psock)) {
1045		if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
1046			schedule_delayed_work(&psock->work, 0);
1047		write_space = psock->saved_write_space;
1048	}
1049	rcu_read_unlock();
1050	if (write_space)
1051		write_space(sk);
1052}
1053
1054#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
1055static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
1056{
1057	struct sk_psock *psock;
1058	struct bpf_prog *prog;
1059	int ret = __SK_DROP;
1060	struct sock *sk;
1061
1062	rcu_read_lock();
1063	sk = strp->sk;
1064	psock = sk_psock(sk);
1065	if (unlikely(!psock)) {
1066		sock_drop(sk, skb);
1067		goto out;
1068	}
1069	prog = READ_ONCE(psock->progs.stream_verdict);
1070	if (likely(prog)) {
1071		skb->sk = sk;
1072		skb_dst_drop(skb);
1073		skb_bpf_redirect_clear(skb);
1074		ret = bpf_prog_run_pin_on_cpu(prog, skb);
1075		skb_bpf_set_strparser(skb);
1076		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
1077		skb->sk = NULL;
1078	}
1079	sk_psock_verdict_apply(psock, skb, ret);
1080out:
1081	rcu_read_unlock();
1082}
1083
1084static int sk_psock_strp_read_done(struct strparser *strp, int err)
1085{
1086	return err;
1087}
1088
1089static int sk_psock_strp_parse(struct strparser *strp, struct sk_buff *skb)
1090{
1091	struct sk_psock *psock = container_of(strp, struct sk_psock, strp);
1092	struct bpf_prog *prog;
1093	int ret = skb->len;
1094
1095	rcu_read_lock();
1096	prog = READ_ONCE(psock->progs.stream_parser);
1097	if (likely(prog)) {
1098		skb->sk = psock->sk;
1099		ret = bpf_prog_run_pin_on_cpu(prog, skb);
1100		skb->sk = NULL;
1101	}
1102	rcu_read_unlock();
1103	return ret;
1104}
1105
1106/* Called with socket lock held. */
1107static void sk_psock_strp_data_ready(struct sock *sk)
1108{
1109	struct sk_psock *psock;
1110
1111	trace_sk_data_ready(sk);
1112
1113	rcu_read_lock();
1114	psock = sk_psock(sk);
1115	if (likely(psock)) {
1116		if (tls_sw_has_ctx_rx(sk)) {
1117			psock->saved_data_ready(sk);
1118		} else {
1119			write_lock_bh(&sk->sk_callback_lock);
1120			strp_data_ready(&psock->strp);
1121			write_unlock_bh(&sk->sk_callback_lock);
1122		}
1123	}
1124	rcu_read_unlock();
1125}
1126
1127int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock)
1128{
1129	int ret;
1130
1131	static const struct strp_callbacks cb = {
1132		.rcv_msg	= sk_psock_strp_read,
1133		.read_sock_done	= sk_psock_strp_read_done,
1134		.parse_msg	= sk_psock_strp_parse,
1135	};
1136
1137	ret = strp_init(&psock->strp, sk, &cb);
1138	if (!ret)
1139		sk_psock_set_state(psock, SK_PSOCK_RX_STRP_ENABLED);
1140
1141	return ret;
1142}
1143
1144void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
1145{
1146	if (psock->saved_data_ready)
1147		return;
1148
1149	psock->saved_data_ready = sk->sk_data_ready;
1150	sk->sk_data_ready = sk_psock_strp_data_ready;
1151	sk->sk_write_space = sk_psock_write_space;
1152}
1153
1154void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock)
1155{
1156	psock_set_prog(&psock->progs.stream_parser, NULL);
1157
1158	if (!psock->saved_data_ready)
1159		return;
1160
1161	sk->sk_data_ready = psock->saved_data_ready;
1162	psock->saved_data_ready = NULL;
1163	strp_stop(&psock->strp);
1164}
1165
1166static void sk_psock_done_strp(struct sk_psock *psock)
1167{
1168	/* Parser has been stopped */
1169	if (sk_psock_test_state(psock, SK_PSOCK_RX_STRP_ENABLED))
1170		strp_done(&psock->strp);
1171}
1172#else
1173static void sk_psock_done_strp(struct sk_psock *psock)
1174{
1175}
1176#endif /* CONFIG_BPF_STREAM_PARSER */
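
The strparser path above runs two BPF programs: a stream parser returning the byte length of the next message and a stream verdict mapped through sk_psock_map_verd(). A minimal hedged pair using the same includes/skeleton as the sk_msg example earlier; treating every skb as one whole message is the simplest valid framing:

SEC("sk_skb/stream_parser")
int stream_parser(struct __sk_buff *skb)
{
	/* Tell strparser each skb is exactly one complete message. */
	return skb->len;
}

SEC("sk_skb/stream_verdict")
int stream_verdict(struct __sk_buff *skb)
{
	/* SK_PASS without a redirect socket maps to __SK_PASS, queueing
	 * the data for the local socket's ingress_msg list.
	 */
	return SK_PASS;
}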
1177
1178static int sk_psock_verdict_recv(struct sock *sk, struct sk_buff *skb)
1179{
1180	struct sk_psock *psock;
1181	struct bpf_prog *prog;
1182	int ret = __SK_DROP;
1183	int len = skb->len;
1184
1185	rcu_read_lock();
1186	psock = sk_psock(sk);
1187	if (unlikely(!psock)) {
1188		len = 0;
1189		tcp_eat_skb(sk, skb);
1190		sock_drop(sk, skb);
1191		goto out;
1192	}
1193	prog = READ_ONCE(psock->progs.stream_verdict);
1194	if (!prog)
1195		prog = READ_ONCE(psock->progs.skb_verdict);
1196	if (likely(prog)) {
1197		skb_dst_drop(skb);
1198		skb_bpf_redirect_clear(skb);
1199		ret = bpf_prog_run_pin_on_cpu(prog, skb);
1200		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
1201	}
1202	ret = sk_psock_verdict_apply(psock, skb, ret);
1203	if (ret < 0)
1204		len = ret;
1205out:
1206	rcu_read_unlock();
1207	return len;
1208}
1209
1210static void sk_psock_verdict_data_ready(struct sock *sk)
1211{
1212	struct socket *sock = sk->sk_socket;
1213	const struct proto_ops *ops;
1214	int copied;
1215
1216	trace_sk_data_ready(sk);
1217
1218	if (unlikely(!sock))
1219		return;
1220	ops = READ_ONCE(sock->ops);
1221	if (!ops || !ops->read_skb)
1222		return;
1223	copied = ops->read_skb(sk, sk_psock_verdict_recv);
1224	if (copied >= 0) {
1225		struct sk_psock *psock;
1226
1227		rcu_read_lock();
1228		psock = sk_psock(sk);
1229		if (psock) {
1230			read_lock_bh(&sk->sk_callback_lock);
1231			sk_psock_data_ready(sk, psock);
1232			read_unlock_bh(&sk->sk_callback_lock);
1233		}
1234		rcu_read_unlock();
1235	}
1236}
1237
1238void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock)
1239{
1240	if (psock->saved_data_ready)
1241		return;
1242
1243	psock->saved_data_ready = sk->sk_data_ready;
1244	sk->sk_data_ready = sk_psock_verdict_data_ready;
1245	sk->sk_write_space = sk_psock_write_space;
1246}
1247
1248void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock)
1249{
1250	psock_set_prog(&psock->progs.stream_verdict, NULL);
1251	psock_set_prog(&psock->progs.skb_verdict, NULL);
1252
1253	if (!psock->saved_data_ready)
1254		return;
1255
1256	sk->sk_data_ready = psock->saved_data_ready;
1257	psock->saved_data_ready = NULL;
1258}
v6.2
   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */
   3
   4#include <linux/skmsg.h>
   5#include <linux/skbuff.h>
   6#include <linux/scatterlist.h>
   7
   8#include <net/sock.h>
   9#include <net/tcp.h>
  10#include <net/tls.h>
 
  11
  12static bool sk_msg_try_coalesce_ok(struct sk_msg *msg, int elem_first_coalesce)
  13{
  14	if (msg->sg.end > msg->sg.start &&
  15	    elem_first_coalesce < msg->sg.end)
  16		return true;
  17
  18	if (msg->sg.end < msg->sg.start &&
  19	    (elem_first_coalesce > msg->sg.start ||
  20	     elem_first_coalesce < msg->sg.end))
  21		return true;
  22
  23	return false;
  24}
  25
  26int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len,
  27		 int elem_first_coalesce)
  28{
  29	struct page_frag *pfrag = sk_page_frag(sk);
  30	u32 osize = msg->sg.size;
  31	int ret = 0;
  32
  33	len -= msg->sg.size;
  34	while (len > 0) {
  35		struct scatterlist *sge;
  36		u32 orig_offset;
  37		int use, i;
  38
  39		if (!sk_page_frag_refill(sk, pfrag)) {
  40			ret = -ENOMEM;
  41			goto msg_trim;
  42		}
  43
  44		orig_offset = pfrag->offset;
  45		use = min_t(int, len, pfrag->size - orig_offset);
  46		if (!sk_wmem_schedule(sk, use)) {
  47			ret = -ENOMEM;
  48			goto msg_trim;
  49		}
  50
  51		i = msg->sg.end;
  52		sk_msg_iter_var_prev(i);
  53		sge = &msg->sg.data[i];
  54
  55		if (sk_msg_try_coalesce_ok(msg, elem_first_coalesce) &&
  56		    sg_page(sge) == pfrag->page &&
  57		    sge->offset + sge->length == orig_offset) {
  58			sge->length += use;
  59		} else {
  60			if (sk_msg_full(msg)) {
  61				ret = -ENOSPC;
  62				break;
  63			}
  64
  65			sge = &msg->sg.data[msg->sg.end];
  66			sg_unmark_end(sge);
  67			sg_set_page(sge, pfrag->page, use, orig_offset);
  68			get_page(pfrag->page);
  69			sk_msg_iter_next(msg, end);
  70		}
  71
  72		sk_mem_charge(sk, use);
  73		msg->sg.size += use;
  74		pfrag->offset += use;
  75		len -= use;
  76	}
  77
  78	return ret;
  79
  80msg_trim:
  81	sk_msg_trim(sk, msg, osize);
  82	return ret;
  83}
  84EXPORT_SYMBOL_GPL(sk_msg_alloc);
  85
  86int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src,
  87		 u32 off, u32 len)
  88{
  89	int i = src->sg.start;
  90	struct scatterlist *sge = sk_msg_elem(src, i);
  91	struct scatterlist *sgd = NULL;
  92	u32 sge_len, sge_off;
  93
  94	while (off) {
  95		if (sge->length > off)
  96			break;
  97		off -= sge->length;
  98		sk_msg_iter_var_next(i);
  99		if (i == src->sg.end && off)
 100			return -ENOSPC;
 101		sge = sk_msg_elem(src, i);
 102	}
 103
 104	while (len) {
 105		sge_len = sge->length - off;
 106		if (sge_len > len)
 107			sge_len = len;
 108
 109		if (dst->sg.end)
 110			sgd = sk_msg_elem(dst, dst->sg.end - 1);
 111
 112		if (sgd &&
 113		    (sg_page(sge) == sg_page(sgd)) &&
 114		    (sg_virt(sge) + off == sg_virt(sgd) + sgd->length)) {
 115			sgd->length += sge_len;
 116			dst->sg.size += sge_len;
 117		} else if (!sk_msg_full(dst)) {
 118			sge_off = sge->offset + off;
 119			sk_msg_page_add(dst, sg_page(sge), sge_len, sge_off);
 120		} else {
 121			return -ENOSPC;
 122		}
 123
 124		off = 0;
 125		len -= sge_len;
 126		sk_mem_charge(sk, sge_len);
 127		sk_msg_iter_var_next(i);
 128		if (i == src->sg.end && len)
 129			return -ENOSPC;
 130		sge = sk_msg_elem(src, i);
 131	}
 132
 133	return 0;
 134}
 135EXPORT_SYMBOL_GPL(sk_msg_clone);
 136
 137void sk_msg_return_zero(struct sock *sk, struct sk_msg *msg, int bytes)
 138{
 139	int i = msg->sg.start;
 140
 141	do {
 142		struct scatterlist *sge = sk_msg_elem(msg, i);
 143
 144		if (bytes < sge->length) {
 145			sge->length -= bytes;
 146			sge->offset += bytes;
 147			sk_mem_uncharge(sk, bytes);
 148			break;
 149		}
 150
 151		sk_mem_uncharge(sk, sge->length);
 152		bytes -= sge->length;
 153		sge->length = 0;
 154		sge->offset = 0;
 155		sk_msg_iter_var_next(i);
 156	} while (bytes && i != msg->sg.end);
 157	msg->sg.start = i;
 158}
 159EXPORT_SYMBOL_GPL(sk_msg_return_zero);
 160
 161void sk_msg_return(struct sock *sk, struct sk_msg *msg, int bytes)
 162{
 163	int i = msg->sg.start;
 164
 165	do {
 166		struct scatterlist *sge = &msg->sg.data[i];
 167		int uncharge = (bytes < sge->length) ? bytes : sge->length;
 168
 169		sk_mem_uncharge(sk, uncharge);
 170		bytes -= uncharge;
 171		sk_msg_iter_var_next(i);
 172	} while (i != msg->sg.end);
 173}
 174EXPORT_SYMBOL_GPL(sk_msg_return);
 175
 176static int sk_msg_free_elem(struct sock *sk, struct sk_msg *msg, u32 i,
 177			    bool charge)
 178{
 179	struct scatterlist *sge = sk_msg_elem(msg, i);
 180	u32 len = sge->length;
 181
 182	/* When the skb owns the memory we free it from consume_skb path. */
 183	if (!msg->skb) {
 184		if (charge)
 185			sk_mem_uncharge(sk, len);
 186		put_page(sg_page(sge));
 187	}
 188	memset(sge, 0, sizeof(*sge));
 189	return len;
 190}
 191
 192static int __sk_msg_free(struct sock *sk, struct sk_msg *msg, u32 i,
 193			 bool charge)
 194{
 195	struct scatterlist *sge = sk_msg_elem(msg, i);
 196	int freed = 0;
 197
 198	while (msg->sg.size) {
 199		msg->sg.size -= sge->length;
 200		freed += sk_msg_free_elem(sk, msg, i, charge);
 201		sk_msg_iter_var_next(i);
 202		sk_msg_check_to_free(msg, i, msg->sg.size);
 203		sge = sk_msg_elem(msg, i);
 204	}
 205	consume_skb(msg->skb);
 206	sk_msg_init(msg);
 207	return freed;
 208}
 209
 210int sk_msg_free_nocharge(struct sock *sk, struct sk_msg *msg)
 211{
 212	return __sk_msg_free(sk, msg, msg->sg.start, false);
 213}
 214EXPORT_SYMBOL_GPL(sk_msg_free_nocharge);
 215
 216int sk_msg_free(struct sock *sk, struct sk_msg *msg)
 217{
 218	return __sk_msg_free(sk, msg, msg->sg.start, true);
 219}
 220EXPORT_SYMBOL_GPL(sk_msg_free);
 221
 222static void __sk_msg_free_partial(struct sock *sk, struct sk_msg *msg,
 223				  u32 bytes, bool charge)
 224{
 225	struct scatterlist *sge;
 226	u32 i = msg->sg.start;
 227
 228	while (bytes) {
 229		sge = sk_msg_elem(msg, i);
 230		if (!sge->length)
 231			break;
 232		if (bytes < sge->length) {
 233			if (charge)
 234				sk_mem_uncharge(sk, bytes);
 235			sge->length -= bytes;
 236			sge->offset += bytes;
 237			msg->sg.size -= bytes;
 238			break;
 239		}
 240
 241		msg->sg.size -= sge->length;
 242		bytes -= sge->length;
 243		sk_msg_free_elem(sk, msg, i, charge);
 244		sk_msg_iter_var_next(i);
 245		sk_msg_check_to_free(msg, i, bytes);
 246	}
 247	msg->sg.start = i;
 248}
 249
 250void sk_msg_free_partial(struct sock *sk, struct sk_msg *msg, u32 bytes)
 251{
 252	__sk_msg_free_partial(sk, msg, bytes, true);
 253}
 254EXPORT_SYMBOL_GPL(sk_msg_free_partial);
 255
 256void sk_msg_free_partial_nocharge(struct sock *sk, struct sk_msg *msg,
 257				  u32 bytes)
 258{
 259	__sk_msg_free_partial(sk, msg, bytes, false);
 260}
 261
 262void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len)
 263{
 264	int trim = msg->sg.size - len;
 265	u32 i = msg->sg.end;
 266
 267	if (trim <= 0) {
 268		WARN_ON(trim < 0);
 269		return;
 270	}
 271
 272	sk_msg_iter_var_prev(i);
 273	msg->sg.size = len;
 274	while (msg->sg.data[i].length &&
 275	       trim >= msg->sg.data[i].length) {
 276		trim -= msg->sg.data[i].length;
 277		sk_msg_free_elem(sk, msg, i, true);
 278		sk_msg_iter_var_prev(i);
 279		if (!trim)
 280			goto out;
 281	}
 282
 283	msg->sg.data[i].length -= trim;
 284	sk_mem_uncharge(sk, trim);
 285	/* Adjust copybreak if it falls into the trimmed part of last buf */
 286	if (msg->sg.curr == i && msg->sg.copybreak > msg->sg.data[i].length)
 287		msg->sg.copybreak = msg->sg.data[i].length;
 288out:
 289	sk_msg_iter_var_next(i);
 290	msg->sg.end = i;
 291
 292	/* If we trim data a full sg elem before curr pointer update
 293	 * copybreak and current so that any future copy operations
 294	 * start at new copy location.
 295	 * However trimed data that has not yet been used in a copy op
 296	 * does not require an update.
 297	 */
 298	if (!msg->sg.size) {
 299		msg->sg.curr = msg->sg.start;
 300		msg->sg.copybreak = 0;
 301	} else if (sk_msg_iter_dist(msg->sg.start, msg->sg.curr) >=
 302		   sk_msg_iter_dist(msg->sg.start, msg->sg.end)) {
 303		sk_msg_iter_var_prev(i);
 304		msg->sg.curr = i;
 305		msg->sg.copybreak = msg->sg.data[i].length;
 306	}
 307}
 308EXPORT_SYMBOL_GPL(sk_msg_trim);
 309
 310int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
 311			      struct sk_msg *msg, u32 bytes)
 312{
 313	int i, maxpages, ret = 0, num_elems = sk_msg_elem_used(msg);
 314	const int to_max_pages = MAX_MSG_FRAGS;
 315	struct page *pages[MAX_MSG_FRAGS];
 316	ssize_t orig, copied, use, offset;
 317
 318	orig = msg->sg.size;
 319	while (bytes > 0) {
 320		i = 0;
 321		maxpages = to_max_pages - num_elems;
 322		if (maxpages == 0) {
 323			ret = -EFAULT;
 324			goto out;
 325		}
 326
 327		copied = iov_iter_get_pages2(from, pages, bytes, maxpages,
 328					    &offset);
 329		if (copied <= 0) {
 330			ret = -EFAULT;
 331			goto out;
 332		}
 333
 334		bytes -= copied;
 335		msg->sg.size += copied;
 336
 337		while (copied) {
 338			use = min_t(int, copied, PAGE_SIZE - offset);
 339			sg_set_page(&msg->sg.data[msg->sg.end],
 340				    pages[i], use, offset);
 341			sg_unmark_end(&msg->sg.data[msg->sg.end]);
 342			sk_mem_charge(sk, use);
 343
 344			offset = 0;
 345			copied -= use;
 346			sk_msg_iter_next(msg, end);
 347			num_elems++;
 348			i++;
 349		}
 350		/* When zerocopy is mixed with sk_msg_*copy* operations we
 351		 * may have a copybreak set in this case clear and prefer
 352		 * zerocopy remainder when possible.
 353		 */
 354		msg->sg.copybreak = 0;
 355		msg->sg.curr = msg->sg.end;
 356	}
 357out:
 358	/* Revert iov_iter updates, msg will need to use 'trim' later if it
 359	 * also needs to be cleared.
 360	 */
 361	if (ret)
 362		iov_iter_revert(from, msg->sg.size - orig);
 363	return ret;
 364}
 365EXPORT_SYMBOL_GPL(sk_msg_zerocopy_from_iter);
 366
 367int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
 368			     struct sk_msg *msg, u32 bytes)
 369{
 370	int ret = -ENOSPC, i = msg->sg.curr;
 371	struct scatterlist *sge;
 372	u32 copy, buf_size;
 373	void *to;
 374
 375	do {
 376		sge = sk_msg_elem(msg, i);
 377		/* This is possible if a trim operation shrunk the buffer */
 378		if (msg->sg.copybreak >= sge->length) {
 379			msg->sg.copybreak = 0;
 380			sk_msg_iter_var_next(i);
 381			if (i == msg->sg.end)
 382				break;
 383			sge = sk_msg_elem(msg, i);
 384		}
 385
 386		buf_size = sge->length - msg->sg.copybreak;
 387		copy = (buf_size > bytes) ? bytes : buf_size;
 388		to = sg_virt(sge) + msg->sg.copybreak;
 389		msg->sg.copybreak += copy;
 390		if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY)
 391			ret = copy_from_iter_nocache(to, copy, from);
 392		else
 393			ret = copy_from_iter(to, copy, from);
 394		if (ret != copy) {
 395			ret = -EFAULT;
 396			goto out;
 397		}
 398		bytes -= copy;
 399		if (!bytes)
 400			break;
 401		msg->sg.copybreak = 0;
 402		sk_msg_iter_var_next(i);
 403	} while (i != msg->sg.end);
 404out:
 405	msg->sg.curr = i;
 406	return ret;
 407}
 408EXPORT_SYMBOL_GPL(sk_msg_memcopy_from_iter);
 409
 410/* Receive sk_msg from psock->ingress_msg to @msg. */
 411int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
 412		   int len, int flags)
 413{
 414	struct iov_iter *iter = &msg->msg_iter;
 415	int peek = flags & MSG_PEEK;
 416	struct sk_msg *msg_rx;
 417	int i, copied = 0;
 418
 419	msg_rx = sk_psock_peek_msg(psock);
 420	while (copied != len) {
 421		struct scatterlist *sge;
 422
 423		if (unlikely(!msg_rx))
 424			break;
 425
 426		i = msg_rx->sg.start;
 427		do {
 428			struct page *page;
 429			int copy;
 430
 431			sge = sk_msg_elem(msg_rx, i);
 432			copy = sge->length;
 433			page = sg_page(sge);
 434			if (copied + copy > len)
 435				copy = len - copied;
 436			copy = copy_page_to_iter(page, sge->offset, copy, iter);
 437			if (!copy) {
 438				copied = copied ? copied : -EFAULT;
 439				goto out;
 440			}
 441
 442			copied += copy;
 443			if (likely(!peek)) {
 444				sge->offset += copy;
 445				sge->length -= copy;
 446				if (!msg_rx->skb)
 447					sk_mem_uncharge(sk, copy);
 448				msg_rx->sg.size -= copy;
 449
 450				if (!sge->length) {
 451					sk_msg_iter_var_next(i);
 452					if (!msg_rx->skb)
 453						put_page(page);
 454				}
 455			} else {
 456				/* Lets not optimize peek case if copy_page_to_iter
 457				 * didn't copy the entire length lets just break.
 458				 */
 459				if (copy != sge->length)
 460					goto out;
 461				sk_msg_iter_var_next(i);
 462			}
 463
 464			if (copied == len)
 465				break;
 466		} while ((i != msg_rx->sg.end) && !sg_is_last(sge));
 467
 468		if (unlikely(peek)) {
 469			msg_rx = sk_psock_next_msg(psock, msg_rx);
 470			if (!msg_rx)
 471				break;
 472			continue;
 473		}
 474
 475		msg_rx->sg.start = i;
 476		if (!sge->length && (i == msg_rx->sg.end || sg_is_last(sge))) {
 477			msg_rx = sk_psock_dequeue_msg(psock);
 478			kfree_sk_msg(msg_rx);
 479		}
 480		msg_rx = sk_psock_peek_msg(psock);
 481	}
 482out:
 483	if (psock->work_state.skb && copied > 0)
 484		schedule_work(&psock->work);
 485	return copied;
 486}
 487EXPORT_SYMBOL_GPL(sk_msg_recvmsg);
 488
 489bool sk_msg_is_readable(struct sock *sk)
 490{
 491	struct sk_psock *psock;
 492	bool empty = true;
 493
 494	rcu_read_lock();
 495	psock = sk_psock(sk);
 496	if (likely(psock))
 497		empty = list_empty(&psock->ingress_msg);
 498	rcu_read_unlock();
 499	return !empty;
 500}
 501EXPORT_SYMBOL_GPL(sk_msg_is_readable);
 502
 503static struct sk_msg *alloc_sk_msg(gfp_t gfp)
 504{
 505	struct sk_msg *msg;
 506
 507	msg = kzalloc(sizeof(*msg), gfp | __GFP_NOWARN);
 508	if (unlikely(!msg))
 509		return NULL;
 510	sg_init_marker(msg->sg.data, NR_MSG_FRAG_IDS);
 511	return msg;
 512}
 513
 514static struct sk_msg *sk_psock_create_ingress_msg(struct sock *sk,
 515						  struct sk_buff *skb)
 516{
 517	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
 518		return NULL;
 519
 520	if (!sk_rmem_schedule(sk, skb, skb->truesize))
 521		return NULL;
 522
 523	return alloc_sk_msg(GFP_KERNEL);
 524}
 525
 526static int sk_psock_skb_ingress_enqueue(struct sk_buff *skb,
 527					u32 off, u32 len,
 528					struct sk_psock *psock,
 529					struct sock *sk,
 530					struct sk_msg *msg)
 531{
 532	int num_sge, copied;
 533
 534	num_sge = skb_to_sgvec(skb, msg->sg.data, off, len);
 535	if (num_sge < 0) {
 536		/* skb linearize may fail with ENOMEM, but lets simply try again
 537		 * later if this happens. Under memory pressure we don't want to
 538		 * drop the skb. We need to linearize the skb so that the mapping
 539		 * in skb_to_sgvec can not error.
 540		 */
 541		if (skb_linearize(skb))
 542			return -EAGAIN;
 543
 544		num_sge = skb_to_sgvec(skb, msg->sg.data, off, len);
 545		if (unlikely(num_sge < 0))
 546			return num_sge;
 547	}
 548
 549	copied = len;
 550	msg->sg.start = 0;
 551	msg->sg.size = copied;
 552	msg->sg.end = num_sge;
 553	msg->skb = skb;
 554
 555	sk_psock_queue_msg(psock, msg);
 556	sk_psock_data_ready(sk, psock);
 557	return copied;
 558}
 559
 560static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb,
 561				     u32 off, u32 len);
 562
 563static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb,
 564				u32 off, u32 len)
 565{
 566	struct sock *sk = psock->sk;
 567	struct sk_msg *msg;
 568	int err;
 569
 570	/* If we are receiving on the same sock skb->sk is already assigned,
 571	 * skip memory accounting and owner transition seeing it already set
 572	 * correctly.
 573	 */
 574	if (unlikely(skb->sk == sk))
 575		return sk_psock_skb_ingress_self(psock, skb, off, len);
 576	msg = sk_psock_create_ingress_msg(sk, skb);
 577	if (!msg)
 578		return -EAGAIN;
 579
 580	/* This will transition ownership of the data from the socket where
 581	 * the BPF program was run initiating the redirect to the socket
 582	 * we will eventually receive this data on. The data will be released
 583	 * from skb_consume found in __tcp_bpf_recvmsg() after its been copied
 584	 * into user buffers.
 585	 */
 586	skb_set_owner_r(skb, sk);
 587	err = sk_psock_skb_ingress_enqueue(skb, off, len, psock, sk, msg);
 588	if (err < 0)
 589		kfree(msg);
 590	return err;
 591}
 592
 593/* Puts an skb on the ingress queue of the socket already assigned to the
 594 * skb. In this case we do not need to check memory limits or skb_set_owner_r
 595 * because the skb is already accounted for here.
 596 */
 597static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb,
 598				     u32 off, u32 len)
 599{
 600	struct sk_msg *msg = alloc_sk_msg(GFP_ATOMIC);
 601	struct sock *sk = psock->sk;
 602	int err;
 603
 604	if (unlikely(!msg))
 605		return -EAGAIN;
 606	skb_set_owner_r(skb, sk);
 607	err = sk_psock_skb_ingress_enqueue(skb, off, len, psock, sk, msg);
 608	if (err < 0)
 609		kfree(msg);
 610	return err;
 611}
 612
 613static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb,
 614			       u32 off, u32 len, bool ingress)
 615{
 
 
 616	if (!ingress) {
 617		if (!sock_writeable(psock->sk))
 618			return -EAGAIN;
 619		return skb_send_sock(psock->sk, skb, off, len);
 620	}
 621	return sk_psock_skb_ingress(psock, skb, off, len);
 
 
 
 
 622}
 623
 624static void sk_psock_skb_state(struct sk_psock *psock,
 625			       struct sk_psock_work_state *state,
 626			       struct sk_buff *skb,
 627			       int len, int off)
 628{
 629	spin_lock_bh(&psock->ingress_lock);
 630	if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
 631		state->skb = skb;
 632		state->len = len;
 633		state->off = off;
 634	} else {
 635		sock_drop(psock->sk, skb);
 636	}
 637	spin_unlock_bh(&psock->ingress_lock);
 638}
 639
 640static void sk_psock_backlog(struct work_struct *work)
 641{
 642	struct sk_psock *psock = container_of(work, struct sk_psock, work);
 
 643	struct sk_psock_work_state *state = &psock->work_state;
 644	struct sk_buff *skb = NULL;
 
 645	bool ingress;
 646	u32 len, off;
 647	int ret;
 648
 649	mutex_lock(&psock->work_mutex);
 650	if (unlikely(state->skb)) {
 651		spin_lock_bh(&psock->ingress_lock);
 652		skb = state->skb;
 653		len = state->len;
 654		off = state->off;
 655		state->skb = NULL;
 656		spin_unlock_bh(&psock->ingress_lock);
 657	}
 658	if (skb)
 659		goto start;
 660
 661	while ((skb = skb_dequeue(&psock->ingress_skb))) {
 662		len = skb->len;
 663		off = 0;
 664		if (skb_bpf_strparser(skb)) {
 665			struct strp_msg *stm = strp_msg(skb);
 666
 667			off = stm->offset;
 668			len = stm->full_len;
 669		}
 670start:
 671		ingress = skb_bpf_ingress(skb);
 672		skb_bpf_redirect_clear(skb);
 673		do {
 674			ret = -EIO;
 675			if (!sock_flag(psock->sk, SOCK_DEAD))
 676				ret = sk_psock_handle_skb(psock, skb, off,
 677							  len, ingress);
 678			if (ret <= 0) {
 679				if (ret == -EAGAIN) {
 680					sk_psock_skb_state(psock, state, skb,
 681							   len, off);
 
 
 
 
 
 682					goto end;
 683				}
 684				/* Hard errors break pipe and stop xmit. */
 685				sk_psock_report_error(psock, ret ? -ret : EPIPE);
 686				sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
 687				sock_drop(psock->sk, skb);
 688				goto end;
 689			}
 690			off += ret;
 691			len -= ret;
 692		} while (len);
 693
 694		if (!ingress)
 695			kfree_skb(skb);
 696	}
 697end:
 698	mutex_unlock(&psock->work_mutex);
 699}
 700
 701struct sk_psock *sk_psock_init(struct sock *sk, int node)
 702{
 703	struct sk_psock *psock;
 704	struct proto *prot;
 705
 706	write_lock_bh(&sk->sk_callback_lock);
 707
 708	if (sk_is_inet(sk) && inet_csk_has_ulp(sk)) {
 709		psock = ERR_PTR(-EINVAL);
 710		goto out;
 711	}
 712
 713	if (sk->sk_user_data) {
 714		psock = ERR_PTR(-EBUSY);
 715		goto out;
 716	}
 717
 718	psock = kzalloc_node(sizeof(*psock), GFP_ATOMIC | __GFP_NOWARN, node);
 719	if (!psock) {
 720		psock = ERR_PTR(-ENOMEM);
 721		goto out;
 722	}
 723
 724	prot = READ_ONCE(sk->sk_prot);
 725	psock->sk = sk;
 726	psock->eval = __SK_NONE;
 727	psock->sk_proto = prot;
 728	psock->saved_unhash = prot->unhash;
 729	psock->saved_destroy = prot->destroy;
 730	psock->saved_close = prot->close;
 731	psock->saved_write_space = sk->sk_write_space;
 732
 733	INIT_LIST_HEAD(&psock->link);
 734	spin_lock_init(&psock->link_lock);
 735
 736	INIT_WORK(&psock->work, sk_psock_backlog);
 737	mutex_init(&psock->work_mutex);
 738	INIT_LIST_HEAD(&psock->ingress_msg);
 739	spin_lock_init(&psock->ingress_lock);
 740	skb_queue_head_init(&psock->ingress_skb);
 741
 742	sk_psock_set_state(psock, SK_PSOCK_TX_ENABLED);
 743	refcount_set(&psock->refcnt, 1);
 744
 745	__rcu_assign_sk_user_data_with_flags(sk, psock,
 746					     SK_USER_DATA_NOCOPY |
 747					     SK_USER_DATA_PSOCK);
 748	sock_hold(sk);
 749
 750out:
 751	write_unlock_bh(&sk->sk_callback_lock);
 752	return psock;
 753}
 754EXPORT_SYMBOL_GPL(sk_psock_init);
 755
 756struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock)
 757{
 758	struct sk_psock_link *link;
 759
 760	spin_lock_bh(&psock->link_lock);
 761	link = list_first_entry_or_null(&psock->link, struct sk_psock_link,
 762					list);
 763	if (link)
 764		list_del(&link->list);
 765	spin_unlock_bh(&psock->link_lock);
 766	return link;
 767}
 768
 769static void __sk_psock_purge_ingress_msg(struct sk_psock *psock)
 770{
 771	struct sk_msg *msg, *tmp;
 772
 773	list_for_each_entry_safe(msg, tmp, &psock->ingress_msg, list) {
 774		list_del(&msg->list);
 775		sk_msg_free(psock->sk, msg);
 776		kfree(msg);
 777	}
 778}
 779
 780static void __sk_psock_zap_ingress(struct sk_psock *psock)
 781{
 782	struct sk_buff *skb;
 783
 784	while ((skb = skb_dequeue(&psock->ingress_skb)) != NULL) {
 785		skb_bpf_redirect_clear(skb);
 786		sock_drop(psock->sk, skb);
 787	}
 788	kfree_skb(psock->work_state.skb);
 789	/* We null the skb here to ensure that calls to sk_psock_backlog
 790	 * do not pick up the free'd skb.
 791	 */
 792	psock->work_state.skb = NULL;
 793	__sk_psock_purge_ingress_msg(psock);
 794}
 795
 796static void sk_psock_link_destroy(struct sk_psock *psock)
 797{
 798	struct sk_psock_link *link, *tmp;
 799
 800	list_for_each_entry_safe(link, tmp, &psock->link, list) {
 801		list_del(&link->list);
 802		sk_psock_free_link(link);
 803	}
 804}
 805
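    	/* Clear TX_ENABLED and purge cork and ingress state. Holding
    	 * ingress_lock ensures no new skb can be redirected onto the
    	 * queue once it has been flushed.
    	 */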
 806void sk_psock_stop(struct sk_psock *psock)
 807{
 808	spin_lock_bh(&psock->ingress_lock);
 809	sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
 810	sk_psock_cork_free(psock);
 811	__sk_psock_zap_ingress(psock);
 812	spin_unlock_bh(&psock->ingress_lock);
 813}
 814
 815static void sk_psock_done_strp(struct sk_psock *psock);
 816
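    	/* Final teardown, run from RCU work queued by sk_psock_drop():
    	 * quiesce the strparser and backlog worker, drop attached
    	 * programs and links, and release the socket references.
    	 */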
 817static void sk_psock_destroy(struct work_struct *work)
 818{
 819	struct sk_psock *psock = container_of(to_rcu_work(work),
 820					      struct sk_psock, rwork);
 821	/* No sk_callback_lock since already detached. */
 822
 823	sk_psock_done_strp(psock);
 824
 825	cancel_work_sync(&psock->work);
 826	mutex_destroy(&psock->work_mutex);
 827
 828	psock_progs_drop(&psock->progs);
 829
 830	sk_psock_link_destroy(psock);
 831	sk_psock_cork_free(psock);
 832
 833	if (psock->sk_redir)
 834		sock_put(psock->sk_redir);
 835	sock_put(psock->sk);
 836	kfree(psock);
 837}
 838
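    	/* Detach the psock from @sk: restore the original proto and
    	 * data_ready callbacks, stop ingress processing, and defer the
    	 * actual free to sk_psock_destroy() after an RCU grace period.
    	 */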
 839void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
 840{
 841	write_lock_bh(&sk->sk_callback_lock);
 842	sk_psock_restore_proto(sk, psock);
 843	rcu_assign_sk_user_data(sk, NULL);
 844	if (psock->progs.stream_parser)
 845		sk_psock_stop_strp(sk, psock);
 846	else if (psock->progs.stream_verdict || psock->progs.skb_verdict)
 847		sk_psock_stop_verdict(sk, psock);
 848	write_unlock_bh(&sk->sk_callback_lock);
 849
 850	sk_psock_stop(psock);
 851
 852	INIT_RCU_WORK(&psock->rwork, sk_psock_destroy);
 853	queue_rcu_work(system_wq, &psock->rwork);
 854}
 855EXPORT_SYMBOL_GPL(sk_psock_drop);
 856
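    	/* Map a BPF program's SK_PASS/SK_DROP return code onto the
    	 * internal __SK_* action; SK_PASS with a redirect target set
    	 * becomes __SK_REDIRECT, and anything unrecognized is a drop.
    	 */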
 857static int sk_psock_map_verd(int verdict, bool redir)
 858{
 859	switch (verdict) {
 860	case SK_PASS:
 861		return redir ? __SK_REDIRECT : __SK_PASS;
 862	case SK_DROP:
 863	default:
 864		break;
 865	}
 866
 867	return __SK_DROP;
 868}
 869
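    	/* Run the msg_parser program on @msg. Without a program the
    	 * message passes. On __SK_REDIRECT the target socket and the
    	 * ingress flag are cached on the psock, with a reference held
    	 * on the target until the next verdict or psock destruction.
    	 */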
 870int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
 871			 struct sk_msg *msg)
 872{
 873	struct bpf_prog *prog;
 874	int ret;
 875
 876	rcu_read_lock();
 877	prog = READ_ONCE(psock->progs.msg_parser);
 878	if (unlikely(!prog)) {
 879		ret = __SK_PASS;
 880		goto out;
 881	}
 882
 883	sk_msg_compute_data_pointers(msg);
 884	msg->sk = sk;
 885	ret = bpf_prog_run_pin_on_cpu(prog, msg);
 886	ret = sk_psock_map_verd(ret, msg->sk_redir);
 887	psock->apply_bytes = msg->apply_bytes;
 888	if (ret == __SK_REDIRECT) {
 889		if (psock->sk_redir) {
 890			sock_put(psock->sk_redir);
 891			psock->sk_redir = NULL;
 892		}
 893		if (!msg->sk_redir) {
 894			ret = __SK_DROP;
 895			goto out;
 896		}
 897		psock->redir_ingress = sk_msg_to_ingress(msg);
 898		psock->sk_redir = msg->sk_redir;
 899		sock_hold(psock->sk_redir);
 900	}
 901out:
 902	rcu_read_unlock();
 903	return ret;
 904}
 905EXPORT_SYMBOL_GPL(sk_psock_msg_verdict);
 906
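    	/* Queue @skb onto the ingress queue of the socket picked by the
    	 * BPF program and kick that psock's backlog worker. The skb is
    	 * dropped if no target was set or the target can no longer
    	 * receive.
    	 */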
 907static int sk_psock_skb_redirect(struct sk_psock *from, struct sk_buff *skb)
 908{
 909	struct sk_psock *psock_other;
 910	struct sock *sk_other;
 911
 912	sk_other = skb_bpf_redirect_fetch(skb);
 913	/* This error indicates a buggy BPF program: it returned a redirect
 914	 * verdict but did not set a redirect socket.
 915	 */
 916	if (unlikely(!sk_other)) {
 917		skb_bpf_redirect_clear(skb);
 918		sock_drop(from->sk, skb);
 919		return -EIO;
 920	}
 921	psock_other = sk_psock(sk_other);
 922	/* This error indicates the socket is being torn down or had another
 923	 * error that caused the pipe to break. We can't send a packet on
 924	 * a socket that is in this state, so we drop the skb.
 925	 */
 926	if (!psock_other || sock_flag(sk_other, SOCK_DEAD)) {
 927		skb_bpf_redirect_clear(skb);
 928		sock_drop(from->sk, skb);
 929		return -EIO;
 930	}
 931	spin_lock_bh(&psock_other->ingress_lock);
 932	if (!sk_psock_test_state(psock_other, SK_PSOCK_TX_ENABLED)) {
 933		spin_unlock_bh(&psock_other->ingress_lock);
 934		skb_bpf_redirect_clear(skb);
 935		sock_drop(from->sk, skb);
 936		return -EIO;
 937	}
 938
 939	skb_queue_tail(&psock_other->ingress_skb, skb);
 940	schedule_work(&psock_other->work);
 941	spin_unlock_bh(&psock_other->ingress_lock);
 942	return 0;
 943}
 944
 945static void sk_psock_tls_verdict_apply(struct sk_buff *skb,
 946				       struct sk_psock *from, int verdict)
 947{
 948	switch (verdict) {
 949	case __SK_REDIRECT:
 950		sk_psock_skb_redirect(from, skb);
 951		break;
 952	case __SK_PASS:
 953	case __SK_DROP:
 954	default:
 955		break;
 956	}
 957}
 958
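    	/* TLS receive path entry point: run the stream verdict program
    	 * on a decrypted skb. Only redirects are applied here; the
    	 * returned verdict tells the TLS layer whether to pass or drop.
    	 */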
 959int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb)
 960{
 961	struct bpf_prog *prog;
 962	int ret = __SK_PASS;
 963
 964	rcu_read_lock();
 965	prog = READ_ONCE(psock->progs.stream_verdict);
 966	if (likely(prog)) {
 967		skb->sk = psock->sk;
 968		skb_dst_drop(skb);
 969		skb_bpf_redirect_clear(skb);
 970		ret = bpf_prog_run_pin_on_cpu(prog, skb);
 971		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
 972		skb->sk = NULL;
 973	}
 974	sk_psock_tls_verdict_apply(skb, psock, ret);
 975	rcu_read_unlock();
 976	return ret;
 977}
 978EXPORT_SYMBOL_GPL(sk_psock_tls_strp_read);
 979
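    	/* Apply a verdict to @skb. __SK_PASS feeds the skb into our own
    	 * ingress message queue (directly when the queue is empty,
    	 * otherwise via the backlog worker to preserve ordering),
    	 * __SK_REDIRECT hands it to another socket, and anything else
    	 * drops it.
    	 */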
 980static int sk_psock_verdict_apply(struct sk_psock *psock, struct sk_buff *skb,
 981				  int verdict)
 982{
 983	struct sock *sk_other;
 984	int err = 0;
 985	u32 len, off;
 986
 987	switch (verdict) {
 988	case __SK_PASS:
 989		err = -EIO;
 990		sk_other = psock->sk;
 991		if (sock_flag(sk_other, SOCK_DEAD) ||
 992		    !sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
 993			skb_bpf_redirect_clear(skb);
 994			goto out_free;
 995		}
 996
 997		skb_bpf_set_ingress(skb);
 998
 999		/* If the queue is empty then we can submit directly
1000		 * into the msg queue. If it's not empty we have to
1001		 * queue work, otherwise we may get out-of-order data.
1002		 * Errors from sk_psock_skb_ingress are handled by
1003		 * retrying later from the workqueue.
1004		 */
1005		if (skb_queue_empty(&psock->ingress_skb)) {
1006			len = skb->len;
1007			off = 0;
1008			if (skb_bpf_strparser(skb)) {
1009				struct strp_msg *stm = strp_msg(skb);
1010
1011				off = stm->offset;
1012				len = stm->full_len;
1013			}
1014			err = sk_psock_skb_ingress_self(psock, skb, off, len);
1015		}
1016		if (err < 0) {
1017			spin_lock_bh(&psock->ingress_lock);
1018			if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
1019				skb_queue_tail(&psock->ingress_skb, skb);
1020				schedule_work(&psock->work);
1021				err = 0;
1022			}
1023			spin_unlock_bh(&psock->ingress_lock);
1024			if (err < 0) {
1025				skb_bpf_redirect_clear(skb);
1026				goto out_free;
1027			}
1028		}
1029		break;
1030	case __SK_REDIRECT:
1031		err = sk_psock_skb_redirect(psock, skb);
1032		break;
1033	case __SK_DROP:
1034	default:
1035out_free:
1036		sock_drop(psock->sk, skb);
1037	}
1038
1039	return err;
1040}
1041
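    	/* Replacement sk_write_space callback: kick the backlog worker
    	 * while TX is enabled, then chain to the socket's original
    	 * callback.
    	 */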
1042static void sk_psock_write_space(struct sock *sk)
1043{
1044	struct sk_psock *psock;
1045	void (*write_space)(struct sock *sk) = NULL;
1046
1047	rcu_read_lock();
1048	psock = sk_psock(sk);
1049	if (likely(psock)) {
1050		if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
1051			schedule_work(&psock->work);
1052		write_space = psock->saved_write_space;
1053	}
1054	rcu_read_unlock();
1055	if (write_space)
1056		write_space(sk);
1057}
1058
1059#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
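    	/* strparser rcv_msg callback: a complete message has been
    	 * parsed, so run the stream verdict program and apply the
    	 * result.
    	 */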
1060static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
1061{
1062	struct sk_psock *psock;
1063	struct bpf_prog *prog;
1064	int ret = __SK_DROP;
1065	struct sock *sk;
1066
1067	rcu_read_lock();
1068	sk = strp->sk;
1069	psock = sk_psock(sk);
1070	if (unlikely(!psock)) {
1071		sock_drop(sk, skb);
1072		goto out;
1073	}
1074	prog = READ_ONCE(psock->progs.stream_verdict);
1075	if (likely(prog)) {
1076		skb->sk = sk;
1077		skb_dst_drop(skb);
1078		skb_bpf_redirect_clear(skb);
1079		ret = bpf_prog_run_pin_on_cpu(prog, skb);
1080		if (ret == SK_PASS)
1081			skb_bpf_set_strparser(skb);
1082		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
1083		skb->sk = NULL;
1084	}
1085	sk_psock_verdict_apply(psock, skb, ret);
1086out:
1087	rcu_read_unlock();
1088}
1089
1090static int sk_psock_strp_read_done(struct strparser *strp, int err)
1091{
1092	return err;
1093}
1094
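    	/* strparser parse_msg callback: the stream parser program
    	 * returns how many bytes of the stream make up the next
    	 * message; with no program the whole skb is one message.
    	 */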
1095static int sk_psock_strp_parse(struct strparser *strp, struct sk_buff *skb)
1096{
1097	struct sk_psock *psock = container_of(strp, struct sk_psock, strp);
1098	struct bpf_prog *prog;
1099	int ret = skb->len;
1100
1101	rcu_read_lock();
1102	prog = READ_ONCE(psock->progs.stream_parser);
1103	if (likely(prog)) {
1104		skb->sk = psock->sk;
1105		ret = bpf_prog_run_pin_on_cpu(prog, skb);
1106		skb->sk = NULL;
1107	}
1108	rcu_read_unlock();
1109	return ret;
1110}
1111
1112/* Called with socket lock held. */
1113static void sk_psock_strp_data_ready(struct sock *sk)
1114{
1115	struct sk_psock *psock;
1116
1117	rcu_read_lock();
1118	psock = sk_psock(sk);
1119	if (likely(psock)) {
1120		if (tls_sw_has_ctx_rx(sk)) {
1121			psock->saved_data_ready(sk);
1122		} else {
1123			write_lock_bh(&sk->sk_callback_lock);
1124			strp_data_ready(&psock->strp);
1125			write_unlock_bh(&sk->sk_callback_lock);
1126		}
1127	}
1128	rcu_read_unlock();
1129}
1130
1131int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock)
1132{
1133	static const struct strp_callbacks cb = {
1134		.rcv_msg	= sk_psock_strp_read,
1135		.read_sock_done	= sk_psock_strp_read_done,
1136		.parse_msg	= sk_psock_strp_parse,
1137	};
1138
1139	return strp_init(&psock->strp, sk, &cb);
1140}
1141
1142void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
1143{
1144	if (psock->saved_data_ready)
1145		return;
1146
1147	psock->saved_data_ready = sk->sk_data_ready;
1148	sk->sk_data_ready = sk_psock_strp_data_ready;
1149	sk->sk_write_space = sk_psock_write_space;
1150}
1151
1152void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock)
1153{
1154	psock_set_prog(&psock->progs.stream_parser, NULL);
1155
1156	if (!psock->saved_data_ready)
1157		return;
1158
1159	sk->sk_data_ready = psock->saved_data_ready;
1160	psock->saved_data_ready = NULL;
1161	strp_stop(&psock->strp);
1162}
1163
1164static void sk_psock_done_strp(struct sk_psock *psock)
1165{
1166	/* Parser has been stopped */
1167	if (psock->progs.stream_parser)
1168		strp_done(&psock->strp);
1169}
1170#else
1171static void sk_psock_done_strp(struct sk_psock *psock)
1172{
1173}
1174#endif /* CONFIG_BPF_STREAM_PARSER */
1175
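    	/* read_skb callback for the parser-less path: run the stream or
    	 * skb verdict program on one skb and return the number of bytes
    	 * consumed, or a negative error.
    	 */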
1176static int sk_psock_verdict_recv(struct sock *sk, struct sk_buff *skb)
1177{
1178	struct sk_psock *psock;
1179	struct bpf_prog *prog;
1180	int ret = __SK_DROP;
1181	int len = skb->len;
1182
1183	skb_get(skb);
1184
1185	rcu_read_lock();
1186	psock = sk_psock(sk);
1187	if (unlikely(!psock)) {
1188		len = 0;
1189		sock_drop(sk, skb);
1190		goto out;
1191	}
1192	prog = READ_ONCE(psock->progs.stream_verdict);
1193	if (!prog)
1194		prog = READ_ONCE(psock->progs.skb_verdict);
1195	if (likely(prog)) {
1196		skb_dst_drop(skb);
1197		skb_bpf_redirect_clear(skb);
1198		ret = bpf_prog_run_pin_on_cpu(prog, skb);
1199		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
1200	}
1201	ret = sk_psock_verdict_apply(psock, skb, ret);
1202	if (ret < 0)
1203		len = ret;
1204out:
1205	rcu_read_unlock();
1206	return len;
1207}
1208
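    	/* Replacement sk_data_ready: pull queued skbs off the socket
    	 * via the protocol's read_skb and run the verdict on each.
    	 */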
1209static void sk_psock_verdict_data_ready(struct sock *sk)
1210{
1211	struct socket *sock = sk->sk_socket;
1212
1213	if (unlikely(!sock || !sock->ops || !sock->ops->read_skb))
1214		return;
1215	sock->ops->read_skb(sk, sk_psock_verdict_recv);
1216}
1217
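    	/* Hook the socket's data_ready and write_space callbacks so
    	 * incoming data is fed through the verdict program; the
    	 * original data_ready is saved for sk_psock_stop_verdict().
    	 */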
1218void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock)
1219{
1220	if (psock->saved_data_ready)
1221		return;
1222
1223	psock->saved_data_ready = sk->sk_data_ready;
1224	sk->sk_data_ready = sk_psock_verdict_data_ready;
1225	sk->sk_write_space = sk_psock_write_space;
1226}
1227
1228void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock)
1229{
1230	psock_set_prog(&psock->progs.stream_verdict, NULL);
1231	psock_set_prog(&psock->progs.skb_verdict, NULL);
1232
1233	if (!psock->saved_data_ready)
1234		return;
1235
1236	sk->sk_data_ready = psock->saved_data_ready;
1237	psock->saved_data_ready = NULL;
1238}