v5.14.15
   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */
   3
   4#include <linux/skmsg.h>
   5#include <linux/skbuff.h>
   6#include <linux/scatterlist.h>
   7
   8#include <net/sock.h>
   9#include <net/tcp.h>
  10#include <net/tls.h>
  11
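/* Note: msg->sg.data is treated as a ring; sg.start and sg.end wrap around
 * the end of the array. The helper below permits coalescing into the tail
 * element only while elem_first_coalesce still lies inside the in-use
 * region, handling both the unwrapped (end > start) and wrapped
 * (end < start) layouts.
 */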
  12static bool sk_msg_try_coalesce_ok(struct sk_msg *msg, int elem_first_coalesce)
  13{
  14	if (msg->sg.end > msg->sg.start &&
  15	    elem_first_coalesce < msg->sg.end)
  16		return true;
  17
  18	if (msg->sg.end < msg->sg.start &&
  19	    (elem_first_coalesce > msg->sg.start ||
  20	     elem_first_coalesce < msg->sg.end))
  21		return true;
  22
  23	return false;
  24}
  25
  26int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len,
  27		 int elem_first_coalesce)
  28{
  29	struct page_frag *pfrag = sk_page_frag(sk);
  30	int ret = 0;
  31
  32	len -= msg->sg.size;
  33	while (len > 0) {
  34		struct scatterlist *sge;
  35		u32 orig_offset;
  36		int use, i;
  37
  38		if (!sk_page_frag_refill(sk, pfrag))
  39			return -ENOMEM;
  40
  41		orig_offset = pfrag->offset;
  42		use = min_t(int, len, pfrag->size - orig_offset);
  43		if (!sk_wmem_schedule(sk, use))
  44			return -ENOMEM;
  45
  46		i = msg->sg.end;
  47		sk_msg_iter_var_prev(i);
  48		sge = &msg->sg.data[i];
  49
  50		if (sk_msg_try_coalesce_ok(msg, elem_first_coalesce) &&
  51		    sg_page(sge) == pfrag->page &&
  52		    sge->offset + sge->length == orig_offset) {
  53			sge->length += use;
  54		} else {
  55			if (sk_msg_full(msg)) {
  56				ret = -ENOSPC;
  57				break;
  58			}
  59
  60			sge = &msg->sg.data[msg->sg.end];
  61			sg_unmark_end(sge);
  62			sg_set_page(sge, pfrag->page, use, orig_offset);
  63			get_page(pfrag->page);
  64			sk_msg_iter_next(msg, end);
  65		}
  66
  67		sk_mem_charge(sk, use);
  68		msg->sg.size += use;
  69		pfrag->offset += use;
  70		len -= use;
  71	}
  72
  73	return ret;
  74}
  75EXPORT_SYMBOL_GPL(sk_msg_alloc);
  76
  77int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src,
  78		 u32 off, u32 len)
  79{
  80	int i = src->sg.start;
  81	struct scatterlist *sge = sk_msg_elem(src, i);
  82	struct scatterlist *sgd = NULL;
  83	u32 sge_len, sge_off;
  84
  85	while (off) {
  86		if (sge->length > off)
  87			break;
  88		off -= sge->length;
  89		sk_msg_iter_var_next(i);
  90		if (i == src->sg.end && off)
  91			return -ENOSPC;
  92		sge = sk_msg_elem(src, i);
  93	}
  94
  95	while (len) {
  96		sge_len = sge->length - off;
  97		if (sge_len > len)
  98			sge_len = len;
  99
 100		if (dst->sg.end)
 101			sgd = sk_msg_elem(dst, dst->sg.end - 1);
 102
 103		if (sgd &&
 104		    (sg_page(sge) == sg_page(sgd)) &&
 105		    (sg_virt(sge) + off == sg_virt(sgd) + sgd->length)) {
 106			sgd->length += sge_len;
 107			dst->sg.size += sge_len;
 108		} else if (!sk_msg_full(dst)) {
 109			sge_off = sge->offset + off;
 110			sk_msg_page_add(dst, sg_page(sge), sge_len, sge_off);
 111		} else {
 112			return -ENOSPC;
 113		}
 114
 115		off = 0;
 116		len -= sge_len;
 117		sk_mem_charge(sk, sge_len);
 118		sk_msg_iter_var_next(i);
 119		if (i == src->sg.end && len)
 120			return -ENOSPC;
 121		sge = sk_msg_elem(src, i);
 122	}
 123
 124	return 0;
 125}
 126EXPORT_SYMBOL_GPL(sk_msg_clone);
 127
 128void sk_msg_return_zero(struct sock *sk, struct sk_msg *msg, int bytes)
 129{
 130	int i = msg->sg.start;
 131
 132	do {
 133		struct scatterlist *sge = sk_msg_elem(msg, i);
 134
 135		if (bytes < sge->length) {
 136			sge->length -= bytes;
 137			sge->offset += bytes;
 138			sk_mem_uncharge(sk, bytes);
 139			break;
 140		}
 141
 142		sk_mem_uncharge(sk, sge->length);
 143		bytes -= sge->length;
 144		sge->length = 0;
 145		sge->offset = 0;
 146		sk_msg_iter_var_next(i);
 147	} while (bytes && i != msg->sg.end);
 148	msg->sg.start = i;
 149}
 150EXPORT_SYMBOL_GPL(sk_msg_return_zero);
 151
 152void sk_msg_return(struct sock *sk, struct sk_msg *msg, int bytes)
 153{
 154	int i = msg->sg.start;
 155
 156	do {
 157		struct scatterlist *sge = &msg->sg.data[i];
 158		int uncharge = (bytes < sge->length) ? bytes : sge->length;
 159
 160		sk_mem_uncharge(sk, uncharge);
 161		bytes -= uncharge;
 162		sk_msg_iter_var_next(i);
 163	} while (i != msg->sg.end);
 164}
 165EXPORT_SYMBOL_GPL(sk_msg_return);
 166
 167static int sk_msg_free_elem(struct sock *sk, struct sk_msg *msg, u32 i,
 168			    bool charge)
 169{
 170	struct scatterlist *sge = sk_msg_elem(msg, i);
 171	u32 len = sge->length;
 172
 173	/* When the skb owns the memory we free it from consume_skb path. */
 174	if (!msg->skb) {
 175		if (charge)
 176			sk_mem_uncharge(sk, len);
 177		put_page(sg_page(sge));
 178	}
 179	memset(sge, 0, sizeof(*sge));
 180	return len;
 181}
 182
 183static int __sk_msg_free(struct sock *sk, struct sk_msg *msg, u32 i,
 184			 bool charge)
 185{
 186	struct scatterlist *sge = sk_msg_elem(msg, i);
 187	int freed = 0;
 188
 189	while (msg->sg.size) {
 190		msg->sg.size -= sge->length;
 191		freed += sk_msg_free_elem(sk, msg, i, charge);
 192		sk_msg_iter_var_next(i);
 193		sk_msg_check_to_free(msg, i, msg->sg.size);
 194		sge = sk_msg_elem(msg, i);
 195	}
 196	consume_skb(msg->skb);
 197	sk_msg_init(msg);
 198	return freed;
 199}
 200
 201int sk_msg_free_nocharge(struct sock *sk, struct sk_msg *msg)
 202{
 203	return __sk_msg_free(sk, msg, msg->sg.start, false);
 204}
 205EXPORT_SYMBOL_GPL(sk_msg_free_nocharge);
 206
 207int sk_msg_free(struct sock *sk, struct sk_msg *msg)
 208{
 209	return __sk_msg_free(sk, msg, msg->sg.start, true);
 210}
 211EXPORT_SYMBOL_GPL(sk_msg_free);
 212
 213static void __sk_msg_free_partial(struct sock *sk, struct sk_msg *msg,
 214				  u32 bytes, bool charge)
 215{
 216	struct scatterlist *sge;
 217	u32 i = msg->sg.start;
 218
 219	while (bytes) {
 220		sge = sk_msg_elem(msg, i);
 221		if (!sge->length)
 222			break;
 223		if (bytes < sge->length) {
 224			if (charge)
 225				sk_mem_uncharge(sk, bytes);
 226			sge->length -= bytes;
 227			sge->offset += bytes;
 228			msg->sg.size -= bytes;
 229			break;
 230		}
 231
 232		msg->sg.size -= sge->length;
 233		bytes -= sge->length;
 234		sk_msg_free_elem(sk, msg, i, charge);
 235		sk_msg_iter_var_next(i);
 236		sk_msg_check_to_free(msg, i, bytes);
 237	}
 238	msg->sg.start = i;
 239}
 240
 241void sk_msg_free_partial(struct sock *sk, struct sk_msg *msg, u32 bytes)
 242{
 243	__sk_msg_free_partial(sk, msg, bytes, true);
 244}
 245EXPORT_SYMBOL_GPL(sk_msg_free_partial);
 246
 247void sk_msg_free_partial_nocharge(struct sock *sk, struct sk_msg *msg,
 248				  u32 bytes)
 249{
 250	__sk_msg_free_partial(sk, msg, bytes, false);
 251}
 252
 253void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len)
 254{
 255	int trim = msg->sg.size - len;
 256	u32 i = msg->sg.end;
 257
 258	if (trim <= 0) {
 259		WARN_ON(trim < 0);
 260		return;
 261	}
 262
 263	sk_msg_iter_var_prev(i);
 264	msg->sg.size = len;
 265	while (msg->sg.data[i].length &&
 266	       trim >= msg->sg.data[i].length) {
 267		trim -= msg->sg.data[i].length;
 268		sk_msg_free_elem(sk, msg, i, true);
 269		sk_msg_iter_var_prev(i);
 270		if (!trim)
 271			goto out;
 272	}
 273
 274	msg->sg.data[i].length -= trim;
 275	sk_mem_uncharge(sk, trim);
 276	/* Adjust copybreak if it falls into the trimmed part of last buf */
 277	if (msg->sg.curr == i && msg->sg.copybreak > msg->sg.data[i].length)
 278		msg->sg.copybreak = msg->sg.data[i].length;
 279out:
 280	sk_msg_iter_var_next(i);
 281	msg->sg.end = i;
 282
 283	/* If we trim data a full sg elem before curr pointer update
 284	 * copybreak and current so that any future copy operations
 285	 * start at new copy location.
  286	 * However trimmed data that has not yet been used in a copy op
 287	 * does not require an update.
 288	 */
 289	if (!msg->sg.size) {
 290		msg->sg.curr = msg->sg.start;
 291		msg->sg.copybreak = 0;
 292	} else if (sk_msg_iter_dist(msg->sg.start, msg->sg.curr) >=
 293		   sk_msg_iter_dist(msg->sg.start, msg->sg.end)) {
 294		sk_msg_iter_var_prev(i);
 295		msg->sg.curr = i;
 296		msg->sg.copybreak = msg->sg.data[i].length;
 297	}
 298}
 299EXPORT_SYMBOL_GPL(sk_msg_trim);
 300
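/* Note: sk_msg_zerocopy_from_iter() below pins the user pages backing @from
 * via iov_iter_get_pages() and links them directly into msg->sg.data, so the
 * payload is never copied; each fragment is charged to the socket with
 * sk_mem_charge(), and on failure the iterator is reverted so the caller can
 * trim the msg.
 */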
 301int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
 302			      struct sk_msg *msg, u32 bytes)
 303{
 304	int i, maxpages, ret = 0, num_elems = sk_msg_elem_used(msg);
 305	const int to_max_pages = MAX_MSG_FRAGS;
 306	struct page *pages[MAX_MSG_FRAGS];
 307	ssize_t orig, copied, use, offset;
 308
 309	orig = msg->sg.size;
 310	while (bytes > 0) {
 311		i = 0;
 312		maxpages = to_max_pages - num_elems;
 313		if (maxpages == 0) {
 314			ret = -EFAULT;
 315			goto out;
 316		}
 317
 318		copied = iov_iter_get_pages(from, pages, bytes, maxpages,
 319					    &offset);
 320		if (copied <= 0) {
 321			ret = -EFAULT;
 322			goto out;
 323		}
 324
 325		iov_iter_advance(from, copied);
 326		bytes -= copied;
 327		msg->sg.size += copied;
 328
 329		while (copied) {
 330			use = min_t(int, copied, PAGE_SIZE - offset);
 331			sg_set_page(&msg->sg.data[msg->sg.end],
 332				    pages[i], use, offset);
 333			sg_unmark_end(&msg->sg.data[msg->sg.end]);
 334			sk_mem_charge(sk, use);
 335
 336			offset = 0;
 337			copied -= use;
 338			sk_msg_iter_next(msg, end);
 339			num_elems++;
 340			i++;
 341		}
 342		/* When zerocopy is mixed with sk_msg_*copy* operations we
 343		 * may have a copybreak set in this case clear and prefer
 344		 * zerocopy remainder when possible.
 345		 */
 346		msg->sg.copybreak = 0;
 347		msg->sg.curr = msg->sg.end;
 348	}
 349out:
 350	/* Revert iov_iter updates, msg will need to use 'trim' later if it
 351	 * also needs to be cleared.
 352	 */
 353	if (ret)
 354		iov_iter_revert(from, msg->sg.size - orig);
 355	return ret;
 356}
 357EXPORT_SYMBOL_GPL(sk_msg_zerocopy_from_iter);
 358
 359int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
 360			     struct sk_msg *msg, u32 bytes)
 361{
 362	int ret = -ENOSPC, i = msg->sg.curr;
 363	struct scatterlist *sge;
 364	u32 copy, buf_size;
 365	void *to;
 366
 367	do {
 368		sge = sk_msg_elem(msg, i);
 369		/* This is possible if a trim operation shrunk the buffer */
 370		if (msg->sg.copybreak >= sge->length) {
 371			msg->sg.copybreak = 0;
 372			sk_msg_iter_var_next(i);
 373			if (i == msg->sg.end)
 374				break;
 375			sge = sk_msg_elem(msg, i);
 376		}
 377
 378		buf_size = sge->length - msg->sg.copybreak;
 379		copy = (buf_size > bytes) ? bytes : buf_size;
 380		to = sg_virt(sge) + msg->sg.copybreak;
 381		msg->sg.copybreak += copy;
 382		if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY)
 383			ret = copy_from_iter_nocache(to, copy, from);
 384		else
 385			ret = copy_from_iter(to, copy, from);
 386		if (ret != copy) {
 387			ret = -EFAULT;
 388			goto out;
 389		}
 390		bytes -= copy;
 391		if (!bytes)
 392			break;
 393		msg->sg.copybreak = 0;
 394		sk_msg_iter_var_next(i);
 395	} while (i != msg->sg.end);
 396out:
 397	msg->sg.curr = i;
 398	return ret;
 399}
 400EXPORT_SYMBOL_GPL(sk_msg_memcopy_from_iter);
 401
 402/* Receive sk_msg from psock->ingress_msg to @msg. */
 403int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
 404		   int len, int flags)
 405{
 406	struct iov_iter *iter = &msg->msg_iter;
 407	int peek = flags & MSG_PEEK;
 408	struct sk_msg *msg_rx;
 409	int i, copied = 0;
 410
 411	msg_rx = sk_psock_peek_msg(psock);
 412	while (copied != len) {
 413		struct scatterlist *sge;
 414
 415		if (unlikely(!msg_rx))
 416			break;
 417
 418		i = msg_rx->sg.start;
 419		do {
 420			struct page *page;
 421			int copy;
 422
 423			sge = sk_msg_elem(msg_rx, i);
 424			copy = sge->length;
 425			page = sg_page(sge);
 426			if (copied + copy > len)
 427				copy = len - copied;
 428			copy = copy_page_to_iter(page, sge->offset, copy, iter);
 429			if (!copy)
 430				return copied ? copied : -EFAULT;
 431
 432			copied += copy;
 433			if (likely(!peek)) {
 434				sge->offset += copy;
 435				sge->length -= copy;
 436				if (!msg_rx->skb)
 437					sk_mem_uncharge(sk, copy);
 438				msg_rx->sg.size -= copy;
 439
 440				if (!sge->length) {
 441					sk_msg_iter_var_next(i);
 442					if (!msg_rx->skb)
 443						put_page(page);
 444				}
 445			} else {
 446				/* Lets not optimize peek case if copy_page_to_iter
 447				 * didn't copy the entire length lets just break.
 448				 */
 449				if (copy != sge->length)
 450					return copied;
 451				sk_msg_iter_var_next(i);
 452			}
 453
 454			if (copied == len)
 455				break;
 456		} while (i != msg_rx->sg.end);
 457
 458		if (unlikely(peek)) {
 459			msg_rx = sk_psock_next_msg(psock, msg_rx);
 460			if (!msg_rx)
 461				break;
 462			continue;
 463		}
 464
 465		msg_rx->sg.start = i;
 466		if (!sge->length && msg_rx->sg.start == msg_rx->sg.end) {
 467			msg_rx = sk_psock_dequeue_msg(psock);
 468			kfree_sk_msg(msg_rx);
 469		}
 470		msg_rx = sk_psock_peek_msg(psock);
 471	}
 472
 473	return copied;
 474}
 475EXPORT_SYMBOL_GPL(sk_msg_recvmsg);
 476
 477static struct sk_msg *sk_psock_create_ingress_msg(struct sock *sk,
 478						  struct sk_buff *skb)
 479{
 480	struct sk_msg *msg;
 481
 482	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
 483		return NULL;
 484
 485	if (!sk_rmem_schedule(sk, skb, skb->truesize))
 486		return NULL;
 487
 488	msg = kzalloc(sizeof(*msg), __GFP_NOWARN | GFP_KERNEL);
 489	if (unlikely(!msg))
 490		return NULL;
 491
 492	sk_msg_init(msg);
 493	return msg;
 494}
 495
 496static int sk_psock_skb_ingress_enqueue(struct sk_buff *skb,
 497					struct sk_psock *psock,
 498					struct sock *sk,
 499					struct sk_msg *msg)
 500{
 501	int num_sge, copied;
 502
 503	/* skb linearize may fail with ENOMEM, but lets simply try again
 504	 * later if this happens. Under memory pressure we don't want to
 505	 * drop the skb. We need to linearize the skb so that the mapping
 506	 * in skb_to_sgvec can not error.
 507	 */
 508	if (skb_linearize(skb))
 509		return -EAGAIN;
 510	num_sge = skb_to_sgvec(skb, msg->sg.data, 0, skb->len);
 511	if (unlikely(num_sge < 0))
 512		return num_sge;
 513
 514	copied = skb->len;
 515	msg->sg.start = 0;
 516	msg->sg.size = copied;
 517	msg->sg.end = num_sge;
 518	msg->skb = skb;
 519
 520	sk_psock_queue_msg(psock, msg);
 521	sk_psock_data_ready(sk, psock);
 522	return copied;
 523}
 524
 525static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb);
 526
 527static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb)
 528{
 529	struct sock *sk = psock->sk;
 530	struct sk_msg *msg;
 531	int err;
 532
 533	/* If we are receiving on the same sock skb->sk is already assigned,
 534	 * skip memory accounting and owner transition seeing it already set
 535	 * correctly.
 536	 */
 537	if (unlikely(skb->sk == sk))
 538		return sk_psock_skb_ingress_self(psock, skb);
 539	msg = sk_psock_create_ingress_msg(sk, skb);
 540	if (!msg)
 541		return -EAGAIN;
 542
 543	/* This will transition ownership of the data from the socket where
 544	 * the BPF program was run initiating the redirect to the socket
 545	 * we will eventually receive this data on. The data will be released
 546	 * from skb_consume found in __tcp_bpf_recvmsg() after its been copied
 547	 * into user buffers.
 548	 */
 549	skb_set_owner_r(skb, sk);
 550	err = sk_psock_skb_ingress_enqueue(skb, psock, sk, msg);
 551	if (err < 0)
 552		kfree(msg);
 553	return err;
 554}
 555
 556/* Puts an skb on the ingress queue of the socket already assigned to the
 557 * skb. In this case we do not need to check memory limits or skb_set_owner_r
 558 * because the skb is already accounted for here.
 559 */
 560static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb)
 561{
 562	struct sk_msg *msg = kzalloc(sizeof(*msg), __GFP_NOWARN | GFP_ATOMIC);
 563	struct sock *sk = psock->sk;
 564	int err;
 565
 566	if (unlikely(!msg))
 567		return -EAGAIN;
 568	sk_msg_init(msg);
 569	skb_set_owner_r(skb, sk);
 570	err = sk_psock_skb_ingress_enqueue(skb, psock, sk, msg);
 571	if (err < 0)
 572		kfree(msg);
 573	return err;
 574}
 575
 576static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb,
 577			       u32 off, u32 len, bool ingress)
 578{
 579	if (!ingress) {
 580		if (!sock_writeable(psock->sk))
 581			return -EAGAIN;
 582		return skb_send_sock(psock->sk, skb, off, len);
 583	}
 584	return sk_psock_skb_ingress(psock, skb);
 585}
 586
 587static void sk_psock_skb_state(struct sk_psock *psock,
 588			       struct sk_psock_work_state *state,
 589			       struct sk_buff *skb,
 590			       int len, int off)
 591{
 592	spin_lock_bh(&psock->ingress_lock);
 593	if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
 594		state->skb = skb;
 595		state->len = len;
 596		state->off = off;
 597	} else {
 598		sock_drop(psock->sk, skb);
 599	}
 600	spin_unlock_bh(&psock->ingress_lock);
 601}
 602
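/* Note: sk_psock_backlog() runs from the psock workqueue and drains
 * psock->ingress_skb. When sk_psock_handle_skb() returns -EAGAIN, the skb and
 * its remaining length/offset are parked in psock->work_state (via
 * sk_psock_skb_state() above) so the next run resumes the partial skb before
 * dequeuing new ones.
 */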
 603static void sk_psock_backlog(struct work_struct *work)
 604{
 605	struct sk_psock *psock = container_of(work, struct sk_psock, work);
 606	struct sk_psock_work_state *state = &psock->work_state;
 607	struct sk_buff *skb = NULL;
 608	bool ingress;
 609	u32 len, off;
 610	int ret;
 611
 612	mutex_lock(&psock->work_mutex);
 613	if (unlikely(state->skb)) {
 614		spin_lock_bh(&psock->ingress_lock);
 615		skb = state->skb;
 616		len = state->len;
 617		off = state->off;
 618		state->skb = NULL;
 619		spin_unlock_bh(&psock->ingress_lock);
 620	}
 621	if (skb)
 622		goto start;
 623
 624	while ((skb = skb_dequeue(&psock->ingress_skb))) {
 625		len = skb->len;
 626		off = 0;
 627start:
 628		ingress = skb_bpf_ingress(skb);
 629		skb_bpf_redirect_clear(skb);
 630		do {
 631			ret = -EIO;
 632			if (!sock_flag(psock->sk, SOCK_DEAD))
 633				ret = sk_psock_handle_skb(psock, skb, off,
 634							  len, ingress);
 635			if (ret <= 0) {
 636				if (ret == -EAGAIN) {
 637					sk_psock_skb_state(psock, state, skb,
 638							   len, off);
 639					goto end;
 640				}
 641				/* Hard errors break pipe and stop xmit. */
 642				sk_psock_report_error(psock, ret ? -ret : EPIPE);
 643				sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
 644				sock_drop(psock->sk, skb);
 645				goto end;
 646			}
 647			off += ret;
 648			len -= ret;
 649		} while (len);
 650
 651		if (!ingress)
 652			kfree_skb(skb);
 653	}
 654end:
 655	mutex_unlock(&psock->work_mutex);
 656}
 657
 658struct sk_psock *sk_psock_init(struct sock *sk, int node)
 659{
 660	struct sk_psock *psock;
 661	struct proto *prot;
 662
 663	write_lock_bh(&sk->sk_callback_lock);
 664
 665	if (sk->sk_user_data) {
 666		psock = ERR_PTR(-EBUSY);
 667		goto out;
 668	}
 669
 670	psock = kzalloc_node(sizeof(*psock), GFP_ATOMIC | __GFP_NOWARN, node);
 671	if (!psock) {
 672		psock = ERR_PTR(-ENOMEM);
 673		goto out;
 674	}
 675
 676	prot = READ_ONCE(sk->sk_prot);
 677	psock->sk = sk;
 678	psock->eval = __SK_NONE;
 679	psock->sk_proto = prot;
 680	psock->saved_unhash = prot->unhash;
 681	psock->saved_close = prot->close;
 682	psock->saved_write_space = sk->sk_write_space;
 683
 684	INIT_LIST_HEAD(&psock->link);
 685	spin_lock_init(&psock->link_lock);
 686
 687	INIT_WORK(&psock->work, sk_psock_backlog);
 688	mutex_init(&psock->work_mutex);
 689	INIT_LIST_HEAD(&psock->ingress_msg);
 690	spin_lock_init(&psock->ingress_lock);
 691	skb_queue_head_init(&psock->ingress_skb);
 692
 693	sk_psock_set_state(psock, SK_PSOCK_TX_ENABLED);
 694	refcount_set(&psock->refcnt, 1);
 695
 696	rcu_assign_sk_user_data_nocopy(sk, psock);
 697	sock_hold(sk);
 698
 699out:
 700	write_unlock_bh(&sk->sk_callback_lock);
 701	return psock;
 702}
 703EXPORT_SYMBOL_GPL(sk_psock_init);
 704
 705struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock)
 706{
 707	struct sk_psock_link *link;
 708
 709	spin_lock_bh(&psock->link_lock);
 710	link = list_first_entry_or_null(&psock->link, struct sk_psock_link,
 711					list);
 712	if (link)
 713		list_del(&link->list);
 714	spin_unlock_bh(&psock->link_lock);
 715	return link;
 716}
 717
 718static void __sk_psock_purge_ingress_msg(struct sk_psock *psock)
 719{
 720	struct sk_msg *msg, *tmp;
 721
 722	list_for_each_entry_safe(msg, tmp, &psock->ingress_msg, list) {
 723		list_del(&msg->list);
 724		sk_msg_free(psock->sk, msg);
 725		kfree(msg);
 726	}
 727}
 728
 729static void __sk_psock_zap_ingress(struct sk_psock *psock)
 730{
 731	struct sk_buff *skb;
 732
 733	while ((skb = skb_dequeue(&psock->ingress_skb)) != NULL) {
 734		skb_bpf_redirect_clear(skb);
 735		sock_drop(psock->sk, skb);
 736	}
 737	kfree_skb(psock->work_state.skb);
 738	/* We null the skb here to ensure that calls to sk_psock_backlog
 739	 * do not pick up the free'd skb.
 740	 */
 741	psock->work_state.skb = NULL;
 742	__sk_psock_purge_ingress_msg(psock);
 743}
 744
 745static void sk_psock_link_destroy(struct sk_psock *psock)
 746{
 747	struct sk_psock_link *link, *tmp;
 748
 749	list_for_each_entry_safe(link, tmp, &psock->link, list) {
 750		list_del(&link->list);
 751		sk_psock_free_link(link);
 752	}
 753}
 754
 755void sk_psock_stop(struct sk_psock *psock, bool wait)
 756{
 757	spin_lock_bh(&psock->ingress_lock);
 758	sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
 759	sk_psock_cork_free(psock);
 760	__sk_psock_zap_ingress(psock);
 761	spin_unlock_bh(&psock->ingress_lock);
 762
 763	if (wait)
 764		cancel_work_sync(&psock->work);
 765}
 766
 767static void sk_psock_done_strp(struct sk_psock *psock);
 768
 769static void sk_psock_destroy(struct work_struct *work)
 770{
 771	struct sk_psock *psock = container_of(to_rcu_work(work),
 772					      struct sk_psock, rwork);
 773	/* No sk_callback_lock since already detached. */
 774
 775	sk_psock_done_strp(psock);
 776
 777	cancel_work_sync(&psock->work);
 778	mutex_destroy(&psock->work_mutex);
 779
 780	psock_progs_drop(&psock->progs);
 781
 782	sk_psock_link_destroy(psock);
 783	sk_psock_cork_free(psock);
 784
 785	if (psock->sk_redir)
 786		sock_put(psock->sk_redir);
 787	sock_put(psock->sk);
 788	kfree(psock);
 789}
 790
 791void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
 792{
 793	write_lock_bh(&sk->sk_callback_lock);
 794	sk_psock_restore_proto(sk, psock);
 795	rcu_assign_sk_user_data(sk, NULL);
 796	if (psock->progs.stream_parser)
 797		sk_psock_stop_strp(sk, psock);
 798	else if (psock->progs.stream_verdict || psock->progs.skb_verdict)
 799		sk_psock_stop_verdict(sk, psock);
 800	write_unlock_bh(&sk->sk_callback_lock);
 801
 802	sk_psock_stop(psock, false);
 803
 804	INIT_RCU_WORK(&psock->rwork, sk_psock_destroy);
 805	queue_rcu_work(system_wq, &psock->rwork);
 806}
 807EXPORT_SYMBOL_GPL(sk_psock_drop);
 808
 809static int sk_psock_map_verd(int verdict, bool redir)
 810{
 811	switch (verdict) {
 812	case SK_PASS:
 813		return redir ? __SK_REDIRECT : __SK_PASS;
 814	case SK_DROP:
 815	default:
 816		break;
 817	}
 818
 819	return __SK_DROP;
 820}
 821
 822int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
 823			 struct sk_msg *msg)
 824{
 825	struct bpf_prog *prog;
 826	int ret;
 827
 828	rcu_read_lock();
 829	prog = READ_ONCE(psock->progs.msg_parser);
 830	if (unlikely(!prog)) {
 831		ret = __SK_PASS;
 832		goto out;
 833	}
 834
 835	sk_msg_compute_data_pointers(msg);
 836	msg->sk = sk;
 837	ret = bpf_prog_run_pin_on_cpu(prog, msg);
 838	ret = sk_psock_map_verd(ret, msg->sk_redir);
 839	psock->apply_bytes = msg->apply_bytes;
 840	if (ret == __SK_REDIRECT) {
 841		if (psock->sk_redir)
 842			sock_put(psock->sk_redir);
 843		psock->sk_redir = msg->sk_redir;
 844		if (!psock->sk_redir) {
 845			ret = __SK_DROP;
 846			goto out;
 847		}
 848		sock_hold(psock->sk_redir);
 849	}
 850out:
 851	rcu_read_unlock();
 852	return ret;
 853}
 854EXPORT_SYMBOL_GPL(sk_psock_msg_verdict);
 855
 856static int sk_psock_skb_redirect(struct sk_psock *from, struct sk_buff *skb)
 857{
 858	struct sk_psock *psock_other;
 859	struct sock *sk_other;
 860
 861	sk_other = skb_bpf_redirect_fetch(skb);
 862	/* This error is a buggy BPF program, it returned a redirect
 863	 * return code, but then didn't set a redirect interface.
 864	 */
 865	if (unlikely(!sk_other)) {
 866		sock_drop(from->sk, skb);
 867		return -EIO;
 868	}
 869	psock_other = sk_psock(sk_other);
 870	/* This error indicates the socket is being torn down or had another
 871	 * error that caused the pipe to break. We can't send a packet on
 872	 * a socket that is in this state so we drop the skb.
 873	 */
 874	if (!psock_other || sock_flag(sk_other, SOCK_DEAD)) {
 875		skb_bpf_redirect_clear(skb);
 876		sock_drop(from->sk, skb);
 877		return -EIO;
 878	}
 879	spin_lock_bh(&psock_other->ingress_lock);
 880	if (!sk_psock_test_state(psock_other, SK_PSOCK_TX_ENABLED)) {
 881		spin_unlock_bh(&psock_other->ingress_lock);
 882		skb_bpf_redirect_clear(skb);
 883		sock_drop(from->sk, skb);
 884		return -EIO;
 885	}
 886
 887	skb_queue_tail(&psock_other->ingress_skb, skb);
 888	schedule_work(&psock_other->work);
 889	spin_unlock_bh(&psock_other->ingress_lock);
 890	return 0;
 891}
 892
 893static void sk_psock_tls_verdict_apply(struct sk_buff *skb,
 894				       struct sk_psock *from, int verdict)
 895{
 896	switch (verdict) {
 897	case __SK_REDIRECT:
 898		sk_psock_skb_redirect(from, skb);
 899		break;
 900	case __SK_PASS:
 901	case __SK_DROP:
 902	default:
 903		break;
 904	}
 905}
 906
 907int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb)
 908{
 909	struct bpf_prog *prog;
 910	int ret = __SK_PASS;
 911
 912	rcu_read_lock();
 913	prog = READ_ONCE(psock->progs.stream_verdict);
 914	if (likely(prog)) {
 915		skb->sk = psock->sk;
 916		skb_dst_drop(skb);
 917		skb_bpf_redirect_clear(skb);
 918		ret = bpf_prog_run_pin_on_cpu(prog, skb);
 919		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
 920		skb->sk = NULL;
 921	}
 922	sk_psock_tls_verdict_apply(skb, psock, ret);
 923	rcu_read_unlock();
 924	return ret;
 925}
 926EXPORT_SYMBOL_GPL(sk_psock_tls_strp_read);
 927
 928static int sk_psock_verdict_apply(struct sk_psock *psock, struct sk_buff *skb,
 929				  int verdict)
 930{
 931	struct sock *sk_other;
 932	int err = 0;
 933
 934	switch (verdict) {
 935	case __SK_PASS:
 936		err = -EIO;
 937		sk_other = psock->sk;
 938		if (sock_flag(sk_other, SOCK_DEAD) ||
 939		    !sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
 940			goto out_free;
 941		}
 942
 943		skb_bpf_set_ingress(skb);
 944
 945		/* If the queue is empty then we can submit directly
 946		 * into the msg queue. If its not empty we have to
 947		 * queue work otherwise we may get OOO data. Otherwise,
 948		 * if sk_psock_skb_ingress errors will be handled by
 949		 * retrying later from workqueue.
 950		 */
 951		if (skb_queue_empty(&psock->ingress_skb)) {
 952			err = sk_psock_skb_ingress_self(psock, skb);
 953		}
 954		if (err < 0) {
 955			spin_lock_bh(&psock->ingress_lock);
 956			if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
 957				skb_queue_tail(&psock->ingress_skb, skb);
 958				schedule_work(&psock->work);
 959				err = 0;
 960			}
 961			spin_unlock_bh(&psock->ingress_lock);
 962			if (err < 0) {
 963				skb_bpf_redirect_clear(skb);
 964				goto out_free;
 965			}
 966		}
 967		break;
 968	case __SK_REDIRECT:
 969		err = sk_psock_skb_redirect(psock, skb);
 970		break;
 971	case __SK_DROP:
 972	default:
 973out_free:
 974		sock_drop(psock->sk, skb);
 975	}
 976
 977	return err;
 978}
 979
 980static void sk_psock_write_space(struct sock *sk)
 981{
 982	struct sk_psock *psock;
 983	void (*write_space)(struct sock *sk) = NULL;
 984
 985	rcu_read_lock();
 986	psock = sk_psock(sk);
 987	if (likely(psock)) {
 988		if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
 989			schedule_work(&psock->work);
 990		write_space = psock->saved_write_space;
 991	}
 992	rcu_read_unlock();
 993	if (write_space)
 994		write_space(sk);
 995}
 996
 997#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
 998static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
 999{
1000	struct sk_psock *psock;
1001	struct bpf_prog *prog;
1002	int ret = __SK_DROP;
1003	struct sock *sk;
1004
1005	rcu_read_lock();
1006	sk = strp->sk;
1007	psock = sk_psock(sk);
1008	if (unlikely(!psock)) {
1009		sock_drop(sk, skb);
1010		goto out;
1011	}
1012	prog = READ_ONCE(psock->progs.stream_verdict);
1013	if (likely(prog)) {
1014		skb->sk = sk;
1015		skb_dst_drop(skb);
1016		skb_bpf_redirect_clear(skb);
1017		ret = bpf_prog_run_pin_on_cpu(prog, skb);
1018		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
1019		skb->sk = NULL;
1020	}
1021	sk_psock_verdict_apply(psock, skb, ret);
1022out:
1023	rcu_read_unlock();
1024}
1025
1026static int sk_psock_strp_read_done(struct strparser *strp, int err)
1027{
1028	return err;
1029}
1030
1031static int sk_psock_strp_parse(struct strparser *strp, struct sk_buff *skb)
1032{
1033	struct sk_psock *psock = container_of(strp, struct sk_psock, strp);
1034	struct bpf_prog *prog;
1035	int ret = skb->len;
1036
1037	rcu_read_lock();
1038	prog = READ_ONCE(psock->progs.stream_parser);
1039	if (likely(prog)) {
1040		skb->sk = psock->sk;
1041		ret = bpf_prog_run_pin_on_cpu(prog, skb);
1042		skb->sk = NULL;
1043	}
1044	rcu_read_unlock();
1045	return ret;
1046}
1047
1048/* Called with socket lock held. */
1049static void sk_psock_strp_data_ready(struct sock *sk)
1050{
1051	struct sk_psock *psock;
1052
1053	rcu_read_lock();
1054	psock = sk_psock(sk);
1055	if (likely(psock)) {
1056		if (tls_sw_has_ctx_rx(sk)) {
1057			psock->saved_data_ready(sk);
1058		} else {
1059			write_lock_bh(&sk->sk_callback_lock);
1060			strp_data_ready(&psock->strp);
1061			write_unlock_bh(&sk->sk_callback_lock);
1062		}
1063	}
1064	rcu_read_unlock();
1065}
1066
1067int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock)
1068{
1069	static const struct strp_callbacks cb = {
1070		.rcv_msg	= sk_psock_strp_read,
1071		.read_sock_done	= sk_psock_strp_read_done,
1072		.parse_msg	= sk_psock_strp_parse,
1073	};
1074
1075	return strp_init(&psock->strp, sk, &cb);
1076}
1077
1078void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
1079{
1080	if (psock->saved_data_ready)
1081		return;
1082
1083	psock->saved_data_ready = sk->sk_data_ready;
1084	sk->sk_data_ready = sk_psock_strp_data_ready;
1085	sk->sk_write_space = sk_psock_write_space;
1086}
1087
1088void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock)
1089{
1090	if (!psock->saved_data_ready)
1091		return;
1092
1093	sk->sk_data_ready = psock->saved_data_ready;
1094	psock->saved_data_ready = NULL;
1095	strp_stop(&psock->strp);
1096}
1097
1098static void sk_psock_done_strp(struct sk_psock *psock)
1099{
1100	/* Parser has been stopped */
1101	if (psock->progs.stream_parser)
1102		strp_done(&psock->strp);
1103}
1104#else
1105static void sk_psock_done_strp(struct sk_psock *psock)
1106{
1107}
1108#endif /* CONFIG_BPF_STREAM_PARSER */
1109
1110static int sk_psock_verdict_recv(read_descriptor_t *desc, struct sk_buff *skb,
1111				 unsigned int offset, size_t orig_len)
1112{
1113	struct sock *sk = (struct sock *)desc->arg.data;
1114	struct sk_psock *psock;
1115	struct bpf_prog *prog;
1116	int ret = __SK_DROP;
1117	int len = skb->len;
1118
1119	/* clone here so sk_eat_skb() in tcp_read_sock does not drop our data */
1120	skb = skb_clone(skb, GFP_ATOMIC);
1121	if (!skb) {
1122		desc->error = -ENOMEM;
1123		return 0;
1124	}
1125
1126	rcu_read_lock();
1127	psock = sk_psock(sk);
1128	if (unlikely(!psock)) {
1129		len = 0;
1130		sock_drop(sk, skb);
1131		goto out;
1132	}
1133	prog = READ_ONCE(psock->progs.stream_verdict);
1134	if (!prog)
1135		prog = READ_ONCE(psock->progs.skb_verdict);
1136	if (likely(prog)) {
1137		skb->sk = sk;
1138		skb_dst_drop(skb);
1139		skb_bpf_redirect_clear(skb);
1140		ret = bpf_prog_run_pin_on_cpu(prog, skb);
1141		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
1142		skb->sk = NULL;
1143	}
1144	if (sk_psock_verdict_apply(psock, skb, ret) < 0)
1145		len = 0;
1146out:
1147	rcu_read_unlock();
1148	return len;
1149}
1150
1151static void sk_psock_verdict_data_ready(struct sock *sk)
1152{
1153	struct socket *sock = sk->sk_socket;
1154	read_descriptor_t desc;
1155
1156	if (unlikely(!sock || !sock->ops || !sock->ops->read_sock))
1157		return;
1158
1159	desc.arg.data = sk;
1160	desc.error = 0;
1161	desc.count = 1;
1162
1163	sock->ops->read_sock(sk, &desc, sk_psock_verdict_recv);
1164}
1165
1166void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock)
1167{
1168	if (psock->saved_data_ready)
1169		return;
1170
1171	psock->saved_data_ready = sk->sk_data_ready;
1172	sk->sk_data_ready = sk_psock_verdict_data_ready;
1173	sk->sk_write_space = sk_psock_write_space;
1174}
1175
1176void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock)
1177{
1178	if (!psock->saved_data_ready)
1179		return;
1180
1181	sk->sk_data_ready = psock->saved_data_ready;
1182	psock->saved_data_ready = NULL;
1183}
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */
   3
   4#include <linux/skmsg.h>
   5#include <linux/skbuff.h>
   6#include <linux/scatterlist.h>
   7
   8#include <net/sock.h>
   9#include <net/tcp.h>
  10#include <net/tls.h>
  11#include <trace/events/sock.h>
  12
  13static bool sk_msg_try_coalesce_ok(struct sk_msg *msg, int elem_first_coalesce)
  14{
  15	if (msg->sg.end > msg->sg.start &&
  16	    elem_first_coalesce < msg->sg.end)
  17		return true;
  18
  19	if (msg->sg.end < msg->sg.start &&
  20	    (elem_first_coalesce > msg->sg.start ||
  21	     elem_first_coalesce < msg->sg.end))
  22		return true;
  23
  24	return false;
  25}
  26
  27int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len,
  28		 int elem_first_coalesce)
  29{
  30	struct page_frag *pfrag = sk_page_frag(sk);
  31	u32 osize = msg->sg.size;
  32	int ret = 0;
  33
  34	len -= msg->sg.size;
  35	while (len > 0) {
  36		struct scatterlist *sge;
  37		u32 orig_offset;
  38		int use, i;
  39
  40		if (!sk_page_frag_refill(sk, pfrag)) {
  41			ret = -ENOMEM;
  42			goto msg_trim;
  43		}
  44
  45		orig_offset = pfrag->offset;
  46		use = min_t(int, len, pfrag->size - orig_offset);
  47		if (!sk_wmem_schedule(sk, use)) {
  48			ret = -ENOMEM;
  49			goto msg_trim;
  50		}
  51
  52		i = msg->sg.end;
  53		sk_msg_iter_var_prev(i);
  54		sge = &msg->sg.data[i];
  55
  56		if (sk_msg_try_coalesce_ok(msg, elem_first_coalesce) &&
  57		    sg_page(sge) == pfrag->page &&
  58		    sge->offset + sge->length == orig_offset) {
  59			sge->length += use;
  60		} else {
  61			if (sk_msg_full(msg)) {
  62				ret = -ENOSPC;
  63				break;
  64			}
  65
  66			sge = &msg->sg.data[msg->sg.end];
  67			sg_unmark_end(sge);
  68			sg_set_page(sge, pfrag->page, use, orig_offset);
  69			get_page(pfrag->page);
  70			sk_msg_iter_next(msg, end);
  71		}
  72
  73		sk_mem_charge(sk, use);
  74		msg->sg.size += use;
  75		pfrag->offset += use;
  76		len -= use;
  77	}
  78
  79	return ret;
  80
  81msg_trim:
  82	sk_msg_trim(sk, msg, osize);
  83	return ret;
  84}
  85EXPORT_SYMBOL_GPL(sk_msg_alloc);
  86
  87int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src,
  88		 u32 off, u32 len)
  89{
  90	int i = src->sg.start;
  91	struct scatterlist *sge = sk_msg_elem(src, i);
  92	struct scatterlist *sgd = NULL;
  93	u32 sge_len, sge_off;
  94
  95	while (off) {
  96		if (sge->length > off)
  97			break;
  98		off -= sge->length;
  99		sk_msg_iter_var_next(i);
 100		if (i == src->sg.end && off)
 101			return -ENOSPC;
 102		sge = sk_msg_elem(src, i);
 103	}
 104
 105	while (len) {
 106		sge_len = sge->length - off;
 107		if (sge_len > len)
 108			sge_len = len;
 109
 110		if (dst->sg.end)
 111			sgd = sk_msg_elem(dst, dst->sg.end - 1);
 112
 113		if (sgd &&
 114		    (sg_page(sge) == sg_page(sgd)) &&
 115		    (sg_virt(sge) + off == sg_virt(sgd) + sgd->length)) {
 116			sgd->length += sge_len;
 117			dst->sg.size += sge_len;
 118		} else if (!sk_msg_full(dst)) {
 119			sge_off = sge->offset + off;
 120			sk_msg_page_add(dst, sg_page(sge), sge_len, sge_off);
 121		} else {
 122			return -ENOSPC;
 123		}
 124
 125		off = 0;
 126		len -= sge_len;
 127		sk_mem_charge(sk, sge_len);
 128		sk_msg_iter_var_next(i);
 129		if (i == src->sg.end && len)
 130			return -ENOSPC;
 131		sge = sk_msg_elem(src, i);
 132	}
 133
 134	return 0;
 135}
 136EXPORT_SYMBOL_GPL(sk_msg_clone);
 137
 138void sk_msg_return_zero(struct sock *sk, struct sk_msg *msg, int bytes)
 139{
 140	int i = msg->sg.start;
 141
 142	do {
 143		struct scatterlist *sge = sk_msg_elem(msg, i);
 144
 145		if (bytes < sge->length) {
 146			sge->length -= bytes;
 147			sge->offset += bytes;
 148			sk_mem_uncharge(sk, bytes);
 149			break;
 150		}
 151
 152		sk_mem_uncharge(sk, sge->length);
 153		bytes -= sge->length;
 154		sge->length = 0;
 155		sge->offset = 0;
 156		sk_msg_iter_var_next(i);
 157	} while (bytes && i != msg->sg.end);
 158	msg->sg.start = i;
 159}
 160EXPORT_SYMBOL_GPL(sk_msg_return_zero);
 161
 162void sk_msg_return(struct sock *sk, struct sk_msg *msg, int bytes)
 163{
 164	int i = msg->sg.start;
 165
 166	do {
 167		struct scatterlist *sge = &msg->sg.data[i];
 168		int uncharge = (bytes < sge->length) ? bytes : sge->length;
 169
 170		sk_mem_uncharge(sk, uncharge);
 171		bytes -= uncharge;
 172		sk_msg_iter_var_next(i);
 173	} while (i != msg->sg.end);
 174}
 175EXPORT_SYMBOL_GPL(sk_msg_return);
 176
 177static int sk_msg_free_elem(struct sock *sk, struct sk_msg *msg, u32 i,
 178			    bool charge)
 179{
 180	struct scatterlist *sge = sk_msg_elem(msg, i);
 181	u32 len = sge->length;
 182
 183	/* When the skb owns the memory we free it from consume_skb path. */
 184	if (!msg->skb) {
 185		if (charge)
 186			sk_mem_uncharge(sk, len);
 187		put_page(sg_page(sge));
 188	}
 189	memset(sge, 0, sizeof(*sge));
 190	return len;
 191}
 192
 193static int __sk_msg_free(struct sock *sk, struct sk_msg *msg, u32 i,
 194			 bool charge)
 195{
 196	struct scatterlist *sge = sk_msg_elem(msg, i);
 197	int freed = 0;
 198
 199	while (msg->sg.size) {
 200		msg->sg.size -= sge->length;
 201		freed += sk_msg_free_elem(sk, msg, i, charge);
 202		sk_msg_iter_var_next(i);
 203		sk_msg_check_to_free(msg, i, msg->sg.size);
 204		sge = sk_msg_elem(msg, i);
 205	}
 206	consume_skb(msg->skb);
 207	sk_msg_init(msg);
 208	return freed;
 209}
 210
 211int sk_msg_free_nocharge(struct sock *sk, struct sk_msg *msg)
 212{
 213	return __sk_msg_free(sk, msg, msg->sg.start, false);
 214}
 215EXPORT_SYMBOL_GPL(sk_msg_free_nocharge);
 216
 217int sk_msg_free(struct sock *sk, struct sk_msg *msg)
 218{
 219	return __sk_msg_free(sk, msg, msg->sg.start, true);
 220}
 221EXPORT_SYMBOL_GPL(sk_msg_free);
 222
 223static void __sk_msg_free_partial(struct sock *sk, struct sk_msg *msg,
 224				  u32 bytes, bool charge)
 225{
 226	struct scatterlist *sge;
 227	u32 i = msg->sg.start;
 228
 229	while (bytes) {
 230		sge = sk_msg_elem(msg, i);
 231		if (!sge->length)
 232			break;
 233		if (bytes < sge->length) {
 234			if (charge)
 235				sk_mem_uncharge(sk, bytes);
 236			sge->length -= bytes;
 237			sge->offset += bytes;
 238			msg->sg.size -= bytes;
 239			break;
 240		}
 241
 242		msg->sg.size -= sge->length;
 243		bytes -= sge->length;
 244		sk_msg_free_elem(sk, msg, i, charge);
 245		sk_msg_iter_var_next(i);
 246		sk_msg_check_to_free(msg, i, bytes);
 247	}
 248	msg->sg.start = i;
 249}
 250
 251void sk_msg_free_partial(struct sock *sk, struct sk_msg *msg, u32 bytes)
 252{
 253	__sk_msg_free_partial(sk, msg, bytes, true);
 254}
 255EXPORT_SYMBOL_GPL(sk_msg_free_partial);
 256
 257void sk_msg_free_partial_nocharge(struct sock *sk, struct sk_msg *msg,
 258				  u32 bytes)
 259{
 260	__sk_msg_free_partial(sk, msg, bytes, false);
 261}
 262
 263void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len)
 264{
 265	int trim = msg->sg.size - len;
 266	u32 i = msg->sg.end;
 267
 268	if (trim <= 0) {
 269		WARN_ON(trim < 0);
 270		return;
 271	}
 272
 273	sk_msg_iter_var_prev(i);
 274	msg->sg.size = len;
 275	while (msg->sg.data[i].length &&
 276	       trim >= msg->sg.data[i].length) {
 277		trim -= msg->sg.data[i].length;
 278		sk_msg_free_elem(sk, msg, i, true);
 279		sk_msg_iter_var_prev(i);
 280		if (!trim)
 281			goto out;
 282	}
 283
 284	msg->sg.data[i].length -= trim;
 285	sk_mem_uncharge(sk, trim);
 286	/* Adjust copybreak if it falls into the trimmed part of last buf */
 287	if (msg->sg.curr == i && msg->sg.copybreak > msg->sg.data[i].length)
 288		msg->sg.copybreak = msg->sg.data[i].length;
 289out:
 290	sk_msg_iter_var_next(i);
 291	msg->sg.end = i;
 292
 293	/* If we trim data a full sg elem before curr pointer update
 294	 * copybreak and current so that any future copy operations
 295	 * start at new copy location.
 296	 * However trimmed data that has not yet been used in a copy op
 297	 * does not require an update.
 298	 */
 299	if (!msg->sg.size) {
 300		msg->sg.curr = msg->sg.start;
 301		msg->sg.copybreak = 0;
 302	} else if (sk_msg_iter_dist(msg->sg.start, msg->sg.curr) >=
 303		   sk_msg_iter_dist(msg->sg.start, msg->sg.end)) {
 304		sk_msg_iter_var_prev(i);
 305		msg->sg.curr = i;
 306		msg->sg.copybreak = msg->sg.data[i].length;
 307	}
 308}
 309EXPORT_SYMBOL_GPL(sk_msg_trim);
 310
 311int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
 312			      struct sk_msg *msg, u32 bytes)
 313{
 314	int i, maxpages, ret = 0, num_elems = sk_msg_elem_used(msg);
 315	const int to_max_pages = MAX_MSG_FRAGS;
 316	struct page *pages[MAX_MSG_FRAGS];
 317	ssize_t orig, copied, use, offset;
 318
 319	orig = msg->sg.size;
 320	while (bytes > 0) {
 321		i = 0;
 322		maxpages = to_max_pages - num_elems;
 323		if (maxpages == 0) {
 324			ret = -EFAULT;
 325			goto out;
 326		}
 327
 328		copied = iov_iter_get_pages2(from, pages, bytes, maxpages,
 329					    &offset);
 330		if (copied <= 0) {
 331			ret = -EFAULT;
 332			goto out;
 333		}
 334
 335		bytes -= copied;
 336		msg->sg.size += copied;
 337
 338		while (copied) {
 339			use = min_t(int, copied, PAGE_SIZE - offset);
 340			sg_set_page(&msg->sg.data[msg->sg.end],
 341				    pages[i], use, offset);
 342			sg_unmark_end(&msg->sg.data[msg->sg.end]);
 343			sk_mem_charge(sk, use);
 344
 345			offset = 0;
 346			copied -= use;
 347			sk_msg_iter_next(msg, end);
 348			num_elems++;
 349			i++;
 350		}
 351		/* When zerocopy is mixed with sk_msg_*copy* operations we
 352		 * may have a copybreak set in this case clear and prefer
 353		 * zerocopy remainder when possible.
 354		 */
 355		msg->sg.copybreak = 0;
 356		msg->sg.curr = msg->sg.end;
 357	}
 358out:
 359	/* Revert iov_iter updates, msg will need to use 'trim' later if it
 360	 * also needs to be cleared.
 361	 */
 362	if (ret)
 363		iov_iter_revert(from, msg->sg.size - orig);
 364	return ret;
 365}
 366EXPORT_SYMBOL_GPL(sk_msg_zerocopy_from_iter);
 367
 368int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
 369			     struct sk_msg *msg, u32 bytes)
 370{
 371	int ret = -ENOSPC, i = msg->sg.curr;
 372	u32 copy, buf_size, copied = 0;
 373	struct scatterlist *sge;
 374	void *to;
 375
 376	do {
 377		sge = sk_msg_elem(msg, i);
 378		/* This is possible if a trim operation shrunk the buffer */
 379		if (msg->sg.copybreak >= sge->length) {
 380			msg->sg.copybreak = 0;
 381			sk_msg_iter_var_next(i);
 382			if (i == msg->sg.end)
 383				break;
 384			sge = sk_msg_elem(msg, i);
 385		}
 386
 387		buf_size = sge->length - msg->sg.copybreak;
 388		copy = (buf_size > bytes) ? bytes : buf_size;
 389		to = sg_virt(sge) + msg->sg.copybreak;
 390		msg->sg.copybreak += copy;
 391		if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY)
 392			ret = copy_from_iter_nocache(to, copy, from);
 393		else
 394			ret = copy_from_iter(to, copy, from);
 395		if (ret != copy) {
 396			ret = -EFAULT;
 397			goto out;
 398		}
 399		bytes -= copy;
 400		copied += copy;
 401		if (!bytes)
 402			break;
 403		msg->sg.copybreak = 0;
 404		sk_msg_iter_var_next(i);
 405	} while (i != msg->sg.end);
 406out:
 407	msg->sg.curr = i;
 408	return (ret < 0) ? ret : copied;
 409}
 410EXPORT_SYMBOL_GPL(sk_msg_memcopy_from_iter);
 411
 412/* Receive sk_msg from psock->ingress_msg to @msg. */
 413int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
 414		   int len, int flags)
 415{
 416	struct iov_iter *iter = &msg->msg_iter;
 417	int peek = flags & MSG_PEEK;
 418	struct sk_msg *msg_rx;
 419	int i, copied = 0;
 420
 421	msg_rx = sk_psock_peek_msg(psock);
 422	while (copied != len) {
 423		struct scatterlist *sge;
 424
 425		if (unlikely(!msg_rx))
 426			break;
 427
 428		i = msg_rx->sg.start;
 429		do {
 430			struct page *page;
 431			int copy;
 432
 433			sge = sk_msg_elem(msg_rx, i);
 434			copy = sge->length;
 435			page = sg_page(sge);
 436			if (copied + copy > len)
 437				copy = len - copied;
 438			if (copy)
 439				copy = copy_page_to_iter(page, sge->offset, copy, iter);
 440			if (!copy) {
 441				copied = copied ? copied : -EFAULT;
 442				goto out;
 443			}
 444
 445			copied += copy;
 446			if (likely(!peek)) {
 447				sge->offset += copy;
 448				sge->length -= copy;
 449				if (!msg_rx->skb) {
 450					sk_mem_uncharge(sk, copy);
 451					atomic_sub(copy, &sk->sk_rmem_alloc);
 452				}
 453				msg_rx->sg.size -= copy;
 454
 455				if (!sge->length) {
 456					sk_msg_iter_var_next(i);
 457					if (!msg_rx->skb)
 458						put_page(page);
 459				}
 460			} else {
 461				/* Lets not optimize peek case if copy_page_to_iter
 462				 * didn't copy the entire length lets just break.
 463				 */
 464				if (copy != sge->length)
 465					goto out;
 466				sk_msg_iter_var_next(i);
 467			}
 468
 469			if (copied == len)
 470				break;
 471		} while ((i != msg_rx->sg.end) && !sg_is_last(sge));
 472
 473		if (unlikely(peek)) {
 474			msg_rx = sk_psock_next_msg(psock, msg_rx);
 475			if (!msg_rx)
 476				break;
 477			continue;
 478		}
 479
 480		msg_rx->sg.start = i;
 481		if (!sge->length && (i == msg_rx->sg.end || sg_is_last(sge))) {
 482			msg_rx = sk_psock_dequeue_msg(psock);
 483			kfree_sk_msg(msg_rx);
 484		}
 485		msg_rx = sk_psock_peek_msg(psock);
 486	}
 487out:
 488	return copied;
 489}
 490EXPORT_SYMBOL_GPL(sk_msg_recvmsg);
 491
 492bool sk_msg_is_readable(struct sock *sk)
 493{
 494	struct sk_psock *psock;
 495	bool empty = true;
 496
 497	rcu_read_lock();
 498	psock = sk_psock(sk);
 499	if (likely(psock))
 500		empty = list_empty(&psock->ingress_msg);
 501	rcu_read_unlock();
 502	return !empty;
 503}
 504EXPORT_SYMBOL_GPL(sk_msg_is_readable);
 505
 506static struct sk_msg *alloc_sk_msg(gfp_t gfp)
 507{
 508	struct sk_msg *msg;
 509
 510	msg = kzalloc(sizeof(*msg), gfp | __GFP_NOWARN);
 511	if (unlikely(!msg))
 512		return NULL;
 513	sg_init_marker(msg->sg.data, NR_MSG_FRAG_IDS);
 514	return msg;
 515}
 516
 517static struct sk_msg *sk_psock_create_ingress_msg(struct sock *sk,
 518						  struct sk_buff *skb)
 519{
 520	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
 521		return NULL;
 522
 523	if (!sk_rmem_schedule(sk, skb, skb->truesize))
 524		return NULL;
 525
 526	return alloc_sk_msg(GFP_KERNEL);
 527}
 528
 529static int sk_psock_skb_ingress_enqueue(struct sk_buff *skb,
 530					u32 off, u32 len,
 531					struct sk_psock *psock,
 532					struct sock *sk,
 533					struct sk_msg *msg)
 534{
 535	int num_sge, copied;
 536
 537	num_sge = skb_to_sgvec(skb, msg->sg.data, off, len);
 538	if (num_sge < 0) {
 539		/* skb linearize may fail with ENOMEM, but lets simply try again
 540		 * later if this happens. Under memory pressure we don't want to
 541		 * drop the skb. We need to linearize the skb so that the mapping
 542		 * in skb_to_sgvec can not error.
 543		 */
 544		if (skb_linearize(skb))
 545			return -EAGAIN;
 546
 547		num_sge = skb_to_sgvec(skb, msg->sg.data, off, len);
 548		if (unlikely(num_sge < 0))
 549			return num_sge;
 550	}
 551
 552	copied = len;
 553	msg->sg.start = 0;
 554	msg->sg.size = copied;
 555	msg->sg.end = num_sge;
 556	msg->skb = skb;
 557
 558	sk_psock_queue_msg(psock, msg);
 559	sk_psock_data_ready(sk, psock);
 560	return copied;
 561}
 562
 563static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb,
 564				     u32 off, u32 len);
 565
 566static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb,
 567				u32 off, u32 len)
 568{
 569	struct sock *sk = psock->sk;
 570	struct sk_msg *msg;
 571	int err;
 572
 573	/* If we are receiving on the same sock skb->sk is already assigned,
 574	 * skip memory accounting and owner transition seeing it already set
 575	 * correctly.
 576	 */
 577	if (unlikely(skb->sk == sk))
 578		return sk_psock_skb_ingress_self(psock, skb, off, len);
 579	msg = sk_psock_create_ingress_msg(sk, skb);
 580	if (!msg)
 581		return -EAGAIN;
 582
 583	/* This will transition ownership of the data from the socket where
 584	 * the BPF program was run initiating the redirect to the socket
 585	 * we will eventually receive this data on. The data will be released
 586	 * from skb_consume found in __tcp_bpf_recvmsg() after its been copied
 587	 * into user buffers.
 588	 */
 589	skb_set_owner_r(skb, sk);
 590	err = sk_psock_skb_ingress_enqueue(skb, off, len, psock, sk, msg);
 591	if (err < 0)
 592		kfree(msg);
 593	return err;
 594}
 595
 596/* Puts an skb on the ingress queue of the socket already assigned to the
 597 * skb. In this case we do not need to check memory limits or skb_set_owner_r
 598 * because the skb is already accounted for here.
 599 */
 600static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb,
 601				     u32 off, u32 len)
 602{
 603	struct sk_msg *msg = alloc_sk_msg(GFP_ATOMIC);
 604	struct sock *sk = psock->sk;
 605	int err;
 606
 607	if (unlikely(!msg))
 608		return -EAGAIN;
 609	skb_set_owner_r(skb, sk);
 610	err = sk_psock_skb_ingress_enqueue(skb, off, len, psock, sk, msg);
 611	if (err < 0)
 612		kfree(msg);
 613	return err;
 614}
 615
 616static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb,
 617			       u32 off, u32 len, bool ingress)
 618{
 619	int err = 0;
 620
 621	if (!ingress) {
 622		if (!sock_writeable(psock->sk))
 623			return -EAGAIN;
 624		return skb_send_sock(psock->sk, skb, off, len);
 625	}
 626	skb_get(skb);
 627	err = sk_psock_skb_ingress(psock, skb, off, len);
 628	if (err < 0)
 629		kfree_skb(skb);
 630	return err;
 631}
 632
 633static void sk_psock_skb_state(struct sk_psock *psock,
 634			       struct sk_psock_work_state *state,
 635			       int len, int off)
 636{
 637	spin_lock_bh(&psock->ingress_lock);
 638	if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
 639		state->len = len;
 640		state->off = off;
 641	}
 642	spin_unlock_bh(&psock->ingress_lock);
 643}
 644
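/* Note: compared with the v5.14.15 backlog above, this version only peeks at
 * the head of psock->ingress_skb and dequeues the skb after it has been fully
 * handled, so an -EAGAIN leaves it queued; retries are driven by
 * schedule_delayed_work() instead of an immediate re-run.
 */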
 645static void sk_psock_backlog(struct work_struct *work)
 646{
 647	struct delayed_work *dwork = to_delayed_work(work);
 648	struct sk_psock *psock = container_of(dwork, struct sk_psock, work);
 649	struct sk_psock_work_state *state = &psock->work_state;
 650	struct sk_buff *skb = NULL;
 651	u32 len = 0, off = 0;
 652	bool ingress;
 653	int ret;
 654
 655	mutex_lock(&psock->work_mutex);
 656	if (unlikely(state->len)) {
 657		len = state->len;
 658		off = state->off;
 659	}
 660
 661	while ((skb = skb_peek(&psock->ingress_skb))) {
 662		len = skb->len;
 663		off = 0;
 664		if (skb_bpf_strparser(skb)) {
 665			struct strp_msg *stm = strp_msg(skb);
 666
 667			off = stm->offset;
 668			len = stm->full_len;
 669		}
 670		ingress = skb_bpf_ingress(skb);
 671		skb_bpf_redirect_clear(skb);
 672		do {
 673			ret = -EIO;
 674			if (!sock_flag(psock->sk, SOCK_DEAD))
 675				ret = sk_psock_handle_skb(psock, skb, off,
 676							  len, ingress);
 677			if (ret <= 0) {
 678				if (ret == -EAGAIN) {
 679					sk_psock_skb_state(psock, state, len, off);
 680
 681					/* Delay slightly to prioritize any
 682					 * other work that might be here.
 683					 */
 684					if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
 685						schedule_delayed_work(&psock->work, 1);
 686					goto end;
 687				}
 688				/* Hard errors break pipe and stop xmit. */
 689				sk_psock_report_error(psock, ret ? -ret : EPIPE);
 690				sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
 691				goto end;
 692			}
 693			off += ret;
 694			len -= ret;
 695		} while (len);
 696
 697		skb = skb_dequeue(&psock->ingress_skb);
 698		kfree_skb(skb);
 699	}
 700end:
 701	mutex_unlock(&psock->work_mutex);
 702}
 703
 704struct sk_psock *sk_psock_init(struct sock *sk, int node)
 705{
 706	struct sk_psock *psock;
 707	struct proto *prot;
 708
 709	write_lock_bh(&sk->sk_callback_lock);
 710
 711	if (sk_is_inet(sk) && inet_csk_has_ulp(sk)) {
 712		psock = ERR_PTR(-EINVAL);
 713		goto out;
 714	}
 715
 716	if (sk->sk_user_data) {
 717		psock = ERR_PTR(-EBUSY);
 718		goto out;
 719	}
 720
 721	psock = kzalloc_node(sizeof(*psock), GFP_ATOMIC | __GFP_NOWARN, node);
 722	if (!psock) {
 723		psock = ERR_PTR(-ENOMEM);
 724		goto out;
 725	}
 726
 727	prot = READ_ONCE(sk->sk_prot);
 728	psock->sk = sk;
 729	psock->eval = __SK_NONE;
 730	psock->sk_proto = prot;
 731	psock->saved_unhash = prot->unhash;
 732	psock->saved_destroy = prot->destroy;
 733	psock->saved_close = prot->close;
 734	psock->saved_write_space = sk->sk_write_space;
 735
 736	INIT_LIST_HEAD(&psock->link);
 737	spin_lock_init(&psock->link_lock);
 738
 739	INIT_DELAYED_WORK(&psock->work, sk_psock_backlog);
 740	mutex_init(&psock->work_mutex);
 741	INIT_LIST_HEAD(&psock->ingress_msg);
 742	spin_lock_init(&psock->ingress_lock);
 743	skb_queue_head_init(&psock->ingress_skb);
 744
 745	sk_psock_set_state(psock, SK_PSOCK_TX_ENABLED);
 746	refcount_set(&psock->refcnt, 1);
 747
 748	__rcu_assign_sk_user_data_with_flags(sk, psock,
 749					     SK_USER_DATA_NOCOPY |
 750					     SK_USER_DATA_PSOCK);
 751	sock_hold(sk);
 752
 753out:
 754	write_unlock_bh(&sk->sk_callback_lock);
 755	return psock;
 756}
 757EXPORT_SYMBOL_GPL(sk_psock_init);
 758
 759struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock)
 760{
 761	struct sk_psock_link *link;
 762
 763	spin_lock_bh(&psock->link_lock);
 764	link = list_first_entry_or_null(&psock->link, struct sk_psock_link,
 765					list);
 766	if (link)
 767		list_del(&link->list);
 768	spin_unlock_bh(&psock->link_lock);
 769	return link;
 770}
 771
 772static void __sk_psock_purge_ingress_msg(struct sk_psock *psock)
 773{
 774	struct sk_msg *msg, *tmp;
 775
 776	list_for_each_entry_safe(msg, tmp, &psock->ingress_msg, list) {
 777		list_del(&msg->list);
 778		if (!msg->skb)
 779			atomic_sub(msg->sg.size, &psock->sk->sk_rmem_alloc);
 780		sk_msg_free(psock->sk, msg);
 781		kfree(msg);
 782	}
 783}
 784
 785static void __sk_psock_zap_ingress(struct sk_psock *psock)
 786{
 787	struct sk_buff *skb;
 788
 789	while ((skb = skb_dequeue(&psock->ingress_skb)) != NULL) {
 790		skb_bpf_redirect_clear(skb);
 791		sock_drop(psock->sk, skb);
 792	}
 793	__sk_psock_purge_ingress_msg(psock);
 794}
 795
 796static void sk_psock_link_destroy(struct sk_psock *psock)
 797{
 798	struct sk_psock_link *link, *tmp;
 799
 800	list_for_each_entry_safe(link, tmp, &psock->link, list) {
 801		list_del(&link->list);
 802		sk_psock_free_link(link);
 803	}
 804}
 805
 806void sk_psock_stop(struct sk_psock *psock)
 807{
 808	spin_lock_bh(&psock->ingress_lock);
 809	sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
 810	sk_psock_cork_free(psock);
 811	spin_unlock_bh(&psock->ingress_lock);
 812}
 813
 814static void sk_psock_done_strp(struct sk_psock *psock);
 815
 816static void sk_psock_destroy(struct work_struct *work)
 817{
 818	struct sk_psock *psock = container_of(to_rcu_work(work),
 819					      struct sk_psock, rwork);
 820	/* No sk_callback_lock since already detached. */
 821
 822	sk_psock_done_strp(psock);
 823
 824	cancel_delayed_work_sync(&psock->work);
 825	__sk_psock_zap_ingress(psock);
 826	mutex_destroy(&psock->work_mutex);
 827
 828	psock_progs_drop(&psock->progs);
 829
 830	sk_psock_link_destroy(psock);
 831	sk_psock_cork_free(psock);
 832
 833	if (psock->sk_redir)
 834		sock_put(psock->sk_redir);
 835	if (psock->sk_pair)
 836		sock_put(psock->sk_pair);
 837	sock_put(psock->sk);
 838	kfree(psock);
 839}
 840
 841void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
 842{
 843	write_lock_bh(&sk->sk_callback_lock);
 844	sk_psock_restore_proto(sk, psock);
 845	rcu_assign_sk_user_data(sk, NULL);
 846	if (psock->progs.stream_parser)
 847		sk_psock_stop_strp(sk, psock);
 848	else if (psock->progs.stream_verdict || psock->progs.skb_verdict)
 849		sk_psock_stop_verdict(sk, psock);
 850	write_unlock_bh(&sk->sk_callback_lock);
 851
 852	sk_psock_stop(psock);
 853
 854	INIT_RCU_WORK(&psock->rwork, sk_psock_destroy);
 855	queue_rcu_work(system_wq, &psock->rwork);
 856}
 857EXPORT_SYMBOL_GPL(sk_psock_drop);
 858
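/* Map the SK_PASS/SK_DROP return code of a BPF program to an internal
 * __SK_* action. SK_PASS becomes __SK_REDIRECT when the program also
 * selected a redirect target; everything else results in __SK_DROP.
 */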
 859static int sk_psock_map_verd(int verdict, bool redir)
 860{
 861	switch (verdict) {
 862	case SK_PASS:
 863		return redir ? __SK_REDIRECT : __SK_PASS;
 864	case SK_DROP:
 865	default:
 866		break;
 867	}
 868
 869	return __SK_DROP;
 870}
 871
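/* Run the msg_parser program (if any) on an sk_msg under RCU. When the
 * verdict is __SK_REDIRECT, the previously cached redirect socket is
 * released and a reference is taken on the new target, and whether the
 * redirect goes to ingress is recorded. Without a program attached the
 * message simply passes.
 */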
 872int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
 873			 struct sk_msg *msg)
 874{
 875	struct bpf_prog *prog;
 876	int ret;
 877
 878	rcu_read_lock();
 879	prog = READ_ONCE(psock->progs.msg_parser);
 880	if (unlikely(!prog)) {
 881		ret = __SK_PASS;
 882		goto out;
 883	}
 884
 885	sk_msg_compute_data_pointers(msg);
 886	msg->sk = sk;
 887	ret = bpf_prog_run_pin_on_cpu(prog, msg);
 888	ret = sk_psock_map_verd(ret, msg->sk_redir);
 889	psock->apply_bytes = msg->apply_bytes;
 890	if (ret == __SK_REDIRECT) {
 891		if (psock->sk_redir) {
 892			sock_put(psock->sk_redir);
 893			psock->sk_redir = NULL;
 894		}
 895		if (!msg->sk_redir) {
 896			ret = __SK_DROP;
 897			goto out;
 898		}
 899		psock->redir_ingress = sk_msg_to_ingress(msg);
 900		psock->sk_redir = msg->sk_redir;
 901		sock_hold(psock->sk_redir);
 902	}
 903out:
 904	rcu_read_unlock();
 905	return ret;
 906}
 907EXPORT_SYMBOL_GPL(sk_psock_msg_verdict);
 908
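/* Forward an skb to the socket selected by the BPF program. The skb is
 * queued on the target psock's ingress_skb list and its backlog work is
 * scheduled. If the target is missing, already dead, or no longer
 * accepting ingress (TX_ENABLED cleared), the skb is dropped and -EIO
 * is returned.
 */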
 909static int sk_psock_skb_redirect(struct sk_psock *from, struct sk_buff *skb)
 910{
 911	struct sk_psock *psock_other;
 912	struct sock *sk_other;
 913
 914	sk_other = skb_bpf_redirect_fetch(skb);
 915	/* This error indicates a buggy BPF program: it returned a redirect
 916	 * verdict but did not set a redirect socket.
 917	 */
 918	if (unlikely(!sk_other)) {
 919		skb_bpf_redirect_clear(skb);
 920		sock_drop(from->sk, skb);
 921		return -EIO;
 922	}
 923	psock_other = sk_psock(sk_other);
 924	/* This error indicates the socket is being torn down or had another
 925	 * error that caused the pipe to break. We can't send a packet on
 926	 * a socket that is in this state so we drop the skb.
 927	 */
 928	if (!psock_other || sock_flag(sk_other, SOCK_DEAD)) {
 929		skb_bpf_redirect_clear(skb);
 930		sock_drop(from->sk, skb);
 931		return -EIO;
 932	}
 933	spin_lock_bh(&psock_other->ingress_lock);
 934	if (!sk_psock_test_state(psock_other, SK_PSOCK_TX_ENABLED)) {
 935		spin_unlock_bh(&psock_other->ingress_lock);
 936		skb_bpf_redirect_clear(skb);
 937		sock_drop(from->sk, skb);
 938		return -EIO;
 939	}
 940
 941	skb_queue_tail(&psock_other->ingress_skb, skb);
 942	schedule_delayed_work(&psock_other->work, 0);
 943	spin_unlock_bh(&psock_other->ingress_lock);
 944	return 0;
 945}
 946
 947static void sk_psock_tls_verdict_apply(struct sk_buff *skb,
 948				       struct sk_psock *from, int verdict)
 949{
 950	switch (verdict) {
 951	case __SK_REDIRECT:
 952		sk_psock_skb_redirect(from, skb);
 953		break;
 954	case __SK_PASS:
 955	case __SK_DROP:
 956	default:
 957		break;
 958	}
 959}
 960
 961int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb)
 962{
 963	struct bpf_prog *prog;
 964	int ret = __SK_PASS;
 965
 966	rcu_read_lock();
 967	prog = READ_ONCE(psock->progs.stream_verdict);
 968	if (likely(prog)) {
 969		skb->sk = psock->sk;
 970		skb_dst_drop(skb);
 971		skb_bpf_redirect_clear(skb);
 972		ret = bpf_prog_run_pin_on_cpu(prog, skb);
 973		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
 974		skb->sk = NULL;
 975	}
 976	sk_psock_tls_verdict_apply(skb, psock, ret);
 977	rcu_read_unlock();
 978	return ret;
 979}
 980EXPORT_SYMBOL_GPL(sk_psock_tls_strp_read);
 981
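/* Apply a verdict to an skb on the local socket. __SK_PASS queues the
 * data for ingress (directly when the backlog is empty, otherwise via
 * the workqueue), __SK_REDIRECT hands the skb to the selected peer, and
 * __SK_DROP (or any unrecoverable error) frees it.
 */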
 982static int sk_psock_verdict_apply(struct sk_psock *psock, struct sk_buff *skb,
 983				  int verdict)
 984{
 985	struct sock *sk_other;
 986	int err = 0;
 987	u32 len, off;
 988
 989	switch (verdict) {
 990	case __SK_PASS:
 991		err = -EIO;
 992		sk_other = psock->sk;
 993		if (sock_flag(sk_other, SOCK_DEAD) ||
 994		    !sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
 995			goto out_free;
 996
 997		skb_bpf_set_ingress(skb);
 998
 999		/* If the ingress queue is empty we can submit the skb
1000		 * directly into the msg queue. If it is not empty we must
1001		 * queue the work instead, or data may arrive out of order.
1002		 * Errors from sk_psock_skb_ingress_self() are handled by
1003		 * queueing the skb and retrying later from the workqueue.
1004		 */
1005		if (skb_queue_empty(&psock->ingress_skb)) {
1006			len = skb->len;
1007			off = 0;
1008			if (skb_bpf_strparser(skb)) {
1009				struct strp_msg *stm = strp_msg(skb);
1010
1011				off = stm->offset;
1012				len = stm->full_len;
1013			}
1014			err = sk_psock_skb_ingress_self(psock, skb, off, len);
1015		}
1016		if (err < 0) {
1017			spin_lock_bh(&psock->ingress_lock);
1018			if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
1019				skb_queue_tail(&psock->ingress_skb, skb);
1020				schedule_delayed_work(&psock->work, 0);
1021				err = 0;
1022			}
1023			spin_unlock_bh(&psock->ingress_lock);
1024			if (err < 0)
1025				goto out_free;
1026		}
1027		break;
1028	case __SK_REDIRECT:
1029		tcp_eat_skb(psock->sk, skb);
1030		err = sk_psock_skb_redirect(psock, skb);
1031		break;
1032	case __SK_DROP:
1033	default:
1034out_free:
1035		skb_bpf_redirect_clear(skb);
1036		tcp_eat_skb(psock->sk, skb);
1037		sock_drop(psock->sk, skb);
1038	}
1039
1040	return err;
1041}
1042
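/* Replacement sk_write_space callback: while transmit is still enabled,
 * kick the backlog work so queued data gets another chance to be sent,
 * then chain to the socket's original write_space handler.
 */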
1043static void sk_psock_write_space(struct sock *sk)
1044{
1045	struct sk_psock *psock;
1046	void (*write_space)(struct sock *sk) = NULL;
1047
1048	rcu_read_lock();
1049	psock = sk_psock(sk);
1050	if (likely(psock)) {
1051		if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
1052			schedule_delayed_work(&psock->work, 0);
1053		write_space = psock->saved_write_space;
1054	}
1055	rcu_read_unlock();
1056	if (write_space)
1057		write_space(sk);
1058}
1059
1060#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
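/* strparser rcv_msg callback: run the stream_verdict program on a fully
 * parsed message and apply the result. The skb is marked as coming from
 * the strparser so the ingress path can use the strp offset and length.
 */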
1061static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
1062{
1063	struct sk_psock *psock;
1064	struct bpf_prog *prog;
1065	int ret = __SK_DROP;
1066	struct sock *sk;
1067
1068	rcu_read_lock();
1069	sk = strp->sk;
1070	psock = sk_psock(sk);
1071	if (unlikely(!psock)) {
1072		sock_drop(sk, skb);
1073		goto out;
1074	}
1075	prog = READ_ONCE(psock->progs.stream_verdict);
1076	if (likely(prog)) {
1077		skb->sk = sk;
1078		skb_dst_drop(skb);
1079		skb_bpf_redirect_clear(skb);
1080		ret = bpf_prog_run_pin_on_cpu(prog, skb);
1081		skb_bpf_set_strparser(skb);
1082		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
1083		skb->sk = NULL;
1084	}
1085	sk_psock_verdict_apply(psock, skb, ret);
1086out:
1087	rcu_read_unlock();
1088}
1089
1090static int sk_psock_strp_read_done(struct strparser *strp, int err)
1091{
1092	return err;
1093}
1094
1095static int sk_psock_strp_parse(struct strparser *strp, struct sk_buff *skb)
1096{
1097	struct sk_psock *psock = container_of(strp, struct sk_psock, strp);
1098	struct bpf_prog *prog;
1099	int ret = skb->len;
1100
1101	rcu_read_lock();
1102	prog = READ_ONCE(psock->progs.stream_parser);
1103	if (likely(prog)) {
1104		skb->sk = psock->sk;
1105		ret = bpf_prog_run_pin_on_cpu(prog, skb);
1106		skb->sk = NULL;
1107	}
1108	rcu_read_unlock();
1109	return ret;
1110}
1111
1112/* Called with socket lock held. */
1113static void sk_psock_strp_data_ready(struct sock *sk)
1114{
1115	struct sk_psock *psock;
1116
1117	trace_sk_data_ready(sk);
1118
1119	rcu_read_lock();
1120	psock = sk_psock(sk);
1121	if (likely(psock)) {
1122		if (tls_sw_has_ctx_rx(sk)) {
1123			psock->saved_data_ready(sk);
1124		} else {
1125			read_lock_bh(&sk->sk_callback_lock);
1126			strp_data_ready(&psock->strp);
1127			read_unlock_bh(&sk->sk_callback_lock);
1128		}
1129	}
1130	rcu_read_unlock();
1131}
1132
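/* Attach a strparser instance to the socket using the callbacks above.
 * On success the RX_STRP_ENABLED bit is set so sk_psock_done_strp()
 * knows it must call strp_done() during teardown.
 */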
1133int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock)
1134{
1135	int ret;
1136
1137	static const struct strp_callbacks cb = {
1138		.rcv_msg	= sk_psock_strp_read,
1139		.read_sock_done	= sk_psock_strp_read_done,
1140		.parse_msg	= sk_psock_strp_parse,
1141	};
1142
1143	ret = strp_init(&psock->strp, sk, &cb);
1144	if (!ret)
1145		sk_psock_set_state(psock, SK_PSOCK_RX_STRP_ENABLED);
1146
1147	return ret;
1148}
1149
1150void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
1151{
1152	if (psock->saved_data_ready)
1153		return;
1154
1155	psock->saved_data_ready = sk->sk_data_ready;
1156	sk->sk_data_ready = sk_psock_strp_data_ready;
1157	sk->sk_write_space = sk_psock_write_space;
1158}
1159
1160void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock)
1161{
1162	psock_set_prog(&psock->progs.stream_parser, NULL);
1163
1164	if (!psock->saved_data_ready)
1165		return;
1166
1167	sk->sk_data_ready = psock->saved_data_ready;
1168	psock->saved_data_ready = NULL;
1169	strp_stop(&psock->strp);
1170}
1171
1172static void sk_psock_done_strp(struct sk_psock *psock)
1173{
1174	/* Parser has been stopped */
1175	if (sk_psock_test_state(psock, SK_PSOCK_RX_STRP_ENABLED))
1176		strp_done(&psock->strp);
1177}
1178#else
1179static void sk_psock_done_strp(struct sk_psock *psock)
1180{
1181}
1182#endif /* CONFIG_BPF_STREAM_PARSER */
1183
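/* read_skb() callback for the non-strparser path: run the stream or skb
 * verdict program on each skb pulled from the receive queue and apply
 * the result. Returns the number of bytes consumed, or a negative error
 * propagated from the verdict handling.
 */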
1184static int sk_psock_verdict_recv(struct sock *sk, struct sk_buff *skb)
1185{
1186	struct sk_psock *psock;
1187	struct bpf_prog *prog;
1188	int ret = __SK_DROP;
1189	int len = skb->len;
1190
1191	rcu_read_lock();
1192	psock = sk_psock(sk);
1193	if (unlikely(!psock)) {
1194		len = 0;
1195		tcp_eat_skb(sk, skb);
1196		sock_drop(sk, skb);
1197		goto out;
1198	}
1199	prog = READ_ONCE(psock->progs.stream_verdict);
1200	if (!prog)
1201		prog = READ_ONCE(psock->progs.skb_verdict);
1202	if (likely(prog)) {
1203		skb_dst_drop(skb);
1204		skb_bpf_redirect_clear(skb);
1205		ret = bpf_prog_run_pin_on_cpu(prog, skb);
1206		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
1207	}
1208	ret = sk_psock_verdict_apply(psock, skb, ret);
1209	if (ret < 0)
1210		len = ret;
1211out:
1212	rcu_read_unlock();
1213	return len;
1214}
1215
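/* data_ready replacement used when only a verdict program is attached:
 * pull skbs out of the protocol's receive queue via ops->read_skb() and
 * feed them to sk_psock_verdict_recv(). Afterwards sk_psock_data_ready()
 * is called to notify any waiting readers.
 */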
1216static void sk_psock_verdict_data_ready(struct sock *sk)
1217{
1218	struct socket *sock = sk->sk_socket;
1219	const struct proto_ops *ops;
1220	int copied;
1221
1222	trace_sk_data_ready(sk);
1223
1224	if (unlikely(!sock))
1225		return;
1226	ops = READ_ONCE(sock->ops);
1227	if (!ops || !ops->read_skb)
1228		return;
1229	copied = ops->read_skb(sk, sk_psock_verdict_recv);
1230	if (copied >= 0) {
1231		struct sk_psock *psock;
1232
1233		rcu_read_lock();
1234		psock = sk_psock(sk);
1235		if (psock)
1236			sk_psock_data_ready(sk, psock);
1237		rcu_read_unlock();
1238	}
1239}
1240
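/* Install the verdict-only data_ready hook, saving the socket's
 * original callback so it can be restored by sk_psock_stop_verdict().
 */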
1241void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock)
1242{
1243	if (psock->saved_data_ready)
1244		return;
1245
1246	psock->saved_data_ready = sk->sk_data_ready;
1247	sk->sk_data_ready = sk_psock_verdict_data_ready;
1248	sk->sk_write_space = sk_psock_write_space;
1249}
1250
1251void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock)
1252{
1253	psock_set_prog(&psock->progs.stream_verdict, NULL);
1254	psock_set_prog(&psock->progs.skb_verdict, NULL);
1255
1256	if (!psock->saved_data_ready)
1257		return;
1258
1259	sk->sk_data_ready = psock->saved_data_ready;
1260	psock->saved_data_ready = NULL;
1261}