v5.14.15
   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */
   3
   4#include <linux/skmsg.h>
   5#include <linux/skbuff.h>
   6#include <linux/scatterlist.h>
   7
   8#include <net/sock.h>
   9#include <net/tcp.h>
  10#include <net/tls.h>
  11
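/* msg->sg.data[] is used as a ring: sg.start/sg.end may wrap around, so two
 * range checks are needed to decide whether @elem_first_coalesce still lies
 * inside the live window and coalescing into the last element is allowed.
 */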
  12static bool sk_msg_try_coalesce_ok(struct sk_msg *msg, int elem_first_coalesce)
  13{
  14	if (msg->sg.end > msg->sg.start &&
  15	    elem_first_coalesce < msg->sg.end)
  16		return true;
  17
  18	if (msg->sg.end < msg->sg.start &&
  19	    (elem_first_coalesce > msg->sg.start ||
  20	     elem_first_coalesce < msg->sg.end))
  21		return true;
  22
  23	return false;
  24}
  25
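/* Grow @msg until it holds @len bytes in total, carving space out of the
 * socket's page frag. New bytes are merged into the last scatterlist element
 * when the coalesce check allows it, otherwise a fresh element is appended.
 * Returns -ENOSPC once the ring is full, -ENOMEM on refill/accounting failure.
 */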
  26int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len,
  27		 int elem_first_coalesce)
  28{
  29	struct page_frag *pfrag = sk_page_frag(sk);
  30	int ret = 0;
  31
  32	len -= msg->sg.size;
  33	while (len > 0) {
  34		struct scatterlist *sge;
  35		u32 orig_offset;
  36		int use, i;
  37
  38		if (!sk_page_frag_refill(sk, pfrag))
  39			return -ENOMEM;
  40
  41		orig_offset = pfrag->offset;
  42		use = min_t(int, len, pfrag->size - orig_offset);
  43		if (!sk_wmem_schedule(sk, use))
  44			return -ENOMEM;
  45
  46		i = msg->sg.end;
  47		sk_msg_iter_var_prev(i);
  48		sge = &msg->sg.data[i];
  49
  50		if (sk_msg_try_coalesce_ok(msg, elem_first_coalesce) &&
  51		    sg_page(sge) == pfrag->page &&
  52		    sge->offset + sge->length == orig_offset) {
  53			sge->length += use;
  54		} else {
  55			if (sk_msg_full(msg)) {
  56				ret = -ENOSPC;
  57				break;
  58			}
  59
  60			sge = &msg->sg.data[msg->sg.end];
  61			sg_unmark_end(sge);
  62			sg_set_page(sge, pfrag->page, use, orig_offset);
  63			get_page(pfrag->page);
  64			sk_msg_iter_next(msg, end);
  65		}
  66
  67		sk_mem_charge(sk, use);
  68		msg->sg.size += use;
  69		pfrag->offset += use;
  70		len -= use;
  71	}
  72
  73	return ret;
  74}
  75EXPORT_SYMBOL_GPL(sk_msg_alloc);
  76
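/* Share @len bytes starting at @off of @src with @dst without copying the
 * payload: @dst ends up pointing at the same pages, adjacent ranges are
 * merged into its last element, and @sk is charged for the cloned bytes.
 */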
  77int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src,
  78		 u32 off, u32 len)
  79{
  80	int i = src->sg.start;
  81	struct scatterlist *sge = sk_msg_elem(src, i);
  82	struct scatterlist *sgd = NULL;
  83	u32 sge_len, sge_off;
  84
  85	while (off) {
  86		if (sge->length > off)
  87			break;
  88		off -= sge->length;
  89		sk_msg_iter_var_next(i);
  90		if (i == src->sg.end && off)
  91			return -ENOSPC;
  92		sge = sk_msg_elem(src, i);
  93	}
  94
  95	while (len) {
  96		sge_len = sge->length - off;
  97		if (sge_len > len)
  98			sge_len = len;
  99
 100		if (dst->sg.end)
 101			sgd = sk_msg_elem(dst, dst->sg.end - 1);
 102
 103		if (sgd &&
 104		    (sg_page(sge) == sg_page(sgd)) &&
 105		    (sg_virt(sge) + off == sg_virt(sgd) + sgd->length)) {
 106			sgd->length += sge_len;
 107			dst->sg.size += sge_len;
 108		} else if (!sk_msg_full(dst)) {
 109			sge_off = sge->offset + off;
 110			sk_msg_page_add(dst, sg_page(sge), sge_len, sge_off);
 111		} else {
 112			return -ENOSPC;
 113		}
 114
 115		off = 0;
 116		len -= sge_len;
 117		sk_mem_charge(sk, sge_len);
 118		sk_msg_iter_var_next(i);
 119		if (i == src->sg.end && len)
 120			return -ENOSPC;
 121		sge = sk_msg_elem(src, i);
 122	}
 123
 124	return 0;
 125}
 126EXPORT_SYMBOL_GPL(sk_msg_clone);
 127
 128void sk_msg_return_zero(struct sock *sk, struct sk_msg *msg, int bytes)
 129{
 130	int i = msg->sg.start;
 131
 132	do {
 133		struct scatterlist *sge = sk_msg_elem(msg, i);
 134
 135		if (bytes < sge->length) {
 136			sge->length -= bytes;
 137			sge->offset += bytes;
 138			sk_mem_uncharge(sk, bytes);
 139			break;
 140		}
 141
 142		sk_mem_uncharge(sk, sge->length);
 143		bytes -= sge->length;
 144		sge->length = 0;
 145		sge->offset = 0;
 146		sk_msg_iter_var_next(i);
 147	} while (bytes && i != msg->sg.end);
 148	msg->sg.start = i;
 149}
 150EXPORT_SYMBOL_GPL(sk_msg_return_zero);
 151
 152void sk_msg_return(struct sock *sk, struct sk_msg *msg, int bytes)
 153{
 154	int i = msg->sg.start;
 155
 156	do {
 157		struct scatterlist *sge = &msg->sg.data[i];
 158		int uncharge = (bytes < sge->length) ? bytes : sge->length;
 159
 160		sk_mem_uncharge(sk, uncharge);
 161		bytes -= uncharge;
 162		sk_msg_iter_var_next(i);
 163	} while (i != msg->sg.end);
 164}
 165EXPORT_SYMBOL_GPL(sk_msg_return);
 166
 167static int sk_msg_free_elem(struct sock *sk, struct sk_msg *msg, u32 i,
 168			    bool charge)
 169{
 170	struct scatterlist *sge = sk_msg_elem(msg, i);
 171	u32 len = sge->length;
 172
 173	/* When the skb owns the memory we free it from consume_skb path. */
 174	if (!msg->skb) {
 175		if (charge)
 176			sk_mem_uncharge(sk, len);
 177		put_page(sg_page(sge));
 178	}
 179	memset(sge, 0, sizeof(*sge));
 180	return len;
 181}
 182
 183static int __sk_msg_free(struct sock *sk, struct sk_msg *msg, u32 i,
 184			 bool charge)
 185{
 186	struct scatterlist *sge = sk_msg_elem(msg, i);
 187	int freed = 0;
 188
 189	while (msg->sg.size) {
 190		msg->sg.size -= sge->length;
 191		freed += sk_msg_free_elem(sk, msg, i, charge);
 192		sk_msg_iter_var_next(i);
 193		sk_msg_check_to_free(msg, i, msg->sg.size);
 194		sge = sk_msg_elem(msg, i);
 195	}
 196	consume_skb(msg->skb);
 197	sk_msg_init(msg);
 198	return freed;
 199}
 200
 201int sk_msg_free_nocharge(struct sock *sk, struct sk_msg *msg)
 202{
 203	return __sk_msg_free(sk, msg, msg->sg.start, false);
 204}
 205EXPORT_SYMBOL_GPL(sk_msg_free_nocharge);
 206
 207int sk_msg_free(struct sock *sk, struct sk_msg *msg)
 208{
 209	return __sk_msg_free(sk, msg, msg->sg.start, true);
 210}
 211EXPORT_SYMBOL_GPL(sk_msg_free);
 212
 213static void __sk_msg_free_partial(struct sock *sk, struct sk_msg *msg,
 214				  u32 bytes, bool charge)
 215{
 216	struct scatterlist *sge;
 217	u32 i = msg->sg.start;
 218
 219	while (bytes) {
 220		sge = sk_msg_elem(msg, i);
 221		if (!sge->length)
 222			break;
 223		if (bytes < sge->length) {
 224			if (charge)
 225				sk_mem_uncharge(sk, bytes);
 226			sge->length -= bytes;
 227			sge->offset += bytes;
 228			msg->sg.size -= bytes;
 229			break;
 230		}
 231
 232		msg->sg.size -= sge->length;
 233		bytes -= sge->length;
 234		sk_msg_free_elem(sk, msg, i, charge);
 235		sk_msg_iter_var_next(i);
 236		sk_msg_check_to_free(msg, i, bytes);
 237	}
 238	msg->sg.start = i;
 239}
 240
 241void sk_msg_free_partial(struct sock *sk, struct sk_msg *msg, u32 bytes)
 242{
 243	__sk_msg_free_partial(sk, msg, bytes, true);
 244}
 245EXPORT_SYMBOL_GPL(sk_msg_free_partial);
 246
 247void sk_msg_free_partial_nocharge(struct sock *sk, struct sk_msg *msg,
 248				  u32 bytes)
 249{
 250	__sk_msg_free_partial(sk, msg, bytes, false);
 251}
 252
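/* Shrink @msg to @len bytes by freeing whole scatterlist elements from the
 * tail and truncating the last remaining one; sg.curr and sg.copybreak are
 * pulled back if they pointed into the trimmed region.
 */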
 253void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len)
 254{
 255	int trim = msg->sg.size - len;
 256	u32 i = msg->sg.end;
 257
 258	if (trim <= 0) {
 259		WARN_ON(trim < 0);
 260		return;
 261	}
 262
 263	sk_msg_iter_var_prev(i);
 264	msg->sg.size = len;
 265	while (msg->sg.data[i].length &&
 266	       trim >= msg->sg.data[i].length) {
 267		trim -= msg->sg.data[i].length;
 268		sk_msg_free_elem(sk, msg, i, true);
 269		sk_msg_iter_var_prev(i);
 270		if (!trim)
 271			goto out;
 272	}
 273
 274	msg->sg.data[i].length -= trim;
 275	sk_mem_uncharge(sk, trim);
 276	/* Adjust copybreak if it falls into the trimmed part of last buf */
 277	if (msg->sg.curr == i && msg->sg.copybreak > msg->sg.data[i].length)
 278		msg->sg.copybreak = msg->sg.data[i].length;
 279out:
 280	sk_msg_iter_var_next(i);
 281	msg->sg.end = i;
 282
 283	/* If we trim data a full sg elem before the curr pointer, update
 284	 * copybreak and curr so that any future copy operations
 285	 * start at the new copy location.
 286	 * However, trimmed data that has not yet been used in a copy op
 287	 * does not require an update.
 288	 */
 289	if (!msg->sg.size) {
 290		msg->sg.curr = msg->sg.start;
 291		msg->sg.copybreak = 0;
 292	} else if (sk_msg_iter_dist(msg->sg.start, msg->sg.curr) >=
 293		   sk_msg_iter_dist(msg->sg.start, msg->sg.end)) {
 294		sk_msg_iter_var_prev(i);
 295		msg->sg.curr = i;
 296		msg->sg.copybreak = msg->sg.data[i].length;
 297	}
 298}
 299EXPORT_SYMBOL_GPL(sk_msg_trim);
 300
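/* Pin user pages from @from and link them into @msg without copying, up to
 * @bytes or until the scatterlist ring fills up. On failure the iterator is
 * reverted so the caller can trim @msg and fall back to a copying path.
 */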
 301int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
 302			      struct sk_msg *msg, u32 bytes)
 303{
 304	int i, maxpages, ret = 0, num_elems = sk_msg_elem_used(msg);
 305	const int to_max_pages = MAX_MSG_FRAGS;
 306	struct page *pages[MAX_MSG_FRAGS];
 307	ssize_t orig, copied, use, offset;
 308
 309	orig = msg->sg.size;
 310	while (bytes > 0) {
 311		i = 0;
 312		maxpages = to_max_pages - num_elems;
 313		if (maxpages == 0) {
 314			ret = -EFAULT;
 315			goto out;
 316		}
 317
 318		copied = iov_iter_get_pages(from, pages, bytes, maxpages,
 319					    &offset);
 320		if (copied <= 0) {
 321			ret = -EFAULT;
 322			goto out;
 323		}
 324
 325		iov_iter_advance(from, copied);
 326		bytes -= copied;
 327		msg->sg.size += copied;
 328
 329		while (copied) {
 330			use = min_t(int, copied, PAGE_SIZE - offset);
 331			sg_set_page(&msg->sg.data[msg->sg.end],
 332				    pages[i], use, offset);
 333			sg_unmark_end(&msg->sg.data[msg->sg.end]);
 334			sk_mem_charge(sk, use);
 335
 336			offset = 0;
 337			copied -= use;
 338			sk_msg_iter_next(msg, end);
 339			num_elems++;
 340			i++;
 341		}
 342		/* When zerocopy is mixed with sk_msg_*copy* operations we
 343		 * may have a copybreak set; in that case clear it and prefer
 344		 * the zerocopy remainder when possible.
 345		 */
 346		msg->sg.copybreak = 0;
 347		msg->sg.curr = msg->sg.end;
 348	}
 349out:
 350	/* Revert iov_iter updates, msg will need to use 'trim' later if it
 351	 * also needs to be cleared.
 352	 */
 353	if (ret)
 354		iov_iter_revert(from, msg->sg.size - orig);
 355	return ret;
 356}
 357EXPORT_SYMBOL_GPL(sk_msg_zerocopy_from_iter);
 358
 359int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
 360			     struct sk_msg *msg, u32 bytes)
 361{
 362	int ret = -ENOSPC, i = msg->sg.curr;
 363	struct scatterlist *sge;
 364	u32 copy, buf_size;
 365	void *to;
 366
 367	do {
 368		sge = sk_msg_elem(msg, i);
 369		/* This is possible if a trim operation shrunk the buffer */
 370		if (msg->sg.copybreak >= sge->length) {
 371			msg->sg.copybreak = 0;
 372			sk_msg_iter_var_next(i);
 373			if (i == msg->sg.end)
 374				break;
 375			sge = sk_msg_elem(msg, i);
 376		}
 377
 378		buf_size = sge->length - msg->sg.copybreak;
 379		copy = (buf_size > bytes) ? bytes : buf_size;
 380		to = sg_virt(sge) + msg->sg.copybreak;
 381		msg->sg.copybreak += copy;
 382		if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY)
 383			ret = copy_from_iter_nocache(to, copy, from);
 384		else
 385			ret = copy_from_iter(to, copy, from);
 386		if (ret != copy) {
 387			ret = -EFAULT;
 388			goto out;
 389		}
 390		bytes -= copy;
 391		if (!bytes)
 392			break;
 393		msg->sg.copybreak = 0;
 394		sk_msg_iter_var_next(i);
 395	} while (i != msg->sg.end);
 396out:
 397	msg->sg.curr = i;
 398	return ret;
 399}
 400EXPORT_SYMBOL_GPL(sk_msg_memcopy_from_iter);
 401
 402/* Receive sk_msg from psock->ingress_msg to @msg. */
 403int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
 404		   int len, int flags)
 405{
 406	struct iov_iter *iter = &msg->msg_iter;
 407	int peek = flags & MSG_PEEK;
 408	struct sk_msg *msg_rx;
 409	int i, copied = 0;
 410
 411	msg_rx = sk_psock_peek_msg(psock);
 412	while (copied != len) {
 413		struct scatterlist *sge;
 414
 415		if (unlikely(!msg_rx))
 416			break;
 417
 418		i = msg_rx->sg.start;
 419		do {
 420			struct page *page;
 421			int copy;
 422
 423			sge = sk_msg_elem(msg_rx, i);
 424			copy = sge->length;
 425			page = sg_page(sge);
 426			if (copied + copy > len)
 427				copy = len - copied;
 428			copy = copy_page_to_iter(page, sge->offset, copy, iter);
 429			if (!copy)
 430				return copied ? copied : -EFAULT;
 431
 432			copied += copy;
 433			if (likely(!peek)) {
 434				sge->offset += copy;
 435				sge->length -= copy;
 436				if (!msg_rx->skb)
 437					sk_mem_uncharge(sk, copy);
 438				msg_rx->sg.size -= copy;
 439
 440				if (!sge->length) {
 441					sk_msg_iter_var_next(i);
 442					if (!msg_rx->skb)
 443						put_page(page);
 444				}
 445			} else {
 446				/* Let's not optimize the peek case: if copy_page_to_iter
 447				 * didn't copy the entire length, just break.
 448				 */
 449				if (copy != sge->length)
 450					return copied;
 451				sk_msg_iter_var_next(i);
 452			}
 453
 454			if (copied == len)
 455				break;
 456		} while (i != msg_rx->sg.end);
 457
 458		if (unlikely(peek)) {
 459			msg_rx = sk_psock_next_msg(psock, msg_rx);
 460			if (!msg_rx)
 461				break;
 462			continue;
 463		}
 464
 465		msg_rx->sg.start = i;
 466		if (!sge->length && msg_rx->sg.start == msg_rx->sg.end) {
 467			msg_rx = sk_psock_dequeue_msg(psock);
 468			kfree_sk_msg(msg_rx);
 469		}
 470		msg_rx = sk_psock_peek_msg(psock);
 471	}
 472
 473	return copied;
 474}
 475EXPORT_SYMBOL_GPL(sk_msg_recvmsg);
 476
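/* Allocate an sk_msg for an skb redirected into @sk's ingress path. Returns
 * NULL when the receive buffer is already full or the skb's truesize cannot
 * be charged to the socket's memory accounting.
 */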
 477static struct sk_msg *sk_psock_create_ingress_msg(struct sock *sk,
 478						  struct sk_buff *skb)
 479{
 480	struct sk_msg *msg;
 481
 482	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
 483		return NULL;
 484
 485	if (!sk_rmem_schedule(sk, skb, skb->truesize))
 486		return NULL;
 487
 488	msg = kzalloc(sizeof(*msg), __GFP_NOWARN | GFP_KERNEL);
 489	if (unlikely(!msg))
 490		return NULL;
 491
 492	sk_msg_init(msg);
 493	return msg;
 494}
 495
 496static int sk_psock_skb_ingress_enqueue(struct sk_buff *skb,
 497					struct sk_psock *psock,
 498					struct sock *sk,
 499					struct sk_msg *msg)
 500{
 501	int num_sge, copied;
 502
 503	/* skb_linearize() may fail with ENOMEM, but let's simply try again
 504	 * later if this happens. Under memory pressure we don't want to
 505	 * drop the skb. We need to linearize the skb so that the mapping
 506	 * in skb_to_sgvec() cannot error.
 507	 */
 508	if (skb_linearize(skb))
 509		return -EAGAIN;
 510	num_sge = skb_to_sgvec(skb, msg->sg.data, 0, skb->len);
 511	if (unlikely(num_sge < 0))
 512		return num_sge;
 513
 514	copied = skb->len;
 515	msg->sg.start = 0;
 516	msg->sg.size = copied;
 517	msg->sg.end = num_sge;
 518	msg->skb = skb;
 519
 520	sk_psock_queue_msg(psock, msg);
 521	sk_psock_data_ready(sk, psock);
 522	return copied;
 523}
 524
 525static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb);
 526
 527static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb)
 528{
 529	struct sock *sk = psock->sk;
 530	struct sk_msg *msg;
 531	int err;
 532
 533	/* If we are receiving on the same sock, skb->sk is already assigned;
 534	 * skip memory accounting and the owner transition since it is already
 535	 * set correctly.
 536	 */
 537	if (unlikely(skb->sk == sk))
 538		return sk_psock_skb_ingress_self(psock, skb);
 539	msg = sk_psock_create_ingress_msg(sk, skb);
 540	if (!msg)
 541		return -EAGAIN;
 542
 543	/* This will transition ownership of the data from the socket where
 544	 * the BPF program was run initiating the redirect to the socket
 545	 * we will eventually receive this data on. The data will be released
 546	 * from consume_skb() found in __tcp_bpf_recvmsg() after it has been
 547	 * copied into user buffers.
 548	 */
 549	skb_set_owner_r(skb, sk);
 550	err = sk_psock_skb_ingress_enqueue(skb, psock, sk, msg);
 551	if (err < 0)
 552		kfree(msg);
 553	return err;
 554}
 555
 556/* Puts an skb on the ingress queue of the socket already assigned to the
 557 * skb. In this case we do not need to check memory limits or skb_set_owner_r
 558 * because the skb is already accounted for here.
 559 */
 560static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb)
 561{
 562	struct sk_msg *msg = kzalloc(sizeof(*msg), __GFP_NOWARN | GFP_ATOMIC);
 563	struct sock *sk = psock->sk;
 564	int err;
 565
 566	if (unlikely(!msg))
 567		return -EAGAIN;
 568	sk_msg_init(msg);
 569	skb_set_owner_r(skb, sk);
 570	err = sk_psock_skb_ingress_enqueue(skb, psock, sk, msg);
 571	if (err < 0)
 572		kfree(msg);
 573	return err;
 574}
 575
 576static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb,
 577			       u32 off, u32 len, bool ingress)
 578{
 579	if (!ingress) {
 580		if (!sock_writeable(psock->sk))
 581			return -EAGAIN;
 582		return skb_send_sock(psock->sk, skb, off, len);
 583	}
 584	return sk_psock_skb_ingress(psock, skb);
 585}
 586
 587static void sk_psock_skb_state(struct sk_psock *psock,
 588			       struct sk_psock_work_state *state,
 589			       struct sk_buff *skb,
 590			       int len, int off)
 591{
 592	spin_lock_bh(&psock->ingress_lock);
 593	if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
 594		state->skb = skb;
 595		state->len = len;
 596		state->off = off;
 597	} else {
 598		sock_drop(psock->sk, skb);
 599	}
 600	spin_unlock_bh(&psock->ingress_lock);
 601}
 602
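/* Workqueue handler that drains psock->ingress_skb. A partially handled skb
 * stashed in work_state is resumed first; -EAGAIN re-stashes the current skb
 * for a later retry, while hard errors report the error and disable transmit.
 */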
 603static void sk_psock_backlog(struct work_struct *work)
 604{
 605	struct sk_psock *psock = container_of(work, struct sk_psock, work);
 606	struct sk_psock_work_state *state = &psock->work_state;
 607	struct sk_buff *skb = NULL;
 608	bool ingress;
 609	u32 len, off;
 610	int ret;
 611
 612	mutex_lock(&psock->work_mutex);
 613	if (unlikely(state->skb)) {
 614		spin_lock_bh(&psock->ingress_lock);
 615		skb = state->skb;
 616		len = state->len;
 617		off = state->off;
 618		state->skb = NULL;
 619		spin_unlock_bh(&psock->ingress_lock);
 620	}
 621	if (skb)
 622		goto start;
 623
 624	while ((skb = skb_dequeue(&psock->ingress_skb))) {
 625		len = skb->len;
 626		off = 0;
 627start:
 628		ingress = skb_bpf_ingress(skb);
 629		skb_bpf_redirect_clear(skb);
 630		do {
 631			ret = -EIO;
 632			if (!sock_flag(psock->sk, SOCK_DEAD))
 633				ret = sk_psock_handle_skb(psock, skb, off,
 634							  len, ingress);
 635			if (ret <= 0) {
 636				if (ret == -EAGAIN) {
 637					sk_psock_skb_state(psock, state, skb,
 638							   len, off);
 639					goto end;
 640				}
 641				/* Hard errors break pipe and stop xmit. */
 642				sk_psock_report_error(psock, ret ? -ret : EPIPE);
 643				sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
 644				sock_drop(psock->sk, skb);
 645				goto end;
 646			}
 647			off += ret;
 648			len -= ret;
 649		} while (len);
 650
 651		if (!ingress)
 652			kfree_skb(skb);
 653	}
 654end:
 655	mutex_unlock(&psock->work_mutex);
 656}
 657
 658struct sk_psock *sk_psock_init(struct sock *sk, int node)
 659{
 660	struct sk_psock *psock;
 661	struct proto *prot;
 662
 663	write_lock_bh(&sk->sk_callback_lock);
 664
 665	if (sk->sk_user_data) {
 666		psock = ERR_PTR(-EBUSY);
 667		goto out;
 668	}
 669
 670	psock = kzalloc_node(sizeof(*psock), GFP_ATOMIC | __GFP_NOWARN, node);
 671	if (!psock) {
 672		psock = ERR_PTR(-ENOMEM);
 673		goto out;
 674	}
 675
 676	prot = READ_ONCE(sk->sk_prot);
 677	psock->sk = sk;
 678	psock->eval = __SK_NONE;
 679	psock->sk_proto = prot;
 680	psock->saved_unhash = prot->unhash;
 681	psock->saved_close = prot->close;
 682	psock->saved_write_space = sk->sk_write_space;
 683
 684	INIT_LIST_HEAD(&psock->link);
 685	spin_lock_init(&psock->link_lock);
 686
 687	INIT_WORK(&psock->work, sk_psock_backlog);
 688	mutex_init(&psock->work_mutex);
 689	INIT_LIST_HEAD(&psock->ingress_msg);
 690	spin_lock_init(&psock->ingress_lock);
 691	skb_queue_head_init(&psock->ingress_skb);
 692
 693	sk_psock_set_state(psock, SK_PSOCK_TX_ENABLED);
 694	refcount_set(&psock->refcnt, 1);
 695
 696	rcu_assign_sk_user_data_nocopy(sk, psock);
 697	sock_hold(sk);
 698
 699out:
 700	write_unlock_bh(&sk->sk_callback_lock);
 701	return psock;
 702}
 703EXPORT_SYMBOL_GPL(sk_psock_init);
 704
 705struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock)
 706{
 707	struct sk_psock_link *link;
 708
 709	spin_lock_bh(&psock->link_lock);
 710	link = list_first_entry_or_null(&psock->link, struct sk_psock_link,
 711					list);
 712	if (link)
 713		list_del(&link->list);
 714	spin_unlock_bh(&psock->link_lock);
 715	return link;
 716}
 717
 718static void __sk_psock_purge_ingress_msg(struct sk_psock *psock)
 719{
 720	struct sk_msg *msg, *tmp;
 721
 722	list_for_each_entry_safe(msg, tmp, &psock->ingress_msg, list) {
 723		list_del(&msg->list);
 724		sk_msg_free(psock->sk, msg);
 725		kfree(msg);
 726	}
 727}
 728
 729static void __sk_psock_zap_ingress(struct sk_psock *psock)
 730{
 731	struct sk_buff *skb;
 732
 733	while ((skb = skb_dequeue(&psock->ingress_skb)) != NULL) {
 734		skb_bpf_redirect_clear(skb);
 735		sock_drop(psock->sk, skb);
 736	}
 737	kfree_skb(psock->work_state.skb);
 738	/* We null the skb here to ensure that calls to sk_psock_backlog
 739	 * do not pick up the freed skb.
 740	 */
 741	psock->work_state.skb = NULL;
 742	__sk_psock_purge_ingress_msg(psock);
 743}
 744
 745static void sk_psock_link_destroy(struct sk_psock *psock)
 746{
 747	struct sk_psock_link *link, *tmp;
 748
 749	list_for_each_entry_safe(link, tmp, &psock->link, list) {
 750		list_del(&link->list);
 751		sk_psock_free_link(link);
 752	}
 753}
 754
 755void sk_psock_stop(struct sk_psock *psock, bool wait)
 756{
 757	spin_lock_bh(&psock->ingress_lock);
 758	sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
 759	sk_psock_cork_free(psock);
 760	__sk_psock_zap_ingress(psock);
 761	spin_unlock_bh(&psock->ingress_lock);
 762
 763	if (wait)
 764		cancel_work_sync(&psock->work);
 765}
 766
 767static void sk_psock_done_strp(struct sk_psock *psock);
 768
 769static void sk_psock_destroy(struct work_struct *work)
 770{
 771	struct sk_psock *psock = container_of(to_rcu_work(work),
 772					      struct sk_psock, rwork);
 773	/* No sk_callback_lock since already detached. */
 774
 775	sk_psock_done_strp(psock);
 776
 777	cancel_work_sync(&psock->work);
 778	mutex_destroy(&psock->work_mutex);
 779
 780	psock_progs_drop(&psock->progs);
 781
 782	sk_psock_link_destroy(psock);
 783	sk_psock_cork_free(psock);
 784
 785	if (psock->sk_redir)
 786		sock_put(psock->sk_redir);
 787	sock_put(psock->sk);
 788	kfree(psock);
 789}
 790
 791void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
 792{
 793	write_lock_bh(&sk->sk_callback_lock);
 794	sk_psock_restore_proto(sk, psock);
 795	rcu_assign_sk_user_data(sk, NULL);
 796	if (psock->progs.stream_parser)
 797		sk_psock_stop_strp(sk, psock);
 798	else if (psock->progs.stream_verdict || psock->progs.skb_verdict)
 799		sk_psock_stop_verdict(sk, psock);
 800	write_unlock_bh(&sk->sk_callback_lock);
 801
 802	sk_psock_stop(psock, false);
 803
 804	INIT_RCU_WORK(&psock->rwork, sk_psock_destroy);
 805	queue_rcu_work(system_wq, &psock->rwork);
 806}
 807EXPORT_SYMBOL_GPL(sk_psock_drop);
 808
 809static int sk_psock_map_verd(int verdict, bool redir)
 810{
 811	switch (verdict) {
 812	case SK_PASS:
 813		return redir ? __SK_REDIRECT : __SK_PASS;
 814	case SK_DROP:
 815	default:
 816		break;
 817	}
 818
 819	return __SK_DROP;
 820}
 821
 822int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
 823			 struct sk_msg *msg)
 824{
 825	struct bpf_prog *prog;
 826	int ret;
 827
 828	rcu_read_lock();
 829	prog = READ_ONCE(psock->progs.msg_parser);
 830	if (unlikely(!prog)) {
 831		ret = __SK_PASS;
 832		goto out;
 833	}
 834
 835	sk_msg_compute_data_pointers(msg);
 836	msg->sk = sk;
 837	ret = bpf_prog_run_pin_on_cpu(prog, msg);
 838	ret = sk_psock_map_verd(ret, msg->sk_redir);
 839	psock->apply_bytes = msg->apply_bytes;
 840	if (ret == __SK_REDIRECT) {
 841		if (psock->sk_redir)
 842			sock_put(psock->sk_redir);
 843		psock->sk_redir = msg->sk_redir;
 844		if (!psock->sk_redir) {
 845			ret = __SK_DROP;
 846			goto out;
 847		}
 848		sock_hold(psock->sk_redir);
 849	}
 850out:
 851	rcu_read_unlock();
 852	return ret;
 853}
 854EXPORT_SYMBOL_GPL(sk_psock_msg_verdict);
 855
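/* Queue @skb on the ingress queue of the socket chosen by the BPF redirect
 * verdict and kick that psock's backlog work. The skb is dropped when no
 * target was set or the target psock is gone, dead, or no longer enabled.
 */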
 856static int sk_psock_skb_redirect(struct sk_psock *from, struct sk_buff *skb)
 857{
 858	struct sk_psock *psock_other;
 859	struct sock *sk_other;
 860
 861	sk_other = skb_bpf_redirect_fetch(skb);
 862	/* This error means the BPF program is buggy: it returned a redirect
 863	 * verdict but then didn't set a redirect socket.
 864	 */
 865	if (unlikely(!sk_other)) {
 866		sock_drop(from->sk, skb);
 867		return -EIO;
 868	}
 869	psock_other = sk_psock(sk_other);
 870	/* This error indicates the socket is being torn down or had another
 871	 * error that caused the pipe to break. We can't send a packet on
 872	 * a socket that is in this state so we drop the skb.
 873	 */
 874	if (!psock_other || sock_flag(sk_other, SOCK_DEAD)) {
 875		skb_bpf_redirect_clear(skb);
 876		sock_drop(from->sk, skb);
 877		return -EIO;
 878	}
 879	spin_lock_bh(&psock_other->ingress_lock);
 880	if (!sk_psock_test_state(psock_other, SK_PSOCK_TX_ENABLED)) {
 881		spin_unlock_bh(&psock_other->ingress_lock);
 882		skb_bpf_redirect_clear(skb);
 883		sock_drop(from->sk, skb);
 884		return -EIO;
 885	}
 886
 887	skb_queue_tail(&psock_other->ingress_skb, skb);
 888	schedule_work(&psock_other->work);
 889	spin_unlock_bh(&psock_other->ingress_lock);
 890	return 0;
 891}
 892
 893static void sk_psock_tls_verdict_apply(struct sk_buff *skb,
 894				       struct sk_psock *from, int verdict)
 895{
 896	switch (verdict) {
 897	case __SK_REDIRECT:
 898		sk_psock_skb_redirect(from, skb);
 899		break;
 900	case __SK_PASS:
 901	case __SK_DROP:
 902	default:
 903		break;
 904	}
 905}
 906
 907int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb)
 908{
 909	struct bpf_prog *prog;
 910	int ret = __SK_PASS;
 911
 912	rcu_read_lock();
 913	prog = READ_ONCE(psock->progs.stream_verdict);
 914	if (likely(prog)) {
 915		skb->sk = psock->sk;
 916		skb_dst_drop(skb);
 917		skb_bpf_redirect_clear(skb);
 918		ret = bpf_prog_run_pin_on_cpu(prog, skb);
 919		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
 920		skb->sk = NULL;
 921	}
 922	sk_psock_tls_verdict_apply(skb, psock, ret);
 923	rcu_read_unlock();
 924	return ret;
 925}
 926EXPORT_SYMBOL_GPL(sk_psock_tls_strp_read);
 927
 928static int sk_psock_verdict_apply(struct sk_psock *psock, struct sk_buff *skb,
 929				  int verdict)
 930{
 931	struct sock *sk_other;
 932	int err = 0;
 933
 934	switch (verdict) {
 935	case __SK_PASS:
 936		err = -EIO;
 937		sk_other = psock->sk;
 938		if (sock_flag(sk_other, SOCK_DEAD) ||
 939		    !sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
 940			goto out_free;
 941		}
 942
 943		skb_bpf_set_ingress(skb);
 944
 945		/* If the queue is empty then we can submit directly
 946		 * into the msg queue. If it's not empty we have to
 947		 * queue the work, otherwise we may get OOO data.
 948		 * Any error from sk_psock_skb_ingress will be handled
 949		 * by retrying later from the workqueue.
 950		 */
 951		if (skb_queue_empty(&psock->ingress_skb)) {
 952			err = sk_psock_skb_ingress_self(psock, skb);
 953		}
 954		if (err < 0) {
 955			spin_lock_bh(&psock->ingress_lock);
 956			if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
 957				skb_queue_tail(&psock->ingress_skb, skb);
 958				schedule_work(&psock->work);
 959				err = 0;
 960			}
 961			spin_unlock_bh(&psock->ingress_lock);
 962			if (err < 0) {
 963				skb_bpf_redirect_clear(skb);
 964				goto out_free;
 965			}
 966		}
 967		break;
 968	case __SK_REDIRECT:
 969		err = sk_psock_skb_redirect(psock, skb);
 970		break;
 971	case __SK_DROP:
 972	default:
 973out_free:
 974		sock_drop(psock->sk, skb);
 975	}
 976
 977	return err;
 978}
 979
 980static void sk_psock_write_space(struct sock *sk)
 981{
 982	struct sk_psock *psock;
 983	void (*write_space)(struct sock *sk) = NULL;
 984
 985	rcu_read_lock();
 986	psock = sk_psock(sk);
 987	if (likely(psock)) {
 988		if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
 989			schedule_work(&psock->work);
 990		write_space = psock->saved_write_space;
 991	}
 992	rcu_read_unlock();
 993	if (write_space)
 994		write_space(sk);
 995}
 996
 997#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
 998static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
 999{
1000	struct sk_psock *psock;
1001	struct bpf_prog *prog;
1002	int ret = __SK_DROP;
1003	struct sock *sk;
1004
1005	rcu_read_lock();
1006	sk = strp->sk;
1007	psock = sk_psock(sk);
1008	if (unlikely(!psock)) {
1009		sock_drop(sk, skb);
1010		goto out;
1011	}
1012	prog = READ_ONCE(psock->progs.stream_verdict);
1013	if (likely(prog)) {
1014		skb->sk = sk;
1015		skb_dst_drop(skb);
1016		skb_bpf_redirect_clear(skb);
1017		ret = bpf_prog_run_pin_on_cpu(prog, skb);
1018		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
1019		skb->sk = NULL;
1020	}
1021	sk_psock_verdict_apply(psock, skb, ret);
1022out:
1023	rcu_read_unlock();
1024}
1025
1026static int sk_psock_strp_read_done(struct strparser *strp, int err)
1027{
1028	return err;
1029}
1030
1031static int sk_psock_strp_parse(struct strparser *strp, struct sk_buff *skb)
1032{
1033	struct sk_psock *psock = container_of(strp, struct sk_psock, strp);
1034	struct bpf_prog *prog;
1035	int ret = skb->len;
1036
1037	rcu_read_lock();
1038	prog = READ_ONCE(psock->progs.stream_parser);
1039	if (likely(prog)) {
1040		skb->sk = psock->sk;
1041		ret = bpf_prog_run_pin_on_cpu(prog, skb);
1042		skb->sk = NULL;
1043	}
1044	rcu_read_unlock();
1045	return ret;
1046}
1047
1048/* Called with socket lock held. */
1049static void sk_psock_strp_data_ready(struct sock *sk)
1050{
1051	struct sk_psock *psock;
1052
1053	rcu_read_lock();
1054	psock = sk_psock(sk);
1055	if (likely(psock)) {
1056		if (tls_sw_has_ctx_rx(sk)) {
1057			psock->saved_data_ready(sk);
1058		} else {
1059			write_lock_bh(&sk->sk_callback_lock);
1060			strp_data_ready(&psock->strp);
1061			write_unlock_bh(&sk->sk_callback_lock);
1062		}
1063	}
1064	rcu_read_unlock();
1065}
1066
1067int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock)
1068{
1069	static const struct strp_callbacks cb = {
1070		.rcv_msg	= sk_psock_strp_read,
1071		.read_sock_done	= sk_psock_strp_read_done,
1072		.parse_msg	= sk_psock_strp_parse,
1073	};
1074
1075	return strp_init(&psock->strp, sk, &cb);
1076}
1077
1078void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
1079{
1080	if (psock->saved_data_ready)
1081		return;
1082
1083	psock->saved_data_ready = sk->sk_data_ready;
1084	sk->sk_data_ready = sk_psock_strp_data_ready;
1085	sk->sk_write_space = sk_psock_write_space;
1086}
1087
1088void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock)
1089{
1090	if (!psock->saved_data_ready)
1091		return;
1092
1093	sk->sk_data_ready = psock->saved_data_ready;
1094	psock->saved_data_ready = NULL;
1095	strp_stop(&psock->strp);
1096}
1097
1098static void sk_psock_done_strp(struct sk_psock *psock)
1099{
1100	/* Parser has been stopped */
1101	if (psock->progs.stream_parser)
1102		strp_done(&psock->strp);
1103}
1104#else
1105static void sk_psock_done_strp(struct sk_psock *psock)
1106{
1107}
1108#endif /* CONFIG_BPF_STREAM_PARSER */
1109
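/* read_sock() callback used in verdict-only mode (no stream parser): clone
 * the skb so sk_eat_skb() in tcp_read_sock() does not drop our data, run the
 * verdict program, and apply the result.
 */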
1110static int sk_psock_verdict_recv(read_descriptor_t *desc, struct sk_buff *skb,
1111				 unsigned int offset, size_t orig_len)
1112{
1113	struct sock *sk = (struct sock *)desc->arg.data;
1114	struct sk_psock *psock;
1115	struct bpf_prog *prog;
1116	int ret = __SK_DROP;
1117	int len = skb->len;
1118
1119	/* clone here so sk_eat_skb() in tcp_read_sock does not drop our data */
1120	skb = skb_clone(skb, GFP_ATOMIC);
1121	if (!skb) {
1122		desc->error = -ENOMEM;
1123		return 0;
1124	}
1125
1126	rcu_read_lock();
1127	psock = sk_psock(sk);
1128	if (unlikely(!psock)) {
1129		len = 0;
1130		sock_drop(sk, skb);
1131		goto out;
1132	}
1133	prog = READ_ONCE(psock->progs.stream_verdict);
1134	if (!prog)
1135		prog = READ_ONCE(psock->progs.skb_verdict);
1136	if (likely(prog)) {
1137		skb->sk = sk;
1138		skb_dst_drop(skb);
1139		skb_bpf_redirect_clear(skb);
1140		ret = bpf_prog_run_pin_on_cpu(prog, skb);
1141		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
1142		skb->sk = NULL;
1143	}
1144	if (sk_psock_verdict_apply(psock, skb, ret) < 0)
1145		len = 0;
1146out:
1147	rcu_read_unlock();
1148	return len;
1149}
1150
1151static void sk_psock_verdict_data_ready(struct sock *sk)
1152{
1153	struct socket *sock = sk->sk_socket;
1154	read_descriptor_t desc;
1155
1156	if (unlikely(!sock || !sock->ops || !sock->ops->read_sock))
1157		return;
1158
1159	desc.arg.data = sk;
1160	desc.error = 0;
1161	desc.count = 1;
1162
1163	sock->ops->read_sock(sk, &desc, sk_psock_verdict_recv);
1164}
1165
1166void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock)
1167{
1168	if (psock->saved_data_ready)
1169		return;
1170
1171	psock->saved_data_ready = sk->sk_data_ready;
1172	sk->sk_data_ready = sk_psock_verdict_data_ready;
1173	sk->sk_write_space = sk_psock_write_space;
1174}
1175
1176void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock)
1177{
1178	if (!psock->saved_data_ready)
1179		return;
1180
1181	sk->sk_data_ready = psock->saved_data_ready;
1182	psock->saved_data_ready = NULL;
1183}
v5.9
  1// SPDX-License-Identifier: GPL-2.0
  2/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */
  3
  4#include <linux/skmsg.h>
  5#include <linux/skbuff.h>
  6#include <linux/scatterlist.h>
  7
  8#include <net/sock.h>
  9#include <net/tcp.h>
 10#include <net/tls.h>
 11
 12static bool sk_msg_try_coalesce_ok(struct sk_msg *msg, int elem_first_coalesce)
 13{
 14	if (msg->sg.end > msg->sg.start &&
 15	    elem_first_coalesce < msg->sg.end)
 16		return true;
 17
 18	if (msg->sg.end < msg->sg.start &&
 19	    (elem_first_coalesce > msg->sg.start ||
 20	     elem_first_coalesce < msg->sg.end))
 21		return true;
 22
 23	return false;
 24}
 25
 26int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len,
 27		 int elem_first_coalesce)
 28{
 29	struct page_frag *pfrag = sk_page_frag(sk);
 30	int ret = 0;
 31
 32	len -= msg->sg.size;
 33	while (len > 0) {
 34		struct scatterlist *sge;
 35		u32 orig_offset;
 36		int use, i;
 37
 38		if (!sk_page_frag_refill(sk, pfrag))
 39			return -ENOMEM;
 40
 41		orig_offset = pfrag->offset;
 42		use = min_t(int, len, pfrag->size - orig_offset);
 43		if (!sk_wmem_schedule(sk, use))
 44			return -ENOMEM;
 45
 46		i = msg->sg.end;
 47		sk_msg_iter_var_prev(i);
 48		sge = &msg->sg.data[i];
 49
 50		if (sk_msg_try_coalesce_ok(msg, elem_first_coalesce) &&
 51		    sg_page(sge) == pfrag->page &&
 52		    sge->offset + sge->length == orig_offset) {
 53			sge->length += use;
 54		} else {
 55			if (sk_msg_full(msg)) {
 56				ret = -ENOSPC;
 57				break;
 58			}
 59
 60			sge = &msg->sg.data[msg->sg.end];
 61			sg_unmark_end(sge);
 62			sg_set_page(sge, pfrag->page, use, orig_offset);
 63			get_page(pfrag->page);
 64			sk_msg_iter_next(msg, end);
 65		}
 66
 67		sk_mem_charge(sk, use);
 68		msg->sg.size += use;
 69		pfrag->offset += use;
 70		len -= use;
 71	}
 72
 73	return ret;
 74}
 75EXPORT_SYMBOL_GPL(sk_msg_alloc);
 76
 77int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src,
 78		 u32 off, u32 len)
 79{
 80	int i = src->sg.start;
 81	struct scatterlist *sge = sk_msg_elem(src, i);
 82	struct scatterlist *sgd = NULL;
 83	u32 sge_len, sge_off;
 84
 85	while (off) {
 86		if (sge->length > off)
 87			break;
 88		off -= sge->length;
 89		sk_msg_iter_var_next(i);
 90		if (i == src->sg.end && off)
 91			return -ENOSPC;
 92		sge = sk_msg_elem(src, i);
 93	}
 94
 95	while (len) {
 96		sge_len = sge->length - off;
 97		if (sge_len > len)
 98			sge_len = len;
 99
100		if (dst->sg.end)
101			sgd = sk_msg_elem(dst, dst->sg.end - 1);
102
103		if (sgd &&
104		    (sg_page(sge) == sg_page(sgd)) &&
105		    (sg_virt(sge) + off == sg_virt(sgd) + sgd->length)) {
106			sgd->length += sge_len;
107			dst->sg.size += sge_len;
108		} else if (!sk_msg_full(dst)) {
109			sge_off = sge->offset + off;
110			sk_msg_page_add(dst, sg_page(sge), sge_len, sge_off);
111		} else {
112			return -ENOSPC;
113		}
114
115		off = 0;
116		len -= sge_len;
117		sk_mem_charge(sk, sge_len);
118		sk_msg_iter_var_next(i);
119		if (i == src->sg.end && len)
120			return -ENOSPC;
121		sge = sk_msg_elem(src, i);
122	}
123
124	return 0;
125}
126EXPORT_SYMBOL_GPL(sk_msg_clone);
127
128void sk_msg_return_zero(struct sock *sk, struct sk_msg *msg, int bytes)
129{
130	int i = msg->sg.start;
131
132	do {
133		struct scatterlist *sge = sk_msg_elem(msg, i);
134
135		if (bytes < sge->length) {
136			sge->length -= bytes;
137			sge->offset += bytes;
138			sk_mem_uncharge(sk, bytes);
139			break;
140		}
141
142		sk_mem_uncharge(sk, sge->length);
143		bytes -= sge->length;
144		sge->length = 0;
145		sge->offset = 0;
146		sk_msg_iter_var_next(i);
147	} while (bytes && i != msg->sg.end);
148	msg->sg.start = i;
149}
150EXPORT_SYMBOL_GPL(sk_msg_return_zero);
151
152void sk_msg_return(struct sock *sk, struct sk_msg *msg, int bytes)
153{
154	int i = msg->sg.start;
155
156	do {
157		struct scatterlist *sge = &msg->sg.data[i];
158		int uncharge = (bytes < sge->length) ? bytes : sge->length;
159
160		sk_mem_uncharge(sk, uncharge);
161		bytes -= uncharge;
162		sk_msg_iter_var_next(i);
163	} while (i != msg->sg.end);
164}
165EXPORT_SYMBOL_GPL(sk_msg_return);
166
167static int sk_msg_free_elem(struct sock *sk, struct sk_msg *msg, u32 i,
168			    bool charge)
169{
170	struct scatterlist *sge = sk_msg_elem(msg, i);
171	u32 len = sge->length;
172
173	if (charge)
174		sk_mem_uncharge(sk, len);
175	if (!msg->skb)
176		put_page(sg_page(sge));
177	memset(sge, 0, sizeof(*sge));
178	return len;
179}
180
181static int __sk_msg_free(struct sock *sk, struct sk_msg *msg, u32 i,
182			 bool charge)
183{
184	struct scatterlist *sge = sk_msg_elem(msg, i);
185	int freed = 0;
186
187	while (msg->sg.size) {
188		msg->sg.size -= sge->length;
189		freed += sk_msg_free_elem(sk, msg, i, charge);
190		sk_msg_iter_var_next(i);
191		sk_msg_check_to_free(msg, i, msg->sg.size);
192		sge = sk_msg_elem(msg, i);
193	}
194	consume_skb(msg->skb);
195	sk_msg_init(msg);
196	return freed;
197}
198
199int sk_msg_free_nocharge(struct sock *sk, struct sk_msg *msg)
200{
201	return __sk_msg_free(sk, msg, msg->sg.start, false);
202}
203EXPORT_SYMBOL_GPL(sk_msg_free_nocharge);
204
205int sk_msg_free(struct sock *sk, struct sk_msg *msg)
206{
207	return __sk_msg_free(sk, msg, msg->sg.start, true);
208}
209EXPORT_SYMBOL_GPL(sk_msg_free);
210
211static void __sk_msg_free_partial(struct sock *sk, struct sk_msg *msg,
212				  u32 bytes, bool charge)
213{
214	struct scatterlist *sge;
215	u32 i = msg->sg.start;
216
217	while (bytes) {
218		sge = sk_msg_elem(msg, i);
219		if (!sge->length)
220			break;
221		if (bytes < sge->length) {
222			if (charge)
223				sk_mem_uncharge(sk, bytes);
224			sge->length -= bytes;
225			sge->offset += bytes;
226			msg->sg.size -= bytes;
227			break;
228		}
229
230		msg->sg.size -= sge->length;
231		bytes -= sge->length;
232		sk_msg_free_elem(sk, msg, i, charge);
233		sk_msg_iter_var_next(i);
234		sk_msg_check_to_free(msg, i, bytes);
235	}
236	msg->sg.start = i;
237}
238
239void sk_msg_free_partial(struct sock *sk, struct sk_msg *msg, u32 bytes)
240{
241	__sk_msg_free_partial(sk, msg, bytes, true);
242}
243EXPORT_SYMBOL_GPL(sk_msg_free_partial);
244
245void sk_msg_free_partial_nocharge(struct sock *sk, struct sk_msg *msg,
246				  u32 bytes)
247{
248	__sk_msg_free_partial(sk, msg, bytes, false);
249}
250
251void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len)
252{
253	int trim = msg->sg.size - len;
254	u32 i = msg->sg.end;
255
256	if (trim <= 0) {
257		WARN_ON(trim < 0);
258		return;
259	}
260
261	sk_msg_iter_var_prev(i);
262	msg->sg.size = len;
263	while (msg->sg.data[i].length &&
264	       trim >= msg->sg.data[i].length) {
265		trim -= msg->sg.data[i].length;
266		sk_msg_free_elem(sk, msg, i, true);
267		sk_msg_iter_var_prev(i);
268		if (!trim)
269			goto out;
270	}
271
272	msg->sg.data[i].length -= trim;
273	sk_mem_uncharge(sk, trim);
274	/* Adjust copybreak if it falls into the trimmed part of last buf */
275	if (msg->sg.curr == i && msg->sg.copybreak > msg->sg.data[i].length)
276		msg->sg.copybreak = msg->sg.data[i].length;
277out:
278	sk_msg_iter_var_next(i);
279	msg->sg.end = i;
280
281	/* If we trim data a full sg elem before the curr pointer, update
282	 * copybreak and curr so that any future copy operations
283	 * start at the new copy location.
284	 * However, trimmed data that has not yet been used in a copy op
285	 * does not require an update.
286	 */
287	if (!msg->sg.size) {
288		msg->sg.curr = msg->sg.start;
289		msg->sg.copybreak = 0;
290	} else if (sk_msg_iter_dist(msg->sg.start, msg->sg.curr) >=
291		   sk_msg_iter_dist(msg->sg.start, msg->sg.end)) {
292		sk_msg_iter_var_prev(i);
293		msg->sg.curr = i;
294		msg->sg.copybreak = msg->sg.data[i].length;
295	}
296}
297EXPORT_SYMBOL_GPL(sk_msg_trim);
298
299int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
300			      struct sk_msg *msg, u32 bytes)
301{
302	int i, maxpages, ret = 0, num_elems = sk_msg_elem_used(msg);
303	const int to_max_pages = MAX_MSG_FRAGS;
304	struct page *pages[MAX_MSG_FRAGS];
305	ssize_t orig, copied, use, offset;
306
307	orig = msg->sg.size;
308	while (bytes > 0) {
309		i = 0;
310		maxpages = to_max_pages - num_elems;
311		if (maxpages == 0) {
312			ret = -EFAULT;
313			goto out;
314		}
315
316		copied = iov_iter_get_pages(from, pages, bytes, maxpages,
317					    &offset);
318		if (copied <= 0) {
319			ret = -EFAULT;
320			goto out;
321		}
322
323		iov_iter_advance(from, copied);
324		bytes -= copied;
325		msg->sg.size += copied;
326
327		while (copied) {
328			use = min_t(int, copied, PAGE_SIZE - offset);
329			sg_set_page(&msg->sg.data[msg->sg.end],
330				    pages[i], use, offset);
331			sg_unmark_end(&msg->sg.data[msg->sg.end]);
332			sk_mem_charge(sk, use);
333
334			offset = 0;
335			copied -= use;
336			sk_msg_iter_next(msg, end);
337			num_elems++;
338			i++;
339		}
340		/* When zerocopy is mixed with sk_msg_*copy* operations we
341		 * may have a copybreak set; in that case clear it and prefer
342		 * the zerocopy remainder when possible.
343		 */
344		msg->sg.copybreak = 0;
345		msg->sg.curr = msg->sg.end;
346	}
347out:
348	/* Revert iov_iter updates, msg will need to use 'trim' later if it
349	 * also needs to be cleared.
350	 */
351	if (ret)
352		iov_iter_revert(from, msg->sg.size - orig);
353	return ret;
354}
355EXPORT_SYMBOL_GPL(sk_msg_zerocopy_from_iter);
356
357int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
358			     struct sk_msg *msg, u32 bytes)
359{
360	int ret = -ENOSPC, i = msg->sg.curr;
361	struct scatterlist *sge;
362	u32 copy, buf_size;
363	void *to;
364
365	do {
366		sge = sk_msg_elem(msg, i);
367		/* This is possible if a trim operation shrunk the buffer */
368		if (msg->sg.copybreak >= sge->length) {
369			msg->sg.copybreak = 0;
370			sk_msg_iter_var_next(i);
371			if (i == msg->sg.end)
372				break;
373			sge = sk_msg_elem(msg, i);
374		}
375
376		buf_size = sge->length - msg->sg.copybreak;
377		copy = (buf_size > bytes) ? bytes : buf_size;
378		to = sg_virt(sge) + msg->sg.copybreak;
379		msg->sg.copybreak += copy;
380		if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY)
381			ret = copy_from_iter_nocache(to, copy, from);
382		else
383			ret = copy_from_iter(to, copy, from);
384		if (ret != copy) {
385			ret = -EFAULT;
386			goto out;
387		}
388		bytes -= copy;
389		if (!bytes)
390			break;
391		msg->sg.copybreak = 0;
392		sk_msg_iter_var_next(i);
393	} while (i != msg->sg.end);
394out:
395	msg->sg.curr = i;
396	return ret;
397}
398EXPORT_SYMBOL_GPL(sk_msg_memcopy_from_iter);
399
400static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb)
401{
402	struct sock *sk = psock->sk;
403	int copied = 0, num_sge;
404	struct sk_msg *msg;
405
406	msg = kzalloc(sizeof(*msg), __GFP_NOWARN | GFP_ATOMIC);
407	if (unlikely(!msg))
408		return -EAGAIN;
409	if (!sk_rmem_schedule(sk, skb, skb->len)) {
410		kfree(msg);
411		return -EAGAIN;
412	}
413
414	sk_msg_init(msg);
415	num_sge = skb_to_sgvec(skb, msg->sg.data, 0, skb->len);
416	if (unlikely(num_sge < 0)) {
417		kfree(msg);
418		return num_sge;
419	}
420
421	sk_mem_charge(sk, skb->len);
422	copied = skb->len;
423	msg->sg.start = 0;
424	msg->sg.size = copied;
425	msg->sg.end = num_sge;
426	msg->skb = skb;
427
428	sk_psock_queue_msg(psock, msg);
429	sk_psock_data_ready(sk, psock);
430	return copied;
431}
432
433static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb,
434			       u32 off, u32 len, bool ingress)
435{
436	if (ingress)
437		return sk_psock_skb_ingress(psock, skb);
438	else
439		return skb_send_sock_locked(psock->sk, skb, off, len);
440}
441
442static void sk_psock_backlog(struct work_struct *work)
443{
444	struct sk_psock *psock = container_of(work, struct sk_psock, work);
445	struct sk_psock_work_state *state = &psock->work_state;
446	struct sk_buff *skb;
447	bool ingress;
448	u32 len, off;
449	int ret;
450
451	/* Lock sock to avoid losing sk_socket during loop. */
452	lock_sock(psock->sk);
453	if (state->skb) {
454		skb = state->skb;
455		len = state->len;
456		off = state->off;
457		state->skb = NULL;
458		goto start;
459	}
460
461	while ((skb = skb_dequeue(&psock->ingress_skb))) {
462		len = skb->len;
463		off = 0;
464start:
465		ingress = tcp_skb_bpf_ingress(skb);
466		do {
467			ret = -EIO;
468			if (likely(psock->sk->sk_socket))
469				ret = sk_psock_handle_skb(psock, skb, off,
470							  len, ingress);
471			if (ret <= 0) {
472				if (ret == -EAGAIN) {
473					state->skb = skb;
474					state->len = len;
475					state->off = off;
476					goto end;
477				}
478				/* Hard errors break pipe and stop xmit. */
479				sk_psock_report_error(psock, ret ? -ret : EPIPE);
480				sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
481				kfree_skb(skb);
482				goto end;
483			}
484			off += ret;
485			len -= ret;
486		} while (len);
487
488		if (!ingress)
489			kfree_skb(skb);
490	}
491end:
492	release_sock(psock->sk);
493}
494
495struct sk_psock *sk_psock_init(struct sock *sk, int node)
496{
497	struct sk_psock *psock = kzalloc_node(sizeof(*psock),
498					      GFP_ATOMIC | __GFP_NOWARN,
499					      node);
500	if (!psock)
501		return NULL;
502
503	psock->sk = sk;
504	psock->eval =  __SK_NONE;
505
506	INIT_LIST_HEAD(&psock->link);
507	spin_lock_init(&psock->link_lock);
508
509	INIT_WORK(&psock->work, sk_psock_backlog);
510	INIT_LIST_HEAD(&psock->ingress_msg);
511	skb_queue_head_init(&psock->ingress_skb);
512
513	sk_psock_set_state(psock, SK_PSOCK_TX_ENABLED);
514	refcount_set(&psock->refcnt, 1);
515
516	rcu_assign_sk_user_data_nocopy(sk, psock);
517	sock_hold(sk);
518
519	return psock;
520}
521EXPORT_SYMBOL_GPL(sk_psock_init);
522
523struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock)
524{
525	struct sk_psock_link *link;
526
527	spin_lock_bh(&psock->link_lock);
528	link = list_first_entry_or_null(&psock->link, struct sk_psock_link,
529					list);
530	if (link)
531		list_del(&link->list);
532	spin_unlock_bh(&psock->link_lock);
533	return link;
534}
535
536void __sk_psock_purge_ingress_msg(struct sk_psock *psock)
537{
538	struct sk_msg *msg, *tmp;
539
540	list_for_each_entry_safe(msg, tmp, &psock->ingress_msg, list) {
541		list_del(&msg->list);
542		sk_msg_free(psock->sk, msg);
543		kfree(msg);
544	}
545}
546
547static void sk_psock_zap_ingress(struct sk_psock *psock)
548{
549	__skb_queue_purge(&psock->ingress_skb);
550	__sk_psock_purge_ingress_msg(psock);
551}
552
553static void sk_psock_link_destroy(struct sk_psock *psock)
554{
555	struct sk_psock_link *link, *tmp;
556
557	list_for_each_entry_safe(link, tmp, &psock->link, list) {
558		list_del(&link->list);
559		sk_psock_free_link(link);
560	}
561}
562
563static void sk_psock_destroy_deferred(struct work_struct *gc)
564{
565	struct sk_psock *psock = container_of(gc, struct sk_psock, gc);
566
567	/* No sk_callback_lock since already detached. */
568
569	/* Parser has been stopped */
570	if (psock->progs.skb_parser)
571		strp_done(&psock->parser.strp);
572
573	cancel_work_sync(&psock->work);
574
575	psock_progs_drop(&psock->progs);
576
577	sk_psock_link_destroy(psock);
578	sk_psock_cork_free(psock);
579	sk_psock_zap_ingress(psock);
580
581	if (psock->sk_redir)
582		sock_put(psock->sk_redir);
583	sock_put(psock->sk);
584	kfree(psock);
585}
586
587void sk_psock_destroy(struct rcu_head *rcu)
588{
589	struct sk_psock *psock = container_of(rcu, struct sk_psock, rcu);
590
591	INIT_WORK(&psock->gc, sk_psock_destroy_deferred);
592	schedule_work(&psock->gc);
593}
594EXPORT_SYMBOL_GPL(sk_psock_destroy);
595
596void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
597{
598	sk_psock_cork_free(psock);
599	sk_psock_zap_ingress(psock);
600
601	write_lock_bh(&sk->sk_callback_lock);
602	sk_psock_restore_proto(sk, psock);
603	rcu_assign_sk_user_data(sk, NULL);
604	if (psock->progs.skb_parser)
605		sk_psock_stop_strp(sk, psock);
606	write_unlock_bh(&sk->sk_callback_lock);
607	sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
608
609	call_rcu(&psock->rcu, sk_psock_destroy);
610}
611EXPORT_SYMBOL_GPL(sk_psock_drop);
612
613static int sk_psock_map_verd(int verdict, bool redir)
614{
615	switch (verdict) {
616	case SK_PASS:
617		return redir ? __SK_REDIRECT : __SK_PASS;
618	case SK_DROP:
619	default:
620		break;
621	}
622
623	return __SK_DROP;
624}
625
626int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
627			 struct sk_msg *msg)
628{
629	struct bpf_prog *prog;
630	int ret;
631
632	rcu_read_lock();
633	prog = READ_ONCE(psock->progs.msg_parser);
634	if (unlikely(!prog)) {
635		ret = __SK_PASS;
636		goto out;
637	}
638
639	sk_msg_compute_data_pointers(msg);
640	msg->sk = sk;
641	ret = bpf_prog_run_pin_on_cpu(prog, msg);
642	ret = sk_psock_map_verd(ret, msg->sk_redir);
643	psock->apply_bytes = msg->apply_bytes;
644	if (ret == __SK_REDIRECT) {
645		if (psock->sk_redir)
646			sock_put(psock->sk_redir);
647		psock->sk_redir = msg->sk_redir;
648		if (!psock->sk_redir) {
649			ret = __SK_DROP;
650			goto out;
651		}
652		sock_hold(psock->sk_redir);
653	}
654out:
655	rcu_read_unlock();
656	return ret;
657}
658EXPORT_SYMBOL_GPL(sk_psock_msg_verdict);
659
660static int sk_psock_bpf_run(struct sk_psock *psock, struct bpf_prog *prog,
661			    struct sk_buff *skb)
662{
663	int ret;
664
665	skb->sk = psock->sk;
666	bpf_compute_data_end_sk_skb(skb);
667	ret = bpf_prog_run_pin_on_cpu(prog, skb);
668	/* strparser clones the skb before handing it to an upper layer,
669	 * meaning skb_orphan has been called. We NULL sk on the way out
670	 * to ensure we don't trigger a BUG_ON() in skb/sk operations
671	 * later and because we are not charging the memory of this skb
672	 * to any socket yet.
673	 */
674	skb->sk = NULL;
675	return ret;
676}
677
678static struct sk_psock *sk_psock_from_strp(struct strparser *strp)
679{
680	struct sk_psock_parser *parser;
681
682	parser = container_of(strp, struct sk_psock_parser, strp);
683	return container_of(parser, struct sk_psock, parser);
684}
685
686static void sk_psock_skb_redirect(struct sk_buff *skb)
687{
688	struct sk_psock *psock_other;
689	struct sock *sk_other;
690	bool ingress;
691
692	sk_other = tcp_skb_bpf_redirect_fetch(skb);
693	if (unlikely(!sk_other)) {
694		kfree_skb(skb);
695		return;
696	}
697	psock_other = sk_psock(sk_other);
698	if (!psock_other || sock_flag(sk_other, SOCK_DEAD) ||
699	    !sk_psock_test_state(psock_other, SK_PSOCK_TX_ENABLED)) {
700		kfree_skb(skb);
701		return;
702	}
703
704	ingress = tcp_skb_bpf_ingress(skb);
705	if ((!ingress && sock_writeable(sk_other)) ||
706	    (ingress &&
707	     atomic_read(&sk_other->sk_rmem_alloc) <=
708	     sk_other->sk_rcvbuf)) {
709		if (!ingress)
710			skb_set_owner_w(skb, sk_other);
711		skb_queue_tail(&psock_other->ingress_skb, skb);
712		schedule_work(&psock_other->work);
713	} else {
714		kfree_skb(skb);
715	}
716}
717
718static void sk_psock_tls_verdict_apply(struct sk_buff *skb, int verdict)
719{
720	switch (verdict) {
721	case __SK_REDIRECT:
722		sk_psock_skb_redirect(skb);
723		break;
724	case __SK_PASS:
725	case __SK_DROP:
726	default:
727		break;
728	}
729}
730
731int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb)
732{
733	struct bpf_prog *prog;
734	int ret = __SK_PASS;
735
736	rcu_read_lock();
737	prog = READ_ONCE(psock->progs.skb_verdict);
738	if (likely(prog)) {
739		tcp_skb_bpf_redirect_clear(skb);
740		ret = sk_psock_bpf_run(psock, prog, skb);
741		ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb));
742	}
743	sk_psock_tls_verdict_apply(skb, ret);
744	rcu_read_unlock();
745	return ret;
746}
747EXPORT_SYMBOL_GPL(sk_psock_tls_strp_read);
748
749static void sk_psock_verdict_apply(struct sk_psock *psock,
750				   struct sk_buff *skb, int verdict)
751{
752	struct sock *sk_other;
753
754	switch (verdict) {
755	case __SK_PASS:
756		sk_other = psock->sk;
757		if (sock_flag(sk_other, SOCK_DEAD) ||
758		    !sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
759			goto out_free;
760		}
761		if (atomic_read(&sk_other->sk_rmem_alloc) <=
762		    sk_other->sk_rcvbuf) {
763			struct tcp_skb_cb *tcp = TCP_SKB_CB(skb);
764
765			tcp->bpf.flags |= BPF_F_INGRESS;
766			skb_queue_tail(&psock->ingress_skb, skb);
767			schedule_work(&psock->work);
768			break;
769		}
770		goto out_free;
771	case __SK_REDIRECT:
772		sk_psock_skb_redirect(skb);
773		break;
774	case __SK_DROP:
775	default:
776out_free:
777		kfree_skb(skb);
778	}
779}
780
781static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
782{
783	struct sk_psock *psock;
784	struct bpf_prog *prog;
785	int ret = __SK_DROP;
786	struct sock *sk;
787
788	rcu_read_lock();
789	sk = strp->sk;
790	psock = sk_psock(sk);
791	if (unlikely(!psock)) {
792		kfree_skb(skb);
793		goto out;
794	}
795	prog = READ_ONCE(psock->progs.skb_verdict);
796	if (likely(prog)) {
797		skb_orphan(skb);
798		tcp_skb_bpf_redirect_clear(skb);
799		ret = sk_psock_bpf_run(psock, prog, skb);
800		ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb));
801	}
802	sk_psock_verdict_apply(psock, skb, ret);
803out:
804	rcu_read_unlock();
805}
806
807static int sk_psock_strp_read_done(struct strparser *strp, int err)
808{
809	return err;
810}
811
812static int sk_psock_strp_parse(struct strparser *strp, struct sk_buff *skb)
813{
814	struct sk_psock *psock = sk_psock_from_strp(strp);
815	struct bpf_prog *prog;
816	int ret = skb->len;
817
818	rcu_read_lock();
819	prog = READ_ONCE(psock->progs.skb_parser);
820	if (likely(prog))
821		ret = sk_psock_bpf_run(psock, prog, skb);
822	rcu_read_unlock();
823	return ret;
824}
825
826/* Called with socket lock held. */
827static void sk_psock_strp_data_ready(struct sock *sk)
828{
829	struct sk_psock *psock;
830
831	rcu_read_lock();
832	psock = sk_psock(sk);
833	if (likely(psock)) {
834		if (tls_sw_has_ctx_rx(sk)) {
835			psock->parser.saved_data_ready(sk);
836		} else {
837			write_lock_bh(&sk->sk_callback_lock);
838			strp_data_ready(&psock->parser.strp);
839			write_unlock_bh(&sk->sk_callback_lock);
840		}
841	}
842	rcu_read_unlock();
843}
844
845static void sk_psock_write_space(struct sock *sk)
846{
847	struct sk_psock *psock;
848	void (*write_space)(struct sock *sk) = NULL;
849
850	rcu_read_lock();
851	psock = sk_psock(sk);
852	if (likely(psock)) {
853		if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
854			schedule_work(&psock->work);
855		write_space = psock->saved_write_space;
856	}
857	rcu_read_unlock();
858	if (write_space)
859		write_space(sk);
860}
861
862int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock)
863{
864	static const struct strp_callbacks cb = {
865		.rcv_msg	= sk_psock_strp_read,
866		.read_sock_done	= sk_psock_strp_read_done,
867		.parse_msg	= sk_psock_strp_parse,
868	};
869
870	psock->parser.enabled = false;
871	return strp_init(&psock->parser.strp, sk, &cb);
872}
873
874void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
875{
876	struct sk_psock_parser *parser = &psock->parser;
877
878	if (parser->enabled)
879		return;
880
881	parser->saved_data_ready = sk->sk_data_ready;
882	sk->sk_data_ready = sk_psock_strp_data_ready;
883	sk->sk_write_space = sk_psock_write_space;
884	parser->enabled = true;
885}
886
887void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock)
888{
889	struct sk_psock_parser *parser = &psock->parser;
890
891	if (!parser->enabled)
892		return;
893
894	sk->sk_data_ready = parser->saved_data_ready;
895	parser->saved_data_ready = NULL;
896	strp_stop(&parser->strp);
897	parser->enabled = false;
898}