   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Kernel Connection Multiplexor
   4 *
   5 * Copyright (c) 2016 Tom Herbert <tom@herbertland.com>
   6 */
   7
   8#include <linux/bpf.h>
   9#include <linux/errno.h>
  10#include <linux/errqueue.h>
  11#include <linux/file.h>
  12#include <linux/filter.h>
  13#include <linux/in.h>
  14#include <linux/kernel.h>
  15#include <linux/module.h>
  16#include <linux/net.h>
  17#include <linux/netdevice.h>
  18#include <linux/poll.h>
  19#include <linux/rculist.h>
  20#include <linux/skbuff.h>
  21#include <linux/socket.h>
  22#include <linux/uaccess.h>
  23#include <linux/workqueue.h>
  24#include <linux/syscalls.h>
  25#include <linux/sched/signal.h>
  26
  27#include <net/kcm.h>
  28#include <net/netns/generic.h>
  29#include <net/sock.h>
  30#include <uapi/linux/kcm.h>
  31#include <trace/events/sock.h>
  32
  33unsigned int kcm_net_id;
  34
  35static struct kmem_cache *kcm_psockp __read_mostly;
  36static struct kmem_cache *kcm_muxp __read_mostly;
  37static struct workqueue_struct *kcm_wq;
  38
  39static inline struct kcm_sock *kcm_sk(const struct sock *sk)
  40{
  41	return (struct kcm_sock *)sk;
  42}
  43
  44static inline struct kcm_tx_msg *kcm_tx_msg(struct sk_buff *skb)
  45{
  46	return (struct kcm_tx_msg *)skb->cb;
  47}
  48
  49static void report_csk_error(struct sock *csk, int err)
  50{
  51	csk->sk_err = EPIPE;
  52	sk_error_report(csk);
  53}
  54
  55static void kcm_abort_tx_psock(struct kcm_psock *psock, int err,
  56			       bool wakeup_kcm)
  57{
  58	struct sock *csk = psock->sk;
  59	struct kcm_mux *mux = psock->mux;
  60
  61	/* Unrecoverable error in transmit */
  62
  63	spin_lock_bh(&mux->lock);
  64
  65	if (psock->tx_stopped) {
  66		spin_unlock_bh(&mux->lock);
  67		return;
  68	}
  69
  70	psock->tx_stopped = 1;
  71	KCM_STATS_INCR(psock->stats.tx_aborts);
  72
  73	if (!psock->tx_kcm) {
  74		/* Take off psocks_avail list */
  75		list_del(&psock->psock_avail_list);
  76	} else if (wakeup_kcm) {
  77		/* In this case psock is being aborted while outside of
  78		 * write_msgs and psock is reserved. Schedule tx_work
  79		 * to handle the failure there. Need to commit tx_stopped
  80		 * before queuing work.
  81		 */
  82		smp_mb();
  83
  84		queue_work(kcm_wq, &psock->tx_kcm->tx_work);
  85	}
  86
  87	spin_unlock_bh(&mux->lock);
  88
  89	/* Report error on lower socket */
  90	report_csk_error(csk, err);
  91}
  92
  93/* RX mux lock held. */
  94static void kcm_update_rx_mux_stats(struct kcm_mux *mux,
  95				    struct kcm_psock *psock)
  96{
  97	STRP_STATS_ADD(mux->stats.rx_bytes,
  98		       psock->strp.stats.bytes -
  99		       psock->saved_rx_bytes);
 100	mux->stats.rx_msgs +=
 101		psock->strp.stats.msgs - psock->saved_rx_msgs;
 102	psock->saved_rx_msgs = psock->strp.stats.msgs;
 103	psock->saved_rx_bytes = psock->strp.stats.bytes;
 104}
 105
 106static void kcm_update_tx_mux_stats(struct kcm_mux *mux,
 107				    struct kcm_psock *psock)
 108{
 109	KCM_STATS_ADD(mux->stats.tx_bytes,
 110		      psock->stats.tx_bytes - psock->saved_tx_bytes);
 111	mux->stats.tx_msgs +=
 112		psock->stats.tx_msgs - psock->saved_tx_msgs;
 113	psock->saved_tx_msgs = psock->stats.tx_msgs;
 114	psock->saved_tx_bytes = psock->stats.tx_bytes;
 115}
 116
 117static int kcm_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
 118
 119/* KCM is ready to receive messages on its queue-- either the KCM is new or
 120 * has become unblocked after being blocked on full socket buffer. Queue any
 121 * pending ready messages on a psock. RX mux lock held.
 122 */
 123static void kcm_rcv_ready(struct kcm_sock *kcm)
 124{
 125	struct kcm_mux *mux = kcm->mux;
 126	struct kcm_psock *psock;
 127	struct sk_buff *skb;
 128
 129	if (unlikely(kcm->rx_wait || kcm->rx_psock || kcm->rx_disabled))
 130		return;
 131
 132	while (unlikely((skb = __skb_dequeue(&mux->rx_hold_queue)))) {
 133		if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
 134			/* Assuming buffer limit has been reached */
 135			skb_queue_head(&mux->rx_hold_queue, skb);
 136			WARN_ON(!sk_rmem_alloc_get(&kcm->sk));
 137			return;
 138		}
 139	}
 140
 141	while (!list_empty(&mux->psocks_ready)) {
 142		psock = list_first_entry(&mux->psocks_ready, struct kcm_psock,
 143					 psock_ready_list);
 144
 145		if (kcm_queue_rcv_skb(&kcm->sk, psock->ready_rx_msg)) {
 146			/* Assuming buffer limit has been reached */
 147			WARN_ON(!sk_rmem_alloc_get(&kcm->sk));
 148			return;
 149		}
 150
 151		/* Consumed the ready message on the psock. Schedule rx_work to
 152		 * get more messages.
 153		 */
 154		list_del(&psock->psock_ready_list);
 155		psock->ready_rx_msg = NULL;
 156		/* Commit clearing of ready_rx_msg for queuing work */
 157		smp_mb();
 158
 159		strp_unpause(&psock->strp);
 160		strp_check_rcv(&psock->strp);
 161	}
 162
 163	/* Buffer limit is okay now, add to ready list */
 164	list_add_tail(&kcm->wait_rx_list,
 165		      &kcm->mux->kcm_rx_waiters);
 166	/* paired with lockless reads in kcm_rfree() */
 167	WRITE_ONCE(kcm->rx_wait, true);
 168}
 169
 170static void kcm_rfree(struct sk_buff *skb)
 171{
 172	struct sock *sk = skb->sk;
 173	struct kcm_sock *kcm = kcm_sk(sk);
 174	struct kcm_mux *mux = kcm->mux;
 175	unsigned int len = skb->truesize;
 176
 177	sk_mem_uncharge(sk, len);
 178	atomic_sub(len, &sk->sk_rmem_alloc);
 179
 180	/* For reading rx_wait and rx_psock without holding lock */
 181	smp_mb__after_atomic();
 182
 183	if (!READ_ONCE(kcm->rx_wait) && !READ_ONCE(kcm->rx_psock) &&
 184	    sk_rmem_alloc_get(sk) < sk->sk_rcvlowat) {
 185		spin_lock_bh(&mux->rx_lock);
 186		kcm_rcv_ready(kcm);
 187		spin_unlock_bh(&mux->rx_lock);
 188	}
 189}
 190
 191static int kcm_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 192{
 193	struct sk_buff_head *list = &sk->sk_receive_queue;
 194
 195	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
 196		return -ENOMEM;
 197
 198	if (!sk_rmem_schedule(sk, skb, skb->truesize))
 199		return -ENOBUFS;
 200
 201	skb->dev = NULL;
 202
 203	skb_orphan(skb);
 204	skb->sk = sk;
 205	skb->destructor = kcm_rfree;
 206	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
 207	sk_mem_charge(sk, skb->truesize);
 208
 209	skb_queue_tail(list, skb);
 210
 211	if (!sock_flag(sk, SOCK_DEAD))
 212		sk->sk_data_ready(sk);
 213
 214	return 0;
 215}
 216
 217/* Requeue received messages for a kcm socket to other kcm sockets. This is
  218 * called when a kcm socket is receive disabled.
 219 * RX mux lock held.
 220 */
 221static void requeue_rx_msgs(struct kcm_mux *mux, struct sk_buff_head *head)
 222{
 223	struct sk_buff *skb;
 224	struct kcm_sock *kcm;
 225
 226	while ((skb = skb_dequeue(head))) {
 227		/* Reset destructor to avoid calling kcm_rcv_ready */
 228		skb->destructor = sock_rfree;
 229		skb_orphan(skb);
 230try_again:
 231		if (list_empty(&mux->kcm_rx_waiters)) {
 232			skb_queue_tail(&mux->rx_hold_queue, skb);
 233			continue;
 234		}
 235
 236		kcm = list_first_entry(&mux->kcm_rx_waiters,
 237				       struct kcm_sock, wait_rx_list);
 238
 239		if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
 240			/* Should mean socket buffer full */
 241			list_del(&kcm->wait_rx_list);
 242			/* paired with lockless reads in kcm_rfree() */
 243			WRITE_ONCE(kcm->rx_wait, false);
 244
  245			/* Commit rx_wait to read in kcm_rfree() */
 246			smp_wmb();
 247
 248			goto try_again;
 249		}
 250	}
 251}
 252
 253/* Lower sock lock held */
 254static struct kcm_sock *reserve_rx_kcm(struct kcm_psock *psock,
 255				       struct sk_buff *head)
 256{
 257	struct kcm_mux *mux = psock->mux;
 258	struct kcm_sock *kcm;
 259
 260	WARN_ON(psock->ready_rx_msg);
 261
 262	if (psock->rx_kcm)
 263		return psock->rx_kcm;
 264
 265	spin_lock_bh(&mux->rx_lock);
 266
 267	if (psock->rx_kcm) {
 268		spin_unlock_bh(&mux->rx_lock);
 269		return psock->rx_kcm;
 270	}
 271
 272	kcm_update_rx_mux_stats(mux, psock);
 273
 274	if (list_empty(&mux->kcm_rx_waiters)) {
 275		psock->ready_rx_msg = head;
 276		strp_pause(&psock->strp);
 277		list_add_tail(&psock->psock_ready_list,
 278			      &mux->psocks_ready);
 279		spin_unlock_bh(&mux->rx_lock);
 280		return NULL;
 281	}
 282
 283	kcm = list_first_entry(&mux->kcm_rx_waiters,
 284			       struct kcm_sock, wait_rx_list);
 285	list_del(&kcm->wait_rx_list);
 286	/* paired with lockless reads in kcm_rfree() */
 287	WRITE_ONCE(kcm->rx_wait, false);
 288
 289	psock->rx_kcm = kcm;
 290	/* paired with lockless reads in kcm_rfree() */
 291	WRITE_ONCE(kcm->rx_psock, psock);
 292
 293	spin_unlock_bh(&mux->rx_lock);
 294
 295	return kcm;
 296}
 297
 298static void kcm_done(struct kcm_sock *kcm);
 299
 300static void kcm_done_work(struct work_struct *w)
 301{
 302	kcm_done(container_of(w, struct kcm_sock, done_work));
 303}
 304
 305/* Lower sock held */
 306static void unreserve_rx_kcm(struct kcm_psock *psock,
 307			     bool rcv_ready)
 308{
 309	struct kcm_sock *kcm = psock->rx_kcm;
 310	struct kcm_mux *mux = psock->mux;
 311
 312	if (!kcm)
 313		return;
 314
 315	spin_lock_bh(&mux->rx_lock);
 316
 317	psock->rx_kcm = NULL;
 318	/* paired with lockless reads in kcm_rfree() */
 319	WRITE_ONCE(kcm->rx_psock, NULL);
 320
 321	/* Commit kcm->rx_psock before sk_rmem_alloc_get to sync with
 322	 * kcm_rfree
 323	 */
 324	smp_mb();
 325
 326	if (unlikely(kcm->done)) {
 327		spin_unlock_bh(&mux->rx_lock);
 328
  329		/* Need to run kcm_done in a task since we need to acquire
 330		 * callback locks which may already be held here.
 331		 */
 332		INIT_WORK(&kcm->done_work, kcm_done_work);
 333		schedule_work(&kcm->done_work);
 334		return;
 335	}
 336
 337	if (unlikely(kcm->rx_disabled)) {
 338		requeue_rx_msgs(mux, &kcm->sk.sk_receive_queue);
 339	} else if (rcv_ready || unlikely(!sk_rmem_alloc_get(&kcm->sk))) {
  340		/* Check for the degenerate race with rx_wait where all
  341		 * data was dequeued (accounted for in kcm_rfree).
 342		 */
 343		kcm_rcv_ready(kcm);
 344	}
 345	spin_unlock_bh(&mux->rx_lock);
 346}
 347
 348/* Lower sock lock held */
 349static void psock_data_ready(struct sock *sk)
 350{
 351	struct kcm_psock *psock;
 352
 353	trace_sk_data_ready(sk);
 354
 355	read_lock_bh(&sk->sk_callback_lock);
 356
 357	psock = (struct kcm_psock *)sk->sk_user_data;
 358	if (likely(psock))
 359		strp_data_ready(&psock->strp);
 360
 361	read_unlock_bh(&sk->sk_callback_lock);
 362}
 363
 364/* Called with lower sock held */
 365static void kcm_rcv_strparser(struct strparser *strp, struct sk_buff *skb)
 366{
 367	struct kcm_psock *psock = container_of(strp, struct kcm_psock, strp);
 368	struct kcm_sock *kcm;
 369
 370try_queue:
 371	kcm = reserve_rx_kcm(psock, skb);
 372	if (!kcm) {
 373		 /* Unable to reserve a KCM, message is held in psock and strp
 374		  * is paused.
 375		  */
 376		return;
 377	}
 378
 379	if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
 380		/* Should mean socket buffer full */
 381		unreserve_rx_kcm(psock, false);
 382		goto try_queue;
 383	}
 384}
 385
 386static int kcm_parse_func_strparser(struct strparser *strp, struct sk_buff *skb)
 387{
 388	struct kcm_psock *psock = container_of(strp, struct kcm_psock, strp);
 389	struct bpf_prog *prog = psock->bpf_prog;
 390	int res;
 391
 392	res = bpf_prog_run_pin_on_cpu(prog, skb);
 393	return res;
 394}
 395
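/* A minimal illustrative sketch (not part of this file) of the kind of
 * program run by kcm_parse_func_strparser() above: a
 * BPF_PROG_TYPE_SOCKET_FILTER program, loaded from userspace and passed as
 * bpf_fd in struct kcm_attach, whose return value is the total length of
 * the message at the head of the stream. The 2-byte big-endian length
 * header at offset 0 and the name kcm_parse_len_prefixed are assumptions
 * made for this example, not something KCM mandates.
 *
 *	SEC("socket")
 *	int kcm_parse_len_prefixed(struct __sk_buff *skb)
 *	{
 *		__u16 len_be;
 *
 *		// Not enough data yet (or error): tell strparser to wait.
 *		if (bpf_skb_load_bytes(skb, 0, &len_be, sizeof(len_be)))
 *			return 0;
 *
 *		// strparser expects the full message length, header included.
 *		return bpf_ntohs(len_be) + sizeof(len_be);
 *	}
 */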
 396static int kcm_read_sock_done(struct strparser *strp, int err)
 397{
 398	struct kcm_psock *psock = container_of(strp, struct kcm_psock, strp);
 399
 400	unreserve_rx_kcm(psock, true);
 401
 402	return err;
 403}
 404
 405static void psock_state_change(struct sock *sk)
 406{
  407	/* TCP only does an EPOLLIN for a half close. Do an EPOLLHUP here
  408	 * since the application will normally not poll with EPOLLIN
 409	 * on the TCP sockets.
 410	 */
 411
 412	report_csk_error(sk, EPIPE);
 413}
 414
 415static void psock_write_space(struct sock *sk)
 416{
 417	struct kcm_psock *psock;
 418	struct kcm_mux *mux;
 419	struct kcm_sock *kcm;
 420
 421	read_lock_bh(&sk->sk_callback_lock);
 422
 423	psock = (struct kcm_psock *)sk->sk_user_data;
 424	if (unlikely(!psock))
 425		goto out;
 426	mux = psock->mux;
 427
 428	spin_lock_bh(&mux->lock);
 429
  430	/* Check if the socket is reserved; if so, someone is waiting to send. */
 431	kcm = psock->tx_kcm;
 432	if (kcm && !unlikely(kcm->tx_stopped))
 433		queue_work(kcm_wq, &kcm->tx_work);
 434
 435	spin_unlock_bh(&mux->lock);
 436out:
 437	read_unlock_bh(&sk->sk_callback_lock);
 438}
 439
 440static void unreserve_psock(struct kcm_sock *kcm);
 441
 442/* kcm sock is locked. */
 443static struct kcm_psock *reserve_psock(struct kcm_sock *kcm)
 444{
 445	struct kcm_mux *mux = kcm->mux;
 446	struct kcm_psock *psock;
 447
 448	psock = kcm->tx_psock;
 449
 450	smp_rmb(); /* Must read tx_psock before tx_wait */
 451
 452	if (psock) {
 453		WARN_ON(kcm->tx_wait);
 454		if (unlikely(psock->tx_stopped))
 455			unreserve_psock(kcm);
 456		else
 457			return kcm->tx_psock;
 458	}
 459
 460	spin_lock_bh(&mux->lock);
 461
  462	/* Check again under lock to see if a psock was reserved for this
  463	 * kcm socket via psock_now_avail().
 464	 */
 465	psock = kcm->tx_psock;
 466	if (unlikely(psock)) {
 467		WARN_ON(kcm->tx_wait);
 468		spin_unlock_bh(&mux->lock);
 469		return kcm->tx_psock;
 470	}
 471
 472	if (!list_empty(&mux->psocks_avail)) {
 473		psock = list_first_entry(&mux->psocks_avail,
 474					 struct kcm_psock,
 475					 psock_avail_list);
 476		list_del(&psock->psock_avail_list);
 477		if (kcm->tx_wait) {
 478			list_del(&kcm->wait_psock_list);
 479			kcm->tx_wait = false;
 480		}
 481		kcm->tx_psock = psock;
 482		psock->tx_kcm = kcm;
 483		KCM_STATS_INCR(psock->stats.reserved);
 484	} else if (!kcm->tx_wait) {
 485		list_add_tail(&kcm->wait_psock_list,
 486			      &mux->kcm_tx_waiters);
 487		kcm->tx_wait = true;
 488	}
 489
 490	spin_unlock_bh(&mux->lock);
 491
 492	return psock;
 493}
 494
 495/* mux lock held */
 496static void psock_now_avail(struct kcm_psock *psock)
 497{
 498	struct kcm_mux *mux = psock->mux;
 499	struct kcm_sock *kcm;
 500
 501	if (list_empty(&mux->kcm_tx_waiters)) {
 502		list_add_tail(&psock->psock_avail_list,
 503			      &mux->psocks_avail);
 504	} else {
 505		kcm = list_first_entry(&mux->kcm_tx_waiters,
 506				       struct kcm_sock,
 507				       wait_psock_list);
 508		list_del(&kcm->wait_psock_list);
 509		kcm->tx_wait = false;
 510		psock->tx_kcm = kcm;
 511
 512		/* Commit before changing tx_psock since that is read in
 513		 * reserve_psock before queuing work.
 514		 */
 515		smp_mb();
 516
 517		kcm->tx_psock = psock;
 518		KCM_STATS_INCR(psock->stats.reserved);
 519		queue_work(kcm_wq, &kcm->tx_work);
 520	}
 521}
 522
 523/* kcm sock is locked. */
 524static void unreserve_psock(struct kcm_sock *kcm)
 525{
 526	struct kcm_psock *psock;
 527	struct kcm_mux *mux = kcm->mux;
 528
 529	spin_lock_bh(&mux->lock);
 530
 531	psock = kcm->tx_psock;
 532
 533	if (WARN_ON(!psock)) {
 534		spin_unlock_bh(&mux->lock);
 535		return;
 536	}
 537
 538	smp_rmb(); /* Read tx_psock before tx_wait */
 539
 540	kcm_update_tx_mux_stats(mux, psock);
 541
 542	WARN_ON(kcm->tx_wait);
 543
 544	kcm->tx_psock = NULL;
 545	psock->tx_kcm = NULL;
 546	KCM_STATS_INCR(psock->stats.unreserved);
 547
 548	if (unlikely(psock->tx_stopped)) {
 549		if (psock->done) {
 550			/* Deferred free */
 551			list_del(&psock->psock_list);
 552			mux->psocks_cnt--;
 553			sock_put(psock->sk);
 554			fput(psock->sk->sk_socket->file);
 555			kmem_cache_free(kcm_psockp, psock);
 556		}
 557
 558		/* Don't put back on available list */
 559
 560		spin_unlock_bh(&mux->lock);
 561
 562		return;
 563	}
 564
 565	psock_now_avail(psock);
 566
 567	spin_unlock_bh(&mux->lock);
 568}
 569
 570static void kcm_report_tx_retry(struct kcm_sock *kcm)
 571{
 572	struct kcm_mux *mux = kcm->mux;
 573
 574	spin_lock_bh(&mux->lock);
 575	KCM_STATS_INCR(mux->stats.tx_retries);
 576	spin_unlock_bh(&mux->lock);
 577}
 578
 579/* Write any messages ready on the kcm socket.  Called with kcm sock lock
 580 * held.  Return bytes actually sent or error.
 581 */
 582static int kcm_write_msgs(struct kcm_sock *kcm)
 583{
 584	unsigned int total_sent = 0;
 585	struct sock *sk = &kcm->sk;
 586	struct kcm_psock *psock;
 587	struct sk_buff *head;
 588	int ret = 0;
 589
 590	kcm->tx_wait_more = false;
 591	psock = kcm->tx_psock;
 592	if (unlikely(psock && psock->tx_stopped)) {
 593		/* A reserved psock was aborted asynchronously. Unreserve
 594		 * it and we'll retry the message.
 595		 */
 596		unreserve_psock(kcm);
 597		kcm_report_tx_retry(kcm);
 598		if (skb_queue_empty(&sk->sk_write_queue))
 599			return 0;
 600
 601		kcm_tx_msg(skb_peek(&sk->sk_write_queue))->started_tx = false;
 602	}
 603
 604retry:
 605	while ((head = skb_peek(&sk->sk_write_queue))) {
 606		struct msghdr msg = {
 607			.msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES,
 608		};
 609		struct kcm_tx_msg *txm = kcm_tx_msg(head);
 610		struct sk_buff *skb;
 611		unsigned int msize;
 612		int i;
 613
 614		if (!txm->started_tx) {
 615			psock = reserve_psock(kcm);
 616			if (!psock)
 617				goto out;
 618			skb = head;
 619			txm->frag_offset = 0;
 620			txm->sent = 0;
 621			txm->started_tx = true;
 622		} else {
 623			if (WARN_ON(!psock)) {
 624				ret = -EINVAL;
 625				goto out;
 626			}
 627			skb = txm->frag_skb;
 628		}
 629
 630		if (WARN_ON(!skb_shinfo(skb)->nr_frags)) {
 631			ret = -EINVAL;
 632			goto out;
 633		}
 634
 635		msize = 0;
 636		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
 637			msize += skb_frag_size(&skb_shinfo(skb)->frags[i]);
 638
 639		iov_iter_bvec(&msg.msg_iter, ITER_SOURCE,
 640			      skb_shinfo(skb)->frags, skb_shinfo(skb)->nr_frags,
 641			      msize);
 642		iov_iter_advance(&msg.msg_iter, txm->frag_offset);
 643
 644		do {
 645			ret = sock_sendmsg(psock->sk->sk_socket, &msg);
 646			if (ret <= 0) {
 647				if (ret == -EAGAIN) {
 648					/* Save state to try again when there's
 649					 * write space on the socket
 650					 */
 651					txm->frag_skb = skb;
 652					ret = 0;
 653					goto out;
 654				}
 655
 656				/* Hard failure in sending message, abort this
 657				 * psock since it has lost framing
 658				 * synchronization and retry sending the
 659				 * message from the beginning.
 660				 */
 661				kcm_abort_tx_psock(psock, ret ? -ret : EPIPE,
 662						   true);
 663				unreserve_psock(kcm);
 664				psock = NULL;
 665
 666				txm->started_tx = false;
 667				kcm_report_tx_retry(kcm);
 668				ret = 0;
 669				goto retry;
 670			}
 671
 672			txm->sent += ret;
 673			txm->frag_offset += ret;
 674			KCM_STATS_ADD(psock->stats.tx_bytes, ret);
 675		} while (msg.msg_iter.count > 0);
 676
 677		if (skb == head) {
 678			if (skb_has_frag_list(skb)) {
 679				txm->frag_skb = skb_shinfo(skb)->frag_list;
 680				txm->frag_offset = 0;
 681				continue;
 682			}
 683		} else if (skb->next) {
 684			txm->frag_skb = skb->next;
 685			txm->frag_offset = 0;
 686			continue;
 687		}
 688
 689		/* Successfully sent the whole packet, account for it. */
 690		sk->sk_wmem_queued -= txm->sent;
 691		total_sent += txm->sent;
 692		skb_dequeue(&sk->sk_write_queue);
 693		kfree_skb(head);
 694		KCM_STATS_INCR(psock->stats.tx_msgs);
 695	}
 696out:
 697	if (!head) {
 698		/* Done with all queued messages. */
 699		WARN_ON(!skb_queue_empty(&sk->sk_write_queue));
 700		if (psock)
 701			unreserve_psock(kcm);
 702	}
 703
 704	/* Check if write space is available */
 705	sk->sk_write_space(sk);
 706
 707	return total_sent ? : ret;
 708}
 709
 710static void kcm_tx_work(struct work_struct *w)
 711{
 712	struct kcm_sock *kcm = container_of(w, struct kcm_sock, tx_work);
 713	struct sock *sk = &kcm->sk;
 714	int err;
 715
 716	lock_sock(sk);
 717
 718	/* Primarily for SOCK_DGRAM sockets, also handle asynchronous tx
 719	 * aborts
 720	 */
 721	err = kcm_write_msgs(kcm);
 722	if (err < 0) {
 723		/* Hard failure in write, report error on KCM socket */
 724		pr_warn("KCM: Hard failure on kcm_write_msgs %d\n", err);
 725		report_csk_error(&kcm->sk, -err);
 726		goto out;
 727	}
 728
 729	/* Primarily for SOCK_SEQPACKET sockets */
 730	if (likely(sk->sk_socket) &&
 731	    test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
 732		clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
 733		sk->sk_write_space(sk);
 734	}
 735
 736out:
 737	release_sock(sk);
 738}
 739
 740static void kcm_push(struct kcm_sock *kcm)
 741{
 742	if (kcm->tx_wait_more)
 743		kcm_write_msgs(kcm);
 744}
 745
 746static int kcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
 747{
 748	struct sock *sk = sock->sk;
 749	struct kcm_sock *kcm = kcm_sk(sk);
 750	struct sk_buff *skb = NULL, *head = NULL;
 751	size_t copy, copied = 0;
 752	long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
 753	int eor = (sock->type == SOCK_DGRAM) ?
 754		  !(msg->msg_flags & MSG_MORE) : !!(msg->msg_flags & MSG_EOR);
 755	int err = -EPIPE;
 756
 757	lock_sock(sk);
 758
 759	/* Per tcp_sendmsg this should be in poll */
 760	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
 761
 762	if (sk->sk_err)
 763		goto out_error;
 764
 765	if (kcm->seq_skb) {
 766		/* Previously opened message */
 767		head = kcm->seq_skb;
 768		skb = kcm_tx_msg(head)->last_skb;
 769		goto start;
 770	}
 771
 772	/* Call the sk_stream functions to manage the sndbuf mem. */
 773	if (!sk_stream_memory_free(sk)) {
 774		kcm_push(kcm);
 775		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
 776		err = sk_stream_wait_memory(sk, &timeo);
 777		if (err)
 778			goto out_error;
 779	}
 780
 781	if (msg_data_left(msg)) {
 782		/* New message, alloc head skb */
 783		head = alloc_skb(0, sk->sk_allocation);
 784		while (!head) {
 785			kcm_push(kcm);
 786			err = sk_stream_wait_memory(sk, &timeo);
 787			if (err)
 788				goto out_error;
 789
 790			head = alloc_skb(0, sk->sk_allocation);
 791		}
 792
 793		skb = head;
 794
 795		/* Set ip_summed to CHECKSUM_UNNECESSARY to avoid calling
 796		 * csum_and_copy_from_iter from skb_do_copy_data_nocache.
 797		 */
 798		skb->ip_summed = CHECKSUM_UNNECESSARY;
 799	}
 800
 801start:
 802	while (msg_data_left(msg)) {
 803		bool merge = true;
 804		int i = skb_shinfo(skb)->nr_frags;
 805		struct page_frag *pfrag = sk_page_frag(sk);
 806
 807		if (!sk_page_frag_refill(sk, pfrag))
 808			goto wait_for_memory;
 809
 810		if (!skb_can_coalesce(skb, i, pfrag->page,
 811				      pfrag->offset)) {
 812			if (i == MAX_SKB_FRAGS) {
 813				struct sk_buff *tskb;
 814
 815				tskb = alloc_skb(0, sk->sk_allocation);
 816				if (!tskb)
 817					goto wait_for_memory;
 818
 819				if (head == skb)
 820					skb_shinfo(head)->frag_list = tskb;
 821				else
 822					skb->next = tskb;
 823
 824				skb = tskb;
 825				skb->ip_summed = CHECKSUM_UNNECESSARY;
 826				continue;
 827			}
 828			merge = false;
 829		}
 830
 831		if (msg->msg_flags & MSG_SPLICE_PAGES) {
 832			copy = msg_data_left(msg);
 833			if (!sk_wmem_schedule(sk, copy))
 834				goto wait_for_memory;
 835
 836			err = skb_splice_from_iter(skb, &msg->msg_iter, copy,
 837						   sk->sk_allocation);
 838			if (err < 0) {
 839				if (err == -EMSGSIZE)
 840					goto wait_for_memory;
 841				goto out_error;
 842			}
 843
 844			copy = err;
 845			skb_shinfo(skb)->flags |= SKBFL_SHARED_FRAG;
 846			sk_wmem_queued_add(sk, copy);
 847			sk_mem_charge(sk, copy);
 848
 849			if (head != skb)
 850				head->truesize += copy;
 851		} else {
 852			copy = min_t(int, msg_data_left(msg),
 853				     pfrag->size - pfrag->offset);
 854			if (!sk_wmem_schedule(sk, copy))
 855				goto wait_for_memory;
 856
 857			err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb,
 858						       pfrag->page,
 859						       pfrag->offset,
 860						       copy);
 861			if (err)
 862				goto out_error;
 863
 864			/* Update the skb. */
 865			if (merge) {
 866				skb_frag_size_add(
 867					&skb_shinfo(skb)->frags[i - 1], copy);
 868			} else {
 869				skb_fill_page_desc(skb, i, pfrag->page,
 870						   pfrag->offset, copy);
 871				get_page(pfrag->page);
 872			}
 873
 874			pfrag->offset += copy;
 875		}
 876
 877		copied += copy;
 878		if (head != skb) {
 879			head->len += copy;
 880			head->data_len += copy;
 881		}
 882
 883		continue;
 884
 885wait_for_memory:
 886		kcm_push(kcm);
 887		err = sk_stream_wait_memory(sk, &timeo);
 888		if (err)
 889			goto out_error;
 890	}
 891
 892	if (eor) {
 893		bool not_busy = skb_queue_empty(&sk->sk_write_queue);
 894
 895		if (head) {
 896			/* Message complete, queue it on send buffer */
 897			__skb_queue_tail(&sk->sk_write_queue, head);
 898			kcm->seq_skb = NULL;
 899			KCM_STATS_INCR(kcm->stats.tx_msgs);
 900		}
 901
 902		if (msg->msg_flags & MSG_BATCH) {
 903			kcm->tx_wait_more = true;
 904		} else if (kcm->tx_wait_more || not_busy) {
 905			err = kcm_write_msgs(kcm);
 906			if (err < 0) {
 907				/* We got a hard error in write_msgs but have
 908				 * already queued this message. Report an error
 909				 * in the socket, but don't affect return value
 910				 * from sendmsg
 911				 */
 912				pr_warn("KCM: Hard failure on kcm_write_msgs\n");
 913				report_csk_error(&kcm->sk, -err);
 914			}
 915		}
 916	} else {
 917		/* Message not complete, save state */
 918partial_message:
 919		if (head) {
 920			kcm->seq_skb = head;
 921			kcm_tx_msg(head)->last_skb = skb;
 922		}
 923	}
 924
 925	KCM_STATS_ADD(kcm->stats.tx_bytes, copied);
 926
 927	release_sock(sk);
 928	return copied;
 929
 930out_error:
 931	kcm_push(kcm);
 932
 933	if (sock->type == SOCK_SEQPACKET) {
 934		/* Wrote some bytes before encountering an
 935		 * error, return partial success.
 936		 */
 937		if (copied)
 938			goto partial_message;
 939		if (head != kcm->seq_skb)
 940			kfree_skb(head);
 941	} else {
 942		kfree_skb(head);
 943		kcm->seq_skb = NULL;
 944	}
 945
 946	err = sk_stream_error(sk, msg->msg_flags, err);
 947
 948	/* make sure we wake any epoll edge trigger waiter */
 949	if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 && err == -EAGAIN))
 950		sk->sk_write_space(sk);
 951
 952	release_sock(sk);
 953	return err;
 954}
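/* Userspace sketch (illustrative, not part of this file) of the send path
 * above: on a SOCK_SEQPACKET KCM socket each message is terminated with
 * MSG_EOR, and MSG_BATCH defers transmission (tx_wait_more) until a later
 * send without it. "kcm_fd" and the buffers are assumed to exist.
 *
 *	send(kcm_fd, req1, req1_len, MSG_BATCH | MSG_EOR);
 *	send(kcm_fd, req2, req2_len, MSG_EOR);	// also flushes the batch
 */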
 955
 956static void kcm_splice_eof(struct socket *sock)
 957{
 958	struct sock *sk = sock->sk;
 959	struct kcm_sock *kcm = kcm_sk(sk);
 960
 961	if (skb_queue_empty_lockless(&sk->sk_write_queue))
 962		return;
 963
 964	lock_sock(sk);
 965	kcm_write_msgs(kcm);
 966	release_sock(sk);
 967}
 968
 969static int kcm_recvmsg(struct socket *sock, struct msghdr *msg,
 970		       size_t len, int flags)
 971{
 972	struct sock *sk = sock->sk;
 973	struct kcm_sock *kcm = kcm_sk(sk);
 974	int err = 0;
 975	struct strp_msg *stm;
 976	int copied = 0;
 977	struct sk_buff *skb;
 978
 979	skb = skb_recv_datagram(sk, flags, &err);
 980	if (!skb)
 981		goto out;
 982
 983	/* Okay, have a message on the receive queue */
 984
 985	stm = strp_msg(skb);
 986
 987	if (len > stm->full_len)
 988		len = stm->full_len;
 989
 990	err = skb_copy_datagram_msg(skb, stm->offset, msg, len);
 991	if (err < 0)
 992		goto out;
 993
 994	copied = len;
 995	if (likely(!(flags & MSG_PEEK))) {
 996		KCM_STATS_ADD(kcm->stats.rx_bytes, copied);
 997		if (copied < stm->full_len) {
 998			if (sock->type == SOCK_DGRAM) {
 999				/* Truncated message */
1000				msg->msg_flags |= MSG_TRUNC;
1001				goto msg_finished;
1002			}
1003			stm->offset += copied;
1004			stm->full_len -= copied;
1005		} else {
1006msg_finished:
1007			/* Finished with message */
1008			msg->msg_flags |= MSG_EOR;
1009			KCM_STATS_INCR(kcm->stats.rx_msgs);
1010		}
1011	}
1012
1013out:
1014	skb_free_datagram(sk, skb);
1015	return copied ? : err;
1016}
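/* Receive-side sketch (illustrative): on a SOCK_DGRAM KCM socket a message
 * larger than the supplied buffer is flagged with MSG_TRUNC, as handled
 * above. "kcm_fd" and "buf" are assumed to exist.
 *
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct msghdr mh = { .msg_iov = &iov, .msg_iovlen = 1 };
 *	ssize_t n = recvmsg(kcm_fd, &mh, 0);
 *
 *	if (n > 0 && (mh.msg_flags & MSG_TRUNC))
 *		fprintf(stderr, "KCM message truncated\n");
 */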
1017
1018static ssize_t kcm_splice_read(struct socket *sock, loff_t *ppos,
1019			       struct pipe_inode_info *pipe, size_t len,
1020			       unsigned int flags)
1021{
1022	struct sock *sk = sock->sk;
1023	struct kcm_sock *kcm = kcm_sk(sk);
1024	struct strp_msg *stm;
1025	int err = 0;
1026	ssize_t copied;
1027	struct sk_buff *skb;
1028
 1029	/* Only support splice for SOCK_SEQPACKET */
1030
1031	skb = skb_recv_datagram(sk, flags, &err);
1032	if (!skb)
1033		goto err_out;
1034
1035	/* Okay, have a message on the receive queue */
1036
1037	stm = strp_msg(skb);
1038
1039	if (len > stm->full_len)
1040		len = stm->full_len;
1041
1042	copied = skb_splice_bits(skb, sk, stm->offset, pipe, len, flags);
1043	if (copied < 0) {
1044		err = copied;
1045		goto err_out;
1046	}
1047
1048	KCM_STATS_ADD(kcm->stats.rx_bytes, copied);
1049
1050	stm->offset += copied;
1051	stm->full_len -= copied;
1052
1053	/* We have no way to return MSG_EOR. If all the bytes have been
1054	 * read we still leave the message in the receive socket buffer.
1055	 * A subsequent recvmsg needs to be done to return MSG_EOR and
1056	 * finish reading the message.
1057	 */
1058
1059	skb_free_datagram(sk, skb);
1060	return copied;
1061
1062err_out:
1063	skb_free_datagram(sk, skb);
1064	return err;
1065}
1066
1067/* kcm sock lock held */
1068static void kcm_recv_disable(struct kcm_sock *kcm)
1069{
1070	struct kcm_mux *mux = kcm->mux;
1071
1072	if (kcm->rx_disabled)
1073		return;
1074
1075	spin_lock_bh(&mux->rx_lock);
1076
1077	kcm->rx_disabled = 1;
1078
1079	/* If a psock is reserved we'll do cleanup in unreserve */
1080	if (!kcm->rx_psock) {
1081		if (kcm->rx_wait) {
1082			list_del(&kcm->wait_rx_list);
1083			/* paired with lockless reads in kcm_rfree() */
1084			WRITE_ONCE(kcm->rx_wait, false);
1085		}
1086
1087		requeue_rx_msgs(mux, &kcm->sk.sk_receive_queue);
1088	}
1089
1090	spin_unlock_bh(&mux->rx_lock);
1091}
1092
1093/* kcm sock lock held */
1094static void kcm_recv_enable(struct kcm_sock *kcm)
1095{
1096	struct kcm_mux *mux = kcm->mux;
1097
1098	if (!kcm->rx_disabled)
1099		return;
1100
1101	spin_lock_bh(&mux->rx_lock);
1102
1103	kcm->rx_disabled = 0;
1104	kcm_rcv_ready(kcm);
1105
1106	spin_unlock_bh(&mux->rx_lock);
1107}
1108
1109static int kcm_setsockopt(struct socket *sock, int level, int optname,
1110			  sockptr_t optval, unsigned int optlen)
1111{
1112	struct kcm_sock *kcm = kcm_sk(sock->sk);
1113	int val, valbool;
1114	int err = 0;
1115
1116	if (level != SOL_KCM)
1117		return -ENOPROTOOPT;
1118
1119	if (optlen < sizeof(int))
1120		return -EINVAL;
1121
1122	if (copy_from_sockptr(&val, optval, sizeof(int)))
1123		return -EFAULT;
1124
1125	valbool = val ? 1 : 0;
1126
1127	switch (optname) {
1128	case KCM_RECV_DISABLE:
1129		lock_sock(&kcm->sk);
1130		if (valbool)
1131			kcm_recv_disable(kcm);
1132		else
1133			kcm_recv_enable(kcm);
1134		release_sock(&kcm->sk);
1135		break;
1136	default:
1137		err = -ENOPROTOOPT;
1138	}
1139
1140	return err;
1141}
1142
1143static int kcm_getsockopt(struct socket *sock, int level, int optname,
1144			  char __user *optval, int __user *optlen)
1145{
1146	struct kcm_sock *kcm = kcm_sk(sock->sk);
1147	int val, len;
1148
1149	if (level != SOL_KCM)
1150		return -ENOPROTOOPT;
1151
1152	if (get_user(len, optlen))
1153		return -EFAULT;
1154
1155	len = min_t(unsigned int, len, sizeof(int));
1156	if (len < 0)
1157		return -EINVAL;
1158
1159	switch (optname) {
1160	case KCM_RECV_DISABLE:
1161		val = kcm->rx_disabled;
1162		break;
1163	default:
1164		return -ENOPROTOOPT;
1165	}
1166
1167	if (put_user(len, optlen))
1168		return -EFAULT;
1169	if (copy_to_user(optval, &val, len))
1170		return -EFAULT;
1171	return 0;
1172}
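/* Illustrative userspace use of the only option handled above, assuming
 * "kcm_fd" is a KCM socket: temporarily take this socket out of the set of
 * receivers on the mux, then put it back.
 *
 *	int on = 1, off = 0;
 *
 *	setsockopt(kcm_fd, SOL_KCM, KCM_RECV_DISABLE, &on, sizeof(on));
 *	...
 *	setsockopt(kcm_fd, SOL_KCM, KCM_RECV_DISABLE, &off, sizeof(off));
 */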
1173
1174static void init_kcm_sock(struct kcm_sock *kcm, struct kcm_mux *mux)
1175{
1176	struct kcm_sock *tkcm;
1177	struct list_head *head;
1178	int index = 0;
1179
1180	/* For SOCK_SEQPACKET sock type, datagram_poll checks the sk_state, so
1181	 * we set sk_state, otherwise epoll_wait always returns right away with
1182	 * EPOLLHUP
1183	 */
1184	kcm->sk.sk_state = TCP_ESTABLISHED;
1185
1186	/* Add to mux's kcm sockets list */
1187	kcm->mux = mux;
1188	spin_lock_bh(&mux->lock);
1189
1190	head = &mux->kcm_socks;
1191	list_for_each_entry(tkcm, &mux->kcm_socks, kcm_sock_list) {
1192		if (tkcm->index != index)
1193			break;
1194		head = &tkcm->kcm_sock_list;
1195		index++;
1196	}
1197
1198	list_add(&kcm->kcm_sock_list, head);
1199	kcm->index = index;
1200
1201	mux->kcm_socks_cnt++;
1202	spin_unlock_bh(&mux->lock);
1203
1204	INIT_WORK(&kcm->tx_work, kcm_tx_work);
1205
1206	spin_lock_bh(&mux->rx_lock);
1207	kcm_rcv_ready(kcm);
1208	spin_unlock_bh(&mux->rx_lock);
1209}
1210
1211static int kcm_attach(struct socket *sock, struct socket *csock,
1212		      struct bpf_prog *prog)
1213{
1214	struct kcm_sock *kcm = kcm_sk(sock->sk);
1215	struct kcm_mux *mux = kcm->mux;
1216	struct sock *csk;
1217	struct kcm_psock *psock = NULL, *tpsock;
1218	struct list_head *head;
1219	int index = 0;
1220	static const struct strp_callbacks cb = {
1221		.rcv_msg = kcm_rcv_strparser,
1222		.parse_msg = kcm_parse_func_strparser,
1223		.read_sock_done = kcm_read_sock_done,
1224	};
1225	int err = 0;
1226
1227	csk = csock->sk;
1228	if (!csk)
1229		return -EINVAL;
1230
1231	lock_sock(csk);
1232
1233	/* Only allow TCP sockets to be attached for now */
1234	if ((csk->sk_family != AF_INET && csk->sk_family != AF_INET6) ||
1235	    csk->sk_protocol != IPPROTO_TCP) {
1236		err = -EOPNOTSUPP;
1237		goto out;
1238	}
1239
1240	/* Don't allow listeners or closed sockets */
1241	if (csk->sk_state == TCP_LISTEN || csk->sk_state == TCP_CLOSE) {
1242		err = -EOPNOTSUPP;
1243		goto out;
1244	}
1245
1246	psock = kmem_cache_zalloc(kcm_psockp, GFP_KERNEL);
1247	if (!psock) {
1248		err = -ENOMEM;
1249		goto out;
1250	}
1251
1252	psock->mux = mux;
1253	psock->sk = csk;
1254	psock->bpf_prog = prog;
1255
1256	write_lock_bh(&csk->sk_callback_lock);
1257
 1258	/* Check if sk_user_data is already used by KCM or someone else.
1259	 * Must be done under lock to prevent race conditions.
1260	 */
1261	if (csk->sk_user_data) {
1262		write_unlock_bh(&csk->sk_callback_lock);
1263		kmem_cache_free(kcm_psockp, psock);
1264		err = -EALREADY;
1265		goto out;
1266	}
1267
1268	err = strp_init(&psock->strp, csk, &cb);
1269	if (err) {
1270		write_unlock_bh(&csk->sk_callback_lock);
1271		kmem_cache_free(kcm_psockp, psock);
1272		goto out;
1273	}
1274
1275	psock->save_data_ready = csk->sk_data_ready;
1276	psock->save_write_space = csk->sk_write_space;
1277	psock->save_state_change = csk->sk_state_change;
1278	csk->sk_user_data = psock;
1279	csk->sk_data_ready = psock_data_ready;
1280	csk->sk_write_space = psock_write_space;
1281	csk->sk_state_change = psock_state_change;
1282
1283	write_unlock_bh(&csk->sk_callback_lock);
1284
1285	sock_hold(csk);
1286
1287	/* Finished initialization, now add the psock to the MUX. */
1288	spin_lock_bh(&mux->lock);
1289	head = &mux->psocks;
1290	list_for_each_entry(tpsock, &mux->psocks, psock_list) {
1291		if (tpsock->index != index)
1292			break;
1293		head = &tpsock->psock_list;
1294		index++;
1295	}
1296
1297	list_add(&psock->psock_list, head);
1298	psock->index = index;
1299
1300	KCM_STATS_INCR(mux->stats.psock_attach);
1301	mux->psocks_cnt++;
1302	psock_now_avail(psock);
1303	spin_unlock_bh(&mux->lock);
1304
1305	/* Schedule RX work in case there are already bytes queued */
1306	strp_check_rcv(&psock->strp);
1307
1308out:
1309	release_sock(csk);
1310
1311	return err;
1312}
1313
1314static int kcm_attach_ioctl(struct socket *sock, struct kcm_attach *info)
1315{
1316	struct socket *csock;
1317	struct bpf_prog *prog;
1318	int err;
1319
1320	csock = sockfd_lookup(info->fd, &err);
1321	if (!csock)
1322		return -ENOENT;
1323
1324	prog = bpf_prog_get_type(info->bpf_fd, BPF_PROG_TYPE_SOCKET_FILTER);
1325	if (IS_ERR(prog)) {
1326		err = PTR_ERR(prog);
1327		goto out;
1328	}
1329
1330	err = kcm_attach(sock, csock, prog);
1331	if (err) {
1332		bpf_prog_put(prog);
1333		goto out;
1334	}
1335
1336	/* Keep reference on file also */
1337
1338	return 0;
1339out:
1340	sockfd_put(csock);
1341	return err;
1342}
1343
1344static void kcm_unattach(struct kcm_psock *psock)
1345{
1346	struct sock *csk = psock->sk;
1347	struct kcm_mux *mux = psock->mux;
1348
1349	lock_sock(csk);
1350
1351	/* Stop getting callbacks from TCP socket. After this there should
1352	 * be no way to reserve a kcm for this psock.
1353	 */
1354	write_lock_bh(&csk->sk_callback_lock);
1355	csk->sk_user_data = NULL;
1356	csk->sk_data_ready = psock->save_data_ready;
1357	csk->sk_write_space = psock->save_write_space;
1358	csk->sk_state_change = psock->save_state_change;
1359	strp_stop(&psock->strp);
1360
1361	if (WARN_ON(psock->rx_kcm)) {
1362		write_unlock_bh(&csk->sk_callback_lock);
1363		release_sock(csk);
1364		return;
1365	}
1366
1367	spin_lock_bh(&mux->rx_lock);
1368
1369	/* Stop receiver activities. After this point psock should not be
1370	 * able to get onto ready list either through callbacks or work.
1371	 */
1372	if (psock->ready_rx_msg) {
1373		list_del(&psock->psock_ready_list);
1374		kfree_skb(psock->ready_rx_msg);
1375		psock->ready_rx_msg = NULL;
1376		KCM_STATS_INCR(mux->stats.rx_ready_drops);
1377	}
1378
1379	spin_unlock_bh(&mux->rx_lock);
1380
1381	write_unlock_bh(&csk->sk_callback_lock);
1382
1383	/* Call strp_done without sock lock */
1384	release_sock(csk);
1385	strp_done(&psock->strp);
1386	lock_sock(csk);
1387
1388	bpf_prog_put(psock->bpf_prog);
1389
1390	spin_lock_bh(&mux->lock);
1391
1392	aggregate_psock_stats(&psock->stats, &mux->aggregate_psock_stats);
1393	save_strp_stats(&psock->strp, &mux->aggregate_strp_stats);
1394
1395	KCM_STATS_INCR(mux->stats.psock_unattach);
1396
1397	if (psock->tx_kcm) {
 1398		/* psock was reserved.  Just mark it finished and we will clean
 1399		 * up in the kcm paths; we need the kcm lock, which cannot be
 1400		 * acquired here.
1401		 */
1402		KCM_STATS_INCR(mux->stats.psock_unattach_rsvd);
1403		spin_unlock_bh(&mux->lock);
1404
1405		/* We are unattaching a socket that is reserved. Abort the
1406		 * socket since we may be out of sync in sending on it. We need
1407		 * to do this without the mux lock.
1408		 */
1409		kcm_abort_tx_psock(psock, EPIPE, false);
1410
1411		spin_lock_bh(&mux->lock);
1412		if (!psock->tx_kcm) {
 1413			/* psock now unreserved in the window where the mux was unlocked */
1414			goto no_reserved;
1415		}
1416		psock->done = 1;
1417
1418		/* Commit done before queuing work to process it */
1419		smp_mb();
1420
1421		/* Queue tx work to make sure psock->done is handled */
1422		queue_work(kcm_wq, &psock->tx_kcm->tx_work);
1423		spin_unlock_bh(&mux->lock);
1424	} else {
1425no_reserved:
1426		if (!psock->tx_stopped)
1427			list_del(&psock->psock_avail_list);
1428		list_del(&psock->psock_list);
1429		mux->psocks_cnt--;
1430		spin_unlock_bh(&mux->lock);
1431
1432		sock_put(csk);
1433		fput(csk->sk_socket->file);
1434		kmem_cache_free(kcm_psockp, psock);
1435	}
1436
1437	release_sock(csk);
1438}
1439
1440static int kcm_unattach_ioctl(struct socket *sock, struct kcm_unattach *info)
1441{
1442	struct kcm_sock *kcm = kcm_sk(sock->sk);
1443	struct kcm_mux *mux = kcm->mux;
1444	struct kcm_psock *psock;
1445	struct socket *csock;
1446	struct sock *csk;
1447	int err;
1448
1449	csock = sockfd_lookup(info->fd, &err);
1450	if (!csock)
1451		return -ENOENT;
1452
1453	csk = csock->sk;
1454	if (!csk) {
1455		err = -EINVAL;
1456		goto out;
1457	}
1458
1459	err = -ENOENT;
1460
1461	spin_lock_bh(&mux->lock);
1462
1463	list_for_each_entry(psock, &mux->psocks, psock_list) {
1464		if (psock->sk != csk)
1465			continue;
1466
1467		/* Found the matching psock */
1468
1469		if (psock->unattaching || WARN_ON(psock->done)) {
1470			err = -EALREADY;
1471			break;
1472		}
1473
1474		psock->unattaching = 1;
1475
1476		spin_unlock_bh(&mux->lock);
1477
1478		/* Lower socket lock should already be held */
1479		kcm_unattach(psock);
1480
1481		err = 0;
1482		goto out;
1483	}
1484
1485	spin_unlock_bh(&mux->lock);
1486
1487out:
1488	sockfd_put(csock);
1489	return err;
1490}
1491
1492static struct proto kcm_proto = {
1493	.name	= "KCM",
1494	.owner	= THIS_MODULE,
1495	.obj_size = sizeof(struct kcm_sock),
1496};
1497
1498/* Clone a kcm socket. */
1499static struct file *kcm_clone(struct socket *osock)
1500{
1501	struct socket *newsock;
1502	struct sock *newsk;
1503
1504	newsock = sock_alloc();
1505	if (!newsock)
1506		return ERR_PTR(-ENFILE);
1507
1508	newsock->type = osock->type;
1509	newsock->ops = osock->ops;
1510
1511	__module_get(newsock->ops->owner);
1512
1513	newsk = sk_alloc(sock_net(osock->sk), PF_KCM, GFP_KERNEL,
1514			 &kcm_proto, false);
1515	if (!newsk) {
1516		sock_release(newsock);
1517		return ERR_PTR(-ENOMEM);
1518	}
1519	sock_init_data(newsock, newsk);
1520	init_kcm_sock(kcm_sk(newsk), kcm_sk(osock->sk)->mux);
1521
1522	return sock_alloc_file(newsock, 0, osock->sk->sk_prot_creator->name);
1523}
1524
1525static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1526{
1527	int err;
1528
1529	switch (cmd) {
1530	case SIOCKCMATTACH: {
1531		struct kcm_attach info;
1532
1533		if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
1534			return -EFAULT;
1535
1536		err = kcm_attach_ioctl(sock, &info);
1537
1538		break;
1539	}
1540	case SIOCKCMUNATTACH: {
1541		struct kcm_unattach info;
1542
1543		if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
1544			return -EFAULT;
1545
1546		err = kcm_unattach_ioctl(sock, &info);
1547
1548		break;
1549	}
1550	case SIOCKCMCLONE: {
1551		struct kcm_clone info;
1552		struct file *file;
1553
1554		info.fd = get_unused_fd_flags(0);
1555		if (unlikely(info.fd < 0))
1556			return info.fd;
1557
1558		file = kcm_clone(sock);
1559		if (IS_ERR(file)) {
1560			put_unused_fd(info.fd);
1561			return PTR_ERR(file);
1562		}
1563		if (copy_to_user((void __user *)arg, &info,
1564				 sizeof(info))) {
1565			put_unused_fd(info.fd);
1566			fput(file);
1567			return -EFAULT;
1568		}
1569		fd_install(info.fd, file);
1570		err = 0;
1571		break;
1572	}
1573	default:
1574		err = -ENOIOCTLCMD;
1575		break;
1576	}
1577
1578	return err;
1579}
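/* Illustrative userspace flow for the ioctls above (a sketch, assuming
 * "tcp_fd" is a connected TCP socket and "prog_fd" a loaded
 * BPF_PROG_TYPE_SOCKET_FILTER program that delineates messages):
 *
 *	struct kcm_attach attach = {
 *		.fd = tcp_fd,
 *		.bpf_fd = prog_fd,
 *	};
 *	struct kcm_clone clone_info;
 *	int kcm_fd = socket(AF_KCM, SOCK_DGRAM, KCMPROTO_CONNECTED);
 *
 *	if (ioctl(kcm_fd, SIOCKCMATTACH, &attach) < 0)
 *		perror("SIOCKCMATTACH");
 *
 *	// Optionally clone another KCM socket on the same mux.
 *	if (ioctl(kcm_fd, SIOCKCMCLONE, &clone_info) == 0)
 *		use_second_socket(clone_info.fd);	// hypothetical helper
 */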
1580
1581static void free_mux(struct rcu_head *rcu)
1582{
1583	struct kcm_mux *mux = container_of(rcu,
1584	    struct kcm_mux, rcu);
1585
1586	kmem_cache_free(kcm_muxp, mux);
1587}
1588
1589static void release_mux(struct kcm_mux *mux)
1590{
1591	struct kcm_net *knet = mux->knet;
1592	struct kcm_psock *psock, *tmp_psock;
1593
1594	/* Release psocks */
1595	list_for_each_entry_safe(psock, tmp_psock,
1596				 &mux->psocks, psock_list) {
1597		if (!WARN_ON(psock->unattaching))
1598			kcm_unattach(psock);
1599	}
1600
1601	if (WARN_ON(mux->psocks_cnt))
1602		return;
1603
1604	__skb_queue_purge(&mux->rx_hold_queue);
1605
1606	mutex_lock(&knet->mutex);
1607	aggregate_mux_stats(&mux->stats, &knet->aggregate_mux_stats);
1608	aggregate_psock_stats(&mux->aggregate_psock_stats,
1609			      &knet->aggregate_psock_stats);
1610	aggregate_strp_stats(&mux->aggregate_strp_stats,
1611			     &knet->aggregate_strp_stats);
1612	list_del_rcu(&mux->kcm_mux_list);
1613	knet->count--;
1614	mutex_unlock(&knet->mutex);
1615
1616	call_rcu(&mux->rcu, free_mux);
1617}
1618
1619static void kcm_done(struct kcm_sock *kcm)
1620{
1621	struct kcm_mux *mux = kcm->mux;
1622	struct sock *sk = &kcm->sk;
1623	int socks_cnt;
1624
1625	spin_lock_bh(&mux->rx_lock);
1626	if (kcm->rx_psock) {
1627		/* Cleanup in unreserve_rx_kcm */
1628		WARN_ON(kcm->done);
1629		kcm->rx_disabled = 1;
1630		kcm->done = 1;
1631		spin_unlock_bh(&mux->rx_lock);
1632		return;
1633	}
1634
1635	if (kcm->rx_wait) {
1636		list_del(&kcm->wait_rx_list);
1637		/* paired with lockless reads in kcm_rfree() */
1638		WRITE_ONCE(kcm->rx_wait, false);
1639	}
1640	/* Move any pending receive messages to other kcm sockets */
1641	requeue_rx_msgs(mux, &sk->sk_receive_queue);
1642
1643	spin_unlock_bh(&mux->rx_lock);
1644
1645	if (WARN_ON(sk_rmem_alloc_get(sk)))
1646		return;
1647
1648	/* Detach from MUX */
1649	spin_lock_bh(&mux->lock);
1650
1651	list_del(&kcm->kcm_sock_list);
1652	mux->kcm_socks_cnt--;
1653	socks_cnt = mux->kcm_socks_cnt;
1654
1655	spin_unlock_bh(&mux->lock);
1656
1657	if (!socks_cnt) {
1658		/* We are done with the mux now. */
1659		release_mux(mux);
1660	}
1661
1662	WARN_ON(kcm->rx_wait);
1663
1664	sock_put(&kcm->sk);
1665}
1666
 1667/* Release (close) a KCM socket.
1668 * If this is the last KCM socket on the MUX, destroy the MUX.
1669 */
1670static int kcm_release(struct socket *sock)
1671{
1672	struct sock *sk = sock->sk;
1673	struct kcm_sock *kcm;
1674	struct kcm_mux *mux;
1675	struct kcm_psock *psock;
1676
1677	if (!sk)
1678		return 0;
1679
1680	kcm = kcm_sk(sk);
1681	mux = kcm->mux;
1682
1683	lock_sock(sk);
1684	sock_orphan(sk);
1685	kfree_skb(kcm->seq_skb);
1686
1687	/* Purge queue under lock to avoid race condition with tx_work trying
1688	 * to act when queue is nonempty. If tx_work runs after this point
1689	 * it will just return.
1690	 */
1691	__skb_queue_purge(&sk->sk_write_queue);
1692
1693	/* Set tx_stopped. This is checked when psock is bound to a kcm and we
1694	 * get a writespace callback. This prevents further work being queued
 1695	 * from the callback (unbinding the psock occurs after canceling work).
1696	 */
1697	kcm->tx_stopped = 1;
1698
1699	release_sock(sk);
1700
1701	spin_lock_bh(&mux->lock);
1702	if (kcm->tx_wait) {
 1703		/* Take off the tx_wait list; after this point there should be no way
1704		 * that a psock will be assigned to this kcm.
1705		 */
1706		list_del(&kcm->wait_psock_list);
1707		kcm->tx_wait = false;
1708	}
1709	spin_unlock_bh(&mux->lock);
1710
1711	/* Cancel work. After this point there should be no outside references
1712	 * to the kcm socket.
1713	 */
1714	cancel_work_sync(&kcm->tx_work);
1715
1716	lock_sock(sk);
1717	psock = kcm->tx_psock;
1718	if (psock) {
1719		/* A psock was reserved, so we need to kill it since it
1720		 * may already have some bytes queued from a message. We
1721		 * need to do this after removing kcm from tx_wait list.
1722		 */
1723		kcm_abort_tx_psock(psock, EPIPE, false);
1724		unreserve_psock(kcm);
1725	}
1726	release_sock(sk);
1727
1728	WARN_ON(kcm->tx_wait);
1729	WARN_ON(kcm->tx_psock);
1730
1731	sock->sk = NULL;
1732
1733	kcm_done(kcm);
1734
1735	return 0;
1736}
1737
1738static const struct proto_ops kcm_dgram_ops = {
1739	.family =	PF_KCM,
1740	.owner =	THIS_MODULE,
1741	.release =	kcm_release,
1742	.bind =		sock_no_bind,
1743	.connect =	sock_no_connect,
1744	.socketpair =	sock_no_socketpair,
1745	.accept =	sock_no_accept,
1746	.getname =	sock_no_getname,
1747	.poll =		datagram_poll,
1748	.ioctl =	kcm_ioctl,
1749	.listen =	sock_no_listen,
1750	.shutdown =	sock_no_shutdown,
1751	.setsockopt =	kcm_setsockopt,
1752	.getsockopt =	kcm_getsockopt,
1753	.sendmsg =	kcm_sendmsg,
1754	.recvmsg =	kcm_recvmsg,
1755	.mmap =		sock_no_mmap,
1756	.splice_eof =	kcm_splice_eof,
1757};
1758
1759static const struct proto_ops kcm_seqpacket_ops = {
1760	.family =	PF_KCM,
1761	.owner =	THIS_MODULE,
1762	.release =	kcm_release,
1763	.bind =		sock_no_bind,
1764	.connect =	sock_no_connect,
1765	.socketpair =	sock_no_socketpair,
1766	.accept =	sock_no_accept,
1767	.getname =	sock_no_getname,
1768	.poll =		datagram_poll,
1769	.ioctl =	kcm_ioctl,
1770	.listen =	sock_no_listen,
1771	.shutdown =	sock_no_shutdown,
1772	.setsockopt =	kcm_setsockopt,
1773	.getsockopt =	kcm_getsockopt,
1774	.sendmsg =	kcm_sendmsg,
1775	.recvmsg =	kcm_recvmsg,
1776	.mmap =		sock_no_mmap,
1777	.splice_eof =	kcm_splice_eof,
1778	.splice_read =	kcm_splice_read,
1779};
1780
1781/* Create proto operation for kcm sockets */
1782static int kcm_create(struct net *net, struct socket *sock,
1783		      int protocol, int kern)
1784{
1785	struct kcm_net *knet = net_generic(net, kcm_net_id);
1786	struct sock *sk;
1787	struct kcm_mux *mux;
1788
1789	switch (sock->type) {
1790	case SOCK_DGRAM:
1791		sock->ops = &kcm_dgram_ops;
1792		break;
1793	case SOCK_SEQPACKET:
1794		sock->ops = &kcm_seqpacket_ops;
1795		break;
1796	default:
1797		return -ESOCKTNOSUPPORT;
1798	}
1799
1800	if (protocol != KCMPROTO_CONNECTED)
1801		return -EPROTONOSUPPORT;
1802
1803	sk = sk_alloc(net, PF_KCM, GFP_KERNEL, &kcm_proto, kern);
1804	if (!sk)
1805		return -ENOMEM;
1806
1807	/* Allocate a kcm mux, shared between KCM sockets */
1808	mux = kmem_cache_zalloc(kcm_muxp, GFP_KERNEL);
1809	if (!mux) {
1810		sk_free(sk);
1811		return -ENOMEM;
1812	}
1813
1814	spin_lock_init(&mux->lock);
1815	spin_lock_init(&mux->rx_lock);
1816	INIT_LIST_HEAD(&mux->kcm_socks);
1817	INIT_LIST_HEAD(&mux->kcm_rx_waiters);
1818	INIT_LIST_HEAD(&mux->kcm_tx_waiters);
1819
1820	INIT_LIST_HEAD(&mux->psocks);
1821	INIT_LIST_HEAD(&mux->psocks_ready);
1822	INIT_LIST_HEAD(&mux->psocks_avail);
1823
1824	mux->knet = knet;
1825
1826	/* Add new MUX to list */
1827	mutex_lock(&knet->mutex);
1828	list_add_rcu(&mux->kcm_mux_list, &knet->mux_list);
1829	knet->count++;
1830	mutex_unlock(&knet->mutex);
1831
1832	skb_queue_head_init(&mux->rx_hold_queue);
1833
1834	/* Init KCM socket */
1835	sock_init_data(sock, sk);
1836	init_kcm_sock(kcm_sk(sk), mux);
1837
1838	return 0;
1839}
1840
1841static const struct net_proto_family kcm_family_ops = {
1842	.family = PF_KCM,
1843	.create = kcm_create,
1844	.owner  = THIS_MODULE,
1845};
1846
1847static __net_init int kcm_init_net(struct net *net)
1848{
1849	struct kcm_net *knet = net_generic(net, kcm_net_id);
1850
1851	INIT_LIST_HEAD_RCU(&knet->mux_list);
1852	mutex_init(&knet->mutex);
1853
1854	return 0;
1855}
1856
1857static __net_exit void kcm_exit_net(struct net *net)
1858{
1859	struct kcm_net *knet = net_generic(net, kcm_net_id);
1860
1861	/* All KCM sockets should be closed at this point, which should mean
1862	 * that all multiplexors and psocks have been destroyed.
1863	 */
1864	WARN_ON(!list_empty(&knet->mux_list));
1865
1866	mutex_destroy(&knet->mutex);
1867}
1868
1869static struct pernet_operations kcm_net_ops = {
1870	.init = kcm_init_net,
1871	.exit = kcm_exit_net,
1872	.id   = &kcm_net_id,
1873	.size = sizeof(struct kcm_net),
1874};
1875
1876static int __init kcm_init(void)
1877{
1878	int err = -ENOMEM;
1879
1880	kcm_muxp = kmem_cache_create("kcm_mux_cache",
1881				     sizeof(struct kcm_mux), 0,
1882				     SLAB_HWCACHE_ALIGN, NULL);
1883	if (!kcm_muxp)
1884		goto fail;
1885
1886	kcm_psockp = kmem_cache_create("kcm_psock_cache",
1887				       sizeof(struct kcm_psock), 0,
1888					SLAB_HWCACHE_ALIGN, NULL);
1889	if (!kcm_psockp)
1890		goto fail;
1891
1892	kcm_wq = create_singlethread_workqueue("kkcmd");
1893	if (!kcm_wq)
1894		goto fail;
1895
1896	err = proto_register(&kcm_proto, 1);
1897	if (err)
1898		goto fail;
1899
1900	err = register_pernet_device(&kcm_net_ops);
1901	if (err)
1902		goto net_ops_fail;
1903
1904	err = sock_register(&kcm_family_ops);
1905	if (err)
1906		goto sock_register_fail;
1907
1908	err = kcm_proc_init();
1909	if (err)
1910		goto proc_init_fail;
1911
1912	return 0;
1913
1914proc_init_fail:
1915	sock_unregister(PF_KCM);
1916
1917sock_register_fail:
1918	unregister_pernet_device(&kcm_net_ops);
1919
1920net_ops_fail:
1921	proto_unregister(&kcm_proto);
1922
1923fail:
1924	kmem_cache_destroy(kcm_muxp);
1925	kmem_cache_destroy(kcm_psockp);
1926
1927	if (kcm_wq)
1928		destroy_workqueue(kcm_wq);
1929
1930	return err;
1931}
1932
1933static void __exit kcm_exit(void)
1934{
1935	kcm_proc_exit();
1936	sock_unregister(PF_KCM);
1937	unregister_pernet_device(&kcm_net_ops);
1938	proto_unregister(&kcm_proto);
1939	destroy_workqueue(kcm_wq);
1940
1941	kmem_cache_destroy(kcm_muxp);
1942	kmem_cache_destroy(kcm_psockp);
1943}
1944
1945module_init(kcm_init);
1946module_exit(kcm_exit);
1947
1948MODULE_LICENSE("GPL");
1949MODULE_DESCRIPTION("KCM (Kernel Connection Multiplexor) sockets");
1950MODULE_ALIAS_NETPROTO(PF_KCM);
v6.2
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Kernel Connection Multiplexor
   4 *
   5 * Copyright (c) 2016 Tom Herbert <tom@herbertland.com>
   6 */
   7
   8#include <linux/bpf.h>
   9#include <linux/errno.h>
  10#include <linux/errqueue.h>
  11#include <linux/file.h>
  12#include <linux/filter.h>
  13#include <linux/in.h>
  14#include <linux/kernel.h>
  15#include <linux/module.h>
  16#include <linux/net.h>
  17#include <linux/netdevice.h>
  18#include <linux/poll.h>
  19#include <linux/rculist.h>
  20#include <linux/skbuff.h>
  21#include <linux/socket.h>
  22#include <linux/uaccess.h>
  23#include <linux/workqueue.h>
  24#include <linux/syscalls.h>
  25#include <linux/sched/signal.h>
  26
  27#include <net/kcm.h>
  28#include <net/netns/generic.h>
  29#include <net/sock.h>
  30#include <uapi/linux/kcm.h>
 
  31
  32unsigned int kcm_net_id;
  33
  34static struct kmem_cache *kcm_psockp __read_mostly;
  35static struct kmem_cache *kcm_muxp __read_mostly;
  36static struct workqueue_struct *kcm_wq;
  37
  38static inline struct kcm_sock *kcm_sk(const struct sock *sk)
  39{
  40	return (struct kcm_sock *)sk;
  41}
  42
  43static inline struct kcm_tx_msg *kcm_tx_msg(struct sk_buff *skb)
  44{
  45	return (struct kcm_tx_msg *)skb->cb;
  46}
  47
  48static void report_csk_error(struct sock *csk, int err)
  49{
  50	csk->sk_err = EPIPE;
  51	sk_error_report(csk);
  52}
  53
  54static void kcm_abort_tx_psock(struct kcm_psock *psock, int err,
  55			       bool wakeup_kcm)
  56{
  57	struct sock *csk = psock->sk;
  58	struct kcm_mux *mux = psock->mux;
  59
  60	/* Unrecoverable error in transmit */
  61
  62	spin_lock_bh(&mux->lock);
  63
  64	if (psock->tx_stopped) {
  65		spin_unlock_bh(&mux->lock);
  66		return;
  67	}
  68
  69	psock->tx_stopped = 1;
  70	KCM_STATS_INCR(psock->stats.tx_aborts);
  71
  72	if (!psock->tx_kcm) {
  73		/* Take off psocks_avail list */
  74		list_del(&psock->psock_avail_list);
  75	} else if (wakeup_kcm) {
  76		/* In this case psock is being aborted while outside of
  77		 * write_msgs and psock is reserved. Schedule tx_work
  78		 * to handle the failure there. Need to commit tx_stopped
  79		 * before queuing work.
  80		 */
  81		smp_mb();
  82
  83		queue_work(kcm_wq, &psock->tx_kcm->tx_work);
  84	}
  85
  86	spin_unlock_bh(&mux->lock);
  87
  88	/* Report error on lower socket */
  89	report_csk_error(csk, err);
  90}
  91
  92/* RX mux lock held. */
  93static void kcm_update_rx_mux_stats(struct kcm_mux *mux,
  94				    struct kcm_psock *psock)
  95{
  96	STRP_STATS_ADD(mux->stats.rx_bytes,
  97		       psock->strp.stats.bytes -
  98		       psock->saved_rx_bytes);
  99	mux->stats.rx_msgs +=
 100		psock->strp.stats.msgs - psock->saved_rx_msgs;
 101	psock->saved_rx_msgs = psock->strp.stats.msgs;
 102	psock->saved_rx_bytes = psock->strp.stats.bytes;
 103}
 104
 105static void kcm_update_tx_mux_stats(struct kcm_mux *mux,
 106				    struct kcm_psock *psock)
 107{
 108	KCM_STATS_ADD(mux->stats.tx_bytes,
 109		      psock->stats.tx_bytes - psock->saved_tx_bytes);
 110	mux->stats.tx_msgs +=
 111		psock->stats.tx_msgs - psock->saved_tx_msgs;
 112	psock->saved_tx_msgs = psock->stats.tx_msgs;
 113	psock->saved_tx_bytes = psock->stats.tx_bytes;
 114}
 115
 116static int kcm_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
 117
 118/* KCM is ready to receive messages on its queue-- either the KCM is new or
 119 * has become unblocked after being blocked on full socket buffer. Queue any
 120 * pending ready messages on a psock. RX mux lock held.
 121 */
 122static void kcm_rcv_ready(struct kcm_sock *kcm)
 123{
 124	struct kcm_mux *mux = kcm->mux;
 125	struct kcm_psock *psock;
 126	struct sk_buff *skb;
 127
 128	if (unlikely(kcm->rx_wait || kcm->rx_psock || kcm->rx_disabled))
 129		return;
 130
 131	while (unlikely((skb = __skb_dequeue(&mux->rx_hold_queue)))) {
 132		if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
 133			/* Assuming buffer limit has been reached */
 134			skb_queue_head(&mux->rx_hold_queue, skb);
 135			WARN_ON(!sk_rmem_alloc_get(&kcm->sk));
 136			return;
 137		}
 138	}
 139
 140	while (!list_empty(&mux->psocks_ready)) {
 141		psock = list_first_entry(&mux->psocks_ready, struct kcm_psock,
 142					 psock_ready_list);
 143
 144		if (kcm_queue_rcv_skb(&kcm->sk, psock->ready_rx_msg)) {
 145			/* Assuming buffer limit has been reached */
 146			WARN_ON(!sk_rmem_alloc_get(&kcm->sk));
 147			return;
 148		}
 149
 150		/* Consumed the ready message on the psock. Schedule rx_work to
 151		 * get more messages.
 152		 */
 153		list_del(&psock->psock_ready_list);
 154		psock->ready_rx_msg = NULL;
 155		/* Commit clearing of ready_rx_msg for queuing work */
 156		smp_mb();
 157
 158		strp_unpause(&psock->strp);
 159		strp_check_rcv(&psock->strp);
 160	}
 161
 162	/* Buffer limit is okay now, add to ready list */
 163	list_add_tail(&kcm->wait_rx_list,
 164		      &kcm->mux->kcm_rx_waiters);
 165	/* paired with lockless reads in kcm_rfree() */
 166	WRITE_ONCE(kcm->rx_wait, true);
 167}
 168
 169static void kcm_rfree(struct sk_buff *skb)
 170{
 171	struct sock *sk = skb->sk;
 172	struct kcm_sock *kcm = kcm_sk(sk);
 173	struct kcm_mux *mux = kcm->mux;
 174	unsigned int len = skb->truesize;
 175
 176	sk_mem_uncharge(sk, len);
 177	atomic_sub(len, &sk->sk_rmem_alloc);
 178
 179	/* For reading rx_wait and rx_psock without holding lock */
 180	smp_mb__after_atomic();
 181
 182	if (!READ_ONCE(kcm->rx_wait) && !READ_ONCE(kcm->rx_psock) &&
 183	    sk_rmem_alloc_get(sk) < sk->sk_rcvlowat) {
 184		spin_lock_bh(&mux->rx_lock);
 185		kcm_rcv_ready(kcm);
 186		spin_unlock_bh(&mux->rx_lock);
 187	}
 188}
 189
 190static int kcm_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 191{
 192	struct sk_buff_head *list = &sk->sk_receive_queue;
 193
 194	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
 195		return -ENOMEM;
 196
 197	if (!sk_rmem_schedule(sk, skb, skb->truesize))
 198		return -ENOBUFS;
 199
 200	skb->dev = NULL;
 201
 202	skb_orphan(skb);
 203	skb->sk = sk;
 204	skb->destructor = kcm_rfree;
 205	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
 206	sk_mem_charge(sk, skb->truesize);
 207
 208	skb_queue_tail(list, skb);
 209
 210	if (!sock_flag(sk, SOCK_DEAD))
 211		sk->sk_data_ready(sk);
 212
 213	return 0;
 214}
 215
 216/* Requeue received messages for a kcm socket to other kcm sockets. This is
 217	 * called when a kcm socket is receive disabled.
 218 * RX mux lock held.
 219 */
 220static void requeue_rx_msgs(struct kcm_mux *mux, struct sk_buff_head *head)
 221{
 222	struct sk_buff *skb;
 223	struct kcm_sock *kcm;
 224
 225	while ((skb = skb_dequeue(head))) {
 226		/* Reset destructor to avoid calling kcm_rcv_ready */
 227		skb->destructor = sock_rfree;
 228		skb_orphan(skb);
 229try_again:
 230		if (list_empty(&mux->kcm_rx_waiters)) {
 231			skb_queue_tail(&mux->rx_hold_queue, skb);
 232			continue;
 233		}
 234
 235		kcm = list_first_entry(&mux->kcm_rx_waiters,
 236				       struct kcm_sock, wait_rx_list);
 237
 238		if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
 239			/* Should mean socket buffer full */
 240			list_del(&kcm->wait_rx_list);
 241			/* paired with lockless reads in kcm_rfree() */
 242			WRITE_ONCE(kcm->rx_wait, false);
 243
 244			/* Commit rx_wait to read in kcm_free */
 245			smp_wmb();
 246
 247			goto try_again;
 248		}
 249	}
 250}
 251
 252/* Lower sock lock held */
 253static struct kcm_sock *reserve_rx_kcm(struct kcm_psock *psock,
 254				       struct sk_buff *head)
 255{
 256	struct kcm_mux *mux = psock->mux;
 257	struct kcm_sock *kcm;
 258
 259	WARN_ON(psock->ready_rx_msg);
 260
 261	if (psock->rx_kcm)
 262		return psock->rx_kcm;
 263
 264	spin_lock_bh(&mux->rx_lock);
 265
 266	if (psock->rx_kcm) {
 267		spin_unlock_bh(&mux->rx_lock);
 268		return psock->rx_kcm;
 269	}
 270
 271	kcm_update_rx_mux_stats(mux, psock);
 272
 273	if (list_empty(&mux->kcm_rx_waiters)) {
 274		psock->ready_rx_msg = head;
 275		strp_pause(&psock->strp);
 276		list_add_tail(&psock->psock_ready_list,
 277			      &mux->psocks_ready);
 278		spin_unlock_bh(&mux->rx_lock);
 279		return NULL;
 280	}
 281
 282	kcm = list_first_entry(&mux->kcm_rx_waiters,
 283			       struct kcm_sock, wait_rx_list);
 284	list_del(&kcm->wait_rx_list);
 285	/* paired with lockless reads in kcm_rfree() */
 286	WRITE_ONCE(kcm->rx_wait, false);
 287
 288	psock->rx_kcm = kcm;
 289	/* paired with lockless reads in kcm_rfree() */
 290	WRITE_ONCE(kcm->rx_psock, psock);
 291
 292	spin_unlock_bh(&mux->rx_lock);
 293
 294	return kcm;
 295}
 296
 297static void kcm_done(struct kcm_sock *kcm);
 298
 299static void kcm_done_work(struct work_struct *w)
 300{
 301	kcm_done(container_of(w, struct kcm_sock, done_work));
 302}
 303
 304/* Lower sock held */
 305static void unreserve_rx_kcm(struct kcm_psock *psock,
 306			     bool rcv_ready)
 307{
 308	struct kcm_sock *kcm = psock->rx_kcm;
 309	struct kcm_mux *mux = psock->mux;
 310
 311	if (!kcm)
 312		return;
 313
 314	spin_lock_bh(&mux->rx_lock);
 315
 316	psock->rx_kcm = NULL;
 317	/* paired with lockless reads in kcm_rfree() */
 318	WRITE_ONCE(kcm->rx_psock, NULL);
 319
 320	/* Commit kcm->rx_psock before sk_rmem_alloc_get to sync with
 321	 * kcm_rfree
 322	 */
 323	smp_mb();
 324
 325	if (unlikely(kcm->done)) {
 326		spin_unlock_bh(&mux->rx_lock);
 327
 328		/* Need to run kcm_done in a task since we need to acquire
 329		 * callback locks which may already be held here.
 330		 */
 331		INIT_WORK(&kcm->done_work, kcm_done_work);
 332		schedule_work(&kcm->done_work);
 333		return;
 334	}
 335
 336	if (unlikely(kcm->rx_disabled)) {
 337		requeue_rx_msgs(mux, &kcm->sk.sk_receive_queue);
 338	} else if (rcv_ready || unlikely(!sk_rmem_alloc_get(&kcm->sk))) {
 339		/* Check for a degenerate race with rx_wait where all
 340		 * data was already dequeued (accounted for in kcm_rfree).
 341		 */
 342		kcm_rcv_ready(kcm);
 343	}
 344	spin_unlock_bh(&mux->rx_lock);
 345}
 346
 347/* Lower sock lock held */
 348static void psock_data_ready(struct sock *sk)
 349{
 350	struct kcm_psock *psock;
 351
 352	read_lock_bh(&sk->sk_callback_lock);
 353
 354	psock = (struct kcm_psock *)sk->sk_user_data;
 355	if (likely(psock))
 356		strp_data_ready(&psock->strp);
 357
 358	read_unlock_bh(&sk->sk_callback_lock);
 359}
 360
 361/* Called with lower sock held */
 362static void kcm_rcv_strparser(struct strparser *strp, struct sk_buff *skb)
 363{
 364	struct kcm_psock *psock = container_of(strp, struct kcm_psock, strp);
 365	struct kcm_sock *kcm;
 366
 367try_queue:
 368	kcm = reserve_rx_kcm(psock, skb);
 369	if (!kcm) {
 370		 /* Unable to reserve a KCM, message is held in psock and strp
 371		  * is paused.
 372		  */
 373		return;
 374	}
 375
 376	if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
 377		/* Should mean socket buffer full */
 378		unreserve_rx_kcm(psock, false);
 379		goto try_queue;
 380	}
 381}
 382
 383static int kcm_parse_func_strparser(struct strparser *strp, struct sk_buff *skb)
 384{
 385	struct kcm_psock *psock = container_of(strp, struct kcm_psock, strp);
 386	struct bpf_prog *prog = psock->bpf_prog;
 387	int res;
 388
 389	res = bpf_prog_run_pin_on_cpu(prog, skb);
 390	return res;
 391}
 392
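/* For illustration only: a minimal sketch of the kind of parser program the
 * callback above runs on each skb.  The program must return the total length
 * of the message at the head of the stream.  This sketch assumes a
 * hypothetical framing with a 4-byte header whose first two bytes carry the
 * payload length, along the lines of the example in
 * Documentation/networking/kcm.rst; load_half() is the samples/bpf-style
 * helper used there, and the program would be loaded as
 * BPF_PROG_TYPE_SOCKET_FILTER.
 */
int kcm_parser_prog(struct __sk_buff *skb)
{
	/* payload length field plus 4 bytes of header = full message length */
	return load_half(skb, 0) + 4;
}
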
 393static int kcm_read_sock_done(struct strparser *strp, int err)
 394{
 395	struct kcm_psock *psock = container_of(strp, struct kcm_psock, strp);
 396
 397	unreserve_rx_kcm(psock, true);
 398
 399	return err;
 400}
 401
 402static void psock_state_change(struct sock *sk)
 403{
 404	/* TCP only does an EPOLLIN for a half close. Do an EPOLLHUP here
 405	 * since application will normally not poll with EPOLLIN
 406	 * on the TCP sockets.
 407	 */
 408
 409	report_csk_error(sk, EPIPE);
 410}
 411
 412static void psock_write_space(struct sock *sk)
 413{
 414	struct kcm_psock *psock;
 415	struct kcm_mux *mux;
 416	struct kcm_sock *kcm;
 417
 418	read_lock_bh(&sk->sk_callback_lock);
 419
 420	psock = (struct kcm_psock *)sk->sk_user_data;
 421	if (unlikely(!psock))
 422		goto out;
 423	mux = psock->mux;
 424
 425	spin_lock_bh(&mux->lock);
 426
 427	/* Check if the socket is reserved, meaning someone is waiting to send on it. */
 428	kcm = psock->tx_kcm;
 429	if (kcm && !unlikely(kcm->tx_stopped))
 430		queue_work(kcm_wq, &kcm->tx_work);
 431
 432	spin_unlock_bh(&mux->lock);
 433out:
 434	read_unlock_bh(&sk->sk_callback_lock);
 435}
 436
 437static void unreserve_psock(struct kcm_sock *kcm);
 438
 439/* kcm sock is locked. */
 440static struct kcm_psock *reserve_psock(struct kcm_sock *kcm)
 441{
 442	struct kcm_mux *mux = kcm->mux;
 443	struct kcm_psock *psock;
 444
 445	psock = kcm->tx_psock;
 446
 447	smp_rmb(); /* Must read tx_psock before tx_wait */
 448
 449	if (psock) {
 450		WARN_ON(kcm->tx_wait);
 451		if (unlikely(psock->tx_stopped))
 452			unreserve_psock(kcm);
 453		else
 454			return kcm->tx_psock;
 455	}
 456
 457	spin_lock_bh(&mux->lock);
 458
 459	/* Check again under lock to see if a psock was reserved for this
 460	 * kcm socket via psock_now_avail.
 461	 */
 462	psock = kcm->tx_psock;
 463	if (unlikely(psock)) {
 464		WARN_ON(kcm->tx_wait);
 465		spin_unlock_bh(&mux->lock);
 466		return kcm->tx_psock;
 467	}
 468
 469	if (!list_empty(&mux->psocks_avail)) {
 470		psock = list_first_entry(&mux->psocks_avail,
 471					 struct kcm_psock,
 472					 psock_avail_list);
 473		list_del(&psock->psock_avail_list);
 474		if (kcm->tx_wait) {
 475			list_del(&kcm->wait_psock_list);
 476			kcm->tx_wait = false;
 477		}
 478		kcm->tx_psock = psock;
 479		psock->tx_kcm = kcm;
 480		KCM_STATS_INCR(psock->stats.reserved);
 481	} else if (!kcm->tx_wait) {
 482		list_add_tail(&kcm->wait_psock_list,
 483			      &mux->kcm_tx_waiters);
 484		kcm->tx_wait = true;
 485	}
 486
 487	spin_unlock_bh(&mux->lock);
 488
 489	return psock;
 490}
 491
 492/* mux lock held */
 493static void psock_now_avail(struct kcm_psock *psock)
 494{
 495	struct kcm_mux *mux = psock->mux;
 496	struct kcm_sock *kcm;
 497
 498	if (list_empty(&mux->kcm_tx_waiters)) {
 499		list_add_tail(&psock->psock_avail_list,
 500			      &mux->psocks_avail);
 501	} else {
 502		kcm = list_first_entry(&mux->kcm_tx_waiters,
 503				       struct kcm_sock,
 504				       wait_psock_list);
 505		list_del(&kcm->wait_psock_list);
 506		kcm->tx_wait = false;
 507		psock->tx_kcm = kcm;
 508
 509		/* Commit before changing tx_psock since that is read in
 510		 * reserve_psock before queuing work.
 511		 */
 512		smp_mb();
 513
 514		kcm->tx_psock = psock;
 515		KCM_STATS_INCR(psock->stats.reserved);
 516		queue_work(kcm_wq, &kcm->tx_work);
 517	}
 518}
 519
 520/* kcm sock is locked. */
 521static void unreserve_psock(struct kcm_sock *kcm)
 522{
 523	struct kcm_psock *psock;
 524	struct kcm_mux *mux = kcm->mux;
 525
 526	spin_lock_bh(&mux->lock);
 527
 528	psock = kcm->tx_psock;
 529
 530	if (WARN_ON(!psock)) {
 531		spin_unlock_bh(&mux->lock);
 532		return;
 533	}
 534
 535	smp_rmb(); /* Read tx_psock before tx_wait */
 536
 537	kcm_update_tx_mux_stats(mux, psock);
 538
 539	WARN_ON(kcm->tx_wait);
 540
 541	kcm->tx_psock = NULL;
 542	psock->tx_kcm = NULL;
 543	KCM_STATS_INCR(psock->stats.unreserved);
 544
 545	if (unlikely(psock->tx_stopped)) {
 546		if (psock->done) {
 547			/* Deferred free */
 548			list_del(&psock->psock_list);
 549			mux->psocks_cnt--;
 550			sock_put(psock->sk);
 551			fput(psock->sk->sk_socket->file);
 552			kmem_cache_free(kcm_psockp, psock);
 553		}
 554
 555		/* Don't put back on available list */
 556
 557		spin_unlock_bh(&mux->lock);
 558
 559		return;
 560	}
 561
 562	psock_now_avail(psock);
 563
 564	spin_unlock_bh(&mux->lock);
 565}
 566
 567static void kcm_report_tx_retry(struct kcm_sock *kcm)
 568{
 569	struct kcm_mux *mux = kcm->mux;
 570
 571	spin_lock_bh(&mux->lock);
 572	KCM_STATS_INCR(mux->stats.tx_retries);
 573	spin_unlock_bh(&mux->lock);
 574}
 575
 576/* Write any messages ready on the kcm socket.  Called with kcm sock lock
 577 * held.  Return bytes actually sent or error.
 578 */
 579static int kcm_write_msgs(struct kcm_sock *kcm)
 580{
 581	struct sock *sk = &kcm->sk;
 582	struct kcm_psock *psock;
 583	struct sk_buff *skb, *head;
 584	struct kcm_tx_msg *txm;
 585	unsigned short fragidx, frag_offset;
 586	unsigned int sent, total_sent = 0;
 587	int ret = 0;
 588
 589	kcm->tx_wait_more = false;
 590	psock = kcm->tx_psock;
 591	if (unlikely(psock && psock->tx_stopped)) {
 592		/* A reserved psock was aborted asynchronously. Unreserve
 593		 * it and we'll retry the message.
 594		 */
 595		unreserve_psock(kcm);
 596		kcm_report_tx_retry(kcm);
 597		if (skb_queue_empty(&sk->sk_write_queue))
 598			return 0;
 599
 600		kcm_tx_msg(skb_peek(&sk->sk_write_queue))->sent = 0;
 601
 602	} else if (skb_queue_empty(&sk->sk_write_queue)) {
 603		return 0;
 604	}
 605
 606	head = skb_peek(&sk->sk_write_queue);
 607	txm = kcm_tx_msg(head);
 608
 609	if (txm->sent) {
 610		/* Send of first skbuff in queue already in progress */
 611		if (WARN_ON(!psock)) {
 612			ret = -EINVAL;
 613			goto out;
 614		}
 615		sent = txm->sent;
 616		frag_offset = txm->frag_offset;
 617		fragidx = txm->fragidx;
 618		skb = txm->frag_skb;
 619
 620		goto do_frag;
 621	}
 622
 623try_again:
 624	psock = reserve_psock(kcm);
 625	if (!psock)
 626		goto out;
 627
 628	do {
 629		skb = head;
 630		txm = kcm_tx_msg(head);
 631		sent = 0;
 632
 633do_frag_list:
 634		if (WARN_ON(!skb_shinfo(skb)->nr_frags)) {
 635			ret = -EINVAL;
 636			goto out;
 637		}
 638
 639		for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags;
 640		     fragidx++) {
 641			skb_frag_t *frag;
 642
 643			frag_offset = 0;
 644do_frag:
 645			frag = &skb_shinfo(skb)->frags[fragidx];
 646			if (WARN_ON(!skb_frag_size(frag))) {
 647				ret = -EINVAL;
 648				goto out;
 649			}
 650
 651			ret = kernel_sendpage(psock->sk->sk_socket,
 652					      skb_frag_page(frag),
 653					      skb_frag_off(frag) + frag_offset,
 654					      skb_frag_size(frag) - frag_offset,
 655					      MSG_DONTWAIT);
 656			if (ret <= 0) {
 657				if (ret == -EAGAIN) {
 658					/* Save state to try again when there's
 659					 * write space on the socket
 660					 */
 661					txm->sent = sent;
 662					txm->frag_offset = frag_offset;
 663					txm->fragidx = fragidx;
 664					txm->frag_skb = skb;
 665
 666					ret = 0;
 667					goto out;
 668				}
 669
 670				/* Hard failure in sending message, abort this
 671				 * psock since it has lost framing
 672				 * synchronization and retry sending the
 673				 * message from the beginning.
 674				 */
 675				kcm_abort_tx_psock(psock, ret ? -ret : EPIPE,
 676						   true);
 677				unreserve_psock(kcm);
 678
 679				txm->sent = 0;
 680				kcm_report_tx_retry(kcm);
 681				ret = 0;
 682
 683				goto try_again;
 684			}
 685
 686			sent += ret;
 687			frag_offset += ret;
 688			KCM_STATS_ADD(psock->stats.tx_bytes, ret);
 689			if (frag_offset < skb_frag_size(frag)) {
 690				/* Not finished with this frag */
 691				goto do_frag;
 692			}
 693		}
 694
 695		if (skb == head) {
 696			if (skb_has_frag_list(skb)) {
 697				skb = skb_shinfo(skb)->frag_list;
 698				goto do_frag_list;
 699			}
 700		} else if (skb->next) {
 701			skb = skb->next;
 702			goto do_frag_list;
 703		}
 704
 705		/* Successfully sent the whole packet, account for it. */
 706		skb_dequeue(&sk->sk_write_queue);
 707		kfree_skb(head);
 708		sk->sk_wmem_queued -= sent;
 709		total_sent += sent;
 710		KCM_STATS_INCR(psock->stats.tx_msgs);
 711	} while ((head = skb_peek(&sk->sk_write_queue)));
 712out:
 713	if (!head) {
 714		/* Done with all queued messages. */
 715		WARN_ON(!skb_queue_empty(&sk->sk_write_queue));
 716		unreserve_psock(kcm);
 717	}
 718
 719	/* Check if write space is available */
 720	sk->sk_write_space(sk);
 721
 722	return total_sent ? : ret;
 723}
 724
 725static void kcm_tx_work(struct work_struct *w)
 726{
 727	struct kcm_sock *kcm = container_of(w, struct kcm_sock, tx_work);
 728	struct sock *sk = &kcm->sk;
 729	int err;
 730
 731	lock_sock(sk);
 732
 733	/* Primarily for SOCK_DGRAM sockets, also handle asynchronous tx
 734	 * aborts
 735	 */
 736	err = kcm_write_msgs(kcm);
 737	if (err < 0) {
 738		/* Hard failure in write, report error on KCM socket */
 739		pr_warn("KCM: Hard failure on kcm_write_msgs %d\n", err);
 740		report_csk_error(&kcm->sk, -err);
 741		goto out;
 742	}
 743
 744	/* Primarily for SOCK_SEQPACKET sockets */
 745	if (likely(sk->sk_socket) &&
 746	    test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
 747		clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
 748		sk->sk_write_space(sk);
 749	}
 750
 751out:
 752	release_sock(sk);
 753}
 754
 755static void kcm_push(struct kcm_sock *kcm)
 756{
 757	if (kcm->tx_wait_more)
 758		kcm_write_msgs(kcm);
 759}
 760
 761static ssize_t kcm_sendpage(struct socket *sock, struct page *page,
 762			    int offset, size_t size, int flags)
 763
 764{
 765	struct sock *sk = sock->sk;
 766	struct kcm_sock *kcm = kcm_sk(sk);
 767	struct sk_buff *skb = NULL, *head = NULL;
 768	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
 769	bool eor;
 770	int err = 0;
 771	int i;
 772
 773	if (flags & MSG_SENDPAGE_NOTLAST)
 774		flags |= MSG_MORE;
 775
 776	/* No MSG_EOR from splice, only look at MSG_MORE */
 777	eor = !(flags & MSG_MORE);
 778
 779	lock_sock(sk);
 780
 781	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
 782
 783	err = -EPIPE;
 784	if (sk->sk_err)
 785		goto out_error;
 786
 787	if (kcm->seq_skb) {
 788		/* Previously opened message */
 789		head = kcm->seq_skb;
 790		skb = kcm_tx_msg(head)->last_skb;
 791		i = skb_shinfo(skb)->nr_frags;
 792
 793		if (skb_can_coalesce(skb, i, page, offset)) {
 794			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size);
 795			skb_shinfo(skb)->flags |= SKBFL_SHARED_FRAG;
 796			goto coalesced;
 797		}
 798
 799		if (i >= MAX_SKB_FRAGS) {
 800			struct sk_buff *tskb;
 801
 802			tskb = alloc_skb(0, sk->sk_allocation);
 803			while (!tskb) {
 804				kcm_push(kcm);
 805				err = sk_stream_wait_memory(sk, &timeo);
 806				if (err)
 807					goto out_error;
 808			}
 809
 810			if (head == skb)
 811				skb_shinfo(head)->frag_list = tskb;
 812			else
 813				skb->next = tskb;
 814
 815			skb = tskb;
 816			skb->ip_summed = CHECKSUM_UNNECESSARY;
 817			i = 0;
 818		}
 819	} else {
 820		/* Call the sk_stream functions to manage the sndbuf mem. */
 821		if (!sk_stream_memory_free(sk)) {
 822			kcm_push(kcm);
 823			set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
 824			err = sk_stream_wait_memory(sk, &timeo);
 825			if (err)
 826				goto out_error;
 827		}
 828
 829		head = alloc_skb(0, sk->sk_allocation);
 830		while (!head) {
 831			kcm_push(kcm);
 832			err = sk_stream_wait_memory(sk, &timeo);
 833			if (err)
 834				goto out_error;
 835		}
 836
 837		skb = head;
 838		i = 0;
 839	}
 840
 841	get_page(page);
 842	skb_fill_page_desc_noacc(skb, i, page, offset, size);
 843	skb_shinfo(skb)->flags |= SKBFL_SHARED_FRAG;
 844
 845coalesced:
 846	skb->len += size;
 847	skb->data_len += size;
 848	skb->truesize += size;
 849	sk->sk_wmem_queued += size;
 850	sk_mem_charge(sk, size);
 851
 852	if (head != skb) {
 853		head->len += size;
 854		head->data_len += size;
 855		head->truesize += size;
 856	}
 857
 858	if (eor) {
 859		bool not_busy = skb_queue_empty(&sk->sk_write_queue);
 860
 861		/* Message complete, queue it on send buffer */
 862		__skb_queue_tail(&sk->sk_write_queue, head);
 863		kcm->seq_skb = NULL;
 864		KCM_STATS_INCR(kcm->stats.tx_msgs);
 865
 866		if (flags & MSG_BATCH) {
 867			kcm->tx_wait_more = true;
 868		} else if (kcm->tx_wait_more || not_busy) {
 869			err = kcm_write_msgs(kcm);
 870			if (err < 0) {
 871				/* We got a hard error in write_msgs but have
 872				 * already queued this message. Report an error
 873				 * in the socket, but don't affect return value
 874				 * from sendmsg
 875				 */
 876				pr_warn("KCM: Hard failure on kcm_write_msgs\n");
 877				report_csk_error(&kcm->sk, -err);
 878			}
 879		}
 880	} else {
 881		/* Message not complete, save state */
 882		kcm->seq_skb = head;
 883		kcm_tx_msg(head)->last_skb = skb;
 884	}
 885
 886	KCM_STATS_ADD(kcm->stats.tx_bytes, size);
 887
 888	release_sock(sk);
 889	return size;
 890
 891out_error:
 892	kcm_push(kcm);
 893
 894	err = sk_stream_error(sk, flags, err);
 895
 896	/* make sure we wake any epoll edge trigger waiter */
 897	if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 && err == -EAGAIN))
 898		sk->sk_write_space(sk);
 899
 900	release_sock(sk);
 901	return err;
 902}
 903
 904static int kcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
 905{
 906	struct sock *sk = sock->sk;
 907	struct kcm_sock *kcm = kcm_sk(sk);
 908	struct sk_buff *skb = NULL, *head = NULL;
 909	size_t copy, copied = 0;
 910	long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
 911	int eor = (sock->type == SOCK_DGRAM) ?
 912		  !(msg->msg_flags & MSG_MORE) : !!(msg->msg_flags & MSG_EOR);
 913	int err = -EPIPE;
 914
 915	lock_sock(sk);
 916
 917	/* Per tcp_sendmsg this should be in poll */
 918	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
 919
 920	if (sk->sk_err)
 921		goto out_error;
 922
 923	if (kcm->seq_skb) {
 924		/* Previously opened message */
 925		head = kcm->seq_skb;
 926		skb = kcm_tx_msg(head)->last_skb;
 927		goto start;
 928	}
 929
 930	/* Call the sk_stream functions to manage the sndbuf mem. */
 931	if (!sk_stream_memory_free(sk)) {
 932		kcm_push(kcm);
 933		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
 934		err = sk_stream_wait_memory(sk, &timeo);
 935		if (err)
 936			goto out_error;
 937	}
 938
 939	if (msg_data_left(msg)) {
 940		/* New message, alloc head skb */
 941		head = alloc_skb(0, sk->sk_allocation);
 942		while (!head) {
 943			kcm_push(kcm);
 944			err = sk_stream_wait_memory(sk, &timeo);
 945			if (err)
 946				goto out_error;
 947
 948			head = alloc_skb(0, sk->sk_allocation);
 949		}
 950
 951		skb = head;
 952
 953		/* Set ip_summed to CHECKSUM_UNNECESSARY to avoid calling
 954		 * csum_and_copy_from_iter from skb_do_copy_data_nocache.
 955		 */
 956		skb->ip_summed = CHECKSUM_UNNECESSARY;
 957	}
 958
 959start:
 960	while (msg_data_left(msg)) {
 961		bool merge = true;
 962		int i = skb_shinfo(skb)->nr_frags;
 963		struct page_frag *pfrag = sk_page_frag(sk);
 964
 965		if (!sk_page_frag_refill(sk, pfrag))
 966			goto wait_for_memory;
 967
 968		if (!skb_can_coalesce(skb, i, pfrag->page,
 969				      pfrag->offset)) {
 970			if (i == MAX_SKB_FRAGS) {
 971				struct sk_buff *tskb;
 972
 973				tskb = alloc_skb(0, sk->sk_allocation);
 974				if (!tskb)
 975					goto wait_for_memory;
 976
 977				if (head == skb)
 978					skb_shinfo(head)->frag_list = tskb;
 979				else
 980					skb->next = tskb;
 981
 982				skb = tskb;
 983				skb->ip_summed = CHECKSUM_UNNECESSARY;
 984				continue;
 985			}
 986			merge = false;
 987		}
 988
 989		copy = min_t(int, msg_data_left(msg),
 990			     pfrag->size - pfrag->offset);
 991
 992		if (!sk_wmem_schedule(sk, copy))
 993			goto wait_for_memory;
 994
 995		err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb,
 996					       pfrag->page,
 997					       pfrag->offset,
 998					       copy);
 999		if (err)
1000			goto out_error;
1001
1002		/* Update the skb. */
1003		if (merge) {
1004			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
1005		} else {
1006			skb_fill_page_desc(skb, i, pfrag->page,
1007					   pfrag->offset, copy);
1008			get_page(pfrag->page);
1009		}
1010
1011		pfrag->offset += copy;
1012		copied += copy;
1013		if (head != skb) {
1014			head->len += copy;
1015			head->data_len += copy;
1016		}
1017
1018		continue;
1019
1020wait_for_memory:
1021		kcm_push(kcm);
1022		err = sk_stream_wait_memory(sk, &timeo);
1023		if (err)
1024			goto out_error;
1025	}
1026
1027	if (eor) {
1028		bool not_busy = skb_queue_empty(&sk->sk_write_queue);
1029
1030		if (head) {
1031			/* Message complete, queue it on send buffer */
1032			__skb_queue_tail(&sk->sk_write_queue, head);
1033			kcm->seq_skb = NULL;
1034			KCM_STATS_INCR(kcm->stats.tx_msgs);
1035		}
1036
1037		if (msg->msg_flags & MSG_BATCH) {
1038			kcm->tx_wait_more = true;
1039		} else if (kcm->tx_wait_more || not_busy) {
1040			err = kcm_write_msgs(kcm);
1041			if (err < 0) {
1042				/* We got a hard error in write_msgs but have
1043				 * already queued this message. Report an error
1044				 * in the socket, but don't affect return value
1045				 * from sendmsg
1046				 */
1047				pr_warn("KCM: Hard failure on kcm_write_msgs\n");
1048				report_csk_error(&kcm->sk, -err);
1049			}
1050		}
1051	} else {
1052		/* Message not complete, save state */
1053partial_message:
1054		if (head) {
1055			kcm->seq_skb = head;
1056			kcm_tx_msg(head)->last_skb = skb;
1057		}
1058	}
1059
1060	KCM_STATS_ADD(kcm->stats.tx_bytes, copied);
1061
1062	release_sock(sk);
1063	return copied;
1064
1065out_error:
1066	kcm_push(kcm);
1067
1068	if (copied && sock->type == SOCK_SEQPACKET) {
1069		/* Wrote some bytes before encountering an
1070		 * error, return partial success.
1071		 */
1072		goto partial_message;
1073	}
1074
1075	if (head != kcm->seq_skb)
1076		kfree_skb(head);
1077
1078	err = sk_stream_error(sk, msg->msg_flags, err);
1079
1080	/* make sure we wake any epoll edge trigger waiter */
1081	if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 && err == -EAGAIN))
1082		sk->sk_write_space(sk);
1083
1084	release_sock(sk);
1085	return err;
1086}
1087
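/* Illustrative userspace sketch of how the MSG_EOR/MSG_BATCH handling in
 * kcm_sendmsg() above is typically driven on a SOCK_SEQPACKET KCM socket:
 * MSG_BATCH completes the message but defers transmission (tx_wait_more),
 * and the next send without MSG_BATCH flushes the queue.  'kcm_fd' is
 * assumed to be a KCM socket with a TCP socket attached, and msg1/msg2 are
 * fully framed messages; assumes <sys/socket.h> and a libc that defines
 * MSG_BATCH.
 */
static void kcm_send_batch_example(int kcm_fd,
				   const void *msg1, size_t len1,
				   const void *msg2, size_t len2)
{
	send(kcm_fd, msg1, len1, MSG_EOR | MSG_BATCH);
	send(kcm_fd, msg2, len2, MSG_EOR);	/* flushes both messages */
}
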
1088static int kcm_recvmsg(struct socket *sock, struct msghdr *msg,
1089		       size_t len, int flags)
1090{
1091	struct sock *sk = sock->sk;
1092	struct kcm_sock *kcm = kcm_sk(sk);
1093	int err = 0;
1094	struct strp_msg *stm;
1095	int copied = 0;
1096	struct sk_buff *skb;
1097
1098	skb = skb_recv_datagram(sk, flags, &err);
1099	if (!skb)
1100		goto out;
1101
1102	/* Okay, have a message on the receive queue */
1103
1104	stm = strp_msg(skb);
1105
1106	if (len > stm->full_len)
1107		len = stm->full_len;
1108
1109	err = skb_copy_datagram_msg(skb, stm->offset, msg, len);
1110	if (err < 0)
1111		goto out;
1112
1113	copied = len;
1114	if (likely(!(flags & MSG_PEEK))) {
1115		KCM_STATS_ADD(kcm->stats.rx_bytes, copied);
1116		if (copied < stm->full_len) {
1117			if (sock->type == SOCK_DGRAM) {
1118				/* Truncated message */
1119				msg->msg_flags |= MSG_TRUNC;
1120				goto msg_finished;
1121			}
1122			stm->offset += copied;
1123			stm->full_len -= copied;
1124		} else {
1125msg_finished:
1126			/* Finished with message */
1127			msg->msg_flags |= MSG_EOR;
1128			KCM_STATS_INCR(kcm->stats.rx_msgs);
1129		}
1130	}
1131
1132out:
1133	skb_free_datagram(sk, skb);
1134	return copied ? : err;
1135}
1136
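/* Illustrative userspace sketch of receiving one parsed message per call, as
 * implemented by kcm_recvmsg() above: MSG_EOR marks a complete message and,
 * on SOCK_DGRAM, MSG_TRUNC reports that the tail of an oversized message was
 * dropped.  Assumes <stdio.h>, <sys/uio.h> and <sys/socket.h>.
 */
static ssize_t kcm_recv_example(int kcm_fd, void *buf, size_t buflen)
{
	struct iovec iov = { .iov_base = buf, .iov_len = buflen };
	struct msghdr mh = { .msg_iov = &iov, .msg_iovlen = 1 };
	ssize_t n;

	n = recvmsg(kcm_fd, &mh, 0);
	if (n < 0)
		return n;
	if (mh.msg_flags & MSG_TRUNC)
		fprintf(stderr, "KCM message truncated to %zd bytes\n", n);
	if (mh.msg_flags & MSG_EOR)
		fprintf(stderr, "received complete %zd byte message\n", n);
	return n;
}
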
1137static ssize_t kcm_splice_read(struct socket *sock, loff_t *ppos,
1138			       struct pipe_inode_info *pipe, size_t len,
1139			       unsigned int flags)
1140{
1141	struct sock *sk = sock->sk;
1142	struct kcm_sock *kcm = kcm_sk(sk);
1143	struct strp_msg *stm;
1144	int err = 0;
1145	ssize_t copied;
1146	struct sk_buff *skb;
1147
1148	/* Only support splice for SOCK_SEQPACKET */
1149
1150	skb = skb_recv_datagram(sk, flags, &err);
1151	if (!skb)
1152		goto err_out;
1153
1154	/* Okay, have a message on the receive queue */
1155
1156	stm = strp_msg(skb);
1157
1158	if (len > stm->full_len)
1159		len = stm->full_len;
1160
1161	copied = skb_splice_bits(skb, sk, stm->offset, pipe, len, flags);
1162	if (copied < 0) {
1163		err = copied;
1164		goto err_out;
1165	}
1166
1167	KCM_STATS_ADD(kcm->stats.rx_bytes, copied);
1168
1169	stm->offset += copied;
1170	stm->full_len -= copied;
1171
1172	/* We have no way to return MSG_EOR. If all the bytes have been
1173	 * read we still leave the message in the receive socket buffer.
1174	 * A subsequent recvmsg needs to be done to return MSG_EOR and
1175	 * finish reading the message.
1176	 */
1177
1178	skb_free_datagram(sk, skb);
1179	return copied;
1180
1181err_out:
1182	skb_free_datagram(sk, skb);
1183	return err;
1184}
1185
1186/* kcm sock lock held */
1187static void kcm_recv_disable(struct kcm_sock *kcm)
1188{
1189	struct kcm_mux *mux = kcm->mux;
1190
1191	if (kcm->rx_disabled)
1192		return;
1193
1194	spin_lock_bh(&mux->rx_lock);
1195
1196	kcm->rx_disabled = 1;
1197
1198	/* If a psock is reserved we'll do cleanup in unreserve */
1199	if (!kcm->rx_psock) {
1200		if (kcm->rx_wait) {
1201			list_del(&kcm->wait_rx_list);
1202			/* paired with lockless reads in kcm_rfree() */
1203			WRITE_ONCE(kcm->rx_wait, false);
1204		}
1205
1206		requeue_rx_msgs(mux, &kcm->sk.sk_receive_queue);
1207	}
1208
1209	spin_unlock_bh(&mux->rx_lock);
1210}
1211
1212/* kcm sock lock held */
1213static void kcm_recv_enable(struct kcm_sock *kcm)
1214{
1215	struct kcm_mux *mux = kcm->mux;
1216
1217	if (!kcm->rx_disabled)
1218		return;
1219
1220	spin_lock_bh(&mux->rx_lock);
1221
1222	kcm->rx_disabled = 0;
1223	kcm_rcv_ready(kcm);
1224
1225	spin_unlock_bh(&mux->rx_lock);
1226}
1227
1228static int kcm_setsockopt(struct socket *sock, int level, int optname,
1229			  sockptr_t optval, unsigned int optlen)
1230{
1231	struct kcm_sock *kcm = kcm_sk(sock->sk);
1232	int val, valbool;
1233	int err = 0;
1234
1235	if (level != SOL_KCM)
1236		return -ENOPROTOOPT;
1237
1238	if (optlen < sizeof(int))
1239		return -EINVAL;
1240
1241	if (copy_from_sockptr(&val, optval, sizeof(int)))
1242		return -EFAULT;
1243
1244	valbool = val ? 1 : 0;
1245
1246	switch (optname) {
1247	case KCM_RECV_DISABLE:
1248		lock_sock(&kcm->sk);
1249		if (valbool)
1250			kcm_recv_disable(kcm);
1251		else
1252			kcm_recv_enable(kcm);
1253		release_sock(&kcm->sk);
1254		break;
1255	default:
1256		err = -ENOPROTOOPT;
1257	}
1258
1259	return err;
1260}
1261
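/* Illustrative userspace sketch of the KCM_RECV_DISABLE option handled above:
 * while receive is disabled, messages queued to this KCM socket are
 * redistributed to the other KCM sockets on the mux (kcm_recv_disable()).
 * Assumes <sys/socket.h> and <linux/kcm.h>; SOL_KCM may need to be defined by
 * hand on toolchains whose headers lack it.
 */
static int kcm_recv_disable_example(int kcm_fd, int disable)
{
	return setsockopt(kcm_fd, SOL_KCM, KCM_RECV_DISABLE,
			  &disable, sizeof(disable));
}
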
1262static int kcm_getsockopt(struct socket *sock, int level, int optname,
1263			  char __user *optval, int __user *optlen)
1264{
1265	struct kcm_sock *kcm = kcm_sk(sock->sk);
1266	int val, len;
1267
1268	if (level != SOL_KCM)
1269		return -ENOPROTOOPT;
1270
1271	if (get_user(len, optlen))
1272		return -EFAULT;
1273
1274	len = min_t(unsigned int, len, sizeof(int));
1275	if (len < 0)
1276		return -EINVAL;
1277
1278	switch (optname) {
1279	case KCM_RECV_DISABLE:
1280		val = kcm->rx_disabled;
1281		break;
1282	default:
1283		return -ENOPROTOOPT;
1284	}
1285
1286	if (put_user(len, optlen))
1287		return -EFAULT;
1288	if (copy_to_user(optval, &val, len))
1289		return -EFAULT;
1290	return 0;
1291}
1292
1293static void init_kcm_sock(struct kcm_sock *kcm, struct kcm_mux *mux)
1294{
1295	struct kcm_sock *tkcm;
1296	struct list_head *head;
1297	int index = 0;
1298
1299	/* For SOCK_SEQPACKET sock type, datagram_poll checks the sk_state, so
1300	 * we set sk_state; otherwise epoll_wait always returns right away with
1301	 * EPOLLHUP.
1302	 */
1303	kcm->sk.sk_state = TCP_ESTABLISHED;
1304
1305	/* Add to mux's kcm sockets list */
1306	kcm->mux = mux;
1307	spin_lock_bh(&mux->lock);
1308
1309	head = &mux->kcm_socks;
1310	list_for_each_entry(tkcm, &mux->kcm_socks, kcm_sock_list) {
1311		if (tkcm->index != index)
1312			break;
1313		head = &tkcm->kcm_sock_list;
1314		index++;
1315	}
1316
1317	list_add(&kcm->kcm_sock_list, head);
1318	kcm->index = index;
1319
1320	mux->kcm_socks_cnt++;
1321	spin_unlock_bh(&mux->lock);
1322
1323	INIT_WORK(&kcm->tx_work, kcm_tx_work);
1324
1325	spin_lock_bh(&mux->rx_lock);
1326	kcm_rcv_ready(kcm);
1327	spin_unlock_bh(&mux->rx_lock);
1328}
1329
1330static int kcm_attach(struct socket *sock, struct socket *csock,
1331		      struct bpf_prog *prog)
1332{
1333	struct kcm_sock *kcm = kcm_sk(sock->sk);
1334	struct kcm_mux *mux = kcm->mux;
1335	struct sock *csk;
1336	struct kcm_psock *psock = NULL, *tpsock;
1337	struct list_head *head;
1338	int index = 0;
1339	static const struct strp_callbacks cb = {
1340		.rcv_msg = kcm_rcv_strparser,
1341		.parse_msg = kcm_parse_func_strparser,
1342		.read_sock_done = kcm_read_sock_done,
1343	};
1344	int err = 0;
1345
1346	csk = csock->sk;
1347	if (!csk)
1348		return -EINVAL;
1349
1350	lock_sock(csk);
1351
1352	/* Only allow TCP sockets to be attached for now */
1353	if ((csk->sk_family != AF_INET && csk->sk_family != AF_INET6) ||
1354	    csk->sk_protocol != IPPROTO_TCP) {
1355		err = -EOPNOTSUPP;
1356		goto out;
1357	}
1358
1359	/* Don't allow listeners or closed sockets */
1360	if (csk->sk_state == TCP_LISTEN || csk->sk_state == TCP_CLOSE) {
1361		err = -EOPNOTSUPP;
1362		goto out;
1363	}
1364
1365	psock = kmem_cache_zalloc(kcm_psockp, GFP_KERNEL);
1366	if (!psock) {
1367		err = -ENOMEM;
1368		goto out;
1369	}
1370
1371	psock->mux = mux;
1372	psock->sk = csk;
1373	psock->bpf_prog = prog;
1374
1375	write_lock_bh(&csk->sk_callback_lock);
1376
1377	/* Check if sk_user_data is already in use by KCM or someone else.
1378	 * Must be done under lock to prevent race conditions.
1379	 */
1380	if (csk->sk_user_data) {
1381		write_unlock_bh(&csk->sk_callback_lock);
1382		kmem_cache_free(kcm_psockp, psock);
1383		err = -EALREADY;
1384		goto out;
1385	}
1386
1387	err = strp_init(&psock->strp, csk, &cb);
1388	if (err) {
1389		write_unlock_bh(&csk->sk_callback_lock);
1390		kmem_cache_free(kcm_psockp, psock);
1391		goto out;
1392	}
1393
1394	psock->save_data_ready = csk->sk_data_ready;
1395	psock->save_write_space = csk->sk_write_space;
1396	psock->save_state_change = csk->sk_state_change;
1397	csk->sk_user_data = psock;
1398	csk->sk_data_ready = psock_data_ready;
1399	csk->sk_write_space = psock_write_space;
1400	csk->sk_state_change = psock_state_change;
1401
1402	write_unlock_bh(&csk->sk_callback_lock);
1403
1404	sock_hold(csk);
1405
1406	/* Finished initialization, now add the psock to the MUX. */
1407	spin_lock_bh(&mux->lock);
1408	head = &mux->psocks;
1409	list_for_each_entry(tpsock, &mux->psocks, psock_list) {
1410		if (tpsock->index != index)
1411			break;
1412		head = &tpsock->psock_list;
1413		index++;
1414	}
1415
1416	list_add(&psock->psock_list, head);
1417	psock->index = index;
1418
1419	KCM_STATS_INCR(mux->stats.psock_attach);
1420	mux->psocks_cnt++;
1421	psock_now_avail(psock);
1422	spin_unlock_bh(&mux->lock);
1423
1424	/* Schedule RX work in case there are already bytes queued */
1425	strp_check_rcv(&psock->strp);
1426
1427out:
1428	release_sock(csk);
1429
1430	return err;
1431}
1432
1433static int kcm_attach_ioctl(struct socket *sock, struct kcm_attach *info)
1434{
1435	struct socket *csock;
1436	struct bpf_prog *prog;
1437	int err;
1438
1439	csock = sockfd_lookup(info->fd, &err);
1440	if (!csock)
1441		return -ENOENT;
1442
1443	prog = bpf_prog_get_type(info->bpf_fd, BPF_PROG_TYPE_SOCKET_FILTER);
1444	if (IS_ERR(prog)) {
1445		err = PTR_ERR(prog);
1446		goto out;
1447	}
1448
1449	err = kcm_attach(sock, csock, prog);
1450	if (err) {
1451		bpf_prog_put(prog);
1452		goto out;
1453	}
1454
1455	/* Keep reference on file also */
1456
1457	return 0;
1458out:
1459	sockfd_put(csock);
1460	return err;
1461}
1462
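/* Illustrative userspace sketch of the attach path above: pair an established
 * TCP socket with a loaded BPF_PROG_TYPE_SOCKET_FILTER parser and hand both
 * to the KCM socket via SIOCKCMATTACH.  'tcp_fd' and 'bpf_prog_fd' are
 * assumed to already exist; assumes <sys/ioctl.h> and <linux/kcm.h>.
 */
static int kcm_attach_example(int kcm_fd, int tcp_fd, int bpf_prog_fd)
{
	struct kcm_attach attach = {
		.fd	= tcp_fd,
		.bpf_fd	= bpf_prog_fd,
	};

	return ioctl(kcm_fd, SIOCKCMATTACH, &attach);
}
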
1463static void kcm_unattach(struct kcm_psock *psock)
1464{
1465	struct sock *csk = psock->sk;
1466	struct kcm_mux *mux = psock->mux;
1467
1468	lock_sock(csk);
1469
1470	/* Stop getting callbacks from TCP socket. After this there should
1471	 * be no way to reserve a kcm for this psock.
1472	 */
1473	write_lock_bh(&csk->sk_callback_lock);
1474	csk->sk_user_data = NULL;
1475	csk->sk_data_ready = psock->save_data_ready;
1476	csk->sk_write_space = psock->save_write_space;
1477	csk->sk_state_change = psock->save_state_change;
1478	strp_stop(&psock->strp);
1479
1480	if (WARN_ON(psock->rx_kcm)) {
1481		write_unlock_bh(&csk->sk_callback_lock);
1482		release_sock(csk);
1483		return;
1484	}
1485
1486	spin_lock_bh(&mux->rx_lock);
1487
1488	/* Stop receiver activities. After this point psock should not be
1489	 * able to get onto ready list either through callbacks or work.
1490	 */
1491	if (psock->ready_rx_msg) {
1492		list_del(&psock->psock_ready_list);
1493		kfree_skb(psock->ready_rx_msg);
1494		psock->ready_rx_msg = NULL;
1495		KCM_STATS_INCR(mux->stats.rx_ready_drops);
1496	}
1497
1498	spin_unlock_bh(&mux->rx_lock);
1499
1500	write_unlock_bh(&csk->sk_callback_lock);
1501
1502	/* Call strp_done without sock lock */
1503	release_sock(csk);
1504	strp_done(&psock->strp);
1505	lock_sock(csk);
1506
1507	bpf_prog_put(psock->bpf_prog);
1508
1509	spin_lock_bh(&mux->lock);
1510
1511	aggregate_psock_stats(&psock->stats, &mux->aggregate_psock_stats);
1512	save_strp_stats(&psock->strp, &mux->aggregate_strp_stats);
1513
1514	KCM_STATS_INCR(mux->stats.psock_unattach);
1515
1516	if (psock->tx_kcm) {
1517		/* psock was reserved.  Just mark it finished and we will clean
1518		 * up in the kcm paths; we need the kcm lock, which cannot be
1519		 * acquired here.
1520		 */
1521		KCM_STATS_INCR(mux->stats.psock_unattach_rsvd);
1522		spin_unlock_bh(&mux->lock);
1523
1524		/* We are unattaching a socket that is reserved. Abort the
1525		 * socket since we may be out of sync in sending on it. We need
1526		 * to do this without the mux lock.
1527		 */
1528		kcm_abort_tx_psock(psock, EPIPE, false);
1529
1530		spin_lock_bh(&mux->lock);
1531		if (!psock->tx_kcm) {
1532			/* psock was unreserved in the window where the mux was unlocked */
1533			goto no_reserved;
1534		}
1535		psock->done = 1;
1536
1537		/* Commit done before queuing work to process it */
1538		smp_mb();
1539
1540		/* Queue tx work to make sure psock->done is handled */
1541		queue_work(kcm_wq, &psock->tx_kcm->tx_work);
1542		spin_unlock_bh(&mux->lock);
1543	} else {
1544no_reserved:
1545		if (!psock->tx_stopped)
1546			list_del(&psock->psock_avail_list);
1547		list_del(&psock->psock_list);
1548		mux->psocks_cnt--;
1549		spin_unlock_bh(&mux->lock);
1550
1551		sock_put(csk);
1552		fput(csk->sk_socket->file);
1553		kmem_cache_free(kcm_psockp, psock);
1554	}
1555
1556	release_sock(csk);
1557}
1558
1559static int kcm_unattach_ioctl(struct socket *sock, struct kcm_unattach *info)
1560{
1561	struct kcm_sock *kcm = kcm_sk(sock->sk);
1562	struct kcm_mux *mux = kcm->mux;
1563	struct kcm_psock *psock;
1564	struct socket *csock;
1565	struct sock *csk;
1566	int err;
1567
1568	csock = sockfd_lookup(info->fd, &err);
1569	if (!csock)
1570		return -ENOENT;
1571
1572	csk = csock->sk;
1573	if (!csk) {
1574		err = -EINVAL;
1575		goto out;
1576	}
1577
1578	err = -ENOENT;
1579
1580	spin_lock_bh(&mux->lock);
1581
1582	list_for_each_entry(psock, &mux->psocks, psock_list) {
1583		if (psock->sk != csk)
1584			continue;
1585
1586		/* Found the matching psock */
1587
1588		if (psock->unattaching || WARN_ON(psock->done)) {
1589			err = -EALREADY;
1590			break;
1591		}
1592
1593		psock->unattaching = 1;
1594
1595		spin_unlock_bh(&mux->lock);
1596
1597		/* Lower socket lock should already be held */
1598		kcm_unattach(psock);
1599
1600		err = 0;
1601		goto out;
1602	}
1603
1604	spin_unlock_bh(&mux->lock);
1605
1606out:
1607	sockfd_put(csock);
1608	return err;
1609}
1610
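/* Illustrative userspace counterpart of the unattach path above: detach a
 * previously attached TCP socket from the mux.  Assumes <sys/ioctl.h> and
 * <linux/kcm.h>.
 */
static int kcm_unattach_example(int kcm_fd, int tcp_fd)
{
	struct kcm_unattach unattach = { .fd = tcp_fd };

	return ioctl(kcm_fd, SIOCKCMUNATTACH, &unattach);
}
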
1611static struct proto kcm_proto = {
1612	.name	= "KCM",
1613	.owner	= THIS_MODULE,
1614	.obj_size = sizeof(struct kcm_sock),
1615};
1616
1617/* Clone a kcm socket. */
1618static struct file *kcm_clone(struct socket *osock)
1619{
1620	struct socket *newsock;
1621	struct sock *newsk;
1622
1623	newsock = sock_alloc();
1624	if (!newsock)
1625		return ERR_PTR(-ENFILE);
1626
1627	newsock->type = osock->type;
1628	newsock->ops = osock->ops;
1629
1630	__module_get(newsock->ops->owner);
1631
1632	newsk = sk_alloc(sock_net(osock->sk), PF_KCM, GFP_KERNEL,
1633			 &kcm_proto, false);
1634	if (!newsk) {
1635		sock_release(newsock);
1636		return ERR_PTR(-ENOMEM);
1637	}
1638	sock_init_data(newsock, newsk);
1639	init_kcm_sock(kcm_sk(newsk), kcm_sk(osock->sk)->mux);
1640
1641	return sock_alloc_file(newsock, 0, osock->sk->sk_prot_creator->name);
1642}
1643
1644static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1645{
1646	int err;
1647
1648	switch (cmd) {
1649	case SIOCKCMATTACH: {
1650		struct kcm_attach info;
1651
1652		if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
1653			return -EFAULT;
1654
1655		err = kcm_attach_ioctl(sock, &info);
1656
1657		break;
1658	}
1659	case SIOCKCMUNATTACH: {
1660		struct kcm_unattach info;
1661
1662		if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
1663			return -EFAULT;
1664
1665		err = kcm_unattach_ioctl(sock, &info);
1666
1667		break;
1668	}
1669	case SIOCKCMCLONE: {
1670		struct kcm_clone info;
1671		struct file *file;
1672
1673		info.fd = get_unused_fd_flags(0);
1674		if (unlikely(info.fd < 0))
1675			return info.fd;
1676
1677		file = kcm_clone(sock);
1678		if (IS_ERR(file)) {
1679			put_unused_fd(info.fd);
1680			return PTR_ERR(file);
1681		}
1682		if (copy_to_user((void __user *)arg, &info,
1683				 sizeof(info))) {
1684			put_unused_fd(info.fd);
1685			fput(file);
1686			return -EFAULT;
1687		}
1688		fd_install(info.fd, file);
1689		err = 0;
1690		break;
1691	}
1692	default:
1693		err = -ENOIOCTLCMD;
1694		break;
1695	}
1696
1697	return err;
1698}
1699
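/* Illustrative userspace sketch of SIOCKCMCLONE as handled above: the kernel
 * allocates a new KCM socket on the same mux and installs it at info.fd,
 * which is how additional threads usually get their own KCM endpoints.
 * Assumes <sys/ioctl.h> and <linux/kcm.h>.
 */
static int kcm_clone_example(int kcm_fd)
{
	struct kcm_clone info = { 0 };

	if (ioctl(kcm_fd, SIOCKCMCLONE, &info) < 0)
		return -1;

	return info.fd;		/* new KCM socket sharing the same mux */
}
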
1700static void free_mux(struct rcu_head *rcu)
1701{
1702	struct kcm_mux *mux = container_of(rcu,
1703	    struct kcm_mux, rcu);
1704
1705	kmem_cache_free(kcm_muxp, mux);
1706}
1707
1708static void release_mux(struct kcm_mux *mux)
1709{
1710	struct kcm_net *knet = mux->knet;
1711	struct kcm_psock *psock, *tmp_psock;
1712
1713	/* Release psocks */
1714	list_for_each_entry_safe(psock, tmp_psock,
1715				 &mux->psocks, psock_list) {
1716		if (!WARN_ON(psock->unattaching))
1717			kcm_unattach(psock);
1718	}
1719
1720	if (WARN_ON(mux->psocks_cnt))
1721		return;
1722
1723	__skb_queue_purge(&mux->rx_hold_queue);
1724
1725	mutex_lock(&knet->mutex);
1726	aggregate_mux_stats(&mux->stats, &knet->aggregate_mux_stats);
1727	aggregate_psock_stats(&mux->aggregate_psock_stats,
1728			      &knet->aggregate_psock_stats);
1729	aggregate_strp_stats(&mux->aggregate_strp_stats,
1730			     &knet->aggregate_strp_stats);
1731	list_del_rcu(&mux->kcm_mux_list);
1732	knet->count--;
1733	mutex_unlock(&knet->mutex);
1734
1735	call_rcu(&mux->rcu, free_mux);
1736}
1737
1738static void kcm_done(struct kcm_sock *kcm)
1739{
1740	struct kcm_mux *mux = kcm->mux;
1741	struct sock *sk = &kcm->sk;
1742	int socks_cnt;
1743
1744	spin_lock_bh(&mux->rx_lock);
1745	if (kcm->rx_psock) {
1746		/* Cleanup in unreserve_rx_kcm */
1747		WARN_ON(kcm->done);
1748		kcm->rx_disabled = 1;
1749		kcm->done = 1;
1750		spin_unlock_bh(&mux->rx_lock);
1751		return;
1752	}
1753
1754	if (kcm->rx_wait) {
1755		list_del(&kcm->wait_rx_list);
1756		/* paired with lockless reads in kcm_rfree() */
1757		WRITE_ONCE(kcm->rx_wait, false);
1758	}
1759	/* Move any pending receive messages to other kcm sockets */
1760	requeue_rx_msgs(mux, &sk->sk_receive_queue);
1761
1762	spin_unlock_bh(&mux->rx_lock);
1763
1764	if (WARN_ON(sk_rmem_alloc_get(sk)))
1765		return;
1766
1767	/* Detach from MUX */
1768	spin_lock_bh(&mux->lock);
1769
1770	list_del(&kcm->kcm_sock_list);
1771	mux->kcm_socks_cnt--;
1772	socks_cnt = mux->kcm_socks_cnt;
1773
1774	spin_unlock_bh(&mux->lock);
1775
1776	if (!socks_cnt) {
1777		/* We are done with the mux now. */
1778		release_mux(mux);
1779	}
1780
1781	WARN_ON(kcm->rx_wait);
1782
1783	sock_put(&kcm->sk);
1784}
1785
1786/* Called by kcm_release to close a KCM socket.
1787 * If this is the last KCM socket on the MUX, destroy the MUX.
1788 */
1789static int kcm_release(struct socket *sock)
1790{
1791	struct sock *sk = sock->sk;
1792	struct kcm_sock *kcm;
1793	struct kcm_mux *mux;
1794	struct kcm_psock *psock;
1795
1796	if (!sk)
1797		return 0;
1798
1799	kcm = kcm_sk(sk);
1800	mux = kcm->mux;
1801
1802	lock_sock(sk);
1803	sock_orphan(sk);
1804	kfree_skb(kcm->seq_skb);
1805
1806	/* Purge queue under lock to avoid race condition with tx_work trying
1807	 * to act when queue is nonempty. If tx_work runs after this point
1808	 * it will just return.
1809	 */
1810	__skb_queue_purge(&sk->sk_write_queue);
1811
1812	/* Set tx_stopped. This is checked when psock is bound to a kcm and we
1813	 * get a writespace callback. This prevents further work being queued
1814	 * from the callback (unbinding the psock occurs after canceling work).
1815	 */
1816	kcm->tx_stopped = 1;
1817
1818	release_sock(sk);
1819
1820	spin_lock_bh(&mux->lock);
1821	if (kcm->tx_wait) {
1822		/* Take off the tx_wait list; after this point there should be no way
1823		 * that a psock will be assigned to this kcm.
1824		 */
1825		list_del(&kcm->wait_psock_list);
1826		kcm->tx_wait = false;
1827	}
1828	spin_unlock_bh(&mux->lock);
1829
1830	/* Cancel work. After this point there should be no outside references
1831	 * to the kcm socket.
1832	 */
1833	cancel_work_sync(&kcm->tx_work);
1834
1835	lock_sock(sk);
1836	psock = kcm->tx_psock;
1837	if (psock) {
1838		/* A psock was reserved, so we need to kill it since it
1839		 * may already have some bytes queued from a message. We
1840		 * need to do this after removing kcm from tx_wait list.
1841		 */
1842		kcm_abort_tx_psock(psock, EPIPE, false);
1843		unreserve_psock(kcm);
1844	}
1845	release_sock(sk);
1846
1847	WARN_ON(kcm->tx_wait);
1848	WARN_ON(kcm->tx_psock);
1849
1850	sock->sk = NULL;
1851
1852	kcm_done(kcm);
1853
1854	return 0;
1855}
1856
1857static const struct proto_ops kcm_dgram_ops = {
1858	.family =	PF_KCM,
1859	.owner =	THIS_MODULE,
1860	.release =	kcm_release,
1861	.bind =		sock_no_bind,
1862	.connect =	sock_no_connect,
1863	.socketpair =	sock_no_socketpair,
1864	.accept =	sock_no_accept,
1865	.getname =	sock_no_getname,
1866	.poll =		datagram_poll,
1867	.ioctl =	kcm_ioctl,
1868	.listen =	sock_no_listen,
1869	.shutdown =	sock_no_shutdown,
1870	.setsockopt =	kcm_setsockopt,
1871	.getsockopt =	kcm_getsockopt,
1872	.sendmsg =	kcm_sendmsg,
1873	.recvmsg =	kcm_recvmsg,
1874	.mmap =		sock_no_mmap,
1875	.sendpage =	kcm_sendpage,
1876};
1877
1878static const struct proto_ops kcm_seqpacket_ops = {
1879	.family =	PF_KCM,
1880	.owner =	THIS_MODULE,
1881	.release =	kcm_release,
1882	.bind =		sock_no_bind,
1883	.connect =	sock_no_connect,
1884	.socketpair =	sock_no_socketpair,
1885	.accept =	sock_no_accept,
1886	.getname =	sock_no_getname,
1887	.poll =		datagram_poll,
1888	.ioctl =	kcm_ioctl,
1889	.listen =	sock_no_listen,
1890	.shutdown =	sock_no_shutdown,
1891	.setsockopt =	kcm_setsockopt,
1892	.getsockopt =	kcm_getsockopt,
1893	.sendmsg =	kcm_sendmsg,
1894	.recvmsg =	kcm_recvmsg,
1895	.mmap =		sock_no_mmap,
1896	.sendpage =	kcm_sendpage,
1897	.splice_read =	kcm_splice_read,
1898};
1899
1900/* Create proto operation for kcm sockets */
1901static int kcm_create(struct net *net, struct socket *sock,
1902		      int protocol, int kern)
1903{
1904	struct kcm_net *knet = net_generic(net, kcm_net_id);
1905	struct sock *sk;
1906	struct kcm_mux *mux;
1907
1908	switch (sock->type) {
1909	case SOCK_DGRAM:
1910		sock->ops = &kcm_dgram_ops;
1911		break;
1912	case SOCK_SEQPACKET:
1913		sock->ops = &kcm_seqpacket_ops;
1914		break;
1915	default:
1916		return -ESOCKTNOSUPPORT;
1917	}
1918
1919	if (protocol != KCMPROTO_CONNECTED)
1920		return -EPROTONOSUPPORT;
1921
1922	sk = sk_alloc(net, PF_KCM, GFP_KERNEL, &kcm_proto, kern);
1923	if (!sk)
1924		return -ENOMEM;
1925
1926	/* Allocate a kcm mux, shared between KCM sockets */
1927	mux = kmem_cache_zalloc(kcm_muxp, GFP_KERNEL);
1928	if (!mux) {
1929		sk_free(sk);
1930		return -ENOMEM;
1931	}
1932
1933	spin_lock_init(&mux->lock);
1934	spin_lock_init(&mux->rx_lock);
1935	INIT_LIST_HEAD(&mux->kcm_socks);
1936	INIT_LIST_HEAD(&mux->kcm_rx_waiters);
1937	INIT_LIST_HEAD(&mux->kcm_tx_waiters);
1938
1939	INIT_LIST_HEAD(&mux->psocks);
1940	INIT_LIST_HEAD(&mux->psocks_ready);
1941	INIT_LIST_HEAD(&mux->psocks_avail);
1942
1943	mux->knet = knet;
1944
1945	/* Add new MUX to list */
1946	mutex_lock(&knet->mutex);
1947	list_add_rcu(&mux->kcm_mux_list, &knet->mux_list);
1948	knet->count++;
1949	mutex_unlock(&knet->mutex);
1950
1951	skb_queue_head_init(&mux->rx_hold_queue);
1952
1953	/* Init KCM socket */
1954	sock_init_data(sock, sk);
1955	init_kcm_sock(kcm_sk(sk), mux);
1956
1957	return 0;
1958}
1959
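/* Illustrative userspace sketch matching the checks in kcm_create() above:
 * KCM sockets are created with AF_KCM, either SOCK_DGRAM or SOCK_SEQPACKET,
 * and protocol KCMPROTO_CONNECTED.  Assumes <sys/socket.h> and <linux/kcm.h>;
 * older libcs may not define AF_KCM.
 */
static int kcm_socket_example(void)
{
	return socket(AF_KCM, SOCK_SEQPACKET, KCMPROTO_CONNECTED);
}
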
1960static const struct net_proto_family kcm_family_ops = {
1961	.family = PF_KCM,
1962	.create = kcm_create,
1963	.owner  = THIS_MODULE,
1964};
1965
1966static __net_init int kcm_init_net(struct net *net)
1967{
1968	struct kcm_net *knet = net_generic(net, kcm_net_id);
1969
1970	INIT_LIST_HEAD_RCU(&knet->mux_list);
1971	mutex_init(&knet->mutex);
1972
1973	return 0;
1974}
1975
1976static __net_exit void kcm_exit_net(struct net *net)
1977{
1978	struct kcm_net *knet = net_generic(net, kcm_net_id);
1979
1980	/* All KCM sockets should be closed at this point, which should mean
1981	 * that all multiplexors and psocks have been destroyed.
1982	 */
1983	WARN_ON(!list_empty(&knet->mux_list));
1984}
1985
1986static struct pernet_operations kcm_net_ops = {
1987	.init = kcm_init_net,
1988	.exit = kcm_exit_net,
1989	.id   = &kcm_net_id,
1990	.size = sizeof(struct kcm_net),
1991};
1992
1993static int __init kcm_init(void)
1994{
1995	int err = -ENOMEM;
1996
1997	kcm_muxp = kmem_cache_create("kcm_mux_cache",
1998				     sizeof(struct kcm_mux), 0,
1999				     SLAB_HWCACHE_ALIGN, NULL);
2000	if (!kcm_muxp)
2001		goto fail;
2002
2003	kcm_psockp = kmem_cache_create("kcm_psock_cache",
2004				       sizeof(struct kcm_psock), 0,
2005					SLAB_HWCACHE_ALIGN, NULL);
2006	if (!kcm_psockp)
2007		goto fail;
2008
2009	kcm_wq = create_singlethread_workqueue("kkcmd");
2010	if (!kcm_wq)
2011		goto fail;
2012
2013	err = proto_register(&kcm_proto, 1);
2014	if (err)
2015		goto fail;
2016
2017	err = register_pernet_device(&kcm_net_ops);
2018	if (err)
2019		goto net_ops_fail;
2020
2021	err = sock_register(&kcm_family_ops);
2022	if (err)
2023		goto sock_register_fail;
2024
2025	err = kcm_proc_init();
2026	if (err)
2027		goto proc_init_fail;
2028
2029	return 0;
2030
2031proc_init_fail:
2032	sock_unregister(PF_KCM);
2033
2034sock_register_fail:
2035	unregister_pernet_device(&kcm_net_ops);
2036
2037net_ops_fail:
2038	proto_unregister(&kcm_proto);
2039
2040fail:
2041	kmem_cache_destroy(kcm_muxp);
2042	kmem_cache_destroy(kcm_psockp);
2043
2044	if (kcm_wq)
2045		destroy_workqueue(kcm_wq);
2046
2047	return err;
2048}
2049
2050static void __exit kcm_exit(void)
2051{
2052	kcm_proc_exit();
2053	sock_unregister(PF_KCM);
2054	unregister_pernet_device(&kcm_net_ops);
2055	proto_unregister(&kcm_proto);
2056	destroy_workqueue(kcm_wq);
2057
2058	kmem_cache_destroy(kcm_muxp);
2059	kmem_cache_destroy(kcm_psockp);
2060}
2061
2062module_init(kcm_init);
2063module_exit(kcm_exit);
2064
2065MODULE_LICENSE("GPL");
2066MODULE_ALIAS_NETPROTO(PF_KCM);