Linux v4.6
   1#include <linux/bpf.h>
   2#include <linux/errno.h>
   3#include <linux/errqueue.h>
   4#include <linux/file.h>
 
   5#include <linux/in.h>
   6#include <linux/kernel.h>
   7#include <linux/module.h>
   8#include <linux/net.h>
   9#include <linux/netdevice.h>
  10#include <linux/poll.h>
  11#include <linux/rculist.h>
  12#include <linux/skbuff.h>
  13#include <linux/socket.h>
  14#include <linux/uaccess.h>
  15#include <linux/workqueue.h>
  16#include <net/kcm.h>
  17#include <net/netns/generic.h>
  18#include <net/sock.h>
  19#include <net/tcp.h>
  20#include <uapi/linux/kcm.h>
 
  21
  22unsigned int kcm_net_id;
  23
  24static struct kmem_cache *kcm_psockp __read_mostly;
  25static struct kmem_cache *kcm_muxp __read_mostly;
  26static struct workqueue_struct *kcm_wq;
  27
  28static inline struct kcm_sock *kcm_sk(const struct sock *sk)
  29{
  30	return (struct kcm_sock *)sk;
  31}
  32
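/* Per-message state is carried in the skb control buffer (skb->cb): TX state
 * via kcm_tx_msg() at the start of cb, and RX state via kcm_rx_msg() at
 * offsetof(struct qdisc_skb_cb, data) within cb, presumably so it does not
 * clash with lower-layer users of the cb area.
 */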
  33static inline struct kcm_tx_msg *kcm_tx_msg(struct sk_buff *skb)
  34{
  35	return (struct kcm_tx_msg *)skb->cb;
  36}
  37
  38static inline struct kcm_rx_msg *kcm_rx_msg(struct sk_buff *skb)
  39{
  40	return (struct kcm_rx_msg *)((void *)skb->cb +
  41				     offsetof(struct qdisc_skb_cb, data));
  42}
  43
  44static void report_csk_error(struct sock *csk, int err)
  45{
  46	csk->sk_err = EPIPE;
  47	csk->sk_error_report(csk);
  48}
  49
  50/* Callback lock held */
  51static void kcm_abort_rx_psock(struct kcm_psock *psock, int err,
  52			       struct sk_buff *skb)
  53{
  54	struct sock *csk = psock->sk;
  55
  56	/* Unrecoverable error in receive */
  57
  58	del_timer(&psock->rx_msg_timer);
  59
  60	if (psock->rx_stopped)
  61		return;
  62
  63	psock->rx_stopped = 1;
  64	KCM_STATS_INCR(psock->stats.rx_aborts);
  65
  66	/* Report an error on the lower socket */
  67	report_csk_error(csk, err);
  68}
  69
  70static void kcm_abort_tx_psock(struct kcm_psock *psock, int err,
  71			       bool wakeup_kcm)
  72{
  73	struct sock *csk = psock->sk;
  74	struct kcm_mux *mux = psock->mux;
  75
  76	/* Unrecoverable error in transmit */
  77
  78	spin_lock_bh(&mux->lock);
  79
  80	if (psock->tx_stopped) {
  81		spin_unlock_bh(&mux->lock);
  82		return;
  83	}
  84
  85	psock->tx_stopped = 1;
  86	KCM_STATS_INCR(psock->stats.tx_aborts);
  87
  88	if (!psock->tx_kcm) {
  89		/* Take off psocks_avail list */
  90		list_del(&psock->psock_avail_list);
  91	} else if (wakeup_kcm) {
  92		/* In this case psock is being aborted while outside of
  93		 * write_msgs and psock is reserved. Schedule tx_work
  94		 * to handle the failure there. Need to commit tx_stopped
  95		 * before queuing work.
  96		 */
  97		smp_mb();
  98
  99		queue_work(kcm_wq, &psock->tx_kcm->tx_work);
 100	}
 101
 102	spin_unlock_bh(&mux->lock);
 103
 104	/* Report error on lower socket */
 105	report_csk_error(csk, err);
 106}
 107
 108/* RX mux lock held. */
 109static void kcm_update_rx_mux_stats(struct kcm_mux *mux,
 110				    struct kcm_psock *psock)
 111{
 112	KCM_STATS_ADD(mux->stats.rx_bytes,
 113		      psock->stats.rx_bytes - psock->saved_rx_bytes);
 
 114	mux->stats.rx_msgs +=
 115		psock->stats.rx_msgs - psock->saved_rx_msgs;
 116	psock->saved_rx_msgs = psock->stats.rx_msgs;
 117	psock->saved_rx_bytes = psock->stats.rx_bytes;
 118}
 119
 120static void kcm_update_tx_mux_stats(struct kcm_mux *mux,
 121				    struct kcm_psock *psock)
 122{
 123	KCM_STATS_ADD(mux->stats.tx_bytes,
 124		      psock->stats.tx_bytes - psock->saved_tx_bytes);
 125	mux->stats.tx_msgs +=
 126		psock->stats.tx_msgs - psock->saved_tx_msgs;
 127	psock->saved_tx_msgs = psock->stats.tx_msgs;
 128	psock->saved_tx_bytes = psock->stats.tx_bytes;
 129}
 130
 131static int kcm_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
 132
 133/* KCM is ready to receive messages on its queue-- either the KCM is new or
  134 * has become unblocked after being blocked on a full socket buffer. Queue any
 135 * pending ready messages on a psock. RX mux lock held.
 136 */
 137static void kcm_rcv_ready(struct kcm_sock *kcm)
 138{
 139	struct kcm_mux *mux = kcm->mux;
 140	struct kcm_psock *psock;
 141	struct sk_buff *skb;
 142
 143	if (unlikely(kcm->rx_wait || kcm->rx_psock || kcm->rx_disabled))
 144		return;
 145
 146	while (unlikely((skb = __skb_dequeue(&mux->rx_hold_queue)))) {
 147		if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
 148			/* Assuming buffer limit has been reached */
 149			skb_queue_head(&mux->rx_hold_queue, skb);
 150			WARN_ON(!sk_rmem_alloc_get(&kcm->sk));
 151			return;
 152		}
 153	}
 154
 155	while (!list_empty(&mux->psocks_ready)) {
 156		psock = list_first_entry(&mux->psocks_ready, struct kcm_psock,
 157					 psock_ready_list);
 158
 159		if (kcm_queue_rcv_skb(&kcm->sk, psock->ready_rx_msg)) {
 160			/* Assuming buffer limit has been reached */
 161			WARN_ON(!sk_rmem_alloc_get(&kcm->sk));
 162			return;
 163		}
 164
 165		/* Consumed the ready message on the psock. Schedule rx_work to
 166		 * get more messages.
 167		 */
 168		list_del(&psock->psock_ready_list);
 169		psock->ready_rx_msg = NULL;
 170
 171		/* Commit clearing of ready_rx_msg for queuing work */
 172		smp_mb();
 173
 174		queue_work(kcm_wq, &psock->rx_work);
 
 175	}
 176
 177	/* Buffer limit is okay now, add to ready list */
 178	list_add_tail(&kcm->wait_rx_list,
 179		      &kcm->mux->kcm_rx_waiters);
 180	kcm->rx_wait = true;
 
 181}
 182
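/* skb destructor for skbs charged to a KCM socket's receive buffer: uncharge
 * the memory and, if this KCM socket is neither waiting nor reserved and its
 * receive allocation has dropped below sk_rcvlowat, put it back on the
 * receiver list via kcm_rcv_ready().
 */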
 183static void kcm_rfree(struct sk_buff *skb)
 184{
 185	struct sock *sk = skb->sk;
 186	struct kcm_sock *kcm = kcm_sk(sk);
 187	struct kcm_mux *mux = kcm->mux;
 188	unsigned int len = skb->truesize;
 189
 190	sk_mem_uncharge(sk, len);
 191	atomic_sub(len, &sk->sk_rmem_alloc);
 192
 193	/* For reading rx_wait and rx_psock without holding lock */
 194	smp_mb__after_atomic();
 195
 196	if (!kcm->rx_wait && !kcm->rx_psock &&
 197	    sk_rmem_alloc_get(sk) < sk->sk_rcvlowat) {
 198		spin_lock_bh(&mux->rx_lock);
 199		kcm_rcv_ready(kcm);
 200		spin_unlock_bh(&mux->rx_lock);
 201	}
 202}
 203
 204static int kcm_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 205{
 206	struct sk_buff_head *list = &sk->sk_receive_queue;
 207
 208	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
 209		return -ENOMEM;
 210
 211	if (!sk_rmem_schedule(sk, skb, skb->truesize))
 212		return -ENOBUFS;
 213
 214	skb->dev = NULL;
 215
 216	skb_orphan(skb);
 217	skb->sk = sk;
 218	skb->destructor = kcm_rfree;
 219	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
 220	sk_mem_charge(sk, skb->truesize);
 221
 222	skb_queue_tail(list, skb);
 223
 224	if (!sock_flag(sk, SOCK_DEAD))
 225		sk->sk_data_ready(sk);
 226
 227	return 0;
 228}
 229
 230/* Requeue received messages for a kcm socket to other kcm sockets. This is
  231 * called when a kcm socket is receive disabled.
 232 * RX mux lock held.
 233 */
 234static void requeue_rx_msgs(struct kcm_mux *mux, struct sk_buff_head *head)
 235{
 236	struct sk_buff *skb;
 237	struct kcm_sock *kcm;
 238
 239	while ((skb = __skb_dequeue(head))) {
 240		/* Reset destructor to avoid calling kcm_rcv_ready */
 241		skb->destructor = sock_rfree;
 242		skb_orphan(skb);
 243try_again:
 244		if (list_empty(&mux->kcm_rx_waiters)) {
 245			skb_queue_tail(&mux->rx_hold_queue, skb);
 246			continue;
 247		}
 248
 249		kcm = list_first_entry(&mux->kcm_rx_waiters,
 250				       struct kcm_sock, wait_rx_list);
 251
 252		if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
 253			/* Should mean socket buffer full */
 254			list_del(&kcm->wait_rx_list);
 255			kcm->rx_wait = false;
 
 256
  257			/* Commit rx_wait to read in kcm_rfree */
 258			smp_wmb();
 259
 260			goto try_again;
 261		}
 262	}
 263}
 264
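/* Pick a KCM socket to receive the message being assembled on this psock. If
 * one is already reserved (psock->rx_kcm), keep using it; otherwise take the
 * first waiter off kcm_rx_waiters. If there is no waiter, park the complete
 * message in psock->ready_rx_msg and return NULL so the caller stops reading
 * until a receiver becomes available.
 */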
 265/* Lower sock lock held */
 266static struct kcm_sock *reserve_rx_kcm(struct kcm_psock *psock,
 267				       struct sk_buff *head)
 268{
 269	struct kcm_mux *mux = psock->mux;
 270	struct kcm_sock *kcm;
 271
 272	WARN_ON(psock->ready_rx_msg);
 273
 274	if (psock->rx_kcm)
 275		return psock->rx_kcm;
 276
 277	spin_lock_bh(&mux->rx_lock);
 278
 279	if (psock->rx_kcm) {
 280		spin_unlock_bh(&mux->rx_lock);
 281		return psock->rx_kcm;
 282	}
 283
 284	kcm_update_rx_mux_stats(mux, psock);
 285
 286	if (list_empty(&mux->kcm_rx_waiters)) {
 287		psock->ready_rx_msg = head;
 
 288		list_add_tail(&psock->psock_ready_list,
 289			      &mux->psocks_ready);
 290		spin_unlock_bh(&mux->rx_lock);
 291		return NULL;
 292	}
 293
 294	kcm = list_first_entry(&mux->kcm_rx_waiters,
 295			       struct kcm_sock, wait_rx_list);
 296	list_del(&kcm->wait_rx_list);
 297	kcm->rx_wait = false;
 
 298
 299	psock->rx_kcm = kcm;
 300	kcm->rx_psock = psock;
 
 301
 302	spin_unlock_bh(&mux->rx_lock);
 303
 304	return kcm;
 305}
 306
 307static void kcm_done(struct kcm_sock *kcm);
 308
 309static void kcm_done_work(struct work_struct *w)
 310{
 311	kcm_done(container_of(w, struct kcm_sock, done_work));
 312}
 313
 314/* Lower sock held */
 315static void unreserve_rx_kcm(struct kcm_psock *psock,
 316			     bool rcv_ready)
 317{
 318	struct kcm_sock *kcm = psock->rx_kcm;
 319	struct kcm_mux *mux = psock->mux;
 320
 321	if (!kcm)
 322		return;
 323
 324	spin_lock_bh(&mux->rx_lock);
 325
 326	psock->rx_kcm = NULL;
 327	kcm->rx_psock = NULL;
 
 328
 329	/* Commit kcm->rx_psock before sk_rmem_alloc_get to sync with
 330	 * kcm_rfree
 331	 */
 332	smp_mb();
 333
 334	if (unlikely(kcm->done)) {
 335		spin_unlock_bh(&mux->rx_lock);
 336
  337		/* Need to run kcm_done in a task since we need to acquire
 338		 * callback locks which may already be held here.
 339		 */
 340		INIT_WORK(&kcm->done_work, kcm_done_work);
 341		schedule_work(&kcm->done_work);
 342		return;
 343	}
 344
 345	if (unlikely(kcm->rx_disabled)) {
 346		requeue_rx_msgs(mux, &kcm->sk.sk_receive_queue);
 347	} else if (rcv_ready || unlikely(!sk_rmem_alloc_get(&kcm->sk))) {
  348		/* Check for a degenerate race with rx_wait where all
  349		 * data was already dequeued (accounted for in kcm_rfree).
 350		 */
 351		kcm_rcv_ready(kcm);
 352	}
 353	spin_unlock_bh(&mux->rx_lock);
 354}
 355
 356static void kcm_start_rx_timer(struct kcm_psock *psock)
 357{
 358	if (psock->sk->sk_rcvtimeo)
 359		mod_timer(&psock->rx_msg_timer, psock->sk->sk_rcvtimeo);
 360}
 361
 362/* Macro to invoke filter function. */
 363#define KCM_RUN_FILTER(prog, ctx) \
 364	(*prog->bpf_func)(ctx, prog->insnsi)
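/* A minimal sketch of a parser program (not part of this file): the attached
 * BPF_PROG_TYPE_SOCKET_FILTER program is run on the accumulated message head
 * and must return the total length of the message at the front of the stream,
 * or 0 if more header bytes are needed. Assuming a 2-byte big-endian length
 * header that does not count itself, something like:
 *
 *	int kcm_parse(struct __sk_buff *skb)
 *	{
 *		__u8 hdr[2];
 *
 *		if (bpf_skb_load_bytes(skb, 0, hdr, sizeof(hdr)))
 *			return 0;	// header not complete yet
 *		return ((hdr[0] << 8) | hdr[1]) + sizeof(hdr);
 *	}
 *
 * The program name and framing are illustrative only; see kcm_tcp_recv()
 * below for how the return value is consumed.
 */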
 365
 366/* Lower socket lock held */
 367static int kcm_tcp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb,
 368			unsigned int orig_offset, size_t orig_len)
 369{
 370	struct kcm_psock *psock = (struct kcm_psock *)desc->arg.data;
 371	struct kcm_rx_msg *rxm;
 372	struct kcm_sock *kcm;
 373	struct sk_buff *head, *skb;
 374	size_t eaten = 0, cand_len;
 375	ssize_t extra;
 376	int err;
 377	bool cloned_orig = false;
 378
 379	if (psock->ready_rx_msg)
 380		return 0;
 381
 382	head = psock->rx_skb_head;
 383	if (head) {
 384		/* Message already in progress */
 385
 386		rxm = kcm_rx_msg(head);
 387		if (unlikely(rxm->early_eaten)) {
  388			/* Some bytes of the receive socket's data have already
  389			 * been saved in rx_skb_head; just indicate that they
  390			 * are consumed.
 391			 */
 392			eaten = orig_len <= rxm->early_eaten ?
 393				orig_len : rxm->early_eaten;
 394			rxm->early_eaten -= eaten;
 395
 396			return eaten;
 397		}
 398
 399		if (unlikely(orig_offset)) {
 400			/* Getting data with a non-zero offset when a message is
 401			 * in progress is not expected. If it does happen, we
 402			 * need to clone and pull since we can't deal with
  403			 * offsets in the skbs for a message except in the head.
 404			 */
 405			orig_skb = skb_clone(orig_skb, GFP_ATOMIC);
 406			if (!orig_skb) {
 407				KCM_STATS_INCR(psock->stats.rx_mem_fail);
 408				desc->error = -ENOMEM;
 409				return 0;
 410			}
 411			if (!pskb_pull(orig_skb, orig_offset)) {
 412				KCM_STATS_INCR(psock->stats.rx_mem_fail);
 413				kfree_skb(orig_skb);
 414				desc->error = -ENOMEM;
 415				return 0;
 416			}
 417			cloned_orig = true;
 418			orig_offset = 0;
 419		}
 420
 421		if (!psock->rx_skb_nextp) {
  422			/* We are going to append to the frag_list of head.
 423			 * Need to unshare the frag_list.
 424			 */
 425			err = skb_unclone(head, GFP_ATOMIC);
 426			if (err) {
 427				KCM_STATS_INCR(psock->stats.rx_mem_fail);
 428				desc->error = err;
 429				return 0;
 430			}
 431
 432			if (unlikely(skb_shinfo(head)->frag_list)) {
 433				/* We can't append to an sk_buff that already
 434				 * has a frag_list. We create a new head, point
 435				 * the frag_list of that to the old head, and
 436				 * then are able to use the old head->next for
 437				 * appending to the message.
 438				 */
 439				if (WARN_ON(head->next)) {
 440					desc->error = -EINVAL;
 441					return 0;
 442				}
 443
 444				skb = alloc_skb(0, GFP_ATOMIC);
 445				if (!skb) {
 446					KCM_STATS_INCR(psock->stats.rx_mem_fail);
 447					desc->error = -ENOMEM;
 448					return 0;
 449				}
 450				skb->len = head->len;
 451				skb->data_len = head->len;
 452				skb->truesize = head->truesize;
 453				*kcm_rx_msg(skb) = *kcm_rx_msg(head);
 454				psock->rx_skb_nextp = &head->next;
 455				skb_shinfo(skb)->frag_list = head;
 456				psock->rx_skb_head = skb;
 457				head = skb;
 458			} else {
 459				psock->rx_skb_nextp =
 460				    &skb_shinfo(head)->frag_list;
 461			}
 462		}
 463	}
 464
 465	while (eaten < orig_len) {
 466		/* Always clone since we will consume something */
 467		skb = skb_clone(orig_skb, GFP_ATOMIC);
 468		if (!skb) {
 469			KCM_STATS_INCR(psock->stats.rx_mem_fail);
 470			desc->error = -ENOMEM;
 471			break;
 472		}
 473
 474		cand_len = orig_len - eaten;
 475
 476		head = psock->rx_skb_head;
 477		if (!head) {
 478			head = skb;
 479			psock->rx_skb_head = head;
 480			/* Will set rx_skb_nextp on next packet if needed */
 481			psock->rx_skb_nextp = NULL;
 482			rxm = kcm_rx_msg(head);
 483			memset(rxm, 0, sizeof(*rxm));
 484			rxm->offset = orig_offset + eaten;
 485		} else {
 486			/* Unclone since we may be appending to an skb that we
 487			 * already share a frag_list with.
 488			 */
 489			err = skb_unclone(skb, GFP_ATOMIC);
 490			if (err) {
 491				KCM_STATS_INCR(psock->stats.rx_mem_fail);
 492				desc->error = err;
 493				break;
 494			}
 495
 496			rxm = kcm_rx_msg(head);
 497			*psock->rx_skb_nextp = skb;
 498			psock->rx_skb_nextp = &skb->next;
 499			head->data_len += skb->len;
 500			head->len += skb->len;
 501			head->truesize += skb->truesize;
 502		}
 503
 504		if (!rxm->full_len) {
 505			ssize_t len;
 506
 507			len = KCM_RUN_FILTER(psock->bpf_prog, head);
 508
 509			if (!len) {
 510				/* Need more header to determine length */
 511				if (!rxm->accum_len) {
 512					/* Start RX timer for new message */
 513					kcm_start_rx_timer(psock);
 514				}
 515				rxm->accum_len += cand_len;
 516				eaten += cand_len;
 517				KCM_STATS_INCR(psock->stats.rx_need_more_hdr);
 518				WARN_ON(eaten != orig_len);
 519				break;
 520			} else if (len > psock->sk->sk_rcvbuf) {
 521				/* Message length exceeds maximum allowed */
 522				KCM_STATS_INCR(psock->stats.rx_msg_too_big);
 523				desc->error = -EMSGSIZE;
 524				psock->rx_skb_head = NULL;
 525				kcm_abort_rx_psock(psock, EMSGSIZE, head);
 526				break;
 527			} else if (len <= (ssize_t)head->len -
 528					  skb->len - rxm->offset) {
 529				/* Length must be into new skb (and also
 530				 * greater than zero)
 531				 */
 532				KCM_STATS_INCR(psock->stats.rx_bad_hdr_len);
 533				desc->error = -EPROTO;
 534				psock->rx_skb_head = NULL;
 535				kcm_abort_rx_psock(psock, EPROTO, head);
 536				break;
 537			}
 538
 539			rxm->full_len = len;
 540		}
 541
 542		extra = (ssize_t)(rxm->accum_len + cand_len) - rxm->full_len;
 543
 544		if (extra < 0) {
 545			/* Message not complete yet. */
 546			if (rxm->full_len - rxm->accum_len >
 547			    tcp_inq(psock->sk)) {
  548				/* Don't have the whole message in the socket
 549				 * buffer. Set psock->rx_need_bytes to wait for
 550				 * the rest of the message. Also, set "early
 551				 * eaten" since we've already buffered the skb
 552				 * but don't consume yet per tcp_read_sock.
 553				 */
 554
 555				if (!rxm->accum_len) {
 556					/* Start RX timer for new message */
 557					kcm_start_rx_timer(psock);
 558				}
 559
 560				psock->rx_need_bytes = rxm->full_len -
 561						       rxm->accum_len;
 562				rxm->accum_len += cand_len;
 563				rxm->early_eaten = cand_len;
 564				KCM_STATS_ADD(psock->stats.rx_bytes, cand_len);
 565				desc->count = 0; /* Stop reading socket */
 566				break;
 567			}
 568			rxm->accum_len += cand_len;
 569			eaten += cand_len;
 570			WARN_ON(eaten != orig_len);
 571			break;
 572		}
 573
  574		/* Positive extra indicates more bytes than needed for the
 575		 * message
 576		 */
 577
 578		WARN_ON(extra > cand_len);
 579
 580		eaten += (cand_len - extra);
 581
 582		/* Hurray, we have a new message! */
 583		del_timer(&psock->rx_msg_timer);
 584		psock->rx_skb_head = NULL;
 585		KCM_STATS_INCR(psock->stats.rx_msgs);
 586
 587try_queue:
 588		kcm = reserve_rx_kcm(psock, head);
 589		if (!kcm) {
 590			/* Unable to reserve a KCM, message is held in psock. */
 591			break;
 592		}
 593
 594		if (kcm_queue_rcv_skb(&kcm->sk, head)) {
 595			/* Should mean socket buffer full */
 596			unreserve_rx_kcm(psock, false);
 597			goto try_queue;
 598		}
 599	}
 600
 601	if (cloned_orig)
 602		kfree_skb(orig_skb);
 603
 604	KCM_STATS_ADD(psock->stats.rx_bytes, eaten);
 605
 606	return eaten;
 607}
 608
 609/* Called with lock held on lower socket */
 610static int psock_tcp_read_sock(struct kcm_psock *psock)
 611{
 612	read_descriptor_t desc;
 613
 614	desc.arg.data = psock;
 615	desc.error = 0;
 616	desc.count = 1; /* give more than one skb per call */
 617
 618	/* sk should be locked here, so okay to do tcp_read_sock */
 619	tcp_read_sock(psock->sk, &desc, kcm_tcp_recv);
 620
 621	unreserve_rx_kcm(psock, true);
 622
 623	return desc.error;
 624}
 625
 626/* Lower sock lock held */
 627static void psock_tcp_data_ready(struct sock *sk)
 628{
 629	struct kcm_psock *psock;
 630
 631	read_lock_bh(&sk->sk_callback_lock);
 632
 633	psock = (struct kcm_psock *)sk->sk_user_data;
 634	if (unlikely(!psock || psock->rx_stopped))
 635		goto out;
 636
 637	if (psock->ready_rx_msg)
 638		goto out;
 639
 640	if (psock->rx_need_bytes) {
 641		if (tcp_inq(sk) >= psock->rx_need_bytes)
 642			psock->rx_need_bytes = 0;
 643		else
 644			goto out;
 645	}
 646
 647	if (psock_tcp_read_sock(psock) == -ENOMEM)
 648		queue_delayed_work(kcm_wq, &psock->rx_delayed_work, 0);
 649
 650out:
 651	read_unlock_bh(&sk->sk_callback_lock);
 652}
 653
 654static void do_psock_rx_work(struct kcm_psock *psock)
 
 655{
 656	read_descriptor_t rd_desc;
 657	struct sock *csk = psock->sk;
 658
 659	/* We need the read lock to synchronize with psock_tcp_data_ready. We
 660	 * need the socket lock for calling tcp_read_sock.
 661	 */
 662	lock_sock(csk);
 663	read_lock_bh(&csk->sk_callback_lock);
 664
 665	if (unlikely(csk->sk_user_data != psock))
 666		goto out;
 667
 668	if (unlikely(psock->rx_stopped))
 669		goto out;
 670
 671	if (psock->ready_rx_msg)
 672		goto out;
 673
 674	rd_desc.arg.data = psock;
 675
 676	if (psock_tcp_read_sock(psock) == -ENOMEM)
 677		queue_delayed_work(kcm_wq, &psock->rx_delayed_work, 0);
 678
 679out:
 680	read_unlock_bh(&csk->sk_callback_lock);
 681	release_sock(csk);
 682}
 683
 684static void psock_rx_work(struct work_struct *w)
 685{
 686	do_psock_rx_work(container_of(w, struct kcm_psock, rx_work));
 687}
 688
 689static void psock_rx_delayed_work(struct work_struct *w)
 690{
 691	do_psock_rx_work(container_of(w, struct kcm_psock,
 692				      rx_delayed_work.work));
 693}
 694
 695static void psock_tcp_state_change(struct sock *sk)
 696{
 697	/* TCP only does a POLLIN for a half close. Do a POLLHUP here
 698	 * since application will normally not poll with POLLIN
 699	 * on the TCP sockets.
 700	 */
 701
 702	report_csk_error(sk, EPIPE);
 703}
 704
 705static void psock_tcp_write_space(struct sock *sk)
 706{
 707	struct kcm_psock *psock;
 708	struct kcm_mux *mux;
 709	struct kcm_sock *kcm;
 710
 711	read_lock_bh(&sk->sk_callback_lock);
 712
 713	psock = (struct kcm_psock *)sk->sk_user_data;
 714	if (unlikely(!psock))
 715		goto out;
 716
 717	mux = psock->mux;
 718
 719	spin_lock_bh(&mux->lock);
 720
  721	/* If the psock is reserved, its kcm may be waiting for write space. */
 722	kcm = psock->tx_kcm;
 723	if (kcm)
 724		queue_work(kcm_wq, &kcm->tx_work);
 725
 726	spin_unlock_bh(&mux->lock);
 727out:
 728	read_unlock_bh(&sk->sk_callback_lock);
 729}
 730
 731static void unreserve_psock(struct kcm_sock *kcm);
 732
 733/* kcm sock is locked. */
 734static struct kcm_psock *reserve_psock(struct kcm_sock *kcm)
 735{
 736	struct kcm_mux *mux = kcm->mux;
 737	struct kcm_psock *psock;
 738
 739	psock = kcm->tx_psock;
 740
 741	smp_rmb(); /* Must read tx_psock before tx_wait */
 742
 743	if (psock) {
 744		WARN_ON(kcm->tx_wait);
 745		if (unlikely(psock->tx_stopped))
 746			unreserve_psock(kcm);
 747		else
 748			return kcm->tx_psock;
 749	}
 750
 751	spin_lock_bh(&mux->lock);
 752
  753	/* Check again under lock to see if a psock was reserved for this
  754	 * kcm via psock_now_avail().
 755	 */
 756	psock = kcm->tx_psock;
 757	if (unlikely(psock)) {
 758		WARN_ON(kcm->tx_wait);
 759		spin_unlock_bh(&mux->lock);
 760		return kcm->tx_psock;
 761	}
 762
 763	if (!list_empty(&mux->psocks_avail)) {
 764		psock = list_first_entry(&mux->psocks_avail,
 765					 struct kcm_psock,
 766					 psock_avail_list);
 767		list_del(&psock->psock_avail_list);
 768		if (kcm->tx_wait) {
 769			list_del(&kcm->wait_psock_list);
 770			kcm->tx_wait = false;
 771		}
 772		kcm->tx_psock = psock;
 773		psock->tx_kcm = kcm;
 774		KCM_STATS_INCR(psock->stats.reserved);
 775	} else if (!kcm->tx_wait) {
 776		list_add_tail(&kcm->wait_psock_list,
 777			      &mux->kcm_tx_waiters);
 778		kcm->tx_wait = true;
 779	}
 780
 781	spin_unlock_bh(&mux->lock);
 782
 783	return psock;
 784}
 785
 786/* mux lock held */
 787static void psock_now_avail(struct kcm_psock *psock)
 788{
 789	struct kcm_mux *mux = psock->mux;
 790	struct kcm_sock *kcm;
 791
 792	if (list_empty(&mux->kcm_tx_waiters)) {
 793		list_add_tail(&psock->psock_avail_list,
 794			      &mux->psocks_avail);
 795	} else {
 796		kcm = list_first_entry(&mux->kcm_tx_waiters,
 797				       struct kcm_sock,
 798				       wait_psock_list);
 799		list_del(&kcm->wait_psock_list);
 800		kcm->tx_wait = false;
 801		psock->tx_kcm = kcm;
 802
 803		/* Commit before changing tx_psock since that is read in
 804		 * reserve_psock before queuing work.
 805		 */
 806		smp_mb();
 807
 808		kcm->tx_psock = psock;
 809		KCM_STATS_INCR(psock->stats.reserved);
 810		queue_work(kcm_wq, &kcm->tx_work);
 811	}
 812}
 813
 814/* kcm sock is locked. */
 815static void unreserve_psock(struct kcm_sock *kcm)
 816{
 817	struct kcm_psock *psock;
 818	struct kcm_mux *mux = kcm->mux;
 819
 820	spin_lock_bh(&mux->lock);
 821
 822	psock = kcm->tx_psock;
 823
 824	if (WARN_ON(!psock)) {
 825		spin_unlock_bh(&mux->lock);
 826		return;
 827	}
 828
 829	smp_rmb(); /* Read tx_psock before tx_wait */
 830
 831	kcm_update_tx_mux_stats(mux, psock);
 832
 833	WARN_ON(kcm->tx_wait);
 834
 835	kcm->tx_psock = NULL;
 836	psock->tx_kcm = NULL;
 837	KCM_STATS_INCR(psock->stats.unreserved);
 838
 839	if (unlikely(psock->tx_stopped)) {
 840		if (psock->done) {
 841			/* Deferred free */
 842			list_del(&psock->psock_list);
 843			mux->psocks_cnt--;
 844			sock_put(psock->sk);
 845			fput(psock->sk->sk_socket->file);
 846			kmem_cache_free(kcm_psockp, psock);
 847		}
 848
 849		/* Don't put back on available list */
 850
 851		spin_unlock_bh(&mux->lock);
 852
 853		return;
 854	}
 855
 856	psock_now_avail(psock);
 857
 858	spin_unlock_bh(&mux->lock);
 859}
 860
 861static void kcm_report_tx_retry(struct kcm_sock *kcm)
 862{
 863	struct kcm_mux *mux = kcm->mux;
 864
 865	spin_lock_bh(&mux->lock);
 866	KCM_STATS_INCR(mux->stats.tx_retries);
 867	spin_unlock_bh(&mux->lock);
 868}
 869
 870/* Write any messages ready on the kcm socket.  Called with kcm sock lock
 871 * held.  Return bytes actually sent or error.
 872 */
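/* If a send is interrupted by -EAGAIN, the position (sent bytes, current
 * fragment index/offset and skb) is saved in the message's kcm_tx_msg state
 * so the next call, typically from kcm_tx_work() once write space becomes
 * available, resumes at do_frag instead of restarting the message.
 */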
 873static int kcm_write_msgs(struct kcm_sock *kcm)
 874{
 
 875	struct sock *sk = &kcm->sk;
 876	struct kcm_psock *psock;
 877	struct sk_buff *skb, *head;
 878	struct kcm_tx_msg *txm;
 879	unsigned short fragidx, frag_offset;
 880	unsigned int sent, total_sent = 0;
 881	int ret = 0;
 882
 883	kcm->tx_wait_more = false;
 884	psock = kcm->tx_psock;
 885	if (unlikely(psock && psock->tx_stopped)) {
 886		/* A reserved psock was aborted asynchronously. Unreserve
 887		 * it and we'll retry the message.
 888		 */
 889		unreserve_psock(kcm);
 890		kcm_report_tx_retry(kcm);
 891		if (skb_queue_empty(&sk->sk_write_queue))
 892			return 0;
 893
 894		kcm_tx_msg(skb_peek(&sk->sk_write_queue))->sent = 0;
 895
 896	} else if (skb_queue_empty(&sk->sk_write_queue)) {
 897		return 0;
 898	}
 899
 900	head = skb_peek(&sk->sk_write_queue);
 901	txm = kcm_tx_msg(head);
 902
 903	if (txm->sent) {
 904		/* Send of first skbuff in queue already in progress */
 905		if (WARN_ON(!psock)) {
 906			ret = -EINVAL;
 907			goto out;
 908		}
 909		sent = txm->sent;
 910		frag_offset = txm->frag_offset;
 911		fragidx = txm->fragidx;
 912		skb = txm->frag_skb;
 913
 914		goto do_frag;
 915	}
 916
 917try_again:
 918	psock = reserve_psock(kcm);
 919	if (!psock)
 920		goto out;
 921
 922	do {
 923		skb = head;
 924		txm = kcm_tx_msg(head);
 925		sent = 0;
 926
 927do_frag_list:
 928		if (WARN_ON(!skb_shinfo(skb)->nr_frags)) {
 929			ret = -EINVAL;
 930			goto out;
 931		}
 932
 933		for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags;
 934		     fragidx++) {
 935			skb_frag_t *frag;
 936
 937			frag_offset = 0;
 938do_frag:
 939			frag = &skb_shinfo(skb)->frags[fragidx];
 940			if (WARN_ON(!frag->size)) {
 941				ret = -EINVAL;
 942				goto out;
 943			}
 944
 945			ret = kernel_sendpage(psock->sk->sk_socket,
 946					      frag->page.p,
 947					      frag->page_offset + frag_offset,
 948					      frag->size - frag_offset,
 949					      MSG_DONTWAIT);
 950			if (ret <= 0) {
 951				if (ret == -EAGAIN) {
 952					/* Save state to try again when there's
 953					 * write space on the socket
 954					 */
 955					txm->sent = sent;
 956					txm->frag_offset = frag_offset;
 957					txm->fragidx = fragidx;
 958					txm->frag_skb = skb;
 959
 960					ret = 0;
 961					goto out;
 962				}
 963
 964				/* Hard failure in sending message, abort this
 965				 * psock since it has lost framing
  966				 * synchronization and retry sending the
 967				 * message from the beginning.
 968				 */
 969				kcm_abort_tx_psock(psock, ret ? -ret : EPIPE,
 970						   true);
 971				unreserve_psock(kcm);
 
 972
 973				txm->sent = 0;
 974				kcm_report_tx_retry(kcm);
 975				ret = 0;
 976
 977				goto try_again;
 978			}
 979
 980			sent += ret;
 981			frag_offset += ret;
 982			KCM_STATS_ADD(psock->stats.tx_bytes, ret);
 983			if (frag_offset < frag->size) {
 984				/* Not finished with this frag */
 985				goto do_frag;
 986			}
 987		}
 988
 989		if (skb == head) {
 990			if (skb_has_frag_list(skb)) {
 991				skb = skb_shinfo(skb)->frag_list;
 992				goto do_frag_list;
 
 993			}
 994		} else if (skb->next) {
 995			skb = skb->next;
 996			goto do_frag_list;
 
 997		}
 998
 999		/* Successfully sent the whole packet, account for it. */
1000		skb_dequeue(&sk->sk_write_queue);
1001		kfree_skb(head);
1002		sk->sk_wmem_queued -= sent;
1003		total_sent += sent;
1004		KCM_STATS_INCR(psock->stats.tx_msgs);
1005	} while ((head = skb_peek(&sk->sk_write_queue)));
1006out:
1007	if (!head) {
1008		/* Done with all queued messages. */
1009		WARN_ON(!skb_queue_empty(&sk->sk_write_queue));
1010		unreserve_psock(kcm);
 
1011	}
1012
1013	/* Check if write space is available */
1014	sk->sk_write_space(sk);
1015
1016	return total_sent ? : ret;
1017}
1018
1019static void kcm_tx_work(struct work_struct *w)
1020{
1021	struct kcm_sock *kcm = container_of(w, struct kcm_sock, tx_work);
1022	struct sock *sk = &kcm->sk;
1023	int err;
1024
1025	lock_sock(sk);
1026
1027	/* Primarily for SOCK_DGRAM sockets, also handle asynchronous tx
1028	 * aborts
1029	 */
1030	err = kcm_write_msgs(kcm);
1031	if (err < 0) {
1032		/* Hard failure in write, report error on KCM socket */
1033		pr_warn("KCM: Hard failure on kcm_write_msgs %d\n", err);
1034		report_csk_error(&kcm->sk, -err);
1035		goto out;
1036	}
1037
1038	/* Primarily for SOCK_SEQPACKET sockets */
1039	if (likely(sk->sk_socket) &&
1040	    test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
1041		clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1042		sk->sk_write_space(sk);
1043	}
1044
1045out:
1046	release_sock(sk);
1047}
1048
1049static void kcm_push(struct kcm_sock *kcm)
1050{
1051	if (kcm->tx_wait_more)
1052		kcm_write_msgs(kcm);
1053}
1054
1055static ssize_t kcm_sendpage(struct socket *sock, struct page *page,
1056			    int offset, size_t size, int flags)
1057
1058{
1059	struct sock *sk = sock->sk;
1060	struct kcm_sock *kcm = kcm_sk(sk);
1061	struct sk_buff *skb = NULL, *head = NULL;
1062	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
1063	bool eor;
1064	int err = 0;
1065	int i;
1066
1067	if (flags & MSG_SENDPAGE_NOTLAST)
1068		flags |= MSG_MORE;
1069
1070	/* No MSG_EOR from splice, only look at MSG_MORE */
1071	eor = !(flags & MSG_MORE);
1072
1073	lock_sock(sk);
1074
1075	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
1076
1077	err = -EPIPE;
1078	if (sk->sk_err)
1079		goto out_error;
1080
1081	if (kcm->seq_skb) {
1082		/* Previously opened message */
1083		head = kcm->seq_skb;
1084		skb = kcm_tx_msg(head)->last_skb;
1085		i = skb_shinfo(skb)->nr_frags;
1086
1087		if (skb_can_coalesce(skb, i, page, offset)) {
1088			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size);
1089			skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
1090			goto coalesced;
1091		}
1092
1093		if (i >= MAX_SKB_FRAGS) {
1094			struct sk_buff *tskb;
1095
1096			tskb = alloc_skb(0, sk->sk_allocation);
1097			while (!tskb) {
1098				kcm_push(kcm);
1099				err = sk_stream_wait_memory(sk, &timeo);
1100				if (err)
1101					goto out_error;
1102			}
1103
1104			if (head == skb)
1105				skb_shinfo(head)->frag_list = tskb;
1106			else
1107				skb->next = tskb;
1108
1109			skb = tskb;
1110			skb->ip_summed = CHECKSUM_UNNECESSARY;
1111			i = 0;
1112		}
1113	} else {
1114		/* Call the sk_stream functions to manage the sndbuf mem. */
1115		if (!sk_stream_memory_free(sk)) {
1116			kcm_push(kcm);
1117			set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1118			err = sk_stream_wait_memory(sk, &timeo);
1119			if (err)
1120				goto out_error;
1121		}
1122
1123		head = alloc_skb(0, sk->sk_allocation);
1124		while (!head) {
1125			kcm_push(kcm);
1126			err = sk_stream_wait_memory(sk, &timeo);
1127			if (err)
1128				goto out_error;
1129		}
1130
1131		skb = head;
1132		i = 0;
1133	}
1134
1135	get_page(page);
1136	skb_fill_page_desc(skb, i, page, offset, size);
1137	skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
1138
1139coalesced:
1140	skb->len += size;
1141	skb->data_len += size;
1142	skb->truesize += size;
1143	sk->sk_wmem_queued += size;
1144	sk_mem_charge(sk, size);
1145
1146	if (head != skb) {
1147		head->len += size;
1148		head->data_len += size;
1149		head->truesize += size;
1150	}
1151
1152	if (eor) {
1153		bool not_busy = skb_queue_empty(&sk->sk_write_queue);
1154
1155		/* Message complete, queue it on send buffer */
1156		__skb_queue_tail(&sk->sk_write_queue, head);
1157		kcm->seq_skb = NULL;
1158		KCM_STATS_INCR(kcm->stats.tx_msgs);
1159
1160		if (flags & MSG_BATCH) {
1161			kcm->tx_wait_more = true;
1162		} else if (kcm->tx_wait_more || not_busy) {
1163			err = kcm_write_msgs(kcm);
1164			if (err < 0) {
1165				/* We got a hard error in write_msgs but have
1166				 * already queued this message. Report an error
1167				 * in the socket, but don't affect return value
1168				 * from sendmsg
1169				 */
1170				pr_warn("KCM: Hard failure on kcm_write_msgs\n");
1171				report_csk_error(&kcm->sk, -err);
1172			}
1173		}
1174	} else {
1175		/* Message not complete, save state */
1176		kcm->seq_skb = head;
1177		kcm_tx_msg(head)->last_skb = skb;
1178	}
1179
1180	KCM_STATS_ADD(kcm->stats.tx_bytes, size);
1181
1182	release_sock(sk);
1183	return size;
1184
1185out_error:
1186	kcm_push(kcm);
1187
1188	err = sk_stream_error(sk, flags, err);
1189
1190	/* make sure we wake any epoll edge trigger waiter */
1191	if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 && err == -EAGAIN))
1192		sk->sk_write_space(sk);
1193
1194	release_sock(sk);
1195	return err;
1196}
1197
1198static int kcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
1199{
1200	struct sock *sk = sock->sk;
1201	struct kcm_sock *kcm = kcm_sk(sk);
1202	struct sk_buff *skb = NULL, *head = NULL;
1203	size_t copy, copied = 0;
1204	long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
1205	int eor = (sock->type == SOCK_DGRAM) ?
1206		  !(msg->msg_flags & MSG_MORE) : !!(msg->msg_flags & MSG_EOR);
1207	int err = -EPIPE;
1208
 
1209	lock_sock(sk);
1210
1211	/* Per tcp_sendmsg this should be in poll */
1212	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
1213
1214	if (sk->sk_err)
1215		goto out_error;
1216
1217	if (kcm->seq_skb) {
1218		/* Previously opened message */
1219		head = kcm->seq_skb;
1220		skb = kcm_tx_msg(head)->last_skb;
1221		goto start;
1222	}
1223
1224	/* Call the sk_stream functions to manage the sndbuf mem. */
1225	if (!sk_stream_memory_free(sk)) {
1226		kcm_push(kcm);
1227		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1228		err = sk_stream_wait_memory(sk, &timeo);
1229		if (err)
1230			goto out_error;
1231	}
1232
1233	/* New message, alloc head skb */
1234	head = alloc_skb(0, sk->sk_allocation);
1235	while (!head) {
1236		kcm_push(kcm);
1237		err = sk_stream_wait_memory(sk, &timeo);
1238		if (err)
1239			goto out_error;
1240
1241		head = alloc_skb(0, sk->sk_allocation);
1242	}
1243
1244	skb = head;
1245
1246	/* Set ip_summed to CHECKSUM_UNNECESSARY to avoid calling
1247	 * csum_and_copy_from_iter from skb_do_copy_data_nocache.
1248	 */
1249	skb->ip_summed = CHECKSUM_UNNECESSARY;
 
1250
1251start:
1252	while (msg_data_left(msg)) {
1253		bool merge = true;
1254		int i = skb_shinfo(skb)->nr_frags;
1255		struct page_frag *pfrag = sk_page_frag(sk);
1256
1257		if (!sk_page_frag_refill(sk, pfrag))
1258			goto wait_for_memory;
1259
1260		if (!skb_can_coalesce(skb, i, pfrag->page,
1261				      pfrag->offset)) {
1262			if (i == MAX_SKB_FRAGS) {
1263				struct sk_buff *tskb;
1264
1265				tskb = alloc_skb(0, sk->sk_allocation);
1266				if (!tskb)
1267					goto wait_for_memory;
1268
1269				if (head == skb)
1270					skb_shinfo(head)->frag_list = tskb;
1271				else
1272					skb->next = tskb;
1273
1274				skb = tskb;
1275				skb->ip_summed = CHECKSUM_UNNECESSARY;
1276				continue;
1277			}
1278			merge = false;
1279		}
1280
1281		copy = min_t(int, msg_data_left(msg),
1282			     pfrag->size - pfrag->offset);
1283
1284		if (!sk_wmem_schedule(sk, copy))
1285			goto wait_for_memory;
1286
1287		err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb,
1288					       pfrag->page,
1289					       pfrag->offset,
1290					       copy);
1291		if (err)
1292			goto out_error;
1293
1294		/* Update the skb. */
1295		if (merge) {
1296			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
1297		} else {
1298			skb_fill_page_desc(skb, i, pfrag->page,
1299					   pfrag->offset, copy);
1300			get_page(pfrag->page);
1301		}
1302
1303		pfrag->offset += copy;
1304		copied += copy;
1305		if (head != skb) {
1306			head->len += copy;
1307			head->data_len += copy;
1308		}
1309
1310		continue;
1311
1312wait_for_memory:
1313		kcm_push(kcm);
1314		err = sk_stream_wait_memory(sk, &timeo);
1315		if (err)
1316			goto out_error;
1317	}
1318
1319	if (eor) {
1320		bool not_busy = skb_queue_empty(&sk->sk_write_queue);
1321
1322		/* Message complete, queue it on send buffer */
1323		__skb_queue_tail(&sk->sk_write_queue, head);
1324		kcm->seq_skb = NULL;
1325		KCM_STATS_INCR(kcm->stats.tx_msgs);
1326
1327		if (msg->msg_flags & MSG_BATCH) {
1328			kcm->tx_wait_more = true;
1329		} else if (kcm->tx_wait_more || not_busy) {
1330			err = kcm_write_msgs(kcm);
1331			if (err < 0) {
1332				/* We got a hard error in write_msgs but have
1333				 * already queued this message. Report an error
1334				 * in the socket, but don't affect return value
1335				 * from sendmsg
1336				 */
1337				pr_warn("KCM: Hard failure on kcm_write_msgs\n");
1338				report_csk_error(&kcm->sk, -err);
1339			}
1340		}
1341	} else {
1342		/* Message not complete, save state */
1343partial_message:
1344		kcm->seq_skb = head;
1345		kcm_tx_msg(head)->last_skb = skb;
1346	}
1347
1348	KCM_STATS_ADD(kcm->stats.tx_bytes, copied);
1349
1350	release_sock(sk);
 
1351	return copied;
1352
1353out_error:
1354	kcm_push(kcm);
1355
1356	if (copied && sock->type == SOCK_SEQPACKET) {
1357		/* Wrote some bytes before encountering an
1358		 * error, return partial success.
1359		 */
1360		goto partial_message;
1361	}
1362
1363	if (head != kcm->seq_skb)
 
1364		kfree_skb(head);
1365
1366	err = sk_stream_error(sk, msg->msg_flags, err);
1367
1368	/* make sure we wake any epoll edge trigger waiter */
1369	if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 && err == -EAGAIN))
1370		sk->sk_write_space(sk);
1371
1372	release_sock(sk);
 
1373	return err;
1374}
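/* Userspace framing sketch (fds and buffers assumed): on a SOCK_SEQPACKET KCM
 * socket a message is closed with MSG_EOR, and MSG_BATCH defers transmission
 * until a later send without it:
 *
 *	send(kcm_fd, msg1, len1, MSG_EOR | MSG_BATCH);	// queued only
 *	send(kcm_fd, msg2, len2, MSG_EOR);		// flushes both messages
 *
 * On SOCK_DGRAM the message boundary is instead the absence of MSG_MORE.
 */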
1375
1376static struct sk_buff *kcm_wait_data(struct sock *sk, int flags,
1377				     long timeo, int *err)
1378{
1379	struct sk_buff *skb;
1380
1381	while (!(skb = skb_peek(&sk->sk_receive_queue))) {
1382		if (sk->sk_err) {
1383			*err = sock_error(sk);
1384			return NULL;
1385		}
1386
1387		if (sock_flag(sk, SOCK_DONE))
1388			return NULL;
1389
1390		if ((flags & MSG_DONTWAIT) || !timeo) {
1391			*err = -EAGAIN;
1392			return NULL;
1393		}
1394
1395		sk_wait_data(sk, &timeo, NULL);
1396
1397		/* Handle signals */
1398		if (signal_pending(current)) {
1399			*err = sock_intr_errno(timeo);
1400			return NULL;
1401		}
1402	}
1403
1404	return skb;
1405}
1406
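/* Receive one message per call. If the buffer is smaller than the message,
 * SOCK_DGRAM drops the remainder and sets MSG_TRUNC, while SOCK_SEQPACKET
 * leaves the tail queued so further reads continue the same message until
 * MSG_EOR is returned.
 */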
1407static int kcm_recvmsg(struct socket *sock, struct msghdr *msg,
1408		       size_t len, int flags)
1409{
1410	struct sock *sk = sock->sk;
1411	struct kcm_sock *kcm = kcm_sk(sk);
1412	int err = 0;
1413	long timeo;
1414	struct kcm_rx_msg *rxm;
1415	int copied = 0;
1416	struct sk_buff *skb;
1417
1418	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1419
1420	lock_sock(sk);
1421
1422	skb = kcm_wait_data(sk, flags, timeo, &err);
1423	if (!skb)
1424		goto out;
1425
1426	/* Okay, have a message on the receive queue */
1427
1428	rxm = kcm_rx_msg(skb);
1429
1430	if (len > rxm->full_len)
1431		len = rxm->full_len;
1432
1433	err = skb_copy_datagram_msg(skb, rxm->offset, msg, len);
1434	if (err < 0)
1435		goto out;
1436
1437	copied = len;
1438	if (likely(!(flags & MSG_PEEK))) {
1439		KCM_STATS_ADD(kcm->stats.rx_bytes, copied);
1440		if (copied < rxm->full_len) {
1441			if (sock->type == SOCK_DGRAM) {
1442				/* Truncated message */
1443				msg->msg_flags |= MSG_TRUNC;
1444				goto msg_finished;
1445			}
1446			rxm->offset += copied;
1447			rxm->full_len -= copied;
1448		} else {
1449msg_finished:
1450			/* Finished with message */
1451			msg->msg_flags |= MSG_EOR;
1452			KCM_STATS_INCR(kcm->stats.rx_msgs);
1453			skb_unlink(skb, &sk->sk_receive_queue);
1454			kfree_skb(skb);
1455		}
1456	}
1457
1458out:
1459	release_sock(sk);
1460
1461	return copied ? : err;
1462}
1463
1464static ssize_t kcm_sock_splice(struct sock *sk,
1465			       struct pipe_inode_info *pipe,
1466			       struct splice_pipe_desc *spd)
1467{
1468	int ret;
1469
1470	release_sock(sk);
1471	ret = splice_to_pipe(pipe, spd);
1472	lock_sock(sk);
1473
1474	return ret;
1475}
1476
1477static ssize_t kcm_splice_read(struct socket *sock, loff_t *ppos,
1478			       struct pipe_inode_info *pipe, size_t len,
1479			       unsigned int flags)
1480{
1481	struct sock *sk = sock->sk;
1482	struct kcm_sock *kcm = kcm_sk(sk);
1483	long timeo;
1484	struct kcm_rx_msg *rxm;
1485	int err = 0;
1486	size_t copied;
1487	struct sk_buff *skb;
1488
 1489	/* Only support splice for SOCK_SEQPACKET */
1490
1491	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1492
1493	lock_sock(sk);
1494
1495	skb = kcm_wait_data(sk, flags, timeo, &err);
1496	if (!skb)
1497		goto err_out;
1498
1499	/* Okay, have a message on the receive queue */
1500
1501	rxm = kcm_rx_msg(skb);
1502
1503	if (len > rxm->full_len)
1504		len = rxm->full_len;
1505
1506	copied = skb_splice_bits(skb, sk, rxm->offset, pipe, len, flags,
1507				 kcm_sock_splice);
1508	if (copied < 0) {
1509		err = copied;
1510		goto err_out;
1511	}
1512
1513	KCM_STATS_ADD(kcm->stats.rx_bytes, copied);
1514
1515	rxm->offset += copied;
1516	rxm->full_len -= copied;
1517
1518	/* We have no way to return MSG_EOR. If all the bytes have been
1519	 * read we still leave the message in the receive socket buffer.
1520	 * A subsequent recvmsg needs to be done to return MSG_EOR and
1521	 * finish reading the message.
1522	 */
1523
1524	release_sock(sk);
1525
1526	return copied;
1527
1528err_out:
1529	release_sock(sk);
1530
1531	return err;
1532}
1533
1534/* kcm sock lock held */
1535static void kcm_recv_disable(struct kcm_sock *kcm)
1536{
1537	struct kcm_mux *mux = kcm->mux;
1538
1539	if (kcm->rx_disabled)
1540		return;
1541
1542	spin_lock_bh(&mux->rx_lock);
1543
1544	kcm->rx_disabled = 1;
1545
1546	/* If a psock is reserved we'll do cleanup in unreserve */
1547	if (!kcm->rx_psock) {
1548		if (kcm->rx_wait) {
1549			list_del(&kcm->wait_rx_list);
1550			kcm->rx_wait = false;
 
1551		}
1552
1553		requeue_rx_msgs(mux, &kcm->sk.sk_receive_queue);
1554	}
1555
1556	spin_unlock_bh(&mux->rx_lock);
1557}
1558
1559/* kcm sock lock held */
1560static void kcm_recv_enable(struct kcm_sock *kcm)
1561{
1562	struct kcm_mux *mux = kcm->mux;
1563
1564	if (!kcm->rx_disabled)
1565		return;
1566
1567	spin_lock_bh(&mux->rx_lock);
1568
1569	kcm->rx_disabled = 0;
1570	kcm_rcv_ready(kcm);
1571
1572	spin_unlock_bh(&mux->rx_lock);
1573}
1574
1575static int kcm_setsockopt(struct socket *sock, int level, int optname,
1576			  char __user *optval, unsigned int optlen)
1577{
1578	struct kcm_sock *kcm = kcm_sk(sock->sk);
1579	int val, valbool;
1580	int err = 0;
1581
1582	if (level != SOL_KCM)
1583		return -ENOPROTOOPT;
1584
1585	if (optlen < sizeof(int))
1586		return -EINVAL;
1587
1588	if (get_user(val, (int __user *)optval))
1589		return -EINVAL;
1590
1591	valbool = val ? 1 : 0;
1592
1593	switch (optname) {
1594	case KCM_RECV_DISABLE:
1595		lock_sock(&kcm->sk);
1596		if (valbool)
1597			kcm_recv_disable(kcm);
1598		else
1599			kcm_recv_enable(kcm);
1600		release_sock(&kcm->sk);
1601		break;
1602	default:
1603		err = -ENOPROTOOPT;
1604	}
1605
1606	return err;
1607}
1608
1609static int kcm_getsockopt(struct socket *sock, int level, int optname,
1610			  char __user *optval, int __user *optlen)
1611{
1612	struct kcm_sock *kcm = kcm_sk(sock->sk);
1613	int val, len;
1614
1615	if (level != SOL_KCM)
1616		return -ENOPROTOOPT;
1617
1618	if (get_user(len, optlen))
1619		return -EFAULT;
1620
1621	len = min_t(unsigned int, len, sizeof(int));
1622	if (len < 0)
1623		return -EINVAL;
1624
1625	switch (optname) {
1626	case KCM_RECV_DISABLE:
1627		val = kcm->rx_disabled;
1628		break;
1629	default:
1630		return -ENOPROTOOPT;
1631	}
1632
1633	if (put_user(len, optlen))
1634		return -EFAULT;
1635	if (copy_to_user(optval, &val, len))
1636		return -EFAULT;
1637	return 0;
1638}
1639
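/* Attach a new KCM socket to a mux: mark it established for poll, give it the
 * lowest unused index among the mux's sockets, and add it to the receiver
 * list via kcm_rcv_ready() so it can start taking messages.
 */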
1640static void init_kcm_sock(struct kcm_sock *kcm, struct kcm_mux *mux)
1641{
1642	struct kcm_sock *tkcm;
1643	struct list_head *head;
1644	int index = 0;
1645
1646	/* For SOCK_SEQPACKET sock type, datagram_poll checks the sk_state, so
1647	 * we set sk_state, otherwise epoll_wait always returns right away with
1648	 * POLLHUP
1649	 */
1650	kcm->sk.sk_state = TCP_ESTABLISHED;
1651
1652	/* Add to mux's kcm sockets list */
1653	kcm->mux = mux;
1654	spin_lock_bh(&mux->lock);
1655
1656	head = &mux->kcm_socks;
1657	list_for_each_entry(tkcm, &mux->kcm_socks, kcm_sock_list) {
1658		if (tkcm->index != index)
1659			break;
1660		head = &tkcm->kcm_sock_list;
1661		index++;
1662	}
1663
1664	list_add(&kcm->kcm_sock_list, head);
1665	kcm->index = index;
1666
1667	mux->kcm_socks_cnt++;
1668	spin_unlock_bh(&mux->lock);
1669
1670	INIT_WORK(&kcm->tx_work, kcm_tx_work);
 
1671
1672	spin_lock_bh(&mux->rx_lock);
1673	kcm_rcv_ready(kcm);
1674	spin_unlock_bh(&mux->rx_lock);
1675}
1676
1677static void kcm_rx_msg_timeout(unsigned long arg)
1678{
1679	struct kcm_psock *psock = (struct kcm_psock *)arg;
1680
1681	/* Message assembly timed out */
1682	KCM_STATS_INCR(psock->stats.rx_msg_timeouts);
1683	kcm_abort_rx_psock(psock, ETIMEDOUT, NULL);
1684}
1685
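/* Wire a connected TCP socket into the mux: allocate a psock, set up the
 * message-assembly timer and rx work items, take a reference on the lower
 * sock, swap in the psock_tcp_* callbacks under sk_callback_lock, and make
 * the psock available for both transmit and receive.
 */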
1686static int kcm_attach(struct socket *sock, struct socket *csock,
1687		      struct bpf_prog *prog)
1688{
1689	struct kcm_sock *kcm = kcm_sk(sock->sk);
1690	struct kcm_mux *mux = kcm->mux;
1691	struct sock *csk;
1692	struct kcm_psock *psock = NULL, *tpsock;
1693	struct list_head *head;
1694	int index = 0;
1695
1696	if (csock->ops->family != PF_INET &&
1697	    csock->ops->family != PF_INET6)
1698		return -EINVAL;
1699
1700	csk = csock->sk;
1701	if (!csk)
1702		return -EINVAL;
1703
1704	/* Only support TCP for now */
1705	if (csk->sk_protocol != IPPROTO_TCP)
1706		return -EINVAL;
1707
1708	psock = kmem_cache_zalloc(kcm_psockp, GFP_KERNEL);
1709	if (!psock)
1710		return -ENOMEM;
1711
1712	psock->mux = mux;
1713	psock->sk = csk;
1714	psock->bpf_prog = prog;
1715
1716	setup_timer(&psock->rx_msg_timer, kcm_rx_msg_timeout,
1717		    (unsigned long)psock);
1718
1719	INIT_WORK(&psock->rx_work, psock_rx_work);
1720	INIT_DELAYED_WORK(&psock->rx_delayed_work, psock_rx_delayed_work);
1721
1722	sock_hold(csk);
1723
1724	write_lock_bh(&csk->sk_callback_lock);
1725	psock->save_data_ready = csk->sk_data_ready;
1726	psock->save_write_space = csk->sk_write_space;
1727	psock->save_state_change = csk->sk_state_change;
1728	csk->sk_user_data = psock;
1729	csk->sk_data_ready = psock_tcp_data_ready;
1730	csk->sk_write_space = psock_tcp_write_space;
1731	csk->sk_state_change = psock_tcp_state_change;
 
1732	write_unlock_bh(&csk->sk_callback_lock);
1733
1734	/* Finished initialization, now add the psock to the MUX. */
1735	spin_lock_bh(&mux->lock);
1736	head = &mux->psocks;
1737	list_for_each_entry(tpsock, &mux->psocks, psock_list) {
1738		if (tpsock->index != index)
1739			break;
1740		head = &tpsock->psock_list;
1741		index++;
1742	}
1743
1744	list_add(&psock->psock_list, head);
1745	psock->index = index;
1746
1747	KCM_STATS_INCR(mux->stats.psock_attach);
1748	mux->psocks_cnt++;
1749	psock_now_avail(psock);
1750	spin_unlock_bh(&mux->lock);
1751
1752	/* Schedule RX work in case there are already bytes queued */
1753	queue_work(kcm_wq, &psock->rx_work);
1754
1755	return 0;
1756}
1757
1758static int kcm_attach_ioctl(struct socket *sock, struct kcm_attach *info)
1759{
1760	struct socket *csock;
1761	struct bpf_prog *prog;
1762	int err;
1763
1764	csock = sockfd_lookup(info->fd, &err);
1765	if (!csock)
1766		return -ENOENT;
1767
1768	prog = bpf_prog_get(info->bpf_fd);
1769	if (IS_ERR(prog)) {
1770		err = PTR_ERR(prog);
1771		goto out;
1772	}
1773
1774	if (prog->type != BPF_PROG_TYPE_SOCKET_FILTER) {
1775		bpf_prog_put(prog);
1776		err = -EINVAL;
1777		goto out;
1778	}
1779
1780	err = kcm_attach(sock, csock, prog);
1781	if (err) {
1782		bpf_prog_put(prog);
1783		goto out;
1784	}
1785
1786	/* Keep reference on file also */
1787
1788	return 0;
1789out:
1790	fput(csock->file);
1791	return err;
1792}
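/* From userspace (sketch, setup of the fds assumed elsewhere): a KCM socket
 * created with socket(AF_KCM, SOCK_DGRAM, KCMPROTO_CONNECTED) gets a
 * connected TCP socket and a parser program attached roughly as:
 *
 *	struct kcm_attach info = {
 *		.fd = tcp_fd,		// connected TCP socket
 *		.bpf_fd = prog_fd,	// BPF_PROG_TYPE_SOCKET_FILTER fd
 *	};
 *	ioctl(kcm_fd, SIOCKCMATTACH, &info);
 *
 * Field names follow the struct used above; error handling is omitted.
 */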
1793
1794static void kcm_unattach(struct kcm_psock *psock)
1795{
1796	struct sock *csk = psock->sk;
1797	struct kcm_mux *mux = psock->mux;
1798
1799	/* Stop getting callbacks from TCP socket. After this there should
1800	 * be no way to reserve a kcm for this psock.
1801	 */
1802	write_lock_bh(&csk->sk_callback_lock);
1803	csk->sk_user_data = NULL;
1804	csk->sk_data_ready = psock->save_data_ready;
1805	csk->sk_write_space = psock->save_write_space;
1806	csk->sk_state_change = psock->save_state_change;
1807	psock->rx_stopped = 1;
1808
1809	if (WARN_ON(psock->rx_kcm)) {
1810		write_unlock_bh(&csk->sk_callback_lock);
 
1811		return;
1812	}
1813
1814	spin_lock_bh(&mux->rx_lock);
1815
1816	/* Stop receiver activities. After this point psock should not be
1817	 * able to get onto ready list either through callbacks or work.
1818	 */
1819	if (psock->ready_rx_msg) {
1820		list_del(&psock->psock_ready_list);
1821		kfree_skb(psock->ready_rx_msg);
1822		psock->ready_rx_msg = NULL;
1823		KCM_STATS_INCR(mux->stats.rx_ready_drops);
1824	}
1825
1826	spin_unlock_bh(&mux->rx_lock);
1827
1828	write_unlock_bh(&csk->sk_callback_lock);
1829
1830	del_timer_sync(&psock->rx_msg_timer);
1831	cancel_work_sync(&psock->rx_work);
1832	cancel_delayed_work_sync(&psock->rx_delayed_work);
 
1833
1834	bpf_prog_put(psock->bpf_prog);
1835
1836	kfree_skb(psock->rx_skb_head);
1837	psock->rx_skb_head = NULL;
1838
1839	spin_lock_bh(&mux->lock);
1840
1841	aggregate_psock_stats(&psock->stats, &mux->aggregate_psock_stats);
 
1842
1843	KCM_STATS_INCR(mux->stats.psock_unattach);
1844
1845	if (psock->tx_kcm) {
1846		/* psock was reserved.  Just mark it finished and we will clean
 1847		 * up in the kcm paths; we need the kcm lock, which cannot be
 1848		 * acquired here.
1849		 */
1850		KCM_STATS_INCR(mux->stats.psock_unattach_rsvd);
1851		spin_unlock_bh(&mux->lock);
1852
1853		/* We are unattaching a socket that is reserved. Abort the
1854		 * socket since we may be out of sync in sending on it. We need
1855		 * to do this without the mux lock.
1856		 */
1857		kcm_abort_tx_psock(psock, EPIPE, false);
1858
1859		spin_lock_bh(&mux->lock);
1860		if (!psock->tx_kcm) {
 1861			/* psock now unreserved in the window where the mux was unlocked */
1862			goto no_reserved;
1863		}
1864		psock->done = 1;
1865
1866		/* Commit done before queuing work to process it */
1867		smp_mb();
1868
1869		/* Queue tx work to make sure psock->done is handled */
1870		queue_work(kcm_wq, &psock->tx_kcm->tx_work);
1871		spin_unlock_bh(&mux->lock);
1872	} else {
1873no_reserved:
1874		if (!psock->tx_stopped)
1875			list_del(&psock->psock_avail_list);
1876		list_del(&psock->psock_list);
1877		mux->psocks_cnt--;
1878		spin_unlock_bh(&mux->lock);
1879
1880		sock_put(csk);
1881		fput(csk->sk_socket->file);
1882		kmem_cache_free(kcm_psockp, psock);
1883	}
1884}
1885
1886static int kcm_unattach_ioctl(struct socket *sock, struct kcm_unattach *info)
1887{
1888	struct kcm_sock *kcm = kcm_sk(sock->sk);
1889	struct kcm_mux *mux = kcm->mux;
1890	struct kcm_psock *psock;
1891	struct socket *csock;
1892	struct sock *csk;
1893	int err;
1894
1895	csock = sockfd_lookup(info->fd, &err);
1896	if (!csock)
1897		return -ENOENT;
1898
1899	csk = csock->sk;
1900	if (!csk) {
1901		err = -EINVAL;
1902		goto out;
1903	}
1904
1905	err = -ENOENT;
1906
1907	spin_lock_bh(&mux->lock);
1908
1909	list_for_each_entry(psock, &mux->psocks, psock_list) {
1910		if (psock->sk != csk)
1911			continue;
1912
1913		/* Found the matching psock */
1914
1915		if (psock->unattaching || WARN_ON(psock->done)) {
1916			err = -EALREADY;
1917			break;
1918		}
1919
1920		psock->unattaching = 1;
1921
1922		spin_unlock_bh(&mux->lock);
1923
 
1924		kcm_unattach(psock);
1925
1926		err = 0;
1927		goto out;
1928	}
1929
1930	spin_unlock_bh(&mux->lock);
1931
1932out:
1933	fput(csock->file);
1934	return err;
1935}
1936
1937static struct proto kcm_proto = {
1938	.name	= "KCM",
1939	.owner	= THIS_MODULE,
1940	.obj_size = sizeof(struct kcm_sock),
1941};
1942
1943/* Clone a kcm socket. */
1944static int kcm_clone(struct socket *osock, struct kcm_clone *info,
1945		     struct socket **newsockp)
1946{
1947	struct socket *newsock;
1948	struct sock *newsk;
1949	struct file *newfile;
1950	int err, newfd;
1951
1952	err = -ENFILE;
1953	newsock = sock_alloc();
1954	if (!newsock)
1955		goto out;
1956
1957	newsock->type = osock->type;
1958	newsock->ops = osock->ops;
1959
1960	__module_get(newsock->ops->owner);
1961
1962	newfd = get_unused_fd_flags(0);
1963	if (unlikely(newfd < 0)) {
1964		err = newfd;
1965		goto out_fd_fail;
1966	}
1967
1968	newfile = sock_alloc_file(newsock, 0, osock->sk->sk_prot_creator->name);
1969	if (unlikely(IS_ERR(newfile))) {
1970		err = PTR_ERR(newfile);
1971		goto out_sock_alloc_fail;
1972	}
1973
1974	newsk = sk_alloc(sock_net(osock->sk), PF_KCM, GFP_KERNEL,
1975			 &kcm_proto, true);
1976	if (!newsk) {
1977		err = -ENOMEM;
1978		goto out_sk_alloc_fail;
1979	}
1980
1981	sock_init_data(newsock, newsk);
1982	init_kcm_sock(kcm_sk(newsk), kcm_sk(osock->sk)->mux);
1983
1984	fd_install(newfd, newfile);
1985	*newsockp = newsock;
1986	info->fd = newfd;
1987
1988	return 0;
1989
1990out_sk_alloc_fail:
1991	fput(newfile);
1992out_sock_alloc_fail:
1993	put_unused_fd(newfd);
1994out_fd_fail:
1995	sock_release(newsock);
1996out:
1997	return err;
1998}
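/* Userspace sketch (error handling omitted): SIOCKCMCLONE hands back another
 * KCM socket on the same mux, with the new descriptor returned in info.fd:
 *
 *	struct kcm_clone info = {};
 *
 *	ioctl(kcm_fd, SIOCKCMCLONE, &info);
 *	// info.fd is now a new KCM socket sharing kcm_fd's mux
 */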
1999
2000static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
2001{
2002	int err;
2003
2004	switch (cmd) {
2005	case SIOCKCMATTACH: {
2006		struct kcm_attach info;
2007
2008		if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
2009			err = -EFAULT;
2010
2011		err = kcm_attach_ioctl(sock, &info);
2012
2013		break;
2014	}
2015	case SIOCKCMUNATTACH: {
2016		struct kcm_unattach info;
2017
2018		if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
2019			err = -EFAULT;
2020
2021		err = kcm_unattach_ioctl(sock, &info);
2022
2023		break;
2024	}
2025	case SIOCKCMCLONE: {
2026		struct kcm_clone info;
2027		struct socket *newsock = NULL;
2028
2029		if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
2030			err = -EFAULT;
2031
2032		err = kcm_clone(sock, &info, &newsock);
2033
2034		if (!err) {
2035			if (copy_to_user((void __user *)arg, &info,
2036					 sizeof(info))) {
2037				err = -EFAULT;
2038				sock_release(newsock);
2039			}
2040		}
2041
 
2042		break;
2043	}
2044	default:
2045		err = -ENOIOCTLCMD;
2046		break;
2047	}
2048
2049	return err;
2050}
2051
2052static void free_mux(struct rcu_head *rcu)
2053{
2054	struct kcm_mux *mux = container_of(rcu,
2055	    struct kcm_mux, rcu);
2056
2057	kmem_cache_free(kcm_muxp, mux);
2058}
2059
2060static void release_mux(struct kcm_mux *mux)
2061{
2062	struct kcm_net *knet = mux->knet;
2063	struct kcm_psock *psock, *tmp_psock;
2064
2065	/* Release psocks */
2066	list_for_each_entry_safe(psock, tmp_psock,
2067				 &mux->psocks, psock_list) {
2068		if (!WARN_ON(psock->unattaching))
2069			kcm_unattach(psock);
2070	}
2071
2072	if (WARN_ON(mux->psocks_cnt))
2073		return;
2074
2075	__skb_queue_purge(&mux->rx_hold_queue);
2076
2077	mutex_lock(&knet->mutex);
2078	aggregate_mux_stats(&mux->stats, &knet->aggregate_mux_stats);
2079	aggregate_psock_stats(&mux->aggregate_psock_stats,
2080			      &knet->aggregate_psock_stats);
2081	list_del_rcu(&mux->kcm_mux_list);
2082	knet->count--;
2083	mutex_unlock(&knet->mutex);
2084
2085	call_rcu(&mux->rcu, free_mux);
2086}
2087
2088static void kcm_done(struct kcm_sock *kcm)
2089{
2090	struct kcm_mux *mux = kcm->mux;
2091	struct sock *sk = &kcm->sk;
2092	int socks_cnt;
2093
2094	spin_lock_bh(&mux->rx_lock);
2095	if (kcm->rx_psock) {
2096		/* Cleanup in unreserve_rx_kcm */
2097		WARN_ON(kcm->done);
2098		kcm->rx_disabled = 1;
2099		kcm->done = 1;
2100		spin_unlock_bh(&mux->rx_lock);
2101		return;
2102	}
2103
2104	if (kcm->rx_wait) {
2105		list_del(&kcm->wait_rx_list);
2106		kcm->rx_wait = false;
 
2107	}
2108	/* Move any pending receive messages to other kcm sockets */
2109	requeue_rx_msgs(mux, &sk->sk_receive_queue);
2110
2111	spin_unlock_bh(&mux->rx_lock);
2112
2113	if (WARN_ON(sk_rmem_alloc_get(sk)))
2114		return;
2115
2116	/* Detach from MUX */
2117	spin_lock_bh(&mux->lock);
2118
2119	list_del(&kcm->kcm_sock_list);
2120	mux->kcm_socks_cnt--;
2121	socks_cnt = mux->kcm_socks_cnt;
2122
2123	spin_unlock_bh(&mux->lock);
2124
2125	if (!socks_cnt) {
2126		/* We are done with the mux now. */
2127		release_mux(mux);
2128	}
2129
2130	WARN_ON(kcm->rx_wait);
2131
2132	sock_put(&kcm->sk);
2133}
2134
2135/* Called by kcm_release to close a KCM socket.
2136 * If this is the last KCM socket on the MUX, destroy the MUX.
2137 */
2138static int kcm_release(struct socket *sock)
2139{
2140	struct sock *sk = sock->sk;
2141	struct kcm_sock *kcm;
2142	struct kcm_mux *mux;
2143	struct kcm_psock *psock;
2144
2145	if (!sk)
2146		return 0;
2147
2148	kcm = kcm_sk(sk);
2149	mux = kcm->mux;
2150
 
2151	sock_orphan(sk);
2152	kfree_skb(kcm->seq_skb);
2153
2154	lock_sock(sk);
2155	/* Purge queue under lock to avoid race condition with tx_work trying
2156	 * to act when queue is nonempty. If tx_work runs after this point
2157	 * it will just return.
2158	 */
2159	__skb_queue_purge(&sk->sk_write_queue);
2160	release_sock(sk);
2161
2162	spin_lock_bh(&mux->lock);
2163	if (kcm->tx_wait) {
2164		/* Take off the tx_wait list; after this point there should be no way
2165		 * that a psock will be assigned to this kcm.
2166		 */
2167		list_del(&kcm->wait_psock_list);
2168		kcm->tx_wait = false;
2169	}
2170	spin_unlock_bh(&mux->lock);
2171
2172	/* Cancel work. After this point there should be no outside references
2173	 * to the kcm socket.
2174	 */
2175	cancel_work_sync(&kcm->tx_work);
2176
2177	lock_sock(sk);
2178	psock = kcm->tx_psock;
2179	if (psock) {
2180		/* A psock was reserved, so we need to kill it since it
2181		 * may already have some bytes queued from a message. We
2182		 * need to do this after removing kcm from tx_wait list.
2183		 */
2184		kcm_abort_tx_psock(psock, EPIPE, false);
2185		unreserve_psock(kcm);
2186	}
2187	release_sock(sk);
2188
2189	WARN_ON(kcm->tx_wait);
2190	WARN_ON(kcm->tx_psock);
2191
2192	sock->sk = NULL;
2193
2194	kcm_done(kcm);
2195
2196	return 0;
2197}
2198
2199static const struct proto_ops kcm_dgram_ops = {
2200	.family =	PF_KCM,
2201	.owner =	THIS_MODULE,
2202	.release =	kcm_release,
2203	.bind =		sock_no_bind,
2204	.connect =	sock_no_connect,
2205	.socketpair =	sock_no_socketpair,
2206	.accept =	sock_no_accept,
2207	.getname =	sock_no_getname,
2208	.poll =		datagram_poll,
2209	.ioctl =	kcm_ioctl,
2210	.listen =	sock_no_listen,
2211	.shutdown =	sock_no_shutdown,
2212	.setsockopt =	kcm_setsockopt,
2213	.getsockopt =	kcm_getsockopt,
2214	.sendmsg =	kcm_sendmsg,
2215	.recvmsg =	kcm_recvmsg,
2216	.mmap =		sock_no_mmap,
2217	.sendpage =	kcm_sendpage,
2218};
2219
2220static const struct proto_ops kcm_seqpacket_ops = {
2221	.family =	PF_KCM,
2222	.owner =	THIS_MODULE,
2223	.release =	kcm_release,
2224	.bind =		sock_no_bind,
2225	.connect =	sock_no_connect,
2226	.socketpair =	sock_no_socketpair,
2227	.accept =	sock_no_accept,
2228	.getname =	sock_no_getname,
2229	.poll =		datagram_poll,
2230	.ioctl =	kcm_ioctl,
2231	.listen =	sock_no_listen,
2232	.shutdown =	sock_no_shutdown,
2233	.setsockopt =	kcm_setsockopt,
2234	.getsockopt =	kcm_getsockopt,
2235	.sendmsg =	kcm_sendmsg,
2236	.recvmsg =	kcm_recvmsg,
2237	.mmap =		sock_no_mmap,
2238	.sendpage =	kcm_sendpage,
2239	.splice_read =	kcm_splice_read,
2240};
2241
2242/* Create proto operation for kcm sockets */
2243static int kcm_create(struct net *net, struct socket *sock,
2244		      int protocol, int kern)
2245{
2246	struct kcm_net *knet = net_generic(net, kcm_net_id);
2247	struct sock *sk;
2248	struct kcm_mux *mux;
2249
2250	switch (sock->type) {
2251	case SOCK_DGRAM:
2252		sock->ops = &kcm_dgram_ops;
2253		break;
2254	case SOCK_SEQPACKET:
2255		sock->ops = &kcm_seqpacket_ops;
2256		break;
2257	default:
2258		return -ESOCKTNOSUPPORT;
2259	}
2260
2261	if (protocol != KCMPROTO_CONNECTED)
2262		return -EPROTONOSUPPORT;
2263
2264	sk = sk_alloc(net, PF_KCM, GFP_KERNEL, &kcm_proto, kern);
2265	if (!sk)
2266		return -ENOMEM;
2267
2268	/* Allocate a kcm mux, shared between KCM sockets */
2269	mux = kmem_cache_zalloc(kcm_muxp, GFP_KERNEL);
2270	if (!mux) {
2271		sk_free(sk);
2272		return -ENOMEM;
2273	}
2274
2275	spin_lock_init(&mux->lock);
2276	spin_lock_init(&mux->rx_lock);
2277	INIT_LIST_HEAD(&mux->kcm_socks);
2278	INIT_LIST_HEAD(&mux->kcm_rx_waiters);
2279	INIT_LIST_HEAD(&mux->kcm_tx_waiters);
2280
2281	INIT_LIST_HEAD(&mux->psocks);
2282	INIT_LIST_HEAD(&mux->psocks_ready);
2283	INIT_LIST_HEAD(&mux->psocks_avail);
2284
2285	mux->knet = knet;
2286
2287	/* Add new MUX to list */
2288	mutex_lock(&knet->mutex);
2289	list_add_rcu(&mux->kcm_mux_list, &knet->mux_list);
2290	knet->count++;
2291	mutex_unlock(&knet->mutex);
2292
2293	skb_queue_head_init(&mux->rx_hold_queue);
2294
2295	/* Init KCM socket */
2296	sock_init_data(sock, sk);
2297	init_kcm_sock(kcm_sk(sk), mux);
2298
2299	return 0;
2300}
2301
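/*
 * Illustrative userspace sketch (not taken from this file): creating a KCM
 * socket of either type accepted by kcm_create() above. Assumes the libc
 * headers expose AF_KCM; KCMPROTO_CONNECTED comes from <linux/kcm.h>. The
 * helper name is made up for the example. SOCK_SEQPACKET preserves a message
 * across partial reads, while SOCK_DGRAM truncates a message that does not
 * fit the receive buffer.
 */
#include <sys/socket.h>
#include <linux/kcm.h>

static int kcm_open(int type)	/* SOCK_DGRAM or SOCK_SEQPACKET */
{
	return socket(AF_KCM, type, KCMPROTO_CONNECTED);
}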
2302static struct net_proto_family kcm_family_ops = {
2303	.family = PF_KCM,
2304	.create = kcm_create,
2305	.owner  = THIS_MODULE,
2306};
2307
2308static __net_init int kcm_init_net(struct net *net)
2309{
2310	struct kcm_net *knet = net_generic(net, kcm_net_id);
2311
2312	INIT_LIST_HEAD_RCU(&knet->mux_list);
2313	mutex_init(&knet->mutex);
2314
2315	return 0;
2316}
2317
2318static __net_exit void kcm_exit_net(struct net *net)
2319{
2320	struct kcm_net *knet = net_generic(net, kcm_net_id);
2321
2322	/* All KCM sockets should be closed at this point, which should mean
2323	 * that all multiplexors and psocks have been destroyed.
2324	 */
2325	WARN_ON(!list_empty(&knet->mux_list));
 
 
2326}
2327
2328static struct pernet_operations kcm_net_ops = {
2329	.init = kcm_init_net,
2330	.exit = kcm_exit_net,
2331	.id   = &kcm_net_id,
2332	.size = sizeof(struct kcm_net),
2333};
2334
2335static int __init kcm_init(void)
2336{
2337	int err = -ENOMEM;
2338
2339	kcm_muxp = kmem_cache_create("kcm_mux_cache",
2340				     sizeof(struct kcm_mux), 0,
2341				     SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
2342	if (!kcm_muxp)
2343		goto fail;
2344
2345	kcm_psockp = kmem_cache_create("kcm_psock_cache",
2346				       sizeof(struct kcm_psock), 0,
2347					SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
2348	if (!kcm_psockp)
2349		goto fail;
2350
2351	kcm_wq = create_singlethread_workqueue("kkcmd");
2352	if (!kcm_wq)
2353		goto fail;
2354
2355	err = proto_register(&kcm_proto, 1);
2356	if (err)
2357		goto fail;
2358
2359	err = sock_register(&kcm_family_ops);
2360	if (err)
2361		goto sock_register_fail;
2362
2363	err = register_pernet_device(&kcm_net_ops);
2364	if (err)
2365		goto net_ops_fail;
2366
2367	err = kcm_proc_init();
2368	if (err)
2369		goto proc_init_fail;
2370
2371	return 0;
2372
2373proc_init_fail:
2374	unregister_pernet_device(&kcm_net_ops);
2375
2376net_ops_fail:
2377	sock_unregister(PF_KCM);
2378
2379sock_register_fail:
2380	proto_unregister(&kcm_proto);
2381
2382fail:
2383	kmem_cache_destroy(kcm_muxp);
2384	kmem_cache_destroy(kcm_psockp);
2385
2386	if (kcm_wq)
2387		destroy_workqueue(kcm_wq);
2388
2389	return err;
2390}
2391
2392static void __exit kcm_exit(void)
2393{
2394	kcm_proc_exit();
2395	unregister_pernet_device(&kcm_net_ops);
2396	sock_unregister(PF_KCM);
 
2397	proto_unregister(&kcm_proto);
2398	destroy_workqueue(kcm_wq);
2399
2400	kmem_cache_destroy(kcm_muxp);
2401	kmem_cache_destroy(kcm_psockp);
2402}
2403
2404module_init(kcm_init);
2405module_exit(kcm_exit);
2406
2407MODULE_LICENSE("GPL");
 
2408MODULE_ALIAS_NETPROTO(PF_KCM);
2409
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Kernel Connection Multiplexor
   4 *
   5 * Copyright (c) 2016 Tom Herbert <tom@herbertland.com>
   6 */
   7
   8#include <linux/bpf.h>
   9#include <linux/errno.h>
  10#include <linux/errqueue.h>
  11#include <linux/file.h>
  12#include <linux/filter.h>
  13#include <linux/in.h>
  14#include <linux/kernel.h>
  15#include <linux/module.h>
  16#include <linux/net.h>
  17#include <linux/netdevice.h>
  18#include <linux/poll.h>
  19#include <linux/rculist.h>
  20#include <linux/skbuff.h>
  21#include <linux/socket.h>
  22#include <linux/uaccess.h>
  23#include <linux/workqueue.h>
  24#include <linux/syscalls.h>
  25#include <linux/sched/signal.h>
  26
  27#include <net/kcm.h>
  28#include <net/netns/generic.h>
  29#include <net/sock.h>
 
  30#include <uapi/linux/kcm.h>
  31#include <trace/events/sock.h>
  32
  33unsigned int kcm_net_id;
  34
  35static struct kmem_cache *kcm_psockp __read_mostly;
  36static struct kmem_cache *kcm_muxp __read_mostly;
  37static struct workqueue_struct *kcm_wq;
  38
  39static inline struct kcm_sock *kcm_sk(const struct sock *sk)
  40{
  41	return (struct kcm_sock *)sk;
  42}
  43
  44static inline struct kcm_tx_msg *kcm_tx_msg(struct sk_buff *skb)
  45{
  46	return (struct kcm_tx_msg *)skb->cb;
  47}
  48
  49static void report_csk_error(struct sock *csk, int err)
  50{
  51	csk->sk_err = EPIPE;
  52	sk_error_report(csk);
  53}
  54
  55static void kcm_abort_tx_psock(struct kcm_psock *psock, int err,
  56			       bool wakeup_kcm)
  57{
  58	struct sock *csk = psock->sk;
  59	struct kcm_mux *mux = psock->mux;
  60
  61	/* Unrecoverable error in transmit */
  62
  63	spin_lock_bh(&mux->lock);
  64
  65	if (psock->tx_stopped) {
  66		spin_unlock_bh(&mux->lock);
  67		return;
  68	}
  69
  70	psock->tx_stopped = 1;
  71	KCM_STATS_INCR(psock->stats.tx_aborts);
  72
  73	if (!psock->tx_kcm) {
  74		/* Take off psocks_avail list */
  75		list_del(&psock->psock_avail_list);
  76	} else if (wakeup_kcm) {
  77		/* In this case psock is being aborted while outside of
  78		 * write_msgs and psock is reserved. Schedule tx_work
  79		 * to handle the failure there. Need to commit tx_stopped
  80		 * before queuing work.
  81		 */
  82		smp_mb();
  83
  84		queue_work(kcm_wq, &psock->tx_kcm->tx_work);
  85	}
  86
  87	spin_unlock_bh(&mux->lock);
  88
  89	/* Report error on lower socket */
  90	report_csk_error(csk, err);
  91}
  92
  93/* RX mux lock held. */
  94static void kcm_update_rx_mux_stats(struct kcm_mux *mux,
  95				    struct kcm_psock *psock)
  96{
  97	STRP_STATS_ADD(mux->stats.rx_bytes,
  98		       psock->strp.stats.bytes -
  99		       psock->saved_rx_bytes);
 100	mux->stats.rx_msgs +=
 101		psock->strp.stats.msgs - psock->saved_rx_msgs;
 102	psock->saved_rx_msgs = psock->strp.stats.msgs;
 103	psock->saved_rx_bytes = psock->strp.stats.bytes;
 104}
 105
 106static void kcm_update_tx_mux_stats(struct kcm_mux *mux,
 107				    struct kcm_psock *psock)
 108{
 109	KCM_STATS_ADD(mux->stats.tx_bytes,
 110		      psock->stats.tx_bytes - psock->saved_tx_bytes);
 111	mux->stats.tx_msgs +=
 112		psock->stats.tx_msgs - psock->saved_tx_msgs;
 113	psock->saved_tx_msgs = psock->stats.tx_msgs;
 114	psock->saved_tx_bytes = psock->stats.tx_bytes;
 115}
 116
 117static int kcm_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
 118
 119/* KCM is ready to receive messages on its queue-- either the KCM is new or
 120 * has become unblocked after being blocked on full socket buffer. Queue any
 121 * pending ready messages on a psock. RX mux lock held.
 122 */
 123static void kcm_rcv_ready(struct kcm_sock *kcm)
 124{
 125	struct kcm_mux *mux = kcm->mux;
 126	struct kcm_psock *psock;
 127	struct sk_buff *skb;
 128
 129	if (unlikely(kcm->rx_wait || kcm->rx_psock || kcm->rx_disabled))
 130		return;
 131
 132	while (unlikely((skb = __skb_dequeue(&mux->rx_hold_queue)))) {
 133		if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
 134			/* Assuming buffer limit has been reached */
 135			skb_queue_head(&mux->rx_hold_queue, skb);
 136			WARN_ON(!sk_rmem_alloc_get(&kcm->sk));
 137			return;
 138		}
 139	}
 140
 141	while (!list_empty(&mux->psocks_ready)) {
 142		psock = list_first_entry(&mux->psocks_ready, struct kcm_psock,
 143					 psock_ready_list);
 144
 145		if (kcm_queue_rcv_skb(&kcm->sk, psock->ready_rx_msg)) {
 146			/* Assuming buffer limit has been reached */
 147			WARN_ON(!sk_rmem_alloc_get(&kcm->sk));
 148			return;
 149		}
 150
 151		/* Consumed the ready message on the psock. Schedule rx_work to
 152		 * get more messages.
 153		 */
 154		list_del(&psock->psock_ready_list);
 155		psock->ready_rx_msg = NULL;
 
 156		/* Commit clearing of ready_rx_msg for queuing work */
 157		smp_mb();
 158
 159		strp_unpause(&psock->strp);
 160		strp_check_rcv(&psock->strp);
 161	}
 162
 163	/* Buffer limit is okay now, add to ready list */
 164	list_add_tail(&kcm->wait_rx_list,
 165		      &kcm->mux->kcm_rx_waiters);
 166	/* paired with lockless reads in kcm_rfree() */
 167	WRITE_ONCE(kcm->rx_wait, true);
 168}
 169
 170static void kcm_rfree(struct sk_buff *skb)
 171{
 172	struct sock *sk = skb->sk;
 173	struct kcm_sock *kcm = kcm_sk(sk);
 174	struct kcm_mux *mux = kcm->mux;
 175	unsigned int len = skb->truesize;
 176
 177	sk_mem_uncharge(sk, len);
 178	atomic_sub(len, &sk->sk_rmem_alloc);
 179
 180	/* For reading rx_wait and rx_psock without holding lock */
 181	smp_mb__after_atomic();
 182
 183	if (!READ_ONCE(kcm->rx_wait) && !READ_ONCE(kcm->rx_psock) &&
 184	    sk_rmem_alloc_get(sk) < sk->sk_rcvlowat) {
 185		spin_lock_bh(&mux->rx_lock);
 186		kcm_rcv_ready(kcm);
 187		spin_unlock_bh(&mux->rx_lock);
 188	}
 189}
 190
 191static int kcm_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 192{
 193	struct sk_buff_head *list = &sk->sk_receive_queue;
 194
 195	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
 196		return -ENOMEM;
 197
 198	if (!sk_rmem_schedule(sk, skb, skb->truesize))
 199		return -ENOBUFS;
 200
 201	skb->dev = NULL;
 202
 203	skb_orphan(skb);
 204	skb->sk = sk;
 205	skb->destructor = kcm_rfree;
 206	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
 207	sk_mem_charge(sk, skb->truesize);
 208
 209	skb_queue_tail(list, skb);
 210
 211	if (!sock_flag(sk, SOCK_DEAD))
 212		sk->sk_data_ready(sk);
 213
 214	return 0;
 215}
 216
 217/* Requeue received messages for a kcm socket to other kcm sockets. This is
  218 * called when receive is disabled on a kcm socket.
 219 * RX mux lock held.
 220 */
 221static void requeue_rx_msgs(struct kcm_mux *mux, struct sk_buff_head *head)
 222{
 223	struct sk_buff *skb;
 224	struct kcm_sock *kcm;
 225
 226	while ((skb = skb_dequeue(head))) {
 227		/* Reset destructor to avoid calling kcm_rcv_ready */
 228		skb->destructor = sock_rfree;
 229		skb_orphan(skb);
 230try_again:
 231		if (list_empty(&mux->kcm_rx_waiters)) {
 232			skb_queue_tail(&mux->rx_hold_queue, skb);
 233			continue;
 234		}
 235
 236		kcm = list_first_entry(&mux->kcm_rx_waiters,
 237				       struct kcm_sock, wait_rx_list);
 238
 239		if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
 240			/* Should mean socket buffer full */
 241			list_del(&kcm->wait_rx_list);
 242			/* paired with lockless reads in kcm_rfree() */
 243			WRITE_ONCE(kcm->rx_wait, false);
 244
 245			/* Commit rx_wait to read in kcm_free */
 246			smp_wmb();
 247
 248			goto try_again;
 249		}
 250	}
 251}
 252
 253/* Lower sock lock held */
 254static struct kcm_sock *reserve_rx_kcm(struct kcm_psock *psock,
 255				       struct sk_buff *head)
 256{
 257	struct kcm_mux *mux = psock->mux;
 258	struct kcm_sock *kcm;
 259
 260	WARN_ON(psock->ready_rx_msg);
 261
 262	if (psock->rx_kcm)
 263		return psock->rx_kcm;
 264
 265	spin_lock_bh(&mux->rx_lock);
 266
 267	if (psock->rx_kcm) {
 268		spin_unlock_bh(&mux->rx_lock);
 269		return psock->rx_kcm;
 270	}
 271
 272	kcm_update_rx_mux_stats(mux, psock);
 273
 274	if (list_empty(&mux->kcm_rx_waiters)) {
 275		psock->ready_rx_msg = head;
 276		strp_pause(&psock->strp);
 277		list_add_tail(&psock->psock_ready_list,
 278			      &mux->psocks_ready);
 279		spin_unlock_bh(&mux->rx_lock);
 280		return NULL;
 281	}
 282
 283	kcm = list_first_entry(&mux->kcm_rx_waiters,
 284			       struct kcm_sock, wait_rx_list);
 285	list_del(&kcm->wait_rx_list);
 286	/* paired with lockless reads in kcm_rfree() */
 287	WRITE_ONCE(kcm->rx_wait, false);
 288
 289	psock->rx_kcm = kcm;
 290	/* paired with lockless reads in kcm_rfree() */
 291	WRITE_ONCE(kcm->rx_psock, psock);
 292
 293	spin_unlock_bh(&mux->rx_lock);
 294
 295	return kcm;
 296}
 297
 298static void kcm_done(struct kcm_sock *kcm);
 299
 300static void kcm_done_work(struct work_struct *w)
 301{
 302	kcm_done(container_of(w, struct kcm_sock, done_work));
 303}
 304
 305/* Lower sock held */
 306static void unreserve_rx_kcm(struct kcm_psock *psock,
 307			     bool rcv_ready)
 308{
 309	struct kcm_sock *kcm = psock->rx_kcm;
 310	struct kcm_mux *mux = psock->mux;
 311
 312	if (!kcm)
 313		return;
 314
 315	spin_lock_bh(&mux->rx_lock);
 316
 317	psock->rx_kcm = NULL;
 318	/* paired with lockless reads in kcm_rfree() */
 319	WRITE_ONCE(kcm->rx_psock, NULL);
 320
 321	/* Commit kcm->rx_psock before sk_rmem_alloc_get to sync with
 322	 * kcm_rfree
 323	 */
 324	smp_mb();
 325
 326	if (unlikely(kcm->done)) {
 327		spin_unlock_bh(&mux->rx_lock);
 328
  329		/* Need to run kcm_done in a task since we need to acquire
 330		 * callback locks which may already be held here.
 331		 */
 332		INIT_WORK(&kcm->done_work, kcm_done_work);
 333		schedule_work(&kcm->done_work);
 334		return;
 335	}
 336
 337	if (unlikely(kcm->rx_disabled)) {
 338		requeue_rx_msgs(mux, &kcm->sk.sk_receive_queue);
 339	} else if (rcv_ready || unlikely(!sk_rmem_alloc_get(&kcm->sk))) {
 340		/* Check for degenerative race with rx_wait that all
 341		 * data was dequeued (accounted for in kcm_rfree).
 342		 */
 343		kcm_rcv_ready(kcm);
 344	}
 345	spin_unlock_bh(&mux->rx_lock);
 346}
 347
 348/* Lower sock lock held */
 349static void psock_data_ready(struct sock *sk)
 350{
 351	struct kcm_psock *psock;
 352
 353	trace_sk_data_ready(sk);
 354
 355	read_lock_bh(&sk->sk_callback_lock);
 356
 357	psock = (struct kcm_psock *)sk->sk_user_data;
 358	if (likely(psock))
 359		strp_data_ready(&psock->strp);
 360
 
 361	read_unlock_bh(&sk->sk_callback_lock);
 362}
 363
 364/* Called with lower sock held */
 365static void kcm_rcv_strparser(struct strparser *strp, struct sk_buff *skb)
 366{
 367	struct kcm_psock *psock = container_of(strp, struct kcm_psock, strp);
 368	struct kcm_sock *kcm;
 369
 370try_queue:
 371	kcm = reserve_rx_kcm(psock, skb);
 372	if (!kcm) {
 373		 /* Unable to reserve a KCM, message is held in psock and strp
 374		  * is paused.
 375		  */
 376		return;
 377	}
 378
 379	if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
 380		/* Should mean socket buffer full */
 381		unreserve_rx_kcm(psock, false);
 382		goto try_queue;
 383	}
 384}
 385
 386static int kcm_parse_func_strparser(struct strparser *strp, struct sk_buff *skb)
 387{
 388	struct kcm_psock *psock = container_of(strp, struct kcm_psock, strp);
 389	struct bpf_prog *prog = psock->bpf_prog;
 390	int res;
 391
 392	res = bpf_prog_run_pin_on_cpu(prog, skb);
 393	return res;
 394}
 395
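/*
 * Illustrative sketch of the kind of parser program a psock's bpf_prog might
 * be (an assumption for the example, not code from this file): an eBPF
 * socket-filter that reads a 2-byte big-endian length header and returns the
 * total framed length. For the strparser, a positive return value is the
 * message length, 0 means more data is needed, and a negative value is an
 * error. Program and section names are made up for the example.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

SEC("socket")
int kcm_len_parser(struct __sk_buff *skb)
{
	__u16 hdr;

	/* Header not fully received yet: ask the strparser to wait. */
	if (bpf_skb_load_bytes(skb, 0, &hdr, sizeof(hdr)) < 0)
		return 0;

	/* The length field covers only the payload; add the header size. */
	return bpf_ntohs(hdr) + sizeof(hdr);
}

char _license[] SEC("license") = "GPL";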
 396static int kcm_read_sock_done(struct strparser *strp, int err)
 397{
 398	struct kcm_psock *psock = container_of(strp, struct kcm_psock, strp);
 399
 400	unreserve_rx_kcm(psock, true);
 401
 402	return err;
 403}
 404
 405static void psock_state_change(struct sock *sk)
 406{
  407	/* TCP only does an EPOLLIN for a half close. Do an EPOLLHUP here
  408	 * since the application will normally not poll with EPOLLIN
  409	 * on the TCP sockets.
 410	 */
 411
 412	report_csk_error(sk, EPIPE);
 413}
 414
 415static void psock_write_space(struct sock *sk)
 416{
 417	struct kcm_psock *psock;
 418	struct kcm_mux *mux;
 419	struct kcm_sock *kcm;
 420
 421	read_lock_bh(&sk->sk_callback_lock);
 422
 423	psock = (struct kcm_psock *)sk->sk_user_data;
 424	if (unlikely(!psock))
 425		goto out;
 
 426	mux = psock->mux;
 427
 428	spin_lock_bh(&mux->lock);
 429
  430	/* Check if the socket is reserved; if so, someone is waiting to send. */
 431	kcm = psock->tx_kcm;
 432	if (kcm && !unlikely(kcm->tx_stopped))
 433		queue_work(kcm_wq, &kcm->tx_work);
 434
 435	spin_unlock_bh(&mux->lock);
 436out:
 437	read_unlock_bh(&sk->sk_callback_lock);
 438}
 439
 440static void unreserve_psock(struct kcm_sock *kcm);
 441
 442/* kcm sock is locked. */
 443static struct kcm_psock *reserve_psock(struct kcm_sock *kcm)
 444{
 445	struct kcm_mux *mux = kcm->mux;
 446	struct kcm_psock *psock;
 447
 448	psock = kcm->tx_psock;
 449
 450	smp_rmb(); /* Must read tx_psock before tx_wait */
 451
 452	if (psock) {
 453		WARN_ON(kcm->tx_wait);
 454		if (unlikely(psock->tx_stopped))
 455			unreserve_psock(kcm);
 456		else
 457			return kcm->tx_psock;
 458	}
 459
 460	spin_lock_bh(&mux->lock);
 461
  462	/* Check again under lock to see if a psock was reserved for this
  463	 * kcm via psock_now_avail().
 464	 */
 465	psock = kcm->tx_psock;
 466	if (unlikely(psock)) {
 467		WARN_ON(kcm->tx_wait);
 468		spin_unlock_bh(&mux->lock);
 469		return kcm->tx_psock;
 470	}
 471
 472	if (!list_empty(&mux->psocks_avail)) {
 473		psock = list_first_entry(&mux->psocks_avail,
 474					 struct kcm_psock,
 475					 psock_avail_list);
 476		list_del(&psock->psock_avail_list);
 477		if (kcm->tx_wait) {
 478			list_del(&kcm->wait_psock_list);
 479			kcm->tx_wait = false;
 480		}
 481		kcm->tx_psock = psock;
 482		psock->tx_kcm = kcm;
 483		KCM_STATS_INCR(psock->stats.reserved);
 484	} else if (!kcm->tx_wait) {
 485		list_add_tail(&kcm->wait_psock_list,
 486			      &mux->kcm_tx_waiters);
 487		kcm->tx_wait = true;
 488	}
 489
 490	spin_unlock_bh(&mux->lock);
 491
 492	return psock;
 493}
 494
 495/* mux lock held */
 496static void psock_now_avail(struct kcm_psock *psock)
 497{
 498	struct kcm_mux *mux = psock->mux;
 499	struct kcm_sock *kcm;
 500
 501	if (list_empty(&mux->kcm_tx_waiters)) {
 502		list_add_tail(&psock->psock_avail_list,
 503			      &mux->psocks_avail);
 504	} else {
 505		kcm = list_first_entry(&mux->kcm_tx_waiters,
 506				       struct kcm_sock,
 507				       wait_psock_list);
 508		list_del(&kcm->wait_psock_list);
 509		kcm->tx_wait = false;
 510		psock->tx_kcm = kcm;
 511
 512		/* Commit before changing tx_psock since that is read in
 513		 * reserve_psock before queuing work.
 514		 */
 515		smp_mb();
 516
 517		kcm->tx_psock = psock;
 518		KCM_STATS_INCR(psock->stats.reserved);
 519		queue_work(kcm_wq, &kcm->tx_work);
 520	}
 521}
 522
 523/* kcm sock is locked. */
 524static void unreserve_psock(struct kcm_sock *kcm)
 525{
 526	struct kcm_psock *psock;
 527	struct kcm_mux *mux = kcm->mux;
 528
 529	spin_lock_bh(&mux->lock);
 530
 531	psock = kcm->tx_psock;
 532
 533	if (WARN_ON(!psock)) {
 534		spin_unlock_bh(&mux->lock);
 535		return;
 536	}
 537
 538	smp_rmb(); /* Read tx_psock before tx_wait */
 539
 540	kcm_update_tx_mux_stats(mux, psock);
 541
 542	WARN_ON(kcm->tx_wait);
 543
 544	kcm->tx_psock = NULL;
 545	psock->tx_kcm = NULL;
 546	KCM_STATS_INCR(psock->stats.unreserved);
 547
 548	if (unlikely(psock->tx_stopped)) {
 549		if (psock->done) {
 550			/* Deferred free */
 551			list_del(&psock->psock_list);
 552			mux->psocks_cnt--;
 553			sock_put(psock->sk);
 554			fput(psock->sk->sk_socket->file);
 555			kmem_cache_free(kcm_psockp, psock);
 556		}
 557
 558		/* Don't put back on available list */
 559
 560		spin_unlock_bh(&mux->lock);
 561
 562		return;
 563	}
 564
 565	psock_now_avail(psock);
 566
 567	spin_unlock_bh(&mux->lock);
 568}
 569
 570static void kcm_report_tx_retry(struct kcm_sock *kcm)
 571{
 572	struct kcm_mux *mux = kcm->mux;
 573
 574	spin_lock_bh(&mux->lock);
 575	KCM_STATS_INCR(mux->stats.tx_retries);
 576	spin_unlock_bh(&mux->lock);
 577}
 578
 579/* Write any messages ready on the kcm socket.  Called with kcm sock lock
 580 * held.  Return bytes actually sent or error.
 581 */
 582static int kcm_write_msgs(struct kcm_sock *kcm)
 583{
 584	unsigned int total_sent = 0;
 585	struct sock *sk = &kcm->sk;
 586	struct kcm_psock *psock;
 587	struct sk_buff *head;
 588	int ret = 0;
 589
 590	kcm->tx_wait_more = false;
 591	psock = kcm->tx_psock;
 592	if (unlikely(psock && psock->tx_stopped)) {
 593		/* A reserved psock was aborted asynchronously. Unreserve
 594		 * it and we'll retry the message.
 595		 */
 596		unreserve_psock(kcm);
 597		kcm_report_tx_retry(kcm);
 598		if (skb_queue_empty(&sk->sk_write_queue))
 599			return 0;
 600
 601		kcm_tx_msg(skb_peek(&sk->sk_write_queue))->started_tx = false;
 602	}
 603
 604retry:
 605	while ((head = skb_peek(&sk->sk_write_queue))) {
 606		struct msghdr msg = {
 607			.msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES,
 608		};
 609		struct kcm_tx_msg *txm = kcm_tx_msg(head);
 610		struct sk_buff *skb;
 611		unsigned int msize;
 612		int i;
 613
 614		if (!txm->started_tx) {
 615			psock = reserve_psock(kcm);
 616			if (!psock)
 617				goto out;
 618			skb = head;
 619			txm->frag_offset = 0;
 620			txm->sent = 0;
 621			txm->started_tx = true;
 622		} else {
 623			if (WARN_ON(!psock)) {
 624				ret = -EINVAL;
 625				goto out;
 626			}
 627			skb = txm->frag_skb;
 628		}
 629
 630		if (WARN_ON(!skb_shinfo(skb)->nr_frags) ||
 631		    WARN_ON_ONCE(!skb_frag_page(&skb_shinfo(skb)->frags[0]))) {
 632			ret = -EINVAL;
 633			goto out;
 634		}
 635
 636		msize = 0;
 637		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
 638			msize += skb_frag_size(&skb_shinfo(skb)->frags[i]);
 639
 640		iov_iter_bvec(&msg.msg_iter, ITER_SOURCE,
 641			      (const struct bio_vec *)skb_shinfo(skb)->frags,
 642			      skb_shinfo(skb)->nr_frags, msize);
 643		iov_iter_advance(&msg.msg_iter, txm->frag_offset);
 644
 645		do {
 646			ret = sock_sendmsg(psock->sk->sk_socket, &msg);
 647			if (ret <= 0) {
 648				if (ret == -EAGAIN) {
 649					/* Save state to try again when there's
 650					 * write space on the socket
 651					 */
 652					txm->frag_skb = skb;
 
 653					ret = 0;
 654					goto out;
 655				}
 656
 657				/* Hard failure in sending message, abort this
 658				 * psock since it has lost framing
 659				 * synchronization and retry sending the
 660				 * message from the beginning.
 661				 */
 662				kcm_abort_tx_psock(psock, ret ? -ret : EPIPE,
 663						   true);
 664				unreserve_psock(kcm);
 665				psock = NULL;
 666
 667				txm->started_tx = false;
 668				kcm_report_tx_retry(kcm);
 669				ret = 0;
 670				goto retry;
 
 671			}
 672
 673			txm->sent += ret;
 674			txm->frag_offset += ret;
 675			KCM_STATS_ADD(psock->stats.tx_bytes, ret);
 676		} while (msg.msg_iter.count > 0);
 677
 678		if (skb == head) {
 679			if (skb_has_frag_list(skb)) {
 680				txm->frag_skb = skb_shinfo(skb)->frag_list;
 681				txm->frag_offset = 0;
 682				continue;
 683			}
 684		} else if (skb->next) {
 685			txm->frag_skb = skb->next;
 686			txm->frag_offset = 0;
 687			continue;
 688		}
 689
 690		/* Successfully sent the whole packet, account for it. */
 691		sk->sk_wmem_queued -= txm->sent;
 692		total_sent += txm->sent;
 693		skb_dequeue(&sk->sk_write_queue);
 694		kfree_skb(head);
 
 
 695		KCM_STATS_INCR(psock->stats.tx_msgs);
 696	}
 697out:
 698	if (!head) {
 699		/* Done with all queued messages. */
 700		WARN_ON(!skb_queue_empty(&sk->sk_write_queue));
 701		if (psock)
 702			unreserve_psock(kcm);
 703	}
 704
 705	/* Check if write space is available */
 706	sk->sk_write_space(sk);
 707
 708	return total_sent ? : ret;
 709}
 710
 711static void kcm_tx_work(struct work_struct *w)
 712{
 713	struct kcm_sock *kcm = container_of(w, struct kcm_sock, tx_work);
 714	struct sock *sk = &kcm->sk;
 715	int err;
 716
 717	lock_sock(sk);
 718
 719	/* Primarily for SOCK_DGRAM sockets, also handle asynchronous tx
 720	 * aborts
 721	 */
 722	err = kcm_write_msgs(kcm);
 723	if (err < 0) {
 724		/* Hard failure in write, report error on KCM socket */
 725		pr_warn("KCM: Hard failure on kcm_write_msgs %d\n", err);
 726		report_csk_error(&kcm->sk, -err);
 727		goto out;
 728	}
 729
 730	/* Primarily for SOCK_SEQPACKET sockets */
 731	if (likely(sk->sk_socket) &&
 732	    test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
 733		clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
 734		sk->sk_write_space(sk);
 735	}
 736
 737out:
 738	release_sock(sk);
 739}
 740
 741static void kcm_push(struct kcm_sock *kcm)
 742{
 743	if (kcm->tx_wait_more)
 744		kcm_write_msgs(kcm);
 745}
 746
 747static int kcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
 748{
 749	struct sock *sk = sock->sk;
 750	struct kcm_sock *kcm = kcm_sk(sk);
 751	struct sk_buff *skb = NULL, *head = NULL;
 752	size_t copy, copied = 0;
 753	long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
 754	int eor = (sock->type == SOCK_DGRAM) ?
 755		  !(msg->msg_flags & MSG_MORE) : !!(msg->msg_flags & MSG_EOR);
 756	int err = -EPIPE;
 757
 758	mutex_lock(&kcm->tx_mutex);
 759	lock_sock(sk);
 760
 761	/* Per tcp_sendmsg this should be in poll */
 762	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
 763
 764	if (sk->sk_err)
 765		goto out_error;
 766
 767	if (kcm->seq_skb) {
 768		/* Previously opened message */
 769		head = kcm->seq_skb;
 770		skb = kcm_tx_msg(head)->last_skb;
 771		goto start;
 772	}
 773
 774	/* Call the sk_stream functions to manage the sndbuf mem. */
 775	if (!sk_stream_memory_free(sk)) {
 776		kcm_push(kcm);
 777		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
 778		err = sk_stream_wait_memory(sk, &timeo);
 779		if (err)
 780			goto out_error;
 781	}
 782
 783	if (msg_data_left(msg)) {
 784		/* New message, alloc head skb */
 785		head = alloc_skb(0, sk->sk_allocation);
 786		while (!head) {
 787			kcm_push(kcm);
 788			err = sk_stream_wait_memory(sk, &timeo);
 789			if (err)
 790				goto out_error;
 791
 792			head = alloc_skb(0, sk->sk_allocation);
 793		}
 794
 795		skb = head;
 796
 797		/* Set ip_summed to CHECKSUM_UNNECESSARY to avoid calling
 798		 * csum_and_copy_from_iter from skb_do_copy_data_nocache.
 799		 */
 800		skb->ip_summed = CHECKSUM_UNNECESSARY;
 801	}
 802
 803start:
 804	while (msg_data_left(msg)) {
 805		bool merge = true;
 806		int i = skb_shinfo(skb)->nr_frags;
 807		struct page_frag *pfrag = sk_page_frag(sk);
 808
 809		if (!sk_page_frag_refill(sk, pfrag))
 810			goto wait_for_memory;
 811
 812		if (!skb_can_coalesce(skb, i, pfrag->page,
 813				      pfrag->offset)) {
 814			if (i == MAX_SKB_FRAGS) {
 815				struct sk_buff *tskb;
 816
 817				tskb = alloc_skb(0, sk->sk_allocation);
 818				if (!tskb)
 819					goto wait_for_memory;
 820
 821				if (head == skb)
 822					skb_shinfo(head)->frag_list = tskb;
 823				else
 824					skb->next = tskb;
 825
 826				skb = tskb;
 827				skb->ip_summed = CHECKSUM_UNNECESSARY;
 828				continue;
 829			}
 830			merge = false;
 831		}
 832
 833		if (msg->msg_flags & MSG_SPLICE_PAGES) {
 834			copy = msg_data_left(msg);
 835			if (!sk_wmem_schedule(sk, copy))
 836				goto wait_for_memory;
 837
 838			err = skb_splice_from_iter(skb, &msg->msg_iter, copy,
 839						   sk->sk_allocation);
 840			if (err < 0) {
 841				if (err == -EMSGSIZE)
 842					goto wait_for_memory;
 843				goto out_error;
 844			}
 845
 846			copy = err;
 847			skb_shinfo(skb)->flags |= SKBFL_SHARED_FRAG;
 848			sk_wmem_queued_add(sk, copy);
 849			sk_mem_charge(sk, copy);
 
 
 850
 851			if (head != skb)
 852				head->truesize += copy;
 
 853		} else {
 854			copy = min_t(int, msg_data_left(msg),
 855				     pfrag->size - pfrag->offset);
 856			if (!sk_wmem_schedule(sk, copy))
 857				goto wait_for_memory;
 858
 859			err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb,
 860						       pfrag->page,
 861						       pfrag->offset,
 862						       copy);
 863			if (err)
 864				goto out_error;
 865
 866			/* Update the skb. */
 867			if (merge) {
 868				skb_frag_size_add(
 869					&skb_shinfo(skb)->frags[i - 1], copy);
 870			} else {
 871				skb_fill_page_desc(skb, i, pfrag->page,
 872						   pfrag->offset, copy);
 873				get_page(pfrag->page);
 874			}
 875
 876			pfrag->offset += copy;
 877		}
 878
 
 879		copied += copy;
 880		if (head != skb) {
 881			head->len += copy;
 882			head->data_len += copy;
 883		}
 884
 885		continue;
 886
 887wait_for_memory:
 888		kcm_push(kcm);
 889		err = sk_stream_wait_memory(sk, &timeo);
 890		if (err)
 891			goto out_error;
 892	}
 893
 894	if (eor) {
 895		bool not_busy = skb_queue_empty(&sk->sk_write_queue);
 896
 897		if (head) {
 898			/* Message complete, queue it on send buffer */
 899			__skb_queue_tail(&sk->sk_write_queue, head);
 900			kcm->seq_skb = NULL;
 901			KCM_STATS_INCR(kcm->stats.tx_msgs);
 902		}
 903
 904		if (msg->msg_flags & MSG_BATCH) {
 905			kcm->tx_wait_more = true;
 906		} else if (kcm->tx_wait_more || not_busy) {
 907			err = kcm_write_msgs(kcm);
 908			if (err < 0) {
 909				/* We got a hard error in write_msgs but have
 910				 * already queued this message. Report an error
 911				 * in the socket, but don't affect return value
 912				 * from sendmsg
 913				 */
 914				pr_warn("KCM: Hard failure on kcm_write_msgs\n");
 915				report_csk_error(&kcm->sk, -err);
 916			}
 917		}
 918	} else {
 919		/* Message not complete, save state */
 920partial_message:
 921		if (head) {
 922			kcm->seq_skb = head;
 923			kcm_tx_msg(head)->last_skb = skb;
 924		}
 925	}
 926
 927	KCM_STATS_ADD(kcm->stats.tx_bytes, copied);
 928
 929	release_sock(sk);
 930	mutex_unlock(&kcm->tx_mutex);
 931	return copied;
 932
 933out_error:
 934	kcm_push(kcm);
 935
 936	if (sock->type == SOCK_SEQPACKET) {
 937		/* Wrote some bytes before encountering an
 938		 * error, return partial success.
 939		 */
 940		if (copied)
 941			goto partial_message;
 942		if (head != kcm->seq_skb)
 943			kfree_skb(head);
 944	} else {
 945		kfree_skb(head);
 946		kcm->seq_skb = NULL;
 947	}
 948
 949	err = sk_stream_error(sk, msg->msg_flags, err);
 950
 951	/* make sure we wake any epoll edge trigger waiter */
 952	if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 && err == -EAGAIN))
 953		sk->sk_write_space(sk);
 954
 955	release_sock(sk);
 956	mutex_unlock(&kcm->tx_mutex);
 957	return err;
 958}
 959
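/*
 * Illustrative userspace sketch (not from this file): sending one framed
 * message to the kcm_sendmsg() path above. The helper name and "kcm_fd" are
 * made up for the example. On SOCK_SEQPACKET, MSG_EOR closes the message
 * (on SOCK_DGRAM the absence of MSG_MORE does), and MSG_BATCH lets a caller
 * queue several messages before transmission is kicked off.
 */
#include <sys/socket.h>
#include <sys/uio.h>

static ssize_t kcm_send_record(int kcm_fd, const void *buf, size_t len)
{
	struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1 };

	/* One sendmsg() ending in MSG_EOR is one message to the peer. */
	return sendmsg(kcm_fd, &msg, MSG_EOR);
}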
 960static void kcm_splice_eof(struct socket *sock)
 
 961{
 962	struct sock *sk = sock->sk;
 963	struct kcm_sock *kcm = kcm_sk(sk);
 964
 965	if (skb_queue_empty_lockless(&sk->sk_write_queue))
 966		return;
 967
 968	lock_sock(sk);
 969	kcm_write_msgs(kcm);
 970	release_sock(sk);
 971}
 972
 973static int kcm_recvmsg(struct socket *sock, struct msghdr *msg,
 974		       size_t len, int flags)
 975{
 976	struct sock *sk = sock->sk;
 977	struct kcm_sock *kcm = kcm_sk(sk);
 978	int err = 0;
 979	struct strp_msg *stm;
 
 980	int copied = 0;
 981	struct sk_buff *skb;
 982
 983	skb = skb_recv_datagram(sk, flags, &err);
 984	if (!skb)
 985		goto out;
 986
 987	/* Okay, have a message on the receive queue */
 988
 989	stm = strp_msg(skb);
 990
 991	if (len > stm->full_len)
 992		len = stm->full_len;
 993
 994	err = skb_copy_datagram_msg(skb, stm->offset, msg, len);
 995	if (err < 0)
 996		goto out;
 997
 998	copied = len;
 999	if (likely(!(flags & MSG_PEEK))) {
1000		KCM_STATS_ADD(kcm->stats.rx_bytes, copied);
1001		if (copied < stm->full_len) {
1002			if (sock->type == SOCK_DGRAM) {
1003				/* Truncated message */
1004				msg->msg_flags |= MSG_TRUNC;
1005				goto msg_finished;
1006			}
1007			stm->offset += copied;
1008			stm->full_len -= copied;
1009		} else {
1010msg_finished:
1011			/* Finished with message */
1012			msg->msg_flags |= MSG_EOR;
1013			KCM_STATS_INCR(kcm->stats.rx_msgs);
 
 
1014		}
1015	}
1016
1017out:
1018	skb_free_datagram(sk, skb);
 
1019	return copied ? : err;
1020}
1021
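/*
 * Illustrative userspace sketch (not from this file): receiving one message
 * and checking the flags kcm_recvmsg() sets. The helper name and "kcm_fd"
 * are made up for the example. MSG_EOR marks a fully consumed message; on
 * SOCK_DGRAM a buffer smaller than the message yields MSG_TRUNC, while
 * SOCK_SEQPACKET leaves the remainder for the next read.
 */
#include <stdio.h>
#include <sys/socket.h>
#include <sys/uio.h>

static ssize_t kcm_recv_record(int kcm_fd, void *buf, size_t len)
{
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1 };
	ssize_t n = recvmsg(kcm_fd, &msg, 0);

	if (n > 0 && !(msg.msg_flags & MSG_EOR))
		fprintf(stderr, "partial message: %zd bytes so far\n", n);
	return n;
}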
1022static ssize_t kcm_splice_read(struct socket *sock, loff_t *ppos,
1023			       struct pipe_inode_info *pipe, size_t len,
1024			       unsigned int flags)
1025{
1026	struct sock *sk = sock->sk;
1027	struct kcm_sock *kcm = kcm_sk(sk);
1028	struct strp_msg *stm;
 
1029	int err = 0;
1030	ssize_t copied;
1031	struct sk_buff *skb;
1032
 1033	/* Only support splice for SOCK_SEQPACKET */
1034
1035	skb = skb_recv_datagram(sk, flags, &err);
1036	if (!skb)
1037		goto err_out;
1038
1039	/* Okay, have a message on the receive queue */
1040
1041	stm = strp_msg(skb);
1042
1043	if (len > stm->full_len)
1044		len = stm->full_len;
1045
1046	copied = skb_splice_bits(skb, sk, stm->offset, pipe, len, flags);
 
1047	if (copied < 0) {
1048		err = copied;
1049		goto err_out;
1050	}
1051
1052	KCM_STATS_ADD(kcm->stats.rx_bytes, copied);
1053
1054	stm->offset += copied;
1055	stm->full_len -= copied;
1056
1057	/* We have no way to return MSG_EOR. If all the bytes have been
1058	 * read we still leave the message in the receive socket buffer.
1059	 * A subsequent recvmsg needs to be done to return MSG_EOR and
1060	 * finish reading the message.
1061	 */
1062
1063	skb_free_datagram(sk, skb);
 
1064	return copied;
1065
1066err_out:
1067	skb_free_datagram(sk, skb);
 
1068	return err;
1069}
1070
1071/* kcm sock lock held */
1072static void kcm_recv_disable(struct kcm_sock *kcm)
1073{
1074	struct kcm_mux *mux = kcm->mux;
1075
1076	if (kcm->rx_disabled)
1077		return;
1078
1079	spin_lock_bh(&mux->rx_lock);
1080
1081	kcm->rx_disabled = 1;
1082
1083	/* If a psock is reserved we'll do cleanup in unreserve */
1084	if (!kcm->rx_psock) {
1085		if (kcm->rx_wait) {
1086			list_del(&kcm->wait_rx_list);
1087			/* paired with lockless reads in kcm_rfree() */
1088			WRITE_ONCE(kcm->rx_wait, false);
1089		}
1090
1091		requeue_rx_msgs(mux, &kcm->sk.sk_receive_queue);
1092	}
1093
1094	spin_unlock_bh(&mux->rx_lock);
1095}
1096
1097/* kcm sock lock held */
1098static void kcm_recv_enable(struct kcm_sock *kcm)
1099{
1100	struct kcm_mux *mux = kcm->mux;
1101
1102	if (!kcm->rx_disabled)
1103		return;
1104
1105	spin_lock_bh(&mux->rx_lock);
1106
1107	kcm->rx_disabled = 0;
1108	kcm_rcv_ready(kcm);
1109
1110	spin_unlock_bh(&mux->rx_lock);
1111}
1112
1113static int kcm_setsockopt(struct socket *sock, int level, int optname,
1114			  sockptr_t optval, unsigned int optlen)
1115{
1116	struct kcm_sock *kcm = kcm_sk(sock->sk);
1117	int val, valbool;
1118	int err = 0;
1119
1120	if (level != SOL_KCM)
1121		return -ENOPROTOOPT;
1122
1123	if (optlen < sizeof(int))
1124		return -EINVAL;
1125
1126	if (copy_from_sockptr(&val, optval, sizeof(int)))
1127		return -EFAULT;
1128
1129	valbool = val ? 1 : 0;
1130
1131	switch (optname) {
1132	case KCM_RECV_DISABLE:
1133		lock_sock(&kcm->sk);
1134		if (valbool)
1135			kcm_recv_disable(kcm);
1136		else
1137			kcm_recv_enable(kcm);
1138		release_sock(&kcm->sk);
1139		break;
1140	default:
1141		err = -ENOPROTOOPT;
1142	}
1143
1144	return err;
1145}
1146
1147static int kcm_getsockopt(struct socket *sock, int level, int optname,
1148			  char __user *optval, int __user *optlen)
1149{
1150	struct kcm_sock *kcm = kcm_sk(sock->sk);
1151	int val, len;
1152
1153	if (level != SOL_KCM)
1154		return -ENOPROTOOPT;
1155
1156	if (get_user(len, optlen))
1157		return -EFAULT;
1158
 
1159	if (len < 0)
1160		return -EINVAL;
1161
1162	len = min_t(unsigned int, len, sizeof(int));
1163
1164	switch (optname) {
1165	case KCM_RECV_DISABLE:
1166		val = kcm->rx_disabled;
1167		break;
1168	default:
1169		return -ENOPROTOOPT;
1170	}
1171
1172	if (put_user(len, optlen))
1173		return -EFAULT;
1174	if (copy_to_user(optval, &val, len))
1175		return -EFAULT;
1176	return 0;
1177}
1178
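/*
 * Illustrative userspace sketch (not from this file): toggling the only
 * KCM-level socket option handled above. Assumes the libc headers expose
 * SOL_KCM; KCM_RECV_DISABLE comes from <linux/kcm.h>, and the helper name is
 * made up. Disabling receive on one KCM socket requeues its pending and
 * future messages to the other KCM sockets on the same mux.
 */
#include <sys/socket.h>
#include <linux/kcm.h>

static int kcm_set_recv_disable(int kcm_fd, int disable)
{
	return setsockopt(kcm_fd, SOL_KCM, KCM_RECV_DISABLE,
			  &disable, sizeof(disable));
}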
1179static void init_kcm_sock(struct kcm_sock *kcm, struct kcm_mux *mux)
1180{
1181	struct kcm_sock *tkcm;
1182	struct list_head *head;
1183	int index = 0;
1184
1185	/* For SOCK_SEQPACKET sock type, datagram_poll checks the sk_state, so
1186	 * we set sk_state, otherwise epoll_wait always returns right away with
1187	 * EPOLLHUP
1188	 */
1189	kcm->sk.sk_state = TCP_ESTABLISHED;
1190
1191	/* Add to mux's kcm sockets list */
1192	kcm->mux = mux;
1193	spin_lock_bh(&mux->lock);
1194
1195	head = &mux->kcm_socks;
1196	list_for_each_entry(tkcm, &mux->kcm_socks, kcm_sock_list) {
1197		if (tkcm->index != index)
1198			break;
1199		head = &tkcm->kcm_sock_list;
1200		index++;
1201	}
1202
1203	list_add(&kcm->kcm_sock_list, head);
1204	kcm->index = index;
1205
1206	mux->kcm_socks_cnt++;
1207	spin_unlock_bh(&mux->lock);
1208
1209	INIT_WORK(&kcm->tx_work, kcm_tx_work);
1210	mutex_init(&kcm->tx_mutex);
1211
1212	spin_lock_bh(&mux->rx_lock);
1213	kcm_rcv_ready(kcm);
1214	spin_unlock_bh(&mux->rx_lock);
1215}
1216
1217static int kcm_attach(struct socket *sock, struct socket *csock,
1218		      struct bpf_prog *prog)
1219{
1220	struct kcm_sock *kcm = kcm_sk(sock->sk);
1221	struct kcm_mux *mux = kcm->mux;
1222	struct sock *csk;
1223	struct kcm_psock *psock = NULL, *tpsock;
1224	struct list_head *head;
1225	int index = 0;
1226	static const struct strp_callbacks cb = {
1227		.rcv_msg = kcm_rcv_strparser,
1228		.parse_msg = kcm_parse_func_strparser,
1229		.read_sock_done = kcm_read_sock_done,
1230	};
1231	int err = 0;
1232
1233	csk = csock->sk;
1234	if (!csk)
1235		return -EINVAL;
1236
1237	lock_sock(csk);
1238
1239	/* Only allow TCP sockets to be attached for now */
1240	if ((csk->sk_family != AF_INET && csk->sk_family != AF_INET6) ||
1241	    csk->sk_protocol != IPPROTO_TCP) {
1242		err = -EOPNOTSUPP;
1243		goto out;
1244	}
1245
1246	/* Don't allow listeners or closed sockets */
1247	if (csk->sk_state == TCP_LISTEN || csk->sk_state == TCP_CLOSE) {
1248		err = -EOPNOTSUPP;
1249		goto out;
1250	}
1251
1252	psock = kmem_cache_zalloc(kcm_psockp, GFP_KERNEL);
1253	if (!psock) {
1254		err = -ENOMEM;
1255		goto out;
1256	}
1257
1258	psock->mux = mux;
1259	psock->sk = csk;
1260	psock->bpf_prog = prog;
1261
1262	write_lock_bh(&csk->sk_callback_lock);
 
1263
 1264	/* Check if sk_user_data is already in use by KCM or someone else.
1265	 * Must be done under lock to prevent race conditions.
1266	 */
1267	if (csk->sk_user_data) {
1268		write_unlock_bh(&csk->sk_callback_lock);
1269		kmem_cache_free(kcm_psockp, psock);
1270		err = -EALREADY;
1271		goto out;
1272	}
1273
1274	err = strp_init(&psock->strp, csk, &cb);
1275	if (err) {
1276		write_unlock_bh(&csk->sk_callback_lock);
1277		kmem_cache_free(kcm_psockp, psock);
1278		goto out;
1279	}
1280
 
1281	psock->save_data_ready = csk->sk_data_ready;
1282	psock->save_write_space = csk->sk_write_space;
1283	psock->save_state_change = csk->sk_state_change;
1284	csk->sk_user_data = psock;
1285	csk->sk_data_ready = psock_data_ready;
1286	csk->sk_write_space = psock_write_space;
1287	csk->sk_state_change = psock_state_change;
1288
1289	write_unlock_bh(&csk->sk_callback_lock);
1290
1291	sock_hold(csk);
1292
1293	/* Finished initialization, now add the psock to the MUX. */
1294	spin_lock_bh(&mux->lock);
1295	head = &mux->psocks;
1296	list_for_each_entry(tpsock, &mux->psocks, psock_list) {
1297		if (tpsock->index != index)
1298			break;
1299		head = &tpsock->psock_list;
1300		index++;
1301	}
1302
1303	list_add(&psock->psock_list, head);
1304	psock->index = index;
1305
1306	KCM_STATS_INCR(mux->stats.psock_attach);
1307	mux->psocks_cnt++;
1308	psock_now_avail(psock);
1309	spin_unlock_bh(&mux->lock);
1310
1311	/* Schedule RX work in case there are already bytes queued */
1312	strp_check_rcv(&psock->strp);
1313
1314out:
1315	release_sock(csk);
1316
1317	return err;
1318}
1319
1320static int kcm_attach_ioctl(struct socket *sock, struct kcm_attach *info)
1321{
1322	struct socket *csock;
1323	struct bpf_prog *prog;
1324	int err;
1325
1326	csock = sockfd_lookup(info->fd, &err);
1327	if (!csock)
1328		return -ENOENT;
1329
1330	prog = bpf_prog_get_type(info->bpf_fd, BPF_PROG_TYPE_SOCKET_FILTER);
1331	if (IS_ERR(prog)) {
1332		err = PTR_ERR(prog);
1333		goto out;
1334	}
1335
1336	err = kcm_attach(sock, csock, prog);
1337	if (err) {
1338		bpf_prog_put(prog);
1339		goto out;
1340	}
1341
1342	/* Keep reference on file also */
1343
1344	return 0;
1345out:
1346	sockfd_put(csock);
1347	return err;
1348}
1349
1350static void kcm_unattach(struct kcm_psock *psock)
1351{
1352	struct sock *csk = psock->sk;
1353	struct kcm_mux *mux = psock->mux;
1354
1355	lock_sock(csk);
1356
1357	/* Stop getting callbacks from TCP socket. After this there should
1358	 * be no way to reserve a kcm for this psock.
1359	 */
1360	write_lock_bh(&csk->sk_callback_lock);
1361	csk->sk_user_data = NULL;
1362	csk->sk_data_ready = psock->save_data_ready;
1363	csk->sk_write_space = psock->save_write_space;
1364	csk->sk_state_change = psock->save_state_change;
1365	strp_stop(&psock->strp);
1366
1367	if (WARN_ON(psock->rx_kcm)) {
1368		write_unlock_bh(&csk->sk_callback_lock);
1369		release_sock(csk);
1370		return;
1371	}
1372
1373	spin_lock_bh(&mux->rx_lock);
1374
1375	/* Stop receiver activities. After this point psock should not be
1376	 * able to get onto ready list either through callbacks or work.
1377	 */
1378	if (psock->ready_rx_msg) {
1379		list_del(&psock->psock_ready_list);
1380		kfree_skb(psock->ready_rx_msg);
1381		psock->ready_rx_msg = NULL;
1382		KCM_STATS_INCR(mux->stats.rx_ready_drops);
1383	}
1384
1385	spin_unlock_bh(&mux->rx_lock);
1386
1387	write_unlock_bh(&csk->sk_callback_lock);
1388
1389	/* Call strp_done without sock lock */
1390	release_sock(csk);
1391	strp_done(&psock->strp);
1392	lock_sock(csk);
1393
1394	bpf_prog_put(psock->bpf_prog);
1395
1396	spin_lock_bh(&mux->lock);
1397
1398	aggregate_psock_stats(&psock->stats, &mux->aggregate_psock_stats);
1399	save_strp_stats(&psock->strp, &mux->aggregate_strp_stats);
1400
1401	KCM_STATS_INCR(mux->stats.psock_unattach);
1402
1403	if (psock->tx_kcm) {
1404		/* psock was reserved.  Just mark it finished and we will clean
 1405		 * up in the kcm paths; we need the kcm lock, which cannot be
1406		 * acquired here.
1407		 */
1408		KCM_STATS_INCR(mux->stats.psock_unattach_rsvd);
1409		spin_unlock_bh(&mux->lock);
1410
1411		/* We are unattaching a socket that is reserved. Abort the
1412		 * socket since we may be out of sync in sending on it. We need
1413		 * to do this without the mux lock.
1414		 */
1415		kcm_abort_tx_psock(psock, EPIPE, false);
1416
1417		spin_lock_bh(&mux->lock);
1418		if (!psock->tx_kcm) {
 1419			/* psock was unreserved in the window while the mux was unlocked */
1420			goto no_reserved;
1421		}
1422		psock->done = 1;
1423
1424		/* Commit done before queuing work to process it */
1425		smp_mb();
1426
1427		/* Queue tx work to make sure psock->done is handled */
1428		queue_work(kcm_wq, &psock->tx_kcm->tx_work);
1429		spin_unlock_bh(&mux->lock);
1430	} else {
1431no_reserved:
1432		if (!psock->tx_stopped)
1433			list_del(&psock->psock_avail_list);
1434		list_del(&psock->psock_list);
1435		mux->psocks_cnt--;
1436		spin_unlock_bh(&mux->lock);
1437
1438		sock_put(csk);
1439		fput(csk->sk_socket->file);
1440		kmem_cache_free(kcm_psockp, psock);
1441	}
1442
1443	release_sock(csk);
1444}
1445
1446static int kcm_unattach_ioctl(struct socket *sock, struct kcm_unattach *info)
1447{
1448	struct kcm_sock *kcm = kcm_sk(sock->sk);
1449	struct kcm_mux *mux = kcm->mux;
1450	struct kcm_psock *psock;
1451	struct socket *csock;
1452	struct sock *csk;
1453	int err;
1454
1455	csock = sockfd_lookup(info->fd, &err);
1456	if (!csock)
1457		return -ENOENT;
1458
1459	csk = csock->sk;
1460	if (!csk) {
1461		err = -EINVAL;
1462		goto out;
1463	}
1464
1465	err = -ENOENT;
1466
1467	spin_lock_bh(&mux->lock);
1468
1469	list_for_each_entry(psock, &mux->psocks, psock_list) {
1470		if (psock->sk != csk)
1471			continue;
1472
1473		/* Found the matching psock */
1474
1475		if (psock->unattaching || WARN_ON(psock->done)) {
1476			err = -EALREADY;
1477			break;
1478		}
1479
1480		psock->unattaching = 1;
1481
1482		spin_unlock_bh(&mux->lock);
1483
1484		/* Lower socket lock should already be held */
1485		kcm_unattach(psock);
1486
1487		err = 0;
1488		goto out;
1489	}
1490
1491	spin_unlock_bh(&mux->lock);
1492
1493out:
1494	sockfd_put(csock);
1495	return err;
1496}
1497
1498static struct proto kcm_proto = {
1499	.name	= "KCM",
1500	.owner	= THIS_MODULE,
1501	.obj_size = sizeof(struct kcm_sock),
1502};
1503
1504/* Clone a kcm socket. */
1505static struct file *kcm_clone(struct socket *osock)
 
1506{
1507	struct socket *newsock;
1508	struct sock *newsk;
 
 
1509
 
1510	newsock = sock_alloc();
1511	if (!newsock)
1512		return ERR_PTR(-ENFILE);
1513
1514	newsock->type = osock->type;
1515	newsock->ops = osock->ops;
1516
1517	__module_get(newsock->ops->owner);
1518
1519	newsk = sk_alloc(sock_net(osock->sk), PF_KCM, GFP_KERNEL,
1520			 &kcm_proto, false);
1521	if (!newsk) {
1522		sock_release(newsock);
1523		return ERR_PTR(-ENOMEM);
1524	}
 
1525	sock_init_data(newsock, newsk);
1526	init_kcm_sock(kcm_sk(newsk), kcm_sk(osock->sk)->mux);
1527
1528	return sock_alloc_file(newsock, 0, osock->sk->sk_prot_creator->name);
1529}
1530
1531static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1532{
1533	int err;
1534
1535	switch (cmd) {
1536	case SIOCKCMATTACH: {
1537		struct kcm_attach info;
1538
1539		if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
1540			return -EFAULT;
1541
1542		err = kcm_attach_ioctl(sock, &info);
1543
1544		break;
1545	}
1546	case SIOCKCMUNATTACH: {
1547		struct kcm_unattach info;
1548
1549		if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
1550			return -EFAULT;
1551
1552		err = kcm_unattach_ioctl(sock, &info);
1553
1554		break;
1555	}
1556	case SIOCKCMCLONE: {
1557		struct kcm_clone info;
1558		struct file *file;
1559
1560		info.fd = get_unused_fd_flags(0);
1561		if (unlikely(info.fd < 0))
1562			return info.fd;
1563
1564		file = kcm_clone(sock);
1565		if (IS_ERR(file)) {
1566			put_unused_fd(info.fd);
1567			return PTR_ERR(file);
1568		}
1569		if (copy_to_user((void __user *)arg, &info,
1570				 sizeof(info))) {
1571			put_unused_fd(info.fd);
1572			fput(file);
1573			return -EFAULT;
1574		}
1575		fd_install(info.fd, file);
1576		err = 0;
1577		break;
1578	}
1579	default:
1580		err = -ENOIOCTLCMD;
1581		break;
1582	}
1583
1584	return err;
1585}
1586
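/*
 * Illustrative userspace sketch (not from this file) of driving the ioctls
 * handled above. "tcp_fd" must be a connected TCP socket and "bpf_prog_fd" a
 * loaded BPF_PROG_TYPE_SOCKET_FILTER program that parses message lengths;
 * the fds and helper names are assumptions of the example.
 */
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/kcm.h>

static int kcm_attach_tcp(int kcm_fd, int tcp_fd, int bpf_prog_fd)
{
	struct kcm_attach attach = {
		.fd = tcp_fd,		/* lower transport socket */
		.bpf_fd = bpf_prog_fd,	/* message length parser */
	};

	return ioctl(kcm_fd, SIOCKCMATTACH, &attach);
}

static int kcm_clone_fd(int kcm_fd)
{
	struct kcm_clone clone = { 0 };

	/* On success the kernel installs a new KCM fd and reports it here. */
	if (ioctl(kcm_fd, SIOCKCMCLONE, &clone) < 0)
		return -1;
	return clone.fd;
}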
1587static void release_mux(struct kcm_mux *mux)
1588{
1589	struct kcm_net *knet = mux->knet;
1590	struct kcm_psock *psock, *tmp_psock;
1591
1592	/* Release psocks */
1593	list_for_each_entry_safe(psock, tmp_psock,
1594				 &mux->psocks, psock_list) {
1595		if (!WARN_ON(psock->unattaching))
1596			kcm_unattach(psock);
1597	}
1598
1599	if (WARN_ON(mux->psocks_cnt))
1600		return;
1601
1602	__skb_queue_purge(&mux->rx_hold_queue);
1603
1604	mutex_lock(&knet->mutex);
1605	aggregate_mux_stats(&mux->stats, &knet->aggregate_mux_stats);
1606	aggregate_psock_stats(&mux->aggregate_psock_stats,
1607			      &knet->aggregate_psock_stats);
1608	aggregate_strp_stats(&mux->aggregate_strp_stats,
1609			     &knet->aggregate_strp_stats);
1610	list_del_rcu(&mux->kcm_mux_list);
1611	knet->count--;
1612	mutex_unlock(&knet->mutex);
1613
1614	kfree_rcu(mux, rcu);
1615}
1616
1617static void kcm_done(struct kcm_sock *kcm)
1618{
1619	struct kcm_mux *mux = kcm->mux;
1620	struct sock *sk = &kcm->sk;
1621	int socks_cnt;
1622
1623	spin_lock_bh(&mux->rx_lock);
1624	if (kcm->rx_psock) {
1625		/* Cleanup in unreserve_rx_kcm */
1626		WARN_ON(kcm->done);
1627		kcm->rx_disabled = 1;
1628		kcm->done = 1;
1629		spin_unlock_bh(&mux->rx_lock);
1630		return;
1631	}
1632
1633	if (kcm->rx_wait) {
1634		list_del(&kcm->wait_rx_list);
1635		/* paired with lockless reads in kcm_rfree() */
1636		WRITE_ONCE(kcm->rx_wait, false);
1637	}
1638	/* Move any pending receive messages to other kcm sockets */
1639	requeue_rx_msgs(mux, &sk->sk_receive_queue);
1640
1641	spin_unlock_bh(&mux->rx_lock);
1642
1643	if (WARN_ON(sk_rmem_alloc_get(sk)))
1644		return;
1645
1646	/* Detach from MUX */
1647	spin_lock_bh(&mux->lock);
1648
1649	list_del(&kcm->kcm_sock_list);
1650	mux->kcm_socks_cnt--;
1651	socks_cnt = mux->kcm_socks_cnt;
1652
1653	spin_unlock_bh(&mux->lock);
1654
1655	if (!socks_cnt) {
1656		/* We are done with the mux now. */
1657		release_mux(mux);
1658	}
1659
1660	WARN_ON(kcm->rx_wait);
1661
1662	sock_put(&kcm->sk);
1663}
1664
1665/* Called by kcm_release to close a KCM socket.
1666 * If this is the last KCM socket on the MUX, destroy the MUX.
1667 */
1668static int kcm_release(struct socket *sock)
1669{
1670	struct sock *sk = sock->sk;
1671	struct kcm_sock *kcm;
1672	struct kcm_mux *mux;
1673	struct kcm_psock *psock;
1674
1675	if (!sk)
1676		return 0;
1677
1678	kcm = kcm_sk(sk);
1679	mux = kcm->mux;
1680
1681	lock_sock(sk);
1682	sock_orphan(sk);
1683	kfree_skb(kcm->seq_skb);
1684
 
1685	/* Purge queue under lock to avoid race condition with tx_work trying
1686	 * to act when queue is nonempty. If tx_work runs after this point
1687	 * it will just return.
1688	 */
1689	__skb_queue_purge(&sk->sk_write_queue);
1690
1691	/* Set tx_stopped. This is checked when psock is bound to a kcm and we
1692	 * get a writespace callback. This prevents further work being queued
 1693	 * from the callback (unbinding the psock occurs after canceling work).
1694	 */
1695	kcm->tx_stopped = 1;
1696
1697	release_sock(sk);
1698
1699	spin_lock_bh(&mux->lock);
1700	if (kcm->tx_wait) {
 1701		/* Take off the tx_wait list; after this point there should be no way
1702		 * that a psock will be assigned to this kcm.
1703		 */
1704		list_del(&kcm->wait_psock_list);
1705		kcm->tx_wait = false;
1706	}
1707	spin_unlock_bh(&mux->lock);
1708
1709	/* Cancel work. After this point there should be no outside references
1710	 * to the kcm socket.
1711	 */
1712	cancel_work_sync(&kcm->tx_work);
1713
1714	lock_sock(sk);
1715	psock = kcm->tx_psock;
1716	if (psock) {
1717		/* A psock was reserved, so we need to kill it since it
1718		 * may already have some bytes queued from a message. We
1719		 * need to do this after removing kcm from tx_wait list.
1720		 */
1721		kcm_abort_tx_psock(psock, EPIPE, false);
1722		unreserve_psock(kcm);
1723	}
1724	release_sock(sk);
1725
1726	WARN_ON(kcm->tx_wait);
1727	WARN_ON(kcm->tx_psock);
1728
1729	sock->sk = NULL;
1730
1731	kcm_done(kcm);
1732
1733	return 0;
1734}
1735
1736static const struct proto_ops kcm_dgram_ops = {
1737	.family =	PF_KCM,
1738	.owner =	THIS_MODULE,
1739	.release =	kcm_release,
1740	.bind =		sock_no_bind,
1741	.connect =	sock_no_connect,
1742	.socketpair =	sock_no_socketpair,
1743	.accept =	sock_no_accept,
1744	.getname =	sock_no_getname,
1745	.poll =		datagram_poll,
1746	.ioctl =	kcm_ioctl,
1747	.listen =	sock_no_listen,
1748	.shutdown =	sock_no_shutdown,
1749	.setsockopt =	kcm_setsockopt,
1750	.getsockopt =	kcm_getsockopt,
1751	.sendmsg =	kcm_sendmsg,
1752	.recvmsg =	kcm_recvmsg,
1753	.mmap =		sock_no_mmap,
1754	.splice_eof =	kcm_splice_eof,
1755};
1756
1757static const struct proto_ops kcm_seqpacket_ops = {
1758	.family =	PF_KCM,
1759	.owner =	THIS_MODULE,
1760	.release =	kcm_release,
1761	.bind =		sock_no_bind,
1762	.connect =	sock_no_connect,
1763	.socketpair =	sock_no_socketpair,
1764	.accept =	sock_no_accept,
1765	.getname =	sock_no_getname,
1766	.poll =		datagram_poll,
1767	.ioctl =	kcm_ioctl,
1768	.listen =	sock_no_listen,
1769	.shutdown =	sock_no_shutdown,
1770	.setsockopt =	kcm_setsockopt,
1771	.getsockopt =	kcm_getsockopt,
1772	.sendmsg =	kcm_sendmsg,
1773	.recvmsg =	kcm_recvmsg,
1774	.mmap =		sock_no_mmap,
1775	.splice_eof =	kcm_splice_eof,
1776	.splice_read =	kcm_splice_read,
1777};
1778
1779/* Create proto operation for kcm sockets */
1780static int kcm_create(struct net *net, struct socket *sock,
1781		      int protocol, int kern)
1782{
1783	struct kcm_net *knet = net_generic(net, kcm_net_id);
1784	struct sock *sk;
1785	struct kcm_mux *mux;
1786
1787	switch (sock->type) {
1788	case SOCK_DGRAM:
1789		sock->ops = &kcm_dgram_ops;
1790		break;
1791	case SOCK_SEQPACKET:
1792		sock->ops = &kcm_seqpacket_ops;
1793		break;
1794	default:
1795		return -ESOCKTNOSUPPORT;
1796	}
1797
1798	if (protocol != KCMPROTO_CONNECTED)
1799		return -EPROTONOSUPPORT;
1800
1801	sk = sk_alloc(net, PF_KCM, GFP_KERNEL, &kcm_proto, kern);
1802	if (!sk)
1803		return -ENOMEM;
1804
1805	/* Allocate a kcm mux, shared between KCM sockets */
1806	mux = kmem_cache_zalloc(kcm_muxp, GFP_KERNEL);
1807	if (!mux) {
1808		sk_free(sk);
1809		return -ENOMEM;
1810	}
1811
1812	spin_lock_init(&mux->lock);
1813	spin_lock_init(&mux->rx_lock);
1814	INIT_LIST_HEAD(&mux->kcm_socks);
1815	INIT_LIST_HEAD(&mux->kcm_rx_waiters);
1816	INIT_LIST_HEAD(&mux->kcm_tx_waiters);
1817
1818	INIT_LIST_HEAD(&mux->psocks);
1819	INIT_LIST_HEAD(&mux->psocks_ready);
1820	INIT_LIST_HEAD(&mux->psocks_avail);
1821
1822	mux->knet = knet;
1823
1824	/* Add new MUX to list */
1825	mutex_lock(&knet->mutex);
1826	list_add_rcu(&mux->kcm_mux_list, &knet->mux_list);
1827	knet->count++;
1828	mutex_unlock(&knet->mutex);
1829
1830	skb_queue_head_init(&mux->rx_hold_queue);
1831
1832	/* Init KCM socket */
1833	sock_init_data(sock, sk);
1834	init_kcm_sock(kcm_sk(sk), mux);
1835
1836	return 0;
1837}
1838
1839static const struct net_proto_family kcm_family_ops = {
1840	.family = PF_KCM,
1841	.create = kcm_create,
1842	.owner  = THIS_MODULE,
1843};
1844
1845static __net_init int kcm_init_net(struct net *net)
1846{
1847	struct kcm_net *knet = net_generic(net, kcm_net_id);
1848
1849	INIT_LIST_HEAD_RCU(&knet->mux_list);
1850	mutex_init(&knet->mutex);
1851
1852	return 0;
1853}
1854
1855static __net_exit void kcm_exit_net(struct net *net)
1856{
1857	struct kcm_net *knet = net_generic(net, kcm_net_id);
1858
1859	/* All KCM sockets should be closed at this point, which should mean
1860	 * that all multiplexors and psocks have been destroyed.
1861	 */
1862	WARN_ON(!list_empty(&knet->mux_list));
1863
1864	mutex_destroy(&knet->mutex);
1865}
1866
1867static struct pernet_operations kcm_net_ops = {
1868	.init = kcm_init_net,
1869	.exit = kcm_exit_net,
1870	.id   = &kcm_net_id,
1871	.size = sizeof(struct kcm_net),
1872};
1873
1874static int __init kcm_init(void)
1875{
1876	int err = -ENOMEM;
1877
1878	kcm_muxp = KMEM_CACHE(kcm_mux, SLAB_HWCACHE_ALIGN);
 
 
1879	if (!kcm_muxp)
1880		goto fail;
1881
1882	kcm_psockp = KMEM_CACHE(kcm_psock, SLAB_HWCACHE_ALIGN);
 
 
1883	if (!kcm_psockp)
1884		goto fail;
1885
1886	kcm_wq = create_singlethread_workqueue("kkcmd");
1887	if (!kcm_wq)
1888		goto fail;
1889
1890	err = proto_register(&kcm_proto, 1);
1891	if (err)
1892		goto fail;
1893
1894	err = register_pernet_device(&kcm_net_ops);
1895	if (err)
1896		goto net_ops_fail;
1897
1898	err = sock_register(&kcm_family_ops);
1899	if (err)
1900		goto sock_register_fail;
1901
1902	err = kcm_proc_init();
1903	if (err)
1904		goto proc_init_fail;
1905
1906	return 0;
1907
1908proc_init_fail:
1909	sock_unregister(PF_KCM);
1910
1911sock_register_fail:
1912	unregister_pernet_device(&kcm_net_ops);
1913
1914net_ops_fail:
1915	proto_unregister(&kcm_proto);
1916
1917fail:
1918	kmem_cache_destroy(kcm_muxp);
1919	kmem_cache_destroy(kcm_psockp);
1920
1921	if (kcm_wq)
1922		destroy_workqueue(kcm_wq);
1923
1924	return err;
1925}
1926
1927static void __exit kcm_exit(void)
1928{
1929	kcm_proc_exit();
 
1930	sock_unregister(PF_KCM);
1931	unregister_pernet_device(&kcm_net_ops);
1932	proto_unregister(&kcm_proto);
1933	destroy_workqueue(kcm_wq);
1934
1935	kmem_cache_destroy(kcm_muxp);
1936	kmem_cache_destroy(kcm_psockp);
1937}
1938
1939module_init(kcm_init);
1940module_exit(kcm_exit);
1941
1942MODULE_LICENSE("GPL");
1943MODULE_DESCRIPTION("KCM (Kernel Connection Multiplexor) sockets");
1944MODULE_ALIAS_NETPROTO(PF_KCM);