   1/*
   2 * Kernel Connection Multiplexor
   3 *
   4 * Copyright (c) 2016 Tom Herbert <tom@herbertland.com>
   5 *
   6 * This program is free software; you can redistribute it and/or modify
   7 * it under the terms of the GNU General Public License version 2
   8 * as published by the Free Software Foundation.
   9 */
  10
  11#include <linux/bpf.h>
  12#include <linux/errno.h>
  13#include <linux/errqueue.h>
  14#include <linux/file.h>
  15#include <linux/in.h>
  16#include <linux/kernel.h>
  17#include <linux/module.h>
  18#include <linux/net.h>
  19#include <linux/netdevice.h>
  20#include <linux/poll.h>
  21#include <linux/rculist.h>
  22#include <linux/skbuff.h>
  23#include <linux/socket.h>
  24#include <linux/uaccess.h>
  25#include <linux/workqueue.h>
  26#include <linux/syscalls.h>
  27#include <net/kcm.h>
  28#include <net/netns/generic.h>
  29#include <net/sock.h>
  30#include <uapi/linux/kcm.h>
  31
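
This file implements the kernel side of the KCM API; from user space it is typically driven roughly as in the following sketch (illustrative only, not part of the kernel source). It assumes tcp_fd is a connected TCP socket, bpf_prog_fd is an already-loaded BPF_PROG_TYPE_SOCKET_FILTER program that returns the length of the next message, and that the toolchain headers expose AF_KCM and the definitions from <linux/kcm.h>.

#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/kcm.h>      /* struct kcm_attach, KCMPROTO_CONNECTED, SIOCKCMATTACH */
#include <linux/sockios.h>  /* SIOCPROTOPRIVATE, used by the SIOCKCM* definitions */

/* Illustrative sketch only: create a KCM socket and attach a connected TCP
 * socket together with a message-parsing BPF program to its mux.
 */
static int kcm_create_and_attach(int tcp_fd, int bpf_prog_fd)
{
	struct kcm_attach attach = {
		.fd = tcp_fd,		/* connected TCP socket to attach */
		.bpf_fd = bpf_prog_fd,	/* BPF_PROG_TYPE_SOCKET_FILTER program */
	};
	int kcm_fd;

	kcm_fd = socket(AF_KCM, SOCK_SEQPACKET, KCMPROTO_CONNECTED);
	if (kcm_fd < 0)
		return -1;

	if (ioctl(kcm_fd, SIOCKCMATTACH, &attach) < 0) {
		perror("SIOCKCMATTACH");
		close(kcm_fd);
		return -1;
	}

	return kcm_fd;
}
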
  32unsigned int kcm_net_id;
  33
  34static struct kmem_cache *kcm_psockp __read_mostly;
  35static struct kmem_cache *kcm_muxp __read_mostly;
  36static struct workqueue_struct *kcm_wq;
  37
  38static inline struct kcm_sock *kcm_sk(const struct sock *sk)
  39{
  40	return (struct kcm_sock *)sk;
  41}
  42
  43static inline struct kcm_tx_msg *kcm_tx_msg(struct sk_buff *skb)
  44{
  45	return (struct kcm_tx_msg *)skb->cb;
  46}
  47
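/* Report an error on the lower (attached) socket.  Note that sk_err is set
 * to EPIPE unconditionally here; the err argument is not used in the body.
 */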
  48static void report_csk_error(struct sock *csk, int err)
  49{
  50	csk->sk_err = EPIPE;
  51	csk->sk_error_report(csk);
  52}
  53
  54static void kcm_abort_tx_psock(struct kcm_psock *psock, int err,
  55			       bool wakeup_kcm)
  56{
  57	struct sock *csk = psock->sk;
  58	struct kcm_mux *mux = psock->mux;
  59
  60	/* Unrecoverable error in transmit */
  61
  62	spin_lock_bh(&mux->lock);
  63
  64	if (psock->tx_stopped) {
  65		spin_unlock_bh(&mux->lock);
  66		return;
  67	}
  68
  69	psock->tx_stopped = 1;
  70	KCM_STATS_INCR(psock->stats.tx_aborts);
  71
  72	if (!psock->tx_kcm) {
  73		/* Take off psocks_avail list */
  74		list_del(&psock->psock_avail_list);
  75	} else if (wakeup_kcm) {
  76		/* In this case psock is being aborted while outside of
  77		 * write_msgs and psock is reserved. Schedule tx_work
  78		 * to handle the failure there. Need to commit tx_stopped
  79		 * before queuing work.
  80		 */
  81		smp_mb();
  82
  83		queue_work(kcm_wq, &psock->tx_kcm->tx_work);
  84	}
  85
  86	spin_unlock_bh(&mux->lock);
  87
  88	/* Report error on lower socket */
  89	report_csk_error(csk, err);
  90}
  91
  92/* RX mux lock held. */
  93static void kcm_update_rx_mux_stats(struct kcm_mux *mux,
  94				    struct kcm_psock *psock)
  95{
  96	STRP_STATS_ADD(mux->stats.rx_bytes,
  97		       psock->strp.stats.rx_bytes -
  98		       psock->saved_rx_bytes);
  99	mux->stats.rx_msgs +=
 100		psock->strp.stats.rx_msgs - psock->saved_rx_msgs;
 101	psock->saved_rx_msgs = psock->strp.stats.rx_msgs;
 102	psock->saved_rx_bytes = psock->strp.stats.rx_bytes;
 103}
 104
 105static void kcm_update_tx_mux_stats(struct kcm_mux *mux,
 106				    struct kcm_psock *psock)
 107{
 108	KCM_STATS_ADD(mux->stats.tx_bytes,
 109		      psock->stats.tx_bytes - psock->saved_tx_bytes);
 110	mux->stats.tx_msgs +=
 111		psock->stats.tx_msgs - psock->saved_tx_msgs;
 112	psock->saved_tx_msgs = psock->stats.tx_msgs;
 113	psock->saved_tx_bytes = psock->stats.tx_bytes;
 114}
 115
 116static int kcm_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
 117
 118/* KCM is ready to receive messages on its queue-- either the KCM is new or
 119 * has become unblocked after being blocked on full socket buffer. Queue any
 120 * pending ready messages on a psock. RX mux lock held.
 121 */
 122static void kcm_rcv_ready(struct kcm_sock *kcm)
 123{
 124	struct kcm_mux *mux = kcm->mux;
 125	struct kcm_psock *psock;
 126	struct sk_buff *skb;
 127
 128	if (unlikely(kcm->rx_wait || kcm->rx_psock || kcm->rx_disabled))
 129		return;
 130
 131	while (unlikely((skb = __skb_dequeue(&mux->rx_hold_queue)))) {
 132		if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
 133			/* Assuming buffer limit has been reached */
 134			skb_queue_head(&mux->rx_hold_queue, skb);
 135			WARN_ON(!sk_rmem_alloc_get(&kcm->sk));
 136			return;
 137		}
 138	}
 139
 140	while (!list_empty(&mux->psocks_ready)) {
 141		psock = list_first_entry(&mux->psocks_ready, struct kcm_psock,
 142					 psock_ready_list);
 143
 144		if (kcm_queue_rcv_skb(&kcm->sk, psock->ready_rx_msg)) {
 145			/* Assuming buffer limit has been reached */
 146			WARN_ON(!sk_rmem_alloc_get(&kcm->sk));
 147			return;
 148		}
 149
 150		/* Consumed the ready message on the psock. Schedule rx_work to
 151		 * get more messages.
 152		 */
 153		list_del(&psock->psock_ready_list);
 154		psock->ready_rx_msg = NULL;
 155		/* Commit clearing of ready_rx_msg for queuing work */
 156		smp_mb();
 157
 158		strp_unpause(&psock->strp);
 159		strp_check_rcv(&psock->strp);
 160	}
 161
 162	/* Buffer limit is okay now, add to ready list */
 163	list_add_tail(&kcm->wait_rx_list,
 164		      &kcm->mux->kcm_rx_waiters);
 165	kcm->rx_wait = true;
 166}
 167
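/* skb destructor for skbs queued to a KCM socket's receive queue: uncharge
 * the memory and, once the receive buffer has drained below sk_rcvlowat and
 * the socket is neither waiting nor reserved, mark it ready to receive again.
 */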
 168static void kcm_rfree(struct sk_buff *skb)
 169{
 170	struct sock *sk = skb->sk;
 171	struct kcm_sock *kcm = kcm_sk(sk);
 172	struct kcm_mux *mux = kcm->mux;
 173	unsigned int len = skb->truesize;
 174
 175	sk_mem_uncharge(sk, len);
 176	atomic_sub(len, &sk->sk_rmem_alloc);
 177
 178	/* For reading rx_wait and rx_psock without holding lock */
 179	smp_mb__after_atomic();
 180
 181	if (!kcm->rx_wait && !kcm->rx_psock &&
 182	    sk_rmem_alloc_get(sk) < sk->sk_rcvlowat) {
 183		spin_lock_bh(&mux->rx_lock);
 184		kcm_rcv_ready(kcm);
 185		spin_unlock_bh(&mux->rx_lock);
 186	}
 187}
 188
 189static int kcm_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 190{
 191	struct sk_buff_head *list = &sk->sk_receive_queue;
 192
 193	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
 194		return -ENOMEM;
 195
 196	if (!sk_rmem_schedule(sk, skb, skb->truesize))
 197		return -ENOBUFS;
 198
 199	skb->dev = NULL;
 200
 201	skb_orphan(skb);
 202	skb->sk = sk;
 203	skb->destructor = kcm_rfree;
 204	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
 205	sk_mem_charge(sk, skb->truesize);
 206
 207	skb_queue_tail(list, skb);
 208
 209	if (!sock_flag(sk, SOCK_DEAD))
 210		sk->sk_data_ready(sk);
 211
 212	return 0;
 213}
 214
 215/* Requeue received messages for a kcm socket to other kcm sockets. This is
  216 * called when a kcm socket is receive disabled.
 217 * RX mux lock held.
 218 */
 219static void requeue_rx_msgs(struct kcm_mux *mux, struct sk_buff_head *head)
 220{
 221	struct sk_buff *skb;
 222	struct kcm_sock *kcm;
 223
 224	while ((skb = __skb_dequeue(head))) {
 225		/* Reset destructor to avoid calling kcm_rcv_ready */
 226		skb->destructor = sock_rfree;
 227		skb_orphan(skb);
 228try_again:
 229		if (list_empty(&mux->kcm_rx_waiters)) {
 230			skb_queue_tail(&mux->rx_hold_queue, skb);
 231			continue;
 232		}
 233
 234		kcm = list_first_entry(&mux->kcm_rx_waiters,
 235				       struct kcm_sock, wait_rx_list);
 236
 237		if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
 238			/* Should mean socket buffer full */
 239			list_del(&kcm->wait_rx_list);
 240			kcm->rx_wait = false;
 241
  242			/* Commit rx_wait to be read in kcm_rfree */
 243			smp_wmb();
 244
 245			goto try_again;
 246		}
 247	}
 248}
 249
 250/* Lower sock lock held */
 251static struct kcm_sock *reserve_rx_kcm(struct kcm_psock *psock,
 252				       struct sk_buff *head)
 253{
 254	struct kcm_mux *mux = psock->mux;
 255	struct kcm_sock *kcm;
 256
 257	WARN_ON(psock->ready_rx_msg);
 258
 259	if (psock->rx_kcm)
 260		return psock->rx_kcm;
 261
 262	spin_lock_bh(&mux->rx_lock);
 263
 264	if (psock->rx_kcm) {
 265		spin_unlock_bh(&mux->rx_lock);
 266		return psock->rx_kcm;
 267	}
 268
 269	kcm_update_rx_mux_stats(mux, psock);
 270
 271	if (list_empty(&mux->kcm_rx_waiters)) {
 272		psock->ready_rx_msg = head;
 273		strp_pause(&psock->strp);
 274		list_add_tail(&psock->psock_ready_list,
 275			      &mux->psocks_ready);
 276		spin_unlock_bh(&mux->rx_lock);
 277		return NULL;
 278	}
 279
 280	kcm = list_first_entry(&mux->kcm_rx_waiters,
 281			       struct kcm_sock, wait_rx_list);
 282	list_del(&kcm->wait_rx_list);
 283	kcm->rx_wait = false;
 284
 285	psock->rx_kcm = kcm;
 286	kcm->rx_psock = psock;
 287
 288	spin_unlock_bh(&mux->rx_lock);
 289
 290	return kcm;
 291}
 292
 293static void kcm_done(struct kcm_sock *kcm);
 294
 295static void kcm_done_work(struct work_struct *w)
 296{
 297	kcm_done(container_of(w, struct kcm_sock, done_work));
 298}
 299
 300/* Lower sock held */
 301static void unreserve_rx_kcm(struct kcm_psock *psock,
 302			     bool rcv_ready)
 303{
 304	struct kcm_sock *kcm = psock->rx_kcm;
 305	struct kcm_mux *mux = psock->mux;
 306
 307	if (!kcm)
 308		return;
 309
 310	spin_lock_bh(&mux->rx_lock);
 311
 312	psock->rx_kcm = NULL;
 313	kcm->rx_psock = NULL;
 314
 315	/* Commit kcm->rx_psock before sk_rmem_alloc_get to sync with
 316	 * kcm_rfree
 317	 */
 318	smp_mb();
 319
 320	if (unlikely(kcm->done)) {
 321		spin_unlock_bh(&mux->rx_lock);
 322
  323		/* Need to run kcm_done in a task since we need to acquire
 324		 * callback locks which may already be held here.
 325		 */
 326		INIT_WORK(&kcm->done_work, kcm_done_work);
 327		schedule_work(&kcm->done_work);
 328		return;
 329	}
 330
 331	if (unlikely(kcm->rx_disabled)) {
 332		requeue_rx_msgs(mux, &kcm->sk.sk_receive_queue);
 333	} else if (rcv_ready || unlikely(!sk_rmem_alloc_get(&kcm->sk))) {
  334		/* Check for the degenerate race with rx_wait where all
  335		 * data was already dequeued (accounted for in kcm_rfree).
 336		 */
 337		kcm_rcv_ready(kcm);
 338	}
 339	spin_unlock_bh(&mux->rx_lock);
 340}
 341
 342/* Lower sock lock held */
 343static void psock_data_ready(struct sock *sk)
 344{
 345	struct kcm_psock *psock;
 346
 347	read_lock_bh(&sk->sk_callback_lock);
 348
 349	psock = (struct kcm_psock *)sk->sk_user_data;
 350	if (likely(psock))
 351		strp_data_ready(&psock->strp);
 352
 353	read_unlock_bh(&sk->sk_callback_lock);
 354}
 355
 356/* Called with lower sock held */
 357static void kcm_rcv_strparser(struct strparser *strp, struct sk_buff *skb)
 358{
 359	struct kcm_psock *psock = container_of(strp, struct kcm_psock, strp);
 360	struct kcm_sock *kcm;
 361
 362try_queue:
 363	kcm = reserve_rx_kcm(psock, skb);
 364	if (!kcm) {
 365		 /* Unable to reserve a KCM, message is held in psock and strp
 366		  * is paused.
 367		  */
 368		return;
 369	}
 370
 371	if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
 372		/* Should mean socket buffer full */
 373		unreserve_rx_kcm(psock, false);
 374		goto try_queue;
 375	}
 376}
 377
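/* strparser parse_msg callback: run the attached BPF program over the skb.
 * Its return value is the total length of the next message, 0 if more data
 * is needed, or a negative value on error.
 */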
 378static int kcm_parse_func_strparser(struct strparser *strp, struct sk_buff *skb)
 379{
 380	struct kcm_psock *psock = container_of(strp, struct kcm_psock, strp);
 381	struct bpf_prog *prog = psock->bpf_prog;
 382
 383	return (*prog->bpf_func)(skb, prog->insnsi);
 384}
 385
 386static int kcm_read_sock_done(struct strparser *strp, int err)
 387{
 388	struct kcm_psock *psock = container_of(strp, struct kcm_psock, strp);
 389
 390	unreserve_rx_kcm(psock, true);
 391
 392	return err;
 393}
 394
 395static void psock_state_change(struct sock *sk)
 396{
 397	/* TCP only does a POLLIN for a half close. Do a POLLHUP here
 398	 * since application will normally not poll with POLLIN
 399	 * on the TCP sockets.
 400	 */
 401
 402	report_csk_error(sk, EPIPE);
 403}
 404
 405static void psock_write_space(struct sock *sk)
 406{
 407	struct kcm_psock *psock;
 408	struct kcm_mux *mux;
 409	struct kcm_sock *kcm;
 410
 411	read_lock_bh(&sk->sk_callback_lock);
 412
 413	psock = (struct kcm_psock *)sk->sk_user_data;
 414	if (unlikely(!psock))
 415		goto out;
 416	mux = psock->mux;
 417
 418	spin_lock_bh(&mux->lock);
 419
  420	/* Check if the socket is reserved; if so, someone is waiting to send. */
 421	kcm = psock->tx_kcm;
 422	if (kcm && !unlikely(kcm->tx_stopped))
 423		queue_work(kcm_wq, &kcm->tx_work);
 424
 425	spin_unlock_bh(&mux->lock);
 426out:
 427	read_unlock_bh(&sk->sk_callback_lock);
 428}
 429
 430static void unreserve_psock(struct kcm_sock *kcm);
 431
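/* TX psock reservation: a KCM socket must reserve a psock for the duration
 * of a message transmission so that message boundaries on the lower TCP
 * socket are never interleaved.  If no psock is available, the KCM socket is
 * put on kcm_tx_waiters and psock_now_avail() will hand it the next psock
 * that becomes free and queue its tx_work.
 */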
 432/* kcm sock is locked. */
 433static struct kcm_psock *reserve_psock(struct kcm_sock *kcm)
 434{
 435	struct kcm_mux *mux = kcm->mux;
 436	struct kcm_psock *psock;
 437
 438	psock = kcm->tx_psock;
 439
 440	smp_rmb(); /* Must read tx_psock before tx_wait */
 441
 442	if (psock) {
 443		WARN_ON(kcm->tx_wait);
 444		if (unlikely(psock->tx_stopped))
 445			unreserve_psock(kcm);
 446		else
 447			return kcm->tx_psock;
 448	}
 449
 450	spin_lock_bh(&mux->lock);
 451
  452	/* Check again under lock to see if a psock was reserved for this
  453	 * kcm socket in the meantime (via psock_now_avail).
 454	 */
 455	psock = kcm->tx_psock;
 456	if (unlikely(psock)) {
 457		WARN_ON(kcm->tx_wait);
 458		spin_unlock_bh(&mux->lock);
 459		return kcm->tx_psock;
 460	}
 461
 462	if (!list_empty(&mux->psocks_avail)) {
 463		psock = list_first_entry(&mux->psocks_avail,
 464					 struct kcm_psock,
 465					 psock_avail_list);
 466		list_del(&psock->psock_avail_list);
 467		if (kcm->tx_wait) {
 468			list_del(&kcm->wait_psock_list);
 469			kcm->tx_wait = false;
 470		}
 471		kcm->tx_psock = psock;
 472		psock->tx_kcm = kcm;
 473		KCM_STATS_INCR(psock->stats.reserved);
 474	} else if (!kcm->tx_wait) {
 475		list_add_tail(&kcm->wait_psock_list,
 476			      &mux->kcm_tx_waiters);
 477		kcm->tx_wait = true;
 478	}
 479
 480	spin_unlock_bh(&mux->lock);
 481
 482	return psock;
 483}
 484
 485/* mux lock held */
 486static void psock_now_avail(struct kcm_psock *psock)
 487{
 488	struct kcm_mux *mux = psock->mux;
 489	struct kcm_sock *kcm;
 490
 491	if (list_empty(&mux->kcm_tx_waiters)) {
 492		list_add_tail(&psock->psock_avail_list,
 493			      &mux->psocks_avail);
 494	} else {
 495		kcm = list_first_entry(&mux->kcm_tx_waiters,
 496				       struct kcm_sock,
 497				       wait_psock_list);
 498		list_del(&kcm->wait_psock_list);
 499		kcm->tx_wait = false;
 500		psock->tx_kcm = kcm;
 501
 502		/* Commit before changing tx_psock since that is read in
 503		 * reserve_psock before queuing work.
 504		 */
 505		smp_mb();
 506
 507		kcm->tx_psock = psock;
 508		KCM_STATS_INCR(psock->stats.reserved);
 509		queue_work(kcm_wq, &kcm->tx_work);
 510	}
 511}
 512
 513/* kcm sock is locked. */
 514static void unreserve_psock(struct kcm_sock *kcm)
 515{
 516	struct kcm_psock *psock;
 517	struct kcm_mux *mux = kcm->mux;
 518
 519	spin_lock_bh(&mux->lock);
 520
 521	psock = kcm->tx_psock;
 522
 523	if (WARN_ON(!psock)) {
 524		spin_unlock_bh(&mux->lock);
 525		return;
 526	}
 527
 528	smp_rmb(); /* Read tx_psock before tx_wait */
 529
 530	kcm_update_tx_mux_stats(mux, psock);
 531
 532	WARN_ON(kcm->tx_wait);
 533
 534	kcm->tx_psock = NULL;
 535	psock->tx_kcm = NULL;
 536	KCM_STATS_INCR(psock->stats.unreserved);
 537
 538	if (unlikely(psock->tx_stopped)) {
 539		if (psock->done) {
 540			/* Deferred free */
 541			list_del(&psock->psock_list);
 542			mux->psocks_cnt--;
 543			sock_put(psock->sk);
 544			fput(psock->sk->sk_socket->file);
 545			kmem_cache_free(kcm_psockp, psock);
 546		}
 547
 548		/* Don't put back on available list */
 549
 550		spin_unlock_bh(&mux->lock);
 551
 552		return;
 553	}
 554
 555	psock_now_avail(psock);
 556
 557	spin_unlock_bh(&mux->lock);
 558}
 559
 560static void kcm_report_tx_retry(struct kcm_sock *kcm)
 561{
 562	struct kcm_mux *mux = kcm->mux;
 563
 564	spin_lock_bh(&mux->lock);
 565	KCM_STATS_INCR(mux->stats.tx_retries);
 566	spin_unlock_bh(&mux->lock);
 567}
 568
 569/* Write any messages ready on the kcm socket.  Called with kcm sock lock
 570 * held.  Return bytes actually sent or error.
 571 */
 572static int kcm_write_msgs(struct kcm_sock *kcm)
 573{
 574	struct sock *sk = &kcm->sk;
 575	struct kcm_psock *psock;
 576	struct sk_buff *skb, *head;
 577	struct kcm_tx_msg *txm;
 578	unsigned short fragidx, frag_offset;
 579	unsigned int sent, total_sent = 0;
 580	int ret = 0;
 581
 582	kcm->tx_wait_more = false;
 583	psock = kcm->tx_psock;
 584	if (unlikely(psock && psock->tx_stopped)) {
 585		/* A reserved psock was aborted asynchronously. Unreserve
 586		 * it and we'll retry the message.
 587		 */
 588		unreserve_psock(kcm);
 589		kcm_report_tx_retry(kcm);
 590		if (skb_queue_empty(&sk->sk_write_queue))
 591			return 0;
 592
 593		kcm_tx_msg(skb_peek(&sk->sk_write_queue))->sent = 0;
 594
 595	} else if (skb_queue_empty(&sk->sk_write_queue)) {
 596		return 0;
 597	}
 598
 599	head = skb_peek(&sk->sk_write_queue);
 600	txm = kcm_tx_msg(head);
 601
 602	if (txm->sent) {
 603		/* Send of first skbuff in queue already in progress */
 604		if (WARN_ON(!psock)) {
 605			ret = -EINVAL;
 606			goto out;
 607		}
 608		sent = txm->sent;
 609		frag_offset = txm->frag_offset;
 610		fragidx = txm->fragidx;
 611		skb = txm->frag_skb;
 612
 613		goto do_frag;
 614	}
 615
 616try_again:
 617	psock = reserve_psock(kcm);
 618	if (!psock)
 619		goto out;
 620
 621	do {
 622		skb = head;
 623		txm = kcm_tx_msg(head);
 624		sent = 0;
 625
 626do_frag_list:
 627		if (WARN_ON(!skb_shinfo(skb)->nr_frags)) {
 628			ret = -EINVAL;
 629			goto out;
 630		}
 631
 632		for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags;
 633		     fragidx++) {
 634			skb_frag_t *frag;
 635
 636			frag_offset = 0;
 637do_frag:
 638			frag = &skb_shinfo(skb)->frags[fragidx];
 639			if (WARN_ON(!frag->size)) {
 640				ret = -EINVAL;
 641				goto out;
 642			}
 643
 644			ret = kernel_sendpage(psock->sk->sk_socket,
 645					      frag->page.p,
 646					      frag->page_offset + frag_offset,
 647					      frag->size - frag_offset,
 648					      MSG_DONTWAIT);
 649			if (ret <= 0) {
 650				if (ret == -EAGAIN) {
 651					/* Save state to try again when there's
 652					 * write space on the socket
 653					 */
 654					txm->sent = sent;
 655					txm->frag_offset = frag_offset;
 656					txm->fragidx = fragidx;
 657					txm->frag_skb = skb;
 658
 659					ret = 0;
 660					goto out;
 661				}
 662
 663				/* Hard failure in sending message, abort this
 664				 * psock since it has lost framing
  665				 * synchronization and retry sending the
 666				 * message from the beginning.
 667				 */
 668				kcm_abort_tx_psock(psock, ret ? -ret : EPIPE,
 669						   true);
 670				unreserve_psock(kcm);
 671
 672				txm->sent = 0;
 673				kcm_report_tx_retry(kcm);
 674				ret = 0;
 675
 676				goto try_again;
 677			}
 678
 679			sent += ret;
 680			frag_offset += ret;
 681			KCM_STATS_ADD(psock->stats.tx_bytes, ret);
 682			if (frag_offset < frag->size) {
 683				/* Not finished with this frag */
 684				goto do_frag;
 685			}
 686		}
 687
 688		if (skb == head) {
 689			if (skb_has_frag_list(skb)) {
 690				skb = skb_shinfo(skb)->frag_list;
 691				goto do_frag_list;
 692			}
 693		} else if (skb->next) {
 694			skb = skb->next;
 695			goto do_frag_list;
 696		}
 697
 698		/* Successfully sent the whole packet, account for it. */
 699		skb_dequeue(&sk->sk_write_queue);
 700		kfree_skb(head);
 701		sk->sk_wmem_queued -= sent;
 702		total_sent += sent;
 703		KCM_STATS_INCR(psock->stats.tx_msgs);
 704	} while ((head = skb_peek(&sk->sk_write_queue)));
 705out:
 706	if (!head) {
 707		/* Done with all queued messages. */
 708		WARN_ON(!skb_queue_empty(&sk->sk_write_queue));
 709		unreserve_psock(kcm);
 710	}
 711
 712	/* Check if write space is available */
 713	sk->sk_write_space(sk);
 714
 715	return total_sent ? : ret;
 716}
 717
 718static void kcm_tx_work(struct work_struct *w)
 719{
 720	struct kcm_sock *kcm = container_of(w, struct kcm_sock, tx_work);
 721	struct sock *sk = &kcm->sk;
 722	int err;
 723
 724	lock_sock(sk);
 725
 726	/* Primarily for SOCK_DGRAM sockets, also handle asynchronous tx
 727	 * aborts
 728	 */
 729	err = kcm_write_msgs(kcm);
 730	if (err < 0) {
 731		/* Hard failure in write, report error on KCM socket */
 732		pr_warn("KCM: Hard failure on kcm_write_msgs %d\n", err);
 733		report_csk_error(&kcm->sk, -err);
 734		goto out;
 735	}
 736
 737	/* Primarily for SOCK_SEQPACKET sockets */
 738	if (likely(sk->sk_socket) &&
 739	    test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
 740		clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
 741		sk->sk_write_space(sk);
 742	}
 743
 744out:
 745	release_sock(sk);
 746}
 747
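/* Flush any messages that were held back waiting for more data
 * (MSG_BATCH / MSG_MORE) once the caller decides to push them out.
 */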
 748static void kcm_push(struct kcm_sock *kcm)
 749{
 750	if (kcm->tx_wait_more)
 751		kcm_write_msgs(kcm);
 752}
 753
 754static ssize_t kcm_sendpage(struct socket *sock, struct page *page,
 755			    int offset, size_t size, int flags)
 756
 757{
 758	struct sock *sk = sock->sk;
 759	struct kcm_sock *kcm = kcm_sk(sk);
 760	struct sk_buff *skb = NULL, *head = NULL;
 761	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
 762	bool eor;
 763	int err = 0;
 764	int i;
 765
 766	if (flags & MSG_SENDPAGE_NOTLAST)
 767		flags |= MSG_MORE;
 768
 769	/* No MSG_EOR from splice, only look at MSG_MORE */
 770	eor = !(flags & MSG_MORE);
 771
 772	lock_sock(sk);
 773
 774	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
 775
 776	err = -EPIPE;
 777	if (sk->sk_err)
 778		goto out_error;
 779
 780	if (kcm->seq_skb) {
 781		/* Previously opened message */
 782		head = kcm->seq_skb;
 783		skb = kcm_tx_msg(head)->last_skb;
 784		i = skb_shinfo(skb)->nr_frags;
 785
 786		if (skb_can_coalesce(skb, i, page, offset)) {
 787			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size);
 788			skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
 789			goto coalesced;
 790		}
 791
 792		if (i >= MAX_SKB_FRAGS) {
 793			struct sk_buff *tskb;
 794
 795			tskb = alloc_skb(0, sk->sk_allocation);
 796			while (!tskb) {
 797				kcm_push(kcm);
 798				err = sk_stream_wait_memory(sk, &timeo);
 799				if (err)
 800					goto out_error;
 801			}
 802
 803			if (head == skb)
 804				skb_shinfo(head)->frag_list = tskb;
 805			else
 806				skb->next = tskb;
 807
 808			skb = tskb;
 809			skb->ip_summed = CHECKSUM_UNNECESSARY;
 810			i = 0;
 811		}
 812	} else {
 813		/* Call the sk_stream functions to manage the sndbuf mem. */
 814		if (!sk_stream_memory_free(sk)) {
 815			kcm_push(kcm);
 816			set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
 817			err = sk_stream_wait_memory(sk, &timeo);
 818			if (err)
 819				goto out_error;
 820		}
 821
 822		head = alloc_skb(0, sk->sk_allocation);
 823		while (!head) {
 824			kcm_push(kcm);
 825			err = sk_stream_wait_memory(sk, &timeo);
 826			if (err)
 827				goto out_error;
 828		}
 829
 830		skb = head;
 831		i = 0;
 832	}
 833
 834	get_page(page);
 835	skb_fill_page_desc(skb, i, page, offset, size);
 836	skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
 837
 838coalesced:
 839	skb->len += size;
 840	skb->data_len += size;
 841	skb->truesize += size;
 842	sk->sk_wmem_queued += size;
 843	sk_mem_charge(sk, size);
 844
 845	if (head != skb) {
 846		head->len += size;
 847		head->data_len += size;
 848		head->truesize += size;
 849	}
 850
 851	if (eor) {
 852		bool not_busy = skb_queue_empty(&sk->sk_write_queue);
 853
 854		/* Message complete, queue it on send buffer */
 855		__skb_queue_tail(&sk->sk_write_queue, head);
 856		kcm->seq_skb = NULL;
 857		KCM_STATS_INCR(kcm->stats.tx_msgs);
 858
 859		if (flags & MSG_BATCH) {
 860			kcm->tx_wait_more = true;
 861		} else if (kcm->tx_wait_more || not_busy) {
 862			err = kcm_write_msgs(kcm);
 863			if (err < 0) {
 864				/* We got a hard error in write_msgs but have
 865				 * already queued this message. Report an error
 866				 * in the socket, but don't affect return value
 867				 * from sendmsg
 868				 */
 869				pr_warn("KCM: Hard failure on kcm_write_msgs\n");
 870				report_csk_error(&kcm->sk, -err);
 871			}
 872		}
 873	} else {
 874		/* Message not complete, save state */
 875		kcm->seq_skb = head;
 876		kcm_tx_msg(head)->last_skb = skb;
 877	}
 878
 879	KCM_STATS_ADD(kcm->stats.tx_bytes, size);
 880
 881	release_sock(sk);
 882	return size;
 883
 884out_error:
 885	kcm_push(kcm);
 886
 887	err = sk_stream_error(sk, flags, err);
 888
 889	/* make sure we wake any epoll edge trigger waiter */
 890	if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 && err == -EAGAIN))
 891		sk->sk_write_space(sk);
 892
 893	release_sock(sk);
 894	return err;
 895}
 896
 897static int kcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
 898{
 899	struct sock *sk = sock->sk;
 900	struct kcm_sock *kcm = kcm_sk(sk);
 901	struct sk_buff *skb = NULL, *head = NULL;
 902	size_t copy, copied = 0;
 903	long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
 904	int eor = (sock->type == SOCK_DGRAM) ?
 905		  !(msg->msg_flags & MSG_MORE) : !!(msg->msg_flags & MSG_EOR);
 906	int err = -EPIPE;
 907
 908	lock_sock(sk);
 909
 910	/* Per tcp_sendmsg this should be in poll */
 911	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
 912
 913	if (sk->sk_err)
 914		goto out_error;
 915
 916	if (kcm->seq_skb) {
 917		/* Previously opened message */
 918		head = kcm->seq_skb;
 919		skb = kcm_tx_msg(head)->last_skb;
 920		goto start;
 921	}
 922
 923	/* Call the sk_stream functions to manage the sndbuf mem. */
 924	if (!sk_stream_memory_free(sk)) {
 925		kcm_push(kcm);
 926		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
 927		err = sk_stream_wait_memory(sk, &timeo);
 928		if (err)
 929			goto out_error;
 930	}
 931
 932	if (msg_data_left(msg)) {
 933		/* New message, alloc head skb */
 934		head = alloc_skb(0, sk->sk_allocation);
 935		while (!head) {
 936			kcm_push(kcm);
 937			err = sk_stream_wait_memory(sk, &timeo);
 938			if (err)
 939				goto out_error;
 940
 941			head = alloc_skb(0, sk->sk_allocation);
 942		}
 943
 944		skb = head;
 945
 946		/* Set ip_summed to CHECKSUM_UNNECESSARY to avoid calling
 947		 * csum_and_copy_from_iter from skb_do_copy_data_nocache.
 948		 */
 949		skb->ip_summed = CHECKSUM_UNNECESSARY;
 950	}
 951
 952start:
 953	while (msg_data_left(msg)) {
 954		bool merge = true;
 955		int i = skb_shinfo(skb)->nr_frags;
 956		struct page_frag *pfrag = sk_page_frag(sk);
 957
 958		if (!sk_page_frag_refill(sk, pfrag))
 959			goto wait_for_memory;
 960
 961		if (!skb_can_coalesce(skb, i, pfrag->page,
 962				      pfrag->offset)) {
 963			if (i == MAX_SKB_FRAGS) {
 964				struct sk_buff *tskb;
 965
 966				tskb = alloc_skb(0, sk->sk_allocation);
 967				if (!tskb)
 968					goto wait_for_memory;
 969
 970				if (head == skb)
 971					skb_shinfo(head)->frag_list = tskb;
 972				else
 973					skb->next = tskb;
 974
 975				skb = tskb;
 976				skb->ip_summed = CHECKSUM_UNNECESSARY;
 977				continue;
 978			}
 979			merge = false;
 980		}
 981
 982		copy = min_t(int, msg_data_left(msg),
 983			     pfrag->size - pfrag->offset);
 984
 985		if (!sk_wmem_schedule(sk, copy))
 986			goto wait_for_memory;
 987
 988		err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb,
 989					       pfrag->page,
 990					       pfrag->offset,
 991					       copy);
 992		if (err)
 993			goto out_error;
 994
 995		/* Update the skb. */
 996		if (merge) {
 997			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
 998		} else {
 999			skb_fill_page_desc(skb, i, pfrag->page,
1000					   pfrag->offset, copy);
1001			get_page(pfrag->page);
1002		}
1003
1004		pfrag->offset += copy;
1005		copied += copy;
1006		if (head != skb) {
1007			head->len += copy;
1008			head->data_len += copy;
1009		}
1010
1011		continue;
1012
1013wait_for_memory:
1014		kcm_push(kcm);
1015		err = sk_stream_wait_memory(sk, &timeo);
1016		if (err)
1017			goto out_error;
1018	}
1019
1020	if (eor) {
1021		bool not_busy = skb_queue_empty(&sk->sk_write_queue);
1022
1023		if (head) {
1024			/* Message complete, queue it on send buffer */
1025			__skb_queue_tail(&sk->sk_write_queue, head);
1026			kcm->seq_skb = NULL;
1027			KCM_STATS_INCR(kcm->stats.tx_msgs);
1028		}
1029
1030		if (msg->msg_flags & MSG_BATCH) {
1031			kcm->tx_wait_more = true;
1032		} else if (kcm->tx_wait_more || not_busy) {
1033			err = kcm_write_msgs(kcm);
1034			if (err < 0) {
1035				/* We got a hard error in write_msgs but have
1036				 * already queued this message. Report an error
1037				 * in the socket, but don't affect return value
1038				 * from sendmsg
1039				 */
1040				pr_warn("KCM: Hard failure on kcm_write_msgs\n");
1041				report_csk_error(&kcm->sk, -err);
1042			}
1043		}
1044	} else {
1045		/* Message not complete, save state */
1046partial_message:
1047		if (head) {
1048			kcm->seq_skb = head;
1049			kcm_tx_msg(head)->last_skb = skb;
1050		}
1051	}
1052
1053	KCM_STATS_ADD(kcm->stats.tx_bytes, copied);
1054
1055	release_sock(sk);
1056	return copied;
1057
1058out_error:
1059	kcm_push(kcm);
1060
1061	if (copied && sock->type == SOCK_SEQPACKET) {
1062		/* Wrote some bytes before encountering an
1063		 * error, return partial success.
1064		 */
1065		goto partial_message;
1066	}
1067
1068	if (head != kcm->seq_skb)
1069		kfree_skb(head);
1070
1071	err = sk_stream_error(sk, msg->msg_flags, err);
1072
1073	/* make sure we wake any epoll edge trigger waiter */
1074	if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 && err == -EAGAIN))
1075		sk->sk_write_space(sk);
1076
1077	release_sock(sk);
1078	return err;
1079}
1080
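
The MSG_EOR / MSG_BATCH handling above corresponds to user-space usage roughly like the following sketch (illustrative only; kcm_fd is assumed to be an attached SOCK_SEQPACKET KCM socket and MSG_BATCH to be exposed by the libc headers).

#include <sys/socket.h>

/* Illustrative only: on a SOCK_SEQPACKET KCM socket, MSG_EOR marks a
 * complete message and MSG_BATCH holds it in the write queue, so several
 * messages can be flushed by one later send without MSG_BATCH.
 */
static void send_two_batched(int kcm_fd,
			     const void *req1, size_t len1,
			     const void *req2, size_t len2)
{
	send(kcm_fd, req1, len1, MSG_EOR | MSG_BATCH);	/* queue, don't flush */
	send(kcm_fd, req2, len2, MSG_EOR);		/* queue and flush */
}
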
1081static struct sk_buff *kcm_wait_data(struct sock *sk, int flags,
1082				     long timeo, int *err)
1083{
1084	struct sk_buff *skb;
1085
1086	while (!(skb = skb_peek(&sk->sk_receive_queue))) {
1087		if (sk->sk_err) {
1088			*err = sock_error(sk);
1089			return NULL;
1090		}
1091
1092		if (sock_flag(sk, SOCK_DONE))
1093			return NULL;
1094
1095		if ((flags & MSG_DONTWAIT) || !timeo) {
1096			*err = -EAGAIN;
1097			return NULL;
1098		}
1099
1100		sk_wait_data(sk, &timeo, NULL);
1101
1102		/* Handle signals */
1103		if (signal_pending(current)) {
1104			*err = sock_intr_errno(timeo);
1105			return NULL;
1106		}
1107	}
1108
1109	return skb;
1110}
1111
1112static int kcm_recvmsg(struct socket *sock, struct msghdr *msg,
1113		       size_t len, int flags)
1114{
1115	struct sock *sk = sock->sk;
1116	struct kcm_sock *kcm = kcm_sk(sk);
1117	int err = 0;
1118	long timeo;
1119	struct strp_rx_msg *rxm;
1120	int copied = 0;
1121	struct sk_buff *skb;
1122
1123	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1124
1125	lock_sock(sk);
1126
1127	skb = kcm_wait_data(sk, flags, timeo, &err);
1128	if (!skb)
1129		goto out;
1130
1131	/* Okay, have a message on the receive queue */
1132
1133	rxm = strp_rx_msg(skb);
1134
1135	if (len > rxm->full_len)
1136		len = rxm->full_len;
1137
1138	err = skb_copy_datagram_msg(skb, rxm->offset, msg, len);
1139	if (err < 0)
1140		goto out;
1141
1142	copied = len;
1143	if (likely(!(flags & MSG_PEEK))) {
1144		KCM_STATS_ADD(kcm->stats.rx_bytes, copied);
1145		if (copied < rxm->full_len) {
1146			if (sock->type == SOCK_DGRAM) {
1147				/* Truncated message */
1148				msg->msg_flags |= MSG_TRUNC;
1149				goto msg_finished;
1150			}
1151			rxm->offset += copied;
1152			rxm->full_len -= copied;
1153		} else {
1154msg_finished:
1155			/* Finished with message */
1156			msg->msg_flags |= MSG_EOR;
1157			KCM_STATS_INCR(kcm->stats.rx_msgs);
1158			skb_unlink(skb, &sk->sk_receive_queue);
1159			kfree_skb(skb);
1160		}
1161	}
1162
1163out:
1164	release_sock(sk);
1165
1166	return copied ? : err;
1167}
1168
1169static ssize_t kcm_splice_read(struct socket *sock, loff_t *ppos,
1170			       struct pipe_inode_info *pipe, size_t len,
1171			       unsigned int flags)
1172{
1173	struct sock *sk = sock->sk;
1174	struct kcm_sock *kcm = kcm_sk(sk);
1175	long timeo;
1176	struct strp_rx_msg *rxm;
1177	int err = 0;
1178	ssize_t copied;
1179	struct sk_buff *skb;
1180
 1181	/* Only support splice for SOCK_SEQPACKET */
1182
1183	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1184
1185	lock_sock(sk);
1186
1187	skb = kcm_wait_data(sk, flags, timeo, &err);
1188	if (!skb)
1189		goto err_out;
1190
1191	/* Okay, have a message on the receive queue */
1192
1193	rxm = strp_rx_msg(skb);
1194
1195	if (len > rxm->full_len)
1196		len = rxm->full_len;
1197
1198	copied = skb_splice_bits(skb, sk, rxm->offset, pipe, len, flags);
1199	if (copied < 0) {
1200		err = copied;
1201		goto err_out;
1202	}
1203
1204	KCM_STATS_ADD(kcm->stats.rx_bytes, copied);
1205
1206	rxm->offset += copied;
1207	rxm->full_len -= copied;
1208
1209	/* We have no way to return MSG_EOR. If all the bytes have been
1210	 * read we still leave the message in the receive socket buffer.
1211	 * A subsequent recvmsg needs to be done to return MSG_EOR and
1212	 * finish reading the message.
1213	 */
1214
1215	release_sock(sk);
1216
1217	return copied;
1218
1219err_out:
1220	release_sock(sk);
1221
1222	return err;
1223}
1224
1225/* kcm sock lock held */
1226static void kcm_recv_disable(struct kcm_sock *kcm)
1227{
1228	struct kcm_mux *mux = kcm->mux;
1229
1230	if (kcm->rx_disabled)
1231		return;
1232
1233	spin_lock_bh(&mux->rx_lock);
1234
1235	kcm->rx_disabled = 1;
1236
1237	/* If a psock is reserved we'll do cleanup in unreserve */
1238	if (!kcm->rx_psock) {
1239		if (kcm->rx_wait) {
1240			list_del(&kcm->wait_rx_list);
1241			kcm->rx_wait = false;
1242		}
1243
1244		requeue_rx_msgs(mux, &kcm->sk.sk_receive_queue);
1245	}
1246
1247	spin_unlock_bh(&mux->rx_lock);
1248}
1249
1250/* kcm sock lock held */
1251static void kcm_recv_enable(struct kcm_sock *kcm)
1252{
1253	struct kcm_mux *mux = kcm->mux;
1254
1255	if (!kcm->rx_disabled)
1256		return;
1257
1258	spin_lock_bh(&mux->rx_lock);
1259
1260	kcm->rx_disabled = 0;
1261	kcm_rcv_ready(kcm);
1262
1263	spin_unlock_bh(&mux->rx_lock);
1264}
1265
1266static int kcm_setsockopt(struct socket *sock, int level, int optname,
1267			  char __user *optval, unsigned int optlen)
1268{
1269	struct kcm_sock *kcm = kcm_sk(sock->sk);
1270	int val, valbool;
1271	int err = 0;
1272
1273	if (level != SOL_KCM)
1274		return -ENOPROTOOPT;
1275
1276	if (optlen < sizeof(int))
1277		return -EINVAL;
1278
1279	if (get_user(val, (int __user *)optval))
1280		return -EINVAL;
1281
1282	valbool = val ? 1 : 0;
1283
1284	switch (optname) {
1285	case KCM_RECV_DISABLE:
1286		lock_sock(&kcm->sk);
1287		if (valbool)
1288			kcm_recv_disable(kcm);
1289		else
1290			kcm_recv_enable(kcm);
1291		release_sock(&kcm->sk);
1292		break;
1293	default:
1294		err = -ENOPROTOOPT;
1295	}
1296
1297	return err;
1298}
1299
1300static int kcm_getsockopt(struct socket *sock, int level, int optname,
1301			  char __user *optval, int __user *optlen)
1302{
1303	struct kcm_sock *kcm = kcm_sk(sock->sk);
1304	int val, len;
1305
1306	if (level != SOL_KCM)
1307		return -ENOPROTOOPT;
1308
1309	if (get_user(len, optlen))
1310		return -EFAULT;
1311
1312	len = min_t(unsigned int, len, sizeof(int));
1313	if (len < 0)
1314		return -EINVAL;
1315
1316	switch (optname) {
1317	case KCM_RECV_DISABLE:
1318		val = kcm->rx_disabled;
1319		break;
1320	default:
1321		return -ENOPROTOOPT;
1322	}
1323
1324	if (put_user(len, optlen))
1325		return -EFAULT;
1326	if (copy_to_user(optval, &val, len))
1327		return -EFAULT;
1328	return 0;
1329}
1330
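
From user space, the only option handled here is toggled roughly as in this sketch (illustrative only; SOL_KCM and KCM_RECV_DISABLE are defined by hand in case the toolchain headers do not provide them).

#include <stdio.h>
#include <sys/socket.h>

#ifndef SOL_KCM
#define SOL_KCM 281		/* value from include/linux/socket.h */
#endif
#ifndef KCM_RECV_DISABLE
#define KCM_RECV_DISABLE 1	/* value from include/uapi/linux/kcm.h */
#endif

/* Illustrative only: stop (or resume) delivery of parsed messages to this
 * particular KCM socket; other sockets on the same mux keep receiving.
 */
static int kcm_set_recv_disable(int kcm_fd, int disable)
{
	if (setsockopt(kcm_fd, SOL_KCM, KCM_RECV_DISABLE,
		       &disable, sizeof(disable)) < 0) {
		perror("KCM_RECV_DISABLE");
		return -1;
	}
	return 0;
}
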
1331static void init_kcm_sock(struct kcm_sock *kcm, struct kcm_mux *mux)
1332{
1333	struct kcm_sock *tkcm;
1334	struct list_head *head;
1335	int index = 0;
1336
1337	/* For SOCK_SEQPACKET sock type, datagram_poll checks the sk_state, so
1338	 * we set sk_state, otherwise epoll_wait always returns right away with
1339	 * POLLHUP
1340	 */
1341	kcm->sk.sk_state = TCP_ESTABLISHED;
1342
1343	/* Add to mux's kcm sockets list */
1344	kcm->mux = mux;
1345	spin_lock_bh(&mux->lock);
1346
1347	head = &mux->kcm_socks;
1348	list_for_each_entry(tkcm, &mux->kcm_socks, kcm_sock_list) {
1349		if (tkcm->index != index)
1350			break;
1351		head = &tkcm->kcm_sock_list;
1352		index++;
1353	}
1354
1355	list_add(&kcm->kcm_sock_list, head);
1356	kcm->index = index;
1357
1358	mux->kcm_socks_cnt++;
1359	spin_unlock_bh(&mux->lock);
1360
1361	INIT_WORK(&kcm->tx_work, kcm_tx_work);
1362
1363	spin_lock_bh(&mux->rx_lock);
1364	kcm_rcv_ready(kcm);
1365	spin_unlock_bh(&mux->rx_lock);
1366}
1367
1368static int kcm_attach(struct socket *sock, struct socket *csock,
1369		      struct bpf_prog *prog)
1370{
1371	struct kcm_sock *kcm = kcm_sk(sock->sk);
1372	struct kcm_mux *mux = kcm->mux;
1373	struct sock *csk;
1374	struct kcm_psock *psock = NULL, *tpsock;
1375	struct list_head *head;
1376	int index = 0;
1377	struct strp_callbacks cb;
1378	int err;
1379
1380	csk = csock->sk;
1381	if (!csk)
1382		return -EINVAL;
1383
1384	psock = kmem_cache_zalloc(kcm_psockp, GFP_KERNEL);
1385	if (!psock)
1386		return -ENOMEM;
1387
1388	psock->mux = mux;
1389	psock->sk = csk;
1390	psock->bpf_prog = prog;
1391
1392	cb.rcv_msg = kcm_rcv_strparser;
1393	cb.abort_parser = NULL;
1394	cb.parse_msg = kcm_parse_func_strparser;
1395	cb.read_sock_done = kcm_read_sock_done;
1396
1397	err = strp_init(&psock->strp, csk, &cb);
1398	if (err) {
1399		kmem_cache_free(kcm_psockp, psock);
1400		return err;
1401	}
1402
1403	sock_hold(csk);
1404
1405	write_lock_bh(&csk->sk_callback_lock);
1406	psock->save_data_ready = csk->sk_data_ready;
1407	psock->save_write_space = csk->sk_write_space;
1408	psock->save_state_change = csk->sk_state_change;
1409	csk->sk_user_data = psock;
1410	csk->sk_data_ready = psock_data_ready;
1411	csk->sk_write_space = psock_write_space;
1412	csk->sk_state_change = psock_state_change;
1413	write_unlock_bh(&csk->sk_callback_lock);
1414
1415	/* Finished initialization, now add the psock to the MUX. */
1416	spin_lock_bh(&mux->lock);
1417	head = &mux->psocks;
1418	list_for_each_entry(tpsock, &mux->psocks, psock_list) {
1419		if (tpsock->index != index)
1420			break;
1421		head = &tpsock->psock_list;
1422		index++;
1423	}
1424
1425	list_add(&psock->psock_list, head);
1426	psock->index = index;
1427
1428	KCM_STATS_INCR(mux->stats.psock_attach);
1429	mux->psocks_cnt++;
1430	psock_now_avail(psock);
1431	spin_unlock_bh(&mux->lock);
1432
1433	/* Schedule RX work in case there are already bytes queued */
1434	strp_check_rcv(&psock->strp);
1435
1436	return 0;
1437}
1438
1439static int kcm_attach_ioctl(struct socket *sock, struct kcm_attach *info)
1440{
1441	struct socket *csock;
1442	struct bpf_prog *prog;
1443	int err;
1444
1445	csock = sockfd_lookup(info->fd, &err);
1446	if (!csock)
1447		return -ENOENT;
1448
1449	prog = bpf_prog_get_type(info->bpf_fd, BPF_PROG_TYPE_SOCKET_FILTER);
1450	if (IS_ERR(prog)) {
1451		err = PTR_ERR(prog);
1452		goto out;
1453	}
1454
1455	err = kcm_attach(sock, csock, prog);
1456	if (err) {
1457		bpf_prog_put(prog);
1458		goto out;
1459	}
1460
1461	/* Keep reference on file also */
1462
1463	return 0;
1464out:
1465	fput(csock->file);
1466	return err;
1467}
1468
1469static void kcm_unattach(struct kcm_psock *psock)
1470{
1471	struct sock *csk = psock->sk;
1472	struct kcm_mux *mux = psock->mux;
1473
1474	lock_sock(csk);
1475
1476	/* Stop getting callbacks from TCP socket. After this there should
1477	 * be no way to reserve a kcm for this psock.
1478	 */
1479	write_lock_bh(&csk->sk_callback_lock);
1480	csk->sk_user_data = NULL;
1481	csk->sk_data_ready = psock->save_data_ready;
1482	csk->sk_write_space = psock->save_write_space;
1483	csk->sk_state_change = psock->save_state_change;
1484	strp_stop(&psock->strp);
1485
1486	if (WARN_ON(psock->rx_kcm)) {
1487		write_unlock_bh(&csk->sk_callback_lock);
1488		return;
1489	}
1490
1491	spin_lock_bh(&mux->rx_lock);
1492
1493	/* Stop receiver activities. After this point psock should not be
1494	 * able to get onto ready list either through callbacks or work.
1495	 */
1496	if (psock->ready_rx_msg) {
1497		list_del(&psock->psock_ready_list);
1498		kfree_skb(psock->ready_rx_msg);
1499		psock->ready_rx_msg = NULL;
1500		KCM_STATS_INCR(mux->stats.rx_ready_drops);
1501	}
1502
1503	spin_unlock_bh(&mux->rx_lock);
1504
1505	write_unlock_bh(&csk->sk_callback_lock);
1506
1507	/* Call strp_done without sock lock */
1508	release_sock(csk);
1509	strp_done(&psock->strp);
1510	lock_sock(csk);
1511
1512	bpf_prog_put(psock->bpf_prog);
1513
1514	spin_lock_bh(&mux->lock);
1515
1516	aggregate_psock_stats(&psock->stats, &mux->aggregate_psock_stats);
1517	save_strp_stats(&psock->strp, &mux->aggregate_strp_stats);
1518
1519	KCM_STATS_INCR(mux->stats.psock_unattach);
1520
1521	if (psock->tx_kcm) {
1522		/* psock was reserved.  Just mark it finished and we will clean
 1523		 * up in the kcm paths; we need the kcm lock, which cannot be
 1524		 * acquired here.
1525		 */
1526		KCM_STATS_INCR(mux->stats.psock_unattach_rsvd);
1527		spin_unlock_bh(&mux->lock);
1528
1529		/* We are unattaching a socket that is reserved. Abort the
1530		 * socket since we may be out of sync in sending on it. We need
1531		 * to do this without the mux lock.
1532		 */
1533		kcm_abort_tx_psock(psock, EPIPE, false);
1534
1535		spin_lock_bh(&mux->lock);
1536		if (!psock->tx_kcm) {
 1537			/* psock was unreserved while the mux lock was dropped */
1538			goto no_reserved;
1539		}
1540		psock->done = 1;
1541
1542		/* Commit done before queuing work to process it */
1543		smp_mb();
1544
1545		/* Queue tx work to make sure psock->done is handled */
1546		queue_work(kcm_wq, &psock->tx_kcm->tx_work);
1547		spin_unlock_bh(&mux->lock);
1548	} else {
1549no_reserved:
1550		if (!psock->tx_stopped)
1551			list_del(&psock->psock_avail_list);
1552		list_del(&psock->psock_list);
1553		mux->psocks_cnt--;
1554		spin_unlock_bh(&mux->lock);
1555
1556		sock_put(csk);
1557		fput(csk->sk_socket->file);
1558		kmem_cache_free(kcm_psockp, psock);
1559	}
1560
1561	release_sock(csk);
1562}
1563
1564static int kcm_unattach_ioctl(struct socket *sock, struct kcm_unattach *info)
1565{
1566	struct kcm_sock *kcm = kcm_sk(sock->sk);
1567	struct kcm_mux *mux = kcm->mux;
1568	struct kcm_psock *psock;
1569	struct socket *csock;
1570	struct sock *csk;
1571	int err;
1572
1573	csock = sockfd_lookup(info->fd, &err);
1574	if (!csock)
1575		return -ENOENT;
1576
1577	csk = csock->sk;
1578	if (!csk) {
1579		err = -EINVAL;
1580		goto out;
1581	}
1582
1583	err = -ENOENT;
1584
1585	spin_lock_bh(&mux->lock);
1586
1587	list_for_each_entry(psock, &mux->psocks, psock_list) {
1588		if (psock->sk != csk)
1589			continue;
1590
1591		/* Found the matching psock */
1592
1593		if (psock->unattaching || WARN_ON(psock->done)) {
1594			err = -EALREADY;
1595			break;
1596		}
1597
1598		psock->unattaching = 1;
1599
1600		spin_unlock_bh(&mux->lock);
1601
 1602		/* kcm_unattach takes the lower socket lock itself */
1603		kcm_unattach(psock);
1604
1605		err = 0;
1606		goto out;
1607	}
1608
1609	spin_unlock_bh(&mux->lock);
1610
1611out:
1612	fput(csock->file);
1613	return err;
1614}
1615
1616static struct proto kcm_proto = {
1617	.name	= "KCM",
1618	.owner	= THIS_MODULE,
1619	.obj_size = sizeof(struct kcm_sock),
1620};
1621
1622/* Clone a kcm socket. */
1623static int kcm_clone(struct socket *osock, struct kcm_clone *info,
1624		     struct socket **newsockp)
1625{
1626	struct socket *newsock;
1627	struct sock *newsk;
1628	struct file *newfile;
1629	int err, newfd;
1630
1631	err = -ENFILE;
1632	newsock = sock_alloc();
1633	if (!newsock)
1634		goto out;
1635
1636	newsock->type = osock->type;
1637	newsock->ops = osock->ops;
1638
1639	__module_get(newsock->ops->owner);
1640
1641	newfd = get_unused_fd_flags(0);
1642	if (unlikely(newfd < 0)) {
1643		err = newfd;
1644		goto out_fd_fail;
1645	}
1646
1647	newfile = sock_alloc_file(newsock, 0, osock->sk->sk_prot_creator->name);
1648	if (unlikely(IS_ERR(newfile))) {
1649		err = PTR_ERR(newfile);
1650		goto out_sock_alloc_fail;
1651	}
1652
1653	newsk = sk_alloc(sock_net(osock->sk), PF_KCM, GFP_KERNEL,
1654			 &kcm_proto, true);
1655	if (!newsk) {
1656		err = -ENOMEM;
1657		goto out_sk_alloc_fail;
1658	}
1659
1660	sock_init_data(newsock, newsk);
1661	init_kcm_sock(kcm_sk(newsk), kcm_sk(osock->sk)->mux);
1662
1663	fd_install(newfd, newfile);
1664	*newsockp = newsock;
1665	info->fd = newfd;
1666
1667	return 0;
1668
1669out_sk_alloc_fail:
1670	fput(newfile);
1671out_sock_alloc_fail:
1672	put_unused_fd(newfd);
1673out_fd_fail:
1674	sock_release(newsock);
1675out:
1676	return err;
1677}
1678
1679static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1680{
1681	int err;
1682
1683	switch (cmd) {
1684	case SIOCKCMATTACH: {
1685		struct kcm_attach info;
1686
1687		if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
 1688			return -EFAULT;
1689
1690		err = kcm_attach_ioctl(sock, &info);
1691
1692		break;
1693	}
1694	case SIOCKCMUNATTACH: {
1695		struct kcm_unattach info;
1696
1697		if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
 1698			return -EFAULT;
1699
1700		err = kcm_unattach_ioctl(sock, &info);
1701
1702		break;
1703	}
1704	case SIOCKCMCLONE: {
1705		struct kcm_clone info;
1706		struct socket *newsock = NULL;
1707
1708		if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
 1709			return -EFAULT;
1710
1711		err = kcm_clone(sock, &info, &newsock);
1712
1713		if (!err) {
1714			if (copy_to_user((void __user *)arg, &info,
1715					 sizeof(info))) {
1716				err = -EFAULT;
1717				sys_close(info.fd);
1718			}
1719		}
1720
1721		break;
1722	}
1723	default:
1724		err = -ENOIOCTLCMD;
1725		break;
1726	}
1727
1728	return err;
1729}
1730
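
A clone (a new KCM socket on the same mux) is requested from user space roughly as in this sketch (illustrative only, same header assumptions as the attach example above).

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kcm.h>      /* struct kcm_clone, SIOCKCMCLONE */
#include <linux/sockios.h>  /* SIOCPROTOPRIVATE */

/* Illustrative only: ask the kernel for another KCM socket attached to the
 * same mux; on success the new descriptor is returned in clone.fd.
 */
static int kcm_clone_fd(int kcm_fd)
{
	struct kcm_clone clone = { 0 };

	if (ioctl(kcm_fd, SIOCKCMCLONE, &clone) < 0) {
		perror("SIOCKCMCLONE");
		return -1;
	}
	return clone.fd;
}
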
1731static void free_mux(struct rcu_head *rcu)
1732{
1733	struct kcm_mux *mux = container_of(rcu,
1734	    struct kcm_mux, rcu);
1735
1736	kmem_cache_free(kcm_muxp, mux);
1737}
1738
1739static void release_mux(struct kcm_mux *mux)
1740{
1741	struct kcm_net *knet = mux->knet;
1742	struct kcm_psock *psock, *tmp_psock;
1743
1744	/* Release psocks */
1745	list_for_each_entry_safe(psock, tmp_psock,
1746				 &mux->psocks, psock_list) {
1747		if (!WARN_ON(psock->unattaching))
1748			kcm_unattach(psock);
1749	}
1750
1751	if (WARN_ON(mux->psocks_cnt))
1752		return;
1753
1754	__skb_queue_purge(&mux->rx_hold_queue);
1755
1756	mutex_lock(&knet->mutex);
1757	aggregate_mux_stats(&mux->stats, &knet->aggregate_mux_stats);
1758	aggregate_psock_stats(&mux->aggregate_psock_stats,
1759			      &knet->aggregate_psock_stats);
1760	aggregate_strp_stats(&mux->aggregate_strp_stats,
1761			     &knet->aggregate_strp_stats);
1762	list_del_rcu(&mux->kcm_mux_list);
1763	knet->count--;
1764	mutex_unlock(&knet->mutex);
1765
1766	call_rcu(&mux->rcu, free_mux);
1767}
1768
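/* Final teardown of a KCM socket: drop it from the mux's socket list and, if
 * it was the last KCM socket on the mux, release the mux itself.  Called
 * from kcm_release(), or deferred to kcm_done_work() when a psock still has
 * the socket reserved for receive.
 */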
1769static void kcm_done(struct kcm_sock *kcm)
1770{
1771	struct kcm_mux *mux = kcm->mux;
1772	struct sock *sk = &kcm->sk;
1773	int socks_cnt;
1774
1775	spin_lock_bh(&mux->rx_lock);
1776	if (kcm->rx_psock) {
1777		/* Cleanup in unreserve_rx_kcm */
1778		WARN_ON(kcm->done);
1779		kcm->rx_disabled = 1;
1780		kcm->done = 1;
1781		spin_unlock_bh(&mux->rx_lock);
1782		return;
1783	}
1784
1785	if (kcm->rx_wait) {
1786		list_del(&kcm->wait_rx_list);
1787		kcm->rx_wait = false;
1788	}
1789	/* Move any pending receive messages to other kcm sockets */
1790	requeue_rx_msgs(mux, &sk->sk_receive_queue);
1791
1792	spin_unlock_bh(&mux->rx_lock);
1793
1794	if (WARN_ON(sk_rmem_alloc_get(sk)))
1795		return;
1796
1797	/* Detach from MUX */
1798	spin_lock_bh(&mux->lock);
1799
1800	list_del(&kcm->kcm_sock_list);
1801	mux->kcm_socks_cnt--;
1802	socks_cnt = mux->kcm_socks_cnt;
1803
1804	spin_unlock_bh(&mux->lock);
1805
1806	if (!socks_cnt) {
1807		/* We are done with the mux now. */
1808		release_mux(mux);
1809	}
1810
1811	WARN_ON(kcm->rx_wait);
1812
1813	sock_put(&kcm->sk);
1814}
1815
1816/* Called by kcm_release to close a KCM socket.
1817 * If this is the last KCM socket on the MUX, destroy the MUX.
1818 */
1819static int kcm_release(struct socket *sock)
1820{
1821	struct sock *sk = sock->sk;
1822	struct kcm_sock *kcm;
1823	struct kcm_mux *mux;
1824	struct kcm_psock *psock;
1825
1826	if (!sk)
1827		return 0;
1828
1829	kcm = kcm_sk(sk);
1830	mux = kcm->mux;
1831
1832	sock_orphan(sk);
1833	kfree_skb(kcm->seq_skb);
1834
1835	lock_sock(sk);
1836	/* Purge queue under lock to avoid race condition with tx_work trying
1837	 * to act when queue is nonempty. If tx_work runs after this point
1838	 * it will just return.
1839	 */
1840	__skb_queue_purge(&sk->sk_write_queue);
1841
1842	/* Set tx_stopped. This is checked when psock is bound to a kcm and we
1843	 * get a writespace callback. This prevents further work being queued
 1844	 * from the callback (unbinding the psock occurs after canceling work).
1845	 */
1846	kcm->tx_stopped = 1;
1847
1848	release_sock(sk);
1849
1850	spin_lock_bh(&mux->lock);
1851	if (kcm->tx_wait) {
 1852		/* Take off tx_wait list; after this point there should be no way
1853		 * that a psock will be assigned to this kcm.
1854		 */
1855		list_del(&kcm->wait_psock_list);
1856		kcm->tx_wait = false;
1857	}
1858	spin_unlock_bh(&mux->lock);
1859
1860	/* Cancel work. After this point there should be no outside references
1861	 * to the kcm socket.
1862	 */
1863	cancel_work_sync(&kcm->tx_work);
1864
1865	lock_sock(sk);
1866	psock = kcm->tx_psock;
1867	if (psock) {
1868		/* A psock was reserved, so we need to kill it since it
1869		 * may already have some bytes queued from a message. We
1870		 * need to do this after removing kcm from tx_wait list.
1871		 */
1872		kcm_abort_tx_psock(psock, EPIPE, false);
1873		unreserve_psock(kcm);
1874	}
1875	release_sock(sk);
1876
1877	WARN_ON(kcm->tx_wait);
1878	WARN_ON(kcm->tx_psock);
1879
1880	sock->sk = NULL;
1881
1882	kcm_done(kcm);
1883
1884	return 0;
1885}
1886
1887static const struct proto_ops kcm_dgram_ops = {
1888	.family =	PF_KCM,
1889	.owner =	THIS_MODULE,
1890	.release =	kcm_release,
1891	.bind =		sock_no_bind,
1892	.connect =	sock_no_connect,
1893	.socketpair =	sock_no_socketpair,
1894	.accept =	sock_no_accept,
1895	.getname =	sock_no_getname,
1896	.poll =		datagram_poll,
1897	.ioctl =	kcm_ioctl,
1898	.listen =	sock_no_listen,
1899	.shutdown =	sock_no_shutdown,
1900	.setsockopt =	kcm_setsockopt,
1901	.getsockopt =	kcm_getsockopt,
1902	.sendmsg =	kcm_sendmsg,
1903	.recvmsg =	kcm_recvmsg,
1904	.mmap =		sock_no_mmap,
1905	.sendpage =	kcm_sendpage,
1906};
1907
1908static const struct proto_ops kcm_seqpacket_ops = {
1909	.family =	PF_KCM,
1910	.owner =	THIS_MODULE,
1911	.release =	kcm_release,
1912	.bind =		sock_no_bind,
1913	.connect =	sock_no_connect,
1914	.socketpair =	sock_no_socketpair,
1915	.accept =	sock_no_accept,
1916	.getname =	sock_no_getname,
1917	.poll =		datagram_poll,
1918	.ioctl =	kcm_ioctl,
1919	.listen =	sock_no_listen,
1920	.shutdown =	sock_no_shutdown,
1921	.setsockopt =	kcm_setsockopt,
1922	.getsockopt =	kcm_getsockopt,
1923	.sendmsg =	kcm_sendmsg,
1924	.recvmsg =	kcm_recvmsg,
1925	.mmap =		sock_no_mmap,
1926	.sendpage =	kcm_sendpage,
1927	.splice_read =	kcm_splice_read,
1928};
1929
1930/* Create proto operation for kcm sockets */
1931static int kcm_create(struct net *net, struct socket *sock,
1932		      int protocol, int kern)
1933{
1934	struct kcm_net *knet = net_generic(net, kcm_net_id);
1935	struct sock *sk;
1936	struct kcm_mux *mux;
1937
1938	switch (sock->type) {
1939	case SOCK_DGRAM:
1940		sock->ops = &kcm_dgram_ops;
1941		break;
1942	case SOCK_SEQPACKET:
1943		sock->ops = &kcm_seqpacket_ops;
1944		break;
1945	default:
1946		return -ESOCKTNOSUPPORT;
1947	}
1948
1949	if (protocol != KCMPROTO_CONNECTED)
1950		return -EPROTONOSUPPORT;
1951
1952	sk = sk_alloc(net, PF_KCM, GFP_KERNEL, &kcm_proto, kern);
1953	if (!sk)
1954		return -ENOMEM;
1955
1956	/* Allocate a kcm mux, shared between KCM sockets */
1957	mux = kmem_cache_zalloc(kcm_muxp, GFP_KERNEL);
1958	if (!mux) {
1959		sk_free(sk);
1960		return -ENOMEM;
1961	}
1962
1963	spin_lock_init(&mux->lock);
1964	spin_lock_init(&mux->rx_lock);
1965	INIT_LIST_HEAD(&mux->kcm_socks);
1966	INIT_LIST_HEAD(&mux->kcm_rx_waiters);
1967	INIT_LIST_HEAD(&mux->kcm_tx_waiters);
1968
1969	INIT_LIST_HEAD(&mux->psocks);
1970	INIT_LIST_HEAD(&mux->psocks_ready);
1971	INIT_LIST_HEAD(&mux->psocks_avail);
1972
1973	mux->knet = knet;
1974
1975	/* Add new MUX to list */
1976	mutex_lock(&knet->mutex);
1977	list_add_rcu(&mux->kcm_mux_list, &knet->mux_list);
1978	knet->count++;
1979	mutex_unlock(&knet->mutex);
1980
1981	skb_queue_head_init(&mux->rx_hold_queue);
1982
1983	/* Init KCM socket */
1984	sock_init_data(sock, sk);
1985	init_kcm_sock(kcm_sk(sk), mux);
1986
1987	return 0;
1988}
1989
1990static struct net_proto_family kcm_family_ops = {
1991	.family = PF_KCM,
1992	.create = kcm_create,
1993	.owner  = THIS_MODULE,
1994};
1995
1996static __net_init int kcm_init_net(struct net *net)
1997{
1998	struct kcm_net *knet = net_generic(net, kcm_net_id);
1999
2000	INIT_LIST_HEAD_RCU(&knet->mux_list);
2001	mutex_init(&knet->mutex);
2002
2003	return 0;
2004}
2005
2006static __net_exit void kcm_exit_net(struct net *net)
2007{
2008	struct kcm_net *knet = net_generic(net, kcm_net_id);
2009
2010	/* All KCM sockets should be closed at this point, which should mean
2011	 * that all multiplexors and psocks have been destroyed.
2012	 */
2013	WARN_ON(!list_empty(&knet->mux_list));
2014}
2015
2016static struct pernet_operations kcm_net_ops = {
2017	.init = kcm_init_net,
2018	.exit = kcm_exit_net,
2019	.id   = &kcm_net_id,
2020	.size = sizeof(struct kcm_net),
2021};
2022
2023static int __init kcm_init(void)
2024{
2025	int err = -ENOMEM;
2026
2027	kcm_muxp = kmem_cache_create("kcm_mux_cache",
2028				     sizeof(struct kcm_mux), 0,
2029				     SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
2030	if (!kcm_muxp)
2031		goto fail;
2032
2033	kcm_psockp = kmem_cache_create("kcm_psock_cache",
2034				       sizeof(struct kcm_psock), 0,
2035					SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
2036	if (!kcm_psockp)
2037		goto fail;
2038
2039	kcm_wq = create_singlethread_workqueue("kkcmd");
2040	if (!kcm_wq)
2041		goto fail;
2042
2043	err = proto_register(&kcm_proto, 1);
2044	if (err)
2045		goto fail;
2046
2047	err = sock_register(&kcm_family_ops);
2048	if (err)
2049		goto sock_register_fail;
2050
2051	err = register_pernet_device(&kcm_net_ops);
2052	if (err)
2053		goto net_ops_fail;
2054
2055	err = kcm_proc_init();
2056	if (err)
2057		goto proc_init_fail;
2058
2059	return 0;
2060
2061proc_init_fail:
2062	unregister_pernet_device(&kcm_net_ops);
2063
2064net_ops_fail:
2065	sock_unregister(PF_KCM);
2066
2067sock_register_fail:
2068	proto_unregister(&kcm_proto);
2069
2070fail:
2071	kmem_cache_destroy(kcm_muxp);
2072	kmem_cache_destroy(kcm_psockp);
2073
2074	if (kcm_wq)
2075		destroy_workqueue(kcm_wq);
2076
2077	return err;
2078}
2079
2080static void __exit kcm_exit(void)
2081{
2082	kcm_proc_exit();
2083	unregister_pernet_device(&kcm_net_ops);
2084	sock_unregister(PF_KCM);
2085	proto_unregister(&kcm_proto);
2086	destroy_workqueue(kcm_wq);
2087
2088	kmem_cache_destroy(kcm_muxp);
2089	kmem_cache_destroy(kcm_psockp);
2090}
2091
2092module_init(kcm_init);
2093module_exit(kcm_exit);
2094
2095MODULE_LICENSE("GPL");
2096MODULE_ALIAS_NETPROTO(PF_KCM);
2097