   1/*
   2 * net/tipc/socket.c: TIPC socket API
   3 *
   4 * Copyright (c) 2001-2007, 2012-2017, Ericsson AB
   5 * Copyright (c) 2004-2008, 2010-2013, Wind River Systems
   6 * All rights reserved.
   7 *
   8 * Redistribution and use in source and binary forms, with or without
   9 * modification, are permitted provided that the following conditions are met:
  10 *
  11 * 1. Redistributions of source code must retain the above copyright
  12 *    notice, this list of conditions and the following disclaimer.
  13 * 2. Redistributions in binary form must reproduce the above copyright
  14 *    notice, this list of conditions and the following disclaimer in the
  15 *    documentation and/or other materials provided with the distribution.
  16 * 3. Neither the names of the copyright holders nor the names of its
  17 *    contributors may be used to endorse or promote products derived from
  18 *    this software without specific prior written permission.
  19 *
  20 * Alternatively, this software may be distributed under the terms of the
  21 * GNU General Public License ("GPL") version 2 as published by the Free
  22 * Software Foundation.
  23 *
  24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  34 * POSSIBILITY OF SUCH DAMAGE.
  35 */
  36
  37#include <linux/rhashtable.h>
  38#include <linux/sched/signal.h>
  39
  40#include "core.h"
  41#include "name_table.h"
  42#include "node.h"
  43#include "link.h"
  44#include "name_distr.h"
  45#include "socket.h"
  46#include "bcast.h"
  47#include "netlink.h"
  48#include "group.h"
  49#include "trace.h"
  50
  51#define CONN_TIMEOUT_DEFAULT    8000    /* default connect timeout = 8s */
  52#define CONN_PROBING_INTV	msecs_to_jiffies(3600000)  /* [ms] => 1 h */
  53#define TIPC_FWD_MSG		1
  54#define TIPC_MAX_PORT		0xffffffff
  55#define TIPC_MIN_PORT		1
  56#define TIPC_ACK_RATE		4       /* ACK at 1/4 of rcv window size */
  57
  58enum {
  59	TIPC_LISTEN = TCP_LISTEN,
  60	TIPC_ESTABLISHED = TCP_ESTABLISHED,
  61	TIPC_OPEN = TCP_CLOSE,
  62	TIPC_DISCONNECTING = TCP_CLOSE_WAIT,
  63	TIPC_CONNECTING = TCP_SYN_SENT,
  64};
  65
  66struct sockaddr_pair {
  67	struct sockaddr_tipc sock;
  68	struct sockaddr_tipc member;
  69};
  70
  71/**
  72 * struct tipc_sock - TIPC socket structure
  73 * @sk: socket - interacts with 'port' and with user via the socket API
  74 * @conn_type: TIPC type used when connection was established
  75 * @conn_instance: TIPC instance used when connection was established
  76 * @published: non-zero if port has one or more associated names
  77 * @max_pkt: maximum packet size "hint" used when building messages sent by port
  78 * @portid: unique port identity in TIPC socket hash table
  79 * @phdr: preformatted message header used when sending messages
  80 * @cong_links: list of congested links
  81 * @publications: list of publications for port
  83 * @pub_count: total # of publications port has made during its lifetime
  84 * @conn_timeout: the time we can wait for an unresponded setup request
  85 * @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue
  86 * @cong_link_cnt: number of congested links
  87 * @snt_unacked: # messages sent by socket, and not yet acked by peer
  88 * @rcv_unacked: # messages read by user, but not yet acked back to peer
  89 * @peer: 'connected' peer for dgram/rdm
  90 * @node: hash table node
  91 * @mc_method: cookie for use between socket and broadcast layer
  92 * @rcu: rcu struct for tipc_sock
     * @probe_unacked: probe has not been acked yet
     * @snd_win: send window size
     * @peer_caps: peer capabilities mask
     * @rcv_win: receive window size
     * @group: communication group the socket is a member of, if any
     * @group_is_open: socket is open for group messages
  93 */
  94struct tipc_sock {
  95	struct sock sk;
  96	u32 conn_type;
  97	u32 conn_instance;
  98	int published;
  99	u32 max_pkt;
 100	u32 portid;
 101	struct tipc_msg phdr;
 102	struct list_head cong_links;
 103	struct list_head publications;
 104	u32 pub_count;
 105	atomic_t dupl_rcvcnt;
 106	u16 conn_timeout;
 107	bool probe_unacked;
 108	u16 cong_link_cnt;
 109	u16 snt_unacked;
 110	u16 snd_win;
 111	u16 peer_caps;
 112	u16 rcv_unacked;
 113	u16 rcv_win;
 114	struct sockaddr_tipc peer;
 115	struct rhash_head node;
 116	struct tipc_mc_method mc_method;
 117	struct rcu_head rcu;
 118	struct tipc_group *group;
 119	bool group_is_open;
 120};
 121
 122static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
 123static void tipc_data_ready(struct sock *sk);
 124static void tipc_write_space(struct sock *sk);
 125static void tipc_sock_destruct(struct sock *sk);
 126static int tipc_release(struct socket *sock);
 127static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
 128		       bool kern);
 129static void tipc_sk_timeout(struct timer_list *t);
 130static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
 131			   struct tipc_name_seq const *seq);
 132static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
 133			    struct tipc_name_seq const *seq);
 134static int tipc_sk_leave(struct tipc_sock *tsk);
 135static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid);
 136static int tipc_sk_insert(struct tipc_sock *tsk);
 137static void tipc_sk_remove(struct tipc_sock *tsk);
 138static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz);
 139static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz);
 140
 141static const struct proto_ops packet_ops;
 142static const struct proto_ops stream_ops;
 143static const struct proto_ops msg_ops;
 144static struct proto tipc_proto;
 145static const struct rhashtable_params tsk_rht_params;
 146
 147static u32 tsk_own_node(struct tipc_sock *tsk)
 148{
 149	return msg_prevnode(&tsk->phdr);
 150}
 151
 152static u32 tsk_peer_node(struct tipc_sock *tsk)
 153{
 154	return msg_destnode(&tsk->phdr);
 155}
 156
 157static u32 tsk_peer_port(struct tipc_sock *tsk)
 158{
 159	return msg_destport(&tsk->phdr);
 160}
 161
 162static  bool tsk_unreliable(struct tipc_sock *tsk)
 163{
 164	return msg_src_droppable(&tsk->phdr) != 0;
 165}
 166
 167static void tsk_set_unreliable(struct tipc_sock *tsk, bool unreliable)
 168{
 169	msg_set_src_droppable(&tsk->phdr, unreliable ? 1 : 0);
 170}
 171
 172static bool tsk_unreturnable(struct tipc_sock *tsk)
 173{
 174	return msg_dest_droppable(&tsk->phdr) != 0;
 175}
 176
 177static void tsk_set_unreturnable(struct tipc_sock *tsk, bool unreturnable)
 178{
 179	msg_set_dest_droppable(&tsk->phdr, unreturnable ? 1 : 0);
 180}
 181
 182static int tsk_importance(struct tipc_sock *tsk)
 183{
 184	return msg_importance(&tsk->phdr);
 185}
 186
 187static int tsk_set_importance(struct tipc_sock *tsk, int imp)
 188{
 189	if (imp > TIPC_CRITICAL_IMPORTANCE)
 190		return -EINVAL;
 191	msg_set_importance(&tsk->phdr, (u32)imp);
 192	return 0;
 193}
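
/* Illustrative userspace sketch (editorial addition, not part of this
 * file): the importance level validated above arrives via setsockopt()
 * on a TIPC socket; constants are from <linux/tipc.h>:
 *
 *	int imp = TIPC_HIGH_IMPORTANCE;
 *
 *	if (setsockopt(sd, SOL_TIPC, TIPC_IMPORTANCE, &imp, sizeof(imp)))
 *		perror("setsockopt(TIPC_IMPORTANCE)");
 *
 * Values above TIPC_CRITICAL_IMPORTANCE are refused with EINVAL, per
 * tsk_set_importance() above.
 */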
 194
 195static struct tipc_sock *tipc_sk(const struct sock *sk)
 196{
 197	return container_of(sk, struct tipc_sock, sk);
 198}
 199
 200static bool tsk_conn_cong(struct tipc_sock *tsk)
 201{
 202	return tsk->snt_unacked > tsk->snd_win;
 203}
 204
 205static u16 tsk_blocks(int len)
 206{
 207	return ((len / FLOWCTL_BLK_SZ) + 1);
 208}
 209
 210/* tsk_adv_blocks(): translate a buffer size in bytes to number of
 211 * advertisable blocks, taking into account the ratio truesize(len)/len
 212 * We can trust that this ratio is always < 4 for len >= FLOWCTL_BLK_SZ
 213 */
 214static u16 tsk_adv_blocks(int len)
 215{
 216	return len / FLOWCTL_BLK_SZ / 4;
 217}
 218
 219/* tsk_inc(): increment counter for sent or received data
 220 * - If block based flow control is not supported by peer we
 221 *   fall back to message-based flow control, incrementing the counter
 222 */
 223static u16 tsk_inc(struct tipc_sock *tsk, int msglen)
 224{
 225	if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
 226		return ((msglen / FLOWCTL_BLK_SZ) + 1);
 227	return 1;
 228}
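
/* Worked example of the block accounting above (editorial addition),
 * assuming FLOWCTL_BLK_SZ is 1024 as defined in net/tipc/msg.h:
 *
 *	tsk_blocks(1500)      = 1500 / 1024 + 1  = 2 blocks consumed
 *	tsk_adv_blocks(66000) = 66000 / 1024 / 4 = 16 blocks advertised
 *
 * The divide-by-four in tsk_adv_blocks() absorbs the worst-case
 * truesize(len)/len ratio, so a receiver never advertises more space
 * than its receive buffer can actually absorb.
 */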
 229
 230/**
 231 * tsk_advance_rx_queue - discard first buffer in socket receive queue
     * @sk: network socket
 232 *
 233 * Caller must hold socket lock
 234 */
 235static void tsk_advance_rx_queue(struct sock *sk)
 236{
 237	trace_tipc_sk_advance_rx(sk, NULL, TIPC_DUMP_SK_RCVQ, " ");
 238	kfree_skb(__skb_dequeue(&sk->sk_receive_queue));
 239}
 240
 241/* tipc_sk_respond() : send response message back to sender
 242 */
 243static void tipc_sk_respond(struct sock *sk, struct sk_buff *skb, int err)
 244{
 245	u32 selector;
 246	u32 dnode;
 247	u32 onode = tipc_own_addr(sock_net(sk));
 248
 249	if (!tipc_msg_reverse(onode, &skb, err))
 250		return;
 251
 252	trace_tipc_sk_rej_msg(sk, skb, TIPC_DUMP_NONE, "@sk_respond!");
 253	dnode = msg_destnode(buf_msg(skb));
 254	selector = msg_origport(buf_msg(skb));
 255	tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector);
 256}
 257
 258/**
 259 * tsk_rej_rx_queue - reject all buffers in socket receive queue
     * @sk: network socket
 260 *
 261 * Caller must hold socket lock
 262 */
 263static void tsk_rej_rx_queue(struct sock *sk)
 264{
 265	struct sk_buff *skb;
 266
 267	while ((skb = __skb_dequeue(&sk->sk_receive_queue)))
 268		tipc_sk_respond(sk, skb, TIPC_ERR_NO_PORT);
 269}
 270
 271static bool tipc_sk_connected(struct sock *sk)
 272{
 273	return sk->sk_state == TIPC_ESTABLISHED;
 274}
 275
 276/* tipc_sk_type_connectionless - check if the socket is a datagram socket
 277 * @sk: socket
 278 *
 279 * Returns true if connectionless, false otherwise
 280 */
 281static bool tipc_sk_type_connectionless(struct sock *sk)
 282{
 283	return sk->sk_type == SOCK_RDM || sk->sk_type == SOCK_DGRAM;
 284}
 285
 286/* tsk_peer_msg - verify if message was sent by connected port's peer
 287 *
 288 * Handles cases where the node's network address has changed from
 289 * the default of <0.0.0> to its configured setting.
 290 */
 291static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg)
 292{
 293	struct sock *sk = &tsk->sk;
 294	u32 self = tipc_own_addr(sock_net(sk));
 295	u32 peer_port = tsk_peer_port(tsk);
 296	u32 orig_node, peer_node;
 297
 298	if (unlikely(!tipc_sk_connected(sk)))
 299		return false;
 300
 301	if (unlikely(msg_origport(msg) != peer_port))
 302		return false;
 303
 304	orig_node = msg_orignode(msg);
 305	peer_node = tsk_peer_node(tsk);
 306
 307	if (likely(orig_node == peer_node))
 308		return true;
 309
 310	if (!orig_node && peer_node == self)
 311		return true;
 312
 313	if (!peer_node && orig_node == self)
 314		return true;
 315
 316	return false;
 317}
 318
 319/* tipc_set_sk_state - set the sk_state of the socket
 320 * @sk: socket
 321 *
 322 * Caller must hold socket lock
 323 *
 324 * Returns 0 on success, errno otherwise
 325 */
 326static int tipc_set_sk_state(struct sock *sk, int state)
 327{
 328	int oldsk_state = sk->sk_state;
 329	int res = -EINVAL;
 330
 331	switch (state) {
 332	case TIPC_OPEN:
 333		res = 0;
 334		break;
 335	case TIPC_LISTEN:
 336	case TIPC_CONNECTING:
 337		if (oldsk_state == TIPC_OPEN)
 338			res = 0;
 339		break;
 340	case TIPC_ESTABLISHED:
 341		if (oldsk_state == TIPC_CONNECTING ||
 342		    oldsk_state == TIPC_OPEN)
 343			res = 0;
 344		break;
 345	case TIPC_DISCONNECTING:
 346		if (oldsk_state == TIPC_CONNECTING ||
 347		    oldsk_state == TIPC_ESTABLISHED)
 348			res = 0;
 349		break;
 350	}
 351
 352	if (!res)
 353		sk->sk_state = state;
 354
 355	return res;
 356}
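
/* Transitions accepted above; any other combination returns -EINVAL:
 *
 *	TIPC_OPEN          <- any state
 *	TIPC_LISTEN        <- TIPC_OPEN
 *	TIPC_CONNECTING    <- TIPC_OPEN
 *	TIPC_ESTABLISHED   <- TIPC_CONNECTING or TIPC_OPEN
 *	TIPC_DISCONNECTING <- TIPC_CONNECTING or TIPC_ESTABLISHED
 */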
 357
 358static int tipc_sk_sock_err(struct socket *sock, long *timeout)
 359{
 360	struct sock *sk = sock->sk;
 361	int err = sock_error(sk);
 362	int typ = sock->type;
 363
 364	if (err)
 365		return err;
 366	if (typ == SOCK_STREAM || typ == SOCK_SEQPACKET) {
 367		if (sk->sk_state == TIPC_DISCONNECTING)
 368			return -EPIPE;
 369		else if (!tipc_sk_connected(sk))
 370			return -ENOTCONN;
 371	}
 372	if (!*timeout)
 373		return -EAGAIN;
 374	if (signal_pending(current))
 375		return sock_intr_errno(*timeout);
 376
 377	return 0;
 378}
 379
 380#define tipc_wait_for_cond(sock_, timeo_, condition_)			       \
 381({                                                                             \
 382	DEFINE_WAIT_FUNC(wait_, woken_wake_function);                          \
 383	struct sock *sk_;						       \
 384	int rc_;							       \
 385									       \
 386	while ((rc_ = !(condition_))) {					       \
 387		/* coupled with smp_wmb() in tipc_sk_proto_rcv() */            \
 388		smp_rmb();                                                     \
 389		sk_ = (sock_)->sk;					       \
 390		rc_ = tipc_sk_sock_err((sock_), timeo_);		       \
 391		if (rc_)						       \
 392			break;						       \
 393		add_wait_queue(sk_sleep(sk_), &wait_);                         \
 394		release_sock(sk_);					       \
 395		*(timeo_) = wait_woken(&wait_, TASK_INTERRUPTIBLE, *(timeo_)); \
 396		sched_annotate_sleep();				               \
 397		lock_sock(sk_);						       \
 398		remove_wait_queue(sk_sleep(sk_), &wait_);		       \
 399	}								       \
 400	rc_;								       \
 401})
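
/* Typical use of tipc_wait_for_cond(): the caller holds the socket lock,
 * and the macro drops it while sleeping so the condition can change
 * underneath, re-acquiring it before each re-test. For example,
 * tipc_sendmcast() below waits for link congestion to clear with:
 *
 *	rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt);
 *	if (unlikely(rc))
 *		return rc;
 */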
 402
 403/**
 404 * tipc_sk_create - create a TIPC socket
 405 * @net: network namespace (must be default network)
 406 * @sock: pre-allocated socket structure
 407 * @protocol: protocol indicator (must be 0)
 408 * @kern: caused by kernel or by userspace?
 409 *
 410 * This routine creates additional data structures used by the TIPC socket,
 411 * initializes them, and links them together.
 412 *
 413 * Returns 0 on success, errno otherwise
 414 */
 415static int tipc_sk_create(struct net *net, struct socket *sock,
 416			  int protocol, int kern)
 417{
 418	const struct proto_ops *ops;
 419	struct sock *sk;
 420	struct tipc_sock *tsk;
 421	struct tipc_msg *msg;
 422
 423	/* Validate arguments */
 424	if (unlikely(protocol != 0))
 425		return -EPROTONOSUPPORT;
 426
 427	switch (sock->type) {
 428	case SOCK_STREAM:
 429		ops = &stream_ops;
 430		break;
 431	case SOCK_SEQPACKET:
 432		ops = &packet_ops;
 433		break;
 434	case SOCK_DGRAM:
 435	case SOCK_RDM:
 436		ops = &msg_ops;
 437		break;
 438	default:
 439		return -EPROTOTYPE;
 440	}
 441
 442	/* Allocate socket's protocol area */
 443	sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto, kern);
 444	if (sk == NULL)
 445		return -ENOMEM;
 446
 447	tsk = tipc_sk(sk);
 448	tsk->max_pkt = MAX_PKT_DEFAULT;
 449	INIT_LIST_HEAD(&tsk->publications);
 450	INIT_LIST_HEAD(&tsk->cong_links);
 451	msg = &tsk->phdr;
 452
 453	/* Finish initializing socket data structures */
 454	sock->ops = ops;
 455	sock_init_data(sock, sk);
 456	tipc_set_sk_state(sk, TIPC_OPEN);
 457	if (tipc_sk_insert(tsk)) {
 458		pr_warn("Socket create failed; port number exhausted\n");
 459		return -EINVAL;
 460	}
 461
 462	/* Ensure tsk is visible before we read own_addr. */
 463	smp_mb();
 464
 465	tipc_msg_init(tipc_own_addr(net), msg, TIPC_LOW_IMPORTANCE,
 466		      TIPC_NAMED_MSG, NAMED_H_SIZE, 0);
 467
 468	msg_set_origport(msg, tsk->portid);
 469	timer_setup(&sk->sk_timer, tipc_sk_timeout, 0);
 470	sk->sk_shutdown = 0;
 471	sk->sk_backlog_rcv = tipc_sk_backlog_rcv;
 472	sk->sk_rcvbuf = sysctl_tipc_rmem[1];
 473	sk->sk_data_ready = tipc_data_ready;
 474	sk->sk_write_space = tipc_write_space;
 475	sk->sk_destruct = tipc_sock_destruct;
 476	tsk->conn_timeout = CONN_TIMEOUT_DEFAULT;
 477	tsk->group_is_open = true;
 478	atomic_set(&tsk->dupl_rcvcnt, 0);
 479
 480	/* Start out with safe limits until we receive an advertised window */
 481	tsk->snd_win = tsk_adv_blocks(RCVBUF_MIN);
 482	tsk->rcv_win = tsk->snd_win;
 483
 484	if (tipc_sk_type_connectionless(sk)) {
 485		tsk_set_unreturnable(tsk, true);
 486		if (sock->type == SOCK_DGRAM)
 487			tsk_set_unreliable(tsk, true);
 488	}
 489	__skb_queue_head_init(&tsk->mc_method.deferredq);
 490	trace_tipc_sk_create(sk, NULL, TIPC_DUMP_NONE, " ");
 491	return 0;
 492}
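
/* Illustrative userspace counterpart (editorial addition, not part of
 * this file): the type switch above maps all four socket types onto
 * their ops tables, so creation looks the same for each. Assuming
 * <sys/socket.h> and <linux/tipc.h>:
 *
 *	int sd = socket(AF_TIPC, SOCK_RDM, 0);
 *
 *	if (sd < 0)
 *		perror("socket(AF_TIPC)");
 *
 * A protocol other than 0 fails with EPROTONOSUPPORT, and an unsupported
 * type with EPROTOTYPE, per the checks at the top of tipc_sk_create().
 */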
 493
 494static void tipc_sk_callback(struct rcu_head *head)
 495{
 496	struct tipc_sock *tsk = container_of(head, struct tipc_sock, rcu);
 497
 498	sock_put(&tsk->sk);
 499}
 500
 501/* Caller should hold socket lock for the socket. */
 502static void __tipc_shutdown(struct socket *sock, int error)
 503{
 504	struct sock *sk = sock->sk;
 505	struct tipc_sock *tsk = tipc_sk(sk);
 506	struct net *net = sock_net(sk);
 507	long timeout = CONN_TIMEOUT_DEFAULT;
 508	u32 dnode = tsk_peer_node(tsk);
 509	struct sk_buff *skb;
 510
 511	/* Avoid that hi-prio shutdown msgs bypass msgs in link wakeup queue */
 512	tipc_wait_for_cond(sock, &timeout, (!tsk->cong_link_cnt &&
 513					    !tsk_conn_cong(tsk)));
 514
 515	/* Remove any pending SYN message */
 516	__skb_queue_purge(&sk->sk_write_queue);
 517
 518	/* Reject all unreceived messages, except on an active connection
 519	 * (which disconnects locally & sends a 'FIN+' to peer).
 520	 */
 521	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
 522		if (TIPC_SKB_CB(skb)->bytes_read) {
 523			kfree_skb(skb);
 524			continue;
 525		}
 526		if (!tipc_sk_type_connectionless(sk) &&
 527		    sk->sk_state != TIPC_DISCONNECTING) {
 528			tipc_set_sk_state(sk, TIPC_DISCONNECTING);
 529			tipc_node_remove_conn(net, dnode, tsk->portid);
 530		}
 531		tipc_sk_respond(sk, skb, error);
 532	}
 533
 534	if (tipc_sk_type_connectionless(sk))
 535		return;
 536
 537	if (sk->sk_state != TIPC_DISCONNECTING) {
 538		skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
 539				      TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode,
 540				      tsk_own_node(tsk), tsk_peer_port(tsk),
 541				      tsk->portid, error);
 542		if (skb)
 543			tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
 544		tipc_node_remove_conn(net, dnode, tsk->portid);
 545		tipc_set_sk_state(sk, TIPC_DISCONNECTING);
 546	}
 547}
 548
 549/**
 550 * tipc_release - destroy a TIPC socket
 551 * @sock: socket to destroy
 552 *
 553 * This routine cleans up any messages that are still queued on the socket.
 554 * For DGRAM and RDM socket types, all queued messages are rejected.
 555 * For SEQPACKET and STREAM socket types, the first message is rejected
 556 * and any others are discarded.  (If the first message on a STREAM socket
 557 * is partially-read, it is discarded and the next one is rejected instead.)
 558 *
 559 * NOTE: Rejected messages are not necessarily returned to the sender!  They
 560 * are returned or discarded according to the "destination droppable" setting
 561 * specified for the message by the sender.
 562 *
 563 * Returns 0 on success, errno otherwise
 564 */
 565static int tipc_release(struct socket *sock)
 566{
 567	struct sock *sk = sock->sk;
 568	struct tipc_sock *tsk;
 569
 570	/*
 571	 * Exit if socket isn't fully initialized (occurs when a failed accept()
 572	 * releases a pre-allocated child socket that was never used)
 573	 */
 574	if (sk == NULL)
 575		return 0;
 576
 577	tsk = tipc_sk(sk);
 578	lock_sock(sk);
 579
 580	trace_tipc_sk_release(sk, NULL, TIPC_DUMP_ALL, " ");
 581	__tipc_shutdown(sock, TIPC_ERR_NO_PORT);
 582	sk->sk_shutdown = SHUTDOWN_MASK;
 583	tipc_sk_leave(tsk);
 584	tipc_sk_withdraw(tsk, 0, NULL);
 585	__skb_queue_purge(&tsk->mc_method.deferredq);
 586	sk_stop_timer(sk, &sk->sk_timer);
 587	tipc_sk_remove(tsk);
 588
 589	sock_orphan(sk);
 590	/* Reject any messages that accumulated in backlog queue */
 591	release_sock(sk);
 592	tipc_dest_list_purge(&tsk->cong_links);
 593	tsk->cong_link_cnt = 0;
 594	call_rcu(&tsk->rcu, tipc_sk_callback);
 595	sock->sk = NULL;
 596
 597	return 0;
 598}
 599
 600/**
 601 * tipc_bind - associate or disassociate TIPC name(s) with a socket
 602 * @sock: socket structure
 603 * @uaddr: socket address describing name(s) and desired operation
 604 * @uaddr_len: size of socket address data structure
 605 *
 606 * Name and name sequence binding is indicated using a positive scope value;
 607 * a negative scope value unbinds the specified name.  Specifying no name
 608 * (i.e. a socket address length of 0) unbinds all names from the socket.
 609 *
 610 * Returns 0 on success, errno otherwise
 611 *
 612 * NOTE: This routine takes the socket lock, since it checks and updates
 613 *       the socket's publication list and group membership.
 614 */
 615static int tipc_bind(struct socket *sock, struct sockaddr *uaddr,
 616		     int uaddr_len)
 617{
 618	struct sock *sk = sock->sk;
 619	struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
 620	struct tipc_sock *tsk = tipc_sk(sk);
 621	int res = -EINVAL;
 622
 623	lock_sock(sk);
 624	if (unlikely(!uaddr_len)) {
 625		res = tipc_sk_withdraw(tsk, 0, NULL);
 626		goto exit;
 627	}
 628	if (tsk->group) {
 629		res = -EACCES;
 630		goto exit;
 631	}
 632	if (uaddr_len < sizeof(struct sockaddr_tipc)) {
 633		res = -EINVAL;
 634		goto exit;
 635	}
 636	if (addr->family != AF_TIPC) {
 637		res = -EAFNOSUPPORT;
 638		goto exit;
 639	}
 640
 641	if (addr->addrtype == TIPC_ADDR_NAME)
 642		addr->addr.nameseq.upper = addr->addr.nameseq.lower;
 643	else if (addr->addrtype != TIPC_ADDR_NAMESEQ) {
 644		res = -EAFNOSUPPORT;
 645		goto exit;
 646	}
 647
 648	if ((addr->addr.nameseq.type < TIPC_RESERVED_TYPES) &&
 649	    (addr->addr.nameseq.type != TIPC_TOP_SRV) &&
 650	    (addr->addr.nameseq.type != TIPC_CFG_SRV)) {
 651		res = -EACCES;
 652		goto exit;
 653	}
 654
 655	res = (addr->scope >= 0) ?
 656		tipc_sk_publish(tsk, addr->scope, &addr->addr.nameseq) :
 657		tipc_sk_withdraw(tsk, -addr->scope, &addr->addr.nameseq);
 658exit:
 659	release_sock(sk);
 660	return res;
 661}
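
/* Illustrative userspace sketch (editorial addition, not part of this
 * file): binding a name sequence with cluster scope, using fields from
 * <linux/tipc.h>. Service type 1000 is an arbitrary example; types below
 * TIPC_RESERVED_TYPES are refused with EACCES above.
 *
 *	struct sockaddr_tipc addr = {
 *		.family   = AF_TIPC,
 *		.addrtype = TIPC_ADDR_NAMESEQ,
 *		.scope    = TIPC_CLUSTER_SCOPE,
 *	};
 *
 *	addr.addr.nameseq.type  = 1000;
 *	addr.addr.nameseq.lower = 0;
 *	addr.addr.nameseq.upper = 99;
 *	if (bind(sd, (struct sockaddr *)&addr, sizeof(addr)))
 *		perror("bind");
 *
 * Passing a negative scope (e.g. -TIPC_CLUSTER_SCOPE) withdraws the same
 * binding, matching the addr->scope >= 0 test above.
 */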
 662
 663/**
 664 * tipc_getname - get port ID of socket or peer socket
 665 * @sock: socket structure
 666 * @uaddr: area for returned socket address
 667 * @uaddr_len: area for returned length of socket address
 668 * @peer: 0 = own ID, 1 = current peer ID, 2 = current/former peer ID
 669 *
 670 * Returns 0 on success, errno otherwise
 671 *
 672 * NOTE: This routine doesn't need to take the socket lock since it only
 673 *       accesses socket information that is unchanging (or which changes in
 674 *       a completely predictable manner).
 675 */
 676static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
 677			int peer)
 678{
 679	struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
 680	struct sock *sk = sock->sk;
 681	struct tipc_sock *tsk = tipc_sk(sk);
 682
 683	memset(addr, 0, sizeof(*addr));
 684	if (peer) {
 685		if ((!tipc_sk_connected(sk)) &&
 686		    ((peer != 2) || (sk->sk_state != TIPC_DISCONNECTING)))
 687			return -ENOTCONN;
 688		addr->addr.id.ref = tsk_peer_port(tsk);
 689		addr->addr.id.node = tsk_peer_node(tsk);
 690	} else {
 691		addr->addr.id.ref = tsk->portid;
 692		addr->addr.id.node = tipc_own_addr(sock_net(sk));
 693	}
 694
 695	addr->addrtype = TIPC_ADDR_ID;
 696	addr->family = AF_TIPC;
 697	addr->scope = 0;
 698	addr->addr.name.domain = 0;
 699
 700	return sizeof(*addr);
 701}
 702
 703/**
 704 * tipc_poll - read and possibly block on pollmask
 705 * @file: file structure associated with the socket
 706 * @sock: socket for which to calculate the poll bits
 707 * @wait: poll table from the caller, used to register for wakeups
 708 *
 709 * Returns pollmask value
 710 *
 711 * COMMENTARY:
 712 * It appears that the usual socket locking mechanisms are not useful here
 713 * since the pollmask info is potentially out-of-date the moment this routine
 714 * exits.  TCP and other protocols seem to rely on higher level poll routines
 715 * to handle any preventable race conditions, so TIPC will do the same ...
 716 *
 717 * IMPORTANT: The fact that a read or write operation is indicated does NOT
 718 * imply that the operation will succeed, merely that it should be performed
 719 * and will not block.
 720 */
 721static __poll_t tipc_poll(struct file *file, struct socket *sock,
 722			      poll_table *wait)
 723{
 724	struct sock *sk = sock->sk;
 725	struct tipc_sock *tsk = tipc_sk(sk);
 726	__poll_t revents = 0;
 727
 728	sock_poll_wait(file, sock, wait);
 729	trace_tipc_sk_poll(sk, NULL, TIPC_DUMP_ALL, " ");
 730
 731	if (sk->sk_shutdown & RCV_SHUTDOWN)
 732		revents |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
 733	if (sk->sk_shutdown == SHUTDOWN_MASK)
 734		revents |= EPOLLHUP;
 735
 736	switch (sk->sk_state) {
 737	case TIPC_ESTABLISHED:
 738		if (!tsk->cong_link_cnt && !tsk_conn_cong(tsk))
 739			revents |= EPOLLOUT;
 740		/* fall through */
 741	case TIPC_LISTEN:
 742	case TIPC_CONNECTING:
 743		if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
 744			revents |= EPOLLIN | EPOLLRDNORM;
 745		break;
 746	case TIPC_OPEN:
 747		if (tsk->group_is_open && !tsk->cong_link_cnt)
 748			revents |= EPOLLOUT;
 749		if (!tipc_sk_type_connectionless(sk))
 750			break;
 751		if (skb_queue_empty_lockless(&sk->sk_receive_queue))
 752			break;
 753		revents |= EPOLLIN | EPOLLRDNORM;
 754		break;
 755	case TIPC_DISCONNECTING:
 756		revents = EPOLLIN | EPOLLRDNORM | EPOLLHUP;
 757		break;
 758	}
 759	return revents;
 760}
 761
 762/**
 763 * tipc_sendmcast - send multicast message
 764 * @sock: socket structure
 765 * @seq: destination address
 766 * @msg: message to send
 767 * @dlen: length of data to send
 768 * @timeout: timeout to wait for wakeup
 769 *
 770 * Called from function tipc_sendmsg(), which has done all sanity checks
 771 * Returns the number of bytes sent on success, or errno
 772 */
 773static int tipc_sendmcast(struct  socket *sock, struct tipc_name_seq *seq,
 774			  struct msghdr *msg, size_t dlen, long timeout)
 775{
 776	struct sock *sk = sock->sk;
 777	struct tipc_sock *tsk = tipc_sk(sk);
 778	struct tipc_msg *hdr = &tsk->phdr;
 779	struct net *net = sock_net(sk);
 780	int mtu = tipc_bcast_get_mtu(net);
 781	struct tipc_mc_method *method = &tsk->mc_method;
 782	struct sk_buff_head pkts;
 783	struct tipc_nlist dsts;
 784	int rc;
 785
 786	if (tsk->group)
 787		return -EACCES;
 788
 789	/* Block or return if any destination link is congested */
 790	rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt);
 791	if (unlikely(rc))
 792		return rc;
 793
 794	/* Lookup destination nodes */
 795	tipc_nlist_init(&dsts, tipc_own_addr(net));
 796	tipc_nametbl_lookup_dst_nodes(net, seq->type, seq->lower,
 797				      seq->upper, &dsts);
 798	if (!dsts.local && !dsts.remote)
 799		return -EHOSTUNREACH;
 800
 801	/* Build message header */
 802	msg_set_type(hdr, TIPC_MCAST_MSG);
 803	msg_set_hdr_sz(hdr, MCAST_H_SIZE);
 804	msg_set_lookup_scope(hdr, TIPC_CLUSTER_SCOPE);
 805	msg_set_destport(hdr, 0);
 806	msg_set_destnode(hdr, 0);
 807	msg_set_nametype(hdr, seq->type);
 808	msg_set_namelower(hdr, seq->lower);
 809	msg_set_nameupper(hdr, seq->upper);
 810
 811	/* Build message as chain of buffers */
 812	__skb_queue_head_init(&pkts);
 813	rc = tipc_msg_build(hdr, msg, 0, dlen, mtu, &pkts);
 814
 815	/* Send message if build was successful */
 816	if (unlikely(rc == dlen)) {
 817		trace_tipc_sk_sendmcast(sk, skb_peek(&pkts),
 818					TIPC_DUMP_SK_SNDQ, " ");
 819		rc = tipc_mcast_xmit(net, &pkts, method, &dsts,
 820				     &tsk->cong_link_cnt);
 821	}
 822
 823	tipc_nlist_purge(&dsts);
 824
 825	return rc ? rc : dlen;
 826}
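
/* Illustrative userspace sketch (editorial addition, not part of this
 * file): a multicast send reaches every socket bound to an instance in
 * [lower, upper] of the given service type:
 *
 *	struct sockaddr_tipc dst = {
 *		.family   = AF_TIPC,
 *		.addrtype = TIPC_ADDR_MCAST,
 *	};
 *
 *	dst.addr.nameseq.type  = 1000;
 *	dst.addr.nameseq.lower = 0;
 *	dst.addr.nameseq.upper = 99;
 *	sendto(sd, buf, len, 0, (struct sockaddr *)&dst, sizeof(dst));
 *
 * __tipc_sendmsg() below dispatches TIPC_ADDR_MCAST destinations to
 * tipc_sendmcast() above.
 */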
 827
 828/**
 829 * tipc_send_group_msg - send a message to a member in the group
 830 * @net: network namespace
     * @tsk: TIPC socket structure
 831 * @m: message to send
 832 * @mb: group member
 833 * @dnode: destination node
 834 * @dport: destination port
 835 * @dlen: total length of message data
 836 */
 837static int tipc_send_group_msg(struct net *net, struct tipc_sock *tsk,
 838			       struct msghdr *m, struct tipc_member *mb,
 839			       u32 dnode, u32 dport, int dlen)
 840{
 841	u16 bc_snd_nxt = tipc_group_bc_snd_nxt(tsk->group);
 842	struct tipc_mc_method *method = &tsk->mc_method;
 843	int blks = tsk_blocks(GROUP_H_SIZE + dlen);
 844	struct tipc_msg *hdr = &tsk->phdr;
 845	struct sk_buff_head pkts;
 846	int mtu, rc;
 847
 848	/* Complete message header */
 849	msg_set_type(hdr, TIPC_GRP_UCAST_MSG);
 850	msg_set_hdr_sz(hdr, GROUP_H_SIZE);
 851	msg_set_destport(hdr, dport);
 852	msg_set_destnode(hdr, dnode);
 853	msg_set_grp_bc_seqno(hdr, bc_snd_nxt);
 854
 855	/* Build message as chain of buffers */
 856	__skb_queue_head_init(&pkts);
 857	mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
 858	rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
 859	if (unlikely(rc != dlen))
 860		return rc;
 861
 862	/* Send message */
 863	rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
 864	if (unlikely(rc == -ELINKCONG)) {
 865		tipc_dest_push(&tsk->cong_links, dnode, 0);
 866		tsk->cong_link_cnt++;
 867	}
 868
 869	/* Update send window */
 870	tipc_group_update_member(mb, blks);
 871
 872	/* A broadcast sent within next EXPIRE period must follow same path */
 873	method->rcast = true;
 874	method->mandatory = true;
 875	return dlen;
 876}
 877
 878/**
 879 * tipc_send_group_unicast - send message to a member in the group
 880 * @sock: socket structure
 881 * @m: message to send
 882 * @dlen: total length of message data
 883 * @timeout: timeout to wait for wakeup
 884 *
 885 * Called from function tipc_sendmsg(), which has done all sanity checks
 886 * Returns the number of bytes sent on success, or errno
 887 */
 888static int tipc_send_group_unicast(struct socket *sock, struct msghdr *m,
 889				   int dlen, long timeout)
 890{
 891	struct sock *sk = sock->sk;
 892	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
 893	int blks = tsk_blocks(GROUP_H_SIZE + dlen);
 894	struct tipc_sock *tsk = tipc_sk(sk);
 895	struct net *net = sock_net(sk);
 896	struct tipc_member *mb = NULL;
 897	u32 node, port;
 898	int rc;
 899
 900	node = dest->addr.id.node;
 901	port = dest->addr.id.ref;
 902	if (!port && !node)
 903		return -EHOSTUNREACH;
 904
 905	/* Block or return if destination link or member is congested */
 906	rc = tipc_wait_for_cond(sock, &timeout,
 907				!tipc_dest_find(&tsk->cong_links, node, 0) &&
 908				tsk->group &&
 909				!tipc_group_cong(tsk->group, node, port, blks,
 910						 &mb));
 911	if (unlikely(rc))
 912		return rc;
 913
 914	if (unlikely(!mb))
 915		return -EHOSTUNREACH;
 916
 917	rc = tipc_send_group_msg(net, tsk, m, mb, node, port, dlen);
 918
 919	return rc ? rc : dlen;
 920}
 921
 922/**
 923 * tipc_send_group_anycast - send message to any member with given identity
 924 * @sock: socket structure
 925 * @m: message to send
 926 * @dlen: total length of message data
 927 * @timeout: timeout to wait for wakeup
 928 *
 929 * Called from function tipc_sendmsg(), which has done all sanity checks
 930 * Returns the number of bytes sent on success, or errno
 931 */
 932static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m,
 933				   int dlen, long timeout)
 934{
 935	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
 936	struct sock *sk = sock->sk;
 937	struct tipc_sock *tsk = tipc_sk(sk);
 938	struct list_head *cong_links = &tsk->cong_links;
 939	int blks = tsk_blocks(GROUP_H_SIZE + dlen);
 940	struct tipc_msg *hdr = &tsk->phdr;
 941	struct tipc_member *first = NULL;
 942	struct tipc_member *mbr = NULL;
 943	struct net *net = sock_net(sk);
 944	u32 node, port, exclude;
 945	struct list_head dsts;
 946	u32 type, inst, scope;
 947	int lookups = 0;
 948	int dstcnt, rc;
 949	bool cong;
 950
 951	INIT_LIST_HEAD(&dsts);
 952
 953	type = msg_nametype(hdr);
 954	inst = dest->addr.name.name.instance;
 955	scope = msg_lookup_scope(hdr);
 956
 957	while (++lookups < 4) {
 958		exclude = tipc_group_exclude(tsk->group);
 959
 960		first = NULL;
 961
 962		/* Look for a non-congested destination member, if any */
 963		while (1) {
 964			if (!tipc_nametbl_lookup(net, type, inst, scope, &dsts,
 965						 &dstcnt, exclude, false))
 966				return -EHOSTUNREACH;
 967			tipc_dest_pop(&dsts, &node, &port);
 968			cong = tipc_group_cong(tsk->group, node, port, blks,
 969					       &mbr);
 970			if (!cong)
 971				break;
 972			if (mbr == first)
 973				break;
 974			if (!first)
 975				first = mbr;
 976		}
 977
 978		/* Start over if destination was not in member list */
 979		if (unlikely(!mbr))
 980			continue;
 981
 982		if (likely(!cong && !tipc_dest_find(cong_links, node, 0)))
 983			break;
 984
 985		/* Block or return if destination link or member is congested */
 986		rc = tipc_wait_for_cond(sock, &timeout,
 987					!tipc_dest_find(cong_links, node, 0) &&
 988					tsk->group &&
 989					!tipc_group_cong(tsk->group, node, port,
 990							 blks, &mbr));
 991		if (unlikely(rc))
 992			return rc;
 993
 994		/* Send, unless destination disappeared while waiting */
 995		if (likely(mbr))
 996			break;
 997	}
 998
 999	if (unlikely(lookups >= 4))
1000		return -EHOSTUNREACH;
1001
1002	rc = tipc_send_group_msg(net, tsk, m, mbr, node, port, dlen);
1003
1004	return rc ? rc : dlen;
1005}
1006
1007/**
1008 * tipc_send_group_bcast - send message to all members in communication group
1009 * @sock: socket structure
1010 * @m: message to send
1011 * @dlen: total length of message data
1012 * @timeout: timeout to wait for wakeup
1013 *
1014 * Called from function tipc_sendmsg(), which has done all sanity checks
1015 * Returns the number of bytes sent on success, or errno
1016 */
1017static int tipc_send_group_bcast(struct socket *sock, struct msghdr *m,
1018				 int dlen, long timeout)
1019{
1020	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
1021	struct sock *sk = sock->sk;
1022	struct net *net = sock_net(sk);
1023	struct tipc_sock *tsk = tipc_sk(sk);
1024	struct tipc_nlist *dsts;
1025	struct tipc_mc_method *method = &tsk->mc_method;
1026	bool ack = method->mandatory && method->rcast;
1027	int blks = tsk_blocks(MCAST_H_SIZE + dlen);
1028	struct tipc_msg *hdr = &tsk->phdr;
1029	int mtu = tipc_bcast_get_mtu(net);
1030	struct sk_buff_head pkts;
1031	int rc = -EHOSTUNREACH;
1032
1033	/* Block or return if any destination link or member is congested */
1034	rc = tipc_wait_for_cond(sock, &timeout,
1035				!tsk->cong_link_cnt && tsk->group &&
1036				!tipc_group_bc_cong(tsk->group, blks));
1037	if (unlikely(rc))
1038		return rc;
1039
1040	dsts = tipc_group_dests(tsk->group);
1041	if (!dsts->local && !dsts->remote)
1042		return -EHOSTUNREACH;
1043
1044	/* Complete message header */
1045	if (dest) {
1046		msg_set_type(hdr, TIPC_GRP_MCAST_MSG);
1047		msg_set_nameinst(hdr, dest->addr.name.name.instance);
1048	} else {
1049		msg_set_type(hdr, TIPC_GRP_BCAST_MSG);
1050		msg_set_nameinst(hdr, 0);
1051	}
1052	msg_set_hdr_sz(hdr, GROUP_H_SIZE);
1053	msg_set_destport(hdr, 0);
1054	msg_set_destnode(hdr, 0);
1055	msg_set_grp_bc_seqno(hdr, tipc_group_bc_snd_nxt(tsk->group));
1056
1057	/* Avoid getting stuck with repeated forced replicasts */
1058	msg_set_grp_bc_ack_req(hdr, ack);
1059
1060	/* Build message as chain of buffers */
1061	__skb_queue_head_init(&pkts);
1062	rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
1063	if (unlikely(rc != dlen))
1064		return rc;
1065
1066	/* Send message */
1067	rc = tipc_mcast_xmit(net, &pkts, method, dsts, &tsk->cong_link_cnt);
1068	if (unlikely(rc))
1069		return rc;
1070
1071	/* Update broadcast sequence number and send windows */
1072	tipc_group_update_bc_members(tsk->group, blks, ack);
1073
1074	/* Broadcast link is now free to choose method for next broadcast */
1075	method->mandatory = false;
1076	method->expires = jiffies;
1077
1078	return dlen;
1079}
1080
1081/**
1082 * tipc_send_group_mcast - send message to all members with given identity
1083 * @sock: socket structure
1084 * @m: message to send
1085 * @dlen: total length of message data
1086 * @timeout: timeout to wait for wakeup
1087 *
1088 * Called from function tipc_sendmsg(), which has done all sanity checks
1089 * Returns the number of bytes sent on success, or errno
1090 */
1091static int tipc_send_group_mcast(struct socket *sock, struct msghdr *m,
1092				 int dlen, long timeout)
1093{
1094	struct sock *sk = sock->sk;
1095	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
1096	struct tipc_sock *tsk = tipc_sk(sk);
1097	struct tipc_group *grp = tsk->group;
1098	struct tipc_msg *hdr = &tsk->phdr;
1099	struct net *net = sock_net(sk);
1100	u32 type, inst, scope, exclude;
1101	struct list_head dsts;
1102	u32 dstcnt;
1103
1104	INIT_LIST_HEAD(&dsts);
1105
1106	type = msg_nametype(hdr);
1107	inst = dest->addr.name.name.instance;
1108	scope = msg_lookup_scope(hdr);
1109	exclude = tipc_group_exclude(grp);
1110
1111	if (!tipc_nametbl_lookup(net, type, inst, scope, &dsts,
1112				 &dstcnt, exclude, true))
1113		return -EHOSTUNREACH;
1114
1115	if (dstcnt == 1) {
1116		tipc_dest_pop(&dsts, &dest->addr.id.node, &dest->addr.id.ref);
1117		return tipc_send_group_unicast(sock, m, dlen, timeout);
1118	}
1119
1120	tipc_dest_list_purge(&dsts);
1121	return tipc_send_group_bcast(sock, m, dlen, timeout);
1122}
1123
1124/**
1125 * tipc_sk_mcast_rcv - Deliver multicast messages to all destination sockets
     * @net: network namespace
1126 * @arrvq: queue with arriving messages, to be cloned after destination lookup
1127 * @inputq: queue with cloned messages, delivered to socket after dest lookup
1128 *
1129 * Multi-threaded: parallel calls with reference to same queues may occur
1130 */
1131void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
1132		       struct sk_buff_head *inputq)
1133{
1134	u32 self = tipc_own_addr(net);
1135	u32 type, lower, upper, scope;
1136	struct sk_buff *skb, *_skb;
1137	u32 portid, onode;
1138	struct sk_buff_head tmpq;
1139	struct list_head dports;
1140	struct tipc_msg *hdr;
1141	int user, mtyp, hlen;
1142	bool exact;
1143
1144	__skb_queue_head_init(&tmpq);
1145	INIT_LIST_HEAD(&dports);
1146
1147	skb = tipc_skb_peek(arrvq, &inputq->lock);
1148	for (; skb; skb = tipc_skb_peek(arrvq, &inputq->lock)) {
1149		hdr = buf_msg(skb);
1150		user = msg_user(hdr);
1151		mtyp = msg_type(hdr);
1152		hlen = skb_headroom(skb) + msg_hdr_sz(hdr);
1153		onode = msg_orignode(hdr);
1154		type = msg_nametype(hdr);
1155
1156		if (mtyp == TIPC_GRP_UCAST_MSG || user == GROUP_PROTOCOL) {
1157			spin_lock_bh(&inputq->lock);
1158			if (skb_peek(arrvq) == skb) {
1159				__skb_dequeue(arrvq);
1160				__skb_queue_tail(inputq, skb);
1161			}
1162			kfree_skb(skb);
1163			spin_unlock_bh(&inputq->lock);
1164			continue;
1165		}
1166
1167		/* Group messages require exact scope match */
1168		if (msg_in_group(hdr)) {
1169			lower = 0;
1170			upper = ~0;
1171			scope = msg_lookup_scope(hdr);
1172			exact = true;
1173		} else {
1174			/* TIPC_NODE_SCOPE means "any scope" in this context */
1175			if (onode == self)
1176				scope = TIPC_NODE_SCOPE;
1177			else
1178				scope = TIPC_CLUSTER_SCOPE;
1179			exact = false;
1180			lower = msg_namelower(hdr);
1181			upper = msg_nameupper(hdr);
1182		}
1183
1184		/* Create destination port list: */
1185		tipc_nametbl_mc_lookup(net, type, lower, upper,
1186				       scope, exact, &dports);
1187
1188		/* Clone message per destination */
1189		while (tipc_dest_pop(&dports, NULL, &portid)) {
1190			_skb = __pskb_copy(skb, hlen, GFP_ATOMIC);
1191			if (_skb) {
1192				msg_set_destport(buf_msg(_skb), portid);
1193				__skb_queue_tail(&tmpq, _skb);
1194				continue;
1195			}
1196			pr_warn("Failed to clone mcast rcv buffer\n");
1197		}
1198		/* Append to inputq if not already done by another thread */
1199		spin_lock_bh(&inputq->lock);
1200		if (skb_peek(arrvq) == skb) {
1201			skb_queue_splice_tail_init(&tmpq, inputq);
1202			kfree_skb(__skb_dequeue(arrvq));
1203		}
1204		spin_unlock_bh(&inputq->lock);
1205		__skb_queue_purge(&tmpq);
1206		kfree_skb(skb);
1207	}
1208	tipc_sk_rcv(net, inputq);
1209}
1210
1211/**
1212 * tipc_sk_conn_proto_rcv - receive a connection mng protocol message
1213 * @tsk: receiving socket
1214 * @skb: pointer to message buffer.
     * @inputq: socket input queue, where an abort message may be appended
     * @xmitq: queue for protocol replies to be transmitted
1215 */
1216static void tipc_sk_conn_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
1217				   struct sk_buff_head *inputq,
1218				   struct sk_buff_head *xmitq)
1219{
1220	struct tipc_msg *hdr = buf_msg(skb);
1221	u32 onode = tsk_own_node(tsk);
1222	struct sock *sk = &tsk->sk;
1223	int mtyp = msg_type(hdr);
1224	bool conn_cong;
1225
1226	/* Ignore if connection cannot be validated: */
1227	if (!tsk_peer_msg(tsk, hdr)) {
1228		trace_tipc_sk_drop_msg(sk, skb, TIPC_DUMP_NONE, "@proto_rcv!");
1229		goto exit;
1230	}
1231
1232	if (unlikely(msg_errcode(hdr))) {
1233		tipc_set_sk_state(sk, TIPC_DISCONNECTING);
1234		tipc_node_remove_conn(sock_net(sk), tsk_peer_node(tsk),
1235				      tsk_peer_port(tsk));
1236		sk->sk_state_change(sk);
1237
1238		/* State change is ignored if socket already awake,
1239		 * - convert msg to abort msg and add to inqueue
1240		 */
1241		msg_set_user(hdr, TIPC_CRITICAL_IMPORTANCE);
1242		msg_set_type(hdr, TIPC_CONN_MSG);
1243		msg_set_size(hdr, BASIC_H_SIZE);
1244		msg_set_hdr_sz(hdr, BASIC_H_SIZE);
1245		__skb_queue_tail(inputq, skb);
1246		return;
1247	}
1248
1249	tsk->probe_unacked = false;
1250
1251	if (mtyp == CONN_PROBE) {
1252		msg_set_type(hdr, CONN_PROBE_REPLY);
1253		if (tipc_msg_reverse(onode, &skb, TIPC_OK))
1254			__skb_queue_tail(xmitq, skb);
1255		return;
1256	} else if (mtyp == CONN_ACK) {
1257		conn_cong = tsk_conn_cong(tsk);
1258		tsk->snt_unacked -= msg_conn_ack(hdr);
1259		if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
1260			tsk->snd_win = msg_adv_win(hdr);
1261		if (conn_cong)
1262			sk->sk_write_space(sk);
1263	} else if (mtyp != CONN_PROBE_REPLY) {
1264		pr_warn("Received unknown CONN_PROTO msg\n");
1265	}
1266exit:
1267	kfree_skb(skb);
1268}
1269
1270/**
1271 * tipc_sendmsg - send message in connectionless manner
1272 * @sock: socket structure
1273 * @m: message to send
1274 * @dsz: amount of user data to be sent
1275 *
1276 * Message must have a destination specified explicitly.
1277 * Used for SOCK_RDM and SOCK_DGRAM messages,
1278 * and for 'SYN' messages on SOCK_SEQPACKET and SOCK_STREAM connections.
1279 * (Note: 'SYN+' is prohibited on SOCK_STREAM.)
1280 *
1281 * Returns the number of bytes sent on success, or errno otherwise
1282 */
1283static int tipc_sendmsg(struct socket *sock,
1284			struct msghdr *m, size_t dsz)
1285{
1286	struct sock *sk = sock->sk;
1287	int ret;
1288
1289	lock_sock(sk);
1290	ret = __tipc_sendmsg(sock, m, dsz);
1291	release_sock(sk);
1292
1293	return ret;
1294}
1295
1296static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
1297{
1298	struct sock *sk = sock->sk;
1299	struct net *net = sock_net(sk);
1300	struct tipc_sock *tsk = tipc_sk(sk);
1301	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
1302	long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
1303	struct list_head *clinks = &tsk->cong_links;
1304	bool syn = !tipc_sk_type_connectionless(sk);
1305	struct tipc_group *grp = tsk->group;
1306	struct tipc_msg *hdr = &tsk->phdr;
1307	struct tipc_name_seq *seq;
1308	struct sk_buff_head pkts;
1309	u32 dport, dnode = 0;
1310	u32 type, inst;
1311	int mtu, rc;
1312
1313	if (unlikely(dlen > TIPC_MAX_USER_MSG_SIZE))
1314		return -EMSGSIZE;
1315
1316	if (likely(dest)) {
1317		if (unlikely(m->msg_namelen < sizeof(*dest)))
1318			return -EINVAL;
1319		if (unlikely(dest->family != AF_TIPC))
1320			return -EINVAL;
1321	}
1322
1323	if (grp) {
1324		if (!dest)
1325			return tipc_send_group_bcast(sock, m, dlen, timeout);
1326		if (dest->addrtype == TIPC_ADDR_NAME)
1327			return tipc_send_group_anycast(sock, m, dlen, timeout);
1328		if (dest->addrtype == TIPC_ADDR_ID)
1329			return tipc_send_group_unicast(sock, m, dlen, timeout);
1330		if (dest->addrtype == TIPC_ADDR_MCAST)
1331			return tipc_send_group_mcast(sock, m, dlen, timeout);
1332		return -EINVAL;
1333	}
1334
1335	if (unlikely(!dest)) {
1336		dest = &tsk->peer;
1337		if (!syn && dest->family != AF_TIPC)
1338			return -EDESTADDRREQ;
1339	}
1340
1341	if (unlikely(syn)) {
1342		if (sk->sk_state == TIPC_LISTEN)
1343			return -EPIPE;
1344		if (sk->sk_state != TIPC_OPEN)
1345			return -EISCONN;
1346		if (tsk->published)
1347			return -EOPNOTSUPP;
1348		if (dest->addrtype == TIPC_ADDR_NAME) {
1349			tsk->conn_type = dest->addr.name.name.type;
1350			tsk->conn_instance = dest->addr.name.name.instance;
1351		}
1352		msg_set_syn(hdr, 1);
1353	}
1354
1355	seq = &dest->addr.nameseq;
1356	if (dest->addrtype == TIPC_ADDR_MCAST)
1357		return tipc_sendmcast(sock, seq, m, dlen, timeout);
1358
1359	if (dest->addrtype == TIPC_ADDR_NAME) {
1360		type = dest->addr.name.name.type;
1361		inst = dest->addr.name.name.instance;
1362		dnode = dest->addr.name.domain;
1363		msg_set_type(hdr, TIPC_NAMED_MSG);
1364		msg_set_hdr_sz(hdr, NAMED_H_SIZE);
1365		msg_set_nametype(hdr, type);
1366		msg_set_nameinst(hdr, inst);
1367		msg_set_lookup_scope(hdr, tipc_node2scope(dnode));
1368		dport = tipc_nametbl_translate(net, type, inst, &dnode);
1369		msg_set_destnode(hdr, dnode);
1370		msg_set_destport(hdr, dport);
1371		if (unlikely(!dport && !dnode))
1372			return -EHOSTUNREACH;
1373	} else if (dest->addrtype == TIPC_ADDR_ID) {
1374		dnode = dest->addr.id.node;
1375		msg_set_type(hdr, TIPC_DIRECT_MSG);
1376		msg_set_lookup_scope(hdr, 0);
1377		msg_set_destnode(hdr, dnode);
1378		msg_set_destport(hdr, dest->addr.id.ref);
1379		msg_set_hdr_sz(hdr, BASIC_H_SIZE);
1380	} else {
1381		return -EINVAL;
1382	}
1383
1384	/* Block or return if destination link is congested */
1385	rc = tipc_wait_for_cond(sock, &timeout,
1386				!tipc_dest_find(clinks, dnode, 0));
1387	if (unlikely(rc))
1388		return rc;
1389
1390	__skb_queue_head_init(&pkts);
1391	mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
1392	rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
1393	if (unlikely(rc != dlen))
1394		return rc;
1395	if (unlikely(syn && !tipc_msg_skb_clone(&pkts, &sk->sk_write_queue)))
1396		return -ENOMEM;
1397
1398	trace_tipc_sk_sendmsg(sk, skb_peek(&pkts), TIPC_DUMP_SK_SNDQ, " ");
1399	rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
1400	if (unlikely(rc == -ELINKCONG)) {
1401		tipc_dest_push(clinks, dnode, 0);
1402		tsk->cong_link_cnt++;
1403		rc = 0;
1404	}
1405
1406	if (unlikely(syn && !rc))
1407		tipc_set_sk_state(sk, TIPC_CONNECTING);
1408
1409	return rc ? rc : dlen;
1410}
1411
1412/**
1413 * tipc_sendstream - send stream-oriented data
1414 * @sock: socket structure
1415 * @m: data to send
1416 * @dsz: total length of data to be transmitted
1417 *
1418 * Used for SOCK_STREAM data.
1419 *
1420 * Returns the number of bytes sent on success (or partial success),
1421 * or errno if no data sent
1422 */
1423static int tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz)
1424{
1425	struct sock *sk = sock->sk;
1426	int ret;
1427
1428	lock_sock(sk);
1429	ret = __tipc_sendstream(sock, m, dsz);
1430	release_sock(sk);
1431
1432	return ret;
1433}
1434
1435static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen)
1436{
1437	struct sock *sk = sock->sk;
1438	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
1439	long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
1440	struct tipc_sock *tsk = tipc_sk(sk);
1441	struct tipc_msg *hdr = &tsk->phdr;
1442	struct net *net = sock_net(sk);
1443	struct sk_buff_head pkts;
1444	u32 dnode = tsk_peer_node(tsk);
1445	int send, sent = 0;
1446	int rc = 0;
1447
1448	__skb_queue_head_init(&pkts);
1449
1450	if (unlikely(dlen > INT_MAX))
1451		return -EMSGSIZE;
1452
1453	/* Handle implicit connection setup */
1454	if (unlikely(dest)) {
1455		rc = __tipc_sendmsg(sock, m, dlen);
1456		if (dlen && dlen == rc) {
1457			tsk->peer_caps = tipc_node_get_capabilities(net, dnode);
1458			tsk->snt_unacked = tsk_inc(tsk, dlen + msg_hdr_sz(hdr));
1459		}
1460		return rc;
1461	}
1462
1463	do {
1464		rc = tipc_wait_for_cond(sock, &timeout,
1465					(!tsk->cong_link_cnt &&
1466					 !tsk_conn_cong(tsk) &&
1467					 tipc_sk_connected(sk)));
1468		if (unlikely(rc))
1469			break;
1470
1471		send = min_t(size_t, dlen - sent, TIPC_MAX_USER_MSG_SIZE);
1472		rc = tipc_msg_build(hdr, m, sent, send, tsk->max_pkt, &pkts);
1473		if (unlikely(rc != send))
1474			break;
1475
1476		trace_tipc_sk_sendstream(sk, skb_peek(&pkts),
1477					 TIPC_DUMP_SK_SNDQ, " ");
1478		rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
1479		if (unlikely(rc == -ELINKCONG)) {
1480			tsk->cong_link_cnt = 1;
1481			rc = 0;
1482		}
1483		if (likely(!rc)) {
1484			tsk->snt_unacked += tsk_inc(tsk, send + MIN_H_SIZE);
1485			sent += send;
1486		}
1487	} while (sent < dlen && !rc);
1488
1489	return sent ? sent : rc;
1490}
1491
1492/**
1493 * tipc_send_packet - send a connection-oriented message
1494 * @sock: socket structure
1495 * @m: message to send
1496 * @dsz: length of data to be transmitted
1497 *
1498 * Used for SOCK_SEQPACKET messages.
1499 *
1500 * Returns the number of bytes sent on success, or errno otherwise
1501 */
1502static int tipc_send_packet(struct socket *sock, struct msghdr *m, size_t dsz)
1503{
1504	if (dsz > TIPC_MAX_USER_MSG_SIZE)
1505		return -EMSGSIZE;
1506
1507	return tipc_sendstream(sock, m, dsz);
1508}
1509
1510/* tipc_sk_finish_conn - complete the setup of a connection
1511 */
1512static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port,
1513				u32 peer_node)
1514{
1515	struct sock *sk = &tsk->sk;
1516	struct net *net = sock_net(sk);
1517	struct tipc_msg *msg = &tsk->phdr;
1518
1519	msg_set_syn(msg, 0);
1520	msg_set_destnode(msg, peer_node);
1521	msg_set_destport(msg, peer_port);
1522	msg_set_type(msg, TIPC_CONN_MSG);
1523	msg_set_lookup_scope(msg, 0);
1524	msg_set_hdr_sz(msg, SHORT_H_SIZE);
1525
1526	sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV);
1527	tipc_set_sk_state(sk, TIPC_ESTABLISHED);
1528	tipc_node_add_conn(net, peer_node, tsk->portid, peer_port);
1529	tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid);
1530	tsk->peer_caps = tipc_node_get_capabilities(net, peer_node);
1531	__skb_queue_purge(&sk->sk_write_queue);
1532	if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
1533		return;
1534
1535	/* Fall back to message based flow control */
1536	tsk->rcv_win = FLOWCTL_MSG_WIN;
1537	tsk->snd_win = FLOWCTL_MSG_WIN;
1538}
1539
1540/**
1541 * tipc_sk_set_orig_addr - capture sender's address for received message
1542 * @m: descriptor for message info
1543 * @skb: received message buffer
1544 *
1545 * Note: Address is not captured if not requested by receiver.
1546 */
1547static void tipc_sk_set_orig_addr(struct msghdr *m, struct sk_buff *skb)
1548{
1549	DECLARE_SOCKADDR(struct sockaddr_pair *, srcaddr, m->msg_name);
1550	struct tipc_msg *hdr = buf_msg(skb);
1551
1552	if (!srcaddr)
1553		return;
1554
1555	srcaddr->sock.family = AF_TIPC;
1556	srcaddr->sock.addrtype = TIPC_ADDR_ID;
1557	srcaddr->sock.scope = 0;
1558	srcaddr->sock.addr.id.ref = msg_origport(hdr);
1559	srcaddr->sock.addr.id.node = msg_orignode(hdr);
1560	srcaddr->sock.addr.name.domain = 0;
1561	m->msg_namelen = sizeof(struct sockaddr_tipc);
1562
1563	if (!msg_in_group(hdr))
1564		return;
1565
1566	/* Group message users may also want to know sending member's id */
1567	srcaddr->member.family = AF_TIPC;
1568	srcaddr->member.addrtype = TIPC_ADDR_NAME;
1569	srcaddr->member.scope = 0;
1570	srcaddr->member.addr.name.name.type = msg_nametype(hdr);
1571	srcaddr->member.addr.name.name.instance = TIPC_SKB_CB(skb)->orig_member;
1572	srcaddr->member.addr.name.domain = 0;
1573	m->msg_namelen = sizeof(*srcaddr);
1574}
1575
1576/**
1577 * tipc_sk_anc_data_recv - optionally capture ancillary data for received message
1578 * @m: descriptor for message info
1579 * @skb: received message buffer
1580 * @tsk: TIPC port associated with message
1581 *
1582 * Note: Ancillary data is not captured if not requested by receiver.
1583 *
1584 * Returns 0 if successful, otherwise errno
1585 */
1586static int tipc_sk_anc_data_recv(struct msghdr *m, struct sk_buff *skb,
1587				 struct tipc_sock *tsk)
1588{
1589	struct tipc_msg *msg;
1590	u32 anc_data[3];
1591	u32 err;
1592	u32 dest_type;
1593	int has_name;
1594	int res;
1595
1596	if (likely(m->msg_controllen == 0))
1597		return 0;
1598	msg = buf_msg(skb);
1599
1600	/* Optionally capture errored message object(s) */
1601	err = msg ? msg_errcode(msg) : 0;
1602	if (unlikely(err)) {
1603		anc_data[0] = err;
1604		anc_data[1] = msg_data_sz(msg);
1605		res = put_cmsg(m, SOL_TIPC, TIPC_ERRINFO, 8, anc_data);
1606		if (res)
1607			return res;
1608		if (anc_data[1]) {
1609			if (skb_linearize(skb))
1610				return -ENOMEM;
1611			msg = buf_msg(skb);
1612			res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1],
1613				       msg_data(msg));
1614			if (res)
1615				return res;
1616		}
1617	}
1618
1619	/* Optionally capture message destination object */
1620	dest_type = msg ? msg_type(msg) : TIPC_DIRECT_MSG;
1621	switch (dest_type) {
1622	case TIPC_NAMED_MSG:
1623		has_name = 1;
1624		anc_data[0] = msg_nametype(msg);
1625		anc_data[1] = msg_namelower(msg);
1626		anc_data[2] = msg_namelower(msg);
1627		break;
1628	case TIPC_MCAST_MSG:
1629		has_name = 1;
1630		anc_data[0] = msg_nametype(msg);
1631		anc_data[1] = msg_namelower(msg);
1632		anc_data[2] = msg_nameupper(msg);
1633		break;
1634	case TIPC_CONN_MSG:
1635		has_name = (tsk->conn_type != 0);
1636		anc_data[0] = tsk->conn_type;
1637		anc_data[1] = tsk->conn_instance;
1638		anc_data[2] = tsk->conn_instance;
1639		break;
1640	default:
1641		has_name = 0;
1642	}
1643	if (has_name) {
1644		res = put_cmsg(m, SOL_TIPC, TIPC_DESTNAME, 12, anc_data);
1645		if (res)
1646			return res;
1647	}
1648
1649	return 0;
1650}
1651
1652static void tipc_sk_send_ack(struct tipc_sock *tsk)
1653{
1654	struct sock *sk = &tsk->sk;
1655	struct net *net = sock_net(sk);
1656	struct sk_buff *skb = NULL;
1657	struct tipc_msg *msg;
1658	u32 peer_port = tsk_peer_port(tsk);
1659	u32 dnode = tsk_peer_node(tsk);
1660
1661	if (!tipc_sk_connected(sk))
1662		return;
1663	skb = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0,
1664			      dnode, tsk_own_node(tsk), peer_port,
1665			      tsk->portid, TIPC_OK);
1666	if (!skb)
1667		return;
1668	msg = buf_msg(skb);
1669	msg_set_conn_ack(msg, tsk->rcv_unacked);
1670	tsk->rcv_unacked = 0;
1671
1672	/* Adjust to and advertise the correct window limit */
1673	if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL) {
1674		tsk->rcv_win = tsk_adv_blocks(tsk->sk.sk_rcvbuf);
1675		msg_set_adv_win(msg, tsk->rcv_win);
1676	}
1677	tipc_node_xmit_skb(net, skb, dnode, msg_link_selector(msg));
1678}
1679
1680static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
1681{
1682	struct sock *sk = sock->sk;
1683	DEFINE_WAIT_FUNC(wait, woken_wake_function);
1684	long timeo = *timeop;
1685	int err = sock_error(sk);
1686
1687	if (err)
1688		return err;
1689
1690	for (;;) {
1691		if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
1692			if (sk->sk_shutdown & RCV_SHUTDOWN) {
1693				err = -ENOTCONN;
1694				break;
1695			}
1696			add_wait_queue(sk_sleep(sk), &wait);
1697			release_sock(sk);
1698			timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
1699			sched_annotate_sleep();
1700			lock_sock(sk);
1701			remove_wait_queue(sk_sleep(sk), &wait);
1702		}
1703		err = 0;
1704		if (!skb_queue_empty(&sk->sk_receive_queue))
1705			break;
1706		err = -EAGAIN;
1707		if (!timeo)
1708			break;
1709		err = sock_intr_errno(timeo);
1710		if (signal_pending(current))
1711			break;
1712
1713		err = sock_error(sk);
1714		if (err)
1715			break;
1716	}
1717	*timeop = timeo;
1718	return err;
1719}
1720
1721/**
1722 * tipc_recvmsg - receive packet-oriented message
     * @sock: socket structure
1723 * @m: descriptor for message info
1724 * @buflen: length of user buffer area
1725 * @flags: receive flags
1726 *
1727 * Used for SOCK_DGRAM, SOCK_RDM, and SOCK_SEQPACKET messages.
1728 * If the complete message doesn't fit in user area, truncate it.
1729 *
1730 * Returns size of returned message data, errno otherwise
1731 */
1732static int tipc_recvmsg(struct socket *sock, struct msghdr *m,
1733			size_t buflen,	int flags)
1734{
1735	struct sock *sk = sock->sk;
1736	bool connected = !tipc_sk_type_connectionless(sk);
1737	struct tipc_sock *tsk = tipc_sk(sk);
1738	int rc, err, hlen, dlen, copy;
1739	struct sk_buff_head xmitq;
1740	struct tipc_msg *hdr;
1741	struct sk_buff *skb;
1742	bool grp_evt;
1743	long timeout;
1744
1745	/* Catch invalid receive requests */
1746	if (unlikely(!buflen))
1747		return -EINVAL;
1748
1749	lock_sock(sk);
1750	if (unlikely(connected && sk->sk_state == TIPC_OPEN)) {
1751		rc = -ENOTCONN;
1752		goto exit;
1753	}
1754	timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1755
1756	/* Step rcv queue to first msg with data or error; wait if necessary */
1757	do {
1758		rc = tipc_wait_for_rcvmsg(sock, &timeout);
1759		if (unlikely(rc))
1760			goto exit;
1761		skb = skb_peek(&sk->sk_receive_queue);
1762		hdr = buf_msg(skb);
1763		dlen = msg_data_sz(hdr);
1764		hlen = msg_hdr_sz(hdr);
1765		err = msg_errcode(hdr);
1766		grp_evt = msg_is_grp_evt(hdr);
1767		if (likely(dlen || err))
1768			break;
1769		tsk_advance_rx_queue(sk);
1770	} while (1);
1771
1772	/* Collect msg meta data, including error code and rejected data */
1773	tipc_sk_set_orig_addr(m, skb);
1774	rc = tipc_sk_anc_data_recv(m, skb, tsk);
1775	if (unlikely(rc))
1776		goto exit;
1777	hdr = buf_msg(skb);
1778
1779	/* Capture data if non-error msg, otherwise just set return value */
1780	if (likely(!err)) {
1781		copy = min_t(int, dlen, buflen);
1782		if (unlikely(copy != dlen))
1783			m->msg_flags |= MSG_TRUNC;
1784		rc = skb_copy_datagram_msg(skb, hlen, m, copy);
1785	} else {
1786		copy = 0;
1787		rc = 0;
1788		if (err != TIPC_CONN_SHUTDOWN && connected && !m->msg_control)
1789			rc = -ECONNRESET;
1790	}
1791	if (unlikely(rc))
1792		goto exit;
1793
1794	/* Mark message as group event if applicable */
1795	if (unlikely(grp_evt)) {
1796		if (msg_grp_evt(hdr) == TIPC_WITHDRAWN)
1797			m->msg_flags |= MSG_EOR;
1798		m->msg_flags |= MSG_OOB;
1799		copy = 0;
1800	}
1801
1802	/* Capture of data or error code/rejected data was successful */
1803	if (unlikely(flags & MSG_PEEK))
1804		goto exit;
1805
1806	/* Send group flow control advertisement when applicable */
1807	if (tsk->group && msg_in_group(hdr) && !grp_evt) {
1808		__skb_queue_head_init(&xmitq);
1809		tipc_group_update_rcv_win(tsk->group, tsk_blocks(hlen + dlen),
1810					  msg_orignode(hdr), msg_origport(hdr),
1811					  &xmitq);
1812		tipc_node_distr_xmit(sock_net(sk), &xmitq);
1813	}
1814
1815	tsk_advance_rx_queue(sk);
1816
1817	if (likely(!connected))
1818		goto exit;
1819
1820	/* Send connection flow control advertisement when applicable */
1821	tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen);
1822	if (tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE)
1823		tipc_sk_send_ack(tsk);
1824exit:
1825	release_sock(sk);
1826	return rc ? rc : copy;
1827}
1828
1829/**
1830 * tipc_recvstream - receive stream-oriented data
 * @sock: network socket
1831 * @m: descriptor for message info
1832 * @buflen: total size of user buffer area
1833 * @flags: receive flags
1834 *
1835 * Used for SOCK_STREAM messages only.  If not enough data is available
1836 * the call will optionally wait for more; it never truncates data.
1837 *
1838 * Returns size of returned message data, errno otherwise
1839 */
1840static int tipc_recvstream(struct socket *sock, struct msghdr *m,
1841			   size_t buflen, int flags)
1842{
1843	struct sock *sk = sock->sk;
1844	struct tipc_sock *tsk = tipc_sk(sk);
1845	struct sk_buff *skb;
1846	struct tipc_msg *hdr;
1847	struct tipc_skb_cb *skb_cb;
1848	bool peek = flags & MSG_PEEK;
1849	int offset, required, copy, copied = 0;
1850	int hlen, dlen, err, rc;
1851	long timeout;
1852
1853	/* Catch invalid receive attempts */
1854	if (unlikely(!buflen))
1855		return -EINVAL;
1856
1857	lock_sock(sk);
1858
1859	if (unlikely(sk->sk_state == TIPC_OPEN)) {
1860		rc = -ENOTCONN;
1861		goto exit;
1862	}
1863	required = sock_rcvlowat(sk, flags & MSG_WAITALL, buflen);
1864	timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1865
1866	do {
1867		/* Look at first msg in receive queue; wait if necessary */
1868		rc = tipc_wait_for_rcvmsg(sock, &timeout);
1869		if (unlikely(rc))
1870			break;
1871		skb = skb_peek(&sk->sk_receive_queue);
1872		skb_cb = TIPC_SKB_CB(skb);
1873		hdr = buf_msg(skb);
1874		dlen = msg_data_sz(hdr);
1875		hlen = msg_hdr_sz(hdr);
1876		err = msg_errcode(hdr);
1877
1878		/* Discard any empty non-errored (SYN-) message */
1879		if (unlikely(!dlen && !err)) {
1880			tsk_advance_rx_queue(sk);
1881			continue;
1882		}
1883
1884		/* Collect msg meta data, incl. error code and rejected data */
1885		if (!copied) {
1886			tipc_sk_set_orig_addr(m, skb);
1887			rc = tipc_sk_anc_data_recv(m, skb, tsk);
1888			if (rc)
1889				break;
1890			hdr = buf_msg(skb);
1891		}
1892
1893		/* Copy data if msg ok, otherwise return error/partial data */
1894		if (likely(!err)) {
1895			offset = skb_cb->bytes_read;
1896			copy = min_t(int, dlen - offset, buflen - copied);
1897			rc = skb_copy_datagram_msg(skb, hlen + offset, m, copy);
1898			if (unlikely(rc))
1899				break;
1900			copied += copy;
1901			offset += copy;
1902			if (unlikely(offset < dlen)) {
1903				if (!peek)
1904					skb_cb->bytes_read = offset;
1905				break;
1906			}
1907		} else {
1908			rc = 0;
1909			if ((err != TIPC_CONN_SHUTDOWN) && !m->msg_control)
1910				rc = -ECONNRESET;
1911			if (copied || rc)
1912				break;
1913		}
1914
1915		if (unlikely(peek))
1916			break;
1917
1918		tsk_advance_rx_queue(sk);
1919
1920		/* Send connection flow control advertisement when applicable */
1921		tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen);
1922		if (unlikely(tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE))
1923			tipc_sk_send_ack(tsk);
1924
1925		/* Exit if all requested data or FIN/error received */
1926		if (copied == buflen || err)
1927			break;
1928
1929	} while (!skb_queue_empty(&sk->sk_receive_queue) || copied < required);
1930exit:
1931	release_sock(sk);
1932	return copied ? copied : rc;
1933}
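
/* Usage note (illustrative sketch, not part of the original file): the
 * 'required' threshold above honours SO_RCVLOWAT, so a stream reader can
 * ask not to be woken until enough data has accumulated. 'sd' is a
 * hypothetical connected SOCK_STREAM descriptor:
 *
 *	char buf[4096];
 *	int lowat = 1024;
 *	ssize_t n;
 *
 *	setsockopt(sd, SOL_SOCKET, SO_RCVLOWAT, &lowat, sizeof(lowat));
 *	n = recv(sd, buf, sizeof(buf), 0);
 *
 * The loop condition 'copied < required' keeps reading until at least
 * 'lowat' bytes, or an error/FIN, have been collected.
 */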
1934
1935/**
1936 * tipc_write_space - wake up thread if port congestion is released
1937 * @sk: socket
1938 */
1939static void tipc_write_space(struct sock *sk)
1940{
1941	struct socket_wq *wq;
1942
1943	rcu_read_lock();
1944	wq = rcu_dereference(sk->sk_wq);
1945	if (skwq_has_sleeper(wq))
1946		wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
1947						EPOLLWRNORM | EPOLLWRBAND);
1948	rcu_read_unlock();
1949}
1950
1951/**
1952 * tipc_data_ready - wake up threads to indicate messages have been received
1953 * @sk: socket
1955 */
1956static void tipc_data_ready(struct sock *sk)
1957{
1958	struct socket_wq *wq;
1959
1960	rcu_read_lock();
1961	wq = rcu_dereference(sk->sk_wq);
1962	if (skwq_has_sleeper(wq))
1963		wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN |
1964						EPOLLRDNORM | EPOLLRDBAND);
1965	rcu_read_unlock();
1966}
1967
1968static void tipc_sock_destruct(struct sock *sk)
1969{
1970	__skb_queue_purge(&sk->sk_receive_queue);
1971}
1972
1973static void tipc_sk_proto_rcv(struct sock *sk,
1974			      struct sk_buff_head *inputq,
1975			      struct sk_buff_head *xmitq)
1976{
1977	struct sk_buff *skb = __skb_dequeue(inputq);
1978	struct tipc_sock *tsk = tipc_sk(sk);
1979	struct tipc_msg *hdr = buf_msg(skb);
1980	struct tipc_group *grp = tsk->group;
1981	bool wakeup = false;
1982
1983	switch (msg_user(hdr)) {
1984	case CONN_MANAGER:
1985		tipc_sk_conn_proto_rcv(tsk, skb, inputq, xmitq);
1986		return;
1987	case SOCK_WAKEUP:
1988		tipc_dest_del(&tsk->cong_links, msg_orignode(hdr), 0);
1989		/* coupled with smp_rmb() in tipc_wait_for_cond() */
1990		smp_wmb();
1991		tsk->cong_link_cnt--;
1992		wakeup = true;
1993		break;
1994	case GROUP_PROTOCOL:
1995		tipc_group_proto_rcv(grp, &wakeup, hdr, inputq, xmitq);
1996		break;
1997	case TOP_SRV:
1998		tipc_group_member_evt(tsk->group, &wakeup, &sk->sk_rcvbuf,
1999				      hdr, inputq, xmitq);
2000		break;
2001	default:
2002		break;
2003	}
2004
2005	if (wakeup)
2006		sk->sk_write_space(sk);
2007
2008	kfree_skb(skb);
2009}
2010
2011/**
2012 * tipc_sk_filter_connect - check incoming message for a connection-based socket
2013 * @tsk: TIPC socket
2014 * @skb: pointer to message buffer.
2015 * Returns true if message should be added to receive queue, false otherwise
2016 */
2017static bool tipc_sk_filter_connect(struct tipc_sock *tsk, struct sk_buff *skb)
2018{
2019	struct sock *sk = &tsk->sk;
2020	struct net *net = sock_net(sk);
2021	struct tipc_msg *hdr = buf_msg(skb);
2022	bool con_msg = msg_connected(hdr);
2023	u32 pport = tsk_peer_port(tsk);
2024	u32 pnode = tsk_peer_node(tsk);
2025	u32 oport = msg_origport(hdr);
2026	u32 onode = msg_orignode(hdr);
2027	int err = msg_errcode(hdr);
2028	unsigned long delay;
2029
2030	if (unlikely(msg_mcast(hdr)))
2031		return false;
2032
2033	switch (sk->sk_state) {
2034	case TIPC_CONNECTING:
2035		/* Setup ACK */
2036		if (likely(con_msg)) {
2037			if (err)
2038				break;
2039			tipc_sk_finish_conn(tsk, oport, onode);
2040			msg_set_importance(&tsk->phdr, msg_importance(hdr));
2041			/* ACK+ message with data is added to receive queue */
2042			if (msg_data_sz(hdr))
2043				return true;
2044			/* Empty ACK-: wake up sleeping connect() and drop */
2045			sk->sk_state_change(sk);
2046			msg_set_dest_droppable(hdr, 1);
2047			return false;
2048		}
2049		/* Ignore connectionless message if not from listening socket */
2050		if (oport != pport || onode != pnode)
2051			return false;
2052
2053		/* Rejected SYN */
2054		if (err != TIPC_ERR_OVERLOAD)
2055			break;
2056
2057		/* Prepare for new setup attempt if we have a SYN clone */
2058		if (skb_queue_empty(&sk->sk_write_queue))
2059			break;
2060		get_random_bytes(&delay, 2);
2061		delay %= (tsk->conn_timeout / 4);
2062		delay = msecs_to_jiffies(delay + 100);
2063		sk_reset_timer(sk, &sk->sk_timer, jiffies + delay);
2064		return false;
2065	case TIPC_OPEN:
2066	case TIPC_DISCONNECTING:
2067		return false;
2068	case TIPC_LISTEN:
2069		/* Accept only SYN message */
2070		if (!msg_is_syn(hdr) &&
2071		    tipc_node_get_capabilities(net, onode) & TIPC_SYN_BIT)
2072			return false;
2073		if (!con_msg && !err)
2074			return true;
2075		return false;
2076	case TIPC_ESTABLISHED:
2077		/* Accept only connection-based messages sent by peer */
2078		if (likely(con_msg && !err && pport == oport && pnode == onode))
2079			return true;
2080		if (!tsk_peer_msg(tsk, hdr))
2081			return false;
2082		if (!err)
2083			return true;
2084		tipc_set_sk_state(sk, TIPC_DISCONNECTING);
2085		tipc_node_remove_conn(net, pnode, tsk->portid);
2086		sk->sk_state_change(sk);
2087		return true;
2088	default:
2089		pr_err("Unknown sk_state %u\n", sk->sk_state);
2090	}
2091	/* Abort connection setup attempt */
2092	tipc_set_sk_state(sk, TIPC_DISCONNECTING);
2093	sk->sk_err = ECONNREFUSED;
2094	sk->sk_state_change(sk);
2095	return true;
2096}
2097
2098/**
2099 * rcvbuf_limit - get proper overload limit of socket receive queue
2100 * @sk: socket
2101 * @skb: message
2102 *
2103 * For connection oriented messages, irrespective of importance,
2104 * default queue limit is 2 MB.
2105 *
2106 * For connectionless messages, queue limits are based on message
2107 * importance as follows:
2108 *
2109 * TIPC_LOW_IMPORTANCE       (2 MB)
2110 * TIPC_MEDIUM_IMPORTANCE    (4 MB)
2111 * TIPC_HIGH_IMPORTANCE      (8 MB)
2112 * TIPC_CRITICAL_IMPORTANCE  (16 MB)
2113 *
2114 * Returns overload limit according to corresponding message importance
2115 */
2116static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *skb)
2117{
2118	struct tipc_sock *tsk = tipc_sk(sk);
2119	struct tipc_msg *hdr = buf_msg(skb);
2120
2121	if (unlikely(msg_in_group(hdr)))
2122		return READ_ONCE(sk->sk_rcvbuf);
2123
2124	if (unlikely(!msg_connected(hdr)))
2125		return READ_ONCE(sk->sk_rcvbuf) << msg_importance(hdr);
2126
2127	if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
2128		return READ_ONCE(sk->sk_rcvbuf);
2129
2130	return FLOWCTL_MSG_LIM;
2131}
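
/* Worked example (illustrative, not part of the original file): for a
 * connectionless message the shift above maps importance straight onto
 * the limits documented before this function, assuming the default 2 MB
 * receive buffer those limits imply:
 *
 *	limit = sk->sk_rcvbuf << msg_importance(hdr);
 *
 *	TIPC_LOW_IMPORTANCE      (0):  2 MB << 0 =  2 MB
 *	TIPC_MEDIUM_IMPORTANCE   (1):  2 MB << 1 =  4 MB
 *	TIPC_HIGH_IMPORTANCE     (2):  2 MB << 2 =  8 MB
 *	TIPC_CRITICAL_IMPORTANCE (3):  2 MB << 3 = 16 MB
 */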
2132
2133/**
2134 * tipc_sk_filter_rcv - validate incoming message
2135 * @sk: socket
2136 * @skb: pointer to message.
 * @xmitq: output queue for rejected or response messages
2137 *
2138 * Enqueues message on receive queue if acceptable; optionally handles
2139 * disconnect indication for a connected socket.
2140 *
2141 * Called with socket lock already taken
2142 *
2143 */
2144static void tipc_sk_filter_rcv(struct sock *sk, struct sk_buff *skb,
2145			       struct sk_buff_head *xmitq)
2146{
2147	bool sk_conn = !tipc_sk_type_connectionless(sk);
2148	struct tipc_sock *tsk = tipc_sk(sk);
2149	struct tipc_group *grp = tsk->group;
2150	struct tipc_msg *hdr = buf_msg(skb);
2151	struct net *net = sock_net(sk);
2152	struct sk_buff_head inputq;
2153	int mtyp = msg_type(hdr);
2154	int limit, err = TIPC_OK;
2155
2156	trace_tipc_sk_filter_rcv(sk, skb, TIPC_DUMP_ALL, " ");
2157	TIPC_SKB_CB(skb)->bytes_read = 0;
2158	__skb_queue_head_init(&inputq);
2159	__skb_queue_tail(&inputq, skb);
2160
2161	if (unlikely(!msg_isdata(hdr)))
2162		tipc_sk_proto_rcv(sk, &inputq, xmitq);
2163
2164	if (unlikely(grp))
2165		tipc_group_filter_msg(grp, &inputq, xmitq);
2166
2167	if (unlikely(!grp) && mtyp == TIPC_MCAST_MSG)
2168		tipc_mcast_filter_msg(net, &tsk->mc_method.deferredq, &inputq);
2169
2170	/* Validate and add to receive buffer if there is space */
2171	while ((skb = __skb_dequeue(&inputq))) {
2172		hdr = buf_msg(skb);
2173		limit = rcvbuf_limit(sk, skb);
2174		if ((sk_conn && !tipc_sk_filter_connect(tsk, skb)) ||
2175		    (!sk_conn && msg_connected(hdr)) ||
2176		    (!grp && msg_in_group(hdr)))
2177			err = TIPC_ERR_NO_PORT;
2178		else if (sk_rmem_alloc_get(sk) + skb->truesize >= limit) {
2179			trace_tipc_sk_dump(sk, skb, TIPC_DUMP_ALL,
2180					   "err_overload2!");
2181			atomic_inc(&sk->sk_drops);
2182			err = TIPC_ERR_OVERLOAD;
2183		}
2184
2185		if (unlikely(err)) {
2186			if (tipc_msg_reverse(tipc_own_addr(net), &skb, err)) {
2187				trace_tipc_sk_rej_msg(sk, skb, TIPC_DUMP_NONE,
2188						      "@filter_rcv!");
2189				__skb_queue_tail(xmitq, skb);
2190			}
2191			err = TIPC_OK;
2192			continue;
2193		}
2194		__skb_queue_tail(&sk->sk_receive_queue, skb);
2195		skb_set_owner_r(skb, sk);
2196		trace_tipc_sk_overlimit2(sk, skb, TIPC_DUMP_ALL,
2197					 "rcvq >90% allocated!");
2198		sk->sk_data_ready(sk);
2199	}
2200}
2201
2202/**
2203 * tipc_sk_backlog_rcv - handle incoming message from backlog queue
2204 * @sk: socket
2205 * @skb: message
2206 *
2207 * Caller must hold socket lock
2208 */
2209static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
2210{
2211	unsigned int before = sk_rmem_alloc_get(sk);
2212	struct sk_buff_head xmitq;
2213	unsigned int added;
2214
2215	__skb_queue_head_init(&xmitq);
2216
2217	tipc_sk_filter_rcv(sk, skb, &xmitq);
2218	added = sk_rmem_alloc_get(sk) - before;
2219	atomic_add(added, &tipc_sk(sk)->dupl_rcvcnt);
2220
2221	/* Send pending response/rejected messages, if any */
2222	tipc_node_distr_xmit(sock_net(sk), &xmitq);
2223	return 0;
2224}
2225
2226/**
2227 * tipc_sk_enqueue - extract all buffers with destination 'dport' from
2228 *                   inputq and try adding them to socket or backlog queue
2229 * @inputq: list of incoming buffers with potentially different destinations
2230 * @sk: socket where the buffers should be enqueued
2231 * @dport: port number for the socket
 * @xmitq: output queue for rejected or response messages
2232 *
2233 * Caller must hold socket lock
2234 */
2235static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
2236			    u32 dport, struct sk_buff_head *xmitq)
2237{
2238	unsigned long time_limit = jiffies + 2;
2239	struct sk_buff *skb;
2240	unsigned int lim;
2241	atomic_t *dcnt;
2242	u32 onode;
2243
2244	while (skb_queue_len(inputq)) {
2245		if (unlikely(time_after_eq(jiffies, time_limit)))
2246			return;
2247
2248		skb = tipc_skb_dequeue(inputq, dport);
2249		if (unlikely(!skb))
2250			return;
2251
2252		/* Add message directly to receive queue if possible */
2253		if (!sock_owned_by_user(sk)) {
2254			tipc_sk_filter_rcv(sk, skb, xmitq);
2255			continue;
2256		}
2257
2258		/* Try backlog, compensating for double-counted bytes */
2259		dcnt = &tipc_sk(sk)->dupl_rcvcnt;
2260		if (!sk->sk_backlog.len)
2261			atomic_set(dcnt, 0);
2262		lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
2263		if (likely(!sk_add_backlog(sk, skb, lim))) {
2264			trace_tipc_sk_overlimit1(sk, skb, TIPC_DUMP_ALL,
2265						 "bklg & rcvq >90% allocated!");
2266			continue;
2267		}
2268
2269		trace_tipc_sk_dump(sk, skb, TIPC_DUMP_ALL, "err_overload!");
2270		/* Overload => reject message back to sender */
2271		onode = tipc_own_addr(sock_net(sk));
2272		atomic_inc(&sk->sk_drops);
2273		if (tipc_msg_reverse(onode, &skb, TIPC_ERR_OVERLOAD)) {
2274			trace_tipc_sk_rej_msg(sk, skb, TIPC_DUMP_ALL,
2275					      "@sk_enqueue!");
2276			__skb_queue_tail(xmitq, skb);
2277		}
2278		break;
2279	}
2280}
2281
2282/**
2283 * tipc_sk_rcv - handle a chain of incoming buffers
 * @net: the associated network namespace
2284 * @inputq: buffer list containing the buffers
2285 * Consumes all buffers in list until inputq is empty
2286 * Note: may be called in multiple threads referring to the same queue
2287 */
2288void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
2289{
2290	struct sk_buff_head xmitq;
2291	u32 dnode, dport = 0;
2292	int err;
2293	struct tipc_sock *tsk;
2294	struct sock *sk;
2295	struct sk_buff *skb;
2296
2297	__skb_queue_head_init(&xmitq);
2298	while (skb_queue_len(inputq)) {
2299		dport = tipc_skb_peek_port(inputq, dport);
2300		tsk = tipc_sk_lookup(net, dport);
2301
2302		if (likely(tsk)) {
2303			sk = &tsk->sk;
2304			if (likely(spin_trylock_bh(&sk->sk_lock.slock))) {
2305				tipc_sk_enqueue(inputq, sk, dport, &xmitq);
2306				spin_unlock_bh(&sk->sk_lock.slock);
2307			}
2308			/* Send pending response/rejected messages, if any */
2309			tipc_node_distr_xmit(sock_net(sk), &xmitq);
2310			sock_put(sk);
2311			continue;
2312		}
2313		/* No destination socket => dequeue skb if still there */
2314		skb = tipc_skb_dequeue(inputq, dport);
2315		if (!skb)
2316			return;
2317
2318		/* Try secondary lookup if unresolved named message */
2319		err = TIPC_ERR_NO_PORT;
2320		if (tipc_msg_lookup_dest(net, skb, &err))
2321			goto xmit;
2322
2323		/* Prepare for message rejection */
2324		if (!tipc_msg_reverse(tipc_own_addr(net), &skb, err))
2325			continue;
2326
2327		trace_tipc_sk_rej_msg(NULL, skb, TIPC_DUMP_NONE, "@sk_rcv!");
2328xmit:
2329		dnode = msg_destnode(buf_msg(skb));
2330		tipc_node_xmit_skb(net, skb, dnode, dport);
2331	}
2332}
2333
2334static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
2335{
2336	DEFINE_WAIT_FUNC(wait, woken_wake_function);
2337	struct sock *sk = sock->sk;
2338	int done;
2339
2340	do {
2341		int err = sock_error(sk);
2342		if (err)
2343			return err;
2344		if (!*timeo_p)
2345			return -ETIMEDOUT;
2346		if (signal_pending(current))
2347			return sock_intr_errno(*timeo_p);
2348
2349		add_wait_queue(sk_sleep(sk), &wait);
2350		done = sk_wait_event(sk, timeo_p,
2351				     sk->sk_state != TIPC_CONNECTING, &wait);
2352		remove_wait_queue(sk_sleep(sk), &wait);
2353	} while (!done);
2354	return 0;
2355}
2356
2357static bool tipc_sockaddr_is_sane(struct sockaddr_tipc *addr)
2358{
2359	if (addr->family != AF_TIPC)
2360		return false;
2361	if (addr->addrtype == TIPC_SERVICE_RANGE)
2362		return (addr->addr.nameseq.lower <= addr->addr.nameseq.upper);
2363	return (addr->addrtype == TIPC_SERVICE_ADDR ||
2364		addr->addrtype == TIPC_SOCKET_ADDR);
2365}
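
/* Example (hypothetical user-space sketch): an address that passes the
 * sanity check above, naming service type 18888, instance 17:
 *
 *	struct sockaddr_tipc sa = {
 *		.family = AF_TIPC,
 *		.addrtype = TIPC_SERVICE_ADDR,
 *		.addr.name.name.type = 18888,
 *		.addr.name.name.instance = 17,
 *	};
 *
 * A TIPC_SERVICE_RANGE address must additionally satisfy
 * lower <= upper, exactly as checked above.
 */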
2366
2367/**
2368 * tipc_connect - establish a connection to another TIPC port
2369 * @sock: socket structure
2370 * @dest: socket address for destination port
2371 * @destlen: size of socket address data structure
2372 * @flags: file-related flags associated with socket
2373 *
2374 * Returns 0 on success, errno otherwise
2375 */
2376static int tipc_connect(struct socket *sock, struct sockaddr *dest,
2377			int destlen, int flags)
2378{
2379	struct sock *sk = sock->sk;
2380	struct tipc_sock *tsk = tipc_sk(sk);
2381	struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest;
2382	struct msghdr m = {NULL,};
2383	long timeout = (flags & O_NONBLOCK) ? 0 : tsk->conn_timeout;
2384	int previous;
2385	int res = 0;
2386
2387	if (destlen != sizeof(struct sockaddr_tipc))
2388		return -EINVAL;
2389
2390	lock_sock(sk);
2391
2392	if (tsk->group) {
2393		res = -EINVAL;
2394		goto exit;
2395	}
2396
2397	if (dst->family == AF_UNSPEC) {
2398		memset(&tsk->peer, 0, sizeof(struct sockaddr_tipc));
2399		if (!tipc_sk_type_connectionless(sk))
2400			res = -EINVAL;
2401		goto exit;
2402	}
2403	if (!tipc_sockaddr_is_sane(dst)) {
2404		res = -EINVAL;
2405		goto exit;
2406	}
2407	/* DGRAM/RDM connect(), just save the destaddr */
2408	if (tipc_sk_type_connectionless(sk)) {
2409		memcpy(&tsk->peer, dest, destlen);
2410		goto exit;
2411	} else if (dst->addrtype == TIPC_SERVICE_RANGE) {
2412		res = -EINVAL;
2413		goto exit;
2414	}
2415
2416	previous = sk->sk_state;
2417
2418	switch (sk->sk_state) {
2419	case TIPC_OPEN:
2420		/* Send a 'SYN-' to destination */
2421		m.msg_name = dest;
2422		m.msg_namelen = destlen;
2423
2424		/* If connect() is non-blocking, set MSG_DONTWAIT so that
2425		 * __tipc_sendmsg() never blocks.
2426		 */
2427		if (!timeout)
2428			m.msg_flags = MSG_DONTWAIT;
2429
2430		res = __tipc_sendmsg(sock, &m, 0);
2431		if ((res < 0) && (res != -EWOULDBLOCK))
2432			goto exit;
2433
2434		/* Just entered TIPC_CONNECTING state; the only
2435		 * difference is that the return value in the non-blocking
2436		 * case is EINPROGRESS, rather than EALREADY.
2437		 */
2438		res = -EINPROGRESS;
2439		/* fall through */
2440	case TIPC_CONNECTING:
2441		if (!timeout) {
2442			if (previous == TIPC_CONNECTING)
2443				res = -EALREADY;
2444			goto exit;
2445		}
2446		timeout = msecs_to_jiffies(timeout);
2447		/* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */
2448		res = tipc_wait_for_connect(sock, &timeout);
2449		break;
2450	case TIPC_ESTABLISHED:
2451		res = -EISCONN;
2452		break;
2453	default:
2454		res = -EINVAL;
2455	}
2456
2457exit:
2458	release_sock(sk);
2459	return res;
2460}
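
/* Usage sketch (hypothetical user-space code): a non-blocking connect()
 * returns -1/EINPROGRESS once the SYN has been sent, as arranged in the
 * TIPC_OPEN case above; completion can then be awaited with poll(2).
 * 'sd' is a hypothetical SOCK_SEQPACKET descriptor and 'sa' a
 * sockaddr_tipc like the one sketched earlier:
 *
 *	fcntl(sd, F_SETFL, O_NONBLOCK);
 *	if (connect(sd, (struct sockaddr *)&sa, sizeof(sa)) < 0 &&
 *	    errno == EINPROGRESS) {
 *		struct pollfd pfd = { .fd = sd, .events = POLLOUT };
 *
 *		poll(&pfd, 1, -1);
 *	}
 */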
2461
2462/**
2463 * tipc_listen - allow socket to listen for incoming connections
2464 * @sock: socket structure
2465 * @len: (unused)
2466 *
2467 * Returns 0 on success, errno otherwise
2468 */
2469static int tipc_listen(struct socket *sock, int len)
2470{
2471	struct sock *sk = sock->sk;
2472	int res;
2473
2474	lock_sock(sk);
2475	res = tipc_set_sk_state(sk, TIPC_LISTEN);
2476	release_sock(sk);
2477
2478	return res;
2479}
2480
2481static int tipc_wait_for_accept(struct socket *sock, long timeo)
2482{
2483	struct sock *sk = sock->sk;
2484	DEFINE_WAIT(wait);
2485	int err;
2486
2487	/* True wake-one mechanism for incoming connections: only
2488	 * one process gets woken up, not the 'whole herd'.
2489	 * Since we do not 'race & poll' for established sockets
2490	 * anymore, the common case will execute the loop only once.
2491	 */
2492	for (;;) {
2493		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
2494					  TASK_INTERRUPTIBLE);
2495		if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
2496			release_sock(sk);
2497			timeo = schedule_timeout(timeo);
2498			lock_sock(sk);
2499		}
2500		err = 0;
2501		if (!skb_queue_empty(&sk->sk_receive_queue))
2502			break;
2503		err = -EAGAIN;
2504		if (!timeo)
2505			break;
2506		err = sock_intr_errno(timeo);
2507		if (signal_pending(current))
2508			break;
2509	}
2510	finish_wait(sk_sleep(sk), &wait);
2511	return err;
2512}
2513
2514/**
2515 * tipc_accept - wait for connection request
2516 * @sock: listening socket
2517 * @new_sock: new socket that is to be connected
2518 * @flags: file-related flags associated with socket
 * @kern: true if the socket is being created by the kernel
2519 *
2520 * Returns 0 on success, errno otherwise
2521 */
2522static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
2523		       bool kern)
2524{
2525	struct sock *new_sk, *sk = sock->sk;
2526	struct sk_buff *buf;
2527	struct tipc_sock *new_tsock;
2528	struct tipc_msg *msg;
2529	long timeo;
2530	int res;
2531
2532	lock_sock(sk);
2533
2534	if (sk->sk_state != TIPC_LISTEN) {
2535		res = -EINVAL;
2536		goto exit;
2537	}
2538	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
2539	res = tipc_wait_for_accept(sock, timeo);
2540	if (res)
2541		goto exit;
2542
2543	buf = skb_peek(&sk->sk_receive_queue);
2544
2545	res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, kern);
2546	if (res)
2547		goto exit;
2548	security_sk_clone(sock->sk, new_sock->sk);
2549
2550	new_sk = new_sock->sk;
2551	new_tsock = tipc_sk(new_sk);
2552	msg = buf_msg(buf);
2553
2554	/* we lock on new_sk; but lockdep sees the lock on sk */
2555	lock_sock_nested(new_sk, SINGLE_DEPTH_NESTING);
2556
2557	/*
2558	 * Reject any stray messages received by new socket
2559	 * before the socket lock was taken (very, very unlikely)
2560	 */
2561	tsk_rej_rx_queue(new_sk);
2562
2563	/* Connect new socket to its peer */
2564	tipc_sk_finish_conn(new_tsock, msg_origport(msg), msg_orignode(msg));
2565
2566	tsk_set_importance(new_tsock, msg_importance(msg));
2567	if (msg_named(msg)) {
2568		new_tsock->conn_type = msg_nametype(msg);
2569		new_tsock->conn_instance = msg_nameinst(msg);
2570	}
2571
2572	/*
2573	 * Respond to 'SYN-' by discarding it & returning 'ACK'-.
2574	 * Respond to 'SYN+' by queuing it on new socket.
2575	 */
2576	if (!msg_data_sz(msg)) {
2577		struct msghdr m = {NULL,};
2578
2579		tsk_advance_rx_queue(sk);
2580		__tipc_sendstream(new_sock, &m, 0);
2581	} else {
2582		__skb_dequeue(&sk->sk_receive_queue);
2583		__skb_queue_head(&new_sk->sk_receive_queue, buf);
2584		skb_set_owner_r(buf, new_sk);
2585	}
2586	release_sock(new_sk);
2587exit:
2588	release_sock(sk);
2589	return res;
2590}
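
/* Server-side sketch (hypothetical user-space code) exercising the
 * listen/accept path above; 'sd' and 'sa' are a hypothetical descriptor
 * and service address, and the descriptor returned by accept() is backed
 * by the socket cloned in tipc_accept():
 *
 *	int peer;
 *
 *	bind(sd, (struct sockaddr *)&sa, sizeof(sa));
 *	listen(sd, 0);
 *	peer = accept(sd, NULL, NULL);
 *
 * The backlog argument is ignored by tipc_listen(), which only moves the
 * socket to TIPC_LISTEN state.
 */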
2591
2592/**
2593 * tipc_shutdown - shutdown socket connection
2594 * @sock: socket structure
2595 * @how: direction to close (must be SHUT_RDWR)
2596 *
2597 * Terminates connection (if necessary), then purges socket's receive queue.
2598 *
2599 * Returns 0 on success, errno otherwise
2600 */
2601static int tipc_shutdown(struct socket *sock, int how)
2602{
2603	struct sock *sk = sock->sk;
2604	int res;
2605
2606	if (how != SHUT_RDWR)
2607		return -EINVAL;
2608
2609	lock_sock(sk);
2610
2611	trace_tipc_sk_shutdown(sk, NULL, TIPC_DUMP_ALL, " ");
2612	__tipc_shutdown(sock, TIPC_CONN_SHUTDOWN);
2613	sk->sk_shutdown = SEND_SHUTDOWN;
2614
2615	if (sk->sk_state == TIPC_DISCONNECTING) {
2616		/* Discard any unreceived messages */
2617		__skb_queue_purge(&sk->sk_receive_queue);
2618
2619		/* Wake up anyone sleeping in poll */
2620		sk->sk_state_change(sk);
2621		res = 0;
2622	} else {
2623		res = -ENOTCONN;
2624	}
2625
2626	release_sock(sk);
2627	return res;
2628}
2629
2630static void tipc_sk_check_probing_state(struct sock *sk,
2631					struct sk_buff_head *list)
2632{
2633	struct tipc_sock *tsk = tipc_sk(sk);
2634	u32 pnode = tsk_peer_node(tsk);
2635	u32 pport = tsk_peer_port(tsk);
2636	u32 self = tsk_own_node(tsk);
2637	u32 oport = tsk->portid;
2638	struct sk_buff *skb;
2639
2640	if (tsk->probe_unacked) {
2641		tipc_set_sk_state(sk, TIPC_DISCONNECTING);
2642		sk->sk_err = ECONNABORTED;
2643		tipc_node_remove_conn(sock_net(sk), pnode, pport);
2644		sk->sk_state_change(sk);
2645		return;
2646	}
2647	/* Prepare new probe */
2648	skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE, 0,
2649			      pnode, self, pport, oport, TIPC_OK);
2650	if (skb)
2651		__skb_queue_tail(list, skb);
2652	tsk->probe_unacked = true;
2653	sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV);
2654}
2655
2656static void tipc_sk_retry_connect(struct sock *sk, struct sk_buff_head *list)
2657{
2658	struct tipc_sock *tsk = tipc_sk(sk);
2659
2660	/* Try again later if dest link is congested */
2661	if (tsk->cong_link_cnt) {
2662		sk_reset_timer(sk, &sk->sk_timer, msecs_to_jiffies(100));
2663		return;
2664	}
2665	/* Prepare SYN for retransmit */
2666	tipc_msg_skb_clone(&sk->sk_write_queue, list);
2667}
2668
2669static void tipc_sk_timeout(struct timer_list *t)
2670{
2671	struct sock *sk = from_timer(sk, t, sk_timer);
2672	struct tipc_sock *tsk = tipc_sk(sk);
2673	u32 pnode = tsk_peer_node(tsk);
2674	struct sk_buff_head list;
2675	int rc = 0;
2676
2677	__skb_queue_head_init(&list);
2678	bh_lock_sock(sk);
2679
2680	/* Try again later if socket is busy */
2681	if (sock_owned_by_user(sk)) {
2682		sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ / 20);
2683		bh_unlock_sock(sk);
2684		return;
2685	}
2686
2687	if (sk->sk_state == TIPC_ESTABLISHED)
2688		tipc_sk_check_probing_state(sk, &list);
2689	else if (sk->sk_state == TIPC_CONNECTING)
2690		tipc_sk_retry_connect(sk, &list);
2691
2692	bh_unlock_sock(sk);
2693
2694	if (!skb_queue_empty(&list))
2695		rc = tipc_node_xmit(sock_net(sk), &list, pnode, tsk->portid);
2696
2697	/* SYN messages may cause link congestion */
2698	if (rc == -ELINKCONG) {
2699		tipc_dest_push(&tsk->cong_links, pnode, 0);
2700		tsk->cong_link_cnt = 1;
2701	}
2702	sock_put(sk);
2703}
2704
2705static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
2706			   struct tipc_name_seq const *seq)
2707{
2708	struct sock *sk = &tsk->sk;
2709	struct net *net = sock_net(sk);
2710	struct publication *publ;
2711	u32 key;
2712
2713	if (scope != TIPC_NODE_SCOPE)
2714		scope = TIPC_CLUSTER_SCOPE;
2715
2716	if (tipc_sk_connected(sk))
2717		return -EINVAL;
2718	key = tsk->portid + tsk->pub_count + 1;
2719	if (key == tsk->portid)
2720		return -EADDRINUSE;
2721
2722	publ = tipc_nametbl_publish(net, seq->type, seq->lower, seq->upper,
2723				    scope, tsk->portid, key);
2724	if (unlikely(!publ))
2725		return -EINVAL;
2726
2727	list_add(&publ->binding_sock, &tsk->publications);
2728	tsk->pub_count++;
2729	tsk->published = 1;
2730	return 0;
2731}
2732
2733static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
2734			    struct tipc_name_seq const *seq)
2735{
2736	struct net *net = sock_net(&tsk->sk);
2737	struct publication *publ;
2738	struct publication *safe;
2739	int rc = -EINVAL;
2740
2741	if (scope != TIPC_NODE_SCOPE)
2742		scope = TIPC_CLUSTER_SCOPE;
2743
2744	list_for_each_entry_safe(publ, safe, &tsk->publications, binding_sock) {
2745		if (seq) {
2746			if (publ->scope != scope)
2747				continue;
2748			if (publ->type != seq->type)
2749				continue;
2750			if (publ->lower != seq->lower)
2751				continue;
2752			if (publ->upper != seq->upper)
2753				break;
2754			tipc_nametbl_withdraw(net, publ->type, publ->lower,
2755					      publ->upper, publ->key);
2756			rc = 0;
2757			break;
2758		}
2759		tipc_nametbl_withdraw(net, publ->type, publ->lower,
2760				      publ->upper, publ->key);
2761		rc = 0;
2762	}
2763	if (list_empty(&tsk->publications))
2764		tsk->published = 0;
2765	return rc;
2766}
2767
2768/* tipc_sk_reinit: set non-zero address in all existing sockets
2769 *                 when we go from standalone to network mode.
2770 */
2771void tipc_sk_reinit(struct net *net)
2772{
2773	struct tipc_net *tn = net_generic(net, tipc_net_id);
2774	struct rhashtable_iter iter;
2775	struct tipc_sock *tsk;
2776	struct tipc_msg *msg;
2777
2778	rhashtable_walk_enter(&tn->sk_rht, &iter);
2779
2780	do {
2781		rhashtable_walk_start(&iter);
2782
2783		while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) {
2784			sock_hold(&tsk->sk);
2785			rhashtable_walk_stop(&iter);
2786			lock_sock(&tsk->sk);
2787			msg = &tsk->phdr;
2788			msg_set_prevnode(msg, tipc_own_addr(net));
2789			msg_set_orignode(msg, tipc_own_addr(net));
2790			release_sock(&tsk->sk);
2791			rhashtable_walk_start(&iter);
2792			sock_put(&tsk->sk);
2793		}
2794
2795		rhashtable_walk_stop(&iter);
2796	} while (tsk == ERR_PTR(-EAGAIN));
2797
2798	rhashtable_walk_exit(&iter);
2799}
2800
2801static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid)
2802{
2803	struct tipc_net *tn = net_generic(net, tipc_net_id);
2804	struct tipc_sock *tsk;
2805
2806	rcu_read_lock();
2807	tsk = rhashtable_lookup_fast(&tn->sk_rht, &portid, tsk_rht_params);
2808	if (tsk)
2809		sock_hold(&tsk->sk);
2810	rcu_read_unlock();
2811
2812	return tsk;
2813}
2814
2815static int tipc_sk_insert(struct tipc_sock *tsk)
2816{
2817	struct sock *sk = &tsk->sk;
2818	struct net *net = sock_net(sk);
2819	struct tipc_net *tn = net_generic(net, tipc_net_id);
2820	u32 remaining = (TIPC_MAX_PORT - TIPC_MIN_PORT) + 1;
2821	u32 portid = prandom_u32() % remaining + TIPC_MIN_PORT;
2822
2823	while (remaining--) {
2824		portid++;
2825		if ((portid < TIPC_MIN_PORT) || (portid > TIPC_MAX_PORT))
2826			portid = TIPC_MIN_PORT;
2827		tsk->portid = portid;
2828		sock_hold(&tsk->sk);
2829		if (!rhashtable_lookup_insert_fast(&tn->sk_rht, &tsk->node,
2830						   tsk_rht_params))
2831			return 0;
2832		sock_put(&tsk->sk);
2833	}
2834
2835	return -1;
2836}
2837
2838static void tipc_sk_remove(struct tipc_sock *tsk)
2839{
2840	struct sock *sk = &tsk->sk;
2841	struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id);
2842
2843	if (!rhashtable_remove_fast(&tn->sk_rht, &tsk->node, tsk_rht_params)) {
2844		WARN_ON(refcount_read(&sk->sk_refcnt) == 1);
2845		__sock_put(sk);
2846	}
2847}
2848
2849static const struct rhashtable_params tsk_rht_params = {
2850	.nelem_hint = 192,
2851	.head_offset = offsetof(struct tipc_sock, node),
2852	.key_offset = offsetof(struct tipc_sock, portid),
2853	.key_len = sizeof(u32), /* portid */
2854	.max_size = 1048576,
2855	.min_size = 256,
2856	.automatic_shrinking = true,
2857};
2858
2859int tipc_sk_rht_init(struct net *net)
2860{
2861	struct tipc_net *tn = net_generic(net, tipc_net_id);
2862
2863	return rhashtable_init(&tn->sk_rht, &tsk_rht_params);
2864}
2865
2866void tipc_sk_rht_destroy(struct net *net)
2867{
2868	struct tipc_net *tn = net_generic(net, tipc_net_id);
2869
2870	/* Wait for socket readers to complete */
2871	synchronize_net();
2872
2873	rhashtable_destroy(&tn->sk_rht);
2874}
2875
2876static int tipc_sk_join(struct tipc_sock *tsk, struct tipc_group_req *mreq)
2877{
2878	struct net *net = sock_net(&tsk->sk);
2879	struct tipc_group *grp = tsk->group;
2880	struct tipc_msg *hdr = &tsk->phdr;
2881	struct tipc_name_seq seq;
2882	int rc;
2883
2884	if (mreq->type < TIPC_RESERVED_TYPES)
2885		return -EACCES;
2886	if (mreq->scope > TIPC_NODE_SCOPE)
2887		return -EINVAL;
2888	if (grp)
2889		return -EACCES;
2890	grp = tipc_group_create(net, tsk->portid, mreq, &tsk->group_is_open);
2891	if (!grp)
2892		return -ENOMEM;
2893	tsk->group = grp;
2894	msg_set_lookup_scope(hdr, mreq->scope);
2895	msg_set_nametype(hdr, mreq->type);
2896	msg_set_dest_droppable(hdr, true);
2897	seq.type = mreq->type;
2898	seq.lower = mreq->instance;
2899	seq.upper = seq.lower;
2900	tipc_nametbl_build_group(net, grp, mreq->type, mreq->scope);
2901	rc = tipc_sk_publish(tsk, mreq->scope, &seq);
2902	if (rc) {
2903		tipc_group_delete(net, grp);
2904		tsk->group = NULL;
2905		return rc;
2906	}
2907	/* Eliminate any risk that a broadcast overtakes sent JOINs */
2908	tsk->mc_method.rcast = true;
2909	tsk->mc_method.mandatory = true;
2910	tipc_group_join(net, grp, &tsk->sk.sk_rcvbuf);
2911	return rc;
2912}
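
/* User-space view (hypothetical sketch): a group is joined with a single
 * setsockopt() call carrying the tipc_group_req that arrives here as
 * 'mreq'; 4711 is an arbitrary non-reserved service type and 'sd' a
 * hypothetical SOCK_RDM descriptor:
 *
 *	struct tipc_group_req req = {
 *		.type = 4711,
 *		.instance = 0,
 *		.scope = TIPC_CLUSTER_SCOPE,
 *	};
 *
 *	setsockopt(sd, SOL_TIPC, TIPC_GROUP_JOIN, &req, sizeof(req));
 */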
2913
2914static int tipc_sk_leave(struct tipc_sock *tsk)
2915{
2916	struct net *net = sock_net(&tsk->sk);
2917	struct tipc_group *grp = tsk->group;
2918	struct tipc_name_seq seq;
2919	int scope;
2920
2921	if (!grp)
2922		return -EINVAL;
2923	tipc_group_self(grp, &seq, &scope);
2924	tipc_group_delete(net, grp);
2925	tsk->group = NULL;
2926	tipc_sk_withdraw(tsk, scope, &seq);
2927	return 0;
2928}
2929
2930/**
2931 * tipc_setsockopt - set socket option
2932 * @sock: socket structure
2933 * @lvl: option level
2934 * @opt: option identifier
2935 * @ov: pointer to new option value
2936 * @ol: length of option value
2937 *
2938 * For stream sockets only, accepts and ignores all IPPROTO_TCP options
2939 * (to ease compatibility).
2940 *
2941 * Returns 0 on success, errno otherwise
2942 */
2943static int tipc_setsockopt(struct socket *sock, int lvl, int opt,
2944			   char __user *ov, unsigned int ol)
2945{
2946	struct sock *sk = sock->sk;
2947	struct tipc_sock *tsk = tipc_sk(sk);
2948	struct tipc_group_req mreq;
2949	u32 value = 0;
2950	int res = 0;
2951
2952	if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
2953		return 0;
2954	if (lvl != SOL_TIPC)
2955		return -ENOPROTOOPT;
2956
2957	switch (opt) {
2958	case TIPC_IMPORTANCE:
2959	case TIPC_SRC_DROPPABLE:
2960	case TIPC_DEST_DROPPABLE:
2961	case TIPC_CONN_TIMEOUT:
2962		if (ol < sizeof(value))
2963			return -EINVAL;
2964		if (get_user(value, (u32 __user *)ov))
2965			return -EFAULT;
2966		break;
2967	case TIPC_GROUP_JOIN:
2968		if (ol < sizeof(mreq))
2969			return -EINVAL;
2970		if (copy_from_user(&mreq, ov, sizeof(mreq)))
2971			return -EFAULT;
2972		break;
2973	default:
2974		if (ov || ol)
2975			return -EINVAL;
2976	}
2977
2978	lock_sock(sk);
2979
2980	switch (opt) {
2981	case TIPC_IMPORTANCE:
2982		res = tsk_set_importance(tsk, value);
2983		break;
2984	case TIPC_SRC_DROPPABLE:
2985		if (sock->type != SOCK_STREAM)
2986			tsk_set_unreliable(tsk, value);
2987		else
2988			res = -ENOPROTOOPT;
2989		break;
2990	case TIPC_DEST_DROPPABLE:
2991		tsk_set_unreturnable(tsk, value);
2992		break;
2993	case TIPC_CONN_TIMEOUT:
2994		tipc_sk(sk)->conn_timeout = value;
2995		break;
2996	case TIPC_MCAST_BROADCAST:
2997		tsk->mc_method.rcast = false;
2998		tsk->mc_method.mandatory = true;
2999		break;
3000	case TIPC_MCAST_REPLICAST:
3001		tsk->mc_method.rcast = true;
3002		tsk->mc_method.mandatory = true;
3003		break;
3004	case TIPC_GROUP_JOIN:
3005		res = tipc_sk_join(tsk, &mreq);
3006		break;
3007	case TIPC_GROUP_LEAVE:
3008		res = tipc_sk_leave(tsk);
3009		break;
3010	default:
3011		res = -EINVAL;
3012	}
3013
3014	release_sock(sk);
3015
3016	return res;
3017}
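
/* Example (hypothetical user-space sketch): the plain u32 options above
 * are all set the same way, e.g. a 10 s connect timeout; the value is in
 * milliseconds, as consumed by tipc_connect():
 *
 *	__u32 tmo = 10000;
 *
 *	setsockopt(sd, SOL_TIPC, TIPC_CONN_TIMEOUT, &tmo, sizeof(tmo));
 */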
3018
3019/**
3020 * tipc_getsockopt - get socket option
3021 * @sock: socket structure
3022 * @lvl: option level
3023 * @opt: option identifier
3024 * @ov: receptacle for option value
3025 * @ol: receptacle for length of option value
3026 *
3027 * For stream sockets only, returns 0 length result for all IPPROTO_TCP options
3028 * (to ease compatibility).
3029 *
3030 * Returns 0 on success, errno otherwise
3031 */
3032static int tipc_getsockopt(struct socket *sock, int lvl, int opt,
3033			   char __user *ov, int __user *ol)
3034{
3035	struct sock *sk = sock->sk;
3036	struct tipc_sock *tsk = tipc_sk(sk);
3037	struct tipc_name_seq seq;
3038	int len, scope;
3039	u32 value;
3040	int res;
3041
3042	if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
3043		return put_user(0, ol);
3044	if (lvl != SOL_TIPC)
3045		return -ENOPROTOOPT;
3046	res = get_user(len, ol);
3047	if (res)
3048		return res;
3049
3050	lock_sock(sk);
3051
3052	switch (opt) {
3053	case TIPC_IMPORTANCE:
3054		value = tsk_importance(tsk);
3055		break;
3056	case TIPC_SRC_DROPPABLE:
3057		value = tsk_unreliable(tsk);
3058		break;
3059	case TIPC_DEST_DROPPABLE:
3060		value = tsk_unreturnable(tsk);
3061		break;
3062	case TIPC_CONN_TIMEOUT:
3063		value = tsk->conn_timeout;
3064		/* no need to set "res", since already 0 at this point */
3065		break;
3066	case TIPC_NODE_RECVQ_DEPTH:
3067		value = 0; /* was tipc_queue_size, now obsolete */
3068		break;
3069	case TIPC_SOCK_RECVQ_DEPTH:
3070		value = skb_queue_len(&sk->sk_receive_queue);
3071		break;
3072	case TIPC_SOCK_RECVQ_USED:
3073		value = sk_rmem_alloc_get(sk);
3074		break;
3075	case TIPC_GROUP_JOIN:
3076		seq.type = 0;
3077		if (tsk->group)
3078			tipc_group_self(tsk->group, &seq, &scope);
3079		value = seq.type;
3080		break;
3081	default:
3082		res = -EINVAL;
3083	}
3084
3085	release_sock(sk);
3086
3087	if (res)
3088		return res;	/* "get" failed */
3089
3090	if (len < sizeof(value))
3091		return -EINVAL;
3092
3093	if (copy_to_user(ov, &value, sizeof(value)))
3094		return -EFAULT;
3095
3096	return put_user(sizeof(value), ol);
3097}
3098
3099static int tipc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
3100{
3101	struct net *net = sock_net(sock->sk);
3102	struct tipc_sioc_nodeid_req nr = {0};
3103	struct tipc_sioc_ln_req lnr;
3104	void __user *argp = (void __user *)arg;
3105
3106	switch (cmd) {
3107	case SIOCGETLINKNAME:
3108		if (copy_from_user(&lnr, argp, sizeof(lnr)))
3109			return -EFAULT;
3110		if (!tipc_node_get_linkname(net,
3111					    lnr.bearer_id & 0xffff, lnr.peer,
3112					    lnr.linkname, TIPC_MAX_LINK_NAME)) {
3113			if (copy_to_user(argp, &lnr, sizeof(lnr)))
3114				return -EFAULT;
3115			return 0;
3116		}
3117		return -EADDRNOTAVAIL;
3118	case SIOCGETNODEID:
3119		if (copy_from_user(&nr, argp, sizeof(nr)))
3120			return -EFAULT;
3121		if (!tipc_node_get_id(net, nr.peer, nr.node_id))
3122			return -EADDRNOTAVAIL;
3123		if (copy_to_user(argp, &nr, sizeof(nr)))
3124			return -EFAULT;
3125		return 0;
3126	default:
3127		return -ENOIOCTLCMD;
3128	}
3129}
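
/* Usage sketch (hypothetical user-space code) for the SIOCGETNODEID
 * branch above; 'peer_hash' is the 32-bit address of the node whose
 * 128-bit identity is wanted:
 *
 *	struct tipc_sioc_nodeid_req nr = { .peer = peer_hash };
 *	int rc = ioctl(sd, SIOCGETNODEID, &nr);
 *
 * On success (rc == 0) nr.node_id holds the identity copied back by the
 * handler above.
 */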
3130
3131static int tipc_socketpair(struct socket *sock1, struct socket *sock2)
3132{
3133	struct tipc_sock *tsk2 = tipc_sk(sock2->sk);
3134	struct tipc_sock *tsk1 = tipc_sk(sock1->sk);
3135	u32 onode = tipc_own_addr(sock_net(sock1->sk));
3136
3137	tsk1->peer.family = AF_TIPC;
3138	tsk1->peer.addrtype = TIPC_ADDR_ID;
3139	tsk1->peer.scope = TIPC_NODE_SCOPE;
3140	tsk1->peer.addr.id.ref = tsk2->portid;
3141	tsk1->peer.addr.id.node = onode;
3142	tsk2->peer.family = AF_TIPC;
3143	tsk2->peer.addrtype = TIPC_ADDR_ID;
3144	tsk2->peer.scope = TIPC_NODE_SCOPE;
3145	tsk2->peer.addr.id.ref = tsk1->portid;
3146	tsk2->peer.addr.id.node = onode;
3147
3148	tipc_sk_finish_conn(tsk1, tsk2->portid, onode);
3149	tipc_sk_finish_conn(tsk2, tsk1->portid, onode);
3150	return 0;
3151}
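
/* User-space equivalent (illustrative sketch): the two descriptors come
 * back pre-connected to each other on the local node:
 *
 *	int sv[2];
 *
 *	if (!socketpair(AF_TIPC, SOCK_SEQPACKET, 0, sv))
 *		write(sv[0], "ping", 4);
 *
 * after which the message is readable on sv[1].
 */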
3152
3153/* Protocol switches for the various types of TIPC sockets */
3154
3155static const struct proto_ops msg_ops = {
3156	.owner		= THIS_MODULE,
3157	.family		= AF_TIPC,
3158	.release	= tipc_release,
3159	.bind		= tipc_bind,
3160	.connect	= tipc_connect,
3161	.socketpair	= tipc_socketpair,
3162	.accept		= sock_no_accept,
3163	.getname	= tipc_getname,
3164	.poll		= tipc_poll,
3165	.ioctl		= tipc_ioctl,
3166	.listen		= sock_no_listen,
3167	.shutdown	= tipc_shutdown,
3168	.setsockopt	= tipc_setsockopt,
3169	.getsockopt	= tipc_getsockopt,
3170	.sendmsg	= tipc_sendmsg,
3171	.recvmsg	= tipc_recvmsg,
3172	.mmap		= sock_no_mmap,
3173	.sendpage	= sock_no_sendpage
3174};
3175
3176static const struct proto_ops packet_ops = {
3177	.owner		= THIS_MODULE,
3178	.family		= AF_TIPC,
3179	.release	= tipc_release,
3180	.bind		= tipc_bind,
3181	.connect	= tipc_connect,
3182	.socketpair	= tipc_socketpair,
3183	.accept		= tipc_accept,
3184	.getname	= tipc_getname,
3185	.poll		= tipc_poll,
3186	.ioctl		= tipc_ioctl,
3187	.listen		= tipc_listen,
3188	.shutdown	= tipc_shutdown,
3189	.setsockopt	= tipc_setsockopt,
3190	.getsockopt	= tipc_getsockopt,
3191	.sendmsg	= tipc_send_packet,
3192	.recvmsg	= tipc_recvmsg,
3193	.mmap		= sock_no_mmap,
3194	.sendpage	= sock_no_sendpage
3195};
3196
3197static const struct proto_ops stream_ops = {
3198	.owner		= THIS_MODULE,
3199	.family		= AF_TIPC,
3200	.release	= tipc_release,
3201	.bind		= tipc_bind,
3202	.connect	= tipc_connect,
3203	.socketpair	= tipc_socketpair,
3204	.accept		= tipc_accept,
3205	.getname	= tipc_getname,
3206	.poll		= tipc_poll,
3207	.ioctl		= tipc_ioctl,
3208	.listen		= tipc_listen,
3209	.shutdown	= tipc_shutdown,
3210	.setsockopt	= tipc_setsockopt,
3211	.getsockopt	= tipc_getsockopt,
3212	.sendmsg	= tipc_sendstream,
3213	.recvmsg	= tipc_recvstream,
3214	.mmap		= sock_no_mmap,
3215	.sendpage	= sock_no_sendpage
3216};
3217
3218static const struct net_proto_family tipc_family_ops = {
3219	.owner		= THIS_MODULE,
3220	.family		= AF_TIPC,
3221	.create		= tipc_sk_create
3222};
3223
3224static struct proto tipc_proto = {
3225	.name		= "TIPC",
3226	.owner		= THIS_MODULE,
3227	.obj_size	= sizeof(struct tipc_sock),
3228	.sysctl_rmem	= sysctl_tipc_rmem
3229};
3230
3231/**
3232 * tipc_socket_init - initialize TIPC socket interface
3233 *
3234 * Returns 0 on success, errno otherwise
3235 */
3236int tipc_socket_init(void)
3237{
3238	int res;
3239
3240	res = proto_register(&tipc_proto, 1);
3241	if (res) {
3242		pr_err("Failed to register TIPC protocol type\n");
3243		goto out;
3244	}
3245
3246	res = sock_register(&tipc_family_ops);
3247	if (res) {
3248		pr_err("Failed to register TIPC socket type\n");
3249		proto_unregister(&tipc_proto);
3250		goto out;
3251	}
3252 out:
3253	return res;
3254}
3255
3256/**
3257 * tipc_socket_stop - stop TIPC socket interface
3258 */
3259void tipc_socket_stop(void)
3260{
3261	sock_unregister(tipc_family_ops.family);
3262	proto_unregister(&tipc_proto);
3263}
3264
3265/* Caller should hold socket lock for the passed tipc socket. */
3266static int __tipc_nl_add_sk_con(struct sk_buff *skb, struct tipc_sock *tsk)
3267{
3268	u32 peer_node;
3269	u32 peer_port;
3270	struct nlattr *nest;
3271
3272	peer_node = tsk_peer_node(tsk);
3273	peer_port = tsk_peer_port(tsk);
3274
3275	nest = nla_nest_start_noflag(skb, TIPC_NLA_SOCK_CON);
3276	if (!nest)
3277		return -EMSGSIZE;
3278
3279	if (nla_put_u32(skb, TIPC_NLA_CON_NODE, peer_node))
3280		goto msg_full;
3281	if (nla_put_u32(skb, TIPC_NLA_CON_SOCK, peer_port))
3282		goto msg_full;
3283
3284	if (tsk->conn_type != 0) {
3285		if (nla_put_flag(skb, TIPC_NLA_CON_FLAG))
3286			goto msg_full;
3287		if (nla_put_u32(skb, TIPC_NLA_CON_TYPE, tsk->conn_type))
3288			goto msg_full;
3289		if (nla_put_u32(skb, TIPC_NLA_CON_INST, tsk->conn_instance))
3290			goto msg_full;
3291	}
3292	nla_nest_end(skb, nest);
3293
3294	return 0;
3295
3296msg_full:
3297	nla_nest_cancel(skb, nest);
3298
3299	return -EMSGSIZE;
3300}
3301
3302static int __tipc_nl_add_sk_info(struct sk_buff *skb, struct tipc_sock
3303			  *tsk)
3304{
3305	struct net *net = sock_net(skb->sk);
3306	struct sock *sk = &tsk->sk;
3307
3308	if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->portid) ||
3309	    nla_put_u32(skb, TIPC_NLA_SOCK_ADDR, tipc_own_addr(net)))
3310		return -EMSGSIZE;
3311
3312	if (tipc_sk_connected(sk)) {
3313		if (__tipc_nl_add_sk_con(skb, tsk))
3314			return -EMSGSIZE;
3315	} else if (!list_empty(&tsk->publications)) {
3316		if (nla_put_flag(skb, TIPC_NLA_SOCK_HAS_PUBL))
3317			return -EMSGSIZE;
3318	}
3319	return 0;
3320}
3321
3322/* Caller should hold socket lock for the passed tipc socket. */
3323static int __tipc_nl_add_sk(struct sk_buff *skb, struct netlink_callback *cb,
3324			    struct tipc_sock *tsk)
3325{
3326	struct nlattr *attrs;
3327	void *hdr;
3328
3329	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
3330			  &tipc_genl_family, NLM_F_MULTI, TIPC_NL_SOCK_GET);
3331	if (!hdr)
3332		goto msg_cancel;
3333
3334	attrs = nla_nest_start_noflag(skb, TIPC_NLA_SOCK);
3335	if (!attrs)
3336		goto genlmsg_cancel;
3337
3338	if (__tipc_nl_add_sk_info(skb, tsk))
3339		goto attr_msg_cancel;
3340
3341	nla_nest_end(skb, attrs);
3342	genlmsg_end(skb, hdr);
3343
3344	return 0;
3345
3346attr_msg_cancel:
3347	nla_nest_cancel(skb, attrs);
3348genlmsg_cancel:
3349	genlmsg_cancel(skb, hdr);
3350msg_cancel:
3351	return -EMSGSIZE;
3352}
3353
3354int tipc_nl_sk_walk(struct sk_buff *skb, struct netlink_callback *cb,
3355		    int (*skb_handler)(struct sk_buff *skb,
3356				       struct netlink_callback *cb,
3357				       struct tipc_sock *tsk))
3358{
3359	struct rhashtable_iter *iter = (void *)cb->args[4];
3360	struct tipc_sock *tsk;
3361	int err;
3362
3363	rhashtable_walk_start(iter);
3364	while ((tsk = rhashtable_walk_next(iter)) != NULL) {
3365		if (IS_ERR(tsk)) {
3366			err = PTR_ERR(tsk);
3367			if (err == -EAGAIN) {
3368				err = 0;
3369				continue;
3370			}
3371			break;
3372		}
3373
3374		sock_hold(&tsk->sk);
3375		rhashtable_walk_stop(iter);
3376		lock_sock(&tsk->sk);
3377		err = skb_handler(skb, cb, tsk);
3378		if (err) {
3379			release_sock(&tsk->sk);
3380			sock_put(&tsk->sk);
3381			goto out;
3382		}
3383		release_sock(&tsk->sk);
3384		rhashtable_walk_start(iter);
3385		sock_put(&tsk->sk);
3386	}
3387	rhashtable_walk_stop(iter);
3388out:
3389	return skb->len;
3390}
3391EXPORT_SYMBOL(tipc_nl_sk_walk);
3392
3393int tipc_dump_start(struct netlink_callback *cb)
3394{
3395	return __tipc_dump_start(cb, sock_net(cb->skb->sk));
3396}
3397EXPORT_SYMBOL(tipc_dump_start);
3398
3399int __tipc_dump_start(struct netlink_callback *cb, struct net *net)
3400{
3401	/* tipc_nl_name_table_dump() uses cb->args[0...3]. */
3402	struct rhashtable_iter *iter = (void *)cb->args[4];
3403	struct tipc_net *tn = tipc_net(net);
3404
3405	if (!iter) {
3406		iter = kmalloc(sizeof(*iter), GFP_KERNEL);
3407		if (!iter)
3408			return -ENOMEM;
3409
3410		cb->args[4] = (long)iter;
3411	}
3412
3413	rhashtable_walk_enter(&tn->sk_rht, iter);
3414	return 0;
3415}
3416
3417int tipc_dump_done(struct netlink_callback *cb)
3418{
3419	struct rhashtable_iter *hti = (void *)cb->args[4];
3420
3421	rhashtable_walk_exit(hti);
3422	kfree(hti);
3423	return 0;
3424}
3425EXPORT_SYMBOL(tipc_dump_done);
3426
3427int tipc_sk_fill_sock_diag(struct sk_buff *skb, struct netlink_callback *cb,
3428			   struct tipc_sock *tsk, u32 sk_filter_state,
3429			   u64 (*tipc_diag_gen_cookie)(struct sock *sk))
3430{
3431	struct sock *sk = &tsk->sk;
3432	struct nlattr *attrs;
3433	struct nlattr *stat;
3434
3435	/* filter response w.r.t. sk_state */
3436	if (!(sk_filter_state & (1 << sk->sk_state)))
3437		return 0;
3438
3439	attrs = nla_nest_start_noflag(skb, TIPC_NLA_SOCK);
3440	if (!attrs)
3441		goto msg_cancel;
3442
3443	if (__tipc_nl_add_sk_info(skb, tsk))
3444		goto attr_msg_cancel;
3445
3446	if (nla_put_u32(skb, TIPC_NLA_SOCK_TYPE, (u32)sk->sk_type) ||
3447	    nla_put_u32(skb, TIPC_NLA_SOCK_TIPC_STATE, (u32)sk->sk_state) ||
3448	    nla_put_u32(skb, TIPC_NLA_SOCK_INO, sock_i_ino(sk)) ||
3449	    nla_put_u32(skb, TIPC_NLA_SOCK_UID,
3450			from_kuid_munged(sk_user_ns(NETLINK_CB(cb->skb).sk),
3451					 sock_i_uid(sk))) ||
3452	    nla_put_u64_64bit(skb, TIPC_NLA_SOCK_COOKIE,
3453			      tipc_diag_gen_cookie(sk),
3454			      TIPC_NLA_SOCK_PAD))
3455		goto attr_msg_cancel;
3456
3457	stat = nla_nest_start_noflag(skb, TIPC_NLA_SOCK_STAT);
3458	if (!stat)
3459		goto attr_msg_cancel;
3460
3461	if (nla_put_u32(skb, TIPC_NLA_SOCK_STAT_RCVQ,
3462			skb_queue_len(&sk->sk_receive_queue)) ||
3463	    nla_put_u32(skb, TIPC_NLA_SOCK_STAT_SENDQ,
3464			skb_queue_len(&sk->sk_write_queue)) ||
3465	    nla_put_u32(skb, TIPC_NLA_SOCK_STAT_DROP,
3466			atomic_read(&sk->sk_drops)))
3467		goto stat_msg_cancel;
3468
3469	if (tsk->cong_link_cnt &&
3470	    nla_put_flag(skb, TIPC_NLA_SOCK_STAT_LINK_CONG))
3471		goto stat_msg_cancel;
3472
3473	if (tsk_conn_cong(tsk) &&
3474	    nla_put_flag(skb, TIPC_NLA_SOCK_STAT_CONN_CONG))
3475		goto stat_msg_cancel;
3476
3477	nla_nest_end(skb, stat);
3478
3479	if (tsk->group)
3480		if (tipc_group_fill_sock_diag(tsk->group, skb))
3481			goto stat_msg_cancel;
3482
3483	nla_nest_end(skb, attrs);
3484
3485	return 0;
3486
3487stat_msg_cancel:
3488	nla_nest_cancel(skb, stat);
3489attr_msg_cancel:
3490	nla_nest_cancel(skb, attrs);
3491msg_cancel:
3492	return -EMSGSIZE;
3493}
3494EXPORT_SYMBOL(tipc_sk_fill_sock_diag);
3495
3496int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb)
3497{
3498	return tipc_nl_sk_walk(skb, cb, __tipc_nl_add_sk);
3499}
3500
3501/* Caller should hold socket lock for the passed tipc socket. */
3502static int __tipc_nl_add_sk_publ(struct sk_buff *skb,
3503				 struct netlink_callback *cb,
3504				 struct publication *publ)
3505{
3506	void *hdr;
3507	struct nlattr *attrs;
3508
3509	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
3510			  &tipc_genl_family, NLM_F_MULTI, TIPC_NL_PUBL_GET);
3511	if (!hdr)
3512		goto msg_cancel;
3513
3514	attrs = nla_nest_start_noflag(skb, TIPC_NLA_PUBL);
3515	if (!attrs)
3516		goto genlmsg_cancel;
3517
3518	if (nla_put_u32(skb, TIPC_NLA_PUBL_KEY, publ->key))
3519		goto attr_msg_cancel;
3520	if (nla_put_u32(skb, TIPC_NLA_PUBL_TYPE, publ->type))
3521		goto attr_msg_cancel;
3522	if (nla_put_u32(skb, TIPC_NLA_PUBL_LOWER, publ->lower))
3523		goto attr_msg_cancel;
3524	if (nla_put_u32(skb, TIPC_NLA_PUBL_UPPER, publ->upper))
3525		goto attr_msg_cancel;
3526
3527	nla_nest_end(skb, attrs);
3528	genlmsg_end(skb, hdr);
3529
3530	return 0;
3531
3532attr_msg_cancel:
3533	nla_nest_cancel(skb, attrs);
3534genlmsg_cancel:
3535	genlmsg_cancel(skb, hdr);
3536msg_cancel:
3537	return -EMSGSIZE;
3538}
3539
3540/* Caller should hold socket lock for the passed tipc socket. */
3541static int __tipc_nl_list_sk_publ(struct sk_buff *skb,
3542				  struct netlink_callback *cb,
3543				  struct tipc_sock *tsk, u32 *last_publ)
3544{
3545	int err;
3546	struct publication *p;
3547
3548	if (*last_publ) {
3549		list_for_each_entry(p, &tsk->publications, binding_sock) {
3550			if (p->key == *last_publ)
3551				break;
3552		}
3553		if (p->key != *last_publ) {
3554			/* We never set seq or call nl_dump_check_consistent()
3555			 * this means that setting prev_seq here will cause the
3556			 * consistency check to fail in the netlink callback
3557			 * handler, resulting in the last NLMSG_DONE message
3558			 * having the NLM_F_DUMP_INTR flag set.
3559			 */
3560			cb->prev_seq = 1;
3561			*last_publ = 0;
3562			return -EPIPE;
3563		}
3564	} else {
3565		p = list_first_entry(&tsk->publications, struct publication,
3566				     binding_sock);
3567	}
3568
3569	list_for_each_entry_from(p, &tsk->publications, binding_sock) {
3570		err = __tipc_nl_add_sk_publ(skb, cb, p);
3571		if (err) {
3572			*last_publ = p->key;
3573			return err;
3574		}
3575	}
3576	*last_publ = 0;
3577
3578	return 0;
3579}
3580
3581int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb)
3582{
3583	int err;
3584	u32 tsk_portid = cb->args[0];
3585	u32 last_publ = cb->args[1];
3586	u32 done = cb->args[2];
3587	struct net *net = sock_net(skb->sk);
3588	struct tipc_sock *tsk;
3589
3590	if (!tsk_portid) {
3591		struct nlattr **attrs;
3592		struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1];
3593
3594		err = tipc_nlmsg_parse(cb->nlh, &attrs);
3595		if (err)
3596			return err;
3597
3598		if (!attrs[TIPC_NLA_SOCK])
3599			return -EINVAL;
3600
3601		err = nla_parse_nested_deprecated(sock, TIPC_NLA_SOCK_MAX,
3602						  attrs[TIPC_NLA_SOCK],
3603						  tipc_nl_sock_policy, NULL);
3604		if (err)
3605			return err;
3606
3607		if (!sock[TIPC_NLA_SOCK_REF])
3608			return -EINVAL;
3609
3610		tsk_portid = nla_get_u32(sock[TIPC_NLA_SOCK_REF]);
3611	}
3612
3613	if (done)
3614		return 0;
3615
3616	tsk = tipc_sk_lookup(net, tsk_portid);
3617	if (!tsk)
3618		return -EINVAL;
3619
3620	lock_sock(&tsk->sk);
3621	err = __tipc_nl_list_sk_publ(skb, cb, tsk, &last_publ);
3622	if (!err)
3623		done = 1;
3624	release_sock(&tsk->sk);
3625	sock_put(&tsk->sk);
3626
3627	cb->args[0] = tsk_portid;
3628	cb->args[1] = last_publ;
3629	cb->args[2] = done;
3630
3631	return skb->len;
3632}
3633
3634/**
3635 * tipc_sk_filtering - check if a socket should be traced
3636 * @sk: the socket to be examined
3637 * @sysctl_tipc_sk_filter[]: the socket tuple for filtering,
3638 *  (portid, sock type, name type, name lower, name upper)
3639 *
3640 * Returns true if the socket meets the socket tuple data
3641 * (value 0 = 'any') or when there is no tuple set (all = 0),
3642 * otherwise false
3643 */
3644bool tipc_sk_filtering(struct sock *sk)
3645{
3646	struct tipc_sock *tsk;
3647	struct publication *p;
3648	u32 _port, _sktype, _type, _lower, _upper;
3649	u32 type = 0, lower = 0, upper = 0;
3650
3651	if (!sk)
3652		return true;
3653
3654	tsk = tipc_sk(sk);
3655
3656	_port = sysctl_tipc_sk_filter[0];
3657	_sktype = sysctl_tipc_sk_filter[1];
3658	_type = sysctl_tipc_sk_filter[2];
3659	_lower = sysctl_tipc_sk_filter[3];
3660	_upper = sysctl_tipc_sk_filter[4];
3661
3662	if (!_port && !_sktype && !_type && !_lower && !_upper)
3663		return true;
3664
3665	if (_port)
3666		return (_port == tsk->portid);
3667
3668	if (_sktype && _sktype != sk->sk_type)
3669		return false;
3670
3671	if (tsk->published) {
3672		p = list_first_entry_or_null(&tsk->publications,
3673					     struct publication, binding_sock);
3674		if (p) {
3675			type = p->type;
3676			lower = p->lower;
3677			upper = p->upper;
3678		}
3679	}
3680
3681	if (!tipc_sk_type_connectionless(sk)) {
3682		type = tsk->conn_type;
3683		lower = tsk->conn_instance;
3684		upper = tsk->conn_instance;
3685	}
3686
3687	if ((_type && _type != type) || (_lower && _lower != lower) ||
3688	    (_upper && _upper != upper))
3689		return false;
3690
3691	return true;
3692}
3693
3694u32 tipc_sock_get_portid(struct sock *sk)
3695{
3696	return (sk) ? (tipc_sk(sk))->portid : 0;
3697}
3698
3699/**
3700 * tipc_sk_overlimit1 - check if socket rx queue is about to be overloaded,
3701 *			both the rcv and backlog queues are considered
3702 * @sk: tipc sk to be checked
3703 * @skb: tipc msg to be checked
3704 *
3705 * Returns true if the socket rx queue allocation is > 90%, otherwise false
3706 */
3707
3708bool tipc_sk_overlimit1(struct sock *sk, struct sk_buff *skb)
3709{
3710	atomic_t *dcnt = &tipc_sk(sk)->dupl_rcvcnt;
3711	unsigned int lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
3712	unsigned int qsize = sk->sk_backlog.len + sk_rmem_alloc_get(sk);
3713
3714	return (qsize > lim * 90 / 100);
3715}
3716
3717/**
3718 * tipc_sk_overlimit2 - check if socket rx queue is about to be overloaded,
3719 *			only the rcv queue is considered
3720 * @sk: tipc sk to be checked
3721 * @skb: tipc msg to be checked
3722 *
3723 * Returns true if the socket rx queue allocation is > 90%, otherwise false
3724 */
3725
3726bool tipc_sk_overlimit2(struct sock *sk, struct sk_buff *skb)
3727{
3728	unsigned int lim = rcvbuf_limit(sk, skb);
3729	unsigned int qsize = sk_rmem_alloc_get(sk);
3730
3731	return (qsize > lim * 90 / 100);
3732}
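/* Worked example (illustrative): with rcvbuf_limit() evaluating to 2 MB,
 * either helper reports "over limit" as soon as more than 90 %, i.e.
 * about 1.8 MB, is already accounted to the queue(s) in question.
 */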
3733
3734/**
3735 * tipc_sk_dump - dump TIPC socket
3736 * @sk: tipc sk to be dumped
3737 * @dqueues: bitmask indicating which socket queues to dump:
3738 *           - TIPC_DUMP_NONE: don't dump socket queues
3739 *           - TIPC_DUMP_SK_SNDQ: dump socket send queue
3740 *           - TIPC_DUMP_SK_RCVQ: dump socket rcv queue
3741 *           - TIPC_DUMP_SK_BKLGQ: dump socket backlog queue
3742 *           - TIPC_DUMP_ALL: dump all the socket queues above
3743 * @buf: returned buffer holding the dump data as a formatted string
3744 */
3745int tipc_sk_dump(struct sock *sk, u16 dqueues, char *buf)
3746{
3747	int i = 0;
3748	size_t sz = (dqueues) ? SK_LMAX : SK_LMIN;
3749	struct tipc_sock *tsk;
3750	struct publication *p;
3751	bool tsk_connected;
3752
3753	if (!sk) {
3754		i += scnprintf(buf, sz, "sk data: (null)\n");
3755		return i;
3756	}
3757
3758	tsk = tipc_sk(sk);
3759	tsk_connected = !tipc_sk_type_connectionless(sk);
3760
3761	i += scnprintf(buf, sz, "sk data: %u", sk->sk_type);
3762	i += scnprintf(buf + i, sz - i, " %d", sk->sk_state);
3763	i += scnprintf(buf + i, sz - i, " %x", tsk_own_node(tsk));
3764	i += scnprintf(buf + i, sz - i, " %u", tsk->portid);
3765	i += scnprintf(buf + i, sz - i, " | %u", tsk_connected);
3766	if (tsk_connected) {
3767		i += scnprintf(buf + i, sz - i, " %x", tsk_peer_node(tsk));
3768		i += scnprintf(buf + i, sz - i, " %u", tsk_peer_port(tsk));
3769		i += scnprintf(buf + i, sz - i, " %u", tsk->conn_type);
3770		i += scnprintf(buf + i, sz - i, " %u", tsk->conn_instance);
3771	}
3772	i += scnprintf(buf + i, sz - i, " | %u", tsk->published);
3773	if (tsk->published) {
3774		p = list_first_entry_or_null(&tsk->publications,
3775					     struct publication, binding_sock);
3776		i += scnprintf(buf + i, sz - i, " %u", (p) ? p->type : 0);
3777		i += scnprintf(buf + i, sz - i, " %u", (p) ? p->lower : 0);
3778		i += scnprintf(buf + i, sz - i, " %u", (p) ? p->upper : 0);
3779	}
3780	i += scnprintf(buf + i, sz - i, " | %u", tsk->snd_win);
3781	i += scnprintf(buf + i, sz - i, " %u", tsk->rcv_win);
3782	i += scnprintf(buf + i, sz - i, " %u", tsk->max_pkt);
3783	i += scnprintf(buf + i, sz - i, " %x", tsk->peer_caps);
3784	i += scnprintf(buf + i, sz - i, " %u", tsk->cong_link_cnt);
3785	i += scnprintf(buf + i, sz - i, " %u", tsk->snt_unacked);
3786	i += scnprintf(buf + i, sz - i, " %u", tsk->rcv_unacked);
3787	i += scnprintf(buf + i, sz - i, " %u", atomic_read(&tsk->dupl_rcvcnt));
3788	i += scnprintf(buf + i, sz - i, " %u", sk->sk_shutdown);
3789	i += scnprintf(buf + i, sz - i, " | %d", sk_wmem_alloc_get(sk));
3790	i += scnprintf(buf + i, sz - i, " %d", sk->sk_sndbuf);
3791	i += scnprintf(buf + i, sz - i, " | %d", sk_rmem_alloc_get(sk));
3792	i += scnprintf(buf + i, sz - i, " %d", sk->sk_rcvbuf);
3793	i += scnprintf(buf + i, sz - i, " | %d\n", READ_ONCE(sk->sk_backlog.len));
3794
3795	if (dqueues & TIPC_DUMP_SK_SNDQ) {
3796		i += scnprintf(buf + i, sz - i, "sk_write_queue: ");
3797		i += tipc_list_dump(&sk->sk_write_queue, false, buf + i);
3798	}
3799
3800	if (dqueues & TIPC_DUMP_SK_RCVQ) {
3801		i += scnprintf(buf + i, sz - i, "sk_receive_queue: ");
3802		i += tipc_list_dump(&sk->sk_receive_queue, false, buf + i);
3803	}
3804
3805	if (dqueues & TIPC_DUMP_SK_BKLGQ) {
3806		i += scnprintf(buf + i, sz - i, "sk_backlog:\n  head ");
3807		i += tipc_skb_dump(sk->sk_backlog.head, false, buf + i);
3808		if (sk->sk_backlog.tail != sk->sk_backlog.head) {
3809			i += scnprintf(buf + i, sz - i, "  tail ");
3810			i += tipc_skb_dump(sk->sk_backlog.tail, false,
3811					   buf + i);
3812		}
3813	}
3814
3815	return i;
3816}
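/* Usage sketch (illustrative): callers provide a buffer of at least
 * SK_LMAX bytes when dumping queues, e.g. from a debug path:
 *
 *	char dbuf[SK_LMAX];
 *
 *	tipc_sk_dump(sk, TIPC_DUMP_ALL, dbuf);
 *	pr_info("%s", dbuf);
 */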
v6.9.4
   1/*
   2 * net/tipc/socket.c: TIPC socket API
   3 *
   4 * Copyright (c) 2001-2007, 2012-2019, Ericsson AB
   5 * Copyright (c) 2004-2008, 2010-2013, Wind River Systems
   6 * Copyright (c) 2020-2021, Red Hat Inc
   7 * All rights reserved.
   8 *
   9 * Redistribution and use in source and binary forms, with or without
  10 * modification, are permitted provided that the following conditions are met:
  11 *
  12 * 1. Redistributions of source code must retain the above copyright
  13 *    notice, this list of conditions and the following disclaimer.
  14 * 2. Redistributions in binary form must reproduce the above copyright
  15 *    notice, this list of conditions and the following disclaimer in the
  16 *    documentation and/or other materials provided with the distribution.
  17 * 3. Neither the names of the copyright holders nor the names of its
  18 *    contributors may be used to endorse or promote products derived from
  19 *    this software without specific prior written permission.
  20 *
  21 * Alternatively, this software may be distributed under the terms of the
  22 * GNU General Public License ("GPL") version 2 as published by the Free
  23 * Software Foundation.
  24 *
  25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  26 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  28 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  29 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  35 * POSSIBILITY OF SUCH DAMAGE.
  36 */
  37
  38#include <linux/rhashtable.h>
  39#include <linux/sched/signal.h>
  40#include <trace/events/sock.h>
  41
  42#include "core.h"
  43#include "name_table.h"
  44#include "node.h"
  45#include "link.h"
  46#include "name_distr.h"
  47#include "socket.h"
  48#include "bcast.h"
  49#include "netlink.h"
  50#include "group.h"
  51#include "trace.h"
  52
  53#define NAGLE_START_INIT	4
  54#define NAGLE_START_MAX		1024
  55#define CONN_TIMEOUT_DEFAULT    8000    /* default connect timeout = 8s */
  56#define CONN_PROBING_INTV	msecs_to_jiffies(3600000)  /* [ms] => 1 h */
  57#define TIPC_MAX_PORT		0xffffffff
  58#define TIPC_MIN_PORT		1
  59#define TIPC_ACK_RATE		4       /* ACK at 1/4 of rcv window size */
  60
  61enum {
  62	TIPC_LISTEN = TCP_LISTEN,
  63	TIPC_ESTABLISHED = TCP_ESTABLISHED,
  64	TIPC_OPEN = TCP_CLOSE,
  65	TIPC_DISCONNECTING = TCP_CLOSE_WAIT,
  66	TIPC_CONNECTING = TCP_SYN_SENT,
  67};
  68
  69struct sockaddr_pair {
  70	struct sockaddr_tipc sock;
  71	struct sockaddr_tipc member;
  72};
  73
  74/**
  75 * struct tipc_sock - TIPC socket structure
  76 * @sk: socket - interacts with 'port' and with user via the socket API
  77 * @max_pkt: maximum packet size "hint" used when building messages sent by port
  78 * @maxnagle: maximum size of msg which can be subject to nagle
  79 * @portid: unique port identity in TIPC socket hash table
  80 * @phdr: preformatted message header used when sending messages
  81 * @cong_links: list of congested links
  82 * @publications: list of publications for port
  83 * @pub_count: total # of publications port has made during its lifetime
  84 * @conn_timeout: the time we can wait for an unresponded setup request
  85 * @probe_unacked: probe has not received ack yet
  86 * @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue
  87 * @cong_link_cnt: number of congested links
  88 * @snt_unacked: # messages sent by socket, and not yet acked by peer
  89 * @snd_win: send window size
  90 * @peer_caps: peer capabilities mask
  91 * @rcv_unacked: # messages read by user, but not yet acked back to peer
  92 * @rcv_win: receive window size
  93 * @peer: 'connected' peer for dgram/rdm
  94 * @node: hash table node
  95 * @mc_method: cookie for use between socket and broadcast layer
  96 * @rcu: rcu struct for tipc_sock
  97 * @group: TIPC communications group
  98 * @oneway: message count in one direction (FIXME)
  99 * @nagle_start: current nagle value
 100 * @snd_backlog: send backlog count
 101 * @msg_acc: messages accepted; used in managing backlog and nagle
 102 * @pkt_cnt: TIPC socket packet count
 103 * @expect_ack: whether this TIPC socket is expecting an ack
 104 * @nodelay: setsockopt() TIPC_NODELAY setting
 105 * @group_is_open: TIPC socket group is fully open (FIXME)
 106 * @published: true if port has one or more associated names
 107 * @conn_addrtype: address type used when establishing connection
 108 */
 109struct tipc_sock {
 110	struct sock sk;
 111	u32 max_pkt;
 112	u32 maxnagle;
 113	u32 portid;
 114	struct tipc_msg phdr;
 115	struct list_head cong_links;
 116	struct list_head publications;
 117	u32 pub_count;
 118	atomic_t dupl_rcvcnt;
 119	u16 conn_timeout;
 120	bool probe_unacked;
 121	u16 cong_link_cnt;
 122	u16 snt_unacked;
 123	u16 snd_win;
 124	u16 peer_caps;
 125	u16 rcv_unacked;
 126	u16 rcv_win;
 127	struct sockaddr_tipc peer;
 128	struct rhash_head node;
 129	struct tipc_mc_method mc_method;
 130	struct rcu_head rcu;
 131	struct tipc_group *group;
 132	u32 oneway;
 133	u32 nagle_start;
 134	u16 snd_backlog;
 135	u16 msg_acc;
 136	u16 pkt_cnt;
 137	bool expect_ack;
 138	bool nodelay;
 139	bool group_is_open;
 140	bool published;
 141	u8 conn_addrtype;
 142};
 143
 144static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
 145static void tipc_data_ready(struct sock *sk);
 146static void tipc_write_space(struct sock *sk);
 147static void tipc_sock_destruct(struct sock *sk);
 148static int tipc_release(struct socket *sock);
 149static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
 150		       bool kern);
 151static void tipc_sk_timeout(struct timer_list *t);
 152static int tipc_sk_publish(struct tipc_sock *tsk, struct tipc_uaddr *ua);
 153static int tipc_sk_withdraw(struct tipc_sock *tsk, struct tipc_uaddr *ua);
 154static int tipc_sk_leave(struct tipc_sock *tsk);
 155static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid);
 156static int tipc_sk_insert(struct tipc_sock *tsk);
 157static void tipc_sk_remove(struct tipc_sock *tsk);
 158static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz);
 159static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz);
 160static void tipc_sk_push_backlog(struct tipc_sock *tsk, bool nagle_ack);
 161static int tipc_wait_for_connect(struct socket *sock, long *timeo_p);
 162
 163static const struct proto_ops packet_ops;
 164static const struct proto_ops stream_ops;
 165static const struct proto_ops msg_ops;
 166static struct proto tipc_proto;
 167static const struct rhashtable_params tsk_rht_params;
 168
 169static u32 tsk_own_node(struct tipc_sock *tsk)
 170{
 171	return msg_prevnode(&tsk->phdr);
 172}
 173
 174static u32 tsk_peer_node(struct tipc_sock *tsk)
 175{
 176	return msg_destnode(&tsk->phdr);
 177}
 178
 179static u32 tsk_peer_port(struct tipc_sock *tsk)
 180{
 181	return msg_destport(&tsk->phdr);
 182}
 183
 184static  bool tsk_unreliable(struct tipc_sock *tsk)
 185{
 186	return msg_src_droppable(&tsk->phdr) != 0;
 187}
 188
 189static void tsk_set_unreliable(struct tipc_sock *tsk, bool unreliable)
 190{
 191	msg_set_src_droppable(&tsk->phdr, unreliable ? 1 : 0);
 192}
 193
 194static bool tsk_unreturnable(struct tipc_sock *tsk)
 195{
 196	return msg_dest_droppable(&tsk->phdr) != 0;
 197}
 198
 199static void tsk_set_unreturnable(struct tipc_sock *tsk, bool unreturnable)
 200{
 201	msg_set_dest_droppable(&tsk->phdr, unreturnable ? 1 : 0);
 202}
 203
 204static int tsk_importance(struct tipc_sock *tsk)
 205{
 206	return msg_importance(&tsk->phdr);
 207}
 208
 209static struct tipc_sock *tipc_sk(const struct sock *sk)
 210{
 211	return container_of(sk, struct tipc_sock, sk);
 212}
 213
 214int tsk_set_importance(struct sock *sk, int imp)
 215{
 216	if (imp > TIPC_CRITICAL_IMPORTANCE)
 217		return -EINVAL;
 218	msg_set_importance(&tipc_sk(sk)->phdr, (u32)imp);
 219	return 0;
 220}
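/* User-space sketch (illustrative): this is the handler behind the
 * TIPC_IMPORTANCE socket option, e.g.:
 *
 *	int imp = TIPC_HIGH_IMPORTANCE;
 *
 *	setsockopt(fd, SOL_TIPC, TIPC_IMPORTANCE, &imp, sizeof(imp));
 */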
 221
 222static bool tsk_conn_cong(struct tipc_sock *tsk)
 223{
 224	return tsk->snt_unacked > tsk->snd_win;
 225}
 226
 227static u16 tsk_blocks(int len)
 228{
 229	return ((len / FLOWCTL_BLK_SZ) + 1);
 230}
 231
 232/* tsk_adv_blocks(): translate a buffer size in bytes to the number of
 233 * advertisable blocks, taking into account the ratio truesize(len)/len.
 234 * We can trust that this ratio is always < 4 for len >= FLOWCTL_BLK_SZ.
 235 */
 236static u16 tsk_adv_blocks(int len)
 237{
 238	return len / FLOWCTL_BLK_SZ / 4;
 239}
 240
 241/* tsk_inc(): increment counter for sent or received data
 242 * - If block based flow control is not supported by the peer we
 243 *   fall back to message based flow control, incrementing the counter
 244 */
 245static u16 tsk_inc(struct tipc_sock *tsk, int msglen)
 246{
 247	if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
 248		return ((msglen / FLOWCTL_BLK_SZ) + 1);
 249	return 1;
 250}
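/* Worked example, assuming FLOWCTL_BLK_SZ is 1024 as in current kernels:
 * a 1500 byte message costs tsk_inc() = 1500/1024 + 1 = 2 advertised
 * blocks when the peer supports TIPC_BLOCK_FLOWCTL, and exactly 1
 * (one message) otherwise; tsk_adv_blocks(65536) = 65536/1024/4 = 16.
 */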
 251
 252/* tsk_set_nagle - enable/disable nagle property by manipulating maxnagle
 253 */
 254static void tsk_set_nagle(struct tipc_sock *tsk)
 255{
 256	struct sock *sk = &tsk->sk;
 257
 258	tsk->maxnagle = 0;
 259	if (sk->sk_type != SOCK_STREAM)
 260		return;
 261	if (tsk->nodelay)
 262		return;
 263	if (!(tsk->peer_caps & TIPC_NAGLE))
 264		return;
 265	/* Limit node local buffer size to avoid receive queue overflow */
 266	if (tsk->max_pkt == MAX_MSG_SIZE)
 267		tsk->maxnagle = 1500;
 268	else
 269		tsk->maxnagle = tsk->max_pkt;
 270}
 271
 272/**
 273 * tsk_advance_rx_queue - discard first buffer in socket receive queue
 274 * @sk: network socket
 275 *
 276 * Caller must hold socket lock
 277 */
 278static void tsk_advance_rx_queue(struct sock *sk)
 279{
 280	trace_tipc_sk_advance_rx(sk, NULL, TIPC_DUMP_SK_RCVQ, " ");
 281	kfree_skb(__skb_dequeue(&sk->sk_receive_queue));
 282}
 283
 284/* tipc_sk_respond() : send response message back to sender
 285 */
 286static void tipc_sk_respond(struct sock *sk, struct sk_buff *skb, int err)
 287{
 288	u32 selector;
 289	u32 dnode;
 290	u32 onode = tipc_own_addr(sock_net(sk));
 291
 292	if (!tipc_msg_reverse(onode, &skb, err))
 293		return;
 294
 295	trace_tipc_sk_rej_msg(sk, skb, TIPC_DUMP_NONE, "@sk_respond!");
 296	dnode = msg_destnode(buf_msg(skb));
 297	selector = msg_origport(buf_msg(skb));
 298	tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector);
 299}
 300
 301/**
 302 * tsk_rej_rx_queue - reject all buffers in socket receive queue
 303 * @sk: network socket
 304 * @error: response error code
 305 *
 306 * Caller must hold socket lock
 307 */
 308static void tsk_rej_rx_queue(struct sock *sk, int error)
 309{
 310	struct sk_buff *skb;
 311
 312	while ((skb = __skb_dequeue(&sk->sk_receive_queue)))
 313		tipc_sk_respond(sk, skb, error);
 314}
 315
 316static bool tipc_sk_connected(const struct sock *sk)
 317{
 318	return READ_ONCE(sk->sk_state) == TIPC_ESTABLISHED;
 319}
 320
 321/* tipc_sk_type_connectionless - check if the socket is datagram socket
 322 * @sk: socket
 323 *
 324 * Returns true if connection less, false otherwise
 325 */
 326static bool tipc_sk_type_connectionless(struct sock *sk)
 327{
 328	return sk->sk_type == SOCK_RDM || sk->sk_type == SOCK_DGRAM;
 329}
 330
 331/* tsk_peer_msg - verify if message was sent by connected port's peer
 332 *
 333 * Handles cases where the node's network address has changed from
 334 * the default of <0.0.0> to its configured setting.
 335 */
 336static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg)
 337{
 338	struct sock *sk = &tsk->sk;
 339	u32 self = tipc_own_addr(sock_net(sk));
 340	u32 peer_port = tsk_peer_port(tsk);
 341	u32 orig_node, peer_node;
 342
 343	if (unlikely(!tipc_sk_connected(sk)))
 344		return false;
 345
 346	if (unlikely(msg_origport(msg) != peer_port))
 347		return false;
 348
 349	orig_node = msg_orignode(msg);
 350	peer_node = tsk_peer_node(tsk);
 351
 352	if (likely(orig_node == peer_node))
 353		return true;
 354
 355	if (!orig_node && peer_node == self)
 356		return true;
 357
 358	if (!peer_node && orig_node == self)
 359		return true;
 360
 361	return false;
 362}
 363
 364/* tipc_set_sk_state - set the sk_state of the socket
 365 * @sk: socket
 366 *
 367 * Caller must hold socket lock
 368 *
 369 * Returns 0 on success, errno otherwise
 370 */
 371static int tipc_set_sk_state(struct sock *sk, int state)
 372{
 373	int oldsk_state = sk->sk_state;
 374	int res = -EINVAL;
 375
 376	switch (state) {
 377	case TIPC_OPEN:
 378		res = 0;
 379		break;
 380	case TIPC_LISTEN:
 381	case TIPC_CONNECTING:
 382		if (oldsk_state == TIPC_OPEN)
 383			res = 0;
 384		break;
 385	case TIPC_ESTABLISHED:
 386		if (oldsk_state == TIPC_CONNECTING ||
 387		    oldsk_state == TIPC_OPEN)
 388			res = 0;
 389		break;
 390	case TIPC_DISCONNECTING:
 391		if (oldsk_state == TIPC_CONNECTING ||
 392		    oldsk_state == TIPC_ESTABLISHED)
 393			res = 0;
 394		break;
 395	}
 396
 397	if (!res)
 398		sk->sk_state = state;
 399
 400	return res;
 401}
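/* Accepted transitions, summarized (any state may fall back to TIPC_OPEN):
 *
 *	TIPC_OPEN        -> TIPC_LISTEN | TIPC_CONNECTING | TIPC_ESTABLISHED
 *	TIPC_CONNECTING  -> TIPC_ESTABLISHED | TIPC_DISCONNECTING
 *	TIPC_ESTABLISHED -> TIPC_DISCONNECTING
 */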
 402
 403static int tipc_sk_sock_err(struct socket *sock, long *timeout)
 404{
 405	struct sock *sk = sock->sk;
 406	int err = sock_error(sk);
 407	int typ = sock->type;
 408
 409	if (err)
 410		return err;
 411	if (typ == SOCK_STREAM || typ == SOCK_SEQPACKET) {
 412		if (sk->sk_state == TIPC_DISCONNECTING)
 413			return -EPIPE;
 414		else if (!tipc_sk_connected(sk))
 415			return -ENOTCONN;
 416	}
 417	if (!*timeout)
 418		return -EAGAIN;
 419	if (signal_pending(current))
 420		return sock_intr_errno(*timeout);
 421
 422	return 0;
 423}
 424
 425#define tipc_wait_for_cond(sock_, timeo_, condition_)			       \
 426({                                                                             \
 427	DEFINE_WAIT_FUNC(wait_, woken_wake_function);                          \
 428	struct sock *sk_;						       \
 429	int rc_;							       \
 430									       \
 431	while ((rc_ = !(condition_))) {					       \
 432		/* coupled with smp_wmb() in tipc_sk_proto_rcv() */            \
 433		smp_rmb();                                                     \
 434		sk_ = (sock_)->sk;					       \
 435		rc_ = tipc_sk_sock_err((sock_), timeo_);		       \
 436		if (rc_)						       \
 437			break;						       \
 438		add_wait_queue(sk_sleep(sk_), &wait_);                         \
 439		release_sock(sk_);					       \
 440		*(timeo_) = wait_woken(&wait_, TASK_INTERRUPTIBLE, *(timeo_)); \
 441		sched_annotate_sleep();				               \
 442		lock_sock(sk_);						       \
 443		remove_wait_queue(sk_sleep(sk_), &wait_);		       \
 444	}								       \
 445	rc_;								       \
 446})
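/* Typical use, as in tipc_sendmcast() further below - sleep until no
 * destination link is congested, or until error/timeout/signal:
 *
 *	rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt);
 */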
 447
 448/**
 449 * tipc_sk_create - create a TIPC socket
 450 * @net: network namespace (must be default network)
 451 * @sock: pre-allocated socket structure
 452 * @protocol: protocol indicator (must be 0)
 453 * @kern: caused by kernel or by userspace?
 454 *
 455 * This routine creates additional data structures used by the TIPC socket,
 456 * initializes them, and links them together.
 457 *
 458 * Return: 0 on success, errno otherwise
 459 */
 460static int tipc_sk_create(struct net *net, struct socket *sock,
 461			  int protocol, int kern)
 462{
 463	const struct proto_ops *ops;
 464	struct sock *sk;
 465	struct tipc_sock *tsk;
 466	struct tipc_msg *msg;
 467
 468	/* Validate arguments */
 469	if (unlikely(protocol != 0))
 470		return -EPROTONOSUPPORT;
 471
 472	switch (sock->type) {
 473	case SOCK_STREAM:
 474		ops = &stream_ops;
 475		break;
 476	case SOCK_SEQPACKET:
 477		ops = &packet_ops;
 478		break;
 479	case SOCK_DGRAM:
 480	case SOCK_RDM:
 481		ops = &msg_ops;
 482		break;
 483	default:
 484		return -EPROTOTYPE;
 485	}
 486
 487	/* Allocate socket's protocol area */
 488	sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto, kern);
 489	if (sk == NULL)
 490		return -ENOMEM;
 491
 492	tsk = tipc_sk(sk);
 493	tsk->max_pkt = MAX_PKT_DEFAULT;
 494	tsk->maxnagle = 0;
 495	tsk->nagle_start = NAGLE_START_INIT;
 496	INIT_LIST_HEAD(&tsk->publications);
 497	INIT_LIST_HEAD(&tsk->cong_links);
 498	msg = &tsk->phdr;
 499
 500	/* Finish initializing socket data structures */
 501	sock->ops = ops;
 502	sock_init_data(sock, sk);
 503	tipc_set_sk_state(sk, TIPC_OPEN);
 504	if (tipc_sk_insert(tsk)) {
 505		sk_free(sk);
 506		pr_warn("Socket create failed; port number exhausted\n");
 507		return -EINVAL;
 508	}
 509
 510	/* Ensure tsk is visible before we read own_addr. */
 511	smp_mb();
 512
 513	tipc_msg_init(tipc_own_addr(net), msg, TIPC_LOW_IMPORTANCE,
 514		      TIPC_NAMED_MSG, NAMED_H_SIZE, 0);
 515
 516	msg_set_origport(msg, tsk->portid);
 517	timer_setup(&sk->sk_timer, tipc_sk_timeout, 0);
 518	sk->sk_shutdown = 0;
 519	sk->sk_backlog_rcv = tipc_sk_backlog_rcv;
 520	sk->sk_rcvbuf = READ_ONCE(sysctl_tipc_rmem[1]);
 521	sk->sk_data_ready = tipc_data_ready;
 522	sk->sk_write_space = tipc_write_space;
 523	sk->sk_destruct = tipc_sock_destruct;
 524	tsk->conn_timeout = CONN_TIMEOUT_DEFAULT;
 525	tsk->group_is_open = true;
 526	atomic_set(&tsk->dupl_rcvcnt, 0);
 527
 528	/* Start out with safe limits until we receive an advertised window */
 529	tsk->snd_win = tsk_adv_blocks(RCVBUF_MIN);
 530	tsk->rcv_win = tsk->snd_win;
 531
 532	if (tipc_sk_type_connectionless(sk)) {
 533		tsk_set_unreturnable(tsk, true);
 534		if (sock->type == SOCK_DGRAM)
 535			tsk_set_unreliable(tsk, true);
 536	}
 537	__skb_queue_head_init(&tsk->mc_method.deferredq);
 538	trace_tipc_sk_create(sk, NULL, TIPC_DUMP_NONE, " ");
 539	return 0;
 540}
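/* User-space counterpart (illustrative): every TIPC socket type ends up
 * here, and protocol must be 0:
 *
 *	int fd = socket(AF_TIPC, SOCK_RDM, 0);      // reliable datagram
 *	int sd = socket(AF_TIPC, SOCK_STREAM, 0);   // byte stream
 */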
 541
 542static void tipc_sk_callback(struct rcu_head *head)
 543{
 544	struct tipc_sock *tsk = container_of(head, struct tipc_sock, rcu);
 545
 546	sock_put(&tsk->sk);
 547}
 548
 549/* Caller should hold socket lock for the socket. */
 550static void __tipc_shutdown(struct socket *sock, int error)
 551{
 552	struct sock *sk = sock->sk;
 553	struct tipc_sock *tsk = tipc_sk(sk);
 554	struct net *net = sock_net(sk);
 555	long timeout = msecs_to_jiffies(CONN_TIMEOUT_DEFAULT);
 556	u32 dnode = tsk_peer_node(tsk);
 557	struct sk_buff *skb;
 558
 559	/* Avoid that hi-prio shutdown msgs bypass msgs in link wakeup queue */
 560	tipc_wait_for_cond(sock, &timeout, (!tsk->cong_link_cnt &&
 561					    !tsk_conn_cong(tsk)));
 562
 563	/* Push out delayed messages if in Nagle mode */
 564	tipc_sk_push_backlog(tsk, false);
 565	/* Remove pending SYN */
 566	__skb_queue_purge(&sk->sk_write_queue);
 567
 568	/* Remove partially received buffer if any */
 569	skb = skb_peek(&sk->sk_receive_queue);
 570	if (skb && TIPC_SKB_CB(skb)->bytes_read) {
 571		__skb_unlink(skb, &sk->sk_receive_queue);
 572		kfree_skb(skb);
 573	}
 574
 575	/* Reject all unreceived messages if connectionless */
 576	if (tipc_sk_type_connectionless(sk)) {
 577		tsk_rej_rx_queue(sk, error);
 578		return;
 579	}
 580
 581	switch (sk->sk_state) {
 582	case TIPC_CONNECTING:
 583	case TIPC_ESTABLISHED:
 584		tipc_set_sk_state(sk, TIPC_DISCONNECTING);
 585		tipc_node_remove_conn(net, dnode, tsk->portid);
 586		/* Send a FIN+/- to its peer */
 587		skb = __skb_dequeue(&sk->sk_receive_queue);
 588		if (skb) {
 589			__skb_queue_purge(&sk->sk_receive_queue);
 590			tipc_sk_respond(sk, skb, error);
 591			break;
 592		}
 593		skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
 594				      TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode,
 595				      tsk_own_node(tsk), tsk_peer_port(tsk),
 596				      tsk->portid, error);
 597		if (skb)
 598			tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
 599		break;
 600	case TIPC_LISTEN:
 601		/* Reject all SYN messages */
 602		tsk_rej_rx_queue(sk, error);
 603		break;
 604	default:
 605		__skb_queue_purge(&sk->sk_receive_queue);
 606		break;
 607	}
 608}
 609
 610/**
 611 * tipc_release - destroy a TIPC socket
 612 * @sock: socket to destroy
 613 *
 614 * This routine cleans up any messages that are still queued on the socket.
 615 * For DGRAM and RDM socket types, all queued messages are rejected.
 616 * For SEQPACKET and STREAM socket types, the first message is rejected
 617 * and any others are discarded.  (If the first message on a STREAM socket
 618 * is partially-read, it is discarded and the next one is rejected instead.)
 619 *
 620 * NOTE: Rejected messages are not necessarily returned to the sender!  They
 621 * are returned or discarded according to the "destination droppable" setting
 622 * specified for the message by the sender.
 623 *
 624 * Return: 0 on success, errno otherwise
 625 */
 626static int tipc_release(struct socket *sock)
 627{
 628	struct sock *sk = sock->sk;
 629	struct tipc_sock *tsk;
 630
 631	/*
 632	 * Exit if socket isn't fully initialized (occurs when a failed accept()
 633	 * releases a pre-allocated child socket that was never used)
 634	 */
 635	if (sk == NULL)
 636		return 0;
 637
 638	tsk = tipc_sk(sk);
 639	lock_sock(sk);
 640
 641	trace_tipc_sk_release(sk, NULL, TIPC_DUMP_ALL, " ");
 642	__tipc_shutdown(sock, TIPC_ERR_NO_PORT);
 643	sk->sk_shutdown = SHUTDOWN_MASK;
 644	tipc_sk_leave(tsk);
 645	tipc_sk_withdraw(tsk, NULL);
 646	__skb_queue_purge(&tsk->mc_method.deferredq);
 647	sk_stop_timer(sk, &sk->sk_timer);
 648	tipc_sk_remove(tsk);
 649
 650	sock_orphan(sk);
 651	/* Reject any messages that accumulated in backlog queue */
 652	release_sock(sk);
 653	tipc_dest_list_purge(&tsk->cong_links);
 654	tsk->cong_link_cnt = 0;
 655	call_rcu(&tsk->rcu, tipc_sk_callback);
 656	sock->sk = NULL;
 657
 658	return 0;
 659}
 660
 661/**
 662 * __tipc_bind - associate or disassociate TIPC name(s) with a socket
 663 * @sock: socket structure
 664 * @skaddr: socket address describing name(s) and desired operation
 665 * @alen: size of socket address data structure
 666 *
 667 * Name and name sequence binding are indicated using a positive scope value;
 668 * a negative scope value unbinds the specified name.  Specifying no name
 669 * (i.e. a socket address length of 0) unbinds all names from the socket.
 670 *
 671 * Return: 0 on success, errno otherwise
 672 *
 673 * NOTE: This routine doesn't need to take the socket lock since it doesn't
 674 *       access any non-constant socket information.
 675 */
 676static int __tipc_bind(struct socket *sock, struct sockaddr *skaddr, int alen)
 677{
 678	struct tipc_uaddr *ua = (struct tipc_uaddr *)skaddr;
 679	struct tipc_sock *tsk = tipc_sk(sock->sk);
 680	bool unbind = false;
 681
 682	if (unlikely(!alen))
 683		return tipc_sk_withdraw(tsk, NULL);
 684
 685	if (ua->addrtype == TIPC_SERVICE_ADDR) {
 686		ua->addrtype = TIPC_SERVICE_RANGE;
 687		ua->sr.upper = ua->sr.lower;
 688	}
 689	if (ua->scope < 0) {
 690		unbind = true;
 691		ua->scope = -ua->scope;
 692	}
 693	/* Users may still use deprecated TIPC_ZONE_SCOPE */
 694	if (ua->scope != TIPC_NODE_SCOPE)
 695		ua->scope = TIPC_CLUSTER_SCOPE;
 696
 697	if (tsk->group)
 698		return -EACCES;
 699
 700	if (unbind)
 701		return tipc_sk_withdraw(tsk, ua);
 702	return tipc_sk_publish(tsk, ua);
 703}
 704
 705int tipc_sk_bind(struct socket *sock, struct sockaddr *skaddr, int alen)
 706{
 707	int res;
 708
 709	lock_sock(sock->sk);
 710	res = __tipc_bind(sock, skaddr, alen);
 711	release_sock(sock->sk);
 712	return res;
 713}
 714
 715static int tipc_bind(struct socket *sock, struct sockaddr *skaddr, int alen)
 716{
 717	struct tipc_uaddr *ua = (struct tipc_uaddr *)skaddr;
 718	u32 atype = ua->addrtype;
 719
 720	if (alen) {
 721		if (!tipc_uaddr_valid(ua, alen))
 722			return -EINVAL;
 723		if (atype == TIPC_SOCKET_ADDR)
 724			return -EAFNOSUPPORT;
 725		if (ua->sr.type < TIPC_RESERVED_TYPES) {
 726			pr_warn_once("Can't bind to reserved service type %u\n",
 727				     ua->sr.type);
 728			return -EACCES;
 729		}
 730	}
 731	return tipc_sk_bind(sock, skaddr, alen);
 732}
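/* User-space sketch of a bind() call that passes the checks above
 * (service type 18888 is an arbitrary example; types below
 * TIPC_RESERVED_TYPES would be rejected with -EACCES):
 *
 *	struct sockaddr_tipc sa = {
 *		.family = AF_TIPC,
 *		.addrtype = TIPC_SERVICE_RANGE,
 *		.scope = TIPC_CLUSTER_SCOPE,
 *		.addr.nameseq = { .type = 18888, .lower = 0, .upper = 99 },
 *	};
 *
 *	bind(fd, (struct sockaddr *)&sa, sizeof(sa));
 */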
 733
 734/**
 735 * tipc_getname - get port ID of socket or peer socket
 736 * @sock: socket structure
 737 * @uaddr: area for returned socket address
 738 * @peer: 0 = own ID, 1 = current peer ID, 2 = current/former peer ID
 739 *
 740 * Return: 0 on success, errno otherwise
 741 *
 742 * NOTE: This routine doesn't need to take the socket lock since it only
 743 *       accesses socket information that is unchanging (or which changes in
 744 *       a completely predictable manner).
 745 */
 746static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
 747			int peer)
 748{
 749	struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
 750	struct sock *sk = sock->sk;
 751	struct tipc_sock *tsk = tipc_sk(sk);
 752
 753	memset(addr, 0, sizeof(*addr));
 754	if (peer) {
 755		if ((!tipc_sk_connected(sk)) &&
 756		    ((peer != 2) || (sk->sk_state != TIPC_DISCONNECTING)))
 757			return -ENOTCONN;
 758		addr->addr.id.ref = tsk_peer_port(tsk);
 759		addr->addr.id.node = tsk_peer_node(tsk);
 760	} else {
 761		addr->addr.id.ref = tsk->portid;
 762		addr->addr.id.node = tipc_own_addr(sock_net(sk));
 763	}
 764
 765	addr->addrtype = TIPC_SOCKET_ADDR;
 766	addr->family = AF_TIPC;
 767	addr->scope = 0;
 768	addr->addr.name.domain = 0;
 769
 770	return sizeof(*addr);
 771}
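/* User-space sketch: getsockname() and getpeername() both resolve to the
 * function above, e.g.:
 *
 *	struct sockaddr_tipc self;
 *	socklen_t alen = sizeof(self);
 *
 *	getsockname(fd, (struct sockaddr *)&self, &alen);
 *	// self.addr.id.ref is the port, self.addr.id.node the node address
 */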
 772
 773/**
 774 * tipc_poll - read and possibly block on pollmask
 775 * @file: file structure associated with the socket
 776 * @sock: socket for which to calculate the poll bits
 777 * @wait: poll table supplied by the caller
 778 *
 779 * Return: pollmask value
 780 *
 781 * COMMENTARY:
 782 * It appears that the usual socket locking mechanisms are not useful here
 783 * since the pollmask info is potentially out-of-date the moment this routine
 784 * exits.  TCP and other protocols seem to rely on higher level poll routines
 785 * to handle any preventable race conditions, so TIPC will do the same ...
 786 *
 787 * IMPORTANT: The fact that a read or write operation is indicated does NOT
 788 * imply that the operation will succeed, merely that it should be performed
 789 * and will not block.
 790 */
 791static __poll_t tipc_poll(struct file *file, struct socket *sock,
 792			      poll_table *wait)
 793{
 794	struct sock *sk = sock->sk;
 795	struct tipc_sock *tsk = tipc_sk(sk);
 796	__poll_t revents = 0;
 797
 798	sock_poll_wait(file, sock, wait);
 799	trace_tipc_sk_poll(sk, NULL, TIPC_DUMP_ALL, " ");
 800
 801	if (sk->sk_shutdown & RCV_SHUTDOWN)
 802		revents |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
 803	if (sk->sk_shutdown == SHUTDOWN_MASK)
 804		revents |= EPOLLHUP;
 805
 806	switch (sk->sk_state) {
 807	case TIPC_ESTABLISHED:
 808		if (!tsk->cong_link_cnt && !tsk_conn_cong(tsk))
 809			revents |= EPOLLOUT;
 810		fallthrough;
 811	case TIPC_LISTEN:
 812	case TIPC_CONNECTING:
 813		if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
 814			revents |= EPOLLIN | EPOLLRDNORM;
 815		break;
 816	case TIPC_OPEN:
 817		if (tsk->group_is_open && !tsk->cong_link_cnt)
 818			revents |= EPOLLOUT;
 819		if (!tipc_sk_type_connectionless(sk))
 820			break;
 821		if (skb_queue_empty_lockless(&sk->sk_receive_queue))
 822			break;
 823		revents |= EPOLLIN | EPOLLRDNORM;
 824		break;
 825	case TIPC_DISCONNECTING:
 826		revents = EPOLLIN | EPOLLRDNORM | EPOLLHUP;
 827		break;
 828	}
 829	return revents;
 830}
 831
 832/**
 833 * tipc_sendmcast - send multicast message
 834 * @sock: socket structure
 835 * @ua: destination address struct
 836 * @msg: message to send
 837 * @dlen: length of data to send
 838 * @timeout: timeout to wait for wakeup
 839 *
 840 * Called from function tipc_sendmsg(), which has done all sanity checks
 841 * Return: the number of bytes sent on success, or errno
 842 */
 843static int tipc_sendmcast(struct  socket *sock, struct tipc_uaddr *ua,
 844			  struct msghdr *msg, size_t dlen, long timeout)
 845{
 846	struct sock *sk = sock->sk;
 847	struct tipc_sock *tsk = tipc_sk(sk);
 848	struct tipc_msg *hdr = &tsk->phdr;
 849	struct net *net = sock_net(sk);
 850	int mtu = tipc_bcast_get_mtu(net);
 851	struct sk_buff_head pkts;
 852	struct tipc_nlist dsts;
 853	int rc;
 854
 855	if (tsk->group)
 856		return -EACCES;
 857
 858	/* Block or return if any destination link is congested */
 859	rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt);
 860	if (unlikely(rc))
 861		return rc;
 862
 863	/* Lookup destination nodes */
 864	tipc_nlist_init(&dsts, tipc_own_addr(net));
 865	tipc_nametbl_lookup_mcast_nodes(net, ua, &dsts);
 866	if (!dsts.local && !dsts.remote)
 867		return -EHOSTUNREACH;
 868
 869	/* Build message header */
 870	msg_set_type(hdr, TIPC_MCAST_MSG);
 871	msg_set_hdr_sz(hdr, MCAST_H_SIZE);
 872	msg_set_lookup_scope(hdr, TIPC_CLUSTER_SCOPE);
 873	msg_set_destport(hdr, 0);
 874	msg_set_destnode(hdr, 0);
 875	msg_set_nametype(hdr, ua->sr.type);
 876	msg_set_namelower(hdr, ua->sr.lower);
 877	msg_set_nameupper(hdr, ua->sr.upper);
 878
 879	/* Build message as chain of buffers */
 880	__skb_queue_head_init(&pkts);
 881	rc = tipc_msg_build(hdr, msg, 0, dlen, mtu, &pkts);
 882
 883	/* Send message if build was successful */
 884	if (unlikely(rc == dlen)) {
 885		trace_tipc_sk_sendmcast(sk, skb_peek(&pkts),
 886					TIPC_DUMP_SK_SNDQ, " ");
 887		rc = tipc_mcast_xmit(net, &pkts, &tsk->mc_method, &dsts,
 888				     &tsk->cong_link_cnt);
 889	}
 890
 891	tipc_nlist_purge(&dsts);
 892
 893	return rc ? rc : dlen;
 894}
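/* User-space sketch: a sendto() with a service-range destination is what
 * selects this multicast path (type 18888 is an arbitrary example):
 *
 *	struct sockaddr_tipc dst = {
 *		.family = AF_TIPC,
 *		.addrtype = TIPC_SERVICE_RANGE,
 *		.addr.nameseq = { .type = 18888, .lower = 0, .upper = 99 },
 *	};
 *
 *	sendto(fd, buf, len, 0, (struct sockaddr *)&dst, sizeof(dst));
 */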
 895
 896/**
 897 * tipc_send_group_msg - send a message to a member in the group
 898 * @net: network namespace
 899 * @tsk: tipc socket
 900 * @m: message to send
 901 * @mb: group member
 902 * @dnode: destination node
 903 * @dport: destination port
 904 * @dlen: total length of message data
 905 */
 906static int tipc_send_group_msg(struct net *net, struct tipc_sock *tsk,
 907			       struct msghdr *m, struct tipc_member *mb,
 908			       u32 dnode, u32 dport, int dlen)
 909{
 910	u16 bc_snd_nxt = tipc_group_bc_snd_nxt(tsk->group);
 911	struct tipc_mc_method *method = &tsk->mc_method;
 912	int blks = tsk_blocks(GROUP_H_SIZE + dlen);
 913	struct tipc_msg *hdr = &tsk->phdr;
 914	struct sk_buff_head pkts;
 915	int mtu, rc;
 916
 917	/* Complete message header */
 918	msg_set_type(hdr, TIPC_GRP_UCAST_MSG);
 919	msg_set_hdr_sz(hdr, GROUP_H_SIZE);
 920	msg_set_destport(hdr, dport);
 921	msg_set_destnode(hdr, dnode);
 922	msg_set_grp_bc_seqno(hdr, bc_snd_nxt);
 923
 924	/* Build message as chain of buffers */
 925	__skb_queue_head_init(&pkts);
 926	mtu = tipc_node_get_mtu(net, dnode, tsk->portid, false);
 927	rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
 928	if (unlikely(rc != dlen))
 929		return rc;
 930
 931	/* Send message */
 932	rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
 933	if (unlikely(rc == -ELINKCONG)) {
 934		tipc_dest_push(&tsk->cong_links, dnode, 0);
 935		tsk->cong_link_cnt++;
 936	}
 937
 938	/* Update send window */
 939	tipc_group_update_member(mb, blks);
 940
 941	/* A broadcast sent within next EXPIRE period must follow same path */
 942	method->rcast = true;
 943	method->mandatory = true;
 944	return dlen;
 945}
 946
 947/**
 948 * tipc_send_group_unicast - send message to a member in the group
 949 * @sock: socket structure
 950 * @m: message to send
 951 * @dlen: total length of message data
 952 * @timeout: timeout to wait for wakeup
 953 *
 954 * Called from function tipc_sendmsg(), which has done all sanity checks
 955 * Return: the number of bytes sent on success, or errno
 956 */
 957static int tipc_send_group_unicast(struct socket *sock, struct msghdr *m,
 958				   int dlen, long timeout)
 959{
 960	struct sock *sk = sock->sk;
 961	struct tipc_uaddr *ua = (struct tipc_uaddr *)m->msg_name;
 962	int blks = tsk_blocks(GROUP_H_SIZE + dlen);
 963	struct tipc_sock *tsk = tipc_sk(sk);
 964	struct net *net = sock_net(sk);
 965	struct tipc_member *mb = NULL;
 966	u32 node, port;
 967	int rc;
 968
 969	node = ua->sk.node;
 970	port = ua->sk.ref;
 971	if (!port && !node)
 972		return -EHOSTUNREACH;
 973
 974	/* Block or return if destination link or member is congested */
 975	rc = tipc_wait_for_cond(sock, &timeout,
 976				!tipc_dest_find(&tsk->cong_links, node, 0) &&
 977				tsk->group &&
 978				!tipc_group_cong(tsk->group, node, port, blks,
 979						 &mb));
 980	if (unlikely(rc))
 981		return rc;
 982
 983	if (unlikely(!mb))
 984		return -EHOSTUNREACH;
 985
 986	rc = tipc_send_group_msg(net, tsk, m, mb, node, port, dlen);
 987
 988	return rc ? rc : dlen;
 989}
 990
 991/**
 992 * tipc_send_group_anycast - send message to any member with given identity
 993 * @sock: socket structure
 994 * @m: message to send
 995 * @dlen: total length of message data
 996 * @timeout: timeout to wait for wakeup
 997 *
 998 * Called from function tipc_sendmsg(), which has done all sanity checks
 999 * Return: the number of bytes sent on success, or errno
1000 */
1001static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m,
1002				   int dlen, long timeout)
1003{
1004	struct tipc_uaddr *ua = (struct tipc_uaddr *)m->msg_name;
1005	struct sock *sk = sock->sk;
1006	struct tipc_sock *tsk = tipc_sk(sk);
1007	struct list_head *cong_links = &tsk->cong_links;
1008	int blks = tsk_blocks(GROUP_H_SIZE + dlen);
1009	struct tipc_msg *hdr = &tsk->phdr;
1010	struct tipc_member *first = NULL;
1011	struct tipc_member *mbr = NULL;
1012	struct net *net = sock_net(sk);
1013	u32 node, port, exclude;
1014	struct list_head dsts;
1015	int lookups = 0;
1016	int dstcnt, rc;
1017	bool cong;
1018
1019	INIT_LIST_HEAD(&dsts);
1020	ua->sa.type = msg_nametype(hdr);
1021	ua->scope = msg_lookup_scope(hdr);
1022
1023	while (++lookups < 4) {
1024		exclude = tipc_group_exclude(tsk->group);
1025
1026		first = NULL;
1027
1028		/* Look for a non-congested destination member, if any */
1029		while (1) {
1030			if (!tipc_nametbl_lookup_group(net, ua, &dsts, &dstcnt,
1031						       exclude, false))
1032				return -EHOSTUNREACH;
1033			tipc_dest_pop(&dsts, &node, &port);
1034			cong = tipc_group_cong(tsk->group, node, port, blks,
1035					       &mbr);
1036			if (!cong)
1037				break;
1038			if (mbr == first)
1039				break;
1040			if (!first)
1041				first = mbr;
1042		}
1043
1044		/* Start over if destination was not in member list */
1045		if (unlikely(!mbr))
1046			continue;
1047
1048		if (likely(!cong && !tipc_dest_find(cong_links, node, 0)))
1049			break;
1050
1051		/* Block or return if destination link or member is congested */
1052		rc = tipc_wait_for_cond(sock, &timeout,
1053					!tipc_dest_find(cong_links, node, 0) &&
1054					tsk->group &&
1055					!tipc_group_cong(tsk->group, node, port,
1056							 blks, &mbr));
1057		if (unlikely(rc))
1058			return rc;
1059
1060		/* Send, unless destination disappeared while waiting */
1061		if (likely(mbr))
1062			break;
1063	}
1064
1065	if (unlikely(lookups >= 4))
1066		return -EHOSTUNREACH;
1067
1068	rc = tipc_send_group_msg(net, tsk, m, mbr, node, port, dlen);
1069
1070	return rc ? rc : dlen;
1071}
1072
1073/**
1074 * tipc_send_group_bcast - send message to all members in communication group
1075 * @sock: socket structure
1076 * @m: message to send
1077 * @dlen: total length of message data
1078 * @timeout: timeout to wait for wakeup
1079 *
1080 * Called from function tipc_sendmsg(), which has done all sanity checks
1081 * Return: the number of bytes sent on success, or errno
1082 */
1083static int tipc_send_group_bcast(struct socket *sock, struct msghdr *m,
1084				 int dlen, long timeout)
1085{
1086	struct tipc_uaddr *ua = (struct tipc_uaddr *)m->msg_name;
1087	struct sock *sk = sock->sk;
1088	struct net *net = sock_net(sk);
1089	struct tipc_sock *tsk = tipc_sk(sk);
1090	struct tipc_nlist *dsts;
1091	struct tipc_mc_method *method = &tsk->mc_method;
1092	bool ack = method->mandatory && method->rcast;
1093	int blks = tsk_blocks(MCAST_H_SIZE + dlen);
1094	struct tipc_msg *hdr = &tsk->phdr;
1095	int mtu = tipc_bcast_get_mtu(net);
1096	struct sk_buff_head pkts;
1097	int rc = -EHOSTUNREACH;
1098
1099	/* Block or return if any destination link or member is congested */
1100	rc = tipc_wait_for_cond(sock, &timeout,
1101				!tsk->cong_link_cnt && tsk->group &&
1102				!tipc_group_bc_cong(tsk->group, blks));
1103	if (unlikely(rc))
1104		return rc;
1105
1106	dsts = tipc_group_dests(tsk->group);
1107	if (!dsts->local && !dsts->remote)
1108		return -EHOSTUNREACH;
1109
1110	/* Complete message header */
1111	if (ua) {
1112		msg_set_type(hdr, TIPC_GRP_MCAST_MSG);
1113		msg_set_nameinst(hdr, ua->sa.instance);
1114	} else {
1115		msg_set_type(hdr, TIPC_GRP_BCAST_MSG);
1116		msg_set_nameinst(hdr, 0);
1117	}
1118	msg_set_hdr_sz(hdr, GROUP_H_SIZE);
1119	msg_set_destport(hdr, 0);
1120	msg_set_destnode(hdr, 0);
1121	msg_set_grp_bc_seqno(hdr, tipc_group_bc_snd_nxt(tsk->group));
1122
1123	/* Avoid getting stuck with repeated forced replicasts */
1124	msg_set_grp_bc_ack_req(hdr, ack);
1125
1126	/* Build message as chain of buffers */
1127	__skb_queue_head_init(&pkts);
1128	rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
1129	if (unlikely(rc != dlen))
1130		return rc;
1131
1132	/* Send message */
1133	rc = tipc_mcast_xmit(net, &pkts, method, dsts, &tsk->cong_link_cnt);
1134	if (unlikely(rc))
1135		return rc;
1136
1137	/* Update broadcast sequence number and send windows */
1138	tipc_group_update_bc_members(tsk->group, blks, ack);
1139
1140	/* Broadcast link is now free to choose method for next broadcast */
1141	method->mandatory = false;
1142	method->expires = jiffies;
1143
1144	return dlen;
1145}
1146
1147/**
1148 * tipc_send_group_mcast - send message to all members with given identity
1149 * @sock: socket structure
1150 * @m: message to send
1151 * @dlen: total length of message data
1152 * @timeout: timeout to wait for wakeup
1153 *
1154 * Called from function tipc_sendmsg(), which has done all sanity checks
1155 * Return: the number of bytes sent on success, or errno
1156 */
1157static int tipc_send_group_mcast(struct socket *sock, struct msghdr *m,
1158				 int dlen, long timeout)
1159{
1160	struct tipc_uaddr *ua = (struct tipc_uaddr *)m->msg_name;
1161	struct sock *sk = sock->sk;
1162	struct tipc_sock *tsk = tipc_sk(sk);
1163	struct tipc_group *grp = tsk->group;
1164	struct tipc_msg *hdr = &tsk->phdr;
1165	struct net *net = sock_net(sk);
1166	struct list_head dsts;
1167	u32 dstcnt, exclude;
1168
1169	INIT_LIST_HEAD(&dsts);
1170	ua->sa.type = msg_nametype(hdr);
1171	ua->scope = msg_lookup_scope(hdr);
1172	exclude = tipc_group_exclude(grp);
1173
1174	if (!tipc_nametbl_lookup_group(net, ua, &dsts, &dstcnt, exclude, true))
1175		return -EHOSTUNREACH;
1176
1177	if (dstcnt == 1) {
1178		tipc_dest_pop(&dsts, &ua->sk.node, &ua->sk.ref);
1179		return tipc_send_group_unicast(sock, m, dlen, timeout);
1180	}
1181
1182	tipc_dest_list_purge(&dsts);
1183	return tipc_send_group_bcast(sock, m, dlen, timeout);
1184}
1185
1186/**
1187 * tipc_sk_mcast_rcv - Deliver multicast messages to all destination sockets
1188 * @net: the associated network namespace
1189 * @arrvq: queue with arriving messages, to be cloned after destination lookup
1190 * @inputq: queue with cloned messages, delivered to socket after dest lookup
1191 *
1192 * Multi-threaded: parallel calls with reference to same queues may occur
1193 */
1194void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
1195		       struct sk_buff_head *inputq)
1196{
1197	u32 self = tipc_own_addr(net);
1198	struct sk_buff *skb, *_skb;
1199	u32 portid, onode;
1200	struct sk_buff_head tmpq;
1201	struct list_head dports;
1202	struct tipc_msg *hdr;
1203	struct tipc_uaddr ua;
1204	int user, mtyp, hlen;
1205
1206	__skb_queue_head_init(&tmpq);
1207	INIT_LIST_HEAD(&dports);
1208	ua.addrtype = TIPC_SERVICE_RANGE;
1209
1210	/* tipc_skb_peek() increments the head skb's reference counter */
1211	skb = tipc_skb_peek(arrvq, &inputq->lock);
1212	for (; skb; skb = tipc_skb_peek(arrvq, &inputq->lock)) {
1213		hdr = buf_msg(skb);
1214		user = msg_user(hdr);
1215		mtyp = msg_type(hdr);
1216		hlen = skb_headroom(skb) + msg_hdr_sz(hdr);
1217		onode = msg_orignode(hdr);
1218		ua.sr.type = msg_nametype(hdr);
1219		ua.sr.lower = msg_namelower(hdr);
1220		ua.sr.upper = msg_nameupper(hdr);
1221		if (onode == self)
1222			ua.scope = TIPC_ANY_SCOPE;
1223		else
1224			ua.scope = TIPC_CLUSTER_SCOPE;
1225
1226		if (mtyp == TIPC_GRP_UCAST_MSG || user == GROUP_PROTOCOL) {
1227			spin_lock_bh(&inputq->lock);
1228			if (skb_peek(arrvq) == skb) {
1229				__skb_dequeue(arrvq);
1230				__skb_queue_tail(inputq, skb);
1231			}
1232			kfree_skb(skb);
1233			spin_unlock_bh(&inputq->lock);
1234			continue;
1235		}
1236
1237		/* Group messages require exact scope match */
1238		if (msg_in_group(hdr)) {
1239			ua.sr.lower = 0;
1240			ua.sr.upper = ~0;
1241			ua.scope = msg_lookup_scope(hdr);
1242		}
1243
1244		/* Create destination port list: */
1245		tipc_nametbl_lookup_mcast_sockets(net, &ua, &dports);
1246
1247		/* Clone message per destination */
1248		while (tipc_dest_pop(&dports, NULL, &portid)) {
1249			_skb = __pskb_copy(skb, hlen, GFP_ATOMIC);
1250			if (_skb) {
1251				msg_set_destport(buf_msg(_skb), portid);
1252				__skb_queue_tail(&tmpq, _skb);
1253				continue;
1254			}
1255			pr_warn("Failed to clone mcast rcv buffer\n");
1256		}
1257		/* Append clones to inputq only if skb is still head of arrvq */
1258		spin_lock_bh(&inputq->lock);
1259		if (skb_peek(arrvq) == skb) {
1260			skb_queue_splice_tail_init(&tmpq, inputq);
1261			/* Decrement the skb's refcnt */
1262			kfree_skb(__skb_dequeue(arrvq));
1263		}
1264		spin_unlock_bh(&inputq->lock);
1265		__skb_queue_purge(&tmpq);
1266		kfree_skb(skb);
1267	}
1268	tipc_sk_rcv(net, inputq);
1269}
1270
1271/* tipc_sk_push_backlog(): send accumulated buffers in socket write queue
1272 *                         when socket is in Nagle mode
1273 */
1274static void tipc_sk_push_backlog(struct tipc_sock *tsk, bool nagle_ack)
1275{
1276	struct sk_buff_head *txq = &tsk->sk.sk_write_queue;
1277	struct sk_buff *skb = skb_peek_tail(txq);
1278	struct net *net = sock_net(&tsk->sk);
1279	u32 dnode = tsk_peer_node(tsk);
1280	int rc;
1281
1282	if (nagle_ack) {
1283		tsk->pkt_cnt += skb_queue_len(txq);
1284		if (!tsk->pkt_cnt || tsk->msg_acc / tsk->pkt_cnt < 2) {
1285			tsk->oneway = 0;
1286			if (tsk->nagle_start < NAGLE_START_MAX)
1287				tsk->nagle_start *= 2;
1288			tsk->expect_ack = false;
1289			pr_debug("tsk %10u: bad nagle %u -> %u, next start %u!\n",
1290				 tsk->portid, tsk->msg_acc, tsk->pkt_cnt,
1291				 tsk->nagle_start);
1292		} else {
1293			tsk->nagle_start = NAGLE_START_INIT;
1294			if (skb) {
1295				msg_set_ack_required(buf_msg(skb));
1296				tsk->expect_ack = true;
1297			} else {
1298				tsk->expect_ack = false;
1299			}
1300		}
1301		tsk->msg_acc = 0;
1302		tsk->pkt_cnt = 0;
1303	}
1304
1305	if (!skb || tsk->cong_link_cnt)
1306		return;
1307
1308	/* Do not send SYN again after congestion */
1309	if (msg_is_syn(buf_msg(skb)))
1310		return;
1311
1312	if (tsk->msg_acc)
1313		tsk->pkt_cnt += skb_queue_len(txq);
1314	tsk->snt_unacked += tsk->snd_backlog;
1315	tsk->snd_backlog = 0;
1316	rc = tipc_node_xmit(net, txq, dnode, tsk->portid);
1317	if (rc == -ELINKCONG)
1318		tsk->cong_link_cnt = 1;
1319}
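/* Illustrative reading of the ack handling above: if fewer than two
 * messages were accumulated per sent packet (msg_acc / pkt_cnt < 2),
 * batching is judged unprofitable, so oneway is reset and nagle_start
 * doubles (4 -> 8 -> ... -> 1024, per NAGLE_START_INIT/NAGLE_START_MAX),
 * requiring progressively more one-directional traffic before Nagle
 * mode is entered again.
 */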
1320
1321/**
1322 * tipc_sk_conn_proto_rcv - receive a connection mng protocol message
1323 * @tsk: receiving socket
1324 * @skb: pointer to message buffer.
1325 * @inputq: buffer list containing the buffers
1326 * @xmitq: output message area
1327 */
1328static void tipc_sk_conn_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
1329				   struct sk_buff_head *inputq,
1330				   struct sk_buff_head *xmitq)
1331{
1332	struct tipc_msg *hdr = buf_msg(skb);
1333	u32 onode = tsk_own_node(tsk);
1334	struct sock *sk = &tsk->sk;
1335	int mtyp = msg_type(hdr);
1336	bool was_cong;
1337
1338	/* Ignore if connection cannot be validated: */
1339	if (!tsk_peer_msg(tsk, hdr)) {
1340		trace_tipc_sk_drop_msg(sk, skb, TIPC_DUMP_NONE, "@proto_rcv!");
1341		goto exit;
1342	}
1343
1344	if (unlikely(msg_errcode(hdr))) {
1345		tipc_set_sk_state(sk, TIPC_DISCONNECTING);
1346		tipc_node_remove_conn(sock_net(sk), tsk_peer_node(tsk),
1347				      tsk_peer_port(tsk));
1348		sk->sk_state_change(sk);
1349
1350		/* State change is ignored if socket already awake,
1351		 * - convert msg to abort msg and add to inqueue
1352		 */
1353		msg_set_user(hdr, TIPC_CRITICAL_IMPORTANCE);
1354		msg_set_type(hdr, TIPC_CONN_MSG);
1355		msg_set_size(hdr, BASIC_H_SIZE);
1356		msg_set_hdr_sz(hdr, BASIC_H_SIZE);
1357		__skb_queue_tail(inputq, skb);
1358		return;
1359	}
1360
1361	tsk->probe_unacked = false;
1362
1363	if (mtyp == CONN_PROBE) {
1364		msg_set_type(hdr, CONN_PROBE_REPLY);
1365		if (tipc_msg_reverse(onode, &skb, TIPC_OK))
1366			__skb_queue_tail(xmitq, skb);
1367		return;
1368	} else if (mtyp == CONN_ACK) {
1369		was_cong = tsk_conn_cong(tsk);
1370		tipc_sk_push_backlog(tsk, msg_nagle_ack(hdr));
1371		tsk->snt_unacked -= msg_conn_ack(hdr);
1372		if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
1373			tsk->snd_win = msg_adv_win(hdr);
1374		if (was_cong && !tsk_conn_cong(tsk))
1375			sk->sk_write_space(sk);
1376	} else if (mtyp != CONN_PROBE_REPLY) {
1377		pr_warn("Received unknown CONN_PROTO msg\n");
1378	}
1379exit:
1380	kfree_skb(skb);
1381}
1382
1383/**
1384 * tipc_sendmsg - send message in connectionless manner
1385 * @sock: socket structure
1386 * @m: message to send
1387 * @dsz: amount of user data to be sent
1388 *
1389 * The message must have a destination specified explicitly.
1390 * Used for SOCK_RDM and SOCK_DGRAM messages,
1391 * and for 'SYN' messages on SOCK_SEQPACKET and SOCK_STREAM connections.
1392 * (Note: 'SYN+' is prohibited on SOCK_STREAM.)
1393 *
1394 * Return: the number of bytes sent on success, or errno otherwise
1395 */
1396static int tipc_sendmsg(struct socket *sock,
1397			struct msghdr *m, size_t dsz)
1398{
1399	struct sock *sk = sock->sk;
1400	int ret;
1401
1402	lock_sock(sk);
1403	ret = __tipc_sendmsg(sock, m, dsz);
1404	release_sock(sk);
1405
1406	return ret;
1407}
1408
1409static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
1410{
1411	struct sock *sk = sock->sk;
1412	struct net *net = sock_net(sk);
1413	struct tipc_sock *tsk = tipc_sk(sk);
1414	struct tipc_uaddr *ua = (struct tipc_uaddr *)m->msg_name;
1415	long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
1416	struct list_head *clinks = &tsk->cong_links;
1417	bool syn = !tipc_sk_type_connectionless(sk);
1418	struct tipc_group *grp = tsk->group;
1419	struct tipc_msg *hdr = &tsk->phdr;
1420	struct tipc_socket_addr skaddr;
1421	struct sk_buff_head pkts;
1422	int atype, mtu, rc;
1423
1424	if (unlikely(dlen > TIPC_MAX_USER_MSG_SIZE))
1425		return -EMSGSIZE;
1426
1427	if (ua) {
1428		if (!tipc_uaddr_valid(ua, m->msg_namelen))
1429			return -EINVAL;
1430		atype = ua->addrtype;
1431	}
1432
1433	/* If socket belongs to a communication group follow other paths */
1434	if (grp) {
1435		if (!ua)
1436			return tipc_send_group_bcast(sock, m, dlen, timeout);
1437		if (atype == TIPC_SERVICE_ADDR)
1438			return tipc_send_group_anycast(sock, m, dlen, timeout);
1439		if (atype == TIPC_SOCKET_ADDR)
1440			return tipc_send_group_unicast(sock, m, dlen, timeout);
1441		if (atype == TIPC_SERVICE_RANGE)
1442			return tipc_send_group_mcast(sock, m, dlen, timeout);
1443		return -EINVAL;
1444	}
1445
1446	if (!ua) {
1447		ua = (struct tipc_uaddr *)&tsk->peer;
1448		if (!syn && ua->family != AF_TIPC)
1449			return -EDESTADDRREQ;
1450		atype = ua->addrtype;
1451	}
1452
1453	if (unlikely(syn)) {
1454		if (sk->sk_state == TIPC_LISTEN)
1455			return -EPIPE;
1456		if (sk->sk_state != TIPC_OPEN)
1457			return -EISCONN;
1458		if (tsk->published)
1459			return -EOPNOTSUPP;
1460		if (atype == TIPC_SERVICE_ADDR)
1461			tsk->conn_addrtype = atype;
1462		msg_set_syn(hdr, 1);
1463	}
1464
1465	memset(&skaddr, 0, sizeof(skaddr));
1466
1467	/* Determine destination */
1468	if (atype == TIPC_SERVICE_RANGE) {
1469		return tipc_sendmcast(sock, ua, m, dlen, timeout);
1470	} else if (atype == TIPC_SERVICE_ADDR) {
1471		skaddr.node = ua->lookup_node;
1472		ua->scope = tipc_node2scope(skaddr.node);
1473		if (!tipc_nametbl_lookup_anycast(net, ua, &skaddr))
1474			return -EHOSTUNREACH;
1475	} else if (atype == TIPC_SOCKET_ADDR) {
1476		skaddr = ua->sk;
1477	} else {
1478		return -EINVAL;
1479	}
1480
1481	/* Block or return if destination link is congested */
1482	rc = tipc_wait_for_cond(sock, &timeout,
1483				!tipc_dest_find(clinks, skaddr.node, 0));
1484	if (unlikely(rc))
1485		return rc;
1486
1487	/* Finally build message header */
1488	msg_set_destnode(hdr, skaddr.node);
1489	msg_set_destport(hdr, skaddr.ref);
1490	if (atype == TIPC_SERVICE_ADDR) {
1491		msg_set_type(hdr, TIPC_NAMED_MSG);
1492		msg_set_hdr_sz(hdr, NAMED_H_SIZE);
1493		msg_set_nametype(hdr, ua->sa.type);
1494		msg_set_nameinst(hdr, ua->sa.instance);
1495		msg_set_lookup_scope(hdr, ua->scope);
1496	} else { /* TIPC_SOCKET_ADDR */
1497		msg_set_type(hdr, TIPC_DIRECT_MSG);
1498		msg_set_lookup_scope(hdr, 0);
1499		msg_set_hdr_sz(hdr, BASIC_H_SIZE);
1500	}
1501
1502	/* Add message body */
1503	__skb_queue_head_init(&pkts);
1504	mtu = tipc_node_get_mtu(net, skaddr.node, tsk->portid, true);
1505	rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
1506	if (unlikely(rc != dlen))
1507		return rc;
1508	if (unlikely(syn && !tipc_msg_skb_clone(&pkts, &sk->sk_write_queue))) {
1509		__skb_queue_purge(&pkts);
1510		return -ENOMEM;
1511	}
1512
1513	/* Send message */
1514	trace_tipc_sk_sendmsg(sk, skb_peek(&pkts), TIPC_DUMP_SK_SNDQ, " ");
1515	rc = tipc_node_xmit(net, &pkts, skaddr.node, tsk->portid);
1516	if (unlikely(rc == -ELINKCONG)) {
1517		tipc_dest_push(clinks, skaddr.node, 0);
1518		tsk->cong_link_cnt++;
1519		rc = 0;
1520	}
1521
1522	if (unlikely(syn && !rc)) {
1523		tipc_set_sk_state(sk, TIPC_CONNECTING);
1524		if (dlen && timeout) {
1525			timeout = msecs_to_jiffies(timeout);
1526			tipc_wait_for_connect(sock, &timeout);
1527		}
1528	}
1529
1530	return rc ? rc : dlen;
1531}
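/* User-space sketch of the anycast case handled above (TIPC_SERVICE_ADDR;
 * type/instance values are arbitrary examples):
 *
 *	struct sockaddr_tipc dst = {
 *		.family = AF_TIPC,
 *		.addrtype = TIPC_SERVICE_ADDR,
 *		.addr.name.name = { .type = 18888, .instance = 17 },
 *	};
 *
 *	sendto(fd, buf, len, 0, (struct sockaddr *)&dst, sizeof(dst));
 */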
1532
1533/**
1534 * tipc_sendstream - send stream-oriented data
1535 * @sock: socket structure
1536 * @m: data to send
1537 * @dsz: total length of data to be transmitted
1538 *
1539 * Used for SOCK_STREAM data.
1540 *
1541 * Return: the number of bytes sent on success (or partial success),
1542 * or errno if no data sent
1543 */
1544static int tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz)
1545{
1546	struct sock *sk = sock->sk;
1547	int ret;
1548
1549	lock_sock(sk);
1550	ret = __tipc_sendstream(sock, m, dsz);
1551	release_sock(sk);
1552
1553	return ret;
1554}
1555
1556static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen)
1557{
1558	struct sock *sk = sock->sk;
1559	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
1560	long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
1561	struct sk_buff_head *txq = &sk->sk_write_queue;
1562	struct tipc_sock *tsk = tipc_sk(sk);
1563	struct tipc_msg *hdr = &tsk->phdr;
1564	struct net *net = sock_net(sk);
1565	struct sk_buff *skb;
1566	u32 dnode = tsk_peer_node(tsk);
1567	int maxnagle = tsk->maxnagle;
1568	int maxpkt = tsk->max_pkt;
1569	int send, sent = 0;
1570	int blocks, rc = 0;
1571
1572	if (unlikely(dlen > INT_MAX))
1573		return -EMSGSIZE;
1574
1575	/* Handle implicit connection setup */
1576	if (unlikely(dest && sk->sk_state == TIPC_OPEN)) {
1577		rc = __tipc_sendmsg(sock, m, dlen);
1578		if (dlen && dlen == rc) {
1579			tsk->peer_caps = tipc_node_get_capabilities(net, dnode);
1580			tsk->snt_unacked = tsk_inc(tsk, dlen + msg_hdr_sz(hdr));
1581		}
1582		return rc;
1583	}
1584
1585	do {
1586		rc = tipc_wait_for_cond(sock, &timeout,
1587					(!tsk->cong_link_cnt &&
1588					 !tsk_conn_cong(tsk) &&
1589					 tipc_sk_connected(sk)));
1590		if (unlikely(rc))
1591			break;
1592		send = min_t(size_t, dlen - sent, TIPC_MAX_USER_MSG_SIZE);
1593		blocks = tsk->snd_backlog;
1594		if (tsk->oneway++ >= tsk->nagle_start && maxnagle &&
1595		    send <= maxnagle) {
1596			rc = tipc_msg_append(hdr, m, send, maxnagle, txq);
1597			if (unlikely(rc < 0))
1598				break;
1599			blocks += rc;
1600			tsk->msg_acc++;
1601			if (blocks <= 64 && tsk->expect_ack) {
1602				tsk->snd_backlog = blocks;
1603				sent += send;
1604				break;
1605			} else if (blocks > 64) {
1606				tsk->pkt_cnt += skb_queue_len(txq);
1607			} else {
1608				skb = skb_peek_tail(txq);
1609				if (skb) {
1610					msg_set_ack_required(buf_msg(skb));
1611					tsk->expect_ack = true;
1612				} else {
1613					tsk->expect_ack = false;
1614				}
1615				tsk->msg_acc = 0;
1616				tsk->pkt_cnt = 0;
1617			}
1618		} else {
1619			rc = tipc_msg_build(hdr, m, sent, send, maxpkt, txq);
1620			if (unlikely(rc != send))
1621				break;
1622			blocks += tsk_inc(tsk, send + MIN_H_SIZE);
1623		}
1624		trace_tipc_sk_sendstream(sk, skb_peek(txq),
1625					 TIPC_DUMP_SK_SNDQ, " ");
1626		rc = tipc_node_xmit(net, txq, dnode, tsk->portid);
1627		if (unlikely(rc == -ELINKCONG)) {
1628			tsk->cong_link_cnt = 1;
1629			rc = 0;
1630		}
1631		if (likely(!rc)) {
1632			tsk->snt_unacked += blocks;
1633			tsk->snd_backlog = 0;
1634			sent += send;
1635		}
1636	} while (sent < dlen && !rc);
1637
1638	return sent ? sent : rc;
1639}
1640
1641/**
1642 * tipc_send_packet - send a connection-oriented message
1643 * @sock: socket structure
1644 * @m: message to send
1645 * @dsz: length of data to be transmitted
1646 *
1647 * Used for SOCK_SEQPACKET messages.
1648 *
1649 * Return: the number of bytes sent on success, or errno otherwise
1650 */
1651static int tipc_send_packet(struct socket *sock, struct msghdr *m, size_t dsz)
1652{
1653	if (dsz > TIPC_MAX_USER_MSG_SIZE)
1654		return -EMSGSIZE;
1655
1656	return tipc_sendstream(sock, m, dsz);
1657}
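
/* Illustrative userspace sketch (not part of this file): unlike
 * SOCK_STREAM, a SOCK_SEQPACKET send fails up front with EMSGSIZE when
 * it exceeds TIPC_MAX_USER_MSG_SIZE:
 *
 *	int sd = socket(AF_TIPC, SOCK_SEQPACKET, 0);
 *
 *	if (send(sd, buf, len, 0) < 0)
 *		perror("send");
 */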
1658
1659/* tipc_sk_finish_conn - complete the setup of a connection
1660 */
1661static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port,
1662				u32 peer_node)
1663{
1664	struct sock *sk = &tsk->sk;
1665	struct net *net = sock_net(sk);
1666	struct tipc_msg *msg = &tsk->phdr;
1667
1668	msg_set_syn(msg, 0);
1669	msg_set_destnode(msg, peer_node);
1670	msg_set_destport(msg, peer_port);
1671	msg_set_type(msg, TIPC_CONN_MSG);
1672	msg_set_lookup_scope(msg, 0);
1673	msg_set_hdr_sz(msg, SHORT_H_SIZE);
1674
1675	sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV);
1676	tipc_set_sk_state(sk, TIPC_ESTABLISHED);
1677	tipc_node_add_conn(net, peer_node, tsk->portid, peer_port);
1678	tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid, true);
1679	tsk->peer_caps = tipc_node_get_capabilities(net, peer_node);
1680	tsk_set_nagle(tsk);
1681	__skb_queue_purge(&sk->sk_write_queue);
1682	if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
1683		return;
1684
1685	/* Fall back to message based flow control */
1686	tsk->rcv_win = FLOWCTL_MSG_WIN;
1687	tsk->snd_win = FLOWCTL_MSG_WIN;
1688}
1689
1690/**
1691 * tipc_sk_set_orig_addr - capture sender's address for received message
1692 * @m: descriptor for message info
1693 * @skb: received message
1694 *
1695 * Note: Address is not captured if not requested by receiver.
1696 */
1697static void tipc_sk_set_orig_addr(struct msghdr *m, struct sk_buff *skb)
1698{
1699	DECLARE_SOCKADDR(struct sockaddr_pair *, srcaddr, m->msg_name);
1700	struct tipc_msg *hdr = buf_msg(skb);
1701
1702	if (!srcaddr)
1703		return;
1704
1705	srcaddr->sock.family = AF_TIPC;
1706	srcaddr->sock.addrtype = TIPC_SOCKET_ADDR;
1707	srcaddr->sock.scope = 0;
1708	srcaddr->sock.addr.id.ref = msg_origport(hdr);
1709	srcaddr->sock.addr.id.node = msg_orignode(hdr);
1710	srcaddr->sock.addr.name.domain = 0;
1711	m->msg_namelen = sizeof(struct sockaddr_tipc);
1712
1713	if (!msg_in_group(hdr))
1714		return;
1715
1716	/* Group message users may also want to know sending member's id */
1717	srcaddr->member.family = AF_TIPC;
1718	srcaddr->member.addrtype = TIPC_SERVICE_ADDR;
1719	srcaddr->member.scope = 0;
1720	srcaddr->member.addr.name.name.type = msg_nametype(hdr);
1721	srcaddr->member.addr.name.name.instance = TIPC_SKB_CB(skb)->orig_member;
1722	srcaddr->member.addr.name.domain = 0;
1723	m->msg_namelen = sizeof(*srcaddr);
1724}
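
/* Illustrative userspace sketch (not part of this file; handle_member()
 * is a placeholder): a group receiver that also wants the sending
 * member's identity supplies room for a sockaddr pair and checks the
 * returned msg_namelen:
 *
 *	struct sockaddr_tipc src[2];
 *	struct msghdr m = { .msg_name = src, .msg_namelen = sizeof(src) };
 *
 *	recvmsg(sd, &m, 0);
 *	if (m.msg_namelen == sizeof(src))
 *		handle_member(&src[1]);
 */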
1725
1726/**
1727 * tipc_sk_anc_data_recv - optionally capture ancillary data for received message
1728 * @m: descriptor for message info
1729 * @skb: received message buffer
1730 * @tsk: TIPC port associated with message
1731 *
1732 * Note: Ancillary data is not captured if not requested by receiver.
1733 *
1734 * Return: 0 if successful, otherwise errno
1735 */
1736static int tipc_sk_anc_data_recv(struct msghdr *m, struct sk_buff *skb,
1737				 struct tipc_sock *tsk)
1738{
1739	struct tipc_msg *hdr;
1740	u32 data[3] = {0,};
1741	bool has_addr;
1742	int dlen, rc;
1743
1744	if (likely(m->msg_controllen == 0))
1745		return 0;
1746
1747	hdr = buf_msg(skb);
1748	dlen = msg_data_sz(hdr);
1749
1750	/* Capture errored message object, if any */
1751	if (msg_errcode(hdr)) {
1752		if (skb_linearize(skb))
1753			return -ENOMEM;
1754		hdr = buf_msg(skb);
1755		data[0] = msg_errcode(hdr);
1756		data[1] = dlen;
1757		rc = put_cmsg(m, SOL_TIPC, TIPC_ERRINFO, 8, data);
1758		if (rc || !dlen)
1759			return rc;
1760		rc = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, dlen, msg_data(hdr));
1761		if (rc)
1762			return rc;
1763	}
1764
1765	/* Capture TIPC_SERVICE_ADDR/RANGE destination address, if any */
1766	switch (msg_type(hdr)) {
1767	case TIPC_NAMED_MSG:
1768		has_addr = true;
1769		data[0] = msg_nametype(hdr);
1770		data[1] = msg_namelower(hdr);
1771		data[2] = data[1];
1772		break;
1773	case TIPC_MCAST_MSG:
1774		has_addr = true;
1775		data[0] = msg_nametype(hdr);
1776		data[1] = msg_namelower(hdr);
1777		data[2] = msg_nameupper(hdr);
1778		break;
1779	case TIPC_CONN_MSG:
1780		has_addr = !!tsk->conn_addrtype;
1781		data[0] = msg_nametype(&tsk->phdr);
1782		data[1] = msg_nameinst(&tsk->phdr);
1783		data[2] = data[1];
1784		break;
1785	default:
1786		has_addr = false;
1787	}
1788	if (!has_addr)
1789		return 0;
1790	return put_cmsg(m, SOL_TIPC, TIPC_DESTNAME, 12, data);
1791}
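
/* Illustrative userspace sketch (not part of this file; parse_destname()
 * is a placeholder): receivers opt in to the ancillary data above by
 * passing a control buffer, then walk the returned cmsgs:
 *
 *	char ctl[64];
 *	struct msghdr m = { .msg_control = ctl, .msg_controllen = sizeof(ctl) };
 *	struct cmsghdr *c;
 *
 *	recvmsg(sd, &m, 0);
 *	for (c = CMSG_FIRSTHDR(&m); c; c = CMSG_NXTHDR(&m, c))
 *		if (c->cmsg_level == SOL_TIPC && c->cmsg_type == TIPC_DESTNAME)
 *			parse_destname(CMSG_DATA(c));
 */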
1792
1793static struct sk_buff *tipc_sk_build_ack(struct tipc_sock *tsk)
1794{
1795	struct sock *sk = &tsk->sk;
1796	struct sk_buff *skb = NULL;
1797	struct tipc_msg *msg;
1798	u32 peer_port = tsk_peer_port(tsk);
1799	u32 dnode = tsk_peer_node(tsk);
1800
1801	if (!tipc_sk_connected(sk))
1802		return NULL;
1803	skb = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0,
1804			      dnode, tsk_own_node(tsk), peer_port,
1805			      tsk->portid, TIPC_OK);
1806	if (!skb)
1807		return NULL;
1808	msg = buf_msg(skb);
1809	msg_set_conn_ack(msg, tsk->rcv_unacked);
1810	tsk->rcv_unacked = 0;
1811
1812	/* Adjust to and advertise the correct window limit */
1813	if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL) {
1814		tsk->rcv_win = tsk_adv_blocks(tsk->sk.sk_rcvbuf);
1815		msg_set_adv_win(msg, tsk->rcv_win);
1816	}
1817	return skb;
1818}
1819
1820static void tipc_sk_send_ack(struct tipc_sock *tsk)
1821{
1822	struct sk_buff *skb;
1823
1824	skb = tipc_sk_build_ack(tsk);
1825	if (!skb)
1826		return;
1827
1828	tipc_node_xmit_skb(sock_net(&tsk->sk), skb, tsk_peer_node(tsk),
1829			   msg_link_selector(buf_msg(skb)));
1830}
1831
1832static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
1833{
1834	struct sock *sk = sock->sk;
1835	DEFINE_WAIT_FUNC(wait, woken_wake_function);
1836	long timeo = *timeop;
1837	int err = sock_error(sk);
1838
1839	if (err)
1840		return err;
1841
1842	for (;;) {
1843		if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
1844			if (sk->sk_shutdown & RCV_SHUTDOWN) {
1845				err = -ENOTCONN;
1846				break;
1847			}
1848			add_wait_queue(sk_sleep(sk), &wait);
1849			release_sock(sk);
1850			timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
1851			sched_annotate_sleep();
1852			lock_sock(sk);
1853			remove_wait_queue(sk_sleep(sk), &wait);
1854		}
1855		err = 0;
1856		if (!skb_queue_empty(&sk->sk_receive_queue))
1857			break;
1858		err = -EAGAIN;
1859		if (!timeo)
1860			break;
1861		err = sock_intr_errno(timeo);
1862		if (signal_pending(current))
1863			break;
1864
1865		err = sock_error(sk);
1866		if (err)
1867			break;
1868	}
1869	*timeop = timeo;
1870	return err;
1871}
1872
1873/**
1874 * tipc_recvmsg - receive packet-oriented message
1875 * @sock: network socket
1876 * @m: descriptor for message info
1877 * @buflen: length of user buffer area
1878 * @flags: receive flags
1879 *
1880 * Used for SOCK_DGRAM, SOCK_RDM, and SOCK_SEQPACKET messages.
1881 * If the complete message doesn't fit in user area, truncate it.
1882 *
1883 * Return: size of returned message data, errno otherwise
1884 */
1885static int tipc_recvmsg(struct socket *sock, struct msghdr *m,
1886			size_t buflen,	int flags)
1887{
1888	struct sock *sk = sock->sk;
1889	bool connected = !tipc_sk_type_connectionless(sk);
1890	struct tipc_sock *tsk = tipc_sk(sk);
1891	int rc, err, hlen, dlen, copy;
1892	struct tipc_skb_cb *skb_cb;
1893	struct sk_buff_head xmitq;
1894	struct tipc_msg *hdr;
1895	struct sk_buff *skb;
1896	bool grp_evt;
1897	long timeout;
1898
1899	/* Catch invalid receive requests */
1900	if (unlikely(!buflen))
1901		return -EINVAL;
1902
1903	lock_sock(sk);
1904	if (unlikely(connected && sk->sk_state == TIPC_OPEN)) {
1905		rc = -ENOTCONN;
1906		goto exit;
1907	}
1908	timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1909
1910	/* Step rcv queue to first msg with data or error; wait if necessary */
1911	do {
1912		rc = tipc_wait_for_rcvmsg(sock, &timeout);
1913		if (unlikely(rc))
1914			goto exit;
1915		skb = skb_peek(&sk->sk_receive_queue);
1916		skb_cb = TIPC_SKB_CB(skb);
1917		hdr = buf_msg(skb);
1918		dlen = msg_data_sz(hdr);
1919		hlen = msg_hdr_sz(hdr);
1920		err = msg_errcode(hdr);
1921		grp_evt = msg_is_grp_evt(hdr);
1922		if (likely(dlen || err))
1923			break;
1924		tsk_advance_rx_queue(sk);
1925	} while (1);
1926
1927	/* Collect msg meta data, including error code and rejected data */
1928	tipc_sk_set_orig_addr(m, skb);
1929	rc = tipc_sk_anc_data_recv(m, skb, tsk);
1930	if (unlikely(rc))
1931		goto exit;
1932	hdr = buf_msg(skb);
1933
1934	/* Capture data if non-error msg, otherwise just set return value */
1935	if (likely(!err)) {
1936		int offset = skb_cb->bytes_read;
1937
1938		copy = min_t(int, dlen - offset, buflen);
1939		rc = skb_copy_datagram_msg(skb, hlen + offset, m, copy);
1940		if (unlikely(rc))
1941			goto exit;
1942		if (unlikely(offset + copy < dlen)) {
1943			if (flags & MSG_EOR) {
1944				if (!(flags & MSG_PEEK))
1945					skb_cb->bytes_read = offset + copy;
1946			} else {
1947				m->msg_flags |= MSG_TRUNC;
1948				skb_cb->bytes_read = 0;
1949			}
1950		} else {
1951			if (flags & MSG_EOR)
1952				m->msg_flags |= MSG_EOR;
1953			skb_cb->bytes_read = 0;
1954		}
1955	} else {
1956		copy = 0;
1957		rc = 0;
1958		if (err != TIPC_CONN_SHUTDOWN && connected && !m->msg_control) {
1959			rc = -ECONNRESET;
1960			goto exit;
1961		}
1962	}
1963
1964	/* Mark message as group event if applicable */
1965	if (unlikely(grp_evt)) {
1966		if (msg_grp_evt(hdr) == TIPC_WITHDRAWN)
1967			m->msg_flags |= MSG_EOR;
1968		m->msg_flags |= MSG_OOB;
1969		copy = 0;
1970	}
1971
1972	/* Capture of data or error code/rejected data was successful */
1973	if (unlikely(flags & MSG_PEEK))
1974		goto exit;
1975
1976	/* Send group flow control advertisement when applicable */
1977	if (tsk->group && msg_in_group(hdr) && !grp_evt) {
1978		__skb_queue_head_init(&xmitq);
1979		tipc_group_update_rcv_win(tsk->group, tsk_blocks(hlen + dlen),
1980					  msg_orignode(hdr), msg_origport(hdr),
1981					  &xmitq);
1982		tipc_node_distr_xmit(sock_net(sk), &xmitq);
1983	}
1984
1985	if (skb_cb->bytes_read)
1986		goto exit;
1987
1988	tsk_advance_rx_queue(sk);
1989
1990	if (likely(!connected))
1991		goto exit;
1992
1993	/* Send connection flow control advertisement when applicable */
1994	tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen);
1995	if (tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE)
1996		tipc_sk_send_ack(tsk);
1997exit:
1998	release_sock(sk);
1999	return rc ? rc : copy;
2000}
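
/* Illustrative userspace sketch (not part of this file): a datagram
 * receiver can detect the truncation described above via MSG_TRUNC:
 *
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct msghdr m = { .msg_iov = &iov, .msg_iovlen = 1 };
 *
 *	if (recvmsg(sd, &m, 0) >= 0 && (m.msg_flags & MSG_TRUNC))
 *		fprintf(stderr, "message truncated\n");
 */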
2001
2002/**
2003 * tipc_recvstream - receive stream-oriented data
2004 * @sock: network socket
2005 * @m: descriptor for message info
2006 * @buflen: total size of user buffer area
2007 * @flags: receive flags
2008 *
2009 * Used for SOCK_STREAM messages only. If not enough data is available,
2010 * it will optionally wait for more; data is never truncated.
2011 *
2012 * Return: size of returned message data, errno otherwise
2013 */
2014static int tipc_recvstream(struct socket *sock, struct msghdr *m,
2015			   size_t buflen, int flags)
2016{
2017	struct sock *sk = sock->sk;
2018	struct tipc_sock *tsk = tipc_sk(sk);
2019	struct sk_buff *skb;
2020	struct tipc_msg *hdr;
2021	struct tipc_skb_cb *skb_cb;
2022	bool peek = flags & MSG_PEEK;
2023	int offset, required, copy, copied = 0;
2024	int hlen, dlen, err, rc;
2025	long timeout;
2026
2027	/* Catch invalid receive attempts */
2028	if (unlikely(!buflen))
2029		return -EINVAL;
2030
2031	lock_sock(sk);
2032
2033	if (unlikely(sk->sk_state == TIPC_OPEN)) {
2034		rc = -ENOTCONN;
2035		goto exit;
2036	}
2037	required = sock_rcvlowat(sk, flags & MSG_WAITALL, buflen);
2038	timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
2039
2040	do {
2041		/* Look at first msg in receive queue; wait if necessary */
2042		rc = tipc_wait_for_rcvmsg(sock, &timeout);
2043		if (unlikely(rc))
2044			break;
2045		skb = skb_peek(&sk->sk_receive_queue);
2046		skb_cb = TIPC_SKB_CB(skb);
2047		hdr = buf_msg(skb);
2048		dlen = msg_data_sz(hdr);
2049		hlen = msg_hdr_sz(hdr);
2050		err = msg_errcode(hdr);
2051
2052		/* Discard any empty non-errored (SYN-) message */
2053		if (unlikely(!dlen && !err)) {
2054			tsk_advance_rx_queue(sk);
2055			continue;
2056		}
2057
2058		/* Collect msg meta data, incl. error code and rejected data */
2059		if (!copied) {
2060			tipc_sk_set_orig_addr(m, skb);
2061			rc = tipc_sk_anc_data_recv(m, skb, tsk);
2062			if (rc)
2063				break;
2064			hdr = buf_msg(skb);
2065		}
2066
2067		/* Copy data if msg ok, otherwise return error/partial data */
2068		if (likely(!err)) {
2069			offset = skb_cb->bytes_read;
2070			copy = min_t(int, dlen - offset, buflen - copied);
2071			rc = skb_copy_datagram_msg(skb, hlen + offset, m, copy);
2072			if (unlikely(rc))
2073				break;
2074			copied += copy;
2075			offset += copy;
2076			if (unlikely(offset < dlen)) {
2077				if (!peek)
2078					skb_cb->bytes_read = offset;
2079				break;
2080			}
2081		} else {
2082			rc = 0;
2083			if ((err != TIPC_CONN_SHUTDOWN) && !m->msg_control)
2084				rc = -ECONNRESET;
2085			if (copied || rc)
2086				break;
2087		}
2088
2089		if (unlikely(peek))
2090			break;
2091
2092		tsk_advance_rx_queue(sk);
2093
2094		/* Send connection flow control advertisement when applicable */
2095		tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen);
2096		if (tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE)
2097			tipc_sk_send_ack(tsk);
2098
2099		/* Exit if all requested data or FIN/error received */
2100		if (copied == buflen || err)
2101			break;
2102
2103	} while (!skb_queue_empty(&sk->sk_receive_queue) || copied < required);
2104exit:
2105	release_sock(sk);
2106	return copied ? copied : rc;
2107}
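
/* Illustrative userspace sketch (not part of this file): with MSG_WAITALL
 * the loop above keeps reading until the whole buffer is filled, or an
 * error/shutdown terminates the stream early:
 *
 *	int n = recv(sd, buf, sizeof(buf), MSG_WAITALL);
 */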
2108
2109/**
2110 * tipc_write_space - wake up thread if port congestion is released
2111 * @sk: socket
2112 */
2113static void tipc_write_space(struct sock *sk)
2114{
2115	struct socket_wq *wq;
2116
2117	rcu_read_lock();
2118	wq = rcu_dereference(sk->sk_wq);
2119	if (skwq_has_sleeper(wq))
2120		wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
2121						EPOLLWRNORM | EPOLLWRBAND);
2122	rcu_read_unlock();
2123}
2124
2125/**
2126 * tipc_data_ready - wake up threads to indicate messages have been received
2127 * @sk: socket
2128 */
2129static void tipc_data_ready(struct sock *sk)
2130{
2131	struct socket_wq *wq;
2132
2133	trace_sk_data_ready(sk);
2134
2135	rcu_read_lock();
2136	wq = rcu_dereference(sk->sk_wq);
2137	if (skwq_has_sleeper(wq))
2138		wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN |
2139						EPOLLRDNORM | EPOLLRDBAND);
2140	rcu_read_unlock();
2141}
2142
2143static void tipc_sock_destruct(struct sock *sk)
2144{
2145	__skb_queue_purge(&sk->sk_receive_queue);
2146}
2147
2148static void tipc_sk_proto_rcv(struct sock *sk,
2149			      struct sk_buff_head *inputq,
2150			      struct sk_buff_head *xmitq)
2151{
2152	struct sk_buff *skb = __skb_dequeue(inputq);
2153	struct tipc_sock *tsk = tipc_sk(sk);
2154	struct tipc_msg *hdr = buf_msg(skb);
2155	struct tipc_group *grp = tsk->group;
2156	bool wakeup = false;
2157
2158	switch (msg_user(hdr)) {
2159	case CONN_MANAGER:
2160		tipc_sk_conn_proto_rcv(tsk, skb, inputq, xmitq);
2161		return;
2162	case SOCK_WAKEUP:
2163		tipc_dest_del(&tsk->cong_links, msg_orignode(hdr), 0);
2164		/* coupled with smp_rmb() in tipc_wait_for_cond() */
2165		smp_wmb();
2166		tsk->cong_link_cnt--;
2167		wakeup = true;
2168		tipc_sk_push_backlog(tsk, false);
2169		break;
2170	case GROUP_PROTOCOL:
2171		tipc_group_proto_rcv(grp, &wakeup, hdr, inputq, xmitq);
2172		break;
2173	case TOP_SRV:
2174		tipc_group_member_evt(tsk->group, &wakeup, &sk->sk_rcvbuf,
2175				      hdr, inputq, xmitq);
2176		break;
2177	default:
2178		break;
2179	}
2180
2181	if (wakeup)
2182		sk->sk_write_space(sk);
2183
2184	kfree_skb(skb);
2185}
2186
2187/**
2188 * tipc_sk_filter_connect - check incoming message for a connection-based socket
2189 * @tsk: TIPC socket
2190 * @skb: pointer to message buffer.
2191 * @xmitq: for Nagle ACK if any
2192 * Return: true if message should be added to receive queue, false otherwise
2193 */
2194static bool tipc_sk_filter_connect(struct tipc_sock *tsk, struct sk_buff *skb,
2195				   struct sk_buff_head *xmitq)
2196{
2197	struct sock *sk = &tsk->sk;
2198	struct net *net = sock_net(sk);
2199	struct tipc_msg *hdr = buf_msg(skb);
2200	bool con_msg = msg_connected(hdr);
2201	u32 pport = tsk_peer_port(tsk);
2202	u32 pnode = tsk_peer_node(tsk);
2203	u32 oport = msg_origport(hdr);
2204	u32 onode = msg_orignode(hdr);
2205	int err = msg_errcode(hdr);
2206	unsigned long delay;
2207
2208	if (unlikely(msg_mcast(hdr)))
2209		return false;
2210	tsk->oneway = 0;
2211
2212	switch (sk->sk_state) {
2213	case TIPC_CONNECTING:
2214		/* Setup ACK */
2215		if (likely(con_msg)) {
2216			if (err)
2217				break;
2218			tipc_sk_finish_conn(tsk, oport, onode);
2219			msg_set_importance(&tsk->phdr, msg_importance(hdr));
2220			/* ACK+ message with data is added to receive queue */
2221			if (msg_data_sz(hdr))
2222				return true;
2223			/* Empty ACK- message: wake up sleeping connect() and drop */
2224			sk->sk_state_change(sk);
2225			msg_set_dest_droppable(hdr, 1);
2226			return false;
2227		}
2228		/* Ignore connectionless message if not from listening socket */
2229		if (oport != pport || onode != pnode)
2230			return false;
2231
2232		/* Rejected SYN */
2233		if (err != TIPC_ERR_OVERLOAD)
2234			break;
2235
2236		/* Prepare for new setup attempt if we have a SYN clone */
2237		if (skb_queue_empty(&sk->sk_write_queue))
2238			break;
2239		get_random_bytes(&delay, 2);
2240		delay %= (tsk->conn_timeout / 4);
2241		delay = msecs_to_jiffies(delay + 100);
2242		sk_reset_timer(sk, &sk->sk_timer, jiffies + delay);
2243		return false;
2244	case TIPC_OPEN:
2245	case TIPC_DISCONNECTING:
2246		return false;
2247	case TIPC_LISTEN:
2248		/* Accept only SYN message */
2249		if (!msg_is_syn(hdr) &&
2250		    tipc_node_get_capabilities(net, onode) & TIPC_SYN_BIT)
2251			return false;
2252		if (!con_msg && !err)
2253			return true;
2254		return false;
2255	case TIPC_ESTABLISHED:
2256		if (!skb_queue_empty(&sk->sk_write_queue))
2257			tipc_sk_push_backlog(tsk, false);
2258		/* Accept only connection-based messages sent by peer */
2259		if (likely(con_msg && !err && pport == oport &&
2260			   pnode == onode)) {
2261			if (msg_ack_required(hdr)) {
2262				struct sk_buff *skb;
2263
2264				skb = tipc_sk_build_ack(tsk);
2265				if (skb) {
2266					msg_set_nagle_ack(buf_msg(skb));
2267					__skb_queue_tail(xmitq, skb);
2268				}
2269			}
2270			return true;
2271		}
2272		if (!tsk_peer_msg(tsk, hdr))
2273			return false;
2274		if (!err)
2275			return true;
2276		tipc_set_sk_state(sk, TIPC_DISCONNECTING);
2277		tipc_node_remove_conn(net, pnode, tsk->portid);
2278		sk->sk_state_change(sk);
2279		return true;
2280	default:
2281		pr_err("Unknown sk_state %u\n", sk->sk_state);
2282	}
2283	/* Abort connection setup attempt */
2284	tipc_set_sk_state(sk, TIPC_DISCONNECTING);
2285	sk->sk_err = ECONNREFUSED;
2286	sk->sk_state_change(sk);
2287	return true;
2288}
2289
2290/**
2291 * rcvbuf_limit - get proper overload limit of socket receive queue
2292 * @sk: socket
2293 * @skb: message
2294 *
2295 * For connection oriented messages, irrespective of importance,
2296 * default queue limit is 2 MB.
2297 *
2298 * For connectionless messages, queue limits are based on message
2299 * importance as follows:
2300 *
2301 * TIPC_LOW_IMPORTANCE       (2 MB)
2302 * TIPC_MEDIUM_IMPORTANCE    (4 MB)
2303 * TIPC_HIGH_IMPORTANCE      (8 MB)
2304 * TIPC_CRITICAL_IMPORTANCE  (16 MB)
2305 *
2306 * Return: overload limit according to corresponding message importance
2307 */
2308static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *skb)
2309{
2310	struct tipc_sock *tsk = tipc_sk(sk);
2311	struct tipc_msg *hdr = buf_msg(skb);
2312
2313	if (unlikely(msg_in_group(hdr)))
2314		return READ_ONCE(sk->sk_rcvbuf);
2315
2316	if (unlikely(!msg_connected(hdr)))
2317		return READ_ONCE(sk->sk_rcvbuf) << msg_importance(hdr);
2318
2319	if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
2320		return READ_ONCE(sk->sk_rcvbuf);
2321
2322	return FLOWCTL_MSG_LIM;
2323}
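
/* Worked example: with the default 2 MB sk_rcvbuf, a connectionless
 * message of TIPC_HIGH_IMPORTANCE (importance value 2) gets a limit of
 * 2 MB << 2 = 8 MB, matching the table above.
 */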
2324
2325/**
2326 * tipc_sk_filter_rcv - validate incoming message
2327 * @sk: socket
2328 * @skb: pointer to message.
2329 * @xmitq: output message area (FIXME)
2330 *
2331 * Enqueues message on receive queue if acceptable; optionally handles
2332 * disconnect indication for a connected socket.
2333 *
2334 * Called with socket lock already taken
2335 */
2336static void tipc_sk_filter_rcv(struct sock *sk, struct sk_buff *skb,
2337			       struct sk_buff_head *xmitq)
2338{
2339	bool sk_conn = !tipc_sk_type_connectionless(sk);
2340	struct tipc_sock *tsk = tipc_sk(sk);
2341	struct tipc_group *grp = tsk->group;
2342	struct tipc_msg *hdr = buf_msg(skb);
2343	struct net *net = sock_net(sk);
2344	struct sk_buff_head inputq;
2345	int mtyp = msg_type(hdr);
2346	int limit, err = TIPC_OK;
2347
2348	trace_tipc_sk_filter_rcv(sk, skb, TIPC_DUMP_ALL, " ");
2349	TIPC_SKB_CB(skb)->bytes_read = 0;
2350	__skb_queue_head_init(&inputq);
2351	__skb_queue_tail(&inputq, skb);
2352
2353	if (unlikely(!msg_isdata(hdr)))
2354		tipc_sk_proto_rcv(sk, &inputq, xmitq);
2355
2356	if (unlikely(grp))
2357		tipc_group_filter_msg(grp, &inputq, xmitq);
2358
2359	if (unlikely(!grp) && mtyp == TIPC_MCAST_MSG)
2360		tipc_mcast_filter_msg(net, &tsk->mc_method.deferredq, &inputq);
2361
2362	/* Validate and add to receive buffer if there is space */
2363	while ((skb = __skb_dequeue(&inputq))) {
2364		hdr = buf_msg(skb);
2365		limit = rcvbuf_limit(sk, skb);
2366		if ((sk_conn && !tipc_sk_filter_connect(tsk, skb, xmitq)) ||
2367		    (!sk_conn && msg_connected(hdr)) ||
2368		    (!grp && msg_in_group(hdr)))
2369			err = TIPC_ERR_NO_PORT;
2370		else if (sk_rmem_alloc_get(sk) + skb->truesize >= limit) {
2371			trace_tipc_sk_dump(sk, skb, TIPC_DUMP_ALL,
2372					   "err_overload2!");
2373			atomic_inc(&sk->sk_drops);
2374			err = TIPC_ERR_OVERLOAD;
2375		}
2376
2377		if (unlikely(err)) {
2378			if (tipc_msg_reverse(tipc_own_addr(net), &skb, err)) {
2379				trace_tipc_sk_rej_msg(sk, skb, TIPC_DUMP_NONE,
2380						      "@filter_rcv!");
2381				__skb_queue_tail(xmitq, skb);
2382			}
2383			err = TIPC_OK;
2384			continue;
2385		}
2386		__skb_queue_tail(&sk->sk_receive_queue, skb);
2387		skb_set_owner_r(skb, sk);
2388		trace_tipc_sk_overlimit2(sk, skb, TIPC_DUMP_ALL,
2389					 "rcvq >90% allocated!");
2390		sk->sk_data_ready(sk);
2391	}
2392}
2393
2394/**
2395 * tipc_sk_backlog_rcv - handle incoming message from backlog queue
2396 * @sk: socket
2397 * @skb: message
2398 *
2399 * Caller must hold socket lock
2400 */
2401static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
2402{
2403	unsigned int before = sk_rmem_alloc_get(sk);
2404	struct sk_buff_head xmitq;
2405	unsigned int added;
2406
2407	__skb_queue_head_init(&xmitq);
2408
2409	tipc_sk_filter_rcv(sk, skb, &xmitq);
2410	added = sk_rmem_alloc_get(sk) - before;
2411	atomic_add(added, &tipc_sk(sk)->dupl_rcvcnt);
2412
2413	/* Send pending response/rejected messages, if any */
2414	tipc_node_distr_xmit(sock_net(sk), &xmitq);
2415	return 0;
2416}
2417
2418/**
2419 * tipc_sk_enqueue - extract all buffers with destination 'dport' from
2420 *                   inputq and try adding them to socket or backlog queue
2421 * @inputq: list of incoming buffers with potentially different destinations
2422 * @sk: socket where the buffers should be enqueued
2423 * @dport: port number for the socket
2424 * @xmitq: output queue
2425 *
2426 * Caller must hold socket lock
2427 */
2428static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
2429			    u32 dport, struct sk_buff_head *xmitq)
2430{
2431	unsigned long time_limit = jiffies + usecs_to_jiffies(20000);
2432	struct sk_buff *skb;
2433	unsigned int lim;
2434	atomic_t *dcnt;
2435	u32 onode;
2436
2437	while (skb_queue_len(inputq)) {
2438		if (unlikely(time_after_eq(jiffies, time_limit)))
2439			return;
2440
2441		skb = tipc_skb_dequeue(inputq, dport);
2442		if (unlikely(!skb))
2443			return;
2444
2445		/* Add message directly to receive queue if possible */
2446		if (!sock_owned_by_user(sk)) {
2447			tipc_sk_filter_rcv(sk, skb, xmitq);
2448			continue;
2449		}
2450
2451		/* Try backlog, compensating for double-counted bytes */
2452		dcnt = &tipc_sk(sk)->dupl_rcvcnt;
2453		if (!sk->sk_backlog.len)
2454			atomic_set(dcnt, 0);
2455		lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
2456		if (likely(!sk_add_backlog(sk, skb, lim))) {
2457			trace_tipc_sk_overlimit1(sk, skb, TIPC_DUMP_ALL,
2458						 "bklg & rcvq >90% allocated!");
2459			continue;
2460		}
2461
2462		trace_tipc_sk_dump(sk, skb, TIPC_DUMP_ALL, "err_overload!");
2463		/* Overload => reject message back to sender */
2464		onode = tipc_own_addr(sock_net(sk));
2465		atomic_inc(&sk->sk_drops);
2466		if (tipc_msg_reverse(onode, &skb, TIPC_ERR_OVERLOAD)) {
2467			trace_tipc_sk_rej_msg(sk, skb, TIPC_DUMP_ALL,
2468					      "@sk_enqueue!");
2469			__skb_queue_tail(xmitq, skb);
2470		}
2471		break;
2472	}
2473}
2474
2475/**
2476 * tipc_sk_rcv - handle a chain of incoming buffers
2477 * @net: the associated network namespace
2478 * @inputq: buffer list containing the buffers
2479 * Consumes all buffers in list until inputq is empty
2480 * Note: may be called in multiple threads referring to the same queue
2481 */
2482void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
2483{
2484	struct sk_buff_head xmitq;
2485	u32 dnode, dport = 0;
2486	int err;
2487	struct tipc_sock *tsk;
2488	struct sock *sk;
2489	struct sk_buff *skb;
2490
2491	__skb_queue_head_init(&xmitq);
2492	while (skb_queue_len(inputq)) {
2493		dport = tipc_skb_peek_port(inputq, dport);
2494		tsk = tipc_sk_lookup(net, dport);
2495
2496		if (likely(tsk)) {
2497			sk = &tsk->sk;
2498			if (likely(spin_trylock_bh(&sk->sk_lock.slock))) {
2499				tipc_sk_enqueue(inputq, sk, dport, &xmitq);
2500				spin_unlock_bh(&sk->sk_lock.slock);
2501			}
2502			/* Send pending response/rejected messages, if any */
2503			tipc_node_distr_xmit(sock_net(sk), &xmitq);
2504			sock_put(sk);
2505			continue;
2506		}
2507		/* No destination socket => dequeue skb if still there */
2508		skb = tipc_skb_dequeue(inputq, dport);
2509		if (!skb)
2510			return;
2511
2512		/* Try secondary lookup if unresolved named message */
2513		err = TIPC_ERR_NO_PORT;
2514		if (tipc_msg_lookup_dest(net, skb, &err))
2515			goto xmit;
2516
2517		/* Prepare for message rejection */
2518		if (!tipc_msg_reverse(tipc_own_addr(net), &skb, err))
2519			continue;
2520
2521		trace_tipc_sk_rej_msg(NULL, skb, TIPC_DUMP_NONE, "@sk_rcv!");
2522xmit:
2523		dnode = msg_destnode(buf_msg(skb));
2524		tipc_node_xmit_skb(net, skb, dnode, dport);
2525	}
2526}
2527
2528static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
2529{
2530	DEFINE_WAIT_FUNC(wait, woken_wake_function);
2531	struct sock *sk = sock->sk;
2532	int done;
2533
2534	do {
2535		int err = sock_error(sk);
2536		if (err)
2537			return err;
2538		if (!*timeo_p)
2539			return -ETIMEDOUT;
2540		if (signal_pending(current))
2541			return sock_intr_errno(*timeo_p);
2542		if (sk->sk_state == TIPC_DISCONNECTING)
2543			break;
2544
2545		add_wait_queue(sk_sleep(sk), &wait);
2546		done = sk_wait_event(sk, timeo_p, tipc_sk_connected(sk),
2547				     &wait);
2548		remove_wait_queue(sk_sleep(sk), &wait);
2549	} while (!done);
2550	return 0;
2551}
2552
2553static bool tipc_sockaddr_is_sane(struct sockaddr_tipc *addr)
2554{
2555	if (addr->family != AF_TIPC)
2556		return false;
2557	if (addr->addrtype == TIPC_SERVICE_RANGE)
2558		return (addr->addr.nameseq.lower <= addr->addr.nameseq.upper);
2559	return (addr->addrtype == TIPC_SERVICE_ADDR ||
2560		addr->addrtype == TIPC_SOCKET_ADDR);
2561}
2562
2563/**
2564 * tipc_connect - establish a connection to another TIPC port
2565 * @sock: socket structure
2566 * @dest: socket address for destination port
2567 * @destlen: size of socket address data structure
2568 * @flags: file-related flags associated with socket
2569 *
2570 * Return: 0 on success, errno otherwise
2571 */
2572static int tipc_connect(struct socket *sock, struct sockaddr *dest,
2573			int destlen, int flags)
2574{
2575	struct sock *sk = sock->sk;
2576	struct tipc_sock *tsk = tipc_sk(sk);
2577	struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest;
2578	struct msghdr m = {NULL,};
2579	long timeout = (flags & O_NONBLOCK) ? 0 : tsk->conn_timeout;
2580	int previous;
2581	int res = 0;
2582
2583	if (destlen != sizeof(struct sockaddr_tipc))
2584		return -EINVAL;
2585
2586	lock_sock(sk);
2587
2588	if (tsk->group) {
2589		res = -EINVAL;
2590		goto exit;
2591	}
2592
2593	if (dst->family == AF_UNSPEC) {
2594		memset(&tsk->peer, 0, sizeof(struct sockaddr_tipc));
2595		if (!tipc_sk_type_connectionless(sk))
2596			res = -EINVAL;
2597		goto exit;
2598	}
2599	if (!tipc_sockaddr_is_sane(dst)) {
2600		res = -EINVAL;
2601		goto exit;
2602	}
2603	/* DGRAM/RDM connect(), just save the destaddr */
2604	if (tipc_sk_type_connectionless(sk)) {
2605		memcpy(&tsk->peer, dest, destlen);
2606		goto exit;
2607	} else if (dst->addrtype == TIPC_SERVICE_RANGE) {
2608		res = -EINVAL;
2609		goto exit;
2610	}
2611
2612	previous = sk->sk_state;
2613
2614	switch (sk->sk_state) {
2615	case TIPC_OPEN:
2616		/* Send a 'SYN-' to destination */
2617		m.msg_name = dest;
2618		m.msg_namelen = destlen;
2619		iov_iter_kvec(&m.msg_iter, ITER_SOURCE, NULL, 0, 0);
2620
2621		/* For a non-blocking connect, set MSG_DONTWAIT so that
2622		 * the SYN transmission never blocks.
2623		 */
2624		if (!timeout)
2625			m.msg_flags = MSG_DONTWAIT;
2626
2627		res = __tipc_sendmsg(sock, &m, 0);
2628		if ((res < 0) && (res != -EWOULDBLOCK))
2629			goto exit;
2630
2631		/* Just entered TIPC_CONNECTING state; the only
2632		 * difference is that return value in non-blocking
2633		 * case is EINPROGRESS, rather than EALREADY.
2634		 */
2635		res = -EINPROGRESS;
2636		fallthrough;
2637	case TIPC_CONNECTING:
2638		if (!timeout) {
2639			if (previous == TIPC_CONNECTING)
2640				res = -EALREADY;
2641			goto exit;
2642		}
2643		timeout = msecs_to_jiffies(timeout);
2644		/* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */
2645		res = tipc_wait_for_connect(sock, &timeout);
2646		break;
2647	case TIPC_ESTABLISHED:
2648		res = -EISCONN;
2649		break;
2650	default:
2651		res = -EINVAL;
2652	}
2653
2654exit:
2655	release_sock(sk);
2656	return res;
2657}
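
/* Illustrative userspace sketch (not part of this file; service type and
 * instance values are arbitrary): a blocking connect to a service
 * address, using the socket's TIPC_CONN_TIMEOUT (8 s by default):
 *
 *	struct sockaddr_tipc dst = {
 *		.family = AF_TIPC,
 *		.addrtype = TIPC_SERVICE_ADDR,
 *		.addr.name.name = { .type = 18888, .instance = 17 },
 *	};
 *	int sd = socket(AF_TIPC, SOCK_STREAM, 0);
 *
 *	if (connect(sd, (struct sockaddr *)&dst, sizeof(dst)) < 0)
 *		perror("connect");
 */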
2658
2659/**
2660 * tipc_listen - allow socket to listen for incoming connections
2661 * @sock: socket structure
2662 * @len: (unused)
2663 *
2664 * Return: 0 on success, errno otherwise
2665 */
2666static int tipc_listen(struct socket *sock, int len)
2667{
2668	struct sock *sk = sock->sk;
2669	int res;
2670
2671	lock_sock(sk);
2672	res = tipc_set_sk_state(sk, TIPC_LISTEN);
2673	release_sock(sk);
2674
2675	return res;
2676}
2677
2678static int tipc_wait_for_accept(struct socket *sock, long timeo)
2679{
2680	struct sock *sk = sock->sk;
2681	DEFINE_WAIT_FUNC(wait, woken_wake_function);
2682	int err;
2683
2684	/* True wake-one mechanism for incoming connections: only
2685	 * one process gets woken up, not the 'whole herd'.
2686	 * Since we do not 'race & poll' for established sockets
2687	 * anymore, the common case will execute the loop only once.
2688	 */
2689	for (;;) {
2690		if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
2691			add_wait_queue(sk_sleep(sk), &wait);
2692			release_sock(sk);
2693			timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
2694			lock_sock(sk);
2695			remove_wait_queue(sk_sleep(sk), &wait);
2696		}
2697		err = 0;
2698		if (!skb_queue_empty(&sk->sk_receive_queue))
2699			break;
2700		err = -EAGAIN;
2701		if (!timeo)
2702			break;
2703		err = sock_intr_errno(timeo);
2704		if (signal_pending(current))
2705			break;
2706	}
2707	return err;
2708}
2709
2710/**
2711 * tipc_accept - wait for connection request
2712 * @sock: listening socket
2713 * @new_sock: new socket that is to be connected
2714 * @flags: file-related flags associated with socket
2715 * @kern: caused by kernel or by userspace?
2716 *
2717 * Return: 0 on success, errno otherwise
2718 */
2719static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
2720		       bool kern)
2721{
2722	struct sock *new_sk, *sk = sock->sk;
2723	struct tipc_sock *new_tsock;
2724	struct msghdr m = {NULL,};
2725	struct tipc_msg *msg;
2726	struct sk_buff *buf;
2727	long timeo;
2728	int res;
2729
2730	lock_sock(sk);
2731
2732	if (sk->sk_state != TIPC_LISTEN) {
2733		res = -EINVAL;
2734		goto exit;
2735	}
2736	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
2737	res = tipc_wait_for_accept(sock, timeo);
2738	if (res)
2739		goto exit;
2740
2741	buf = skb_peek(&sk->sk_receive_queue);
2742
2743	res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, kern);
2744	if (res)
2745		goto exit;
2746	security_sk_clone(sock->sk, new_sock->sk);
2747
2748	new_sk = new_sock->sk;
2749	new_tsock = tipc_sk(new_sk);
2750	msg = buf_msg(buf);
2751
2752	/* we lock on new_sk; but lockdep sees the lock on sk */
2753	lock_sock_nested(new_sk, SINGLE_DEPTH_NESTING);
2754
2755	/*
2756	 * Reject any stray messages received by new socket
2757	 * before the socket lock was taken (very, very unlikely)
2758	 */
2759	tsk_rej_rx_queue(new_sk, TIPC_ERR_NO_PORT);
2760
2761	/* Connect new socket to its peer */
2762	tipc_sk_finish_conn(new_tsock, msg_origport(msg), msg_orignode(msg));
2763
2764	tsk_set_importance(new_sk, msg_importance(msg));
2765	if (msg_named(msg)) {
2766		new_tsock->conn_addrtype = TIPC_SERVICE_ADDR;
2767		msg_set_nametype(&new_tsock->phdr, msg_nametype(msg));
2768		msg_set_nameinst(&new_tsock->phdr, msg_nameinst(msg));
2769	}
2770
2771	/*
2772	 * Respond to 'SYN-' by discarding it & returning 'ACK'.
2773	 * Respond to 'SYN+' by queuing it on new socket & returning 'ACK'.
2774	 */
2775	if (!msg_data_sz(msg)) {
2776		tsk_advance_rx_queue(sk);
2777	} else {
2778		__skb_dequeue(&sk->sk_receive_queue);
2779		__skb_queue_head(&new_sk->sk_receive_queue, buf);
2780		skb_set_owner_r(buf, new_sk);
2781	}
2782	iov_iter_kvec(&m.msg_iter, ITER_SOURCE, NULL, 0, 0);
2783	__tipc_sendstream(new_sock, &m, 0);
2784	release_sock(new_sk);
2785exit:
2786	release_sock(sk);
2787	return res;
2788}
2789
2790/**
2791 * tipc_shutdown - shutdown socket connection
2792 * @sock: socket structure
2793 * @how: direction to close (must be SHUT_RDWR)
2794 *
2795 * Terminates connection (if necessary), then purges socket's receive queue.
2796 *
2797 * Return: 0 on success, errno otherwise
2798 */
2799static int tipc_shutdown(struct socket *sock, int how)
2800{
2801	struct sock *sk = sock->sk;
2802	int res;
2803
2804	if (how != SHUT_RDWR)
2805		return -EINVAL;
2806
2807	lock_sock(sk);
2808
2809	trace_tipc_sk_shutdown(sk, NULL, TIPC_DUMP_ALL, " ");
2810	__tipc_shutdown(sock, TIPC_CONN_SHUTDOWN);
2811	sk->sk_shutdown = SHUTDOWN_MASK;
2812
2813	if (sk->sk_state == TIPC_DISCONNECTING) {
2814		/* Discard any unreceived messages */
2815		__skb_queue_purge(&sk->sk_receive_queue);
2816
2817		res = 0;
2818	} else {
2819		res = -ENOTCONN;
2820	}
2821	/* Wake up anyone sleeping in poll. */
2822	sk->sk_state_change(sk);
2823
2824	release_sock(sk);
2825	return res;
2826}
2827
2828static void tipc_sk_check_probing_state(struct sock *sk,
2829					struct sk_buff_head *list)
2830{
2831	struct tipc_sock *tsk = tipc_sk(sk);
2832	u32 pnode = tsk_peer_node(tsk);
2833	u32 pport = tsk_peer_port(tsk);
2834	u32 self = tsk_own_node(tsk);
2835	u32 oport = tsk->portid;
2836	struct sk_buff *skb;
2837
2838	if (tsk->probe_unacked) {
2839		tipc_set_sk_state(sk, TIPC_DISCONNECTING);
2840		sk->sk_err = ECONNABORTED;
2841		tipc_node_remove_conn(sock_net(sk), pnode, pport);
2842		sk->sk_state_change(sk);
2843		return;
2844	}
2845	/* Prepare new probe */
2846	skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE, 0,
2847			      pnode, self, pport, oport, TIPC_OK);
2848	if (skb)
2849		__skb_queue_tail(list, skb);
2850	tsk->probe_unacked = true;
2851	sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV);
2852}
2853
2854static void tipc_sk_retry_connect(struct sock *sk, struct sk_buff_head *list)
2855{
2856	struct tipc_sock *tsk = tipc_sk(sk);
2857
2858	/* Try again later if dest link is congested */
2859	if (tsk->cong_link_cnt) {
2860		sk_reset_timer(sk, &sk->sk_timer,
2861			       jiffies + msecs_to_jiffies(100));
2862		return;
2863	}
2864	/* Prepare SYN for retransmit */
2865	tipc_msg_skb_clone(&sk->sk_write_queue, list);
2866}
2867
2868static void tipc_sk_timeout(struct timer_list *t)
2869{
2870	struct sock *sk = from_timer(sk, t, sk_timer);
2871	struct tipc_sock *tsk = tipc_sk(sk);
2872	u32 pnode = tsk_peer_node(tsk);
2873	struct sk_buff_head list;
2874	int rc = 0;
2875
2876	__skb_queue_head_init(&list);
2877	bh_lock_sock(sk);
2878
2879	/* Try again later if socket is busy */
2880	if (sock_owned_by_user(sk)) {
2881		sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ / 20);
2882		bh_unlock_sock(sk);
2883		sock_put(sk);
2884		return;
2885	}
2886
2887	if (sk->sk_state == TIPC_ESTABLISHED)
2888		tipc_sk_check_probing_state(sk, &list);
2889	else if (sk->sk_state == TIPC_CONNECTING)
2890		tipc_sk_retry_connect(sk, &list);
2891
2892	bh_unlock_sock(sk);
2893
2894	if (!skb_queue_empty(&list))
2895		rc = tipc_node_xmit(sock_net(sk), &list, pnode, tsk->portid);
2896
2897	/* SYN messages may cause link congestion */
2898	if (rc == -ELINKCONG) {
2899		tipc_dest_push(&tsk->cong_links, pnode, 0);
2900		tsk->cong_link_cnt = 1;
2901	}
2902	sock_put(sk);
2903}
2904
2905static int tipc_sk_publish(struct tipc_sock *tsk, struct tipc_uaddr *ua)
2906{
2907	struct sock *sk = &tsk->sk;
2908	struct net *net = sock_net(sk);
2909	struct tipc_socket_addr skaddr;
2910	struct publication *p;
2911	u32 key;
2912
2913	if (tipc_sk_connected(sk))
2914		return -EINVAL;
2915	key = tsk->portid + tsk->pub_count + 1;
2916	if (key == tsk->portid)
2917		return -EADDRINUSE;
2918	skaddr.ref = tsk->portid;
2919	skaddr.node = tipc_own_addr(net);
2920	p = tipc_nametbl_publish(net, ua, &skaddr, key);
2921	if (unlikely(!p))
2922		return -EINVAL;
2923
2924	list_add(&p->binding_sock, &tsk->publications);
2925	tsk->pub_count++;
2926	tsk->published = true;
2927	return 0;
2928}
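
/* Illustrative userspace sketch (not part of this file; values arbitrary):
 * publication is normally triggered by bind() with a service range:
 *
 *	struct sockaddr_tipc a = {
 *		.family = AF_TIPC,
 *		.addrtype = TIPC_SERVICE_RANGE,
 *		.scope = TIPC_CLUSTER_SCOPE,
 *		.addr.nameseq = { .type = 18888, .lower = 0, .upper = 99 },
 *	};
 *
 *	bind(sd, (struct sockaddr *)&a, sizeof(a));
 */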
2929
2930static int tipc_sk_withdraw(struct tipc_sock *tsk, struct tipc_uaddr *ua)
2931{
2932	struct net *net = sock_net(&tsk->sk);
2933	struct publication *safe, *p;
2934	struct tipc_uaddr _ua;
2935	int rc = -EINVAL;
2936
2937	list_for_each_entry_safe(p, safe, &tsk->publications, binding_sock) {
2938		if (!ua) {
2939			tipc_uaddr(&_ua, TIPC_SERVICE_RANGE, p->scope,
2940				   p->sr.type, p->sr.lower, p->sr.upper);
2941			tipc_nametbl_withdraw(net, &_ua, &p->sk, p->key);
2942			continue;
2943		}
2944		/* Unbind specific publication */
2945		if (p->scope != ua->scope)
2946			continue;
2947		if (p->sr.type != ua->sr.type)
2948			continue;
2949		if (p->sr.lower != ua->sr.lower)
2950			continue;
2951		if (p->sr.upper != ua->sr.upper)
2952			break;
2953		tipc_nametbl_withdraw(net, ua, &p->sk, p->key);
2954		rc = 0;
2955		break;
2956	}
2957	if (list_empty(&tsk->publications)) {
2958		tsk->published = 0;
2959		rc = 0;
2960	}
2961	return rc;
2962}
2963
2964/* tipc_sk_reinit: set non-zero address in all existing sockets
2965 *                 when we go from standalone to network mode.
2966 */
2967void tipc_sk_reinit(struct net *net)
2968{
2969	struct tipc_net *tn = net_generic(net, tipc_net_id);
2970	struct rhashtable_iter iter;
2971	struct tipc_sock *tsk;
2972	struct tipc_msg *msg;
2973
2974	rhashtable_walk_enter(&tn->sk_rht, &iter);
2975
2976	do {
2977		rhashtable_walk_start(&iter);
2978
2979		while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) {
2980			sock_hold(&tsk->sk);
2981			rhashtable_walk_stop(&iter);
2982			lock_sock(&tsk->sk);
2983			msg = &tsk->phdr;
2984			msg_set_prevnode(msg, tipc_own_addr(net));
2985			msg_set_orignode(msg, tipc_own_addr(net));
2986			release_sock(&tsk->sk);
2987			rhashtable_walk_start(&iter);
2988			sock_put(&tsk->sk);
2989		}
2990
2991		rhashtable_walk_stop(&iter);
2992	} while (tsk == ERR_PTR(-EAGAIN));
2993
2994	rhashtable_walk_exit(&iter);
2995}
2996
2997static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid)
2998{
2999	struct tipc_net *tn = net_generic(net, tipc_net_id);
3000	struct tipc_sock *tsk;
3001
3002	rcu_read_lock();
3003	tsk = rhashtable_lookup(&tn->sk_rht, &portid, tsk_rht_params);
3004	if (tsk)
3005		sock_hold(&tsk->sk);
3006	rcu_read_unlock();
3007
3008	return tsk;
3009}
3010
3011static int tipc_sk_insert(struct tipc_sock *tsk)
3012{
3013	struct sock *sk = &tsk->sk;
3014	struct net *net = sock_net(sk);
3015	struct tipc_net *tn = net_generic(net, tipc_net_id);
3016	u32 remaining = (TIPC_MAX_PORT - TIPC_MIN_PORT) + 1;
3017	u32 portid = get_random_u32_below(remaining) + TIPC_MIN_PORT;
3018
3019	while (remaining--) {
3020		portid++;
3021		if ((portid < TIPC_MIN_PORT) || (portid > TIPC_MAX_PORT))
3022			portid = TIPC_MIN_PORT;
3023		tsk->portid = portid;
3024		sock_hold(&tsk->sk);
3025		if (!rhashtable_lookup_insert_fast(&tn->sk_rht, &tsk->node,
3026						   tsk_rht_params))
3027			return 0;
3028		sock_put(&tsk->sk);
3029	}
3030
3031	return -1;
3032}
3033
3034static void tipc_sk_remove(struct tipc_sock *tsk)
3035{
3036	struct sock *sk = &tsk->sk;
3037	struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id);
3038
3039	if (!rhashtable_remove_fast(&tn->sk_rht, &tsk->node, tsk_rht_params)) {
3040		WARN_ON(refcount_read(&sk->sk_refcnt) == 1);
3041		__sock_put(sk);
3042	}
3043}
3044
3045static const struct rhashtable_params tsk_rht_params = {
3046	.nelem_hint = 192,
3047	.head_offset = offsetof(struct tipc_sock, node),
3048	.key_offset = offsetof(struct tipc_sock, portid),
3049	.key_len = sizeof(u32), /* portid */
3050	.max_size = 1048576,
3051	.min_size = 256,
3052	.automatic_shrinking = true,
3053};
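
/* The table above hashes sockets on their 32-bit port id; tipc_sk_insert()
 * assigns each new socket a random id in [TIPC_MIN_PORT, TIPC_MAX_PORT]
 * and scans forward from it on collision.
 */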
3054
3055int tipc_sk_rht_init(struct net *net)
3056{
3057	struct tipc_net *tn = net_generic(net, tipc_net_id);
3058
3059	return rhashtable_init(&tn->sk_rht, &tsk_rht_params);
3060}
3061
3062void tipc_sk_rht_destroy(struct net *net)
3063{
3064	struct tipc_net *tn = net_generic(net, tipc_net_id);
3065
3066	/* Wait for socket readers to complete */
3067	synchronize_net();
3068
3069	rhashtable_destroy(&tn->sk_rht);
3070}
3071
3072static int tipc_sk_join(struct tipc_sock *tsk, struct tipc_group_req *mreq)
3073{
3074	struct net *net = sock_net(&tsk->sk);
3075	struct tipc_group *grp = tsk->group;
3076	struct tipc_msg *hdr = &tsk->phdr;
3077	struct tipc_uaddr ua;
3078	int rc;
3079
3080	if (mreq->type < TIPC_RESERVED_TYPES)
3081		return -EACCES;
3082	if (mreq->scope > TIPC_NODE_SCOPE)
3083		return -EINVAL;
3084	if (mreq->scope != TIPC_NODE_SCOPE)
3085		mreq->scope = TIPC_CLUSTER_SCOPE;
3086	if (grp)
3087		return -EACCES;
3088	grp = tipc_group_create(net, tsk->portid, mreq, &tsk->group_is_open);
3089	if (!grp)
3090		return -ENOMEM;
3091	tsk->group = grp;
3092	msg_set_lookup_scope(hdr, mreq->scope);
3093	msg_set_nametype(hdr, mreq->type);
3094	msg_set_dest_droppable(hdr, true);
3095	tipc_uaddr(&ua, TIPC_SERVICE_RANGE, mreq->scope,
3096		   mreq->type, mreq->instance, mreq->instance);
3097	tipc_nametbl_build_group(net, grp, &ua);
3098	rc = tipc_sk_publish(tsk, &ua);
3099	if (rc) {
3100		tipc_group_delete(net, grp);
3101		tsk->group = NULL;
3102		return rc;
3103	}
3104	/* Eliminate any risk that a broadcast overtakes sent JOINs */
3105	tsk->mc_method.rcast = true;
3106	tsk->mc_method.mandatory = true;
3107	tipc_group_join(net, grp, &tsk->sk.sk_rcvbuf);
3108	return rc;
3109}
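
/* Illustrative userspace sketch (not part of this file; values arbitrary):
 * joining a communication group maps onto the function above:
 *
 *	struct tipc_group_req r = {
 *		.type = 18888,
 *		.instance = 17,
 *		.scope = TIPC_CLUSTER_SCOPE,
 *	};
 *
 *	setsockopt(sd, SOL_TIPC, TIPC_GROUP_JOIN, &r, sizeof(r));
 */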
3110
3111static int tipc_sk_leave(struct tipc_sock *tsk)
3112{
3113	struct net *net = sock_net(&tsk->sk);
3114	struct tipc_group *grp = tsk->group;
3115	struct tipc_uaddr ua;
3116	int scope;
3117
3118	if (!grp)
3119		return -EINVAL;
3120	ua.addrtype = TIPC_SERVICE_RANGE;
3121	tipc_group_self(grp, &ua.sr, &scope);
3122	ua.scope = scope;
3123	tipc_group_delete(net, grp);
3124	tsk->group = NULL;
3125	tipc_sk_withdraw(tsk, &ua);
3126	return 0;
3127}
3128
3129/**
3130 * tipc_setsockopt - set socket option
3131 * @sock: socket structure
3132 * @lvl: option level
3133 * @opt: option identifier
3134 * @ov: pointer to new option value
3135 * @ol: length of option value
3136 *
3137 * For stream sockets only, accepts and ignores all IPPROTO_TCP options
3138 * (to ease compatibility).
3139 *
3140 * Return: 0 on success, errno otherwise
3141 */
3142static int tipc_setsockopt(struct socket *sock, int lvl, int opt,
3143			   sockptr_t ov, unsigned int ol)
3144{
3145	struct sock *sk = sock->sk;
3146	struct tipc_sock *tsk = tipc_sk(sk);
3147	struct tipc_group_req mreq;
3148	u32 value = 0;
3149	int res = 0;
3150
3151	if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
3152		return 0;
3153	if (lvl != SOL_TIPC)
3154		return -ENOPROTOOPT;
3155
3156	switch (opt) {
3157	case TIPC_IMPORTANCE:
3158	case TIPC_SRC_DROPPABLE:
3159	case TIPC_DEST_DROPPABLE:
3160	case TIPC_CONN_TIMEOUT:
3161	case TIPC_NODELAY:
3162		if (ol < sizeof(value))
3163			return -EINVAL;
3164		if (copy_from_sockptr(&value, ov, sizeof(u32)))
3165			return -EFAULT;
3166		break;
3167	case TIPC_GROUP_JOIN:
3168		if (ol < sizeof(mreq))
3169			return -EINVAL;
3170		if (copy_from_sockptr(&mreq, ov, sizeof(mreq)))
3171			return -EFAULT;
3172		break;
3173	default:
3174		if (!sockptr_is_null(ov) || ol)
3175			return -EINVAL;
3176	}
3177
3178	lock_sock(sk);
3179
3180	switch (opt) {
3181	case TIPC_IMPORTANCE:
3182		res = tsk_set_importance(sk, value);
3183		break;
3184	case TIPC_SRC_DROPPABLE:
3185		if (sock->type != SOCK_STREAM)
3186			tsk_set_unreliable(tsk, value);
3187		else
3188			res = -ENOPROTOOPT;
3189		break;
3190	case TIPC_DEST_DROPPABLE:
3191		tsk_set_unreturnable(tsk, value);
3192		break;
3193	case TIPC_CONN_TIMEOUT:
3194		tipc_sk(sk)->conn_timeout = value;
3195		break;
3196	case TIPC_MCAST_BROADCAST:
3197		tsk->mc_method.rcast = false;
3198		tsk->mc_method.mandatory = true;
3199		break;
3200	case TIPC_MCAST_REPLICAST:
3201		tsk->mc_method.rcast = true;
3202		tsk->mc_method.mandatory = true;
3203		break;
3204	case TIPC_GROUP_JOIN:
3205		res = tipc_sk_join(tsk, &mreq);
3206		break;
3207	case TIPC_GROUP_LEAVE:
3208		res = tipc_sk_leave(tsk);
3209		break;
3210	case TIPC_NODELAY:
3211		tsk->nodelay = !!value;
3212		tsk_set_nagle(tsk);
3213		break;
3214	default:
3215		res = -EINVAL;
3216	}
3217
3218	release_sock(sk);
3219
3220	return res;
3221}
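
/* Illustrative userspace sketch (not part of this file): e.g. shortening
 * the connect timeout from the 8 s default to one second:
 *
 *	__u32 ms = 1000;
 *
 *	setsockopt(sd, SOL_TIPC, TIPC_CONN_TIMEOUT, &ms, sizeof(ms));
 */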
3222
3223/**
3224 * tipc_getsockopt - get socket option
3225 * @sock: socket structure
3226 * @lvl: option level
3227 * @opt: option identifier
3228 * @ov: receptacle for option value
3229 * @ol: receptacle for length of option value
3230 *
3231 * For stream sockets only, returns 0 length result for all IPPROTO_TCP options
3232 * (to ease compatibility).
3233 *
3234 * Return: 0 on success, errno otherwise
3235 */
3236static int tipc_getsockopt(struct socket *sock, int lvl, int opt,
3237			   char __user *ov, int __user *ol)
3238{
3239	struct sock *sk = sock->sk;
3240	struct tipc_sock *tsk = tipc_sk(sk);
3241	struct tipc_service_range seq;
3242	int len, scope;
3243	u32 value;
3244	int res;
3245
3246	if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
3247		return put_user(0, ol);
3248	if (lvl != SOL_TIPC)
3249		return -ENOPROTOOPT;
3250	res = get_user(len, ol);
3251	if (res)
3252		return res;
3253
3254	lock_sock(sk);
3255
3256	switch (opt) {
3257	case TIPC_IMPORTANCE:
3258		value = tsk_importance(tsk);
3259		break;
3260	case TIPC_SRC_DROPPABLE:
3261		value = tsk_unreliable(tsk);
3262		break;
3263	case TIPC_DEST_DROPPABLE:
3264		value = tsk_unreturnable(tsk);
3265		break;
3266	case TIPC_CONN_TIMEOUT:
3267		value = tsk->conn_timeout;
3268		/* no need to set "res", since already 0 at this point */
3269		break;
3270	case TIPC_NODE_RECVQ_DEPTH:
3271		value = 0; /* was tipc_queue_size, now obsolete */
3272		break;
3273	case TIPC_SOCK_RECVQ_DEPTH:
3274		value = skb_queue_len(&sk->sk_receive_queue);
3275		break;
3276	case TIPC_SOCK_RECVQ_USED:
3277		value = sk_rmem_alloc_get(sk);
3278		break;
3279	case TIPC_GROUP_JOIN:
3280		seq.type = 0;
3281		if (tsk->group)
3282			tipc_group_self(tsk->group, &seq, &scope);
3283		value = seq.type;
3284		break;
3285	default:
3286		res = -EINVAL;
3287	}
3288
3289	release_sock(sk);
3290
3291	if (res)
3292		return res;	/* "get" failed */
3293
3294	if (len < sizeof(value))
3295		return -EINVAL;
3296
3297	if (copy_to_user(ov, &value, sizeof(value)))
3298		return -EFAULT;
3299
3300	return put_user(sizeof(value), ol);
3301}
3302
3303static int tipc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
3304{
3305	struct net *net = sock_net(sock->sk);
3306	struct tipc_sioc_nodeid_req nr = {0};
3307	struct tipc_sioc_ln_req lnr;
3308	void __user *argp = (void __user *)arg;
3309
3310	switch (cmd) {
3311	case SIOCGETLINKNAME:
3312		if (copy_from_user(&lnr, argp, sizeof(lnr)))
3313			return -EFAULT;
3314		if (!tipc_node_get_linkname(net,
3315					    lnr.bearer_id & 0xffff, lnr.peer,
3316					    lnr.linkname, TIPC_MAX_LINK_NAME)) {
3317			if (copy_to_user(argp, &lnr, sizeof(lnr)))
3318				return -EFAULT;
3319			return 0;
3320		}
3321		return -EADDRNOTAVAIL;
3322	case SIOCGETNODEID:
3323		if (copy_from_user(&nr, argp, sizeof(nr)))
3324			return -EFAULT;
3325		if (!tipc_node_get_id(net, nr.peer, nr.node_id))
3326			return -EADDRNOTAVAIL;
3327		if (copy_to_user(argp, &nr, sizeof(nr)))
3328			return -EFAULT;
3329		return 0;
3330	default:
3331		return -ENOIOCTLCMD;
3332	}
3333}
3334
3335static int tipc_socketpair(struct socket *sock1, struct socket *sock2)
3336{
3337	struct tipc_sock *tsk2 = tipc_sk(sock2->sk);
3338	struct tipc_sock *tsk1 = tipc_sk(sock1->sk);
3339	u32 onode = tipc_own_addr(sock_net(sock1->sk));
3340
3341	tsk1->peer.family = AF_TIPC;
3342	tsk1->peer.addrtype = TIPC_SOCKET_ADDR;
3343	tsk1->peer.scope = TIPC_NODE_SCOPE;
3344	tsk1->peer.addr.id.ref = tsk2->portid;
3345	tsk1->peer.addr.id.node = onode;
3346	tsk2->peer.family = AF_TIPC;
3347	tsk2->peer.addrtype = TIPC_SOCKET_ADDR;
3348	tsk2->peer.scope = TIPC_NODE_SCOPE;
3349	tsk2->peer.addr.id.ref = tsk1->portid;
3350	tsk2->peer.addr.id.node = onode;
3351
3352	tipc_sk_finish_conn(tsk1, tsk2->portid, onode);
3353	tipc_sk_finish_conn(tsk2, tsk1->portid, onode);
3354	return 0;
3355}
3356
3357/* Protocol switches for the various types of TIPC sockets */
3358
3359static const struct proto_ops msg_ops = {
3360	.owner		= THIS_MODULE,
3361	.family		= AF_TIPC,
3362	.release	= tipc_release,
3363	.bind		= tipc_bind,
3364	.connect	= tipc_connect,
3365	.socketpair	= tipc_socketpair,
3366	.accept		= sock_no_accept,
3367	.getname	= tipc_getname,
3368	.poll		= tipc_poll,
3369	.ioctl		= tipc_ioctl,
3370	.listen		= sock_no_listen,
3371	.shutdown	= tipc_shutdown,
3372	.setsockopt	= tipc_setsockopt,
3373	.getsockopt	= tipc_getsockopt,
3374	.sendmsg	= tipc_sendmsg,
3375	.recvmsg	= tipc_recvmsg,
3376	.mmap		= sock_no_mmap,
3377};
3378
3379static const struct proto_ops packet_ops = {
3380	.owner		= THIS_MODULE,
3381	.family		= AF_TIPC,
3382	.release	= tipc_release,
3383	.bind		= tipc_bind,
3384	.connect	= tipc_connect,
3385	.socketpair	= tipc_socketpair,
3386	.accept		= tipc_accept,
3387	.getname	= tipc_getname,
3388	.poll		= tipc_poll,
3389	.ioctl		= tipc_ioctl,
3390	.listen		= tipc_listen,
3391	.shutdown	= tipc_shutdown,
3392	.setsockopt	= tipc_setsockopt,
3393	.getsockopt	= tipc_getsockopt,
3394	.sendmsg	= tipc_send_packet,
3395	.recvmsg	= tipc_recvmsg,
3396	.mmap		= sock_no_mmap,
3397};
3398
3399static const struct proto_ops stream_ops = {
3400	.owner		= THIS_MODULE,
3401	.family		= AF_TIPC,
3402	.release	= tipc_release,
3403	.bind		= tipc_bind,
3404	.connect	= tipc_connect,
3405	.socketpair	= tipc_socketpair,
3406	.accept		= tipc_accept,
3407	.getname	= tipc_getname,
3408	.poll		= tipc_poll,
3409	.ioctl		= tipc_ioctl,
3410	.listen		= tipc_listen,
3411	.shutdown	= tipc_shutdown,
3412	.setsockopt	= tipc_setsockopt,
3413	.getsockopt	= tipc_getsockopt,
3414	.sendmsg	= tipc_sendstream,
3415	.recvmsg	= tipc_recvstream,
3416	.mmap		= sock_no_mmap,
3417};
3418
3419static const struct net_proto_family tipc_family_ops = {
3420	.owner		= THIS_MODULE,
3421	.family		= AF_TIPC,
3422	.create		= tipc_sk_create
3423};
3424
3425static struct proto tipc_proto = {
3426	.name		= "TIPC",
3427	.owner		= THIS_MODULE,
3428	.obj_size	= sizeof(struct tipc_sock),
3429	.sysctl_rmem	= sysctl_tipc_rmem
3430};
3431
3432/**
3433 * tipc_socket_init - initialize TIPC socket interface
3434 *
3435 * Return: 0 on success, errno otherwise
3436 */
3437int tipc_socket_init(void)
3438{
3439	int res;
3440
3441	res = proto_register(&tipc_proto, 1);
3442	if (res) {
3443		pr_err("Failed to register TIPC protocol type\n");
3444		goto out;
3445	}
3446
3447	res = sock_register(&tipc_family_ops);
3448	if (res) {
3449		pr_err("Failed to register TIPC socket type\n");
3450		proto_unregister(&tipc_proto);
3451		goto out;
3452	}
3453 out:
3454	return res;
3455}
3456
3457/**
3458 * tipc_socket_stop - stop TIPC socket interface
3459 */
3460void tipc_socket_stop(void)
3461{
3462	sock_unregister(tipc_family_ops.family);
3463	proto_unregister(&tipc_proto);
3464}
3465
3466/* Caller should hold socket lock for the passed tipc socket. */
3467static int __tipc_nl_add_sk_con(struct sk_buff *skb, struct tipc_sock *tsk)
3468{
3469	u32 peer_node, peer_port;
3470	u32 conn_type, conn_instance;
3471	struct nlattr *nest;
3472
3473	peer_node = tsk_peer_node(tsk);
3474	peer_port = tsk_peer_port(tsk);
3475	conn_type = msg_nametype(&tsk->phdr);
3476	conn_instance = msg_nameinst(&tsk->phdr);
3477	nest = nla_nest_start_noflag(skb, TIPC_NLA_SOCK_CON);
3478	if (!nest)
3479		return -EMSGSIZE;
3480
3481	if (nla_put_u32(skb, TIPC_NLA_CON_NODE, peer_node))
3482		goto msg_full;
3483	if (nla_put_u32(skb, TIPC_NLA_CON_SOCK, peer_port))
3484		goto msg_full;
3485
3486	if (tsk->conn_addrtype != 0) {
3487		if (nla_put_flag(skb, TIPC_NLA_CON_FLAG))
3488			goto msg_full;
3489		if (nla_put_u32(skb, TIPC_NLA_CON_TYPE, conn_type))
3490			goto msg_full;
3491		if (nla_put_u32(skb, TIPC_NLA_CON_INST, conn_instance))
3492			goto msg_full;
3493	}
3494	nla_nest_end(skb, nest);
3495
3496	return 0;
3497
3498msg_full:
3499	nla_nest_cancel(skb, nest);
3500
3501	return -EMSGSIZE;
3502}
3503
3504static int __tipc_nl_add_sk_info(struct sk_buff *skb, struct tipc_sock
3505			  *tsk)
3506{
3507	struct net *net = sock_net(skb->sk);
3508	struct sock *sk = &tsk->sk;
3509
3510	if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->portid) ||
3511	    nla_put_u32(skb, TIPC_NLA_SOCK_ADDR, tipc_own_addr(net)))
3512		return -EMSGSIZE;
3513
3514	if (tipc_sk_connected(sk)) {
3515		if (__tipc_nl_add_sk_con(skb, tsk))
3516			return -EMSGSIZE;
3517	} else if (!list_empty(&tsk->publications)) {
3518		if (nla_put_flag(skb, TIPC_NLA_SOCK_HAS_PUBL))
3519			return -EMSGSIZE;
3520	}
3521	return 0;
3522}
3523
3524/* Caller should hold socket lock for the passed tipc socket. */
3525static int __tipc_nl_add_sk(struct sk_buff *skb, struct netlink_callback *cb,
3526			    struct tipc_sock *tsk)
3527{
3528	struct nlattr *attrs;
3529	void *hdr;
3530
3531	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
3532			  &tipc_genl_family, NLM_F_MULTI, TIPC_NL_SOCK_GET);
3533	if (!hdr)
3534		goto msg_cancel;
3535
3536	attrs = nla_nest_start_noflag(skb, TIPC_NLA_SOCK);
3537	if (!attrs)
3538		goto genlmsg_cancel;
3539
3540	if (__tipc_nl_add_sk_info(skb, tsk))
3541		goto attr_msg_cancel;
3542
3543	nla_nest_end(skb, attrs);
3544	genlmsg_end(skb, hdr);
3545
3546	return 0;
3547
3548attr_msg_cancel:
3549	nla_nest_cancel(skb, attrs);
3550genlmsg_cancel:
3551	genlmsg_cancel(skb, hdr);
3552msg_cancel:
3553	return -EMSGSIZE;
3554}
3555
3556int tipc_nl_sk_walk(struct sk_buff *skb, struct netlink_callback *cb,
3557		    int (*skb_handler)(struct sk_buff *skb,
3558				       struct netlink_callback *cb,
3559				       struct tipc_sock *tsk))
3560{
3561	struct rhashtable_iter *iter = (void *)cb->args[4];
3562	struct tipc_sock *tsk;
3563	int err;
3564
3565	rhashtable_walk_start(iter);
3566	while ((tsk = rhashtable_walk_next(iter)) != NULL) {
3567		if (IS_ERR(tsk)) {
3568			err = PTR_ERR(tsk);
3569			if (err == -EAGAIN) {
3570				err = 0;
3571				continue;
3572			}
3573			break;
3574		}
3575
3576		sock_hold(&tsk->sk);
3577		rhashtable_walk_stop(iter);
3578		lock_sock(&tsk->sk);
3579		err = skb_handler(skb, cb, tsk);
3580		if (err) {
3581			release_sock(&tsk->sk);
3582			sock_put(&tsk->sk);
3583			goto out;
3584		}
3585		release_sock(&tsk->sk);
3586		rhashtable_walk_start(iter);
3587		sock_put(&tsk->sk);
3588	}
3589	rhashtable_walk_stop(iter);
3590out:
3591	return skb->len;
3592}
3593EXPORT_SYMBOL(tipc_nl_sk_walk);
3594
3595int tipc_dump_start(struct netlink_callback *cb)
3596{
3597	return __tipc_dump_start(cb, sock_net(cb->skb->sk));
3598}
3599EXPORT_SYMBOL(tipc_dump_start);
3600
3601int __tipc_dump_start(struct netlink_callback *cb, struct net *net)
3602{
3603	/* tipc_nl_name_table_dump() uses cb->args[0...3]. */
3604	struct rhashtable_iter *iter = (void *)cb->args[4];
3605	struct tipc_net *tn = tipc_net(net);
3606
3607	if (!iter) {
3608		iter = kmalloc(sizeof(*iter), GFP_KERNEL);
3609		if (!iter)
3610			return -ENOMEM;
3611
3612		cb->args[4] = (long)iter;
3613	}
3614
3615	rhashtable_walk_enter(&tn->sk_rht, iter);
3616	return 0;
3617}
3618
3619int tipc_dump_done(struct netlink_callback *cb)
3620{
3621	struct rhashtable_iter *hti = (void *)cb->args[4];
3622
3623	rhashtable_walk_exit(hti);
3624	kfree(hti);
3625	return 0;
3626}
3627EXPORT_SYMBOL(tipc_dump_done);
3628
3629int tipc_sk_fill_sock_diag(struct sk_buff *skb, struct netlink_callback *cb,
3630			   struct tipc_sock *tsk, u32 sk_filter_state,
3631			   u64 (*tipc_diag_gen_cookie)(struct sock *sk))
3632{
3633	struct sock *sk = &tsk->sk;
3634	struct nlattr *attrs;
3635	struct nlattr *stat;
3636
3637	/* Filter the response w.r.t. sk_state */
3638	if (!(sk_filter_state & (1 << sk->sk_state)))
3639		return 0;
3640
3641	attrs = nla_nest_start_noflag(skb, TIPC_NLA_SOCK);
3642	if (!attrs)
3643		goto msg_cancel;
3644
3645	if (__tipc_nl_add_sk_info(skb, tsk))
3646		goto attr_msg_cancel;
3647
3648	if (nla_put_u32(skb, TIPC_NLA_SOCK_TYPE, (u32)sk->sk_type) ||
3649	    nla_put_u32(skb, TIPC_NLA_SOCK_TIPC_STATE, (u32)sk->sk_state) ||
3650	    nla_put_u32(skb, TIPC_NLA_SOCK_INO, sock_i_ino(sk)) ||
3651	    nla_put_u32(skb, TIPC_NLA_SOCK_UID,
3652			from_kuid_munged(sk_user_ns(NETLINK_CB(cb->skb).sk),
3653					 sock_i_uid(sk))) ||
3654	    nla_put_u64_64bit(skb, TIPC_NLA_SOCK_COOKIE,
3655			      tipc_diag_gen_cookie(sk),
3656			      TIPC_NLA_SOCK_PAD))
3657		goto attr_msg_cancel;
3658
3659	stat = nla_nest_start_noflag(skb, TIPC_NLA_SOCK_STAT);
3660	if (!stat)
3661		goto attr_msg_cancel;
3662
3663	if (nla_put_u32(skb, TIPC_NLA_SOCK_STAT_RCVQ,
3664			skb_queue_len(&sk->sk_receive_queue)) ||
3665	    nla_put_u32(skb, TIPC_NLA_SOCK_STAT_SENDQ,
3666			skb_queue_len(&sk->sk_write_queue)) ||
3667	    nla_put_u32(skb, TIPC_NLA_SOCK_STAT_DROP,
3668			atomic_read(&sk->sk_drops)))
3669		goto stat_msg_cancel;
3670
3671	if (tsk->cong_link_cnt &&
3672	    nla_put_flag(skb, TIPC_NLA_SOCK_STAT_LINK_CONG))
3673		goto stat_msg_cancel;
3674
3675	if (tsk_conn_cong(tsk) &&
3676	    nla_put_flag(skb, TIPC_NLA_SOCK_STAT_CONN_CONG))
3677		goto stat_msg_cancel;
3678
3679	nla_nest_end(skb, stat);
3680
3681	if (tsk->group)
3682		if (tipc_group_fill_sock_diag(tsk->group, skb))
3683			goto stat_msg_cancel;
3684
3685	nla_nest_end(skb, attrs);
3686
3687	return 0;
3688
3689stat_msg_cancel:
3690	nla_nest_cancel(skb, stat);
3691attr_msg_cancel:
3692	nla_nest_cancel(skb, attrs);
3693msg_cancel:
3694	return -EMSGSIZE;
3695}
3696EXPORT_SYMBOL(tipc_sk_fill_sock_diag);
3697
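/* Illustrative only: sk_filter_state above is a bitmask of socket states;
 * a hypothetical diag requester interested in established and listening
 * sockets would pass a mask built like this, matching the
 * "1 << sk->sk_state" test at the top of tipc_sk_fill_sock_diag().
 */
#if 0
static u32 example_diag_state_mask(void)
{
	return (1U << TIPC_ESTABLISHED) | (1U << TIPC_LISTEN);
}
#endif
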
3698int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb)
3699{
3700	return tipc_nl_sk_walk(skb, cb, __tipc_nl_add_sk);
3701}
3702
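/* A hedged wiring sketch (hypothetical registration; the real genl ops
 * table lives elsewhere): the walk/start/done helpers above are designed
 * to be used together as a netlink dump triple, with the rhashtable
 * iterator surviving between invocations in cb->args[4].
 */
#if 0
static const struct genl_ops example_sock_get_op = {
	.cmd	= TIPC_NL_SOCK_GET,
	.start	= tipc_dump_start,
	.dumpit	= tipc_nl_sk_dump,
	.done	= tipc_dump_done,
};
#endif
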
3703/* Caller should hold socket lock for the passed tipc socket. */
3704static int __tipc_nl_add_sk_publ(struct sk_buff *skb,
3705				 struct netlink_callback *cb,
3706				 struct publication *publ)
3707{
3708	void *hdr;
3709	struct nlattr *attrs;
3710
3711	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
3712			  &tipc_genl_family, NLM_F_MULTI, TIPC_NL_PUBL_GET);
3713	if (!hdr)
3714		goto msg_cancel;
3715
3716	attrs = nla_nest_start_noflag(skb, TIPC_NLA_PUBL);
3717	if (!attrs)
3718		goto genlmsg_cancel;
3719
3720	if (nla_put_u32(skb, TIPC_NLA_PUBL_KEY, publ->key))
3721		goto attr_msg_cancel;
3722	if (nla_put_u32(skb, TIPC_NLA_PUBL_TYPE, publ->sr.type))
3723		goto attr_msg_cancel;
3724	if (nla_put_u32(skb, TIPC_NLA_PUBL_LOWER, publ->sr.lower))
3725		goto attr_msg_cancel;
3726	if (nla_put_u32(skb, TIPC_NLA_PUBL_UPPER, publ->sr.upper))
3727		goto attr_msg_cancel;
3728
3729	nla_nest_end(skb, attrs);
3730	genlmsg_end(skb, hdr);
3731
3732	return 0;
3733
3734attr_msg_cancel:
3735	nla_nest_cancel(skb, attrs);
3736genlmsg_cancel:
3737	genlmsg_cancel(skb, hdr);
3738msg_cancel:
3739	return -EMSGSIZE;
3740}
3741
3742/* Caller should hold socket lock for the passed tipc socket. */
3743static int __tipc_nl_list_sk_publ(struct sk_buff *skb,
3744				  struct netlink_callback *cb,
3745				  struct tipc_sock *tsk, u32 *last_publ)
3746{
3747	int err;
3748	struct publication *p;
3749
3750	if (*last_publ) {
3751		list_for_each_entry(p, &tsk->publications, binding_sock) {
3752			if (p->key == *last_publ)
3753				break;
3754		}
3755		if (list_entry_is_head(p, &tsk->publications, binding_sock)) {
3756			/* We never set seq or call nl_dump_check_consistent(),
3757			 * so setting prev_seq here will cause the
3758			 * consistency check to fail in the netlink callback
3759			 * handler, resulting in the last NLMSG_DONE message
3760			 * having the NLM_F_DUMP_INTR flag set.
3761			 */
3762			cb->prev_seq = 1;
3763			*last_publ = 0;
3764			return -EPIPE;
3765		}
3766	} else {
3767		p = list_first_entry(&tsk->publications, struct publication,
3768				     binding_sock);
3769	}
3770
3771	list_for_each_entry_from(p, &tsk->publications, binding_sock) {
3772		err = __tipc_nl_add_sk_publ(skb, cb, p);
3773		if (err) {
3774			*last_publ = p->key;
3775			return err;
3776		}
3777	}
3778	*last_publ = 0;
3779
3780	return 0;
3781}
3782
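/* Dump resumption, in brief: state survives between dump callbacks in
 * cb->args[]: args[0] holds the socket portid, args[1] the key of the
 * first publication that did not fit into the previous skb, and args[2]
 * the done flag. A re-invocation therefore resumes the listing right
 * after *last_publ instead of restarting at the list head.
 */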
3783int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb)
3784{
3785	int err;
3786	u32 tsk_portid = cb->args[0];
3787	u32 last_publ = cb->args[1];
3788	u32 done = cb->args[2];
3789	struct net *net = sock_net(skb->sk);
3790	struct tipc_sock *tsk;
3791
3792	if (!tsk_portid) {
3793		struct nlattr **attrs = genl_dumpit_info(cb)->info.attrs;
3794		struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1];
3795
3796		if (!attrs[TIPC_NLA_SOCK])
3797			return -EINVAL;
3798
3799		err = nla_parse_nested_deprecated(sock, TIPC_NLA_SOCK_MAX,
3800						  attrs[TIPC_NLA_SOCK],
3801						  tipc_nl_sock_policy, NULL);
3802		if (err)
3803			return err;
3804
3805		if (!sock[TIPC_NLA_SOCK_REF])
3806			return -EINVAL;
3807
3808		tsk_portid = nla_get_u32(sock[TIPC_NLA_SOCK_REF]);
3809	}
3810
3811	if (done)
3812		return 0;
3813
3814	tsk = tipc_sk_lookup(net, tsk_portid);
3815	if (!tsk)
3816		return -EINVAL;
3817
3818	lock_sock(&tsk->sk);
3819	err = __tipc_nl_list_sk_publ(skb, cb, tsk, &last_publ);
3820	if (!err)
3821		done = 1;
3822	release_sock(&tsk->sk);
3823	sock_put(&tsk->sk);
3824
3825	cb->args[0] = tsk_portid;
3826	cb->args[1] = last_publ;
3827	cb->args[2] = done;
3828
3829	return skb->len;
3830}
3831
3832/**
3833 * tipc_sk_filtering - check if a socket should be traced
3834 * @sk: the socket to be examined
3835 *
3836 * @sysctl_tipc_sk_filter is used as the socket tuple for filtering:
3837 * (portid, sock type, name type, name lower, name upper)
3838 *
3839 * Return: true if the socket matches the filter tuple (a field value
3840 * of 0 means 'any'), or when no tuple is set at all (all fields 0);
3841 * otherwise false
3842 */
3843bool tipc_sk_filtering(struct sock *sk)
3844{
3845	struct tipc_sock *tsk;
3846	struct publication *p;
3847	u32 _port, _sktype, _type, _lower, _upper;
3848	u32 type = 0, lower = 0, upper = 0;
3849
3850	if (!sk)
3851		return true;
3852
3853	tsk = tipc_sk(sk);
3854
3855	_port = sysctl_tipc_sk_filter[0];
3856	_sktype = sysctl_tipc_sk_filter[1];
3857	_type = sysctl_tipc_sk_filter[2];
3858	_lower = sysctl_tipc_sk_filter[3];
3859	_upper = sysctl_tipc_sk_filter[4];
3860
3861	if (!_port && !_sktype && !_type && !_lower && !_upper)
3862		return true;
3863
3864	if (_port)
3865		return (_port == tsk->portid);
3866
3867	if (_sktype && _sktype != sk->sk_type)
3868		return false;
3869
3870	if (tsk->published) {
3871		p = list_first_entry_or_null(&tsk->publications,
3872					     struct publication, binding_sock);
3873		if (p) {
3874			type = p->sr.type;
3875			lower = p->sr.lower;
3876			upper = p->sr.upper;
3877		}
3878	}
3879
3880	if (!tipc_sk_type_connectionless(sk)) {
3881		type = msg_nametype(&tsk->phdr);
3882		lower = msg_nameinst(&tsk->phdr);
3883		upper = lower;
3884	}
3885
3886	if ((_type && _type != type) || (_lower && _lower != lower) ||
3887	    (_upper && _upper != upper))
3888		return false;
3889
3890	return true;
3891}
3892
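/* Usage sketch, assuming the tuple is exposed as the net.tipc.sk_filter
 * sysctl (registered in sysctl.c): tracing only the socket with port id
 * 42 would then be enabled with
 *
 *   sysctl -w net.tipc.sk_filter="42 0 0 0 0"
 *
 * while the default "0 0 0 0 0" matches every socket.
 */
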
3893u32 tipc_sock_get_portid(struct sock *sk)
3894{
3895	return (sk) ? (tipc_sk(sk))->portid : 0;
3896}
3897
3898/**
3899 * tipc_sk_overlimit1 - check if socket rx queue is about to be overloaded;
3900 *			both the rcv and backlog queues are considered
3901 * @sk: tipc sk to be checked
3902 * @skb: tipc msg to be checked
3903 *
3904 * Return: true if the socket rx queue allocation is > 90%, otherwise false
3905 */
3906
3907bool tipc_sk_overlimit1(struct sock *sk, struct sk_buff *skb)
3908{
3909	atomic_t *dcnt = &tipc_sk(sk)->dupl_rcvcnt;
3910	unsigned int lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
3911	unsigned int qsize = sk->sk_backlog.len + sk_rmem_alloc_get(sk);
3912
3913	return (qsize > lim * 90 / 100);
3914}
3915
3916/**
3917 * tipc_sk_overlimit2 - check if socket rx queue is about to be overloaded;
3918 *			only the rcv queue is considered
3919 * @sk: tipc sk to be checked
3920 * @skb: tipc msg to be checked
3921 *
3922 * Return: true if the socket rx queue allocation is > 90%, otherwise false
3923 */
3924
3925bool tipc_sk_overlimit2(struct sock *sk, struct sk_buff *skb)
3926{
3927	unsigned int lim = rcvbuf_limit(sk, skb);
3928	unsigned int qsize = sk_rmem_alloc_get(sk);
3929
3930	return (qsize > lim * 90 / 100);
3931}
3932
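/* Worked example for the two checks above: with a computed limit of
 * 64 KiB and no duplicate-receive credit, the threshold is
 * 65536 * 90 / 100 = 58982 bytes; any larger queue allocation makes
 * the overlimit predicates return true.
 */
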
3933/**
3934 * tipc_sk_dump - dump TIPC socket
3935 * @sk: tipc sk to be dumped
3936 * @dqueues: bitmask selecting which socket queues, if any, to dump:
3937 *           - TIPC_DUMP_NONE: don't dump socket queues
3938 *           - TIPC_DUMP_SK_SNDQ: dump socket send queue
3939 *           - TIPC_DUMP_SK_RCVQ: dump socket rcv queue
3940 *           - TIPC_DUMP_SK_BKLGQ: dump socket backlog queue
3941 *           - TIPC_DUMP_ALL: dump all the socket queues above
3942 * @buf: buffer to which the formatted dump data is written
3943 */
3944int tipc_sk_dump(struct sock *sk, u16 dqueues, char *buf)
3945{
3946	int i = 0;
3947	size_t sz = (dqueues) ? SK_LMAX : SK_LMIN;
3948	u32 conn_type, conn_instance;
3949	struct tipc_sock *tsk;
3950	struct publication *p;
3951	bool tsk_connected;
3952
3953	if (!sk) {
3954		i += scnprintf(buf, sz, "sk data: (null)\n");
3955		return i;
3956	}
3957
3958	tsk = tipc_sk(sk);
3959	tsk_connected = !tipc_sk_type_connectionless(sk);
3960
3961	i += scnprintf(buf, sz, "sk data: %u", sk->sk_type);
3962	i += scnprintf(buf + i, sz - i, " %d", sk->sk_state);
3963	i += scnprintf(buf + i, sz - i, " %x", tsk_own_node(tsk));
3964	i += scnprintf(buf + i, sz - i, " %u", tsk->portid);
3965	i += scnprintf(buf + i, sz - i, " | %u", tsk_connected);
3966	if (tsk_connected) {
3967		i += scnprintf(buf + i, sz - i, " %x", tsk_peer_node(tsk));
3968		i += scnprintf(buf + i, sz - i, " %u", tsk_peer_port(tsk));
3969		conn_type = msg_nametype(&tsk->phdr);
3970		conn_instance = msg_nameinst(&tsk->phdr);
3971		i += scnprintf(buf + i, sz - i, " %u", conn_type);
3972		i += scnprintf(buf + i, sz - i, " %u", conn_instance);
3973	}
3974	i += scnprintf(buf + i, sz - i, " | %u", tsk->published);
3975	if (tsk->published) {
3976		p = list_first_entry_or_null(&tsk->publications,
3977					     struct publication, binding_sock);
3978		i += scnprintf(buf + i, sz - i, " %u", (p) ? p->sr.type : 0);
3979		i += scnprintf(buf + i, sz - i, " %u", (p) ? p->sr.lower : 0);
3980		i += scnprintf(buf + i, sz - i, " %u", (p) ? p->sr.upper : 0);
3981	}
3982	i += scnprintf(buf + i, sz - i, " | %u", tsk->snd_win);
3983	i += scnprintf(buf + i, sz - i, " %u", tsk->rcv_win);
3984	i += scnprintf(buf + i, sz - i, " %u", tsk->max_pkt);
3985	i += scnprintf(buf + i, sz - i, " %x", tsk->peer_caps);
3986	i += scnprintf(buf + i, sz - i, " %u", tsk->cong_link_cnt);
3987	i += scnprintf(buf + i, sz - i, " %u", tsk->snt_unacked);
3988	i += scnprintf(buf + i, sz - i, " %u", tsk->rcv_unacked);
3989	i += scnprintf(buf + i, sz - i, " %u", atomic_read(&tsk->dupl_rcvcnt));
3990	i += scnprintf(buf + i, sz - i, " %u", sk->sk_shutdown);
3991	i += scnprintf(buf + i, sz - i, " | %d", sk_wmem_alloc_get(sk));
3992	i += scnprintf(buf + i, sz - i, " %d", sk->sk_sndbuf);
3993	i += scnprintf(buf + i, sz - i, " | %d", sk_rmem_alloc_get(sk));
3994	i += scnprintf(buf + i, sz - i, " %d", sk->sk_rcvbuf);
3995	i += scnprintf(buf + i, sz - i, " | %d\n", READ_ONCE(sk->sk_backlog.len));
3996
3997	if (dqueues & TIPC_DUMP_SK_SNDQ) {
3998		i += scnprintf(buf + i, sz - i, "sk_write_queue: ");
3999		i += tipc_list_dump(&sk->sk_write_queue, false, buf + i);
4000	}
4001
4002	if (dqueues & TIPC_DUMP_SK_RCVQ) {
4003		i += scnprintf(buf + i, sz - i, "sk_receive_queue: ");
4004		i += tipc_list_dump(&sk->sk_receive_queue, false, buf + i);
4005	}
4006
4007	if (dqueues & TIPC_DUMP_SK_BKLGQ) {
4008		i += scnprintf(buf + i, sz - i, "sk_backlog:\n  head ");
4009		i += tipc_skb_dump(sk->sk_backlog.head, false, buf + i);
4010		if (sk->sk_backlog.tail != sk->sk_backlog.head) {
4011			i += scnprintf(buf + i, sz - i, "  tail ");
4012			i += tipc_skb_dump(sk->sk_backlog.tail, false,
4013					   buf + i);
4014		}
4015	}
4016
4017	return i;
4018}
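
/* A usage sketch (hypothetical caller; SK_LMAX matches the buffer size
 * this function assumes when any queue dump is requested):
 */
#if 0
static void example_dump_sk(struct sock *sk)
{
	static char buf[SK_LMAX];	/* static: avoids a large stack frame */

	tipc_sk_dump(sk, TIPC_DUMP_ALL, buf);
	pr_info("%s", buf);
}
#endif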