/*
 * net/tipc/socket.c: TIPC socket API
 *
 * Copyright (c) 2001-2007, 2012-2019, Ericsson AB
 * Copyright (c) 2004-2008, 2010-2013, Wind River Systems
 * Copyright (c) 2020-2021, Red Hat Inc
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/rhashtable.h>
#include <linux/sched/signal.h>

#include "core.h"
#include "name_table.h"
#include "node.h"
#include "link.h"
#include "name_distr.h"
#include "socket.h"
#include "bcast.h"
#include "netlink.h"
#include "group.h"
#include "trace.h"

#define NAGLE_START_INIT	4
#define NAGLE_START_MAX		1024
#define CONN_TIMEOUT_DEFAULT	8000	/* default connect timeout = 8s */
#define CONN_PROBING_INTV	msecs_to_jiffies(3600000)  /* [ms] => 1 h */
#define TIPC_MAX_PORT		0xffffffff
#define TIPC_MIN_PORT		1
#define TIPC_ACK_RATE		4	/* ACK at 1/4 of rcv window size */

enum {
	TIPC_LISTEN = TCP_LISTEN,
	TIPC_ESTABLISHED = TCP_ESTABLISHED,
	TIPC_OPEN = TCP_CLOSE,
	TIPC_DISCONNECTING = TCP_CLOSE_WAIT,
	TIPC_CONNECTING = TCP_SYN_SENT,
};

struct sockaddr_pair {
	struct sockaddr_tipc sock;
	struct sockaddr_tipc member;
};

/**
 * struct tipc_sock - TIPC socket structure
 * @sk: socket - interacts with 'port' and with user via the socket API
 * @max_pkt: maximum packet size "hint" used when building messages sent by port
 * @maxnagle: maximum size of msg which can be subject to nagle
 * @portid: unique port identity in TIPC socket hash table
 * @phdr: preformatted message header used when sending messages
 * @cong_links: list of congested links
 * @publications: list of publications for port
 * @blocking_link: address of the congested link we are currently sleeping on
 * @pub_count: total # of publications port has made during its lifetime
 * @conn_timeout: the time we can wait for an unresponded setup request
 * @probe_unacked: probe has not received ack yet
 * @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue
 * @cong_link_cnt: number of congested links
 * @snt_unacked: # messages sent by socket, and not yet acked by peer
 * @snd_win: send window size
 * @peer_caps: peer capabilities mask
 * @rcv_unacked: # messages read by user, but not yet acked back to peer
 * @rcv_win: receive window size
 * @peer: 'connected' peer for dgram/rdm
 * @node: hash table node
 * @mc_method: cookie for use between socket and broadcast layer
 * @rcu: rcu struct for tipc_sock
 * @group: TIPC communications group
 * @oneway: message count in one direction (FIXME)
 * @nagle_start: current nagle value
 * @snd_backlog: send backlog count
 * @msg_acc: messages accepted; used in managing backlog and nagle
 * @pkt_cnt: TIPC socket packet count
 * @expect_ack: whether this TIPC socket is expecting an ack
 * @nodelay: setsockopt() TIPC_NODELAY setting
 * @group_is_open: TIPC socket group is fully open (FIXME)
 * @published: true if port has one or more associated names
 * @conn_addrtype: address type used when establishing connection
 */
struct tipc_sock {
	struct sock sk;
	u32 max_pkt;
	u32 maxnagle;
	u32 portid;
	struct tipc_msg phdr;
	struct list_head cong_links;
	struct list_head publications;
	u32 pub_count;
	atomic_t dupl_rcvcnt;
	u16 conn_timeout;
	bool probe_unacked;
	u16 cong_link_cnt;
	u16 snt_unacked;
	u16 snd_win;
	u16 peer_caps;
	u16 rcv_unacked;
	u16 rcv_win;
	struct sockaddr_tipc peer;
	struct rhash_head node;
	struct tipc_mc_method mc_method;
	struct rcu_head rcu;
	struct tipc_group *group;
	u32 oneway;
	u32 nagle_start;
	u16 snd_backlog;
	u16 msg_acc;
	u16 pkt_cnt;
	bool expect_ack;
	bool nodelay;
	bool group_is_open;
	bool published;
	u8 conn_addrtype;
};

static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
static void tipc_data_ready(struct sock *sk);
static void tipc_write_space(struct sock *sk);
static void tipc_sock_destruct(struct sock *sk);
static int tipc_release(struct socket *sock);
static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
		       bool kern);
static void tipc_sk_timeout(struct timer_list *t);
static int tipc_sk_publish(struct tipc_sock *tsk, struct tipc_uaddr *ua);
static int tipc_sk_withdraw(struct tipc_sock *tsk, struct tipc_uaddr *ua);
static int tipc_sk_leave(struct tipc_sock *tsk);
static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid);
static int tipc_sk_insert(struct tipc_sock *tsk);
static void tipc_sk_remove(struct tipc_sock *tsk);
static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz);
static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz);
static void tipc_sk_push_backlog(struct tipc_sock *tsk, bool nagle_ack);
static int tipc_wait_for_connect(struct socket *sock, long *timeo_p);

static const struct proto_ops packet_ops;
static const struct proto_ops stream_ops;
static const struct proto_ops msg_ops;
static struct proto tipc_proto;
static const struct rhashtable_params tsk_rht_params;

static u32 tsk_own_node(struct tipc_sock *tsk)
{
	return msg_prevnode(&tsk->phdr);
}

static u32 tsk_peer_node(struct tipc_sock *tsk)
{
	return msg_destnode(&tsk->phdr);
}

static u32 tsk_peer_port(struct tipc_sock *tsk)
{
	return msg_destport(&tsk->phdr);
}

static bool tsk_unreliable(struct tipc_sock *tsk)
{
	return msg_src_droppable(&tsk->phdr) != 0;
}

static void tsk_set_unreliable(struct tipc_sock *tsk, bool unreliable)
{
	msg_set_src_droppable(&tsk->phdr, unreliable ? 1 : 0);
}

static bool tsk_unreturnable(struct tipc_sock *tsk)
{
	return msg_dest_droppable(&tsk->phdr) != 0;
}

static void tsk_set_unreturnable(struct tipc_sock *tsk, bool unreturnable)
{
	msg_set_dest_droppable(&tsk->phdr, unreturnable ? 1 : 0);
}

static int tsk_importance(struct tipc_sock *tsk)
{
	return msg_importance(&tsk->phdr);
}

static struct tipc_sock *tipc_sk(const struct sock *sk)
{
	return container_of(sk, struct tipc_sock, sk);
}

int tsk_set_importance(struct sock *sk, int imp)
{
	if (imp > TIPC_CRITICAL_IMPORTANCE)
		return -EINVAL;
	msg_set_importance(&tipc_sk(sk)->phdr, (u32)imp);
	return 0;
}

static bool tsk_conn_cong(struct tipc_sock *tsk)
{
	return tsk->snt_unacked > tsk->snd_win;
}

static u16 tsk_blocks(int len)
{
	return ((len / FLOWCTL_BLK_SZ) + 1);
}

/* tsk_adv_blocks(): translate a buffer size in bytes to the number of
 * advertisable blocks, taking into account the ratio truesize(len)/len.
 * We can trust that this ratio is always < 4 for len >= FLOWCTL_BLK_SZ.
 */
static u16 tsk_adv_blocks(int len)
{
	return len / FLOWCTL_BLK_SZ / 4;
}
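
/* A worked example (a reading of the formula above, assuming
 * FLOWCTL_BLK_SZ is 1024 bytes as defined in socket.h): a 64 KB buffer
 * advertises 65536 / 1024 / 4 = 16 blocks, i.e. a 4x safety margin for
 * the truesize(len)/len overhead of the queued buffers.
 */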

/* tsk_inc(): increment counter for sent or received data
 * - If block-based flow control is not supported by the peer we
 *   fall back to message-based flow control, incrementing the
 *   counter by one per message
 */
static u16 tsk_inc(struct tipc_sock *tsk, int msglen)
{
	if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
		return ((msglen / FLOWCTL_BLK_SZ) + 1);
	return 1;
}
251
252/* tsk_set_nagle - enable/disable nagle property by manipulating maxnagle
253 */
254static void tsk_set_nagle(struct tipc_sock *tsk)
255{
256 struct sock *sk = &tsk->sk;
257
258 tsk->maxnagle = 0;
259 if (sk->sk_type != SOCK_STREAM)
260 return;
261 if (tsk->nodelay)
262 return;
263 if (!(tsk->peer_caps & TIPC_NAGLE))
264 return;
265 /* Limit node local buffer size to avoid receive queue overflow */
266 if (tsk->max_pkt == MAX_MSG_SIZE)
267 tsk->maxnagle = 1500;
268 else
269 tsk->maxnagle = tsk->max_pkt;
270}
271
272/**
273 * tsk_advance_rx_queue - discard first buffer in socket receive queue
274 * @sk: network socket
275 *
276 * Caller must hold socket lock
277 */
278static void tsk_advance_rx_queue(struct sock *sk)
279{
280 trace_tipc_sk_advance_rx(sk, NULL, TIPC_DUMP_SK_RCVQ, " ");
281 kfree_skb(__skb_dequeue(&sk->sk_receive_queue));
282}
283
284/* tipc_sk_respond() : send response message back to sender
285 */
286static void tipc_sk_respond(struct sock *sk, struct sk_buff *skb, int err)
287{
288 u32 selector;
289 u32 dnode;
290 u32 onode = tipc_own_addr(sock_net(sk));
291
292 if (!tipc_msg_reverse(onode, &skb, err))
293 return;
294
295 trace_tipc_sk_rej_msg(sk, skb, TIPC_DUMP_NONE, "@sk_respond!");
296 dnode = msg_destnode(buf_msg(skb));
297 selector = msg_origport(buf_msg(skb));
298 tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector);
299}
300
301/**
302 * tsk_rej_rx_queue - reject all buffers in socket receive queue
303 * @sk: network socket
304 * @error: response error code
305 *
306 * Caller must hold socket lock
307 */
308static void tsk_rej_rx_queue(struct sock *sk, int error)
309{
310 struct sk_buff *skb;
311
312 while ((skb = __skb_dequeue(&sk->sk_receive_queue)))
313 tipc_sk_respond(sk, skb, error);
314}
315
316static bool tipc_sk_connected(struct sock *sk)
317{
318 return sk->sk_state == TIPC_ESTABLISHED;
319}
320
/* tipc_sk_type_connectionless - check if the socket is a datagram socket
 * @sk: socket
 *
 * Returns true if connectionless, false otherwise
 */
static bool tipc_sk_type_connectionless(struct sock *sk)
{
	return sk->sk_type == SOCK_RDM || sk->sk_type == SOCK_DGRAM;
}

/* tsk_peer_msg - verify if message was sent by connected port's peer
 *
 * Handles cases where the node's network address has changed from
 * the default of <0.0.0> to its configured setting.
 */
static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg)
{
	struct sock *sk = &tsk->sk;
	u32 self = tipc_own_addr(sock_net(sk));
	u32 peer_port = tsk_peer_port(tsk);
	u32 orig_node, peer_node;

	if (unlikely(!tipc_sk_connected(sk)))
		return false;

	if (unlikely(msg_origport(msg) != peer_port))
		return false;

	orig_node = msg_orignode(msg);
	peer_node = tsk_peer_node(tsk);

	if (likely(orig_node == peer_node))
		return true;

	if (!orig_node && peer_node == self)
		return true;

	if (!peer_node && orig_node == self)
		return true;

	return false;
}

/* tipc_set_sk_state - set the sk_state of the socket
 * @sk: socket
 *
 * Caller must hold socket lock
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_set_sk_state(struct sock *sk, int state)
{
	int oldsk_state = sk->sk_state;
	int res = -EINVAL;

	switch (state) {
	case TIPC_OPEN:
		res = 0;
		break;
	case TIPC_LISTEN:
	case TIPC_CONNECTING:
		if (oldsk_state == TIPC_OPEN)
			res = 0;
		break;
	case TIPC_ESTABLISHED:
		if (oldsk_state == TIPC_CONNECTING ||
		    oldsk_state == TIPC_OPEN)
			res = 0;
		break;
	case TIPC_DISCONNECTING:
		if (oldsk_state == TIPC_CONNECTING ||
		    oldsk_state == TIPC_ESTABLISHED)
			res = 0;
		break;
	}

	if (!res)
		sk->sk_state = state;

	return res;
}
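
/* For reference, the transitions permitted by the switch above are:
 *
 *	any state	 -> TIPC_OPEN
 *	TIPC_OPEN	 -> TIPC_LISTEN, TIPC_CONNECTING, TIPC_ESTABLISHED
 *	TIPC_CONNECTING	 -> TIPC_ESTABLISHED, TIPC_DISCONNECTING
 *	TIPC_ESTABLISHED -> TIPC_DISCONNECTING
 */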

static int tipc_sk_sock_err(struct socket *sock, long *timeout)
{
	struct sock *sk = sock->sk;
	int err = sock_error(sk);
	int typ = sock->type;

	if (err)
		return err;
	if (typ == SOCK_STREAM || typ == SOCK_SEQPACKET) {
		if (sk->sk_state == TIPC_DISCONNECTING)
			return -EPIPE;
		else if (!tipc_sk_connected(sk))
			return -ENOTCONN;
	}
	if (!*timeout)
		return -EAGAIN;
	if (signal_pending(current))
		return sock_intr_errno(*timeout);

	return 0;
}

#define tipc_wait_for_cond(sock_, timeo_, condition_)			       \
({									       \
	DEFINE_WAIT_FUNC(wait_, woken_wake_function);			       \
	struct sock *sk_;						       \
	int rc_;							       \
									       \
	while ((rc_ = !(condition_))) {					       \
		/* coupled with smp_wmb() in tipc_sk_proto_rcv() */	       \
		smp_rmb();						       \
		sk_ = (sock_)->sk;					       \
		rc_ = tipc_sk_sock_err((sock_), timeo_);		       \
		if (rc_)						       \
			break;						       \
		add_wait_queue(sk_sleep(sk_), &wait_);			       \
		release_sock(sk_);					       \
		*(timeo_) = wait_woken(&wait_, TASK_INTERRUPTIBLE, *(timeo_)); \
		sched_annotate_sleep();					       \
		lock_sock(sk_);						       \
		remove_wait_queue(sk_sleep(sk_), &wait_);		       \
	}								       \
	rc_;								       \
})
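
/* Illustrative use of tipc_wait_for_cond() (a sketch of the pattern used
 * throughout this file): the caller holds the socket lock and passes the
 * remaining timeout in jiffies; a zero result means the condition now
 * holds, anything else is an errno from tipc_sk_sock_err() above:
 *
 *	rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt);
 *	if (rc)
 *		return rc;
 */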

/**
 * tipc_sk_create - create a TIPC socket
 * @net: network namespace (must be default network)
 * @sock: pre-allocated socket structure
 * @protocol: protocol indicator (must be 0)
 * @kern: caused by kernel or by userspace?
 *
 * This routine creates additional data structures used by the TIPC socket,
 * initializes them, and links them together.
 *
 * Return: 0 on success, errno otherwise
 */
static int tipc_sk_create(struct net *net, struct socket *sock,
			  int protocol, int kern)
{
	const struct proto_ops *ops;
	struct sock *sk;
	struct tipc_sock *tsk;
	struct tipc_msg *msg;

	/* Validate arguments */
	if (unlikely(protocol != 0))
		return -EPROTONOSUPPORT;

	switch (sock->type) {
	case SOCK_STREAM:
		ops = &stream_ops;
		break;
	case SOCK_SEQPACKET:
		ops = &packet_ops;
		break;
	case SOCK_DGRAM:
	case SOCK_RDM:
		ops = &msg_ops;
		break;
	default:
		return -EPROTOTYPE;
	}

	/* Allocate socket's protocol area */
	sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto, kern);
	if (sk == NULL)
		return -ENOMEM;

	tsk = tipc_sk(sk);
	tsk->max_pkt = MAX_PKT_DEFAULT;
	tsk->maxnagle = 0;
	tsk->nagle_start = NAGLE_START_INIT;
	INIT_LIST_HEAD(&tsk->publications);
	INIT_LIST_HEAD(&tsk->cong_links);
	msg = &tsk->phdr;

	/* Finish initializing socket data structures */
	sock->ops = ops;
	sock_init_data(sock, sk);
	tipc_set_sk_state(sk, TIPC_OPEN);
	if (tipc_sk_insert(tsk)) {
		sk_free(sk);
		pr_warn("Socket create failed; port number exhausted\n");
		return -EINVAL;
	}

	/* Ensure tsk is visible before we read own_addr. */
	smp_mb();

	tipc_msg_init(tipc_own_addr(net), msg, TIPC_LOW_IMPORTANCE,
		      TIPC_NAMED_MSG, NAMED_H_SIZE, 0);

	msg_set_origport(msg, tsk->portid);
	timer_setup(&sk->sk_timer, tipc_sk_timeout, 0);
	sk->sk_shutdown = 0;
	sk->sk_backlog_rcv = tipc_sk_backlog_rcv;
	sk->sk_rcvbuf = READ_ONCE(sysctl_tipc_rmem[1]);
	sk->sk_data_ready = tipc_data_ready;
	sk->sk_write_space = tipc_write_space;
	sk->sk_destruct = tipc_sock_destruct;
	tsk->conn_timeout = CONN_TIMEOUT_DEFAULT;
	tsk->group_is_open = true;
	atomic_set(&tsk->dupl_rcvcnt, 0);

	/* Start out with safe limits until we receive an advertised window */
	tsk->snd_win = tsk_adv_blocks(RCVBUF_MIN);
	tsk->rcv_win = tsk->snd_win;

	if (tipc_sk_type_connectionless(sk)) {
		tsk_set_unreturnable(tsk, true);
		if (sock->type == SOCK_DGRAM)
			tsk_set_unreliable(tsk, true);
	}
	__skb_queue_head_init(&tsk->mc_method.deferredq);
	trace_tipc_sk_create(sk, NULL, TIPC_DUMP_NONE, " ");
	return 0;
}
541
542static void tipc_sk_callback(struct rcu_head *head)
543{
544 struct tipc_sock *tsk = container_of(head, struct tipc_sock, rcu);
545
546 sock_put(&tsk->sk);
547}
548
549/* Caller should hold socket lock for the socket. */
550static void __tipc_shutdown(struct socket *sock, int error)
551{
552 struct sock *sk = sock->sk;
553 struct tipc_sock *tsk = tipc_sk(sk);
554 struct net *net = sock_net(sk);
555 long timeout = msecs_to_jiffies(CONN_TIMEOUT_DEFAULT);
556 u32 dnode = tsk_peer_node(tsk);
557 struct sk_buff *skb;
558
559 /* Avoid that hi-prio shutdown msgs bypass msgs in link wakeup queue */
560 tipc_wait_for_cond(sock, &timeout, (!tsk->cong_link_cnt &&
561 !tsk_conn_cong(tsk)));
562
563 /* Push out delayed messages if in Nagle mode */
564 tipc_sk_push_backlog(tsk, false);
565 /* Remove pending SYN */
566 __skb_queue_purge(&sk->sk_write_queue);
567
568 /* Remove partially received buffer if any */
569 skb = skb_peek(&sk->sk_receive_queue);
570 if (skb && TIPC_SKB_CB(skb)->bytes_read) {
571 __skb_unlink(skb, &sk->sk_receive_queue);
572 kfree_skb(skb);
573 }
574
575 /* Reject all unreceived messages if connectionless */
576 if (tipc_sk_type_connectionless(sk)) {
577 tsk_rej_rx_queue(sk, error);
578 return;
579 }
580
581 switch (sk->sk_state) {
582 case TIPC_CONNECTING:
583 case TIPC_ESTABLISHED:
584 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
585 tipc_node_remove_conn(net, dnode, tsk->portid);
586 /* Send a FIN+/- to its peer */
587 skb = __skb_dequeue(&sk->sk_receive_queue);
588 if (skb) {
589 __skb_queue_purge(&sk->sk_receive_queue);
590 tipc_sk_respond(sk, skb, error);
591 break;
592 }
593 skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
594 TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode,
595 tsk_own_node(tsk), tsk_peer_port(tsk),
596 tsk->portid, error);
597 if (skb)
598 tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
599 break;
600 case TIPC_LISTEN:
601 /* Reject all SYN messages */
602 tsk_rej_rx_queue(sk, error);
603 break;
604 default:
605 __skb_queue_purge(&sk->sk_receive_queue);
606 break;
607 }
608}
609
610/**
611 * tipc_release - destroy a TIPC socket
612 * @sock: socket to destroy
613 *
614 * This routine cleans up any messages that are still queued on the socket.
615 * For DGRAM and RDM socket types, all queued messages are rejected.
616 * For SEQPACKET and STREAM socket types, the first message is rejected
617 * and any others are discarded. (If the first message on a STREAM socket
618 * is partially-read, it is discarded and the next one is rejected instead.)
619 *
620 * NOTE: Rejected messages are not necessarily returned to the sender! They
621 * are returned or discarded according to the "destination droppable" setting
622 * specified for the message by the sender.
623 *
624 * Return: 0 on success, errno otherwise
625 */
626static int tipc_release(struct socket *sock)
627{
628 struct sock *sk = sock->sk;
629 struct tipc_sock *tsk;
630
631 /*
632 * Exit if socket isn't fully initialized (occurs when a failed accept()
633 * releases a pre-allocated child socket that was never used)
634 */
635 if (sk == NULL)
636 return 0;
637
638 tsk = tipc_sk(sk);
639 lock_sock(sk);
640
641 trace_tipc_sk_release(sk, NULL, TIPC_DUMP_ALL, " ");
642 __tipc_shutdown(sock, TIPC_ERR_NO_PORT);
643 sk->sk_shutdown = SHUTDOWN_MASK;
644 tipc_sk_leave(tsk);
645 tipc_sk_withdraw(tsk, NULL);
646 __skb_queue_purge(&tsk->mc_method.deferredq);
647 sk_stop_timer(sk, &sk->sk_timer);
648 tipc_sk_remove(tsk);
649
650 sock_orphan(sk);
651 /* Reject any messages that accumulated in backlog queue */
652 release_sock(sk);
653 tipc_dest_list_purge(&tsk->cong_links);
654 tsk->cong_link_cnt = 0;
655 call_rcu(&tsk->rcu, tipc_sk_callback);
656 sock->sk = NULL;
657
658 return 0;
659}
660
661/**
 * __tipc_bind - associate or disassociate TIPC name(s) with a socket
 * @sock: socket structure
 * @skaddr: socket address describing name(s) and desired operation
 * @alen: size of socket address data structure
 *
 * Name and name sequence binding are indicated using a positive scope value;
 * a negative scope value unbinds the specified name. Specifying no name
 * (i.e. a socket address length of 0) unbinds all names from the socket.
 *
 * Return: 0 on success, errno otherwise
 *
 * NOTE: This routine doesn't need to take the socket lock since it doesn't
 * access any non-constant socket information.
 */
static int __tipc_bind(struct socket *sock, struct sockaddr *skaddr, int alen)
{
	struct tipc_uaddr *ua = (struct tipc_uaddr *)skaddr;
	struct tipc_sock *tsk = tipc_sk(sock->sk);
	bool unbind = false;

	if (unlikely(!alen))
		return tipc_sk_withdraw(tsk, NULL);

	if (ua->addrtype == TIPC_SERVICE_ADDR) {
		ua->addrtype = TIPC_SERVICE_RANGE;
		ua->sr.upper = ua->sr.lower;
	}
	if (ua->scope < 0) {
		unbind = true;
		ua->scope = -ua->scope;
	}
	/* Users may still use deprecated TIPC_ZONE_SCOPE */
	if (ua->scope != TIPC_NODE_SCOPE)
		ua->scope = TIPC_CLUSTER_SCOPE;

	if (tsk->group)
		return -EACCES;

	if (unbind)
		return tipc_sk_withdraw(tsk, ua);
	return tipc_sk_publish(tsk, ua);
}

int tipc_sk_bind(struct socket *sock, struct sockaddr *skaddr, int alen)
{
	int res;

	lock_sock(sock->sk);
	res = __tipc_bind(sock, skaddr, alen);
	release_sock(sock->sk);
	return res;
}

static int tipc_bind(struct socket *sock, struct sockaddr *skaddr, int alen)
{
	struct tipc_uaddr *ua = (struct tipc_uaddr *)skaddr;
	u32 atype = ua->addrtype;

	if (alen) {
		if (!tipc_uaddr_valid(ua, alen))
			return -EINVAL;
		if (atype == TIPC_SOCKET_ADDR)
			return -EAFNOSUPPORT;
		if (ua->sr.type < TIPC_RESERVED_TYPES) {
			pr_warn_once("Can't bind to reserved service type %u\n",
				     ua->sr.type);
			return -EACCES;
		}
	}
	return tipc_sk_bind(sock, skaddr, alen);
}

/**
 * tipc_getname - get port ID of socket or peer socket
 * @sock: socket structure
 * @uaddr: area for returned socket address
 * @peer: 0 = own ID, 1 = current peer ID, 2 = current/former peer ID
 *
 * Return: 0 on success, errno otherwise
 *
 * NOTE: This routine doesn't need to take the socket lock since it only
 * accesses socket information that is unchanging (or which changes in
 * a completely predictable manner).
 */
static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
			int peer)
{
	struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);

	memset(addr, 0, sizeof(*addr));
	if (peer) {
		if ((!tipc_sk_connected(sk)) &&
		    ((peer != 2) || (sk->sk_state != TIPC_DISCONNECTING)))
			return -ENOTCONN;
		addr->addr.id.ref = tsk_peer_port(tsk);
		addr->addr.id.node = tsk_peer_node(tsk);
	} else {
		addr->addr.id.ref = tsk->portid;
		addr->addr.id.node = tipc_own_addr(sock_net(sk));
	}

	addr->addrtype = TIPC_SOCKET_ADDR;
	addr->family = AF_TIPC;
	addr->scope = 0;
	addr->addr.name.domain = 0;

	return sizeof(*addr);
}

/**
 * tipc_poll - read and possibly block on pollmask
 * @file: file structure associated with the socket
 * @sock: socket for which to calculate the poll bits
 * @wait: poll table
 *
 * Return: pollmask value
 *
 * COMMENTARY:
 * It appears that the usual socket locking mechanisms are not useful here
 * since the pollmask info is potentially out-of-date the moment this routine
 * exits. TCP and other protocols seem to rely on higher level poll routines
 * to handle any preventable race conditions, so TIPC will do the same ...
 *
 * IMPORTANT: The fact that a read or write operation is indicated does NOT
 * imply that the operation will succeed, merely that it should be performed
 * and will not block.
 */
static __poll_t tipc_poll(struct file *file, struct socket *sock,
			  poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	__poll_t revents = 0;

	sock_poll_wait(file, sock, wait);
	trace_tipc_sk_poll(sk, NULL, TIPC_DUMP_ALL, " ");

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		revents |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		revents |= EPOLLHUP;

	switch (sk->sk_state) {
	case TIPC_ESTABLISHED:
		if (!tsk->cong_link_cnt && !tsk_conn_cong(tsk))
			revents |= EPOLLOUT;
		fallthrough;
	case TIPC_LISTEN:
	case TIPC_CONNECTING:
		if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
			revents |= EPOLLIN | EPOLLRDNORM;
		break;
	case TIPC_OPEN:
		if (tsk->group_is_open && !tsk->cong_link_cnt)
			revents |= EPOLLOUT;
		if (!tipc_sk_type_connectionless(sk))
			break;
		if (skb_queue_empty_lockless(&sk->sk_receive_queue))
			break;
		revents |= EPOLLIN | EPOLLRDNORM;
		break;
	case TIPC_DISCONNECTING:
		revents = EPOLLIN | EPOLLRDNORM | EPOLLHUP;
		break;
	}
	return revents;
}

/**
 * tipc_sendmcast - send multicast message
 * @sock: socket structure
 * @ua: destination address struct
 * @msg: message to send
 * @dlen: length of data to send
 * @timeout: timeout to wait for wakeup
 *
 * Called from function tipc_sendmsg(), which has done all sanity checks
 * Return: the number of bytes sent on success, or errno
 */
static int tipc_sendmcast(struct socket *sock, struct tipc_uaddr *ua,
			  struct msghdr *msg, size_t dlen, long timeout)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_msg *hdr = &tsk->phdr;
	struct net *net = sock_net(sk);
	int mtu = tipc_bcast_get_mtu(net);
	struct sk_buff_head pkts;
	struct tipc_nlist dsts;
	int rc;

	if (tsk->group)
		return -EACCES;

	/* Block or return if any destination link is congested */
	rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt);
	if (unlikely(rc))
		return rc;

	/* Lookup destination nodes */
	tipc_nlist_init(&dsts, tipc_own_addr(net));
	tipc_nametbl_lookup_mcast_nodes(net, ua, &dsts);
	if (!dsts.local && !dsts.remote)
		return -EHOSTUNREACH;

	/* Build message header */
	msg_set_type(hdr, TIPC_MCAST_MSG);
	msg_set_hdr_sz(hdr, MCAST_H_SIZE);
	msg_set_lookup_scope(hdr, TIPC_CLUSTER_SCOPE);
	msg_set_destport(hdr, 0);
	msg_set_destnode(hdr, 0);
	msg_set_nametype(hdr, ua->sr.type);
	msg_set_namelower(hdr, ua->sr.lower);
	msg_set_nameupper(hdr, ua->sr.upper);

	/* Build message as chain of buffers */
	__skb_queue_head_init(&pkts);
	rc = tipc_msg_build(hdr, msg, 0, dlen, mtu, &pkts);

	/* Send message if build was successful */
	if (unlikely(rc == dlen)) {
		trace_tipc_sk_sendmcast(sk, skb_peek(&pkts),
					TIPC_DUMP_SK_SNDQ, " ");
		rc = tipc_mcast_xmit(net, &pkts, &tsk->mc_method, &dsts,
				     &tsk->cong_link_cnt);
	}

	tipc_nlist_purge(&dsts);

	return rc ? rc : dlen;
}

/**
 * tipc_send_group_msg - send a message to a member in the group
 * @net: network namespace
 * @tsk: tipc socket
 * @m: message to send
 * @mb: group member
 * @dnode: destination node
 * @dport: destination port
 * @dlen: total length of message data
 */
static int tipc_send_group_msg(struct net *net, struct tipc_sock *tsk,
			       struct msghdr *m, struct tipc_member *mb,
			       u32 dnode, u32 dport, int dlen)
{
	u16 bc_snd_nxt = tipc_group_bc_snd_nxt(tsk->group);
	struct tipc_mc_method *method = &tsk->mc_method;
	int blks = tsk_blocks(GROUP_H_SIZE + dlen);
	struct tipc_msg *hdr = &tsk->phdr;
	struct sk_buff_head pkts;
	int mtu, rc;

	/* Complete message header */
	msg_set_type(hdr, TIPC_GRP_UCAST_MSG);
	msg_set_hdr_sz(hdr, GROUP_H_SIZE);
	msg_set_destport(hdr, dport);
	msg_set_destnode(hdr, dnode);
	msg_set_grp_bc_seqno(hdr, bc_snd_nxt);

	/* Build message as chain of buffers */
	__skb_queue_head_init(&pkts);
	mtu = tipc_node_get_mtu(net, dnode, tsk->portid, false);
	rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
	if (unlikely(rc != dlen))
		return rc;

	/* Send message */
	rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
	if (unlikely(rc == -ELINKCONG)) {
		tipc_dest_push(&tsk->cong_links, dnode, 0);
		tsk->cong_link_cnt++;
	}

	/* Update send window */
	tipc_group_update_member(mb, blks);

	/* A broadcast sent within next EXPIRE period must follow same path */
	method->rcast = true;
	method->mandatory = true;
	return dlen;
}

/**
 * tipc_send_group_unicast - send message to a member in the group
 * @sock: socket structure
 * @m: message to send
 * @dlen: total length of message data
 * @timeout: timeout to wait for wakeup
 *
 * Called from function tipc_sendmsg(), which has done all sanity checks
 * Return: the number of bytes sent on success, or errno
 */
static int tipc_send_group_unicast(struct socket *sock, struct msghdr *m,
				   int dlen, long timeout)
{
	struct sock *sk = sock->sk;
	struct tipc_uaddr *ua = (struct tipc_uaddr *)m->msg_name;
	int blks = tsk_blocks(GROUP_H_SIZE + dlen);
	struct tipc_sock *tsk = tipc_sk(sk);
	struct net *net = sock_net(sk);
	struct tipc_member *mb = NULL;
	u32 node, port;
	int rc;

	node = ua->sk.node;
	port = ua->sk.ref;
	if (!port && !node)
		return -EHOSTUNREACH;

	/* Block or return if destination link or member is congested */
	rc = tipc_wait_for_cond(sock, &timeout,
				!tipc_dest_find(&tsk->cong_links, node, 0) &&
				tsk->group &&
				!tipc_group_cong(tsk->group, node, port, blks,
						 &mb));
	if (unlikely(rc))
		return rc;

	if (unlikely(!mb))
		return -EHOSTUNREACH;

	rc = tipc_send_group_msg(net, tsk, m, mb, node, port, dlen);

	return rc ? rc : dlen;
}

/**
 * tipc_send_group_anycast - send message to any member with given identity
 * @sock: socket structure
 * @m: message to send
 * @dlen: total length of message data
 * @timeout: timeout to wait for wakeup
 *
 * Called from function tipc_sendmsg(), which has done all sanity checks
 * Return: the number of bytes sent on success, or errno
 */
static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m,
				   int dlen, long timeout)
{
	struct tipc_uaddr *ua = (struct tipc_uaddr *)m->msg_name;
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct list_head *cong_links = &tsk->cong_links;
	int blks = tsk_blocks(GROUP_H_SIZE + dlen);
	struct tipc_msg *hdr = &tsk->phdr;
	struct tipc_member *first = NULL;
	struct tipc_member *mbr = NULL;
	struct net *net = sock_net(sk);
	u32 node, port, exclude;
	struct list_head dsts;
	int lookups = 0;
	int dstcnt, rc;
	bool cong;

	INIT_LIST_HEAD(&dsts);
	ua->sa.type = msg_nametype(hdr);
	ua->scope = msg_lookup_scope(hdr);

	while (++lookups < 4) {
		exclude = tipc_group_exclude(tsk->group);

		first = NULL;

		/* Look for a non-congested destination member, if any */
		while (1) {
			if (!tipc_nametbl_lookup_group(net, ua, &dsts, &dstcnt,
						       exclude, false))
				return -EHOSTUNREACH;
			tipc_dest_pop(&dsts, &node, &port);
			cong = tipc_group_cong(tsk->group, node, port, blks,
					       &mbr);
			if (!cong)
				break;
			if (mbr == first)
				break;
			if (!first)
				first = mbr;
		}

		/* Start over if destination was not in member list */
		if (unlikely(!mbr))
			continue;

		if (likely(!cong && !tipc_dest_find(cong_links, node, 0)))
			break;

		/* Block or return if destination link or member is congested */
		rc = tipc_wait_for_cond(sock, &timeout,
					!tipc_dest_find(cong_links, node, 0) &&
					tsk->group &&
					!tipc_group_cong(tsk->group, node, port,
							 blks, &mbr));
		if (unlikely(rc))
			return rc;

		/* Send, unless destination disappeared while waiting */
		if (likely(mbr))
			break;
	}

	if (unlikely(lookups >= 4))
		return -EHOSTUNREACH;

	rc = tipc_send_group_msg(net, tsk, m, mbr, node, port, dlen);

	return rc ? rc : dlen;
}

/**
 * tipc_send_group_bcast - send message to all members in communication group
 * @sock: socket structure
 * @m: message to send
 * @dlen: total length of message data
 * @timeout: timeout to wait for wakeup
 *
 * Called from function tipc_sendmsg(), which has done all sanity checks
 * Return: the number of bytes sent on success, or errno
 */
static int tipc_send_group_bcast(struct socket *sock, struct msghdr *m,
				 int dlen, long timeout)
{
	struct tipc_uaddr *ua = (struct tipc_uaddr *)m->msg_name;
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_nlist *dsts;
	struct tipc_mc_method *method = &tsk->mc_method;
	bool ack = method->mandatory && method->rcast;
	int blks = tsk_blocks(MCAST_H_SIZE + dlen);
	struct tipc_msg *hdr = &tsk->phdr;
	int mtu = tipc_bcast_get_mtu(net);
	struct sk_buff_head pkts;
	int rc = -EHOSTUNREACH;

	/* Block or return if any destination link or member is congested */
	rc = tipc_wait_for_cond(sock, &timeout,
				!tsk->cong_link_cnt && tsk->group &&
				!tipc_group_bc_cong(tsk->group, blks));
	if (unlikely(rc))
		return rc;

	dsts = tipc_group_dests(tsk->group);
	if (!dsts->local && !dsts->remote)
		return -EHOSTUNREACH;

	/* Complete message header */
	if (ua) {
		msg_set_type(hdr, TIPC_GRP_MCAST_MSG);
		msg_set_nameinst(hdr, ua->sa.instance);
	} else {
		msg_set_type(hdr, TIPC_GRP_BCAST_MSG);
		msg_set_nameinst(hdr, 0);
	}
	msg_set_hdr_sz(hdr, GROUP_H_SIZE);
	msg_set_destport(hdr, 0);
	msg_set_destnode(hdr, 0);
	msg_set_grp_bc_seqno(hdr, tipc_group_bc_snd_nxt(tsk->group));

	/* Avoid getting stuck with repeated forced replicasts */
	msg_set_grp_bc_ack_req(hdr, ack);

	/* Build message as chain of buffers */
	__skb_queue_head_init(&pkts);
	rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
	if (unlikely(rc != dlen))
		return rc;

	/* Send message */
	rc = tipc_mcast_xmit(net, &pkts, method, dsts, &tsk->cong_link_cnt);
	if (unlikely(rc))
		return rc;

	/* Update broadcast sequence number and send windows */
	tipc_group_update_bc_members(tsk->group, blks, ack);

	/* Broadcast link is now free to choose method for next broadcast */
	method->mandatory = false;
	method->expires = jiffies;

	return dlen;
}

/**
 * tipc_send_group_mcast - send message to all members with given identity
 * @sock: socket structure
 * @m: message to send
 * @dlen: total length of message data
 * @timeout: timeout to wait for wakeup
 *
 * Called from function tipc_sendmsg(), which has done all sanity checks
 * Return: the number of bytes sent on success, or errno
 */
static int tipc_send_group_mcast(struct socket *sock, struct msghdr *m,
				 int dlen, long timeout)
{
	struct tipc_uaddr *ua = (struct tipc_uaddr *)m->msg_name;
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_group *grp = tsk->group;
	struct tipc_msg *hdr = &tsk->phdr;
	struct net *net = sock_net(sk);
	struct list_head dsts;
	u32 dstcnt, exclude;

	INIT_LIST_HEAD(&dsts);
	ua->sa.type = msg_nametype(hdr);
	ua->scope = msg_lookup_scope(hdr);
	exclude = tipc_group_exclude(grp);

	if (!tipc_nametbl_lookup_group(net, ua, &dsts, &dstcnt, exclude, true))
		return -EHOSTUNREACH;

	if (dstcnt == 1) {
		tipc_dest_pop(&dsts, &ua->sk.node, &ua->sk.ref);
		return tipc_send_group_unicast(sock, m, dlen, timeout);
	}

	tipc_dest_list_purge(&dsts);
	return tipc_send_group_bcast(sock, m, dlen, timeout);
}

/**
 * tipc_sk_mcast_rcv - Deliver multicast messages to all destination sockets
 * @net: the associated network namespace
 * @arrvq: queue with arriving messages, to be cloned after destination lookup
 * @inputq: queue with cloned messages, delivered to socket after dest lookup
 *
 * Multi-threaded: parallel calls with reference to same queues may occur
 */
void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
		       struct sk_buff_head *inputq)
{
	u32 self = tipc_own_addr(net);
	struct sk_buff *skb, *_skb;
	u32 portid, onode;
	struct sk_buff_head tmpq;
	struct list_head dports;
	struct tipc_msg *hdr;
	struct tipc_uaddr ua;
	int user, mtyp, hlen;

	__skb_queue_head_init(&tmpq);
	INIT_LIST_HEAD(&dports);
	ua.addrtype = TIPC_SERVICE_RANGE;

	/* tipc_skb_peek() increments the head skb's reference counter */
	skb = tipc_skb_peek(arrvq, &inputq->lock);
	for (; skb; skb = tipc_skb_peek(arrvq, &inputq->lock)) {
		hdr = buf_msg(skb);
		user = msg_user(hdr);
		mtyp = msg_type(hdr);
		hlen = skb_headroom(skb) + msg_hdr_sz(hdr);
		onode = msg_orignode(hdr);
		ua.sr.type = msg_nametype(hdr);
		ua.sr.lower = msg_namelower(hdr);
		ua.sr.upper = msg_nameupper(hdr);
		if (onode == self)
			ua.scope = TIPC_ANY_SCOPE;
		else
			ua.scope = TIPC_CLUSTER_SCOPE;

		if (mtyp == TIPC_GRP_UCAST_MSG || user == GROUP_PROTOCOL) {
			spin_lock_bh(&inputq->lock);
			if (skb_peek(arrvq) == skb) {
				__skb_dequeue(arrvq);
				__skb_queue_tail(inputq, skb);
			}
			kfree_skb(skb);
			spin_unlock_bh(&inputq->lock);
			continue;
		}

		/* Group messages require exact scope match */
		if (msg_in_group(hdr)) {
			ua.sr.lower = 0;
			ua.sr.upper = ~0;
			ua.scope = msg_lookup_scope(hdr);
		}

		/* Create destination port list: */
		tipc_nametbl_lookup_mcast_sockets(net, &ua, &dports);

		/* Clone message per destination */
		while (tipc_dest_pop(&dports, NULL, &portid)) {
			_skb = __pskb_copy(skb, hlen, GFP_ATOMIC);
			if (_skb) {
				msg_set_destport(buf_msg(_skb), portid);
				__skb_queue_tail(&tmpq, _skb);
				continue;
			}
			pr_warn("Failed to clone mcast rcv buffer\n");
		}
		/* Append clones to inputq only if skb is still head of arrvq */
		spin_lock_bh(&inputq->lock);
		if (skb_peek(arrvq) == skb) {
			skb_queue_splice_tail_init(&tmpq, inputq);
			/* Decrement the skb's refcnt */
			kfree_skb(__skb_dequeue(arrvq));
		}
		spin_unlock_bh(&inputq->lock);
		__skb_queue_purge(&tmpq);
		kfree_skb(skb);
	}
	tipc_sk_rcv(net, inputq);
}

/* tipc_sk_push_backlog(): send accumulated buffers in socket write queue
 * when socket is in Nagle mode
 */
static void tipc_sk_push_backlog(struct tipc_sock *tsk, bool nagle_ack)
{
	struct sk_buff_head *txq = &tsk->sk.sk_write_queue;
	struct sk_buff *skb = skb_peek_tail(txq);
	struct net *net = sock_net(&tsk->sk);
	u32 dnode = tsk_peer_node(tsk);
	int rc;

	if (nagle_ack) {
		tsk->pkt_cnt += skb_queue_len(txq);
		if (!tsk->pkt_cnt || tsk->msg_acc / tsk->pkt_cnt < 2) {
			tsk->oneway = 0;
			if (tsk->nagle_start < NAGLE_START_MAX)
				tsk->nagle_start *= 2;
			tsk->expect_ack = false;
			pr_debug("tsk %10u: bad nagle %u -> %u, next start %u!\n",
				 tsk->portid, tsk->msg_acc, tsk->pkt_cnt,
				 tsk->nagle_start);
		} else {
			tsk->nagle_start = NAGLE_START_INIT;
			if (skb) {
				msg_set_ack_required(buf_msg(skb));
				tsk->expect_ack = true;
			} else {
				tsk->expect_ack = false;
			}
		}
		tsk->msg_acc = 0;
		tsk->pkt_cnt = 0;
	}

	if (!skb || tsk->cong_link_cnt)
		return;

	/* Do not send SYN again after congestion */
	if (msg_is_syn(buf_msg(skb)))
		return;

	if (tsk->msg_acc)
		tsk->pkt_cnt += skb_queue_len(txq);
	tsk->snt_unacked += tsk->snd_backlog;
	tsk->snd_backlog = 0;
	rc = tipc_node_xmit(net, txq, dnode, tsk->portid);
	if (rc == -ELINKCONG)
		tsk->cong_link_cnt = 1;
}
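
/* In short (a reading of the logic above): when a nagle ACK arrives and
 * fewer than two messages were bundled per sent packet, bundling is judged
 * not worthwhile, so the one-way counter restarts and nagle_start doubles
 * (up to NAGLE_START_MAX), delaying the next bundling attempt. Otherwise
 * nagle_start resets to NAGLE_START_INIT and a new ACK is requested on the
 * last queued buffer.
 */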

/**
 * tipc_sk_conn_proto_rcv - receive a connection mng protocol message
 * @tsk: receiving socket
 * @skb: pointer to message buffer.
 * @inputq: buffer list containing the buffers
 * @xmitq: output message area
 */
static void tipc_sk_conn_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
				   struct sk_buff_head *inputq,
				   struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	u32 onode = tsk_own_node(tsk);
	struct sock *sk = &tsk->sk;
	int mtyp = msg_type(hdr);
	bool was_cong;

	/* Ignore if connection cannot be validated: */
	if (!tsk_peer_msg(tsk, hdr)) {
		trace_tipc_sk_drop_msg(sk, skb, TIPC_DUMP_NONE, "@proto_rcv!");
		goto exit;
	}

	if (unlikely(msg_errcode(hdr))) {
		tipc_set_sk_state(sk, TIPC_DISCONNECTING);
		tipc_node_remove_conn(sock_net(sk), tsk_peer_node(tsk),
				      tsk_peer_port(tsk));
		sk->sk_state_change(sk);

		/* State change is ignored if socket already awake,
		 * - convert msg to abort msg and add to inqueue
		 */
		msg_set_user(hdr, TIPC_CRITICAL_IMPORTANCE);
		msg_set_type(hdr, TIPC_CONN_MSG);
		msg_set_size(hdr, BASIC_H_SIZE);
		msg_set_hdr_sz(hdr, BASIC_H_SIZE);
		__skb_queue_tail(inputq, skb);
		return;
	}

	tsk->probe_unacked = false;

	if (mtyp == CONN_PROBE) {
		msg_set_type(hdr, CONN_PROBE_REPLY);
		if (tipc_msg_reverse(onode, &skb, TIPC_OK))
			__skb_queue_tail(xmitq, skb);
		return;
	} else if (mtyp == CONN_ACK) {
		was_cong = tsk_conn_cong(tsk);
		tipc_sk_push_backlog(tsk, msg_nagle_ack(hdr));
		tsk->snt_unacked -= msg_conn_ack(hdr);
		if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
			tsk->snd_win = msg_adv_win(hdr);
		if (was_cong && !tsk_conn_cong(tsk))
			sk->sk_write_space(sk);
	} else if (mtyp != CONN_PROBE_REPLY) {
		pr_warn("Received unknown CONN_PROTO msg\n");
	}
exit:
	kfree_skb(skb);
}

/**
 * tipc_sendmsg - send message in connectionless manner
 * @sock: socket structure
 * @m: message to send
 * @dsz: amount of user data to be sent
 *
 * Message must have a destination specified explicitly.
 * Used for SOCK_RDM and SOCK_DGRAM messages,
 * and for 'SYN' messages on SOCK_SEQPACKET and SOCK_STREAM connections.
 * (Note: 'SYN+' is prohibited on SOCK_STREAM.)
 *
 * Return: the number of bytes sent on success, or errno otherwise
 */
static int tipc_sendmsg(struct socket *sock,
			struct msghdr *m, size_t dsz)
{
	struct sock *sk = sock->sk;
	int ret;

	lock_sock(sk);
	ret = __tipc_sendmsg(sock, m, dsz);
	release_sock(sk);

	return ret;
}

static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_uaddr *ua = (struct tipc_uaddr *)m->msg_name;
	long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
	struct list_head *clinks = &tsk->cong_links;
	bool syn = !tipc_sk_type_connectionless(sk);
	struct tipc_group *grp = tsk->group;
	struct tipc_msg *hdr = &tsk->phdr;
	struct tipc_socket_addr skaddr;
	struct sk_buff_head pkts;
	int atype, mtu, rc;

	if (unlikely(dlen > TIPC_MAX_USER_MSG_SIZE))
		return -EMSGSIZE;

	if (ua) {
		if (!tipc_uaddr_valid(ua, m->msg_namelen))
			return -EINVAL;
		atype = ua->addrtype;
	}

	/* If socket belongs to a communication group follow other paths */
	if (grp) {
		if (!ua)
			return tipc_send_group_bcast(sock, m, dlen, timeout);
		if (atype == TIPC_SERVICE_ADDR)
			return tipc_send_group_anycast(sock, m, dlen, timeout);
		if (atype == TIPC_SOCKET_ADDR)
			return tipc_send_group_unicast(sock, m, dlen, timeout);
		if (atype == TIPC_SERVICE_RANGE)
			return tipc_send_group_mcast(sock, m, dlen, timeout);
		return -EINVAL;
	}

	if (!ua) {
		ua = (struct tipc_uaddr *)&tsk->peer;
		if (!syn && ua->family != AF_TIPC)
			return -EDESTADDRREQ;
		atype = ua->addrtype;
	}

	if (unlikely(syn)) {
		if (sk->sk_state == TIPC_LISTEN)
			return -EPIPE;
		if (sk->sk_state != TIPC_OPEN)
			return -EISCONN;
		if (tsk->published)
			return -EOPNOTSUPP;
		if (atype == TIPC_SERVICE_ADDR)
			tsk->conn_addrtype = atype;
		msg_set_syn(hdr, 1);
	}

	memset(&skaddr, 0, sizeof(skaddr));

	/* Determine destination */
	if (atype == TIPC_SERVICE_RANGE) {
		return tipc_sendmcast(sock, ua, m, dlen, timeout);
	} else if (atype == TIPC_SERVICE_ADDR) {
		skaddr.node = ua->lookup_node;
		ua->scope = tipc_node2scope(skaddr.node);
		if (!tipc_nametbl_lookup_anycast(net, ua, &skaddr))
			return -EHOSTUNREACH;
	} else if (atype == TIPC_SOCKET_ADDR) {
		skaddr = ua->sk;
	} else {
		return -EINVAL;
	}

	/* Block or return if destination link is congested */
	rc = tipc_wait_for_cond(sock, &timeout,
				!tipc_dest_find(clinks, skaddr.node, 0));
	if (unlikely(rc))
		return rc;

	/* Finally build message header */
	msg_set_destnode(hdr, skaddr.node);
	msg_set_destport(hdr, skaddr.ref);
	if (atype == TIPC_SERVICE_ADDR) {
		msg_set_type(hdr, TIPC_NAMED_MSG);
		msg_set_hdr_sz(hdr, NAMED_H_SIZE);
		msg_set_nametype(hdr, ua->sa.type);
		msg_set_nameinst(hdr, ua->sa.instance);
		msg_set_lookup_scope(hdr, ua->scope);
	} else { /* TIPC_SOCKET_ADDR */
		msg_set_type(hdr, TIPC_DIRECT_MSG);
		msg_set_lookup_scope(hdr, 0);
		msg_set_hdr_sz(hdr, BASIC_H_SIZE);
	}

	/* Add message body */
	__skb_queue_head_init(&pkts);
	mtu = tipc_node_get_mtu(net, skaddr.node, tsk->portid, true);
	rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
	if (unlikely(rc != dlen))
		return rc;
	if (unlikely(syn && !tipc_msg_skb_clone(&pkts, &sk->sk_write_queue))) {
		__skb_queue_purge(&pkts);
		return -ENOMEM;
	}

	/* Send message */
	trace_tipc_sk_sendmsg(sk, skb_peek(&pkts), TIPC_DUMP_SK_SNDQ, " ");
	rc = tipc_node_xmit(net, &pkts, skaddr.node, tsk->portid);
	if (unlikely(rc == -ELINKCONG)) {
		tipc_dest_push(clinks, skaddr.node, 0);
		tsk->cong_link_cnt++;
		rc = 0;
	}

	if (unlikely(syn && !rc)) {
		tipc_set_sk_state(sk, TIPC_CONNECTING);
		if (dlen && timeout) {
			timeout = msecs_to_jiffies(timeout);
			tipc_wait_for_connect(sock, &timeout);
		}
	}

	return rc ? rc : dlen;
}

/**
 * tipc_sendstream - send stream-oriented data
 * @sock: socket structure
 * @m: data to send
 * @dsz: total length of data to be transmitted
 *
 * Used for SOCK_STREAM data.
 *
 * Return: the number of bytes sent on success (or partial success),
 * or errno if no data sent
 */
static int tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz)
{
	struct sock *sk = sock->sk;
	int ret;

	lock_sock(sk);
	ret = __tipc_sendstream(sock, m, dsz);
	release_sock(sk);

	return ret;
}

static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen)
{
	struct sock *sk = sock->sk;
	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
	long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
	struct sk_buff_head *txq = &sk->sk_write_queue;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_msg *hdr = &tsk->phdr;
	struct net *net = sock_net(sk);
	struct sk_buff *skb;
	u32 dnode = tsk_peer_node(tsk);
	int maxnagle = tsk->maxnagle;
	int maxpkt = tsk->max_pkt;
	int send, sent = 0;
	int blocks, rc = 0;

	if (unlikely(dlen > INT_MAX))
		return -EMSGSIZE;

	/* Handle implicit connection setup */
	if (unlikely(dest && sk->sk_state == TIPC_OPEN)) {
		rc = __tipc_sendmsg(sock, m, dlen);
		if (dlen && dlen == rc) {
			tsk->peer_caps = tipc_node_get_capabilities(net, dnode);
			tsk->snt_unacked = tsk_inc(tsk, dlen + msg_hdr_sz(hdr));
		}
		return rc;
	}

	do {
		rc = tipc_wait_for_cond(sock, &timeout,
					(!tsk->cong_link_cnt &&
					 !tsk_conn_cong(tsk) &&
					 tipc_sk_connected(sk)));
		if (unlikely(rc))
			break;
		send = min_t(size_t, dlen - sent, TIPC_MAX_USER_MSG_SIZE);
		blocks = tsk->snd_backlog;
		if (tsk->oneway++ >= tsk->nagle_start && maxnagle &&
		    send <= maxnagle) {
			rc = tipc_msg_append(hdr, m, send, maxnagle, txq);
			if (unlikely(rc < 0))
				break;
			blocks += rc;
			tsk->msg_acc++;
			if (blocks <= 64 && tsk->expect_ack) {
				tsk->snd_backlog = blocks;
				sent += send;
				break;
			} else if (blocks > 64) {
				tsk->pkt_cnt += skb_queue_len(txq);
			} else {
				skb = skb_peek_tail(txq);
				if (skb) {
					msg_set_ack_required(buf_msg(skb));
					tsk->expect_ack = true;
				} else {
					tsk->expect_ack = false;
				}
				tsk->msg_acc = 0;
				tsk->pkt_cnt = 0;
			}
		} else {
			rc = tipc_msg_build(hdr, m, sent, send, maxpkt, txq);
			if (unlikely(rc != send))
				break;
			blocks += tsk_inc(tsk, send + MIN_H_SIZE);
		}
		trace_tipc_sk_sendstream(sk, skb_peek(txq),
					 TIPC_DUMP_SK_SNDQ, " ");
		rc = tipc_node_xmit(net, txq, dnode, tsk->portid);
		if (unlikely(rc == -ELINKCONG)) {
			tsk->cong_link_cnt = 1;
			rc = 0;
		}
		if (likely(!rc)) {
			tsk->snt_unacked += blocks;
			tsk->snd_backlog = 0;
			sent += send;
		}
	} while (sent < dlen && !rc);

	return sent ? sent : rc;
}

/**
 * tipc_send_packet - send a connection-oriented message
 * @sock: socket structure
 * @m: message to send
 * @dsz: length of data to be transmitted
 *
 * Used for SOCK_SEQPACKET messages.
 *
 * Return: the number of bytes sent on success, or errno otherwise
 */
static int tipc_send_packet(struct socket *sock, struct msghdr *m, size_t dsz)
{
	if (dsz > TIPC_MAX_USER_MSG_SIZE)
		return -EMSGSIZE;

	return tipc_sendstream(sock, m, dsz);
}

/* tipc_sk_finish_conn - complete the setup of a connection
 */
static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port,
				u32 peer_node)
{
	struct sock *sk = &tsk->sk;
	struct net *net = sock_net(sk);
	struct tipc_msg *msg = &tsk->phdr;

	msg_set_syn(msg, 0);
	msg_set_destnode(msg, peer_node);
	msg_set_destport(msg, peer_port);
	msg_set_type(msg, TIPC_CONN_MSG);
	msg_set_lookup_scope(msg, 0);
	msg_set_hdr_sz(msg, SHORT_H_SIZE);

	sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV);
	tipc_set_sk_state(sk, TIPC_ESTABLISHED);
	tipc_node_add_conn(net, peer_node, tsk->portid, peer_port);
	tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid, true);
	tsk->peer_caps = tipc_node_get_capabilities(net, peer_node);
	tsk_set_nagle(tsk);
	__skb_queue_purge(&sk->sk_write_queue);
	if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
		return;

	/* Fall back to message based flow control */
	tsk->rcv_win = FLOWCTL_MSG_WIN;
	tsk->snd_win = FLOWCTL_MSG_WIN;
}

/**
 * tipc_sk_set_orig_addr - capture sender's address for received message
 * @m: descriptor for message info
 * @skb: received message
 *
 * Note: Address is not captured if not requested by receiver.
 */
static void tipc_sk_set_orig_addr(struct msghdr *m, struct sk_buff *skb)
{
	DECLARE_SOCKADDR(struct sockaddr_pair *, srcaddr, m->msg_name);
	struct tipc_msg *hdr = buf_msg(skb);

	if (!srcaddr)
		return;

	srcaddr->sock.family = AF_TIPC;
	srcaddr->sock.addrtype = TIPC_SOCKET_ADDR;
	srcaddr->sock.scope = 0;
	srcaddr->sock.addr.id.ref = msg_origport(hdr);
	srcaddr->sock.addr.id.node = msg_orignode(hdr);
	srcaddr->sock.addr.name.domain = 0;
	m->msg_namelen = sizeof(struct sockaddr_tipc);

	if (!msg_in_group(hdr))
		return;

	/* Group message users may also want to know sending member's id */
	srcaddr->member.family = AF_TIPC;
	srcaddr->member.addrtype = TIPC_SERVICE_ADDR;
	srcaddr->member.scope = 0;
	srcaddr->member.addr.name.name.type = msg_nametype(hdr);
	srcaddr->member.addr.name.name.instance = TIPC_SKB_CB(skb)->orig_member;
	srcaddr->member.addr.name.domain = 0;
	m->msg_namelen = sizeof(*srcaddr);
}

/**
 * tipc_sk_anc_data_recv - optionally capture ancillary data for received message
 * @m: descriptor for message info
 * @skb: received message buffer
 * @tsk: TIPC port associated with message
 *
 * Note: Ancillary data is not captured if not requested by receiver.
 *
 * Return: 0 if successful, otherwise errno
 */
static int tipc_sk_anc_data_recv(struct msghdr *m, struct sk_buff *skb,
				 struct tipc_sock *tsk)
{
	struct tipc_msg *hdr;
	u32 data[3] = {0,};
	bool has_addr;
	int dlen, rc;

	if (likely(m->msg_controllen == 0))
		return 0;

	hdr = buf_msg(skb);
	dlen = msg_data_sz(hdr);

	/* Capture errored message object, if any */
	if (msg_errcode(hdr)) {
		if (skb_linearize(skb))
			return -ENOMEM;
		hdr = buf_msg(skb);
		data[0] = msg_errcode(hdr);
		data[1] = dlen;
		rc = put_cmsg(m, SOL_TIPC, TIPC_ERRINFO, 8, data);
		if (rc || !dlen)
			return rc;
		rc = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, dlen, msg_data(hdr));
		if (rc)
			return rc;
	}

	/* Capture TIPC_SERVICE_ADDR/RANGE destination address, if any */
	switch (msg_type(hdr)) {
	case TIPC_NAMED_MSG:
		has_addr = true;
		data[0] = msg_nametype(hdr);
		data[1] = msg_namelower(hdr);
		data[2] = data[1];
		break;
	case TIPC_MCAST_MSG:
		has_addr = true;
		data[0] = msg_nametype(hdr);
		data[1] = msg_namelower(hdr);
		data[2] = msg_nameupper(hdr);
		break;
	case TIPC_CONN_MSG:
		has_addr = !!tsk->conn_addrtype;
		data[0] = msg_nametype(&tsk->phdr);
		data[1] = msg_nameinst(&tsk->phdr);
		data[2] = data[1];
		break;
	default:
		has_addr = false;
	}
	if (!has_addr)
		return 0;
	return put_cmsg(m, SOL_TIPC, TIPC_DESTNAME, 12, data);
}
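
/* For reference, the ancillary objects produced above are laid out as
 * follows: TIPC_ERRINFO carries two u32s (error code, length of returned
 * data), TIPC_RETDATA carries the rejected message body itself, and
 * TIPC_DESTNAME carries three u32s (service type, lower and upper instance).
 */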

static struct sk_buff *tipc_sk_build_ack(struct tipc_sock *tsk)
{
	struct sock *sk = &tsk->sk;
	struct sk_buff *skb = NULL;
	struct tipc_msg *msg;
	u32 peer_port = tsk_peer_port(tsk);
	u32 dnode = tsk_peer_node(tsk);

	if (!tipc_sk_connected(sk))
		return NULL;
	skb = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0,
			      dnode, tsk_own_node(tsk), peer_port,
			      tsk->portid, TIPC_OK);
	if (!skb)
		return NULL;
	msg = buf_msg(skb);
	msg_set_conn_ack(msg, tsk->rcv_unacked);
	tsk->rcv_unacked = 0;

	/* Adjust to and advertise the correct window limit */
	if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL) {
		tsk->rcv_win = tsk_adv_blocks(tsk->sk.sk_rcvbuf);
		msg_set_adv_win(msg, tsk->rcv_win);
	}
	return skb;
}
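
/* The CONN_ACK built above returns the number of flow control units the
 * receiver has consumed (rcv_unacked) and, for peers supporting
 * TIPC_BLOCK_FLOWCTL, a fresh advertised window recomputed from the
 * current sk_rcvbuf limit.
 */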
1819
1820static void tipc_sk_send_ack(struct tipc_sock *tsk)
1821{
1822 struct sk_buff *skb;
1823
1824 skb = tipc_sk_build_ack(tsk);
1825 if (!skb)
1826 return;
1827
1828 tipc_node_xmit_skb(sock_net(&tsk->sk), skb, tsk_peer_node(tsk),
1829 msg_link_selector(buf_msg(skb)));
1830}
1831
1832static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
1833{
1834 struct sock *sk = sock->sk;
1835 DEFINE_WAIT_FUNC(wait, woken_wake_function);
1836 long timeo = *timeop;
1837 int err = sock_error(sk);
1838
1839 if (err)
1840 return err;
1841
1842 for (;;) {
1843 if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
1844 if (sk->sk_shutdown & RCV_SHUTDOWN) {
1845 err = -ENOTCONN;
1846 break;
1847 }
1848 add_wait_queue(sk_sleep(sk), &wait);
1849 release_sock(sk);
1850 timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
1851 sched_annotate_sleep();
1852 lock_sock(sk);
1853 remove_wait_queue(sk_sleep(sk), &wait);
1854 }
1855 err = 0;
1856 if (!skb_queue_empty(&sk->sk_receive_queue))
1857 break;
1858 err = -EAGAIN;
1859 if (!timeo)
1860 break;
1861 err = sock_intr_errno(timeo);
1862 if (signal_pending(current))
1863 break;
1864
1865 err = sock_error(sk);
1866 if (err)
1867 break;
1868 }
1869 *timeop = timeo;
1870 return err;
1871}
1872
1873/**
1874 * tipc_recvmsg - receive packet-oriented message
1875 * @sock: network socket
1876 * @m: descriptor for message info
1877 * @buflen: length of user buffer area
1878 * @flags: receive flags
1879 *
1880 * Used for SOCK_DGRAM, SOCK_RDM, and SOCK_SEQPACKET messages.
1881 * If the complete message doesn't fit in user area, truncate it.
1882 *
1883 * Return: size of returned message data, errno otherwise
1884 */
1885static int tipc_recvmsg(struct socket *sock, struct msghdr *m,
1886 size_t buflen, int flags)
1887{
1888 struct sock *sk = sock->sk;
1889 bool connected = !tipc_sk_type_connectionless(sk);
1890 struct tipc_sock *tsk = tipc_sk(sk);
1891 int rc, err, hlen, dlen, copy;
1892 struct tipc_skb_cb *skb_cb;
1893 struct sk_buff_head xmitq;
1894 struct tipc_msg *hdr;
1895 struct sk_buff *skb;
1896 bool grp_evt;
1897 long timeout;
1898
1899 /* Catch invalid receive requests */
1900 if (unlikely(!buflen))
1901 return -EINVAL;
1902
1903 lock_sock(sk);
1904 if (unlikely(connected && sk->sk_state == TIPC_OPEN)) {
1905 rc = -ENOTCONN;
1906 goto exit;
1907 }
1908 timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1909
1910 /* Step rcv queue to first msg with data or error; wait if necessary */
1911 do {
1912 rc = tipc_wait_for_rcvmsg(sock, &timeout);
1913 if (unlikely(rc))
1914 goto exit;
1915 skb = skb_peek(&sk->sk_receive_queue);
1916 skb_cb = TIPC_SKB_CB(skb);
1917 hdr = buf_msg(skb);
1918 dlen = msg_data_sz(hdr);
1919 hlen = msg_hdr_sz(hdr);
1920 err = msg_errcode(hdr);
1921 grp_evt = msg_is_grp_evt(hdr);
1922 if (likely(dlen || err))
1923 break;
1924 tsk_advance_rx_queue(sk);
1925 } while (1);
1926
1927 /* Collect msg meta data, including error code and rejected data */
1928 tipc_sk_set_orig_addr(m, skb);
1929 rc = tipc_sk_anc_data_recv(m, skb, tsk);
1930 if (unlikely(rc))
1931 goto exit;
1932 hdr = buf_msg(skb);
1933
1934 /* Capture data if non-error msg, otherwise just set return value */
1935 if (likely(!err)) {
1936 int offset = skb_cb->bytes_read;
1937
1938 copy = min_t(int, dlen - offset, buflen);
1939 rc = skb_copy_datagram_msg(skb, hlen + offset, m, copy);
1940 if (unlikely(rc))
1941 goto exit;
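		/* Partially consumed message: with MSG_EOR the unread
		 * remainder is kept for a later call, otherwise the
		 * tail of the message is truncated
		 */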
1942 if (unlikely(offset + copy < dlen)) {
1943 if (flags & MSG_EOR) {
1944 if (!(flags & MSG_PEEK))
1945 skb_cb->bytes_read = offset + copy;
1946 } else {
1947 m->msg_flags |= MSG_TRUNC;
1948 skb_cb->bytes_read = 0;
1949 }
1950 } else {
1951 if (flags & MSG_EOR)
1952 m->msg_flags |= MSG_EOR;
1953 skb_cb->bytes_read = 0;
1954 }
1955 } else {
1956 copy = 0;
1957 rc = 0;
1958 if (err != TIPC_CONN_SHUTDOWN && connected && !m->msg_control) {
1959 rc = -ECONNRESET;
1960 goto exit;
1961 }
1962 }
1963
1964 /* Mark message as group event if applicable */
1965 if (unlikely(grp_evt)) {
1966 if (msg_grp_evt(hdr) == TIPC_WITHDRAWN)
1967 m->msg_flags |= MSG_EOR;
1968 m->msg_flags |= MSG_OOB;
1969 copy = 0;
1970 }
1971
1972 	/* Capture of data or error code/rejected data was successful */
1973 if (unlikely(flags & MSG_PEEK))
1974 goto exit;
1975
1976 /* Send group flow control advertisement when applicable */
1977 if (tsk->group && msg_in_group(hdr) && !grp_evt) {
1978 __skb_queue_head_init(&xmitq);
1979 tipc_group_update_rcv_win(tsk->group, tsk_blocks(hlen + dlen),
1980 msg_orignode(hdr), msg_origport(hdr),
1981 &xmitq);
1982 tipc_node_distr_xmit(sock_net(sk), &xmitq);
1983 }
1984
1985 if (skb_cb->bytes_read)
1986 goto exit;
1987
1988 tsk_advance_rx_queue(sk);
1989
1990 if (likely(!connected))
1991 goto exit;
1992
1993 /* Send connection flow control advertisement when applicable */
1994 tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen);
1995 if (tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE)
1996 tipc_sk_send_ack(tsk);
1997exit:
1998 release_sock(sk);
1999 return rc ? rc : copy;
2000}
2001
2002/**
2003 * tipc_recvstream - receive stream-oriented data
2004 * @sock: network socket
2005 * @m: descriptor for message info
2006 * @buflen: total size of user buffer area
2007 * @flags: receive flags
2008 *
2009  * Used for SOCK_STREAM messages only. If not enough data is available
2010  * it will optionally wait for more; never truncates data.
2011 *
2012 * Return: size of returned message data, errno otherwise
2013 */
2014static int tipc_recvstream(struct socket *sock, struct msghdr *m,
2015 size_t buflen, int flags)
2016{
2017 struct sock *sk = sock->sk;
2018 struct tipc_sock *tsk = tipc_sk(sk);
2019 struct sk_buff *skb;
2020 struct tipc_msg *hdr;
2021 struct tipc_skb_cb *skb_cb;
2022 bool peek = flags & MSG_PEEK;
2023 int offset, required, copy, copied = 0;
2024 int hlen, dlen, err, rc;
2025 long timeout;
2026
2027 /* Catch invalid receive attempts */
2028 if (unlikely(!buflen))
2029 return -EINVAL;
2030
2031 lock_sock(sk);
2032
2033 if (unlikely(sk->sk_state == TIPC_OPEN)) {
2034 rc = -ENOTCONN;
2035 goto exit;
2036 }
2037 required = sock_rcvlowat(sk, flags & MSG_WAITALL, buflen);
2038 timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
2039
2040 do {
2041 /* Look at first msg in receive queue; wait if necessary */
2042 rc = tipc_wait_for_rcvmsg(sock, &timeout);
2043 if (unlikely(rc))
2044 break;
2045 skb = skb_peek(&sk->sk_receive_queue);
2046 skb_cb = TIPC_SKB_CB(skb);
2047 hdr = buf_msg(skb);
2048 dlen = msg_data_sz(hdr);
2049 hlen = msg_hdr_sz(hdr);
2050 err = msg_errcode(hdr);
2051
2052 /* Discard any empty non-errored (SYN-) message */
2053 if (unlikely(!dlen && !err)) {
2054 tsk_advance_rx_queue(sk);
2055 continue;
2056 }
2057
2058 /* Collect msg meta data, incl. error code and rejected data */
2059 if (!copied) {
2060 tipc_sk_set_orig_addr(m, skb);
2061 rc = tipc_sk_anc_data_recv(m, skb, tsk);
2062 if (rc)
2063 break;
2064 hdr = buf_msg(skb);
2065 }
2066
2067 /* Copy data if msg ok, otherwise return error/partial data */
2068 if (likely(!err)) {
2069 offset = skb_cb->bytes_read;
2070 copy = min_t(int, dlen - offset, buflen - copied);
2071 rc = skb_copy_datagram_msg(skb, hlen + offset, m, copy);
2072 if (unlikely(rc))
2073 break;
2074 copied += copy;
2075 offset += copy;
2076 if (unlikely(offset < dlen)) {
2077 if (!peek)
2078 skb_cb->bytes_read = offset;
2079 break;
2080 }
2081 } else {
2082 rc = 0;
2083 if ((err != TIPC_CONN_SHUTDOWN) && !m->msg_control)
2084 rc = -ECONNRESET;
2085 if (copied || rc)
2086 break;
2087 }
2088
2089 if (unlikely(peek))
2090 break;
2091
2092 tsk_advance_rx_queue(sk);
2093
2094 /* Send connection flow control advertisement when applicable */
2095 tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen);
2096 if (tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE)
2097 tipc_sk_send_ack(tsk);
2098
2099 /* Exit if all requested data or FIN/error received */
2100 if (copied == buflen || err)
2101 break;
2102
2103 } while (!skb_queue_empty(&sk->sk_receive_queue) || copied < required);
2104exit:
2105 release_sock(sk);
2106 return copied ? copied : rc;
2107}
2108
2109/**
2110 * tipc_write_space - wake up thread if port congestion is released
2111 * @sk: socket
2112 */
2113static void tipc_write_space(struct sock *sk)
2114{
2115 struct socket_wq *wq;
2116
2117 rcu_read_lock();
2118 wq = rcu_dereference(sk->sk_wq);
2119 if (skwq_has_sleeper(wq))
2120 wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
2121 EPOLLWRNORM | EPOLLWRBAND);
2122 rcu_read_unlock();
2123}
2124
2125/**
2126 * tipc_data_ready - wake up threads to indicate messages have been received
2127 * @sk: socket
2128 */
2129static void tipc_data_ready(struct sock *sk)
2130{
2131 struct socket_wq *wq;
2132
2133 rcu_read_lock();
2134 wq = rcu_dereference(sk->sk_wq);
2135 if (skwq_has_sleeper(wq))
2136 wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN |
2137 EPOLLRDNORM | EPOLLRDBAND);
2138 rcu_read_unlock();
2139}
2140
2141static void tipc_sock_destruct(struct sock *sk)
2142{
2143 __skb_queue_purge(&sk->sk_receive_queue);
2144}
2145
2146static void tipc_sk_proto_rcv(struct sock *sk,
2147 struct sk_buff_head *inputq,
2148 struct sk_buff_head *xmitq)
2149{
2150 struct sk_buff *skb = __skb_dequeue(inputq);
2151 struct tipc_sock *tsk = tipc_sk(sk);
2152 struct tipc_msg *hdr = buf_msg(skb);
2153 struct tipc_group *grp = tsk->group;
2154 bool wakeup = false;
2155
2156 switch (msg_user(hdr)) {
2157 case CONN_MANAGER:
2158 tipc_sk_conn_proto_rcv(tsk, skb, inputq, xmitq);
2159 return;
2160 case SOCK_WAKEUP:
2161 tipc_dest_del(&tsk->cong_links, msg_orignode(hdr), 0);
2162 /* coupled with smp_rmb() in tipc_wait_for_cond() */
2163 smp_wmb();
2164 tsk->cong_link_cnt--;
2165 wakeup = true;
2166 tipc_sk_push_backlog(tsk, false);
2167 break;
2168 case GROUP_PROTOCOL:
2169 tipc_group_proto_rcv(grp, &wakeup, hdr, inputq, xmitq);
2170 break;
2171 case TOP_SRV:
2172 tipc_group_member_evt(tsk->group, &wakeup, &sk->sk_rcvbuf,
2173 hdr, inputq, xmitq);
2174 break;
2175 default:
2176 break;
2177 }
2178
2179 if (wakeup)
2180 sk->sk_write_space(sk);
2181
2182 kfree_skb(skb);
2183}
2184
2185/**
2186 * tipc_sk_filter_connect - check incoming message for a connection-based socket
2187 * @tsk: TIPC socket
2188 * @skb: pointer to message buffer.
2189 * @xmitq: for Nagle ACK if any
2190 * Return: true if message should be added to receive queue, false otherwise
2191 */
2192static bool tipc_sk_filter_connect(struct tipc_sock *tsk, struct sk_buff *skb,
2193 struct sk_buff_head *xmitq)
2194{
2195 struct sock *sk = &tsk->sk;
2196 struct net *net = sock_net(sk);
2197 struct tipc_msg *hdr = buf_msg(skb);
2198 bool con_msg = msg_connected(hdr);
2199 u32 pport = tsk_peer_port(tsk);
2200 u32 pnode = tsk_peer_node(tsk);
2201 u32 oport = msg_origport(hdr);
2202 u32 onode = msg_orignode(hdr);
2203 int err = msg_errcode(hdr);
2204 unsigned long delay;
2205
2206 if (unlikely(msg_mcast(hdr)))
2207 return false;
2208 tsk->oneway = 0;
2209
2210 switch (sk->sk_state) {
2211 case TIPC_CONNECTING:
2212 /* Setup ACK */
2213 if (likely(con_msg)) {
2214 if (err)
2215 break;
2216 tipc_sk_finish_conn(tsk, oport, onode);
2217 msg_set_importance(&tsk->phdr, msg_importance(hdr));
2218 /* ACK+ message with data is added to receive queue */
2219 if (msg_data_sz(hdr))
2220 return true;
2221 			/* Empty ACK-: wake up sleeping connect() and drop */
2222 sk->sk_state_change(sk);
2223 msg_set_dest_droppable(hdr, 1);
2224 return false;
2225 }
2226 /* Ignore connectionless message if not from listening socket */
2227 if (oport != pport || onode != pnode)
2228 return false;
2229
2230 /* Rejected SYN */
2231 if (err != TIPC_ERR_OVERLOAD)
2232 break;
2233
2234 /* Prepare for new setup attempt if we have a SYN clone */
2235 if (skb_queue_empty(&sk->sk_write_queue))
2236 break;
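		/* Back off a random delay in [100, 100 + conn_timeout/4) ms
		 * before retransmitting the SYN
		 */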
2237 get_random_bytes(&delay, 2);
2238 delay %= (tsk->conn_timeout / 4);
2239 delay = msecs_to_jiffies(delay + 100);
2240 sk_reset_timer(sk, &sk->sk_timer, jiffies + delay);
2241 return false;
2242 case TIPC_OPEN:
2243 case TIPC_DISCONNECTING:
2244 return false;
2245 case TIPC_LISTEN:
2246 /* Accept only SYN message */
2247 if (!msg_is_syn(hdr) &&
2248 tipc_node_get_capabilities(net, onode) & TIPC_SYN_BIT)
2249 return false;
2250 if (!con_msg && !err)
2251 return true;
2252 return false;
2253 case TIPC_ESTABLISHED:
2254 if (!skb_queue_empty(&sk->sk_write_queue))
2255 tipc_sk_push_backlog(tsk, false);
2256 /* Accept only connection-based messages sent by peer */
2257 if (likely(con_msg && !err && pport == oport &&
2258 pnode == onode)) {
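			/* Peer requested an explicit (Nagle) acknowledgment */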
2259 if (msg_ack_required(hdr)) {
2260 struct sk_buff *skb;
2261
2262 skb = tipc_sk_build_ack(tsk);
2263 if (skb) {
2264 msg_set_nagle_ack(buf_msg(skb));
2265 __skb_queue_tail(xmitq, skb);
2266 }
2267 }
2268 return true;
2269 }
2270 if (!tsk_peer_msg(tsk, hdr))
2271 return false;
2272 if (!err)
2273 return true;
2274 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
2275 tipc_node_remove_conn(net, pnode, tsk->portid);
2276 sk->sk_state_change(sk);
2277 return true;
2278 default:
2279 pr_err("Unknown sk_state %u\n", sk->sk_state);
2280 }
2281 /* Abort connection setup attempt */
2282 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
2283 sk->sk_err = ECONNREFUSED;
2284 sk->sk_state_change(sk);
2285 return true;
2286}
2287
2288/**
2289 * rcvbuf_limit - get proper overload limit of socket receive queue
2290 * @sk: socket
2291 * @skb: message
2292 *
2293  * For connection-oriented messages, irrespective of importance,
2294  * the default queue limit is 2 MB.
2295 *
2296 * For connectionless messages, queue limits are based on message
2297 * importance as follows:
2298 *
2299 * TIPC_LOW_IMPORTANCE (2 MB)
2300 * TIPC_MEDIUM_IMPORTANCE (4 MB)
2301 * TIPC_HIGH_IMPORTANCE (8 MB)
2302 * TIPC_CRITICAL_IMPORTANCE (16 MB)
2303 *
2304 * Return: overload limit according to corresponding message importance
2305 */
2306static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *skb)
2307{
2308 struct tipc_sock *tsk = tipc_sk(sk);
2309 struct tipc_msg *hdr = buf_msg(skb);
2310
2311 if (unlikely(msg_in_group(hdr)))
2312 return READ_ONCE(sk->sk_rcvbuf);
2313
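	/* Connectionless: scale the base limit by message importance,
	 * yielding the 2/4/8/16 MB defaults listed above
	 */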
2314 if (unlikely(!msg_connected(hdr)))
2315 return READ_ONCE(sk->sk_rcvbuf) << msg_importance(hdr);
2316
2317 if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
2318 return READ_ONCE(sk->sk_rcvbuf);
2319
2320 return FLOWCTL_MSG_LIM;
2321}
2322
2323/**
2324 * tipc_sk_filter_rcv - validate incoming message
2325 * @sk: socket
2326 * @skb: pointer to message.
2327 * @xmitq: output message area (FIXME)
2328 *
2329 * Enqueues message on receive queue if acceptable; optionally handles
2330 * disconnect indication for a connected socket.
2331 *
2332 * Called with socket lock already taken
2333 */
2334static void tipc_sk_filter_rcv(struct sock *sk, struct sk_buff *skb,
2335 struct sk_buff_head *xmitq)
2336{
2337 bool sk_conn = !tipc_sk_type_connectionless(sk);
2338 struct tipc_sock *tsk = tipc_sk(sk);
2339 struct tipc_group *grp = tsk->group;
2340 struct tipc_msg *hdr = buf_msg(skb);
2341 struct net *net = sock_net(sk);
2342 struct sk_buff_head inputq;
2343 int mtyp = msg_type(hdr);
2344 int limit, err = TIPC_OK;
2345
2346 trace_tipc_sk_filter_rcv(sk, skb, TIPC_DUMP_ALL, " ");
2347 TIPC_SKB_CB(skb)->bytes_read = 0;
2348 __skb_queue_head_init(&inputq);
2349 __skb_queue_tail(&inputq, skb);
2350
2351 if (unlikely(!msg_isdata(hdr)))
2352 tipc_sk_proto_rcv(sk, &inputq, xmitq);
2353
2354 if (unlikely(grp))
2355 tipc_group_filter_msg(grp, &inputq, xmitq);
2356
2357 if (unlikely(!grp) && mtyp == TIPC_MCAST_MSG)
2358 tipc_mcast_filter_msg(net, &tsk->mc_method.deferredq, &inputq);
2359
2360 /* Validate and add to receive buffer if there is space */
2361 while ((skb = __skb_dequeue(&inputq))) {
2362 hdr = buf_msg(skb);
2363 limit = rcvbuf_limit(sk, skb);
2364 if ((sk_conn && !tipc_sk_filter_connect(tsk, skb, xmitq)) ||
2365 (!sk_conn && msg_connected(hdr)) ||
2366 (!grp && msg_in_group(hdr)))
2367 err = TIPC_ERR_NO_PORT;
2368 else if (sk_rmem_alloc_get(sk) + skb->truesize >= limit) {
2369 trace_tipc_sk_dump(sk, skb, TIPC_DUMP_ALL,
2370 "err_overload2!");
2371 atomic_inc(&sk->sk_drops);
2372 err = TIPC_ERR_OVERLOAD;
2373 }
2374
2375 if (unlikely(err)) {
2376 if (tipc_msg_reverse(tipc_own_addr(net), &skb, err)) {
2377 trace_tipc_sk_rej_msg(sk, skb, TIPC_DUMP_NONE,
2378 "@filter_rcv!");
2379 __skb_queue_tail(xmitq, skb);
2380 }
2381 err = TIPC_OK;
2382 continue;
2383 }
2384 __skb_queue_tail(&sk->sk_receive_queue, skb);
2385 skb_set_owner_r(skb, sk);
2386 trace_tipc_sk_overlimit2(sk, skb, TIPC_DUMP_ALL,
2387 "rcvq >90% allocated!");
2388 sk->sk_data_ready(sk);
2389 }
2390}
2391
2392/**
2393 * tipc_sk_backlog_rcv - handle incoming message from backlog queue
2394 * @sk: socket
2395 * @skb: message
2396 *
2397 * Caller must hold socket lock
2398 */
2399static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
2400{
2401 unsigned int before = sk_rmem_alloc_get(sk);
2402 struct sk_buff_head xmitq;
2403 unsigned int added;
2404
2405 __skb_queue_head_init(&xmitq);
2406
2407 tipc_sk_filter_rcv(sk, skb, &xmitq);
2408 added = sk_rmem_alloc_get(sk) - before;
2409 atomic_add(added, &tipc_sk(sk)->dupl_rcvcnt);
2410
2411 /* Send pending response/rejected messages, if any */
2412 tipc_node_distr_xmit(sock_net(sk), &xmitq);
2413 return 0;
2414}
2415
2416/**
2417 * tipc_sk_enqueue - extract all buffers with destination 'dport' from
2418 * inputq and try adding them to socket or backlog queue
2419 * @inputq: list of incoming buffers with potentially different destinations
2420 * @sk: socket where the buffers should be enqueued
2421 * @dport: port number for the socket
2422 * @xmitq: output queue
2423 *
2424 * Caller must hold socket lock
2425 */
2426static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
2427 u32 dport, struct sk_buff_head *xmitq)
2428{
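	/* Bound the time spent in this loop to roughly 20 ms per call */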
2429 unsigned long time_limit = jiffies + usecs_to_jiffies(20000);
2430 struct sk_buff *skb;
2431 unsigned int lim;
2432 atomic_t *dcnt;
2433 u32 onode;
2434
2435 while (skb_queue_len(inputq)) {
2436 if (unlikely(time_after_eq(jiffies, time_limit)))
2437 return;
2438
2439 skb = tipc_skb_dequeue(inputq, dport);
2440 if (unlikely(!skb))
2441 return;
2442
2443 /* Add message directly to receive queue if possible */
2444 if (!sock_owned_by_user(sk)) {
2445 tipc_sk_filter_rcv(sk, skb, xmitq);
2446 continue;
2447 }
2448
2449 /* Try backlog, compensating for double-counted bytes */
2450 dcnt = &tipc_sk(sk)->dupl_rcvcnt;
2451 if (!sk->sk_backlog.len)
2452 atomic_set(dcnt, 0);
2453 lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
2454 if (likely(!sk_add_backlog(sk, skb, lim))) {
2455 trace_tipc_sk_overlimit1(sk, skb, TIPC_DUMP_ALL,
2456 "bklg & rcvq >90% allocated!");
2457 continue;
2458 }
2459
2460 trace_tipc_sk_dump(sk, skb, TIPC_DUMP_ALL, "err_overload!");
2461 /* Overload => reject message back to sender */
2462 onode = tipc_own_addr(sock_net(sk));
2463 atomic_inc(&sk->sk_drops);
2464 if (tipc_msg_reverse(onode, &skb, TIPC_ERR_OVERLOAD)) {
2465 trace_tipc_sk_rej_msg(sk, skb, TIPC_DUMP_ALL,
2466 "@sk_enqueue!");
2467 __skb_queue_tail(xmitq, skb);
2468 }
2469 break;
2470 }
2471}
2472
2473/**
2474 * tipc_sk_rcv - handle a chain of incoming buffers
2475 * @net: the associated network namespace
2476 * @inputq: buffer list containing the buffers
2477 * Consumes all buffers in list until inputq is empty
2478 * Note: may be called in multiple threads referring to the same queue
2479 */
2480void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
2481{
2482 struct sk_buff_head xmitq;
2483 u32 dnode, dport = 0;
2484 int err;
2485 struct tipc_sock *tsk;
2486 struct sock *sk;
2487 struct sk_buff *skb;
2488
2489 __skb_queue_head_init(&xmitq);
2490 while (skb_queue_len(inputq)) {
2491 dport = tipc_skb_peek_port(inputq, dport);
2492 tsk = tipc_sk_lookup(net, dport);
2493
2494 if (likely(tsk)) {
2495 sk = &tsk->sk;
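			/* On lock contention the buffers stay in inputq and
			 * the outer loop retries this destination port
			 */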
2496 if (likely(spin_trylock_bh(&sk->sk_lock.slock))) {
2497 tipc_sk_enqueue(inputq, sk, dport, &xmitq);
2498 spin_unlock_bh(&sk->sk_lock.slock);
2499 }
2500 /* Send pending response/rejected messages, if any */
2501 tipc_node_distr_xmit(sock_net(sk), &xmitq);
2502 sock_put(sk);
2503 continue;
2504 }
2505 /* No destination socket => dequeue skb if still there */
2506 skb = tipc_skb_dequeue(inputq, dport);
2507 if (!skb)
2508 return;
2509
2510 /* Try secondary lookup if unresolved named message */
2511 err = TIPC_ERR_NO_PORT;
2512 if (tipc_msg_lookup_dest(net, skb, &err))
2513 goto xmit;
2514
2515 /* Prepare for message rejection */
2516 if (!tipc_msg_reverse(tipc_own_addr(net), &skb, err))
2517 continue;
2518
2519 trace_tipc_sk_rej_msg(NULL, skb, TIPC_DUMP_NONE, "@sk_rcv!");
2520xmit:
2521 dnode = msg_destnode(buf_msg(skb));
2522 tipc_node_xmit_skb(net, skb, dnode, dport);
2523 }
2524}
2525
2526static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
2527{
2528 DEFINE_WAIT_FUNC(wait, woken_wake_function);
2529 struct sock *sk = sock->sk;
2530 int done;
2531
2532 do {
2533 int err = sock_error(sk);
2534 if (err)
2535 return err;
2536 if (!*timeo_p)
2537 return -ETIMEDOUT;
2538 if (signal_pending(current))
2539 return sock_intr_errno(*timeo_p);
2540 if (sk->sk_state == TIPC_DISCONNECTING)
2541 break;
2542
2543 add_wait_queue(sk_sleep(sk), &wait);
2544 done = sk_wait_event(sk, timeo_p, tipc_sk_connected(sk),
2545 &wait);
2546 remove_wait_queue(sk_sleep(sk), &wait);
2547 } while (!done);
2548 return 0;
2549}
2550
2551static bool tipc_sockaddr_is_sane(struct sockaddr_tipc *addr)
2552{
2553 if (addr->family != AF_TIPC)
2554 return false;
2555 if (addr->addrtype == TIPC_SERVICE_RANGE)
2556 return (addr->addr.nameseq.lower <= addr->addr.nameseq.upper);
2557 return (addr->addrtype == TIPC_SERVICE_ADDR ||
2558 addr->addrtype == TIPC_SOCKET_ADDR);
2559}
2560
2561/**
2562 * tipc_connect - establish a connection to another TIPC port
2563 * @sock: socket structure
2564 * @dest: socket address for destination port
2565 * @destlen: size of socket address data structure
2566 * @flags: file-related flags associated with socket
2567 *
2568 * Return: 0 on success, errno otherwise
2569 */
2570static int tipc_connect(struct socket *sock, struct sockaddr *dest,
2571 int destlen, int flags)
2572{
2573 struct sock *sk = sock->sk;
2574 struct tipc_sock *tsk = tipc_sk(sk);
2575 struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest;
2576 struct msghdr m = {NULL,};
2577 long timeout = (flags & O_NONBLOCK) ? 0 : tsk->conn_timeout;
2578 int previous;
2579 int res = 0;
2580
2581 if (destlen != sizeof(struct sockaddr_tipc))
2582 return -EINVAL;
2583
2584 lock_sock(sk);
2585
2586 if (tsk->group) {
2587 res = -EINVAL;
2588 goto exit;
2589 }
2590
2591 if (dst->family == AF_UNSPEC) {
2592 memset(&tsk->peer, 0, sizeof(struct sockaddr_tipc));
2593 if (!tipc_sk_type_connectionless(sk))
2594 res = -EINVAL;
2595 goto exit;
2596 }
2597 if (!tipc_sockaddr_is_sane(dst)) {
2598 res = -EINVAL;
2599 goto exit;
2600 }
2601 /* DGRAM/RDM connect(), just save the destaddr */
2602 if (tipc_sk_type_connectionless(sk)) {
2603 memcpy(&tsk->peer, dest, destlen);
2604 goto exit;
2605 } else if (dst->addrtype == TIPC_SERVICE_RANGE) {
2606 res = -EINVAL;
2607 goto exit;
2608 }
2609
2610 previous = sk->sk_state;
2611
2612 switch (sk->sk_state) {
2613 case TIPC_OPEN:
2614 /* Send a 'SYN-' to destination */
2615 m.msg_name = dest;
2616 m.msg_namelen = destlen;
2617 iov_iter_kvec(&m.msg_iter, ITER_SOURCE, NULL, 0, 0);
2618
2619 		/* For a non-blocking connect(), set MSG_DONTWAIT so that
2620 		 * the SYN transmission never blocks.
2621 		 */
2622 if (!timeout)
2623 m.msg_flags = MSG_DONTWAIT;
2624
2625 res = __tipc_sendmsg(sock, &m, 0);
2626 if ((res < 0) && (res != -EWOULDBLOCK))
2627 goto exit;
2628
2629 		/* We have just entered TIPC_CONNECTING state; the only
2630 		 * difference from a repeated connect() is that the
2631 		 * non-blocking return value is EINPROGRESS, not EALREADY.
2632 		 */
2633 res = -EINPROGRESS;
2634 fallthrough;
2635 case TIPC_CONNECTING:
2636 if (!timeout) {
2637 if (previous == TIPC_CONNECTING)
2638 res = -EALREADY;
2639 goto exit;
2640 }
2641 timeout = msecs_to_jiffies(timeout);
2642 /* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */
2643 res = tipc_wait_for_connect(sock, &timeout);
2644 break;
2645 case TIPC_ESTABLISHED:
2646 res = -EISCONN;
2647 break;
2648 default:
2649 res = -EINVAL;
2650 }
2651
2652exit:
2653 release_sock(sk);
2654 return res;
2655}
2656
2657/**
2658 * tipc_listen - allow socket to listen for incoming connections
2659 * @sock: socket structure
2660 * @len: (unused)
2661 *
2662 * Return: 0 on success, errno otherwise
2663 */
2664static int tipc_listen(struct socket *sock, int len)
2665{
2666 struct sock *sk = sock->sk;
2667 int res;
2668
2669 lock_sock(sk);
2670 res = tipc_set_sk_state(sk, TIPC_LISTEN);
2671 release_sock(sk);
2672
2673 return res;
2674}
2675
2676static int tipc_wait_for_accept(struct socket *sock, long timeo)
2677{
2678 struct sock *sk = sock->sk;
2679 DEFINE_WAIT_FUNC(wait, woken_wake_function);
2680 int err;
2681
2682 /* True wake-one mechanism for incoming connections: only
2683 * one process gets woken up, not the 'whole herd'.
2684 * Since we do not 'race & poll' for established sockets
2685 * anymore, the common case will execute the loop only once.
2686 */
2687 for (;;) {
2688 if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
2689 add_wait_queue(sk_sleep(sk), &wait);
2690 release_sock(sk);
2691 timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
2692 lock_sock(sk);
2693 remove_wait_queue(sk_sleep(sk), &wait);
2694 }
2695 err = 0;
2696 if (!skb_queue_empty(&sk->sk_receive_queue))
2697 break;
2698 err = -EAGAIN;
2699 if (!timeo)
2700 break;
2701 err = sock_intr_errno(timeo);
2702 if (signal_pending(current))
2703 break;
2704 }
2705 return err;
2706}
2707
2708/**
2709 * tipc_accept - wait for connection request
2710 * @sock: listening socket
2711 * @new_sock: new socket that is to be connected
2712 * @flags: file-related flags associated with socket
2713 * @kern: caused by kernel or by userspace?
2714 *
2715 * Return: 0 on success, errno otherwise
2716 */
2717static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
2718 bool kern)
2719{
2720 struct sock *new_sk, *sk = sock->sk;
2721 struct tipc_sock *new_tsock;
2722 struct msghdr m = {NULL,};
2723 struct tipc_msg *msg;
2724 struct sk_buff *buf;
2725 long timeo;
2726 int res;
2727
2728 lock_sock(sk);
2729
2730 if (sk->sk_state != TIPC_LISTEN) {
2731 res = -EINVAL;
2732 goto exit;
2733 }
2734 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
2735 res = tipc_wait_for_accept(sock, timeo);
2736 if (res)
2737 goto exit;
2738
2739 buf = skb_peek(&sk->sk_receive_queue);
2740
2741 res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, kern);
2742 if (res)
2743 goto exit;
2744 security_sk_clone(sock->sk, new_sock->sk);
2745
2746 new_sk = new_sock->sk;
2747 new_tsock = tipc_sk(new_sk);
2748 msg = buf_msg(buf);
2749
2750 /* we lock on new_sk; but lockdep sees the lock on sk */
2751 lock_sock_nested(new_sk, SINGLE_DEPTH_NESTING);
2752
2753 /*
2754 * Reject any stray messages received by new socket
2755 * before the socket lock was taken (very, very unlikely)
2756 */
2757 tsk_rej_rx_queue(new_sk, TIPC_ERR_NO_PORT);
2758
2759 	/* Connect new socket to its peer */
2760 tipc_sk_finish_conn(new_tsock, msg_origport(msg), msg_orignode(msg));
2761
2762 tsk_set_importance(new_sk, msg_importance(msg));
2763 if (msg_named(msg)) {
2764 new_tsock->conn_addrtype = TIPC_SERVICE_ADDR;
2765 msg_set_nametype(&new_tsock->phdr, msg_nametype(msg));
2766 msg_set_nameinst(&new_tsock->phdr, msg_nameinst(msg));
2767 }
2768
2769 /*
2770 * Respond to 'SYN-' by discarding it & returning 'ACK'.
2771 * Respond to 'SYN+' by queuing it on new socket & returning 'ACK'.
2772 */
2773 if (!msg_data_sz(msg)) {
2774 tsk_advance_rx_queue(sk);
2775 } else {
2776 __skb_dequeue(&sk->sk_receive_queue);
2777 __skb_queue_head(&new_sk->sk_receive_queue, buf);
2778 skb_set_owner_r(buf, new_sk);
2779 }
2780 iov_iter_kvec(&m.msg_iter, ITER_SOURCE, NULL, 0, 0);
2781 __tipc_sendstream(new_sock, &m, 0);
2782 release_sock(new_sk);
2783exit:
2784 release_sock(sk);
2785 return res;
2786}
2787
2788/**
2789 * tipc_shutdown - shutdown socket connection
2790 * @sock: socket structure
2791 * @how: direction to close (must be SHUT_RDWR)
2792 *
2793 * Terminates connection (if necessary), then purges socket's receive queue.
2794 *
2795 * Return: 0 on success, errno otherwise
2796 */
2797static int tipc_shutdown(struct socket *sock, int how)
2798{
2799 struct sock *sk = sock->sk;
2800 int res;
2801
2802 if (how != SHUT_RDWR)
2803 return -EINVAL;
2804
2805 lock_sock(sk);
2806
2807 trace_tipc_sk_shutdown(sk, NULL, TIPC_DUMP_ALL, " ");
2808 __tipc_shutdown(sock, TIPC_CONN_SHUTDOWN);
2809 sk->sk_shutdown = SHUTDOWN_MASK;
2810
2811 if (sk->sk_state == TIPC_DISCONNECTING) {
2812 /* Discard any unreceived messages */
2813 __skb_queue_purge(&sk->sk_receive_queue);
2814
2815 res = 0;
2816 } else {
2817 res = -ENOTCONN;
2818 }
2819 /* Wake up anyone sleeping in poll. */
2820 sk->sk_state_change(sk);
2821
2822 release_sock(sk);
2823 return res;
2824}
2825
2826static void tipc_sk_check_probing_state(struct sock *sk,
2827 struct sk_buff_head *list)
2828{
2829 struct tipc_sock *tsk = tipc_sk(sk);
2830 u32 pnode = tsk_peer_node(tsk);
2831 u32 pport = tsk_peer_port(tsk);
2832 u32 self = tsk_own_node(tsk);
2833 u32 oport = tsk->portid;
2834 struct sk_buff *skb;
2835
2836 if (tsk->probe_unacked) {
2837 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
2838 sk->sk_err = ECONNABORTED;
2839 tipc_node_remove_conn(sock_net(sk), pnode, pport);
2840 sk->sk_state_change(sk);
2841 return;
2842 }
2843 /* Prepare new probe */
2844 skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE, 0,
2845 pnode, self, pport, oport, TIPC_OK);
2846 if (skb)
2847 __skb_queue_tail(list, skb);
2848 tsk->probe_unacked = true;
2849 sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV);
2850}
2851
2852static void tipc_sk_retry_connect(struct sock *sk, struct sk_buff_head *list)
2853{
2854 struct tipc_sock *tsk = tipc_sk(sk);
2855
2856 /* Try again later if dest link is congested */
2857 if (tsk->cong_link_cnt) {
2858 sk_reset_timer(sk, &sk->sk_timer,
2859 jiffies + msecs_to_jiffies(100));
2860 return;
2861 }
2862 /* Prepare SYN for retransmit */
2863 tipc_msg_skb_clone(&sk->sk_write_queue, list);
2864}
2865
2866static void tipc_sk_timeout(struct timer_list *t)
2867{
2868 struct sock *sk = from_timer(sk, t, sk_timer);
2869 struct tipc_sock *tsk = tipc_sk(sk);
2870 u32 pnode = tsk_peer_node(tsk);
2871 struct sk_buff_head list;
2872 int rc = 0;
2873
2874 __skb_queue_head_init(&list);
2875 bh_lock_sock(sk);
2876
2877 /* Try again later if socket is busy */
2878 if (sock_owned_by_user(sk)) {
2879 sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ / 20);
2880 bh_unlock_sock(sk);
2881 sock_put(sk);
2882 return;
2883 }
2884
2885 if (sk->sk_state == TIPC_ESTABLISHED)
2886 tipc_sk_check_probing_state(sk, &list);
2887 else if (sk->sk_state == TIPC_CONNECTING)
2888 tipc_sk_retry_connect(sk, &list);
2889
2890 bh_unlock_sock(sk);
2891
2892 if (!skb_queue_empty(&list))
2893 rc = tipc_node_xmit(sock_net(sk), &list, pnode, tsk->portid);
2894
2895 /* SYN messages may cause link congestion */
2896 if (rc == -ELINKCONG) {
2897 tipc_dest_push(&tsk->cong_links, pnode, 0);
2898 tsk->cong_link_cnt = 1;
2899 }
2900 sock_put(sk);
2901}
2902
2903static int tipc_sk_publish(struct tipc_sock *tsk, struct tipc_uaddr *ua)
2904{
2905 struct sock *sk = &tsk->sk;
2906 struct net *net = sock_net(sk);
2907 struct tipc_socket_addr skaddr;
2908 struct publication *p;
2909 u32 key;
2910
2911 if (tipc_sk_connected(sk))
2912 return -EINVAL;
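	/* Pick a key distinct from the portid itself; wrap-around back to
	 * the portid means the per-socket key space is exhausted
	 */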
2913 key = tsk->portid + tsk->pub_count + 1;
2914 if (key == tsk->portid)
2915 return -EADDRINUSE;
2916 skaddr.ref = tsk->portid;
2917 skaddr.node = tipc_own_addr(net);
2918 p = tipc_nametbl_publish(net, ua, &skaddr, key);
2919 if (unlikely(!p))
2920 return -EINVAL;
2921
2922 list_add(&p->binding_sock, &tsk->publications);
2923 tsk->pub_count++;
2924 tsk->published = true;
2925 return 0;
2926}
2927
2928static int tipc_sk_withdraw(struct tipc_sock *tsk, struct tipc_uaddr *ua)
2929{
2930 struct net *net = sock_net(&tsk->sk);
2931 struct publication *safe, *p;
2932 struct tipc_uaddr _ua;
2933 int rc = -EINVAL;
2934
2935 list_for_each_entry_safe(p, safe, &tsk->publications, binding_sock) {
2936 if (!ua) {
2937 tipc_uaddr(&_ua, TIPC_SERVICE_RANGE, p->scope,
2938 p->sr.type, p->sr.lower, p->sr.upper);
2939 tipc_nametbl_withdraw(net, &_ua, &p->sk, p->key);
2940 continue;
2941 }
2942 /* Unbind specific publication */
2943 if (p->scope != ua->scope)
2944 continue;
2945 if (p->sr.type != ua->sr.type)
2946 continue;
2947 if (p->sr.lower != ua->sr.lower)
2948 continue;
2949 if (p->sr.upper != ua->sr.upper)
2950 break;
2951 tipc_nametbl_withdraw(net, ua, &p->sk, p->key);
2952 rc = 0;
2953 break;
2954 }
2955 if (list_empty(&tsk->publications)) {
2956 tsk->published = 0;
2957 rc = 0;
2958 }
2959 return rc;
2960}
2961
2962/* tipc_sk_reinit: set non-zero address in all existing sockets
2963 * when we go from standalone to network mode.
2964 */
2965void tipc_sk_reinit(struct net *net)
2966{
2967 struct tipc_net *tn = net_generic(net, tipc_net_id);
2968 struct rhashtable_iter iter;
2969 struct tipc_sock *tsk;
2970 struct tipc_msg *msg;
2971
2972 rhashtable_walk_enter(&tn->sk_rht, &iter);
2973
2974 do {
2975 rhashtable_walk_start(&iter);
2976
2977 while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) {
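			/* Pause the walk before taking the socket lock, since
			 * lock_sock() may sleep while the walk holds RCU
			 */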
2978 sock_hold(&tsk->sk);
2979 rhashtable_walk_stop(&iter);
2980 lock_sock(&tsk->sk);
2981 msg = &tsk->phdr;
2982 msg_set_prevnode(msg, tipc_own_addr(net));
2983 msg_set_orignode(msg, tipc_own_addr(net));
2984 release_sock(&tsk->sk);
2985 rhashtable_walk_start(&iter);
2986 sock_put(&tsk->sk);
2987 }
2988
2989 rhashtable_walk_stop(&iter);
2990 } while (tsk == ERR_PTR(-EAGAIN));
2991
2992 rhashtable_walk_exit(&iter);
2993}
2994
2995static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid)
2996{
2997 struct tipc_net *tn = net_generic(net, tipc_net_id);
2998 struct tipc_sock *tsk;
2999
3000 rcu_read_lock();
3001 tsk = rhashtable_lookup(&tn->sk_rht, &portid, tsk_rht_params);
3002 if (tsk)
3003 sock_hold(&tsk->sk);
3004 rcu_read_unlock();
3005
3006 return tsk;
3007}
3008
3009static int tipc_sk_insert(struct tipc_sock *tsk)
3010{
3011 struct sock *sk = &tsk->sk;
3012 struct net *net = sock_net(sk);
3013 struct tipc_net *tn = net_generic(net, tipc_net_id);
3014 u32 remaining = (TIPC_MAX_PORT - TIPC_MIN_PORT) + 1;
3015 u32 portid = get_random_u32_below(remaining) + TIPC_MIN_PORT;
3016
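	/* Probe the port space linearly from a random starting point
	 * until an unused portid is found
	 */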
3017 while (remaining--) {
3018 portid++;
3019 if ((portid < TIPC_MIN_PORT) || (portid > TIPC_MAX_PORT))
3020 portid = TIPC_MIN_PORT;
3021 tsk->portid = portid;
3022 sock_hold(&tsk->sk);
3023 if (!rhashtable_lookup_insert_fast(&tn->sk_rht, &tsk->node,
3024 tsk_rht_params))
3025 return 0;
3026 sock_put(&tsk->sk);
3027 }
3028
3029 return -1;
3030}
3031
3032static void tipc_sk_remove(struct tipc_sock *tsk)
3033{
3034 struct sock *sk = &tsk->sk;
3035 struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id);
3036
3037 if (!rhashtable_remove_fast(&tn->sk_rht, &tsk->node, tsk_rht_params)) {
3038 WARN_ON(refcount_read(&sk->sk_refcnt) == 1);
3039 __sock_put(sk);
3040 }
3041}
3042
3043static const struct rhashtable_params tsk_rht_params = {
3044 .nelem_hint = 192,
3045 .head_offset = offsetof(struct tipc_sock, node),
3046 .key_offset = offsetof(struct tipc_sock, portid),
3047 .key_len = sizeof(u32), /* portid */
3048 .max_size = 1048576,
3049 .min_size = 256,
3050 .automatic_shrinking = true,
3051};
3052
3053int tipc_sk_rht_init(struct net *net)
3054{
3055 struct tipc_net *tn = net_generic(net, tipc_net_id);
3056
3057 return rhashtable_init(&tn->sk_rht, &tsk_rht_params);
3058}
3059
3060void tipc_sk_rht_destroy(struct net *net)
3061{
3062 struct tipc_net *tn = net_generic(net, tipc_net_id);
3063
3064 /* Wait for socket readers to complete */
3065 synchronize_net();
3066
3067 rhashtable_destroy(&tn->sk_rht);
3068}
3069
3070static int tipc_sk_join(struct tipc_sock *tsk, struct tipc_group_req *mreq)
3071{
3072 struct net *net = sock_net(&tsk->sk);
3073 struct tipc_group *grp = tsk->group;
3074 struct tipc_msg *hdr = &tsk->phdr;
3075 struct tipc_uaddr ua;
3076 int rc;
3077
3078 if (mreq->type < TIPC_RESERVED_TYPES)
3079 return -EACCES;
3080 if (mreq->scope > TIPC_NODE_SCOPE)
3081 return -EINVAL;
3082 if (mreq->scope != TIPC_NODE_SCOPE)
3083 mreq->scope = TIPC_CLUSTER_SCOPE;
3084 if (grp)
3085 return -EACCES;
3086 grp = tipc_group_create(net, tsk->portid, mreq, &tsk->group_is_open);
3087 if (!grp)
3088 return -ENOMEM;
3089 tsk->group = grp;
3090 msg_set_lookup_scope(hdr, mreq->scope);
3091 msg_set_nametype(hdr, mreq->type);
3092 msg_set_dest_droppable(hdr, true);
3093 tipc_uaddr(&ua, TIPC_SERVICE_RANGE, mreq->scope,
3094 mreq->type, mreq->instance, mreq->instance);
3095 tipc_nametbl_build_group(net, grp, &ua);
3096 rc = tipc_sk_publish(tsk, &ua);
3097 if (rc) {
3098 tipc_group_delete(net, grp);
3099 tsk->group = NULL;
3100 return rc;
3101 }
3102 /* Eliminate any risk that a broadcast overtakes sent JOINs */
3103 tsk->mc_method.rcast = true;
3104 tsk->mc_method.mandatory = true;
3105 tipc_group_join(net, grp, &tsk->sk.sk_rcvbuf);
3106 return rc;
3107}
3108
3109static int tipc_sk_leave(struct tipc_sock *tsk)
3110{
3111 struct net *net = sock_net(&tsk->sk);
3112 struct tipc_group *grp = tsk->group;
3113 struct tipc_uaddr ua;
3114 int scope;
3115
3116 if (!grp)
3117 return -EINVAL;
3118 ua.addrtype = TIPC_SERVICE_RANGE;
3119 tipc_group_self(grp, &ua.sr, &scope);
3120 ua.scope = scope;
3121 tipc_group_delete(net, grp);
3122 tsk->group = NULL;
3123 tipc_sk_withdraw(tsk, &ua);
3124 return 0;
3125}
3126
3127/**
3128 * tipc_setsockopt - set socket option
3129 * @sock: socket structure
3130 * @lvl: option level
3131 * @opt: option identifier
3132 * @ov: pointer to new option value
3133 * @ol: length of option value
3134 *
3135 * For stream sockets only, accepts and ignores all IPPROTO_TCP options
3136 * (to ease compatibility).
3137 *
3138 * Return: 0 on success, errno otherwise
3139 */
3140static int tipc_setsockopt(struct socket *sock, int lvl, int opt,
3141 sockptr_t ov, unsigned int ol)
3142{
3143 struct sock *sk = sock->sk;
3144 struct tipc_sock *tsk = tipc_sk(sk);
3145 struct tipc_group_req mreq;
3146 u32 value = 0;
3147 int res = 0;
3148
3149 if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
3150 return 0;
3151 if (lvl != SOL_TIPC)
3152 return -ENOPROTOOPT;
3153
3154 switch (opt) {
3155 case TIPC_IMPORTANCE:
3156 case TIPC_SRC_DROPPABLE:
3157 case TIPC_DEST_DROPPABLE:
3158 case TIPC_CONN_TIMEOUT:
3159 case TIPC_NODELAY:
3160 if (ol < sizeof(value))
3161 return -EINVAL;
3162 if (copy_from_sockptr(&value, ov, sizeof(u32)))
3163 return -EFAULT;
3164 break;
3165 case TIPC_GROUP_JOIN:
3166 if (ol < sizeof(mreq))
3167 return -EINVAL;
3168 if (copy_from_sockptr(&mreq, ov, sizeof(mreq)))
3169 return -EFAULT;
3170 break;
3171 default:
3172 if (!sockptr_is_null(ov) || ol)
3173 return -EINVAL;
3174 }
3175
3176 lock_sock(sk);
3177
3178 switch (opt) {
3179 case TIPC_IMPORTANCE:
3180 res = tsk_set_importance(sk, value);
3181 break;
3182 case TIPC_SRC_DROPPABLE:
3183 if (sock->type != SOCK_STREAM)
3184 tsk_set_unreliable(tsk, value);
3185 else
3186 res = -ENOPROTOOPT;
3187 break;
3188 case TIPC_DEST_DROPPABLE:
3189 tsk_set_unreturnable(tsk, value);
3190 break;
3191 case TIPC_CONN_TIMEOUT:
3192 tipc_sk(sk)->conn_timeout = value;
3193 break;
3194 case TIPC_MCAST_BROADCAST:
3195 tsk->mc_method.rcast = false;
3196 tsk->mc_method.mandatory = true;
3197 break;
3198 case TIPC_MCAST_REPLICAST:
3199 tsk->mc_method.rcast = true;
3200 tsk->mc_method.mandatory = true;
3201 break;
3202 case TIPC_GROUP_JOIN:
3203 res = tipc_sk_join(tsk, &mreq);
3204 break;
3205 case TIPC_GROUP_LEAVE:
3206 res = tipc_sk_leave(tsk);
3207 break;
3208 case TIPC_NODELAY:
3209 tsk->nodelay = !!value;
3210 tsk_set_nagle(tsk);
3211 break;
3212 default:
3213 res = -EINVAL;
3214 }
3215
3216 release_sock(sk);
3217
3218 return res;
3219}
3220
3221/**
3222 * tipc_getsockopt - get socket option
3223 * @sock: socket structure
3224 * @lvl: option level
3225 * @opt: option identifier
3226 * @ov: receptacle for option value
3227 * @ol: receptacle for length of option value
3228 *
3229 * For stream sockets only, returns 0 length result for all IPPROTO_TCP options
3230 * (to ease compatibility).
3231 *
3232 * Return: 0 on success, errno otherwise
3233 */
3234static int tipc_getsockopt(struct socket *sock, int lvl, int opt,
3235 char __user *ov, int __user *ol)
3236{
3237 struct sock *sk = sock->sk;
3238 struct tipc_sock *tsk = tipc_sk(sk);
3239 struct tipc_service_range seq;
3240 int len, scope;
3241 u32 value;
3242 int res;
3243
3244 if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
3245 return put_user(0, ol);
3246 if (lvl != SOL_TIPC)
3247 return -ENOPROTOOPT;
3248 res = get_user(len, ol);
3249 if (res)
3250 return res;
3251
3252 lock_sock(sk);
3253
3254 switch (opt) {
3255 case TIPC_IMPORTANCE:
3256 value = tsk_importance(tsk);
3257 break;
3258 case TIPC_SRC_DROPPABLE:
3259 value = tsk_unreliable(tsk);
3260 break;
3261 case TIPC_DEST_DROPPABLE:
3262 value = tsk_unreturnable(tsk);
3263 break;
3264 case TIPC_CONN_TIMEOUT:
3265 value = tsk->conn_timeout;
3266 /* no need to set "res", since already 0 at this point */
3267 break;
3268 case TIPC_NODE_RECVQ_DEPTH:
3269 value = 0; /* was tipc_queue_size, now obsolete */
3270 break;
3271 case TIPC_SOCK_RECVQ_DEPTH:
3272 value = skb_queue_len(&sk->sk_receive_queue);
3273 break;
3274 case TIPC_SOCK_RECVQ_USED:
3275 value = sk_rmem_alloc_get(sk);
3276 break;
3277 case TIPC_GROUP_JOIN:
3278 seq.type = 0;
3279 if (tsk->group)
3280 tipc_group_self(tsk->group, &seq, &scope);
3281 value = seq.type;
3282 break;
3283 default:
3284 res = -EINVAL;
3285 }
3286
3287 release_sock(sk);
3288
3289 if (res)
3290 return res; /* "get" failed */
3291
3292 if (len < sizeof(value))
3293 return -EINVAL;
3294
3295 if (copy_to_user(ov, &value, sizeof(value)))
3296 return -EFAULT;
3297
3298 return put_user(sizeof(value), ol);
3299}
3300
3301static int tipc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
3302{
3303 struct net *net = sock_net(sock->sk);
3304 struct tipc_sioc_nodeid_req nr = {0};
3305 struct tipc_sioc_ln_req lnr;
3306 void __user *argp = (void __user *)arg;
3307
3308 switch (cmd) {
3309 case SIOCGETLINKNAME:
3310 if (copy_from_user(&lnr, argp, sizeof(lnr)))
3311 return -EFAULT;
3312 if (!tipc_node_get_linkname(net,
3313 lnr.bearer_id & 0xffff, lnr.peer,
3314 lnr.linkname, TIPC_MAX_LINK_NAME)) {
3315 if (copy_to_user(argp, &lnr, sizeof(lnr)))
3316 return -EFAULT;
3317 return 0;
3318 }
3319 return -EADDRNOTAVAIL;
3320 case SIOCGETNODEID:
3321 if (copy_from_user(&nr, argp, sizeof(nr)))
3322 return -EFAULT;
3323 if (!tipc_node_get_id(net, nr.peer, nr.node_id))
3324 return -EADDRNOTAVAIL;
3325 if (copy_to_user(argp, &nr, sizeof(nr)))
3326 return -EFAULT;
3327 return 0;
3328 default:
3329 return -ENOIOCTLCMD;
3330 }
3331}
3332
3333static int tipc_socketpair(struct socket *sock1, struct socket *sock2)
3334{
3335 struct tipc_sock *tsk2 = tipc_sk(sock2->sk);
3336 struct tipc_sock *tsk1 = tipc_sk(sock1->sk);
3337 u32 onode = tipc_own_addr(sock_net(sock1->sk));
3338
3339 tsk1->peer.family = AF_TIPC;
3340 tsk1->peer.addrtype = TIPC_SOCKET_ADDR;
3341 tsk1->peer.scope = TIPC_NODE_SCOPE;
3342 tsk1->peer.addr.id.ref = tsk2->portid;
3343 tsk1->peer.addr.id.node = onode;
3344 tsk2->peer.family = AF_TIPC;
3345 tsk2->peer.addrtype = TIPC_SOCKET_ADDR;
3346 tsk2->peer.scope = TIPC_NODE_SCOPE;
3347 tsk2->peer.addr.id.ref = tsk1->portid;
3348 tsk2->peer.addr.id.node = onode;
3349
3350 tipc_sk_finish_conn(tsk1, tsk2->portid, onode);
3351 tipc_sk_finish_conn(tsk2, tsk1->portid, onode);
3352 return 0;
3353}
3354
3355/* Protocol switches for the various types of TIPC sockets */
3356
3357static const struct proto_ops msg_ops = {
3358 .owner = THIS_MODULE,
3359 .family = AF_TIPC,
3360 .release = tipc_release,
3361 .bind = tipc_bind,
3362 .connect = tipc_connect,
3363 .socketpair = tipc_socketpair,
3364 .accept = sock_no_accept,
3365 .getname = tipc_getname,
3366 .poll = tipc_poll,
3367 .ioctl = tipc_ioctl,
3368 .listen = sock_no_listen,
3369 .shutdown = tipc_shutdown,
3370 .setsockopt = tipc_setsockopt,
3371 .getsockopt = tipc_getsockopt,
3372 .sendmsg = tipc_sendmsg,
3373 .recvmsg = tipc_recvmsg,
3374 .mmap = sock_no_mmap,
3375 .sendpage = sock_no_sendpage
3376};
3377
3378static const struct proto_ops packet_ops = {
3379 .owner = THIS_MODULE,
3380 .family = AF_TIPC,
3381 .release = tipc_release,
3382 .bind = tipc_bind,
3383 .connect = tipc_connect,
3384 .socketpair = tipc_socketpair,
3385 .accept = tipc_accept,
3386 .getname = tipc_getname,
3387 .poll = tipc_poll,
3388 .ioctl = tipc_ioctl,
3389 .listen = tipc_listen,
3390 .shutdown = tipc_shutdown,
3391 .setsockopt = tipc_setsockopt,
3392 .getsockopt = tipc_getsockopt,
3393 .sendmsg = tipc_send_packet,
3394 .recvmsg = tipc_recvmsg,
3395 .mmap = sock_no_mmap,
3396 .sendpage = sock_no_sendpage
3397};
3398
3399static const struct proto_ops stream_ops = {
3400 .owner = THIS_MODULE,
3401 .family = AF_TIPC,
3402 .release = tipc_release,
3403 .bind = tipc_bind,
3404 .connect = tipc_connect,
3405 .socketpair = tipc_socketpair,
3406 .accept = tipc_accept,
3407 .getname = tipc_getname,
3408 .poll = tipc_poll,
3409 .ioctl = tipc_ioctl,
3410 .listen = tipc_listen,
3411 .shutdown = tipc_shutdown,
3412 .setsockopt = tipc_setsockopt,
3413 .getsockopt = tipc_getsockopt,
3414 .sendmsg = tipc_sendstream,
3415 .recvmsg = tipc_recvstream,
3416 .mmap = sock_no_mmap,
3417 .sendpage = sock_no_sendpage
3418};
3419
3420static const struct net_proto_family tipc_family_ops = {
3421 .owner = THIS_MODULE,
3422 .family = AF_TIPC,
3423 .create = tipc_sk_create
3424};
3425
3426static struct proto tipc_proto = {
3427 .name = "TIPC",
3428 .owner = THIS_MODULE,
3429 .obj_size = sizeof(struct tipc_sock),
3430 .sysctl_rmem = sysctl_tipc_rmem
3431};
3432
3433/**
3434 * tipc_socket_init - initialize TIPC socket interface
3435 *
3436 * Return: 0 on success, errno otherwise
3437 */
3438int tipc_socket_init(void)
3439{
3440 int res;
3441
3442 res = proto_register(&tipc_proto, 1);
3443 if (res) {
3444 pr_err("Failed to register TIPC protocol type\n");
3445 goto out;
3446 }
3447
3448 res = sock_register(&tipc_family_ops);
3449 if (res) {
3450 pr_err("Failed to register TIPC socket type\n");
3451 proto_unregister(&tipc_proto);
3452 goto out;
3453 }
3454 out:
3455 return res;
3456}
3457
3458/**
3459 * tipc_socket_stop - stop TIPC socket interface
3460 */
3461void tipc_socket_stop(void)
3462{
3463 sock_unregister(tipc_family_ops.family);
3464 proto_unregister(&tipc_proto);
3465}
3466
3467/* Caller should hold socket lock for the passed tipc socket. */
3468static int __tipc_nl_add_sk_con(struct sk_buff *skb, struct tipc_sock *tsk)
3469{
3470 u32 peer_node, peer_port;
3471 u32 conn_type, conn_instance;
3472 struct nlattr *nest;
3473
3474 peer_node = tsk_peer_node(tsk);
3475 peer_port = tsk_peer_port(tsk);
3476 conn_type = msg_nametype(&tsk->phdr);
3477 conn_instance = msg_nameinst(&tsk->phdr);
3478 nest = nla_nest_start_noflag(skb, TIPC_NLA_SOCK_CON);
3479 if (!nest)
3480 return -EMSGSIZE;
3481
3482 if (nla_put_u32(skb, TIPC_NLA_CON_NODE, peer_node))
3483 goto msg_full;
3484 if (nla_put_u32(skb, TIPC_NLA_CON_SOCK, peer_port))
3485 goto msg_full;
3486
3487 if (tsk->conn_addrtype != 0) {
3488 if (nla_put_flag(skb, TIPC_NLA_CON_FLAG))
3489 goto msg_full;
3490 if (nla_put_u32(skb, TIPC_NLA_CON_TYPE, conn_type))
3491 goto msg_full;
3492 if (nla_put_u32(skb, TIPC_NLA_CON_INST, conn_instance))
3493 goto msg_full;
3494 }
3495 nla_nest_end(skb, nest);
3496
3497 return 0;
3498
3499msg_full:
3500 nla_nest_cancel(skb, nest);
3501
3502 return -EMSGSIZE;
3503}
3504
3505static int __tipc_nl_add_sk_info(struct sk_buff *skb, struct tipc_sock
3506 *tsk)
3507{
3508 struct net *net = sock_net(skb->sk);
3509 struct sock *sk = &tsk->sk;
3510
3511 if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->portid) ||
3512 nla_put_u32(skb, TIPC_NLA_SOCK_ADDR, tipc_own_addr(net)))
3513 return -EMSGSIZE;
3514
3515 if (tipc_sk_connected(sk)) {
3516 if (__tipc_nl_add_sk_con(skb, tsk))
3517 return -EMSGSIZE;
3518 } else if (!list_empty(&tsk->publications)) {
3519 if (nla_put_flag(skb, TIPC_NLA_SOCK_HAS_PUBL))
3520 return -EMSGSIZE;
3521 }
3522 return 0;
3523}
3524
3525/* Caller should hold socket lock for the passed tipc socket. */
3526static int __tipc_nl_add_sk(struct sk_buff *skb, struct netlink_callback *cb,
3527 struct tipc_sock *tsk)
3528{
3529 struct nlattr *attrs;
3530 void *hdr;
3531
3532 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
3533 &tipc_genl_family, NLM_F_MULTI, TIPC_NL_SOCK_GET);
3534 if (!hdr)
3535 goto msg_cancel;
3536
3537 attrs = nla_nest_start_noflag(skb, TIPC_NLA_SOCK);
3538 if (!attrs)
3539 goto genlmsg_cancel;
3540
3541 if (__tipc_nl_add_sk_info(skb, tsk))
3542 goto attr_msg_cancel;
3543
3544 nla_nest_end(skb, attrs);
3545 genlmsg_end(skb, hdr);
3546
3547 return 0;
3548
3549attr_msg_cancel:
3550 nla_nest_cancel(skb, attrs);
3551genlmsg_cancel:
3552 genlmsg_cancel(skb, hdr);
3553msg_cancel:
3554 return -EMSGSIZE;
3555}
3556
3557int tipc_nl_sk_walk(struct sk_buff *skb, struct netlink_callback *cb,
3558 int (*skb_handler)(struct sk_buff *skb,
3559 struct netlink_callback *cb,
3560 struct tipc_sock *tsk))
3561{
3562 struct rhashtable_iter *iter = (void *)cb->args[4];
3563 struct tipc_sock *tsk;
3564 int err;
3565
3566 rhashtable_walk_start(iter);
3567 while ((tsk = rhashtable_walk_next(iter)) != NULL) {
3568 if (IS_ERR(tsk)) {
3569 err = PTR_ERR(tsk);
3570 if (err == -EAGAIN) {
3571 err = 0;
3572 continue;
3573 }
3574 break;
3575 }
3576
3577 sock_hold(&tsk->sk);
3578 rhashtable_walk_stop(iter);
3579 lock_sock(&tsk->sk);
3580 err = skb_handler(skb, cb, tsk);
3581 if (err) {
3582 release_sock(&tsk->sk);
3583 sock_put(&tsk->sk);
3584 goto out;
3585 }
3586 release_sock(&tsk->sk);
3587 rhashtable_walk_start(iter);
3588 sock_put(&tsk->sk);
3589 }
3590 rhashtable_walk_stop(iter);
3591out:
3592 return skb->len;
3593}
3594EXPORT_SYMBOL(tipc_nl_sk_walk);
3595
3596int tipc_dump_start(struct netlink_callback *cb)
3597{
3598 return __tipc_dump_start(cb, sock_net(cb->skb->sk));
3599}
3600EXPORT_SYMBOL(tipc_dump_start);
3601
3602int __tipc_dump_start(struct netlink_callback *cb, struct net *net)
3603{
3604 /* tipc_nl_name_table_dump() uses cb->args[0...3]. */
3605 struct rhashtable_iter *iter = (void *)cb->args[4];
3606 struct tipc_net *tn = tipc_net(net);
3607
3608 if (!iter) {
3609 iter = kmalloc(sizeof(*iter), GFP_KERNEL);
3610 if (!iter)
3611 return -ENOMEM;
3612
3613 cb->args[4] = (long)iter;
3614 }
3615
3616 rhashtable_walk_enter(&tn->sk_rht, iter);
3617 return 0;
3618}
3619
3620int tipc_dump_done(struct netlink_callback *cb)
3621{
3622 struct rhashtable_iter *hti = (void *)cb->args[4];
3623
3624 rhashtable_walk_exit(hti);
3625 kfree(hti);
3626 return 0;
3627}
3628EXPORT_SYMBOL(tipc_dump_done);
3629
3630int tipc_sk_fill_sock_diag(struct sk_buff *skb, struct netlink_callback *cb,
3631 struct tipc_sock *tsk, u32 sk_filter_state,
3632 u64 (*tipc_diag_gen_cookie)(struct sock *sk))
3633{
3634 struct sock *sk = &tsk->sk;
3635 struct nlattr *attrs;
3636 struct nlattr *stat;
3637
3638 	/* Filter response w.r.t. sk_state */
3639 if (!(sk_filter_state & (1 << sk->sk_state)))
3640 return 0;
3641
3642 attrs = nla_nest_start_noflag(skb, TIPC_NLA_SOCK);
3643 if (!attrs)
3644 goto msg_cancel;
3645
3646 if (__tipc_nl_add_sk_info(skb, tsk))
3647 goto attr_msg_cancel;
3648
3649 if (nla_put_u32(skb, TIPC_NLA_SOCK_TYPE, (u32)sk->sk_type) ||
3650 nla_put_u32(skb, TIPC_NLA_SOCK_TIPC_STATE, (u32)sk->sk_state) ||
3651 nla_put_u32(skb, TIPC_NLA_SOCK_INO, sock_i_ino(sk)) ||
3652 nla_put_u32(skb, TIPC_NLA_SOCK_UID,
3653 from_kuid_munged(sk_user_ns(NETLINK_CB(cb->skb).sk),
3654 sock_i_uid(sk))) ||
3655 nla_put_u64_64bit(skb, TIPC_NLA_SOCK_COOKIE,
3656 tipc_diag_gen_cookie(sk),
3657 TIPC_NLA_SOCK_PAD))
3658 goto attr_msg_cancel;
3659
3660 stat = nla_nest_start_noflag(skb, TIPC_NLA_SOCK_STAT);
3661 if (!stat)
3662 goto attr_msg_cancel;
3663
3664 if (nla_put_u32(skb, TIPC_NLA_SOCK_STAT_RCVQ,
3665 skb_queue_len(&sk->sk_receive_queue)) ||
3666 nla_put_u32(skb, TIPC_NLA_SOCK_STAT_SENDQ,
3667 skb_queue_len(&sk->sk_write_queue)) ||
3668 nla_put_u32(skb, TIPC_NLA_SOCK_STAT_DROP,
3669 atomic_read(&sk->sk_drops)))
3670 goto stat_msg_cancel;
3671
3672 if (tsk->cong_link_cnt &&
3673 nla_put_flag(skb, TIPC_NLA_SOCK_STAT_LINK_CONG))
3674 goto stat_msg_cancel;
3675
3676 if (tsk_conn_cong(tsk) &&
3677 nla_put_flag(skb, TIPC_NLA_SOCK_STAT_CONN_CONG))
3678 goto stat_msg_cancel;
3679
3680 nla_nest_end(skb, stat);
3681
3682 if (tsk->group)
3683 if (tipc_group_fill_sock_diag(tsk->group, skb))
3684 goto stat_msg_cancel;
3685
3686 nla_nest_end(skb, attrs);
3687
3688 return 0;
3689
3690stat_msg_cancel:
3691 nla_nest_cancel(skb, stat);
3692attr_msg_cancel:
3693 nla_nest_cancel(skb, attrs);
3694msg_cancel:
3695 return -EMSGSIZE;
3696}
3697EXPORT_SYMBOL(tipc_sk_fill_sock_diag);
3698
3699int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb)
3700{
3701 return tipc_nl_sk_walk(skb, cb, __tipc_nl_add_sk);
3702}
3703
3704/* Caller should hold socket lock for the passed tipc socket. */
3705static int __tipc_nl_add_sk_publ(struct sk_buff *skb,
3706 struct netlink_callback *cb,
3707 struct publication *publ)
3708{
3709 void *hdr;
3710 struct nlattr *attrs;
3711
3712 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
3713 &tipc_genl_family, NLM_F_MULTI, TIPC_NL_PUBL_GET);
3714 if (!hdr)
3715 goto msg_cancel;
3716
3717 attrs = nla_nest_start_noflag(skb, TIPC_NLA_PUBL);
3718 if (!attrs)
3719 goto genlmsg_cancel;
3720
3721 if (nla_put_u32(skb, TIPC_NLA_PUBL_KEY, publ->key))
3722 goto attr_msg_cancel;
3723 if (nla_put_u32(skb, TIPC_NLA_PUBL_TYPE, publ->sr.type))
3724 goto attr_msg_cancel;
3725 if (nla_put_u32(skb, TIPC_NLA_PUBL_LOWER, publ->sr.lower))
3726 goto attr_msg_cancel;
3727 if (nla_put_u32(skb, TIPC_NLA_PUBL_UPPER, publ->sr.upper))
3728 goto attr_msg_cancel;
3729
3730 nla_nest_end(skb, attrs);
3731 genlmsg_end(skb, hdr);
3732
3733 return 0;
3734
3735attr_msg_cancel:
3736 nla_nest_cancel(skb, attrs);
3737genlmsg_cancel:
3738 genlmsg_cancel(skb, hdr);
3739msg_cancel:
3740 return -EMSGSIZE;
3741}
3742
3743/* Caller should hold socket lock for the passed tipc socket. */
3744static int __tipc_nl_list_sk_publ(struct sk_buff *skb,
3745 struct netlink_callback *cb,
3746 struct tipc_sock *tsk, u32 *last_publ)
3747{
3748 int err;
3749 struct publication *p;
3750
3751 if (*last_publ) {
3752 list_for_each_entry(p, &tsk->publications, binding_sock) {
3753 if (p->key == *last_publ)
3754 break;
3755 }
3756 if (list_entry_is_head(p, &tsk->publications, binding_sock)) {
3757 			/* We never set seq or call nl_dump_check_consistent(),
3758 			 * so setting prev_seq here will cause the consistency
3759 			 * check to fail in the netlink callback handler,
3760 			 * resulting in the last NLMSG_DONE message having the
3761 			 * NLM_F_DUMP_INTR flag set.
3762 			 */
3763 cb->prev_seq = 1;
3764 *last_publ = 0;
3765 return -EPIPE;
3766 }
3767 } else {
3768 p = list_first_entry(&tsk->publications, struct publication,
3769 binding_sock);
3770 }
3771
3772 list_for_each_entry_from(p, &tsk->publications, binding_sock) {
3773 err = __tipc_nl_add_sk_publ(skb, cb, p);
3774 if (err) {
3775 *last_publ = p->key;
3776 return err;
3777 }
3778 }
3779 *last_publ = 0;
3780
3781 return 0;
3782}
3783
3784int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb)
3785{
3786 int err;
3787 u32 tsk_portid = cb->args[0];
3788 u32 last_publ = cb->args[1];
3789 u32 done = cb->args[2];
3790 struct net *net = sock_net(skb->sk);
3791 struct tipc_sock *tsk;
3792
3793 if (!tsk_portid) {
3794 struct nlattr **attrs = genl_dumpit_info(cb)->attrs;
3795 struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1];
3796
3797 if (!attrs[TIPC_NLA_SOCK])
3798 return -EINVAL;
3799
3800 err = nla_parse_nested_deprecated(sock, TIPC_NLA_SOCK_MAX,
3801 attrs[TIPC_NLA_SOCK],
3802 tipc_nl_sock_policy, NULL);
3803 if (err)
3804 return err;
3805
3806 if (!sock[TIPC_NLA_SOCK_REF])
3807 return -EINVAL;
3808
3809 tsk_portid = nla_get_u32(sock[TIPC_NLA_SOCK_REF]);
3810 }
3811
3812 if (done)
3813 return 0;
3814
3815 tsk = tipc_sk_lookup(net, tsk_portid);
3816 if (!tsk)
3817 return -EINVAL;
3818
3819 lock_sock(&tsk->sk);
3820 err = __tipc_nl_list_sk_publ(skb, cb, tsk, &last_publ);
3821 if (!err)
3822 done = 1;
3823 release_sock(&tsk->sk);
3824 sock_put(&tsk->sk);
3825
3826 cb->args[0] = tsk_portid;
3827 cb->args[1] = last_publ;
3828 cb->args[2] = done;
3829
3830 return skb->len;
3831}
3832
3833/**
3834 * tipc_sk_filtering - check if a socket should be traced
3835 * @sk: the socket to be examined
3836 *
3837 * @sysctl_tipc_sk_filter is used as the socket tuple for filtering:
3838 * (portid, sock type, name type, name lower, name upper)
3839 *
3840  * Return: true if the socket matches the tuple (a field value of 0
3841  * matches 'any'), or when no tuple is set at all (all fields 0);
3842  * otherwise false
3843 */
3844bool tipc_sk_filtering(struct sock *sk)
3845{
3846 struct tipc_sock *tsk;
3847 struct publication *p;
3848 u32 _port, _sktype, _type, _lower, _upper;
3849 u32 type = 0, lower = 0, upper = 0;
3850
3851 if (!sk)
3852 return true;
3853
3854 tsk = tipc_sk(sk);
3855
3856 _port = sysctl_tipc_sk_filter[0];
3857 _sktype = sysctl_tipc_sk_filter[1];
3858 _type = sysctl_tipc_sk_filter[2];
3859 _lower = sysctl_tipc_sk_filter[3];
3860 _upper = sysctl_tipc_sk_filter[4];
3861
3862 if (!_port && !_sktype && !_type && !_lower && !_upper)
3863 return true;
3864
3865 if (_port)
3866 return (_port == tsk->portid);
3867
3868 if (_sktype && _sktype != sk->sk_type)
3869 return false;
3870
3871 if (tsk->published) {
3872 p = list_first_entry_or_null(&tsk->publications,
3873 struct publication, binding_sock);
3874 if (p) {
3875 type = p->sr.type;
3876 lower = p->sr.lower;
3877 upper = p->sr.upper;
3878 }
3879 }
3880
3881 if (!tipc_sk_type_connectionless(sk)) {
3882 type = msg_nametype(&tsk->phdr);
3883 lower = msg_nameinst(&tsk->phdr);
3884 upper = lower;
3885 }
3886
3887 if ((_type && _type != type) || (_lower && _lower != lower) ||
3888 (_upper && _upper != upper))
3889 return false;
3890
3891 return true;
3892}
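
/* Illustrative example: with the tuple layout above, tracing only
 * sockets bound to service {4711, 100, 100}, regardless of port id and
 * socket type, corresponds to the (hypothetical) setting
 *
 *	sysctl_tipc_sk_filter[] = { 0, 0, 4711, 100, 100 };
 *
 * whereas an all-zero tuple matches every socket.
 */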
3893
3894u32 tipc_sock_get_portid(struct sock *sk)
3895{
3896 return (sk) ? (tipc_sk(sk))->portid : 0;
3897}
3898
3899/**
3900 * tipc_sk_overlimit1 - check if socket rx queue is about to be overloaded,
3901 * both the rcv and backlog queues are considered
3902 * @sk: tipc sk to be checked
3903 * @skb: tipc msg to be checked
3904 *
3905 * Return: true if the socket rx queue allocation is > 90%, otherwise false
3906 */
3907
3908bool tipc_sk_overlimit1(struct sock *sk, struct sk_buff *skb)
3909{
3910 atomic_t *dcnt = &tipc_sk(sk)->dupl_rcvcnt;
3911 unsigned int lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
3912 unsigned int qsize = sk->sk_backlog.len + sk_rmem_alloc_get(sk);
3913
3914 return (qsize > lim * 90 / 100);
3915}
3916
3917/**
3918 * tipc_sk_overlimit2 - check if socket rx queue is about to be overloaded,
3919 * only the rcv queue is considered
3920 * @sk: tipc sk to be checked
3921 * @skb: tipc msg to be checked
3922 *
3923 * Return: true if the socket rx queue allocation is > 90%, otherwise false
3924 */
3925
3926bool tipc_sk_overlimit2(struct sock *sk, struct sk_buff *skb)
3927{
3928 unsigned int lim = rcvbuf_limit(sk, skb);
3929 unsigned int qsize = sk_rmem_alloc_get(sk);
3930
3931 return (qsize > lim * 90 / 100);
3932}
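
/* Worked example for the two checks above: with lim = 1000000 bytes and
 * a current queue occupancy of qsize = 950000 bytes,
 *
 *	qsize > lim * 90 / 100   =>   950000 > 900000   =>   true
 *
 * i.e. a queue is reported as about to be overloaded once it passes
 * 90% of its limit, not only when it is actually full.
 */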
3933
3934/**
3935 * tipc_sk_dump - dump TIPC socket
3936 * @sk: tipc sk to be dumped
3937 * @dqueues: bitmask selecting which socket queues to dump:
3938 * - TIPC_DUMP_NONE: don't dump socket queues
3939 * - TIPC_DUMP_SK_SNDQ: dump socket send queue
3940 * - TIPC_DUMP_SK_RCVQ: dump socket rcv queue
3941 * - TIPC_DUMP_SK_BKLGQ: dump socket backlog queue
3942 * - TIPC_DUMP_ALL: dump all the socket queues above
3943 * @buf: buffer where the formatted dump data is written
3944 */
3945int tipc_sk_dump(struct sock *sk, u16 dqueues, char *buf)
3946{
3947 int i = 0;
3948 size_t sz = (dqueues) ? SK_LMAX : SK_LMIN;
3949 u32 conn_type, conn_instance;
3950 struct tipc_sock *tsk;
3951 struct publication *p;
3952 bool tsk_connected;
3953
3954 if (!sk) {
3955 i += scnprintf(buf, sz, "sk data: (null)\n");
3956 return i;
3957 }
3958
3959 tsk = tipc_sk(sk);
3960 tsk_connected = !tipc_sk_type_connectionless(sk);
3961
3962 i += scnprintf(buf, sz, "sk data: %u", sk->sk_type);
3963 i += scnprintf(buf + i, sz - i, " %d", sk->sk_state);
3964 i += scnprintf(buf + i, sz - i, " %x", tsk_own_node(tsk));
3965 i += scnprintf(buf + i, sz - i, " %u", tsk->portid);
3966 i += scnprintf(buf + i, sz - i, " | %u", tsk_connected);
3967 if (tsk_connected) {
3968 i += scnprintf(buf + i, sz - i, " %x", tsk_peer_node(tsk));
3969 i += scnprintf(buf + i, sz - i, " %u", tsk_peer_port(tsk));
3970 conn_type = msg_nametype(&tsk->phdr);
3971 conn_instance = msg_nameinst(&tsk->phdr);
3972 i += scnprintf(buf + i, sz - i, " %u", conn_type);
3973 i += scnprintf(buf + i, sz - i, " %u", conn_instance);
3974 }
3975 i += scnprintf(buf + i, sz - i, " | %u", tsk->published);
3976 if (tsk->published) {
3977 p = list_first_entry_or_null(&tsk->publications,
3978 struct publication, binding_sock);
3979 i += scnprintf(buf + i, sz - i, " %u", (p) ? p->sr.type : 0);
3980 i += scnprintf(buf + i, sz - i, " %u", (p) ? p->sr.lower : 0);
3981 i += scnprintf(buf + i, sz - i, " %u", (p) ? p->sr.upper : 0);
3982 }
3983 i += scnprintf(buf + i, sz - i, " | %u", tsk->snd_win);
3984 i += scnprintf(buf + i, sz - i, " %u", tsk->rcv_win);
3985 i += scnprintf(buf + i, sz - i, " %u", tsk->max_pkt);
3986 i += scnprintf(buf + i, sz - i, " %x", tsk->peer_caps);
3987 i += scnprintf(buf + i, sz - i, " %u", tsk->cong_link_cnt);
3988 i += scnprintf(buf + i, sz - i, " %u", tsk->snt_unacked);
3989 i += scnprintf(buf + i, sz - i, " %u", tsk->rcv_unacked);
3990 i += scnprintf(buf + i, sz - i, " %u", atomic_read(&tsk->dupl_rcvcnt));
3991 i += scnprintf(buf + i, sz - i, " %u", sk->sk_shutdown);
3992 i += scnprintf(buf + i, sz - i, " | %d", sk_wmem_alloc_get(sk));
3993 i += scnprintf(buf + i, sz - i, " %d", sk->sk_sndbuf);
3994 i += scnprintf(buf + i, sz - i, " | %d", sk_rmem_alloc_get(sk));
3995 i += scnprintf(buf + i, sz - i, " %d", sk->sk_rcvbuf);
3996 i += scnprintf(buf + i, sz - i, " | %d\n", READ_ONCE(sk->sk_backlog.len));
3997
3998 if (dqueues & TIPC_DUMP_SK_SNDQ) {
3999 i += scnprintf(buf + i, sz - i, "sk_write_queue: ");
4000 i += tipc_list_dump(&sk->sk_write_queue, false, buf + i);
4001 }
4002
4003 if (dqueues & TIPC_DUMP_SK_RCVQ) {
4004 i += scnprintf(buf + i, sz - i, "sk_receive_queue: ");
4005 i += tipc_list_dump(&sk->sk_receive_queue, false, buf + i);
4006 }
4007
4008 if (dqueues & TIPC_DUMP_SK_BKLGQ) {
4009 i += scnprintf(buf + i, sz - i, "sk_backlog:\n head ");
4010 i += tipc_skb_dump(sk->sk_backlog.head, false, buf + i);
4011 if (sk->sk_backlog.tail != sk->sk_backlog.head) {
4012 i += scnprintf(buf + i, sz - i, " tail ");
4013 i += tipc_skb_dump(sk->sk_backlog.tail, false,
4014 buf + i);
4015 }
4016 }
4017
4018 return i;
4019}
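
/* Minimal usage sketch, assuming the SK_LMIN/SK_LMAX buffer sizes and
 * TIPC_DUMP_* flags defined for this purpose in trace.h:
 *
 *	char buf[SK_LMAX];
 *
 *	tipc_sk_dump(sk, TIPC_DUMP_ALL, buf);
 *	pr_info("%s", buf);
 */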
47#define SS_LISTENING -1 /* socket is listening */
48#define SS_READY -2 /* socket is connectionless */
49
50#define CONN_TIMEOUT_DEFAULT 8000 /* default connect timeout = 8s */
51#define CONN_PROBING_INTERVAL msecs_to_jiffies(3600000) /* [ms] => 1 h */
52#define TIPC_FWD_MSG 1
53#define TIPC_CONN_OK 0
54#define TIPC_CONN_PROBING 1
55#define TIPC_MAX_PORT 0xffffffff
56#define TIPC_MIN_PORT 1
57
58/**
59 * struct tipc_sock - TIPC socket structure
60 * @sk: socket - interacts with 'port' and with user via the socket API
61 * @connected: non-zero if port is currently connected to a peer port
62 * @conn_type: TIPC type used when connection was established
63 * @conn_instance: TIPC instance used when connection was established
64 * @published: non-zero if port has one or more associated names
65 * @max_pkt: maximum packet size "hint" used when building messages sent by port
66 * @portid: unique port identity in TIPC socket hash table
67 * @phdr: preformatted message header used when sending messages
68 * @sock_list: adjacent sockets in TIPC's global list of sockets
69 * @publications: list of publications for port
70 * @pub_count: total # of publications port has made during its lifetime
71 * @probing_state: state of connection probing (TIPC_CONN_OK/TIPC_CONN_PROBING)
72 * @probing_intv: interval between connection probes
73 * @conn_timeout: the time we can wait for an unresponded setup request
74 * @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue
75 * @link_cong: non-zero if owner must sleep because of link congestion
76 * @sent_unacked: # messages sent by socket, and not yet acked by peer
77 * @rcv_unacked: # messages read by user, but not yet acked back to peer
78 * @remote: 'connected' peer for dgram/rdm
79 * @node: hash table node
80 * @rcu: rcu struct for tipc_sock
81 */
82struct tipc_sock {
83 struct sock sk;
84 int connected;
85 u32 conn_type;
86 u32 conn_instance;
87 int published;
88 u32 max_pkt;
89 u32 portid;
90 struct tipc_msg phdr;
91 struct list_head sock_list;
92 struct list_head publications;
93 u32 pub_count;
94 u32 probing_state;
95 unsigned long probing_intv;
96 uint conn_timeout;
97 atomic_t dupl_rcvcnt;
98 bool link_cong;
99 uint sent_unacked;
100 uint rcv_unacked;
101 struct sockaddr_tipc remote;
102 struct rhash_head node;
103 struct rcu_head rcu;
104};
105
106static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb);
107static void tipc_data_ready(struct sock *sk);
108static void tipc_write_space(struct sock *sk);
109static void tipc_sock_destruct(struct sock *sk);
110static int tipc_release(struct socket *sock);
111static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags);
112static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p);
113static void tipc_sk_timeout(unsigned long data);
114static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
115 struct tipc_name_seq const *seq);
116static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
117 struct tipc_name_seq const *seq);
118static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid);
119static int tipc_sk_insert(struct tipc_sock *tsk);
120static void tipc_sk_remove(struct tipc_sock *tsk);
121static int __tipc_send_stream(struct socket *sock, struct msghdr *m,
122 size_t dsz);
123static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz);
124
125static const struct proto_ops packet_ops;
126static const struct proto_ops stream_ops;
127static const struct proto_ops msg_ops;
128static struct proto tipc_proto;
129
130static const struct rhashtable_params tsk_rht_params;
131
132/*
133 * Revised TIPC socket locking policy:
134 *
135 * Most socket operations take the standard socket lock when they start
136 * and hold it until they finish (or until they need to sleep). Acquiring
137 * this lock grants the owner exclusive access to the fields of the socket
138 * data structures, with the exception of the backlog queue. A few socket
139 * operations can be done without taking the socket lock because they only
140 * read socket information that never changes during the life of the socket.
141 *
142 * Socket operations may acquire the lock for the associated TIPC port if they
143 * need to perform an operation on the port. If any routine needs to acquire
144 * both the socket lock and the port lock it must take the socket lock first
145 * to avoid the risk of deadlock.
146 *
147 * The dispatcher handling incoming messages cannot grab the socket lock in
148 * the standard fashion, since it runs at the BH level and cannot block.
149 * Instead, it checks to see if the socket lock is currently owned by someone,
150 * and either handles the message itself or adds it to the socket's backlog
151 * queue; in the latter case the queued message is processed once the process
152 * owning the socket lock releases it.
153 *
154 * NOTE: Releasing the socket lock while an operation is sleeping overcomes
155 * the problem of a blocked socket operation preventing any other operations
156 * from occurring. However, applications must be careful if they have
157 * multiple threads trying to send (or receive) on the same socket, as these
158 * operations might interfere with each other. For example, doing a connect
159 * and a receive at the same time might allow the receive to consume the
160 * ACK message meant for the connect. While additional work could be done
161 * to try to overcome this, it doesn't seem worthwhile at present.
162 *
163 * NOTE: Releasing the socket lock while an operation is sleeping also ensures
164 * that another operation that must be performed in a non-blocking manner is
165 * not delayed for very long because the lock has already been taken.
166 *
167 * NOTE: This code assumes that certain fields of a port/socket pair are
168 * constant over its lifetime; such fields can be examined without taking
169 * the socket lock and/or port lock, and do not need to be re-read even
170 * after resuming processing after waiting. These fields include:
171 * - socket type
172 * - pointer to socket sk structure (aka tipc_sock structure)
173 * - pointer to port structure
174 * - port reference
175 */
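
/* A minimal sketch of the dispatcher pattern described above; the real
 * implementation is tipc_sk_rcv()/tipc_sk_enqueue() further down in
 * this file:
 *
 *	if (spin_trylock_bh(&sk->sk_lock.slock)) {
 *		if (!sock_owned_by_user(sk))
 *			filter_rcv(sk, skb);		// handle directly
 *		else
 *			sk_add_backlog(sk, skb, lim);	// defer to lock owner
 *		spin_unlock_bh(&sk->sk_lock.slock);
 *	}
 *
 * where 'lim' is the per-message receive quota from rcvbuf_limit(); if
 * the backlog would exceed it, the message is rejected back to the
 * sender.
 */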
176
177static u32 tsk_own_node(struct tipc_sock *tsk)
178{
179 return msg_prevnode(&tsk->phdr);
180}
181
182static u32 tsk_peer_node(struct tipc_sock *tsk)
183{
184 return msg_destnode(&tsk->phdr);
185}
186
187static u32 tsk_peer_port(struct tipc_sock *tsk)
188{
189 return msg_destport(&tsk->phdr);
190}
191
192static bool tsk_unreliable(struct tipc_sock *tsk)
193{
194 return msg_src_droppable(&tsk->phdr) != 0;
195}
196
197static void tsk_set_unreliable(struct tipc_sock *tsk, bool unreliable)
198{
199 msg_set_src_droppable(&tsk->phdr, unreliable ? 1 : 0);
200}
201
202static bool tsk_unreturnable(struct tipc_sock *tsk)
203{
204 return msg_dest_droppable(&tsk->phdr) != 0;
205}
206
207static void tsk_set_unreturnable(struct tipc_sock *tsk, bool unreturnable)
208{
209 msg_set_dest_droppable(&tsk->phdr, unreturnable ? 1 : 0);
210}
211
212static int tsk_importance(struct tipc_sock *tsk)
213{
214 return msg_importance(&tsk->phdr);
215}
216
217static int tsk_set_importance(struct tipc_sock *tsk, int imp)
218{
219 if (imp > TIPC_CRITICAL_IMPORTANCE)
220 return -EINVAL;
221 msg_set_importance(&tsk->phdr, (u32)imp);
222 return 0;
223}
224
225static struct tipc_sock *tipc_sk(const struct sock *sk)
226{
227 return container_of(sk, struct tipc_sock, sk);
228}
229
230static int tsk_conn_cong(struct tipc_sock *tsk)
231{
232 return tsk->sent_unacked >= TIPC_FLOWCTRL_WIN;
233}
234
235/**
236 * tsk_advance_rx_queue - discard first buffer in socket receive queue
237 *
238 * Caller must hold socket lock
239 */
240static void tsk_advance_rx_queue(struct sock *sk)
241{
242 kfree_skb(__skb_dequeue(&sk->sk_receive_queue));
243}
244
245/* tipc_sk_respond() : send response message back to sender
246 */
247static void tipc_sk_respond(struct sock *sk, struct sk_buff *skb, int err)
248{
249 u32 selector;
250 u32 dnode;
251 u32 onode = tipc_own_addr(sock_net(sk));
252
253 if (!tipc_msg_reverse(onode, &skb, err))
254 return;
255
256 dnode = msg_destnode(buf_msg(skb));
257 selector = msg_origport(buf_msg(skb));
258 tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector);
259}
260
261/**
262 * tsk_rej_rx_queue - reject all buffers in socket receive queue
263 *
264 * Caller must hold socket lock
265 */
266static void tsk_rej_rx_queue(struct sock *sk)
267{
268 struct sk_buff *skb;
269
270 while ((skb = __skb_dequeue(&sk->sk_receive_queue)))
271 tipc_sk_respond(sk, skb, TIPC_ERR_NO_PORT);
272}
273
274/* tsk_peer_msg - verify if message was sent by connected port's peer
275 *
276 * Handles cases where the node's network address has changed from
277 * the default of <0.0.0> to its configured setting.
278 */
279static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg)
280{
281 struct tipc_net *tn = net_generic(sock_net(&tsk->sk), tipc_net_id);
282 u32 peer_port = tsk_peer_port(tsk);
283 u32 orig_node;
284 u32 peer_node;
285
286 if (unlikely(!tsk->connected))
287 return false;
288
289 if (unlikely(msg_origport(msg) != peer_port))
290 return false;
291
292 orig_node = msg_orignode(msg);
293 peer_node = tsk_peer_node(tsk);
294
295 if (likely(orig_node == peer_node))
296 return true;
297
298 if (!orig_node && (peer_node == tn->own_addr))
299 return true;
300
301 if (!peer_node && (orig_node == tn->own_addr))
302 return true;
303
304 return false;
305}
306
307/**
308 * tipc_sk_create - create a TIPC socket
309 * @net: network namespace (must be default network)
310 * @sock: pre-allocated socket structure
311 * @protocol: protocol indicator (must be 0)
312 * @kern: caused by kernel or by userspace?
313 *
314 * This routine creates additional data structures used by the TIPC socket,
315 * initializes them, and links them together.
316 *
317 * Returns 0 on success, errno otherwise
318 */
319static int tipc_sk_create(struct net *net, struct socket *sock,
320 int protocol, int kern)
321{
322 struct tipc_net *tn;
323 const struct proto_ops *ops;
324 socket_state state;
325 struct sock *sk;
326 struct tipc_sock *tsk;
327 struct tipc_msg *msg;
328
329 /* Validate arguments */
330 if (unlikely(protocol != 0))
331 return -EPROTONOSUPPORT;
332
333 switch (sock->type) {
334 case SOCK_STREAM:
335 ops = &stream_ops;
336 state = SS_UNCONNECTED;
337 break;
338 case SOCK_SEQPACKET:
339 ops = &packet_ops;
340 state = SS_UNCONNECTED;
341 break;
342 case SOCK_DGRAM:
343 case SOCK_RDM:
344 ops = &msg_ops;
345 state = SS_READY;
346 break;
347 default:
348 return -EPROTOTYPE;
349 }
350
351 /* Allocate socket's protocol area */
352 sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto, kern);
353 if (sk == NULL)
354 return -ENOMEM;
355
356 tsk = tipc_sk(sk);
357 tsk->max_pkt = MAX_PKT_DEFAULT;
358 INIT_LIST_HEAD(&tsk->publications);
359 msg = &tsk->phdr;
360 tn = net_generic(sock_net(sk), tipc_net_id);
361 tipc_msg_init(tn->own_addr, msg, TIPC_LOW_IMPORTANCE, TIPC_NAMED_MSG,
362 NAMED_H_SIZE, 0);
363
364 /* Finish initializing socket data structures */
365 sock->ops = ops;
366 sock->state = state;
367 sock_init_data(sock, sk);
368 if (tipc_sk_insert(tsk)) {
369		pr_warn("Socket create failed; port number exhausted\n");
370 return -EINVAL;
371 }
372 msg_set_origport(msg, tsk->portid);
373 setup_timer(&sk->sk_timer, tipc_sk_timeout, (unsigned long)tsk);
374 sk->sk_backlog_rcv = tipc_backlog_rcv;
375 sk->sk_rcvbuf = sysctl_tipc_rmem[1];
376 sk->sk_data_ready = tipc_data_ready;
377 sk->sk_write_space = tipc_write_space;
378 sk->sk_destruct = tipc_sock_destruct;
379 tsk->conn_timeout = CONN_TIMEOUT_DEFAULT;
380 tsk->sent_unacked = 0;
381 atomic_set(&tsk->dupl_rcvcnt, 0);
382
383 if (sock->state == SS_READY) {
384 tsk_set_unreturnable(tsk, true);
385 if (sock->type == SOCK_DGRAM)
386 tsk_set_unreliable(tsk, true);
387 }
388 return 0;
389}
390
391static void tipc_sk_callback(struct rcu_head *head)
392{
393 struct tipc_sock *tsk = container_of(head, struct tipc_sock, rcu);
394
395 sock_put(&tsk->sk);
396}
397
398/**
399 * tipc_release - destroy a TIPC socket
400 * @sock: socket to destroy
401 *
402 * This routine cleans up any messages that are still queued on the socket.
403 * For DGRAM and RDM socket types, all queued messages are rejected.
404 * For SEQPACKET and STREAM socket types, the first message is rejected
405 * and any others are discarded. (If the first message on a STREAM socket
406 * is partially-read, it is discarded and the next one is rejected instead.)
407 *
408 * NOTE: Rejected messages are not necessarily returned to the sender! They
409 * are returned or discarded according to the "destination droppable" setting
410 * specified for the message by the sender.
411 *
412 * Returns 0 on success, errno otherwise
413 */
414static int tipc_release(struct socket *sock)
415{
416 struct sock *sk = sock->sk;
417 struct net *net;
418 struct tipc_sock *tsk;
419 struct sk_buff *skb;
420 u32 dnode;
421
422 /*
423 * Exit if socket isn't fully initialized (occurs when a failed accept()
424 * releases a pre-allocated child socket that was never used)
425 */
426 if (sk == NULL)
427 return 0;
428
429 net = sock_net(sk);
430 tsk = tipc_sk(sk);
431 lock_sock(sk);
432
433 /*
434 * Reject all unreceived messages, except on an active connection
435 * (which disconnects locally & sends a 'FIN+' to peer)
436 */
437 dnode = tsk_peer_node(tsk);
438 while (sock->state != SS_DISCONNECTING) {
439 skb = __skb_dequeue(&sk->sk_receive_queue);
440 if (skb == NULL)
441 break;
442 if (TIPC_SKB_CB(skb)->handle != NULL)
443 kfree_skb(skb);
444 else {
445 if ((sock->state == SS_CONNECTING) ||
446 (sock->state == SS_CONNECTED)) {
447 sock->state = SS_DISCONNECTING;
448 tsk->connected = 0;
449 tipc_node_remove_conn(net, dnode, tsk->portid);
450 }
451 tipc_sk_respond(sk, skb, TIPC_ERR_NO_PORT);
452 }
453 }
454
455 tipc_sk_withdraw(tsk, 0, NULL);
456 sk_stop_timer(sk, &sk->sk_timer);
457 tipc_sk_remove(tsk);
458 if (tsk->connected) {
459 skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
460 TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode,
461 tsk_own_node(tsk), tsk_peer_port(tsk),
462 tsk->portid, TIPC_ERR_NO_PORT);
463 if (skb)
464 tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
465 tipc_node_remove_conn(net, dnode, tsk->portid);
466 }
467
468 /* Reject any messages that accumulated in backlog queue */
469 sock->state = SS_DISCONNECTING;
470 release_sock(sk);
471
472 call_rcu(&tsk->rcu, tipc_sk_callback);
473 sock->sk = NULL;
474
475 return 0;
476}
477
478/**
479 * tipc_bind - associate or disassociate TIPC name(s) with a socket
480 * @sock: socket structure
481 * @uaddr: socket address describing name(s) and desired operation
482 * @uaddr_len: size of socket address data structure
483 *
484 * Name and name sequence binding is indicated using a positive scope value;
485 * a negative scope value unbinds the specified name. Specifying no name
486 * (i.e. a socket address length of 0) unbinds all names from the socket.
487 *
488 * Returns 0 on success, errno otherwise
489 *
490 * NOTE: This routine doesn't need to take the socket lock since it doesn't
491 * access any non-constant socket information.
492 */
493static int tipc_bind(struct socket *sock, struct sockaddr *uaddr,
494 int uaddr_len)
495{
496 struct sock *sk = sock->sk;
497 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
498 struct tipc_sock *tsk = tipc_sk(sk);
499 int res = -EINVAL;
500
501 lock_sock(sk);
502 if (unlikely(!uaddr_len)) {
503 res = tipc_sk_withdraw(tsk, 0, NULL);
504 goto exit;
505 }
506
507 if (uaddr_len < sizeof(struct sockaddr_tipc)) {
508 res = -EINVAL;
509 goto exit;
510 }
511 if (addr->family != AF_TIPC) {
512 res = -EAFNOSUPPORT;
513 goto exit;
514 }
515
516 if (addr->addrtype == TIPC_ADDR_NAME)
517 addr->addr.nameseq.upper = addr->addr.nameseq.lower;
518 else if (addr->addrtype != TIPC_ADDR_NAMESEQ) {
519 res = -EAFNOSUPPORT;
520 goto exit;
521 }
522
523 if ((addr->addr.nameseq.type < TIPC_RESERVED_TYPES) &&
524 (addr->addr.nameseq.type != TIPC_TOP_SRV) &&
525 (addr->addr.nameseq.type != TIPC_CFG_SRV)) {
526 res = -EACCES;
527 goto exit;
528 }
529
530 res = (addr->scope > 0) ?
531 tipc_sk_publish(tsk, addr->scope, &addr->addr.nameseq) :
532 tipc_sk_withdraw(tsk, -addr->scope, &addr->addr.nameseq);
533exit:
534 release_sock(sk);
535 return res;
536}
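
/* Userspace usage sketch with illustrative values: binding the name
 * sequence {4711, 0, 99} with cluster scope, then withdrawing it again
 * by negating the scope, as described above:
 *
 *	struct sockaddr_tipc a = {
 *		.family = AF_TIPC,
 *		.addrtype = TIPC_ADDR_NAMESEQ,
 *		.scope = TIPC_CLUSTER_SCOPE,
 *		.addr.nameseq = { .type = 4711, .lower = 0, .upper = 99 },
 *	};
 *
 *	bind(sd, (struct sockaddr *)&a, sizeof(a));
 *	a.scope = -TIPC_CLUSTER_SCOPE;
 *	bind(sd, (struct sockaddr *)&a, sizeof(a));
 */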
537
538/**
539 * tipc_getname - get port ID of socket or peer socket
540 * @sock: socket structure
541 * @uaddr: area for returned socket address
542 * @uaddr_len: area for returned length of socket address
543 * @peer: 0 = own ID, 1 = current peer ID, 2 = current/former peer ID
544 *
545 * Returns 0 on success, errno otherwise
546 *
547 * NOTE: This routine doesn't need to take the socket lock since it only
548 * accesses socket information that is unchanging (or which changes in
549 * a completely predictable manner).
550 */
551static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
552 int *uaddr_len, int peer)
553{
554 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
555 struct tipc_sock *tsk = tipc_sk(sock->sk);
556 struct tipc_net *tn = net_generic(sock_net(sock->sk), tipc_net_id);
557
558 memset(addr, 0, sizeof(*addr));
559 if (peer) {
560 if ((sock->state != SS_CONNECTED) &&
561 ((peer != 2) || (sock->state != SS_DISCONNECTING)))
562 return -ENOTCONN;
563 addr->addr.id.ref = tsk_peer_port(tsk);
564 addr->addr.id.node = tsk_peer_node(tsk);
565 } else {
566 addr->addr.id.ref = tsk->portid;
567 addr->addr.id.node = tn->own_addr;
568 }
569
570 *uaddr_len = sizeof(*addr);
571 addr->addrtype = TIPC_ADDR_ID;
572 addr->family = AF_TIPC;
573 addr->scope = 0;
574 addr->addr.name.domain = 0;
575
576 return 0;
577}
578
579/**
580 * tipc_poll - read and possibly block on pollmask
581 * @file: file structure associated with the socket
582 * @sock: socket for which to calculate the poll bits
583 * @wait: poll table entry
584 *
585 * Returns pollmask value
586 *
587 * COMMENTARY:
588 * It appears that the usual socket locking mechanisms are not useful here
589 * since the pollmask info is potentially out-of-date the moment this routine
590 * exits. TCP and other protocols seem to rely on higher level poll routines
591 * to handle any preventable race conditions, so TIPC will do the same ...
592 *
593 * TIPC sets the returned events as follows:
594 *
595 * socket state flags set
596 * ------------ ---------
597 * unconnected no read flags
598 * POLLOUT if port is not congested
599 *
600 * connecting POLLIN/POLLRDNORM if ACK/NACK in rx queue
601 * no write flags
602 *
603 * connected POLLIN/POLLRDNORM if data in rx queue
604 * POLLOUT if port is not congested
605 *
606 * disconnecting POLLIN/POLLRDNORM/POLLHUP
607 * no write flags
608 *
609 * listening POLLIN if SYN in rx queue
610 * no write flags
611 *
612 * ready POLLIN/POLLRDNORM if data in rx queue
613 * [connectionless] POLLOUT (since port cannot be congested)
614 *
615 * IMPORTANT: The fact that a read or write operation is indicated does NOT
616 * imply that the operation will succeed, merely that it should be performed
617 * and will not block.
618 */
619static unsigned int tipc_poll(struct file *file, struct socket *sock,
620 poll_table *wait)
621{
622 struct sock *sk = sock->sk;
623 struct tipc_sock *tsk = tipc_sk(sk);
624 u32 mask = 0;
625
626 sock_poll_wait(file, sk_sleep(sk), wait);
627
628 switch ((int)sock->state) {
629 case SS_UNCONNECTED:
630 if (!tsk->link_cong)
631 mask |= POLLOUT;
632 break;
633 case SS_READY:
634 case SS_CONNECTED:
635 if (!tsk->link_cong && !tsk_conn_cong(tsk))
636 mask |= POLLOUT;
637 /* fall thru' */
638 case SS_CONNECTING:
639 case SS_LISTENING:
640 if (!skb_queue_empty(&sk->sk_receive_queue))
641 mask |= (POLLIN | POLLRDNORM);
642 break;
643 case SS_DISCONNECTING:
644 mask = (POLLIN | POLLRDNORM | POLLHUP);
645 break;
646 }
647
648 return mask;
649}
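
/* Userspace view, as a sketch: the event table above maps onto an
 * ordinary poll() loop:
 *
 *	struct pollfd pfd = { .fd = sd, .events = POLLIN | POLLOUT };
 *
 *	poll(&pfd, 1, -1);
 *	if (pfd.revents & POLLHUP)
 *		...		// peer is gone ('disconnecting' row)
 *	else if (pfd.revents & POLLIN)
 *		...		// a message (or SYN) can be received
 */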
650
651/**
652 * tipc_sendmcast - send multicast message
653 * @sock: socket structure
654 * @seq: destination address
655 * @msg: message to send
656 * @dsz: total length of message data
657 * @timeo: timeout to wait for wakeup
658 *
659 * Called from function tipc_sendmsg(), which has done all sanity checks
660 * Returns the number of bytes sent on success, or errno
661 */
662static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq,
663 struct msghdr *msg, size_t dsz, long timeo)
664{
665 struct sock *sk = sock->sk;
666 struct tipc_sock *tsk = tipc_sk(sk);
667 struct net *net = sock_net(sk);
668 struct tipc_msg *mhdr = &tsk->phdr;
669 struct sk_buff_head pktchain;
670 struct iov_iter save = msg->msg_iter;
671 uint mtu;
672 int rc;
673
674 msg_set_type(mhdr, TIPC_MCAST_MSG);
675 msg_set_lookup_scope(mhdr, TIPC_CLUSTER_SCOPE);
676 msg_set_destport(mhdr, 0);
677 msg_set_destnode(mhdr, 0);
678 msg_set_nametype(mhdr, seq->type);
679 msg_set_namelower(mhdr, seq->lower);
680 msg_set_nameupper(mhdr, seq->upper);
681 msg_set_hdr_sz(mhdr, MCAST_H_SIZE);
682
683 skb_queue_head_init(&pktchain);
684
685new_mtu:
686 mtu = tipc_bcast_get_mtu(net);
687 rc = tipc_msg_build(mhdr, msg, 0, dsz, mtu, &pktchain);
688 if (unlikely(rc < 0))
689 return rc;
690
691 do {
692 rc = tipc_bcast_xmit(net, &pktchain);
693 if (likely(!rc))
694 return dsz;
695
696 if (rc == -ELINKCONG) {
697 tsk->link_cong = 1;
698 rc = tipc_wait_for_sndmsg(sock, &timeo);
699 if (!rc)
700 continue;
701 }
702 __skb_queue_purge(&pktchain);
703 if (rc == -EMSGSIZE) {
704 msg->msg_iter = save;
705 goto new_mtu;
706 }
707 break;
708 } while (1);
709 return rc;
710}
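
/* Userspace usage sketch with illustrative values: a send that reaches
 * tipc_sendmcast() uses TIPC_ADDR_MCAST and an instance range as
 * destination:
 *
 *	struct sockaddr_tipc dst = {
 *		.family = AF_TIPC,
 *		.addrtype = TIPC_ADDR_MCAST,
 *		.addr.nameseq = { .type = 4711, .lower = 0, .upper = 99 },
 *	};
 *
 *	sendto(sd, buf, len, 0, (struct sockaddr *)&dst, sizeof(dst));
 */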
711
712/**
713 * tipc_sk_mcast_rcv - Deliver multicast messages to all destination sockets
714 * @arrvq: queue with arriving messages, to be cloned after destination lookup
715 * @inputq: queue with cloned messages, delivered to socket after dest lookup
716 *
717 * Multi-threaded: parallel calls with reference to same queues may occur
718 */
719void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
720 struct sk_buff_head *inputq)
721{
722 struct tipc_msg *msg;
723 struct tipc_plist dports;
724 u32 portid;
725 u32 scope = TIPC_CLUSTER_SCOPE;
726 struct sk_buff_head tmpq;
727 uint hsz;
728 struct sk_buff *skb, *_skb;
729
730 __skb_queue_head_init(&tmpq);
731 tipc_plist_init(&dports);
732
733 skb = tipc_skb_peek(arrvq, &inputq->lock);
734 for (; skb; skb = tipc_skb_peek(arrvq, &inputq->lock)) {
735 msg = buf_msg(skb);
736 hsz = skb_headroom(skb) + msg_hdr_sz(msg);
737
738 if (in_own_node(net, msg_orignode(msg)))
739 scope = TIPC_NODE_SCOPE;
740
741 /* Create destination port list and message clones: */
742 tipc_nametbl_mc_translate(net,
743 msg_nametype(msg), msg_namelower(msg),
744 msg_nameupper(msg), scope, &dports);
745 portid = tipc_plist_pop(&dports);
746 for (; portid; portid = tipc_plist_pop(&dports)) {
747 _skb = __pskb_copy(skb, hsz, GFP_ATOMIC);
748 if (_skb) {
749 msg_set_destport(buf_msg(_skb), portid);
750 __skb_queue_tail(&tmpq, _skb);
751 continue;
752 }
753 pr_warn("Failed to clone mcast rcv buffer\n");
754 }
755 /* Append to inputq if not already done by other thread */
756 spin_lock_bh(&inputq->lock);
757 if (skb_peek(arrvq) == skb) {
758 skb_queue_splice_tail_init(&tmpq, inputq);
759 kfree_skb(__skb_dequeue(arrvq));
760 }
761 spin_unlock_bh(&inputq->lock);
762 __skb_queue_purge(&tmpq);
763 kfree_skb(skb);
764 }
765 tipc_sk_rcv(net, inputq);
766}
767
768/**
769 * tipc_sk_proto_rcv - receive a connection mng protocol message
770 * @tsk: receiving socket
771 * @skb: pointer to message buffer.
772 */
773static void tipc_sk_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb)
774{
775 struct sock *sk = &tsk->sk;
776 struct tipc_msg *hdr = buf_msg(skb);
777 int mtyp = msg_type(hdr);
778 int conn_cong;
779
780 /* Ignore if connection cannot be validated: */
781 if (!tsk_peer_msg(tsk, hdr))
782 goto exit;
783
784 tsk->probing_state = TIPC_CONN_OK;
785
786 if (mtyp == CONN_PROBE) {
787 msg_set_type(hdr, CONN_PROBE_REPLY);
788 tipc_sk_respond(sk, skb, TIPC_OK);
789 return;
790 } else if (mtyp == CONN_ACK) {
791 conn_cong = tsk_conn_cong(tsk);
792 tsk->sent_unacked -= msg_msgcnt(hdr);
793 if (conn_cong)
794 sk->sk_write_space(sk);
795 } else if (mtyp != CONN_PROBE_REPLY) {
796 pr_warn("Received unknown CONN_PROTO msg\n");
797 }
798exit:
799 kfree_skb(skb);
800}
801
802static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p)
803{
804 struct sock *sk = sock->sk;
805 struct tipc_sock *tsk = tipc_sk(sk);
806 DEFINE_WAIT(wait);
807 int done;
808
809 do {
810 int err = sock_error(sk);
811 if (err)
812 return err;
813 if (sock->state == SS_DISCONNECTING)
814 return -EPIPE;
815 if (!*timeo_p)
816 return -EAGAIN;
817 if (signal_pending(current))
818 return sock_intr_errno(*timeo_p);
819
820 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
821 done = sk_wait_event(sk, timeo_p, !tsk->link_cong);
822 finish_wait(sk_sleep(sk), &wait);
823 } while (!done);
824 return 0;
825}
826
827/**
828 * tipc_sendmsg - send message in connectionless manner
829 * @sock: socket structure
830 * @m: message to send
831 * @dsz: amount of user data to be sent
832 *
833 * Message must have a destination specified explicitly.
834 * Used for SOCK_RDM and SOCK_DGRAM messages,
835 * and for 'SYN' messages on SOCK_SEQPACKET and SOCK_STREAM connections.
836 * (Note: 'SYN+' is prohibited on SOCK_STREAM.)
837 *
838 * Returns the number of bytes sent on success, or errno otherwise
839 */
840static int tipc_sendmsg(struct socket *sock,
841 struct msghdr *m, size_t dsz)
842{
843 struct sock *sk = sock->sk;
844 int ret;
845
846 lock_sock(sk);
847 ret = __tipc_sendmsg(sock, m, dsz);
848 release_sock(sk);
849
850 return ret;
851}
852
853static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz)
854{
855 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
856 struct sock *sk = sock->sk;
857 struct tipc_sock *tsk = tipc_sk(sk);
858 struct net *net = sock_net(sk);
859 struct tipc_msg *mhdr = &tsk->phdr;
860 u32 dnode, dport;
861 struct sk_buff_head pktchain;
862 struct sk_buff *skb;
863 struct tipc_name_seq *seq;
864 struct iov_iter save;
865 u32 mtu;
866 long timeo;
867 int rc;
868
869 if (dsz > TIPC_MAX_USER_MSG_SIZE)
870 return -EMSGSIZE;
871 if (unlikely(!dest)) {
872 if (tsk->connected && sock->state == SS_READY)
873 dest = &tsk->remote;
874 else
875 return -EDESTADDRREQ;
876 } else if (unlikely(m->msg_namelen < sizeof(*dest)) ||
877 dest->family != AF_TIPC) {
878 return -EINVAL;
879 }
880 if (unlikely(sock->state != SS_READY)) {
881 if (sock->state == SS_LISTENING)
882 return -EPIPE;
883 if (sock->state != SS_UNCONNECTED)
884 return -EISCONN;
885 if (tsk->published)
886 return -EOPNOTSUPP;
887 if (dest->addrtype == TIPC_ADDR_NAME) {
888 tsk->conn_type = dest->addr.name.name.type;
889 tsk->conn_instance = dest->addr.name.name.instance;
890 }
891 }
892 seq = &dest->addr.nameseq;
893 timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
894
895 if (dest->addrtype == TIPC_ADDR_MCAST) {
896 return tipc_sendmcast(sock, seq, m, dsz, timeo);
897 } else if (dest->addrtype == TIPC_ADDR_NAME) {
898 u32 type = dest->addr.name.name.type;
899 u32 inst = dest->addr.name.name.instance;
900 u32 domain = dest->addr.name.domain;
901
902 dnode = domain;
903 msg_set_type(mhdr, TIPC_NAMED_MSG);
904 msg_set_hdr_sz(mhdr, NAMED_H_SIZE);
905 msg_set_nametype(mhdr, type);
906 msg_set_nameinst(mhdr, inst);
907 msg_set_lookup_scope(mhdr, tipc_addr_scope(domain));
908 dport = tipc_nametbl_translate(net, type, inst, &dnode);
909 msg_set_destnode(mhdr, dnode);
910 msg_set_destport(mhdr, dport);
911 if (unlikely(!dport && !dnode))
912 return -EHOSTUNREACH;
913 } else if (dest->addrtype == TIPC_ADDR_ID) {
914 dnode = dest->addr.id.node;
915 msg_set_type(mhdr, TIPC_DIRECT_MSG);
916 msg_set_lookup_scope(mhdr, 0);
917 msg_set_destnode(mhdr, dnode);
918 msg_set_destport(mhdr, dest->addr.id.ref);
919 msg_set_hdr_sz(mhdr, BASIC_H_SIZE);
920 }
921
922 skb_queue_head_init(&pktchain);
923 save = m->msg_iter;
924new_mtu:
925 mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
926 rc = tipc_msg_build(mhdr, m, 0, dsz, mtu, &pktchain);
927 if (rc < 0)
928 return rc;
929
930 do {
931 skb = skb_peek(&pktchain);
932 TIPC_SKB_CB(skb)->wakeup_pending = tsk->link_cong;
933 rc = tipc_node_xmit(net, &pktchain, dnode, tsk->portid);
934 if (likely(!rc)) {
935 if (sock->state != SS_READY)
936 sock->state = SS_CONNECTING;
937 return dsz;
938 }
939 if (rc == -ELINKCONG) {
940 tsk->link_cong = 1;
941 rc = tipc_wait_for_sndmsg(sock, &timeo);
942 if (!rc)
943 continue;
944 }
945 __skb_queue_purge(&pktchain);
946 if (rc == -EMSGSIZE) {
947 m->msg_iter = save;
948 goto new_mtu;
949 }
950 break;
951 } while (1);
952
953 return rc;
954}
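
/* Userspace usage sketch with illustrative values: the TIPC_ADDR_NAME
 * branch above implements an anycast send to one instance of a service,
 * looked up in the name table at send time:
 *
 *	struct sockaddr_tipc dst = {
 *		.family = AF_TIPC,
 *		.addrtype = TIPC_ADDR_NAME,
 *		.addr.name.name = { .type = 4711, .instance = 17 },
 *		.addr.name.domain = 0,	// look up in the whole cluster
 *	};
 *
 *	sendto(sd, buf, len, 0, (struct sockaddr *)&dst, sizeof(dst));
 */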
955
956static int tipc_wait_for_sndpkt(struct socket *sock, long *timeo_p)
957{
958 struct sock *sk = sock->sk;
959 struct tipc_sock *tsk = tipc_sk(sk);
960 DEFINE_WAIT(wait);
961 int done;
962
963 do {
964 int err = sock_error(sk);
965 if (err)
966 return err;
967 if (sock->state == SS_DISCONNECTING)
968 return -EPIPE;
969 else if (sock->state != SS_CONNECTED)
970 return -ENOTCONN;
971 if (!*timeo_p)
972 return -EAGAIN;
973 if (signal_pending(current))
974 return sock_intr_errno(*timeo_p);
975
976 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
977 done = sk_wait_event(sk, timeo_p,
978 (!tsk->link_cong &&
979 !tsk_conn_cong(tsk)) ||
980 !tsk->connected);
981 finish_wait(sk_sleep(sk), &wait);
982 } while (!done);
983 return 0;
984}
985
986/**
987 * tipc_send_stream - send stream-oriented data
988 * @sock: socket structure
989 * @m: data to send
990 * @dsz: total length of data to be transmitted
991 *
992 * Used for SOCK_STREAM data.
993 *
994 * Returns the number of bytes sent on success (or partial success),
995 * or errno if no data sent
996 */
997static int tipc_send_stream(struct socket *sock, struct msghdr *m, size_t dsz)
998{
999 struct sock *sk = sock->sk;
1000 int ret;
1001
1002 lock_sock(sk);
1003 ret = __tipc_send_stream(sock, m, dsz);
1004 release_sock(sk);
1005
1006 return ret;
1007}
1008
1009static int __tipc_send_stream(struct socket *sock, struct msghdr *m, size_t dsz)
1010{
1011 struct sock *sk = sock->sk;
1012 struct net *net = sock_net(sk);
1013 struct tipc_sock *tsk = tipc_sk(sk);
1014 struct tipc_msg *mhdr = &tsk->phdr;
1015 struct sk_buff_head pktchain;
1016 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
1017 u32 portid = tsk->portid;
1018 int rc = -EINVAL;
1019 long timeo;
1020 u32 dnode;
1021 uint mtu, send, sent = 0;
1022 struct iov_iter save;
1023
1024 /* Handle implied connection establishment */
1025 if (unlikely(dest)) {
1026 rc = __tipc_sendmsg(sock, m, dsz);
1027 if (dsz && (dsz == rc))
1028 tsk->sent_unacked = 1;
1029 return rc;
1030 }
1031 if (dsz > (uint)INT_MAX)
1032 return -EMSGSIZE;
1033
1034 if (unlikely(sock->state != SS_CONNECTED)) {
1035 if (sock->state == SS_DISCONNECTING)
1036 return -EPIPE;
1037 else
1038 return -ENOTCONN;
1039 }
1040
1041 timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
1042 dnode = tsk_peer_node(tsk);
1043 skb_queue_head_init(&pktchain);
1044
1045next:
1046 save = m->msg_iter;
1047 mtu = tsk->max_pkt;
1048 send = min_t(uint, dsz - sent, TIPC_MAX_USER_MSG_SIZE);
1049 rc = tipc_msg_build(mhdr, m, sent, send, mtu, &pktchain);
1050 if (unlikely(rc < 0))
1051 return rc;
1052
1053 do {
1054 if (likely(!tsk_conn_cong(tsk))) {
1055 rc = tipc_node_xmit(net, &pktchain, dnode, portid);
1056 if (likely(!rc)) {
1057 tsk->sent_unacked++;
1058 sent += send;
1059 if (sent == dsz)
1060 return dsz;
1061 goto next;
1062 }
1063 if (rc == -EMSGSIZE) {
1064 __skb_queue_purge(&pktchain);
1065 tsk->max_pkt = tipc_node_get_mtu(net, dnode,
1066 portid);
1067 m->msg_iter = save;
1068 goto next;
1069 }
1070 if (rc != -ELINKCONG)
1071 break;
1072
1073 tsk->link_cong = 1;
1074 }
1075 rc = tipc_wait_for_sndpkt(sock, &timeo);
1076 } while (!rc);
1077
1078 __skb_queue_purge(&pktchain);
1079 return sent ? sent : rc;
1080}
1081
1082/**
1083 * tipc_send_packet - send a connection-oriented message
1084 * @sock: socket structure
1085 * @m: message to send
1086 * @dsz: length of data to be transmitted
1087 *
1088 * Used for SOCK_SEQPACKET messages.
1089 *
1090 * Returns the number of bytes sent on success, or errno otherwise
1091 */
1092static int tipc_send_packet(struct socket *sock, struct msghdr *m, size_t dsz)
1093{
1094 if (dsz > TIPC_MAX_USER_MSG_SIZE)
1095 return -EMSGSIZE;
1096
1097 return tipc_send_stream(sock, m, dsz);
1098}
1099
1100/* tipc_sk_finish_conn - complete the setup of a connection
1101 */
1102static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port,
1103 u32 peer_node)
1104{
1105 struct sock *sk = &tsk->sk;
1106 struct net *net = sock_net(sk);
1107 struct tipc_msg *msg = &tsk->phdr;
1108
1109 msg_set_destnode(msg, peer_node);
1110 msg_set_destport(msg, peer_port);
1111 msg_set_type(msg, TIPC_CONN_MSG);
1112 msg_set_lookup_scope(msg, 0);
1113 msg_set_hdr_sz(msg, SHORT_H_SIZE);
1114
1115 tsk->probing_intv = CONN_PROBING_INTERVAL;
1116 tsk->probing_state = TIPC_CONN_OK;
1117 tsk->connected = 1;
1118 sk_reset_timer(sk, &sk->sk_timer, jiffies + tsk->probing_intv);
1119 tipc_node_add_conn(net, peer_node, tsk->portid, peer_port);
1120 tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid);
1121}
1122
1123/**
1124 * set_orig_addr - capture sender's address for received message
1125 * @m: descriptor for message info
1126 * @msg: received message header
1127 *
1128 * Note: Address is not captured if not requested by receiver.
1129 */
1130static void set_orig_addr(struct msghdr *m, struct tipc_msg *msg)
1131{
1132 DECLARE_SOCKADDR(struct sockaddr_tipc *, addr, m->msg_name);
1133
1134 if (addr) {
1135 addr->family = AF_TIPC;
1136 addr->addrtype = TIPC_ADDR_ID;
1137 memset(&addr->addr, 0, sizeof(addr->addr));
1138 addr->addr.id.ref = msg_origport(msg);
1139 addr->addr.id.node = msg_orignode(msg);
1140 addr->addr.name.domain = 0; /* could leave uninitialized */
1141 addr->scope = 0; /* could leave uninitialized */
1142 m->msg_namelen = sizeof(struct sockaddr_tipc);
1143 }
1144}
1145
1146/**
1147 * tipc_sk_anc_data_recv - optionally capture ancillary data for received message
1148 * @m: descriptor for message info
1149 * @msg: received message header
1150 * @tsk: TIPC port associated with message
1151 *
1152 * Note: Ancillary data is not captured if not requested by receiver.
1153 *
1154 * Returns 0 if successful, otherwise errno
1155 */
1156static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
1157 struct tipc_sock *tsk)
1158{
1159 u32 anc_data[3];
1160 u32 err;
1161 u32 dest_type;
1162 int has_name;
1163 int res;
1164
1165 if (likely(m->msg_controllen == 0))
1166 return 0;
1167
1168 /* Optionally capture errored message object(s) */
1169 err = msg ? msg_errcode(msg) : 0;
1170 if (unlikely(err)) {
1171 anc_data[0] = err;
1172 anc_data[1] = msg_data_sz(msg);
1173 res = put_cmsg(m, SOL_TIPC, TIPC_ERRINFO, 8, anc_data);
1174 if (res)
1175 return res;
1176 if (anc_data[1]) {
1177 res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1],
1178 msg_data(msg));
1179 if (res)
1180 return res;
1181 }
1182 }
1183
1184 /* Optionally capture message destination object */
1185 dest_type = msg ? msg_type(msg) : TIPC_DIRECT_MSG;
1186 switch (dest_type) {
1187 case TIPC_NAMED_MSG:
1188 has_name = 1;
1189 anc_data[0] = msg_nametype(msg);
1190 anc_data[1] = msg_namelower(msg);
1191 anc_data[2] = msg_namelower(msg);
1192 break;
1193 case TIPC_MCAST_MSG:
1194 has_name = 1;
1195 anc_data[0] = msg_nametype(msg);
1196 anc_data[1] = msg_namelower(msg);
1197 anc_data[2] = msg_nameupper(msg);
1198 break;
1199 case TIPC_CONN_MSG:
1200 has_name = (tsk->conn_type != 0);
1201 anc_data[0] = tsk->conn_type;
1202 anc_data[1] = tsk->conn_instance;
1203 anc_data[2] = tsk->conn_instance;
1204 break;
1205 default:
1206 has_name = 0;
1207 }
1208 if (has_name) {
1209 res = put_cmsg(m, SOL_TIPC, TIPC_DESTNAME, 12, anc_data);
1210 if (res)
1211 return res;
1212 }
1213
1214 return 0;
1215}
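
/* Userspace usage sketch: picking up the TIPC_DESTNAME record built
 * above via recvmsg() and the cmsg macros:
 *
 *	char data[256], cbuf[CMSG_SPACE(12)];
 *	struct iovec iov = { data, sizeof(data) };
 *	struct msghdr m = { .msg_iov = &iov, .msg_iovlen = 1,
 *			    .msg_control = cbuf,
 *			    .msg_controllen = sizeof(cbuf) };
 *	struct cmsghdr *cm;
 *
 *	recvmsg(sd, &m, 0);
 *	for (cm = CMSG_FIRSTHDR(&m); cm; cm = CMSG_NXTHDR(&m, cm))
 *		if (cm->cmsg_level == SOL_TIPC &&
 *		    cm->cmsg_type == TIPC_DESTNAME)
 *			;	// CMSG_DATA(cm) holds {type, lower, upper}
 */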
1216
1217static void tipc_sk_send_ack(struct tipc_sock *tsk, uint ack)
1218{
1219 struct net *net = sock_net(&tsk->sk);
1220 struct sk_buff *skb = NULL;
1221 struct tipc_msg *msg;
1222 u32 peer_port = tsk_peer_port(tsk);
1223 u32 dnode = tsk_peer_node(tsk);
1224
1225 if (!tsk->connected)
1226 return;
1227 skb = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0,
1228 dnode, tsk_own_node(tsk), peer_port,
1229 tsk->portid, TIPC_OK);
1230 if (!skb)
1231 return;
1232 msg = buf_msg(skb);
1233 msg_set_msgcnt(msg, ack);
1234 tipc_node_xmit_skb(net, skb, dnode, msg_link_selector(msg));
1235}
1236
1237static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
1238{
1239 struct sock *sk = sock->sk;
1240 DEFINE_WAIT(wait);
1241 long timeo = *timeop;
1242 int err;
1243
1244 for (;;) {
1245 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1246 if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
1247 if (sock->state == SS_DISCONNECTING) {
1248 err = -ENOTCONN;
1249 break;
1250 }
1251 release_sock(sk);
1252 timeo = schedule_timeout(timeo);
1253 lock_sock(sk);
1254 }
1255 err = 0;
1256 if (!skb_queue_empty(&sk->sk_receive_queue))
1257 break;
1258 err = -EAGAIN;
1259 if (!timeo)
1260 break;
1261 err = sock_intr_errno(timeo);
1262 if (signal_pending(current))
1263 break;
1264 }
1265 finish_wait(sk_sleep(sk), &wait);
1266 *timeop = timeo;
1267 return err;
1268}
1269
1270/**
1271 * tipc_recvmsg - receive packet-oriented message
1272 * @m: descriptor for message info
1273 * @buf_len: total size of user buffer area
1274 * @flags: receive flags
1275 *
1276 * Used for SOCK_DGRAM, SOCK_RDM, and SOCK_SEQPACKET messages.
1277 * If the complete message doesn't fit in user area, truncate it.
1278 *
1279 * Returns size of returned message data, errno otherwise
1280 */
1281static int tipc_recvmsg(struct socket *sock, struct msghdr *m, size_t buf_len,
1282 int flags)
1283{
1284 struct sock *sk = sock->sk;
1285 struct tipc_sock *tsk = tipc_sk(sk);
1286 struct sk_buff *buf;
1287 struct tipc_msg *msg;
1288 long timeo;
1289 unsigned int sz;
1290 u32 err;
1291 int res;
1292
1293 /* Catch invalid receive requests */
1294 if (unlikely(!buf_len))
1295 return -EINVAL;
1296
1297 lock_sock(sk);
1298
1299 if (unlikely(sock->state == SS_UNCONNECTED)) {
1300 res = -ENOTCONN;
1301 goto exit;
1302 }
1303
1304 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1305restart:
1306
1307 /* Look for a message in receive queue; wait if necessary */
1308 res = tipc_wait_for_rcvmsg(sock, &timeo);
1309 if (res)
1310 goto exit;
1311
1312 /* Look at first message in receive queue */
1313 buf = skb_peek(&sk->sk_receive_queue);
1314 msg = buf_msg(buf);
1315 sz = msg_data_sz(msg);
1316 err = msg_errcode(msg);
1317
1318 /* Discard an empty non-errored message & try again */
1319 if ((!sz) && (!err)) {
1320 tsk_advance_rx_queue(sk);
1321 goto restart;
1322 }
1323
1324 /* Capture sender's address (optional) */
1325 set_orig_addr(m, msg);
1326
1327 /* Capture ancillary data (optional) */
1328 res = tipc_sk_anc_data_recv(m, msg, tsk);
1329 if (res)
1330 goto exit;
1331
1332 /* Capture message data (if valid) & compute return value (always) */
1333 if (!err) {
1334 if (unlikely(buf_len < sz)) {
1335 sz = buf_len;
1336 m->msg_flags |= MSG_TRUNC;
1337 }
1338 res = skb_copy_datagram_msg(buf, msg_hdr_sz(msg), m, sz);
1339 if (res)
1340 goto exit;
1341 res = sz;
1342 } else {
1343 if ((sock->state == SS_READY) ||
1344 ((err == TIPC_CONN_SHUTDOWN) || m->msg_control))
1345 res = 0;
1346 else
1347 res = -ECONNRESET;
1348 }
1349
1350 /* Consume received message (optional) */
1351 if (likely(!(flags & MSG_PEEK))) {
1352 if ((sock->state != SS_READY) &&
1353 (++tsk->rcv_unacked >= TIPC_CONNACK_INTV)) {
1354 tipc_sk_send_ack(tsk, tsk->rcv_unacked);
1355 tsk->rcv_unacked = 0;
1356 }
1357 tsk_advance_rx_queue(sk);
1358 }
1359exit:
1360 release_sock(sk);
1361 return res;
1362}
1363
1364/**
1365 * tipc_recv_stream - receive stream-oriented data
1366 * @m: descriptor for message info
1367 * @buf_len: total size of user buffer area
1368 * @flags: receive flags
1369 *
1370 * Used for SOCK_STREAM messages only. If not enough data is available,
1371 * it will optionally wait for more; never truncates data.
1372 *
1373 * Returns size of returned message data, errno otherwise
1374 */
1375static int tipc_recv_stream(struct socket *sock, struct msghdr *m,
1376 size_t buf_len, int flags)
1377{
1378 struct sock *sk = sock->sk;
1379 struct tipc_sock *tsk = tipc_sk(sk);
1380 struct sk_buff *buf;
1381 struct tipc_msg *msg;
1382 long timeo;
1383 unsigned int sz;
1384 int sz_to_copy, target, needed;
1385 int sz_copied = 0;
1386 u32 err;
1387 int res = 0;
1388
1389 /* Catch invalid receive attempts */
1390 if (unlikely(!buf_len))
1391 return -EINVAL;
1392
1393 lock_sock(sk);
1394
1395 if (unlikely(sock->state == SS_UNCONNECTED)) {
1396 res = -ENOTCONN;
1397 goto exit;
1398 }
1399
1400 target = sock_rcvlowat(sk, flags & MSG_WAITALL, buf_len);
1401 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1402
1403restart:
1404 /* Look for a message in receive queue; wait if necessary */
1405 res = tipc_wait_for_rcvmsg(sock, &timeo);
1406 if (res)
1407 goto exit;
1408
1409 /* Look at first message in receive queue */
1410 buf = skb_peek(&sk->sk_receive_queue);
1411 msg = buf_msg(buf);
1412 sz = msg_data_sz(msg);
1413 err = msg_errcode(msg);
1414
1415 /* Discard an empty non-errored message & try again */
1416 if ((!sz) && (!err)) {
1417 tsk_advance_rx_queue(sk);
1418 goto restart;
1419 }
1420
1421 /* Optionally capture sender's address & ancillary data of first msg */
1422 if (sz_copied == 0) {
1423 set_orig_addr(m, msg);
1424 res = tipc_sk_anc_data_recv(m, msg, tsk);
1425 if (res)
1426 goto exit;
1427 }
1428
1429 /* Capture message data (if valid) & compute return value (always) */
1430 if (!err) {
1431 u32 offset = (u32)(unsigned long)(TIPC_SKB_CB(buf)->handle);
1432
1433 sz -= offset;
1434 needed = (buf_len - sz_copied);
1435 sz_to_copy = (sz <= needed) ? sz : needed;
1436
1437 res = skb_copy_datagram_msg(buf, msg_hdr_sz(msg) + offset,
1438 m, sz_to_copy);
1439 if (res)
1440 goto exit;
1441
1442 sz_copied += sz_to_copy;
1443
1444 if (sz_to_copy < sz) {
1445 if (!(flags & MSG_PEEK))
1446 TIPC_SKB_CB(buf)->handle =
1447 (void *)(unsigned long)(offset + sz_to_copy);
1448 goto exit;
1449 }
1450 } else {
1451 if (sz_copied != 0)
1452 goto exit; /* can't add error msg to valid data */
1453
1454 if ((err == TIPC_CONN_SHUTDOWN) || m->msg_control)
1455 res = 0;
1456 else
1457 res = -ECONNRESET;
1458 }
1459
1460 /* Consume received message (optional) */
1461 if (likely(!(flags & MSG_PEEK))) {
1462 if (unlikely(++tsk->rcv_unacked >= TIPC_CONNACK_INTV)) {
1463 tipc_sk_send_ack(tsk, tsk->rcv_unacked);
1464 tsk->rcv_unacked = 0;
1465 }
1466 tsk_advance_rx_queue(sk);
1467 }
1468
1469 /* Loop around if more data is required */
1470 if ((sz_copied < buf_len) && /* didn't get all requested data */
1471 (!skb_queue_empty(&sk->sk_receive_queue) ||
1472 (sz_copied < target)) && /* and more is ready or required */
1473 (!(flags & MSG_PEEK)) && /* and aren't just peeking at data */
1474 (!err)) /* and haven't reached a FIN */
1475 goto restart;
1476
1477exit:
1478 release_sock(sk);
1479 return sz_copied ? sz_copied : res;
1480}
1481
1482/**
1483 * tipc_write_space - wake up thread if port congestion is released
1484 * @sk: socket
1485 */
1486static void tipc_write_space(struct sock *sk)
1487{
1488 struct socket_wq *wq;
1489
1490 rcu_read_lock();
1491 wq = rcu_dereference(sk->sk_wq);
1492 if (skwq_has_sleeper(wq))
1493 wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
1494 POLLWRNORM | POLLWRBAND);
1495 rcu_read_unlock();
1496}
1497
1498/**
1499 * tipc_data_ready - wake up threads to indicate messages have been received
1500 * @sk: socket
1502 */
1503static void tipc_data_ready(struct sock *sk)
1504{
1505 struct socket_wq *wq;
1506
1507 rcu_read_lock();
1508 wq = rcu_dereference(sk->sk_wq);
1509 if (skwq_has_sleeper(wq))
1510 wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
1511 POLLRDNORM | POLLRDBAND);
1512 rcu_read_unlock();
1513}
1514
1515static void tipc_sock_destruct(struct sock *sk)
1516{
1517 __skb_queue_purge(&sk->sk_receive_queue);
1518}
1519
1520/**
1521 * filter_connect - Handle all incoming messages for a connection-based socket
1522 * @tsk: TIPC socket
1523 * @skb: pointer to message buffer. Set to NULL if buffer is consumed
1524 *
1525 * Returns true if everything ok, false otherwise
1526 */
1527static bool filter_connect(struct tipc_sock *tsk, struct sk_buff *skb)
1528{
1529 struct sock *sk = &tsk->sk;
1530 struct net *net = sock_net(sk);
1531 struct socket *sock = sk->sk_socket;
1532 struct tipc_msg *hdr = buf_msg(skb);
1533
1534 if (unlikely(msg_mcast(hdr)))
1535 return false;
1536
1537 switch ((int)sock->state) {
1538 case SS_CONNECTED:
1539
1540 /* Accept only connection-based messages sent by peer */
1541 if (unlikely(!tsk_peer_msg(tsk, hdr)))
1542 return false;
1543
1544 if (unlikely(msg_errcode(hdr))) {
1545 sock->state = SS_DISCONNECTING;
1546 tsk->connected = 0;
1547			/* Let timer expire on its own */
1548 tipc_node_remove_conn(net, tsk_peer_node(tsk),
1549 tsk->portid);
1550 }
1551 return true;
1552
1553 case SS_CONNECTING:
1554
1555 /* Accept only ACK or NACK message */
1556 if (unlikely(!msg_connected(hdr)))
1557 return false;
1558
1559 if (unlikely(msg_errcode(hdr))) {
1560 sock->state = SS_DISCONNECTING;
1561 sk->sk_err = ECONNREFUSED;
1562 return true;
1563 }
1564
1565 if (unlikely(!msg_isdata(hdr))) {
1566 sock->state = SS_DISCONNECTING;
1567 sk->sk_err = EINVAL;
1568 return true;
1569 }
1570
1571 tipc_sk_finish_conn(tsk, msg_origport(hdr), msg_orignode(hdr));
1572 msg_set_importance(&tsk->phdr, msg_importance(hdr));
1573 sock->state = SS_CONNECTED;
1574
1575 /* If 'ACK+' message, add to socket receive queue */
1576 if (msg_data_sz(hdr))
1577 return true;
1578
1579 /* If empty 'ACK-' message, wake up sleeping connect() */
1580 if (waitqueue_active(sk_sleep(sk)))
1581 wake_up_interruptible(sk_sleep(sk));
1582
1583 /* 'ACK-' message is neither accepted nor rejected: */
1584 msg_set_dest_droppable(hdr, 1);
1585 return false;
1586
1587 case SS_LISTENING:
1588 case SS_UNCONNECTED:
1589
1590 /* Accept only SYN message */
1591 if (!msg_connected(hdr) && !(msg_errcode(hdr)))
1592 return true;
1593 break;
1594 case SS_DISCONNECTING:
1595 break;
1596 default:
1597 pr_err("Unknown socket state %u\n", sock->state);
1598 }
1599 return false;
1600}
1601
1602/**
1603 * rcvbuf_limit - get proper overload limit of socket receive queue
1604 * @sk: socket
1605 * @buf: message
1606 *
1607 * For all connection-oriented messages, irrespective of importance,
1608 * the default overload value (i.e. 67MB) is used as the limit.
1609 *
1610 * For all connectionless messages, by default new queue limits are
1611 * as below:
1612 *
1613 * TIPC_LOW_IMPORTANCE (4 MB)
1614 * TIPC_MEDIUM_IMPORTANCE (8 MB)
1615 * TIPC_HIGH_IMPORTANCE (16 MB)
1616 * TIPC_CRITICAL_IMPORTANCE (32 MB)
1617 *
1618 * Returns overload limit according to corresponding message importance
1619 */
1620static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *buf)
1621{
1622 struct tipc_msg *msg = buf_msg(buf);
1623
1624 if (msg_connected(msg))
1625 return sysctl_tipc_rmem[2];
1626
1627 return sk->sk_rcvbuf >> TIPC_CRITICAL_IMPORTANCE <<
1628 msg_importance(msg);
1629}
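
/* Worked example for the connectionless case above: the limit is
 *
 *	limit = (sk->sk_rcvbuf >> TIPC_CRITICAL_IMPORTANCE) << importance
 *
 * so the base quota doubles per importance level. E.g. a 32 MB
 * sk_rcvbuf yields 32 >> 3 << 0 = 4 MB at TIPC_LOW_IMPORTANCE and
 * 32 >> 3 << 3 = 32 MB at TIPC_CRITICAL_IMPORTANCE, matching the
 * 4/8/16/32 MB defaults quoted in the comment.
 */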
1630
1631/**
1632 * filter_rcv - validate incoming message
1633 * @sk: socket
1634 * @skb: pointer to message.
1635 *
1636 * Enqueues message on receive queue if acceptable; optionally handles
1637 * disconnect indication for a connected socket.
1638 *
1639 * Called with socket lock already taken
1640 *
1641 * Returns true if message was added to socket receive queue, otherwise false
1642 */
1643static bool filter_rcv(struct sock *sk, struct sk_buff *skb)
1644{
1645 struct socket *sock = sk->sk_socket;
1646 struct tipc_sock *tsk = tipc_sk(sk);
1647 struct tipc_msg *hdr = buf_msg(skb);
1648 unsigned int limit = rcvbuf_limit(sk, skb);
1649 int err = TIPC_OK;
1650 int usr = msg_user(hdr);
1651
1652 if (unlikely(msg_user(hdr) == CONN_MANAGER)) {
1653 tipc_sk_proto_rcv(tsk, skb);
1654 return false;
1655 }
1656
1657 if (unlikely(usr == SOCK_WAKEUP)) {
1658 kfree_skb(skb);
1659 tsk->link_cong = 0;
1660 sk->sk_write_space(sk);
1661 return false;
1662 }
1663
1664 /* Drop if illegal message type */
1665 if (unlikely(msg_type(hdr) > TIPC_DIRECT_MSG)) {
1666 kfree_skb(skb);
1667 return false;
1668 }
1669
1670 /* Reject if wrong message type for current socket state */
1671 if (unlikely(sock->state == SS_READY)) {
1672 if (msg_connected(hdr)) {
1673 err = TIPC_ERR_NO_PORT;
1674 goto reject;
1675 }
1676 } else if (unlikely(!filter_connect(tsk, skb))) {
1677 err = TIPC_ERR_NO_PORT;
1678 goto reject;
1679 }
1680
1681 /* Reject message if there isn't room to queue it */
1682 if (unlikely(sk_rmem_alloc_get(sk) + skb->truesize >= limit)) {
1683 err = TIPC_ERR_OVERLOAD;
1684 goto reject;
1685 }
1686
1687 /* Enqueue message */
1688 TIPC_SKB_CB(skb)->handle = NULL;
1689 __skb_queue_tail(&sk->sk_receive_queue, skb);
1690 skb_set_owner_r(skb, sk);
1691
1692 sk->sk_data_ready(sk);
1693 return true;
1694
1695reject:
1696 tipc_sk_respond(sk, skb, err);
1697 return false;
1698}
1699
1700/**
1701 * tipc_backlog_rcv - handle incoming message from backlog queue
1702 * @sk: socket
1703 * @skb: message
1704 *
1705 * Caller must hold socket lock
1706 *
1707 * Returns 0
1708 */
1709static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb)
1710{
1711 unsigned int truesize = skb->truesize;
1712
1713 if (likely(filter_rcv(sk, skb)))
1714 atomic_add(truesize, &tipc_sk(sk)->dupl_rcvcnt);
1715 return 0;
1716}
1717
1718/**
1719 * tipc_sk_enqueue - extract all buffers with destination 'dport' from
1720 * inputq and try adding them to socket or backlog queue
1721 * @inputq: list of incoming buffers with potentially different destinations
1722 * @sk: socket where the buffers should be enqueued
1723 * @dport: port number for the socket
1724 *
1725 * Caller must hold socket lock
1726 */
1727static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
1728 u32 dport)
1729{
1730 unsigned int lim;
1731 atomic_t *dcnt;
1732 struct sk_buff *skb;
1733 unsigned long time_limit = jiffies + 2;
1734
	while (!skb_queue_empty(inputq)) {
1736 if (unlikely(time_after_eq(jiffies, time_limit)))
1737 return;
1738
1739 skb = tipc_skb_dequeue(inputq, dport);
1740 if (unlikely(!skb))
1741 return;
1742
1743 /* Add message directly to receive queue if possible */
1744 if (!sock_owned_by_user(sk)) {
1745 filter_rcv(sk, skb);
1746 continue;
1747 }
1748
1749 /* Try backlog, compensating for double-counted bytes */
1750 dcnt = &tipc_sk(sk)->dupl_rcvcnt;
1751 if (sk->sk_backlog.len)
1752 atomic_set(dcnt, 0);
1753 lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
1754 if (likely(!sk_add_backlog(sk, skb, lim)))
1755 continue;
1756
1757 /* Overload => reject message back to sender */
1758 tipc_sk_respond(sk, skb, TIPC_ERR_OVERLOAD);
1759 break;
1760 }
1761}
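
/* Note on the limit compensation above: an skb that goes through the
 * backlog is charged against the limit twice, once via sk_backlog.len
 * here and once via sk_rmem_alloc when filter_rcv() later moves it to
 * the receive queue. tipc_backlog_rcv() accumulates the duplicated
 * truesize in dupl_rcvcnt, and that amount is added back to the limit
 * above. Illustrative numbers: one backlogged skb of truesize 1 kB
 * widens a 4 MB limit to 4 MB + 1 kB until the backlog drains.
 */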
1762
/**
 * tipc_sk_rcv - handle a chain of incoming buffers
 * @net: network namespace
 * @inputq: buffer list containing the buffers
 *
 * Consumes all buffers in list until inputq is empty.
 * Note: may be called in multiple threads referring to the same queue.
 */
1769void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
1770{
1771 u32 dnode, dport = 0;
1772 int err;
1773 struct tipc_sock *tsk;
1774 struct sock *sk;
1775 struct sk_buff *skb;
1776
	while (!skb_queue_empty(inputq)) {
1778 dport = tipc_skb_peek_port(inputq, dport);
1779 tsk = tipc_sk_lookup(net, dport);
1780
1781 if (likely(tsk)) {
1782 sk = &tsk->sk;
1783 if (likely(spin_trylock_bh(&sk->sk_lock.slock))) {
1784 tipc_sk_enqueue(inputq, sk, dport);
1785 spin_unlock_bh(&sk->sk_lock.slock);
1786 }
1787 sock_put(sk);
1788 continue;
1789 }
1790
1791 /* No destination socket => dequeue skb if still there */
1792 skb = tipc_skb_dequeue(inputq, dport);
1793 if (!skb)
1794 return;
1795
1796 /* Try secondary lookup if unresolved named message */
1797 err = TIPC_ERR_NO_PORT;
1798 if (tipc_msg_lookup_dest(net, skb, &err))
1799 goto xmit;
1800
1801 /* Prepare for message rejection */
1802 if (!tipc_msg_reverse(tipc_own_addr(net), &skb, err))
1803 continue;
1804xmit:
1805 dnode = msg_destnode(buf_msg(skb));
1806 tipc_node_xmit_skb(net, skb, dnode, dport);
1807 }
1808}
1809
1810static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
1811{
1812 struct sock *sk = sock->sk;
1813 DEFINE_WAIT(wait);
1814 int done;
1815
1816 do {
1817 int err = sock_error(sk);
1818 if (err)
1819 return err;
1820 if (!*timeo_p)
1821 return -ETIMEDOUT;
1822 if (signal_pending(current))
1823 return sock_intr_errno(*timeo_p);
1824
1825 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1826 done = sk_wait_event(sk, timeo_p, sock->state != SS_CONNECTING);
1827 finish_wait(sk_sleep(sk), &wait);
1828 } while (!done);
1829 return 0;
1830}
1831
1832/**
1833 * tipc_connect - establish a connection to another TIPC port
1834 * @sock: socket structure
1835 * @dest: socket address for destination port
1836 * @destlen: size of socket address data structure
1837 * @flags: file-related flags associated with socket
1838 *
1839 * Returns 0 on success, errno otherwise
1840 */
1841static int tipc_connect(struct socket *sock, struct sockaddr *dest,
1842 int destlen, int flags)
1843{
1844 struct sock *sk = sock->sk;
1845 struct tipc_sock *tsk = tipc_sk(sk);
1846 struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest;
1847 struct msghdr m = {NULL,};
1848 long timeout = (flags & O_NONBLOCK) ? 0 : tsk->conn_timeout;
1849 socket_state previous;
1850 int res = 0;
1851
1852 lock_sock(sk);
1853
1854 /* DGRAM/RDM connect(), just save the destaddr */
1855 if (sock->state == SS_READY) {
1856 if (dst->family == AF_UNSPEC) {
1857 memset(&tsk->remote, 0, sizeof(struct sockaddr_tipc));
1858 tsk->connected = 0;
1859 } else if (destlen != sizeof(struct sockaddr_tipc)) {
1860 res = -EINVAL;
1861 } else {
1862 memcpy(&tsk->remote, dest, destlen);
1863 tsk->connected = 1;
1864 }
1865 goto exit;
1866 }
1867
1868 /*
1869 * Reject connection attempt using multicast address
1870 *
	 * Note: __tipc_sendmsg() validates the rest of the address fields,
1872 * so there's no need to do it here
1873 */
1874 if (dst->addrtype == TIPC_ADDR_MCAST) {
1875 res = -EINVAL;
1876 goto exit;
1877 }
1878
1879 previous = sock->state;
1880 switch (sock->state) {
1881 case SS_UNCONNECTED:
1882 /* Send a 'SYN-' to destination */
1883 m.msg_name = dest;
1884 m.msg_namelen = destlen;
1885
		/* For a non-blocking connect(), set MSG_DONTWAIT so that
		 * __tipc_sendmsg() never blocks.
		 */
1889 if (!timeout)
1890 m.msg_flags = MSG_DONTWAIT;
1891
1892 res = __tipc_sendmsg(sock, &m, 0);
1893 if ((res < 0) && (res != -EWOULDBLOCK))
1894 goto exit;
1895
		/* Just entered SS_CONNECTING state; the only difference
		 * from a pre-existing SS_CONNECTING is that the return
		 * value in the non-blocking case is EINPROGRESS rather
		 * than EALREADY.
		 */
		res = -EINPROGRESS;
		/* fall through */
1901 case SS_CONNECTING:
1902 if (previous == SS_CONNECTING)
1903 res = -EALREADY;
1904 if (!timeout)
1905 goto exit;
1906 timeout = msecs_to_jiffies(timeout);
1907 /* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */
1908 res = tipc_wait_for_connect(sock, &timeout);
1909 break;
1910 case SS_CONNECTED:
1911 res = -EISCONN;
1912 break;
1913 default:
1914 res = -EINVAL;
1915 break;
1916 }
1917exit:
1918 release_sock(sk);
1919 return res;
1920}
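
/* Userspace sketch (not part of this file): a blocking connect() to a
 * TIPC name address; the service type/instance values are hypothetical.
 *
 *	#include <sys/socket.h>
 *	#include <linux/tipc.h>
 *
 *	struct sockaddr_tipc sa = {
 *		.family = AF_TIPC,
 *		.addrtype = TIPC_ADDR_NAME,
 *		.addr.name.name.type = 18888,	// hypothetical service type
 *		.addr.name.name.instance = 17,
 *	};
 *	int fd = socket(AF_TIPC, SOCK_STREAM, 0);
 *
 *	if (connect(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0)
 *		perror("connect");	// O_NONBLOCK would yield EINPROGRESS
 */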
1921
1922/**
1923 * tipc_listen - allow socket to listen for incoming connections
1924 * @sock: socket structure
1925 * @len: (unused)
1926 *
1927 * Returns 0 on success, errno otherwise
1928 */
1929static int tipc_listen(struct socket *sock, int len)
1930{
1931 struct sock *sk = sock->sk;
1932 int res;
1933
1934 lock_sock(sk);
1935
1936 if (sock->state != SS_UNCONNECTED)
1937 res = -EINVAL;
1938 else {
1939 sock->state = SS_LISTENING;
1940 res = 0;
1941 }
1942
1943 release_sock(sk);
1944 return res;
1945}
1946
1947static int tipc_wait_for_accept(struct socket *sock, long timeo)
1948{
1949 struct sock *sk = sock->sk;
1950 DEFINE_WAIT(wait);
1951 int err;
1952
1953 /* True wake-one mechanism for incoming connections: only
1954 * one process gets woken up, not the 'whole herd'.
1955 * Since we do not 'race & poll' for established sockets
1956 * anymore, the common case will execute the loop only once.
1957 */
1958 for (;;) {
1959 prepare_to_wait_exclusive(sk_sleep(sk), &wait,
1960 TASK_INTERRUPTIBLE);
1961 if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
1962 release_sock(sk);
1963 timeo = schedule_timeout(timeo);
1964 lock_sock(sk);
1965 }
1966 err = 0;
1967 if (!skb_queue_empty(&sk->sk_receive_queue))
1968 break;
1969 err = -EINVAL;
1970 if (sock->state != SS_LISTENING)
1971 break;
1972 err = -EAGAIN;
1973 if (!timeo)
1974 break;
1975 err = sock_intr_errno(timeo);
1976 if (signal_pending(current))
1977 break;
1978 }
1979 finish_wait(sk_sleep(sk), &wait);
1980 return err;
1981}
1982
1983/**
1984 * tipc_accept - wait for connection request
1985 * @sock: listening socket
 * @new_sock: new socket that is to be connected
1987 * @flags: file-related flags associated with socket
1988 *
1989 * Returns 0 on success, errno otherwise
1990 */
1991static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags)
1992{
1993 struct sock *new_sk, *sk = sock->sk;
1994 struct sk_buff *buf;
1995 struct tipc_sock *new_tsock;
1996 struct tipc_msg *msg;
1997 long timeo;
1998 int res;
1999
2000 lock_sock(sk);
2001
2002 if (sock->state != SS_LISTENING) {
2003 res = -EINVAL;
2004 goto exit;
2005 }
2006 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
2007 res = tipc_wait_for_accept(sock, timeo);
2008 if (res)
2009 goto exit;
2010
2011 buf = skb_peek(&sk->sk_receive_queue);
2012
2013 res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, 1);
2014 if (res)
2015 goto exit;
2016 security_sk_clone(sock->sk, new_sock->sk);
2017
2018 new_sk = new_sock->sk;
2019 new_tsock = tipc_sk(new_sk);
2020 msg = buf_msg(buf);
2021
	/* We lock on new_sk, but lockdep sees the lock on sk */
2023 lock_sock_nested(new_sk, SINGLE_DEPTH_NESTING);
2024
2025 /*
2026 * Reject any stray messages received by new socket
2027 * before the socket lock was taken (very, very unlikely)
2028 */
2029 tsk_rej_rx_queue(new_sk);
2030
	/* Connect new socket to its peer */
2032 tipc_sk_finish_conn(new_tsock, msg_origport(msg), msg_orignode(msg));
2033 new_sock->state = SS_CONNECTED;
2034
2035 tsk_set_importance(new_tsock, msg_importance(msg));
2036 if (msg_named(msg)) {
2037 new_tsock->conn_type = msg_nametype(msg);
2038 new_tsock->conn_instance = msg_nameinst(msg);
2039 }
2040
2041 /*
	 * Respond to 'SYN-' by discarding it & returning 'ACK-'.
2043 * Respond to 'SYN+' by queuing it on new socket.
2044 */
2045 if (!msg_data_sz(msg)) {
2046 struct msghdr m = {NULL,};
2047
2048 tsk_advance_rx_queue(sk);
2049 __tipc_send_stream(new_sock, &m, 0);
2050 } else {
2051 __skb_dequeue(&sk->sk_receive_queue);
2052 __skb_queue_head(&new_sk->sk_receive_queue, buf);
2053 skb_set_owner_r(buf, new_sk);
2054 }
2055 release_sock(new_sk);
2056exit:
2057 release_sock(sk);
2058 return res;
2059}
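
/* Userspace sketch: publish a name sequence via bind(), then listen and
 * accept one connection; type/range values are hypothetical.
 *
 *	struct sockaddr_tipc sa = {
 *		.family = AF_TIPC,
 *		.addrtype = TIPC_ADDR_NAMESEQ,
 *		.scope = TIPC_ZONE_SCOPE,
 *		.addr.nameseq = { .type = 18888, .lower = 17, .upper = 17 },
 *	};
 *	int fd = socket(AF_TIPC, SOCK_SEQPACKET, 0);
 *
 *	bind(fd, (struct sockaddr *)&sa, sizeof(sa));
 *	listen(fd, 0);		// backlog length is ignored, see tipc_listen()
 *	int peer = accept(fd, NULL, NULL);
 */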
2060
2061/**
2062 * tipc_shutdown - shutdown socket connection
2063 * @sock: socket structure
2064 * @how: direction to close (must be SHUT_RDWR)
2065 *
2066 * Terminates connection (if necessary), then purges socket's receive queue.
2067 *
2068 * Returns 0 on success, errno otherwise
2069 */
2070static int tipc_shutdown(struct socket *sock, int how)
2071{
2072 struct sock *sk = sock->sk;
2073 struct net *net = sock_net(sk);
2074 struct tipc_sock *tsk = tipc_sk(sk);
2075 struct sk_buff *skb;
2076 u32 dnode = tsk_peer_node(tsk);
2077 u32 dport = tsk_peer_port(tsk);
2078 u32 onode = tipc_own_addr(net);
2079 u32 oport = tsk->portid;
2080 int res;
2081
2082 if (how != SHUT_RDWR)
2083 return -EINVAL;
2084
2085 lock_sock(sk);
2086
2087 switch (sock->state) {
2088 case SS_CONNECTING:
2089 case SS_CONNECTED:
2090
2091restart:
2092 dnode = tsk_peer_node(tsk);
2093
2094 /* Disconnect and send a 'FIN+' or 'FIN-' message to peer */
2095 skb = __skb_dequeue(&sk->sk_receive_queue);
2096 if (skb) {
2097 if (TIPC_SKB_CB(skb)->handle != NULL) {
2098 kfree_skb(skb);
2099 goto restart;
2100 }
2101 tipc_sk_respond(sk, skb, TIPC_CONN_SHUTDOWN);
2102 } else {
2103 skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
2104 TIPC_CONN_MSG, SHORT_H_SIZE,
2105 0, dnode, onode, dport, oport,
2106 TIPC_CONN_SHUTDOWN);
2107 tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
2108 }
2109 tsk->connected = 0;
2110 sock->state = SS_DISCONNECTING;
2111 tipc_node_remove_conn(net, dnode, tsk->portid);
2112 /* fall through */
2113
2114 case SS_DISCONNECTING:
2115
2116 /* Discard any unreceived messages */
2117 __skb_queue_purge(&sk->sk_receive_queue);
2118
2119 /* Wake up anyone sleeping in poll */
2120 sk->sk_state_change(sk);
2121 res = 0;
2122 break;
2123
2124 default:
2125 res = -ENOTCONN;
2126 }
2127
2128 release_sock(sk);
2129 return res;
2130}
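
/* Userspace sketch: this implementation only supports closing both
 * directions at once, so:
 *
 *	shutdown(fd, SHUT_RDWR);	// ok
 *	shutdown(fd, SHUT_WR);		// fails with EINVAL
 */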
2131
2132static void tipc_sk_timeout(unsigned long data)
2133{
2134 struct tipc_sock *tsk = (struct tipc_sock *)data;
2135 struct sock *sk = &tsk->sk;
2136 struct sk_buff *skb = NULL;
2137 u32 peer_port, peer_node;
2138 u32 own_node = tsk_own_node(tsk);
2139
2140 bh_lock_sock(sk);
2141 if (!tsk->connected) {
2142 bh_unlock_sock(sk);
2143 goto exit;
2144 }
2145 peer_port = tsk_peer_port(tsk);
2146 peer_node = tsk_peer_node(tsk);
2147
2148 if (tsk->probing_state == TIPC_CONN_PROBING) {
2149 if (!sock_owned_by_user(sk)) {
2150 sk->sk_socket->state = SS_DISCONNECTING;
2151 tsk->connected = 0;
			tipc_node_remove_conn(sock_net(sk), peer_node,
					      peer_port);
2154 sk->sk_state_change(sk);
2155 } else {
2156 /* Try again later */
2157 sk_reset_timer(sk, &sk->sk_timer, (HZ / 20));
2158 }
2159
2160 } else {
2161 skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE,
2162 INT_H_SIZE, 0, peer_node, own_node,
2163 peer_port, tsk->portid, TIPC_OK);
2164 tsk->probing_state = TIPC_CONN_PROBING;
2165 sk_reset_timer(sk, &sk->sk_timer, jiffies + tsk->probing_intv);
2166 }
2167 bh_unlock_sock(sk);
2168 if (skb)
2169 tipc_node_xmit_skb(sock_net(sk), skb, peer_node, tsk->portid);
2170exit:
2171 sock_put(sk);
2172}
2173
2174static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
2175 struct tipc_name_seq const *seq)
2176{
2177 struct net *net = sock_net(&tsk->sk);
2178 struct publication *publ;
2179 u32 key;
2180
2181 if (tsk->connected)
2182 return -EINVAL;
2183 key = tsk->portid + tsk->pub_count + 1;
2184 if (key == tsk->portid)
2185 return -EADDRINUSE;
2186
2187 publ = tipc_nametbl_publish(net, seq->type, seq->lower, seq->upper,
2188 scope, tsk->portid, key);
2189 if (unlikely(!publ))
2190 return -EINVAL;
2191
2192 list_add(&publ->pport_list, &tsk->publications);
2193 tsk->pub_count++;
2194 tsk->published = 1;
2195 return 0;
2196}
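
/* Sketch of the key sequence generated above for a socket with portid P
 * (pub_count starts at 0): the first publication gets key P+1, the
 * second P+2, and so on; only a wrap-around that would collide with P
 * itself is rejected with -EADDRINUSE.
 */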
2197
2198static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
2199 struct tipc_name_seq const *seq)
2200{
2201 struct net *net = sock_net(&tsk->sk);
2202 struct publication *publ;
2203 struct publication *safe;
2204 int rc = -EINVAL;
2205
2206 list_for_each_entry_safe(publ, safe, &tsk->publications, pport_list) {
2207 if (seq) {
2208 if (publ->scope != scope)
2209 continue;
2210 if (publ->type != seq->type)
2211 continue;
2212 if (publ->lower != seq->lower)
2213 continue;
2214 if (publ->upper != seq->upper)
2215 break;
2216 tipc_nametbl_withdraw(net, publ->type, publ->lower,
2217 publ->ref, publ->key);
2218 rc = 0;
2219 break;
2220 }
2221 tipc_nametbl_withdraw(net, publ->type, publ->lower,
2222 publ->ref, publ->key);
2223 rc = 0;
2224 }
2225 if (list_empty(&tsk->publications))
2226 tsk->published = 0;
2227 return rc;
2228}
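
/* Userspace sketch: withdrawal is also driven through bind(). Passing a
 * zero-length address withdraws every publication on the socket (see
 * tipc_bind()), while re-binding with a negated scope withdraws one
 * specific name sequence:
 *
 *	bind(fd, NULL, 0);		// withdraw all names
 */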
2229
2230/* tipc_sk_reinit: set non-zero address in all existing sockets
2231 * when we go from standalone to network mode.
2232 */
2233void tipc_sk_reinit(struct net *net)
2234{
2235 struct tipc_net *tn = net_generic(net, tipc_net_id);
2236 const struct bucket_table *tbl;
2237 struct rhash_head *pos;
2238 struct tipc_sock *tsk;
2239 struct tipc_msg *msg;
2240 int i;
2241
2242 rcu_read_lock();
2243 tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht);
2244 for (i = 0; i < tbl->size; i++) {
2245 rht_for_each_entry_rcu(tsk, pos, tbl, i, node) {
2246 spin_lock_bh(&tsk->sk.sk_lock.slock);
2247 msg = &tsk->phdr;
2248 msg_set_prevnode(msg, tn->own_addr);
2249 msg_set_orignode(msg, tn->own_addr);
2250 spin_unlock_bh(&tsk->sk.sk_lock.slock);
2251 }
2252 }
2253 rcu_read_unlock();
2254}
2255
2256static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid)
2257{
2258 struct tipc_net *tn = net_generic(net, tipc_net_id);
2259 struct tipc_sock *tsk;
2260
2261 rcu_read_lock();
2262 tsk = rhashtable_lookup_fast(&tn->sk_rht, &portid, tsk_rht_params);
2263 if (tsk)
2264 sock_hold(&tsk->sk);
2265 rcu_read_unlock();
2266
2267 return tsk;
2268}
2269
2270static int tipc_sk_insert(struct tipc_sock *tsk)
2271{
2272 struct sock *sk = &tsk->sk;
2273 struct net *net = sock_net(sk);
2274 struct tipc_net *tn = net_generic(net, tipc_net_id);
2275 u32 remaining = (TIPC_MAX_PORT - TIPC_MIN_PORT) + 1;
2276 u32 portid = prandom_u32() % remaining + TIPC_MIN_PORT;
2277
2278 while (remaining--) {
2279 portid++;
2280 if ((portid < TIPC_MIN_PORT) || (portid > TIPC_MAX_PORT))
2281 portid = TIPC_MIN_PORT;
2282 tsk->portid = portid;
2283 sock_hold(&tsk->sk);
2284 if (!rhashtable_lookup_insert_fast(&tn->sk_rht, &tsk->node,
2285 tsk_rht_params))
2286 return 0;
2287 sock_put(&tsk->sk);
2288 }
2289
2290 return -1;
2291}
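
/* Sketch of the allocation above: start from a random portid within
 * [TIPC_MIN_PORT, TIPC_MAX_PORT], then probe successive values, wrapping
 * at the top of the range, until rhashtable insertion succeeds, i.e.
 * until an unused portid is found.
 */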
2292
2293static void tipc_sk_remove(struct tipc_sock *tsk)
2294{
2295 struct sock *sk = &tsk->sk;
2296 struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id);
2297
2298 if (!rhashtable_remove_fast(&tn->sk_rht, &tsk->node, tsk_rht_params)) {
2299 WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
2300 __sock_put(sk);
2301 }
2302}
2303
2304static const struct rhashtable_params tsk_rht_params = {
2305 .nelem_hint = 192,
2306 .head_offset = offsetof(struct tipc_sock, node),
2307 .key_offset = offsetof(struct tipc_sock, portid),
2308 .key_len = sizeof(u32), /* portid */
2309 .max_size = 1048576,
2310 .min_size = 256,
2311 .automatic_shrinking = true,
2312};
2313
2314int tipc_sk_rht_init(struct net *net)
2315{
2316 struct tipc_net *tn = net_generic(net, tipc_net_id);
2317
2318 return rhashtable_init(&tn->sk_rht, &tsk_rht_params);
2319}
2320
2321void tipc_sk_rht_destroy(struct net *net)
2322{
2323 struct tipc_net *tn = net_generic(net, tipc_net_id);
2324
2325 /* Wait for socket readers to complete */
2326 synchronize_net();
2327
2328 rhashtable_destroy(&tn->sk_rht);
2329}
2330
2331/**
2332 * tipc_setsockopt - set socket option
2333 * @sock: socket structure
2334 * @lvl: option level
2335 * @opt: option identifier
2336 * @ov: pointer to new option value
2337 * @ol: length of option value
2338 *
2339 * For stream sockets only, accepts and ignores all IPPROTO_TCP options
2340 * (to ease compatibility).
2341 *
2342 * Returns 0 on success, errno otherwise
2343 */
2344static int tipc_setsockopt(struct socket *sock, int lvl, int opt,
2345 char __user *ov, unsigned int ol)
2346{
2347 struct sock *sk = sock->sk;
2348 struct tipc_sock *tsk = tipc_sk(sk);
2349 u32 value;
2350 int res;
2351
2352 if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
2353 return 0;
2354 if (lvl != SOL_TIPC)
2355 return -ENOPROTOOPT;
2356 if (ol < sizeof(value))
2357 return -EINVAL;
2358 res = get_user(value, (u32 __user *)ov);
2359 if (res)
2360 return res;
2361
2362 lock_sock(sk);
2363
2364 switch (opt) {
2365 case TIPC_IMPORTANCE:
2366 res = tsk_set_importance(tsk, value);
2367 break;
2368 case TIPC_SRC_DROPPABLE:
2369 if (sock->type != SOCK_STREAM)
2370 tsk_set_unreliable(tsk, value);
2371 else
2372 res = -ENOPROTOOPT;
2373 break;
2374 case TIPC_DEST_DROPPABLE:
2375 tsk_set_unreturnable(tsk, value);
2376 break;
2377 case TIPC_CONN_TIMEOUT:
2378 tipc_sk(sk)->conn_timeout = value;
2379 /* no need to set "res", since already 0 at this point */
2380 break;
2381 default:
2382 res = -EINVAL;
2383 }
2384
2385 release_sock(sk);
2386
2387 return res;
2388}
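
/* Userspace sketch: raising the message importance of a socket; the
 * value is one of the four TIPC_*_IMPORTANCE levels from linux/tipc.h.
 *
 *	__u32 imp = TIPC_HIGH_IMPORTANCE;
 *
 *	setsockopt(fd, SOL_TIPC, TIPC_IMPORTANCE, &imp, sizeof(imp));
 */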
2389
2390/**
2391 * tipc_getsockopt - get socket option
2392 * @sock: socket structure
2393 * @lvl: option level
2394 * @opt: option identifier
2395 * @ov: receptacle for option value
2396 * @ol: receptacle for length of option value
2397 *
2398 * For stream sockets only, returns 0 length result for all IPPROTO_TCP options
2399 * (to ease compatibility).
2400 *
2401 * Returns 0 on success, errno otherwise
2402 */
2403static int tipc_getsockopt(struct socket *sock, int lvl, int opt,
2404 char __user *ov, int __user *ol)
2405{
2406 struct sock *sk = sock->sk;
2407 struct tipc_sock *tsk = tipc_sk(sk);
2408 int len;
2409 u32 value;
2410 int res;
2411
2412 if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
2413 return put_user(0, ol);
2414 if (lvl != SOL_TIPC)
2415 return -ENOPROTOOPT;
2416 res = get_user(len, ol);
2417 if (res)
2418 return res;
2419
2420 lock_sock(sk);
2421
2422 switch (opt) {
2423 case TIPC_IMPORTANCE:
2424 value = tsk_importance(tsk);
2425 break;
2426 case TIPC_SRC_DROPPABLE:
2427 value = tsk_unreliable(tsk);
2428 break;
2429 case TIPC_DEST_DROPPABLE:
2430 value = tsk_unreturnable(tsk);
2431 break;
2432 case TIPC_CONN_TIMEOUT:
2433 value = tsk->conn_timeout;
2434 /* no need to set "res", since already 0 at this point */
2435 break;
2436 case TIPC_NODE_RECVQ_DEPTH:
2437 value = 0; /* was tipc_queue_size, now obsolete */
2438 break;
2439 case TIPC_SOCK_RECVQ_DEPTH:
2440 value = skb_queue_len(&sk->sk_receive_queue);
2441 break;
2442 default:
2443 res = -EINVAL;
2444 }
2445
2446 release_sock(sk);
2447
2448 if (res)
2449 return res; /* "get" failed */
2450
2451 if (len < sizeof(value))
2452 return -EINVAL;
2453
2454 if (copy_to_user(ov, &value, sizeof(value)))
2455 return -EFAULT;
2456
2457 return put_user(sizeof(value), ol);
2458}
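
/* Userspace sketch: reading back the connect timeout, in milliseconds:
 *
 *	__u32 ms;
 *	socklen_t len = sizeof(ms);
 *
 *	getsockopt(fd, SOL_TIPC, TIPC_CONN_TIMEOUT, &ms, &len);
 */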
2459
2460static int tipc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
2461{
2462 struct sock *sk = sock->sk;
2463 struct tipc_sioc_ln_req lnr;
2464 void __user *argp = (void __user *)arg;
2465
2466 switch (cmd) {
2467 case SIOCGETLINKNAME:
2468 if (copy_from_user(&lnr, argp, sizeof(lnr)))
2469 return -EFAULT;
2470 if (!tipc_node_get_linkname(sock_net(sk),
2471 lnr.bearer_id & 0xffff, lnr.peer,
2472 lnr.linkname, TIPC_MAX_LINK_NAME)) {
2473 if (copy_to_user(argp, &lnr, sizeof(lnr)))
2474 return -EFAULT;
2475 return 0;
2476 }
2477 return -EADDRNOTAVAIL;
2478 default:
2479 return -ENOIOCTLCMD;
2480 }
2481}
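
/* Userspace sketch: resolving a link name with SIOCGETLINKNAME; the
 * peer address and bearer id are hypothetical.
 *
 *	struct tipc_sioc_ln_req lnr = {
 *		.peer = peer_addr,	// hypothetical peer node address
 *		.bearer_id = 0,
 *	};
 *
 *	if (!ioctl(fd, SIOCGETLINKNAME, &lnr))
 *		printf("link: %s\n", lnr.linkname);
 */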
2482
2483/* Protocol switches for the various types of TIPC sockets */
2484
2485static const struct proto_ops msg_ops = {
2486 .owner = THIS_MODULE,
2487 .family = AF_TIPC,
2488 .release = tipc_release,
2489 .bind = tipc_bind,
2490 .connect = tipc_connect,
2491 .socketpair = sock_no_socketpair,
2492 .accept = sock_no_accept,
2493 .getname = tipc_getname,
2494 .poll = tipc_poll,
2495 .ioctl = tipc_ioctl,
2496 .listen = sock_no_listen,
2497 .shutdown = tipc_shutdown,
2498 .setsockopt = tipc_setsockopt,
2499 .getsockopt = tipc_getsockopt,
2500 .sendmsg = tipc_sendmsg,
2501 .recvmsg = tipc_recvmsg,
2502 .mmap = sock_no_mmap,
2503 .sendpage = sock_no_sendpage
2504};
2505
2506static const struct proto_ops packet_ops = {
2507 .owner = THIS_MODULE,
2508 .family = AF_TIPC,
2509 .release = tipc_release,
2510 .bind = tipc_bind,
2511 .connect = tipc_connect,
2512 .socketpair = sock_no_socketpair,
2513 .accept = tipc_accept,
2514 .getname = tipc_getname,
2515 .poll = tipc_poll,
2516 .ioctl = tipc_ioctl,
2517 .listen = tipc_listen,
2518 .shutdown = tipc_shutdown,
2519 .setsockopt = tipc_setsockopt,
2520 .getsockopt = tipc_getsockopt,
2521 .sendmsg = tipc_send_packet,
2522 .recvmsg = tipc_recvmsg,
2523 .mmap = sock_no_mmap,
2524 .sendpage = sock_no_sendpage
2525};
2526
2527static const struct proto_ops stream_ops = {
2528 .owner = THIS_MODULE,
2529 .family = AF_TIPC,
2530 .release = tipc_release,
2531 .bind = tipc_bind,
2532 .connect = tipc_connect,
2533 .socketpair = sock_no_socketpair,
2534 .accept = tipc_accept,
2535 .getname = tipc_getname,
2536 .poll = tipc_poll,
2537 .ioctl = tipc_ioctl,
2538 .listen = tipc_listen,
2539 .shutdown = tipc_shutdown,
2540 .setsockopt = tipc_setsockopt,
2541 .getsockopt = tipc_getsockopt,
2542 .sendmsg = tipc_send_stream,
2543 .recvmsg = tipc_recv_stream,
2544 .mmap = sock_no_mmap,
2545 .sendpage = sock_no_sendpage
2546};
2547
2548static const struct net_proto_family tipc_family_ops = {
2549 .owner = THIS_MODULE,
2550 .family = AF_TIPC,
2551 .create = tipc_sk_create
2552};
2553
2554static struct proto tipc_proto = {
2555 .name = "TIPC",
2556 .owner = THIS_MODULE,
2557 .obj_size = sizeof(struct tipc_sock),
2558 .sysctl_rmem = sysctl_tipc_rmem
2559};
2560
2561/**
2562 * tipc_socket_init - initialize TIPC socket interface
2563 *
2564 * Returns 0 on success, errno otherwise
2565 */
2566int tipc_socket_init(void)
2567{
2568 int res;
2569
2570 res = proto_register(&tipc_proto, 1);
2571 if (res) {
2572 pr_err("Failed to register TIPC protocol type\n");
2573 goto out;
2574 }
2575
2576 res = sock_register(&tipc_family_ops);
2577 if (res) {
2578 pr_err("Failed to register TIPC socket type\n");
2579 proto_unregister(&tipc_proto);
2580 goto out;
2581 }
2582 out:
2583 return res;
2584}
2585
2586/**
2587 * tipc_socket_stop - stop TIPC socket interface
2588 */
2589void tipc_socket_stop(void)
2590{
2591 sock_unregister(tipc_family_ops.family);
2592 proto_unregister(&tipc_proto);
2593}
2594
2595/* Caller should hold socket lock for the passed tipc socket. */
2596static int __tipc_nl_add_sk_con(struct sk_buff *skb, struct tipc_sock *tsk)
2597{
2598 u32 peer_node;
2599 u32 peer_port;
2600 struct nlattr *nest;
2601
2602 peer_node = tsk_peer_node(tsk);
2603 peer_port = tsk_peer_port(tsk);
2604
2605 nest = nla_nest_start(skb, TIPC_NLA_SOCK_CON);
2606
2607 if (nla_put_u32(skb, TIPC_NLA_CON_NODE, peer_node))
2608 goto msg_full;
2609 if (nla_put_u32(skb, TIPC_NLA_CON_SOCK, peer_port))
2610 goto msg_full;
2611
2612 if (tsk->conn_type != 0) {
2613 if (nla_put_flag(skb, TIPC_NLA_CON_FLAG))
2614 goto msg_full;
2615 if (nla_put_u32(skb, TIPC_NLA_CON_TYPE, tsk->conn_type))
2616 goto msg_full;
2617 if (nla_put_u32(skb, TIPC_NLA_CON_INST, tsk->conn_instance))
2618 goto msg_full;
2619 }
2620 nla_nest_end(skb, nest);
2621
2622 return 0;
2623
2624msg_full:
2625 nla_nest_cancel(skb, nest);
2626
2627 return -EMSGSIZE;
2628}
2629
2630/* Caller should hold socket lock for the passed tipc socket. */
2631static int __tipc_nl_add_sk(struct sk_buff *skb, struct netlink_callback *cb,
2632 struct tipc_sock *tsk)
2633{
2634 int err;
2635 void *hdr;
2636 struct nlattr *attrs;
2637 struct net *net = sock_net(skb->sk);
2638 struct tipc_net *tn = net_generic(net, tipc_net_id);
2639
2640 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
2641 &tipc_genl_family, NLM_F_MULTI, TIPC_NL_SOCK_GET);
2642 if (!hdr)
2643 goto msg_cancel;
2644
2645 attrs = nla_nest_start(skb, TIPC_NLA_SOCK);
2646 if (!attrs)
2647 goto genlmsg_cancel;
2648 if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->portid))
2649 goto attr_msg_cancel;
2650 if (nla_put_u32(skb, TIPC_NLA_SOCK_ADDR, tn->own_addr))
2651 goto attr_msg_cancel;
2652
2653 if (tsk->connected) {
2654 err = __tipc_nl_add_sk_con(skb, tsk);
2655 if (err)
2656 goto attr_msg_cancel;
2657 } else if (!list_empty(&tsk->publications)) {
2658 if (nla_put_flag(skb, TIPC_NLA_SOCK_HAS_PUBL))
2659 goto attr_msg_cancel;
2660 }
2661 nla_nest_end(skb, attrs);
2662 genlmsg_end(skb, hdr);
2663
2664 return 0;
2665
2666attr_msg_cancel:
2667 nla_nest_cancel(skb, attrs);
2668genlmsg_cancel:
2669 genlmsg_cancel(skb, hdr);
2670msg_cancel:
2671 return -EMSGSIZE;
2672}
2673
2674int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb)
2675{
2676 int err;
2677 struct tipc_sock *tsk;
2678 const struct bucket_table *tbl;
2679 struct rhash_head *pos;
2680 struct net *net = sock_net(skb->sk);
2681 struct tipc_net *tn = net_generic(net, tipc_net_id);
2682 u32 tbl_id = cb->args[0];
2683 u32 prev_portid = cb->args[1];
2684
2685 rcu_read_lock();
2686 tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht);
2687 for (; tbl_id < tbl->size; tbl_id++) {
2688 rht_for_each_entry_rcu(tsk, pos, tbl, tbl_id, node) {
2689 spin_lock_bh(&tsk->sk.sk_lock.slock);
2690 if (prev_portid && prev_portid != tsk->portid) {
2691 spin_unlock_bh(&tsk->sk.sk_lock.slock);
2692 continue;
2693 }
2694
2695 err = __tipc_nl_add_sk(skb, cb, tsk);
2696 if (err) {
2697 prev_portid = tsk->portid;
2698 spin_unlock_bh(&tsk->sk.sk_lock.slock);
2699 goto out;
2700 }
2701 prev_portid = 0;
2702 spin_unlock_bh(&tsk->sk.sk_lock.slock);
2703 }
2704 }
2705out:
2706 rcu_read_unlock();
2707 cb->args[0] = tbl_id;
2708 cb->args[1] = prev_portid;
2709
2710 return skb->len;
2711}
2712
2713/* Caller should hold socket lock for the passed tipc socket. */
2714static int __tipc_nl_add_sk_publ(struct sk_buff *skb,
2715 struct netlink_callback *cb,
2716 struct publication *publ)
2717{
2718 void *hdr;
2719 struct nlattr *attrs;
2720
2721 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
2722 &tipc_genl_family, NLM_F_MULTI, TIPC_NL_PUBL_GET);
2723 if (!hdr)
2724 goto msg_cancel;
2725
2726 attrs = nla_nest_start(skb, TIPC_NLA_PUBL);
2727 if (!attrs)
2728 goto genlmsg_cancel;
2729
2730 if (nla_put_u32(skb, TIPC_NLA_PUBL_KEY, publ->key))
2731 goto attr_msg_cancel;
2732 if (nla_put_u32(skb, TIPC_NLA_PUBL_TYPE, publ->type))
2733 goto attr_msg_cancel;
2734 if (nla_put_u32(skb, TIPC_NLA_PUBL_LOWER, publ->lower))
2735 goto attr_msg_cancel;
2736 if (nla_put_u32(skb, TIPC_NLA_PUBL_UPPER, publ->upper))
2737 goto attr_msg_cancel;
2738
2739 nla_nest_end(skb, attrs);
2740 genlmsg_end(skb, hdr);
2741
2742 return 0;
2743
2744attr_msg_cancel:
2745 nla_nest_cancel(skb, attrs);
2746genlmsg_cancel:
2747 genlmsg_cancel(skb, hdr);
2748msg_cancel:
2749 return -EMSGSIZE;
2750}
2751
2752/* Caller should hold socket lock for the passed tipc socket. */
2753static int __tipc_nl_list_sk_publ(struct sk_buff *skb,
2754 struct netlink_callback *cb,
2755 struct tipc_sock *tsk, u32 *last_publ)
2756{
2757 int err;
2758 struct publication *p;
2759
2760 if (*last_publ) {
2761 list_for_each_entry(p, &tsk->publications, pport_list) {
2762 if (p->key == *last_publ)
2763 break;
2764 }
2765 if (p->key != *last_publ) {
			/* We never set seq or call nl_dump_check_consistent(),
			 * which means that setting prev_seq here will cause
			 * the consistency check to fail in the netlink
			 * callback handler, resulting in the last NLMSG_DONE
			 * message having the NLM_F_DUMP_INTR flag set.
			 */
2772 cb->prev_seq = 1;
2773 *last_publ = 0;
2774 return -EPIPE;
2775 }
2776 } else {
2777 p = list_first_entry(&tsk->publications, struct publication,
2778 pport_list);
2779 }
2780
2781 list_for_each_entry_from(p, &tsk->publications, pport_list) {
2782 err = __tipc_nl_add_sk_publ(skb, cb, p);
2783 if (err) {
2784 *last_publ = p->key;
2785 return err;
2786 }
2787 }
2788 *last_publ = 0;
2789
2790 return 0;
2791}
2792
2793int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb)
2794{
2795 int err;
2796 u32 tsk_portid = cb->args[0];
2797 u32 last_publ = cb->args[1];
2798 u32 done = cb->args[2];
2799 struct net *net = sock_net(skb->sk);
2800 struct tipc_sock *tsk;
2801
2802 if (!tsk_portid) {
2803 struct nlattr **attrs;
2804 struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1];
2805
2806 err = tipc_nlmsg_parse(cb->nlh, &attrs);
2807 if (err)
2808 return err;
2809
2810 err = nla_parse_nested(sock, TIPC_NLA_SOCK_MAX,
2811 attrs[TIPC_NLA_SOCK],
2812 tipc_nl_sock_policy);
2813 if (err)
2814 return err;
2815
2816 if (!sock[TIPC_NLA_SOCK_REF])
2817 return -EINVAL;
2818
2819 tsk_portid = nla_get_u32(sock[TIPC_NLA_SOCK_REF]);
2820 }
2821
2822 if (done)
2823 return 0;
2824
2825 tsk = tipc_sk_lookup(net, tsk_portid);
2826 if (!tsk)
2827 return -EINVAL;
2828
2829 lock_sock(&tsk->sk);
2830 err = __tipc_nl_list_sk_publ(skb, cb, tsk, &last_publ);
2831 if (!err)
2832 done = 1;
2833 release_sock(&tsk->sk);
2834 sock_put(&tsk->sk);
2835
2836 cb->args[0] = tsk_portid;
2837 cb->args[1] = last_publ;
2838 cb->args[2] = done;
2839
2840 return skb->len;
2841}