/*
 * net/tipc/socket.c: TIPC socket API
 *
 * Copyright (c) 2001-2007, 2012-2019, Ericsson AB
 * Copyright (c) 2004-2008, 2010-2013, Wind River Systems
 * Copyright (c) 2020-2021, Red Hat Inc
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/rhashtable.h>
#include <linux/sched/signal.h>

#include "core.h"
#include "name_table.h"
#include "node.h"
#include "link.h"
#include "name_distr.h"
#include "socket.h"
#include "bcast.h"
#include "netlink.h"
#include "group.h"
#include "trace.h"

#define NAGLE_START_INIT	4
#define NAGLE_START_MAX		1024
#define CONN_TIMEOUT_DEFAULT	8000	/* default connect timeout = 8s */
#define CONN_PROBING_INTV	msecs_to_jiffies(3600000)  /* [ms] => 1 h */
#define TIPC_MAX_PORT		0xffffffff
#define TIPC_MIN_PORT		1
#define TIPC_ACK_RATE		4	/* ACK at 1/4 of rcv window size */

enum {
	TIPC_LISTEN = TCP_LISTEN,
	TIPC_ESTABLISHED = TCP_ESTABLISHED,
	TIPC_OPEN = TCP_CLOSE,
	TIPC_DISCONNECTING = TCP_CLOSE_WAIT,
	TIPC_CONNECTING = TCP_SYN_SENT,
};

struct sockaddr_pair {
	struct sockaddr_tipc sock;
	struct sockaddr_tipc member;
};

/**
 * struct tipc_sock - TIPC socket structure
 * @sk: socket - interacts with 'port' and with user via the socket API
 * @max_pkt: maximum packet size "hint" used when building messages sent by port
 * @maxnagle: maximum size of msg which can be subject to nagle
 * @portid: unique port identity in TIPC socket hash table
 * @phdr: preformatted message header used when sending messages
 * @cong_links: list of congested links
 * @publications: list of publications for port
 * @blocking_link: address of the congested link we are currently sleeping on
 * @pub_count: total # of publications port has made during its lifetime
 * @conn_timeout: the time we can wait for an unresponded setup request
 * @probe_unacked: probe has not received ack yet
 * @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue
 * @cong_link_cnt: number of congested links
 * @snt_unacked: # messages sent by socket, and not yet acked by peer
 * @snd_win: send window size
 * @peer_caps: peer capabilities mask
 * @rcv_unacked: # messages read by user, but not yet acked back to peer
 * @rcv_win: receive window size
 * @peer: 'connected' peer for dgram/rdm
 * @node: hash table node
 * @mc_method: cookie for use between socket and broadcast layer
 * @rcu: rcu struct for tipc_sock
 * @group: TIPC communications group
 * @oneway: message count in one direction (FIXME)
 * @nagle_start: current nagle value
 * @snd_backlog: send backlog count
 * @msg_acc: messages accepted; used in managing backlog and nagle
 * @pkt_cnt: TIPC socket packet count
 * @expect_ack: whether this TIPC socket is expecting an ack
 * @nodelay: setsockopt() TIPC_NODELAY setting
 * @group_is_open: TIPC socket group is fully open (FIXME)
 * @published: true if port has one or more associated names
 * @conn_addrtype: address type used when establishing connection
 */
struct tipc_sock {
	struct sock sk;
	u32 max_pkt;
	u32 maxnagle;
	u32 portid;
	struct tipc_msg phdr;
	struct list_head cong_links;
	struct list_head publications;
	u32 pub_count;
	atomic_t dupl_rcvcnt;
	u16 conn_timeout;
	bool probe_unacked;
	u16 cong_link_cnt;
	u16 snt_unacked;
	u16 snd_win;
	u16 peer_caps;
	u16 rcv_unacked;
	u16 rcv_win;
	struct sockaddr_tipc peer;
	struct rhash_head node;
	struct tipc_mc_method mc_method;
	struct rcu_head rcu;
	struct tipc_group *group;
	u32 oneway;
	u32 nagle_start;
	u16 snd_backlog;
	u16 msg_acc;
	u16 pkt_cnt;
	bool expect_ack;
	bool nodelay;
	bool group_is_open;
	bool published;
	u8 conn_addrtype;
};

static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
static void tipc_data_ready(struct sock *sk);
static void tipc_write_space(struct sock *sk);
static void tipc_sock_destruct(struct sock *sk);
static int tipc_release(struct socket *sock);
static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
		       bool kern);
static void tipc_sk_timeout(struct timer_list *t);
static int tipc_sk_publish(struct tipc_sock *tsk, struct tipc_uaddr *ua);
static int tipc_sk_withdraw(struct tipc_sock *tsk, struct tipc_uaddr *ua);
static int tipc_sk_leave(struct tipc_sock *tsk);
static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid);
static int tipc_sk_insert(struct tipc_sock *tsk);
static void tipc_sk_remove(struct tipc_sock *tsk);
static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz);
static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz);
static void tipc_sk_push_backlog(struct tipc_sock *tsk, bool nagle_ack);
static int tipc_wait_for_connect(struct socket *sock, long *timeo_p);

static const struct proto_ops packet_ops;
static const struct proto_ops stream_ops;
static const struct proto_ops msg_ops;
static struct proto tipc_proto;
static const struct rhashtable_params tsk_rht_params;

static u32 tsk_own_node(struct tipc_sock *tsk)
{
	return msg_prevnode(&tsk->phdr);
}

static u32 tsk_peer_node(struct tipc_sock *tsk)
{
	return msg_destnode(&tsk->phdr);
}

static u32 tsk_peer_port(struct tipc_sock *tsk)
{
	return msg_destport(&tsk->phdr);
}

static bool tsk_unreliable(struct tipc_sock *tsk)
{
	return msg_src_droppable(&tsk->phdr) != 0;
}

static void tsk_set_unreliable(struct tipc_sock *tsk, bool unreliable)
{
	msg_set_src_droppable(&tsk->phdr, unreliable ? 1 : 0);
}

static bool tsk_unreturnable(struct tipc_sock *tsk)
{
	return msg_dest_droppable(&tsk->phdr) != 0;
}

static void tsk_set_unreturnable(struct tipc_sock *tsk, bool unreturnable)
{
	msg_set_dest_droppable(&tsk->phdr, unreturnable ? 1 : 0);
}

static int tsk_importance(struct tipc_sock *tsk)
{
	return msg_importance(&tsk->phdr);
}

static struct tipc_sock *tipc_sk(const struct sock *sk)
{
	return container_of(sk, struct tipc_sock, sk);
}

int tsk_set_importance(struct sock *sk, int imp)
{
	if (imp > TIPC_CRITICAL_IMPORTANCE)
		return -EINVAL;
	msg_set_importance(&tipc_sk(sk)->phdr, (u32)imp);
	return 0;
}

static bool tsk_conn_cong(struct tipc_sock *tsk)
{
	return tsk->snt_unacked > tsk->snd_win;
}

static u16 tsk_blocks(int len)
{
	return ((len / FLOWCTL_BLK_SZ) + 1);
}

/* tsk_adv_blocks(): translate a buffer size in bytes to number of
 * advertisable blocks, taking into account the ratio truesize(len)/len
 * We can trust that this ratio is always < 4 for len >= FLOWCTL_BLK_SZ
 */
static u16 tsk_adv_blocks(int len)
{
	return len / FLOWCTL_BLK_SZ / 4;
}

/* tsk_inc(): increment counter for sent or received data
 * - If block based flow control is not supported by peer we
 *   fall back to message based flow control, incrementing the
 *   counter by one per message
 */
static u16 tsk_inc(struct tipc_sock *tsk, int msglen)
{
	if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
		return ((msglen / FLOWCTL_BLK_SZ) + 1);
	return 1;
}
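
/* Worked example (editorial note, assuming FLOWCTL_BLK_SZ is 1024 as
 * defined in socket.h): a 3000-byte message costs tsk_inc() =
 * 3000/1024 + 1 = 3 advertisable blocks, while a 16 kB receive buffer
 * advertises tsk_adv_blocks(16384) = 16384/1024/4 = 4 blocks; the
 * divide-by-four absorbs the worst-case truesize(len)/len ratio noted
 * above. A peer without TIPC_BLOCK_FLOWCTL is charged a flat one unit
 * per message instead.
 */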

/* tsk_set_nagle - enable/disable nagle property by manipulating maxnagle
 */
static void tsk_set_nagle(struct tipc_sock *tsk)
{
	struct sock *sk = &tsk->sk;

	tsk->maxnagle = 0;
	if (sk->sk_type != SOCK_STREAM)
		return;
	if (tsk->nodelay)
		return;
	if (!(tsk->peer_caps & TIPC_NAGLE))
		return;
	/* Limit node local buffer size to avoid receive queue overflow */
	if (tsk->max_pkt == MAX_MSG_SIZE)
		tsk->maxnagle = 1500;
	else
		tsk->maxnagle = tsk->max_pkt;
}

/**
 * tsk_advance_rx_queue - discard first buffer in socket receive queue
 * @sk: network socket
 *
 * Caller must hold socket lock
 */
static void tsk_advance_rx_queue(struct sock *sk)
{
	trace_tipc_sk_advance_rx(sk, NULL, TIPC_DUMP_SK_RCVQ, " ");
	kfree_skb(__skb_dequeue(&sk->sk_receive_queue));
}

/* tipc_sk_respond(): send response message back to sender
 */
static void tipc_sk_respond(struct sock *sk, struct sk_buff *skb, int err)
{
	u32 selector;
	u32 dnode;
	u32 onode = tipc_own_addr(sock_net(sk));

	if (!tipc_msg_reverse(onode, &skb, err))
		return;

	trace_tipc_sk_rej_msg(sk, skb, TIPC_DUMP_NONE, "@sk_respond!");
	dnode = msg_destnode(buf_msg(skb));
	selector = msg_origport(buf_msg(skb));
	tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector);
}

/**
 * tsk_rej_rx_queue - reject all buffers in socket receive queue
 * @sk: network socket
 * @error: response error code
 *
 * Caller must hold socket lock
 */
static void tsk_rej_rx_queue(struct sock *sk, int error)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&sk->sk_receive_queue)))
		tipc_sk_respond(sk, skb, error);
}

static bool tipc_sk_connected(struct sock *sk)
{
	return sk->sk_state == TIPC_ESTABLISHED;
}

/* tipc_sk_type_connectionless - check if the socket is a datagram socket
 * @sk: socket
 *
 * Returns true if connectionless, false otherwise
 */
static bool tipc_sk_type_connectionless(struct sock *sk)
{
	return sk->sk_type == SOCK_RDM || sk->sk_type == SOCK_DGRAM;
}

/* tsk_peer_msg - verify if message was sent by connected port's peer
 *
 * Handles cases where the node's network address has changed from
 * the default of <0.0.0> to its configured setting.
 */
static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg)
{
	struct sock *sk = &tsk->sk;
	u32 self = tipc_own_addr(sock_net(sk));
	u32 peer_port = tsk_peer_port(tsk);
	u32 orig_node, peer_node;

	if (unlikely(!tipc_sk_connected(sk)))
		return false;

	if (unlikely(msg_origport(msg) != peer_port))
		return false;

	orig_node = msg_orignode(msg);
	peer_node = tsk_peer_node(tsk);

	if (likely(orig_node == peer_node))
		return true;

	if (!orig_node && peer_node == self)
		return true;

	if (!peer_node && orig_node == self)
		return true;

	return false;
}

/* tipc_set_sk_state - set the sk_state of the socket
 * @sk: socket
 *
 * Caller must hold socket lock
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_set_sk_state(struct sock *sk, int state)
{
	int oldsk_state = sk->sk_state;
	int res = -EINVAL;

	switch (state) {
	case TIPC_OPEN:
		res = 0;
		break;
	case TIPC_LISTEN:
	case TIPC_CONNECTING:
		if (oldsk_state == TIPC_OPEN)
			res = 0;
		break;
	case TIPC_ESTABLISHED:
		if (oldsk_state == TIPC_CONNECTING ||
		    oldsk_state == TIPC_OPEN)
			res = 0;
		break;
	case TIPC_DISCONNECTING:
		if (oldsk_state == TIPC_CONNECTING ||
		    oldsk_state == TIPC_ESTABLISHED)
			res = 0;
		break;
	}

	if (!res)
		sk->sk_state = state;

	return res;
}

static int tipc_sk_sock_err(struct socket *sock, long *timeout)
{
	struct sock *sk = sock->sk;
	int err = sock_error(sk);
	int typ = sock->type;

	if (err)
		return err;
	if (typ == SOCK_STREAM || typ == SOCK_SEQPACKET) {
		if (sk->sk_state == TIPC_DISCONNECTING)
			return -EPIPE;
		else if (!tipc_sk_connected(sk))
			return -ENOTCONN;
	}
	if (!*timeout)
		return -EAGAIN;
	if (signal_pending(current))
		return sock_intr_errno(*timeout);

	return 0;
}

#define tipc_wait_for_cond(sock_, timeo_, condition_)			       \
({									       \
	DEFINE_WAIT_FUNC(wait_, woken_wake_function);			       \
	struct sock *sk_;						       \
	int rc_;							       \
									       \
	while ((rc_ = !(condition_))) {					       \
		/* coupled with smp_wmb() in tipc_sk_proto_rcv() */	       \
		smp_rmb();						       \
		sk_ = (sock_)->sk;					       \
		rc_ = tipc_sk_sock_err((sock_), timeo_);		       \
		if (rc_)						       \
			break;						       \
		add_wait_queue(sk_sleep(sk_), &wait_);			       \
		release_sock(sk_);					       \
		*(timeo_) = wait_woken(&wait_, TASK_INTERRUPTIBLE, *(timeo_)); \
		sched_annotate_sleep();					       \
		lock_sock(sk_);						       \
		remove_wait_queue(sk_sleep(sk_), &wait_);		       \
	}								       \
	rc_;								       \
})
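
/* Usage sketch (editorial note): callers hold the socket lock and pass
 * the condition to wait for, e.g.
 *
 *	rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt);
 *
 * The macro releases the socket lock while sleeping and re-acquires it
 * before the condition is re-evaluated, so the condition expression may
 * safely dereference state protected by that lock.
 */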

/**
 * tipc_sk_create - create a TIPC socket
 * @net: network namespace (must be default network)
 * @sock: pre-allocated socket structure
 * @protocol: protocol indicator (must be 0)
 * @kern: caused by kernel or by userspace?
 *
 * This routine creates additional data structures used by the TIPC socket,
 * initializes them, and links them together.
 *
 * Return: 0 on success, errno otherwise
 */
static int tipc_sk_create(struct net *net, struct socket *sock,
			  int protocol, int kern)
{
	const struct proto_ops *ops;
	struct sock *sk;
	struct tipc_sock *tsk;
	struct tipc_msg *msg;

	/* Validate arguments */
	if (unlikely(protocol != 0))
		return -EPROTONOSUPPORT;

	switch (sock->type) {
	case SOCK_STREAM:
		ops = &stream_ops;
		break;
	case SOCK_SEQPACKET:
		ops = &packet_ops;
		break;
	case SOCK_DGRAM:
	case SOCK_RDM:
		ops = &msg_ops;
		break;
	default:
		return -EPROTOTYPE;
	}

	/* Allocate socket's protocol area */
	sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto, kern);
	if (sk == NULL)
		return -ENOMEM;

	tsk = tipc_sk(sk);
	tsk->max_pkt = MAX_PKT_DEFAULT;
	tsk->maxnagle = 0;
	tsk->nagle_start = NAGLE_START_INIT;
	INIT_LIST_HEAD(&tsk->publications);
	INIT_LIST_HEAD(&tsk->cong_links);
	msg = &tsk->phdr;

	/* Finish initializing socket data structures */
	sock->ops = ops;
	sock_init_data(sock, sk);
	tipc_set_sk_state(sk, TIPC_OPEN);
	if (tipc_sk_insert(tsk)) {
		pr_warn("Socket create failed; port number exhausted\n");
		return -EINVAL;
	}

	/* Ensure tsk is visible before we read own_addr. */
	smp_mb();

	tipc_msg_init(tipc_own_addr(net), msg, TIPC_LOW_IMPORTANCE,
		      TIPC_NAMED_MSG, NAMED_H_SIZE, 0);

	msg_set_origport(msg, tsk->portid);
	timer_setup(&sk->sk_timer, tipc_sk_timeout, 0);
	sk->sk_shutdown = 0;
	sk->sk_backlog_rcv = tipc_sk_backlog_rcv;
	sk->sk_rcvbuf = sysctl_tipc_rmem[1];
	sk->sk_data_ready = tipc_data_ready;
	sk->sk_write_space = tipc_write_space;
	sk->sk_destruct = tipc_sock_destruct;
	tsk->conn_timeout = CONN_TIMEOUT_DEFAULT;
	tsk->group_is_open = true;
	atomic_set(&tsk->dupl_rcvcnt, 0);

	/* Start out with safe limits until we receive an advertised window */
	tsk->snd_win = tsk_adv_blocks(RCVBUF_MIN);
	tsk->rcv_win = tsk->snd_win;

	if (tipc_sk_type_connectionless(sk)) {
		tsk_set_unreturnable(tsk, true);
		if (sock->type == SOCK_DGRAM)
			tsk_set_unreliable(tsk, true);
	}
	__skb_queue_head_init(&tsk->mc_method.deferredq);
	trace_tipc_sk_create(sk, NULL, TIPC_DUMP_NONE, " ");
	return 0;
}
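
/* Illustrative user-space counterpart (editorial sketch, not part of the
 * kernel logic): this create routine runs for any of the three supported
 * socket types, e.g.
 *
 *	int rdm = socket(AF_TIPC, SOCK_RDM, 0);	   // reliable datagram
 *	int str = socket(AF_TIPC, SOCK_STREAM, 0); // byte stream
 *
 * and any protocol value other than 0 fails with EPROTONOSUPPORT.
 */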

static void tipc_sk_callback(struct rcu_head *head)
{
	struct tipc_sock *tsk = container_of(head, struct tipc_sock, rcu);

	sock_put(&tsk->sk);
}

/* Caller should hold socket lock for the socket. */
static void __tipc_shutdown(struct socket *sock, int error)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct net *net = sock_net(sk);
	long timeout = msecs_to_jiffies(CONN_TIMEOUT_DEFAULT);
	u32 dnode = tsk_peer_node(tsk);
	struct sk_buff *skb;

	/* Avoid that hi-prio shutdown msgs bypass msgs in link wakeup queue */
	tipc_wait_for_cond(sock, &timeout, (!tsk->cong_link_cnt &&
					    !tsk_conn_cong(tsk)));

	/* Push out delayed messages if in Nagle mode */
	tipc_sk_push_backlog(tsk, false);
	/* Remove pending SYN */
	__skb_queue_purge(&sk->sk_write_queue);

	/* Remove partially received buffer if any */
	skb = skb_peek(&sk->sk_receive_queue);
	if (skb && TIPC_SKB_CB(skb)->bytes_read) {
		__skb_unlink(skb, &sk->sk_receive_queue);
		kfree_skb(skb);
	}

	/* Reject all unreceived messages if connectionless */
	if (tipc_sk_type_connectionless(sk)) {
		tsk_rej_rx_queue(sk, error);
		return;
	}

	switch (sk->sk_state) {
	case TIPC_CONNECTING:
	case TIPC_ESTABLISHED:
		tipc_set_sk_state(sk, TIPC_DISCONNECTING);
		tipc_node_remove_conn(net, dnode, tsk->portid);
		/* Send a FIN+/- to its peer */
		skb = __skb_dequeue(&sk->sk_receive_queue);
		if (skb) {
			__skb_queue_purge(&sk->sk_receive_queue);
			tipc_sk_respond(sk, skb, error);
			break;
		}
		skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
				      TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode,
				      tsk_own_node(tsk), tsk_peer_port(tsk),
				      tsk->portid, error);
		if (skb)
			tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
		break;
	case TIPC_LISTEN:
		/* Reject all SYN messages */
		tsk_rej_rx_queue(sk, error);
		break;
	default:
		__skb_queue_purge(&sk->sk_receive_queue);
		break;
	}
}

/**
 * tipc_release - destroy a TIPC socket
 * @sock: socket to destroy
 *
 * This routine cleans up any messages that are still queued on the socket.
 * For DGRAM and RDM socket types, all queued messages are rejected.
 * For SEQPACKET and STREAM socket types, the first message is rejected
 * and any others are discarded. (If the first message on a STREAM socket
 * is partially-read, it is discarded and the next one is rejected instead.)
 *
 * NOTE: Rejected messages are not necessarily returned to the sender! They
 * are returned or discarded according to the "destination droppable" setting
 * specified for the message by the sender.
 *
 * Return: 0 on success, errno otherwise
 */
static int tipc_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk;

	/*
	 * Exit if socket isn't fully initialized (occurs when a failed accept()
	 * releases a pre-allocated child socket that was never used)
	 */
	if (sk == NULL)
		return 0;

	tsk = tipc_sk(sk);
	lock_sock(sk);

	trace_tipc_sk_release(sk, NULL, TIPC_DUMP_ALL, " ");
	__tipc_shutdown(sock, TIPC_ERR_NO_PORT);
	sk->sk_shutdown = SHUTDOWN_MASK;
	tipc_sk_leave(tsk);
	tipc_sk_withdraw(tsk, NULL);
	__skb_queue_purge(&tsk->mc_method.deferredq);
	sk_stop_timer(sk, &sk->sk_timer);
	tipc_sk_remove(tsk);

	sock_orphan(sk);
	/* Reject any messages that accumulated in backlog queue */
	release_sock(sk);
	tipc_dest_list_purge(&tsk->cong_links);
	tsk->cong_link_cnt = 0;
	call_rcu(&tsk->rcu, tipc_sk_callback);
	sock->sk = NULL;

	return 0;
}

/**
 * __tipc_bind - associate or disassociate TIPC name(s) with a socket
 * @sock: socket structure
 * @skaddr: socket address describing name(s) and desired operation
 * @alen: size of socket address data structure
 *
 * Name and name sequence binding are indicated using a positive scope value;
 * a negative scope value unbinds the specified name. Specifying no name
 * (i.e. a socket address length of 0) unbinds all names from the socket.
 *
 * Return: 0 on success, errno otherwise
 *
 * NOTE: This routine doesn't need to take the socket lock since it doesn't
 * access any non-constant socket information.
 */
static int __tipc_bind(struct socket *sock, struct sockaddr *skaddr, int alen)
{
	struct tipc_uaddr *ua = (struct tipc_uaddr *)skaddr;
	struct tipc_sock *tsk = tipc_sk(sock->sk);
	bool unbind = false;

	if (unlikely(!alen))
		return tipc_sk_withdraw(tsk, NULL);

	if (ua->addrtype == TIPC_SERVICE_ADDR) {
		ua->addrtype = TIPC_SERVICE_RANGE;
		ua->sr.upper = ua->sr.lower;
	}
	if (ua->scope < 0) {
		unbind = true;
		ua->scope = -ua->scope;
	}
	/* Users may still use deprecated TIPC_ZONE_SCOPE */
	if (ua->scope != TIPC_NODE_SCOPE)
		ua->scope = TIPC_CLUSTER_SCOPE;

	if (tsk->group)
		return -EACCES;

	if (unbind)
		return tipc_sk_withdraw(tsk, ua);
	return tipc_sk_publish(tsk, ua);
}

int tipc_sk_bind(struct socket *sock, struct sockaddr *skaddr, int alen)
{
	int res;

	lock_sock(sock->sk);
	res = __tipc_bind(sock, skaddr, alen);
	release_sock(sock->sk);
	return res;
}

static int tipc_bind(struct socket *sock, struct sockaddr *skaddr, int alen)
{
	struct tipc_uaddr *ua = (struct tipc_uaddr *)skaddr;
	u32 atype = ua->addrtype;

	if (alen) {
		if (!tipc_uaddr_valid(ua, alen))
			return -EINVAL;
		if (atype == TIPC_SOCKET_ADDR)
			return -EAFNOSUPPORT;
		if (ua->sr.type < TIPC_RESERVED_TYPES) {
			pr_warn_once("Can't bind to reserved service type %u\n",
				     ua->sr.type);
			return -EACCES;
		}
	}
	return tipc_sk_bind(sock, skaddr, alen);
}
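
/* Illustrative user-space bind (editorial sketch): publishing service
 * type 18888, instance 17, with cluster scope:
 *
 *	struct sockaddr_tipc a = {
 *		.family = AF_TIPC,
 *		.addrtype = TIPC_SERVICE_RANGE,
 *		.scope = TIPC_CLUSTER_SCOPE,
 *		.addr.nameseq = { .type = 18888, .lower = 17, .upper = 17 },
 *	};
 *	bind(sd, (struct sockaddr *)&a, sizeof(a));
 *
 * Passing the negated scope in an otherwise identical call withdraws
 * the name again, as described above.
 */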

/**
 * tipc_getname - get port ID of socket or peer socket
 * @sock: socket structure
 * @uaddr: area for returned socket address
 * @peer: 0 = own ID, 1 = current peer ID, 2 = current/former peer ID
 *
 * Return: 0 on success, errno otherwise
 *
 * NOTE: This routine doesn't need to take the socket lock since it only
 * accesses socket information that is unchanging (or which changes in
 * a completely predictable manner).
 */
static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
			int peer)
{
	struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);

	memset(addr, 0, sizeof(*addr));
	if (peer) {
		if ((!tipc_sk_connected(sk)) &&
		    ((peer != 2) || (sk->sk_state != TIPC_DISCONNECTING)))
			return -ENOTCONN;
		addr->addr.id.ref = tsk_peer_port(tsk);
		addr->addr.id.node = tsk_peer_node(tsk);
	} else {
		addr->addr.id.ref = tsk->portid;
		addr->addr.id.node = tipc_own_addr(sock_net(sk));
	}

	addr->addrtype = TIPC_SOCKET_ADDR;
	addr->family = AF_TIPC;
	addr->scope = 0;
	addr->addr.name.domain = 0;

	return sizeof(*addr);
}

/**
 * tipc_poll - read and possibly block on pollmask
 * @file: file structure associated with the socket
 * @sock: socket for which to calculate the poll bits
 * @wait: poll table entry, passed through to sock_poll_wait()
 *
 * Return: pollmask value
 *
 * COMMENTARY:
 * It appears that the usual socket locking mechanisms are not useful here
 * since the pollmask info is potentially out-of-date the moment this routine
 * exits. TCP and other protocols seem to rely on higher level poll routines
 * to handle any preventable race conditions, so TIPC will do the same ...
 *
 * IMPORTANT: The fact that a read or write operation is indicated does NOT
 * imply that the operation will succeed, merely that it should be performed
 * and will not block.
 */
static __poll_t tipc_poll(struct file *file, struct socket *sock,
			  poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	__poll_t revents = 0;

	sock_poll_wait(file, sock, wait);
	trace_tipc_sk_poll(sk, NULL, TIPC_DUMP_ALL, " ");

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		revents |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		revents |= EPOLLHUP;

	switch (sk->sk_state) {
	case TIPC_ESTABLISHED:
		if (!tsk->cong_link_cnt && !tsk_conn_cong(tsk))
			revents |= EPOLLOUT;
		fallthrough;
	case TIPC_LISTEN:
	case TIPC_CONNECTING:
		if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
			revents |= EPOLLIN | EPOLLRDNORM;
		break;
	case TIPC_OPEN:
		if (tsk->group_is_open && !tsk->cong_link_cnt)
			revents |= EPOLLOUT;
		if (!tipc_sk_type_connectionless(sk))
			break;
		if (skb_queue_empty_lockless(&sk->sk_receive_queue))
			break;
		revents |= EPOLLIN | EPOLLRDNORM;
		break;
	case TIPC_DISCONNECTING:
		revents = EPOLLIN | EPOLLRDNORM | EPOLLHUP;
		break;
	}
	return revents;
}

/**
 * tipc_sendmcast - send multicast message
 * @sock: socket structure
 * @ua: destination address struct
 * @msg: message to send
 * @dlen: length of data to send
 * @timeout: timeout to wait for wakeup
 *
 * Called from function tipc_sendmsg(), which has done all sanity checks
 * Return: the number of bytes sent on success, or errno
 */
static int tipc_sendmcast(struct socket *sock, struct tipc_uaddr *ua,
			  struct msghdr *msg, size_t dlen, long timeout)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_msg *hdr = &tsk->phdr;
	struct net *net = sock_net(sk);
	int mtu = tipc_bcast_get_mtu(net);
	struct sk_buff_head pkts;
	struct tipc_nlist dsts;
	int rc;

	if (tsk->group)
		return -EACCES;

	/* Block or return if any destination link is congested */
	rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt);
	if (unlikely(rc))
		return rc;

	/* Lookup destination nodes */
	tipc_nlist_init(&dsts, tipc_own_addr(net));
	tipc_nametbl_lookup_mcast_nodes(net, ua, &dsts);
	if (!dsts.local && !dsts.remote)
		return -EHOSTUNREACH;

	/* Build message header */
	msg_set_type(hdr, TIPC_MCAST_MSG);
	msg_set_hdr_sz(hdr, MCAST_H_SIZE);
	msg_set_lookup_scope(hdr, TIPC_CLUSTER_SCOPE);
	msg_set_destport(hdr, 0);
	msg_set_destnode(hdr, 0);
	msg_set_nametype(hdr, ua->sr.type);
	msg_set_namelower(hdr, ua->sr.lower);
	msg_set_nameupper(hdr, ua->sr.upper);

	/* Build message as chain of buffers */
	__skb_queue_head_init(&pkts);
	rc = tipc_msg_build(hdr, msg, 0, dlen, mtu, &pkts);

	/* Send message if build was successful */
	if (unlikely(rc == dlen)) {
		trace_tipc_sk_sendmcast(sk, skb_peek(&pkts),
					TIPC_DUMP_SK_SNDQ, " ");
		rc = tipc_mcast_xmit(net, &pkts, &tsk->mc_method, &dsts,
				     &tsk->cong_link_cnt);
	}

	tipc_nlist_purge(&dsts);

	return rc ? rc : dlen;
}
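
/* Illustrative user-space multicast send (editorial sketch): a datagram
 * addressed to service range 18888:0-99 lands in tipc_sendmcast() above:
 *
 *	struct sockaddr_tipc dst = {
 *		.family = AF_TIPC,
 *		.addrtype = TIPC_SERVICE_RANGE,
 *		.addr.nameseq = { .type = 18888, .lower = 0, .upper = 99 },
 *	};
 *	sendto(sd, buf, len, 0, (struct sockaddr *)&dst, sizeof(dst));
 */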

/**
 * tipc_send_group_msg - send a message to a member in the group
 * @net: network namespace
 * @tsk: tipc socket
 * @m: message to send
 * @mb: group member
 * @dnode: destination node
 * @dport: destination port
 * @dlen: total length of message data
 */
static int tipc_send_group_msg(struct net *net, struct tipc_sock *tsk,
			       struct msghdr *m, struct tipc_member *mb,
			       u32 dnode, u32 dport, int dlen)
{
	u16 bc_snd_nxt = tipc_group_bc_snd_nxt(tsk->group);
	struct tipc_mc_method *method = &tsk->mc_method;
	int blks = tsk_blocks(GROUP_H_SIZE + dlen);
	struct tipc_msg *hdr = &tsk->phdr;
	struct sk_buff_head pkts;
	int mtu, rc;

	/* Complete message header */
	msg_set_type(hdr, TIPC_GRP_UCAST_MSG);
	msg_set_hdr_sz(hdr, GROUP_H_SIZE);
	msg_set_destport(hdr, dport);
	msg_set_destnode(hdr, dnode);
	msg_set_grp_bc_seqno(hdr, bc_snd_nxt);

	/* Build message as chain of buffers */
	__skb_queue_head_init(&pkts);
	mtu = tipc_node_get_mtu(net, dnode, tsk->portid, false);
	rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
	if (unlikely(rc != dlen))
		return rc;

	/* Send message */
	rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
	if (unlikely(rc == -ELINKCONG)) {
		tipc_dest_push(&tsk->cong_links, dnode, 0);
		tsk->cong_link_cnt++;
	}

	/* Update send window */
	tipc_group_update_member(mb, blks);

	/* A broadcast sent within next EXPIRE period must follow same path */
	method->rcast = true;
	method->mandatory = true;
	return dlen;
}

/**
 * tipc_send_group_unicast - send message to a member in the group
 * @sock: socket structure
 * @m: message to send
 * @dlen: total length of message data
 * @timeout: timeout to wait for wakeup
 *
 * Called from function tipc_sendmsg(), which has done all sanity checks
 * Return: the number of bytes sent on success, or errno
 */
static int tipc_send_group_unicast(struct socket *sock, struct msghdr *m,
				   int dlen, long timeout)
{
	struct sock *sk = sock->sk;
	struct tipc_uaddr *ua = (struct tipc_uaddr *)m->msg_name;
	int blks = tsk_blocks(GROUP_H_SIZE + dlen);
	struct tipc_sock *tsk = tipc_sk(sk);
	struct net *net = sock_net(sk);
	struct tipc_member *mb = NULL;
	u32 node, port;
	int rc;

	node = ua->sk.node;
	port = ua->sk.ref;
	if (!port && !node)
		return -EHOSTUNREACH;

	/* Block or return if destination link or member is congested */
	rc = tipc_wait_for_cond(sock, &timeout,
				!tipc_dest_find(&tsk->cong_links, node, 0) &&
				tsk->group &&
				!tipc_group_cong(tsk->group, node, port, blks,
						 &mb));
	if (unlikely(rc))
		return rc;

	if (unlikely(!mb))
		return -EHOSTUNREACH;

	rc = tipc_send_group_msg(net, tsk, m, mb, node, port, dlen);

	return rc ? rc : dlen;
}

/**
 * tipc_send_group_anycast - send message to any member with given identity
 * @sock: socket structure
 * @m: message to send
 * @dlen: total length of message data
 * @timeout: timeout to wait for wakeup
 *
 * Called from function tipc_sendmsg(), which has done all sanity checks
 * Return: the number of bytes sent on success, or errno
 */
static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m,
				   int dlen, long timeout)
{
	struct tipc_uaddr *ua = (struct tipc_uaddr *)m->msg_name;
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct list_head *cong_links = &tsk->cong_links;
	int blks = tsk_blocks(GROUP_H_SIZE + dlen);
	struct tipc_msg *hdr = &tsk->phdr;
	struct tipc_member *first = NULL;
	struct tipc_member *mbr = NULL;
	struct net *net = sock_net(sk);
	u32 node, port, exclude;
	struct list_head dsts;
	int lookups = 0;
	int dstcnt, rc;
	bool cong;

	INIT_LIST_HEAD(&dsts);
	ua->sa.type = msg_nametype(hdr);
	ua->scope = msg_lookup_scope(hdr);

	while (++lookups < 4) {
		exclude = tipc_group_exclude(tsk->group);

		first = NULL;

		/* Look for a non-congested destination member, if any */
		while (1) {
			if (!tipc_nametbl_lookup_group(net, ua, &dsts, &dstcnt,
						       exclude, false))
				return -EHOSTUNREACH;
			tipc_dest_pop(&dsts, &node, &port);
			cong = tipc_group_cong(tsk->group, node, port, blks,
					       &mbr);
			if (!cong)
				break;
			if (mbr == first)
				break;
			if (!first)
				first = mbr;
		}

		/* Start over if destination was not in member list */
		if (unlikely(!mbr))
			continue;

		if (likely(!cong && !tipc_dest_find(cong_links, node, 0)))
			break;

		/* Block or return if destination link or member is congested */
		rc = tipc_wait_for_cond(sock, &timeout,
					!tipc_dest_find(cong_links, node, 0) &&
					tsk->group &&
					!tipc_group_cong(tsk->group, node, port,
							 blks, &mbr));
		if (unlikely(rc))
			return rc;

		/* Send, unless destination disappeared while waiting */
		if (likely(mbr))
			break;
	}

	if (unlikely(lookups >= 4))
		return -EHOSTUNREACH;

	rc = tipc_send_group_msg(net, tsk, m, mbr, node, port, dlen);

	return rc ? rc : dlen;
}

/**
 * tipc_send_group_bcast - send message to all members in communication group
 * @sock: socket structure
 * @m: message to send
 * @dlen: total length of message data
 * @timeout: timeout to wait for wakeup
 *
 * Called from function tipc_sendmsg(), which has done all sanity checks
 * Return: the number of bytes sent on success, or errno
 */
static int tipc_send_group_bcast(struct socket *sock, struct msghdr *m,
				 int dlen, long timeout)
{
	struct tipc_uaddr *ua = (struct tipc_uaddr *)m->msg_name;
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_nlist *dsts;
	struct tipc_mc_method *method = &tsk->mc_method;
	bool ack = method->mandatory && method->rcast;
	int blks = tsk_blocks(MCAST_H_SIZE + dlen);
	struct tipc_msg *hdr = &tsk->phdr;
	int mtu = tipc_bcast_get_mtu(net);
	struct sk_buff_head pkts;
	int rc = -EHOSTUNREACH;

	/* Block or return if any destination link or member is congested */
	rc = tipc_wait_for_cond(sock, &timeout,
				!tsk->cong_link_cnt && tsk->group &&
				!tipc_group_bc_cong(tsk->group, blks));
	if (unlikely(rc))
		return rc;

	dsts = tipc_group_dests(tsk->group);
	if (!dsts->local && !dsts->remote)
		return -EHOSTUNREACH;

	/* Complete message header */
	if (ua) {
		msg_set_type(hdr, TIPC_GRP_MCAST_MSG);
		msg_set_nameinst(hdr, ua->sa.instance);
	} else {
		msg_set_type(hdr, TIPC_GRP_BCAST_MSG);
		msg_set_nameinst(hdr, 0);
	}
	msg_set_hdr_sz(hdr, GROUP_H_SIZE);
	msg_set_destport(hdr, 0);
	msg_set_destnode(hdr, 0);
	msg_set_grp_bc_seqno(hdr, tipc_group_bc_snd_nxt(tsk->group));

	/* Avoid getting stuck with repeated forced replicasts */
	msg_set_grp_bc_ack_req(hdr, ack);

	/* Build message as chain of buffers */
	__skb_queue_head_init(&pkts);
	rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
	if (unlikely(rc != dlen))
		return rc;

	/* Send message */
	rc = tipc_mcast_xmit(net, &pkts, method, dsts, &tsk->cong_link_cnt);
	if (unlikely(rc))
		return rc;

	/* Update broadcast sequence number and send windows */
	tipc_group_update_bc_members(tsk->group, blks, ack);

	/* Broadcast link is now free to choose method for next broadcast */
	method->mandatory = false;
	method->expires = jiffies;

	return dlen;
}

/**
 * tipc_send_group_mcast - send message to all members with given identity
 * @sock: socket structure
 * @m: message to send
 * @dlen: total length of message data
 * @timeout: timeout to wait for wakeup
 *
 * Called from function tipc_sendmsg(), which has done all sanity checks
 * Return: the number of bytes sent on success, or errno
 */
static int tipc_send_group_mcast(struct socket *sock, struct msghdr *m,
				 int dlen, long timeout)
{
	struct tipc_uaddr *ua = (struct tipc_uaddr *)m->msg_name;
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_group *grp = tsk->group;
	struct tipc_msg *hdr = &tsk->phdr;
	struct net *net = sock_net(sk);
	struct list_head dsts;
	u32 dstcnt, exclude;

	INIT_LIST_HEAD(&dsts);
	ua->sa.type = msg_nametype(hdr);
	ua->scope = msg_lookup_scope(hdr);
	exclude = tipc_group_exclude(grp);

	if (!tipc_nametbl_lookup_group(net, ua, &dsts, &dstcnt, exclude, true))
		return -EHOSTUNREACH;

	if (dstcnt == 1) {
		tipc_dest_pop(&dsts, &ua->sk.node, &ua->sk.ref);
		return tipc_send_group_unicast(sock, m, dlen, timeout);
	}

	tipc_dest_list_purge(&dsts);
	return tipc_send_group_bcast(sock, m, dlen, timeout);
}
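
/* Illustrative group membership setup (editorial sketch): the group send
 * paths above apply only after the socket has joined a group, e.g. via
 * the TIPC_GROUP_JOIN socket option:
 *
 *	struct tipc_group_req req = {
 *		.type = 18888,				// group identity
 *		.instance = 17,				// member identity
 *		.scope = TIPC_CLUSTER_SCOPE,
 *		.flags = TIPC_GROUP_MEMBER_EVTS,	// deliver join/leave events
 *	};
 *	setsockopt(sd, SOL_TIPC, TIPC_GROUP_JOIN, &req, sizeof(req));
 */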

/**
 * tipc_sk_mcast_rcv - Deliver multicast messages to all destination sockets
 * @net: the associated network namespace
 * @arrvq: queue with arriving messages, to be cloned after destination lookup
 * @inputq: queue with cloned messages, delivered to socket after dest lookup
 *
 * Multi-threaded: parallel calls with reference to same queues may occur
 */
void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
		       struct sk_buff_head *inputq)
{
	u32 self = tipc_own_addr(net);
	struct sk_buff *skb, *_skb;
	u32 portid, onode;
	struct sk_buff_head tmpq;
	struct list_head dports;
	struct tipc_msg *hdr;
	struct tipc_uaddr ua;
	int user, mtyp, hlen;

	__skb_queue_head_init(&tmpq);
	INIT_LIST_HEAD(&dports);
	ua.addrtype = TIPC_SERVICE_RANGE;

	/* tipc_skb_peek() increments the head skb's reference counter */
	skb = tipc_skb_peek(arrvq, &inputq->lock);
	for (; skb; skb = tipc_skb_peek(arrvq, &inputq->lock)) {
		hdr = buf_msg(skb);
		user = msg_user(hdr);
		mtyp = msg_type(hdr);
		hlen = skb_headroom(skb) + msg_hdr_sz(hdr);
		onode = msg_orignode(hdr);
		ua.sr.type = msg_nametype(hdr);
		ua.sr.lower = msg_namelower(hdr);
		ua.sr.upper = msg_nameupper(hdr);
		if (onode == self)
			ua.scope = TIPC_ANY_SCOPE;
		else
			ua.scope = TIPC_CLUSTER_SCOPE;

		if (mtyp == TIPC_GRP_UCAST_MSG || user == GROUP_PROTOCOL) {
			spin_lock_bh(&inputq->lock);
			if (skb_peek(arrvq) == skb) {
				__skb_dequeue(arrvq);
				__skb_queue_tail(inputq, skb);
			}
			kfree_skb(skb);
			spin_unlock_bh(&inputq->lock);
			continue;
		}

		/* Group messages require exact scope match */
		if (msg_in_group(hdr)) {
			ua.sr.lower = 0;
			ua.sr.upper = ~0;
			ua.scope = msg_lookup_scope(hdr);
		}

		/* Create destination port list: */
		tipc_nametbl_lookup_mcast_sockets(net, &ua, &dports);

		/* Clone message per destination */
		while (tipc_dest_pop(&dports, NULL, &portid)) {
			_skb = __pskb_copy(skb, hlen, GFP_ATOMIC);
			if (_skb) {
				msg_set_destport(buf_msg(_skb), portid);
				__skb_queue_tail(&tmpq, _skb);
				continue;
			}
			pr_warn("Failed to clone mcast rcv buffer\n");
		}
		/* Append clones to inputq only if skb is still head of arrvq */
		spin_lock_bh(&inputq->lock);
		if (skb_peek(arrvq) == skb) {
			skb_queue_splice_tail_init(&tmpq, inputq);
			/* Decrement the skb's refcnt */
			kfree_skb(__skb_dequeue(arrvq));
		}
		spin_unlock_bh(&inputq->lock);
		__skb_queue_purge(&tmpq);
		kfree_skb(skb);
	}
	tipc_sk_rcv(net, inputq);
}

/* tipc_sk_push_backlog(): send accumulated buffers in socket write queue
 * when socket is in Nagle mode
 */
static void tipc_sk_push_backlog(struct tipc_sock *tsk, bool nagle_ack)
{
	struct sk_buff_head *txq = &tsk->sk.sk_write_queue;
	struct sk_buff *skb = skb_peek_tail(txq);
	struct net *net = sock_net(&tsk->sk);
	u32 dnode = tsk_peer_node(tsk);
	int rc;

	if (nagle_ack) {
		tsk->pkt_cnt += skb_queue_len(txq);
		if (!tsk->pkt_cnt || tsk->msg_acc / tsk->pkt_cnt < 2) {
			tsk->oneway = 0;
			if (tsk->nagle_start < NAGLE_START_MAX)
				tsk->nagle_start *= 2;
			tsk->expect_ack = false;
			pr_debug("tsk %10u: bad nagle %u -> %u, next start %u!\n",
				 tsk->portid, tsk->msg_acc, tsk->pkt_cnt,
				 tsk->nagle_start);
		} else {
			tsk->nagle_start = NAGLE_START_INIT;
			if (skb) {
				msg_set_ack_required(buf_msg(skb));
				tsk->expect_ack = true;
			} else {
				tsk->expect_ack = false;
			}
		}
		tsk->msg_acc = 0;
		tsk->pkt_cnt = 0;
	}

	if (!skb || tsk->cong_link_cnt)
		return;

	/* Do not send SYN again after congestion */
	if (msg_is_syn(buf_msg(skb)))
		return;

	if (tsk->msg_acc)
		tsk->pkt_cnt += skb_queue_len(txq);
	tsk->snt_unacked += tsk->snd_backlog;
	tsk->snd_backlog = 0;
	rc = tipc_node_xmit(net, txq, dnode, tsk->portid);
	if (rc == -ELINKCONG)
		tsk->cong_link_cnt = 1;
}

/**
 * tipc_sk_conn_proto_rcv - receive a connection management protocol message
 * @tsk: receiving socket
 * @skb: pointer to message buffer.
 * @inputq: buffer list containing the buffers
 * @xmitq: output message area
 */
static void tipc_sk_conn_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
				   struct sk_buff_head *inputq,
				   struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	u32 onode = tsk_own_node(tsk);
	struct sock *sk = &tsk->sk;
	int mtyp = msg_type(hdr);
	bool was_cong;

	/* Ignore if connection cannot be validated: */
	if (!tsk_peer_msg(tsk, hdr)) {
		trace_tipc_sk_drop_msg(sk, skb, TIPC_DUMP_NONE, "@proto_rcv!");
		goto exit;
	}

	if (unlikely(msg_errcode(hdr))) {
		tipc_set_sk_state(sk, TIPC_DISCONNECTING);
		tipc_node_remove_conn(sock_net(sk), tsk_peer_node(tsk),
				      tsk_peer_port(tsk));
		sk->sk_state_change(sk);

		/* State change is ignored if socket already awake,
		 * - convert msg to abort msg and add to inqueue
		 */
		msg_set_user(hdr, TIPC_CRITICAL_IMPORTANCE);
		msg_set_type(hdr, TIPC_CONN_MSG);
		msg_set_size(hdr, BASIC_H_SIZE);
		msg_set_hdr_sz(hdr, BASIC_H_SIZE);
		__skb_queue_tail(inputq, skb);
		return;
	}

	tsk->probe_unacked = false;

	if (mtyp == CONN_PROBE) {
		msg_set_type(hdr, CONN_PROBE_REPLY);
		if (tipc_msg_reverse(onode, &skb, TIPC_OK))
			__skb_queue_tail(xmitq, skb);
		return;
	} else if (mtyp == CONN_ACK) {
		was_cong = tsk_conn_cong(tsk);
		tipc_sk_push_backlog(tsk, msg_nagle_ack(hdr));
		tsk->snt_unacked -= msg_conn_ack(hdr);
		if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
			tsk->snd_win = msg_adv_win(hdr);
		if (was_cong && !tsk_conn_cong(tsk))
			sk->sk_write_space(sk);
	} else if (mtyp != CONN_PROBE_REPLY) {
		pr_warn("Received unknown CONN_PROTO msg\n");
	}
exit:
	kfree_skb(skb);
}

/**
 * tipc_sendmsg - send message in connectionless manner
 * @sock: socket structure
 * @m: message to send
 * @dsz: amount of user data to be sent
 *
 * Message must have a destination specified explicitly.
 * Used for SOCK_RDM and SOCK_DGRAM messages,
 * and for 'SYN' messages on SOCK_SEQPACKET and SOCK_STREAM connections.
 * (Note: 'SYN+' is prohibited on SOCK_STREAM.)
 *
 * Return: the number of bytes sent on success, or errno otherwise
 */
static int tipc_sendmsg(struct socket *sock,
			struct msghdr *m, size_t dsz)
{
	struct sock *sk = sock->sk;
	int ret;

	lock_sock(sk);
	ret = __tipc_sendmsg(sock, m, dsz);
	release_sock(sk);

	return ret;
}

static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_uaddr *ua = (struct tipc_uaddr *)m->msg_name;
	long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
	struct list_head *clinks = &tsk->cong_links;
	bool syn = !tipc_sk_type_connectionless(sk);
	struct tipc_group *grp = tsk->group;
	struct tipc_msg *hdr = &tsk->phdr;
	struct tipc_socket_addr skaddr;
	struct sk_buff_head pkts;
	int atype, mtu, rc;

	if (unlikely(dlen > TIPC_MAX_USER_MSG_SIZE))
		return -EMSGSIZE;

	if (ua) {
		if (!tipc_uaddr_valid(ua, m->msg_namelen))
			return -EINVAL;
		atype = ua->addrtype;
	}

	/* If socket belongs to a communication group follow other paths */
	if (grp) {
		if (!ua)
			return tipc_send_group_bcast(sock, m, dlen, timeout);
		if (atype == TIPC_SERVICE_ADDR)
			return tipc_send_group_anycast(sock, m, dlen, timeout);
		if (atype == TIPC_SOCKET_ADDR)
			return tipc_send_group_unicast(sock, m, dlen, timeout);
		if (atype == TIPC_SERVICE_RANGE)
			return tipc_send_group_mcast(sock, m, dlen, timeout);
		return -EINVAL;
	}

	if (!ua) {
		ua = (struct tipc_uaddr *)&tsk->peer;
		if (!syn && ua->family != AF_TIPC)
			return -EDESTADDRREQ;
		atype = ua->addrtype;
	}

	if (unlikely(syn)) {
		if (sk->sk_state == TIPC_LISTEN)
			return -EPIPE;
		if (sk->sk_state != TIPC_OPEN)
			return -EISCONN;
		if (tsk->published)
			return -EOPNOTSUPP;
		if (atype == TIPC_SERVICE_ADDR)
			tsk->conn_addrtype = atype;
		msg_set_syn(hdr, 1);
	}

	/* Determine destination */
	if (atype == TIPC_SERVICE_RANGE) {
		return tipc_sendmcast(sock, ua, m, dlen, timeout);
	} else if (atype == TIPC_SERVICE_ADDR) {
		skaddr.node = ua->lookup_node;
		ua->scope = tipc_node2scope(skaddr.node);
		if (!tipc_nametbl_lookup_anycast(net, ua, &skaddr))
			return -EHOSTUNREACH;
	} else if (atype == TIPC_SOCKET_ADDR) {
		skaddr = ua->sk;
	} else {
		return -EINVAL;
	}

	/* Block or return if destination link is congested */
	rc = tipc_wait_for_cond(sock, &timeout,
				!tipc_dest_find(clinks, skaddr.node, 0));
	if (unlikely(rc))
		return rc;

	/* Finally build message header */
	msg_set_destnode(hdr, skaddr.node);
	msg_set_destport(hdr, skaddr.ref);
	if (atype == TIPC_SERVICE_ADDR) {
		msg_set_type(hdr, TIPC_NAMED_MSG);
		msg_set_hdr_sz(hdr, NAMED_H_SIZE);
		msg_set_nametype(hdr, ua->sa.type);
		msg_set_nameinst(hdr, ua->sa.instance);
		msg_set_lookup_scope(hdr, ua->scope);
	} else { /* TIPC_SOCKET_ADDR */
		msg_set_type(hdr, TIPC_DIRECT_MSG);
		msg_set_lookup_scope(hdr, 0);
		msg_set_hdr_sz(hdr, BASIC_H_SIZE);
	}

	/* Add message body */
	__skb_queue_head_init(&pkts);
	mtu = tipc_node_get_mtu(net, skaddr.node, tsk->portid, true);
	rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
	if (unlikely(rc != dlen))
		return rc;
	if (unlikely(syn && !tipc_msg_skb_clone(&pkts, &sk->sk_write_queue))) {
		__skb_queue_purge(&pkts);
		return -ENOMEM;
	}

	/* Send message */
	trace_tipc_sk_sendmsg(sk, skb_peek(&pkts), TIPC_DUMP_SK_SNDQ, " ");
	rc = tipc_node_xmit(net, &pkts, skaddr.node, tsk->portid);
	if (unlikely(rc == -ELINKCONG)) {
		tipc_dest_push(clinks, skaddr.node, 0);
		tsk->cong_link_cnt++;
		rc = 0;
	}

	if (unlikely(syn && !rc)) {
		tipc_set_sk_state(sk, TIPC_CONNECTING);
		if (dlen && timeout) {
			timeout = msecs_to_jiffies(timeout);
			tipc_wait_for_connect(sock, &timeout);
		}
	}

	return rc ? rc : dlen;
}
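
/* Illustrative user-space anycast send (editorial sketch): a datagram to
 * service address 18888:17 takes the TIPC_SERVICE_ADDR branch above:
 *
 *	struct sockaddr_tipc dst = {
 *		.family = AF_TIPC,
 *		.addrtype = TIPC_SERVICE_ADDR,
 *		.addr.name.name = { .type = 18888, .instance = 17 },
 *	};
 *	sendto(sd, buf, len, 0, (struct sockaddr *)&dst, sizeof(dst));
 */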

/**
 * tipc_sendstream - send stream-oriented data
 * @sock: socket structure
 * @m: data to send
 * @dsz: total length of data to be transmitted
 *
 * Used for SOCK_STREAM data.
 *
 * Return: the number of bytes sent on success (or partial success),
 * or errno if no data sent
 */
static int tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz)
{
	struct sock *sk = sock->sk;
	int ret;

	lock_sock(sk);
	ret = __tipc_sendstream(sock, m, dsz);
	release_sock(sk);

	return ret;
}

static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen)
{
	struct sock *sk = sock->sk;
	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
	long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
	struct sk_buff_head *txq = &sk->sk_write_queue;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_msg *hdr = &tsk->phdr;
	struct net *net = sock_net(sk);
	struct sk_buff *skb;
	u32 dnode = tsk_peer_node(tsk);
	int maxnagle = tsk->maxnagle;
	int maxpkt = tsk->max_pkt;
	int send, sent = 0;
	int blocks, rc = 0;

	if (unlikely(dlen > INT_MAX))
		return -EMSGSIZE;

	/* Handle implicit connection setup */
	if (unlikely(dest && sk->sk_state == TIPC_OPEN)) {
		rc = __tipc_sendmsg(sock, m, dlen);
		if (dlen && dlen == rc) {
			tsk->peer_caps = tipc_node_get_capabilities(net, dnode);
			tsk->snt_unacked = tsk_inc(tsk, dlen + msg_hdr_sz(hdr));
		}
		return rc;
	}

	do {
		rc = tipc_wait_for_cond(sock, &timeout,
					(!tsk->cong_link_cnt &&
					 !tsk_conn_cong(tsk) &&
					 tipc_sk_connected(sk)));
		if (unlikely(rc))
			break;
		send = min_t(size_t, dlen - sent, TIPC_MAX_USER_MSG_SIZE);
		blocks = tsk->snd_backlog;
		if (tsk->oneway++ >= tsk->nagle_start && maxnagle &&
		    send <= maxnagle) {
			rc = tipc_msg_append(hdr, m, send, maxnagle, txq);
			if (unlikely(rc < 0))
				break;
			blocks += rc;
			tsk->msg_acc++;
			if (blocks <= 64 && tsk->expect_ack) {
				tsk->snd_backlog = blocks;
				sent += send;
				break;
			} else if (blocks > 64) {
				tsk->pkt_cnt += skb_queue_len(txq);
			} else {
				skb = skb_peek_tail(txq);
				if (skb) {
					msg_set_ack_required(buf_msg(skb));
					tsk->expect_ack = true;
				} else {
					tsk->expect_ack = false;
				}
				tsk->msg_acc = 0;
				tsk->pkt_cnt = 0;
			}
		} else {
			rc = tipc_msg_build(hdr, m, sent, send, maxpkt, txq);
			if (unlikely(rc != send))
				break;
			blocks += tsk_inc(tsk, send + MIN_H_SIZE);
		}
		trace_tipc_sk_sendstream(sk, skb_peek(txq),
					 TIPC_DUMP_SK_SNDQ, " ");
		rc = tipc_node_xmit(net, txq, dnode, tsk->portid);
		if (unlikely(rc == -ELINKCONG)) {
			tsk->cong_link_cnt = 1;
			rc = 0;
		}
		if (likely(!rc)) {
			tsk->snt_unacked += blocks;
			tsk->snd_backlog = 0;
			sent += send;
		}
	} while (sent < dlen && !rc);

	return sent ? sent : rc;
}

/**
 * tipc_send_packet - send a connection-oriented message
 * @sock: socket structure
 * @m: message to send
 * @dsz: length of data to be transmitted
 *
 * Used for SOCK_SEQPACKET messages.
 *
 * Return: the number of bytes sent on success, or errno otherwise
 */
static int tipc_send_packet(struct socket *sock, struct msghdr *m, size_t dsz)
{
	if (dsz > TIPC_MAX_USER_MSG_SIZE)
		return -EMSGSIZE;

	return tipc_sendstream(sock, m, dsz);
}

/* tipc_sk_finish_conn - complete the setup of a connection
 */
static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port,
				u32 peer_node)
{
	struct sock *sk = &tsk->sk;
	struct net *net = sock_net(sk);
	struct tipc_msg *msg = &tsk->phdr;

	msg_set_syn(msg, 0);
	msg_set_destnode(msg, peer_node);
	msg_set_destport(msg, peer_port);
	msg_set_type(msg, TIPC_CONN_MSG);
	msg_set_lookup_scope(msg, 0);
	msg_set_hdr_sz(msg, SHORT_H_SIZE);

	sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV);
	tipc_set_sk_state(sk, TIPC_ESTABLISHED);
	tipc_node_add_conn(net, peer_node, tsk->portid, peer_port);
	tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid, true);
	tsk->peer_caps = tipc_node_get_capabilities(net, peer_node);
	tsk_set_nagle(tsk);
	__skb_queue_purge(&sk->sk_write_queue);
	if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
		return;

	/* Fall back to message based flow control */
	tsk->rcv_win = FLOWCTL_MSG_WIN;
	tsk->snd_win = FLOWCTL_MSG_WIN;
}

/**
 * tipc_sk_set_orig_addr - capture sender's address for received message
 * @m: descriptor for message info
 * @skb: received message
 *
 * Note: Address is not captured if not requested by receiver.
 */
static void tipc_sk_set_orig_addr(struct msghdr *m, struct sk_buff *skb)
{
	DECLARE_SOCKADDR(struct sockaddr_pair *, srcaddr, m->msg_name);
	struct tipc_msg *hdr = buf_msg(skb);

	if (!srcaddr)
		return;

	srcaddr->sock.family = AF_TIPC;
	srcaddr->sock.addrtype = TIPC_SOCKET_ADDR;
	srcaddr->sock.scope = 0;
	srcaddr->sock.addr.id.ref = msg_origport(hdr);
	srcaddr->sock.addr.id.node = msg_orignode(hdr);
	srcaddr->sock.addr.name.domain = 0;
	m->msg_namelen = sizeof(struct sockaddr_tipc);

	if (!msg_in_group(hdr))
		return;

	/* Group message users may also want to know sending member's id */
	srcaddr->member.family = AF_TIPC;
	srcaddr->member.addrtype = TIPC_SERVICE_ADDR;
	srcaddr->member.scope = 0;
	srcaddr->member.addr.name.name.type = msg_nametype(hdr);
	srcaddr->member.addr.name.name.instance = TIPC_SKB_CB(skb)->orig_member;
	srcaddr->member.addr.name.domain = 0;
	m->msg_namelen = sizeof(*srcaddr);
}

/**
 * tipc_sk_anc_data_recv - optionally capture ancillary data for received message
 * @m: descriptor for message info
 * @skb: received message buffer
 * @tsk: TIPC port associated with message
 *
 * Note: Ancillary data is not captured if not requested by receiver.
 *
 * Return: 0 if successful, otherwise errno
 */
static int tipc_sk_anc_data_recv(struct msghdr *m, struct sk_buff *skb,
				 struct tipc_sock *tsk)
{
	struct tipc_msg *hdr;
	u32 data[3] = {0,};
	bool has_addr;
	int dlen, rc;

	if (likely(m->msg_controllen == 0))
		return 0;

	hdr = buf_msg(skb);
	dlen = msg_data_sz(hdr);

	/* Capture errored message object, if any */
	if (msg_errcode(hdr)) {
		if (skb_linearize(skb))
			return -ENOMEM;
		hdr = buf_msg(skb);
		data[0] = msg_errcode(hdr);
		data[1] = dlen;
		rc = put_cmsg(m, SOL_TIPC, TIPC_ERRINFO, 8, data);
		if (rc || !dlen)
			return rc;
		rc = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, dlen, msg_data(hdr));
		if (rc)
			return rc;
	}

	/* Capture TIPC_SERVICE_ADDR/RANGE destination address, if any */
	switch (msg_type(hdr)) {
	case TIPC_NAMED_MSG:
		has_addr = true;
		data[0] = msg_nametype(hdr);
		data[1] = msg_namelower(hdr);
		data[2] = data[1];
		break;
	case TIPC_MCAST_MSG:
		has_addr = true;
		data[0] = msg_nametype(hdr);
		data[1] = msg_namelower(hdr);
		data[2] = msg_nameupper(hdr);
		break;
	case TIPC_CONN_MSG:
		has_addr = !!tsk->conn_addrtype;
		data[0] = msg_nametype(&tsk->phdr);
		data[1] = msg_nameinst(&tsk->phdr);
		data[2] = data[1];
		break;
	default:
		has_addr = false;
	}
	if (!has_addr)
		return 0;
	return put_cmsg(m, SOL_TIPC, TIPC_DESTNAME, 12, data);
}
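
/* Illustrative user-space retrieval of the ancillary data built above
 * (editorial sketch): the receiver supplies a control buffer and walks
 * the returned cmsgs:
 *
 *	char ctl[128];
 *	struct msghdr m = { .msg_control = ctl,
 *			    .msg_controllen = sizeof(ctl) };
 *	recvmsg(sd, &m, 0);
 *	for (struct cmsghdr *c = CMSG_FIRSTHDR(&m); c;
 *	     c = CMSG_NXTHDR(&m, c))
 *		if (c->cmsg_level == SOL_TIPC &&
 *		    c->cmsg_type == TIPC_DESTNAME)
 *			;// CMSG_DATA(c) holds the u32 triple written above
 */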

static struct sk_buff *tipc_sk_build_ack(struct tipc_sock *tsk)
{
	struct sock *sk = &tsk->sk;
	struct sk_buff *skb = NULL;
	struct tipc_msg *msg;
	u32 peer_port = tsk_peer_port(tsk);
	u32 dnode = tsk_peer_node(tsk);

	if (!tipc_sk_connected(sk))
		return NULL;
	skb = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0,
			      dnode, tsk_own_node(tsk), peer_port,
			      tsk->portid, TIPC_OK);
	if (!skb)
		return NULL;
	msg = buf_msg(skb);
	msg_set_conn_ack(msg, tsk->rcv_unacked);
	tsk->rcv_unacked = 0;

	/* Adjust to and advertise the correct window limit */
	if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL) {
		tsk->rcv_win = tsk_adv_blocks(tsk->sk.sk_rcvbuf);
		msg_set_adv_win(msg, tsk->rcv_win);
	}
	return skb;
}
1816
1817static void tipc_sk_send_ack(struct tipc_sock *tsk)
1818{
1819 struct sk_buff *skb;
1820
1821 skb = tipc_sk_build_ack(tsk);
1822 if (!skb)
1823 return;
1824
1825 tipc_node_xmit_skb(sock_net(&tsk->sk), skb, tsk_peer_node(tsk),
1826 msg_link_selector(buf_msg(skb)));
1827}
1828
1829static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
1830{
1831 struct sock *sk = sock->sk;
1832 DEFINE_WAIT_FUNC(wait, woken_wake_function);
1833 long timeo = *timeop;
1834 int err = sock_error(sk);
1835
1836 if (err)
1837 return err;
1838
1839 for (;;) {
1840 if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
1841 if (sk->sk_shutdown & RCV_SHUTDOWN) {
1842 err = -ENOTCONN;
1843 break;
1844 }
1845 add_wait_queue(sk_sleep(sk), &wait);
1846 release_sock(sk);
1847 timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
1848 sched_annotate_sleep();
1849 lock_sock(sk);
1850 remove_wait_queue(sk_sleep(sk), &wait);
1851 }
1852 err = 0;
1853 if (!skb_queue_empty(&sk->sk_receive_queue))
1854 break;
1855 err = -EAGAIN;
1856 if (!timeo)
1857 break;
1858 err = sock_intr_errno(timeo);
1859 if (signal_pending(current))
1860 break;
1861
1862 err = sock_error(sk);
1863 if (err)
1864 break;
1865 }
1866 *timeop = timeo;
1867 return err;
1868}
1869
1870/**
1871 * tipc_recvmsg - receive packet-oriented message
1872 * @sock: network socket
1873 * @m: descriptor for message info
1874 * @buflen: length of user buffer area
1875 * @flags: receive flags
1876 *
1877 * Used for SOCK_DGRAM, SOCK_RDM, and SOCK_SEQPACKET messages.
1878 * If the complete message doesn't fit in the user buffer area, it is truncated.
1879 *
1880 * Return: size of returned message data, errno otherwise
1881 */
1882static int tipc_recvmsg(struct socket *sock, struct msghdr *m,
1883 size_t buflen, int flags)
1884{
1885 struct sock *sk = sock->sk;
1886 bool connected = !tipc_sk_type_connectionless(sk);
1887 struct tipc_sock *tsk = tipc_sk(sk);
1888 int rc, err, hlen, dlen, copy;
1889 struct tipc_skb_cb *skb_cb;
1890 struct sk_buff_head xmitq;
1891 struct tipc_msg *hdr;
1892 struct sk_buff *skb;
1893 bool grp_evt;
1894 long timeout;
1895
1896 /* Catch invalid receive requests */
1897 if (unlikely(!buflen))
1898 return -EINVAL;
1899
1900 lock_sock(sk);
1901 if (unlikely(connected && sk->sk_state == TIPC_OPEN)) {
1902 rc = -ENOTCONN;
1903 goto exit;
1904 }
1905 timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1906
1907 /* Step rcv queue to first msg with data or error; wait if necessary */
1908 do {
1909 rc = tipc_wait_for_rcvmsg(sock, &timeout);
1910 if (unlikely(rc))
1911 goto exit;
1912 skb = skb_peek(&sk->sk_receive_queue);
1913 skb_cb = TIPC_SKB_CB(skb);
1914 hdr = buf_msg(skb);
1915 dlen = msg_data_sz(hdr);
1916 hlen = msg_hdr_sz(hdr);
1917 err = msg_errcode(hdr);
1918 grp_evt = msg_is_grp_evt(hdr);
1919 if (likely(dlen || err))
1920 break;
1921 tsk_advance_rx_queue(sk);
1922 } while (1);
1923
1924 /* Collect msg meta data, including error code and rejected data */
1925 tipc_sk_set_orig_addr(m, skb);
1926 rc = tipc_sk_anc_data_recv(m, skb, tsk);
1927 if (unlikely(rc))
1928 goto exit;
1929 hdr = buf_msg(skb);
1930
1931 /* Capture data if non-error msg, otherwise just set return value */
1932 if (likely(!err)) {
1933 int offset = skb_cb->bytes_read;
1934
1935 copy = min_t(int, dlen - offset, buflen);
1936 rc = skb_copy_datagram_msg(skb, hlen + offset, m, copy);
1937 if (unlikely(rc))
1938 goto exit;
1939 if (unlikely(offset + copy < dlen)) {
1940 if (flags & MSG_EOR) {
1941 if (!(flags & MSG_PEEK))
1942 skb_cb->bytes_read = offset + copy;
1943 } else {
1944 m->msg_flags |= MSG_TRUNC;
1945 skb_cb->bytes_read = 0;
1946 }
1947 } else {
1948 if (flags & MSG_EOR)
1949 m->msg_flags |= MSG_EOR;
1950 skb_cb->bytes_read = 0;
1951 }
1952 } else {
1953 copy = 0;
1954 rc = 0;
1955 if (err != TIPC_CONN_SHUTDOWN && connected && !m->msg_control) {
1956 rc = -ECONNRESET;
1957 goto exit;
1958 }
1959 }
1960
1961 /* Mark message as group event if applicable */
1962 if (unlikely(grp_evt)) {
1963 if (msg_grp_evt(hdr) == TIPC_WITHDRAWN)
1964 m->msg_flags |= MSG_EOR;
1965 m->msg_flags |= MSG_OOB;
1966 copy = 0;
1967 }
1968
1969 /* Capture of data or error code/rejected data was successful */
1970 if (unlikely(flags & MSG_PEEK))
1971 goto exit;
1972
1973 /* Send group flow control advertisement when applicable */
1974 if (tsk->group && msg_in_group(hdr) && !grp_evt) {
1975 __skb_queue_head_init(&xmitq);
1976 tipc_group_update_rcv_win(tsk->group, tsk_blocks(hlen + dlen),
1977 msg_orignode(hdr), msg_origport(hdr),
1978 &xmitq);
1979 tipc_node_distr_xmit(sock_net(sk), &xmitq);
1980 }
1981
1982 if (skb_cb->bytes_read)
1983 goto exit;
1984
1985 tsk_advance_rx_queue(sk);
1986
1987 if (likely(!connected))
1988 goto exit;
1989
1990 /* Send connection flow control advertisement when applicable */
1991 tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen);
1992 if (tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE)
1993 tipc_sk_send_ack(tsk);
1994exit:
1995 release_sock(sk);
1996 return rc ? rc : copy;
1997}
1998
1999/**
2000 * tipc_recvstream - receive stream-oriented data
2001 * @sock: network socket
2002 * @m: descriptor for message info
2003 * @buflen: total size of user buffer area
2004 * @flags: receive flags
2005 *
2006 * Used for SOCK_STREAM messages only. If not enough data is available,
2007 * it will optionally wait for more; it never truncates data.
2008 *
2009 * Return: size of returned message data, errno otherwise
2010 */
2011static int tipc_recvstream(struct socket *sock, struct msghdr *m,
2012 size_t buflen, int flags)
2013{
2014 struct sock *sk = sock->sk;
2015 struct tipc_sock *tsk = tipc_sk(sk);
2016 struct sk_buff *skb;
2017 struct tipc_msg *hdr;
2018 struct tipc_skb_cb *skb_cb;
2019 bool peek = flags & MSG_PEEK;
2020 int offset, required, copy, copied = 0;
2021 int hlen, dlen, err, rc;
2022 long timeout;
2023
2024 /* Catch invalid receive attempts */
2025 if (unlikely(!buflen))
2026 return -EINVAL;
2027
2028 lock_sock(sk);
2029
2030 if (unlikely(sk->sk_state == TIPC_OPEN)) {
2031 rc = -ENOTCONN;
2032 goto exit;
2033 }
2034 required = sock_rcvlowat(sk, flags & MSG_WAITALL, buflen);
2035 timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
2036
2037 do {
2038 /* Look at first msg in receive queue; wait if necessary */
2039 rc = tipc_wait_for_rcvmsg(sock, &timeout);
2040 if (unlikely(rc))
2041 break;
2042 skb = skb_peek(&sk->sk_receive_queue);
2043 skb_cb = TIPC_SKB_CB(skb);
2044 hdr = buf_msg(skb);
2045 dlen = msg_data_sz(hdr);
2046 hlen = msg_hdr_sz(hdr);
2047 err = msg_errcode(hdr);
2048
2049 /* Discard any empty non-errored (SYN-) message */
2050 if (unlikely(!dlen && !err)) {
2051 tsk_advance_rx_queue(sk);
2052 continue;
2053 }
2054
2055 /* Collect msg meta data, incl. error code and rejected data */
2056 if (!copied) {
2057 tipc_sk_set_orig_addr(m, skb);
2058 rc = tipc_sk_anc_data_recv(m, skb, tsk);
2059 if (rc)
2060 break;
2061 hdr = buf_msg(skb);
2062 }
2063
2064 /* Copy data if msg ok, otherwise return error/partial data */
2065 if (likely(!err)) {
2066 offset = skb_cb->bytes_read;
2067 copy = min_t(int, dlen - offset, buflen - copied);
2068 rc = skb_copy_datagram_msg(skb, hlen + offset, m, copy);
2069 if (unlikely(rc))
2070 break;
2071 copied += copy;
2072 offset += copy;
2073 if (unlikely(offset < dlen)) {
2074 if (!peek)
2075 skb_cb->bytes_read = offset;
2076 break;
2077 }
2078 } else {
2079 rc = 0;
2080 if ((err != TIPC_CONN_SHUTDOWN) && !m->msg_control)
2081 rc = -ECONNRESET;
2082 if (copied || rc)
2083 break;
2084 }
2085
2086 if (unlikely(peek))
2087 break;
2088
2089 tsk_advance_rx_queue(sk);
2090
2091 /* Send connection flow control advertisement when applicable */
2092 tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen);
2093 if (tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE)
2094 tipc_sk_send_ack(tsk);
2095
2096 /* Exit if all requested data or FIN/error received */
2097 if (copied == buflen || err)
2098 break;
2099
2100 } while (!skb_queue_empty(&sk->sk_receive_queue) || copied < required);
2101exit:
2102 release_sock(sk);
2103 return copied ? copied : rc;
2104}
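
/* A hypothetical userspace sketch (never built) of the MSG_WAITALL path
 * above: the flag raises 'required' via sock_rcvlowat(), so the loop in
 * tipc_recvstream() keeps going until the whole buffer is filled, the
 * timeout expires, or an error/shutdown arrives. read_exact() is our
 * assumed helper name.
 */
#if 0
#include <sys/socket.h>
#include <sys/types.h>

static ssize_t read_exact(int sd, void *buf, size_t len)
{
	return recv(sd, buf, len, MSG_WAITALL);
}
#endif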
2105
2106/**
2107 * tipc_write_space - wake up thread if port congestion is released
2108 * @sk: socket
2109 */
2110static void tipc_write_space(struct sock *sk)
2111{
2112 struct socket_wq *wq;
2113
2114 rcu_read_lock();
2115 wq = rcu_dereference(sk->sk_wq);
2116 if (skwq_has_sleeper(wq))
2117 wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
2118 EPOLLWRNORM | EPOLLWRBAND);
2119 rcu_read_unlock();
2120}
2121
2122/**
2123 * tipc_data_ready - wake up threads to indicate messages have been received
2124 * @sk: socket
2125 */
2126static void tipc_data_ready(struct sock *sk)
2127{
2128 struct socket_wq *wq;
2129
2130 rcu_read_lock();
2131 wq = rcu_dereference(sk->sk_wq);
2132 if (skwq_has_sleeper(wq))
2133 wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN |
2134 EPOLLRDNORM | EPOLLRDBAND);
2135 rcu_read_unlock();
2136}
2137
2138static void tipc_sock_destruct(struct sock *sk)
2139{
2140 __skb_queue_purge(&sk->sk_receive_queue);
2141}
2142
2143static void tipc_sk_proto_rcv(struct sock *sk,
2144 struct sk_buff_head *inputq,
2145 struct sk_buff_head *xmitq)
2146{
2147 struct sk_buff *skb = __skb_dequeue(inputq);
2148 struct tipc_sock *tsk = tipc_sk(sk);
2149 struct tipc_msg *hdr = buf_msg(skb);
2150 struct tipc_group *grp = tsk->group;
2151 bool wakeup = false;
2152
2153 switch (msg_user(hdr)) {
2154 case CONN_MANAGER:
2155 tipc_sk_conn_proto_rcv(tsk, skb, inputq, xmitq);
2156 return;
2157 case SOCK_WAKEUP:
2158 tipc_dest_del(&tsk->cong_links, msg_orignode(hdr), 0);
2159 /* coupled with smp_rmb() in tipc_wait_for_cond() */
2160 smp_wmb();
2161 tsk->cong_link_cnt--;
2162 wakeup = true;
2163 tipc_sk_push_backlog(tsk, false);
2164 break;
2165 case GROUP_PROTOCOL:
2166 tipc_group_proto_rcv(grp, &wakeup, hdr, inputq, xmitq);
2167 break;
2168 case TOP_SRV:
2169 tipc_group_member_evt(tsk->group, &wakeup, &sk->sk_rcvbuf,
2170 hdr, inputq, xmitq);
2171 break;
2172 default:
2173 break;
2174 }
2175
2176 if (wakeup)
2177 sk->sk_write_space(sk);
2178
2179 kfree_skb(skb);
2180}
2181
2182/**
2183 * tipc_sk_filter_connect - check incoming message for a connection-based socket
2184 * @tsk: TIPC socket
2185 * @skb: pointer to message buffer.
2186 * @xmitq: for Nagle ACK if any
2187 * Return: true if message should be added to receive queue, false otherwise
2188 */
2189static bool tipc_sk_filter_connect(struct tipc_sock *tsk, struct sk_buff *skb,
2190 struct sk_buff_head *xmitq)
2191{
2192 struct sock *sk = &tsk->sk;
2193 struct net *net = sock_net(sk);
2194 struct tipc_msg *hdr = buf_msg(skb);
2195 bool con_msg = msg_connected(hdr);
2196 u32 pport = tsk_peer_port(tsk);
2197 u32 pnode = tsk_peer_node(tsk);
2198 u32 oport = msg_origport(hdr);
2199 u32 onode = msg_orignode(hdr);
2200 int err = msg_errcode(hdr);
2201 unsigned long delay;
2202
2203 if (unlikely(msg_mcast(hdr)))
2204 return false;
2205 tsk->oneway = 0;
2206
2207 switch (sk->sk_state) {
2208 case TIPC_CONNECTING:
2209 /* Setup ACK */
2210 if (likely(con_msg)) {
2211 if (err)
2212 break;
2213 tipc_sk_finish_conn(tsk, oport, onode);
2214 msg_set_importance(&tsk->phdr, msg_importance(hdr));
2215 /* ACK+ message with data is added to receive queue */
2216 if (msg_data_sz(hdr))
2217 return true;
2218 /* Empty ACK- message: wake up sleeping connect() and drop */
2219 sk->sk_state_change(sk);
2220 msg_set_dest_droppable(hdr, 1);
2221 return false;
2222 }
2223 /* Ignore connectionless message if not from listening socket */
2224 if (oport != pport || onode != pnode)
2225 return false;
2226
2227 /* Rejected SYN */
2228 if (err != TIPC_ERR_OVERLOAD)
2229 break;
2230
2231 /* Prepare for new setup attempt if we have a SYN clone */
2232 if (skb_queue_empty(&sk->sk_write_queue))
2233 break;
2234 get_random_bytes(&delay, 2);
2235 delay %= (tsk->conn_timeout / 4);
2236 delay = msecs_to_jiffies(delay + 100);
2237 sk_reset_timer(sk, &sk->sk_timer, jiffies + delay);
2238 return false;
2239 case TIPC_OPEN:
2240 case TIPC_DISCONNECTING:
2241 return false;
2242 case TIPC_LISTEN:
2243 /* Accept only SYN message */
2244 if (!msg_is_syn(hdr) &&
2245 tipc_node_get_capabilities(net, onode) & TIPC_SYN_BIT)
2246 return false;
2247 if (!con_msg && !err)
2248 return true;
2249 return false;
2250 case TIPC_ESTABLISHED:
2251 if (!skb_queue_empty(&sk->sk_write_queue))
2252 tipc_sk_push_backlog(tsk, false);
2253 /* Accept only connection-based messages sent by peer */
2254 if (likely(con_msg && !err && pport == oport &&
2255 pnode == onode)) {
2256 if (msg_ack_required(hdr)) {
2257 struct sk_buff *skb;
2258
2259 skb = tipc_sk_build_ack(tsk);
2260 if (skb) {
2261 msg_set_nagle_ack(buf_msg(skb));
2262 __skb_queue_tail(xmitq, skb);
2263 }
2264 }
2265 return true;
2266 }
2267 if (!tsk_peer_msg(tsk, hdr))
2268 return false;
2269 if (!err)
2270 return true;
2271 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
2272 tipc_node_remove_conn(net, pnode, tsk->portid);
2273 sk->sk_state_change(sk);
2274 return true;
2275 default:
2276 pr_err("Unknown sk_state %u\n", sk->sk_state);
2277 }
2278 /* Abort connection setup attempt */
2279 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
2280 sk->sk_err = ECONNREFUSED;
2281 sk->sk_state_change(sk);
2282 return true;
2283}
2284
2285/**
2286 * rcvbuf_limit - get proper overload limit of socket receive queue
2287 * @sk: socket
2288 * @skb: message
2289 *
2290 * For connection-oriented messages, irrespective of importance,
2291 * the default queue limit is 2 MB.
2292 *
2293 * For connectionless messages, queue limits are based on message
2294 * importance as follows:
2295 *
2296 * TIPC_LOW_IMPORTANCE (2 MB)
2297 * TIPC_MEDIUM_IMPORTANCE (4 MB)
2298 * TIPC_HIGH_IMPORTANCE (8 MB)
2299 * TIPC_CRITICAL_IMPORTANCE (16 MB)
2300 *
2301 * Return: overload limit according to corresponding message importance
2302 */
2303static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *skb)
2304{
2305 struct tipc_sock *tsk = tipc_sk(sk);
2306 struct tipc_msg *hdr = buf_msg(skb);
2307
2308 if (unlikely(msg_in_group(hdr)))
2309 return READ_ONCE(sk->sk_rcvbuf);
2310
2311 if (unlikely(!msg_connected(hdr)))
2312 return READ_ONCE(sk->sk_rcvbuf) << msg_importance(hdr);
2313
2314 if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
2315 return READ_ONCE(sk->sk_rcvbuf);
2316
2317 return FLOWCTL_MSG_LIM;
2318}
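
/* Worked illustration (never built) of the connectionless scaling above:
 * the message importance (0..3) is used as a shift count, so with the
 * default sk_rcvbuf of roughly 2 MB the limits come out as the
 * 2/4/8/16 MB table in the kernel-doc. demo_rcvbuf_limit() is
 * illustrative only, not kernel API.
 */
#if 0
static unsigned int demo_rcvbuf_limit(unsigned int rcvbuf, unsigned int imp)
{
	return rcvbuf << imp;	/* TIPC_LOW..TIPC_CRITICAL_IMPORTANCE = 0..3 */
}
#endif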
2319
2320/**
2321 * tipc_sk_filter_rcv - validate incoming message
2322 * @sk: socket
2323 * @skb: pointer to message.
2324 * @xmitq: output message area (FIXME)
2325 *
2326 * Enqueues message on receive queue if acceptable; optionally handles
2327 * disconnect indication for a connected socket.
2328 *
2329 * Called with socket lock already taken
2330 */
2331static void tipc_sk_filter_rcv(struct sock *sk, struct sk_buff *skb,
2332 struct sk_buff_head *xmitq)
2333{
2334 bool sk_conn = !tipc_sk_type_connectionless(sk);
2335 struct tipc_sock *tsk = tipc_sk(sk);
2336 struct tipc_group *grp = tsk->group;
2337 struct tipc_msg *hdr = buf_msg(skb);
2338 struct net *net = sock_net(sk);
2339 struct sk_buff_head inputq;
2340 int mtyp = msg_type(hdr);
2341 int limit, err = TIPC_OK;
2342
2343 trace_tipc_sk_filter_rcv(sk, skb, TIPC_DUMP_ALL, " ");
2344 TIPC_SKB_CB(skb)->bytes_read = 0;
2345 __skb_queue_head_init(&inputq);
2346 __skb_queue_tail(&inputq, skb);
2347
2348 if (unlikely(!msg_isdata(hdr)))
2349 tipc_sk_proto_rcv(sk, &inputq, xmitq);
2350
2351 if (unlikely(grp))
2352 tipc_group_filter_msg(grp, &inputq, xmitq);
2353
2354 if (unlikely(!grp) && mtyp == TIPC_MCAST_MSG)
2355 tipc_mcast_filter_msg(net, &tsk->mc_method.deferredq, &inputq);
2356
2357 /* Validate and add to receive buffer if there is space */
2358 while ((skb = __skb_dequeue(&inputq))) {
2359 hdr = buf_msg(skb);
2360 limit = rcvbuf_limit(sk, skb);
2361 if ((sk_conn && !tipc_sk_filter_connect(tsk, skb, xmitq)) ||
2362 (!sk_conn && msg_connected(hdr)) ||
2363 (!grp && msg_in_group(hdr)))
2364 err = TIPC_ERR_NO_PORT;
2365 else if (sk_rmem_alloc_get(sk) + skb->truesize >= limit) {
2366 trace_tipc_sk_dump(sk, skb, TIPC_DUMP_ALL,
2367 "err_overload2!");
2368 atomic_inc(&sk->sk_drops);
2369 err = TIPC_ERR_OVERLOAD;
2370 }
2371
2372 if (unlikely(err)) {
2373 if (tipc_msg_reverse(tipc_own_addr(net), &skb, err)) {
2374 trace_tipc_sk_rej_msg(sk, skb, TIPC_DUMP_NONE,
2375 "@filter_rcv!");
2376 __skb_queue_tail(xmitq, skb);
2377 }
2378 err = TIPC_OK;
2379 continue;
2380 }
2381 __skb_queue_tail(&sk->sk_receive_queue, skb);
2382 skb_set_owner_r(skb, sk);
2383 trace_tipc_sk_overlimit2(sk, skb, TIPC_DUMP_ALL,
2384 "rcvq >90% allocated!");
2385 sk->sk_data_ready(sk);
2386 }
2387}
2388
2389/**
2390 * tipc_sk_backlog_rcv - handle incoming message from backlog queue
2391 * @sk: socket
2392 * @skb: message
2393 *
2394 * Caller must hold socket lock
2395 */
2396static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
2397{
2398 unsigned int before = sk_rmem_alloc_get(sk);
2399 struct sk_buff_head xmitq;
2400 unsigned int added;
2401
2402 __skb_queue_head_init(&xmitq);
2403
2404 tipc_sk_filter_rcv(sk, skb, &xmitq);
2405 added = sk_rmem_alloc_get(sk) - before;
2406 atomic_add(added, &tipc_sk(sk)->dupl_rcvcnt);
2407
2408 /* Send pending response/rejected messages, if any */
2409 tipc_node_distr_xmit(sock_net(sk), &xmitq);
2410 return 0;
2411}
2412
2413/**
2414 * tipc_sk_enqueue - extract all buffers with destination 'dport' from
2415 * inputq and try adding them to socket or backlog queue
2416 * @inputq: list of incoming buffers with potentially different destinations
2417 * @sk: socket where the buffers should be enqueued
2418 * @dport: port number for the socket
2419 * @xmitq: output queue
2420 *
2421 * Caller must hold socket lock
2422 */
2423static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
2424 u32 dport, struct sk_buff_head *xmitq)
2425{
2426 unsigned long time_limit = jiffies + usecs_to_jiffies(20000);
2427 struct sk_buff *skb;
2428 unsigned int lim;
2429 atomic_t *dcnt;
2430 u32 onode;
2431
2432 while (skb_queue_len(inputq)) {
2433 if (unlikely(time_after_eq(jiffies, time_limit)))
2434 return;
2435
2436 skb = tipc_skb_dequeue(inputq, dport);
2437 if (unlikely(!skb))
2438 return;
2439
2440 /* Add message directly to receive queue if possible */
2441 if (!sock_owned_by_user(sk)) {
2442 tipc_sk_filter_rcv(sk, skb, xmitq);
2443 continue;
2444 }
2445
2446 /* Try backlog, compensating for double-counted bytes */
2447 dcnt = &tipc_sk(sk)->dupl_rcvcnt;
2448 if (!sk->sk_backlog.len)
2449 atomic_set(dcnt, 0);
2450 lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
2451 if (likely(!sk_add_backlog(sk, skb, lim))) {
2452 trace_tipc_sk_overlimit1(sk, skb, TIPC_DUMP_ALL,
2453 "bklg & rcvq >90% allocated!");
2454 continue;
2455 }
2456
2457 trace_tipc_sk_dump(sk, skb, TIPC_DUMP_ALL, "err_overload!");
2458 /* Overload => reject message back to sender */
2459 onode = tipc_own_addr(sock_net(sk));
2460 atomic_inc(&sk->sk_drops);
2461 if (tipc_msg_reverse(onode, &skb, TIPC_ERR_OVERLOAD)) {
2462 trace_tipc_sk_rej_msg(sk, skb, TIPC_DUMP_ALL,
2463 "@sk_enqueue!");
2464 __skb_queue_tail(xmitq, skb);
2465 }
2466 break;
2467 }
2468}
2469
2470/**
2471 * tipc_sk_rcv - handle a chain of incoming buffers
2472 * @net: the associated network namespace
2473 * @inputq: buffer list containing the buffers
2474 * Consumes all buffers in list until inputq is empty
2475 * Note: may be called in multiple threads referring to the same queue
2476 */
2477void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
2478{
2479 struct sk_buff_head xmitq;
2480 u32 dnode, dport = 0;
2481 int err;
2482 struct tipc_sock *tsk;
2483 struct sock *sk;
2484 struct sk_buff *skb;
2485
2486 __skb_queue_head_init(&xmitq);
2487 while (skb_queue_len(inputq)) {
2488 dport = tipc_skb_peek_port(inputq, dport);
2489 tsk = tipc_sk_lookup(net, dport);
2490
2491 if (likely(tsk)) {
2492 sk = &tsk->sk;
2493 if (likely(spin_trylock_bh(&sk->sk_lock.slock))) {
2494 tipc_sk_enqueue(inputq, sk, dport, &xmitq);
2495 spin_unlock_bh(&sk->sk_lock.slock);
2496 }
2497 /* Send pending response/rejected messages, if any */
2498 tipc_node_distr_xmit(sock_net(sk), &xmitq);
2499 sock_put(sk);
2500 continue;
2501 }
2502 /* No destination socket => dequeue skb if still there */
2503 skb = tipc_skb_dequeue(inputq, dport);
2504 if (!skb)
2505 return;
2506
2507 /* Try secondary lookup if unresolved named message */
2508 err = TIPC_ERR_NO_PORT;
2509 if (tipc_msg_lookup_dest(net, skb, &err))
2510 goto xmit;
2511
2512 /* Prepare for message rejection */
2513 if (!tipc_msg_reverse(tipc_own_addr(net), &skb, err))
2514 continue;
2515
2516 trace_tipc_sk_rej_msg(NULL, skb, TIPC_DUMP_NONE, "@sk_rcv!");
2517xmit:
2518 dnode = msg_destnode(buf_msg(skb));
2519 tipc_node_xmit_skb(net, skb, dnode, dport);
2520 }
2521}
2522
2523static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
2524{
2525 DEFINE_WAIT_FUNC(wait, woken_wake_function);
2526 struct sock *sk = sock->sk;
2527 int done;
2528
2529 do {
2530 int err = sock_error(sk);
2531 if (err)
2532 return err;
2533 if (!*timeo_p)
2534 return -ETIMEDOUT;
2535 if (signal_pending(current))
2536 return sock_intr_errno(*timeo_p);
2537 if (sk->sk_state == TIPC_DISCONNECTING)
2538 break;
2539
2540 add_wait_queue(sk_sleep(sk), &wait);
2541 done = sk_wait_event(sk, timeo_p, tipc_sk_connected(sk),
2542 &wait);
2543 remove_wait_queue(sk_sleep(sk), &wait);
2544 } while (!done);
2545 return 0;
2546}
2547
2548static bool tipc_sockaddr_is_sane(struct sockaddr_tipc *addr)
2549{
2550 if (addr->family != AF_TIPC)
2551 return false;
2552 if (addr->addrtype == TIPC_SERVICE_RANGE)
2553 return (addr->addr.nameseq.lower <= addr->addr.nameseq.upper);
2554 return (addr->addrtype == TIPC_SERVICE_ADDR ||
2555 addr->addrtype == TIPC_SOCKET_ADDR);
2556}
2557
2558/**
2559 * tipc_connect - establish a connection to another TIPC port
2560 * @sock: socket structure
2561 * @dest: socket address for destination port
2562 * @destlen: size of socket address data structure
2563 * @flags: file-related flags associated with socket
2564 *
2565 * Return: 0 on success, errno otherwise
2566 */
2567static int tipc_connect(struct socket *sock, struct sockaddr *dest,
2568 int destlen, int flags)
2569{
2570 struct sock *sk = sock->sk;
2571 struct tipc_sock *tsk = tipc_sk(sk);
2572 struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest;
2573 struct msghdr m = {NULL,};
2574 long timeout = (flags & O_NONBLOCK) ? 0 : tsk->conn_timeout;
2575 int previous;
2576 int res = 0;
2577
2578 if (destlen != sizeof(struct sockaddr_tipc))
2579 return -EINVAL;
2580
2581 lock_sock(sk);
2582
2583 if (tsk->group) {
2584 res = -EINVAL;
2585 goto exit;
2586 }
2587
2588 if (dst->family == AF_UNSPEC) {
2589 memset(&tsk->peer, 0, sizeof(struct sockaddr_tipc));
2590 if (!tipc_sk_type_connectionless(sk))
2591 res = -EINVAL;
2592 goto exit;
2593 }
2594 if (!tipc_sockaddr_is_sane(dst)) {
2595 res = -EINVAL;
2596 goto exit;
2597 }
2598 /* DGRAM/RDM connect(), just save the destaddr */
2599 if (tipc_sk_type_connectionless(sk)) {
2600 memcpy(&tsk->peer, dest, destlen);
2601 goto exit;
2602 } else if (dst->addrtype == TIPC_SERVICE_RANGE) {
2603 res = -EINVAL;
2604 goto exit;
2605 }
2606
2607 previous = sk->sk_state;
2608
2609 switch (sk->sk_state) {
2610 case TIPC_OPEN:
2611 /* Send a 'SYN-' to destination */
2612 m.msg_name = dest;
2613 m.msg_namelen = destlen;
2614
2615 /* If connect() is non-blocking, set MSG_DONTWAIT so that
2616 * __tipc_sendmsg() below never blocks.
2617 */
2618 if (!timeout)
2619 m.msg_flags = MSG_DONTWAIT;
2620
2621 res = __tipc_sendmsg(sock, &m, 0);
2622 if ((res < 0) && (res != -EWOULDBLOCK))
2623 goto exit;
2624
2625 /* Just entered TIPC_CONNECTING state; the only
2626 * difference is that the return value in the non-blocking
2627 * case is -EINPROGRESS rather than -EALREADY.
2628 */
2629 res = -EINPROGRESS;
2630 fallthrough;
2631 case TIPC_CONNECTING:
2632 if (!timeout) {
2633 if (previous == TIPC_CONNECTING)
2634 res = -EALREADY;
2635 goto exit;
2636 }
2637 timeout = msecs_to_jiffies(timeout);
2638 /* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */
2639 res = tipc_wait_for_connect(sock, &timeout);
2640 break;
2641 case TIPC_ESTABLISHED:
2642 res = -EISCONN;
2643 break;
2644 default:
2645 res = -EINVAL;
2646 }
2647
2648exit:
2649 release_sock(sk);
2650 return res;
2651}
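
/* A hypothetical userspace sketch (never built) of the connect() path
 * above: a SOCK_STREAM or SOCK_SEQPACKET socket connecting to a service
 * address. Note that TIPC_SERVICE_RANGE destinations are rejected with
 * -EINVAL for connection-oriented sockets. connect_to_service() is our
 * assumed helper name.
 */
#if 0
#include <string.h>
#include <sys/socket.h>
#include <linux/tipc.h>

static int connect_to_service(int sd, __u32 type, __u32 instance)
{
	struct sockaddr_tipc sa;

	memset(&sa, 0, sizeof(sa));
	sa.family = AF_TIPC;
	sa.addrtype = TIPC_SERVICE_ADDR;
	sa.addr.name.name.type = type;
	sa.addr.name.name.instance = instance;
	return connect(sd, (struct sockaddr *)&sa, sizeof(sa));
}
#endif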
2652
2653/**
2654 * tipc_listen - allow socket to listen for incoming connections
2655 * @sock: socket structure
2656 * @len: (unused)
2657 *
2658 * Return: 0 on success, errno otherwise
2659 */
2660static int tipc_listen(struct socket *sock, int len)
2661{
2662 struct sock *sk = sock->sk;
2663 int res;
2664
2665 lock_sock(sk);
2666 res = tipc_set_sk_state(sk, TIPC_LISTEN);
2667 release_sock(sk);
2668
2669 return res;
2670}
2671
2672static int tipc_wait_for_accept(struct socket *sock, long timeo)
2673{
2674 struct sock *sk = sock->sk;
2675 DEFINE_WAIT_FUNC(wait, woken_wake_function);
2676 int err;
2677
2678 /* True wake-one mechanism for incoming connections: only
2679 * one process gets woken up, not the 'whole herd'.
2680 * Since we do not 'race & poll' for established sockets
2681 * anymore, the common case will execute the loop only once.
2682 */
2683 for (;;) {
2684 if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
2685 add_wait_queue(sk_sleep(sk), &wait);
2686 release_sock(sk);
2687 timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
2688 lock_sock(sk);
2689 remove_wait_queue(sk_sleep(sk), &wait);
2690 }
2691 err = 0;
2692 if (!skb_queue_empty(&sk->sk_receive_queue))
2693 break;
2694 err = -EAGAIN;
2695 if (!timeo)
2696 break;
2697 err = sock_intr_errno(timeo);
2698 if (signal_pending(current))
2699 break;
2700 }
2701 return err;
2702}
2703
2704/**
2705 * tipc_accept - wait for connection request
2706 * @sock: listening socket
2707 * @new_sock: new socket that is to be connected
2708 * @flags: file-related flags associated with socket
2709 * @kern: caused by kernel or by userspace?
2710 *
2711 * Return: 0 on success, errno otherwise
2712 */
2713static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
2714 bool kern)
2715{
2716 struct sock *new_sk, *sk = sock->sk;
2717 struct tipc_sock *new_tsock;
2718 struct msghdr m = {NULL,};
2719 struct tipc_msg *msg;
2720 struct sk_buff *buf;
2721 long timeo;
2722 int res;
2723
2724 lock_sock(sk);
2725
2726 if (sk->sk_state != TIPC_LISTEN) {
2727 res = -EINVAL;
2728 goto exit;
2729 }
2730 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
2731 res = tipc_wait_for_accept(sock, timeo);
2732 if (res)
2733 goto exit;
2734
2735 buf = skb_peek(&sk->sk_receive_queue);
2736
2737 res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, kern);
2738 if (res)
2739 goto exit;
2740 security_sk_clone(sock->sk, new_sock->sk);
2741
2742 new_sk = new_sock->sk;
2743 new_tsock = tipc_sk(new_sk);
2744 msg = buf_msg(buf);
2745
2746 /* we lock on new_sk, but lockdep sees the lock on sk */
2747 lock_sock_nested(new_sk, SINGLE_DEPTH_NESTING);
2748
2749 /*
2750 * Reject any stray messages received by new socket
2751 * before the socket lock was taken (very, very unlikely)
2752 */
2753 tsk_rej_rx_queue(new_sk, TIPC_ERR_NO_PORT);
2754
2755 /* Connect new socket to its peer */
2756 tipc_sk_finish_conn(new_tsock, msg_origport(msg), msg_orignode(msg));
2757
2758 tsk_set_importance(new_sk, msg_importance(msg));
2759 if (msg_named(msg)) {
2760 new_tsock->conn_addrtype = TIPC_SERVICE_ADDR;
2761 msg_set_nametype(&new_tsock->phdr, msg_nametype(msg));
2762 msg_set_nameinst(&new_tsock->phdr, msg_nameinst(msg));
2763 }
2764
2765 /*
2766 * Respond to 'SYN-' by discarding it & returning 'ACK'.
2767 * Respond to 'SYN+' by queuing it on new socket & returning 'ACK'.
2768 */
2769 if (!msg_data_sz(msg)) {
2770 tsk_advance_rx_queue(sk);
2771 } else {
2772 __skb_dequeue(&sk->sk_receive_queue);
2773 __skb_queue_head(&new_sk->sk_receive_queue, buf);
2774 skb_set_owner_r(buf, new_sk);
2775 }
2776 __tipc_sendstream(new_sock, &m, 0);
2777 release_sock(new_sk);
2778exit:
2779 release_sock(sk);
2780 return res;
2781}
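
/* A hypothetical userspace sketch (never built) of the accept() flow
 * above: bind a service address, listen (the backlog length is unused,
 * see tipc_listen()), and accept; any 'SYN+' payload queued on the new
 * socket above becomes readable there. serve_one() is an assumed name.
 */
#if 0
#include <string.h>
#include <sys/socket.h>
#include <linux/tipc.h>

static int serve_one(__u32 type, __u32 instance)
{
	struct sockaddr_tipc sa;
	int lsd;

	lsd = socket(AF_TIPC, SOCK_SEQPACKET, 0);
	if (lsd < 0)
		return -1;
	memset(&sa, 0, sizeof(sa));
	sa.family = AF_TIPC;
	sa.addrtype = TIPC_SERVICE_ADDR;
	sa.scope = TIPC_CLUSTER_SCOPE;
	sa.addr.name.name.type = type;
	sa.addr.name.name.instance = instance;
	if (bind(lsd, (struct sockaddr *)&sa, sizeof(sa)) ||
	    listen(lsd, 0))
		return -1;
	return accept(lsd, NULL, NULL);
}
#endif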
2782
2783/**
2784 * tipc_shutdown - shutdown socket connection
2785 * @sock: socket structure
2786 * @how: direction to close (must be SHUT_RDWR)
2787 *
2788 * Terminates connection (if necessary), then purges socket's receive queue.
2789 *
2790 * Return: 0 on success, errno otherwise
2791 */
2792static int tipc_shutdown(struct socket *sock, int how)
2793{
2794 struct sock *sk = sock->sk;
2795 int res;
2796
2797 if (how != SHUT_RDWR)
2798 return -EINVAL;
2799
2800 lock_sock(sk);
2801
2802 trace_tipc_sk_shutdown(sk, NULL, TIPC_DUMP_ALL, " ");
2803 __tipc_shutdown(sock, TIPC_CONN_SHUTDOWN);
2804 sk->sk_shutdown = SHUTDOWN_MASK;
2805
2806 if (sk->sk_state == TIPC_DISCONNECTING) {
2807 /* Discard any unreceived messages */
2808 __skb_queue_purge(&sk->sk_receive_queue);
2809
2810 res = 0;
2811 } else {
2812 res = -ENOTCONN;
2813 }
2814 /* Wake up anyone sleeping in poll. */
2815 sk->sk_state_change(sk);
2816
2817 release_sock(sk);
2818 return res;
2819}
2820
2821static void tipc_sk_check_probing_state(struct sock *sk,
2822 struct sk_buff_head *list)
2823{
2824 struct tipc_sock *tsk = tipc_sk(sk);
2825 u32 pnode = tsk_peer_node(tsk);
2826 u32 pport = tsk_peer_port(tsk);
2827 u32 self = tsk_own_node(tsk);
2828 u32 oport = tsk->portid;
2829 struct sk_buff *skb;
2830
2831 if (tsk->probe_unacked) {
2832 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
2833 sk->sk_err = ECONNABORTED;
2834 tipc_node_remove_conn(sock_net(sk), pnode, pport);
2835 sk->sk_state_change(sk);
2836 return;
2837 }
2838 /* Prepare new probe */
2839 skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE, 0,
2840 pnode, self, pport, oport, TIPC_OK);
2841 if (skb)
2842 __skb_queue_tail(list, skb);
2843 tsk->probe_unacked = true;
2844 sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV);
2845}
2846
2847static void tipc_sk_retry_connect(struct sock *sk, struct sk_buff_head *list)
2848{
2849 struct tipc_sock *tsk = tipc_sk(sk);
2850
2851 /* Try again later if dest link is congested */
2852 if (tsk->cong_link_cnt) {
2853 sk_reset_timer(sk, &sk->sk_timer, msecs_to_jiffies(100));
2854 return;
2855 }
2856 /* Prepare SYN for retransmit */
2857 tipc_msg_skb_clone(&sk->sk_write_queue, list);
2858}
2859
2860static void tipc_sk_timeout(struct timer_list *t)
2861{
2862 struct sock *sk = from_timer(sk, t, sk_timer);
2863 struct tipc_sock *tsk = tipc_sk(sk);
2864 u32 pnode = tsk_peer_node(tsk);
2865 struct sk_buff_head list;
2866 int rc = 0;
2867
2868 __skb_queue_head_init(&list);
2869 bh_lock_sock(sk);
2870
2871 /* Try again later if socket is busy */
2872 if (sock_owned_by_user(sk)) {
2873 sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ / 20);
2874 bh_unlock_sock(sk);
2875 sock_put(sk);
2876 return;
2877 }
2878
2879 if (sk->sk_state == TIPC_ESTABLISHED)
2880 tipc_sk_check_probing_state(sk, &list);
2881 else if (sk->sk_state == TIPC_CONNECTING)
2882 tipc_sk_retry_connect(sk, &list);
2883
2884 bh_unlock_sock(sk);
2885
2886 if (!skb_queue_empty(&list))
2887 rc = tipc_node_xmit(sock_net(sk), &list, pnode, tsk->portid);
2888
2889 /* SYN messages may cause link congestion */
2890 if (rc == -ELINKCONG) {
2891 tipc_dest_push(&tsk->cong_links, pnode, 0);
2892 tsk->cong_link_cnt = 1;
2893 }
2894 sock_put(sk);
2895}
2896
2897static int tipc_sk_publish(struct tipc_sock *tsk, struct tipc_uaddr *ua)
2898{
2899 struct sock *sk = &tsk->sk;
2900 struct net *net = sock_net(sk);
2901 struct tipc_socket_addr skaddr;
2902 struct publication *p;
2903 u32 key;
2904
2905 if (tipc_sk_connected(sk))
2906 return -EINVAL;
2907 key = tsk->portid + tsk->pub_count + 1;
2908 if (key == tsk->portid)
2909 return -EADDRINUSE;
2910 skaddr.ref = tsk->portid;
2911 skaddr.node = tipc_own_addr(net);
2912 p = tipc_nametbl_publish(net, ua, &skaddr, key);
2913 if (unlikely(!p))
2914 return -EINVAL;
2915
2916 list_add(&p->binding_sock, &tsk->publications);
2917 tsk->pub_count++;
2918 tsk->published = true;
2919 return 0;
2920}
2921
2922static int tipc_sk_withdraw(struct tipc_sock *tsk, struct tipc_uaddr *ua)
2923{
2924 struct net *net = sock_net(&tsk->sk);
2925 struct publication *safe, *p;
2926 struct tipc_uaddr _ua;
2927 int rc = -EINVAL;
2928
2929 list_for_each_entry_safe(p, safe, &tsk->publications, binding_sock) {
2930 if (!ua) {
2931 tipc_uaddr(&_ua, TIPC_SERVICE_RANGE, p->scope,
2932 p->sr.type, p->sr.lower, p->sr.upper);
2933 tipc_nametbl_withdraw(net, &_ua, &p->sk, p->key);
2934 continue;
2935 }
2936 /* Unbind specific publication */
2937 if (p->scope != ua->scope)
2938 continue;
2939 if (p->sr.type != ua->sr.type)
2940 continue;
2941 if (p->sr.lower != ua->sr.lower)
2942 continue;
2943 if (p->sr.upper != ua->sr.upper)
2944 break;
2945 tipc_nametbl_withdraw(net, ua, &p->sk, p->key);
2946 rc = 0;
2947 break;
2948 }
2949 if (list_empty(&tsk->publications)) {
2950 tsk->published = 0;
2951 rc = 0;
2952 }
2953 return rc;
2954}
2955
2956/* tipc_sk_reinit: set non-zero address in all existing sockets
2957 * when we go from standalone to network mode.
2958 */
2959void tipc_sk_reinit(struct net *net)
2960{
2961 struct tipc_net *tn = net_generic(net, tipc_net_id);
2962 struct rhashtable_iter iter;
2963 struct tipc_sock *tsk;
2964 struct tipc_msg *msg;
2965
2966 rhashtable_walk_enter(&tn->sk_rht, &iter);
2967
2968 do {
2969 rhashtable_walk_start(&iter);
2970
2971 while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) {
2972 sock_hold(&tsk->sk);
2973 rhashtable_walk_stop(&iter);
2974 lock_sock(&tsk->sk);
2975 msg = &tsk->phdr;
2976 msg_set_prevnode(msg, tipc_own_addr(net));
2977 msg_set_orignode(msg, tipc_own_addr(net));
2978 release_sock(&tsk->sk);
2979 rhashtable_walk_start(&iter);
2980 sock_put(&tsk->sk);
2981 }
2982
2983 rhashtable_walk_stop(&iter);
2984 } while (tsk == ERR_PTR(-EAGAIN));
2985
2986 rhashtable_walk_exit(&iter);
2987}
2988
2989static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid)
2990{
2991 struct tipc_net *tn = net_generic(net, tipc_net_id);
2992 struct tipc_sock *tsk;
2993
2994 rcu_read_lock();
2995 tsk = rhashtable_lookup(&tn->sk_rht, &portid, tsk_rht_params);
2996 if (tsk)
2997 sock_hold(&tsk->sk);
2998 rcu_read_unlock();
2999
3000 return tsk;
3001}
3002
3003static int tipc_sk_insert(struct tipc_sock *tsk)
3004{
3005 struct sock *sk = &tsk->sk;
3006 struct net *net = sock_net(sk);
3007 struct tipc_net *tn = net_generic(net, tipc_net_id);
3008 u32 remaining = (TIPC_MAX_PORT - TIPC_MIN_PORT) + 1;
3009 u32 portid = prandom_u32() % remaining + TIPC_MIN_PORT;
3010
3011 while (remaining--) {
3012 portid++;
3013 if ((portid < TIPC_MIN_PORT) || (portid > TIPC_MAX_PORT))
3014 portid = TIPC_MIN_PORT;
3015 tsk->portid = portid;
3016 sock_hold(&tsk->sk);
3017 if (!rhashtable_lookup_insert_fast(&tn->sk_rht, &tsk->node,
3018 tsk_rht_params))
3019 return 0;
3020 sock_put(&tsk->sk);
3021 }
3022
3023 return -1;
3024}
3025
3026static void tipc_sk_remove(struct tipc_sock *tsk)
3027{
3028 struct sock *sk = &tsk->sk;
3029 struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id);
3030
3031 if (!rhashtable_remove_fast(&tn->sk_rht, &tsk->node, tsk_rht_params)) {
3032 WARN_ON(refcount_read(&sk->sk_refcnt) == 1);
3033 __sock_put(sk);
3034 }
3035}
3036
3037static const struct rhashtable_params tsk_rht_params = {
3038 .nelem_hint = 192,
3039 .head_offset = offsetof(struct tipc_sock, node),
3040 .key_offset = offsetof(struct tipc_sock, portid),
3041 .key_len = sizeof(u32), /* portid */
3042 .max_size = 1048576,
3043 .min_size = 256,
3044 .automatic_shrinking = true,
3045};
3046
3047int tipc_sk_rht_init(struct net *net)
3048{
3049 struct tipc_net *tn = net_generic(net, tipc_net_id);
3050
3051 return rhashtable_init(&tn->sk_rht, &tsk_rht_params);
3052}
3053
3054void tipc_sk_rht_destroy(struct net *net)
3055{
3056 struct tipc_net *tn = net_generic(net, tipc_net_id);
3057
3058 /* Wait for socket readers to complete */
3059 synchronize_net();
3060
3061 rhashtable_destroy(&tn->sk_rht);
3062}
3063
3064static int tipc_sk_join(struct tipc_sock *tsk, struct tipc_group_req *mreq)
3065{
3066 struct net *net = sock_net(&tsk->sk);
3067 struct tipc_group *grp = tsk->group;
3068 struct tipc_msg *hdr = &tsk->phdr;
3069 struct tipc_uaddr ua;
3070 int rc;
3071
3072 if (mreq->type < TIPC_RESERVED_TYPES)
3073 return -EACCES;
3074 if (mreq->scope > TIPC_NODE_SCOPE)
3075 return -EINVAL;
3076 if (mreq->scope != TIPC_NODE_SCOPE)
3077 mreq->scope = TIPC_CLUSTER_SCOPE;
3078 if (grp)
3079 return -EACCES;
3080 grp = tipc_group_create(net, tsk->portid, mreq, &tsk->group_is_open);
3081 if (!grp)
3082 return -ENOMEM;
3083 tsk->group = grp;
3084 msg_set_lookup_scope(hdr, mreq->scope);
3085 msg_set_nametype(hdr, mreq->type);
3086 msg_set_dest_droppable(hdr, true);
3087 tipc_uaddr(&ua, TIPC_SERVICE_RANGE, mreq->scope,
3088 mreq->type, mreq->instance, mreq->instance);
3089 tipc_nametbl_build_group(net, grp, &ua);
3090 rc = tipc_sk_publish(tsk, &ua);
3091 if (rc) {
3092 tipc_group_delete(net, grp);
3093 tsk->group = NULL;
3094 return rc;
3095 }
3096 /* Eliminate any risk that a broadcast overtakes sent JOINs */
3097 tsk->mc_method.rcast = true;
3098 tsk->mc_method.mandatory = true;
3099 tipc_group_join(net, grp, &tsk->sk.sk_rcvbuf);
3100 return rc;
3101}
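
/* A hypothetical userspace sketch (never built) of how tipc_sk_join() is
 * reached: a TIPC_GROUP_JOIN setsockopt carrying a struct tipc_group_req.
 * As checked above, 'type' must be >= TIPC_RESERVED_TYPES and a non-node
 * scope defaults to cluster scope; TIPC_GROUP_LEAVE undoes the join.
 * join_group() is our assumed helper name.
 */
#if 0
#include <string.h>
#include <sys/socket.h>
#include <linux/tipc.h>

static int join_group(int sd, __u32 type, __u32 instance)
{
	struct tipc_group_req mreq;

	memset(&mreq, 0, sizeof(mreq));
	mreq.type = type;		/* group identity */
	mreq.instance = instance;	/* member identity */
	mreq.scope = TIPC_CLUSTER_SCOPE;
	return setsockopt(sd, SOL_TIPC, TIPC_GROUP_JOIN, &mreq, sizeof(mreq));
}
#endif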
3102
3103static int tipc_sk_leave(struct tipc_sock *tsk)
3104{
3105 struct net *net = sock_net(&tsk->sk);
3106 struct tipc_group *grp = tsk->group;
3107 struct tipc_uaddr ua;
3108 int scope;
3109
3110 if (!grp)
3111 return -EINVAL;
3112 ua.addrtype = TIPC_SERVICE_RANGE;
3113 tipc_group_self(grp, &ua.sr, &scope);
3114 ua.scope = scope;
3115 tipc_group_delete(net, grp);
3116 tsk->group = NULL;
3117 tipc_sk_withdraw(tsk, &ua);
3118 return 0;
3119}
3120
3121/**
3122 * tipc_setsockopt - set socket option
3123 * @sock: socket structure
3124 * @lvl: option level
3125 * @opt: option identifier
3126 * @ov: pointer to new option value
3127 * @ol: length of option value
3128 *
3129 * For stream sockets only, accepts and ignores all IPPROTO_TCP options
3130 * (to ease compatibility).
3131 *
3132 * Return: 0 on success, errno otherwise
3133 */
3134static int tipc_setsockopt(struct socket *sock, int lvl, int opt,
3135 sockptr_t ov, unsigned int ol)
3136{
3137 struct sock *sk = sock->sk;
3138 struct tipc_sock *tsk = tipc_sk(sk);
3139 struct tipc_group_req mreq;
3140 u32 value = 0;
3141 int res = 0;
3142
3143 if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
3144 return 0;
3145 if (lvl != SOL_TIPC)
3146 return -ENOPROTOOPT;
3147
3148 switch (opt) {
3149 case TIPC_IMPORTANCE:
3150 case TIPC_SRC_DROPPABLE:
3151 case TIPC_DEST_DROPPABLE:
3152 case TIPC_CONN_TIMEOUT:
3153 case TIPC_NODELAY:
3154 if (ol < sizeof(value))
3155 return -EINVAL;
3156 if (copy_from_sockptr(&value, ov, sizeof(u32)))
3157 return -EFAULT;
3158 break;
3159 case TIPC_GROUP_JOIN:
3160 if (ol < sizeof(mreq))
3161 return -EINVAL;
3162 if (copy_from_sockptr(&mreq, ov, sizeof(mreq)))
3163 return -EFAULT;
3164 break;
3165 default:
3166 if (!sockptr_is_null(ov) || ol)
3167 return -EINVAL;
3168 }
3169
3170 lock_sock(sk);
3171
3172 switch (opt) {
3173 case TIPC_IMPORTANCE:
3174 res = tsk_set_importance(sk, value);
3175 break;
3176 case TIPC_SRC_DROPPABLE:
3177 if (sock->type != SOCK_STREAM)
3178 tsk_set_unreliable(tsk, value);
3179 else
3180 res = -ENOPROTOOPT;
3181 break;
3182 case TIPC_DEST_DROPPABLE:
3183 tsk_set_unreturnable(tsk, value);
3184 break;
3185 case TIPC_CONN_TIMEOUT:
3186 tipc_sk(sk)->conn_timeout = value;
3187 break;
3188 case TIPC_MCAST_BROADCAST:
3189 tsk->mc_method.rcast = false;
3190 tsk->mc_method.mandatory = true;
3191 break;
3192 case TIPC_MCAST_REPLICAST:
3193 tsk->mc_method.rcast = true;
3194 tsk->mc_method.mandatory = true;
3195 break;
3196 case TIPC_GROUP_JOIN:
3197 res = tipc_sk_join(tsk, &mreq);
3198 break;
3199 case TIPC_GROUP_LEAVE:
3200 res = tipc_sk_leave(tsk);
3201 break;
3202 case TIPC_NODELAY:
3203 tsk->nodelay = !!value;
3204 tsk_set_nagle(tsk);
3205 break;
3206 default:
3207 res = -EINVAL;
3208 }
3209
3210 release_sock(sk);
3211
3212 return res;
3213}
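
/* A hypothetical userspace sketch (never built) of two of the u32-valued
 * options handled above: extending the connect timeout and raising the
 * message importance. tune_sock() is an assumed helper name.
 */
#if 0
#include <sys/socket.h>
#include <linux/tipc.h>

static int tune_sock(int sd)
{
	__u32 timeout = 10000;			/* TIPC_CONN_TIMEOUT, in ms */
	__u32 imp = TIPC_HIGH_IMPORTANCE;

	if (setsockopt(sd, SOL_TIPC, TIPC_CONN_TIMEOUT,
		       &timeout, sizeof(timeout)))
		return -1;
	return setsockopt(sd, SOL_TIPC, TIPC_IMPORTANCE, &imp, sizeof(imp));
}
#endif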
3214
3215/**
3216 * tipc_getsockopt - get socket option
3217 * @sock: socket structure
3218 * @lvl: option level
3219 * @opt: option identifier
3220 * @ov: receptacle for option value
3221 * @ol: receptacle for length of option value
3222 *
3223 * For stream sockets only, returns 0 length result for all IPPROTO_TCP options
3224 * (to ease compatibility).
3225 *
3226 * Return: 0 on success, errno otherwise
3227 */
3228static int tipc_getsockopt(struct socket *sock, int lvl, int opt,
3229 char __user *ov, int __user *ol)
3230{
3231 struct sock *sk = sock->sk;
3232 struct tipc_sock *tsk = tipc_sk(sk);
3233 struct tipc_service_range seq;
3234 int len, scope;
3235 u32 value;
3236 int res;
3237
3238 if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
3239 return put_user(0, ol);
3240 if (lvl != SOL_TIPC)
3241 return -ENOPROTOOPT;
3242 res = get_user(len, ol);
3243 if (res)
3244 return res;
3245
3246 lock_sock(sk);
3247
3248 switch (opt) {
3249 case TIPC_IMPORTANCE:
3250 value = tsk_importance(tsk);
3251 break;
3252 case TIPC_SRC_DROPPABLE:
3253 value = tsk_unreliable(tsk);
3254 break;
3255 case TIPC_DEST_DROPPABLE:
3256 value = tsk_unreturnable(tsk);
3257 break;
3258 case TIPC_CONN_TIMEOUT:
3259 value = tsk->conn_timeout;
3260 /* no need to set "res", since already 0 at this point */
3261 break;
3262 case TIPC_NODE_RECVQ_DEPTH:
3263 value = 0; /* was tipc_queue_size, now obsolete */
3264 break;
3265 case TIPC_SOCK_RECVQ_DEPTH:
3266 value = skb_queue_len(&sk->sk_receive_queue);
3267 break;
3268 case TIPC_SOCK_RECVQ_USED:
3269 value = sk_rmem_alloc_get(sk);
3270 break;
3271 case TIPC_GROUP_JOIN:
3272 seq.type = 0;
3273 if (tsk->group)
3274 tipc_group_self(tsk->group, &seq, &scope);
3275 value = seq.type;
3276 break;
3277 default:
3278 res = -EINVAL;
3279 }
3280
3281 release_sock(sk);
3282
3283 if (res)
3284 return res; /* "get" failed */
3285
3286 if (len < sizeof(value))
3287 return -EINVAL;
3288
3289 if (copy_to_user(ov, &value, sizeof(value)))
3290 return -EFAULT;
3291
3292 return put_user(sizeof(value), ol);
3293}
3294
3295static int tipc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
3296{
3297 struct net *net = sock_net(sock->sk);
3298 struct tipc_sioc_nodeid_req nr = {0};
3299 struct tipc_sioc_ln_req lnr;
3300 void __user *argp = (void __user *)arg;
3301
3302 switch (cmd) {
3303 case SIOCGETLINKNAME:
3304 if (copy_from_user(&lnr, argp, sizeof(lnr)))
3305 return -EFAULT;
3306 if (!tipc_node_get_linkname(net,
3307 lnr.bearer_id & 0xffff, lnr.peer,
3308 lnr.linkname, TIPC_MAX_LINK_NAME)) {
3309 if (copy_to_user(argp, &lnr, sizeof(lnr)))
3310 return -EFAULT;
3311 return 0;
3312 }
3313 return -EADDRNOTAVAIL;
3314 case SIOCGETNODEID:
3315 if (copy_from_user(&nr, argp, sizeof(nr)))
3316 return -EFAULT;
3317 if (!tipc_node_get_id(net, nr.peer, nr.node_id))
3318 return -EADDRNOTAVAIL;
3319 if (copy_to_user(argp, &nr, sizeof(nr)))
3320 return -EFAULT;
3321 return 0;
3322 default:
3323 return -ENOIOCTLCMD;
3324 }
3325}
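
/* A hypothetical userspace sketch (never built) of the SIOCGETNODEID
 * ioctl handled above; the returned identity is a raw TIPC_NODEID_LEN
 * byte array, printed here as hex. print_node_id() is our assumed name.
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/tipc.h>

static void print_node_id(int sd, __u32 peer_addr)
{
	struct tipc_sioc_nodeid_req nr;
	int i;

	memset(&nr, 0, sizeof(nr));
	nr.peer = peer_addr;
	if (ioctl(sd, SIOCGETNODEID, &nr))
		return;
	for (i = 0; i < TIPC_NODEID_LEN; i++)
		printf("%02x", (unsigned char)nr.node_id[i]);
	printf("\n");
}
#endif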
3326
3327static int tipc_socketpair(struct socket *sock1, struct socket *sock2)
3328{
3329 struct tipc_sock *tsk2 = tipc_sk(sock2->sk);
3330 struct tipc_sock *tsk1 = tipc_sk(sock1->sk);
3331 u32 onode = tipc_own_addr(sock_net(sock1->sk));
3332
3333 tsk1->peer.family = AF_TIPC;
3334 tsk1->peer.addrtype = TIPC_SOCKET_ADDR;
3335 tsk1->peer.scope = TIPC_NODE_SCOPE;
3336 tsk1->peer.addr.id.ref = tsk2->portid;
3337 tsk1->peer.addr.id.node = onode;
3338 tsk2->peer.family = AF_TIPC;
3339 tsk2->peer.addrtype = TIPC_SOCKET_ADDR;
3340 tsk2->peer.scope = TIPC_NODE_SCOPE;
3341 tsk2->peer.addr.id.ref = tsk1->portid;
3342 tsk2->peer.addr.id.node = onode;
3343
3344 tipc_sk_finish_conn(tsk1, tsk2->portid, onode);
3345 tipc_sk_finish_conn(tsk2, tsk1->portid, onode);
3346 return 0;
3347}
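
/* A hypothetical userspace sketch (never built): socketpair() on AF_TIPC
 * yields two node-local sockets that the function above has already
 * connected to each other, so both come back in TIPC_ESTABLISHED state.
 * make_pair() is our assumed helper name.
 */
#if 0
#include <sys/socket.h>

static int make_pair(int sv[2])
{
	return socketpair(AF_TIPC, SOCK_SEQPACKET, 0, sv);
}
#endif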
3348
3349/* Protocol switches for the various types of TIPC sockets */
3350
3351static const struct proto_ops msg_ops = {
3352 .owner = THIS_MODULE,
3353 .family = AF_TIPC,
3354 .release = tipc_release,
3355 .bind = tipc_bind,
3356 .connect = tipc_connect,
3357 .socketpair = tipc_socketpair,
3358 .accept = sock_no_accept,
3359 .getname = tipc_getname,
3360 .poll = tipc_poll,
3361 .ioctl = tipc_ioctl,
3362 .listen = sock_no_listen,
3363 .shutdown = tipc_shutdown,
3364 .setsockopt = tipc_setsockopt,
3365 .getsockopt = tipc_getsockopt,
3366 .sendmsg = tipc_sendmsg,
3367 .recvmsg = tipc_recvmsg,
3368 .mmap = sock_no_mmap,
3369 .sendpage = sock_no_sendpage
3370};
3371
3372static const struct proto_ops packet_ops = {
3373 .owner = THIS_MODULE,
3374 .family = AF_TIPC,
3375 .release = tipc_release,
3376 .bind = tipc_bind,
3377 .connect = tipc_connect,
3378 .socketpair = tipc_socketpair,
3379 .accept = tipc_accept,
3380 .getname = tipc_getname,
3381 .poll = tipc_poll,
3382 .ioctl = tipc_ioctl,
3383 .listen = tipc_listen,
3384 .shutdown = tipc_shutdown,
3385 .setsockopt = tipc_setsockopt,
3386 .getsockopt = tipc_getsockopt,
3387 .sendmsg = tipc_send_packet,
3388 .recvmsg = tipc_recvmsg,
3389 .mmap = sock_no_mmap,
3390 .sendpage = sock_no_sendpage
3391};
3392
3393static const struct proto_ops stream_ops = {
3394 .owner = THIS_MODULE,
3395 .family = AF_TIPC,
3396 .release = tipc_release,
3397 .bind = tipc_bind,
3398 .connect = tipc_connect,
3399 .socketpair = tipc_socketpair,
3400 .accept = tipc_accept,
3401 .getname = tipc_getname,
3402 .poll = tipc_poll,
3403 .ioctl = tipc_ioctl,
3404 .listen = tipc_listen,
3405 .shutdown = tipc_shutdown,
3406 .setsockopt = tipc_setsockopt,
3407 .getsockopt = tipc_getsockopt,
3408 .sendmsg = tipc_sendstream,
3409 .recvmsg = tipc_recvstream,
3410 .mmap = sock_no_mmap,
3411 .sendpage = sock_no_sendpage
3412};
3413
3414static const struct net_proto_family tipc_family_ops = {
3415 .owner = THIS_MODULE,
3416 .family = AF_TIPC,
3417 .create = tipc_sk_create
3418};
3419
3420static struct proto tipc_proto = {
3421 .name = "TIPC",
3422 .owner = THIS_MODULE,
3423 .obj_size = sizeof(struct tipc_sock),
3424 .sysctl_rmem = sysctl_tipc_rmem
3425};
3426
3427/**
3428 * tipc_socket_init - initialize TIPC socket interface
3429 *
3430 * Return: 0 on success, errno otherwise
3431 */
3432int tipc_socket_init(void)
3433{
3434 int res;
3435
3436 res = proto_register(&tipc_proto, 1);
3437 if (res) {
3438 pr_err("Failed to register TIPC protocol type\n");
3439 goto out;
3440 }
3441
3442 res = sock_register(&tipc_family_ops);
3443 if (res) {
3444 pr_err("Failed to register TIPC socket type\n");
3445 proto_unregister(&tipc_proto);
3446 goto out;
3447 }
3448 out:
3449 return res;
3450}
3451
3452/**
3453 * tipc_socket_stop - stop TIPC socket interface
3454 */
3455void tipc_socket_stop(void)
3456{
3457 sock_unregister(tipc_family_ops.family);
3458 proto_unregister(&tipc_proto);
3459}
3460
3461/* Caller should hold socket lock for the passed tipc socket. */
3462static int __tipc_nl_add_sk_con(struct sk_buff *skb, struct tipc_sock *tsk)
3463{
3464 u32 peer_node, peer_port;
3465 u32 conn_type, conn_instance;
3466 struct nlattr *nest;
3467
3468 peer_node = tsk_peer_node(tsk);
3469 peer_port = tsk_peer_port(tsk);
3470 conn_type = msg_nametype(&tsk->phdr);
3471 conn_instance = msg_nameinst(&tsk->phdr);
3472 nest = nla_nest_start_noflag(skb, TIPC_NLA_SOCK_CON);
3473 if (!nest)
3474 return -EMSGSIZE;
3475
3476 if (nla_put_u32(skb, TIPC_NLA_CON_NODE, peer_node))
3477 goto msg_full;
3478 if (nla_put_u32(skb, TIPC_NLA_CON_SOCK, peer_port))
3479 goto msg_full;
3480
3481 if (tsk->conn_addrtype != 0) {
3482 if (nla_put_flag(skb, TIPC_NLA_CON_FLAG))
3483 goto msg_full;
3484 if (nla_put_u32(skb, TIPC_NLA_CON_TYPE, conn_type))
3485 goto msg_full;
3486 if (nla_put_u32(skb, TIPC_NLA_CON_INST, conn_instance))
3487 goto msg_full;
3488 }
3489 nla_nest_end(skb, nest);
3490
3491 return 0;
3492
3493msg_full:
3494 nla_nest_cancel(skb, nest);
3495
3496 return -EMSGSIZE;
3497}
3498
3499static int __tipc_nl_add_sk_info(struct sk_buff *skb, struct tipc_sock
3500 *tsk)
3501{
3502 struct net *net = sock_net(skb->sk);
3503 struct sock *sk = &tsk->sk;
3504
3505 if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->portid) ||
3506 nla_put_u32(skb, TIPC_NLA_SOCK_ADDR, tipc_own_addr(net)))
3507 return -EMSGSIZE;
3508
3509 if (tipc_sk_connected(sk)) {
3510 if (__tipc_nl_add_sk_con(skb, tsk))
3511 return -EMSGSIZE;
3512 } else if (!list_empty(&tsk->publications)) {
3513 if (nla_put_flag(skb, TIPC_NLA_SOCK_HAS_PUBL))
3514 return -EMSGSIZE;
3515 }
3516 return 0;
3517}
3518
3519/* Caller should hold socket lock for the passed tipc socket. */
3520static int __tipc_nl_add_sk(struct sk_buff *skb, struct netlink_callback *cb,
3521 struct tipc_sock *tsk)
3522{
3523 struct nlattr *attrs;
3524 void *hdr;
3525
3526 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
3527 &tipc_genl_family, NLM_F_MULTI, TIPC_NL_SOCK_GET);
3528 if (!hdr)
3529 goto msg_cancel;
3530
3531 attrs = nla_nest_start_noflag(skb, TIPC_NLA_SOCK);
3532 if (!attrs)
3533 goto genlmsg_cancel;
3534
3535 if (__tipc_nl_add_sk_info(skb, tsk))
3536 goto attr_msg_cancel;
3537
3538 nla_nest_end(skb, attrs);
3539 genlmsg_end(skb, hdr);
3540
3541 return 0;
3542
3543attr_msg_cancel:
3544 nla_nest_cancel(skb, attrs);
3545genlmsg_cancel:
3546 genlmsg_cancel(skb, hdr);
3547msg_cancel:
3548 return -EMSGSIZE;
3549}
3550
3551int tipc_nl_sk_walk(struct sk_buff *skb, struct netlink_callback *cb,
3552 int (*skb_handler)(struct sk_buff *skb,
3553 struct netlink_callback *cb,
3554 struct tipc_sock *tsk))
3555{
3556 struct rhashtable_iter *iter = (void *)cb->args[4];
3557 struct tipc_sock *tsk;
3558 int err;
3559
3560 rhashtable_walk_start(iter);
3561 while ((tsk = rhashtable_walk_next(iter)) != NULL) {
3562 if (IS_ERR(tsk)) {
3563 err = PTR_ERR(tsk);
3564 if (err == -EAGAIN) {
3565 err = 0;
3566 continue;
3567 }
3568 break;
3569 }
3570
3571 sock_hold(&tsk->sk);
3572 rhashtable_walk_stop(iter);
3573 lock_sock(&tsk->sk);
3574 err = skb_handler(skb, cb, tsk);
3575 if (err) {
3576 release_sock(&tsk->sk);
3577 sock_put(&tsk->sk);
3578 goto out;
3579 }
3580 release_sock(&tsk->sk);
3581 rhashtable_walk_start(iter);
3582 sock_put(&tsk->sk);
3583 }
3584 rhashtable_walk_stop(iter);
3585out:
3586 return skb->len;
3587}
3588EXPORT_SYMBOL(tipc_nl_sk_walk);
3589
3590int tipc_dump_start(struct netlink_callback *cb)
3591{
3592 return __tipc_dump_start(cb, sock_net(cb->skb->sk));
3593}
3594EXPORT_SYMBOL(tipc_dump_start);
3595
3596int __tipc_dump_start(struct netlink_callback *cb, struct net *net)
3597{
3598 /* tipc_nl_name_table_dump() uses cb->args[0...3]. */
3599 struct rhashtable_iter *iter = (void *)cb->args[4];
3600 struct tipc_net *tn = tipc_net(net);
3601
3602 if (!iter) {
3603 iter = kmalloc(sizeof(*iter), GFP_KERNEL);
3604 if (!iter)
3605 return -ENOMEM;
3606
3607 cb->args[4] = (long)iter;
3608 }
3609
3610 rhashtable_walk_enter(&tn->sk_rht, iter);
3611 return 0;
3612}
3613
3614int tipc_dump_done(struct netlink_callback *cb)
3615{
3616 struct rhashtable_iter *hti = (void *)cb->args[4];
3617
3618 rhashtable_walk_exit(hti);
3619 kfree(hti);
3620 return 0;
3621}
3622EXPORT_SYMBOL(tipc_dump_done);
3623
3624int tipc_sk_fill_sock_diag(struct sk_buff *skb, struct netlink_callback *cb,
3625 struct tipc_sock *tsk, u32 sk_filter_state,
3626 u64 (*tipc_diag_gen_cookie)(struct sock *sk))
3627{
3628 struct sock *sk = &tsk->sk;
3629 struct nlattr *attrs;
3630 struct nlattr *stat;
3631
3632 /* Filter response w.r.t. sk_state */
3633 if (!(sk_filter_state & (1 << sk->sk_state)))
3634 return 0;
3635
3636 attrs = nla_nest_start_noflag(skb, TIPC_NLA_SOCK);
3637 if (!attrs)
3638 goto msg_cancel;
3639
3640 if (__tipc_nl_add_sk_info(skb, tsk))
3641 goto attr_msg_cancel;
3642
3643 if (nla_put_u32(skb, TIPC_NLA_SOCK_TYPE, (u32)sk->sk_type) ||
3644 nla_put_u32(skb, TIPC_NLA_SOCK_TIPC_STATE, (u32)sk->sk_state) ||
3645 nla_put_u32(skb, TIPC_NLA_SOCK_INO, sock_i_ino(sk)) ||
3646 nla_put_u32(skb, TIPC_NLA_SOCK_UID,
3647 from_kuid_munged(sk_user_ns(NETLINK_CB(cb->skb).sk),
3648 sock_i_uid(sk))) ||
3649 nla_put_u64_64bit(skb, TIPC_NLA_SOCK_COOKIE,
3650 tipc_diag_gen_cookie(sk),
3651 TIPC_NLA_SOCK_PAD))
3652 goto attr_msg_cancel;
3653
3654 stat = nla_nest_start_noflag(skb, TIPC_NLA_SOCK_STAT);
3655 if (!stat)
3656 goto attr_msg_cancel;
3657
3658 if (nla_put_u32(skb, TIPC_NLA_SOCK_STAT_RCVQ,
3659 skb_queue_len(&sk->sk_receive_queue)) ||
3660 nla_put_u32(skb, TIPC_NLA_SOCK_STAT_SENDQ,
3661 skb_queue_len(&sk->sk_write_queue)) ||
3662 nla_put_u32(skb, TIPC_NLA_SOCK_STAT_DROP,
3663 atomic_read(&sk->sk_drops)))
3664 goto stat_msg_cancel;
3665
3666 if (tsk->cong_link_cnt &&
3667 nla_put_flag(skb, TIPC_NLA_SOCK_STAT_LINK_CONG))
3668 goto stat_msg_cancel;
3669
3670 if (tsk_conn_cong(tsk) &&
3671 nla_put_flag(skb, TIPC_NLA_SOCK_STAT_CONN_CONG))
3672 goto stat_msg_cancel;
3673
3674 nla_nest_end(skb, stat);
3675
3676 if (tsk->group)
3677 if (tipc_group_fill_sock_diag(tsk->group, skb))
3678 goto stat_msg_cancel;
3679
3680 nla_nest_end(skb, attrs);
3681
3682 return 0;
3683
3684stat_msg_cancel:
3685 nla_nest_cancel(skb, stat);
3686attr_msg_cancel:
3687 nla_nest_cancel(skb, attrs);
3688msg_cancel:
3689 return -EMSGSIZE;
3690}
3691EXPORT_SYMBOL(tipc_sk_fill_sock_diag);
3692
3693int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb)
3694{
3695 return tipc_nl_sk_walk(skb, cb, __tipc_nl_add_sk);
3696}
3697
3698/* Caller should hold socket lock for the passed tipc socket. */
3699static int __tipc_nl_add_sk_publ(struct sk_buff *skb,
3700 struct netlink_callback *cb,
3701 struct publication *publ)
3702{
3703 void *hdr;
3704 struct nlattr *attrs;
3705
3706 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
3707 &tipc_genl_family, NLM_F_MULTI, TIPC_NL_PUBL_GET);
3708 if (!hdr)
3709 goto msg_cancel;
3710
3711 attrs = nla_nest_start_noflag(skb, TIPC_NLA_PUBL);
3712 if (!attrs)
3713 goto genlmsg_cancel;
3714
3715 if (nla_put_u32(skb, TIPC_NLA_PUBL_KEY, publ->key))
3716 goto attr_msg_cancel;
3717 if (nla_put_u32(skb, TIPC_NLA_PUBL_TYPE, publ->sr.type))
3718 goto attr_msg_cancel;
3719 if (nla_put_u32(skb, TIPC_NLA_PUBL_LOWER, publ->sr.lower))
3720 goto attr_msg_cancel;
3721 if (nla_put_u32(skb, TIPC_NLA_PUBL_UPPER, publ->sr.upper))
3722 goto attr_msg_cancel;
3723
3724 nla_nest_end(skb, attrs);
3725 genlmsg_end(skb, hdr);
3726
3727 return 0;
3728
3729attr_msg_cancel:
3730 nla_nest_cancel(skb, attrs);
3731genlmsg_cancel:
3732 genlmsg_cancel(skb, hdr);
3733msg_cancel:
3734 return -EMSGSIZE;
3735}
3736
3737/* Caller should hold socket lock for the passed tipc socket. */
3738static int __tipc_nl_list_sk_publ(struct sk_buff *skb,
3739 struct netlink_callback *cb,
3740 struct tipc_sock *tsk, u32 *last_publ)
3741{
3742 int err;
3743 struct publication *p;
3744
3745 if (*last_publ) {
3746 list_for_each_entry(p, &tsk->publications, binding_sock) {
3747 if (p->key == *last_publ)
3748 break;
3749 }
3750 if (p->key != *last_publ) {
3751 /* We never set seq or call nl_dump_check_consistent(),
3752 * which means that setting prev_seq here will cause the
3753 * consistency check to fail in the netlink callback
3754 * handler, resulting in the last NLMSG_DONE message
3755 * having the NLM_F_DUMP_INTR flag set.
3756 */
3757 cb->prev_seq = 1;
3758 *last_publ = 0;
3759 return -EPIPE;
3760 }
3761 } else {
3762 p = list_first_entry(&tsk->publications, struct publication,
3763 binding_sock);
3764 }
3765
3766 list_for_each_entry_from(p, &tsk->publications, binding_sock) {
3767 err = __tipc_nl_add_sk_publ(skb, cb, p);
3768 if (err) {
3769 *last_publ = p->key;
3770 return err;
3771 }
3772 }
3773 *last_publ = 0;
3774
3775 return 0;
3776}
3777
3778int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb)
3779{
3780 int err;
3781 u32 tsk_portid = cb->args[0];
3782 u32 last_publ = cb->args[1];
3783 u32 done = cb->args[2];
3784 struct net *net = sock_net(skb->sk);
3785 struct tipc_sock *tsk;
3786
3787 if (!tsk_portid) {
3788 struct nlattr **attrs = genl_dumpit_info(cb)->attrs;
3789 struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1];
3790
3791 if (!attrs[TIPC_NLA_SOCK])
3792 return -EINVAL;
3793
3794 err = nla_parse_nested_deprecated(sock, TIPC_NLA_SOCK_MAX,
3795 attrs[TIPC_NLA_SOCK],
3796 tipc_nl_sock_policy, NULL);
3797 if (err)
3798 return err;
3799
3800 if (!sock[TIPC_NLA_SOCK_REF])
3801 return -EINVAL;
3802
3803 tsk_portid = nla_get_u32(sock[TIPC_NLA_SOCK_REF]);
3804 }
3805
3806 if (done)
3807 return 0;
3808
3809 tsk = tipc_sk_lookup(net, tsk_portid);
3810 if (!tsk)
3811 return -EINVAL;
3812
3813 lock_sock(&tsk->sk);
3814 err = __tipc_nl_list_sk_publ(skb, cb, tsk, &last_publ);
3815 if (!err)
3816 done = 1;
3817 release_sock(&tsk->sk);
3818 sock_put(&tsk->sk);
3819
3820 cb->args[0] = tsk_portid;
3821 cb->args[1] = last_publ;
3822 cb->args[2] = done;
3823
3824 return skb->len;
3825}
3826
3827/**
3828 * tipc_sk_filtering - check if a socket should be traced
3829 * @sk: the socket to be examined
3830 *
3831 * @sysctl_tipc_sk_filter is used as the socket tuple for filtering:
3832 * (portid, sock type, name type, name lower, name upper)
3833 *
3834 * Return: true if the socket matches the tuple (where a field
3835 * value of 0 matches anything) or when no tuple is set (all = 0);
3836 * otherwise false
3837 */
3838bool tipc_sk_filtering(struct sock *sk)
3839{
3840 struct tipc_sock *tsk;
3841 struct publication *p;
3842 u32 _port, _sktype, _type, _lower, _upper;
3843 u32 type = 0, lower = 0, upper = 0;
3844
3845 if (!sk)
3846 return true;
3847
3848 tsk = tipc_sk(sk);
3849
3850 _port = sysctl_tipc_sk_filter[0];
3851 _sktype = sysctl_tipc_sk_filter[1];
3852 _type = sysctl_tipc_sk_filter[2];
3853 _lower = sysctl_tipc_sk_filter[3];
3854 _upper = sysctl_tipc_sk_filter[4];
3855
3856 if (!_port && !_sktype && !_type && !_lower && !_upper)
3857 return true;
3858
3859 if (_port)
3860 return (_port == tsk->portid);
3861
3862 if (_sktype && _sktype != sk->sk_type)
3863 return false;
3864
3865 if (tsk->published) {
3866 p = list_first_entry_or_null(&tsk->publications,
3867 struct publication, binding_sock);
3868 if (p) {
3869 type = p->sr.type;
3870 lower = p->sr.lower;
3871 upper = p->sr.upper;
3872 }
3873 }
3874
3875 if (!tipc_sk_type_connectionless(sk)) {
3876 type = msg_nametype(&tsk->phdr);
3877 lower = msg_nameinst(&tsk->phdr);
3878 upper = lower;
3879 }
3880
3881 if ((_type && _type != type) || (_lower && _lower != lower) ||
3882 (_upper && _upper != upper))
3883 return false;
3884
3885 return true;
3886}
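
/* Illustrative tuple setup from userspace (a sketch, assuming the
 * filter array is exposed as the net.tipc.sk_filter sysctl):
 *
 *	# trace only sockets bound to service type 1000,
 *	# instances 10-20, regardless of port id and socket type
 *	echo "0 0 1000 10 20" > /proc/sys/net/tipc/sk_filter
 */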
3887
3888u32 tipc_sock_get_portid(struct sock *sk)
3889{
3890 return (sk) ? (tipc_sk(sk))->portid : 0;
3891}
3892
3893/**
3894 * tipc_sk_overlimit1 - check if socket rx queue is about to be overloaded,
3895 * both the rcv and backlog queues are considered
3896 * @sk: tipc sk to be checked
3897 * @skb: tipc msg to be checked
3898 *
3899 * Return: true if the rx queue allocation exceeds 90% of the limit, otherwise false
3900 */
3902bool tipc_sk_overlimit1(struct sock *sk, struct sk_buff *skb)
3903{
3904 atomic_t *dcnt = &tipc_sk(sk)->dupl_rcvcnt;
3905 unsigned int lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
3906 unsigned int qsize = sk->sk_backlog.len + sk_rmem_alloc_get(sk);
3907
3908 return (qsize > lim * 90 / 100);
3909}
3910
3911/**
3912 * tipc_sk_overlimit2 - check if socket rx queue is about to be overloaded,
3913 * only the rcv queue is considered
3914 * @sk: tipc sk to be checked
3915 * @skb: tipc msg to be checked
3916 *
3917 * Return: true if the rx queue allocation exceeds 90% of the limit, otherwise false
3918 */
3920bool tipc_sk_overlimit2(struct sock *sk, struct sk_buff *skb)
3921{
3922 unsigned int lim = rcvbuf_limit(sk, skb);
3923 unsigned int qsize = sk_rmem_alloc_get(sk);
3924
3925 return (qsize > lim * 90 / 100);
3926}
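
/* Worked example of the 90% check used by both helpers above: with a
 * computed limit of, say, 1000000 bytes, a queue allocation of
 * 900001 bytes or more (i.e. > lim * 90 / 100) reports "about to be
 * overloaded", leaving a 10% margin before the limit itself is hit.
 */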
3927
3928/**
3929 * tipc_sk_dump - dump TIPC socket
3930 * @sk: tipc sk to be dumped
3931 * @dqueues: bitmask selecting which socket queues to dump:
3932 * - TIPC_DUMP_NONE: don't dump socket queues
3933 * - TIPC_DUMP_SK_SNDQ: dump socket send queue
3934 * - TIPC_DUMP_SK_RCVQ: dump socket rcv queue
3935 * - TIPC_DUMP_SK_BKLGQ: dump socket backlog queue
3936 * - TIPC_DUMP_ALL: dump all the socket queues above
3937 * @buf: buffer receiving the formatted dump data; the number of characters written is returned
3938 */
3939int tipc_sk_dump(struct sock *sk, u16 dqueues, char *buf)
3940{
3941 int i = 0;
3942 size_t sz = (dqueues) ? SK_LMAX : SK_LMIN;
3943 u32 conn_type, conn_instance;
3944 struct tipc_sock *tsk;
3945 struct publication *p;
3946 bool tsk_connected;
3947
3948 if (!sk) {
3949 i += scnprintf(buf, sz, "sk data: (null)\n");
3950 return i;
3951 }
3952
3953 tsk = tipc_sk(sk);
3954 tsk_connected = !tipc_sk_type_connectionless(sk);
3955
3956 i += scnprintf(buf, sz, "sk data: %u", sk->sk_type);
3957 i += scnprintf(buf + i, sz - i, " %d", sk->sk_state);
3958 i += scnprintf(buf + i, sz - i, " %x", tsk_own_node(tsk));
3959 i += scnprintf(buf + i, sz - i, " %u", tsk->portid);
3960 i += scnprintf(buf + i, sz - i, " | %u", tsk_connected);
3961 if (tsk_connected) {
3962 i += scnprintf(buf + i, sz - i, " %x", tsk_peer_node(tsk));
3963 i += scnprintf(buf + i, sz - i, " %u", tsk_peer_port(tsk));
3964 conn_type = msg_nametype(&tsk->phdr);
3965 conn_instance = msg_nameinst(&tsk->phdr);
3966 i += scnprintf(buf + i, sz - i, " %u", conn_type);
3967 i += scnprintf(buf + i, sz - i, " %u", conn_instance);
3968 }
3969 i += scnprintf(buf + i, sz - i, " | %u", tsk->published);
3970 if (tsk->published) {
3971 p = list_first_entry_or_null(&tsk->publications,
3972 struct publication, binding_sock);
3973 i += scnprintf(buf + i, sz - i, " %u", (p) ? p->sr.type : 0);
3974 i += scnprintf(buf + i, sz - i, " %u", (p) ? p->sr.lower : 0);
3975 i += scnprintf(buf + i, sz - i, " %u", (p) ? p->sr.upper : 0);
3976 }
3977 i += scnprintf(buf + i, sz - i, " | %u", tsk->snd_win);
3978 i += scnprintf(buf + i, sz - i, " %u", tsk->rcv_win);
3979 i += scnprintf(buf + i, sz - i, " %u", tsk->max_pkt);
3980 i += scnprintf(buf + i, sz - i, " %x", tsk->peer_caps);
3981 i += scnprintf(buf + i, sz - i, " %u", tsk->cong_link_cnt);
3982 i += scnprintf(buf + i, sz - i, " %u", tsk->snt_unacked);
3983 i += scnprintf(buf + i, sz - i, " %u", tsk->rcv_unacked);
3984 i += scnprintf(buf + i, sz - i, " %u", atomic_read(&tsk->dupl_rcvcnt));
3985 i += scnprintf(buf + i, sz - i, " %u", sk->sk_shutdown);
3986 i += scnprintf(buf + i, sz - i, " | %d", sk_wmem_alloc_get(sk));
3987 i += scnprintf(buf + i, sz - i, " %d", sk->sk_sndbuf);
3988 i += scnprintf(buf + i, sz - i, " | %d", sk_rmem_alloc_get(sk));
3989 i += scnprintf(buf + i, sz - i, " %d", sk->sk_rcvbuf);
3990 i += scnprintf(buf + i, sz - i, " | %d\n", READ_ONCE(sk->sk_backlog.len));
3991
3992 if (dqueues & TIPC_DUMP_SK_SNDQ) {
3993 i += scnprintf(buf + i, sz - i, "sk_write_queue: ");
3994 i += tipc_list_dump(&sk->sk_write_queue, false, buf + i);
3995 }
3996
3997 if (dqueues & TIPC_DUMP_SK_RCVQ) {
3998 i += scnprintf(buf + i, sz - i, "sk_receive_queue: ");
3999 i += tipc_list_dump(&sk->sk_receive_queue, false, buf + i);
4000 }
4001
4002 if (dqueues & TIPC_DUMP_SK_BKLGQ) {
4003 i += scnprintf(buf + i, sz - i, "sk_backlog:\n head ");
4004 i += tipc_skb_dump(sk->sk_backlog.head, false, buf + i);
4005 if (sk->sk_backlog.tail != sk->sk_backlog.head) {
4006 i += scnprintf(buf + i, sz - i, " tail ");
4007 i += tipc_skb_dump(sk->sk_backlog.tail, false,
4008 buf + i);
4009 }
4010 }
4011
4012 return i;
4013}
37#include <linux/rhashtable.h>
38#include "core.h"
39#include "name_table.h"
40#include "node.h"
41#include "link.h"
42#include "name_distr.h"
43#include "socket.h"
44#include "bcast.h"
45#include "netlink.h"
46
47#define SS_LISTENING -1 /* socket is listening */
48#define SS_READY -2 /* socket is connectionless */
49
50#define CONN_TIMEOUT_DEFAULT 8000 /* default connect timeout = 8s */
51#define CONN_PROBING_INTERVAL msecs_to_jiffies(3600000) /* [ms] => 1 h */
52#define TIPC_FWD_MSG 1
53#define TIPC_CONN_OK 0
54#define TIPC_CONN_PROBING 1
55#define TIPC_MAX_PORT 0xffffffff
56#define TIPC_MIN_PORT 1
57
58/**
59 * struct tipc_sock - TIPC socket structure
60 * @sk: socket - interacts with 'port' and with user via the socket API
61 * @connected: non-zero if port is currently connected to a peer port
62 * @conn_type: TIPC type used when connection was established
63 * @conn_instance: TIPC instance used when connection was established
64 * @published: non-zero if port has one or more associated names
65 * @max_pkt: maximum packet size "hint" used when building messages sent by port
66 * @portid: unique port identity in TIPC socket hash table
67 * @phdr: preformatted message header used when sending messages
68 * @sock_list: adjacent sockets in TIPC's global list of sockets
69 * @publications: list of publications for port
70 * @pub_count: total # of publications port has made during its lifetime
71 * @probing_state: state of connection probing (TIPC_CONN_OK or TIPC_CONN_PROBING)
72 * @probing_intv: interval between connection probe messages
73 * @conn_timeout: the time we can wait for an unresponded setup request
74 * @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue
75 * @link_cong: non-zero if owner must sleep because of link congestion
76 * @sent_unacked: # messages sent by socket, and not yet acked by peer
77 * @rcv_unacked: # messages read by user, but not yet acked back to peer
78 * @remote: 'connected' peer for dgram/rdm
79 * @node: hash table node
80 * @rcu: rcu struct for tipc_sock
81 */
82struct tipc_sock {
83 struct sock sk;
84 int connected;
85 u32 conn_type;
86 u32 conn_instance;
87 int published;
88 u32 max_pkt;
89 u32 portid;
90 struct tipc_msg phdr;
91 struct list_head sock_list;
92 struct list_head publications;
93 u32 pub_count;
94 u32 probing_state;
95 unsigned long probing_intv;
96 uint conn_timeout;
97 atomic_t dupl_rcvcnt;
98 bool link_cong;
99 uint sent_unacked;
100 uint rcv_unacked;
101 struct sockaddr_tipc remote;
102 struct rhash_head node;
103 struct rcu_head rcu;
104};
105
106static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb);
107static void tipc_data_ready(struct sock *sk);
108static void tipc_write_space(struct sock *sk);
109static void tipc_sock_destruct(struct sock *sk);
110static int tipc_release(struct socket *sock);
111static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags);
112static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p);
113static void tipc_sk_timeout(unsigned long data);
114static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
115 struct tipc_name_seq const *seq);
116static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
117 struct tipc_name_seq const *seq);
118static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid);
119static int tipc_sk_insert(struct tipc_sock *tsk);
120static void tipc_sk_remove(struct tipc_sock *tsk);
121static int __tipc_send_stream(struct socket *sock, struct msghdr *m,
122 size_t dsz);
123static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz);
124
125static const struct proto_ops packet_ops;
126static const struct proto_ops stream_ops;
127static const struct proto_ops msg_ops;
128static struct proto tipc_proto;
129
130static const struct rhashtable_params tsk_rht_params;
131
132/*
133 * Revised TIPC socket locking policy:
134 *
135 * Most socket operations take the standard socket lock when they start
136 * and hold it until they finish (or until they need to sleep). Acquiring
137 * this lock grants the owner exclusive access to the fields of the socket
138 * data structures, with the exception of the backlog queue. A few socket
139 * operations can be done without taking the socket lock because they only
140 * read socket information that never changes during the life of the socket.
141 *
142 * Socket operations may acquire the lock for the associated TIPC port if they
143 * need to perform an operation on the port. If any routine needs to acquire
144 * both the socket lock and the port lock it must take the socket lock first
145 * to avoid the risk of deadlock.
146 *
147 * The dispatcher handling incoming messages cannot grab the socket lock in
148 * the standard fashion, since it is invoked at the BH level and cannot block.
149 * Instead, it checks to see if the socket lock is currently owned by someone,
150 * and either handles the message itself or adds it to the socket's backlog
151 * queue; in the latter case the queued message is processed once the process
152 * owning the socket lock releases it.
153 *
154 * NOTE: Releasing the socket lock while an operation is sleeping overcomes
155 * the problem of a blocked socket operation preventing any other operations
156 * from occurring. However, applications must be careful if they have
157 * multiple threads trying to send (or receive) on the same socket, as these
158 * operations might interfere with each other. For example, doing a connect
159 * and a receive at the same time might allow the receive to consume the
160 * ACK message meant for the connect. While additional work could be done
161 * to try to overcome this, it doesn't seem worthwhile at present.
162 *
163 * NOTE: Releasing the socket lock while an operation is sleeping also ensures
164 * that another operation that must be performed in a non-blocking manner is
165 * not delayed for very long because the lock has already been taken.
166 *
167 * NOTE: This code assumes that certain fields of a port/socket pair are
168 * constant over its lifetime; such fields can be examined without taking
169 * the socket lock and/or port lock, and do not need to be re-read even
170 * after resuming processing after waiting. These fields include:
171 * - socket type
172 * - pointer to socket sk structure (aka tipc_sock structure)
173 * - pointer to port structure
174 * - port reference
175 */
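
/* A minimal sketch of the pattern described above: process context
 * serializes through lock_sock()/release_sock(), while the BH-level
 * dispatcher only ever attempts spin_trylock_bh(&sk->sk_lock.slock)
 * and otherwise defers work to the backlog queue; see tipc_sk_rcv()
 * and tipc_sk_enqueue() further down for the receive-side half.
 */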
176
177static u32 tsk_own_node(struct tipc_sock *tsk)
178{
179 return msg_prevnode(&tsk->phdr);
180}
181
182static u32 tsk_peer_node(struct tipc_sock *tsk)
183{
184 return msg_destnode(&tsk->phdr);
185}
186
187static u32 tsk_peer_port(struct tipc_sock *tsk)
188{
189 return msg_destport(&tsk->phdr);
190}
191
192static bool tsk_unreliable(struct tipc_sock *tsk)
193{
194 return msg_src_droppable(&tsk->phdr) != 0;
195}
196
197static void tsk_set_unreliable(struct tipc_sock *tsk, bool unreliable)
198{
199 msg_set_src_droppable(&tsk->phdr, unreliable ? 1 : 0);
200}
201
202static bool tsk_unreturnable(struct tipc_sock *tsk)
203{
204 return msg_dest_droppable(&tsk->phdr) != 0;
205}
206
207static void tsk_set_unreturnable(struct tipc_sock *tsk, bool unreturnable)
208{
209 msg_set_dest_droppable(&tsk->phdr, unreturnable ? 1 : 0);
210}
211
212static int tsk_importance(struct tipc_sock *tsk)
213{
214 return msg_importance(&tsk->phdr);
215}
216
217static int tsk_set_importance(struct tipc_sock *tsk, int imp)
218{
219 if (imp > TIPC_CRITICAL_IMPORTANCE)
220 return -EINVAL;
221 msg_set_importance(&tsk->phdr, (u32)imp);
222 return 0;
223}
224
225static struct tipc_sock *tipc_sk(const struct sock *sk)
226{
227 return container_of(sk, struct tipc_sock, sk);
228}
229
230static int tsk_conn_cong(struct tipc_sock *tsk)
231{
232 return tsk->sent_unacked >= TIPC_FLOWCTRL_WIN;
233}
234
235/**
236 * tsk_advance_rx_queue - discard first buffer in socket receive queue
237 *
238 * Caller must hold socket lock
239 */
240static void tsk_advance_rx_queue(struct sock *sk)
241{
242 kfree_skb(__skb_dequeue(&sk->sk_receive_queue));
243}
244
245/* tipc_sk_respond() : send response message back to sender
246 */
247static void tipc_sk_respond(struct sock *sk, struct sk_buff *skb, int err)
248{
249 u32 selector;
250 u32 dnode;
251 u32 onode = tipc_own_addr(sock_net(sk));
252
253 if (!tipc_msg_reverse(onode, &skb, err))
254 return;
255
256 dnode = msg_destnode(buf_msg(skb));
257 selector = msg_origport(buf_msg(skb));
258 tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector);
259}
260
261/**
262 * tsk_rej_rx_queue - reject all buffers in socket receive queue
263 *
264 * Caller must hold socket lock
265 */
266static void tsk_rej_rx_queue(struct sock *sk)
267{
268 struct sk_buff *skb;
269
270 while ((skb = __skb_dequeue(&sk->sk_receive_queue)))
271 tipc_sk_respond(sk, skb, TIPC_ERR_NO_PORT);
272}
273
274/* tsk_peer_msg - verify if message was sent by connected port's peer
275 *
276 * Handles cases where the node's network address has changed from
277 * the default of <0.0.0> to its configured setting.
278 */
279static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg)
280{
281 struct tipc_net *tn = net_generic(sock_net(&tsk->sk), tipc_net_id);
282 u32 peer_port = tsk_peer_port(tsk);
283 u32 orig_node;
284 u32 peer_node;
285
286 if (unlikely(!tsk->connected))
287 return false;
288
289 if (unlikely(msg_origport(msg) != peer_port))
290 return false;
291
292 orig_node = msg_orignode(msg);
293 peer_node = tsk_peer_node(tsk);
294
295 if (likely(orig_node == peer_node))
296 return true;
297
298 if (!orig_node && (peer_node == tn->own_addr))
299 return true;
300
301 if (!peer_node && (orig_node == tn->own_addr))
302 return true;
303
304 return false;
305}
306
307/**
308 * tipc_sk_create - create a TIPC socket
309 * @net: network namespace (must be default network)
310 * @sock: pre-allocated socket structure
311 * @protocol: protocol indicator (must be 0)
312 * @kern: caused by kernel or by userspace?
313 *
314 * This routine creates additional data structures used by the TIPC socket,
315 * initializes them, and links them together.
316 *
317 * Returns 0 on success, errno otherwise
318 */
319static int tipc_sk_create(struct net *net, struct socket *sock,
320 int protocol, int kern)
321{
322 struct tipc_net *tn;
323 const struct proto_ops *ops;
324 socket_state state;
325 struct sock *sk;
326 struct tipc_sock *tsk;
327 struct tipc_msg *msg;
328
329 /* Validate arguments */
330 if (unlikely(protocol != 0))
331 return -EPROTONOSUPPORT;
332
333 switch (sock->type) {
334 case SOCK_STREAM:
335 ops = &stream_ops;
336 state = SS_UNCONNECTED;
337 break;
338 case SOCK_SEQPACKET:
339 ops = &packet_ops;
340 state = SS_UNCONNECTED;
341 break;
342 case SOCK_DGRAM:
343 case SOCK_RDM:
344 ops = &msg_ops;
345 state = SS_READY;
346 break;
347 default:
348 return -EPROTOTYPE;
349 }
350
351 /* Allocate socket's protocol area */
352 sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto, kern);
353 if (sk == NULL)
354 return -ENOMEM;
355
356 tsk = tipc_sk(sk);
357 tsk->max_pkt = MAX_PKT_DEFAULT;
358 INIT_LIST_HEAD(&tsk->publications);
359 msg = &tsk->phdr;
360 tn = net_generic(sock_net(sk), tipc_net_id);
361 tipc_msg_init(tn->own_addr, msg, TIPC_LOW_IMPORTANCE, TIPC_NAMED_MSG,
362 NAMED_H_SIZE, 0);
363
364 /* Finish initializing socket data structures */
365 sock->ops = ops;
366 sock->state = state;
367 sock_init_data(sock, sk);
368 if (tipc_sk_insert(tsk)) {
369 pr_warn("Socket create failed; port number exhausted\n");
370 return -EINVAL;
371 }
372 msg_set_origport(msg, tsk->portid);
373 setup_timer(&sk->sk_timer, tipc_sk_timeout, (unsigned long)tsk);
374 sk->sk_backlog_rcv = tipc_backlog_rcv;
375 sk->sk_rcvbuf = sysctl_tipc_rmem[1];
376 sk->sk_data_ready = tipc_data_ready;
377 sk->sk_write_space = tipc_write_space;
378 sk->sk_destruct = tipc_sock_destruct;
379 tsk->conn_timeout = CONN_TIMEOUT_DEFAULT;
380 tsk->sent_unacked = 0;
381 atomic_set(&tsk->dupl_rcvcnt, 0);
382
383 if (sock->state == SS_READY) {
384 tsk_set_unreturnable(tsk, true);
385 if (sock->type == SOCK_DGRAM)
386 tsk_set_unreliable(tsk, true);
387 }
388 return 0;
389}
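
/* Userspace reaches this function through socket(2); an illustrative
 * call creating a connectionless, reliable TIPC socket:
 *
 *	int sd = socket(AF_TIPC, SOCK_RDM, 0);
 *
 * Any nonzero protocol argument is rejected with EPROTONOSUPPORT by
 * the validation at the top of the function.
 */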
390
391static void tipc_sk_callback(struct rcu_head *head)
392{
393 struct tipc_sock *tsk = container_of(head, struct tipc_sock, rcu);
394
395 sock_put(&tsk->sk);
396}
397
398/**
399 * tipc_release - destroy a TIPC socket
400 * @sock: socket to destroy
401 *
402 * This routine cleans up any messages that are still queued on the socket.
403 * For DGRAM and RDM socket types, all queued messages are rejected.
404 * For SEQPACKET and STREAM socket types, the first message is rejected
405 * and any others are discarded. (If the first message on a STREAM socket
406 * is partially-read, it is discarded and the next one is rejected instead.)
407 *
408 * NOTE: Rejected messages are not necessarily returned to the sender! They
409 * are returned or discarded according to the "destination droppable" setting
410 * specified for the message by the sender.
411 *
412 * Returns 0 on success, errno otherwise
413 */
414static int tipc_release(struct socket *sock)
415{
416 struct sock *sk = sock->sk;
417 struct net *net;
418 struct tipc_sock *tsk;
419 struct sk_buff *skb;
420 u32 dnode;
421
422 /*
423 * Exit if socket isn't fully initialized (occurs when a failed accept()
424 * releases a pre-allocated child socket that was never used)
425 */
426 if (sk == NULL)
427 return 0;
428
429 net = sock_net(sk);
430 tsk = tipc_sk(sk);
431 lock_sock(sk);
432
433 /*
434 * Reject all unreceived messages, except on an active connection
435 * (which disconnects locally & sends a 'FIN+' to peer)
436 */
437 dnode = tsk_peer_node(tsk);
438 while (sock->state != SS_DISCONNECTING) {
439 skb = __skb_dequeue(&sk->sk_receive_queue);
440 if (skb == NULL)
441 break;
442 if (TIPC_SKB_CB(skb)->handle != NULL) {
443 kfree_skb(skb);
444 } else {
445 if ((sock->state == SS_CONNECTING) ||
446 (sock->state == SS_CONNECTED)) {
447 sock->state = SS_DISCONNECTING;
448 tsk->connected = 0;
449 tipc_node_remove_conn(net, dnode, tsk->portid);
450 }
451 tipc_sk_respond(sk, skb, TIPC_ERR_NO_PORT);
452 }
453 }
454
455 tipc_sk_withdraw(tsk, 0, NULL);
456 sk_stop_timer(sk, &sk->sk_timer);
457 tipc_sk_remove(tsk);
458 if (tsk->connected) {
459 skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
460 TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode,
461 tsk_own_node(tsk), tsk_peer_port(tsk),
462 tsk->portid, TIPC_ERR_NO_PORT);
463 if (skb)
464 tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
465 tipc_node_remove_conn(net, dnode, tsk->portid);
466 }
467
468 /* Reject any messages that accumulated in backlog queue */
469 sock->state = SS_DISCONNECTING;
470 release_sock(sk);
471
472 call_rcu(&tsk->rcu, tipc_sk_callback);
473 sock->sk = NULL;
474
475 return 0;
476}
477
478/**
479 * tipc_bind - associate or disassociate TIPC name(s) with a socket
480 * @sock: socket structure
481 * @uaddr: socket address describing name(s) and desired operation
482 * @uaddr_len: size of socket address data structure
483 *
484 * Name and name sequence binding is indicated using a positive scope value;
485 * a negative scope value unbinds the specified name. Specifying no name
486 * (i.e. a socket address length of 0) unbinds all names from the socket.
487 *
488 * Returns 0 on success, errno otherwise
489 *
490 * NOTE: This routine doesn't need to take the socket lock since it doesn't
491 * access any non-constant socket information.
492 */
493static int tipc_bind(struct socket *sock, struct sockaddr *uaddr,
494 int uaddr_len)
495{
496 struct sock *sk = sock->sk;
497 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
498 struct tipc_sock *tsk = tipc_sk(sk);
499 int res = -EINVAL;
500
501 lock_sock(sk);
502 if (unlikely(!uaddr_len)) {
503 res = tipc_sk_withdraw(tsk, 0, NULL);
504 goto exit;
505 }
506
507 if (uaddr_len < sizeof(struct sockaddr_tipc)) {
508 res = -EINVAL;
509 goto exit;
510 }
511 if (addr->family != AF_TIPC) {
512 res = -EAFNOSUPPORT;
513 goto exit;
514 }
515
516 if (addr->addrtype == TIPC_ADDR_NAME)
517 addr->addr.nameseq.upper = addr->addr.nameseq.lower;
518 else if (addr->addrtype != TIPC_ADDR_NAMESEQ) {
519 res = -EAFNOSUPPORT;
520 goto exit;
521 }
522
523 if ((addr->addr.nameseq.type < TIPC_RESERVED_TYPES) &&
524 (addr->addr.nameseq.type != TIPC_TOP_SRV) &&
525 (addr->addr.nameseq.type != TIPC_CFG_SRV)) {
526 res = -EACCES;
527 goto exit;
528 }
529
530 res = (addr->scope > 0) ?
531 tipc_sk_publish(tsk, addr->scope, &addr->addr.nameseq) :
532 tipc_sk_withdraw(tsk, -addr->scope, &addr->addr.nameseq);
533exit:
534 release_sock(sk);
535 return res;
536}
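
/* An illustrative bind from userspace (a sketch; the type and range
 * values are arbitrary examples): publish name sequence {1000, 0, 99}
 * with cluster scope, then withdraw it again via a negated scope:
 *
 *	struct sockaddr_tipc addr = {
 *		.family = AF_TIPC,
 *		.addrtype = TIPC_ADDR_NAMESEQ,
 *		.scope = TIPC_CLUSTER_SCOPE,
 *		.addr.nameseq = { .type = 1000, .lower = 0, .upper = 99 },
 *	};
 *
 *	bind(sd, (struct sockaddr *)&addr, sizeof(addr));
 *	addr.scope = -TIPC_CLUSTER_SCOPE;
 *	bind(sd, (struct sockaddr *)&addr, sizeof(addr));
 */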
537
538/**
539 * tipc_getname - get port ID of socket or peer socket
540 * @sock: socket structure
541 * @uaddr: area for returned socket address
542 * @uaddr_len: area for returned length of socket address
543 * @peer: 0 = own ID, 1 = current peer ID, 2 = current/former peer ID
544 *
545 * Returns 0 on success, errno otherwise
546 *
547 * NOTE: This routine doesn't need to take the socket lock since it only
548 * accesses socket information that is unchanging (or which changes in
549 * a completely predictable manner).
550 */
551static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
552 int *uaddr_len, int peer)
553{
554 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
555 struct tipc_sock *tsk = tipc_sk(sock->sk);
556 struct tipc_net *tn = net_generic(sock_net(sock->sk), tipc_net_id);
557
558 memset(addr, 0, sizeof(*addr));
559 if (peer) {
560 if ((sock->state != SS_CONNECTED) &&
561 ((peer != 2) || (sock->state != SS_DISCONNECTING)))
562 return -ENOTCONN;
563 addr->addr.id.ref = tsk_peer_port(tsk);
564 addr->addr.id.node = tsk_peer_node(tsk);
565 } else {
566 addr->addr.id.ref = tsk->portid;
567 addr->addr.id.node = tn->own_addr;
568 }
569
570 *uaddr_len = sizeof(*addr);
571 addr->addrtype = TIPC_ADDR_ID;
572 addr->family = AF_TIPC;
573 addr->scope = 0;
574 addr->addr.name.domain = 0;
575
576 return 0;
577}
578
579/**
580 * tipc_poll - read and possibly block on pollmask
581 * @file: file structure associated with the socket
582 * @sock: socket for which to calculate the poll bits
583 * @wait: poll table used to register the caller for wakeups
584 *
585 * Returns pollmask value
586 *
587 * COMMENTARY:
588 * It appears that the usual socket locking mechanisms are not useful here
589 * since the pollmask info is potentially out-of-date the moment this routine
590 * exits. TCP and other protocols seem to rely on higher level poll routines
591 * to handle any preventable race conditions, so TIPC will do the same ...
592 *
593 * TIPC sets the returned events as follows:
594 *
595 * socket state flags set
596 * ------------ ---------
597 * unconnected no read flags
598 * POLLOUT if port is not congested
599 *
600 * connecting POLLIN/POLLRDNORM if ACK/NACK in rx queue
601 * no write flags
602 *
603 * connected POLLIN/POLLRDNORM if data in rx queue
604 * POLLOUT if port is not congested
605 *
606 * disconnecting POLLIN/POLLRDNORM/POLLHUP
607 * no write flags
608 *
609 * listening POLLIN if SYN in rx queue
610 * no write flags
611 *
612 * ready POLLIN/POLLRDNORM if data in rx queue
613 * [connectionless] POLLOUT (since port cannot be congested)
614 *
615 * IMPORTANT: The fact that a read or write operation is indicated does NOT
616 * imply that the operation will succeed, merely that it should be performed
617 * and will not block.
618 */
619static unsigned int tipc_poll(struct file *file, struct socket *sock,
620 poll_table *wait)
621{
622 struct sock *sk = sock->sk;
623 struct tipc_sock *tsk = tipc_sk(sk);
624 u32 mask = 0;
625
626 sock_poll_wait(file, sk_sleep(sk), wait);
627
628 switch ((int)sock->state) {
629 case SS_UNCONNECTED:
630 if (!tsk->link_cong)
631 mask |= POLLOUT;
632 break;
633 case SS_READY:
634 case SS_CONNECTED:
635 if (!tsk->link_cong && !tsk_conn_cong(tsk))
636 mask |= POLLOUT;
637 /* fall through */
638 case SS_CONNECTING:
639 case SS_LISTENING:
640 if (!skb_queue_empty(&sk->sk_receive_queue))
641 mask |= (POLLIN | POLLRDNORM);
642 break;
643 case SS_DISCONNECTING:
644 mask = (POLLIN | POLLRDNORM | POLLHUP);
645 break;
646 }
647
648 return mask;
649}
650
651/**
652 * tipc_sendmcast - send multicast message
653 * @sock: socket structure
654 * @seq: destination address
655 * @msg: message to send
656 * @dsz: total length of message data
657 * @timeo: timeout to wait for wakeup
658 *
659 * Called from function tipc_sendmsg(), which has done all sanity checks
660 * Returns the number of bytes sent on success, or errno
661 */
662static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq,
663 struct msghdr *msg, size_t dsz, long timeo)
664{
665 struct sock *sk = sock->sk;
666 struct tipc_sock *tsk = tipc_sk(sk);
667 struct net *net = sock_net(sk);
668 struct tipc_msg *mhdr = &tsk->phdr;
669 struct sk_buff_head pktchain;
670 struct iov_iter save = msg->msg_iter;
671 uint mtu;
672 int rc;
673
674 msg_set_type(mhdr, TIPC_MCAST_MSG);
675 msg_set_lookup_scope(mhdr, TIPC_CLUSTER_SCOPE);
676 msg_set_destport(mhdr, 0);
677 msg_set_destnode(mhdr, 0);
678 msg_set_nametype(mhdr, seq->type);
679 msg_set_namelower(mhdr, seq->lower);
680 msg_set_nameupper(mhdr, seq->upper);
681 msg_set_hdr_sz(mhdr, MCAST_H_SIZE);
682
683 skb_queue_head_init(&pktchain);
684
685new_mtu:
686 mtu = tipc_bcast_get_mtu(net);
687 rc = tipc_msg_build(mhdr, msg, 0, dsz, mtu, &pktchain);
688 if (unlikely(rc < 0))
689 return rc;
690
691 do {
692 rc = tipc_bcast_xmit(net, &pktchain);
693 if (likely(!rc))
694 return dsz;
695
696 if (rc == -ELINKCONG) {
697 tsk->link_cong = 1;
698 rc = tipc_wait_for_sndmsg(sock, &timeo);
699 if (!rc)
700 continue;
701 }
702 __skb_queue_purge(&pktchain);
703 if (rc == -EMSGSIZE) {
704 msg->msg_iter = save;
705 goto new_mtu;
706 }
707 break;
708 } while (1);
709 return rc;
710}
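
/* An illustrative multicast send from userspace (a sketch; the range
 * values are arbitrary examples): a single sendto() delivers a copy to
 * every socket bound within {1000, 0..99} anywhere in the cluster:
 *
 *	struct sockaddr_tipc dst = {
 *		.family = AF_TIPC,
 *		.addrtype = TIPC_ADDR_MCAST,
 *		.addr.nameseq = { .type = 1000, .lower = 0, .upper = 99 },
 *	};
 *
 *	sendto(sd, buf, len, 0, (struct sockaddr *)&dst, sizeof(dst));
 */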
711
712/**
713 * tipc_sk_mcast_rcv - Deliver multicast messages to all destination sockets
714 * @arrvq: queue with arriving messages, to be cloned after destination lookup
715 * @inputq: queue with cloned messages, delivered to socket after dest lookup
716 *
717 * Multi-threaded: parallel calls with reference to same queues may occur
718 */
719void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
720 struct sk_buff_head *inputq)
721{
722 struct tipc_msg *msg;
723 struct tipc_plist dports;
724 u32 portid;
725 u32 scope = TIPC_CLUSTER_SCOPE;
726 struct sk_buff_head tmpq;
727 uint hsz;
728 struct sk_buff *skb, *_skb;
729
730 __skb_queue_head_init(&tmpq);
731 tipc_plist_init(&dports);
732
733 skb = tipc_skb_peek(arrvq, &inputq->lock);
734 for (; skb; skb = tipc_skb_peek(arrvq, &inputq->lock)) {
735 msg = buf_msg(skb);
736 hsz = skb_headroom(skb) + msg_hdr_sz(msg);
737
738 if (in_own_node(net, msg_orignode(msg)))
739 scope = TIPC_NODE_SCOPE;
740
741 /* Create destination port list and message clones: */
742 tipc_nametbl_mc_translate(net,
743 msg_nametype(msg), msg_namelower(msg),
744 msg_nameupper(msg), scope, &dports);
745 portid = tipc_plist_pop(&dports);
746 for (; portid; portid = tipc_plist_pop(&dports)) {
747 _skb = __pskb_copy(skb, hsz, GFP_ATOMIC);
748 if (_skb) {
749 msg_set_destport(buf_msg(_skb), portid);
750 __skb_queue_tail(&tmpq, _skb);
751 continue;
752 }
753 pr_warn("Failed to clone mcast rcv buffer\n");
754 }
755 /* Append to inputq if not already done by other thread */
756 spin_lock_bh(&inputq->lock);
757 if (skb_peek(arrvq) == skb) {
758 skb_queue_splice_tail_init(&tmpq, inputq);
759 kfree_skb(__skb_dequeue(arrvq));
760 }
761 spin_unlock_bh(&inputq->lock);
762 __skb_queue_purge(&tmpq);
763 kfree_skb(skb);
764 }
765 tipc_sk_rcv(net, inputq);
766}
767
768/**
769 * tipc_sk_proto_rcv - receive a connection manager protocol message
770 * @tsk: receiving socket
771 * @skb: pointer to message buffer.
772 */
773static void tipc_sk_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb)
774{
775 struct sock *sk = &tsk->sk;
776 struct tipc_msg *hdr = buf_msg(skb);
777 int mtyp = msg_type(hdr);
778 int conn_cong;
779
780 /* Ignore if connection cannot be validated: */
781 if (!tsk_peer_msg(tsk, hdr))
782 goto exit;
783
784 tsk->probing_state = TIPC_CONN_OK;
785
786 if (mtyp == CONN_PROBE) {
787 msg_set_type(hdr, CONN_PROBE_REPLY);
788 tipc_sk_respond(sk, skb, TIPC_OK);
789 return;
790 } else if (mtyp == CONN_ACK) {
791 conn_cong = tsk_conn_cong(tsk);
792 tsk->sent_unacked -= msg_msgcnt(hdr);
793 if (conn_cong)
794 sk->sk_write_space(sk);
795 } else if (mtyp != CONN_PROBE_REPLY) {
796 pr_warn("Received unknown CONN_PROTO msg\n");
797 }
798exit:
799 kfree_skb(skb);
800}
801
802static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p)
803{
804 struct sock *sk = sock->sk;
805 struct tipc_sock *tsk = tipc_sk(sk);
806 DEFINE_WAIT(wait);
807 int done;
808
809 do {
810 int err = sock_error(sk);
811 if (err)
812 return err;
813 if (sock->state == SS_DISCONNECTING)
814 return -EPIPE;
815 if (!*timeo_p)
816 return -EAGAIN;
817 if (signal_pending(current))
818 return sock_intr_errno(*timeo_p);
819
820 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
821 done = sk_wait_event(sk, timeo_p, !tsk->link_cong);
822 finish_wait(sk_sleep(sk), &wait);
823 } while (!done);
824 return 0;
825}
826
827/**
828 * tipc_sendmsg - send message in connectionless manner
829 * @sock: socket structure
830 * @m: message to send
831 * @dsz: amount of user data to be sent
832 *
833 * Message must have a destination specified explicitly.
834 * Used for SOCK_RDM and SOCK_DGRAM messages,
835 * and for 'SYN' messages on SOCK_SEQPACKET and SOCK_STREAM connections.
836 * (Note: 'SYN+' is prohibited on SOCK_STREAM.)
837 *
838 * Returns the number of bytes sent on success, or errno otherwise
839 */
840static int tipc_sendmsg(struct socket *sock,
841 struct msghdr *m, size_t dsz)
842{
843 struct sock *sk = sock->sk;
844 int ret;
845
846 lock_sock(sk);
847 ret = __tipc_sendmsg(sock, m, dsz);
848 release_sock(sk);
849
850 return ret;
851}
852
853static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz)
854{
855 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
856 struct sock *sk = sock->sk;
857 struct tipc_sock *tsk = tipc_sk(sk);
858 struct net *net = sock_net(sk);
859 struct tipc_msg *mhdr = &tsk->phdr;
860 u32 dnode, dport;
861 struct sk_buff_head pktchain;
862 struct sk_buff *skb;
863 struct tipc_name_seq *seq;
864 struct iov_iter save;
865 u32 mtu;
866 long timeo;
867 int rc;
868
869 if (dsz > TIPC_MAX_USER_MSG_SIZE)
870 return -EMSGSIZE;
871 if (unlikely(!dest)) {
872 if (tsk->connected && sock->state == SS_READY)
873 dest = &tsk->remote;
874 else
875 return -EDESTADDRREQ;
876 } else if (unlikely(m->msg_namelen < sizeof(*dest)) ||
877 dest->family != AF_TIPC) {
878 return -EINVAL;
879 }
880 if (unlikely(sock->state != SS_READY)) {
881 if (sock->state == SS_LISTENING)
882 return -EPIPE;
883 if (sock->state != SS_UNCONNECTED)
884 return -EISCONN;
885 if (tsk->published)
886 return -EOPNOTSUPP;
887 if (dest->addrtype == TIPC_ADDR_NAME) {
888 tsk->conn_type = dest->addr.name.name.type;
889 tsk->conn_instance = dest->addr.name.name.instance;
890 }
891 }
892 seq = &dest->addr.nameseq;
893 timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
894
895 if (dest->addrtype == TIPC_ADDR_MCAST) {
896 return tipc_sendmcast(sock, seq, m, dsz, timeo);
897 } else if (dest->addrtype == TIPC_ADDR_NAME) {
898 u32 type = dest->addr.name.name.type;
899 u32 inst = dest->addr.name.name.instance;
900 u32 domain = dest->addr.name.domain;
901
902 dnode = domain;
903 msg_set_type(mhdr, TIPC_NAMED_MSG);
904 msg_set_hdr_sz(mhdr, NAMED_H_SIZE);
905 msg_set_nametype(mhdr, type);
906 msg_set_nameinst(mhdr, inst);
907 msg_set_lookup_scope(mhdr, tipc_addr_scope(domain));
908 dport = tipc_nametbl_translate(net, type, inst, &dnode);
909 msg_set_destnode(mhdr, dnode);
910 msg_set_destport(mhdr, dport);
911 if (unlikely(!dport && !dnode))
912 return -EHOSTUNREACH;
913 } else if (dest->addrtype == TIPC_ADDR_ID) {
914 dnode = dest->addr.id.node;
915 msg_set_type(mhdr, TIPC_DIRECT_MSG);
916 msg_set_lookup_scope(mhdr, 0);
917 msg_set_destnode(mhdr, dnode);
918 msg_set_destport(mhdr, dest->addr.id.ref);
919 msg_set_hdr_sz(mhdr, BASIC_H_SIZE);
920 }
921
922 skb_queue_head_init(&pktchain);
923 save = m->msg_iter;
924new_mtu:
925 mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
926 rc = tipc_msg_build(mhdr, m, 0, dsz, mtu, &pktchain);
927 if (rc < 0)
928 return rc;
929
930 do {
931 skb = skb_peek(&pktchain);
932 TIPC_SKB_CB(skb)->wakeup_pending = tsk->link_cong;
933 rc = tipc_node_xmit(net, &pktchain, dnode, tsk->portid);
934 if (likely(!rc)) {
935 if (sock->state != SS_READY)
936 sock->state = SS_CONNECTING;
937 return dsz;
938 }
939 if (rc == -ELINKCONG) {
940 tsk->link_cong = 1;
941 rc = tipc_wait_for_sndmsg(sock, &timeo);
942 if (!rc)
943 continue;
944 }
945 __skb_queue_purge(&pktchain);
946 if (rc == -EMSGSIZE) {
947 m->msg_iter = save;
948 goto new_mtu;
949 }
950 break;
951 } while (1);
952
953 return rc;
954}
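
/* An illustrative connectionless send to a named service (a sketch;
 * the type/instance values are arbitrary examples). A domain of 0
 * requests the widest possible name lookup scope:
 *
 *	struct sockaddr_tipc dst = {
 *		.family = AF_TIPC,
 *		.addrtype = TIPC_ADDR_NAME,
 *		.addr.name.name = { .type = 1000, .instance = 42 },
 *		.addr.name.domain = 0,
 *	};
 *
 *	sendto(sd, buf, len, 0, (struct sockaddr *)&dst, sizeof(dst));
 */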
955
956static int tipc_wait_for_sndpkt(struct socket *sock, long *timeo_p)
957{
958 struct sock *sk = sock->sk;
959 struct tipc_sock *tsk = tipc_sk(sk);
960 DEFINE_WAIT(wait);
961 int done;
962
963 do {
964 int err = sock_error(sk);
965 if (err)
966 return err;
967 if (sock->state == SS_DISCONNECTING)
968 return -EPIPE;
969 else if (sock->state != SS_CONNECTED)
970 return -ENOTCONN;
971 if (!*timeo_p)
972 return -EAGAIN;
973 if (signal_pending(current))
974 return sock_intr_errno(*timeo_p);
975
976 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
977 done = sk_wait_event(sk, timeo_p,
978 (!tsk->link_cong &&
979 !tsk_conn_cong(tsk)) ||
980 !tsk->connected);
981 finish_wait(sk_sleep(sk), &wait);
982 } while (!done);
983 return 0;
984}
985
986/**
987 * tipc_send_stream - send stream-oriented data
988 * @sock: socket structure
989 * @m: data to send
990 * @dsz: total length of data to be transmitted
991 *
992 * Used for SOCK_STREAM data.
993 *
994 * Returns the number of bytes sent on success (or partial success),
995 * or errno if no data sent
996 */
997static int tipc_send_stream(struct socket *sock, struct msghdr *m, size_t dsz)
998{
999 struct sock *sk = sock->sk;
1000 int ret;
1001
1002 lock_sock(sk);
1003 ret = __tipc_send_stream(sock, m, dsz);
1004 release_sock(sk);
1005
1006 return ret;
1007}
1008
1009static int __tipc_send_stream(struct socket *sock, struct msghdr *m, size_t dsz)
1010{
1011 struct sock *sk = sock->sk;
1012 struct net *net = sock_net(sk);
1013 struct tipc_sock *tsk = tipc_sk(sk);
1014 struct tipc_msg *mhdr = &tsk->phdr;
1015 struct sk_buff_head pktchain;
1016 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
1017 u32 portid = tsk->portid;
1018 int rc = -EINVAL;
1019 long timeo;
1020 u32 dnode;
1021 uint mtu, send, sent = 0;
1022 struct iov_iter save;
1023
1024 /* Handle implied connection establishment */
1025 if (unlikely(dest)) {
1026 rc = __tipc_sendmsg(sock, m, dsz);
1027 if (dsz && (dsz == rc))
1028 tsk->sent_unacked = 1;
1029 return rc;
1030 }
1031 if (dsz > (uint)INT_MAX)
1032 return -EMSGSIZE;
1033
1034 if (unlikely(sock->state != SS_CONNECTED)) {
1035 if (sock->state == SS_DISCONNECTING)
1036 return -EPIPE;
1037 else
1038 return -ENOTCONN;
1039 }
1040
1041 timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
1042 dnode = tsk_peer_node(tsk);
1043 skb_queue_head_init(&pktchain);
1044
1045next:
1046 save = m->msg_iter;
1047 mtu = tsk->max_pkt;
1048 send = min_t(uint, dsz - sent, TIPC_MAX_USER_MSG_SIZE);
1049 rc = tipc_msg_build(mhdr, m, sent, send, mtu, &pktchain);
1050 if (unlikely(rc < 0))
1051 return rc;
1052
1053 do {
1054 if (likely(!tsk_conn_cong(tsk))) {
1055 rc = tipc_node_xmit(net, &pktchain, dnode, portid);
1056 if (likely(!rc)) {
1057 tsk->sent_unacked++;
1058 sent += send;
1059 if (sent == dsz)
1060 return dsz;
1061 goto next;
1062 }
1063 if (rc == -EMSGSIZE) {
1064 __skb_queue_purge(&pktchain);
1065 tsk->max_pkt = tipc_node_get_mtu(net, dnode,
1066 portid);
1067 m->msg_iter = save;
1068 goto next;
1069 }
1070 if (rc != -ELINKCONG)
1071 break;
1072
1073 tsk->link_cong = 1;
1074 }
1075 rc = tipc_wait_for_sndpkt(sock, &timeo);
1076 } while (!rc);
1077
1078 __skb_queue_purge(&pktchain);
1079 return sent ? sent : rc;
1080}
1081
1082/**
1083 * tipc_send_packet - send a connection-oriented message
1084 * @sock: socket structure
1085 * @m: message to send
1086 * @dsz: length of data to be transmitted
1087 *
1088 * Used for SOCK_SEQPACKET messages.
1089 *
1090 * Returns the number of bytes sent on success, or errno otherwise
1091 */
1092static int tipc_send_packet(struct socket *sock, struct msghdr *m, size_t dsz)
1093{
1094 if (dsz > TIPC_MAX_USER_MSG_SIZE)
1095 return -EMSGSIZE;
1096
1097 return tipc_send_stream(sock, m, dsz);
1098}
1099
1100/* tipc_sk_finish_conn - complete the setup of a connection
1101 */
1102static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port,
1103 u32 peer_node)
1104{
1105 struct sock *sk = &tsk->sk;
1106 struct net *net = sock_net(sk);
1107 struct tipc_msg *msg = &tsk->phdr;
1108
1109 msg_set_destnode(msg, peer_node);
1110 msg_set_destport(msg, peer_port);
1111 msg_set_type(msg, TIPC_CONN_MSG);
1112 msg_set_lookup_scope(msg, 0);
1113 msg_set_hdr_sz(msg, SHORT_H_SIZE);
1114
1115 tsk->probing_intv = CONN_PROBING_INTERVAL;
1116 tsk->probing_state = TIPC_CONN_OK;
1117 tsk->connected = 1;
1118 sk_reset_timer(sk, &sk->sk_timer, jiffies + tsk->probing_intv);
1119 tipc_node_add_conn(net, peer_node, tsk->portid, peer_port);
1120 tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid);
1121}
1122
1123/**
1124 * set_orig_addr - capture sender's address for received message
1125 * @m: descriptor for message info
1126 * @msg: received message header
1127 *
1128 * Note: Address is not captured if not requested by receiver.
1129 */
1130static void set_orig_addr(struct msghdr *m, struct tipc_msg *msg)
1131{
1132 DECLARE_SOCKADDR(struct sockaddr_tipc *, addr, m->msg_name);
1133
1134 if (addr) {
1135 addr->family = AF_TIPC;
1136 addr->addrtype = TIPC_ADDR_ID;
1137 memset(&addr->addr, 0, sizeof(addr->addr));
1138 addr->addr.id.ref = msg_origport(msg);
1139 addr->addr.id.node = msg_orignode(msg);
1140 addr->addr.name.domain = 0; /* could leave uninitialized */
1141 addr->scope = 0; /* could leave uninitialized */
1142 m->msg_namelen = sizeof(struct sockaddr_tipc);
1143 }
1144}
1145
1146/**
1147 * tipc_sk_anc_data_recv - optionally capture ancillary data for received message
1148 * @m: descriptor for message info
1149 * @msg: received message header
1150 * @tsk: TIPC port associated with message
1151 *
1152 * Note: Ancillary data is not captured if not requested by receiver.
1153 *
1154 * Returns 0 if successful, otherwise errno
1155 */
1156static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
1157 struct tipc_sock *tsk)
1158{
1159 u32 anc_data[3];
1160 u32 err;
1161 u32 dest_type;
1162 int has_name;
1163 int res;
1164
1165 if (likely(m->msg_controllen == 0))
1166 return 0;
1167
1168 /* Optionally capture errored message object(s) */
1169 err = msg ? msg_errcode(msg) : 0;
1170 if (unlikely(err)) {
1171 anc_data[0] = err;
1172 anc_data[1] = msg_data_sz(msg);
1173 res = put_cmsg(m, SOL_TIPC, TIPC_ERRINFO, 8, anc_data);
1174 if (res)
1175 return res;
1176 if (anc_data[1]) {
1177 res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1],
1178 msg_data(msg));
1179 if (res)
1180 return res;
1181 }
1182 }
1183
1184 /* Optionally capture message destination object */
1185 dest_type = msg ? msg_type(msg) : TIPC_DIRECT_MSG;
1186 switch (dest_type) {
1187 case TIPC_NAMED_MSG:
1188 has_name = 1;
1189 anc_data[0] = msg_nametype(msg);
1190 anc_data[1] = msg_namelower(msg);
1191 anc_data[2] = msg_namelower(msg);
1192 break;
1193 case TIPC_MCAST_MSG:
1194 has_name = 1;
1195 anc_data[0] = msg_nametype(msg);
1196 anc_data[1] = msg_namelower(msg);
1197 anc_data[2] = msg_nameupper(msg);
1198 break;
1199 case TIPC_CONN_MSG:
1200 has_name = (tsk->conn_type != 0);
1201 anc_data[0] = tsk->conn_type;
1202 anc_data[1] = tsk->conn_instance;
1203 anc_data[2] = tsk->conn_instance;
1204 break;
1205 default:
1206 has_name = 0;
1207 }
1208 if (has_name) {
1209 res = put_cmsg(m, SOL_TIPC, TIPC_DESTNAME, 12, anc_data);
1210 if (res)
1211 return res;
1212 }
1213
1214 return 0;
1215}
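
/* A receiver opts in to this ancillary data simply by supplying a
 * control buffer to recvmsg(); an illustrative walk of the result,
 * where msg is the userspace struct msghdr and the handlers are
 * hypothetical application callbacks:
 *
 *	struct cmsghdr *cm;
 *
 *	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
 *		if (cm->cmsg_level != SOL_TIPC)
 *			continue;
 *		if (cm->cmsg_type == TIPC_DESTNAME)
 *			handle_name(CMSG_DATA(cm));	(12 bytes: type/lower/upper)
 *		else if (cm->cmsg_type == TIPC_ERRINFO)
 *			handle_err(CMSG_DATA(cm));	(8 bytes: error/data size)
 *	}
 */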
1216
1217static void tipc_sk_send_ack(struct tipc_sock *tsk, uint ack)
1218{
1219 struct net *net = sock_net(&tsk->sk);
1220 struct sk_buff *skb = NULL;
1221 struct tipc_msg *msg;
1222 u32 peer_port = tsk_peer_port(tsk);
1223 u32 dnode = tsk_peer_node(tsk);
1224
1225 if (!tsk->connected)
1226 return;
1227 skb = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0,
1228 dnode, tsk_own_node(tsk), peer_port,
1229 tsk->portid, TIPC_OK);
1230 if (!skb)
1231 return;
1232 msg = buf_msg(skb);
1233 msg_set_msgcnt(msg, ack);
1234 tipc_node_xmit_skb(net, skb, dnode, msg_link_selector(msg));
1235}
1236
1237static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
1238{
1239 struct sock *sk = sock->sk;
1240 DEFINE_WAIT(wait);
1241 long timeo = *timeop;
1242 int err;
1243
1244 for (;;) {
1245 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1246 if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
1247 if (sock->state == SS_DISCONNECTING) {
1248 err = -ENOTCONN;
1249 break;
1250 }
1251 release_sock(sk);
1252 timeo = schedule_timeout(timeo);
1253 lock_sock(sk);
1254 }
1255 err = 0;
1256 if (!skb_queue_empty(&sk->sk_receive_queue))
1257 break;
1258 err = -EAGAIN;
1259 if (!timeo)
1260 break;
1261 err = sock_intr_errno(timeo);
1262 if (signal_pending(current))
1263 break;
1264 }
1265 finish_wait(sk_sleep(sk), &wait);
1266 *timeop = timeo;
1267 return err;
1268}
1269
1270/**
1271 * tipc_recvmsg - receive packet-oriented message
1272 * @m: descriptor for message info
1273 * @buf_len: total size of user buffer area
1274 * @flags: receive flags
1275 *
1276 * Used for SOCK_DGRAM, SOCK_RDM, and SOCK_SEQPACKET messages.
1277 * If the complete message doesn't fit in user area, truncate it.
1278 *
1279 * Returns size of returned message data, errno otherwise
1280 */
1281static int tipc_recvmsg(struct socket *sock, struct msghdr *m, size_t buf_len,
1282 int flags)
1283{
1284 struct sock *sk = sock->sk;
1285 struct tipc_sock *tsk = tipc_sk(sk);
1286 struct sk_buff *buf;
1287 struct tipc_msg *msg;
1288 long timeo;
1289 unsigned int sz;
1290 u32 err;
1291 int res;
1292
1293 /* Catch invalid receive requests */
1294 if (unlikely(!buf_len))
1295 return -EINVAL;
1296
1297 lock_sock(sk);
1298
1299 if (unlikely(sock->state == SS_UNCONNECTED)) {
1300 res = -ENOTCONN;
1301 goto exit;
1302 }
1303
1304 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1305restart:
1306
1307 /* Look for a message in receive queue; wait if necessary */
1308 res = tipc_wait_for_rcvmsg(sock, &timeo);
1309 if (res)
1310 goto exit;
1311
1312 /* Look at first message in receive queue */
1313 buf = skb_peek(&sk->sk_receive_queue);
1314 msg = buf_msg(buf);
1315 sz = msg_data_sz(msg);
1316 err = msg_errcode(msg);
1317
1318 /* Discard an empty non-errored message & try again */
1319 if ((!sz) && (!err)) {
1320 tsk_advance_rx_queue(sk);
1321 goto restart;
1322 }
1323
1324 /* Capture sender's address (optional) */
1325 set_orig_addr(m, msg);
1326
1327 /* Capture ancillary data (optional) */
1328 res = tipc_sk_anc_data_recv(m, msg, tsk);
1329 if (res)
1330 goto exit;
1331
1332 /* Capture message data (if valid) & compute return value (always) */
1333 if (!err) {
1334 if (unlikely(buf_len < sz)) {
1335 sz = buf_len;
1336 m->msg_flags |= MSG_TRUNC;
1337 }
1338 res = skb_copy_datagram_msg(buf, msg_hdr_sz(msg), m, sz);
1339 if (res)
1340 goto exit;
1341 res = sz;
1342 } else {
1343 if ((sock->state == SS_READY) ||
1344 ((err == TIPC_CONN_SHUTDOWN) || m->msg_control))
1345 res = 0;
1346 else
1347 res = -ECONNRESET;
1348 }
1349
1350 /* Consume received message (optional) */
1351 if (likely(!(flags & MSG_PEEK))) {
1352 if ((sock->state != SS_READY) &&
1353 (++tsk->rcv_unacked >= TIPC_CONNACK_INTV)) {
1354 tipc_sk_send_ack(tsk, tsk->rcv_unacked);
1355 tsk->rcv_unacked = 0;
1356 }
1357 tsk_advance_rx_queue(sk);
1358 }
1359exit:
1360 release_sock(sk);
1361 return res;
1362}
1363
1364/**
1365 * tipc_recv_stream - receive stream-oriented data
1366 * @m: descriptor for message info
1367 * @buf_len: total size of user buffer area
1368 * @flags: receive flags
1369 *
1370 * Used for SOCK_STREAM messages only. If not enough data is available
1371 * it will optionally wait for more; it never truncates data.
1372 *
1373 * Returns size of returned message data, errno otherwise
1374 */
1375static int tipc_recv_stream(struct socket *sock, struct msghdr *m,
1376 size_t buf_len, int flags)
1377{
1378 struct sock *sk = sock->sk;
1379 struct tipc_sock *tsk = tipc_sk(sk);
1380 struct sk_buff *buf;
1381 struct tipc_msg *msg;
1382 long timeo;
1383 unsigned int sz;
1384 int sz_to_copy, target, needed;
1385 int sz_copied = 0;
1386 u32 err;
1387 int res = 0;
1388
1389 /* Catch invalid receive attempts */
1390 if (unlikely(!buf_len))
1391 return -EINVAL;
1392
1393 lock_sock(sk);
1394
1395 if (unlikely(sock->state == SS_UNCONNECTED)) {
1396 res = -ENOTCONN;
1397 goto exit;
1398 }
1399
1400 target = sock_rcvlowat(sk, flags & MSG_WAITALL, buf_len);
1401 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1402
1403restart:
1404 /* Look for a message in receive queue; wait if necessary */
1405 res = tipc_wait_for_rcvmsg(sock, &timeo);
1406 if (res)
1407 goto exit;
1408
1409 /* Look at first message in receive queue */
1410 buf = skb_peek(&sk->sk_receive_queue);
1411 msg = buf_msg(buf);
1412 sz = msg_data_sz(msg);
1413 err = msg_errcode(msg);
1414
1415 /* Discard an empty non-errored message & try again */
1416 if ((!sz) && (!err)) {
1417 tsk_advance_rx_queue(sk);
1418 goto restart;
1419 }
1420
1421 /* Optionally capture sender's address & ancillary data of first msg */
1422 if (sz_copied == 0) {
1423 set_orig_addr(m, msg);
1424 res = tipc_sk_anc_data_recv(m, msg, tsk);
1425 if (res)
1426 goto exit;
1427 }
1428
1429 /* Capture message data (if valid) & compute return value (always) */
1430 if (!err) {
1431 u32 offset = (u32)(unsigned long)(TIPC_SKB_CB(buf)->handle);
1432
1433 sz -= offset;
1434 needed = (buf_len - sz_copied);
1435 sz_to_copy = (sz <= needed) ? sz : needed;
1436
1437 res = skb_copy_datagram_msg(buf, msg_hdr_sz(msg) + offset,
1438 m, sz_to_copy);
1439 if (res)
1440 goto exit;
1441
1442 sz_copied += sz_to_copy;
1443
1444 if (sz_to_copy < sz) {
1445 if (!(flags & MSG_PEEK))
1446 TIPC_SKB_CB(buf)->handle =
1447 (void *)(unsigned long)(offset + sz_to_copy);
1448 goto exit;
1449 }
1450 } else {
1451 if (sz_copied != 0)
1452 goto exit; /* can't add error msg to valid data */
1453
1454 if ((err == TIPC_CONN_SHUTDOWN) || m->msg_control)
1455 res = 0;
1456 else
1457 res = -ECONNRESET;
1458 }
1459
1460 /* Consume received message (optional) */
1461 if (likely(!(flags & MSG_PEEK))) {
1462 if (unlikely(++tsk->rcv_unacked >= TIPC_CONNACK_INTV)) {
1463 tipc_sk_send_ack(tsk, tsk->rcv_unacked);
1464 tsk->rcv_unacked = 0;
1465 }
1466 tsk_advance_rx_queue(sk);
1467 }
1468
1469 /* Loop around if more data is required */
1470 if ((sz_copied < buf_len) && /* didn't get all requested data */
1471 (!skb_queue_empty(&sk->sk_receive_queue) ||
1472 (sz_copied < target)) && /* and more is ready or required */
1473 (!(flags & MSG_PEEK)) && /* and aren't just peeking at data */
1474 (!err)) /* and haven't reached a FIN */
1475 goto restart;
1476
1477exit:
1478 release_sock(sk);
1479 return sz_copied ? sz_copied : res;
1480}
1481
1482/**
1483 * tipc_write_space - wake up thread if port congestion is released
1484 * @sk: socket
1485 */
1486static void tipc_write_space(struct sock *sk)
1487{
1488 struct socket_wq *wq;
1489
1490 rcu_read_lock();
1491 wq = rcu_dereference(sk->sk_wq);
1492 if (skwq_has_sleeper(wq))
1493 wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
1494 POLLWRNORM | POLLWRBAND);
1495 rcu_read_unlock();
1496}
1497
1498/**
1499 * tipc_data_ready - wake up threads to indicate messages have been received
1500 * @sk: socket
1502 */
1503static void tipc_data_ready(struct sock *sk)
1504{
1505 struct socket_wq *wq;
1506
1507 rcu_read_lock();
1508 wq = rcu_dereference(sk->sk_wq);
1509 if (skwq_has_sleeper(wq))
1510 wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
1511 POLLRDNORM | POLLRDBAND);
1512 rcu_read_unlock();
1513}
1514
1515static void tipc_sock_destruct(struct sock *sk)
1516{
1517 __skb_queue_purge(&sk->sk_receive_queue);
1518}
1519
1520/**
1521 * filter_connect - Handle all incoming messages for a connection-based socket
1522 * @tsk: TIPC socket
1523 * @skb: pointer to message buffer. Set to NULL if buffer is consumed
1524 *
1525 * Returns true if everything ok, false otherwise
1526 */
1527static bool filter_connect(struct tipc_sock *tsk, struct sk_buff *skb)
1528{
1529 struct sock *sk = &tsk->sk;
1530 struct net *net = sock_net(sk);
1531 struct socket *sock = sk->sk_socket;
1532 struct tipc_msg *hdr = buf_msg(skb);
1533
1534 if (unlikely(msg_mcast(hdr)))
1535 return false;
1536
1537 switch ((int)sock->state) {
1538 case SS_CONNECTED:
1539
1540 /* Accept only connection-based messages sent by peer */
1541 if (unlikely(!tsk_peer_msg(tsk, hdr)))
1542 return false;
1543
1544 if (unlikely(msg_errcode(hdr))) {
1545 sock->state = SS_DISCONNECTING;
1546 tsk->connected = 0;
1547 /* Let timer expire on its own */
1548 tipc_node_remove_conn(net, tsk_peer_node(tsk),
1549 tsk->portid);
1550 }
1551 return true;
1552
1553 case SS_CONNECTING:
1554
1555 /* Accept only ACK or NACK message */
1556 if (unlikely(!msg_connected(hdr)))
1557 return false;
1558
1559 if (unlikely(msg_errcode(hdr))) {
1560 sock->state = SS_DISCONNECTING;
1561 sk->sk_err = ECONNREFUSED;
1562 return true;
1563 }
1564
1565 if (unlikely(!msg_isdata(hdr))) {
1566 sock->state = SS_DISCONNECTING;
1567 sk->sk_err = EINVAL;
1568 return true;
1569 }
1570
1571 tipc_sk_finish_conn(tsk, msg_origport(hdr), msg_orignode(hdr));
1572 msg_set_importance(&tsk->phdr, msg_importance(hdr));
1573 sock->state = SS_CONNECTED;
1574
1575 /* If 'ACK+' message, add to socket receive queue */
1576 if (msg_data_sz(hdr))
1577 return true;
1578
1579 /* If empty 'ACK-' message, wake up sleeping connect() */
1580 if (waitqueue_active(sk_sleep(sk)))
1581 wake_up_interruptible(sk_sleep(sk));
1582
1583 /* 'ACK-' message is neither accepted nor rejected: */
1584 msg_set_dest_droppable(hdr, 1);
1585 return false;
1586
1587 case SS_LISTENING:
1588 case SS_UNCONNECTED:
1589
1590 /* Accept only SYN message */
1591 if (!msg_connected(hdr) && !(msg_errcode(hdr)))
1592 return true;
1593 break;
1594 case SS_DISCONNECTING:
1595 break;
1596 default:
1597 pr_err("Unknown socket state %u\n", sock->state);
1598 }
1599 return false;
1600}
1601
1602/**
1603 * rcvbuf_limit - get proper overload limit of socket receive queue
1604 * @sk: socket
1605 * @buf: message
1606 *
1607 * For all connection oriented messages, irrespective of importance,
1608 * the default overload value (i.e. 67MB) is set as limit.
1609 *
1610 * For all connectionless messages, by default new queue limits are
1611 * as below:
1612 *
1613 * TIPC_LOW_IMPORTANCE (4 MB)
1614 * TIPC_MEDIUM_IMPORTANCE (8 MB)
1615 * TIPC_HIGH_IMPORTANCE (16 MB)
1616 * TIPC_CRITICAL_IMPORTANCE (32 MB)
1617 *
1618 * Returns overload limit according to corresponding message importance
1619 */
1620static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *buf)
1621{
1622 struct tipc_msg *msg = buf_msg(buf);
1623
1624 if (msg_connected(msg))
1625 return sysctl_tipc_rmem[2];
1626
1627 return sk->sk_rcvbuf >> TIPC_CRITICAL_IMPORTANCE <<
1628 msg_importance(msg);
1629}
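
/* Worked example of the shift arithmetic above, assuming an effective
 * sk_rcvbuf of 32 MB: the base quantum is 32 MB >> 3
 * (TIPC_CRITICAL_IMPORTANCE) = 4 MB, and the left shift doubles it per
 * importance level, yielding exactly the 4/8/16/32 MB ladder listed in
 * the comment.
 */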
1630
1631/**
1632 * filter_rcv - validate incoming message
1633 * @sk: socket
1634 * @skb: pointer to message.
1635 *
1636 * Enqueues message on receive queue if acceptable; optionally handles
1637 * disconnect indication for a connected socket.
1638 *
1639 * Called with socket lock already taken
1640 *
1641 * Returns true if message was added to socket receive queue, otherwise false
1642 */
1643static bool filter_rcv(struct sock *sk, struct sk_buff *skb)
1644{
1645 struct socket *sock = sk->sk_socket;
1646 struct tipc_sock *tsk = tipc_sk(sk);
1647 struct tipc_msg *hdr = buf_msg(skb);
1648 unsigned int limit = rcvbuf_limit(sk, skb);
1649 int err = TIPC_OK;
1650 int usr = msg_user(hdr);
1651
1652 if (unlikely(msg_user(hdr) == CONN_MANAGER)) {
1653 tipc_sk_proto_rcv(tsk, skb);
1654 return false;
1655 }
1656
1657 if (unlikely(usr == SOCK_WAKEUP)) {
1658 kfree_skb(skb);
1659 tsk->link_cong = 0;
1660 sk->sk_write_space(sk);
1661 return false;
1662 }
1663
1664 /* Drop if illegal message type */
1665 if (unlikely(msg_type(hdr) > TIPC_DIRECT_MSG)) {
1666 kfree_skb(skb);
1667 return false;
1668 }
1669
1670 /* Reject if wrong message type for current socket state */
1671 if (unlikely(sock->state == SS_READY)) {
1672 if (msg_connected(hdr)) {
1673 err = TIPC_ERR_NO_PORT;
1674 goto reject;
1675 }
1676 } else if (unlikely(!filter_connect(tsk, skb))) {
1677 err = TIPC_ERR_NO_PORT;
1678 goto reject;
1679 }
1680
1681 /* Reject message if there isn't room to queue it */
1682 if (unlikely(sk_rmem_alloc_get(sk) + skb->truesize >= limit)) {
1683 err = TIPC_ERR_OVERLOAD;
1684 goto reject;
1685 }
1686
1687 /* Enqueue message */
1688 TIPC_SKB_CB(skb)->handle = NULL;
1689 __skb_queue_tail(&sk->sk_receive_queue, skb);
1690 skb_set_owner_r(skb, sk);
1691
1692 sk->sk_data_ready(sk);
1693 return true;
1694
1695reject:
1696 tipc_sk_respond(sk, skb, err);
1697 return false;
1698}
1699
1700/**
1701 * tipc_backlog_rcv - handle incoming message from backlog queue
1702 * @sk: socket
1703 * @skb: message
1704 *
1705 * Caller must hold socket lock
1706 *
1707 * Returns 0
1708 */
1709static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb)
1710{
1711 unsigned int truesize = skb->truesize;
1712
1713 if (likely(filter_rcv(sk, skb)))
1714 atomic_add(truesize, &tipc_sk(sk)->dupl_rcvcnt);
1715 return 0;
1716}
1717
1718/**
1719 * tipc_sk_enqueue - extract all buffers with destination 'dport' from
1720 * inputq and try adding them to socket or backlog queue
1721 * @inputq: list of incoming buffers with potentially different destinations
1722 * @sk: socket where the buffers should be enqueued
1723 * @dport: port number for the socket
1724 *
1725 * Caller must hold socket lock
1726 */
1727static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
1728 u32 dport)
1729{
1730 unsigned int lim;
1731 atomic_t *dcnt;
1732 struct sk_buff *skb;
1733 unsigned long time_limit = jiffies + 2;
1734
1735 while (skb_queue_len(inputq)) {
1736 if (unlikely(time_after_eq(jiffies, time_limit)))
1737 return;
1738
1739 skb = tipc_skb_dequeue(inputq, dport);
1740 if (unlikely(!skb))
1741 return;
1742
1743 /* Add message directly to receive queue if possible */
1744 if (!sock_owned_by_user(sk)) {
1745 filter_rcv(sk, skb);
1746 continue;
1747 }
1748
1749 /* Try backlog, compensating for double-counted bytes */
1750 dcnt = &tipc_sk(sk)->dupl_rcvcnt;
1751		if (!sk->sk_backlog.len)
1752 atomic_set(dcnt, 0);
1753 lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
1754 if (likely(!sk_add_backlog(sk, skb, lim)))
1755 continue;
1756
1757 /* Overload => reject message back to sender */
1758 tipc_sk_respond(sk, skb, TIPC_ERR_OVERLOAD);
1759 break;
1760 }
1761}
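
/* Accounting note (a sketch of the mechanism above): a buffer that is
 * moved from the backlog to the receive queue by filter_rcv() is charged
 * to sk_rmem_alloc while its bytes are still counted in sk_backlog.len,
 * i.e. it is briefly counted twice. tipc_backlog_rcv() records those
 * bytes in dupl_rcvcnt, the backlog limit above is widened by the same
 * amount to compensate, and the counter is reset once the backlog has
 * drained and no double counting remains.
 */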
1762
1763/**
1764 * tipc_sk_rcv - handle a chain of incoming buffers
 * @net: network namespace
1765 * @inputq: buffer list containing the buffers
1766 * Consumes all buffers in list until inputq is empty
1767 * Note: may be called in multiple threads referring to the same queue
1768 */
1769void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
1770{
1771 u32 dnode, dport = 0;
1772 int err;
1773 struct tipc_sock *tsk;
1774 struct sock *sk;
1775 struct sk_buff *skb;
1776
1777 while (skb_queue_len(inputq)) {
1778 dport = tipc_skb_peek_port(inputq, dport);
1779 tsk = tipc_sk_lookup(net, dport);
1780
1781 if (likely(tsk)) {
1782 sk = &tsk->sk;
1783 if (likely(spin_trylock_bh(&sk->sk_lock.slock))) {
1784 tipc_sk_enqueue(inputq, sk, dport);
1785 spin_unlock_bh(&sk->sk_lock.slock);
1786 }
1787 sock_put(sk);
1788 continue;
1789 }
1790
1791 /* No destination socket => dequeue skb if still there */
1792 skb = tipc_skb_dequeue(inputq, dport);
1793 if (!skb)
1794 return;
1795
1796 /* Try secondary lookup if unresolved named message */
1797 err = TIPC_ERR_NO_PORT;
1798 if (tipc_msg_lookup_dest(net, skb, &err))
1799 goto xmit;
1800
1801 /* Prepare for message rejection */
1802 if (!tipc_msg_reverse(tipc_own_addr(net), &skb, err))
1803 continue;
1804xmit:
1805 dnode = msg_destnode(buf_msg(skb));
1806 tipc_node_xmit_skb(net, skb, dnode, dport);
1807 }
1808}
1809
1810static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
1811{
1812 struct sock *sk = sock->sk;
1813 DEFINE_WAIT(wait);
1814 int done;
1815
1816 do {
1817 int err = sock_error(sk);
1818 if (err)
1819 return err;
1820 if (!*timeo_p)
1821 return -ETIMEDOUT;
1822 if (signal_pending(current))
1823 return sock_intr_errno(*timeo_p);
1824
1825 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1826 done = sk_wait_event(sk, timeo_p, sock->state != SS_CONNECTING);
1827 finish_wait(sk_sleep(sk), &wait);
1828 } while (!done);
1829 return 0;
1830}
1831
1832/**
1833 * tipc_connect - establish a connection to another TIPC port
1834 * @sock: socket structure
1835 * @dest: socket address for destination port
1836 * @destlen: size of socket address data structure
1837 * @flags: file-related flags associated with socket
1838 *
1839 * Returns 0 on success, errno otherwise
1840 */
1841static int tipc_connect(struct socket *sock, struct sockaddr *dest,
1842 int destlen, int flags)
1843{
1844 struct sock *sk = sock->sk;
1845 struct tipc_sock *tsk = tipc_sk(sk);
1846 struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest;
1847 struct msghdr m = {NULL,};
1848 long timeout = (flags & O_NONBLOCK) ? 0 : tsk->conn_timeout;
1849 socket_state previous;
1850 int res = 0;
1851
1852 lock_sock(sk);
1853
1854 /* DGRAM/RDM connect(), just save the destaddr */
1855 if (sock->state == SS_READY) {
1856 if (dst->family == AF_UNSPEC) {
1857 memset(&tsk->remote, 0, sizeof(struct sockaddr_tipc));
1858 tsk->connected = 0;
1859 } else if (destlen != sizeof(struct sockaddr_tipc)) {
1860 res = -EINVAL;
1861 } else {
1862 memcpy(&tsk->remote, dest, destlen);
1863 tsk->connected = 1;
1864 }
1865 goto exit;
1866 }
1867
1868 /*
1869 * Reject connection attempt using multicast address
1870 *
1871	 * Note: __tipc_sendmsg() validates the rest of the address fields,
1872 * so there's no need to do it here
1873 */
1874 if (dst->addrtype == TIPC_ADDR_MCAST) {
1875 res = -EINVAL;
1876 goto exit;
1877 }
1878
1879 previous = sock->state;
1880 switch (sock->state) {
1881 case SS_UNCONNECTED:
1882 /* Send a 'SYN-' to destination */
1883 m.msg_name = dest;
1884 m.msg_namelen = destlen;
1885
1886		/* For a non-blocking connect, set MSG_DONTWAIT so that
1887		 * __tipc_sendmsg() never blocks.
1888		 */
1889 if (!timeout)
1890 m.msg_flags = MSG_DONTWAIT;
1891
1892 res = __tipc_sendmsg(sock, &m, 0);
1893 if ((res < 0) && (res != -EWOULDBLOCK))
1894 goto exit;
1895
1896		/* We have just entered SS_CONNECTING; unlike a repeated
1897		 * connect(), the return value in the non-blocking case is
1898		 * -EINPROGRESS rather than -EALREADY.
1899		 */
1900 res = -EINPROGRESS;
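		/* fall through */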
1901 case SS_CONNECTING:
1902 if (previous == SS_CONNECTING)
1903 res = -EALREADY;
1904 if (!timeout)
1905 goto exit;
1906 timeout = msecs_to_jiffies(timeout);
1907 /* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */
1908 res = tipc_wait_for_connect(sock, &timeout);
1909 break;
1910 case SS_CONNECTED:
1911 res = -EISCONN;
1912 break;
1913 default:
1914 res = -EINVAL;
1915 break;
1916 }
1917exit:
1918 release_sock(sk);
1919 return res;
1920}
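
/* Usage sketch from user space (illustrative only; the service type and
 * instance values are invented for the example):
 *
 *	struct sockaddr_tipc peer = {
 *		.family = AF_TIPC,
 *		.addrtype = TIPC_ADDR_NAME,
 *		.addr.name.name = { .type = 1000, .instance = 1 },
 *	};
 *	int sd = socket(AF_TIPC, SOCK_STREAM, 0);
 *
 *	if (connect(sd, (struct sockaddr *)&peer, sizeof(peer)) < 0)
 *		perror("connect");
 *
 * With O_NONBLOCK set, connect() fails with errno EINPROGRESS and
 * completion must be detected via poll(); a second connect() attempt
 * while still connecting yields EALREADY, matching the switch above.
 */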
1921
1922/**
1923 * tipc_listen - allow socket to listen for incoming connections
1924 * @sock: socket structure
1925 * @len: (unused)
1926 *
1927 * Returns 0 on success, errno otherwise
1928 */
1929static int tipc_listen(struct socket *sock, int len)
1930{
1931 struct sock *sk = sock->sk;
1932 int res;
1933
1934 lock_sock(sk);
1935
1936 if (sock->state != SS_UNCONNECTED)
1937 res = -EINVAL;
1938 else {
1939 sock->state = SS_LISTENING;
1940 res = 0;
1941 }
1942
1943 release_sock(sk);
1944 return res;
1945}
1946
1947static int tipc_wait_for_accept(struct socket *sock, long timeo)
1948{
1949 struct sock *sk = sock->sk;
1950 DEFINE_WAIT(wait);
1951 int err;
1952
1953 /* True wake-one mechanism for incoming connections: only
1954 * one process gets woken up, not the 'whole herd'.
1955 * Since we do not 'race & poll' for established sockets
1956 * anymore, the common case will execute the loop only once.
1957 */
1958 for (;;) {
1959 prepare_to_wait_exclusive(sk_sleep(sk), &wait,
1960 TASK_INTERRUPTIBLE);
1961 if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
1962 release_sock(sk);
1963 timeo = schedule_timeout(timeo);
1964 lock_sock(sk);
1965 }
1966 err = 0;
1967 if (!skb_queue_empty(&sk->sk_receive_queue))
1968 break;
1969 err = -EINVAL;
1970 if (sock->state != SS_LISTENING)
1971 break;
1972 err = -EAGAIN;
1973 if (!timeo)
1974 break;
1975 err = sock_intr_errno(timeo);
1976 if (signal_pending(current))
1977 break;
1978 }
1979 finish_wait(sk_sleep(sk), &wait);
1980 return err;
1981}
1982
1983/**
1984 * tipc_accept - wait for connection request
1985 * @sock: listening socket
1986 * @new_sock: new socket that is to be connected
1987 * @flags: file-related flags associated with socket
1988 *
1989 * Returns 0 on success, errno otherwise
1990 */
1991static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags)
1992{
1993 struct sock *new_sk, *sk = sock->sk;
1994 struct sk_buff *buf;
1995 struct tipc_sock *new_tsock;
1996 struct tipc_msg *msg;
1997 long timeo;
1998 int res;
1999
2000 lock_sock(sk);
2001
2002 if (sock->state != SS_LISTENING) {
2003 res = -EINVAL;
2004 goto exit;
2005 }
2006 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
2007 res = tipc_wait_for_accept(sock, timeo);
2008 if (res)
2009 goto exit;
2010
2011 buf = skb_peek(&sk->sk_receive_queue);
2012
2013 res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, 1);
2014 if (res)
2015 goto exit;
2016 security_sk_clone(sock->sk, new_sock->sk);
2017
2018 new_sk = new_sock->sk;
2019 new_tsock = tipc_sk(new_sk);
2020 msg = buf_msg(buf);
2021
2022 /* we lock on new_sk; but lockdep sees the lock on sk */
2023 lock_sock_nested(new_sk, SINGLE_DEPTH_NESTING);
2024
2025 /*
2026 * Reject any stray messages received by new socket
2027 * before the socket lock was taken (very, very unlikely)
2028 */
2029 tsk_rej_rx_queue(new_sk);
2030
2031	/* Connect new socket to its peer */
2032 tipc_sk_finish_conn(new_tsock, msg_origport(msg), msg_orignode(msg));
2033 new_sock->state = SS_CONNECTED;
2034
2035 tsk_set_importance(new_tsock, msg_importance(msg));
2036 if (msg_named(msg)) {
2037 new_tsock->conn_type = msg_nametype(msg);
2038 new_tsock->conn_instance = msg_nameinst(msg);
2039 }
2040
2041 /*
2042	 * Respond to 'SYN-' by discarding it & returning 'ACK-'.
2043 * Respond to 'SYN+' by queuing it on new socket.
2044 */
2045 if (!msg_data_sz(msg)) {
2046 struct msghdr m = {NULL,};
2047
2048 tsk_advance_rx_queue(sk);
2049 __tipc_send_stream(new_sock, &m, 0);
2050 } else {
2051 __skb_dequeue(&sk->sk_receive_queue);
2052 __skb_queue_head(&new_sk->sk_receive_queue, buf);
2053 skb_set_owner_r(buf, new_sk);
2054 }
2055 release_sock(new_sk);
2056exit:
2057 release_sock(sk);
2058 return res;
2059}
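
/* Server-side usage sketch (illustrative; the bound name sequence is
 * invented, and the backlog argument to listen() is unused by TIPC):
 *
 *	struct sockaddr_tipc srv = {
 *		.family = AF_TIPC,
 *		.addrtype = TIPC_ADDR_NAMESEQ,
 *		.addr.nameseq = { .type = 1000, .lower = 1, .upper = 1 },
 *		.scope = TIPC_ZONE_SCOPE,
 *	};
 *	int sd = socket(AF_TIPC, SOCK_SEQPACKET, 0);
 *	int peer;
 *
 *	bind(sd, (struct sockaddr *)&srv, sizeof(srv));
 *	listen(sd, 0);
 *	peer = accept(sd, NULL, NULL);
 */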
2060
2061/**
2062 * tipc_shutdown - shutdown socket connection
2063 * @sock: socket structure
2064 * @how: direction to close (must be SHUT_RDWR)
2065 *
2066 * Terminates connection (if necessary), then purges socket's receive queue.
2067 *
2068 * Returns 0 on success, errno otherwise
2069 */
2070static int tipc_shutdown(struct socket *sock, int how)
2071{
2072 struct sock *sk = sock->sk;
2073 struct net *net = sock_net(sk);
2074 struct tipc_sock *tsk = tipc_sk(sk);
2075 struct sk_buff *skb;
2076 u32 dnode = tsk_peer_node(tsk);
2077 u32 dport = tsk_peer_port(tsk);
2078 u32 onode = tipc_own_addr(net);
2079 u32 oport = tsk->portid;
2080 int res;
2081
2082 if (how != SHUT_RDWR)
2083 return -EINVAL;
2084
2085 lock_sock(sk);
2086
2087 switch (sock->state) {
2088 case SS_CONNECTING:
2089 case SS_CONNECTED:
2090
2091restart:
2092 dnode = tsk_peer_node(tsk);
2093
2094 /* Disconnect and send a 'FIN+' or 'FIN-' message to peer */
2095 skb = __skb_dequeue(&sk->sk_receive_queue);
2096 if (skb) {
2097 if (TIPC_SKB_CB(skb)->handle != NULL) {
2098 kfree_skb(skb);
2099 goto restart;
2100 }
2101 tipc_sk_respond(sk, skb, TIPC_CONN_SHUTDOWN);
2102 } else {
2103 skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
2104 TIPC_CONN_MSG, SHORT_H_SIZE,
2105 0, dnode, onode, dport, oport,
2106 TIPC_CONN_SHUTDOWN);
2107 tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
2108 }
2109 tsk->connected = 0;
2110 sock->state = SS_DISCONNECTING;
2111 tipc_node_remove_conn(net, dnode, tsk->portid);
2112 /* fall through */
2113
2114 case SS_DISCONNECTING:
2115
2116 /* Discard any unreceived messages */
2117 __skb_queue_purge(&sk->sk_receive_queue);
2118
2119 /* Wake up anyone sleeping in poll */
2120 sk->sk_state_change(sk);
2121 res = 0;
2122 break;
2123
2124 default:
2125 res = -ENOTCONN;
2126 }
2127
2128 release_sock(sk);
2129 return res;
2130}
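
/* Usage note (illustrative): TIPC only implements full-duplex shutdown,
 * so user space must pass SHUT_RDWR:
 *
 *	shutdown(sd, SHUT_RDWR);
 *
 * SHUT_RD or SHUT_WR alone is rejected with -EINVAL, per the check above.
 */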
2131
2132static void tipc_sk_timeout(unsigned long data)
2133{
2134 struct tipc_sock *tsk = (struct tipc_sock *)data;
2135 struct sock *sk = &tsk->sk;
2136 struct sk_buff *skb = NULL;
2137 u32 peer_port, peer_node;
2138 u32 own_node = tsk_own_node(tsk);
2139
2140 bh_lock_sock(sk);
2141 if (!tsk->connected) {
2142 bh_unlock_sock(sk);
2143 goto exit;
2144 }
2145 peer_port = tsk_peer_port(tsk);
2146 peer_node = tsk_peer_node(tsk);
2147
2148 if (tsk->probing_state == TIPC_CONN_PROBING) {
2149 if (!sock_owned_by_user(sk)) {
2150 sk->sk_socket->state = SS_DISCONNECTING;
2151 tsk->connected = 0;
2152 tipc_node_remove_conn(sock_net(sk), tsk_peer_node(tsk),
2153 tsk_peer_port(tsk));
2154 sk->sk_state_change(sk);
2155 } else {
2156 /* Try again later */
2157 sk_reset_timer(sk, &sk->sk_timer, (HZ / 20));
2158 }
2159
2160 } else {
2161 skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE,
2162 INT_H_SIZE, 0, peer_node, own_node,
2163 peer_port, tsk->portid, TIPC_OK);
2164 tsk->probing_state = TIPC_CONN_PROBING;
2165 sk_reset_timer(sk, &sk->sk_timer, jiffies + tsk->probing_intv);
2166 }
2167 bh_unlock_sock(sk);
2168 if (skb)
2169 tipc_node_xmit_skb(sock_net(sk), skb, peer_node, tsk->portid);
2170exit:
2171 sock_put(sk);
2172}
2173
2174static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
2175 struct tipc_name_seq const *seq)
2176{
2177 struct net *net = sock_net(&tsk->sk);
2178 struct publication *publ;
2179 u32 key;
2180
2181 if (tsk->connected)
2182 return -EINVAL;
2183 key = tsk->portid + tsk->pub_count + 1;
2184 if (key == tsk->portid)
2185 return -EADDRINUSE;
2186
2187 publ = tipc_nametbl_publish(net, seq->type, seq->lower, seq->upper,
2188 scope, tsk->portid, key);
2189 if (unlikely(!publ))
2190 return -EINVAL;
2191
2192 list_add(&publ->pport_list, &tsk->publications);
2193 tsk->pub_count++;
2194 tsk->published = 1;
2195 return 0;
2196}
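
/* Key derivation sketch (illustrative numbers): the publication key is
 * portid + pub_count + 1, so a socket with portid 100 publishes with
 * keys 101, 102, 103, ... The -EADDRINUSE case only triggers if the
 * counter wraps far enough for the key to collide with the portid
 * itself.
 */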
2197
2198static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
2199 struct tipc_name_seq const *seq)
2200{
2201 struct net *net = sock_net(&tsk->sk);
2202 struct publication *publ;
2203 struct publication *safe;
2204 int rc = -EINVAL;
2205
2206 list_for_each_entry_safe(publ, safe, &tsk->publications, pport_list) {
2207 if (seq) {
2208 if (publ->scope != scope)
2209 continue;
2210 if (publ->type != seq->type)
2211 continue;
2212 if (publ->lower != seq->lower)
2213 continue;
2214 if (publ->upper != seq->upper)
2215 break;
2216 tipc_nametbl_withdraw(net, publ->type, publ->lower,
2217 publ->ref, publ->key);
2218 rc = 0;
2219 break;
2220 }
2221 tipc_nametbl_withdraw(net, publ->type, publ->lower,
2222 publ->ref, publ->key);
2223 rc = 0;
2224 }
2225 if (list_empty(&tsk->publications))
2226 tsk->published = 0;
2227 return rc;
2228}
2229
2230/* tipc_sk_reinit: set non-zero address in all existing sockets
2231 * when we go from standalone to network mode.
2232 */
2233void tipc_sk_reinit(struct net *net)
2234{
2235 struct tipc_net *tn = net_generic(net, tipc_net_id);
2236 const struct bucket_table *tbl;
2237 struct rhash_head *pos;
2238 struct tipc_sock *tsk;
2239 struct tipc_msg *msg;
2240 int i;
2241
2242 rcu_read_lock();
2243 tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht);
2244 for (i = 0; i < tbl->size; i++) {
2245 rht_for_each_entry_rcu(tsk, pos, tbl, i, node) {
2246 spin_lock_bh(&tsk->sk.sk_lock.slock);
2247 msg = &tsk->phdr;
2248 msg_set_prevnode(msg, tn->own_addr);
2249 msg_set_orignode(msg, tn->own_addr);
2250 spin_unlock_bh(&tsk->sk.sk_lock.slock);
2251 }
2252 }
2253 rcu_read_unlock();
2254}
2255
2256static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid)
2257{
2258 struct tipc_net *tn = net_generic(net, tipc_net_id);
2259 struct tipc_sock *tsk;
2260
2261 rcu_read_lock();
2262 tsk = rhashtable_lookup_fast(&tn->sk_rht, &portid, tsk_rht_params);
2263 if (tsk)
2264 sock_hold(&tsk->sk);
2265 rcu_read_unlock();
2266
2267 return tsk;
2268}
2269
2270static int tipc_sk_insert(struct tipc_sock *tsk)
2271{
2272 struct sock *sk = &tsk->sk;
2273 struct net *net = sock_net(sk);
2274 struct tipc_net *tn = net_generic(net, tipc_net_id);
2275 u32 remaining = (TIPC_MAX_PORT - TIPC_MIN_PORT) + 1;
2276 u32 portid = prandom_u32() % remaining + TIPC_MIN_PORT;
2277
2278 while (remaining--) {
2279 portid++;
2280 if ((portid < TIPC_MIN_PORT) || (portid > TIPC_MAX_PORT))
2281 portid = TIPC_MIN_PORT;
2282 tsk->portid = portid;
2283 sock_hold(&tsk->sk);
2284 if (!rhashtable_lookup_insert_fast(&tn->sk_rht, &tsk->node,
2285 tsk_rht_params))
2286 return 0;
2287 sock_put(&tsk->sk);
2288 }
2289
2290 return -1;
2291}
2292
2293static void tipc_sk_remove(struct tipc_sock *tsk)
2294{
2295 struct sock *sk = &tsk->sk;
2296 struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id);
2297
2298 if (!rhashtable_remove_fast(&tn->sk_rht, &tsk->node, tsk_rht_params)) {
2299 WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
2300 __sock_put(sk);
2301 }
2302}
2303
2304static const struct rhashtable_params tsk_rht_params = {
2305 .nelem_hint = 192,
2306 .head_offset = offsetof(struct tipc_sock, node),
2307 .key_offset = offsetof(struct tipc_sock, portid),
2308 .key_len = sizeof(u32), /* portid */
2309 .max_size = 1048576,
2310 .min_size = 256,
2311 .automatic_shrinking = true,
2312};
2313
2314int tipc_sk_rht_init(struct net *net)
2315{
2316 struct tipc_net *tn = net_generic(net, tipc_net_id);
2317
2318 return rhashtable_init(&tn->sk_rht, &tsk_rht_params);
2319}
2320
2321void tipc_sk_rht_destroy(struct net *net)
2322{
2323 struct tipc_net *tn = net_generic(net, tipc_net_id);
2324
2325 /* Wait for socket readers to complete */
2326 synchronize_net();
2327
2328 rhashtable_destroy(&tn->sk_rht);
2329}
2330
2331/**
2332 * tipc_setsockopt - set socket option
2333 * @sock: socket structure
2334 * @lvl: option level
2335 * @opt: option identifier
2336 * @ov: pointer to new option value
2337 * @ol: length of option value
2338 *
2339 * For stream sockets only, accepts and ignores all IPPROTO_TCP options
2340 * (to ease compatibility).
2341 *
2342 * Returns 0 on success, errno otherwise
2343 */
2344static int tipc_setsockopt(struct socket *sock, int lvl, int opt,
2345 char __user *ov, unsigned int ol)
2346{
2347 struct sock *sk = sock->sk;
2348 struct tipc_sock *tsk = tipc_sk(sk);
2349 u32 value;
2350 int res;
2351
2352 if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
2353 return 0;
2354 if (lvl != SOL_TIPC)
2355 return -ENOPROTOOPT;
2356 if (ol < sizeof(value))
2357 return -EINVAL;
2358 res = get_user(value, (u32 __user *)ov);
2359 if (res)
2360 return res;
2361
2362 lock_sock(sk);
2363
2364 switch (opt) {
2365 case TIPC_IMPORTANCE:
2366 res = tsk_set_importance(tsk, value);
2367 break;
2368 case TIPC_SRC_DROPPABLE:
2369 if (sock->type != SOCK_STREAM)
2370 tsk_set_unreliable(tsk, value);
2371 else
2372 res = -ENOPROTOOPT;
2373 break;
2374 case TIPC_DEST_DROPPABLE:
2375 tsk_set_unreturnable(tsk, value);
2376 break;
2377 case TIPC_CONN_TIMEOUT:
2378 tipc_sk(sk)->conn_timeout = value;
2379 /* no need to set "res", since already 0 at this point */
2380 break;
2381 default:
2382 res = -EINVAL;
2383 }
2384
2385 release_sock(sk);
2386
2387 return res;
2388}
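
/* Usage sketch (illustrative values): tuning a socket from user space;
 * TIPC_CONN_TIMEOUT is expressed in milliseconds:
 *
 *	__u32 timeout = 30000;
 *	__u32 imp = TIPC_HIGH_IMPORTANCE;
 *
 *	setsockopt(sd, SOL_TIPC, TIPC_CONN_TIMEOUT, &timeout, sizeof(timeout));
 *	setsockopt(sd, SOL_TIPC, TIPC_IMPORTANCE, &imp, sizeof(imp));
 */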
2389
2390/**
2391 * tipc_getsockopt - get socket option
2392 * @sock: socket structure
2393 * @lvl: option level
2394 * @opt: option identifier
2395 * @ov: receptacle for option value
2396 * @ol: receptacle for length of option value
2397 *
2398 * For stream sockets only, returns 0 length result for all IPPROTO_TCP options
2399 * (to ease compatibility).
2400 *
2401 * Returns 0 on success, errno otherwise
2402 */
2403static int tipc_getsockopt(struct socket *sock, int lvl, int opt,
2404 char __user *ov, int __user *ol)
2405{
2406 struct sock *sk = sock->sk;
2407 struct tipc_sock *tsk = tipc_sk(sk);
2408 int len;
2409 u32 value;
2410 int res;
2411
2412 if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
2413 return put_user(0, ol);
2414 if (lvl != SOL_TIPC)
2415 return -ENOPROTOOPT;
2416 res = get_user(len, ol);
2417 if (res)
2418 return res;
2419
2420 lock_sock(sk);
2421
2422 switch (opt) {
2423 case TIPC_IMPORTANCE:
2424 value = tsk_importance(tsk);
2425 break;
2426 case TIPC_SRC_DROPPABLE:
2427 value = tsk_unreliable(tsk);
2428 break;
2429 case TIPC_DEST_DROPPABLE:
2430 value = tsk_unreturnable(tsk);
2431 break;
2432 case TIPC_CONN_TIMEOUT:
2433 value = tsk->conn_timeout;
2434 /* no need to set "res", since already 0 at this point */
2435 break;
2436 case TIPC_NODE_RECVQ_DEPTH:
2437 value = 0; /* was tipc_queue_size, now obsolete */
2438 break;
2439 case TIPC_SOCK_RECVQ_DEPTH:
2440 value = skb_queue_len(&sk->sk_receive_queue);
2441 break;
2442 default:
2443 res = -EINVAL;
2444 }
2445
2446 release_sock(sk);
2447
2448 if (res)
2449 return res; /* "get" failed */
2450
2451 if (len < sizeof(value))
2452 return -EINVAL;
2453
2454 if (copy_to_user(ov, &value, sizeof(value)))
2455 return -EFAULT;
2456
2457 return put_user(sizeof(value), ol);
2458}
2459
2460static int tipc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
2461{
2462 struct sock *sk = sock->sk;
2463 struct tipc_sioc_ln_req lnr;
2464 void __user *argp = (void __user *)arg;
2465
2466 switch (cmd) {
2467 case SIOCGETLINKNAME:
2468 if (copy_from_user(&lnr, argp, sizeof(lnr)))
2469 return -EFAULT;
2470 if (!tipc_node_get_linkname(sock_net(sk),
2471 lnr.bearer_id & 0xffff, lnr.peer,
2472 lnr.linkname, TIPC_MAX_LINK_NAME)) {
2473 if (copy_to_user(argp, &lnr, sizeof(lnr)))
2474 return -EFAULT;
2475 return 0;
2476 }
2477 return -EADDRNOTAVAIL;
2478 default:
2479 return -ENOIOCTLCMD;
2480 }
2481}
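
/* Usage sketch (illustrative; peer_addr is an already known node
 * address):
 *
 *	struct tipc_sioc_ln_req lnr = {
 *		.peer = peer_addr,
 *		.bearer_id = 0,
 *	};
 *
 *	if (!ioctl(sd, SIOCGETLINKNAME, &lnr))
 *		printf("link: %s\n", lnr.linkname);
 */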
2482
2483/* Protocol switches for the various types of TIPC sockets */
2484
2485static const struct proto_ops msg_ops = {
2486 .owner = THIS_MODULE,
2487 .family = AF_TIPC,
2488 .release = tipc_release,
2489 .bind = tipc_bind,
2490 .connect = tipc_connect,
2491 .socketpair = sock_no_socketpair,
2492 .accept = sock_no_accept,
2493 .getname = tipc_getname,
2494 .poll = tipc_poll,
2495 .ioctl = tipc_ioctl,
2496 .listen = sock_no_listen,
2497 .shutdown = tipc_shutdown,
2498 .setsockopt = tipc_setsockopt,
2499 .getsockopt = tipc_getsockopt,
2500 .sendmsg = tipc_sendmsg,
2501 .recvmsg = tipc_recvmsg,
2502 .mmap = sock_no_mmap,
2503 .sendpage = sock_no_sendpage
2504};
2505
2506static const struct proto_ops packet_ops = {
2507 .owner = THIS_MODULE,
2508 .family = AF_TIPC,
2509 .release = tipc_release,
2510 .bind = tipc_bind,
2511 .connect = tipc_connect,
2512 .socketpair = sock_no_socketpair,
2513 .accept = tipc_accept,
2514 .getname = tipc_getname,
2515 .poll = tipc_poll,
2516 .ioctl = tipc_ioctl,
2517 .listen = tipc_listen,
2518 .shutdown = tipc_shutdown,
2519 .setsockopt = tipc_setsockopt,
2520 .getsockopt = tipc_getsockopt,
2521 .sendmsg = tipc_send_packet,
2522 .recvmsg = tipc_recvmsg,
2523 .mmap = sock_no_mmap,
2524 .sendpage = sock_no_sendpage
2525};
2526
2527static const struct proto_ops stream_ops = {
2528 .owner = THIS_MODULE,
2529 .family = AF_TIPC,
2530 .release = tipc_release,
2531 .bind = tipc_bind,
2532 .connect = tipc_connect,
2533 .socketpair = sock_no_socketpair,
2534 .accept = tipc_accept,
2535 .getname = tipc_getname,
2536 .poll = tipc_poll,
2537 .ioctl = tipc_ioctl,
2538 .listen = tipc_listen,
2539 .shutdown = tipc_shutdown,
2540 .setsockopt = tipc_setsockopt,
2541 .getsockopt = tipc_getsockopt,
2542 .sendmsg = tipc_send_stream,
2543 .recvmsg = tipc_recv_stream,
2544 .mmap = sock_no_mmap,
2545 .sendpage = sock_no_sendpage
2546};
2547
2548static const struct net_proto_family tipc_family_ops = {
2549 .owner = THIS_MODULE,
2550 .family = AF_TIPC,
2551 .create = tipc_sk_create
2552};
2553
2554static struct proto tipc_proto = {
2555 .name = "TIPC",
2556 .owner = THIS_MODULE,
2557 .obj_size = sizeof(struct tipc_sock),
2558 .sysctl_rmem = sysctl_tipc_rmem
2559};
2560
2561/**
2562 * tipc_socket_init - initialize TIPC socket interface
2563 *
2564 * Returns 0 on success, errno otherwise
2565 */
2566int tipc_socket_init(void)
2567{
2568 int res;
2569
2570 res = proto_register(&tipc_proto, 1);
2571 if (res) {
2572 pr_err("Failed to register TIPC protocol type\n");
2573 goto out;
2574 }
2575
2576 res = sock_register(&tipc_family_ops);
2577 if (res) {
2578 pr_err("Failed to register TIPC socket type\n");
2579 proto_unregister(&tipc_proto);
2580 goto out;
2581 }
2582 out:
2583 return res;
2584}
2585
2586/**
2587 * tipc_socket_stop - stop TIPC socket interface
2588 */
2589void tipc_socket_stop(void)
2590{
2591 sock_unregister(tipc_family_ops.family);
2592 proto_unregister(&tipc_proto);
2593}
2594
2595/* Caller should hold socket lock for the passed tipc socket. */
2596static int __tipc_nl_add_sk_con(struct sk_buff *skb, struct tipc_sock *tsk)
2597{
2598 u32 peer_node;
2599 u32 peer_port;
2600 struct nlattr *nest;
2601
2602 peer_node = tsk_peer_node(tsk);
2603 peer_port = tsk_peer_port(tsk);
2604
2605 nest = nla_nest_start(skb, TIPC_NLA_SOCK_CON);
2606
2607 if (nla_put_u32(skb, TIPC_NLA_CON_NODE, peer_node))
2608 goto msg_full;
2609 if (nla_put_u32(skb, TIPC_NLA_CON_SOCK, peer_port))
2610 goto msg_full;
2611
2612 if (tsk->conn_type != 0) {
2613 if (nla_put_flag(skb, TIPC_NLA_CON_FLAG))
2614 goto msg_full;
2615 if (nla_put_u32(skb, TIPC_NLA_CON_TYPE, tsk->conn_type))
2616 goto msg_full;
2617 if (nla_put_u32(skb, TIPC_NLA_CON_INST, tsk->conn_instance))
2618 goto msg_full;
2619 }
2620 nla_nest_end(skb, nest);
2621
2622 return 0;
2623
2624msg_full:
2625 nla_nest_cancel(skb, nest);
2626
2627 return -EMSGSIZE;
2628}
2629
2630/* Caller should hold socket lock for the passed tipc socket. */
2631static int __tipc_nl_add_sk(struct sk_buff *skb, struct netlink_callback *cb,
2632 struct tipc_sock *tsk)
2633{
2634 int err;
2635 void *hdr;
2636 struct nlattr *attrs;
2637 struct net *net = sock_net(skb->sk);
2638 struct tipc_net *tn = net_generic(net, tipc_net_id);
2639
2640 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
2641 &tipc_genl_family, NLM_F_MULTI, TIPC_NL_SOCK_GET);
2642 if (!hdr)
2643 goto msg_cancel;
2644
2645 attrs = nla_nest_start(skb, TIPC_NLA_SOCK);
2646 if (!attrs)
2647 goto genlmsg_cancel;
2648 if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->portid))
2649 goto attr_msg_cancel;
2650 if (nla_put_u32(skb, TIPC_NLA_SOCK_ADDR, tn->own_addr))
2651 goto attr_msg_cancel;
2652
2653 if (tsk->connected) {
2654 err = __tipc_nl_add_sk_con(skb, tsk);
2655 if (err)
2656 goto attr_msg_cancel;
2657 } else if (!list_empty(&tsk->publications)) {
2658 if (nla_put_flag(skb, TIPC_NLA_SOCK_HAS_PUBL))
2659 goto attr_msg_cancel;
2660 }
2661 nla_nest_end(skb, attrs);
2662 genlmsg_end(skb, hdr);
2663
2664 return 0;
2665
2666attr_msg_cancel:
2667 nla_nest_cancel(skb, attrs);
2668genlmsg_cancel:
2669 genlmsg_cancel(skb, hdr);
2670msg_cancel:
2671 return -EMSGSIZE;
2672}
2673
2674int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb)
2675{
2676 int err;
2677 struct tipc_sock *tsk;
2678 const struct bucket_table *tbl;
2679 struct rhash_head *pos;
2680 struct net *net = sock_net(skb->sk);
2681 struct tipc_net *tn = net_generic(net, tipc_net_id);
2682 u32 tbl_id = cb->args[0];
2683 u32 prev_portid = cb->args[1];
2684
2685 rcu_read_lock();
2686 tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht);
2687 for (; tbl_id < tbl->size; tbl_id++) {
2688 rht_for_each_entry_rcu(tsk, pos, tbl, tbl_id, node) {
2689 spin_lock_bh(&tsk->sk.sk_lock.slock);
2690 if (prev_portid && prev_portid != tsk->portid) {
2691 spin_unlock_bh(&tsk->sk.sk_lock.slock);
2692 continue;
2693 }
2694
2695 err = __tipc_nl_add_sk(skb, cb, tsk);
2696 if (err) {
2697 prev_portid = tsk->portid;
2698 spin_unlock_bh(&tsk->sk.sk_lock.slock);
2699 goto out;
2700 }
2701 prev_portid = 0;
2702 spin_unlock_bh(&tsk->sk.sk_lock.slock);
2703 }
2704 }
2705out:
2706 rcu_read_unlock();
2707 cb->args[0] = tbl_id;
2708 cb->args[1] = prev_portid;
2709
2710 return skb->len;
2711}
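
/* Resume sketch: when the dump skb fills up, the walk parks its position
 * in cb->args[] (the bucket index and the portid of the socket that did
 * not fit); the next invocation restarts at that bucket, skips entries
 * until prev_portid matches, and then re-adds the parked socket before
 * continuing.
 */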
2712
2713/* Caller should hold socket lock for the passed tipc socket. */
2714static int __tipc_nl_add_sk_publ(struct sk_buff *skb,
2715 struct netlink_callback *cb,
2716 struct publication *publ)
2717{
2718 void *hdr;
2719 struct nlattr *attrs;
2720
2721 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
2722 &tipc_genl_family, NLM_F_MULTI, TIPC_NL_PUBL_GET);
2723 if (!hdr)
2724 goto msg_cancel;
2725
2726 attrs = nla_nest_start(skb, TIPC_NLA_PUBL);
2727 if (!attrs)
2728 goto genlmsg_cancel;
2729
2730 if (nla_put_u32(skb, TIPC_NLA_PUBL_KEY, publ->key))
2731 goto attr_msg_cancel;
2732 if (nla_put_u32(skb, TIPC_NLA_PUBL_TYPE, publ->type))
2733 goto attr_msg_cancel;
2734 if (nla_put_u32(skb, TIPC_NLA_PUBL_LOWER, publ->lower))
2735 goto attr_msg_cancel;
2736 if (nla_put_u32(skb, TIPC_NLA_PUBL_UPPER, publ->upper))
2737 goto attr_msg_cancel;
2738
2739 nla_nest_end(skb, attrs);
2740 genlmsg_end(skb, hdr);
2741
2742 return 0;
2743
2744attr_msg_cancel:
2745 nla_nest_cancel(skb, attrs);
2746genlmsg_cancel:
2747 genlmsg_cancel(skb, hdr);
2748msg_cancel:
2749 return -EMSGSIZE;
2750}
2751
2752/* Caller should hold socket lock for the passed tipc socket. */
2753static int __tipc_nl_list_sk_publ(struct sk_buff *skb,
2754 struct netlink_callback *cb,
2755 struct tipc_sock *tsk, u32 *last_publ)
2756{
2757 int err;
2758 struct publication *p;
2759
2760 if (*last_publ) {
2761 list_for_each_entry(p, &tsk->publications, pport_list) {
2762 if (p->key == *last_publ)
2763 break;
2764 }
2765 if (p->key != *last_publ) {
2766			/* We never set seq or call nl_dump_check_consistent(),
2767			 * which means that setting prev_seq here will cause the
2768			 * consistency check to fail in the netlink callback
2769			 * handler, resulting in the last NLMSG_DONE message
2770			 * having the NLM_F_DUMP_INTR flag set.
2771 */
2772 cb->prev_seq = 1;
2773 *last_publ = 0;
2774 return -EPIPE;
2775 }
2776 } else {
2777 p = list_first_entry(&tsk->publications, struct publication,
2778 pport_list);
2779 }
2780
2781 list_for_each_entry_from(p, &tsk->publications, pport_list) {
2782 err = __tipc_nl_add_sk_publ(skb, cb, p);
2783 if (err) {
2784 *last_publ = p->key;
2785 return err;
2786 }
2787 }
2788 *last_publ = 0;
2789
2790 return 0;
2791}
2792
2793int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb)
2794{
2795 int err;
2796 u32 tsk_portid = cb->args[0];
2797 u32 last_publ = cb->args[1];
2798 u32 done = cb->args[2];
2799 struct net *net = sock_net(skb->sk);
2800 struct tipc_sock *tsk;
2801
2802 if (!tsk_portid) {
2803 struct nlattr **attrs;
2804 struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1];
2805
2806 err = tipc_nlmsg_parse(cb->nlh, &attrs);
2807 if (err)
2808 return err;
2809
2810 err = nla_parse_nested(sock, TIPC_NLA_SOCK_MAX,
2811 attrs[TIPC_NLA_SOCK],
2812 tipc_nl_sock_policy);
2813 if (err)
2814 return err;
2815
2816 if (!sock[TIPC_NLA_SOCK_REF])
2817 return -EINVAL;
2818
2819 tsk_portid = nla_get_u32(sock[TIPC_NLA_SOCK_REF]);
2820 }
2821
2822 if (done)
2823 return 0;
2824
2825 tsk = tipc_sk_lookup(net, tsk_portid);
2826 if (!tsk)
2827 return -EINVAL;
2828
2829 lock_sock(&tsk->sk);
2830 err = __tipc_nl_list_sk_publ(skb, cb, tsk, &last_publ);
2831 if (!err)
2832 done = 1;
2833 release_sock(&tsk->sk);
2834 sock_put(&tsk->sk);
2835
2836 cb->args[0] = tsk_portid;
2837 cb->args[1] = last_publ;
2838 cb->args[2] = done;
2839
2840 return skb->len;
2841}