/*
 * net/tipc/socket.c: TIPC socket API
 *
 * Copyright (c) 2001-2007, 2012-2016, Ericsson AB
 * Copyright (c) 2004-2008, 2010-2013, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/rhashtable.h>
#include "core.h"
#include "name_table.h"
#include "node.h"
#include "link.h"
#include "name_distr.h"
#include "socket.h"
#include "bcast.h"
#include "netlink.h"

#define CONN_TIMEOUT_DEFAULT	8000	/* default connect timeout = 8s */
#define CONN_PROBING_INTERVAL	msecs_to_jiffies(3600000)  /* [ms] => 1 h */
#define TIPC_FWD_MSG		1
#define TIPC_MAX_PORT		0xffffffff
#define TIPC_MIN_PORT		1

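/* TIPC connection states, expressed as the equivalent TCP sk_state values
 * so that generic socket code inspecting sk->sk_state sees familiar
 * constants.
 */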
enum {
        TIPC_LISTEN = TCP_LISTEN,
        TIPC_ESTABLISHED = TCP_ESTABLISHED,
        TIPC_OPEN = TCP_CLOSE,
        TIPC_DISCONNECTING = TCP_CLOSE_WAIT,
        TIPC_CONNECTING = TCP_SYN_SENT,
};

/**
 * struct tipc_sock - TIPC socket structure
 * @sk: socket - interacts with 'port' and with user via the socket API
 * @conn_type: TIPC type used when connection was established
 * @conn_instance: TIPC instance used when connection was established
 * @published: non-zero if port has one or more associated names
 * @max_pkt: maximum packet size "hint" used when building messages sent by port
 * @portid: unique port identity in TIPC socket hash table
 * @phdr: preformatted message header used when sending messages
 * @publications: list of publications for port
 * @pub_count: total # of publications port has made during its lifetime
 * @probe_unacked: connection probe has been sent but not yet acknowledged
 * @conn_timeout: the time we can wait for an unresponded setup request
 * @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue
 * @link_cong: non-zero if owner must sleep because of link congestion
 * @snt_unacked: # messages sent by socket, and not yet acked by peer
 * @rcv_unacked: # messages read by user, but not yet acked back to peer
 * @peer: 'connected' peer for dgram/rdm
 * @node: hash table node
 * @rcu: rcu struct for tipc_sock
 */
struct tipc_sock {
        struct sock sk;
        u32 conn_type;
        u32 conn_instance;
        int published;
        u32 max_pkt;
        u32 portid;
        struct tipc_msg phdr;
        struct list_head sock_list;
        struct list_head publications;
        u32 pub_count;
        uint conn_timeout;
        atomic_t dupl_rcvcnt;
        bool probe_unacked;
        bool link_cong;
        u16 snt_unacked;
        u16 snd_win;
        u16 peer_caps;
        u16 rcv_unacked;
        u16 rcv_win;
        struct sockaddr_tipc peer;
        struct rhash_head node;
        struct rcu_head rcu;
};

static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb);
static void tipc_data_ready(struct sock *sk);
static void tipc_write_space(struct sock *sk);
static void tipc_sock_destruct(struct sock *sk);
static int tipc_release(struct socket *sock);
static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags);
static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p);
static void tipc_sk_timeout(unsigned long data);
static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
                           struct tipc_name_seq const *seq);
static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
                            struct tipc_name_seq const *seq);
static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid);
static int tipc_sk_insert(struct tipc_sock *tsk);
static void tipc_sk_remove(struct tipc_sock *tsk);
static int __tipc_send_stream(struct socket *sock, struct msghdr *m,
                              size_t dsz);
static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz);

static const struct proto_ops packet_ops;
static const struct proto_ops stream_ops;
static const struct proto_ops msg_ops;
static struct proto tipc_proto;
static const struct rhashtable_params tsk_rht_params;

static u32 tsk_own_node(struct tipc_sock *tsk)
{
        return msg_prevnode(&tsk->phdr);
}

static u32 tsk_peer_node(struct tipc_sock *tsk)
{
        return msg_destnode(&tsk->phdr);
}

static u32 tsk_peer_port(struct tipc_sock *tsk)
{
        return msg_destport(&tsk->phdr);
}

static bool tsk_unreliable(struct tipc_sock *tsk)
{
        return msg_src_droppable(&tsk->phdr) != 0;
}

static void tsk_set_unreliable(struct tipc_sock *tsk, bool unreliable)
{
        msg_set_src_droppable(&tsk->phdr, unreliable ? 1 : 0);
}

static bool tsk_unreturnable(struct tipc_sock *tsk)
{
        return msg_dest_droppable(&tsk->phdr) != 0;
}

static void tsk_set_unreturnable(struct tipc_sock *tsk, bool unreturnable)
{
        msg_set_dest_droppable(&tsk->phdr, unreturnable ? 1 : 0);
}

static int tsk_importance(struct tipc_sock *tsk)
{
        return msg_importance(&tsk->phdr);
}

static int tsk_set_importance(struct tipc_sock *tsk, int imp)
{
        if (imp > TIPC_CRITICAL_IMPORTANCE)
                return -EINVAL;
        msg_set_importance(&tsk->phdr, (u32)imp);
        return 0;
}

static struct tipc_sock *tipc_sk(const struct sock *sk)
{
        return container_of(sk, struct tipc_sock, sk);
}

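/* tsk_conn_cong(): true when the connection's send window is exhausted */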
static bool tsk_conn_cong(struct tipc_sock *tsk)
{
        return tsk->snt_unacked > tsk->snd_win;
}

/* tsk_adv_blocks(): translate a buffer size in bytes to the number of
 * advertisable blocks, taking into account the ratio truesize(len)/len.
 * We can trust that this ratio is always < 4 for len >= FLOWCTL_BLK_SZ.
 */
static u16 tsk_adv_blocks(int len)
{
        return len / FLOWCTL_BLK_SZ / 4;
}

/* tsk_inc(): increment counter for sent or received data
 * - If block-based flow control is not supported by the peer, we fall
 *   back to message-based flow control, incrementing the counter by one
 *   per message.
 */
static u16 tsk_inc(struct tipc_sock *tsk, int msglen)
{
        if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
                return ((msglen / FLOWCTL_BLK_SZ) + 1);
        return 1;
}

/**
 * tsk_advance_rx_queue - discard first buffer in socket receive queue
 *
 * Caller must hold socket lock
 */
static void tsk_advance_rx_queue(struct sock *sk)
{
        kfree_skb(__skb_dequeue(&sk->sk_receive_queue));
}

/* tipc_sk_respond(): send response message back to sender
 */
static void tipc_sk_respond(struct sock *sk, struct sk_buff *skb, int err)
{
        u32 selector;
        u32 dnode;
        u32 onode = tipc_own_addr(sock_net(sk));

        if (!tipc_msg_reverse(onode, &skb, err))
                return;

        dnode = msg_destnode(buf_msg(skb));
        selector = msg_origport(buf_msg(skb));
        tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector);
}

/**
 * tsk_rej_rx_queue - reject all buffers in socket receive queue
 *
 * Caller must hold socket lock
 */
static void tsk_rej_rx_queue(struct sock *sk)
{
        struct sk_buff *skb;

        while ((skb = __skb_dequeue(&sk->sk_receive_queue)))
                tipc_sk_respond(sk, skb, TIPC_ERR_NO_PORT);
}

static bool tipc_sk_connected(struct sock *sk)
{
        return sk->sk_state == TIPC_ESTABLISHED;
}

/* tipc_sk_type_connectionless - check if the socket is a datagram socket
 * @sk: socket
 *
 * Returns true if connectionless, false otherwise
 */
static bool tipc_sk_type_connectionless(struct sock *sk)
{
        return sk->sk_type == SOCK_RDM || sk->sk_type == SOCK_DGRAM;
}

/* tsk_peer_msg - verify if message was sent by connected port's peer
 *
 * Handles cases where the node's network address has changed from
 * the default of <0.0.0> to its configured setting.
 */
static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg)
{
        struct sock *sk = &tsk->sk;
        struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id);
        u32 peer_port = tsk_peer_port(tsk);
        u32 orig_node;
        u32 peer_node;

        if (unlikely(!tipc_sk_connected(sk)))
                return false;

        if (unlikely(msg_origport(msg) != peer_port))
                return false;

        orig_node = msg_orignode(msg);
        peer_node = tsk_peer_node(tsk);

        if (likely(orig_node == peer_node))
                return true;

        if (!orig_node && (peer_node == tn->own_addr))
                return true;

        if (!peer_node && (orig_node == tn->own_addr))
                return true;

        return false;
}

/* tipc_set_sk_state - set the sk_state of the socket
 * @sk: socket
 *
 * Caller must hold socket lock
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_set_sk_state(struct sock *sk, int state)
{
        int oldsk_state = sk->sk_state;
        int res = -EINVAL;

        switch (state) {
        case TIPC_OPEN:
                res = 0;
                break;
        case TIPC_LISTEN:
        case TIPC_CONNECTING:
                if (oldsk_state == TIPC_OPEN)
                        res = 0;
                break;
        case TIPC_ESTABLISHED:
                if (oldsk_state == TIPC_CONNECTING ||
                    oldsk_state == TIPC_OPEN)
                        res = 0;
                break;
        case TIPC_DISCONNECTING:
                if (oldsk_state == TIPC_CONNECTING ||
                    oldsk_state == TIPC_ESTABLISHED)
                        res = 0;
                break;
        }

        if (!res)
                sk->sk_state = state;

        return res;
}

/**
 * tipc_sk_create - create a TIPC socket
 * @net: network namespace (must be default network)
 * @sock: pre-allocated socket structure
 * @protocol: protocol indicator (must be 0)
 * @kern: caused by kernel or by userspace?
 *
 * This routine creates additional data structures used by the TIPC socket,
 * initializes them, and links them together.
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_sk_create(struct net *net, struct socket *sock,
                          int protocol, int kern)
{
        struct tipc_net *tn;
        const struct proto_ops *ops;
        struct sock *sk;
        struct tipc_sock *tsk;
        struct tipc_msg *msg;

        /* Validate arguments */
        if (unlikely(protocol != 0))
                return -EPROTONOSUPPORT;

        switch (sock->type) {
        case SOCK_STREAM:
                ops = &stream_ops;
                break;
        case SOCK_SEQPACKET:
                ops = &packet_ops;
                break;
        case SOCK_DGRAM:
        case SOCK_RDM:
                ops = &msg_ops;
                break;
        default:
                return -EPROTOTYPE;
        }

        /* Allocate socket's protocol area */
        sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto, kern);
        if (sk == NULL)
                return -ENOMEM;

        tsk = tipc_sk(sk);
        tsk->max_pkt = MAX_PKT_DEFAULT;
        INIT_LIST_HEAD(&tsk->publications);
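        /* Pre-build a header template; it is used as the base header for
         * every message sent from this socket.
         */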
        msg = &tsk->phdr;
        tn = net_generic(sock_net(sk), tipc_net_id);
        tipc_msg_init(tn->own_addr, msg, TIPC_LOW_IMPORTANCE, TIPC_NAMED_MSG,
                      NAMED_H_SIZE, 0);

        /* Finish initializing socket data structures */
        sock->ops = ops;
        sock_init_data(sock, sk);
        tipc_set_sk_state(sk, TIPC_OPEN);
        if (tipc_sk_insert(tsk)) {
                pr_warn("Socket create failed; port number exhausted\n");
                return -EINVAL;
        }
        msg_set_origport(msg, tsk->portid);
        setup_timer(&sk->sk_timer, tipc_sk_timeout, (unsigned long)tsk);
        sk->sk_shutdown = 0;
        sk->sk_backlog_rcv = tipc_backlog_rcv;
        sk->sk_rcvbuf = sysctl_tipc_rmem[1];
        sk->sk_data_ready = tipc_data_ready;
        sk->sk_write_space = tipc_write_space;
        sk->sk_destruct = tipc_sock_destruct;
        tsk->conn_timeout = CONN_TIMEOUT_DEFAULT;
        atomic_set(&tsk->dupl_rcvcnt, 0);

        /* Start out with safe limits until we receive an advertised window */
        tsk->snd_win = tsk_adv_blocks(RCVBUF_MIN);
        tsk->rcv_win = tsk->snd_win;

        if (tipc_sk_type_connectionless(sk)) {
                tsk_set_unreturnable(tsk, true);
                if (sock->type == SOCK_DGRAM)
                        tsk_set_unreliable(tsk, true);
        }

        return 0;
}

static void tipc_sk_callback(struct rcu_head *head)
{
        struct tipc_sock *tsk = container_of(head, struct tipc_sock, rcu);

        sock_put(&tsk->sk);
}

/* Caller should hold socket lock for the socket. */
static void __tipc_shutdown(struct socket *sock, int error)
{
        struct sock *sk = sock->sk;
        struct tipc_sock *tsk = tipc_sk(sk);
        struct net *net = sock_net(sk);
        u32 dnode = tsk_peer_node(tsk);
        struct sk_buff *skb;

        /* Reject all unreceived messages, except on an active connection
         * (which disconnects locally & sends a 'FIN+' to peer).
         */
        while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
                if (TIPC_SKB_CB(skb)->bytes_read) {
                        kfree_skb(skb);
                        continue;
                }
                if (!tipc_sk_type_connectionless(sk) &&
                    sk->sk_state != TIPC_DISCONNECTING) {
                        tipc_set_sk_state(sk, TIPC_DISCONNECTING);
                        tipc_node_remove_conn(net, dnode, tsk->portid);
                }
                tipc_sk_respond(sk, skb, error);
        }

        if (tipc_sk_type_connectionless(sk))
                return;

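        /* Send a 'FIN' - an empty TIPC_CONN_MSG carrying the error code -
         * so the peer can tear down its end of the connection.
         */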
        if (sk->sk_state != TIPC_DISCONNECTING) {
                skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
                                      TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode,
                                      tsk_own_node(tsk), tsk_peer_port(tsk),
                                      tsk->portid, error);
                if (skb)
                        tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
                tipc_node_remove_conn(net, dnode, tsk->portid);
                tipc_set_sk_state(sk, TIPC_DISCONNECTING);
        }
}

/**
 * tipc_release - destroy a TIPC socket
 * @sock: socket to destroy
 *
 * This routine cleans up any messages that are still queued on the socket.
 * For DGRAM and RDM socket types, all queued messages are rejected.
 * For SEQPACKET and STREAM socket types, the first message is rejected
 * and any others are discarded. (If the first message on a STREAM socket
 * is partially-read, it is discarded and the next one is rejected instead.)
 *
 * NOTE: Rejected messages are not necessarily returned to the sender! They
 * are returned or discarded according to the "destination droppable" setting
 * specified for the message by the sender.
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_release(struct socket *sock)
{
        struct sock *sk = sock->sk;
        struct tipc_sock *tsk;

        /*
         * Exit if socket isn't fully initialized (occurs when a failed accept()
         * releases a pre-allocated child socket that was never used)
         */
        if (sk == NULL)
                return 0;

        tsk = tipc_sk(sk);
        lock_sock(sk);

        __tipc_shutdown(sock, TIPC_ERR_NO_PORT);
        sk->sk_shutdown = SHUTDOWN_MASK;
        tipc_sk_withdraw(tsk, 0, NULL);
        sk_stop_timer(sk, &sk->sk_timer);
        tipc_sk_remove(tsk);

        /* Reject any messages that accumulated in backlog queue */
        release_sock(sk);

        call_rcu(&tsk->rcu, tipc_sk_callback);
        sock->sk = NULL;

        return 0;
}

/**
 * tipc_bind - associate or disassociate TIPC name(s) with a socket
 * @sock: socket structure
 * @uaddr: socket address describing name(s) and desired operation
 * @uaddr_len: size of socket address data structure
 *
 * Name and name sequence binding is indicated using a positive scope value;
 * a negative scope value unbinds the specified name. Specifying no name
 * (i.e. a socket address length of 0) unbinds all names from the socket.
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_bind(struct socket *sock, struct sockaddr *uaddr,
                     int uaddr_len)
{
        struct sock *sk = sock->sk;
        struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
        struct tipc_sock *tsk = tipc_sk(sk);
        int res = -EINVAL;

        lock_sock(sk);
        if (unlikely(!uaddr_len)) {
                res = tipc_sk_withdraw(tsk, 0, NULL);
                goto exit;
        }

        if (uaddr_len < sizeof(struct sockaddr_tipc)) {
                res = -EINVAL;
                goto exit;
        }
        if (addr->family != AF_TIPC) {
                res = -EAFNOSUPPORT;
                goto exit;
        }

        if (addr->addrtype == TIPC_ADDR_NAME)
                addr->addr.nameseq.upper = addr->addr.nameseq.lower;
        else if (addr->addrtype != TIPC_ADDR_NAMESEQ) {
                res = -EAFNOSUPPORT;
                goto exit;
        }

        if ((addr->addr.nameseq.type < TIPC_RESERVED_TYPES) &&
            (addr->addr.nameseq.type != TIPC_TOP_SRV) &&
            (addr->addr.nameseq.type != TIPC_CFG_SRV)) {
                res = -EACCES;
                goto exit;
        }

        res = (addr->scope > 0) ?
                tipc_sk_publish(tsk, addr->scope, &addr->addr.nameseq) :
                tipc_sk_withdraw(tsk, -addr->scope, &addr->addr.nameseq);
exit:
        release_sock(sk);
        return res;
}

/**
 * tipc_getname - get port ID of socket or peer socket
 * @sock: socket structure
 * @uaddr: area for returned socket address
 * @uaddr_len: area for returned length of socket address
 * @peer: 0 = own ID, 1 = current peer ID, 2 = current/former peer ID
 *
 * Returns 0 on success, errno otherwise
 *
 * NOTE: This routine doesn't need to take the socket lock since it only
 * accesses socket information that is unchanging (or which changes in
 * a completely predictable manner).
 */
static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
                        int *uaddr_len, int peer)
{
        struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
        struct sock *sk = sock->sk;
        struct tipc_sock *tsk = tipc_sk(sk);
        struct tipc_net *tn = net_generic(sock_net(sock->sk), tipc_net_id);

        memset(addr, 0, sizeof(*addr));
        if (peer) {
                if ((!tipc_sk_connected(sk)) &&
                    ((peer != 2) || (sk->sk_state != TIPC_DISCONNECTING)))
                        return -ENOTCONN;
                addr->addr.id.ref = tsk_peer_port(tsk);
                addr->addr.id.node = tsk_peer_node(tsk);
        } else {
                addr->addr.id.ref = tsk->portid;
                addr->addr.id.node = tn->own_addr;
        }

        *uaddr_len = sizeof(*addr);
        addr->addrtype = TIPC_ADDR_ID;
        addr->family = AF_TIPC;
        addr->scope = 0;
        addr->addr.name.domain = 0;

        return 0;
}

/**
 * tipc_poll - read and possibly block on pollmask
 * @file: file structure associated with the socket
 * @sock: socket for which to calculate the poll bits
 * @wait: poll table passed in by the caller
 *
 * Returns pollmask value
 *
 * COMMENTARY:
 * It appears that the usual socket locking mechanisms are not useful here
 * since the pollmask info is potentially out-of-date the moment this routine
 * exits. TCP and other protocols seem to rely on higher level poll routines
 * to handle any preventable race conditions, so TIPC will do the same ...
 *
 * IMPORTANT: The fact that a read or write operation is indicated does NOT
 * imply that the operation will succeed, merely that it should be performed
 * and will not block.
 */
static unsigned int tipc_poll(struct file *file, struct socket *sock,
                              poll_table *wait)
{
        struct sock *sk = sock->sk;
        struct tipc_sock *tsk = tipc_sk(sk);
        u32 mask = 0;

        sock_poll_wait(file, sk_sleep(sk), wait);

        if (sk->sk_shutdown & RCV_SHUTDOWN)
                mask |= POLLRDHUP | POLLIN | POLLRDNORM;
        if (sk->sk_shutdown == SHUTDOWN_MASK)
                mask |= POLLHUP;

        switch (sk->sk_state) {
        case TIPC_ESTABLISHED:
                if (!tsk->link_cong && !tsk_conn_cong(tsk))
                        mask |= POLLOUT;
                /* fall thru' */
        case TIPC_LISTEN:
        case TIPC_CONNECTING:
                if (!skb_queue_empty(&sk->sk_receive_queue))
                        mask |= (POLLIN | POLLRDNORM);
                break;
        case TIPC_OPEN:
                if (!tsk->link_cong)
                        mask |= POLLOUT;
                if (tipc_sk_type_connectionless(sk) &&
                    (!skb_queue_empty(&sk->sk_receive_queue)))
                        mask |= (POLLIN | POLLRDNORM);
                break;
        case TIPC_DISCONNECTING:
                mask = (POLLIN | POLLRDNORM | POLLHUP);
                break;
        }

        return mask;
}

/**
 * tipc_sendmcast - send multicast message
 * @sock: socket structure
 * @seq: destination address
 * @msg: message to send
 * @dsz: total length of message data
 * @timeo: timeout to wait for wakeup
 *
 * Called from function tipc_sendmsg(), which has done all sanity checks
 * Returns the number of bytes sent on success, or errno
 */
static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq,
                          struct msghdr *msg, size_t dsz, long timeo)
{
        struct sock *sk = sock->sk;
        struct tipc_sock *tsk = tipc_sk(sk);
        struct net *net = sock_net(sk);
        struct tipc_msg *mhdr = &tsk->phdr;
        struct sk_buff_head pktchain;
        struct iov_iter save = msg->msg_iter;
        uint mtu;
        int rc;

        if (!timeo && tsk->link_cong)
                return -ELINKCONG;

        msg_set_type(mhdr, TIPC_MCAST_MSG);
        msg_set_lookup_scope(mhdr, TIPC_CLUSTER_SCOPE);
        msg_set_destport(mhdr, 0);
        msg_set_destnode(mhdr, 0);
        msg_set_nametype(mhdr, seq->type);
        msg_set_namelower(mhdr, seq->lower);
        msg_set_nameupper(mhdr, seq->upper);
        msg_set_hdr_sz(mhdr, MCAST_H_SIZE);

        skb_queue_head_init(&pktchain);

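        /* Build the packet chain for the current broadcast MTU; on -EMSGSIZE
         * the saved iterator lets us rebuild the chain with a fresh MTU.
         */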
new_mtu:
        mtu = tipc_bcast_get_mtu(net);
        rc = tipc_msg_build(mhdr, msg, 0, dsz, mtu, &pktchain);
        if (unlikely(rc < 0))
                return rc;

        do {
                rc = tipc_bcast_xmit(net, &pktchain);
                if (likely(!rc))
                        return dsz;

                if (rc == -ELINKCONG) {
                        tsk->link_cong = 1;
                        rc = tipc_wait_for_sndmsg(sock, &timeo);
                        if (!rc)
                                continue;
                }
                __skb_queue_purge(&pktchain);
                if (rc == -EMSGSIZE) {
                        msg->msg_iter = save;
                        goto new_mtu;
                }
                break;
        } while (1);
        return rc;
}

/**
 * tipc_sk_mcast_rcv - Deliver multicast messages to all destination sockets
 * @net: the associated network namespace
 * @arrvq: queue with arriving messages, to be cloned after destination lookup
 * @inputq: queue with cloned messages, delivered to socket after dest lookup
 *
 * Multi-threaded: parallel calls with reference to same queues may occur
 */
void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
                       struct sk_buff_head *inputq)
{
        struct tipc_msg *msg;
        struct tipc_plist dports;
        u32 portid;
        u32 scope = TIPC_CLUSTER_SCOPE;
        struct sk_buff_head tmpq;
        uint hsz;
        struct sk_buff *skb, *_skb;

        __skb_queue_head_init(&tmpq);
        tipc_plist_init(&dports);

        skb = tipc_skb_peek(arrvq, &inputq->lock);
        for (; skb; skb = tipc_skb_peek(arrvq, &inputq->lock)) {
                msg = buf_msg(skb);
                hsz = skb_headroom(skb) + msg_hdr_sz(msg);

                if (in_own_node(net, msg_orignode(msg)))
                        scope = TIPC_NODE_SCOPE;

                /* Create destination port list and message clones: */
                tipc_nametbl_mc_translate(net,
                                          msg_nametype(msg), msg_namelower(msg),
                                          msg_nameupper(msg), scope, &dports);
                portid = tipc_plist_pop(&dports);
                for (; portid; portid = tipc_plist_pop(&dports)) {
                        _skb = __pskb_copy(skb, hsz, GFP_ATOMIC);
                        if (_skb) {
                                msg_set_destport(buf_msg(_skb), portid);
                                __skb_queue_tail(&tmpq, _skb);
                                continue;
                        }
                        pr_warn("Failed to clone mcast rcv buffer\n");
                }
                /* Append to inputq if not already done by other thread */
                spin_lock_bh(&inputq->lock);
                if (skb_peek(arrvq) == skb) {
                        skb_queue_splice_tail_init(&tmpq, inputq);
                        kfree_skb(__skb_dequeue(arrvq));
                }
                spin_unlock_bh(&inputq->lock);
                __skb_queue_purge(&tmpq);
                kfree_skb(skb);
        }
        tipc_sk_rcv(net, inputq);
}

/**
 * tipc_sk_proto_rcv - receive a connection manager protocol message
 * @tsk: receiving socket
 * @skb: pointer to message buffer.
 * @xmitq: queue for any reply to be transmitted
 */
static void tipc_sk_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
                              struct sk_buff_head *xmitq)
{
        struct sock *sk = &tsk->sk;
        u32 onode = tsk_own_node(tsk);
        struct tipc_msg *hdr = buf_msg(skb);
        int mtyp = msg_type(hdr);
        bool conn_cong;

        /* Ignore if connection cannot be validated: */
        if (!tsk_peer_msg(tsk, hdr))
                goto exit;

        tsk->probe_unacked = false;

        if (mtyp == CONN_PROBE) {
                msg_set_type(hdr, CONN_PROBE_REPLY);
                if (tipc_msg_reverse(onode, &skb, TIPC_OK))
                        __skb_queue_tail(xmitq, skb);
                return;
        } else if (mtyp == CONN_ACK) {
                conn_cong = tsk_conn_cong(tsk);
                tsk->snt_unacked -= msg_conn_ack(hdr);
                if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
                        tsk->snd_win = msg_adv_win(hdr);
                if (conn_cong)
                        sk->sk_write_space(sk);
        } else if (mtyp != CONN_PROBE_REPLY) {
                pr_warn("Received unknown CONN_PROTO msg\n");
        }
exit:
        kfree_skb(skb);
}

static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p)
{
        DEFINE_WAIT_FUNC(wait, woken_wake_function);
        struct sock *sk = sock->sk;
        struct tipc_sock *tsk = tipc_sk(sk);
        int done;

        do {
                int err = sock_error(sk);
                if (err)
                        return err;
                if (sk->sk_shutdown & SEND_SHUTDOWN)
                        return -EPIPE;
                if (!*timeo_p)
                        return -EAGAIN;
                if (signal_pending(current))
                        return sock_intr_errno(*timeo_p);

                add_wait_queue(sk_sleep(sk), &wait);
                done = sk_wait_event(sk, timeo_p, !tsk->link_cong, &wait);
                remove_wait_queue(sk_sleep(sk), &wait);
        } while (!done);
        return 0;
}

/**
 * tipc_sendmsg - send message in connectionless manner
 * @sock: socket structure
 * @m: message to send
 * @dsz: amount of user data to be sent
 *
 * Message must have a destination specified explicitly.
 * Used for SOCK_RDM and SOCK_DGRAM messages,
 * and for 'SYN' messages on SOCK_SEQPACKET and SOCK_STREAM connections.
 * (Note: 'SYN+' is prohibited on SOCK_STREAM.)
 *
 * Returns the number of bytes sent on success, or errno otherwise
 */
static int tipc_sendmsg(struct socket *sock,
                        struct msghdr *m, size_t dsz)
{
        struct sock *sk = sock->sk;
        int ret;

        lock_sock(sk);
        ret = __tipc_sendmsg(sock, m, dsz);
        release_sock(sk);

        return ret;
}

static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz)
{
        DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
        struct sock *sk = sock->sk;
        struct tipc_sock *tsk = tipc_sk(sk);
        struct net *net = sock_net(sk);
        struct tipc_msg *mhdr = &tsk->phdr;
        u32 dnode, dport;
        struct sk_buff_head pktchain;
        bool is_connectionless = tipc_sk_type_connectionless(sk);
        struct sk_buff *skb;
        struct tipc_name_seq *seq;
        struct iov_iter save;
        u32 mtu;
        long timeo;
        int rc;

        if (dsz > TIPC_MAX_USER_MSG_SIZE)
                return -EMSGSIZE;
        if (unlikely(!dest)) {
                if (is_connectionless && tsk->peer.family == AF_TIPC)
                        dest = &tsk->peer;
                else
                        return -EDESTADDRREQ;
        } else if (unlikely(m->msg_namelen < sizeof(*dest)) ||
                   dest->family != AF_TIPC) {
                return -EINVAL;
        }
        if (!is_connectionless) {
                if (sk->sk_state == TIPC_LISTEN)
                        return -EPIPE;
                if (sk->sk_state != TIPC_OPEN)
                        return -EISCONN;
                if (tsk->published)
                        return -EOPNOTSUPP;
                if (dest->addrtype == TIPC_ADDR_NAME) {
                        tsk->conn_type = dest->addr.name.name.type;
                        tsk->conn_instance = dest->addr.name.name.instance;
                }
        }
        seq = &dest->addr.nameseq;
        timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);

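        /* Fill in destination: multicast, name lookup, or direct port ID */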
        if (dest->addrtype == TIPC_ADDR_MCAST) {
                return tipc_sendmcast(sock, seq, m, dsz, timeo);
        } else if (dest->addrtype == TIPC_ADDR_NAME) {
                u32 type = dest->addr.name.name.type;
                u32 inst = dest->addr.name.name.instance;
                u32 domain = dest->addr.name.domain;

                dnode = domain;
                msg_set_type(mhdr, TIPC_NAMED_MSG);
                msg_set_hdr_sz(mhdr, NAMED_H_SIZE);
                msg_set_nametype(mhdr, type);
                msg_set_nameinst(mhdr, inst);
                msg_set_lookup_scope(mhdr, tipc_addr_scope(domain));
                dport = tipc_nametbl_translate(net, type, inst, &dnode);
                msg_set_destnode(mhdr, dnode);
                msg_set_destport(mhdr, dport);
                if (unlikely(!dport && !dnode))
                        return -EHOSTUNREACH;
        } else if (dest->addrtype == TIPC_ADDR_ID) {
                dnode = dest->addr.id.node;
                msg_set_type(mhdr, TIPC_DIRECT_MSG);
                msg_set_lookup_scope(mhdr, 0);
                msg_set_destnode(mhdr, dnode);
                msg_set_destport(mhdr, dest->addr.id.ref);
                msg_set_hdr_sz(mhdr, BASIC_H_SIZE);
        }

        skb_queue_head_init(&pktchain);
        save = m->msg_iter;
new_mtu:
        mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
        rc = tipc_msg_build(mhdr, m, 0, dsz, mtu, &pktchain);
        if (rc < 0)
                return rc;

        do {
                skb = skb_peek(&pktchain);
                TIPC_SKB_CB(skb)->wakeup_pending = tsk->link_cong;
                rc = tipc_node_xmit(net, &pktchain, dnode, tsk->portid);
                if (likely(!rc)) {
                        if (!is_connectionless)
                                tipc_set_sk_state(sk, TIPC_CONNECTING);
                        return dsz;
                }
                if (rc == -ELINKCONG) {
                        tsk->link_cong = 1;
                        rc = tipc_wait_for_sndmsg(sock, &timeo);
                        if (!rc)
                                continue;
                }
                __skb_queue_purge(&pktchain);
                if (rc == -EMSGSIZE) {
                        m->msg_iter = save;
                        goto new_mtu;
                }
                break;
        } while (1);

        return rc;
}

static int tipc_wait_for_sndpkt(struct socket *sock, long *timeo_p)
{
        DEFINE_WAIT_FUNC(wait, woken_wake_function);
        struct sock *sk = sock->sk;
        struct tipc_sock *tsk = tipc_sk(sk);
        int done;

        do {
                int err = sock_error(sk);
                if (err)
                        return err;
                if (sk->sk_state == TIPC_DISCONNECTING)
                        return -EPIPE;
                else if (!tipc_sk_connected(sk))
                        return -ENOTCONN;
                if (!*timeo_p)
                        return -EAGAIN;
                if (signal_pending(current))
                        return sock_intr_errno(*timeo_p);

                add_wait_queue(sk_sleep(sk), &wait);
                done = sk_wait_event(sk, timeo_p,
                                     (!tsk->link_cong &&
                                      !tsk_conn_cong(tsk)) ||
                                      !tipc_sk_connected(sk), &wait);
                remove_wait_queue(sk_sleep(sk), &wait);
        } while (!done);
        return 0;
}

/**
 * tipc_send_stream - send stream-oriented data
 * @sock: socket structure
 * @m: data to send
 * @dsz: total length of data to be transmitted
 *
 * Used for SOCK_STREAM data.
 *
 * Returns the number of bytes sent on success (or partial success),
 * or errno if no data sent
 */
static int tipc_send_stream(struct socket *sock, struct msghdr *m, size_t dsz)
{
        struct sock *sk = sock->sk;
        int ret;

        lock_sock(sk);
        ret = __tipc_send_stream(sock, m, dsz);
        release_sock(sk);

        return ret;
}

static int __tipc_send_stream(struct socket *sock, struct msghdr *m, size_t dsz)
{
        struct sock *sk = sock->sk;
        struct net *net = sock_net(sk);
        struct tipc_sock *tsk = tipc_sk(sk);
        struct tipc_msg *mhdr = &tsk->phdr;
        struct sk_buff_head pktchain;
        DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
        u32 portid = tsk->portid;
        int rc = -EINVAL;
        long timeo;
        u32 dnode;
        uint mtu, send, sent = 0;
        struct iov_iter save;
        int hlen = MIN_H_SIZE;

        /* Handle implied connection establishment */
        if (unlikely(dest)) {
                rc = __tipc_sendmsg(sock, m, dsz);
                hlen = msg_hdr_sz(mhdr);
                if (dsz && (dsz == rc))
                        tsk->snt_unacked = tsk_inc(tsk, dsz + hlen);
                return rc;
        }
        if (dsz > (uint)INT_MAX)
                return -EMSGSIZE;

        if (unlikely(!tipc_sk_connected(sk))) {
                if (sk->sk_state == TIPC_DISCONNECTING)
                        return -EPIPE;
                else
                        return -ENOTCONN;
        }

        timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
        if (!timeo && tsk->link_cong)
                return -ELINKCONG;

        dnode = tsk_peer_node(tsk);
        skb_queue_head_init(&pktchain);

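        /* Send in chunks of at most TIPC_MAX_USER_MSG_SIZE; the saved
         * iterator allows a chunk to be rebuilt after an MTU change.
         */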
next:
        save = m->msg_iter;
        mtu = tsk->max_pkt;
        send = min_t(uint, dsz - sent, TIPC_MAX_USER_MSG_SIZE);
        rc = tipc_msg_build(mhdr, m, sent, send, mtu, &pktchain);
        if (unlikely(rc < 0))
                return rc;

        do {
                if (likely(!tsk_conn_cong(tsk))) {
                        rc = tipc_node_xmit(net, &pktchain, dnode, portid);
                        if (likely(!rc)) {
                                tsk->snt_unacked += tsk_inc(tsk, send + hlen);
                                sent += send;
                                if (sent == dsz)
                                        return dsz;
                                goto next;
                        }
                        if (rc == -EMSGSIZE) {
                                __skb_queue_purge(&pktchain);
                                tsk->max_pkt = tipc_node_get_mtu(net, dnode,
                                                                 portid);
                                m->msg_iter = save;
                                goto next;
                        }
                        if (rc != -ELINKCONG)
                                break;

                        tsk->link_cong = 1;
                }
                rc = tipc_wait_for_sndpkt(sock, &timeo);
        } while (!rc);

        __skb_queue_purge(&pktchain);
        return sent ? sent : rc;
}

/**
 * tipc_send_packet - send a connection-oriented message
 * @sock: socket structure
 * @m: message to send
 * @dsz: length of data to be transmitted
 *
 * Used for SOCK_SEQPACKET messages.
 *
 * Returns the number of bytes sent on success, or errno otherwise
 */
static int tipc_send_packet(struct socket *sock, struct msghdr *m, size_t dsz)
{
        if (dsz > TIPC_MAX_USER_MSG_SIZE)
                return -EMSGSIZE;

        return tipc_send_stream(sock, m, dsz);
}

/* tipc_sk_finish_conn - complete the setup of a connection
 */
static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port,
                                u32 peer_node)
{
        struct sock *sk = &tsk->sk;
        struct net *net = sock_net(sk);
        struct tipc_msg *msg = &tsk->phdr;

        msg_set_destnode(msg, peer_node);
        msg_set_destport(msg, peer_port);
        msg_set_type(msg, TIPC_CONN_MSG);
        msg_set_lookup_scope(msg, 0);
        msg_set_hdr_sz(msg, SHORT_H_SIZE);

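        /* Arm the connection probing timer (one hour by default) */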
        sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTERVAL);
        tipc_set_sk_state(sk, TIPC_ESTABLISHED);
        tipc_node_add_conn(net, peer_node, tsk->portid, peer_port);
        tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid);
        tsk->peer_caps = tipc_node_get_capabilities(net, peer_node);
        if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
                return;

        /* Fall back to message based flow control */
        tsk->rcv_win = FLOWCTL_MSG_WIN;
        tsk->snd_win = FLOWCTL_MSG_WIN;
}

/**
 * set_orig_addr - capture sender's address for received message
 * @m: descriptor for message info
 * @msg: received message header
 *
 * Note: Address is not captured if not requested by receiver.
 */
static void set_orig_addr(struct msghdr *m, struct tipc_msg *msg)
{
        DECLARE_SOCKADDR(struct sockaddr_tipc *, addr, m->msg_name);

        if (addr) {
                addr->family = AF_TIPC;
                addr->addrtype = TIPC_ADDR_ID;
                memset(&addr->addr, 0, sizeof(addr->addr));
                addr->addr.id.ref = msg_origport(msg);
                addr->addr.id.node = msg_orignode(msg);
                addr->addr.name.domain = 0;     /* could leave uninitialized */
                addr->scope = 0;                /* could leave uninitialized */
                m->msg_namelen = sizeof(struct sockaddr_tipc);
        }
}

/**
 * tipc_sk_anc_data_recv - optionally capture ancillary data for received message
 * @m: descriptor for message info
 * @msg: received message header
 * @tsk: TIPC port associated with message
 *
 * Note: Ancillary data is not captured if not requested by receiver.
 *
 * Returns 0 if successful, otherwise errno
 */
static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
                                 struct tipc_sock *tsk)
{
        u32 anc_data[3];
        u32 err;
        u32 dest_type;
        int has_name;
        int res;

        if (likely(m->msg_controllen == 0))
                return 0;

        /* Optionally capture errored message object(s) */
        err = msg ? msg_errcode(msg) : 0;
        if (unlikely(err)) {
                anc_data[0] = err;
                anc_data[1] = msg_data_sz(msg);
                res = put_cmsg(m, SOL_TIPC, TIPC_ERRINFO, 8, anc_data);
                if (res)
                        return res;
                if (anc_data[1]) {
                        res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1],
                                       msg_data(msg));
                        if (res)
                                return res;
                }
        }

        /* Optionally capture message destination object */
        dest_type = msg ? msg_type(msg) : TIPC_DIRECT_MSG;
        switch (dest_type) {
        case TIPC_NAMED_MSG:
                has_name = 1;
                anc_data[0] = msg_nametype(msg);
                anc_data[1] = msg_namelower(msg);
                anc_data[2] = msg_namelower(msg);
                break;
        case TIPC_MCAST_MSG:
                has_name = 1;
                anc_data[0] = msg_nametype(msg);
                anc_data[1] = msg_namelower(msg);
                anc_data[2] = msg_nameupper(msg);
                break;
        case TIPC_CONN_MSG:
                has_name = (tsk->conn_type != 0);
                anc_data[0] = tsk->conn_type;
                anc_data[1] = tsk->conn_instance;
                anc_data[2] = tsk->conn_instance;
                break;
        default:
                has_name = 0;
        }
        if (has_name) {
                res = put_cmsg(m, SOL_TIPC, TIPC_DESTNAME, 12, anc_data);
                if (res)
                        return res;
        }

        return 0;
}

static void tipc_sk_send_ack(struct tipc_sock *tsk)
{
        struct sock *sk = &tsk->sk;
        struct net *net = sock_net(sk);
        struct sk_buff *skb = NULL;
        struct tipc_msg *msg;
        u32 peer_port = tsk_peer_port(tsk);
        u32 dnode = tsk_peer_node(tsk);

        if (!tipc_sk_connected(sk))
                return;
        skb = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0,
                              dnode, tsk_own_node(tsk), peer_port,
                              tsk->portid, TIPC_OK);
        if (!skb)
                return;
        msg = buf_msg(skb);
        msg_set_conn_ack(msg, tsk->rcv_unacked);
        tsk->rcv_unacked = 0;

        /* Adjust to and advertise the correct window limit */
        if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL) {
                tsk->rcv_win = tsk_adv_blocks(tsk->sk.sk_rcvbuf);
                msg_set_adv_win(msg, tsk->rcv_win);
        }
        tipc_node_xmit_skb(net, skb, dnode, msg_link_selector(msg));
}

static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
{
        struct sock *sk = sock->sk;
        DEFINE_WAIT(wait);
        long timeo = *timeop;
        int err;

        for (;;) {
                prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
                if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
                        if (sk->sk_shutdown & RCV_SHUTDOWN) {
                                err = -ENOTCONN;
                                break;
                        }
                        release_sock(sk);
                        timeo = schedule_timeout(timeo);
                        lock_sock(sk);
                }
                err = 0;
                if (!skb_queue_empty(&sk->sk_receive_queue))
                        break;
                err = -EAGAIN;
                if (!timeo)
                        break;
                err = sock_intr_errno(timeo);
                if (signal_pending(current))
                        break;
        }
        finish_wait(sk_sleep(sk), &wait);
        *timeop = timeo;
        return err;
}

/**
 * tipc_recvmsg - receive packet-oriented message
 * @sock: socket structure
 * @m: descriptor for message info
 * @buf_len: total size of user buffer area
 * @flags: receive flags
 *
 * Used for SOCK_DGRAM, SOCK_RDM, and SOCK_SEQPACKET messages.
 * If the complete message doesn't fit in user area, truncate it.
 *
 * Returns size of returned message data, errno otherwise
 */
static int tipc_recvmsg(struct socket *sock, struct msghdr *m, size_t buf_len,
                        int flags)
{
        struct sock *sk = sock->sk;
        struct tipc_sock *tsk = tipc_sk(sk);
        struct sk_buff *buf;
        struct tipc_msg *msg;
        bool is_connectionless = tipc_sk_type_connectionless(sk);
        long timeo;
        unsigned int sz;
        u32 err;
        int res, hlen;

        /* Catch invalid receive requests */
        if (unlikely(!buf_len))
                return -EINVAL;

        lock_sock(sk);

        if (!is_connectionless && unlikely(sk->sk_state == TIPC_OPEN)) {
                res = -ENOTCONN;
                goto exit;
        }

        timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
restart:

        /* Look for a message in receive queue; wait if necessary */
        res = tipc_wait_for_rcvmsg(sock, &timeo);
        if (res)
                goto exit;

        /* Look at first message in receive queue */
        buf = skb_peek(&sk->sk_receive_queue);
        msg = buf_msg(buf);
        sz = msg_data_sz(msg);
        hlen = msg_hdr_sz(msg);
        err = msg_errcode(msg);

        /* Discard an empty non-errored message & try again */
        if ((!sz) && (!err)) {
                tsk_advance_rx_queue(sk);
                goto restart;
        }

        /* Capture sender's address (optional) */
        set_orig_addr(m, msg);

        /* Capture ancillary data (optional) */
        res = tipc_sk_anc_data_recv(m, msg, tsk);
        if (res)
                goto exit;

        /* Capture message data (if valid) & compute return value (always) */
        if (!err) {
                if (unlikely(buf_len < sz)) {
                        sz = buf_len;
                        m->msg_flags |= MSG_TRUNC;
                }
                res = skb_copy_datagram_msg(buf, hlen, m, sz);
                if (res)
                        goto exit;
                res = sz;
        } else {
                if (is_connectionless || err == TIPC_CONN_SHUTDOWN ||
                    m->msg_control)
                        res = 0;
                else
                        res = -ECONNRESET;
        }

        if (unlikely(flags & MSG_PEEK))
                goto exit;

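        /* Ack the peer once a quarter of the receive window is consumed */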
        if (likely(!is_connectionless)) {
                tsk->rcv_unacked += tsk_inc(tsk, hlen + sz);
                if (unlikely(tsk->rcv_unacked >= (tsk->rcv_win / 4)))
                        tipc_sk_send_ack(tsk);
        }
        tsk_advance_rx_queue(sk);
exit:
        release_sock(sk);
        return res;
}

/**
 * tipc_recv_stream - receive stream-oriented data
 * @sock: socket structure
 * @m: descriptor for message info
 * @buf_len: total size of user buffer area
 * @flags: receive flags
 *
 * Used for SOCK_STREAM messages only. If not enough data is available
 * will optionally wait for more; never truncates data.
 *
 * Returns size of returned message data, errno otherwise
 */
static int tipc_recv_stream(struct socket *sock, struct msghdr *m,
                            size_t buf_len, int flags)
{
        struct sock *sk = sock->sk;
        struct tipc_sock *tsk = tipc_sk(sk);
        struct sk_buff *buf;
        struct tipc_msg *msg;
        long timeo;
        unsigned int sz;
        int target;
        int sz_copied = 0;
        u32 err;
        int res = 0, hlen;

        /* Catch invalid receive attempts */
        if (unlikely(!buf_len))
                return -EINVAL;

        lock_sock(sk);

        if (unlikely(sk->sk_state == TIPC_OPEN)) {
                res = -ENOTCONN;
                goto exit;
        }

        target = sock_rcvlowat(sk, flags & MSG_WAITALL, buf_len);
        timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

restart:
        /* Look for a message in receive queue; wait if necessary */
        res = tipc_wait_for_rcvmsg(sock, &timeo);
        if (res)
                goto exit;

        /* Look at first message in receive queue */
        buf = skb_peek(&sk->sk_receive_queue);
        msg = buf_msg(buf);
        sz = msg_data_sz(msg);
        hlen = msg_hdr_sz(msg);
        err = msg_errcode(msg);

        /* Discard an empty non-errored message & try again */
        if ((!sz) && (!err)) {
                tsk_advance_rx_queue(sk);
                goto restart;
        }

        /* Optionally capture sender's address & ancillary data of first msg */
        if (sz_copied == 0) {
                set_orig_addr(m, msg);
                res = tipc_sk_anc_data_recv(m, msg, tsk);
                if (res)
                        goto exit;
        }

        /* Capture message data (if valid) & compute return value (always) */
        if (!err) {
                u32 offset = TIPC_SKB_CB(buf)->bytes_read;
                u32 needed;
                int sz_to_copy;

                sz -= offset;
                needed = (buf_len - sz_copied);
                sz_to_copy = min(sz, needed);

                res = skb_copy_datagram_msg(buf, hlen + offset, m, sz_to_copy);
                if (res)
                        goto exit;

                sz_copied += sz_to_copy;

                if (sz_to_copy < sz) {
                        if (!(flags & MSG_PEEK))
                                TIPC_SKB_CB(buf)->bytes_read =
                                        offset + sz_to_copy;
                        goto exit;
                }
        } else {
                if (sz_copied != 0)
                        goto exit; /* can't add error msg to valid data */

                if ((err == TIPC_CONN_SHUTDOWN) || m->msg_control)
                        res = 0;
                else
                        res = -ECONNRESET;
        }

        if (unlikely(flags & MSG_PEEK))
                goto exit;

        tsk->rcv_unacked += tsk_inc(tsk, hlen + sz);
        if (unlikely(tsk->rcv_unacked >= (tsk->rcv_win / 4)))
                tipc_sk_send_ack(tsk);
        tsk_advance_rx_queue(sk);

        /* Loop around if more data is required */
        if ((sz_copied < buf_len) &&    /* didn't get all requested data */
            (!skb_queue_empty(&sk->sk_receive_queue) ||
             (sz_copied < target)) &&   /* and more is ready or required */
            (!err))                     /* and haven't reached a FIN */
                goto restart;

exit:
        release_sock(sk);
        return sz_copied ? sz_copied : res;
}

/**
 * tipc_write_space - wake up thread if port congestion is released
 * @sk: socket
 */
static void tipc_write_space(struct sock *sk)
{
        struct socket_wq *wq;

        rcu_read_lock();
        wq = rcu_dereference(sk->sk_wq);
        if (skwq_has_sleeper(wq))
                wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
                                                POLLWRNORM | POLLWRBAND);
        rcu_read_unlock();
}

/**
 * tipc_data_ready - wake up threads to indicate messages have been received
 * @sk: socket
 */
static void tipc_data_ready(struct sock *sk)
{
        struct socket_wq *wq;

        rcu_read_lock();
        wq = rcu_dereference(sk->sk_wq);
        if (skwq_has_sleeper(wq))
                wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
                                                POLLRDNORM | POLLRDBAND);
        rcu_read_unlock();
}

static void tipc_sock_destruct(struct sock *sk)
{
        __skb_queue_purge(&sk->sk_receive_queue);
}

/**
 * filter_connect - Handle all incoming messages for a connection-based socket
 * @tsk: TIPC socket
 * @skb: pointer to message buffer. Set to NULL if buffer is consumed
 *
 * Returns true if everything ok, false otherwise
 */
static bool filter_connect(struct tipc_sock *tsk, struct sk_buff *skb)
{
        struct sock *sk = &tsk->sk;
        struct net *net = sock_net(sk);
        struct tipc_msg *hdr = buf_msg(skb);

        if (unlikely(msg_mcast(hdr)))
                return false;

        switch (sk->sk_state) {
        case TIPC_CONNECTING:
                /* Accept only ACK or NACK message */
                if (unlikely(!msg_connected(hdr)))
                        return false;

                if (unlikely(msg_errcode(hdr))) {
                        tipc_set_sk_state(sk, TIPC_DISCONNECTING);
                        sk->sk_err = ECONNREFUSED;
                        return true;
                }

                if (unlikely(!msg_isdata(hdr))) {
                        tipc_set_sk_state(sk, TIPC_DISCONNECTING);
                        sk->sk_err = EINVAL;
                        return true;
                }

                tipc_sk_finish_conn(tsk, msg_origport(hdr), msg_orignode(hdr));
                msg_set_importance(&tsk->phdr, msg_importance(hdr));

                /* If 'ACK+' message, add to socket receive queue */
                if (msg_data_sz(hdr))
                        return true;

                /* If empty 'ACK-' message, wake up sleeping connect() */
                if (waitqueue_active(sk_sleep(sk)))
                        wake_up_interruptible(sk_sleep(sk));

                /* 'ACK-' message is neither accepted nor rejected: */
                msg_set_dest_droppable(hdr, 1);
                return false;

        case TIPC_OPEN:
        case TIPC_DISCONNECTING:
                break;
        case TIPC_LISTEN:
                /* Accept only SYN message */
                if (!msg_connected(hdr) && !(msg_errcode(hdr)))
                        return true;
                break;
        case TIPC_ESTABLISHED:
                /* Accept only connection-based messages sent by peer */
                if (unlikely(!tsk_peer_msg(tsk, hdr)))
                        return false;

                if (unlikely(msg_errcode(hdr))) {
                        tipc_set_sk_state(sk, TIPC_DISCONNECTING);
                        /* Let timer expire on its own */
                        tipc_node_remove_conn(net, tsk_peer_node(tsk),
                                              tsk->portid);
                        sk->sk_state_change(sk);
                }
                return true;
        default:
                pr_err("Unknown sk_state %u\n", sk->sk_state);
        }

        return false;
}

/**
 * rcvbuf_limit - get proper overload limit of socket receive queue
 * @sk: socket
 * @skb: message
 *
 * For connection oriented messages, irrespective of importance,
 * default queue limit is 2 MB.
 *
 * For connectionless messages, queue limits are based on message
 * importance as follows:
 *
 * TIPC_LOW_IMPORTANCE       (2 MB)
 * TIPC_MEDIUM_IMPORTANCE    (4 MB)
 * TIPC_HIGH_IMPORTANCE      (8 MB)
 * TIPC_CRITICAL_IMPORTANCE  (16 MB)
 *
 * Returns overload limit according to corresponding message importance
 */
static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *skb)
{
        struct tipc_sock *tsk = tipc_sk(sk);
        struct tipc_msg *hdr = buf_msg(skb);

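        /* Connectionless: scale the default limit by message importance */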
        if (unlikely(!msg_connected(hdr)))
                return sk->sk_rcvbuf << msg_importance(hdr);

        if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
                return sk->sk_rcvbuf;

        return FLOWCTL_MSG_LIM;
}

/**
 * filter_rcv - validate incoming message
 * @sk: socket
 * @skb: pointer to message.
 * @xmitq: queue for rejected or reply messages to be transmitted
 *
 * Enqueues message on receive queue if acceptable; optionally handles
 * disconnect indication for a connected socket.
 *
 * Called with socket lock already taken
 *
 * Returns true if message was added to socket receive queue, otherwise false
 */
static bool filter_rcv(struct sock *sk, struct sk_buff *skb,
                       struct sk_buff_head *xmitq)
{
        struct tipc_sock *tsk = tipc_sk(sk);
        struct tipc_msg *hdr = buf_msg(skb);
        unsigned int limit = rcvbuf_limit(sk, skb);
        int err = TIPC_OK;
        int usr = msg_user(hdr);

        if (unlikely(msg_user(hdr) == CONN_MANAGER)) {
                tipc_sk_proto_rcv(tsk, skb, xmitq);
                return false;
        }

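        /* SOCK_WAKEUP: link congestion has abated; wake any blocked sender */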
        if (unlikely(usr == SOCK_WAKEUP)) {
                kfree_skb(skb);
                tsk->link_cong = 0;
                sk->sk_write_space(sk);
                return false;
        }

        /* Drop if illegal message type */
        if (unlikely(msg_type(hdr) > TIPC_DIRECT_MSG)) {
                kfree_skb(skb);
                return false;
        }

        /* Reject if wrong message type for current socket state */
        if (tipc_sk_type_connectionless(sk)) {
                if (msg_connected(hdr)) {
                        err = TIPC_ERR_NO_PORT;
                        goto reject;
                }
        } else if (unlikely(!filter_connect(tsk, skb))) {
                err = TIPC_ERR_NO_PORT;
                goto reject;
        }

        /* Reject message if there isn't room to queue it */
        if (unlikely(sk_rmem_alloc_get(sk) + skb->truesize >= limit)) {
                err = TIPC_ERR_OVERLOAD;
                goto reject;
        }

        /* Enqueue message */
        TIPC_SKB_CB(skb)->bytes_read = 0;
        __skb_queue_tail(&sk->sk_receive_queue, skb);
        skb_set_owner_r(skb, sk);

        sk->sk_data_ready(sk);
        return true;

reject:
        if (tipc_msg_reverse(tsk_own_node(tsk), &skb, err))
                __skb_queue_tail(xmitq, skb);
        return false;
}

/**
 * tipc_backlog_rcv - handle incoming message from backlog queue
 * @sk: socket
 * @skb: message
 *
 * Caller must hold socket lock
 *
 * Returns 0
 */
static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
        unsigned int truesize = skb->truesize;
        struct sk_buff_head xmitq;
        u32 dnode, selector;

        __skb_queue_head_init(&xmitq);

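        /* An accepted buffer is now charged to both the backlog and the
         * receive queue; record the double-counted bytes in dupl_rcvcnt
         * so the enqueue path can compensate.
         */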
        if (likely(filter_rcv(sk, skb, &xmitq))) {
                atomic_add(truesize, &tipc_sk(sk)->dupl_rcvcnt);
                return 0;
        }

        if (skb_queue_empty(&xmitq))
                return 0;

        /* Send response/rejected message */
        skb = __skb_dequeue(&xmitq);
        dnode = msg_destnode(buf_msg(skb));
        selector = msg_origport(buf_msg(skb));
        tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector);
        return 0;
}

/**
 * tipc_sk_enqueue - extract all buffers with destination 'dport' from
 *                   inputq and try adding them to socket or backlog queue
 * @inputq: list of incoming buffers with potentially different destinations
 * @sk: socket where the buffers should be enqueued
 * @dport: port number for the socket
 * @xmitq: queue for rejected messages to be transmitted
 *
 * Caller must hold socket lock
 */
static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
                            u32 dport, struct sk_buff_head *xmitq)
{
        unsigned long time_limit = jiffies + 2;
        struct sk_buff *skb;
        unsigned int lim;
        atomic_t *dcnt;
        u32 onode;

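        /* Bound the time spent here (~2 jiffies) so one socket cannot
         * monopolize the CPU.
         */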
        while (skb_queue_len(inputq)) {
                if (unlikely(time_after_eq(jiffies, time_limit)))
                        return;

                skb = tipc_skb_dequeue(inputq, dport);
                if (unlikely(!skb))
                        return;

                /* Add message directly to receive queue if possible */
                if (!sock_owned_by_user(sk)) {
                        filter_rcv(sk, skb, xmitq);
                        continue;
                }

                /* Try backlog, compensating for double-counted bytes */
                dcnt = &tipc_sk(sk)->dupl_rcvcnt;
                if (!sk->sk_backlog.len)
                        atomic_set(dcnt, 0);
                lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
                if (likely(!sk_add_backlog(sk, skb, lim)))
                        continue;

                /* Overload => reject message back to sender */
                onode = tipc_own_addr(sock_net(sk));
                if (tipc_msg_reverse(onode, &skb, TIPC_ERR_OVERLOAD))
                        __skb_queue_tail(xmitq, skb);
                break;
        }
}

/**
 * tipc_sk_rcv - handle a chain of incoming buffers
 * @net: the associated network namespace
 * @inputq: buffer list containing the buffers
 * Consumes all buffers in list until inputq is empty
 * Note: may be called in multiple threads referring to the same queue
 */
void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
{
        struct sk_buff_head xmitq;
        u32 dnode, dport = 0;
        int err;
        struct tipc_sock *tsk;
        struct sock *sk;
        struct sk_buff *skb;

        __skb_queue_head_init(&xmitq);
        while (skb_queue_len(inputq)) {
                dport = tipc_skb_peek_port(inputq, dport);
                tsk = tipc_sk_lookup(net, dport);

                if (likely(tsk)) {
                        sk = &tsk->sk;
                        if (likely(spin_trylock_bh(&sk->sk_lock.slock))) {
                                tipc_sk_enqueue(inputq, sk, dport, &xmitq);
                                spin_unlock_bh(&sk->sk_lock.slock);
                        }
                        /* Send pending response/rejected messages, if any */
                        while ((skb = __skb_dequeue(&xmitq))) {
                                dnode = msg_destnode(buf_msg(skb));
                                tipc_node_xmit_skb(net, skb, dnode, dport);
                        }
                        sock_put(sk);
                        continue;
                }

                /* No destination socket => dequeue skb if still there */
                skb = tipc_skb_dequeue(inputq, dport);
                if (!skb)
                        return;

                /* Try secondary lookup if unresolved named message */
                err = TIPC_ERR_NO_PORT;
                if (tipc_msg_lookup_dest(net, skb, &err))
                        goto xmit;

                /* Prepare for message rejection */
                if (!tipc_msg_reverse(tipc_own_addr(net), &skb, err))
                        continue;
xmit:
                dnode = msg_destnode(buf_msg(skb));
                tipc_node_xmit_skb(net, skb, dnode, dport);
        }
}

static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
{
        DEFINE_WAIT_FUNC(wait, woken_wake_function);
        struct sock *sk = sock->sk;
        int done;

        do {
                int err = sock_error(sk);
                if (err)
                        return err;
                if (!*timeo_p)
                        return -ETIMEDOUT;
                if (signal_pending(current))
                        return sock_intr_errno(*timeo_p);

                add_wait_queue(sk_sleep(sk), &wait);
                done = sk_wait_event(sk, timeo_p,
                                     sk->sk_state != TIPC_CONNECTING, &wait);
                remove_wait_queue(sk_sleep(sk), &wait);
        } while (!done);
        return 0;
}

1909/**
1910 * tipc_connect - establish a connection to another TIPC port
1911 * @sock: socket structure
1912 * @dest: socket address for destination port
1913 * @destlen: size of socket address data structure
1914 * @flags: file-related flags associated with socket
1915 *
1916 * Returns 0 on success, errno otherwise
1917 */
1918static int tipc_connect(struct socket *sock, struct sockaddr *dest,
1919 int destlen, int flags)
1920{
1921 struct sock *sk = sock->sk;
1922 struct tipc_sock *tsk = tipc_sk(sk);
1923 struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest;
1924 struct msghdr m = {NULL,};
1925 long timeout = (flags & O_NONBLOCK) ? 0 : tsk->conn_timeout;
1926 int previous;
1927 int res = 0;
1928
1929 lock_sock(sk);
1930
1931 /* DGRAM/RDM connect(), just save the destaddr */
1932 if (tipc_sk_type_connectionless(sk)) {
1933 if (dst->family == AF_UNSPEC) {
1934 memset(&tsk->peer, 0, sizeof(struct sockaddr_tipc));
1935 } else if (destlen != sizeof(struct sockaddr_tipc)) {
1936 res = -EINVAL;
1937 } else {
1938 memcpy(&tsk->peer, dest, destlen);
1939 }
1940 goto exit;
1941 }
1942
1943 /*
1944 * Reject connection attempt using multicast address
1945 *
1946 * Note: send_msg() validates the rest of the address fields,
1947 * so there's no need to do it here
1948 */
1949 if (dst->addrtype == TIPC_ADDR_MCAST) {
1950 res = -EINVAL;
1951 goto exit;
1952 }
1953
1954 previous = sk->sk_state;
1955
1956 switch (sk->sk_state) {
1957 case TIPC_OPEN:
1958 /* Send a 'SYN-' to destination */
1959 m.msg_name = dest;
1960 m.msg_namelen = destlen;
1961
1962		/* If connect is non-blocking, set MSG_DONTWAIT so that
1963		 * __tipc_sendmsg() never blocks.
1964		 */
1965 if (!timeout)
1966 m.msg_flags = MSG_DONTWAIT;
1967
1968 res = __tipc_sendmsg(sock, &m, 0);
1969 if ((res < 0) && (res != -EWOULDBLOCK))
1970 goto exit;
1971
1972		/* Just entered TIPC_CONNECTING state; the only
1973		 * difference is that the return value in the
1974		 * non-blocking case is -EINPROGRESS rather than -EALREADY.
1975		 */
1976 res = -EINPROGRESS;
1977		/* fall through */
1978 case TIPC_CONNECTING:
1979 if (!timeout) {
1980 if (previous == TIPC_CONNECTING)
1981 res = -EALREADY;
1982 goto exit;
1983 }
1984 timeout = msecs_to_jiffies(timeout);
1985 /* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */
1986 res = tipc_wait_for_connect(sock, &timeout);
1987 break;
1988 case TIPC_ESTABLISHED:
1989 res = -EISCONN;
1990 break;
1991 default:
1992 res = -EINVAL;
1993 }
1994
1995exit:
1996 release_sock(sk);
1997 return res;
1998}
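/*
 * Editor's sketch (not part of the kernel source): the userspace side of
 * the connect path above, using an arbitrary example service {18888, 17}.
 * A blocking connect() rides the TIPC_OPEN -> TIPC_CONNECTING transition;
 * on an O_NONBLOCK socket it would return -1 with errno == EINPROGRESS.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/tipc.h>

static int example_connect(void)
{
	struct sockaddr_tipc srv = {
		.family   = AF_TIPC,
		.addrtype = TIPC_ADDR_NAME,
		.addr.name.name.type     = 18888,	/* example service type */
		.addr.name.name.instance = 17,		/* example instance */
		.addr.name.domain        = 0,		/* cluster-wide lookup */
	};
	int sd = socket(AF_TIPC, SOCK_SEQPACKET, 0);

	if (sd < 0)
		return -1;
	if (connect(sd, (struct sockaddr *)&srv, sizeof(srv)) < 0) {
		perror("connect");
		close(sd);
		return -1;
	}
	return sd;	/* connected; sk_state is now TIPC_ESTABLISHED */
}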
1999
2000/**
2001 * tipc_listen - allow socket to listen for incoming connections
2002 * @sock: socket structure
2003 * @len: (unused)
2004 *
2005 * Returns 0 on success, errno otherwise
2006 */
2007static int tipc_listen(struct socket *sock, int len)
2008{
2009 struct sock *sk = sock->sk;
2010 int res;
2011
2012 lock_sock(sk);
2013 res = tipc_set_sk_state(sk, TIPC_LISTEN);
2014 release_sock(sk);
2015
2016 return res;
2017}
2018
2019static int tipc_wait_for_accept(struct socket *sock, long timeo)
2020{
2021 struct sock *sk = sock->sk;
2022 DEFINE_WAIT(wait);
2023 int err;
2024
2025 /* True wake-one mechanism for incoming connections: only
2026 * one process gets woken up, not the 'whole herd'.
2027 * Since we do not 'race & poll' for established sockets
2028 * anymore, the common case will execute the loop only once.
2029 */
2030 for (;;) {
2031 prepare_to_wait_exclusive(sk_sleep(sk), &wait,
2032 TASK_INTERRUPTIBLE);
2033 if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
2034 release_sock(sk);
2035 timeo = schedule_timeout(timeo);
2036 lock_sock(sk);
2037 }
2038 err = 0;
2039 if (!skb_queue_empty(&sk->sk_receive_queue))
2040 break;
2041 err = -EAGAIN;
2042 if (!timeo)
2043 break;
2044 err = sock_intr_errno(timeo);
2045 if (signal_pending(current))
2046 break;
2047 }
2048 finish_wait(sk_sleep(sk), &wait);
2049 return err;
2050}
2051
2052/**
2053 * tipc_accept - wait for connection request
2054 * @sock: listening socket
2055 * @newsock: new socket that is to be connected
2056 * @flags: file-related flags associated with socket
2057 *
2058 * Returns 0 on success, errno otherwise
2059 */
2060static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags)
2061{
2062 struct sock *new_sk, *sk = sock->sk;
2063 struct sk_buff *buf;
2064 struct tipc_sock *new_tsock;
2065 struct tipc_msg *msg;
2066 long timeo;
2067 int res;
2068
2069 lock_sock(sk);
2070
2071 if (sk->sk_state != TIPC_LISTEN) {
2072 res = -EINVAL;
2073 goto exit;
2074 }
2075 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
2076 res = tipc_wait_for_accept(sock, timeo);
2077 if (res)
2078 goto exit;
2079
2080 buf = skb_peek(&sk->sk_receive_queue);
2081
2082 res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, 0);
2083 if (res)
2084 goto exit;
2085 security_sk_clone(sock->sk, new_sock->sk);
2086
2087 new_sk = new_sock->sk;
2088 new_tsock = tipc_sk(new_sk);
2089 msg = buf_msg(buf);
2090
2091 /* we lock on new_sk; but lockdep sees the lock on sk */
2092 lock_sock_nested(new_sk, SINGLE_DEPTH_NESTING);
2093
2094 /*
2095 * Reject any stray messages received by new socket
2096 * before the socket lock was taken (very, very unlikely)
2097 */
2098 tsk_rej_rx_queue(new_sk);
2099
2100	/* Connect new socket to its peer */
2101 tipc_sk_finish_conn(new_tsock, msg_origport(msg), msg_orignode(msg));
2102
2103 tsk_set_importance(new_tsock, msg_importance(msg));
2104 if (msg_named(msg)) {
2105 new_tsock->conn_type = msg_nametype(msg);
2106 new_tsock->conn_instance = msg_nameinst(msg);
2107 }
2108
2109 /*
2110 * Respond to 'SYN-' by discarding it & returning 'ACK'-.
2111 * Respond to 'SYN+' by queuing it on new socket.
2112 */
2113 if (!msg_data_sz(msg)) {
2114 struct msghdr m = {NULL,};
2115
2116 tsk_advance_rx_queue(sk);
2117 __tipc_send_stream(new_sock, &m, 0);
2118 } else {
2119 __skb_dequeue(&sk->sk_receive_queue);
2120 __skb_queue_head(&new_sk->sk_receive_queue, buf);
2121 skb_set_owner_r(buf, new_sk);
2122 }
2123 release_sock(new_sk);
2124exit:
2125 release_sock(sk);
2126 return res;
2127}
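/*
 * Editor's sketch (not part of the kernel source): the server-side calls
 * that drive tipc_listen()/tipc_accept() above.  The bound service
 * {18888, 17} matches the example used for connect() earlier.
 */
#include <sys/socket.h>
#include <linux/tipc.h>

static int example_accept(void)
{
	struct sockaddr_tipc name = {
		.family   = AF_TIPC,
		.addrtype = TIPC_ADDR_NAME,
		.scope    = TIPC_ZONE_SCOPE,
		.addr.name.name.type     = 18888,
		.addr.name.name.instance = 17,
	};
	int sd = socket(AF_TIPC, SOCK_SEQPACKET, 0);

	if (sd < 0 ||
	    bind(sd, (struct sockaddr *)&name, sizeof(name)) < 0 ||
	    listen(sd, 8) < 0)
		return -1;
	/* Blocks in tipc_wait_for_accept() until a SYN arrives */
	return accept(sd, NULL, NULL);
}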
2128
2129/**
2130 * tipc_shutdown - shutdown socket connection
2131 * @sock: socket structure
2132 * @how: direction to close (must be SHUT_RDWR)
2133 *
2134 * Terminates connection (if necessary), then purges socket's receive queue.
2135 *
2136 * Returns 0 on success, errno otherwise
2137 */
2138static int tipc_shutdown(struct socket *sock, int how)
2139{
2140 struct sock *sk = sock->sk;
2141 int res;
2142
2143 if (how != SHUT_RDWR)
2144 return -EINVAL;
2145
2146 lock_sock(sk);
2147
2148 __tipc_shutdown(sock, TIPC_CONN_SHUTDOWN);
2149 sk->sk_shutdown = SEND_SHUTDOWN;
2150
2151 if (sk->sk_state == TIPC_DISCONNECTING) {
2152 /* Discard any unreceived messages */
2153 __skb_queue_purge(&sk->sk_receive_queue);
2154
2155 /* Wake up anyone sleeping in poll */
2156 sk->sk_state_change(sk);
2157 res = 0;
2158 } else {
2159 res = -ENOTCONN;
2160 }
2161
2162 release_sock(sk);
2163 return res;
2164}
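/*
 * Editor's sketch (not part of the kernel source): TIPC only supports
 * shutting down both directions at once, so SHUT_RDWR is the only value
 * accepted above; SHUT_RD or SHUT_WR alone would return -EINVAL.
 */
#include <sys/socket.h>

static int example_shutdown(int sd)
{
	return shutdown(sd, SHUT_RDWR);
}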
2165
2166static void tipc_sk_timeout(unsigned long data)
2167{
2168 struct tipc_sock *tsk = (struct tipc_sock *)data;
2169 struct sock *sk = &tsk->sk;
2170 struct sk_buff *skb = NULL;
2171 u32 peer_port, peer_node;
2172 u32 own_node = tsk_own_node(tsk);
2173
2174 bh_lock_sock(sk);
2175 if (!tipc_sk_connected(sk)) {
2176 bh_unlock_sock(sk);
2177 goto exit;
2178 }
2179 peer_port = tsk_peer_port(tsk);
2180 peer_node = tsk_peer_node(tsk);
2181
2182 if (tsk->probe_unacked) {
2183 if (!sock_owned_by_user(sk)) {
2184 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
2185 tipc_node_remove_conn(sock_net(sk), tsk_peer_node(tsk),
2186 tsk_peer_port(tsk));
2187 sk->sk_state_change(sk);
2188 } else {
2189 /* Try again later */
2190 sk_reset_timer(sk, &sk->sk_timer, (HZ / 20));
2191 }
2192
2193 bh_unlock_sock(sk);
2194 goto exit;
2195 }
2196
2197 skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE,
2198 INT_H_SIZE, 0, peer_node, own_node,
2199 peer_port, tsk->portid, TIPC_OK);
2200 tsk->probe_unacked = true;
2201 sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTERVAL);
2202 bh_unlock_sock(sk);
2203 if (skb)
2204 tipc_node_xmit_skb(sock_net(sk), skb, peer_node, tsk->portid);
2205exit:
2206 sock_put(sk);
2207}
2208
2209static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
2210 struct tipc_name_seq const *seq)
2211{
2212 struct sock *sk = &tsk->sk;
2213 struct net *net = sock_net(sk);
2214 struct publication *publ;
2215 u32 key;
2216
2217 if (tipc_sk_connected(sk))
2218 return -EINVAL;
2219 key = tsk->portid + tsk->pub_count + 1;
2220 if (key == tsk->portid)
2221 return -EADDRINUSE;
2222
2223 publ = tipc_nametbl_publish(net, seq->type, seq->lower, seq->upper,
2224 scope, tsk->portid, key);
2225 if (unlikely(!publ))
2226 return -EINVAL;
2227
2228 list_add(&publ->pport_list, &tsk->publications);
2229 tsk->pub_count++;
2230 tsk->published = 1;
2231 return 0;
2232}
2233
2234static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
2235 struct tipc_name_seq const *seq)
2236{
2237 struct net *net = sock_net(&tsk->sk);
2238 struct publication *publ;
2239 struct publication *safe;
2240 int rc = -EINVAL;
2241
2242 list_for_each_entry_safe(publ, safe, &tsk->publications, pport_list) {
2243 if (seq) {
2244 if (publ->scope != scope)
2245 continue;
2246 if (publ->type != seq->type)
2247 continue;
2248 if (publ->lower != seq->lower)
2249 continue;
2250 if (publ->upper != seq->upper)
2251 break;
2252 tipc_nametbl_withdraw(net, publ->type, publ->lower,
2253 publ->ref, publ->key);
2254 rc = 0;
2255 break;
2256 }
2257 tipc_nametbl_withdraw(net, publ->type, publ->lower,
2258 publ->ref, publ->key);
2259 rc = 0;
2260 }
2261 if (list_empty(&tsk->publications))
2262 tsk->published = 0;
2263 return rc;
2264}
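/*
 * Editor's sketch (not part of the kernel source): tipc_sk_publish() and
 * tipc_sk_withdraw() are both reached through bind(); a non-negative
 * scope publishes the name, a negated scope withdraws it again.
 */
#include <sys/socket.h>
#include <linux/tipc.h>

static int example_set_name(int sd, unsigned int type, unsigned int inst,
			    int publish)
{
	struct sockaddr_tipc name = {
		.family   = AF_TIPC,
		.addrtype = TIPC_ADDR_NAME,
		.scope    = publish ? TIPC_NODE_SCOPE : -TIPC_NODE_SCOPE,
		.addr.name.name.type     = type,
		.addr.name.name.instance = inst,
	};

	return bind(sd, (struct sockaddr *)&name, sizeof(name));
}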
2265
2266/* tipc_sk_reinit: set non-zero address in all existing sockets
2267 * when we go from standalone to network mode.
2268 */
2269void tipc_sk_reinit(struct net *net)
2270{
2271 struct tipc_net *tn = net_generic(net, tipc_net_id);
2272 const struct bucket_table *tbl;
2273 struct rhash_head *pos;
2274 struct tipc_sock *tsk;
2275 struct tipc_msg *msg;
2276 int i;
2277
2278 rcu_read_lock();
2279 tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht);
2280 for (i = 0; i < tbl->size; i++) {
2281 rht_for_each_entry_rcu(tsk, pos, tbl, i, node) {
2282 spin_lock_bh(&tsk->sk.sk_lock.slock);
2283 msg = &tsk->phdr;
2284 msg_set_prevnode(msg, tn->own_addr);
2285 msg_set_orignode(msg, tn->own_addr);
2286 spin_unlock_bh(&tsk->sk.sk_lock.slock);
2287 }
2288 }
2289 rcu_read_unlock();
2290}
2291
2292static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid)
2293{
2294 struct tipc_net *tn = net_generic(net, tipc_net_id);
2295 struct tipc_sock *tsk;
2296
2297 rcu_read_lock();
2298 tsk = rhashtable_lookup_fast(&tn->sk_rht, &portid, tsk_rht_params);
2299 if (tsk)
2300 sock_hold(&tsk->sk);
2301 rcu_read_unlock();
2302
2303 return tsk;
2304}
2305
2306static int tipc_sk_insert(struct tipc_sock *tsk)
2307{
2308 struct sock *sk = &tsk->sk;
2309 struct net *net = sock_net(sk);
2310 struct tipc_net *tn = net_generic(net, tipc_net_id);
2311 u32 remaining = (TIPC_MAX_PORT - TIPC_MIN_PORT) + 1;
2312 u32 portid = prandom_u32() % remaining + TIPC_MIN_PORT;
2313
2314 while (remaining--) {
2315 portid++;
2316 if ((portid < TIPC_MIN_PORT) || (portid > TIPC_MAX_PORT))
2317 portid = TIPC_MIN_PORT;
2318 tsk->portid = portid;
2319 sock_hold(&tsk->sk);
2320 if (!rhashtable_lookup_insert_fast(&tn->sk_rht, &tsk->node,
2321 tsk_rht_params))
2322 return 0;
2323 sock_put(&tsk->sk);
2324 }
2325
2326 return -1;
2327}
2328
2329static void tipc_sk_remove(struct tipc_sock *tsk)
2330{
2331 struct sock *sk = &tsk->sk;
2332 struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id);
2333
2334 if (!rhashtable_remove_fast(&tn->sk_rht, &tsk->node, tsk_rht_params)) {
2335 WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
2336 __sock_put(sk);
2337 }
2338}
2339
2340static const struct rhashtable_params tsk_rht_params = {
2341 .nelem_hint = 192,
2342 .head_offset = offsetof(struct tipc_sock, node),
2343 .key_offset = offsetof(struct tipc_sock, portid),
2344 .key_len = sizeof(u32), /* portid */
2345 .max_size = 1048576,
2346 .min_size = 256,
2347 .automatic_shrinking = true,
2348};
2349
2350int tipc_sk_rht_init(struct net *net)
2351{
2352 struct tipc_net *tn = net_generic(net, tipc_net_id);
2353
2354 return rhashtable_init(&tn->sk_rht, &tsk_rht_params);
2355}
2356
2357void tipc_sk_rht_destroy(struct net *net)
2358{
2359 struct tipc_net *tn = net_generic(net, tipc_net_id);
2360
2361 /* Wait for socket readers to complete */
2362 synchronize_net();
2363
2364 rhashtable_destroy(&tn->sk_rht);
2365}
2366
2367/**
2368 * tipc_setsockopt - set socket option
2369 * @sock: socket structure
2370 * @lvl: option level
2371 * @opt: option identifier
2372 * @ov: pointer to new option value
2373 * @ol: length of option value
2374 *
2375 * For stream sockets only, accepts and ignores all IPPROTO_TCP options
2376 * (to ease compatibility).
2377 *
2378 * Returns 0 on success, errno otherwise
2379 */
2380static int tipc_setsockopt(struct socket *sock, int lvl, int opt,
2381 char __user *ov, unsigned int ol)
2382{
2383 struct sock *sk = sock->sk;
2384 struct tipc_sock *tsk = tipc_sk(sk);
2385 u32 value;
2386 int res;
2387
2388 if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
2389 return 0;
2390 if (lvl != SOL_TIPC)
2391 return -ENOPROTOOPT;
2392 if (ol < sizeof(value))
2393 return -EINVAL;
2394 res = get_user(value, (u32 __user *)ov);
2395 if (res)
2396 return res;
2397
2398 lock_sock(sk);
2399
2400 switch (opt) {
2401 case TIPC_IMPORTANCE:
2402 res = tsk_set_importance(tsk, value);
2403 break;
2404 case TIPC_SRC_DROPPABLE:
2405 if (sock->type != SOCK_STREAM)
2406 tsk_set_unreliable(tsk, value);
2407 else
2408 res = -ENOPROTOOPT;
2409 break;
2410 case TIPC_DEST_DROPPABLE:
2411 tsk_set_unreturnable(tsk, value);
2412 break;
2413 case TIPC_CONN_TIMEOUT:
2414 tipc_sk(sk)->conn_timeout = value;
2415 /* no need to set "res", since already 0 at this point */
2416 break;
2417 default:
2418 res = -EINVAL;
2419 }
2420
2421 release_sock(sk);
2422
2423 return res;
2424}
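/*
 * Editor's sketch (not part of the kernel source): setting the options
 * handled above from userspace.  TIPC_CONN_TIMEOUT is in milliseconds;
 * TIPC_IMPORTANCE takes one of the TIPC_*_IMPORTANCE levels.
 */
#include <sys/socket.h>
#include <linux/tipc.h>

static int example_setopts(int sd)
{
	unsigned int timeout_ms = 10000;	/* 10 s connect timeout */
	unsigned int imp = TIPC_HIGH_IMPORTANCE;

	if (setsockopt(sd, SOL_TIPC, TIPC_CONN_TIMEOUT,
		       &timeout_ms, sizeof(timeout_ms)) < 0)
		return -1;
	return setsockopt(sd, SOL_TIPC, TIPC_IMPORTANCE, &imp, sizeof(imp));
}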
2425
2426/**
2427 * tipc_getsockopt - get socket option
2428 * @sock: socket structure
2429 * @lvl: option level
2430 * @opt: option identifier
2431 * @ov: receptacle for option value
2432 * @ol: receptacle for length of option value
2433 *
2434 * For stream sockets only, returns 0 length result for all IPPROTO_TCP options
2435 * (to ease compatibility).
2436 *
2437 * Returns 0 on success, errno otherwise
2438 */
2439static int tipc_getsockopt(struct socket *sock, int lvl, int opt,
2440 char __user *ov, int __user *ol)
2441{
2442 struct sock *sk = sock->sk;
2443 struct tipc_sock *tsk = tipc_sk(sk);
2444 int len;
2445 u32 value;
2446 int res;
2447
2448 if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
2449 return put_user(0, ol);
2450 if (lvl != SOL_TIPC)
2451 return -ENOPROTOOPT;
2452 res = get_user(len, ol);
2453 if (res)
2454 return res;
2455
2456 lock_sock(sk);
2457
2458 switch (opt) {
2459 case TIPC_IMPORTANCE:
2460 value = tsk_importance(tsk);
2461 break;
2462 case TIPC_SRC_DROPPABLE:
2463 value = tsk_unreliable(tsk);
2464 break;
2465 case TIPC_DEST_DROPPABLE:
2466 value = tsk_unreturnable(tsk);
2467 break;
2468 case TIPC_CONN_TIMEOUT:
2469 value = tsk->conn_timeout;
2470 /* no need to set "res", since already 0 at this point */
2471 break;
2472 case TIPC_NODE_RECVQ_DEPTH:
2473 value = 0; /* was tipc_queue_size, now obsolete */
2474 break;
2475 case TIPC_SOCK_RECVQ_DEPTH:
2476 value = skb_queue_len(&sk->sk_receive_queue);
2477 break;
2478 default:
2479 res = -EINVAL;
2480 }
2481
2482 release_sock(sk);
2483
2484 if (res)
2485 return res; /* "get" failed */
2486
2487 if (len < sizeof(value))
2488 return -EINVAL;
2489
2490 if (copy_to_user(ov, &value, sizeof(value)))
2491 return -EFAULT;
2492
2493 return put_user(sizeof(value), ol);
2494}
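/*
 * Editor's sketch (not part of the kernel source): reading an option back,
 * here the instantaneous receive-queue depth from TIPC_SOCK_RECVQ_DEPTH.
 */
#include <sys/socket.h>
#include <linux/tipc.h>

static int example_rcvq_depth(int sd)
{
	unsigned int depth = 0;
	socklen_t len = sizeof(depth);

	if (getsockopt(sd, SOL_TIPC, TIPC_SOCK_RECVQ_DEPTH, &depth, &len) < 0)
		return -1;
	return (int)depth;	/* number of buffers queued on the socket */
}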
2495
2496static int tipc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
2497{
2498 struct sock *sk = sock->sk;
2499 struct tipc_sioc_ln_req lnr;
2500 void __user *argp = (void __user *)arg;
2501
2502 switch (cmd) {
2503 case SIOCGETLINKNAME:
2504 if (copy_from_user(&lnr, argp, sizeof(lnr)))
2505 return -EFAULT;
2506 if (!tipc_node_get_linkname(sock_net(sk),
2507 lnr.bearer_id & 0xffff, lnr.peer,
2508 lnr.linkname, TIPC_MAX_LINK_NAME)) {
2509 if (copy_to_user(argp, &lnr, sizeof(lnr)))
2510 return -EFAULT;
2511 return 0;
2512 }
2513 return -EADDRNOTAVAIL;
2514 default:
2515 return -ENOIOCTLCMD;
2516 }
2517}
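/*
 * Editor's sketch (not part of the kernel source): resolving a link name
 * with the SIOCGETLINKNAME ioctl handled above.  @peer is the TIPC address
 * of the peer node and @bearer_id the bearer the link runs on; both values
 * are placeholders supplied by the caller.
 */
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/tipc.h>

static int example_linkname(int sd, __u32 peer, __u32 bearer_id)
{
	struct tipc_sioc_ln_req lnr = {
		.peer      = peer,
		.bearer_id = bearer_id,
	};

	if (ioctl(sd, SIOCGETLINKNAME, &lnr) < 0)
		return -1;
	printf("link: %s\n", lnr.linkname);
	return 0;
}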
2518
2519/* Protocol switches for the various types of TIPC sockets */
2520
2521static const struct proto_ops msg_ops = {
2522 .owner = THIS_MODULE,
2523 .family = AF_TIPC,
2524 .release = tipc_release,
2525 .bind = tipc_bind,
2526 .connect = tipc_connect,
2527 .socketpair = sock_no_socketpair,
2528 .accept = sock_no_accept,
2529 .getname = tipc_getname,
2530 .poll = tipc_poll,
2531 .ioctl = tipc_ioctl,
2532 .listen = sock_no_listen,
2533 .shutdown = tipc_shutdown,
2534 .setsockopt = tipc_setsockopt,
2535 .getsockopt = tipc_getsockopt,
2536 .sendmsg = tipc_sendmsg,
2537 .recvmsg = tipc_recvmsg,
2538 .mmap = sock_no_mmap,
2539 .sendpage = sock_no_sendpage
2540};
2541
2542static const struct proto_ops packet_ops = {
2543 .owner = THIS_MODULE,
2544 .family = AF_TIPC,
2545 .release = tipc_release,
2546 .bind = tipc_bind,
2547 .connect = tipc_connect,
2548 .socketpair = sock_no_socketpair,
2549 .accept = tipc_accept,
2550 .getname = tipc_getname,
2551 .poll = tipc_poll,
2552 .ioctl = tipc_ioctl,
2553 .listen = tipc_listen,
2554 .shutdown = tipc_shutdown,
2555 .setsockopt = tipc_setsockopt,
2556 .getsockopt = tipc_getsockopt,
2557 .sendmsg = tipc_send_packet,
2558 .recvmsg = tipc_recvmsg,
2559 .mmap = sock_no_mmap,
2560 .sendpage = sock_no_sendpage
2561};
2562
2563static const struct proto_ops stream_ops = {
2564 .owner = THIS_MODULE,
2565 .family = AF_TIPC,
2566 .release = tipc_release,
2567 .bind = tipc_bind,
2568 .connect = tipc_connect,
2569 .socketpair = sock_no_socketpair,
2570 .accept = tipc_accept,
2571 .getname = tipc_getname,
2572 .poll = tipc_poll,
2573 .ioctl = tipc_ioctl,
2574 .listen = tipc_listen,
2575 .shutdown = tipc_shutdown,
2576 .setsockopt = tipc_setsockopt,
2577 .getsockopt = tipc_getsockopt,
2578 .sendmsg = tipc_send_stream,
2579 .recvmsg = tipc_recv_stream,
2580 .mmap = sock_no_mmap,
2581 .sendpage = sock_no_sendpage
2582};
2583
2584static const struct net_proto_family tipc_family_ops = {
2585 .owner = THIS_MODULE,
2586 .family = AF_TIPC,
2587 .create = tipc_sk_create
2588};
2589
2590static struct proto tipc_proto = {
2591 .name = "TIPC",
2592 .owner = THIS_MODULE,
2593 .obj_size = sizeof(struct tipc_sock),
2594 .sysctl_rmem = sysctl_tipc_rmem
2595};
2596
2597/**
2598 * tipc_socket_init - initialize TIPC socket interface
2599 *
2600 * Returns 0 on success, errno otherwise
2601 */
2602int tipc_socket_init(void)
2603{
2604 int res;
2605
2606 res = proto_register(&tipc_proto, 1);
2607 if (res) {
2608 pr_err("Failed to register TIPC protocol type\n");
2609 goto out;
2610 }
2611
2612 res = sock_register(&tipc_family_ops);
2613 if (res) {
2614 pr_err("Failed to register TIPC socket type\n");
2615 proto_unregister(&tipc_proto);
2616 goto out;
2617 }
2618 out:
2619 return res;
2620}
2621
2622/**
2623 * tipc_socket_stop - stop TIPC socket interface
2624 */
2625void tipc_socket_stop(void)
2626{
2627 sock_unregister(tipc_family_ops.family);
2628 proto_unregister(&tipc_proto);
2629}
2630
2631/* Caller should hold socket lock for the passed tipc socket. */
2632static int __tipc_nl_add_sk_con(struct sk_buff *skb, struct tipc_sock *tsk)
2633{
2634 u32 peer_node;
2635 u32 peer_port;
2636 struct nlattr *nest;
2637
2638 peer_node = tsk_peer_node(tsk);
2639 peer_port = tsk_peer_port(tsk);
2640
2641 nest = nla_nest_start(skb, TIPC_NLA_SOCK_CON);
2642
2643 if (nla_put_u32(skb, TIPC_NLA_CON_NODE, peer_node))
2644 goto msg_full;
2645 if (nla_put_u32(skb, TIPC_NLA_CON_SOCK, peer_port))
2646 goto msg_full;
2647
2648 if (tsk->conn_type != 0) {
2649 if (nla_put_flag(skb, TIPC_NLA_CON_FLAG))
2650 goto msg_full;
2651 if (nla_put_u32(skb, TIPC_NLA_CON_TYPE, tsk->conn_type))
2652 goto msg_full;
2653 if (nla_put_u32(skb, TIPC_NLA_CON_INST, tsk->conn_instance))
2654 goto msg_full;
2655 }
2656 nla_nest_end(skb, nest);
2657
2658 return 0;
2659
2660msg_full:
2661 nla_nest_cancel(skb, nest);
2662
2663 return -EMSGSIZE;
2664}
2665
2666/* Caller should hold socket lock for the passed tipc socket. */
2667static int __tipc_nl_add_sk(struct sk_buff *skb, struct netlink_callback *cb,
2668 struct tipc_sock *tsk)
2669{
2670 int err;
2671 void *hdr;
2672 struct nlattr *attrs;
2673 struct net *net = sock_net(skb->sk);
2674 struct tipc_net *tn = net_generic(net, tipc_net_id);
2675 struct sock *sk = &tsk->sk;
2676
2677 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
2678 &tipc_genl_family, NLM_F_MULTI, TIPC_NL_SOCK_GET);
2679 if (!hdr)
2680 goto msg_cancel;
2681
2682 attrs = nla_nest_start(skb, TIPC_NLA_SOCK);
2683 if (!attrs)
2684 goto genlmsg_cancel;
2685 if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->portid))
2686 goto attr_msg_cancel;
2687 if (nla_put_u32(skb, TIPC_NLA_SOCK_ADDR, tn->own_addr))
2688 goto attr_msg_cancel;
2689
2690 if (tipc_sk_connected(sk)) {
2691 err = __tipc_nl_add_sk_con(skb, tsk);
2692 if (err)
2693 goto attr_msg_cancel;
2694 } else if (!list_empty(&tsk->publications)) {
2695 if (nla_put_flag(skb, TIPC_NLA_SOCK_HAS_PUBL))
2696 goto attr_msg_cancel;
2697 }
2698 nla_nest_end(skb, attrs);
2699 genlmsg_end(skb, hdr);
2700
2701 return 0;
2702
2703attr_msg_cancel:
2704 nla_nest_cancel(skb, attrs);
2705genlmsg_cancel:
2706 genlmsg_cancel(skb, hdr);
2707msg_cancel:
2708 return -EMSGSIZE;
2709}
2710
2711int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb)
2712{
2713 int err;
2714 struct tipc_sock *tsk;
2715 const struct bucket_table *tbl;
2716 struct rhash_head *pos;
2717 struct net *net = sock_net(skb->sk);
2718 struct tipc_net *tn = net_generic(net, tipc_net_id);
2719 u32 tbl_id = cb->args[0];
2720 u32 prev_portid = cb->args[1];
2721
2722 rcu_read_lock();
2723 tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht);
2724 for (; tbl_id < tbl->size; tbl_id++) {
2725 rht_for_each_entry_rcu(tsk, pos, tbl, tbl_id, node) {
2726 spin_lock_bh(&tsk->sk.sk_lock.slock);
2727 if (prev_portid && prev_portid != tsk->portid) {
2728 spin_unlock_bh(&tsk->sk.sk_lock.slock);
2729 continue;
2730 }
2731
2732 err = __tipc_nl_add_sk(skb, cb, tsk);
2733 if (err) {
2734 prev_portid = tsk->portid;
2735 spin_unlock_bh(&tsk->sk.sk_lock.slock);
2736 goto out;
2737 }
2738 prev_portid = 0;
2739 spin_unlock_bh(&tsk->sk.sk_lock.slock);
2740 }
2741 }
2742out:
2743 rcu_read_unlock();
2744 cb->args[0] = tbl_id;
2745 cb->args[1] = prev_portid;
2746
2747 return skb->len;
2748}
2749
2750/* Caller should hold socket lock for the passed tipc socket. */
2751static int __tipc_nl_add_sk_publ(struct sk_buff *skb,
2752 struct netlink_callback *cb,
2753 struct publication *publ)
2754{
2755 void *hdr;
2756 struct nlattr *attrs;
2757
2758 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
2759 &tipc_genl_family, NLM_F_MULTI, TIPC_NL_PUBL_GET);
2760 if (!hdr)
2761 goto msg_cancel;
2762
2763 attrs = nla_nest_start(skb, TIPC_NLA_PUBL);
2764 if (!attrs)
2765 goto genlmsg_cancel;
2766
2767 if (nla_put_u32(skb, TIPC_NLA_PUBL_KEY, publ->key))
2768 goto attr_msg_cancel;
2769 if (nla_put_u32(skb, TIPC_NLA_PUBL_TYPE, publ->type))
2770 goto attr_msg_cancel;
2771 if (nla_put_u32(skb, TIPC_NLA_PUBL_LOWER, publ->lower))
2772 goto attr_msg_cancel;
2773 if (nla_put_u32(skb, TIPC_NLA_PUBL_UPPER, publ->upper))
2774 goto attr_msg_cancel;
2775
2776 nla_nest_end(skb, attrs);
2777 genlmsg_end(skb, hdr);
2778
2779 return 0;
2780
2781attr_msg_cancel:
2782 nla_nest_cancel(skb, attrs);
2783genlmsg_cancel:
2784 genlmsg_cancel(skb, hdr);
2785msg_cancel:
2786 return -EMSGSIZE;
2787}
2788
2789/* Caller should hold socket lock for the passed tipc socket. */
2790static int __tipc_nl_list_sk_publ(struct sk_buff *skb,
2791 struct netlink_callback *cb,
2792 struct tipc_sock *tsk, u32 *last_publ)
2793{
2794 int err;
2795 struct publication *p;
2796
2797 if (*last_publ) {
2798 list_for_each_entry(p, &tsk->publications, pport_list) {
2799 if (p->key == *last_publ)
2800 break;
2801 }
2802 if (p->key != *last_publ) {
2803			/* We never set seq or call nl_dump_check_consistent(),
2804			 * which means that setting prev_seq here will cause the
2805			 * consistency check to fail in the netlink callback
2806			 * handler, resulting in the last NLMSG_DONE message
2807			 * having the NLM_F_DUMP_INTR flag set.
2808 */
2809 cb->prev_seq = 1;
2810 *last_publ = 0;
2811 return -EPIPE;
2812 }
2813 } else {
2814 p = list_first_entry(&tsk->publications, struct publication,
2815 pport_list);
2816 }
2817
2818 list_for_each_entry_from(p, &tsk->publications, pport_list) {
2819 err = __tipc_nl_add_sk_publ(skb, cb, p);
2820 if (err) {
2821 *last_publ = p->key;
2822 return err;
2823 }
2824 }
2825 *last_publ = 0;
2826
2827 return 0;
2828}
2829
2830int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb)
2831{
2832 int err;
2833 u32 tsk_portid = cb->args[0];
2834 u32 last_publ = cb->args[1];
2835 u32 done = cb->args[2];
2836 struct net *net = sock_net(skb->sk);
2837 struct tipc_sock *tsk;
2838
2839 if (!tsk_portid) {
2840 struct nlattr **attrs;
2841 struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1];
2842
2843 err = tipc_nlmsg_parse(cb->nlh, &attrs);
2844 if (err)
2845 return err;
2846
2847 if (!attrs[TIPC_NLA_SOCK])
2848 return -EINVAL;
2849
2850 err = nla_parse_nested(sock, TIPC_NLA_SOCK_MAX,
2851 attrs[TIPC_NLA_SOCK],
2852 tipc_nl_sock_policy);
2853 if (err)
2854 return err;
2855
2856 if (!sock[TIPC_NLA_SOCK_REF])
2857 return -EINVAL;
2858
2859 tsk_portid = nla_get_u32(sock[TIPC_NLA_SOCK_REF]);
2860 }
2861
2862 if (done)
2863 return 0;
2864
2865 tsk = tipc_sk_lookup(net, tsk_portid);
2866 if (!tsk)
2867 return -EINVAL;
2868
2869 lock_sock(&tsk->sk);
2870 err = __tipc_nl_list_sk_publ(skb, cb, tsk, &last_publ);
2871 if (!err)
2872 done = 1;
2873 release_sock(&tsk->sk);
2874 sock_put(&tsk->sk);
2875
2876 cb->args[0] = tsk_portid;
2877 cb->args[1] = last_publ;
2878 cb->args[2] = done;
2879
2880 return skb->len;
2881}
1/*
2 * net/tipc/socket.c: TIPC socket API
3 *
4 * Copyright (c) 2001-2007, 2012-2017, Ericsson AB
5 * Copyright (c) 2004-2008, 2010-2013, Wind River Systems
6 * All rights reserved.
7 * (Same BSD/GPLv2 dual-license terms as in the file header above.)
35 */
36
37#include <linux/rhashtable.h>
38#include <linux/sched/signal.h>
39
40#include "core.h"
41#include "name_table.h"
42#include "node.h"
43#include "link.h"
44#include "name_distr.h"
45#include "socket.h"
46#include "bcast.h"
47#include "netlink.h"
48#include "group.h"
49#include "trace.h"
50
51#define CONN_TIMEOUT_DEFAULT 8000 /* default connect timeout = 8s */
52#define CONN_PROBING_INTV msecs_to_jiffies(3600000) /* [ms] => 1 h */
53#define TIPC_FWD_MSG 1
54#define TIPC_MAX_PORT 0xffffffff
55#define TIPC_MIN_PORT 1
56 #define TIPC_ACK_RATE 4 /* ACK at 1/4 of rcv window size */
57
58enum {
59 TIPC_LISTEN = TCP_LISTEN,
60 TIPC_ESTABLISHED = TCP_ESTABLISHED,
61 TIPC_OPEN = TCP_CLOSE,
62 TIPC_DISCONNECTING = TCP_CLOSE_WAIT,
63 TIPC_CONNECTING = TCP_SYN_SENT,
64};
65
66struct sockaddr_pair {
67 struct sockaddr_tipc sock;
68 struct sockaddr_tipc member;
69};
70
71/**
72 * struct tipc_sock - TIPC socket structure
73 * @sk: socket - interacts with 'port' and with user via the socket API
74 * @conn_type: TIPC type used when connection was established
75 * @conn_instance: TIPC instance used when connection was established
76 * @published: non-zero if port has one or more associated names
77 * @max_pkt: maximum packet size "hint" used when building messages sent by port
78 * @portid: unique port identity in TIPC socket hash table
79 * @phdr: preformatted message header used when sending messages
80 * @cong_links: list of congested links
81 * @publications: list of publications for port
82 * @probe_unacked: last connection probe has not yet been acknowledged
83 * @pub_count: total # of publications port has made during its lifetime
84 * @conn_timeout: the time we can wait for an unresponded setup request
85 * @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue
86 * @cong_link_cnt: number of congested links
87 * @snt_unacked: # messages sent by socket, and not yet acked by peer
88 * @rcv_unacked: # messages read by user, but not yet acked back to peer
89 * @peer: 'connected' peer for dgram/rdm
90 * @node: hash table node
91 * @mc_method: cookie for use between socket and broadcast layer
92 * @rcu: rcu struct for tipc_sock
93 */
94struct tipc_sock {
95 struct sock sk;
96 u32 conn_type;
97 u32 conn_instance;
98 int published;
99 u32 max_pkt;
100 u32 portid;
101 struct tipc_msg phdr;
102 struct list_head cong_links;
103 struct list_head publications;
104 u32 pub_count;
105 atomic_t dupl_rcvcnt;
106 u16 conn_timeout;
107 bool probe_unacked;
108 u16 cong_link_cnt;
109 u16 snt_unacked;
110 u16 snd_win;
111 u16 peer_caps;
112 u16 rcv_unacked;
113 u16 rcv_win;
114 struct sockaddr_tipc peer;
115 struct rhash_head node;
116 struct tipc_mc_method mc_method;
117 struct rcu_head rcu;
118 struct tipc_group *group;
119 bool group_is_open;
120};
121
122static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
123static void tipc_data_ready(struct sock *sk);
124static void tipc_write_space(struct sock *sk);
125static void tipc_sock_destruct(struct sock *sk);
126static int tipc_release(struct socket *sock);
127static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
128 bool kern);
129static void tipc_sk_timeout(struct timer_list *t);
130static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
131 struct tipc_name_seq const *seq);
132static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
133 struct tipc_name_seq const *seq);
134static int tipc_sk_leave(struct tipc_sock *tsk);
135static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid);
136static int tipc_sk_insert(struct tipc_sock *tsk);
137static void tipc_sk_remove(struct tipc_sock *tsk);
138static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz);
139static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz);
140
141static const struct proto_ops packet_ops;
142static const struct proto_ops stream_ops;
143static const struct proto_ops msg_ops;
144static struct proto tipc_proto;
145static const struct rhashtable_params tsk_rht_params;
146
147static u32 tsk_own_node(struct tipc_sock *tsk)
148{
149 return msg_prevnode(&tsk->phdr);
150}
151
152static u32 tsk_peer_node(struct tipc_sock *tsk)
153{
154 return msg_destnode(&tsk->phdr);
155}
156
157static u32 tsk_peer_port(struct tipc_sock *tsk)
158{
159 return msg_destport(&tsk->phdr);
160}
161
162static bool tsk_unreliable(struct tipc_sock *tsk)
163{
164 return msg_src_droppable(&tsk->phdr) != 0;
165}
166
167static void tsk_set_unreliable(struct tipc_sock *tsk, bool unreliable)
168{
169 msg_set_src_droppable(&tsk->phdr, unreliable ? 1 : 0);
170}
171
172static bool tsk_unreturnable(struct tipc_sock *tsk)
173{
174 return msg_dest_droppable(&tsk->phdr) != 0;
175}
176
177static void tsk_set_unreturnable(struct tipc_sock *tsk, bool unreturnable)
178{
179 msg_set_dest_droppable(&tsk->phdr, unreturnable ? 1 : 0);
180}
181
182static int tsk_importance(struct tipc_sock *tsk)
183{
184 return msg_importance(&tsk->phdr);
185}
186
187static int tsk_set_importance(struct tipc_sock *tsk, int imp)
188{
189 if (imp > TIPC_CRITICAL_IMPORTANCE)
190 return -EINVAL;
191 msg_set_importance(&tsk->phdr, (u32)imp);
192 return 0;
193}
194
195static struct tipc_sock *tipc_sk(const struct sock *sk)
196{
197 return container_of(sk, struct tipc_sock, sk);
198}
199
200static bool tsk_conn_cong(struct tipc_sock *tsk)
201{
202 return tsk->snt_unacked > tsk->snd_win;
203}
204
205static u16 tsk_blocks(int len)
206{
207 return ((len / FLOWCTL_BLK_SZ) + 1);
208}
209
210 /* tsk_adv_blocks(): translate a buffer size in bytes to number of
211 * advertisable blocks, taking into account the ratio truesize(len)/len
212 * We can trust that this ratio is always < 4 for len >= FLOWCTL_BLK_SZ
213 */
214static u16 tsk_adv_blocks(int len)
215{
216 return len / FLOWCTL_BLK_SZ / 4;
217}
218
219/* tsk_inc(): increment counter for sent or received data
220 * - If block based flow control is not supported by peer we
221 * fall back to message based ditto, incrementing the counter
222 */
223static u16 tsk_inc(struct tipc_sock *tsk, int msglen)
224{
225 if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
226 return ((msglen / FLOWCTL_BLK_SZ) + 1);
227 return 1;
228}
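/*
 * Editor's worked example (assuming FLOWCTL_BLK_SZ == 1024, as defined in
 * socket.h): a peer advertising a 2 MiB receive buffer grants
 * tsk_adv_blocks(2097152) = 2097152 / 1024 / 4 = 512 blocks, the /4
 * absorbing the worst-case truesize(len)/len overhead.  A 5000-byte
 * message then consumes tsk_inc() = 5000 / 1024 + 1 = 5 of those blocks,
 * or exactly 1 if the peer lacks TIPC_BLOCK_FLOWCTL.
 */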
229
230/**
231 * tsk_advance_rx_queue - discard first buffer in socket receive queue
232 *
233 * Caller must hold socket lock
234 */
235static void tsk_advance_rx_queue(struct sock *sk)
236{
237 trace_tipc_sk_advance_rx(sk, NULL, TIPC_DUMP_SK_RCVQ, " ");
238 kfree_skb(__skb_dequeue(&sk->sk_receive_queue));
239}
240
241 /* tipc_sk_respond(): send response message back to sender
242 */
243static void tipc_sk_respond(struct sock *sk, struct sk_buff *skb, int err)
244{
245 u32 selector;
246 u32 dnode;
247 u32 onode = tipc_own_addr(sock_net(sk));
248
249 if (!tipc_msg_reverse(onode, &skb, err))
250 return;
251
252 trace_tipc_sk_rej_msg(sk, skb, TIPC_DUMP_NONE, "@sk_respond!");
253 dnode = msg_destnode(buf_msg(skb));
254 selector = msg_origport(buf_msg(skb));
255 tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector);
256}
257
258/**
259 * tsk_rej_rx_queue - reject all buffers in socket receive queue
260 *
261 * Caller must hold socket lock
262 */
263static void tsk_rej_rx_queue(struct sock *sk)
264{
265 struct sk_buff *skb;
266
267 while ((skb = __skb_dequeue(&sk->sk_receive_queue)))
268 tipc_sk_respond(sk, skb, TIPC_ERR_NO_PORT);
269}
270
271static bool tipc_sk_connected(struct sock *sk)
272{
273 return sk->sk_state == TIPC_ESTABLISHED;
274}
275
276 /* tipc_sk_type_connectionless - check if the socket is a datagram socket
277  * @sk: socket
278  *
279  * Returns true if connectionless, false otherwise
280 */
281static bool tipc_sk_type_connectionless(struct sock *sk)
282{
283 return sk->sk_type == SOCK_RDM || sk->sk_type == SOCK_DGRAM;
284}
285
286/* tsk_peer_msg - verify if message was sent by connected port's peer
287 *
288 * Handles cases where the node's network address has changed from
289 * the default of <0.0.0> to its configured setting.
290 */
291static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg)
292{
293 struct sock *sk = &tsk->sk;
294 u32 self = tipc_own_addr(sock_net(sk));
295 u32 peer_port = tsk_peer_port(tsk);
296 u32 orig_node, peer_node;
297
298 if (unlikely(!tipc_sk_connected(sk)))
299 return false;
300
301 if (unlikely(msg_origport(msg) != peer_port))
302 return false;
303
304 orig_node = msg_orignode(msg);
305 peer_node = tsk_peer_node(tsk);
306
307 if (likely(orig_node == peer_node))
308 return true;
309
310 if (!orig_node && peer_node == self)
311 return true;
312
313 if (!peer_node && orig_node == self)
314 return true;
315
316 return false;
317}
318
319/* tipc_set_sk_state - set the sk_state of the socket
320 * @sk: socket
321 *
322 * Caller must hold socket lock
323 *
324 * Returns 0 on success, errno otherwise
325 */
326static int tipc_set_sk_state(struct sock *sk, int state)
327{
328 int oldsk_state = sk->sk_state;
329 int res = -EINVAL;
330
331 switch (state) {
332 case TIPC_OPEN:
333 res = 0;
334 break;
335 case TIPC_LISTEN:
336 case TIPC_CONNECTING:
337 if (oldsk_state == TIPC_OPEN)
338 res = 0;
339 break;
340 case TIPC_ESTABLISHED:
341 if (oldsk_state == TIPC_CONNECTING ||
342 oldsk_state == TIPC_OPEN)
343 res = 0;
344 break;
345 case TIPC_DISCONNECTING:
346 if (oldsk_state == TIPC_CONNECTING ||
347 oldsk_state == TIPC_ESTABLISHED)
348 res = 0;
349 break;
350 }
351
352 if (!res)
353 sk->sk_state = state;
354
355 return res;
356}
357
358static int tipc_sk_sock_err(struct socket *sock, long *timeout)
359{
360 struct sock *sk = sock->sk;
361 int err = sock_error(sk);
362 int typ = sock->type;
363
364 if (err)
365 return err;
366 if (typ == SOCK_STREAM || typ == SOCK_SEQPACKET) {
367 if (sk->sk_state == TIPC_DISCONNECTING)
368 return -EPIPE;
369 else if (!tipc_sk_connected(sk))
370 return -ENOTCONN;
371 }
372 if (!*timeout)
373 return -EAGAIN;
374 if (signal_pending(current))
375 return sock_intr_errno(*timeout);
376
377 return 0;
378}
379
380#define tipc_wait_for_cond(sock_, timeo_, condition_) \
381({ \
382 DEFINE_WAIT_FUNC(wait_, woken_wake_function); \
383 struct sock *sk_; \
384 int rc_; \
385 \
386 while ((rc_ = !(condition_))) { \
387 /* coupled with smp_wmb() in tipc_sk_proto_rcv() */ \
388 smp_rmb(); \
389 sk_ = (sock_)->sk; \
390 rc_ = tipc_sk_sock_err((sock_), timeo_); \
391 if (rc_) \
392 break; \
393 add_wait_queue(sk_sleep(sk_), &wait_); \
394 release_sock(sk_); \
395 *(timeo_) = wait_woken(&wait_, TASK_INTERRUPTIBLE, *(timeo_)); \
396 sched_annotate_sleep(); \
397 lock_sock(sk_); \
398 remove_wait_queue(sk_sleep(sk_), &wait_); \
399 } \
400 rc_; \
401})
402
403/**
404 * tipc_sk_create - create a TIPC socket
405 * @net: network namespace (must be default network)
406 * @sock: pre-allocated socket structure
407 * @protocol: protocol indicator (must be 0)
408 * @kern: caused by kernel or by userspace?
409 *
410 * This routine creates additional data structures used by the TIPC socket,
411 * initializes them, and links them together.
412 *
413 * Returns 0 on success, errno otherwise
414 */
415static int tipc_sk_create(struct net *net, struct socket *sock,
416 int protocol, int kern)
417{
418 const struct proto_ops *ops;
419 struct sock *sk;
420 struct tipc_sock *tsk;
421 struct tipc_msg *msg;
422
423 /* Validate arguments */
424 if (unlikely(protocol != 0))
425 return -EPROTONOSUPPORT;
426
427 switch (sock->type) {
428 case SOCK_STREAM:
429 ops = &stream_ops;
430 break;
431 case SOCK_SEQPACKET:
432 ops = &packet_ops;
433 break;
434 case SOCK_DGRAM:
435 case SOCK_RDM:
436 ops = &msg_ops;
437 break;
438 default:
439 return -EPROTOTYPE;
440 }
441
442 /* Allocate socket's protocol area */
443 sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto, kern);
444 if (sk == NULL)
445 return -ENOMEM;
446
447 tsk = tipc_sk(sk);
448 tsk->max_pkt = MAX_PKT_DEFAULT;
449 INIT_LIST_HEAD(&tsk->publications);
450 INIT_LIST_HEAD(&tsk->cong_links);
451 msg = &tsk->phdr;
452
453 /* Finish initializing socket data structures */
454 sock->ops = ops;
455 sock_init_data(sock, sk);
456 tipc_set_sk_state(sk, TIPC_OPEN);
457 if (tipc_sk_insert(tsk)) {
458 pr_warn("Socket create failed; port number exhausted\n");
459 return -EINVAL;
460 }
461
462 /* Ensure tsk is visible before we read own_addr. */
463 smp_mb();
464
465 tipc_msg_init(tipc_own_addr(net), msg, TIPC_LOW_IMPORTANCE,
466 TIPC_NAMED_MSG, NAMED_H_SIZE, 0);
467
468 msg_set_origport(msg, tsk->portid);
469 timer_setup(&sk->sk_timer, tipc_sk_timeout, 0);
470 sk->sk_shutdown = 0;
471 sk->sk_backlog_rcv = tipc_sk_backlog_rcv;
472 sk->sk_rcvbuf = sysctl_tipc_rmem[1];
473 sk->sk_data_ready = tipc_data_ready;
474 sk->sk_write_space = tipc_write_space;
475 sk->sk_destruct = tipc_sock_destruct;
476 tsk->conn_timeout = CONN_TIMEOUT_DEFAULT;
477 tsk->group_is_open = true;
478 atomic_set(&tsk->dupl_rcvcnt, 0);
479
480 /* Start out with safe limits until we receive an advertised window */
481 tsk->snd_win = tsk_adv_blocks(RCVBUF_MIN);
482 tsk->rcv_win = tsk->snd_win;
483
484 if (tipc_sk_type_connectionless(sk)) {
485 tsk_set_unreturnable(tsk, true);
486 if (sock->type == SOCK_DGRAM)
487 tsk_set_unreliable(tsk, true);
488 }
489 __skb_queue_head_init(&tsk->mc_method.deferredq);
490 trace_tipc_sk_create(sk, NULL, TIPC_DUMP_NONE, " ");
491 return 0;
492}
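/*
 * Editor's sketch (not part of the kernel source): the userspace socket()
 * calls that land in tipc_sk_create() above, one per supported type.
 * A real program would keep only the descriptor it needs.
 */
#include <sys/socket.h>
#include <linux/tipc.h>

static int example_create(void)
{
	int rdm    = socket(AF_TIPC, SOCK_RDM, 0);	  /* msg_ops, reliable dgram */
	int dgram  = socket(AF_TIPC, SOCK_DGRAM, 0);	  /* msg_ops, unreliable */
	int packet = socket(AF_TIPC, SOCK_SEQPACKET, 0); /* packet_ops */
	int stream = socket(AF_TIPC, SOCK_STREAM, 0);	  /* stream_ops */

	return (rdm < 0 || dgram < 0 || packet < 0 || stream < 0) ? -1 : rdm;
}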
493
494static void tipc_sk_callback(struct rcu_head *head)
495{
496 struct tipc_sock *tsk = container_of(head, struct tipc_sock, rcu);
497
498 sock_put(&tsk->sk);
499}
500
501/* Caller should hold socket lock for the socket. */
502static void __tipc_shutdown(struct socket *sock, int error)
503{
504 struct sock *sk = sock->sk;
505 struct tipc_sock *tsk = tipc_sk(sk);
506 struct net *net = sock_net(sk);
507 long timeout = CONN_TIMEOUT_DEFAULT;
508 u32 dnode = tsk_peer_node(tsk);
509 struct sk_buff *skb;
510
511	/* Prevent hi-prio shutdown msgs from bypassing msgs in link wakeup queue */
512 tipc_wait_for_cond(sock, &timeout, (!tsk->cong_link_cnt &&
513 !tsk_conn_cong(tsk)));
514
515 /* Remove any pending SYN message */
516 __skb_queue_purge(&sk->sk_write_queue);
517
518 /* Reject all unreceived messages, except on an active connection
519 * (which disconnects locally & sends a 'FIN+' to peer).
520 */
521 while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
522 if (TIPC_SKB_CB(skb)->bytes_read) {
523 kfree_skb(skb);
524 continue;
525 }
526 if (!tipc_sk_type_connectionless(sk) &&
527 sk->sk_state != TIPC_DISCONNECTING) {
528 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
529 tipc_node_remove_conn(net, dnode, tsk->portid);
530 }
531 tipc_sk_respond(sk, skb, error);
532 }
533
534 if (tipc_sk_type_connectionless(sk))
535 return;
536
537 if (sk->sk_state != TIPC_DISCONNECTING) {
538 skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
539 TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode,
540 tsk_own_node(tsk), tsk_peer_port(tsk),
541 tsk->portid, error);
542 if (skb)
543 tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
544 tipc_node_remove_conn(net, dnode, tsk->portid);
545 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
546 }
547}
548
549/**
550 * tipc_release - destroy a TIPC socket
551 * @sock: socket to destroy
552 *
553 * This routine cleans up any messages that are still queued on the socket.
554 * For DGRAM and RDM socket types, all queued messages are rejected.
555 * For SEQPACKET and STREAM socket types, the first message is rejected
556 * and any others are discarded. (If the first message on a STREAM socket
557 * is partially-read, it is discarded and the next one is rejected instead.)
558 *
559 * NOTE: Rejected messages are not necessarily returned to the sender! They
560 * are returned or discarded according to the "destination droppable" setting
561 * specified for the message by the sender.
562 *
563 * Returns 0 on success, errno otherwise
564 */
565static int tipc_release(struct socket *sock)
566{
567 struct sock *sk = sock->sk;
568 struct tipc_sock *tsk;
569
570 /*
571 * Exit if socket isn't fully initialized (occurs when a failed accept()
572 * releases a pre-allocated child socket that was never used)
573 */
574 if (sk == NULL)
575 return 0;
576
577 tsk = tipc_sk(sk);
578 lock_sock(sk);
579
580 trace_tipc_sk_release(sk, NULL, TIPC_DUMP_ALL, " ");
581 __tipc_shutdown(sock, TIPC_ERR_NO_PORT);
582 sk->sk_shutdown = SHUTDOWN_MASK;
583 tipc_sk_leave(tsk);
584 tipc_sk_withdraw(tsk, 0, NULL);
585 __skb_queue_purge(&tsk->mc_method.deferredq);
586 sk_stop_timer(sk, &sk->sk_timer);
587 tipc_sk_remove(tsk);
588
589 sock_orphan(sk);
590 /* Reject any messages that accumulated in backlog queue */
591 release_sock(sk);
592 tipc_dest_list_purge(&tsk->cong_links);
593 tsk->cong_link_cnt = 0;
594 call_rcu(&tsk->rcu, tipc_sk_callback);
595 sock->sk = NULL;
596
597 return 0;
598}
599
600/**
601 * tipc_bind - associate or disassociate TIPC name(s) with a socket
602 * @sock: socket structure
603 * @uaddr: socket address describing name(s) and desired operation
604 * @uaddr_len: size of socket address data structure
605 *
606 * Name and name sequence binding is indicated using a positive scope value;
607 * a negative scope value unbinds the specified name. Specifying no name
608 * (i.e. a socket address length of 0) unbinds all names from the socket.
609 *
610 * Returns 0 on success, errno otherwise
611 *
612 * NOTE: This routine doesn't need to take the socket lock since it doesn't
613 * access any non-constant socket information.
614 */
615static int tipc_bind(struct socket *sock, struct sockaddr *uaddr,
616 int uaddr_len)
617{
618 struct sock *sk = sock->sk;
619 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
620 struct tipc_sock *tsk = tipc_sk(sk);
621 int res = -EINVAL;
622
623 lock_sock(sk);
624 if (unlikely(!uaddr_len)) {
625 res = tipc_sk_withdraw(tsk, 0, NULL);
626 goto exit;
627 }
628 if (tsk->group) {
629 res = -EACCES;
630 goto exit;
631 }
632 if (uaddr_len < sizeof(struct sockaddr_tipc)) {
633 res = -EINVAL;
634 goto exit;
635 }
636 if (addr->family != AF_TIPC) {
637 res = -EAFNOSUPPORT;
638 goto exit;
639 }
640
641 if (addr->addrtype == TIPC_ADDR_NAME)
642 addr->addr.nameseq.upper = addr->addr.nameseq.lower;
643 else if (addr->addrtype != TIPC_ADDR_NAMESEQ) {
644 res = -EAFNOSUPPORT;
645 goto exit;
646 }
647
648 if ((addr->addr.nameseq.type < TIPC_RESERVED_TYPES) &&
649 (addr->addr.nameseq.type != TIPC_TOP_SRV) &&
650 (addr->addr.nameseq.type != TIPC_CFG_SRV)) {
651 res = -EACCES;
652 goto exit;
653 }
654
655 res = (addr->scope >= 0) ?
656 tipc_sk_publish(tsk, addr->scope, &addr->addr.nameseq) :
657 tipc_sk_withdraw(tsk, -addr->scope, &addr->addr.nameseq);
658exit:
659 release_sock(sk);
660 return res;
661}
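/*
 * Editor's sketch (not part of the kernel source): binding a name
 * sequence, i.e. a whole range of instances, as handled by tipc_bind()
 * above.  Type 18888 and the 0-99 range are arbitrary example values.
 */
#include <sys/socket.h>
#include <linux/tipc.h>

static int example_bind_range(int sd)
{
	struct sockaddr_tipc name = {
		.family   = AF_TIPC,
		.addrtype = TIPC_ADDR_NAMESEQ,
		.scope    = TIPC_CLUSTER_SCOPE,
		.addr.nameseq.type  = 18888,
		.addr.nameseq.lower = 0,
		.addr.nameseq.upper = 99,
	};

	return bind(sd, (struct sockaddr *)&name, sizeof(name));
}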
662
663/**
664 * tipc_getname - get port ID of socket or peer socket
665 * @sock: socket structure
666 * @uaddr: area for returned socket address
667 * @peer: 0 = own ID, 1 = current peer ID, 2 = current/former peer ID
668 *
669 * Returns the size of the returned socket address on success,
670 * errno otherwise
671 *
672 * NOTE: This routine doesn't need to take the socket lock since it only
673 * accesses socket information that is unchanging (or which changes in
674 * a completely predictable manner).
675 */
676static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
677 int peer)
678{
679 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
680 struct sock *sk = sock->sk;
681 struct tipc_sock *tsk = tipc_sk(sk);
682
683 memset(addr, 0, sizeof(*addr));
684 if (peer) {
685 if ((!tipc_sk_connected(sk)) &&
686 ((peer != 2) || (sk->sk_state != TIPC_DISCONNECTING)))
687 return -ENOTCONN;
688 addr->addr.id.ref = tsk_peer_port(tsk);
689 addr->addr.id.node = tsk_peer_node(tsk);
690 } else {
691 addr->addr.id.ref = tsk->portid;
692 addr->addr.id.node = tipc_own_addr(sock_net(sk));
693 }
694
695 addr->addrtype = TIPC_ADDR_ID;
696 addr->family = AF_TIPC;
697 addr->scope = 0;
698 addr->addr.name.domain = 0;
699
700 return sizeof(*addr);
701}
702
703/**
704 * tipc_poll - read and possibly block on pollmask
705 * @file: file structure associated with the socket
706 * @sock: socket for which to calculate the poll bits
707 * @wait: poll table
708 *
709 * Returns pollmask value
710 *
711 * COMMENTARY:
712 * It appears that the usual socket locking mechanisms are not useful here
713 * since the pollmask info is potentially out-of-date the moment this routine
714 * exits. TCP and other protocols seem to rely on higher level poll routines
715 * to handle any preventable race conditions, so TIPC will do the same ...
716 *
717 * IMPORTANT: The fact that a read or write operation is indicated does NOT
718 * imply that the operation will succeed, merely that it should be performed
719 * and will not block.
720 */
721static __poll_t tipc_poll(struct file *file, struct socket *sock,
722 poll_table *wait)
723{
724 struct sock *sk = sock->sk;
725 struct tipc_sock *tsk = tipc_sk(sk);
726 __poll_t revents = 0;
727
728 sock_poll_wait(file, sock, wait);
729 trace_tipc_sk_poll(sk, NULL, TIPC_DUMP_ALL, " ");
730
731 if (sk->sk_shutdown & RCV_SHUTDOWN)
732 revents |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
733 if (sk->sk_shutdown == SHUTDOWN_MASK)
734 revents |= EPOLLHUP;
735
736 switch (sk->sk_state) {
737 case TIPC_ESTABLISHED:
738 if (!tsk->cong_link_cnt && !tsk_conn_cong(tsk))
739 revents |= EPOLLOUT;
740 /* fall through */
741 case TIPC_LISTEN:
742 case TIPC_CONNECTING:
743 if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
744 revents |= EPOLLIN | EPOLLRDNORM;
745 break;
746 case TIPC_OPEN:
747 if (tsk->group_is_open && !tsk->cong_link_cnt)
748 revents |= EPOLLOUT;
749 if (!tipc_sk_type_connectionless(sk))
750 break;
751 if (skb_queue_empty_lockless(&sk->sk_receive_queue))
752 break;
753 revents |= EPOLLIN | EPOLLRDNORM;
754 break;
755 case TIPC_DISCONNECTING:
756 revents = EPOLLIN | EPOLLRDNORM | EPOLLHUP;
757 break;
758 }
759 return revents;
760}
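/*
 * Editor's sketch (not part of the kernel source): a minimal userspace
 * consumer of the pollmask computed above.  POLLRDHUP needs _GNU_SOURCE.
 */
#define _GNU_SOURCE
#include <poll.h>

static int example_wait_readable(int sd, int timeout_ms)
{
	struct pollfd pfd = { .fd = sd, .events = POLLIN | POLLRDHUP };
	int n = poll(&pfd, 1, timeout_ms);

	if (n <= 0)
		return n;		/* 0 = timeout, -1 = error */
	if (pfd.revents & (POLLHUP | POLLRDHUP))
		return -1;		/* EPOLLHUP/EPOLLRDHUP cases above */
	return 1;			/* readable without blocking */
}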
761
762/**
763 * tipc_sendmcast - send multicast message
764 * @sock: socket structure
765 * @seq: destination address
766 * @msg: message to send
767 * @dlen: length of data to send
768 * @timeout: timeout to wait for wakeup
769 *
770 * Called from function tipc_sendmsg(), which has done all sanity checks
771 * Returns the number of bytes sent on success, or errno
772 */
773static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq,
774 struct msghdr *msg, size_t dlen, long timeout)
775{
776 struct sock *sk = sock->sk;
777 struct tipc_sock *tsk = tipc_sk(sk);
778 struct tipc_msg *hdr = &tsk->phdr;
779 struct net *net = sock_net(sk);
780 int mtu = tipc_bcast_get_mtu(net);
781 struct tipc_mc_method *method = &tsk->mc_method;
782 struct sk_buff_head pkts;
783 struct tipc_nlist dsts;
784 int rc;
785
786 if (tsk->group)
787 return -EACCES;
788
789 /* Block or return if any destination link is congested */
790 rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt);
791 if (unlikely(rc))
792 return rc;
793
794 /* Lookup destination nodes */
795 tipc_nlist_init(&dsts, tipc_own_addr(net));
796 tipc_nametbl_lookup_dst_nodes(net, seq->type, seq->lower,
797 seq->upper, &dsts);
798 if (!dsts.local && !dsts.remote)
799 return -EHOSTUNREACH;
800
801 /* Build message header */
802 msg_set_type(hdr, TIPC_MCAST_MSG);
803 msg_set_hdr_sz(hdr, MCAST_H_SIZE);
804 msg_set_lookup_scope(hdr, TIPC_CLUSTER_SCOPE);
805 msg_set_destport(hdr, 0);
806 msg_set_destnode(hdr, 0);
807 msg_set_nametype(hdr, seq->type);
808 msg_set_namelower(hdr, seq->lower);
809 msg_set_nameupper(hdr, seq->upper);
810
811 /* Build message as chain of buffers */
812 __skb_queue_head_init(&pkts);
813 rc = tipc_msg_build(hdr, msg, 0, dlen, mtu, &pkts);
814
815 /* Send message if build was successful */
816 if (unlikely(rc == dlen)) {
817 trace_tipc_sk_sendmcast(sk, skb_peek(&pkts),
818 TIPC_DUMP_SK_SNDQ, " ");
819 rc = tipc_mcast_xmit(net, &pkts, method, &dsts,
820 &tsk->cong_link_cnt);
821 }
822
823 tipc_nlist_purge(&dsts);
824
825 return rc ? rc : dlen;
826}
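/*
 * Editor's sketch (not part of the kernel source): the sendto() that ends
 * up in tipc_sendmcast() above: one datagram delivered to every socket
 * bound anywhere in the example 0-99 instance range of type 18888.
 */
#include <string.h>
#include <sys/socket.h>
#include <linux/tipc.h>

static int example_mcast(int sd)	/* sd: a SOCK_RDM socket */
{
	static const char msg[] = "hello, everyone";
	struct sockaddr_tipc dst = {
		.family   = AF_TIPC,
		.addrtype = TIPC_ADDR_MCAST,
		.addr.nameseq.type  = 18888,
		.addr.nameseq.lower = 0,
		.addr.nameseq.upper = 99,
	};

	return sendto(sd, msg, sizeof(msg), 0,
		      (struct sockaddr *)&dst, sizeof(dst));
}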
827
828/**
829 * tipc_send_group_msg - send a message to a member in the group
830 * @net: network namespace
831 * @m: message to send
832 * @mb: group member
833 * @dnode: destination node
834 * @dport: destination port
835 * @dlen: total length of message data
836 */
837static int tipc_send_group_msg(struct net *net, struct tipc_sock *tsk,
838 struct msghdr *m, struct tipc_member *mb,
839 u32 dnode, u32 dport, int dlen)
840{
841 u16 bc_snd_nxt = tipc_group_bc_snd_nxt(tsk->group);
842 struct tipc_mc_method *method = &tsk->mc_method;
843 int blks = tsk_blocks(GROUP_H_SIZE + dlen);
844 struct tipc_msg *hdr = &tsk->phdr;
845 struct sk_buff_head pkts;
846 int mtu, rc;
847
848 /* Complete message header */
849 msg_set_type(hdr, TIPC_GRP_UCAST_MSG);
850 msg_set_hdr_sz(hdr, GROUP_H_SIZE);
851 msg_set_destport(hdr, dport);
852 msg_set_destnode(hdr, dnode);
853 msg_set_grp_bc_seqno(hdr, bc_snd_nxt);
854
855 /* Build message as chain of buffers */
856 __skb_queue_head_init(&pkts);
857 mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
858 rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
859 if (unlikely(rc != dlen))
860 return rc;
861
862 /* Send message */
863 rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
864 if (unlikely(rc == -ELINKCONG)) {
865 tipc_dest_push(&tsk->cong_links, dnode, 0);
866 tsk->cong_link_cnt++;
867 }
868
869 /* Update send window */
870 tipc_group_update_member(mb, blks);
871
872 /* A broadcast sent within next EXPIRE period must follow same path */
873 method->rcast = true;
874 method->mandatory = true;
875 return dlen;
876}
877
878/**
879 * tipc_send_group_unicast - send message to a member in the group
880 * @sock: socket structure
881 * @m: message to send
882 * @dlen: total length of message data
883 * @timeout: timeout to wait for wakeup
884 *
885 * Called from function tipc_sendmsg(), which has done all sanity checks
886 * Returns the number of bytes sent on success, or errno
887 */
888static int tipc_send_group_unicast(struct socket *sock, struct msghdr *m,
889 int dlen, long timeout)
890{
891 struct sock *sk = sock->sk;
892 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
893 int blks = tsk_blocks(GROUP_H_SIZE + dlen);
894 struct tipc_sock *tsk = tipc_sk(sk);
895 struct net *net = sock_net(sk);
896 struct tipc_member *mb = NULL;
897 u32 node, port;
898 int rc;
899
900 node = dest->addr.id.node;
901 port = dest->addr.id.ref;
902 if (!port && !node)
903 return -EHOSTUNREACH;
904
905 /* Block or return if destination link or member is congested */
906 rc = tipc_wait_for_cond(sock, &timeout,
907 !tipc_dest_find(&tsk->cong_links, node, 0) &&
908 tsk->group &&
909 !tipc_group_cong(tsk->group, node, port, blks,
910 &mb));
911 if (unlikely(rc))
912 return rc;
913
914 if (unlikely(!mb))
915 return -EHOSTUNREACH;
916
917 rc = tipc_send_group_msg(net, tsk, m, mb, node, port, dlen);
918
919 return rc ? rc : dlen;
920}
921
922/**
923 * tipc_send_group_anycast - send message to any member with given identity
924 * @sock: socket structure
925 * @m: message to send
926 * @dlen: total length of message data
927 * @timeout: timeout to wait for wakeup
928 *
929 * Called from function tipc_sendmsg(), which has done all sanity checks
930 * Returns the number of bytes sent on success, or errno
931 */
932static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m,
933 int dlen, long timeout)
934{
935 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
936 struct sock *sk = sock->sk;
937 struct tipc_sock *tsk = tipc_sk(sk);
938 struct list_head *cong_links = &tsk->cong_links;
939 int blks = tsk_blocks(GROUP_H_SIZE + dlen);
940 struct tipc_msg *hdr = &tsk->phdr;
941 struct tipc_member *first = NULL;
942 struct tipc_member *mbr = NULL;
943 struct net *net = sock_net(sk);
944 u32 node, port, exclude;
945 struct list_head dsts;
946 u32 type, inst, scope;
947 int lookups = 0;
948 int dstcnt, rc;
949 bool cong;
950
951 INIT_LIST_HEAD(&dsts);
952
953 type = msg_nametype(hdr);
954 inst = dest->addr.name.name.instance;
955 scope = msg_lookup_scope(hdr);
956
957 while (++lookups < 4) {
958 exclude = tipc_group_exclude(tsk->group);
959
960 first = NULL;
961
962 /* Look for a non-congested destination member, if any */
963 while (1) {
964 if (!tipc_nametbl_lookup(net, type, inst, scope, &dsts,
965 &dstcnt, exclude, false))
966 return -EHOSTUNREACH;
967 tipc_dest_pop(&dsts, &node, &port);
968 cong = tipc_group_cong(tsk->group, node, port, blks,
969 &mbr);
970 if (!cong)
971 break;
972 if (mbr == first)
973 break;
974 if (!first)
975 first = mbr;
976 }
977
978 /* Start over if destination was not in member list */
979 if (unlikely(!mbr))
980 continue;
981
982 if (likely(!cong && !tipc_dest_find(cong_links, node, 0)))
983 break;
984
985 /* Block or return if destination link or member is congested */
986 rc = tipc_wait_for_cond(sock, &timeout,
987 !tipc_dest_find(cong_links, node, 0) &&
988 tsk->group &&
989 !tipc_group_cong(tsk->group, node, port,
990 blks, &mbr));
991 if (unlikely(rc))
992 return rc;
993
994 /* Send, unless destination disappeared while waiting */
995 if (likely(mbr))
996 break;
997 }
998
999 if (unlikely(lookups >= 4))
1000 return -EHOSTUNREACH;
1001
1002 rc = tipc_send_group_msg(net, tsk, m, mbr, node, port, dlen);
1003
1004 return rc ? rc : dlen;
1005}
1006
1007/**
1008 * tipc_send_group_bcast - send message to all members in communication group
1009 * @sock: socket structure
1010 * @m: message to send
1011 * @dlen: total length of message data
1012 * @timeout: timeout to wait for wakeup
1013 *
1014 * Called from function tipc_sendmsg(), which has done all sanity checks
1015 * Returns the number of bytes sent on success, or errno
1016 */
1017static int tipc_send_group_bcast(struct socket *sock, struct msghdr *m,
1018 int dlen, long timeout)
1019{
1020 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
1021 struct sock *sk = sock->sk;
1022 struct net *net = sock_net(sk);
1023 struct tipc_sock *tsk = tipc_sk(sk);
1024 struct tipc_nlist *dsts;
1025 struct tipc_mc_method *method = &tsk->mc_method;
1026 bool ack = method->mandatory && method->rcast;
1027 int blks = tsk_blocks(MCAST_H_SIZE + dlen);
1028 struct tipc_msg *hdr = &tsk->phdr;
1029 int mtu = tipc_bcast_get_mtu(net);
1030 struct sk_buff_head pkts;
1031 int rc = -EHOSTUNREACH;
1032
1033 /* Block or return if any destination link or member is congested */
1034 rc = tipc_wait_for_cond(sock, &timeout,
1035 !tsk->cong_link_cnt && tsk->group &&
1036 !tipc_group_bc_cong(tsk->group, blks));
1037 if (unlikely(rc))
1038 return rc;
1039
1040 dsts = tipc_group_dests(tsk->group);
1041 if (!dsts->local && !dsts->remote)
1042 return -EHOSTUNREACH;
1043
1044 /* Complete message header */
1045 if (dest) {
1046 msg_set_type(hdr, TIPC_GRP_MCAST_MSG);
1047 msg_set_nameinst(hdr, dest->addr.name.name.instance);
1048 } else {
1049 msg_set_type(hdr, TIPC_GRP_BCAST_MSG);
1050 msg_set_nameinst(hdr, 0);
1051 }
1052 msg_set_hdr_sz(hdr, GROUP_H_SIZE);
1053 msg_set_destport(hdr, 0);
1054 msg_set_destnode(hdr, 0);
1055 msg_set_grp_bc_seqno(hdr, tipc_group_bc_snd_nxt(tsk->group));
1056
1057 /* Avoid getting stuck with repeated forced replicasts */
1058 msg_set_grp_bc_ack_req(hdr, ack);
1059
1060 /* Build message as chain of buffers */
1061 __skb_queue_head_init(&pkts);
1062 rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
1063 if (unlikely(rc != dlen))
1064 return rc;
1065
1066 /* Send message */
1067 rc = tipc_mcast_xmit(net, &pkts, method, dsts, &tsk->cong_link_cnt);
1068 if (unlikely(rc))
1069 return rc;
1070
1071 /* Update broadcast sequence number and send windows */
1072 tipc_group_update_bc_members(tsk->group, blks, ack);
1073
1074 /* Broadcast link is now free to choose method for next broadcast */
1075 method->mandatory = false;
1076 method->expires = jiffies;
1077
1078 return dlen;
1079}

/**
 * tipc_send_group_mcast - send message to all members with given identity
 * @sock: socket structure
 * @m: message to send
 * @dlen: total length of message data
 * @timeout: timeout to wait for wakeup
 *
 * Called from function tipc_sendmsg(), which has done all sanity checks
 * Returns the number of bytes sent on success, or errno
 */
static int tipc_send_group_mcast(struct socket *sock, struct msghdr *m,
				 int dlen, long timeout)
{
	struct sock *sk = sock->sk;
	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_group *grp = tsk->group;
	struct tipc_msg *hdr = &tsk->phdr;
	struct net *net = sock_net(sk);
	u32 type, inst, scope, exclude;
	struct list_head dsts;
	u32 dstcnt;

	INIT_LIST_HEAD(&dsts);

	type = msg_nametype(hdr);
	inst = dest->addr.name.name.instance;
	scope = msg_lookup_scope(hdr);
	exclude = tipc_group_exclude(grp);

	if (!tipc_nametbl_lookup(net, type, inst, scope, &dsts,
				 &dstcnt, exclude, true))
		return -EHOSTUNREACH;

	if (dstcnt == 1) {
		tipc_dest_pop(&dsts, &dest->addr.id.node, &dest->addr.id.ref);
		return tipc_send_group_unicast(sock, m, dlen, timeout);
	}

	tipc_dest_list_purge(&dsts);
	return tipc_send_group_bcast(sock, m, dlen, timeout);
}

/**
 * tipc_sk_mcast_rcv - Deliver multicast messages to all destination sockets
 * @net: network namespace
 * @arrvq: queue with arriving messages, to be cloned after destination lookup
 * @inputq: queue with cloned messages, delivered to socket after dest lookup
 *
 * Multi-threaded: parallel calls with reference to same queues may occur
 */
void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
		       struct sk_buff_head *inputq)
{
	u32 self = tipc_own_addr(net);
	u32 type, lower, upper, scope;
	struct sk_buff *skb, *_skb;
	u32 portid, onode;
	struct sk_buff_head tmpq;
	struct list_head dports;
	struct tipc_msg *hdr;
	int user, mtyp, hlen;
	bool exact;

	__skb_queue_head_init(&tmpq);
	INIT_LIST_HEAD(&dports);

	skb = tipc_skb_peek(arrvq, &inputq->lock);
	for (; skb; skb = tipc_skb_peek(arrvq, &inputq->lock)) {
		hdr = buf_msg(skb);
		user = msg_user(hdr);
		mtyp = msg_type(hdr);
		hlen = skb_headroom(skb) + msg_hdr_sz(hdr);
		onode = msg_orignode(hdr);
		type = msg_nametype(hdr);

		if (mtyp == TIPC_GRP_UCAST_MSG || user == GROUP_PROTOCOL) {
			spin_lock_bh(&inputq->lock);
			if (skb_peek(arrvq) == skb) {
				__skb_dequeue(arrvq);
				__skb_queue_tail(inputq, skb);
			}
			kfree_skb(skb);
			spin_unlock_bh(&inputq->lock);
			continue;
		}

		/* Group messages require exact scope match */
		if (msg_in_group(hdr)) {
			lower = 0;
			upper = ~0;
			scope = msg_lookup_scope(hdr);
			exact = true;
		} else {
			/* TIPC_NODE_SCOPE means "any scope" in this context */
			if (onode == self)
				scope = TIPC_NODE_SCOPE;
			else
				scope = TIPC_CLUSTER_SCOPE;
			exact = false;
			lower = msg_namelower(hdr);
			upper = msg_nameupper(hdr);
		}

		/* Create destination port list: */
		tipc_nametbl_mc_lookup(net, type, lower, upper,
				       scope, exact, &dports);

		/* Clone message per destination */
		while (tipc_dest_pop(&dports, NULL, &portid)) {
			_skb = __pskb_copy(skb, hlen, GFP_ATOMIC);
			if (_skb) {
				msg_set_destport(buf_msg(_skb), portid);
				__skb_queue_tail(&tmpq, _skb);
				continue;
			}
			pr_warn("Failed to clone mcast rcv buffer\n");
		}
		/* Append to inputq if not already done by other thread */
		spin_lock_bh(&inputq->lock);
		if (skb_peek(arrvq) == skb) {
			skb_queue_splice_tail_init(&tmpq, inputq);
			kfree_skb(__skb_dequeue(arrvq));
		}
		spin_unlock_bh(&inputq->lock);
		__skb_queue_purge(&tmpq);
		kfree_skb(skb);
	}
	tipc_sk_rcv(net, inputq);
}

/**
 * tipc_sk_conn_proto_rcv - receive a connection management protocol message
 * @tsk: receiving socket
 * @skb: pointer to message buffer.
 * @inputq: queue where a converted abort message may be enqueued
 * @xmitq: queue where a probe reply may be enqueued for transmission
 */
static void tipc_sk_conn_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
				   struct sk_buff_head *inputq,
				   struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	u32 onode = tsk_own_node(tsk);
	struct sock *sk = &tsk->sk;
	int mtyp = msg_type(hdr);
	bool conn_cong;

	/* Ignore if connection cannot be validated: */
	if (!tsk_peer_msg(tsk, hdr)) {
		trace_tipc_sk_drop_msg(sk, skb, TIPC_DUMP_NONE, "@proto_rcv!");
		goto exit;
	}

	if (unlikely(msg_errcode(hdr))) {
		tipc_set_sk_state(sk, TIPC_DISCONNECTING);
		tipc_node_remove_conn(sock_net(sk), tsk_peer_node(tsk),
				      tsk_peer_port(tsk));
		sk->sk_state_change(sk);

		/* State change is ignored if socket already awake,
		 * - convert msg to abort msg and add to inqueue
		 */
		msg_set_user(hdr, TIPC_CRITICAL_IMPORTANCE);
		msg_set_type(hdr, TIPC_CONN_MSG);
		msg_set_size(hdr, BASIC_H_SIZE);
		msg_set_hdr_sz(hdr, BASIC_H_SIZE);
		__skb_queue_tail(inputq, skb);
		return;
	}

	tsk->probe_unacked = false;

	if (mtyp == CONN_PROBE) {
		msg_set_type(hdr, CONN_PROBE_REPLY);
		if (tipc_msg_reverse(onode, &skb, TIPC_OK))
			__skb_queue_tail(xmitq, skb);
		return;
	} else if (mtyp == CONN_ACK) {
		conn_cong = tsk_conn_cong(tsk);
		tsk->snt_unacked -= msg_conn_ack(hdr);
		if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
			tsk->snd_win = msg_adv_win(hdr);
		if (conn_cong)
			sk->sk_write_space(sk);
	} else if (mtyp != CONN_PROBE_REPLY) {
		pr_warn("Received unknown CONN_PROTO msg\n");
	}
exit:
	kfree_skb(skb);
}

/**
 * tipc_sendmsg - send message in connectionless manner
 * @sock: socket structure
 * @m: message to send
 * @dsz: amount of user data to be sent
 *
 * Message must have a destination specified explicitly.
 * Used for SOCK_RDM and SOCK_DGRAM messages,
 * and for 'SYN' messages on SOCK_SEQPACKET and SOCK_STREAM connections.
 * (Note: 'SYN+' is prohibited on SOCK_STREAM.)
 *
 * Returns the number of bytes sent on success, or errno otherwise
 */
static int tipc_sendmsg(struct socket *sock,
			struct msghdr *m, size_t dsz)
{
	struct sock *sk = sock->sk;
	int ret;

	lock_sock(sk);
	ret = __tipc_sendmsg(sock, m, dsz);
	release_sock(sk);

	return ret;
}

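/* __tipc_sendmsg - datagram send worker, called with socket lock held.
 *
 * Resolves the destination (group send, multicast, named or direct
 * address), blocks on link congestion unless MSG_DONTWAIT is set, and,
 * for implicit connection setup, clones the SYN into sk_write_queue so
 * that it can be retransmitted from the socket timer.
 */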
static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct tipc_sock *tsk = tipc_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
	long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
	struct list_head *clinks = &tsk->cong_links;
	bool syn = !tipc_sk_type_connectionless(sk);
	struct tipc_group *grp = tsk->group;
	struct tipc_msg *hdr = &tsk->phdr;
	struct tipc_name_seq *seq;
	struct sk_buff_head pkts;
	u32 dport, dnode = 0;
	u32 type, inst;
	int mtu, rc;

	if (unlikely(dlen > TIPC_MAX_USER_MSG_SIZE))
		return -EMSGSIZE;

	if (likely(dest)) {
		if (unlikely(m->msg_namelen < sizeof(*dest)))
			return -EINVAL;
		if (unlikely(dest->family != AF_TIPC))
			return -EINVAL;
	}

	if (grp) {
		if (!dest)
			return tipc_send_group_bcast(sock, m, dlen, timeout);
		if (dest->addrtype == TIPC_ADDR_NAME)
			return tipc_send_group_anycast(sock, m, dlen, timeout);
		if (dest->addrtype == TIPC_ADDR_ID)
			return tipc_send_group_unicast(sock, m, dlen, timeout);
		if (dest->addrtype == TIPC_ADDR_MCAST)
			return tipc_send_group_mcast(sock, m, dlen, timeout);
		return -EINVAL;
	}

	if (unlikely(!dest)) {
		dest = &tsk->peer;
		if (!syn && dest->family != AF_TIPC)
			return -EDESTADDRREQ;
	}

	if (unlikely(syn)) {
		if (sk->sk_state == TIPC_LISTEN)
			return -EPIPE;
		if (sk->sk_state != TIPC_OPEN)
			return -EISCONN;
		if (tsk->published)
			return -EOPNOTSUPP;
		if (dest->addrtype == TIPC_ADDR_NAME) {
			tsk->conn_type = dest->addr.name.name.type;
			tsk->conn_instance = dest->addr.name.name.instance;
		}
		msg_set_syn(hdr, 1);
	}

	seq = &dest->addr.nameseq;
	if (dest->addrtype == TIPC_ADDR_MCAST)
		return tipc_sendmcast(sock, seq, m, dlen, timeout);

	if (dest->addrtype == TIPC_ADDR_NAME) {
		type = dest->addr.name.name.type;
		inst = dest->addr.name.name.instance;
		dnode = dest->addr.name.domain;
		msg_set_type(hdr, TIPC_NAMED_MSG);
		msg_set_hdr_sz(hdr, NAMED_H_SIZE);
		msg_set_nametype(hdr, type);
		msg_set_nameinst(hdr, inst);
		msg_set_lookup_scope(hdr, tipc_node2scope(dnode));
		dport = tipc_nametbl_translate(net, type, inst, &dnode);
		msg_set_destnode(hdr, dnode);
		msg_set_destport(hdr, dport);
		if (unlikely(!dport && !dnode))
			return -EHOSTUNREACH;
	} else if (dest->addrtype == TIPC_ADDR_ID) {
		dnode = dest->addr.id.node;
		msg_set_type(hdr, TIPC_DIRECT_MSG);
		msg_set_lookup_scope(hdr, 0);
		msg_set_destnode(hdr, dnode);
		msg_set_destport(hdr, dest->addr.id.ref);
		msg_set_hdr_sz(hdr, BASIC_H_SIZE);
	} else {
		return -EINVAL;
	}

	/* Block or return if destination link is congested */
	rc = tipc_wait_for_cond(sock, &timeout,
				!tipc_dest_find(clinks, dnode, 0));
	if (unlikely(rc))
		return rc;

	__skb_queue_head_init(&pkts);
	mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
	rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
	if (unlikely(rc != dlen))
		return rc;
	if (unlikely(syn && !tipc_msg_skb_clone(&pkts, &sk->sk_write_queue)))
		return -ENOMEM;

	trace_tipc_sk_sendmsg(sk, skb_peek(&pkts), TIPC_DUMP_SK_SNDQ, " ");
	rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
	if (unlikely(rc == -ELINKCONG)) {
		tipc_dest_push(clinks, dnode, 0);
		tsk->cong_link_cnt++;
		rc = 0;
	}

	if (unlikely(syn && !rc))
		tipc_set_sk_state(sk, TIPC_CONNECTING);

	return rc ? rc : dlen;
}

/**
 * tipc_sendstream - send stream-oriented data
 * @sock: socket structure
 * @m: data to send
 * @dsz: total length of data to be transmitted
 *
 * Used for SOCK_STREAM data.
 *
 * Returns the number of bytes sent on success (or partial success),
 * or errno if no data sent
 */
static int tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz)
{
	struct sock *sk = sock->sk;
	int ret;

	lock_sock(sk);
	ret = __tipc_sendstream(sock, m, dsz);
	release_sock(sk);

	return ret;
}

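/* __tipc_sendstream - stream send worker, called with socket lock held.
 *
 * Chops the user data into chunks of at most TIPC_MAX_USER_MSG_SIZE
 * bytes, waits for both link and connection level congestion to clear
 * before each chunk, and returns the number of bytes actually queued
 * for transmission.
 */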
static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen)
{
	struct sock *sk = sock->sk;
	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
	long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_msg *hdr = &tsk->phdr;
	struct net *net = sock_net(sk);
	struct sk_buff_head pkts;
	u32 dnode = tsk_peer_node(tsk);
	int send, sent = 0;
	int rc = 0;

	__skb_queue_head_init(&pkts);

	if (unlikely(dlen > INT_MAX))
		return -EMSGSIZE;

	/* Handle implicit connection setup */
	if (unlikely(dest)) {
		rc = __tipc_sendmsg(sock, m, dlen);
		if (dlen && dlen == rc) {
			tsk->peer_caps = tipc_node_get_capabilities(net, dnode);
			tsk->snt_unacked = tsk_inc(tsk, dlen + msg_hdr_sz(hdr));
		}
		return rc;
	}

	do {
		rc = tipc_wait_for_cond(sock, &timeout,
					(!tsk->cong_link_cnt &&
					 !tsk_conn_cong(tsk) &&
					 tipc_sk_connected(sk)));
		if (unlikely(rc))
			break;

		send = min_t(size_t, dlen - sent, TIPC_MAX_USER_MSG_SIZE);
		rc = tipc_msg_build(hdr, m, sent, send, tsk->max_pkt, &pkts);
		if (unlikely(rc != send))
			break;

		trace_tipc_sk_sendstream(sk, skb_peek(&pkts),
					 TIPC_DUMP_SK_SNDQ, " ");
		rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
		if (unlikely(rc == -ELINKCONG)) {
			tsk->cong_link_cnt = 1;
			rc = 0;
		}
		if (likely(!rc)) {
			tsk->snt_unacked += tsk_inc(tsk, send + MIN_H_SIZE);
			sent += send;
		}
	} while (sent < dlen && !rc);

	return sent ? sent : rc;
}

/**
 * tipc_send_packet - send a connection-oriented message
 * @sock: socket structure
 * @m: message to send
 * @dsz: length of data to be transmitted
 *
 * Used for SOCK_SEQPACKET messages.
 *
 * Returns the number of bytes sent on success, or errno otherwise
 */
static int tipc_send_packet(struct socket *sock, struct msghdr *m, size_t dsz)
{
	if (dsz > TIPC_MAX_USER_MSG_SIZE)
		return -EMSGSIZE;

	return tipc_sendstream(sock, m, dsz);
}

/* tipc_sk_finish_conn - complete the setup of a connection
 */
static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port,
				u32 peer_node)
{
	struct sock *sk = &tsk->sk;
	struct net *net = sock_net(sk);
	struct tipc_msg *msg = &tsk->phdr;

	msg_set_syn(msg, 0);
	msg_set_destnode(msg, peer_node);
	msg_set_destport(msg, peer_port);
	msg_set_type(msg, TIPC_CONN_MSG);
	msg_set_lookup_scope(msg, 0);
	msg_set_hdr_sz(msg, SHORT_H_SIZE);

	sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV);
	tipc_set_sk_state(sk, TIPC_ESTABLISHED);
	tipc_node_add_conn(net, peer_node, tsk->portid, peer_port);
	tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid);
	tsk->peer_caps = tipc_node_get_capabilities(net, peer_node);
	__skb_queue_purge(&sk->sk_write_queue);
	if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
		return;

	/* Fall back to message based flow control */
	tsk->rcv_win = FLOWCTL_MSG_WIN;
	tsk->snd_win = FLOWCTL_MSG_WIN;
}

/**
 * tipc_sk_set_orig_addr - capture sender's address for received message
 * @m: descriptor for message info
 * @skb: received message buffer
 *
 * Note: Address is not captured if not requested by receiver.
 */
static void tipc_sk_set_orig_addr(struct msghdr *m, struct sk_buff *skb)
{
	DECLARE_SOCKADDR(struct sockaddr_pair *, srcaddr, m->msg_name);
	struct tipc_msg *hdr = buf_msg(skb);

	if (!srcaddr)
		return;

	srcaddr->sock.family = AF_TIPC;
	srcaddr->sock.addrtype = TIPC_ADDR_ID;
	srcaddr->sock.scope = 0;
	srcaddr->sock.addr.id.ref = msg_origport(hdr);
	srcaddr->sock.addr.id.node = msg_orignode(hdr);
	srcaddr->sock.addr.name.domain = 0;
	m->msg_namelen = sizeof(struct sockaddr_tipc);

	if (!msg_in_group(hdr))
		return;

	/* Group message users may also want to know sending member's id */
	srcaddr->member.family = AF_TIPC;
	srcaddr->member.addrtype = TIPC_ADDR_NAME;
	srcaddr->member.scope = 0;
	srcaddr->member.addr.name.name.type = msg_nametype(hdr);
	srcaddr->member.addr.name.name.instance = TIPC_SKB_CB(skb)->orig_member;
	srcaddr->member.addr.name.domain = 0;
	m->msg_namelen = sizeof(*srcaddr);
}

/**
 * tipc_sk_anc_data_recv - optionally capture ancillary data for received message
 * @m: descriptor for message info
 * @skb: received message buffer
 * @tsk: TIPC port associated with message
 *
 * Note: Ancillary data is not captured if not requested by receiver.
 *
 * Returns 0 if successful, otherwise errno
 */
static int tipc_sk_anc_data_recv(struct msghdr *m, struct sk_buff *skb,
				 struct tipc_sock *tsk)
{
	struct tipc_msg *msg;
	u32 anc_data[3];
	u32 err;
	u32 dest_type;
	int has_name;
	int res;

	if (likely(m->msg_controllen == 0))
		return 0;
	msg = buf_msg(skb);

	/* Optionally capture errored message object(s) */
	err = msg ? msg_errcode(msg) : 0;
	if (unlikely(err)) {
		anc_data[0] = err;
		anc_data[1] = msg_data_sz(msg);
		res = put_cmsg(m, SOL_TIPC, TIPC_ERRINFO, 8, anc_data);
		if (res)
			return res;
		if (anc_data[1]) {
			if (skb_linearize(skb))
				return -ENOMEM;
			msg = buf_msg(skb);
			res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1],
				       msg_data(msg));
			if (res)
				return res;
		}
	}

	/* Optionally capture message destination object */
	dest_type = msg ? msg_type(msg) : TIPC_DIRECT_MSG;
	switch (dest_type) {
	case TIPC_NAMED_MSG:
		has_name = 1;
		anc_data[0] = msg_nametype(msg);
		anc_data[1] = msg_namelower(msg);
		anc_data[2] = msg_namelower(msg);
		break;
	case TIPC_MCAST_MSG:
		has_name = 1;
		anc_data[0] = msg_nametype(msg);
		anc_data[1] = msg_namelower(msg);
		anc_data[2] = msg_nameupper(msg);
		break;
	case TIPC_CONN_MSG:
		has_name = (tsk->conn_type != 0);
		anc_data[0] = tsk->conn_type;
		anc_data[1] = tsk->conn_instance;
		anc_data[2] = tsk->conn_instance;
		break;
	default:
		has_name = 0;
	}
	if (has_name) {
		res = put_cmsg(m, SOL_TIPC, TIPC_DESTNAME, 12, anc_data);
		if (res)
			return res;
	}

	return 0;
}

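/* tipc_sk_send_ack - send a CONN_ACK to the connected peer, advertising
 * the number of messages (or, with block flow control, buffer blocks)
 * read by the user since the last acknowledgment.
 */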
static void tipc_sk_send_ack(struct tipc_sock *tsk)
{
	struct sock *sk = &tsk->sk;
	struct net *net = sock_net(sk);
	struct sk_buff *skb = NULL;
	struct tipc_msg *msg;
	u32 peer_port = tsk_peer_port(tsk);
	u32 dnode = tsk_peer_node(tsk);

	if (!tipc_sk_connected(sk))
		return;
	skb = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0,
			      dnode, tsk_own_node(tsk), peer_port,
			      tsk->portid, TIPC_OK);
	if (!skb)
		return;
	msg = buf_msg(skb);
	msg_set_conn_ack(msg, tsk->rcv_unacked);
	tsk->rcv_unacked = 0;

	/* Adjust to and advertise the correct window limit */
	if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL) {
		tsk->rcv_win = tsk_adv_blocks(tsk->sk.sk_rcvbuf);
		msg_set_adv_win(msg, tsk->rcv_win);
	}
	tipc_node_xmit_skb(net, skb, dnode, msg_link_selector(msg));
}

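/* tipc_wait_for_rcvmsg - wait until the receive queue is non-empty, or
 * until an error, shutdown, timeout or signal intervenes. Returns 0 when
 * there is something to read, otherwise the applicable negative errno.
 */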
static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
{
	struct sock *sk = sock->sk;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	long timeo = *timeop;
	int err = sock_error(sk);

	if (err)
		return err;

	for (;;) {
		if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
			if (sk->sk_shutdown & RCV_SHUTDOWN) {
				err = -ENOTCONN;
				break;
			}
			add_wait_queue(sk_sleep(sk), &wait);
			release_sock(sk);
			timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
			sched_annotate_sleep();
			lock_sock(sk);
			remove_wait_queue(sk_sleep(sk), &wait);
		}
		err = 0;
		if (!skb_queue_empty(&sk->sk_receive_queue))
			break;
		err = -EAGAIN;
		if (!timeo)
			break;
		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			break;

		err = sock_error(sk);
		if (err)
			break;
	}
	*timeop = timeo;
	return err;
}

/**
 * tipc_recvmsg - receive packet-oriented message
 * @sock: network socket
 * @m: descriptor for message info
 * @buflen: length of user buffer area
 * @flags: receive flags
 *
 * Used for SOCK_DGRAM, SOCK_RDM, and SOCK_SEQPACKET messages.
 * If the complete message doesn't fit in user area, truncate it.
 *
 * Returns size of returned message data, errno otherwise
 */
static int tipc_recvmsg(struct socket *sock, struct msghdr *m,
			size_t buflen, int flags)
{
	struct sock *sk = sock->sk;
	bool connected = !tipc_sk_type_connectionless(sk);
	struct tipc_sock *tsk = tipc_sk(sk);
	int rc, err, hlen, dlen, copy;
	struct sk_buff_head xmitq;
	struct tipc_msg *hdr;
	struct sk_buff *skb;
	bool grp_evt;
	long timeout;

	/* Catch invalid receive requests */
	if (unlikely(!buflen))
		return -EINVAL;

	lock_sock(sk);
	if (unlikely(connected && sk->sk_state == TIPC_OPEN)) {
		rc = -ENOTCONN;
		goto exit;
	}
	timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	/* Step rcv queue to first msg with data or error; wait if necessary */
	do {
		rc = tipc_wait_for_rcvmsg(sock, &timeout);
		if (unlikely(rc))
			goto exit;
		skb = skb_peek(&sk->sk_receive_queue);
		hdr = buf_msg(skb);
		dlen = msg_data_sz(hdr);
		hlen = msg_hdr_sz(hdr);
		err = msg_errcode(hdr);
		grp_evt = msg_is_grp_evt(hdr);
		if (likely(dlen || err))
			break;
		tsk_advance_rx_queue(sk);
	} while (1);

	/* Collect msg meta data, including error code and rejected data */
	tipc_sk_set_orig_addr(m, skb);
	rc = tipc_sk_anc_data_recv(m, skb, tsk);
	if (unlikely(rc))
		goto exit;
	hdr = buf_msg(skb);

	/* Capture data if non-error msg, otherwise just set return value */
	if (likely(!err)) {
		copy = min_t(int, dlen, buflen);
		if (unlikely(copy != dlen))
			m->msg_flags |= MSG_TRUNC;
		rc = skb_copy_datagram_msg(skb, hlen, m, copy);
	} else {
		copy = 0;
		rc = 0;
		if (err != TIPC_CONN_SHUTDOWN && connected && !m->msg_control)
			rc = -ECONNRESET;
	}
	if (unlikely(rc))
		goto exit;

	/* Mark message as group event if applicable */
	if (unlikely(grp_evt)) {
		if (msg_grp_evt(hdr) == TIPC_WITHDRAWN)
			m->msg_flags |= MSG_EOR;
		m->msg_flags |= MSG_OOB;
		copy = 0;
	}

	/* Capture of data or error code/rejected data was successful */
	if (unlikely(flags & MSG_PEEK))
		goto exit;

	/* Send group flow control advertisement when applicable */
	if (tsk->group && msg_in_group(hdr) && !grp_evt) {
		__skb_queue_head_init(&xmitq);
		tipc_group_update_rcv_win(tsk->group, tsk_blocks(hlen + dlen),
					  msg_orignode(hdr), msg_origport(hdr),
					  &xmitq);
		tipc_node_distr_xmit(sock_net(sk), &xmitq);
	}

	tsk_advance_rx_queue(sk);

	if (likely(!connected))
		goto exit;

	/* Send connection flow control advertisement when applicable */
	tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen);
	if (tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE)
		tipc_sk_send_ack(tsk);
exit:
	release_sock(sk);
	return rc ? rc : copy;
}

/**
 * tipc_recvstream - receive stream-oriented data
 * @sock: network socket
 * @m: descriptor for message info
 * @buflen: total size of user buffer area
 * @flags: receive flags
 *
 * Used for SOCK_STREAM messages only. If not enough data is available,
 * it will optionally wait for more; never truncates data.
 *
 * Returns size of returned message data, errno otherwise
 */
static int tipc_recvstream(struct socket *sock, struct msghdr *m,
			   size_t buflen, int flags)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct sk_buff *skb;
	struct tipc_msg *hdr;
	struct tipc_skb_cb *skb_cb;
	bool peek = flags & MSG_PEEK;
	int offset, required, copy, copied = 0;
	int hlen, dlen, err, rc;
	long timeout;

	/* Catch invalid receive attempts */
	if (unlikely(!buflen))
		return -EINVAL;

	lock_sock(sk);

	if (unlikely(sk->sk_state == TIPC_OPEN)) {
		rc = -ENOTCONN;
		goto exit;
	}
	required = sock_rcvlowat(sk, flags & MSG_WAITALL, buflen);
	timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	do {
		/* Look at first msg in receive queue; wait if necessary */
		rc = tipc_wait_for_rcvmsg(sock, &timeout);
		if (unlikely(rc))
			break;
		skb = skb_peek(&sk->sk_receive_queue);
		skb_cb = TIPC_SKB_CB(skb);
		hdr = buf_msg(skb);
		dlen = msg_data_sz(hdr);
		hlen = msg_hdr_sz(hdr);
		err = msg_errcode(hdr);

		/* Discard any empty non-errored (SYN-) message */
		if (unlikely(!dlen && !err)) {
			tsk_advance_rx_queue(sk);
			continue;
		}

		/* Collect msg meta data, incl. error code and rejected data */
		if (!copied) {
			tipc_sk_set_orig_addr(m, skb);
			rc = tipc_sk_anc_data_recv(m, skb, tsk);
			if (rc)
				break;
			hdr = buf_msg(skb);
		}

		/* Copy data if msg ok, otherwise return error/partial data */
		if (likely(!err)) {
			offset = skb_cb->bytes_read;
			copy = min_t(int, dlen - offset, buflen - copied);
			rc = skb_copy_datagram_msg(skb, hlen + offset, m, copy);
			if (unlikely(rc))
				break;
			copied += copy;
			offset += copy;
			if (unlikely(offset < dlen)) {
				if (!peek)
					skb_cb->bytes_read = offset;
				break;
			}
		} else {
			rc = 0;
			if ((err != TIPC_CONN_SHUTDOWN) && !m->msg_control)
				rc = -ECONNRESET;
			if (copied || rc)
				break;
		}

		if (unlikely(peek))
			break;

		tsk_advance_rx_queue(sk);

		/* Send connection flow control advertisement when applicable */
		tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen);
		if (unlikely(tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE))
			tipc_sk_send_ack(tsk);

		/* Exit if all requested data or FIN/error received */
		if (copied == buflen || err)
			break;

	} while (!skb_queue_empty(&sk->sk_receive_queue) || copied < required);
exit:
	release_sock(sk);
	return copied ? copied : rc;
}

/**
 * tipc_write_space - wake up thread if port congestion is released
 * @sk: socket
 */
static void tipc_write_space(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
						EPOLLWRNORM | EPOLLWRBAND);
	rcu_read_unlock();
}

/**
 * tipc_data_ready - wake up threads to indicate messages have been received
 * @sk: socket
 */
static void tipc_data_ready(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN |
						EPOLLRDNORM | EPOLLRDBAND);
	rcu_read_unlock();
}

static void tipc_sock_destruct(struct sock *sk)
{
	__skb_queue_purge(&sk->sk_receive_queue);
}

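/* tipc_sk_proto_rcv - dispatch a non-data (protocol) message to the
 * appropriate handler: connection management, link congestion wakeup,
 * group protocol, or topology server events. Consumes the buffer.
 */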
static void tipc_sk_proto_rcv(struct sock *sk,
			      struct sk_buff_head *inputq,
			      struct sk_buff_head *xmitq)
{
	struct sk_buff *skb = __skb_dequeue(inputq);
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_msg *hdr = buf_msg(skb);
	struct tipc_group *grp = tsk->group;
	bool wakeup = false;

	switch (msg_user(hdr)) {
	case CONN_MANAGER:
		tipc_sk_conn_proto_rcv(tsk, skb, inputq, xmitq);
		return;
	case SOCK_WAKEUP:
		tipc_dest_del(&tsk->cong_links, msg_orignode(hdr), 0);
		/* coupled with smp_rmb() in tipc_wait_for_cond() */
		smp_wmb();
		tsk->cong_link_cnt--;
		wakeup = true;
		break;
	case GROUP_PROTOCOL:
		tipc_group_proto_rcv(grp, &wakeup, hdr, inputq, xmitq);
		break;
	case TOP_SRV:
		tipc_group_member_evt(tsk->group, &wakeup, &sk->sk_rcvbuf,
				      hdr, inputq, xmitq);
		break;
	default:
		break;
	}

	if (wakeup)
		sk->sk_write_space(sk);

	kfree_skb(skb);
}

/**
 * tipc_sk_filter_connect - check incoming message for a connection-based socket
 * @tsk: TIPC socket
 * @skb: pointer to message buffer.
 * Returns true if message should be added to receive queue, false otherwise
 */
static bool tipc_sk_filter_connect(struct tipc_sock *tsk, struct sk_buff *skb)
{
	struct sock *sk = &tsk->sk;
	struct net *net = sock_net(sk);
	struct tipc_msg *hdr = buf_msg(skb);
	bool con_msg = msg_connected(hdr);
	u32 pport = tsk_peer_port(tsk);
	u32 pnode = tsk_peer_node(tsk);
	u32 oport = msg_origport(hdr);
	u32 onode = msg_orignode(hdr);
	int err = msg_errcode(hdr);
	unsigned long delay;

	if (unlikely(msg_mcast(hdr)))
		return false;

	switch (sk->sk_state) {
	case TIPC_CONNECTING:
		/* Setup ACK */
		if (likely(con_msg)) {
			if (err)
				break;
			tipc_sk_finish_conn(tsk, oport, onode);
			msg_set_importance(&tsk->phdr, msg_importance(hdr));
			/* ACK+ message with data is added to receive queue */
			if (msg_data_sz(hdr))
				return true;
			/* Empty ACK-: wake up sleeping connect() and drop */
			sk->sk_state_change(sk);
			msg_set_dest_droppable(hdr, 1);
			return false;
		}
		/* Ignore connectionless message if not from listening socket */
		if (oport != pport || onode != pnode)
			return false;

		/* Rejected SYN */
		if (err != TIPC_ERR_OVERLOAD)
			break;

		/* Prepare for new setup attempt if we have a SYN clone */
		if (skb_queue_empty(&sk->sk_write_queue))
			break;
		get_random_bytes(&delay, 2);
		delay %= (tsk->conn_timeout / 4);
		delay = msecs_to_jiffies(delay + 100);
		sk_reset_timer(sk, &sk->sk_timer, jiffies + delay);
		return false;
	case TIPC_OPEN:
	case TIPC_DISCONNECTING:
		return false;
	case TIPC_LISTEN:
		/* Accept only SYN message */
		if (!msg_is_syn(hdr) &&
		    tipc_node_get_capabilities(net, onode) & TIPC_SYN_BIT)
			return false;
		if (!con_msg && !err)
			return true;
		return false;
	case TIPC_ESTABLISHED:
		/* Accept only connection-based messages sent by peer */
		if (likely(con_msg && !err && pport == oport && pnode == onode))
			return true;
		if (!tsk_peer_msg(tsk, hdr))
			return false;
		if (!err)
			return true;
		tipc_set_sk_state(sk, TIPC_DISCONNECTING);
		tipc_node_remove_conn(net, pnode, tsk->portid);
		sk->sk_state_change(sk);
		return true;
	default:
		pr_err("Unknown sk_state %u\n", sk->sk_state);
	}
	/* Abort connection setup attempt */
	tipc_set_sk_state(sk, TIPC_DISCONNECTING);
	sk->sk_err = ECONNREFUSED;
	sk->sk_state_change(sk);
	return true;
}

/**
 * rcvbuf_limit - get proper overload limit of socket receive queue
 * @sk: socket
 * @skb: message
 *
 * For connection oriented messages, irrespective of importance,
 * default queue limit is 2 MB.
 *
 * For connectionless messages, queue limits are based on message
 * importance as follows:
 *
 * TIPC_LOW_IMPORTANCE       (2 MB)
 * TIPC_MEDIUM_IMPORTANCE    (4 MB)
 * TIPC_HIGH_IMPORTANCE      (8 MB)
 * TIPC_CRITICAL_IMPORTANCE  (16 MB)
 *
 * Returns overload limit according to corresponding message importance
 */
static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *skb)
{
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_msg *hdr = buf_msg(skb);

	if (unlikely(msg_in_group(hdr)))
		return READ_ONCE(sk->sk_rcvbuf);

	if (unlikely(!msg_connected(hdr)))
		return READ_ONCE(sk->sk_rcvbuf) << msg_importance(hdr);

	if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
		return READ_ONCE(sk->sk_rcvbuf);

	return FLOWCTL_MSG_LIM;
}

/**
 * tipc_sk_filter_rcv - validate incoming message
 * @sk: socket
 * @skb: pointer to message.
 *
 * Enqueues message on receive queue if acceptable; optionally handles
 * disconnect indication for a connected socket.
 *
 * Called with socket lock already taken
 */
static void tipc_sk_filter_rcv(struct sock *sk, struct sk_buff *skb,
			       struct sk_buff_head *xmitq)
{
	bool sk_conn = !tipc_sk_type_connectionless(sk);
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_group *grp = tsk->group;
	struct tipc_msg *hdr = buf_msg(skb);
	struct net *net = sock_net(sk);
	struct sk_buff_head inputq;
	int mtyp = msg_type(hdr);
	int limit, err = TIPC_OK;

	trace_tipc_sk_filter_rcv(sk, skb, TIPC_DUMP_ALL, " ");
	TIPC_SKB_CB(skb)->bytes_read = 0;
	__skb_queue_head_init(&inputq);
	__skb_queue_tail(&inputq, skb);

	if (unlikely(!msg_isdata(hdr)))
		tipc_sk_proto_rcv(sk, &inputq, xmitq);

	if (unlikely(grp))
		tipc_group_filter_msg(grp, &inputq, xmitq);

	if (unlikely(!grp) && mtyp == TIPC_MCAST_MSG)
		tipc_mcast_filter_msg(net, &tsk->mc_method.deferredq, &inputq);

	/* Validate and add to receive buffer if there is space */
	while ((skb = __skb_dequeue(&inputq))) {
		hdr = buf_msg(skb);
		limit = rcvbuf_limit(sk, skb);
		if ((sk_conn && !tipc_sk_filter_connect(tsk, skb)) ||
		    (!sk_conn && msg_connected(hdr)) ||
		    (!grp && msg_in_group(hdr)))
			err = TIPC_ERR_NO_PORT;
		else if (sk_rmem_alloc_get(sk) + skb->truesize >= limit) {
			trace_tipc_sk_dump(sk, skb, TIPC_DUMP_ALL,
					   "err_overload2!");
			atomic_inc(&sk->sk_drops);
			err = TIPC_ERR_OVERLOAD;
		}

		if (unlikely(err)) {
			if (tipc_msg_reverse(tipc_own_addr(net), &skb, err)) {
				trace_tipc_sk_rej_msg(sk, skb, TIPC_DUMP_NONE,
						      "@filter_rcv!");
				__skb_queue_tail(xmitq, skb);
			}
			err = TIPC_OK;
			continue;
		}
		__skb_queue_tail(&sk->sk_receive_queue, skb);
		skb_set_owner_r(skb, sk);
		trace_tipc_sk_overlimit2(sk, skb, TIPC_DUMP_ALL,
					 "rcvq >90% allocated!");
		sk->sk_data_ready(sk);
	}
}

/**
 * tipc_sk_backlog_rcv - handle incoming message from backlog queue
 * @sk: socket
 * @skb: message
 *
 * Caller must hold socket lock
 */
static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	unsigned int before = sk_rmem_alloc_get(sk);
	struct sk_buff_head xmitq;
	unsigned int added;

	__skb_queue_head_init(&xmitq);

	tipc_sk_filter_rcv(sk, skb, &xmitq);
	added = sk_rmem_alloc_get(sk) - before;
	atomic_add(added, &tipc_sk(sk)->dupl_rcvcnt);

	/* Send pending response/rejected messages, if any */
	tipc_node_distr_xmit(sock_net(sk), &xmitq);
	return 0;
}

/**
 * tipc_sk_enqueue - extract all buffers with destination 'dport' from
 *                   inputq and try adding them to socket or backlog queue
 * @inputq: list of incoming buffers with potentially different destinations
 * @sk: socket where the buffers should be enqueued
 * @dport: port number for the socket
 *
 * Caller must hold socket lock
 */
static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
			    u32 dport, struct sk_buff_head *xmitq)
{
	unsigned long time_limit = jiffies + 2;
	struct sk_buff *skb;
	unsigned int lim;
	atomic_t *dcnt;
	u32 onode;

	while (skb_queue_len(inputq)) {
		if (unlikely(time_after_eq(jiffies, time_limit)))
			return;

		skb = tipc_skb_dequeue(inputq, dport);
		if (unlikely(!skb))
			return;

		/* Add message directly to receive queue if possible */
		if (!sock_owned_by_user(sk)) {
			tipc_sk_filter_rcv(sk, skb, xmitq);
			continue;
		}

		/* Try backlog, compensating for double-counted bytes */
		dcnt = &tipc_sk(sk)->dupl_rcvcnt;
		if (!sk->sk_backlog.len)
			atomic_set(dcnt, 0);
		lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
		if (likely(!sk_add_backlog(sk, skb, lim))) {
			trace_tipc_sk_overlimit1(sk, skb, TIPC_DUMP_ALL,
						 "bklg & rcvq >90% allocated!");
			continue;
		}

		trace_tipc_sk_dump(sk, skb, TIPC_DUMP_ALL, "err_overload!");
		/* Overload => reject message back to sender */
		onode = tipc_own_addr(sock_net(sk));
		atomic_inc(&sk->sk_drops);
		if (tipc_msg_reverse(onode, &skb, TIPC_ERR_OVERLOAD)) {
			trace_tipc_sk_rej_msg(sk, skb, TIPC_DUMP_ALL,
					      "@sk_enqueue!");
			__skb_queue_tail(xmitq, skb);
		}
		break;
	}
}

/**
 * tipc_sk_rcv - handle a chain of incoming buffers
 * @net: network namespace
 * @inputq: buffer list containing the buffers
 * Consumes all buffers in list until inputq is empty
 * Note: may be called in multiple threads referring to the same queue
 */
void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
{
	struct sk_buff_head xmitq;
	u32 dnode, dport = 0;
	int err;
	struct tipc_sock *tsk;
	struct sock *sk;
	struct sk_buff *skb;

	__skb_queue_head_init(&xmitq);
	while (skb_queue_len(inputq)) {
		dport = tipc_skb_peek_port(inputq, dport);
		tsk = tipc_sk_lookup(net, dport);

		if (likely(tsk)) {
			sk = &tsk->sk;
			if (likely(spin_trylock_bh(&sk->sk_lock.slock))) {
				tipc_sk_enqueue(inputq, sk, dport, &xmitq);
				spin_unlock_bh(&sk->sk_lock.slock);
			}
			/* Send pending response/rejected messages, if any */
			tipc_node_distr_xmit(sock_net(sk), &xmitq);
			sock_put(sk);
			continue;
		}
		/* No destination socket => dequeue skb if still there */
		skb = tipc_skb_dequeue(inputq, dport);
		if (!skb)
			return;

		/* Try secondary lookup if unresolved named message */
		err = TIPC_ERR_NO_PORT;
		if (tipc_msg_lookup_dest(net, skb, &err))
			goto xmit;

		/* Prepare for message rejection */
		if (!tipc_msg_reverse(tipc_own_addr(net), &skb, err))
			continue;

		trace_tipc_sk_rej_msg(NULL, skb, TIPC_DUMP_NONE, "@sk_rcv!");
xmit:
		dnode = msg_destnode(buf_msg(skb));
		tipc_node_xmit_skb(net, skb, dnode, dport);
	}
}

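/* tipc_wait_for_connect - wait until the socket leaves TIPC_CONNECTING
 * state, or until an error, timeout or signal intervenes.
 */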
static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	struct sock *sk = sock->sk;
	int done;

	do {
		int err = sock_error(sk);

		if (err)
			return err;
		if (!*timeo_p)
			return -ETIMEDOUT;
		if (signal_pending(current))
			return sock_intr_errno(*timeo_p);

		add_wait_queue(sk_sleep(sk), &wait);
		done = sk_wait_event(sk, timeo_p,
				     sk->sk_state != TIPC_CONNECTING, &wait);
		remove_wait_queue(sk_sleep(sk), &wait);
	} while (!done);
	return 0;
}

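/* tipc_sockaddr_is_sane - check that a sockaddr_tipc is well formed:
 * correct family, a known address type, and a non-inverted range when
 * a service range is given.
 */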
static bool tipc_sockaddr_is_sane(struct sockaddr_tipc *addr)
{
	if (addr->family != AF_TIPC)
		return false;
	if (addr->addrtype == TIPC_SERVICE_RANGE)
		return (addr->addr.nameseq.lower <= addr->addr.nameseq.upper);
	return (addr->addrtype == TIPC_SERVICE_ADDR ||
		addr->addrtype == TIPC_SOCKET_ADDR);
}

/**
 * tipc_connect - establish a connection to another TIPC port
 * @sock: socket structure
 * @dest: socket address for destination port
 * @destlen: size of socket address data structure
 * @flags: file-related flags associated with socket
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_connect(struct socket *sock, struct sockaddr *dest,
			int destlen, int flags)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest;
	struct msghdr m = {NULL,};
	long timeout = (flags & O_NONBLOCK) ? 0 : tsk->conn_timeout;
	int previous;
	int res = 0;

	if (destlen != sizeof(struct sockaddr_tipc))
		return -EINVAL;

	lock_sock(sk);

	if (tsk->group) {
		res = -EINVAL;
		goto exit;
	}

	if (dst->family == AF_UNSPEC) {
		memset(&tsk->peer, 0, sizeof(struct sockaddr_tipc));
		if (!tipc_sk_type_connectionless(sk))
			res = -EINVAL;
		goto exit;
	}
	if (!tipc_sockaddr_is_sane(dst)) {
		res = -EINVAL;
		goto exit;
	}
	/* DGRAM/RDM connect(), just save the destaddr */
	if (tipc_sk_type_connectionless(sk)) {
		memcpy(&tsk->peer, dest, destlen);
		goto exit;
	} else if (dst->addrtype == TIPC_SERVICE_RANGE) {
		res = -EINVAL;
		goto exit;
	}

	previous = sk->sk_state;

	switch (sk->sk_state) {
	case TIPC_OPEN:
		/* Send a 'SYN-' to destination */
		m.msg_name = dest;
		m.msg_namelen = destlen;

		/* If connect() is non-blocking, set MSG_DONTWAIT so that
		 * __tipc_sendmsg() never blocks.
		 */
		if (!timeout)
			m.msg_flags = MSG_DONTWAIT;

		res = __tipc_sendmsg(sock, &m, 0);
		if ((res < 0) && (res != -EWOULDBLOCK))
			goto exit;

		/* Just entered TIPC_CONNECTING state; the only
		 * difference is that return value in non-blocking
		 * case is EINPROGRESS, rather than EALREADY.
		 */
		res = -EINPROGRESS;
		/* fall through */
	case TIPC_CONNECTING:
		if (!timeout) {
			if (previous == TIPC_CONNECTING)
				res = -EALREADY;
			goto exit;
		}
		timeout = msecs_to_jiffies(timeout);
		/* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */
		res = tipc_wait_for_connect(sock, &timeout);
		break;
	case TIPC_ESTABLISHED:
		res = -EISCONN;
		break;
	default:
		res = -EINVAL;
	}

exit:
	release_sock(sk);
	return res;
}

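/* Illustrative userspace counterpart (not kernel code): connecting a
 * SOCK_SEQPACKET socket to a service address. The service type 18888
 * and instance 17 below are arbitrary example values; addr.name.domain
 * is left zero for a cluster-wide lookup.
 *
 *	struct sockaddr_tipc server = {
 *		.family = AF_TIPC,
 *		.addrtype = TIPC_SERVICE_ADDR,
 *		.addr.name.name.type = 18888,
 *		.addr.name.name.instance = 17,
 *	};
 *	int sd = socket(AF_TIPC, SOCK_SEQPACKET, 0);
 *	connect(sd, (struct sockaddr *)&server, sizeof(server));
 */
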
/**
 * tipc_listen - allow socket to listen for incoming connections
 * @sock: socket structure
 * @len: (unused)
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_listen(struct socket *sock, int len)
{
	struct sock *sk = sock->sk;
	int res;

	lock_sock(sk);
	res = tipc_set_sk_state(sk, TIPC_LISTEN);
	release_sock(sk);

	return res;
}

static int tipc_wait_for_accept(struct socket *sock, long timeo)
{
	struct sock *sk = sock->sk;
	DEFINE_WAIT(wait);
	int err;

	/* True wake-one mechanism for incoming connections: only
	 * one process gets woken up, not the 'whole herd'.
	 * Since we do not 'race & poll' for established sockets
	 * anymore, the common case will execute the loop only once.
	 */
	for (;;) {
		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
					  TASK_INTERRUPTIBLE);
		if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
			release_sock(sk);
			timeo = schedule_timeout(timeo);
			lock_sock(sk);
		}
		err = 0;
		if (!skb_queue_empty(&sk->sk_receive_queue))
			break;
		err = -EAGAIN;
		if (!timeo)
			break;
		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			break;
	}
	finish_wait(sk_sleep(sk), &wait);
	return err;
}

/**
 * tipc_accept - wait for connection request
 * @sock: listening socket
 * @new_sock: new socket that is to be connected
 * @flags: file-related flags associated with socket
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
		       bool kern)
{
	struct sock *new_sk, *sk = sock->sk;
	struct sk_buff *buf;
	struct tipc_sock *new_tsock;
	struct tipc_msg *msg;
	long timeo;
	int res;

	lock_sock(sk);

	if (sk->sk_state != TIPC_LISTEN) {
		res = -EINVAL;
		goto exit;
	}
	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
	res = tipc_wait_for_accept(sock, timeo);
	if (res)
		goto exit;

	buf = skb_peek(&sk->sk_receive_queue);

	res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, kern);
	if (res)
		goto exit;
	security_sk_clone(sock->sk, new_sock->sk);

	new_sk = new_sock->sk;
	new_tsock = tipc_sk(new_sk);
	msg = buf_msg(buf);

	/* we lock on new_sk; but lockdep sees the lock on sk */
	lock_sock_nested(new_sk, SINGLE_DEPTH_NESTING);

	/*
	 * Reject any stray messages received by new socket
	 * before the socket lock was taken (very, very unlikely)
	 */
	tsk_rej_rx_queue(new_sk);

	/* Connect new socket to its peer */
	tipc_sk_finish_conn(new_tsock, msg_origport(msg), msg_orignode(msg));

	tsk_set_importance(new_tsock, msg_importance(msg));
	if (msg_named(msg)) {
		new_tsock->conn_type = msg_nametype(msg);
		new_tsock->conn_instance = msg_nameinst(msg);
	}

	/*
	 * Respond to 'SYN-' by discarding it & returning 'ACK'-.
	 * Respond to 'SYN+' by queuing it on new socket.
	 */
	if (!msg_data_sz(msg)) {
		struct msghdr m = {NULL,};

		tsk_advance_rx_queue(sk);
		__tipc_sendstream(new_sock, &m, 0);
	} else {
		__skb_dequeue(&sk->sk_receive_queue);
		__skb_queue_head(&new_sk->sk_receive_queue, buf);
		skb_set_owner_r(buf, new_sk);
	}
	release_sock(new_sk);
exit:
	release_sock(sk);
	return res;
}

/**
 * tipc_shutdown - shutdown socket connection
 * @sock: socket structure
 * @how: direction to close (must be SHUT_RDWR)
 *
 * Terminates connection (if necessary), then purges socket's receive queue.
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	int res;

	if (how != SHUT_RDWR)
		return -EINVAL;

	lock_sock(sk);

	trace_tipc_sk_shutdown(sk, NULL, TIPC_DUMP_ALL, " ");
	__tipc_shutdown(sock, TIPC_CONN_SHUTDOWN);
	sk->sk_shutdown = SEND_SHUTDOWN;

	if (sk->sk_state == TIPC_DISCONNECTING) {
		/* Discard any unreceived messages */
		__skb_queue_purge(&sk->sk_receive_queue);

		/* Wake up anyone sleeping in poll */
		sk->sk_state_change(sk);
		res = 0;
	} else {
		res = -ENOTCONN;
	}

	release_sock(sk);
	return res;
}

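/* tipc_sk_check_probing_state - called from the socket timer on an
 * established connection. Aborts the connection if the previous probe
 * went unacknowledged, otherwise prepares a new CONN_PROBE and re-arms
 * the timer.
 */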
static void tipc_sk_check_probing_state(struct sock *sk,
					struct sk_buff_head *list)
{
	struct tipc_sock *tsk = tipc_sk(sk);
	u32 pnode = tsk_peer_node(tsk);
	u32 pport = tsk_peer_port(tsk);
	u32 self = tsk_own_node(tsk);
	u32 oport = tsk->portid;
	struct sk_buff *skb;

	if (tsk->probe_unacked) {
		tipc_set_sk_state(sk, TIPC_DISCONNECTING);
		sk->sk_err = ECONNABORTED;
		tipc_node_remove_conn(sock_net(sk), pnode, pport);
		sk->sk_state_change(sk);
		return;
	}
	/* Prepare new probe */
	skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE, 0,
			      pnode, self, pport, oport, TIPC_OK);
	if (skb)
		__skb_queue_tail(list, skb);
	tsk->probe_unacked = true;
	sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV);
}

static void tipc_sk_retry_connect(struct sock *sk, struct sk_buff_head *list)
{
	struct tipc_sock *tsk = tipc_sk(sk);

	/* Try again later if dest link is congested; note that
	 * sk_reset_timer() expects an absolute expiry time
	 */
	if (tsk->cong_link_cnt) {
		sk_reset_timer(sk, &sk->sk_timer,
			       jiffies + msecs_to_jiffies(100));
		return;
	}
	/* Prepare SYN for retransmit */
	tipc_msg_skb_clone(&sk->sk_write_queue, list);
}

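/* tipc_sk_timeout - socket timer callback: sends connection probes on
 * established connections and retransmits SYNs while connecting. Defers
 * itself if the socket lock is currently owned by user context.
 */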
static void tipc_sk_timeout(struct timer_list *t)
{
	struct sock *sk = from_timer(sk, t, sk_timer);
	struct tipc_sock *tsk = tipc_sk(sk);
	u32 pnode = tsk_peer_node(tsk);
	struct sk_buff_head list;
	int rc = 0;

	__skb_queue_head_init(&list);
	bh_lock_sock(sk);

	/* Try again later if socket is busy */
	if (sock_owned_by_user(sk)) {
		sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ / 20);
		bh_unlock_sock(sk);
		return;
	}

	if (sk->sk_state == TIPC_ESTABLISHED)
		tipc_sk_check_probing_state(sk, &list);
	else if (sk->sk_state == TIPC_CONNECTING)
		tipc_sk_retry_connect(sk, &list);

	bh_unlock_sock(sk);

	if (!skb_queue_empty(&list))
		rc = tipc_node_xmit(sock_net(sk), &list, pnode, tsk->portid);

	/* SYN messages may cause link congestion */
	if (rc == -ELINKCONG) {
		tipc_dest_push(&tsk->cong_links, pnode, 0);
		tsk->cong_link_cnt = 1;
	}
	sock_put(sk);
}

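/* tipc_sk_publish - bind the socket to a service name sequence, making
 * it visible in the distributed name table within the given scope.
 */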
static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
			   struct tipc_name_seq const *seq)
{
	struct sock *sk = &tsk->sk;
	struct net *net = sock_net(sk);
	struct publication *publ;
	u32 key;

	if (scope != TIPC_NODE_SCOPE)
		scope = TIPC_CLUSTER_SCOPE;

	if (tipc_sk_connected(sk))
		return -EINVAL;
	key = tsk->portid + tsk->pub_count + 1;
	if (key == tsk->portid)
		return -EADDRINUSE;

	publ = tipc_nametbl_publish(net, seq->type, seq->lower, seq->upper,
				    scope, tsk->portid, key);
	if (unlikely(!publ))
		return -EINVAL;

	list_add(&publ->binding_sock, &tsk->publications);
	tsk->pub_count++;
	tsk->published = 1;
	return 0;
}

static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
			    struct tipc_name_seq const *seq)
{
	struct net *net = sock_net(&tsk->sk);
	struct publication *publ;
	struct publication *safe;
	int rc = -EINVAL;

	if (scope != TIPC_NODE_SCOPE)
		scope = TIPC_CLUSTER_SCOPE;

	list_for_each_entry_safe(publ, safe, &tsk->publications, binding_sock) {
		if (seq) {
			if (publ->scope != scope)
				continue;
			if (publ->type != seq->type)
				continue;
			if (publ->lower != seq->lower)
				continue;
			if (publ->upper != seq->upper)
				break;
			tipc_nametbl_withdraw(net, publ->type, publ->lower,
					      publ->upper, publ->key);
			rc = 0;
			break;
		}
		tipc_nametbl_withdraw(net, publ->type, publ->lower,
				      publ->upper, publ->key);
		rc = 0;
	}
	if (list_empty(&tsk->publications))
		tsk->published = 0;
	return rc;
}

/* tipc_sk_reinit: set non-zero address in all existing sockets
 * when we go from standalone to network mode.
 */
void tipc_sk_reinit(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct rhashtable_iter iter;
	struct tipc_sock *tsk;
	struct tipc_msg *msg;

	rhashtable_walk_enter(&tn->sk_rht, &iter);

	do {
		rhashtable_walk_start(&iter);

		while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) {
			sock_hold(&tsk->sk);
			rhashtable_walk_stop(&iter);
			lock_sock(&tsk->sk);
			msg = &tsk->phdr;
			msg_set_prevnode(msg, tipc_own_addr(net));
			msg_set_orignode(msg, tipc_own_addr(net));
			release_sock(&tsk->sk);
			rhashtable_walk_start(&iter);
			sock_put(&tsk->sk);
		}

		rhashtable_walk_stop(&iter);
	} while (tsk == ERR_PTR(-EAGAIN));

	rhashtable_walk_exit(&iter);
}

static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_sock *tsk;

	rcu_read_lock();
	tsk = rhashtable_lookup_fast(&tn->sk_rht, &portid, tsk_rht_params);
	if (tsk)
		sock_hold(&tsk->sk);
	rcu_read_unlock();

	return tsk;
}

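/* tipc_sk_insert - allocate a free port number for the socket and insert
 * it into the per-netns rhashtable, starting the search at a random
 * offset within [TIPC_MIN_PORT, TIPC_MAX_PORT].
 */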
static int tipc_sk_insert(struct tipc_sock *tsk)
{
	struct sock *sk = &tsk->sk;
	struct net *net = sock_net(sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	u32 remaining = (TIPC_MAX_PORT - TIPC_MIN_PORT) + 1;
	u32 portid = prandom_u32() % remaining + TIPC_MIN_PORT;

	while (remaining--) {
		portid++;
		if ((portid < TIPC_MIN_PORT) || (portid > TIPC_MAX_PORT))
			portid = TIPC_MIN_PORT;
		tsk->portid = portid;
		sock_hold(&tsk->sk);
		if (!rhashtable_lookup_insert_fast(&tn->sk_rht, &tsk->node,
						   tsk_rht_params))
			return 0;
		sock_put(&tsk->sk);
	}

	return -1;
}

static void tipc_sk_remove(struct tipc_sock *tsk)
{
	struct sock *sk = &tsk->sk;
	struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id);

	if (!rhashtable_remove_fast(&tn->sk_rht, &tsk->node, tsk_rht_params)) {
		WARN_ON(refcount_read(&sk->sk_refcnt) == 1);
		__sock_put(sk);
	}
}

static const struct rhashtable_params tsk_rht_params = {
	.nelem_hint = 192,
	.head_offset = offsetof(struct tipc_sock, node),
	.key_offset = offsetof(struct tipc_sock, portid),
	.key_len = sizeof(u32), /* portid */
	.max_size = 1048576,
	.min_size = 256,
	.automatic_shrinking = true,
};

int tipc_sk_rht_init(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	return rhashtable_init(&tn->sk_rht, &tsk_rht_params);
}

void tipc_sk_rht_destroy(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	/* Wait for socket readers to complete */
	synchronize_net();

	rhashtable_destroy(&tn->sk_rht);
}

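/* tipc_sk_join - bind the socket to a communication group, as requested
 * via the TIPC_GROUP_JOIN socket option handled in tipc_setsockopt().
 *
 * Illustrative userspace counterpart (not kernel code); the group type
 * 4711 and member instance 17 below are arbitrary example values:
 *
 *	struct tipc_group_req mreq = {
 *		.type = 4711,
 *		.instance = 17,
 *		.scope = TIPC_CLUSTER_SCOPE,
 *	};
 *	setsockopt(sd, SOL_TIPC, TIPC_GROUP_JOIN, &mreq, sizeof(mreq));
 */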
static int tipc_sk_join(struct tipc_sock *tsk, struct tipc_group_req *mreq)
{
	struct net *net = sock_net(&tsk->sk);
	struct tipc_group *grp = tsk->group;
	struct tipc_msg *hdr = &tsk->phdr;
	struct tipc_name_seq seq;
	int rc;

	if (mreq->type < TIPC_RESERVED_TYPES)
		return -EACCES;
	if (mreq->scope > TIPC_NODE_SCOPE)
		return -EINVAL;
	if (grp)
		return -EACCES;
	grp = tipc_group_create(net, tsk->portid, mreq, &tsk->group_is_open);
	if (!grp)
		return -ENOMEM;
	tsk->group = grp;
	msg_set_lookup_scope(hdr, mreq->scope);
	msg_set_nametype(hdr, mreq->type);
	msg_set_dest_droppable(hdr, true);
	seq.type = mreq->type;
	seq.lower = mreq->instance;
	seq.upper = seq.lower;
	tipc_nametbl_build_group(net, grp, mreq->type, mreq->scope);
	rc = tipc_sk_publish(tsk, mreq->scope, &seq);
	if (rc) {
		tipc_group_delete(net, grp);
		tsk->group = NULL;
		return rc;
	}
	/* Eliminate any risk that a broadcast overtakes sent JOINs */
	tsk->mc_method.rcast = true;
	tsk->mc_method.mandatory = true;
	tipc_group_join(net, grp, &tsk->sk.sk_rcvbuf);
	return rc;
}

static int tipc_sk_leave(struct tipc_sock *tsk)
{
	struct net *net = sock_net(&tsk->sk);
	struct tipc_group *grp = tsk->group;
	struct tipc_name_seq seq;
	int scope;

	if (!grp)
		return -EINVAL;
	tipc_group_self(grp, &seq, &scope);
	tipc_group_delete(net, grp);
	tsk->group = NULL;
	tipc_sk_withdraw(tsk, scope, &seq);
	return 0;
}
2929
2930/**
2931 * tipc_setsockopt - set socket option
2932 * @sock: socket structure
2933 * @lvl: option level
2934 * @opt: option identifier
2935 * @ov: pointer to new option value
2936 * @ol: length of option value
2937 *
2938 * For stream sockets only, accepts and ignores all IPPROTO_TCP options
2939 * (to ease compatibility).
2940 *
2941 * Returns 0 on success, errno otherwise
2942 */
2943static int tipc_setsockopt(struct socket *sock, int lvl, int opt,
2944 char __user *ov, unsigned int ol)
2945{
2946 struct sock *sk = sock->sk;
2947 struct tipc_sock *tsk = tipc_sk(sk);
2948 struct tipc_group_req mreq;
2949 u32 value = 0;
2950 int res = 0;
2951
2952 if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
2953 return 0;
2954 if (lvl != SOL_TIPC)
2955 return -ENOPROTOOPT;
2956
2957 switch (opt) {
2958 case TIPC_IMPORTANCE:
2959 case TIPC_SRC_DROPPABLE:
2960 case TIPC_DEST_DROPPABLE:
2961 case TIPC_CONN_TIMEOUT:
2962 if (ol < sizeof(value))
2963 return -EINVAL;
2964 if (get_user(value, (u32 __user *)ov))
2965 return -EFAULT;
2966 break;
2967 case TIPC_GROUP_JOIN:
2968 if (ol < sizeof(mreq))
2969 return -EINVAL;
2970 if (copy_from_user(&mreq, ov, sizeof(mreq)))
2971 return -EFAULT;
2972 break;
2973 default:
2974 if (ov || ol)
2975 return -EINVAL;
2976 }
2977
2978 lock_sock(sk);
2979
2980 switch (opt) {
2981 case TIPC_IMPORTANCE:
2982 res = tsk_set_importance(tsk, value);
2983 break;
2984 case TIPC_SRC_DROPPABLE:
2985 if (sock->type != SOCK_STREAM)
2986 tsk_set_unreliable(tsk, value);
2987 else
2988 res = -ENOPROTOOPT;
2989 break;
2990 case TIPC_DEST_DROPPABLE:
2991 tsk_set_unreturnable(tsk, value);
2992 break;
2993 case TIPC_CONN_TIMEOUT:
2994 tipc_sk(sk)->conn_timeout = value;
2995 break;
2996 case TIPC_MCAST_BROADCAST:
2997 tsk->mc_method.rcast = false;
2998 tsk->mc_method.mandatory = true;
2999 break;
3000 case TIPC_MCAST_REPLICAST:
3001 tsk->mc_method.rcast = true;
3002 tsk->mc_method.mandatory = true;
3003 break;
3004 case TIPC_GROUP_JOIN:
3005 res = tipc_sk_join(tsk, &mreq);
3006 break;
3007 case TIPC_GROUP_LEAVE:
3008 res = tipc_sk_leave(tsk);
3009 break;
3010 default:
3011 res = -EINVAL;
3012 }
3013
3014 release_sock(sk);
3015
3016 return res;
3017}
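
/*
 * Example (user-space sketch, illustrative values): the first four
 * options above take a plain 32-bit value, e.g. raising the message
 * importance and shortening the connection setup timeout:
 *
 *	__u32 imp = TIPC_HIGH_IMPORTANCE;
 *	__u32 tmo = 3000;	// connect timeout, in ms
 *
 *	setsockopt(sd, SOL_TIPC, TIPC_IMPORTANCE, &imp, sizeof(imp));
 *	setsockopt(sd, SOL_TIPC, TIPC_CONN_TIMEOUT, &tmo, sizeof(tmo));
 */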
3018
3019/**
3020 * tipc_getsockopt - get socket option
3021 * @sock: socket structure
3022 * @lvl: option level
3023 * @opt: option identifier
3024 * @ov: receptacle for option value
3025 * @ol: receptacle for length of option value
3026 *
3027 * For stream sockets only, returns a zero-length result for all IPPROTO_TCP
3028 * options (to ease compatibility).
3029 *
3030 * Returns 0 on success, errno otherwise
3031 */
3032static int tipc_getsockopt(struct socket *sock, int lvl, int opt,
3033 char __user *ov, int __user *ol)
3034{
3035 struct sock *sk = sock->sk;
3036 struct tipc_sock *tsk = tipc_sk(sk);
3037 struct tipc_name_seq seq;
3038 int len, scope;
3039 u32 value;
3040 int res;
3041
3042 if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
3043 return put_user(0, ol);
3044 if (lvl != SOL_TIPC)
3045 return -ENOPROTOOPT;
3046 res = get_user(len, ol);
3047 if (res)
3048 return res;
3049
3050 lock_sock(sk);
3051
3052 switch (opt) {
3053 case TIPC_IMPORTANCE:
3054 value = tsk_importance(tsk);
3055 break;
3056 case TIPC_SRC_DROPPABLE:
3057 value = tsk_unreliable(tsk);
3058 break;
3059 case TIPC_DEST_DROPPABLE:
3060 value = tsk_unreturnable(tsk);
3061 break;
3062 case TIPC_CONN_TIMEOUT:
3063 value = tsk->conn_timeout;
3064 /* no need to set "res", since already 0 at this point */
3065 break;
3066 case TIPC_NODE_RECVQ_DEPTH:
3067 value = 0; /* was tipc_queue_size, now obsolete */
3068 break;
3069 case TIPC_SOCK_RECVQ_DEPTH:
3070 value = skb_queue_len(&sk->sk_receive_queue);
3071 break;
3072 case TIPC_SOCK_RECVQ_USED:
3073 value = sk_rmem_alloc_get(sk);
3074 break;
3075 case TIPC_GROUP_JOIN:
3076 seq.type = 0;
3077 if (tsk->group)
3078 tipc_group_self(tsk->group, &seq, &scope);
3079 value = seq.type;
3080 break;
3081 default:
3082 res = -EINVAL;
3083 }
3084
3085 release_sock(sk);
3086
3087 if (res)
3088 return res; /* "get" failed */
3089
3090 if (len < sizeof(value))
3091 return -EINVAL;
3092
3093 if (copy_to_user(ov, &value, sizeof(value)))
3094 return -EFAULT;
3095
3096 return put_user(sizeof(value), ol);
3097}
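
/*
 * Example (user-space sketch): every option above is returned as a
 * single u32, so reading the current receive queue depth looks like:
 *
 *	__u32 depth = 0;
 *	socklen_t len = sizeof(depth);
 *
 *	if (!getsockopt(sd, SOL_TIPC, TIPC_SOCK_RECVQ_DEPTH, &depth, &len))
 *		printf("%u buffers queued\n", depth);
 */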
3098
3099static int tipc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
3100{
3101 struct net *net = sock_net(sock->sk);
3102 struct tipc_sioc_nodeid_req nr = {0};
3103 struct tipc_sioc_ln_req lnr;
3104 void __user *argp = (void __user *)arg;
3105
3106 switch (cmd) {
3107 case SIOCGETLINKNAME:
3108 if (copy_from_user(&lnr, argp, sizeof(lnr)))
3109 return -EFAULT;
3110 if (!tipc_node_get_linkname(net,
3111 lnr.bearer_id & 0xffff, lnr.peer,
3112 lnr.linkname, TIPC_MAX_LINK_NAME)) {
3113 if (copy_to_user(argp, &lnr, sizeof(lnr)))
3114 return -EFAULT;
3115 return 0;
3116 }
3117 return -EADDRNOTAVAIL;
3118 case SIOCGETNODEID:
3119 if (copy_from_user(&nr, argp, sizeof(nr)))
3120 return -EFAULT;
3121 if (!tipc_node_get_id(net, nr.peer, nr.node_id))
3122 return -EADDRNOTAVAIL;
3123 if (copy_to_user(argp, &nr, sizeof(nr)))
3124 return -EFAULT;
3125 return 0;
3126 default:
3127 return -ENOIOCTLCMD;
3128 }
3129}
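
/*
 * Example (user-space sketch, no error handling): fetching a peer's
 * node identity with SIOCGETNODEID; "peer" is assumed to hold the
 * peer's hash address, and use_node_id() is a hypothetical consumer:
 *
 *	struct tipc_sioc_nodeid_req nr = {0};
 *
 *	nr.peer = peer;
 *	if (!ioctl(sd, SIOCGETNODEID, &nr))
 *		use_node_id(nr.node_id);
 */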
3130
3131static int tipc_socketpair(struct socket *sock1, struct socket *sock2)
3132{
3133 struct tipc_sock *tsk2 = tipc_sk(sock2->sk);
3134 struct tipc_sock *tsk1 = tipc_sk(sock1->sk);
3135 u32 onode = tipc_own_addr(sock_net(sock1->sk));
3136
3137 tsk1->peer.family = AF_TIPC;
3138 tsk1->peer.addrtype = TIPC_ADDR_ID;
3139 tsk1->peer.scope = TIPC_NODE_SCOPE;
3140 tsk1->peer.addr.id.ref = tsk2->portid;
3141 tsk1->peer.addr.id.node = onode;
3142 tsk2->peer.family = AF_TIPC;
3143 tsk2->peer.addrtype = TIPC_ADDR_ID;
3144 tsk2->peer.scope = TIPC_NODE_SCOPE;
3145 tsk2->peer.addr.id.ref = tsk1->portid;
3146 tsk2->peer.addr.id.node = onode;
3147
3148 tipc_sk_finish_conn(tsk1, tsk2->portid, onode);
3149 tipc_sk_finish_conn(tsk2, tsk1->portid, onode);
3150 return 0;
3151}
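
/*
 * Example (user-space sketch): the helper above is what makes the
 * standard socketpair() call work for AF_TIPC, wiring two sockets
 * into a node-local connection:
 *
 *	int sv[2];
 *
 *	if (socketpair(AF_TIPC, SOCK_SEQPACKET, 0, sv) == 0)
 *		write(sv[0], "ping", 4);	// readable on sv[1]
 */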
3152
3153/* Protocol switches for the various types of TIPC sockets */
3154
3155static const struct proto_ops msg_ops = {
3156 .owner = THIS_MODULE,
3157 .family = AF_TIPC,
3158 .release = tipc_release,
3159 .bind = tipc_bind,
3160 .connect = tipc_connect,
3161 .socketpair = tipc_socketpair,
3162 .accept = sock_no_accept,
3163 .getname = tipc_getname,
3164 .poll = tipc_poll,
3165 .ioctl = tipc_ioctl,
3166 .listen = sock_no_listen,
3167 .shutdown = tipc_shutdown,
3168 .setsockopt = tipc_setsockopt,
3169 .getsockopt = tipc_getsockopt,
3170 .sendmsg = tipc_sendmsg,
3171 .recvmsg = tipc_recvmsg,
3172 .mmap = sock_no_mmap,
3173 .sendpage = sock_no_sendpage
3174};
3175
3176static const struct proto_ops packet_ops = {
3177 .owner = THIS_MODULE,
3178 .family = AF_TIPC,
3179 .release = tipc_release,
3180 .bind = tipc_bind,
3181 .connect = tipc_connect,
3182 .socketpair = tipc_socketpair,
3183 .accept = tipc_accept,
3184 .getname = tipc_getname,
3185 .poll = tipc_poll,
3186 .ioctl = tipc_ioctl,
3187 .listen = tipc_listen,
3188 .shutdown = tipc_shutdown,
3189 .setsockopt = tipc_setsockopt,
3190 .getsockopt = tipc_getsockopt,
3191 .sendmsg = tipc_send_packet,
3192 .recvmsg = tipc_recvmsg,
3193 .mmap = sock_no_mmap,
3194 .sendpage = sock_no_sendpage
3195};
3196
3197static const struct proto_ops stream_ops = {
3198 .owner = THIS_MODULE,
3199 .family = AF_TIPC,
3200 .release = tipc_release,
3201 .bind = tipc_bind,
3202 .connect = tipc_connect,
3203 .socketpair = tipc_socketpair,
3204 .accept = tipc_accept,
3205 .getname = tipc_getname,
3206 .poll = tipc_poll,
3207 .ioctl = tipc_ioctl,
3208 .listen = tipc_listen,
3209 .shutdown = tipc_shutdown,
3210 .setsockopt = tipc_setsockopt,
3211 .getsockopt = tipc_getsockopt,
3212 .sendmsg = tipc_sendstream,
3213 .recvmsg = tipc_recvstream,
3214 .mmap = sock_no_mmap,
3215 .sendpage = sock_no_sendpage
3216};
3217
3218static const struct net_proto_family tipc_family_ops = {
3219 .owner = THIS_MODULE,
3220 .family = AF_TIPC,
3221 .create = tipc_sk_create
3222};
3223
3224static struct proto tipc_proto = {
3225 .name = "TIPC",
3226 .owner = THIS_MODULE,
3227 .obj_size = sizeof(struct tipc_sock),
3228 .sysctl_rmem = sysctl_tipc_rmem
3229};
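
/*
 * Sketch (user space, illustrative): the tables above map onto the
 * socket types selectable at creation time, as dispatched by
 * tipc_sk_create() earlier in this file:
 *
 *	int rdm = socket(AF_TIPC, SOCK_RDM, 0);	// msg_ops (also SOCK_DGRAM)
 *	int pkt = socket(AF_TIPC, SOCK_SEQPACKET, 0);	// packet_ops
 *	int str = socket(AF_TIPC, SOCK_STREAM, 0);	// stream_ops
 */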
3230
3231/**
3232 * tipc_socket_init - initialize TIPC socket interface
3233 *
3234 * Returns 0 on success, errno otherwise
3235 */
3236int tipc_socket_init(void)
3237{
3238 int res;
3239
3240 res = proto_register(&tipc_proto, 1);
3241 if (res) {
3242 pr_err("Failed to register TIPC protocol type\n");
3243 goto out;
3244 }
3245
3246 res = sock_register(&tipc_family_ops);
3247 if (res) {
3248 pr_err("Failed to register TIPC socket type\n");
3249 proto_unregister(&tipc_proto);
3250 goto out;
3251 }
3252 out:
3253 return res;
3254}
3255
3256/**
3257 * tipc_socket_stop - stop TIPC socket interface
3258 */
3259void tipc_socket_stop(void)
3260{
3261 sock_unregister(tipc_family_ops.family);
3262 proto_unregister(&tipc_proto);
3263}
3264
3265/* Caller should hold socket lock for the passed tipc socket. */
3266static int __tipc_nl_add_sk_con(struct sk_buff *skb, struct tipc_sock *tsk)
3267{
3268 u32 peer_node;
3269 u32 peer_port;
3270 struct nlattr *nest;
3271
3272 peer_node = tsk_peer_node(tsk);
3273 peer_port = tsk_peer_port(tsk);
3274
3275 nest = nla_nest_start_noflag(skb, TIPC_NLA_SOCK_CON);
3276 if (!nest)
3277 return -EMSGSIZE;
3278
3279 if (nla_put_u32(skb, TIPC_NLA_CON_NODE, peer_node))
3280 goto msg_full;
3281 if (nla_put_u32(skb, TIPC_NLA_CON_SOCK, peer_port))
3282 goto msg_full;
3283
3284 if (tsk->conn_type != 0) {
3285 if (nla_put_flag(skb, TIPC_NLA_CON_FLAG))
3286 goto msg_full;
3287 if (nla_put_u32(skb, TIPC_NLA_CON_TYPE, tsk->conn_type))
3288 goto msg_full;
3289 if (nla_put_u32(skb, TIPC_NLA_CON_INST, tsk->conn_instance))
3290 goto msg_full;
3291 }
3292 nla_nest_end(skb, nest);
3293
3294 return 0;
3295
3296msg_full:
3297 nla_nest_cancel(skb, nest);
3298
3299 return -EMSGSIZE;
3300}
3301
3302static int __tipc_nl_add_sk_info(struct sk_buff *skb,
3303				 struct tipc_sock *tsk)
3304{
3305 struct net *net = sock_net(skb->sk);
3306 struct sock *sk = &tsk->sk;
3307
3308 if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->portid) ||
3309 nla_put_u32(skb, TIPC_NLA_SOCK_ADDR, tipc_own_addr(net)))
3310 return -EMSGSIZE;
3311
3312 if (tipc_sk_connected(sk)) {
3313 if (__tipc_nl_add_sk_con(skb, tsk))
3314 return -EMSGSIZE;
3315 } else if (!list_empty(&tsk->publications)) {
3316 if (nla_put_flag(skb, TIPC_NLA_SOCK_HAS_PUBL))
3317 return -EMSGSIZE;
3318 }
3319 return 0;
3320}
3321
3322/* Caller should hold socket lock for the passed tipc socket. */
3323static int __tipc_nl_add_sk(struct sk_buff *skb, struct netlink_callback *cb,
3324 struct tipc_sock *tsk)
3325{
3326 struct nlattr *attrs;
3327 void *hdr;
3328
3329 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
3330 &tipc_genl_family, NLM_F_MULTI, TIPC_NL_SOCK_GET);
3331 if (!hdr)
3332 goto msg_cancel;
3333
3334 attrs = nla_nest_start_noflag(skb, TIPC_NLA_SOCK);
3335 if (!attrs)
3336 goto genlmsg_cancel;
3337
3338 if (__tipc_nl_add_sk_info(skb, tsk))
3339 goto attr_msg_cancel;
3340
3341 nla_nest_end(skb, attrs);
3342 genlmsg_end(skb, hdr);
3343
3344 return 0;
3345
3346attr_msg_cancel:
3347 nla_nest_cancel(skb, attrs);
3348genlmsg_cancel:
3349 genlmsg_cancel(skb, hdr);
3350msg_cancel:
3351 return -EMSGSIZE;
3352}
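
/*
 * Resulting attribute layout (a sketch of the nest opened above and
 * filled by __tipc_nl_add_sk_info(), as seen by a user-space dumper):
 *
 *	TIPC_NLA_SOCK
 *	    TIPC_NLA_SOCK_REF		(u32, portid)
 *	    TIPC_NLA_SOCK_ADDR		(u32, own node address)
 *	    TIPC_NLA_SOCK_CON			(only if connected)
 *	        TIPC_NLA_CON_NODE	(u32)
 *	        TIPC_NLA_CON_SOCK	(u32)
 *	        TIPC_NLA_CON_FLAG/_TYPE/_INST	(only if conn_type != 0)
 *	    TIPC_NLA_SOCK_HAS_PUBL	(flag, only if publications exist)
 */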
3353
3354int tipc_nl_sk_walk(struct sk_buff *skb, struct netlink_callback *cb,
3355 int (*skb_handler)(struct sk_buff *skb,
3356 struct netlink_callback *cb,
3357 struct tipc_sock *tsk))
3358{
3359 struct rhashtable_iter *iter = (void *)cb->args[4];
3360 struct tipc_sock *tsk;
3361 int err;
3362
3363 rhashtable_walk_start(iter);
3364 while ((tsk = rhashtable_walk_next(iter)) != NULL) {
3365 if (IS_ERR(tsk)) {
3366 err = PTR_ERR(tsk);
3367 if (err == -EAGAIN) {
3368 err = 0;
3369 continue;
3370 }
3371 break;
3372 }
3373
3374 sock_hold(&tsk->sk);
3375 rhashtable_walk_stop(iter);
3376 lock_sock(&tsk->sk);
3377 err = skb_handler(skb, cb, tsk);
3378 if (err) {
3379 release_sock(&tsk->sk);
3380 sock_put(&tsk->sk);
3381 goto out;
3382 }
3383 release_sock(&tsk->sk);
3384 rhashtable_walk_start(iter);
3385 sock_put(&tsk->sk);
3386 }
3387 rhashtable_walk_stop(iter);
3388out:
3389 return skb->len;
3390}
3391EXPORT_SYMBOL(tipc_nl_sk_walk);
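
/*
 * Sketch of a minimal skb_handler for tipc_nl_sk_walk() (illustrative
 * only; tipc_nl_sk_noop is a hypothetical name, and real handlers,
 * like __tipc_nl_add_sk() above, emit netlink attributes). Note that
 * the walker drops the RCU-protected rhashtable walk before taking
 * the sleeping sock lock, holding a socket reference across the gap:
 *
 *	static int tipc_nl_sk_noop(struct sk_buff *skb,
 *				   struct netlink_callback *cb,
 *				   struct tipc_sock *tsk)
 *	{
 *		return 0;	// nonzero aborts the walk early
 *	}
 *
 *	// usage: tipc_nl_sk_walk(skb, cb, tipc_nl_sk_noop);
 */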
3392
3393int tipc_dump_start(struct netlink_callback *cb)
3394{
3395 return __tipc_dump_start(cb, sock_net(cb->skb->sk));
3396}
3397EXPORT_SYMBOL(tipc_dump_start);
3398
3399int __tipc_dump_start(struct netlink_callback *cb, struct net *net)
3400{
3401 /* tipc_nl_name_table_dump() uses cb->args[0...3]. */
3402 struct rhashtable_iter *iter = (void *)cb->args[4];
3403 struct tipc_net *tn = tipc_net(net);
3404
3405 if (!iter) {
3406 iter = kmalloc(sizeof(*iter), GFP_KERNEL);
3407 if (!iter)
3408 return -ENOMEM;
3409
3410 cb->args[4] = (long)iter;
3411 }
3412
3413 rhashtable_walk_enter(&tn->sk_rht, iter);
3414 return 0;
3415}
3416
3417int tipc_dump_done(struct netlink_callback *cb)
3418{
3419 struct rhashtable_iter *hti = (void *)cb->args[4];
3420
3421 rhashtable_walk_exit(hti);
3422 kfree(hti);
3423 return 0;
3424}
3425EXPORT_SYMBOL(tipc_dump_done);
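
/*
 * The three hooks above form the start/walk/done lifecycle of a
 * netlink dump. A sketch of wiring them together (the actual
 * registrations live in the TIPC genetlink and diag code, not here):
 *
 *	struct netlink_dump_control c = {
 *		.start = tipc_dump_start,
 *		.dump = tipc_nl_sk_dump,
 *		.done = tipc_dump_done,
 *	};
 */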
3426
3427int tipc_sk_fill_sock_diag(struct sk_buff *skb, struct netlink_callback *cb,
3428 struct tipc_sock *tsk, u32 sk_filter_state,
3429 u64 (*tipc_diag_gen_cookie)(struct sock *sk))
3430{
3431 struct sock *sk = &tsk->sk;
3432 struct nlattr *attrs;
3433 struct nlattr *stat;
3434
3435	/* filter response w.r.t. sk_state */
3436 if (!(sk_filter_state & (1 << sk->sk_state)))
3437 return 0;
3438
3439 attrs = nla_nest_start_noflag(skb, TIPC_NLA_SOCK);
3440 if (!attrs)
3441 goto msg_cancel;
3442
3443 if (__tipc_nl_add_sk_info(skb, tsk))
3444 goto attr_msg_cancel;
3445
3446 if (nla_put_u32(skb, TIPC_NLA_SOCK_TYPE, (u32)sk->sk_type) ||
3447 nla_put_u32(skb, TIPC_NLA_SOCK_TIPC_STATE, (u32)sk->sk_state) ||
3448 nla_put_u32(skb, TIPC_NLA_SOCK_INO, sock_i_ino(sk)) ||
3449 nla_put_u32(skb, TIPC_NLA_SOCK_UID,
3450 from_kuid_munged(sk_user_ns(NETLINK_CB(cb->skb).sk),
3451 sock_i_uid(sk))) ||
3452 nla_put_u64_64bit(skb, TIPC_NLA_SOCK_COOKIE,
3453 tipc_diag_gen_cookie(sk),
3454 TIPC_NLA_SOCK_PAD))
3455 goto attr_msg_cancel;
3456
3457 stat = nla_nest_start_noflag(skb, TIPC_NLA_SOCK_STAT);
3458 if (!stat)
3459 goto attr_msg_cancel;
3460
3461 if (nla_put_u32(skb, TIPC_NLA_SOCK_STAT_RCVQ,
3462 skb_queue_len(&sk->sk_receive_queue)) ||
3463 nla_put_u32(skb, TIPC_NLA_SOCK_STAT_SENDQ,
3464 skb_queue_len(&sk->sk_write_queue)) ||
3465 nla_put_u32(skb, TIPC_NLA_SOCK_STAT_DROP,
3466 atomic_read(&sk->sk_drops)))
3467 goto stat_msg_cancel;
3468
3469 if (tsk->cong_link_cnt &&
3470 nla_put_flag(skb, TIPC_NLA_SOCK_STAT_LINK_CONG))
3471 goto stat_msg_cancel;
3472
3473 if (tsk_conn_cong(tsk) &&
3474 nla_put_flag(skb, TIPC_NLA_SOCK_STAT_CONN_CONG))
3475 goto stat_msg_cancel;
3476
3477 nla_nest_end(skb, stat);
3478
3479 if (tsk->group)
3480 if (tipc_group_fill_sock_diag(tsk->group, skb))
3481 goto stat_msg_cancel;
3482
3483 nla_nest_end(skb, attrs);
3484
3485 return 0;
3486
3487stat_msg_cancel:
3488 nla_nest_cancel(skb, stat);
3489attr_msg_cancel:
3490 nla_nest_cancel(skb, attrs);
3491msg_cancel:
3492 return -EMSGSIZE;
3493}
3494EXPORT_SYMBOL(tipc_sk_fill_sock_diag);
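
/*
 * Example (illustrative): sk_filter_state is a bitmask over socket
 * states, so a diag dumper interested only in established and
 * listening sockets would pass (gen_cookie being a hypothetical
 * cookie generator):
 *
 *	u32 states = (1 << TIPC_ESTABLISHED) | (1 << TIPC_LISTEN);
 *
 *	tipc_sk_fill_sock_diag(skb, cb, tsk, states, gen_cookie);
 */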
3495
3496int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb)
3497{
3498 return tipc_nl_sk_walk(skb, cb, __tipc_nl_add_sk);
3499}
3500
3501/* Caller should hold socket lock for the passed tipc socket. */
3502static int __tipc_nl_add_sk_publ(struct sk_buff *skb,
3503 struct netlink_callback *cb,
3504 struct publication *publ)
3505{
3506 void *hdr;
3507 struct nlattr *attrs;
3508
3509 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
3510 &tipc_genl_family, NLM_F_MULTI, TIPC_NL_PUBL_GET);
3511 if (!hdr)
3512 goto msg_cancel;
3513
3514 attrs = nla_nest_start_noflag(skb, TIPC_NLA_PUBL);
3515 if (!attrs)
3516 goto genlmsg_cancel;
3517
3518 if (nla_put_u32(skb, TIPC_NLA_PUBL_KEY, publ->key))
3519 goto attr_msg_cancel;
3520 if (nla_put_u32(skb, TIPC_NLA_PUBL_TYPE, publ->type))
3521 goto attr_msg_cancel;
3522 if (nla_put_u32(skb, TIPC_NLA_PUBL_LOWER, publ->lower))
3523 goto attr_msg_cancel;
3524 if (nla_put_u32(skb, TIPC_NLA_PUBL_UPPER, publ->upper))
3525 goto attr_msg_cancel;
3526
3527 nla_nest_end(skb, attrs);
3528 genlmsg_end(skb, hdr);
3529
3530 return 0;
3531
3532attr_msg_cancel:
3533 nla_nest_cancel(skb, attrs);
3534genlmsg_cancel:
3535 genlmsg_cancel(skb, hdr);
3536msg_cancel:
3537 return -EMSGSIZE;
3538}
3539
3540/* Caller should hold socket lock for the passed tipc socket. */
3541static int __tipc_nl_list_sk_publ(struct sk_buff *skb,
3542 struct netlink_callback *cb,
3543 struct tipc_sock *tsk, u32 *last_publ)
3544{
3545 int err;
3546 struct publication *p;
3547
3548 if (*last_publ) {
3549 list_for_each_entry(p, &tsk->publications, binding_sock) {
3550 if (p->key == *last_publ)
3551 break;
3552 }
3553 if (p->key != *last_publ) {
3554			/* We never set seq or call nl_dump_check_consistent(),
3555			 * so setting prev_seq here will cause the consistency
3556			 * check to fail in the netlink callback handler,
3557			 * resulting in the last NLMSG_DONE message having the
3558			 * NLM_F_DUMP_INTR flag set.
3559			 */
3560 cb->prev_seq = 1;
3561 *last_publ = 0;
3562 return -EPIPE;
3563 }
3564 } else {
3565 p = list_first_entry(&tsk->publications, struct publication,
3566 binding_sock);
3567 }
3568
3569 list_for_each_entry_from(p, &tsk->publications, binding_sock) {
3570 err = __tipc_nl_add_sk_publ(skb, cb, p);
3571 if (err) {
3572 *last_publ = p->key;
3573 return err;
3574 }
3575 }
3576 *last_publ = 0;
3577
3578 return 0;
3579}
3580
3581int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb)
3582{
3583 int err;
3584 u32 tsk_portid = cb->args[0];
3585 u32 last_publ = cb->args[1];
3586 u32 done = cb->args[2];
3587 struct net *net = sock_net(skb->sk);
3588 struct tipc_sock *tsk;
3589
3590 if (!tsk_portid) {
3591 struct nlattr **attrs;
3592 struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1];
3593
3594 err = tipc_nlmsg_parse(cb->nlh, &attrs);
3595 if (err)
3596 return err;
3597
3598 if (!attrs[TIPC_NLA_SOCK])
3599 return -EINVAL;
3600
3601 err = nla_parse_nested_deprecated(sock, TIPC_NLA_SOCK_MAX,
3602 attrs[TIPC_NLA_SOCK],
3603 tipc_nl_sock_policy, NULL);
3604 if (err)
3605 return err;
3606
3607 if (!sock[TIPC_NLA_SOCK_REF])
3608 return -EINVAL;
3609
3610 tsk_portid = nla_get_u32(sock[TIPC_NLA_SOCK_REF]);
3611 }
3612
3613 if (done)
3614 return 0;
3615
3616 tsk = tipc_sk_lookup(net, tsk_portid);
3617 if (!tsk)
3618 return -EINVAL;
3619
3620 lock_sock(&tsk->sk);
3621 err = __tipc_nl_list_sk_publ(skb, cb, tsk, &last_publ);
3622 if (!err)
3623 done = 1;
3624 release_sock(&tsk->sk);
3625 sock_put(&tsk->sk);
3626
3627 cb->args[0] = tsk_portid;
3628 cb->args[1] = last_publ;
3629 cb->args[2] = done;
3630
3631 return skb->len;
3632}
3633
3634/**
3635 * tipc_sk_filtering - check if a socket should be traced
3636 * @sk: the socket to be examined
3637 * @sysctl_tipc_sk_filter[]: the filter tuple, in the order
3638 * (portid, sock type, name type, name lower, name upper)
3639 *
3640 * Returns true if the socket matches the filter tuple (a field
3641 * value of 0 matches anything), or if no tuple is set at all
3642 * (all fields 0); otherwise false
3643 */
3644bool tipc_sk_filtering(struct sock *sk)
3645{
3646 struct tipc_sock *tsk;
3647 struct publication *p;
3648 u32 _port, _sktype, _type, _lower, _upper;
3649 u32 type = 0, lower = 0, upper = 0;
3650
3651 if (!sk)
3652 return true;
3653
3654 tsk = tipc_sk(sk);
3655
3656 _port = sysctl_tipc_sk_filter[0];
3657 _sktype = sysctl_tipc_sk_filter[1];
3658 _type = sysctl_tipc_sk_filter[2];
3659 _lower = sysctl_tipc_sk_filter[3];
3660 _upper = sysctl_tipc_sk_filter[4];
3661
3662 if (!_port && !_sktype && !_type && !_lower && !_upper)
3663 return true;
3664
3665 if (_port)
3666 return (_port == tsk->portid);
3667
3668 if (_sktype && _sktype != sk->sk_type)
3669 return false;
3670
3671 if (tsk->published) {
3672 p = list_first_entry_or_null(&tsk->publications,
3673 struct publication, binding_sock);
3674 if (p) {
3675 type = p->type;
3676 lower = p->lower;
3677 upper = p->upper;
3678 }
3679 }
3680
3681 if (!tipc_sk_type_connectionless(sk)) {
3682 type = tsk->conn_type;
3683 lower = tsk->conn_instance;
3684 upper = tsk->conn_instance;
3685 }
3686
3687 if ((_type && _type != type) || (_lower && _lower != lower) ||
3688 (_upper && _upper != upper))
3689 return false;
3690
3691 return true;
3692}
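
/*
 * Worked example (illustrative values): with the filter tuple set to
 *
 *	u32 sysctl_tipc_sk_filter[5] = {0, 0, 4711, 0, 0};
 *
 * only sockets published with, or connected to, name type 4711 are
 * traced; each zero field acts as a wildcard, and a non-zero portid
 * field would short-circuit all the other checks.
 */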
3693
3694u32 tipc_sock_get_portid(struct sock *sk)
3695{
3696 return (sk) ? (tipc_sk(sk))->portid : 0;
3697}
3698
3699/**
3700 * tipc_sk_overlimit1 - check if socket rx queue is about to be overloaded;
3701 * both the rcv and backlog queues are considered
3702 * @sk: tipc sk to be checked
3703 * @skb: tipc msg to be checked
3704 *
3705 * Returns true if the socket rx queue allocation is > 90%, otherwise false
3706 */
3707
3708bool tipc_sk_overlimit1(struct sock *sk, struct sk_buff *skb)
3709{
3710 atomic_t *dcnt = &tipc_sk(sk)->dupl_rcvcnt;
3711 unsigned int lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
3712 unsigned int qsize = sk->sk_backlog.len + sk_rmem_alloc_get(sk);
3713
3714 return (qsize > lim * 90 / 100);
3715}
3716
3717/**
3718 * tipc_sk_overlimit2 - check if socket rx queue is about to be overloaded;
3719 * only the rcv queue is considered
3720 * @sk: tipc sk to be checked
3721 * @skb: tipc msg to be checked
3722 *
3723 * Returns true if the socket rx queue allocation is > 90%, otherwise false
3724 */
3725
3726bool tipc_sk_overlimit2(struct sock *sk, struct sk_buff *skb)
3727{
3728 unsigned int lim = rcvbuf_limit(sk, skb);
3729 unsigned int qsize = sk_rmem_alloc_get(sk);
3730
3731 return (qsize > lim * 90 / 100);
3732}
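
/*
 * Worked example (illustrative numbers) for both helpers: with a
 * computed limit of 2 MB, overload is reported once the measured
 * queue size exceeds 2097152 * 90 / 100 = 1887436 bytes, i.e. at
 * 90% occupancy:
 *
 *	unsigned int lim = 2 * 1024 * 1024;
 *	bool over = 1900000 > lim * 90 / 100;	// true
 */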
3733
3734/**
3735 * tipc_sk_dump - dump TIPC socket
3736 * @sk: tipc sk to be dumped
3737 * @dqueues: bitmask selecting which socket queues to dump:
3738 * - TIPC_DUMP_NONE: don't dump socket queues
3739 * - TIPC_DUMP_SK_SNDQ: dump socket send queue
3740 * - TIPC_DUMP_SK_RCVQ: dump socket rcv queue
3741 * - TIPC_DUMP_SK_BKLGQ: dump socket backlog queue
3742 * - TIPC_DUMP_ALL: dump all the socket queues above
3743 * @buf: returned buffer holding the formatted dump data
3744 */
3745int tipc_sk_dump(struct sock *sk, u16 dqueues, char *buf)
3746{
3747 int i = 0;
3748 size_t sz = (dqueues) ? SK_LMAX : SK_LMIN;
3749 struct tipc_sock *tsk;
3750 struct publication *p;
3751 bool tsk_connected;
3752
3753 if (!sk) {
3754 i += scnprintf(buf, sz, "sk data: (null)\n");
3755 return i;
3756 }
3757
3758 tsk = tipc_sk(sk);
3759 tsk_connected = !tipc_sk_type_connectionless(sk);
3760
3761 i += scnprintf(buf, sz, "sk data: %u", sk->sk_type);
3762 i += scnprintf(buf + i, sz - i, " %d", sk->sk_state);
3763 i += scnprintf(buf + i, sz - i, " %x", tsk_own_node(tsk));
3764 i += scnprintf(buf + i, sz - i, " %u", tsk->portid);
3765 i += scnprintf(buf + i, sz - i, " | %u", tsk_connected);
3766 if (tsk_connected) {
3767 i += scnprintf(buf + i, sz - i, " %x", tsk_peer_node(tsk));
3768 i += scnprintf(buf + i, sz - i, " %u", tsk_peer_port(tsk));
3769 i += scnprintf(buf + i, sz - i, " %u", tsk->conn_type);
3770 i += scnprintf(buf + i, sz - i, " %u", tsk->conn_instance);
3771 }
3772 i += scnprintf(buf + i, sz - i, " | %u", tsk->published);
3773 if (tsk->published) {
3774 p = list_first_entry_or_null(&tsk->publications,
3775 struct publication, binding_sock);
3776 i += scnprintf(buf + i, sz - i, " %u", (p) ? p->type : 0);
3777 i += scnprintf(buf + i, sz - i, " %u", (p) ? p->lower : 0);
3778 i += scnprintf(buf + i, sz - i, " %u", (p) ? p->upper : 0);
3779 }
3780 i += scnprintf(buf + i, sz - i, " | %u", tsk->snd_win);
3781 i += scnprintf(buf + i, sz - i, " %u", tsk->rcv_win);
3782 i += scnprintf(buf + i, sz - i, " %u", tsk->max_pkt);
3783 i += scnprintf(buf + i, sz - i, " %x", tsk->peer_caps);
3784 i += scnprintf(buf + i, sz - i, " %u", tsk->cong_link_cnt);
3785 i += scnprintf(buf + i, sz - i, " %u", tsk->snt_unacked);
3786 i += scnprintf(buf + i, sz - i, " %u", tsk->rcv_unacked);
3787 i += scnprintf(buf + i, sz - i, " %u", atomic_read(&tsk->dupl_rcvcnt));
3788 i += scnprintf(buf + i, sz - i, " %u", sk->sk_shutdown);
3789 i += scnprintf(buf + i, sz - i, " | %d", sk_wmem_alloc_get(sk));
3790 i += scnprintf(buf + i, sz - i, " %d", sk->sk_sndbuf);
3791 i += scnprintf(buf + i, sz - i, " | %d", sk_rmem_alloc_get(sk));
3792 i += scnprintf(buf + i, sz - i, " %d", sk->sk_rcvbuf);
3793 i += scnprintf(buf + i, sz - i, " | %d\n", READ_ONCE(sk->sk_backlog.len));
3794
3795 if (dqueues & TIPC_DUMP_SK_SNDQ) {
3796 i += scnprintf(buf + i, sz - i, "sk_write_queue: ");
3797 i += tipc_list_dump(&sk->sk_write_queue, false, buf + i);
3798 }
3799
3800 if (dqueues & TIPC_DUMP_SK_RCVQ) {
3801 i += scnprintf(buf + i, sz - i, "sk_receive_queue: ");
3802 i += tipc_list_dump(&sk->sk_receive_queue, false, buf + i);
3803 }
3804
3805 if (dqueues & TIPC_DUMP_SK_BKLGQ) {
3806 i += scnprintf(buf + i, sz - i, "sk_backlog:\n head ");
3807 i += tipc_skb_dump(sk->sk_backlog.head, false, buf + i);
3808 if (sk->sk_backlog.tail != sk->sk_backlog.head) {
3809 i += scnprintf(buf + i, sz - i, " tail ");
3810 i += tipc_skb_dump(sk->sk_backlog.tail, false,
3811 buf + i);
3812 }
3813 }
3814
3815 return i;
3816}