   1/*
   2 * net/tipc/link.c: TIPC link code
   3 *
   4 * Copyright (c) 1996-2007, 2012-2016, Ericsson AB
   5 * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
   6 * All rights reserved.
   7 *
   8 * Redistribution and use in source and binary forms, with or without
   9 * modification, are permitted provided that the following conditions are met:
  10 *
  11 * 1. Redistributions of source code must retain the above copyright
  12 *    notice, this list of conditions and the following disclaimer.
  13 * 2. Redistributions in binary form must reproduce the above copyright
  14 *    notice, this list of conditions and the following disclaimer in the
  15 *    documentation and/or other materials provided with the distribution.
  16 * 3. Neither the names of the copyright holders nor the names of its
  17 *    contributors may be used to endorse or promote products derived from
  18 *    this software without specific prior written permission.
  19 *
  20 * Alternatively, this software may be distributed under the terms of the
  21 * GNU General Public License ("GPL") version 2 as published by the Free
  22 * Software Foundation.
  23 *
  24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  34 * POSSIBILITY OF SUCH DAMAGE.
  35 */
  36
  37#include "core.h"
  38#include "subscr.h"
  39#include "link.h"
  40#include "bcast.h"
  41#include "socket.h"
  42#include "name_distr.h"
  43#include "discover.h"
  44#include "netlink.h"
  45#include "monitor.h"
  46#include "trace.h"
  47#include "crypto.h"
  48
  49#include <linux/pkt_sched.h>
  50
  51struct tipc_stats {
  52	u32 sent_pkts;
  53	u32 recv_pkts;
  54	u32 sent_states;
  55	u32 recv_states;
  56	u32 sent_probes;
  57	u32 recv_probes;
  58	u32 sent_nacks;
  59	u32 recv_nacks;
  60	u32 sent_acks;
  61	u32 sent_bundled;
  62	u32 sent_bundles;
  63	u32 recv_bundled;
  64	u32 recv_bundles;
  65	u32 retransmitted;
  66	u32 sent_fragmented;
  67	u32 sent_fragments;
  68	u32 recv_fragmented;
  69	u32 recv_fragments;
  70	u32 link_congs;		/* # port sends blocked by congestion */
  71	u32 deferred_recv;
  72	u32 duplicates;
  73	u32 max_queue_sz;	/* send queue size high water mark */
  74	u32 accu_queue_sz;	/* used for send queue size profiling */
  75	u32 queue_sz_counts;	/* used for send queue size profiling */
  76	u32 msg_length_counts;	/* used for message length profiling */
  77	u32 msg_lengths_total;	/* used for message length profiling */
  78	u32 msg_length_profile[7]; /* used for msg. length profiling */
  79};
  80
  81/**
  82 * struct tipc_link - TIPC link data structure
  83 * @addr: network address of link's peer node
  84 * @name: link name character string
  85 * @net: pointer to namespace struct
  86 * @peer_session: link session # being used by peer end of link
  87 * @peer_bearer_id: bearer id used by link's peer endpoint
  88 * @bearer_id: local bearer id used by link
  89 * @tolerance: minimum link continuity loss needed to reset link [in ms]
  90 * @abort_limit: # of unacknowledged continuity probes needed to reset link
  91 * @state: current state of link FSM
  92 * @peer_caps: bitmap describing capabilities of peer node
  93 * @silent_intv_cnt: # of timer intervals without any reception from peer
  94 * @priority: current link priority
  95 * @net_plane: current link network plane ('A' through 'H')
  96 * @mon_state: cookie with information needed by link monitor
  97 * @mtu: current maximum packet size for this link
  98 * @advertised_mtu: advertised own mtu when link is being established
  99 * @backlogq: queue for messages waiting to be sent
 100 * @ackers: # of peers that need to ack each packet before it can be released
 101 * @acked: # last packet acked by a certain peer. Used for broadcast.
 102 * @rcv_nxt: next sequence number to expect for inbound messages
 103 * @inputq: buffer queue for messages to be delivered upwards
 104 * @namedq: buffer queue for name table messages to be delivered upwards
 105 * @wakeupq: linked list of wakeup msgs waiting for link congestion to abate
 106 * @reasm_buf: head of partially reassembled inbound message fragments
 107 * @stats: collects statistics regarding link activity
 108 * @session: session to be used by link
 109 * @snd_nxt_state: next send seq number
 110 * @rcv_nxt_state: next rcv seq number
 111 * @in_session: have received ACTIVATE_MSG from peer
 112 * @active: link is active
 113 * @if_name: associated interface name
 114 * @rst_cnt: link reset counter
 115 * @drop_point: seq number for failover handling (FIXME)
 116 * @failover_reasm_skb: saved failover msg ptr (FIXME)
 117 * @failover_deferdq: deferred message queue for failover processing (FIXME)
 118 * @transmq: the link's transmit queue
 119 * @backlog: link's backlog by priority (importance)
 120 * @snd_nxt: next sequence number to be used
 121 * @rcv_unacked: # messages read by user, but not yet acked back to peer
 122 * @deferdq: deferred receive queue
 123 * @window: sliding window size for congestion handling
 124 * @min_win: minimal send window to be used by link
 125 * @ssthresh: slow start threshold for congestion handling
 126 * @max_win: maximal send window to be used by link
 127 * @cong_acks: congestion acks for congestion avoidance (FIXME)
 128 * @checkpoint: seq number for congestion window size handling
 129 * @reasm_tnlmsg: fragmentation/reassembly area for tunnel protocol message
 130 * @last_gap: last gap ack blocks for bcast (FIXME)
 131 * @last_ga: ptr to gap ack blocks
 132 * @bc_rcvlink: the peer specific link used for broadcast reception
 133 * @bc_sndlink: the namespace global link used for broadcast sending
 134 * @nack_state: bcast nack state
 135 * @bc_peer_is_up: peer has acked the bcast init msg
 136 */
 137struct tipc_link {
 138	u32 addr;
 139	char name[TIPC_MAX_LINK_NAME];
 140	struct net *net;
 141
 142	/* Management and link supervision data */
 143	u16 peer_session;
 144	u16 session;
 145	u16 snd_nxt_state;
 146	u16 rcv_nxt_state;
 147	u32 peer_bearer_id;
 148	u32 bearer_id;
 149	u32 tolerance;
 150	u32 abort_limit;
 151	u32 state;
 152	u16 peer_caps;
 153	bool in_session;
 154	bool active;
 155	u32 silent_intv_cnt;
 156	char if_name[TIPC_MAX_IF_NAME];
 157	u32 priority;
 158	char net_plane;
 159	struct tipc_mon_state mon_state;
 160	u16 rst_cnt;
 161
 162	/* Failover/synch */
 163	u16 drop_point;
 164	struct sk_buff *failover_reasm_skb;
 165	struct sk_buff_head failover_deferdq;
 166
 167	/* Max packet negotiation */
 168	u16 mtu;
 169	u16 advertised_mtu;
 170
 171	/* Sending */
 172	struct sk_buff_head transmq;
 173	struct sk_buff_head backlogq;
 174	struct {
 175		u16 len;
 176		u16 limit;
 177		struct sk_buff *target_bskb;
 178	} backlog[5];
 179	u16 snd_nxt;
 180
 181	/* Reception */
 182	u16 rcv_nxt;
 183	u32 rcv_unacked;
 184	struct sk_buff_head deferdq;
 185	struct sk_buff_head *inputq;
 186	struct sk_buff_head *namedq;
 187
 188	/* Congestion handling */
 189	struct sk_buff_head wakeupq;
 190	u16 window;
 191	u16 min_win;
 192	u16 ssthresh;
 193	u16 max_win;
 194	u16 cong_acks;
 195	u16 checkpoint;
 196
 197	/* Fragmentation/reassembly */
 198	struct sk_buff *reasm_buf;
 199	struct sk_buff *reasm_tnlmsg;
 200
 201	/* Broadcast */
 202	u16 ackers;
 203	u16 acked;
 204	u16 last_gap;
 205	struct tipc_gap_ack_blks *last_ga;
 206	struct tipc_link *bc_rcvlink;
 207	struct tipc_link *bc_sndlink;
 208	u8 nack_state;
 209	bool bc_peer_is_up;
 210
 211	/* Statistics */
 212	struct tipc_stats stats;
 213};
 214
 215/*
 216 * Error message prefixes
 217 */
 218static const char *link_co_err = "Link tunneling error, ";
 219static const char *link_rst_msg = "Resetting link ";
 220
 221/* Send states for broadcast NACKs
 222 */
 223enum {
 224	BC_NACK_SND_CONDITIONAL,
 225	BC_NACK_SND_UNCONDITIONAL,
 226	BC_NACK_SND_SUPPRESS,
 227};
 228
 229#define TIPC_BC_RETR_LIM  (jiffies + msecs_to_jiffies(10))
 230#define TIPC_UC_RETR_TIME (jiffies + msecs_to_jiffies(1))
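    /* Both macros evaluate to an absolute deadline in jiffies, i.e. the
     * earliest time at which a packet may be retransmitted again: at most
     * once per 10 ms on a broadcast link and once per 1 ms on a unicast
     * link (see tipc_link_set_skb_retransmit_time() below).
     */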
 231
 232/* Link FSM states:
 233 */
 234enum {
 235	LINK_ESTABLISHED     = 0xe,
 236	LINK_ESTABLISHING    = 0xe  << 4,
 237	LINK_RESET           = 0x1  << 8,
 238	LINK_RESETTING       = 0x2  << 12,
 239	LINK_PEER_RESET      = 0xd  << 16,
 240	LINK_FAILINGOVER     = 0xf  << 20,
 241	LINK_SYNCHING        = 0xc  << 24
 242};
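    /* Each state above occupies its own bit/nibble position, so a link's
     * state can be checked against several states with one mask operation.
     * A sketch of the pattern used by the helpers below:
     *
     *	l->state & (LINK_ESTABLISHED | LINK_SYNCHING)	-> link is up
     *	l->state & (LINK_RESETTING | LINK_PEER_RESET |
     *		    LINK_FAILINGOVER)			-> link is blocked
     */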
 243
 244static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
 245			       struct sk_buff_head *xmitq);
 246static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
 247				      bool probe_reply, u16 rcvgap,
 248				      int tolerance, int priority,
 249				      struct sk_buff_head *xmitq);
 250static void link_print(struct tipc_link *l, const char *str);
 251static int tipc_link_build_nack_msg(struct tipc_link *l,
 252				    struct sk_buff_head *xmitq);
 253static void tipc_link_build_bc_init_msg(struct tipc_link *l,
 254					struct sk_buff_head *xmitq);
 255static u8 __tipc_build_gap_ack_blks(struct tipc_gap_ack_blks *ga,
 256				    struct tipc_link *l, u8 start_index);
 257static u16 tipc_build_gap_ack_blks(struct tipc_link *l, struct tipc_msg *hdr);
 258static int tipc_link_advance_transmq(struct tipc_link *l, struct tipc_link *r,
 259				     u16 acked, u16 gap,
 260				     struct tipc_gap_ack_blks *ga,
 261				     struct sk_buff_head *xmitq,
 262				     bool *retransmitted, int *rc);
 263static void tipc_link_update_cwin(struct tipc_link *l, int released,
 264				  bool retransmitted);
 265/*
 266 *  Simple non-static link routines (i.e. referenced outside this file)
 267 */
 268bool tipc_link_is_up(struct tipc_link *l)
 269{
 270	return l->state & (LINK_ESTABLISHED | LINK_SYNCHING);
 271}
 272
 273bool tipc_link_peer_is_down(struct tipc_link *l)
 274{
 275	return l->state == LINK_PEER_RESET;
 276}
 277
 278bool tipc_link_is_reset(struct tipc_link *l)
 279{
 280	return l->state & (LINK_RESET | LINK_FAILINGOVER | LINK_ESTABLISHING);
 281}
 282
 283bool tipc_link_is_establishing(struct tipc_link *l)
 284{
 285	return l->state == LINK_ESTABLISHING;
 286}
 287
 288bool tipc_link_is_synching(struct tipc_link *l)
 289{
 290	return l->state == LINK_SYNCHING;
 291}
 292
 293bool tipc_link_is_failingover(struct tipc_link *l)
 294{
 295	return l->state == LINK_FAILINGOVER;
 296}
 297
 298bool tipc_link_is_blocked(struct tipc_link *l)
 299{
 300	return l->state & (LINK_RESETTING | LINK_PEER_RESET | LINK_FAILINGOVER);
 301}
 302
 303static bool link_is_bc_sndlink(struct tipc_link *l)
 304{
 305	return !l->bc_sndlink;
 306}
 307
 308static bool link_is_bc_rcvlink(struct tipc_link *l)
 309{
 310	return ((l->bc_rcvlink == l) && !link_is_bc_sndlink(l));
 311}
 312
 313void tipc_link_set_active(struct tipc_link *l, bool active)
 314{
 315	l->active = active;
 316}
 317
 318u32 tipc_link_id(struct tipc_link *l)
 319{
 320	return l->peer_bearer_id << 16 | l->bearer_id;
 321}
 322
 323int tipc_link_min_win(struct tipc_link *l)
 324{
 325	return l->min_win;
 326}
 327
 328int tipc_link_max_win(struct tipc_link *l)
 329{
 330	return l->max_win;
 331}
 332
 333int tipc_link_prio(struct tipc_link *l)
 334{
 335	return l->priority;
 336}
 337
 338unsigned long tipc_link_tolerance(struct tipc_link *l)
 339{
 340	return l->tolerance;
 341}
 342
 343struct sk_buff_head *tipc_link_inputq(struct tipc_link *l)
 344{
 345	return l->inputq;
 346}
 347
 348char tipc_link_plane(struct tipc_link *l)
 349{
 350	return l->net_plane;
 351}
 352
 353struct net *tipc_link_net(struct tipc_link *l)
 354{
 355	return l->net;
 356}
 357
 358void tipc_link_update_caps(struct tipc_link *l, u16 capabilities)
 359{
 360	l->peer_caps = capabilities;
 361}
 362
 363void tipc_link_add_bc_peer(struct tipc_link *snd_l,
 364			   struct tipc_link *uc_l,
 365			   struct sk_buff_head *xmitq)
 366{
 367	struct tipc_link *rcv_l = uc_l->bc_rcvlink;
 368
 369	snd_l->ackers++;
 370	rcv_l->acked = snd_l->snd_nxt - 1;
 371	snd_l->state = LINK_ESTABLISHED;
 372	tipc_link_build_bc_init_msg(uc_l, xmitq);
 373}
 374
 375void tipc_link_remove_bc_peer(struct tipc_link *snd_l,
 376			      struct tipc_link *rcv_l,
 377			      struct sk_buff_head *xmitq)
 378{
 379	u16 ack = snd_l->snd_nxt - 1;
 380
 381	snd_l->ackers--;
 382	rcv_l->bc_peer_is_up = true;
 383	rcv_l->state = LINK_ESTABLISHED;
 384	tipc_link_bc_ack_rcv(rcv_l, ack, 0, NULL, xmitq, NULL);
 385	trace_tipc_link_reset(rcv_l, TIPC_DUMP_ALL, "bclink removed!");
 386	tipc_link_reset(rcv_l);
 387	rcv_l->state = LINK_RESET;
 388	if (!snd_l->ackers) {
 389		trace_tipc_link_reset(snd_l, TIPC_DUMP_ALL, "zero ackers!");
 390		tipc_link_reset(snd_l);
 391		snd_l->state = LINK_RESET;
 392		__skb_queue_purge(xmitq);
 393	}
 394}
 395
 396int tipc_link_bc_peers(struct tipc_link *l)
 397{
 398	return l->ackers;
 399}
 400
 401static u16 link_bc_rcv_gap(struct tipc_link *l)
 402{
 403	struct sk_buff *skb = skb_peek(&l->deferdq);
 404	u16 gap = 0;
 405
 406	if (more(l->snd_nxt, l->rcv_nxt))
 407		gap = l->snd_nxt - l->rcv_nxt;
 408	if (skb)
 409		gap = buf_seqno(skb) - l->rcv_nxt;
 410	return gap;
 411}
 412
 413void tipc_link_set_mtu(struct tipc_link *l, int mtu)
 414{
 415	l->mtu = mtu;
 416}
 417
 418int tipc_link_mtu(struct tipc_link *l)
 419{
 420	return l->mtu;
 421}
 422
 423int tipc_link_mss(struct tipc_link *l)
 424{
 425#ifdef CONFIG_TIPC_CRYPTO
 426	return l->mtu - INT_H_SIZE - EMSG_OVERHEAD;
 427#else
 428	return l->mtu - INT_H_SIZE;
 429#endif
 430}
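    /* Illustrative numbers, assuming an Ethernet bearer with a 1500 byte
     * MTU: without crypto the usable MSS is 1500 - INT_H_SIZE (40) = 1460
     * bytes; with CONFIG_TIPC_CRYPTO the per-packet encryption overhead
     * (EMSG_OVERHEAD) is subtracted as well.
     */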
 431
 432u16 tipc_link_rcv_nxt(struct tipc_link *l)
 433{
 434	return l->rcv_nxt;
 435}
 436
 437u16 tipc_link_acked(struct tipc_link *l)
 438{
 439	return l->acked;
 440}
 441
 442char *tipc_link_name(struct tipc_link *l)
 443{
 444	return l->name;
 445}
 446
 447u32 tipc_link_state(struct tipc_link *l)
 448{
 449	return l->state;
 450}
 451
 452/**
 453 * tipc_link_create - create a new link
 454 * @net: pointer to associated network namespace
 455 * @if_name: associated interface name
 456 * @bearer_id: id (index) of associated bearer
 457 * @tolerance: link tolerance to be used by link
 458 * @net_plane: network plane (A,B,C..) this link belongs to
 459 * @mtu: mtu to be advertised by link
 460 * @priority: priority to be used by link
 461 * @min_win: minimal send window to be used by link
 462 * @max_win: maximal send window to be used by link
 463 * @session: session to be used by link
 464 * @peer: node id of peer node
 465 * @peer_caps: bitmap describing peer node capabilities
 466 * @bc_sndlink: the namespace global link used for broadcast sending
 467 * @bc_rcvlink: the peer specific link used for broadcast reception
 468 * @inputq: queue to put messages ready for delivery
 469 * @namedq: queue to put binding table update messages ready for delivery
 470 * @link: return value, pointer to put the created link
 471 * @self: local unicast link id
 472 * @peer_id: 128-bit ID of peer
 473 *
 474 * Return: true if link was created, otherwise false
 475 */
 476bool tipc_link_create(struct net *net, char *if_name, int bearer_id,
 477		      int tolerance, char net_plane, u32 mtu, int priority,
 478		      u32 min_win, u32 max_win, u32 session, u32 self,
 479		      u32 peer, u8 *peer_id, u16 peer_caps,
 480		      struct tipc_link *bc_sndlink,
 481		      struct tipc_link *bc_rcvlink,
 482		      struct sk_buff_head *inputq,
 483		      struct sk_buff_head *namedq,
 484		      struct tipc_link **link)
 485{
 486	char peer_str[NODE_ID_STR_LEN] = {0,};
 487	char self_str[NODE_ID_STR_LEN] = {0,};
 488	struct tipc_link *l;
 489
 490	l = kzalloc(sizeof(*l), GFP_ATOMIC);
 491	if (!l)
 492		return false;
 493	*link = l;
 494	l->session = session;
 495
 496	/* Set link name for unicast links only */
 497	if (peer_id) {
 498		tipc_nodeid2string(self_str, tipc_own_id(net));
 499		if (strlen(self_str) > 16)
 500			sprintf(self_str, "%x", self);
 501		tipc_nodeid2string(peer_str, peer_id);
 502		if (strlen(peer_str) > 16)
 503			sprintf(peer_str, "%x", peer);
 504	}
 505	/* Peer i/f name will be completed by reset/activate message */
 506	snprintf(l->name, sizeof(l->name), "%s:%s-%s:unknown",
 507		 self_str, if_name, peer_str);
 508
 509	strcpy(l->if_name, if_name);
 510	l->addr = peer;
 511	l->peer_caps = peer_caps;
 512	l->net = net;
 513	l->in_session = false;
 514	l->bearer_id = bearer_id;
 515	l->tolerance = tolerance;
 516	if (bc_rcvlink)
 517		bc_rcvlink->tolerance = tolerance;
 518	l->net_plane = net_plane;
 519	l->advertised_mtu = mtu;
 520	l->mtu = mtu;
 521	l->priority = priority;
 522	tipc_link_set_queue_limits(l, min_win, max_win);
 523	l->ackers = 1;
 524	l->bc_sndlink = bc_sndlink;
 525	l->bc_rcvlink = bc_rcvlink;
 526	l->inputq = inputq;
 527	l->namedq = namedq;
 528	l->state = LINK_RESETTING;
 529	__skb_queue_head_init(&l->transmq);
 530	__skb_queue_head_init(&l->backlogq);
 531	__skb_queue_head_init(&l->deferdq);
 532	__skb_queue_head_init(&l->failover_deferdq);
 533	skb_queue_head_init(&l->wakeupq);
 534	skb_queue_head_init(l->inputq);
 535	return true;
 536}
 537
 538/**
 539 * tipc_link_bc_create - create new link to be used for broadcast
 540 * @net: pointer to associated network namespace
 541 * @mtu: mtu to be used initially if no peers
 542 * @min_win: minimal send window to be used by link
 543 * @max_win: maximal send window to be used by link
 544 * @inputq: queue to put messages ready for delivery
 545 * @namedq: queue to put binding table update messages ready for delivery
 546 * @link: return value, pointer to put the created link
 547 * @ownnode: identity of own node
 548 * @peer: node id of peer node
 549 * @peer_id: 128-bit ID of peer
 550 * @peer_caps: bitmap describing peer node capabilities
 551 * @bc_sndlink: the namespace global link used for broadcast sending
 552 *
 553 * Return: true if link was created, otherwise false
 554 */
 555bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer, u8 *peer_id,
 556			 int mtu, u32 min_win, u32 max_win, u16 peer_caps,
 557			 struct sk_buff_head *inputq,
 558			 struct sk_buff_head *namedq,
 559			 struct tipc_link *bc_sndlink,
 560			 struct tipc_link **link)
 561{
 562	struct tipc_link *l;
 563
 564	if (!tipc_link_create(net, "", MAX_BEARERS, 0, 'Z', mtu, 0, min_win,
 565			      max_win, 0, ownnode, peer, NULL, peer_caps,
 566			      bc_sndlink, NULL, inputq, namedq, link))
 567		return false;
 568
 569	l = *link;
 570	if (peer_id) {
 571		char peer_str[NODE_ID_STR_LEN] = {0,};
 572
 573		tipc_nodeid2string(peer_str, peer_id);
 574		if (strlen(peer_str) > 16)
 575			sprintf(peer_str, "%x", peer);
 576		/* Broadcast receiver link name: "broadcast-link:<peer>" */
 577		snprintf(l->name, sizeof(l->name), "%s:%s", tipc_bclink_name,
 578			 peer_str);
 579	} else {
 580		strcpy(l->name, tipc_bclink_name);
 581	}
 582	trace_tipc_link_reset(l, TIPC_DUMP_ALL, "bclink created!");
 583	tipc_link_reset(l);
 584	l->state = LINK_RESET;
 585	l->ackers = 0;
 586	l->bc_rcvlink = l;
 587
 588	/* Broadcast send link is always up */
 589	if (link_is_bc_sndlink(l))
 590		l->state = LINK_ESTABLISHED;
 591
 592	/* Disable replicast if even a single peer doesn't support it */
 593	if (link_is_bc_rcvlink(l) && !(peer_caps & TIPC_BCAST_RCAST))
 594		tipc_bcast_toggle_rcast(net, false);
 595
 596	return true;
 597}
 598
 599/**
 600 * tipc_link_fsm_evt - link finite state machine
 601 * @l: pointer to link
 602 * @evt: state machine event to be processed
 603 */
 604int tipc_link_fsm_evt(struct tipc_link *l, int evt)
 605{
 606	int rc = 0;
 607	int old_state = l->state;
 608
 609	switch (l->state) {
 610	case LINK_RESETTING:
 611		switch (evt) {
 612		case LINK_PEER_RESET_EVT:
 613			l->state = LINK_PEER_RESET;
 614			break;
 615		case LINK_RESET_EVT:
 616			l->state = LINK_RESET;
 617			break;
 618		case LINK_FAILURE_EVT:
 619		case LINK_FAILOVER_BEGIN_EVT:
 620		case LINK_ESTABLISH_EVT:
 621		case LINK_FAILOVER_END_EVT:
 622		case LINK_SYNCH_BEGIN_EVT:
 623		case LINK_SYNCH_END_EVT:
 624		default:
 625			goto illegal_evt;
 626		}
 627		break;
 628	case LINK_RESET:
 629		switch (evt) {
 630		case LINK_PEER_RESET_EVT:
 631			l->state = LINK_ESTABLISHING;
 632			break;
 633		case LINK_FAILOVER_BEGIN_EVT:
 634			l->state = LINK_FAILINGOVER;
 635			break;
 636		case LINK_FAILURE_EVT:
 637		case LINK_RESET_EVT:
 638		case LINK_ESTABLISH_EVT:
 639		case LINK_FAILOVER_END_EVT:
 640			break;
 641		case LINK_SYNCH_BEGIN_EVT:
 642		case LINK_SYNCH_END_EVT:
 643		default:
 644			goto illegal_evt;
 645		}
 646		break;
 647	case LINK_PEER_RESET:
 648		switch (evt) {
 649		case LINK_RESET_EVT:
 650			l->state = LINK_ESTABLISHING;
 651			break;
 652		case LINK_PEER_RESET_EVT:
 653		case LINK_ESTABLISH_EVT:
 654		case LINK_FAILURE_EVT:
 655			break;
 656		case LINK_SYNCH_BEGIN_EVT:
 657		case LINK_SYNCH_END_EVT:
 658		case LINK_FAILOVER_BEGIN_EVT:
 659		case LINK_FAILOVER_END_EVT:
 660		default:
 661			goto illegal_evt;
 662		}
 663		break;
 664	case LINK_FAILINGOVER:
 665		switch (evt) {
 666		case LINK_FAILOVER_END_EVT:
 667			l->state = LINK_RESET;
 668			break;
 669		case LINK_PEER_RESET_EVT:
 670		case LINK_RESET_EVT:
 671		case LINK_ESTABLISH_EVT:
 672		case LINK_FAILURE_EVT:
 673			break;
 674		case LINK_FAILOVER_BEGIN_EVT:
 675		case LINK_SYNCH_BEGIN_EVT:
 676		case LINK_SYNCH_END_EVT:
 677		default:
 678			goto illegal_evt;
 679		}
 680		break;
 681	case LINK_ESTABLISHING:
 682		switch (evt) {
 683		case LINK_ESTABLISH_EVT:
 684			l->state = LINK_ESTABLISHED;
 685			break;
 686		case LINK_FAILOVER_BEGIN_EVT:
 687			l->state = LINK_FAILINGOVER;
 688			break;
 689		case LINK_RESET_EVT:
 690			l->state = LINK_RESET;
 691			break;
 692		case LINK_FAILURE_EVT:
 693		case LINK_PEER_RESET_EVT:
 694		case LINK_SYNCH_BEGIN_EVT:
 695		case LINK_FAILOVER_END_EVT:
 696			break;
 697		case LINK_SYNCH_END_EVT:
 698		default:
 699			goto illegal_evt;
 700		}
 701		break;
 702	case LINK_ESTABLISHED:
 703		switch (evt) {
 704		case LINK_PEER_RESET_EVT:
 705			l->state = LINK_PEER_RESET;
 706			rc |= TIPC_LINK_DOWN_EVT;
 707			break;
 708		case LINK_FAILURE_EVT:
 709			l->state = LINK_RESETTING;
 710			rc |= TIPC_LINK_DOWN_EVT;
 711			break;
 712		case LINK_RESET_EVT:
 713			l->state = LINK_RESET;
 714			break;
 715		case LINK_ESTABLISH_EVT:
 716		case LINK_SYNCH_END_EVT:
 717			break;
 718		case LINK_SYNCH_BEGIN_EVT:
 719			l->state = LINK_SYNCHING;
 720			break;
 721		case LINK_FAILOVER_BEGIN_EVT:
 722		case LINK_FAILOVER_END_EVT:
 723		default:
 724			goto illegal_evt;
 725		}
 726		break;
 727	case LINK_SYNCHING:
 728		switch (evt) {
 729		case LINK_PEER_RESET_EVT:
 730			l->state = LINK_PEER_RESET;
 731			rc |= TIPC_LINK_DOWN_EVT;
 732			break;
 733		case LINK_FAILURE_EVT:
 734			l->state = LINK_RESETTING;
 735			rc |= TIPC_LINK_DOWN_EVT;
 736			break;
 737		case LINK_RESET_EVT:
 738			l->state = LINK_RESET;
 739			break;
 740		case LINK_ESTABLISH_EVT:
 741		case LINK_SYNCH_BEGIN_EVT:
 742			break;
 743		case LINK_SYNCH_END_EVT:
 744			l->state = LINK_ESTABLISHED;
 745			break;
 746		case LINK_FAILOVER_BEGIN_EVT:
 747		case LINK_FAILOVER_END_EVT:
 748		default:
 749			goto illegal_evt;
 750		}
 751		break;
 752	default:
 753		pr_err("Unknown FSM state %x in %s\n", l->state, l->name);
 754	}
 755	trace_tipc_link_fsm(l->name, old_state, l->state, evt);
 756	return rc;
 757illegal_evt:
 758	pr_err("Illegal FSM event %x in state %x on link %s\n",
 759	       evt, l->state, l->name);
 760	trace_tipc_link_fsm(l->name, old_state, l->state, evt);
 761	return rc;
 762}
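    /* A normal link bring-up, expressed as FSM transitions (a sketch; the
     * switch above is authoritative):
     *
     *	LINK_RESET        --LINK_PEER_RESET_EVT-->     LINK_ESTABLISHING
     *	LINK_ESTABLISHING --LINK_ESTABLISH_EVT-->      LINK_ESTABLISHED
     *
     * and a failover sequence:
     *
     *	LINK_ESTABLISHED  --LINK_FAILURE_EVT-->        LINK_RESETTING
     *	LINK_RESETTING    --LINK_RESET_EVT-->          LINK_RESET
     *	LINK_RESET        --LINK_FAILOVER_BEGIN_EVT--> LINK_FAILINGOVER
     *	LINK_FAILINGOVER  --LINK_FAILOVER_END_EVT-->   LINK_RESET
     */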
 763
 764/* link_profile_stats - update statistical profiling of traffic
 765 */
 766static void link_profile_stats(struct tipc_link *l)
 767{
 768	struct sk_buff *skb;
 769	struct tipc_msg *msg;
 770	int length;
 771
 772	/* Update counters used in statistical profiling of send traffic */
 773	l->stats.accu_queue_sz += skb_queue_len(&l->transmq);
 774	l->stats.queue_sz_counts++;
 775
 776	skb = skb_peek(&l->transmq);
 777	if (!skb)
 778		return;
 779	msg = buf_msg(skb);
 780	length = msg_size(msg);
 781
 782	if (msg_user(msg) == MSG_FRAGMENTER) {
 783		if (msg_type(msg) != FIRST_FRAGMENT)
 784			return;
 785		length = msg_size(msg_inner_hdr(msg));
 786	}
 787	l->stats.msg_lengths_total += length;
 788	l->stats.msg_length_counts++;
 789	if (length <= 64)
 790		l->stats.msg_length_profile[0]++;
 791	else if (length <= 256)
 792		l->stats.msg_length_profile[1]++;
 793	else if (length <= 1024)
 794		l->stats.msg_length_profile[2]++;
 795	else if (length <= 4096)
 796		l->stats.msg_length_profile[3]++;
 797	else if (length <= 16384)
 798		l->stats.msg_length_profile[4]++;
 799	else if (length <= 32768)
 800		l->stats.msg_length_profile[5]++;
 801	else
 802		l->stats.msg_length_profile[6]++;
 803}
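    /* Note that only raw sums and counters are accumulated here; the
     * averages (e.g. mean send queue length = accu_queue_sz /
     * queue_sz_counts, mean message length = msg_lengths_total /
     * msg_length_counts) are computed when the statistics are read out.
     */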
 804
 805/**
 806 * tipc_link_too_silent - check if link is "too silent"
 807 * @l: tipc link to be checked
 808 *
 809 * Return: true if the link 'silent_intv_cnt' is about to reach the
 810 * 'abort_limit' value, otherwise false
 811 */
 812bool tipc_link_too_silent(struct tipc_link *l)
 813{
 814	return (l->silent_intv_cnt + 2 > l->abort_limit);
 815}
 816
 817/* tipc_link_timeout - perform periodic task as instructed from node timeout
 818 */
 819int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
 820{
 821	int mtyp = 0;
 822	int rc = 0;
 823	bool state = false;
 824	bool probe = false;
 825	bool setup = false;
 826	u16 bc_snt = l->bc_sndlink->snd_nxt - 1;
 827	u16 bc_acked = l->bc_rcvlink->acked;
 828	struct tipc_mon_state *mstate = &l->mon_state;
 829
 830	trace_tipc_link_timeout(l, TIPC_DUMP_NONE, " ");
 831	trace_tipc_link_too_silent(l, TIPC_DUMP_ALL, " ");
 832	switch (l->state) {
 833	case LINK_ESTABLISHED:
 834	case LINK_SYNCHING:
 835		mtyp = STATE_MSG;
 836		link_profile_stats(l);
 837		tipc_mon_get_state(l->net, l->addr, mstate, l->bearer_id);
 838		if (mstate->reset || (l->silent_intv_cnt > l->abort_limit))
 839			return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
 840		state = bc_acked != bc_snt;
 841		state |= l->bc_rcvlink->rcv_unacked;
 842		state |= l->rcv_unacked;
 843		state |= !skb_queue_empty(&l->transmq);
 844		probe = mstate->probing;
 845		probe |= l->silent_intv_cnt;
 846		if (probe || mstate->monitoring)
 847			l->silent_intv_cnt++;
 848		probe |= !skb_queue_empty(&l->deferdq);
 849		if (l->snd_nxt == l->checkpoint) {
 850			tipc_link_update_cwin(l, 0, 0);
 851			probe = true;
 852		}
 853		l->checkpoint = l->snd_nxt;
 854		break;
 855	case LINK_RESET:
 856		setup = l->rst_cnt++ <= 4;
 857		setup |= !(l->rst_cnt % 16);
 858		mtyp = RESET_MSG;
 859		break;
 860	case LINK_ESTABLISHING:
 861		setup = true;
 862		mtyp = ACTIVATE_MSG;
 863		break;
 864	case LINK_PEER_RESET:
 865	case LINK_RESETTING:
 866	case LINK_FAILINGOVER:
 867		break;
 868	default:
 869		break;
 870	}
 871
 872	if (state || probe || setup)
 873		tipc_link_build_proto_msg(l, mtyp, probe, 0, 0, 0, 0, xmitq);
 874
 875	return rc;
 876}
 877
 878/**
 879 * link_schedule_user - schedule a message sender for wakeup after congestion
 880 * @l: congested link
 881 * @hdr: header of message that is being sent
 882 * Create pseudo msg to send back to user when congestion abates
 883 */
 884static int link_schedule_user(struct tipc_link *l, struct tipc_msg *hdr)
 885{
 886	u32 dnode = tipc_own_addr(l->net);
 887	u32 dport = msg_origport(hdr);
 888	struct sk_buff *skb;
 889
 890	/* Create and schedule wakeup pseudo message */
 891	skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
 892			      dnode, l->addr, dport, 0, 0);
 893	if (!skb)
 894		return -ENOBUFS;
 895	msg_set_dest_droppable(buf_msg(skb), true);
 896	TIPC_SKB_CB(skb)->chain_imp = msg_importance(hdr);
 897	skb_queue_tail(&l->wakeupq, skb);
 898	l->stats.link_congs++;
 899	trace_tipc_link_conges(l, TIPC_DUMP_ALL, "wakeup scheduled!");
 900	return -ELINKCONG;
 901}
 902
 903/**
 904 * link_prepare_wakeup - prepare users for wakeup after congestion
 905 * @l: congested link
 906 * Wake up a number of waiting users, as permitted by available space
 907 * in the send queue
 908 */
 909static void link_prepare_wakeup(struct tipc_link *l)
 910{
 911	struct sk_buff_head *wakeupq = &l->wakeupq;
 912	struct sk_buff_head *inputq = l->inputq;
 913	struct sk_buff *skb, *tmp;
 914	struct sk_buff_head tmpq;
 915	int avail[5] = {0,};
 916	int imp = 0;
 917
 918	__skb_queue_head_init(&tmpq);
 919
 920	for (; imp <= TIPC_SYSTEM_IMPORTANCE; imp++)
 921		avail[imp] = l->backlog[imp].limit - l->backlog[imp].len;
 922
 923	skb_queue_walk_safe(wakeupq, skb, tmp) {
 924		imp = TIPC_SKB_CB(skb)->chain_imp;
 925		if (avail[imp] <= 0)
 926			continue;
 927		avail[imp]--;
 928		__skb_unlink(skb, wakeupq);
 929		__skb_queue_tail(&tmpq, skb);
 930	}
 931
 932	spin_lock_bh(&inputq->lock);
 933	skb_queue_splice_tail(&tmpq, inputq);
 934	spin_unlock_bh(&inputq->lock);
 935
 936}
 937
 938/**
 939 * tipc_link_set_skb_retransmit_time - set the time at which retransmission of
 940 *                                     the given skb should be next attempted
 941 * @skb: skb to set a future retransmission time for
 942 * @l: link the skb will be transmitted on
 943 */
 944static void tipc_link_set_skb_retransmit_time(struct sk_buff *skb,
 945					      struct tipc_link *l)
 946{
 947	if (link_is_bc_sndlink(l))
 948		TIPC_SKB_CB(skb)->nxt_retr = TIPC_BC_RETR_LIM;
 949	else
 950		TIPC_SKB_CB(skb)->nxt_retr = TIPC_UC_RETR_TIME;
 951}
 952
 953void tipc_link_reset(struct tipc_link *l)
 954{
 955	struct sk_buff_head list;
 956	u32 imp;
 957
 958	__skb_queue_head_init(&list);
 959
 960	l->in_session = false;
 961	/* Force re-synch of peer session number before establishing */
 962	l->peer_session--;
 963	l->session++;
 964	l->mtu = l->advertised_mtu;
 965
 966	spin_lock_bh(&l->wakeupq.lock);
 967	skb_queue_splice_init(&l->wakeupq, &list);
 968	spin_unlock_bh(&l->wakeupq.lock);
 969
 970	spin_lock_bh(&l->inputq->lock);
 971	skb_queue_splice_init(&list, l->inputq);
 972	spin_unlock_bh(&l->inputq->lock);
 973
 974	__skb_queue_purge(&l->transmq);
 975	__skb_queue_purge(&l->deferdq);
 976	__skb_queue_purge(&l->backlogq);
 977	__skb_queue_purge(&l->failover_deferdq);
 978	for (imp = 0; imp <= TIPC_SYSTEM_IMPORTANCE; imp++) {
 979		l->backlog[imp].len = 0;
 980		l->backlog[imp].target_bskb = NULL;
 981	}
 982	kfree_skb(l->reasm_buf);
 983	kfree_skb(l->reasm_tnlmsg);
 984	kfree_skb(l->failover_reasm_skb);
 985	l->reasm_buf = NULL;
 986	l->reasm_tnlmsg = NULL;
 987	l->failover_reasm_skb = NULL;
 988	l->rcv_unacked = 0;
 989	l->snd_nxt = 1;
 990	l->rcv_nxt = 1;
 991	l->snd_nxt_state = 1;
 992	l->rcv_nxt_state = 1;
 993	l->acked = 0;
 994	l->last_gap = 0;
 995	kfree(l->last_ga);
 996	l->last_ga = NULL;
 997	l->silent_intv_cnt = 0;
 998	l->rst_cnt = 0;
 999	l->bc_peer_is_up = false;
1000	memset(&l->mon_state, 0, sizeof(l->mon_state));
1001	tipc_link_reset_stats(l);
1002}
1003
1004/**
1005 * tipc_link_xmit(): enqueue buffer list according to queue situation
1006 * @l: link to use
1007 * @list: chain of buffers containing message
1008 * @xmitq: returned list of packets to be sent by caller
1009 *
1010 * Consumes the buffer chain.
1011 * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
1012 * Return: 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
1013 */
1014int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
1015		   struct sk_buff_head *xmitq)
1016{
1017	struct sk_buff_head *backlogq = &l->backlogq;
1018	struct sk_buff_head *transmq = &l->transmq;
1019	struct sk_buff *skb, *_skb;
1020	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
1021	u16 ack = l->rcv_nxt - 1;
1022	u16 seqno = l->snd_nxt;
1023	int pkt_cnt = skb_queue_len(list);
1024	unsigned int mss = tipc_link_mss(l);
1025	unsigned int cwin = l->window;
1026	unsigned int mtu = l->mtu;
1027	struct tipc_msg *hdr;
1028	bool new_bundle;
1029	int rc = 0;
1030	int imp;
1031
1032	if (pkt_cnt <= 0)
1033		return 0;
1034
1035	hdr = buf_msg(skb_peek(list));
1036	if (unlikely(msg_size(hdr) > mtu)) {
1037		pr_warn("Too large msg, purging xmit list %d %d %d %d %d!\n",
1038			skb_queue_len(list), msg_user(hdr),
1039			msg_type(hdr), msg_size(hdr), mtu);
1040		__skb_queue_purge(list);
1041		return -EMSGSIZE;
1042	}
1043
1044	imp = msg_importance(hdr);
1045	/* Allow oversubscription of one data msg per source at congestion */
1046	if (unlikely(l->backlog[imp].len >= l->backlog[imp].limit)) {
1047		if (imp == TIPC_SYSTEM_IMPORTANCE) {
1048			pr_warn("%s<%s>, link overflow", link_rst_msg, l->name);
1049			return -ENOBUFS;
1050		}
1051		rc = link_schedule_user(l, hdr);
1052	}
1053
1054	if (pkt_cnt > 1) {
1055		l->stats.sent_fragmented++;
1056		l->stats.sent_fragments += pkt_cnt;
1057	}
1058
1059	/* Prepare each packet for sending, and add to relevant queue: */
1060	while ((skb = __skb_dequeue(list))) {
1061		if (likely(skb_queue_len(transmq) < cwin)) {
1062			hdr = buf_msg(skb);
1063			msg_set_seqno(hdr, seqno);
1064			msg_set_ack(hdr, ack);
1065			msg_set_bcast_ack(hdr, bc_ack);
1066			_skb = skb_clone(skb, GFP_ATOMIC);
1067			if (!_skb) {
1068				kfree_skb(skb);
1069				__skb_queue_purge(list);
1070				return -ENOBUFS;
1071			}
1072			__skb_queue_tail(transmq, skb);
1073			tipc_link_set_skb_retransmit_time(skb, l);
1074			__skb_queue_tail(xmitq, _skb);
1075			TIPC_SKB_CB(skb)->ackers = l->ackers;
1076			l->rcv_unacked = 0;
1077			l->stats.sent_pkts++;
1078			seqno++;
1079			continue;
1080		}
1081		if (tipc_msg_try_bundle(l->backlog[imp].target_bskb, &skb,
1082					mss, l->addr, &new_bundle)) {
1083			if (skb) {
1084				/* Keep a ref. to the skb for next try */
1085				l->backlog[imp].target_bskb = skb;
1086				l->backlog[imp].len++;
1087				__skb_queue_tail(backlogq, skb);
1088			} else {
1089				if (new_bundle) {
1090					l->stats.sent_bundles++;
1091					l->stats.sent_bundled++;
1092				}
1093				l->stats.sent_bundled++;
1094			}
1095			continue;
1096		}
1097		l->backlog[imp].target_bskb = NULL;
1098		l->backlog[imp].len += (1 + skb_queue_len(list));
1099		__skb_queue_tail(backlogq, skb);
1100		skb_queue_splice_tail_init(list, backlogq);
1101	}
1102	l->snd_nxt = seqno;
1103	return rc;
1104}
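    /* In summary, each packet dequeued from 'list' above meets one of three
     * fates: it is cloned onto 'xmitq' for immediate transmission while the
     * congestion window has room, it is bundled into the current target
     * bundle buffer of its importance level, or it is appended together
     * with the rest of the chain to the backlog queue, to be sent later by
     * tipc_link_advance_backlog().
     */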
1105
1106static void tipc_link_update_cwin(struct tipc_link *l, int released,
1107				  bool retransmitted)
1108{
1109	int bklog_len = skb_queue_len(&l->backlogq);
1110	struct sk_buff_head *txq = &l->transmq;
1111	int txq_len = skb_queue_len(txq);
1112	u16 cwin = l->window;
1113
1114	/* Enter fast recovery */
1115	if (unlikely(retransmitted)) {
1116		l->ssthresh = max_t(u16, l->window / 2, 300);
1117		l->window = min_t(u16, l->ssthresh, l->window);
1118		return;
1119	}
1120	/* Enter slow start */
1121	if (unlikely(!released)) {
1122		l->ssthresh = max_t(u16, l->window / 2, 300);
1123		l->window = l->min_win;
1124		return;
1125	}
1126	/* Don't increase window if no pressure on the transmit queue */
1127	if (txq_len + bklog_len < cwin)
1128		return;
1129
1130	/* Don't increase window if there are holes in the transmit queue */
1131	if (txq_len && l->snd_nxt - buf_seqno(skb_peek(txq)) != txq_len)
1132		return;
1133
1134	l->cong_acks += released;
1135
1136	/* Slow start  */
1137	if (cwin <= l->ssthresh) {
1138		l->window = min_t(u16, cwin + released, l->max_win);
1139		return;
1140	}
1141	/* Congestion avoidance */
1142	if (l->cong_acks < cwin)
1143		return;
1144	l->window = min_t(u16, ++cwin, l->max_win);
1145	l->cong_acks = 0;
1146}
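    /* The scheme above is roughly Reno-like TCP congestion control: on
     * trouble (a retransmission, or an ack that releases nothing) the slow
     * start threshold is halved, with a floor of 300 packets; below
     * ssthresh the window grows by one per released packet (slow start),
     * above it by one per window's worth of acks (congestion avoidance).
     */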
1147
1148static void tipc_link_advance_backlog(struct tipc_link *l,
1149				      struct sk_buff_head *xmitq)
1150{
1151	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
1152	struct sk_buff_head *txq = &l->transmq;
1153	struct sk_buff *skb, *_skb;
1154	u16 ack = l->rcv_nxt - 1;
1155	u16 seqno = l->snd_nxt;
1156	struct tipc_msg *hdr;
1157	u16 cwin = l->window;
1158	u32 imp;
1159
1160	while (skb_queue_len(txq) < cwin) {
1161		skb = skb_peek(&l->backlogq);
1162		if (!skb)
1163			break;
1164		_skb = skb_clone(skb, GFP_ATOMIC);
1165		if (!_skb)
1166			break;
1167		__skb_dequeue(&l->backlogq);
1168		hdr = buf_msg(skb);
1169		imp = msg_importance(hdr);
1170		l->backlog[imp].len--;
1171		if (unlikely(skb == l->backlog[imp].target_bskb))
1172			l->backlog[imp].target_bskb = NULL;
1173		__skb_queue_tail(&l->transmq, skb);
1174		tipc_link_set_skb_retransmit_time(skb, l);
1175
1176		__skb_queue_tail(xmitq, _skb);
1177		TIPC_SKB_CB(skb)->ackers = l->ackers;
1178		msg_set_seqno(hdr, seqno);
1179		msg_set_ack(hdr, ack);
1180		msg_set_bcast_ack(hdr, bc_ack);
1181		l->rcv_unacked = 0;
1182		l->stats.sent_pkts++;
1183		seqno++;
1184	}
1185	l->snd_nxt = seqno;
1186}
1187
1188/**
1189 * link_retransmit_failure() - Detect repeated retransmit failures
1190 * @l: tipc link sender
1191 * @r: tipc link receiver (= l in case of unicast)
1192 * @rc: returned code
1193 *
1194 * Return: true if repeated retransmit failures have occurred, otherwise
1195 * false
1196 */
1197static bool link_retransmit_failure(struct tipc_link *l, struct tipc_link *r,
1198				    int *rc)
1199{
1200	struct sk_buff *skb = skb_peek(&l->transmq);
1201	struct tipc_msg *hdr;
1202
1203	if (!skb)
1204		return false;
1205
1206	if (!TIPC_SKB_CB(skb)->retr_cnt)
1207		return false;
1208
1209	if (!time_after(jiffies, TIPC_SKB_CB(skb)->retr_stamp +
1210			msecs_to_jiffies(r->tolerance * 10)))
1211		return false;
1212
1213	hdr = buf_msg(skb);
1214	if (link_is_bc_sndlink(l) && !less(r->acked, msg_seqno(hdr)))
1215		return false;
1216
1217	pr_warn("Retransmission failure on link <%s>\n", l->name);
1218	link_print(l, "State of link ");
1219	pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
1220		msg_user(hdr), msg_type(hdr), msg_size(hdr), msg_errcode(hdr));
1221	pr_info("sqno %u, prev: %x, dest: %x\n",
1222		msg_seqno(hdr), msg_prevnode(hdr), msg_destnode(hdr));
1223	pr_info("retr_stamp %d, retr_cnt %d\n",
1224		jiffies_to_msecs(TIPC_SKB_CB(skb)->retr_stamp),
1225		TIPC_SKB_CB(skb)->retr_cnt);
1226
1227	trace_tipc_list_dump(&l->transmq, true, "retrans failure!");
1228	trace_tipc_link_dump(l, TIPC_DUMP_NONE, "retrans failure!");
1229	trace_tipc_link_dump(r, TIPC_DUMP_NONE, "retrans failure!");
1230
1231	if (link_is_bc_sndlink(l)) {
1232		r->state = LINK_RESET;
1233		*rc |= TIPC_LINK_DOWN_EVT;
1234	} else {
1235		*rc |= tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
1236	}
1237
1238	return true;
1239}
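    /* Put differently: the oldest packet in the transmit queue is declared
     * failed only once it has actually been retransmitted at least once
     * and more than 10 x the link tolerance (in ms) has passed since its
     * first retransmission; on a broadcast send link the peer must also
     * still be behind the packet's seqno.
     */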
1240
1241/* tipc_data_input - deliver data and name distr msgs to upper layer
1242 *
1243 * Consumes buffer if message is of right type
1244 * Node lock must be held
1245 */
1246static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb,
1247			    struct sk_buff_head *inputq)
1248{
1249	struct sk_buff_head *mc_inputq = l->bc_rcvlink->inputq;
1250	struct tipc_msg *hdr = buf_msg(skb);
1251
1252	switch (msg_user(hdr)) {
1253	case TIPC_LOW_IMPORTANCE:
1254	case TIPC_MEDIUM_IMPORTANCE:
1255	case TIPC_HIGH_IMPORTANCE:
1256	case TIPC_CRITICAL_IMPORTANCE:
1257		if (unlikely(msg_in_group(hdr) || msg_mcast(hdr))) {
1258			skb_queue_tail(mc_inputq, skb);
1259			return true;
1260		}
1261		fallthrough;
1262	case CONN_MANAGER:
1263		skb_queue_tail(inputq, skb);
1264		return true;
1265	case GROUP_PROTOCOL:
1266		skb_queue_tail(mc_inputq, skb);
1267		return true;
1268	case NAME_DISTRIBUTOR:
1269		l->bc_rcvlink->state = LINK_ESTABLISHED;
1270		skb_queue_tail(l->namedq, skb);
1271		return true;
1272	case MSG_BUNDLER:
1273	case TUNNEL_PROTOCOL:
1274	case MSG_FRAGMENTER:
1275	case BCAST_PROTOCOL:
1276		return false;
1277#ifdef CONFIG_TIPC_CRYPTO
1278	case MSG_CRYPTO:
1279		if (sysctl_tipc_key_exchange_enabled &&
1280		    TIPC_SKB_CB(skb)->decrypted) {
1281			tipc_crypto_msg_rcv(l->net, skb);
1282			return true;
1283		}
1284		fallthrough;
1285#endif
1286	default:
1287		pr_warn("Dropping received illegal msg type\n");
1288		kfree_skb(skb);
1289		return true;
1290	}
1291}
1292
1293/* tipc_link_input - process packet that has passed link protocol check
1294 *
1295 * Consumes buffer
1296 */
1297static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb,
1298			   struct sk_buff_head *inputq,
1299			   struct sk_buff **reasm_skb)
1300{
1301	struct tipc_msg *hdr = buf_msg(skb);
1302	struct sk_buff *iskb;
1303	struct sk_buff_head tmpq;
1304	int usr = msg_user(hdr);
1305	int pos = 0;
1306
1307	if (usr == MSG_BUNDLER) {
1308		skb_queue_head_init(&tmpq);
1309		l->stats.recv_bundles++;
1310		l->stats.recv_bundled += msg_msgcnt(hdr);
1311		while (tipc_msg_extract(skb, &iskb, &pos))
1312			tipc_data_input(l, iskb, &tmpq);
1313		tipc_skb_queue_splice_tail(&tmpq, inputq);
1314		return 0;
1315	} else if (usr == MSG_FRAGMENTER) {
1316		l->stats.recv_fragments++;
1317		if (tipc_buf_append(reasm_skb, &skb)) {
1318			l->stats.recv_fragmented++;
1319			tipc_data_input(l, skb, inputq);
1320		} else if (!*reasm_skb && !link_is_bc_rcvlink(l)) {
1321			pr_warn_ratelimited("Unable to build fragment list\n");
1322			return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
1323		}
1324		return 0;
1325	} else if (usr == BCAST_PROTOCOL) {
1326		tipc_bcast_lock(l->net);
1327		tipc_link_bc_init_rcv(l->bc_rcvlink, hdr);
1328		tipc_bcast_unlock(l->net);
1329	}
1330
1331	kfree_skb(skb);
1332	return 0;
1333}
1334
1335/* tipc_link_tnl_rcv() - receive TUNNEL_PROTOCOL message, drop or process the
1336 *			 inner message along with the ones in the old link's
1337 *			 deferdq
1338 * @l: tunnel link
1339 * @skb: TUNNEL_PROTOCOL message
1340 * @inputq: queue to put messages ready for delivery
1341 */
1342static int tipc_link_tnl_rcv(struct tipc_link *l, struct sk_buff *skb,
1343			     struct sk_buff_head *inputq)
1344{
1345	struct sk_buff **reasm_skb = &l->failover_reasm_skb;
1346	struct sk_buff **reasm_tnlmsg = &l->reasm_tnlmsg;
1347	struct sk_buff_head *fdefq = &l->failover_deferdq;
1348	struct tipc_msg *hdr = buf_msg(skb);
1349	struct sk_buff *iskb;
1350	int ipos = 0;
1351	int rc = 0;
1352	u16 seqno;
1353
1354	if (msg_type(hdr) == SYNCH_MSG) {
1355		kfree_skb(skb);
1356		return 0;
1357	}
1358
1359	/* Not a fragment? */
1360	if (likely(!msg_nof_fragms(hdr))) {
1361		if (unlikely(!tipc_msg_extract(skb, &iskb, &ipos))) {
1362			pr_warn_ratelimited("Unable to extract msg, defq: %d\n",
1363					    skb_queue_len(fdefq));
1364			return 0;
1365		}
1366		kfree_skb(skb);
1367	} else {
1368		/* Set fragment type for buf_append */
1369		if (msg_fragm_no(hdr) == 1)
1370			msg_set_type(hdr, FIRST_FRAGMENT);
1371		else if (msg_fragm_no(hdr) < msg_nof_fragms(hdr))
1372			msg_set_type(hdr, FRAGMENT);
1373		else
1374			msg_set_type(hdr, LAST_FRAGMENT);
1375
1376		if (!tipc_buf_append(reasm_tnlmsg, &skb)) {
1377			/* Successful but non-complete reassembly? */
1378			if (*reasm_tnlmsg || link_is_bc_rcvlink(l))
1379				return 0;
1380			pr_warn_ratelimited("Unable to reassemble tunnel msg\n");
1381			return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
1382		}
1383		iskb = skb;
1384	}
1385
1386	do {
1387		seqno = buf_seqno(iskb);
1388		if (unlikely(less(seqno, l->drop_point))) {
1389			kfree_skb(iskb);
1390			continue;
1391		}
1392		if (unlikely(seqno != l->drop_point)) {
1393			__tipc_skb_queue_sorted(fdefq, seqno, iskb);
1394			continue;
1395		}
1396
1397		l->drop_point++;
1398		if (!tipc_data_input(l, iskb, inputq))
1399			rc |= tipc_link_input(l, iskb, inputq, reasm_skb);
1400		if (unlikely(rc))
1401			break;
1402	} while ((iskb = __tipc_skb_dequeue(fdefq, l->drop_point)));
1403
1404	return rc;
1405}
1406
1407/**
1408 * tipc_get_gap_ack_blks - get Gap ACK blocks from PROTOCOL/STATE_MSG
1409 * @ga: returned pointer to the Gap ACK blocks if any
1410 * @l: the tipc link
1411 * @hdr: the PROTOCOL/STATE_MSG header
1412 * @uc: desired Gap ACK blocks type, i.e. unicast (= 1) or broadcast (= 0)
1413 *
1414 * Return: the total Gap ACK blocks size
1415 */
1416u16 tipc_get_gap_ack_blks(struct tipc_gap_ack_blks **ga, struct tipc_link *l,
1417			  struct tipc_msg *hdr, bool uc)
1418{
1419	struct tipc_gap_ack_blks *p;
1420	u16 sz = 0;
1421
1422	/* Does peer support the Gap ACK blocks feature? */
1423	if (l->peer_caps & TIPC_GAP_ACK_BLOCK) {
1424		p = (struct tipc_gap_ack_blks *)msg_data(hdr);
1425		sz = ntohs(p->len);
1426		/* Sanity check */
1427		if (sz == struct_size(p, gacks, size_add(p->ugack_cnt, p->bgack_cnt))) {
1428			/* Good, check if the desired type exists */
1429			if ((uc && p->ugack_cnt) || (!uc && p->bgack_cnt))
1430				goto ok;
1431		/* Backward compatible: peer might not support bc, but uc? */
1432		} else if (uc && sz == struct_size(p, gacks, p->ugack_cnt)) {
1433			if (p->ugack_cnt) {
1434				p->bgack_cnt = 0;
1435				goto ok;
1436			}
1437		}
1438	}
1439	/* Other cases: ignore! */
1440	p = NULL;
1441
1442ok:
1443	*ga = p;
1444	return sz;
1445}
1446
1447static u8 __tipc_build_gap_ack_blks(struct tipc_gap_ack_blks *ga,
1448				    struct tipc_link *l, u8 start_index)
1449{
1450	struct tipc_gap_ack *gacks = &ga->gacks[start_index];
1451	struct sk_buff *skb = skb_peek(&l->deferdq);
1452	u16 expect, seqno = 0;
1453	u8 n = 0;
1454
1455	if (!skb)
1456		return 0;
1457
1458	expect = buf_seqno(skb);
1459	skb_queue_walk(&l->deferdq, skb) {
1460		seqno = buf_seqno(skb);
1461		if (unlikely(more(seqno, expect))) {
1462			gacks[n].ack = htons(expect - 1);
1463			gacks[n].gap = htons(seqno - expect);
1464			if (++n >= MAX_GAP_ACK_BLKS / 2) {
1465				pr_info_ratelimited("Gacks on %s: %d, ql: %d!\n",
1466						    l->name, n,
1467						    skb_queue_len(&l->deferdq));
1468				return n;
1469			}
1470		} else if (unlikely(less(seqno, expect))) {
1471			pr_warn("Unexpected skb in deferdq!\n");
1472			continue;
1473		}
1474		expect = seqno + 1;
1475	}
1476
1477	/* last block */
1478	gacks[n].ack = htons(seqno);
1479	gacks[n].gap = 0;
1480	n++;
1481	return n;
1482}
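    /* Worked example (a sketch): if the deferred receive queue holds
     * seqnos {13, 14, 16}, the walk above yields
     *
     *	gacks[0] = { .ack = 14, .gap = 1 }	(15 is missing)
     *	gacks[1] = { .ack = 16, .gap = 0 }	(closing block)
     *
     * Any gap between rcv_nxt and the first deferred packet is not encoded
     * here; it is carried separately in the seq_gap/bc_gap fields of the
     * STATE message.
     */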
1483
1484/* tipc_build_gap_ack_blks - build Gap ACK blocks
1485 * @l: tipc unicast link
1486 * @hdr: the tipc message buffer to store the Gap ACK blocks after built
1487 *
1488 * The function builds Gap ACK blocks for both the unicast & broadcast receiver
1489 * links of a certain peer; once built, the buffer has the network data
1490 * format described by the struct tipc_gap_ack_blks definition.
1491 *
1492 * Return: the actual allocated memory size
1493 */
1494static u16 tipc_build_gap_ack_blks(struct tipc_link *l, struct tipc_msg *hdr)
1495{
1496	struct tipc_link *bcl = l->bc_rcvlink;
1497	struct tipc_gap_ack_blks *ga;
1498	u16 len;
1499
1500	ga = (struct tipc_gap_ack_blks *)msg_data(hdr);
1501
1502	/* Start with broadcast link first */
1503	tipc_bcast_lock(bcl->net);
1504	msg_set_bcast_ack(hdr, bcl->rcv_nxt - 1);
1505	msg_set_bc_gap(hdr, link_bc_rcv_gap(bcl));
1506	ga->bgack_cnt = __tipc_build_gap_ack_blks(ga, bcl, 0);
1507	tipc_bcast_unlock(bcl->net);
1508
1509	/* Then the unicast link, but only for an explicit NACK (nonzero seq gap) */
1510	ga->ugack_cnt = (msg_seq_gap(hdr)) ?
1511			__tipc_build_gap_ack_blks(ga, l, ga->bgack_cnt) : 0;
1512
1513	/* Total len */
1514	len = struct_size(ga, gacks, size_add(ga->bgack_cnt, ga->ugack_cnt));
1515	ga->len = htons(len);
1516	return len;
1517}
1518
1519/* tipc_link_advance_transmq - advance TIPC link transmq queue by releasing
1520 *			       acked packets, also doing retransmissions if
1521 *			       gaps found
1522 * @l: tipc link with transmq queue to be advanced
1523 * @r: tipc link "receiver" i.e. in case of broadcast (= "l" if unicast)
1524 * @acked: seqno of last packet acked by peer without any gaps before
1525 * @gap: # of gap packets
1526 * @ga: buffer pointer to Gap ACK blocks from peer
1527 * @xmitq: queue for accumulating the retransmitted packets if any
1528 * @retransmitted: returned boolean value if a retransmission is really issued
1529 * @rc: returned code, e.g. TIPC_LINK_DOWN_EVT if repeated retransmit
1530 *      failures occur (unlikely case)
1531 *
1532 * Return: the number of packets released from the link transmq
1533 */
1534static int tipc_link_advance_transmq(struct tipc_link *l, struct tipc_link *r,
1535				     u16 acked, u16 gap,
1536				     struct tipc_gap_ack_blks *ga,
1537				     struct sk_buff_head *xmitq,
1538				     bool *retransmitted, int *rc)
1539{
1540	struct tipc_gap_ack_blks *last_ga = r->last_ga, *this_ga = NULL;
1541	struct tipc_gap_ack *gacks = NULL;
1542	struct sk_buff *skb, *_skb, *tmp;
1543	struct tipc_msg *hdr;
1544	u32 qlen = skb_queue_len(&l->transmq);
1545	u16 nacked = acked, ngap = gap, gack_cnt = 0;
1546	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
1547	u16 ack = l->rcv_nxt - 1;
1548	u16 seqno, n = 0;
1549	u16 end = r->acked, start = end, offset = r->last_gap;
1550	u16 si = (last_ga) ? last_ga->start_index : 0;
1551	bool is_uc = !link_is_bc_sndlink(l);
1552	bool bc_has_acked = false;
1553
1554	trace_tipc_link_retrans(r, acked + 1, acked + gap, &l->transmq);
1555
1556	/* Determine Gap ACK blocks if any for the particular link */
1557	if (ga && is_uc) {
1558		/* Get the Gap ACKs, uc part */
1559		gack_cnt = ga->ugack_cnt;
1560		gacks = &ga->gacks[ga->bgack_cnt];
1561	} else if (ga) {
1562		/* Copy the Gap ACKs, bc part, for later renewal if needed */
1563		this_ga = kmemdup(ga, struct_size(ga, gacks, ga->bgack_cnt),
1564				  GFP_ATOMIC);
1565		if (likely(this_ga)) {
1566			this_ga->start_index = 0;
1567			/* Start with the bc Gap ACKs */
1568			gack_cnt = this_ga->bgack_cnt;
1569			gacks = &this_ga->gacks[0];
1570		} else {
1571			/* Hmm, we can get in trouble..., simply ignore it */
1572			pr_warn_ratelimited("Ignoring bc Gap ACKs, no memory\n");
1573		}
1574	}
1575
1576	/* Advance the link transmq */
1577	skb_queue_walk_safe(&l->transmq, skb, tmp) {
1578		seqno = buf_seqno(skb);
1579
1580next_gap_ack:
1581		if (less_eq(seqno, nacked)) {
1582			if (is_uc)
1583				goto release;
1584			/* Skip packets peer has already acked */
1585			if (!more(seqno, r->acked))
1586				continue;
1587			/* Get the next of last Gap ACK blocks */
1588			while (more(seqno, end)) {
1589				if (!last_ga || si >= last_ga->bgack_cnt)
1590					break;
1591				start = end + offset + 1;
1592				end = ntohs(last_ga->gacks[si].ack);
1593				offset = ntohs(last_ga->gacks[si].gap);
1594				si++;
1595				WARN_ONCE(more(start, end) ||
1596					  (!offset &&
1597					   si < last_ga->bgack_cnt) ||
1598					  si > MAX_GAP_ACK_BLKS,
1599					  "Corrupted Gap ACK: %d %d %d %d %d\n",
1600					  start, end, offset, si,
1601					  last_ga->bgack_cnt);
1602			}
1603			/* Check against the last Gap ACK block */
1604			if (tipc_in_range(seqno, start, end))
1605				continue;
1606			/* Update/release the packet peer is acking */
1607			bc_has_acked = true;
1608			if (--TIPC_SKB_CB(skb)->ackers)
1609				continue;
1610release:
1611			/* release skb */
1612			__skb_unlink(skb, &l->transmq);
1613			kfree_skb(skb);
1614		} else if (less_eq(seqno, nacked + ngap)) {
1615			/* First gap: check if repeated retrans failures? */
1616			if (unlikely(seqno == acked + 1 &&
1617				     link_retransmit_failure(l, r, rc))) {
1618				/* Ignore this bc Gap ACKs if any */
1619				kfree(this_ga);
1620				this_ga = NULL;
1621				break;
1622			}
1623			/* retransmit skb if unrestricted*/
1624			if (time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr))
1625				continue;
1626			tipc_link_set_skb_retransmit_time(skb, l);
1627			_skb = pskb_copy(skb, GFP_ATOMIC);
1628			if (!_skb)
1629				continue;
1630			hdr = buf_msg(_skb);
1631			msg_set_ack(hdr, ack);
1632			msg_set_bcast_ack(hdr, bc_ack);
1633			_skb->priority = TC_PRIO_CONTROL;
1634			__skb_queue_tail(xmitq, _skb);
1635			l->stats.retransmitted++;
1636			if (!is_uc)
1637				r->stats.retransmitted++;
1638			*retransmitted = true;
1639			/* Increase actual retrans counter & mark first time */
1640			if (!TIPC_SKB_CB(skb)->retr_cnt++)
1641				TIPC_SKB_CB(skb)->retr_stamp = jiffies;
1642		} else {
1643			/* retry with Gap ACK blocks if any */
1644			if (n >= gack_cnt)
1645				break;
1646			nacked = ntohs(gacks[n].ack);
1647			ngap = ntohs(gacks[n].gap);
1648			n++;
1649			goto next_gap_ack;
1650		}
1651	}
1652
1653	/* Renew last Gap ACK blocks for bc if needed */
1654	if (bc_has_acked) {
1655		if (this_ga) {
1656			kfree(last_ga);
1657			r->last_ga = this_ga;
1658			r->last_gap = gap;
1659		} else if (last_ga) {
1660			if (less(acked, start)) {
1661				si--;
1662				offset = start - acked - 1;
1663			} else if (less(acked, end)) {
1664				acked = end;
1665			}
1666			if (si < last_ga->bgack_cnt) {
1667				last_ga->start_index = si;
1668				r->last_gap = offset;
1669			} else {
1670				kfree(last_ga);
1671				r->last_ga = NULL;
1672				r->last_gap = 0;
1673			}
1674		} else {
1675			r->last_gap = 0;
1676		}
1677		r->acked = acked;
1678	} else {
1679		kfree(this_ga);
1680	}
1681
1682	return qlen - skb_queue_len(&l->transmq);
1683}
1684
1685/* tipc_link_build_state_msg: prepare link state message for transmission
1686 *
1687 * Note that sending of broadcast ack is coordinated among nodes, to reduce
1688 * risk of ack storms towards the sender
1689 */
1690int tipc_link_build_state_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
1691{
1692	if (!l)
1693		return 0;
1694
1695	/* Broadcast ACK must be sent via a unicast link => defer to caller */
1696	if (link_is_bc_rcvlink(l)) {
1697		if (((l->rcv_nxt ^ tipc_own_addr(l->net)) & 0xf) != 0xf)
1698			return 0;
1699		l->rcv_unacked = 0;
1700
1701		/* Use snd_nxt to store peer's snd_nxt in broadcast rcv link */
1702		l->snd_nxt = l->rcv_nxt;
1703		return TIPC_LINK_SND_STATE;
1704	}
1705	/* Unicast ACK */
1706	l->rcv_unacked = 0;
1707	l->stats.sent_acks++;
1708	tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, 0, xmitq);
1709	return 0;
1710}
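    /* The ((rcv_nxt ^ own_addr) & 0xf) != 0xf test above implements the
     * ack coordination: a node acks a broadcast packet only when the low
     * nibble of its own address is the bitwise complement of the packet
     * number's low nibble, so on average one node in 16 acks any given
     * packet.
     */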
1711
1712/* tipc_link_build_reset_msg: prepare link RESET or ACTIVATE message
1713 */
1714void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
1715{
1716	int mtyp = RESET_MSG;
1717	struct sk_buff *skb;
1718
1719	if (l->state == LINK_ESTABLISHING)
1720		mtyp = ACTIVATE_MSG;
1721
1722	tipc_link_build_proto_msg(l, mtyp, 0, 0, 0, 0, 0, xmitq);
1723
1724	/* Inform peer that this endpoint is going down if applicable */
1725	skb = skb_peek_tail(xmitq);
1726	if (skb && (l->state == LINK_RESET))
1727		msg_set_peer_stopping(buf_msg(skb), 1);
1728}
1729
1730/* tipc_link_build_nack_msg: prepare link nack message for transmission
1731 * Note that sending of broadcast NACK is coordinated among nodes, to
1732 * reduce the risk of NACK storms towards the sender
1733 */
1734static int tipc_link_build_nack_msg(struct tipc_link *l,
1735				    struct sk_buff_head *xmitq)
1736{
1737	u32 def_cnt = ++l->stats.deferred_recv;
1738	struct sk_buff_head *dfq = &l->deferdq;
1739	u32 defq_len = skb_queue_len(dfq);
1740	int match1, match2;
1741
1742	if (link_is_bc_rcvlink(l)) {
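		/* Staggered NACKing: this node requests a broadcast STATE
		 * message only when the low nibble of its own deferral
		 * counter matches the low nibble of its node address, i.e.
		 * once per 16 deferrals and at a node-specific phase, which
		 * keeps all receivers from NACKing the same loss at once.
		 */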
1743		match1 = def_cnt & 0xf;
1744		match2 = tipc_own_addr(l->net) & 0xf;
1745		if (match1 == match2)
1746			return TIPC_LINK_SND_STATE;
1747		return 0;
1748	}
1749
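	/* Rate-limit unicast NACKs: send one when the defer queue first
	 * holds 3 packets, then one for every further 16, i.e. at queue
	 * lengths 3, 19, 35, ...
	 */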
1750	if (defq_len >= 3 && !((defq_len - 3) % 16)) {
1751		u16 rcvgap = buf_seqno(skb_peek(dfq)) - l->rcv_nxt;
1752
1753		tipc_link_build_proto_msg(l, STATE_MSG, 0, 0,
1754					  rcvgap, 0, 0, xmitq);
1755	}
1756	return 0;
1757}
1758
1759/* tipc_link_rcv - process TIPC packets/messages arriving from off-node
1760 * @l: the link that should handle the message
1761 * @skb: TIPC packet
1762 * @xmitq: queue to place packets to be sent after this call
1763 */
1764int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
1765		  struct sk_buff_head *xmitq)
1766{
1767	struct sk_buff_head *defq = &l->deferdq;
1768	struct tipc_msg *hdr = buf_msg(skb);
1769	u16 seqno, rcv_nxt, win_lim;
1770	int released = 0;
1771	int rc = 0;
1772
1773	/* Verify and update link state */
1774	if (unlikely(msg_user(hdr) == LINK_PROTOCOL))
1775		return tipc_link_proto_rcv(l, skb, xmitq);
1776
1777	/* Don't send probe at next timeout expiration */
1778	l->silent_intv_cnt = 0;
1779
1780	do {
1781		hdr = buf_msg(skb);
1782		seqno = msg_seqno(hdr);
1783		rcv_nxt = l->rcv_nxt;
1784		win_lim = rcv_nxt + TIPC_MAX_LINK_WIN;
1785
1786		if (unlikely(!tipc_link_is_up(l))) {
1787			if (l->state == LINK_ESTABLISHING)
1788				rc = TIPC_LINK_UP_EVT;
1789			kfree_skb(skb);
1790			break;
1791		}
1792
1793		/* Drop if outside receive window */
1794		if (unlikely(less(seqno, rcv_nxt) || more(seqno, win_lim))) {
1795			l->stats.duplicates++;
1796			kfree_skb(skb);
1797			break;
1798		}
1799		released += tipc_link_advance_transmq(l, l, msg_ack(hdr), 0,
1800						      NULL, NULL, NULL, NULL);
1801
1802		/* Defer delivery if sequence gap */
1803		if (unlikely(seqno != rcv_nxt)) {
1804			if (!__tipc_skb_queue_sorted(defq, seqno, skb))
1805				l->stats.duplicates++;
1806			rc |= tipc_link_build_nack_msg(l, xmitq);
1807			break;
1808		}
1809
1810		/* Deliver packet */
1811		l->rcv_nxt++;
1812		l->stats.recv_pkts++;
1813
1814		if (unlikely(msg_user(hdr) == TUNNEL_PROTOCOL))
1815			rc |= tipc_link_tnl_rcv(l, skb, l->inputq);
1816		else if (!tipc_data_input(l, skb, l->inputq))
1817			rc |= tipc_link_input(l, skb, l->inputq, &l->reasm_buf);
1818		if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN))
1819			rc |= tipc_link_build_state_msg(l, xmitq);
1820		if (unlikely(rc & ~TIPC_LINK_SND_STATE))
1821			break;
1822	} while ((skb = __tipc_skb_dequeue(defq, l->rcv_nxt)));
1823
1824	/* Forward queues and wake up waiting users */
1825	if (released) {
1826		tipc_link_update_cwin(l, released, 0);
1827		tipc_link_advance_backlog(l, xmitq);
1828		if (unlikely(!skb_queue_empty(&l->wakeupq)))
1829			link_prepare_wakeup(l);
1830	}
1831	return rc;
1832}
1833
1834static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
1835				      bool probe_reply, u16 rcvgap,
1836				      int tolerance, int priority,
1837				      struct sk_buff_head *xmitq)
1838{
1839	struct tipc_mon_state *mstate = &l->mon_state;
1840	struct sk_buff_head *dfq = &l->deferdq;
1841	struct tipc_link *bcl = l->bc_rcvlink;
1842	struct tipc_msg *hdr;
1843	struct sk_buff *skb;
1844	bool node_up = tipc_link_is_up(bcl);
1845	u16 glen = 0, bc_rcvgap = 0;
1846	int dlen = 0;
1847	void *data;
1848
1849	/* Don't send protocol message during reset or link failover */
1850	if (tipc_link_is_blocked(l))
1851		return;
1852
1853	if (!tipc_link_is_up(l) && (mtyp == STATE_MSG))
1854		return;
1855
1856	if ((probe || probe_reply) && !skb_queue_empty(dfq))
1857		rcvgap = buf_seqno(skb_peek(dfq)) - l->rcv_nxt;
1858
1859	skb = tipc_msg_create(LINK_PROTOCOL, mtyp, INT_H_SIZE,
1860			      tipc_max_domain_size + MAX_GAP_ACK_BLKS_SZ,
1861			      l->addr, tipc_own_addr(l->net), 0, 0, 0);
1862	if (!skb)
1863		return;
1864
1865	hdr = buf_msg(skb);
1866	data = msg_data(hdr);
1867	msg_set_session(hdr, l->session);
1868	msg_set_bearer_id(hdr, l->bearer_id);
1869	msg_set_net_plane(hdr, l->net_plane);
1870	msg_set_next_sent(hdr, l->snd_nxt);
1871	msg_set_ack(hdr, l->rcv_nxt - 1);
1872	msg_set_bcast_ack(hdr, bcl->rcv_nxt - 1);
1873	msg_set_bc_ack_invalid(hdr, !node_up);
1874	msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
1875	msg_set_link_tolerance(hdr, tolerance);
1876	msg_set_linkprio(hdr, priority);
1877	msg_set_redundant_link(hdr, node_up);
1878	msg_set_seq_gap(hdr, 0);
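	/* The default seqno sits half the sequence space away from the data
	 * stream (e.g. snd_nxt 100 yields 100 + 32767 = 32867), so a
	 * RESET/ACTIVATE can never be mistaken for an in-window packet;
	 * STATE messages overwrite this below when the peer supports
	 * TIPC_LINK_PROTO_SEQNO.
	 */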
1879	msg_set_seqno(hdr, l->snd_nxt + U16_MAX / 2);
1880
1881	if (mtyp == STATE_MSG) {
1882		if (l->peer_caps & TIPC_LINK_PROTO_SEQNO)
1883			msg_set_seqno(hdr, l->snd_nxt_state++);
1884		msg_set_seq_gap(hdr, rcvgap);
1885		bc_rcvgap = link_bc_rcv_gap(bcl);
1886		msg_set_bc_gap(hdr, bc_rcvgap);
1887		msg_set_probe(hdr, probe);
1888		msg_set_is_keepalive(hdr, probe || probe_reply);
1889		if (l->peer_caps & TIPC_GAP_ACK_BLOCK)
1890			glen = tipc_build_gap_ack_blks(l, hdr);
1891		tipc_mon_prep(l->net, data + glen, &dlen, mstate, l->bearer_id);
1892		msg_set_size(hdr, INT_H_SIZE + glen + dlen);
1893		skb_trim(skb, INT_H_SIZE + glen + dlen);
1894		l->stats.sent_states++;
1895		l->rcv_unacked = 0;
1896	} else {
1897		/* RESET_MSG or ACTIVATE_MSG */
1898		if (mtyp == ACTIVATE_MSG) {
1899			msg_set_dest_session_valid(hdr, 1);
1900			msg_set_dest_session(hdr, l->peer_session);
1901		}
1902		msg_set_max_pkt(hdr, l->advertised_mtu);
1903		strcpy(data, l->if_name);
1904		msg_set_size(hdr, INT_H_SIZE + TIPC_MAX_IF_NAME);
1905		skb_trim(skb, INT_H_SIZE + TIPC_MAX_IF_NAME);
1906	}
1907	if (probe)
1908		l->stats.sent_probes++;
1909	if (rcvgap)
1910		l->stats.sent_nacks++;
1911	if (bc_rcvgap)
1912		bcl->stats.sent_nacks++;
1913	skb->priority = TC_PRIO_CONTROL;
1914	__skb_queue_tail(xmitq, skb);
1915	trace_tipc_proto_build(skb, false, l->name);
1916}
1917
1918void tipc_link_create_dummy_tnl_msg(struct tipc_link *l,
1919				    struct sk_buff_head *xmitq)
1920{
1921	u32 onode = tipc_own_addr(l->net);
1922	struct tipc_msg *hdr, *ihdr;
1923	struct sk_buff_head tnlq;
1924	struct sk_buff *skb;
1925	u32 dnode = l->addr;
1926
1927	__skb_queue_head_init(&tnlq);
1928	skb = tipc_msg_create(TUNNEL_PROTOCOL, FAILOVER_MSG,
1929			      INT_H_SIZE, BASIC_H_SIZE,
1930			      dnode, onode, 0, 0, 0);
1931	if (!skb) {
1932		pr_warn("%sunable to create tunnel packet\n", link_co_err);
1933		return;
1934	}
1935
1936	hdr = buf_msg(skb);
1937	msg_set_msgcnt(hdr, 1);
1938	msg_set_bearer_id(hdr, l->peer_bearer_id);
1939
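	/* The tunnel packet wraps a single empty data message marked with
	 * TIPC_ERR_NO_PORT, giving the peer something to unwrap that
	 * carries no user-visible payload.
	 */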
1940	ihdr = (struct tipc_msg *)msg_data(hdr);
1941	tipc_msg_init(onode, ihdr, TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
1942		      BASIC_H_SIZE, dnode);
1943	msg_set_errcode(ihdr, TIPC_ERR_NO_PORT);
1944	__skb_queue_tail(&tnlq, skb);
1945	tipc_link_xmit(l, &tnlq, xmitq);
1946}
1947
1948/* tipc_link_tnl_prepare(): prepare a list of tunnel packets with the
1949 * contents of the link's transmit and backlog queues, and send it via tnl.
1950 */
1951void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
1952			   int mtyp, struct sk_buff_head *xmitq)
1953{
1954	struct sk_buff_head *fdefq = &tnl->failover_deferdq;
1955	struct sk_buff *skb, *tnlskb;
1956	struct tipc_msg *hdr, tnlhdr;
1957	struct sk_buff_head *queue = &l->transmq;
1958	struct sk_buff_head tmpxq, tnlq, frags;
1959	u16 pktlen, pktcnt, seqno = l->snd_nxt;
1960	bool pktcnt_need_update = false;
1961	u16 syncpt;
1962	int rc;
1963
1964	if (!tnl)
1965		return;
1966
1967	__skb_queue_head_init(&tnlq);
1968	/* Link Synching:
1969	 * From now on, send only one single ("dummy") SYNCH message
1970	 * to peer. The SYNCH message does not contain any data, just
1971	 * a header conveying the synch point to the peer.
1972	 */
1973	if (mtyp == SYNCH_MSG && (tnl->peer_caps & TIPC_TUNNEL_ENHANCED)) {
1974		tnlskb = tipc_msg_create(TUNNEL_PROTOCOL, SYNCH_MSG,
1975					 INT_H_SIZE, 0, l->addr,
1976					 tipc_own_addr(l->net),
1977					 0, 0, 0);
1978		if (!tnlskb) {
1979			pr_warn("%sunable to create dummy SYNCH_MSG\n",
1980				link_co_err);
1981			return;
1982		}
1983
1984		hdr = buf_msg(tnlskb);
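		/* The synch point is the highest seqno the peer must have
		 * received on this link before it may start consuming
		 * packets from the new link: the last packet already sent
		 * (snd_nxt - 1) plus everything still in the backlog queue.
		 */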
1985		syncpt = l->snd_nxt + skb_queue_len(&l->backlogq) - 1;
1986		msg_set_syncpt(hdr, syncpt);
1987		msg_set_bearer_id(hdr, l->peer_bearer_id);
1988		__skb_queue_tail(&tnlq, tnlskb);
1989		tipc_link_xmit(tnl, &tnlq, xmitq);
1990		return;
1991	}
1992
1993	__skb_queue_head_init(&tmpxq);
1994	__skb_queue_head_init(&frags);
1995	/* At least one packet required for safe algorithm => add dummy */
1996	skb = tipc_msg_create(TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
1997			      BASIC_H_SIZE, 0, l->addr, tipc_own_addr(l->net),
1998			      0, 0, TIPC_ERR_NO_PORT);
1999	if (!skb) {
2000		pr_warn("%sunable to create tunnel packet\n", link_co_err);
2001		return;
2002	}
2003	__skb_queue_tail(&tnlq, skb);
2004	tipc_link_xmit(l, &tnlq, &tmpxq);
2005	__skb_queue_purge(&tmpxq);
2006
2007	/* Initialize reusable tunnel packet header */
2008	tipc_msg_init(tipc_own_addr(l->net), &tnlhdr, TUNNEL_PROTOCOL,
2009		      mtyp, INT_H_SIZE, l->addr);
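	/* Tell the peer how many tunnelled packets to expect: for SYNCH the
	 * seqno span of the transmit queue, for FAILOVER its buffer count,
	 * plus the backlog queue length in either case. The count may still
	 * grow below if fragmentation is needed (pktcnt_need_update).
	 */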
2010	if (mtyp == SYNCH_MSG)
2011		pktcnt = l->snd_nxt - buf_seqno(skb_peek(&l->transmq));
2012	else
2013		pktcnt = skb_queue_len(&l->transmq);
2014	pktcnt += skb_queue_len(&l->backlogq);
2015	msg_set_msgcnt(&tnlhdr, pktcnt);
2016	msg_set_bearer_id(&tnlhdr, l->peer_bearer_id);
2017tnl:
2018	/* Wrap each packet into a tunnel packet */
2019	skb_queue_walk(queue, skb) {
2020		hdr = buf_msg(skb);
2021		if (queue == &l->backlogq)
2022			msg_set_seqno(hdr, seqno++);
2023		pktlen = msg_size(hdr);
2024
2025		/* Tunnel link MTU is not large enough. This could be
2026		 * due to:
2027		 * 1) the link MTU having just changed or been set differently;
2028		 * 2) a FAILOVER arriving on top of a SYNCH message.
2029		 *
2030		 * The 2nd case should not happen if the peer supports
2031		 * TIPC_TUNNEL_ENHANCED.
2032		 */
2033		if (pktlen > tnl->mtu - INT_H_SIZE) {
2034			if (mtyp == FAILOVER_MSG &&
2035			    (tnl->peer_caps & TIPC_TUNNEL_ENHANCED)) {
2036				rc = tipc_msg_fragment(skb, &tnlhdr, tnl->mtu,
2037						       &frags);
2038				if (rc) {
2039					pr_warn("%sunable to frag msg: rc %d\n",
2040						link_co_err, rc);
2041					return;
2042				}
2043				pktcnt += skb_queue_len(&frags) - 1;
2044				pktcnt_need_update = true;
2045				skb_queue_splice_tail_init(&frags, &tnlq);
2046				continue;
2047			}
2048			/* The peer does not support TIPC_TUNNEL_ENHANCED,
2049			 * so just warn and return!
2050			 */
2051			pr_warn_ratelimited("%stoo large msg <%d, %d>: %d!\n",
2052					    link_co_err, msg_user(hdr),
2053					    msg_type(hdr), msg_size(hdr));
2054			return;
2055		}
2056
2057		msg_set_size(&tnlhdr, pktlen + INT_H_SIZE);
2058		tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE, GFP_ATOMIC);
2059		if (!tnlskb) {
2060			pr_warn("%sunable to send packet\n", link_co_err);
2061			return;
2062		}
2063		skb_copy_to_linear_data(tnlskb, &tnlhdr, INT_H_SIZE);
2064		skb_copy_to_linear_data_offset(tnlskb, INT_H_SIZE, hdr, pktlen);
2065		__skb_queue_tail(&tnlq, tnlskb);
2066	}
2067	if (queue != &l->backlogq) {
2068		queue = &l->backlogq;
2069		goto tnl;
2070	}
2071
2072	if (pktcnt_need_update)
2073		skb_queue_walk(&tnlq, skb) {
2074			hdr = buf_msg(skb);
2075			msg_set_msgcnt(hdr, pktcnt);
2076		}
2077
2078	tipc_link_xmit(tnl, &tnlq, xmitq);
2079
2080	if (mtyp == FAILOVER_MSG) {
2081		tnl->drop_point = l->rcv_nxt;
2082		tnl->failover_reasm_skb = l->reasm_buf;
2083		l->reasm_buf = NULL;
2084
2085		/* Failover the link's deferdq */
2086		if (unlikely(!skb_queue_empty(fdefq))) {
2087			pr_warn("Link failover deferdq not empty: %d!\n",
2088				skb_queue_len(fdefq));
2089			__skb_queue_purge(fdefq);
2090		}
2091		skb_queue_splice_init(&l->deferdq, fdefq);
2092	}
2093}
2094
2095/**
2096 * tipc_link_failover_prepare() - prepare tnl for link failover
2097 *
2098 * This is a special version of its precursor, tipc_link_tnl_prepare();
2099 * see tipc_node_link_failover() for details.
2100 *
2101 * @l: failover link
2102 * @tnl: tunnel link
2103 * @xmitq: queue for messages to be transmitted
2104 */
2105void tipc_link_failover_prepare(struct tipc_link *l, struct tipc_link *tnl,
2106				struct sk_buff_head *xmitq)
2107{
2108	struct sk_buff_head *fdefq = &tnl->failover_deferdq;
2109
2110	tipc_link_create_dummy_tnl_msg(tnl, xmitq);
2111
2112	/* This failover link endpoint was never established before,
2113	 * so it has not received anything from the peer.
2114	 * Otherwise, this must be a normal failover situation, or the
2115	 * node has entered SELF_DOWN_PEER_LEAVING and both peer nodes
2116	 * would have to start over from scratch instead.
2117	 */
2118	tnl->drop_point = 1;
2119	tnl->failover_reasm_skb = NULL;
2120
2121	/* Initiate the link's failover deferdq */
2122	if (unlikely(!skb_queue_empty(fdefq))) {
2123		pr_warn("Link failover deferdq not empty: %d!\n",
2124			skb_queue_len(fdefq));
2125		__skb_queue_purge(fdefq);
2126	}
2127}
2128
2129/* tipc_link_validate_msg(): validate message against current link state
2130 * Returns true if message should be accepted, otherwise false
2131 */
2132bool tipc_link_validate_msg(struct tipc_link *l, struct tipc_msg *hdr)
2133{
2134	u16 curr_session = l->peer_session;
2135	u16 session = msg_session(hdr);
2136	int mtyp = msg_type(hdr);
2137
2138	if (msg_user(hdr) != LINK_PROTOCOL)
2139		return true;
2140
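	/* Session numbers compare modulo 2^16, so e.g. more(2, 65534) is
	 * true after wrap-around, and a RESET with session 2 still counts
	 * as newer than a current session of 65534.
	 */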
2141	switch (mtyp) {
2142	case RESET_MSG:
2143		if (!l->in_session)
2144			return true;
2145		/* Accept only RESET with new session number */
2146		return more(session, curr_session);
2147	case ACTIVATE_MSG:
2148		if (!l->in_session)
2149			return true;
2150		/* Accept only ACTIVATE with new or current session number */
2151		return !less(session, curr_session);
2152	case STATE_MSG:
2153		/* Accept only STATE with current session number */
2154		if (!l->in_session)
2155			return false;
2156		if (session != curr_session)
2157			return false;
2158		/* Extra sanity check */
2159		if (!tipc_link_is_up(l) && msg_ack(hdr))
2160			return false;
2161		if (!(l->peer_caps & TIPC_LINK_PROTO_SEQNO))
2162			return true;
2163		/* Accept only STATE with new sequence number */
2164		return !less(msg_seqno(hdr), l->rcv_nxt_state);
2165	default:
2166		return false;
2167	}
2168}
2169
2170/* tipc_link_proto_rcv(): receive link level protocol message:
2171 * Note that network plane id propagates through the network, and may
2172 * change at any time. The node with lowest numerical id determines
2173 * network plane
2174 */
2175static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
2176			       struct sk_buff_head *xmitq)
2177{
2178	struct tipc_msg *hdr = buf_msg(skb);
2179	struct tipc_gap_ack_blks *ga = NULL;
2180	bool reply = msg_probe(hdr), retransmitted = false;
2181	u32 dlen = msg_data_sz(hdr), glen = 0, msg_max;
2182	u16 peers_snd_nxt =  msg_next_sent(hdr);
2183	u16 peers_tol = msg_link_tolerance(hdr);
2184	u16 peers_prio = msg_linkprio(hdr);
2185	u16 gap = msg_seq_gap(hdr);
2186	u16 ack = msg_ack(hdr);
2187	u16 rcv_nxt = l->rcv_nxt;
2188	u16 rcvgap = 0;
2189	int mtyp = msg_type(hdr);
2190	int rc = 0, released;
2191	char *if_name;
2192	void *data;
2193
2194	trace_tipc_proto_rcv(skb, false, l->name);
2195
2196	if (dlen > U16_MAX)
2197		goto exit;
2198
2199	if (tipc_link_is_blocked(l) || !xmitq)
2200		goto exit;
2201
2202	if (tipc_own_addr(l->net) > msg_prevnode(hdr))
2203		l->net_plane = msg_net_plane(hdr);
2204
2205	if (skb_linearize(skb))
2206		goto exit;
2207
2208	hdr = buf_msg(skb);
2209	data = msg_data(hdr);
2210
2211	if (!tipc_link_validate_msg(l, hdr)) {
2212		trace_tipc_skb_dump(skb, false, "PROTO invalid (1)!");
2213		trace_tipc_link_dump(l, TIPC_DUMP_NONE, "PROTO invalid (1)!");
2214		goto exit;
2215	}
2216
2217	switch (mtyp) {
2218	case RESET_MSG:
2219	case ACTIVATE_MSG:
2220		msg_max = msg_max_pkt(hdr);
2221		if (msg_max < tipc_bearer_min_mtu(l->net, l->bearer_id))
2222			break;
2223		/* Complete own link name with peer's interface name */
2224		if_name =  strrchr(l->name, ':') + 1;
2225		if (sizeof(l->name) - (if_name - l->name) <= TIPC_MAX_IF_NAME)
2226			break;
2227		if (msg_data_sz(hdr) < TIPC_MAX_IF_NAME)
2228			break;
2229		strncpy(if_name, data, TIPC_MAX_IF_NAME);
2230
2231		/* Update own tolerance if peer indicates a non-zero value */
2232		if (tipc_in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) {
2233			l->tolerance = peers_tol;
2234			l->bc_rcvlink->tolerance = peers_tol;
2235		}
2236		/* Update own priority if peer's priority is higher */
2237		if (tipc_in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
2238			l->priority = peers_prio;
2239
2240		/* If peer is going down we want full re-establish cycle */
2241		if (msg_peer_stopping(hdr)) {
2242			rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
2243			break;
2244		}
2245
2246		/* If this endpoint was re-created while peer was ESTABLISHING
2247		 * it doesn't know current session number. Force re-synch.
2248		 */
2249		if (mtyp == ACTIVATE_MSG && msg_dest_session_valid(hdr) &&
2250		    l->session != msg_dest_session(hdr)) {
2251			if (less(l->session, msg_dest_session(hdr)))
2252				l->session = msg_dest_session(hdr) + 1;
2253			break;
2254		}
2255
2256		/* ACTIVATE_MSG serves as PEER_RESET if link is already down */
2257		if (mtyp == RESET_MSG || !tipc_link_is_up(l))
2258			rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);
2259
2260		/* ACTIVATE_MSG takes up link if it was already locally reset */
2261		if (mtyp == ACTIVATE_MSG && l->state == LINK_ESTABLISHING)
2262			rc = TIPC_LINK_UP_EVT;
2263
2264		l->peer_session = msg_session(hdr);
2265		l->in_session = true;
2266		l->peer_bearer_id = msg_bearer_id(hdr);
2267		if (l->mtu > msg_max)
2268			l->mtu = msg_max;
2269		break;
2270
2271	case STATE_MSG:
2272		/* Validate Gap ACK blocks, drop if invalid */
2273		glen = tipc_get_gap_ack_blks(&ga, l, hdr, true);
2274		if (glen > dlen)
2275			break;
2276
2277		l->rcv_nxt_state = msg_seqno(hdr) + 1;
2278
2279		/* Update own tolerance if peer indicates a non-zero value */
2280		if (tipc_in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) {
2281			l->tolerance = peers_tol;
2282			l->bc_rcvlink->tolerance = peers_tol;
2283		}
2284		/* Update own prio if peer indicates a different value */
2285		if ((peers_prio != l->priority) &&
2286		    tipc_in_range(peers_prio, 1, TIPC_MAX_LINK_PRI)) {
2287			l->priority = peers_prio;
2288			rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
2289		}
2290
2291		l->silent_intv_cnt = 0;
2292		l->stats.recv_states++;
2293		if (msg_probe(hdr))
2294			l->stats.recv_probes++;
2295
2296		if (!tipc_link_is_up(l)) {
2297			if (l->state == LINK_ESTABLISHING)
2298				rc = TIPC_LINK_UP_EVT;
2299			break;
2300		}
2301
2302		tipc_mon_rcv(l->net, data + glen, dlen - glen, l->addr,
2303			     &l->mon_state, l->bearer_id);
2304
2305		/* Send NACK if peer has sent pkts we haven't received yet */
2306		if ((reply || msg_is_keepalive(hdr)) &&
2307		    more(peers_snd_nxt, rcv_nxt) &&
2308		    !tipc_link_is_synching(l) &&
2309		    skb_queue_empty(&l->deferdq))
2310			rcvgap = peers_snd_nxt - l->rcv_nxt;
2311		if (rcvgap || reply)
2312			tipc_link_build_proto_msg(l, STATE_MSG, 0, reply,
2313						  rcvgap, 0, 0, xmitq);
2314
2315		released = tipc_link_advance_transmq(l, l, ack, gap, ga, xmitq,
2316						     &retransmitted, &rc);
2317		if (gap)
2318			l->stats.recv_nacks++;
2319		if (released || retransmitted)
2320			tipc_link_update_cwin(l, released, retransmitted);
2321		if (released)
2322			tipc_link_advance_backlog(l, xmitq);
2323		if (unlikely(!skb_queue_empty(&l->wakeupq)))
2324			link_prepare_wakeup(l);
2325	}
2326exit:
2327	kfree_skb(skb);
2328	return rc;
2329}
2330
2331/* tipc_link_build_bc_proto_msg() - create broadcast protocol message
2332 */
2333static bool tipc_link_build_bc_proto_msg(struct tipc_link *l, bool bcast,
2334					 u16 peers_snd_nxt,
2335					 struct sk_buff_head *xmitq)
2336{
2337	struct sk_buff *skb;
2338	struct tipc_msg *hdr;
2339	struct sk_buff *dfrd_skb = skb_peek(&l->deferdq);
2340	u16 ack = l->rcv_nxt - 1;
2341	u16 gap_to = peers_snd_nxt - 1;
2342
2343	skb = tipc_msg_create(BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE,
2344			      0, l->addr, tipc_own_addr(l->net), 0, 0, 0);
2345	if (!skb)
2346		return false;
2347	hdr = buf_msg(skb);
2348	msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
2349	msg_set_bcast_ack(hdr, ack);
2350	msg_set_bcgap_after(hdr, ack);
2351	if (dfrd_skb)
2352		gap_to = buf_seqno(dfrd_skb) - 1;
2353	msg_set_bcgap_to(hdr, gap_to);
2354	msg_set_non_seq(hdr, bcast);
2355	__skb_queue_tail(xmitq, skb);
2356	return true;
2357}
2358
2359/* tipc_link_build_bc_init_msg() - synchronize broadcast link endpoints.
2360 *
2361 * Give a newly added peer node the sequence number where it should
2362 * start receiving and acking broadcast packets.
2363 */
2364static void tipc_link_build_bc_init_msg(struct tipc_link *l,
2365					struct sk_buff_head *xmitq)
2366{
2367	struct sk_buff_head list;
2368
2369	__skb_queue_head_init(&list);
2370	if (!tipc_link_build_bc_proto_msg(l->bc_rcvlink, false, 0, &list))
2371		return;
2372	msg_set_bc_ack_invalid(buf_msg(skb_peek(&list)), true);
2373	tipc_link_xmit(l, &list, xmitq);
2374}
2375
2376/* tipc_link_bc_init_rcv - receive initial broadcast synch data from peer
2377 */
2378void tipc_link_bc_init_rcv(struct tipc_link *l, struct tipc_msg *hdr)
2379{
2380	int mtyp = msg_type(hdr);
2381	u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);
2382
2383	if (tipc_link_is_up(l))
2384		return;
2385
2386	if (msg_user(hdr) == BCAST_PROTOCOL) {
2387		l->rcv_nxt = peers_snd_nxt;
2388		l->state = LINK_ESTABLISHED;
2389		return;
2390	}
2391
2392	if (l->peer_caps & TIPC_BCAST_SYNCH)
2393		return;
2394
2395	if (msg_peer_node_is_up(hdr))
2396		return;
2397
2398	/* Compatibility: accept older, less safe initial synch data */
2399	if ((mtyp == RESET_MSG) || (mtyp == ACTIVATE_MSG))
2400		l->rcv_nxt = peers_snd_nxt;
2401}
2402
2403/* tipc_link_bc_sync_rcv - update rcv link according to peer's send state
2404 */
2405int tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
2406			  struct sk_buff_head *xmitq)
2407{
2408	u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);
2409	int rc = 0;
2410
2411	if (!tipc_link_is_up(l))
2412		return rc;
2413
2414	if (!msg_peer_node_is_up(hdr))
2415		return rc;
2416
2417	/* Open when peer acknowledges our bcast init msg (pkt #1) */
2418	if (msg_ack(hdr))
2419		l->bc_peer_is_up = true;
2420
2421	if (!l->bc_peer_is_up)
2422		return rc;
2423
2424	/* Ignore if peers_snd_nxt goes beyond receive window */
2425	if (more(peers_snd_nxt, l->rcv_nxt + l->window))
2426		return rc;
2427
2428	l->snd_nxt = peers_snd_nxt;
2429	if (link_bc_rcv_gap(l))
2430		rc |= TIPC_LINK_SND_STATE;
2431
2432	/* Return now if sender supports nack via STATE messages */
2433	if (l->peer_caps & TIPC_BCAST_STATE_NACK)
2434		return rc;
2435
2436	/* Otherwise, be backwards compatible */
2437
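	/* Legacy NACK suppression, roughly: SUPPRESS swallows one NACK
	 * after we recently sent one or saw another node's, CONDITIONAL
	 * defers until the next synch unless the gap already reaches
	 * TIPC_MIN_LINK_WIN, and UNCONDITIONAL lets the NACK go out,
	 * which puts the state back to SUPPRESS.
	 */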
2438	if (!more(peers_snd_nxt, l->rcv_nxt)) {
2439		l->nack_state = BC_NACK_SND_CONDITIONAL;
2440		return 0;
2441	}
2442
2443	/* Don't NACK if one was recently sent or peeked */
2444	if (l->nack_state == BC_NACK_SND_SUPPRESS) {
2445		l->nack_state = BC_NACK_SND_UNCONDITIONAL;
2446		return 0;
2447	}
2448
2449	/* Conditionally delay NACK sending until next synch rcv */
2450	if (l->nack_state == BC_NACK_SND_CONDITIONAL) {
2451		l->nack_state = BC_NACK_SND_UNCONDITIONAL;
2452		if ((peers_snd_nxt - l->rcv_nxt) < TIPC_MIN_LINK_WIN)
2453			return 0;
2454	}
2455
2456	/* Send NACK now but suppress next one */
2457	tipc_link_build_bc_proto_msg(l, true, peers_snd_nxt, xmitq);
2458	l->nack_state = BC_NACK_SND_SUPPRESS;
2459	return 0;
2460}
2461
2462int tipc_link_bc_ack_rcv(struct tipc_link *r, u16 acked, u16 gap,
2463			 struct tipc_gap_ack_blks *ga,
2464			 struct sk_buff_head *xmitq,
2465			 struct sk_buff_head *retrq)
2466{
2467	struct tipc_link *l = r->bc_sndlink;
2468	bool unused = false;
2469	int rc = 0;
2470
2471	if (!tipc_link_is_up(r) || !r->bc_peer_is_up)
2472		return 0;
2473
2474	if (gap) {
2475		l->stats.recv_nacks++;
2476		r->stats.recv_nacks++;
2477	}
2478
2479	if (less(acked, r->acked) || (acked == r->acked && !gap && !ga))
2480		return 0;
2481
2482	trace_tipc_link_bc_ack(r, acked, gap, &l->transmq);
2483	tipc_link_advance_transmq(l, r, acked, gap, ga, retrq, &unused, &rc);
2484
2485	tipc_link_advance_backlog(l, xmitq);
2486	if (unlikely(!skb_queue_empty(&l->wakeupq)))
2487		link_prepare_wakeup(l);
2488
2489	return rc;
2490}
2491
2492/* tipc_link_bc_nack_rcv(): receive broadcast nack message
2493 * This function is here for backwards compatibility, since
2494 * no BCAST_PROTOCOL/STATE messages occur from TIPC v2.5.
2495 */
2496int tipc_link_bc_nack_rcv(struct tipc_link *l, struct sk_buff *skb,
2497			  struct sk_buff_head *xmitq)
2498{
2499	struct tipc_msg *hdr = buf_msg(skb);
2500	u32 dnode = msg_destnode(hdr);
2501	int mtyp = msg_type(hdr);
2502	u16 acked = msg_bcast_ack(hdr);
2503	u16 from = acked + 1;
2504	u16 to = msg_bcgap_to(hdr);
2505	u16 peers_snd_nxt = to + 1;
2506	int rc = 0;
2507
2508	kfree_skb(skb);
2509
2510	if (!tipc_link_is_up(l) || !l->bc_peer_is_up)
2511		return 0;
2512
2513	if (mtyp != STATE_MSG)
2514		return 0;
2515
2516	if (dnode == tipc_own_addr(l->net)) {
2517		rc = tipc_link_bc_ack_rcv(l, acked, to - acked, NULL, xmitq,
2518					  xmitq);
2519		l->stats.recv_nacks++;
2520		return rc;
2521	}
2522
2523	/* Msg for other node => suppress own NACK at next sync if applicable */
2524	if (more(peers_snd_nxt, l->rcv_nxt) && !less(l->rcv_nxt, from))
2525		l->nack_state = BC_NACK_SND_SUPPRESS;
2526
2527	return 0;
2528}
2529
2530void tipc_link_set_queue_limits(struct tipc_link *l, u32 min_win, u32 max_win)
2531{
2532	int max_bulk = TIPC_MAX_PUBL / (l->mtu / ITEM_SIZE);
2533
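	/* Backlog limits scale with the minimum window per importance
	 * level; for example min_win = 50 gives limits of 100, 200, 300
	 * and 400 packets, while SYSTEM importance is bounded by max_bulk,
	 * the number of packets a full publication table (TIPC_MAX_PUBL
	 * items of ITEM_SIZE bytes each) occupies at the current MTU.
	 */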
2534	l->min_win = min_win;
2535	l->ssthresh = max_win;
2536	l->max_win = max_win;
2537	l->window = min_win;
2538	l->backlog[TIPC_LOW_IMPORTANCE].limit      = min_win * 2;
2539	l->backlog[TIPC_MEDIUM_IMPORTANCE].limit   = min_win * 4;
2540	l->backlog[TIPC_HIGH_IMPORTANCE].limit     = min_win * 6;
2541	l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = min_win * 8;
2542	l->backlog[TIPC_SYSTEM_IMPORTANCE].limit   = max_bulk;
2543}
2544
2545/**
2546 * tipc_link_reset_stats - reset link statistics
2547 * @l: pointer to link
2548 */
2549void tipc_link_reset_stats(struct tipc_link *l)
2550{
2551	memset(&l->stats, 0, sizeof(l->stats));
2552}
2553
2554static void link_print(struct tipc_link *l, const char *str)
2555{
2556	struct sk_buff *hskb = skb_peek(&l->transmq);
2557	u16 head = hskb ? msg_seqno(buf_msg(hskb)) : l->snd_nxt - 1;
2558	u16 tail = l->snd_nxt - 1;
2559
2560	pr_info("%s Link <%s> state %x\n", str, l->name, l->state);
2561	pr_info("XMTQ: %u [%u-%u], BKLGQ: %u, SNDNX: %u, RCVNX: %u\n",
2562		skb_queue_len(&l->transmq), head, tail,
2563		skb_queue_len(&l->backlogq), l->snd_nxt, l->rcv_nxt);
2564}
2565
2566/* Parse and validate nested (link) properties valid for media, bearer and link
2567 */
2568int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[])
2569{
2570	int err;
2571
2572	err = nla_parse_nested_deprecated(props, TIPC_NLA_PROP_MAX, prop,
2573					  tipc_nl_prop_policy, NULL);
2574	if (err)
2575		return err;
2576
2577	if (props[TIPC_NLA_PROP_PRIO]) {
2578		u32 prio;
2579
2580		prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
2581		if (prio > TIPC_MAX_LINK_PRI)
2582			return -EINVAL;
2583	}
2584
2585	if (props[TIPC_NLA_PROP_TOL]) {
2586		u32 tol;
2587
2588		tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
2589		if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
2590			return -EINVAL;
2591	}
2592
2593	if (props[TIPC_NLA_PROP_WIN]) {
2594		u32 max_win;
2595
2596		max_win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
2597		if (max_win < TIPC_DEF_LINK_WIN || max_win > TIPC_MAX_LINK_WIN)
2598			return -EINVAL;
2599	}
2600
2601	return 0;
2602}
2603
2604static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s)
2605{
2606	int i;
2607	struct nlattr *stats;
2608
2609	struct nla_map {
2610		u32 key;
2611		u32 val;
2612	};
2613
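	/* Table-driven attribute emission: every {key, val} pair below is
	 * written out as one u32 attribute inside the TIPC_NLA_LINK_STATS
	 * nest opened further down.
	 */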
2614	struct nla_map map[] = {
2615		{TIPC_NLA_STATS_RX_INFO, 0},
2616		{TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments},
2617		{TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented},
2618		{TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles},
2619		{TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled},
2620		{TIPC_NLA_STATS_TX_INFO, 0},
2621		{TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments},
2622		{TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented},
2623		{TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles},
2624		{TIPC_NLA_STATS_TX_BUNDLED, s->sent_bundled},
2625		{TIPC_NLA_STATS_MSG_PROF_TOT, (s->msg_length_counts) ?
2626			s->msg_length_counts : 1},
2627		{TIPC_NLA_STATS_MSG_LEN_CNT, s->msg_length_counts},
2628		{TIPC_NLA_STATS_MSG_LEN_TOT, s->msg_lengths_total},
2629		{TIPC_NLA_STATS_MSG_LEN_P0, s->msg_length_profile[0]},
2630		{TIPC_NLA_STATS_MSG_LEN_P1, s->msg_length_profile[1]},
2631		{TIPC_NLA_STATS_MSG_LEN_P2, s->msg_length_profile[2]},
2632		{TIPC_NLA_STATS_MSG_LEN_P3, s->msg_length_profile[3]},
2633		{TIPC_NLA_STATS_MSG_LEN_P4, s->msg_length_profile[4]},
2634		{TIPC_NLA_STATS_MSG_LEN_P5, s->msg_length_profile[5]},
2635		{TIPC_NLA_STATS_MSG_LEN_P6, s->msg_length_profile[6]},
2636		{TIPC_NLA_STATS_RX_STATES, s->recv_states},
2637		{TIPC_NLA_STATS_RX_PROBES, s->recv_probes},
2638		{TIPC_NLA_STATS_RX_NACKS, s->recv_nacks},
2639		{TIPC_NLA_STATS_RX_DEFERRED, s->deferred_recv},
2640		{TIPC_NLA_STATS_TX_STATES, s->sent_states},
2641		{TIPC_NLA_STATS_TX_PROBES, s->sent_probes},
2642		{TIPC_NLA_STATS_TX_NACKS, s->sent_nacks},
2643		{TIPC_NLA_STATS_TX_ACKS, s->sent_acks},
2644		{TIPC_NLA_STATS_RETRANSMITTED, s->retransmitted},
2645		{TIPC_NLA_STATS_DUPLICATES, s->duplicates},
2646		{TIPC_NLA_STATS_LINK_CONGS, s->link_congs},
2647		{TIPC_NLA_STATS_MAX_QUEUE, s->max_queue_sz},
2648		{TIPC_NLA_STATS_AVG_QUEUE, s->queue_sz_counts ?
2649			(s->accu_queue_sz / s->queue_sz_counts) : 0}
2650	};
2651
2652	stats = nla_nest_start_noflag(skb, TIPC_NLA_LINK_STATS);
2653	if (!stats)
2654		return -EMSGSIZE;
2655
2656	for (i = 0; i <  ARRAY_SIZE(map); i++)
2657		if (nla_put_u32(skb, map[i].key, map[i].val))
2658			goto msg_full;
2659
2660	nla_nest_end(skb, stats);
2661
2662	return 0;
2663msg_full:
2664	nla_nest_cancel(skb, stats);
2665
2666	return -EMSGSIZE;
2667}
2668
2669/* Caller should hold appropriate locks to protect the link */
2670int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
2671		       struct tipc_link *link, int nlflags)
2672{
2673	u32 self = tipc_own_addr(net);
2674	struct nlattr *attrs;
2675	struct nlattr *prop;
2676	void *hdr;
2677	int err;
2678
2679	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
2680			  nlflags, TIPC_NL_LINK_GET);
2681	if (!hdr)
2682		return -EMSGSIZE;
2683
2684	attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK);
2685	if (!attrs)
2686		goto msg_full;
2687
2688	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name))
2689		goto attr_msg_full;
2690	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST, tipc_cluster_mask(self)))
2691		goto attr_msg_full;
2692	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu))
2693		goto attr_msg_full;
2694	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->stats.recv_pkts))
2695		goto attr_msg_full;
2696	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->stats.sent_pkts))
2697		goto attr_msg_full;
2698
2699	if (tipc_link_is_up(link))
2700		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
2701			goto attr_msg_full;
2702	if (link->active)
2703		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE))
2704			goto attr_msg_full;
2705
2706	prop = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK_PROP);
2707	if (!prop)
2708		goto attr_msg_full;
2709	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
2710		goto prop_msg_full;
2711	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance))
2712		goto prop_msg_full;
2713	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN,
2714			link->window))
2715		goto prop_msg_full;
2718	nla_nest_end(msg->skb, prop);
2719
2720	err = __tipc_nl_add_stats(msg->skb, &link->stats);
2721	if (err)
2722		goto attr_msg_full;
2723
2724	nla_nest_end(msg->skb, attrs);
2725	genlmsg_end(msg->skb, hdr);
2726
2727	return 0;
2728
2729prop_msg_full:
2730	nla_nest_cancel(msg->skb, prop);
2731attr_msg_full:
2732	nla_nest_cancel(msg->skb, attrs);
2733msg_full:
2734	genlmsg_cancel(msg->skb, hdr);
2735
2736	return -EMSGSIZE;
2737}
2738
2739static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
2740				      struct tipc_stats *stats)
2741{
2742	int i;
2743	struct nlattr *nest;
2744
2745	struct nla_map {
2746		__u32 key;
2747		__u32 val;
2748	};
2749
2750	struct nla_map map[] = {
2751		{TIPC_NLA_STATS_RX_INFO, stats->recv_pkts},
2752		{TIPC_NLA_STATS_RX_FRAGMENTS, stats->recv_fragments},
2753		{TIPC_NLA_STATS_RX_FRAGMENTED, stats->recv_fragmented},
2754		{TIPC_NLA_STATS_RX_BUNDLES, stats->recv_bundles},
2755		{TIPC_NLA_STATS_RX_BUNDLED, stats->recv_bundled},
2756		{TIPC_NLA_STATS_TX_INFO, stats->sent_pkts},
2757		{TIPC_NLA_STATS_TX_FRAGMENTS, stats->sent_fragments},
2758		{TIPC_NLA_STATS_TX_FRAGMENTED, stats->sent_fragmented},
2759		{TIPC_NLA_STATS_TX_BUNDLES, stats->sent_bundles},
2760		{TIPC_NLA_STATS_TX_BUNDLED, stats->sent_bundled},
2761		{TIPC_NLA_STATS_RX_NACKS, stats->recv_nacks},
2762		{TIPC_NLA_STATS_RX_DEFERRED, stats->deferred_recv},
2763		{TIPC_NLA_STATS_TX_NACKS, stats->sent_nacks},
2764		{TIPC_NLA_STATS_TX_ACKS, stats->sent_acks},
2765		{TIPC_NLA_STATS_RETRANSMITTED, stats->retransmitted},
2766		{TIPC_NLA_STATS_DUPLICATES, stats->duplicates},
2767		{TIPC_NLA_STATS_LINK_CONGS, stats->link_congs},
2768		{TIPC_NLA_STATS_MAX_QUEUE, stats->max_queue_sz},
2769		{TIPC_NLA_STATS_AVG_QUEUE, stats->queue_sz_counts ?
2770			(stats->accu_queue_sz / stats->queue_sz_counts) : 0}
2771	};
2772
2773	nest = nla_nest_start_noflag(skb, TIPC_NLA_LINK_STATS);
2774	if (!nest)
2775		return -EMSGSIZE;
2776
2777	for (i = 0; i <  ARRAY_SIZE(map); i++)
2778		if (nla_put_u32(skb, map[i].key, map[i].val))
2779			goto msg_full;
2780
2781	nla_nest_end(skb, nest);
2782
2783	return 0;
2784msg_full:
2785	nla_nest_cancel(skb, nest);
2786
2787	return -EMSGSIZE;
2788}
2789
2790int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg,
2791			struct tipc_link *bcl)
2792{
2793	int err;
2794	void *hdr;
2795	struct nlattr *attrs;
2796	struct nlattr *prop;
2797	u32 bc_mode = tipc_bcast_get_mode(net);
2798	u32 bc_ratio = tipc_bcast_get_broadcast_ratio(net);
2799
2800	if (!bcl)
2801		return 0;
2802
2803	tipc_bcast_lock(net);
2804
2805	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
2806			  NLM_F_MULTI, TIPC_NL_LINK_GET);
2807	if (!hdr) {
2808		tipc_bcast_unlock(net);
2809		return -EMSGSIZE;
2810	}
2811
2812	attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK);
2813	if (!attrs)
2814		goto msg_full;
2815
2816	/* The broadcast link is always up */
2817	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
2818		goto attr_msg_full;
2819
2820	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_BROADCAST))
2821		goto attr_msg_full;
2822	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name))
2823		goto attr_msg_full;
2824	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, 0))
2825		goto attr_msg_full;
2826	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, 0))
2827		goto attr_msg_full;
2828
2829	prop = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK_PROP);
2830	if (!prop)
2831		goto attr_msg_full;
2832	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->max_win))
2833		goto prop_msg_full;
2834	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_BROADCAST, bc_mode))
2835		goto prop_msg_full;
2836	if (bc_mode & BCLINK_MODE_SEL)
2837		if (nla_put_u32(msg->skb, TIPC_NLA_PROP_BROADCAST_RATIO,
2838				bc_ratio))
2839			goto prop_msg_full;
2840	nla_nest_end(msg->skb, prop);
2841
2842	err = __tipc_nl_add_bc_link_stat(msg->skb, &bcl->stats);
2843	if (err)
2844		goto attr_msg_full;
2845
2846	tipc_bcast_unlock(net);
2847	nla_nest_end(msg->skb, attrs);
2848	genlmsg_end(msg->skb, hdr);
2849
2850	return 0;
2851
2852prop_msg_full:
2853	nla_nest_cancel(msg->skb, prop);
2854attr_msg_full:
2855	nla_nest_cancel(msg->skb, attrs);
2856msg_full:
2857	tipc_bcast_unlock(net);
2858	genlmsg_cancel(msg->skb, hdr);
2859
2860	return -EMSGSIZE;
2861}
2862
2863void tipc_link_set_tolerance(struct tipc_link *l, u32 tol,
2864			     struct sk_buff_head *xmitq)
2865{
2866	l->tolerance = tol;
2867	if (l->bc_rcvlink)
2868		l->bc_rcvlink->tolerance = tol;
2869	if (tipc_link_is_up(l))
2870		tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, tol, 0, xmitq);
2871}
2872
2873void tipc_link_set_prio(struct tipc_link *l, u32 prio,
2874			struct sk_buff_head *xmitq)
2875{
2876	l->priority = prio;
2877	tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, prio, xmitq);
2878}
2879
2880void tipc_link_set_abort_limit(struct tipc_link *l, u32 limit)
2881{
2882	l->abort_limit = limit;
2883}
2884
2885/**
2886 * tipc_link_dump - dump TIPC link data
2887 * @l: tipc link to be dumped
2888 * @dqueues: bitmask selecting which link queues to dump:
2889 *           - TIPC_DUMP_NONE: don't dump link queues
2890 *           - TIPC_DUMP_TRANSMQ: dump link transmq queue
2891 *           - TIPC_DUMP_BACKLOGQ: dump link backlog queue
2892 *           - TIPC_DUMP_DEFERDQ: dump link deferdq queue
2893 *           - TIPC_DUMP_INPUTQ: dump link input queue
2894 *           - TIPC_DUMP_WAKEUP: dump link wakeup queue
2895 *           - TIPC_DUMP_ALL: dump all the link queues above
2896 * @buf: buffer into which the formatted dump data is written
2897 */
2898int tipc_link_dump(struct tipc_link *l, u16 dqueues, char *buf)
2899{
2900	int i = 0;
2901	size_t sz = (dqueues) ? LINK_LMAX : LINK_LMIN;
2902	struct sk_buff_head *list;
2903	struct sk_buff *hskb, *tskb;
2904	u32 len;
2905
2906	if (!l) {
2907		i += scnprintf(buf, sz, "link data: (null)\n");
2908		return i;
2909	}
2910
2911	i += scnprintf(buf, sz, "link data: %x", l->addr);
2912	i += scnprintf(buf + i, sz - i, " %x", l->state);
2913	i += scnprintf(buf + i, sz - i, " %u", l->in_session);
2914	i += scnprintf(buf + i, sz - i, " %u", l->session);
2915	i += scnprintf(buf + i, sz - i, " %u", l->peer_session);
2916	i += scnprintf(buf + i, sz - i, " %u", l->snd_nxt);
2917	i += scnprintf(buf + i, sz - i, " %u", l->rcv_nxt);
2918	i += scnprintf(buf + i, sz - i, " %u", l->snd_nxt_state);
2919	i += scnprintf(buf + i, sz - i, " %u", l->rcv_nxt_state);
2920	i += scnprintf(buf + i, sz - i, " %x", l->peer_caps);
2921	i += scnprintf(buf + i, sz - i, " %u", l->silent_intv_cnt);
2922	i += scnprintf(buf + i, sz - i, " %u", l->rst_cnt);
2923	i += scnprintf(buf + i, sz - i, " %u", 0);
2924	i += scnprintf(buf + i, sz - i, " %u", 0);
2925	i += scnprintf(buf + i, sz - i, " %u", l->acked);
2926
2927	list = &l->transmq;
2928	len = skb_queue_len(list);
2929	hskb = skb_peek(list);
2930	tskb = skb_peek_tail(list);
2931	i += scnprintf(buf + i, sz - i, " | %u %u %u", len,
2932		       (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
2933		       (tskb) ? msg_seqno(buf_msg(tskb)) : 0);
2934
2935	list = &l->deferdq;
2936	len = skb_queue_len(list);
2937	hskb = skb_peek(list);
2938	tskb = skb_peek_tail(list);
2939	i += scnprintf(buf + i, sz - i, " | %u %u %u", len,
2940		       (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
2941		       (tskb) ? msg_seqno(buf_msg(tskb)) : 0);
2942
2943	list = &l->backlogq;
2944	len = skb_queue_len(list);
2945	hskb = skb_peek(list);
2946	tskb = skb_peek_tail(list);
2947	i += scnprintf(buf + i, sz - i, " | %u %u %u", len,
2948		       (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
2949		       (tskb) ? msg_seqno(buf_msg(tskb)) : 0);
2950
2951	list = l->inputq;
2952	len = skb_queue_len(list);
2953	hskb = skb_peek(list);
2954	tskb = skb_peek_tail(list);
2955	i += scnprintf(buf + i, sz - i, " | %u %u %u\n", len,
2956		       (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
2957		       (tskb) ? msg_seqno(buf_msg(tskb)) : 0);
2958
2959	if (dqueues & TIPC_DUMP_TRANSMQ) {
2960		i += scnprintf(buf + i, sz - i, "transmq: ");
2961		i += tipc_list_dump(&l->transmq, false, buf + i);
2962	}
2963	if (dqueues & TIPC_DUMP_BACKLOGQ) {
2964		i += scnprintf(buf + i, sz - i,
2965			       "backlogq: <%u %u %u %u %u>, ",
2966			       l->backlog[TIPC_LOW_IMPORTANCE].len,
2967			       l->backlog[TIPC_MEDIUM_IMPORTANCE].len,
2968			       l->backlog[TIPC_HIGH_IMPORTANCE].len,
2969			       l->backlog[TIPC_CRITICAL_IMPORTANCE].len,
2970			       l->backlog[TIPC_SYSTEM_IMPORTANCE].len);
2971		i += tipc_list_dump(&l->backlogq, false, buf + i);
2972	}
2973	if (dqueues & TIPC_DUMP_DEFERDQ) {
2974		i += scnprintf(buf + i, sz - i, "deferdq: ");
2975		i += tipc_list_dump(&l->deferdq, false, buf + i);
2976	}
2977	if (dqueues & TIPC_DUMP_INPUTQ) {
2978		i += scnprintf(buf + i, sz - i, "inputq: ");
2979		i += tipc_list_dump(l->inputq, false, buf + i);
2980	}
2981	if (dqueues & TIPC_DUMP_WAKEUP) {
2982		i += scnprintf(buf + i, sz - i, "wakeup: ");
2983		i += tipc_list_dump(&l->wakeupq, false, buf + i);
2984	}
2985
2986	return i;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2987}
v3.5.6
   1/*
   2 * net/tipc/link.c: TIPC link code
   3 *
   4 * Copyright (c) 1996-2007, Ericsson AB
   5 * Copyright (c) 2004-2007, 2010-2011, Wind River Systems
   6 * All rights reserved.
   7 *
   8 * Redistribution and use in source and binary forms, with or without
   9 * modification, are permitted provided that the following conditions are met:
  10 *
  11 * 1. Redistributions of source code must retain the above copyright
  12 *    notice, this list of conditions and the following disclaimer.
  13 * 2. Redistributions in binary form must reproduce the above copyright
  14 *    notice, this list of conditions and the following disclaimer in the
  15 *    documentation and/or other materials provided with the distribution.
  16 * 3. Neither the names of the copyright holders nor the names of its
  17 *    contributors may be used to endorse or promote products derived from
  18 *    this software without specific prior written permission.
  19 *
  20 * Alternatively, this software may be distributed under the terms of the
  21 * GNU General Public License ("GPL") version 2 as published by the Free
  22 * Software Foundation.
  23 *
  24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  34 * POSSIBILITY OF SUCH DAMAGE.
  35 */
  36
  37#include "core.h"
 
  38#include "link.h"
  39#include "port.h"
 
  40#include "name_distr.h"
  41#include "discover.h"
  42#include "config.h"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  43
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  44
  45/*
  46 * Out-of-range value for link session numbers
  47 */
  48#define INVALID_SESSION 0x10000
 
  49
  50/*
  51 * Link state events:
  52 */
  53#define  STARTING_EVT    856384768	/* link processing trigger */
  54#define  TRAFFIC_MSG_EVT 560815u	/* rx'd ??? */
  55#define  TIMEOUT_EVT     560817u	/* link timer expired */
 
 
  56
  57/*
  58 * The following two 'message types' is really just implementation
  59 * data conveniently stored in the message header.
  60 * They must not be considered part of the protocol
  61 */
  62#define OPEN_MSG   0
  63#define CLOSED_MSG 1
  64
  65/*
  66 * State value stored in 'exp_msg_count'
  67 */
  68#define START_CHANGEOVER 100000u
  69
  70/**
  71 * struct tipc_link_name - deconstructed link name
  72 * @addr_local: network address of node at this end
  73 * @if_local: name of interface at this end
  74 * @addr_peer: network address of node at far end
  75 * @if_peer: name of interface at far end
  76 */
  77struct tipc_link_name {
  78	u32 addr_local;
  79	char if_local[TIPC_MAX_IF_NAME];
  80	u32 addr_peer;
  81	char if_peer[TIPC_MAX_IF_NAME];
  82};
  83
  84static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
  85				       struct sk_buff *buf);
  86static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf);
  87static int  link_recv_changeover_msg(struct tipc_link **l_ptr,
  88				     struct sk_buff **buf);
  89static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance);
  90static int  link_send_sections_long(struct tipc_port *sender,
  91				    struct iovec const *msg_sect,
  92				    u32 num_sect, unsigned int total_len,
  93				    u32 destnode);
  94static void link_check_defragm_bufs(struct tipc_link *l_ptr);
  95static void link_state_event(struct tipc_link *l_ptr, u32 event);
  96static void link_reset_statistics(struct tipc_link *l_ptr);
  97static void link_print(struct tipc_link *l_ptr, const char *str);
  98static void link_start(struct tipc_link *l_ptr);
  99static int link_send_long_buf(struct tipc_link *l_ptr, struct sk_buff *buf);
 100
 
 
 
 
 101/*
 102 *  Simple link routines
 103 */
 104static unsigned int align(unsigned int i)
 105{
 106	return (i + 3) & ~3u;
 107}
 108
 109static void link_init_max_pkt(struct tipc_link *l_ptr)
 110{
 111	u32 max_pkt;
 
 112
 113	max_pkt = (l_ptr->b_ptr->mtu & ~3);
 114	if (max_pkt > MAX_MSG_SIZE)
 115		max_pkt = MAX_MSG_SIZE;
 
 116
 117	l_ptr->max_pkt_target = max_pkt;
 118	if (l_ptr->max_pkt_target < MAX_PKT_DEFAULT)
 119		l_ptr->max_pkt = l_ptr->max_pkt_target;
 120	else
 121		l_ptr->max_pkt = MAX_PKT_DEFAULT;
 122
 123	l_ptr->max_pkt_probes = 0;
 
 
 124}
 125
 126static u32 link_next_sent(struct tipc_link *l_ptr)
 127{
 128	if (l_ptr->next_out)
 129		return buf_seqno(l_ptr->next_out);
 130	return mod(l_ptr->next_out_no);
 131}
 132
 133static u32 link_last_sent(struct tipc_link *l_ptr)
 134{
 135	return mod(link_next_sent(l_ptr) - 1);
 136}
 137
 138/*
 139 *  Simple non-static link routines (i.e. referenced outside this file)
 140 */
 141int tipc_link_is_up(struct tipc_link *l_ptr)
 142{
 143	if (!l_ptr)
 144		return 0;
 145	return link_working_working(l_ptr) || link_working_unknown(l_ptr);
 146}
 147
 148int tipc_link_is_active(struct tipc_link *l_ptr)
 149{
 150	return	(l_ptr->owner->active_links[0] == l_ptr) ||
 151		(l_ptr->owner->active_links[1] == l_ptr);
 152}
 153
 154/**
 155 * link_name_validate - validate & (optionally) deconstruct tipc_link name
 156 * @name - ptr to link name string
 157 * @name_parts - ptr to area for link name components (or NULL if not needed)
 158 *
 159 * Returns 1 if link name is valid, otherwise 0.
 160 */
 161static int link_name_validate(const char *name,
 162				struct tipc_link_name *name_parts)
 163{
 164	char name_copy[TIPC_MAX_LINK_NAME];
 165	char *addr_local;
 166	char *if_local;
 167	char *addr_peer;
 168	char *if_peer;
 169	char dummy;
 170	u32 z_local, c_local, n_local;
 171	u32 z_peer, c_peer, n_peer;
 172	u32 if_local_len;
 173	u32 if_peer_len;
 174
 175	/* copy link name & ensure length is OK */
 176	name_copy[TIPC_MAX_LINK_NAME - 1] = 0;
 177	/* need above in case non-Posix strncpy() doesn't pad with nulls */
 178	strncpy(name_copy, name, TIPC_MAX_LINK_NAME);
 179	if (name_copy[TIPC_MAX_LINK_NAME - 1] != 0)
 180		return 0;
 181
 182	/* ensure all component parts of link name are present */
 183	addr_local = name_copy;
 184	if_local = strchr(addr_local, ':');
 185	if (if_local == NULL)
 186		return 0;
 187	*(if_local++) = 0;
 188	addr_peer = strchr(if_local, '-');
 189	if (addr_peer == NULL)
 190		return 0;
 191	*(addr_peer++) = 0;
 192	if_local_len = addr_peer - if_local;
 193	if_peer = strchr(addr_peer, ':');
 194	if (if_peer == NULL)
 195		return 0;
 196	*(if_peer++) = 0;
 197	if_peer_len = strlen(if_peer) + 1;
 198
 199	/* validate component parts of link name */
 200	if ((sscanf(addr_local, "%u.%u.%u%c",
 201		    &z_local, &c_local, &n_local, &dummy) != 3) ||
 202	    (sscanf(addr_peer, "%u.%u.%u%c",
 203		    &z_peer, &c_peer, &n_peer, &dummy) != 3) ||
 204	    (z_local > 255) || (c_local > 4095) || (n_local > 4095) ||
 205	    (z_peer  > 255) || (c_peer  > 4095) || (n_peer  > 4095) ||
 206	    (if_local_len <= 1) || (if_local_len > TIPC_MAX_IF_NAME) ||
 207	    (if_peer_len  <= 1) || (if_peer_len  > TIPC_MAX_IF_NAME) ||
 208	    (strspn(if_local, tipc_alphabet) != (if_local_len - 1)) ||
 209	    (strspn(if_peer, tipc_alphabet) != (if_peer_len - 1)))
 210		return 0;
 211
 212	/* return link name components, if necessary */
 213	if (name_parts) {
 214		name_parts->addr_local = tipc_addr(z_local, c_local, n_local);
 215		strcpy(name_parts->if_local, if_local);
 216		name_parts->addr_peer = tipc_addr(z_peer, c_peer, n_peer);
 217		strcpy(name_parts->if_peer, if_peer);
 218	}
 219	return 1;
 220}
 221
 222/**
 223 * link_timeout - handle expiration of link timer
 224 * @l_ptr: pointer to link
 225 *
 226 * This routine must not grab "tipc_net_lock" to avoid a potential deadlock conflict
 227 * with tipc_link_delete().  (There is no risk that the node will be deleted by
 228 * another thread because tipc_link_delete() always cancels the link timer before
 229 * tipc_node_delete() is called.)
 230 */
 231static void link_timeout(struct tipc_link *l_ptr)
 232{
 233	tipc_node_lock(l_ptr->owner);
 
 234
 235	/* update counters used in statistical profiling of send traffic */
 236	l_ptr->stats.accu_queue_sz += l_ptr->out_queue_size;
 237	l_ptr->stats.queue_sz_counts++;
 
 238
 239	if (l_ptr->first_out) {
 240		struct tipc_msg *msg = buf_msg(l_ptr->first_out);
 241		u32 length = msg_size(msg);
 
 242
 243		if ((msg_user(msg) == MSG_FRAGMENTER) &&
 244		    (msg_type(msg) == FIRST_FRAGMENT)) {
 245			length = msg_size(msg_get_wrapped(msg));
 246		}
 247		if (length) {
 248			l_ptr->stats.msg_lengths_total += length;
 249			l_ptr->stats.msg_length_counts++;
 250			if (length <= 64)
 251				l_ptr->stats.msg_length_profile[0]++;
 252			else if (length <= 256)
 253				l_ptr->stats.msg_length_profile[1]++;
 254			else if (length <= 1024)
 255				l_ptr->stats.msg_length_profile[2]++;
 256			else if (length <= 4096)
 257				l_ptr->stats.msg_length_profile[3]++;
 258			else if (length <= 16384)
 259				l_ptr->stats.msg_length_profile[4]++;
 260			else if (length <= 32768)
 261				l_ptr->stats.msg_length_profile[5]++;
 262			else
 263				l_ptr->stats.msg_length_profile[6]++;
 264		}
 265	}
 266
 267	/* do all other link processing performed on a periodic basis */
 268	link_check_defragm_bufs(l_ptr);
 269
 270	link_state_event(l_ptr, TIMEOUT_EVT);
 271
 272	if (l_ptr->next_out)
 273		tipc_link_push_queue(l_ptr);
 274
 275	tipc_node_unlock(l_ptr->owner);
 276}
 277
 278static void link_set_timer(struct tipc_link *l_ptr, u32 time)
 279{
 280	k_start_timer(&l_ptr->timer, time);
 281}
 282
 283/**
 284 * tipc_link_create - create a new link
 285 * @n_ptr: pointer to associated node
 286 * @b_ptr: pointer to associated bearer
 287 * @media_addr: media address to use when sending messages over link
 288 *
 289 * Returns pointer to link.
 290 */
 291struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
 292			      struct tipc_bearer *b_ptr,
 293			      const struct tipc_media_addr *media_addr)
 294{
 295	struct tipc_link *l_ptr;
 296	struct tipc_msg *msg;
 297	char *if_name;
 298	char addr_string[16];
 299	u32 peer = n_ptr->addr;
 300
 301	if (n_ptr->link_cnt >= 2) {
 302		tipc_addr_string_fill(addr_string, n_ptr->addr);
 303		err("Attempt to establish third link to %s\n", addr_string);
 304		return NULL;
 305	}
 306
 307	if (n_ptr->links[b_ptr->identity]) {
 308		tipc_addr_string_fill(addr_string, n_ptr->addr);
 309		err("Attempt to establish second link on <%s> to %s\n",
 310		    b_ptr->name, addr_string);
 311		return NULL;
 312	}
 313
 314	l_ptr = kzalloc(sizeof(*l_ptr), GFP_ATOMIC);
 315	if (!l_ptr) {
 316		warn("Link creation failed, no memory\n");
 317		return NULL;
 318	}
 319
 320	l_ptr->addr = peer;
 321	if_name = strchr(b_ptr->name, ':') + 1;
 322	sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
 323		tipc_zone(tipc_own_addr), tipc_cluster(tipc_own_addr),
 324		tipc_node(tipc_own_addr),
 325		if_name,
 326		tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
 327		/* note: peer i/f name is updated by reset/activate message */
 328	memcpy(&l_ptr->media_addr, media_addr, sizeof(*media_addr));
 329	l_ptr->owner = n_ptr;
 330	l_ptr->checkpoint = 1;
 331	l_ptr->peer_session = INVALID_SESSION;
 332	l_ptr->b_ptr = b_ptr;
 333	link_set_supervision_props(l_ptr, b_ptr->tolerance);
 334	l_ptr->state = RESET_UNKNOWN;
 335
 336	l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg;
 337	msg = l_ptr->pmsg;
 338	tipc_msg_init(msg, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE, l_ptr->addr);
 339	msg_set_size(msg, sizeof(l_ptr->proto_msg));
 340	msg_set_session(msg, (tipc_random & 0xffff));
 341	msg_set_bearer_id(msg, b_ptr->identity);
 342	strcpy((char *)msg_data(msg), if_name);
 343
 344	l_ptr->priority = b_ptr->priority;
 345	tipc_link_set_queue_limits(l_ptr, b_ptr->window);
 346
 347	link_init_max_pkt(l_ptr);
 348
 349	l_ptr->next_out_no = 1;
 350	INIT_LIST_HEAD(&l_ptr->waiting_ports);
 351
 352	link_reset_statistics(l_ptr);
 353
 354	tipc_node_attach_link(n_ptr, l_ptr);
 355
 356	k_init_timer(&l_ptr->timer, (Handler)link_timeout, (unsigned long)l_ptr);
 357	list_add_tail(&l_ptr->link_list, &b_ptr->links);
 358	tipc_k_signal((Handler)link_start, (unsigned long)l_ptr);
 359
 360	return l_ptr;
 361}
 362
 363/**
 364 * tipc_link_delete - delete a link
 365 * @l_ptr: pointer to link
 366 *
 367 * Note: 'tipc_net_lock' is write_locked, bearer is locked.
 368 * This routine must not grab the node lock until after link timer cancellation
 369 * to avoid a potential deadlock situation.
 370 */
 371void tipc_link_delete(struct tipc_link *l_ptr)
 372{
 373	if (!l_ptr) {
 374		err("Attempt to delete non-existent link\n");
 375		return;
 376	}
 377
 378	k_cancel_timer(&l_ptr->timer);
 379
 380	tipc_node_lock(l_ptr->owner);
 381	tipc_link_reset(l_ptr);
 382	tipc_node_detach_link(l_ptr->owner, l_ptr);
 383	tipc_link_stop(l_ptr);
 384	list_del_init(&l_ptr->link_list);
 385	tipc_node_unlock(l_ptr->owner);
 386	k_term_timer(&l_ptr->timer);
 387	kfree(l_ptr);
 388}
 389
 390static void link_start(struct tipc_link *l_ptr)
 391{
 392	tipc_node_lock(l_ptr->owner);
 393	link_state_event(l_ptr, STARTING_EVT);
 394	tipc_node_unlock(l_ptr->owner);
 395}
 396
 397/**
 398 * link_schedule_port - schedule port for deferred sending
 399 * @l_ptr: pointer to link
 400 * @origport: reference to sending port
 401 * @sz: amount of data to be sent
 402 *
 403 * Schedules port for renewed sending of messages after link congestion
 404 * has abated.
 405 */
 406static int link_schedule_port(struct tipc_link *l_ptr, u32 origport, u32 sz)
 407{
 408	struct tipc_port *p_ptr;
 409
 410	spin_lock_bh(&tipc_port_list_lock);
 411	p_ptr = tipc_port_lock(origport);
 412	if (p_ptr) {
 413		if (!p_ptr->wakeup)
 414			goto exit;
 415		if (!list_empty(&p_ptr->wait_list))
 416			goto exit;
 417		p_ptr->congested = 1;
 418		p_ptr->waiting_pkts = 1 + ((sz - 1) / l_ptr->max_pkt);
 419		list_add_tail(&p_ptr->wait_list, &l_ptr->waiting_ports);
 420		l_ptr->stats.link_congs++;
 421exit:
 422		tipc_port_unlock(p_ptr);
 423	}
 424	spin_unlock_bh(&tipc_port_list_lock);
 425	return -ELINKCONG;
 426}
 427
 428void tipc_link_wakeup_ports(struct tipc_link *l_ptr, int all)
 429{
 430	struct tipc_port *p_ptr;
 431	struct tipc_port *temp_p_ptr;
 432	int win = l_ptr->queue_limit[0] - l_ptr->out_queue_size;
 433
 434	if (all)
 435		win = 100000;
 436	if (win <= 0)
 437		return;
 438	if (!spin_trylock_bh(&tipc_port_list_lock))
 439		return;
 440	if (link_congested(l_ptr))
 441		goto exit;
 442	list_for_each_entry_safe(p_ptr, temp_p_ptr, &l_ptr->waiting_ports,
 443				 wait_list) {
 444		if (win <= 0)
 445			break;
 446		list_del_init(&p_ptr->wait_list);
 447		spin_lock_bh(p_ptr->lock);
 448		p_ptr->congested = 0;
 449		p_ptr->wakeup(p_ptr);
 450		win -= p_ptr->waiting_pkts;
 451		spin_unlock_bh(p_ptr->lock);
 452	}
 453
 454exit:
 455	spin_unlock_bh(&tipc_port_list_lock);
 456}
 457
 458/**
 459 * link_release_outqueue - purge link's outbound message queue
 460 * @l_ptr: pointer to link
 461 */
 462static void link_release_outqueue(struct tipc_link *l_ptr)
 463{
 464	struct sk_buff *buf = l_ptr->first_out;
 465	struct sk_buff *next;
 466
 467	while (buf) {
 468		next = buf->next;
 469		kfree_skb(buf);
 470		buf = next;
 471	}
 472	l_ptr->first_out = NULL;
 473	l_ptr->out_queue_size = 0;
 474}
 475
 476/**
 477 * tipc_link_reset_fragments - purge link's inbound message fragments queue
 478 * @l_ptr: pointer to link
 479 */
 480void tipc_link_reset_fragments(struct tipc_link *l_ptr)
 481{
 482	struct sk_buff *buf = l_ptr->defragm_buf;
 483	struct sk_buff *next;
 484
 485	while (buf) {
 486		next = buf->next;
 487		kfree_skb(buf);
 488		buf = next;
 489	}
 490	l_ptr->defragm_buf = NULL;
 491}
 492
 493/**
 494 * tipc_link_stop - purge all inbound and outbound messages associated with link
 495 * @l_ptr: pointer to link
 496 */
 497void tipc_link_stop(struct tipc_link *l_ptr)
 498{
 499	struct sk_buff *buf;
 500	struct sk_buff *next;
 501
 502	buf = l_ptr->oldest_deferred_in;
 503	while (buf) {
 504		next = buf->next;
 505		kfree_skb(buf);
 506		buf = next;
 507	}
 508
 509	buf = l_ptr->first_out;
 510	while (buf) {
 511		next = buf->next;
 512		kfree_skb(buf);
 513		buf = next;
 514	}
 515
 516	tipc_link_reset_fragments(l_ptr);
 517
 518	kfree_skb(l_ptr->proto_msg_queue);
 519	l_ptr->proto_msg_queue = NULL;
 520}
 521
 522void tipc_link_reset(struct tipc_link *l_ptr)
 523{
 524	struct sk_buff *buf;
 525	u32 prev_state = l_ptr->state;
 526	u32 checkpoint = l_ptr->next_in_no;
 527	int was_active_link = tipc_link_is_active(l_ptr);
 528
 529	msg_set_session(l_ptr->pmsg, ((msg_session(l_ptr->pmsg) + 1) & 0xffff));
 530
 531	/* Link is down, accept any session */
 532	l_ptr->peer_session = INVALID_SESSION;
 533
 534	/* Prepare for max packet size negotiation */
 535	link_init_max_pkt(l_ptr);
 536
 537	l_ptr->state = RESET_UNKNOWN;
 538
 539	if ((prev_state == RESET_UNKNOWN) || (prev_state == RESET_RESET))
 540		return;
 541
 542	tipc_node_link_down(l_ptr->owner, l_ptr);
 543	tipc_bearer_remove_dest(l_ptr->b_ptr, l_ptr->addr);
 544
 545	if (was_active_link && tipc_node_active_links(l_ptr->owner) &&
 546	    l_ptr->owner->permit_changeover) {
 547		l_ptr->reset_checkpoint = checkpoint;
 548		l_ptr->exp_msg_count = START_CHANGEOVER;
 549	}
 550
 551	/* Clean up all queues: */
 552	link_release_outqueue(l_ptr);
 553	kfree_skb(l_ptr->proto_msg_queue);
 554	l_ptr->proto_msg_queue = NULL;
 555	buf = l_ptr->oldest_deferred_in;
 556	while (buf) {
 557		struct sk_buff *next = buf->next;
 558		kfree_skb(buf);
 559		buf = next;
 560	}
 561	if (!list_empty(&l_ptr->waiting_ports))
 562		tipc_link_wakeup_ports(l_ptr, 1);
 563
 564	l_ptr->retransm_queue_head = 0;
 565	l_ptr->retransm_queue_size = 0;
 566	l_ptr->last_out = NULL;
 567	l_ptr->first_out = NULL;
 568	l_ptr->next_out = NULL;
 569	l_ptr->unacked_window = 0;
 570	l_ptr->checkpoint = 1;
 571	l_ptr->next_out_no = 1;
 572	l_ptr->deferred_inqueue_sz = 0;
 573	l_ptr->oldest_deferred_in = NULL;
 574	l_ptr->newest_deferred_in = NULL;
 575	l_ptr->fsm_msg_cnt = 0;
 576	l_ptr->stale_count = 0;
 577	link_reset_statistics(l_ptr);
 578}
 579
 580
 581static void link_activate(struct tipc_link *l_ptr)
 582{
 583	l_ptr->next_in_no = l_ptr->stats.recv_info = 1;
 584	tipc_node_link_up(l_ptr->owner, l_ptr);
 585	tipc_bearer_add_dest(l_ptr->b_ptr, l_ptr->addr);
 586}
 587
 588/**
 589 * link_state_event - link finite state machine
 590 * @l_ptr: pointer to link
 591 * @event: state machine event to process
 592 */
 593static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
 594{
 595	struct tipc_link *other;
 596	u32 cont_intv = l_ptr->continuity_interval;
 597
 598	if (!l_ptr->started && (event != STARTING_EVT))
 599		return;		/* Not yet. */
 600
 601	if (link_blocked(l_ptr)) {
 602		if (event == TIMEOUT_EVT)
 603			link_set_timer(l_ptr, cont_intv);
 604		return;	  /* Changeover going on */
 605	}
 606
 607	switch (l_ptr->state) {
 608	case WORKING_WORKING:
 609		switch (event) {
 610		case TRAFFIC_MSG_EVT:
 611		case ACTIVATE_MSG:
 612			break;
 613		case TIMEOUT_EVT:
 614			if (l_ptr->next_in_no != l_ptr->checkpoint) {
 615				l_ptr->checkpoint = l_ptr->next_in_no;
 616				if (tipc_bclink_acks_missing(l_ptr->owner)) {
 617					tipc_link_send_proto_msg(l_ptr, STATE_MSG,
 618								 0, 0, 0, 0, 0);
 619					l_ptr->fsm_msg_cnt++;
 620				} else if (l_ptr->max_pkt < l_ptr->max_pkt_target) {
 621					tipc_link_send_proto_msg(l_ptr, STATE_MSG,
 622								 1, 0, 0, 0, 0);
 623					l_ptr->fsm_msg_cnt++;
 624				}
 625				link_set_timer(l_ptr, cont_intv);
 626				break;
 627			}
 628			l_ptr->state = WORKING_UNKNOWN;
 629			l_ptr->fsm_msg_cnt = 0;
 630			tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
 631			l_ptr->fsm_msg_cnt++;
 632			link_set_timer(l_ptr, cont_intv / 4);
 633			break;
 634		case RESET_MSG:
 635			info("Resetting link <%s>, requested by peer\n",
 636			     l_ptr->name);
 637			tipc_link_reset(l_ptr);
 638			l_ptr->state = RESET_RESET;
 639			l_ptr->fsm_msg_cnt = 0;
 640			tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
 641			l_ptr->fsm_msg_cnt++;
 642			link_set_timer(l_ptr, cont_intv);
 643			break;
 644		default:
 645			err("Unknown link event %u in WW state\n", event);
 646		}
 647		break;
 648	case WORKING_UNKNOWN:
 649		switch (event) {
 650		case TRAFFIC_MSG_EVT:
 651		case ACTIVATE_MSG:
 652			l_ptr->state = WORKING_WORKING;
 653			l_ptr->fsm_msg_cnt = 0;
 654			link_set_timer(l_ptr, cont_intv);
 655			break;
 656		case RESET_MSG:
 657			info("Resetting link <%s>, requested by peer "
 658			     "while probing\n", l_ptr->name);
 659			tipc_link_reset(l_ptr);
 660			l_ptr->state = RESET_RESET;
 661			l_ptr->fsm_msg_cnt = 0;
 662			tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
 663			l_ptr->fsm_msg_cnt++;
 664			link_set_timer(l_ptr, cont_intv);
 665			break;
 666		case TIMEOUT_EVT:
 667			if (l_ptr->next_in_no != l_ptr->checkpoint) {
 668				l_ptr->state = WORKING_WORKING;
 669				l_ptr->fsm_msg_cnt = 0;
 670				l_ptr->checkpoint = l_ptr->next_in_no;
 671				if (tipc_bclink_acks_missing(l_ptr->owner)) {
 672					tipc_link_send_proto_msg(l_ptr, STATE_MSG,
 673								 0, 0, 0, 0, 0);
 674					l_ptr->fsm_msg_cnt++;
 675				}
 676				link_set_timer(l_ptr, cont_intv);
 677			} else if (l_ptr->fsm_msg_cnt < l_ptr->abort_limit) {
 678				tipc_link_send_proto_msg(l_ptr, STATE_MSG,
 679							 1, 0, 0, 0, 0);
 680				l_ptr->fsm_msg_cnt++;
 681				link_set_timer(l_ptr, cont_intv / 4);
 682			} else {	/* Link has failed */
 683				warn("Resetting link <%s>, peer not responding\n",
 684				     l_ptr->name);
 685				tipc_link_reset(l_ptr);
 686				l_ptr->state = RESET_UNKNOWN;
 687				l_ptr->fsm_msg_cnt = 0;
 688				tipc_link_send_proto_msg(l_ptr, RESET_MSG,
 689							 0, 0, 0, 0, 0);
 690				l_ptr->fsm_msg_cnt++;
 691				link_set_timer(l_ptr, cont_intv);
 692			}
 693			break;
 694		default:
 695			err("Unknown link event %u in WU state\n", event);
 696		}
 697		break;
 698	case RESET_UNKNOWN:
 699		switch (event) {
 700		case TRAFFIC_MSG_EVT:
 701			break;
 702		case ACTIVATE_MSG:
 703			other = l_ptr->owner->active_links[0];
 704			if (other && link_working_unknown(other))
 705				break;
 706			l_ptr->state = WORKING_WORKING;
 707			l_ptr->fsm_msg_cnt = 0;
 708			link_activate(l_ptr);
 709			tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
 710			l_ptr->fsm_msg_cnt++;
 711			link_set_timer(l_ptr, cont_intv);
 712			break;
 713		case RESET_MSG:
 714			l_ptr->state = RESET_RESET;
 715			l_ptr->fsm_msg_cnt = 0;
 716			tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 1, 0, 0, 0, 0);
 717			l_ptr->fsm_msg_cnt++;
 718			link_set_timer(l_ptr, cont_intv);
 719			break;
 720		case STARTING_EVT:
 721			l_ptr->started = 1;
 722			/* fall through */
 723		case TIMEOUT_EVT:
 724			tipc_link_send_proto_msg(l_ptr, RESET_MSG, 0, 0, 0, 0, 0);
 725			l_ptr->fsm_msg_cnt++;
 726			link_set_timer(l_ptr, cont_intv);
 727			break;
 728		default:
 729			err("Unknown link event %u in RU state\n", event);
 730		}
 731		break;
 732	case RESET_RESET:
 733		switch (event) {
 734		case TRAFFIC_MSG_EVT:
 735		case ACTIVATE_MSG:
 736			other = l_ptr->owner->active_links[0];
 737			if (other && link_working_unknown(other))
 738				break;
 739			l_ptr->state = WORKING_WORKING;
 740			l_ptr->fsm_msg_cnt = 0;
 741			link_activate(l_ptr);
 742			tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
 743			l_ptr->fsm_msg_cnt++;
 744			link_set_timer(l_ptr, cont_intv);
 745			break;
 746		case RESET_MSG:
 747			break;
 748		case TIMEOUT_EVT:
 749			tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
 750			l_ptr->fsm_msg_cnt++;
 751			link_set_timer(l_ptr, cont_intv);
 752			break;
 753		default:
 754			err("Unknown link event %u in RR state\n", event);
 755		}
 756		break;
 757	default:
 758		err("Unknown link state %u/%u\n", l_ptr->state, event);
 759	}
 760}
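/*
 * Illustrative summary of the state machine above (a sketch derived
 * from the code, not part of the original file): each state pairs the
 * local view of the link with what is known about the peer, and the
 * timer drives probing.  A typical healthy start-up looks like:
 *
 *	RESET_UNKNOWN   --RESET_MSG-->     RESET_RESET     (sends ACTIVATE_MSG)
 *	RESET_RESET     --ACTIVATE_MSG-->  WORKING_WORKING
 *	WORKING_WORKING --TIMEOUT_EVT, no new traffic--> WORKING_UNKNOWN
 *	                                  (probes at cont_intv / 4)
 *	WORKING_UNKNOWN --TRAFFIC_MSG_EVT--> WORKING_WORKING
 *	WORKING_UNKNOWN --abort_limit probes unanswered--> reset, RESET_UNKNOWN
 */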
 761
 762/*
 763 * link_bundle_buf(): Append contents of a buffer to
 764 * the tail of an existing one.
 765 */
 766static int link_bundle_buf(struct tipc_link *l_ptr,
 767			   struct sk_buff *bundler,
 768			   struct sk_buff *buf)
 769{
 770	struct tipc_msg *bundler_msg = buf_msg(bundler);
 771	struct tipc_msg *msg = buf_msg(buf);
 772	u32 size = msg_size(msg);
 773	u32 bundle_size = msg_size(bundler_msg);
 774	u32 to_pos = align(bundle_size);
 775	u32 pad = to_pos - bundle_size;
 776
 777	if (msg_user(bundler_msg) != MSG_BUNDLER)
 778		return 0;
 779	if (msg_type(bundler_msg) != OPEN_MSG)
 780		return 0;
 781	if (skb_tailroom(bundler) < (pad + size))
 782		return 0;
 783	if (l_ptr->max_pkt < (to_pos + size))
 784		return 0;
 785
 786	skb_put(bundler, pad + size);
 787	skb_copy_to_linear_data_offset(bundler, to_pos, buf->data, size);
 788	msg_set_size(bundler_msg, to_pos + size);
 789	msg_set_msgcnt(bundler_msg, msg_msgcnt(bundler_msg) + 1);
 790	kfree_skb(buf);
 791	l_ptr->stats.sent_bundled++;
 792	return 1;
 793}
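/*
 * Worked example for the arithmetic above (illustrative, assuming the
 * usual 4-byte alignment from align()): with bundle_size = 70 we get
 * to_pos = align(70) = 72 and pad = 2, so appending a 100-byte message
 * requires skb_tailroom(bundler) >= 102 and a link MTU of at least
 * to_pos + size = 172; the bundle's message count is then bumped by
 * one and the original buffer is freed.
 */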
 794
 795static void link_add_to_outqueue(struct tipc_link *l_ptr,
 796				 struct sk_buff *buf,
 797				 struct tipc_msg *msg)
 798{
 799	u32 ack = mod(l_ptr->next_in_no - 1);
 800	u32 seqno = mod(l_ptr->next_out_no++);
 801
 802	msg_set_word(msg, 2, ((ack << 16) | seqno));
 803	msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
 804	buf->next = NULL;
 805	if (l_ptr->first_out) {
 806		l_ptr->last_out->next = buf;
 807		l_ptr->last_out = buf;
 808	} else
 809		l_ptr->first_out = l_ptr->last_out = buf;
 810
 811	l_ptr->out_queue_size++;
 812	if (l_ptr->out_queue_size > l_ptr->stats.max_queue_sz)
 813		l_ptr->stats.max_queue_sz = l_ptr->out_queue_size;
 814}
 815
 816static void link_add_chain_to_outqueue(struct tipc_link *l_ptr,
 817				       struct sk_buff *buf_chain,
 818				       u32 long_msgno)
 819{
 820	struct sk_buff *buf;
 821	struct tipc_msg *msg;
 822
 823	if (!l_ptr->next_out)
 824		l_ptr->next_out = buf_chain;
 825	while (buf_chain) {
 826		buf = buf_chain;
 827		buf_chain = buf_chain->next;
 828
 829		msg = buf_msg(buf);
 830		msg_set_long_msgno(msg, long_msgno);
 831		link_add_to_outqueue(l_ptr, buf, msg);
 832	}
 833}
 834
 835/*
 836 * tipc_link_send_buf() is the 'full path' for messages, called from
 837 * inside TIPC when the 'fast path' in tipc_send_buf
 838 * has failed, and from link_send()
 839 */
 840int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
 841{
 842	struct tipc_msg *msg = buf_msg(buf);
 843	u32 size = msg_size(msg);
 844	u32 dsz = msg_data_sz(msg);
 845	u32 queue_size = l_ptr->out_queue_size;
 846	u32 imp = tipc_msg_tot_importance(msg);
 847	u32 queue_limit = l_ptr->queue_limit[imp];
 848	u32 max_packet = l_ptr->max_pkt;
 849
 850	/* Match msg importance against queue limits: */
 851	if (unlikely(queue_size >= queue_limit)) {
 852		if (imp <= TIPC_CRITICAL_IMPORTANCE) {
 853			link_schedule_port(l_ptr, msg_origport(msg), size);
 854			kfree_skb(buf);
 855			return -ELINKCONG;
 856		}
 857		kfree_skb(buf);
 858		if (imp > CONN_MANAGER) {
 859			warn("Resetting link <%s>, send queue full\n", l_ptr->name);
 860			tipc_link_reset(l_ptr);
 861		}
 862		return dsz;
 863	}
 864
 865	/* Fragmentation needed ? */
 866	if (size > max_packet)
 867		return link_send_long_buf(l_ptr, buf);
 868
 869	/* Packet can be queued or sent. */
 870	if (likely(!tipc_bearer_congested(l_ptr->b_ptr, l_ptr) &&
 871		   !link_congested(l_ptr))) {
 872		link_add_to_outqueue(l_ptr, buf, msg);
 873
 874		if (likely(tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr))) {
 875			l_ptr->unacked_window = 0;
 876		} else {
 877			tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
 878			l_ptr->stats.bearer_congs++;
 879			l_ptr->next_out = buf;
 880		}
 881		return dsz;
 882	}
 883	/* Congestion: can message be bundled ? */
 884	if ((msg_user(msg) != CHANGEOVER_PROTOCOL) &&
 885	    (msg_user(msg) != MSG_FRAGMENTER)) {
 886
 887		/* Try adding message to an existing bundle */
 888		if (l_ptr->next_out &&
 889		    link_bundle_buf(l_ptr, l_ptr->last_out, buf)) {
 890			tipc_bearer_resolve_congestion(l_ptr->b_ptr, l_ptr);
 891			return dsz;
 892		}
 893
 894		/* Try creating a new bundle */
 895		if (size <= max_packet * 2 / 3) {
 896			struct sk_buff *bundler = tipc_buf_acquire(max_packet);
 897			struct tipc_msg bundler_hdr;
 898
 899			if (bundler) {
 900				tipc_msg_init(&bundler_hdr, MSG_BUNDLER, OPEN_MSG,
 901					 INT_H_SIZE, l_ptr->addr);
 902				skb_copy_to_linear_data(bundler, &bundler_hdr,
 903							INT_H_SIZE);
 904				skb_trim(bundler, INT_H_SIZE);
 905				link_bundle_buf(l_ptr, bundler, buf);
 906				buf = bundler;
 907				msg = buf_msg(buf);
 908				l_ptr->stats.sent_bundles++;
 909			}
 910		}
 911	}
 912	if (!l_ptr->next_out)
 913		l_ptr->next_out = buf;
 914	link_add_to_outqueue(l_ptr, buf, msg);
 915	tipc_bearer_resolve_congestion(l_ptr->b_ptr, l_ptr);
 916	return dsz;
 917}
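/*
 * Summary of the decision ladder above (illustrative): (1) a send
 * queue at or over queue_limit[imp] either schedules the port for a
 * congestion wakeup (-ELINKCONG) or, beyond CONN_MANAGER importance,
 * resets the link; (2) oversized messages are fragmented via
 * link_send_long_buf(); (3) an uncongested link and bearer send
 * immediately; (4) otherwise the message is bundled if possible and
 * left on the queue with next_out pointing at it.
 */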
 918
 919/*
 920 * tipc_link_send(): same as tipc_link_send_buf(), but the link to use has
 921 * not been selected yet, and the owner node is not locked.
 922 * Called by TIPC internal users, e.g. the name distributor
 923 */
 924int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector)
 925{
 926	struct tipc_link *l_ptr;
 927	struct tipc_node *n_ptr;
 928	int res = -ELINKCONG;
 929
 930	read_lock_bh(&tipc_net_lock);
 931	n_ptr = tipc_node_find(dest);
 932	if (n_ptr) {
 933		tipc_node_lock(n_ptr);
 934		l_ptr = n_ptr->active_links[selector & 1];
 935		if (l_ptr)
 936			res = tipc_link_send_buf(l_ptr, buf);
 937		else
 938			kfree_skb(buf);
 939		tipc_node_unlock(n_ptr);
 940	} else {
 941		kfree_skb(buf);
 942	}
 943	read_unlock_bh(&tipc_net_lock);
 944	return res;
 945}
 946
 947/*
 948 * tipc_link_send_names - send name table entries to new neighbor
 949 *
 950 * Send routine for bulk delivery of name table messages when contact
 951 * with a new neighbor occurs. No link congestion checking is performed
 952 * because name table messages *must* be delivered. The messages must be
 953 * small enough not to require fragmentation.
 954 * Called without any locks held.
 955 */
 956void tipc_link_send_names(struct list_head *message_list, u32 dest)
 957{
 958	struct tipc_node *n_ptr;
 959	struct tipc_link *l_ptr;
 960	struct sk_buff *buf;
 961	struct sk_buff *temp_buf;
 962
 963	if (list_empty(message_list))
 964		return;
 965
 966	read_lock_bh(&tipc_net_lock);
 967	n_ptr = tipc_node_find(dest);
 968	if (n_ptr) {
 969		tipc_node_lock(n_ptr);
 970		l_ptr = n_ptr->active_links[0];
 971		if (l_ptr) {
 972			/* convert circular list to linear list */
 973			((struct sk_buff *)message_list->prev)->next = NULL;
 974			link_add_chain_to_outqueue(l_ptr,
 975				(struct sk_buff *)message_list->next, 0);
 976			tipc_link_push_queue(l_ptr);
 977			INIT_LIST_HEAD(message_list);
 978		}
 979		tipc_node_unlock(n_ptr);
 980	}
 981	read_unlock_bh(&tipc_net_lock);
 982
 983	/* discard the messages if they couldn't be sent */
 984	list_for_each_safe(buf, temp_buf, ((struct sk_buff *)message_list)) {
 985		list_del((struct list_head *)buf);
 986		kfree_skb(buf);
 987	}
 988}
 989
 990/*
 991 * link_send_buf_fast: Entry for data messages where the
 992 * destination link is known and the header is complete,
1993 * including the total message length. Very time critical.
 994 * Link is locked. Returns user data length.
 995 */
 996static int link_send_buf_fast(struct tipc_link *l_ptr, struct sk_buff *buf,
 997			      u32 *used_max_pkt)
 998{
 999	struct tipc_msg *msg = buf_msg(buf);
1000	int res = msg_data_sz(msg);
1001
1002	if (likely(!link_congested(l_ptr))) {
1003		if (likely(msg_size(msg) <= l_ptr->max_pkt)) {
1004			if (likely(list_empty(&l_ptr->b_ptr->cong_links))) {
1005				link_add_to_outqueue(l_ptr, buf, msg);
1006				if (likely(tipc_bearer_send(l_ptr->b_ptr, buf,
1007							    &l_ptr->media_addr))) {
1008					l_ptr->unacked_window = 0;
1009					return res;
1010				}
1011				tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
1012				l_ptr->stats.bearer_congs++;
1013				l_ptr->next_out = buf;
1014				return res;
1015			}
1016		} else
1017			*used_max_pkt = l_ptr->max_pkt;
1018	}
1019	return tipc_link_send_buf(l_ptr, buf);  /* All other cases */
1020}
1021
1022/*
1023 * tipc_send_buf_fast: Entry for data messages where the
1024 * destination node is known and the header is complete,
1025 * including the total message length.
1026 * Returns user data length.
1027 */
1028int tipc_send_buf_fast(struct sk_buff *buf, u32 destnode)
1029{
1030	struct tipc_link *l_ptr;
1031	struct tipc_node *n_ptr;
1032	int res;
1033	u32 selector = msg_origport(buf_msg(buf)) & 1;
1034	u32 dummy;
1035
1036	read_lock_bh(&tipc_net_lock);
1037	n_ptr = tipc_node_find(destnode);
1038	if (likely(n_ptr)) {
1039		tipc_node_lock(n_ptr);
1040		l_ptr = n_ptr->active_links[selector];
1041		if (likely(l_ptr)) {
1042			res = link_send_buf_fast(l_ptr, buf, &dummy);
1043			tipc_node_unlock(n_ptr);
1044			read_unlock_bh(&tipc_net_lock);
1045			return res;
1046		}
1047		tipc_node_unlock(n_ptr);
1048	}
1049	read_unlock_bh(&tipc_net_lock);
1050	res = msg_data_sz(buf_msg(buf));
1051	tipc_reject_msg(buf, TIPC_ERR_NO_NODE);
1052	return res;
1053}
1054
1055
1056/*
1057 * tipc_link_send_sections_fast: Entry for messages where the
1058 * destination processor is known and the header is complete,
1059 * except for total message length.
1060 * Returns user data length or errno.
1061 */
1062int tipc_link_send_sections_fast(struct tipc_port *sender,
1063				 struct iovec const *msg_sect,
1064				 const u32 num_sect,
1065				 unsigned int total_len,
1066				 u32 destaddr)
1067{
1068	struct tipc_msg *hdr = &sender->phdr;
1069	struct tipc_link *l_ptr;
1070	struct sk_buff *buf;
1071	struct tipc_node *node;
1072	int res;
1073	u32 selector = msg_origport(hdr) & 1;
1074
1075again:
1076	/*
1077	 * Try building message using port's max_pkt hint.
1078	 * (Must not hold any locks while building message.)
1079	 */
1080	res = tipc_msg_build(hdr, msg_sect, num_sect, total_len,
1081			     sender->max_pkt, !sender->user_port, &buf);
1082
1083	read_lock_bh(&tipc_net_lock);
1084	node = tipc_node_find(destaddr);
1085	if (likely(node)) {
1086		tipc_node_lock(node);
1087		l_ptr = node->active_links[selector];
1088		if (likely(l_ptr)) {
1089			if (likely(buf)) {
1090				res = link_send_buf_fast(l_ptr, buf,
1091							 &sender->max_pkt);
1092exit:
1093				tipc_node_unlock(node);
1094				read_unlock_bh(&tipc_net_lock);
1095				return res;
1096			}
1097
1098			/* Exit if build request was invalid */
1099			if (unlikely(res < 0))
1100				goto exit;
1101
1102			/* Exit if link (or bearer) is congested */
1103			if (link_congested(l_ptr) ||
1104			    !list_empty(&l_ptr->b_ptr->cong_links)) {
1105				res = link_schedule_port(l_ptr,
1106							 sender->ref, res);
1107				goto exit;
1108			}
1109
1110			/*
1111			 * Message size exceeds max_pkt hint; update hint,
1112			 * then re-try fast path or fragment the message
1113			 */
1114			sender->max_pkt = l_ptr->max_pkt;
1115			tipc_node_unlock(node);
1116			read_unlock_bh(&tipc_net_lock);
1117
1118
1119			if ((msg_hdr_sz(hdr) + res) <= sender->max_pkt)
1120				goto again;
1121
1122			return link_send_sections_long(sender, msg_sect,
1123						       num_sect, total_len,
1124						       destaddr);
1125		}
1126		tipc_node_unlock(node);
1127	}
1128	read_unlock_bh(&tipc_net_lock);
1129
1130	/* Couldn't find a link to the destination node */
1131	if (buf)
1132		return tipc_reject_msg(buf, TIPC_ERR_NO_NODE);
1133	if (res >= 0)
1134		return tipc_port_reject_sections(sender, hdr, msg_sect, num_sect,
1135						 total_len, TIPC_ERR_NO_NODE);
1136	return res;
1137}
1138
1139/*
1140 * link_send_sections_long(): Entry for long messages where the
1141 * destination node is known and the header is complete,
1142 * including the total message length.
1143 * Link and bearer congestion status have been checked to be ok,
1144 * and are ignored if they change.
1145 *
1146 * Note that fragments do not use the full link MTU so that they won't have
1147 * to undergo refragmentation if link changeover causes them to be sent
1148 * over another link with an additional tunnel header added as prefix.
1149 * (Refragmentation will still occur if the other link has a smaller MTU.)
1150 *
1151 * Returns user data length or errno.
1152 */
1153static int link_send_sections_long(struct tipc_port *sender,
1154				   struct iovec const *msg_sect,
1155				   u32 num_sect,
1156				   unsigned int total_len,
1157				   u32 destaddr)
1158{
1159	struct tipc_link *l_ptr;
1160	struct tipc_node *node;
1161	struct tipc_msg *hdr = &sender->phdr;
1162	u32 dsz = total_len;
1163	u32 max_pkt, fragm_sz, rest;
1164	struct tipc_msg fragm_hdr;
1165	struct sk_buff *buf, *buf_chain, *prev;
1166	u32 fragm_crs, fragm_rest, hsz, sect_rest;
1167	const unchar *sect_crs;
1168	int curr_sect;
1169	u32 fragm_no;
1170
1171again:
1172	fragm_no = 1;
1173	max_pkt = sender->max_pkt - INT_H_SIZE;
1174		/* leave room for tunnel header in case of link changeover */
1175	fragm_sz = max_pkt - INT_H_SIZE;
1176		/* leave room for fragmentation header in each fragment */
1177	rest = dsz;
1178	fragm_crs = 0;
1179	fragm_rest = 0;
1180	sect_rest = 0;
1181	sect_crs = NULL;
1182	curr_sect = -1;
1183
1184	/* Prepare reusable fragment header */
1185	tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
1186		 INT_H_SIZE, msg_destnode(hdr));
1187	msg_set_size(&fragm_hdr, max_pkt);
1188	msg_set_fragm_no(&fragm_hdr, 1);
1189
1190	/* Prepare header of first fragment */
1191	buf_chain = buf = tipc_buf_acquire(max_pkt);
1192	if (!buf)
1193		return -ENOMEM;
1194	buf->next = NULL;
1195	skb_copy_to_linear_data(buf, &fragm_hdr, INT_H_SIZE);
1196	hsz = msg_hdr_sz(hdr);
1197	skb_copy_to_linear_data_offset(buf, INT_H_SIZE, hdr, hsz);
1198
1199	/* Chop up message */
1200	fragm_crs = INT_H_SIZE + hsz;
1201	fragm_rest = fragm_sz - hsz;
1202
1203	do {		/* For all sections */
1204		u32 sz;
1205
1206		if (!sect_rest) {
1207			sect_rest = msg_sect[++curr_sect].iov_len;
1208			sect_crs = (const unchar *)msg_sect[curr_sect].iov_base;
1209		}
1210
1211		if (sect_rest < fragm_rest)
1212			sz = sect_rest;
1213		else
1214			sz = fragm_rest;
1215
1216		if (likely(!sender->user_port)) {
1217			if (copy_from_user(buf->data + fragm_crs, sect_crs, sz)) {
1218error:
1219				for (; buf_chain; buf_chain = buf) {
1220					buf = buf_chain->next;
1221					kfree_skb(buf_chain);
1222				}
1223				return -EFAULT;
1224			}
1225		} else
1226			skb_copy_to_linear_data_offset(buf, fragm_crs,
1227						       sect_crs, sz);
1228		sect_crs += sz;
1229		sect_rest -= sz;
1230		fragm_crs += sz;
1231		fragm_rest -= sz;
1232		rest -= sz;
1233
1234		if (!fragm_rest && rest) {
1235
1236			/* Initiate new fragment: */
1237			if (rest <= fragm_sz) {
1238				fragm_sz = rest;
1239				msg_set_type(&fragm_hdr, LAST_FRAGMENT);
1240			} else {
1241				msg_set_type(&fragm_hdr, FRAGMENT);
1242			}
1243			msg_set_size(&fragm_hdr, fragm_sz + INT_H_SIZE);
1244			msg_set_fragm_no(&fragm_hdr, ++fragm_no);
1245			prev = buf;
1246			buf = tipc_buf_acquire(fragm_sz + INT_H_SIZE);
1247			if (!buf)
1248				goto error;
1249
1250			buf->next = NULL;
1251			prev->next = buf;
1252			skb_copy_to_linear_data(buf, &fragm_hdr, INT_H_SIZE);
1253			fragm_crs = INT_H_SIZE;
1254			fragm_rest = fragm_sz;
1255		}
1256	} while (rest > 0);
1257
1258	/*
1259	 * Now we have a buffer chain. Select a link and check
1260	 * that packet size is still OK
1261	 */
1262	node = tipc_node_find(destaddr);
1263	if (likely(node)) {
1264		tipc_node_lock(node);
1265		l_ptr = node->active_links[sender->ref & 1];
1266		if (!l_ptr) {
1267			tipc_node_unlock(node);
1268			goto reject;
1269		}
1270		if (l_ptr->max_pkt < max_pkt) {
1271			sender->max_pkt = l_ptr->max_pkt;
1272			tipc_node_unlock(node);
1273			for (; buf_chain; buf_chain = buf) {
1274				buf = buf_chain->next;
1275				kfree_skb(buf_chain);
1276			}
1277			goto again;
1278		}
1279	} else {
1280reject:
1281		for (; buf_chain; buf_chain = buf) {
1282			buf = buf_chain->next;
1283			kfree_skb(buf_chain);
1284		}
1285		return tipc_port_reject_sections(sender, hdr, msg_sect, num_sect,
1286						 total_len, TIPC_ERR_NO_NODE);
1287	}
1288
1289	/* Append chain of fragments to send queue & send them */
1290	l_ptr->long_msg_seq_no++;
1291	link_add_chain_to_outqueue(l_ptr, buf_chain, l_ptr->long_msg_seq_no);
1292	l_ptr->stats.sent_fragments += fragm_no;
1293	l_ptr->stats.sent_fragmented++;
1294	tipc_link_push_queue(l_ptr);
1295	tipc_node_unlock(node);
1296	return dsz;
1297}
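/*
 * Worked example for the fragmentation arithmetic above (illustrative,
 * assuming INT_H_SIZE is 40 bytes): with sender->max_pkt = 1500,
 * max_pkt = 1500 - 40 = 1460 leaves room for a changeover tunnel
 * header, and fragm_sz = 1460 - 40 = 1420 leaves room for the fragment
 * header itself.  A 10000-byte message with a 24-byte user header fits
 * 1420 - 24 = 1396 data bytes in the first fragment and 1420 in each
 * later one, i.e. 1 + DIV_ROUND_UP(10000 - 1396, 1420) = 8 fragments.
 */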
1298
1299/*
1300 * tipc_link_push_packet: Push one unsent packet to the media
1301 */
1302u32 tipc_link_push_packet(struct tipc_link *l_ptr)
1303{
1304	struct sk_buff *buf = l_ptr->first_out;
1305	u32 r_q_size = l_ptr->retransm_queue_size;
1306	u32 r_q_head = l_ptr->retransm_queue_head;
1307
1308	/* Step to position where retransmission failed, if any,    */
1309	/* consider that buffers may have been released in meantime */
1310	if (r_q_size && buf) {
1311		u32 last = lesser(mod(r_q_head + r_q_size),
1312				  link_last_sent(l_ptr));
1313		u32 first = buf_seqno(buf);
1314
1315		while (buf && less(first, r_q_head)) {
1316			first = mod(first + 1);
1317			buf = buf->next;
1318		}
1319		l_ptr->retransm_queue_head = r_q_head = first;
1320		l_ptr->retransm_queue_size = r_q_size = mod(last - first);
1321	}
1322
1323	/* Continue retransmission now, if there is anything: */
1324	if (r_q_size && buf) {
1325		msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
1326		msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
1327		if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
1328			l_ptr->retransm_queue_head = mod(++r_q_head);
1329			l_ptr->retransm_queue_size = --r_q_size;
1330			l_ptr->stats.retransmitted++;
1331			return 0;
1332		} else {
1333			l_ptr->stats.bearer_congs++;
1334			return PUSH_FAILED;
1335		}
1336	}
1337
1338	/* Send deferred protocol message, if any: */
1339	buf = l_ptr->proto_msg_queue;
1340	if (buf) {
1341		msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
1342		msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
1343		if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
1344			l_ptr->unacked_window = 0;
1345			kfree_skb(buf);
1346			l_ptr->proto_msg_queue = NULL;
1347			return 0;
1348		} else {
1349			l_ptr->stats.bearer_congs++;
1350			return PUSH_FAILED;
1351		}
1352	}
1353
1354	/* Send one deferred data message, if send window not full: */
1355	buf = l_ptr->next_out;
1356	if (buf) {
1357		struct tipc_msg *msg = buf_msg(buf);
1358		u32 next = msg_seqno(msg);
1359		u32 first = buf_seqno(l_ptr->first_out);
1360
1361		if (mod(next - first) < l_ptr->queue_limit[0]) {
1362			msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
1363			msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
1364			if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
1365				if (msg_user(msg) == MSG_BUNDLER)
1366					msg_set_type(msg, CLOSED_MSG);
1367				l_ptr->next_out = buf->next;
1368				return 0;
1369			} else {
1370				l_ptr->stats.bearer_congs++;
1371				return PUSH_FAILED;
1372			}
1373		}
1374	}
1375	return PUSH_FINISHED;
1376}
1377
1378/*
1379 * push_queue(): push out the unsent messages of a link where
1380 *               congestion has abated. Node is locked
1381 */
1382void tipc_link_push_queue(struct tipc_link *l_ptr)
1383{
1384	u32 res;
1385
1386	if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr))
1387		return;
1388
1389	do {
1390		res = tipc_link_push_packet(l_ptr);
1391	} while (!res);
1392
1393	if (res == PUSH_FAILED)
1394		tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
1395}
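/*
 * Contract of the push loop above (illustrative): tipc_link_push_packet()
 * returns 0 for each packet it manages to put on the wire, PUSH_FAILED
 * when the bearer congests, and PUSH_FINISHED once the retransmit
 * queue, the deferred protocol message and the send window have all
 * been drained; only the congestion case re-schedules the bearer.
 */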
1396
1397static void link_reset_all(unsigned long addr)
1398{
1399	struct tipc_node *n_ptr;
1400	char addr_string[16];
1401	u32 i;
1402
1403	read_lock_bh(&tipc_net_lock);
1404	n_ptr = tipc_node_find((u32)addr);
1405	if (!n_ptr) {
1406		read_unlock_bh(&tipc_net_lock);
1407		return;	/* node no longer exists */
1408	}
1409
1410	tipc_node_lock(n_ptr);
1411
1412	warn("Resetting all links to %s\n",
1413	     tipc_addr_string_fill(addr_string, n_ptr->addr));
1414
1415	for (i = 0; i < MAX_BEARERS; i++) {
1416		if (n_ptr->links[i]) {
1417			link_print(n_ptr->links[i], "Resetting link\n");
1418			tipc_link_reset(n_ptr->links[i]);
1419		}
1420	}
1421
1422	tipc_node_unlock(n_ptr);
1423	read_unlock_bh(&tipc_net_lock);
1424}
1425
1426static void link_retransmit_failure(struct tipc_link *l_ptr,
1427					struct sk_buff *buf)
1428{
1429	struct tipc_msg *msg = buf_msg(buf);
1430
1431	warn("Retransmission failure on link <%s>\n", l_ptr->name);
1432
1433	if (l_ptr->addr) {
1434		/* Handle failure on standard link */
1435		link_print(l_ptr, "Resetting link\n");
1436		tipc_link_reset(l_ptr);
1437
1438	} else {
1439		/* Handle failure on broadcast link */
1440		struct tipc_node *n_ptr;
1441		char addr_string[16];
1442
1443		info("Msg seq number: %u,  ", msg_seqno(msg));
1444		info("Outstanding acks: %lu\n",
1445		     (unsigned long) TIPC_SKB_CB(buf)->handle);
1446
1447		n_ptr = tipc_bclink_retransmit_to();
1448		tipc_node_lock(n_ptr);
1449
1450		tipc_addr_string_fill(addr_string, n_ptr->addr);
1451		info("Broadcast link info for %s\n", addr_string);
1452		info("Supportable: %d,  ", n_ptr->bclink.supportable);
1453		info("Supported: %d,  ", n_ptr->bclink.supported);
1454		info("Acked: %u\n", n_ptr->bclink.acked);
1455		info("Last in: %u,  ", n_ptr->bclink.last_in);
1456		info("Oos state: %u,  ", n_ptr->bclink.oos_state);
1457		info("Last sent: %u\n", n_ptr->bclink.last_sent);
1458
1459		tipc_k_signal((Handler)link_reset_all, (unsigned long)n_ptr->addr);
1460
1461		tipc_node_unlock(n_ptr);
1462
1463		l_ptr->stale_count = 0;
1464	}
1465}
1466
1467void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *buf,
1468			  u32 retransmits)
1469{
1470	struct tipc_msg *msg;
1471
1472	if (!buf)
1473		return;
1474
1475	msg = buf_msg(buf);
1476
1477	if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr)) {
1478		if (l_ptr->retransm_queue_size == 0) {
1479			l_ptr->retransm_queue_head = msg_seqno(msg);
1480			l_ptr->retransm_queue_size = retransmits;
1481		} else {
1482			err("Unexpected retransmit on link %s (qsize=%d)\n",
1483			    l_ptr->name, l_ptr->retransm_queue_size);
1484		}
1485		return;
1486	} else {
1487		/* Detect repeated retransmit failures on uncongested bearer */
1488		if (l_ptr->last_retransmitted == msg_seqno(msg)) {
1489			if (++l_ptr->stale_count > 100) {
1490				link_retransmit_failure(l_ptr, buf);
1491				return;
1492			}
1493		} else {
1494			l_ptr->last_retransmitted = msg_seqno(msg);
1495			l_ptr->stale_count = 1;
1496		}
1497	}
1498
1499	while (retransmits && (buf != l_ptr->next_out) && buf) {
1500		msg = buf_msg(buf);
1501		msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
1502		msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
1503		if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
1504			buf = buf->next;
1505			retransmits--;
1506			l_ptr->stats.retransmitted++;
1507		} else {
1508			tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
1509			l_ptr->stats.bearer_congs++;
1510			l_ptr->retransm_queue_head = buf_seqno(buf);
1511			l_ptr->retransm_queue_size = retransmits;
1512			return;
1513		}
1514	}
1515
1516	l_ptr->retransm_queue_head = l_ptr->retransm_queue_size = 0;
1517}
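/*
 * Note on the stale-count logic above (illustrative): being asked to
 * retransmit the same sequence number more than 100 times in a row on
 * an uncongested bearer is taken as proof that the peer is not
 * consuming packets, so link_retransmit_failure() resets the link or,
 * for the broadcast link (addr == 0), schedules link_reset_all() for
 * the node the retransmission was aimed at.
 */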
1518
1519/**
1520 * link_insert_deferred_queue - insert deferred messages back into receive chain
1521 */
1522static struct sk_buff *link_insert_deferred_queue(struct tipc_link *l_ptr,
1523						  struct sk_buff *buf)
1524{
1525	u32 seq_no;
1526
1527	if (l_ptr->oldest_deferred_in == NULL)
1528		return buf;
1529
1530	seq_no = buf_seqno(l_ptr->oldest_deferred_in);
1531	if (seq_no == mod(l_ptr->next_in_no)) {
1532		l_ptr->newest_deferred_in->next = buf;
1533		buf = l_ptr->oldest_deferred_in;
1534		l_ptr->oldest_deferred_in = NULL;
1535		l_ptr->deferred_inqueue_sz = 0;
1536	}
1537	return buf;
1538}
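/*
 * Example for the splice above (illustrative): with next_in_no = 10
 * and a deferred queue holding seqnos {10, 11, 12}, the queue head
 * matches the expected sequence number, so the whole deferred chain is
 * linked in ahead of the not-yet-processed buffer and consumed by the
 * normal in-sequence path in tipc_recv_msg().
 */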
1539
1540/**
1541 * link_recv_buf_validate - validate basic format of received message
1542 *
1543 * This routine ensures a TIPC message has an acceptable header, and at least
1544 * as much data as the header indicates it should.  The routine also ensures
1545 * that the entire message header is stored in the main fragment of the message
1546 * buffer, to simplify future access to message header fields.
1547 *
1548 * Note: Having extra info present in the message header or data areas is OK.
1549 * TIPC will ignore the excess, under the assumption that it is optional info
1550 * introduced by a later release of the protocol.
1551 */
1552static int link_recv_buf_validate(struct sk_buff *buf)
1553{
1554	static u32 min_data_hdr_size[8] = {
1555		SHORT_H_SIZE, MCAST_H_SIZE, NAMED_H_SIZE, BASIC_H_SIZE,
1556		MAX_H_SIZE, MAX_H_SIZE, MAX_H_SIZE, MAX_H_SIZE
1557		};
1558
1559	struct tipc_msg *msg;
1560	u32 tipc_hdr[2];
1561	u32 size;
1562	u32 hdr_size;
1563	u32 min_hdr_size;
1564
1565	if (unlikely(buf->len < MIN_H_SIZE))
1566		return 0;
1567
1568	msg = skb_header_pointer(buf, 0, sizeof(tipc_hdr), tipc_hdr);
1569	if (msg == NULL)
1570		return 0;
1571
1572	if (unlikely(msg_version(msg) != TIPC_VERSION))
1573		return 0;
1574
1575	size = msg_size(msg);
1576	hdr_size = msg_hdr_sz(msg);
1577	min_hdr_size = msg_isdata(msg) ?
1578		min_data_hdr_size[msg_type(msg)] : INT_H_SIZE;
1579
1580	if (unlikely((hdr_size < min_hdr_size) ||
1581		     (size < hdr_size) ||
1582		     (buf->len < size) ||
1583		     (size - hdr_size > TIPC_MAX_USER_MSG_SIZE)))
1584		return 0;
1585
1586	return pskb_may_pull(buf, hdr_size);
1587}
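/*
 * Example of a rejected buffer (illustrative): a message whose header
 * claims msg_size() = 100 while the buffer holds only buf->len = 80
 * bytes fails the (buf->len < size) test, as does a data message whose
 * header is shorter than min_data_hdr_size[] allows for its type or
 * whose payload would exceed TIPC_MAX_USER_MSG_SIZE.
 */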
1588
1589/**
1590 * tipc_recv_msg - process TIPC messages arriving from off-node
1591 * @head: pointer to message buffer chain
1592 * @b_ptr: pointer to the bearer the message arrived on
1593 *
1594 * Invoked with no locks held.  Bearer pointer must point to a valid bearer
1595 * structure (i.e. cannot be NULL), but bearer can be inactive.
1596 */
1597void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr)
1598{
1599	read_lock_bh(&tipc_net_lock);
1600	while (head) {
1601		struct tipc_node *n_ptr;
1602		struct tipc_link *l_ptr;
1603		struct sk_buff *crs;
1604		struct sk_buff *buf = head;
1605		struct tipc_msg *msg;
1606		u32 seq_no;
1607		u32 ackd;
1608		u32 released = 0;
1609		int type;
1610
1611		head = head->next;
1612
1613		/* Ensure bearer is still enabled */
1614		if (unlikely(!b_ptr->active))
1615			goto cont;
1616
1617		/* Ensure message is well-formed */
1618		if (unlikely(!link_recv_buf_validate(buf)))
1619			goto cont;
1620
1621		/* Ensure message data is a single contiguous unit */
1622		if (unlikely(skb_linearize(buf)))
1623			goto cont;
1624
1625		/* Handle arrival of a non-unicast link message */
1626		msg = buf_msg(buf);
1627
1628		if (unlikely(msg_non_seq(msg))) {
1629			if (msg_user(msg) ==  LINK_CONFIG)
1630				tipc_disc_recv_msg(buf, b_ptr);
1631			else
1632				tipc_bclink_recv_pkt(buf);
1633			continue;
1634		}
1635
1636		/* Discard unicast link messages destined for another node */
1637		if (unlikely(!msg_short(msg) &&
1638			     (msg_destnode(msg) != tipc_own_addr)))
1639			goto cont;
1640
1641		/* Locate neighboring node that sent message */
1642		n_ptr = tipc_node_find(msg_prevnode(msg));
1643		if (unlikely(!n_ptr))
1644			goto cont;
1645		tipc_node_lock(n_ptr);
1646
1647		/* Locate unicast link endpoint that should handle message */
1648		l_ptr = n_ptr->links[b_ptr->identity];
1649		if (unlikely(!l_ptr)) {
1650			tipc_node_unlock(n_ptr);
1651			goto cont;
1652		}
1653
1654		/* Verify that communication with node is currently allowed */
1655		if ((n_ptr->block_setup & WAIT_PEER_DOWN) &&
1656			msg_user(msg) == LINK_PROTOCOL &&
1657			(msg_type(msg) == RESET_MSG ||
1658					msg_type(msg) == ACTIVATE_MSG) &&
1659			!msg_redundant_link(msg))
1660			n_ptr->block_setup &= ~WAIT_PEER_DOWN;
1661
1662		if (n_ptr->block_setup) {
1663			tipc_node_unlock(n_ptr);
1664			goto cont;
1665		}
1666
1667		/* Validate message sequence number info */
1668		seq_no = msg_seqno(msg);
1669		ackd = msg_ack(msg);
1670
1671		/* Release acked messages */
1672		if (n_ptr->bclink.supported)
1673			tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg));
1674
1675		crs = l_ptr->first_out;
1676		while ((crs != l_ptr->next_out) &&
1677		       less_eq(buf_seqno(crs), ackd)) {
1678			struct sk_buff *next = crs->next;
1679
1680			kfree_skb(crs);
1681			crs = next;
1682			released++;
1683		}
1684		if (released) {
1685			l_ptr->first_out = crs;
1686			l_ptr->out_queue_size -= released;
1687		}
1688
1689		/* Try sending any messages link endpoint has pending */
1690		if (unlikely(l_ptr->next_out))
1691			tipc_link_push_queue(l_ptr);
1692		if (unlikely(!list_empty(&l_ptr->waiting_ports)))
1693			tipc_link_wakeup_ports(l_ptr, 0);
1694		if (unlikely(++l_ptr->unacked_window >= TIPC_MIN_LINK_WIN)) {
1695			l_ptr->stats.sent_acks++;
1696			tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
1697		}
1698
1699		/* Now (finally!) process the incoming message */
1700protocol_check:
1701		if (likely(link_working_working(l_ptr))) {
1702			if (likely(seq_no == mod(l_ptr->next_in_no))) {
1703				l_ptr->next_in_no++;
1704				if (unlikely(l_ptr->oldest_deferred_in))
1705					head = link_insert_deferred_queue(l_ptr,
1706									  head);
1707deliver:
1708				if (likely(msg_isdata(msg))) {
1709					tipc_node_unlock(n_ptr);
1710					tipc_port_recv_msg(buf);
1711					continue;
1712				}
1713				switch (msg_user(msg)) {
1714					int ret;
1715				case MSG_BUNDLER:
1716					l_ptr->stats.recv_bundles++;
1717					l_ptr->stats.recv_bundled +=
1718						msg_msgcnt(msg);
1719					tipc_node_unlock(n_ptr);
1720					tipc_link_recv_bundle(buf);
1721					continue;
1722				case NAME_DISTRIBUTOR:
1723					tipc_node_unlock(n_ptr);
1724					tipc_named_recv(buf);
1725					continue;
1726				case CONN_MANAGER:
1727					tipc_node_unlock(n_ptr);
1728					tipc_port_recv_proto_msg(buf);
1729					continue;
1730				case MSG_FRAGMENTER:
1731					l_ptr->stats.recv_fragments++;
1732					ret = tipc_link_recv_fragment(
1733						&l_ptr->defragm_buf,
1734						&buf, &msg);
1735					if (ret == 1) {
1736						l_ptr->stats.recv_fragmented++;
1737						goto deliver;
1738					}
1739					if (ret == -1)
1740						l_ptr->next_in_no--;
1741					break;
1742				case CHANGEOVER_PROTOCOL:
1743					type = msg_type(msg);
1744					if (link_recv_changeover_msg(&l_ptr,
1745								     &buf)) {
1746						msg = buf_msg(buf);
1747						seq_no = msg_seqno(msg);
1748						if (type == ORIGINAL_MSG)
1749							goto deliver;
1750						goto protocol_check;
1751					}
1752					break;
1753				default:
1754					kfree_skb(buf);
1755					buf = NULL;
1756					break;
1757				}
1758				tipc_node_unlock(n_ptr);
1759				tipc_net_route_msg(buf);
1760				continue;
1761			}
1762			link_handle_out_of_seq_msg(l_ptr, buf);
1763			head = link_insert_deferred_queue(l_ptr, head);
1764			tipc_node_unlock(n_ptr);
1765			continue;
1766		}
1767
1768		if (msg_user(msg) == LINK_PROTOCOL) {
1769			link_recv_proto_msg(l_ptr, buf);
1770			head = link_insert_deferred_queue(l_ptr, head);
1771			tipc_node_unlock(n_ptr);
1772			continue;
1773		}
1774		link_state_event(l_ptr, TRAFFIC_MSG_EVT);
1775
1776		if (link_working_working(l_ptr)) {
1777			/* Re-insert in front of queue */
1778			buf->next = head;
1779			head = buf;
1780			tipc_node_unlock(n_ptr);
1781			continue;
1782		}
1783		tipc_node_unlock(n_ptr);
1784cont:
1785		kfree_skb(buf);
1786	}
1787	read_unlock_bh(&tipc_net_lock);
1788}
1789
1790/*
1791 * tipc_link_defer_pkt - Add out-of-sequence message to deferred reception queue
1792 *
1793 * Returns increase in queue length (i.e. 0 or 1)
1794 */
1795u32 tipc_link_defer_pkt(struct sk_buff **head, struct sk_buff **tail,
1796			struct sk_buff *buf)
1797{
1798	struct sk_buff *queue_buf;
1799	struct sk_buff **prev;
1800	u32 seq_no = buf_seqno(buf);
1801
1802	buf->next = NULL;
1803
1804	/* Empty queue ? */
1805	if (*head == NULL) {
1806		*head = *tail = buf;
1807		return 1;
1808	}
1809
1810	/* Last ? */
1811	if (less(buf_seqno(*tail), seq_no)) {
1812		(*tail)->next = buf;
1813		*tail = buf;
1814		return 1;
1815	}
1816
1817	/* Locate insertion point in queue, then insert; discard if duplicate */
1818	prev = head;
1819	queue_buf = *head;
1820	for (;;) {
1821		u32 curr_seqno = buf_seqno(queue_buf);
1822
1823		if (seq_no == curr_seqno) {
1824			kfree_skb(buf);
1825			return 0;
1826		}
1827
1828		if (less(seq_no, curr_seqno))
1829			break;
1830
1831		prev = &queue_buf->next;
1832		queue_buf = queue_buf->next;
1833	}
1834
1835	buf->next = queue_buf;
1836	*prev = buf;
1837	return 1;
1838}
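/*
 * Worked example (illustrative): with the deferred queue holding
 * seqnos {5, 7, 8}, an arriving packet 6 is inserted between 5 and 7
 * (return 1), a second copy of 7 is freed as a duplicate (return 0),
 * and packet 9 is appended after the tail (return 1).
 */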
1839
1840/*
1841 * link_handle_out_of_seq_msg - handle arrival of out-of-sequence packet
1842 */
1843static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
1844				       struct sk_buff *buf)
1845{
1846	u32 seq_no = buf_seqno(buf);
1847
1848	if (likely(msg_user(buf_msg(buf)) == LINK_PROTOCOL)) {
1849		link_recv_proto_msg(l_ptr, buf);
1850		return;
1851	}
1852
1853	/* Record OOS packet arrival (force mismatch on next timeout) */
1854	l_ptr->checkpoint--;
1855
1856	/*
1857	 * Discard packet if a duplicate; otherwise add it to deferred queue
1858	 * and notify peer of gap as per protocol specification
1859	 */
1860	if (less(seq_no, mod(l_ptr->next_in_no))) {
1861		l_ptr->stats.duplicates++;
1862		kfree_skb(buf);
1863		return;
1864	}
1865
1866	if (tipc_link_defer_pkt(&l_ptr->oldest_deferred_in,
1867				&l_ptr->newest_deferred_in, buf)) {
1868		l_ptr->deferred_inqueue_sz++;
1869		l_ptr->stats.deferred_recv++;
1870		if ((l_ptr->deferred_inqueue_sz % 16) == 1)
1871			tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
1872	} else
1873		l_ptr->stats.duplicates++;
1874}
1875
1876/*
1877 * Send protocol message to the other endpoint.
1878 */
1879void tipc_link_send_proto_msg(struct tipc_link *l_ptr, u32 msg_typ,
1880				int probe_msg, u32 gap, u32 tolerance,
1881				u32 priority, u32 ack_mtu)
1882{
1883	struct sk_buff *buf = NULL;
1884	struct tipc_msg *msg = l_ptr->pmsg;
1885	u32 msg_size = sizeof(l_ptr->proto_msg);
1886	int r_flag;
1887
1888	/* Discard any previous message that was deferred due to congestion */
1889	if (l_ptr->proto_msg_queue) {
1890		kfree_skb(l_ptr->proto_msg_queue);
1891		l_ptr->proto_msg_queue = NULL;
1892	}
1893
1894	if (link_blocked(l_ptr))
1895		return;
1896
1897	/* Abort non-RESET send if communication with node is prohibited */
1898	if ((l_ptr->owner->block_setup) && (msg_typ != RESET_MSG))
1899		return;
1900
1901	/* Create protocol message with "out-of-sequence" sequence number */
1902	msg_set_type(msg, msg_typ);
1903	msg_set_net_plane(msg, l_ptr->b_ptr->net_plane);
1904	msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
1905	msg_set_last_bcast(msg, tipc_bclink_get_last_sent());
1906
1907	if (msg_typ == STATE_MSG) {
1908		u32 next_sent = mod(l_ptr->next_out_no);
1909
1910		if (!tipc_link_is_up(l_ptr))
1911			return;
1912		if (l_ptr->next_out)
1913			next_sent = buf_seqno(l_ptr->next_out);
1914		msg_set_next_sent(msg, next_sent);
1915		if (l_ptr->oldest_deferred_in) {
1916			u32 rec = buf_seqno(l_ptr->oldest_deferred_in);
1917			gap = mod(rec - mod(l_ptr->next_in_no));
1918		}
1919		msg_set_seq_gap(msg, gap);
1920		if (gap)
1921			l_ptr->stats.sent_nacks++;
1922		msg_set_link_tolerance(msg, tolerance);
1923		msg_set_linkprio(msg, priority);
1924		msg_set_max_pkt(msg, ack_mtu);
1925		msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
1926		msg_set_probe(msg, probe_msg != 0);
1927		if (probe_msg) {
1928			u32 mtu = l_ptr->max_pkt;
1929
1930			if ((mtu < l_ptr->max_pkt_target) &&
1931			    link_working_working(l_ptr) &&
1932			    l_ptr->fsm_msg_cnt) {
1933				msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
1934				if (l_ptr->max_pkt_probes == 10) {
1935					l_ptr->max_pkt_target = (msg_size - 4);
1936					l_ptr->max_pkt_probes = 0;
1937					msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
1938				}
1939				l_ptr->max_pkt_probes++;
1940			}
1941
1942			l_ptr->stats.sent_probes++;
1943		}
1944		l_ptr->stats.sent_states++;
1945	} else {		/* RESET_MSG or ACTIVATE_MSG */
1946		msg_set_ack(msg, mod(l_ptr->reset_checkpoint - 1));
1947		msg_set_seq_gap(msg, 0);
1948		msg_set_next_sent(msg, 1);
1949		msg_set_probe(msg, 0);
1950		msg_set_link_tolerance(msg, l_ptr->tolerance);
1951		msg_set_linkprio(msg, l_ptr->priority);
1952		msg_set_max_pkt(msg, l_ptr->max_pkt_target);
1953	}
1954
1955	r_flag = (l_ptr->owner->working_links > tipc_link_is_up(l_ptr));
1956	msg_set_redundant_link(msg, r_flag);
1957	msg_set_linkprio(msg, l_ptr->priority);
1958	msg_set_size(msg, msg_size);
1959
1960	msg_set_seqno(msg, mod(l_ptr->next_out_no + (0xffff/2)));
1961
1962	buf = tipc_buf_acquire(msg_size);
1963	if (!buf)
1964		return;
1965
1966	skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));
1967
1968	/* Defer message if bearer is already congested */
1969	if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr)) {
1970		l_ptr->proto_msg_queue = buf;
1971		return;
1972	}
1973
1974	/* Defer message if attempting to send results in bearer congestion */
1975	if (!tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
1976		tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
1977		l_ptr->proto_msg_queue = buf;
1978		l_ptr->stats.bearer_congs++;
1979		return;
1980	}
1981
1982	/* Discard message if it was sent successfully */
1983	l_ptr->unacked_window = 0;
1984	kfree_skb(buf);
1985}
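/*
 * Note on the sequence number chosen above (illustrative): protocol
 * messages are stamped next_out_no + 0xffff/2, i.e. half way around
 * the 16-bit sequence space, so a receiver can never mistake them for
 * in-sequence data packets or acknowledge them as such.
 */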
1986
1987/*
1988 * Receive protocol message:
1989 * Note that the network plane id propagates through the network, and may
1990 * change at any time. The node with the lowest address rules.
1991 */
1992static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf)
1993{
1994	u32 rec_gap = 0;
1995	u32 max_pkt_info;
1996	u32 max_pkt_ack;
1997	u32 msg_tol;
1998	struct tipc_msg *msg = buf_msg(buf);
1999
2000	if (link_blocked(l_ptr))
2001		goto exit;
2002
2003	/* record unnumbered packet arrival (force mismatch on next timeout) */
2004	l_ptr->checkpoint--;
2005
2006	if (l_ptr->b_ptr->net_plane != msg_net_plane(msg))
2007		if (tipc_own_addr > msg_prevnode(msg))
2008			l_ptr->b_ptr->net_plane = msg_net_plane(msg);
2009
2010	l_ptr->owner->permit_changeover = msg_redundant_link(msg);
2011
2012	switch (msg_type(msg)) {
2013
2014	case RESET_MSG:
2015		if (!link_working_unknown(l_ptr) &&
2016		    (l_ptr->peer_session != INVALID_SESSION)) {
2017			if (less_eq(msg_session(msg), l_ptr->peer_session))
2018				break; /* duplicate or old reset: ignore */
2019		}
2020
2021		if (!msg_redundant_link(msg) && (link_working_working(l_ptr) ||
2022				link_working_unknown(l_ptr))) {
2023			/*
2024			 * peer has lost contact -- don't allow peer's links
2025			 * to reactivate before we recognize loss & clean up
2026			 */
2027			l_ptr->owner->block_setup = WAIT_NODE_DOWN;
2028		}
2029
2030		link_state_event(l_ptr, RESET_MSG);
2031
2032		/* fall thru' */
2033	case ACTIVATE_MSG:
2034		/* Update link settings according to the other endpoint's values */
2035		strcpy((strrchr(l_ptr->name, ':') + 1), (char *)msg_data(msg));
2036
2037		msg_tol = msg_link_tolerance(msg);
2038		if (msg_tol > l_ptr->tolerance)
2039			link_set_supervision_props(l_ptr, msg_tol);
2040
2041		if (msg_linkprio(msg) > l_ptr->priority)
2042			l_ptr->priority = msg_linkprio(msg);
2043
2044		max_pkt_info = msg_max_pkt(msg);
2045		if (max_pkt_info) {
2046			if (max_pkt_info < l_ptr->max_pkt_target)
2047				l_ptr->max_pkt_target = max_pkt_info;
2048			if (l_ptr->max_pkt > l_ptr->max_pkt_target)
2049				l_ptr->max_pkt = l_ptr->max_pkt_target;
2050		} else {
2051			l_ptr->max_pkt = l_ptr->max_pkt_target;
2052		}
2053		l_ptr->owner->bclink.supportable = (max_pkt_info != 0);
2054
2055		/* Synchronize broadcast link info, if not done previously */
2056		if (!tipc_node_is_up(l_ptr->owner)) {
2057			l_ptr->owner->bclink.last_sent =
2058				l_ptr->owner->bclink.last_in =
2059				msg_last_bcast(msg);
2060			l_ptr->owner->bclink.oos_state = 0;
2061		}
2062
2063		l_ptr->peer_session = msg_session(msg);
2064		l_ptr->peer_bearer_id = msg_bearer_id(msg);
2065
2066		if (msg_type(msg) == ACTIVATE_MSG)
2067			link_state_event(l_ptr, ACTIVATE_MSG);
2068		break;
2069	case STATE_MSG:
2070
2071		msg_tol = msg_link_tolerance(msg);
2072		if (msg_tol)
2073			link_set_supervision_props(l_ptr, msg_tol);
2074
2075		if (msg_linkprio(msg) &&
2076		    (msg_linkprio(msg) != l_ptr->priority)) {
2077			warn("Resetting link <%s>, priority change %u->%u\n",
2078			     l_ptr->name, l_ptr->priority, msg_linkprio(msg));
2079			l_ptr->priority = msg_linkprio(msg);
2080			tipc_link_reset(l_ptr); /* Enforce change to take effect */
2081			break;
2082		}
2083		link_state_event(l_ptr, TRAFFIC_MSG_EVT);
2084		l_ptr->stats.recv_states++;
2085		if (link_reset_unknown(l_ptr))
2086			break;
2087
2088		if (less_eq(mod(l_ptr->next_in_no), msg_next_sent(msg))) {
2089			rec_gap = mod(msg_next_sent(msg) -
2090				      mod(l_ptr->next_in_no));
2091		}
2092
2093		max_pkt_ack = msg_max_pkt(msg);
2094		if (max_pkt_ack > l_ptr->max_pkt) {
2095			l_ptr->max_pkt = max_pkt_ack;
2096			l_ptr->max_pkt_probes = 0;
2097		}
2098
2099		max_pkt_ack = 0;
2100		if (msg_probe(msg)) {
2101			l_ptr->stats.recv_probes++;
2102			if (msg_size(msg) > sizeof(l_ptr->proto_msg))
2103				max_pkt_ack = msg_size(msg);
2104		}
2105
2106		/* Protocol message before retransmits, reduce loss risk */
2107		if (l_ptr->owner->bclink.supported)
2108			tipc_bclink_update_link_state(l_ptr->owner,
2109						      msg_last_bcast(msg));
2110
2111		if (rec_gap || (msg_probe(msg))) {
2112			tipc_link_send_proto_msg(l_ptr, STATE_MSG,
2113						 0, rec_gap, 0, 0, max_pkt_ack);
2114		}
2115		if (msg_seq_gap(msg)) {
2116			l_ptr->stats.recv_nacks++;
2117			tipc_link_retransmit(l_ptr, l_ptr->first_out,
2118					     msg_seq_gap(msg));
2119		}
2120		break;
2121	}
2122exit:
2123	kfree_skb(buf);
2124}
2125
2126
2127/*
2128 * tipc_link_tunnel(): Send one message via a link belonging to
2129 * another bearer. Owner node is locked.
2130 */
2131static void tipc_link_tunnel(struct tipc_link *l_ptr,
2132			     struct tipc_msg *tunnel_hdr,
2133			     struct tipc_msg  *msg,
2134			     u32 selector)
2135{
2136	struct tipc_link *tunnel;
2137	struct sk_buff *buf;
2138	u32 length = msg_size(msg);
2139
2140	tunnel = l_ptr->owner->active_links[selector & 1];
2141	if (!tipc_link_is_up(tunnel)) {
2142		warn("Link changeover error, "
2143		     "tunnel link no longer available\n");
2144		return;
2145	}
2146	msg_set_size(tunnel_hdr, length + INT_H_SIZE);
2147	buf = tipc_buf_acquire(length + INT_H_SIZE);
2148	if (!buf) {
2149		warn("Link changeover error, "
2150		     "unable to send tunnel msg\n");
2151		return;
2152	}
2153	skb_copy_to_linear_data(buf, tunnel_hdr, INT_H_SIZE);
2154	skb_copy_to_linear_data_offset(buf, INT_H_SIZE, msg, length);
2155	tipc_link_send_buf(tunnel, buf);
2156}
2157
2158
2159
2160/*
2161 * changeover(): Send whole message queue via the remaining link
2162 *               Owner node is locked.
2163 */
2164void tipc_link_changeover(struct tipc_link *l_ptr)
2165{
2166	u32 msgcount = l_ptr->out_queue_size;
2167	struct sk_buff *crs = l_ptr->first_out;
2168	struct tipc_link *tunnel = l_ptr->owner->active_links[0];
2169	struct tipc_msg tunnel_hdr;
2170	int split_bundles;
2171
2172	if (!tunnel)
2173		return;
2174
2175	if (!l_ptr->owner->permit_changeover) {
2176		warn("Link changeover error, "
2177		     "peer did not permit changeover\n");
2178		return;
2179	}
2180
2181	tipc_msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
2182		 ORIGINAL_MSG, INT_H_SIZE, l_ptr->addr);
2183	msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
2184	msg_set_msgcnt(&tunnel_hdr, msgcount);
2185
2186	if (!l_ptr->first_out) {
2187		struct sk_buff *buf;
2188
2189		buf = tipc_buf_acquire(INT_H_SIZE);
2190		if (buf) {
2191			skb_copy_to_linear_data(buf, &tunnel_hdr, INT_H_SIZE);
2192			msg_set_size(&tunnel_hdr, INT_H_SIZE);
2193			tipc_link_send_buf(tunnel, buf);
2194		} else {
2195			warn("Link changeover error, "
2196			     "unable to send changeover msg\n");
2197		}
2198		return;
2199	}
2200
2201	split_bundles = (l_ptr->owner->active_links[0] !=
2202			 l_ptr->owner->active_links[1]);
2203
2204	while (crs) {
2205		struct tipc_msg *msg = buf_msg(crs);
2206
2207		if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) {
2208			struct tipc_msg *m = msg_get_wrapped(msg);
2209			unchar *pos = (unchar *)m;
2210
2211			msgcount = msg_msgcnt(msg);
2212			while (msgcount--) {
2213				msg_set_seqno(m, msg_seqno(msg));
2214				tipc_link_tunnel(l_ptr, &tunnel_hdr, m,
2215						 msg_link_selector(m));
2216				pos += align(msg_size(m));
2217				m = (struct tipc_msg *)pos;
2218			}
2219		} else {
2220			tipc_link_tunnel(l_ptr, &tunnel_hdr, msg,
2221					 msg_link_selector(msg));
2222		}
2223		crs = crs->next;
2224	}
2225}
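
/*
 * Note on the changeover sequence above (added for clarity): msgcnt in
 * the tunnel header tells the peer how many ORIGINAL_MSG tunnel packets
 * to expect. When the send queue is empty, a header-only packet with
 * msgcnt 0 is still sent so the peer can complete changeover at once.
 * Bundles are split into their constituent messages only when two
 * distinct links remain active, letting each message re-select its own
 * outgoing link via msg_link_selector().
 */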
2226
2227void tipc_link_send_duplicate(struct tipc_link *l_ptr, struct tipc_link *tunnel)
2228{
2229	struct sk_buff *iter;
2230	struct tipc_msg tunnel_hdr;
2231
2232	tipc_msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
2233		 DUPLICATE_MSG, INT_H_SIZE, l_ptr->addr);
2234	msg_set_msgcnt(&tunnel_hdr, l_ptr->out_queue_size);
2235	msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
2236	iter = l_ptr->first_out;
2237	while (iter) {
2238		struct sk_buff *outbuf;
2239		struct tipc_msg *msg = buf_msg(iter);
2240		u32 length = msg_size(msg);
2241
2242		if (msg_user(msg) == MSG_BUNDLER)
2243			msg_set_type(msg, CLOSED_MSG);
2244		msg_set_ack(msg, mod(l_ptr->next_in_no - 1));	/* Update ack field */
2245		msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
2246		msg_set_size(&tunnel_hdr, length + INT_H_SIZE);
2247		outbuf = tipc_buf_acquire(length + INT_H_SIZE);
2248		if (outbuf == NULL) {
2249			warn("Link changeover error, "
2250			     "unable to send duplicate msg\n");
2251			return;
2252		}
2253		skb_copy_to_linear_data(outbuf, &tunnel_hdr, INT_H_SIZE);
2254		skb_copy_to_linear_data_offset(outbuf, INT_H_SIZE, iter->data,
2255					       length);
2256		tipc_link_send_buf(tunnel, outbuf);
2257		if (!tipc_link_is_up(l_ptr))
2258			return;
2259		iter = iter->next;
2260	}
2261}
2262
2263/**
2264 * buf_extract - extracts embedded TIPC message from another message
2265 * @skb: encapsulating message buffer
2266 * @from_pos: offset to extract from
2267 *
2268 * Returns a new message buffer containing an embedded message.  The
2269 * encapsulating message itself is left unchanged.
2270 */
2271static struct sk_buff *buf_extract(struct sk_buff *skb, u32 from_pos)
2272{
2273	struct tipc_msg *msg = (struct tipc_msg *)(skb->data + from_pos);
2274	u32 size = msg_size(msg);
2275	struct sk_buff *eb;
2276
2277	eb = tipc_buf_acquire(size);
2278	if (eb)
2279		skb_copy_to_linear_data(eb, msg, size);
2280	return eb;
2281}
2282
2283/*
2284 *  link_recv_changeover_msg(): Receive tunnelled packet sent
2285 *  via other link. Node is locked. Returns 1 if a buffer was extracted.
2286 */
2287static int link_recv_changeover_msg(struct tipc_link **l_ptr,
2288				    struct sk_buff **buf)
2289{
2290	struct sk_buff *tunnel_buf = *buf;
2291	struct tipc_link *dest_link;
2292	struct tipc_msg *msg;
2293	struct tipc_msg *tunnel_msg = buf_msg(tunnel_buf);
2294	u32 msg_typ = msg_type(tunnel_msg);
2295	u32 msg_count = msg_msgcnt(tunnel_msg);
2296
2297	dest_link = (*l_ptr)->owner->links[msg_bearer_id(tunnel_msg)];
2298	if (!dest_link)
2299		goto exit;
2300	if (dest_link == *l_ptr) {
2301		err("Unexpected changeover message on link <%s>\n",
2302		    (*l_ptr)->name);
2303		goto exit;
2304	}
2305	*l_ptr = dest_link;
2306	msg = msg_get_wrapped(tunnel_msg);
2307
2308	if (msg_typ == DUPLICATE_MSG) {
2309		if (less(msg_seqno(msg), mod(dest_link->next_in_no)))
2310			goto exit;
2311		*buf = buf_extract(tunnel_buf, INT_H_SIZE);
2312		if (*buf == NULL) {
2313			warn("Link changeover error, duplicate msg dropped\n");
2314			goto exit;
2315		}
2316		kfree_skb(tunnel_buf);
2317		return 1;
2318	}
2319
2320	/* First original message? */
2321	if (tipc_link_is_up(dest_link)) {
2322		info("Resetting link <%s>, changeover initiated by peer\n",
2323		     dest_link->name);
2324		tipc_link_reset(dest_link);
2325		dest_link->exp_msg_count = msg_count;
2326		if (!msg_count)
2327			goto exit;
2328	} else if (dest_link->exp_msg_count == START_CHANGEOVER) {
2329		dest_link->exp_msg_count = msg_count;
2330		if (!msg_count)
2331			goto exit;
2332	}
2333
2334	/* Receive original message */
2335	if (dest_link->exp_msg_count == 0) {
2336		warn("Link changeover error, "
2337		     "got too many tunnelled messages\n");
2338		goto exit;
2339	}
2340	dest_link->exp_msg_count--;
2341	if (less(msg_seqno(msg), dest_link->reset_checkpoint)) {
2342		goto exit;
2343	} else {
2344		*buf = buf_extract(tunnel_buf, INT_H_SIZE);
2345		if (*buf != NULL) {
2346			kfree_skb(tunnel_buf);
2347			return 1;
2348		} else {
2349			warn("Link changeover error, original msg dropped\n");
2350		}
2351	}
2352exit:
2353	*buf = NULL;
2354	kfree_skb(tunnel_buf);
2355	return 0;
2356}
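
/*
 * Receive-side summary (added for clarity): DUPLICATE_MSG copies are
 * silently dropped when their sequence number has already been delivered
 * (i.e. is less than next_in_no). The first ORIGINAL_MSG resets the
 * destination link and primes exp_msg_count with the expected message
 * count; each later original decrements it, and anything older than the
 * link's reset_checkpoint is discarded as already received.
 */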
2357
2358/*
2359 *  Bundler functionality:
2360 */
2361void tipc_link_recv_bundle(struct sk_buff *buf)
2362{
2363	u32 msgcount = msg_msgcnt(buf_msg(buf));
2364	u32 pos = INT_H_SIZE;
2365	struct sk_buff *obuf;
2366
2367	while (msgcount--) {
2368		obuf = buf_extract(buf, pos);
2369		if (obuf == NULL) {
2370			warn("Link unable to unbundle message(s)\n");
2371			break;
2372		}
2373		pos += align(msg_size(buf_msg(obuf)));
2374		tipc_net_route_msg(obuf);
2375	}
2376	kfree_skb(buf);
2377}
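
/*
 * Bundle buffer layout, for illustration (derived from the loop above):
 *
 *	[ bundle header : INT_H_SIZE ][ msg 1 ][ pad ][ msg 2 ][ pad ] ...
 *
 * msg_msgcnt() in the bundle header gives the message count, and each
 * embedded message starts at the next align()ed offset after the
 * previous one.
 */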
2378
2379/*
2380 *  Fragmentation/defragmentation:
2381 */
2382
2383/*
2384 * link_send_long_buf: Entry for buffers needing fragmentation.
2385 * The buffer is complete, including the total message length.
2386 * Returns user data length.
2387 */
2388static int link_send_long_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
2389{
2390	struct sk_buff *buf_chain = NULL;
2391	struct sk_buff *buf_chain_tail = (struct sk_buff *)&buf_chain;
2392	struct tipc_msg *inmsg = buf_msg(buf);
2393	struct tipc_msg fragm_hdr;
2394	u32 insize = msg_size(inmsg);
2395	u32 dsz = msg_data_sz(inmsg);
2396	unchar *crs = buf->data;
2397	u32 rest = insize;
2398	u32 pack_sz = l_ptr->max_pkt;
2399	u32 fragm_sz = pack_sz - INT_H_SIZE;
2400	u32 fragm_no = 0;
2401	u32 destaddr;
2402
2403	if (msg_short(inmsg))
2404		destaddr = l_ptr->addr;
2405	else
2406		destaddr = msg_destnode(inmsg);
2407
2408	/* Prepare reusable fragment header: */
2409	tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
2410		 INT_H_SIZE, destaddr);
2411
2412	/* Chop up message: */
2413	while (rest > 0) {
2414		struct sk_buff *fragm;
2415
2416		if (rest <= fragm_sz) {
2417			fragm_sz = rest;
2418			msg_set_type(&fragm_hdr, LAST_FRAGMENT);
2419		}
2420		fragm = tipc_buf_acquire(fragm_sz + INT_H_SIZE);
2421		if (fragm == NULL) {
2422			kfree_skb(buf);
2423			while (buf_chain) {
2424				buf = buf_chain;
2425				buf_chain = buf_chain->next;
2426				kfree_skb(buf);
2427			}
2428			return -ENOMEM;
2429		}
2430		msg_set_size(&fragm_hdr, fragm_sz + INT_H_SIZE);
2431		fragm_no++;
2432		msg_set_fragm_no(&fragm_hdr, fragm_no);
2433		skb_copy_to_linear_data(fragm, &fragm_hdr, INT_H_SIZE);
2434		skb_copy_to_linear_data_offset(fragm, INT_H_SIZE, crs,
2435					       fragm_sz);
2436		buf_chain_tail->next = fragm;
2437		buf_chain_tail = fragm;
2438
2439		rest -= fragm_sz;
2440		crs += fragm_sz;
2441		msg_set_type(&fragm_hdr, FRAGMENT);
2442	}
2443	kfree_skb(buf);
2444
2445	/* Append chain of fragments to send queue & send them */
2446	l_ptr->long_msg_seq_no++;
2447	link_add_chain_to_outqueue(l_ptr, buf_chain, l_ptr->long_msg_seq_no);
2448	l_ptr->stats.sent_fragments += fragm_no;
2449	l_ptr->stats.sent_fragmented++;
2450	tipc_link_push_queue(l_ptr);
2451
2452	return dsz;
2453}
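
/*
 * Worked example for the fragmentation above (illustrative figures
 * only): with max_pkt = 1500 and INT_H_SIZE = 40, fragm_sz = 1460, so a
 * 4000 octet message yields three fragments carrying 1460 + 1460 + 1080
 * octets, sent as packets of 1500, 1500 and 1120 octets; the last one is
 * tagged LAST_FRAGMENT and all three carry the same long message number.
 */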
2454
2455/*
2456 * A pending message being re-assembled must store certain values
2457 * to handle subsequent fragments correctly. The following functions
2458 * help store these values in unused, available fields in the
2459 * pending message. This makes dynamic memory allocation unnecessary.
2460 */
2461static void set_long_msg_seqno(struct sk_buff *buf, u32 seqno)
2462{
2463	msg_set_seqno(buf_msg(buf), seqno);
2464}
2465
2466static u32 get_fragm_size(struct sk_buff *buf)
2467{
2468	return msg_ack(buf_msg(buf));
2469}
2470
2471static void set_fragm_size(struct sk_buff *buf, u32 sz)
2472{
2473	msg_set_ack(buf_msg(buf), sz);
2474}
2475
2476static u32 get_expected_frags(struct sk_buff *buf)
2477{
2478	return msg_bcast_ack(buf_msg(buf));
2479}
2480
2481static void set_expected_frags(struct sk_buff *buf, u32 exp)
2482{
2483	msg_set_bcast_ack(buf_msg(buf), exp);
2484}
2485
2486static u32 get_timer_cnt(struct sk_buff *buf)
2487{
2488	return msg_reroute_cnt(buf_msg(buf));
2489}
2490
2491static void incr_timer_cnt(struct sk_buff *buf)
2492{
2493	msg_incr_reroute_cnt(buf_msg(buf));
2494}
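
/*
 * Field-reuse map for the helpers above (added for clarity) -- the
 * pending buffer's own header stores the reassembly state:
 *
 *	seqno		<- long message sequence number
 *	ack		<- fragment size
 *	bcast_ack	<- number of fragments still expected
 *	reroute count	<- staleness ticks, see link_check_defragm_bufs()
 */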
2495
2496/*
2497 * tipc_link_recv_fragment(): Called with node lock on. Returns 1 and
2498 * sets *fb to the reassembled buffer when the message is complete.
2499 */
2500int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb,
2501			    struct tipc_msg **m)
2502{
2503	struct sk_buff *prev = NULL;
2504	struct sk_buff *fbuf = *fb;
2505	struct tipc_msg *fragm = buf_msg(fbuf);
2506	struct sk_buff *pbuf = *pending;
2507	u32 long_msg_seq_no = msg_long_msgno(fragm);
2508
2509	*fb = NULL;
2510
2511	/* Is there an incomplete message waiting for this fragment? */
2512	while (pbuf && ((buf_seqno(pbuf) != long_msg_seq_no) ||
2513			(msg_orignode(fragm) != msg_orignode(buf_msg(pbuf))))) {
2514		prev = pbuf;
2515		pbuf = pbuf->next;
2516	}
2517
2518	if (!pbuf && (msg_type(fragm) == FIRST_FRAGMENT)) {
2519		struct tipc_msg *imsg = (struct tipc_msg *)msg_data(fragm);
2520		u32 msg_sz = msg_size(imsg);
2521		u32 fragm_sz = msg_data_sz(fragm);
2522		u32 exp_fragm_cnt = msg_sz / fragm_sz + !!(msg_sz % fragm_sz);
2523		u32 max = TIPC_MAX_USER_MSG_SIZE + NAMED_H_SIZE;
2524		if (msg_type(imsg) == TIPC_MCAST_MSG)
2525			max = TIPC_MAX_USER_MSG_SIZE + MCAST_H_SIZE;
2526		if (msg_size(imsg) > max) {
2527			kfree_skb(fbuf);
2528			return 0;
2529		}
2530		pbuf = tipc_buf_acquire(msg_size(imsg));
2531		if (pbuf != NULL) {
2532			pbuf->next = *pending;
2533			*pending = pbuf;
2534			skb_copy_to_linear_data(pbuf, imsg,
2535						msg_data_sz(fragm));
2536			/*  Prepare buffer for subsequent fragments. */
2537			set_long_msg_seqno(pbuf, long_msg_seq_no);
2538			set_fragm_size(pbuf, fragm_sz);
2539			set_expected_frags(pbuf, exp_fragm_cnt - 1);
2540		} else {
2541			dbg("Link unable to reassemble fragmented message\n");
2542			kfree_skb(fbuf);
2543			return -1;
2544		}
2545		kfree_skb(fbuf);
2546		return 0;
2547	} else if (pbuf && (msg_type(fragm) != FIRST_FRAGMENT)) {
2548		u32 dsz = msg_data_sz(fragm);
2549		u32 fsz = get_fragm_size(pbuf);
2550		u32 crs = ((msg_fragm_no(fragm) - 1) * fsz);
2551		u32 exp_frags = get_expected_frags(pbuf) - 1;
2552		skb_copy_to_linear_data_offset(pbuf, crs,
2553					       msg_data(fragm), dsz);
2554		kfree_skb(fbuf);
2555
2556		/* Is message complete? */
2557		if (exp_frags == 0) {
2558			if (prev)
2559				prev->next = pbuf->next;
2560			else
2561				*pending = pbuf->next;
2562			msg_reset_reroute_cnt(buf_msg(pbuf));
2563			*fb = pbuf;
2564			*m = buf_msg(pbuf);
2565			return 1;
2566		}
2567		set_expected_frags(pbuf, exp_frags);
2568		return 0;
2569	}
2570	kfree_skb(fbuf);
2571	return 0;
2572}
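
/*
 * Worked example for the arithmetic above (illustrative figures only):
 * an embedded message of msg_sz = 3000 arriving in fragments of
 * fragm_sz = 1400 gives exp_fragm_cnt = 3000/1400 + !!(3000 % 1400) = 3,
 * and fragment number n is copied to offset (n - 1) * 1400 in the
 * pending buffer.
 */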
2573
2574/**
2575 * link_check_defragm_bufs - flush stale incoming message fragments
2576 * @l_ptr: pointer to link
2577 */
2578static void link_check_defragm_bufs(struct tipc_link *l_ptr)
2579{
2580	struct sk_buff *prev = NULL;
2581	struct sk_buff *next = NULL;
2582	struct sk_buff *buf = l_ptr->defragm_buf;
2583
2584	if (!buf)
2585		return;
2586	if (!link_working_working(l_ptr))
2587		return;
2588	while (buf) {
2589		u32 cnt = get_timer_cnt(buf);
2590
2591		next = buf->next;
2592		if (cnt < 4) {
2593			incr_timer_cnt(buf);
2594			prev = buf;
2595		} else {
2596			if (prev)
2597				prev->next = buf->next;
2598			else
2599				l_ptr->defragm_buf = buf->next;
2600			kfree_skb(buf);
2601		}
2602		buf = next;
2603	}
2604}
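
/*
 * Note (added for clarity): this runs as part of periodic link
 * supervision, so a partial message survives roughly four
 * continuity-interval ticks before being discarded -- about 2 seconds
 * at the 500 ms interval cap applied below.
 */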
2605
2606static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance)
2607{
2608	if ((tolerance < TIPC_MIN_LINK_TOL) || (tolerance > TIPC_MAX_LINK_TOL))
2609		return;
2610
2611	l_ptr->tolerance = tolerance;
2612	l_ptr->continuity_interval =
2613		((tolerance / 4) > 500) ? 500 : tolerance / 4;
2614	l_ptr->abort_limit = tolerance / (l_ptr->continuity_interval / 4);
2615}
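
/*
 * Worked example (illustrative figures only): tolerance = 1500 ms gives
 * continuity_interval = 1500/4 = 375 ms and abort_limit =
 * 1500/(375/4) = 1500/93 = 16 probes; tolerance = 3000 ms caps the
 * interval at 500 ms and yields abort_limit = 3000/125 = 24.
 */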
2616
2617void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window)
2618{
2619	/* Data messages from this node, including FIRST_FRAGM */
2620	l_ptr->queue_limit[TIPC_LOW_IMPORTANCE] = window;
2621	l_ptr->queue_limit[TIPC_MEDIUM_IMPORTANCE] = (window / 3) * 4;
2622	l_ptr->queue_limit[TIPC_HIGH_IMPORTANCE] = (window / 3) * 5;
2623	l_ptr->queue_limit[TIPC_CRITICAL_IMPORTANCE] = (window / 3) * 6;
2624	/* Transiting data messages, including FIRST_FRAGM */
2625	l_ptr->queue_limit[TIPC_LOW_IMPORTANCE + 4] = 300;
2626	l_ptr->queue_limit[TIPC_MEDIUM_IMPORTANCE + 4] = 600;
2627	l_ptr->queue_limit[TIPC_HIGH_IMPORTANCE + 4] = 900;
2628	l_ptr->queue_limit[TIPC_CRITICAL_IMPORTANCE + 4] = 1200;
2629	l_ptr->queue_limit[CONN_MANAGER] = 1200;
2630	l_ptr->queue_limit[CHANGEOVER_PROTOCOL] = 2500;
2631	l_ptr->queue_limit[NAME_DISTRIBUTOR] = 3000;
2632	/* FRAGMENT and LAST_FRAGMENT packets */
2633	l_ptr->queue_limit[MSG_FRAGMENTER] = 4000;
2634}
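
/*
 * Worked example (illustrative figures only): window = 50 gives, with C
 * integer division, limits of 50 / 64 / 80 / 96 packets for LOW through
 * CRITICAL importance -- (50 / 3) is 16, so the nominal 4/3, 5/3 and 6/3
 * scalings come out slightly low.
 */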
2635
2636/**
2637 * link_find_link - locate link by name
2638 * @name: ptr to link name string
2639 * @node: ptr to area to be filled with ptr to associated node
2640 *
2641 * Caller must hold 'tipc_net_lock' to ensure node and bearer are not deleted;
2642 * this also prevents link deletion.
2643 *
2644 * Returns pointer to link (or NULL if invalid link name).
2645 */
2646static struct tipc_link *link_find_link(const char *name,
2647					struct tipc_node **node)
2648{
2649	struct tipc_link_name link_name_parts;
2650	struct tipc_bearer *b_ptr;
2651	struct tipc_link *l_ptr;
2652
2653	if (!link_name_validate(name, &link_name_parts))
2654		return NULL;
2655
2656	b_ptr = tipc_bearer_find_interface(link_name_parts.if_local);
2657	if (!b_ptr)
2658		return NULL;
2659
2660	*node = tipc_node_find(link_name_parts.addr_peer);
2661	if (!*node)
2662		return NULL;
2663
2664	l_ptr = (*node)->links[b_ptr->identity];
2665	if (!l_ptr || strcmp(l_ptr->name, name))
2666		return NULL;
2667
2668	return l_ptr;
2669}
2670
2671/**
2672 * link_value_is_valid - validate proposed link tolerance/priority/window
2673 *
2674 * @cmd: value type (TIPC_CMD_SET_LINK_*)
2675 * @new_value: the new value
2676 *
2677 * Returns 1 if value is within range, 0 if not.
2678 */
2679static int link_value_is_valid(u16 cmd, u32 new_value)
2680{
2681	switch (cmd) {
2682	case TIPC_CMD_SET_LINK_TOL:
2683		return (new_value >= TIPC_MIN_LINK_TOL) &&
2684			(new_value <= TIPC_MAX_LINK_TOL);
2685	case TIPC_CMD_SET_LINK_PRI:
2686		return (new_value <= TIPC_MAX_LINK_PRI);
2687	case TIPC_CMD_SET_LINK_WINDOW:
2688		return (new_value >= TIPC_MIN_LINK_WIN) &&
2689			(new_value <= TIPC_MAX_LINK_WIN);
2690	}
2691	return 0;
2692}
2693
2694/**
2695 * link_cmd_set_value - change priority/tolerance/window for link/bearer/media
2696 * @name: ptr to link, bearer, or media name
2697 * @new_value: new value of link, bearer, or media setting
2698 * @cmd: which link, bearer, or media attribute to set (TIPC_CMD_SET_LINK_*)
2699 *
2700 * Caller must hold 'tipc_net_lock' to ensure link/bearer/media is not deleted.
2701 *
2702 * Returns 0 if the value was updated, or a negative value on error.
2703 */
2704static int link_cmd_set_value(const char *name, u32 new_value, u16 cmd)
2705{
2706	struct tipc_node *node;
2707	struct tipc_link *l_ptr;
2708	struct tipc_bearer *b_ptr;
2709	struct tipc_media *m_ptr;
2710
2711	l_ptr = link_find_link(name, &node);
2712	if (l_ptr) {
2713		/*
2714		 * acquire node lock for tipc_link_send_proto_msg().
2715		 * see "TIPC locking policy" in net.c.
2716		 */
2717		tipc_node_lock(node);
2718		switch (cmd) {
2719		case TIPC_CMD_SET_LINK_TOL:
2720			link_set_supervision_props(l_ptr, new_value);
2721			tipc_link_send_proto_msg(l_ptr,
2722				STATE_MSG, 0, 0, new_value, 0, 0);
2723			break;
2724		case TIPC_CMD_SET_LINK_PRI:
2725			l_ptr->priority = new_value;
2726			tipc_link_send_proto_msg(l_ptr,
2727				STATE_MSG, 0, 0, 0, new_value, 0);
2728			break;
2729		case TIPC_CMD_SET_LINK_WINDOW:
2730			tipc_link_set_queue_limits(l_ptr, new_value);
2731			break;
2732		}
2733		tipc_node_unlock(node);
2734		return 0;
2735	}
2736
2737	b_ptr = tipc_bearer_find(name);
2738	if (b_ptr) {
2739		switch (cmd) {
2740		case TIPC_CMD_SET_LINK_TOL:
2741			b_ptr->tolerance = new_value;
2742			return 0;
2743		case TIPC_CMD_SET_LINK_PRI:
2744			b_ptr->priority = new_value;
2745			return 0;
2746		case TIPC_CMD_SET_LINK_WINDOW:
2747			b_ptr->window = new_value;
2748			return 0;
2749		}
2750		return -EINVAL;
2751	}
2752
2753	m_ptr = tipc_media_find(name);
2754	if (!m_ptr)
2755		return -ENODEV;
2756	switch (cmd) {
2757	case TIPC_CMD_SET_LINK_TOL:
2758		m_ptr->tolerance = new_value;
2759		return 0;
2760	case TIPC_CMD_SET_LINK_PRI:
2761		m_ptr->priority = new_value;
2762		return 0;
2763	case TIPC_CMD_SET_LINK_WINDOW:
2764		m_ptr->window = new_value;
2765		return 0;
2766	}
2767	return -EINVAL;
2768}
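
/*
 * Note (added for clarity): the name is resolved in order -- link first,
 * then bearer, then media. -ENODEV is returned only when no media
 * matches the name; a matching object with an unrecognized attribute
 * yields -EINVAL.
 */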
2769
2770struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space,
2771				     u16 cmd)
2772{
2773	struct tipc_link_config *args;
2774	u32 new_value;
2775	int res;
2776
2777	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_CONFIG))
2778		return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
2779
2780	args = (struct tipc_link_config *)TLV_DATA(req_tlv_area);
2781	new_value = ntohl(args->value);
2782
2783	if (!link_value_is_valid(cmd, new_value))
2784		return tipc_cfg_reply_error_string(
2785			"cannot change, value invalid");
2786
2787	if (!strcmp(args->name, tipc_bclink_name)) {
2788		if ((cmd == TIPC_CMD_SET_LINK_WINDOW) &&
2789		    (tipc_bclink_set_queue_limits(new_value) == 0))
2790			return tipc_cfg_reply_none();
2791		return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
2792						   " (cannot change setting on broadcast link)");
2793	}
2794
2795	read_lock_bh(&tipc_net_lock);
2796	res = link_cmd_set_value(args->name, new_value, cmd);
2797	read_unlock_bh(&tipc_net_lock);
2798	if (res)
2799		return tipc_cfg_reply_error_string("cannot change link setting");
2800
2801	return tipc_cfg_reply_none();
2802}
2803
2804/**
2805 * link_reset_statistics - reset link statistics
2806 * @l_ptr: pointer to link
2807 */
2808static void link_reset_statistics(struct tipc_link *l_ptr)
2809{
2810	memset(&l_ptr->stats, 0, sizeof(l_ptr->stats));
2811	l_ptr->stats.sent_info = l_ptr->next_out_no;
2812	l_ptr->stats.recv_info = l_ptr->next_in_no;
2813}
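
/*
 * Note (added for clarity): sent_info/recv_info are re-seeded with the
 * current sequence counters so that later reports can compute packet
 * counts since the reset as "next_out_no - sent_info" and
 * "next_in_no - recv_info" (see tipc_link_stats() below).
 */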
2814
2815struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_space)
2816{
2817	char *link_name;
2818	struct tipc_link *l_ptr;
2819	struct tipc_node *node;
2820
2821	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME))
2822		return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
2823
2824	link_name = (char *)TLV_DATA(req_tlv_area);
2825	if (!strcmp(link_name, tipc_bclink_name)) {
2826		if (tipc_bclink_reset_stats())
2827			return tipc_cfg_reply_error_string("link not found");
2828		return tipc_cfg_reply_none();
2829	}
2830
2831	read_lock_bh(&tipc_net_lock);
2832	l_ptr = link_find_link(link_name, &node);
2833	if (!l_ptr) {
2834		read_unlock_bh(&tipc_net_lock);
2835		return tipc_cfg_reply_error_string("link not found");
2836	}
2837
2838	tipc_node_lock(node);
2839	link_reset_statistics(l_ptr);
2840	tipc_node_unlock(node);
2841	read_unlock_bh(&tipc_net_lock);
2842	return tipc_cfg_reply_none();
2843}
2844
2845/**
2846 * percent - convert count to a percentage of total (rounding up or down)
2847 */
2848static u32 percent(u32 count, u32 total)
2849{
2850	return (count * 100 + (total / 2)) / total;
2851}
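
/*
 * A minimal self-check sketch for percent(), added for illustration and
 * kept out of the build with #if 0; the function name is hypothetical.
 */
#if 0
static void percent_selftest(void)
{
	BUG_ON(percent(1, 3) != 33);	/* (1 * 100 + 1) / 3 */
	BUG_ON(percent(2, 3) != 67);	/* (2 * 100 + 1) / 3 */
	BUG_ON(percent(1, 8) != 13);	/* 12.5% rounds up to 13 */
}
#endif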
2852
2853/**
2854 * tipc_link_stats - print link statistics
2855 * @name: link name
2856 * @buf: print buffer area
2857 * @buf_size: size of print buffer area
2858 *
2859 * Returns length of print buffer data string (or 0 if error)
2860 */
2861static int tipc_link_stats(const char *name, char *buf, const u32 buf_size)
2862{
2863	struct print_buf pb;
2864	struct tipc_link *l_ptr;
2865	struct tipc_node *node;
2866	char *status;
2867	u32 profile_total = 0;
2868
2869	if (!strcmp(name, tipc_bclink_name))
2870		return tipc_bclink_stats(buf, buf_size);
2871
2872	tipc_printbuf_init(&pb, buf, buf_size);
2873
2874	read_lock_bh(&tipc_net_lock);
2875	l_ptr = link_find_link(name, &node);
2876	if (!l_ptr) {
2877		read_unlock_bh(&tipc_net_lock);
2878		return 0;
2879	}
2880	tipc_node_lock(node);
2881
2882	if (tipc_link_is_active(l_ptr))
2883		status = "ACTIVE";
2884	else if (tipc_link_is_up(l_ptr))
2885		status = "STANDBY";
2886	else
2887		status = "DEFUNCT";
2888	tipc_printf(&pb, "Link <%s>\n"
2889			 "  %s  MTU:%u  Priority:%u  Tolerance:%u ms"
2890			 "  Window:%u packets\n",
2891		    l_ptr->name, status, l_ptr->max_pkt,
2892		    l_ptr->priority, l_ptr->tolerance, l_ptr->queue_limit[0]);
2893	tipc_printf(&pb, "  RX packets:%u fragments:%u/%u bundles:%u/%u\n",
2894		    l_ptr->next_in_no - l_ptr->stats.recv_info,
2895		    l_ptr->stats.recv_fragments,
2896		    l_ptr->stats.recv_fragmented,
2897		    l_ptr->stats.recv_bundles,
2898		    l_ptr->stats.recv_bundled);
2899	tipc_printf(&pb, "  TX packets:%u fragments:%u/%u bundles:%u/%u\n",
2900		    l_ptr->next_out_no - l_ptr->stats.sent_info,
2901		    l_ptr->stats.sent_fragments,
2902		    l_ptr->stats.sent_fragmented,
2903		    l_ptr->stats.sent_bundles,
2904		    l_ptr->stats.sent_bundled);
2905	profile_total = l_ptr->stats.msg_length_counts;
2906	if (!profile_total)
2907		profile_total = 1;
2908	tipc_printf(&pb, "  TX profile sample:%u packets  average:%u octets\n"
2909			 "  0-64:%u%% -256:%u%% -1024:%u%% -4096:%u%% "
2910			 "-16384:%u%% -32768:%u%% -66000:%u%%\n",
2911		    l_ptr->stats.msg_length_counts,
2912		    l_ptr->stats.msg_lengths_total / profile_total,
2913		    percent(l_ptr->stats.msg_length_profile[0], profile_total),
2914		    percent(l_ptr->stats.msg_length_profile[1], profile_total),
2915		    percent(l_ptr->stats.msg_length_profile[2], profile_total),
2916		    percent(l_ptr->stats.msg_length_profile[3], profile_total),
2917		    percent(l_ptr->stats.msg_length_profile[4], profile_total),
2918		    percent(l_ptr->stats.msg_length_profile[5], profile_total),
2919		    percent(l_ptr->stats.msg_length_profile[6], profile_total));
2920	tipc_printf(&pb, "  RX states:%u probes:%u naks:%u defs:%u dups:%u\n",
2921		    l_ptr->stats.recv_states,
2922		    l_ptr->stats.recv_probes,
2923		    l_ptr->stats.recv_nacks,
2924		    l_ptr->stats.deferred_recv,
2925		    l_ptr->stats.duplicates);
2926	tipc_printf(&pb, "  TX states:%u probes:%u naks:%u acks:%u dups:%u\n",
2927		    l_ptr->stats.sent_states,
2928		    l_ptr->stats.sent_probes,
2929		    l_ptr->stats.sent_nacks,
2930		    l_ptr->stats.sent_acks,
2931		    l_ptr->stats.retransmitted);
2932	tipc_printf(&pb, "  Congestion bearer:%u link:%u  Send queue max:%u avg:%u\n",
2933		    l_ptr->stats.bearer_congs,
2934		    l_ptr->stats.link_congs,
2935		    l_ptr->stats.max_queue_sz,
2936		    l_ptr->stats.queue_sz_counts
2937		    ? (l_ptr->stats.accu_queue_sz / l_ptr->stats.queue_sz_counts)
2938		    : 0);
2939
2940	tipc_node_unlock(node);
2941	read_unlock_bh(&tipc_net_lock);
2942	return tipc_printbuf_validate(&pb);
2943}
2944
2945#define MAX_LINK_STATS_INFO 2000
2946
2947struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area, int req_tlv_space)
2948{
2949	struct sk_buff *buf;
2950	struct tlv_desc *rep_tlv;
2951	int str_len;
2952
2953	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME))
2954		return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
2955
2956	buf = tipc_cfg_reply_alloc(TLV_SPACE(MAX_LINK_STATS_INFO));
2957	if (!buf)
2958		return NULL;
2959
2960	rep_tlv = (struct tlv_desc *)buf->data;
2961
2962	str_len = tipc_link_stats((char *)TLV_DATA(req_tlv_area),
2963				  (char *)TLV_DATA(rep_tlv), MAX_LINK_STATS_INFO);
2964	if (!str_len) {
2965		kfree_skb(buf);
2966		return tipc_cfg_reply_error_string("link not found");
2967	}
2968
2969	skb_put(buf, TLV_SPACE(str_len));
2970	TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
2971
2972	return buf;
2973}
2974
2975/**
2976 * tipc_link_get_max_pkt - get maximum packet size to use when sending to destination
2977 * @dest: network address of destination node
2978 * @selector: used to select from set of active links
2979 *
2980 * If no active link can be found, uses default maximum packet size.
2981 */
2982u32 tipc_link_get_max_pkt(u32 dest, u32 selector)
2983{
2984	struct tipc_node *n_ptr;
2985	struct tipc_link *l_ptr;
2986	u32 res = MAX_PKT_DEFAULT;
2987
2988	if (dest == tipc_own_addr)
2989		return MAX_MSG_SIZE;
2990
2991	read_lock_bh(&tipc_net_lock);
2992	n_ptr = tipc_node_find(dest);
2993	if (n_ptr) {
2994		tipc_node_lock(n_ptr);
2995		l_ptr = n_ptr->active_links[selector & 1];
2996		if (l_ptr)
2997			res = l_ptr->max_pkt;
2998		tipc_node_unlock(n_ptr);
2999	}
3000	read_unlock_bh(&tipc_net_lock);
3001	return res;
3002}
3003
3004static void link_print(struct tipc_link *l_ptr, const char *str)
3005{
3006	char print_area[256];
3007	struct print_buf pb;
3008	struct print_buf *buf = &pb;
3009
3010	tipc_printbuf_init(buf, print_area, sizeof(print_area));
3011
3012	tipc_printf(buf, "%s", str);
3013	tipc_printf(buf, "Link %x<%s>:",
3014		    l_ptr->addr, l_ptr->b_ptr->name);
3015
3016#ifdef CONFIG_TIPC_DEBUG
3017	if (link_reset_reset(l_ptr) || link_reset_unknown(l_ptr))
3018		goto print_state;
3019
3020	tipc_printf(buf, ": NXO(%u):", mod(l_ptr->next_out_no));
3021	tipc_printf(buf, "NXI(%u):", mod(l_ptr->next_in_no));
3022	tipc_printf(buf, "SQUE");
3023	if (l_ptr->first_out) {
3024		tipc_printf(buf, "[%u..", buf_seqno(l_ptr->first_out));
3025		if (l_ptr->next_out)
3026			tipc_printf(buf, "%u..", buf_seqno(l_ptr->next_out));
3027		tipc_printf(buf, "%u]", buf_seqno(l_ptr->last_out));
3028		if ((mod(buf_seqno(l_ptr->last_out) -
3029			 buf_seqno(l_ptr->first_out))
3030		     != (l_ptr->out_queue_size - 1)) ||
3031		    (l_ptr->last_out->next != NULL)) {
3032			tipc_printf(buf, "\nSend queue inconsistency\n");
3033			tipc_printf(buf, "first_out= %p ", l_ptr->first_out);
3034			tipc_printf(buf, "next_out= %p ", l_ptr->next_out);
3035			tipc_printf(buf, "last_out= %p ", l_ptr->last_out);
3036		}
3037	} else
3038		tipc_printf(buf, "[]");
3039	tipc_printf(buf, "SQSIZ(%u)", l_ptr->out_queue_size);
3040	if (l_ptr->oldest_deferred_in) {
3041		u32 o = buf_seqno(l_ptr->oldest_deferred_in);
3042		u32 n = buf_seqno(l_ptr->newest_deferred_in);
3043		tipc_printf(buf, ":RQUE[%u..%u]", o, n);
3044		if (l_ptr->deferred_inqueue_sz != mod((n + 1) - o)) {
3045			tipc_printf(buf, ":RQSIZ(%u)",
3046				    l_ptr->deferred_inqueue_sz);
3047		}
3048	}
3049print_state:
3050#endif
3051
3052	if (link_working_unknown(l_ptr))
3053		tipc_printf(buf, ":WU");
3054	else if (link_reset_reset(l_ptr))
3055		tipc_printf(buf, ":RR");
3056	else if (link_reset_unknown(l_ptr))
3057		tipc_printf(buf, ":RU");
3058	else if (link_working_working(l_ptr))
3059		tipc_printf(buf, ":WW");
3060	tipc_printf(buf, "\n");
3061
3062	tipc_printbuf_validate(buf);
3063	info("%s", print_area);
3064}