   1/*
   2 * net/tipc/link.c: TIPC link code
   3 *
   4 * Copyright (c) 1996-2007, 2012-2016, Ericsson AB
   5 * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
   6 * All rights reserved.
   7 *
   8 * Redistribution and use in source and binary forms, with or without
   9 * modification, are permitted provided that the following conditions are met:
  10 *
  11 * 1. Redistributions of source code must retain the above copyright
  12 *    notice, this list of conditions and the following disclaimer.
  13 * 2. Redistributions in binary form must reproduce the above copyright
  14 *    notice, this list of conditions and the following disclaimer in the
  15 *    documentation and/or other materials provided with the distribution.
  16 * 3. Neither the names of the copyright holders nor the names of its
  17 *    contributors may be used to endorse or promote products derived from
  18 *    this software without specific prior written permission.
  19 *
  20 * Alternatively, this software may be distributed under the terms of the
  21 * GNU General Public License ("GPL") version 2 as published by the Free
  22 * Software Foundation.
  23 *
  24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  34 * POSSIBILITY OF SUCH DAMAGE.
  35 */
  36
  37#include "core.h"
  38#include "subscr.h"
  39#include "link.h"
  40#include "bcast.h"
  41#include "socket.h"
  42#include "name_distr.h"
  43#include "discover.h"
  44#include "netlink.h"
  45#include "monitor.h"
  46
  47#include <linux/pkt_sched.h>
  48
  49struct tipc_stats {
  50	u32 sent_pkts;
  51	u32 recv_pkts;
  52	u32 sent_states;
  53	u32 recv_states;
  54	u32 sent_probes;
  55	u32 recv_probes;
  56	u32 sent_nacks;
  57	u32 recv_nacks;
  58	u32 sent_acks;
  59	u32 sent_bundled;
  60	u32 sent_bundles;
  61	u32 recv_bundled;
  62	u32 recv_bundles;
  63	u32 retransmitted;
  64	u32 sent_fragmented;
  65	u32 sent_fragments;
  66	u32 recv_fragmented;
  67	u32 recv_fragments;
  68	u32 link_congs;		/* # port sends blocked by congestion */
  69	u32 deferred_recv;
  70	u32 duplicates;
  71	u32 max_queue_sz;	/* send queue size high water mark */
  72	u32 accu_queue_sz;	/* used for send queue size profiling */
  73	u32 queue_sz_counts;	/* used for send queue size profiling */
  74	u32 msg_length_counts;	/* used for message length profiling */
  75	u32 msg_lengths_total;	/* used for message length profiling */
  76	u32 msg_length_profile[7]; /* used for msg. length profiling */
  77};
  78
  79/**
  80 * struct tipc_link - TIPC link data structure
  81 * @addr: network address of link's peer node
  82 * @name: link name character string
  83 * @session: session # used by this link endpoint
  84 * @active: marks link as one of the node's currently active links
  85 * @net: pointer to namespace struct
  86 * @if_name: name of the local interface used by the link
  87 * @peer_session: link session # being used by peer end of link
  88 * @peer_bearer_id: bearer id used by link's peer endpoint
  89 * @bearer_id: local bearer id used by link
  90 * @tolerance: minimum link continuity loss needed to reset link [in ms]
  91 * @abort_limit: # of unacknowledged continuity probes needed to reset link
  92 * @state: current state of link FSM
  93 * @peer_caps: bitmap describing capabilities of peer node
  94 * @silent_intv_cnt: # of timer intervals without any reception from peer
  95 * @rst_cnt: # of timeouts since link reset, used to pace RESET messages
  96 * @drop_point: seq # below which extracted failover packets are dropped
  97 * @priority: current link priority
  98 * @net_plane: current link network plane ('A' through 'H')
  99 * @mon_state: cookie with information needed by link monitor
 100 * @backlog: backlog queue lengths and congestion limits, per importance
 101 * @failover_reasm_skb: reassembly buffer inherited from a failed link
 102 * @window: send window, i.e. max # of packets allowed in transmit queue
 103 * @mtu: current maximum packet size for this link
 104 * @advertised_mtu: advertised own mtu when link is being established
 105 * @transmq: queue for sent, non-acked messages
 106 * @backlogq: queue for messages waiting to be sent
 107 * @snd_nxt: next sequence number to use for outbound messages
 108 * @last_retransm: sequence number of most recently retransmitted message
 109 * @stale_count: # of identical retransmit requests made by peer
 110 * @ackers: # of peers that need to ack each packet before it can be released
 111 * @acked: last packet acked by a certain peer. Used for broadcast.
 112 * @rcv_nxt: next sequence number to expect for inbound messages
 113 * @deferdq: deferred queue of out-of-sequence messages received from peer
 114 * @rcv_unacked: # of inbound messages rx'd without ack'ing back to peer
 115 * @inputq: buffer queue for messages to be delivered upwards
 116 * @namedq: buffer queue for name table messages to be delivered upwards
 117 * @bc_rcvlink: the peer specific link used for broadcast reception
 118 * @wakeupq: linked list of wakeup msgs waiting for link congestion to abate
 119 * @bc_sndlink: the namespace global link used for broadcast sending
 120 * @reasm_buf: head of partially reassembled inbound message fragments
 121 * @bc_peer_is_up: peer has acknowledged our broadcast init message
 122 * @stats: collects statistics regarding link activity
 123 */
 124struct tipc_link {
 125	u32 addr;
 126	char name[TIPC_MAX_LINK_NAME];
 127	struct net *net;
 128
 129	/* Management and link supervision data */
 130	u32 peer_session;
 131	u32 session;
 132	u32 peer_bearer_id;
 133	u32 bearer_id;
 134	u32 tolerance;
 135	u32 abort_limit;
 136	u32 state;
 137	u16 peer_caps;
 138	bool active;
 139	u32 silent_intv_cnt;
 140	char if_name[TIPC_MAX_IF_NAME];
 141	u32 priority;
 142	char net_plane;
 143	struct tipc_mon_state mon_state;
 144	u16 rst_cnt;
 145
 146	/* Failover/synch */
 147	u16 drop_point;
 148	struct sk_buff *failover_reasm_skb;
 149
 150	/* Max packet negotiation */
 151	u16 mtu;
 152	u16 advertised_mtu;
 153
 154	/* Sending */
 155	struct sk_buff_head transmq;
 156	struct sk_buff_head backlogq;
 157	struct {
 158		u16 len;
 159		u16 limit;
 160	} backlog[5];
 161	u16 snd_nxt;
 162	u16 last_retransm;
 163	u16 window;
 164	u32 stale_count;
 165
 166	/* Reception */
 167	u16 rcv_nxt;
 168	u32 rcv_unacked;
 169	struct sk_buff_head deferdq;
 170	struct sk_buff_head *inputq;
 171	struct sk_buff_head *namedq;
 172
 173	/* Congestion handling */
 174	struct sk_buff_head wakeupq;
 175
 176	/* Fragmentation/reassembly */
 177	struct sk_buff *reasm_buf;
 178
 179	/* Broadcast */
 180	u16 ackers;
 181	u16 acked;
 182	struct tipc_link *bc_rcvlink;
 183	struct tipc_link *bc_sndlink;
 184	unsigned long prev_retr;
 185	u16 prev_from;
 186	u16 prev_to;
 187	u8 nack_state;
 188	bool bc_peer_is_up;
 189
 190	/* Statistics */
 191	struct tipc_stats stats;
 192};
 193
 194/*
 195 * Error message prefixes
 196 */
 197static const char *link_co_err = "Link tunneling error, ";
 198static const char *link_rst_msg = "Resetting link ";
 199
 200/* Send states for broadcast NACKs
 201 */
 202enum {
 203	BC_NACK_SND_CONDITIONAL,
 204	BC_NACK_SND_UNCONDITIONAL,
 205	BC_NACK_SND_SUPPRESS,
 206};
 207
 208#define TIPC_BC_RETR_LIMIT 10   /* [ms] */
 209
 210/*
 211 * Interval between NACKs when packets arrive out of order
 212 */
 213#define TIPC_NACK_INTV (TIPC_MIN_LINK_WIN * 2)
 214
 215/* Wildcard value for link session numbers. When it is known that
 216 * peer endpoint is down, any session number must be accepted.
 217 */
 218#define ANY_SESSION 0x10000
 219
 220/* Link FSM states:
 221 */
 222enum {
 223	LINK_ESTABLISHED     = 0xe,
 224	LINK_ESTABLISHING    = 0xe  << 4,
 225	LINK_RESET           = 0x1  << 8,
 226	LINK_RESETTING       = 0x2  << 12,
 227	LINK_PEER_RESET      = 0xd  << 16,
 228	LINK_FAILINGOVER     = 0xf  << 20,
 229	LINK_SYNCHING        = 0xc  << 24
 230};
 231
 232/* Link FSM state checking routines
 233 */
 234static int link_is_up(struct tipc_link *l)
 235{
 236	return l->state & (LINK_ESTABLISHED | LINK_SYNCHING);
 237}
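
/* Editorial sketch, not part of the original source: each FSM state above
 * occupies its own bit range, so membership in a set of states reduces to
 * a single bitwise AND, exactly as link_is_up() does. A hypothetical test
 * for "down and not even being established" follows the same pattern:
 */
#if 0	/* illustrative only; assumes a valid struct tipc_link *l */
	if (!(l->state & (LINK_ESTABLISHED | LINK_SYNCHING | LINK_ESTABLISHING)))
		pr_debug("link %s is fully down\n", l->name);
#endif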
 238
 239static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
 240			       struct sk_buff_head *xmitq);
 241static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
 242				      u16 rcvgap, int tolerance, int priority,
 243				      struct sk_buff_head *xmitq);
 244static void link_print(struct tipc_link *l, const char *str);
 245static int tipc_link_build_nack_msg(struct tipc_link *l,
 246				    struct sk_buff_head *xmitq);
 247static void tipc_link_build_bc_init_msg(struct tipc_link *l,
 248					struct sk_buff_head *xmitq);
 249static bool tipc_link_release_pkts(struct tipc_link *l, u16 to);
 250
 251/*
 252 *  Simple non-static link routines (i.e. referenced outside this file)
 253 */
 254bool tipc_link_is_up(struct tipc_link *l)
 255{
 256	return link_is_up(l);
 257}
 258
 259bool tipc_link_peer_is_down(struct tipc_link *l)
 260{
 261	return l->state == LINK_PEER_RESET;
 262}
 263
 264bool tipc_link_is_reset(struct tipc_link *l)
 265{
 266	return l->state & (LINK_RESET | LINK_FAILINGOVER | LINK_ESTABLISHING);
 267}
 268
 269bool tipc_link_is_establishing(struct tipc_link *l)
 270{
 271	return l->state == LINK_ESTABLISHING;
 272}
 273
 274bool tipc_link_is_synching(struct tipc_link *l)
 275{
 276	return l->state == LINK_SYNCHING;
 277}
 278
 279bool tipc_link_is_failingover(struct tipc_link *l)
 280{
 281	return l->state == LINK_FAILINGOVER;
 282}
 283
 284bool tipc_link_is_blocked(struct tipc_link *l)
 285{
 286	return l->state & (LINK_RESETTING | LINK_PEER_RESET | LINK_FAILINGOVER);
 287}
 288
 289static bool link_is_bc_sndlink(struct tipc_link *l)
 290{
 291	return !l->bc_sndlink;
 292}
 293
 294static bool link_is_bc_rcvlink(struct tipc_link *l)
 295{
 296	return ((l->bc_rcvlink == l) && !link_is_bc_sndlink(l));
 297}
 298
 299int tipc_link_is_active(struct tipc_link *l)
 300{
 301	return l->active;
 302}
 303
 304void tipc_link_set_active(struct tipc_link *l, bool active)
 305{
 306	l->active = active;
 307}
 308
 309u32 tipc_link_id(struct tipc_link *l)
 310{
 311	return l->peer_bearer_id << 16 | l->bearer_id;
 312}
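
/* Editorial sketch, not part of the original source: tipc_link_id() packs
 * the peer's bearer id into the upper 16 bits and the local bearer id into
 * the lower 16 bits, so a caller can recover both halves with a shift and
 * a mask:
 */
#if 0	/* illustrative only */
	u32 id = tipc_link_id(l);
	u16 peer_bid = id >> 16;	/* l->peer_bearer_id */
	u16 own_bid = id & 0xffff;	/* l->bearer_id */
#endif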
 313
 314int tipc_link_window(struct tipc_link *l)
 315{
 316	return l->window;
 317}
 318
 319int tipc_link_prio(struct tipc_link *l)
 320{
 321	return l->priority;
 322}
 323
 324unsigned long tipc_link_tolerance(struct tipc_link *l)
 325{
 326	return l->tolerance;
 327}
 328
 329struct sk_buff_head *tipc_link_inputq(struct tipc_link *l)
 330{
 331	return l->inputq;
 332}
 333
 334char tipc_link_plane(struct tipc_link *l)
 335{
 336	return l->net_plane;
 337}
 338
 339void tipc_link_add_bc_peer(struct tipc_link *snd_l,
 340			   struct tipc_link *uc_l,
 341			   struct sk_buff_head *xmitq)
 342{
 343	struct tipc_link *rcv_l = uc_l->bc_rcvlink;
 344
 345	snd_l->ackers++;
 346	rcv_l->acked = snd_l->snd_nxt - 1;
 347	snd_l->state = LINK_ESTABLISHED;
 348	tipc_link_build_bc_init_msg(uc_l, xmitq);
 349}
 350
 351void tipc_link_remove_bc_peer(struct tipc_link *snd_l,
 352			      struct tipc_link *rcv_l,
 353			      struct sk_buff_head *xmitq)
 354{
 355	u16 ack = snd_l->snd_nxt - 1;
 356
 357	snd_l->ackers--;
 358	rcv_l->bc_peer_is_up = true;
 359	rcv_l->state = LINK_ESTABLISHED;
 360	tipc_link_bc_ack_rcv(rcv_l, ack, xmitq);
 361	tipc_link_reset(rcv_l);
 362	rcv_l->state = LINK_RESET;
 363	if (!snd_l->ackers) {
 364		tipc_link_reset(snd_l);
 365		snd_l->state = LINK_RESET;
 366		__skb_queue_purge(xmitq);
 367	}
 368}
 369
 370int tipc_link_bc_peers(struct tipc_link *l)
 371{
 372	return l->ackers;
 373}
 374
 375u16 link_bc_rcv_gap(struct tipc_link *l)
 376{
 377	struct sk_buff *skb = skb_peek(&l->deferdq);
 378	u16 gap = 0;
 379
 380	if (more(l->snd_nxt, l->rcv_nxt))
 381		gap = l->snd_nxt - l->rcv_nxt;
 382	if (skb)
 383		gap = buf_seqno(skb) - l->rcv_nxt;
 384	return gap;
 385}
 386
 387void tipc_link_set_mtu(struct tipc_link *l, int mtu)
 388{
 389	l->mtu = mtu;
 390}
 391
 392int tipc_link_mtu(struct tipc_link *l)
 393{
 394	return l->mtu;
 395}
 396
 397u16 tipc_link_rcv_nxt(struct tipc_link *l)
 398{
 399	return l->rcv_nxt;
 400}
 401
 402u16 tipc_link_acked(struct tipc_link *l)
 403{
 404	return l->acked;
 405}
 406
 407char *tipc_link_name(struct tipc_link *l)
 408{
 409	return l->name;
 410}
 411
 412/**
 413 * tipc_link_create - create a new link
 414 * @net: pointer to the applicable net namespace
 415 * @if_name: associated interface name
 416 * @bearer_id: id (index) of associated bearer
 417 * @tolerance: link tolerance to be used by link
 418 * @net_plane: network plane (A, B, C, ...) this link belongs to
 419 * @mtu: mtu to be advertised by link
 420 * @priority: priority to be used by link
 421 * @window: send window to be used by link
 422 * @session: session to be used by link
 423 * @ownnode: identity of own node
 424 * @peer: node id of peer node
 425 * @peer_caps: bitmap describing peer node capabilities
 426 * @bc_sndlink: the namespace global link used for broadcast sending
 427 * @bc_rcvlink: the peer specific link used for broadcast reception
 428 * @inputq: queue to put messages ready for delivery
 429 * @namedq: queue to put binding table update messages ready for delivery
 430 * @link: return value, pointer to put the created link
 431 *
 432 * Returns true if link was created, otherwise false
 433 */
 434bool tipc_link_create(struct net *net, char *if_name, int bearer_id,
 435		      int tolerance, char net_plane, u32 mtu, int priority,
 436		      int window, u32 session, u32 ownnode, u32 peer,
 437		      u16 peer_caps,
 438		      struct tipc_link *bc_sndlink,
 439		      struct tipc_link *bc_rcvlink,
 440		      struct sk_buff_head *inputq,
 441		      struct sk_buff_head *namedq,
 442		      struct tipc_link **link)
 443{
 444	struct tipc_link *l;
 445
 446	l = kzalloc(sizeof(*l), GFP_ATOMIC);
 447	if (!l)
 448		return false;
 449	*link = l;
 450	l->session = session;
 451
 452	/* Note: peer i/f name is completed by reset/activate message */
 453	sprintf(l->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
 454		tipc_zone(ownnode), tipc_cluster(ownnode), tipc_node(ownnode),
 455		if_name, tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
 456	strcpy(l->if_name, if_name);
 457	l->addr = peer;
 458	l->peer_caps = peer_caps;
 459	l->net = net;
 460	l->peer_session = ANY_SESSION;
 461	l->bearer_id = bearer_id;
 462	l->tolerance = tolerance;
 463	l->net_plane = net_plane;
 464	l->advertised_mtu = mtu;
 465	l->mtu = mtu;
 466	l->priority = priority;
 467	tipc_link_set_queue_limits(l, window);
 468	l->ackers = 1;
 469	l->bc_sndlink = bc_sndlink;
 470	l->bc_rcvlink = bc_rcvlink;
 471	l->inputq = inputq;
 472	l->namedq = namedq;
 473	l->state = LINK_RESETTING;
 474	__skb_queue_head_init(&l->transmq);
 475	__skb_queue_head_init(&l->backlogq);
 476	__skb_queue_head_init(&l->deferdq);
 477	skb_queue_head_init(&l->wakeupq);
 478	skb_queue_head_init(l->inputq);
 479	return true;
 480}
 481
 482/**
 483 * tipc_link_bc_create - create new link to be used for broadcast
 484 * @net: pointer to the applicable net namespace
 485 * @mtu: mtu to be used
 486 * @window: send window to be used
 487 * @inputq: queue to put messages ready for delivery
 488 * @namedq: queue to put binding table update messages ready for delivery
 489 * @link: return value, pointer to put the created link
 490 *
 491 * Returns true if link was created, otherwise false
 492 */
 493bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer,
 494			 int mtu, int window, u16 peer_caps,
 495			 struct sk_buff_head *inputq,
 496			 struct sk_buff_head *namedq,
 497			 struct tipc_link *bc_sndlink,
 498			 struct tipc_link **link)
 499{
 500	struct tipc_link *l;
 501
 502	if (!tipc_link_create(net, "", MAX_BEARERS, 0, 'Z', mtu, 0, window,
 503			      0, ownnode, peer, peer_caps, bc_sndlink,
 504			      NULL, inputq, namedq, link))
 505		return false;
 506
 507	l = *link;
 508	strcpy(l->name, tipc_bclink_name);
 509	tipc_link_reset(l);
 510	l->state = LINK_RESET;
 511	l->ackers = 0;
 512	l->bc_rcvlink = l;
 513
 514	/* Broadcast send link is always up */
 515	if (link_is_bc_sndlink(l))
 516		l->state = LINK_ESTABLISHED;
 517
 518	return true;
 519}
 520
 521/**
 522 * tipc_link_fsm_evt - link finite state machine
 523 * @l: pointer to link
 524 * @evt: state machine event to be processed
 525 */
 526int tipc_link_fsm_evt(struct tipc_link *l, int evt)
 527{
 528	int rc = 0;
 529
 530	switch (l->state) {
 531	case LINK_RESETTING:
 532		switch (evt) {
 533		case LINK_PEER_RESET_EVT:
 534			l->state = LINK_PEER_RESET;
 535			break;
 536		case LINK_RESET_EVT:
 537			l->state = LINK_RESET;
 538			break;
 539		case LINK_FAILURE_EVT:
 540		case LINK_FAILOVER_BEGIN_EVT:
 541		case LINK_ESTABLISH_EVT:
 542		case LINK_FAILOVER_END_EVT:
 543		case LINK_SYNCH_BEGIN_EVT:
 544		case LINK_SYNCH_END_EVT:
 545		default:
 546			goto illegal_evt;
 547		}
 548		break;
 549	case LINK_RESET:
 550		switch (evt) {
 551		case LINK_PEER_RESET_EVT:
 552			l->state = LINK_ESTABLISHING;
 553			break;
 554		case LINK_FAILOVER_BEGIN_EVT:
 555			l->state = LINK_FAILINGOVER;
 556		case LINK_FAILURE_EVT:
 557		case LINK_RESET_EVT:
 558		case LINK_ESTABLISH_EVT:
 559		case LINK_FAILOVER_END_EVT:
 560			break;
 561		case LINK_SYNCH_BEGIN_EVT:
 562		case LINK_SYNCH_END_EVT:
 563		default:
 564			goto illegal_evt;
 565		}
 566		break;
 567	case LINK_PEER_RESET:
 568		switch (evt) {
 569		case LINK_RESET_EVT:
 570			l->state = LINK_ESTABLISHING;
 571			break;
 572		case LINK_PEER_RESET_EVT:
 573		case LINK_ESTABLISH_EVT:
 574		case LINK_FAILURE_EVT:
 575			break;
 576		case LINK_SYNCH_BEGIN_EVT:
 577		case LINK_SYNCH_END_EVT:
 578		case LINK_FAILOVER_BEGIN_EVT:
 579		case LINK_FAILOVER_END_EVT:
 580		default:
 581			goto illegal_evt;
 582		}
 583		break;
 584	case LINK_FAILINGOVER:
 585		switch (evt) {
 586		case LINK_FAILOVER_END_EVT:
 587			l->state = LINK_RESET;
 588			break;
 589		case LINK_PEER_RESET_EVT:
 590		case LINK_RESET_EVT:
 591		case LINK_ESTABLISH_EVT:
 592		case LINK_FAILURE_EVT:
 593			break;
 594		case LINK_FAILOVER_BEGIN_EVT:
 595		case LINK_SYNCH_BEGIN_EVT:
 596		case LINK_SYNCH_END_EVT:
 597		default:
 598			goto illegal_evt;
 599		}
 600		break;
 601	case LINK_ESTABLISHING:
 602		switch (evt) {
 603		case LINK_ESTABLISH_EVT:
 604			l->state = LINK_ESTABLISHED;
 605			break;
 606		case LINK_FAILOVER_BEGIN_EVT:
 607			l->state = LINK_FAILINGOVER;
 608			break;
 609		case LINK_RESET_EVT:
 610			l->state = LINK_RESET;
 611			break;
 612		case LINK_FAILURE_EVT:
 613		case LINK_PEER_RESET_EVT:
 614		case LINK_SYNCH_BEGIN_EVT:
 615		case LINK_FAILOVER_END_EVT:
 616			break;
 617		case LINK_SYNCH_END_EVT:
 618		default:
 619			goto illegal_evt;
 620		}
 621		break;
 622	case LINK_ESTABLISHED:
 623		switch (evt) {
 624		case LINK_PEER_RESET_EVT:
 625			l->state = LINK_PEER_RESET;
 626			rc |= TIPC_LINK_DOWN_EVT;
 627			break;
 628		case LINK_FAILURE_EVT:
 629			l->state = LINK_RESETTING;
 630			rc |= TIPC_LINK_DOWN_EVT;
 631			break;
 632		case LINK_RESET_EVT:
 633			l->state = LINK_RESET;
 634			break;
 635		case LINK_ESTABLISH_EVT:
 636		case LINK_SYNCH_END_EVT:
 637			break;
 638		case LINK_SYNCH_BEGIN_EVT:
 639			l->state = LINK_SYNCHING;
 640			break;
 641		case LINK_FAILOVER_BEGIN_EVT:
 642		case LINK_FAILOVER_END_EVT:
 643		default:
 644			goto illegal_evt;
 645		}
 646		break;
 647	case LINK_SYNCHING:
 648		switch (evt) {
 649		case LINK_PEER_RESET_EVT:
 650			l->state = LINK_PEER_RESET;
 651			rc |= TIPC_LINK_DOWN_EVT;
 652			break;
 653		case LINK_FAILURE_EVT:
 654			l->state = LINK_RESETTING;
 655			rc |= TIPC_LINK_DOWN_EVT;
 656			break;
 657		case LINK_RESET_EVT:
 658			l->state = LINK_RESET;
 659			break;
 660		case LINK_ESTABLISH_EVT:
 661		case LINK_SYNCH_BEGIN_EVT:
 662			break;
 663		case LINK_SYNCH_END_EVT:
 664			l->state = LINK_ESTABLISHED;
 665			break;
 666		case LINK_FAILOVER_BEGIN_EVT:
 667		case LINK_FAILOVER_END_EVT:
 668		default:
 669			goto illegal_evt;
 670		}
 671		break;
 672	default:
 673		pr_err("Unknown FSM state %x in %s\n", l->state, l->name);
 674	}
 675	return rc;
 676illegal_evt:
 677	pr_err("Illegal FSM event %x in state %x on link %s\n",
 678	       evt, l->state, l->name);
 679	return rc;
 680}
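
/* Editorial sketch, not part of the original source: callers drive the FSM
 * by feeding it events and reacting to the returned flags. A minimal,
 * hypothetical caller handling a failure event might look like:
 */
#if 0	/* illustrative only; how to react is the caller's business */
	int rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);

	if (rc & TIPC_LINK_DOWN_EVT)
		tipc_link_reset(l);	/* e.g. tear the link down */
#endif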
 681
 682/* link_profile_stats - update statistical profiling of traffic
 683 */
 684static void link_profile_stats(struct tipc_link *l)
 685{
 686	struct sk_buff *skb;
 687	struct tipc_msg *msg;
 688	int length;
 689
 690	/* Update counters used in statistical profiling of send traffic */
 691	l->stats.accu_queue_sz += skb_queue_len(&l->transmq);
 692	l->stats.queue_sz_counts++;
 693
 694	skb = skb_peek(&l->transmq);
 695	if (!skb)
 696		return;
 697	msg = buf_msg(skb);
 698	length = msg_size(msg);
 699
 700	if (msg_user(msg) == MSG_FRAGMENTER) {
 701		if (msg_type(msg) != FIRST_FRAGMENT)
 702			return;
 703		length = msg_size(msg_get_wrapped(msg));
 704	}
 705	l->stats.msg_lengths_total += length;
 706	l->stats.msg_length_counts++;
 707	if (length <= 64)
 708		l->stats.msg_length_profile[0]++;
 709	else if (length <= 256)
 710		l->stats.msg_length_profile[1]++;
 711	else if (length <= 1024)
 712		l->stats.msg_length_profile[2]++;
 713	else if (length <= 4096)
 714		l->stats.msg_length_profile[3]++;
 715	else if (length <= 16384)
 716		l->stats.msg_length_profile[4]++;
 717	else if (length <= 32768)
 718		l->stats.msg_length_profile[5]++;
 719	else
 720		l->stats.msg_length_profile[6]++;
 721}
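
/* Editorial sketch, not part of the original source: the counters updated
 * above allow an average sent message length to be derived later, in the
 * same spirit as TIPC_NLA_STATS_AVG_QUEUE is derived in the netlink code
 * below. Guarded against a zero sample count:
 */
#if 0	/* illustrative only */
	u32 avg_len = l->stats.msg_length_counts ?
		l->stats.msg_lengths_total / l->stats.msg_length_counts : 0;
#endif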
 722
 723/* tipc_link_timeout - perform periodic task as instructed by the node timer
 724 */
 725int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
 726{
 727	int mtyp = 0;
 728	int rc = 0;
 729	bool state = false;
 730	bool probe = false;
 731	bool setup = false;
 732	u16 bc_snt = l->bc_sndlink->snd_nxt - 1;
 733	u16 bc_acked = l->bc_rcvlink->acked;
 734	struct tipc_mon_state *mstate = &l->mon_state;
 735
 736	switch (l->state) {
 737	case LINK_ESTABLISHED:
 738	case LINK_SYNCHING:
 739		mtyp = STATE_MSG;
 740		link_profile_stats(l);
 741		tipc_mon_get_state(l->net, l->addr, mstate, l->bearer_id);
 742		if (mstate->reset || (l->silent_intv_cnt > l->abort_limit))
 743			return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
 744		state = bc_acked != bc_snt;
 745		state |= l->bc_rcvlink->rcv_unacked;
 746		state |= l->rcv_unacked;
 747		state |= !skb_queue_empty(&l->transmq);
 748		state |= !skb_queue_empty(&l->deferdq);
 749		probe = mstate->probing;
 750		probe |= l->silent_intv_cnt;
 751		if (probe || mstate->monitoring)
 752			l->silent_intv_cnt++;
 753		break;
 754	case LINK_RESET:
 755		setup = l->rst_cnt++ <= 4;
 756		setup |= !(l->rst_cnt % 16);
 757		mtyp = RESET_MSG;
 758		break;
 759	case LINK_ESTABLISHING:
 760		setup = true;
 761		mtyp = ACTIVATE_MSG;
 762		break;
 763	case LINK_PEER_RESET:
 764	case LINK_RESETTING:
 765	case LINK_FAILINGOVER:
 766		break;
 767	default:
 768		break;
 769	}
 770
 771	if (state || probe || setup)
 772		tipc_link_build_proto_msg(l, mtyp, probe, 0, 0, 0, xmitq);
 773
 774	return rc;
 775}
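
/* Editorial sketch, not part of the original source: the owning node is
 * expected to call this from its timer handler and then transmit whatever
 * was queued on @xmitq. A hypothetical caller, with the bearer send step
 * left abstract:
 */
#if 0	/* illustrative only */
	struct sk_buff_head xmitq;
	int rc;

	__skb_queue_head_init(&xmitq);
	rc = tipc_link_timeout(l, &xmitq);
	/* ...hand xmitq to the bearer layer, then act on rc flags... */
#endif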
 776
 777/**
 778 * link_schedule_user - schedule a message sender for wakeup after congestion
 779 * @link: congested link
 780 * @list: buffer chain of the message whose transmission was attempted
 781 * Create pseudo msg to send back to user when congestion abates
 782 * Does not consume buffer list
 783 */
 784static int link_schedule_user(struct tipc_link *link, struct sk_buff_head *list)
 785{
 786	struct tipc_msg *msg = buf_msg(skb_peek(list));
 787	int imp = msg_importance(msg);
 788	u32 oport = msg_origport(msg);
 789	u32 addr = tipc_own_addr(link->net);
 790	struct sk_buff *skb;
 791
 792	/* This really cannot happen...  */
 793	if (unlikely(imp > TIPC_CRITICAL_IMPORTANCE)) {
 794		pr_warn("%s<%s>, send queue full\n", link_rst_msg, link->name);
 795		return -ENOBUFS;
 796	}
 797	/* Non-blocking sender: */
 798	if (TIPC_SKB_CB(skb_peek(list))->wakeup_pending)
 799		return -ELINKCONG;
 800
 801	/* Create and schedule wakeup pseudo message */
 802	skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
 803			      addr, addr, oport, 0, 0);
 804	if (!skb)
 805		return -ENOBUFS;
 806	TIPC_SKB_CB(skb)->chain_sz = skb_queue_len(list);
 807	TIPC_SKB_CB(skb)->chain_imp = imp;
 808	skb_queue_tail(&link->wakeupq, skb);
 809	link->stats.link_congs++;
 810	return -ELINKCONG;
 811}
 812
 813/**
 814 * link_prepare_wakeup - prepare users for wakeup after congestion
 815 * @l: congested link
 816 * Move a number of waiting users, as permitted by available space in
 817 * the send queue, from link wait queue to node wait queue for wakeup
 818 */
 819void link_prepare_wakeup(struct tipc_link *l)
 820{
 821	int pnd[TIPC_SYSTEM_IMPORTANCE + 1] = {0,};
 822	int imp, lim;
 823	struct sk_buff *skb, *tmp;
 824
 825	skb_queue_walk_safe(&l->wakeupq, skb, tmp) {
 826		imp = TIPC_SKB_CB(skb)->chain_imp;
 827		lim = l->backlog[imp].limit;
 828		pnd[imp] += TIPC_SKB_CB(skb)->chain_sz;
 829		if ((pnd[imp] + l->backlog[imp].len) >= lim)
 830			break;
 831		skb_unlink(skb, &l->wakeupq);
 832		skb_queue_tail(l->inputq, skb);
 833	}
 834}
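
/* Editorial sketch, not part of the original source: congestion handling
 * is two-sided. link_schedule_user() parks a SOCK_WAKEUP pseudo message on
 * l->wakeupq when the backlog is full; link_prepare_wakeup() later moves
 * those messages to l->inputq once the per-importance limits leave room,
 * which is what ultimately wakes the blocked senders. The admission test
 * it applies per pending chain is essentially:
 */
#if 0	/* illustrative only; imp is the message importance level */
	bool would_fit = (pnd[imp] + l->backlog[imp].len) < l->backlog[imp].limit;
#endif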
 835
 836void tipc_link_reset(struct tipc_link *l)
 837{
 838	l->peer_session = ANY_SESSION;
 839	l->session++;
 840	l->mtu = l->advertised_mtu;
 841	__skb_queue_purge(&l->transmq);
 842	__skb_queue_purge(&l->deferdq);
 843	skb_queue_splice_init(&l->wakeupq, l->inputq);
 844	__skb_queue_purge(&l->backlogq);
 845	l->backlog[TIPC_LOW_IMPORTANCE].len = 0;
 846	l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0;
 847	l->backlog[TIPC_HIGH_IMPORTANCE].len = 0;
 848	l->backlog[TIPC_CRITICAL_IMPORTANCE].len = 0;
 849	l->backlog[TIPC_SYSTEM_IMPORTANCE].len = 0;
 850	kfree_skb(l->reasm_buf);
 851	kfree_skb(l->failover_reasm_skb);
 852	l->reasm_buf = NULL;
 853	l->failover_reasm_skb = NULL;
 854	l->rcv_unacked = 0;
 855	l->snd_nxt = 1;
 856	l->rcv_nxt = 1;
 857	l->acked = 0;
 858	l->silent_intv_cnt = 0;
 859	l->rst_cnt = 0;
 860	l->stale_count = 0;
 861	l->bc_peer_is_up = false;
 862	memset(&l->mon_state, 0, sizeof(l->mon_state));
 863	tipc_link_reset_stats(l);
 864}
 865
 866/**
 867 * tipc_link_xmit(): enqueue buffer list according to queue situation
 868 * @l: link to use
 869 * @list: chain of buffers containing message
 870 * @xmitq: returned list of packets to be sent by caller
 871 *
 872 * Consumes the buffer chain, except when returning -ELINKCONG,
 873 * since the caller then may want to make more send attempts.
 874 * Returns 0 on success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
 875 * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
 876 */
 877int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
 878		   struct sk_buff_head *xmitq)
 879{
 880	struct tipc_msg *hdr = buf_msg(skb_peek(list));
 881	unsigned int maxwin = l->window;
 882	unsigned int i, imp = msg_importance(hdr);
 883	unsigned int mtu = l->mtu;
 884	u16 ack = l->rcv_nxt - 1;
 885	u16 seqno = l->snd_nxt;
 886	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
 887	struct sk_buff_head *transmq = &l->transmq;
 888	struct sk_buff_head *backlogq = &l->backlogq;
 889	struct sk_buff *skb, *_skb, *bskb;
 890	int pkt_cnt = skb_queue_len(list);
 891
 892	/* Match msg importance against this and all higher backlog limits: */
 893	if (!skb_queue_empty(backlogq)) {
 894		for (i = imp; i <= TIPC_SYSTEM_IMPORTANCE; i++) {
 895			if (unlikely(l->backlog[i].len >= l->backlog[i].limit))
 896				return link_schedule_user(l, list);
 897		}
 898	}
 899	if (unlikely(msg_size(hdr) > mtu)) {
 900		skb_queue_purge(list);
 901		return -EMSGSIZE;
 902	}
 903
 904	if (pkt_cnt > 1) {
 905		l->stats.sent_fragmented++;
 906		l->stats.sent_fragments += pkt_cnt;
 907	}
 908
 909	/* Prepare each packet for sending, and add to relevant queue: */
 910	while (skb_queue_len(list)) {
 911		skb = skb_peek(list);
 912		hdr = buf_msg(skb);
 913		msg_set_seqno(hdr, seqno);
 914		msg_set_ack(hdr, ack);
 915		msg_set_bcast_ack(hdr, bc_ack);
 916
 917		if (likely(skb_queue_len(transmq) < maxwin)) {
 918			_skb = skb_clone(skb, GFP_ATOMIC);
 919			if (!_skb) {
 920				skb_queue_purge(list);
 921				return -ENOBUFS;
 922			}
 923			__skb_dequeue(list);
 924			__skb_queue_tail(transmq, skb);
 925			__skb_queue_tail(xmitq, _skb);
 926			TIPC_SKB_CB(skb)->ackers = l->ackers;
 927			l->rcv_unacked = 0;
 928			l->stats.sent_pkts++;
 929			seqno++;
 930			continue;
 931		}
 932		if (tipc_msg_bundle(skb_peek_tail(backlogq), hdr, mtu)) {
 933			kfree_skb(__skb_dequeue(list));
 934			l->stats.sent_bundled++;
 935			continue;
 936		}
 937		if (tipc_msg_make_bundle(&bskb, hdr, mtu, l->addr)) {
 938			kfree_skb(__skb_dequeue(list));
 939			__skb_queue_tail(backlogq, bskb);
 940			l->backlog[msg_importance(buf_msg(bskb))].len++;
 941			l->stats.sent_bundled++;
 942			l->stats.sent_bundles++;
 943			continue;
 944		}
 945		l->backlog[imp].len += skb_queue_len(list);
 946		skb_queue_splice_tail_init(list, backlogq);
 947	}
 948	l->snd_nxt = seqno;
 949	return 0;
 950}
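
/* Editorial sketch, not part of the original source: a minimal send
 * sequence, assuming the caller has already built the buffer chain in
 * "list" and holds the required lock:
 */
#if 0	/* illustrative only */
	struct sk_buff_head xmitq;
	int rc;

	__skb_queue_head_init(&xmitq);
	rc = tipc_link_xmit(l, &list, &xmitq);
	if (rc == -ELINKCONG)
		;	/* list was not consumed; sender may retry later */
	/* on success, packets queued on xmitq go to the bearer */
#endif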
 951
 952void tipc_link_advance_backlog(struct tipc_link *l, struct sk_buff_head *xmitq)
 953{
 954	struct sk_buff *skb, *_skb;
 955	struct tipc_msg *hdr;
 956	u16 seqno = l->snd_nxt;
 957	u16 ack = l->rcv_nxt - 1;
 958	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
 959
 960	while (skb_queue_len(&l->transmq) < l->window) {
 961		skb = skb_peek(&l->backlogq);
 962		if (!skb)
 963			break;
 964		_skb = skb_clone(skb, GFP_ATOMIC);
 965		if (!_skb)
 966			break;
 967		__skb_dequeue(&l->backlogq);
 968		hdr = buf_msg(skb);
 969		l->backlog[msg_importance(hdr)].len--;
 970		__skb_queue_tail(&l->transmq, skb);
 971		__skb_queue_tail(xmitq, _skb);
 972		TIPC_SKB_CB(skb)->ackers = l->ackers;
 973		msg_set_seqno(hdr, seqno);
 974		msg_set_ack(hdr, ack);
 975		msg_set_bcast_ack(hdr, bc_ack);
 976		l->rcv_unacked = 0;
 977		l->stats.sent_pkts++;
 978		seqno++;
 979	}
 980	l->snd_nxt = seqno;
 981}
 982
 983static void link_retransmit_failure(struct tipc_link *l, struct sk_buff *skb)
 984{
 985	struct tipc_msg *hdr = buf_msg(skb);
 986
 987	pr_warn("Retransmission failure on link <%s>\n", l->name);
 988	link_print(l, "Resetting link ");
 989	pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
 990		msg_user(hdr), msg_type(hdr), msg_size(hdr), msg_errcode(hdr));
 991	pr_info("sqno %u, prev: %x, src: %x\n",
 992		msg_seqno(hdr), msg_prevnode(hdr), msg_orignode(hdr));
 993}
 994
 995int tipc_link_retrans(struct tipc_link *l, u16 from, u16 to,
 996		      struct sk_buff_head *xmitq)
 997{
 998	struct sk_buff *_skb, *skb = skb_peek(&l->transmq);
 999	struct tipc_msg *hdr;
1000	u16 ack = l->rcv_nxt - 1;
1001	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
1002
1003	if (!skb)
1004		return 0;
1005
1006	/* Detect repeated retransmit failures on same packet */
1007	if (likely(l->last_retransm != buf_seqno(skb))) {
1008		l->last_retransm = buf_seqno(skb);
1009		l->stale_count = 1;
1010	} else if (++l->stale_count > 100) {
1011		link_retransmit_failure(l, skb);
1012		return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
1013	}
1014
1015	/* Move forward to where retransmission should start */
1016	skb_queue_walk(&l->transmq, skb) {
1017		if (!less(buf_seqno(skb), from))
1018			break;
1019	}
1020
1021	skb_queue_walk_from(&l->transmq, skb) {
1022		if (more(buf_seqno(skb), to))
1023			break;
1024		hdr = buf_msg(skb);
1025		_skb = __pskb_copy(skb, MIN_H_SIZE, GFP_ATOMIC);
1026		if (!_skb)
1027			return 0;
1028		hdr = buf_msg(_skb);
1029		msg_set_ack(hdr, ack);
1030		msg_set_bcast_ack(hdr, bc_ack);
1031		_skb->priority = TC_PRIO_CONTROL;
1032		__skb_queue_tail(xmitq, _skb);
1033		l->stats.retransmitted++;
1034	}
1035	return 0;
1036}
1037
1038/* tipc_data_input - deliver data and name distr msgs to upper layer
1039 *
1040 * Consumes buffer if message is of right type
1041 * Node lock must be held
1042 */
1043static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb,
1044			    struct sk_buff_head *inputq)
1045{
1046	switch (msg_user(buf_msg(skb))) {
1047	case TIPC_LOW_IMPORTANCE:
1048	case TIPC_MEDIUM_IMPORTANCE:
1049	case TIPC_HIGH_IMPORTANCE:
1050	case TIPC_CRITICAL_IMPORTANCE:
1051	case CONN_MANAGER:
1052		skb_queue_tail(inputq, skb);
1053		return true;
1054	case NAME_DISTRIBUTOR:
1055		l->bc_rcvlink->state = LINK_ESTABLISHED;
1056		skb_queue_tail(l->namedq, skb);
1057		return true;
1058	case MSG_BUNDLER:
1059	case TUNNEL_PROTOCOL:
1060	case MSG_FRAGMENTER:
1061	case BCAST_PROTOCOL:
1062		return false;
1063	default:
1064		pr_warn("Dropping received illegal msg type\n");
1065		kfree_skb(skb);
1066		return false;
1067	}
1068}
1069
1070/* tipc_link_input - process packet that has passed link protocol check
1071 *
1072 * Consumes buffer
1073 */
1074static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb,
1075			   struct sk_buff_head *inputq)
1076{
1077	struct tipc_msg *hdr = buf_msg(skb);
1078	struct sk_buff **reasm_skb = &l->reasm_buf;
1079	struct sk_buff *iskb;
1080	struct sk_buff_head tmpq;
1081	int usr = msg_user(hdr);
1082	int rc = 0;
1083	int pos = 0;
1084	int ipos = 0;
1085
1086	if (unlikely(usr == TUNNEL_PROTOCOL)) {
1087		if (msg_type(hdr) == SYNCH_MSG) {
1088			__skb_queue_purge(&l->deferdq);
1089			goto drop;
1090		}
1091		if (!tipc_msg_extract(skb, &iskb, &ipos))
1092			return rc;
1093		kfree_skb(skb);
1094		skb = iskb;
1095		hdr = buf_msg(skb);
1096		if (less(msg_seqno(hdr), l->drop_point))
1097			goto drop;
1098		if (tipc_data_input(l, skb, inputq))
1099			return rc;
1100		usr = msg_user(hdr);
1101		reasm_skb = &l->failover_reasm_skb;
1102	}
1103
1104	if (usr == MSG_BUNDLER) {
1105		skb_queue_head_init(&tmpq);
1106		l->stats.recv_bundles++;
1107		l->stats.recv_bundled += msg_msgcnt(hdr);
1108		while (tipc_msg_extract(skb, &iskb, &pos))
1109			tipc_data_input(l, iskb, &tmpq);
1110		tipc_skb_queue_splice_tail(&tmpq, inputq);
1111		return 0;
1112	} else if (usr == MSG_FRAGMENTER) {
1113		l->stats.recv_fragments++;
1114		if (tipc_buf_append(reasm_skb, &skb)) {
1115			l->stats.recv_fragmented++;
1116			tipc_data_input(l, skb, inputq);
1117		} else if (!*reasm_skb && !link_is_bc_rcvlink(l)) {
1118			pr_warn_ratelimited("Unable to build fragment list\n");
1119			return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
1120		}
1121		return 0;
1122	} else if (usr == BCAST_PROTOCOL) {
1123		tipc_bcast_lock(l->net);
1124		tipc_link_bc_init_rcv(l->bc_rcvlink, hdr);
1125		tipc_bcast_unlock(l->net);
1126	}
1127drop:
1128	kfree_skb(skb);
1129	return 0;
1130}
1131
1132static bool tipc_link_release_pkts(struct tipc_link *l, u16 acked)
1133{
1134	bool released = false;
1135	struct sk_buff *skb, *tmp;
1136
1137	skb_queue_walk_safe(&l->transmq, skb, tmp) {
1138		if (more(buf_seqno(skb), acked))
1139			break;
1140		__skb_unlink(skb, &l->transmq);
1141		kfree_skb(skb);
1142		released = true;
1143	}
1144	return released;
1145}
1146
1147/* tipc_link_build_state_msg: prepare link state message for transmission
1148 *
1149 * Note that sending of broadcast ack is coordinated among nodes, to reduce
1150 * risk of ack storms towards the sender
1151 */
1152int tipc_link_build_state_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
1153{
1154	if (!l)
1155		return 0;
1156
1157	/* Broadcast ACK must be sent via a unicast link => defer to caller */
1158	if (link_is_bc_rcvlink(l)) {
1159		if (((l->rcv_nxt ^ tipc_own_addr(l->net)) & 0xf) != 0xf)
1160			return 0;
1161		l->rcv_unacked = 0;
1162
1163		/* Use snd_nxt to store peer's snd_nxt in broadcast rcv link */
1164		l->snd_nxt = l->rcv_nxt;
1165		return TIPC_LINK_SND_STATE;
1166	}
1167
1168	/* Unicast ACK */
1169	l->rcv_unacked = 0;
1170	l->stats.sent_acks++;
1171	tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, xmitq);
1172	return 0;
1173}
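
/* Editorial worked example, not part of the original source: the filter
 * above spreads broadcast acks across nodes. With rcv_nxt == 0x12, only a
 * node whose address ends in nibble 0xd satisfies
 * ((0x12 ^ addr) & 0xf) == 0xf, so roughly one node in sixteen acks any
 * given packet number, and ack storms towards the sender are avoided.
 */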
1174
1175/* tipc_link_build_reset_msg: prepare link RESET or ACTIVATE message
1176 */
1177void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
1178{
1179	int mtyp = RESET_MSG;
1180	struct sk_buff *skb;
1181
1182	if (l->state == LINK_ESTABLISHING)
1183		mtyp = ACTIVATE_MSG;
1184
1185	tipc_link_build_proto_msg(l, mtyp, 0, 0, 0, 0, xmitq);
1186
1187	/* Inform peer that this endpoint is going down if applicable */
1188	skb = skb_peek_tail(xmitq);
1189	if (skb && (l->state == LINK_RESET))
1190		msg_set_peer_stopping(buf_msg(skb), 1);
1191}
1192
1193/* tipc_link_build_nack_msg: prepare link nack message for transmission
1194 * Note that sending of broadcast NACK is coordinated among nodes, to
1195 * reduce the risk of NACK storms towards the sender
1196 */
1197static int tipc_link_build_nack_msg(struct tipc_link *l,
1198				    struct sk_buff_head *xmitq)
1199{
1200	u32 def_cnt = ++l->stats.deferred_recv;
1201	int match1, match2;
1202
1203	if (link_is_bc_rcvlink(l)) {
1204		match1 = def_cnt & 0xf;
1205		match2 = tipc_own_addr(l->net) & 0xf;
1206		if (match1 == match2)
1207			return TIPC_LINK_SND_STATE;
1208		return 0;
1209	}
1210
1211	if ((skb_queue_len(&l->deferdq) == 1) || !(def_cnt % TIPC_NACK_INTV))
1212		tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, xmitq);
1213	return 0;
1214}
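
/* Editorial worked example, not part of the original source: broadcast
 * NACKs are staggered the same way. A node only reports a gap when its
 * deferred-packet count and its own address agree in their low nibble
 * (def_cnt & 0xf == addr & 0xf), so NACKs from different nodes for the
 * same loss arrive spread out rather than as a storm.
 */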
1215
1216/* tipc_link_rcv - process TIPC packets/messages arriving from off-node
1217 * @l: the link that should handle the message
1218 * @skb: TIPC packet
1219 * @xmitq: queue to place packets to be sent after this call
1220 */
1221int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
1222		  struct sk_buff_head *xmitq)
1223{
1224	struct sk_buff_head *defq = &l->deferdq;
1225	struct tipc_msg *hdr;
1226	u16 seqno, rcv_nxt, win_lim;
1227	int rc = 0;
1228
1229	do {
1230		hdr = buf_msg(skb);
1231		seqno = msg_seqno(hdr);
1232		rcv_nxt = l->rcv_nxt;
1233		win_lim = rcv_nxt + TIPC_MAX_LINK_WIN;
1234
1235		/* Verify and update link state */
1236		if (unlikely(msg_user(hdr) == LINK_PROTOCOL))
1237			return tipc_link_proto_rcv(l, skb, xmitq);
1238
1239		if (unlikely(!link_is_up(l))) {
1240			if (l->state == LINK_ESTABLISHING)
1241				rc = TIPC_LINK_UP_EVT;
1242			goto drop;
1243		}
1244
1245		/* Don't send probe at next timeout expiration */
1246		l->silent_intv_cnt = 0;
1247
1248		/* Drop if outside receive window */
1249		if (unlikely(less(seqno, rcv_nxt) || more(seqno, win_lim))) {
1250			l->stats.duplicates++;
1251			goto drop;
1252		}
1253
1254		/* Forward queues and wake up waiting users */
1255		if (likely(tipc_link_release_pkts(l, msg_ack(hdr)))) {
1256			tipc_link_advance_backlog(l, xmitq);
1257			if (unlikely(!skb_queue_empty(&l->wakeupq)))
1258				link_prepare_wakeup(l);
1259		}
1260
1261		/* Defer delivery if sequence gap */
1262		if (unlikely(seqno != rcv_nxt)) {
1263			__tipc_skb_queue_sorted(defq, seqno, skb);
1264			rc |= tipc_link_build_nack_msg(l, xmitq);
1265			break;
1266		}
1267
1268		/* Deliver packet */
1269		l->rcv_nxt++;
1270		l->stats.recv_pkts++;
1271		if (!tipc_data_input(l, skb, l->inputq))
1272			rc |= tipc_link_input(l, skb, l->inputq);
1273		if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN))
1274			rc |= tipc_link_build_state_msg(l, xmitq);
1275		if (unlikely(rc & ~TIPC_LINK_SND_STATE))
1276			break;
1277	} while ((skb = __skb_dequeue(defq)));
1278
1279	return rc;
1280drop:
1281	kfree_skb(skb);
1282	return rc;
1283}
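
/* Editorial sketch, not part of the original source: the node layer feeds
 * each arriving packet through tipc_link_rcv() and afterwards flushes
 * @xmitq and acts on the returned flags. A hypothetical caller:
 */
#if 0	/* illustrative only */
	struct sk_buff_head xmitq;
	int rc;

	__skb_queue_head_init(&xmitq);
	rc = tipc_link_rcv(l, skb, &xmitq);
	if (rc & TIPC_LINK_UP_EVT)
		;	/* complete link establishment */
	/* ...transmit any protocol messages queued on xmitq... */
#endif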
1284
1285static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
1286				      u16 rcvgap, int tolerance, int priority,
1287				      struct sk_buff_head *xmitq)
1288{
1289	struct tipc_link *bcl = l->bc_rcvlink;
1290	struct sk_buff *skb;
1291	struct tipc_msg *hdr;
1292	struct sk_buff_head *dfq = &l->deferdq;
1293	bool node_up = link_is_up(bcl);
1294	struct tipc_mon_state *mstate = &l->mon_state;
1295	int dlen = 0;
1296	void *data;
1297
1298	/* Don't send protocol message during reset or link failover */
1299	if (tipc_link_is_blocked(l))
1300		return;
1301
1302	if (!tipc_link_is_up(l) && (mtyp == STATE_MSG))
1303		return;
1304
1305	if (!skb_queue_empty(dfq))
1306		rcvgap = buf_seqno(skb_peek(dfq)) - l->rcv_nxt;
1307
1308	skb = tipc_msg_create(LINK_PROTOCOL, mtyp, INT_H_SIZE,
1309			      tipc_max_domain_size, l->addr,
1310			      tipc_own_addr(l->net), 0, 0, 0);
1311	if (!skb)
1312		return;
1313
1314	hdr = buf_msg(skb);
1315	data = msg_data(hdr);
1316	msg_set_session(hdr, l->session);
1317	msg_set_bearer_id(hdr, l->bearer_id);
1318	msg_set_net_plane(hdr, l->net_plane);
1319	msg_set_next_sent(hdr, l->snd_nxt);
1320	msg_set_ack(hdr, l->rcv_nxt - 1);
1321	msg_set_bcast_ack(hdr, bcl->rcv_nxt - 1);
1322	msg_set_bc_ack_invalid(hdr, !node_up);
1323	msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
1324	msg_set_link_tolerance(hdr, tolerance);
1325	msg_set_linkprio(hdr, priority);
1326	msg_set_redundant_link(hdr, node_up);
1327	msg_set_seq_gap(hdr, 0);
1328	msg_set_seqno(hdr, l->snd_nxt + U16_MAX / 2);
1329
1330	if (mtyp == STATE_MSG) {
1331		msg_set_seq_gap(hdr, rcvgap);
1332		msg_set_bc_gap(hdr, link_bc_rcv_gap(bcl));
1333		msg_set_probe(hdr, probe);
1334		tipc_mon_prep(l->net, data, &dlen, mstate, l->bearer_id);
1335		msg_set_size(hdr, INT_H_SIZE + dlen);
1336		skb_trim(skb, INT_H_SIZE + dlen);
1337		l->stats.sent_states++;
1338		l->rcv_unacked = 0;
1339	} else {
1340		/* RESET_MSG or ACTIVATE_MSG */
1341		msg_set_max_pkt(hdr, l->advertised_mtu);
1342		strcpy(data, l->if_name);
1343		msg_set_size(hdr, INT_H_SIZE + TIPC_MAX_IF_NAME);
1344		skb_trim(skb, INT_H_SIZE + TIPC_MAX_IF_NAME);
1345	}
1346	if (probe)
1347		l->stats.sent_probes++;
1348	if (rcvgap)
1349		l->stats.sent_nacks++;
1350	skb->priority = TC_PRIO_CONTROL;
1351	__skb_queue_tail(xmitq, skb);
1352}
1353
1354/* tipc_link_tnl_prepare(): prepare and return a list of tunnel packets
1355 * with contents of the link's transmit and backlog queues.
1356 */
1357void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
1358			   int mtyp, struct sk_buff_head *xmitq)
1359{
1360	struct sk_buff *skb, *tnlskb;
1361	struct tipc_msg *hdr, tnlhdr;
1362	struct sk_buff_head *queue = &l->transmq;
1363	struct sk_buff_head tmpxq, tnlq;
1364	u16 pktlen, pktcnt, seqno = l->snd_nxt;
1365
1366	if (!tnl)
1367		return;
1368
1369	skb_queue_head_init(&tnlq);
1370	skb_queue_head_init(&tmpxq);
1371
1372	/* At least one packet required for safe algorithm => add dummy */
1373	skb = tipc_msg_create(TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
1374			      BASIC_H_SIZE, 0, l->addr, tipc_own_addr(l->net),
1375			      0, 0, TIPC_ERR_NO_PORT);
1376	if (!skb) {
1377		pr_warn("%sunable to create tunnel packet\n", link_co_err);
1378		return;
1379	}
1380	skb_queue_tail(&tnlq, skb);
1381	tipc_link_xmit(l, &tnlq, &tmpxq);
1382	__skb_queue_purge(&tmpxq);
1383
1384	/* Initialize reusable tunnel packet header */
1385	tipc_msg_init(tipc_own_addr(l->net), &tnlhdr, TUNNEL_PROTOCOL,
1386		      mtyp, INT_H_SIZE, l->addr);
1387	pktcnt = skb_queue_len(&l->transmq) + skb_queue_len(&l->backlogq);
1388	msg_set_msgcnt(&tnlhdr, pktcnt);
1389	msg_set_bearer_id(&tnlhdr, l->peer_bearer_id);
1390tnl:
1391	/* Wrap each packet into a tunnel packet */
1392	skb_queue_walk(queue, skb) {
1393		hdr = buf_msg(skb);
1394		if (queue == &l->backlogq)
1395			msg_set_seqno(hdr, seqno++);
1396		pktlen = msg_size(hdr);
1397		msg_set_size(&tnlhdr, pktlen + INT_H_SIZE);
1398		tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE, GFP_ATOMIC);
1399		if (!tnlskb) {
1400			pr_warn("%sunable to send packet\n", link_co_err);
1401			return;
1402		}
1403		skb_copy_to_linear_data(tnlskb, &tnlhdr, INT_H_SIZE);
1404		skb_copy_to_linear_data_offset(tnlskb, INT_H_SIZE, hdr, pktlen);
1405		__skb_queue_tail(&tnlq, tnlskb);
1406	}
1407	if (queue != &l->backlogq) {
1408		queue = &l->backlogq;
1409		goto tnl;
1410	}
1411
1412	tipc_link_xmit(tnl, &tnlq, xmitq);
1413
1414	if (mtyp == FAILOVER_MSG) {
1415		tnl->drop_point = l->rcv_nxt;
1416		tnl->failover_reasm_skb = l->reasm_buf;
1417		l->reasm_buf = NULL;
1418	}
1419}
1420
1421/* tipc_link_proto_rcv(): receive link level protocol message:
1422 * Note that network plane id propagates through the network, and may
1423 * change at any time. The node with lowest numerical id determines
1424 * network plane.
1425 */
1426static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
1427			       struct sk_buff_head *xmitq)
1428{
1429	struct tipc_msg *hdr = buf_msg(skb);
1430	u16 rcvgap = 0;
1431	u16 ack = msg_ack(hdr);
1432	u16 gap = msg_seq_gap(hdr);
1433	u16 peers_snd_nxt =  msg_next_sent(hdr);
1434	u16 peers_tol = msg_link_tolerance(hdr);
1435	u16 peers_prio = msg_linkprio(hdr);
1436	u16 rcv_nxt = l->rcv_nxt;
1437	u16 dlen = msg_data_sz(hdr);
1438	int mtyp = msg_type(hdr);
1439	void *data;
1440	char *if_name;
1441	int rc = 0;
1442
1443	if (tipc_link_is_blocked(l) || !xmitq)
1444		goto exit;
1445
1446	if (tipc_own_addr(l->net) > msg_prevnode(hdr))
1447		l->net_plane = msg_net_plane(hdr);
1448
1449	skb_linearize(skb);
1450	hdr = buf_msg(skb);
1451	data = msg_data(hdr);
1452
1453	switch (mtyp) {
1454	case RESET_MSG:
1455
1456		/* Ignore duplicate RESET with old session number */
1457		if ((less_eq(msg_session(hdr), l->peer_session)) &&
1458		    (l->peer_session != ANY_SESSION))
1459			break;
1460		/* fall thru' */
1461
1462	case ACTIVATE_MSG:
1463
1464		/* Complete own link name with peer's interface name */
1465		if_name =  strrchr(l->name, ':') + 1;
1466		if (sizeof(l->name) - (if_name - l->name) <= TIPC_MAX_IF_NAME)
1467			break;
1468		if (msg_data_sz(hdr) < TIPC_MAX_IF_NAME)
1469			break;
1470		strncpy(if_name, data, TIPC_MAX_IF_NAME);
1471
1472		/* Update own tolerance if peer indicates a non-zero value */
1473		if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
1474			l->tolerance = peers_tol;
1475
1476		/* Update own priority if peer's priority is higher */
1477		if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
1478			l->priority = peers_prio;
1479
1480		/* ACTIVATE_MSG serves as PEER_RESET if link is already down */
1481		if (msg_peer_stopping(hdr))
1482			rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
1483		else if ((mtyp == RESET_MSG) || !link_is_up(l))
1484			rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);
1485
1486		/* ACTIVATE_MSG takes up link if it was already locally reset */
1487		if ((mtyp == ACTIVATE_MSG) && (l->state == LINK_ESTABLISHING))
1488			rc = TIPC_LINK_UP_EVT;
1489
1490		l->peer_session = msg_session(hdr);
1491		l->peer_bearer_id = msg_bearer_id(hdr);
1492		if (l->mtu > msg_max_pkt(hdr))
1493			l->mtu = msg_max_pkt(hdr);
1494		break;
1495
1496	case STATE_MSG:
1497
1498		/* Update own tolerance if peer indicates a non-zero value */
1499		if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
1500			l->tolerance = peers_tol;
1501
1502		/* Update own prio if peer indicates a different value */
1503		if ((peers_prio != l->priority) &&
1504		    in_range(peers_prio, 1, TIPC_MAX_LINK_PRI)) {
1505			l->priority = peers_prio;
1506			rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
1507		}
1508
1509		l->silent_intv_cnt = 0;
1510		l->stats.recv_states++;
1511		if (msg_probe(hdr))
1512			l->stats.recv_probes++;
1513
1514		if (!link_is_up(l)) {
1515			if (l->state == LINK_ESTABLISHING)
1516				rc = TIPC_LINK_UP_EVT;
1517			break;
1518		}
1519		tipc_mon_rcv(l->net, data, dlen, l->addr,
1520			     &l->mon_state, l->bearer_id);
1521
1522		/* Send NACK if peer has sent pkts we haven't received yet */
1523		if (more(peers_snd_nxt, rcv_nxt) && !tipc_link_is_synching(l))
1524			rcvgap = peers_snd_nxt - l->rcv_nxt;
1525		if (rcvgap || (msg_probe(hdr)))
1526			tipc_link_build_proto_msg(l, STATE_MSG, 0, rcvgap,
1527						  0, 0, xmitq);
1528		tipc_link_release_pkts(l, ack);
1529
1530		/* If NACK, retransmit will now start at right position */
1531		if (gap) {
1532			rc = tipc_link_retrans(l, ack + 1, ack + gap, xmitq);
1533			l->stats.recv_nacks++;
1534		}
1535
1536		tipc_link_advance_backlog(l, xmitq);
1537		if (unlikely(!skb_queue_empty(&l->wakeupq)))
1538			link_prepare_wakeup(l);
1539	}
1540exit:
1541	kfree_skb(skb);
1542	return rc;
1543}
1544
1545/* tipc_link_build_bc_proto_msg() - create broadcast protocol message
1546 */
1547static bool tipc_link_build_bc_proto_msg(struct tipc_link *l, bool bcast,
1548					 u16 peers_snd_nxt,
1549					 struct sk_buff_head *xmitq)
1550{
1551	struct sk_buff *skb;
1552	struct tipc_msg *hdr;
1553	struct sk_buff *dfrd_skb = skb_peek(&l->deferdq);
1554	u16 ack = l->rcv_nxt - 1;
1555	u16 gap_to = peers_snd_nxt - 1;
1556
1557	skb = tipc_msg_create(BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE,
1558			      0, l->addr, tipc_own_addr(l->net), 0, 0, 0);
1559	if (!skb)
1560		return false;
1561	hdr = buf_msg(skb);
1562	msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
1563	msg_set_bcast_ack(hdr, ack);
1564	msg_set_bcgap_after(hdr, ack);
1565	if (dfrd_skb)
1566		gap_to = buf_seqno(dfrd_skb) - 1;
1567	msg_set_bcgap_to(hdr, gap_to);
1568	msg_set_non_seq(hdr, bcast);
1569	__skb_queue_tail(xmitq, skb);
1570	return true;
1571}
1572
1573/* tipc_link_build_bc_init_msg() - synchronize broadcast link endpoints.
1574 *
1575 * Give a newly added peer node the sequence number where it should
1576 * start receiving and acking broadcast packets.
1577 */
1578static void tipc_link_build_bc_init_msg(struct tipc_link *l,
1579					struct sk_buff_head *xmitq)
1580{
1581	struct sk_buff_head list;
1582
1583	__skb_queue_head_init(&list);
1584	if (!tipc_link_build_bc_proto_msg(l->bc_rcvlink, false, 0, &list))
1585		return;
1586	msg_set_bc_ack_invalid(buf_msg(skb_peek(&list)), true);
1587	tipc_link_xmit(l, &list, xmitq);
1588}
1589
1590/* tipc_link_bc_init_rcv - receive initial broadcast synch data from peer
1591 */
1592void tipc_link_bc_init_rcv(struct tipc_link *l, struct tipc_msg *hdr)
1593{
1594	int mtyp = msg_type(hdr);
1595	u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);
1596
1597	if (link_is_up(l))
1598		return;
1599
1600	if (msg_user(hdr) == BCAST_PROTOCOL) {
1601		l->rcv_nxt = peers_snd_nxt;
1602		l->state = LINK_ESTABLISHED;
1603		return;
1604	}
1605
1606	if (l->peer_caps & TIPC_BCAST_SYNCH)
1607		return;
1608
1609	if (msg_peer_node_is_up(hdr))
1610		return;
1611
1612	/* Compatibility: accept older, less safe initial synch data */
1613	if ((mtyp == RESET_MSG) || (mtyp == ACTIVATE_MSG))
1614		l->rcv_nxt = peers_snd_nxt;
1615}
1616
1617/* link_bc_retr_eval() - check if the indicated range can be retransmitted now
1618 * - Adjust permitted range if there is overlap with previous retransmission
1619 */
1620static bool link_bc_retr_eval(struct tipc_link *l, u16 *from, u16 *to)
1621{
1622	unsigned long elapsed = jiffies_to_msecs(jiffies - l->prev_retr);
1623
1624	if (less(*to, *from))
1625		return false;
1626
1627	/* New retransmission request */
1628	if ((elapsed > TIPC_BC_RETR_LIMIT) ||
1629	    less(*to, l->prev_from) || more(*from, l->prev_to)) {
1630		l->prev_from = *from;
1631		l->prev_to = *to;
1632		l->prev_retr = jiffies;
1633		return true;
1634	}
1635
1636	/* Inside range of previous retransmit */
1637	if (!less(*from, l->prev_from) && !more(*to, l->prev_to))
1638		return false;
1639
1640	/* Fully or partially outside previous range => exclude overlap */
1641	if (less(*from, l->prev_from)) {
1642		*to = l->prev_from - 1;
1643		l->prev_from = *from;
1644	}
1645	if (more(*to, l->prev_to)) {
1646		*from = l->prev_to + 1;
1647		l->prev_to = *to;
1648	}
1649	l->prev_retr = jiffies;
1650	return true;
1651}
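
/* Editorial worked example, not part of the original source: suppose the
 * previous broadcast retransmission covered [10, 20] and is still within
 * TIPC_BC_RETR_LIMIT. A new request for [15, 30] overlaps at the top, so
 * the function trims it to [21, 30] and permits it; a request for
 * [12, 18] lies fully inside the previous range and is rejected.
 */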
1652
1653/* tipc_link_bc_sync_rcv - update rcv link according to peer's send state
1654 */
1655int tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
1656			  struct sk_buff_head *xmitq)
1657{
1658	struct tipc_link *snd_l = l->bc_sndlink;
1659	u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);
1660	u16 from = msg_bcast_ack(hdr) + 1;
1661	u16 to = from + msg_bc_gap(hdr) - 1;
1662	int rc = 0;
1663
1664	if (!link_is_up(l))
1665		return rc;
1666
1667	if (!msg_peer_node_is_up(hdr))
1668		return rc;
1669
1670	/* Open when peer acknowledges our bcast init msg (pkt #1) */
1671	if (msg_ack(hdr))
1672		l->bc_peer_is_up = true;
1673
1674	if (!l->bc_peer_is_up)
1675		return rc;
1676
1677	l->stats.recv_nacks++;
1678
1679	/* Ignore if peers_snd_nxt goes beyond receive window */
1680	if (more(peers_snd_nxt, l->rcv_nxt + l->window))
1681		return rc;
1682
1683	if (link_bc_retr_eval(snd_l, &from, &to))
1684		rc = tipc_link_retrans(snd_l, from, to, xmitq);
1685
1686	l->snd_nxt = peers_snd_nxt;
1687	if (link_bc_rcv_gap(l))
1688		rc |= TIPC_LINK_SND_STATE;
1689
1690	/* Return now if sender supports nack via STATE messages */
1691	if (l->peer_caps & TIPC_BCAST_STATE_NACK)
1692		return rc;
1693
1694	/* Otherwise, be backwards compatible */
1695
1696	if (!more(peers_snd_nxt, l->rcv_nxt)) {
1697		l->nack_state = BC_NACK_SND_CONDITIONAL;
1698		return 0;
1699	}
1700
1701	/* Don't NACK if one was recently sent or peeked */
1702	if (l->nack_state == BC_NACK_SND_SUPPRESS) {
1703		l->nack_state = BC_NACK_SND_UNCONDITIONAL;
1704		return 0;
1705	}
1706
1707	/* Conditionally delay NACK sending until next synch rcv */
1708	if (l->nack_state == BC_NACK_SND_CONDITIONAL) {
1709		l->nack_state = BC_NACK_SND_UNCONDITIONAL;
1710		if ((peers_snd_nxt - l->rcv_nxt) < TIPC_MIN_LINK_WIN)
1711			return 0;
1712	}
1713
1714	/* Send NACK now but suppress next one */
1715	tipc_link_build_bc_proto_msg(l, true, peers_snd_nxt, xmitq);
1716	l->nack_state = BC_NACK_SND_SUPPRESS;
1717	return 0;
1718}
1719
1720void tipc_link_bc_ack_rcv(struct tipc_link *l, u16 acked,
1721			  struct sk_buff_head *xmitq)
1722{
1723	struct sk_buff *skb, *tmp;
1724	struct tipc_link *snd_l = l->bc_sndlink;
1725
1726	if (!link_is_up(l) || !l->bc_peer_is_up)
1727		return;
1728
1729	if (!more(acked, l->acked))
1730		return;
1731
1732	/* Skip over packets peer has already acked */
1733	skb_queue_walk(&snd_l->transmq, skb) {
1734		if (more(buf_seqno(skb), l->acked))
1735			break;
1736	}
1737
1738	/* Update/release the packets peer is acking now */
1739	skb_queue_walk_from_safe(&snd_l->transmq, skb, tmp) {
1740		if (more(buf_seqno(skb), acked))
1741			break;
1742		if (!--TIPC_SKB_CB(skb)->ackers) {
1743			__skb_unlink(skb, &snd_l->transmq);
1744			kfree_skb(skb);
1745		}
1746	}
1747	l->acked = acked;
1748	tipc_link_advance_backlog(snd_l, xmitq);
1749	if (unlikely(!skb_queue_empty(&snd_l->wakeupq)))
1750		link_prepare_wakeup(snd_l);
1751}
1752
1753/* tipc_link_bc_nack_rcv(): receive broadcast nack message
1754 * This function is here for backwards compatibility, since
1755 * no BCAST_PROTOCOL/STATE messages occur from TIPC v2.5 onwards.
1756 */
1757int tipc_link_bc_nack_rcv(struct tipc_link *l, struct sk_buff *skb,
1758			  struct sk_buff_head *xmitq)
1759{
1760	struct tipc_msg *hdr = buf_msg(skb);
1761	u32 dnode = msg_destnode(hdr);
1762	int mtyp = msg_type(hdr);
1763	u16 acked = msg_bcast_ack(hdr);
1764	u16 from = acked + 1;
1765	u16 to = msg_bcgap_to(hdr);
1766	u16 peers_snd_nxt = to + 1;
1767	int rc = 0;
1768
1769	kfree_skb(skb);
1770
1771	if (!tipc_link_is_up(l) || !l->bc_peer_is_up)
1772		return 0;
1773
1774	if (mtyp != STATE_MSG)
1775		return 0;
1776
1777	if (dnode == tipc_own_addr(l->net)) {
1778		tipc_link_bc_ack_rcv(l, acked, xmitq);
1779		rc = tipc_link_retrans(l->bc_sndlink, from, to, xmitq);
1780		l->stats.recv_nacks++;
1781		return rc;
1782	}
1783
1784	/* Msg for other node => suppress own NACK at next sync if applicable */
1785	if (more(peers_snd_nxt, l->rcv_nxt) && !less(l->rcv_nxt, from))
1786		l->nack_state = BC_NACK_SND_SUPPRESS;
1787
1788	return 0;
1789}
1790
1791void tipc_link_set_queue_limits(struct tipc_link *l, u32 win)
1792{
1793	int max_bulk = TIPC_MAX_PUBLICATIONS / (l->mtu / ITEM_SIZE);
1794
1795	l->window = win;
1796	l->backlog[TIPC_LOW_IMPORTANCE].limit      = max_t(u16, 50, win);
1797	l->backlog[TIPC_MEDIUM_IMPORTANCE].limit   = max_t(u16, 100, win * 2);
1798	l->backlog[TIPC_HIGH_IMPORTANCE].limit     = max_t(u16, 150, win * 3);
1799	l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = max_t(u16, 200, win * 4);
1800	l->backlog[TIPC_SYSTEM_IMPORTANCE].limit   = max_bulk;
1801}
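
/* Editorial worked example, not part of the original source: with win = 50
 * the data backlog limits come out as 50/100/150/200 for low through
 * critical importance (the win-derived values and the floors coincide);
 * with win = 20 the floors dominate and the limits are again
 * 50/100/150/200. Only the system-importance limit varies independently,
 * via max_bulk.
 */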
1802
1803/**
1804 * tipc_link_reset_stats - reset link statistics
1805 * @l: pointer to link
1806 */
1807void tipc_link_reset_stats(struct tipc_link *l)
1808{
1809	memset(&l->stats, 0, sizeof(l->stats));
1810}
1811
1812static void link_print(struct tipc_link *l, const char *str)
1813{
1814	struct sk_buff *hskb = skb_peek(&l->transmq);
1815	u16 head = hskb ? msg_seqno(buf_msg(hskb)) : l->snd_nxt - 1;
1816	u16 tail = l->snd_nxt - 1;
1817
1818	pr_info("%s Link <%s> state %x\n", str, l->name, l->state);
1819	pr_info("XMTQ: %u [%u-%u], BKLGQ: %u, SNDNX: %u, RCVNX: %u\n",
1820		skb_queue_len(&l->transmq), head, tail,
1821		skb_queue_len(&l->backlogq), l->snd_nxt, l->rcv_nxt);
1822}
1823
1824/* Parse and validate nested (link) properties valid for media, bearer and link
1825 */
1826int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[])
1827{
1828	int err;
1829
1830	err = nla_parse_nested(props, TIPC_NLA_PROP_MAX, prop,
1831			       tipc_nl_prop_policy);
1832	if (err)
1833		return err;
1834
1835	if (props[TIPC_NLA_PROP_PRIO]) {
1836		u32 prio;
1837
1838		prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
1839		if (prio > TIPC_MAX_LINK_PRI)
1840			return -EINVAL;
1841	}
1842
1843	if (props[TIPC_NLA_PROP_TOL]) {
1844		u32 tol;
1845
1846		tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
1847		if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
1848			return -EINVAL;
1849	}
1850
1851	if (props[TIPC_NLA_PROP_WIN]) {
1852		u32 win;
1853
1854		win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
1855		if ((win < TIPC_MIN_LINK_WIN) || (win > TIPC_MAX_LINK_WIN))
1856			return -EINVAL;
1857	}
1858
1859	return 0;
1860}
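/*
 * Editorial usage sketch (hypothetical caller, not part of the kernel
 * source): a netlink handler typically validates the nested properties
 * before applying them, e.g. with attrs[] taken from a TIPC_NLA_LINK nest:
 *
 *	struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
 *	u32 tol;
 *
 *	if (tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP], props))
 *		return -EINVAL;
 *	if (props[TIPC_NLA_PROP_TOL])
 *		tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
 */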
1861
1862static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s)
1863{
1864	int i;
1865	struct nlattr *stats;
1866
1867	struct nla_map {
1868		u32 key;
1869		u32 val;
1870	};
1871
1872	struct nla_map map[] = {
1873		{TIPC_NLA_STATS_RX_INFO, 0},
1874		{TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments},
1875		{TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented},
1876		{TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles},
1877		{TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled},
1878		{TIPC_NLA_STATS_TX_INFO, 0},
1879		{TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments},
1880		{TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented},
1881		{TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles},
1882		{TIPC_NLA_STATS_TX_BUNDLED, s->sent_bundled},
1883		{TIPC_NLA_STATS_MSG_PROF_TOT, (s->msg_length_counts) ?
1884			s->msg_length_counts : 1},
1885		{TIPC_NLA_STATS_MSG_LEN_CNT, s->msg_length_counts},
1886		{TIPC_NLA_STATS_MSG_LEN_TOT, s->msg_lengths_total},
1887		{TIPC_NLA_STATS_MSG_LEN_P0, s->msg_length_profile[0]},
1888		{TIPC_NLA_STATS_MSG_LEN_P1, s->msg_length_profile[1]},
1889		{TIPC_NLA_STATS_MSG_LEN_P2, s->msg_length_profile[2]},
1890		{TIPC_NLA_STATS_MSG_LEN_P3, s->msg_length_profile[3]},
1891		{TIPC_NLA_STATS_MSG_LEN_P4, s->msg_length_profile[4]},
1892		{TIPC_NLA_STATS_MSG_LEN_P5, s->msg_length_profile[5]},
1893		{TIPC_NLA_STATS_MSG_LEN_P6, s->msg_length_profile[6]},
1894		{TIPC_NLA_STATS_RX_STATES, s->recv_states},
1895		{TIPC_NLA_STATS_RX_PROBES, s->recv_probes},
1896		{TIPC_NLA_STATS_RX_NACKS, s->recv_nacks},
1897		{TIPC_NLA_STATS_RX_DEFERRED, s->deferred_recv},
1898		{TIPC_NLA_STATS_TX_STATES, s->sent_states},
1899		{TIPC_NLA_STATS_TX_PROBES, s->sent_probes},
1900		{TIPC_NLA_STATS_TX_NACKS, s->sent_nacks},
1901		{TIPC_NLA_STATS_TX_ACKS, s->sent_acks},
1902		{TIPC_NLA_STATS_RETRANSMITTED, s->retransmitted},
1903		{TIPC_NLA_STATS_DUPLICATES, s->duplicates},
1904		{TIPC_NLA_STATS_LINK_CONGS, s->link_congs},
1905		{TIPC_NLA_STATS_MAX_QUEUE, s->max_queue_sz},
1906		{TIPC_NLA_STATS_AVG_QUEUE, s->queue_sz_counts ?
1907			(s->accu_queue_sz / s->queue_sz_counts) : 0}
1908	};
1909
1910	stats = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
1911	if (!stats)
1912		return -EMSGSIZE;
1913
1914	for (i = 0; i <  ARRAY_SIZE(map); i++)
1915		if (nla_put_u32(skb, map[i].key, map[i].val))
1916			goto msg_full;
1917
1918	nla_nest_end(skb, stats);
1919
1920	return 0;
1921msg_full:
1922	nla_nest_cancel(skb, stats);
1923
1924	return -EMSGSIZE;
1925}
1926
1927/* Caller should hold appropriate locks to protect the link */
1928int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
1929		       struct tipc_link *link, int nlflags)
1930{
1931	int err;
1932	void *hdr;
1933	struct nlattr *attrs;
1934	struct nlattr *prop;
1935	struct tipc_net *tn = net_generic(net, tipc_net_id);
1936
1937	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
1938			  nlflags, TIPC_NL_LINK_GET);
1939	if (!hdr)
1940		return -EMSGSIZE;
1941
1942	attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
1943	if (!attrs)
1944		goto msg_full;
1945
1946	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name))
1947		goto attr_msg_full;
1948	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST,
1949			tipc_cluster_mask(tn->own_addr)))
1950		goto attr_msg_full;
1951	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu))
1952		goto attr_msg_full;
1953	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->stats.recv_pkts))
1954		goto attr_msg_full;
1955	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->stats.sent_pkts))
1956		goto attr_msg_full;
1957
1958	if (tipc_link_is_up(link))
1959		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
1960			goto attr_msg_full;
1961	if (link->active)
1962		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE))
1963			goto attr_msg_full;
1964
1965	prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
1966	if (!prop)
1967		goto attr_msg_full;
1968	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
1969		goto prop_msg_full;
1970	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance))
1971		goto prop_msg_full;
1972	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN,
1973			link->window))
1974		goto prop_msg_full;
1975	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
1976		goto prop_msg_full;
1977	nla_nest_end(msg->skb, prop);
1978
1979	err = __tipc_nl_add_stats(msg->skb, &link->stats);
1980	if (err)
1981		goto attr_msg_full;
1982
1983	nla_nest_end(msg->skb, attrs);
1984	genlmsg_end(msg->skb, hdr);
1985
1986	return 0;
1987
1988prop_msg_full:
1989	nla_nest_cancel(msg->skb, prop);
1990attr_msg_full:
1991	nla_nest_cancel(msg->skb, attrs);
1992msg_full:
1993	genlmsg_cancel(msg->skb, hdr);
1994
1995	return -EMSGSIZE;
1996}
1997
1998static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
1999				      struct tipc_stats *stats)
2000{
2001	int i;
2002	struct nlattr *nest;
2003
2004	struct nla_map {
2005		__u32 key;
2006		__u32 val;
2007	};
2008
2009	struct nla_map map[] = {
2010		{TIPC_NLA_STATS_RX_INFO, stats->recv_pkts},
2011		{TIPC_NLA_STATS_RX_FRAGMENTS, stats->recv_fragments},
2012		{TIPC_NLA_STATS_RX_FRAGMENTED, stats->recv_fragmented},
2013		{TIPC_NLA_STATS_RX_BUNDLES, stats->recv_bundles},
2014		{TIPC_NLA_STATS_RX_BUNDLED, stats->recv_bundled},
2015		{TIPC_NLA_STATS_TX_INFO, stats->sent_pkts},
2016		{TIPC_NLA_STATS_TX_FRAGMENTS, stats->sent_fragments},
2017		{TIPC_NLA_STATS_TX_FRAGMENTED, stats->sent_fragmented},
2018		{TIPC_NLA_STATS_TX_BUNDLES, stats->sent_bundles},
2019		{TIPC_NLA_STATS_TX_BUNDLED, stats->sent_bundled},
2020		{TIPC_NLA_STATS_RX_NACKS, stats->recv_nacks},
2021		{TIPC_NLA_STATS_RX_DEFERRED, stats->deferred_recv},
2022		{TIPC_NLA_STATS_TX_NACKS, stats->sent_nacks},
2023		{TIPC_NLA_STATS_TX_ACKS, stats->sent_acks},
2024		{TIPC_NLA_STATS_RETRANSMITTED, stats->retransmitted},
2025		{TIPC_NLA_STATS_DUPLICATES, stats->duplicates},
2026		{TIPC_NLA_STATS_LINK_CONGS, stats->link_congs},
2027		{TIPC_NLA_STATS_MAX_QUEUE, stats->max_queue_sz},
2028		{TIPC_NLA_STATS_AVG_QUEUE, stats->queue_sz_counts ?
2029			(stats->accu_queue_sz / stats->queue_sz_counts) : 0}
2030	};
2031
2032	nest = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
2033	if (!nest)
2034		return -EMSGSIZE;
2035
2036	for (i = 0; i <  ARRAY_SIZE(map); i++)
2037		if (nla_put_u32(skb, map[i].key, map[i].val))
2038			goto msg_full;
2039
2040	nla_nest_end(skb, nest);
2041
2042	return 0;
2043msg_full:
2044	nla_nest_cancel(skb, nest);
2045
2046	return -EMSGSIZE;
2047}
2048
2049int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
2050{
2051	int err;
2052	void *hdr;
2053	struct nlattr *attrs;
2054	struct nlattr *prop;
2055	struct tipc_net *tn = net_generic(net, tipc_net_id);
2056	struct tipc_link *bcl = tn->bcl;
2057
2058	if (!bcl)
2059		return 0;
2060
2061	tipc_bcast_lock(net);
2062
2063	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
2064			  NLM_F_MULTI, TIPC_NL_LINK_GET);
2065	if (!hdr) {
2066		tipc_bcast_unlock(net);
2067		return -EMSGSIZE;
2068	}
2069
2070	attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
2071	if (!attrs)
2072		goto msg_full;
2073
2074	/* The broadcast link is always up */
2075	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
2076		goto attr_msg_full;
2077
2078	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_BROADCAST))
2079		goto attr_msg_full;
2080	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name))
2081		goto attr_msg_full;
2082	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, 0))
2083		goto attr_msg_full;
2084	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, 0))
2085		goto attr_msg_full;
2086
2087	prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
2088	if (!prop)
2089		goto attr_msg_full;
2090	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->window))
2091		goto prop_msg_full;
2092	nla_nest_end(msg->skb, prop);
2093
2094	err = __tipc_nl_add_bc_link_stat(msg->skb, &bcl->stats);
2095	if (err)
2096		goto attr_msg_full;
2097
2098	tipc_bcast_unlock(net);
2099	nla_nest_end(msg->skb, attrs);
2100	genlmsg_end(msg->skb, hdr);
2101
2102	return 0;
2103
2104prop_msg_full:
2105	nla_nest_cancel(msg->skb, prop);
2106attr_msg_full:
2107	nla_nest_cancel(msg->skb, attrs);
2108msg_full:
2109	tipc_bcast_unlock(net);
2110	genlmsg_cancel(msg->skb, hdr);
2111
2112	return -EMSGSIZE;
2113}
2114
2115void tipc_link_set_tolerance(struct tipc_link *l, u32 tol,
2116			     struct sk_buff_head *xmitq)
2117{
2118	l->tolerance = tol;
2119	tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, tol, 0, xmitq);
2120}
2121
2122void tipc_link_set_prio(struct tipc_link *l, u32 prio,
2123			struct sk_buff_head *xmitq)
2124{
2125	l->priority = prio;
2126	tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, prio, xmitq);
2127}
2128
2129void tipc_link_set_abort_limit(struct tipc_link *l, u32 limit)
2130{
2131	l->abort_limit = limit;
2132}
v4.17
  79/**
  80 * struct tipc_link - TIPC link data structure
  81 * @addr: network address of link's peer node
  82 * @name: link name character string
  83 * @media_addr: media address to use when sending messages over link
  84 * @timer: link timer
  85 * @net: pointer to namespace struct
  86 * @refcnt: reference counter for permanent references (owner node & timer)
  87 * @peer_session: link session # being used by peer end of link
  88 * @peer_bearer_id: bearer id used by link's peer endpoint
  89 * @bearer_id: local bearer id used by link
  90 * @tolerance: minimum link continuity loss needed to reset link [in ms]
  91 * @abort_limit: # of unacknowledged continuity probes needed to reset link
  92 * @state: current state of link FSM
  93 * @peer_caps: bitmap describing capabilities of peer node
  94 * @silent_intv_cnt: # of timer intervals without any reception from peer
  95 * @proto_msg: template for control messages generated by link
  96 * @pmsg: convenience pointer to "proto_msg" field
  97 * @priority: current link priority
  98 * @net_plane: current link network plane ('A' through 'H')
  99 * @mon_state: cookie with information needed by link monitor
 100 * @backlog_limit: backlog queue congestion thresholds (indexed by importance)
 101 * @exp_msg_count: # of tunnelled messages expected during link changeover
 102 * @reset_rcv_checkpt: seq # of last acknowledged message at time of link reset
 103 * @mtu: current maximum packet size for this link
 104 * @advertised_mtu: advertised own mtu when link is being established
 105 * @transmitq: queue for sent, non-acked messages
 106 * @backlogq: queue for messages waiting to be sent
 107 * @snt_nxt: next sequence number to use for outbound messages
 108 * @last_retransmitted: sequence number of most recently retransmitted message
 109 * @stale_count: # of identical retransmit requests made by peer
 110 * @ackers: # of peers that need to ack each packet before it can be released
 111 * @acked: seqno of last packet acked by a certain peer. Used for broadcast.
 112 * @rcv_nxt: next sequence number to expect for inbound messages
 113 * @deferred_queue: deferred queue of saved OOS b'cast messages received from node
 114 * @unacked_window: # of inbound messages rx'd without ack'ing back to peer
 115 * @inputq: buffer queue for messages to be delivered upwards
 116 * @namedq: buffer queue for name table messages to be delivered upwards
 117 * @next_out: ptr to first unsent outbound message in queue
 118 * @wakeupq: linked list of wakeup msgs waiting for link congestion to abate
 119 * @long_msg_seq_no: next identifier to use for outbound fragmented messages
 120 * @reasm_buf: head of partially reassembled inbound message fragments
 121 * @bc_rcvr: marks that this is a broadcast receiver link
 122 * @stats: collects statistics regarding link activity
 123 */
 124struct tipc_link {
 125	u32 addr;
 126	char name[TIPC_MAX_LINK_NAME];
 127	struct net *net;
 128
 129	/* Management and link supervision data */
 130	u32 peer_session;
 131	u32 session;
 132	u32 peer_bearer_id;
 133	u32 bearer_id;
 134	u32 tolerance;
 135	u32 abort_limit;
 136	u32 state;
 137	u16 peer_caps;
 138	bool active;
 139	u32 silent_intv_cnt;
 140	char if_name[TIPC_MAX_IF_NAME];
 141	u32 priority;
 142	char net_plane;
 143	struct tipc_mon_state mon_state;
 144	u16 rst_cnt;
 145
 146	/* Failover/synch */
 147	u16 drop_point;
 148	struct sk_buff *failover_reasm_skb;
 149
 150	/* Max packet negotiation */
 151	u16 mtu;
 152	u16 advertised_mtu;
 153
 154	/* Sending */
 155	struct sk_buff_head transmq;
 156	struct sk_buff_head backlogq;
 157	struct {
 158		u16 len;
 159		u16 limit;
 160	} backlog[5];
 161	u16 snd_nxt;
 162	u16 last_retransm;
 163	u16 window;
 164	u32 stale_count;
 165
 166	/* Reception */
 167	u16 rcv_nxt;
 168	u32 rcv_unacked;
 169	struct sk_buff_head deferdq;
 170	struct sk_buff_head *inputq;
 171	struct sk_buff_head *namedq;
 172
 173	/* Congestion handling */
 174	struct sk_buff_head wakeupq;
 175
 176	/* Fragmentation/reassembly */
 177	struct sk_buff *reasm_buf;
 178
 179	/* Broadcast */
 180	u16 ackers;
 181	u16 acked;
 182	struct tipc_link *bc_rcvlink;
 183	struct tipc_link *bc_sndlink;
 184	unsigned long prev_retr;
 185	u16 prev_from;
 186	u16 prev_to;
 187	u8 nack_state;
 188	bool bc_peer_is_up;
 189
 190	/* Statistics */
 191	struct tipc_stats stats;
 192};
 193
 194/*
 195 * Error message prefixes
 196 */
 197static const char *link_co_err = "Link tunneling error, ";
 198static const char *link_rst_msg = "Resetting link ";
 199
 200/* Send states for broadcast NACKs
 201 */
 202enum {
 203	BC_NACK_SND_CONDITIONAL,
 204	BC_NACK_SND_UNCONDITIONAL,
 205	BC_NACK_SND_SUPPRESS,
 206};
 207
 208#define TIPC_BC_RETR_LIMIT 10   /* [ms] */
 209
 210/*
 211 * Interval between NACKs when packets arrive out of order
 212 */
 213#define TIPC_NACK_INTV (TIPC_MIN_LINK_WIN * 2)
 214
 215/* Wildcard value for link session numbers. When it is known that
 216 * peer endpoint is down, any session number must be accepted.
 217 */
 218#define ANY_SESSION 0x10000
 219
 220/* Link FSM states:
 221 */
 222enum {
 223	LINK_ESTABLISHED     = 0xe,
 224	LINK_ESTABLISHING    = 0xe  << 4,
 225	LINK_RESET           = 0x1  << 8,
 226	LINK_RESETTING       = 0x2  << 12,
 227	LINK_PEER_RESET      = 0xd  << 16,
 228	LINK_FAILINGOVER     = 0xf  << 20,
 229	LINK_SYNCHING        = 0xc  << 24
 230};
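/*
 * Editorial note (not part of the kernel source): each state occupies its
 * own nibble of the 32-bit state word, so a set of states can be tested
 * with a single mask, as link_is_up() does below:
 *
 *	LINK_ESTABLISHED = 0x0000000e
 *	LINK_SYNCHING    = 0x0c000000
 *	state & (LINK_ESTABLISHED | LINK_SYNCHING)
 *
 * is non-zero exactly in those two states, since no other state value
 * shares bits with either.
 */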
 231
 232/* Link FSM state checking routines
 233 */
 234static int link_is_up(struct tipc_link *l)
 235{
 236	return l->state & (LINK_ESTABLISHED | LINK_SYNCHING);
 237}
 238
 239static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
 240			       struct sk_buff_head *xmitq);
 241static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
 242				      bool probe_reply, u16 rcvgap,
 243				      int tolerance, int priority,
 244				      struct sk_buff_head *xmitq);
 245static void link_print(struct tipc_link *l, const char *str);
 246static int tipc_link_build_nack_msg(struct tipc_link *l,
 247				    struct sk_buff_head *xmitq);
 248static void tipc_link_build_bc_init_msg(struct tipc_link *l,
 249					struct sk_buff_head *xmitq);
 250static bool tipc_link_release_pkts(struct tipc_link *l, u16 to);
 251
 252/*
 253 *  Simple non-static link routines (i.e. referenced outside this file)
 254 */
 255bool tipc_link_is_up(struct tipc_link *l)
 256{
 257	return link_is_up(l);
 258}
 259
 260bool tipc_link_peer_is_down(struct tipc_link *l)
 261{
 262	return l->state == LINK_PEER_RESET;
 263}
 264
 265bool tipc_link_is_reset(struct tipc_link *l)
 266{
 267	return l->state & (LINK_RESET | LINK_FAILINGOVER | LINK_ESTABLISHING);
 268}
 269
 270bool tipc_link_is_establishing(struct tipc_link *l)
 271{
 272	return l->state == LINK_ESTABLISHING;
 273}
 274
 275bool tipc_link_is_synching(struct tipc_link *l)
 276{
 277	return l->state == LINK_SYNCHING;
 278}
 279
 280bool tipc_link_is_failingover(struct tipc_link *l)
 281{
 282	return l->state == LINK_FAILINGOVER;
 283}
 284
 285bool tipc_link_is_blocked(struct tipc_link *l)
 286{
 287	return l->state & (LINK_RESETTING | LINK_PEER_RESET | LINK_FAILINGOVER);
 288}
 289
 290static bool link_is_bc_sndlink(struct tipc_link *l)
 291{
 292	return !l->bc_sndlink;
 293}
 294
 295static bool link_is_bc_rcvlink(struct tipc_link *l)
 296{
 297	return ((l->bc_rcvlink == l) && !link_is_bc_sndlink(l));
 298}
 299
 300int tipc_link_is_active(struct tipc_link *l)
 301{
 302	return l->active;
 303}
 304
 305void tipc_link_set_active(struct tipc_link *l, bool active)
 306{
 307	l->active = active;
 308}
 309
 310u32 tipc_link_id(struct tipc_link *l)
 311{
 312	return l->peer_bearer_id << 16 | l->bearer_id;
 313}
 314
 315int tipc_link_window(struct tipc_link *l)
 316{
 317	return l->window;
 318}
 319
 320int tipc_link_prio(struct tipc_link *l)
 321{
 322	return l->priority;
 323}
 324
 325unsigned long tipc_link_tolerance(struct tipc_link *l)
 326{
 327	return l->tolerance;
 328}
 329
 330struct sk_buff_head *tipc_link_inputq(struct tipc_link *l)
 331{
 332	return l->inputq;
 333}
 334
 335char tipc_link_plane(struct tipc_link *l)
 336{
 337	return l->net_plane;
 338}
 339
 340void tipc_link_add_bc_peer(struct tipc_link *snd_l,
 341			   struct tipc_link *uc_l,
 342			   struct sk_buff_head *xmitq)
 343{
 344	struct tipc_link *rcv_l = uc_l->bc_rcvlink;
 345
 346	snd_l->ackers++;
 347	rcv_l->acked = snd_l->snd_nxt - 1;
 348	snd_l->state = LINK_ESTABLISHED;
 349	tipc_link_build_bc_init_msg(uc_l, xmitq);
 350}
 351
 352void tipc_link_remove_bc_peer(struct tipc_link *snd_l,
 353			      struct tipc_link *rcv_l,
 354			      struct sk_buff_head *xmitq)
 355{
 356	u16 ack = snd_l->snd_nxt - 1;
 357
 358	snd_l->ackers--;
 359	rcv_l->bc_peer_is_up = true;
 360	rcv_l->state = LINK_ESTABLISHED;
 361	tipc_link_bc_ack_rcv(rcv_l, ack, xmitq);
 362	tipc_link_reset(rcv_l);
 363	rcv_l->state = LINK_RESET;
 364	if (!snd_l->ackers) {
 365		tipc_link_reset(snd_l);
 366		snd_l->state = LINK_RESET;
 367		__skb_queue_purge(xmitq);
 368	}
 369}
 370
 371int tipc_link_bc_peers(struct tipc_link *l)
 372{
 373	return l->ackers;
 374}
 375
 376u16 link_bc_rcv_gap(struct tipc_link *l)
 377{
 378	struct sk_buff *skb = skb_peek(&l->deferdq);
 379	u16 gap = 0;
 380
 381	if (more(l->snd_nxt, l->rcv_nxt))
 382		gap = l->snd_nxt - l->rcv_nxt;
 383	if (skb)
 384		gap = buf_seqno(skb) - l->rcv_nxt;
 385	return gap;
 386}
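/*
 * Editorial example (not part of the kernel source): sequence numbers are
 * u16 and wrap, so the subtractions above are modulo 2^16. With
 * rcv_nxt == 65530 and a deferred packet carrying seqno 4:
 *
 *	gap = (u16)(4 - 65530) = 10
 *
 * i.e. ten packets are still outstanding across the wrap.
 */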
 387
 388void tipc_link_set_mtu(struct tipc_link *l, int mtu)
 389{
 390	l->mtu = mtu;
 391}
 392
 393int tipc_link_mtu(struct tipc_link *l)
 394{
 395	return l->mtu;
 396}
 397
 398u16 tipc_link_rcv_nxt(struct tipc_link *l)
 399{
 400	return l->rcv_nxt;
 401}
 402
 403u16 tipc_link_acked(struct tipc_link *l)
 404{
 405	return l->acked;
 406}
 407
 408char *tipc_link_name(struct tipc_link *l)
 409{
 410	return l->name;
 411}
 412
 413/**
 414 * tipc_link_create - create a new link
 415 * @n: pointer to associated node
 416 * @if_name: associated interface name
 417 * @bearer_id: id (index) of associated bearer
 418 * @tolerance: link tolerance to be used by link
 419 * @net_plane: network plane (A,B,C..) this link belongs to
 420 * @mtu: mtu to be advertised by link
 421 * @priority: priority to be used by link
 422 * @window: send window to be used by link
 423 * @session: session to be used by link
 424 * @ownnode: identity of own node
 425 * @peer: node id of peer node
 426 * @peer_caps: bitmap describing peer node capabilities
 427 * @bc_sndlink: the namespace global link used for broadcast sending
 428 * @bc_rcvlink: the peer specific link used for broadcast reception
 429 * @inputq: queue to put messages ready for delivery
 430 * @namedq: queue to put binding table update messages ready for delivery
 431 * @link: return value, pointer to put the created link
 432 *
 433 * Returns true if link was created, otherwise false
 434 */
 435bool tipc_link_create(struct net *net, char *if_name, int bearer_id,
 436		      int tolerance, char net_plane, u32 mtu, int priority,
 437		      int window, u32 session, u32 self,
 438		      u32 peer, u8 *peer_id, u16 peer_caps,
 439		      struct tipc_link *bc_sndlink,
 440		      struct tipc_link *bc_rcvlink,
 441		      struct sk_buff_head *inputq,
 442		      struct sk_buff_head *namedq,
 443		      struct tipc_link **link)
 444{
 445	char peer_str[NODE_ID_STR_LEN] = {0,};
 446	char self_str[NODE_ID_STR_LEN] = {0,};
 447	struct tipc_link *l;
 448
 449	l = kzalloc(sizeof(*l), GFP_ATOMIC);
 450	if (!l)
 451		return false;
 452	*link = l;
 453	l->session = session;
 454
 455	/* Set link name for unicast links only */
 456	if (peer_id) {
 457		tipc_nodeid2string(self_str, tipc_own_id(net));
 458		if (strlen(self_str) > 16)
 459			sprintf(self_str, "%x", self);
 460		tipc_nodeid2string(peer_str, peer_id);
 461		if (strlen(peer_str) > 16)
 462			sprintf(peer_str, "%x", peer);
 463	}
 464	/* Peer i/f name will be completed by reset/activate message */
 465	snprintf(l->name, sizeof(l->name), "%s:%s-%s:unknown",
 466		 self_str, if_name, peer_str);
 467
 468	strcpy(l->if_name, if_name);
 469	l->addr = peer;
 470	l->peer_caps = peer_caps;
 471	l->net = net;
 472	l->peer_session = ANY_SESSION;
 473	l->bearer_id = bearer_id;
 474	l->tolerance = tolerance;
 475	l->net_plane = net_plane;
 476	l->advertised_mtu = mtu;
 477	l->mtu = mtu;
 478	l->priority = priority;
 479	tipc_link_set_queue_limits(l, window);
 480	l->ackers = 1;
 481	l->bc_sndlink = bc_sndlink;
 482	l->bc_rcvlink = bc_rcvlink;
 483	l->inputq = inputq;
 484	l->namedq = namedq;
 485	l->state = LINK_RESETTING;
 486	__skb_queue_head_init(&l->transmq);
 487	__skb_queue_head_init(&l->backlogq);
 488	__skb_queue_head_init(&l->deferdq);
 489	skb_queue_head_init(&l->wakeupq);
 490	skb_queue_head_init(l->inputq);
 491	return true;
 492}
 493
 494/**
 495 * tipc_link_bc_create - create new link to be used for broadcast
 496 * @n: pointer to associated node
 497 * @mtu: mtu to be used initially if no peers
 498 * @window: send window to be used
 499 * @inputq: queue to put messages ready for delivery
 500 * @namedq: queue to put binding table update messages ready for delivery
 501 * @link: return value, pointer to put the created link
 502 *
 503 * Returns true if link was created, otherwise false
 504 */
 505bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer,
 506			 int mtu, int window, u16 peer_caps,
 507			 struct sk_buff_head *inputq,
 508			 struct sk_buff_head *namedq,
 509			 struct tipc_link *bc_sndlink,
 510			 struct tipc_link **link)
 511{
 512	struct tipc_link *l;
 513
 514	if (!tipc_link_create(net, "", MAX_BEARERS, 0, 'Z', mtu, 0, window,
 515			      0, ownnode, peer, NULL, peer_caps, bc_sndlink,
 516			      NULL, inputq, namedq, link))
 517		return false;
 518
 519	l = *link;
 520	strcpy(l->name, tipc_bclink_name);
 521	tipc_link_reset(l);
 522	l->state = LINK_RESET;
 523	l->ackers = 0;
 524	l->bc_rcvlink = l;
 525
 526	/* Broadcast send link is always up */
 527	if (link_is_bc_sndlink(l))
 528		l->state = LINK_ESTABLISHED;
 529
 530	/* Disable replicast if even a single peer doesn't support it */
 531	if (link_is_bc_rcvlink(l) && !(peer_caps & TIPC_BCAST_RCAST))
 532		tipc_bcast_disable_rcast(net);
 533
 534	return true;
 535}
 536
 537/**
 538 * tipc_link_fsm_evt - link finite state machine
 539 * @l: pointer to link
 540 * @evt: state machine event to be processed
 541 */
 542int tipc_link_fsm_evt(struct tipc_link *l, int evt)
 543{
 544	int rc = 0;
 545
 546	switch (l->state) {
 547	case LINK_RESETTING:
 548		switch (evt) {
 549		case LINK_PEER_RESET_EVT:
 550			l->state = LINK_PEER_RESET;
 551			break;
 552		case LINK_RESET_EVT:
 553			l->state = LINK_RESET;
 554			break;
 555		case LINK_FAILURE_EVT:
 556		case LINK_FAILOVER_BEGIN_EVT:
 557		case LINK_ESTABLISH_EVT:
 558		case LINK_FAILOVER_END_EVT:
 559		case LINK_SYNCH_BEGIN_EVT:
 560		case LINK_SYNCH_END_EVT:
 561		default:
 562			goto illegal_evt;
 563		}
 564		break;
 565	case LINK_RESET:
 566		switch (evt) {
 567		case LINK_PEER_RESET_EVT:
 568			l->state = LINK_ESTABLISHING;
 569			break;
 570		case LINK_FAILOVER_BEGIN_EVT:
 571			l->state = LINK_FAILINGOVER;
 572		case LINK_FAILURE_EVT:
 573		case LINK_RESET_EVT:
 574		case LINK_ESTABLISH_EVT:
 575		case LINK_FAILOVER_END_EVT:
 576			break;
 577		case LINK_SYNCH_BEGIN_EVT:
 578		case LINK_SYNCH_END_EVT:
 579		default:
 580			goto illegal_evt;
 581		}
 582		break;
 583	case LINK_PEER_RESET:
 584		switch (evt) {
 585		case LINK_RESET_EVT:
 586			l->state = LINK_ESTABLISHING;
 587			break;
 588		case LINK_PEER_RESET_EVT:
 589		case LINK_ESTABLISH_EVT:
 590		case LINK_FAILURE_EVT:
 591			break;
 592		case LINK_SYNCH_BEGIN_EVT:
 593		case LINK_SYNCH_END_EVT:
 594		case LINK_FAILOVER_BEGIN_EVT:
 595		case LINK_FAILOVER_END_EVT:
 596		default:
 597			goto illegal_evt;
 598		}
 599		break;
 600	case LINK_FAILINGOVER:
 601		switch (evt) {
 602		case LINK_FAILOVER_END_EVT:
 603			l->state = LINK_RESET;
 604			break;
 605		case LINK_PEER_RESET_EVT:
 606		case LINK_RESET_EVT:
 607		case LINK_ESTABLISH_EVT:
 608		case LINK_FAILURE_EVT:
 609			break;
 610		case LINK_FAILOVER_BEGIN_EVT:
 611		case LINK_SYNCH_BEGIN_EVT:
 612		case LINK_SYNCH_END_EVT:
 613		default:
 614			goto illegal_evt;
 615		}
 616		break;
 617	case LINK_ESTABLISHING:
 618		switch (evt) {
 619		case LINK_ESTABLISH_EVT:
 620			l->state = LINK_ESTABLISHED;
 621			break;
 622		case LINK_FAILOVER_BEGIN_EVT:
 623			l->state = LINK_FAILINGOVER;
 624			break;
 625		case LINK_RESET_EVT:
 626			l->state = LINK_RESET;
 627			break;
 628		case LINK_FAILURE_EVT:
 629		case LINK_PEER_RESET_EVT:
 630		case LINK_SYNCH_BEGIN_EVT:
 631		case LINK_FAILOVER_END_EVT:
 632			break;
 633		case LINK_SYNCH_END_EVT:
 634		default:
 635			goto illegal_evt;
 636		}
 637		break;
 638	case LINK_ESTABLISHED:
 639		switch (evt) {
 640		case LINK_PEER_RESET_EVT:
 641			l->state = LINK_PEER_RESET;
 642			rc |= TIPC_LINK_DOWN_EVT;
 643			break;
 644		case LINK_FAILURE_EVT:
 645			l->state = LINK_RESETTING;
 646			rc |= TIPC_LINK_DOWN_EVT;
 647			break;
 648		case LINK_RESET_EVT:
 649			l->state = LINK_RESET;
 650			break;
 651		case LINK_ESTABLISH_EVT:
 652		case LINK_SYNCH_END_EVT:
 653			break;
 654		case LINK_SYNCH_BEGIN_EVT:
 655			l->state = LINK_SYNCHING;
 656			break;
 657		case LINK_FAILOVER_BEGIN_EVT:
 658		case LINK_FAILOVER_END_EVT:
 659		default:
 660			goto illegal_evt;
 661		}
 662		break;
 663	case LINK_SYNCHING:
 664		switch (evt) {
 665		case LINK_PEER_RESET_EVT:
 666			l->state = LINK_PEER_RESET;
 667			rc |= TIPC_LINK_DOWN_EVT;
 668			break;
 669		case LINK_FAILURE_EVT:
 670			l->state = LINK_RESETTING;
 671			rc |= TIPC_LINK_DOWN_EVT;
 672			break;
 673		case LINK_RESET_EVT:
 674			l->state = LINK_RESET;
 675			break;
 676		case LINK_ESTABLISH_EVT:
 677		case LINK_SYNCH_BEGIN_EVT:
 678			break;
 679		case LINK_SYNCH_END_EVT:
 680			l->state = LINK_ESTABLISHED;
 681			break;
 682		case LINK_FAILOVER_BEGIN_EVT:
 683		case LINK_FAILOVER_END_EVT:
 684		default:
 685			goto illegal_evt;
 686		}
 687		break;
 688	default:
 689		pr_err("Unknown FSM state %x in %s\n", l->state, l->name);
 690	}
 691	return rc;
 692illegal_evt:
 693	pr_err("Illegal FSM event %x in state %x on link %s\n",
 694	       evt, l->state, l->name);
 695	return rc;
 696}
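/*
 * Editorial usage sketch (hypothetical caller, not part of the kernel
 * source): the FSM only moves l->state and reports what happened through
 * the returned flags; acting on the transition is left to the caller:
 *
 *	rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
 *	if (rc & TIPC_LINK_DOWN_EVT)
 *		;	// caller resets the link and notifies the node layer
 */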
 697
 698/* link_profile_stats - update statistical profiling of traffic
 699 */
 700static void link_profile_stats(struct tipc_link *l)
 701{
 702	struct sk_buff *skb;
 703	struct tipc_msg *msg;
 704	int length;
 705
 706	/* Update counters used in statistical profiling of send traffic */
 707	l->stats.accu_queue_sz += skb_queue_len(&l->transmq);
 708	l->stats.queue_sz_counts++;
 709
 710	skb = skb_peek(&l->transmq);
 711	if (!skb)
 712		return;
 713	msg = buf_msg(skb);
 714	length = msg_size(msg);
 715
 716	if (msg_user(msg) == MSG_FRAGMENTER) {
 717		if (msg_type(msg) != FIRST_FRAGMENT)
 718			return;
 719		length = msg_size(msg_get_wrapped(msg));
 720	}
 721	l->stats.msg_lengths_total += length;
 722	l->stats.msg_length_counts++;
 723	if (length <= 64)
 724		l->stats.msg_length_profile[0]++;
 725	else if (length <= 256)
 726		l->stats.msg_length_profile[1]++;
 727	else if (length <= 1024)
 728		l->stats.msg_length_profile[2]++;
 729	else if (length <= 4096)
 730		l->stats.msg_length_profile[3]++;
 731	else if (length <= 16384)
 732		l->stats.msg_length_profile[4]++;
 733	else if (length <= 32768)
 734		l->stats.msg_length_profile[5]++;
 735	else
 736		l->stats.msg_length_profile[6]++;
 737}
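/*
 * Editorial example (not part of the kernel source): the length histogram
 * buckets are <=64, <=256, <=1024, <=4096, <=16384, <=32768 and above, so
 * a 300-byte message at the head of transmq lands in msg_length_profile[2].
 * For fragmented traffic only the first fragment is sampled, and it is
 * counted with the size of the original message it wraps.
 */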
 738
 739/* tipc_link_timeout - perform periodic task as instructed from node timeout
 740 */
 741int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
 742{
 743	int mtyp = 0;
 744	int rc = 0;
 745	bool state = false;
 746	bool probe = false;
 747	bool setup = false;
 748	u16 bc_snt = l->bc_sndlink->snd_nxt - 1;
 749	u16 bc_acked = l->bc_rcvlink->acked;
 750	struct tipc_mon_state *mstate = &l->mon_state;
 751
 752	switch (l->state) {
 753	case LINK_ESTABLISHED:
 754	case LINK_SYNCHING:
 755		mtyp = STATE_MSG;
 756		link_profile_stats(l);
 757		tipc_mon_get_state(l->net, l->addr, mstate, l->bearer_id);
 758		if (mstate->reset || (l->silent_intv_cnt > l->abort_limit))
 759			return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
 760		state = bc_acked != bc_snt;
 761		state |= l->bc_rcvlink->rcv_unacked;
 762		state |= l->rcv_unacked;
 763		state |= !skb_queue_empty(&l->transmq);
 764		state |= !skb_queue_empty(&l->deferdq);
 765		probe = mstate->probing;
 766		probe |= l->silent_intv_cnt;
 767		if (probe || mstate->monitoring)
 768			l->silent_intv_cnt++;
 769		break;
 770	case LINK_RESET:
 771		setup = l->rst_cnt++ <= 4;
 772		setup |= !(l->rst_cnt % 16);
 773		mtyp = RESET_MSG;
 774		break;
 775	case LINK_ESTABLISHING:
 776		setup = true;
 777		mtyp = ACTIVATE_MSG;
 778		break;
 779	case LINK_PEER_RESET:
 780	case LINK_RESETTING:
 781	case LINK_FAILINGOVER:
 782		break;
 783	default:
 784		break;
 785	}
 786
 787	if (state || probe || setup)
 788		tipc_link_build_proto_msg(l, mtyp, probe, 0, 0, 0, 0, xmitq);
 789
 790	return rc;
 791}
 792
 793/**
 794 * link_schedule_user - schedule a message sender for wakeup after congestion
 795 * @l: congested link
 796 * @hdr: header of message that is being sent
 797 * Create pseudo msg to send back to user when congestion abates
 798 */
 799static int link_schedule_user(struct tipc_link *l, struct tipc_msg *hdr)
 800{
 801	u32 dnode = tipc_own_addr(l->net);
 802	u32 dport = msg_origport(hdr);
 803	struct sk_buff *skb;
 804
 805	/* Create and schedule wakeup pseudo message */
 806	skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
 807			      dnode, l->addr, dport, 0, 0);
 808	if (!skb)
 809		return -ENOBUFS;
 810	msg_set_dest_droppable(buf_msg(skb), true);
 811	TIPC_SKB_CB(skb)->chain_imp = msg_importance(hdr);
 812	skb_queue_tail(&l->wakeupq, skb);
 813	l->stats.link_congs++;
 814	return -ELINKCONG;
 815}
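/*
 * Editorial sketch (not part of the kernel source): the -ELINKCONG round
 * trip. A sender whose importance level is over limit gets -ELINKCONG
 * back from tipc_link_xmit() and goes to sleep; the SOCK_WAKEUP pseudo
 * message queued here is later moved to l->inputq by link_prepare_wakeup()
 * once the backlog drains, and its delivery wakes the blocked socket.
 */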
 816
 817/**
 818 * link_prepare_wakeup - prepare users for wakeup after congestion
 819 * @l: congested link
 820 * Wake up a number of waiting users, as permitted by available space
 821 * in the send queue
 822 */
 823void link_prepare_wakeup(struct tipc_link *l)
 824{
 825	struct sk_buff *skb, *tmp;
 826	int imp, i = 0;
 827
 828	skb_queue_walk_safe(&l->wakeupq, skb, tmp) {
 829		imp = TIPC_SKB_CB(skb)->chain_imp;
 830		if (l->backlog[imp].len < l->backlog[imp].limit) {
 831			skb_unlink(skb, &l->wakeupq);
 832			skb_queue_tail(l->inputq, skb);
 833		} else if (i++ > 10) {
 834			break;
 835		}
 836	}
 837}
 838
 839void tipc_link_reset(struct tipc_link *l)
 840{
 841	l->peer_session = ANY_SESSION;
 842	l->session++;
 843	l->mtu = l->advertised_mtu;
 844	__skb_queue_purge(&l->transmq);
 845	__skb_queue_purge(&l->deferdq);
 846	skb_queue_splice_init(&l->wakeupq, l->inputq);
 847	__skb_queue_purge(&l->backlogq);
 848	l->backlog[TIPC_LOW_IMPORTANCE].len = 0;
 849	l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0;
 850	l->backlog[TIPC_HIGH_IMPORTANCE].len = 0;
 851	l->backlog[TIPC_CRITICAL_IMPORTANCE].len = 0;
 852	l->backlog[TIPC_SYSTEM_IMPORTANCE].len = 0;
 853	kfree_skb(l->reasm_buf);
 854	kfree_skb(l->failover_reasm_skb);
 855	l->reasm_buf = NULL;
 856	l->failover_reasm_skb = NULL;
 857	l->rcv_unacked = 0;
 858	l->snd_nxt = 1;
 859	l->rcv_nxt = 1;
 860	l->acked = 0;
 861	l->silent_intv_cnt = 0;
 862	l->rst_cnt = 0;
 863	l->stale_count = 0;
 864	l->bc_peer_is_up = false;
 865	memset(&l->mon_state, 0, sizeof(l->mon_state));
 866	tipc_link_reset_stats(l);
 867}
 868
 869/**
 870 * tipc_link_xmit(): enqueue buffer list according to queue situation
 871 * @link: link to use
 872 * @list: chain of buffers containing message
 873 * @xmitq: returned list of packets to be sent by caller
 874 *
 875 * Consumes the buffer chain.
 876 * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
 877 * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
 878 */
 879int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
 880		   struct sk_buff_head *xmitq)
 881{
 882	struct tipc_msg *hdr = buf_msg(skb_peek(list));
 883	unsigned int maxwin = l->window;
 884	int imp = msg_importance(hdr);
 885	unsigned int mtu = l->mtu;
 886	u16 ack = l->rcv_nxt - 1;
 887	u16 seqno = l->snd_nxt;
 888	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
 889	struct sk_buff_head *transmq = &l->transmq;
 890	struct sk_buff_head *backlogq = &l->backlogq;
 891	struct sk_buff *skb, *_skb, *bskb;
 892	int pkt_cnt = skb_queue_len(list);
 893	int rc = 0;
 894
 895	if (unlikely(msg_size(hdr) > mtu)) {
 896		skb_queue_purge(list);
 897		return -EMSGSIZE;
 898	}
 899
 900	/* Allow oversubscription of one data msg per source at congestion */
 901	if (unlikely(l->backlog[imp].len >= l->backlog[imp].limit)) {
 902		if (imp == TIPC_SYSTEM_IMPORTANCE) {
 903			pr_warn("%s<%s>, link overflow", link_rst_msg, l->name);
 904			return -ENOBUFS;
 905		}
 906		rc = link_schedule_user(l, hdr);
 907	}
 908
 909	if (pkt_cnt > 1) {
 910		l->stats.sent_fragmented++;
 911		l->stats.sent_fragments += pkt_cnt;
 912	}
 913
 914	/* Prepare each packet for sending, and add to relevant queue: */
 915	while (skb_queue_len(list)) {
 916		skb = skb_peek(list);
 917		hdr = buf_msg(skb);
 918		msg_set_seqno(hdr, seqno);
 919		msg_set_ack(hdr, ack);
 920		msg_set_bcast_ack(hdr, bc_ack);
 921
 922		if (likely(skb_queue_len(transmq) < maxwin)) {
 923			_skb = skb_clone(skb, GFP_ATOMIC);
 924			if (!_skb) {
 925				skb_queue_purge(list);
 926				return -ENOBUFS;
 927			}
 928			__skb_dequeue(list);
 929			__skb_queue_tail(transmq, skb);
 930			__skb_queue_tail(xmitq, _skb);
 931			TIPC_SKB_CB(skb)->ackers = l->ackers;
 932			l->rcv_unacked = 0;
 933			l->stats.sent_pkts++;
 934			seqno++;
 935			continue;
 936		}
 937		if (tipc_msg_bundle(skb_peek_tail(backlogq), hdr, mtu)) {
 938			kfree_skb(__skb_dequeue(list));
 939			l->stats.sent_bundled++;
 940			continue;
 941		}
 942		if (tipc_msg_make_bundle(&bskb, hdr, mtu, l->addr)) {
 943			kfree_skb(__skb_dequeue(list));
 944			__skb_queue_tail(backlogq, bskb);
 945			l->backlog[msg_importance(buf_msg(bskb))].len++;
 946			l->stats.sent_bundled++;
 947			l->stats.sent_bundles++;
 948			continue;
 949		}
 950		l->backlog[imp].len += skb_queue_len(list);
 951		skb_queue_splice_tail_init(list, backlogq);
 952	}
 953	l->snd_nxt = seqno;
 954	return rc;
 955}
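/*
 * Editorial usage sketch (hypothetical caller, not part of the kernel
 * source):
 *
 *	struct sk_buff_head list, xmitq;
 *
 *	__skb_queue_head_init(&list);
 *	__skb_queue_head_init(&xmitq);
 *	// fill 'list' with one message, e.g. via tipc_msg_build()
 *	rc = tipc_link_xmit(l, &list, &xmitq);
 *	// caller then pushes 'xmitq' to the bearer; on -ELINKCONG the
 *	// message was still accepted into the backlog, but the sender
 *	// must wait for a SOCK_WAKEUP before sending more
 */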
 956
 957void tipc_link_advance_backlog(struct tipc_link *l, struct sk_buff_head *xmitq)
 958{
 959	struct sk_buff *skb, *_skb;
 960	struct tipc_msg *hdr;
 961	u16 seqno = l->snd_nxt;
 962	u16 ack = l->rcv_nxt - 1;
 963	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
 964
 965	while (skb_queue_len(&l->transmq) < l->window) {
 966		skb = skb_peek(&l->backlogq);
 967		if (!skb)
 968			break;
 969		_skb = skb_clone(skb, GFP_ATOMIC);
 970		if (!_skb)
 971			break;
 972		__skb_dequeue(&l->backlogq);
 973		hdr = buf_msg(skb);
 974		l->backlog[msg_importance(hdr)].len--;
 975		__skb_queue_tail(&l->transmq, skb);
 976		__skb_queue_tail(xmitq, _skb);
 977		TIPC_SKB_CB(skb)->ackers = l->ackers;
 978		msg_set_seqno(hdr, seqno);
 979		msg_set_ack(hdr, ack);
 980		msg_set_bcast_ack(hdr, bc_ack);
 981		l->rcv_unacked = 0;
 982		l->stats.sent_pkts++;
 983		seqno++;
 984	}
 985	l->snd_nxt = seqno;
 986}
 987
 988static void link_retransmit_failure(struct tipc_link *l, struct sk_buff *skb)
 989{
 990	struct tipc_msg *hdr = buf_msg(skb);
 991
 992	pr_warn("Retransmission failure on link <%s>\n", l->name);
 993	link_print(l, "State of link ");
 994	pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
 995		msg_user(hdr), msg_type(hdr), msg_size(hdr), msg_errcode(hdr));
 996	pr_info("sqno %u, prev: %x, src: %x\n",
 997		msg_seqno(hdr), msg_prevnode(hdr), msg_orignode(hdr));
 998}
 999
1000int tipc_link_retrans(struct tipc_link *l, struct tipc_link *nacker,
1001		      u16 from, u16 to, struct sk_buff_head *xmitq)
1002{
1003	struct sk_buff *_skb, *skb = skb_peek(&l->transmq);
1004	struct tipc_msg *hdr;
1005	u16 ack = l->rcv_nxt - 1;
1006	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
1007
1008	if (!skb)
1009		return 0;
1010
1011	/* Detect repeated retransmit failures on same packet */
1012	if (nacker->last_retransm != buf_seqno(skb)) {
1013		nacker->last_retransm = buf_seqno(skb);
1014		nacker->stale_count = 1;
1015	} else if (++nacker->stale_count > 100) {
1016		link_retransmit_failure(l, skb);
1017		nacker->stale_count = 0;
1018		if (link_is_bc_sndlink(l))
1019			return TIPC_LINK_DOWN_EVT;
1020		return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
1021	}
1022
1023	/* Move forward to where retransmission should start */
1024	skb_queue_walk(&l->transmq, skb) {
1025		if (!less(buf_seqno(skb), from))
1026			break;
1027	}
1028
1029	skb_queue_walk_from(&l->transmq, skb) {
1030		if (more(buf_seqno(skb), to))
1031			break;
1032		hdr = buf_msg(skb);
1033		_skb = __pskb_copy(skb, MIN_H_SIZE, GFP_ATOMIC);
1034		if (!_skb)
1035			return 0;
1036		hdr = buf_msg(_skb);
1037		msg_set_ack(hdr, ack);
1038		msg_set_bcast_ack(hdr, bc_ack);
1039		_skb->priority = TC_PRIO_CONTROL;
1040		__skb_queue_tail(xmitq, _skb);
1041		l->stats.retransmitted++;
1042	}
1043	return 0;
1044}
1045
1046/* tipc_data_input - deliver data and name distr msgs to upper layer
1047 *
1048 * Consumes buffer if message is of right type
1049 * Node lock must be held
1050 */
1051static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb,
1052			    struct sk_buff_head *inputq)
1053{
1054	struct sk_buff_head *mc_inputq = l->bc_rcvlink->inputq;
1055	struct tipc_msg *hdr = buf_msg(skb);
1056
1057	switch (msg_user(hdr)) {
1058	case TIPC_LOW_IMPORTANCE:
1059	case TIPC_MEDIUM_IMPORTANCE:
1060	case TIPC_HIGH_IMPORTANCE:
1061	case TIPC_CRITICAL_IMPORTANCE:
1062		if (unlikely(msg_in_group(hdr) || msg_mcast(hdr))) {
1063			skb_queue_tail(mc_inputq, skb);
1064			return true;
1065		}
1066	case CONN_MANAGER:
1067		skb_queue_tail(inputq, skb);
1068		return true;
1069	case GROUP_PROTOCOL:
1070		skb_queue_tail(mc_inputq, skb);
1071		return true;
1072	case NAME_DISTRIBUTOR:
1073		l->bc_rcvlink->state = LINK_ESTABLISHED;
1074		skb_queue_tail(l->namedq, skb);
1075		return true;
1076	case MSG_BUNDLER:
1077	case TUNNEL_PROTOCOL:
1078	case MSG_FRAGMENTER:
1079	case BCAST_PROTOCOL:
1080		return false;
1081	default:
1082		pr_warn("Dropping received illegal msg type\n");
1083		kfree_skb(skb);
1084		return false;
1085	};
1086}
1087
1088/* tipc_link_input - process packet that has passed link protocol check
1089 *
1090 * Consumes buffer
1091 */
1092static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb,
1093			   struct sk_buff_head *inputq)
1094{
1095	struct tipc_msg *hdr = buf_msg(skb);
1096	struct sk_buff **reasm_skb = &l->reasm_buf;
1097	struct sk_buff *iskb;
1098	struct sk_buff_head tmpq;
1099	int usr = msg_user(hdr);
1100	int rc = 0;
1101	int pos = 0;
1102	int ipos = 0;
1103
1104	if (unlikely(usr == TUNNEL_PROTOCOL)) {
1105		if (msg_type(hdr) == SYNCH_MSG) {
1106			__skb_queue_purge(&l->deferdq);
1107			goto drop;
1108		}
1109		if (!tipc_msg_extract(skb, &iskb, &ipos))
1110			return rc;
1111		kfree_skb(skb);
1112		skb = iskb;
1113		hdr = buf_msg(skb);
1114		if (less(msg_seqno(hdr), l->drop_point))
1115			goto drop;
1116		if (tipc_data_input(l, skb, inputq))
1117			return rc;
1118		usr = msg_user(hdr);
1119		reasm_skb = &l->failover_reasm_skb;
1120	}
1121
1122	if (usr == MSG_BUNDLER) {
1123		skb_queue_head_init(&tmpq);
1124		l->stats.recv_bundles++;
1125		l->stats.recv_bundled += msg_msgcnt(hdr);
1126		while (tipc_msg_extract(skb, &iskb, &pos))
1127			tipc_data_input(l, iskb, &tmpq);
1128		tipc_skb_queue_splice_tail(&tmpq, inputq);
1129		return 0;
1130	} else if (usr == MSG_FRAGMENTER) {
1131		l->stats.recv_fragments++;
1132		if (tipc_buf_append(reasm_skb, &skb)) {
1133			l->stats.recv_fragmented++;
1134			tipc_data_input(l, skb, inputq);
1135		} else if (!*reasm_skb && !link_is_bc_rcvlink(l)) {
1136			pr_warn_ratelimited("Unable to build fragment list\n");
1137			return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
1138		}
1139		return 0;
1140	} else if (usr == BCAST_PROTOCOL) {
1141		tipc_bcast_lock(l->net);
1142		tipc_link_bc_init_rcv(l->bc_rcvlink, hdr);
1143		tipc_bcast_unlock(l->net);
1144	}
1145drop:
1146	kfree_skb(skb);
1147	return 0;
1148}
1149
1150static bool tipc_link_release_pkts(struct tipc_link *l, u16 acked)
1151{
1152	bool released = false;
1153	struct sk_buff *skb, *tmp;
1154
1155	skb_queue_walk_safe(&l->transmq, skb, tmp) {
1156		if (more(buf_seqno(skb), acked))
1157			break;
1158		__skb_unlink(skb, &l->transmq);
1159		kfree_skb(skb);
1160		released = true;
1161	}
1162	return released;
1163}
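/*
 * Editorial note (not part of the kernel source): more()/less() compare
 * u16 seqnos on a circle, so the walk above terminates correctly across
 * wraparound; e.g. more(2, 65534) holds because 2 lies 4 steps ahead of
 * 65534 modulo 2^16.
 */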
1164
1165/* tipc_link_build_state_msg: prepare link state message for transmission
1166 *
1167 * Note that sending of broadcast ack is coordinated among nodes, to reduce
1168 * risk of ack storms towards the sender
1169 */
1170int tipc_link_build_state_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
1171{
1172	if (!l)
1173		return 0;
1174
1175	/* Broadcast ACK must be sent via a unicast link => defer to caller */
1176	if (link_is_bc_rcvlink(l)) {
1177		if (((l->rcv_nxt ^ tipc_own_addr(l->net)) & 0xf) != 0xf)
1178			return 0;
1179		l->rcv_unacked = 0;
1180
1181		/* Use snd_nxt to store peer's snd_nxt in broadcast rcv link */
1182		l->snd_nxt = l->rcv_nxt;
1183		return TIPC_LINK_SND_STATE;
1184	}
1185
1186	/* Unicast ACK */
1187	l->rcv_unacked = 0;
1188	l->stats.sent_acks++;
1189	tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, 0, xmitq);
1190	return 0;
1191}
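/*
 * Editorial example (not part of the kernel source): broadcast acks are
 * staggered by the test ((rcv_nxt ^ own_addr) & 0xf) == 0xf, so each node
 * acks a different 1-in-16 slice of the sequence space. A node whose
 * address ends in nibble 0x3 acks only when rcv_nxt ends in 0xc
 * (0x3 ^ 0xc == 0xf), keeping at most one in sixteen receivers acking
 * any given packet.
 */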
1192
1193/* tipc_link_build_reset_msg: prepare link RESET or ACTIVATE message
1194 */
1195void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
1196{
1197	int mtyp = RESET_MSG;
1198	struct sk_buff *skb;
1199
1200	if (l->state == LINK_ESTABLISHING)
1201		mtyp = ACTIVATE_MSG;
1202
1203	tipc_link_build_proto_msg(l, mtyp, 0, 0, 0, 0, 0, xmitq);
1204
1205	/* Inform peer that this endpoint is going down if applicable */
1206	skb = skb_peek_tail(xmitq);
1207	if (skb && (l->state == LINK_RESET))
1208		msg_set_peer_stopping(buf_msg(skb), 1);
1209}
1210
1211/* tipc_link_build_nack_msg: prepare link nack message for transmission
1212 * Note that sending of broadcast NACK is coordinated among nodes, to
1213 * reduce the risk of NACK storms towards the sender
1214 */
1215static int tipc_link_build_nack_msg(struct tipc_link *l,
1216				    struct sk_buff_head *xmitq)
1217{
1218	u32 def_cnt = ++l->stats.deferred_recv;
1219	int match1, match2;
1220
1221	if (link_is_bc_rcvlink(l)) {
1222		match1 = def_cnt & 0xf;
1223		match2 = tipc_own_addr(l->net) & 0xf;
1224		if (match1 == match2)
1225			return TIPC_LINK_SND_STATE;
1226		return 0;
1227	}
1228
1229	if ((skb_queue_len(&l->deferdq) == 1) || !(def_cnt % TIPC_NACK_INTV))
1230		tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, 0, xmitq);
1231	return 0;
1232}
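/*
 * Editorial note (not part of the kernel source): broadcast NACKs are
 * staggered the same way as broadcast acks, but keyed on the deferral
 * counter: only the receiver whose own address nibble matches the low
 * nibble of its deferred_recv count asks for retransmission, so a single
 * loss does not trigger a NACK from every receiver at once.
 */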
1233
1234/* tipc_link_rcv - process TIPC packets/messages arriving from off-node
1235 * @l: the link that should handle the message
1236 * @skb: TIPC packet
1237 * @xmitq: queue to place packets to be sent after this call
1238 */
1239int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
1240		  struct sk_buff_head *xmitq)
1241{
1242	struct sk_buff_head *defq = &l->deferdq;
1243	struct tipc_msg *hdr;
1244	u16 seqno, rcv_nxt, win_lim;
1245	int rc = 0;
1246
1247	do {
1248		hdr = buf_msg(skb);
1249		seqno = msg_seqno(hdr);
1250		rcv_nxt = l->rcv_nxt;
1251		win_lim = rcv_nxt + TIPC_MAX_LINK_WIN;
1252
1253		/* Verify and update link state */
1254		if (unlikely(msg_user(hdr) == LINK_PROTOCOL))
1255			return tipc_link_proto_rcv(l, skb, xmitq);
1256
1257		if (unlikely(!link_is_up(l))) {
1258			if (l->state == LINK_ESTABLISHING)
1259				rc = TIPC_LINK_UP_EVT;
1260			goto drop;
1261		}
1262
1263		/* Don't send probe at next timeout expiration */
1264		l->silent_intv_cnt = 0;
1265
1266		/* Drop if outside receive window */
1267		if (unlikely(less(seqno, rcv_nxt) || more(seqno, win_lim))) {
1268			l->stats.duplicates++;
1269			goto drop;
1270		}
1271
1272		/* Forward queues and wake up waiting users */
1273		if (likely(tipc_link_release_pkts(l, msg_ack(hdr)))) {
1274			tipc_link_advance_backlog(l, xmitq);
1275			if (unlikely(!skb_queue_empty(&l->wakeupq)))
1276				link_prepare_wakeup(l);
1277		}
1278
1279		/* Defer delivery if sequence gap */
1280		if (unlikely(seqno != rcv_nxt)) {
1281			__tipc_skb_queue_sorted(defq, seqno, skb);
1282			rc |= tipc_link_build_nack_msg(l, xmitq);
1283			break;
1284		}
1285
1286		/* Deliver packet */
1287		l->rcv_nxt++;
1288		l->stats.recv_pkts++;
1289		if (!tipc_data_input(l, skb, l->inputq))
1290			rc |= tipc_link_input(l, skb, l->inputq);
1291		if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN))
1292			rc |= tipc_link_build_state_msg(l, xmitq);
1293		if (unlikely(rc & ~TIPC_LINK_SND_STATE))
1294			break;
1295	} while ((skb = __skb_dequeue(defq)));
1296
1297	return rc;
1298drop:
1299	kfree_skb(skb);
1300	return rc;
1301}
1302
1303static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
1304				      bool probe_reply, u16 rcvgap,
1305				      int tolerance, int priority,
1306				      struct sk_buff_head *xmitq)
1307{
1308	struct tipc_link *bcl = l->bc_rcvlink;
1309	struct sk_buff *skb;
1310	struct tipc_msg *hdr;
1311	struct sk_buff_head *dfq = &l->deferdq;
1312	bool node_up = link_is_up(bcl);
1313	struct tipc_mon_state *mstate = &l->mon_state;
1314	int dlen = 0;
1315	void *data;
1316
1317	/* Don't send protocol message during reset or link failover */
1318	if (tipc_link_is_blocked(l))
1319		return;
1320
1321	if (!tipc_link_is_up(l) && (mtyp == STATE_MSG))
1322		return;
1323
1324	if (!skb_queue_empty(dfq))
1325		rcvgap = buf_seqno(skb_peek(dfq)) - l->rcv_nxt;
1326
1327	skb = tipc_msg_create(LINK_PROTOCOL, mtyp, INT_H_SIZE,
1328			      tipc_max_domain_size, l->addr,
1329			      tipc_own_addr(l->net), 0, 0, 0);
1330	if (!skb)
1331		return;
1332
1333	hdr = buf_msg(skb);
1334	data = msg_data(hdr);
1335	msg_set_session(hdr, l->session);
1336	msg_set_bearer_id(hdr, l->bearer_id);
1337	msg_set_net_plane(hdr, l->net_plane);
1338	msg_set_next_sent(hdr, l->snd_nxt);
1339	msg_set_ack(hdr, l->rcv_nxt - 1);
1340	msg_set_bcast_ack(hdr, bcl->rcv_nxt - 1);
1341	msg_set_bc_ack_invalid(hdr, !node_up);
1342	msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
1343	msg_set_link_tolerance(hdr, tolerance);
1344	msg_set_linkprio(hdr, priority);
1345	msg_set_redundant_link(hdr, node_up);
1346	msg_set_seq_gap(hdr, 0);
1347	msg_set_seqno(hdr, l->snd_nxt + U16_MAX / 2);
1348
1349	if (mtyp == STATE_MSG) {
1350		msg_set_seq_gap(hdr, rcvgap);
1351		msg_set_bc_gap(hdr, link_bc_rcv_gap(bcl));
1352		msg_set_probe(hdr, probe);
1353		msg_set_is_keepalive(hdr, probe || probe_reply);
1354		tipc_mon_prep(l->net, data, &dlen, mstate, l->bearer_id);
1355		msg_set_size(hdr, INT_H_SIZE + dlen);
1356		skb_trim(skb, INT_H_SIZE + dlen);
1357		l->stats.sent_states++;
1358		l->rcv_unacked = 0;
1359	} else {
1360		/* RESET_MSG or ACTIVATE_MSG */
1361		msg_set_max_pkt(hdr, l->advertised_mtu);
1362		strcpy(data, l->if_name);
1363		msg_set_size(hdr, INT_H_SIZE + TIPC_MAX_IF_NAME);
1364		skb_trim(skb, INT_H_SIZE + TIPC_MAX_IF_NAME);
1365	}
1366	if (probe)
1367		l->stats.sent_probes++;
1368	if (rcvgap)
1369		l->stats.sent_nacks++;
1370	skb->priority = TC_PRIO_CONTROL;
1371	__skb_queue_tail(xmitq, skb);
1372}
1373
1374/* tipc_link_tnl_prepare(): prepare and return a list of tunnel packets
1375 * with contents of the link's transmit and backlog queues.
1376 */
1377void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
1378			   int mtyp, struct sk_buff_head *xmitq)
1379{
1380	struct sk_buff *skb, *tnlskb;
1381	struct tipc_msg *hdr, tnlhdr;
1382	struct sk_buff_head *queue = &l->transmq;
1383	struct sk_buff_head tmpxq, tnlq;
1384	u16 pktlen, pktcnt, seqno = l->snd_nxt;
1385
1386	if (!tnl)
1387		return;
1388
1389	skb_queue_head_init(&tnlq);
1390	skb_queue_head_init(&tmpxq);
1391
1392	/* At least one packet required for safe algorithm => add dummy */
1393	skb = tipc_msg_create(TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
1394			      BASIC_H_SIZE, 0, l->addr, tipc_own_addr(l->net),
1395			      0, 0, TIPC_ERR_NO_PORT);
1396	if (!skb) {
1397		pr_warn("%sunable to create tunnel packet\n", link_co_err);
1398		return;
1399	}
1400	skb_queue_tail(&tnlq, skb);
1401	tipc_link_xmit(l, &tnlq, &tmpxq);
1402	__skb_queue_purge(&tmpxq);
1403
1404	/* Initialize reusable tunnel packet header */
1405	tipc_msg_init(tipc_own_addr(l->net), &tnlhdr, TUNNEL_PROTOCOL,
1406		      mtyp, INT_H_SIZE, l->addr);
1407	pktcnt = skb_queue_len(&l->transmq) + skb_queue_len(&l->backlogq);
1408	msg_set_msgcnt(&tnlhdr, pktcnt);
1409	msg_set_bearer_id(&tnlhdr, l->peer_bearer_id);
1410tnl:
1411	/* Wrap each packet into a tunnel packet */
1412	skb_queue_walk(queue, skb) {
1413		hdr = buf_msg(skb);
1414		if (queue == &l->backlogq)
1415			msg_set_seqno(hdr, seqno++);
1416		pktlen = msg_size(hdr);
1417		msg_set_size(&tnlhdr, pktlen + INT_H_SIZE);
1418		tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE, GFP_ATOMIC);
1419		if (!tnlskb) {
1420			pr_warn("%sunable to send packet\n", link_co_err);
1421			return;
1422		}
1423		skb_copy_to_linear_data(tnlskb, &tnlhdr, INT_H_SIZE);
1424		skb_copy_to_linear_data_offset(tnlskb, INT_H_SIZE, hdr, pktlen);
1425		__skb_queue_tail(&tnlq, tnlskb);
1426	}
1427	if (queue != &l->backlogq) {
1428		queue = &l->backlogq;
1429		goto tnl;
1430	}
1431
1432	tipc_link_xmit(tnl, &tnlq, xmitq);
1433
1434	if (mtyp == FAILOVER_MSG) {
1435		tnl->drop_point = l->rcv_nxt;
1436		tnl->failover_reasm_skb = l->reasm_buf;
1437		l->reasm_buf = NULL;
1438	}
1439}
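/*
 * Editorial sketch (not part of the kernel source): during failover every
 * pending packet of the dying link is wrapped in a TUNNEL_PROTOCOL header
 * and re-sent on the surviving link; msg_msgcnt() tells the receiver how
 * many wrapped packets to expect, and the dummy packet created above
 * guarantees that count is never zero even when both queues are empty.
 */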
1440
1441/* tipc_link_proto_rcv(): receive link level protocol message:
1442 * Note that network plane id propagates through the network, and may
1443 * change at any time. The node with lowest numerical id determines
1444 * network plane
1445 */
1446static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
1447			       struct sk_buff_head *xmitq)
1448{
1449	struct tipc_msg *hdr = buf_msg(skb);
1450	u16 rcvgap = 0;
1451	u16 ack = msg_ack(hdr);
1452	u16 gap = msg_seq_gap(hdr);
1453	u16 peers_snd_nxt =  msg_next_sent(hdr);
1454	u16 peers_tol = msg_link_tolerance(hdr);
1455	u16 peers_prio = msg_linkprio(hdr);
1456	u16 rcv_nxt = l->rcv_nxt;
1457	u16 dlen = msg_data_sz(hdr);
1458	int mtyp = msg_type(hdr);
1459	bool reply = msg_probe(hdr);
1460	void *data;
1461	char *if_name;
1462	int rc = 0;
1463
1464	if (tipc_link_is_blocked(l) || !xmitq)
1465		goto exit;
1466
1467	if (tipc_own_addr(l->net) > msg_prevnode(hdr))
1468		l->net_plane = msg_net_plane(hdr);
1469
1470	skb_linearize(skb);
1471	hdr = buf_msg(skb);
1472	data = msg_data(hdr);
1473
1474	switch (mtyp) {
1475	case RESET_MSG:
1476
1477		/* Ignore duplicate RESET with old session number */
1478		if ((less_eq(msg_session(hdr), l->peer_session)) &&
1479		    (l->peer_session != ANY_SESSION))
1480			break;
1481		/* fall thru' */
1482
1483	case ACTIVATE_MSG:
1484
1485		/* Complete own link name with peer's interface name */
1486		if_name = strrchr(l->name, ':') + 1;
1487		if (sizeof(l->name) - (if_name - l->name) <= TIPC_MAX_IF_NAME)
1488			break;
1489		if (msg_data_sz(hdr) < TIPC_MAX_IF_NAME)
1490			break;
1491		strncpy(if_name, data, TIPC_MAX_IF_NAME);
1492
1493		/* Update own tolerance if peer indicates a non-zero value */
1494		if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
1495			l->tolerance = peers_tol;
1496
1497		/* Update own priority if peer's priority is higher */
1498		if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
1499			l->priority = peers_prio;
1500
1501		/* ACTIVATE_MSG serves as PEER_RESET if link is already down */
1502		if (msg_peer_stopping(hdr))
1503			rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
1504		else if ((mtyp == RESET_MSG) || !link_is_up(l))
1505			rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);
1506
1507		/* ACTIVATE_MSG takes up link if it was already locally reset */
1508		if ((mtyp == ACTIVATE_MSG) && (l->state == LINK_ESTABLISHING))
1509			rc = TIPC_LINK_UP_EVT;
1510
1511		l->peer_session = msg_session(hdr);
1512		l->peer_bearer_id = msg_bearer_id(hdr);
1513		if (l->mtu > msg_max_pkt(hdr))
1514			l->mtu = msg_max_pkt(hdr);
1515		break;
1516
1517	case STATE_MSG:
1518
1519		/* Update own tolerance if peer indicates a non-zero value */
1520		if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
1521			l->tolerance = peers_tol;
1522
1523		/* Update own prio if peer indicates a different value */
1524		if ((peers_prio != l->priority) &&
1525		    in_range(peers_prio, 1, TIPC_MAX_LINK_PRI)) {
1526			l->priority = peers_prio;
1527			rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
1528		}
1529
1530		l->silent_intv_cnt = 0;
1531		l->stats.recv_states++;
1532		if (msg_probe(hdr))
1533			l->stats.recv_probes++;
1534
1535		if (!link_is_up(l)) {
1536			if (l->state == LINK_ESTABLISHING)
1537				rc = TIPC_LINK_UP_EVT;
1538			break;
1539		}
1540		tipc_mon_rcv(l->net, data, dlen, l->addr,
1541			     &l->mon_state, l->bearer_id);
1542
1543		/* Send NACK if peer has sent pkts we haven't received yet */
1544		if (more(peers_snd_nxt, rcv_nxt) && !tipc_link_is_synching(l))
1545			rcvgap = peers_snd_nxt - l->rcv_nxt;
1546		if (rcvgap || reply)
1547			tipc_link_build_proto_msg(l, STATE_MSG, 0, reply,
1548						  rcvgap, 0, 0, xmitq);
1549		tipc_link_release_pkts(l, ack);
1550
1551		/* If NACK, retransmit will now start at right position */
1552		if (gap) {
1553			rc = tipc_link_retrans(l, l, ack + 1, ack + gap, xmitq);
1554			l->stats.recv_nacks++;
1555		}
1556
1557		tipc_link_advance_backlog(l, xmitq);
1558		if (unlikely(!skb_queue_empty(&l->wakeupq)))
1559			link_prepare_wakeup(l);
1560	}
1561exit:
1562	kfree_skb(skb);
1563	return rc;
1564}
1565
1566/* tipc_link_build_bc_proto_msg() - create broadcast protocol message
1567 */
1568static bool tipc_link_build_bc_proto_msg(struct tipc_link *l, bool bcast,
1569					 u16 peers_snd_nxt,
1570					 struct sk_buff_head *xmitq)
1571{
1572	struct sk_buff *skb;
1573	struct tipc_msg *hdr;
1574	struct sk_buff *dfrd_skb = skb_peek(&l->deferdq);
1575	u16 ack = l->rcv_nxt - 1;
1576	u16 gap_to = peers_snd_nxt - 1;
1577
1578	skb = tipc_msg_create(BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE,
1579			      0, l->addr, tipc_own_addr(l->net), 0, 0, 0);
1580	if (!skb)
1581		return false;
1582	hdr = buf_msg(skb);
1583	msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
1584	msg_set_bcast_ack(hdr, ack);
1585	msg_set_bcgap_after(hdr, ack);
1586	if (dfrd_skb)
1587		gap_to = buf_seqno(dfrd_skb) - 1;
1588	msg_set_bcgap_to(hdr, gap_to);
1589	msg_set_non_seq(hdr, bcast);
1590	__skb_queue_tail(xmitq, skb);
1591	return true;
1592}
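/* Example: with rcv_nxt == 10 and a deferred packet with seqno 15 at the
 * head of deferdq, the message built above carries ack 9, bcgap_after 9
 * and bcgap_to 14, i.e. it asks the peer to retransmit broadcasts 10..14.
 */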
1593
1594/* tipc_link_build_bc_init_msg() - synchronize broadcast link endpoints.
1595 *
1596 * Give a newly added peer node the sequence number where it should
1597 * start receiving and acking broadcast packets.
1598 */
1599static void tipc_link_build_bc_init_msg(struct tipc_link *l,
1600					struct sk_buff_head *xmitq)
1601{
1602	struct sk_buff_head list;
1603
1604	__skb_queue_head_init(&list);
1605	if (!tipc_link_build_bc_proto_msg(l->bc_rcvlink, false, 0, &list))
1606		return;
1607	msg_set_bc_ack_invalid(buf_msg(skb_peek(&list)), true);
1608	tipc_link_xmit(l, &list, xmitq);
1609}
1610
1611/* tipc_link_bc_init_rcv - receive initial broadcast synch data from peer
1612 */
1613void tipc_link_bc_init_rcv(struct tipc_link *l, struct tipc_msg *hdr)
1614{
1615	int mtyp = msg_type(hdr);
1616	u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);
1617
1618	if (link_is_up(l))
1619		return;
1620
1621	if (msg_user(hdr) == BCAST_PROTOCOL) {
1622		l->rcv_nxt = peers_snd_nxt;
1623		l->state = LINK_ESTABLISHED;
1624		return;
1625	}
1626
1627	if (l->peer_caps & TIPC_BCAST_SYNCH)
1628		return;
1629
1630	if (msg_peer_node_is_up(hdr))
1631		return;
1632
1633	/* Compatibility: accept older, less safe initial synch data */
1634	if ((mtyp == RESET_MSG) || (mtyp == ACTIVATE_MSG))
1635		l->rcv_nxt = peers_snd_nxt;
1636}
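/* Example: a peer that lacks the TIPC_BCAST_SYNCH capability cannot be
 * expected to send a dedicated BCAST_PROTOCOL init message, so the
 * compatibility branch above seeds rcv_nxt from the bc_snd_nxt field of
 * its RESET or ACTIVATE message instead.
 */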
1637
1638/* link_bc_retr_eval() - check if the indicated range can be retransmitted now
1639 * - Adjust permitted range if there is overlap with previous retransmission
1640 */
1641static bool link_bc_retr_eval(struct tipc_link *l, u16 *from, u16 *to)
1642{
1643	unsigned long elapsed = jiffies_to_msecs(jiffies - l->prev_retr);
1644
1645	if (less(*to, *from))
1646		return false;
1647
1648	/* New retransmission request */
1649	if ((elapsed > TIPC_BC_RETR_LIMIT) ||
1650	    less(*to, l->prev_from) || more(*from, l->prev_to)) {
1651		l->prev_from = *from;
1652		l->prev_to = *to;
1653		l->prev_retr = jiffies;
1654		return true;
1655	}
1656
1657	/* Inside range of previous retransmit */
1658	if (!less(*from, l->prev_from) && !more(*to, l->prev_to))
1659		return false;
1660
1661	/* Fully or partially outside previous range => exclude overlap */
1662	if (less(*from, l->prev_from)) {
1663		*to = l->prev_from - 1;
1664		l->prev_from = *from;
1665	}
1666	if (more(*to, l->prev_to)) {
1667		*from = l->prev_to + 1;
1668		l->prev_to = *to;
1669	}
1670	l->prev_retr = jiffies;
1671	return true;
1672}
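/* Example: if the previous retransmission covered [10, 20] and a request
 * for [5, 15] arrives within TIPC_BC_RETR_LIMIT msecs, the overlap is
 * excluded: the permitted range shrinks to [5, 9] and prev_from becomes 5.
 * A request lying fully inside [10, 20] would be rejected outright.
 */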
1673
1674/* tipc_link_bc_sync_rcv - update rcv link according to peer's send state
1675 */
1676int tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
1677			  struct sk_buff_head *xmitq)
1678{
1679	struct tipc_link *snd_l = l->bc_sndlink;
1680	u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);
1681	u16 from = msg_bcast_ack(hdr) + 1;
1682	u16 to = from + msg_bc_gap(hdr) - 1;
1683	int rc = 0;
1684
1685	if (!link_is_up(l))
1686		return rc;
1687
1688	if (!msg_peer_node_is_up(hdr))
1689		return rc;
1690
1691	/* Open when peer acknowledges our bcast init msg (pkt #1) */
1692	if (msg_ack(hdr))
1693		l->bc_peer_is_up = true;
1694
1695	if (!l->bc_peer_is_up)
1696		return rc;
1697
1698	l->stats.recv_nacks++;
1699
1700	/* Ignore if peers_snd_nxt goes beyond receive window */
1701	if (more(peers_snd_nxt, l->rcv_nxt + l->window))
1702		return rc;
1703
1704	if (link_bc_retr_eval(snd_l, &from, &to))
1705		rc = tipc_link_retrans(snd_l, l, from, to, xmitq);
1706
1707	l->snd_nxt = peers_snd_nxt;
1708	if (link_bc_rcv_gap(l))
1709		rc |= TIPC_LINK_SND_STATE;
1710
1711	/* Return now if sender supports nack via STATE messages */
1712	if (l->peer_caps & TIPC_BCAST_STATE_NACK)
1713		return rc;
1714
1715	/* Otherwise, be backwards compatible */
1716
1717	if (!more(peers_snd_nxt, l->rcv_nxt)) {
1718		l->nack_state = BC_NACK_SND_CONDITIONAL;
1719		return 0;
1720	}
1721
1722	/* Don't NACK if one was recently sent or peeked */
1723	if (l->nack_state == BC_NACK_SND_SUPPRESS) {
1724		l->nack_state = BC_NACK_SND_UNCONDITIONAL;
1725		return 0;
1726	}
1727
1728	/* Conditionally delay NACK sending until next synch rcv */
1729	if (l->nack_state == BC_NACK_SND_CONDITIONAL) {
1730		l->nack_state = BC_NACK_SND_UNCONDITIONAL;
1731		if ((peers_snd_nxt - l->rcv_nxt) < TIPC_MIN_LINK_WIN)
1732			return 0;
1733	}
1734
1735	/* Send NACK now but suppress next one */
1736	tipc_link_build_bc_proto_msg(l, true, peers_snd_nxt, xmitq);
1737	l->nack_state = BC_NACK_SND_SUPPRESS;
1738	return 0;
1739}
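/* The nack_state transitions above form a simple suppression cycle:
 * CONDITIONAL defers a NACK until the next sync unless the gap has grown
 * to at least TIPC_MIN_LINK_WIN, SUPPRESS swallows exactly one NACK after
 * an equivalent one was recently sent or overheard, and UNCONDITIONAL
 * sends at the next opportunity.
 */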
1740
1741void tipc_link_bc_ack_rcv(struct tipc_link *l, u16 acked,
1742			  struct sk_buff_head *xmitq)
1743{
1744	struct sk_buff *skb, *tmp;
1745	struct tipc_link *snd_l = l->bc_sndlink;
1746
1747	if (!link_is_up(l) || !l->bc_peer_is_up)
1748		return;
1749
1750	if (!more(acked, l->acked))
1751		return;
1752
1753	/* Skip over packets peer has already acked */
1754	skb_queue_walk(&snd_l->transmq, skb) {
1755		if (more(buf_seqno(skb), l->acked))
1756			break;
1757	}
1758
1759	/* Update/release the packets peer is acking now */
1760	skb_queue_walk_from_safe(&snd_l->transmq, skb, tmp) {
1761		if (more(buf_seqno(skb), acked))
1762			break;
1763		if (!--TIPC_SKB_CB(skb)->ackers) {
1764			__skb_unlink(skb, &snd_l->transmq);
1765			kfree_skb(skb);
1766		}
1767	}
1768	l->acked = acked;
1769	tipc_link_advance_backlog(snd_l, xmitq);
1770	if (unlikely(!skb_queue_empty(&snd_l->wakeupq)))
1771		link_prepare_wakeup(snd_l);
1772}
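/* Each packet on the broadcast send link carries an ackers reference
 * count, one per peer; a packet is unlinked and freed above only when
 * the last remaining peer acks past its sequence number, so with three
 * peers a packet survives until all three have acked it.
 */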
1773
1774/* tipc_link_bc_nack_rcv(): receive broadcast nack message
1775 * This function is here for backwards compatibility, since
1776 * no BCAST_PROTOCOL/STATE messages occur from TIPC v2.5 onwards.
1777 */
1778int tipc_link_bc_nack_rcv(struct tipc_link *l, struct sk_buff *skb,
1779			  struct sk_buff_head *xmitq)
1780{
1781	struct tipc_msg *hdr = buf_msg(skb);
1782	u32 dnode = msg_destnode(hdr);
1783	int mtyp = msg_type(hdr);
1784	u16 acked = msg_bcast_ack(hdr);
1785	u16 from = acked + 1;
1786	u16 to = msg_bcgap_to(hdr);
1787	u16 peers_snd_nxt = to + 1;
1788	int rc = 0;
1789
1790	kfree_skb(skb);
1791
1792	if (!tipc_link_is_up(l) || !l->bc_peer_is_up)
1793		return 0;
1794
1795	if (mtyp != STATE_MSG)
1796		return 0;
1797
1798	if (dnode == tipc_own_addr(l->net)) {
1799		tipc_link_bc_ack_rcv(l, acked, xmitq);
1800		rc = tipc_link_retrans(l->bc_sndlink, l, from, to, xmitq);
1801		l->stats.recv_nacks++;
1802		return rc;
1803	}
1804
1805	/* Msg for other node => suppress own NACK at next sync if applicable */
1806	if (more(peers_snd_nxt, l->rcv_nxt) && !less(l->rcv_nxt, from))
1807		l->nack_state = BC_NACK_SND_SUPPRESS;
1808
1809	return 0;
1810}
1811
1812void tipc_link_set_queue_limits(struct tipc_link *l, u32 win)
1813{
1814	int max_bulk = TIPC_MAX_PUBL / (l->mtu / ITEM_SIZE);
1815
1816	l->window = win;
1817	l->backlog[TIPC_LOW_IMPORTANCE].limit      = max_t(u16, 50, win);
1818	l->backlog[TIPC_MEDIUM_IMPORTANCE].limit   = max_t(u16, 100, win * 2);
1819	l->backlog[TIPC_HIGH_IMPORTANCE].limit     = max_t(u16, 150, win * 3);
1820	l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = max_t(u16, 200, win * 4);
1821	l->backlog[TIPC_SYSTEM_IMPORTANCE].limit   = max_bulk;
1822}
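/* Example: with win == 50 the resulting backlog limits are 50 (low),
 * 100 (medium), 150 (high) and 200 (critical) packets, while the system
 * importance level is bounded by max_bulk independently of the window.
 */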
1823
1824/**
1825 * tipc_link_reset_stats - reset link statistics
1826 * @l: pointer to link
1827 */
1828void tipc_link_reset_stats(struct tipc_link *l)
1829{
1830	memset(&l->stats, 0, sizeof(l->stats));
1831}
1832
1833static void link_print(struct tipc_link *l, const char *str)
1834{
1835	struct sk_buff *hskb = skb_peek(&l->transmq);
1836	u16 head = hskb ? msg_seqno(buf_msg(hskb)) : l->snd_nxt - 1;
1837	u16 tail = l->snd_nxt - 1;
1838
1839	pr_info("%s Link <%s> state %x\n", str, l->name, l->state);
1840	pr_info("XMTQ: %u [%u-%u], BKLGQ: %u, SNDNX: %u, RCVNX: %u\n",
1841		skb_queue_len(&l->transmq), head, tail,
1842		skb_queue_len(&l->backlogq), l->snd_nxt, l->rcv_nxt);
1843}
1844
1845/* Parse and validate nested (link) properties valid for media, bearer and link
1846 */
1847int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[])
1848{
1849	int err;
1850
1851	err = nla_parse_nested(props, TIPC_NLA_PROP_MAX, prop,
1852			       tipc_nl_prop_policy, NULL);
1853	if (err)
1854		return err;
1855
1856	if (props[TIPC_NLA_PROP_PRIO]) {
1857		u32 prio;
1858
1859		prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
1860		if (prio > TIPC_MAX_LINK_PRI)
1861			return -EINVAL;
1862	}
1863
1864	if (props[TIPC_NLA_PROP_TOL]) {
1865		u32 tol;
1866
1867		tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
1868		if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
1869			return -EINVAL;
1870	}
1871
1872	if (props[TIPC_NLA_PROP_WIN]) {
1873		u32 win;
1874
1875		win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
1876		if ((win < TIPC_MIN_LINK_WIN) || (win > TIPC_MAX_LINK_WIN))
1877			return -EINVAL;
1878	}
1879
1880	return 0;
1881}
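/* Example: a request carrying TIPC_NLA_PROP_WIN == 400 is accepted only
 * because it lies within [TIPC_MIN_LINK_WIN, TIPC_MAX_LINK_WIN];
 * out-of-range priority, tolerance or window values all fail here with
 * -EINVAL before any link state is touched.
 */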
1882
1883static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s)
1884{
1885	int i;
1886	struct nlattr *stats;
1887
1888	struct nla_map {
1889		u32 key;
1890		u32 val;
1891	};
1892
1893	struct nla_map map[] = {
1894		{TIPC_NLA_STATS_RX_INFO, 0},
1895		{TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments},
1896		{TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented},
1897		{TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles},
1898		{TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled},
1899		{TIPC_NLA_STATS_TX_INFO, 0},
1900		{TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments},
1901		{TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented},
1902		{TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles},
1903		{TIPC_NLA_STATS_TX_BUNDLED, s->sent_bundled},
1904		{TIPC_NLA_STATS_MSG_PROF_TOT, (s->msg_length_counts) ?
1905			s->msg_length_counts : 1},
1906		{TIPC_NLA_STATS_MSG_LEN_CNT, s->msg_length_counts},
1907		{TIPC_NLA_STATS_MSG_LEN_TOT, s->msg_lengths_total},
1908		{TIPC_NLA_STATS_MSG_LEN_P0, s->msg_length_profile[0]},
1909		{TIPC_NLA_STATS_MSG_LEN_P1, s->msg_length_profile[1]},
1910		{TIPC_NLA_STATS_MSG_LEN_P2, s->msg_length_profile[2]},
1911		{TIPC_NLA_STATS_MSG_LEN_P3, s->msg_length_profile[3]},
1912		{TIPC_NLA_STATS_MSG_LEN_P4, s->msg_length_profile[4]},
1913		{TIPC_NLA_STATS_MSG_LEN_P5, s->msg_length_profile[5]},
1914		{TIPC_NLA_STATS_MSG_LEN_P6, s->msg_length_profile[6]},
1915		{TIPC_NLA_STATS_RX_STATES, s->recv_states},
1916		{TIPC_NLA_STATS_RX_PROBES, s->recv_probes},
1917		{TIPC_NLA_STATS_RX_NACKS, s->recv_nacks},
1918		{TIPC_NLA_STATS_RX_DEFERRED, s->deferred_recv},
1919		{TIPC_NLA_STATS_TX_STATES, s->sent_states},
1920		{TIPC_NLA_STATS_TX_PROBES, s->sent_probes},
1921		{TIPC_NLA_STATS_TX_NACKS, s->sent_nacks},
1922		{TIPC_NLA_STATS_TX_ACKS, s->sent_acks},
1923		{TIPC_NLA_STATS_RETRANSMITTED, s->retransmitted},
1924		{TIPC_NLA_STATS_DUPLICATES, s->duplicates},
1925		{TIPC_NLA_STATS_LINK_CONGS, s->link_congs},
1926		{TIPC_NLA_STATS_MAX_QUEUE, s->max_queue_sz},
1927		{TIPC_NLA_STATS_AVG_QUEUE, s->queue_sz_counts ?
1928			(s->accu_queue_sz / s->queue_sz_counts) : 0}
1929	};
1930
1931	stats = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
1932	if (!stats)
1933		return -EMSGSIZE;
1934
1935	for (i = 0; i < ARRAY_SIZE(map); i++)
1936		if (nla_put_u32(skb, map[i].key, map[i].val))
1937			goto msg_full;
1938
1939	nla_nest_end(skb, stats);
1940
1941	return 0;
1942msg_full:
1943	nla_nest_cancel(skb, stats);
1944
1945	return -EMSGSIZE;
1946}
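/* Note the two guarded map entries: AVG_QUEUE falls back to 0 so the
 * division above cannot fault, and MSG_PROF_TOT substitutes 1 for a zero
 * sample count, presumably so that consumers dividing by it never see a
 * zero denominator on a link that has carried no traffic yet.
 */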
1947
1948/* Caller should hold appropriate locks to protect the link */
1949int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
1950		       struct tipc_link *link, int nlflags)
1951{
1952	u32 self = tipc_own_addr(net);
1953	struct nlattr *attrs;
1954	struct nlattr *prop;
1955	void *hdr;
1956	int err;
1957
1958	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
1959			  nlflags, TIPC_NL_LINK_GET);
1960	if (!hdr)
1961		return -EMSGSIZE;
1962
1963	attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
1964	if (!attrs)
1965		goto msg_full;
1966
1967	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name))
1968		goto attr_msg_full;
1969	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST, tipc_cluster_mask(self)))
1970		goto attr_msg_full;
1971	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu))
1972		goto attr_msg_full;
1973	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->stats.recv_pkts))
1974		goto attr_msg_full;
1975	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->stats.sent_pkts))
1976		goto attr_msg_full;
1977
1978	if (tipc_link_is_up(link))
1979		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
1980			goto attr_msg_full;
1981	if (link->active)
1982		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE))
1983			goto attr_msg_full;
1984
1985	prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
1986	if (!prop)
1987		goto attr_msg_full;
1988	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
1989		goto prop_msg_full;
1990	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance))
1991		goto prop_msg_full;
1992	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN,
1993			link->window))
1994		goto prop_msg_full;
1997	nla_nest_end(msg->skb, prop);
1998
1999	err = __tipc_nl_add_stats(msg->skb, &link->stats);
2000	if (err)
2001		goto attr_msg_full;
2002
2003	nla_nest_end(msg->skb, attrs);
2004	genlmsg_end(msg->skb, hdr);
2005
2006	return 0;
2007
2008prop_msg_full:
2009	nla_nest_cancel(msg->skb, prop);
2010attr_msg_full:
2011	nla_nest_cancel(msg->skb, attrs);
2012msg_full:
2013	genlmsg_cancel(msg->skb, hdr);
2014
2015	return -EMSGSIZE;
2016}
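/* The unwind labels above follow the usual netlink construction pattern:
 * each nesting level that was successfully started (hdr, attrs, prop)
 * gets its own cancel label, so a failed nla_put_*() backs out exactly
 * the levels opened so far and the caller sees a clean -EMSGSIZE.
 */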
2017
2018static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
2019				      struct tipc_stats *stats)
2020{
2021	int i;
2022	struct nlattr *nest;
2023
2024	struct nla_map {
2025		__u32 key;
2026		__u32 val;
2027	};
2028
2029	struct nla_map map[] = {
2030		{TIPC_NLA_STATS_RX_INFO, stats->recv_pkts},
2031		{TIPC_NLA_STATS_RX_FRAGMENTS, stats->recv_fragments},
2032		{TIPC_NLA_STATS_RX_FRAGMENTED, stats->recv_fragmented},
2033		{TIPC_NLA_STATS_RX_BUNDLES, stats->recv_bundles},
2034		{TIPC_NLA_STATS_RX_BUNDLED, stats->recv_bundled},
2035		{TIPC_NLA_STATS_TX_INFO, stats->sent_pkts},
2036		{TIPC_NLA_STATS_TX_FRAGMENTS, stats->sent_fragments},
2037		{TIPC_NLA_STATS_TX_FRAGMENTED, stats->sent_fragmented},
2038		{TIPC_NLA_STATS_TX_BUNDLES, stats->sent_bundles},
2039		{TIPC_NLA_STATS_TX_BUNDLED, stats->sent_bundled},
2040		{TIPC_NLA_STATS_RX_NACKS, stats->recv_nacks},
2041		{TIPC_NLA_STATS_RX_DEFERRED, stats->deferred_recv},
2042		{TIPC_NLA_STATS_TX_NACKS, stats->sent_nacks},
2043		{TIPC_NLA_STATS_TX_ACKS, stats->sent_acks},
2044		{TIPC_NLA_STATS_RETRANSMITTED, stats->retransmitted},
2045		{TIPC_NLA_STATS_DUPLICATES, stats->duplicates},
2046		{TIPC_NLA_STATS_LINK_CONGS, stats->link_congs},
2047		{TIPC_NLA_STATS_MAX_QUEUE, stats->max_queue_sz},
2048		{TIPC_NLA_STATS_AVG_QUEUE, stats->queue_sz_counts ?
2049			(stats->accu_queue_sz / stats->queue_sz_counts) : 0}
2050	};
2051
2052	nest = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
2053	if (!nest)
2054		return -EMSGSIZE;
2055
2056	for (i = 0; i < ARRAY_SIZE(map); i++)
2057		if (nla_put_u32(skb, map[i].key, map[i].val))
2058			goto msg_full;
2059
2060	nla_nest_end(skb, nest);
2061
2062	return 0;
2063msg_full:
2064	nla_nest_cancel(skb, nest);
2065
2066	return -EMSGSIZE;
2067}
2068
2069int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
2070{
2071	int err;
2072	void *hdr;
2073	struct nlattr *attrs;
2074	struct nlattr *prop;
2075	struct tipc_net *tn = net_generic(net, tipc_net_id);
2076	struct tipc_link *bcl = tn->bcl;
2077
2078	if (!bcl)
2079		return 0;
2080
2081	tipc_bcast_lock(net);
2082
2083	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
2084			  NLM_F_MULTI, TIPC_NL_LINK_GET);
2085	if (!hdr) {
2086		tipc_bcast_unlock(net);
2087		return -EMSGSIZE;
2088	}
2089
2090	attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
2091	if (!attrs)
2092		goto msg_full;
2093
2094	/* The broadcast link is always up */
2095	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
2096		goto attr_msg_full;
2097
2098	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_BROADCAST))
2099		goto attr_msg_full;
2100	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name))
2101		goto attr_msg_full;
2102	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, 0))
2103		goto attr_msg_full;
2104	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, 0))
2105		goto attr_msg_full;
2106
2107	prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
2108	if (!prop)
2109		goto attr_msg_full;
2110	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->window))
2111		goto prop_msg_full;
2112	nla_nest_end(msg->skb, prop);
2113
2114	err = __tipc_nl_add_bc_link_stat(msg->skb, &bcl->stats);
2115	if (err)
2116		goto attr_msg_full;
2117
2118	tipc_bcast_unlock(net);
2119	nla_nest_end(msg->skb, attrs);
2120	genlmsg_end(msg->skb, hdr);
2121
2122	return 0;
2123
2124prop_msg_full:
2125	nla_nest_cancel(msg->skb, prop);
2126attr_msg_full:
2127	nla_nest_cancel(msg->skb, attrs);
2128msg_full:
2129	tipc_bcast_unlock(net);
2130	genlmsg_cancel(msg->skb, hdr);
2131
2132	return -EMSGSIZE;
2133}
2134
2135void tipc_link_set_tolerance(struct tipc_link *l, u32 tol,
2136			     struct sk_buff_head *xmitq)
2137{
2138	l->tolerance = tol;
2139	if (link_is_up(l))
2140		tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, tol, 0, xmitq);
2141}
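/* A tolerance change is propagated immediately: if the link is up, the
 * new value rides on a STATE message so that both endpoints apply the
 * same failure-detection timeout.
 */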
2142
2143void tipc_link_set_prio(struct tipc_link *l, u32 prio,
2144			struct sk_buff_head *xmitq)
2145{
2146	l->priority = prio;
2147	tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, prio, xmitq);
2148}
2149
2150void tipc_link_set_abort_limit(struct tipc_link *l, u32 limit)
2151{
2152	l->abort_limit = limit;
2153}