   1/*
   2 * net/tipc/link.c: TIPC link code
   3 *
   4 * Copyright (c) 1996-2007, Ericsson AB
   5 * Copyright (c) 2004-2007, 2010-2011, Wind River Systems
   6 * All rights reserved.
   7 *
   8 * Redistribution and use in source and binary forms, with or without
   9 * modification, are permitted provided that the following conditions are met:
  10 *
  11 * 1. Redistributions of source code must retain the above copyright
  12 *    notice, this list of conditions and the following disclaimer.
  13 * 2. Redistributions in binary form must reproduce the above copyright
  14 *    notice, this list of conditions and the following disclaimer in the
  15 *    documentation and/or other materials provided with the distribution.
  16 * 3. Neither the names of the copyright holders nor the names of its
  17 *    contributors may be used to endorse or promote products derived from
  18 *    this software without specific prior written permission.
  19 *
  20 * Alternatively, this software may be distributed under the terms of the
  21 * GNU General Public License ("GPL") version 2 as published by the Free
  22 * Software Foundation.
  23 *
  24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  34 * POSSIBILITY OF SUCH DAMAGE.
  35 */
  36
  37#include "core.h"
  38#include "link.h"
  39#include "port.h"
  40#include "name_distr.h"
  41#include "discover.h"
  42#include "config.h"
  43
  44
  45/*
  46 * Out-of-range value for link session numbers
  47 */
  48
  49#define INVALID_SESSION 0x10000
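    /*
     * Annotation: session numbers travel in a 16-bit header field (note
     * the "& 0xffff" masking in tipc_link_create() and tipc_link_reset()
     * below), so 0x10000 can never match a real session; parking
     * peer_session here means "accept whatever session the peer
     * presents next".
     */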
  50
  51/*
  52 * Link state events:
  53 */
  54
  55#define  STARTING_EVT    856384768	/* link processing trigger */
  56#define  TRAFFIC_MSG_EVT 560815u	/* rx'd traffic message */
  57#define  TIMEOUT_EVT     560817u	/* link timer expired */
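    /*
     * Annotation: these events drive the link finite state machine in
     * link_state_event() below, where each of the four states
     * (WORKING_WORKING, WORKING_UNKNOWN, RESET_UNKNOWN, RESET_RESET)
     * reacts to received traffic, RESET_MSG/ACTIVATE_MSG protocol
     * messages, and timer expiry.
     */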
  58
  59/*
  60 * The following two 'message types' are really just implementation
  61 * data conveniently stored in the message header.
  62 * They must not be considered part of the protocol.
  63 */
  64#define OPEN_MSG   0
  65#define CLOSED_MSG 1
  66
  67/*
  68 * State value stored in 'exp_msg_count'
  69 */
  70
  71#define START_CHANGEOVER 100000u
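    /*
     * Annotation: tipc_link_reset() parks exp_msg_count at this value
     * when a changeover begins; genuine tunnelled-message counts are far
     * smaller, so the value doubles as a "changeover started, count not
     * yet known" marker.
     */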
  72
  73/**
  74 * struct link_name - deconstructed link name
  75 * @addr_local: network address of node at this end
  76 * @if_local: name of interface at this end
  77 * @addr_peer: network address of node at far end
  78 * @if_peer: name of interface at far end
  79 */
  80
  81struct link_name {
  82	u32 addr_local;
  83	char if_local[TIPC_MAX_IF_NAME];
  84	u32 addr_peer;
  85	char if_peer[TIPC_MAX_IF_NAME];
  86};
  87
  88static void link_handle_out_of_seq_msg(struct link *l_ptr,
  89				       struct sk_buff *buf);
  90static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf);
  91static int  link_recv_changeover_msg(struct link **l_ptr, struct sk_buff **buf);
  92static void link_set_supervision_props(struct link *l_ptr, u32 tolerance);
  93static int  link_send_sections_long(struct tipc_port *sender,
  94				    struct iovec const *msg_sect,
  95				    u32 num_sect, unsigned int total_len,
  96				    u32 destnode);
  97static void link_check_defragm_bufs(struct link *l_ptr);
  98static void link_state_event(struct link *l_ptr, u32 event);
  99static void link_reset_statistics(struct link *l_ptr);
 100static void link_print(struct link *l_ptr, const char *str);
 101static void link_start(struct link *l_ptr);
 102static int link_send_long_buf(struct link *l_ptr, struct sk_buff *buf);
 103
 104/*
 105 *  Simple link routines
 106 */
 107
 108static unsigned int align(unsigned int i)
 109{
 110	return (i + 3) & ~3u;
 111}
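    /*
     * Behaviour sketch: align() rounds a length up to the next 4-byte
     * boundary, e.g.
     *
     *	align(0) == 0, align(1) == 4, align(5) == 8, align(8) == 8
     *
     * which keeps bundled messages 32-bit aligned (see link_bundle_buf()
     * below).
     */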
 112
 113static void link_init_max_pkt(struct link *l_ptr)
 114{
 115	u32 max_pkt;
 116
 117	max_pkt = (l_ptr->b_ptr->mtu & ~3);
 118	if (max_pkt > MAX_MSG_SIZE)
 119		max_pkt = MAX_MSG_SIZE;
 120
 121	l_ptr->max_pkt_target = max_pkt;
 122	if (l_ptr->max_pkt_target < MAX_PKT_DEFAULT)
 123		l_ptr->max_pkt = l_ptr->max_pkt_target;
 124	else
 125		l_ptr->max_pkt = MAX_PKT_DEFAULT;
 126
 127	l_ptr->max_pkt_probes = 0;
 128}
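    /*
     * Illustrative example (exact constants live elsewhere): with a
     * bearer MTU of 1500, max_pkt_target becomes 1500 & ~3 == 1500; the
     * link then starts out sending with the smaller of that target and
     * MAX_PKT_DEFAULT, and probes its way up toward the target via the
     * STATE_MSG exchanges driven from link_state_event().
     */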
 129
 130static u32 link_next_sent(struct link *l_ptr)
 131{
 132	if (l_ptr->next_out)
 133		return msg_seqno(buf_msg(l_ptr->next_out));
 134	return mod(l_ptr->next_out_no);
 135}
 136
 137static u32 link_last_sent(struct link *l_ptr)
 138{
 139	return mod(link_next_sent(l_ptr) - 1);
 140}
 141
 142/*
 143 *  Simple non-static link routines (i.e. referenced outside this file)
 144 */
 145
 146int tipc_link_is_up(struct link *l_ptr)
 147{
 148	if (!l_ptr)
 149		return 0;
 150	return link_working_working(l_ptr) || link_working_unknown(l_ptr);
 151}
 152
 153int tipc_link_is_active(struct link *l_ptr)
 154{
 155	return	(l_ptr->owner->active_links[0] == l_ptr) ||
 156		(l_ptr->owner->active_links[1] == l_ptr);
 157}
 158
 159/**
 160 * link_name_validate - validate & (optionally) deconstruct link name
 161 * @name: ptr to link name string
 162 * @name_parts: ptr to area for link name components (or NULL if not needed)
 163 *
 164 * Returns 1 if link name is valid, otherwise 0.
 165 */
 166
 167static int link_name_validate(const char *name, struct link_name *name_parts)
 168{
 169	char name_copy[TIPC_MAX_LINK_NAME];
 170	char *addr_local;
 171	char *if_local;
 172	char *addr_peer;
 173	char *if_peer;
 174	char dummy;
 175	u32 z_local, c_local, n_local;
 176	u32 z_peer, c_peer, n_peer;
 177	u32 if_local_len;
 178	u32 if_peer_len;
 179
 180	/* copy link name & ensure length is OK */
 181
 182	name_copy[TIPC_MAX_LINK_NAME - 1] = 0;
 183	/* need above in case non-Posix strncpy() doesn't pad with nulls */
 184	strncpy(name_copy, name, TIPC_MAX_LINK_NAME);
 185	if (name_copy[TIPC_MAX_LINK_NAME - 1] != 0)
 186		return 0;
 187
 188	/* ensure all component parts of link name are present */
 189
 190	addr_local = name_copy;
 191	if_local = strchr(addr_local, ':');
 192	if (if_local == NULL)
 193		return 0;
 194	*(if_local++) = 0;
 195	addr_peer = strchr(if_local, '-');
 196	if (addr_peer == NULL)
 197		return 0;
 198	*(addr_peer++) = 0;
 199	if_local_len = addr_peer - if_local;
 200	if_peer = strchr(addr_peer, ':');
 201	if (if_peer == NULL)
 202		return 0;
 203	*(if_peer++) = 0;
 204	if_peer_len = strlen(if_peer) + 1;
 205
 206	/* validate component parts of link name */
 207
 208	if ((sscanf(addr_local, "%u.%u.%u%c",
 209		    &z_local, &c_local, &n_local, &dummy) != 3) ||
 210	    (sscanf(addr_peer, "%u.%u.%u%c",
 211		    &z_peer, &c_peer, &n_peer, &dummy) != 3) ||
 212	    (z_local > 255) || (c_local > 4095) || (n_local > 4095) ||
 213	    (z_peer  > 255) || (c_peer  > 4095) || (n_peer  > 4095) ||
 214	    (if_local_len <= 1) || (if_local_len > TIPC_MAX_IF_NAME) ||
 215	    (if_peer_len  <= 1) || (if_peer_len  > TIPC_MAX_IF_NAME) ||
 216	    (strspn(if_local, tipc_alphabet) != (if_local_len - 1)) ||
 217	    (strspn(if_peer, tipc_alphabet) != (if_peer_len - 1)))
 218		return 0;
 219
 220	/* return link name components, if necessary */
 221
 222	if (name_parts) {
 223		name_parts->addr_local = tipc_addr(z_local, c_local, n_local);
 224		strcpy(name_parts->if_local, if_local);
 225		name_parts->addr_peer = tipc_addr(z_peer, c_peer, n_peer);
 226		strcpy(name_parts->if_peer, if_peer);
 227	}
 228	return 1;
 229}
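    /*
     * Usage sketch (hypothetical caller, shown for illustration only):
     *
     *	struct link_name parts;
     *
     *	if (link_name_validate("1.1.5:eth0-1.1.9:eth2", &parts))
     *		...use parts.addr_local, parts.if_peer, etc...
     *
     * i.e. a valid name is <zone.cluster.node>:<interface> for the local
     * end, '-', then the same for the peer end (cf. the sprintf() in
     * tipc_link_create() below).
     */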
 230
 231/**
 232 * link_timeout - handle expiration of link timer
 233 * @l_ptr: pointer to link
 234 *
 235 * This routine must not grab "tipc_net_lock" to avoid a potential deadlock
 236 * with tipc_link_delete().  (There is no risk that the node will be deleted by
 237 * another thread because tipc_link_delete() always cancels the link timer before
 238 * tipc_node_delete() is called.)
 239 */
 240
 241static void link_timeout(struct link *l_ptr)
 242{
 243	tipc_node_lock(l_ptr->owner);
 244
 245	/* update counters used in statistical profiling of send traffic */
 246
 247	l_ptr->stats.accu_queue_sz += l_ptr->out_queue_size;
 248	l_ptr->stats.queue_sz_counts++;
 249
 250	if (l_ptr->first_out) {
 251		struct tipc_msg *msg = buf_msg(l_ptr->first_out);
 252		u32 length = msg_size(msg);
 253
 254		if ((msg_user(msg) == MSG_FRAGMENTER) &&
 255		    (msg_type(msg) == FIRST_FRAGMENT)) {
 256			length = msg_size(msg_get_wrapped(msg));
 257		}
 258		if (length) {
 259			l_ptr->stats.msg_lengths_total += length;
 260			l_ptr->stats.msg_length_counts++;
 261			if (length <= 64)
 262				l_ptr->stats.msg_length_profile[0]++;
 263			else if (length <= 256)
 264				l_ptr->stats.msg_length_profile[1]++;
 265			else if (length <= 1024)
 266				l_ptr->stats.msg_length_profile[2]++;
 267			else if (length <= 4096)
 268				l_ptr->stats.msg_length_profile[3]++;
 269			else if (length <= 16384)
 270				l_ptr->stats.msg_length_profile[4]++;
 271			else if (length <= 32768)
 272				l_ptr->stats.msg_length_profile[5]++;
 273			else
 274				l_ptr->stats.msg_length_profile[6]++;
 275		}
 276	}
 277
 278	/* do all other link processing performed on a periodic basis */
 279
 280	link_check_defragm_bufs(l_ptr);
 281
 282	link_state_event(l_ptr, TIMEOUT_EVT);
 283
 284	if (l_ptr->next_out)
 285		tipc_link_push_queue(l_ptr);
 286
 287	tipc_node_unlock(l_ptr->owner);
 288}
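    /*
     * Annotation: the buckets above form a seven-bin histogram of sent
     * message sizes (<= 64, 256, 1024, 4096, 16384, 32768 bytes and
     * larger), sampled once per timer tick from the oldest queued
     * message.
     */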
 289
 290static void link_set_timer(struct link *l_ptr, u32 time)
 291{
 292	k_start_timer(&l_ptr->timer, time);
 293}
 294
 295/**
 296 * tipc_link_create - create a new link
 297 * @n_ptr: pointer to associated node
 298 * @b_ptr: pointer to associated bearer
 299 * @media_addr: media address to use when sending messages over link
 300 *
 301 * Returns pointer to link.
 302 */
 303
 304struct link *tipc_link_create(struct tipc_node *n_ptr,
 305			      struct tipc_bearer *b_ptr,
 306			      const struct tipc_media_addr *media_addr)
 307{
 308	struct link *l_ptr;
 309	struct tipc_msg *msg;
 310	char *if_name;
 311	char addr_string[16];
 312	u32 peer = n_ptr->addr;
 313
 314	if (n_ptr->link_cnt >= 2) {
 315		tipc_addr_string_fill(addr_string, n_ptr->addr);
 316		err("Attempt to establish third link to %s\n", addr_string);
 317		return NULL;
 318	}
 319
 320	if (n_ptr->links[b_ptr->identity]) {
 321		tipc_addr_string_fill(addr_string, n_ptr->addr);
 322		err("Attempt to establish second link on <%s> to %s\n",
 323		    b_ptr->name, addr_string);
 324		return NULL;
 325	}
 326
 327	l_ptr = kzalloc(sizeof(*l_ptr), GFP_ATOMIC);
 328	if (!l_ptr) {
 329		warn("Link creation failed, no memory\n");
 330		return NULL;
 331	}
 332
 333	l_ptr->addr = peer;
 334	if_name = strchr(b_ptr->name, ':') + 1;
 335	sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:",
 336		tipc_zone(tipc_own_addr), tipc_cluster(tipc_own_addr),
 337		tipc_node(tipc_own_addr),
 338		if_name,
 339		tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
 340		/* note: peer i/f is appended to link name by reset/activate */
 341	memcpy(&l_ptr->media_addr, media_addr, sizeof(*media_addr));
 342	l_ptr->owner = n_ptr;
 343	l_ptr->checkpoint = 1;
 344	l_ptr->b_ptr = b_ptr;
 345	link_set_supervision_props(l_ptr, b_ptr->media->tolerance);
 346	l_ptr->state = RESET_UNKNOWN;
 347
 348	l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg;
 349	msg = l_ptr->pmsg;
 350	tipc_msg_init(msg, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE, l_ptr->addr);
 351	msg_set_size(msg, sizeof(l_ptr->proto_msg));
 352	msg_set_session(msg, (tipc_random & 0xffff));
 353	msg_set_bearer_id(msg, b_ptr->identity);
 354	strcpy((char *)msg_data(msg), if_name);
 355
 356	l_ptr->priority = b_ptr->priority;
 357	tipc_link_set_queue_limits(l_ptr, b_ptr->media->window);
 358
 359	link_init_max_pkt(l_ptr);
 360
 361	l_ptr->next_out_no = 1;
 362	INIT_LIST_HEAD(&l_ptr->waiting_ports);
 363
 364	link_reset_statistics(l_ptr);
 365
 366	tipc_node_attach_link(n_ptr, l_ptr);
 367
 368	k_init_timer(&l_ptr->timer, (Handler)link_timeout, (unsigned long)l_ptr);
 369	list_add_tail(&l_ptr->link_list, &b_ptr->links);
 370	tipc_k_signal((Handler)link_start, (unsigned long)l_ptr);
 371
 372	return l_ptr;
 373}
 374
 375/**
 376 * tipc_link_delete - delete a link
 377 * @l_ptr: pointer to link
 378 *
 379 * Note: 'tipc_net_lock' is write_locked, bearer is locked.
 380 * This routine must not grab the node lock until after link timer cancellation
 381 * to avoid a potential deadlock situation.
 382 */
 383
 384void tipc_link_delete(struct link *l_ptr)
 385{
 386	if (!l_ptr) {
 387		err("Attempt to delete non-existent link\n");
 388		return;
 389	}
 390
 391	k_cancel_timer(&l_ptr->timer);
 392
 393	tipc_node_lock(l_ptr->owner);
 394	tipc_link_reset(l_ptr);
 395	tipc_node_detach_link(l_ptr->owner, l_ptr);
 396	tipc_link_stop(l_ptr);
 397	list_del_init(&l_ptr->link_list);
 398	tipc_node_unlock(l_ptr->owner);
 399	k_term_timer(&l_ptr->timer);
 400	kfree(l_ptr);
 401}
 402
 403static void link_start(struct link *l_ptr)
 404{
 405	tipc_node_lock(l_ptr->owner);
 406	link_state_event(l_ptr, STARTING_EVT);
 407	tipc_node_unlock(l_ptr->owner);
 408}
 409
 410/**
 411 * link_schedule_port - schedule port for deferred sending
 412 * @l_ptr: pointer to link
 413 * @origport: reference to sending port
 414 * @sz: amount of data to be sent
 415 *
 416 * Schedules port for renewed sending of messages after link congestion
 417 * has abated.
 418 */
 419
 420static int link_schedule_port(struct link *l_ptr, u32 origport, u32 sz)
 421{
 422	struct tipc_port *p_ptr;
 423
 424	spin_lock_bh(&tipc_port_list_lock);
 425	p_ptr = tipc_port_lock(origport);
 426	if (p_ptr) {
 427		if (!p_ptr->wakeup)
 428			goto exit;
 429		if (!list_empty(&p_ptr->wait_list))
 430			goto exit;
 431		p_ptr->congested = 1;
 432		p_ptr->waiting_pkts = 1 + ((sz - 1) / l_ptr->max_pkt);
 433		list_add_tail(&p_ptr->wait_list, &l_ptr->waiting_ports);
 434		l_ptr->stats.link_congs++;
 435exit:
 436		tipc_port_unlock(p_ptr);
 437	}
 438	spin_unlock_bh(&tipc_port_list_lock);
 439	return -ELINKCONG;
 440}
 441
 442void tipc_link_wakeup_ports(struct link *l_ptr, int all)
 443{
 444	struct tipc_port *p_ptr;
 445	struct tipc_port *temp_p_ptr;
 446	int win = l_ptr->queue_limit[0] - l_ptr->out_queue_size;
 447
 448	if (all)
 449		win = 100000;
 450	if (win <= 0)
 451		return;
 452	if (!spin_trylock_bh(&tipc_port_list_lock))
 453		return;
 454	if (link_congested(l_ptr))
 455		goto exit;
 456	list_for_each_entry_safe(p_ptr, temp_p_ptr, &l_ptr->waiting_ports,
 457				 wait_list) {
 458		if (win <= 0)
 459			break;
 460		list_del_init(&p_ptr->wait_list);
 461		spin_lock_bh(p_ptr->lock);
 462		p_ptr->congested = 0;
 463		p_ptr->wakeup(p_ptr);
 464		win -= p_ptr->waiting_pkts;
 465		spin_unlock_bh(p_ptr->lock);
 466	}
 467
 468exit:
 469	spin_unlock_bh(&tipc_port_list_lock);
 470}
 471
 472/**
 473 * link_release_outqueue - purge link's outbound message queue
 474 * @l_ptr: pointer to link
 475 */
 476
 477static void link_release_outqueue(struct link *l_ptr)
 478{
 479	struct sk_buff *buf = l_ptr->first_out;
 480	struct sk_buff *next;
 481
 482	while (buf) {
 483		next = buf->next;
 484		buf_discard(buf);
 485		buf = next;
 486	}
 487	l_ptr->first_out = NULL;
 488	l_ptr->out_queue_size = 0;
 489}
 490
 491/**
 492 * tipc_link_reset_fragments - purge link's inbound message fragments queue
 493 * @l_ptr: pointer to link
 494 */
 495
 496void tipc_link_reset_fragments(struct link *l_ptr)
 497{
 498	struct sk_buff *buf = l_ptr->defragm_buf;
 499	struct sk_buff *next;
 500
 501	while (buf) {
 502		next = buf->next;
 503		buf_discard(buf);
 504		buf = next;
 505	}
 506	l_ptr->defragm_buf = NULL;
 507}
 508
 509/**
 510 * tipc_link_stop - purge all inbound and outbound messages associated with link
 511 * @l_ptr: pointer to link
 512 */
 513
 514void tipc_link_stop(struct link *l_ptr)
 515{
 516	struct sk_buff *buf;
 517	struct sk_buff *next;
 518
 519	buf = l_ptr->oldest_deferred_in;
 520	while (buf) {
 521		next = buf->next;
 522		buf_discard(buf);
 523		buf = next;
 524	}
 525
 526	buf = l_ptr->first_out;
 527	while (buf) {
 528		next = buf->next;
 529		buf_discard(buf);
 530		buf = next;
 531	}
 532
 533	tipc_link_reset_fragments(l_ptr);
 534
 535	buf_discard(l_ptr->proto_msg_queue);
 536	l_ptr->proto_msg_queue = NULL;
 537}
 538
 539/* LINK EVENT CODE IS NOT SUPPORTED AT PRESENT */
 540#define link_send_event(fcn, l_ptr, up) do { } while (0)
 541
 542void tipc_link_reset(struct link *l_ptr)
 543{
 544	struct sk_buff *buf;
 545	u32 prev_state = l_ptr->state;
 546	u32 checkpoint = l_ptr->next_in_no;
 547	int was_active_link = tipc_link_is_active(l_ptr);
 548
 549	msg_set_session(l_ptr->pmsg, ((msg_session(l_ptr->pmsg) + 1) & 0xffff));
 550
 551	/* Link is down, accept any session */
 552	l_ptr->peer_session = INVALID_SESSION;
 553
 554	/* Prepare for max packet size negotiation */
 555	link_init_max_pkt(l_ptr);
 556
 557	l_ptr->state = RESET_UNKNOWN;
 558
 559	if ((prev_state == RESET_UNKNOWN) || (prev_state == RESET_RESET))
 560		return;
 561
 562	tipc_node_link_down(l_ptr->owner, l_ptr);
 563	tipc_bearer_remove_dest(l_ptr->b_ptr, l_ptr->addr);
 564
 565	if (was_active_link && tipc_node_active_links(l_ptr->owner) &&
 566	    l_ptr->owner->permit_changeover) {
 567		l_ptr->reset_checkpoint = checkpoint;
 568		l_ptr->exp_msg_count = START_CHANGEOVER;
 569	}
 570
 571	/* Clean up all queues: */
 572
 573	link_release_outqueue(l_ptr);
 574	buf_discard(l_ptr->proto_msg_queue);
 575	l_ptr->proto_msg_queue = NULL;
 576	buf = l_ptr->oldest_deferred_in;
 577	while (buf) {
 578		struct sk_buff *next = buf->next;
 579		buf_discard(buf);
 580		buf = next;
 581	}
 582	if (!list_empty(&l_ptr->waiting_ports))
 583		tipc_link_wakeup_ports(l_ptr, 1);
 584
 585	l_ptr->retransm_queue_head = 0;
 586	l_ptr->retransm_queue_size = 0;
 587	l_ptr->last_out = NULL;
 588	l_ptr->first_out = NULL;
 589	l_ptr->next_out = NULL;
 590	l_ptr->unacked_window = 0;
 591	l_ptr->checkpoint = 1;
 592	l_ptr->next_out_no = 1;
 593	l_ptr->deferred_inqueue_sz = 0;
 594	l_ptr->oldest_deferred_in = NULL;
 595	l_ptr->newest_deferred_in = NULL;
 596	l_ptr->fsm_msg_cnt = 0;
 597	l_ptr->stale_count = 0;
 598	link_reset_statistics(l_ptr);
 599
 600	link_send_event(tipc_cfg_link_event, l_ptr, 0);
 601	if (!in_own_cluster(l_ptr->addr))
 602		link_send_event(tipc_disc_link_event, l_ptr, 0);
 603}
 604
 605
 606static void link_activate(struct link *l_ptr)
 607{
 608	l_ptr->next_in_no = l_ptr->stats.recv_info = 1;
 609	tipc_node_link_up(l_ptr->owner, l_ptr);
 610	tipc_bearer_add_dest(l_ptr->b_ptr, l_ptr->addr);
 611	link_send_event(tipc_cfg_link_event, l_ptr, 1);
 612	if (!in_own_cluster(l_ptr->addr))
 613		link_send_event(tipc_disc_link_event, l_ptr, 1);
 614}
 615
 616/**
 617 * link_state_event - link finite state machine
 618 * @l_ptr: pointer to link
 619 * @event: state machine event to process
 620 */
 621
 622static void link_state_event(struct link *l_ptr, unsigned event)
 623{
 624	struct link *other;
 625	u32 cont_intv = l_ptr->continuity_interval;
 626
 627	if (!l_ptr->started && (event != STARTING_EVT))
 628		return;		/* Not yet. */
 629
 630	if (link_blocked(l_ptr)) {
 631		if (event == TIMEOUT_EVT)
 632			link_set_timer(l_ptr, cont_intv);
 633		return;	  /* Changeover going on */
 634	}
 635
 636	switch (l_ptr->state) {
 637	case WORKING_WORKING:
 638		switch (event) {
 639		case TRAFFIC_MSG_EVT:
 640		case ACTIVATE_MSG:
 641			break;
 642		case TIMEOUT_EVT:
 643			if (l_ptr->next_in_no != l_ptr->checkpoint) {
 644				l_ptr->checkpoint = l_ptr->next_in_no;
 645				if (tipc_bclink_acks_missing(l_ptr->owner)) {
 646					tipc_link_send_proto_msg(l_ptr, STATE_MSG,
 647								 0, 0, 0, 0, 0);
 648					l_ptr->fsm_msg_cnt++;
 649				} else if (l_ptr->max_pkt < l_ptr->max_pkt_target) {
 650					tipc_link_send_proto_msg(l_ptr, STATE_MSG,
 651								 1, 0, 0, 0, 0);
 652					l_ptr->fsm_msg_cnt++;
 653				}
 654				link_set_timer(l_ptr, cont_intv);
 655				break;
 656			}
 657			l_ptr->state = WORKING_UNKNOWN;
 658			l_ptr->fsm_msg_cnt = 0;
 659			tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
 660			l_ptr->fsm_msg_cnt++;
 661			link_set_timer(l_ptr, cont_intv / 4);
 662			break;
 663		case RESET_MSG:
 664			info("Resetting link <%s>, requested by peer\n",
 665			     l_ptr->name);
 666			tipc_link_reset(l_ptr);
 667			l_ptr->state = RESET_RESET;
 668			l_ptr->fsm_msg_cnt = 0;
 669			tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
 670			l_ptr->fsm_msg_cnt++;
 671			link_set_timer(l_ptr, cont_intv);
 672			break;
 673		default:
 674			err("Unknown link event %u in WW state\n", event);
 675		}
 676		break;
 677	case WORKING_UNKNOWN:
 678		switch (event) {
 679		case TRAFFIC_MSG_EVT:
 680		case ACTIVATE_MSG:
 681			l_ptr->state = WORKING_WORKING;
 682			l_ptr->fsm_msg_cnt = 0;
 683			link_set_timer(l_ptr, cont_intv);
 684			break;
 685		case RESET_MSG:
 686			info("Resetting link <%s>, requested by peer "
 687			     "while probing\n", l_ptr->name);
 688			tipc_link_reset(l_ptr);
 689			l_ptr->state = RESET_RESET;
 690			l_ptr->fsm_msg_cnt = 0;
 691			tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
 692			l_ptr->fsm_msg_cnt++;
 693			link_set_timer(l_ptr, cont_intv);
 694			break;
 695		case TIMEOUT_EVT:
 696			if (l_ptr->next_in_no != l_ptr->checkpoint) {
 697				l_ptr->state = WORKING_WORKING;
 698				l_ptr->fsm_msg_cnt = 0;
 699				l_ptr->checkpoint = l_ptr->next_in_no;
 700				if (tipc_bclink_acks_missing(l_ptr->owner)) {
 701					tipc_link_send_proto_msg(l_ptr, STATE_MSG,
 702								 0, 0, 0, 0, 0);
 703					l_ptr->fsm_msg_cnt++;
 704				}
 705				link_set_timer(l_ptr, cont_intv);
 706			} else if (l_ptr->fsm_msg_cnt < l_ptr->abort_limit) {
 707				tipc_link_send_proto_msg(l_ptr, STATE_MSG,
 708							 1, 0, 0, 0, 0);
 709				l_ptr->fsm_msg_cnt++;
 710				link_set_timer(l_ptr, cont_intv / 4);
 711			} else {	/* Link has failed */
 712				warn("Resetting link <%s>, peer not responding\n",
 713				     l_ptr->name);
 714				tipc_link_reset(l_ptr);
 715				l_ptr->state = RESET_UNKNOWN;
 716				l_ptr->fsm_msg_cnt = 0;
 717				tipc_link_send_proto_msg(l_ptr, RESET_MSG,
 718							 0, 0, 0, 0, 0);
 719				l_ptr->fsm_msg_cnt++;
 720				link_set_timer(l_ptr, cont_intv);
 721			}
 722			break;
 723		default:
 724			err("Unknown link event %u in WU state\n", event);
 725		}
 726		break;
 727	case RESET_UNKNOWN:
 728		switch (event) {
 729		case TRAFFIC_MSG_EVT:
 730			break;
 731		case ACTIVATE_MSG:
 732			other = l_ptr->owner->active_links[0];
 733			if (other && link_working_unknown(other))
 734				break;
 735			l_ptr->state = WORKING_WORKING;
 736			l_ptr->fsm_msg_cnt = 0;
 737			link_activate(l_ptr);
 738			tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
 739			l_ptr->fsm_msg_cnt++;
 740			link_set_timer(l_ptr, cont_intv);
 741			break;
 742		case RESET_MSG:
 743			l_ptr->state = RESET_RESET;
 744			l_ptr->fsm_msg_cnt = 0;
 745			tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 1, 0, 0, 0, 0);
 746			l_ptr->fsm_msg_cnt++;
 747			link_set_timer(l_ptr, cont_intv);
 748			break;
 749		case STARTING_EVT:
 750			l_ptr->started = 1;
 751			/* fall through */
 752		case TIMEOUT_EVT:
 753			tipc_link_send_proto_msg(l_ptr, RESET_MSG, 0, 0, 0, 0, 0);
 754			l_ptr->fsm_msg_cnt++;
 755			link_set_timer(l_ptr, cont_intv);
 756			break;
 757		default:
 758			err("Unknown link event %u in RU state\n", event);
 759		}
 760		break;
 761	case RESET_RESET:
 762		switch (event) {
 763		case TRAFFIC_MSG_EVT:
 764		case ACTIVATE_MSG:
 765			other = l_ptr->owner->active_links[0];
 766			if (other && link_working_unknown(other))
 767				break;
 768			l_ptr->state = WORKING_WORKING;
 769			l_ptr->fsm_msg_cnt = 0;
 770			link_activate(l_ptr);
 771			tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
 772			l_ptr->fsm_msg_cnt++;
 773			link_set_timer(l_ptr, cont_intv);
 774			break;
 775		case RESET_MSG:
 776			break;
 777		case TIMEOUT_EVT:
 778			tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
 779			l_ptr->fsm_msg_cnt++;
 780			link_set_timer(l_ptr, cont_intv);
 781			break;
 782		default:
 783			err("Unknown link event %u in RR state\n", event);
 784		}
 785		break;
 786	default:
 787		err("Unknown link state %u/%u\n", l_ptr->state, event);
 788	}
 789}
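    /*
     * Typical startup sequence through the FSM above: STARTING_EVT in
     * RESET_UNKNOWN triggers a RESET_MSG to the peer; the ensuing
     * RESET_MSG/ACTIVATE_MSG exchange moves the link (possibly via
     * RESET_RESET) into WORKING_WORKING; thereafter each TIMEOUT_EVT
     * probes the peer only when no received traffic has advanced
     * next_in_no past the checkpoint.
     */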
 790
 791/*
 792 * link_bundle_buf(): Append contents of a buffer to
 793 * the tail of an existing one.
 794 */
 795
 796static int link_bundle_buf(struct link *l_ptr,
 797			   struct sk_buff *bundler,
 798			   struct sk_buff *buf)
 799{
 800	struct tipc_msg *bundler_msg = buf_msg(bundler);
 801	struct tipc_msg *msg = buf_msg(buf);
 802	u32 size = msg_size(msg);
 803	u32 bundle_size = msg_size(bundler_msg);
 804	u32 to_pos = align(bundle_size);
 805	u32 pad = to_pos - bundle_size;
 806
 807	if (msg_user(bundler_msg) != MSG_BUNDLER)
 808		return 0;
 809	if (msg_type(bundler_msg) != OPEN_MSG)
 810		return 0;
 811	if (skb_tailroom(bundler) < (pad + size))
 812		return 0;
 813	if (l_ptr->max_pkt < (to_pos + size))
 814		return 0;
 815
 816	skb_put(bundler, pad + size);
 817	skb_copy_to_linear_data_offset(bundler, to_pos, buf->data, size);
 818	msg_set_size(bundler_msg, to_pos + size);
 819	msg_set_msgcnt(bundler_msg, msg_msgcnt(bundler_msg) + 1);
 820	buf_discard(buf);
 821	l_ptr->stats.sent_bundled++;
 822	return 1;
 823}
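    /*
     * Worked example: with bundle_size == 70, to_pos == align(70) == 72
     * and pad == 2, so a 100-byte message lands at offset 72 and the
     * bundle grows to 172 bytes -- accepted only if the buffer has
     * tailroom for pad + size and the result still fits in
     * l_ptr->max_pkt.
     */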
 824
 825static void link_add_to_outqueue(struct link *l_ptr,
 826				 struct sk_buff *buf,
 827				 struct tipc_msg *msg)
 828{
 829	u32 ack = mod(l_ptr->next_in_no - 1);
 830	u32 seqno = mod(l_ptr->next_out_no++);
 831
 832	msg_set_word(msg, 2, ((ack << 16) | seqno));
 833	msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
 834	buf->next = NULL;
 835	if (l_ptr->first_out) {
 836		l_ptr->last_out->next = buf;
 837		l_ptr->last_out = buf;
 838	} else
 839		l_ptr->first_out = l_ptr->last_out = buf;
 840
 841	l_ptr->out_queue_size++;
 842	if (l_ptr->out_queue_size > l_ptr->stats.max_queue_sz)
 843		l_ptr->stats.max_queue_sz = l_ptr->out_queue_size;
 844}
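    /*
     * Annotation: header word 2 packs the piggybacked acknowledge number
     * into its upper 16 bits and the packet's own sequence number into
     * the lower 16, hence the single msg_set_word(msg, 2, ...) call
     * above.
     */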
 845
 846static void link_add_chain_to_outqueue(struct link *l_ptr,
 847				       struct sk_buff *buf_chain,
 848				       u32 long_msgno)
 849{
 850	struct sk_buff *buf;
 851	struct tipc_msg *msg;
 852
 853	if (!l_ptr->next_out)
 854		l_ptr->next_out = buf_chain;
 855	while (buf_chain) {
 856		buf = buf_chain;
 857		buf_chain = buf_chain->next;
 858
 859		msg = buf_msg(buf);
 860		msg_set_long_msgno(msg, long_msgno);
 861		link_add_to_outqueue(l_ptr, buf, msg);
 862	}
 863}
 864
 865/*
 866 * tipc_link_send_buf() is the 'full path' for messages, called from
 867 * inside TIPC when the 'fast path' in tipc_send_buf
 868 * has failed, and from link_send()
 869 */
 870
 871int tipc_link_send_buf(struct link *l_ptr, struct sk_buff *buf)
 872{
 873	struct tipc_msg *msg = buf_msg(buf);
 874	u32 size = msg_size(msg);
 875	u32 dsz = msg_data_sz(msg);
 876	u32 queue_size = l_ptr->out_queue_size;
 877	u32 imp = tipc_msg_tot_importance(msg);
 878	u32 queue_limit = l_ptr->queue_limit[imp];
 879	u32 max_packet = l_ptr->max_pkt;
 880
 881	msg_set_prevnode(msg, tipc_own_addr);	/* If routed message */
 882
 883	/* Match msg importance against queue limits: */
 884
 885	if (unlikely(queue_size >= queue_limit)) {
 886		if (imp <= TIPC_CRITICAL_IMPORTANCE) {
 887			link_schedule_port(l_ptr, msg_origport(msg), size);
 888			buf_discard(buf);
 889			return -ELINKCONG;
 890		}
 891		buf_discard(buf);
 892		if (imp > CONN_MANAGER) {
 893			warn("Resetting link <%s>, send queue full\n", l_ptr->name);
 894			tipc_link_reset(l_ptr);
 895		}
 896		return dsz;
 897	}
 898
 899	/* Fragmentation needed ? */
 900
 901	if (size > max_packet)
 902		return link_send_long_buf(l_ptr, buf);
 903
 904	/* Packet can be queued or sent: */
 905
 906	if (likely(!tipc_bearer_congested(l_ptr->b_ptr, l_ptr) &&
 907		   !link_congested(l_ptr))) {
 908		link_add_to_outqueue(l_ptr, buf, msg);
 909
 910		if (likely(tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr))) {
 911			l_ptr->unacked_window = 0;
 912		} else {
 913			tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
 914			l_ptr->stats.bearer_congs++;
 915			l_ptr->next_out = buf;
 916		}
 917		return dsz;
 918	}
 919	/* Congestion: can message be bundled? */
 920
 921	if ((msg_user(msg) != CHANGEOVER_PROTOCOL) &&
 922	    (msg_user(msg) != MSG_FRAGMENTER)) {
 923
 924		/* Try adding message to an existing bundle */
 925
 926		if (l_ptr->next_out &&
 927		    link_bundle_buf(l_ptr, l_ptr->last_out, buf)) {
 928			tipc_bearer_resolve_congestion(l_ptr->b_ptr, l_ptr);
 929			return dsz;
 930		}
 931
 932		/* Try creating a new bundle */
 933
 934		if (size <= max_packet * 2 / 3) {
 935			struct sk_buff *bundler = tipc_buf_acquire(max_packet);
 936			struct tipc_msg bundler_hdr;
 937
 938			if (bundler) {
 939				tipc_msg_init(&bundler_hdr, MSG_BUNDLER, OPEN_MSG,
 940					 INT_H_SIZE, l_ptr->addr);
 941				skb_copy_to_linear_data(bundler, &bundler_hdr,
 942							INT_H_SIZE);
 943				skb_trim(bundler, INT_H_SIZE);
 944				link_bundle_buf(l_ptr, bundler, buf);
 945				buf = bundler;
 946				msg = buf_msg(buf);
 947				l_ptr->stats.sent_bundles++;
 948			}
 949		}
 950	}
 951	if (!l_ptr->next_out)
 952		l_ptr->next_out = buf;
 953	link_add_to_outqueue(l_ptr, buf, msg);
 954	tipc_bearer_resolve_congestion(l_ptr->b_ptr, l_ptr);
 955	return dsz;
 956}
 957
 958/*
 959 * tipc_link_send(): same as tipc_link_send_buf(), but the link to use has
 960 * not been selected yet, and the owner node is not locked.
 961 * Called by TIPC internal users, e.g. the name distributor
 962 */
 963
 964int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector)
 965{
 966	struct link *l_ptr;
 967	struct tipc_node *n_ptr;
 968	int res = -ELINKCONG;
 969
 970	read_lock_bh(&tipc_net_lock);
 971	n_ptr = tipc_node_find(dest);
 972	if (n_ptr) {
 973		tipc_node_lock(n_ptr);
 974		l_ptr = n_ptr->active_links[selector & 1];
 975		if (l_ptr)
 976			res = tipc_link_send_buf(l_ptr, buf);
 977		else
 978			buf_discard(buf);
 979		tipc_node_unlock(n_ptr);
 980	} else {
 981		buf_discard(buf);
 982	}
 983	read_unlock_bh(&tipc_net_lock);
 984	return res;
 985}
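    /*
     * Annotation: only the low bit of 'selector' matters -- it picks one
     * of the node's two active links, so messages using the same
     * selector (e.g. the same originating port) consistently travel over
     * the same link.
     */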
 986
 987/*
 988 * link_send_buf_fast: Entry for data messages where the
 989 * destination link is known and the header is complete,
 990 * including the total message length. Very time critical.
 991 * Link is locked. Returns user data length.
 992 */
 993
 994static int link_send_buf_fast(struct link *l_ptr, struct sk_buff *buf,
 995			      u32 *used_max_pkt)
 996{
 997	struct tipc_msg *msg = buf_msg(buf);
 998	int res = msg_data_sz(msg);
 999
1000	if (likely(!link_congested(l_ptr))) {
1001		if (likely(msg_size(msg) <= l_ptr->max_pkt)) {
1002			if (likely(list_empty(&l_ptr->b_ptr->cong_links))) {
1003				link_add_to_outqueue(l_ptr, buf, msg);
1004				if (likely(tipc_bearer_send(l_ptr->b_ptr, buf,
1005							    &l_ptr->media_addr))) {
1006					l_ptr->unacked_window = 0;
1007					return res;
1008				}
1009				tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
1010				l_ptr->stats.bearer_congs++;
1011				l_ptr->next_out = buf;
1012				return res;
1013			}
1014		} else
1015			*used_max_pkt = l_ptr->max_pkt;
1016	}
1017	return tipc_link_send_buf(l_ptr, buf);  /* All other cases */
1018}
1019
1020/*
1021 * tipc_send_buf_fast: Entry for data messages where the
1022 * destination node is known and the header is complete,
1023 * including the total message length.
1024 * Returns user data length.
1025 */
1026int tipc_send_buf_fast(struct sk_buff *buf, u32 destnode)
1027{
1028	struct link *l_ptr;
1029	struct tipc_node *n_ptr;
1030	int res;
1031	u32 selector = msg_origport(buf_msg(buf)) & 1;
1032	u32 dummy;
1033
1034	if (destnode == tipc_own_addr)
1035		return tipc_port_recv_msg(buf);
1036
1037	read_lock_bh(&tipc_net_lock);
1038	n_ptr = tipc_node_find(destnode);
1039	if (likely(n_ptr)) {
1040		tipc_node_lock(n_ptr);
1041		l_ptr = n_ptr->active_links[selector];
1042		if (likely(l_ptr)) {
1043			res = link_send_buf_fast(l_ptr, buf, &dummy);
1044			tipc_node_unlock(n_ptr);
1045			read_unlock_bh(&tipc_net_lock);
1046			return res;
1047		}
1048		tipc_node_unlock(n_ptr);
1049	}
1050	read_unlock_bh(&tipc_net_lock);
1051	res = msg_data_sz(buf_msg(buf));
1052	tipc_reject_msg(buf, TIPC_ERR_NO_NODE);
1053	return res;
1054}
1055
1056
1057/*
1058 * tipc_link_send_sections_fast: Entry for messages where the
1059 * destination processor is known and the header is complete,
1060 * except for total message length.
1061 * Returns user data length or errno.
1062 */
1063int tipc_link_send_sections_fast(struct tipc_port *sender,
1064				 struct iovec const *msg_sect,
1065				 const u32 num_sect,
1066				 unsigned int total_len,
1067				 u32 destaddr)
1068{
1069	struct tipc_msg *hdr = &sender->phdr;
1070	struct link *l_ptr;
1071	struct sk_buff *buf;
1072	struct tipc_node *node;
1073	int res;
1074	u32 selector = msg_origport(hdr) & 1;
1075
1076again:
1077	/*
1078	 * Try building message using port's max_pkt hint.
1079	 * (Must not hold any locks while building message.)
1080	 */
1081
1082	res = tipc_msg_build(hdr, msg_sect, num_sect, total_len,
1083			     sender->max_pkt, !sender->user_port, &buf);
1084
1085	read_lock_bh(&tipc_net_lock);
1086	node = tipc_node_find(destaddr);
1087	if (likely(node)) {
1088		tipc_node_lock(node);
1089		l_ptr = node->active_links[selector];
1090		if (likely(l_ptr)) {
1091			if (likely(buf)) {
1092				res = link_send_buf_fast(l_ptr, buf,
1093							 &sender->max_pkt);
1094exit:
1095				tipc_node_unlock(node);
1096				read_unlock_bh(&tipc_net_lock);
1097				return res;
1098			}
1099
1100			/* Exit if build request was invalid */
1101
1102			if (unlikely(res < 0))
1103				goto exit;
1104
1105			/* Exit if link (or bearer) is congested */
1106
1107			if (link_congested(l_ptr) ||
1108			    !list_empty(&l_ptr->b_ptr->cong_links)) {
1109				res = link_schedule_port(l_ptr,
1110							 sender->ref, res);
1111				goto exit;
1112			}
1113
1114			/*
1115			 * Message size exceeds max_pkt hint; update hint,
1116			 * then re-try fast path or fragment the message
1117			 */
1118
1119			sender->max_pkt = l_ptr->max_pkt;
1120			tipc_node_unlock(node);
1121			read_unlock_bh(&tipc_net_lock);
1122
1123
1124			if ((msg_hdr_sz(hdr) + res) <= sender->max_pkt)
1125				goto again;
1126
1127			return link_send_sections_long(sender, msg_sect,
1128						       num_sect, total_len,
1129						       destaddr);
1130		}
1131		tipc_node_unlock(node);
1132	}
1133	read_unlock_bh(&tipc_net_lock);
1134
1135	/* Couldn't find a link to the destination node */
1136
1137	if (buf)
1138		return tipc_reject_msg(buf, TIPC_ERR_NO_NODE);
1139	if (res >= 0)
1140		return tipc_port_reject_sections(sender, hdr, msg_sect, num_sect,
1141						 total_len, TIPC_ERR_NO_NODE);
1142	return res;
1143}
1144
1145/*
1146 * link_send_sections_long(): Entry for long messages where the
1147 * destination node is known and the header is complete,
1148 * including the total message length.
1149 * Link and bearer congestion status have been checked to be ok,
1150 * and are ignored if they change.
1151 *
1152 * Note that fragments do not use the full link MTU so that they won't have
1153 * to undergo refragmentation if link changeover causes them to be sent
1154 * over another link with an additional tunnel header added as prefix.
1155 * (Refragmentation will still occur if the other link has a smaller MTU.)
1156 *
1157 * Returns user data length or errno.
1158 */
1159static int link_send_sections_long(struct tipc_port *sender,
1160				   struct iovec const *msg_sect,
1161				   u32 num_sect,
1162				   unsigned int total_len,
1163				   u32 destaddr)
1164{
1165	struct link *l_ptr;
1166	struct tipc_node *node;
1167	struct tipc_msg *hdr = &sender->phdr;
1168	u32 dsz = total_len;
1169	u32 max_pkt, fragm_sz, rest;
1170	struct tipc_msg fragm_hdr;
1171	struct sk_buff *buf, *buf_chain, *prev;
1172	u32 fragm_crs, fragm_rest, hsz, sect_rest;
1173	const unchar *sect_crs;
1174	int curr_sect;
1175	u32 fragm_no;
1176
1177again:
1178	fragm_no = 1;
1179	max_pkt = sender->max_pkt - INT_H_SIZE;
1180		/* leave room for tunnel header in case of link changeover */
1181	fragm_sz = max_pkt - INT_H_SIZE;
1182		/* leave room for fragmentation header in each fragment */
1183	rest = dsz;
1184	fragm_crs = 0;
1185	fragm_rest = 0;
1186	sect_rest = 0;
1187	sect_crs = NULL;
1188	curr_sect = -1;
1189
1190	/* Prepare reusable fragment header: */
1191
1192	tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
1193		 INT_H_SIZE, msg_destnode(hdr));
1194	msg_set_size(&fragm_hdr, max_pkt);
1195	msg_set_fragm_no(&fragm_hdr, 1);
1196
1197	/* Prepare header of first fragment: */
1198
1199	buf_chain = buf = tipc_buf_acquire(max_pkt);
1200	if (!buf)
1201		return -ENOMEM;
1202	buf->next = NULL;
1203	skb_copy_to_linear_data(buf, &fragm_hdr, INT_H_SIZE);
1204	hsz = msg_hdr_sz(hdr);
1205	skb_copy_to_linear_data_offset(buf, INT_H_SIZE, hdr, hsz);
1206
1207	/* Chop up message: */
1208
1209	fragm_crs = INT_H_SIZE + hsz;
1210	fragm_rest = fragm_sz - hsz;
1211
1212	do {		/* For all sections */
1213		u32 sz;
1214
1215		if (!sect_rest) {
1216			sect_rest = msg_sect[++curr_sect].iov_len;
1217			sect_crs = (const unchar *)msg_sect[curr_sect].iov_base;
1218		}
1219
1220		if (sect_rest < fragm_rest)
1221			sz = sect_rest;
1222		else
1223			sz = fragm_rest;
1224
1225		if (likely(!sender->user_port)) {
1226			if (copy_from_user(buf->data + fragm_crs, sect_crs, sz)) {
1227error:
1228				for (; buf_chain; buf_chain = buf) {
1229					buf = buf_chain->next;
1230					buf_discard(buf_chain);
1231				}
1232				return -EFAULT;
1233			}
1234		} else
1235			skb_copy_to_linear_data_offset(buf, fragm_crs,
1236						       sect_crs, sz);
1237		sect_crs += sz;
1238		sect_rest -= sz;
1239		fragm_crs += sz;
1240		fragm_rest -= sz;
1241		rest -= sz;
1242
1243		if (!fragm_rest && rest) {
1244
1245			/* Initiate new fragment: */
1246			if (rest <= fragm_sz) {
1247				fragm_sz = rest;
1248				msg_set_type(&fragm_hdr, LAST_FRAGMENT);
1249			} else {
1250				msg_set_type(&fragm_hdr, FRAGMENT);
1251			}
1252			msg_set_size(&fragm_hdr, fragm_sz + INT_H_SIZE);
1253			msg_set_fragm_no(&fragm_hdr, ++fragm_no);
1254			prev = buf;
1255			buf = tipc_buf_acquire(fragm_sz + INT_H_SIZE);
1256			if (!buf)
1257				goto error;
1258
1259			buf->next = NULL;
1260			prev->next = buf;
1261			skb_copy_to_linear_data(buf, &fragm_hdr, INT_H_SIZE);
1262			fragm_crs = INT_H_SIZE;
1263			fragm_rest = fragm_sz;
1264		}
1265	} while (rest > 0);
1266
1267	/*
1268	 * Now we have a buffer chain. Select a link and check
1269	 * that packet size is still OK
1270	 */
1271	node = tipc_node_find(destaddr);
1272	if (likely(node)) {
1273		tipc_node_lock(node);
1274		l_ptr = node->active_links[sender->ref & 1];
1275		if (!l_ptr) {
1276			tipc_node_unlock(node);
1277			goto reject;
1278		}
1279		if (l_ptr->max_pkt < max_pkt) {
1280			sender->max_pkt = l_ptr->max_pkt;
1281			tipc_node_unlock(node);
1282			for (; buf_chain; buf_chain = buf) {
1283				buf = buf_chain->next;
1284				buf_discard(buf_chain);
1285			}
1286			goto again;
1287		}
1288	} else {
1289reject:
1290		for (; buf_chain; buf_chain = buf) {
1291			buf = buf_chain->next;
1292			buf_discard(buf_chain);
1293		}
1294		return tipc_port_reject_sections(sender, hdr, msg_sect, num_sect,
1295						 total_len, TIPC_ERR_NO_NODE);
1296	}
1297
1298	/* Append chain of fragments to send queue & send them */
1299
1300	l_ptr->long_msg_seq_no++;
1301	link_add_chain_to_outqueue(l_ptr, buf_chain, l_ptr->long_msg_seq_no);
1302	l_ptr->stats.sent_fragments += fragm_no;
1303	l_ptr->stats.sent_fragmented++;
1304	tipc_link_push_queue(l_ptr);
1305	tipc_node_unlock(node);
1306	return dsz;
1307}
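    /*
     * Sizing sketch (illustrative numbers, assuming INT_H_SIZE == 40):
     * with a max_pkt hint of 1500, max_pkt becomes 1460 (tunnel-header
     * headroom) and fragm_sz 1420 (fragment-header headroom), so a
     * 10000-byte message is cut into 8 fragments, the first of which
     * also carries the original message header.
     */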
1308
1309/*
1310 * tipc_link_push_packet: Push one unsent packet to the media
1311 */
1312u32 tipc_link_push_packet(struct link *l_ptr)
1313{
1314	struct sk_buff *buf = l_ptr->first_out;
1315	u32 r_q_size = l_ptr->retransm_queue_size;
1316	u32 r_q_head = l_ptr->retransm_queue_head;
1317
1318	/* Step to position where retransmission failed, if any;   */
1319	/* consider that buffers may have been released in meantime */
1320
1321	if (r_q_size && buf) {
1322		u32 last = lesser(mod(r_q_head + r_q_size),
1323				  link_last_sent(l_ptr));
1324		u32 first = msg_seqno(buf_msg(buf));
1325
1326		while (buf && less(first, r_q_head)) {
1327			first = mod(first + 1);
1328			buf = buf->next;
1329		}
1330		l_ptr->retransm_queue_head = r_q_head = first;
1331		l_ptr->retransm_queue_size = r_q_size = mod(last - first);
1332	}
1333
1334	/* Continue retransmission now, if there is anything: */
1335
1336	if (r_q_size && buf) {
1337		msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
1338		msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
1339		if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
1340			l_ptr->retransm_queue_head = mod(++r_q_head);
1341			l_ptr->retransm_queue_size = --r_q_size;
1342			l_ptr->stats.retransmitted++;
1343			return 0;
1344		} else {
1345			l_ptr->stats.bearer_congs++;
1346			return PUSH_FAILED;
1347		}
1348	}
1349
1350	/* Send deferred protocol message, if any: */
1351
1352	buf = l_ptr->proto_msg_queue;
1353	if (buf) {
1354		msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
1355		msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
1356		if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
1357			l_ptr->unacked_window = 0;
1358			buf_discard(buf);
1359			l_ptr->proto_msg_queue = NULL;
1360			return 0;
1361		} else {
1362			l_ptr->stats.bearer_congs++;
1363			return PUSH_FAILED;
1364		}
1365	}
1366
1367	/* Send one deferred data message, if send window not full: */
1368
1369	buf = l_ptr->next_out;
1370	if (buf) {
1371		struct tipc_msg *msg = buf_msg(buf);
1372		u32 next = msg_seqno(msg);
1373		u32 first = msg_seqno(buf_msg(l_ptr->first_out));
1374
1375		if (mod(next - first) < l_ptr->queue_limit[0]) {
1376			msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
1377			msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
1378			if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
1379				if (msg_user(msg) == MSG_BUNDLER)
1380					msg_set_type(msg, CLOSED_MSG);
1381				l_ptr->next_out = buf->next;
1382				return 0;
1383			} else {
1384				l_ptr->stats.bearer_congs++;
1385				return PUSH_FAILED;
1386			}
1387		}
1388	}
1389	return PUSH_FINISHED;
1390}
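    /*
     * Return contract: 0 means "pushed one packet, call again",
     * PUSH_FAILED means the bearer congested mid-push, and PUSH_FINISHED
     * means there was nothing left to send; tipc_link_push_queue() below
     * loops on 0 and reschedules the bearer on PUSH_FAILED.
     */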
1391
1392/*
1393 * tipc_link_push_queue(): push out the unsent messages of a link where
1394 *               congestion has abated. Node is locked
1395 */
1396void tipc_link_push_queue(struct link *l_ptr)
1397{
1398	u32 res;
1399
1400	if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr))
1401		return;
1402
1403	do {
1404		res = tipc_link_push_packet(l_ptr);
1405	} while (!res);
1406
1407	if (res == PUSH_FAILED)
1408		tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
1409}
1410
1411static void link_reset_all(unsigned long addr)
1412{
1413	struct tipc_node *n_ptr;
1414	char addr_string[16];
1415	u32 i;
1416
1417	read_lock_bh(&tipc_net_lock);
1418	n_ptr = tipc_node_find((u32)addr);
1419	if (!n_ptr) {
1420		read_unlock_bh(&tipc_net_lock);
1421		return;	/* node no longer exists */
1422	}
1423
1424	tipc_node_lock(n_ptr);
1425
1426	warn("Resetting all links to %s\n",
1427	     tipc_addr_string_fill(addr_string, n_ptr->addr));
1428
1429	for (i = 0; i < MAX_BEARERS; i++) {
1430		if (n_ptr->links[i]) {
1431			link_print(n_ptr->links[i], "Resetting link\n");
1432			tipc_link_reset(n_ptr->links[i]);
1433		}
1434	}
1435
1436	tipc_node_unlock(n_ptr);
1437	read_unlock_bh(&tipc_net_lock);
1438}
1439
1440static void link_retransmit_failure(struct link *l_ptr, struct sk_buff *buf)
1441{
1442	struct tipc_msg *msg = buf_msg(buf);
1443
1444	warn("Retransmission failure on link <%s>\n", l_ptr->name);
1445
1446	if (l_ptr->addr) {
1447
1448		/* Handle failure on standard link */
1449
1450		link_print(l_ptr, "Resetting link\n");
1451		tipc_link_reset(l_ptr);
1452
1453	} else {
1454
1455		/* Handle failure on broadcast link */
1456
1457		struct tipc_node *n_ptr;
1458		char addr_string[16];
1459
1460		info("Msg seq number: %u,  ", msg_seqno(msg));
1461		info("Outstanding acks: %lu\n",
1462		     (unsigned long) TIPC_SKB_CB(buf)->handle);
1463
1464		n_ptr = tipc_bclink_retransmit_to();
1465		tipc_node_lock(n_ptr);
1466
1467		tipc_addr_string_fill(addr_string, n_ptr->addr);
1468		info("Multicast link info for %s\n", addr_string);
1469		info("Supported: %d,  ", n_ptr->bclink.supported);
1470		info("Acked: %u\n", n_ptr->bclink.acked);
1471		info("Last in: %u,  ", n_ptr->bclink.last_in);
1472		info("Gap after: %u,  ", n_ptr->bclink.gap_after);
1473		info("Gap to: %u\n", n_ptr->bclink.gap_to);
1474		info("Nack sync: %u\n\n", n_ptr->bclink.nack_sync);
1475
1476		tipc_k_signal((Handler)link_reset_all, (unsigned long)n_ptr->addr);
1477
1478		tipc_node_unlock(n_ptr);
1479
1480		l_ptr->stale_count = 0;
1481	}
1482}
1483
1484void tipc_link_retransmit(struct link *l_ptr, struct sk_buff *buf,
1485			  u32 retransmits)
1486{
1487	struct tipc_msg *msg;
1488
1489	if (!buf)
1490		return;
1491
1492	msg = buf_msg(buf);
1493
1494	if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr)) {
1495		if (l_ptr->retransm_queue_size == 0) {
1496			l_ptr->retransm_queue_head = msg_seqno(msg);
1497			l_ptr->retransm_queue_size = retransmits;
1498		} else {
1499			err("Unexpected retransmit on link %s (qsize=%d)\n",
1500			    l_ptr->name, l_ptr->retransm_queue_size);
1501		}
1502		return;
1503	} else {
1504		/* Detect repeated retransmit failures on uncongested bearer */
1505
1506		if (l_ptr->last_retransmitted == msg_seqno(msg)) {
1507			if (++l_ptr->stale_count > 100) {
1508				link_retransmit_failure(l_ptr, buf);
1509				return;
1510			}
1511		} else {
1512			l_ptr->last_retransmitted = msg_seqno(msg);
1513			l_ptr->stale_count = 1;
1514		}
1515	}
1516
1517	while (retransmits && (buf != l_ptr->next_out) && buf) {
1518		msg = buf_msg(buf);
1519		msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
1520		msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
1521		if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
1522			buf = buf->next;
1523			retransmits--;
1524			l_ptr->stats.retransmitted++;
1525		} else {
1526			tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
1527			l_ptr->stats.bearer_congs++;
1528			l_ptr->retransm_queue_head = msg_seqno(buf_msg(buf));
1529			l_ptr->retransm_queue_size = retransmits;
1530			return;
1531		}
1532	}
1533
1534	l_ptr->retransm_queue_head = l_ptr->retransm_queue_size = 0;
1535}
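    /*
     * Annotation: the stale_count logic above treats more than a hundred
     * retransmit requests for the same sequence number on an uncongested
     * bearer as a hard failure and hands the link to
     * link_retransmit_failure() for a reset.
     */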
1536
1537/**
1538 * link_insert_deferred_queue - insert deferred messages back into receive chain
1539 */
1540
1541static struct sk_buff *link_insert_deferred_queue(struct link *l_ptr,
1542						  struct sk_buff *buf)
1543{
1544	u32 seq_no;
1545
1546	if (l_ptr->oldest_deferred_in == NULL)
1547		return buf;
1548
1549	seq_no = msg_seqno(buf_msg(l_ptr->oldest_deferred_in));
1550	if (seq_no == mod(l_ptr->next_in_no)) {
1551		l_ptr->newest_deferred_in->next = buf;
1552		buf = l_ptr->oldest_deferred_in;
1553		l_ptr->oldest_deferred_in = NULL;
1554		l_ptr->deferred_inqueue_sz = 0;
1555	}
1556	return buf;
1557}
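    /*
     * Example: if next_in_no is 7 and packets 7-9 sit in the deferred
     * queue, the queue is spliced in ahead of the not-yet-processed
     * arrivals, so 7, 8 and 9 are handled before the rest of the
     * incoming chain.
     */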
1558
1559/**
1560 * link_recv_buf_validate - validate basic format of received message
1561 *
1562 * This routine ensures a TIPC message has an acceptable header, and at least
1563 * as much data as the header indicates it should.  The routine also ensures
1564 * that the entire message header is stored in the main fragment of the message
1565 * buffer, to simplify future access to message header fields.
1566 *
1567 * Note: Having extra info present in the message header or data areas is OK.
1568 * TIPC will ignore the excess, under the assumption that it is optional info
1569 * introduced by a later release of the protocol.
1570 */
1571
1572static int link_recv_buf_validate(struct sk_buff *buf)
1573{
1574	static u32 min_data_hdr_size[8] = {
1575		SHORT_H_SIZE, MCAST_H_SIZE, NAMED_H_SIZE, BASIC_H_SIZE,
1576		MAX_H_SIZE, MAX_H_SIZE, MAX_H_SIZE, MAX_H_SIZE
1577		};
1578
1579	struct tipc_msg *msg;
1580	u32 tipc_hdr[2];
1581	u32 size;
1582	u32 hdr_size;
1583	u32 min_hdr_size;
1584
1585	if (unlikely(buf->len < MIN_H_SIZE))
1586		return 0;
1587
1588	msg = skb_header_pointer(buf, 0, sizeof(tipc_hdr), tipc_hdr);
1589	if (msg == NULL)
1590		return 0;
1591
1592	if (unlikely(msg_version(msg) != TIPC_VERSION))
1593		return 0;
1594
1595	size = msg_size(msg);
1596	hdr_size = msg_hdr_sz(msg);
1597	min_hdr_size = msg_isdata(msg) ?
1598		min_data_hdr_size[msg_type(msg)] : INT_H_SIZE;
1599
1600	if (unlikely((hdr_size < min_hdr_size) ||
1601		     (size < hdr_size) ||
1602		     (buf->len < size) ||
1603		     (size - hdr_size > TIPC_MAX_USER_MSG_SIZE)))
1604		return 0;
1605
1606	return pskb_may_pull(buf, hdr_size);
1607}
1608
1609/**
1610 * tipc_recv_msg - process TIPC messages arriving from off-node
1611 * @head: pointer to message buffer chain
1612 * @b_ptr: pointer to bearer the message arrived on
1613 *
1614 * Invoked with no locks held.  Bearer pointer must point to a valid bearer
1615 * structure (i.e. cannot be NULL), but bearer can be inactive.
1616 */
1617
1618void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr)
1619{
1620	read_lock_bh(&tipc_net_lock);
1621	while (head) {
1622		struct tipc_node *n_ptr;
1623		struct link *l_ptr;
1624		struct sk_buff *crs;
1625		struct sk_buff *buf = head;
1626		struct tipc_msg *msg;
1627		u32 seq_no;
1628		u32 ackd;
1629		u32 released = 0;
1630		int type;
1631
1632		head = head->next;
1633
1634		/* Ensure bearer is still enabled */
1635
1636		if (unlikely(!b_ptr->active))
1637			goto cont;
1638
1639		/* Ensure message is well-formed */
1640
1641		if (unlikely(!link_recv_buf_validate(buf)))
1642			goto cont;
1643
1644		/* Ensure message data is a single contiguous unit */
1645
1646		if (unlikely(buf_linearize(buf)))
1647			goto cont;
1648
1649		/* Handle arrival of a non-unicast link message */
1650
1651		msg = buf_msg(buf);
1652
1653		if (unlikely(msg_non_seq(msg))) {
1654			if (msg_user(msg) ==  LINK_CONFIG)
1655				tipc_disc_recv_msg(buf, b_ptr);
1656			else
1657				tipc_bclink_recv_pkt(buf);
1658			continue;
1659		}
1660
1661		if (unlikely(!msg_short(msg) &&
1662			     (msg_destnode(msg) != tipc_own_addr)))
1663			goto cont;
1664
1665		/* Discard non-routeable messages destined for another node */
1666
1667		if (unlikely(!msg_isdata(msg) &&
1668			     (msg_destnode(msg) != tipc_own_addr))) {
1669			if ((msg_user(msg) != CONN_MANAGER) &&
1670			    (msg_user(msg) != MSG_FRAGMENTER))
1671				goto cont;
1672		}
1673
1674		/* Locate neighboring node that sent message */
1675
1676		n_ptr = tipc_node_find(msg_prevnode(msg));
1677		if (unlikely(!n_ptr))
1678			goto cont;
1679		tipc_node_lock(n_ptr);
1680
1681		/* Don't talk to neighbor during cleanup after last session */
1682
1683		if (n_ptr->cleanup_required) {
1684			tipc_node_unlock(n_ptr);
1685			goto cont;
1686		}
1687
1688		/* Locate unicast link endpoint that should handle message */
1689
1690		l_ptr = n_ptr->links[b_ptr->identity];
1691		if (unlikely(!l_ptr)) {
1692			tipc_node_unlock(n_ptr);
1693			goto cont;
1694		}
1695
1696		/* Validate message sequence number info */
1697
1698		seq_no = msg_seqno(msg);
1699		ackd = msg_ack(msg);
1700
1701		/* Release acked messages */
1702
1703		if (less(n_ptr->bclink.acked, msg_bcast_ack(msg))) {
1704			if (tipc_node_is_up(n_ptr) && n_ptr->bclink.supported)
1705				tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg));
1706		}
1707
1708		crs = l_ptr->first_out;
1709		while ((crs != l_ptr->next_out) &&
1710		       less_eq(msg_seqno(buf_msg(crs)), ackd)) {
1711			struct sk_buff *next = crs->next;
1712
1713			buf_discard(crs);
1714			crs = next;
1715			released++;
1716		}
1717		if (released) {
1718			l_ptr->first_out = crs;
1719			l_ptr->out_queue_size -= released;
1720		}
1721
1722		/* Try sending any messages link endpoint has pending */
1723
1724		if (unlikely(l_ptr->next_out))
1725			tipc_link_push_queue(l_ptr);
1726		if (unlikely(!list_empty(&l_ptr->waiting_ports)))
1727			tipc_link_wakeup_ports(l_ptr, 0);
1728		if (unlikely(++l_ptr->unacked_window >= TIPC_MIN_LINK_WIN)) {
1729			l_ptr->stats.sent_acks++;
1730			tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
1731		}
1732
1733		/* Now (finally!) process the incoming message */
1734
1735protocol_check:
1736		if (likely(link_working_working(l_ptr))) {
1737			if (likely(seq_no == mod(l_ptr->next_in_no))) {
1738				l_ptr->next_in_no++;
1739				if (unlikely(l_ptr->oldest_deferred_in))
1740					head = link_insert_deferred_queue(l_ptr,
1741									  head);
1742				if (likely(msg_is_dest(msg, tipc_own_addr))) {
1743deliver:
1744					if (likely(msg_isdata(msg))) {
1745						tipc_node_unlock(n_ptr);
1746						tipc_port_recv_msg(buf);
1747						continue;
1748					}
1749					switch (msg_user(msg)) {
1750					case MSG_BUNDLER:
1751						l_ptr->stats.recv_bundles++;
1752						l_ptr->stats.recv_bundled +=
1753							msg_msgcnt(msg);
1754						tipc_node_unlock(n_ptr);
1755						tipc_link_recv_bundle(buf);
1756						continue;
1757					case NAME_DISTRIBUTOR:
1758						tipc_node_unlock(n_ptr);
1759						tipc_named_recv(buf);
1760						continue;
1761					case CONN_MANAGER:
1762						tipc_node_unlock(n_ptr);
1763						tipc_port_recv_proto_msg(buf);
1764						continue;
1765					case MSG_FRAGMENTER:
1766						l_ptr->stats.recv_fragments++;
1767						if (tipc_link_recv_fragment(&l_ptr->defragm_buf,
1768									    &buf, &msg)) {
1769							l_ptr->stats.recv_fragmented++;
1770							goto deliver;
1771						}
1772						break;
1773					case CHANGEOVER_PROTOCOL:
1774						type = msg_type(msg);
1775						if (link_recv_changeover_msg(&l_ptr, &buf)) {
1776							msg = buf_msg(buf);
1777							seq_no = msg_seqno(msg);
1778							if (type == ORIGINAL_MSG)
1779								goto deliver;
1780							goto protocol_check;
1781						}
1782						break;
1783					default:
1784						buf_discard(buf);
1785						buf = NULL;
1786						break;
1787					}
1788				}
1789				tipc_node_unlock(n_ptr);
1790				tipc_net_route_msg(buf);
1791				continue;
1792			}
1793			link_handle_out_of_seq_msg(l_ptr, buf);
1794			head = link_insert_deferred_queue(l_ptr, head);
1795			tipc_node_unlock(n_ptr);
1796			continue;
1797		}
1798
1799		if (msg_user(msg) == LINK_PROTOCOL) {
1800			link_recv_proto_msg(l_ptr, buf);
1801			head = link_insert_deferred_queue(l_ptr, head);
1802			tipc_node_unlock(n_ptr);
1803			continue;
1804		}
1805		link_state_event(l_ptr, TRAFFIC_MSG_EVT);
1806
1807		if (link_working_working(l_ptr)) {
1808			/* Re-insert in front of queue */
1809			buf->next = head;
1810			head = buf;
1811			tipc_node_unlock(n_ptr);
1812			continue;
1813		}
1814		tipc_node_unlock(n_ptr);
1815cont:
1816		buf_discard(buf);
1817	}
1818	read_unlock_bh(&tipc_net_lock);
1819}
1820
1821/*
1822 * tipc_link_defer_pkt(): Sort a received out-of-sequence packet
1823 *                        into the deferred reception queue.
1824 * Returns the increase of the queue length, i.e. 0 or 1
1825 */
1826
1827u32 tipc_link_defer_pkt(struct sk_buff **head,
1828			struct sk_buff **tail,
1829			struct sk_buff *buf)
1830{
1831	struct sk_buff *prev = NULL;
1832	struct sk_buff *crs = *head;
1833	u32 seq_no = msg_seqno(buf_msg(buf));
1834
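	/*
	 * Note: less()/less_eq() compare sequence numbers modulo 2^16,
	 * so ordering is preserved across wraparound (e.g. 0xfffe is
	 * "less than" 0x0001).
	 */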
1835	buf->next = NULL;
1836
1837	/* Empty queue ? */
1838	if (*head == NULL) {
1839		*head = *tail = buf;
1840		return 1;
1841	}
1842
1843	/* Last ? */
1844	if (less(msg_seqno(buf_msg(*tail)), seq_no)) {
1845		(*tail)->next = buf;
1846		*tail = buf;
1847		return 1;
1848	}
1849
1850	/* Scan through queue and sort it in */
1851	do {
1852		struct tipc_msg *msg = buf_msg(crs);
1853
1854		if (less(seq_no, msg_seqno(msg))) {
1855			buf->next = crs;
1856			if (prev)
1857				prev->next = buf;
1858			else
1859				*head = buf;
1860			return 1;
1861		}
1862		if (seq_no == msg_seqno(msg))
1863			break;
1864		prev = crs;
1865		crs = crs->next;
1866	} while (crs);
1867
1868	/* Message is a duplicate of an existing message */
1869
1870	buf_discard(buf);
1871	return 0;
1872}
1873
1874/**
1875 * link_handle_out_of_seq_msg - handle arrival of out-of-sequence packet
1876 */
1877
1878static void link_handle_out_of_seq_msg(struct link *l_ptr,
1879				       struct sk_buff *buf)
1880{
1881	u32 seq_no = msg_seqno(buf_msg(buf));
1882
1883	if (likely(msg_user(buf_msg(buf)) == LINK_PROTOCOL)) {
1884		link_recv_proto_msg(l_ptr, buf);
1885		return;
1886	}
1887
1888	/* Record OOS packet arrival (force mismatch on next timeout) */
1889
1890	l_ptr->checkpoint--;
1891
1892	/*
1893	 * Discard packet if a duplicate; otherwise add it to deferred queue
1894	 * and notify peer of gap as per protocol specification
1895	 */
1896
1897	if (less(seq_no, mod(l_ptr->next_in_no))) {
1898		l_ptr->stats.duplicates++;
1899		buf_discard(buf);
1900		return;
1901	}
1902
1903	if (tipc_link_defer_pkt(&l_ptr->oldest_deferred_in,
1904				&l_ptr->newest_deferred_in, buf)) {
1905		l_ptr->deferred_inqueue_sz++;
1906		l_ptr->stats.deferred_recv++;
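		/*
		 * Rate-limit gap notifications: a state message goes out for
		 * the first deferred packet and then for every 16th one.
		 */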
1907		if ((l_ptr->deferred_inqueue_sz % 16) == 1)
1908			tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
1909	} else
1910		l_ptr->stats.duplicates++;
1911}
1912
1913/*
1914 * Send protocol message to the other endpoint.
1915 */
1916void tipc_link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int probe_msg,
1917			      u32 gap, u32 tolerance, u32 priority, u32 ack_mtu)
1918{
1919	struct sk_buff *buf = NULL;
1920	struct tipc_msg *msg = l_ptr->pmsg;
1921	u32 msg_size = sizeof(l_ptr->proto_msg);
1922	int r_flag;
1923
1924	if (link_blocked(l_ptr))
1925		return;
1926	msg_set_type(msg, msg_typ);
1927	msg_set_net_plane(msg, l_ptr->b_ptr->net_plane);
1928	msg_set_bcast_ack(msg, mod(l_ptr->owner->bclink.last_in));
1929	msg_set_last_bcast(msg, tipc_bclink_get_last_sent());
1930
1931	if (msg_typ == STATE_MSG) {
1932		u32 next_sent = mod(l_ptr->next_out_no);
1933
1934		if (!tipc_link_is_up(l_ptr))
1935			return;
1936		if (l_ptr->next_out)
1937			next_sent = msg_seqno(buf_msg(l_ptr->next_out));
1938		msg_set_next_sent(msg, next_sent);
1939		if (l_ptr->oldest_deferred_in) {
1940			u32 rec = msg_seqno(buf_msg(l_ptr->oldest_deferred_in));
1941			gap = mod(rec - mod(l_ptr->next_in_no));
1942		}
1943		msg_set_seq_gap(msg, gap);
1944		if (gap)
1945			l_ptr->stats.sent_nacks++;
1946		msg_set_link_tolerance(msg, tolerance);
1947		msg_set_linkprio(msg, priority);
1948		msg_set_max_pkt(msg, ack_mtu);
1949		msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
1950		msg_set_probe(msg, probe_msg != 0);
1951		if (probe_msg) {
1952			u32 mtu = l_ptr->max_pkt;
1953
1954			if ((mtu < l_ptr->max_pkt_target) &&
1955			    link_working_working(l_ptr) &&
1956			    l_ptr->fsm_msg_cnt) {
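				/*
				 * Probe with a size halfway between the
				 * current and target max packet, aligned to
				 * a multiple of 4; after 10 unanswered
				 * probes the target drops to just below the
				 * probe size and the search continues.
				 */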
1957				msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
1958				if (l_ptr->max_pkt_probes == 10) {
1959					l_ptr->max_pkt_target = (msg_size - 4);
1960					l_ptr->max_pkt_probes = 0;
1961					msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
1962				}
1963				l_ptr->max_pkt_probes++;
1964			}
1965
1966			l_ptr->stats.sent_probes++;
1967		}
1968		l_ptr->stats.sent_states++;
1969	} else {		/* RESET_MSG or ACTIVATE_MSG */
1970		msg_set_ack(msg, mod(l_ptr->reset_checkpoint - 1));
1971		msg_set_seq_gap(msg, 0);
1972		msg_set_next_sent(msg, 1);
1973		msg_set_probe(msg, 0);
1974		msg_set_link_tolerance(msg, l_ptr->tolerance);
1975		msg_set_linkprio(msg, l_ptr->priority);
1976		msg_set_max_pkt(msg, l_ptr->max_pkt_target);
1977	}
1978
1979	r_flag = (l_ptr->owner->working_links > tipc_link_is_up(l_ptr));
1980	msg_set_redundant_link(msg, r_flag);
1981	msg_set_linkprio(msg, l_ptr->priority);
1982
1983	/* Ensure sequence number cannot be mistaken for an in-window packet */
1984
1985	msg_set_seqno(msg, mod(l_ptr->next_out_no + (0xffff/2)));
1986
1987	/* Congestion? */
1988
1989	if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr)) {
1990		if (!l_ptr->proto_msg_queue) {
1991			l_ptr->proto_msg_queue =
1992				tipc_buf_acquire(sizeof(l_ptr->proto_msg));
1993		}
1994		buf = l_ptr->proto_msg_queue;
1995		if (!buf)
1996			return;
1997		skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));
1998		return;
1999	}
2000
2001	/* Message can be sent */
2002
2003	buf = tipc_buf_acquire(msg_size);
2004	if (!buf)
2005		return;
2006
2007	skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));
2008	msg_set_size(buf_msg(buf), msg_size);
2009
2010	if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
2011		l_ptr->unacked_window = 0;
2012		buf_discard(buf);
2013		return;
2014	}
2015
2016	/* New congestion */
2017	tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
2018	l_ptr->proto_msg_queue = buf;
2019	l_ptr->stats.bearer_congs++;
2020}
2021
2022/*
2023 * Receive protocol message.
2024 * Note that network plane id propagates through the network, and may
2025 * change at any time. The node with the lowest address rules.
2026 */
2027
2028static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf)
2029{
2030	u32 rec_gap = 0;
2031	u32 max_pkt_info;
2032	u32 max_pkt_ack;
2033	u32 msg_tol;
2034	struct tipc_msg *msg = buf_msg(buf);
2035
2036	if (link_blocked(l_ptr))
2037		goto exit;
2038
2039	/* record unnumbered packet arrival (force mismatch on next timeout) */
2040
2041	l_ptr->checkpoint--;
2042
2043	if (l_ptr->b_ptr->net_plane != msg_net_plane(msg))
2044		if (tipc_own_addr > msg_prevnode(msg))
2045			l_ptr->b_ptr->net_plane = msg_net_plane(msg);
2046
2047	l_ptr->owner->permit_changeover = msg_redundant_link(msg);
2048
2049	switch (msg_type(msg)) {
2050
2051	case RESET_MSG:
2052		if (!link_working_unknown(l_ptr) &&
2053		    (l_ptr->peer_session != INVALID_SESSION)) {
2054			if (msg_session(msg) == l_ptr->peer_session)
2055				break; /* duplicate: ignore */
2056		}
2057		/* fall through */
2058	case ACTIVATE_MSG:
2059		/* Update link settings according to other endpoint's values */
2060
2061		strcpy((strrchr(l_ptr->name, ':') + 1), (char *)msg_data(msg));
2062
2063		msg_tol = msg_link_tolerance(msg);
2064		if (msg_tol > l_ptr->tolerance)
2065			link_set_supervision_props(l_ptr, msg_tol);
2066
2067		if (msg_linkprio(msg) > l_ptr->priority)
2068			l_ptr->priority = msg_linkprio(msg);
2069
2070		max_pkt_info = msg_max_pkt(msg);
2071		if (max_pkt_info) {
2072			if (max_pkt_info < l_ptr->max_pkt_target)
2073				l_ptr->max_pkt_target = max_pkt_info;
2074			if (l_ptr->max_pkt > l_ptr->max_pkt_target)
2075				l_ptr->max_pkt = l_ptr->max_pkt_target;
2076		} else {
2077			l_ptr->max_pkt = l_ptr->max_pkt_target;
2078		}
2079		l_ptr->owner->bclink.supported = (max_pkt_info != 0);
2080
2081		link_state_event(l_ptr, msg_type(msg));
2082
2083		l_ptr->peer_session = msg_session(msg);
2084		l_ptr->peer_bearer_id = msg_bearer_id(msg);
2085
2086		/* Synchronize broadcast sequence numbers */
2087		if (!tipc_node_redundant_links(l_ptr->owner))
2088			l_ptr->owner->bclink.last_in = mod(msg_last_bcast(msg));
2089		break;
2090	case STATE_MSG:
2091
2092		msg_tol = msg_link_tolerance(msg);
2093		if (msg_tol)
2094			link_set_supervision_props(l_ptr, msg_tol);
2095
2096		if (msg_linkprio(msg) &&
2097		    (msg_linkprio(msg) != l_ptr->priority)) {
2098			warn("Resetting link <%s>, priority change %u->%u\n",
2099			     l_ptr->name, l_ptr->priority, msg_linkprio(msg));
2100			l_ptr->priority = msg_linkprio(msg);
2101			tipc_link_reset(l_ptr); /* Enforce change to take effect */
2102			break;
2103		}
2104		link_state_event(l_ptr, TRAFFIC_MSG_EVT);
2105		l_ptr->stats.recv_states++;
2106		if (link_reset_unknown(l_ptr))
2107			break;
2108
2109		if (less_eq(mod(l_ptr->next_in_no), msg_next_sent(msg))) {
2110			rec_gap = mod(msg_next_sent(msg) -
2111				      mod(l_ptr->next_in_no));
2112		}
2113
2114		max_pkt_ack = msg_max_pkt(msg);
2115		if (max_pkt_ack > l_ptr->max_pkt) {
2116			l_ptr->max_pkt = max_pkt_ack;
2117			l_ptr->max_pkt_probes = 0;
2118		}
2119
2120		max_pkt_ack = 0;
2121		if (msg_probe(msg)) {
2122			l_ptr->stats.recv_probes++;
2123			if (msg_size(msg) > sizeof(l_ptr->proto_msg))
2124				max_pkt_ack = msg_size(msg);
2125		}
2126
2127		/* Send protocol message before retransmissions to reduce loss risk */
2128
2129		tipc_bclink_check_gap(l_ptr->owner, msg_last_bcast(msg));
2130
2131		if (rec_gap || (msg_probe(msg))) {
2132			tipc_link_send_proto_msg(l_ptr, STATE_MSG,
2133						 0, rec_gap, 0, 0, max_pkt_ack);
2134		}
2135		if (msg_seq_gap(msg)) {
2136			l_ptr->stats.recv_nacks++;
2137			tipc_link_retransmit(l_ptr, l_ptr->first_out,
2138					     msg_seq_gap(msg));
2139		}
2140		break;
2141	}
2142exit:
2143	buf_discard(buf);
2144}
2145
2146
2147/*
2148 * tipc_link_tunnel(): Send one message via a link belonging to
2149 * another bearer. Owner node is locked.
2150 */
2151static void tipc_link_tunnel(struct link *l_ptr,
2152			     struct tipc_msg *tunnel_hdr,
2153			     struct tipc_msg  *msg,
2154			     u32 selector)
2155{
2156	struct link *tunnel;
2157	struct sk_buff *buf;
2158	u32 length = msg_size(msg);
2159
2160	tunnel = l_ptr->owner->active_links[selector & 1];
2161	if (!tipc_link_is_up(tunnel)) {
2162		warn("Link changeover error, "
2163		     "tunnel link no longer available\n");
2164		return;
2165	}
2166	msg_set_size(tunnel_hdr, length + INT_H_SIZE);
2167	buf = tipc_buf_acquire(length + INT_H_SIZE);
2168	if (!buf) {
2169		warn("Link changeover error, "
2170		     "unable to send tunnel msg\n");
2171		return;
2172	}
2173	skb_copy_to_linear_data(buf, tunnel_hdr, INT_H_SIZE);
2174	skb_copy_to_linear_data_offset(buf, INT_H_SIZE, msg, length);
2175	tipc_link_send_buf(tunnel, buf);
2176}
2177
2178
2179
2180/*
2181 * tipc_link_changeover(): Send whole message queue via the remaining
2182 *                          link. Owner node is locked.
2183 */
2184
2185void tipc_link_changeover(struct link *l_ptr)
2186{
2187	u32 msgcount = l_ptr->out_queue_size;
2188	struct sk_buff *crs = l_ptr->first_out;
2189	struct link *tunnel = l_ptr->owner->active_links[0];
2190	struct tipc_msg tunnel_hdr;
2191	int split_bundles;
2192
2193	if (!tunnel)
2194		return;
2195
2196	if (!l_ptr->owner->permit_changeover) {
2197		warn("Link changeover error, "
2198		     "peer did not permit changeover\n");
2199		return;
2200	}
2201
2202	tipc_msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
2203		 ORIGINAL_MSG, INT_H_SIZE, l_ptr->addr);
2204	msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
2205	msg_set_msgcnt(&tunnel_hdr, msgcount);
2206
2207	if (!l_ptr->first_out) {
2208		struct sk_buff *buf;
2209
2210		buf = tipc_buf_acquire(INT_H_SIZE);
2211		if (buf) {
2212			skb_copy_to_linear_data(buf, &tunnel_hdr, INT_H_SIZE);
2213			msg_set_size(&tunnel_hdr, INT_H_SIZE);
2214			tipc_link_send_buf(tunnel, buf);
2215		} else {
2216			warn("Link changeover error, "
2217			     "unable to send changeover msg\n");
2218		}
2219		return;
2220	}
2221
2222	split_bundles = (l_ptr->owner->active_links[0] !=
2223			 l_ptr->owner->active_links[1]);
2224
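	/*
	 * If the two active links differ, each bundle is opened and its
	 * inner messages are tunneled on their individually selected
	 * links; otherwise a whole bundle is tunneled as one message.
	 */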
2225	while (crs) {
2226		struct tipc_msg *msg = buf_msg(crs);
2227
2228		if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) {
2229			struct tipc_msg *m = msg_get_wrapped(msg);
2230			unchar *pos = (unchar *)m;
2231
2232			msgcount = msg_msgcnt(msg);
2233			while (msgcount--) {
2234				msg_set_seqno(m, msg_seqno(msg));
2235				tipc_link_tunnel(l_ptr, &tunnel_hdr, m,
2236						 msg_link_selector(m));
2237				pos += align(msg_size(m));
2238				m = (struct tipc_msg *)pos;
2239			}
2240		} else {
2241			tipc_link_tunnel(l_ptr, &tunnel_hdr, msg,
2242					 msg_link_selector(msg));
2243		}
2244		crs = crs->next;
2245	}
2246}
2247
2248void tipc_link_send_duplicate(struct link *l_ptr, struct link *tunnel)
2249{
2250	struct sk_buff *iter;
2251	struct tipc_msg tunnel_hdr;
2252
2253	tipc_msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
2254		 DUPLICATE_MSG, INT_H_SIZE, l_ptr->addr);
2255	msg_set_msgcnt(&tunnel_hdr, l_ptr->out_queue_size);
2256	msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
2257	iter = l_ptr->first_out;
2258	while (iter) {
2259		struct sk_buff *outbuf;
2260		struct tipc_msg *msg = buf_msg(iter);
2261		u32 length = msg_size(msg);
2262
2263		if (msg_user(msg) == MSG_BUNDLER)
2264			msg_set_type(msg, CLOSED_MSG);
2265		msg_set_ack(msg, mod(l_ptr->next_in_no - 1));	/* Update */
2266		msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
2267		msg_set_size(&tunnel_hdr, length + INT_H_SIZE);
2268		outbuf = tipc_buf_acquire(length + INT_H_SIZE);
2269		if (outbuf == NULL) {
2270			warn("Link changeover error, "
2271			     "unable to send duplicate msg\n");
2272			return;
2273		}
2274		skb_copy_to_linear_data(outbuf, &tunnel_hdr, INT_H_SIZE);
2275		skb_copy_to_linear_data_offset(outbuf, INT_H_SIZE, iter->data,
2276					       length);
2277		tipc_link_send_buf(tunnel, outbuf);
2278		if (!tipc_link_is_up(l_ptr))
2279			return;
2280		iter = iter->next;
2281	}
2282}
2283
2284
2285
2286/**
2287 * buf_extract - extracts embedded TIPC message from another message
2288 * @skb: encapsulating message buffer
2289 * @from_pos: offset to extract from
2290 *
2291 * Returns a new message buffer containing an embedded message.  The
2292 * encapsulating message itself is left unchanged.
2293 */
2294
2295static struct sk_buff *buf_extract(struct sk_buff *skb, u32 from_pos)
2296{
2297	struct tipc_msg *msg = (struct tipc_msg *)(skb->data + from_pos);
2298	u32 size = msg_size(msg);
2299	struct sk_buff *eb;
2300
2301	eb = tipc_buf_acquire(size);
2302	if (eb)
2303		skb_copy_to_linear_data(eb, msg, size);
2304	return eb;
2305}
2306
2307/*
2308 *  link_recv_changeover_msg(): Receive tunneled packet sent via other
2309 *  link. Node is locked. Returns 1 if a buffer was extracted into *buf.
2310 */
2311
2312static int link_recv_changeover_msg(struct link **l_ptr,
2313				    struct sk_buff **buf)
2314{
2315	struct sk_buff *tunnel_buf = *buf;
2316	struct link *dest_link;
2317	struct tipc_msg *msg;
2318	struct tipc_msg *tunnel_msg = buf_msg(tunnel_buf);
2319	u32 msg_typ = msg_type(tunnel_msg);
2320	u32 msg_count = msg_msgcnt(tunnel_msg);
2321
2322	dest_link = (*l_ptr)->owner->links[msg_bearer_id(tunnel_msg)];
2323	if (!dest_link)
2324		goto exit;
2325	if (dest_link == *l_ptr) {
2326		err("Unexpected changeover message on link <%s>\n",
2327		    (*l_ptr)->name);
2328		goto exit;
2329	}
2330	*l_ptr = dest_link;
2331	msg = msg_get_wrapped(tunnel_msg);
2332
2333	if (msg_typ == DUPLICATE_MSG) {
2334		if (less(msg_seqno(msg), mod(dest_link->next_in_no)))
2335			goto exit;
2336		*buf = buf_extract(tunnel_buf, INT_H_SIZE);
2337		if (*buf == NULL) {
2338			warn("Link changeover error, duplicate msg dropped\n");
2339			goto exit;
2340		}
2341		buf_discard(tunnel_buf);
2342		return 1;
2343	}
2344
2345	/* First original message? */
2346
2347	if (tipc_link_is_up(dest_link)) {
2348		info("Resetting link <%s>, changeover initiated by peer\n",
2349		     dest_link->name);
2350		tipc_link_reset(dest_link);
2351		dest_link->exp_msg_count = msg_count;
2352		if (!msg_count)
2353			goto exit;
2354	} else if (dest_link->exp_msg_count == START_CHANGEOVER) {
2355		dest_link->exp_msg_count = msg_count;
2356		if (!msg_count)
2357			goto exit;
2358	}
2359
2360	/* Receive original message */
2361
2362	if (dest_link->exp_msg_count == 0) {
2363		warn("Link changeover error, "
2364		     "got too many tunnelled messages\n");
2365		goto exit;
2366	}
2367	dest_link->exp_msg_count--;
2368	if (less(msg_seqno(msg), dest_link->reset_checkpoint)) {
2369		goto exit;
2370	} else {
2371		*buf = buf_extract(tunnel_buf, INT_H_SIZE);
2372		if (*buf != NULL) {
2373			buf_discard(tunnel_buf);
2374			return 1;
2375		} else {
2376			warn("Link changeover error, original msg dropped\n");
2377		}
2378	}
2379exit:
2380	*buf = NULL;
2381	buf_discard(tunnel_buf);
2382	return 0;
2383}
2384
2385/*
2386 *  Bundler functionality:
2387 */
2388void tipc_link_recv_bundle(struct sk_buff *buf)
2389{
2390	u32 msgcount = msg_msgcnt(buf_msg(buf));
2391	u32 pos = INT_H_SIZE;
2392	struct sk_buff *obuf;
2393
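	/*
	 * Bundled messages sit back to back after the bundle header,
	 * each padded to a 4-byte boundary, hence the align() stepping
	 * below.
	 */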
2394	while (msgcount--) {
2395		obuf = buf_extract(buf, pos);
2396		if (obuf == NULL) {
2397			warn("Link unable to unbundle message(s)\n");
2398			break;
2399		}
2400		pos += align(msg_size(buf_msg(obuf)));
2401		tipc_net_route_msg(obuf);
2402	}
2403	buf_discard(buf);
2404}
2405
2406/*
2407 *  Fragmentation/defragmentation:
2408 */
2409
2410
2411/*
2412 * link_send_long_buf: Entry for buffers needing fragmentation.
2413 * The buffer is complete, including total message length.
2414 * Returns user data length.
2415 */
2416static int link_send_long_buf(struct link *l_ptr, struct sk_buff *buf)
2417{
2418	struct sk_buff *buf_chain = NULL;
2419	struct sk_buff *buf_chain_tail = (struct sk_buff *)&buf_chain;
2420	struct tipc_msg *inmsg = buf_msg(buf);
2421	struct tipc_msg fragm_hdr;
2422	u32 insize = msg_size(inmsg);
2423	u32 dsz = msg_data_sz(inmsg);
2424	unchar *crs = buf->data;
2425	u32 rest = insize;
2426	u32 pack_sz = l_ptr->max_pkt;
2427	u32 fragm_sz = pack_sz - INT_H_SIZE;
2428	u32 fragm_no = 0;
2429	u32 destaddr;
2430
2431	if (msg_short(inmsg))
2432		destaddr = l_ptr->addr;
2433	else
2434		destaddr = msg_destnode(inmsg);
2435
2436	/* Prepare reusable fragment header: */
2437
2438	tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
2439		 INT_H_SIZE, destaddr);
2440
2441	/* Chop up message: */
2442
2443	while (rest > 0) {
2444		struct sk_buff *fragm;
2445
2446		if (rest <= fragm_sz) {
2447			fragm_sz = rest;
2448			msg_set_type(&fragm_hdr, LAST_FRAGMENT);
2449		}
2450		fragm = tipc_buf_acquire(fragm_sz + INT_H_SIZE);
2451		if (fragm == NULL) {
2452			buf_discard(buf);
2453			while (buf_chain) {
2454				buf = buf_chain;
2455				buf_chain = buf_chain->next;
2456				buf_discard(buf);
2457			}
2458			return -ENOMEM;
2459		}
2460		msg_set_size(&fragm_hdr, fragm_sz + INT_H_SIZE);
2461		fragm_no++;
2462		msg_set_fragm_no(&fragm_hdr, fragm_no);
2463		skb_copy_to_linear_data(fragm, &fragm_hdr, INT_H_SIZE);
2464		skb_copy_to_linear_data_offset(fragm, INT_H_SIZE, crs,
2465					       fragm_sz);
2466		buf_chain_tail->next = fragm;
2467		buf_chain_tail = fragm;
2468
2469		rest -= fragm_sz;
2470		crs += fragm_sz;
2471		msg_set_type(&fragm_hdr, FRAGMENT);
2472	}
2473	buf_discard(buf);
2474
2475	/* Append chain of fragments to send queue & send them */
2476
2477	l_ptr->long_msg_seq_no++;
2478	link_add_chain_to_outqueue(l_ptr, buf_chain, l_ptr->long_msg_seq_no);
2479	l_ptr->stats.sent_fragments += fragm_no;
2480	l_ptr->stats.sent_fragmented++;
2481	tipc_link_push_queue(l_ptr);
2482
2483	return dsz;
2484}
2485
2486/*
2487 * A pending message being re-assembled must store certain values
2488 * to handle subsequent fragments correctly. The following functions
2490 * help store these values in unused, available fields in the
2490 * pending message. This makes dynamic memory allocation unnecessary.
2491 */
2492
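/*
 * Field reuse map for a pending reassembly buffer:
 *   seqno     <- sequence number of the long message
 *   ack       <- fragment size
 *   bcast_ack <- number of fragments still expected
 *   reroute   <- timer intervals the reassembly has been pending
 */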
2493static void set_long_msg_seqno(struct sk_buff *buf, u32 seqno)
2494{
2495	msg_set_seqno(buf_msg(buf), seqno);
2496}
2497
2498static u32 get_fragm_size(struct sk_buff *buf)
2499{
2500	return msg_ack(buf_msg(buf));
2501}
2502
2503static void set_fragm_size(struct sk_buff *buf, u32 sz)
2504{
2505	msg_set_ack(buf_msg(buf), sz);
2506}
2507
2508static u32 get_expected_frags(struct sk_buff *buf)
2509{
2510	return msg_bcast_ack(buf_msg(buf));
2511}
2512
2513static void set_expected_frags(struct sk_buff *buf, u32 exp)
2514{
2515	msg_set_bcast_ack(buf_msg(buf), exp);
2516}
2517
2518static u32 get_timer_cnt(struct sk_buff *buf)
2519{
2520	return msg_reroute_cnt(buf_msg(buf));
2521}
2522
2523static void incr_timer_cnt(struct sk_buff *buf)
2524{
2525	msg_incr_reroute_cnt(buf_msg(buf));
2526}
2527
2528/*
2529 * tipc_link_recv_fragment(): Called with node lock on. Returns 1 and
2530 * passes the reassembled buffer back in *fb if the message is complete.
2531 */
2532int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb,
2533			    struct tipc_msg **m)
2534{
2535	struct sk_buff *prev = NULL;
2536	struct sk_buff *fbuf = *fb;
2537	struct tipc_msg *fragm = buf_msg(fbuf);
2538	struct sk_buff *pbuf = *pending;
2539	u32 long_msg_seq_no = msg_long_msgno(fragm);
2540
2541	*fb = NULL;
2542
2543	/* Is there an incomplete message waiting for this fragment? */
2544
2545	while (pbuf && ((msg_seqno(buf_msg(pbuf)) != long_msg_seq_no) ||
2546			(msg_orignode(fragm) != msg_orignode(buf_msg(pbuf))))) {
2547		prev = pbuf;
2548		pbuf = pbuf->next;
2549	}
2550
2551	if (!pbuf && (msg_type(fragm) == FIRST_FRAGMENT)) {
2552		struct tipc_msg *imsg = (struct tipc_msg *)msg_data(fragm);
2553		u32 msg_sz = msg_size(imsg);
2554		u32 fragm_sz = msg_data_sz(fragm);
2555		u32 exp_fragm_cnt = msg_sz/fragm_sz + !!(msg_sz % fragm_sz);
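		/*
		 * i.e. ceil(msg_sz / fragm_sz); note the division trusts
		 * the sender, since a first fragment with no payload would
		 * make fragm_sz zero.
		 */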
2556		u32 max =  TIPC_MAX_USER_MSG_SIZE + NAMED_H_SIZE;
2557		if (msg_type(imsg) == TIPC_MCAST_MSG)
2558			max = TIPC_MAX_USER_MSG_SIZE + MCAST_H_SIZE;
2559		if (msg_size(imsg) > max) {
2560			buf_discard(fbuf);
2561			return 0;
2562		}
2563		pbuf = tipc_buf_acquire(msg_size(imsg));
2564		if (pbuf != NULL) {
2565			pbuf->next = *pending;
2566			*pending = pbuf;
2567			skb_copy_to_linear_data(pbuf, imsg,
2568						msg_data_sz(fragm));
2569			/*  Prepare buffer for subsequent fragments. */
2570
2571			set_long_msg_seqno(pbuf, long_msg_seq_no);
2572			set_fragm_size(pbuf, fragm_sz);
2573			set_expected_frags(pbuf, exp_fragm_cnt - 1);
2574		} else {
2575			warn("Link unable to reassemble fragmented message\n");
2576		}
2577		buf_discard(fbuf);
2578		return 0;
2579	} else if (pbuf && (msg_type(fragm) != FIRST_FRAGMENT)) {
2580		u32 dsz = msg_data_sz(fragm);
2581		u32 fsz = get_fragm_size(pbuf);
2582		u32 crs = ((msg_fragm_no(fragm) - 1) * fsz);
2583		u32 exp_frags = get_expected_frags(pbuf) - 1;
2584		skb_copy_to_linear_data_offset(pbuf, crs,
2585					       msg_data(fragm), dsz);
2586		buf_discard(fbuf);
2587
2588		/* Is message complete? */
2589
2590		if (exp_frags == 0) {
2591			if (prev)
2592				prev->next = pbuf->next;
2593			else
2594				*pending = pbuf->next;
2595			msg_reset_reroute_cnt(buf_msg(pbuf));
2596			*fb = pbuf;
2597			*m = buf_msg(pbuf);
2598			return 1;
2599		}
2600		set_expected_frags(pbuf, exp_frags);
2601		return 0;
2602	}
2603	buf_discard(fbuf);
2604	return 0;
2605}
2606
2607/**
2608 * link_check_defragm_bufs - flush stale incoming message fragments
2609 * @l_ptr: pointer to link
2610 */
2611
2612static void link_check_defragm_bufs(struct link *l_ptr)
2613{
2614	struct sk_buff *prev = NULL;
2615	struct sk_buff *next = NULL;
2616	struct sk_buff *buf = l_ptr->defragm_buf;
2617
2618	if (!buf)
2619		return;
2620	if (!link_working_working(l_ptr))
2621		return;
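	/* Drop any partial message still pending after four timer intervals */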
2622	while (buf) {
2623		u32 cnt = get_timer_cnt(buf);
2624
2625		next = buf->next;
2626		if (cnt < 4) {
2627			incr_timer_cnt(buf);
2628			prev = buf;
2629		} else {
2630			if (prev)
2631				prev->next = buf->next;
2632			else
2633				l_ptr->defragm_buf = buf->next;
2634			buf_discard(buf);
2635		}
2636		buf = next;
2637	}
2638}
2639
2640
2641
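/*
 * Example: tolerance = 3000 ms gives a continuity interval capped at
 * 500 ms and an abort limit of 3000 / (500 / 4) = 24 unacknowledged
 * probe intervals before the link is reset.
 */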
2642static void link_set_supervision_props(struct link *l_ptr, u32 tolerance)
2643{
2644	if ((tolerance < TIPC_MIN_LINK_TOL) || (tolerance > TIPC_MAX_LINK_TOL))
2645		return;
2646
2647	l_ptr->tolerance = tolerance;
2648	l_ptr->continuity_interval =
2649		((tolerance / 4) > 500) ? 500 : tolerance / 4;
2650	l_ptr->abort_limit = tolerance / (l_ptr->continuity_interval / 4);
2651}
2652
2653
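/*
 * Example: window = 50 gives send queue limits of 50/64/80/96 packets
 * for low/medium/high/critical importance (50 / 3 = 16 by integer
 * division, scaled by 4, 5 and 6).
 */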
2654void tipc_link_set_queue_limits(struct link *l_ptr, u32 window)
2655{
2656	/* Data messages from this node, including FIRST_FRAGM */
2657	l_ptr->queue_limit[TIPC_LOW_IMPORTANCE] = window;
2658	l_ptr->queue_limit[TIPC_MEDIUM_IMPORTANCE] = (window / 3) * 4;
2659	l_ptr->queue_limit[TIPC_HIGH_IMPORTANCE] = (window / 3) * 5;
2660	l_ptr->queue_limit[TIPC_CRITICAL_IMPORTANCE] = (window / 3) * 6;
2661	/* Transiting data messages, including FIRST_FRAGM */
2662	l_ptr->queue_limit[TIPC_LOW_IMPORTANCE + 4] = 300;
2663	l_ptr->queue_limit[TIPC_MEDIUM_IMPORTANCE + 4] = 600;
2664	l_ptr->queue_limit[TIPC_HIGH_IMPORTANCE + 4] = 900;
2665	l_ptr->queue_limit[TIPC_CRITICAL_IMPORTANCE + 4] = 1200;
2666	l_ptr->queue_limit[CONN_MANAGER] = 1200;
2667	l_ptr->queue_limit[CHANGEOVER_PROTOCOL] = 2500;
2668	l_ptr->queue_limit[NAME_DISTRIBUTOR] = 3000;
2669	/* FRAGMENT and LAST_FRAGMENT packets */
2670	l_ptr->queue_limit[MSG_FRAGMENTER] = 4000;
2671}
2672
2673/**
2674 * link_find_link - locate link by name
2675 * @name: ptr to link name string
2676 * @node: ptr to area to be filled with ptr to associated node
2677 *
2678 * Caller must hold 'tipc_net_lock' to ensure node and bearer are not deleted;
2679 * this also prevents link deletion.
2680 *
2681 * Returns pointer to link (or NULL if invalid link name).
2682 */
2683
2684static struct link *link_find_link(const char *name, struct tipc_node **node)
2685{
2686	struct link_name link_name_parts;
2687	struct tipc_bearer *b_ptr;
2688	struct link *l_ptr;
2689
2690	if (!link_name_validate(name, &link_name_parts))
2691		return NULL;
2692
2693	b_ptr = tipc_bearer_find_interface(link_name_parts.if_local);
2694	if (!b_ptr)
2695		return NULL;
2696
2697	*node = tipc_node_find(link_name_parts.addr_peer);
2698	if (!*node)
2699		return NULL;
2700
2701	l_ptr = (*node)->links[b_ptr->identity];
2702	if (!l_ptr || strcmp(l_ptr->name, name))
2703		return NULL;
2704
2705	return l_ptr;
2706}
2707
2708struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space,
2709				     u16 cmd)
2710{
2711	struct tipc_link_config *args;
2712	u32 new_value;
2713	struct link *l_ptr;
2714	struct tipc_node *node;
2715	int res;
2716
2717	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_CONFIG))
2718		return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
2719
2720	args = (struct tipc_link_config *)TLV_DATA(req_tlv_area);
2721	new_value = ntohl(args->value);
2722
2723	if (!strcmp(args->name, tipc_bclink_name)) {
2724		if ((cmd == TIPC_CMD_SET_LINK_WINDOW) &&
2725		    (tipc_bclink_set_queue_limits(new_value) == 0))
2726			return tipc_cfg_reply_none();
2727		return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
2728						   " (cannot change setting on broadcast link)");
2729	}
2730
2731	read_lock_bh(&tipc_net_lock);
2732	l_ptr = link_find_link(args->name, &node);
2733	if (!l_ptr) {
2734		read_unlock_bh(&tipc_net_lock);
2735		return tipc_cfg_reply_error_string("link not found");
2736	}
2737
2738	tipc_node_lock(node);
2739	res = -EINVAL;
2740	switch (cmd) {
2741	case TIPC_CMD_SET_LINK_TOL:
2742		if ((new_value >= TIPC_MIN_LINK_TOL) &&
2743		    (new_value <= TIPC_MAX_LINK_TOL)) {
2744			link_set_supervision_props(l_ptr, new_value);
2745			tipc_link_send_proto_msg(l_ptr, STATE_MSG,
2746						 0, 0, new_value, 0, 0);
2747			res = 0;
2748		}
2749		break;
2750	case TIPC_CMD_SET_LINK_PRI:
2751		if ((new_value >= TIPC_MIN_LINK_PRI) &&
2752		    (new_value <= TIPC_MAX_LINK_PRI)) {
2753			l_ptr->priority = new_value;
2754			tipc_link_send_proto_msg(l_ptr, STATE_MSG,
2755						 0, 0, 0, new_value, 0);
2756			res = 0;
2757		}
2758		break;
2759	case TIPC_CMD_SET_LINK_WINDOW:
2760		if ((new_value >= TIPC_MIN_LINK_WIN) &&
2761		    (new_value <= TIPC_MAX_LINK_WIN)) {
2762			tipc_link_set_queue_limits(l_ptr, new_value);
2763			res = 0;
2764		}
2765		break;
2766	}
2767	tipc_node_unlock(node);
2768
2769	read_unlock_bh(&tipc_net_lock);
2770	if (res)
2771		return tipc_cfg_reply_error_string("cannot change link setting");
2772
2773	return tipc_cfg_reply_none();
2774}
2775
2776/**
2777 * link_reset_statistics - reset link statistics
2778 * @l_ptr: pointer to link
2779 */
2780
2781static void link_reset_statistics(struct link *l_ptr)
2782{
2783	memset(&l_ptr->stats, 0, sizeof(l_ptr->stats));
2784	l_ptr->stats.sent_info = l_ptr->next_out_no;
2785	l_ptr->stats.recv_info = l_ptr->next_in_no;
2786}
2787
2788struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_space)
2789{
2790	char *link_name;
2791	struct link *l_ptr;
2792	struct tipc_node *node;
2793
2794	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME))
2795		return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
2796
2797	link_name = (char *)TLV_DATA(req_tlv_area);
2798	if (!strcmp(link_name, tipc_bclink_name)) {
2799		if (tipc_bclink_reset_stats())
2800			return tipc_cfg_reply_error_string("link not found");
2801		return tipc_cfg_reply_none();
2802	}
2803
2804	read_lock_bh(&tipc_net_lock);
2805	l_ptr = link_find_link(link_name, &node);
2806	if (!l_ptr) {
2807		read_unlock_bh(&tipc_net_lock);
2808		return tipc_cfg_reply_error_string("link not found");
2809	}
2810
2811	tipc_node_lock(node);
2812	link_reset_statistics(l_ptr);
2813	tipc_node_unlock(node);
2814	read_unlock_bh(&tipc_net_lock);
2815	return tipc_cfg_reply_none();
2816}
2817
2818/**
2819 * percent - convert count to a percentage of total (rounding up or down)
2820 */
2821
2822static u32 percent(u32 count, u32 total)
2823{
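	/* rounds to nearest: percent(1, 3) = 33, percent(2, 3) = 67 */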
2824	return (count * 100 + (total / 2)) / total;
2825}
2826
2827/**
2828 * tipc_link_stats - print link statistics
2829 * @name: link name
2830 * @buf: print buffer area
2831 * @buf_size: size of print buffer area
2832 *
2833 * Returns length of print buffer data string (or 0 if error)
2834 */
2835
2836static int tipc_link_stats(const char *name, char *buf, const u32 buf_size)
2837{
2838	struct print_buf pb;
2839	struct link *l_ptr;
2840	struct tipc_node *node;
2841	char *status;
2842	u32 profile_total = 0;
2843
2844	if (!strcmp(name, tipc_bclink_name))
2845		return tipc_bclink_stats(buf, buf_size);
2846
2847	tipc_printbuf_init(&pb, buf, buf_size);
2848
2849	read_lock_bh(&tipc_net_lock);
2850	l_ptr = link_find_link(name, &node);
2851	if (!l_ptr) {
2852		read_unlock_bh(&tipc_net_lock);
2853		return 0;
2854	}
2855	tipc_node_lock(node);
2856
2857	if (tipc_link_is_active(l_ptr))
2858		status = "ACTIVE";
2859	else if (tipc_link_is_up(l_ptr))
2860		status = "STANDBY";
2861	else
2862		status = "DEFUNCT";
2863	tipc_printf(&pb, "Link <%s>\n"
2864			 "  %s  MTU:%u  Priority:%u  Tolerance:%u ms"
2865			 "  Window:%u packets\n",
2866		    l_ptr->name, status, l_ptr->max_pkt,
2867		    l_ptr->priority, l_ptr->tolerance, l_ptr->queue_limit[0]);
2868	tipc_printf(&pb, "  RX packets:%u fragments:%u/%u bundles:%u/%u\n",
2869		    l_ptr->next_in_no - l_ptr->stats.recv_info,
2870		    l_ptr->stats.recv_fragments,
2871		    l_ptr->stats.recv_fragmented,
2872		    l_ptr->stats.recv_bundles,
2873		    l_ptr->stats.recv_bundled);
2874	tipc_printf(&pb, "  TX packets:%u fragments:%u/%u bundles:%u/%u\n",
2875		    l_ptr->next_out_no - l_ptr->stats.sent_info,
2876		    l_ptr->stats.sent_fragments,
2877		    l_ptr->stats.sent_fragmented,
2878		    l_ptr->stats.sent_bundles,
2879		    l_ptr->stats.sent_bundled);
2880	profile_total = l_ptr->stats.msg_length_counts;
2881	if (!profile_total)
2882		profile_total = 1;
2883	tipc_printf(&pb, "  TX profile sample:%u packets  average:%u octets\n"
2884			 "  0-64:%u%% -256:%u%% -1024:%u%% -4096:%u%% "
2885			 "-16384:%u%% -32768:%u%% -66000:%u%%\n",
2886		    l_ptr->stats.msg_length_counts,
2887		    l_ptr->stats.msg_lengths_total / profile_total,
2888		    percent(l_ptr->stats.msg_length_profile[0], profile_total),
2889		    percent(l_ptr->stats.msg_length_profile[1], profile_total),
2890		    percent(l_ptr->stats.msg_length_profile[2], profile_total),
2891		    percent(l_ptr->stats.msg_length_profile[3], profile_total),
2892		    percent(l_ptr->stats.msg_length_profile[4], profile_total),
2893		    percent(l_ptr->stats.msg_length_profile[5], profile_total),
2894		    percent(l_ptr->stats.msg_length_profile[6], profile_total));
2895	tipc_printf(&pb, "  RX states:%u probes:%u naks:%u defs:%u dups:%u\n",
2896		    l_ptr->stats.recv_states,
2897		    l_ptr->stats.recv_probes,
2898		    l_ptr->stats.recv_nacks,
2899		    l_ptr->stats.deferred_recv,
2900		    l_ptr->stats.duplicates);
2901	tipc_printf(&pb, "  TX states:%u probes:%u naks:%u acks:%u dups:%u\n",
2902		    l_ptr->stats.sent_states,
2903		    l_ptr->stats.sent_probes,
2904		    l_ptr->stats.sent_nacks,
2905		    l_ptr->stats.sent_acks,
2906		    l_ptr->stats.retransmitted);
2907	tipc_printf(&pb, "  Congestion bearer:%u link:%u  Send queue max:%u avg:%u\n",
2908		    l_ptr->stats.bearer_congs,
2909		    l_ptr->stats.link_congs,
2910		    l_ptr->stats.max_queue_sz,
2911		    l_ptr->stats.queue_sz_counts
2912		    ? (l_ptr->stats.accu_queue_sz / l_ptr->stats.queue_sz_counts)
2913		    : 0);
2914
2915	tipc_node_unlock(node);
2916	read_unlock_bh(&tipc_net_lock);
2917	return tipc_printbuf_validate(&pb);
2918}
2919
2920#define MAX_LINK_STATS_INFO 2000
2921
2922struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area, int req_tlv_space)
2923{
2924	struct sk_buff *buf;
2925	struct tlv_desc *rep_tlv;
2926	int str_len;
2927
2928	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME))
2929		return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
2930
2931	buf = tipc_cfg_reply_alloc(TLV_SPACE(MAX_LINK_STATS_INFO));
2932	if (!buf)
2933		return NULL;
2934
2935	rep_tlv = (struct tlv_desc *)buf->data;
2936
2937	str_len = tipc_link_stats((char *)TLV_DATA(req_tlv_area),
2938				  (char *)TLV_DATA(rep_tlv), MAX_LINK_STATS_INFO);
2939	if (!str_len) {
2940		buf_discard(buf);
2941		return tipc_cfg_reply_error_string("link not found");
2942	}
2943
2944	skb_put(buf, TLV_SPACE(str_len));
2945	TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
2946
2947	return buf;
2948}
2949
2950/**
2951 * tipc_link_get_max_pkt - get maximum packet size to use when sending to destination
2952 * @dest: network address of destination node
2953 * @selector: used to select from set of active links
2954 *
2955 * If no active link can be found, uses default maximum packet size.
2956 */
2957
2958u32 tipc_link_get_max_pkt(u32 dest, u32 selector)
2959{
2960	struct tipc_node *n_ptr;
2961	struct link *l_ptr;
2962	u32 res = MAX_PKT_DEFAULT;
2963
2964	if (dest == tipc_own_addr)
2965		return MAX_MSG_SIZE;
2966
2967	read_lock_bh(&tipc_net_lock);
2968	n_ptr = tipc_node_find(dest);
2969	if (n_ptr) {
2970		tipc_node_lock(n_ptr);
2971		l_ptr = n_ptr->active_links[selector & 1];
2972		if (l_ptr)
2973			res = l_ptr->max_pkt;
2974		tipc_node_unlock(n_ptr);
2975	}
2976	read_unlock_bh(&tipc_net_lock);
2977	return res;
2978}
2979
2980static void link_print(struct link *l_ptr, const char *str)
2981{
2982	char print_area[256];
2983	struct print_buf pb;
2984	struct print_buf *buf = &pb;
2985
2986	tipc_printbuf_init(buf, print_area, sizeof(print_area));
2987
2988	tipc_printf(buf, "%s", str);
2989	tipc_printf(buf, "Link %x<%s>:",
2990		    l_ptr->addr, l_ptr->b_ptr->name);
2991
2992#ifdef CONFIG_TIPC_DEBUG
2993	if (link_reset_reset(l_ptr) || link_reset_unknown(l_ptr))
2994		goto print_state;
2995
2996	tipc_printf(buf, ": NXO(%u):", mod(l_ptr->next_out_no));
2997	tipc_printf(buf, "NXI(%u):", mod(l_ptr->next_in_no));
2998	tipc_printf(buf, "SQUE");
2999	if (l_ptr->first_out) {
3000		tipc_printf(buf, "[%u..", msg_seqno(buf_msg(l_ptr->first_out)));
3001		if (l_ptr->next_out)
3002			tipc_printf(buf, "%u..",
3003				    msg_seqno(buf_msg(l_ptr->next_out)));
3004		tipc_printf(buf, "%u]", msg_seqno(buf_msg(l_ptr->last_out)));
3005		if ((mod(msg_seqno(buf_msg(l_ptr->last_out)) -
3006			 msg_seqno(buf_msg(l_ptr->first_out)))
3007		     != (l_ptr->out_queue_size - 1)) ||
3008		    (l_ptr->last_out->next != NULL)) {
3009			tipc_printf(buf, "\nSend queue inconsistency\n");
3010			tipc_printf(buf, "first_out= %p ", l_ptr->first_out);
3011			tipc_printf(buf, "next_out= %p ", l_ptr->next_out);
3012			tipc_printf(buf, "last_out= %p ", l_ptr->last_out);
3013		}
3014	} else
3015		tipc_printf(buf, "[]");
3016	tipc_printf(buf, "SQSIZ(%u)", l_ptr->out_queue_size);
3017	if (l_ptr->oldest_deferred_in) {
3018		u32 o = msg_seqno(buf_msg(l_ptr->oldest_deferred_in));
3019		u32 n = msg_seqno(buf_msg(l_ptr->newest_deferred_in));
3020		tipc_printf(buf, ":RQUE[%u..%u]", o, n);
3021		if (l_ptr->deferred_inqueue_sz != mod((n + 1) - o)) {
3022			tipc_printf(buf, ":RQSIZ(%u)",
3023				    l_ptr->deferred_inqueue_sz);
3024		}
3025	}
3026print_state:
3027#endif
3028
3029	if (link_working_unknown(l_ptr))
3030		tipc_printf(buf, ":WU");
3031	else if (link_reset_reset(l_ptr))
3032		tipc_printf(buf, ":RR");
3033	else if (link_reset_unknown(l_ptr))
3034		tipc_printf(buf, ":RU");
3035	else if (link_working_working(l_ptr))
3036		tipc_printf(buf, ":WW");
3037	tipc_printf(buf, "\n");
3038
3039	tipc_printbuf_validate(buf);
3040	info("%s", print_area);
3041}
3042
v6.13.7
   1/*
   2 * net/tipc/link.c: TIPC link code
   3 *
   4 * Copyright (c) 1996-2007, 2012-2016, Ericsson AB
   5 * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
   6 * All rights reserved.
   7 *
   8 * Redistribution and use in source and binary forms, with or without
   9 * modification, are permitted provided that the following conditions are met:
  10 *
  11 * 1. Redistributions of source code must retain the above copyright
  12 *    notice, this list of conditions and the following disclaimer.
  13 * 2. Redistributions in binary form must reproduce the above copyright
  14 *    notice, this list of conditions and the following disclaimer in the
  15 *    documentation and/or other materials provided with the distribution.
  16 * 3. Neither the names of the copyright holders nor the names of its
  17 *    contributors may be used to endorse or promote products derived from
  18 *    this software without specific prior written permission.
  19 *
  20 * Alternatively, this software may be distributed under the terms of the
  21 * GNU General Public License ("GPL") version 2 as published by the Free
  22 * Software Foundation.
  23 *
  24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  34 * POSSIBILITY OF SUCH DAMAGE.
  35 */
  36
  37#include "core.h"
  38#include "subscr.h"
  39#include "link.h"
  40#include "bcast.h"
  41#include "socket.h"
  42#include "name_distr.h"
  43#include "discover.h"
  44#include "netlink.h"
  45#include "monitor.h"
  46#include "trace.h"
  47#include "crypto.h"
  48
  49#include <linux/pkt_sched.h>
  50
  51struct tipc_stats {
  52	u32 sent_pkts;
  53	u32 recv_pkts;
  54	u32 sent_states;
  55	u32 recv_states;
  56	u32 sent_probes;
  57	u32 recv_probes;
  58	u32 sent_nacks;
  59	u32 recv_nacks;
  60	u32 sent_acks;
  61	u32 sent_bundled;
  62	u32 sent_bundles;
  63	u32 recv_bundled;
  64	u32 recv_bundles;
  65	u32 retransmitted;
  66	u32 sent_fragmented;
  67	u32 sent_fragments;
  68	u32 recv_fragmented;
  69	u32 recv_fragments;
  70	u32 link_congs;		/* # port sends blocked by congestion */
  71	u32 deferred_recv;
  72	u32 duplicates;
  73	u32 max_queue_sz;	/* send queue size high water mark */
  74	u32 accu_queue_sz;	/* used for send queue size profiling */
  75	u32 queue_sz_counts;	/* used for send queue size profiling */
  76	u32 msg_length_counts;	/* used for message length profiling */
  77	u32 msg_lengths_total;	/* used for message length profiling */
  78	u32 msg_length_profile[7]; /* used for msg. length profiling */
  79};
  80
  81/**
  82 * struct tipc_link - TIPC link data structure
  83 * @addr: network address of link's peer node
  84 * @name: link name character string
  85 * @net: pointer to namespace struct
  86 * @peer_session: link session # being used by peer end of link
  87 * @peer_bearer_id: bearer id used by link's peer endpoint
  88 * @bearer_id: local bearer id used by link
  89 * @tolerance: minimum link continuity loss needed to reset link [in ms]
  90 * @abort_limit: # of unacknowledged continuity probes needed to reset link
  91 * @state: current state of link FSM
  92 * @peer_caps: bitmap describing capabilities of peer node
  93 * @silent_intv_cnt: # of timer intervals without any reception from peer
  94 * @priority: current link priority
  95 * @net_plane: current link network plane ('A' through 'H')
  96 * @mon_state: cookie with information needed by link monitor
  97 * @mtu: current maximum packet size for this link
  98 * @advertised_mtu: advertised own mtu when link is being established
  99 * @backlogq: queue for messages waiting to be sent
 100 * @ackers: # of peers that needs to ack each packet before it can be released
 101 * @acked: # last packet acked by a certain peer. Used for broadcast.
 102 * @rcv_nxt: next sequence number to expect for inbound messages
 103 * @inputq: buffer queue for messages to be delivered upwards
 104 * @namedq: buffer queue for name table messages to be delivered upwards
 105 * @wakeupq: linked list of wakeup msgs waiting for link congestion to abate
 106 * @reasm_buf: head of partially reassembled inbound message fragments
 107 * @stats: collects statistics regarding link activity
 108 * @session: session to be used by link
 109 * @snd_nxt_state: next send seq number
 110 * @rcv_nxt_state: next rcv seq number
 111 * @in_session: have received ACTIVATE_MSG from peer
 112 * @active: link is active
 113 * @if_name: associated interface name
 114 * @rst_cnt: link reset counter
 115 * @drop_point: seq number for failover handling (FIXME)
 116 * @failover_reasm_skb: saved failover msg ptr (FIXME)
 117 * @failover_deferdq: deferred message queue for failover processing (FIXME)
 118 * @transmq: the link's transmit queue
 119 * @backlog: link's backlog by priority (importance)
 120 * @snd_nxt: next sequence number to be used
 121 * @rcv_unacked: # messages read by user, but not yet acked back to peer
 122 * @deferdq: deferred receive queue
 123 * @window: sliding window size for congestion handling
 124 * @min_win: minimal send window to be used by link
 125 * @ssthresh: slow start threshold for congestion handling
 126 * @max_win: maximal send window to be used by link
 127 * @cong_acks: congestion acks for congestion avoidance (FIXME)
 128 * @checkpoint: seq number for congestion window size handling
 129 * @reasm_tnlmsg: fragmentation/reassembly area for tunnel protocol message
 130 * @last_gap: last gap ack blocks for bcast (FIXME)
 131 * @last_ga: ptr to gap ack blocks
 132 * @bc_rcvlink: the peer specific link used for broadcast reception
 133 * @bc_sndlink: the namespace global link used for broadcast sending
 134 * @nack_state: bcast nack state
 135 * @bc_peer_is_up: peer has acked the bcast init msg
 136 */
 137struct tipc_link {
 138	u32 addr;
 139	char name[TIPC_MAX_LINK_NAME];
 140	struct net *net;
 141
 142	/* Management and link supervision data */
 143	u16 peer_session;
 144	u16 session;
 145	u16 snd_nxt_state;
 146	u16 rcv_nxt_state;
 147	u32 peer_bearer_id;
 148	u32 bearer_id;
 149	u32 tolerance;
 150	u32 abort_limit;
 151	u32 state;
 152	u16 peer_caps;
 153	bool in_session;
 154	bool active;
 155	u32 silent_intv_cnt;
 156	char if_name[TIPC_MAX_IF_NAME];
 157	u32 priority;
 158	char net_plane;
 159	struct tipc_mon_state mon_state;
 160	u16 rst_cnt;
 161
 162	/* Failover/synch */
 163	u16 drop_point;
 164	struct sk_buff *failover_reasm_skb;
 165	struct sk_buff_head failover_deferdq;
 166
 167	/* Max packet negotiation */
 168	u16 mtu;
 169	u16 advertised_mtu;
 170
 171	/* Sending */
 172	struct sk_buff_head transmq;
 173	struct sk_buff_head backlogq;
 174	struct {
 175		u16 len;
 176		u16 limit;
 177		struct sk_buff *target_bskb;
 178	} backlog[5];
 179	u16 snd_nxt;
 180
 181	/* Reception */
 182	u16 rcv_nxt;
 183	u32 rcv_unacked;
 184	struct sk_buff_head deferdq;
 185	struct sk_buff_head *inputq;
 186	struct sk_buff_head *namedq;
 187
 188	/* Congestion handling */
 189	struct sk_buff_head wakeupq;
 190	u16 window;
 191	u16 min_win;
 192	u16 ssthresh;
 193	u16 max_win;
 194	u16 cong_acks;
 195	u16 checkpoint;
 196
 197	/* Fragmentation/reassembly */
 198	struct sk_buff *reasm_buf;
 199	struct sk_buff *reasm_tnlmsg;
 200
 201	/* Broadcast */
 202	u16 ackers;
 203	u16 acked;
 204	u16 last_gap;
 205	struct tipc_gap_ack_blks *last_ga;
 206	struct tipc_link *bc_rcvlink;
 207	struct tipc_link *bc_sndlink;
 208	u8 nack_state;
 209	bool bc_peer_is_up;
 210
 211	/* Statistics */
 212	struct tipc_stats stats;
 213};
 214
 215/*
 216 * Error message prefixes
 217 */
 218static const char *link_co_err = "Link tunneling error, ";
 219static const char *link_rst_msg = "Resetting link ";
 220
 221/* Send states for broadcast NACKs
 222 */
 223enum {
 224	BC_NACK_SND_CONDITIONAL,
 225	BC_NACK_SND_UNCONDITIONAL,
 226	BC_NACK_SND_SUPPRESS,
 227};
 228
 229#define TIPC_BC_RETR_LIM  (jiffies + msecs_to_jiffies(10))
 230#define TIPC_UC_RETR_TIME (jiffies + msecs_to_jiffies(1))
 231
 232/* Link FSM states:
 233 */
 234enum {
 235	LINK_ESTABLISHED     = 0xe,
 236	LINK_ESTABLISHING    = 0xe  << 4,
 237	LINK_RESET           = 0x1  << 8,
 238	LINK_RESETTING       = 0x2  << 12,
 239	LINK_PEER_RESET      = 0xd  << 16,
 240	LINK_FAILINGOVER     = 0xf  << 20,
 241	LINK_SYNCHING        = 0xc  << 24
 242};
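/*
 * Each state above occupies its own nibble, so a bitwise OR of states
 * (as tested in tipc_link_is_up() and tipc_link_is_blocked()) can never
 * alias a different state value.
 */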
 243
 244static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
 245			       struct sk_buff_head *xmitq);
 246static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
 247				      bool probe_reply, u16 rcvgap,
 248				      int tolerance, int priority,
 249				      struct sk_buff_head *xmitq);
 250static void link_print(struct tipc_link *l, const char *str);
 251static int tipc_link_build_nack_msg(struct tipc_link *l,
 252				    struct sk_buff_head *xmitq);
 253static void tipc_link_build_bc_init_msg(struct tipc_link *l,
 254					struct sk_buff_head *xmitq);
 255static u8 __tipc_build_gap_ack_blks(struct tipc_gap_ack_blks *ga,
 256				    struct tipc_link *l, u8 start_index);
 257static u16 tipc_build_gap_ack_blks(struct tipc_link *l, struct tipc_msg *hdr);
 258static int tipc_link_advance_transmq(struct tipc_link *l, struct tipc_link *r,
 259				     u16 acked, u16 gap,
 260				     struct tipc_gap_ack_blks *ga,
 261				     struct sk_buff_head *xmitq,
 262				     bool *retransmitted, int *rc);
 263static void tipc_link_update_cwin(struct tipc_link *l, int released,
 264				  bool retransmitted);
 265/*
 266 *  Simple non-static link routines (i.e. referenced outside this file)
 267 */
 268bool tipc_link_is_up(struct tipc_link *l)
 269{
 270	return l->state & (LINK_ESTABLISHED | LINK_SYNCHING);
 271}
 272
 273bool tipc_link_peer_is_down(struct tipc_link *l)
 274{
 275	return l->state == LINK_PEER_RESET;
 276}
 277
 278bool tipc_link_is_reset(struct tipc_link *l)
 279{
 280	return l->state & (LINK_RESET | LINK_FAILINGOVER | LINK_ESTABLISHING);
 281}
 282
 283bool tipc_link_is_establishing(struct tipc_link *l)
 284{
 285	return l->state == LINK_ESTABLISHING;
 286}
 287
 288bool tipc_link_is_synching(struct tipc_link *l)
 289{
 290	return l->state == LINK_SYNCHING;
 291}
 292
 293bool tipc_link_is_failingover(struct tipc_link *l)
 294{
 295	return l->state == LINK_FAILINGOVER;
 296}
 297
 298bool tipc_link_is_blocked(struct tipc_link *l)
 299{
 300	return l->state & (LINK_RESETTING | LINK_PEER_RESET | LINK_FAILINGOVER);
 301}
 302
 303static bool link_is_bc_sndlink(struct tipc_link *l)
 304{
 305	return !l->bc_sndlink;
 306}
 307
 308static bool link_is_bc_rcvlink(struct tipc_link *l)
 309{
 310	return ((l->bc_rcvlink == l) && !link_is_bc_sndlink(l));
 311}
 312
 313void tipc_link_set_active(struct tipc_link *l, bool active)
 314{
 315	l->active = active;
 316}
 317
 318u32 tipc_link_id(struct tipc_link *l)
 319{
 320	return l->peer_bearer_id << 16 | l->bearer_id;
 321}
 323int tipc_link_min_win(struct tipc_link *l)
 324{
 325	return l->min_win;
 326}
 327
 328int tipc_link_max_win(struct tipc_link *l)
 329{
 330	return l->max_win;
 331}
 332
 333int tipc_link_prio(struct tipc_link *l)
 334{
 335	return l->priority;
 336}
 337
 338unsigned long tipc_link_tolerance(struct tipc_link *l)
 339{
 340	return l->tolerance;
 341}
 342
 343struct sk_buff_head *tipc_link_inputq(struct tipc_link *l)
 344{
 345	return l->inputq;
 346}
 347
 348char tipc_link_plane(struct tipc_link *l)
 349{
 350	return l->net_plane;
 351}
 352
 353struct net *tipc_link_net(struct tipc_link *l)
 354{
 355	return l->net;
 356}
 357
 358void tipc_link_update_caps(struct tipc_link *l, u16 capabilities)
 359{
 360	l->peer_caps = capabilities;
 361}
 362
 363void tipc_link_add_bc_peer(struct tipc_link *snd_l,
 364			   struct tipc_link *uc_l,
 365			   struct sk_buff_head *xmitq)
 366{
 367	struct tipc_link *rcv_l = uc_l->bc_rcvlink;
 368
 369	snd_l->ackers++;
 370	rcv_l->acked = snd_l->snd_nxt - 1;
 371	snd_l->state = LINK_ESTABLISHED;
 372	tipc_link_build_bc_init_msg(uc_l, xmitq);
 373}
 374
 375void tipc_link_remove_bc_peer(struct tipc_link *snd_l,
 376			      struct tipc_link *rcv_l,
 377			      struct sk_buff_head *xmitq)
 378{
 379	u16 ack = snd_l->snd_nxt - 1;
 380
 381	snd_l->ackers--;
 382	rcv_l->bc_peer_is_up = true;
 383	rcv_l->state = LINK_ESTABLISHED;
 384	tipc_link_bc_ack_rcv(rcv_l, ack, 0, NULL, xmitq, NULL);
 385	trace_tipc_link_reset(rcv_l, TIPC_DUMP_ALL, "bclink removed!");
 386	tipc_link_reset(rcv_l);
 387	rcv_l->state = LINK_RESET;
 388	if (!snd_l->ackers) {
 389		trace_tipc_link_reset(snd_l, TIPC_DUMP_ALL, "zero ackers!");
 390		tipc_link_reset(snd_l);
 391		snd_l->state = LINK_RESET;
 392		__skb_queue_purge(xmitq);
 393	}
 394}
 395
 396int tipc_link_bc_peers(struct tipc_link *l)
 397{
 398	return l->ackers;
 399}
 400
 401static u16 link_bc_rcv_gap(struct tipc_link *l)
 402{
 403	struct sk_buff *skb = skb_peek(&l->deferdq);
 404	u16 gap = 0;
 405
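	/* gap from rcv_nxt up to the first deferred packet, or, if nothing
	 * is deferred, up to snd_nxt
	 */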
 406	if (more(l->snd_nxt, l->rcv_nxt))
 407		gap = l->snd_nxt - l->rcv_nxt;
 408	if (skb)
 409		gap = buf_seqno(skb) - l->rcv_nxt;
 410	return gap;
 411}
 412
 413void tipc_link_set_mtu(struct tipc_link *l, int mtu)
 414{
 415	l->mtu = mtu;
 416}
 417
 418int tipc_link_mtu(struct tipc_link *l)
 419{
 420	return l->mtu;
 421}
 422
 423int tipc_link_mss(struct tipc_link *l)
 424{
 425#ifdef CONFIG_TIPC_CRYPTO
 426	return l->mtu - INT_H_SIZE - EMSG_OVERHEAD;
 427#else
 428	return l->mtu - INT_H_SIZE;
 429#endif
 430}
 431
 432u16 tipc_link_rcv_nxt(struct tipc_link *l)
 433{
 434	return l->rcv_nxt;
 435}
 436
 437u16 tipc_link_acked(struct tipc_link *l)
 438{
 439	return l->acked;
 440}
 441
 442char *tipc_link_name(struct tipc_link *l)
 443{
 444	return l->name;
 445}
 446
 447u32 tipc_link_state(struct tipc_link *l)
 448{
 449	return l->state;
 450}
 451
 452/**
 453 * tipc_link_create - create a new link
 454 * @net: pointer to associated network namespace
 455 * @if_name: associated interface name
 456 * @bearer_id: id (index) of associated bearer
 457 * @tolerance: link tolerance to be used by link
 458 * @net_plane: network plane (A,B,C..) this link belongs to
 459 * @mtu: mtu to be advertised by link
 460 * @priority: priority to be used by link
 461 * @min_win: minimal send window to be used by link
 462 * @max_win: maximal send window to be used by link
 463 * @session: session to be used by link
 464 * @peer: node id of peer node
 465 * @peer_caps: bitmap describing peer node capabilities
 466 * @bc_sndlink: the namespace global link used for broadcast sending
 467 * @bc_rcvlink: the peer specific link used for broadcast reception
 468 * @inputq: queue to put messages ready for delivery
 469 * @namedq: queue to put binding table update messages ready for delivery
 470 * @link: return value, pointer to put the created link
 471 * @self: local unicast link id
 472 * @peer_id: 128-bit ID of peer
 473 *
 474 * Return: true if link was created, otherwise false
 475 */
 476bool tipc_link_create(struct net *net, char *if_name, int bearer_id,
 477		      int tolerance, char net_plane, u32 mtu, int priority,
 478		      u32 min_win, u32 max_win, u32 session, u32 self,
 479		      u32 peer, u8 *peer_id, u16 peer_caps,
 480		      struct tipc_link *bc_sndlink,
 481		      struct tipc_link *bc_rcvlink,
 482		      struct sk_buff_head *inputq,
 483		      struct sk_buff_head *namedq,
 484		      struct tipc_link **link)
 485{
 486	char peer_str[NODE_ID_STR_LEN] = {0,};
 487	char self_str[NODE_ID_STR_LEN] = {0,};
 488	struct tipc_link *l;
 489
 490	l = kzalloc(sizeof(*l), GFP_ATOMIC);
 491	if (!l)
 492		return false;
 493	*link = l;
 494	l->session = session;
 495
 496	/* Set link name for unicast links only */
 497	if (peer_id) {
 498		tipc_nodeid2string(self_str, tipc_own_id(net));
 499		if (strlen(self_str) > 16)
 500			sprintf(self_str, "%x", self);
 501		tipc_nodeid2string(peer_str, peer_id);
 502		if (strlen(peer_str) > 16)
 503			sprintf(peer_str, "%x", peer);
 504	}
 505	/* Peer i/f name will be completed by reset/activate message */
 506	snprintf(l->name, sizeof(l->name), "%s:%s-%s:unknown",
 507		 self_str, if_name, peer_str);
 508
 509	strcpy(l->if_name, if_name);
 510	l->addr = peer;
 511	l->peer_caps = peer_caps;
 512	l->net = net;
 513	l->in_session = false;
 514	l->bearer_id = bearer_id;
 515	l->tolerance = tolerance;
 516	if (bc_rcvlink)
 517		bc_rcvlink->tolerance = tolerance;
 518	l->net_plane = net_plane;
 519	l->advertised_mtu = mtu;
 520	l->mtu = mtu;
 521	l->priority = priority;
 522	tipc_link_set_queue_limits(l, min_win, max_win);
 523	l->ackers = 1;
 524	l->bc_sndlink = bc_sndlink;
 525	l->bc_rcvlink = bc_rcvlink;
 526	l->inputq = inputq;
 527	l->namedq = namedq;
 528	l->state = LINK_RESETTING;
 529	__skb_queue_head_init(&l->transmq);
 530	__skb_queue_head_init(&l->backlogq);
 531	__skb_queue_head_init(&l->deferdq);
 532	__skb_queue_head_init(&l->failover_deferdq);
 533	skb_queue_head_init(&l->wakeupq);
 534	skb_queue_head_init(l->inputq);
 535	return true;
 536}
 537
 538/**
 539 * tipc_link_bc_create - create new link to be used for broadcast
 540 * @net: pointer to associated network namespace
 541 * @mtu: mtu to be used initially if no peers
 542 * @min_win: minimal send window to be used by link
 543 * @max_win: maximal send window to be used by link
 544 * @inputq: queue to put messages ready for delivery
 545 * @namedq: queue to put binding table update messages ready for delivery
 546 * @link: return value, pointer to put the created link
 547 * @ownnode: identity of own node
 548 * @peer: node id of peer node
 549 * @peer_id: 128-bit ID of peer
 550 * @peer_caps: bitmap describing peer node capabilities
 551 * @bc_sndlink: the namespace global link used for broadcast sending
 552 *
 553 * Return: true if link was created, otherwise false
 554 */
 555bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer, u8 *peer_id,
 556			 int mtu, u32 min_win, u32 max_win, u16 peer_caps,
 557			 struct sk_buff_head *inputq,
 558			 struct sk_buff_head *namedq,
 559			 struct tipc_link *bc_sndlink,
 560			 struct tipc_link **link)
 561{
 562	struct tipc_link *l;
 563
 564	if (!tipc_link_create(net, "", MAX_BEARERS, 0, 'Z', mtu, 0, min_win,
 565			      max_win, 0, ownnode, peer, NULL, peer_caps,
 566			      bc_sndlink, NULL, inputq, namedq, link))
 567		return false;
 568
 569	l = *link;
 570	if (peer_id) {
 571		char peer_str[NODE_ID_STR_LEN] = {0,};
 572
 573		tipc_nodeid2string(peer_str, peer_id);
 574		if (strlen(peer_str) > 16)
 575			sprintf(peer_str, "%x", peer);
 576		/* Broadcast receiver link name: "broadcast-link:<peer>" */
 577		snprintf(l->name, sizeof(l->name), "%s:%s", tipc_bclink_name,
 578			 peer_str);
 579	} else {
 580		strcpy(l->name, tipc_bclink_name);
 581	}
 582	trace_tipc_link_reset(l, TIPC_DUMP_ALL, "bclink created!");
 583	tipc_link_reset(l);
 584	l->state = LINK_RESET;
 585	l->ackers = 0;
 586	l->bc_rcvlink = l;
 587
 588	/* Broadcast send link is always up */
 589	if (link_is_bc_sndlink(l))
 590		l->state = LINK_ESTABLISHED;
 591
 592	/* Disable replicast if even a single peer doesn't support it */
 593	if (link_is_bc_rcvlink(l) && !(peer_caps & TIPC_BCAST_RCAST))
 594		tipc_bcast_toggle_rcast(net, false);
 595
 596	return true;
 597}
 598
 599/**
 600 * tipc_link_fsm_evt - link finite state machine
 601 * @l: pointer to link
 602 * @evt: state machine event to be processed
 603 */
 604int tipc_link_fsm_evt(struct tipc_link *l, int evt)
 605{
 606	int rc = 0;
 607	int old_state = l->state;
 608
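	/* Outer switch below selects on the current state, the inner one
	 * on the event; any combination not listed is illegal and is only
	 * logged via the illegal_evt label
	 */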
 609	switch (l->state) {
 610	case LINK_RESETTING:
 611		switch (evt) {
 612		case LINK_PEER_RESET_EVT:
 613			l->state = LINK_PEER_RESET;
 614			break;
 615		case LINK_RESET_EVT:
 616			l->state = LINK_RESET;
 617			break;
 618		case LINK_FAILURE_EVT:
 619		case LINK_FAILOVER_BEGIN_EVT:
 620		case LINK_ESTABLISH_EVT:
 621		case LINK_FAILOVER_END_EVT:
 622		case LINK_SYNCH_BEGIN_EVT:
 623		case LINK_SYNCH_END_EVT:
 624		default:
 625			goto illegal_evt;
 626		}
 627		break;
 628	case LINK_RESET:
 629		switch (evt) {
 630		case LINK_PEER_RESET_EVT:
 631			l->state = LINK_ESTABLISHING;
 632			break;
 633		case LINK_FAILOVER_BEGIN_EVT:
 634			l->state = LINK_FAILINGOVER;
 635			break;
 636		case LINK_FAILURE_EVT:
 637		case LINK_RESET_EVT:
 638		case LINK_ESTABLISH_EVT:
 639		case LINK_FAILOVER_END_EVT:
 640			break;
 641		case LINK_SYNCH_BEGIN_EVT:
 642		case LINK_SYNCH_END_EVT:
 643		default:
 644			goto illegal_evt;
 645		}
 646		break;
 647	case LINK_PEER_RESET:
 648		switch (evt) {
 649		case LINK_RESET_EVT:
 650			l->state = LINK_ESTABLISHING;
 651			break;
 652		case LINK_PEER_RESET_EVT:
 653		case LINK_ESTABLISH_EVT:
 654		case LINK_FAILURE_EVT:
 655			break;
 656		case LINK_SYNCH_BEGIN_EVT:
 657		case LINK_SYNCH_END_EVT:
 658		case LINK_FAILOVER_BEGIN_EVT:
 659		case LINK_FAILOVER_END_EVT:
 660		default:
 661			goto illegal_evt;
 662		}
 663		break;
 664	case LINK_FAILINGOVER:
 665		switch (evt) {
 666		case LINK_FAILOVER_END_EVT:
 667			l->state = LINK_RESET;
 668			break;
 669		case LINK_PEER_RESET_EVT:
 670		case LINK_RESET_EVT:
 671		case LINK_ESTABLISH_EVT:
 672		case LINK_FAILURE_EVT:
 673			break;
 674		case LINK_FAILOVER_BEGIN_EVT:
 675		case LINK_SYNCH_BEGIN_EVT:
 676		case LINK_SYNCH_END_EVT:
 677		default:
 678			goto illegal_evt;
 679		}
 680		break;
 681	case LINK_ESTABLISHING:
 682		switch (evt) {
 683		case LINK_ESTABLISH_EVT:
 684			l->state = LINK_ESTABLISHED;
 685			break;
 686		case LINK_FAILOVER_BEGIN_EVT:
 687			l->state = LINK_FAILINGOVER;
 688			break;
 689		case LINK_RESET_EVT:
 690			l->state = LINK_RESET;
 691			break;
 692		case LINK_FAILURE_EVT:
 693		case LINK_PEER_RESET_EVT:
 694		case LINK_SYNCH_BEGIN_EVT:
 695		case LINK_FAILOVER_END_EVT:
 696			break;
 697		case LINK_SYNCH_END_EVT:
 698		default:
 699			goto illegal_evt;
 700		}
 701		break;
 702	case LINK_ESTABLISHED:
 703		switch (evt) {
 704		case LINK_PEER_RESET_EVT:
 705			l->state = LINK_PEER_RESET;
 706			rc |= TIPC_LINK_DOWN_EVT;
 707			break;
 708		case LINK_FAILURE_EVT:
 709			l->state = LINK_RESETTING;
 710			rc |= TIPC_LINK_DOWN_EVT;
 711			break;
 712		case LINK_RESET_EVT:
 713			l->state = LINK_RESET;
 714			break;
 715		case LINK_ESTABLISH_EVT:
 716		case LINK_SYNCH_END_EVT:
 717			break;
 718		case LINK_SYNCH_BEGIN_EVT:
 719			l->state = LINK_SYNCHING;
 720			break;
 721		case LINK_FAILOVER_BEGIN_EVT:
 722		case LINK_FAILOVER_END_EVT:
 723		default:
 724			goto illegal_evt;
 725		}
 726		break;
 727	case LINK_SYNCHING:
 728		switch (evt) {
 729		case LINK_PEER_RESET_EVT:
 730			l->state = LINK_PEER_RESET;
 731			rc |= TIPC_LINK_DOWN_EVT;
 732			break;
 733		case LINK_FAILURE_EVT:
 734			l->state = LINK_RESETTING;
 735			rc |= TIPC_LINK_DOWN_EVT;
 736			break;
 737		case LINK_RESET_EVT:
 738			l->state = LINK_RESET;
 739			break;
 740		case LINK_ESTABLISH_EVT:
 741		case LINK_SYNCH_BEGIN_EVT:
 742			break;
 743		case LINK_SYNCH_END_EVT:
 744			l->state = LINK_ESTABLISHED;
 745			break;
 746		case LINK_FAILOVER_BEGIN_EVT:
 747		case LINK_FAILOVER_END_EVT:
 748		default:
 749			goto illegal_evt;
 750		}
 751		break;
 752	default:
 753		pr_err("Unknown FSM state %x in %s\n", l->state, l->name);
 754	}
 755	trace_tipc_link_fsm(l->name, old_state, l->state, evt);
 756	return rc;
 757illegal_evt:
 758	pr_err("Illegal FSM event %x in state %x on link %s\n",
 759	       evt, l->state, l->name);
 760	trace_tipc_link_fsm(l->name, old_state, l->state, evt);
 761	return rc;
 762}
 763
 764/* link_profile_stats - update statistical profiling of traffic
 765 */
 766static void link_profile_stats(struct tipc_link *l)
 767{
 768	struct sk_buff *skb;
 769	struct tipc_msg *msg;
 770	int length;
 771
 772	/* Update counters used in statistical profiling of send traffic */
 773	l->stats.accu_queue_sz += skb_queue_len(&l->transmq);
 774	l->stats.queue_sz_counts++;
 775
 776	skb = skb_peek(&l->transmq);
 777	if (!skb)
 778		return;
 779	msg = buf_msg(skb);
 780	length = msg_size(msg);
 781
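	/* For fragmented messages, sample only the first fragment, using
	 * the full message length found in the inner header
	 */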
 782	if (msg_user(msg) == MSG_FRAGMENTER) {
 783		if (msg_type(msg) != FIRST_FRAGMENT)
 784			return;
 785		length = msg_size(msg_inner_hdr(msg));
 786	}
 787	l->stats.msg_lengths_total += length;
 788	l->stats.msg_length_counts++;
 789	if (length <= 64)
 790		l->stats.msg_length_profile[0]++;
 791	else if (length <= 256)
 792		l->stats.msg_length_profile[1]++;
 793	else if (length <= 1024)
 794		l->stats.msg_length_profile[2]++;
 795	else if (length <= 4096)
 796		l->stats.msg_length_profile[3]++;
 797	else if (length <= 16384)
 798		l->stats.msg_length_profile[4]++;
 799	else if (length <= 32768)
 800		l->stats.msg_length_profile[5]++;
 801	else
 802		l->stats.msg_length_profile[6]++;
 803}
 804
 805/**
 806 * tipc_link_too_silent - check if link is "too silent"
 807 * @l: tipc link to be checked
 808 *
 809 * Return: true if the link 'silent_intv_cnt' is about to reach the
 810 * 'abort_limit' value, otherwise false
 811 */
 812bool tipc_link_too_silent(struct tipc_link *l)
 813{
 814	return (l->silent_intv_cnt + 2 > l->abort_limit);
 815}
 816
 817/* tipc_link_timeout - perform periodic task as instructed from node timeout
 818 */
 819int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
 820{
 821	int mtyp = 0;
 822	int rc = 0;
 823	bool state = false;
 824	bool probe = false;
 825	bool setup = false;
 826	u16 bc_snt = l->bc_sndlink->snd_nxt - 1;
 827	u16 bc_acked = l->bc_rcvlink->acked;
 828	struct tipc_mon_state *mstate = &l->mon_state;
 829
 830	trace_tipc_link_timeout(l, TIPC_DUMP_NONE, " ");
 831	trace_tipc_link_too_silent(l, TIPC_DUMP_ALL, " ");
 832	switch (l->state) {
 833	case LINK_ESTABLISHED:
 834	case LINK_SYNCHING:
 835		mtyp = STATE_MSG;
 836		link_profile_stats(l);
 837		tipc_mon_get_state(l->net, l->addr, mstate, l->bearer_id);
 838		if (mstate->reset || (l->silent_intv_cnt > l->abort_limit))
 839			return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
 840		state = bc_acked != bc_snt;
 841		state |= l->bc_rcvlink->rcv_unacked;
 842		state |= l->rcv_unacked;
 843		state |= !skb_queue_empty(&l->transmq);
 844		probe = mstate->probing;
 845		probe |= l->silent_intv_cnt;
 846		if (probe || mstate->monitoring)
 847			l->silent_intv_cnt++;
 848		probe |= !skb_queue_empty(&l->deferdq);
 849		if (l->snd_nxt == l->checkpoint) {
 850			tipc_link_update_cwin(l, 0, 0);
 851			probe = true;
 852		}
 853		l->checkpoint = l->snd_nxt;
 854		break;
 855	case LINK_RESET:
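		/* Send the first few RESETs at every timeout, then back off
		 * to every 16th timeout to limit protocol traffic
		 */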
 856		setup = l->rst_cnt++ <= 4;
 857		setup |= !(l->rst_cnt % 16);
 858		mtyp = RESET_MSG;
 859		break;
 860	case LINK_ESTABLISHING:
 861		setup = true;
 862		mtyp = ACTIVATE_MSG;
 863		break;
 864	case LINK_PEER_RESET:
 865	case LINK_RESETTING:
 866	case LINK_FAILINGOVER:
 867		break;
 868	default:
 869		break;
 870	}
 871
 872	if (state || probe || setup)
 873		tipc_link_build_proto_msg(l, mtyp, probe, 0, 0, 0, 0, xmitq);
 874
 875	return rc;
 876}
 877
 878/**
 879 * link_schedule_user - schedule a message sender for wakeup after congestion
 880 * @l: congested link
 881 * @hdr: header of message that is being sent
 882 * Create pseudo msg to send back to user when congestion abates
 883 */
 884static int link_schedule_user(struct tipc_link *l, struct tipc_msg *hdr)
 885{
 886	u32 dnode = tipc_own_addr(l->net);
 887	u32 dport = msg_origport(hdr);
 888	struct sk_buff *skb;
 889
 890	/* Create and schedule wakeup pseudo message */
 891	skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
 892			      dnode, l->addr, dport, 0, 0);
 893	if (!skb)
 894		return -ENOBUFS;
 895	msg_set_dest_droppable(buf_msg(skb), true);
 896	TIPC_SKB_CB(skb)->chain_imp = msg_importance(hdr);
 897	skb_queue_tail(&l->wakeupq, skb);
 898	l->stats.link_congs++;
 899	trace_tipc_link_conges(l, TIPC_DUMP_ALL, "wakeup scheduled!");
 900	return -ELINKCONG;
 901}
 902
 903/**
 904 * link_prepare_wakeup - prepare users for wakeup after congestion
 905 * @l: congested link
 906 * Wake up a number of waiting users, as permitted by available space
 907 * in the send queue
 908 */
 909static void link_prepare_wakeup(struct tipc_link *l)
 910{
 911	struct sk_buff_head *wakeupq = &l->wakeupq;
 912	struct sk_buff_head *inputq = l->inputq;
 913	struct sk_buff *skb, *tmp;
 914	struct sk_buff_head tmpq;
 915	int avail[5] = {0,};
 916	int imp = 0;
 917
 918	__skb_queue_head_init(&tmpq);
 919
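	/* The wakeup budget per importance level is the remaining backlog
	 * headroom; move at most that many pending wakeup messages over to
	 * the input queue
	 */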
 920	for (; imp <= TIPC_SYSTEM_IMPORTANCE; imp++)
 921		avail[imp] = l->backlog[imp].limit - l->backlog[imp].len;
 922
 923	skb_queue_walk_safe(wakeupq, skb, tmp) {
 924		imp = TIPC_SKB_CB(skb)->chain_imp;
 925		if (avail[imp] <= 0)
 926			continue;
 927		avail[imp]--;
 928		__skb_unlink(skb, wakeupq);
 929		__skb_queue_tail(&tmpq, skb);
 930	}
 931
 932	spin_lock_bh(&inputq->lock);
 933	skb_queue_splice_tail(&tmpq, inputq);
 934	spin_unlock_bh(&inputq->lock);
 935
 936}
 937
 938/**
 939 * tipc_link_set_skb_retransmit_time - set the time at which retransmission of
 940 *                                     the given skb should be next attempted
 941 * @skb: skb to set a future retransmission time for
 942 * @l: link the skb will be transmitted on
 943 */
 944static void tipc_link_set_skb_retransmit_time(struct sk_buff *skb,
 945					      struct tipc_link *l)
 946{
 947	if (link_is_bc_sndlink(l))
 948		TIPC_SKB_CB(skb)->nxt_retr = TIPC_BC_RETR_LIM;
 949	else
 950		TIPC_SKB_CB(skb)->nxt_retr = TIPC_UC_RETR_TIME;
 951}
 952
 953void tipc_link_reset(struct tipc_link *l)
 954{
 955	struct sk_buff_head list;
 956	u32 imp;
 957
 958	__skb_queue_head_init(&list);
 959
 960	l->in_session = false;
 961	/* Force re-synch of peer session number before establishing */
 962	l->peer_session--;
 963	l->session++;
 964	l->mtu = l->advertised_mtu;
 965
 966	spin_lock_bh(&l->wakeupq.lock);
 967	skb_queue_splice_init(&l->wakeupq, &list);
 968	spin_unlock_bh(&l->wakeupq.lock);
 969
 970	spin_lock_bh(&l->inputq->lock);
 971	skb_queue_splice_init(&list, l->inputq);
 972	spin_unlock_bh(&l->inputq->lock);
 973
 974	__skb_queue_purge(&l->transmq);
 975	__skb_queue_purge(&l->deferdq);
 976	__skb_queue_purge(&l->backlogq);
 977	__skb_queue_purge(&l->failover_deferdq);
 978	for (imp = 0; imp <= TIPC_SYSTEM_IMPORTANCE; imp++) {
 979		l->backlog[imp].len = 0;
 980		l->backlog[imp].target_bskb = NULL;
 981	}
 982	kfree_skb(l->reasm_buf);
 983	kfree_skb(l->reasm_tnlmsg);
 984	kfree_skb(l->failover_reasm_skb);
 985	l->reasm_buf = NULL;
 986	l->reasm_tnlmsg = NULL;
 987	l->failover_reasm_skb = NULL;
 988	l->rcv_unacked = 0;
 989	l->snd_nxt = 1;
 990	l->rcv_nxt = 1;
 991	l->snd_nxt_state = 1;
 992	l->rcv_nxt_state = 1;
 993	l->acked = 0;
 994	l->last_gap = 0;
 995	kfree(l->last_ga);
 996	l->last_ga = NULL;
 997	l->silent_intv_cnt = 0;
 998	l->rst_cnt = 0;
 999	l->bc_peer_is_up = false;
1000	memset(&l->mon_state, 0, sizeof(l->mon_state));
1001	tipc_link_reset_stats(l);
1002}
1003
1004/**
1005 * tipc_link_xmit(): enqueue buffer list according to queue situation
1006 * @l: link to use
1007 * @list: chain of buffers containing message
1008 * @xmitq: returned list of packets to be sent by caller
1009 *
1010 * Consumes the buffer chain.
1011 * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
1012 * Return: 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
1013 */
1014int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
1015		   struct sk_buff_head *xmitq)
1016{
1017	struct sk_buff_head *backlogq = &l->backlogq;
1018	struct sk_buff_head *transmq = &l->transmq;
1019	struct sk_buff *skb, *_skb;
1020	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
1021	u16 ack = l->rcv_nxt - 1;
1022	u16 seqno = l->snd_nxt;
1023	int pkt_cnt = skb_queue_len(list);
1024	unsigned int mss = tipc_link_mss(l);
1025	unsigned int cwin = l->window;
1026	unsigned int mtu = l->mtu;
1027	struct tipc_msg *hdr;
1028	bool new_bundle;
1029	int rc = 0;
1030	int imp;
1031
1032	if (pkt_cnt <= 0)
1033		return 0;
1034
1035	hdr = buf_msg(skb_peek(list));
1036	if (unlikely(msg_size(hdr) > mtu)) {
1037		pr_warn("Too large msg, purging xmit list %d %d %d %d %d!\n",
1038			skb_queue_len(list), msg_user(hdr),
1039			msg_type(hdr), msg_size(hdr), mtu);
1040		__skb_queue_purge(list);
1041		return -EMSGSIZE;
1042	}
1043
1044	imp = msg_importance(hdr);
1045	/* Allow oversubscription of one data msg per source at congestion */
1046	if (unlikely(l->backlog[imp].len >= l->backlog[imp].limit)) {
1047		if (imp == TIPC_SYSTEM_IMPORTANCE) {
1048			pr_warn("%s<%s>, link overflow\n", link_rst_msg, l->name);
1049			return -ENOBUFS;
1050		}
1051		rc = link_schedule_user(l, hdr);
1052	}
1053
1054	if (pkt_cnt > 1) {
1055		l->stats.sent_fragmented++;
1056		l->stats.sent_fragments += pkt_cnt;
1057	}
1058
1059	/* Prepare each packet for sending, and add to relevant queue: */
1060	while ((skb = __skb_dequeue(list))) {
1061		if (likely(skb_queue_len(transmq) < cwin)) {
1062			hdr = buf_msg(skb);
1063			msg_set_seqno(hdr, seqno);
1064			msg_set_ack(hdr, ack);
1065			msg_set_bcast_ack(hdr, bc_ack);
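			/* Keep the original on transmq for potential
			 * retransmission; a clone goes to the caller's xmitq
			 */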
1066			_skb = skb_clone(skb, GFP_ATOMIC);
1067			if (!_skb) {
1068				kfree_skb(skb);
1069				__skb_queue_purge(list);
1070				return -ENOBUFS;
1071			}
1072			__skb_queue_tail(transmq, skb);
1073			tipc_link_set_skb_retransmit_time(skb, l);
1074			__skb_queue_tail(xmitq, _skb);
1075			TIPC_SKB_CB(skb)->ackers = l->ackers;
1076			l->rcv_unacked = 0;
1077			l->stats.sent_pkts++;
1078			seqno++;
1079			continue;
1080		}
1081		if (tipc_msg_try_bundle(l->backlog[imp].target_bskb, &skb,
1082					mss, l->addr, &new_bundle)) {
1083			if (skb) {
1084				/* Keep a ref. to the skb for next try */
1085				l->backlog[imp].target_bskb = skb;
1086				l->backlog[imp].len++;
1087				__skb_queue_tail(backlogq, skb);
1088			} else {
1089				if (new_bundle) {
1090					l->stats.sent_bundles++;
1091					l->stats.sent_bundled++;
1092				}
1093				l->stats.sent_bundled++;
1094			}
1095			continue;
1096		}
1097		l->backlog[imp].target_bskb = NULL;
1098		l->backlog[imp].len += (1 + skb_queue_len(list));
1099		__skb_queue_tail(backlogq, skb);
1100		skb_queue_splice_tail_init(list, backlogq);
1101	}
1102	l->snd_nxt = seqno;
1103	return rc;
1104}
1105
1106static void tipc_link_update_cwin(struct tipc_link *l, int released,
1107				  bool retransmitted)
1108{
1109	int bklog_len = skb_queue_len(&l->backlogq);
1110	struct sk_buff_head *txq = &l->transmq;
1111	int txq_len = skb_queue_len(txq);
1112	u16 cwin = l->window;
1113
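	/* Roughly TCP-Reno-like window management: on retransmission halve
	 * ssthresh (floored at 300); on an ack releasing nothing also fall
	 * back to min_win; otherwise grow by the released count up to
	 * ssthresh (slow start) and by one per window of acks beyond it
	 * (congestion avoidance)
	 */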
1114	/* Enter fast recovery */
1115	if (unlikely(retransmitted)) {
1116		l->ssthresh = max_t(u16, l->window / 2, 300);
1117		l->window = min_t(u16, l->ssthresh, l->window);
1118		return;
1119	}
1120	/* Enter slow start */
1121	if (unlikely(!released)) {
1122		l->ssthresh = max_t(u16, l->window / 2, 300);
1123		l->window = l->min_win;
1124		return;
1125	}
1126	/* Don't increase window if no pressure on the transmit queue */
1127	if (txq_len + bklog_len < cwin)
1128		return;
1129
1130	/* Don't increase window if there are holes in the transmit queue */
1131	if (txq_len && l->snd_nxt - buf_seqno(skb_peek(txq)) != txq_len)
1132		return;
1133
1134	l->cong_acks += released;
1135
1136	/* Slow start  */
1137	if (cwin <= l->ssthresh) {
1138		l->window = min_t(u16, cwin + released, l->max_win);
1139		return;
1140	}
1141	/* Congestion avoidance */
1142	if (l->cong_acks < cwin)
1143		return;
1144	l->window = min_t(u16, ++cwin, l->max_win);
1145	l->cong_acks = 0;
1146}
1147
1148static void tipc_link_advance_backlog(struct tipc_link *l,
1149				      struct sk_buff_head *xmitq)
1150{
1151	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
1152	struct sk_buff_head *txq = &l->transmq;
1153	struct sk_buff *skb, *_skb;
1154	u16 ack = l->rcv_nxt - 1;
1155	u16 seqno = l->snd_nxt;
1156	struct tipc_msg *hdr;
1157	u16 cwin = l->window;
1158	u32 imp;
1159
1160	while (skb_queue_len(txq) < cwin) {
1161		skb = skb_peek(&l->backlogq);
1162		if (!skb)
1163			break;
1164		_skb = skb_clone(skb, GFP_ATOMIC);
1165		if (!_skb)
1166			break;
1167		__skb_dequeue(&l->backlogq);
1168		hdr = buf_msg(skb);
1169		imp = msg_importance(hdr);
1170		l->backlog[imp].len--;
1171		if (unlikely(skb == l->backlog[imp].target_bskb))
1172			l->backlog[imp].target_bskb = NULL;
1173		__skb_queue_tail(&l->transmq, skb);
1174		tipc_link_set_skb_retransmit_time(skb, l);
1175
1176		__skb_queue_tail(xmitq, _skb);
1177		TIPC_SKB_CB(skb)->ackers = l->ackers;
1178		msg_set_seqno(hdr, seqno);
1179		msg_set_ack(hdr, ack);
1180		msg_set_bcast_ack(hdr, bc_ack);
1181		l->rcv_unacked = 0;
1182		l->stats.sent_pkts++;
1183		seqno++;
1184	}
1185	l->snd_nxt = seqno;
1186}
1187
1188/**
1189 * link_retransmit_failure() - Detect repeated retransmit failures
1190 * @l: tipc link sender
1191 * @r: tipc link receiver (= l in case of unicast)
1192 * @rc: returned code
1193 *
1194 * Return: true if repeated retransmit failures have occurred, otherwise
1195 * false
1196 */
1197static bool link_retransmit_failure(struct tipc_link *l, struct tipc_link *r,
1198				    int *rc)
1199{
1200	struct sk_buff *skb = skb_peek(&l->transmq);
1201	struct tipc_msg *hdr;
1202
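	/* Declare failure only if the oldest packet on transmq has been
	 * retransmitted at least once and has then stayed unacked for more
	 * than 10 * tolerance ms
	 */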
1203	if (!skb)
1204		return false;
1205
1206	if (!TIPC_SKB_CB(skb)->retr_cnt)
1207		return false;
1208
1209	if (!time_after(jiffies, TIPC_SKB_CB(skb)->retr_stamp +
1210			msecs_to_jiffies(r->tolerance * 10)))
1211		return false;
1212
1213	hdr = buf_msg(skb);
1214	if (link_is_bc_sndlink(l) && !less(r->acked, msg_seqno(hdr)))
1215		return false;
1216
1217	pr_warn("Retransmission failure on link <%s>\n", l->name);
1218	link_print(l, "State of link ");
1219	pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
1220		msg_user(hdr), msg_type(hdr), msg_size(hdr), msg_errcode(hdr));
1221	pr_info("sqno %u, prev: %x, dest: %x\n",
1222		msg_seqno(hdr), msg_prevnode(hdr), msg_destnode(hdr));
1223	pr_info("retr_stamp %d, retr_cnt %d\n",
1224		jiffies_to_msecs(TIPC_SKB_CB(skb)->retr_stamp),
1225		TIPC_SKB_CB(skb)->retr_cnt);
1226
1227	trace_tipc_list_dump(&l->transmq, true, "retrans failure!");
1228	trace_tipc_link_dump(l, TIPC_DUMP_NONE, "retrans failure!");
1229	trace_tipc_link_dump(r, TIPC_DUMP_NONE, "retrans failure!");
1230
1231	if (link_is_bc_sndlink(l)) {
1232		r->state = LINK_RESET;
1233		*rc |= TIPC_LINK_DOWN_EVT;
1234	} else {
1235		*rc |= tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
1236	}
1237
1238	return true;
1239}
1240
1241/* tipc_data_input - deliver data and name distr msgs to upper layer
1242 *
1243 * Consumes buffer if message is of right type
1244 * Node lock must be held
1245 */
1246static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb,
1247			    struct sk_buff_head *inputq)
1248{
1249	struct sk_buff_head *mc_inputq = l->bc_rcvlink->inputq;
1250	struct tipc_msg *hdr = buf_msg(skb);
1251
1252	switch (msg_user(hdr)) {
1253	case TIPC_LOW_IMPORTANCE:
1254	case TIPC_MEDIUM_IMPORTANCE:
1255	case TIPC_HIGH_IMPORTANCE:
1256	case TIPC_CRITICAL_IMPORTANCE:
1257		if (unlikely(msg_in_group(hdr) || msg_mcast(hdr))) {
1258			skb_queue_tail(mc_inputq, skb);
1259			return true;
1260		}
1261		fallthrough;
1262	case CONN_MANAGER:
1263		skb_queue_tail(inputq, skb);
1264		return true;
1265	case GROUP_PROTOCOL:
1266		skb_queue_tail(mc_inputq, skb);
1267		return true;
1268	case NAME_DISTRIBUTOR:
1269		l->bc_rcvlink->state = LINK_ESTABLISHED;
1270		skb_queue_tail(l->namedq, skb);
1271		return true;
1272	case MSG_BUNDLER:
1273	case TUNNEL_PROTOCOL:
1274	case MSG_FRAGMENTER:
1275	case BCAST_PROTOCOL:
1276		return false;
1277#ifdef CONFIG_TIPC_CRYPTO
1278	case MSG_CRYPTO:
1279		if (sysctl_tipc_key_exchange_enabled &&
1280		    TIPC_SKB_CB(skb)->decrypted) {
1281			tipc_crypto_msg_rcv(l->net, skb);
1282			return true;
1283		}
1284		fallthrough;
1285#endif
1286	default:
1287		pr_warn("Dropping received illegal msg type\n");
1288		kfree_skb(skb);
1289		return true;
1290	}
1291}
1292
1293/* tipc_link_input - process packet that has passed link protocol check
1294 *
1295 * Consumes buffer
1296 */
1297static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb,
1298			   struct sk_buff_head *inputq,
1299			   struct sk_buff **reasm_skb)
1300{
1301	struct tipc_msg *hdr = buf_msg(skb);
1302	struct sk_buff *iskb;
1303	struct sk_buff_head tmpq;
1304	int usr = msg_user(hdr);
1305	int pos = 0;
1306
1307	if (usr == MSG_BUNDLER) {
1308		skb_queue_head_init(&tmpq);
1309		l->stats.recv_bundles++;
1310		l->stats.recv_bundled += msg_msgcnt(hdr);
1311		while (tipc_msg_extract(skb, &iskb, &pos))
1312			tipc_data_input(l, iskb, &tmpq);
1313		tipc_skb_queue_splice_tail(&tmpq, inputq);
1314		return 0;
1315	} else if (usr == MSG_FRAGMENTER) {
1316		l->stats.recv_fragments++;
1317		if (tipc_buf_append(reasm_skb, &skb)) {
1318			l->stats.recv_fragmented++;
1319			tipc_data_input(l, skb, inputq);
1320		} else if (!*reasm_skb && !link_is_bc_rcvlink(l)) {
1321			pr_warn_ratelimited("Unable to build fragment list\n");
1322			return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
1323		}
1324		return 0;
1325	} else if (usr == BCAST_PROTOCOL) {
1326		tipc_bcast_lock(l->net);
1327		tipc_link_bc_init_rcv(l->bc_rcvlink, hdr);
1328		tipc_bcast_unlock(l->net);
1329	}
1330
1331	kfree_skb(skb);
1332	return 0;
1333}
1334
1335/* tipc_link_tnl_rcv() - receive TUNNEL_PROTOCOL message, drop or process the
1336 *			 inner message along with the ones in the old link's
1337 *			 deferdq
1338 * @l: tunnel link
1339 * @skb: TUNNEL_PROTOCOL message
1340 * @inputq: queue to put messages ready for delivery
1341 */
1342static int tipc_link_tnl_rcv(struct tipc_link *l, struct sk_buff *skb,
1343			     struct sk_buff_head *inputq)
1344{
1345	struct sk_buff **reasm_skb = &l->failover_reasm_skb;
1346	struct sk_buff **reasm_tnlmsg = &l->reasm_tnlmsg;
1347	struct sk_buff_head *fdefq = &l->failover_deferdq;
1348	struct tipc_msg *hdr = buf_msg(skb);
1349	struct sk_buff *iskb;
1350	int ipos = 0;
1351	int rc = 0;
1352	u16 seqno;
1353
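	/* A dummy SYNCH_MSG (cf. tipc_link_tnl_prepare()) carries no
	 * bundled packets, so it can be consumed right away
	 */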
1354	if (msg_type(hdr) == SYNCH_MSG) {
1355		kfree_skb(skb);
1356		return 0;
1357	}
1358
1359	/* Not a fragment? */
1360	if (likely(!msg_nof_fragms(hdr))) {
1361		if (unlikely(!tipc_msg_extract(skb, &iskb, &ipos))) {
1362			pr_warn_ratelimited("Unable to extract msg, defq: %d\n",
1363					    skb_queue_len(fdefq));
1364			return 0;
1365		}
1366		kfree_skb(skb);
1367	} else {
1368		/* Set fragment type for buf_append */
1369		if (msg_fragm_no(hdr) == 1)
1370			msg_set_type(hdr, FIRST_FRAGMENT);
1371		else if (msg_fragm_no(hdr) < msg_nof_fragms(hdr))
1372			msg_set_type(hdr, FRAGMENT);
1373		else
1374			msg_set_type(hdr, LAST_FRAGMENT);
1375
1376		if (!tipc_buf_append(reasm_tnlmsg, &skb)) {
1377			/* Successful but non-complete reassembly? */
1378			if (*reasm_tnlmsg || link_is_bc_rcvlink(l))
1379				return 0;
1380			pr_warn_ratelimited("Unable to reassemble tunnel msg\n");
1381			return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
1382		}
1383		iskb = skb;
1384	}
1385
1386	do {
1387		seqno = buf_seqno(iskb);
1388		if (unlikely(less(seqno, l->drop_point))) {
1389			kfree_skb(iskb);
1390			continue;
1391		}
1392		if (unlikely(seqno != l->drop_point)) {
1393			__tipc_skb_queue_sorted(fdefq, seqno, iskb);
1394			continue;
1395		}
1396
1397		l->drop_point++;
1398		if (!tipc_data_input(l, iskb, inputq))
1399			rc |= tipc_link_input(l, iskb, inputq, reasm_skb);
1400		if (unlikely(rc))
1401			break;
1402	} while ((iskb = __tipc_skb_dequeue(fdefq, l->drop_point)));
1403
1404	return rc;
1405}
1406
1407/**
1408 * tipc_get_gap_ack_blks - get Gap ACK blocks from PROTOCOL/STATE_MSG
1409 * @ga: returned pointer to the Gap ACK blocks if any
1410 * @l: the tipc link
1411 * @hdr: the PROTOCOL/STATE_MSG header
1412 * @uc: desired Gap ACK blocks type, i.e. unicast (= 1) or broadcast (= 0)
1413 *
1414 * Return: the total Gap ACK blocks size
1415 */
1416u16 tipc_get_gap_ack_blks(struct tipc_gap_ack_blks **ga, struct tipc_link *l,
1417			  struct tipc_msg *hdr, bool uc)
1418{
1419	struct tipc_gap_ack_blks *p;
1420	u16 sz = 0;
1421
1422	/* Does peer support the Gap ACK blocks feature? */
1423	if (l->peer_caps & TIPC_GAP_ACK_BLOCK) {
1424		p = (struct tipc_gap_ack_blks *)msg_data(hdr);
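		/* Wire layout: bgack_cnt broadcast blocks come first, then
		 * ugack_cnt unicast blocks; 'len' covers the whole area
		 */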
1425		sz = ntohs(p->len);
1426		/* Sanity check */
1427		if (sz == struct_size(p, gacks, size_add(p->ugack_cnt, p->bgack_cnt))) {
1428			/* Good, check if the desired type exists */
1429			if ((uc && p->ugack_cnt) || (!uc && p->bgack_cnt))
1430				goto ok;
1431		/* Backward compatible: peer might not support bc, but uc? */
1432		} else if (uc && sz == struct_size(p, gacks, p->ugack_cnt)) {
1433			if (p->ugack_cnt) {
1434				p->bgack_cnt = 0;
1435				goto ok;
1436			}
1437		}
1438	}
1439	/* Other cases: ignore! */
1440	p = NULL;
1441
1442ok:
1443	*ga = p;
1444	return sz;
1445}
1446
1447static u8 __tipc_build_gap_ack_blks(struct tipc_gap_ack_blks *ga,
1448				    struct tipc_link *l, u8 start_index)
1449{
1450	struct tipc_gap_ack *gacks = &ga->gacks[start_index];
1451	struct sk_buff *skb = skb_peek(&l->deferdq);
1452	u16 expect, seqno = 0;
1453	u8 n = 0;
1454
1455	if (!skb)
1456		return 0;
1457
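	/* Worked example: with deferdq holding seqnos 7, 8 and 11, the
	 * walk emits {ack = 8, gap = 2} for the hole 9..10, and the final
	 * block {ack = 11, gap = 0} terminates the list
	 */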
1458	expect = buf_seqno(skb);
1459	skb_queue_walk(&l->deferdq, skb) {
1460		seqno = buf_seqno(skb);
1461		if (unlikely(more(seqno, expect))) {
1462			gacks[n].ack = htons(expect - 1);
1463			gacks[n].gap = htons(seqno - expect);
1464			if (++n >= MAX_GAP_ACK_BLKS / 2) {
1465				pr_info_ratelimited("Gacks on %s: %d, ql: %d!\n",
1466						    l->name, n,
1467						    skb_queue_len(&l->deferdq));
1468				return n;
1469			}
1470		} else if (unlikely(less(seqno, expect))) {
1471			pr_warn("Unexpected skb in deferdq!\n");
1472			continue;
1473		}
1474		expect = seqno + 1;
1475	}
1476
1477	/* last block */
1478	gacks[n].ack = htons(seqno);
1479	gacks[n].gap = 0;
1480	n++;
1481	return n;
1482}
1483
1484/* tipc_build_gap_ack_blks - build Gap ACK blocks
1485 * @l: tipc unicast link
1486 * @hdr: the tipc message buffer to store the Gap ACK blocks after built
1487 *
1488 * The function builds Gap ACK blocks for both the unicast and broadcast
1489 * receiver links of a certain peer; after building, the buffer has the
1490 * network data format described by the struct tipc_gap_ack_blks definition.
1491 *
1492 * Return: the actual allocated memory size
1493 */
1494static u16 tipc_build_gap_ack_blks(struct tipc_link *l, struct tipc_msg *hdr)
1495{
1496	struct tipc_link *bcl = l->bc_rcvlink;
1497	struct tipc_gap_ack_blks *ga;
1498	u16 len;
1499
1500	ga = (struct tipc_gap_ack_blks *)msg_data(hdr);
1501
1502	/* Start with broadcast link first */
1503	tipc_bcast_lock(bcl->net);
1504	msg_set_bcast_ack(hdr, bcl->rcv_nxt - 1);
1505	msg_set_bc_gap(hdr, link_bc_rcv_gap(bcl));
1506	ga->bgack_cnt = __tipc_build_gap_ack_blks(ga, bcl, 0);
1507	tipc_bcast_unlock(bcl->net);
1508
1509	/* Now the unicast link, but only when an explicit NACK is being sent */
1510	ga->ugack_cnt = (msg_seq_gap(hdr)) ?
1511			__tipc_build_gap_ack_blks(ga, l, ga->bgack_cnt) : 0;
1512
1513	/* Total len */
1514	len = struct_size(ga, gacks, size_add(ga->bgack_cnt, ga->ugack_cnt));
1515	ga->len = htons(len);
1516	return len;
1517}
1518
1519/* tipc_link_advance_transmq - advance TIPC link transmq queue by releasing
1520 *			       acked packets, also doing retransmissions if
1521 *			       gaps found
1522 * @l: tipc link with transmq queue to be advanced
1523 * @r: tipc link "receiver" i.e. in case of broadcast (= "l" if unicast)
1524 * @acked: seqno of last packet acked by peer without any gaps before
1525 * @gap: # of gap packets
1526 * @ga: buffer pointer to Gap ACK blocks from peer
1527 * @xmitq: queue for accumulating the retransmitted packets if any
1528 * @retransmitted: returned boolean value if a retransmission is really issued
1529 * @rc: returned code, e.g. TIPC_LINK_DOWN_EVT if repeated retransmit
1530 *      failures happen (unlikely case)
1531 *
1532 * Return: the number of packets released from the link transmq
1533 */
1534static int tipc_link_advance_transmq(struct tipc_link *l, struct tipc_link *r,
1535				     u16 acked, u16 gap,
1536				     struct tipc_gap_ack_blks *ga,
1537				     struct sk_buff_head *xmitq,
1538				     bool *retransmitted, int *rc)
1539{
1540	struct tipc_gap_ack_blks *last_ga = r->last_ga, *this_ga = NULL;
1541	struct tipc_gap_ack *gacks = NULL;
1542	struct sk_buff *skb, *_skb, *tmp;
1543	struct tipc_msg *hdr;
1544	u32 qlen = skb_queue_len(&l->transmq);
1545	u16 nacked = acked, ngap = gap, gack_cnt = 0;
1546	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
1547	u16 ack = l->rcv_nxt - 1;
1548	u16 seqno, n = 0;
1549	u16 end = r->acked, start = end, offset = r->last_gap;
1550	u16 si = (last_ga) ? last_ga->start_index : 0;
1551	bool is_uc = !link_is_bc_sndlink(l);
1552	bool bc_has_acked = false;
1553
1554	trace_tipc_link_retrans(r, acked + 1, acked + gap, &l->transmq);
1555
1556	/* Determine Gap ACK blocks if any for the particular link */
1557	if (ga && is_uc) {
1558		/* Get the Gap ACKs, uc part */
1559		gack_cnt = ga->ugack_cnt;
1560		gacks = &ga->gacks[ga->bgack_cnt];
1561	} else if (ga) {
1562		/* Copy the Gap ACKs, bc part, for later renewal if needed */
1563		this_ga = kmemdup(ga, struct_size(ga, gacks, ga->bgack_cnt),
1564				  GFP_ATOMIC);
1565		if (likely(this_ga)) {
1566			this_ga->start_index = 0;
1567			/* Start with the bc Gap ACKs */
1568			gack_cnt = this_ga->bgack_cnt;
1569			gacks = &this_ga->gacks[0];
1570		} else {
1571			/* We may get into trouble here; simply ignore the Gap ACKs */
1572			pr_warn_ratelimited("Ignoring bc Gap ACKs, no memory\n");
1573		}
1574	}
1575
1576	/* Advance the link transmq */
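	/* Three cases per packet: release it if covered by the cumulative
	 * ack (for bc, only once all peers have acked), retransmit it if
	 * it falls inside the reported gap, or jump to the next Gap ACK
	 * block
	 */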
1577	skb_queue_walk_safe(&l->transmq, skb, tmp) {
1578		seqno = buf_seqno(skb);
1579
1580next_gap_ack:
1581		if (less_eq(seqno, nacked)) {
1582			if (is_uc)
1583				goto release;
1584			/* Skip packets peer has already acked */
1585			if (!more(seqno, r->acked))
1586				continue;
1587			/* Get the next of last Gap ACK blocks */
1588			while (more(seqno, end)) {
1589				if (!last_ga || si >= last_ga->bgack_cnt)
1590					break;
1591				start = end + offset + 1;
1592				end = ntohs(last_ga->gacks[si].ack);
1593				offset = ntohs(last_ga->gacks[si].gap);
1594				si++;
1595				WARN_ONCE(more(start, end) ||
1596					  (!offset &&
1597					   si < last_ga->bgack_cnt) ||
1598					  si > MAX_GAP_ACK_BLKS,
1599					  "Corrupted Gap ACK: %d %d %d %d %d\n",
1600					  start, end, offset, si,
1601					  last_ga->bgack_cnt);
1602			}
1603			/* Check against the last Gap ACK block */
1604			if (tipc_in_range(seqno, start, end))
1605				continue;
1606			/* Update/release the packet peer is acking */
1607			bc_has_acked = true;
1608			if (--TIPC_SKB_CB(skb)->ackers)
1609				continue;
1610release:
1611			/* release skb */
1612			__skb_unlink(skb, &l->transmq);
1613			kfree_skb(skb);
1614		} else if (less_eq(seqno, nacked + ngap)) {
1615			/* First gap: check if repeated retrans failures? */
1616			if (unlikely(seqno == acked + 1 &&
1617				     link_retransmit_failure(l, r, rc))) {
1618				/* Ignore this bc Gap ACKs if any */
1619				kfree(this_ga);
1620				this_ga = NULL;
1621				break;
1622			}
1623			/* retransmit skb if unrestricted */
1624			if (time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr))
1625				continue;
1626			tipc_link_set_skb_retransmit_time(skb, l);
1627			_skb = pskb_copy(skb, GFP_ATOMIC);
1628			if (!_skb)
1629				continue;
1630			hdr = buf_msg(_skb);
1631			msg_set_ack(hdr, ack);
1632			msg_set_bcast_ack(hdr, bc_ack);
1633			_skb->priority = TC_PRIO_CONTROL;
1634			__skb_queue_tail(xmitq, _skb);
1635			l->stats.retransmitted++;
1636			if (!is_uc)
1637				r->stats.retransmitted++;
1638			*retransmitted = true;
1639			/* Increase actual retrans counter & mark first time */
1640			if (!TIPC_SKB_CB(skb)->retr_cnt++)
1641				TIPC_SKB_CB(skb)->retr_stamp = jiffies;
1642		} else {
1643			/* retry with Gap ACK blocks if any */
1644			if (n >= gack_cnt)
1645				break;
1646			nacked = ntohs(gacks[n].ack);
1647			ngap = ntohs(gacks[n].gap);
1648			n++;
1649			goto next_gap_ack;
1650		}
1651	}
1652
1653	/* Renew last Gap ACK blocks for bc if needed */
1654	if (bc_has_acked) {
1655		if (this_ga) {
1656			kfree(last_ga);
1657			r->last_ga = this_ga;
1658			r->last_gap = gap;
1659		} else if (last_ga) {
1660			if (less(acked, start)) {
1661				si--;
1662				offset = start - acked - 1;
1663			} else if (less(acked, end)) {
1664				acked = end;
1665			}
1666			if (si < last_ga->bgack_cnt) {
1667				last_ga->start_index = si;
1668				r->last_gap = offset;
1669			} else {
1670				kfree(last_ga);
1671				r->last_ga = NULL;
1672				r->last_gap = 0;
1673			}
1674		} else {
1675			r->last_gap = 0;
1676		}
1677		r->acked = acked;
1678	} else {
1679		kfree(this_ga);
1680	}
1681
1682	return qlen - skb_queue_len(&l->transmq);
1683}
1684
1685/* tipc_link_build_state_msg: prepare link state message for transmission
1686 *
1687 * Note that sending of broadcast ack is coordinated among nodes, to reduce
1688 * risk of ack storms towards the sender
1689 */
1690int tipc_link_build_state_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
1691{
1692	if (!l)
1693		return 0;
1694
1695	/* Broadcast ACK must be sent via a unicast link => defer to caller */
1696	if (link_is_bc_rcvlink(l)) {
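		/* Stagger acks: a node acks only when the low nibble of
		 * rcv_nxt XOR its own address equals 0xf, i.e. roughly one
		 * packet in sixteen, at seqnos differing per node
		 */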
1697		if (((l->rcv_nxt ^ tipc_own_addr(l->net)) & 0xf) != 0xf)
1698			return 0;
1699		l->rcv_unacked = 0;
1700
1701		/* Use snd_nxt to store peer's snd_nxt in broadcast rcv link */
1702		l->snd_nxt = l->rcv_nxt;
1703		return TIPC_LINK_SND_STATE;
1704	}
1705	/* Unicast ACK */
1706	l->rcv_unacked = 0;
1707	l->stats.sent_acks++;
1708	tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, 0, xmitq);
1709	return 0;
1710}
1711
1712/* tipc_link_build_reset_msg: prepare link RESET or ACTIVATE message
1713 */
1714void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
1715{
1716	int mtyp = RESET_MSG;
1717	struct sk_buff *skb;
1718
1719	if (l->state == LINK_ESTABLISHING)
1720		mtyp = ACTIVATE_MSG;
1721
1722	tipc_link_build_proto_msg(l, mtyp, 0, 0, 0, 0, 0, xmitq);
1723
1724	/* Inform peer that this endpoint is going down if applicable */
1725	skb = skb_peek_tail(xmitq);
1726	if (skb && (l->state == LINK_RESET))
1727		msg_set_peer_stopping(buf_msg(skb), 1);
1728}
1729
1730/* tipc_link_build_nack_msg: prepare link nack message for transmission
1731 * Note that sending of broadcast NACK is coordinated among nodes, to
1732 * reduce the risk of NACK storms towards the sender
1733 */
1734static int tipc_link_build_nack_msg(struct tipc_link *l,
1735				    struct sk_buff_head *xmitq)
1736{
1737	u32 def_cnt = ++l->stats.deferred_recv;
1738	struct sk_buff_head *dfq = &l->deferdq;
1739	u32 defq_len = skb_queue_len(dfq);
1740	int match1, match2;
1741
1742	if (link_is_bc_rcvlink(l)) {
1743		match1 = def_cnt & 0xf;
1744		match2 = tipc_own_addr(l->net) & 0xf;
1745		if (match1 == match2)
1746			return TIPC_LINK_SND_STATE;
1747		return 0;
1748	}
1749
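	/* Pace NACKs: send one when the deferred queue reaches 3 packets,
	 * then one per 16 additional packets
	 */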
1750	if (defq_len >= 3 && !((defq_len - 3) % 16)) {
1751		u16 rcvgap = buf_seqno(skb_peek(dfq)) - l->rcv_nxt;
1752
1753		tipc_link_build_proto_msg(l, STATE_MSG, 0, 0,
1754					  rcvgap, 0, 0, xmitq);
1755	}
1756	return 0;
1757}
1758
1759/* tipc_link_rcv - process TIPC packets/messages arriving from off-node
1760 * @l: the link that should handle the message
1761 * @skb: TIPC packet
1762 * @xmitq: queue to place packets to be sent after this call
1763 */
1764int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
1765		  struct sk_buff_head *xmitq)
1766{
1767	struct sk_buff_head *defq = &l->deferdq;
1768	struct tipc_msg *hdr = buf_msg(skb);
1769	u16 seqno, rcv_nxt, win_lim;
1770	int released = 0;
1771	int rc = 0;
1772
1773	/* Verify and update link state */
1774	if (unlikely(msg_user(hdr) == LINK_PROTOCOL))
1775		return tipc_link_proto_rcv(l, skb, xmitq);
1776
1777	/* Don't send probe at next timeout expiration */
1778	l->silent_intv_cnt = 0;
1779
1780	do {
1781		hdr = buf_msg(skb);
1782		seqno = msg_seqno(hdr);
1783		rcv_nxt = l->rcv_nxt;
1784		win_lim = rcv_nxt + TIPC_MAX_LINK_WIN;
1785
1786		if (unlikely(!tipc_link_is_up(l))) {
1787			if (l->state == LINK_ESTABLISHING)
1788				rc = TIPC_LINK_UP_EVT;
1789			kfree_skb(skb);
1790			break;
1791		}
1792
1793		/* Drop if outside receive window */
1794		if (unlikely(less(seqno, rcv_nxt) || more(seqno, win_lim))) {
1795			l->stats.duplicates++;
1796			kfree_skb(skb);
1797			break;
1798		}
1799		released += tipc_link_advance_transmq(l, l, msg_ack(hdr), 0,
1800						      NULL, NULL, NULL, NULL);
1801
1802		/* Defer delivery if sequence gap */
1803		if (unlikely(seqno != rcv_nxt)) {
1804			if (!__tipc_skb_queue_sorted(defq, seqno, skb))
1805				l->stats.duplicates++;
1806			rc |= tipc_link_build_nack_msg(l, xmitq);
1807			break;
1808		}
1809
1810		/* Deliver packet */
1811		l->rcv_nxt++;
1812		l->stats.recv_pkts++;
1813
1814		if (unlikely(msg_user(hdr) == TUNNEL_PROTOCOL))
1815			rc |= tipc_link_tnl_rcv(l, skb, l->inputq);
1816		else if (!tipc_data_input(l, skb, l->inputq))
1817			rc |= tipc_link_input(l, skb, l->inputq, &l->reasm_buf);
1818		if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN))
1819			rc |= tipc_link_build_state_msg(l, xmitq);
1820		if (unlikely(rc & ~TIPC_LINK_SND_STATE))
1821			break;
1822	} while ((skb = __tipc_skb_dequeue(defq, l->rcv_nxt)));
1823
1824	/* Forward queues and wake up waiting users */
1825	if (released) {
1826		tipc_link_update_cwin(l, released, 0);
1827		tipc_link_advance_backlog(l, xmitq);
1828		if (unlikely(!skb_queue_empty(&l->wakeupq)))
1829			link_prepare_wakeup(l);
1830	}
1831	return rc;
1832}
1833
1834static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
1835				      bool probe_reply, u16 rcvgap,
1836				      int tolerance, int priority,
1837				      struct sk_buff_head *xmitq)
1838{
1839	struct tipc_mon_state *mstate = &l->mon_state;
1840	struct sk_buff_head *dfq = &l->deferdq;
1841	struct tipc_link *bcl = l->bc_rcvlink;
1842	struct tipc_msg *hdr;
1843	struct sk_buff *skb;
1844	bool node_up = tipc_link_is_up(bcl);
1845	u16 glen = 0, bc_rcvgap = 0;
1846	int dlen = 0;
1847	void *data;
1848
1849	/* Don't send protocol message during reset or link failover */
1850	if (tipc_link_is_blocked(l))
1851		return;
1852
1853	if (!tipc_link_is_up(l) && (mtyp == STATE_MSG))
1854		return;
1855
1856	if ((probe || probe_reply) && !skb_queue_empty(dfq))
1857		rcvgap = buf_seqno(skb_peek(dfq)) - l->rcv_nxt;
1858
1859	skb = tipc_msg_create(LINK_PROTOCOL, mtyp, INT_H_SIZE,
1860			      tipc_max_domain_size + MAX_GAP_ACK_BLKS_SZ,
1861			      l->addr, tipc_own_addr(l->net), 0, 0, 0);
1862	if (!skb)
1863		return;
1864
1865	hdr = buf_msg(skb);
1866	data = msg_data(hdr);
1867	msg_set_session(hdr, l->session);
1868	msg_set_bearer_id(hdr, l->bearer_id);
1869	msg_set_net_plane(hdr, l->net_plane);
1870	msg_set_next_sent(hdr, l->snd_nxt);
1871	msg_set_ack(hdr, l->rcv_nxt - 1);
1872	msg_set_bcast_ack(hdr, bcl->rcv_nxt - 1);
1873	msg_set_bc_ack_invalid(hdr, !node_up);
1874	msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
1875	msg_set_link_tolerance(hdr, tolerance);
1876	msg_set_linkprio(hdr, priority);
1877	msg_set_redundant_link(hdr, node_up);
1878	msg_set_seq_gap(hdr, 0);
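	/* Use a dummy seqno half the sequence space ahead of snd_nxt,
	 * presumably to keep protocol messages clearly outside the data
	 * receive window; STATE messages overwrite it below when the peer
	 * supports TIPC_LINK_PROTO_SEQNO
	 */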
1879	msg_set_seqno(hdr, l->snd_nxt + U16_MAX / 2);
1880
1881	if (mtyp == STATE_MSG) {
1882		if (l->peer_caps & TIPC_LINK_PROTO_SEQNO)
1883			msg_set_seqno(hdr, l->snd_nxt_state++);
1884		msg_set_seq_gap(hdr, rcvgap);
1885		bc_rcvgap = link_bc_rcv_gap(bcl);
1886		msg_set_bc_gap(hdr, bc_rcvgap);
1887		msg_set_probe(hdr, probe);
1888		msg_set_is_keepalive(hdr, probe || probe_reply);
1889		if (l->peer_caps & TIPC_GAP_ACK_BLOCK)
1890			glen = tipc_build_gap_ack_blks(l, hdr);
1891		tipc_mon_prep(l->net, data + glen, &dlen, mstate, l->bearer_id);
1892		msg_set_size(hdr, INT_H_SIZE + glen + dlen);
1893		skb_trim(skb, INT_H_SIZE + glen + dlen);
1894		l->stats.sent_states++;
1895		l->rcv_unacked = 0;
1896	} else {
1897		/* RESET_MSG or ACTIVATE_MSG */
1898		if (mtyp == ACTIVATE_MSG) {
1899			msg_set_dest_session_valid(hdr, 1);
1900			msg_set_dest_session(hdr, l->peer_session);
1901		}
1902		msg_set_max_pkt(hdr, l->advertised_mtu);
1903		strcpy(data, l->if_name);
1904		msg_set_size(hdr, INT_H_SIZE + TIPC_MAX_IF_NAME);
1905		skb_trim(skb, INT_H_SIZE + TIPC_MAX_IF_NAME);
1906	}
1907	if (probe)
1908		l->stats.sent_probes++;
1909	if (rcvgap)
1910		l->stats.sent_nacks++;
1911	if (bc_rcvgap)
1912		bcl->stats.sent_nacks++;
1913	skb->priority = TC_PRIO_CONTROL;
1914	__skb_queue_tail(xmitq, skb);
1915	trace_tipc_proto_build(skb, false, l->name);
1916}
1917
1918void tipc_link_create_dummy_tnl_msg(struct tipc_link *l,
1919				    struct sk_buff_head *xmitq)
1920{
1921	u32 onode = tipc_own_addr(l->net);
1922	struct tipc_msg *hdr, *ihdr;
1923	struct sk_buff_head tnlq;
1924	struct sk_buff *skb;
1925	u32 dnode = l->addr;
1926
1927	__skb_queue_head_init(&tnlq);
1928	skb = tipc_msg_create(TUNNEL_PROTOCOL, FAILOVER_MSG,
1929			      INT_H_SIZE, BASIC_H_SIZE,
1930			      dnode, onode, 0, 0, 0);
1931	if (!skb) {
1932		pr_warn("%sunable to create tunnel packet\n", link_co_err);
1933		return;
1934	}
1935
1936	hdr = buf_msg(skb);
1937	msg_set_msgcnt(hdr, 1);
1938	msg_set_bearer_id(hdr, l->peer_bearer_id);
1939
1940	ihdr = (struct tipc_msg *)msg_data(hdr);
1941	tipc_msg_init(onode, ihdr, TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
1942		      BASIC_H_SIZE, dnode);
1943	msg_set_errcode(ihdr, TIPC_ERR_NO_PORT);
1944	__skb_queue_tail(&tnlq, skb);
1945	tipc_link_xmit(l, &tnlq, xmitq);
1946}
1947
1948/* tipc_link_tnl_prepare(): prepare and return a list of tunnel packets
1949 * with contents of the link's transmit and backlog queues.
1950 */
1951void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
1952			   int mtyp, struct sk_buff_head *xmitq)
1953{
1954	struct sk_buff_head *fdefq = &tnl->failover_deferdq;
1955	struct sk_buff *skb, *tnlskb;
1956	struct tipc_msg *hdr, tnlhdr;
1957	struct sk_buff_head *queue = &l->transmq;
1958	struct sk_buff_head tmpxq, tnlq, frags;
1959	u16 pktlen, pktcnt, seqno = l->snd_nxt;
1960	bool pktcnt_need_update = false;
1961	u16 syncpt;
1962	int rc;
1963
1964	if (!tnl)
1965		return;
1966
1967	__skb_queue_head_init(&tnlq);
1968	/* Link Synching:
1969	 * From now on, send only a single ("dummy") SYNCH message
1970	 * to peer. The SYNCH message does not contain any data, just
1971	 * a header conveying the synch point to the peer.
1972	 */
1973	if (mtyp == SYNCH_MSG && (tnl->peer_caps & TIPC_TUNNEL_ENHANCED)) {
1974		tnlskb = tipc_msg_create(TUNNEL_PROTOCOL, SYNCH_MSG,
1975					 INT_H_SIZE, 0, l->addr,
1976					 tipc_own_addr(l->net),
1977					 0, 0, 0);
1978		if (!tnlskb) {
1979			pr_warn("%sunable to create dummy SYNCH_MSG\n",
1980				link_co_err);
1981			return;
1982		}
1983
1984		hdr = buf_msg(tnlskb);
1985		syncpt = l->snd_nxt + skb_queue_len(&l->backlogq) - 1;
1986		msg_set_syncpt(hdr, syncpt);
1987		msg_set_bearer_id(hdr, l->peer_bearer_id);
1988		__skb_queue_tail(&tnlq, tnlskb);
1989		tipc_link_xmit(tnl, &tnlq, xmitq);
1990		return;
1991	}
1992
1993	__skb_queue_head_init(&tmpxq);
1994	__skb_queue_head_init(&frags);
1995	/* At least one packet required for safe algorithm => add dummy */
1996	skb = tipc_msg_create(TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
1997			      BASIC_H_SIZE, 0, l->addr, tipc_own_addr(l->net),
1998			      0, 0, TIPC_ERR_NO_PORT);
1999	if (!skb) {
2000		pr_warn("%sunable to create tunnel packet\n", link_co_err);
2001		return;
2002	}
2003	__skb_queue_tail(&tnlq, skb);
2004	tipc_link_xmit(l, &tnlq, &tmpxq);
2005	__skb_queue_purge(&tmpxq);
2006
2007	/* Initialize reusable tunnel packet header */
2008	tipc_msg_init(tipc_own_addr(l->net), &tnlhdr, TUNNEL_PROTOCOL,
2009		      mtyp, INT_H_SIZE, l->addr);
2010	if (mtyp == SYNCH_MSG)
2011		pktcnt = l->snd_nxt - buf_seqno(skb_peek(&l->transmq));
2012	else
2013		pktcnt = skb_queue_len(&l->transmq);
2014	pktcnt += skb_queue_len(&l->backlogq);
2015	msg_set_msgcnt(&tnlhdr, pktcnt);
2016	msg_set_bearer_id(&tnlhdr, l->peer_bearer_id);
2017tnl:
2018	/* Wrap each packet into a tunnel packet */
2019	skb_queue_walk(queue, skb) {
2020		hdr = buf_msg(skb);
2021		if (queue == &l->backlogq)
2022			msg_set_seqno(hdr, seqno++);
2023		pktlen = msg_size(hdr);
2024
2025		/* Tunnel link MTU is not large enough? This could be
2026		 * due to:
2027		 * 1) Link MTU has just changed or been set differently;
2028		 * 2) FAILOVER on top of a SYNCH message
2029		 *
2030		 * The 2nd case should not happen if peer supports
2031		 * TIPC_TUNNEL_ENHANCED
2032		 */
2033		if (pktlen > tnl->mtu - INT_H_SIZE) {
2034			if (mtyp == FAILOVER_MSG &&
2035			    (tnl->peer_caps & TIPC_TUNNEL_ENHANCED)) {
2036				rc = tipc_msg_fragment(skb, &tnlhdr, tnl->mtu,
2037						       &frags);
2038				if (rc) {
2039					pr_warn("%sunable to frag msg: rc %d\n",
2040						link_co_err, rc);
2041					return;
2042				}
2043				pktcnt += skb_queue_len(&frags) - 1;
2044				pktcnt_need_update = true;
2045				skb_queue_splice_tail_init(&frags, &tnlq);
2046				continue;
2047			}
2048			/* Unfortunately, peer doesn't support
2049			 * TIPC_TUNNEL_ENHANCED => just warn and return!
2050			 */
2051			pr_warn_ratelimited("%stoo large msg <%d, %d>: %d!\n",
2052					    link_co_err, msg_user(hdr),
2053					    msg_type(hdr), msg_size(hdr));
2054			return;
2055		}
2056
2057		msg_set_size(&tnlhdr, pktlen + INT_H_SIZE);
2058		tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE, GFP_ATOMIC);
2059		if (!tnlskb) {
2060			pr_warn("%sunable to send packet\n", link_co_err);
2061			return;
2062		}
2063		skb_copy_to_linear_data(tnlskb, &tnlhdr, INT_H_SIZE);
2064		skb_copy_to_linear_data_offset(tnlskb, INT_H_SIZE, hdr, pktlen);
2065		__skb_queue_tail(&tnlq, tnlskb);
2066	}
2067	if (queue != &l->backlogq) {
2068		queue = &l->backlogq;
2069		goto tnl;
2070	}
2071
2072	if (pktcnt_need_update)
2073		skb_queue_walk(&tnlq, skb) {
2074			hdr = buf_msg(skb);
2075			msg_set_msgcnt(hdr, pktcnt);
2076		}
2077
2078	tipc_link_xmit(tnl, &tnlq, xmitq);
2079
2080	if (mtyp == FAILOVER_MSG) {
2081		tnl->drop_point = l->rcv_nxt;
2082		tnl->failover_reasm_skb = l->reasm_buf;
2083		l->reasm_buf = NULL;
2084
2085		/* Failover the link's deferdq */
2086		if (unlikely(!skb_queue_empty(fdefq))) {
2087			pr_warn("Link failover deferdq not empty: %d!\n",
2088				skb_queue_len(fdefq));
2089			__skb_queue_purge(fdefq);
2090		}
2091		skb_queue_splice_init(&l->deferdq, fdefq);
2092	}
2093}
2094
2095/**
2096 * tipc_link_failover_prepare() - prepare tnl for link failover
2097 *
2098 * This is a special version of the precursor - tipc_link_tnl_prepare(),
2099 * see the tipc_node_link_failover() for details
2100 *
2101 * @l: failover link
2102 * @tnl: tunnel link
2103 * @xmitq: queue for messages to be xmited
2104 */
2105void tipc_link_failover_prepare(struct tipc_link *l, struct tipc_link *tnl,
2106				struct sk_buff_head *xmitq)
2107{
2108	struct sk_buff_head *fdefq = &tnl->failover_deferdq;
2109
2110	tipc_link_create_dummy_tnl_msg(tnl, xmitq);
2111
2112	/* This failover link endpoint was never established before,
2113	 * so it has not received anything from peer.
2114	 * Otherwise, it must be a normal failover situation or the
2115	 * node has entered SELF_DOWN_PEER_LEAVING and both peer nodes
2116	 * would have to start over from scratch instead.
2117	 */
2118	tnl->drop_point = 1;
2119	tnl->failover_reasm_skb = NULL;
2120
2121	/* Initiate the link's failover deferdq */
2122	if (unlikely(!skb_queue_empty(fdefq))) {
2123		pr_warn("Link failover deferdq not empty: %d!\n",
2124			skb_queue_len(fdefq));
2125		__skb_queue_purge(fdefq);
2126	}
2127}
2128
2129/* tipc_link_validate_msg(): validate message against current link state
2130 * Returns true if message should be accepted, otherwise false
2131 */
2132bool tipc_link_validate_msg(struct tipc_link *l, struct tipc_msg *hdr)
2133{
2134	u16 curr_session = l->peer_session;
2135	u16 session = msg_session(hdr);
2136	int mtyp = msg_type(hdr);
2137
2138	if (msg_user(hdr) != LINK_PROTOCOL)
2139		return true;
2140
2141	switch (mtyp) {
2142	case RESET_MSG:
2143		if (!l->in_session)
2144			return true;
2145		/* Accept only RESET with new session number */
2146		return more(session, curr_session);
2147	case ACTIVATE_MSG:
2148		if (!l->in_session)
2149			return true;
2150		/* Accept only ACTIVATE with new or current session number */
2151		return !less(session, curr_session);
2152	case STATE_MSG:
2153		/* Accept only STATE with current session number */
2154		if (!l->in_session)
2155			return false;
2156		if (session != curr_session)
2157			return false;
2158		/* Extra sanity check */
2159		if (!tipc_link_is_up(l) && msg_ack(hdr))
2160			return false;
2161		if (!(l->peer_caps & TIPC_LINK_PROTO_SEQNO))
2162			return true;
2163		/* Accept only STATE with new sequence number */
2164		return !less(msg_seqno(hdr), l->rcv_nxt_state);
2165	default:
2166		return false;
2167	}
2168}
2169
2170/* tipc_link_proto_rcv(): receive link level protocol message:
2171 * Note that the network plane id propagates through the network, and may
2172 * change at any time. The node with the lowest numerical id determines
2173 * the network plane.
2174 */
2175static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
2176			       struct sk_buff_head *xmitq)
2177{
2178	struct tipc_msg *hdr = buf_msg(skb);
2179	struct tipc_gap_ack_blks *ga = NULL;
2180	bool reply = msg_probe(hdr), retransmitted = false;
2181	u32 dlen = msg_data_sz(hdr), glen = 0, msg_max;
2182	u16 peers_snd_nxt =  msg_next_sent(hdr);
2183	u16 peers_tol = msg_link_tolerance(hdr);
2184	u16 peers_prio = msg_linkprio(hdr);
2185	u16 gap = msg_seq_gap(hdr);
2186	u16 ack = msg_ack(hdr);
2187	u16 rcv_nxt = l->rcv_nxt;
2188	u16 rcvgap = 0;
2189	int mtyp = msg_type(hdr);
2190	int rc = 0, released;
2191	char *if_name;
2192	void *data;
2193
2194	trace_tipc_proto_rcv(skb, false, l->name);
2195
2196	if (dlen > U16_MAX)
2197		goto exit;
2198
2199	if (tipc_link_is_blocked(l) || !xmitq)
2200		goto exit;
2201
2202	if (tipc_own_addr(l->net) > msg_prevnode(hdr))
2203		l->net_plane = msg_net_plane(hdr);
2204
2205	if (skb_linearize(skb))
2206		goto exit;
2207
2208	hdr = buf_msg(skb);
2209	data = msg_data(hdr);
2210
2211	if (!tipc_link_validate_msg(l, hdr)) {
2212		trace_tipc_skb_dump(skb, false, "PROTO invalid (1)!");
2213		trace_tipc_link_dump(l, TIPC_DUMP_NONE, "PROTO invalid (1)!");
2214		goto exit;
2215	}
2216
2217	switch (mtyp) {
2218	case RESET_MSG:
2219	case ACTIVATE_MSG:
2220		msg_max = msg_max_pkt(hdr);
2221		if (msg_max < tipc_bearer_min_mtu(l->net, l->bearer_id))
2222			break;
2223		/* Complete own link name with peer's interface name */
2224		if_name =  strrchr(l->name, ':') + 1;
2225		if (sizeof(l->name) - (if_name - l->name) <= TIPC_MAX_IF_NAME)
2226			break;
2227		if (msg_data_sz(hdr) < TIPC_MAX_IF_NAME)
2228			break;
2229		strncpy(if_name, data, TIPC_MAX_IF_NAME);
2230
2231		/* Update own tolerance if peer indicates a non-zero value */
2232		if (tipc_in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) {
2233			l->tolerance = peers_tol;
2234			l->bc_rcvlink->tolerance = peers_tol;
2235		}
2236		/* Update own priority if peer's priority is higher */
2237		if (tipc_in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
2238			l->priority = peers_prio;
2239
2240		/* If peer is going down we want full re-establish cycle */
2241		if (msg_peer_stopping(hdr)) {
2242			rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
2243			break;
2244		}
2245
2246		/* If this endpoint was re-created while peer was ESTABLISHING
2247		 * it doesn't know current session number. Force re-synch.
2248		 */
2249		if (mtyp == ACTIVATE_MSG && msg_dest_session_valid(hdr) &&
2250		    l->session != msg_dest_session(hdr)) {
2251			if (less(l->session, msg_dest_session(hdr)))
2252				l->session = msg_dest_session(hdr) + 1;
2253			break;
2254		}
2255
2256		/* ACTIVATE_MSG serves as PEER_RESET if link is already down */
2257		if (mtyp == RESET_MSG || !tipc_link_is_up(l))
2258			rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);
2259
2260		/* ACTIVATE_MSG takes up link if it was already locally reset */
2261		if (mtyp == ACTIVATE_MSG && l->state == LINK_ESTABLISHING)
2262			rc = TIPC_LINK_UP_EVT;
2263
2264		l->peer_session = msg_session(hdr);
2265		l->in_session = true;
2266		l->peer_bearer_id = msg_bearer_id(hdr);
2267		if (l->mtu > msg_max)
2268			l->mtu = msg_max;
2269		break;
2270
2271	case STATE_MSG:
2272		/* Validate Gap ACK blocks, drop if invalid */
2273		glen = tipc_get_gap_ack_blks(&ga, l, hdr, true);
2274		if (glen > dlen)
2275			break;
2276
2277		l->rcv_nxt_state = msg_seqno(hdr) + 1;
2278
2279		/* Update own tolerance if peer indicates a non-zero value */
2280		if (tipc_in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) {
2281			l->tolerance = peers_tol;
2282			l->bc_rcvlink->tolerance = peers_tol;
2283		}
2284		/* Update own prio if peer indicates a different value */
2285		if ((peers_prio != l->priority) &&
2286		    tipc_in_range(peers_prio, 1, TIPC_MAX_LINK_PRI)) {
2287			l->priority = peers_prio;
2288			rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
2289		}
2290
2291		l->silent_intv_cnt = 0;
2292		l->stats.recv_states++;
2293		if (msg_probe(hdr))
2294			l->stats.recv_probes++;
2295
2296		if (!tipc_link_is_up(l)) {
2297			if (l->state == LINK_ESTABLISHING)
2298				rc = TIPC_LINK_UP_EVT;
2299			break;
2300		}
2301
2302		tipc_mon_rcv(l->net, data + glen, dlen - glen, l->addr,
2303			     &l->mon_state, l->bearer_id);
2304
2305		/* Send NACK if peer has sent pkts we haven't received yet */
2306		if ((reply || msg_is_keepalive(hdr)) &&
2307		    more(peers_snd_nxt, rcv_nxt) &&
2308		    !tipc_link_is_synching(l) &&
2309		    skb_queue_empty(&l->deferdq))
2310			rcvgap = peers_snd_nxt - l->rcv_nxt;
2311		if (rcvgap || reply)
2312			tipc_link_build_proto_msg(l, STATE_MSG, 0, reply,
2313						  rcvgap, 0, 0, xmitq);
2314
2315		released = tipc_link_advance_transmq(l, l, ack, gap, ga, xmitq,
2316						     &retransmitted, &rc);
2317		if (gap)
2318			l->stats.recv_nacks++;
2319		if (released || retransmitted)
2320			tipc_link_update_cwin(l, released, retransmitted);
2321		if (released)
2322			tipc_link_advance_backlog(l, xmitq);
2323		if (unlikely(!skb_queue_empty(&l->wakeupq)))
2324			link_prepare_wakeup(l);
2325	}
2326exit:
2327	kfree_skb(skb);
2328	return rc;
2329}
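
/* A worked sketch of the STATE_MSG branch above, with assumed numbers:
 * a probing STATE message (reply requested) reports peers_snd_nxt = 110
 * while this endpoint's rcv_nxt is 100, the link is not synching and
 * deferdq is empty. Then rcvgap = 110 - 100 = 10, and
 *
 *	tipc_link_build_proto_msg(l, STATE_MSG, 0, reply, 10, 0, 0, xmitq);
 *
 * queues a STATE message on xmitq asking the peer to retransmit the ten
 * packets this endpoint has not yet received.
 */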
2330
2331/* tipc_link_build_bc_proto_msg() - create broadcast protocol message
2332 */
2333static bool tipc_link_build_bc_proto_msg(struct tipc_link *l, bool bcast,
2334					 u16 peers_snd_nxt,
2335					 struct sk_buff_head *xmitq)
2336{
2337	struct sk_buff *skb;
2338	struct tipc_msg *hdr;
2339	struct sk_buff *dfrd_skb = skb_peek(&l->deferdq);
2340	u16 ack = l->rcv_nxt - 1;
2341	u16 gap_to = peers_snd_nxt - 1;
2342
2343	skb = tipc_msg_create(BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE,
2344			      0, l->addr, tipc_own_addr(l->net), 0, 0, 0);
2345	if (!skb)
2346		return false;
2347	hdr = buf_msg(skb);
2348	msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
2349	msg_set_bcast_ack(hdr, ack);
2350	msg_set_bcgap_after(hdr, ack);
2351	if (dfrd_skb)
2352		gap_to = buf_seqno(dfrd_skb) - 1;
2353	msg_set_bcgap_to(hdr, gap_to);
2354	msg_set_non_seq(hdr, bcast);
2355	__skb_queue_tail(xmitq, skb);
2356	return true;
2357}
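
/* Example of the gap fields set above, with assumed numbers: with
 * rcv_nxt = 100 the message acks everything up to seqno 99 and, absent
 * any deferred packet, reports 100..peers_snd_nxt - 1 as missing. If
 * deferdq instead holds a packet with seqno 105, gap_to is narrowed to
 * 104, since everything from 105 onwards is already buffered here.
 */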
2358
2359/* tipc_link_build_bc_init_msg() - synchronize broadcast link endpoints.
2360 *
2361 * Give a newly added peer node the sequence number where it should
2362 * start receiving and acking broadcast packets.
2363 */
2364static void tipc_link_build_bc_init_msg(struct tipc_link *l,
2365					struct sk_buff_head *xmitq)
2366{
2367	struct sk_buff_head list;
2368
2369	__skb_queue_head_init(&list);
2370	if (!tipc_link_build_bc_proto_msg(l->bc_rcvlink, false, 0, &list))
2371		return;
2372	msg_set_bc_ack_invalid(buf_msg(skb_peek(&list)), true);
2373	tipc_link_xmit(l, &list, xmitq);
2374}
2375
2376/* tipc_link_bc_init_rcv - receive initial broadcast synch data from peer
2377 */
2378void tipc_link_bc_init_rcv(struct tipc_link *l, struct tipc_msg *hdr)
2379{
2380	int mtyp = msg_type(hdr);
2381	u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);
2382
2383	if (tipc_link_is_up(l))
2384		return;
2385
2386	if (msg_user(hdr) == BCAST_PROTOCOL) {
2387		l->rcv_nxt = peers_snd_nxt;
2388		l->state = LINK_ESTABLISHED;
2389		return;
2390	}
2391
2392	if (l->peer_caps & TIPC_BCAST_SYNCH)
2393		return;
2394
2395	if (msg_peer_node_is_up(hdr))
2396		return;
2397
2398	/* Compatibility: accept older, less safe initial synch data */
2399	if ((mtyp == RESET_MSG) || (mtyp == ACTIVATE_MSG))
2400		l->rcv_nxt = peers_snd_nxt;
2401}
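
/* Example, with an assumed peer: a node joining with broadcast
 * snd_nxt = 4711 sends a BCAST_PROTOCOL init message; the first branch
 * above sets l->rcv_nxt = 4711 and marks the broadcast receive link
 * ESTABLISHED, so packet #4711 is the first one expected and acked.
 * Peers with TIPC_BCAST_SYNCH synchronize later via unicast state
 * messages; only for older peers is the RESET/ACTIVATE fallback used.
 */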
2402
2403/* tipc_link_bc_sync_rcv - update rcv link according to peer's send state
2404 */
2405int tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
2406			  struct sk_buff_head *xmitq)
2407{
2408	u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);
2409	int rc = 0;
2410
2411	if (!tipc_link_is_up(l))
2412		return rc;
2413
2414	if (!msg_peer_node_is_up(hdr))
2415		return rc;
2416
2417	/* Open when peer acknowledges our bcast init msg (pkt #1) */
2418	if (msg_ack(hdr))
2419		l->bc_peer_is_up = true;
2420
2421	if (!l->bc_peer_is_up)
2422		return rc;
2423
2424	/* Ignore if peers_snd_nxt goes beyond receive window */
2425	if (more(peers_snd_nxt, l->rcv_nxt + l->window))
2426		return rc;
2427
2428	l->snd_nxt = peers_snd_nxt;
2429	if (link_bc_rcv_gap(l))
2430		rc |= TIPC_LINK_SND_STATE;
2431
2432	/* Return now if sender supports nack via STATE messages */
2433	if (l->peer_caps & TIPC_BCAST_STATE_NACK)
2434		return rc;
2435
2436	/* Otherwise, be backwards compatible */
2437
2438	if (!more(peers_snd_nxt, l->rcv_nxt)) {
2439		l->nack_state = BC_NACK_SND_CONDITIONAL;
2440		return 0;
2441	}
2442
2443	/* Don't NACK if one was recently sent or seen from another node */
2444	if (l->nack_state == BC_NACK_SND_SUPPRESS) {
2445		l->nack_state = BC_NACK_SND_UNCONDITIONAL;
2446		return 0;
2447	}
2448
2449	/* Conditionally delay NACK sending until next synch rcv */
2450	if (l->nack_state == BC_NACK_SND_CONDITIONAL) {
2451		l->nack_state = BC_NACK_SND_UNCONDITIONAL;
2452		if ((peers_snd_nxt - l->rcv_nxt) < TIPC_MIN_LINK_WIN)
2453			return 0;
2454	}
2455
2456	/* Send NACK now but suppress next one */
2457	tipc_link_build_bc_proto_msg(l, true, peers_snd_nxt, xmitq);
2458	l->nack_state = BC_NACK_SND_SUPPRESS;
2459	return 0;
2460}
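
/* A sketch of the legacy NACK throttling above, assuming a sequence of
 * synch receptions from one peer:
 *
 *	no gap seen            -> nack_state = BC_NACK_SND_CONDITIONAL
 *	gap, state CONDITIONAL -> NACK only if gap >= TIPC_MIN_LINK_WIN
 *	NACK sent              -> nack_state = BC_NACK_SND_SUPPRESS
 *	gap, state SUPPRESS    -> no NACK, back to UNCONDITIONAL
 *
 * i.e. at most every other synch reception can trigger a broadcast
 * NACK, limiting NACK storms from peers without TIPC_BCAST_STATE_NACK.
 */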
2461
2462int tipc_link_bc_ack_rcv(struct tipc_link *r, u16 acked, u16 gap,
2463			 struct tipc_gap_ack_blks *ga,
2464			 struct sk_buff_head *xmitq,
2465			 struct sk_buff_head *retrq)
2466{
2467	struct tipc_link *l = r->bc_sndlink;
2468	bool unused = false;
2469	int rc = 0;
2470
2471	if (!tipc_link_is_up(r) || !r->bc_peer_is_up)
2472		return 0;
2473
2474	if (gap) {
2475		l->stats.recv_nacks++;
2476		r->stats.recv_nacks++;
2477	}
2478
2479	if (less(acked, r->acked) || (acked == r->acked && !gap && !ga))
2480		return 0;
2481
2482	trace_tipc_link_bc_ack(r, acked, gap, &l->transmq);
2483	tipc_link_advance_transmq(l, r, acked, gap, ga, retrq, &unused, &rc);
2484
2485	tipc_link_advance_backlog(l, xmitq);
2486	if (unlikely(!skb_queue_empty(&l->wakeupq)))
2487		link_prepare_wakeup(l);
2488
2489	return rc;
2490}
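
/* Example of the staleness filter above, with assumed numbers: if this
 * peer's receive link already records r->acked = 120 and a late message
 * arrives acking only 115, it is ignored, as is a repeated ack of 120
 * carrying neither a gap nor Gap ACK blocks; a repeated ack of 120 that
 * does carry a gap or Gap ACK blocks is still processed, since it may
 * trigger retransmission even though it releases nothing.
 */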
2491
2492/* tipc_link_bc_nack_rcv(): receive broadcast nack message
2493 * This function is here for backwards compatibility, since
2494 * BCAST_PROTOCOL/STATE messages are no longer sent as of TIPC v2.5.
2495 */
2496int tipc_link_bc_nack_rcv(struct tipc_link *l, struct sk_buff *skb,
2497			  struct sk_buff_head *xmitq)
2498{
2499	struct tipc_msg *hdr = buf_msg(skb);
2500	u32 dnode = msg_destnode(hdr);
2501	int mtyp = msg_type(hdr);
2502	u16 acked = msg_bcast_ack(hdr);
2503	u16 from = acked + 1;
2504	u16 to = msg_bcgap_to(hdr);
2505	u16 peers_snd_nxt = to + 1;
2506	int rc = 0;
2507
2508	kfree_skb(skb);
2509
2510	if (!tipc_link_is_up(l) || !l->bc_peer_is_up)
2511		return 0;
2512
2513	if (mtyp != STATE_MSG)
2514		return 0;
2515
2516	if (dnode == tipc_own_addr(l->net)) {
2517		rc = tipc_link_bc_ack_rcv(l, acked, to - acked, NULL, xmitq,
2518					  xmitq);
2519		l->stats.recv_nacks++;
2520		return rc;
2521	}
2522
2523	/* Msg for other node => suppress own NACK at next sync if applicable */
2524	if (more(peers_snd_nxt, l->rcv_nxt) && !less(l->rcv_nxt, from))
2525		l->nack_state = BC_NACK_SND_SUPPRESS;
2526
2527	return 0;
2528}
2529
2530void tipc_link_set_queue_limits(struct tipc_link *l, u32 min_win, u32 max_win)
2531{
2532	int max_bulk = TIPC_MAX_PUBL / (l->mtu / ITEM_SIZE);
2533
2534	l->min_win = min_win;
2535	l->ssthresh = max_win;
2536	l->max_win = max_win;
2537	l->window = min_win;
2538	l->backlog[TIPC_LOW_IMPORTANCE].limit      = min_win * 2;
2539	l->backlog[TIPC_MEDIUM_IMPORTANCE].limit   = min_win * 4;
2540	l->backlog[TIPC_HIGH_IMPORTANCE].limit     = min_win * 6;
2541	l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = min_win * 8;
2542	l->backlog[TIPC_SYSTEM_IMPORTANCE].limit   = max_bulk;
2543}
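
/* Worked example, with assumed values: min_win = 50 gives backlog
 * limits of 100, 200, 300 and 400 packets for LOW through CRITICAL
 * importance. SYSTEM importance is instead bounded by max_bulk, the
 * number of packets needed to carry a full bulk distribution of
 * TIPC_MAX_PUBL name table items at mtu / ITEM_SIZE items per packet,
 * e.g. TIPC_MAX_PUBL / (1500 / ITEM_SIZE) for an MTU of 1500.
 */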
2544
2545/**
2546 * tipc_link_reset_stats - reset link statistics
2547 * @l: pointer to link
2548 */
2549void tipc_link_reset_stats(struct tipc_link *l)
2550{
2551	memset(&l->stats, 0, sizeof(l->stats));
2552}
2553
2554static void link_print(struct tipc_link *l, const char *str)
2555{
2556	struct sk_buff *hskb = skb_peek(&l->transmq);
2557	u16 head = hskb ? msg_seqno(buf_msg(hskb)) : l->snd_nxt - 1;
2558	u16 tail = l->snd_nxt - 1;
2559
2560	pr_info("%s Link <%s> state %x\n", str, l->name, l->state);
2561	pr_info("XMTQ: %u [%u-%u], BKLGQ: %u, SNDNX: %u, RCVNX: %u\n",
2562		skb_queue_len(&l->transmq), head, tail,
2563		skb_queue_len(&l->backlogq), l->snd_nxt, l->rcv_nxt);
2564}
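
/* Sample output, with hypothetical caller string, link name and state:
 *
 *	resetting Link <1.1.1:data0-1.1.2:data0> state 2
 *	XMTQ: 3 [100-102], BKLGQ: 0, SNDNX: 103, RCVNX: 98
 *
 * i.e. three packets (seqnos 100-102) still await acknowledgment.
 */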
2565
2566/* Parse and validate nested (link) properties valid for media, bearer and link
2567 */
2568int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[])
2569{
2570	int err;
2571
2572	err = nla_parse_nested_deprecated(props, TIPC_NLA_PROP_MAX, prop,
2573					  tipc_nl_prop_policy, NULL);
2574	if (err)
2575		return err;
2576
2577	if (props[TIPC_NLA_PROP_PRIO]) {
2578		u32 prio;
2579
2580		prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
2581		if (prio > TIPC_MAX_LINK_PRI)
2582			return -EINVAL;
2583	}
2584
2585	if (props[TIPC_NLA_PROP_TOL]) {
2586		u32 tol;
2587
2588		tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
2589		if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
2590			return -EINVAL;
2591	}
2592
2593	if (props[TIPC_NLA_PROP_WIN]) {
2594		u32 max_win;
2595
2596		max_win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
2597		if (max_win < TIPC_DEF_LINK_WIN || max_win > TIPC_MAX_LINK_WIN)
2598			return -EINVAL;
2599	}
2600
2601	return 0;
2602}
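
/* A minimal caller sketch, assuming parsed link attributes in 'attrs'
 * and a hypothetical link 'l' with a send queue 'xmitq':
 *
 *	struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
 *
 *	if (!tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP], props) &&
 *	    props[TIPC_NLA_PROP_TOL])
 *		tipc_link_set_tolerance(l, nla_get_u32(props[TIPC_NLA_PROP_TOL]),
 *					&xmitq);
 *
 * The parsed and range-checked properties can then be applied with the
 * setters further down in this file.
 */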
2603
2604static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s)
2605{
2606	int i;
2607	struct nlattr *stats;
2608
2609	struct nla_map {
2610		u32 key;
2611		u32 val;
2612	};
2613
2614	struct nla_map map[] = {
2615		{TIPC_NLA_STATS_RX_INFO, 0},
2616		{TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments},
2617		{TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented},
2618		{TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles},
2619		{TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled},
2620		{TIPC_NLA_STATS_TX_INFO, 0},
2621		{TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments},
2622		{TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented},
2623		{TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles},
2624		{TIPC_NLA_STATS_TX_BUNDLED, s->sent_bundled},
2625		{TIPC_NLA_STATS_MSG_PROF_TOT, (s->msg_length_counts) ?
2626			s->msg_length_counts : 1},
2627		{TIPC_NLA_STATS_MSG_LEN_CNT, s->msg_length_counts},
2628		{TIPC_NLA_STATS_MSG_LEN_TOT, s->msg_lengths_total},
2629		{TIPC_NLA_STATS_MSG_LEN_P0, s->msg_length_profile[0]},
2630		{TIPC_NLA_STATS_MSG_LEN_P1, s->msg_length_profile[1]},
2631		{TIPC_NLA_STATS_MSG_LEN_P2, s->msg_length_profile[2]},
2632		{TIPC_NLA_STATS_MSG_LEN_P3, s->msg_length_profile[3]},
2633		{TIPC_NLA_STATS_MSG_LEN_P4, s->msg_length_profile[4]},
2634		{TIPC_NLA_STATS_MSG_LEN_P5, s->msg_length_profile[5]},
2635		{TIPC_NLA_STATS_MSG_LEN_P6, s->msg_length_profile[6]},
2636		{TIPC_NLA_STATS_RX_STATES, s->recv_states},
2637		{TIPC_NLA_STATS_RX_PROBES, s->recv_probes},
2638		{TIPC_NLA_STATS_RX_NACKS, s->recv_nacks},
2639		{TIPC_NLA_STATS_RX_DEFERRED, s->deferred_recv},
2640		{TIPC_NLA_STATS_TX_STATES, s->sent_states},
2641		{TIPC_NLA_STATS_TX_PROBES, s->sent_probes},
2642		{TIPC_NLA_STATS_TX_NACKS, s->sent_nacks},
2643		{TIPC_NLA_STATS_TX_ACKS, s->sent_acks},
2644		{TIPC_NLA_STATS_RETRANSMITTED, s->retransmitted},
2645		{TIPC_NLA_STATS_DUPLICATES, s->duplicates},
2646		{TIPC_NLA_STATS_LINK_CONGS, s->link_congs},
2647		{TIPC_NLA_STATS_MAX_QUEUE, s->max_queue_sz},
2648		{TIPC_NLA_STATS_AVG_QUEUE, s->queue_sz_counts ?
2649			(s->accu_queue_sz / s->queue_sz_counts) : 0}
2650	};
2651
2652	stats = nla_nest_start_noflag(skb, TIPC_NLA_LINK_STATS);
2653	if (!stats)
2654		return -EMSGSIZE;
2655
2656	for (i = 0; i < ARRAY_SIZE(map); i++)
2657		if (nla_put_u32(skb, map[i].key, map[i].val))
2658			goto msg_full;
2659
2660	nla_nest_end(skb, stats);
2661
2662	return 0;
2663msg_full:
2664	nla_nest_cancel(skb, stats);
2665
2666	return -EMSGSIZE;
2667}
2668
2669/* Caller should hold appropriate locks to protect the link */
2670int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
2671		       struct tipc_link *link, int nlflags)
2672{
2673	u32 self = tipc_own_addr(net);
2674	struct nlattr *attrs;
2675	struct nlattr *prop;
2676	void *hdr;
2677	int err;
2678
2679	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
2680			  nlflags, TIPC_NL_LINK_GET);
2681	if (!hdr)
2682		return -EMSGSIZE;
2683
2684	attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK);
2685	if (!attrs)
2686		goto msg_full;
2687
2688	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name))
2689		goto attr_msg_full;
2690	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST, tipc_cluster_mask(self)))
2691		goto attr_msg_full;
2692	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu))
2693		goto attr_msg_full;
2694	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->stats.recv_pkts))
2695		goto attr_msg_full;
2696	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->stats.sent_pkts))
2697		goto attr_msg_full;
2698
2699	if (tipc_link_is_up(link))
2700		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
2701			goto attr_msg_full;
2702	if (link->active)
2703		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE))
2704			goto attr_msg_full;
2705
2706	prop = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK_PROP);
2707	if (!prop)
2708		goto attr_msg_full;
2709	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
2710		goto prop_msg_full;
2711	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance))
2712		goto prop_msg_full;
2713	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN,
2714			link->window))
2715		goto prop_msg_full;
2718	nla_nest_end(msg->skb, prop);
2719
2720	err = __tipc_nl_add_stats(msg->skb, &link->stats);
2721	if (err)
2722		goto attr_msg_full;
2723
2724	nla_nest_end(msg->skb, attrs);
2725	genlmsg_end(msg->skb, hdr);
2726
2727	return 0;
2728
2729prop_msg_full:
2730	nla_nest_cancel(msg->skb, prop);
2731attr_msg_full:
2732	nla_nest_cancel(msg->skb, attrs);
2733msg_full:
2734	genlmsg_cancel(msg->skb, hdr);
2735
2736	return -EMSGSIZE;
2737}
2738
2739static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
2740				      struct tipc_stats *stats)
2741{
2742	int i;
2743	struct nlattr *nest;
2744
2745	struct nla_map {
2746		__u32 key;
2747		__u32 val;
2748	};
2749
2750	struct nla_map map[] = {
2751		{TIPC_NLA_STATS_RX_INFO, stats->recv_pkts},
2752		{TIPC_NLA_STATS_RX_FRAGMENTS, stats->recv_fragments},
2753		{TIPC_NLA_STATS_RX_FRAGMENTED, stats->recv_fragmented},
2754		{TIPC_NLA_STATS_RX_BUNDLES, stats->recv_bundles},
2755		{TIPC_NLA_STATS_RX_BUNDLED, stats->recv_bundled},
2756		{TIPC_NLA_STATS_TX_INFO, stats->sent_pkts},
2757		{TIPC_NLA_STATS_TX_FRAGMENTS, stats->sent_fragments},
2758		{TIPC_NLA_STATS_TX_FRAGMENTED, stats->sent_fragmented},
2759		{TIPC_NLA_STATS_TX_BUNDLES, stats->sent_bundles},
2760		{TIPC_NLA_STATS_TX_BUNDLED, stats->sent_bundled},
2761		{TIPC_NLA_STATS_RX_NACKS, stats->recv_nacks},
2762		{TIPC_NLA_STATS_RX_DEFERRED, stats->deferred_recv},
2763		{TIPC_NLA_STATS_TX_NACKS, stats->sent_nacks},
2764		{TIPC_NLA_STATS_TX_ACKS, stats->sent_acks},
2765		{TIPC_NLA_STATS_RETRANSMITTED, stats->retransmitted},
2766		{TIPC_NLA_STATS_DUPLICATES, stats->duplicates},
2767		{TIPC_NLA_STATS_LINK_CONGS, stats->link_congs},
2768		{TIPC_NLA_STATS_MAX_QUEUE, stats->max_queue_sz},
2769		{TIPC_NLA_STATS_AVG_QUEUE, stats->queue_sz_counts ?
2770			(stats->accu_queue_sz / stats->queue_sz_counts) : 0}
2771	};
2772
2773	nest = nla_nest_start_noflag(skb, TIPC_NLA_LINK_STATS);
2774	if (!nest)
2775		return -EMSGSIZE;
2776
2777	for (i = 0; i < ARRAY_SIZE(map); i++)
2778		if (nla_put_u32(skb, map[i].key, map[i].val))
2779			goto msg_full;
2780
2781	nla_nest_end(skb, nest);
2782
2783	return 0;
2784msg_full:
2785	nla_nest_cancel(skb, nest);
2786
2787	return -EMSGSIZE;
2788}
2789
2790int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg,
2791			struct tipc_link *bcl)
2792{
2793	int err;
2794	void *hdr;
2795	struct nlattr *attrs;
2796	struct nlattr *prop;
2797	u32 bc_mode = tipc_bcast_get_mode(net);
2798	u32 bc_ratio = tipc_bcast_get_broadcast_ratio(net);
2799
2800	if (!bcl)
2801		return 0;
2802
2803	tipc_bcast_lock(net);
2804
2805	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
2806			  NLM_F_MULTI, TIPC_NL_LINK_GET);
2807	if (!hdr) {
2808		tipc_bcast_unlock(net);
2809		return -EMSGSIZE;
2810	}
2811
2812	attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK);
2813	if (!attrs)
2814		goto msg_full;
2815
2816	/* The broadcast link is always up */
2817	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
2818		goto attr_msg_full;
2819
2820	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_BROADCAST))
2821		goto attr_msg_full;
2822	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name))
2823		goto attr_msg_full;
2824	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, 0))
2825		goto attr_msg_full;
2826	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, 0))
2827		goto attr_msg_full;
2828
2829	prop = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK_PROP);
2830	if (!prop)
2831		goto attr_msg_full;
2832	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->max_win))
2833		goto prop_msg_full;
2834	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_BROADCAST, bc_mode))
2835		goto prop_msg_full;
2836	if (bc_mode & BCLINK_MODE_SEL)
2837		if (nla_put_u32(msg->skb, TIPC_NLA_PROP_BROADCAST_RATIO,
2838				bc_ratio))
2839			goto prop_msg_full;
2840	nla_nest_end(msg->skb, prop);
2841
2842	err = __tipc_nl_add_bc_link_stat(msg->skb, &bcl->stats);
2843	if (err)
2844		goto attr_msg_full;
2845
2846	tipc_bcast_unlock(net);
2847	nla_nest_end(msg->skb, attrs);
2848	genlmsg_end(msg->skb, hdr);
2849
2850	return 0;
2851
2852prop_msg_full:
2853	nla_nest_cancel(msg->skb, prop);
2854attr_msg_full:
2855	nla_nest_cancel(msg->skb, attrs);
2856msg_full:
2857	tipc_bcast_unlock(net);
2858	genlmsg_cancel(msg->skb, hdr);
2859
2860	return -EMSGSIZE;
2861}
2862
2863void tipc_link_set_tolerance(struct tipc_link *l, u32 tol,
2864			     struct sk_buff_head *xmitq)
2865{
2866	l->tolerance = tol;
2867	if (l->bc_rcvlink)
2868		l->bc_rcvlink->tolerance = tol;
2869	if (tipc_link_is_up(l))
2870		tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, tol, 0, xmitq);
2871}
2872
2873void tipc_link_set_prio(struct tipc_link *l, u32 prio,
2874			struct sk_buff_head *xmitq)
2875{
2876	l->priority = prio;
2877	tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, prio, xmitq);
2878}
2879
2880void tipc_link_set_abort_limit(struct tipc_link *l, u32 limit)
2881{
2882	l->abort_limit = limit;
2883}
2884
2885/**
2886 * tipc_link_dump - dump TIPC link data
2887 * @l: tipc link to be dumped
2888 * @dqueues: bitmask deciding which link queues to dump:
2889 *           - TIPC_DUMP_NONE: don't dump link queues
2890 *           - TIPC_DUMP_TRANSMQ: dump link transmq queue
2891 *           - TIPC_DUMP_BACKLOGQ: dump link backlog queue
2892 *           - TIPC_DUMP_DEFERDQ: dump link deferred queue
2893 *           - TIPC_DUMP_INPUTQ: dump link input queue
2894 *           - TIPC_DUMP_WAKEUP: dump link wakeup queue
2895 *           - TIPC_DUMP_ALL: dump all the link queues above
2896 * @buf: buffer where the formatted dump data is returned
2897 */
2898int tipc_link_dump(struct tipc_link *l, u16 dqueues, char *buf)
2899{
2900	int i = 0;
2901	size_t sz = (dqueues) ? LINK_LMAX : LINK_LMIN;
2902	struct sk_buff_head *list;
2903	struct sk_buff *hskb, *tskb;
2904	u32 len;
2905
2906	if (!l) {
2907		i += scnprintf(buf, sz, "link data: (null)\n");
2908		return i;
2909	}
2910
2911	i += scnprintf(buf, sz, "link data: %x", l->addr);
2912	i += scnprintf(buf + i, sz - i, " %x", l->state);
2913	i += scnprintf(buf + i, sz - i, " %u", l->in_session);
2914	i += scnprintf(buf + i, sz - i, " %u", l->session);
2915	i += scnprintf(buf + i, sz - i, " %u", l->peer_session);
2916	i += scnprintf(buf + i, sz - i, " %u", l->snd_nxt);
2917	i += scnprintf(buf + i, sz - i, " %u", l->rcv_nxt);
2918	i += scnprintf(buf + i, sz - i, " %u", l->snd_nxt_state);
2919	i += scnprintf(buf + i, sz - i, " %u", l->rcv_nxt_state);
2920	i += scnprintf(buf + i, sz - i, " %x", l->peer_caps);
2921	i += scnprintf(buf + i, sz - i, " %u", l->silent_intv_cnt);
2922	i += scnprintf(buf + i, sz - i, " %u", l->rst_cnt);
2923	i += scnprintf(buf + i, sz - i, " %u", 0);
2924	i += scnprintf(buf + i, sz - i, " %u", 0);
2925	i += scnprintf(buf + i, sz - i, " %u", l->acked);
2926
2927	list = &l->transmq;
2928	len = skb_queue_len(list);
2929	hskb = skb_peek(list);
2930	tskb = skb_peek_tail(list);
2931	i += scnprintf(buf + i, sz - i, " | %u %u %u", len,
2932		       (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
2933		       (tskb) ? msg_seqno(buf_msg(tskb)) : 0);
2934
2935	list = &l->deferdq;
2936	len = skb_queue_len(list);
2937	hskb = skb_peek(list);
2938	tskb = skb_peek_tail(list);
2939	i += scnprintf(buf + i, sz - i, " | %u %u %u", len,
2940		       (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
2941		       (tskb) ? msg_seqno(buf_msg(tskb)) : 0);
2942
2943	list = &l->backlogq;
2944	len = skb_queue_len(list);
2945	hskb = skb_peek(list);
2946	tskb = skb_peek_tail(list);
2947	i += scnprintf(buf + i, sz - i, " | %u %u %u", len,
2948		       (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
2949		       (tskb) ? msg_seqno(buf_msg(tskb)) : 0);
2950
2951	list = l->inputq;
2952	len = skb_queue_len(list);
2953	hskb = skb_peek(list);
2954	tskb = skb_peek_tail(list);
2955	i += scnprintf(buf + i, sz - i, " | %u %u %u\n", len,
2956		       (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
2957		       (tskb) ? msg_seqno(buf_msg(tskb)) : 0);
2958
2959	if (dqueues & TIPC_DUMP_TRANSMQ) {
2960		i += scnprintf(buf + i, sz - i, "transmq: ");
2961		i += tipc_list_dump(&l->transmq, false, buf + i);
2962	}
2963	if (dqueues & TIPC_DUMP_BACKLOGQ) {
2964		i += scnprintf(buf + i, sz - i,
2965			       "backlogq: <%u %u %u %u %u>, ",
2966			       l->backlog[TIPC_LOW_IMPORTANCE].len,
2967			       l->backlog[TIPC_MEDIUM_IMPORTANCE].len,
2968			       l->backlog[TIPC_HIGH_IMPORTANCE].len,
2969			       l->backlog[TIPC_CRITICAL_IMPORTANCE].len,
2970			       l->backlog[TIPC_SYSTEM_IMPORTANCE].len);
2971		i += tipc_list_dump(&l->backlogq, false, buf + i);
2972	}
2973	if (dqueues & TIPC_DUMP_DEFERDQ) {
2974		i += scnprintf(buf + i, sz - i, "deferdq: ");
2975		i += tipc_list_dump(&l->deferdq, false, buf + i);
2976	}
2977	if (dqueues & TIPC_DUMP_INPUTQ) {
2978		i += scnprintf(buf + i, sz - i, "inputq: ");
2979		i += tipc_list_dump(l->inputq, false, buf + i);
2980	}
2981	if (dqueues & TIPC_DUMP_WAKEUP) {
2982		i += scnprintf(buf + i, sz - i, "wakeup: ");
2983		i += tipc_list_dump(&l->wakeupq, false, buf + i);
2984	}
2985
2986	return i;
2987}