v3.5.6
/* SCTP kernel implementation
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001-2003 International Business Machines Corp.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This file is part of the SCTP kernel implementation
 *
 * This module provides the abstraction for an SCTP transport representing
 * a remote transport address.  For local transport addresses, we just use
 * union sctp_addr.
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, write to
 * the Free Software Foundation, 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <lksctp-developers@lists.sourceforge.net>
 *
 * Or submit a bug report through the following website:
 *    http://www.sf.net/projects/lksctp
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson          <karl@athena.chicago.il.us>
 *    Jon Grimm             <jgrimm@us.ibm.com>
 *    Xingang Guo           <xingang.guo@intel.com>
 *    Hui Huang             <hui.huang@nokia.com>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 *    Ardelle Fan           <ardelle.fan@intel.com>
 *
 * Any bugs reported to us we will try to fix... any fixes shared will
 * be incorporated into the next SCTP release.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/random.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

/* 1st Level Abstractions.  */

/* Initialize a new transport from provided memory.  */
static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
						  const union sctp_addr *addr,
						  gfp_t gfp)
{
	/* Copy in the address.  */
	peer->ipaddr = *addr;
	peer->af_specific = sctp_get_af_specific(addr->sa.sa_family);
	memset(&peer->saddr, 0, sizeof(union sctp_addr));

	peer->sack_generation = 0;

	/* From 6.3.1 RTO Calculation:
	 *
	 * C1) Until an RTT measurement has been made for a packet sent to the
	 * given destination transport address, set RTO to the protocol
	 * parameter 'RTO.Initial'.
	 */
	peer->rto = msecs_to_jiffies(sctp_rto_initial);

	peer->last_time_heard = jiffies;
	peer->last_time_ecne_reduced = jiffies;

	peer->param_flags = SPP_HB_DISABLE |
			    SPP_PMTUD_ENABLE |
			    SPP_SACKDELAY_ENABLE;

	/* Initialize the default path max_retrans.  */
	peer->pathmaxrxt  = sctp_max_retrans_path;

	INIT_LIST_HEAD(&peer->transmitted);
	INIT_LIST_HEAD(&peer->send_ready);
	INIT_LIST_HEAD(&peer->transports);

	setup_timer(&peer->T3_rtx_timer, sctp_generate_t3_rtx_event,
			(unsigned long)peer);
	setup_timer(&peer->hb_timer, sctp_generate_heartbeat_event,
			(unsigned long)peer);
	setup_timer(&peer->proto_unreach_timer,
		    sctp_generate_proto_unreach_event, (unsigned long)peer);

	/* Initialize the 64-bit random nonce sent with heartbeat. */
	get_random_bytes(&peer->hb_nonce, sizeof(peer->hb_nonce));

	atomic_set(&peer->refcnt, 1);

	return peer;
}

/* Allocate and initialize a new transport.  */
struct sctp_transport *sctp_transport_new(const union sctp_addr *addr,
					  gfp_t gfp)
{
	struct sctp_transport *transport;

	transport = t_new(struct sctp_transport, gfp);
	if (!transport)
		goto fail;

	if (!sctp_transport_init(transport, addr, gfp))
		goto fail_init;

	transport->malloced = 1;
	SCTP_DBG_OBJCNT_INC(transport);

	return transport;

fail_init:
	kfree(transport);

fail:
	return NULL;
}
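
/* Illustrative sketch (not part of the original file): the intended
 * lifecycle of a transport, assuming 'addr' holds a valid peer address.
 * sctp_transport_new() returns the object with refcnt == 1;
 * sctp_transport_free() below drops that initial reference.
 *
 *	struct sctp_transport *t;
 *
 *	t = sctp_transport_new(&addr, GFP_ATOMIC);
 *	if (!t)
 *		return -ENOMEM;
 *	sctp_transport_set_owner(t, asoc);	(takes a ref on asoc)
 *	...
 *	sctp_transport_free(t);			(drops the initial ref)
 */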

/* This transport is no longer needed.  Free it up if possible, or
 * delay that until its last reference is dropped.
 */
void sctp_transport_free(struct sctp_transport *transport)
{
	transport->dead = 1;

	/* Try to delete the heartbeat timer.  */
	if (del_timer(&transport->hb_timer))
		sctp_transport_put(transport);

	/* Delete the T3_rtx timer if it's active.
	 * There is no point in not doing this now and letting the
	 * structure hang around in memory since we know
	 * the transport is going away.
	 */
	if (timer_pending(&transport->T3_rtx_timer) &&
	    del_timer(&transport->T3_rtx_timer))
		sctp_transport_put(transport);

	/* Delete the ICMP proto unreachable timer if it's active. */
	if (timer_pending(&transport->proto_unreach_timer) &&
	    del_timer(&transport->proto_unreach_timer))
		sctp_association_put(transport->asoc);

	sctp_transport_put(transport);
}

/* Destroy the transport data structure.
 * Assumes there are no more users of this structure.
 */
static void sctp_transport_destroy(struct sctp_transport *transport)
{
	SCTP_ASSERT(transport->dead, "Transport is not dead", return);

	if (transport->asoc)
		sctp_association_put(transport->asoc);

	sctp_packet_free(&transport->packet);

	dst_release(transport->dst);
	kfree(transport);
	SCTP_DBG_OBJCNT_DEC(transport);
}

/* Start the T3_rtx timer if it is not already running, and update the
 * heartbeat timer.  This routine is called every time a DATA chunk is sent.
 */
void sctp_transport_reset_timers(struct sctp_transport *transport)
{
	/* RFC 2960 6.3.2 Retransmission Timer Rules
	 *
	 * R1) Every time a DATA chunk is sent to any address (including a
	 * retransmission), if the T3-rtx timer of that address is not running
	 * start it running so that it will expire after the RTO of that
	 * address.
	 */

	if (!timer_pending(&transport->T3_rtx_timer))
		if (!mod_timer(&transport->T3_rtx_timer,
			       jiffies + transport->rto))
			sctp_transport_hold(transport);

	/* When a data chunk is sent, reset the heartbeat interval.  */
	if (!mod_timer(&transport->hb_timer,
		       sctp_transport_timeout(transport)))
		sctp_transport_hold(transport);
}

/* This transport has been assigned to an association.
 * Initialize fields from the association or from the sock itself.
 * Register the reference count in the association.
 */
void sctp_transport_set_owner(struct sctp_transport *transport,
			      struct sctp_association *asoc)
{
	transport->asoc = asoc;
	sctp_association_hold(asoc);
}

/* Initialize the pmtu of a transport. */
void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk)
{
	/* If we don't have a fresh route, look one up */
	if (!transport->dst || transport->dst->obsolete > 1) {
		dst_release(transport->dst);
		transport->af_specific->get_dst(transport, &transport->saddr,
						&transport->fl, sk);
	}

	if (transport->dst) {
		transport->pathmtu = dst_mtu(transport->dst);
	} else
		transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
}

void sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
{
	struct dst_entry *dst;

	if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) {
		pr_warn("%s: Reported pmtu %d too low, using default minimum of %d\n",
			__func__, pmtu,
			SCTP_DEFAULT_MINSEGMENT);
		/* Use default minimum segment size and disable
		 * pmtu discovery on this transport.
		 */
		t->pathmtu = SCTP_DEFAULT_MINSEGMENT;
	} else {
		t->pathmtu = pmtu;
	}

	dst = sctp_transport_dst_check(t);
	if (dst)
		dst->ops->update_pmtu(dst, pmtu);
}

/* Caches the dst entry and source address for a transport's destination
 * address.
 */
void sctp_transport_route(struct sctp_transport *transport,
			  union sctp_addr *saddr, struct sctp_sock *opt)
{
	struct sctp_association *asoc = transport->asoc;
	struct sctp_af *af = transport->af_specific;

	af->get_dst(transport, saddr, &transport->fl, sctp_opt2sk(opt));

	if (saddr)
		memcpy(&transport->saddr, saddr, sizeof(union sctp_addr));
	else
		af->get_saddr(opt, transport, &transport->fl);

	if ((transport->param_flags & SPP_PMTUD_DISABLE) && transport->pathmtu) {
		return;
	}
	if (transport->dst) {
		transport->pathmtu = dst_mtu(transport->dst);

		/* Initialize sk->sk_rcv_saddr, if the transport is the
		 * association's active path for getsockname().
		 */
		if (asoc && (!asoc->peer.primary_path ||
				(transport == asoc->peer.active_path)))
			opt->pf->af->to_sk_saddr(&transport->saddr,
						 asoc->base.sk);
	} else
		transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
}

/* Hold a reference to a transport.  */
void sctp_transport_hold(struct sctp_transport *transport)
{
	atomic_inc(&transport->refcnt);
}

/* Release a reference to a transport and clean up
 * if there are no more references.
 */
void sctp_transport_put(struct sctp_transport *transport)
{
	if (atomic_dec_and_test(&transport->refcnt))
		sctp_transport_destroy(transport);
}
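
/* Illustrative sketch (not part of the original file): how the timer code
 * in this file pairs these helpers.  A reference is taken whenever a timer
 * is armed, so the transport cannot be destroyed while the timer is
 * pending, and the reference is dropped by whoever stops the timer or by
 * the expired timer handler itself.
 *
 *	if (!mod_timer(&t->hb_timer, expires))
 *		sctp_transport_hold(t);	(timer was not pending: new ref)
 *	...
 *	if (del_timer(&t->hb_timer))
 *		sctp_transport_put(t);	(we cancelled it: drop that ref)
 */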

/* Update transport's RTO based on the newly calculated RTT. */
void sctp_transport_update_rto(struct sctp_transport *tp, __u32 rtt)
{
	/* Check for valid transport.  */
	SCTP_ASSERT(tp, "NULL transport", return);

	/* We should not be doing any RTO updates unless rto_pending is set.  */
	SCTP_ASSERT(tp->rto_pending, "rto_pending not set", return);

	if (tp->rttvar || tp->srtt) {
		/* 6.3.1 C3) When a new RTT measurement R' is made, set
		 * RTTVAR <- (1 - RTO.Beta) * RTTVAR + RTO.Beta * |SRTT - R'|
		 * SRTT <- (1 - RTO.Alpha) * SRTT + RTO.Alpha * R'
		 */

		/* Note:  The above algorithm has been rewritten to
		 * express rto_beta and rto_alpha as inverse powers
		 * of two.
		 * For example, assuming the default value of RTO.Alpha of
		 * 1/8, rto_alpha would be expressed as 3.
		 */
		tp->rttvar = tp->rttvar - (tp->rttvar >> sctp_rto_beta)
			+ ((abs(tp->srtt - rtt)) >> sctp_rto_beta);
		tp->srtt = tp->srtt - (tp->srtt >> sctp_rto_alpha)
			+ (rtt >> sctp_rto_alpha);
	} else {
		/* 6.3.1 C2) When the first RTT measurement R is made, set
		 * SRTT <- R, RTTVAR <- R/2.
		 */
		tp->srtt = rtt;
		tp->rttvar = rtt >> 1;
	}

	/* 6.3.1 G1) Whenever RTTVAR is computed, if RTTVAR = 0, then
	 * adjust RTTVAR <- G, where G is the CLOCK GRANULARITY.
	 */
	if (tp->rttvar == 0)
		tp->rttvar = SCTP_CLOCK_GRANULARITY;

	/* 6.3.1 C3) After the computation, update RTO <- SRTT + 4 * RTTVAR. */
	tp->rto = tp->srtt + (tp->rttvar << 2);

	/* 6.3.1 C6) Whenever RTO is computed, if it is less than RTO.Min
	 * seconds then it is rounded up to RTO.Min seconds.
	 */
	if (tp->rto < tp->asoc->rto_min)
		tp->rto = tp->asoc->rto_min;

	/* 6.3.1 C7) A maximum value may be placed on RTO provided it is
	 * at least RTO.max seconds.
	 */
	if (tp->rto > tp->asoc->rto_max)
		tp->rto = tp->asoc->rto_max;

	tp->rtt = rtt;

	/* Reset rto_pending so that a new RTT measurement is started when a
	 * new data chunk is sent.
	 */
	tp->rto_pending = 0;

	SCTP_DEBUG_PRINTK("%s: transport: %p, rtt: %d, srtt: %d "
			  "rttvar: %d, rto: %ld\n", __func__,
			  tp, rtt, tp->srtt, tp->rttvar, tp->rto);
}
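
/* Worked example (illustrative, not part of the original file), using the
 * default parameters sctp_rto_alpha = 3 (RTO.Alpha = 1/8) and
 * sctp_rto_beta = 2 (RTO.Beta = 1/4), with rtt in jiffies:
 *
 *	First measurement, rtt = 100:
 *		srtt   = 100
 *		rttvar = 100 >> 1 = 50
 *		rto    = 100 + (50 << 2) = 300
 *
 *	Next measurement, rtt = 120:
 *		rttvar = 50 - (50 >> 2) + (|100 - 120| >> 2) = 50 - 12 + 5 = 43
 *		srtt   = 100 - (100 >> 3) + (120 >> 3) = 100 - 12 + 15 = 103
 *		rto    = 103 + (43 << 2) = 275
 *
 * The result is then clamped to [rto_min, rto_max] as above.
 */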

/* This routine updates the transport's cwnd and partial_bytes_acked
 * parameters based on the bytes acked in the received SACK.
 */
void sctp_transport_raise_cwnd(struct sctp_transport *transport,
			       __u32 sack_ctsn, __u32 bytes_acked)
{
	struct sctp_association *asoc = transport->asoc;
	__u32 cwnd, ssthresh, flight_size, pba, pmtu;

	cwnd = transport->cwnd;
	flight_size = transport->flight_size;

	/* See if we need to exit Fast Recovery first */
	if (asoc->fast_recovery &&
	    TSN_lte(asoc->fast_recovery_exit, sack_ctsn))
		asoc->fast_recovery = 0;

	/* The appropriate cwnd increase algorithm is performed if, and only
	 * if the cumulative TSN would have advanced and the congestion window
	 * is being fully utilized.
	 */
	if (TSN_lte(sack_ctsn, transport->asoc->ctsn_ack_point) ||
	    (flight_size < cwnd))
		return;

	ssthresh = transport->ssthresh;
	pba = transport->partial_bytes_acked;
	pmtu = transport->asoc->pathmtu;

	if (cwnd <= ssthresh) {
		/* RFC 4960 7.2.1
		 * o  When cwnd is less than or equal to ssthresh, an SCTP
		 *    endpoint MUST use the slow-start algorithm to increase
		 *    cwnd only if the current congestion window is being fully
		 *    utilized, an incoming SACK advances the Cumulative TSN
		 *    Ack Point, and the data sender is not in Fast Recovery.
		 *    Only when these three conditions are met can the cwnd be
		 *    increased; otherwise, the cwnd MUST not be increased.
		 *    If these conditions are met, then cwnd MUST be increased
		 *    by, at most, the lesser of 1) the total size of the
		 *    previously outstanding DATA chunk(s) acknowledged, and
		 *    2) the destination's path MTU.  This upper bound protects
		 *    against the ACK-Splitting attack outlined in [SAVAGE99].
		 */
		if (asoc->fast_recovery)
			return;

		if (bytes_acked > pmtu)
			cwnd += pmtu;
		else
			cwnd += bytes_acked;
		SCTP_DEBUG_PRINTK("%s: SLOW START: transport: %p, "
				  "bytes_acked: %d, cwnd: %d, ssthresh: %d, "
				  "flight_size: %d, pba: %d\n",
				  __func__,
				  transport, bytes_acked, cwnd,
				  ssthresh, flight_size, pba);
	} else {
		/* RFC 2960 7.2.2 Whenever cwnd is greater than ssthresh,
		 * upon each SACK arrival that advances the Cumulative TSN Ack
		 * Point, increase partial_bytes_acked by the total number of
		 * bytes of all new chunks acknowledged in that SACK including
		 * chunks acknowledged by the new Cumulative TSN Ack and by
		 * Gap Ack Blocks.
		 *
		 * When partial_bytes_acked is equal to or greater than cwnd
		 * and before the arrival of the SACK the sender had cwnd or
		 * more bytes of data outstanding (i.e., before arrival of the
		 * SACK, flightsize was greater than or equal to cwnd),
		 * increase cwnd by MTU, and reset partial_bytes_acked to
		 * (partial_bytes_acked - cwnd).
		 */
		pba += bytes_acked;
		if (pba >= cwnd) {
			cwnd += pmtu;
			pba = ((cwnd < pba) ? (pba - cwnd) : 0);
		}
		SCTP_DEBUG_PRINTK("%s: CONGESTION AVOIDANCE: "
				  "transport: %p, bytes_acked: %d, cwnd: %d, "
				  "ssthresh: %d, flight_size: %d, pba: %d\n",
				  __func__,
				  transport, bytes_acked, cwnd,
				  ssthresh, flight_size, pba);
	}

	transport->cwnd = cwnd;
	transport->partial_bytes_acked = pba;
}
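
/* Worked example (illustrative, not part of the original file), assuming
 * pathmtu = 1500 and a SACK that newly acknowledges bytes_acked = 4000
 * with the window fully utilized:
 *
 *	Slow start (cwnd <= ssthresh), cwnd = 4500:
 *		bytes_acked > pmtu, so cwnd += 1500  ->  cwnd = 6000
 *		(growth is capped at one MTU per SACK to resist
 *		ACK splitting)
 *
 *	Congestion avoidance (cwnd > ssthresh), cwnd = 15000, pba = 12000:
 *		pba += 4000  ->  16000 >= cwnd, so
 *		cwnd += 1500  ->  16500, and pba is clamped to 0
 *
 * i.e. in congestion avoidance cwnd grows by roughly one MTU per
 * window's worth of acknowledged data.
 */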

/* This routine is used to lower the transport's cwnd when congestion is
 * detected.
 */
void sctp_transport_lower_cwnd(struct sctp_transport *transport,
			       sctp_lower_cwnd_t reason)
{
	struct sctp_association *asoc = transport->asoc;

	switch (reason) {
	case SCTP_LOWER_CWND_T3_RTX:
		/* RFC 2960 Section 7.2.3, sctpimpguide
		 * When the T3-rtx timer expires on an address, SCTP should
		 * perform slow start by:
		 *      ssthresh = max(cwnd/2, 4*MTU)
		 *      cwnd = 1*MTU
		 *      partial_bytes_acked = 0
		 */
		transport->ssthresh = max(transport->cwnd/2,
					  4*asoc->pathmtu);
		transport->cwnd = asoc->pathmtu;

		/* T3-rtx also clears fast recovery */
		asoc->fast_recovery = 0;
		break;

	case SCTP_LOWER_CWND_FAST_RTX:
		/* RFC 2960 7.2.4 Adjust the ssthresh and cwnd of the
		 * destination address(es) to which the missing DATA chunks
		 * were last sent, according to the formula described in
		 * Section 7.2.3.
		 *
		 * RFC 2960 7.2.3, sctpimpguide: Upon detection of packet
		 * losses from SACK (see Section 7.2.4), an endpoint
		 * should do the following:
		 *      ssthresh = max(cwnd/2, 4*MTU)
		 *      cwnd = ssthresh
		 *      partial_bytes_acked = 0
		 */
		if (asoc->fast_recovery)
			return;

		/* Mark Fast recovery */
		asoc->fast_recovery = 1;
		asoc->fast_recovery_exit = asoc->next_tsn - 1;

		transport->ssthresh = max(transport->cwnd/2,
					  4*asoc->pathmtu);
		transport->cwnd = transport->ssthresh;
		break;

	case SCTP_LOWER_CWND_ECNE:
		/* RFC 2481 Section 6.1.2.
		 * If the sender receives an ECN-Echo ACK packet
		 * then the sender knows that congestion was encountered in the
		 * network on the path from the sender to the receiver. The
		 * indication of congestion should be treated just as a
		 * congestion loss in non-ECN Capable TCP. That is, the TCP
		 * source halves the congestion window "cwnd" and reduces the
		 * slow start threshold "ssthresh".
		 * A critical condition is that TCP does not react to
		 * congestion indications more than once every window of
		 * data (or more loosely more than once every round-trip time).
		 */
		if (time_after(jiffies, transport->last_time_ecne_reduced +
					transport->rtt)) {
			transport->ssthresh = max(transport->cwnd/2,
						  4*asoc->pathmtu);
			transport->cwnd = transport->ssthresh;
			transport->last_time_ecne_reduced = jiffies;
		}
		break;

	case SCTP_LOWER_CWND_INACTIVE:
		/* RFC 2960 Section 7.2.1, sctpimpguide
		 * When the endpoint does not transmit data on a given
		 * transport address, the cwnd of the transport address
		 * should be adjusted to max(cwnd/2, 4*MTU) per RTO.
		 * NOTE: Although the draft recommends that this check needs
		 * to be done every RTO interval, we do it every heartbeat
		 * interval.
		 */
		transport->cwnd = max(transport->cwnd/2,
					 4*asoc->pathmtu);
		break;
	}

	transport->partial_bytes_acked = 0;
	SCTP_DEBUG_PRINTK("%s: transport: %p reason: %d cwnd: "
			  "%d ssthresh: %d\n", __func__,
			  transport, reason,
			  transport->cwnd, transport->ssthresh);
}
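
/* Summary of the adjustments above (illustrative, not part of the
 * original file); partial_bytes_acked is reset to 0 in every case:
 *
 *	T3_RTX:    ssthresh = max(cwnd/2, 4*MTU), cwnd = 1*MTU
 *	FAST_RTX:  ssthresh = max(cwnd/2, 4*MTU), cwnd = ssthresh
 *	ECNE:      same as FAST_RTX, but at most once per RTT
 *	INACTIVE:  cwnd = max(cwnd/2, 4*MTU), ssthresh unchanged
 */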

/* Apply Max.Burst limit to the congestion window:
 * sctpimpguide-05 2.14.2
 * D) When the time comes for the sender to
 * transmit new DATA chunks, the protocol parameter Max.Burst MUST
 * first be applied to limit how many new DATA chunks may be sent.
 * The limit is applied by adjusting cwnd as follows:
 * 	if ((flightsize + Max.Burst * MTU) < cwnd)
 * 		cwnd = flightsize + Max.Burst * MTU
 */

void sctp_transport_burst_limited(struct sctp_transport *t)
{
	struct sctp_association *asoc = t->asoc;
	u32 old_cwnd = t->cwnd;
	u32 max_burst_bytes;

	if (t->burst_limited)
		return;

	max_burst_bytes = t->flight_size + (asoc->max_burst * asoc->pathmtu);
	if (max_burst_bytes < old_cwnd) {
		t->cwnd = max_burst_bytes;
		t->burst_limited = old_cwnd;
	}
}
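
/* Worked example (illustrative, not part of the original file), with
 * flight_size = 3000, max_burst = 4 and pathmtu = 1500:
 *
 *	max_burst_bytes = 3000 + 4 * 1500 = 9000
 *	cwnd = 12000  ->  clamped to 9000, burst_limited = 12000
 *
 * so at most 4 new MTU-sized packets can be burst onto the wire;
 * sctp_transport_burst_reset() below restores cwnd to 12000 afterwards.
 */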

/* Restore the old cwnd congestion window, after the burst has had its
 * desired effect.
 */
void sctp_transport_burst_reset(struct sctp_transport *t)
{
	if (t->burst_limited) {
		t->cwnd = t->burst_limited;
		t->burst_limited = 0;
	}
}

/* What is the next timeout value for this transport? */
unsigned long sctp_transport_timeout(struct sctp_transport *t)
{
	unsigned long timeout;
	timeout = t->rto + sctp_jitter(t->rto);
	if (t->state != SCTP_UNCONFIRMED)
		timeout += t->hbinterval;
	timeout += jiffies;
	return timeout;
}
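
/* Illustrative note (not part of the original file): for a confirmed
 * transport the next heartbeat fires at roughly
 *
 *	jiffies + hbinterval + rto (plus a random jitter from sctp_jitter())
 *
 * e.g. with hbinterval = 30s and rto = 300ms, about 30.3s from now.
 * Unconfirmed transports skip the hbinterval term so they are probed
 * more aggressively.
 */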

/* Reset transport variables to their initial values */
void sctp_transport_reset(struct sctp_transport *t)
{
	struct sctp_association *asoc = t->asoc;

	/* RFC 2960 (bis), Section 5.2.4
	 * All the congestion control parameters (e.g., cwnd, ssthresh)
	 * related to this peer MUST be reset to their initial values
	 * (see Section 6.2.1)
	 */
	t->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380));
	t->burst_limited = 0;
	t->ssthresh = asoc->peer.i.a_rwnd;
	t->rto = asoc->rto_initial;
	t->rtt = 0;
	t->srtt = 0;
	t->rttvar = 0;

	/* Reset these additional variables so that we have a clean
	 * slate.
	 */
	t->partial_bytes_acked = 0;
	t->flight_size = 0;
	t->error_count = 0;
	t->rto_pending = 0;
	t->hb_sent = 0;

	/* Initialize the state information for SFR-CACC */
	t->cacc.changeover_active = 0;
	t->cacc.cycling_changeover = 0;
	t->cacc.next_tsn_at_change = 0;
	t->cacc.cacc_saw_newack = 0;
}
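
/* Worked example (illustrative, not part of the original file) for the
 * RFC 4960 7.2.1 initial cwnd formula used above, with pathmtu = 1500:
 *
 *	cwnd = min(4 * 1500, max(2 * 1500, 4380))
 *	     = min(6000, 4380) = 4380
 *
 * i.e. the initial window is min(4*MTU, max(2*MTU, 4380 bytes)).
 */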

/* Schedule retransmission on the given transport */
void sctp_transport_immediate_rtx(struct sctp_transport *t)
{
	/* Stop pending T3_rtx_timer */
	if (timer_pending(&t->T3_rtx_timer)) {
		(void)del_timer(&t->T3_rtx_timer);
		sctp_transport_put(t);
	}
	sctp_retransmit(&t->asoc->outqueue, t, SCTP_RTXR_T3_RTX);
	if (!timer_pending(&t->T3_rtx_timer)) {
		if (!mod_timer(&t->T3_rtx_timer, jiffies + t->rto))
			sctp_transport_hold(t);
	}
}
v4.6
/* SCTP kernel implementation
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001-2003 International Business Machines Corp.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This file is part of the SCTP kernel implementation
 *
 * This module provides the abstraction for an SCTP transport representing
 * a remote transport address.  For local transport addresses, we just use
 * union sctp_addr.
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, see
 * <http://www.gnu.org/licenses/>.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson          <karl@athena.chicago.il.us>
 *    Jon Grimm             <jgrimm@us.ibm.com>
 *    Xingang Guo           <xingang.guo@intel.com>
 *    Hui Huang             <hui.huang@nokia.com>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 *    Ardelle Fan           <ardelle.fan@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/random.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

/* 1st Level Abstractions.  */

/* Initialize a new transport from provided memory.  */
static struct sctp_transport *sctp_transport_init(struct net *net,
						  struct sctp_transport *peer,
						  const union sctp_addr *addr,
						  gfp_t gfp)
{
	/* Copy in the address.  */
	peer->ipaddr = *addr;
	peer->af_specific = sctp_get_af_specific(addr->sa.sa_family);
	memset(&peer->saddr, 0, sizeof(union sctp_addr));

	peer->sack_generation = 0;

	/* From 6.3.1 RTO Calculation:
	 *
	 * C1) Until an RTT measurement has been made for a packet sent to the
	 * given destination transport address, set RTO to the protocol
	 * parameter 'RTO.Initial'.
	 */
	peer->rto = msecs_to_jiffies(net->sctp.rto_initial);

	peer->last_time_heard = ktime_set(0, 0);
	peer->last_time_ecne_reduced = jiffies;

	peer->param_flags = SPP_HB_DISABLE |
			    SPP_PMTUD_ENABLE |
			    SPP_SACKDELAY_ENABLE;

	/* Initialize the default path max_retrans.  */
	peer->pathmaxrxt  = net->sctp.max_retrans_path;
	peer->pf_retrans  = net->sctp.pf_retrans;

	INIT_LIST_HEAD(&peer->transmitted);
	INIT_LIST_HEAD(&peer->send_ready);
	INIT_LIST_HEAD(&peer->transports);

	setup_timer(&peer->T3_rtx_timer, sctp_generate_t3_rtx_event,
			(unsigned long)peer);
	setup_timer(&peer->hb_timer, sctp_generate_heartbeat_event,
			(unsigned long)peer);
	setup_timer(&peer->proto_unreach_timer,
		    sctp_generate_proto_unreach_event, (unsigned long)peer);

	/* Initialize the 64-bit random nonce sent with heartbeat. */
	get_random_bytes(&peer->hb_nonce, sizeof(peer->hb_nonce));

	atomic_set(&peer->refcnt, 1);

	return peer;
}

/* Allocate and initialize a new transport.  */
struct sctp_transport *sctp_transport_new(struct net *net,
					  const union sctp_addr *addr,
					  gfp_t gfp)
{
	struct sctp_transport *transport;

	transport = kzalloc(sizeof(*transport), gfp);
	if (!transport)
		goto fail;

	if (!sctp_transport_init(net, transport, addr, gfp))
		goto fail_init;

	SCTP_DBG_OBJCNT_INC(transport);

	return transport;

fail_init:
	kfree(transport);

fail:
	return NULL;
}

/* This transport is no longer needed.  Free it up if possible, or
 * delay that until its last reference is dropped.
 */
void sctp_transport_free(struct sctp_transport *transport)
{
	/* Try to delete the heartbeat timer.  */
	if (del_timer(&transport->hb_timer))
		sctp_transport_put(transport);

	/* Delete the T3_rtx timer if it's active.
	 * There is no point in not doing this now and letting the
	 * structure hang around in memory since we know
	 * the transport is going away.
	 */
	if (del_timer(&transport->T3_rtx_timer))
		sctp_transport_put(transport);

	/* Delete the ICMP proto unreachable timer if it's active. */
	if (del_timer(&transport->proto_unreach_timer))
		sctp_association_put(transport->asoc);

	sctp_transport_put(transport);
}

static void sctp_transport_destroy_rcu(struct rcu_head *head)
{
	struct sctp_transport *transport;

	transport = container_of(head, struct sctp_transport, rcu);

	dst_release(transport->dst);
	kfree(transport);
	SCTP_DBG_OBJCNT_DEC(transport);
}

/* Destroy the transport data structure.
 * Assumes there are no more users of this structure.
 */
static void sctp_transport_destroy(struct sctp_transport *transport)
{
	if (unlikely(atomic_read(&transport->refcnt))) {
		WARN(1, "Attempt to destroy undead transport %p!\n", transport);
		return;
	}

	sctp_packet_free(&transport->packet);

	if (transport->asoc)
		sctp_association_put(transport->asoc);

	call_rcu(&transport->rcu, sctp_transport_destroy_rcu);
}
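
/* Illustrative note (not part of the original file): unlike the v3.5.6
 * code, freeing is deferred through call_rcu() because lookup paths may
 * walk transport lists under rcu_read_lock() without holding a
 * reference, e.g. (sketch):
 *
 *	rcu_read_lock();
 *	list_for_each_entry_rcu(t, &asoc->peer.transport_addr_list,
 *				transports)
 *		... read-only use of t ...
 *	rcu_read_unlock();
 *
 * kfree() may only run once all such readers have left their read-side
 * critical sections, which is what the RCU grace period guarantees.
 */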

/* Start the T3_rtx timer if it is not already running.  This routine is
 * called every time a DATA chunk is sent.
 */
void sctp_transport_reset_t3_rtx(struct sctp_transport *transport)
{
	/* RFC 2960 6.3.2 Retransmission Timer Rules
	 *
	 * R1) Every time a DATA chunk is sent to any address (including a
	 * retransmission), if the T3-rtx timer of that address is not running
	 * start it running so that it will expire after the RTO of that
	 * address.
	 */

	if (!timer_pending(&transport->T3_rtx_timer))
		if (!mod_timer(&transport->T3_rtx_timer,
			       jiffies + transport->rto))
			sctp_transport_hold(transport);
}

void sctp_transport_reset_hb_timer(struct sctp_transport *transport)
{
	unsigned long expires;

	/* When a data chunk is sent, reset the heartbeat interval.  */
	expires = jiffies + sctp_transport_timeout(transport);
	if (time_before(transport->hb_timer.expires, expires) &&
	    !mod_timer(&transport->hb_timer,
		       expires + prandom_u32_max(transport->rto)))
		sctp_transport_hold(transport);
}
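
/* Illustrative note (not part of the original file): prandom_u32_max(x)
 * returns a uniform value in [0, x), so the heartbeat fires a random
 * 0..rto-1 jiffies after the base expiry.  The time_before() guard
 * re-arms the timer only when the new expiry is later than the one
 * already programmed, sparing a mod_timer() call per DATA chunk.
 */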

/* This transport has been assigned to an association.
 * Initialize fields from the association or from the sock itself.
 * Register the reference count in the association.
 */
void sctp_transport_set_owner(struct sctp_transport *transport,
			      struct sctp_association *asoc)
{
	transport->asoc = asoc;
	sctp_association_hold(asoc);
}

/* Initialize the pmtu of a transport. */
void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk)
{
	/* If we don't have a fresh route, look one up */
	if (!transport->dst || transport->dst->obsolete) {
		dst_release(transport->dst);
		transport->af_specific->get_dst(transport, &transport->saddr,
						&transport->fl, sk);
	}

	if (transport->dst) {
		transport->pathmtu = WORD_TRUNC(dst_mtu(transport->dst));
	} else
		transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
}

void sctp_transport_update_pmtu(struct sock *sk, struct sctp_transport *t, u32 pmtu)
{
	struct dst_entry *dst;

	if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) {
		pr_warn("%s: Reported pmtu %d too low, using default minimum of %d\n",
			__func__, pmtu,
			SCTP_DEFAULT_MINSEGMENT);
		/* Use default minimum segment size and disable
		 * pmtu discovery on this transport.
		 */
		t->pathmtu = SCTP_DEFAULT_MINSEGMENT;
	} else {
		t->pathmtu = pmtu;
	}

	dst = sctp_transport_dst_check(t);
	if (!dst)
		t->af_specific->get_dst(t, &t->saddr, &t->fl, sk);

	if (dst) {
		dst->ops->update_pmtu(dst, sk, NULL, pmtu);

		dst = sctp_transport_dst_check(t);
		if (!dst)
			t->af_specific->get_dst(t, &t->saddr, &t->fl, sk);
	}
}

/* Caches the dst entry and source address for a transport's destination
 * address.
 */
void sctp_transport_route(struct sctp_transport *transport,
			  union sctp_addr *saddr, struct sctp_sock *opt)
{
	struct sctp_association *asoc = transport->asoc;
	struct sctp_af *af = transport->af_specific;

	af->get_dst(transport, saddr, &transport->fl, sctp_opt2sk(opt));

	if (saddr)
		memcpy(&transport->saddr, saddr, sizeof(union sctp_addr));
	else
		af->get_saddr(opt, transport, &transport->fl);

	if ((transport->param_flags & SPP_PMTUD_DISABLE) && transport->pathmtu) {
		return;
	}
	if (transport->dst) {
		transport->pathmtu = WORD_TRUNC(dst_mtu(transport->dst));

		/* Initialize sk->sk_rcv_saddr, if the transport is the
		 * association's active path for getsockname().
		 */
		if (asoc && (!asoc->peer.primary_path ||
				(transport == asoc->peer.active_path)))
			opt->pf->to_sk_saddr(&transport->saddr,
					     asoc->base.sk);
	} else
		transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
}

/* Hold a reference to a transport.  */
int sctp_transport_hold(struct sctp_transport *transport)
{
	return atomic_add_unless(&transport->refcnt, 1, 0);
}

/* Release a reference to a transport and clean up
 * if there are no more references.
 */
void sctp_transport_put(struct sctp_transport *transport)
{
	if (atomic_dec_and_test(&transport->refcnt))
		sctp_transport_destroy(transport);
}
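
/* Illustrative note (not part of the original file): unlike the v3.5.6
 * variant, hold can now fail.  atomic_add_unless(&refcnt, 1, 0) refuses
 * to increment a zero refcount, so a transport already on its way to
 * destruction cannot be resurrected.  Lockless lookups are expected to
 * check the result (sketch):
 *
 *	rcu_read_lock();
 *	t = ...lookup...;
 *	if (t && !sctp_transport_hold(t))
 *		t = NULL;	(lost the race with the last put)
 *	rcu_read_unlock();
 */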

/* Update transport's RTO based on the newly calculated RTT. */
void sctp_transport_update_rto(struct sctp_transport *tp, __u32 rtt)
{
	if (unlikely(!tp->rto_pending))
		/* We should not be doing any RTO updates unless rto_pending is set.  */
		pr_debug("%s: rto_pending not set on transport %p!\n", __func__, tp);

	if (tp->rttvar || tp->srtt) {
		struct net *net = sock_net(tp->asoc->base.sk);
		/* 6.3.1 C3) When a new RTT measurement R' is made, set
		 * RTTVAR <- (1 - RTO.Beta) * RTTVAR + RTO.Beta * |SRTT - R'|
		 * SRTT <- (1 - RTO.Alpha) * SRTT + RTO.Alpha * R'
		 */

		/* Note:  The above algorithm has been rewritten to
		 * express rto_beta and rto_alpha as inverse powers
		 * of two.
		 * For example, assuming the default value of RTO.Alpha of
		 * 1/8, rto_alpha would be expressed as 3.
		 */
		tp->rttvar = tp->rttvar - (tp->rttvar >> net->sctp.rto_beta)
			+ (((__u32)abs((__s64)tp->srtt - (__s64)rtt)) >> net->sctp.rto_beta);
		tp->srtt = tp->srtt - (tp->srtt >> net->sctp.rto_alpha)
			+ (rtt >> net->sctp.rto_alpha);
	} else {
		/* 6.3.1 C2) When the first RTT measurement R is made, set
		 * SRTT <- R, RTTVAR <- R/2.
		 */
		tp->srtt = rtt;
		tp->rttvar = rtt >> 1;
	}

	/* 6.3.1 G1) Whenever RTTVAR is computed, if RTTVAR = 0, then
	 * adjust RTTVAR <- G, where G is the CLOCK GRANULARITY.
	 */
	if (tp->rttvar == 0)
		tp->rttvar = SCTP_CLOCK_GRANULARITY;

	/* 6.3.1 C3) After the computation, update RTO <- SRTT + 4 * RTTVAR. */
	tp->rto = tp->srtt + (tp->rttvar << 2);

	/* 6.3.1 C6) Whenever RTO is computed, if it is less than RTO.Min
	 * seconds then it is rounded up to RTO.Min seconds.
	 */
	if (tp->rto < tp->asoc->rto_min)
		tp->rto = tp->asoc->rto_min;

	/* 6.3.1 C7) A maximum value may be placed on RTO provided it is
	 * at least RTO.max seconds.
	 */
	if (tp->rto > tp->asoc->rto_max)
		tp->rto = tp->asoc->rto_max;

	sctp_max_rto(tp->asoc, tp);
	tp->rtt = rtt;

	/* Reset rto_pending so that a new RTT measurement is started when a
	 * new data chunk is sent.
	 */
	tp->rto_pending = 0;

	pr_debug("%s: transport:%p, rtt:%d, srtt:%d rttvar:%d, rto:%ld\n",
		 __func__, tp, rtt, tp->srtt, tp->rttvar, tp->rto);
}

/* This routine updates the transport's cwnd and partial_bytes_acked
 * parameters based on the bytes acked in the received SACK.
 */
void sctp_transport_raise_cwnd(struct sctp_transport *transport,
			       __u32 sack_ctsn, __u32 bytes_acked)
{
	struct sctp_association *asoc = transport->asoc;
	__u32 cwnd, ssthresh, flight_size, pba, pmtu;

	cwnd = transport->cwnd;
	flight_size = transport->flight_size;

	/* See if we need to exit Fast Recovery first */
	if (asoc->fast_recovery &&
	    TSN_lte(asoc->fast_recovery_exit, sack_ctsn))
		asoc->fast_recovery = 0;

	/* The appropriate cwnd increase algorithm is performed if, and only
	 * if the cumulative TSN would have advanced and the congestion window
	 * is being fully utilized.
	 */
	if (TSN_lte(sack_ctsn, transport->asoc->ctsn_ack_point) ||
	    (flight_size < cwnd))
		return;

	ssthresh = transport->ssthresh;
	pba = transport->partial_bytes_acked;
	pmtu = transport->asoc->pathmtu;

	if (cwnd <= ssthresh) {
		/* RFC 4960 7.2.1
		 * o  When cwnd is less than or equal to ssthresh, an SCTP
		 *    endpoint MUST use the slow-start algorithm to increase
		 *    cwnd only if the current congestion window is being fully
		 *    utilized, an incoming SACK advances the Cumulative TSN
		 *    Ack Point, and the data sender is not in Fast Recovery.
		 *    Only when these three conditions are met can the cwnd be
		 *    increased; otherwise, the cwnd MUST not be increased.
		 *    If these conditions are met, then cwnd MUST be increased
		 *    by, at most, the lesser of 1) the total size of the
		 *    previously outstanding DATA chunk(s) acknowledged, and
		 *    2) the destination's path MTU.  This upper bound protects
		 *    against the ACK-Splitting attack outlined in [SAVAGE99].
		 */
		if (asoc->fast_recovery)
			return;

		if (bytes_acked > pmtu)
			cwnd += pmtu;
		else
			cwnd += bytes_acked;

		pr_debug("%s: slow start: transport:%p, bytes_acked:%d, "
			 "cwnd:%d, ssthresh:%d, flight_size:%d, pba:%d\n",
			 __func__, transport, bytes_acked, cwnd, ssthresh,
			 flight_size, pba);
	} else {
		/* RFC 2960 7.2.2 Whenever cwnd is greater than ssthresh,
		 * upon each SACK arrival that advances the Cumulative TSN Ack
		 * Point, increase partial_bytes_acked by the total number of
		 * bytes of all new chunks acknowledged in that SACK including
		 * chunks acknowledged by the new Cumulative TSN Ack and by
		 * Gap Ack Blocks.
		 *
		 * When partial_bytes_acked is equal to or greater than cwnd
		 * and before the arrival of the SACK the sender had cwnd or
		 * more bytes of data outstanding (i.e., before arrival of the
		 * SACK, flightsize was greater than or equal to cwnd),
		 * increase cwnd by MTU, and reset partial_bytes_acked to
		 * (partial_bytes_acked - cwnd).
		 */
		pba += bytes_acked;
		if (pba >= cwnd) {
			cwnd += pmtu;
			pba = ((cwnd < pba) ? (pba - cwnd) : 0);
		}

		pr_debug("%s: congestion avoidance: transport:%p, "
			 "bytes_acked:%d, cwnd:%d, ssthresh:%d, "
			 "flight_size:%d, pba:%d\n", __func__,
			 transport, bytes_acked, cwnd, ssthresh,
			 flight_size, pba);
	}

	transport->cwnd = cwnd;
	transport->partial_bytes_acked = pba;
}

/* This routine is used to lower the transport's cwnd when congestion is
 * detected.
 */
void sctp_transport_lower_cwnd(struct sctp_transport *transport,
			       sctp_lower_cwnd_t reason)
{
	struct sctp_association *asoc = transport->asoc;

	switch (reason) {
	case SCTP_LOWER_CWND_T3_RTX:
		/* RFC 2960 Section 7.2.3, sctpimpguide
		 * When the T3-rtx timer expires on an address, SCTP should
		 * perform slow start by:
		 *      ssthresh = max(cwnd/2, 4*MTU)
		 *      cwnd = 1*MTU
		 *      partial_bytes_acked = 0
		 */
		transport->ssthresh = max(transport->cwnd/2,
					  4*asoc->pathmtu);
		transport->cwnd = asoc->pathmtu;

		/* T3-rtx also clears fast recovery */
		asoc->fast_recovery = 0;
		break;

	case SCTP_LOWER_CWND_FAST_RTX:
		/* RFC 2960 7.2.4 Adjust the ssthresh and cwnd of the
		 * destination address(es) to which the missing DATA chunks
		 * were last sent, according to the formula described in
		 * Section 7.2.3.
		 *
		 * RFC 2960 7.2.3, sctpimpguide: Upon detection of packet
		 * losses from SACK (see Section 7.2.4), an endpoint
		 * should do the following:
		 *      ssthresh = max(cwnd/2, 4*MTU)
		 *      cwnd = ssthresh
		 *      partial_bytes_acked = 0
		 */
		if (asoc->fast_recovery)
			return;

		/* Mark Fast recovery */
		asoc->fast_recovery = 1;
		asoc->fast_recovery_exit = asoc->next_tsn - 1;

		transport->ssthresh = max(transport->cwnd/2,
					  4*asoc->pathmtu);
		transport->cwnd = transport->ssthresh;
		break;

	case SCTP_LOWER_CWND_ECNE:
		/* RFC 2481 Section 6.1.2.
		 * If the sender receives an ECN-Echo ACK packet
		 * then the sender knows that congestion was encountered in the
		 * network on the path from the sender to the receiver. The
		 * indication of congestion should be treated just as a
		 * congestion loss in non-ECN Capable TCP. That is, the TCP
		 * source halves the congestion window "cwnd" and reduces the
		 * slow start threshold "ssthresh".
		 * A critical condition is that TCP does not react to
		 * congestion indications more than once every window of
		 * data (or more loosely more than once every round-trip time).
		 */
		if (time_after(jiffies, transport->last_time_ecne_reduced +
					transport->rtt)) {
			transport->ssthresh = max(transport->cwnd/2,
						  4*asoc->pathmtu);
			transport->cwnd = transport->ssthresh;
			transport->last_time_ecne_reduced = jiffies;
		}
		break;

	case SCTP_LOWER_CWND_INACTIVE:
		/* RFC 2960 Section 7.2.1, sctpimpguide
		 * When the endpoint does not transmit data on a given
		 * transport address, the cwnd of the transport address
		 * should be adjusted to max(cwnd/2, 4*MTU) per RTO.
		 * NOTE: Although the draft recommends that this check needs
		 * to be done every RTO interval, we do it every heartbeat
		 * interval.
		 */
		transport->cwnd = max(transport->cwnd/2,
					 4*asoc->pathmtu);
		break;
	}

	transport->partial_bytes_acked = 0;

	pr_debug("%s: transport:%p, reason:%d, cwnd:%d, ssthresh:%d\n",
		 __func__, transport, reason, transport->cwnd,
		 transport->ssthresh);
}

/* Apply Max.Burst limit to the congestion window:
 * sctpimpguide-05 2.14.2
 * D) When the time comes for the sender to
 * transmit new DATA chunks, the protocol parameter Max.Burst MUST
 * first be applied to limit how many new DATA chunks may be sent.
 * The limit is applied by adjusting cwnd as follows:
 * 	if ((flightsize + Max.Burst * MTU) < cwnd)
 * 		cwnd = flightsize + Max.Burst * MTU
 */

void sctp_transport_burst_limited(struct sctp_transport *t)
{
	struct sctp_association *asoc = t->asoc;
	u32 old_cwnd = t->cwnd;
	u32 max_burst_bytes;

	if (t->burst_limited || asoc->max_burst == 0)
		return;

	max_burst_bytes = t->flight_size + (asoc->max_burst * asoc->pathmtu);
	if (max_burst_bytes < old_cwnd) {
		t->cwnd = max_burst_bytes;
		t->burst_limited = old_cwnd;
	}
}

/* Restore the old cwnd congestion window, after the burst has had its
 * desired effect.
 */
void sctp_transport_burst_reset(struct sctp_transport *t)
{
	if (t->burst_limited) {
		t->cwnd = t->burst_limited;
		t->burst_limited = 0;
	}
}

/* What is the next timeout value for this transport? */
unsigned long sctp_transport_timeout(struct sctp_transport *trans)
{
	/* RTO + timer slack +/- 50% of RTO */
	unsigned long timeout = trans->rto >> 1;

	if (trans->state != SCTP_UNCONFIRMED &&
	    trans->state != SCTP_PF)
		timeout += trans->hbinterval;

	return timeout;
}
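
/* Illustrative note (not part of the original file): combined with the
 * random term added in sctp_transport_reset_hb_timer() above, the next
 * heartbeat expiry becomes roughly
 *
 *	jiffies + hbinterval + rto/2 + [0, rto)
 *
 * i.e. hbinterval + rto with +/- 50% slack, matching the comment in this
 * function.  Note that unlike the v3.5.6 sctp_transport_timeout(), this
 * version returns a relative timeout; the caller adds jiffies.
 */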

/* Reset transport variables to their initial values */
void sctp_transport_reset(struct sctp_transport *t)
{
	struct sctp_association *asoc = t->asoc;

	/* RFC 2960 (bis), Section 5.2.4
	 * All the congestion control parameters (e.g., cwnd, ssthresh)
	 * related to this peer MUST be reset to their initial values
	 * (see Section 6.2.1)
	 */
	t->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380));
	t->burst_limited = 0;
	t->ssthresh = asoc->peer.i.a_rwnd;
	t->rto = asoc->rto_initial;
	sctp_max_rto(asoc, t);
	t->rtt = 0;
	t->srtt = 0;
	t->rttvar = 0;

	/* Reset these additional variables so that we have a clean
	 * slate.
	 */
	t->partial_bytes_acked = 0;
	t->flight_size = 0;
	t->error_count = 0;
	t->rto_pending = 0;
	t->hb_sent = 0;

	/* Initialize the state information for SFR-CACC */
	t->cacc.changeover_active = 0;
	t->cacc.cycling_changeover = 0;
	t->cacc.next_tsn_at_change = 0;
	t->cacc.cacc_saw_newack = 0;
}

/* Schedule retransmission on the given transport */
void sctp_transport_immediate_rtx(struct sctp_transport *t)
{
	/* Stop pending T3_rtx_timer */
	if (del_timer(&t->T3_rtx_timer))
		sctp_transport_put(t);

	sctp_retransmit(&t->asoc->outqueue, t, SCTP_RTXR_T3_RTX);
	if (!timer_pending(&t->T3_rtx_timer)) {
		if (!mod_timer(&t->T3_rtx_timer, jiffies + t->rto))
			sctp_transport_hold(t);
	}
}