// SPDX-License-Identifier: GPL-2.0-or-later
/* SCTP kernel implementation
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001-2003 International Business Machines Corp.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This file is part of the SCTP kernel implementation
 *
 * This module provides the abstraction for an SCTP transport representing
 * a remote transport address.  For local transport addresses, we just use
 * union sctp_addr.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson          <karl@athena.chicago.il.us>
 *    Jon Grimm             <jgrimm@us.ibm.com>
 *    Xingang Guo           <xingang.guo@intel.com>
 *    Hui Huang             <hui.huang@nokia.com>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 *    Ardelle Fan           <ardelle.fan@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/random.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

/* 1st Level Abstractions.  */

/* Initialize a new transport from provided memory.  */
static struct sctp_transport *sctp_transport_init(struct net *net,
						  struct sctp_transport *peer,
						  const union sctp_addr *addr,
						  gfp_t gfp)
{
	/* Copy in the address.  */
	peer->af_specific = sctp_get_af_specific(addr->sa.sa_family);
	memcpy(&peer->ipaddr, addr, peer->af_specific->sockaddr_len);
	memset(&peer->saddr, 0, sizeof(union sctp_addr));

	peer->sack_generation = 0;

	/* From 6.3.1 RTO Calculation:
	 *
	 * C1) Until an RTT measurement has been made for a packet sent to the
	 * given destination transport address, set RTO to the protocol
	 * parameter 'RTO.Initial'.
	 */
	peer->rto = msecs_to_jiffies(net->sctp.rto_initial);

	peer->last_time_heard = 0;
	peer->last_time_ecne_reduced = jiffies;

	peer->param_flags = SPP_HB_DISABLE |
			    SPP_PMTUD_ENABLE |
			    SPP_SACKDELAY_ENABLE;

	/* Initialize the default path max_retrans.  */
	peer->pathmaxrxt = net->sctp.max_retrans_path;
	peer->pf_retrans = net->sctp.pf_retrans;

	INIT_LIST_HEAD(&peer->transmitted);
	INIT_LIST_HEAD(&peer->send_ready);
	INIT_LIST_HEAD(&peer->transports);

	timer_setup(&peer->T3_rtx_timer, sctp_generate_t3_rtx_event, 0);
	timer_setup(&peer->hb_timer, sctp_generate_heartbeat_event, 0);
	timer_setup(&peer->reconf_timer, sctp_generate_reconf_event, 0);
	timer_setup(&peer->probe_timer, sctp_generate_probe_event, 0);
	timer_setup(&peer->proto_unreach_timer,
		    sctp_generate_proto_unreach_event, 0);

	/* Initialize the 64-bit random nonce sent with heartbeat. */
	get_random_bytes(&peer->hb_nonce, sizeof(peer->hb_nonce));

	refcount_set(&peer->refcnt, 1);

	return peer;
}

/* Allocate and initialize a new transport.  */
struct sctp_transport *sctp_transport_new(struct net *net,
					  const union sctp_addr *addr,
					  gfp_t gfp)
{
	struct sctp_transport *transport;

	transport = kzalloc(sizeof(*transport), gfp);
	if (!transport)
		goto fail;

	if (!sctp_transport_init(net, transport, addr, gfp))
		goto fail_init;

	SCTP_DBG_OBJCNT_INC(transport);

	return transport;

fail_init:
	kfree(transport);

fail:
	return NULL;
}
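
/* Example (an illustrative sketch, not code from this file): a caller such
 * as sctp_assoc_add_peer() pairs sctp_transport_new() with
 * sctp_transport_set_owner() and sctp_transport_route() before linking the
 * transport into the association's peer list:
 *
 *	peer = sctp_transport_new(net, addr, gfp);
 *	if (!peer)
 *		return NULL;
 *	sctp_transport_set_owner(peer, asoc);
 *	sctp_transport_route(peer, NULL, sctp_sk(asoc->base.sk));
 */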

/* This transport is no longer needed.  Free up if possible, or
 * delay until all the references to it have been released.
 */
void sctp_transport_free(struct sctp_transport *transport)
{
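	/* Each armed timer holds its own reference on the transport (taken
	 * when the timer is armed, e.g. in the sctp_transport_reset_*()
	 * helpers below), so every timer we manage to delete here must
	 * drop that reference via sctp_transport_put().
	 */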
	/* Try to delete the heartbeat timer.  */
	if (del_timer(&transport->hb_timer))
		sctp_transport_put(transport);

	/* Delete the T3_rtx timer if it's active.
	 * There is no point in not doing this now and letting
	 * the structure hang around in memory since we know
	 * the transport is going away.
	 */
	if (del_timer(&transport->T3_rtx_timer))
		sctp_transport_put(transport);

	if (del_timer(&transport->reconf_timer))
		sctp_transport_put(transport);

	if (del_timer(&transport->probe_timer))
		sctp_transport_put(transport);

	/* Delete the ICMP proto unreachable timer if it's active. */
	if (del_timer(&transport->proto_unreach_timer))
		sctp_transport_put(transport);

	sctp_transport_put(transport);
}

static void sctp_transport_destroy_rcu(struct rcu_head *head)
{
	struct sctp_transport *transport;

	transport = container_of(head, struct sctp_transport, rcu);

	dst_release(transport->dst);
	kfree(transport);
	SCTP_DBG_OBJCNT_DEC(transport);
}

/* Destroy the transport data structure.
 * Assumes there are no more users of this structure.
 */
static void sctp_transport_destroy(struct sctp_transport *transport)
{
	if (unlikely(refcount_read(&transport->refcnt))) {
		WARN(1, "Attempt to destroy undead transport %p!\n", transport);
		return;
	}

	sctp_packet_free(&transport->packet);

	if (transport->asoc)
		sctp_association_put(transport->asoc);

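	/* Defer the final kfree() to an RCU grace period so that lockless
	 * readers that may still hold a pointer to this transport (e.g.
	 * via the transport hashtable) can finish safely.
	 */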
	call_rcu(&transport->rcu, sctp_transport_destroy_rcu);
}

/* Start T3_rtx timer if it is not already running and update the heartbeat
 * timer.  This routine is called every time a DATA chunk is sent.
 */
void sctp_transport_reset_t3_rtx(struct sctp_transport *transport)
{
	/* RFC 2960 6.3.2 Retransmission Timer Rules
	 *
	 * R1) Every time a DATA chunk is sent to any address (including a
	 * retransmission), if the T3-rtx timer of that address is not
	 * running, start it running so that it will expire after the RTO
	 * of that address.
	 */

	if (!timer_pending(&transport->T3_rtx_timer))
		if (!mod_timer(&transport->T3_rtx_timer,
			       jiffies + transport->rto))
			sctp_transport_hold(transport);
}

void sctp_transport_reset_hb_timer(struct sctp_transport *transport)
{
	unsigned long expires;

	/* When a data chunk is sent, reset the heartbeat interval.  */
	expires = jiffies + sctp_transport_timeout(transport);
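	/* The random component below, together with the rto/2 base built
	 * into sctp_transport_timeout(), jitters the heartbeat by
	 * +/- 50% of RTO as RFC 4960, Section 8.3 suggests, and keeps
	 * heartbeats on different paths from synchronizing.
	 */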
	if (!mod_timer(&transport->hb_timer,
		       expires + get_random_u32_below(transport->rto)))
		sctp_transport_hold(transport);
}

void sctp_transport_reset_reconf_timer(struct sctp_transport *transport)
{
	if (!timer_pending(&transport->reconf_timer))
		if (!mod_timer(&transport->reconf_timer,
			       jiffies + transport->rto))
			sctp_transport_hold(transport);
}

void sctp_transport_reset_probe_timer(struct sctp_transport *transport)
{
	if (!mod_timer(&transport->probe_timer,
		       jiffies + transport->probe_interval))
		sctp_transport_hold(transport);
}

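/* In the Search Complete state, PLPMTUD only re-probes occasionally: the
 * raise timer fires after 30 probe intervals (cf. the PMTU_RAISE_TIMER of
 * RFC 8899) to check whether the path MTU has grown.
 */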
void sctp_transport_reset_raise_timer(struct sctp_transport *transport)
{
	if (!mod_timer(&transport->probe_timer,
		       jiffies + transport->probe_interval * 30))
		sctp_transport_hold(transport);
}

/* This transport has been assigned to an association.
 * Initialize fields from the association or from the sock itself.
 * Register the reference count in the association.
 */
void sctp_transport_set_owner(struct sctp_transport *transport,
			      struct sctp_association *asoc)
{
	transport->asoc = asoc;
	sctp_association_hold(asoc);
}

/* Initialize the pmtu of a transport. */
void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk)
{
	/* If we don't have a fresh route, look one up */
	if (!transport->dst || transport->dst->obsolete) {
		sctp_transport_dst_release(transport);
		transport->af_specific->get_dst(transport, &transport->saddr,
						&transport->fl, sk);
	}

	if (transport->param_flags & SPP_PMTUD_DISABLE) {
		struct sctp_association *asoc = transport->asoc;

		if (!transport->pathmtu && asoc && asoc->pathmtu)
			transport->pathmtu = asoc->pathmtu;
		if (transport->pathmtu)
			return;
	}

	if (transport->dst)
		transport->pathmtu = sctp_dst_mtu(transport->dst);
	else
		transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT;

	sctp_transport_pl_update(transport);
}

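/* Packetization Layer Path MTU Discovery (RFC 8899) state machine, as
 * implemented by the pl_send/pl_recv/pl_toobig handlers below:
 *
 *   Base            -> Search          on a confirmed BASE_PLPMTU probe
 *   Base            -> Error           when even BASE_PLPMTU fails
 *   Error           -> Search          once a probe gets through again
 *   Search          -> Search Complete when probing converges
 *   Search Complete -> Base/Search     on black-hole detection or the
 *                                      periodic raise timer
 */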
void sctp_transport_pl_send(struct sctp_transport *t)
{
	if (t->pl.probe_count < SCTP_MAX_PROBES)
		goto out;

	t->pl.probe_count = 0;
	if (t->pl.state == SCTP_PL_BASE) {
		if (t->pl.probe_size == SCTP_BASE_PLPMTU) { /* BASE_PLPMTU Confirmation Failed */
			t->pl.state = SCTP_PL_ERROR; /* Base -> Error */

			t->pl.pmtu = SCTP_BASE_PLPMTU;
			t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t);
			sctp_assoc_sync_pmtu(t->asoc);
		}
	} else if (t->pl.state == SCTP_PL_SEARCH) {
		if (t->pl.pmtu == t->pl.probe_size) { /* Black Hole Detected */
			t->pl.state = SCTP_PL_BASE;  /* Search -> Base */
			t->pl.probe_size = SCTP_BASE_PLPMTU;
			t->pl.probe_high = 0;

			t->pl.pmtu = SCTP_BASE_PLPMTU;
			t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t);
			sctp_assoc_sync_pmtu(t->asoc);
		} else { /* Normal probe failure. */
			t->pl.probe_high = t->pl.probe_size;
			t->pl.probe_size = t->pl.pmtu;
		}
	} else if (t->pl.state == SCTP_PL_COMPLETE) {
		if (t->pl.pmtu == t->pl.probe_size) { /* Black Hole Detected */
			t->pl.state = SCTP_PL_BASE;  /* Search Complete -> Base */
			t->pl.probe_size = SCTP_BASE_PLPMTU;

			t->pl.pmtu = SCTP_BASE_PLPMTU;
			t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t);
			sctp_assoc_sync_pmtu(t->asoc);
		}
	}

out:
	pr_debug("%s: PLPMTUD: transport: %p, state: %d, pmtu: %d, size: %d, high: %d\n",
		 __func__, t, t->pl.state, t->pl.pmtu, t->pl.probe_size, t->pl.probe_high);
	t->pl.probe_count++;
}

bool sctp_transport_pl_recv(struct sctp_transport *t)
{
	pr_debug("%s: PLPMTUD: transport: %p, state: %d, pmtu: %d, size: %d, high: %d\n",
		 __func__, t, t->pl.state, t->pl.pmtu, t->pl.probe_size, t->pl.probe_high);

	t->pl.pmtu = t->pl.probe_size;
	t->pl.probe_count = 0;
	if (t->pl.state == SCTP_PL_BASE) {
		t->pl.state = SCTP_PL_SEARCH; /* Base -> Search */
		t->pl.probe_size += SCTP_PL_BIG_STEP;
	} else if (t->pl.state == SCTP_PL_ERROR) {
		t->pl.state = SCTP_PL_SEARCH; /* Error -> Search */

		t->pl.pmtu = t->pl.probe_size;
		t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t);
		sctp_assoc_sync_pmtu(t->asoc);
		t->pl.probe_size += SCTP_PL_BIG_STEP;
	} else if (t->pl.state == SCTP_PL_SEARCH) {
		if (!t->pl.probe_high) {
			if (t->pl.probe_size < SCTP_MAX_PLPMTU) {
				t->pl.probe_size = min(t->pl.probe_size + SCTP_PL_BIG_STEP,
						       SCTP_MAX_PLPMTU);
				return false;
			}
			t->pl.probe_high = SCTP_MAX_PLPMTU;
		}
		t->pl.probe_size += SCTP_PL_MIN_STEP;
		if (t->pl.probe_size >= t->pl.probe_high) {
			t->pl.probe_high = 0;
			t->pl.state = SCTP_PL_COMPLETE; /* Search -> Search Complete */

			t->pl.probe_size = t->pl.pmtu;
			t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t);
			sctp_assoc_sync_pmtu(t->asoc);
			sctp_transport_reset_raise_timer(t);
		}
	} else if (t->pl.state == SCTP_PL_COMPLETE) {
		/* Raise probe_size again after 30 * interval in Search Complete */
		t->pl.state = SCTP_PL_SEARCH; /* Search Complete -> Search */
		t->pl.probe_size = min(t->pl.probe_size + SCTP_PL_MIN_STEP, SCTP_MAX_PLPMTU);
	}

	return t->pl.state == SCTP_PL_COMPLETE;
}

static bool sctp_transport_pl_toobig(struct sctp_transport *t, u32 pmtu)
{
	pr_debug("%s: PLPMTUD: transport: %p, state: %d, pmtu: %d, size: %d, ptb: %d\n",
		 __func__, t, t->pl.state, t->pl.pmtu, t->pl.probe_size, pmtu);

	if (pmtu < SCTP_MIN_PLPMTU || pmtu >= t->pl.probe_size)
		return false;

	if (t->pl.state == SCTP_PL_BASE) {
		if (pmtu >= SCTP_MIN_PLPMTU && pmtu < SCTP_BASE_PLPMTU) {
			t->pl.state = SCTP_PL_ERROR; /* Base -> Error */

			t->pl.pmtu = SCTP_BASE_PLPMTU;
			t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t);
			return true;
		}
	} else if (t->pl.state == SCTP_PL_SEARCH) {
		if (pmtu >= SCTP_BASE_PLPMTU && pmtu < t->pl.pmtu) {
			t->pl.state = SCTP_PL_BASE;  /* Search -> Base */
			t->pl.probe_size = SCTP_BASE_PLPMTU;
			t->pl.probe_count = 0;

			t->pl.probe_high = 0;
			t->pl.pmtu = SCTP_BASE_PLPMTU;
			t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t);
			return true;
		} else if (pmtu > t->pl.pmtu && pmtu < t->pl.probe_size) {
			t->pl.probe_size = pmtu;
			t->pl.probe_count = 0;
		}
	} else if (t->pl.state == SCTP_PL_COMPLETE) {
		if (pmtu >= SCTP_BASE_PLPMTU && pmtu < t->pl.pmtu) {
			t->pl.state = SCTP_PL_BASE;  /* Complete -> Base */
			t->pl.probe_size = SCTP_BASE_PLPMTU;
			t->pl.probe_count = 0;

			t->pl.probe_high = 0;
			t->pl.pmtu = SCTP_BASE_PLPMTU;
			t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t);
			sctp_transport_reset_probe_timer(t);
			return true;
		}
	}

	return false;
}

bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
{
	struct sock *sk = t->asoc->base.sk;
	struct dst_entry *dst;
	bool change = true;

	if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) {
		pr_warn_ratelimited("%s: Reported pmtu %d too low, using default minimum of %d\n",
				    __func__, pmtu, SCTP_DEFAULT_MINSEGMENT);
		/* Use default minimum segment instead */
		pmtu = SCTP_DEFAULT_MINSEGMENT;
	}
	pmtu = SCTP_TRUNC4(pmtu);

	if (sctp_transport_pl_enabled(t))
		return sctp_transport_pl_toobig(t, pmtu - sctp_transport_pl_hlen(t));

	dst = sctp_transport_dst_check(t);
	if (dst) {
		struct sctp_pf *pf = sctp_get_pf_specific(dst->ops->family);
		union sctp_addr addr;

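		/* Temporarily point the socket at this transport's peer
		 * address for the duration of the update_pmtu() call, then
		 * restore the previous address saved in 'addr'.
		 */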
		pf->af->from_sk(&addr, sk);
		pf->to_sk_daddr(&t->ipaddr, sk);
		dst->ops->update_pmtu(dst, sk, NULL, pmtu, true);
		pf->to_sk_daddr(&addr, sk);

		dst = sctp_transport_dst_check(t);
	}

	if (!dst) {
		t->af_specific->get_dst(t, &t->saddr, &t->fl, sk);
		dst = t->dst;
	}

	if (dst) {
		/* Re-fetch, as under layers may have a higher minimum size */
		pmtu = sctp_dst_mtu(dst);
		change = t->pathmtu != pmtu;
	}
	t->pathmtu = pmtu;

	return change;
}

/* Caches the dst entry and source address for a transport's destination
 * address.
 */
void sctp_transport_route(struct sctp_transport *transport,
			  union sctp_addr *saddr, struct sctp_sock *opt)
{
	struct sctp_association *asoc = transport->asoc;
	struct sctp_af *af = transport->af_specific;

	sctp_transport_dst_release(transport);
	af->get_dst(transport, saddr, &transport->fl, sctp_opt2sk(opt));

	if (saddr)
		memcpy(&transport->saddr, saddr, sizeof(union sctp_addr));
	else
		af->get_saddr(opt, transport, &transport->fl);

	sctp_transport_pmtu(transport, sctp_opt2sk(opt));

	/* Initialize sk->sk_rcv_saddr, if the transport is the
	 * association's active path for getsockname().
	 */
	if (transport->dst && asoc &&
	    (!asoc->peer.primary_path || transport == asoc->peer.active_path))
		opt->pf->to_sk_saddr(&transport->saddr, asoc->base.sk);
}

/* Hold a reference to a transport.  */
int sctp_transport_hold(struct sctp_transport *transport)
{
	return refcount_inc_not_zero(&transport->refcnt);
}

/* Release a reference to a transport and clean up
 * if there are no more references.
 */
void sctp_transport_put(struct sctp_transport *transport)
{
	if (refcount_dec_and_test(&transport->refcnt))
		sctp_transport_destroy(transport);
}

/* Update transport's RTO based on the newly calculated RTT. */
void sctp_transport_update_rto(struct sctp_transport *tp, __u32 rtt)
{
	if (unlikely(!tp->rto_pending))
		/* We should not be doing any RTO updates unless rto_pending is set.  */
		pr_debug("%s: rto_pending not set on transport %p!\n", __func__, tp);

	if (tp->rttvar || tp->srtt) {
		struct net *net = tp->asoc->base.net;
		/* 6.3.1 C3) When a new RTT measurement R' is made, set
		 * RTTVAR <- (1 - RTO.Beta) * RTTVAR + RTO.Beta * |SRTT - R'|
		 * SRTT <- (1 - RTO.Alpha) * SRTT + RTO.Alpha * R'
		 */

		/* Note:  The above algorithm has been rewritten to
		 * express rto_beta and rto_alpha as inverse powers
		 * of two.
		 * For example, assuming the default value of RTO.Alpha of
		 * 1/8, rto_alpha would be expressed as 3.
		 */
		tp->rttvar = tp->rttvar - (tp->rttvar >> net->sctp.rto_beta)
			+ (((__u32)abs((__s64)tp->srtt - (__s64)rtt)) >> net->sctp.rto_beta);
		tp->srtt = tp->srtt - (tp->srtt >> net->sctp.rto_alpha)
			+ (rtt >> net->sctp.rto_alpha);
	} else {
		/* 6.3.1 C2) When the first RTT measurement R is made, set
		 * SRTT <- R, RTTVAR <- R/2.
		 */
		tp->srtt = rtt;
		tp->rttvar = rtt >> 1;
	}
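
	/* Illustrative numbers (assuming the defaults rto_alpha = 3, i.e.
	 * RTO.Alpha = 1/8, and rto_beta = 2, i.e. RTO.Beta = 1/4): with
	 * srtt = 800 and a new rtt = 400, rttvar moves a quarter of the
	 * way toward |800 - 400| = 400, and srtt becomes
	 * 800 - 100 + 50 = 750.
	 */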

	/* 6.3.1 G1) Whenever RTTVAR is computed, if RTTVAR = 0, then
	 * adjust RTTVAR <- G, where G is the CLOCK GRANULARITY.
	 */
	if (tp->rttvar == 0)
		tp->rttvar = SCTP_CLOCK_GRANULARITY;

	/* 6.3.1 C3) After the computation, update RTO <- SRTT + 4 * RTTVAR. */
	tp->rto = tp->srtt + (tp->rttvar << 2);

	/* 6.3.1 C6) Whenever RTO is computed, if it is less than RTO.Min
	 * seconds then it is rounded up to RTO.Min seconds.
	 */
	if (tp->rto < tp->asoc->rto_min)
		tp->rto = tp->asoc->rto_min;

	/* 6.3.1 C7) A maximum value may be placed on RTO provided it is
	 * at least RTO.max seconds.
	 */
	if (tp->rto > tp->asoc->rto_max)
		tp->rto = tp->asoc->rto_max;

	sctp_max_rto(tp->asoc, tp);
	tp->rtt = rtt;

	/* Reset rto_pending so that a new RTT measurement is started when a
	 * new data chunk is sent.
	 */
	tp->rto_pending = 0;

	pr_debug("%s: transport:%p, rtt:%d, srtt:%d rttvar:%d, rto:%ld\n",
		 __func__, tp, rtt, tp->srtt, tp->rttvar, tp->rto);
}

/* This routine updates the transport's cwnd and partial_bytes_acked
 * parameters based on the bytes acked in the received SACK.
 */
void sctp_transport_raise_cwnd(struct sctp_transport *transport,
			       __u32 sack_ctsn, __u32 bytes_acked)
{
	struct sctp_association *asoc = transport->asoc;
	__u32 cwnd, ssthresh, flight_size, pba, pmtu;

	cwnd = transport->cwnd;
	flight_size = transport->flight_size;

	/* See if we need to exit Fast Recovery first */
	if (asoc->fast_recovery &&
	    TSN_lte(asoc->fast_recovery_exit, sack_ctsn))
		asoc->fast_recovery = 0;

	ssthresh = transport->ssthresh;
	pba = transport->partial_bytes_acked;
	pmtu = transport->asoc->pathmtu;

	if (cwnd <= ssthresh) {
		/* RFC 4960 7.2.1
		 * o  When cwnd is less than or equal to ssthresh, an SCTP
		 *    endpoint MUST use the slow-start algorithm to increase
		 *    cwnd only if the current congestion window is being fully
		 *    utilized, an incoming SACK advances the Cumulative TSN
		 *    Ack Point, and the data sender is not in Fast Recovery.
		 *    Only when these three conditions are met can the cwnd be
		 *    increased; otherwise, the cwnd MUST not be increased.
		 *    If these conditions are met, then cwnd MUST be increased
		 *    by, at most, the lesser of 1) the total size of the
		 *    previously outstanding DATA chunk(s) acknowledged, and
		 *    2) the destination's path MTU.  This upper bound protects
		 *    against the ACK-Splitting attack outlined in [SAVAGE99].
		 */
		if (asoc->fast_recovery)
			return;

		/* The appropriate cwnd increase algorithm is performed
		 * if, and only if the congestion window is being fully
		 * utilized.  Note that RFC4960 Errata 3.22 removed the
		 * other condition on ctsn moving.
		 */
		if (flight_size < cwnd)
			return;

		if (bytes_acked > pmtu)
			cwnd += pmtu;
		else
			cwnd += bytes_acked;

		pr_debug("%s: slow start: transport:%p, bytes_acked:%d, "
			 "cwnd:%d, ssthresh:%d, flight_size:%d, pba:%d\n",
			 __func__, transport, bytes_acked, cwnd, ssthresh,
			 flight_size, pba);
	} else {
		/* RFC 2960 7.2.2 Whenever cwnd is greater than ssthresh,
		 * upon each SACK arrival, increase partial_bytes_acked
		 * by the total number of bytes of all new chunks
		 * acknowledged in that SACK including chunks
		 * acknowledged by the new Cumulative TSN Ack and by Gap
		 * Ack Blocks. (updated by RFC4960 Errata 3.22)
		 *
		 * When partial_bytes_acked is greater than cwnd and
		 * before the arrival of the SACK the sender had less
		 * bytes of data outstanding than cwnd (i.e., before
		 * arrival of the SACK, flightsize was less than cwnd),
		 * reset partial_bytes_acked to cwnd. (RFC 4960 Errata
		 * 3.26)
		 *
		 * When partial_bytes_acked is equal to or greater than
		 * cwnd and before the arrival of the SACK the sender
		 * had cwnd or more bytes of data outstanding (i.e.,
		 * before arrival of the SACK, flightsize was greater
		 * than or equal to cwnd), partial_bytes_acked is reset
		 * to (partial_bytes_acked - cwnd). Next, cwnd is
		 * increased by MTU. (RFC 4960 Errata 3.12)
		 */
		pba += bytes_acked;
		if (pba > cwnd && flight_size < cwnd)
			pba = cwnd;
		if (pba >= cwnd && flight_size >= cwnd) {
			pba = pba - cwnd;
			cwnd += pmtu;
		}

		pr_debug("%s: congestion avoidance: transport:%p, "
			 "bytes_acked:%d, cwnd:%d, ssthresh:%d, "
			 "flight_size:%d, pba:%d\n", __func__,
			 transport, bytes_acked, cwnd, ssthresh,
			 flight_size, pba);
	}

	transport->cwnd = cwnd;
	transport->partial_bytes_acked = pba;
}
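
/* Illustrative numbers for the congestion-avoidance branch above: with
 * pmtu = 1500, ssthresh = 4000, cwnd = 6000 and flight_size = 6000, a SACK
 * newly acking 3000 bytes only raises pba to 3000; a second such SACK
 * brings pba to 6000 >= cwnd, so cwnd grows to 7500 and pba wraps to 0.
 */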

/* This routine is used to lower the transport's cwnd when congestion is
 * detected.
 */
void sctp_transport_lower_cwnd(struct sctp_transport *transport,
			       enum sctp_lower_cwnd reason)
{
	struct sctp_association *asoc = transport->asoc;

	switch (reason) {
	case SCTP_LOWER_CWND_T3_RTX:
		/* RFC 2960 Section 7.2.3, sctpimpguide
		 * When the T3-rtx timer expires on an address, SCTP should
		 * perform slow start by:
		 *      ssthresh = max(cwnd/2, 4*MTU)
		 *      cwnd = 1*MTU
		 *      partial_bytes_acked = 0
		 */
		transport->ssthresh = max(transport->cwnd/2,
					  4*asoc->pathmtu);
		transport->cwnd = asoc->pathmtu;

		/* T3-rtx also clears fast recovery */
		asoc->fast_recovery = 0;
		break;

	case SCTP_LOWER_CWND_FAST_RTX:
		/* RFC 2960 7.2.4 Adjust the ssthresh and cwnd of the
		 * destination address(es) to which the missing DATA chunks
		 * were last sent, according to the formula described in
		 * Section 7.2.3.
		 *
		 * RFC 2960 7.2.3, sctpimpguide Upon detection of packet
		 * losses from SACK (see Section 7.2.4), An endpoint
		 * should do the following:
		 *      ssthresh = max(cwnd/2, 4*MTU)
		 *      cwnd = ssthresh
		 *      partial_bytes_acked = 0
		 */
		if (asoc->fast_recovery)
			return;

		/* Mark Fast recovery */
		asoc->fast_recovery = 1;
		asoc->fast_recovery_exit = asoc->next_tsn - 1;

		transport->ssthresh = max(transport->cwnd/2,
					  4*asoc->pathmtu);
		transport->cwnd = transport->ssthresh;
		break;

	case SCTP_LOWER_CWND_ECNE:
		/* RFC 2481 Section 6.1.2.
		 * If the sender receives an ECN-Echo ACK packet
		 * then the sender knows that congestion was encountered in the
		 * network on the path from the sender to the receiver. The
		 * indication of congestion should be treated just as a
		 * congestion loss in non-ECN Capable TCP. That is, the TCP
		 * source halves the congestion window "cwnd" and reduces the
		 * slow start threshold "ssthresh".
		 * A critical condition is that TCP does not react to
		 * congestion indications more than once every window of
		 * data (or more loosely more than once every round-trip time).
		 */
		if (time_after(jiffies, transport->last_time_ecne_reduced +
					transport->rtt)) {
			transport->ssthresh = max(transport->cwnd/2,
						  4*asoc->pathmtu);
			transport->cwnd = transport->ssthresh;
			transport->last_time_ecne_reduced = jiffies;
		}
		break;

	case SCTP_LOWER_CWND_INACTIVE:
		/* RFC 2960 Section 7.2.1, sctpimpguide
		 * When the endpoint does not transmit data on a given
		 * transport address, the cwnd of the transport address
		 * should be adjusted to max(cwnd/2, 4*MTU) per RTO.
		 * NOTE: Although the draft recommends that this check needs
		 * to be done every RTO interval, we do it every heartbeat
		 * interval.
		 */
		transport->cwnd = max(transport->cwnd/2,
				      4*asoc->pathmtu);
		/* RFC 4960 Errata 3.27.2: also adjust ssthresh */
		transport->ssthresh = transport->cwnd;
		break;
	}

	transport->partial_bytes_acked = 0;

	pr_debug("%s: transport:%p, reason:%d, cwnd:%d, ssthresh:%d\n",
		 __func__, transport, reason, transport->cwnd,
		 transport->ssthresh);
}

/* Apply Max.Burst limit to the congestion window:
 * sctpimpguide-05 2.14.2
 * D) When the time comes for the sender to
 * transmit new DATA chunks, the protocol parameter Max.Burst MUST
 * first be applied to limit how many new DATA chunks may be sent.
 * The limit is applied by adjusting cwnd as follows:
 *	if ((flightsize + Max.Burst * MTU) < cwnd)
 *		cwnd = flightsize + Max.Burst * MTU
 */

void sctp_transport_burst_limited(struct sctp_transport *t)
{
	struct sctp_association *asoc = t->asoc;
	u32 old_cwnd = t->cwnd;
	u32 max_burst_bytes;

	if (t->burst_limited || asoc->max_burst == 0)
		return;

	max_burst_bytes = t->flight_size + (asoc->max_burst * asoc->pathmtu);
	if (max_burst_bytes < old_cwnd) {
		t->cwnd = max_burst_bytes;
		t->burst_limited = old_cwnd;
	}
}
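
/* Illustrative numbers: with max_burst = 4, pathmtu = 1500,
 * flight_size = 1000 and cwnd = 20000, cwnd is temporarily clamped to
 * 1000 + 4 * 1500 = 7000; sctp_transport_burst_reset() below restores
 * the saved 20000 once the burst is over.
 */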

/* Restore the old cwnd congestion window, after the burst
 * has had its desired effect.
 */
void sctp_transport_burst_reset(struct sctp_transport *t)
{
	if (t->burst_limited) {
		t->cwnd = t->burst_limited;
		t->burst_limited = 0;
	}
}

/* What is the next timeout value for this transport? */
unsigned long sctp_transport_timeout(struct sctp_transport *trans)
{
	/* RTO + timer slack +/- 50% of RTO */
	unsigned long timeout = trans->rto >> 1;

	if (trans->state != SCTP_UNCONFIRMED &&
	    trans->state != SCTP_PF)
		timeout += trans->hbinterval;

	return max_t(unsigned long, timeout, HZ / 5);
}
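
/* Illustrative numbers: with rto = 600 ms and hbinterval = 30 s on a
 * confirmed transport, this returns roughly 30.3 s, and the max_t()
 * floor guarantees at least 200 ms (HZ / 5).
 */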

/* Reset transport variables to their initial values */
void sctp_transport_reset(struct sctp_transport *t)
{
	struct sctp_association *asoc = t->asoc;

	/* RFC 2960 (bis), Section 5.2.4
	 * All the congestion control parameters (e.g., cwnd, ssthresh)
	 * related to this peer MUST be reset to their initial values
	 * (see Section 6.2.1)
	 */
	t->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380));
	t->burst_limited = 0;
	t->ssthresh = asoc->peer.i.a_rwnd;
	t->rto = asoc->rto_initial;
	sctp_max_rto(asoc, t);
	t->rtt = 0;
	t->srtt = 0;
	t->rttvar = 0;

	/* Reset these additional variables so that we have a clean slate. */
	t->partial_bytes_acked = 0;
	t->flight_size = 0;
	t->error_count = 0;
	t->rto_pending = 0;
	t->hb_sent = 0;

	/* Initialize the state information for SFR-CACC */
	t->cacc.changeover_active = 0;
	t->cacc.cycling_changeover = 0;
	t->cacc.next_tsn_at_change = 0;
	t->cacc.cacc_saw_newack = 0;
}

/* Schedule retransmission on the given transport */
void sctp_transport_immediate_rtx(struct sctp_transport *t)
{
	/* Stop pending T3_rtx_timer */
	if (del_timer(&t->T3_rtx_timer))
		sctp_transport_put(t);

	sctp_retransmit(&t->asoc->outqueue, t, SCTP_RTXR_T3_RTX);
	if (!timer_pending(&t->T3_rtx_timer)) {
		if (!mod_timer(&t->T3_rtx_timer, jiffies + t->rto))
			sctp_transport_hold(t);
	}
}

/* Drop dst */
void sctp_transport_dst_release(struct sctp_transport *t)
{
	dst_release(t->dst);
	t->dst = NULL;
	t->dst_pending_confirm = 0;
}

/* Schedule neighbour confirm */
void sctp_transport_dst_confirm(struct sctp_transport *t)
{
	t->dst_pending_confirm = 1;
}