1/* SCTP kernel implementation
2 * (C) Copyright IBM Corp. 2001, 2004
3 * Copyright (c) 1999-2000 Cisco, Inc.
4 * Copyright (c) 1999-2001 Motorola, Inc.
5 * Copyright (c) 2001 Intel Corp.
6 * Copyright (c) 2001 La Monte H.P. Yarroll
7 *
8 * This file is part of the SCTP kernel implementation
9 *
10 * This module provides the abstraction for an SCTP association.
11 *
12 * This SCTP implementation is free software;
13 * you can redistribute it and/or modify it under the terms of
14 * the GNU General Public License as published by
15 * the Free Software Foundation; either version 2, or (at your option)
16 * any later version.
17 *
18 * This SCTP implementation is distributed in the hope that it
19 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
20 * ************************
21 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
22 * See the GNU General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with GNU CC; see the file COPYING. If not, see
26 * <http://www.gnu.org/licenses/>.
27 *
28 * Please send any bug reports or fixes you make to the
29 * email address(es):
30 * lksctp developers <linux-sctp@vger.kernel.org>
31 *
32 * Written or modified by:
33 * La Monte H.P. Yarroll <piggy@acm.org>
34 * Karl Knutson <karl@athena.chicago.il.us>
35 * Jon Grimm <jgrimm@us.ibm.com>
36 * Xingang Guo <xingang.guo@intel.com>
37 * Hui Huang <hui.huang@nokia.com>
38 * Sridhar Samudrala <sri@us.ibm.com>
39 * Daisy Chang <daisyc@us.ibm.com>
40 * Ryan Layer <rmlayer@us.ibm.com>
41 * Kevin Gao <kevin.gao@intel.com>
42 */
43
44#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
45
46#include <linux/types.h>
47#include <linux/fcntl.h>
48#include <linux/poll.h>
49#include <linux/init.h>
50
51#include <linux/slab.h>
52#include <linux/in.h>
53#include <net/ipv6.h>
54#include <net/sctp/sctp.h>
55#include <net/sctp/sm.h>
56
57/* Forward declarations for internal functions. */
58static void sctp_select_active_and_retran_path(struct sctp_association *asoc);
59static void sctp_assoc_bh_rcv(struct work_struct *work);
60static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc);
61static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc);
62
63/* 1st Level Abstractions. */
64
65/* Initialize a new association from provided memory. */
66static struct sctp_association *sctp_association_init(struct sctp_association *asoc,
67 const struct sctp_endpoint *ep,
68 const struct sock *sk,
69 sctp_scope_t scope,
70 gfp_t gfp)
71{
72 struct net *net = sock_net(sk);
73 struct sctp_sock *sp;
74 int i;
75 sctp_paramhdr_t *p;
76 int err;
77
78 /* Retrieve the SCTP per socket area. */
79 sp = sctp_sk((struct sock *)sk);
80
81 /* Discarding const is appropriate here. */
82 asoc->ep = (struct sctp_endpoint *)ep;
83 asoc->base.sk = (struct sock *)sk;
84
85 sctp_endpoint_hold(asoc->ep);
86 sock_hold(asoc->base.sk);
87
88 /* Initialize the common base substructure. */
89 asoc->base.type = SCTP_EP_TYPE_ASSOCIATION;
90
91 /* Initialize the object handling fields. */
92 atomic_set(&asoc->base.refcnt, 1);
93
94 /* Initialize the bind addr area. */
95 sctp_bind_addr_init(&asoc->base.bind_addr, ep->base.bind_addr.port);
96
97 asoc->state = SCTP_STATE_CLOSED;
98 asoc->cookie_life = ms_to_ktime(sp->assocparams.sasoc_cookie_life);
99 asoc->user_frag = sp->user_frag;
100
101 /* Set the association max_retrans and RTO values from the
102 * socket values.
103 */
104 asoc->max_retrans = sp->assocparams.sasoc_asocmaxrxt;
105 asoc->pf_retrans = net->sctp.pf_retrans;
106
107 asoc->rto_initial = msecs_to_jiffies(sp->rtoinfo.srto_initial);
108 asoc->rto_max = msecs_to_jiffies(sp->rtoinfo.srto_max);
109 asoc->rto_min = msecs_to_jiffies(sp->rtoinfo.srto_min);
110
111 /* Initialize the association's heartbeat interval based on the
112 * sock configured value.
113 */
114 asoc->hbinterval = msecs_to_jiffies(sp->hbinterval);
115
116 /* Initialize path max retrans value. */
117 asoc->pathmaxrxt = sp->pathmaxrxt;
118
119 /* Initialize default path MTU. */
120 asoc->pathmtu = sp->pathmtu;
121
122 /* Set association default SACK delay */
123 asoc->sackdelay = msecs_to_jiffies(sp->sackdelay);
124 asoc->sackfreq = sp->sackfreq;
125
126 /* Set the association default flags controlling
127 * Heartbeat, SACK delay, and Path MTU Discovery.
128 */
129 asoc->param_flags = sp->param_flags;
130
131 /* Initialize the maximum number of new data packets that can be sent
132 * in a burst.
133 */
134 asoc->max_burst = sp->max_burst;
135
136 /* initialize association timers */
137 asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] = asoc->rto_initial;
138 asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] = asoc->rto_initial;
139 asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = asoc->rto_initial;
140
141 /* sctpimpguide Section 2.12.2
142 * If the 'T5-shutdown-guard' timer is used, it SHOULD be set to the
143 * recommended value of 5 times 'RTO.Max'.
144 */
145 asoc->timeouts[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]
146 = 5 * asoc->rto_max;
147
148 asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay;
149 asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] = sp->autoclose * HZ;
150
151 /* Initializes the timers */
152 for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i)
153 setup_timer(&asoc->timers[i], sctp_timer_events[i],
154 (unsigned long)asoc);
155
156 /* Pull default initialization values from the sock options.
157 * Note: This assumes that the values have already been
158 * validated in the sock.
159 */
160 asoc->c.sinit_max_instreams = sp->initmsg.sinit_max_instreams;
161 asoc->c.sinit_num_ostreams = sp->initmsg.sinit_num_ostreams;
162 asoc->max_init_attempts = sp->initmsg.sinit_max_attempts;
163
164 asoc->max_init_timeo =
165 msecs_to_jiffies(sp->initmsg.sinit_max_init_timeo);
166
167 /* Set the local window size for receive.
168 * This is also the rcvbuf space per association.
169 * RFC 6 - A SCTP receiver MUST be able to receive a minimum of
170 * 1500 bytes in one SCTP packet.
171 */
172 if ((sk->sk_rcvbuf/2) < SCTP_DEFAULT_MINWINDOW)
173 asoc->rwnd = SCTP_DEFAULT_MINWINDOW;
174 else
175 asoc->rwnd = sk->sk_rcvbuf/2;
176
177 asoc->a_rwnd = asoc->rwnd;
178
179 /* Use my own max window until I learn something better. */
180 asoc->peer.rwnd = SCTP_DEFAULT_MAXWINDOW;
181
182 /* Initialize the receive memory counter */
183 atomic_set(&asoc->rmem_alloc, 0);
184
185 init_waitqueue_head(&asoc->wait);
186
187 asoc->c.my_vtag = sctp_generate_tag(ep);
188 asoc->c.my_port = ep->base.bind_addr.port;
189
190 asoc->c.initial_tsn = sctp_generate_tsn(ep);
191
192 asoc->next_tsn = asoc->c.initial_tsn;
193
194 asoc->ctsn_ack_point = asoc->next_tsn - 1;
195 asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
196 asoc->highest_sacked = asoc->ctsn_ack_point;
197 asoc->last_cwr_tsn = asoc->ctsn_ack_point;
198
199 /* ADDIP Section 4.1 Asconf Chunk Procedures
200 *
201 * When an endpoint has an ASCONF signaled change to be sent to the
202 * remote endpoint it should do the following:
203 * ...
204 * A2) a serial number should be assigned to the chunk. The serial
205 * number SHOULD be a monotonically increasing number. The serial
206 * numbers SHOULD be initialized at the start of the
207 * association to the same value as the initial TSN.
208 */
209 asoc->addip_serial = asoc->c.initial_tsn;
210
211 INIT_LIST_HEAD(&asoc->addip_chunk_list);
212 INIT_LIST_HEAD(&asoc->asconf_ack_list);
213
214 /* Make an empty list of remote transport addresses. */
215 INIT_LIST_HEAD(&asoc->peer.transport_addr_list);
216
217 /* RFC 2960 5.1 Normal Establishment of an Association
218 *
219 * After the reception of the first data chunk in an
220 * association the endpoint must immediately respond with a
221 * sack to acknowledge the data chunk. Subsequent
222 * acknowledgements should be done as described in Section
223 * 6.2.
224 *
225 * [We implement this by telling a new association that it
226 * already received one packet.]
227 */
228 asoc->peer.sack_needed = 1;
229 asoc->peer.sack_generation = 1;
230
231 /* Assume that the peer will tell us if he recognizes ASCONF
232 * as part of INIT exchange.
233 * The sctp_addip_noauth option is there for backward compatibility
234 * and will revert old behavior.
235 */
236 if (net->sctp.addip_noauth)
237 asoc->peer.asconf_capable = 1;
238
239 /* Create an input queue. */
240 sctp_inq_init(&asoc->base.inqueue);
241 sctp_inq_set_th_handler(&asoc->base.inqueue, sctp_assoc_bh_rcv);
242
243 /* Create an output queue. */
244 sctp_outq_init(asoc, &asoc->outqueue);
245
246 if (!sctp_ulpq_init(&asoc->ulpq, asoc))
247 goto fail_init;
248
249 /* Assume that peer would support both address types unless we are
250 * told otherwise.
251 */
252 asoc->peer.ipv4_address = 1;
253 if (asoc->base.sk->sk_family == PF_INET6)
254 asoc->peer.ipv6_address = 1;
255 INIT_LIST_HEAD(&asoc->asocs);
256
257 asoc->default_stream = sp->default_stream;
258 asoc->default_ppid = sp->default_ppid;
259 asoc->default_flags = sp->default_flags;
260 asoc->default_context = sp->default_context;
261 asoc->default_timetolive = sp->default_timetolive;
262 asoc->default_rcv_context = sp->default_rcv_context;
263
264 /* AUTH related initializations */
265 INIT_LIST_HEAD(&asoc->endpoint_shared_keys);
266 err = sctp_auth_asoc_copy_shkeys(ep, asoc, gfp);
267 if (err)
268 goto fail_init;
269
270 asoc->active_key_id = ep->active_key_id;
271
272 /* Save the hmacs and chunks list into this association */
273 if (ep->auth_hmacs_list)
274 memcpy(asoc->c.auth_hmacs, ep->auth_hmacs_list,
275 ntohs(ep->auth_hmacs_list->param_hdr.length));
276 if (ep->auth_chunk_list)
277 memcpy(asoc->c.auth_chunks, ep->auth_chunk_list,
278 ntohs(ep->auth_chunk_list->param_hdr.length));
279
280 /* Get the AUTH random number for this association */
281 p = (sctp_paramhdr_t *)asoc->c.auth_random;
282 p->type = SCTP_PARAM_RANDOM;
283 p->length = htons(sizeof(sctp_paramhdr_t) + SCTP_AUTH_RANDOM_LENGTH);
284 get_random_bytes(p+1, SCTP_AUTH_RANDOM_LENGTH);
285
286 return asoc;
287
288fail_init:
289 sock_put(asoc->base.sk);
290 sctp_endpoint_put(asoc->ep);
291 return NULL;
292}
293
294/* Allocate and initialize a new association */
295struct sctp_association *sctp_association_new(const struct sctp_endpoint *ep,
296 const struct sock *sk,
297 sctp_scope_t scope,
298 gfp_t gfp)
299{
300 struct sctp_association *asoc;
301
302 asoc = kzalloc(sizeof(*asoc), gfp);
303 if (!asoc)
304 goto fail;
305
306 if (!sctp_association_init(asoc, ep, sk, scope, gfp))
307 goto fail_init;
308
309 SCTP_DBG_OBJCNT_INC(assoc);
310
311 pr_debug("Created asoc %p\n", asoc);
312
313 return asoc;
314
315fail_init:
316 kfree(asoc);
317fail:
318 return NULL;
319}
320
321/* Free this association if possible. There may still be users, so
322 * the actual deallocation may be delayed.
323 */
324void sctp_association_free(struct sctp_association *asoc)
325{
326 struct sock *sk = asoc->base.sk;
327 struct sctp_transport *transport;
328 struct list_head *pos, *temp;
329 int i;
330
331 /* Only real associations count against the endpoint, so
332 * don't bother if this is a temporary association.
333 */
334 if (!list_empty(&asoc->asocs)) {
335 list_del(&asoc->asocs);
336
337 /* Decrement the backlog value for a TCP-style listening
338 * socket.
339 */
340 if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
341 sk->sk_ack_backlog--;
342 }
343
344 /* Mark as dead, so other users can know this structure is
345 * going away.
346 */
347 asoc->base.dead = true;
348
349 /* Dispose of any data lying around in the outqueue. */
350 sctp_outq_free(&asoc->outqueue);
351
352 /* Dispose of any pending messages for the upper layer. */
353 sctp_ulpq_free(&asoc->ulpq);
354
355 /* Dispose of any pending chunks on the inqueue. */
356 sctp_inq_free(&asoc->base.inqueue);
357
358 sctp_tsnmap_free(&asoc->peer.tsn_map);
359
360 /* Free ssnmap storage. */
361 sctp_ssnmap_free(asoc->ssnmap);
362
363 /* Clean up the bound address list. */
364 sctp_bind_addr_free(&asoc->base.bind_addr);
365
366 /* Do we need to go through all of our timers and
367 * delete them? To be safe we will try to delete all, but we
368 * should be able to go through and make a guess based
369 * on our state.
370 */
371 for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i) {
372 if (del_timer(&asoc->timers[i]))
373 sctp_association_put(asoc);
374 }
375
376 /* Free peer's cached cookie. */
377 kfree(asoc->peer.cookie);
378 kfree(asoc->peer.peer_random);
379 kfree(asoc->peer.peer_chunks);
380 kfree(asoc->peer.peer_hmacs);
381
382 /* Release the transport structures. */
383 list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
384 transport = list_entry(pos, struct sctp_transport, transports);
385 list_del_rcu(pos);
386 sctp_unhash_transport(transport);
387 sctp_transport_free(transport);
388 }
389
390 asoc->peer.transport_count = 0;
391
392 sctp_asconf_queue_teardown(asoc);
393
394 /* Free pending address space being deleted */
395 kfree(asoc->asconf_addr_del_pending);
396
397 /* AUTH - Free the endpoint shared keys */
398 sctp_auth_destroy_keys(&asoc->endpoint_shared_keys);
399
400 /* AUTH - Free the association shared key */
401 sctp_auth_key_put(asoc->asoc_shared_key);
402
403 sctp_association_put(asoc);
404}
405
406/* Cleanup and free up an association. */
407static void sctp_association_destroy(struct sctp_association *asoc)
408{
409 if (unlikely(!asoc->base.dead)) {
410 WARN(1, "Attempt to destroy undead association %p!\n", asoc);
411 return;
412 }
413
414 sctp_endpoint_put(asoc->ep);
415 sock_put(asoc->base.sk);
416
417 if (asoc->assoc_id != 0) {
418 spin_lock_bh(&sctp_assocs_id_lock);
419 idr_remove(&sctp_assocs_id, asoc->assoc_id);
420 spin_unlock_bh(&sctp_assocs_id_lock);
421 }
422
423 WARN_ON(atomic_read(&asoc->rmem_alloc));
424
425 kfree(asoc);
426 SCTP_DBG_OBJCNT_DEC(assoc);
427}
428
429/* Change the primary destination address for the peer. */
430void sctp_assoc_set_primary(struct sctp_association *asoc,
431 struct sctp_transport *transport)
432{
433 int changeover = 0;
434
435 /* it's a changeover only if we already have a primary path
436 * that we are changing
437 */
438 if (asoc->peer.primary_path != NULL &&
439 asoc->peer.primary_path != transport)
440		changeover = 1;
441
442 asoc->peer.primary_path = transport;
443
444 /* Set a default msg_name for events. */
445 memcpy(&asoc->peer.primary_addr, &transport->ipaddr,
446 sizeof(union sctp_addr));
447
448 /* If the primary path is changing, assume that the
449 * user wants to use this new path.
450 */
451 if ((transport->state == SCTP_ACTIVE) ||
452 (transport->state == SCTP_UNKNOWN))
453 asoc->peer.active_path = transport;
454
455 /*
456 * SFR-CACC algorithm:
457 * Upon the receipt of a request to change the primary
458 * destination address, on the data structure for the new
459 * primary destination, the sender MUST do the following:
460 *
461 * 1) If CHANGEOVER_ACTIVE is set, then there was a switch
462 * to this destination address earlier. The sender MUST set
463 * CYCLING_CHANGEOVER to indicate that this switch is a
464 * double switch to the same destination address.
465 *
466 * Really, only bother if we have data queued or outstanding on
467 * the association.
468 */
469 if (!asoc->outqueue.outstanding_bytes && !asoc->outqueue.out_qlen)
470 return;
471
472 if (transport->cacc.changeover_active)
473 transport->cacc.cycling_changeover = changeover;
474
475 /* 2) The sender MUST set CHANGEOVER_ACTIVE to indicate that
476 * a changeover has occurred.
477 */
478 transport->cacc.changeover_active = changeover;
479
480 /* 3) The sender MUST store the next TSN to be sent in
481 * next_tsn_at_change.
482 */
483 transport->cacc.next_tsn_at_change = asoc->next_tsn;
484}
485
486/* Remove a transport from an association. */
487void sctp_assoc_rm_peer(struct sctp_association *asoc,
488 struct sctp_transport *peer)
489{
490 struct list_head *pos;
491 struct sctp_transport *transport;
492
493 pr_debug("%s: association:%p addr:%pISpc\n",
494 __func__, asoc, &peer->ipaddr.sa);
495
496 /* If we are to remove the current retran_path, update it
497 * to the next peer before removing this peer from the list.
498 */
499 if (asoc->peer.retran_path == peer)
500 sctp_assoc_update_retran_path(asoc);
501
502 /* Remove this peer from the list. */
503 list_del_rcu(&peer->transports);
504 /* Remove this peer from the transport hashtable */
505 sctp_unhash_transport(peer);
506
507 /* Get the first transport of asoc. */
508 pos = asoc->peer.transport_addr_list.next;
509 transport = list_entry(pos, struct sctp_transport, transports);
510
511 /* Update any entries that match the peer to be deleted. */
512 if (asoc->peer.primary_path == peer)
513 sctp_assoc_set_primary(asoc, transport);
514 if (asoc->peer.active_path == peer)
515 asoc->peer.active_path = transport;
516 if (asoc->peer.retran_path == peer)
517 asoc->peer.retran_path = transport;
518 if (asoc->peer.last_data_from == peer)
519 asoc->peer.last_data_from = transport;
520
521 /* If we remove the transport an INIT was last sent to, set it to
522 * NULL. Combined with the update of the retran path above, this
523 * will cause the next INIT to be sent to the next available
524 * transport, maintaining the cycle.
525 */
526 if (asoc->init_last_sent_to == peer)
527 asoc->init_last_sent_to = NULL;
528
529 /* If we remove the transport an SHUTDOWN was last sent to, set it
530 * to NULL. Combined with the update of the retran path above, this
531 * will cause the next SHUTDOWN to be sent to the next available
532 * transport, maintaining the cycle.
533 */
534 if (asoc->shutdown_last_sent_to == peer)
535 asoc->shutdown_last_sent_to = NULL;
536
537 /* If we remove the transport an ASCONF was last sent to, set it to
538 * NULL.
539 */
540 if (asoc->addip_last_asconf &&
541 asoc->addip_last_asconf->transport == peer)
542 asoc->addip_last_asconf->transport = NULL;
543
544 /* If we have something on the transmitted list, we have to
545 * save it off. The best place is the active path.
546 */
547 if (!list_empty(&peer->transmitted)) {
548 struct sctp_transport *active = asoc->peer.active_path;
549 struct sctp_chunk *ch;
550
551 /* Reset the transport of each chunk on this list */
552 list_for_each_entry(ch, &peer->transmitted,
553 transmitted_list) {
554 ch->transport = NULL;
555 ch->rtt_in_progress = 0;
556 }
557
558 list_splice_tail_init(&peer->transmitted,
559 &active->transmitted);
560
561 /* Start a T3 timer here in case it wasn't running so
562 * that these migrated packets have a chance to get
563 * retransmitted.
564 */
565 if (!timer_pending(&active->T3_rtx_timer))
566 if (!mod_timer(&active->T3_rtx_timer,
567 jiffies + active->rto))
568 sctp_transport_hold(active);
569 }
570
571 asoc->peer.transport_count--;
572
573 sctp_transport_free(peer);
574}
575
576/* Add a transport address to an association. */
577struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
578 const union sctp_addr *addr,
579 const gfp_t gfp,
580 const int peer_state)
581{
582 struct net *net = sock_net(asoc->base.sk);
583 struct sctp_transport *peer;
584 struct sctp_sock *sp;
585 unsigned short port;
586
587 sp = sctp_sk(asoc->base.sk);
588
589 /* AF_INET and AF_INET6 share common port field. */
590 port = ntohs(addr->v4.sin_port);
591
592 pr_debug("%s: association:%p addr:%pISpc state:%d\n", __func__,
593 asoc, &addr->sa, peer_state);
594
595 /* Set the port if it has not been set yet. */
596 if (0 == asoc->peer.port)
597 asoc->peer.port = port;
598
599 /* Check to see if this is a duplicate. */
600 peer = sctp_assoc_lookup_paddr(asoc, addr);
601 if (peer) {
602 /* An UNKNOWN state is only set on transports added by
603 * user in sctp_connectx() call. Such transports should be
604 * considered CONFIRMED per RFC 4960, Section 5.4.
605 */
606 if (peer->state == SCTP_UNKNOWN) {
607 peer->state = SCTP_ACTIVE;
608 }
609 return peer;
610 }
611
612 peer = sctp_transport_new(net, addr, gfp);
613 if (!peer)
614 return NULL;
615
616 sctp_transport_set_owner(peer, asoc);
617
618 /* Initialize the peer's heartbeat interval based on the
619 * association configured value.
620 */
621 peer->hbinterval = asoc->hbinterval;
622
623 /* Set the path max_retrans. */
624 peer->pathmaxrxt = asoc->pathmaxrxt;
625
626 /* And the partial failure retrans threshold */
627 peer->pf_retrans = asoc->pf_retrans;
628
629 /* Initialize the peer's SACK delay timeout based on the
630 * association configured value.
631 */
632 peer->sackdelay = asoc->sackdelay;
633 peer->sackfreq = asoc->sackfreq;
634
635 /* Enable/disable heartbeat, SACK delay, and path MTU discovery
636 * based on association setting.
637 */
638 peer->param_flags = asoc->param_flags;
639
640 sctp_transport_route(peer, NULL, sp);
641
642 /* Initialize the pmtu of the transport. */
643 if (peer->param_flags & SPP_PMTUD_DISABLE) {
644 if (asoc->pathmtu)
645 peer->pathmtu = asoc->pathmtu;
646 else
647 peer->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
648 }
649
650 /* If this is the first transport addr on this association,
651 * initialize the association PMTU to the peer's PMTU.
652 * If not and the current association PMTU is higher than the new
653 * peer's PMTU, reset the association PMTU to the new peer's PMTU.
654 */
655 if (asoc->pathmtu)
656 asoc->pathmtu = min_t(int, peer->pathmtu, asoc->pathmtu);
657 else
658 asoc->pathmtu = peer->pathmtu;
659
660 pr_debug("%s: association:%p PMTU set to %d\n", __func__, asoc,
661 asoc->pathmtu);
662
663 peer->pmtu_pending = 0;
664
665 asoc->frag_point = sctp_frag_point(asoc, asoc->pathmtu);
666
667 /* The asoc->peer.port might not be meaningful yet, but
668 * initialize the packet structure anyway.
669 */
670 sctp_packet_init(&peer->packet, peer, asoc->base.bind_addr.port,
671 asoc->peer.port);
672
673 /* 7.2.1 Slow-Start
674 *
675 * o The initial cwnd before DATA transmission or after a sufficiently
676 * long idle period MUST be set to
677 * min(4*MTU, max(2*MTU, 4380 bytes))
678 *
679 * o The initial value of ssthresh MAY be arbitrarily high
680 * (for example, implementations MAY use the size of the
681 * receiver advertised window).
682 */
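	/* For example, with a path MTU of 1500 bytes this works out to
	 * min(4 * 1500, max(2 * 1500, 4380)) = min(6000, 4380) = 4380 bytes.
	 */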
683 peer->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380));
684
685 /* At this point, we may not have the receiver's advertised window,
686 * so initialize ssthresh to the default value and it will be set
687 * later when we process the INIT.
688 */
689 peer->ssthresh = SCTP_DEFAULT_MAXWINDOW;
690
691 peer->partial_bytes_acked = 0;
692 peer->flight_size = 0;
693 peer->burst_limited = 0;
694
695 /* Set the transport's RTO.initial value */
696 peer->rto = asoc->rto_initial;
697 sctp_max_rto(asoc, peer);
698
699 /* Set the peer's active state. */
700 peer->state = peer_state;
701
702 /* Attach the remote transport to our asoc. */
703 list_add_tail_rcu(&peer->transports, &asoc->peer.transport_addr_list);
704 asoc->peer.transport_count++;
705 /* Add this peer into the transport hashtable */
706 sctp_hash_transport(peer);
707
708 /* If we do not yet have a primary path, set one. */
709 if (!asoc->peer.primary_path) {
710 sctp_assoc_set_primary(asoc, peer);
711 asoc->peer.retran_path = peer;
712 }
713
714 if (asoc->peer.active_path == asoc->peer.retran_path &&
715 peer->state != SCTP_UNCONFIRMED) {
716 asoc->peer.retran_path = peer;
717 }
718
719 return peer;
720}
721
722/* Delete a transport address from an association. */
723void sctp_assoc_del_peer(struct sctp_association *asoc,
724 const union sctp_addr *addr)
725{
726 struct list_head *pos;
727 struct list_head *temp;
728 struct sctp_transport *transport;
729
730 list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
731 transport = list_entry(pos, struct sctp_transport, transports);
732 if (sctp_cmp_addr_exact(addr, &transport->ipaddr)) {
733 /* Do book keeping for removing the peer and free it. */
734 sctp_assoc_rm_peer(asoc, transport);
735 break;
736 }
737 }
738}
739
740/* Lookup a transport by address. */
741struct sctp_transport *sctp_assoc_lookup_paddr(
742 const struct sctp_association *asoc,
743 const union sctp_addr *address)
744{
745 struct sctp_transport *t;
746
747 /* Cycle through all transports searching for a peer address. */
748
749 list_for_each_entry(t, &asoc->peer.transport_addr_list,
750 transports) {
751 if (sctp_cmp_addr_exact(address, &t->ipaddr))
752 return t;
753 }
754
755 return NULL;
756}
757
758/* Remove all transports except a given one */
759void sctp_assoc_del_nonprimary_peers(struct sctp_association *asoc,
760 struct sctp_transport *primary)
761{
762 struct sctp_transport *temp;
763 struct sctp_transport *t;
764
765 list_for_each_entry_safe(t, temp, &asoc->peer.transport_addr_list,
766 transports) {
767 /* if the current transport is not the primary one, delete it */
768 if (t != primary)
769 sctp_assoc_rm_peer(asoc, t);
770 }
771}
772
773/* Engage in transport control operations.
774 * Mark the transport up or down and send a notification to the user.
775 * Select and update the new active and retran paths.
776 */
777void sctp_assoc_control_transport(struct sctp_association *asoc,
778 struct sctp_transport *transport,
779 sctp_transport_cmd_t command,
780 sctp_sn_error_t error)
781{
782 struct sctp_ulpevent *event;
783 struct sockaddr_storage addr;
784 int spc_state = 0;
785 bool ulp_notify = true;
786
787 /* Record the transition on the transport. */
788 switch (command) {
789 case SCTP_TRANSPORT_UP:
790 /* If we are moving from UNCONFIRMED state due
791 * to heartbeat success, report the SCTP_ADDR_CONFIRMED
792 * state to the user, otherwise report SCTP_ADDR_AVAILABLE.
793 */
794 if (SCTP_UNCONFIRMED == transport->state &&
795 SCTP_HEARTBEAT_SUCCESS == error)
796 spc_state = SCTP_ADDR_CONFIRMED;
797 else
798 spc_state = SCTP_ADDR_AVAILABLE;
799 /* Don't inform ULP about transition from PF to
800 * active state and set cwnd to 1 MTU, see SCTP
801 * Quick failover draft section 5.1, point 5
802 */
803 if (transport->state == SCTP_PF) {
804 ulp_notify = false;
805 transport->cwnd = asoc->pathmtu;
806 }
807 transport->state = SCTP_ACTIVE;
808 break;
809
810 case SCTP_TRANSPORT_DOWN:
811 /* If the transport was never confirmed, do not transition it
812 * to inactive state. Also, release the cached route since
813 * there may be a better route next time.
814 */
815 if (transport->state != SCTP_UNCONFIRMED)
816 transport->state = SCTP_INACTIVE;
817 else {
818 dst_release(transport->dst);
819 transport->dst = NULL;
820 ulp_notify = false;
821 }
822
823 spc_state = SCTP_ADDR_UNREACHABLE;
824 break;
825
826 case SCTP_TRANSPORT_PF:
827 transport->state = SCTP_PF;
828 ulp_notify = false;
829 break;
830
831 default:
832 return;
833 }
834
835 /* Generate and send a SCTP_PEER_ADDR_CHANGE notification
836 * to the user.
837 */
838 if (ulp_notify) {
839 memset(&addr, 0, sizeof(struct sockaddr_storage));
840 memcpy(&addr, &transport->ipaddr,
841 transport->af_specific->sockaddr_len);
842
843 event = sctp_ulpevent_make_peer_addr_change(asoc, &addr,
844 0, spc_state, error, GFP_ATOMIC);
845 if (event)
846 sctp_ulpq_tail_event(&asoc->ulpq, event);
847 }
848
849 /* Select new active and retran paths. */
850 sctp_select_active_and_retran_path(asoc);
851}
852
853/* Hold a reference to an association. */
854void sctp_association_hold(struct sctp_association *asoc)
855{
856 atomic_inc(&asoc->base.refcnt);
857}
858
859/* Release a reference to an association and cleanup
860 * if there are no more references.
861 */
862void sctp_association_put(struct sctp_association *asoc)
863{
864 if (atomic_dec_and_test(&asoc->base.refcnt))
865 sctp_association_destroy(asoc);
866}
867
868/* Allocate the next TSN, Transmission Sequence Number, for the given
869 * association.
870 */
871__u32 sctp_association_get_next_tsn(struct sctp_association *asoc)
872{
873 /* From Section 1.6 Serial Number Arithmetic:
874 * Transmission Sequence Numbers wrap around when they reach
875 * 2**32 - 1. That is, the next TSN a DATA chunk MUST use
876 * after transmitting TSN = 2**32 - 1 is TSN = 0.
877 */
878 __u32 retval = asoc->next_tsn;
879 asoc->next_tsn++;
880 asoc->unack_data++;
881
882 return retval;
883}
884
885/* Compare two addresses to see if they match. Wildcard addresses
886 * only match themselves.
887 */
888int sctp_cmp_addr_exact(const union sctp_addr *ss1,
889 const union sctp_addr *ss2)
890{
891 struct sctp_af *af;
892
893 af = sctp_get_af_specific(ss1->sa.sa_family);
894 if (unlikely(!af))
895 return 0;
896
897 return af->cmp_addr(ss1, ss2);
898}
899
900/* Return an ecne chunk to get prepended to a packet.
901 * Note: We are sly and return a shared, prealloced chunk. FIXME:
902 * No we don't, but we could/should.
903 */
904struct sctp_chunk *sctp_get_ecne_prepend(struct sctp_association *asoc)
905{
906 if (!asoc->need_ecne)
907 return NULL;
908
909 /* Send ECNE if needed.
910 * Not being able to allocate a chunk here is not deadly.
911 */
912 return sctp_make_ecne(asoc, asoc->last_ecne_tsn);
913}
914
915/*
916 * Find which transport this TSN was sent on.
917 */
918struct sctp_transport *sctp_assoc_lookup_tsn(struct sctp_association *asoc,
919 __u32 tsn)
920{
921 struct sctp_transport *active;
922 struct sctp_transport *match;
923 struct sctp_transport *transport;
924 struct sctp_chunk *chunk;
925 __be32 key = htonl(tsn);
926
927 match = NULL;
928
929 /*
930 * FIXME: In general, find a more efficient data structure for
931 * searching.
932 */
933
934 /*
935 * The general strategy is to search each transport's transmitted
936 * list. Return which transport this TSN lives on.
937 *
938 * Let's be hopeful and check the active_path first.
939 * Another optimization would be to know if there is only one
940 * outbound path and not have to look for the TSN at all.
941 *
942 */
943
944 active = asoc->peer.active_path;
945
946 list_for_each_entry(chunk, &active->transmitted,
947 transmitted_list) {
948
949 if (key == chunk->subh.data_hdr->tsn) {
950 match = active;
951 goto out;
952 }
953 }
954
955 /* If not found, go search all the other transports. */
956 list_for_each_entry(transport, &asoc->peer.transport_addr_list,
957 transports) {
958
959 if (transport == active)
960 continue;
961 list_for_each_entry(chunk, &transport->transmitted,
962 transmitted_list) {
963 if (key == chunk->subh.data_hdr->tsn) {
964 match = transport;
965 goto out;
966 }
967 }
968 }
969out:
970 return match;
971}
972
973/* Is this the association we are looking for? */
974struct sctp_transport *sctp_assoc_is_match(struct sctp_association *asoc,
975 struct net *net,
976 const union sctp_addr *laddr,
977 const union sctp_addr *paddr)
978{
979 struct sctp_transport *transport;
980
981 if ((htons(asoc->base.bind_addr.port) == laddr->v4.sin_port) &&
982 (htons(asoc->peer.port) == paddr->v4.sin_port) &&
983 net_eq(sock_net(asoc->base.sk), net)) {
984 transport = sctp_assoc_lookup_paddr(asoc, paddr);
985 if (!transport)
986 goto out;
987
988 if (sctp_bind_addr_match(&asoc->base.bind_addr, laddr,
989 sctp_sk(asoc->base.sk)))
990 goto out;
991 }
992 transport = NULL;
993
994out:
995 return transport;
996}
997
998/* Do delayed input processing. This is scheduled by sctp_rcv(). */
999static void sctp_assoc_bh_rcv(struct work_struct *work)
1000{
1001 struct sctp_association *asoc =
1002 container_of(work, struct sctp_association,
1003 base.inqueue.immediate);
1004 struct net *net = sock_net(asoc->base.sk);
1005 struct sctp_endpoint *ep;
1006 struct sctp_chunk *chunk;
1007 struct sctp_inq *inqueue;
1008 int state;
1009 sctp_subtype_t subtype;
1010 int error = 0;
1011
1012 /* The association should be held so we should be safe. */
1013 ep = asoc->ep;
1014
1015 inqueue = &asoc->base.inqueue;
1016 sctp_association_hold(asoc);
1017 while (NULL != (chunk = sctp_inq_pop(inqueue))) {
1018 state = asoc->state;
1019 subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type);
1020
1021 /* SCTP-AUTH, Section 6.3:
1022 * The receiver has a list of chunk types which it expects
1023 * to be received only after an AUTH-chunk. This list has
1024 * been sent to the peer during the association setup. It
1025 * MUST silently discard these chunks if they are not placed
1026 * after an AUTH chunk in the packet.
1027 */
1028 if (sctp_auth_recv_cid(subtype.chunk, asoc) && !chunk->auth)
1029 continue;
1030
1031 /* Remember where the last DATA chunk came from so we
1032 * know where to send the SACK.
1033 */
1034 if (sctp_chunk_is_data(chunk))
1035 asoc->peer.last_data_from = chunk->transport;
1036 else {
1037 SCTP_INC_STATS(net, SCTP_MIB_INCTRLCHUNKS);
1038 asoc->stats.ictrlchunks++;
1039 if (chunk->chunk_hdr->type == SCTP_CID_SACK)
1040 asoc->stats.isacks++;
1041 }
1042
1043 if (chunk->transport)
1044 chunk->transport->last_time_heard = ktime_get();
1045
1046 /* Run through the state machine. */
1047 error = sctp_do_sm(net, SCTP_EVENT_T_CHUNK, subtype,
1048 state, ep, asoc, chunk, GFP_ATOMIC);
1049
1050 /* Check to see if the association is freed in response to
1051 * the incoming chunk. If so, get out of the while loop.
1052 */
1053 if (asoc->base.dead)
1054 break;
1055
1056 /* If there is an error on chunk, discard this packet. */
1057 if (error && chunk)
1058 chunk->pdiscard = 1;
1059 }
1060 sctp_association_put(asoc);
1061}
1062
1063/* This routine moves an association from its old sk to a new sk. */
1064void sctp_assoc_migrate(struct sctp_association *assoc, struct sock *newsk)
1065{
1066 struct sctp_sock *newsp = sctp_sk(newsk);
1067 struct sock *oldsk = assoc->base.sk;
1068
1069 /* Delete the association from the old endpoint's list of
1070 * associations.
1071 */
1072 list_del_init(&assoc->asocs);
1073
1074 /* Decrement the backlog value for a TCP-style socket. */
1075 if (sctp_style(oldsk, TCP))
1076 oldsk->sk_ack_backlog--;
1077
1078 /* Release references to the old endpoint and the sock. */
1079 sctp_endpoint_put(assoc->ep);
1080 sock_put(assoc->base.sk);
1081
1082 /* Get a reference to the new endpoint. */
1083 assoc->ep = newsp->ep;
1084 sctp_endpoint_hold(assoc->ep);
1085
1086 /* Get a reference to the new sock. */
1087 assoc->base.sk = newsk;
1088 sock_hold(assoc->base.sk);
1089
1090 /* Add the association to the new endpoint's list of associations. */
1091 sctp_endpoint_add_asoc(newsp->ep, assoc);
1092}
1093
1094/* Update an association (possibly from unexpected COOKIE-ECHO processing). */
1095void sctp_assoc_update(struct sctp_association *asoc,
1096 struct sctp_association *new)
1097{
1098 struct sctp_transport *trans;
1099 struct list_head *pos, *temp;
1100
1101 /* Copy in new parameters of peer. */
1102 asoc->c = new->c;
1103 asoc->peer.rwnd = new->peer.rwnd;
1104 asoc->peer.sack_needed = new->peer.sack_needed;
1105 asoc->peer.auth_capable = new->peer.auth_capable;
1106 asoc->peer.i = new->peer.i;
1107 sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL,
1108 asoc->peer.i.initial_tsn, GFP_ATOMIC);
1109
1110 /* Remove any peer addresses not present in the new association. */
1111 list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
1112 trans = list_entry(pos, struct sctp_transport, transports);
1113 if (!sctp_assoc_lookup_paddr(new, &trans->ipaddr)) {
1114 sctp_assoc_rm_peer(asoc, trans);
1115 continue;
1116 }
1117
1118 if (asoc->state >= SCTP_STATE_ESTABLISHED)
1119 sctp_transport_reset(trans);
1120 }
1121
1122 /* If the case is A (association restart), use
1123 * initial_tsn as next_tsn. If the case is B, use
1124 * current next_tsn in case data sent to peer
1125 * has been discarded and needs retransmission.
1126 */
1127 if (asoc->state >= SCTP_STATE_ESTABLISHED) {
1128 asoc->next_tsn = new->next_tsn;
1129 asoc->ctsn_ack_point = new->ctsn_ack_point;
1130 asoc->adv_peer_ack_point = new->adv_peer_ack_point;
1131
1132 /* Reinitialize SSN for both local streams
1133 * and peer's streams.
1134 */
1135 sctp_ssnmap_clear(asoc->ssnmap);
1136
1137 /* Flush the ULP reassembly and ordered queue.
1138 * Any data there will now be stale and will
1139 * cause problems.
1140 */
1141 sctp_ulpq_flush(&asoc->ulpq);
1142
1143 /* reset the overall association error count so
1144 * that the restarted association doesn't get torn
1145 * down on the next retransmission timer.
1146 */
1147 asoc->overall_error_count = 0;
1148
1149 } else {
1150 /* Add any peer addresses from the new association. */
1151 list_for_each_entry(trans, &new->peer.transport_addr_list,
1152 transports) {
1153 if (!sctp_assoc_lookup_paddr(asoc, &trans->ipaddr))
1154 sctp_assoc_add_peer(asoc, &trans->ipaddr,
1155 GFP_ATOMIC, trans->state);
1156 }
1157
1158 asoc->ctsn_ack_point = asoc->next_tsn - 1;
1159 asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
1160 if (!asoc->ssnmap) {
1161 /* Move the ssnmap. */
1162 asoc->ssnmap = new->ssnmap;
1163 new->ssnmap = NULL;
1164 }
1165
1166 if (!asoc->assoc_id) {
1167 /* get a new association id since we don't have one
1168 * yet.
1169 */
1170 sctp_assoc_set_id(asoc, GFP_ATOMIC);
1171 }
1172 }
1173
1174 /* SCTP-AUTH: Save the peer parameters from the new associations
1175 * and also move the association shared keys over
1176 */
1177 kfree(asoc->peer.peer_random);
1178 asoc->peer.peer_random = new->peer.peer_random;
1179 new->peer.peer_random = NULL;
1180
1181 kfree(asoc->peer.peer_chunks);
1182 asoc->peer.peer_chunks = new->peer.peer_chunks;
1183 new->peer.peer_chunks = NULL;
1184
1185 kfree(asoc->peer.peer_hmacs);
1186 asoc->peer.peer_hmacs = new->peer.peer_hmacs;
1187 new->peer.peer_hmacs = NULL;
1188
1189 sctp_auth_asoc_init_active_key(asoc, GFP_ATOMIC);
1190}
1191
1192/* Update the retran path for sending a retransmitted packet.
1193 * See also RFC4960, 6.4. Multi-Homed SCTP Endpoints:
1194 *
1195 * When there is outbound data to send and the primary path
1196 * becomes inactive (e.g., due to failures), or where the
1197 * SCTP user explicitly requests to send data to an
1198 * inactive destination transport address, before reporting
1199 * an error to its ULP, the SCTP endpoint should try to send
1200 * the data to an alternate active destination transport
1201 * address if one exists.
1202 *
1203 * When retransmitting data that timed out, if the endpoint
1204 * is multihomed, it should consider each source-destination
1205 * address pair in its retransmission selection policy.
1206 * When retransmitting timed-out data, the endpoint should
1207 * attempt to pick the most divergent source-destination
1208 * pair from the original source-destination pair to which
1209 * the packet was transmitted.
1210 *
1211 * Note: Rules for picking the most divergent source-destination
1212 * pair are an implementation decision and are not specified
1213 * within this document.
1214 *
1215 * Our basic strategy is to round-robin transports in priorities
1216 * according to sctp_trans_score() e.g., if no such
1217 * transport with state SCTP_ACTIVE exists, round-robin through
1218 * SCTP_UNKNOWN, etc. You get the picture.
1219 */
1220static u8 sctp_trans_score(const struct sctp_transport *trans)
1221{
1222 switch (trans->state) {
1223 case SCTP_ACTIVE:
1224 return 3; /* best case */
1225 case SCTP_UNKNOWN:
1226 return 2;
1227 case SCTP_PF:
1228 return 1;
1229 default: /* case SCTP_INACTIVE */
1230 return 0; /* worst case */
1231 }
1232}
1233
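/* Break a tie between two transports with equal state scores: prefer the
 * one with the lower error count; if the error counts match as well,
 * prefer the one we heard from most recently.
 */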
1234static struct sctp_transport *sctp_trans_elect_tie(struct sctp_transport *trans1,
1235 struct sctp_transport *trans2)
1236{
1237 if (trans1->error_count > trans2->error_count) {
1238 return trans2;
1239 } else if (trans1->error_count == trans2->error_count &&
1240 ktime_after(trans2->last_time_heard,
1241 trans1->last_time_heard)) {
1242 return trans2;
1243 } else {
1244 return trans1;
1245 }
1246}
1247
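/* Pick the better of two candidate transports by state score
 * (ACTIVE > UNKNOWN > PF > INACTIVE); on equal scores fall back to the
 * error-count / last-time-heard tie-break above.
 */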
1248static struct sctp_transport *sctp_trans_elect_best(struct sctp_transport *curr,
1249 struct sctp_transport *best)
1250{
1251 u8 score_curr, score_best;
1252
1253 if (best == NULL || curr == best)
1254 return curr;
1255
1256 score_curr = sctp_trans_score(curr);
1257 score_best = sctp_trans_score(best);
1258
1259 /* First, try a score-based selection if both transport states
1260 * differ. If we're in a tie, lets try to make a more clever
1261 * decision here based on error counts and last time heard.
1262 */
1263 if (score_curr > score_best)
1264 return curr;
1265 else if (score_curr == score_best)
1266 return sctp_trans_elect_tie(best, curr);
1267 else
1268 return best;
1269}
1270
1271void sctp_assoc_update_retran_path(struct sctp_association *asoc)
1272{
1273 struct sctp_transport *trans = asoc->peer.retran_path;
1274 struct sctp_transport *trans_next = NULL;
1275
1276 /* We're done as we only have the one and only path. */
1277 if (asoc->peer.transport_count == 1)
1278 return;
1279 /* If active_path and retran_path are the same and active,
1280 * then this is the only active path. Use it.
1281 */
1282 if (asoc->peer.active_path == asoc->peer.retran_path &&
1283 asoc->peer.active_path->state == SCTP_ACTIVE)
1284 return;
1285
1286 /* Iterate from retran_path's successor back to retran_path. */
1287 for (trans = list_next_entry(trans, transports); 1;
1288 trans = list_next_entry(trans, transports)) {
1289 /* Manually skip the head element. */
1290 if (&trans->transports == &asoc->peer.transport_addr_list)
1291 continue;
1292 if (trans->state == SCTP_UNCONFIRMED)
1293 continue;
1294 trans_next = sctp_trans_elect_best(trans, trans_next);
1295 /* Active is good enough for immediate return. */
1296 if (trans_next->state == SCTP_ACTIVE)
1297 break;
1298 /* We've reached the end, time to update path. */
1299 if (trans == asoc->peer.retran_path)
1300 break;
1301 }
1302
1303 asoc->peer.retran_path = trans_next;
1304
1305 pr_debug("%s: association:%p updated new path to addr:%pISpc\n",
1306 __func__, asoc, &asoc->peer.retran_path->ipaddr.sa);
1307}
1308
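/* Re-select the active and retransmission paths: prefer the primary path
 * while it is usable, otherwise the two most recently heard-from active
 * transports, falling back to the best PF transport if no active one
 * exists.
 */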
1309static void sctp_select_active_and_retran_path(struct sctp_association *asoc)
1310{
1311 struct sctp_transport *trans, *trans_pri = NULL, *trans_sec = NULL;
1312 struct sctp_transport *trans_pf = NULL;
1313
1314 /* Look for the two most recently used active transports. */
1315 list_for_each_entry(trans, &asoc->peer.transport_addr_list,
1316 transports) {
1317 /* Skip uninteresting transports. */
1318 if (trans->state == SCTP_INACTIVE ||
1319 trans->state == SCTP_UNCONFIRMED)
1320 continue;
1321 /* Keep track of the best PF transport from our
1322 * list in case we don't find an active one.
1323 */
1324 if (trans->state == SCTP_PF) {
1325 trans_pf = sctp_trans_elect_best(trans, trans_pf);
1326 continue;
1327 }
1328 /* For active transports, pick the most recent ones. */
1329 if (trans_pri == NULL ||
1330 ktime_after(trans->last_time_heard,
1331 trans_pri->last_time_heard)) {
1332 trans_sec = trans_pri;
1333 trans_pri = trans;
1334 } else if (trans_sec == NULL ||
1335 ktime_after(trans->last_time_heard,
1336 trans_sec->last_time_heard)) {
1337 trans_sec = trans;
1338 }
1339 }
1340
1341 /* RFC 2960 6.4 Multi-Homed SCTP Endpoints
1342 *
1343 * By default, an endpoint should always transmit to the primary
1344 * path, unless the SCTP user explicitly specifies the
1345 * destination transport address (and possibly source transport
1346 * address) to use. [If the primary is active but not most recent,
1347 * bump the most recently used transport.]
1348 */
1349 if ((asoc->peer.primary_path->state == SCTP_ACTIVE ||
1350 asoc->peer.primary_path->state == SCTP_UNKNOWN) &&
1351 asoc->peer.primary_path != trans_pri) {
1352 trans_sec = trans_pri;
1353 trans_pri = asoc->peer.primary_path;
1354 }
1355
1356 /* We did not find anything useful for a possible retransmission
1357 * path; either the primary path that we found is the same as
1358 * the current one, or we didn't generally find an active one.
1359 */
1360 if (trans_sec == NULL)
1361 trans_sec = trans_pri;
1362
1363 /* If we failed to find a usable transport, just camp on the
1364 * active or pick a PF iff it's the better choice.
1365 */
1366 if (trans_pri == NULL) {
1367 trans_pri = sctp_trans_elect_best(asoc->peer.active_path, trans_pf);
1368 trans_sec = trans_pri;
1369 }
1370
1371 /* Set the active and retran transports. */
1372 asoc->peer.active_path = trans_pri;
1373 asoc->peer.retran_path = trans_sec;
1374}
1375
1376struct sctp_transport *
1377sctp_assoc_choose_alter_transport(struct sctp_association *asoc,
1378 struct sctp_transport *last_sent_to)
1379{
1380 /* If this is the first time packet is sent, use the active path,
1381 * else use the retran path. If the last packet was sent over the
1382 * retran path, update the retran path and use it.
1383 */
1384 if (last_sent_to == NULL) {
1385 return asoc->peer.active_path;
1386 } else {
1387 if (last_sent_to == asoc->peer.retran_path)
1388 sctp_assoc_update_retran_path(asoc);
1389
1390 return asoc->peer.retran_path;
1391 }
1392}
1393
1394/* Update the association's pmtu and frag_point by going through all the
1395 * transports. This routine is called when a transport's PMTU has changed.
1396 */
1397void sctp_assoc_sync_pmtu(struct sock *sk, struct sctp_association *asoc)
1398{
1399 struct sctp_transport *t;
1400 __u32 pmtu = 0;
1401
1402 if (!asoc)
1403 return;
1404
1405 /* Get the lowest pmtu of all the transports. */
1406 list_for_each_entry(t, &asoc->peer.transport_addr_list,
1407 transports) {
1408 if (t->pmtu_pending && t->dst) {
1409 sctp_transport_update_pmtu(sk, t,
1410 WORD_TRUNC(dst_mtu(t->dst)));
1411 t->pmtu_pending = 0;
1412 }
1413 if (!pmtu || (t->pathmtu < pmtu))
1414 pmtu = t->pathmtu;
1415 }
1416
1417 if (pmtu) {
1418 asoc->pathmtu = pmtu;
1419 asoc->frag_point = sctp_frag_point(asoc, pmtu);
1420 }
1421
1422 pr_debug("%s: asoc:%p, pmtu:%d, frag_point:%d\n", __func__, asoc,
1423 asoc->pathmtu, asoc->frag_point);
1424}
1425
1426/* Should we send a SACK to update our peer? */
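/* A window update is warranted only in the established/shutdown states,
 * and only once rwnd exceeds the last advertised a_rwnd by at least
 * max(sk_rcvbuf >> rwnd_upd_shift, pathmtu).
 */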
1427static inline bool sctp_peer_needs_update(struct sctp_association *asoc)
1428{
1429 struct net *net = sock_net(asoc->base.sk);
1430 switch (asoc->state) {
1431 case SCTP_STATE_ESTABLISHED:
1432 case SCTP_STATE_SHUTDOWN_PENDING:
1433 case SCTP_STATE_SHUTDOWN_RECEIVED:
1434 case SCTP_STATE_SHUTDOWN_SENT:
1435 if ((asoc->rwnd > asoc->a_rwnd) &&
1436 ((asoc->rwnd - asoc->a_rwnd) >= max_t(__u32,
1437 (asoc->base.sk->sk_rcvbuf >> net->sctp.rwnd_upd_shift),
1438 asoc->pathmtu)))
1439 return true;
1440 break;
1441 default:
1442 break;
1443 }
1444 return false;
1445}
1446
1447/* Increase asoc's rwnd by len and send any window update SACK if needed. */
1448void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned int len)
1449{
1450 struct sctp_chunk *sack;
1451 struct timer_list *timer;
1452
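	/* If earlier data pushed us past the advertised window (rwnd_over),
	 * pay that back first before growing rwnd itself.
	 */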
1453 if (asoc->rwnd_over) {
1454 if (asoc->rwnd_over >= len) {
1455 asoc->rwnd_over -= len;
1456 } else {
1457 asoc->rwnd += (len - asoc->rwnd_over);
1458 asoc->rwnd_over = 0;
1459 }
1460 } else {
1461 asoc->rwnd += len;
1462 }
1463
1464 /* If we had window pressure, start recovering it
1465 * once our rwnd had reached the accumulated pressure
1466 * threshold. The idea is to recover slowly, but up
1467 * to the initial advertised window.
1468 */
1469 if (asoc->rwnd_press && asoc->rwnd >= asoc->rwnd_press) {
1470 int change = min(asoc->pathmtu, asoc->rwnd_press);
1471 asoc->rwnd += change;
1472 asoc->rwnd_press -= change;
1473 }
1474
1475 pr_debug("%s: asoc:%p rwnd increased by %d to (%u, %u) - %u\n",
1476 __func__, asoc, len, asoc->rwnd, asoc->rwnd_over,
1477 asoc->a_rwnd);
1478
1479 /* Send a window update SACK if the rwnd has increased by at least the
1480 * minimum of the association's PMTU and half of the receive buffer.
1481 * The algorithm used is similar to the one described in
1482 * Section 4.2.3.3 of RFC 1122.
1483 */
1484 if (sctp_peer_needs_update(asoc)) {
1485 asoc->a_rwnd = asoc->rwnd;
1486
1487 pr_debug("%s: sending window update SACK- asoc:%p rwnd:%u "
1488 "a_rwnd:%u\n", __func__, asoc, asoc->rwnd,
1489 asoc->a_rwnd);
1490
1491 sack = sctp_make_sack(asoc);
1492 if (!sack)
1493 return;
1494
1495 asoc->peer.sack_needed = 0;
1496
1497 sctp_outq_tail(&asoc->outqueue, sack, GFP_ATOMIC);
1498
1499 /* Stop the SACK timer. */
1500 timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK];
1501 if (del_timer(timer))
1502 sctp_association_put(asoc);
1503 }
1504}
1505
1506/* Decrease asoc's rwnd by len. */
1507void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned int len)
1508{
1509 int rx_count;
1510 int over = 0;
1511
1512 if (unlikely(!asoc->rwnd || asoc->rwnd_over))
1513 pr_debug("%s: association:%p has asoc->rwnd:%u, "
1514 "asoc->rwnd_over:%u!\n", __func__, asoc,
1515 asoc->rwnd, asoc->rwnd_over);
1516
1517 if (asoc->ep->rcvbuf_policy)
1518 rx_count = atomic_read(&asoc->rmem_alloc);
1519 else
1520 rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc);
1521
1522 /* If we've reached or overflowed our receive buffer, announce
1523 * a 0 rwnd if rwnd would still be positive. Store the
1524 * potential pressure overflow so that the window can be restored
1525 * back to original value.
1526 */
1527 if (rx_count >= asoc->base.sk->sk_rcvbuf)
1528 over = 1;
1529
1530 if (asoc->rwnd >= len) {
1531 asoc->rwnd -= len;
1532 if (over) {
1533 asoc->rwnd_press += asoc->rwnd;
1534 asoc->rwnd = 0;
1535 }
1536 } else {
1537 asoc->rwnd_over = len - asoc->rwnd;
1538 asoc->rwnd = 0;
1539 }
1540
1541 pr_debug("%s: asoc:%p rwnd decreased by %d to (%u, %u, %u)\n",
1542 __func__, asoc, len, asoc->rwnd, asoc->rwnd_over,
1543 asoc->rwnd_press);
1544}
1545
1546/* Build the bind address list for the association based on info from the
1547 * local endpoint and the remote peer.
1548 */
1549int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc,
1550 sctp_scope_t scope, gfp_t gfp)
1551{
1552 int flags;
1553
1554 /* Use scoping rules to determine the subset of addresses from
1555 * the endpoint.
1556 */
1557 flags = (PF_INET6 == asoc->base.sk->sk_family) ? SCTP_ADDR6_ALLOWED : 0;
1558 if (asoc->peer.ipv4_address)
1559 flags |= SCTP_ADDR4_PEERSUPP;
1560 if (asoc->peer.ipv6_address)
1561 flags |= SCTP_ADDR6_PEERSUPP;
1562
1563 return sctp_bind_addr_copy(sock_net(asoc->base.sk),
1564 &asoc->base.bind_addr,
1565 &asoc->ep->base.bind_addr,
1566 scope, gfp, flags);
1567}
1568
1569/* Build the association's bind address list from the cookie. */
1570int sctp_assoc_set_bind_addr_from_cookie(struct sctp_association *asoc,
1571 struct sctp_cookie *cookie,
1572 gfp_t gfp)
1573{
1574 int var_size2 = ntohs(cookie->peer_init->chunk_hdr.length);
1575 int var_size3 = cookie->raw_addr_list_len;
1576 __u8 *raw = (__u8 *)cookie->peer_init + var_size2;
1577
1578 return sctp_raw_to_bind_addrs(&asoc->base.bind_addr, raw, var_size3,
1579 asoc->ep->base.bind_addr.port, gfp);
1580}
1581
1582/* Lookup laddr in the bind address list of an association. */
1583int sctp_assoc_lookup_laddr(struct sctp_association *asoc,
1584 const union sctp_addr *laddr)
1585{
1586 int found = 0;
1587
1588 if ((asoc->base.bind_addr.port == ntohs(laddr->v4.sin_port)) &&
1589 sctp_bind_addr_match(&asoc->base.bind_addr, laddr,
1590 sctp_sk(asoc->base.sk)))
1591 found = 1;
1592
1593 return found;
1594}
1595
1596/* Set an association id for a given association */
1597int sctp_assoc_set_id(struct sctp_association *asoc, gfp_t gfp)
1598{
1599 bool preload = gfpflags_allow_blocking(gfp);
1600 int ret;
1601
1602 /* If the id is already assigned, keep it. */
1603 if (asoc->assoc_id)
1604 return 0;
1605
1606 if (preload)
1607 idr_preload(gfp);
1608 spin_lock_bh(&sctp_assocs_id_lock);
1609 /* 0 is not a valid assoc_id, must be >= 1 */
1610 ret = idr_alloc_cyclic(&sctp_assocs_id, asoc, 1, 0, GFP_NOWAIT);
1611 spin_unlock_bh(&sctp_assocs_id_lock);
1612 if (preload)
1613 idr_preload_end();
1614 if (ret < 0)
1615 return ret;
1616
1617 asoc->assoc_id = (sctp_assoc_t)ret;
1618 return 0;
1619}
1620
1621/* Free the ASCONF queue */
1622static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc)
1623{
1624 struct sctp_chunk *asconf;
1625 struct sctp_chunk *tmp;
1626
1627 list_for_each_entry_safe(asconf, tmp, &asoc->addip_chunk_list, list) {
1628 list_del_init(&asconf->list);
1629 sctp_chunk_free(asconf);
1630 }
1631}
1632
1633/* Free asconf_ack cache */
1634static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc)
1635{
1636 struct sctp_chunk *ack;
1637 struct sctp_chunk *tmp;
1638
1639 list_for_each_entry_safe(ack, tmp, &asoc->asconf_ack_list,
1640 transmitted_list) {
1641 list_del_init(&ack->transmitted_list);
1642 sctp_chunk_free(ack);
1643 }
1644}
1645
1646/* Clean up the ASCONF_ACK queue */
1647void sctp_assoc_clean_asconf_ack_cache(const struct sctp_association *asoc)
1648{
1649 struct sctp_chunk *ack;
1650 struct sctp_chunk *tmp;
1651
1652 /* We can remove all the entries from the queue up to
1653 * the "Peer-Sequence-Number".
1654 */
1655 list_for_each_entry_safe(ack, tmp, &asoc->asconf_ack_list,
1656 transmitted_list) {
1657 if (ack->subh.addip_hdr->serial ==
1658 htonl(asoc->peer.addip_serial))
1659 break;
1660
1661 list_del_init(&ack->transmitted_list);
1662 sctp_chunk_free(ack);
1663 }
1664}
1665
1666/* Find the ASCONF_ACK whose serial number matches ASCONF */
1667struct sctp_chunk *sctp_assoc_lookup_asconf_ack(
1668 const struct sctp_association *asoc,
1669 __be32 serial)
1670{
1671 struct sctp_chunk *ack;
1672
1673 /* Walk through the list of cached ASCONF-ACKs and find the
1674 * ack chunk whose serial number matches that of the request.
1675 */
1676 list_for_each_entry(ack, &asoc->asconf_ack_list, transmitted_list) {
1677 if (sctp_chunk_pending(ack))
1678 continue;
1679 if (ack->subh.addip_hdr->serial == serial) {
1680 sctp_chunk_hold(ack);
1681 return ack;
1682 }
1683 }
1684
1685 return NULL;
1686}
1687
1688void sctp_asconf_queue_teardown(struct sctp_association *asoc)
1689{
1690 /* Free any cached ASCONF_ACK chunk. */
1691 sctp_assoc_free_asconf_acks(asoc);
1692
1693 /* Free the ASCONF queue. */
1694 sctp_assoc_free_asconf_queue(asoc);
1695
1696 /* Free any cached ASCONF chunk. */
1697 if (asoc->addip_last_asconf)
1698 sctp_chunk_free(asoc->addip_last_asconf);
1699}
1// SPDX-License-Identifier: GPL-2.0-or-later
2/* SCTP kernel implementation
3 * (C) Copyright IBM Corp. 2001, 2004
4 * Copyright (c) 1999-2000 Cisco, Inc.
5 * Copyright (c) 1999-2001 Motorola, Inc.
6 * Copyright (c) 2001 Intel Corp.
7 * Copyright (c) 2001 La Monte H.P. Yarroll
8 *
9 * This file is part of the SCTP kernel implementation
10 *
11 * This module provides the abstraction for an SCTP association.
12 *
13 * Please send any bug reports or fixes you make to the
14 * email address(es):
15 * lksctp developers <linux-sctp@vger.kernel.org>
16 *
17 * Written or modified by:
18 * La Monte H.P. Yarroll <piggy@acm.org>
19 * Karl Knutson <karl@athena.chicago.il.us>
20 * Jon Grimm <jgrimm@us.ibm.com>
21 * Xingang Guo <xingang.guo@intel.com>
22 * Hui Huang <hui.huang@nokia.com>
23 * Sridhar Samudrala <sri@us.ibm.com>
24 * Daisy Chang <daisyc@us.ibm.com>
25 * Ryan Layer <rmlayer@us.ibm.com>
26 * Kevin Gao <kevin.gao@intel.com>
27 */
28
29#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30
31#include <linux/types.h>
32#include <linux/fcntl.h>
33#include <linux/poll.h>
34#include <linux/init.h>
35
36#include <linux/slab.h>
37#include <linux/in.h>
38#include <net/ipv6.h>
39#include <net/sctp/sctp.h>
40#include <net/sctp/sm.h>
41
42/* Forward declarations for internal functions. */
43static void sctp_select_active_and_retran_path(struct sctp_association *asoc);
44static void sctp_assoc_bh_rcv(struct work_struct *work);
45static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc);
46static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc);
47
48/* 1st Level Abstractions. */
49
50/* Initialize a new association from provided memory. */
51static struct sctp_association *sctp_association_init(
52 struct sctp_association *asoc,
53 const struct sctp_endpoint *ep,
54 const struct sock *sk,
55 enum sctp_scope scope, gfp_t gfp)
56{
57 struct sctp_sock *sp;
58 struct sctp_paramhdr *p;
59 int i;
60
61 /* Retrieve the SCTP per socket area. */
62 sp = sctp_sk((struct sock *)sk);
63
64 /* Discarding const is appropriate here. */
65 asoc->ep = (struct sctp_endpoint *)ep;
66 asoc->base.sk = (struct sock *)sk;
67 asoc->base.net = sock_net(sk);
68
69 sctp_endpoint_hold(asoc->ep);
70 sock_hold(asoc->base.sk);
71
72 /* Initialize the common base substructure. */
73 asoc->base.type = SCTP_EP_TYPE_ASSOCIATION;
74
75 /* Initialize the object handling fields. */
76 refcount_set(&asoc->base.refcnt, 1);
77
78 /* Initialize the bind addr area. */
79 sctp_bind_addr_init(&asoc->base.bind_addr, ep->base.bind_addr.port);
80
81 asoc->state = SCTP_STATE_CLOSED;
82 asoc->cookie_life = ms_to_ktime(sp->assocparams.sasoc_cookie_life);
83 asoc->user_frag = sp->user_frag;
84
85 /* Set the association max_retrans and RTO values from the
86 * socket values.
87 */
88 asoc->max_retrans = sp->assocparams.sasoc_asocmaxrxt;
89 asoc->pf_retrans = sp->pf_retrans;
90 asoc->ps_retrans = sp->ps_retrans;
91 asoc->pf_expose = sp->pf_expose;
92
93 asoc->rto_initial = msecs_to_jiffies(sp->rtoinfo.srto_initial);
94 asoc->rto_max = msecs_to_jiffies(sp->rtoinfo.srto_max);
95 asoc->rto_min = msecs_to_jiffies(sp->rtoinfo.srto_min);
96
97 /* Initialize the association's heartbeat interval based on the
98 * sock configured value.
99 */
100 asoc->hbinterval = msecs_to_jiffies(sp->hbinterval);
101 asoc->probe_interval = msecs_to_jiffies(sp->probe_interval);
102
103 asoc->encap_port = sp->encap_port;
104
105 /* Initialize path max retrans value. */
106 asoc->pathmaxrxt = sp->pathmaxrxt;
107
108 asoc->flowlabel = sp->flowlabel;
109 asoc->dscp = sp->dscp;
110
111 /* Set association default SACK delay */
112 asoc->sackdelay = msecs_to_jiffies(sp->sackdelay);
113 asoc->sackfreq = sp->sackfreq;
114
115 /* Set the association default flags controlling
116 * Heartbeat, SACK delay, and Path MTU Discovery.
117 */
118 asoc->param_flags = sp->param_flags;
119
120 /* Initialize the maximum number of new data packets that can be sent
121 * in a burst.
122 */
123 asoc->max_burst = sp->max_burst;
124
125 asoc->subscribe = sp->subscribe;
126
127 /* initialize association timers */
128 asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] = asoc->rto_initial;
129 asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] = asoc->rto_initial;
130 asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = asoc->rto_initial;
131
132 /* sctpimpguide Section 2.12.2
133 * If the 'T5-shutdown-guard' timer is used, it SHOULD be set to the
134 * recommended value of 5 times 'RTO.Max'.
135 */
136 asoc->timeouts[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]
137 = 5 * asoc->rto_max;
138
139 asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay;
140 asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] =
141 (unsigned long)sp->autoclose * HZ;
142
143 /* Initializes the timers */
144 for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i)
145 timer_setup(&asoc->timers[i], sctp_timer_events[i], 0);
146
147 /* Pull default initialization values from the sock options.
148 * Note: This assumes that the values have already been
149 * validated in the sock.
150 */
151 asoc->c.sinit_max_instreams = sp->initmsg.sinit_max_instreams;
152 asoc->c.sinit_num_ostreams = sp->initmsg.sinit_num_ostreams;
153 asoc->max_init_attempts = sp->initmsg.sinit_max_attempts;
154
155 asoc->max_init_timeo =
156 msecs_to_jiffies(sp->initmsg.sinit_max_init_timeo);
157
158 /* Set the local window size for receive.
159 * This is also the rcvbuf space per association.
160 * RFC 4960, Section 6.1 - An SCTP receiver MUST be able to receive
161 * a minimum of 1500 bytes in one SCTP packet.
162 */
163 if ((sk->sk_rcvbuf/2) < SCTP_DEFAULT_MINWINDOW)
164 asoc->rwnd = SCTP_DEFAULT_MINWINDOW;
165 else
166 asoc->rwnd = sk->sk_rcvbuf/2;
167
168 asoc->a_rwnd = asoc->rwnd;
169
170 /* Use my own max window until I learn something better. */
171 asoc->peer.rwnd = SCTP_DEFAULT_MAXWINDOW;
172
173 /* Initialize the receive memory counter */
174 atomic_set(&asoc->rmem_alloc, 0);
175
176 init_waitqueue_head(&asoc->wait);
177
178 asoc->c.my_vtag = sctp_generate_tag(ep);
179 asoc->c.my_port = ep->base.bind_addr.port;
180
181 asoc->c.initial_tsn = sctp_generate_tsn(ep);
182
183 asoc->next_tsn = asoc->c.initial_tsn;
184
185 asoc->ctsn_ack_point = asoc->next_tsn - 1;
186 asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
187 asoc->highest_sacked = asoc->ctsn_ack_point;
188 asoc->last_cwr_tsn = asoc->ctsn_ack_point;
189
190 /* ADDIP Section 4.1 Asconf Chunk Procedures
191 *
192 * When an endpoint has an ASCONF signaled change to be sent to the
193 * remote endpoint it should do the following:
194 * ...
195 * A2) a serial number should be assigned to the chunk. The serial
196 * number SHOULD be a monotonically increasing number. The serial
197 * numbers SHOULD be initialized at the start of the
198 * association to the same value as the initial TSN.
199 */
200 asoc->addip_serial = asoc->c.initial_tsn;
201 asoc->strreset_outseq = asoc->c.initial_tsn;
202
203 INIT_LIST_HEAD(&asoc->addip_chunk_list);
204 INIT_LIST_HEAD(&asoc->asconf_ack_list);
205
206 /* Make an empty list of remote transport addresses. */
207 INIT_LIST_HEAD(&asoc->peer.transport_addr_list);
208
209 /* RFC 2960 5.1 Normal Establishment of an Association
210 *
211 * After the reception of the first data chunk in an
212 * association the endpoint must immediately respond with a
213 * sack to acknowledge the data chunk. Subsequent
214 * acknowledgements should be done as described in Section
215 * 6.2.
216 *
217 * [We implement this by telling a new association that it
218 * already received one packet.]
219 */
220 asoc->peer.sack_needed = 1;
221 asoc->peer.sack_generation = 1;
222
223 /* Create an input queue. */
224 sctp_inq_init(&asoc->base.inqueue);
225 sctp_inq_set_th_handler(&asoc->base.inqueue, sctp_assoc_bh_rcv);
226
227 /* Create an output queue. */
228 sctp_outq_init(asoc, &asoc->outqueue);
229
230 sctp_ulpq_init(&asoc->ulpq, asoc);
231
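	/* Set up the outbound streams requested via sinit_num_ostreams; the
	 * incoming stream count (0 here) is established later from the
	 * peer's INIT.
	 */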
232 if (sctp_stream_init(&asoc->stream, asoc->c.sinit_num_ostreams, 0, gfp))
233 goto stream_free;
234
235 /* Initialize default path MTU. */
236 asoc->pathmtu = sp->pathmtu;
237 sctp_assoc_update_frag_point(asoc);
238
239 /* Assume that peer would support both address types unless we are
240 * told otherwise.
241 */
242 asoc->peer.ipv4_address = 1;
243 if (asoc->base.sk->sk_family == PF_INET6)
244 asoc->peer.ipv6_address = 1;
245 INIT_LIST_HEAD(&asoc->asocs);
246
247 asoc->default_stream = sp->default_stream;
248 asoc->default_ppid = sp->default_ppid;
249 asoc->default_flags = sp->default_flags;
250 asoc->default_context = sp->default_context;
251 asoc->default_timetolive = sp->default_timetolive;
252 asoc->default_rcv_context = sp->default_rcv_context;
253
254 /* AUTH related initializations */
255 INIT_LIST_HEAD(&asoc->endpoint_shared_keys);
256 if (sctp_auth_asoc_copy_shkeys(ep, asoc, gfp))
257 goto stream_free;
258
259 asoc->active_key_id = ep->active_key_id;
260 asoc->strreset_enable = ep->strreset_enable;
261
262 /* Save the hmacs and chunks list into this association */
263 if (ep->auth_hmacs_list)
264 memcpy(asoc->c.auth_hmacs, ep->auth_hmacs_list,
265 ntohs(ep->auth_hmacs_list->param_hdr.length));
266 if (ep->auth_chunk_list)
267 memcpy(asoc->c.auth_chunks, ep->auth_chunk_list,
268 ntohs(ep->auth_chunk_list->param_hdr.length));
269
270 /* Get the AUTH random number for this association */
271 p = (struct sctp_paramhdr *)asoc->c.auth_random;
272 p->type = SCTP_PARAM_RANDOM;
273 p->length = htons(sizeof(*p) + SCTP_AUTH_RANDOM_LENGTH);
274 get_random_bytes(p+1, SCTP_AUTH_RANDOM_LENGTH);
275
276 return asoc;
277
278stream_free:
279 sctp_stream_free(&asoc->stream);
280 sock_put(asoc->base.sk);
281 sctp_endpoint_put(asoc->ep);
282 return NULL;
283}
284
285/* Allocate and initialize a new association */
286struct sctp_association *sctp_association_new(const struct sctp_endpoint *ep,
287 const struct sock *sk,
288 enum sctp_scope scope, gfp_t gfp)
289{
290 struct sctp_association *asoc;
291
292 asoc = kzalloc(sizeof(*asoc), gfp);
293 if (!asoc)
294 goto fail;
295
296 if (!sctp_association_init(asoc, ep, sk, scope, gfp))
297 goto fail_init;
298
299 SCTP_DBG_OBJCNT_INC(assoc);
300
301 pr_debug("Created asoc %p\n", asoc);
302
303 return asoc;
304
305fail_init:
306 kfree(asoc);
307fail:
308 return NULL;
309}
310
311/* Free this association if possible. There may still be users, so
312 * the actual deallocation may be delayed.
313 */
314void sctp_association_free(struct sctp_association *asoc)
315{
316 struct sock *sk = asoc->base.sk;
317 struct sctp_transport *transport;
318 struct list_head *pos, *temp;
319 int i;
320
321 /* Only real associations count against the endpoint, so
322 * don't bother if this is a temporary association.
323 */
324 if (!list_empty(&asoc->asocs)) {
325 list_del(&asoc->asocs);
326
327 /* Decrement the backlog value for a TCP-style listening
328 * socket.
329 */
330 if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
331 sk_acceptq_removed(sk);
332 }
333
334 /* Mark as dead, so other users can know this structure is
335 * going away.
336 */
337 asoc->base.dead = true;
338
339 /* Dispose of any data lying around in the outqueue. */
340 sctp_outq_free(&asoc->outqueue);
341
342 /* Dispose of any pending messages for the upper layer. */
343 sctp_ulpq_free(&asoc->ulpq);
344
345 /* Dispose of any pending chunks on the inqueue. */
346 sctp_inq_free(&asoc->base.inqueue);
347
348 sctp_tsnmap_free(&asoc->peer.tsn_map);
349
350 /* Free stream information. */
351 sctp_stream_free(&asoc->stream);
352
353 if (asoc->strreset_chunk)
354 sctp_chunk_free(asoc->strreset_chunk);
355
356 /* Clean up the bound address list. */
357 sctp_bind_addr_free(&asoc->base.bind_addr);
358
359 /* Do we need to go through all of our timers and
360 * delete them? To be safe we try to delete them all, even
361 * though in principle we could guess which ones are armed
362 * based on our current state.
363 */
364 for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i) {
365 if (del_timer(&asoc->timers[i]))
366 sctp_association_put(asoc);
367 }
368
369 /* Free peer's cached cookie. */
370 kfree(asoc->peer.cookie);
371 kfree(asoc->peer.peer_random);
372 kfree(asoc->peer.peer_chunks);
373 kfree(asoc->peer.peer_hmacs);
374
375 /* Release the transport structures. */
376 list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
377 transport = list_entry(pos, struct sctp_transport, transports);
378 list_del_rcu(pos);
379 sctp_unhash_transport(transport);
380 sctp_transport_free(transport);
381 }
382
383 asoc->peer.transport_count = 0;
384
385 sctp_asconf_queue_teardown(asoc);
386
387 /* Free pending address space being deleted */
388 kfree(asoc->asconf_addr_del_pending);
389
390 /* AUTH - Free the endpoint shared keys */
391 sctp_auth_destroy_keys(&asoc->endpoint_shared_keys);
392
393 /* AUTH - Free the association shared key */
394 sctp_auth_key_put(asoc->asoc_shared_key);
395
396 sctp_association_put(asoc);
397}
398
399/* Cleanup and free up an association. */
400static void sctp_association_destroy(struct sctp_association *asoc)
401{
402 if (unlikely(!asoc->base.dead)) {
403 WARN(1, "Attempt to destroy undead association %p!\n", asoc);
404 return;
405 }
406
407 sctp_endpoint_put(asoc->ep);
408 sock_put(asoc->base.sk);
409
410 if (asoc->assoc_id != 0) {
411 spin_lock_bh(&sctp_assocs_id_lock);
412 idr_remove(&sctp_assocs_id, asoc->assoc_id);
413 spin_unlock_bh(&sctp_assocs_id_lock);
414 }
415
416 WARN_ON(atomic_read(&asoc->rmem_alloc));
417
418 kfree_rcu(asoc, rcu);
419 SCTP_DBG_OBJCNT_DEC(assoc);
420}
421
422/* Change the primary destination address for the peer. */
423void sctp_assoc_set_primary(struct sctp_association *asoc,
424 struct sctp_transport *transport)
425{
426 int changeover = 0;
427
428 /* it's a changeover only if we already have a primary path
429 * that we are changing
430 */
431 if (asoc->peer.primary_path != NULL &&
432 asoc->peer.primary_path != transport)
433 changeover = 1;
434
435 asoc->peer.primary_path = transport;
436 sctp_ulpevent_notify_peer_addr_change(transport,
437 SCTP_ADDR_MADE_PRIM, 0);
438
439 /* Set a default msg_name for events. */
440 memcpy(&asoc->peer.primary_addr, &transport->ipaddr,
441 sizeof(union sctp_addr));
442
443 /* If the primary path is changing, assume that the
444 * user wants to use this new path.
445 */
446 if ((transport->state == SCTP_ACTIVE) ||
447 (transport->state == SCTP_UNKNOWN))
448 asoc->peer.active_path = transport;
449
450 /*
451 * SFR-CACC algorithm:
452 * Upon the receipt of a request to change the primary
453 * destination address, on the data structure for the new
454 * primary destination, the sender MUST do the following:
455 *
456 * 1) If CHANGEOVER_ACTIVE is set, then there was a switch
457 * to this destination address earlier. The sender MUST set
458 * CYCLING_CHANGEOVER to indicate that this switch is a
459 * double switch to the same destination address.
460 *
461 * Really, only bother if we have data queued or outstanding on
462 * the association.
463 */
464 if (!asoc->outqueue.outstanding_bytes && !asoc->outqueue.out_qlen)
465 return;
466
467 if (transport->cacc.changeover_active)
468 transport->cacc.cycling_changeover = changeover;
469
470 /* 2) The sender MUST set CHANGEOVER_ACTIVE to indicate that
471 * a changeover has occurred.
472 */
473 transport->cacc.changeover_active = changeover;
474
475 /* 3) The sender MUST store the next TSN to be sent in
476 * next_tsn_at_change.
477 */
478 transport->cacc.next_tsn_at_change = asoc->next_tsn;
479}
480
481/* Remove a transport from an association. */
482void sctp_assoc_rm_peer(struct sctp_association *asoc,
483 struct sctp_transport *peer)
484{
485 struct sctp_transport *transport;
486 struct list_head *pos;
487 struct sctp_chunk *ch;
488
489 pr_debug("%s: association:%p addr:%pISpc\n",
490 __func__, asoc, &peer->ipaddr.sa);
491
492 /* If we are to remove the current retran_path, update it
493 * to the next peer before removing this peer from the list.
494 */
495 if (asoc->peer.retran_path == peer)
496 sctp_assoc_update_retran_path(asoc);
497
498 /* Remove this peer from the list. */
499 list_del_rcu(&peer->transports);
500 /* Remove this peer from the transport hashtable */
501 sctp_unhash_transport(peer);
502
503 /* Get the first transport of asoc. */
504 pos = asoc->peer.transport_addr_list.next;
505 transport = list_entry(pos, struct sctp_transport, transports);
506
507 /* Update any entries that match the peer to be deleted. */
508 if (asoc->peer.primary_path == peer)
509 sctp_assoc_set_primary(asoc, transport);
510 if (asoc->peer.active_path == peer)
511 asoc->peer.active_path = transport;
512 if (asoc->peer.retran_path == peer)
513 asoc->peer.retran_path = transport;
514 if (asoc->peer.last_data_from == peer)
515 asoc->peer.last_data_from = transport;
516
517 if (asoc->strreset_chunk &&
518 asoc->strreset_chunk->transport == peer) {
519 asoc->strreset_chunk->transport = transport;
520 sctp_transport_reset_reconf_timer(transport);
521 }
522
523 /* If we remove the transport an INIT was last sent to, set it to
524 * NULL. Combined with the update of the retran path above, this
525 * will cause the next INIT to be sent to the next available
526 * transport, maintaining the cycle.
527 */
528 if (asoc->init_last_sent_to == peer)
529 asoc->init_last_sent_to = NULL;
530
531 /* If we remove the transport an SHUTDOWN was last sent to, set it
532 * to NULL. Combined with the update of the retran path above, this
533 * will cause the next SHUTDOWN to be sent to the next available
534 * transport, maintaining the cycle.
535 */
536 if (asoc->shutdown_last_sent_to == peer)
537 asoc->shutdown_last_sent_to = NULL;
538
539 /* If we remove the transport an ASCONF was last sent to, set it to
540 * NULL.
541 */
542 if (asoc->addip_last_asconf &&
543 asoc->addip_last_asconf->transport == peer)
544 asoc->addip_last_asconf->transport = NULL;
545
546 /* If we have something on the transmitted list, we have to
547 * save it off. The best place is the active path.
548 */
549 if (!list_empty(&peer->transmitted)) {
550 struct sctp_transport *active = asoc->peer.active_path;
551
552 /* Reset the transport of each chunk on this list */
553 list_for_each_entry(ch, &peer->transmitted,
554 transmitted_list) {
555 ch->transport = NULL;
556 ch->rtt_in_progress = 0;
557 }
558
559 list_splice_tail_init(&peer->transmitted,
560 &active->transmitted);
561
562 /* Start a T3 timer here in case it wasn't running so
563 * that these migrated packets have a chance to get
564 * retransmitted.
565 */
566 if (!timer_pending(&active->T3_rtx_timer))
567 if (!mod_timer(&active->T3_rtx_timer,
568 jiffies + active->rto))
569 sctp_transport_hold(active);
570 }
571
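	/* Also forget the removed peer on any chunks still sitting on the
	 * output queue; they will be assigned a transport again when they
	 * are actually transmitted.
	 */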
572 list_for_each_entry(ch, &asoc->outqueue.out_chunk_list, list)
573 if (ch->transport == peer)
574 ch->transport = NULL;
575
576 asoc->peer.transport_count--;
577
578 sctp_ulpevent_notify_peer_addr_change(peer, SCTP_ADDR_REMOVED, 0);
579 sctp_transport_free(peer);
580}
581
582/* Add a transport address to an association. */
583struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
584 const union sctp_addr *addr,
585 const gfp_t gfp,
586 const int peer_state)
587{
588 struct sctp_transport *peer;
589 struct sctp_sock *sp;
590 unsigned short port;
591
592 sp = sctp_sk(asoc->base.sk);
593
594 /* AF_INET and AF_INET6 share common port field. */
595 port = ntohs(addr->v4.sin_port);
596
597 pr_debug("%s: association:%p addr:%pISpc state:%d\n", __func__,
598 asoc, &addr->sa, peer_state);
599
600 /* Set the port if it has not been set yet. */
601 if (!asoc->peer.port)
602 asoc->peer.port = port;
603
604 /* Check to see if this is a duplicate. */
605 peer = sctp_assoc_lookup_paddr(asoc, addr);
606 if (peer) {
607 /* An UNKNOWN state is only set on transports added by the
608 * user in sctp_connectx() calls. Such transports should be
609 * considered CONFIRMED per RFC 4960, Section 5.4.
610 */
611 if (peer->state == SCTP_UNKNOWN) {
612 peer->state = SCTP_ACTIVE;
613 }
614 return peer;
615 }
616
617 peer = sctp_transport_new(asoc->base.net, addr, gfp);
618 if (!peer)
619 return NULL;
620
621 sctp_transport_set_owner(peer, asoc);
622
623 /* Initialize the peer's heartbeat interval based on the
624 * association configured value.
625 */
626 peer->hbinterval = asoc->hbinterval;
627 peer->probe_interval = asoc->probe_interval;
628
629 peer->encap_port = asoc->encap_port;
630
631 /* Set the path max_retrans. */
632 peer->pathmaxrxt = asoc->pathmaxrxt;
633
634 /* And the partial failure retrans threshold */
635 peer->pf_retrans = asoc->pf_retrans;
636 /* And the primary path switchover retrans threshold */
637 peer->ps_retrans = asoc->ps_retrans;
638
639 /* Initialize the peer's SACK delay timeout based on the
640 * association configured value.
641 */
642 peer->sackdelay = asoc->sackdelay;
643 peer->sackfreq = asoc->sackfreq;
644
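	/* An IPv6 peer address may carry its own flow label; otherwise
	 * inherit the association default.
	 */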
645 if (addr->sa.sa_family == AF_INET6) {
646 __be32 info = addr->v6.sin6_flowinfo;
647
648 if (info) {
649 peer->flowlabel = ntohl(info & IPV6_FLOWLABEL_MASK);
650 peer->flowlabel |= SCTP_FLOWLABEL_SET_MASK;
651 } else {
652 peer->flowlabel = asoc->flowlabel;
653 }
654 }
655 peer->dscp = asoc->dscp;
656
657 /* Enable/disable heartbeat, SACK delay, and path MTU discovery
658 * based on association setting.
659 */
660 peer->param_flags = asoc->param_flags;
661
662 /* Initialize the pmtu of the transport. */
663 sctp_transport_route(peer, NULL, sp);
664
665 /* If this is the first transport addr on this association,
666 * initialize the association PMTU to the peer's PMTU.
667 * If not and the current association PMTU is higher than the new
668 * peer's PMTU, reset the association PMTU to the new peer's PMTU.
669 */
670 sctp_assoc_set_pmtu(asoc, asoc->pathmtu ?
671 min_t(int, peer->pathmtu, asoc->pathmtu) :
672 peer->pathmtu);
673
674 peer->pmtu_pending = 0;
675
676 /* The asoc->peer.port might not be meaningful yet, but
677 * initialize the packet structure anyway.
678 */
679 sctp_packet_init(&peer->packet, peer, asoc->base.bind_addr.port,
680 asoc->peer.port);
681
682 /* 7.2.1 Slow-Start
683 *
684 * o The initial cwnd before DATA transmission or after a sufficiently
685 * long idle period MUST be set to
686 * min(4*MTU, max(2*MTU, 4380 bytes))
687 *
688 * o The initial value of ssthresh MAY be arbitrarily high
689 * (for example, implementations MAY use the size of the
690 * receiver advertised window).
691 */
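	/* For example, with a 1500-byte PMTU this yields
	 * min(4 * 1500, max(2 * 1500, 4380)) = 4380 bytes.
	 */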
692 peer->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380));
693
694 /* At this point, we may not have the receiver's advertised window,
695 * so initialize ssthresh to the default value and it will be set
696 * later when we process the INIT.
697 */
698 peer->ssthresh = SCTP_DEFAULT_MAXWINDOW;
699
700 peer->partial_bytes_acked = 0;
701 peer->flight_size = 0;
702 peer->burst_limited = 0;
703
704 /* Set the transport's RTO.initial value */
705 peer->rto = asoc->rto_initial;
706 sctp_max_rto(asoc, peer);
707
708 /* Set the peer's active state. */
709 peer->state = peer_state;
710
711 /* Add this peer into the transport hashtable */
712 if (sctp_hash_transport(peer)) {
713 sctp_transport_free(peer);
714 return NULL;
715 }
716
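	/* (Re)evaluate whether PLPMTUD probing should run on this transport. */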
717 sctp_transport_pl_reset(peer);
718
719 /* Attach the remote transport to our asoc. */
720 list_add_tail_rcu(&peer->transports, &asoc->peer.transport_addr_list);
721 asoc->peer.transport_count++;
722
723 sctp_ulpevent_notify_peer_addr_change(peer, SCTP_ADDR_ADDED, 0);
724
725 /* If we do not yet have a primary path, set one. */
726 if (!asoc->peer.primary_path) {
727 sctp_assoc_set_primary(asoc, peer);
728 asoc->peer.retran_path = peer;
729 }
730
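	/* If the retransmission path currently just mirrors the active
	 * path, prefer this new confirmed peer for retransmissions.
	 */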
731 if (asoc->peer.active_path == asoc->peer.retran_path &&
732 peer->state != SCTP_UNCONFIRMED) {
733 asoc->peer.retran_path = peer;
734 }
735
736 return peer;
737}
738
739/* Delete a transport address from an association. */
740void sctp_assoc_del_peer(struct sctp_association *asoc,
741 const union sctp_addr *addr)
742{
743 struct list_head *pos;
744 struct list_head *temp;
745 struct sctp_transport *transport;
746
747 list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
748 transport = list_entry(pos, struct sctp_transport, transports);
749 if (sctp_cmp_addr_exact(addr, &transport->ipaddr)) {
750 /* Do book keeping for removing the peer and free it. */
751 sctp_assoc_rm_peer(asoc, transport);
752 break;
753 }
754 }
755}
756
757/* Lookup a transport by address. */
758struct sctp_transport *sctp_assoc_lookup_paddr(
759 const struct sctp_association *asoc,
760 const union sctp_addr *address)
761{
762 struct sctp_transport *t;
763
764 /* Cycle through all transports searching for a peer address. */
765
766 list_for_each_entry(t, &asoc->peer.transport_addr_list,
767 transports) {
768 if (sctp_cmp_addr_exact(address, &t->ipaddr))
769 return t;
770 }
771
772 return NULL;
773}
774
775/* Remove all transports except a given one */
776void sctp_assoc_del_nonprimary_peers(struct sctp_association *asoc,
777 struct sctp_transport *primary)
778{
779 struct sctp_transport *temp;
780 struct sctp_transport *t;
781
782 list_for_each_entry_safe(t, temp, &asoc->peer.transport_addr_list,
783 transports) {
784 /* if the current transport is not the primary one, delete it */
785 if (t != primary)
786 sctp_assoc_rm_peer(asoc, t);
787 }
788}
789
790/* Engage in transport control operations.
791 * Mark the transport up or down and send a notification to the user.
792 * Select and update the new active and retran paths.
793 */
794void sctp_assoc_control_transport(struct sctp_association *asoc,
795 struct sctp_transport *transport,
796 enum sctp_transport_cmd command,
797 sctp_sn_error_t error)
798{
799 int spc_state = SCTP_ADDR_AVAILABLE;
800 bool ulp_notify = true;
801
802 /* Record the transition on the transport. */
803 switch (command) {
804 case SCTP_TRANSPORT_UP:
805 /* If we are moving from UNCONFIRMED state due
806 * to heartbeat success, report the SCTP_ADDR_CONFIRMED
807 * state to the user, otherwise report SCTP_ADDR_AVAILABLE.
808 */
809 if (transport->state == SCTP_PF &&
810 asoc->pf_expose != SCTP_PF_EXPOSE_ENABLE)
811 ulp_notify = false;
812 else if (transport->state == SCTP_UNCONFIRMED &&
813 error == SCTP_HEARTBEAT_SUCCESS)
814 spc_state = SCTP_ADDR_CONFIRMED;
815
816 transport->state = SCTP_ACTIVE;
817 sctp_transport_pl_reset(transport);
818 break;
819
820 case SCTP_TRANSPORT_DOWN:
821 /* If the transport was never confirmed, do not transition it
822 * to inactive state. Also, release the cached route since
823 * there may be a better route next time.
824 */
825 if (transport->state != SCTP_UNCONFIRMED) {
826 transport->state = SCTP_INACTIVE;
827 sctp_transport_pl_reset(transport);
828 spc_state = SCTP_ADDR_UNREACHABLE;
829 } else {
830 sctp_transport_dst_release(transport);
831 ulp_notify = false;
832 }
833 break;
834
835 case SCTP_TRANSPORT_PF:
836 transport->state = SCTP_PF;
837 if (asoc->pf_expose != SCTP_PF_EXPOSE_ENABLE)
838 ulp_notify = false;
839 else
840 spc_state = SCTP_ADDR_POTENTIALLY_FAILED;
841 break;
842
843 default:
844 return;
845 }
846
847 /* Generate and send a SCTP_PEER_ADDR_CHANGE notification
848 * to the user.
849 */
850 if (ulp_notify)
851 sctp_ulpevent_notify_peer_addr_change(transport,
852 spc_state, error);
853
854 /* Select new active and retran paths. */
855 sctp_select_active_and_retran_path(asoc);
856}
857
858/* Hold a reference to an association. */
859void sctp_association_hold(struct sctp_association *asoc)
860{
861 refcount_inc(&asoc->base.refcnt);
862}
863
864/* Release a reference to an association and cleanup
865 * if there are no more references.
866 */
867void sctp_association_put(struct sctp_association *asoc)
868{
869 if (refcount_dec_and_test(&asoc->base.refcnt))
870 sctp_association_destroy(asoc);
871}
872
873/* Allocate the next TSN, Transmission Sequence Number, for the given
874 * association.
875 */
876__u32 sctp_association_get_next_tsn(struct sctp_association *asoc)
877{
878 /* From Section 1.6 Serial Number Arithmetic:
879 * Transmission Sequence Numbers wrap around when they reach
880 * 2**32 - 1. That is, the next TSN a DATA chunk MUST use
881 * after transmitting TSN = 2**32 - 1 is TSN = 0.
882 */
883 __u32 retval = asoc->next_tsn;
884 asoc->next_tsn++;
885 asoc->unack_data++;
886
887 return retval;
888}
889
890/* Compare two addresses to see if they match. Wildcard addresses
891 * only match themselves.
892 */
893int sctp_cmp_addr_exact(const union sctp_addr *ss1,
894 const union sctp_addr *ss2)
895{
896 struct sctp_af *af;
897
898 af = sctp_get_af_specific(ss1->sa.sa_family);
899 if (unlikely(!af))
900 return 0;
901
902 return af->cmp_addr(ss1, ss2);
903}
904
905/* Return an ecne chunk to get prepended to a packet.
906 * Note: We are sly and return a shared, prealloced chunk. FIXME:
907 * No we don't, but we could/should.
908 */
909struct sctp_chunk *sctp_get_ecne_prepend(struct sctp_association *asoc)
910{
911 if (!asoc->need_ecne)
912 return NULL;
913
914 /* Send ECNE if needed.
915 * Not being able to allocate a chunk here is not deadly.
916 */
917 return sctp_make_ecne(asoc, asoc->last_ecne_tsn);
918}
919
920/*
921 * Find which transport this TSN was sent on.
922 */
923struct sctp_transport *sctp_assoc_lookup_tsn(struct sctp_association *asoc,
924 __u32 tsn)
925{
926 struct sctp_transport *active;
927 struct sctp_transport *match;
928 struct sctp_transport *transport;
929 struct sctp_chunk *chunk;
930 __be32 key = htonl(tsn);
931
932 match = NULL;
933
934 /*
935 * FIXME: In general, find a more efficient data structure for
936 * searching.
937 */
938
939 /*
940 * The general strategy is to search each transport's transmitted
941 * list. Return which transport this TSN lives on.
942 *
943 * Let's be hopeful and check the active_path first.
944 * Another optimization would be to know if there is only one
945 * outbound path and not have to look for the TSN at all.
946 *
947 */
948
949 active = asoc->peer.active_path;
950
951 list_for_each_entry(chunk, &active->transmitted,
952 transmitted_list) {
953
954 if (key == chunk->subh.data_hdr->tsn) {
955 match = active;
956 goto out;
957 }
958 }
959
960 /* If not found, go search all the other transports. */
961 list_for_each_entry(transport, &asoc->peer.transport_addr_list,
962 transports) {
963
964 if (transport == active)
965 continue;
966 list_for_each_entry(chunk, &transport->transmitted,
967 transmitted_list) {
968 if (key == chunk->subh.data_hdr->tsn) {
969 match = transport;
970 goto out;
971 }
972 }
973 }
974out:
975 return match;
976}
977
978/* Do delayed input processing. This is scheduled by sctp_rcv(). */
979static void sctp_assoc_bh_rcv(struct work_struct *work)
980{
981 struct sctp_association *asoc =
982 container_of(work, struct sctp_association,
983 base.inqueue.immediate);
984 struct net *net = asoc->base.net;
985 union sctp_subtype subtype;
986 struct sctp_endpoint *ep;
987 struct sctp_chunk *chunk;
988 struct sctp_inq *inqueue;
989 int first_time = 1; /* is this the first time through the loop */
990 int error = 0;
991 int state;
992
993 /* The association should be held so we should be safe. */
994 ep = asoc->ep;
995
996 inqueue = &asoc->base.inqueue;
997 sctp_association_hold(asoc);
998 while (NULL != (chunk = sctp_inq_pop(inqueue))) {
999 state = asoc->state;
1000 subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type);
1001
1002 /* If the first chunk in the packet is AUTH, do special
1003 * processing specified in Section 6.3 of SCTP-AUTH spec
1004 */
1005 if (first_time && subtype.chunk == SCTP_CID_AUTH) {
1006 struct sctp_chunkhdr *next_hdr;
1007
1008 next_hdr = sctp_inq_peek(inqueue);
1009 if (!next_hdr)
1010 goto normal;
1011
1012 /* If the next chunk is COOKIE-ECHO, skip the AUTH
1013 * chunk while saving a pointer to it so we can do
1014 * Authentication later (during cookie-echo
1015 * processing).
1016 */
1017 if (next_hdr->type == SCTP_CID_COOKIE_ECHO) {
1018 chunk->auth_chunk = skb_clone(chunk->skb,
1019 GFP_ATOMIC);
1020 chunk->auth = 1;
1021 continue;
1022 }
1023 }
1024
1025normal:
1026 /* SCTP-AUTH, Section 6.3:
1027 * The receiver has a list of chunk types which it expects
1028 * to be received only after an AUTH-chunk. This list has
1029 * been sent to the peer during the association setup. It
1030 * MUST silently discard these chunks if they are not placed
1031 * after an AUTH chunk in the packet.
1032 */
1033 if (sctp_auth_recv_cid(subtype.chunk, asoc) && !chunk->auth)
1034 continue;
1035
1036 /* Remember where the last DATA chunk came from so we
1037 * know where to send the SACK.
1038 */
1039 if (sctp_chunk_is_data(chunk))
1040 asoc->peer.last_data_from = chunk->transport;
1041 else {
1042 SCTP_INC_STATS(net, SCTP_MIB_INCTRLCHUNKS);
1043 asoc->stats.ictrlchunks++;
1044 if (chunk->chunk_hdr->type == SCTP_CID_SACK)
1045 asoc->stats.isacks++;
1046 }
1047
1048 if (chunk->transport)
1049 chunk->transport->last_time_heard = ktime_get();
1050
1051 /* Run through the state machine. */
1052 error = sctp_do_sm(net, SCTP_EVENT_T_CHUNK, subtype,
1053 state, ep, asoc, chunk, GFP_ATOMIC);
1054
1055 /* Check to see if the association is freed in response to
1056 * the incoming chunk. If so, get out of the while loop.
1057 */
1058 if (asoc->base.dead)
1059 break;
1060
1061 /* If there is an error on chunk, discard this packet. */
1062 if (error && chunk)
1063 chunk->pdiscard = 1;
1064
1065 if (first_time)
1066 first_time = 0;
1067 }
1068 sctp_association_put(asoc);
1069}
1070
1071/* This routine moves an association from its old sk to a new sk. */
1072void sctp_assoc_migrate(struct sctp_association *assoc, struct sock *newsk)
1073{
1074 struct sctp_sock *newsp = sctp_sk(newsk);
1075 struct sock *oldsk = assoc->base.sk;
1076
1077 /* Delete the association from the old endpoint's list of
1078 * associations.
1079 */
1080 list_del_init(&assoc->asocs);
1081
1082 /* Decrement the backlog value for a TCP-style socket. */
1083 if (sctp_style(oldsk, TCP))
1084 sk_acceptq_removed(oldsk);
1085
1086 /* Release references to the old endpoint and the sock. */
1087 sctp_endpoint_put(assoc->ep);
1088 sock_put(assoc->base.sk);
1089
1090 /* Get a reference to the new endpoint. */
1091 assoc->ep = newsp->ep;
1092 sctp_endpoint_hold(assoc->ep);
1093
1094 /* Get a reference to the new sock. */
1095 assoc->base.sk = newsk;
1096 sock_hold(assoc->base.sk);
1097
1098 /* Add the association to the new endpoint's list of associations. */
1099 sctp_endpoint_add_asoc(newsp->ep, assoc);
1100}
1101
1102/* Update an association (possibly from unexpected COOKIE-ECHO processing). */
1103int sctp_assoc_update(struct sctp_association *asoc,
1104 struct sctp_association *new)
1105{
1106 struct sctp_transport *trans;
1107 struct list_head *pos, *temp;
1108
1109 /* Copy in new parameters of peer. */
1110 asoc->c = new->c;
1111 asoc->peer.rwnd = new->peer.rwnd;
1112 asoc->peer.sack_needed = new->peer.sack_needed;
1113 asoc->peer.auth_capable = new->peer.auth_capable;
1114 asoc->peer.i = new->peer.i;
1115
1116 if (!sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL,
1117 asoc->peer.i.initial_tsn, GFP_ATOMIC))
1118 return -ENOMEM;
1119
1120 /* Remove any peer addresses not present in the new association. */
1121 list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
1122 trans = list_entry(pos, struct sctp_transport, transports);
1123 if (!sctp_assoc_lookup_paddr(new, &trans->ipaddr)) {
1124 sctp_assoc_rm_peer(asoc, trans);
1125 continue;
1126 }
1127
1128 if (asoc->state >= SCTP_STATE_ESTABLISHED)
1129 sctp_transport_reset(trans);
1130 }
1131
1132 /* If the case is A (association restart, see RFC 4960,
1133 * Section 5.2.4), use initial_tsn as next_tsn. If the case is B,
1134 * use the current next_tsn in case data sent to the peer
1135 * has been discarded and needs retransmission.
1136 */
1137 if (asoc->state >= SCTP_STATE_ESTABLISHED) {
1138 asoc->next_tsn = new->next_tsn;
1139 asoc->ctsn_ack_point = new->ctsn_ack_point;
1140 asoc->adv_peer_ack_point = new->adv_peer_ack_point;
1141
1142 /* Reinitialize SSN for both local streams
1143 * and peer's streams.
1144 */
1145 sctp_stream_clear(&asoc->stream);
1146
1147 /* Flush the ULP reassembly and ordered queue.
1148 * Any data there will now be stale and will
1149 * cause problems.
1150 */
1151 sctp_ulpq_flush(&asoc->ulpq);
1152
1153 /* reset the overall association error count so
1154 * that the restarted association doesn't get torn
1155 * down on the next retransmission timer.
1156 */
1157 asoc->overall_error_count = 0;
1158
1159 } else {
1160 /* Add any peer addresses from the new association. */
1161 list_for_each_entry(trans, &new->peer.transport_addr_list,
1162 transports)
1163 if (!sctp_assoc_add_peer(asoc, &trans->ipaddr,
1164 GFP_ATOMIC, trans->state))
1165 return -ENOMEM;
1166
1167 asoc->ctsn_ack_point = asoc->next_tsn - 1;
1168 asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
1169
1170 if (sctp_state(asoc, COOKIE_WAIT))
1171 sctp_stream_update(&asoc->stream, &new->stream);
1172
1173 /* get a new assoc id if we don't have one yet. */
1174 if (sctp_assoc_set_id(asoc, GFP_ATOMIC))
1175 return -ENOMEM;
1176 }
1177
1178 /* SCTP-AUTH: Save the peer parameters from the new association
1179 * and also move the association shared keys over.
1180 */
1181 kfree(asoc->peer.peer_random);
1182 asoc->peer.peer_random = new->peer.peer_random;
1183 new->peer.peer_random = NULL;
1184
1185 kfree(asoc->peer.peer_chunks);
1186 asoc->peer.peer_chunks = new->peer.peer_chunks;
1187 new->peer.peer_chunks = NULL;
1188
1189 kfree(asoc->peer.peer_hmacs);
1190 asoc->peer.peer_hmacs = new->peer.peer_hmacs;
1191 new->peer.peer_hmacs = NULL;
1192
1193 return sctp_auth_asoc_init_active_key(asoc, GFP_ATOMIC);
1194}
1195
1196/* Update the retran path for sending a retransmitted packet.
1197 * See also RFC4960, 6.4. Multi-Homed SCTP Endpoints:
1198 *
1199 * When there is outbound data to send and the primary path
1200 * becomes inactive (e.g., due to failures), or where the
1201 * SCTP user explicitly requests to send data to an
1202 * inactive destination transport address, before reporting
1203 * an error to its ULP, the SCTP endpoint should try to send
1204 * the data to an alternate active destination transport
1205 * address if one exists.
1206 *
1207 * When retransmitting data that timed out, if the endpoint
1208 * is multihomed, it should consider each source-destination
1209 * address pair in its retransmission selection policy.
1210 * When retransmitting timed-out data, the endpoint should
1211 * attempt to pick the most divergent source-destination
1212 * pair from the original source-destination pair to which
1213 * the packet was transmitted.
1214 *
1215 * Note: Rules for picking the most divergent source-destination
1216 * pair are an implementation decision and are not specified
1217 * within this document.
1218 *
1219 * Our basic strategy is to round-robin transports in priorities
1220 * according to sctp_trans_score(), e.g., if no such
1221 * transport with state SCTP_ACTIVE exists, round-robin through
1222 * SCTP_UNKNOWN, etc. You get the picture.
1223 */
1224static u8 sctp_trans_score(const struct sctp_transport *trans)
1225{
1226 switch (trans->state) {
1227 case SCTP_ACTIVE:
1228 return 3; /* best case */
1229 case SCTP_UNKNOWN:
1230 return 2;
1231 case SCTP_PF:
1232 return 1;
1233 default: /* case SCTP_INACTIVE */
1234 return 0; /* worst case */
1235 }
1236}
1237
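/* Break a tie between two equally scored transports: prefer the one with
 * fewer errors and, among equals, the one heard from most recently.
 */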
1238static struct sctp_transport *sctp_trans_elect_tie(struct sctp_transport *trans1,
1239 struct sctp_transport *trans2)
1240{
1241 if (trans1->error_count > trans2->error_count) {
1242 return trans2;
1243 } else if (trans1->error_count == trans2->error_count &&
1244 ktime_after(trans2->last_time_heard,
1245 trans1->last_time_heard)) {
1246 return trans2;
1247 } else {
1248 return trans1;
1249 }
1250}
1251
1252static struct sctp_transport *sctp_trans_elect_best(struct sctp_transport *curr,
1253 struct sctp_transport *best)
1254{
1255 u8 score_curr, score_best;
1256
1257 if (best == NULL || curr == best)
1258 return curr;
1259
1260 score_curr = sctp_trans_score(curr);
1261 score_best = sctp_trans_score(best);
1262
1263 /* First, try a score-based selection if both transport states
1264 * differ. If we're in a tie, lets try to make a more clever
1265 * decision here based on error counts and last time heard.
1266 */
1267 if (score_curr > score_best)
1268 return curr;
1269 else if (score_curr == score_best)
1270 return sctp_trans_elect_tie(best, curr);
1271 else
1272 return best;
1273}
1274
1275void sctp_assoc_update_retran_path(struct sctp_association *asoc)
1276{
1277 struct sctp_transport *trans = asoc->peer.retran_path;
1278 struct sctp_transport *trans_next = NULL;
1279
1280 /* We're done as we only have the one and only path. */
1281 if (asoc->peer.transport_count == 1)
1282 return;
1283 /* If active_path and retran_path are the same and active,
1284 * then this is the only active path. Use it.
1285 */
1286 if (asoc->peer.active_path == asoc->peer.retran_path &&
1287 asoc->peer.active_path->state == SCTP_ACTIVE)
1288 return;
1289
1290 /* Iterate from retran_path's successor back to retran_path. */
1291 for (trans = list_next_entry(trans, transports); 1;
1292 trans = list_next_entry(trans, transports)) {
1293 /* Manually skip the head element. */
1294 if (&trans->transports == &asoc->peer.transport_addr_list)
1295 continue;
1296 if (trans->state == SCTP_UNCONFIRMED)
1297 continue;
1298 trans_next = sctp_trans_elect_best(trans, trans_next);
1299 /* Active is good enough for immediate return. */
1300 if (trans_next->state == SCTP_ACTIVE)
1301 break;
1302 /* We've reached the end, time to update path. */
1303 if (trans == asoc->peer.retran_path)
1304 break;
1305 }
1306
1307 asoc->peer.retran_path = trans_next;
1308
1309 pr_debug("%s: association:%p updated new path to addr:%pISpc\n",
1310 __func__, asoc, &asoc->peer.retran_path->ipaddr.sa);
1311}
1312
1313static void sctp_select_active_and_retran_path(struct sctp_association *asoc)
1314{
1315 struct sctp_transport *trans, *trans_pri = NULL, *trans_sec = NULL;
1316 struct sctp_transport *trans_pf = NULL;
1317
1318 /* Look for the two most recently used active transports. */
1319 list_for_each_entry(trans, &asoc->peer.transport_addr_list,
1320 transports) {
1321 /* Skip uninteresting transports. */
1322 if (trans->state == SCTP_INACTIVE ||
1323 trans->state == SCTP_UNCONFIRMED)
1324 continue;
1325 /* Keep track of the best PF transport from our
1326 * list in case we don't find an active one.
1327 */
1328 if (trans->state == SCTP_PF) {
1329 trans_pf = sctp_trans_elect_best(trans, trans_pf);
1330 continue;
1331 }
1332 /* For active transports, pick the most recent ones. */
1333 if (trans_pri == NULL ||
1334 ktime_after(trans->last_time_heard,
1335 trans_pri->last_time_heard)) {
1336 trans_sec = trans_pri;
1337 trans_pri = trans;
1338 } else if (trans_sec == NULL ||
1339 ktime_after(trans->last_time_heard,
1340 trans_sec->last_time_heard)) {
1341 trans_sec = trans;
1342 }
1343 }
1344
1345 /* RFC 2960 6.4 Multi-Homed SCTP Endpoints
1346 *
1347 * By default, an endpoint should always transmit to the primary
1348 * path, unless the SCTP user explicitly specifies the
1349 * destination transport address (and possibly source transport
1350 * address) to use. [If the primary is active but not most recent,
1351 * bump the most recently used transport.]
1352 */
1353 if ((asoc->peer.primary_path->state == SCTP_ACTIVE ||
1354 asoc->peer.primary_path->state == SCTP_UNKNOWN) &&
1355 asoc->peer.primary_path != trans_pri) {
1356 trans_sec = trans_pri;
1357 trans_pri = asoc->peer.primary_path;
1358 }
1359
1360 /* We did not find anything useful for a possible retransmission
1361 * path; either the primary path we found is the same as
1362 * the current one, or we did not find an active one at all.
1363 */
1364 if (trans_sec == NULL)
1365 trans_sec = trans_pri;
1366
1367 /* If we failed to find a usable transport, just camp on the
1368 * active or pick a PF iff it's the better choice.
1369 */
1370 if (trans_pri == NULL) {
1371 trans_pri = sctp_trans_elect_best(asoc->peer.active_path, trans_pf);
1372 trans_sec = trans_pri;
1373 }
1374
1375 /* Set the active and retran transports. */
1376 asoc->peer.active_path = trans_pri;
1377 asoc->peer.retran_path = trans_sec;
1378}
1379
1380struct sctp_transport *
1381sctp_assoc_choose_alter_transport(struct sctp_association *asoc,
1382 struct sctp_transport *last_sent_to)
1383{
1384 /* If this is the first time packet is sent, use the active path,
1385 * else use the retran path. If the last packet was sent over the
1386 * retran path, update the retran path and use it.
1387 */
1388 if (last_sent_to == NULL) {
1389 return asoc->peer.active_path;
1390 } else {
1391 if (last_sent_to == asoc->peer.retran_path)
1392 sctp_assoc_update_retran_path(asoc);
1393
1394 return asoc->peer.retran_path;
1395 }
1396}
1397
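/* Recompute the association's fragmentation point from the current path MTU,
 * capped by any user-requested fragment size and by the maximum chunk length,
 * then rounded down to a multiple of four.
 */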
1398void sctp_assoc_update_frag_point(struct sctp_association *asoc)
1399{
1400 int frag = sctp_mtu_payload(sctp_sk(asoc->base.sk), asoc->pathmtu,
1401 sctp_datachk_len(&asoc->stream));
1402
1403 if (asoc->user_frag)
1404 frag = min_t(int, frag, asoc->user_frag);
1405
1406 frag = min_t(int, frag, SCTP_MAX_CHUNK_LEN -
1407 sctp_datachk_len(&asoc->stream));
1408
1409 asoc->frag_point = SCTP_TRUNC4(frag);
1410}
1411
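/* Record a new path MTU for the association and refresh the fragmentation
 * point if the value actually changed.
 */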
1412void sctp_assoc_set_pmtu(struct sctp_association *asoc, __u32 pmtu)
1413{
1414 if (asoc->pathmtu != pmtu) {
1415 asoc->pathmtu = pmtu;
1416 sctp_assoc_update_frag_point(asoc);
1417 }
1418
1419 pr_debug("%s: asoc:%p, pmtu:%d, frag_point:%d\n", __func__, asoc,
1420 asoc->pathmtu, asoc->frag_point);
1421}
1422
1423/* Update the association's pmtu and frag_point by going through all the
1424 * transports. This routine is called when a transport's PMTU has changed.
1425 */
1426void sctp_assoc_sync_pmtu(struct sctp_association *asoc)
1427{
1428 struct sctp_transport *t;
1429 __u32 pmtu = 0;
1430
1431 if (!asoc)
1432 return;
1433
1434 /* Get the lowest pmtu of all the transports. */
1435 list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) {
1436 if (t->pmtu_pending && t->dst) {
1437 sctp_transport_update_pmtu(t,
1438 atomic_read(&t->mtu_info));
1439 t->pmtu_pending = 0;
1440 }
1441 if (!pmtu || (t->pathmtu < pmtu))
1442 pmtu = t->pathmtu;
1443 }
1444
1445 sctp_assoc_set_pmtu(asoc, pmtu);
1446}
1447
1448/* Should we send a SACK to update our peer? */
1449static inline bool sctp_peer_needs_update(struct sctp_association *asoc)
1450{
1451 struct net *net = asoc->base.net;
1452
1453 switch (asoc->state) {
1454 case SCTP_STATE_ESTABLISHED:
1455 case SCTP_STATE_SHUTDOWN_PENDING:
1456 case SCTP_STATE_SHUTDOWN_RECEIVED:
1457 case SCTP_STATE_SHUTDOWN_SENT:
1458 if ((asoc->rwnd > asoc->a_rwnd) &&
1459 ((asoc->rwnd - asoc->a_rwnd) >= max_t(__u32,
1460 (asoc->base.sk->sk_rcvbuf >> net->sctp.rwnd_upd_shift),
1461 asoc->pathmtu)))
1462 return true;
1463 break;
1464 default:
1465 break;
1466 }
1467 return false;
1468}
1469
1470/* Increase asoc's rwnd by len and send any window update SACK if needed. */
1471void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned int len)
1472{
1473 struct sctp_chunk *sack;
1474 struct timer_list *timer;
1475
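	/* Pay back any bytes previously accepted beyond the advertised
	 * window (rwnd_over) before growing the window itself.
	 */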
1476 if (asoc->rwnd_over) {
1477 if (asoc->rwnd_over >= len) {
1478 asoc->rwnd_over -= len;
1479 } else {
1480 asoc->rwnd += (len - asoc->rwnd_over);
1481 asoc->rwnd_over = 0;
1482 }
1483 } else {
1484 asoc->rwnd += len;
1485 }
1486
1487 /* If we had window pressure, start recovering it
1488 * once our rwnd had reached the accumulated pressure
1489 * threshold. The idea is to recover slowly, but up
1490 * to the initial advertised window.
1491 */
1492 if (asoc->rwnd_press) {
1493 int change = min(asoc->pathmtu, asoc->rwnd_press);
1494 asoc->rwnd += change;
1495 asoc->rwnd_press -= change;
1496 }
1497
1498 pr_debug("%s: asoc:%p rwnd increased by %d to (%u, %u) - %u\n",
1499 __func__, asoc, len, asoc->rwnd, asoc->rwnd_over,
1500 asoc->a_rwnd);
1501
1502 /* Send a window update SACK if the rwnd has increased by at least the
1503 * larger of the association's PMTU and a fraction of the receive buffer
1504 * (sk_rcvbuf >> rwnd_upd_shift, see sctp_peer_needs_update()). The
1505 * algorithm is similar to the one described in Section 4.2.3.3 of RFC 1122.
1506 */
1507 if (sctp_peer_needs_update(asoc)) {
1508 asoc->a_rwnd = asoc->rwnd;
1509
1510 pr_debug("%s: sending window update SACK- asoc:%p rwnd:%u "
1511 "a_rwnd:%u\n", __func__, asoc, asoc->rwnd,
1512 asoc->a_rwnd);
1513
1514 sack = sctp_make_sack(asoc);
1515 if (!sack)
1516 return;
1517
1518 asoc->peer.sack_needed = 0;
1519
1520 sctp_outq_tail(&asoc->outqueue, sack, GFP_ATOMIC);
1521
1522 /* Stop the SACK timer. */
1523 timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK];
1524 if (del_timer(timer))
1525 sctp_association_put(asoc);
1526 }
1527}
1528
1529/* Decrease asoc's rwnd by len. */
1530void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned int len)
1531{
1532 int rx_count;
1533 int over = 0;
1534
1535 if (unlikely(!asoc->rwnd || asoc->rwnd_over))
1536 pr_debug("%s: association:%p has asoc->rwnd:%u, "
1537 "asoc->rwnd_over:%u!\n", __func__, asoc,
1538 asoc->rwnd, asoc->rwnd_over);
1539
1540 if (asoc->ep->rcvbuf_policy)
1541 rx_count = atomic_read(&asoc->rmem_alloc);
1542 else
1543 rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc);
1544
1545 /* If we've reached or overflowed our receive buffer, announce
1546 * a 0 rwnd if rwnd would still be positive. Store the
1547 * potential pressure overflow so that the window can be restored
1548 * back to original value.
1549 */
1550 if (rx_count >= asoc->base.sk->sk_rcvbuf)
1551 over = 1;
1552
1553 if (asoc->rwnd >= len) {
1554 asoc->rwnd -= len;
1555 if (over) {
1556 asoc->rwnd_press += asoc->rwnd;
1557 asoc->rwnd = 0;
1558 }
1559 } else {
1560 asoc->rwnd_over += len - asoc->rwnd;
1561 asoc->rwnd = 0;
1562 }
1563
1564 pr_debug("%s: asoc:%p rwnd decreased by %d to (%u, %u, %u)\n",
1565 __func__, asoc, len, asoc->rwnd, asoc->rwnd_over,
1566 asoc->rwnd_press);
1567}
1568
1569/* Build the bind address list for the association based on info from the
1570 * local endpoint and the remote peer.
1571 */
1572int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc,
1573 enum sctp_scope scope, gfp_t gfp)
1574{
1575 struct sock *sk = asoc->base.sk;
1576 int flags;
1577
1578 /* Use scoping rules to determine the subset of addresses from
1579 * the endpoint.
1580 */
1581 flags = (PF_INET6 == sk->sk_family) ? SCTP_ADDR6_ALLOWED : 0;
1582 if (!inet_v6_ipv6only(sk))
1583 flags |= SCTP_ADDR4_ALLOWED;
1584 if (asoc->peer.ipv4_address)
1585 flags |= SCTP_ADDR4_PEERSUPP;
1586 if (asoc->peer.ipv6_address)
1587 flags |= SCTP_ADDR6_PEERSUPP;
1588
1589 return sctp_bind_addr_copy(asoc->base.net,
1590 &asoc->base.bind_addr,
1591 &asoc->ep->base.bind_addr,
1592 scope, gfp, flags);
1593}
1594
1595/* Build the association's bind address list from the cookie. */
1596int sctp_assoc_set_bind_addr_from_cookie(struct sctp_association *asoc,
1597 struct sctp_cookie *cookie,
1598 gfp_t gfp)
1599{
1600 struct sctp_init_chunk *peer_init = (struct sctp_init_chunk *)(cookie + 1);
1601 int var_size2 = ntohs(peer_init->chunk_hdr.length);
1602 int var_size3 = cookie->raw_addr_list_len;
1603 __u8 *raw = (__u8 *)peer_init + var_size2;
1604
1605 return sctp_raw_to_bind_addrs(&asoc->base.bind_addr, raw, var_size3,
1606 asoc->ep->base.bind_addr.port, gfp);
1607}
1608
1609/* Lookup laddr in the bind address list of an association. */
1610int sctp_assoc_lookup_laddr(struct sctp_association *asoc,
1611 const union sctp_addr *laddr)
1612{
1613 int found = 0;
1614
1615 if ((asoc->base.bind_addr.port == ntohs(laddr->v4.sin_port)) &&
1616 sctp_bind_addr_match(&asoc->base.bind_addr, laddr,
1617 sctp_sk(asoc->base.sk)))
1618 found = 1;
1619
1620 return found;
1621}
1622
1623/* Set an association id for a given association */
1624int sctp_assoc_set_id(struct sctp_association *asoc, gfp_t gfp)
1625{
1626 bool preload = gfpflags_allow_blocking(gfp);
1627 int ret;
1628
1629 /* If the id is already assigned, keep it. */
1630 if (asoc->assoc_id)
1631 return 0;
1632
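	/* Preload the IDR when the caller's gfp mask allows blocking, since
	 * the allocation below runs under a spinlock with GFP_NOWAIT.
	 */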
1633 if (preload)
1634 idr_preload(gfp);
1635 spin_lock_bh(&sctp_assocs_id_lock);
1636 /* 0, 1, 2 are used as SCTP_FUTURE_ASSOC, SCTP_CURRENT_ASSOC and
1637 * SCTP_ALL_ASSOC, so an available id must be > SCTP_ALL_ASSOC.
1638 */
1639 ret = idr_alloc_cyclic(&sctp_assocs_id, asoc, SCTP_ALL_ASSOC + 1, 0,
1640 GFP_NOWAIT);
1641 spin_unlock_bh(&sctp_assocs_id_lock);
1642 if (preload)
1643 idr_preload_end();
1644 if (ret < 0)
1645 return ret;
1646
1647 asoc->assoc_id = (sctp_assoc_t)ret;
1648 return 0;
1649}
1650
1651/* Free the ASCONF queue */
1652static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc)
1653{
1654 struct sctp_chunk *asconf;
1655 struct sctp_chunk *tmp;
1656
1657 list_for_each_entry_safe(asconf, tmp, &asoc->addip_chunk_list, list) {
1658 list_del_init(&asconf->list);
1659 sctp_chunk_free(asconf);
1660 }
1661}
1662
1663/* Free asconf_ack cache */
1664static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc)
1665{
1666 struct sctp_chunk *ack;
1667 struct sctp_chunk *tmp;
1668
1669 list_for_each_entry_safe(ack, tmp, &asoc->asconf_ack_list,
1670 transmitted_list) {
1671 list_del_init(&ack->transmitted_list);
1672 sctp_chunk_free(ack);
1673 }
1674}
1675
1676/* Clean up the ASCONF_ACK queue */
1677void sctp_assoc_clean_asconf_ack_cache(const struct sctp_association *asoc)
1678{
1679 struct sctp_chunk *ack;
1680 struct sctp_chunk *tmp;
1681
1682 /* We can remove all the entries from the queue up to
1683 * the "Peer-Sequence-Number".
1684 */
1685 list_for_each_entry_safe(ack, tmp, &asoc->asconf_ack_list,
1686 transmitted_list) {
1687 if (ack->subh.addip_hdr->serial ==
1688 htonl(asoc->peer.addip_serial))
1689 break;
1690
1691 list_del_init(&ack->transmitted_list);
1692 sctp_chunk_free(ack);
1693 }
1694}
1695
1696/* Find the ASCONF_ACK whose serial number matches ASCONF */
1697struct sctp_chunk *sctp_assoc_lookup_asconf_ack(
1698 const struct sctp_association *asoc,
1699 __be32 serial)
1700{
1701 struct sctp_chunk *ack;
1702
1703 /* Walk through the list of cached ASCONF-ACKs and find the
1704 * ack chunk whose serial number matches that of the request.
1705 */
1706 list_for_each_entry(ack, &asoc->asconf_ack_list, transmitted_list) {
1707 if (sctp_chunk_pending(ack))
1708 continue;
1709 if (ack->subh.addip_hdr->serial == serial) {
1710 sctp_chunk_hold(ack);
1711 return ack;
1712 }
1713 }
1714
1715 return NULL;
1716}
1717
1718void sctp_asconf_queue_teardown(struct sctp_association *asoc)
1719{
1720 /* Free any cached ASCONF_ACK chunk. */
1721 sctp_assoc_free_asconf_acks(asoc);
1722
1723 /* Free the ASCONF queue. */
1724 sctp_assoc_free_asconf_queue(asoc);
1725
1726 /* Free any cached ASCONF chunk. */
1727 if (asoc->addip_last_asconf)
1728 sctp_chunk_free(asoc->addip_last_asconf);
1729}