/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 *
 * This file is part of the SCTP kernel implementation
 *
 * These functions handle output processing.
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, see
 * <http://www.gnu.org/licenses/>.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson          <karl@athena.chicago.il.us>
 *    Jon Grimm             <jgrimm@austin.ibm.com>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/time.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <net/inet_ecn.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/net_namespace.h>

#include <linux/socket.h> /* for sa_family_t */
#include <net/sock.h>

#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#include <net/sctp/checksum.h>

/* Forward declarations for private helpers. */
static sctp_xmit_t __sctp_packet_append_chunk(struct sctp_packet *packet,
					      struct sctp_chunk *chunk);
static sctp_xmit_t sctp_packet_can_append_data(struct sctp_packet *packet,
					       struct sctp_chunk *chunk);
static void sctp_packet_append_data(struct sctp_packet *packet,
				    struct sctp_chunk *chunk);
static sctp_xmit_t sctp_packet_will_fit(struct sctp_packet *packet,
					struct sctp_chunk *chunk,
					u16 chunk_len);

static void sctp_packet_reset(struct sctp_packet *packet)
{
	packet->size = packet->overhead;
	packet->has_cookie_echo = 0;
	packet->has_sack = 0;
	packet->has_data = 0;
	packet->has_auth = 0;
	packet->ipfragok = 0;
	packet->auth = NULL;
}

/* Config a packet.
 * This appears to be a followup set of initializations.
 */
struct sctp_packet *sctp_packet_config(struct sctp_packet *packet,
				       __u32 vtag, int ecn_capable)
{
	struct sctp_chunk *chunk = NULL;

	pr_debug("%s: packet:%p vtag:0x%x\n", __func__, packet, vtag);

	packet->vtag = vtag;

	if (ecn_capable && sctp_packet_empty(packet)) {
		chunk = sctp_get_ecne_prepend(packet->transport->asoc);

		/* If there is a prepend chunk, stick it on the list before
		 * any other chunks get appended.
		 */
		if (chunk)
			sctp_packet_append_chunk(packet, chunk);
	}

	return packet;
}

/* Initialize the packet structure. */
struct sctp_packet *sctp_packet_init(struct sctp_packet *packet,
				     struct sctp_transport *transport,
				     __u16 sport, __u16 dport)
{
	struct sctp_association *asoc = transport->asoc;
	size_t overhead;

	pr_debug("%s: packet:%p transport:%p\n", __func__, packet, transport);

	packet->transport = transport;
	packet->source_port = sport;
	packet->destination_port = dport;
	INIT_LIST_HEAD(&packet->chunk_list);
	if (asoc) {
		struct sctp_sock *sp = sctp_sk(asoc->base.sk);
		overhead = sp->pf->af->net_header_len;
	} else {
		overhead = sizeof(struct ipv6hdr);
	}
	overhead += sizeof(struct sctphdr);
	packet->overhead = overhead;
	sctp_packet_reset(packet);
	packet->vtag = 0;

	return packet;
}
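
/* Illustrative usage sketch only (not code from this file; 'tp', 'asoc',
 * 'chunk' and the port values are placeholders for what a caller such as
 * sctp_outq_flush() would normally supply):
 *
 *	struct sctp_packet pkt;
 *
 *	sctp_packet_init(&pkt, tp, sport, dport);
 *	sctp_packet_config(&pkt, asoc->peer.i.init_tag, 0);
 *	sctp_packet_transmit_chunk(&pkt, chunk, 0);
 *	sctp_packet_transmit(&pkt);
 *	sctp_packet_free(&pkt);
 */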

/* Free a packet. */
void sctp_packet_free(struct sctp_packet *packet)
{
	struct sctp_chunk *chunk, *tmp;

	pr_debug("%s: packet:%p\n", __func__, packet);

	list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
		list_del_init(&chunk->list);
		sctp_chunk_free(chunk);
	}
}

/* This routine tries to append the chunk to the offered packet. If adding
 * the chunk causes the packet to exceed the path MTU and COOKIE_ECHO chunk
 * is not present in the packet, it transmits the input packet.
 * Data can be bundled with a packet containing a COOKIE_ECHO chunk as long
 * as it can fit in the packet, but any more data that does not fit in this
 * packet can be sent only after receiving the COOKIE_ACK.
 */
sctp_xmit_t sctp_packet_transmit_chunk(struct sctp_packet *packet,
				       struct sctp_chunk *chunk,
				       int one_packet)
{
	sctp_xmit_t retval;
	int error = 0;

	pr_debug("%s: packet:%p chunk:%p\n", __func__, packet, chunk);

	switch ((retval = (sctp_packet_append_chunk(packet, chunk)))) {
	case SCTP_XMIT_PMTU_FULL:
		if (!packet->has_cookie_echo) {
			error = sctp_packet_transmit(packet);
			if (error < 0)
				chunk->skb->sk->sk_err = -error;

			/* If we have an empty packet, then we can NOT ever
			 * return PMTU_FULL.
			 */
			if (!one_packet)
				retval = sctp_packet_append_chunk(packet,
								  chunk);
		}
		break;

	case SCTP_XMIT_RWND_FULL:
	case SCTP_XMIT_OK:
	case SCTP_XMIT_NAGLE_DELAY:
		break;
	}

	return retval;
}

/* Try to bundle an auth chunk into the packet. */
static sctp_xmit_t sctp_packet_bundle_auth(struct sctp_packet *pkt,
					   struct sctp_chunk *chunk)
{
	struct sctp_association *asoc = pkt->transport->asoc;
	struct sctp_chunk *auth;
	sctp_xmit_t retval = SCTP_XMIT_OK;

	/* if we don't have an association, we can't do authentication */
	if (!asoc)
		return retval;

	/* See if this is an auth chunk we are bundling or if
	 * auth is already bundled.
	 */
	if (chunk->chunk_hdr->type == SCTP_CID_AUTH || pkt->has_auth)
		return retval;

	/* if the peer did not request this chunk to be authenticated,
	 * don't do it
	 */
	if (!chunk->auth)
		return retval;

	auth = sctp_make_auth(asoc);
	if (!auth)
		return retval;

	retval = __sctp_packet_append_chunk(pkt, auth);

	if (retval != SCTP_XMIT_OK)
		sctp_chunk_free(auth);

	return retval;
}

/* Try to bundle a SACK with the packet. */
static sctp_xmit_t sctp_packet_bundle_sack(struct sctp_packet *pkt,
					   struct sctp_chunk *chunk)
{
	sctp_xmit_t retval = SCTP_XMIT_OK;

	/* If sending DATA and haven't already bundled a SACK, try to
	 * bundle one into the packet.
	 */
	if (sctp_chunk_is_data(chunk) && !pkt->has_sack &&
	    !pkt->has_cookie_echo) {
		struct sctp_association *asoc;
		struct timer_list *timer;
		asoc = pkt->transport->asoc;
		timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK];

		/* If the SACK timer is running, we have a pending SACK */
		if (timer_pending(timer)) {
			struct sctp_chunk *sack;

			if (pkt->transport->sack_generation !=
			    pkt->transport->asoc->peer.sack_generation)
				return retval;

			asoc->a_rwnd = asoc->rwnd;
			sack = sctp_make_sack(asoc);
			if (sack) {
				retval = __sctp_packet_append_chunk(pkt, sack);
				if (retval != SCTP_XMIT_OK) {
					sctp_chunk_free(sack);
					goto out;
				}
				asoc->peer.sack_needed = 0;
				if (del_timer(timer))
					sctp_association_put(asoc);
			}
		}
	}
out:
	return retval;
}


/* Append a chunk to the offered packet reporting back any inability to do
 * so.
 */
static sctp_xmit_t __sctp_packet_append_chunk(struct sctp_packet *packet,
					      struct sctp_chunk *chunk)
{
	sctp_xmit_t retval = SCTP_XMIT_OK;
	__u16 chunk_len = WORD_ROUND(ntohs(chunk->chunk_hdr->length));

	/* Check to see if this chunk will fit into the packet */
	retval = sctp_packet_will_fit(packet, chunk, chunk_len);
	if (retval != SCTP_XMIT_OK)
		goto finish;

	/* We believe that this chunk is OK to add to the packet */
	switch (chunk->chunk_hdr->type) {
	case SCTP_CID_DATA:
		/* Account for the data being in the packet */
		sctp_packet_append_data(packet, chunk);
		/* Disallow SACK bundling after DATA. */
		packet->has_sack = 1;
		/* Disallow AUTH bundling after DATA */
		packet->has_auth = 1;
		/* Let it be known that the packet has DATA in it */
		packet->has_data = 1;
		/* timestamp the chunk for rtx purposes */
		chunk->sent_at = jiffies;
		break;
	case SCTP_CID_COOKIE_ECHO:
		packet->has_cookie_echo = 1;
		break;

	case SCTP_CID_SACK:
		packet->has_sack = 1;
		if (chunk->asoc)
			chunk->asoc->stats.osacks++;
		break;

	case SCTP_CID_AUTH:
		packet->has_auth = 1;
		packet->auth = chunk;
		break;
	}

	/* It is OK to send this chunk. */
	list_add_tail(&chunk->list, &packet->chunk_list);
	packet->size += chunk_len;
	chunk->transport = packet->transport;
finish:
	return retval;
}

/* Append a chunk to the offered packet reporting back any inability to do
 * so.
 */
sctp_xmit_t sctp_packet_append_chunk(struct sctp_packet *packet,
				     struct sctp_chunk *chunk)
{
	sctp_xmit_t retval = SCTP_XMIT_OK;

	pr_debug("%s: packet:%p chunk:%p\n", __func__, packet, chunk);

	/* Data chunks are special. Before seeing what else we can
	 * bundle into this packet, check to see if we are allowed to
	 * send this DATA.
	 */
	if (sctp_chunk_is_data(chunk)) {
		retval = sctp_packet_can_append_data(packet, chunk);
		if (retval != SCTP_XMIT_OK)
			goto finish;
	}

	/* Try to bundle AUTH chunk */
	retval = sctp_packet_bundle_auth(packet, chunk);
	if (retval != SCTP_XMIT_OK)
		goto finish;

	/* Try to bundle SACK chunk */
	retval = sctp_packet_bundle_sack(packet, chunk);
	if (retval != SCTP_XMIT_OK)
		goto finish;

	retval = __sctp_packet_append_chunk(packet, chunk);

finish:
	return retval;
}

static void sctp_packet_release_owner(struct sk_buff *skb)
{
	sk_free(skb->sk);
}

static void sctp_packet_set_owner_w(struct sk_buff *skb, struct sock *sk)
{
	skb_orphan(skb);
	skb->sk = sk;
	skb->destructor = sctp_packet_release_owner;

	/*
	 * The data chunks have already been accounted for in sctp_sendmsg(),
	 * therefore only reserve a single byte to keep socket around until
	 * the packet has been transmitted.
	 */
	atomic_inc(&sk->sk_wmem_alloc);
}

/* All packets are sent to the network through this function from
 * sctp_outq_tail().
 *
 * The return value is a normal kernel error return value.
 */
int sctp_packet_transmit(struct sctp_packet *packet)
{
	struct sctp_transport *tp = packet->transport;
	struct sctp_association *asoc = tp->asoc;
	struct sctphdr *sh;
	struct sk_buff *nskb;
	struct sctp_chunk *chunk, *tmp;
	struct sock *sk;
	int err = 0;
	int padding;		/* How much padding do we need?  */
	__u8 has_data = 0;
	struct dst_entry *dst;
	unsigned char *auth = NULL;	/* pointer to auth in skb data */

	pr_debug("%s: packet:%p\n", __func__, packet);

	/* Do NOT generate a chunkless packet. */
	if (list_empty(&packet->chunk_list))
		return err;

	/* Set up convenience variables... */
	chunk = list_entry(packet->chunk_list.next, struct sctp_chunk, list);
	sk = chunk->skb->sk;

	/* Allocate the new skb.  */
	nskb = alloc_skb(packet->size + LL_MAX_HEADER, GFP_ATOMIC);
	if (!nskb)
		goto nomem;

	/* Make sure the outbound skb has enough header room reserved. */
	skb_reserve(nskb, packet->overhead + LL_MAX_HEADER);

	/* Set the owning socket so that we know where to get the
	 * destination IP address.
	 */
	sctp_packet_set_owner_w(nskb, sk);

	if (!sctp_transport_dst_check(tp)) {
		sctp_transport_route(tp, NULL, sctp_sk(sk));
		if (asoc && (asoc->param_flags & SPP_PMTUD_ENABLE)) {
			sctp_assoc_sync_pmtu(sk, asoc);
		}
	}
	dst = dst_clone(tp->dst);
	if (!dst)
		goto no_route;
	skb_dst_set(nskb, dst);

	/* Build the SCTP header.  */
	sh = (struct sctphdr *)skb_push(nskb, sizeof(struct sctphdr));
	skb_reset_transport_header(nskb);
	sh->source = htons(packet->source_port);
	sh->dest = htons(packet->destination_port);

	/* From 6.8 Adler-32 Checksum Calculation:
	 *    After the packet is constructed (containing the SCTP common
	 *    header and one or more control or DATA chunks), the
	 *    transmitter shall:
	 *
	 *    1) Fill in the proper Verification Tag in the SCTP common
	 *       header and initialize the checksum field to 0's.
	 */
	sh->vtag = htonl(packet->vtag);
	sh->checksum = 0;

	/**
	 * 6.10 Bundling
	 *
	 *   An endpoint bundles chunks by simply including multiple
	 *   chunks in one outbound SCTP packet.  ...
	 */

	/**
	 * 3.2  Chunk Field Descriptions
	 *
	 * The total length of a chunk (including Type, Length and
	 * Value fields) MUST be a multiple of 4 bytes.  If the length
	 * of the chunk is not a multiple of 4 bytes, the sender MUST
	 * pad the chunk with all zero bytes and this padding is not
	 * included in the chunk length field.  The sender should
	 * never pad with more than 3 bytes.
	 *
	 * [This whole comment explains WORD_ROUND() below.]
	 */
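
	/* Worked example (illustrative only): a chunk whose length field
	 * reads 22 goes on the wire as WORD_ROUND(22) = 24 bytes; the two
	 * trailing pad bytes are zero and are NOT counted in
	 * chunk_hdr->length.  WORD_ROUND(len) is ((len + 3) & ~3).
	 */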

	pr_debug("***sctp_transmit_packet***\n");

	list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
		list_del_init(&chunk->list);
		if (sctp_chunk_is_data(chunk)) {
			/* 6.3.1 C4) When data is in flight and when allowed
			 * by rule C5, a new RTT measurement MUST be made each
			 * round trip.  Furthermore, new RTT measurements
			 * SHOULD be made no more than once per round-trip
			 * for a given destination transport address.
			 */

			if (!chunk->resent && !tp->rto_pending) {
				chunk->rtt_in_progress = 1;
				tp->rto_pending = 1;
			}

			has_data = 1;
		}

		padding = WORD_ROUND(chunk->skb->len) - chunk->skb->len;
		if (padding)
			memset(skb_put(chunk->skb, padding), 0, padding);

		/* if this is the auth chunk that we are adding,
		 * store pointer where it will be added and put
		 * the auth into the packet.
		 */
		if (chunk == packet->auth)
			auth = skb_tail_pointer(nskb);

		memcpy(skb_put(nskb, chunk->skb->len),
		       chunk->skb->data, chunk->skb->len);

		pr_debug("*** Chunk:%p[%s] %s 0x%x, length:%d, chunk->skb->len:%d, "
			 "rtt_in_progress:%d\n", chunk,
			 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)),
			 chunk->has_tsn ? "TSN" : "No TSN",
			 chunk->has_tsn ? ntohl(chunk->subh.data_hdr->tsn) : 0,
			 ntohs(chunk->chunk_hdr->length), chunk->skb->len,
			 chunk->rtt_in_progress);

		/*
		 * If this is a control chunk, this is our last
		 * reference. Free data chunks after they've been
		 * acknowledged or have failed.
		 */
		if (!sctp_chunk_is_data(chunk))
			sctp_chunk_free(chunk);
	}

	/* SCTP-AUTH, Section 6.2
	 *    The sender MUST calculate the MAC as described in RFC2104 [2]
	 *    using the hash function H as described by the MAC Identifier and
	 *    the shared association key K based on the endpoint pair shared key
	 *    described by the shared key identifier.  The 'data' used for the
	 *    computation of the AUTH-chunk is given by the AUTH chunk with its
	 *    HMAC field set to zero (as shown in Figure 6) followed by all
	 *    chunks that are placed after the AUTH chunk in the SCTP packet.
	 */
	if (auth)
		sctp_auth_calculate_hmac(asoc, nskb,
					 (struct sctp_auth_chunk *)auth,
					 GFP_ATOMIC);

	/* 2) Calculate the Adler-32 checksum of the whole packet,
	 *    including the SCTP common header and all the
	 *    chunks.
	 *
	 * Note: Adler-32 is no longer applicable, as it has been replaced
	 * by CRC32-C as described in <draft-ietf-tsvwg-sctpcsum-02.txt>.
	 */
	if (!sctp_checksum_disable) {
		if (!(dst->dev->features & NETIF_F_SCTP_CSUM) ||
		    (dst_xfrm(dst) != NULL) || packet->ipfragok) {
			sh->checksum = sctp_compute_cksum(nskb, 0);
		} else {
			/* no need to seed pseudo checksum for SCTP */
			nskb->ip_summed = CHECKSUM_PARTIAL;
			nskb->csum_start = skb_transport_header(nskb) - nskb->head;
			nskb->csum_offset = offsetof(struct sctphdr, checksum);
		}
	}
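
	/* Illustrative note (not from the original file): with
	 * CHECKSUM_PARTIAL the device computes the CRC32c itself;
	 * csum_start marks the SCTP common header and csum_offset is 8,
	 * since the checksum field follows the two 16-bit ports and the
	 * 32-bit verification tag.
	 */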

	/* IP layer ECN support
	 * From RFC 2481
	 *  "The ECN-Capable Transport (ECT) bit would be set by the
	 *   data sender to indicate that the end-points of the
	 *   transport protocol are ECN-capable."
	 *
	 * Now setting the ECT bit all the time, as it should not cause
	 * any problems protocol-wise even if our peer ignores it.
	 *
	 * Note: The IPv6 layer checks this bit too, later during
	 * transmission.  See IP6_ECN_flow_xmit().
	 */
	tp->af_specific->ecn_capable(nskb->sk);

	/* Set up the IP options.  */
	/* BUG: not implemented
	 * For v4 this all lives somewhere in sk->sk_opt...
	 */

	/* Dump that on IP!  */
	if (asoc) {
		asoc->stats.opackets++;
		if (asoc->peer.last_sent_to != tp)
			/* Considering the multiple CPU scenario, this is a
			 * "correcter" place for last_sent_to.  --xguo
			 */
			asoc->peer.last_sent_to = tp;
	}

	if (has_data) {
		struct timer_list *timer;
		unsigned long timeout;

		/* Restart the AUTOCLOSE timer when sending data. */
		if (sctp_state(asoc, ESTABLISHED) &&
		    asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]) {
			timer = &asoc->timers[SCTP_EVENT_TIMEOUT_AUTOCLOSE];
			timeout = asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE];

			if (!mod_timer(timer, jiffies + timeout))
				sctp_association_hold(asoc);
		}
	}

	pr_debug("***sctp_transmit_packet*** skb->len:%d\n", nskb->len);

	nskb->local_df = packet->ipfragok;
	tp->af_specific->sctp_xmit(nskb, tp);

out:
	sctp_packet_reset(packet);
	return err;
no_route:
	kfree_skb(nskb);
	IP_INC_STATS_BH(sock_net(asoc->base.sk), IPSTATS_MIB_OUTNOROUTES);

	/* FIXME: Returning the 'err' will affect all the associations
	 * associated with a socket, although only one of the paths of the
	 * association is unreachable.
	 * The real failure of a transport or association can be passed on
	 * to the user via notifications. So setting this error may not be
	 * required.
	 */
	 /* err = -EHOSTUNREACH; */
err:
	/* Control chunks are unreliable so just drop them.  DATA chunks
	 * will get resent or dropped later.
	 */

	list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
		list_del_init(&chunk->list);
		if (!sctp_chunk_is_data(chunk))
			sctp_chunk_free(chunk);
	}
	goto out;
nomem:
	err = -ENOMEM;
	goto err;
}

/********************************************************************
 * 2nd Level Abstractions
 ********************************************************************/

/* This private function checks to see if a chunk can be added */
static sctp_xmit_t sctp_packet_can_append_data(struct sctp_packet *packet,
					       struct sctp_chunk *chunk)
{
	sctp_xmit_t retval = SCTP_XMIT_OK;
	size_t datasize, rwnd, inflight, flight_size;
	struct sctp_transport *transport = packet->transport;
	struct sctp_association *asoc = transport->asoc;
	struct sctp_outq *q = &asoc->outqueue;

	/* RFC 2960 6.1  Transmission of DATA Chunks
	 *
	 * A) At any given time, the data sender MUST NOT transmit new data to
	 * any destination transport address if its peer's rwnd indicates
	 * that the peer has no buffer space (i.e. rwnd is 0, see Section
	 * 6.2.1).  However, regardless of the value of rwnd (including if it
	 * is 0), the data sender can always have one DATA chunk in flight to
	 * the receiver if allowed by cwnd (see rule B below).  This rule
	 * allows the sender to probe for a change in rwnd that the sender
	 * missed due to the SACK having been lost in transit from the data
	 * receiver to the data sender.
	 */

	rwnd = asoc->peer.rwnd;
	inflight = q->outstanding_bytes;
	flight_size = transport->flight_size;

	datasize = sctp_data_size(chunk);

	if (datasize > rwnd) {
		if (inflight > 0) {
			/* We have (at least) one data chunk in flight,
			 * so we can't fall back to rule 6.1 B).
			 */
			retval = SCTP_XMIT_RWND_FULL;
			goto finish;
		}
	}

	/* RFC 2960 6.1  Transmission of DATA Chunks
	 *
	 * B) At any given time, the sender MUST NOT transmit new data
	 * to a given transport address if it has cwnd or more bytes
	 * of data outstanding to that transport address.
	 */
	/* RFC 7.2.4 & the Implementers Guide 2.8.
	 *
	 * 3) ...
	 * When a Fast Retransmit is being performed the sender SHOULD
	 * ignore the value of cwnd and SHOULD NOT delay retransmission.
	 */
	if (chunk->fast_retransmit != SCTP_NEED_FRTX)
		if (flight_size >= transport->cwnd) {
			retval = SCTP_XMIT_RWND_FULL;
			goto finish;
		}

	/* Nagle's algorithm to solve small-packet problem:
	 * Inhibit the sending of new chunks when new outgoing data arrives
	 * if any previously transmitted data on the connection remains
	 * unacknowledged.
	 */
	if (!sctp_sk(asoc->base.sk)->nodelay && sctp_packet_empty(packet) &&
	    inflight && sctp_state(asoc, ESTABLISHED)) {
		unsigned int max = transport->pathmtu - packet->overhead;
		unsigned int len = chunk->skb->len + q->out_qlen;

		/* Check whether this chunk and all the rest of pending
		 * data will fit or delay in hopes of bundling a full
		 * sized packet.
		 * Don't delay large message writes that may have been
		 * fragmented into small pieces.
		 */
		if ((len < max) && chunk->msg->can_delay) {
			retval = SCTP_XMIT_NAGLE_DELAY;
			goto finish;
		}
	}

finish:
	return retval;
}
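
/* Worked example for the Nagle test above (illustrative values only):
 * with pathmtu 1500 and overhead 32 (IPv4 plus SCTP common header), max
 * is 1468.  A 100-byte chunk with 60 more bytes queued gives len = 160,
 * well below max, so while unacked data is in flight the send is delayed
 * (SCTP_XMIT_NAGLE_DELAY) in the hope of bundling a full-sized packet;
 * once len reaches max, the chunk goes out immediately.
 */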

/* This private function does the bookkeeping when adding a DATA chunk. */
static void sctp_packet_append_data(struct sctp_packet *packet,
				    struct sctp_chunk *chunk)
{
	struct sctp_transport *transport = packet->transport;
	size_t datasize = sctp_data_size(chunk);
	struct sctp_association *asoc = transport->asoc;
	u32 rwnd = asoc->peer.rwnd;

	/* Keep track of how many bytes are in flight over this transport. */
	transport->flight_size += datasize;

	/* Keep track of how many bytes are in flight to the receiver. */
	asoc->outqueue.outstanding_bytes += datasize;

	/* Update our view of the receiver's rwnd. */
	if (datasize < rwnd)
		rwnd -= datasize;
	else
		rwnd = 0;

	asoc->peer.rwnd = rwnd;
	/* Has been accepted for transmission. */
	if (!asoc->peer.prsctp_capable)
		chunk->msg->can_abandon = 0;
	sctp_chunk_assign_tsn(chunk);
	sctp_chunk_assign_ssn(chunk);
}
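
/* Example of the rwnd bookkeeping above (illustrative numbers): if the
 * peer last advertised rwnd = 4380 and we accept a 1200-byte DATA chunk,
 * our local view drops to 3180; a 5000-byte chunk would clamp it to 0
 * rather than letting it go negative.
 */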

static sctp_xmit_t sctp_packet_will_fit(struct sctp_packet *packet,
					struct sctp_chunk *chunk,
					u16 chunk_len)
{
	size_t psize;
	size_t pmtu;
	int too_big;
	sctp_xmit_t retval = SCTP_XMIT_OK;

	psize = packet->size;
	pmtu = ((packet->transport->asoc) ?
		(packet->transport->asoc->pathmtu) :
		(packet->transport->pathmtu));

	too_big = (psize + chunk_len > pmtu);

	/* Decide if we need to fragment or resubmit later. */
	if (too_big) {
		/* It's OK to fragment at IP level if any one of the following
		 * is true:
		 * 1. The packet is empty (meaning this chunk is greater
		 *    than the MTU)
		 * 2. The chunk we are adding is a control chunk
		 * 3. The packet doesn't have any data in it yet and data
		 *    requires authentication.
		 */
		if (sctp_packet_empty(packet) || !sctp_chunk_is_data(chunk) ||
		    (!packet->has_data && chunk->auth)) {
			/* We no longer do re-fragmentation.
			 * Just fragment at the IP layer, if we
			 * actually hit this condition
			 */
			packet->ipfragok = 1;
		} else {
			retval = SCTP_XMIT_PMTU_FULL;
		}
	}

	return retval;
}
// SPDX-License-Identifier: GPL-2.0-or-later
/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 *
 * This file is part of the SCTP kernel implementation
 *
 * These functions handle output processing.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson          <karl@athena.chicago.il.us>
 *    Jon Grimm             <jgrimm@austin.ibm.com>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/time.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <net/inet_ecn.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/net_namespace.h>

#include <linux/socket.h> /* for sa_family_t */
#include <net/sock.h>

#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#include <net/sctp/checksum.h>

/* Forward declarations for private helpers. */
static enum sctp_xmit __sctp_packet_append_chunk(struct sctp_packet *packet,
						 struct sctp_chunk *chunk);
static enum sctp_xmit sctp_packet_can_append_data(struct sctp_packet *packet,
						  struct sctp_chunk *chunk);
static void sctp_packet_append_data(struct sctp_packet *packet,
				    struct sctp_chunk *chunk);
static enum sctp_xmit sctp_packet_will_fit(struct sctp_packet *packet,
					   struct sctp_chunk *chunk,
					   u16 chunk_len);

static void sctp_packet_reset(struct sctp_packet *packet)
{
	/* sctp_packet_transmit() relies on this to reset size to the
	 * current overhead after sending packets.
	 */
	packet->size = packet->overhead;

	packet->has_cookie_echo = 0;
	packet->has_sack = 0;
	packet->has_data = 0;
	packet->has_auth = 0;
	packet->ipfragok = 0;
	packet->auth = NULL;
}

/* Config a packet.
 * This appears to be a followup set of initializations.
 */
void sctp_packet_config(struct sctp_packet *packet, __u32 vtag,
			int ecn_capable)
{
	struct sctp_transport *tp = packet->transport;
	struct sctp_association *asoc = tp->asoc;
	struct sctp_sock *sp = NULL;
	struct sock *sk;

	pr_debug("%s: packet:%p vtag:0x%x\n", __func__, packet, vtag);
	packet->vtag = vtag;

	/* do the following jobs only once for a flush schedule */
	if (!sctp_packet_empty(packet))
		return;

	/* set packet max_size with pathmtu, then calculate overhead */
	packet->max_size = tp->pathmtu;

	if (asoc) {
		sk = asoc->base.sk;
		sp = sctp_sk(sk);
	}
	packet->overhead = sctp_mtu_payload(sp, 0, 0);
	packet->size = packet->overhead;

	if (!asoc)
		return;

	/* update dst or transport pathmtu if in need */
	if (!sctp_transport_dst_check(tp)) {
		sctp_transport_route(tp, NULL, sp);
		if (asoc->param_flags & SPP_PMTUD_ENABLE)
			sctp_assoc_sync_pmtu(asoc);
	} else if (!sctp_transport_pmtu_check(tp)) {
		if (asoc->param_flags & SPP_PMTUD_ENABLE)
			sctp_assoc_sync_pmtu(asoc);
	}

	if (asoc->pmtu_pending) {
		if (asoc->param_flags & SPP_PMTUD_ENABLE)
			sctp_assoc_sync_pmtu(asoc);
		asoc->pmtu_pending = 0;
	}

	/* If there is a prepend chunk, stick it on the list before
	 * any other chunks get appended.
	 */
	if (ecn_capable) {
		struct sctp_chunk *chunk = sctp_get_ecne_prepend(asoc);

		if (chunk)
			sctp_packet_append_chunk(packet, chunk);
	}

	if (!tp->dst)
		return;

	/* set packet max_size with gso_max_size if gso is enabled */
	rcu_read_lock();
	if (__sk_dst_get(sk) != tp->dst) {
		dst_hold(tp->dst);
		sk_setup_caps(sk, tp->dst);
	}
	packet->max_size = sk_can_gso(sk) ? tp->dst->dev->gso_max_size
					  : asoc->pathmtu;
	rcu_read_unlock();
}
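
/* Illustrative note (the figure is an assumption, not from this file):
 * with a GSO-capable device, max_size becomes dev->gso_max_size (commonly
 * 65536) rather than the path MTU, so many MTU-sized SCTP packets can be
 * packed into one GSO skb and segmented later by the stack or the NIC.
 */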

/* Initialize the packet structure. */
void sctp_packet_init(struct sctp_packet *packet,
		      struct sctp_transport *transport,
		      __u16 sport, __u16 dport)
{
	pr_debug("%s: packet:%p transport:%p\n", __func__, packet, transport);

	packet->transport = transport;
	packet->source_port = sport;
	packet->destination_port = dport;
	INIT_LIST_HEAD(&packet->chunk_list);
	/* The overhead will be calculated by sctp_packet_config() */
	packet->overhead = 0;
	sctp_packet_reset(packet);
	packet->vtag = 0;
}

/* Free a packet. */
void sctp_packet_free(struct sctp_packet *packet)
{
	struct sctp_chunk *chunk, *tmp;

	pr_debug("%s: packet:%p\n", __func__, packet);

	list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
		list_del_init(&chunk->list);
		sctp_chunk_free(chunk);
	}
}

/* This routine tries to append the chunk to the offered packet. If adding
 * the chunk causes the packet to exceed the path MTU and COOKIE_ECHO chunk
 * is not present in the packet, it transmits the input packet.
 * Data can be bundled with a packet containing a COOKIE_ECHO chunk as long
 * as it can fit in the packet, but any more data that does not fit in this
 * packet can be sent only after receiving the COOKIE_ACK.
 */
enum sctp_xmit sctp_packet_transmit_chunk(struct sctp_packet *packet,
					  struct sctp_chunk *chunk,
					  int one_packet, gfp_t gfp)
{
	enum sctp_xmit retval;

	pr_debug("%s: packet:%p size:%zu chunk:%p size:%d\n", __func__,
		 packet, packet->size, chunk, chunk->skb ? chunk->skb->len : -1);

	switch ((retval = (sctp_packet_append_chunk(packet, chunk)))) {
	case SCTP_XMIT_PMTU_FULL:
		if (!packet->has_cookie_echo) {
			int error = 0;

			error = sctp_packet_transmit(packet, gfp);
			if (error < 0)
				chunk->skb->sk->sk_err = -error;

			/* If we have an empty packet, then we can NOT ever
			 * return PMTU_FULL.
			 */
			if (!one_packet)
				retval = sctp_packet_append_chunk(packet,
								  chunk);
		}
		break;

	case SCTP_XMIT_RWND_FULL:
	case SCTP_XMIT_OK:
	case SCTP_XMIT_DELAY:
		break;
	}

	return retval;
}

/* Try to bundle an auth chunk into the packet. */
static enum sctp_xmit sctp_packet_bundle_auth(struct sctp_packet *pkt,
					      struct sctp_chunk *chunk)
{
	struct sctp_association *asoc = pkt->transport->asoc;
	enum sctp_xmit retval = SCTP_XMIT_OK;
	struct sctp_chunk *auth;

	/* if we don't have an association, we can't do authentication */
	if (!asoc)
		return retval;

	/* See if this is an auth chunk we are bundling or if
	 * auth is already bundled.
	 */
	if (chunk->chunk_hdr->type == SCTP_CID_AUTH || pkt->has_auth)
		return retval;

	/* if the peer did not request this chunk to be authenticated,
	 * don't do it
	 */
	if (!chunk->auth)
		return retval;

	auth = sctp_make_auth(asoc, chunk->shkey->key_id);
	if (!auth)
		return retval;

	auth->shkey = chunk->shkey;
	sctp_auth_shkey_hold(auth->shkey);

	retval = __sctp_packet_append_chunk(pkt, auth);

	if (retval != SCTP_XMIT_OK)
		sctp_chunk_free(auth);

	return retval;
}

/* Try to bundle a SACK with the packet. */
static enum sctp_xmit sctp_packet_bundle_sack(struct sctp_packet *pkt,
					      struct sctp_chunk *chunk)
{
	enum sctp_xmit retval = SCTP_XMIT_OK;

	/* If sending DATA and haven't already bundled a SACK, try to
	 * bundle one into the packet.
	 */
	if (sctp_chunk_is_data(chunk) && !pkt->has_sack &&
	    !pkt->has_cookie_echo) {
		struct sctp_association *asoc;
		struct timer_list *timer;
		asoc = pkt->transport->asoc;
		timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK];

		/* If the SACK timer is running, we have a pending SACK */
		if (timer_pending(timer)) {
			struct sctp_chunk *sack;

			if (pkt->transport->sack_generation !=
			    pkt->transport->asoc->peer.sack_generation)
				return retval;

			asoc->a_rwnd = asoc->rwnd;
			sack = sctp_make_sack(asoc);
			if (sack) {
				retval = __sctp_packet_append_chunk(pkt, sack);
				if (retval != SCTP_XMIT_OK) {
					sctp_chunk_free(sack);
					goto out;
				}
				SCTP_INC_STATS(asoc->base.net,
					       SCTP_MIB_OUTCTRLCHUNKS);
				asoc->stats.octrlchunks++;
				asoc->peer.sack_needed = 0;
				if (del_timer(timer))
					sctp_association_put(asoc);
			}
		}
	}
out:
	return retval;
}


/* Append a chunk to the offered packet reporting back any inability to do
 * so.
 */
static enum sctp_xmit __sctp_packet_append_chunk(struct sctp_packet *packet,
						 struct sctp_chunk *chunk)
{
	__u16 chunk_len = SCTP_PAD4(ntohs(chunk->chunk_hdr->length));
	enum sctp_xmit retval = SCTP_XMIT_OK;

	/* Check to see if this chunk will fit into the packet */
	retval = sctp_packet_will_fit(packet, chunk, chunk_len);
	if (retval != SCTP_XMIT_OK)
		goto finish;

	/* We believe that this chunk is OK to add to the packet */
	switch (chunk->chunk_hdr->type) {
	case SCTP_CID_DATA:
	case SCTP_CID_I_DATA:
		/* Account for the data being in the packet */
		sctp_packet_append_data(packet, chunk);
		/* Disallow SACK bundling after DATA. */
		packet->has_sack = 1;
		/* Disallow AUTH bundling after DATA */
		packet->has_auth = 1;
		/* Let it be known that the packet has DATA in it */
		packet->has_data = 1;
		/* timestamp the chunk for rtx purposes */
		chunk->sent_at = jiffies;
		/* Mainly used for prsctp RTX policy */
		chunk->sent_count++;
		break;
	case SCTP_CID_COOKIE_ECHO:
		packet->has_cookie_echo = 1;
		break;

	case SCTP_CID_SACK:
		packet->has_sack = 1;
		if (chunk->asoc)
			chunk->asoc->stats.osacks++;
		break;

	case SCTP_CID_AUTH:
		packet->has_auth = 1;
		packet->auth = chunk;
		break;
	}

	/* It is OK to send this chunk. */
	list_add_tail(&chunk->list, &packet->chunk_list);
	packet->size += chunk_len;
	chunk->transport = packet->transport;
finish:
	return retval;
}

/* Append a chunk to the offered packet reporting back any inability to do
 * so.
 */
enum sctp_xmit sctp_packet_append_chunk(struct sctp_packet *packet,
					struct sctp_chunk *chunk)
{
	enum sctp_xmit retval = SCTP_XMIT_OK;

	pr_debug("%s: packet:%p chunk:%p\n", __func__, packet, chunk);

	/* Data chunks are special. Before seeing what else we can
	 * bundle into this packet, check to see if we are allowed to
	 * send this DATA.
	 */
	if (sctp_chunk_is_data(chunk)) {
		retval = sctp_packet_can_append_data(packet, chunk);
		if (retval != SCTP_XMIT_OK)
			goto finish;
	}

	/* Try to bundle AUTH chunk */
	retval = sctp_packet_bundle_auth(packet, chunk);
	if (retval != SCTP_XMIT_OK)
		goto finish;

	/* Try to bundle SACK chunk */
	retval = sctp_packet_bundle_sack(packet, chunk);
	if (retval != SCTP_XMIT_OK)
		goto finish;

	retval = __sctp_packet_append_chunk(packet, chunk);

finish:
	return retval;
}

static void sctp_packet_gso_append(struct sk_buff *head, struct sk_buff *skb)
{
	if (SCTP_OUTPUT_CB(head)->last == head)
		skb_shinfo(head)->frag_list = skb;
	else
		SCTP_OUTPUT_CB(head)->last->next = skb;
	SCTP_OUTPUT_CB(head)->last = skb;

	head->truesize += skb->truesize;
	head->data_len += skb->len;
	head->len += skb->len;
	refcount_add(skb->truesize, &head->sk->sk_wmem_alloc);

	__skb_header_release(skb);
}
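
/* Illustrative sketch of the resulting layout (not code from this file):
 * after appending nskb1 and nskb2, the head skb keeps the SCTP header
 * while the per-MTU payloads hang off its fragment list:
 *
 *	head->frag_list -> nskb1 -> nskb2
 *
 * head->len and head->data_len grow by each nskb->len, so the GSO layer
 * can later segment the chain back into individual packets.
 */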

static int sctp_packet_pack(struct sctp_packet *packet,
			    struct sk_buff *head, int gso, gfp_t gfp)
{
	struct sctp_transport *tp = packet->transport;
	struct sctp_auth_chunk *auth = NULL;
	struct sctp_chunk *chunk, *tmp;
	int pkt_count = 0, pkt_size;
	struct sock *sk = head->sk;
	struct sk_buff *nskb;
	int auth_len = 0;

	if (gso) {
		skb_shinfo(head)->gso_type = sk->sk_gso_type;
		SCTP_OUTPUT_CB(head)->last = head;
	} else {
		nskb = head;
		pkt_size = packet->size;
		goto merge;
	}

	do {
		/* calculate the pkt_size and alloc nskb */
		pkt_size = packet->overhead;
		list_for_each_entry_safe(chunk, tmp, &packet->chunk_list,
					 list) {
			int padded = SCTP_PAD4(chunk->skb->len);

			if (chunk == packet->auth)
				auth_len = padded;
			else if (auth_len + padded + packet->overhead >
				 tp->pathmtu)
				return 0;
			else if (pkt_size + padded > tp->pathmtu)
				break;
			pkt_size += padded;
		}
		nskb = alloc_skb(pkt_size + MAX_HEADER, gfp);
		if (!nskb)
			return 0;
		skb_reserve(nskb, packet->overhead + MAX_HEADER);

merge:
		/* merge chunks into nskb and append nskb into head list */
		pkt_size -= packet->overhead;
		list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
			int padding;

			list_del_init(&chunk->list);
			if (sctp_chunk_is_data(chunk)) {
				if (!sctp_chunk_retransmitted(chunk) &&
				    !tp->rto_pending) {
					chunk->rtt_in_progress = 1;
					tp->rto_pending = 1;
				}
			}

			padding = SCTP_PAD4(chunk->skb->len) - chunk->skb->len;
			if (padding)
				skb_put_zero(chunk->skb, padding);

			if (chunk == packet->auth)
				auth = (struct sctp_auth_chunk *)
						skb_tail_pointer(nskb);

			skb_put_data(nskb, chunk->skb->data, chunk->skb->len);

			pr_debug("*** Chunk:%p[%s] %s 0x%x, length:%d, chunk->skb->len:%d, rtt_in_progress:%d\n",
				 chunk,
				 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)),
				 chunk->has_tsn ? "TSN" : "No TSN",
				 chunk->has_tsn ? ntohl(chunk->subh.data_hdr->tsn) : 0,
				 ntohs(chunk->chunk_hdr->length), chunk->skb->len,
				 chunk->rtt_in_progress);

			pkt_size -= SCTP_PAD4(chunk->skb->len);

			if (!sctp_chunk_is_data(chunk) && chunk != packet->auth)
				sctp_chunk_free(chunk);

			if (!pkt_size)
				break;
		}

		if (auth) {
			sctp_auth_calculate_hmac(tp->asoc, nskb, auth,
						 packet->auth->shkey, gfp);
			/* free auth if no more chunks, or add it back */
			if (list_empty(&packet->chunk_list))
				sctp_chunk_free(packet->auth);
			else
				list_add(&packet->auth->list,
					 &packet->chunk_list);
		}

		if (gso)
			sctp_packet_gso_append(head, nskb);

		pkt_count++;
	} while (!list_empty(&packet->chunk_list));

	if (gso) {
		memset(head->cb, 0, max(sizeof(struct inet_skb_parm),
					sizeof(struct inet6_skb_parm)));
		skb_shinfo(head)->gso_segs = pkt_count;
		skb_shinfo(head)->gso_size = GSO_BY_FRAGS;
		rcu_read_lock();
		if (skb_dst(head) != tp->dst) {
			dst_hold(tp->dst);
			sk_setup_caps(sk, tp->dst);
		}
		rcu_read_unlock();
		goto chksum;
	}

	if (sctp_checksum_disable)
		return 1;

	if (!(skb_dst(head)->dev->features & NETIF_F_SCTP_CRC) ||
	    dst_xfrm(skb_dst(head)) || packet->ipfragok) {
		struct sctphdr *sh =
			(struct sctphdr *)skb_transport_header(head);

		sh->checksum = sctp_compute_cksum(head, 0);
	} else {
chksum:
		head->ip_summed = CHECKSUM_PARTIAL;
		head->csum_not_inet = 1;
		head->csum_start = skb_transport_header(head) - head->head;
		head->csum_offset = offsetof(struct sctphdr, checksum);
	}

	return pkt_count;
}
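
/* Worked example for the sizing loop in sctp_packet_pack() (illustrative
 * numbers only): with pathmtu 1500 and overhead 32, an AUTH chunk padded
 * to 280 bytes sets auth_len = 280.  A later chunk padded to 1200 bytes
 * would need auth_len + 1200 + 32 = 1512 > 1500 in a segment that also
 * carries the AUTH chunk, so the function returns 0 and the caller frees
 * the head skb rather than emitting an unverifiable packet.
 */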

/* All packets are sent to the network through this function from
 * sctp_outq_tail().
 *
 * The return value is always 0 for now.
 */
int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
{
	struct sctp_transport *tp = packet->transport;
	struct sctp_association *asoc = tp->asoc;
	struct sctp_chunk *chunk, *tmp;
	int pkt_count, gso = 0;
	struct dst_entry *dst;
	struct sk_buff *head;
	struct sctphdr *sh;
	struct sock *sk;

	pr_debug("%s: packet:%p\n", __func__, packet);
	if (list_empty(&packet->chunk_list))
		return 0;
	chunk = list_entry(packet->chunk_list.next, struct sctp_chunk, list);
	sk = chunk->skb->sk;

	/* check gso */
	if (packet->size > tp->pathmtu && !packet->ipfragok) {
		if (!sk_can_gso(sk)) {
			pr_err_once("Trying to GSO but underlying device doesn't support it.");
			goto out;
		}
		gso = 1;
	}

	/* alloc head skb */
	head = alloc_skb((gso ? packet->overhead : packet->size) +
			 MAX_HEADER, gfp);
	if (!head)
		goto out;
	skb_reserve(head, packet->overhead + MAX_HEADER);
	skb_set_owner_w(head, sk);

	/* set sctp header */
	sh = skb_push(head, sizeof(struct sctphdr));
	skb_reset_transport_header(head);
	sh->source = htons(packet->source_port);
	sh->dest = htons(packet->destination_port);
	sh->vtag = htonl(packet->vtag);
	sh->checksum = 0;

	/* drop packet if no dst */
	dst = dst_clone(tp->dst);
	if (!dst) {
		IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		kfree_skb(head);
		goto out;
	}
	skb_dst_set(head, dst);

	/* pack up chunks */
	pkt_count = sctp_packet_pack(packet, head, gso, gfp);
	if (!pkt_count) {
		kfree_skb(head);
		goto out;
	}
	pr_debug("***sctp_transmit_packet*** skb->len:%d\n", head->len);

	/* start autoclose timer */
	if (packet->has_data && sctp_state(asoc, ESTABLISHED) &&
	    asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]) {
		struct timer_list *timer =
			&asoc->timers[SCTP_EVENT_TIMEOUT_AUTOCLOSE];
		unsigned long timeout =
			asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE];

		if (!mod_timer(timer, jiffies + timeout))
			sctp_association_hold(asoc);
	}

	/* sctp xmit */
	tp->af_specific->ecn_capable(sk);
	if (asoc) {
		asoc->stats.opackets += pkt_count;
		if (asoc->peer.last_sent_to != tp)
			asoc->peer.last_sent_to = tp;
	}
	head->ignore_df = packet->ipfragok;
	if (tp->dst_pending_confirm)
		skb_set_dst_pending_confirm(head, 1);
	/* neighbour should be confirmed on successful transmission or
	 * positive error
	 */
	if (tp->af_specific->sctp_xmit(head, tp) >= 0 &&
	    tp->dst_pending_confirm)
		tp->dst_pending_confirm = 0;

out:
	list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
		list_del_init(&chunk->list);
		if (!sctp_chunk_is_data(chunk))
			sctp_chunk_free(chunk);
	}
	sctp_packet_reset(packet);
	return 0;
}

/********************************************************************
 * 2nd Level Abstractions
 ********************************************************************/

/* This private function checks to see if a chunk can be added */
static enum sctp_xmit sctp_packet_can_append_data(struct sctp_packet *packet,
						  struct sctp_chunk *chunk)
{
	size_t datasize, rwnd, inflight, flight_size;
	struct sctp_transport *transport = packet->transport;
	struct sctp_association *asoc = transport->asoc;
	struct sctp_outq *q = &asoc->outqueue;

	/* RFC 2960 6.1  Transmission of DATA Chunks
	 *
	 * A) At any given time, the data sender MUST NOT transmit new data to
	 * any destination transport address if its peer's rwnd indicates
	 * that the peer has no buffer space (i.e. rwnd is 0, see Section
	 * 6.2.1).  However, regardless of the value of rwnd (including if it
	 * is 0), the data sender can always have one DATA chunk in flight to
	 * the receiver if allowed by cwnd (see rule B below).  This rule
	 * allows the sender to probe for a change in rwnd that the sender
	 * missed due to the SACK having been lost in transit from the data
	 * receiver to the data sender.
	 */

	rwnd = asoc->peer.rwnd;
	inflight = q->outstanding_bytes;
	flight_size = transport->flight_size;

	datasize = sctp_data_size(chunk);

	if (datasize > rwnd && inflight > 0)
		/* We have (at least) one data chunk in flight,
		 * so we can't fall back to rule 6.1 B).
		 */
		return SCTP_XMIT_RWND_FULL;

	/* RFC 2960 6.1  Transmission of DATA Chunks
	 *
	 * B) At any given time, the sender MUST NOT transmit new data
	 * to a given transport address if it has cwnd or more bytes
	 * of data outstanding to that transport address.
	 */
	/* RFC 7.2.4 & the Implementers Guide 2.8.
	 *
	 * 3) ...
	 * When a Fast Retransmit is being performed the sender SHOULD
	 * ignore the value of cwnd and SHOULD NOT delay retransmission.
	 */
	if (chunk->fast_retransmit != SCTP_NEED_FRTX &&
	    flight_size >= transport->cwnd)
		return SCTP_XMIT_RWND_FULL;

	/* Nagle's algorithm to solve small-packet problem:
	 * Inhibit the sending of new chunks when new outgoing data arrives
	 * if any previously transmitted data on the connection remains
	 * unacknowledged.
	 */

	if ((sctp_sk(asoc->base.sk)->nodelay || inflight == 0) &&
	    !asoc->force_delay)
		/* Nothing unacked */
		return SCTP_XMIT_OK;

	if (!sctp_packet_empty(packet))
		/* Append to packet */
		return SCTP_XMIT_OK;

	if (!sctp_state(asoc, ESTABLISHED))
		return SCTP_XMIT_OK;

	/* Check whether this chunk and all the rest of pending data will fit
	 * or delay in hopes of bundling a full sized packet.
	 */
	if (chunk->skb->len + q->out_qlen > transport->pathmtu -
	    packet->overhead - sctp_datachk_len(&chunk->asoc->stream) - 4)
		/* Enough data queued to fill a packet */
		return SCTP_XMIT_OK;

	/* Don't delay large message writes that may have been fragmented */
	if (!chunk->msg->can_delay)
		return SCTP_XMIT_OK;

	/* Defer until all data acked or packet full */
	return SCTP_XMIT_DELAY;
}
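
/* Worked example for the Nagle decision above (illustrative values only):
 * with pathmtu 1500, overhead 32 and a classic DATA chunk header of 16
 * bytes, the threshold is 1500 - 32 - 16 - 4 = 1448.  A 100-byte chunk
 * with 60 bytes already queued (160 total) stays below it, so with
 * unacked data in flight and can_delay set the result is SCTP_XMIT_DELAY;
 * once the queue can fill a packet, the chunk is sent immediately.
 */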

/* This private function does the bookkeeping when adding a DATA chunk. */
static void sctp_packet_append_data(struct sctp_packet *packet,
				    struct sctp_chunk *chunk)
{
	struct sctp_transport *transport = packet->transport;
	size_t datasize = sctp_data_size(chunk);
	struct sctp_association *asoc = transport->asoc;
	u32 rwnd = asoc->peer.rwnd;

	/* Keep track of how many bytes are in flight over this transport. */
	transport->flight_size += datasize;

	/* Keep track of how many bytes are in flight to the receiver. */
	asoc->outqueue.outstanding_bytes += datasize;

	/* Update our view of the receiver's rwnd. */
	if (datasize < rwnd)
		rwnd -= datasize;
	else
		rwnd = 0;

	asoc->peer.rwnd = rwnd;
	sctp_chunk_assign_tsn(chunk);
	asoc->stream.si->assign_number(chunk);
}

static enum sctp_xmit sctp_packet_will_fit(struct sctp_packet *packet,
					   struct sctp_chunk *chunk,
					   u16 chunk_len)
{
	enum sctp_xmit retval = SCTP_XMIT_OK;
	size_t psize, pmtu, maxsize;

	/* Don't bundle in this packet if this chunk's auth key doesn't
	 * match other chunks already enqueued on this packet. Also,
	 * don't bundle the chunk with auth key if other chunks in this
	 * packet don't have auth key.
	 */
	if ((packet->auth && chunk->shkey != packet->auth->shkey) ||
	    (!packet->auth && chunk->shkey &&
	     chunk->chunk_hdr->type != SCTP_CID_AUTH))
		return SCTP_XMIT_PMTU_FULL;

	psize = packet->size;
	if (packet->transport->asoc)
		pmtu = packet->transport->asoc->pathmtu;
	else
		pmtu = packet->transport->pathmtu;

	/* Decide if we need to fragment or resubmit later. */
	if (psize + chunk_len > pmtu) {
		/* It's OK to fragment at IP level if any one of the following
		 * is true:
		 * 1. The packet is empty (meaning this chunk is greater
		 *    than the MTU)
		 * 2. The packet doesn't have any data in it yet and data
		 *    requires authentication.
		 */
		if (sctp_packet_empty(packet) ||
		    (!packet->has_data && chunk->auth)) {
			/* We no longer do re-fragmentation.
			 * Just fragment at the IP layer, if we
			 * actually hit this condition
			 */
			packet->ipfragok = 1;
			goto out;
		}

		/* Similarly, if this chunk was built before a PMTU
		 * reduction, we have to fragment it at IP level now. So
		 * if the packet already contains something, we need to
		 * flush.
		 */
		maxsize = pmtu - packet->overhead;
		if (packet->auth)
			maxsize -= SCTP_PAD4(packet->auth->skb->len);
		if (chunk_len > maxsize)
			retval = SCTP_XMIT_PMTU_FULL;

		/* It is also okay to fragment if the chunk we are
		 * adding is a control chunk, but only if current packet
		 * is not a GSO one otherwise it causes fragmentation of
		 * a large frame. So in this case we allow the
		 * fragmentation by forcing it to be in a new packet.
		 */
		if (!sctp_chunk_is_data(chunk) && packet->has_data)
			retval = SCTP_XMIT_PMTU_FULL;

		if (psize + chunk_len > packet->max_size)
			/* Hit GSO/PMTU limit, gotta flush */
			retval = SCTP_XMIT_PMTU_FULL;

		if (!packet->transport->burst_limited &&
		    psize + chunk_len > (packet->transport->cwnd >> 1))
			/* Do not allow a single GSO packet to use more
			 * than half of cwnd.
			 */
			retval = SCTP_XMIT_PMTU_FULL;

		if (packet->transport->burst_limited &&
		    psize + chunk_len > (packet->transport->burst_limited >> 1))
			/* Do not allow a single GSO packet to use more
			 * than half of original cwnd.
			 */
			retval = SCTP_XMIT_PMTU_FULL;
		/* Otherwise it will fit in the GSO packet */
	}

out:
	return retval;
}
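
/* Illustrative example of the cwnd cap above (assumed numbers): with
 * burst limiting off and cwnd = 20000, a GSO packet is flushed once
 * psize + chunk_len would exceed 10000 bytes (cwnd >> 1), so no single
 * GSO burst consumes more than half of the congestion window.
 */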